author    noptuno <repollo.marrero@gmail.com>  2023-04-28 02:40:47 +0200
committer noptuno <repollo.marrero@gmail.com>  2023-04-28 02:40:47 +0200
commit    6f6a73987201c9c303047c61389b82ad98b15597 (patch)
tree      bf67eb590d49979d6740bc1e94b4018df48bce98  /venv/lib/python3.9/site-packages/numpy
parent    Resolved merge conflicts and merged pr_218 into STREAMLIT_CHAT_IMPLEMENTATION (diff)
parent    Merging PR_218 openai_rev package with new streamlit chat app (diff)
Diffstat (limited to 'venv/lib/python3.9/site-packages/numpy')
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib | bin 0 -> 138704 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib | bin 0 -> 6786304 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib | bin 0 -> 66728064 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib | bin 0 -> 352704 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/LICENSE.txt | 819
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/__config__.py | 115
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd | 1052
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/__init__.pxd | 1017
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/__init__.py | 439
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/__init__.pyi | 4410
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_distributor_init.py | 10
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_globals.py | 125
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py | 40
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py | 32
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py | 35
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_pytesttester.py | 206
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_pytesttester.pyi | 18
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/__init__.py | 225
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py | 152
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_array_like.py | 165
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_callable.pyi | 338
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_char_codes.py | 111
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py | 249
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py | 43
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_generic_alias.py | 245
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_nbit.py | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py | 92
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_scalars.py | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_shape.py | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi | 445
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_typing/setup.py | 10
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/_version.py | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/__init__.py | 377
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_array_object.py | 1118
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_constants.py | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py | 351
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py | 146
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_dtypes.py | 143
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py | 729
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py | 98
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py | 47
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_set_functions.py | 106
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py | 49
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py | 115
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_typing.py | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py | 37
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/linalg.py | 446
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/setup.py | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py | 7
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py | 375
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py | 142
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py | 111
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py | 23
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py | 27
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/__init__.py | 18
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/_inspect.py | 191
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/_pep440.py | 487
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/py3k.py | 137
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/setup.py | 10
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/conftest.py | 136
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/__init__.py | 178
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/__init__.pyi | 2
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_add_newdocs.py | 7083
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py | 368
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_asarray.py | 140
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_asarray.pyi | 42
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_dtype.py | 365
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py | 117
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_exceptions.py | 280
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_internal.py | 932
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_internal.pyi | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_machar.py | 357
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_methods.py | 297
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so | bin 0 -> 149192 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so | bin 0 -> 5952736 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so | bin 0 -> 35016 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so | bin 0 -> 80432 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so | bin 0 -> 1920480 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_string_helpers.py | 100
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so | bin 0 -> 35288 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_type_aliases.py | 245
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.py | 466
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi | 37
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so | bin 0 -> 60240 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/arrayprint.py | 1701
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/arrayprint.pyi | 142
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/cversions.py | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/defchararray.py | 2900
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/defchararray.pyi | 421
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/einsumfunc.py | 1443
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi | 144
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/fromnumeric.py | 3813
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi | 1049
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/function_base.py | 537
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/function_base.pyi | 187
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/generate_numpy_api.py | 244
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/getlimits.py | 718
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/getlimits.pyi | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/.doxyfile | 2
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h | 1561
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h | 311
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h | 90
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h | 182
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h | 502
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h | 70
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/libdivide.h | 2079
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/multiarray_api.txt | 2501
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h | 251
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h | 1956
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h | 211
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h | 124
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h | 597
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h | 1122
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h | 129
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h | 77
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h | 56
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h | 590
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h | 36
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h | 84
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h | 187
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/oldnumeric.h | 32
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h | 209
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufunc_api.txt | 335
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h | 357
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h | 37
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a | bin 0 -> 141312 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/memmap.py | 337
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/memmap.pyi | 3
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/multiarray.py | 1714
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/multiarray.pyi | 1021
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/numeric.py | 2559
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/numeric.pyi | 657
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/numerictypes.py | 670
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/numerictypes.pyi | 161
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/overrides.py | 225
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/records.py | 1099
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/records.pyi | 234
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/setup.py | 1197
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/setup_common.py | 476
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/shape_base.py | 938
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/shape_base.pyi | 123
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/_locales.py | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/astype_copy.pkl | bin 0 -> 716 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp | 170
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/recarray_from_file.fits | bin 0 -> 8640 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-README.txt | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv | 1375
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv | 412
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log.csv | 271
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv | 1629
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv | 1629
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv | 1370
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv | 1429
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx | 32
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py | 88
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_abc.py | 54
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_api.py | 602
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py | 62
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py | 835
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py | 216
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py | 93
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py | 967
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py | 154
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py | 819
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py | 208
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py | 42
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py | 185
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py | 201
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_cython.py | 137
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py | 2552
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py | 672
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py | 1193
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py | 123
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py | 1827
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py | 1137
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py | 61
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py | 219
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py | 409
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_getlimits.py | 147
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_half.py | 563
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py | 133
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py | 1417
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py | 86
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py | 44
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py | 370
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_machar.py | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py | 931
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py | 425
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py | 215
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py | 9775
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py | 3316
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_nep50_promotions.py | 182
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py | 3608
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py | 564
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py | 642
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_print.py | 200
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py | 44
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_records.py | 520
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_regression.py | 2558
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py | 186
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py | 212
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py | 153
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py | 98
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py | 1087
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py | 382
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py | 824
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_simd.py | 1214
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py | 99
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_strings.py | 99
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py | 2798
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_umath.py | 4419
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py | 75
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py | 622
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/tests/test_unicode.py | 368
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/umath.py | 36
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/core/umath_tests.py | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ctypeslib.py | 547
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ctypeslib.pyi | 251
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/__config__.py | 115
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/__init__.py | 64
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi | 4
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py | 91
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py | 814
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py | 2659
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c | 27
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c | 24
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c | 32
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c | 14
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c | 18
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c | 36
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c | 1
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py | 148
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build.py | 62
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py | 468
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py | 740
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py | 31
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py | 49
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py | 773
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/config.py | 516
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py | 126
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install.py | 79
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py | 40
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py | 24
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py | 27
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py | 329
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/core.py | 215
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py | 683
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py | 315
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/extension.py | 107
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py | 1030
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py | 156
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py | 71
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py | 120
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py | 88
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py | 42
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py | 555
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py | 97
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py | 211
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py | 54
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py | 87
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py | 28
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py | 53
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py | 33
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py | 128
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py | 51
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py | 52
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/from_template.py | 261
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py | 111
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py | 116
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py | 77
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/log.py | 111
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py | 592
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py | 2493
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py | 63
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py | 76
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py | 437
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/setup.py | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/system_info.py | 3172
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py | 808
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py | 176
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py | 217
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py | 43
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py | 55
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py | 30
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py | 44
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py | 34
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py | 42
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py | 82
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py | 84
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py | 79
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py | 323
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py | 141
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/doc/__init__.py | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/doc/constants.py | 412
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/doc/ufuncs.py | 137
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/dual.py | 83
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/__init__.py | 186
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/__init__.pyi | 43
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/__main__.py | 5
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/__version__.py | 1
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py | 890
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py | 880
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/cb_rules.py | 649
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/cfuncs.py | 1522
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/common_rules.py | 149
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/crackfortran.py | 3545
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/diagnose.py | 154
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/f2py2e.py | 704
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py | 264
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/func2subr.py | 303
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/rules.py | 1571
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/setup.py | 71
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c | 1422
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h | 173
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/symbolic.py | 1510
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 | 34
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c | 230
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap | 1
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 | 34
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 | 19
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 | 4
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f | 62
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 | 7
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f | 3
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 | 3
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 | 6
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 | 13
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 | 49
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 | 10
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 | 10
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 | 4
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap | 1
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 | 9
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90 | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f | 5
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 | 8
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 | 8
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod | bin 0 -> 412 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 | 7
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 | 57
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 | 23
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 | 23
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f | 14
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90 | 9
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 | 48
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 | 48
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f | 56
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 | 59
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f | 56
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 | 59
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 | 48
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90 | 44
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90 | 29
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 | 34
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 | 9
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 | 9
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py | 25
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py | 686
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py | 49
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py | 230
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py | 592
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py | 18
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py | 117
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py | 278
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py | 55
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py | 769
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py | 26
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py | 33
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py | 27
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py | 112
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py | 66
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py | 65
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py | 55
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py | 64
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py | 109
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py | 45
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py | 100
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py | 494
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py | 14
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/tests/util.py | 419
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/f2py/use_rules.py | 113
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/__init__.py | 212
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/__init__.pyi | 29
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.py | 1424
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi | 108
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so | bin 0 -> 105600 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/helper.py | 221
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/helper.pyi | 47
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/setup.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py | 167
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py | 308
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/__init__.py | 79
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/__init__.pyi | 245
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/_datasource.py | 704
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/_iotools.py | 897
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/_version.py | 155
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/_version.pyi | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/arraypad.py | 877
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/arraypad.pyi | 85
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/arraysetops.py | 981
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi | 360
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/arrayterator.py | 219
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi | 49
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/format.py | 968
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/format.pyi | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/function_base.py | 5614
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/function_base.pyi | 697
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/histograms.py | 1070
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/histograms.pyi | 47
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/index_tricks.py | 1021
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi | 162
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/mixins.py | 176
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/mixins.pyi | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.py | 1880
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi | 38
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/npyio.py | 2546
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/npyio.pyi | 327
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/polynomial.py | 1452
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/polynomial.pyi | 303
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/recfunctions.py | 1590
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/scimath.py | 625
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/scimath.pyi | 94
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/setup.py | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/shape_base.py | 1280
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/shape_base.pyi | 215
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.py | 547
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi | 80
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npy | bin 0 -> 258 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npz | bin 0 -> 366 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npy | bin 0 -> 341 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npz | bin 0 -> 449 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/data/python3.npy | bin 0 -> 96 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/data/win64python2.npy | bin 0 -> 96 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py | 350
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py | 353
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test__version.py | 64
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py | 1363
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py | 944
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_format.py | 1027
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_function_base.py | 3980
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py | 808
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py | 551
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_io.py | 2749
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py | 1039
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py | 216
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py | 1246
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py | 376
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py | 303
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py | 987
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_regression.py | 247
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py | 787
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py | 645
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py | 548
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py | 478
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py | 104
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py | 178
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/twodim_base.py | 1128
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi | 239
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/type_check.py | 735
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/type_check.pyi | 222
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/ufunclike.py | 268
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi | 66
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/user_array.py | 286
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/utils.py | 1148
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/lib/utils.pyi | 91
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/__init__.py | 80
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/__init__.pyi | 30
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so | bin 0 -> 219312 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so | bin 0 -> 56480 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/linalg.py | 2795
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/linalg.pyi | 282
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/setup.py | 92
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py | 20
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py | 2186
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py | 148
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/__init__.py | 54
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/__init__.pyi | 235
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/bench.py | 130
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/core.py | 8404
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/core.pyi | 472
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/extras.py | 2045
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/extras.pyi | 85
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/mrecords.py | 783
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/mrecords.pyi | 90
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/setup.py | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_core.py | 5564
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py | 84
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py | 1829
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py | 493
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_old_ma.py | 874
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py | 91
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py | 450
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/testutils.py | 288
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/ma/timer_comparison.py | 443
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matlib.py | 378
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.py | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi | 15
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py | 1113
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/setup.py | 12
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py | 453
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py | 354
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py | 231
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py | 93
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py | 16
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_numeric.py | 17
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_regression.py | 31
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/__init__.py | 185
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.py | 1184
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi | 71
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py | 2076
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi | 51
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/hermite.py | 1697
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py | 1689
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.py | 1645
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/legendre.py | 1658
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.py | 1536
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi | 41
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.py | 789
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/setup.py | 10
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py | 619
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py | 600
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py | 555
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py | 556
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py | 537
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py | 568
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py | 611
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py | 121
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py | 530
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py | 216
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/py.typed | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/__init__.pxd | 14
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/__init__.py | 215
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/__init__.pyi | 72
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so | bin 0 -> 482096 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd | 29
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so | bin 0 -> 326192 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_common.pxd | 106
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py | 40
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py | 55
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx | 78
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx | 117
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/cython/setup.py | 46
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py | 84
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py | 67
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so | bin 0 -> 984160 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_generator.pyi | 638
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so | bin 0 -> 116008 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_mt19937.pyi | 22
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so | bin 0 -> 118896 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_pcg64.pyi | 42
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so | bin 0 -> 113152 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_philox.pyi | 36
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_pickle.py | 80
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so | bin 0 -> 74544 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/_sfc64.pyi | 28
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so | bin 0 -> 247352 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/bit_generator.pxd | 35
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/bit_generator.pyi | 109
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/c_distributions.pxd | 114
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a | bin 0 -> 172288 bytes
-rwxr-xr-x  venv/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so | bin 0 -> 842440 bytes
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/mtrand.pyi | 570
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/setup.py | 159
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-1.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-2.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-1.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-2.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-1.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-2.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-1.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-2.csv | 1001
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_direct.py | 478
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_extending.py | 99
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py | 2724
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py | 150
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_random.py | 1745
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py | 2117
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py | 216
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_regression.py | 149
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py | 80
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py | 818
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/setup.py | 32
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/__init__.py | 22
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/__init__.pyi | 56
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/decorators.py | 331
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py | 251
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/noseclasses.py | 364
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/nosetester.py | 545
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/parameterized.py | 432
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py | 2595
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi | 399
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py | 200
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/setup.py | 21
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/tests/test_doctesting.py | 57
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py | 1664
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/testing/utils.py | 29
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test__all__.py | 9
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py | 368
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py | 38
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_matlib.py | 58
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py | 44
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_public_api.py | 506
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_reloading.py | 72
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_scripts.py | 47
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/tests/test_warnings.py | 74
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/__init__.py | 175
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py | 197
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/setup.py | 11
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/tests/__init__.py | 0
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi | 121
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi | 33
-rw-r--r--  venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_like.pyi | 16
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_pad.pyi6
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi14
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi14
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi20
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/char.pyi66
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/chararray.pyi62
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/comparisons.pyi27
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/constants.pyi7
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/datasource.pyi15
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/dtype.pyi20
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi15
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/false_positives.pyi11
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/flatiter.pyi25
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi161
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/histograms.pyi12
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi14
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi53
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi29
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi13
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_version.pyi6
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/linalg.pyi48
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/memmap.pyi5
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/modules.pyi18
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/multiarray.pyi55
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray.pyi11
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi43
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nditer.pyi8
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi17
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/npyio.pyi30
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi13
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/random.pyi61
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/rec.pyi17
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/scalars.pyi92
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/shape_base.pyi8
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi9
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/testing.pyi28
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi37
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/type_check.pyi13
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi21
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi21
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi41
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi5
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi17
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/mypy.ini10
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arithmetic.py594
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_constructors.py137
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_like.py41
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayprint.py37
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayterator.py27
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py131
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/comparisons.py301
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py57
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/einsumfunc.py36
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/flatiter.py16
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/fromnumeric.py260
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/index_tricks.py64
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_utils.py25
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_version.py18
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/literal.py47
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/mod.py149
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/modules.py43
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/multiarray.py76
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py94
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py185
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py47
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numeric.py90
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numerictypes.py47
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/random.py1499
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py248
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple.py165
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple_py3.py6
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunc_config.py50
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunclike.py46
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py17
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py6
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi526
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi205
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi22
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi20
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi60
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi24
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi131
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/char.pyi147
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/chararray.pyi132
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi261
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/constants.pyi52
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi87
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/datasource.pyi21
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/dtype.pyi76
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi35
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/emath.pyi52
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/false_positives.pyi10
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fft.pyi35
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi23
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi297
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi47
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/histograms.pyi19
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi66
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi177
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi111
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi30
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi18
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/linalg.pyi97
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/matrix.pyi69
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/memmap.pyi18
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/mod.pyi147
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/modules.pyi47
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi144
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi21
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi51
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi220
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi35
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nditer.pyi46
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi26
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/npyio.pyi92
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numeric.pyi133
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi42
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/random.pyi1542
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/rec.pyi128
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/scalars.pyi158
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi57
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi28
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/testing.pyi178
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi72
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/type_check.pyi73
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi25
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi29
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi68
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/version.pyi8
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi9
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/test_generic_alias.py188
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py30
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py113
-rw-r--r--venv/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py455
-rw-r--r--venv/lib/python3.9/site-packages/numpy/version.py15
871 files changed, 331170 insertions, 0 deletions
diff --git a/venv/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib b/venv/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib
new file mode 100644
index 00000000..878ef801
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/.dylibs/libgcc_s.1.1.dylib
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib b/venv/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib
new file mode 100755
index 00000000..67437d4e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/.dylibs/libgfortran.5.dylib
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib b/venv/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib
new file mode 100755
index 00000000..525b6f8f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/.dylibs/libopenblas64_.0.dylib
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib b/venv/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib
new file mode 100755
index 00000000..63106b59
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/.dylibs/libquadmath.0.dylib
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/LICENSE.txt b/venv/lib/python3.9/site-packages/numpy/LICENSE.txt
new file mode 100644
index 00000000..977d2fa3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/LICENSE.txt
@@ -0,0 +1,819 @@
+Copyright (c) 2005-2022, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----
+
+This binary distribution of NumPy also bundles the following software:
+
+
+Name: GCC runtime library
+Files: .dylibs/*
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/viewcvs/gcc/
+License: GPLv3 + runtime exception
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+----
+
+Full text of license texts referred to above follows (that they are
+listed below does not necessarily imply the conditions apply to the
+present binary release):
+
+----
+
+GCC RUNTIME LIBRARY EXCEPTION
+
+Version 3.1, 31 March 2009
+
+Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+This GCC Runtime Library Exception ("Exception") is an additional
+permission under section 7 of the GNU General Public License, version
+3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+bears a notice placed by the copyright holder of the file stating that
+the file is governed by GPLv3 along with this Exception.
+
+When you use GCC to compile a program, GCC may combine portions of
+certain GCC header files and runtime libraries with the compiled
+program. The purpose of this Exception is to allow compilation of
+non-GPL (including proprietary) programs to use, in this way, the
+header files and runtime libraries covered by this Exception.
+
+0. Definitions.
+
+A file is an "Independent Module" if it either requires the Runtime
+Library for execution after a Compilation Process, or makes use of an
+interface provided by the Runtime Library, but is not otherwise based
+on the Runtime Library.
+
+"GCC" means a version of the GNU Compiler Collection, with or without
+modifications, governed by version 3 (or a specified later version) of
+the GNU General Public License (GPL) with the option of using any
+subsequent versions published by the FSF.
+
+"GPL-compatible Software" is software whose conditions of propagation,
+modification and use would permit combination with GCC in accord with
+the license of GCC.
+
+"Target Code" refers to output from any compiler for a real or virtual
+target processor architecture, in executable form or suitable for
+input to an assembler, loader, linker and/or execution
+phase. Notwithstanding that, Target Code does not include data in any
+format that is used as a compiler intermediate representation, or used
+for producing a compiler intermediate representation.
+
+The "Compilation Process" transforms code entirely represented in
+non-intermediate languages designed for human-written code, and/or in
+Java Virtual Machine byte code, into Target Code. Thus, for example,
+use of source code generators and preprocessors need not be considered
+part of the Compilation Process, since the Compilation Process can be
+understood as starting with the output of the generators or
+preprocessors.
+
+A Compilation Process is "Eligible" if it is done using GCC, alone or
+with other GPL-compatible software, or if it is done without using any
+work based on GCC. For example, using non-GPL-compatible Software to
+optimize any GCC intermediate representations would not qualify as an
+Eligible Compilation Process.
+
+1. Grant of Additional Permission.
+
+You have permission to propagate a work of Target Code formed by
+combining the Runtime Library with Independent Modules, even if such
+propagation would otherwise violate the terms of GPLv3, provided that
+all Target Code was generated by Eligible Compilation Processes. You
+may then convey such a combination under terms of your choice,
+consistent with the licensing of the Independent Modules.
+
+2. No Weakening of GCC Copyleft.
+
+The availability of this Exception does not imply any general
+presumption that third-party software is unaffected by the copyleft
+requirements of the license of GCC.
+
+----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
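As a concrete illustration of the template above, a filled-in notice for a
hypothetical program (the name, year, and author here are invented) would begin:

    frobnicate -- a small tool that tidies widget metadata.
    Copyright (C) 2023 Jane Hacker

with the three warranty and license paragraphs following verbatim as given
in the template.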
diff --git a/venv/lib/python3.9/site-packages/numpy/__config__.py b/venv/lib/python3.9/site-packages/numpy/__config__.py
new file mode 100644
index 00000000..2590dbb9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/__config__.py
@@ -0,0 +1,115 @@
+# This file is generated by numpy's setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+
+import os
+import sys
+
+extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.add_dll_directory(extra_dll_dir)
+
+openblas64__info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+blas_ilp64_opt_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+openblas64__lapack_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+lapack_ilp64_opt_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+
+def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
+
+def show():
+ """
+ Show libraries in the system on which NumPy was built.
+
+ Print information about various resources (libraries, library
+ directories, include directories, etc.) in the system on which
+ NumPy was built.
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ 1. Classes specifying the information to be printed are defined
+ in the `numpy.distutils.system_info` module.
+
+ Information may include:
+
+ * ``language``: language used to write the libraries (mostly
+ C or f77)
+ * ``libraries``: names of libraries found in the system
+ * ``library_dirs``: directories containing the libraries
+ * ``include_dirs``: directories containing library header files
+ * ``src_dirs``: directories containing library source files
+ * ``define_macros``: preprocessor macros used by
+ ``distutils.setup``
+ * ``baseline``: minimum CPU features required
+ * ``found``: dispatched features supported in the system
+ * ``not found``: dispatched features that are not supported
+ in the system
+
+ 2. NumPy BLAS/LAPACK Installation Notes
+
+    Installing a numpy wheel (``pip install numpy``, or forcing a wheel
+    with ``pip install numpy --only-binary :numpy: numpy``) includes
+    an OpenBLAS implementation of the BLAS and LAPACK linear algebra
+    APIs. In this case, ``library_dirs`` reports the original
+    build-time configuration as compiled with gcc/gfortran; at run
+    time the OpenBLAS library is in
+    ``site-packages/numpy.libs/`` (linux), or
+    ``site-packages/numpy/.dylibs/`` (macOS), or
+    ``site-packages/numpy/.libs/`` (windows).
+
+ Installing numpy from source
+ (``pip install numpy --no-binary numpy``) searches for BLAS and
+ LAPACK dynamic link libraries at build time as influenced by
+ environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
+ NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
+ or the optional file ``~/.numpy-site.cfg``.
+ NumPy remembers those locations and expects to load the same
+ libraries at run-time.
+ In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
+ library) is in the default build-time search order after
+ 'openblas'.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.show_config()
+ blas_opt_info:
+ language = c
+ define_macros = [('HAVE_CBLAS', None)]
+ libraries = ['openblas', 'openblas']
+ library_dirs = ['/usr/local/lib']
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+ for name,info_dict in globals().items():
+ if name[0] == "_" or type(info_dict) is not type({}): continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+ for k,v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+
+ print("Supported SIMD extensions in this NumPy install:")
+ print(" baseline = %s" % (','.join(__cpu_baseline__)))
+ print(" found = %s" % (','.join(features_found)))
+ print(" not found = %s" % (','.join(features_not_found)))
+
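The generated module above is what backs ``numpy.show_config()``
(``numpy/__init__.py`` re-exports ``show`` under that name). A minimal sketch
of querying it at run time -- the exact dictionaries and values depend
entirely on how this particular wheel was built:

    import numpy as np
    from numpy.__config__ import get_info

    # Prints every *_info dictionary defined above, followed by the
    # baseline/found/not-found SIMD feature report.
    np.show_config()

    # get_info() falls back to the "<name>_info" global, so
    # "blas_ilp64_opt" resolves to the blas_ilp64_opt_info dict above.
    blas = get_info("blas_ilp64_opt")
    print(blas.get("libraries", []))   # e.g. ['openblas64_', 'openblas64_']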
diff --git a/venv/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd b/venv/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd
new file mode 100644
index 00000000..5fd6086e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/__init__.cython-30.pxd
@@ -0,0 +1,1052 @@
+# NumPy static imports for Cython >= 3.0
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first. Cython 3.0+ inserts this call automatically when no
+# explicit call is detected inside the module.
+#
+# Author: Dag Sverre Seljebotn
+#
+
+from cpython.ref cimport Py_INCREF
+from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck
+cimport libc.stdio as stdio
+
+
+cdef extern from *:
+ # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython.
+ # See https://github.com/cython/cython/issues/3573
+ """
+ /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */
+ """
+
+
+cdef extern from "Python.h":
+ ctypedef Py_ssize_t Py_intptr_t
+
+cdef extern from "numpy/arrayobject.h":
+ ctypedef Py_intptr_t npy_intp
+ ctypedef size_t npy_uintp
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_INT128
+ NPY_INT256
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_UINT128
+ NPY_UINT256
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_FLOAT256
+ NPY_COMPLEX32
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+ NPY_COMPLEX512
+
+ NPY_INTP
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
+ NPY_C_CONTIGUOUS
+ NPY_F_CONTIGUOUS
+ NPY_CONTIGUOUS
+ NPY_FORTRAN
+ NPY_OWNDATA
+ NPY_FORCECAST
+ NPY_ENSURECOPY
+ NPY_ENSUREARRAY
+ NPY_ELEMENTSTRIDES
+ NPY_ALIGNED
+ NPY_NOTSWAPPED
+ NPY_WRITEABLE
+ NPY_ARR_HAS_DESCR
+
+ NPY_BEHAVED
+ NPY_BEHAVED_NS
+ NPY_CARRAY
+ NPY_CARRAY_RO
+ NPY_FARRAY
+ NPY_FARRAY_RO
+ NPY_DEFAULT
+
+ NPY_IN_ARRAY
+ NPY_OUT_ARRAY
+ NPY_INOUT_ARRAY
+ NPY_IN_FARRAY
+ NPY_OUT_FARRAY
+ NPY_INOUT_FARRAY
+
+ NPY_UPDATE_ALL
+
+ enum:
+ # Added in NumPy 1.7 to replace the deprecated enums above.
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_WRITEBACKIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS
+
+ npy_intp NPY_MAX_ELSIZE
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+        # Use PyDataType_* macros when possible; however, there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ cdef char flags
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef int alignment
+ cdef object fields
+ cdef tuple names
+ # Use PyDataType_HASSUBARRAY to test whether this field is
+ # valid (the pointer can be NULL). Most users should access
+ # this field via the inline helper method PyDataType_SHAPE.
+ cdef PyArray_ArrayDescr* subarray
+
+ ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+        # NOTE: no field declarations, because direct field access has been
+        # deprecated since NumPy 1.7.
+        # Instead, we use properties that map to the corresponding C-API functions.
+
+ @property
+ cdef inline PyObject* base(self) nogil:
+ """Returns a borrowed reference to the object owning the data/memory.
+ """
+ return PyArray_BASE(self)
+
+ @property
+ cdef inline dtype descr(self):
+ """Returns an owned reference to the dtype of the array.
+ """
+ return <dtype>PyArray_DESCR(self)
+
+ @property
+ cdef inline int ndim(self) nogil:
+ """Returns the number of dimensions in the array.
+ """
+ return PyArray_NDIM(self)
+
+ @property
+ cdef inline npy_intp *shape(self) nogil:
+ """Returns a pointer to the dimensions/shape of the array.
+ The number of elements matches the number of dimensions of the array (ndim).
+ Can return NULL for 0-dimensional arrays.
+ """
+ return PyArray_DIMS(self)
+
+ @property
+ cdef inline npy_intp *strides(self) nogil:
+ """Returns a pointer to the strides of the array.
+ The number of elements matches the number of dimensions of the array (ndim).
+ """
+ return PyArray_STRIDES(self)
+
+ @property
+ cdef inline npy_intp size(self) nogil:
+ """Returns the total size (in number of elements) of the array.
+ """
+ return PyArray_SIZE(self)
+
+ @property
+ cdef inline char* data(self) nogil:
+ """The pointer to the data buffer as a char*.
+ This is provided for legacy reasons to avoid direct struct field access.
+ For new code that needs this access, you probably want to cast the result
+ of `PyArray_DATA()` instead, which returns a 'void*'.
+ """
+ return PyArray_BYTES(self)
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+ ctypedef signed long long npy_int96
+ ctypedef signed long long npy_int128
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+ ctypedef unsigned long long npy_uint96
+ ctypedef unsigned long long npy_uint128
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ float real
+ float imag
+
+ ctypedef struct npy_cdouble:
+ double real
+ double imag
+
+ ctypedef struct npy_clongdouble:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex64:
+ float real
+ float imag
+
+ ctypedef struct npy_complex128:
+ double real
+ double imag
+
+ ctypedef struct npy_complex160:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex192:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex256:
+ long double real
+ long double imag
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+ int _import_array() except -1
+ # A second definition so _import_array isn't marked as used when we use it here.
+ # Do not use - subject to change any time.
+ int __pyx_import_array "_import_array"() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
+ bint PyArray_ISALIGNED(ndarray m) nogil
+
+ int PyArray_NDIM(ndarray) nogil
+ bint PyArray_ISONESEGMENT(ndarray) nogil
+ bint PyArray_ISFORTRAN(ndarray) nogil
+ int PyArray_FORTRANIF(ndarray) nogil
+
+ void* PyArray_DATA(ndarray) nogil
+ char* PyArray_BYTES(ndarray) nogil
+
+ npy_intp* PyArray_DIMS(ndarray) nogil
+ npy_intp* PyArray_STRIDES(ndarray) nogil
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
+
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
+ PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr.
+ int PyArray_FLAGS(ndarray) nogil
+ void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
+ void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
+ int PyArray_TYPE(ndarray arr) nogil
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
+
+ bint PyTypeNum_ISBOOL(int) nogil
+ bint PyTypeNum_ISUNSIGNED(int) nogil
+ bint PyTypeNum_ISSIGNED(int) nogil
+ bint PyTypeNum_ISINTEGER(int) nogil
+ bint PyTypeNum_ISFLOAT(int) nogil
+ bint PyTypeNum_ISNUMBER(int) nogil
+ bint PyTypeNum_ISSTRING(int) nogil
+ bint PyTypeNum_ISCOMPLEX(int) nogil
+ bint PyTypeNum_ISPYTHON(int) nogil
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
+ bint PyTypeNum_ISUSERDEF(int) nogil
+ bint PyTypeNum_ISEXTENDED(int) nogil
+ bint PyTypeNum_ISOBJECT(int) nogil
+
+ bint PyDataType_ISBOOL(dtype) nogil
+ bint PyDataType_ISUNSIGNED(dtype) nogil
+ bint PyDataType_ISSIGNED(dtype) nogil
+ bint PyDataType_ISINTEGER(dtype) nogil
+ bint PyDataType_ISFLOAT(dtype) nogil
+ bint PyDataType_ISNUMBER(dtype) nogil
+ bint PyDataType_ISSTRING(dtype) nogil
+ bint PyDataType_ISCOMPLEX(dtype) nogil
+ bint PyDataType_ISPYTHON(dtype) nogil
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
+ bint PyDataType_ISUSERDEF(dtype) nogil
+ bint PyDataType_ISEXTENDED(dtype) nogil
+ bint PyDataType_ISOBJECT(dtype) nogil
+ bint PyDataType_HASFIELDS(dtype) nogil
+ bint PyDataType_HASSUBARRAY(dtype) nogil
+
+ bint PyArray_ISBOOL(ndarray) nogil
+ bint PyArray_ISUNSIGNED(ndarray) nogil
+ bint PyArray_ISSIGNED(ndarray) nogil
+ bint PyArray_ISINTEGER(ndarray) nogil
+ bint PyArray_ISFLOAT(ndarray) nogil
+ bint PyArray_ISNUMBER(ndarray) nogil
+ bint PyArray_ISSTRING(ndarray) nogil
+ bint PyArray_ISCOMPLEX(ndarray) nogil
+ bint PyArray_ISPYTHON(ndarray) nogil
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
+ bint PyArray_ISUSERDEF(ndarray) nogil
+ bint PyArray_ISEXTENDED(ndarray) nogil
+ bint PyArray_ISOBJECT(ndarray) nogil
+ bint PyArray_HASFIELDS(ndarray) nogil
+
+ bint PyArray_ISVARIABLE(ndarray) nogil
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
+
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
+
+ bint PyArray_ISCARRAY(ndarray) nogil
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
+ bint PyArray_ISFARRAY(ndarray) nogil
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
+ bint PyArray_ISBEHAVED(ndarray) nogil
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
+ npy_intp PyArray_SIZE(ndarray) nogil
+ npy_intp PyArray_NBYTES(ndarray) nogil
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(object, int val)
+ npy_intp PyArray_REFCOUNT(object)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
+
+ void PyArray_XDECREF_ERR(ndarray)
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_SetNumericOps (object)
+ object PyArray_GetNumericOps ()
+ int PyArray_INCREF (ndarray)
+ int PyArray_XDECREF (ndarray)
+ void PyArray_SetStringFunction (object, int)
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CastTo (ndarray, ndarray)
+ int PyArray_CastAnyTo (ndarray, ndarray)
+ int PyArray_CanCastSafely (int, int)
+ npy_bool PyArray_CanCastTo (dtype, dtype)
+ int PyArray_ObjectType (object, int)
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ object PyArray_ScalarFromObject (object)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ object PyArray_FromDims (int, int *, int)
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object)
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_MoveInto (ndarray, ndarray)
+ int PyArray_CopyInto (ndarray, ndarray)
+ int PyArray_CopyAnyInto (ndarray, ndarray)
+ int PyArray_CopyObject (ndarray, object)
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *)
+ int PyArray_Dump (object, object, int)
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int)
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double)
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object)
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast)
+ void PyArray_FillObjectArray (ndarray, object)
+ int PyArray_FillWithScalar (ndarray, object)
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ object PyArray_NewFlagsObject (object)
+ npy_bool PyArray_CanCastScalar (type, type)
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
+ int PyArray_RemoveSmallest (broadcast)
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype)
+ void PyArray_Item_XDECREF (char *, dtype)
+ object PyArray_FieldNames (object)
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND)
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ #int PyArray_As1D (object*, char **, int *, int)
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int)
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_CopyAndTranspose (object)
+ object PyArray_Correlate (object, object, int)
+ int PyArray_TypestrConvert (int, int)
+ #int PyArray_DescrConverter (object, dtype*)
+ #int PyArray_DescrConverter2 (object, dtype*)
+ int PyArray_IntpConverter (object, PyArray_Dims *)
+ #int PyArray_BufferConverter (object, chunk)
+ int PyArray_AxisConverter (object, int *)
+ int PyArray_BoolConverter (object, npy_bool *)
+ int PyArray_ByteorderConverter (object, char *)
+ int PyArray_OrderConverter (object, NPY_ORDER *)
+ unsigned char PyArray_EquivTypes (dtype, dtype)
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *)
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype)
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_TypeNumFromName (char *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
+ #int PyArray_OutputConverter (object, ndarray*)
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ void _PyArray_SigintHandler (int)
+ void* _PyArray_GetSigintBuf ()
+ #int PyArray_DescrAlignConverter (object, dtype*)
+ #int PyArray_DescrAlignConverter2 (object, dtype*)
+ int PyArray_SearchsideConverter (object, void *)
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_CompareString (char *, char *, size_t)
+ int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped a bit surprisingly --
+# numpy.int corresponds to 'l' and numpy.long to 'q'.
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+ if PyDataType_HASSUBARRAY(d):
+ return <tuple>d.subarray.shape
+ else:
+ return ()
+
+
+cdef extern from "numpy/ndarrayobject.h":
+ PyTypeObject PyTimedeltaArrType_Type
+ PyTypeObject PyDatetimeArrType_Type
+ ctypedef int64_t npy_timedelta
+ ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+ ctypedef struct PyArray_DatetimeMetaData:
+ NPY_DATETIMEUNIT base
+ int64_t num
+
+cdef extern from "numpy/arrayscalars.h":
+
+ # abstract types
+ ctypedef class numpy.generic [object PyObject]:
+ pass
+ ctypedef class numpy.number [object PyObject]:
+ pass
+ ctypedef class numpy.integer [object PyObject]:
+ pass
+ ctypedef class numpy.signedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.unsignedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.inexact [object PyObject]:
+ pass
+ ctypedef class numpy.floating [object PyObject]:
+ pass
+ ctypedef class numpy.complexfloating [object PyObject]:
+ pass
+ ctypedef class numpy.flexible [object PyObject]:
+ pass
+ ctypedef class numpy.character [object PyObject]:
+ pass
+
+ ctypedef struct PyDatetimeScalarObject:
+ # PyObject_HEAD
+ npy_datetime obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef struct PyTimedeltaScalarObject:
+ # PyObject_HEAD
+ npy_timedelta obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef enum NPY_DATETIMEUNIT:
+ NPY_FR_Y
+ NPY_FR_M
+ NPY_FR_W
+ NPY_FR_D
+ NPY_FR_B
+ NPY_FR_h
+ NPY_FR_m
+ NPY_FR_s
+ NPY_FR_ms
+ NPY_FR_us
+ NPY_FR_ns
+ NPY_FR_ps
+ NPY_FR_fs
+ NPY_FR_as
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ UFUNC_ERR_IGNORE
+ UFUNC_ERR_WARN
+ UFUNC_ERR_RAISE
+ UFUNC_ERR_CALL
+ UFUNC_ERR_PRINT
+ UFUNC_ERR_LOG
+ UFUNC_MASK_DIVIDEBYZERO
+ UFUNC_MASK_OVERFLOW
+ UFUNC_MASK_UNDERFLOW
+ UFUNC_MASK_INVALID
+ UFUNC_SHIFT_DIVIDEBYZERO
+ UFUNC_SHIFT_OVERFLOW
+ UFUNC_SHIFT_UNDERFLOW
+ UFUNC_SHIFT_INVALID
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ UFUNC_ERR_DEFAULT
+ UFUNC_ERR_DEFAULT2
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *)
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **)
+ int PyUFunc_checkfperr \
+ (int, PyObject *, int *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *)
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+ base = PyArray_BASE(arr)
+ if base is NULL:
+ return None
+ return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+ try:
+ __pyx_import_array()
+ except Exception:
+ raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
+
+
+cdef inline bint is_timedelta64_object(object obj):
+ """
+ Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+ """
+ Cython equivalent of `isinstance(obj, np.datetime64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+ """
+    returns the int64 value underlying a scalar numpy datetime64 object.
+
+ Note that to interpret this as a datetime, the corresponding unit is
+ also needed. That can be found using `get_datetime64_unit`.
+ """
+ return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+ """
+    returns the int64 value underlying a scalar numpy timedelta64 object.
+ """
+ return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+ """
+ returns the unit part of the dtype for a numpy datetime64 object.
+ """
+ return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base
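These declarations are consumed at compile time by ``cimport numpy``; the C
headers they bind to are located via ``numpy.get_include()``. A minimal build
sketch (the ``fast_sum`` module name and ``fast_sum.pyx`` source file are
hypothetical; ``Extension``, ``cythonize``, and ``numpy.get_include()`` are
the standard public entry points):

    # setup.py -- build a Cython extension that does `cimport numpy as cnp`.
    from setuptools import Extension, setup
    from Cython.Build import cythonize
    import numpy

    ext = Extension(
        "fast_sum",                          # hypothetical module name
        sources=["fast_sum.pyx"],            # the .pyx file doing the cimport
        include_dirs=[numpy.get_include()],  # so "numpy/arrayobject.h" is found
    )
    setup(ext_modules=cythonize(ext, language_level=3))

With Cython >= 3.0 the mandatory ``import_array()`` call is inserted
automatically when no explicit call is detected, as the header comment of
this file notes.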
diff --git a/venv/lib/python3.9/site-packages/numpy/__init__.pxd b/venv/lib/python3.9/site-packages/numpy/__init__.pxd
new file mode 100644
index 00000000..03db9a0c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/__init__.pxd
@@ -0,0 +1,1017 @@
+# NumPy static imports for Cython < 3.0
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+#
+# Author: Dag Sverre Seljebotn
+#
+
+DEF _buffer_format_string_len = 255
+
+cimport cpython.buffer as pybuf
+from cpython.ref cimport Py_INCREF
+from cpython.mem cimport PyObject_Malloc, PyObject_Free
+from cpython.object cimport PyObject, PyTypeObject
+from cpython.buffer cimport PyObject_GetBuffer
+from cpython.type cimport type
+cimport libc.stdio as stdio
+
+cdef extern from "Python.h":
+ ctypedef int Py_intptr_t
+ bint PyObject_TypeCheck(object obj, PyTypeObject* type)
+
+cdef extern from "numpy/arrayobject.h":
+ ctypedef Py_intptr_t npy_intp
+ ctypedef size_t npy_uintp
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_INT128
+ NPY_INT256
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_UINT128
+ NPY_UINT256
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_FLOAT256
+ NPY_COMPLEX32
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+ NPY_COMPLEX512
+
+ NPY_INTP
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
+ NPY_C_CONTIGUOUS
+ NPY_F_CONTIGUOUS
+ NPY_CONTIGUOUS
+ NPY_FORTRAN
+ NPY_OWNDATA
+ NPY_FORCECAST
+ NPY_ENSURECOPY
+ NPY_ENSUREARRAY
+ NPY_ELEMENTSTRIDES
+ NPY_ALIGNED
+ NPY_NOTSWAPPED
+ NPY_WRITEABLE
+ NPY_ARR_HAS_DESCR
+
+ NPY_BEHAVED
+ NPY_BEHAVED_NS
+ NPY_CARRAY
+ NPY_CARRAY_RO
+ NPY_FARRAY
+ NPY_FARRAY_RO
+ NPY_DEFAULT
+
+ NPY_IN_ARRAY
+ NPY_OUT_ARRAY
+ NPY_INOUT_ARRAY
+ NPY_IN_FARRAY
+ NPY_OUT_FARRAY
+ NPY_INOUT_FARRAY
+
+ NPY_UPDATE_ALL
+
+ enum:
+ # Added in NumPy 1.7 to replace the deprecated enums above.
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_WRITEBACKIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS
+
+ npy_intp NPY_MAX_ELSIZE
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+        # Use PyDataType_* macros when possible; however, there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ cdef char flags
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef int alignment
+ cdef object fields
+ cdef tuple names
+ # Use PyDataType_HASSUBARRAY to test whether this field is
+ # valid (the pointer can be NULL). Most users should access
+ # this field via the inline helper method PyDataType_SHAPE.
+ cdef PyArray_ArrayDescr* subarray
+
+ ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+ cdef int numiter
+ cdef npy_intp size, index
+ cdef int nd
+ cdef npy_intp *dimensions
+ cdef void **iters
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+ cdef:
+ # Only taking a few of the most commonly used and stable fields.
+ # One should use PyArray_* macros instead to access the C fields.
+ char *data
+ int ndim "nd"
+ npy_intp *shape "dimensions"
+ npy_intp *strides
+ dtype descr # deprecated since NumPy 1.7 !
+ PyObject* base # NOT PUBLIC, DO NOT USE !
+
+
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+ ctypedef signed long long npy_int96
+ ctypedef signed long long npy_int128
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+ ctypedef unsigned long long npy_uint96
+ ctypedef unsigned long long npy_uint128
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ float real
+ float imag
+
+ ctypedef struct npy_cdouble:
+ double real
+ double imag
+
+ ctypedef struct npy_clongdouble:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex64:
+ float real
+ float imag
+
+ ctypedef struct npy_complex128:
+ double real
+ double imag
+
+ ctypedef struct npy_complex160:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex192:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex256:
+ long double real
+ long double imag
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+ int _import_array() except -1
+ # A second definition so _import_array isn't marked as used when we use it here.
+ # Do not use - subject to change any time.
+ int __pyx_import_array "_import_array"() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
+ bint PyArray_ISALIGNED(ndarray m) nogil
+
+ int PyArray_NDIM(ndarray) nogil
+ bint PyArray_ISONESEGMENT(ndarray) nogil
+ bint PyArray_ISFORTRAN(ndarray) nogil
+ int PyArray_FORTRANIF(ndarray) nogil
+
+ void* PyArray_DATA(ndarray) nogil
+ char* PyArray_BYTES(ndarray) nogil
+
+ npy_intp* PyArray_DIMS(ndarray) nogil
+ npy_intp* PyArray_STRIDES(ndarray) nogil
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
+
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
+ int PyArray_FLAGS(ndarray) nogil
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
+ int PyArray_TYPE(ndarray arr) nogil
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
+
+ bint PyTypeNum_ISBOOL(int) nogil
+ bint PyTypeNum_ISUNSIGNED(int) nogil
+ bint PyTypeNum_ISSIGNED(int) nogil
+ bint PyTypeNum_ISINTEGER(int) nogil
+ bint PyTypeNum_ISFLOAT(int) nogil
+ bint PyTypeNum_ISNUMBER(int) nogil
+ bint PyTypeNum_ISSTRING(int) nogil
+ bint PyTypeNum_ISCOMPLEX(int) nogil
+ bint PyTypeNum_ISPYTHON(int) nogil
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
+ bint PyTypeNum_ISUSERDEF(int) nogil
+ bint PyTypeNum_ISEXTENDED(int) nogil
+ bint PyTypeNum_ISOBJECT(int) nogil
+
+ bint PyDataType_ISBOOL(dtype) nogil
+ bint PyDataType_ISUNSIGNED(dtype) nogil
+ bint PyDataType_ISSIGNED(dtype) nogil
+ bint PyDataType_ISINTEGER(dtype) nogil
+ bint PyDataType_ISFLOAT(dtype) nogil
+ bint PyDataType_ISNUMBER(dtype) nogil
+ bint PyDataType_ISSTRING(dtype) nogil
+ bint PyDataType_ISCOMPLEX(dtype) nogil
+ bint PyDataType_ISPYTHON(dtype) nogil
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
+ bint PyDataType_ISUSERDEF(dtype) nogil
+ bint PyDataType_ISEXTENDED(dtype) nogil
+ bint PyDataType_ISOBJECT(dtype) nogil
+ bint PyDataType_HASFIELDS(dtype) nogil
+ bint PyDataType_HASSUBARRAY(dtype) nogil
+
+ bint PyArray_ISBOOL(ndarray) nogil
+ bint PyArray_ISUNSIGNED(ndarray) nogil
+ bint PyArray_ISSIGNED(ndarray) nogil
+ bint PyArray_ISINTEGER(ndarray) nogil
+ bint PyArray_ISFLOAT(ndarray) nogil
+ bint PyArray_ISNUMBER(ndarray) nogil
+ bint PyArray_ISSTRING(ndarray) nogil
+ bint PyArray_ISCOMPLEX(ndarray) nogil
+ bint PyArray_ISPYTHON(ndarray) nogil
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
+ bint PyArray_ISUSERDEF(ndarray) nogil
+ bint PyArray_ISEXTENDED(ndarray) nogil
+ bint PyArray_ISOBJECT(ndarray) nogil
+ bint PyArray_HASFIELDS(ndarray) nogil
+
+ bint PyArray_ISVARIABLE(ndarray) nogil
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
+
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
+
+ bint PyArray_ISCARRAY(ndarray) nogil
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
+ bint PyArray_ISFARRAY(ndarray) nogil
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
+ bint PyArray_ISBEHAVED(ndarray) nogil
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
+ npy_intp PyArray_SIZE(ndarray) nogil
+ npy_intp PyArray_NBYTES(ndarray) nogil
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(object, int val)
+ npy_intp PyArray_REFCOUNT(object)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
+
+ void PyArray_XDECREF_ERR(ndarray)
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_SetNumericOps (object)
+ object PyArray_GetNumericOps ()
+ int PyArray_INCREF (ndarray)
+ int PyArray_XDECREF (ndarray)
+ void PyArray_SetStringFunction (object, int)
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CastTo (ndarray, ndarray)
+ int PyArray_CastAnyTo (ndarray, ndarray)
+ int PyArray_CanCastSafely (int, int)
+ npy_bool PyArray_CanCastTo (dtype, dtype)
+ int PyArray_ObjectType (object, int)
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ object PyArray_ScalarFromObject (object)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ object PyArray_FromDims (int, int *, int)
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object)
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_MoveInto (ndarray, ndarray)
+ int PyArray_CopyInto (ndarray, ndarray)
+ int PyArray_CopyAnyInto (ndarray, ndarray)
+ int PyArray_CopyObject (ndarray, object)
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *)
+ int PyArray_Dump (object, object, int)
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int)
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double)
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object)
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast)
+ void PyArray_FillObjectArray (ndarray, object)
+ int PyArray_FillWithScalar (ndarray, object)
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ object PyArray_NewFlagsObject (object)
+ npy_bool PyArray_CanCastScalar (type, type)
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
+ int PyArray_RemoveSmallest (broadcast)
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype)
+ void PyArray_Item_XDECREF (char *, dtype)
+ object PyArray_FieldNames (object)
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND)
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ #int PyArray_As1D (object*, char **, int *, int)
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int)
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_CopyAndTranspose (object)
+ object PyArray_Correlate (object, object, int)
+ int PyArray_TypestrConvert (int, int)
+ #int PyArray_DescrConverter (object, dtype*)
+ #int PyArray_DescrConverter2 (object, dtype*)
+ int PyArray_IntpConverter (object, PyArray_Dims *)
+ #int PyArray_BufferConverter (object, chunk)
+ int PyArray_AxisConverter (object, int *)
+ int PyArray_BoolConverter (object, npy_bool *)
+ int PyArray_ByteorderConverter (object, char *)
+ int PyArray_OrderConverter (object, NPY_ORDER *)
+ unsigned char PyArray_EquivTypes (dtype, dtype)
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *)
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype)
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_TypeNumFromName (char *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
+ #int PyArray_OutputConverter (object, ndarray*)
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ void _PyArray_SigintHandler (int)
+ void* _PyArray_GetSigintBuf ()
+ #int PyArray_DescrAlignConverter (object, dtype*)
+ #int PyArray_DescrAlignConverter2 (object, dtype*)
+ int PyArray_SearchsideConverter (object, void *)
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_CompareString (char *, char *, size_t)
+ int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped a bit surprisingly:
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
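+
+# For example, PyArray_MultiIterNew2(a, b) mirrors np.broadcast(a, b): it
+# returns a `broadcast` object whose elements can be stepped through with
+# the PyArray_MultiIter_* functions declared above.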
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+ if PyDataType_HASSUBARRAY(d):
+ return <tuple>d.subarray.shape
+ else:
+ return ()
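+
+# For example, np.dtype(("f8", (2, 3))) has a subarray, so
+# PyDataType_SHAPE(...) returns (2, 3), while a plain np.dtype("f8")
+# yields the empty tuple ().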
+
+
+cdef extern from "numpy/ndarrayobject.h":
+ PyTypeObject PyTimedeltaArrType_Type
+ PyTypeObject PyDatetimeArrType_Type
+ ctypedef int64_t npy_timedelta
+ ctypedef int64_t npy_datetime
+
+cdef extern from "numpy/ndarraytypes.h":
+ ctypedef struct PyArray_DatetimeMetaData:
+ NPY_DATETIMEUNIT base
+ int64_t num
+
+cdef extern from "numpy/arrayscalars.h":
+
+ # abstract types
+ ctypedef class numpy.generic [object PyObject]:
+ pass
+ ctypedef class numpy.number [object PyObject]:
+ pass
+ ctypedef class numpy.integer [object PyObject]:
+ pass
+ ctypedef class numpy.signedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.unsignedinteger [object PyObject]:
+ pass
+ ctypedef class numpy.inexact [object PyObject]:
+ pass
+ ctypedef class numpy.floating [object PyObject]:
+ pass
+ ctypedef class numpy.complexfloating [object PyObject]:
+ pass
+ ctypedef class numpy.flexible [object PyObject]:
+ pass
+ ctypedef class numpy.character [object PyObject]:
+ pass
+
+ ctypedef struct PyDatetimeScalarObject:
+ # PyObject_HEAD
+ npy_datetime obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef struct PyTimedeltaScalarObject:
+ # PyObject_HEAD
+ npy_timedelta obval
+ PyArray_DatetimeMetaData obmeta
+
+ ctypedef enum NPY_DATETIMEUNIT:
+ NPY_FR_Y
+ NPY_FR_M
+ NPY_FR_W
+ NPY_FR_D
+ NPY_FR_B
+ NPY_FR_h
+ NPY_FR_m
+ NPY_FR_s
+ NPY_FR_ms
+ NPY_FR_us
+ NPY_FR_ns
+ NPY_FR_ps
+ NPY_FR_fs
+ NPY_FR_as
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ UFUNC_ERR_IGNORE
+ UFUNC_ERR_WARN
+ UFUNC_ERR_RAISE
+ UFUNC_ERR_CALL
+ UFUNC_ERR_PRINT
+ UFUNC_ERR_LOG
+ UFUNC_MASK_DIVIDEBYZERO
+ UFUNC_MASK_OVERFLOW
+ UFUNC_MASK_UNDERFLOW
+ UFUNC_MASK_INVALID
+ UFUNC_SHIFT_DIVIDEBYZERO
+ UFUNC_SHIFT_OVERFLOW
+ UFUNC_SHIFT_UNDERFLOW
+ UFUNC_SHIFT_INVALID
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ UFUNC_ERR_DEFAULT
+ UFUNC_ERR_DEFAULT2
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *)
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **)
+ int PyUFunc_checkfperr \
+ (int, PyObject *, int *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *)
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
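+# A minimal sketch (not part of NumPy) showing how the declarations above
+# can be combined to register a 1-in/1-out float64 ufunc from Cython. The
+# loop `_double_it` and the name "double_it" are made up for illustration,
+# and import_array()/import_umath() must have run first:
+#
+#     cdef void _double_it(char **args, npy_intp *dims,
+#                          npy_intp *steps, void *data):
+#         cdef npy_intp i
+#         cdef char *in_p = args[0]
+#         cdef char *out_p = args[1]
+#         for i in range(dims[0]):
+#             (<double*>out_p)[0] = 2.0 * (<double*>in_p)[0]
+#             in_p += steps[0]
+#             out_p += steps[1]
+#
+#     cdef PyUFuncGenericFunction _loops[1]
+#     cdef char _types[2]
+#     _loops[0] = _double_it
+#     _types[0] = NPY_FLOAT64
+#     _types[1] = NPY_FLOAT64
+#     double_it = PyUFunc_FromFuncAndData(
+#         _loops, NULL, _types, 1, 1, 1, PyUFunc_None,
+#         "double_it", "multiply elements by two", 0)
+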
+cdef inline void set_array_base(ndarray arr, object base):
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+ base = PyArray_BASE(arr)
+ if base is NULL:
+ return None
+ return <object>base
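+
+# A usage sketch (not part of NumPy): wrapping externally owned memory
+# while keeping its owner alive. `buf`, `n` and `owner` are hypothetical:
+#
+#     arr = PyArray_SimpleNewFromData(1, &n, NPY_FLOAT64, buf)
+#     set_array_base(arr, owner)   # arr now keeps `owner` alive
+#     assert get_array_base(arr) is owner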
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+ try:
+ __pyx_import_array()
+ except Exception:
+ raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
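+
+# A minimal usage sketch: any Cython module that calls into the NumPy
+# C API must run one of these wrappers at module import time, e.g.:
+#
+#     cimport numpy as cnp
+#     cnp.import_array()
+#
+# Skipping this typically crashes on the first C-API call, because the
+# API function-pointer table is never initialized.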
+
+cdef extern from *:
+ # Leave a marker that the NumPy declarations came from this file
+ # See https://github.com/cython/cython/issues/3573
+ """
+ /* NumPy API declarations from "numpy/__init__.pxd" */
+ """
+
+
+cdef inline bint is_timedelta64_object(object obj):
+ """
+ Cython equivalent of `isinstance(obj, np.timedelta64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
+
+
+cdef inline bint is_datetime64_object(object obj):
+ """
+ Cython equivalent of `isinstance(obj, np.datetime64)`
+
+ Parameters
+ ----------
+ obj : object
+
+ Returns
+ -------
+ bool
+ """
+ return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
+
+
+cdef inline npy_datetime get_datetime64_value(object obj) nogil:
+ """
+    returns the int64 value underlying a scalar numpy datetime64 object
+
+ Note that to interpret this as a datetime, the corresponding unit is
+ also needed. That can be found using `get_datetime64_unit`.
+ """
+ return (<PyDatetimeScalarObject*>obj).obval
+
+
+cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
+ """
+    returns the int64 value underlying a scalar numpy timedelta64 object
+ """
+ return (<PyTimedeltaScalarObject*>obj).obval
+
+
+cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
+ """
+ returns the unit part of the dtype for a numpy datetime64 object.
+ """
+ return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base
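+
+
+# A usage sketch combining the datetime helpers above: check the type
+# first, since the value getters cast blindly and do not validate
+# their input.
+#
+#     if is_datetime64_object(obj):
+#         value = get_datetime64_value(obj)  # raw int64 payload
+#         unit = get_datetime64_unit(obj)    # e.g. NPY_FR_ns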
diff --git a/venv/lib/python3.9/site-packages/numpy/__init__.py b/venv/lib/python3.9/site-packages/numpy/__init__.py
new file mode 100644
index 00000000..e20a83ab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/__init__.py
@@ -0,0 +1,439 @@
+"""
+NumPy
+=====
+
+Provides
+ 1. An array object of arbitrary homogeneous items
+ 2. Fast mathematical operations over arrays
+ 3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a loose standing reference guide, available from
+`the NumPy homepage <https://numpy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <https://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities. See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as ``np``::
+
+ >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+ >>> x = 42
+ >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+ >>> help(np.sort)
+ ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help. This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page. Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+To search for documents containing a keyword, do::
+
+ >>> np.lookfor('keyword')
+ ... # doctest: +SKIP
+
+General-purpose documents like a glossary and help on the basic concepts
+of numpy are available under the ``doc`` sub-module::
+
+ >>> from numpy import doc
+ >>> help(doc)
+ ... # doctest: +SKIP
+
+Available subpackages
+---------------------
+lib
+ Basic functions used by several sub-packages.
+random
+ Core Random Tools
+linalg
+ Core Linear Algebra Tools
+fft
+ Core FFT routines
+polynomial
+ Polynomial tools
+testing
+ NumPy testing tools
+distutils
+    Enhancements to distutils, with support for
+    Fortran compilers and more.
+
+Utilities
+---------
+test
+ Run numpy unittests
+show_config
+ Show numpy build configuration
+dual
+ Overwrite certain functions with high-performance SciPy tools.
+    Note: `numpy.dual` is deprecated. Use the functions from NumPy or SciPy
+ directly instead of importing them from `numpy.dual`.
+matlib
+ Make everything matrices.
+__version__
+ NumPy version string
+
+Viewing documentation using IPython
+-----------------------------------
+
+Start IPython and import `numpy`, usually under the alias ``np``: `import
+numpy as np`. Then, paste examples directly, or use the ``%cpaste`` magic
+to paste them into the shell. To see which functions are available in
+`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list. To view the docstring for a function, use
+``np.cos?<ENTER>``; to view its source code, use ``np.cos??<ENTER>``.
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`). In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
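+
+For example::
+
+    >>> x = np.array([3, 1, 2])
+    >>> np.sort(x)  # returns a sorted copy; x is unchanged
+    array([1, 2, 3])
+    >>> x.sort()    # sorts x in place
+    >>> x
+    array([1, 2, 3])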
+
+"""
+import sys
+import warnings
+
+from ._globals import (
+ ModuleDeprecationWarning, VisibleDeprecationWarning,
+ _NoValue, _CopyMode
+)
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+ __NUMPY_SETUP__
+except NameError:
+ __NUMPY_SETUP__ = False
+
+if __NUMPY_SETUP__:
+ sys.stderr.write('Running from numpy source directory.\n')
+else:
+ try:
+ from numpy.__config__ import show as show_config
+ except ImportError as e:
+ msg = """Error importing numpy: you should not try to import numpy from
+ its source directory; please exit the numpy source tree, and relaunch
+ your python interpreter from there."""
+ raise ImportError(msg) from e
+
+ __all__ = ['ModuleDeprecationWarning',
+ 'VisibleDeprecationWarning']
+
+ # mapping of {name: (value, deprecation_msg)}
+ __deprecated_attrs__ = {}
+
+ # Allow distributors to run custom init code
+ from . import _distributor_init
+
+ from . import core
+ from .core import *
+ from . import compat
+ from . import lib
+ # NOTE: to be revisited following future namespace cleanup.
+ # See gh-14454 and gh-15672 for discussion.
+ from .lib import *
+
+ from . import linalg
+ from . import fft
+ from . import polynomial
+ from . import random
+ from . import ctypeslib
+ from . import ma
+ from . import matrixlib as _mat
+ from .matrixlib import *
+
+ # Deprecations introduced in NumPy 1.20.0, 2020-06-06
+ import builtins as _builtins
+
+ _msg = (
+ "module 'numpy' has no attribute '{n}'.\n"
+ "`np.{n}` was a deprecated alias for the builtin `{n}`. "
+ "To avoid this error in existing code, use `{n}` by itself. "
+ "Doing this will not modify any behavior and is safe. {extended_msg}\n"
+        "The alias was originally deprecated in NumPy 1.20; for more "
+ "details and guidance see the original release note at:\n"
+ " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
+
+ _specific_msg = (
+ "If you specifically wanted the numpy scalar type, use `np.{}` here.")
+
+ _int_extended_msg = (
+ "When replacing `np.{}`, you may wish to use e.g. `np.int64` "
+ "or `np.int32` to specify the precision. If you wish to review "
+ "your current use, check the release note link for "
+ "additional information.")
+
+ _type_info = [
+ ("object", ""), # The NumPy scalar only exists by name.
+ ("bool", _specific_msg.format("bool_")),
+ ("float", _specific_msg.format("float64")),
+ ("complex", _specific_msg.format("complex128")),
+ ("str", _specific_msg.format("str_")),
+ ("int", _int_extended_msg.format("int"))]
+
+ __former_attrs__ = {
+ n: _msg.format(n=n, extended_msg=extended_msg)
+ for n, extended_msg in _type_info
+ }
+
+ # Future warning introduced in NumPy 1.24.0, 2022-11-17
+ _msg = (
+ "`np.{n}` is a deprecated alias for `{an}`. (Deprecated NumPy 1.24)")
+
+    # Some of these are awkward (since `np.str` may be preferable in the long
+    # term), but overall the names ending in 0 seem undesirable
+ _type_info = [
+ ("bool8", bool_, "np.bool_"),
+ ("int0", intp, "np.intp"),
+ ("uint0", uintp, "np.uintp"),
+ ("str0", str_, "np.str_"),
+ ("bytes0", bytes_, "np.bytes_"),
+ ("void0", void, "np.void"),
+ ("object0", object_,
+ "`np.object0` is a deprecated alias for `np.object_`. "
+ "`object` can be used instead. (Deprecated NumPy 1.24)")]
+
+ # Some of these could be defined right away, but most were aliases to
+ # the Python objects and only removed in NumPy 1.24. Defining them should
+ # probably wait for NumPy 1.26 or 2.0.
+ # When defined, these should possibly not be added to `__all__` to avoid
+ # import with `from numpy import *`.
+ __future_scalars__ = {"bool", "long", "ulong", "str", "bytes", "object"}
+
+ __deprecated_attrs__.update({
+ n: (alias, _msg.format(n=n, an=an)) for n, alias, an in _type_info})
+
+ del _msg, _type_info
+
+ from .core import round, abs, max, min
+ # now that numpy modules are imported, can initialize limits
+ core.getlimits._register_known_types()
+
+ __all__.extend(['__version__', 'show_config'])
+ __all__.extend(core.__all__)
+ __all__.extend(_mat.__all__)
+ __all__.extend(lib.__all__)
+ __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+
+ # Remove one of the two occurrences of `issubdtype`, which is exposed as
+ # both `numpy.core.issubdtype` and `numpy.lib.issubdtype`.
+ __all__.remove('issubdtype')
+
+ # These are exported by np.core, but are replaced by the builtins below
+ # remove them to ensure that we don't end up with `np.long == np.int_`,
+ # which would be a breaking change.
+ del long, unicode
+ __all__.remove('long')
+ __all__.remove('unicode')
+
+ # Remove things that are in the numpy.lib but not in the numpy namespace
+ # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
+ # that prevents adding more things to the main namespace by accident.
+    # The list below will grow until the `from .lib import *` NOTE above is
+    # taken care of.
+ __all__.remove('Arrayterator')
+ del Arrayterator
+
+ # These names were removed in NumPy 1.20. For at least one release,
+ # attempts to access these names in the numpy namespace will trigger
+ # a warning, and calling the function will raise an exception.
+ _financial_names = ['fv', 'ipmt', 'irr', 'mirr', 'nper', 'npv', 'pmt',
+ 'ppmt', 'pv', 'rate']
+ __expired_functions__ = {
+ name: (f'In accordance with NEP 32, the function {name} was removed '
+ 'from NumPy version 1.20. A replacement for this function '
+ 'is available in the numpy_financial library: '
+ 'https://pypi.org/project/numpy-financial')
+ for name in _financial_names}
+
+ # Filter out Cython harmless warnings
+ warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+ warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+ warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
+
+ # oldnumeric and numarray were removed in 1.9. In case some packages import
+ # but do not use them, we define them here for backward compatibility.
+ oldnumeric = 'removed'
+ numarray = 'removed'
+
+ def __getattr__(attr):
+ # Warn for expired attributes, and return a dummy function
+ # that always raises an exception.
+ import warnings
+ try:
+ msg = __expired_functions__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+ def _expired(*args, **kwds):
+ raise RuntimeError(msg)
+
+ return _expired
+
+ # Emit warnings for deprecated attributes
+ try:
+ val, msg = __deprecated_attrs__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ return val
+
+ if attr in __future_scalars__:
+            # Also emit a FutureWarning for names that will become
+            # scalars, then fall through to the AttributeError below.
+ warnings.warn(
+ f"In the future `np.{attr}` will be defined as the "
+ "corresponding NumPy scalar.", FutureWarning, stacklevel=2)
+
+ if attr in __former_attrs__:
+ raise AttributeError(__former_attrs__[attr])
+
+        # Importing Tester requires importing all of UnitTest, which is not a
+        # cheap import. Since it is mainly used in test suites, we lazily
+        # import it here to save on the order of 10 ms of import time for
+        # most users.
+ #
+ # The previous way Tester was imported also had a side effect of adding
+ # the full `numpy.testing` namespace
+ if attr == 'testing':
+ import numpy.testing as testing
+ return testing
+ elif attr == 'Tester':
+ from .testing import Tester
+ return Tester
+
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
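+
+    # With the tables above in place, for example: `np.fv` warns and
+    # returns a stub that raises RuntimeError when called; `np.bool8`
+    # warns and returns `np.bool_`; `np.float` raises AttributeError
+    # pointing at the builtin `float`; and `np.long` emits a
+    # FutureWarning before the generic AttributeError.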
+
+ def __dir__():
+ public_symbols = globals().keys() | {'Tester', 'testing'}
+ public_symbols -= {
+ "core", "matrixlib",
+ }
+ return list(public_symbols)
+
+ # Pytest testing
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+ def _sanity_check():
+ """
+        Quick sanity checks for common bugs caused by the environment.
+        There are some cases, e.g. with a wrong BLAS ABI, that cause wrong
+        results under specific runtime conditions that are not necessarily
+        achieved during test suite runs, and it is useful to catch those
+        early.
+
+ See https://github.com/numpy/numpy/issues/8577 and other
+ similar bug reports.
+
+ """
+ try:
+ x = ones(2, dtype=float32)
+ if not abs(x.dot(x) - float32(2.0)) < 1e-5:
+ raise AssertionError()
+ except AssertionError:
+ msg = ("The current Numpy installation ({!r}) fails to "
+ "pass simple sanity checks. This can be caused for example "
+ "by incorrect BLAS library being linked in, or by mixing "
+ "package managers (pip, conda, apt, ...). Search closed "
+ "numpy issues for similar problems.")
+ raise RuntimeError(msg.format(__file__)) from None
+
+ _sanity_check()
+ del _sanity_check
+
+ def _mac_os_check():
+ """
+        Quick sanity check for macOS: look for Accelerate build bugs.
+        Testing numpy polyfit calls init_dgelsd (LAPACK).
+ """
+ try:
+ c = array([3., 2., 1.])
+ x = linspace(0, 2, 5)
+ y = polyval(c, x)
+ _ = polyfit(x, y, 2, cov=True)
+ except ValueError:
+ pass
+
+ if sys.platform == "darwin":
+ with warnings.catch_warnings(record=True) as w:
+ _mac_os_check()
+            # Raise a RuntimeError if the check emitted a warning; include
+            # the warning's category and message in the error.
+ error_message = ""
+ if len(w) > 0:
+ error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
+ msg = (
+ "Polyfit sanity test emitted a warning, most likely due "
+ "to using a buggy Accelerate backend."
+ "\nIf you compiled yourself, more information is available at:"
+ "\nhttps://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries"
+ "\nOtherwise report this to the vendor "
+ "that provided NumPy.\n{}\n".format(error_message))
+ raise RuntimeError(msg)
+ del _mac_os_check
+
+ # We usually use madvise hugepages support, but on some old kernels it
+ # is slow and thus better avoided.
+ # Specifically kernel version 4.6 had a bug fix which probably fixed this:
+ # https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
+ import os
+ use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
+ if sys.platform == "linux" and use_hugepage is None:
+        # If there is an issue with parsing the kernel version,
+        # set use_hugepage to 0. Using LooseVersion would handle the
+        # kernel version parsing better, but it is avoided since it
+        # would increase the import time. See: #16679 for related
+        # discussion.
+ try:
+ use_hugepage = 1
+ kernel_version = os.uname().release.split(".")[:2]
+ kernel_version = tuple(int(v) for v in kernel_version)
+ if kernel_version < (4, 6):
+ use_hugepage = 0
+ except ValueError:
+            use_hugepage = 0
+ elif use_hugepage is None:
+ # This is not Linux, so it should not matter, just enable anyway
+ use_hugepage = 1
+ else:
+ use_hugepage = int(use_hugepage)
+
+ # Note that this will currently only make a difference on Linux
+ core.multiarray._set_madvise_hugepage(use_hugepage)
+
+ # Give a warning if NumPy is reloaded or imported on a sub-interpreter
+    # We do this from Python, since the C-module may not be reloaded, and
+    # it is tidier to organize it here.
+ core.multiarray._multiarray_umath._reload_guard()
+
+ core._set_promotion_state(os.environ.get("NPY_PROMOTION_STATE", "legacy"))
+
+ # Tell PyInstaller where to find hook-numpy.py
+ def _pyinstaller_hooks_dir():
+ from pathlib import Path
+ return [str(Path(__file__).with_name("_pyinstaller").resolve())]
+
+ # Remove symbols imported for internal use
+ del os
+
+
+# get the version using versioneer
+from .version import __version__, git_revision as __git_version__
+
+# Remove symbols imported for internal use
+del sys, warnings
diff --git a/venv/lib/python3.9/site-packages/numpy/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/__init__.pyi
new file mode 100644
index 00000000..853444f2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/__init__.pyi
@@ -0,0 +1,4410 @@
+import builtins
+import os
+import sys
+import mmap
+import ctypes as ct
+import array as _array
+import datetime as dt
+import enum
+from abc import abstractmethod
+from types import TracebackType, MappingProxyType
+from contextlib import ContextDecorator
+from contextlib import contextmanager
+
+if sys.version_info >= (3, 9):
+ from types import GenericAlias
+
+from numpy._pytesttester import PytestTester
+from numpy.core._internal import _ctypes
+
+from numpy._typing import (
+ # Arrays
+ ArrayLike,
+ NDArray,
+ _SupportsArray,
+ _NestedSequence,
+ _FiniteNestedSequence,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeNumber_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeStr_co,
+ _ArrayLikeBytes_co,
+ _ArrayLikeUnknown,
+ _UnknownType,
+
+ # DTypes
+ DTypeLike,
+ _DTypeLike,
+ _DTypeLikeVoid,
+ _SupportsDType,
+ _VoidDTypeLike,
+
+ # Shapes
+ _Shape,
+ _ShapeLike,
+
+ # Scalars
+ _CharLike_co,
+ _BoolLike_co,
+ _IntLike_co,
+ _FloatLike_co,
+ _ComplexLike_co,
+ _TD64Like_co,
+ _NumberLike_co,
+ _ScalarLike_co,
+
+ # `number` precision
+ NBitBase,
+ _256Bit,
+ _128Bit,
+ _96Bit,
+ _80Bit,
+ _64Bit,
+ _32Bit,
+ _16Bit,
+ _8Bit,
+ _NBitByte,
+ _NBitShort,
+ _NBitIntC,
+ _NBitIntP,
+ _NBitInt,
+ _NBitLongLong,
+ _NBitHalf,
+ _NBitSingle,
+ _NBitDouble,
+ _NBitLongDouble,
+
+ # Character codes
+ _BoolCodes,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _Complex64Codes,
+ _Complex128Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+ _DT64Codes,
+ _TD64Codes,
+ _StrCodes,
+ _BytesCodes,
+ _VoidCodes,
+ _ObjectCodes,
+
+ # Ufuncs
+ _UFunc_Nin1_Nout1,
+ _UFunc_Nin2_Nout1,
+ _UFunc_Nin1_Nout2,
+ _UFunc_Nin2_Nout2,
+ _GUFunc_Nin2_Nout1,
+)
+
+from numpy._typing._callable import (
+ _BoolOp,
+ _BoolBitOp,
+ _BoolSub,
+ _BoolTrueDiv,
+ _BoolMod,
+ _BoolDivMod,
+ _TD64Div,
+ _IntTrueDiv,
+ _UnsignedIntOp,
+ _UnsignedIntBitOp,
+ _UnsignedIntMod,
+ _UnsignedIntDivMod,
+ _SignedIntOp,
+ _SignedIntBitOp,
+ _SignedIntMod,
+ _SignedIntDivMod,
+ _FloatOp,
+ _FloatMod,
+ _FloatDivMod,
+ _ComplexOp,
+ _NumberOp,
+ _ComparisonOp,
+)
+
+# NOTE: Numpy's mypy plugin is used for removing the types unavailable
+# to the specific platform
+from numpy._typing._extended_precision import (
+ uint128 as uint128,
+ uint256 as uint256,
+ int128 as int128,
+ int256 as int256,
+ float80 as float80,
+ float96 as float96,
+ float128 as float128,
+ float256 as float256,
+ complex160 as complex160,
+ complex192 as complex192,
+ complex256 as complex256,
+ complex512 as complex512,
+)
+
+from collections.abc import (
+ Callable,
+ Container,
+ Iterable,
+ Iterator,
+ Mapping,
+ Sequence,
+ Sized,
+)
+from typing import (
+ Literal as L,
+ Any,
+ Generator,
+ Generic,
+ IO,
+ NoReturn,
+ overload,
+ SupportsComplex,
+ SupportsFloat,
+ SupportsInt,
+ TypeVar,
+ Union,
+ Protocol,
+ SupportsIndex,
+ Final,
+ final,
+ ClassVar,
+)
+
+# Ensures that the stubs are picked up
+from numpy import (
+ ctypeslib as ctypeslib,
+ fft as fft,
+ lib as lib,
+ linalg as linalg,
+ ma as ma,
+ polynomial as polynomial,
+ random as random,
+ testing as testing,
+ version as version,
+)
+
+from numpy.core import defchararray, records
+char = defchararray
+rec = records
+
+from numpy.core.function_base import (
+ linspace as linspace,
+ logspace as logspace,
+ geomspace as geomspace,
+)
+
+from numpy.core.fromnumeric import (
+ take as take,
+ reshape as reshape,
+ choose as choose,
+ repeat as repeat,
+ put as put,
+ swapaxes as swapaxes,
+ transpose as transpose,
+ partition as partition,
+ argpartition as argpartition,
+ sort as sort,
+ argsort as argsort,
+ argmax as argmax,
+ argmin as argmin,
+ searchsorted as searchsorted,
+ resize as resize,
+ squeeze as squeeze,
+ diagonal as diagonal,
+ trace as trace,
+ ravel as ravel,
+ nonzero as nonzero,
+ shape as shape,
+ compress as compress,
+ clip as clip,
+ sum as sum,
+ all as all,
+ any as any,
+ cumsum as cumsum,
+ ptp as ptp,
+ amax as amax,
+ amin as amin,
+ prod as prod,
+ cumprod as cumprod,
+ ndim as ndim,
+ size as size,
+ around as around,
+ mean as mean,
+ std as std,
+ var as var,
+)
+
+from numpy.core._asarray import (
+ require as require,
+)
+
+from numpy.core._type_aliases import (
+ sctypes as sctypes,
+ sctypeDict as sctypeDict,
+)
+
+from numpy.core._ufunc_config import (
+ seterr as seterr,
+ geterr as geterr,
+ setbufsize as setbufsize,
+ getbufsize as getbufsize,
+ seterrcall as seterrcall,
+ geterrcall as geterrcall,
+ _ErrKind,
+ _ErrFunc,
+ _ErrDictOptional,
+)
+
+from numpy.core.arrayprint import (
+ set_printoptions as set_printoptions,
+ get_printoptions as get_printoptions,
+ array2string as array2string,
+ format_float_scientific as format_float_scientific,
+ format_float_positional as format_float_positional,
+ array_repr as array_repr,
+ array_str as array_str,
+ set_string_function as set_string_function,
+ printoptions as printoptions,
+)
+
+from numpy.core.einsumfunc import (
+ einsum as einsum,
+ einsum_path as einsum_path,
+)
+
+from numpy.core.multiarray import (
+ ALLOW_THREADS as ALLOW_THREADS,
+ BUFSIZE as BUFSIZE,
+ CLIP as CLIP,
+ MAXDIMS as MAXDIMS,
+ MAY_SHARE_BOUNDS as MAY_SHARE_BOUNDS,
+ MAY_SHARE_EXACT as MAY_SHARE_EXACT,
+ RAISE as RAISE,
+ WRAP as WRAP,
+ tracemalloc_domain as tracemalloc_domain,
+ array as array,
+ empty_like as empty_like,
+ empty as empty,
+ zeros as zeros,
+ concatenate as concatenate,
+ inner as inner,
+ where as where,
+ lexsort as lexsort,
+ can_cast as can_cast,
+ min_scalar_type as min_scalar_type,
+ result_type as result_type,
+ dot as dot,
+ vdot as vdot,
+ bincount as bincount,
+ copyto as copyto,
+ putmask as putmask,
+ packbits as packbits,
+ unpackbits as unpackbits,
+ shares_memory as shares_memory,
+ may_share_memory as may_share_memory,
+ asarray as asarray,
+ asanyarray as asanyarray,
+ ascontiguousarray as ascontiguousarray,
+ asfortranarray as asfortranarray,
+ arange as arange,
+ busday_count as busday_count,
+ busday_offset as busday_offset,
+ compare_chararrays as compare_chararrays,
+ datetime_as_string as datetime_as_string,
+ datetime_data as datetime_data,
+ frombuffer as frombuffer,
+ fromfile as fromfile,
+ fromiter as fromiter,
+ is_busday as is_busday,
+ promote_types as promote_types,
+ seterrobj as seterrobj,
+ geterrobj as geterrobj,
+ fromstring as fromstring,
+ frompyfunc as frompyfunc,
+ nested_iters as nested_iters,
+ flagsobj,
+)
+
+from numpy.core.numeric import (
+ zeros_like as zeros_like,
+ ones as ones,
+ ones_like as ones_like,
+ full as full,
+ full_like as full_like,
+ count_nonzero as count_nonzero,
+ isfortran as isfortran,
+ argwhere as argwhere,
+ flatnonzero as flatnonzero,
+ correlate as correlate,
+ convolve as convolve,
+ outer as outer,
+ tensordot as tensordot,
+ roll as roll,
+ rollaxis as rollaxis,
+ moveaxis as moveaxis,
+ cross as cross,
+ indices as indices,
+ fromfunction as fromfunction,
+ isscalar as isscalar,
+ binary_repr as binary_repr,
+ base_repr as base_repr,
+ identity as identity,
+ allclose as allclose,
+ isclose as isclose,
+ array_equal as array_equal,
+ array_equiv as array_equiv,
+)
+
+from numpy.core.numerictypes import (
+ maximum_sctype as maximum_sctype,
+ issctype as issctype,
+ obj2sctype as obj2sctype,
+ issubclass_ as issubclass_,
+ issubsctype as issubsctype,
+ issubdtype as issubdtype,
+ sctype2char as sctype2char,
+ find_common_type as find_common_type,
+ nbytes as nbytes,
+ cast as cast,
+ ScalarType as ScalarType,
+ typecodes as typecodes,
+)
+
+from numpy.core.shape_base import (
+ atleast_1d as atleast_1d,
+ atleast_2d as atleast_2d,
+ atleast_3d as atleast_3d,
+ block as block,
+ hstack as hstack,
+ stack as stack,
+ vstack as vstack,
+)
+
+from numpy.lib import (
+ emath as emath,
+)
+
+from numpy.lib.arraypad import (
+ pad as pad,
+)
+
+from numpy.lib.arraysetops import (
+ ediff1d as ediff1d,
+ intersect1d as intersect1d,
+ setxor1d as setxor1d,
+ union1d as union1d,
+ setdiff1d as setdiff1d,
+ unique as unique,
+ in1d as in1d,
+ isin as isin,
+)
+
+from numpy.lib.arrayterator import (
+ Arrayterator as Arrayterator,
+)
+
+from numpy.lib.function_base import (
+ select as select,
+ piecewise as piecewise,
+ trim_zeros as trim_zeros,
+ copy as copy,
+ iterable as iterable,
+ percentile as percentile,
+ diff as diff,
+ gradient as gradient,
+ angle as angle,
+ unwrap as unwrap,
+ sort_complex as sort_complex,
+ disp as disp,
+ flip as flip,
+ rot90 as rot90,
+ extract as extract,
+ place as place,
+ asarray_chkfinite as asarray_chkfinite,
+ average as average,
+ bincount as bincount,
+ digitize as digitize,
+ cov as cov,
+ corrcoef as corrcoef,
+ median as median,
+ sinc as sinc,
+ hamming as hamming,
+ hanning as hanning,
+ bartlett as bartlett,
+ blackman as blackman,
+ kaiser as kaiser,
+ trapz as trapz,
+ i0 as i0,
+ add_newdoc as add_newdoc,
+ add_docstring as add_docstring,
+ meshgrid as meshgrid,
+ delete as delete,
+ insert as insert,
+ append as append,
+ interp as interp,
+ add_newdoc_ufunc as add_newdoc_ufunc,
+ quantile as quantile,
+)
+
+from numpy.lib.histograms import (
+ histogram_bin_edges as histogram_bin_edges,
+ histogram as histogram,
+ histogramdd as histogramdd,
+)
+
+from numpy.lib.index_tricks import (
+ ravel_multi_index as ravel_multi_index,
+ unravel_index as unravel_index,
+ mgrid as mgrid,
+ ogrid as ogrid,
+ r_ as r_,
+ c_ as c_,
+ s_ as s_,
+ index_exp as index_exp,
+ ix_ as ix_,
+ fill_diagonal as fill_diagonal,
+ diag_indices as diag_indices,
+ diag_indices_from as diag_indices_from,
+)
+
+from numpy.lib.nanfunctions import (
+ nansum as nansum,
+ nanmax as nanmax,
+ nanmin as nanmin,
+ nanargmax as nanargmax,
+ nanargmin as nanargmin,
+ nanmean as nanmean,
+ nanmedian as nanmedian,
+ nanpercentile as nanpercentile,
+ nanvar as nanvar,
+ nanstd as nanstd,
+ nanprod as nanprod,
+ nancumsum as nancumsum,
+ nancumprod as nancumprod,
+ nanquantile as nanquantile,
+)
+
+from numpy.lib.npyio import (
+ savetxt as savetxt,
+ loadtxt as loadtxt,
+ genfromtxt as genfromtxt,
+ recfromtxt as recfromtxt,
+ recfromcsv as recfromcsv,
+ load as load,
+ save as save,
+ savez as savez,
+ savez_compressed as savez_compressed,
+ packbits as packbits,
+ unpackbits as unpackbits,
+ fromregex as fromregex,
+)
+
+from numpy.lib.polynomial import (
+ poly as poly,
+ roots as roots,
+ polyint as polyint,
+ polyder as polyder,
+ polyadd as polyadd,
+ polysub as polysub,
+ polymul as polymul,
+ polydiv as polydiv,
+ polyval as polyval,
+ polyfit as polyfit,
+)
+
+from numpy.lib.shape_base import (
+ column_stack as column_stack,
+ row_stack as row_stack,
+ dstack as dstack,
+ array_split as array_split,
+ split as split,
+ hsplit as hsplit,
+ vsplit as vsplit,
+ dsplit as dsplit,
+ apply_over_axes as apply_over_axes,
+ expand_dims as expand_dims,
+ apply_along_axis as apply_along_axis,
+ kron as kron,
+ tile as tile,
+ get_array_wrap as get_array_wrap,
+ take_along_axis as take_along_axis,
+ put_along_axis as put_along_axis,
+)
+
+from numpy.lib.stride_tricks import (
+ broadcast_to as broadcast_to,
+ broadcast_arrays as broadcast_arrays,
+ broadcast_shapes as broadcast_shapes,
+)
+
+from numpy.lib.twodim_base import (
+ diag as diag,
+ diagflat as diagflat,
+ eye as eye,
+ fliplr as fliplr,
+ flipud as flipud,
+ tri as tri,
+ triu as triu,
+ tril as tril,
+ vander as vander,
+ histogram2d as histogram2d,
+ mask_indices as mask_indices,
+ tril_indices as tril_indices,
+ tril_indices_from as tril_indices_from,
+ triu_indices as triu_indices,
+ triu_indices_from as triu_indices_from,
+)
+
+from numpy.lib.type_check import (
+ mintypecode as mintypecode,
+ asfarray as asfarray,
+ real as real,
+ imag as imag,
+ iscomplex as iscomplex,
+ isreal as isreal,
+ iscomplexobj as iscomplexobj,
+ isrealobj as isrealobj,
+ nan_to_num as nan_to_num,
+ real_if_close as real_if_close,
+ typename as typename,
+ common_type as common_type,
+)
+
+from numpy.lib.ufunclike import (
+ fix as fix,
+ isposinf as isposinf,
+ isneginf as isneginf,
+)
+
+from numpy.lib.utils import (
+ issubclass_ as issubclass_,
+ issubsctype as issubsctype,
+ issubdtype as issubdtype,
+ deprecate as deprecate,
+ deprecate_with_doc as deprecate_with_doc,
+ get_include as get_include,
+ info as info,
+ source as source,
+ who as who,
+ lookfor as lookfor,
+ byte_bounds as byte_bounds,
+ safe_eval as safe_eval,
+ show_runtime as show_runtime,
+)
+
+from numpy.matrixlib import (
+ asmatrix as asmatrix,
+ mat as mat,
+ bmat as bmat,
+)
+
+_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True)
+
+# Protocol for representing file-like objects accepted
+# by `ndarray.tofile` and `fromfile`
+class _IOProtocol(Protocol):
+ def flush(self) -> object: ...
+ def fileno(self) -> int: ...
+ def tell(self) -> SupportsIndex: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
+# NOTE: `seek`, `write` and `flush` are technically only required
+# for `readwrite`/`write` modes
+class _MemMapIOProtocol(Protocol):
+ def flush(self) -> object: ...
+ def fileno(self) -> SupportsIndex: ...
+ def tell(self) -> int: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+ def write(self, s: bytes, /) -> object: ...
+ @property
+ def read(self) -> object: ...
+
+class _SupportsWrite(Protocol[_AnyStr_contra]):
+ def write(self, s: _AnyStr_contra, /) -> object: ...
+
+__all__: list[str]
+__path__: list[str]
+__version__: str
+__git_version__: str
+test: PytestTester
+
+# TODO: Move placeholders to their respective module once
+# their annotations are properly implemented
+#
+# Placeholders for classes
+
+# Some of these are aliases; others are wrappers with an identical signature
+round = around
+round_ = around
+max = amax
+min = amin
+product = prod
+cumproduct = cumprod
+sometrue = any
+alltrue = all
+
+def show_config() -> None: ...
+
+_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
+_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic)
+_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"]
+
+@final
+class dtype(Generic[_DTypeScalar_co]):
+ names: None | tuple[builtins.str, ...]
+ # Overload for subclass of generic
+ @overload
+ def __new__(
+ cls,
+ dtype: type[_DTypeScalar_co],
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> dtype[_DTypeScalar_co]: ...
+ # Overloads for string aliases, Python types, and some assorted
+ # other special cases. Order is sometimes important because of the
+ # subtype relationships
+ #
+ # bool < int < float < complex < object
+ #
+    # so we have to make sure the overloads for the narrowest types come
+    # first.
+ # Builtin types
+ @overload
+ def __new__(cls, dtype: type[bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ...
+ @overload
+ def __new__(cls, dtype: type[int], align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
+ @overload
+ def __new__(cls, dtype: None | type[float], align: bool = ..., copy: bool = ...) -> dtype[float_]: ...
+ @overload
+ def __new__(cls, dtype: type[complex], align: bool = ..., copy: bool = ...) -> dtype[complex_]: ...
+ @overload
+ def __new__(cls, dtype: type[builtins.str], align: bool = ..., copy: bool = ...) -> dtype[str_]: ...
+ @overload
+ def __new__(cls, dtype: type[bytes], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
+
+ # `unsignedinteger` string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: bool = ..., copy: bool = ...) -> dtype[uint8]: ...
+ @overload
+ def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: bool = ..., copy: bool = ...) -> dtype[uint16]: ...
+ @overload
+ def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: bool = ..., copy: bool = ...) -> dtype[uint32]: ...
+ @overload
+ def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: bool = ..., copy: bool = ...) -> dtype[uint64]: ...
+ @overload
+ def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: bool = ..., copy: bool = ...) -> dtype[ubyte]: ...
+ @overload
+ def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: bool = ..., copy: bool = ...) -> dtype[ushort]: ...
+ @overload
+ def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: bool = ..., copy: bool = ...) -> dtype[uintc]: ...
+
+ # NOTE: We're assuming here that `uint_ptr_t == size_t`,
+ # an assumption that does not hold in rare cases (same for `ssize_t`)
+ @overload
+ def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: bool = ..., copy: bool = ...) -> dtype[uintp]: ...
+ @overload
+ def __new__(cls, dtype: _UIntCodes | type[ct.c_ulong], align: bool = ..., copy: bool = ...) -> dtype[uint]: ...
+ @overload
+ def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: bool = ..., copy: bool = ...) -> dtype[ulonglong]: ...
+
+ # `signedinteger` string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: bool = ..., copy: bool = ...) -> dtype[int8]: ...
+ @overload
+ def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: bool = ..., copy: bool = ...) -> dtype[int16]: ...
+ @overload
+ def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: bool = ..., copy: bool = ...) -> dtype[int32]: ...
+ @overload
+ def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: bool = ..., copy: bool = ...) -> dtype[int64]: ...
+ @overload
+ def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: bool = ..., copy: bool = ...) -> dtype[byte]: ...
+ @overload
+ def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: bool = ..., copy: bool = ...) -> dtype[short]: ...
+ @overload
+ def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: bool = ..., copy: bool = ...) -> dtype[intc]: ...
+ @overload
+ def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: bool = ..., copy: bool = ...) -> dtype[intp]: ...
+ @overload
+ def __new__(cls, dtype: _IntCodes | type[ct.c_long], align: bool = ..., copy: bool = ...) -> dtype[int_]: ...
+ @overload
+ def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: bool = ..., copy: bool = ...) -> dtype[longlong]: ...
+
+ # `floating` string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _Float16Codes, align: bool = ..., copy: bool = ...) -> dtype[float16]: ...
+ @overload
+ def __new__(cls, dtype: _Float32Codes, align: bool = ..., copy: bool = ...) -> dtype[float32]: ...
+ @overload
+ def __new__(cls, dtype: _Float64Codes, align: bool = ..., copy: bool = ...) -> dtype[float64]: ...
+ @overload
+ def __new__(cls, dtype: _HalfCodes, align: bool = ..., copy: bool = ...) -> dtype[half]: ...
+ @overload
+ def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: bool = ..., copy: bool = ...) -> dtype[single]: ...
+ @overload
+ def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: bool = ..., copy: bool = ...) -> dtype[double]: ...
+ @overload
+ def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: bool = ..., copy: bool = ...) -> dtype[longdouble]: ...
+
+ # `complexfloating` string-based representations
+ @overload
+ def __new__(cls, dtype: _Complex64Codes, align: bool = ..., copy: bool = ...) -> dtype[complex64]: ...
+ @overload
+ def __new__(cls, dtype: _Complex128Codes, align: bool = ..., copy: bool = ...) -> dtype[complex128]: ...
+ @overload
+ def __new__(cls, dtype: _CSingleCodes, align: bool = ..., copy: bool = ...) -> dtype[csingle]: ...
+ @overload
+ def __new__(cls, dtype: _CDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[cdouble]: ...
+ @overload
+ def __new__(cls, dtype: _CLongDoubleCodes, align: bool = ..., copy: bool = ...) -> dtype[clongdouble]: ...
+
+ # Miscellaneous string-based representations and ctypes
+ @overload
+ def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: bool = ..., copy: bool = ...) -> dtype[bool_]: ...
+ @overload
+ def __new__(cls, dtype: _TD64Codes, align: bool = ..., copy: bool = ...) -> dtype[timedelta64]: ...
+ @overload
+ def __new__(cls, dtype: _DT64Codes, align: bool = ..., copy: bool = ...) -> dtype[datetime64]: ...
+ @overload
+ def __new__(cls, dtype: _StrCodes, align: bool = ..., copy: bool = ...) -> dtype[str_]: ...
+ @overload
+ def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: bool = ..., copy: bool = ...) -> dtype[bytes_]: ...
+ @overload
+ def __new__(cls, dtype: _VoidCodes, align: bool = ..., copy: bool = ...) -> dtype[void]: ...
+ @overload
+ def __new__(cls, dtype: _ObjectCodes | type[ct.py_object], align: bool = ..., copy: bool = ...) -> dtype[object_]: ...
+
+ # dtype of a dtype is the same dtype
+ @overload
+ def __new__(
+ cls,
+ dtype: dtype[_DTypeScalar_co],
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> dtype[_DTypeScalar_co]: ...
+ @overload
+ def __new__(
+ cls,
+ dtype: _SupportsDType[dtype[_DTypeScalar_co]],
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> dtype[_DTypeScalar_co]: ...
+ # Handle strings that can't be expressed as literals; i.e. s1, s2, ...
+ @overload
+ def __new__(
+ cls,
+ dtype: builtins.str,
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> dtype[Any]: ...
+ # Catchall overload for void-likes
+ @overload
+ def __new__(
+ cls,
+ dtype: _VoidDTypeLike,
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> dtype[void]: ...
+ # Catchall overload for object-likes
+ @overload
+ def __new__(
+ cls,
+ dtype: type[object],
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> dtype[object_]: ...
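+
+    # For example, as resolved by a type checker from the overloads above:
+    #   np.dtype(np.float32)  -> dtype[float32]
+    #   np.dtype(int)         -> dtype[int_]
+    #   np.dtype(object)      -> dtype[object_]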
+
+ if sys.version_info >= (3, 9):
+ def __class_getitem__(self, item: Any) -> GenericAlias: ...
+
+ @overload
+ def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ...
+ @overload
+ def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ...
+
+ # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes
+ @overload
+ def __mul__(self: _DType, value: L[1]) -> _DType: ...
+ @overload
+ def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
+ @overload
+ def __mul__(self, value: SupportsIndex) -> dtype[void]: ...
+
+ # NOTE: `__rmul__` seems to be broken when used in combination with
+ # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for
+ # now for non-flexible dtypes.
+ @overload
+ def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ...
+ @overload
+ def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ...
+
+ def __gt__(self, other: DTypeLike) -> bool: ...
+ def __ge__(self, other: DTypeLike) -> bool: ...
+ def __lt__(self, other: DTypeLike) -> bool: ...
+ def __le__(self, other: DTypeLike) -> bool: ...
+
+ # Explicitly defined `__eq__` and `__ne__` to get around mypy's
+ # `strict_equality` option; even though their signatures are
+ # identical to their `object`-based counterpart
+ def __eq__(self, other: Any) -> bool: ...
+ def __ne__(self, other: Any) -> bool: ...
+
+ @property
+ def alignment(self) -> int: ...
+ @property
+ def base(self) -> dtype[Any]: ...
+ @property
+ def byteorder(self) -> builtins.str: ...
+ @property
+ def char(self) -> builtins.str: ...
+ @property
+ def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ...
+ @property
+ def fields(
+ self,
+ ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ...
+ @property
+ def flags(self) -> int: ...
+ @property
+ def hasobject(self) -> bool: ...
+ @property
+ def isbuiltin(self) -> int: ...
+ @property
+ def isnative(self) -> bool: ...
+ @property
+ def isalignedstruct(self) -> bool: ...
+ @property
+ def itemsize(self) -> int: ...
+ @property
+ def kind(self) -> builtins.str: ...
+ @property
+ def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ...
+ @property
+ def name(self) -> builtins.str: ...
+ @property
+ def num(self) -> int: ...
+ @property
+ def shape(self) -> _Shape: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ...
+ def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ...
+ @property
+ def str(self) -> builtins.str: ...
+ @property
+ def type(self) -> type[_DTypeScalar_co]: ...
+
+_ArrayLikeInt = Union[
+ int,
+ integer,
+ Sequence[Union[int, integer]],
+ Sequence[Sequence[Any]], # TODO: wait for support for recursive types
+ ndarray
+]
+
+_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter)
+
+@final
+class flatiter(Generic[_NdArraySubClass]):
+ __hash__: ClassVar[None]
+ @property
+ def base(self) -> _NdArraySubClass: ...
+ @property
+ def coords(self) -> _Shape: ...
+ @property
+ def index(self) -> int: ...
+ def copy(self) -> _NdArraySubClass: ...
+ def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ...
+ def __next__(self: flatiter[ndarray[Any, dtype[_ScalarType]]]) -> _ScalarType: ...
+ def __len__(self) -> int: ...
+ @overload
+ def __getitem__(
+ self: flatiter[ndarray[Any, dtype[_ScalarType]]],
+ key: int | integer | tuple[int | integer],
+ ) -> _ScalarType: ...
+ @overload
+ def __getitem__(
+ self,
+ key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis],
+ ) -> _NdArraySubClass: ...
+ # TODO: `__setitem__` operates via `unsafe` casting rules, and can
+ # thus accept any type accepted by the relevant underlying `np.generic`
+ # constructor.
+ # This means that `value` must in reality be a supertype of `npt.ArrayLike`.
+ def __setitem__(
+ self,
+ key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis],
+ value: Any,
+ ) -> None: ...
+ @overload
+ def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ...
+ @overload
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
+
+_OrderKACF = L[None, "K", "A", "C", "F"]
+_OrderACF = L[None, "A", "C", "F"]
+_OrderCF = L[None, "C", "F"]
+
+_ModeKind = L["raise", "wrap", "clip"]
+_PartitionKind = L["introselect"]
+_SortKind = L["quicksort", "mergesort", "heapsort", "stable"]
+_SortSide = L["left", "right"]
+
+_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
+
+class _ArrayOrScalarCommon:
+ @property
+ def T(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def data(self) -> memoryview: ...
+ @property
+ def flags(self) -> flagsobj: ...
+ @property
+ def itemsize(self) -> int: ...
+ @property
+ def nbytes(self) -> int: ...
+ def __bool__(self) -> bool: ...
+ def __bytes__(self) -> bytes: ...
+ def __str__(self) -> str: ...
+ def __repr__(self) -> str: ...
+ def __copy__(self: _ArraySelf) -> _ArraySelf: ...
+ def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ...
+
+ # TODO: How to deal with the non-commutative nature of `==` and `!=`?
+ # xref numpy/numpy#17368
+ def __eq__(self, other: Any) -> Any: ...
+ def __ne__(self, other: Any) -> Any: ...
+ def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
+ def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ...
+ def dumps(self) -> bytes: ...
+ def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
+ # NOTE: `tostring()` is deprecated and therefore excluded
+ # def tostring(self, order=...): ...
+ def tofile(
+ self,
+ fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol,
+ sep: str = ...,
+ format: str = ...,
+ ) -> None: ...
+ # generics and 0d arrays return builtin scalars
+ def tolist(self) -> Any: ...
+
+ @property
+ def __array_interface__(self) -> dict[str, Any]: ...
+ @property
+ def __array_priority__(self) -> float: ...
+ @property
+ def __array_struct__(self) -> Any: ... # builtins.PyCapsule
+ def __setstate__(self, state: tuple[
+ SupportsIndex, # version
+ _ShapeLike, # Shape
+ _DType_co, # DType
+ bool, # F-contiguous
+ bytes | list[Any], # Data
+ ], /) -> None: ...
+ # a `bool_` is returned when `keepdims=True` and `self` is a 0d array
+
+ @overload
+ def all(
+ self,
+ axis: None = ...,
+ out: None = ...,
+ keepdims: L[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> bool_: ...
+ @overload
+ def all(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def all(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
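+ # For example, `np.ones(3).all()` matches the first overload and is
+ # typed as `bool_`, whereas `np.ones((2, 2)).all(axis=0)` falls through
+ # to the second overload and is typed as `Any`.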
+
+ @overload
+ def any(
+ self,
+ axis: None = ...,
+ out: None = ...,
+ keepdims: L[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> bool_: ...
+ @overload
+ def any(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def any(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def argmax(
+ self,
+ axis: None = ...,
+ out: None = ...,
+ *,
+ keepdims: L[False] = ...,
+ ) -> intp: ...
+ @overload
+ def argmax(
+ self,
+ axis: SupportsIndex = ...,
+ out: None = ...,
+ *,
+ keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def argmax(
+ self,
+ axis: None | SupportsIndex = ...,
+ out: _NdArraySubClass = ...,
+ *,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def argmin(
+ self,
+ axis: None = ...,
+ out: None = ...,
+ *,
+ keepdims: L[False] = ...,
+ ) -> intp: ...
+ @overload
+ def argmin(
+ self,
+ axis: SupportsIndex = ...,
+ out: None = ...,
+ *,
+ keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def argmin(
+ self,
+ axis: None | SupportsIndex = ...,
+ out: _NdArraySubClass = ...,
+ *,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+
+ def argsort(
+ self,
+ axis: None | SupportsIndex = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
+ ) -> ndarray: ...
+
+ @overload
+ def choose(
+ self,
+ choices: ArrayLike,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray: ...
+ @overload
+ def choose(
+ self,
+ choices: ArrayLike,
+ out: _NdArraySubClass = ...,
+ mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def clip(
+ self,
+ min: ArrayLike = ...,
+ max: None | ArrayLike = ...,
+ out: None = ...,
+ **kwargs: Any,
+ ) -> ndarray: ...
+ @overload
+ def clip(
+ self,
+ min: None = ...,
+ max: ArrayLike = ...,
+ out: None = ...,
+ **kwargs: Any,
+ ) -> ndarray: ...
+ @overload
+ def clip(
+ self,
+ min: ArrayLike = ...,
+ max: None | ArrayLike = ...,
+ out: _NdArraySubClass = ...,
+ **kwargs: Any,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def clip(
+ self,
+ min: None = ...,
+ max: ArrayLike = ...,
+ out: _NdArraySubClass = ...,
+ **kwargs: Any,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def compress(
+ self,
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ ) -> ndarray: ...
+ @overload
+ def compress(
+ self,
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+
+ def conj(self: _ArraySelf) -> _ArraySelf: ...
+
+ def conjugate(self: _ArraySelf) -> _ArraySelf: ...
+
+ @overload
+ def cumprod(
+ self,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ) -> ndarray: ...
+ @overload
+ def cumprod(
+ self,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def cumsum(
+ self,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ) -> ndarray: ...
+ @overload
+ def cumsum(
+ self,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def max(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def max(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def mean(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def mean(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def min(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def min(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ def newbyteorder(
+ self: _ArraySelf,
+ __new_order: _ByteOrder = ...,
+ ) -> _ArraySelf: ...
+
+ @overload
+ def prod(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def prod(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def ptp(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def ptp(
+ self,
+ axis: None | _ShapeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def round(
+ self: _ArraySelf,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+ ) -> _ArraySelf: ...
+ @overload
+ def round(
+ self,
+ decimals: SupportsIndex = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def std(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def std(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def sum(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def sum(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def var(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+ @overload
+ def var(
+ self,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+ ) -> _NdArraySubClass: ...
+
+_DType = TypeVar("_DType", bound=dtype[Any])
+_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any])
+_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible])
+
+# TODO: Set the `bound` to something more suitable once we
+# have proper shape support
+_ShapeType = TypeVar("_ShapeType", bound=Any)
+_ShapeType2 = TypeVar("_ShapeType2", bound=Any)
+_NumberType = TypeVar("_NumberType", bound=number[Any])
+
+# There is currently no exhaustive way to type the buffer protocol,
+# as it is implemented exclusively in the C API (python/typing#593)
+_SupportsBuffer = Union[
+ bytes,
+ bytearray,
+ memoryview,
+ _array.array[Any],
+ mmap.mmap,
+ NDArray[Any],
+ generic,
+]
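+# For example, `bytes` can back an array through the low-level
+# `np.ndarray` constructor (assuming `import numpy as np`):
+#
+# >>> np.ndarray((3,), dtype=np.uint8, buffer=b"\x01\x02\x03")
+# array([1, 2, 3], dtype=uint8)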
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_2Tuple = tuple[_T, _T]
+_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"]
+
+_ArrayUInt_co = NDArray[Union[bool_, unsignedinteger[Any]]]
+_ArrayInt_co = NDArray[Union[bool_, integer[Any]]]
+_ArrayFloat_co = NDArray[Union[bool_, integer[Any], floating[Any]]]
+_ArrayComplex_co = NDArray[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]
+_ArrayNumber_co = NDArray[Union[bool_, number[Any]]]
+_ArrayTD64_co = NDArray[Union[bool_, integer[Any], timedelta64]]
+
+# Introduce an alias for `dtype` to avoid naming conflicts.
+_dtype = dtype
+
+# `builtins.PyCapsule` unfortunately lacks annotations at the moment;
+# use `Any` as a stopgap measure
+_PyCapsule = Any
+
+class _SupportsItem(Protocol[_T_co]):
+ def item(self, args: Any, /) -> _T_co: ...
+
+class _SupportsReal(Protocol[_T_co]):
+ @property
+ def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+ @property
+ def imag(self) -> _T_co: ...
+
+class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
+ __hash__: ClassVar[None]
+ @property
+ def base(self) -> None | ndarray: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def size(self) -> int: ...
+ @property
+ def real(
+ self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var]
+ ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ...
+ @real.setter
+ def real(self, value: ArrayLike) -> None: ...
+ @property
+ def imag(
+ self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var]
+ ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ...
+ @imag.setter
+ def imag(self, value: ArrayLike) -> None: ...
+ def __new__(
+ cls: type[_ArraySelf],
+ shape: _ShapeLike,
+ dtype: DTypeLike = ...,
+ buffer: None | _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: None | _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> _ArraySelf: ...
+
+ if sys.version_info >= (3, 9):
+ def __class_getitem__(self, item: Any) -> GenericAlias: ...
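+ # For example, on Python >= 3.9 runtime subscription yields a
+ # `types.GenericAlias`:
+ #
+ # >>> np.ndarray[Any, np.dtype[np.float64]]
+ # numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]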
+
+ @overload
+ def __array__(self, dtype: None = ..., /) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
+
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+
+ def __array_function__(
+ self,
+ func: Callable[..., Any],
+ types: Iterable[type],
+ args: Iterable[Any],
+ kwargs: Mapping[str, Any],
+ ) -> Any: ...
+
+ # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__`
+ # is a pseudo-abstract method the type has been narrowed down in order to
+ # grant subclasses a bit more flexibility
+ def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ...
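+ # A minimal subclass sketch relying on this hook to propagate an
+ # attribute through views and templates:
+ #
+ # >>> class Tagged(np.ndarray):
+ # ...     def __array_finalize__(self, obj):
+ # ...         self.tag = getattr(obj, "tag", None)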
+
+ def __array_wrap__(
+ self,
+ array: ndarray[_ShapeType2, _DType],
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+ /,
+ ) -> ndarray[_ShapeType2, _DType]: ...
+
+ def __array_prepare__(
+ self,
+ array: ndarray[_ShapeType2, _DType],
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+ /,
+ ) -> ndarray[_ShapeType2, _DType]: ...
+
+ @overload
+ def __getitem__(self, key: (
+ NDArray[integer[Any]]
+ | NDArray[bool_]
+ | tuple[NDArray[integer[Any]] | NDArray[bool_], ...]
+ )) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...]) -> Any: ...
+ @overload
+ def __getitem__(self, key: (
+ None
+ | slice
+ | ellipsis
+ | SupportsIndex
+ | _ArrayLikeInt_co
+ | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
+ )) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ...
+
+ @property
+ def ctypes(self) -> _ctypes[int]: ...
+ @property
+ def shape(self) -> _Shape: ...
+ @shape.setter
+ def shape(self, value: _ShapeLike) -> None: ...
+ @property
+ def strides(self) -> _Shape: ...
+ @strides.setter
+ def strides(self, value: _ShapeLike) -> None: ...
+ def byteswap(self: _ArraySelf, inplace: bool = ...) -> _ArraySelf: ...
+ def fill(self, value: Any) -> None: ...
+ @property
+ def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ...
+
+ # Use the same output type as that of the underlying `generic`
+ @overload
+ def item(
+ self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var]
+ *args: SupportsIndex,
+ ) -> _T: ...
+ @overload
+ def item(
+ self: ndarray[Any, _dtype[_SupportsItem[_T]]], # type: ignore[type-var]
+ args: tuple[SupportsIndex, ...],
+ /,
+ ) -> _T: ...
+
+ @overload
+ def itemset(self, value: Any, /) -> None: ...
+ @overload
+ def itemset(self, item: _ShapeLike, value: Any, /) -> None: ...
+
+ @overload
+ def resize(self, new_shape: _ShapeLike, /, *, refcheck: bool = ...) -> None: ...
+ @overload
+ def resize(self, *new_shape: SupportsIndex, refcheck: bool = ...) -> None: ...
+
+ def setflags(
+ self, write: bool = ..., align: bool = ..., uic: bool = ...
+ ) -> None: ...
+
+ def squeeze(
+ self,
+ axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ def swapaxes(
+ self,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ @overload
+ def transpose(self: _ArraySelf, axes: None | _ShapeLike, /) -> _ArraySelf: ...
+ @overload
+ def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ...
+
+ def argpartition(
+ self,
+ kth: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ kind: _PartitionKind = ...,
+ order: None | str | Sequence[str] = ...,
+ ) -> ndarray[Any, _dtype[intp]]: ...
+
+ def diagonal(
+ self,
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ # 1D + 1D returns a scalar;
+ # all other combinations with at least one non-0D array return an ndarray.
+ @overload
+ def dot(self, b: _ScalarLike_co, out: None = ...) -> ndarray: ...
+ @overload
+ def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc]
+ @overload
+ def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ...
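+ # For example, `np.arange(3).dot(np.arange(3))` (1D with 1D) yields the
+ # scalar 5, while `np.eye(2).dot(np.arange(2))` returns an ndarray.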
+
+ # `nonzero()` is deprecated for 0d arrays/generics
+ def nonzero(self) -> tuple[ndarray[Any, _dtype[intp]], ...]: ...
+
+ def partition(
+ self,
+ kth: _ArrayLikeInt_co,
+ axis: SupportsIndex = ...,
+ kind: _PartitionKind = ...,
+ order: None | str | Sequence[str] = ...,
+ ) -> None: ...
+
+ # `put` is technically available to `generic`,
+ # but is pointless as `generic`s are immutable
+ def put(
+ self,
+ ind: _ArrayLikeInt_co,
+ v: ArrayLike,
+ mode: _ModeKind = ...,
+ ) -> None: ...
+
+ @overload
+ def searchsorted( # type: ignore[misc]
+ self, # >= 1D array
+ v: _ScalarLike_co, # 0D array-like
+ side: _SortSide = ...,
+ sorter: None | _ArrayLikeInt_co = ...,
+ ) -> intp: ...
+ @overload
+ def searchsorted(
+ self, # >= 1D array
+ v: ArrayLike,
+ side: _SortSide = ...,
+ sorter: None | _ArrayLikeInt_co = ...,
+ ) -> ndarray[Any, _dtype[intp]]: ...
+
+ def setfield(
+ self,
+ val: ArrayLike,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...,
+ ) -> None: ...
+
+ def sort(
+ self,
+ axis: SupportsIndex = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
+ ) -> None: ...
+
+ @overload
+ def trace(
+ self, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ) -> Any: ...
+ @overload
+ def trace(
+ self, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def take( # type: ignore[misc]
+ self: ndarray[Any, _dtype[_ScalarType]],
+ indices: _IntLike_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def take( # type: ignore[misc]
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: _NdArraySubClass = ...,
+ mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+
+ def repeat(
+ self,
+ repeats: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ def flatten(
+ self,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ def ravel(
+ self,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, _DType_co]: ...
+
+ @overload
+ def reshape(
+ self, shape: _ShapeLike, /, *, order: _OrderACF = ...
+ ) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def reshape(
+ self, *shape: SupportsIndex, order: _OrderACF = ...
+ ) -> ndarray[Any, _DType_co]: ...
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: bool = ...,
+ copy: bool | _CopyMode = ...,
+ ) -> NDArray[_ScalarType]: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: bool = ...,
+ copy: bool | _CopyMode = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def view(self: _ArraySelf) -> _ArraySelf: ...
+ @overload
+ def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ...
+ @overload
+ def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ...
+ @overload
+ def view(self, dtype: DTypeLike) -> NDArray[Any]: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: type[_NdArraySubClass],
+ ) -> _NdArraySubClass: ...
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ offset: SupportsIndex = ...
+ ) -> NDArray[_ScalarType]: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> NDArray[Any]: ...
+
+ # Dispatch to the underlying `generic` via protocols
+ def __int__(
+ self: ndarray[Any, _dtype[SupportsInt]], # type: ignore[type-var]
+ ) -> int: ...
+
+ def __float__(
+ self: ndarray[Any, _dtype[SupportsFloat]], # type: ignore[type-var]
+ ) -> float: ...
+
+ def __complex__(
+ self: ndarray[Any, _dtype[SupportsComplex]], # type: ignore[type-var]
+ ) -> complex: ...
+
+ def __index__(
+ self: ndarray[Any, _dtype[SupportsIndex]], # type: ignore[type-var]
+ ) -> int: ...
+
+ def __len__(self) -> int: ...
+ def __setitem__(self, key, value): ...
+ def __iter__(self) -> Any: ...
+ def __contains__(self, key) -> bool: ...
+
+ # The last overload is for catching recursive objects whose
+ # nesting is too deep.
+ # The first overload is for catching `bytes` (as they are a subtype of
+ # `Sequence[int]`) and `str`; as `str` is a recursive sequence of
+ # strings, it would otherwise pass through the final overload
+
+ @overload
+ def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+
+ @overload
+ def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+ @overload
+ def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+ @overload
+ def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+ @overload
+ def __le__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+ @overload
+ def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+
+ @overload
+ def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+
+ @overload
+ def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(self: NDArray[object_], other: Any) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[bool_]: ...
+
+ # Unary ops
+ @overload
+ def __abs__(self: NDArray[bool_]) -> NDArray[bool_]: ...
+ @overload
+ def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+ @overload
+ def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+ @overload
+ def __abs__(self: NDArray[object_]) -> Any: ...
+
+ @overload
+ def __invert__(self: NDArray[bool_]) -> NDArray[bool_]: ...
+ @overload
+ def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ...
+ @overload
+ def __invert__(self: NDArray[object_]) -> Any: ...
+
+ @overload
+ def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+ @overload
+ def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+ @overload
+ def __pos__(self: NDArray[object_]) -> Any: ...
+
+ @overload
+ def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+ @overload
+ def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+ @overload
+ def __neg__(self: NDArray[object_]) -> Any: ...
+
+ # Binary ops
+ # NOTE: `ndarray` does not implement `__imatmul__`
+ @overload
+ def __matmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __matmul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rmatmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __mod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
+ @overload
+ def __mod__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmod__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __divmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload
+ def __rdivmod__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc]
+ @overload
+ def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload
+ def __add__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __add__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __radd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __radd__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __sub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __sub__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ...
+ @overload
+ def __rsub__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc]
+ @overload
+ def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rsub__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __mul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __mul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rmul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmul__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __floordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __floordiv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
+ @overload
+ def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __pow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __pow__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rpow__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rpow__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc]
+ @overload
+ def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __truediv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc]
+ @overload
+ def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
+ @overload
+ def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
+ def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
+ @overload
+ def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __lshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __lshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rlshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rlshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rrshift__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc]
+ @overload
+ def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rrshift__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __and__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __and__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rand__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __xor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __xor__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __rxor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __rxor__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __or__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __or__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ @overload
+ def __ror__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+ @overload
+ def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def __ror__(self: NDArray[object_], other: Any) -> Any: ...
+ @overload
+ def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
+
+ # `np.generic` does not support inplace operations
+
+ # NOTE: Inplace ops generally use "same_kind" casting w.r.t. the left
+ # operand. An exception to this rule is unsigned integers, which also
+ # accept a signed integer for the right operand as long as it is a 0D
+ # object with a value >= 0
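+ # For example:
+ #
+ # >>> u = np.arange(3, dtype=np.uint8)
+ # >>> u += 1              # OK: 0D signed integer with value >= 0
+ # >>> u += np.float64(1)  # rejected: not a "same_kind" cast to uint8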
+ @overload
+ def __iadd__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
+ @overload
+ def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __imul__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
+ @overload
+ def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ...
+ @overload
+ def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ...
+ @overload
+ def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ...
+ @overload
+ def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ...
+ @overload
+ def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __iand__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __ixor__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ @overload
+ def __ior__(self: NDArray[bool_], other: _ArrayLikeBool_co) -> NDArray[bool_]: ...
+ @overload
+ def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ...
+ @overload
+ def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ...
+ @overload
+ def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ...
+
+ def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ...
+ def __dlpack_device__(self) -> tuple[int, L[0]]: ...
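+ # For example, where `np.from_dlpack` is available (NumPy >= 1.22),
+ # `np.from_dlpack(np.arange(3))` round-trips an array through
+ # `__dlpack__` and `__dlpack_device__`.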
+
+ # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
+ @property
+ def dtype(self) -> _DType_co: ...
+
+# NOTE: while `np.generic` is not technically an instance of `ABCMeta`,
+# the `@abstractmethod` decorator is used here to (forcefully) deny
+# the creation of `np.generic` instances.
+# The `# type: ignore` comments are necessary to silence mypy errors regarding
+# the missing `ABCMeta` metaclass.
+
+# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
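+# For example, `np.generic()` raises a `TypeError` at runtime; the
+# `@abstractmethod` decorator below mirrors that restriction for type
+# checkers.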
+
+_ScalarType = TypeVar("_ScalarType", bound=generic)
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+class generic(_ArrayOrScalarCommon):
+ @abstractmethod
+ def __init__(self, *args: Any, **kwargs: Any) -> None: ...
+ @overload
+ def __array__(self: _ScalarType, dtype: None = ..., /) -> ndarray[Any, _dtype[_ScalarType]]: ...
+ @overload
+ def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
+ @property
+ def base(self) -> None: ...
+ @property
+ def ndim(self) -> L[0]: ...
+ @property
+ def size(self) -> L[1]: ...
+ @property
+ def shape(self) -> tuple[()]: ...
+ @property
+ def strides(self) -> tuple[()]: ...
+ def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ...
+ @property
+ def flat(self: _ScalarType) -> flatiter[ndarray[Any, _dtype[_ScalarType]]]: ...
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: bool = ...,
+ copy: bool | _CopyMode = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: bool = ...,
+ copy: bool | _CopyMode = ...,
+ ) -> Any: ...
+
+ # NOTE: `view` will perform a 0D->scalar cast,
+ # thus the array `type` is irrelevant to the output type
+ @overload
+ def view(
+ self: _ScalarType,
+ type: type[ndarray[Any, Any]] = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def view(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ type: type[ndarray[Any, Any]] = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def view(
+ self,
+ dtype: DTypeLike,
+ type: type[ndarray[Any, Any]] = ...,
+ ) -> Any: ...
+
+ @overload
+ def getfield(
+ self,
+ dtype: _DTypeLike[_ScalarType],
+ offset: SupportsIndex = ...
+ ) -> _ScalarType: ...
+ @overload
+ def getfield(
+ self,
+ dtype: DTypeLike,
+ offset: SupportsIndex = ...
+ ) -> Any: ...
+
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> Any: ...
+
+ @overload
+ def take( # type: ignore[misc]
+ self: _ScalarType,
+ indices: _IntLike_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> _ScalarType: ...
+ @overload
+ def take( # type: ignore[misc]
+ self: _ScalarType,
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray[Any, _dtype[_ScalarType]]: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: _NdArraySubClass = ...,
+ mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+
+ def repeat(
+ self: _ScalarType,
+ repeats: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ ) -> ndarray[Any, _dtype[_ScalarType]]: ...
+
+ def flatten(
+ self: _ScalarType,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, _dtype[_ScalarType]]: ...
+
+ def ravel(
+ self: _ScalarType,
+ order: _OrderKACF = ...,
+ ) -> ndarray[Any, _dtype[_ScalarType]]: ...
+
+ @overload
+ def reshape(
+ self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ...
+ ) -> ndarray[Any, _dtype[_ScalarType]]: ...
+ @overload
+ def reshape(
+ self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ...
+ ) -> ndarray[Any, _dtype[_ScalarType]]: ...
+
+ def squeeze(
+ self: _ScalarType, axis: None | L[0] | tuple[()] = ...
+ ) -> _ScalarType: ...
+ def transpose(self: _ScalarType, axes: None | tuple[()] = ..., /) -> _ScalarType: ...
+ # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype`
+ @property
+ def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ...
+
+class number(generic, Generic[_NBit1]): # type: ignore
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ if sys.version_info >= (3, 9):
+ def __class_getitem__(self, item: Any) -> GenericAlias: ...
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+ def __neg__(self: _ArraySelf) -> _ArraySelf: ...
+ def __pos__(self: _ArraySelf) -> _ArraySelf: ...
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ...
+ # Ensure that objects annotated as `number` support arithmetic operations
+ __add__: _NumberOp
+ __radd__: _NumberOp
+ __sub__: _NumberOp
+ __rsub__: _NumberOp
+ __mul__: _NumberOp
+ __rmul__: _NumberOp
+ __floordiv__: _NumberOp
+ __rfloordiv__: _NumberOp
+ __pow__: _NumberOp
+ __rpow__: _NumberOp
+ __truediv__: _NumberOp
+ __rtruediv__: _NumberOp
+ __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+
+class bool_(generic):
+ def __init__(self, value: object = ..., /) -> None: ...
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> bool: ...
+ def tolist(self) -> bool: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ...
+ __add__: _BoolOp[bool_]
+ __radd__: _BoolOp[bool_]
+ __sub__: _BoolSub
+ __rsub__: _BoolSub
+ __mul__: _BoolOp[bool_]
+ __rmul__: _BoolOp[bool_]
+ __floordiv__: _BoolOp[int8]
+ __rfloordiv__: _BoolOp[int8]
+ __pow__: _BoolOp[int8]
+ __rpow__: _BoolOp[int8]
+ __truediv__: _BoolTrueDiv
+ __rtruediv__: _BoolTrueDiv
+ def __invert__(self) -> bool_: ...
+ __lshift__: _BoolBitOp[int8]
+ __rlshift__: _BoolBitOp[int8]
+ __rshift__: _BoolBitOp[int8]
+ __rrshift__: _BoolBitOp[int8]
+ __and__: _BoolBitOp[bool_]
+ __rand__: _BoolBitOp[bool_]
+ __xor__: _BoolBitOp[bool_]
+ __rxor__: _BoolBitOp[bool_]
+ __or__: _BoolBitOp[bool_]
+ __ror__: _BoolBitOp[bool_]
+ __mod__: _BoolMod
+ __rmod__: _BoolMod
+ __divmod__: _BoolDivMod
+ __rdivmod__: _BoolDivMod
+ __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+ __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co]
+
+class object_(generic):
+ def __init__(self, value: object = ..., /) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ # The 3 protocols below may or may not raise,
+ # depending on the underlying object
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+
+# The `datetime64` constructor requires an object with the three attributes below,
+# and thus supports datetime duck typing
+class _DatetimeScalar(Protocol):
+ @property
+ def day(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def year(self) -> int: ...
+
+# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int`
+# depending on the unit
+class datetime64(generic):
+ @overload
+ def __init__(
+ self,
+ value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
+ format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
+ ) -> None: ...
+ @overload
+ def __init__(
+ self,
+ value: int,
+ format: _CharLike_co | tuple[_CharLike_co, _IntLike_co],
+ /,
+ ) -> None: ...
+ def __add__(self, other: _TD64Like_co) -> datetime64: ...
+ def __radd__(self, other: _TD64Like_co) -> datetime64: ...
+ @overload
+ def __sub__(self, other: datetime64) -> timedelta64: ...
+ @overload
+ def __sub__(self, other: _TD64Like_co) -> datetime64: ...
+ def __rsub__(self, other: datetime64) -> timedelta64: ...
+ __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+ __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+ __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+ __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co]
+
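+# A hedged usage sketch for the duck-typed constructor above (assumes a
+# matching NumPy runtime; `dt` is the stdlib `datetime` module):
+#
+#     >>> import datetime as dt
+#     >>> import numpy as np
+#     >>> np.datetime64(dt.date(2023, 4, 28))   # any object with .year/.month/.day
+#     numpy.datetime64('2023-04-28')
+#     >>> np.datetime64("2023-04-28") - np.datetime64("2023-04-01")
+#     numpy.timedelta64(27,'D')
+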
+_IntValue = Union[SupportsInt, _CharLike_co, SupportsIndex]
+_FloatValue = Union[None, _CharLike_co, SupportsFloat, SupportsIndex]
+_ComplexValue = Union[
+ None,
+ _CharLike_co,
+ SupportsFloat,
+ SupportsComplex,
+ SupportsIndex,
+ complex, # `complex` is not a subtype of `SupportsComplex`
+]
+
+class integer(number[_NBit1]): # type: ignore
+ @property
+ def numerator(self: _ScalarType) -> _ScalarType: ...
+ @property
+ def denominator(self) -> L[1]: ...
+ @overload
+ def __round__(self, ndigits: None = ...) -> int: ...
+ @overload
+ def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
+
+ # NOTE: `__index__` is technically defined in the bottom-most
+ # sub-classes (`int64`, `uint32`, etc)
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> int: ...
+ def tolist(self) -> int: ...
+ def is_integer(self) -> L[True]: ...
+ def bit_count(self: _ScalarType) -> int: ...
+ def __index__(self) -> int: ...
+ __truediv__: _IntTrueDiv[_NBit1]
+ __rtruediv__: _IntTrueDiv[_NBit1]
+ def __mod__(self, value: _IntLike_co) -> integer: ...
+ def __rmod__(self, value: _IntLike_co) -> integer: ...
+ def __invert__(self: _IntType) -> _IntType: ...
+ # Ensure that objects annotated as `integer` support bit-wise operations
+ def __lshift__(self, other: _IntLike_co) -> integer: ...
+ def __rlshift__(self, other: _IntLike_co) -> integer: ...
+ def __rshift__(self, other: _IntLike_co) -> integer: ...
+ def __rrshift__(self, other: _IntLike_co) -> integer: ...
+ def __and__(self, other: _IntLike_co) -> integer: ...
+ def __rand__(self, other: _IntLike_co) -> integer: ...
+ def __or__(self, other: _IntLike_co) -> integer: ...
+ def __ror__(self, other: _IntLike_co) -> integer: ...
+ def __xor__(self, other: _IntLike_co) -> integer: ...
+ def __rxor__(self, other: _IntLike_co) -> integer: ...
+
+class signedinteger(integer[_NBit1]):
+ def __init__(self, value: _IntValue = ..., /) -> None: ...
+ __add__: _SignedIntOp[_NBit1]
+ __radd__: _SignedIntOp[_NBit1]
+ __sub__: _SignedIntOp[_NBit1]
+ __rsub__: _SignedIntOp[_NBit1]
+ __mul__: _SignedIntOp[_NBit1]
+ __rmul__: _SignedIntOp[_NBit1]
+ __floordiv__: _SignedIntOp[_NBit1]
+ __rfloordiv__: _SignedIntOp[_NBit1]
+ __pow__: _SignedIntOp[_NBit1]
+ __rpow__: _SignedIntOp[_NBit1]
+ __lshift__: _SignedIntBitOp[_NBit1]
+ __rlshift__: _SignedIntBitOp[_NBit1]
+ __rshift__: _SignedIntBitOp[_NBit1]
+ __rrshift__: _SignedIntBitOp[_NBit1]
+ __and__: _SignedIntBitOp[_NBit1]
+ __rand__: _SignedIntBitOp[_NBit1]
+ __xor__: _SignedIntBitOp[_NBit1]
+ __rxor__: _SignedIntBitOp[_NBit1]
+ __or__: _SignedIntBitOp[_NBit1]
+ __ror__: _SignedIntBitOp[_NBit1]
+ __mod__: _SignedIntMod[_NBit1]
+ __rmod__: _SignedIntMod[_NBit1]
+ __divmod__: _SignedIntDivMod[_NBit1]
+ __rdivmod__: _SignedIntDivMod[_NBit1]
+
+int8 = signedinteger[_8Bit]
+int16 = signedinteger[_16Bit]
+int32 = signedinteger[_32Bit]
+int64 = signedinteger[_64Bit]
+
+byte = signedinteger[_NBitByte]
+short = signedinteger[_NBitShort]
+intc = signedinteger[_NBitIntC]
+intp = signedinteger[_NBitIntP]
+int_ = signedinteger[_NBitInt]
+longlong = signedinteger[_NBitLongLong]
+
+# TODO: `item`/`tolist` returns either `dt.timedelta` or `int`
+# depending on the unit
+class timedelta64(generic):
+ def __init__(
+ self,
+ value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ...,
+ format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
+ /,
+ ) -> None: ...
+ @property
+ def numerator(self: _ScalarType) -> _ScalarType: ...
+ @property
+ def denominator(self) -> L[1]: ...
+
+ # NOTE: Only a limited number of units support conversion
+ # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as`
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+ def __complex__(self) -> complex: ...
+ def __neg__(self: _ArraySelf) -> _ArraySelf: ...
+ def __pos__(self: _ArraySelf) -> _ArraySelf: ...
+ def __abs__(self: _ArraySelf) -> _ArraySelf: ...
+ def __add__(self, other: _TD64Like_co) -> timedelta64: ...
+ def __radd__(self, other: _TD64Like_co) -> timedelta64: ...
+ def __sub__(self, other: _TD64Like_co) -> timedelta64: ...
+ def __rsub__(self, other: _TD64Like_co) -> timedelta64: ...
+ def __mul__(self, other: _FloatLike_co) -> timedelta64: ...
+ def __rmul__(self, other: _FloatLike_co) -> timedelta64: ...
+ __truediv__: _TD64Div[float64]
+ __floordiv__: _TD64Div[int64]
+ def __rtruediv__(self, other: timedelta64) -> float64: ...
+ def __rfloordiv__(self, other: timedelta64) -> int64: ...
+ def __mod__(self, other: timedelta64) -> timedelta64: ...
+ def __rmod__(self, other: timedelta64) -> timedelta64: ...
+ def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ...
+ def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ...
+ __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
+ __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
+ __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
+ __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co]
+
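+# Illustrating the unit-dependent `item` behavior noted above (a sketch,
+# assuming a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> np.timedelta64(1, "D").item()   # representable as dt.timedelta
+#     datetime.timedelta(days=1)
+#     >>> np.timedelta64(1, "Y").item()   # years have no fixed length -> int
+#     1
+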
+class unsignedinteger(integer[_NBit1]):
+ # NOTE: `uint64 + signedinteger -> float64`
+ def __init__(self, value: _IntValue = ..., /) -> None: ...
+ __add__: _UnsignedIntOp[_NBit1]
+ __radd__: _UnsignedIntOp[_NBit1]
+ __sub__: _UnsignedIntOp[_NBit1]
+ __rsub__: _UnsignedIntOp[_NBit1]
+ __mul__: _UnsignedIntOp[_NBit1]
+ __rmul__: _UnsignedIntOp[_NBit1]
+ __floordiv__: _UnsignedIntOp[_NBit1]
+ __rfloordiv__: _UnsignedIntOp[_NBit1]
+ __pow__: _UnsignedIntOp[_NBit1]
+ __rpow__: _UnsignedIntOp[_NBit1]
+ __lshift__: _UnsignedIntBitOp[_NBit1]
+ __rlshift__: _UnsignedIntBitOp[_NBit1]
+ __rshift__: _UnsignedIntBitOp[_NBit1]
+ __rrshift__: _UnsignedIntBitOp[_NBit1]
+ __and__: _UnsignedIntBitOp[_NBit1]
+ __rand__: _UnsignedIntBitOp[_NBit1]
+ __xor__: _UnsignedIntBitOp[_NBit1]
+ __rxor__: _UnsignedIntBitOp[_NBit1]
+ __or__: _UnsignedIntBitOp[_NBit1]
+ __ror__: _UnsignedIntBitOp[_NBit1]
+ __mod__: _UnsignedIntMod[_NBit1]
+ __rmod__: _UnsignedIntMod[_NBit1]
+ __divmod__: _UnsignedIntDivMod[_NBit1]
+ __rdivmod__: _UnsignedIntDivMod[_NBit1]
+
+uint8 = unsignedinteger[_8Bit]
+uint16 = unsignedinteger[_16Bit]
+uint32 = unsignedinteger[_32Bit]
+uint64 = unsignedinteger[_64Bit]
+
+ubyte = unsignedinteger[_NBitByte]
+ushort = unsignedinteger[_NBitShort]
+uintc = unsignedinteger[_NBitIntC]
+uintp = unsignedinteger[_NBitIntP]
+uint = unsignedinteger[_NBitInt]
+ulonglong = unsignedinteger[_NBitLongLong]
+
+class inexact(number[_NBit1]): # type: ignore
+ def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ...
+
+_IntType = TypeVar("_IntType", bound=integer)
+_FloatType = TypeVar('_FloatType', bound=floating)
+
+class floating(inexact[_NBit1]):
+ def __init__(self, value: _FloatValue = ..., /) -> None: ...
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ...,
+ /,
+ ) -> float: ...
+ def tolist(self) -> float: ...
+ def is_integer(self) -> bool: ...
+ def hex(self: float64) -> str: ...
+ @classmethod
+ def fromhex(cls: type[float64], string: str, /) -> float64: ...
+ def as_integer_ratio(self) -> tuple[int, int]: ...
+ if sys.version_info >= (3, 9):
+ def __ceil__(self: float64) -> int: ...
+ def __floor__(self: float64) -> int: ...
+ def __trunc__(self: float64) -> int: ...
+ def __getnewargs__(self: float64) -> tuple[float]: ...
+ def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ...
+ @overload
+ def __round__(self, ndigits: None = ...) -> int: ...
+ @overload
+ def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ...
+ __add__: _FloatOp[_NBit1]
+ __radd__: _FloatOp[_NBit1]
+ __sub__: _FloatOp[_NBit1]
+ __rsub__: _FloatOp[_NBit1]
+ __mul__: _FloatOp[_NBit1]
+ __rmul__: _FloatOp[_NBit1]
+ __truediv__: _FloatOp[_NBit1]
+ __rtruediv__: _FloatOp[_NBit1]
+ __floordiv__: _FloatOp[_NBit1]
+ __rfloordiv__: _FloatOp[_NBit1]
+ __pow__: _FloatOp[_NBit1]
+ __rpow__: _FloatOp[_NBit1]
+ __mod__: _FloatMod[_NBit1]
+ __rmod__: _FloatMod[_NBit1]
+ __divmod__: _FloatDivMod[_NBit1]
+ __rdivmod__: _FloatDivMod[_NBit1]
+
+float16 = floating[_16Bit]
+float32 = floating[_32Bit]
+float64 = floating[_64Bit]
+
+half = floating[_NBitHalf]
+single = floating[_NBitSingle]
+double = floating[_NBitDouble]
+float_ = floating[_NBitDouble]
+longdouble = floating[_NBitLongDouble]
+longfloat = floating[_NBitLongDouble]
+
+# The main reason for `complexfloating` having two typevars is cosmetic:
+# they clarify why `complex128`'s precision is `_64Bit`, the latter
+# describing the two 64-bit floats representing its real and imaginary components
+
+class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]):
+ def __init__(self, value: _ComplexValue = ..., /) -> None: ...
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> complex: ...
+ def tolist(self) -> complex: ...
+ @property
+ def real(self) -> floating[_NBit1]: ... # type: ignore[override]
+ @property
+ def imag(self) -> floating[_NBit2]: ... # type: ignore[override]
+ def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override]
+ def __getnewargs__(self: complex128) -> tuple[float, float]: ...
+ # NOTE: Deprecated
+ # def __round__(self, ndigits=...): ...
+ __add__: _ComplexOp[_NBit1]
+ __radd__: _ComplexOp[_NBit1]
+ __sub__: _ComplexOp[_NBit1]
+ __rsub__: _ComplexOp[_NBit1]
+ __mul__: _ComplexOp[_NBit1]
+ __rmul__: _ComplexOp[_NBit1]
+ __truediv__: _ComplexOp[_NBit1]
+ __rtruediv__: _ComplexOp[_NBit1]
+ __pow__: _ComplexOp[_NBit1]
+ __rpow__: _ComplexOp[_NBit1]
+
+complex64 = complexfloating[_32Bit, _32Bit]
+complex128 = complexfloating[_64Bit, _64Bit]
+
+csingle = complexfloating[_NBitSingle, _NBitSingle]
+singlecomplex = complexfloating[_NBitSingle, _NBitSingle]
+cdouble = complexfloating[_NBitDouble, _NBitDouble]
+complex_ = complexfloating[_NBitDouble, _NBitDouble]
+cfloat = complexfloating[_NBitDouble, _NBitDouble]
+clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble]
+clongfloat = complexfloating[_NBitLongDouble, _NBitLongDouble]
+longcomplex = complexfloating[_NBitLongDouble, _NBitLongDouble]
+
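+# Illustrating the two-typevar note above (a sketch, assuming a matching
+# NumPy runtime): the real and imaginary components of a `complex128` are
+# each a 64-bit float.
+#
+#     >>> import numpy as np
+#     >>> z = np.complex128(1 + 2j)
+#     >>> type(z.real), type(z.imag)
+#     (<class 'numpy.float64'>, <class 'numpy.float64'>)
+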
+class flexible(generic): ... # type: ignore
+
+# TODO: `item`/`tolist` returns either `bytes` or `tuple`,
+# depending on whether the scalar is used as an opaque bytes sequence
+# or as a structure
+class void(flexible):
+ @overload
+    def __init__(self, value: _IntLike_co | bytes, /, dtype: None = ...) -> None: ...
+ @overload
+ def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ...
+ @property
+ def real(self: _ArraySelf) -> _ArraySelf: ...
+ @property
+ def imag(self: _ArraySelf) -> _ArraySelf: ...
+ def setfield(
+ self, val: ArrayLike, dtype: DTypeLike, offset: int = ...
+ ) -> None: ...
+ @overload
+ def __getitem__(self, key: str | SupportsIndex) -> Any: ...
+ @overload
+ def __getitem__(self, key: list[str]) -> void: ...
+ def __setitem__(
+ self,
+ key: str | list[str] | SupportsIndex,
+ value: ArrayLike,
+ ) -> None: ...
+
+class character(flexible): # type: ignore
+ def __int__(self) -> int: ...
+ def __float__(self) -> float: ...
+
+# NOTE: Most `np.bytes_` / `np.str_` methods return their
+# builtin `bytes` / `str` counterpart
+
+class bytes_(character, bytes):
+ @overload
+ def __init__(self, value: object = ..., /) -> None: ...
+ @overload
+ def __init__(
+ self, value: str, /, encoding: str = ..., errors: str = ...
+ ) -> None: ...
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> bytes: ...
+ def tolist(self) -> bytes: ...
+
+string_ = bytes_
+
+class str_(character, str):
+ @overload
+ def __init__(self, value: object = ..., /) -> None: ...
+ @overload
+ def __init__(
+ self, value: bytes, /, encoding: str = ..., errors: str = ...
+ ) -> None: ...
+ def item(
+ self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /,
+ ) -> str: ...
+ def tolist(self) -> str: ...
+
+unicode_ = str_
+
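+# Illustrating the note above about builtin return types (a sketch, assuming
+# a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> type(np.str_("abc").upper())
+#     <class 'str'>
+#     >>> type(np.bytes_(b"abc").upper())
+#     <class 'bytes'>
+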
+#
+# Constants
+#
+
+Inf: Final[float]
+Infinity: Final[float]
+NAN: Final[float]
+NINF: Final[float]
+NZERO: Final[float]
+NaN: Final[float]
+PINF: Final[float]
+PZERO: Final[float]
+e: Final[float]
+euler_gamma: Final[float]
+inf: Final[float]
+infty: Final[float]
+nan: Final[float]
+pi: Final[float]
+
+ERR_IGNORE: L[0]
+ERR_WARN: L[1]
+ERR_RAISE: L[2]
+ERR_CALL: L[3]
+ERR_PRINT: L[4]
+ERR_LOG: L[5]
+ERR_DEFAULT: L[521]
+
+SHIFT_DIVIDEBYZERO: L[0]
+SHIFT_OVERFLOW: L[3]
+SHIFT_UNDERFLOW: L[6]
+SHIFT_INVALID: L[9]
+
+FPE_DIVIDEBYZERO: L[1]
+FPE_OVERFLOW: L[2]
+FPE_UNDERFLOW: L[4]
+FPE_INVALID: L[8]
+
+FLOATING_POINT_SUPPORT: L[1]
+UFUNC_BUFSIZE_DEFAULT = BUFSIZE
+
+little_endian: Final[bool]
+True_: Final[bool_]
+False_: Final[bool_]
+
+UFUNC_PYVALS_NAME: L["UFUNC_PYVALS"]
+
+newaxis: None
+
+# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
+@final
+class ufunc:
+ @property
+ def __name__(self) -> str: ...
+ @property
+ def __doc__(self) -> str: ...
+ __call__: Callable[..., Any]
+ @property
+ def nin(self) -> int: ...
+ @property
+ def nout(self) -> int: ...
+ @property
+ def nargs(self) -> int: ...
+ @property
+ def ntypes(self) -> int: ...
+ @property
+ def types(self) -> list[str]: ...
+ # Broad return type because it has to encompass things like
+ #
+ # >>> np.logical_and.identity is True
+ # True
+ # >>> np.add.identity is 0
+ # True
+ # >>> np.sin.identity is None
+ # True
+ #
+ # and any user-defined ufuncs.
+ @property
+ def identity(self) -> Any: ...
+ # This is None for ufuncs and a string for gufuncs.
+ @property
+ def signature(self) -> None | str: ...
+    # The next four methods will always exist, but they will just
+    # raise a ValueError for ufuncs that don't accept two input
+    # arguments and return one output argument. Because of that we
+    # can't type them very precisely.
+ reduce: Any
+ accumulate: Any
+ reduceat: Any
+ outer: Any
+    # Similarly, `at` won't be defined for ufuncs that return multiple
+    # outputs, so we can't type it very precisely.
+ at: Any
+
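+# A hedged sketch of the `ufunc` attributes typed above (assumes a matching
+# NumPy runtime; exact error messages may differ by version):
+#
+#     >>> import numpy as np
+#     >>> np.add.nin, np.add.nout, np.add.identity
+#     (2, 1, 0)
+#     >>> np.add.signature is None        # gufuncs like `matmul` return a str
+#     True
+#     >>> np.sin.reduce(np.arange(3))     # unary ufunc -> ValueError
+#     Traceback (most recent call last):
+#         ...
+#     ValueError: reduce only supported for binary functions
+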
+# Parameters: `__name__`, `ntypes` and `identity`
+absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None]
+add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]]
+arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None]
+arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None]
+arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None]
+arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None]
+arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None]
+arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None]
+arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None]
+bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]]
+bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]]
+bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]]
+cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None]
+ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None]
+conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None]
+cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None]
+cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None]
+deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None]
+degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None]
+divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None]
+equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None]
+exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None]
+exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None]
+expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None]
+fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None]
+float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None]
+floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None]
+floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None]
+fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None]
+fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None]
+fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None]
+frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None]
+gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]]
+greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None]
+greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None]
+heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None]
+hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]]
+invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None]
+isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None]
+isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None]
+isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None]
+lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None]
+ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None]
+left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None]
+less: _UFunc_Nin2_Nout1[L['less'], L[23], None]
+less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None]
+log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None]
+log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None]
+log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None]
+log: _UFunc_Nin1_Nout1[L['log'], L[10], None]
+logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float]
+logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float]
+logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]]
+logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None]
+logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]]
+logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]]
+matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None]
+maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None]
+minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None]
+mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None]
+multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]]
+negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None]
+nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None]
+not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None]
+positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None]
+power: _UFunc_Nin2_Nout1[L['power'], L[18], None]
+rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None]
+radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None]
+reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None]
+remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None]
+rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None]
+sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None]
+signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None]
+sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None]
+sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None]
+spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None]
+sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None]
+square: _UFunc_Nin1_Nout1[L['square'], L[18], None]
+subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None]
+tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None]
+tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None]
+true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None]
+
+abs = absolute
+
+class _CopyMode(enum.Enum):
+ ALWAYS: L[True]
+ IF_NEEDED: L[False]
+ NEVER: L[2]
+
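+# `_CopyMode` members can be passed as the `copy` argument of `np.array`
+# (a sketch, assuming a NumPy runtime recent enough to accept them):
+#
+#     >>> import numpy as np
+#     >>> a = np.arange(3)
+#     >>> b = np.array(a, copy=np._CopyMode.IF_NEEDED)
+#     >>> b is a or b.base is a           # no copy was needed here
+#     True
+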
+# Warnings
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class ComplexWarning(RuntimeWarning): ...
+class RankWarning(UserWarning): ...
+
+# Errors
+class TooHardError(RuntimeError): ...
+
+class AxisError(ValueError, IndexError):
+ axis: None | int
+ ndim: None | int
+ @overload
+ def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ...
+ @overload
+ def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ...
+
+_CallType = TypeVar("_CallType", bound=_ErrFunc | _SupportsWrite[str])
+
+class errstate(Generic[_CallType], ContextDecorator):
+ call: _CallType
+ kwargs: _ErrDictOptional
+
+ # Expand `**kwargs` into explicit keyword-only arguments
+ def __init__(
+ self,
+ *,
+ call: _CallType = ...,
+ all: None | _ErrKind = ...,
+ divide: None | _ErrKind = ...,
+ over: None | _ErrKind = ...,
+ under: None | _ErrKind = ...,
+ invalid: None | _ErrKind = ...,
+ ) -> None: ...
+ def __enter__(self) -> None: ...
+ def __exit__(
+ self,
+ exc_type: None | type[BaseException],
+ exc_value: None | BaseException,
+ traceback: None | TracebackType,
+ /,
+ ) -> None: ...
+
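+# Typical `errstate` usage, as a context manager or decorator (a sketch,
+# assuming a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> with np.errstate(divide="ignore"):
+#     ...     _ = np.float64(1.0) / np.float64(0.0)   # inf, no warning
+#     >>> @np.errstate(invalid="raise")
+#     ... def f():
+#     ...     return np.sqrt(np.float64(-1.0))        # raises FloatingPointError
+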
+@contextmanager
+def _no_nep50_warning() -> Generator[None, None, None]: ...
+def _get_promotion_state() -> str: ...
+def _set_promotion_state(state: str, /) -> None: ...
+
+class ndenumerate(Generic[_ScalarType]):
+ iter: flatiter[NDArray[_ScalarType]]
+ @overload
+ def __new__(
+ cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]],
+ ) -> ndenumerate[_ScalarType]: ...
+ @overload
+ def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ...
+ @overload
+ def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ...
+ @overload
+ def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[bool_]: ...
+ @overload
+ def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ...
+ @overload
+ def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float_]: ...
+ @overload
+ def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex_]: ...
+ def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ...
+ def __iter__(self: _T) -> _T: ...
+
+class ndindex:
+ @overload
+ def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ...
+ @overload
+ def __init__(self, *shape: SupportsIndex) -> None: ...
+ def __iter__(self: _T) -> _T: ...
+ def __next__(self) -> _Shape: ...
+
+class DataSource:
+ def __init__(
+ self,
+ destpath: None | str | os.PathLike[str] = ...,
+ ) -> None: ...
+ def __del__(self) -> None: ...
+ def abspath(self, path: str) -> str: ...
+ def exists(self, path: str) -> bool: ...
+
+    # Whether the file object is opened in text or bytes mode (by default)
+    # depends on the file extension of `path`
+ def open(
+ self,
+ path: str,
+ mode: str = ...,
+ encoding: None | str = ...,
+ newline: None | str = ...,
+ ) -> IO[Any]: ...
+
+# TODO: The return type of `__next__` and of each element of `iters` depends
+# on the length and dtype of `args`; we can't describe this behavior yet
+# as we lack variadics (PEP 646).
+@final
+class broadcast:
+ def __new__(cls, *args: ArrayLike) -> broadcast: ...
+ @property
+ def index(self) -> int: ...
+ @property
+ def iters(self) -> tuple[flatiter[Any], ...]: ...
+ @property
+ def nd(self) -> int: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def numiter(self) -> int: ...
+ @property
+ def shape(self) -> _Shape: ...
+ @property
+ def size(self) -> int: ...
+ def __next__(self) -> tuple[Any, ...]: ...
+ def __iter__(self: _T) -> _T: ...
+ def reset(self) -> None: ...
+
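+# A quick sketch of `broadcast` (assuming a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> b = np.broadcast(np.arange(3), np.arange(2)[:, None])
+#     >>> b.shape, b.ndim, b.numiter
+#     ((2, 3), 2, 2)
+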
+@final
+class busdaycalendar:
+ def __new__(
+ cls,
+ weekmask: ArrayLike = ...,
+ holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ ) -> busdaycalendar: ...
+ @property
+ def weekmask(self) -> NDArray[bool_]: ...
+ @property
+ def holidays(self) -> NDArray[datetime64]: ...
+
+class finfo(Generic[_FloatType]):
+ dtype: dtype[_FloatType]
+ bits: int
+ eps: _FloatType
+ epsneg: _FloatType
+ iexp: int
+ machep: int
+ max: _FloatType
+ maxexp: int
+ min: _FloatType
+ minexp: int
+ negep: int
+ nexp: int
+ nmant: int
+ precision: int
+ resolution: _FloatType
+ smallest_subnormal: _FloatType
+ @property
+ def smallest_normal(self) -> _FloatType: ...
+ @property
+ def tiny(self) -> _FloatType: ...
+ @overload
+ def __new__(
+ cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]
+ ) -> finfo[floating[_NBit1]]: ...
+ @overload
+ def __new__(
+ cls, dtype: complex | float | type[complex] | type[float]
+ ) -> finfo[float_]: ...
+ @overload
+ def __new__(
+ cls, dtype: str
+ ) -> finfo[floating[Any]]: ...
+
+class iinfo(Generic[_IntType]):
+ dtype: dtype[_IntType]
+ kind: str
+ bits: int
+ key: str
+ @property
+ def min(self) -> int: ...
+ @property
+ def max(self) -> int: ...
+
+ @overload
+ def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ...
+ @overload
+ def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ...
+ @overload
+ def __new__(cls, dtype: str) -> iinfo[Any]: ...
+
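+# `finfo`/`iinfo` look up machine limits for the given dtype (a sketch,
+# assuming a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> np.finfo(np.float64).eps
+#     2.220446049250313e-16
+#     >>> np.iinfo(np.int16).min, np.iinfo(np.int16).max
+#     (-32768, 32767)
+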
+class format_parser:
+ dtype: dtype[void]
+ def __init__(
+ self,
+ formats: DTypeLike,
+ names: None | str | Sequence[str],
+ titles: None | str | Sequence[str],
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+ ) -> None: ...
+
+class recarray(ndarray[_ShapeType, _DType_co]):
+ # NOTE: While not strictly mandatory, we're demanding here that arguments
+ # for the `format_parser`- and `dtype`-based dtype constructors are
+ # mutually exclusive
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ dtype: None = ...,
+ buf: None | _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: None | _ShapeLike = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ byteorder: None | _ByteOrder = ...,
+ aligned: bool = ...,
+ order: _OrderKACF = ...,
+ ) -> recarray[Any, dtype[record]]: ...
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ dtype: DTypeLike,
+ buf: None | _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: None | _ShapeLike = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ byteorder: None = ...,
+ aligned: L[False] = ...,
+ order: _OrderKACF = ...,
+ ) -> recarray[Any, dtype[Any]]: ...
+ def __array_finalize__(self, obj: object) -> None: ...
+ def __getattribute__(self, attr: str) -> Any: ...
+ def __setattr__(self, attr: str, val: ArrayLike) -> None: ...
+ @overload
+ def __getitem__(self, indx: (
+ SupportsIndex
+ | _ArrayLikeInt_co
+ | tuple[SupportsIndex | _ArrayLikeInt_co, ...]
+ )) -> Any: ...
+ @overload
+ def __getitem__(self: recarray[Any, dtype[void]], indx: (
+ None
+ | slice
+ | ellipsis
+ | SupportsIndex
+ | _ArrayLikeInt_co
+ | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
+ )) -> recarray[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self, indx: (
+ None
+ | slice
+ | ellipsis
+ | SupportsIndex
+ | _ArrayLikeInt_co
+ | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
+ )) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self, indx: str) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ...
+ @overload
+ def field(self, attr: int | str, val: None = ...) -> Any: ...
+ @overload
+ def field(self, attr: int | str, val: ArrayLike) -> None: ...
+
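+# The second `recarray.__new__` overload above corresponds to the plain
+# `dtype`-based spelling (a sketch, assuming a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> r = np.recarray((2,), dtype=[("x", "<i4"), ("y", "<f8")])
+#     >>> r.x.shape, r.y.dtype
+#     ((2,), dtype('float64'))
+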
+class record(void):
+ def __getattribute__(self, attr: str) -> Any: ...
+ def __setattr__(self, attr: str, val: ArrayLike) -> None: ...
+ def pprint(self) -> str: ...
+ @overload
+ def __getitem__(self, key: str | SupportsIndex) -> Any: ...
+ @overload
+ def __getitem__(self, key: list[str]) -> record: ...
+
+_NDIterFlagsKind = L[
+ "buffered",
+ "c_index",
+ "copy_if_overlap",
+ "common_dtype",
+ "delay_bufalloc",
+ "external_loop",
+ "f_index",
+ "grow_inner", "growinner",
+ "multi_index",
+ "ranged",
+ "refs_ok",
+ "reduce_ok",
+ "zerosize_ok",
+]
+
+_NDIterOpFlagsKind = L[
+ "aligned",
+ "allocate",
+ "arraymask",
+ "copy",
+ "config",
+ "nbo",
+ "no_subtype",
+ "no_broadcast",
+ "overlap_assume_elementwise",
+ "readonly",
+ "readwrite",
+ "updateifcopy",
+ "virtual",
+ "writeonly",
+ "writemasked"
+]
+
+@final
+class nditer:
+ def __new__(
+ cls,
+ op: ArrayLike | Sequence[ArrayLike],
+ flags: None | Sequence[_NDIterFlagsKind] = ...,
+ op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ...,
+ op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ op_axes: None | Sequence[Sequence[SupportsIndex]] = ...,
+ itershape: None | _ShapeLike = ...,
+ buffersize: SupportsIndex = ...,
+ ) -> nditer: ...
+ def __enter__(self) -> nditer: ...
+ def __exit__(
+ self,
+ exc_type: None | type[BaseException],
+ exc_value: None | BaseException,
+ traceback: None | TracebackType,
+ ) -> None: ...
+ def __iter__(self) -> nditer: ...
+ def __next__(self) -> tuple[NDArray[Any], ...]: ...
+ def __len__(self) -> int: ...
+ def __copy__(self) -> nditer: ...
+ @overload
+ def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ...
+ def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ...
+ def close(self) -> None: ...
+ def copy(self) -> nditer: ...
+ def debug_print(self) -> None: ...
+ def enable_external_loop(self) -> None: ...
+ def iternext(self) -> bool: ...
+ def remove_axis(self, i: SupportsIndex, /) -> None: ...
+ def remove_multi_index(self) -> None: ...
+ def reset(self) -> None: ...
+ @property
+ def dtypes(self) -> tuple[dtype[Any], ...]: ...
+ @property
+ def finished(self) -> bool: ...
+ @property
+ def has_delayed_bufalloc(self) -> bool: ...
+ @property
+ def has_index(self) -> bool: ...
+ @property
+ def has_multi_index(self) -> bool: ...
+ @property
+ def index(self) -> int: ...
+ @property
+ def iterationneedsapi(self) -> bool: ...
+ @property
+ def iterindex(self) -> int: ...
+ @property
+ def iterrange(self) -> tuple[int, ...]: ...
+ @property
+ def itersize(self) -> int: ...
+ @property
+ def itviews(self) -> tuple[NDArray[Any], ...]: ...
+ @property
+ def multi_index(self) -> tuple[int, ...]: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def nop(self) -> int: ...
+ @property
+ def operands(self) -> tuple[NDArray[Any], ...]: ...
+ @property
+ def shape(self) -> tuple[int, ...]: ...
+ @property
+ def value(self) -> tuple[NDArray[Any], ...]: ...
+
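+# A hedged `nditer` sketch using the flag literals defined above (assumes a
+# matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> a = np.arange(6).reshape(2, 3)
+#     >>> with np.nditer(a, flags=["multi_index"]) as it:
+#     ...     for x in it:
+#     ...         print(it.multi_index, int(x))
+#     (0, 0) 0
+#     (0, 1) 1
+#     ...
+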
+_MemMapModeKind = L[
+ "readonly", "r",
+ "copyonwrite", "c",
+ "readwrite", "r+",
+ "write", "w+",
+]
+
+class memmap(ndarray[_ShapeType, _DType_co]):
+ __array_priority__: ClassVar[float]
+ filename: str | None
+ offset: int
+ mode: str
+ @overload
+ def __new__(
+ subtype,
+ filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol,
+ dtype: type[uint8] = ...,
+ mode: _MemMapModeKind = ...,
+ offset: int = ...,
+ shape: None | int | tuple[int, ...] = ...,
+ order: _OrderKACF = ...,
+ ) -> memmap[Any, dtype[uint8]]: ...
+ @overload
+ def __new__(
+ subtype,
+ filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol,
+ dtype: _DTypeLike[_ScalarType],
+ mode: _MemMapModeKind = ...,
+ offset: int = ...,
+ shape: None | int | tuple[int, ...] = ...,
+ order: _OrderKACF = ...,
+ ) -> memmap[Any, dtype[_ScalarType]]: ...
+ @overload
+ def __new__(
+ subtype,
+ filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol,
+ dtype: DTypeLike,
+ mode: _MemMapModeKind = ...,
+ offset: int = ...,
+ shape: None | int | tuple[int, ...] = ...,
+ order: _OrderKACF = ...,
+ ) -> memmap[Any, dtype[Any]]: ...
+ def __array_finalize__(self, obj: object) -> None: ...
+ def __array_wrap__(
+ self,
+ array: memmap[_ShapeType, _DType_co],
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+ ) -> Any: ...
+ def flush(self) -> None: ...
+
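+# A minimal `memmap` sketch (assumes a matching NumPy runtime; the filename
+# is hypothetical):
+#
+#     >>> import numpy as np
+#     >>> mm = np.memmap("scratch.dat", dtype=np.float32, mode="w+", shape=(3, 4))
+#     >>> mm[:] = 1.0
+#     >>> mm.flush()          # write changes through to disk
+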
+# TODO: Add a mypy plugin for managing functions whose output type is dependent
+# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`)
+class vectorize:
+ pyfunc: Callable[..., Any]
+ cache: bool
+ signature: None | str
+ otypes: None | str
+ excluded: set[int | str]
+ __doc__: None | str
+ def __init__(
+ self,
+ pyfunc: Callable[..., Any],
+ otypes: None | str | Iterable[DTypeLike] = ...,
+ doc: None | str = ...,
+ excluded: None | Iterable[int | str] = ...,
+ cache: bool = ...,
+ signature: None | str = ...,
+ ) -> None: ...
+ def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+
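+# A small `vectorize` sketch (assumes a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> hyp = np.vectorize(lambda a, b: (a**2 + b**2) ** 0.5, otypes=[float])
+#     >>> hyp([3, 5], 4)
+#     array([5.        , 6.40312424])
+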
+class poly1d:
+ @property
+ def variable(self) -> str: ...
+ @property
+ def order(self) -> int: ...
+ @property
+ def o(self) -> int: ...
+ @property
+ def roots(self) -> NDArray[Any]: ...
+ @property
+ def r(self) -> NDArray[Any]: ...
+
+ @property
+ def coeffs(self) -> NDArray[Any]: ...
+ @coeffs.setter
+ def coeffs(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def c(self) -> NDArray[Any]: ...
+ @c.setter
+ def c(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def coef(self) -> NDArray[Any]: ...
+ @coef.setter
+ def coef(self, value: NDArray[Any]) -> None: ...
+
+ @property
+ def coefficients(self) -> NDArray[Any]: ...
+ @coefficients.setter
+ def coefficients(self, value: NDArray[Any]) -> None: ...
+
+ __hash__: ClassVar[None] # type: ignore
+
+ @overload
+ def __array__(self, t: None = ...) -> NDArray[Any]: ...
+ @overload
+ def __array__(self, t: _DType) -> ndarray[Any, _DType]: ...
+
+ @overload
+ def __call__(self, val: _ScalarLike_co) -> Any: ...
+ @overload
+ def __call__(self, val: poly1d) -> poly1d: ...
+ @overload
+ def __call__(self, val: ArrayLike) -> NDArray[Any]: ...
+
+ def __init__(
+ self,
+ c_or_r: ArrayLike,
+ r: bool = ...,
+ variable: None | str = ...,
+ ) -> None: ...
+ def __len__(self) -> int: ...
+ def __neg__(self) -> poly1d: ...
+ def __pos__(self) -> poly1d: ...
+ def __mul__(self, other: ArrayLike) -> poly1d: ...
+ def __rmul__(self, other: ArrayLike) -> poly1d: ...
+ def __add__(self, other: ArrayLike) -> poly1d: ...
+ def __radd__(self, other: ArrayLike) -> poly1d: ...
+ def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted
+ def __sub__(self, other: ArrayLike) -> poly1d: ...
+ def __rsub__(self, other: ArrayLike) -> poly1d: ...
+ def __div__(self, other: ArrayLike) -> poly1d: ...
+ def __truediv__(self, other: ArrayLike) -> poly1d: ...
+ def __rdiv__(self, other: ArrayLike) -> poly1d: ...
+ def __rtruediv__(self, other: ArrayLike) -> poly1d: ...
+ def __getitem__(self, val: int) -> Any: ...
+ def __setitem__(self, key: int, val: Any) -> None: ...
+ def __iter__(self) -> Iterator[Any]: ...
+ def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ...
+ def integ(
+ self,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
+ ) -> poly1d: ...
+
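+# A quick `poly1d` sketch (assumes a matching NumPy runtime):
+#
+#     >>> import numpy as np
+#     >>> p = np.poly1d([1, 2, 3])        # x**2 + 2*x + 3
+#     >>> p(0), p.order
+#     (3, 2)
+#     >>> p.deriv()                       # 2*x + 2
+#     poly1d([2, 2])
+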
+class matrix(ndarray[_ShapeType, _DType_co]):
+ __array_priority__: ClassVar[float]
+ def __new__(
+ subtype,
+ data: ArrayLike,
+ dtype: DTypeLike = ...,
+ copy: bool = ...,
+ ) -> matrix[Any, Any]: ...
+ def __array_finalize__(self, obj: object) -> None: ...
+
+ @overload
+ def __getitem__(self, key: (
+ SupportsIndex
+ | _ArrayLikeInt_co
+ | tuple[SupportsIndex | _ArrayLikeInt_co, ...]
+ )) -> Any: ...
+ @overload
+ def __getitem__(self, key: (
+ None
+ | slice
+ | ellipsis
+ | SupportsIndex
+ | _ArrayLikeInt_co
+ | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
+ )) -> matrix[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ...
+
+ def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ...
+ def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ...
+ def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ...
+ def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ...
+ def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ...
+
+ @overload
+ def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
+ @overload
+ def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
+ @overload
+ def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
+ @overload
+ def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
+ @overload
+ def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ...
+ @overload
+ def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ...
+ @overload
+ def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ...
+ @overload
+ def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ...
+ @overload
+ def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ...
+ @overload
+ def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ...
+ @overload
+ def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def any(self, axis: None = ..., out: None = ...) -> bool_: ...
+ @overload
+ def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ...
+ @overload
+ def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def all(self, axis: None = ..., out: None = ...) -> bool_: ...
+ @overload
+ def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[bool_]]: ...
+ @overload
+ def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
+ @overload
+ def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
+ @overload
+ def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
+ @overload
+ def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
+ @overload
+ def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ...
+ @overload
+ def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ...
+ @overload
+ def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ...
+ @overload
+ def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ...
+ @overload
+ def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ @overload
+ def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ...
+ @overload
+ def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ...
+ @overload
+ def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+
+ def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ...
+ def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ... # type: ignore[typevar]
+ def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ...
+ def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ...
+
+ @property
+ def T(self) -> matrix[Any, _DType_co]: ...
+ @property
+ def I(self) -> matrix[Any, Any]: ...
+ @property
+ def A(self) -> ndarray[_ShapeType, _DType_co]: ...
+ @property
+ def A1(self) -> ndarray[Any, _DType_co]: ...
+ @property
+ def H(self) -> matrix[Any, _DType_co]: ...
+ def getT(self) -> matrix[Any, _DType_co]: ...
+ def getI(self) -> matrix[Any, Any]: ...
+ def getA(self) -> ndarray[_ShapeType, _DType_co]: ...
+ def getA1(self) -> ndarray[Any, _DType_co]: ...
+ def getH(self) -> matrix[Any, _DType_co]: ...
+
+_CharType = TypeVar("_CharType", str_, bytes_)
+_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_])
+_CharArray = chararray[Any, dtype[_CharType]]
+
+class chararray(ndarray[_ShapeType, _CharDType]):
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ itemsize: SupportsIndex | SupportsInt = ...,
+ unicode: L[False] = ...,
+ buffer: _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> chararray[Any, dtype[bytes_]]: ...
+ @overload
+ def __new__(
+ subtype,
+ shape: _ShapeLike,
+ itemsize: SupportsIndex | SupportsInt = ...,
+ unicode: L[True] = ...,
+ buffer: _SupportsBuffer = ...,
+ offset: SupportsIndex = ...,
+ strides: _ShapeLike = ...,
+ order: _OrderKACF = ...,
+ ) -> chararray[Any, dtype[str_]]: ...
+
+ def __array_finalize__(self, obj: object) -> None: ...
+ def __mul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
+ def __rmul__(self, other: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
+ def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ...
+
+ @overload
+ def __eq__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __eq__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __ne__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __ne__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __ge__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __ge__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __le__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __le__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __gt__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __gt__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __lt__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def __lt__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def __add__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def __add__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def __radd__(
+ self: _CharArray[str_],
+ other: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def __radd__(
+ self: _CharArray[bytes_],
+ other: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def center(
+ self: _CharArray[str_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def center(
+ self: _CharArray[bytes_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def count(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def count(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ def decode(
+ self: _CharArray[bytes_],
+ encoding: None | str = ...,
+ errors: None | str = ...,
+ ) -> _CharArray[str_]: ...
+
+ def encode(
+ self: _CharArray[str_],
+ encoding: None | str = ...,
+ errors: None | str = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def endswith(
+ self: _CharArray[str_],
+ suffix: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def endswith(
+ self: _CharArray[bytes_],
+ suffix: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+
+ def expandtabs(
+ self,
+ tabsize: _ArrayLikeInt_co = ...,
+ ) -> chararray[Any, _CharDType]: ...
+
+ @overload
+ def find(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def find(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def index(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def index(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def join(
+ self: _CharArray[str_],
+ seq: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def join(
+ self: _CharArray[bytes_],
+ seq: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def ljust(
+ self: _CharArray[str_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def ljust(
+ self: _CharArray[bytes_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def lstrip(
+ self: _CharArray[str_],
+ chars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def lstrip(
+ self: _CharArray[bytes_],
+ chars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def partition(
+ self: _CharArray[str_],
+ sep: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def partition(
+ self: _CharArray[bytes_],
+ sep: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def replace(
+ self: _CharArray[str_],
+ old: _ArrayLikeStr_co,
+ new: _ArrayLikeStr_co,
+ count: None | _ArrayLikeInt_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def replace(
+ self: _CharArray[bytes_],
+ old: _ArrayLikeBytes_co,
+ new: _ArrayLikeBytes_co,
+ count: None | _ArrayLikeInt_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rfind(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def rfind(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def rindex(
+ self: _CharArray[str_],
+ sub: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+ @overload
+ def rindex(
+ self: _CharArray[bytes_],
+ sub: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[int_]: ...
+
+ @overload
+ def rjust(
+ self: _CharArray[str_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rjust(
+ self: _CharArray[bytes_],
+ width: _ArrayLikeInt_co,
+ fillchar: _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rpartition(
+ self: _CharArray[str_],
+ sep: _ArrayLikeStr_co,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rpartition(
+ self: _CharArray[bytes_],
+ sep: _ArrayLikeBytes_co,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def rsplit(
+ self: _CharArray[str_],
+ sep: None | _ArrayLikeStr_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def rsplit(
+ self: _CharArray[bytes_],
+ sep: None | _ArrayLikeBytes_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+
+ @overload
+ def rstrip(
+ self: _CharArray[str_],
+ chars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def rstrip(
+ self: _CharArray[bytes_],
+ chars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def split(
+ self: _CharArray[str_],
+ sep: None | _ArrayLikeStr_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def split(
+ self: _CharArray[bytes_],
+ sep: None | _ArrayLikeBytes_co = ...,
+ maxsplit: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[object_]: ...
+
+ def splitlines(self, keepends: None | _ArrayLikeBool_co = ...) -> NDArray[object_]: ...
+
+ @overload
+ def startswith(
+ self: _CharArray[str_],
+ prefix: _ArrayLikeStr_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+ @overload
+ def startswith(
+ self: _CharArray[bytes_],
+ prefix: _ArrayLikeBytes_co,
+ start: _ArrayLikeInt_co = ...,
+ end: None | _ArrayLikeInt_co = ...,
+ ) -> NDArray[bool_]: ...
+
+ @overload
+ def strip(
+ self: _CharArray[str_],
+ chars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def strip(
+ self: _CharArray[bytes_],
+ chars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ @overload
+ def translate(
+ self: _CharArray[str_],
+ table: _ArrayLikeStr_co,
+ deletechars: None | _ArrayLikeStr_co = ...,
+ ) -> _CharArray[str_]: ...
+ @overload
+ def translate(
+ self: _CharArray[bytes_],
+ table: _ArrayLikeBytes_co,
+ deletechars: None | _ArrayLikeBytes_co = ...,
+ ) -> _CharArray[bytes_]: ...
+
+ def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ...
+ def capitalize(self) -> chararray[_ShapeType, _CharDType]: ...
+ def title(self) -> chararray[_ShapeType, _CharDType]: ...
+ def swapcase(self) -> chararray[_ShapeType, _CharDType]: ...
+ def lower(self) -> chararray[_ShapeType, _CharDType]: ...
+ def upper(self) -> chararray[_ShapeType, _CharDType]: ...
+ def isalnum(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isalpha(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isdigit(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def islower(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isspace(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def istitle(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isupper(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isnumeric(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+ def isdecimal(self) -> ndarray[_ShapeType, dtype[bool_]]: ...
+
+# NOTE: Deprecated
+# class MachAr: ...
+
+class _SupportsDLPack(Protocol[_T_contra]):
+ def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ...
+
+def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ...
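+
+# Usage sketch (illustrative, not part of the stubs): any object whose
+# ``__dlpack__(stream=None)`` returns a capsule can be consumed, e.g.
+#
+#     arr = np.from_dlpack(exporter)  # ``exporter`` is a hypothetical
+#                                     # DLPack-exporting object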
diff --git a/venv/lib/python3.9/site-packages/numpy/_distributor_init.py b/venv/lib/python3.9/site-packages/numpy/_distributor_init.py
new file mode 100644
index 00000000..d893ba37
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_distributor_init.py
@@ -0,0 +1,10 @@
+""" Distributor init file
+
+Distributors: you can add custom code here to support particular distributions
+of numpy.
+
+For example, this is a good place to put any checks for hardware requirements.
+
+The numpy standard source distribution will not put code in this file, so you
+can safely replace this file with your own version.
+"""
diff --git a/venv/lib/python3.9/site-packages/numpy/_globals.py b/venv/lib/python3.9/site-packages/numpy/_globals.py
new file mode 100644
index 00000000..1e4f26cd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_globals.py
@@ -0,0 +1,125 @@
+"""
+Module defining global singleton classes.
+
+This module raises a RuntimeError if an attempt to reload it is made. In that
+way the identities of the classes defined here are fixed and will remain so
+even if numpy itself is reloaded. In particular, a function like the following
+will still work correctly after numpy is reloaded::
+
+ def foo(arg=np._NoValue):
+ if arg is np._NoValue:
+ ...
+
+That was not the case when the singleton classes were defined in the numpy
+``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
+motivated this module.
+
+"""
+import enum
+
+__all__ = [
+ 'ModuleDeprecationWarning', 'VisibleDeprecationWarning',
+ '_NoValue', '_CopyMode'
+ ]
+
+
+# Disallow reloading this module so as to preserve the identities of the
+# classes defined here.
+if '_is_loaded' in globals():
+ raise RuntimeError('Reloading numpy._globals is not allowed')
+_is_loaded = True
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+ """Module deprecation warning.
+
+    The nose tester turns ordinary DeprecationWarnings into test failures.
+    That makes it hard to deprecate whole modules, because they get
+    imported by default. So this is a special DeprecationWarning that the
+    nose tester will let pass without making tests fail.
+
+ """
+
+
+ModuleDeprecationWarning.__module__ = 'numpy'
+
+
+class VisibleDeprecationWarning(UserWarning):
+ """Visible deprecation warning.
+
+ By default, python will not show deprecation warnings, so this class
+ can be used when a very visible warning is helpful, for example because
+ the usage is most likely a user bug.
+
+ """
+
+
+VisibleDeprecationWarning.__module__ = 'numpy'
+
+
+class _NoValueType:
+ """Special keyword value.
+
+ The instance of this class may be used as the default value assigned to a
+    keyword if no other obvious default (e.g., `None`) is suitable.
+
+ Common reasons for using this keyword are:
+
+ - A new keyword is added to a function, and that function forwards its
+ inputs to another function or method which can be defined outside of
+ NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
+ keyword was added that could only be forwarded if the user explicitly
+ specified ``keepdims``; downstream array libraries may not have added
+ the same keyword, so adding ``x.std(..., keepdims=keepdims)``
+ unconditionally could have broken previously working code.
+ - A keyword is being deprecated, and a deprecation warning must only be
+ emitted when the keyword is used.
+
+ """
+ __instance = None
+ def __new__(cls):
+ # ensure that only one instance exists
+ if not cls.__instance:
+ cls.__instance = super().__new__(cls)
+ return cls.__instance
+
+ def __repr__(self):
+ return "<no value>"
+
+
+_NoValue = _NoValueType()
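+
+# Illustrative sketch of the forwarding pattern described in the docstring
+# above; ``my_std`` is hypothetical, not NumPy API:
+#
+#     def my_std(x, keepdims=_NoValue):
+#         kwargs = {} if keepdims is _NoValue else {'keepdims': keepdims}
+#         return x.std(**kwargs)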
+
+
+class _CopyMode(enum.Enum):
+ """
+    An enumeration for the copy modes supported
+    by numpy.copy() and numpy.array(). The following three modes are supported:
+
+ - ALWAYS: This means that a deep copy of the input
+ array will always be taken.
+ - IF_NEEDED: This means that a deep copy of the input
+ array will be taken only if necessary.
+ - NEVER: This means that the deep copy will never be taken.
+ If a copy cannot be avoided then a `ValueError` will be
+ raised.
+
+    Note that the buffer protocol could in theory do copies. NumPy currently
+    assumes an object exporting the buffer protocol will never do this.
+ """
+
+ ALWAYS = True
+ IF_NEEDED = False
+ NEVER = 2
+
+ def __bool__(self):
+ # For backwards compatibility
+ if self == _CopyMode.ALWAYS:
+ return True
+
+ if self == _CopyMode.IF_NEEDED:
+ return False
+
+ raise ValueError(f"{self} is neither True nor False.")
+
+
+_CopyMode.__module__ = 'numpy'
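+
+# Behaviour sketch (illustrative):
+#
+#     >>> bool(_CopyMode.ALWAYS), bool(_CopyMode.IF_NEEDED)
+#     (True, False)
+#     >>> bool(_CopyMode.NEVER)
+#     Traceback (most recent call last):
+#         ...
+#     ValueError: _CopyMode.NEVER is neither True nor False.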
diff --git a/venv/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.py b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py
new file mode 100644
index 00000000..a08b7c96
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/hook-numpy.py
@@ -0,0 +1,40 @@
+"""This hook should collect all binary files and any hidden modules that numpy
+needs.
+
+Our (somewhat inadequate) docs for writing PyInstaller hooks are kept here:
+https://pyinstaller.readthedocs.io/en/stable/hooks.html
+
+"""
+from PyInstaller.compat import is_conda, is_pure_conda
+from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies
+
+# Collect all DLLs inside numpy's installation folder, dump them into built
+# app's root.
+binaries = collect_dynamic_libs("numpy", ".")
+
+# If using Conda without any non-conda virtual environment manager:
+if is_pure_conda:
+    # Assume NumPy is running from conda-forge and collect its DLLs from the
+    # communal Conda bin directory. DLLs from NumPy's dependencies must also
+    # be collected to capture MKL, OpenBLAS, OpenMP, etc.
+ from PyInstaller.utils.hooks import conda_support
+ datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)
+
+# Submodules PyInstaller cannot detect (probably because they are only imported
+# by extension modules, which PyInstaller cannot read).
+hiddenimports = ['numpy.core._dtype_ctypes']
+if is_conda:
+ hiddenimports.append("six")
+
+# Remove testing and building code and packages that are referenced throughout
+# NumPy but are not really dependencies.
+excludedimports = [
+ "scipy",
+ "pytest",
+ "nose",
+ "f2py",
+ "setuptools",
+ "numpy.f2py",
+ "distutils",
+ "numpy.distutils",
+]
diff --git a/venv/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py
new file mode 100644
index 00000000..eb28070e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/pyinstaller-smoke.py
@@ -0,0 +1,32 @@
+"""A crude *bit of everything* smoke test to verify PyInstaller compatibility.
+
+PyInstaller typically goes wrong by forgetting to package modules, extension
+modules or shared libraries. This script should aim to touch as many of those
+as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure
+due to an uncollected resource. Missing resources are unlikely to lead to
+arithmetic errors so there's generally no need to verify any calculation's
+output - merely that it made it to the end OK. This script should not
+explicitly import any of numpy's submodules as that gives PyInstaller undue
+hints that those submodules exist and should be collected (accessing implicitly
+loaded submodules is OK).
+
+"""
+import numpy as np
+
+a = np.arange(1., 10.).reshape((3, 3)) % 5
+np.linalg.det(a)
+a @ a
+a @ a.T
+np.linalg.inv(a)
+np.sin(np.exp(a))
+np.linalg.svd(a)
+np.linalg.eigh(a)
+
+np.unique(np.random.randint(0, 10, 100))
+np.sort(np.random.uniform(0, 10, 100))
+
+np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum()
+np.polynomial.Legendre([7, 8, 9]).roots()
+
+print("I made it!")
diff --git a/venv/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py
new file mode 100644
index 00000000..a9061da1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_pyinstaller/test_pyinstaller.py
@@ -0,0 +1,35 @@
+import subprocess
+from pathlib import Path
+
+import pytest
+
+
+# PyInstaller has been slow to replace the deprecated 'imp' module with 'importlib'.
+@pytest.mark.filterwarnings('ignore::DeprecationWarning')
+# It also leaks io.BytesIO()s.
+@pytest.mark.filterwarnings('ignore::ResourceWarning')
+@pytest.mark.parametrize("mode", ["--onedir", "--onefile"])
+@pytest.mark.slow
+def test_pyinstaller(mode, tmp_path):
+ """Compile and run pyinstaller-smoke.py using PyInstaller."""
+
+ pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run
+
+ source = Path(__file__).with_name("pyinstaller-smoke.py").resolve()
+ args = [
+ # Place all generated files in ``tmp_path``.
+ '--workpath', str(tmp_path / "build"),
+ '--distpath', str(tmp_path / "dist"),
+ '--specpath', str(tmp_path),
+ mode,
+ str(source),
+ ]
+ pyinstaller_cli(args)
+
+ if mode == "--onefile":
+ exe = tmp_path / "dist" / source.stem
+ else:
+ exe = tmp_path / "dist" / source.stem / source.stem
+
+ p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE)
+ assert p.stdout.strip() == b"I made it!"
diff --git a/venv/lib/python3.9/site-packages/numpy/_pytesttester.py b/venv/lib/python3.9/site-packages/numpy/_pytesttester.py
new file mode 100644
index 00000000..01ddaaf9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_pytesttester.py
@@ -0,0 +1,206 @@
+"""
+Pytest test running.
+
+This module implements the ``test()`` function for NumPy modules. The usual
+boiler plate for doing that is to put the following in the module
+``__init__.py`` file::
+
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+
+Warnings filtering and other runtime settings should be dealt with in the
+``pytest.ini`` file in the numpy repo root. The behavior of the tests depends
+on whether or not that file is found, as follows:
+
+* ``pytest.ini`` is present (develop mode)
+  All warnings except those explicitly filtered out are raised as errors.
+* ``pytest.ini`` is absent (release mode)
+ DeprecationWarnings and PendingDeprecationWarnings are ignored, other
+ warnings are passed through.
+
+In practice, tests run from the numpy repo are run in develop mode. That
+includes the standard ``python runtests.py`` invocation.
+
+This module is imported by every numpy subpackage, so lies at the top level to
+simplify circular import issues. For the same reason, it contains no numpy
+imports at module scope, instead importing numpy within function calls.
+"""
+import sys
+import os
+
+__all__ = ['PytestTester']
+
+
+def _show_numpy_info():
+ import numpy as np
+
+ print("NumPy version %s" % np.__version__)
+ relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+ info = np.lib.utils._opt_info()
+ print("NumPy CPU features: ", (info if info else 'nothing enabled'))
+
+
+class PytestTester:
+ """
+ Pytest test runner.
+
+ A test function is typically added to a package's __init__.py like so::
+
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__).test
+ del PytestTester
+
+ Calling this test function finds and runs all tests associated with the
+ module and all its sub-modules.
+
+ Attributes
+ ----------
+ module_name : str
+ Full path to the package to test.
+
+ Parameters
+ ----------
+ module_name : module name
+ The name of the module to test.
+
+ Notes
+ -----
+ Unlike the previous ``nose``-based implementation, this class is not
+ publicly exposed as it performs some ``numpy``-specific warning
+ suppression.
+
+ """
+ def __init__(self, module_name):
+ self.module_name = module_name
+
+ def __call__(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, durations=-1, tests=None):
+ """
+ Run tests for module using pytest.
+
+ Parameters
+ ----------
+ label : {'fast', 'full'}, optional
+ Identifies the tests to run. When set to 'fast', tests decorated
+ with `pytest.mark.slow` are skipped, when 'full', the slow marker
+ is ignored.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-3. Default is 1.
+ extra_argv : list, optional
+            List with any extra arguments to pass to pytest.
+ doctests : bool, optional
+ .. note:: Not supported
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ Requires installation of (pip) pytest-cov.
+ durations : int, optional
+            If < 0, do nothing; if 0, report the time of all tests; if > 0,
+            report the time of the ``durations`` slowest tests. Default is -1.
+ tests : test or list of tests
+ Tests to be executed with pytest '--pyargs'
+
+ Returns
+ -------
+ result : bool
+            Return True on success, False otherwise.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for
+ it. For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+ ...
+ 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
+ >>> result
+ True
+
+ """
+ import pytest
+ import warnings
+
+ module = sys.modules[self.module_name]
+ module_path = os.path.abspath(module.__path__[0])
+
+ # setup the pytest arguments
+ pytest_args = ["-l"]
+
+ # offset verbosity. The "-q" cancels a "-v".
+ pytest_args += ["-q"]
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ from numpy.distutils import cpuinfo
+
+ with warnings.catch_warnings(record=True):
+            # Ignore the warning from importing the array_api submodule. This
+            # warning is issued on import, so it would break pytest collection,
+            # but importing it early here prevents the warning from being
+            # issued when it is imported again.
+ import numpy.array_api
+
+ # Filter out annoying import messages. Want these in both develop and
+ # release mode.
+ pytest_args += [
+ "-W ignore:Not importing directory",
+ "-W ignore:numpy.dtype size changed",
+ "-W ignore:numpy.ufunc size changed",
+ "-W ignore::UserWarning:cpuinfo",
+ ]
+
+ # When testing matrices, ignore their PendingDeprecationWarnings
+ pytest_args += [
+ "-W ignore:the matrix subclass is not",
+ "-W ignore:Importing from numpy.matlib is",
+ ]
+
+ if doctests:
+ pytest_args += ["--doctest-modules"]
+
+ if extra_argv:
+ pytest_args += list(extra_argv)
+
+ if verbose > 1:
+ pytest_args += ["-" + "v"*(verbose - 1)]
+
+ if coverage:
+ pytest_args += ["--cov=" + module_path]
+
+ if label == "fast":
+ # not importing at the top level to avoid circular import of module
+ from numpy.testing import IS_PYPY
+ if IS_PYPY:
+ pytest_args += ["-m", "not slow and not slow_pypy"]
+ else:
+ pytest_args += ["-m", "not slow"]
+
+ elif label != "full":
+ pytest_args += ["-m", label]
+
+ if durations >= 0:
+ pytest_args += ["--durations=%s" % durations]
+
+ if tests is None:
+ tests = [self.module_name]
+
+ pytest_args += ["--pyargs"] + list(tests)
+
+ # run tests.
+ _show_numpy_info()
+
+ try:
+ code = pytest.main(pytest_args)
+ except SystemExit as exc:
+ code = exc.code
+
+ return code == 0
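+
+# Rough equivalence sketch (illustrative): on CPython,
+# ``np.lib.test(label='fast', verbose=2)`` builds arguments comparable to
+#
+#     pytest -l -q -v -m "not slow" --pyargs numpy.lib
+#
+# plus the warning filters assembled above.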
diff --git a/venv/lib/python3.9/site-packages/numpy/_pytesttester.pyi b/venv/lib/python3.9/site-packages/numpy/_pytesttester.pyi
new file mode 100644
index 00000000..67ac87b3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_pytesttester.pyi
@@ -0,0 +1,18 @@
+from collections.abc import Iterable
+from typing import Literal as L
+
+__all__: list[str]
+
+class PytestTester:
+ module_name: str
+ def __init__(self, module_name: str) -> None: ...
+ def __call__(
+ self,
+ label: L["fast", "full"] = ...,
+ verbose: int = ...,
+ extra_argv: None | Iterable[str] = ...,
+ doctests: L[False] = ...,
+ coverage: bool = ...,
+ durations: int = ...,
+ tests: None | Iterable[str] = ...,
+ ) -> bool: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/__init__.py b/venv/lib/python3.9/site-packages/numpy/_typing/__init__.py
new file mode 100644
index 00000000..b800c54b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/__init__.py
@@ -0,0 +1,225 @@
+"""Private counterpart of ``numpy.typing``."""
+
+from __future__ import annotations
+
+from numpy import ufunc
+from numpy.core.overrides import set_module
+from typing import TYPE_CHECKING, final
+
+
+@final # Disallow the creation of arbitrary `NBitBase` subclasses
+@set_module("numpy.typing")
+class NBitBase:
+ """
+ A type representing `numpy.number` precision during static type checking.
+
+    Used exclusively for the purpose of static type checking, `NBitBase`
+ represents the base of a hierarchical set of subclasses.
+ Each subsequent subclass is herein used for representing a lower level
+ of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.
+
+ .. versionadded:: 1.20
+
+ Examples
+ --------
+ Below is a typical usage example: `NBitBase` is herein used for annotating
+    a function that takes a float and an integer of arbitrary precision
+ as arguments and returns a new float of whichever precision is largest
+ (*e.g.* ``np.float16 + np.int64 -> np.float64``).
+
+ .. code-block:: python
+
+ >>> from __future__ import annotations
+ >>> from typing import TypeVar, TYPE_CHECKING
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> T1 = TypeVar("T1", bound=npt.NBitBase)
+ >>> T2 = TypeVar("T2", bound=npt.NBitBase)
+
+ >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]:
+ ... return a + b
+
+ >>> a = np.float16()
+ >>> b = np.int64()
+ >>> out = add(a, b)
+
+ >>> if TYPE_CHECKING:
+ ... reveal_locals()
+ ... # note: Revealed local types are:
+ ... # note: a: numpy.floating[numpy.typing._16Bit*]
+ ... # note: b: numpy.signedinteger[numpy.typing._64Bit*]
+ ... # note: out: numpy.floating[numpy.typing._64Bit*]
+
+ """
+
+ def __init_subclass__(cls) -> None:
+ allowed_names = {
+ "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit",
+ "_64Bit", "_32Bit", "_16Bit", "_8Bit",
+ }
+ if cls.__name__ not in allowed_names:
+ raise TypeError('cannot inherit from final class "NBitBase"')
+ super().__init_subclass__()
+
+
+# Silence errors about subclassing a `@final`-decorated class
+class _256Bit(NBitBase): # type: ignore[misc]
+ pass
+
+class _128Bit(_256Bit): # type: ignore[misc]
+ pass
+
+class _96Bit(_128Bit): # type: ignore[misc]
+ pass
+
+class _80Bit(_96Bit): # type: ignore[misc]
+ pass
+
+class _64Bit(_80Bit): # type: ignore[misc]
+ pass
+
+class _32Bit(_64Bit): # type: ignore[misc]
+ pass
+
+class _16Bit(_32Bit): # type: ignore[misc]
+ pass
+
+class _8Bit(_16Bit): # type: ignore[misc]
+ pass
+
+
+from ._nested_sequence import (
+ _NestedSequence as _NestedSequence,
+)
+from ._nbit import (
+ _NBitByte as _NBitByte,
+ _NBitShort as _NBitShort,
+ _NBitIntC as _NBitIntC,
+ _NBitIntP as _NBitIntP,
+ _NBitInt as _NBitInt,
+ _NBitLongLong as _NBitLongLong,
+ _NBitHalf as _NBitHalf,
+ _NBitSingle as _NBitSingle,
+ _NBitDouble as _NBitDouble,
+ _NBitLongDouble as _NBitLongDouble,
+)
+from ._char_codes import (
+ _BoolCodes as _BoolCodes,
+ _UInt8Codes as _UInt8Codes,
+ _UInt16Codes as _UInt16Codes,
+ _UInt32Codes as _UInt32Codes,
+ _UInt64Codes as _UInt64Codes,
+ _Int8Codes as _Int8Codes,
+ _Int16Codes as _Int16Codes,
+ _Int32Codes as _Int32Codes,
+ _Int64Codes as _Int64Codes,
+ _Float16Codes as _Float16Codes,
+ _Float32Codes as _Float32Codes,
+ _Float64Codes as _Float64Codes,
+ _Complex64Codes as _Complex64Codes,
+ _Complex128Codes as _Complex128Codes,
+ _ByteCodes as _ByteCodes,
+ _ShortCodes as _ShortCodes,
+ _IntCCodes as _IntCCodes,
+ _IntPCodes as _IntPCodes,
+ _IntCodes as _IntCodes,
+ _LongLongCodes as _LongLongCodes,
+ _UByteCodes as _UByteCodes,
+ _UShortCodes as _UShortCodes,
+ _UIntCCodes as _UIntCCodes,
+ _UIntPCodes as _UIntPCodes,
+ _UIntCodes as _UIntCodes,
+ _ULongLongCodes as _ULongLongCodes,
+ _HalfCodes as _HalfCodes,
+ _SingleCodes as _SingleCodes,
+ _DoubleCodes as _DoubleCodes,
+ _LongDoubleCodes as _LongDoubleCodes,
+ _CSingleCodes as _CSingleCodes,
+ _CDoubleCodes as _CDoubleCodes,
+ _CLongDoubleCodes as _CLongDoubleCodes,
+ _DT64Codes as _DT64Codes,
+ _TD64Codes as _TD64Codes,
+ _StrCodes as _StrCodes,
+ _BytesCodes as _BytesCodes,
+ _VoidCodes as _VoidCodes,
+ _ObjectCodes as _ObjectCodes,
+)
+from ._scalars import (
+ _CharLike_co as _CharLike_co,
+ _BoolLike_co as _BoolLike_co,
+ _UIntLike_co as _UIntLike_co,
+ _IntLike_co as _IntLike_co,
+ _FloatLike_co as _FloatLike_co,
+ _ComplexLike_co as _ComplexLike_co,
+ _TD64Like_co as _TD64Like_co,
+ _NumberLike_co as _NumberLike_co,
+ _ScalarLike_co as _ScalarLike_co,
+ _VoidLike_co as _VoidLike_co,
+)
+from ._shape import (
+ _Shape as _Shape,
+ _ShapeLike as _ShapeLike,
+)
+from ._dtype_like import (
+ DTypeLike as DTypeLike,
+ _DTypeLike as _DTypeLike,
+ _SupportsDType as _SupportsDType,
+ _VoidDTypeLike as _VoidDTypeLike,
+ _DTypeLikeBool as _DTypeLikeBool,
+ _DTypeLikeUInt as _DTypeLikeUInt,
+ _DTypeLikeInt as _DTypeLikeInt,
+ _DTypeLikeFloat as _DTypeLikeFloat,
+ _DTypeLikeComplex as _DTypeLikeComplex,
+ _DTypeLikeTD64 as _DTypeLikeTD64,
+ _DTypeLikeDT64 as _DTypeLikeDT64,
+ _DTypeLikeObject as _DTypeLikeObject,
+ _DTypeLikeVoid as _DTypeLikeVoid,
+ _DTypeLikeStr as _DTypeLikeStr,
+ _DTypeLikeBytes as _DTypeLikeBytes,
+ _DTypeLikeComplex_co as _DTypeLikeComplex_co,
+)
+from ._array_like import (
+ ArrayLike as ArrayLike,
+ _ArrayLike as _ArrayLike,
+ _FiniteNestedSequence as _FiniteNestedSequence,
+ _SupportsArray as _SupportsArray,
+ _SupportsArrayFunc as _SupportsArrayFunc,
+ _ArrayLikeInt as _ArrayLikeInt,
+ _ArrayLikeBool_co as _ArrayLikeBool_co,
+ _ArrayLikeUInt_co as _ArrayLikeUInt_co,
+ _ArrayLikeInt_co as _ArrayLikeInt_co,
+ _ArrayLikeFloat_co as _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co as _ArrayLikeComplex_co,
+ _ArrayLikeNumber_co as _ArrayLikeNumber_co,
+ _ArrayLikeTD64_co as _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co as _ArrayLikeDT64_co,
+ _ArrayLikeObject_co as _ArrayLikeObject_co,
+ _ArrayLikeVoid_co as _ArrayLikeVoid_co,
+ _ArrayLikeStr_co as _ArrayLikeStr_co,
+ _ArrayLikeBytes_co as _ArrayLikeBytes_co,
+ _ArrayLikeUnknown as _ArrayLikeUnknown,
+ _UnknownType as _UnknownType,
+)
+from ._generic_alias import (
+ NDArray as NDArray,
+ _DType as _DType,
+ _GenericAlias as _GenericAlias,
+)
+
+if TYPE_CHECKING:
+ from ._ufunc import (
+ _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1,
+ _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1,
+ _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2,
+ _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2,
+ _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1,
+ )
+else:
+ # Declare the (type-check-only) ufunc subclasses as ufunc aliases during
+ # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834)
+ _UFunc_Nin1_Nout1 = ufunc
+ _UFunc_Nin2_Nout1 = ufunc
+ _UFunc_Nin1_Nout2 = ufunc
+ _UFunc_Nin2_Nout2 = ufunc
+ _GUFunc_Nin2_Nout1 = ufunc
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py b/venv/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py
new file mode 100644
index 00000000..10d77f51
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_add_docstring.py
@@ -0,0 +1,152 @@
+"""A module for creating docstrings for sphinx ``data`` domains."""
+
+import re
+import textwrap
+
+from ._generic_alias import NDArray
+
+_docstrings_list = []
+
+
+def add_newdoc(name: str, value: str, doc: str) -> None:
+ """Append ``_docstrings_list`` with a docstring for `name`.
+
+ Parameters
+ ----------
+ name : str
+ The name of the object.
+ value : str
+ A string-representation of the object.
+ doc : str
+ The docstring of the object.
+
+ """
+ _docstrings_list.append((name, value, doc))
+
+
+def _parse_docstrings() -> str:
+ """Convert all docstrings in ``_docstrings_list`` into a single
+ sphinx-legible text block.
+
+ """
+ type_list_ret = []
+ for name, value, doc in _docstrings_list:
+ s = textwrap.dedent(doc).replace("\n", "\n ")
+
+ # Replace sections by rubrics
+ lines = s.split("\n")
+ new_lines = []
+ indent = ""
+ for line in lines:
+ m = re.match(r'^(\s+)[-=]+\s*$', line)
+ if m and new_lines:
+ prev = textwrap.dedent(new_lines.pop())
+ if prev == "Examples":
+ indent = ""
+ new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
+ else:
+ indent = 4 * " "
+ new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
+ new_lines.append("")
+ else:
+ new_lines.append(f"{indent}{line}")
+
+ s = "\n".join(new_lines)
+ s_block = f""".. data:: {name}\n :value: {value}\n {s}"""
+ type_list_ret.append(s_block)
+ return "\n".join(type_list_ret)
+
+
+add_newdoc('ArrayLike', 'typing.Union[...]',
+ """
+ A `~typing.Union` representing objects that can be coerced
+ into an `~numpy.ndarray`.
+
+ Among others this includes the likes of:
+
+ * Scalars.
+ * (Nested) sequences.
+ * Objects implementing the `~class.__array__` protocol.
+
+ .. versionadded:: 1.20
+
+ See Also
+ --------
+ :term:`array_like`:
+ Any scalar or sequence that can be interpreted as an ndarray.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> def as_array(a: npt.ArrayLike) -> np.ndarray:
+ ... return np.array(a)
+
+ """)
+
+add_newdoc('DTypeLike', 'typing.Union[...]',
+ """
+ A `~typing.Union` representing objects that can be coerced
+ into a `~numpy.dtype`.
+
+ Among others this includes the likes of:
+
+ * :class:`type` objects.
+ * Character codes or the names of :class:`type` objects.
+ * Objects with the ``.dtype`` attribute.
+
+ .. versionadded:: 1.20
+
+ See Also
+ --------
+ :ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
+ A comprehensive overview of all objects that can be coerced
+ into data types.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
+ ... return np.dtype(d)
+
+ """)
+
+add_newdoc('NDArray', repr(NDArray),
+ """
+ A :term:`generic <generic type>` version of
+ `np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`.
+
+ Can be used during runtime for typing arrays with a given dtype
+ and unspecified shape.
+
+ .. versionadded:: 1.21
+
+ Examples
+ --------
+ .. code-block:: python
+
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> print(npt.NDArray)
+ numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]]
+
+ >>> print(npt.NDArray[np.float64])
+ numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
+
+ >>> NDArrayInt = npt.NDArray[np.int_]
+ >>> a: NDArrayInt = np.arange(10)
+
+ >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
+ ... return np.array(a)
+
+ """)
+
+_docstrings = _parse_docstrings()
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_array_like.py b/venv/lib/python3.9/site-packages/numpy/_typing/_array_like.py
new file mode 100644
index 00000000..16c9defc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_array_like.py
@@ -0,0 +1,165 @@
+from __future__ import annotations
+
+# NOTE: `Sequence` is imported from `typing` because it is needed for a
+# type alias, not an annotation
+import sys
+from collections.abc import Collection, Callable
+from typing import Any, Sequence, Protocol, Union, TypeVar, runtime_checkable
+from numpy import (
+ ndarray,
+ dtype,
+ generic,
+ bool_,
+ unsignedinteger,
+ integer,
+ floating,
+ complexfloating,
+ number,
+ timedelta64,
+ datetime64,
+ object_,
+ void,
+ str_,
+ bytes_,
+)
+from ._nested_sequence import _NestedSequence
+
+_T = TypeVar("_T")
+_ScalarType = TypeVar("_ScalarType", bound=generic)
+_DType = TypeVar("_DType", bound="dtype[Any]")
+_DType_co = TypeVar("_DType_co", covariant=True, bound="dtype[Any]")
+
+# The `_SupportsArray` protocol only cares about the default dtype
+# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
+# array.
+# Concrete implementations of the protocol are responsible for adding
+# any and all remaining overloads
+@runtime_checkable
+class _SupportsArray(Protocol[_DType_co]):
+ def __array__(self) -> ndarray[Any, _DType_co]: ...
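+
+# Illustrative sketch (hypothetical class, assuming ``import numpy as np``):
+# any object with a suitable ``__array__`` satisfies this protocol, e.g.
+#
+#     class MyContainer:
+#         def __array__(self) -> np.ndarray:
+#             return np.zeros(3)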
+
+
+@runtime_checkable
+class _SupportsArrayFunc(Protocol):
+ """A protocol class representing `~class.__array_function__`."""
+ def __array_function__(
+ self,
+ func: Callable[..., Any],
+ types: Collection[type[Any]],
+ args: tuple[Any, ...],
+ kwargs: dict[str, Any],
+ ) -> object: ...
+
+
+# TODO: Wait until mypy supports recursive objects in combination with typevars
+_FiniteNestedSequence = Union[
+ _T,
+ Sequence[_T],
+ Sequence[Sequence[_T]],
+ Sequence[Sequence[Sequence[_T]]],
+ Sequence[Sequence[Sequence[Sequence[_T]]]],
+]
+
+# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic`
+_ArrayLike = Union[
+ _SupportsArray["dtype[_ScalarType]"],
+ _NestedSequence[_SupportsArray["dtype[_ScalarType]"]],
+]
+
+# A union representing array-like objects; consists of two typevars:
+# One representing types that can be parametrized w.r.t. `np.dtype`
+# and another one for the rest
+_DualArrayLike = Union[
+ _SupportsArray[_DType],
+ _NestedSequence[_SupportsArray[_DType]],
+ _T,
+ _NestedSequence[_T],
+]
+
+# TODO: support buffer protocols once
+#
+# https://bugs.python.org/issue27501
+#
+# is resolved. See also the mypy issue:
+#
+# https://github.com/python/typing/issues/593
+if sys.version_info[:2] < (3, 9):
+ ArrayLike = _DualArrayLike[
+ dtype,
+ Union[bool, int, float, complex, str, bytes],
+ ]
+else:
+ ArrayLike = _DualArrayLike[
+ dtype[Any],
+ Union[bool, int, float, complex, str, bytes],
+ ]
+
+# `ArrayLike<X>_co`: array-like objects that can be coerced into `X`
+# given the casting rules `same_kind`
+_ArrayLikeBool_co = _DualArrayLike[
+ "dtype[bool_]",
+ bool,
+]
+_ArrayLikeUInt_co = _DualArrayLike[
+ "dtype[Union[bool_, unsignedinteger[Any]]]",
+ bool,
+]
+_ArrayLikeInt_co = _DualArrayLike[
+ "dtype[Union[bool_, integer[Any]]]",
+ Union[bool, int],
+]
+_ArrayLikeFloat_co = _DualArrayLike[
+ "dtype[Union[bool_, integer[Any], floating[Any]]]",
+ Union[bool, int, float],
+]
+_ArrayLikeComplex_co = _DualArrayLike[
+ "dtype[Union[bool_, integer[Any], floating[Any], complexfloating[Any, Any]]]",
+ Union[bool, int, float, complex],
+]
+_ArrayLikeNumber_co = _DualArrayLike[
+ "dtype[Union[bool_, number[Any]]]",
+ Union[bool, int, float, complex],
+]
+_ArrayLikeTD64_co = _DualArrayLike[
+ "dtype[Union[bool_, integer[Any], timedelta64]]",
+ Union[bool, int],
+]
+_ArrayLikeDT64_co = Union[
+ _SupportsArray["dtype[datetime64]"],
+ _NestedSequence[_SupportsArray["dtype[datetime64]"]],
+]
+_ArrayLikeObject_co = Union[
+ _SupportsArray["dtype[object_]"],
+ _NestedSequence[_SupportsArray["dtype[object_]"]],
+]
+
+_ArrayLikeVoid_co = Union[
+ _SupportsArray["dtype[void]"],
+ _NestedSequence[_SupportsArray["dtype[void]"]],
+]
+_ArrayLikeStr_co = _DualArrayLike[
+ "dtype[str_]",
+ str,
+]
+_ArrayLikeBytes_co = _DualArrayLike[
+ "dtype[bytes_]",
+ bytes,
+]
+
+_ArrayLikeInt = _DualArrayLike[
+ "dtype[integer[Any]]",
+ int,
+]
+
+# Extra ArrayLike type so that pyright can deal with NDArray[Any]
+# Used as the first overload, should only match NDArray[Any],
+# not any actual types.
+# https://github.com/numpy/numpy/pull/22193
+class _UnknownType:
+ ...
+
+
+_ArrayLikeUnknown = _DualArrayLike[
+ "dtype[_UnknownType]",
+ _UnknownType,
+]
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_callable.pyi b/venv/lib/python3.9/site-packages/numpy/_typing/_callable.pyi
new file mode 100644
index 00000000..5dfe9e7c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_callable.pyi
@@ -0,0 +1,338 @@
+"""
+A module with various ``typing.Protocol`` subclasses that implement
+the ``__call__`` magic method.
+
+See the `Mypy documentation`_ on protocols for more details.
+
+.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
+
+"""
+
+from __future__ import annotations
+
+from typing import (
+ TypeVar,
+ overload,
+ Any,
+ NoReturn,
+ Protocol,
+)
+
+from numpy import (
+ ndarray,
+ dtype,
+ generic,
+ bool_,
+ timedelta64,
+ number,
+ integer,
+ unsignedinteger,
+ signedinteger,
+ int8,
+ int_,
+ floating,
+ float64,
+ complexfloating,
+ complex128,
+)
+from ._nbit import _NBitInt, _NBitDouble
+from ._scalars import (
+ _BoolLike_co,
+ _IntLike_co,
+ _FloatLike_co,
+ _NumberLike_co,
+)
+from . import NBitBase
+from ._generic_alias import NDArray
+from ._nested_sequence import _NestedSequence
+
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T1_contra = TypeVar("_T1_contra", contravariant=True)
+_T2_contra = TypeVar("_T2_contra", contravariant=True)
+_2Tuple = tuple[_T1, _T1]
+
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+_IntType = TypeVar("_IntType", bound=integer)
+_FloatType = TypeVar("_FloatType", bound=floating)
+_NumberType = TypeVar("_NumberType", bound=number)
+_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
+_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
+
+class _BoolOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolBitOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _IntType: ...
+
+class _BoolSub(Protocol):
+ # Note that `other: bool_` is absent here
+ @overload
+ def __call__(self, other: bool, /) -> NoReturn: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolTrueDiv(Protocol):
+ @overload
+ def __call__(self, other: float | _IntLike_co, /) -> float64: ...
+ @overload
+ def __call__(self, other: complex, /) -> complex128: ...
+ @overload
+ def __call__(self, other: _NumberType, /) -> _NumberType: ...
+
+class _BoolMod(Protocol):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> int8: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> int_: ...
+ @overload
+ def __call__(self, other: float, /) -> float64: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _IntType: ...
+ @overload
+ def __call__(self, other: _FloatType, /) -> _FloatType: ...
+
+class _BoolDivMod(Protocol):
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ...
+ @overload # platform dependent
+ def __call__(self, other: int, /) -> _2Tuple[int_]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ...
+ @overload
+ def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ...
+
+class _TD64Div(Protocol[_NumberType_co]):
+ @overload
+ def __call__(self, other: timedelta64, /) -> _NumberType_co: ...
+ @overload
+ def __call__(self, other: _BoolLike_co, /) -> NoReturn: ...
+ @overload
+ def __call__(self, other: _FloatLike_co, /) -> timedelta64: ...
+
+class _IntTrueDiv(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntOp(Protocol[_NBit1]):
+ # NOTE: `uint64 + signedinteger -> float64`
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> Any: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
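+
+# Note (illustrative): the ``Any`` return for ``int | signedinteger`` mirrors
+# runtime promotion, e.g. ``np.uint64(1) + np.int64(1)`` yields ``np.float64``,
+# so no single precise type can be declared here.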
+
+class _UnsignedIntBitOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[Any]: ...
+ @overload
+ def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> Any: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> unsignedinteger[_NBit1 | _NBit2]: ...
+
+class _UnsignedIntDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
+ @overload
+ def __call__(
+ self, other: int | signedinteger[Any], /
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: unsignedinteger[_NBit2], /
+ ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
+
+class _SignedIntOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntBitOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> signedinteger[_NBit1 | _NBit2]: ...
+
+class _SignedIntDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
+ @overload
+ def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: signedinteger[_NBit2], /,
+ ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
+
+class _FloatOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> floating[_NBit1 | _NBit2]: ...
+
+class _FloatMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> floating[_NBit1 | _NBit2]: ...
+
+class _FloatDivMod(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ...
+ @overload
+ def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
+ @overload
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
+ @overload
+ def __call__(
+ self, other: integer[_NBit2] | floating[_NBit2], /
+ ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
+
+class _ComplexOp(Protocol[_NBit1]):
+ @overload
+ def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ...
+ @overload
+ def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
+ @overload
+ def __call__(
+ self, other: complex, /,
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
+ @overload
+ def __call__(
+ self,
+ other: (
+ integer[_NBit2]
+ | floating[_NBit2]
+ | complexfloating[_NBit2, _NBit2]
+ ), /,
+ ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
+
+class _NumberOp(Protocol):
+ def __call__(self, other: _NumberLike_co, /) -> Any: ...
+
+class _SupportsLT(Protocol):
+ def __lt__(self, other: Any, /) -> object: ...
+
+class _SupportsGT(Protocol):
+ def __gt__(self, other: Any, /) -> object: ...
+
+class _ComparisonOp(Protocol[_T1_contra, _T2_contra]):
+ @overload
+ def __call__(self, other: _T1_contra, /) -> bool_: ...
+ @overload
+ def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ...
+ @overload
+ def __call__(
+ self,
+ other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT],
+ /,
+ ) -> Any: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_char_codes.py b/venv/lib/python3.9/site-packages/numpy/_typing/_char_codes.py
new file mode 100644
index 00000000..f840d17b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_char_codes.py
@@ -0,0 +1,111 @@
+from typing import Literal
+
+_BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
+
+_UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
+_UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
+_UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
+_UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
+
+_Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
+_Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
+_Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
+_Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
+
+_Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
+_Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
+_Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
+
+_Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
+_Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
+
+_ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
+_ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
+_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
+_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
+_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
+
+_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
+_UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
+_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
+
+_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
+
+_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
+
+_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
+
+_DT64Codes = Literal[
+ "datetime64", "=datetime64", "<datetime64", ">datetime64",
+ "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
+ "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
+ "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
+ "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
+ "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
+ "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
+ "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
+ "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
+ "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
+ "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
+ "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
+ "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
+ "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
+ "M", "=M", "<M", ">M",
+ "M8", "=M8", "<M8", ">M8",
+ "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+ "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+ "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+ "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+ "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+ "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+ "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+ "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+ "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+ "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+ "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+ "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+ "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]
+_TD64Codes = Literal[
+ "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
+ "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
+ "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
+ "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
+ "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
+ "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
+ "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
+ "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
+ "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
+ "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
+ "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
+ "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
+ "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
+ "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
+ "m", "=m", "<m", ">m",
+ "m8", "=m8", "<m8", ">m8",
+ "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+ "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+ "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+ "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+ "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+ "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+ "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+ "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+ "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+ "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+ "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+ "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+ "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]
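+
+# Illustrative: these literals mirror the strings accepted by ``np.dtype``,
+# e.g.
+#
+#     np.dtype("<i4") == np.dtype("int32")   # True
+#     np.dtype("m8[ns]")                     # timedelta64 at ns resolution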
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py b/venv/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py
new file mode 100644
index 00000000..e92e17dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py
@@ -0,0 +1,249 @@
+from typing import (
+ Any,
+ List,
+ Sequence,
+ Tuple,
+ Union,
+ Type,
+ TypeVar,
+ Protocol,
+ TypedDict,
+ runtime_checkable,
+)
+
+import numpy as np
+
+from ._shape import _ShapeLike
+from ._generic_alias import _DType as DType
+
+from ._char_codes import (
+ _BoolCodes,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _Complex64Codes,
+ _Complex128Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+ _DT64Codes,
+ _TD64Codes,
+ _StrCodes,
+ _BytesCodes,
+ _VoidCodes,
+ _ObjectCodes,
+)
+
+_SCT = TypeVar("_SCT", bound=np.generic)
+_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
+
+_DTypeLikeNested = Any # TODO: wait for support for recursive types
+
+
+# Mandatory keys
+class _DTypeDictBase(TypedDict):
+ names: Sequence[str]
+ formats: Sequence[_DTypeLikeNested]
+
+
+# Mandatory + optional keys
+class _DTypeDict(_DTypeDictBase, total=False):
+ # Only `str` elements are usable as indexing aliases,
+ # but `titles` can in principle accept any object
+ offsets: Sequence[int]
+ titles: Sequence[Any]
+ itemsize: int
+ aligned: bool
+
+
+# A protocol for anything with the dtype attribute
+@runtime_checkable
+class _SupportsDType(Protocol[_DType_co]):
+ @property
+ def dtype(self) -> _DType_co: ...
+
+
+# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
+_DTypeLike = Union[
+ "np.dtype[_SCT]",
+ Type[_SCT],
+ _SupportsDType["np.dtype[_SCT]"],
+]
+
+
+# Would create a dtype[np.void]
+_VoidDTypeLike = Union[
+ # (flexible_dtype, itemsize)
+ Tuple[_DTypeLikeNested, int],
+ # (fixed_dtype, shape)
+ Tuple[_DTypeLikeNested, _ShapeLike],
+ # [(field_name, field_dtype, field_shape), ...]
+ #
+    # The type here is broad because NumPy accepts a wide range of inputs
+    # inside the list; see the tests for some examples.
+ List[Any],
+ # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...,
+ # 'itemsize': ...}
+ _DTypeDict,
+ # (base_dtype, new_dtype)
+ Tuple[_DTypeLikeNested, _DTypeLikeNested],
+]
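+
+# Illustrative values matching the branches above (assuming
+# ``import numpy as np``):
+#
+#     np.dtype((np.void, 10))                        # (flexible_dtype, itemsize)
+#     np.dtype(("i4", (2, 2)))                       # (fixed_dtype, shape)
+#     np.dtype([("x", "f8"), ("y", "f8")])           # list of fields
+#     np.dtype({"names": ["a"], "formats": ["f8"]})  # field dict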
+
+# Anything that can be coerced into numpy.dtype.
+# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
+DTypeLike = Union[
+ DType[Any],
+ # default data type (float64)
+ None,
+ # array-scalar types and generic types
+ Type[Any], # NOTE: We're stuck with `Type[Any]` due to object dtypes
+ # anything with a dtype attribute
+ _SupportsDType[DType[Any]],
+ # character codes, type strings or comma-separated fields, e.g., 'float64'
+ str,
+ _VoidDTypeLike,
+]
+
+# NOTE: while it is possible to provide the dtype as a dict of
+# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
+# this syntax is officially discouraged and
+# therefore not included in the Union defining `DTypeLike`.
+#
+# See https://github.com/numpy/numpy/issues/16891 for more details.
+
+# Aliases for commonly used dtype-like objects.
+# Note that the precision of `np.number` subclasses is ignored herein.
+_DTypeLikeBool = Union[
+ Type[bool],
+ Type[np.bool_],
+ DType[np.bool_],
+ _SupportsDType[DType[np.bool_]],
+ _BoolCodes,
+]
+_DTypeLikeUInt = Union[
+ Type[np.unsignedinteger],
+ DType[np.unsignedinteger],
+ _SupportsDType[DType[np.unsignedinteger]],
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+]
+_DTypeLikeInt = Union[
+ Type[int],
+ Type[np.signedinteger],
+ DType[np.signedinteger],
+ _SupportsDType[DType[np.signedinteger]],
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+]
+_DTypeLikeFloat = Union[
+ Type[float],
+ Type[np.floating],
+ DType[np.floating],
+ _SupportsDType[DType[np.floating]],
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+]
+_DTypeLikeComplex = Union[
+ Type[complex],
+ Type[np.complexfloating],
+ DType[np.complexfloating],
+ _SupportsDType[DType[np.complexfloating]],
+ _Complex64Codes,
+ _Complex128Codes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+]
+_DTypeLikeTD64 = Union[
+    Type[np.timedelta64],
+    DType[np.timedelta64],
+    _SupportsDType[DType[np.timedelta64]],
+    _TD64Codes,
+]
+_DTypeLikeDT64 = Union[
+    Type[np.datetime64],
+    DType[np.datetime64],
+    _SupportsDType[DType[np.datetime64]],
+    _DT64Codes,
+]
+_DTypeLikeStr = Union[
+ Type[str],
+ Type[np.str_],
+ DType[np.str_],
+ _SupportsDType[DType[np.str_]],
+ _StrCodes,
+]
+_DTypeLikeBytes = Union[
+ Type[bytes],
+ Type[np.bytes_],
+ DType[np.bytes_],
+ _SupportsDType[DType[np.bytes_]],
+ _BytesCodes,
+]
+_DTypeLikeVoid = Union[
+ Type[np.void],
+ DType[np.void],
+ _SupportsDType[DType[np.void]],
+ _VoidCodes,
+ _VoidDTypeLike,
+]
+_DTypeLikeObject = Union[
+ type,
+ DType[np.object_],
+ _SupportsDType[DType[np.object_]],
+ _ObjectCodes,
+]
+
+_DTypeLikeComplex_co = Union[
+ _DTypeLikeBool,
+ _DTypeLikeUInt,
+ _DTypeLikeInt,
+ _DTypeLikeFloat,
+ _DTypeLikeComplex,
+]
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py b/venv/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py
new file mode 100644
index 00000000..edc1778c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py
@@ -0,0 +1,43 @@
+"""A module with platform-specific extended precision
+`numpy.number` subclasses.
+
+The subclasses are defined here (instead of ``__init__.pyi``) such
+that they can be imported conditionally via numpy's mypy plugin.
+"""
+
+from typing import TYPE_CHECKING, Any  # `Any` is needed at runtime below
+
+import numpy as np
+from . import (
+ _80Bit,
+ _96Bit,
+ _128Bit,
+ _256Bit,
+)
+
+if TYPE_CHECKING:
+ uint128 = np.unsignedinteger[_128Bit]
+ uint256 = np.unsignedinteger[_256Bit]
+ int128 = np.signedinteger[_128Bit]
+ int256 = np.signedinteger[_256Bit]
+ float80 = np.floating[_80Bit]
+ float96 = np.floating[_96Bit]
+ float128 = np.floating[_128Bit]
+ float256 = np.floating[_256Bit]
+ complex160 = np.complexfloating[_80Bit, _80Bit]
+ complex192 = np.complexfloating[_96Bit, _96Bit]
+ complex256 = np.complexfloating[_128Bit, _128Bit]
+ complex512 = np.complexfloating[_256Bit, _256Bit]
+else:
+ uint128 = Any
+ uint256 = Any
+ int128 = Any
+ int256 = Any
+ float80 = Any
+ float96 = Any
+ float128 = Any
+ float256 = Any
+ complex160 = Any
+ complex192 = Any
+ complex256 = Any
+ complex512 = Any
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_generic_alias.py b/venv/lib/python3.9/site-packages/numpy/_typing/_generic_alias.py
new file mode 100644
index 00000000..01cd224a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_generic_alias.py
@@ -0,0 +1,245 @@
+from __future__ import annotations
+
+import sys
+import types
+from collections.abc import Generator, Iterable, Iterator
+from typing import (
+ Any,
+ ClassVar,
+ NoReturn,
+ TypeVar,
+ TYPE_CHECKING,
+)
+
+import numpy as np
+
+__all__ = ["_GenericAlias", "NDArray"]
+
+_T = TypeVar("_T", bound="_GenericAlias")
+
+
+def _to_str(obj: object) -> str:
+ """Helper function for `_GenericAlias.__repr__`."""
+ if obj is Ellipsis:
+ return '...'
+ elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):
+ if obj.__module__ == 'builtins':
+ return obj.__qualname__
+ else:
+ return f'{obj.__module__}.{obj.__qualname__}'
+ else:
+ return repr(obj)
+
+
+def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
+ """Search for all typevars and typevar-containing objects in `args`.
+
+ Helper function for `_GenericAlias.__init__`.
+
+ """
+ for i in args:
+ if hasattr(i, "__parameters__"):
+ yield from i.__parameters__
+ elif isinstance(i, TypeVar):
+ yield i
+
+
+def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
+ """Recursively replace all typevars with those from `parameters`.
+
+ Helper function for `_GenericAlias.__getitem__`.
+
+ """
+ args = []
+ for i in alias.__args__:
+ if isinstance(i, TypeVar):
+ value: Any = next(parameters)
+ elif isinstance(i, _GenericAlias):
+ value = _reconstruct_alias(i, parameters)
+ elif hasattr(i, "__parameters__"):
+ prm_tup = tuple(next(parameters) for _ in i.__parameters__)
+ value = i[prm_tup]
+ else:
+ value = i
+ args.append(value)
+
+ cls = type(alias)
+ return cls(alias.__origin__, tuple(args), alias.__unpacked__)
+
+
+class _GenericAlias:
+ """A python-based backport of the `types.GenericAlias` class.
+
+ E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and
+ ``t.__args__`` is ``(int,)``.
+
+ See Also
+ --------
+ :pep:`585`
+ The PEP responsible for introducing `types.GenericAlias`.
+
+ """
+
+ __slots__ = (
+ "__weakref__",
+ "_origin",
+ "_args",
+ "_parameters",
+ "_hash",
+ "_starred",
+ )
+
+ @property
+ def __origin__(self) -> type:
+ return super().__getattribute__("_origin")
+
+ @property
+ def __args__(self) -> tuple[object, ...]:
+ return super().__getattribute__("_args")
+
+ @property
+ def __parameters__(self) -> tuple[TypeVar, ...]:
+ """Type variables in the ``GenericAlias``."""
+ return super().__getattribute__("_parameters")
+
+ @property
+ def __unpacked__(self) -> bool:
+ return super().__getattribute__("_starred")
+
+ @property
+ def __typing_unpacked_tuple_args__(self) -> tuple[object, ...] | None:
+ # NOTE: This should return `__args__` if `__origin__` is a tuple,
+ # which should never be the case with how `_GenericAlias` is used
+ # within numpy
+ return None
+
+ def __init__(
+ self,
+ origin: type,
+ args: object | tuple[object, ...],
+ starred: bool = False,
+ ) -> None:
+ self._origin = origin
+ self._args = args if isinstance(args, tuple) else (args,)
+ self._parameters = tuple(_parse_parameters(self.__args__))
+ self._starred = starred
+
+ @property
+ def __call__(self) -> type[Any]:
+ return self.__origin__
+
+ def __reduce__(self: _T) -> tuple[
+ type[_T],
+ tuple[type[Any], tuple[object, ...], bool],
+ ]:
+ cls = type(self)
+ return cls, (self.__origin__, self.__args__, self.__unpacked__)
+
+ def __mro_entries__(self, bases: Iterable[object]) -> tuple[type[Any]]:
+ return (self.__origin__,)
+
+ def __dir__(self) -> list[str]:
+ """Implement ``dir(self)``."""
+ cls = type(self)
+ dir_origin = set(dir(self.__origin__))
+ return sorted(cls._ATTR_EXCEPTIONS | dir_origin)
+
+ def __hash__(self) -> int:
+ """Return ``hash(self)``."""
+ # Attempt to use the cached hash
+ try:
+ return super().__getattribute__("_hash")
+ except AttributeError:
+ self._hash: int = (
+ hash(self.__origin__) ^
+ hash(self.__args__) ^
+ hash(self.__unpacked__)
+ )
+ return super().__getattribute__("_hash")
+
+ def __instancecheck__(self, obj: object) -> NoReturn:
+ """Check if an `obj` is an instance."""
+ raise TypeError("isinstance() argument 2 cannot be a "
+ "parameterized generic")
+
+ def __subclasscheck__(self, cls: type) -> NoReturn:
+ """Check if a `cls` is a subclass."""
+ raise TypeError("issubclass() argument 2 cannot be a "
+ "parameterized generic")
+
+ def __repr__(self) -> str:
+ """Return ``repr(self)``."""
+ args = ", ".join(_to_str(i) for i in self.__args__)
+ origin = _to_str(self.__origin__)
+ prefix = "*" if self.__unpacked__ else ""
+ return f"{prefix}{origin}[{args}]"
+
+ def __getitem__(self: _T, key: object | tuple[object, ...]) -> _T:
+ """Return ``self[key]``."""
+ key_tup = key if isinstance(key, tuple) else (key,)
+
+ if len(self.__parameters__) == 0:
+ raise TypeError(f"There are no type variables left in {self}")
+ elif len(key_tup) > len(self.__parameters__):
+ raise TypeError(f"Too many arguments for {self}")
+ elif len(key_tup) < len(self.__parameters__):
+ raise TypeError(f"Too few arguments for {self}")
+
+ key_iter = iter(key_tup)
+ return _reconstruct_alias(self, key_iter)
+
+ def __eq__(self, value: object) -> bool:
+ """Return ``self == value``."""
+ if not isinstance(value, _GENERIC_ALIAS_TYPE):
+ return NotImplemented
+ return (
+ self.__origin__ == value.__origin__ and
+ self.__args__ == value.__args__ and
+ self.__unpacked__ == getattr(
+ value, "__unpacked__", self.__unpacked__
+ )
+ )
+
+ def __iter__(self: _T) -> Generator[_T, None, None]:
+ """Return ``iter(self)``."""
+ cls = type(self)
+ yield cls(self.__origin__, self.__args__, True)
+
+ _ATTR_EXCEPTIONS: ClassVar[frozenset[str]] = frozenset({
+ "__origin__",
+ "__args__",
+ "__parameters__",
+ "__mro_entries__",
+ "__reduce__",
+ "__reduce_ex__",
+ "__copy__",
+ "__deepcopy__",
+ "__unpacked__",
+ "__typing_unpacked_tuple_args__",
+ "__class__",
+ })
+
+ def __getattribute__(self, name: str) -> Any:
+ """Return ``getattr(self, name)``."""
+ # Pull the attribute from `__origin__` unless its
+ # name is in `_ATTR_EXCEPTIONS`
+ cls = type(self)
+ if name in cls._ATTR_EXCEPTIONS:
+ return super().__getattribute__(name)
+ return getattr(self.__origin__, name)
+
+
+# See `_GenericAlias.__eq__`
+if sys.version_info >= (3, 9):
+ _GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias)
+else:
+ _GENERIC_ALIAS_TYPE = (_GenericAlias,)
+
+ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
+
+if TYPE_CHECKING or sys.version_info >= (3, 9):
+ _DType = np.dtype[ScalarType]
+ NDArray = np.ndarray[Any, np.dtype[ScalarType]]
+else:
+ _DType = _GenericAlias(np.dtype, (ScalarType,))
+ NDArray = _GenericAlias(np.ndarray, (Any, _DType))
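+
+# A minimal usage sketch of the backport (illustrative only):
+#
+#     >>> alias = _GenericAlias(list, (int,))
+#     >>> alias
+#     list[int]
+#     >>> alias.__origin__, alias.__args__
+#     (<class 'list'>, (<class 'int'>,))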
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_nbit.py b/venv/lib/python3.9/site-packages/numpy/_typing/_nbit.py
new file mode 100644
index 00000000..b8d35db4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_nbit.py
@@ -0,0 +1,16 @@
+"""A module with the precisions of platform-specific `~numpy.number`s."""
+
+from typing import Any
+
+# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
+_NBitByte = Any
+_NBitShort = Any
+_NBitIntC = Any
+_NBitIntP = Any
+_NBitInt = Any
+_NBitLongLong = Any
+
+_NBitHalf = Any
+_NBitSingle = Any
+_NBitDouble = Any
+_NBitLongDouble = Any
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py b/venv/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py
new file mode 100644
index 00000000..789bf384
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py
@@ -0,0 +1,92 @@
+"""A module containing the `_NestedSequence` protocol."""
+
+from __future__ import annotations
+
+from typing import (
+ Any,
+ Iterator,
+ overload,
+ TypeVar,
+ Protocol,
+ runtime_checkable,
+)
+
+__all__ = ["_NestedSequence"]
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+@runtime_checkable
+class _NestedSequence(Protocol[_T_co]):
+ """A protocol for representing nested sequences.
+
+ Warning
+ -------
+ `_NestedSequence` currently does not work in combination with typevars,
+    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
+
+ See Also
+ --------
+ collections.abc.Sequence
+ ABCs for read-only and mutable :term:`sequences`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ >>> from __future__ import annotations
+
+ >>> from typing import TYPE_CHECKING
+ >>> import numpy as np
+ >>> from numpy._typing import _NestedSequence
+
+ >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
+ ... return np.asarray(seq).dtype
+
+ >>> a = get_dtype([1.0])
+ >>> b = get_dtype([[1.0]])
+ >>> c = get_dtype([[[1.0]]])
+ >>> d = get_dtype([[[[1.0]]]])
+
+ >>> if TYPE_CHECKING:
+ ... reveal_locals()
+ ... # note: Revealed local types are:
+ ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+
+ """
+
+ def __len__(self, /) -> int:
+ """Implement ``len(self)``."""
+ raise NotImplementedError
+
+ @overload
+ def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ...
+ @overload
+ def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ...
+
+ def __getitem__(self, index, /):
+ """Implement ``self[x]``."""
+ raise NotImplementedError
+
+ def __contains__(self, x: object, /) -> bool:
+ """Implement ``x in self``."""
+ raise NotImplementedError
+
+ def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
+ """Implement ``iter(self)``."""
+ raise NotImplementedError
+
+ def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
+ """Implement ``reversed(self)``."""
+ raise NotImplementedError
+
+ def count(self, value: Any, /) -> int:
+ """Return the number of occurrences of `value`."""
+ raise NotImplementedError
+
+ def index(self, value: Any, /) -> int:
+ """Return the first index of `value`."""
+ raise NotImplementedError
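+
+# Because the protocol is decorated with ``runtime_checkable``, nested
+# builtin sequences pass ``isinstance`` checks (a sketch; only the presence
+# of the methods above is verified, not element types):
+#
+#     >>> isinstance([[1.0], [2.0]], _NestedSequence)
+#     True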
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_scalars.py b/venv/lib/python3.9/site-packages/numpy/_typing/_scalars.py
new file mode 100644
index 00000000..516b996d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_scalars.py
@@ -0,0 +1,30 @@
+from typing import Union, Tuple, Any
+
+import numpy as np
+
+# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
+# `np.bytes_` are already subclasses of their builtin counterparts
+
+_CharLike_co = Union[str, bytes]
+
+# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
+# coerced into `<X>` (with the casting rule `same_kind`)
+_BoolLike_co = Union[bool, np.bool_]
+_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger]
+_IntLike_co = Union[_BoolLike_co, int, np.integer]
+_FloatLike_co = Union[_IntLike_co, float, np.floating]
+_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating]
+_TD64Like_co = Union[_IntLike_co, np.timedelta64]
+
+_NumberLike_co = Union[int, float, complex, np.number, np.bool_]
+_ScalarLike_co = Union[
+ int,
+ float,
+ complex,
+ str,
+ bytes,
+ np.generic,
+]
+
+# `_VoidLike_co` is technically not a scalar, but it's close enough
+_VoidLike_co = Union[Tuple[Any, ...], np.void]
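+
+# Illustration of the widening chain above (a sketch): each alias accepts
+# everything the previous one does, so ``True``, ``1``, ``1.0`` and
+# ``np.float32(1.0)`` are all valid ``_FloatLike_co`` values, while ``1j``
+# additionally requires ``_ComplexLike_co``.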
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_shape.py b/venv/lib/python3.9/site-packages/numpy/_typing/_shape.py
new file mode 100644
index 00000000..c28859b1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_shape.py
@@ -0,0 +1,6 @@
+from typing import Sequence, Tuple, Union, SupportsIndex
+
+_Shape = Tuple[int, ...]
+
+# Anything that can be coerced to a shape tuple
+_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
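+
+# Both call forms below satisfy ``_ShapeLike`` (illustrative, with
+# ``import numpy as np``):
+#
+#     >>> np.zeros(3)        # a bare SupportsIndex
+#     >>> np.zeros((2, 3))   # a Sequence[SupportsIndex]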
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi b/venv/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi
new file mode 100644
index 00000000..9f8e0d4e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi
@@ -0,0 +1,445 @@
+"""A module with private type-check-only `numpy.ufunc` subclasses.
+
+The signatures of the ufuncs are too varied to reasonably type
+with a single class. So instead, `ufunc` has been expanded into
+four private subclasses, one for each combination of
+`~ufunc.nin` and `~ufunc.nout`.
+
+"""
+
+from typing import (
+ Any,
+ Generic,
+ overload,
+ TypeVar,
+ Literal,
+ SupportsIndex,
+ Protocol,
+)
+
+from numpy import ufunc, _CastingKind, _OrderKACF
+from numpy.typing import NDArray
+
+from ._shape import _ShapeLike
+from ._scalars import _ScalarLike_co
+from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
+from ._dtype_like import DTypeLike
+
+_T = TypeVar("_T")
+_2Tuple = tuple[_T, _T]
+_3Tuple = tuple[_T, _T, _T]
+_4Tuple = tuple[_T, _T, _T, _T]
+
+_NTypes = TypeVar("_NTypes", bound=int)
+_IDType = TypeVar("_IDType", bound=Any)
+_NameType = TypeVar("_NameType", bound=str)
+
+
+class _SupportsArrayUFunc(Protocol):
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+
+
+# NOTE: In reality `extobj` should be a list of length 3 containing an
+# int, an int, and a callable, but there's no way to properly express
+# non-homogeneous lists.
+# Use `Any` over `Union` to avoid issues related to list invariance.
+
+# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for
+# ufuncs that don't accept two input arguments and return one output argument.
+# In such cases the respective methods are simply typed as `None`.
+
+# NOTE: Similarly, `at` won't be defined for ufuncs that return
+# multiple outputs; in such cases `at` is typed as `None`
+
+# NOTE: If 2 output types are returned then `out` must be a
+# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable
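+
+# For example (illustrative): ``np.add`` is published as a
+# ``_UFunc_Nin2_Nout1`` and therefore gets a fully typed ``reduce``, whereas
+# ``np.negative`` is a ``_UFunc_Nin1_Nout1`` whose ``reduce`` is typed as
+# ``None``, mirroring the ValueError it raises at runtime.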
+
+class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[1]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[2]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ out: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: _SupportsArrayUFunc,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+
+ def at(
+ self,
+ a: _SupportsArrayUFunc,
+ indices: _ArrayLikeInt_co,
+ /,
+ ) -> None: ...
+
+class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+ @property
+ def signature(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __x2: _ScalarLike_co,
+ out: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ def at(
+ self,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ b: ArrayLike,
+ /,
+ ) -> None: ...
+
+ def reduce(
+ self,
+ array: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ keepdims: bool = ...,
+ initial: Any = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+
+ def accumulate(
+ self,
+ array: ArrayLike,
+ axis: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ def reduceat(
+ self,
+ array: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ # Expand `**kwargs` into explicit keyword-only arguments
+ @overload
+ def outer(
+ self,
+ A: _ScalarLike_co,
+ B: _ScalarLike_co,
+ /, *,
+ out: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def outer( # type: ignore[misc]
+ self,
+ A: ArrayLike,
+ B: ArrayLike,
+ /, *,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[1]: ...
+ @property
+ def nout(self) -> Literal[2]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __out1: None = ...,
+ __out2: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[NDArray[Any]]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: _SupportsArrayUFunc,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+
+class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[2]: ...
+ @property
+ def nargs(self) -> Literal[4]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __x2: _ScalarLike_co,
+ __out1: None = ...,
+ __out2: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _4Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _4Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[NDArray[Any]]: ...
+
+class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+
+ # NOTE: In practice the only gufunc in the main namespace is `matmul`,
+ # so we can use its signature here
+ @property
+ def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+
+ # Scalar for 1D array-likes; ndarray otherwise
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: None = ...,
+ *,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ axes: list[_2Tuple[SupportsIndex]] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: NDArray[Any] | tuple[NDArray[Any]],
+ *,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ axes: list[_2Tuple[SupportsIndex]] = ...,
+ ) -> NDArray[Any]: ...
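+
+# Usage sketch: ``np.matmul`` is the gufunc described by this class, so the
+# attributes typed above can be observed at runtime:
+#
+#     >>> np.matmul.signature
+#     '(n?,k),(k,m?)->(n?,m?)'
+#     >>> np.matmul.nin, np.matmul.nout
+#     (2, 1)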
diff --git a/venv/lib/python3.9/site-packages/numpy/_typing/setup.py b/venv/lib/python3.9/site-packages/numpy/_typing/setup.py
new file mode 100644
index 00000000..24022fda
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_typing/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('_typing', parent_package, top_path)
+ config.add_data_files('*.pyi')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/_version.py b/venv/lib/python3.9/site-packages/numpy/_version.py
new file mode 100644
index 00000000..eda6f844
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/_version.py
@@ -0,0 +1,21 @@
+
+# This file was generated by 'versioneer.py' (0.26) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+import json
+
+version_json = '''
+{
+ "date": "2023-04-22T13:47:13-0400",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "14bb214bca49b167abc375fa873466a811e62102",
+ "version": "1.24.3"
+}
+''' # END VERSION_JSON
+
+
+def get_versions():
+ return json.loads(version_json)
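+
+# Usage sketch:
+#
+#     >>> get_versions()["version"]
+#     '1.24.3'
+#     >>> get_versions()["dirty"]
+#     False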
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/__init__.py b/venv/lib/python3.9/site-packages/numpy/array_api/__init__.py
new file mode 100644
index 00000000..5e58ee0a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/__init__.py
@@ -0,0 +1,377 @@
+"""
+A NumPy sub-namespace that conforms to the Python array API standard.
+
+This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
+is still considered experimental, and will issue a warning when imported.
+
+This is a proof-of-concept namespace that wraps the corresponding NumPy
+functions to give a conforming implementation of the Python array API standard
+(https://data-apis.github.io/array-api/latest/). The standard is currently in
+an RFC phase and comments on it are both welcome and encouraged. Comments
+should be made either at https://github.com/data-apis/array-api or at
+https://github.com/data-apis/consortium-feedback/discussions.
+
+NumPy already follows the proposed spec for the most part, so this module
+serves mostly as a thin wrapper around it. However, NumPy also implements a
+lot of behavior that is not included in the spec, so this serves as a
+restricted subset of the API. Only those functions that are part of the spec
+are included in this namespace, and all functions are given with the exact
+signature given in the spec, including the use of position-only arguments, and
+omitting any extra keyword arguments implemented by NumPy but not part of the
+spec. The behavior of some functions is also modified from the NumPy behavior
+to conform to the standard. Note that the underlying array object itself is
+wrapped in an Array() wrapper class, but is otherwise unchanged. This submodule
+is implemented in pure Python with no C extensions.
+
+The array API spec is designed as a "minimal API subset" and explicitly allows
+libraries to include behaviors not specified by it. But users of this module
+that intend to write portable code should be aware that only those behaviors
+that are listed in the spec are guaranteed to be implemented across libraries.
+Consequently, the NumPy implementation was chosen to be both conforming and
+minimal, so that users can use this implementation of the array API namespace
+and be sure that behaviors that it defines will be available in conforming
+namespaces from other libraries.
+
+A few notes about the current state of this submodule:
+
+- There is a test suite that tests modules against the array API standard at
+ https://github.com/data-apis/array-api-tests. The test suite is still a work
+ in progress, but the existing tests pass on this module, with a few
+ exceptions:
+
+ - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
+ not included here, as it requires a full implementation in NumPy proper
+ first.
+
+ The test suite is not yet complete, and even the tests that exist are not
+ guaranteed to give a comprehensive coverage of the spec. Therefore, when
+ reviewing and using this submodule, you should refer to the standard
+ documents themselves. There are some tests in numpy.array_api.tests, but
+ they primarily focus on things that are not tested by the official array API
+ test suite.
+
+- There is a custom array object, numpy.array_api.Array, which is returned by
+ all functions in this module. All functions in the array API namespace
+ implicitly assume that they will only receive this object as input. The only
+ way to create instances of this object is to use one of the array creation
+ functions. It does not have a public constructor on the object itself. The
+ object is a small wrapper class around numpy.ndarray. The main purpose of it
+ is to restrict the namespace of the array object to only those dtypes and
+ only those methods that are required by the spec, as well as to limit/change
+ certain behavior that differs in the spec. In particular:
+
+ - The array API namespace does not have scalar objects, only 0-D arrays.
+ Operations on Array that would create a scalar in NumPy create a 0-D
+ array.
+
+ - Indexing: Only a subset of indices supported by NumPy are required by the
+ spec. The Array object restricts indexing to only allow those types of
+ indices that are required by the spec. See the docstring of the
+ numpy.array_api.Array._validate_indices helper function for more
+ information.
+
+ - Type promotion: Some type promotion rules are different in the spec. In
+ particular, the spec does not have any value-based casting. The spec also
+ does not require cross-kind casting, like integer -> floating-point. Only
+ those promotions that are explicitly required by the array API
+ specification are allowed in this module. See NEP 47 for more info.
+
+- Functions do not automatically call asarray() on their input, and will not
+  work if the input type is not Array. The exceptions are the array creation
+  functions, and Python operators on the Array object, which accept Python
+  scalars of the same type as the array dtype.
+
+- All functions include type annotations, corresponding to those given in the
+ spec (see _typing.py for definitions of some custom types). These do not
+ currently fully pass mypy due to some limitations in mypy.
+
+- Dtype objects are just the NumPy dtype objects, e.g., float64 =
+ np.dtype('float64'). The spec does not require any behavior on these dtype
+ objects other than that they be accessible by name and be comparable by
+ equality, but it was considered too much extra complexity to create custom
+ objects to represent dtypes.
+
+- All places where the implementations in this submodule are known to deviate
+ from their corresponding functions in NumPy are marked with "# Note:"
+ comments.
+
+Still TODO in this module are:
+
+- DLPack support for numpy.ndarray is still in progress. See
+ https://github.com/numpy/numpy/pull/19083.
+
+- The copy=False keyword argument to asarray() is not yet implemented. This
+ requires support in numpy.asarray() first.
+
+- Some functions are not yet fully tested in the array API test suite, and may
+ require updates that are not yet known until the tests are written.
+
+- The spec is still in an RFC phase and may still have minor updates, which
+ will need to be reflected here.
+
+- Complex number support in array API spec is planned but not yet finalized,
+ as are the fft extension and certain linear algebra functions such as eig
+ that require complex dtypes.
+
+"""
+
+import warnings
+
+warnings.warn(
+ "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
+)
+
+__array_api_version__ = "2021.12"
+
+__all__ = ["__array_api_version__"]
+
+from ._constants import e, inf, nan, pi
+
+__all__ += ["e", "inf", "nan", "pi"]
+
+from ._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ tril,
+ triu,
+ zeros,
+ zeros_like,
+)
+
+__all__ += [
+ "asarray",
+ "arange",
+ "empty",
+ "empty_like",
+ "eye",
+ "from_dlpack",
+ "full",
+ "full_like",
+ "linspace",
+ "meshgrid",
+ "ones",
+ "ones_like",
+ "tril",
+ "triu",
+ "zeros",
+ "zeros_like",
+]
+
+from ._data_type_functions import (
+ astype,
+ broadcast_arrays,
+ broadcast_to,
+ can_cast,
+ finfo,
+ iinfo,
+ result_type,
+)
+
+__all__ += [
+ "astype",
+ "broadcast_arrays",
+ "broadcast_to",
+ "can_cast",
+ "finfo",
+ "iinfo",
+ "result_type",
+]
+
+from ._dtypes import (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+
+__all__ += [
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "float32",
+ "float64",
+ "bool",
+]
+
+from ._elementwise_functions import (
+ abs,
+ acos,
+ acosh,
+ add,
+ asin,
+ asinh,
+ atan,
+ atan2,
+ atanh,
+ bitwise_and,
+ bitwise_left_shift,
+ bitwise_invert,
+ bitwise_or,
+ bitwise_right_shift,
+ bitwise_xor,
+ ceil,
+ cos,
+ cosh,
+ divide,
+ equal,
+ exp,
+ expm1,
+ floor,
+ floor_divide,
+ greater,
+ greater_equal,
+ isfinite,
+ isinf,
+ isnan,
+ less,
+ less_equal,
+ log,
+ log1p,
+ log2,
+ log10,
+ logaddexp,
+ logical_and,
+ logical_not,
+ logical_or,
+ logical_xor,
+ multiply,
+ negative,
+ not_equal,
+ positive,
+ pow,
+ remainder,
+ round,
+ sign,
+ sin,
+ sinh,
+ square,
+ sqrt,
+ subtract,
+ tan,
+ tanh,
+ trunc,
+)
+
+__all__ += [
+ "abs",
+ "acos",
+ "acosh",
+ "add",
+ "asin",
+ "asinh",
+ "atan",
+ "atan2",
+ "atanh",
+ "bitwise_and",
+ "bitwise_left_shift",
+ "bitwise_invert",
+ "bitwise_or",
+ "bitwise_right_shift",
+ "bitwise_xor",
+ "ceil",
+ "cos",
+ "cosh",
+ "divide",
+ "equal",
+ "exp",
+ "expm1",
+ "floor",
+ "floor_divide",
+ "greater",
+ "greater_equal",
+ "isfinite",
+ "isinf",
+ "isnan",
+ "less",
+ "less_equal",
+ "log",
+ "log1p",
+ "log2",
+ "log10",
+ "logaddexp",
+ "logical_and",
+ "logical_not",
+ "logical_or",
+ "logical_xor",
+ "multiply",
+ "negative",
+ "not_equal",
+ "positive",
+ "pow",
+ "remainder",
+ "round",
+ "sign",
+ "sin",
+ "sinh",
+ "square",
+ "sqrt",
+ "subtract",
+ "tan",
+ "tanh",
+ "trunc",
+]
+
+# linalg is an extension in the array API spec, which is a sub-namespace. Only
+# a subset of functions in it are imported into the top-level namespace.
+from . import linalg
+
+__all__ += ["linalg"]
+
+from .linalg import matmul, tensordot, matrix_transpose, vecdot
+
+__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
+
+from ._manipulation_functions import (
+ concat,
+ expand_dims,
+ flip,
+ permute_dims,
+ reshape,
+ roll,
+ squeeze,
+ stack,
+)
+
+__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
+
+from ._searching_functions import argmax, argmin, nonzero, where
+
+__all__ += ["argmax", "argmin", "nonzero", "where"]
+
+from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
+
+__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
+
+from ._sorting_functions import argsort, sort
+
+__all__ += ["argsort", "sort"]
+
+from ._statistical_functions import max, mean, min, prod, std, sum, var
+
+__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
+
+from ._utility_functions import all, any
+
+__all__ += ["all", "any"]
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_array_object.py b/venv/lib/python3.9/site-packages/numpy/array_api/_array_object.py
new file mode 100644
index 00000000..c4746fad
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_array_object.py
@@ -0,0 +1,1118 @@
+"""
+Wrapper class around the ndarray object for the array API standard.
+
+The array API standard defines some behaviors differently than ndarray; in
+particular, type promotion rules are different (the standard has no
+value-based casting).
+array methods and functionalities than are implemented on ndarray. Since the
+goal of the array_api namespace is to be a minimal implementation of the array
+API standard, we need to define a separate wrapper class for the array_api
+namespace.
+
+The standard compliant class is only a wrapper class. It is *not* a subclass
+of ndarray.
+"""
+
+from __future__ import annotations
+
+import operator
+from enum import IntEnum
+from ._creation_functions import asarray
+from ._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _floating_dtypes,
+ _numeric_dtypes,
+ _result_type,
+ _dtype_categories,
+)
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex
+import types
+
+if TYPE_CHECKING:
+ from ._typing import Any, PyCapsule, Device, Dtype
+ import numpy.typing as npt
+
+import numpy as np
+
+from numpy import array_api
+
+
+class Array:
+ """
+ n-d array object for the array API namespace.
+
+ See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
+ information.
+
+ This is a wrapper around numpy.ndarray that restricts the usage to only
+ those things that are required by the array API namespace. Note,
+ attributes on this object that start with a single underscore are not part
+ of the API specification and should only be used internally. This object
+ should not be constructed directly. Rather, use one of the creation
+ functions, such as asarray().
+
+ """
+ _array: np.ndarray
+
+ # Use a custom constructor instead of __init__, as manually initializing
+ # this class is not supported API.
+ @classmethod
+ def _new(cls, x, /):
+ """
+ This is a private method for initializing the array API Array
+ object.
+
+ Functions outside of the array_api submodule should not use this
+ method. Use one of the creation functions instead, such as
+ ``asarray``.
+
+ """
+ obj = super().__new__(cls)
+ # Note: The spec does not have array scalars, only 0-D arrays.
+ if isinstance(x, np.generic):
+ # Convert the array scalar to a 0-D array
+ x = np.asarray(x)
+ if x.dtype not in _all_dtypes:
+ raise TypeError(
+ f"The array_api namespace does not support the dtype '{x.dtype}'"
+ )
+ obj._array = x
+ return obj
+
+ # Prevent Array() from working
+ def __new__(cls, *args, **kwargs):
+ raise TypeError(
+ "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
+ )
+
+ # These functions are not required by the spec, but are implemented for
+ # the sake of usability.
+
+ def __str__(self: Array, /) -> str:
+ """
+ Performs the operation __str__.
+ """
+ return self._array.__str__().replace("array", "Array")
+
+ def __repr__(self: Array, /) -> str:
+ """
+ Performs the operation __repr__.
+ """
+ suffix = f", dtype={self.dtype.name})"
+ if 0 in self.shape:
+ prefix = "empty("
+ mid = str(self.shape)
+ else:
+ prefix = "Array("
+ mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
+ return prefix + mid + suffix
+
+ # This function is not required by the spec, but we implement it here for
+ # convenience so that np.asarray(np.array_api.Array) will work.
+ def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
+ """
+ Warning: this method is NOT part of the array API spec. Implementers
+ of other libraries need not include it, and users should not assume it
+ will be present in other implementations.
+
+ """
+ return np.asarray(self._array, dtype=dtype)
+
+ # These are various helper functions to make the array behavior match the
+ # spec in places where it either deviates from or is more strict than
+ # NumPy behavior
+
+ def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:
+ """
+ Helper function for operators to only allow specific input dtypes
+
+ Use like
+
+ other = self._check_allowed_dtypes(other, 'numeric', '__add__')
+ if other is NotImplemented:
+ return other
+ """
+
+ if self.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ if isinstance(other, (int, float, bool)):
+ other = self._promote_scalar(other)
+ elif isinstance(other, Array):
+ if other.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ else:
+ return NotImplemented
+
+ # This will raise TypeError for type combinations that are not allowed
+ # to promote in the spec (even if the NumPy array operator would
+ # promote them).
+ res_dtype = _result_type(self.dtype, other.dtype)
+ if op.startswith("__i"):
+ # Note: NumPy will allow in-place operators in some cases where
+ # the type promoted operator does not match the left-hand side
+ # operand. For example,
+
+ # >>> a = np.array(1, dtype=np.int8)
+ # >>> a += np.array(1, dtype=np.int16)
+
+ # The spec explicitly disallows this.
+ if res_dtype != self.dtype:
+ raise TypeError(
+ f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
+ )
+
+ return other
+
+ # Helper function to match the type promotion rules in the spec
+ def _promote_scalar(self, scalar):
+ """
+ Returns a promoted version of a Python scalar appropriate for use with
+ operations on self.
+
+ This may raise an OverflowError in cases where the scalar is an
+ integer that is too large to fit in a NumPy integer dtype, or
+ TypeError when the scalar type is incompatible with the dtype of self.
+ """
+ # Note: Only Python scalar types that match the array dtype are
+ # allowed.
+ if isinstance(scalar, bool):
+ if self.dtype not in _boolean_dtypes:
+ raise TypeError(
+ "Python bool scalars can only be promoted with bool arrays"
+ )
+ elif isinstance(scalar, int):
+ if self.dtype in _boolean_dtypes:
+ raise TypeError(
+ "Python int scalars cannot be promoted with bool arrays"
+ )
+ elif isinstance(scalar, float):
+ if self.dtype not in _floating_dtypes:
+ raise TypeError(
+ "Python float scalars can only be promoted with floating-point arrays."
+ )
+ else:
+ raise TypeError("'scalar' must be a Python scalar")
+
+ # Note: scalars are unconditionally cast to the same dtype as the
+ # array.
+
+ # Note: the spec only specifies integer-dtype/int promotion
+ # behavior for integers within the bounds of the integer dtype.
+ # Outside of those bounds we use the default NumPy behavior (either
+ # cast or raise OverflowError).
+ return Array._new(np.array(scalar, self.dtype))
+
+ @staticmethod
+ def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:
+ """
+ Normalize inputs to two arg functions to fix type promotion rules
+
+ NumPy deviates from the spec type promotion rules in cases where one
+ argument is 0-dimensional and the other is not. For example:
+
+ >>> import numpy as np
+ >>> a = np.array([1.0], dtype=np.float32)
+ >>> b = np.array(1.0, dtype=np.float64)
+ >>> np.add(a, b) # The spec says this should be float64
+ array([2.], dtype=float32)
+
+        To fix this, we add a dimension to the 0-dimensional array before
+        passing it through. This works because a dimension would be added
+        anyway by broadcasting, so the resulting shape is the same, but it
+        keeps NumPy from skipping the dtype promotion.
+ """
+ # Another option would be to use signature=(x1.dtype, x2.dtype, None),
+ # but that only works for ufuncs, so we would have to call the ufuncs
+ # directly in the operator methods. One should also note that this
+ # sort of trick wouldn't work for functions like searchsorted, which
+ # don't do normal broadcasting, but there aren't any functions like
+ # that in the array API namespace.
+ if x1.ndim == 0 and x2.ndim != 0:
+ # The _array[None] workaround was chosen because it is relatively
+ # performant. broadcast_to(x1._array, x2.shape) is much slower. We
+ # could also manually type promote x2, but that is more complicated
+ # and about the same performance as this.
+ x1 = Array._new(x1._array[None])
+ elif x2.ndim == 0 and x1.ndim != 0:
+ x2 = Array._new(x2._array[None])
+ return (x1, x2)
+
+ # Note: A large fraction of allowed indices are disallowed here (see the
+ # docstring below)
+ def _validate_index(self, key):
+ """
+ Validate an index according to the array API.
+
+ The array API specification only requires a subset of indices that are
+ supported by NumPy. This function will reject any index that is
+ allowed by NumPy but not required by the array API specification. We
+ always raise ``IndexError`` on such indices (the spec does not require
+ any specific behavior on them, but this makes the NumPy array API
+ namespace a minimal implementation of the spec). See
+ https://data-apis.org/array-api/latest/API_specification/indexing.html
+ for the full list of required indexing behavior
+
+        This function raises IndexError if the index ``key`` is invalid. It
+        only raises ``IndexError`` on indices that are not already rejected by
+        NumPy, as NumPy will already raise the appropriate error on such
+        indices.
+
+        The following cases are allowed by NumPy, but not specified by the
+        array API specification, and are therefore rejected here:
+
+        - Indices must not rely on an implicit ellipsis at the end. That is,
+          every axis of an array must be explicitly indexed or an ellipsis
+          included. This behaviour is sometimes referred to as flat indexing.
+
+ - The start and stop of a slice may not be out of bounds. In
+ particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
+ following are allowed:
+
+ - ``i`` or ``j`` omitted (``None``).
+ - ``-n <= i <= max(0, n - 1)``.
+ - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+ - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+ - Boolean array indices are not allowed as part of a larger tuple
+ index.
+
+ - Integer array indices are not allowed (with the exception of 0-D
+ arrays, which are treated the same as scalars).
+
+ Additionally, it should be noted that indices that would return a
+ scalar in NumPy will return a 0-D array. Array scalars are not allowed
+ in the specification, only 0-D arrays. This is done in the
+ ``Array._new`` constructor, not this function.
+
+ """
+ _key = key if isinstance(key, tuple) else (key,)
+ for i in _key:
+ if isinstance(i, bool) or not (
+ isinstance(i, SupportsIndex) # i.e. ints
+ or isinstance(i, slice)
+ or i == Ellipsis
+ or i is None
+ or isinstance(i, Array)
+ or isinstance(i, np.ndarray)
+ ):
+ raise IndexError(
+ f"Single-axes index {i} has {type(i)=}, but only "
+ "integers, slices (:), ellipsis (...), newaxis (None), "
+ "zero-dimensional integer arrays and boolean arrays "
+ "are specified in the Array API."
+ )
+
+ nonexpanding_key = []
+ single_axes = []
+ n_ellipsis = 0
+ key_has_mask = False
+ for i in _key:
+ if i is not None:
+ nonexpanding_key.append(i)
+ if isinstance(i, Array) or isinstance(i, np.ndarray):
+ if i.dtype in _boolean_dtypes:
+ key_has_mask = True
+ single_axes.append(i)
+ else:
+ # i must not be an array here, to avoid elementwise equals
+ if i == Ellipsis:
+ n_ellipsis += 1
+ else:
+ single_axes.append(i)
+
+ n_single_axes = len(single_axes)
+ if n_ellipsis > 1:
+ return # handled by ndarray
+ elif n_ellipsis == 0:
+ # Note boolean masks must be the sole index, which we check for
+ # later on.
+ if not key_has_mask and n_single_axes < self.ndim:
+ raise IndexError(
+ f"{self.ndim=}, but the multi-axes index only specifies "
+ f"{n_single_axes} dimensions. If this was intentional, "
+ "add a trailing ellipsis (...) which expands into as many "
+ "slices (:) as necessary - this is what np.ndarray arrays "
+ "implicitly do, but such flat indexing behaviour is not "
+ "specified in the Array API."
+ )
+
+ if n_ellipsis == 0:
+ indexed_shape = self.shape
+ else:
+ ellipsis_start = None
+ for pos, i in enumerate(nonexpanding_key):
+ if not (isinstance(i, Array) or isinstance(i, np.ndarray)):
+ if i == Ellipsis:
+ ellipsis_start = pos
+ break
+ assert ellipsis_start is not None # sanity check
+ ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)
+ indexed_shape = (
+ self.shape[:ellipsis_start] + self.shape[ellipsis_end:]
+ )
+ for i, side in zip(single_axes, indexed_shape):
+ if isinstance(i, slice):
+ if side == 0:
+ f_range = "0 (or None)"
+ else:
+ f_range = f"between -{side} and {side - 1} (or None)"
+ if i.start is not None:
+ try:
+ start = operator.index(i.start)
+ except TypeError:
+ pass # handled by ndarray
+ else:
+ if not (-side <= start <= side):
+ raise IndexError(
+ f"Slice {i} contains {start=}, but should be "
+ f"{f_range} for an axis of size {side} "
+ "(out-of-bounds starts are not specified in "
+ "the Array API)"
+ )
+ if i.stop is not None:
+ try:
+ stop = operator.index(i.stop)
+ except TypeError:
+ pass # handled by ndarray
+ else:
+ if not (-side <= stop <= side):
+ raise IndexError(
+ f"Slice {i} contains {stop=}, but should be "
+ f"{f_range} for an axis of size {side} "
+ "(out-of-bounds stops are not specified in "
+ "the Array API)"
+ )
+ elif isinstance(i, Array):
+ if i.dtype in _boolean_dtypes and len(_key) != 1:
+ assert isinstance(key, tuple) # sanity check
+ raise IndexError(
+ f"Single-axes index {i} is a boolean array and "
+ f"{len(key)=}, but masking is only specified in the "
+ "Array API when the array is the sole index."
+ )
+ elif i.dtype in _integer_dtypes and i.ndim != 0:
+ raise IndexError(
+ f"Single-axes index {i} is a non-zero-dimensional "
+ "integer array, but advanced integer indexing is not "
+ "specified in the Array API."
+ )
+ elif isinstance(i, tuple):
+ raise IndexError(
+ f"Single-axes index {i} is a tuple, but nested tuple "
+ "indices are not specified in the Array API."
+ )
+
+ # Everything below this line is required by the spec.
+
+ def __abs__(self: Array, /) -> Array:
+ """
+ Performs the operation __abs__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __abs__")
+ res = self._array.__abs__()
+ return self.__class__._new(res)
+
+ def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __add__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__add__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__add__(other._array)
+ return self.__class__._new(res)
+
+ def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __and__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__and__(other._array)
+ return self.__class__._new(res)
+
+ def __array_namespace__(
+ self: Array, /, *, api_version: Optional[str] = None
+ ) -> types.ModuleType:
+ if api_version is not None and not api_version.startswith("2021."):
+ raise ValueError(f"Unrecognized array API version: {api_version!r}")
+ return array_api
+
+ def __bool__(self: Array, /) -> bool:
+ """
+ Performs the operation __bool__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("bool is only allowed on arrays with 0 dimensions")
+ if self.dtype not in _boolean_dtypes:
+ raise ValueError("bool is only allowed on boolean arrays")
+ res = self._array.__bool__()
+ return res
+
+ def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
+ """
+ Performs the operation __dlpack__.
+ """
+ return self._array.__dlpack__(stream=stream)
+
+ def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
+ """
+ Performs the operation __dlpack_device__.
+ """
+ # Note: device support is required for this
+ return self._array.__dlpack_device__()
+
+ def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __eq__.
+ """
+ # Even though "all" dtypes are allowed, we still require them to be
+ # promotable with each other.
+ other = self._check_allowed_dtypes(other, "all", "__eq__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__eq__(other._array)
+ return self.__class__._new(res)
+
+ def __float__(self: Array, /) -> float:
+ """
+ Performs the operation __float__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("float is only allowed on arrays with 0 dimensions")
+ if self.dtype not in _floating_dtypes:
+ raise ValueError("float is only allowed on floating-point arrays")
+ res = self._array.__float__()
+ return res
+
+ def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __floordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__floordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ge__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ge__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ge__(other._array)
+ return self.__class__._new(res)
+
+ def __getitem__(
+ self: Array,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ /,
+ ) -> Array:
+ """
+ Performs the operation __getitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ self._validate_index(key)
+ if isinstance(key, Array):
+ # Indexing self._array with array_api arrays can be erroneous
+ key = key._array
+ res = self._array.__getitem__(key)
+ return self._new(res)
+
+ def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __gt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__gt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__gt__(other._array)
+ return self.__class__._new(res)
+
+ def __int__(self: Array, /) -> int:
+ """
+ Performs the operation __int__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("int is only allowed on arrays with 0 dimensions")
+ if self.dtype not in _integer_dtypes:
+ raise ValueError("int is only allowed on integer arrays")
+ res = self._array.__int__()
+ return res
+
+ def __index__(self: Array, /) -> int:
+ """
+ Performs the operation __index__.
+ """
+ res = self._array.__index__()
+ return res
+
+ def __invert__(self: Array, /) -> Array:
+ """
+ Performs the operation __invert__.
+ """
+ if self.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
+ res = self._array.__invert__()
+ return self.__class__._new(res)
+
+ def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __le__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__le__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__le__(other._array)
+ return self.__class__._new(res)
+
+ def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __lshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__lshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lshift__(other._array)
+ return self.__class__._new(res)
+
+ def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __lt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__lt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lt__(other._array)
+ return self.__class__._new(res)
+
+ def __matmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __matmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__matmul__(other._array)
+ return self.__class__._new(res)
+
+ def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mod__(other._array)
+ return self.__class__._new(res)
+
+ def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mul__(other._array)
+ return self.__class__._new(res)
+
+ def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __ne__.
+ """
+ other = self._check_allowed_dtypes(other, "all", "__ne__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ne__(other._array)
+ return self.__class__._new(res)
+
+ def __neg__(self: Array, /) -> Array:
+ """
+ Performs the operation __neg__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __neg__")
+ res = self._array.__neg__()
+ return self.__class__._new(res)
+
+ def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __or__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__or__(other._array)
+ return self.__class__._new(res)
+
+ def __pos__(self: Array, /) -> Array:
+ """
+ Performs the operation __pos__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __pos__")
+ res = self._array.__pos__()
+ return self.__class__._new(res)
+
+ def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __pow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "numeric", "__pow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow type promotion rules for 0-d
+ # arrays, so we use pow() here instead.
+ return pow(self, other)
+
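The note above is easiest to see with 0-d operands; a minimal sketch of the guarantee that routing through pow() preserves (assumes `numpy.array_api` is importable, as in NumPy 1.22+):

```python
import numpy.array_api as xp  # experimental; emits a UserWarning on import

x = xp.asarray(2, dtype=xp.int8)  # 0-d int8 array
y = xp.asarray(3, dtype=xp.int8)  # 0-d int8 array

# Per the spec, the result dtype depends only on the operand dtypes, never
# on their values, even for 0-d arrays; pow() enforces this normalization.
assert (x ** y).dtype == xp.int8
```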
+ def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rshift__(other._array)
+ return self.__class__._new(res)
+
+ def __setitem__(
+ self,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ value: Union[int, float, bool, Array],
+ /,
+ ) -> None:
+ """
+ Performs the operation __setitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ self._validate_index(key)
+ if isinstance(key, Array):
+            # Indexing self._array directly with an array_api Array is not
+            # supported, so unwrap it to the underlying np.ndarray first.
+ key = key._array
+ self._array.__setitem__(key, asarray(value)._array)
+
+ def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __sub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__sub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__sub__(other._array)
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __truediv__ should
+ # not accept int.
+ def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __truediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__truediv__(other._array)
+ return self.__class__._new(res)
+
+ def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __xor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__xor__(other._array)
+ return self.__class__._new(res)
+
+ def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __iadd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
+ if other is NotImplemented:
+ return other
+ self._array.__iadd__(other._array)
+ return self
+
+ def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __radd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__radd__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__radd__(other._array)
+ return self.__class__._new(res)
+
+ def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __iand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
+ if other is NotImplemented:
+ return other
+ self._array.__iand__(other._array)
+ return self
+
+ def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rand__(other._array)
+ return self.__class__._new(res)
+
+ def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ifloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
+ if other is NotImplemented:
+ return other
+ self._array.__ifloordiv__(other._array)
+ return self
+
+ def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rfloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rfloordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __ilshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__ilshift__(other._array)
+ return self
+
+ def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rlshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rlshift__(other._array)
+ return self.__class__._new(res)
+
+ def __imatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __imatmul__.
+ """
+ # Note: NumPy does not implement __imatmul__.
+
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
+ if other is NotImplemented:
+ return other
+
+ # __imatmul__ can only be allowed when it would not change the shape
+ # of self.
+ other_shape = other.shape
+ if self.shape == () or other_shape == ():
+ raise ValueError("@= requires at least one dimension")
+ if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
+ raise ValueError("@= cannot change the shape of the input array")
+ self._array[:] = self._array.__matmul__(other._array)
+ return self
+
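A short illustration of the shape rule enforced above: `@=` is accepted only when the right operand is square in its last two dimensions, so the left operand keeps its shape (a sketch, assuming `numpy.array_api` is importable):

```python
import numpy.array_api as xp  # experimental; emits a UserWarning on import

a = xp.ones((2, 3))
a @= xp.ones((3, 3))      # (3, 3) is square: result is still (2, 3)
assert a.shape == (2, 3)

try:
    a @= xp.ones((3, 4))  # (2, 3) @ (3, 4) -> (2, 4): shape would change
except ValueError as exc:
    print(exc)            # "@= cannot change the shape of the input array"
```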
+ def __rmatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __rmatmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__rmatmul__(other._array)
+ return self.__class__._new(res)
+
+ def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imod__")
+ if other is NotImplemented:
+ return other
+ self._array.__imod__(other._array)
+ return self
+
+ def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmod__(other._array)
+ return self.__class__._new(res)
+
+ def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imul__")
+ if other is NotImplemented:
+ return other
+ self._array.__imul__(other._array)
+ return self
+
+ def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmul__(other._array)
+ return self.__class__._new(res)
+
+ def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ior__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
+ if other is NotImplemented:
+ return other
+ self._array.__ior__(other._array)
+ return self
+
+ def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ror__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ror__(other._array)
+ return self.__class__._new(res)
+
+ def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ipow__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ipow__")
+ if other is NotImplemented:
+ return other
+ self._array.__ipow__(other._array)
+ return self
+
+ def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rpow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "numeric", "__rpow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow the spec type promotion rules
+ # for 0-d arrays, so we use pow() here instead.
+ return pow(other, self)
+
+ def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __irshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__irshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__irshift__(other._array)
+ return self
+
+ def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rrshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rrshift__(other._array)
+ return self.__class__._new(res)
+
+ def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __isub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__isub__")
+ if other is NotImplemented:
+ return other
+ self._array.__isub__(other._array)
+ return self
+
+ def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rsub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rsub__(other._array)
+ return self.__class__._new(res)
+
+ def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __itruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
+ if other is NotImplemented:
+ return other
+ self._array.__itruediv__(other._array)
+ return self
+
+ def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rtruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rtruediv__(other._array)
+ return self.__class__._new(res)
+
+ def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ixor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
+ if other is NotImplemented:
+ return other
+ self._array.__ixor__(other._array)
+ return self
+
+ def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rxor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rxor__(other._array)
+ return self.__class__._new(res)
+
+    def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
+        """
+        Array API compatible device transfer. Only the "cpu" device is
+        supported by this implementation.
+        """
+        if stream is not None:
+            raise ValueError("The stream argument to to_device() is not supported")
+        if device == "cpu":
+            return self
+        raise ValueError(f"Unsupported device {device!r}")
+
+ @property
+ def dtype(self) -> Dtype:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
+
+ See its docstring for more information.
+ """
+ return self._array.dtype
+
+ @property
+ def device(self) -> Device:
+ return "cpu"
+
+ # Note: mT is new in array API spec (see matrix_transpose)
+ @property
+ def mT(self) -> Array:
+ from .linalg import matrix_transpose
+ return matrix_transpose(self)
+
+ @property
+ def ndim(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
+
+ See its docstring for more information.
+ """
+ return self._array.ndim
+
+ @property
+ def shape(self) -> Tuple[int, ...]:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
+
+ See its docstring for more information.
+ """
+ return self._array.shape
+
+ @property
+ def size(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
+
+ See its docstring for more information.
+ """
+ return self._array.size
+
+ @property
+ def T(self) -> Array:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
+
+ See its docstring for more information.
+ """
+ # Note: T only works on 2-dimensional arrays. See the corresponding
+ # note in the specification:
+ # https://data-apis.org/array-api/latest/API_specification/array_object.html#t
+ if self.ndim != 2:
+ raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
+ return self.__class__._new(self._array.T)
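A usage sketch of the restriction above: `.T` works only on exactly 2-D arrays, while `.mT` transposes stacks of matrices (assumes `numpy.array_api` is importable):

```python
import numpy.array_api as xp  # experimental; emits a UserWarning on import

m = xp.ones((2, 3))
assert m.T.shape == (3, 2)          # 2-D: .T is allowed

stack = xp.ones((4, 2, 3))
assert stack.mT.shape == (4, 3, 2)  # transposes only the last two dims

try:
    stack.T                         # 3-D: rejected, per the note above
except ValueError:
    pass
```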
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_constants.py b/venv/lib/python3.9/site-packages/numpy/array_api/_constants.py
new file mode 100644
index 00000000..9541941e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_constants.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+e = np.e
+inf = np.inf
+nan = np.nan
+pi = np.pi
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py
new file mode 100644
index 00000000..3b014d37
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py
@@ -0,0 +1,351 @@
+from __future__ import annotations
+
+
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import (
+ Array,
+ Device,
+ Dtype,
+ NestedSequence,
+ SupportsBufferProtocol,
+ )
+ from collections.abc import Sequence
+from ._dtypes import _all_dtypes
+
+import numpy as np
+
+
+def _check_valid_dtype(dtype):
+    # Note: Only the dtype objects themselves may be spelled here; other
+    # NumPy-style spellings (strings, Python types, etc.) are rejected.
+
+ # We use this instead of "dtype in _all_dtypes" because the dtype objects
+ # define equality with the sorts of things we want to disallow.
+ for d in (None,) + _all_dtypes:
+ if dtype is d:
+ return
+ raise ValueError("dtype must be one of the supported dtypes")
+
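The identity loop above matters because NumPy dtype objects compare equal to strings and other dtype-like spellings, so a plain membership test would accept exactly the inputs this function is meant to reject. A small demonstration with plain NumPy:

```python
import numpy as np

dtypes = (np.dtype("int8"), np.dtype("float64"))  # abridged stand-in

s = "int8"
assert s in dtypes                      # True: `in` tests with ==
assert all(s is not d for d in dtypes)  # the identity test used above rejects it
```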
+
+def asarray(
+ obj: Union[
+ Array,
+ bool,
+ int,
+ float,
+ NestedSequence[bool | int | float],
+ SupportsBufferProtocol,
+ ],
+ /,
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+ copy: Optional[Union[bool, np._CopyMode]] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.
+
+ See its docstring for more information.
+ """
+ # _array_object imports in this file are inside the functions to avoid
+ # circular imports
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ if copy in (False, np._CopyMode.IF_NEEDED):
+ # Note: copy=False is not yet implemented in np.asarray
+ raise NotImplementedError("copy=False is not yet implemented")
+ if isinstance(obj, Array):
+ if dtype is not None and obj.dtype != dtype:
+ copy = True
+ if copy in (True, np._CopyMode.ALWAYS):
+ return Array._new(np.array(obj._array, copy=True, dtype=dtype))
+ return obj
+    if dtype is None and isinstance(obj, int) and (obj >= 2 ** 64 or obj < -(2 ** 63)):
+ # Give a better error message in this case. NumPy would convert this
+ # to an object array. TODO: This won't handle large integers in lists.
+ raise OverflowError("Integer out of bounds for array dtypes")
+ res = np.asarray(obj, dtype=dtype)
+ return Array._new(res)
+
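A quick sketch of the integer-bounds guard above; without it, NumPy would silently produce an object-dtype array, which the array API does not define (assumes `numpy.array_api` is importable):

```python
import numpy.array_api as xp  # experimental; emits a UserWarning on import

xp.asarray(2 ** 63 - 1)   # largest int64 value: accepted
try:
    xp.asarray(2 ** 100)  # too large for any supported integer dtype
except OverflowError as exc:
    print(exc)            # "Integer out of bounds for array dtypes"
```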
+
+def arange(
+ start: Union[int, float],
+ /,
+ stop: Optional[Union[int, float]] = None,
+ step: Union[int, float] = 1,
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype))
+
+
+def empty(
+ shape: Union[int, Tuple[int, ...]],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.empty(shape, dtype=dtype))
+
+
+def empty_like(
+ x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.empty_like(x._array, dtype=dtype))
+
+
+def eye(
+ n_rows: int,
+ n_cols: Optional[int] = None,
+ /,
+ *,
+ k: int = 0,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
+
+
+def from_dlpack(x: object, /) -> Array:
+ from ._array_object import Array
+
+ return Array._new(np.from_dlpack(x))
+
+
+def full(
+ shape: Union[int, Tuple[int, ...]],
+ fill_value: Union[int, float],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.full <numpy.full>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ if isinstance(fill_value, Array) and fill_value.ndim == 0:
+ fill_value = fill_value._array
+ res = np.full(shape, fill_value, dtype=dtype)
+ if res.dtype not in _all_dtypes:
+ # This will happen if the fill value is not something that NumPy
+ # coerces to one of the acceptable dtypes.
+ raise TypeError("Invalid input to full")
+ return Array._new(res)
+
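An illustration of the dtype check above: NumPy happily infers, e.g., a string dtype for the fill value, which this wrapper rejects (a sketch, assuming `numpy.array_api` is importable):

```python
import numpy.array_api as xp  # experimental; emits a UserWarning on import

assert xp.full((2, 2), 3.0).dtype == xp.float64
try:
    xp.full((2, 2), "x")  # np.full would infer dtype('<U1'), outside the spec
except TypeError:
    pass                  # "Invalid input to full"
```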
+
+def full_like(
+ x: Array,
+ /,
+ fill_value: Union[int, float],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ res = np.full_like(x._array, fill_value, dtype=dtype)
+ if res.dtype not in _all_dtypes:
+ # This will happen if the fill value is not something that NumPy
+ # coerces to one of the acceptable dtypes.
+ raise TypeError("Invalid input to full_like")
+ return Array._new(res)
+
+
+def linspace(
+ start: Union[int, float],
+ stop: Union[int, float],
+ /,
+ num: int,
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+ endpoint: bool = True,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
+
+
+def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
+ """
+ Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ # Note: unlike np.meshgrid, only inputs with all the same dtype are
+ # allowed
+
+ if len({a.dtype for a in arrays}) > 1:
+ raise ValueError("meshgrid inputs must all have the same dtype")
+
+ return [
+ Array._new(array)
+ for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
+ ]
+
+
+def ones(
+ shape: Union[int, Tuple[int, ...]],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.ones(shape, dtype=dtype))
+
+
+def ones_like(
+ x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.ones_like(x._array, dtype=dtype))
+
+
+def tril(x: Array, /, *, k: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ if x.ndim < 2:
+ # Note: Unlike np.tril, x must be at least 2-D
+ raise ValueError("x must be at least 2-dimensional for tril")
+ return Array._new(np.tril(x._array, k=k))
+
+
+def triu(x: Array, /, *, k: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ if x.ndim < 2:
+ # Note: Unlike np.triu, x must be at least 2-D
+ raise ValueError("x must be at least 2-dimensional for triu")
+ return Array._new(np.triu(x._array, k=k))
+
+
+def zeros(
+ shape: Union[int, Tuple[int, ...]],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.zeros(shape, dtype=dtype))
+
+
+def zeros_like(
+ x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.zeros_like(x._array, dtype=dtype))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py
new file mode 100644
index 00000000..7026bd48
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py
@@ -0,0 +1,146 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _all_dtypes, _result_type
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import Dtype
+ from collections.abc import Sequence
+
+import numpy as np
+
+
+# Note: astype is a function, not an array method as in NumPy.
+def astype(x: Array, dtype: Dtype, /, *, copy: bool = True) -> Array:
+ if not copy and dtype == x.dtype:
+ return x
+ return Array._new(x._array.astype(dtype=dtype, copy=copy))
+
+
+def broadcast_arrays(*arrays: Array) -> List[Array]:
+ """
+ Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ return [
+ Array._new(array) for array in np.broadcast_arrays(*[a._array for a in arrays])
+ ]
+
+
+def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ return Array._new(np.broadcast_to(x._array, shape))
+
+
+def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
+ """
+ Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.
+
+ See its docstring for more information.
+ """
+ if isinstance(from_, Array):
+ from_ = from_.dtype
+ elif from_ not in _all_dtypes:
+ raise TypeError(f"{from_=}, but should be an array_api array or dtype")
+ if to not in _all_dtypes:
+ raise TypeError(f"{to=}, but should be a dtype")
+ # Note: We avoid np.can_cast() as it has discrepancies with the array API,
+    # since NumPy allows cross-kind casting (e.g., bool -> int8).
+ # See https://github.com/numpy/numpy/issues/20870
+ try:
+ # We promote `from_` and `to` together. We then check if the promoted
+ # dtype is `to`, which indicates if `from_` can (up)cast to `to`.
+ dtype = _result_type(from_, to)
+ return to == dtype
+ except TypeError:
+ # _result_type() raises if the dtypes don't promote together
+ return False
+
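A sketch of how this promotion-based definition diverges from `np.can_cast`, using the cross-kind case from the comment above:

```python
import numpy as np
import numpy.array_api as xp  # experimental; emits a UserWarning on import

assert xp.can_cast(xp.int8, xp.int16) is True   # int8 promotes to int16
assert xp.can_cast(xp.bool, xp.int16) is False  # cross-kind: rejected here...
assert np.can_cast(np.bool_, np.int16) is True  # ...though NumPy permits it
```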
+
+# These are internal objects for the return types of finfo and iinfo, since
+# the NumPy versions contain extra data that isn't part of the spec.
+@dataclass
+class finfo_object:
+ bits: int
+ # Note: The types of the float data here are float, whereas in NumPy they
+ # are scalars of the corresponding float dtype.
+ eps: float
+ max: float
+ min: float
+ smallest_normal: float
+
+
+@dataclass
+class iinfo_object:
+ bits: int
+ max: int
+ min: int
+
+
+def finfo(type: Union[Dtype, Array], /) -> finfo_object:
+ """
+ Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.
+
+ See its docstring for more information.
+ """
+ fi = np.finfo(type)
+ # Note: The types of the float data here are float, whereas in NumPy they
+ # are scalars of the corresponding float dtype.
+ return finfo_object(
+ fi.bits,
+ float(fi.eps),
+ float(fi.max),
+ float(fi.min),
+ float(fi.smallest_normal),
+ )
+
+
+def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
+ """
+ Array API compatible wrapper for :py:func:`np.iinfo <numpy.iinfo>`.
+
+ See its docstring for more information.
+ """
+ ii = np.iinfo(type)
+ return iinfo_object(ii.bits, ii.max, ii.min)
+
+
+def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
+ """
+ Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.
+
+ See its docstring for more information.
+ """
+ # Note: we use a custom implementation that gives only the type promotions
+ # required by the spec rather than using np.result_type. NumPy implements
+ # too many extra type promotions like int64 + uint64 -> float64, and does
+ # value-based casting on scalar arrays.
+ A = []
+ for a in arrays_and_dtypes:
+ if isinstance(a, Array):
+ a = a.dtype
+ elif isinstance(a, np.ndarray) or a not in _all_dtypes:
+ raise TypeError("result_type() inputs must be array_api arrays or dtypes")
+ A.append(a)
+
+ if len(A) == 0:
+ raise ValueError("at least one array or dtype is required")
+ elif len(A) == 1:
+ return A[0]
+ else:
+ t = A[0]
+ for t2 in A[1:]:
+ t = _result_type(t, t2)
+ return t
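Contrasting this spec-only result_type with NumPy's own, using the int64/uint64 promotion mentioned above (a sketch):

```python
import numpy as np
import numpy.array_api as xp  # experimental; emits a UserWarning on import

assert np.result_type(np.int64, np.uint64) == np.float64  # NumPy's extra rule
try:
    xp.result_type(xp.int64, xp.uint64)  # no entry in the spec's table
except TypeError:
    pass
```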
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_dtypes.py b/venv/lib/python3.9/site-packages/numpy/array_api/_dtypes.py
new file mode 100644
index 00000000..476d619f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_dtypes.py
@@ -0,0 +1,143 @@
+import numpy as np
+
+# Note: we use dtype objects instead of dtype classes. The spec does not
+# require any behavior on dtypes other than equality.
+int8 = np.dtype("int8")
+int16 = np.dtype("int16")
+int32 = np.dtype("int32")
+int64 = np.dtype("int64")
+uint8 = np.dtype("uint8")
+uint16 = np.dtype("uint16")
+uint32 = np.dtype("uint32")
+uint64 = np.dtype("uint64")
+float32 = np.dtype("float32")
+float64 = np.dtype("float64")
+# Note: the spec spells this dtype "bool", so it shadows the Python builtin
+# of the same name (NumPy itself calls it bool_).
+bool = np.dtype("bool")
+
+_all_dtypes = (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+_boolean_dtypes = (bool,)
+_floating_dtypes = (float32, float64)
+_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
+_integer_or_boolean_dtypes = (
+ bool,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+_numeric_dtypes = (
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+
+_dtype_categories = {
+ "all": _all_dtypes,
+ "numeric": _numeric_dtypes,
+ "integer": _integer_dtypes,
+ "integer or boolean": _integer_or_boolean_dtypes,
+ "boolean": _boolean_dtypes,
+ "floating-point": _floating_dtypes,
+}
+
+
+# Note: the spec defines a restricted type promotion table compared to NumPy.
+# In particular, cross-kind promotions like integer + float or boolean +
+# integer are not allowed, even for functions that accept both kinds.
+# Additionally, NumPy promotes signed integer + uint64 to float64, but this
+# promotion is not allowed here. To be clear, Python scalar int objects are
+# allowed to promote to floating-point dtypes, but only in the array
+# operators (see the Array._promote_scalar method in _array_object.py).
+_promotion_table = {
+ (int8, int8): int8,
+ (int8, int16): int16,
+ (int8, int32): int32,
+ (int8, int64): int64,
+ (int16, int8): int16,
+ (int16, int16): int16,
+ (int16, int32): int32,
+ (int16, int64): int64,
+ (int32, int8): int32,
+ (int32, int16): int32,
+ (int32, int32): int32,
+ (int32, int64): int64,
+ (int64, int8): int64,
+ (int64, int16): int64,
+ (int64, int32): int64,
+ (int64, int64): int64,
+ (uint8, uint8): uint8,
+ (uint8, uint16): uint16,
+ (uint8, uint32): uint32,
+ (uint8, uint64): uint64,
+ (uint16, uint8): uint16,
+ (uint16, uint16): uint16,
+ (uint16, uint32): uint32,
+ (uint16, uint64): uint64,
+ (uint32, uint8): uint32,
+ (uint32, uint16): uint32,
+ (uint32, uint32): uint32,
+ (uint32, uint64): uint64,
+ (uint64, uint8): uint64,
+ (uint64, uint16): uint64,
+ (uint64, uint32): uint64,
+ (uint64, uint64): uint64,
+ (int8, uint8): int16,
+ (int8, uint16): int32,
+ (int8, uint32): int64,
+ (int16, uint8): int16,
+ (int16, uint16): int32,
+ (int16, uint32): int64,
+ (int32, uint8): int32,
+ (int32, uint16): int32,
+ (int32, uint32): int64,
+ (int64, uint8): int64,
+ (int64, uint16): int64,
+ (int64, uint32): int64,
+ (uint8, int8): int16,
+ (uint16, int8): int32,
+ (uint32, int8): int64,
+ (uint8, int16): int16,
+ (uint16, int16): int32,
+ (uint32, int16): int64,
+ (uint8, int32): int32,
+ (uint16, int32): int32,
+ (uint32, int32): int64,
+ (uint8, int64): int64,
+ (uint16, int64): int64,
+ (uint32, int64): int64,
+ (float32, float32): float32,
+ (float32, float64): float64,
+ (float64, float32): float64,
+ (float64, float64): float64,
+ (bool, bool): bool,
+}
+
+
+def _result_type(type1, type2):
+ if (type1, type2) in _promotion_table:
+ return _promotion_table[type1, type2]
+ raise TypeError(f"{type1} and {type2} cannot be type promoted together")
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py
new file mode 100644
index 00000000..c758a094
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py
@@ -0,0 +1,729 @@
+from __future__ import annotations
+
+from ._dtypes import (
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ _result_type,
+)
+from ._array_object import Array
+
+import numpy as np
+
+
+def abs(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.abs <numpy.abs>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in abs")
+ return Array._new(np.abs(x._array))
+
+
+# Note: the function name is different here
+def acos(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arccos <numpy.arccos>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in acos")
+ return Array._new(np.arccos(x._array))
+
+
+# Note: the function name is different here
+def acosh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arccosh <numpy.arccosh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in acosh")
+ return Array._new(np.arccosh(x._array))
+
+
+def add(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.add <numpy.add>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in add")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.add(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def asin(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arcsin <numpy.arcsin>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in asin")
+ return Array._new(np.arcsin(x._array))
+
+
+# Note: the function name is different here
+def asinh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arcsinh <numpy.arcsinh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in asinh")
+ return Array._new(np.arcsinh(x._array))
+
+
+# Note: the function name is different here
+def atan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctan <numpy.arctan>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atan")
+ return Array._new(np.arctan(x._array))
+
+
+# Note: the function name is different here
+def atan2(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atan2")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.arctan2(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def atanh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctanh <numpy.arctanh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atanh")
+ return Array._new(np.arctanh(x._array))
+
+
+def bitwise_and(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_and(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.left_shift <numpy.left_shift>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+ raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ # Note: bitwise_left_shift is only defined for x2 nonnegative.
+ if np.any(x2._array < 0):
+ raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
+ return Array._new(np.left_shift(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_invert(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.invert <numpy.invert>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
+ return Array._new(np.invert(x._array))
+
+
+def bitwise_or(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_or <numpy.bitwise_or>`.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_or(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.right_shift <numpy.right_shift>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+ raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ # Note: bitwise_right_shift is only defined for x2 nonnegative.
+ if np.any(x2._array < 0):
+ raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
+ return Array._new(np.right_shift(x1._array, x2._array))
+
+
+def bitwise_xor(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_xor <numpy.bitwise_xor>`.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_xor(x1._array, x2._array))
+
+
+def ceil(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in ceil")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of ceil is the same as the input
+ return x
+ return Array._new(np.ceil(x._array))
+
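The integer short-circuit above keeps the input dtype, where plain `np.ceil` would upcast integer input to float; a quick check (assumes `numpy.array_api` is importable):

```python
import numpy as np
import numpy.array_api as xp  # experimental; emits a UserWarning on import

i = xp.asarray([1, 2], dtype=xp.int32)
assert xp.ceil(i).dtype == xp.int32                     # dtype preserved
assert np.ceil(np.asarray([1, 2])).dtype == np.float64  # NumPy upcasts ints
```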
+
+def cos(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cos <numpy.cos>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in cos")
+ return Array._new(np.cos(x._array))
+
+
+def cosh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cosh <numpy.cosh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in cosh")
+ return Array._new(np.cosh(x._array))
+
+
+def divide(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in divide")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.divide(x1._array, x2._array))
+
+
+def equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`.
+
+ See its docstring for more information.
+ """
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.equal(x1._array, x2._array))
+
+
+def exp(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.exp <numpy.exp>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in exp")
+ return Array._new(np.exp(x._array))
+
+
+def expm1(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.expm1 <numpy.expm1>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in expm1")
+ return Array._new(np.expm1(x._array))
+
+
+def floor(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.floor <numpy.floor>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in floor")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of floor is the same as the input
+ return x
+ return Array._new(np.floor(x._array))
+
+
+def floor_divide(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.floor_divide <numpy.floor_divide>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in floor_divide")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.floor_divide(x1._array, x2._array))
+
+
+def greater(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.greater <numpy.greater>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in greater")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.greater(x1._array, x2._array))
+
+
+def greater_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.greater_equal <numpy.greater_equal>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in greater_equal")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.greater_equal(x1._array, x2._array))
+
+
+def isfinite(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isfinite <numpy.isfinite>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isfinite")
+ return Array._new(np.isfinite(x._array))
+
+
+def isinf(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isinf <numpy.isinf>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isinf")
+ return Array._new(np.isinf(x._array))
+
+
+def isnan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isnan")
+ return Array._new(np.isnan(x._array))
+
+
+def less(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.less <numpy.less>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in less")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.less(x1._array, x2._array))
+
+
+def less_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.less_equal <numpy.less_equal>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in less_equal")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.less_equal(x1._array, x2._array))
+
+
+def log(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log <numpy.log>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log")
+ return Array._new(np.log(x._array))
+
+
+def log1p(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log1p <numpy.log1p>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log1p")
+ return Array._new(np.log1p(x._array))
+
+
+def log2(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log2")
+ return Array._new(np.log2(x._array))
+
+
+def log10(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log10")
+ return Array._new(np.log10(x._array))
+
+
+def logaddexp(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logaddexp <numpy.logaddexp>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in logaddexp")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logaddexp(x1._array, x2._array))
+
+
+def logical_and(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_and")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logical_and(x1._array, x2._array))
+
+
+def logical_not(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_not")
+ return Array._new(np.logical_not(x._array))
+
+
+def logical_or(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_or <numpy.logical_or>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_or")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logical_or(x1._array, x2._array))
+
+
+def logical_xor(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_xor <numpy.logical_xor>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_xor")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logical_xor(x1._array, x2._array))
+
+
+def multiply(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.multiply <numpy.multiply>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in multiply")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.multiply(x1._array, x2._array))
+
+
+def negative(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.negative <numpy.negative>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in negative")
+ return Array._new(np.negative(x._array))
+
+
+def not_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`.
+
+ See its docstring for more information.
+ """
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.not_equal(x1._array, x2._array))
+
+
+def positive(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in positive")
+ return Array._new(np.positive(x._array))
+
+
+# Note: the function name is different here
+def pow(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.power <numpy.power>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in pow")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.power(x1._array, x2._array))
+
+
+def remainder(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.remainder <numpy.remainder>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in remainder")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.remainder(x1._array, x2._array))
+
+
+def round(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.round <numpy.round>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in round")
+ return Array._new(np.round(x._array))
+
+
+def sign(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in sign")
+ return Array._new(np.sign(x._array))
+
+
+def sin(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sin <numpy.sin>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in sin")
+ return Array._new(np.sin(x._array))
+
+
+def sinh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sinh <numpy.sinh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in sinh")
+ return Array._new(np.sinh(x._array))
+
+
+def square(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.square <numpy.square>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in square")
+ return Array._new(np.square(x._array))
+
+
+def sqrt(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sqrt <numpy.sqrt>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in sqrt")
+ return Array._new(np.sqrt(x._array))
+
+
+def subtract(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.subtract <numpy.subtract>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in subtract")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.subtract(x1._array, x2._array))
+
+
+def tan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.tan <numpy.tan>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in tan")
+ return Array._new(np.tan(x._array))
+
+
+def tanh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.tanh <numpy.tanh>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in tanh")
+ return Array._new(np.tanh(x._array))
+
+
+def trunc(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.trunc <numpy.trunc>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in trunc")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of trunc is the same as the input
+ return x
+ return Array._new(np.trunc(x._array))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py
new file mode 100644
index 00000000..7991f46a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_manipulation_functions.py
@@ -0,0 +1,98 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._data_type_functions import result_type
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+
+# Note: the function name is different here
+def concat(
+ arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.
+
+ See its docstring for more information.
+ """
+ # Note: Casting rules here are different from the np.concatenate default
+ # (no special casting for scalars, and no cross-kind casting)
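+ # For example: int8 and int16 inputs are concatenated as int16, while
+ # mixing integer and floating-point inputs raises TypeError from
+ # result_type().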
+ dtype = result_type(*arrays)
+ arrays = tuple(a._array for a in arrays)
+ return Array._new(np.concatenate(arrays, axis=axis, dtype=dtype))
+
+
+def expand_dims(x: Array, /, *, axis: int) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.expand_dims <numpy.expand_dims>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.expand_dims(x._array, axis))
+
+
+def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.flip <numpy.flip>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.flip(x._array, axis=axis))
+
+
+# Note: The function name is different here (see also matrix_transpose).
+# Unlike transpose(), the axes argument is required.
+def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.transpose(x._array, axes))
+
+
+# Note: the optional argument is called 'shape', not 'newshape'
+def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.reshape(x._array, shape))
+
+
+def roll(
+ x: Array,
+ /,
+ shift: Union[int, Tuple[int, ...]],
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.roll <numpy.roll>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.roll(x._array, shift, axis=axis))
+
+
+def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.squeeze <numpy.squeeze>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.squeeze(x._array, axis=axis))
+
+
+def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.stack <numpy.stack>`.
+
+ See its docstring for more information.
+ """
+ # Call result type here just to raise on disallowed type combinations
+ result_type(*arrays)
+ arrays = tuple(a._array for a in arrays)
+ return Array._new(np.stack(arrays, axis=axis))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py
new file mode 100644
index 00000000..40f5a4d2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_searching_functions.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _result_type
+
+from typing import Optional, Tuple
+
+import numpy as np
+
+
+def argmax(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.argmax <numpy.argmax>`.
+
+ See its docstring for more information.
+ """
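+ # np.argmax returns a scalar when axis=None, so wrap the result in
+ # asarray to ensure a 0-D array is returned.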
+ return Array._new(np.asarray(np.argmax(x._array, axis=axis, keepdims=keepdims)))
+
+
+def argmin(x: Array, /, *, axis: Optional[int] = None, keepdims: bool = False) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.argmin <numpy.argmin>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.argmin(x._array, axis=axis, keepdims=keepdims)))
+
+
+def nonzero(x: Array, /) -> Tuple[Array, ...]:
+ """
+ Array API compatible wrapper for :py:func:`np.nonzero <numpy.nonzero>`.
+
+ See its docstring for more information.
+ """
+ return tuple(Array._new(i) for i in np.nonzero(x._array))
+
+
+def where(condition: Array, x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.where <numpy.where>`.
+
+ See its docstring for more information.
+ """
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.where(condition._array, x1._array, x2._array))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_set_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_set_functions.py
new file mode 100644
index 00000000..0b4132cf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_set_functions.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import NamedTuple
+
+import numpy as np
+
+# Note: np.unique() is split into four functions in the array API:
+# unique_all, unique_counts, unique_inverse, and unique_values (this is done
+# to remove polymorphic return types).
+
+# Note: The various unique() functions are supposed to return multiple NaNs.
+# This does not match the NumPy behavior; it is currently left as a TODO in
+# this implementation, as this behavior may be reverted in np.unique().
+# See https://github.com/numpy/numpy/issues/20326.
+
+# Note: The functions here return a namedtuple (np.unique() returns a normal
+# tuple).
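+# For example: for x = asarray([1, 2, 2]), unique_counts(x) returns
+# UniqueCountsResult(values=[1, 2], counts=[1, 2]) as a namedtuple, whereas
+# np.unique(x, return_counts=True) would return a plain tuple.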
+
+class UniqueAllResult(NamedTuple):
+ values: Array
+ indices: Array
+ inverse_indices: Array
+ counts: Array
+
+
+class UniqueCountsResult(NamedTuple):
+ values: Array
+ counts: Array
+
+
+class UniqueInverseResult(NamedTuple):
+ values: Array
+ inverse_indices: Array
+
+
+def unique_all(x: Array, /) -> UniqueAllResult:
+ """
+ Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+ See its docstring for more information.
+ """
+ values, indices, inverse_indices, counts = np.unique(
+ x._array,
+ return_counts=True,
+ return_index=True,
+ return_inverse=True,
+ equal_nan=False,
+ )
+ # np.unique() flattens inverse indices, but they need to share x's shape
+ # See https://github.com/numpy/numpy/issues/20638
+ inverse_indices = inverse_indices.reshape(x.shape)
+ return UniqueAllResult(
+ Array._new(values),
+ Array._new(indices),
+ Array._new(inverse_indices),
+ Array._new(counts),
+ )
+
+
+def unique_counts(x: Array, /) -> UniqueCountsResult:
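+ """
+ Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+ See its docstring for more information.
+ """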
+ res = np.unique(
+ x._array,
+ return_counts=True,
+ return_index=False,
+ return_inverse=False,
+ equal_nan=False,
+ )
+
+ return UniqueCountsResult(*[Array._new(i) for i in res])
+
+
+def unique_inverse(x: Array, /) -> UniqueInverseResult:
+ """
+ Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+ See its docstring for more information.
+ """
+ values, inverse_indices = np.unique(
+ x._array,
+ return_counts=False,
+ return_index=False,
+ return_inverse=True,
+ equal_nan=False,
+ )
+ # np.unique() flattens inverse indices, but they need to share x's shape
+ # See https://github.com/numpy/numpy/issues/20638
+ inverse_indices = inverse_indices.reshape(x.shape)
+ return UniqueInverseResult(Array._new(values), Array._new(inverse_indices))
+
+
+def unique_values(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.unique <numpy.unique>`.
+
+ See its docstring for more information.
+ """
+ res = np.unique(
+ x._array,
+ return_counts=False,
+ return_index=False,
+ return_inverse=False,
+ equal_nan=False,
+ )
+ return Array._new(res)
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py
new file mode 100644
index 00000000..afbb412f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_sorting_functions.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+import numpy as np
+
+
+# Note: the descending keyword argument is new in this function
+def argsort(
+ x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.argsort <numpy.argsort>`.
+
+ See its docstring for more information.
+ """
+ # Note: NumPy's kind keyword argument differs from the spec's stable
+ # keyword, and the default sort kind differs as well.
+ kind = "stable" if stable else "quicksort"
+ if not descending:
+ res = np.argsort(x._array, axis=axis, kind=kind)
+ else:
+ # As NumPy has no native descending sort, we imitate it here. Note that
+ # simply flipping the results of np.argsort(x._array, ...) would not
+ # respect the relative order like it would in native descending sorts.
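+ # For example: for x = [1, 2, 2], a stable descending argsort gives
+ # [1, 2, 0], keeping the equal 2s in their original order, whereas simply
+ # flipping np.argsort(x) would give [2, 1, 0] and swap them.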
+ res = np.flip(
+ np.argsort(np.flip(x._array, axis=axis), axis=axis, kind=kind),
+ axis=axis,
+ )
+ # Rely on flip()/argsort() to validate axis
+ normalised_axis = axis if axis >= 0 else x.ndim + axis
+ max_i = x.shape[normalised_axis] - 1
+ res = max_i - res
+ return Array._new(res)
+
+# Note: the descending keyword argument is new in this function
+def sort(
+ x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.sort <numpy.sort>`.
+
+ See its docstring for more information.
+ """
+ # Note: NumPy's kind keyword argument differs from the spec's stable
+ # keyword, and the default sort kind differs as well.
+ kind = "stable" if stable else "quicksort"
+ res = np.sort(x._array, axis=axis, kind=kind)
+ if descending:
+ res = np.flip(res, axis=axis)
+ return Array._new(res)
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py
new file mode 100644
index 00000000..5bc831ac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_statistical_functions.py
@@ -0,0 +1,115 @@
+from __future__ import annotations
+
+from ._dtypes import (
+ _floating_dtypes,
+ _numeric_dtypes,
+)
+from ._array_object import Array
+from ._creation_functions import asarray
+from ._dtypes import float32, float64
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import Dtype
+
+import numpy as np
+
+
+def max(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in max")
+ return Array._new(np.max(x._array, axis=axis, keepdims=keepdims))
+
+
+def mean(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in mean")
+ return Array._new(np.mean(x._array, axis=axis, keepdims=keepdims))
+
+
+def min(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in min")
+ return Array._new(np.min(x._array, axis=axis, keepdims=keepdims))
+
+
+def prod(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ dtype: Optional[Dtype] = None,
+ keepdims: bool = False,
+) -> Array:
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in prod")
+ # Note: sum() and prod() always upcast float32 to float64 for dtype=None
+ # We need to do so here before computing the product to avoid overflow
+ if dtype is None and x.dtype == float32:
+ dtype = float64
+ return Array._new(np.prod(x._array, dtype=dtype, axis=axis, keepdims=keepdims))
+
+
+def std(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ correction: Union[int, float] = 0.0,
+ keepdims: bool = False,
+) -> Array:
+ # Note: the keyword argument correction is different here
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in std")
+ return Array._new(np.std(x._array, axis=axis, ddof=correction, keepdims=keepdims))
+
+
+def sum(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ dtype: Optional[Dtype] = None,
+ keepdims: bool = False,
+) -> Array:
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in sum")
+ # Note: sum() and prod() always upcast integers to (u)int64 and float32 to
+ # float64 for dtype=None. `np.sum` does that too for integers, but not for
+ # float32, so we need to special-case it here
+ if dtype is None and x.dtype == float32:
+ dtype = float64
+ return Array._new(np.sum(x._array, axis=axis, dtype=dtype, keepdims=keepdims))
+
+
+def var(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ correction: Union[int, float] = 0.0,
+ keepdims: bool = False,
+) -> Array:
+ # Note: the keyword argument correction is different here
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in var")
+ return Array._new(np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_typing.py b/venv/lib/python3.9/site-packages/numpy/array_api/_typing.py
new file mode 100644
index 00000000..dfa87b35
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_typing.py
@@ -0,0 +1,74 @@
+"""
+This file defines the types for type annotations.
+
+These names aren't part of the module namespace, but they are used in the
+annotations in the function signatures. The functions in the module are only
+valid for inputs that match the given type annotations.
+"""
+
+from __future__ import annotations
+
+__all__ = [
+ "Array",
+ "Device",
+ "Dtype",
+ "SupportsDLPack",
+ "SupportsBufferProtocol",
+ "PyCapsule",
+]
+
+import sys
+from typing import (
+ Any,
+ Literal,
+ Sequence,
+ Type,
+ Union,
+ TYPE_CHECKING,
+ TypeVar,
+ Protocol,
+)
+
+from ._array_object import Array
+from numpy import (
+ dtype,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+)
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+class NestedSequence(Protocol[_T_co]):
+ def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
+ def __len__(self, /) -> int: ...
+
+Device = Literal["cpu"]
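+# Subscripting np.dtype (dtype[...]) is only supported at runtime on
+# Python 3.9+, so fall back to the unparameterized class on older versions.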
+if TYPE_CHECKING or sys.version_info >= (3, 9):
+ Dtype = dtype[Union[
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ ]]
+else:
+ Dtype = dtype
+
+SupportsBufferProtocol = Any
+PyCapsule = Any
+
+class SupportsDLPack(Protocol):
+ def __dlpack__(self, /, *, stream: None = ...) -> PyCapsule: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py
new file mode 100644
index 00000000..5ecb4bd9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/_utility_functions.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from ._array_object import Array
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def all(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.all <numpy.all>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.all(x._array, axis=axis, keepdims=keepdims)))
+
+
+def any(
+ x: Array,
+ /,
+ *,
+ axis: Optional[Union[int, Tuple[int, ...]]] = None,
+ keepdims: bool = False,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.any <numpy.any>`.
+
+ See its docstring for more information.
+ """
+ return Array._new(np.asarray(np.any(x._array, axis=axis, keepdims=keepdims)))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/linalg.py b/venv/lib/python3.9/site-packages/numpy/array_api/linalg.py
new file mode 100644
index 00000000..d214046e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/linalg.py
@@ -0,0 +1,446 @@
+from __future__ import annotations
+
+from ._dtypes import _floating_dtypes, _numeric_dtypes
+from ._manipulation_functions import reshape
+from ._array_object import Array
+
+from ..core.numeric import normalize_axis_tuple
+
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+ from ._typing import Literal, Optional, Sequence, Tuple, Union
+
+from typing import NamedTuple
+
+import numpy.linalg
+import numpy as np
+
+class EighResult(NamedTuple):
+ eigenvalues: Array
+ eigenvectors: Array
+
+class QRResult(NamedTuple):
+ Q: Array
+ R: Array
+
+class SlogdetResult(NamedTuple):
+ sign: Array
+ logabsdet: Array
+
+class SVDResult(NamedTuple):
+ U: Array
+ S: Array
+ Vh: Array
+
+# Note: the inclusion of the upper keyword is different from
+# np.linalg.cholesky, which does not have it.
+def cholesky(x: Array, /, *, upper: bool = False) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.cholesky.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in cholesky')
+ L = np.linalg.cholesky(x._array)
+ if upper:
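+ # mT transposes the last two axes, turning the lower-triangular factor
+ # into the upper-triangular one.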
+ return Array._new(L).mT
+ return Array._new(L)
+
+# Note: cross is in the numpy top-level namespace, not in np.linalg
+def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError('Only numeric dtypes are allowed in cross')
+ # Note: this is different from np.cross(), which broadcasts
+ if x1.shape != x2.shape:
+ raise ValueError('x1 and x2 must have the same shape')
+ if x1.ndim == 0:
+ raise ValueError('cross() requires arrays of dimension at least 1')
+ # Note: this is different from np.cross(), which allows dimension 2
+ if x1.shape[axis] != 3:
+ raise ValueError('cross() dimension must equal 3')
+ return Array._new(np.cross(x1._array, x2._array, axis=axis))
+
+def det(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.det <numpy.linalg.det>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.det.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in det')
+ return Array._new(np.linalg.det(x._array))
+
+# Note: diagonal is in the numpy top-level namespace, not in np.linalg
+def diagonal(x: Array, /, *, offset: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.diagonal <numpy.diagonal>`.
+
+ See its docstring for more information.
+ """
+ # Note: diagonal always operates on the last two axes, whereas np.diagonal
+ # operates on the first two axes by default
+ return Array._new(np.diagonal(x._array, offset=offset, axis1=-2, axis2=-1))
+
+
+def eigh(x: Array, /) -> EighResult:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.eigh <numpy.linalg.eigh>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.eigh.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in eigh')
+
+ # Note: the return type here is a namedtuple, which is different from
+ # np.linalg.eigh, which only returns a tuple.
+ return EighResult(*map(Array._new, np.linalg.eigh(x._array)))
+
+
+def eigvalsh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.eigvalsh <numpy.linalg.eigvalsh>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.eigvalsh.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in eigvalsh')
+
+ return Array._new(np.linalg.eigvalsh(x._array))
+
+def inv(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.inv <numpy.linalg.inv>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.inv.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in inv')
+
+ return Array._new(np.linalg.inv(x._array))
+
+
+# Note: matmul is in the numpy top-level namespace, not in np.linalg
+def matmul(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to numeric dtypes only is different from
+ # np.matmul.
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError('Only numeric dtypes are allowed in matmul')
+
+ return Array._new(np.matmul(x1._array, x2._array))
+
+
+# Note: the name here is different from norm(). The array API norm is split
+# into matrix_norm() and vector_norm().
+
+# The type for ord should be Optional[Union[int, float, Literal[np.inf,
+# -np.inf, 'fro', 'nuc']]], but Literal does not support floating-point
+# literals.
+def matrix_norm(x: Array, /, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.norm.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in matrix_norm')
+
+ return Array._new(np.linalg.norm(x._array, axis=(-2, -1), keepdims=keepdims, ord=ord))
+
+
+def matrix_power(x: Array, n: int, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.matrix_power <numpy.linalg.matrix_power>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.matrix_power.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed for the first argument of matrix_power')
+
+ # np.matrix_power already checks if n is an integer
+ return Array._new(np.linalg.matrix_power(x._array, n))
+
+# Note: the keyword argument name rtol is different from np.linalg.matrix_rank
+def matrix_rank(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.matrix_rank <numpy.linalg.matrix_rank>`.
+
+ See its docstring for more information.
+ """
+ # Note: this is different from np.linalg.matrix_rank, which supports
+ # 1-dimensional arrays.
+ if x.ndim < 2:
+ raise np.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
+ S = np.linalg.svd(x._array, compute_uv=False)
+ if rtol is None:
+ tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * np.finfo(S.dtype).eps
+ else:
+ if isinstance(rtol, Array):
+ rtol = rtol._array
+ # Note: this is different from np.linalg.matrix_rank, which does not multiply
+ # the tolerance by the largest singular value.
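+ # The trailing np.newaxis lets a (possibly array-valued) rtol broadcast
+ # against the singular-value axis of S.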
+ tol = S.max(axis=-1, keepdims=True)*np.asarray(rtol)[..., np.newaxis]
+ return Array._new(np.count_nonzero(S > tol, axis=-1))
+
+
+# Note: this function is new in the array API spec. Unlike transpose, it only
+# transposes the last two axes.
+def matrix_transpose(x: Array, /) -> Array:
+ if x.ndim < 2:
+ raise ValueError("x must be at least 2-dimensional for matrix_transpose")
+ return Array._new(np.swapaxes(x._array, -1, -2))
+
+# Note: outer is in the numpy top-level namespace, not in np.linalg
+def outer(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.outer <numpy.outer>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to numeric dtypes only is different from
+ # np.outer.
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError('Only numeric dtypes are allowed in outer')
+
+ # Note: the restriction to only 1-dim arrays is different from np.outer
+ if x1.ndim != 1 or x2.ndim != 1:
+ raise ValueError('The input arrays to outer must be 1-dimensional')
+
+ return Array._new(np.outer(x1._array, x2._array))
+
+# Note: the keyword argument name rtol is different from np.linalg.pinv
+def pinv(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.pinv <numpy.linalg.pinv>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.pinv.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in pinv')
+
+ # Note: this is different from np.linalg.pinv, which does not multiply the
+ # default tolerance by max(M, N).
+ if rtol is None:
+ rtol = max(x.shape[-2:]) * np.finfo(x.dtype).eps
+ return Array._new(np.linalg.pinv(x._array, rcond=rtol))
+
+def qr(x: Array, /, *, mode: Literal['reduced', 'complete'] = 'reduced') -> QRResult:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.qr <numpy.linalg.qr>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.qr.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in qr')
+
+ # Note: the return type here is a namedtuple, which is different from
+ # np.linalg.qr, which only returns a tuple.
+ return QRResult(*map(Array._new, np.linalg.qr(x._array, mode=mode)))
+
+def slogdet(x: Array, /) -> SlogdetResult:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.slogdet <numpy.linalg.slogdet>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.slogdet.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in slogdet')
+
+ # Note: the return type here is a namedtuple, which is different from
+ # np.linalg.slogdet, which only returns a tuple.
+ return SlogdetResult(*map(Array._new, np.linalg.slogdet(x._array)))
+
+# Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a
+# vector when it is exactly 1-dimensional. All other cases treat x2 as a stack
+# of matrices. The np.linalg.solve behavior of allowing stacks of both
+# matrices and vectors is ambiguous; cf.
+# https://github.com/numpy/numpy/issues/15349 and
+# https://github.com/data-apis/array-api/issues/285.
+
+# To work around this, the code below is taken from np.linalg.solve, except
+# that it only calls solve1 in the exactly 1-dimensional case.
+def _solve(a, b):
+ from ..linalg.linalg import (_makearray, _assert_stacked_2d,
+ _assert_stacked_square, _commonType,
+ isComplexType, get_linalg_error_extobj,
+ _raise_linalgerror_singular)
+ from ..linalg import _umath_linalg
+
+ a, _ = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ b, wrap = _makearray(b)
+ t, result_t = _commonType(a, b)
+
+ # This part is different from np.linalg.solve
+ if b.ndim == 1:
+ gufunc = _umath_linalg.solve1
+ else:
+ gufunc = _umath_linalg.solve
+
+ # This does nothing currently but is left in because it will be relevant
+ # when complex dtype support is added to the spec in 2022.
+ signature = 'DD->D' if isComplexType(t) else 'dd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
+ r = gufunc(a, b, signature=signature, extobj=extobj)
+
+ return wrap(r.astype(result_t, copy=False))
+
+def solve(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.solve <numpy.linalg.solve>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.solve.
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in solve')
+
+ return Array._new(_solve(x1._array, x2._array))
+
+def svd(x: Array, /, *, full_matrices: bool = True) -> SVDResult:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.svd <numpy.linalg.svd>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.svd.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in svd')
+
+ # Note: the return type here is a namedtuple, which is different from
+ # np.linalg.svd, which only returns a tuple.
+ return SVDResult(*map(Array._new, np.linalg.svd(x._array, full_matrices=full_matrices)))
+
+# Note: svdvals is not in NumPy (but it is in SciPy). It is equivalent to
+# np.linalg.svd(compute_uv=False).
+def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]:
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in svdvals')
+ return Array._new(np.linalg.svd(x._array, compute_uv=False))
+
+# Note: tensordot is in the numpy top-level namespace, not in np.linalg
+
+# Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like.
+def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2) -> Array:
+ # Note: the restriction to numeric dtypes only is different from
+ # np.tensordot.
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError('Only numeric dtypes are allowed in tensordot')
+
+ return Array._new(np.tensordot(x1._array, x2._array, axes=axes))
+
+# Note: trace is in the numpy top-level namespace, not in np.linalg
+def trace(x: Array, /, *, offset: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.trace <numpy.trace>`.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError('Only numeric dtypes are allowed in trace')
+ # Note: trace always operates on the last two axes, whereas np.trace
+ # operates on the first two axes by default
+ return Array._new(np.asarray(np.trace(x._array, offset=offset, axis1=-2, axis2=-1)))
+
+# Note: vecdot is not in NumPy
+def vecdot(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError('Only numeric dtypes are allowed in vecdot')
+ ndim = max(x1.ndim, x2.ndim)
+ x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape)
+ x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape)
+ if x1_shape[axis] != x2_shape[axis]:
+ raise ValueError("x1 and x2 must have the same size along the given axis")
+
+ x1_, x2_ = np.broadcast_arrays(x1._array, x2._array)
+ x1_ = np.moveaxis(x1_, axis, -1)
+ x2_ = np.moveaxis(x2_, axis, -1)
+
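+ # A (..., 1, n) @ (..., n, 1) matmul computes the dot product along the
+ # moved axis; the trailing size-1 dimensions are dropped below.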
+ res = x1_[..., None, :] @ x2_[..., None]
+ return Array._new(res[..., 0, 0])
+
+
+# Note: the name here is different from norm(). The array API norm is split
+# into matrix_norm() and vector_norm().
+
+# The type for ord should be Optional[Union[int, float, Literal[np.inf,
+# -np.inf]]] but Literal does not support floating-point literals.
+def vector_norm(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`.
+
+ See its docstring for more information.
+ """
+ # Note: the restriction to floating-point dtypes only is different from
+ # np.linalg.norm.
+ if x.dtype not in _floating_dtypes:
+ raise TypeError('Only floating-point dtypes are allowed in norm')
+
+ # np.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or
+ # when axis=None and the input is 2-D, so to force a vector norm, we make
+ # it so the input is 1-D (for axis=None), or reshape so that norm is done
+ # on a single dimension.
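+ # For example: with x.shape == (2, 3, 4) and axis=(0, 2), the array is
+ # transposed to shape (2, 4, 3) and reshaped to (8, 3), and the vector
+ # norm is then taken along axis 0.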
+ a = x._array
+ if axis is None:
+ # Note: np.linalg.norm() doesn't handle 0-D arrays
+ a = a.ravel()
+ _axis = 0
+ elif isinstance(axis, tuple):
+ # Note: The axis argument supports any number of axes, whereas
+ # np.linalg.norm() only supports a single axis for vector norm.
+ normalized_axis = normalize_axis_tuple(axis, x.ndim)
+ rest = tuple(i for i in range(a.ndim) if i not in normalized_axis)
+ newshape = normalized_axis + rest
+ a = np.transpose(a, newshape).reshape(
+ (np.prod([a.shape[i] for i in normalized_axis], dtype=int), *[a.shape[i] for i in rest]))
+ _axis = 0
+ else:
+ _axis = axis
+
+ res = Array._new(np.linalg.norm(a, axis=_axis, ord=ord))
+
+ if keepdims:
+ # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks
+ # above to avoid matrix norm logic.
+ shape = list(x.shape)
+ _axis = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim)
+ for i in _axis:
+ shape[i] = 1
+ res = reshape(res, tuple(shape))
+
+ return res
+
+__all__ = ['cholesky', 'cross', 'det', 'diagonal', 'eigh', 'eigvalsh', 'inv', 'matmul', 'matrix_norm', 'matrix_power', 'matrix_rank', 'matrix_transpose', 'outer', 'pinv', 'qr', 'slogdet', 'solve', 'svd', 'svdvals', 'tensordot', 'trace', 'vecdot', 'vector_norm']
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/setup.py b/venv/lib/python3.9/site-packages/numpy/array_api/setup.py
new file mode 100644
index 00000000..c8bc2910
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/setup.py
@@ -0,0 +1,12 @@
+def configuration(parent_package="", top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration("array_api", parent_package, top_path)
+ config.add_subpackage("tests")
+ return config
+
+
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py
new file mode 100644
index 00000000..536062e3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/__init__.py
@@ -0,0 +1,7 @@
+"""
+Tests for the array API namespace.
+
+Note: full compliance with the array API can be tested with the official array
+API test suite at https://github.com/data-apis/array-api-tests. The tests here
+primarily cover those things that are not tested by that suite.
+"""
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py
new file mode 100644
index 00000000..f6efacef
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_array_object.py
@@ -0,0 +1,375 @@
+import operator
+
+from numpy.testing import assert_raises
+import numpy as np
+import pytest
+
+from .. import ones, asarray, reshape, result_type, all, equal
+from .._array_object import Array
+from .._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint64,
+ bool as bool_,
+)
+
+
+def test_validate_index():
+ # The indexing tests in the official array API test suite test that the
+ # array object correctly handles the subset of indices that are required
+ # by the spec. But the NumPy array API implementation specifically
+ # disallows any index not required by the spec, via Array._validate_index.
+ # This test focuses on testing that non-valid indices are correctly
+ # rejected. See
+ # https://data-apis.org/array-api/latest/API_specification/indexing.html
+ # and the docstring of Array._validate_index for the exact indexing
+ # behavior that should be allowed. This does not test indices that are
+ # already invalid in NumPy itself because Array will generally just pass
+ # such indices directly to the underlying np.ndarray.
+
+ a = ones((3, 4))
+
+ # Out of bounds slices are not allowed
+ assert_raises(IndexError, lambda: a[:4])
+ assert_raises(IndexError, lambda: a[:-4])
+ assert_raises(IndexError, lambda: a[:3:-1])
+ assert_raises(IndexError, lambda: a[:-5:-1])
+ assert_raises(IndexError, lambda: a[4:])
+ assert_raises(IndexError, lambda: a[-4:])
+ assert_raises(IndexError, lambda: a[4::-1])
+ assert_raises(IndexError, lambda: a[-4::-1])
+
+ assert_raises(IndexError, lambda: a[...,:5])
+ assert_raises(IndexError, lambda: a[...,:-5])
+ assert_raises(IndexError, lambda: a[...,:5:-1])
+ assert_raises(IndexError, lambda: a[...,:-6:-1])
+ assert_raises(IndexError, lambda: a[...,5:])
+ assert_raises(IndexError, lambda: a[...,-5:])
+ assert_raises(IndexError, lambda: a[...,5::-1])
+ assert_raises(IndexError, lambda: a[...,-5::-1])
+
+ # Boolean indices cannot be part of a larger tuple index
+ assert_raises(IndexError, lambda: a[a[:,0]==1,0])
+ assert_raises(IndexError, lambda: a[a[:,0]==1,...])
+ assert_raises(IndexError, lambda: a[..., a[0]==1])
+ assert_raises(IndexError, lambda: a[[True, True, True]])
+ assert_raises(IndexError, lambda: a[(True, True, True),])
+
+ # Integer array indices are not allowed (except for 0-D)
+ idx = asarray([[0, 1]])
+ assert_raises(IndexError, lambda: a[idx])
+ assert_raises(IndexError, lambda: a[idx,])
+ assert_raises(IndexError, lambda: a[[0, 1]])
+ assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
+ assert_raises(IndexError, lambda: a[[0, 1]])
+ assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
+
+ # Multiaxis indices must contain exactly as many indices as dimensions
+ assert_raises(IndexError, lambda: a[()])
+ assert_raises(IndexError, lambda: a[0,])
+ assert_raises(IndexError, lambda: a[0])
+ assert_raises(IndexError, lambda: a[:])
+
+def test_operators():
+ # For every operator, we test that it works for the required type
+ # combinations and raises TypeError otherwise
+ binary_op_dtypes = {
+ "__add__": "numeric",
+ "__and__": "integer_or_boolean",
+ "__eq__": "all",
+ "__floordiv__": "numeric",
+ "__ge__": "numeric",
+ "__gt__": "numeric",
+ "__le__": "numeric",
+ "__lshift__": "integer",
+ "__lt__": "numeric",
+ "__mod__": "numeric",
+ "__mul__": "numeric",
+ "__ne__": "all",
+ "__or__": "integer_or_boolean",
+ "__pow__": "numeric",
+ "__rshift__": "integer",
+ "__sub__": "numeric",
+ "__truediv__": "floating",
+ "__xor__": "integer_or_boolean",
+ }
+
+ # Recompute each time because of in-place ops
+ def _array_vals():
+ for d in _integer_dtypes:
+ yield asarray(1, dtype=d)
+ for d in _boolean_dtypes:
+ yield asarray(False, dtype=d)
+ for d in _floating_dtypes:
+ yield asarray(1.0, dtype=d)
+
+ for op, dtypes in binary_op_dtypes.items():
+ ops = [op]
+ if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
+ rop = "__r" + op[2:]
+ iop = "__i" + op[2:]
+ ops += [rop, iop]
+ for s in [1, 1.0, False]:
+ for _op in ops:
+ for a in _array_vals():
+ # Test array op scalar. From the spec, the following combinations
+ # are supported:
+
+ # - Python bool for a bool array dtype,
+ # - a Python int within the bounds of the given dtype for integer array dtypes,
+ # - a Python int or float for floating-point array dtypes
+
+ # We do not do bounds checking for int scalars, but rather use the default
+ # NumPy behavior for casting in that case.
+
+ if ((dtypes == "all"
+ or dtypes == "numeric" and a.dtype in _numeric_dtypes
+ or dtypes == "integer" and a.dtype in _integer_dtypes
+ or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
+ or dtypes == "boolean" and a.dtype in _boolean_dtypes
+ or dtypes == "floating" and a.dtype in _floating_dtypes
+ )
+ # bool is a subtype of int, which is why we avoid
+ # isinstance here.
+ and (a.dtype in _boolean_dtypes and type(s) == bool
+ or a.dtype in _integer_dtypes and type(s) == int
+ or a.dtype in _floating_dtypes and type(s) in [float, int]
+ )):
+ # Only test for no error
+ getattr(a, _op)(s)
+ else:
+ assert_raises(TypeError, lambda: getattr(a, _op)(s))
+
+ # Test array op array.
+ for _op in ops:
+ for x in _array_vals():
+ for y in _array_vals():
+ # See the promotion table in NEP 47 or the array
+ # API spec page on type promotion. Mixed kind
+ # promotion is not defined.
+ if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
+ or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
+ or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
+ or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
+ or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
+ or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
+ or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
+ or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
+ ):
+ assert_raises(TypeError, lambda: getattr(x, _op)(y))
+ # Ensure in-place operators only promote to the same dtype as the left operand.
+ elif (
+ _op.startswith("__i")
+ and result_type(x.dtype, y.dtype) != x.dtype
+ ):
+ assert_raises(TypeError, lambda: getattr(x, _op)(y))
+ # Ensure only those dtypes that are required for every operator are allowed.
+ elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
+ or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
+ or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
+ or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes
+ or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
+ or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
+ or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
+ or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
+ ):
+ getattr(x, _op)(y)
+ else:
+ assert_raises(TypeError, lambda: getattr(x, _op)(y))
+
+ unary_op_dtypes = {
+ "__abs__": "numeric",
+ "__invert__": "integer_or_boolean",
+ "__neg__": "numeric",
+ "__pos__": "numeric",
+ }
+ for op, dtypes in unary_op_dtypes.items():
+ for a in _array_vals():
+ if (
+ dtypes == "numeric"
+ and a.dtype in _numeric_dtypes
+ or dtypes == "integer_or_boolean"
+ and a.dtype in _integer_or_boolean_dtypes
+ ):
+ # Only test for no error
+ getattr(a, op)()
+ else:
+ assert_raises(TypeError, lambda: getattr(a, op)())
+
+ # Finally, matmul() must be tested separately, because it works a bit
+ # differently from the other operations.
+ def _matmul_array_vals():
+ for a in _array_vals():
+ yield a
+ for d in _all_dtypes:
+ yield ones((3, 4), dtype=d)
+ yield ones((4, 2), dtype=d)
+ yield ones((4, 4), dtype=d)
+
+ # Scalars always error
+ for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
+ for s in [1, 1.0, False]:
+ for a in _matmul_array_vals():
+ if (type(s) in [float, int] and a.dtype in _floating_dtypes
+ or type(s) == int and a.dtype in _integer_dtypes):
+ # Type promotion is valid, but @ is not allowed on 0-D
+ # inputs, so the error is a ValueError
+ assert_raises(ValueError, lambda: getattr(a, _op)(s))
+ else:
+ assert_raises(TypeError, lambda: getattr(a, _op)(s))
+
+ for x in _matmul_array_vals():
+ for y in _matmul_array_vals():
+ if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
+ or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
+ or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
+ or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
+ or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
+ or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
+ or x.dtype in _boolean_dtypes
+ or y.dtype in _boolean_dtypes
+ ):
+ assert_raises(TypeError, lambda: x.__matmul__(y))
+ assert_raises(TypeError, lambda: y.__rmatmul__(x))
+ assert_raises(TypeError, lambda: x.__imatmul__(y))
+ elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
+ assert_raises(ValueError, lambda: x.__matmul__(y))
+ assert_raises(ValueError, lambda: y.__rmatmul__(x))
+ if result_type(x.dtype, y.dtype) != x.dtype:
+ assert_raises(TypeError, lambda: x.__imatmul__(y))
+ else:
+ assert_raises(ValueError, lambda: x.__imatmul__(y))
+ else:
+ x.__matmul__(y)
+ y.__rmatmul__(x)
+ if result_type(x.dtype, y.dtype) != x.dtype:
+ assert_raises(TypeError, lambda: x.__imatmul__(y))
+ elif y.shape[0] != y.shape[1]:
+ # This one fails because x @ y has a different shape from x
+ assert_raises(ValueError, lambda: x.__imatmul__(y))
+ else:
+ x.__imatmul__(y)
+
+
+def test_python_scalar_constructors():
+ b = asarray(False)
+ i = asarray(0)
+ f = asarray(0.0)
+
+ assert bool(b) == False
+ assert int(i) == 0
+ assert float(f) == 0.0
+ assert operator.index(i) == 0
+
+ # bool/int/float should only be allowed on 0-D arrays.
+ assert_raises(TypeError, lambda: bool(asarray([False])))
+ assert_raises(TypeError, lambda: int(asarray([0])))
+ assert_raises(TypeError, lambda: float(asarray([0.0])))
+ assert_raises(TypeError, lambda: operator.index(asarray([0])))
+
+ # bool/int/float should only be allowed on arrays of the corresponding
+ # dtype
+ assert_raises(ValueError, lambda: bool(i))
+ assert_raises(ValueError, lambda: bool(f))
+
+ assert_raises(ValueError, lambda: int(b))
+ assert_raises(ValueError, lambda: int(f))
+
+ assert_raises(ValueError, lambda: float(b))
+ assert_raises(ValueError, lambda: float(i))
+
+ assert_raises(TypeError, lambda: operator.index(b))
+ assert_raises(TypeError, lambda: operator.index(f))
+
+
+def test_device_property():
+ a = ones((3, 4))
+ assert a.device == 'cpu'
+
+ assert all(equal(a.to_device('cpu'), a))
+ assert_raises(ValueError, lambda: a.to_device('gpu'))
+
+ assert all(equal(asarray(a, device='cpu'), a))
+ assert_raises(ValueError, lambda: asarray(a, device='gpu'))
+
+def test_array_properties():
+ a = ones((1, 2, 3))
+ b = ones((2, 3))
+ assert_raises(ValueError, lambda: a.T)
+
+ assert isinstance(b.T, Array)
+ assert b.T.shape == (3, 2)
+
+ assert isinstance(a.mT, Array)
+ assert a.mT.shape == (1, 3, 2)
+ assert isinstance(b.mT, Array)
+ assert b.mT.shape == (3, 2)
+
+def test___array__():
+ a = ones((2, 3), dtype=int16)
+ assert np.asarray(a) is a._array
+ b = np.asarray(a, dtype=np.float64)
+ assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64)))
+ assert b.dtype == np.float64
+
+def test_allow_newaxis():
+ a = ones(5)
+ indexed_a = a[None, :]
+ assert indexed_a.shape == (1, 5)
+
+def test_disallow_flat_indexing_with_newaxis():
+ a = ones((3, 3, 3))
+ with pytest.raises(IndexError):
+ a[None, 0, 0]
+
+def test_disallow_mask_with_newaxis():
+ a = ones((3, 3, 3))
+ with pytest.raises(IndexError):
+ a[None, asarray(True)]
+
+@pytest.mark.parametrize("shape", [(), (5,), (3, 3, 3)])
+@pytest.mark.parametrize("index", ["string", False, True])
+def test_error_on_invalid_index(shape, index):
+ a = ones(shape)
+ with pytest.raises(IndexError):
+ a[index]
+
+def test_mask_0d_array_without_errors():
+ a = ones(())
+ a[asarray(True)]
+
+@pytest.mark.parametrize(
+ "i", [slice(5), slice(5, 0), asarray(True), asarray([0, 1])]
+)
+def test_error_on_invalid_index_with_ellipsis(i):
+ a = ones((3, 3, 3))
+ with pytest.raises(IndexError):
+ a[..., i]
+ with pytest.raises(IndexError):
+ a[i, ...]
+
+def test_array_keys_use_private_array():
+ """
+ Indexing operations convert array keys before indexing the internal array
+
+ Fails when array_api array keys are not converted into NumPy-proper arrays
+ in __getitem__(). This is achieved by passing array_api arrays with 0-sized
+ dimensions, which NumPy-proper treats erroneously - not sure why!
+
+ TODO: Find and use appropriate __setitem__() case.
+ """
+ a = ones((0, 0), dtype=bool_)
+ assert a[a].shape == (0,)
+
+ a = ones((0,), dtype=bool_)
+ key = ones((0, 0), dtype=bool_)
+ with pytest.raises(IndexError):
+ a[key]
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py
new file mode 100644
index 00000000..be9eaa38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_creation_functions.py
@@ -0,0 +1,142 @@
+from numpy.testing import assert_raises
+import numpy as np
+
+from .. import all
+from .._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ zeros,
+ zeros_like,
+)
+from .._dtypes import float32, float64
+from .._array_object import Array
+
+
+def test_asarray_errors():
+ # Test various protections against incorrect usage
+ assert_raises(TypeError, lambda: Array([1]))
+ assert_raises(TypeError, lambda: asarray(["a"]))
+ assert_raises(ValueError, lambda: asarray([1.0], dtype=np.float16))
+ assert_raises(OverflowError, lambda: asarray(2**100))
+ # Preferably this would be OverflowError
+ # assert_raises(OverflowError, lambda: asarray([2**100]))
+ assert_raises(TypeError, lambda: asarray([2**100]))
+ asarray([1], device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: asarray([1], device="gpu"))
+
+ assert_raises(ValueError, lambda: asarray([1], dtype=int))
+ assert_raises(ValueError, lambda: asarray([1], dtype="i"))
+
+
+def test_asarray_copy():
+ a = asarray([1])
+ b = asarray(a, copy=True)
+ a[0] = 0
+ assert all(b[0] == 1)
+ assert all(a[0] == 0)
+ a = asarray([1])
+ b = asarray(a, copy=np._CopyMode.ALWAYS)
+ a[0] = 0
+ assert all(b[0] == 1)
+ assert all(a[0] == 0)
+ a = asarray([1])
+ b = asarray(a, copy=np._CopyMode.NEVER)
+ a[0] = 0
+ assert all(b[0] == 0)
+ assert_raises(NotImplementedError, lambda: asarray(a, copy=False))
+ assert_raises(NotImplementedError,
+ lambda: asarray(a, copy=np._CopyMode.IF_NEEDED))
+
+
+def test_arange_errors():
+ arange(1, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: arange(1, device="gpu"))
+ assert_raises(ValueError, lambda: arange(1, dtype=int))
+ assert_raises(ValueError, lambda: arange(1, dtype="i"))
+
+
+def test_empty_errors():
+ empty((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: empty((1,), device="gpu"))
+ assert_raises(ValueError, lambda: empty((1,), dtype=int))
+ assert_raises(ValueError, lambda: empty((1,), dtype="i"))
+
+
+def test_empty_like_errors():
+ empty_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: empty_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: empty_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: empty_like(asarray(1), dtype="i"))
+
+
+def test_eye_errors():
+ eye(1, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: eye(1, device="gpu"))
+ assert_raises(ValueError, lambda: eye(1, dtype=int))
+ assert_raises(ValueError, lambda: eye(1, dtype="i"))
+
+
+def test_full_errors():
+ full((1,), 0, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: full((1,), 0, device="gpu"))
+ assert_raises(ValueError, lambda: full((1,), 0, dtype=int))
+ assert_raises(ValueError, lambda: full((1,), 0, dtype="i"))
+
+
+def test_full_like_errors():
+ full_like(asarray(1), 0, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, device="gpu"))
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype=int))
+ assert_raises(ValueError, lambda: full_like(asarray(1), 0, dtype="i"))
+
+
+def test_linspace_errors():
+ linspace(0, 1, 10, device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, device="gpu"))
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype=float))
+ assert_raises(ValueError, lambda: linspace(0, 1, 10, dtype="f"))
+
+
+def test_ones_errors():
+ ones((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: ones((1,), device="gpu"))
+ assert_raises(ValueError, lambda: ones((1,), dtype=int))
+ assert_raises(ValueError, lambda: ones((1,), dtype="i"))
+
+
+def test_ones_like_errors():
+ ones_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: ones_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: ones_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: ones_like(asarray(1), dtype="i"))
+
+
+def test_zeros_errors():
+ zeros((1,), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: zeros((1,), device="gpu"))
+ assert_raises(ValueError, lambda: zeros((1,), dtype=int))
+ assert_raises(ValueError, lambda: zeros((1,), dtype="i"))
+
+
+def test_zeros_like_errors():
+ zeros_like(asarray(1), device="cpu") # Doesn't error
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), device="gpu"))
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype=int))
+ assert_raises(ValueError, lambda: zeros_like(asarray(1), dtype="i"))
+
+def test_meshgrid_dtype_errors():
+ # Doesn't raise
+ meshgrid()
+ meshgrid(asarray([1.], dtype=float32))
+ meshgrid(asarray([1.], dtype=float32), asarray([1.], dtype=float32))
+
+ assert_raises(ValueError, lambda: meshgrid(asarray([1.], dtype=float32), asarray([1.], dtype=float64)))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py
new file mode 100644
index 00000000..efe3d0ab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_data_type_functions.py
@@ -0,0 +1,19 @@
+import pytest
+
+from numpy import array_api as xp
+
+
+@pytest.mark.parametrize(
+ "from_, to, expected",
+ [
+ (xp.int8, xp.int16, True),
+ (xp.int16, xp.int8, False),
+ (xp.bool, xp.int8, False),
+ (xp.asarray(0, dtype=xp.uint8), xp.int8, False),
+ ],
+)
+def test_can_cast(from_, to, expected):
+ """
+ can_cast() returns correct result
+ """
+ assert xp.can_cast(from_, to) == expected
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py
new file mode 100644
index 00000000..b2fb44e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_elementwise_functions.py
@@ -0,0 +1,111 @@
+from inspect import getfullargspec
+
+from numpy.testing import assert_raises
+
+from .. import asarray, _elementwise_functions
+from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
+from .._dtypes import (
+ _dtype_categories,
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+)
+
+
+def nargs(func):
+ return len(getfullargspec(func).args)
+
+
+def test_function_types():
+ # Test that every function accepts only the required input types. We only
+ # test the negative cases here (error). The positive cases are tested in
+ # the array API test suite.
+
+ elementwise_function_input_types = {
+ "abs": "numeric",
+ "acos": "floating-point",
+ "acosh": "floating-point",
+ "add": "numeric",
+ "asin": "floating-point",
+ "asinh": "floating-point",
+ "atan": "floating-point",
+ "atan2": "floating-point",
+ "atanh": "floating-point",
+ "bitwise_and": "integer or boolean",
+ "bitwise_invert": "integer or boolean",
+ "bitwise_left_shift": "integer",
+ "bitwise_or": "integer or boolean",
+ "bitwise_right_shift": "integer",
+ "bitwise_xor": "integer or boolean",
+ "ceil": "numeric",
+ "cos": "floating-point",
+ "cosh": "floating-point",
+ "divide": "floating-point",
+ "equal": "all",
+ "exp": "floating-point",
+ "expm1": "floating-point",
+ "floor": "numeric",
+ "floor_divide": "numeric",
+ "greater": "numeric",
+ "greater_equal": "numeric",
+ "isfinite": "numeric",
+ "isinf": "numeric",
+ "isnan": "numeric",
+ "less": "numeric",
+ "less_equal": "numeric",
+ "log": "floating-point",
+ "logaddexp": "floating-point",
+ "log10": "floating-point",
+ "log1p": "floating-point",
+ "log2": "floating-point",
+ "logical_and": "boolean",
+ "logical_not": "boolean",
+ "logical_or": "boolean",
+ "logical_xor": "boolean",
+ "multiply": "numeric",
+ "negative": "numeric",
+ "not_equal": "all",
+ "positive": "numeric",
+ "pow": "numeric",
+ "remainder": "numeric",
+ "round": "numeric",
+ "sign": "numeric",
+ "sin": "floating-point",
+ "sinh": "floating-point",
+ "sqrt": "floating-point",
+ "square": "numeric",
+ "subtract": "numeric",
+ "tan": "floating-point",
+ "tanh": "floating-point",
+ "trunc": "numeric",
+ }
+
+ def _array_vals():
+ for d in _integer_dtypes:
+ yield asarray(1, dtype=d)
+ for d in _boolean_dtypes:
+ yield asarray(False, dtype=d)
+ for d in _floating_dtypes:
+ yield asarray(1.0, dtype=d)
+
+ for x in _array_vals():
+ for func_name, types in elementwise_function_input_types.items():
+ dtypes = _dtype_categories[types]
+ func = getattr(_elementwise_functions, func_name)
+ if nargs(func) == 2:
+ for y in _array_vals():
+ if x.dtype not in dtypes or y.dtype not in dtypes:
+ assert_raises(TypeError, lambda: func(x, y))
+ else:
+ if x.dtype not in dtypes:
+ assert_raises(TypeError, lambda: func(x))
+
+
+def test_bitwise_shift_error():
+ # bitwise shift functions should raise when the second argument is negative
+ assert_raises(
+ ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
+ )
+ assert_raises(
+ ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py
new file mode 100644
index 00000000..b8eb65d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_set_functions.py
@@ -0,0 +1,19 @@
+import pytest
+from hypothesis import given
+from hypothesis.extra.array_api import make_strategies_namespace
+
+from numpy import array_api as xp
+
+xps = make_strategies_namespace(xp)
+
+
+@pytest.mark.parametrize("func", [xp.unique_all, xp.unique_inverse])
+@given(xps.arrays(dtype=xps.scalar_dtypes(), shape=xps.array_shapes()))
+def test_inverse_indices_shape(func, x):
+ """
+ Inverse indices share shape of input array
+
+ See https://github.com/numpy/numpy/issues/20638
+ """
+ out = func(x)
+ assert out.inverse_indices.shape == x.shape
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py
new file mode 100644
index 00000000..9848bbfe
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_sorting_functions.py
@@ -0,0 +1,23 @@
+import pytest
+
+from numpy import array_api as xp
+
+
+@pytest.mark.parametrize(
+ "obj, axis, expected",
+ [
+ ([0, 0], -1, [0, 1]),
+ ([0, 1, 0], -1, [1, 0, 2]),
+ ([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
+ ([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
+ ],
+)
+def test_stable_desc_argsort(obj, axis, expected):
+ """
+ Indices respect relative order of a descending stable-sort
+
+ See https://github.com/numpy/numpy/issues/20778
+ """
+ x = xp.asarray(obj)
+ out = xp.argsort(x, axis=axis, stable=True, descending=True)
+ assert xp.all(out == xp.asarray(expected))
diff --git a/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py
new file mode 100644
index 00000000..0dd100d1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/array_api/tests/test_validation.py
@@ -0,0 +1,27 @@
+from typing import Callable
+
+import pytest
+
+from numpy import array_api as xp
+
+
+def p(func: Callable, *args, **kwargs):
+ f_sig = ", ".join(
+ [str(a) for a in args] + [f"{k}={v}" for k, v in kwargs.items()]
+ )
+ id_ = f"{func.__name__}({f_sig})"
+ return pytest.param(func, args, kwargs, id=id_)
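+
+# Illustrative: p(xp.can_cast, 42, xp.int8) yields a test id along the lines
+# of "can_cast(42, int8)", so a failing case names the offending call.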
+
+
+@pytest.mark.parametrize(
+ "func, args, kwargs",
+ [
+ p(xp.can_cast, 42, xp.int8),
+ p(xp.can_cast, xp.int8, 42),
+ p(xp.result_type, 42),
+ ],
+)
+def test_raises_on_invalid_types(func, args, kwargs):
+ """Function raises TypeError when passed invalidly-typed inputs"""
+ with pytest.raises(TypeError):
+ func(*args, **kwargs)
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/__init__.py b/venv/lib/python3.9/site-packages/numpy/compat/__init__.py
new file mode 100644
index 00000000..afee621b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/__init__.py
@@ -0,0 +1,18 @@
+"""
+Compatibility module.
+
+This module contains duplicated code from Python itself or 3rd party
+extensions, which may be included for the following reasons:
+
+ * compatibility
+ * we may only need a small subset of the copied library/module
+
+"""
+from . import _inspect
+from . import py3k
+from ._inspect import getargspec, formatargspec
+from .py3k import *
+
+__all__ = []
+__all__.extend(_inspect.__all__)
+__all__.extend(py3k.__all__)
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/_inspect.py b/venv/lib/python3.9/site-packages/numpy/compat/_inspect.py
new file mode 100644
index 00000000..9a874a71
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/_inspect.py
@@ -0,0 +1,191 @@
+"""Subset of inspect module from upstream python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has almost
+no overhead.
+
+"""
+import types
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+ """Return true if the object is an instance method.
+
+ Instance method objects provide these attributes:
+ __doc__ documentation string
+ __name__ name with which this method was defined
+ im_class class object in which this method belongs
+ im_func function object containing implementation of method
+ im_self instance to which this method is bound, or None
+
+ """
+ return isinstance(object, types.MethodType)
+
+def isfunction(object):
+ """Return true if the object is a user-defined function.
+
+ Function objects provide these attributes:
+ __doc__ documentation string
+ __name__ name with which this function was defined
+ func_code code object containing compiled function bytecode
+ func_defaults tuple of any default values for arguments
+ func_doc (same as __doc__)
+ func_globals global namespace in which this function was defined
+ func_name (same as __name__)
+
+ """
+ return isinstance(object, types.FunctionType)
+
+def iscode(object):
+ """Return true if the object is a code object.
+
+ Code objects provide these attributes:
+ co_argcount number of arguments (not including * or ** args)
+ co_code string of raw compiled bytecode
+ co_consts tuple of constants used in the bytecode
+ co_filename name of file in which this code object was created
+ co_firstlineno number of first line in Python source code
+ co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+ co_lnotab encoded mapping of line numbers to bytecode indices
+ co_name name with which this code object was defined
+        co_names        tuple of names other than arguments and locals
+ co_nlocals number of local variables
+ co_stacksize virtual machine stack space required
+ co_varnames tuple of names of arguments and local variables
+
+ """
+ return isinstance(object, types.CodeType)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+ """Get information about the arguments accepted by a code object.
+
+ Three things are returned: (args, varargs, varkw), where 'args' is
+ a list of argument names (possibly containing nested lists), and
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+
+ """
+
+ if not iscode(co):
+ raise TypeError('arg is not a code object')
+
+ nargs = co.co_argcount
+ names = co.co_varnames
+ args = list(names[:nargs])
+
+ # The following acrobatics are for anonymous (tuple) arguments.
+ # Which we do not need to support, so remove to avoid importing
+ # the dis module.
+ for i in range(nargs):
+ if args[i][:1] in ['', '.']:
+ raise TypeError("tuple function arguments are not supported")
+ varargs = None
+ if co.co_flags & CO_VARARGS:
+ varargs = co.co_varnames[nargs]
+ nargs = nargs + 1
+ varkw = None
+ if co.co_flags & CO_VARKEYWORDS:
+ varkw = co.co_varnames[nargs]
+ return args, varargs, varkw
+
+def getargspec(func):
+ """Get the names and default values of a function's arguments.
+
+ A tuple of four things is returned: (args, varargs, varkw, defaults).
+ 'args' is a list of the argument names (it may contain nested lists).
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'defaults' is an n-tuple of the default values of the last n arguments.
+
+ """
+
+ if ismethod(func):
+ func = func.__func__
+ if not isfunction(func):
+ raise TypeError('arg is not a Python function')
+ args, varargs, varkw = getargs(func.__code__)
+ return args, varargs, varkw, func.__defaults__
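+
+# A quick sketch of the return value (illustrative, not part of the module):
+#
+#   >>> getargspec(lambda x, y=0, *a, **kw: None)
+#   (['x', 'y'], 'a', 'kw', (0,))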
+
+def getargvalues(frame):
+ """Get information about arguments passed into a particular frame.
+
+ A tuple of four things is returned: (args, varargs, varkw, locals).
+ 'args' is a list of the argument names (it may contain nested lists).
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'locals' is the locals dictionary of the given frame.
+
+ """
+ args, varargs, varkw = getargs(frame.f_code)
+ return args, varargs, varkw, frame.f_locals
+
+def joinseq(seq):
+ if len(seq) == 1:
+ return '(' + seq[0] + ',)'
+ else:
+ return '(' + ', '.join(seq) + ')'
+
+def strseq(object, convert, join=joinseq):
+ """Recursively walk a sequence, stringifying each element.
+
+ """
+ if type(object) in [list, tuple]:
+ return join([strseq(_o, convert, join) for _o in object])
+ else:
+ return convert(object)
+
+def formatargspec(args, varargs=None, varkw=None, defaults=None,
+ formatarg=str,
+ formatvarargs=lambda name: '*' + name,
+ formatvarkw=lambda name: '**' + name,
+ formatvalue=lambda value: '=' + repr(value),
+ join=joinseq):
+ """Format an argument spec from the 4 values returned by getargspec.
+
+ The first four arguments are (args, varargs, varkw, defaults). The
+ other four arguments are the corresponding optional formatting functions
+ that are called to turn names and values into strings. The ninth
+ argument is an optional function to format the sequence of arguments.
+
+ """
+ specs = []
+ if defaults:
+ firstdefault = len(args) - len(defaults)
+ for i in range(len(args)):
+ spec = strseq(args[i], formatarg, join)
+ if defaults and i >= firstdefault:
+ spec = spec + formatvalue(defaults[i - firstdefault])
+ specs.append(spec)
+ if varargs is not None:
+ specs.append(formatvarargs(varargs))
+ if varkw is not None:
+ specs.append(formatvarkw(varkw))
+ return '(' + ', '.join(specs) + ')'
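+
+# Illustrative round trip with getargspec (a sketch, not exercised here):
+#
+#   >>> formatargspec(['a', 'b'], 'args', 'kwargs', (1,))
+#   '(a, b=1, *args, **kwargs)'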
+
+def formatargvalues(args, varargs, varkw, locals,
+ formatarg=str,
+ formatvarargs=lambda name: '*' + name,
+ formatvarkw=lambda name: '**' + name,
+ formatvalue=lambda value: '=' + repr(value),
+ join=joinseq):
+ """Format an argument spec from the 4 values returned by getargvalues.
+
+ The first four arguments are (args, varargs, varkw, locals). The
+ next four arguments are the corresponding optional formatting functions
+ that are called to turn names and values into strings. The ninth
+ argument is an optional function to format the sequence of arguments.
+
+ """
+ def convert(name, locals=locals,
+ formatarg=formatarg, formatvalue=formatvalue):
+ return formatarg(name) + formatvalue(locals[name])
+ specs = [strseq(arg, convert, join) for arg in args]
+
+ if varargs:
+ specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+ if varkw:
+ specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+ return '(' + ', '.join(specs) + ')'
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/_pep440.py b/venv/lib/python3.9/site-packages/numpy/compat/_pep440.py
new file mode 100644
index 00000000..73d0afb5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/_pep440.py
@@ -0,0 +1,487 @@
+"""Utility to compare pep440 compatible version strings.
+
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+"""
+
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+import itertools
+import re
+
+
+__all__ = [
+ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
+]
+
+
+# BEGIN packaging/_structures.py
+
+
+class Infinity:
+ def __repr__(self):
+ return "Infinity"
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ return False
+
+ def __le__(self, other):
+ return False
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ return True
+
+ def __ge__(self, other):
+ return True
+
+ def __neg__(self):
+ return NegativeInfinity
+
+
+Infinity = Infinity()
+
+
+class NegativeInfinity:
+ def __repr__(self):
+ return "-Infinity"
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ return True
+
+ def __le__(self, other):
+ return True
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ return False
+
+ def __ge__(self, other):
+ return False
+
+ def __neg__(self):
+ return Infinity
+
+
+# BEGIN packaging/version.py
+
+
+NegativeInfinity = NegativeInfinity()
+
+_Version = collections.namedtuple(
+ "_Version",
+ ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+ """
+ Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object, depending on whether the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found, users should refer to PEP 440.
+ """
+
+
+class _BaseVersion:
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __lt__(self, other):
+ return self._compare(other, lambda s, o: s < o)
+
+ def __le__(self, other):
+ return self._compare(other, lambda s, o: s <= o)
+
+ def __eq__(self, other):
+ return self._compare(other, lambda s, o: s == o)
+
+ def __ge__(self, other):
+ return self._compare(other, lambda s, o: s >= o)
+
+ def __gt__(self, other):
+ return self._compare(other, lambda s, o: s > o)
+
+ def __ne__(self, other):
+ return self._compare(other, lambda s, o: s != o)
+
+ def _compare(self, other, method):
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+ def __init__(self, version):
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ def __str__(self):
+ return self._version
+
+ def __repr__(self):
+ return "<LegacyVersion({0})>".format(repr(str(self)))
+
+ @property
+ def public(self):
+ return self._version
+
+ @property
+ def base_version(self):
+ return self._version
+
+ @property
+ def local(self):
+ return None
+
+ @property
+ def is_prerelease(self):
+ return False
+
+ @property
+ def is_postrelease(self):
+ return False
+
+
+_legacy_version_component_re = re.compile(
+ r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+ "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version):
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This effectively makes every LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # sort before all PEP 440 versions.
+ epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools,
+    # prior to its adoption of the packaging library.
+ parts = []
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+ parts = tuple(parts)
+
+ return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(
+ r"^\s*" + VERSION_PATTERN + r"\s*$",
+ re.VERBOSE | re.IGNORECASE,
+ )
+
+ def __init__(self, version):
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(
+ match.group("pre_l"),
+ match.group("pre_n"),
+ ),
+ post=_parse_letter_version(
+ match.group("post_l"),
+ match.group("post_n1") or match.group("post_n2"),
+ ),
+ dev=_parse_letter_version(
+ match.group("dev_l"),
+ match.group("dev_n"),
+ ),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ return "<Version({0})>".format(repr(str(self)))
+
+ def __str__(self):
+ parts = []
+
+ # Epoch
+ if self._version.epoch != 0:
+ parts.append("{0}!".format(self._version.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self._version.release))
+
+ # Pre-release
+ if self._version.pre is not None:
+ parts.append("".join(str(x) for x in self._version.pre))
+
+ # Post-release
+ if self._version.post is not None:
+ parts.append(".post{0}".format(self._version.post[1]))
+
+ # Development release
+ if self._version.dev is not None:
+ parts.append(".dev{0}".format(self._version.dev[1]))
+
+ # Local version segment
+ if self._version.local is not None:
+ parts.append(
+ "+{0}".format(".".join(str(x) for x in self._version.local))
+ )
+
+ return "".join(parts)
+
+ @property
+ def public(self):
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ parts = []
+
+ # Epoch
+ if self._version.epoch != 0:
+ parts.append("{0}!".format(self._version.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self._version.release))
+
+ return "".join(parts)
+
+ @property
+ def local(self):
+ version_string = str(self)
+ if "+" in version_string:
+ return version_string.split("+", 1)[1]
+
+ @property
+ def is_prerelease(self):
+ return bool(self._version.dev or self._version.pre)
+
+ @property
+ def is_postrelease(self):
+ return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+ if letter:
+ # We assume there is an implicit 0 in a pre-release if there is
+ # no numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower-case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume that if we are given a number but not given a letter,
+ # then this is using the implicit post release syntax (e.g., 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
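+
+# Illustrative normalizations (a sketch of the rules above):
+#   _parse_letter_version("alpha", None) -> ("a", 0)
+#   _parse_letter_version("rev", "2")    -> ("post", 2)
+#   _parse_letter_version(None, "1")     -> ("post", 1)   # implicit post, e.g. 1.0-1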
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_seperators.split(local)
+ )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+ # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-
+    # leading zeros until we come to something non-zero, take the rest,
+    # re-reverse it back into the correct order, and make it a tuple to use
+    # as our sorting key.
+ release = tuple(
+ reversed(list(
+ itertools.dropwhile(
+ lambda x: x == 0,
+ reversed(release),
+ )
+ ))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre-segment, but we _only_ want to do this
+ # if there is no pre- or a post-segment. If we have one of those, then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ pre = -Infinity
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ pre = Infinity
+
+ # Versions without a post-segment should sort before those with one.
+ if post is None:
+ post = -Infinity
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ dev = Infinity
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ local = -Infinity
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alphanumeric segments sort before numeric segments
+ # - Alphanumeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ local = tuple(
+ (i, "") if isinstance(i, int) else (-Infinity, i)
+ for i in local
+ )
+
+ return epoch, release, pre, post, dev, local
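+
+# Taken together, these keys produce the PEP 440 ordering; e.g. (illustrative):
+#   parse("1.0.dev1") < parse("1.0a1") < parse("1.0")
+#       < parse("1.0.post1") < parse("1.0.post1+anything")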
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/py3k.py b/venv/lib/python3.9/site-packages/numpy/compat/py3k.py
new file mode 100644
index 00000000..3d10bb98
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/py3k.py
@@ -0,0 +1,137 @@
+"""
+Python 3.X compatibility tools.
+
+While this file was originally intended for the Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+ 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+ 'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+ 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+ 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
+
+import sys
+import os
+from pathlib import Path
+import io
+try:
+ import pickle5 as pickle
+except ImportError:
+ import pickle
+
+long = int
+integer_types = (int,)
+basestring = str
+unicode = str
+bytes = bytes
+
+def asunicode(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
+
+def asbytes(s):
+ if isinstance(s, bytes):
+ return s
+ return str(s).encode('latin1')
+
+def asstr(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
+
+def isfileobj(f):
+ return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
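+
+# Note: this returns False for in-memory streams, e.g. (illustrative)
+# isfileobj(io.BytesIO()) is False, since BytesIO is none of the three
+# buffered/raw file types checked above.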
+
+def open_latin1(filename, mode='r'):
+ return open(filename, mode=mode, encoding='iso-8859-1')
+
+def sixu(s):
+ return s
+
+strchar = 'U'
+
+def getexception():
+ return sys.exc_info()[1]
+
+def asbytes_nested(x):
+ if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+ return [asbytes_nested(y) for y in x]
+ else:
+ return asbytes(x)
+
+def asunicode_nested(x):
+ if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+ return [asunicode_nested(y) for y in x]
+ else:
+ return asunicode(x)
+
+def is_pathlib_path(obj):
+ """
+ Check whether obj is a `pathlib.Path` object.
+
+ Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
+ """
+ return isinstance(obj, Path)
+
+# from Python 3.7
+class contextlib_nullcontext:
+ """Context manager that does no additional processing.
+
+ Used as a stand-in for a normal context manager, when a particular
+ block of code is only sometimes used with a normal context manager:
+
+ cm = optional_cm if condition else nullcontext()
+ with cm:
+ # Perform operation, using optional_cm if condition is True
+
+ .. note::
+ Prefer using `contextlib.nullcontext` instead of this context manager.
+ """
+
+ def __init__(self, enter_result=None):
+ self.enter_result = enter_result
+
+ def __enter__(self):
+ return self.enter_result
+
+ def __exit__(self, *excinfo):
+ pass
+
+
+def npy_load_module(name, fn, info=None):
+ """
+    Load a module. Uses ``load_module``, which is deprecated and slated for
+    removal in Python 3.12. An alternative that uses ``exec_module`` is in
+    numpy.distutils.misc_util.exec_mod_from_location.
+
+ .. versionadded:: 1.11.2
+
+ Parameters
+ ----------
+ name : str
+ Full module name.
+ fn : str
+ Path to module file.
+ info : tuple, optional
+ Only here for backward compatibility with Python 2.*.
+
+ Returns
+ -------
+ mod : module
+
+ """
+ # Explicitly lazy import this to avoid paying the cost
+ # of importing importlib at startup
+ from importlib.machinery import SourceFileLoader
+ return SourceFileLoader(name, fn).load_module()
+
+
+os_fspath = os.fspath
+os_PathLike = os.PathLike
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/setup.py b/venv/lib/python3.9/site-packages/numpy/compat/setup.py
new file mode 100644
index 00000000..c1b34a2c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration('compat', parent_package, top_path)
+ config.add_subpackage('tests')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/compat/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py b/venv/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py
new file mode 100644
index 00000000..2b8acbaa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/compat/tests/test_compat.py
@@ -0,0 +1,19 @@
+from os.path import join
+
+from numpy.compat import isfileobj
+from numpy.testing import assert_
+from numpy.testing import tempdir
+
+
+def test_isfileobj():
+ with tempdir(prefix="numpy_test_compat_") as folder:
+ filename = join(folder, 'a.bin')
+
+ with open(filename, 'wb') as f:
+ assert_(isfileobj(f))
+
+ with open(filename, 'ab') as f:
+ assert_(isfileobj(f))
+
+ with open(filename, 'rb') as f:
+ assert_(isfileobj(f))
diff --git a/venv/lib/python3.9/site-packages/numpy/conftest.py b/venv/lib/python3.9/site-packages/numpy/conftest.py
new file mode 100644
index 00000000..3d110c87
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/conftest.py
@@ -0,0 +1,136 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+import os
+import tempfile
+
+import hypothesis
+import pytest
+import numpy
+
+from numpy.core._multiarray_tests import get_fpu_mode
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+ os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for Numpy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the second also
+# forces determinism and is designed for use via np.test().
+hypothesis.settings.register_profile(
+ name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+ name="np.test() profile",
+ deadline=None, print_blob=True, database=None, derandomize=True,
+ suppress_health_check=list(hypothesis.HealthCheck),
+)
+# Note that the default profile is chosen based on the presence
+# of pytest.ini, but can be overridden by passing the
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+ "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
+
+
+def pytest_configure(config):
+ config.addinivalue_line("markers",
+ "valgrind_error: Tests that are known to error under valgrind.")
+ config.addinivalue_line("markers",
+ "leaks_references: Tests that are known to leak references.")
+ config.addinivalue_line("markers",
+ "slow: Tests that are very slow.")
+ config.addinivalue_line("markers",
+ "slow_pypy: Tests that are very slow on pypy.")
+
+
+def pytest_addoption(parser):
+ parser.addoption("--available-memory", action="store", default=None,
+ help=("Set amount of memory available for running the "
+                           "test suite. This can result in tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined "
+                           "automatically."))
+
+
+def pytest_sessionstart(session):
+ available_mem = session.config.getoption('available_memory')
+ if available_mem is not None:
+ os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
+#FIXME when yield tests are gone.
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+ """
+ Check FPU precision mode was not changed during test collection.
+
+ The clumsy way we do it here is mainly necessary because numpy
+ still uses yield tests, which can execute code at test collection
+ time.
+ """
+ global _old_fpu_mode
+
+ mode = get_fpu_mode()
+
+ if _old_fpu_mode is None:
+ _old_fpu_mode = mode
+ elif mode != _old_fpu_mode:
+ _collect_results[item] = (_old_fpu_mode, mode)
+ _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+ """
+ Check FPU precision mode was not changed during the test.
+ """
+ old_mode = get_fpu_mode()
+ yield
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " during the test".format(old_mode, new_mode))
+
+ collect_result = _collect_results.get(request.node)
+ if collect_result is not None:
+ old_mode, new_mode = collect_result
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " when collecting the test".format(old_mode,
+ new_mode))
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+ doctest_namespace['np'] = numpy
+
+@pytest.fixture(autouse=True)
+def env_setup(monkeypatch):
+ monkeypatch.setenv('PYTHONHASHSEED', '0')
+
+
+@pytest.fixture(params=[True, False])
+def weak_promotion(request):
+ """
+    Fixture to run a test under both the "legacy" promotion state and the
+    new weak promotion state (plus warning). Request `weak_promotion` as a
+    parameter of the test function to use it.
+ """
+ state = numpy._get_promotion_state()
+ if request.param:
+ numpy._set_promotion_state("weak_and_warn")
+ else:
+ numpy._set_promotion_state("legacy")
+
+ yield request.param
+ numpy._set_promotion_state(state)
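+
+# A minimal usage sketch (hypothetical test, not part of this file): a test
+# that requests the fixture runs once per promotion state, and the previous
+# state is restored afterwards:
+#
+#   def test_example(weak_promotion):
+#       ...  # runs under "weak_and_warn" promotion once and "legacy" once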
diff --git a/venv/lib/python3.9/site-packages/numpy/core/__init__.py b/venv/lib/python3.9/site-packages/numpy/core/__init__.py
new file mode 100644
index 00000000..748705e3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/__init__.py
@@ -0,0 +1,178 @@
+"""
+Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
+
+Please note that this module is private. All functions and objects
+are available in the main ``numpy`` namespace - use that instead.
+
+"""
+
+from numpy.version import version as __version__
+
+import os
+import warnings
+
+# disables OpenBLAS affinity setting of the main thread that limits
+# python threads or processes to one core
+env_added = []
+for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
+ if envkey not in os.environ:
+ os.environ[envkey] = '1'
+ env_added.append(envkey)
+
+try:
+ from . import multiarray
+except ImportError as exc:
+ import sys
+ msg = """
+
+IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
+
+Importing the numpy C-extensions failed. This error can happen for
+many reasons, often due to issues with your setup or how NumPy was
+installed.
+
+We have compiled some common reasons and troubleshooting tips at:
+
+ https://numpy.org/devdocs/user/troubleshooting-importerror.html
+
+Please note and check the following:
+
+ * The Python version is: Python%d.%d from "%s"
+ * The NumPy version is: "%s"
+
+and make sure that they are the versions you expect.
+Please carefully study the documentation linked above for further help.
+
+Original error was: %s
+""" % (sys.version_info[0], sys.version_info[1], sys.executable,
+ __version__, exc)
+ raise ImportError(msg)
+finally:
+ for envkey in env_added:
+ del os.environ[envkey]
+del envkey
+del env_added
+del os
+
+from . import umath
+
+# Check that multiarray, umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+ hasattr(umath, '_multiarray_umath')):
+ import sys
+ path = sys.modules['numpy'].__path__
+ msg = ("Something is wrong with the numpy installation. "
+ "While importing we detected an older version of "
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
+ "numpy until none is found, then reinstall this version.")
+ raise ImportError(msg.format(path))
+
+from . import numerictypes as nt
+multiarray.set_typeDict(nt.sctypeDict)
+from . import numeric
+from .numeric import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import defchararray as char
+from . import records
+from . import records as rec
+from .records import record, recarray, format_parser
+# Note: module name memmap is overwritten by a class with same name
+from .memmap import *
+from .defchararray import chararray
+from . import function_base
+from .function_base import *
+from . import _machar
+from ._machar import *
+from . import getlimits
+from .getlimits import *
+from . import shape_base
+from .shape_base import *
+from . import einsumfunc
+from .einsumfunc import *
+del nt
+
+from .fromnumeric import amax as max, amin as min, round_ as round
+from .numeric import absolute as abs
+
+# do this after everything else, to minimize the chance of this misleadingly
+# appearing in an import-time traceback
+from . import _add_newdocs
+from . import _add_newdocs_scalars
+# add these for module-freeze analysis (like PyInstaller)
+from . import _dtype_ctypes
+from . import _internal
+from . import _dtype
+from . import _methods
+
+__all__ = ['char', 'rec', 'memmap']
+__all__ += numeric.__all__
+__all__ += ['record', 'recarray', 'format_parser']
+__all__ += ['chararray']
+__all__ += function_base.__all__
+__all__ += getlimits.__all__
+__all__ += shape_base.__all__
+__all__ += einsumfunc.__all__
+
+# We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary,
+# but old pickles saved before 1.20 will be using it, and there is no reason
+# to break loading them.
+def _ufunc_reconstruct(module, name):
+ # The `fromlist` kwarg is required to ensure that `mod` points to the
+ # inner-most module rather than the parent package when module name is
+    # inner-most module rather than the parent package when the module name is
+ # scipy.special.expit for instance.
+ mod = __import__(module, fromlist=[name])
+ return getattr(mod, name)
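+
+# Illustrative: _ufunc_reconstruct("numpy", "add") returns numpy.add, and a
+# nested name such as _ufunc_reconstruct("scipy.special", "expit") resolves
+# the same way thanks to the `fromlist` trick above.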
+
+
+def _ufunc_reduce(func):
+ # Report the `__name__`. pickle will try to find the module. Note that
+    # pickle allows this `__name__` to be a `__qualname__`. It may
+ # make sense to add a `__qualname__` to ufuncs, to allow this more
+ # explicitly (Numba has ufuncs as attributes).
+ # See also: https://github.com/dask/distributed/issues/3450
+ return func.__name__
+
+
+def _DType_reconstruct(scalar_type):
+ # This is a work-around to pickle type(np.dtype(np.float64)), etc.
+ # and it should eventually be replaced with a better solution, e.g. when
+ # DTypes become HeapTypes.
+ return type(dtype(scalar_type))
+
+
+def _DType_reduce(DType):
+ # To pickle a DType without having to add top-level names, pickle the
+ # scalar type for now (and assume that reconstruction will be possible).
+ if DType is dtype:
+ return "dtype" # must pickle `np.dtype` as a singleton.
+ scalar_type = DType.type # pickle the scalar type for reconstruction
+ return _DType_reconstruct, (scalar_type,)
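+
+# A sketch of the resulting round trip (illustrative):
+#   _DType_reduce(type(dtype("float64")))
+#       -> (_DType_reconstruct, (float64,))  # rebuilt as type(dtype(float64))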
+
+
+def __getattr__(name):
+ # Deprecated 2021-10-20, NumPy 1.22
+ if name == "machar":
+ warnings.warn(
+ "The `np.core.machar` module is deprecated (NumPy 1.22)",
+ DeprecationWarning, stacklevel=2,
+ )
+ return _machar
+ raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
+
+
+import copyreg
+
+copyreg.pickle(ufunc, _ufunc_reduce)
+copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
+
+# Unclutter namespace (must keep _*_reconstruct for unpickling)
+del copyreg
+del _ufunc_reduce
+del _DType_reduce
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/core/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/core/__init__.pyi
new file mode 100644
index 00000000..4c7a42bf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/__init__.pyi
@@ -0,0 +1,2 @@
+# NOTE: The `np.core` namespace is deliberately kept empty due to it
+# being private (despite the lack of leading underscore)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_add_newdocs.py b/venv/lib/python3.9/site-packages/numpy/core/_add_newdocs.py
new file mode 100644
index 00000000..d75f9ec6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_add_newdocs.py
@@ -0,0 +1,7083 @@
+"""
+This is only meant to add docs to objects defined in C-extension modules.
+The purpose is to allow easier editing of the docstrings without
+requiring a re-compile.
+
+NOTE: Many of the methods of ndarray have corresponding functions.
+ If you update these docstrings, please keep also the ones in
+ core/fromnumeric.py, core/defmatrix.py up-to-date.
+
+"""
+
+from numpy.core.function_base import add_newdoc
+from numpy.core.overrides import array_function_like_doc
+
+###############################################################################
+#
+# flatiter
+#
+# flatiter needs a toplevel description
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'flatiter',
+ """
+ Flat iterator object to iterate over arrays.
+
+ A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+ It allows iterating over the array as if it were a 1-D array,
+ either in a for-loop or by calling its `next` method.
+
+ Iteration is done in row-major, C-style order (the last
+ index varying the fastest). The iterator can also be indexed using
+ basic slicing or advanced indexing.
+
+ See Also
+ --------
+ ndarray.flat : Return a flat iterator over an array.
+ ndarray.flatten : Returns a flattened copy of an array.
+
+ Notes
+ -----
+ A `flatiter` iterator can not be constructed directly from Python code
+ by calling the `flatiter` constructor.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> type(fl)
+ <class 'numpy.flatiter'>
+ >>> for item in fl:
+ ... print(item)
+ ...
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+
+ >>> fl[2:4]
+ array([2, 3])
+
+ """)
+
+# flatiter attributes
+
+add_newdoc('numpy.core', 'flatiter', ('base',
+ """
+ A reference to the array that is iterated over.
+
+ Examples
+ --------
+ >>> x = np.arange(5)
+ >>> fl = x.flat
+ >>> fl.base is x
+ True
+
+ """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('coords',
+ """
+ An N-dimensional tuple of current coordinates.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> fl.coords
+ (0, 0)
+ >>> next(fl)
+ 0
+ >>> fl.coords
+ (0, 1)
+
+ """))
+
+
+
+add_newdoc('numpy.core', 'flatiter', ('index',
+ """
+ Current flat index into the array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> fl = x.flat
+ >>> fl.index
+ 0
+ >>> next(fl)
+ 0
+ >>> fl.index
+ 1
+
+ """))
+
+# flatiter functions
+
+add_newdoc('numpy.core', 'flatiter', ('__array__',
+    """
+    __array__(type=None)
+
+    Get array from iterator.
+
+ """))
+
+
+add_newdoc('numpy.core', 'flatiter', ('copy',
+ """
+ copy()
+
+ Get a copy of the iterator as a 1-D array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> fl = x.flat
+ >>> fl.copy()
+ array([0, 1, 2, 3, 4, 5])
+
+ """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'nditer',
+ """
+ nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
+
+ Efficient multi-dimensional iterator object to iterate over arrays.
+ To get started using this object, see the
+ :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+ Parameters
+ ----------
+ op : ndarray or sequence of array_like
+ The array(s) to iterate over.
+
+ flags : sequence of str, optional
+ Flags to control the behavior of the iterator.
+
+ * ``buffered`` enables buffering when required.
+ * ``c_index`` causes a C-order index to be tracked.
+ * ``f_index`` causes a Fortran-order index to be tracked.
+ * ``multi_index`` causes a multi-index, or a tuple of indices
+ with one per iteration dimension, to be tracked.
+ * ``common_dtype`` causes all the operands to be converted to
+ a common data type, with copying or buffering as necessary.
+ * ``copy_if_overlap`` causes the iterator to determine if read
+ operands have overlap with write operands, and make temporary
+ copies as necessary to avoid overlap. False positives (needless
+ copying) are possible in some cases.
+ * ``delay_bufalloc`` delays allocation of the buffers until
+ a reset() call is made. Allows ``allocate`` operands to
+ be initialized before their values are copied into the buffers.
+ * ``external_loop`` causes the ``values`` given to be
+ one-dimensional arrays with multiple values instead of
+ zero-dimensional arrays.
+ * ``grow_inner`` allows the ``value`` array sizes to be made
+ larger than the buffer size when both ``buffered`` and
+        ``external_loop`` are used.
+ * ``ranged`` allows the iterator to be restricted to a sub-range
+ of the iterindex values.
+ * ``refs_ok`` enables iteration of reference types, such as
+ object arrays.
+ * ``reduce_ok`` enables iteration of ``readwrite`` operands
+ which are broadcasted, also known as reduction operands.
+ * ``zerosize_ok`` allows `itersize` to be zero.
+ op_flags : list of list of str, optional
+ This is a list of flags for each operand. At minimum, one of
+ ``readonly``, ``readwrite``, or ``writeonly`` must be specified.
+
+ * ``readonly`` indicates the operand will only be read from.
+ * ``readwrite`` indicates the operand will be read from and written to.
+ * ``writeonly`` indicates the operand will only be written to.
+ * ``no_broadcast`` prevents the operand from being broadcasted.
+ * ``contig`` forces the operand data to be contiguous.
+ * ``aligned`` forces the operand data to be aligned.
+ * ``nbo`` forces the operand data to be in native byte order.
+ * ``copy`` allows a temporary read-only copy if required.
+ * ``updateifcopy`` allows a temporary read-write copy if required.
+ * ``allocate`` causes the array to be allocated if it is None
+ in the ``op`` parameter.
+ * ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
+ * ``arraymask`` indicates that this operand is the mask to use
+ for selecting elements when writing to operands with the
+ 'writemasked' flag set. The iterator does not enforce this,
+ but when writing from a buffer back to the array, it only
+ copies those elements indicated by this mask.
+ * ``writemasked`` indicates that only elements where the chosen
+ ``arraymask`` operand is True will be written to.
+ * ``overlap_assume_elementwise`` can be used to mark operands that are
+ accessed only in the iterator order, to allow less conservative
+ copying when ``copy_if_overlap`` is present.
+ op_dtypes : dtype or tuple of dtype(s), optional
+ The required data type(s) of the operands. If copying or buffering
+ is enabled, the data will be converted to/from their original types.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the iteration order. 'C' means C order, 'F' means
+ Fortran order, 'A' means 'F' order if all the arrays are Fortran
+ contiguous, 'C' order otherwise, and 'K' means as close to the
+ order the array elements appear in memory as possible. This also
+ affects the element memory order of ``allocate`` operands, as they
+ are allocated to be compatible with iteration order.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when making a copy
+ or buffering. Setting this to 'unsafe' is not recommended,
+ as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ op_axes : list of list of ints, optional
+        If provided, is a list of ints or None for each operand.
+ The list of axes for an operand is a mapping from the dimensions
+ of the iterator to the dimensions of the operand. A value of
+ -1 can be placed for entries, causing that dimension to be
+ treated as `newaxis`.
+ itershape : tuple of ints, optional
+ The desired shape of the iterator. This allows ``allocate`` operands
+ with a dimension mapped by op_axes not corresponding to a dimension
+ of a different operand to get a value not equal to 1 for that
+ dimension.
+ buffersize : int, optional
+ When buffering is enabled, controls the size of the temporary
+ buffers. Set to 0 for the default value.
+
+ Attributes
+ ----------
+ dtypes : tuple of dtype(s)
+ The data types of the values provided in `value`. This may be
+ different from the operand data types if buffering is enabled.
+ Valid only before the iterator is closed.
+ finished : bool
+ Whether the iteration over the operands is finished or not.
+ has_delayed_bufalloc : bool
+ If True, the iterator was created with the ``delay_bufalloc`` flag,
+ and no reset() function was called on it yet.
+ has_index : bool
+ If True, the iterator was created with either the ``c_index`` or
+ the ``f_index`` flag, and the property `index` can be used to
+ retrieve it.
+ has_multi_index : bool
+ If True, the iterator was created with the ``multi_index`` flag,
+ and the property `multi_index` can be used to retrieve it.
+ index
+ When the ``c_index`` or ``f_index`` flag was used, this property
+ provides access to the index. Raises a ValueError if accessed
+ and ``has_index`` is False.
+ iterationneedsapi : bool
+ Whether iteration requires access to the Python API, for example
+ if one of the operands is an object array.
+ iterindex : int
+ An index which matches the order of iteration.
+ itersize : int
+ Size of the iterator.
+ itviews
+ Structured view(s) of `operands` in memory, matching the reordered
+ and optimized iterator access pattern. Valid only before the iterator
+ is closed.
+ multi_index
+ When the ``multi_index`` flag was used, this property
+ provides access to the index. Raises a ValueError if accessed
+        and ``has_multi_index`` is False.
+ ndim : int
+ The dimensions of the iterator.
+ nop : int
+ The number of iterator operands.
+ operands : tuple of operand(s)
+ The array(s) to be iterated over. Valid only before the iterator is
+ closed.
+ shape : tuple of ints
+ Shape tuple, the shape of the iterator.
+ value
+ Value of ``operands`` at current iteration. Normally, this is a
+ tuple of array scalars, but if the flag ``external_loop`` is used,
+ it is a tuple of one dimensional arrays.
+
+ Notes
+ -----
+ `nditer` supersedes `flatiter`. The iterator implementation behind
+ `nditer` is also exposed by the NumPy C API.
+
+ The Python exposure supplies two iteration interfaces, one which follows
+ the Python iterator protocol, and another which mirrors the C-style
+ do-while pattern. The native Python approach is better in most cases, but
+ if you need the coordinates or index of an iterator, use the C-style pattern.
+
+ Examples
+ --------
+ Here is how we might write an ``iter_add`` function, using the
+ Python iterator protocol:
+
+ >>> def iter_add_py(x, y, out=None):
+ ... addop = np.add
+ ... it = np.nditer([x, y, out], [],
+ ... [['readonly'], ['readonly'], ['writeonly','allocate']])
+ ... with it:
+ ... for (a, b, c) in it:
+ ... addop(a, b, out=c)
+ ... return it.operands[2]
+
+ Here is the same function, but following the C-style pattern:
+
+ >>> def iter_add(x, y, out=None):
+ ... addop = np.add
+ ... it = np.nditer([x, y, out], [],
+ ... [['readonly'], ['readonly'], ['writeonly','allocate']])
+ ... with it:
+ ... while not it.finished:
+ ... addop(it[0], it[1], out=it[2])
+ ... it.iternext()
+ ... return it.operands[2]
+
+ Here is an example outer product function:
+
+ >>> def outer_it(x, y, out=None):
+ ... mulop = np.multiply
+ ... it = np.nditer([x, y, out], ['external_loop'],
+ ... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+ ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+ ... [-1] * x.ndim + list(range(y.ndim)),
+ ... None])
+ ... with it:
+ ... for (a, b, c) in it:
+ ... mulop(a, b, out=c)
+ ... return it.operands[2]
+
+ >>> a = np.arange(2)+1
+ >>> b = np.arange(3)+1
+ >>> outer_it(a,b)
+ array([[1, 2, 3],
+ [2, 4, 6]])
+
+ Here is an example function which operates like a "lambda" ufunc:
+
+    >>> def luf(lambdaexpr, *args, **kwargs):
+ ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
+ ... nargs = len(args)
+ ... op = (kwargs.get('out',None),) + args
+ ... it = np.nditer(op, ['buffered','external_loop'],
+ ... [['writeonly','allocate','no_broadcast']] +
+ ... [['readonly','nbo','aligned']]*nargs,
+ ... order=kwargs.get('order','K'),
+ ... casting=kwargs.get('casting','safe'),
+ ... buffersize=kwargs.get('buffersize',0))
+ ... while not it.finished:
+    ...     it[0] = lambdaexpr(*it[1:])
+ ... it.iternext()
+ ... return it.operands[0]
+
+ >>> a = np.arange(5)
+ >>> b = np.ones(5)
+ >>> luf(lambda i,j:i*i + j/2, a, b)
+ array([ 0.5, 1.5, 4.5, 9.5, 16.5])
+
+ If operand flags ``"writeonly"`` or ``"readwrite"`` are used the
+ operands may be views into the original data with the
+ `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
+ context manager or the `nditer.close` method must be called before
+ using the result. The temporary data will be written back to the
+ original data when the `__exit__` function is called but not before:
+
+ >>> a = np.arange(6, dtype='i4')[::-2]
+ >>> with np.nditer(a, [],
+ ... [['writeonly', 'updateifcopy']],
+ ... casting='unsafe',
+ ... op_dtypes=[np.dtype('f4')]) as i:
+ ... x = i.operands[0]
+ ... x[:] = [-1, -2, -3]
+ ... # a still unchanged here
+ >>> a, x
+ (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
+
+ It is important to note that once the iterator is exited, dangling
+ references (like `x` in the example) may or may not share data with
+ the original data `a`. If writeback semantics were active, i.e. if
+ `x.base.flags.writebackifcopy` is `True`, then exiting the iterator
+    will sever the connection between `x` and `a`; writing to `x` will
+ no longer write to `a`. If writeback semantics are not active, then
+ `x.data` will still point at some part of `a.data`, and writing to
+ one will affect the other.
+
+ Context management and the `close` method appeared in version 1.15.0.
+
+ """)
+
+# nditer methods
+
+add_newdoc('numpy.core', 'nditer', ('copy',
+ """
+ copy()
+
+ Get a copy of the iterator in its current state.
+
+ Examples
+ --------
+ >>> x = np.arange(10)
+ >>> y = x + 1
+ >>> it = np.nditer([x, y])
+ >>> next(it)
+ (array(0), array(1))
+ >>> it2 = it.copy()
+ >>> next(it2)
+ (array(1), array(2))
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('operands',
+ """
+ operands[`Slice`]
+
+ The array(s) to be iterated over. Valid only before the iterator is closed.
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('debug_print',
+ """
+ debug_print()
+
+ Print the current state of the `nditer` instance and debug info to stdout.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
+ """
+ enable_external_loop()
+
+    When the "external_loop" flag was not used during construction, but
+ is desired, this modifies the iterator to behave as if the flag
+ was specified.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('iternext',
+ """
+ iternext()
+
+ Check whether iterations are left, and perform a single internal iteration
+    without returning the result. Used in the C-style do-while pattern.
+    For an example, see `nditer`.
+
+ Returns
+ -------
+ iternext : bool
+ Whether or not there are iterations left.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_axis',
+ """
+ remove_axis(i, /)
+
+ Removes axis `i` from the iterator. Requires that the flag "multi_index"
+ be enabled.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
+ """
+ remove_multi_index()
+
+ When the "multi_index" flag was specified, this removes it, allowing
+ the internal iteration structure to be optimized further.
+
+ """))
+
+add_newdoc('numpy.core', 'nditer', ('reset',
+ """
+ reset()
+
+ Reset the iterator to its initial state.
+
+ """))
+
+add_newdoc('numpy.core', 'nested_iters',
+ """
+ nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \
+ order="K", casting="safe", buffersize=0)
+
+ Create nditers for use in nested loops
+
+ Create a tuple of `nditer` objects which iterate in nested loops over
+ different axes of the op argument. The first iterator is used in the
+ outermost loop, the last in the innermost loop. Advancing one will change
+ the subsequent iterators to point at its new element.
+
+ Parameters
+ ----------
+ op : ndarray or sequence of array_like
+ The array(s) to iterate over.
+
+ axes : list of list of int
+ Each item is used as an "op_axes" argument to an nditer
+
+ flags, op_flags, op_dtypes, order, casting, buffersize (optional)
+ See `nditer` parameters of the same name
+
+ Returns
+ -------
+ iters : tuple of nditer
+ An nditer for each item in `axes`, outermost first
+
+ See Also
+ --------
+ nditer
+
+ Examples
+ --------
+
+    Basic usage. Note how y is the "flattened" version of
+    [a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
+    the first iter's axes as [1]:
+
+ >>> a = np.arange(12).reshape(2, 3, 2)
+ >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
+ >>> for x in i:
+ ... print(i.multi_index)
+ ... for y in j:
+ ... print('', j.multi_index, y)
+ (0,)
+ (0, 0) 0
+ (0, 1) 1
+ (1, 0) 6
+ (1, 1) 7
+ (1,)
+ (0, 0) 2
+ (0, 1) 3
+ (1, 0) 8
+ (1, 1) 9
+ (2,)
+ (0, 0) 4
+ (0, 1) 5
+ (1, 0) 10
+ (1, 1) 11
+
+ """)
+
+add_newdoc('numpy.core', 'nditer', ('close',
+ """
+ close()
+
+ Resolve all writeback semantics in writeable operands.
+
+ .. versionadded:: 1.15.0
+
+ See Also
+ --------
+
+ :ref:`nditer-context-manager`
+
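+    Examples
+    --------
+    A short sketch of closing an iterator explicitly instead of using a
+    ``with`` block; the allocated result is read before the call:
+
+    >>> a = np.arange(3)
+    >>> it = np.nditer([a, None])
+    >>> for x, y in it:
+    ...     y[...] = 2 * x
+    >>> result = it.operands[1]  # keep a reference before closing
+    >>> it.close()
+    >>> result
+    array([0, 2, 4])
+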
+ """))
+
+
+###############################################################################
+#
+# broadcast
+#
+###############################################################################
+
+add_newdoc('numpy.core', 'broadcast',
+ """
+ Produce an object that mimics broadcasting.
+
+ Parameters
+ ----------
+ in1, in2, ... : array_like
+ Input parameters.
+
+ Returns
+ -------
+ b : broadcast object
+ Broadcast the input parameters against one another, and
+ return an object that encapsulates the result.
+ Amongst others, it has ``shape`` and ``nd`` properties, and
+ may be used as an iterator.
+
+ See Also
+ --------
+ broadcast_arrays
+ broadcast_to
+ broadcast_shapes
+
+ Examples
+ --------
+
+ Manually adding two vectors, using broadcasting:
+
+ >>> x = np.array([[1], [2], [3]])
+ >>> y = np.array([4, 5, 6])
+ >>> b = np.broadcast(x, y)
+
+ >>> out = np.empty(b.shape)
+ >>> out.flat = [u+v for (u,v) in b]
+ >>> out
+ array([[5., 6., 7.],
+ [6., 7., 8.],
+ [7., 8., 9.]])
+
+ Compare against built-in broadcasting:
+
+ >>> x + y
+ array([[5, 6, 7],
+ [6, 7, 8],
+ [7, 8, 9]])
+
+ """)
+
+# attributes
+
+add_newdoc('numpy.core', 'broadcast', ('index',
+ """
+    Current index in broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([[1], [2], [3]])
+ >>> y = np.array([4, 5, 6])
+ >>> b = np.broadcast(x, y)
+ >>> b.index
+ 0
+ >>> next(b), next(b), next(b)
+ ((1, 4), (1, 5), (1, 6))
+ >>> b.index
+ 3
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('iters',
+ """
+    Tuple of iterators along ``self``'s "components".
+
+ Returns a tuple of `numpy.flatiter` objects, one for each "component"
+ of ``self``.
+
+ See Also
+ --------
+ numpy.flatiter
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> row, col = b.iters
+ >>> next(row), next(col)
+ (1, 4)
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('ndim',
+ """
+ Number of dimensions of broadcasted result. Alias for `nd`.
+
+ .. versionadded:: 1.12.0
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.ndim
+ 2
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('nd',
+ """
+    Number of dimensions of broadcasted result. For code intended for NumPy
+    1.12.0 and later, the more consistent `ndim` is preferred.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.nd
+ 2
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('numiter',
+ """
+ Number of iterators possessed by the broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.numiter
+ 2
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('shape',
+ """
+ Shape of broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.shape
+ (3, 3)
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('size',
+ """
+ Total size of broadcasted result.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.size
+ 9
+
+ """))
+
+add_newdoc('numpy.core', 'broadcast', ('reset',
+ """
+ reset()
+
+ Reset the broadcasted result's iterator(s).
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> y = np.array([[4], [5], [6]])
+ >>> b = np.broadcast(x, y)
+ >>> b.index
+ 0
+ >>> next(b), next(b), next(b)
+ ((1, 4), (2, 4), (3, 4))
+ >>> b.index
+ 3
+ >>> b.reset()
+ >>> b.index
+ 0
+
+ """))
+
+###############################################################################
+#
+# numpy functions
+#
+###############################################################################
+
+add_newdoc('numpy.core.multiarray', 'array',
+ """
+ array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
+ like=None)
+
+ Create an array.
+
+ Parameters
+ ----------
+ object : array_like
+ An array, any object exposing the array interface, an object whose
+ __array__ method returns an array, or any (nested) sequence.
+ If object is a scalar, a 0-dimensional array containing object is
+ returned.
+ dtype : data-type, optional
+ The desired data-type for the array. If not given, then the type will
+ be determined as the minimum type required to hold the objects in the
+ sequence.
+ copy : bool, optional
+ If true (default), then the object is copied. Otherwise, a copy will
+ only be made if __array__ returns a copy, if obj is a nested sequence,
+ or if a copy is needed to satisfy any of the other requirements
+ (`dtype`, `order`, etc.).
+ order : {'K', 'A', 'C', 'F'}, optional
+ Specify the memory layout of the array. If object is not an array, the
+ newly created array will be in C order (row major) unless 'F' is
+ specified, in which case it will be in Fortran order (column major).
+ If object is an array the following holds.
+
+ ===== ========= ===================================================
+ order no copy copy=True
+ ===== ========= ===================================================
+ 'K' unchanged F & C order preserved, otherwise most similar order
+ 'A' unchanged F order if input is F and not C, otherwise C order
+ 'C' C order C order
+ 'F' F order F order
+ ===== ========= ===================================================
+
+ When ``copy=False`` and a copy is made for other reasons, the result is
+ the same as if ``copy=True``, with some exceptions for 'A', see the
+ Notes section. The default order is 'K'.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+ ndmin : int, optional
+ Specifies the minimum number of dimensions that the resulting
+ array should have. Ones will be prepended to the shape as
+ needed to meet this requirement.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ An array object satisfying the specified requirements.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+
+ Notes
+ -----
+ When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
+ and a copy is forced by a change in dtype, then the order of the result is
+ not necessarily 'C' as expected. This is likely a bug.
+
+ Examples
+ --------
+ >>> np.array([1, 2, 3])
+ array([1, 2, 3])
+
+ Upcasting:
+
+ >>> np.array([1, 2, 3.0])
+ array([ 1., 2., 3.])
+
+ More than one dimension:
+
+ >>> np.array([[1, 2], [3, 4]])
+ array([[1, 2],
+ [3, 4]])
+
+ Minimum dimensions 2:
+
+ >>> np.array([1, 2, 3], ndmin=2)
+ array([[1, 2, 3]])
+
+ Type provided:
+
+ >>> np.array([1, 2, 3], dtype=complex)
+ array([ 1.+0.j, 2.+0.j, 3.+0.j])
+
+ Data-type consisting of more than one element:
+
+ >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
+ >>> x['a']
+ array([1, 3])
+
+ Creating an array from sub-classes:
+
+ >>> np.array(np.mat('1 2; 3 4'))
+ array([[1, 2],
+ [3, 4]])
+
+ >>> np.array(np.mat('1 2; 3 4'), subok=True)
+ matrix([[1, 2],
+ [3, 4]])
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'asarray',
+ """
+ asarray(a, dtype=None, order=None, *, like=None)
+
+ Convert the input to an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Memory layout. 'A' and 'K' depend on the order of input array a.
+ 'C' row-major (C-style),
+ 'F' column-major (Fortran-style) memory representation.
+ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+ 'K' (keep) preserve input order
+ Defaults to 'K'.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array interpretation of `a`. No copy is performed if the input
+ is already an ndarray with matching dtype and order. If `a` is a
+ subclass of ndarray, a base class ndarray is returned.
+
+ See Also
+ --------
+ asanyarray : Similar function which passes through subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> np.asarray(a)
+ array([1, 2])
+
+ Existing arrays are not copied:
+
+ >>> a = np.array([1, 2])
+ >>> np.asarray(a) is a
+ True
+
+ If `dtype` is set, array is copied only if dtype does not match:
+
+ >>> a = np.array([1, 2], dtype=np.float32)
+ >>> np.asarray(a, dtype=np.float32) is a
+ True
+ >>> np.asarray(a, dtype=np.float64) is a
+ False
+
+ Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+ >>> issubclass(np.recarray, np.ndarray)
+ True
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asarray(a) is a
+ False
+ >>> np.asanyarray(a) is a
+ True
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'asanyarray',
+ """
+ asanyarray(a, dtype=None, order=None, *, like=None)
+
+ Convert the input to an ndarray, but pass ndarray subclasses through.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes scalars, lists, lists of tuples, tuples, tuples of tuples,
+ tuples of lists, and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Memory layout. 'A' and 'K' depend on the order of input array a.
+ 'C' row-major (C-style),
+ 'F' column-major (Fortran-style) memory representation.
+ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+ 'K' (keep) preserve input order
+ Defaults to 'C'.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray or an ndarray subclass
+ Array interpretation of `a`. If `a` is an ndarray or a subclass
+ of ndarray, it is returned as-is and no copy is performed.
+
+ See Also
+ --------
+ asarray : Similar function which always returns ndarrays.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and
+ Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> np.asanyarray(a)
+ array([1, 2])
+
+ Instances of `ndarray` subclasses are passed through as-is:
+
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asanyarray(a) is a
+ True
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'ascontiguousarray',
+ """
+ ascontiguousarray(a, dtype=None, *, like=None)
+
+ Return a contiguous array (ndim >= 1) in memory (C order).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ Data-type of returned array.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Contiguous array of same shape and content as `a`, with type `dtype`
+ if specified.
+
+ See Also
+ --------
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ Starting with a Fortran-contiguous array:
+
+ >>> x = np.ones((2, 3), order='F')
+ >>> x.flags['F_CONTIGUOUS']
+ True
+
+ Calling ``ascontiguousarray`` makes a C-contiguous copy:
+
+ >>> y = np.ascontiguousarray(x)
+ >>> y.flags['C_CONTIGUOUS']
+ True
+ >>> np.may_share_memory(x, y)
+ False
+
+ Now, starting with a C-contiguous array:
+
+ >>> x = np.ones((2, 3), order='C')
+ >>> x.flags['C_CONTIGUOUS']
+ True
+
+ Then, calling ``ascontiguousarray`` returns the same object:
+
+ >>> y = np.ascontiguousarray(x)
+ >>> x is y
+ True
+
+    Note: This function returns an array with at least one dimension (1-d),
+    so it will not preserve 0-d arrays.
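+
+    For example, a 0-d input comes back as a 1-d array:
+
+    >>> np.ascontiguousarray(np.array(5))
+    array([5])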
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'asfortranarray',
+ """
+ asfortranarray(a, dtype=None, *, like=None)
+
+ Return an array (ndim >= 1) laid out in Fortran order in memory.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ By default, the data-type is inferred from the input data.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ The input `a` in Fortran, or column-major, order.
+
+ See Also
+ --------
+ ascontiguousarray : Convert input to a contiguous (C order) array.
+ asanyarray : Convert input to an ndarray with either row or
+ column-major memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ Starting with a C-contiguous array:
+
+ >>> x = np.ones((2, 3), order='C')
+ >>> x.flags['C_CONTIGUOUS']
+ True
+
+ Calling ``asfortranarray`` makes a Fortran-contiguous copy:
+
+ >>> y = np.asfortranarray(x)
+ >>> y.flags['F_CONTIGUOUS']
+ True
+ >>> np.may_share_memory(x, y)
+ False
+
+ Now, starting with a Fortran-contiguous array:
+
+ >>> x = np.ones((2, 3), order='F')
+ >>> x.flags['F_CONTIGUOUS']
+ True
+
+ Then, calling ``asfortranarray`` returns the same object:
+
+ >>> y = np.asfortranarray(x)
+ >>> x is y
+ True
+
+    Note: This function returns an array with at least one dimension (1-d),
+    so it will not preserve 0-d arrays.
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'empty',
+ """
+ empty(shape, dtype=float, order='C', *, like=None)
+
+ Return a new array of given shape and type, without initializing entries.
+
+ Parameters
+ ----------
+ shape : int or tuple of int
+ Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+        Desired output data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: 'C'
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data of the given shape, dtype, and
+ order. Object arrays will be initialized to None.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+
+ Notes
+ -----
+ `empty`, unlike `zeros`, does not set the array values to zero,
+ and may therefore be marginally faster. On the other hand, it requires
+ the user to manually set all the values in the array, and should be
+ used with caution.
+
+ Examples
+ --------
+ >>> np.empty([2, 2])
+ array([[ -9.74499359e+001, 6.69583040e-309],
+ [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
+
+ >>> np.empty([2, 2], dtype=int)
+ array([[-1073741821, -1067949133],
+ [ 496041986, 19249760]]) #uninitialized
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'scalar',
+ """
+ scalar(dtype, obj)
+
+ Return a new scalar array of the given type initialized with obj.
+
+ This function is meant mainly for pickle support. `dtype` must be a
+ valid data-type descriptor. If `dtype` corresponds to an object
+ descriptor, then `obj` can be any object, otherwise `obj` must be a
+ string. If `obj` is not given, it will be interpreted as None for object
+ type and as zeros for all other types.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'zeros',
+ """
+ zeros(shape, dtype=float, order='C', *, like=None)
+
+ Return a new array of given shape and type, filled with zeros.
+
+ Parameters
+ ----------
+ shape : int or tuple of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ The desired data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: 'C'
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of zeros with the given shape, dtype, and order.
+
+ See Also
+ --------
+ zeros_like : Return an array of zeros with shape and type of input.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ full : Return a new array of given shape filled with value.
+
+ Examples
+ --------
+ >>> np.zeros(5)
+ array([ 0., 0., 0., 0., 0.])
+
+ >>> np.zeros((5,), dtype=int)
+ array([0, 0, 0, 0, 0])
+
+ >>> np.zeros((2, 1))
+ array([[ 0.],
+ [ 0.]])
+
+ >>> s = (2,2)
+ >>> np.zeros(s)
+ array([[ 0., 0.],
+ [ 0., 0.]])
+
+ >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+ array([(0, 0), (0, 0)],
+ dtype=[('x', '<i4'), ('y', '<i4')])
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'set_typeDict',
+ """set_typeDict(dict)
+
+ Set the internal dictionary that can look up an array type using a
+ registered code.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'fromstring',
+ """
+ fromstring(string, dtype=float, count=-1, *, sep, like=None)
+
+ A new 1-D array initialized from text data in a string.
+
+ Parameters
+ ----------
+ string : str
+ A string containing the data.
+ dtype : data-type, optional
+ The data type of the array; default: float. For binary input data,
+ the data must be in exactly this format. Most builtin numeric types are
+ supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
+ count : int, optional
+ Read this number of `dtype` elements from the data. If this is
+ negative (the default), the count will be determined from the
+ length of the data.
+ sep : str, optional
+ The string separating numbers in the data; extra whitespace between
+ elements is also ignored.
+
+ .. deprecated:: 1.14
+ Passing ``sep=''``, the default, is deprecated since it will
+ trigger the deprecated binary mode of this function. This mode
+ interprets `string` as binary bytes, rather than ASCII text with
+ decimal numbers, an operation which is better spelt
+ ``frombuffer(string, dtype, count)``. If `string` contains unicode
+ text, the binary mode of `fromstring` will first encode it into
+ bytes using utf-8, which will not produce sane results.
+
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ arr : ndarray
+ The constructed array.
+
+ Raises
+ ------
+ ValueError
+ If the string is not the correct size to satisfy the requested
+ `dtype` and `count`.
+
+ See Also
+ --------
+ frombuffer, fromfile, fromiter
+
+ Examples
+ --------
+ >>> np.fromstring('1 2', dtype=int, sep=' ')
+ array([1, 2])
+ >>> np.fromstring('1, 2', dtype=int, sep=',')
+ array([1, 2])
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+ """
+ compare_chararrays(a1, a2, cmp, rstrip)
+
+    Performs element-wise comparison of two string arrays using the
+    comparison operator specified by `cmp`.
+
+ Parameters
+ ----------
+ a1, a2 : array_like
+ Arrays to be compared.
+ cmp : {"<", "<=", "==", ">=", ">", "!="}
+ Type of comparison.
+    rstrip : bool
+        If True, trailing spaces are removed from the strings before the
+        comparison.
+
+ Returns
+ -------
+    out : ndarray
+        The output array of type bool with the same shape as `a1` and `a2`.
+
+ Raises
+ ------
+    ValueError
+        If `cmp` is not valid.
+    TypeError
+        If at least one of `a1` or `a2` is a non-string array.
+
+ Examples
+ --------
+ >>> a = np.array(["a", "b", "cde"])
+ >>> b = np.array(["a", "a", "dec"])
+ >>> np.compare_chararrays(a, b, ">", True)
+ array([False, True, False])
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'fromiter',
+ """
+ fromiter(iter, dtype, count=-1, *, like=None)
+
+ Create a new 1-dimensional array from an iterable object.
+
+ Parameters
+ ----------
+ iter : iterable object
+ An iterable object providing data for the array.
+ dtype : data-type
+ The data-type of the returned array.
+
+ .. versionchanged:: 1.23
+ Object and subarray dtypes are now supported (note that the final
+ result is not 1-D for a subarray dtype).
+
+ count : int, optional
+ The number of items to read from *iterable*. The default is -1,
+ which means all data is read.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ The output array.
+
+ Notes
+ -----
+ Specify `count` to improve performance. It allows ``fromiter`` to
+ pre-allocate the output array, instead of resizing it on demand.
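+
+    For instance, when the number of items is known in advance, it can be
+    passed explicitly (a minimal sketch)::
+
+        np.fromiter((x * x for x in range(1000)), dtype=float, count=1000)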
+
+ Examples
+ --------
+ >>> iterable = (x*x for x in range(5))
+ >>> np.fromiter(iterable, float)
+ array([ 0., 1., 4., 9., 16.])
+
+ A carefully constructed subarray dtype will lead to higher dimensional
+ results:
+
+ >>> iterable = ((x+1, x+2) for x in range(5))
+ >>> np.fromiter(iterable, dtype=np.dtype((int, 2)))
+ array([[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6]])
+
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'fromfile',
+ """
+ fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
+
+ Construct an array from data in a text or binary file.
+
+ A highly efficient way of reading binary data with a known data-type,
+ as well as parsing simply formatted text files. Data written using the
+ `tofile` method can be read using this function.
+
+ Parameters
+ ----------
+ file : file or str or Path
+ Open file object or filename.
+
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
+ dtype : data-type
+ Data type of the returned array.
+ For binary files, it is used to determine the size and byte-order
+ of the items in the file.
+ Most builtin numeric types are supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
+ count : int
+ Number of items to read. ``-1`` means all items (i.e., the complete
+ file).
+ sep : str
+ Separator between items if file is a text file.
+ Empty ("") separator means the file should be treated as binary.
+ Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace character.
+ offset : int
+ The offset (in bytes) from the file's current position. Defaults to 0.
+ Only permitted for binary files.
+
+ .. versionadded:: 1.17.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+    See Also
+ --------
+ load, save
+ ndarray.tofile
+ loadtxt : More flexible way of loading data from a text file.
+
+ Notes
+ -----
+ Do not rely on the combination of `tofile` and `fromfile` for
+ data storage, as the binary files generated are not platform
+ independent. In particular, no byte-order or data-type information is
+ saved. Data can be stored in the platform independent ``.npy`` format
+ using `save` and `load` instead.
+
+ Examples
+ --------
+ Construct an ndarray:
+
+ >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
+ ... ('temp', float)])
+ >>> x = np.zeros((1,), dtype=dt)
+ >>> x['time']['min'] = 10; x['temp'] = 98.25
+ >>> x
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ Save the raw data to disk:
+
+ >>> import tempfile
+ >>> fname = tempfile.mkstemp()[1]
+ >>> x.tofile(fname)
+
+ Read the raw data from disk:
+
+ >>> np.fromfile(fname, dtype=dt)
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ The recommended way to store and load data:
+
+ >>> np.save(fname, x)
+ >>> np.load(fname + '.npy')
+ array([((10, 0), 98.25)],
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'frombuffer',
+ """
+ frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
+
+ Interpret a buffer as a 1-dimensional array.
+
+ Parameters
+ ----------
+ buffer : buffer_like
+ An object that exposes the buffer interface.
+ dtype : data-type, optional
+ Data-type of the returned array; default: float.
+ count : int, optional
+ Number of items to read. ``-1`` means all data in the buffer.
+ offset : int, optional
+ Start reading the buffer from this offset (in bytes); default: 0.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+
+    See Also
+ --------
+ ndarray.tobytes
+ Inverse of this operation, construct Python bytes from the raw data
+ bytes in the array.
+
+ Notes
+ -----
+ If the buffer has data that is not in machine byte-order, this should
+ be specified as part of the data-type, e.g.::
+
+ >>> dt = np.dtype(int)
+ >>> dt = dt.newbyteorder('>')
+ >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
+
+ The data of the resulting array will not be byteswapped, but will be
+ interpreted correctly.
+
+ This function creates a view into the original object. This should be safe
+ in general, but it may make sense to copy the result when the original
+ object is mutable or untrusted.
+
+ Examples
+ --------
+ >>> s = b'hello world'
+ >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+ array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
+
+ >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+ array([1, 2], dtype=uint8)
+ >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+ array([1, 2, 3], dtype=uint8)
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', 'from_dlpack',
+ """
+ from_dlpack(x, /)
+
+ Create a NumPy array from an object implementing the ``__dlpack__``
+ protocol. Generally, the returned NumPy array is a read-only view
+ of the input object. See [1]_ and [2]_ for more details.
+
+ Parameters
+ ----------
+ x : object
+ A Python object that implements the ``__dlpack__`` and
+ ``__dlpack_device__`` methods.
+
+ Returns
+ -------
+ out : ndarray
+
+ References
+ ----------
+ .. [1] Array API documentation,
+ https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack
+
+ .. [2] Python specification for DLPack,
+ https://dmlc.github.io/dlpack/latest/python_spec.html
+
+ Examples
+ --------
+ >>> import torch
+ >>> x = torch.arange(10)
+ >>> # create a view of the torch tensor "x" in NumPy
+ >>> y = np.from_dlpack(x)
+ """)
+
+add_newdoc('numpy.core', 'fastCopyAndTranspose',
+ """
+ fastCopyAndTranspose(a)
+
+ .. deprecated:: 1.24
+
+ fastCopyAndTranspose is deprecated and will be removed. Use the copy and
+    transpose methods instead, e.g. ``arr.T.copy()``.
+ """)
+
+add_newdoc('numpy.core.multiarray', 'correlate',
+    """cross_correlate(a, v, mode=0)""")
+
+add_newdoc('numpy.core.multiarray', 'arange',
+ """
+ arange([start,] stop[, step,], dtype=None, *, like=None)
+
+ Return evenly spaced values within a given interval.
+
+ ``arange`` can be called with a varying number of positional arguments:
+
+ * ``arange(stop)``: Values are generated within the half-open interval
+ ``[0, stop)`` (in other words, the interval including `start` but
+ excluding `stop`).
+ * ``arange(start, stop)``: Values are generated within the half-open
+ interval ``[start, stop)``.
+ * ``arange(start, stop, step)`` Values are generated within the half-open
+ interval ``[start, stop)``, with spacing between values given by
+ ``step``.
+
+ For integer arguments the function is roughly equivalent to the Python
+ built-in :py:class:`range`, but returns an ndarray rather than a ``range``
+ instance.
+
+ When using a non-integer step, such as 0.1, it is often better to use
+ `numpy.linspace`.
+
+ See the Warning sections below for more information.
+
+ Parameters
+ ----------
+ start : integer or real, optional
+ Start of interval. The interval includes this value. The default
+ start value is 0.
+ stop : integer or real
+ End of interval. The interval does not include this value, except
+ in some cases where `step` is not an integer and floating point
+ round-off affects the length of `out`.
+ step : integer or real, optional
+ Spacing between values. For any output `out`, this is the distance
+ between two adjacent values, ``out[i+1] - out[i]``. The default
+        step size is 1. If `step` is specified as a positional argument,
+ `start` must also be given.
+ dtype : dtype, optional
+ The type of the output array. If `dtype` is not given, infer the data
+ type from the other input arguments.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ arange : ndarray
+ Array of evenly spaced values.
+
+ For floating point arguments, the length of the result is
+        ``ceil((stop - start)/step)``. Because of floating point round-off,
+ this rule may result in the last element of `out` being greater
+ than `stop`.
+
+ Warnings
+ --------
+ The length of the output might not be numerically stable.
+
+ Another stability issue is due to the internal implementation of
+ `numpy.arange`.
+ The actual step value used to populate the array is
+ ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss
+ can occur here, due to casting or due to using floating points when
+ `start` is much larger than `step`. This can lead to unexpected
+ behaviour. For example::
+
+ >>> np.arange(0, 5, 0.5, dtype=int)
+ array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+ >>> np.arange(-3, 3, 0.5, dtype=int)
+ array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
+
+ In such cases, the use of `numpy.linspace` should be preferred.
+
+ The built-in :py:class:`range` generates :std:doc:`Python built-in integers
+ that have arbitrary size <python:c-api/long>`, while `numpy.arange`
+ produces `numpy.int32` or `numpy.int64` numbers. This may result in
+ incorrect results for large integer values::
+
+ >>> power = 40
+ >>> modulo = 10000
+ >>> x1 = [(n ** power) % modulo for n in range(8)]
+ >>> x2 = [(n ** power) % modulo for n in np.arange(8)]
+ >>> print(x1)
+ [0, 1, 7776, 8801, 6176, 625, 6576, 4001] # correct
+ >>> print(x2)
+ [0, 1, 7776, 7185, 0, 5969, 4816, 3361] # incorrect
+
+ See Also
+ --------
+ numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+ numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
+ numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> np.arange(3)
+ array([0, 1, 2])
+ >>> np.arange(3.0)
+ array([ 0., 1., 2.])
+ >>> np.arange(3,7)
+ array([3, 4, 5, 6])
+ >>> np.arange(3,7,2)
+ array([3, 5])
+
+ """.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ ))
+
+add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
+ """_get_ndarray_c_version()
+
+ Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
+
+ """)
+
+add_newdoc('numpy.core.multiarray', '_reconstruct',
+ """_reconstruct(subtype, shape, dtype)
+
+ Construct an empty array. Used by Pickles.
+
+ """)
+
+
+add_newdoc('numpy.core.multiarray', 'set_string_function',
+ """
+ set_string_function(f, repr=1)
+
+ Internal method to set a function to be used when pretty printing arrays.
+
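+    Examples
+    --------
+    An illustrative sketch; with the default ``repr=1`` the replacement
+    function is used for ``repr``:
+
+    >>> np.set_string_function(lambda a: 'array of size %d' % a.size)
+    >>> repr(np.arange(3))
+    'array of size 3'
+    >>> np.set_string_function(None)  # restore the default
+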
+ """)
+
+add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
+ """
+ set_numeric_ops(op1=func1, op2=func2, ...)
+
+ Set numerical operators for array objects.
+
+ .. deprecated:: 1.16
+
+ For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
+ For ndarray subclasses, define the ``__array_ufunc__`` method and
+ override the relevant ufunc.
+
+ Parameters
+ ----------
+ op1, op2, ... : callable
+ Each ``op = func`` pair describes an operator to be replaced.
+ For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
+ addition by modulus 5 addition.
+
+ Returns
+ -------
+ saved_ops : list of callables
+ A list of all operators, stored before making replacements.
+
+ Notes
+ -----
+ .. warning::
+ Use with care! Incorrect usage may lead to memory errors.
+
+ A function replacing an operator cannot make use of that operator.
+ For example, when replacing add, you may not use ``+``. Instead,
+ directly call ufuncs.
+
+ Examples
+ --------
+ >>> def add_mod5(x, y):
+ ... return np.add(x, y) % 5
+ ...
+ >>> old_funcs = np.set_numeric_ops(add=add_mod5)
+
+ >>> x = np.arange(12).reshape((3, 4))
+ >>> x + x
+ array([[0, 2, 4, 1],
+ [3, 0, 2, 4],
+ [1, 3, 0, 2]])
+
+ >>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'promote_types',
+ """
+ promote_types(type1, type2)
+
+ Returns the data type with the smallest size and smallest scalar
+ kind to which both ``type1`` and ``type2`` may be safely cast.
+    The returned data type is always considered "canonical"; this mainly
+    means that the promoted dtype will always be in native byte order.
+
+ This function is symmetric, but rarely associative.
+
+ Parameters
+ ----------
+ type1 : dtype or dtype specifier
+ First data type.
+ type2 : dtype or dtype specifier
+ Second data type.
+
+ Returns
+ -------
+ out : dtype
+ The promoted data type.
+
+ Notes
+ -----
+ Please see `numpy.result_type` for additional information about promotion.
+
+ .. versionadded:: 1.6.0
+
+    Starting in NumPy 1.9, the promote_types function returns a valid string
+ length when given an integer or float dtype as one argument and a string
+ dtype as another argument. Previously it always returned the input string
+ dtype, even if it wasn't long enough to store the max integer/float value
+ converted to a string.
+
+ .. versionchanged:: 1.23.0
+
+ NumPy now supports promotion for more structured dtypes. It will now
+ remove unnecessary padding from a structure dtype and promote included
+ fields individually.
+
+ See Also
+ --------
+ result_type, dtype, can_cast
+
+ Examples
+ --------
+ >>> np.promote_types('f4', 'f8')
+ dtype('float64')
+
+ >>> np.promote_types('i8', 'f4')
+ dtype('float64')
+
+ >>> np.promote_types('>i8', '<c8')
+ dtype('complex128')
+
+ >>> np.promote_types('i4', 'S8')
+ dtype('S11')
+
+ An example of a non-associative case:
+
+ >>> p = np.promote_types
+ >>> p('S', p('i1', 'u1'))
+ dtype('S6')
+ >>> p(p('S', 'i1'), 'u1')
+ dtype('S4')
+
+ """)
+
+add_newdoc('numpy.core.multiarray', 'c_einsum',
+ """
+ c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe')
+
+    *This documentation shadows that of the native Python implementation
+    of the `einsum` function, except all references and examples related
+    to the `optimize` argument (v 0.12.0) have been removed.*
+
+ Evaluates the Einstein summation convention on the operands.
+
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
+ operands : list of array_like
+ These are the arrays for the operation.
+ out : ndarray, optional
+ If provided, the calculation is done into this array.
+ dtype : {data-type, None}, optional
+ If provided, forces the calculation to use the data type specified.
+ Note that you may have to also give a more liberal `casting`
+ parameter to allow the conversions. Default is None.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the output. 'C' means it should
+ be C contiguous. 'F' means it should be Fortran contiguous,
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+ 'K' means it should be as close to the layout of the inputs as
+ is possible, including arbitrarily permuted axes.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Setting this to
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Default is 'safe'.
+
+ Returns
+ -------
+ output : ndarray
+ The calculation based on the Einstein summation convention.
+
+ See Also
+ --------
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
+
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
+
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+ * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
+
+ To enable and control broadcasting, use an ellipsis. Default
+ NumPy-style broadcasting is done by adding an ellipsis
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
+ To take the trace along the first and last axes,
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+ product with the left-most indices instead of rightmost, one can do
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+ When there is only one operand, no axes are summed, and no output
+ parameter is provided, a view into the operand is returned instead
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+ produces a view (changed in version 1.10.0).
+
+ `einsum` also provides an alternative way to provide the subscripts
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+ If the output shape is not provided in this format `einsum` will be
+ calculated in implicit mode, otherwise it will be performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
+
+ .. versionadded:: 1.10.0
+
+ Views returned from einsum are now writeable whenever the input array
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ of a 2D array.
+
+ Examples
+ --------
+ >>> a = np.arange(25).reshape(5,5)
+ >>> b = np.arange(5)
+ >>> c = np.arange(6).reshape(2,3)
+
+ Trace of a matrix:
+
+ >>> np.einsum('ii', a)
+ 60
+ >>> np.einsum(a, [0,0])
+ 60
+ >>> np.trace(a)
+ 60
+
+ Extract the diagonal (requires explicit form):
+
+ >>> np.einsum('ii->i', a)
+ array([ 0, 6, 12, 18, 24])
+ >>> np.einsum(a, [0,0], [0])
+ array([ 0, 6, 12, 18, 24])
+ >>> np.diag(a)
+ array([ 0, 6, 12, 18, 24])
+
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
+
+ >>> np.einsum('ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum(c, [1,0])
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.transpose(c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
+ >>> np.einsum('..., ...', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.multiply(3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+
+ Vector outer product:
+
+ >>> np.einsum('i,j', np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.outer(np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+
+ Tensor contraction:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> np.einsum('ijk,jil->kl', a, b)
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+ array([[ 4400., 4730.],
+ [ 4532., 4874.],
+ [ 4664., 5018.],
+ [ 4796., 5162.],
+ [ 4928., 5306.]])
+
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ Example of ellipsis use:
+
+ >>> a = np.arange(6).reshape((3,2))
+ >>> b = np.arange(12).reshape((4,3))
+ >>> np.einsum('ki,jk->ij', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('ki,...k->i...', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('k...,jk', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+
+ """)
+
+
+##############################################################################
+#
+# Documentation for ndarray attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ndarray object
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray',
+ """
+ ndarray(shape, dtype=float, buffer=None, offset=0,
+ strides=None, order=None)
+
+ An array object represents a multidimensional, homogeneous array
+ of fixed-size items. An associated data-type object describes the
+ format of each element in the array (its byte-order, how many bytes it
+ occupies in memory, whether it is an integer, a floating point number,
+ or something else, etc.)
+
+ Arrays should be constructed using `array`, `zeros` or `empty` (refer
+ to the See Also section below). The parameters given here refer to
+ a low-level method (`ndarray(...)`) for instantiating an array.
+
+ For more information, refer to the `numpy` module and examine the
+ methods and attributes of an array.
+
+ Parameters
+ ----------
+ (for the __new__ method; see Notes below)
+
+ shape : tuple of ints
+ Shape of created array.
+ dtype : data-type, optional
+ Any object that can be interpreted as a numpy data type.
+ buffer : object exposing buffer interface, optional
+ Used to fill the array with data.
+ offset : int, optional
+ Offset of array data in buffer.
+ strides : tuple of ints, optional
+ Strides of data in memory.
+ order : {'C', 'F'}, optional
+ Row-major (C-style) or column-major (Fortran-style) order.
+
+ Attributes
+ ----------
+ T : ndarray
+ Transpose of the array.
+ data : buffer
+ The array's elements, in memory.
+ dtype : dtype object
+ Describes the format of the elements in the array.
+ flags : dict
+ Dictionary containing information related to memory use, e.g.,
+ 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
+    flat : numpy.flatiter object
+        Flattened version of the array as an iterator. The iterator
+        allows assignments, e.g., ``x.flat = 3`` (see `ndarray.flat` for
+        assignment examples).
+ imag : ndarray
+ Imaginary part of the array.
+ real : ndarray
+ Real part of the array.
+ size : int
+ Number of elements in the array.
+ itemsize : int
+ The memory use of each array element in bytes.
+ nbytes : int
+ The total number of bytes required to store the array data,
+ i.e., ``itemsize * size``.
+ ndim : int
+ The array's number of dimensions.
+ shape : tuple of ints
+ Shape of the array.
+ strides : tuple of ints
+ The step-size required to move from one element to the next in
+ memory. For example, a contiguous ``(3, 4)`` array of type
+ ``int16`` in C-order has strides ``(8, 2)``. This implies that
+ to move from element to element in memory requires jumps of 2 bytes.
+ To move from row-to-row, one needs to jump 8 bytes at a time
+ (``2 * 4``).
+ ctypes : ctypes object
+ Class containing properties of the array needed for interaction
+ with ctypes.
+ base : ndarray
+ If the array is a view into another array, that array is its `base`
+ (unless that array is also a view). The `base` array is where the
+ array data is actually stored.
+
+ See Also
+ --------
+ array : Construct an array.
+ zeros : Create an array, each element of which is zero.
+ empty : Create an array, but leave its allocated memory unchanged (i.e.,
+ it contains "garbage").
+ dtype : Create a data-type.
+ numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+ w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+ Notes
+ -----
+ There are two modes of creating an array using ``__new__``:
+
+ 1. If `buffer` is None, then only `shape`, `dtype`, and `order`
+ are used.
+ 2. If `buffer` is an object exposing the buffer interface, then
+ all keywords are interpreted.
+
+ No ``__init__`` method is needed because the array is fully initialized
+ after the ``__new__`` method.
+
+ Examples
+ --------
+ These examples illustrate the low-level `ndarray` constructor. Refer
+ to the `See Also` section above for easier ways of constructing an
+ ndarray.
+
+ First mode, `buffer` is None:
+
+ >>> np.ndarray(shape=(2,2), dtype=float, order='F')
+ array([[0.0e+000, 0.0e+000], # random
+ [ nan, 2.5e-323]])
+
+ Second mode:
+
+ >>> np.ndarray((2,), buffer=np.array([1,2,3]),
+ ... offset=np.int_().itemsize,
+ ... dtype=int) # offset = 1*itemsize, i.e. skip first element
+ array([2, 3])
+
+ """)
+
+
+##############################################################################
+#
+# ndarray attributes
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
+ """Array protocol: Python side."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
+ """Array priority."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
+ """Array protocol: C-struct side."""))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack__',
+ """a.__dlpack__(*, stream=None)
+
+ DLPack Protocol: Part of the Array API."""))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__dlpack_device__',
+ """a.__dlpack_device__()
+
+ DLPack Protocol: Part of the Array API."""))
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
+ """
+ Base object if memory is from some other object.
+
+ Examples
+ --------
+ The base of an array that owns its memory is None:
+
+ >>> x = np.array([1,2,3,4])
+ >>> x.base is None
+ True
+
+ Slicing creates a view, whose memory is shared with x:
+
+ >>> y = x[2:]
+ >>> y.base is x
+ True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
+ """
+ An object to simplify the interaction of the array with the ctypes
+ module.
+
+ This attribute creates an object that makes it easier to use arrays
+ when calling shared libraries with the ctypes module. The returned
+ object has, among others, data, shape, and strides attributes (see
+ Notes below) which themselves return ctypes objects that can be used
+ as arguments to a shared library.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ c : Python object
+ Possessing attributes data, shape, strides, etc.
+
+ See Also
+ --------
+ numpy.ctypeslib
+
+ Notes
+ -----
+ Below are the public attributes of this object which were documented
+ in "Guide to NumPy" (we have omitted undocumented public attributes,
+ as well as documented private attributes):
+
+ .. autoattribute:: numpy.core._internal._ctypes.data
+ :noindex:
+
+ .. autoattribute:: numpy.core._internal._ctypes.shape
+ :noindex:
+
+ .. autoattribute:: numpy.core._internal._ctypes.strides
+ :noindex:
+
+ .. automethod:: numpy.core._internal._ctypes.data_as
+ :noindex:
+
+ .. automethod:: numpy.core._internal._ctypes.shape_as
+ :noindex:
+
+ .. automethod:: numpy.core._internal._ctypes.strides_as
+ :noindex:
+
+ If the ctypes module is not available, then the ctypes attribute
+ of array objects still returns something useful, but ctypes objects
+ are not returned and errors may be raised instead. In particular,
+    the object will still have the ``_as_parameter_`` attribute which will
+    return an integer equal to the data attribute.
+
+ Examples
+ --------
+ >>> import ctypes
+ >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
+ >>> x
+ array([[0, 1],
+ [2, 3]], dtype=int32)
+ >>> x.ctypes.data
+ 31962608 # may vary
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
+ <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
+ c_uint(0)
+ >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
+ c_ulong(4294967296)
+ >>> x.ctypes.shape
+ <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
+ >>> x.ctypes.strides
+ <numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
+ """Python buffer object pointing to the start of the array's data."""))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
+ """
+ Data-type of the array's elements.
+
+ .. warning::
+
+ Setting ``arr.dtype`` is discouraged and may be deprecated in the
+ future. Setting will replace the ``dtype`` without modifying the
+ memory (see also `ndarray.view` and `ndarray.astype`).
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ d : numpy dtype object
+
+ See Also
+ --------
+ ndarray.astype : Cast the values contained in the array to a new data-type.
+ ndarray.view : Create a view of the same data but a different data-type.
+ numpy.dtype
+
+ Examples
+ --------
+    >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
+    >>> x
+    array([[0, 1],
+           [2, 3]], dtype=int32)
+    >>> x.dtype
+    dtype('int32')
+    >>> type(x.dtype)
+    <class 'numpy.dtype[int32]'>
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
+ """
+ The imaginary part of the array.
+
+ Examples
+ --------
+ >>> x = np.sqrt([1+0j, 0+1j])
+ >>> x.imag
+ array([ 0. , 0.70710678])
+ >>> x.imag.dtype
+ dtype('float64')
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
+ """
+ Length of one array element in bytes.
+
+ Examples
+ --------
+ >>> x = np.array([1,2,3], dtype=np.float64)
+ >>> x.itemsize
+ 8
+ >>> x = np.array([1,2,3], dtype=np.complex128)
+ >>> x.itemsize
+ 16
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
+ """
+ Information about the memory layout of the array.
+
+ Attributes
+ ----------
+ C_CONTIGUOUS (C)
+ The data is in a single, C-style contiguous segment.
+ F_CONTIGUOUS (F)
+ The data is in a single, Fortran-style contiguous segment.
+ OWNDATA (O)
+ The array owns the memory it uses or borrows it from another object.
+ WRITEABLE (W)
+ The data area can be written to. Setting this to False locks
+ the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
+ from its base array at creation time, but a view of a writeable
+ array may be subsequently locked while the base array remains writeable.
+ (The opposite is not true, in that a view of a locked array may not
+ be made writeable. However, currently, locking a base object does not
+ lock any views that already reference it, so under that circumstance it
+ is possible to alter the contents of a locked array via a previously
+ created writeable view onto it.) Attempting to change a non-writeable
+ array raises a RuntimeError exception.
+ ALIGNED (A)
+ The data and all elements are aligned appropriately for the hardware.
+ WRITEBACKIFCOPY (X)
+ This array is a copy of some other array. The C-API function
+        PyArray_ResolveWritebackIfCopy must be called before deallocating
+        this array, so that the base array is updated with the contents
+        of this array.
+ FNC
+ F_CONTIGUOUS and not C_CONTIGUOUS.
+ FORC
+ F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
+ BEHAVED (B)
+ ALIGNED and WRITEABLE.
+ CARRAY (CA)
+ BEHAVED and C_CONTIGUOUS.
+ FARRAY (FA)
+ BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
+
+ Notes
+ -----
+ The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
+ or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
+ names are only supported in dictionary access.
+
+ Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be
+ changed by the user, via direct assignment to the attribute or dictionary
+ entry, or by calling `ndarray.setflags`.
+
+ The array flags cannot be set arbitrarily:
+
+ - WRITEBACKIFCOPY can only be set ``False``.
+ - ALIGNED can only be set ``True`` if the data is truly aligned.
+ - WRITEABLE can only be set ``True`` if the array owns its own memory
+ or the ultimate owner of the memory exposes a writeable buffer
+ interface or is a string.
+
+ Arrays can be both C-style and Fortran-style contiguous simultaneously.
+ This is clear for 1-dimensional arrays, but can also be true for higher
+ dimensional arrays.
+
+ Even for contiguous arrays a stride for a given dimension
+ ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+ or the array has no elements.
+    It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+    for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
+    for Fortran-style contiguous arrays.
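+
+    Examples
+    --------
+    A brief sketch of the two access styles described in the Notes:
+
+    >>> a = np.arange(4)
+    >>> a.flags['WRITEABLE']
+    True
+    >>> a.flags.writeable
+    True
+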
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
+ """
+ A 1-D iterator over the array.
+
+ This is a `numpy.flatiter` instance, which acts similarly to, but is not
+ a subclass of, Python's built-in iterator object.
+
+ See Also
+ --------
+ flatten : Return a copy of the array collapsed into one dimension.
+
+ flatiter
+
+ Examples
+ --------
+ >>> x = np.arange(1, 7).reshape(2, 3)
+ >>> x
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> x.flat[3]
+ 4
+ >>> x.T
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> x.T.flat[3]
+ 5
+ >>> type(x.flat)
+ <class 'numpy.flatiter'>
+
+ An assignment example:
+
+ >>> x.flat = 3; x
+ array([[3, 3, 3],
+ [3, 3, 3]])
+ >>> x.flat[[1,4]] = 1; x
+ array([[3, 1, 3],
+ [3, 1, 3]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
+ """
+ Total bytes consumed by the elements of the array.
+
+ Notes
+ -----
+ Does not include memory consumed by non-element attributes of the
+ array object.
+
+ Examples
+ --------
+ >>> x = np.zeros((3,5,2), dtype=np.complex128)
+ >>> x.nbytes
+ 480
+ >>> np.prod(x.shape) * x.itemsize
+ 480
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
+ """
+ Number of array dimensions.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> x.ndim
+ 1
+ >>> y = np.zeros((2, 3, 4))
+ >>> y.ndim
+ 3
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
+ """
+ The real part of the array.
+
+ Examples
+ --------
+ >>> x = np.sqrt([1+0j, 0+1j])
+ >>> x.real
+ array([ 1. , 0.70710678])
+ >>> x.real.dtype
+ dtype('float64')
+
+ See Also
+ --------
+ numpy.real : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
+ """
+ Tuple of array dimensions.
+
+ The shape property is usually used to get the current shape of an array,
+ but may also be used to reshape the array in-place by assigning a tuple of
+ array dimensions to it. As with `numpy.reshape`, one of the new shape
+ dimensions can be -1, in which case its value is inferred from the size of
+ the array and the remaining dimensions. Reshaping an array in-place will
+ fail if a copy is required.
+
+ .. warning::
+
+ Setting ``arr.shape`` is discouraged and may be deprecated in the
+ future. Using `ndarray.reshape` is the preferred approach.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3, 4])
+ >>> x.shape
+ (4,)
+ >>> y = np.zeros((2, 3, 4))
+ >>> y.shape
+ (2, 3, 4)
+ >>> y.shape = (3, 8)
+ >>> y
+ array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0.]])
+ >>> y.shape = (3, 6)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: total size of new array must be unchanged
+ >>> np.zeros((4,2))[::2].shape = (-1,)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ AttributeError: Incompatible shape for in-place modification. Use
+ `.reshape()` to make a copy with the desired shape.
+
+ See Also
+ --------
+ numpy.shape : Equivalent getter function.
+ numpy.reshape : Function similar to setting ``shape``.
+ ndarray.reshape : Method similar to setting ``shape``.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
+ """
+ Number of elements in the array.
+
+ Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+ dimensions.
+
+ Notes
+ -----
+ `a.size` returns a standard arbitrary precision Python integer. This
+ may not be the case with other methods of obtaining the same value
+ (like the suggested ``np.prod(a.shape)``, which returns an instance
+ of ``np.int_``), and may be relevant if the value is used further in
+ calculations that may overflow a fixed size integer type.
+
+ Examples
+ --------
+ >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+ >>> x.size
+ 30
+ >>> np.prod(x.shape)
+ 30
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
+ """
+ Tuple of bytes to step in each dimension when traversing an array.
+
+ The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+ is::
+
+ offset = sum(np.array(i) * a.strides)
+
+ A more detailed explanation of strides can be found in the
+ "ndarray.rst" file in the NumPy reference guide.
+
+ .. warning::
+
+ Setting ``arr.strides`` is discouraged and may be deprecated in the
+ future. `numpy.lib.stride_tricks.as_strided` should be preferred
+ to create a new view of the same data in a safer way.
+
+ Notes
+ -----
+ Imagine an array of 32-bit integers (each 4 bytes)::
+
+ x = np.array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]], dtype=np.int32)
+
+ This array is stored in memory as 40 bytes, one after the other
+ (known as a contiguous block of memory). The strides of an array tell
+ us how many bytes we have to skip in memory to move to the next position
+ along a certain axis. For example, we have to skip 4 bytes (1 value) to
+ move to the next column, but 20 bytes (5 values) to get to the same
+ position in the next row. As such, the strides for the array `x` will be
+ ``(20, 4)``.
+
+ See Also
+ --------
+ numpy.lib.stride_tricks.as_strided
+
+ Examples
+ --------
+ >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
+ >>> y
+ array([[[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]],
+ [[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]]])
+ >>> y.strides
+ (48, 16, 4)
+ >>> y[1,1,1]
+ 17
+ >>> offset=sum(y.strides * np.array((1,1,1)))
+ >>> offset/y.itemsize
+ 17
+
+ >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
+ >>> x.strides
+ (32, 4, 224, 1344)
+ >>> i = np.array([3,5,2,2])
+ >>> offset = sum(i * x.strides)
+ >>> x[3,5,2,2]
+ 813
+ >>> offset / x.itemsize
+ 813
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
+ """
+ View of the transposed array.
+
+ Same as ``self.transpose()``.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.T
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> a
+ array([1, 2, 3, 4])
+ >>> a.T
+ array([1, 2, 3, 4])
+
+ See Also
+ --------
+ transpose
+
+ """))
+
+
+##############################################################################
+#
+# ndarray methods
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
+ """ a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
+
+ Returns either a new reference to self if dtype is not given or a new array
+ of provided data type if dtype is different from the current dtype of the
+ array.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
+ """a.__array_finalize__(obj, /)
+
+ Present so subclasses can call super. Does nothing.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
+ """a.__array_prepare__(array[, context], /)
+
+ Returns a view of `array` with the same type as self.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
+ """a.__array_wrap__(array[, context], /)
+
+ Returns a view of `array` with the same type as self.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
+ """a.__copy__()
+
+ Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+ Equivalent to ``a.copy(order='K')``.
+
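+    Examples
+    --------
+    A minimal sketch of the :func:`copy.copy` round trip:
+
+    >>> import copy
+    >>> a = np.array([1, 2])
+    >>> b = copy.copy(a)
+    >>> b is a
+    False
+    >>> (b == a).all()
+    True
+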
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__class_getitem__',
+ """a.__class_getitem__(item, /)
+
+ Return a parametrized wrapper around the `~numpy.ndarray` type.
+
+ .. versionadded:: 1.22
+
+ Returns
+ -------
+ alias : types.GenericAlias
+ A parametrized `~numpy.ndarray` type.
+
+ Examples
+ --------
+ >>> from typing import Any
+ >>> import numpy as np
+
+ >>> np.ndarray[Any, np.dtype[Any]]
+ numpy.ndarray[typing.Any, numpy.dtype[typing.Any]]
+
+ Notes
+ -----
+ This method is only available for python 3.9 and later.
+
+ See Also
+ --------
+ :pep:`585` : Type hinting generics in standard collections.
+ numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+ w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
+ """a.__deepcopy__(memo, /) -> Deep copy of array.
+
+ Used if :func:`copy.deepcopy` is called on an array.
+
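+    Examples
+    --------
+    A minimal sketch showing the copy is independent of the original:
+
+    >>> import copy
+    >>> a = np.array([1, 2])
+    >>> b = copy.deepcopy(a)
+    >>> b[0] = 9
+    >>> a[0]
+    1
+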
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
+ """a.__reduce__()
+
+ For pickling.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
+ """a.__setstate__(state, /)
+
+ For unpickling.
+
+ The `state` argument must be a sequence that contains the following
+ elements:
+
+ Parameters
+ ----------
+ version : int
+        Optional pickle version. If omitted, defaults to 0.
+ shape : tuple
+ dtype : data-type
+ isFortran : bool
+ rawdata : string or list
+ a binary string with the data (or a list if 'a' is an object array)
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
+ """
+ a.all(axis=None, out=None, keepdims=False, *, where=True)
+
+ Returns True if all elements evaluate to True.
+
+ Refer to `numpy.all` for full documentation.
+
+ See Also
+ --------
+ numpy.all : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
+ """
+ a.any(axis=None, out=None, keepdims=False, *, where=True)
+
+ Returns True if any of the elements of `a` evaluate to True.
+
+ Refer to `numpy.any` for full documentation.
+
+ See Also
+ --------
+ numpy.any : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
+ """
+ a.argmax(axis=None, out=None, *, keepdims=False)
+
+ Return indices of the maximum values along the given axis.
+
+ Refer to `numpy.argmax` for full documentation.
+
+ See Also
+ --------
+ numpy.argmax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
+ """
+ a.argmin(axis=None, out=None, *, keepdims=False)
+
+ Return indices of the minimum values along the given axis.
+
+ Refer to `numpy.argmin` for detailed documentation.
+
+ See Also
+ --------
+ numpy.argmin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
+ """
+ a.argsort(axis=-1, kind=None, order=None)
+
+ Returns the indices that would sort this array.
+
+ Refer to `numpy.argsort` for full documentation.
+
+ See Also
+ --------
+ numpy.argsort : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
+ """
+ a.argpartition(kth, axis=-1, kind='introselect', order=None)
+
+ Returns the indices that would partition this array.
+
+ Refer to `numpy.argpartition` for full documentation.
+
+ .. versionadded:: 1.8.0
+
+ See Also
+ --------
+ numpy.argpartition : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
+ """
+ a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
+
+ Copy of the array, cast to a specified type.
+
+ Parameters
+ ----------
+ dtype : str or dtype
+ Typecode or data-type to which the array is cast.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout order of the result.
+ 'C' means C order, 'F' means Fortran order, 'A'
+ means 'F' order if all the arrays are Fortran contiguous,
+ 'C' order otherwise, and 'K' means as close to the
+ order the array elements appear in memory as possible.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'unsafe'
+ for backwards compatibility.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through (default), otherwise
+ the returned array will be forced to be a base-class array.
+ copy : bool, optional
+ By default, astype always returns a newly allocated array. If this
+ is set to false, and the `dtype`, `order`, and `subok`
+ requirements are satisfied, the input array is returned instead
+ of a copy.
+
+ Returns
+ -------
+ arr_t : ndarray
+ Unless `copy` is False and the other conditions for returning the input
+ array are satisfied (see description for `copy` input parameter), `arr_t`
+ is a new array of the same shape as the input array, with dtype, order
+ given by `dtype`, `order`.
+
+ Notes
+ -----
+ .. versionchanged:: 1.17.0
+ Casting between a simple data type and a structured one is possible only
+ for "unsafe" casting. Casting to multiple fields is allowed, but
+ casting from multiple fields is not.
+
+ .. versionchanged:: 1.9.0
+ Casting from numeric to string types in 'safe' casting mode requires
+ that the string dtype length is long enough to store the max
+ integer/float value converted.
+
+ Raises
+ ------
+ ComplexWarning
+ When casting from complex to float or int. To avoid this,
+ one should use ``a.real.astype(t)``.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 2.5])
+ >>> x
+ array([1. , 2. , 2.5])
+
+ >>> x.astype(int)
+ array([1, 2, 2])
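+
+    A sketch of the ``copy=False`` behavior described above; the input
+    array is returned unchanged when no conversion is required:
+
+    >>> x.astype(np.float64, copy=False) is x
+    True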
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
+ """
+ a.byteswap(inplace=False)
+
+    Swap the bytes of the array elements.
+
+    Toggle between little-endian and big-endian data representation by
+ returning a byteswapped array, optionally swapped in-place.
+ Arrays of byte-strings are not swapped. The real and imaginary
+ parts of a complex number are swapped individually.
+
+ Parameters
+ ----------
+ inplace : bool, optional
+ If ``True``, swap bytes in-place, default is ``False``.
+
+ Returns
+ -------
+ out : ndarray
+ The byteswapped array. If `inplace` is ``True``, this is
+ a view to self.
+
+ Examples
+ --------
+ >>> A = np.array([1, 256, 8755], dtype=np.int16)
+ >>> list(map(hex, A))
+ ['0x1', '0x100', '0x2233']
+ >>> A.byteswap(inplace=True)
+ array([ 256, 1, 13090], dtype=int16)
+ >>> list(map(hex, A))
+ ['0x100', '0x1', '0x3322']
+
+ Arrays of byte-strings are not swapped
+
+ >>> A = np.array([b'ceg', b'fac'])
+ >>> A.byteswap()
+ array([b'ceg', b'fac'], dtype='|S3')
+
+ ``A.newbyteorder().byteswap()`` produces an array with the same values
+ but different representation in memory
+
+ >>> A = np.array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
+ 0, 0], dtype=uint8)
+ >>> A.newbyteorder().byteswap(inplace=True)
+ array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
+ 0, 3], dtype=uint8)
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
+ """
+ a.choose(choices, out=None, mode='raise')
+
+ Use an index array to construct a new array from a set of choices.
+
+ Refer to `numpy.choose` for full documentation.
+
+ See Also
+ --------
+ numpy.choose : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
+ """
+ a.clip(min=None, max=None, out=None, **kwargs)
+
+ Return an array whose values are limited to ``[min, max]``.
+ One of max or min must be given.
+
+ Refer to `numpy.clip` for full documentation.
+
+ See Also
+ --------
+ numpy.clip : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
+ """
+ a.compress(condition, axis=None, out=None)
+
+ Return selected slices of this array along given axis.
+
+ Refer to `numpy.compress` for full documentation.
+
+ See Also
+ --------
+ numpy.compress : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
+ """
+ a.conj()
+
+ Complex-conjugate all elements.
+
+ Refer to `numpy.conjugate` for full documentation.
+
+ See Also
+ --------
+ numpy.conjugate : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
+ """
+ a.conjugate()
+
+ Return the complex conjugate, element-wise.
+
+ Refer to `numpy.conjugate` for full documentation.
+
+ See Also
+ --------
+ numpy.conjugate : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
+ """
+ a.copy(order='C')
+
+ Return a copy of the array.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the copy. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible. (Note that this function and :func:`numpy.copy` are very
+ similar but have different default values for their order=
+ arguments, and this function always passes sub-classes through.)
+
+ See also
+ --------
+ numpy.copy : Similar function with different default behavior
+ numpy.copyto
+
+ Notes
+ -----
+ This function is the preferred method for creating an array copy. The
+ function :func:`numpy.copy` is similar, but it defaults to using order 'K',
+ and will not pass sub-classes through by default.
+
+ Examples
+ --------
+ >>> x = np.array([[1,2,3],[4,5,6]], order='F')
+
+ >>> y = x.copy()
+
+ >>> x.fill(0)
+
+ >>> x
+ array([[0, 0, 0],
+ [0, 0, 0]])
+
+ >>> y
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> y.flags['C_CONTIGUOUS']
+ True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
+ """
+ a.cumprod(axis=None, dtype=None, out=None)
+
+ Return the cumulative product of the elements along the given axis.
+
+ Refer to `numpy.cumprod` for full documentation.
+
+ See Also
+ --------
+ numpy.cumprod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
+ """
+ a.cumsum(axis=None, dtype=None, out=None)
+
+ Return the cumulative sum of the elements along the given axis.
+
+ Refer to `numpy.cumsum` for full documentation.
+
+ See Also
+ --------
+ numpy.cumsum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
+ """
+ a.diagonal(offset=0, axis1=0, axis2=1)
+
+    Return specified diagonals. Since NumPy 1.9 the returned array is a
+    read-only view, rather than the copy returned by previous NumPy
+    versions. In a future version the read-only restriction will be removed.
+
+ Refer to :func:`numpy.diagonal` for full documentation.
+
+ See Also
+ --------
+ numpy.diagonal : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dot'))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
+ """a.dump(file)
+
+ Dump a pickle of the array to the specified file.
+ The array can be read back with pickle.load or numpy.load.
+
+ Parameters
+ ----------
+ file : str or Path
+ A string naming the dump file.
+
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
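+    Examples
+    --------
+    A round-trip sketch; ``path`` is an illustrative temporary file:
+
+    >>> import os, tempfile
+    >>> a = np.arange(3)
+    >>> path = os.path.join(tempfile.mkdtemp(), 'a.npy')
+    >>> a.dump(path)
+    >>> np.load(path, allow_pickle=True)
+    array([0, 1, 2])
+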
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
+ """
+ a.dumps()
+
+    Returns the pickle of the array as a bytes object.
+    ``pickle.loads`` will convert the bytes back to an array.
+
+ Parameters
+ ----------
+ None
+
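+    Examples
+    --------
+    A minimal round-trip sketch:
+
+    >>> import pickle
+    >>> a = np.array([1, 2])
+    >>> (pickle.loads(a.dumps()) == a).all()
+    True
+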
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
+ """
+ a.fill(value)
+
+ Fill the array with a scalar value.
+
+ Parameters
+ ----------
+ value : scalar
+ All elements of `a` will be assigned this value.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2])
+ >>> a.fill(0)
+ >>> a
+ array([0, 0])
+ >>> a = np.empty(2)
+ >>> a.fill(1)
+ >>> a
+ array([1., 1.])
+
+ Fill expects a scalar value and always behaves the same as assigning
+ to a single array element. The following is a rare example where this
+ distinction is important:
+
+ >>> a = np.array([None, None], dtype=object)
+ >>> a[0] = np.array(3)
+ >>> a
+ array([array(3), None], dtype=object)
+ >>> a.fill(np.array(3))
+ >>> a
+ array([array(3), array(3)], dtype=object)
+
+ Where other forms of assignments will unpack the array being assigned:
+
+ >>> a[...] = np.array(3)
+ >>> a
+ array([3, 3], dtype=object)
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
+ """
+ a.flatten(order='C')
+
+ Return a copy of the array collapsed into one dimension.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ 'C' means to flatten in row-major (C-style) order.
+ 'F' means to flatten in column-major (Fortran-
+ style) order. 'A' means to flatten in column-major
+ order if `a` is Fortran *contiguous* in memory,
+ row-major order otherwise. 'K' means to flatten
+ `a` in the order the elements occur in memory.
+ The default is 'C'.
+
+ Returns
+ -------
+ y : ndarray
+ A copy of the input array, flattened to one dimension.
+
+ See Also
+ --------
+ ravel : Return a flattened array.
+ flat : A 1-D flat iterator over the array.
+
+ Examples
+ --------
+ >>> a = np.array([[1,2], [3,4]])
+ >>> a.flatten()
+ array([1, 2, 3, 4])
+ >>> a.flatten('F')
+ array([1, 3, 2, 4])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
+ """
+ a.getfield(dtype, offset=0)
+
+ Returns a field of the given array as a certain type.
+
+ A field is a view of the array data with a given data-type. The values in
+ the view are determined by the given type and the offset into the current
+ array in bytes. The offset needs to be such that the view dtype fits in the
+ array dtype; for example an array of dtype complex128 has 16-byte elements.
+ If taking a view with a 32-bit integer (4 bytes), the offset needs to be
+ between 0 and 12 bytes.
+
+ Parameters
+ ----------
+ dtype : str or dtype
+ The data type of the view. The dtype size of the view can not be larger
+ than that of the array itself.
+ offset : int
+ Number of bytes to skip before beginning the element view.
+
+ Examples
+ --------
+ >>> x = np.diag([1.+1.j]*2)
+ >>> x[1, 1] = 2 + 4.j
+ >>> x
+ array([[1.+1.j, 0.+0.j],
+ [0.+0.j, 2.+4.j]])
+ >>> x.getfield(np.float64)
+ array([[1., 0.],
+ [0., 2.]])
+
+ By choosing an offset of 8 bytes we can select the complex part of the
+ array for our view:
+
+ >>> x.getfield(np.float64, offset=8)
+ array([[1., 0.],
+ [0., 4.]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
+ """
+ a.item(*args)
+
+ Copy an element of an array to a standard Python scalar and return it.
+
+ Parameters
+ ----------
+ \\*args : Arguments (variable number and type)
+
+ * none: in this case, the method only works for arrays
+ with one element (`a.size == 1`), which element is
+ copied into a standard Python scalar object and returned.
+
+ * int_type: this argument is interpreted as a flat index into
+ the array, specifying which element to copy and return.
+
+ * tuple of int_types: functions as does a single int_type argument,
+ except that the argument is interpreted as an nd-index into the
+ array.
+
+ Returns
+ -------
+ z : Standard Python scalar object
+ A copy of the specified element of the array as a suitable
+ Python scalar
+
+ Notes
+ -----
+ When the data type of `a` is longdouble or clongdouble, item() returns
+ a scalar array object because there is no available Python scalar that
+ would not lose information. Void arrays return a buffer object for item(),
+ unless fields are defined, in which case a tuple is returned.
+
+ `item` is very similar to a[args], except, instead of an array scalar,
+ a standard Python scalar is returned. This can be useful for speeding up
+ access to elements of the array and doing arithmetic on elements of the
+ array using Python's optimized math.
+
+ Examples
+ --------
+ >>> np.random.seed(123)
+ >>> x = np.random.randint(9, size=(3, 3))
+ >>> x
+ array([[2, 2, 6],
+ [1, 3, 6],
+ [1, 0, 1]])
+ >>> x.item(3)
+ 1
+ >>> x.item(7)
+ 0
+ >>> x.item((0, 1))
+ 2
+ >>> x.item((2, 2))
+ 1
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
+ """
+ a.itemset(*args)
+
+ Insert scalar into an array (scalar is cast to array's dtype, if possible)
+
+    There must be at least 1 argument, and the last argument is
+    interpreted as *item*. Then, ``a.itemset(*args)`` is equivalent to but
+    faster than ``a[args] = item``. The item should be a scalar value and
+    `args` must select a single item in the array `a`.
+
+ Parameters
+ ----------
+ \\*args : Arguments
+ If one argument: a scalar, only used in case `a` is of size 1.
+ If two arguments: the last argument is the value to be set
+ and must be a scalar, the first argument specifies a single array
+ element location. It is either an int or a tuple.
+
+ Notes
+ -----
+ Compared to indexing syntax, `itemset` provides some speed increase
+ for placing a scalar into a particular location in an `ndarray`,
+ if you must do this. However, generally this is discouraged:
+ among other problems, it complicates the appearance of the code.
+ Also, when using `itemset` (and `item`) inside a loop, be sure
+ to assign the methods to a local variable to avoid the attribute
+ look-up at each loop iteration.
+
+ Examples
+ --------
+ >>> np.random.seed(123)
+ >>> x = np.random.randint(9, size=(3, 3))
+ >>> x
+ array([[2, 2, 6],
+ [1, 3, 6],
+ [1, 0, 1]])
+ >>> x.itemset(4, 0)
+ >>> x.itemset((2, 2), 9)
+ >>> x
+ array([[2, 2, 6],
+ [1, 0, 6],
+ [1, 0, 9]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
+ """
+ a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Return the maximum along a given axis.
+
+ Refer to `numpy.amax` for full documentation.
+
+ See Also
+ --------
+ numpy.amax : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
+ """
+ a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+
+ Returns the average of the array elements along given axis.
+
+ Refer to `numpy.mean` for full documentation.
+
+ See Also
+ --------
+ numpy.mean : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
+ """
+ a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Return the minimum along a given axis.
+
+ Refer to `numpy.amin` for full documentation.
+
+ See Also
+ --------
+ numpy.amin : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
+ """
+ arr.newbyteorder(new_order='S', /)
+
+ Return the array with the same data viewed with a different byte order.
+
+ Equivalent to::
+
+        arr.view(arr.dtype.newbyteorder(new_order))
+
+ Changes are also made in all fields and sub-arrays of the array data
+ type.
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order specifications
+ below. `new_order` codes can be any of:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'little'} - little endian
+ * {'>', 'big'} - big endian
+ * {'=', 'native'} - native order, equivalent to `sys.byteorder`
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ The default value ('S') results in swapping the current
+ byte order.
+
+ Returns
+ -------
+ new_arr : array
+ New array object with the dtype reflecting given change to the
+ byte order.
+
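+    Examples
+    --------
+    A small sketch of swapping the dtype's byte order:
+
+    >>> A = np.array([1], dtype='<i4')
+    >>> A.newbyteorder('>').dtype
+    dtype('>i4')
+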
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
+ """
+ a.nonzero()
+
+ Return the indices of the elements that are non-zero.
+
+ Refer to `numpy.nonzero` for full documentation.
+
+ See Also
+ --------
+ numpy.nonzero : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
+ """
+ a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
+
+    Return the product of the array elements over the given axis.
+
+ Refer to `numpy.prod` for full documentation.
+
+ See Also
+ --------
+ numpy.prod : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
+ """
+ a.ptp(axis=None, out=None, keepdims=False)
+
+ Peak to peak (maximum - minimum) value along a given axis.
+
+ Refer to `numpy.ptp` for full documentation.
+
+ See Also
+ --------
+ numpy.ptp : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
+ """
+ a.put(indices, values, mode='raise')
+
+ Set ``a.flat[n] = values[n]`` for all `n` in indices.
+
+ Refer to `numpy.put` for full documentation.
+
+ See Also
+ --------
+ numpy.put : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
+ """
+ a.ravel([order])
+
+ Return a flattened array.
+
+ Refer to `numpy.ravel` for full documentation.
+
+ See Also
+ --------
+ numpy.ravel : equivalent function
+
+ ndarray.flat : a flat iterator on the array.
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
+ """
+ a.repeat(repeats, axis=None)
+
+ Repeat elements of an array.
+
+ Refer to `numpy.repeat` for full documentation.
+
+ See Also
+ --------
+ numpy.repeat : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
+ """
+ a.reshape(shape, order='C')
+
+ Returns an array containing the same data with a new shape.
+
+ Refer to `numpy.reshape` for full documentation.
+
+ See Also
+ --------
+ numpy.reshape : equivalent function
+
+ Notes
+ -----
+ Unlike the free function `numpy.reshape`, this method on `ndarray` allows
+ the elements of the shape parameter to be passed in as separate arguments.
+ For example, ``a.reshape(10, 11)`` is equivalent to
+ ``a.reshape((10, 11))``.
+
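+    Examples
+    --------
+    A short sketch of the two equivalent calling conventions described
+    in the Notes:
+
+    >>> a = np.arange(6)
+    >>> a.reshape(2, 3).shape == a.reshape((2, 3)).shape
+    True
+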
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
+ """
+ a.resize(new_shape, refcheck=True)
+
+ Change shape and size of array in-place.
+
+ Parameters
+ ----------
+ new_shape : tuple of ints, or `n` ints
+ Shape of resized array.
+ refcheck : bool, optional
+ If False, reference count will not be checked. Default is True.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ ValueError
+ If `a` does not own its own data or references or views to it exist,
+ and the data memory must be changed.
+ PyPy only: will always raise if the data memory must be changed, since
+ there is no reliable way to determine if references or views to it
+ exist.
+
+ SystemError
+ If the `order` keyword argument is specified. This behaviour is a
+ bug in NumPy.
+
+ See Also
+ --------
+ resize : Return a new array with the specified shape.
+
+ Notes
+ -----
+ This reallocates space for the data area if necessary.
+
+ Only contiguous arrays (data elements consecutive in memory) can be
+ resized.
+
+ The purpose of the reference count check is to make sure you
+ do not use this array as a buffer for another Python object and then
+ reallocate the memory. However, reference counts can increase in
+ other ways so if you are sure that you have not shared the memory
+ for this array with another Python object, then you may safely set
+ `refcheck` to False.
+
+ Examples
+ --------
+ Shrinking an array: array is flattened (in the order that the data are
+ stored in memory), resized, and reshaped:
+
+ >>> a = np.array([[0, 1], [2, 3]], order='C')
+ >>> a.resize((2, 1))
+ >>> a
+ array([[0],
+ [1]])
+
+ >>> a = np.array([[0, 1], [2, 3]], order='F')
+ >>> a.resize((2, 1))
+ >>> a
+ array([[0],
+ [2]])
+
+ Enlarging an array: as above, but missing entries are filled with zeros:
+
+ >>> b = np.array([[0, 1], [2, 3]])
+ >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
+ >>> b
+ array([[0, 1, 2],
+ [3, 0, 0]])
+
+ Referencing an array prevents resizing...
+
+ >>> c = a
+ >>> a.resize((1, 1))
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot resize an array that references or is referenced ...
+
+ Unless `refcheck` is False:
+
+ >>> a.resize((1, 1), refcheck=False)
+ >>> a
+ array([[0]])
+ >>> c
+ array([[0]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
+ """
+ a.round(decimals=0, out=None)
+
+ Return `a` with each element rounded to the given number of decimals.
+
+ Refer to `numpy.around` for full documentation.
+
+ See Also
+ --------
+ numpy.around : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
+ """
+ a.searchsorted(v, side='left', sorter=None)
+
+ Find indices where elements of v should be inserted in a to maintain order.
+
+ For full documentation, see `numpy.searchsorted`
+
+ See Also
+ --------
+ numpy.searchsorted : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
+ """
+ a.setfield(val, dtype, offset=0)
+
+ Put a value into a specified place in a field defined by a data-type.
+
+ Place `val` into `a`'s field defined by `dtype` and beginning `offset`
+ bytes into the field.
+
+ Parameters
+ ----------
+ val : object
+ Value to be placed in field.
+ dtype : dtype object
+ Data-type of the field in which to place `val`.
+ offset : int, optional
+ The number of bytes into the field at which to place `val`.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ getfield
+
+ Examples
+ --------
+ >>> x = np.eye(3)
+ >>> x.getfield(np.float64)
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+ >>> x.setfield(3, np.int32)
+ >>> x.getfield(np.int32)
+ array([[3, 3, 3],
+ [3, 3, 3],
+ [3, 3, 3]], dtype=int32)
+ >>> x
+ array([[1.0e+000, 1.5e-323, 1.5e-323],
+ [1.5e-323, 1.0e+000, 1.5e-323],
+ [1.5e-323, 1.5e-323, 1.0e+000]])
+ >>> x.setfield(np.eye(3), np.int32)
+ >>> x
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
+ """
+ a.setflags(write=None, align=None, uic=None)
+
+ Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
+ respectively.
+
+ These Boolean-valued flags affect how numpy interprets the memory
+ area used by `a` (see Notes below). The ALIGNED flag can only
+ be set to True if the data is actually aligned according to the type.
+    The WRITEBACKIFCOPY flag can never be set
+ to True. The flag WRITEABLE can only be set to True if the array owns its
+ own memory, or the ultimate owner of the memory exposes a writeable buffer
+ interface, or is a string. (The exception for string is made so that
+ unpickling can be done without copying memory.)
+
+ Parameters
+ ----------
+ write : bool, optional
+ Describes whether or not `a` can be written to.
+ align : bool, optional
+ Describes whether or not `a` is aligned properly for its type.
+ uic : bool, optional
+ Describes whether or not `a` is a copy of another "base" array.
+
+ Notes
+ -----
+ Array flags provide information about how the memory area used
+ for the array is to be interpreted. There are 7 Boolean flags
+ in use, only four of which can be changed by the user:
+ WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
+
+ WRITEABLE (W) the data area can be written to;
+
+ ALIGNED (A) the data and strides are aligned appropriately for the hardware
+ (as determined by the compiler);
+
+ WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
+ by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
+ called, the base array will be updated with the contents of this array.
+
+ All flags can be accessed using the single (upper case) letter as well
+ as the full name.
+
+ Examples
+ --------
+ >>> y = np.array([[3, 1, 7],
+ ... [2, 0, 0],
+ ... [8, 5, 9]])
+ >>> y
+ array([[3, 1, 7],
+ [2, 0, 0],
+ [8, 5, 9]])
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ >>> y.setflags(write=0, align=0)
+ >>> y.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : True
+ WRITEABLE : False
+ ALIGNED : False
+ WRITEBACKIFCOPY : False
+ >>> y.setflags(uic=1)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
+ """
+ a.sort(axis=-1, kind=None, order=None)
+
+ Sort an array in-place. Refer to `numpy.sort` for full documentation.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort under the covers and, in general, the
+ actual implementation will vary with datatype. The 'mergesort' option
+ is retained for backwards compatibility.
+
+ .. versionchanged:: 1.15.0
+ The 'stable' option was added.
+
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ See Also
+ --------
+ numpy.sort : Return a sorted copy of an array.
+ numpy.argsort : Indirect sort.
+ numpy.lexsort : Indirect stable sort on multiple keys.
+ numpy.searchsorted : Find elements in sorted array.
+ numpy.partition: Partial sort.
+
+ Notes
+ -----
+ See `numpy.sort` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> a = np.array([[1,4], [3,1]])
+ >>> a.sort(axis=1)
+ >>> a
+ array([[1, 4],
+ [1, 3]])
+ >>> a.sort(axis=0)
+ >>> a
+ array([[1, 3],
+ [1, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+ >>> a.sort(order='y')
+ >>> a
+ array([(b'c', 1), (b'a', 2)],
+ dtype=[('x', 'S1'), ('y', '<i8')])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
+ """
+ a.partition(kth, axis=-1, kind='introselect', order=None)
+
+ Rearranges the elements in the array in such a way that the value of the
+ element in kth position is in the position it would be in a sorted array.
+ All elements smaller than the kth element are moved before this element and
+ all equal or greater are moved behind it. The ordering of the elements in
+ the two partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ kth : int or sequence of ints
+ Element index to partition by. The kth element value will be in its
+ final sorted position and all smaller elements will be moved before it
+ and all equal or greater elements behind it.
+ The order of all elements in the partitions is undefined.
+ If provided with a sequence of kth it will partition all elements
+ indexed by kth of them into their sorted position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
+ axis : int, optional
+ Axis along which to sort. Default is -1, which means sort along the
+ last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need to be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ See Also
+ --------
+ numpy.partition : Return a partitioned copy of an array.
+ argpartition : Indirect partition.
+ sort : Full sort.
+
+ Notes
+ -----
+ See ``np.partition`` for notes on the different algorithms.
+
+ Examples
+ --------
+ >>> a = np.array([3, 4, 2, 1])
+ >>> a.partition(3)
+ >>> a
+ array([2, 1, 3, 4])
+
+ >>> a.partition((1, 3))
+ >>> a
+ array([1, 2, 3, 4])
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
+ """
+ a.squeeze(axis=None)
+
+ Remove axes of length one from `a`.
+
+ Refer to `numpy.squeeze` for full documentation.
+
+ See Also
+ --------
+ numpy.squeeze : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
+ """
+ a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+ Returns the standard deviation of the array elements along given axis.
+
+ Refer to `numpy.std` for full documentation.
+
+ See Also
+ --------
+ numpy.std : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
+ """
+ a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+
+ Return the sum of the array elements over the given axis.
+
+ Refer to `numpy.sum` for full documentation.
+
+ See Also
+ --------
+ numpy.sum : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
+ """
+ a.swapaxes(axis1, axis2)
+
+ Return a view of the array with `axis1` and `axis2` interchanged.
+
+ Refer to `numpy.swapaxes` for full documentation.
+
+ See Also
+ --------
+ numpy.swapaxes : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
+ """
+ a.take(indices, axis=None, out=None, mode='raise')
+
+ Return an array formed from the elements of `a` at the given indices.
+
+ Refer to `numpy.take` for full documentation.
+
+ See Also
+ --------
+ numpy.take : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
+ """
+ a.tofile(fid, sep="", format="%s")
+
+ Write array to a file as text or binary (default).
+
+ Data is always written in 'C' order, independent of the order of `a`.
+ The data produced by this method can be recovered using the function
+ fromfile().
+
+ Parameters
+ ----------
+ fid : file or str or Path
+ An open file object, or a string containing a filename.
+
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
+ sep : str
+ Separator between array items for text output.
+ If "" (empty), a binary file is written, equivalent to
+ ``file.write(a.tobytes())``.
+ format : str
+ Format string for text file output.
+ Each entry in the array is formatted to text by first converting
+ it to the closest Python type, and then using "format" % item.
+
+ Notes
+ -----
+ This is a convenience function for quick storage of array data.
+ Information on endianness and precision is lost, so this method is not a
+ good choice for files intended to archive data or transport data between
+ machines with different endianness. Some of these problems can be overcome
+ by outputting the data as text files, at the expense of speed and file
+ size.
+
+ When fid is a file object, array contents are directly written to the
+ file, bypassing the file object's ``write`` method. As a result, tofile
+    cannot be used with file objects supporting compression (e.g., GzipFile)
+ or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
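+    Examples
+    --------
+    A binary round-trip sketch; ``path`` is an illustrative temporary file:
+
+    >>> import os, tempfile
+    >>> a = np.array([1.0, 2.0, 3.0])
+    >>> path = os.path.join(tempfile.mkdtemp(), 'data.bin')
+    >>> a.tofile(path)
+    >>> np.fromfile(path)
+    array([1., 2., 3.])
+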
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
+ """
+ a.tolist()
+
+ Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
+
+ Return a copy of the array data as a (nested) Python list.
+ Data items are converted to the nearest compatible builtin Python type, via
+ the `~numpy.ndarray.item` function.
+
+ If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+ not be a list at all, but a simple Python scalar.
+
+ Parameters
+ ----------
+ none
+
+ Returns
+ -------
+ y : object, or list of object, or list of list of object, or ...
+ The possibly nested list of array elements.
+
+ Notes
+ -----
+ The array may be recreated via ``a = np.array(a.tolist())``, although this
+ may sometimes lose precision.
+
+ Examples
+ --------
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ except that ``tolist`` changes numpy scalars to Python scalars:
+
+ >>> a = np.uint32([1, 2])
+ >>> a_list = list(a)
+ >>> a_list
+ [1, 2]
+ >>> type(a_list[0])
+ <class 'numpy.uint32'>
+ >>> a_tolist = a.tolist()
+ >>> a_tolist
+ [1, 2]
+ >>> type(a_tolist[0])
+ <class 'int'>
+
+ Additionally, for a 2D array, ``tolist`` applies recursively:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> list(a)
+ [array([1, 2]), array([3, 4])]
+ >>> a.tolist()
+ [[1, 2], [3, 4]]
+
+ The base case for this recursion is a 0D array:
+
+ >>> a = np.array(1)
+ >>> list(a)
+ Traceback (most recent call last):
+ ...
+ TypeError: iteration over a 0-d array
+ >>> a.tolist()
+ 1
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
+ a.tobytes(order='C')
+
+ Construct Python bytes containing the raw data bytes in the array.
+
+ Constructs Python bytes showing a copy of the raw contents of
+ data memory. The bytes object is produced in C-order by default.
+ This behavior is controlled by the ``order`` parameter.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A'}, optional
+ Controls the memory layout of the bytes object. 'C' means C-order,
+ 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+ Fortran contiguous, 'C' otherwise. Default is 'C'.
+
+ Returns
+ -------
+ s : bytes
+ Python bytes exhibiting a copy of `a`'s raw data.
+
+ See also
+ --------
+ frombuffer
+ Inverse of this operation, construct a 1-dimensional array from Python
+ bytes.
+
+ Examples
+ --------
+ >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
+ >>> x.tobytes()
+ b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
+ >>> x.tobytes('C') == x.tobytes()
+ True
+ >>> x.tobytes('F')
+ b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
+ a.tostring(order='C')
+
+ A compatibility alias for `tobytes`, with exactly the same behavior.
+
+ Despite its name, it returns `bytes` not `str`\ s.
+
+ .. deprecated:: 1.19.0
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
+ """
+ a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+ Return the sum along diagonals of the array.
+
+ Refer to `numpy.trace` for full documentation.
+
+ See Also
+ --------
+ numpy.trace : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
+ """
+ a.transpose(*axes)
+
+ Returns a view of the array with axes transposed.
+
+ Refer to `numpy.transpose` for full documentation.
+
+ Parameters
+ ----------
+ axes : None, tuple of ints, or `n` ints
+
+ * None or no argument: reverses the order of the axes.
+
+ * tuple of ints: `i` in the `j`-th place in the tuple means that the
+ array's `i`-th axis becomes the transposed array's `j`-th axis.
+
+ * `n` ints: same as an n-tuple of the same ints (this form is
+ intended simply as a "convenience" alternative to the tuple form).
+
+ Returns
+ -------
+ p : ndarray
+ View of the array with its axes suitably permuted.
+
+ See Also
+ --------
+ transpose : Equivalent function.
+ ndarray.T : Array property returning the array transposed.
+ ndarray.reshape : Give a new shape to an array without changing its data.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> a.transpose()
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose((1, 0))
+ array([[1, 3],
+ [2, 4]])
+ >>> a.transpose(1, 0)
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> a
+ array([1, 2, 3, 4])
+ >>> a.transpose()
+ array([1, 2, 3, 4])
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
+ """
+ a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+ Returns the variance of the array elements, along given axis.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.var : equivalent function
+
+ """))
+
+
+add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
+ """
+ a.view([dtype][, type])
+
+ New view of array with the same data.
+
+ .. note::
+ Passing None for ``dtype`` is different from omitting the parameter,
+ since the former invokes ``dtype(None)`` which is an alias for
+ ``dtype('float_')``.
+
+ Parameters
+ ----------
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16.
+ Omitting it results in the view having the same data-type as `a`.
+ This argument can also be specified as an ndarray sub-class, which
+ then specifies the type of the returned object (this is equivalent to
+ setting the ``type`` parameter).
+ type : Python type, optional
+ Type of the returned view, e.g., ndarray or matrix. Again, omission
+ of the parameter results in type preservation.
+
+ Notes
+ -----
+ ``a.view()`` is used two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.) This does not cause a reinterpretation of the
+ memory.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a regular
+ array to a structured array), then the last axis of ``a`` must be
+ contiguous. This axis will be resized in the result.
+
+ .. versionchanged:: 1.23.0
+ Only the last axis needs to be contiguous. Previously, the entire array
+ had to be C-contiguous.
+
+ Examples
+ --------
+ >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+ Viewing array data using a different type and dtype:
+
+ >>> y = x.view(dtype=np.int16, type=np.matrix)
+ >>> y
+ matrix([[513]], dtype=int16)
+ >>> print(type(y))
+ <class 'numpy.matrix'>
+
+ Creating a view on a structured array so it can be used in calculations
+
+ >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+ >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+ >>> xv
+ array([[1, 2],
+ [3, 4]], dtype=int8)
+ >>> xv.mean(0)
+ array([2., 3.])
+
+ Making changes to the view changes the underlying array
+
+ >>> xv[0,1] = 20
+ >>> x
+ array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
+
+ Using a view to convert an array to a recarray:
+
+ >>> z = x.view(np.recarray)
+ >>> z.a
+ array([1, 3], dtype=int8)
+
+ Views share data:
+
+ >>> x[0] = (9, 10)
+ >>> z[0]
+ (9, 10)
+
+ Views that change the dtype size (bytes per entry) should normally be
+ avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+ >>> y = x[:, ::2]
+ >>> y
+ array([[1, 3],
+ [4, 6]], dtype=int16)
+ >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+ Traceback (most recent call last):
+ ...
+ ValueError: To change to a dtype of a different size, the last axis must be contiguous
+ >>> z = y.copy()
+ >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+ array([[(1, 3)],
+ [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ However, views that change dtype are totally fine for arrays with a
+ contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+ >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+ >>> x.transpose(1, 0, 2).view(np.int16)
+ array([[[ 256, 770],
+ [3340, 3854]],
+ <BLANKLINE>
+ [[1284, 1798],
+ [4368, 4882]],
+ <BLANKLINE>
+ [[2312, 2826],
+ [5396, 5910]]], dtype=int16)
+
+ """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.umath', 'frompyfunc',
+ """
+ frompyfunc(func, /, nin, nout, *[, identity])
+
+ Takes an arbitrary Python function and returns a NumPy ufunc.
+
+ Can be used, for example, to add broadcasting to a built-in Python
+ function (see Examples section).
+
+ Parameters
+ ----------
+ func : Python function object
+ An arbitrary Python function.
+ nin : int
+ The number of input arguments.
+ nout : int
+ The number of objects returned by `func`.
+ identity : object, optional
+ The value to use for the `~numpy.ufunc.identity` attribute of the resulting
+ object. If specified, this is equivalent to setting the underlying
+ C ``identity`` field to ``PyUFunc_IdentityValue``.
+ If omitted, the identity is set to ``PyUFunc_None``. Note that this is
+ _not_ equivalent to setting the identity to ``None``, which implies the
+ operation is reorderable.
+
+ Returns
+ -------
+ out : ufunc
+ Returns a NumPy universal function (``ufunc``) object.
+
+ See Also
+ --------
+ vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
+
+ Notes
+ -----
+ The returned ufunc always returns PyObject arrays.
+
+ Examples
+ --------
+ Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+ >>> oct_array = np.frompyfunc(oct, 1, 1)
+ >>> oct_array(np.array((10, 30, 100)))
+ array(['0o12', '0o36', '0o144'], dtype=object)
+ >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+ array(['0o12', '0o36', '0o144'], dtype='<U5')
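+
+    A sketch of the ``identity`` argument described above (``padd`` is an
+    illustrative name):
+
+    >>> padd = np.frompyfunc(lambda a, b: a + b, 2, 1, identity=0)
+    >>> padd.identity
+    0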
+
+ """)
+
+add_newdoc('numpy.core.umath', 'geterrobj',
+ """
+ geterrobj()
+
+ Return the current object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in NumPy. `geterrobj` is used internally by the other
+ functions that get and set error handling behavior (`geterr`, `seterr`,
+ `geterrcall`, `seterrcall`).
+
+ Returns
+ -------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ seterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrobj() # first get the defaults
+ [8192, 521, None]
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+ >>> old_bufsize = np.setbufsize(20000)
+ >>> old_err = np.seterr(divide='raise')
+ >>> old_handler = np.seterrcall(err_handler)
+ >>> np.geterrobj()
+ [8192, 521, <function err_handler at 0x91dcaac>]
+
+ >>> old_err = np.seterr(all='ignore')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '0'
+ >>> old_err = np.seterr(divide='warn', over='log', under='call',
+ ... invalid='print')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '4351'
+
+ """)
+
+add_newdoc('numpy.core.umath', 'seterrobj',
+ """
+ seterrobj(errobj, /)
+
+ Set the object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in NumPy. `seterrobj` is used internally by the other
+ functions that set error handling behavior (`seterr`, `seterrcall`).
+
+ Parameters
+ ----------
+ errobj : list
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. The information for each error type
+ is contained in three bits of the integer. If we print it in base 8, we
+ can see what treatment is set for "invalid", "under", "over", and
+ "divide" (in that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ geterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> old_errobj = np.geterrobj() # first get the defaults
+ >>> old_errobj
+ [8192, 521, None]
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+ >>> new_errobj = [20000, 12, err_handler]
+ >>> np.seterrobj(new_errobj)
+ >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
+ '14'
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+ >>> np.geterrcall() is err_handler
+ True
+
+ """)
+
+
+##############################################################################
+#
+# compiled_base functions
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'add_docstring',
+ """
+ add_docstring(obj, docstring)
+
+ Add a docstring to a built-in obj if possible.
+    If the obj already has a docstring, raise a RuntimeError.
+    If this routine does not know how to add a docstring to the object,
+    raise a TypeError.
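+
+    Examples
+    --------
+    A hedged sketch: ``np.add`` already carries a docstring, so the call
+    below is expected to fail (the exact exception depends on the object
+    passed):
+
+    >>> try:
+    ...     np.core.multiarray.add_docstring(np.add, "demo docstring")
+    ... except (RuntimeError, TypeError):
+    ...     pass  # already documented, or unsupported object type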
+ """)
+
+add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
+ """
+    _add_newdoc_ufunc(ufunc, new_docstring)
+
+ Replace the docstring for a ufunc with new_docstring.
+ This method will only work if the current docstring for
+ the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+ Parameters
+ ----------
+ ufunc : numpy.ufunc
+ A ufunc whose current doc is NULL.
+ new_docstring : string
+ The new docstring for the ufunc.
+
+ Notes
+ -----
+    This method allocates memory for new_docstring on
+    the heap. Technically this creates a memory leak, since this
+    memory will not be reclaimed until the end of the program
+    even if the ufunc itself is removed. However, this will only
+    be a problem if the user is repeatedly creating ufuncs with
+    no documentation, adding documentation via _add_newdoc_ufunc,
+    and then throwing away the ufunc.
+ """)
+
+add_newdoc('numpy.core.multiarray', 'get_handler_name',
+ """
+ get_handler_name(a: ndarray) -> str,None
+
+ Return the name of the memory handler used by `a`. If not provided, return
+ the name of the memory handler that will be used to allocate data for the
+ next `ndarray` in this context. May return None if `a` does not own its
+ memory, in which case you can traverse ``a.base`` for a memory handler.
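+
+    Examples
+    --------
+    A rough sketch; the name below assumes NumPy's default allocator is
+    in use:
+
+    >>> buf = np.arange(3)  # freshly allocated, owns its memory
+    >>> np.core.multiarray.get_handler_name(buf)
+    'default_allocator'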
+ """)
+
+add_newdoc('numpy.core.multiarray', 'get_handler_version',
+ """
+ get_handler_version(a: ndarray) -> int,None
+
+ Return the version of the memory handler used by `a`. If not provided,
+ return the version of the memory handler that will be used to allocate data
+ for the next `ndarray` in this context. May return None if `a` does not own
+ its memory, in which case you can traverse ``a.base`` for a memory handler.
+ """)
+
+add_newdoc('numpy.core.multiarray', '_get_madvise_hugepage',
+ """
+ _get_madvise_hugepage() -> bool
+
+    Get the current use of ``madvise(2)`` MADV_HUGEPAGE support when
+    allocating the array data. Returns the currently set value.
+ See `global_state` for more information.
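+
+    Examples
+    --------
+    The returned value depends on platform support and any prior
+    configuration, so the sketch below only checks the type:
+
+    >>> enabled = np.core.multiarray._get_madvise_hugepage()
+    >>> isinstance(enabled, bool)
+    True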
+ """)
+
+add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
+ """
+ _set_madvise_hugepage(enabled: bool) -> bool
+
+    Set or unset use of ``madvise(2)`` MADV_HUGEPAGE support when
+ allocating the array data. Returns the previously set value.
+ See `global_state` for more information.
+ """)
+
+add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
+ """
+ format_float_OSprintf_g(val, precision)
+
+ Print a floating point scalar using the system's printf function,
+ equivalent to:
+
+ printf("%.*g", precision, val);
+
+ for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
+ method is designed to help cross-validate the format_float_* methods.
+
+ Parameters
+ ----------
+ val : python float or numpy floating scalar
+ Value to format.
+
+ precision : non-negative integer, optional
+ Precision given to printf.
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+ format_float_positional
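+
+    Examples
+    --------
+    A brief sketch; the output follows the platform's ``printf``:
+
+    >>> from numpy.core._multiarray_tests import format_float_OSprintf_g
+    >>> format_float_OSprintf_g(0.1, 3)  # printf("%.3g", 0.1)
+    '0.1'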
+ """)
+
+
+##############################################################################
+#
+# Documentation for ufunc attributes and methods
+#
+##############################################################################
+
+
+##############################################################################
+#
+# ufunc object
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc',
+ """
+ Functions that operate element by element on whole arrays.
+
+ To see the documentation for a specific ufunc, use `info`. For
+ example, ``np.info(np.sin)``. Because ufuncs are written in C
+ (for speed) and linked into Python with NumPy's ufunc facility,
+ Python's help() function finds this page whenever help() is called
+ on a ufunc.
+
+ A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
+
+ **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)``
+
+ Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
+
+ The broadcasting rules are:
+
+ * Dimensions of length 1 may be prepended to either array.
+ * Arrays may be repeated along dimensions of length 1.
+
+ Parameters
+ ----------
+ *x : array_like
+ Input arrays.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ Alternate array object(s) in which to put the result; if provided, it
+ must have a shape that the inputs broadcast to. A tuple of arrays
+ (possible only as a keyword argument) must have length equal to the
+ number of outputs; use None for uninitialized outputs to be
+ allocated by the ufunc.
+ where : array_like, optional
+ This condition is broadcast over the input. At locations where the
+ condition is True, the `out` array will be set to the ufunc result.
+ Elsewhere, the `out` array will retain its original value.
+ Note that if an uninitialized `out` array is created via the default
+ ``out=None``, locations within it where the condition is False will
+ remain uninitialized.
+ **kwargs
+ For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ Returns
+ -------
+ r : ndarray or tuple of ndarray
+ `r` will have the shape that the arrays in `x` broadcast to; if `out` is
+ provided, it will be returned. If not, `r` will be allocated and
+ may contain uninitialized values. If the function has more than one
+ output, then the result will be a tuple of arrays.
+
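+    Examples
+    --------
+    A short illustration of the call semantics described above; the
+    pre-filled ``out`` makes visible which locations ``where`` leaves
+    untouched:
+
+    >>> x = np.arange(4)
+    >>> np.add(x, 10)
+    array([10, 11, 12, 13])
+    >>> out = np.full(4, -1)
+    >>> np.add(x, 10, out=out, where=[True, False, True, False])
+    array([10, -1, 12, -1])
+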
+ """)
+
+
+##############################################################################
+#
+# ufunc attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('identity',
+ """
+ The identity value.
+
+ Data attribute containing the identity element for the ufunc, if it has one.
+ If it does not, the attribute value is None.
+
+ Examples
+ --------
+ >>> np.add.identity
+ 0
+ >>> np.multiply.identity
+ 1
+ >>> np.power.identity
+ 1
+ >>> print(np.exp.identity)
+ None
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('nargs',
+ """
+ The number of arguments.
+
+ Data attribute containing the number of arguments the ufunc takes, including
+ optional ones.
+
+ Notes
+ -----
+ Typically this value will be one more than what you might expect because all
+ ufuncs take the optional "out" argument.
+
+ Examples
+ --------
+ >>> np.add.nargs
+ 3
+ >>> np.multiply.nargs
+ 3
+ >>> np.power.nargs
+ 3
+ >>> np.exp.nargs
+ 2
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('nin',
+ """
+ The number of inputs.
+
+ Data attribute containing the number of arguments the ufunc treats as input.
+
+ Examples
+ --------
+ >>> np.add.nin
+ 2
+ >>> np.multiply.nin
+ 2
+ >>> np.power.nin
+ 2
+ >>> np.exp.nin
+ 1
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('nout',
+ """
+ The number of outputs.
+
+ Data attribute containing the number of arguments the ufunc treats as output.
+
+ Notes
+ -----
+ Since all ufuncs can take output arguments, this will always be (at least) 1.
+
+ Examples
+ --------
+ >>> np.add.nout
+ 1
+ >>> np.multiply.nout
+ 1
+ >>> np.power.nout
+ 1
+ >>> np.exp.nout
+ 1
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('ntypes',
+ """
+ The number of types.
+
+ The number of numerical NumPy types - of which there are 18 total - on which
+ the ufunc can operate.
+
+ See Also
+ --------
+ numpy.ufunc.types
+
+ Examples
+ --------
+ >>> np.add.ntypes
+ 18
+ >>> np.multiply.ntypes
+ 18
+ >>> np.power.ntypes
+ 17
+ >>> np.exp.ntypes
+ 7
+ >>> np.remainder.ntypes
+ 14
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('types',
+ """
+ Returns a list with types grouped input->output.
+
+ Data attribute listing the data-type "Domain-Range" groupings the ufunc can
+ deliver. The data-types are given using the character codes.
+
+ See Also
+ --------
+ numpy.ufunc.ntypes
+
+ Examples
+ --------
+ >>> np.add.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+ 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+ 'GG->G', 'OO->O']
+
+ >>> np.multiply.types
+ ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
+ 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
+ 'GG->G', 'OO->O']
+
+ >>> np.power.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+ 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
+ 'OO->O']
+
+ >>> np.exp.types
+ ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+ >>> np.remainder.types
+ ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
+ 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('signature',
+ """
+ Definition of the core elements a generalized ufunc operates on.
+
+ The signature determines how the dimensions of each input/output array
+ are split into core and loop dimensions:
+
+ 1. Each dimension in the signature is matched to a dimension of the
+ corresponding passed-in array, starting from the end of the shape tuple.
+ 2. Core dimensions assigned to the same label in the signature must have
+ exactly matching sizes, no broadcasting is performed.
+ 3. The core dimensions are removed from all inputs and the remaining
+ dimensions are broadcast together, defining the loop dimensions.
+
+ Notes
+ -----
+ Generalized ufuncs are used internally in many linalg functions, and in
+ the testing suite; the examples below are taken from these.
+ For ufuncs that operate on scalars, the signature is None, which is
+ equivalent to '()' for every argument.
+
+ Examples
+ --------
+ >>> np.core.umath_tests.matrix_multiply.signature
+ '(m,n),(n,p)->(m,p)'
+ >>> np.linalg._umath_linalg.det.signature
+ '(m,m)->()'
+ >>> np.add.signature is None
+ True # equivalent to '(),()->()'
+ """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy.core', 'ufunc', ('reduce',
+ """
+ reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+ Reduces `array`'s dimension by one, by applying ufunc along one axis.
+
+ Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+ :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+ the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+ ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+ For a one-dimensional array, reduce produces results equivalent to:
+ ::
+
+ r = op.identity # op = ufunc
+ for i in range(len(A)):
+ r = op(r, A[i])
+ return r
+
+ For example, add.reduce() is equivalent to sum().
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a reduction is performed.
+        The default (`axis` = 0) is to perform a reduction over the first
+ dimension of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is None, a reduction is performed over all the axes.
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+
+ For operations which are either not commutative or not associative,
+ doing a reduction over multiple axes is not well-defined. The
+ ufuncs do not currently raise an exception in this case, but will
+ likely do so in the future.
+ dtype : data-type code, optional
+ The type used to represent the intermediate results. Defaults
+ to the data-type of the output array if this is provided, or
+ the data-type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or None,
+ a freshly-allocated array is returned. For consistency with
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `array`.
+
+ .. versionadded:: 1.7.0
+ initial : scalar, optional
+ The value with which to start the reduction.
+ If the ufunc has no identity or the dtype is object, this defaults
+ to None - otherwise it defaults to ufunc.identity.
+ If ``None`` is given, the first element of the reduction is used,
+ and an error is thrown if the reduction is empty.
+
+ .. versionadded:: 1.15.0
+
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `array`, and selects elements to include in the reduction. Note
+ that for ufuncs like ``minimum`` that do not have an identity
+ defined, one has to pass in also ``initial``.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ r : ndarray
+ The reduced array. If `out` was supplied, `r` is a reference to it.
+
+ Examples
+ --------
+ >>> np.multiply.reduce([2,3,5])
+ 30
+
+ A multi-dimensional array example:
+
+ >>> X = np.arange(8).reshape((2,2,2))
+ >>> X
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.add.reduce(X, 0)
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X) # confirm: default axis value is 0
+ array([[ 4, 6],
+ [ 8, 10]])
+ >>> np.add.reduce(X, 1)
+ array([[ 2, 4],
+ [10, 12]])
+ >>> np.add.reduce(X, 2)
+ array([[ 1, 5],
+ [ 9, 13]])
+
+ You can use the ``initial`` keyword argument to initialize the reduction
+ with a different value, and ``where`` to select specific elements to include:
+
+ >>> np.add.reduce([10], initial=5)
+ 15
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
+ array([14., 14.])
+ >>> a = np.array([10., np.nan, 10])
+ >>> np.add.reduce(a, where=~np.isnan(a))
+ 20.0
+
+ Allows reductions of empty arrays where they would normally fail, i.e.
+ for ufuncs without an identity.
+
+ >>> np.minimum.reduce([], initial=np.inf)
+ inf
+ >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
+ array([ 1., 10.])
+ >>> np.minimum.reduce([])
+ Traceback (most recent call last):
+ ...
+ ValueError: zero-size array to reduction operation minimum which has no identity
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('accumulate',
+ """
+ accumulate(array, axis=0, dtype=None, out=None)
+
+ Accumulate the result of applying the operator to all elements.
+
+ For a one-dimensional array, accumulate produces results equivalent to::
+
+ r = np.empty(len(A))
+ t = op.identity # op = the ufunc being applied to A's elements
+ for i in range(len(A)):
+ t = op(t, A[i])
+ r[i] = t
+ return r
+
+ For example, add.accumulate() is equivalent to np.cumsum().
+
+ For a multi-dimensional array, accumulate is applied along only one
+ axis (axis zero by default; see Examples below) so repeated use is
+ necessary if one wants to accumulate over multiple axes.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ axis : int, optional
+ The axis along which to apply the accumulation; default is zero.
+ dtype : data-type code, optional
+ The data-type used to represent the intermediate results. Defaults
+ to the data-type of the output array if such is provided, or the
+ data-type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or None,
+ a freshly-allocated array is returned. For consistency with
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+
+ Returns
+ -------
+ r : ndarray
+ The accumulated values. If `out` was supplied, `r` is a reference to
+ `out`.
+
+ Examples
+ --------
+ 1-D array examples:
+
+ >>> np.add.accumulate([2, 3, 5])
+ array([ 2, 5, 10])
+ >>> np.multiply.accumulate([2, 3, 5])
+ array([ 2, 6, 30])
+
+ 2-D array examples:
+
+ >>> I = np.eye(2)
+ >>> I
+ array([[1., 0.],
+ [0., 1.]])
+
+ Accumulate along axis 0 (rows), down columns:
+
+ >>> np.add.accumulate(I, 0)
+ array([[1., 0.],
+ [1., 1.]])
+ >>> np.add.accumulate(I) # no axis specified = axis zero
+ array([[1., 0.],
+ [1., 1.]])
+
+ Accumulate along axis 1 (columns), through rows:
+
+ >>> np.add.accumulate(I, 1)
+ array([[1., 1.],
+ [0., 1.]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('reduceat',
+ """
+ reduceat(array, indices, axis=0, dtype=None, out=None)
+
+ Performs a (local) reduce with specified slices over a single axis.
+
+ For i in ``range(len(indices))``, `reduceat` computes
+ ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
+ generalized "row" parallel to `axis` in the final result (i.e., in a
+ 2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
+ `axis = 1`, it becomes the i-th column). There are three exceptions to this:
+
+ * when ``i = len(indices) - 1`` (so for the last index),
+ ``indices[i+1] = array.shape[axis]``.
+ * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
+ simply ``array[indices[i]]``.
+ * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
+
+ The shape of the output depends on the size of `indices`, and may be
+ larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
+
+ Parameters
+ ----------
+ array : array_like
+ The array to act on.
+ indices : array_like
+ Paired indices, comma separated (not colon), specifying slices to
+ reduce.
+ axis : int, optional
+ The axis along which to apply the reduceat.
+ dtype : data-type code, optional
+ The type used to represent the intermediate results. Defaults
+ to the data type of the output array if this is provided, or
+ the data type of the input array if no output array is provided.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or None,
+ a freshly-allocated array is returned. For consistency with
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
+
+ Returns
+ -------
+ r : ndarray
+ The reduced values. If `out` was supplied, `r` is a reference to
+ `out`.
+
+ Notes
+ -----
+ A descriptive example:
+
+ If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+ ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
+ ``range(len(array) - 1)`` with a zero placed
+ in every other element:
+ ``indices = zeros(2 * len(array) - 1)``,
+ ``indices[1::2] = range(1, len(array))``.
+
+ Don't be fooled by this attribute's name: `reduceat(array)` is not
+ necessarily smaller than `array`.
+
+ Examples
+ --------
+ To take the running sum of four successive values:
+
+ >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+ array([ 6, 10, 14, 18])
+
+ A 2-D example:
+
+ >>> x = np.linspace(0, 15, 16).reshape(4,4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+
+ ::
+
+ # reduce such that the result has the following five rows:
+ # [row1 + row2 + row3]
+ # [row4]
+ # [row2]
+ # [row3]
+ # [row1 + row2 + row3 + row4]
+
+ >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+ array([[12., 15., 18., 21.],
+ [12., 13., 14., 15.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [24., 28., 32., 36.]])
+
+ ::
+
+ # reduce such that result has the following two columns:
+ # [col1 * col2 * col3, col4]
+
+ >>> np.multiply.reduceat(x, [0, 3], 1)
+ array([[ 0., 3.],
+ [ 120., 7.],
+ [ 720., 11.],
+ [2184., 15.]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('outer',
+ r"""
+ outer(A, B, /, **kwargs)
+
+ Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+ Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+ ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+ .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+ op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+ For `A` and `B` one-dimensional, this is equivalent to::
+
+      r = empty((len(A), len(B)))
+ for i in range(len(A)):
+ for j in range(len(B)):
+ r[i,j] = op(A[i], B[j]) # op = ufunc in question
+
+ Parameters
+ ----------
+ A : array_like
+ First array
+ B : array_like
+ Second array
+ kwargs : any
+ Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+ See `ufunc` for a comprehensive overview of all available arguments.
+
+ Returns
+ -------
+ r : ndarray
+ Output array
+
+ See Also
+ --------
+ numpy.outer : A less powerful version of ``np.multiply.outer``
+ that `ravel`\ s all inputs to 1D. This exists
+ primarily for compatibility with old code.
+
+ tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
+                ``np.multiply.outer(a, b)`` behave the same for all
+                dimensions of a and b.
+
+ Examples
+ --------
+ >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+ array([[ 4, 5, 6],
+ [ 8, 10, 12],
+ [12, 15, 18]])
+
+ A multi-dimensional example:
+
+ >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> A.shape
+ (2, 3)
+ >>> B = np.array([[1, 2, 3, 4]])
+ >>> B.shape
+ (1, 4)
+ >>> C = np.multiply.outer(A, B)
+ >>> C.shape; C
+ (2, 3, 1, 4)
+ array([[[[ 1, 2, 3, 4]],
+ [[ 2, 4, 6, 8]],
+ [[ 3, 6, 9, 12]]],
+ [[[ 4, 8, 12, 16]],
+ [[ 5, 10, 15, 20]],
+ [[ 6, 12, 18, 24]]]])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('at',
+ """
+ at(a, indices, b=None, /)
+
+    Performs an unbuffered in-place operation on operand 'a' for elements
+    specified by 'indices'. For the addition ufunc, this method is equivalent to
+ ``a[indices] += b``, except that results are accumulated for elements that
+ are indexed more than once. For example, ``a[[0,0]] += 1`` will only
+ increment the first element once because of buffering, whereas
+ ``add.at(a, [0,0], 1)`` will increment the first element twice.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+        The array to perform the in-place operation on.
+ indices : array_like or tuple
+ Array like index object or slice object for indexing into first
+ operand. If first operand has multiple dimensions, indices can be a
+ tuple of array like index objects or slice objects.
+ b : array_like
+ Second operand for ufuncs requiring two operands. Operand must be
+ broadcastable over first operand after indexing or slicing.
+
+ Examples
+ --------
+ Set items 0 and 1 to their negative values:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.negative.at(a, [0, 1])
+ >>> a
+ array([-1, -2, 3, 4])
+
+ Increment items 0 and 1, and increment item 2 twice:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> np.add.at(a, [0, 1, 2, 2], 1)
+ >>> a
+ array([2, 3, 5, 4])
+
+ Add items 0 and 1 in first array to second array,
+ and store results in first array:
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> b = np.array([1, 2])
+ >>> np.add.at(a, [0, 1], b)
+ >>> a
+ array([2, 4, 3, 4])
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('resolve_dtypes',
+ """
+ resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False)
+
+ Find the dtypes NumPy will use for the operation. Both input and
+ output dtypes are returned and may differ from those provided.
+
+ .. note::
+
+ This function always applies NEP 50 rules since it is not provided
+ any actual values. The Python types ``int``, ``float``, and
+        ``complex`` thus behave as weak scalars and should be passed for
+        "untyped" Python input.
+
+ Parameters
+ ----------
+ dtypes : tuple of dtypes, None, or literal int, float, complex
+ The input dtypes for each operand. Output operands can be
+ None, indicating that the dtype must be found.
+ signature : tuple of DTypes or None, optional
+ If given, enforces exact DType (classes) of the specific operand.
+ The ufunc ``dtype`` argument is equivalent to passing a tuple with
+ only output dtypes set.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ The casting mode when casting is necessary. This is identical to
+ the ufunc call casting modes.
+    reduction : bool, optional
+        If True, the resolution assumes a reduce operation is happening
+ which slightly changes the promotion and type resolution rules.
+ `dtypes` is usually something like ``(None, np.dtype("i2"), None)``
+ for reductions (first input is also the output).
+
+ .. note::
+
+ The default casting mode is "same_kind", however, as of
+ NumPy 1.24, NumPy uses "unsafe" for reductions.
+
+ Returns
+ -------
+ dtypes : tuple of dtypes
+        The dtypes which NumPy would use for the calculation. Note that
+        these may not match the passed-in ones, in which case casting is
+        necessary.
+
+ See Also
+ --------
+ numpy.ufunc._resolve_dtypes_and_context :
+        Similar function to this, but returns additional information which
+        gives access to the core C functionality of NumPy.
+
+ Examples
+ --------
+    This API requires passing dtypes; define them for convenience:
+
+ >>> int32 = np.dtype("int32")
+ >>> float32 = np.dtype("float32")
+
+ The typical ufunc call does not pass an output dtype. `np.add` has two
+ inputs and one output, so leave the output as ``None`` (not provided):
+
+ >>> np.add.resolve_dtypes((int32, float32, None))
+ (dtype('float64'), dtype('float64'), dtype('float64'))
+
+ The loop found uses "float64" for all operands (including the output), the
+ first input would be cast.
+
+ ``resolve_dtypes`` supports "weak" handling for Python scalars by passing
+ ``int``, ``float``, or ``complex``:
+
+ >>> np.add.resolve_dtypes((float32, float, None))
+ (dtype('float32'), dtype('float32'), dtype('float32'))
+
+    Where the Python ``float`` behaves similarly to a Python value ``0.0``
+ in a ufunc call. (See :ref:`NEP 50 <NEP50>` for details.)
+
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('_resolve_dtypes_and_context',
+ """
+ _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False)
+
+ See `numpy.ufunc.resolve_dtypes` for parameter information. This
+ function is considered *unstable*. You may use it, but the returned
+ information is NumPy version specific and expected to change.
+ Large API/ABI changes are not expected, but a new NumPy version is
+ expected to require updating code using this functionality.
+
+ This function is designed to be used in conjunction with
+ `numpy.ufunc._get_strided_loop`. The calls are split to mirror the C API
+ and allow future improvements.
+
+ Returns
+ -------
+ dtypes : tuple of dtypes
+ call_info :
+ PyCapsule with all necessary information to get access to low level
+ C calls. See `numpy.ufunc._get_strided_loop` for more information.
+
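+    Examples
+    --------
+    A minimal sketch of this unstable interface; the resolved dtypes are
+    assumed to match what `resolve_dtypes` would report:
+
+    >>> dtypes, call_info = np.add._resolve_dtypes_and_context(
+    ...     (np.dtype("float64"), np.dtype("float64"), None))
+    >>> dtypes
+    (dtype('float64'), dtype('float64'), dtype('float64'))
+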
+ """))
+
+add_newdoc('numpy.core', 'ufunc', ('_get_strided_loop',
+ """
+ _get_strided_loop(call_info, /, *, fixed_strides=None)
+
+ This function fills in the ``call_info`` capsule to include all
+ information necessary to call the low-level strided loop from NumPy.
+
+ See notes for more information.
+
+ Parameters
+ ----------
+ call_info : PyCapsule
+ The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`.
+ fixed_strides : tuple of int or None, optional
+ A tuple with fixed byte strides of all input arrays. NumPy may use
+ this information to find specialized loops, so any call must follow
+ the given stride. Use ``None`` to indicate that the stride is not
+ known (or not fixed) for all calls.
+
+ Notes
+ -----
+ Together with `numpy.ufunc._resolve_dtypes_and_context` this function
+ gives low-level access to the NumPy ufunc loops.
+ The first function does general preparation and returns the required
+ information. It returns this as a C capsule with the version specific
+ name ``numpy_1.24_ufunc_call_info``.
+ The NumPy 1.24 ufunc call info capsule has the following layout::
+
+ typedef struct {
+ PyArrayMethod_StridedLoop *strided_loop;
+ PyArrayMethod_Context *context;
+ NpyAuxData *auxdata;
+
+ /* Flag information (expected to change) */
+ npy_bool requires_pyapi; /* GIL is required by loop */
+
+ /* Loop doesn't set FPE flags; if not set check FPE flags */
+ npy_bool no_floatingpoint_errors;
+ } ufunc_call_info;
+
+ Note that the first call only fills in the ``context``. The call to
+ ``_get_strided_loop`` fills in all other data.
+ Please see the ``numpy/experimental_dtype_api.h`` header for exact
+ call information; the main thing to note is that the new-style loops
+ return 0 on success, -1 on failure. They are passed context as new
+ first input and ``auxdata`` as (replaced) last.
+
+    Only the ``strided_loop`` signature is considered guaranteed stable
+ for NumPy bug-fix releases. All other API is tied to the experimental
+ API versioning.
+
+ The reason for the split call is that cast information is required to
+ decide what the fixed-strides will be.
+
+ NumPy ties the lifetime of the ``auxdata`` information to the capsule.
+
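+    Examples
+    --------
+    A hedged sketch; the filled-in capsule is only consumable from C, so
+    Python code can merely prepare it:
+
+    >>> dtypes, call_info = np.add._resolve_dtypes_and_context(
+    ...     (np.dtype("float64"), np.dtype("float64"), None))
+    >>> np.add._get_strided_loop(call_info)  # fills the capsule in place
+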
+ """))
+
+
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype',
+ """
+ dtype(dtype, align=False, copy=False)
+
+ Create a data type object.
+
+ A numpy array is homogeneous, and contains elements described by a
+ dtype object. A dtype object can be constructed from different
+ combinations of fundamental numeric types.
+
+ Parameters
+ ----------
+ dtype
+ Object to be converted to a data type object.
+ align : bool, optional
+ Add padding to the fields to match what a C compiler would output
+        for a similar C-struct. Can be ``True`` only if `dtype` is a dictionary
+ or a comma-separated string. If a struct dtype is being created,
+ this also sets a sticky alignment flag ``isalignedstruct``.
+ copy : bool, optional
+ Make a new copy of the data-type object. If ``False``, the result
+ may just be a reference to a built-in data-type object.
+
+ See also
+ --------
+ result_type
+
+ Examples
+ --------
+ Using array-scalar type:
+
+ >>> np.dtype(np.int16)
+ dtype('int16')
+
+ Structured type, one field name 'f1', containing int16:
+
+ >>> np.dtype([('f1', np.int16)])
+ dtype([('f1', '<i2')])
+
+ Structured type, one field named 'f1', in itself containing a structured
+ type with one field:
+
+ >>> np.dtype([('f1', [('f1', np.int16)])])
+ dtype([('f1', [('f1', '<i2')])])
+
+ Structured type, two fields: the first field contains an unsigned int, the
+ second an int32:
+
+ >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+ dtype([('f1', '<u8'), ('f2', '<i4')])
+
+ Using array-protocol type strings:
+
+ >>> np.dtype([('a','f8'),('b','S10')])
+ dtype([('a', '<f8'), ('b', 'S10')])
+
+ Using comma-separated field formats. The shape is (2,3):
+
+ >>> np.dtype("i4, (2,3)f8")
+ dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+ Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
+ is a flexible type, here of size 10:
+
+ >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+ dtype([('hello', '<i8', (3,)), ('world', 'V10')])
+
+ Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
+ the offsets in bytes:
+
+ >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+ dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
+
+ Using dictionaries. Two fields named 'gender' and 'age':
+
+ >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+ dtype([('gender', 'S1'), ('age', 'u1')])
+
+ Offsets in bytes, here 0 and 25:
+
+ >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+ dtype([('surname', 'S25'), ('age', 'u1')])
+
+ """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
+ """
+ The required alignment (bytes) of this data-type according to the compiler.
+
+ More information is available in the C-API section of the manual.
+
+ Examples
+ --------
+
+ >>> x = np.dtype('i4')
+ >>> x.alignment
+ 4
+
+ >>> x = np.dtype(float)
+ >>> x.alignment
+ 8
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
+ """
+ A character indicating the byte-order of this data-type object.
+
+ One of:
+
+ === ==============
+ '=' native
+ '<' little-endian
+ '>' big-endian
+ '|' not applicable
+ === ==============
+
+ All built-in data-type objects have byteorder either '=' or '|'.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype('i2')
+ >>> dt.byteorder
+ '='
+ >>> # endian is not relevant for 8 bit numbers
+ >>> np.dtype('i1').byteorder
+ '|'
+ >>> # or ASCII strings
+ >>> np.dtype('S2').byteorder
+ '|'
+ >>> # Even if specific code is given, and it is native
+ >>> # '=' is the byteorder
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = sys_is_le and '<' or '>'
+ >>> swapped_code = sys_is_le and '>' or '<'
+ >>> dt = np.dtype(native_code + 'i2')
+ >>> dt.byteorder
+ '='
+ >>> # Swapped code shows up as itself
+ >>> dt = np.dtype(swapped_code + 'i2')
+ >>> dt.byteorder == swapped_code
+ True
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('char',
+ """A unique character code for each of the 21 different built-in types.
+
+ Examples
+ --------
+
+ >>> x = np.dtype(float)
+ >>> x.char
+ 'd'
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
+ """
+ `__array_interface__` description of the data-type.
+
+ The format is that required by the 'descr' key in the
+ `__array_interface__` attribute.
+
+ Warning: This attribute exists specifically for `__array_interface__`,
+ and passing it directly to `np.dtype` will not accurately reconstruct
+ some dtypes (e.g., scalar and subarray dtypes).
+
+ Examples
+ --------
+
+ >>> x = np.dtype(float)
+ >>> x.descr
+ [('', '<f8')]
+
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.descr
+ [('name', '<U16'), ('grades', '<f8', (2,))]
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
+ """
+ Dictionary of named fields defined for this data type, or ``None``.
+
+ The dictionary is indexed by keys that are the names of the fields.
+ Each entry in the dictionary is a tuple fully describing the field::
+
+ (dtype, offset[, title])
+
+ Offset is limited to C int, which is signed and usually 32 bits.
+ If present, the optional title can be any object (if it is a string
+ or unicode then it will also be a key in the fields dictionary,
+ otherwise it's meta-data). Notice also that the first two elements
+ of the tuple can be passed directly as arguments to the ``ndarray.getfield``
+ and ``ndarray.setfield`` methods.
+
+ See Also
+ --------
+ ndarray.getfield, ndarray.setfield
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> print(dt.fields)
+ {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
+ """
+ Bit-flags describing how this data type is to be interpreted.
+
+ Bit-masks are in `numpy.core.multiarray` as the constants
+ `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+ `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+ of these flags is in C-API documentation; they are largely useful
+ for user-defined data-types.
+
+    The following example demonstrates that operations on this particular
+    dtype require the Python C-API.
+
+ Examples
+ --------
+
+ >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+ >>> x.flags
+ 16
+ >>> np.core.multiarray.NEEDS_PYAPI
+ 16
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
+ """
+ Boolean indicating whether this dtype contains any reference-counted
+ objects in any fields or sub-dtypes.
+
+ Recall that what is actually in the ndarray memory representing
+ the Python object is the memory address of that object (a pointer).
+ Special handling may be required, and this attribute is useful for
+ distinguishing data types that may contain arbitrary Python objects
+ and data-types that won't.
+
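+    Examples
+    --------
+    For example, an object dtype (or a structured dtype containing one)
+    reports True:
+
+    >>> np.dtype(float).hasobject
+    False
+    >>> np.dtype(object).hasobject
+    True
+    >>> np.dtype([('a', object), ('b', float)]).hasobject
+    True
+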
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
+ """
+ Integer indicating how this dtype relates to the built-in dtypes.
+
+ Read-only.
+
+ = ========================================================================
+ 0 if this is a structured array type, with fields
+ 1 if this is a dtype compiled into numpy (such as ints, floats etc)
+ 2 if the dtype is for a user-defined numpy type
+ A user-defined type uses the numpy C-API machinery to extend
+ numpy to handle a new array type. See
+ :ref:`user.user-defined-data-types` in the NumPy manual.
+ = ========================================================================
+
+ Examples
+ --------
+ >>> dt = np.dtype('i2')
+ >>> dt.isbuiltin
+ 1
+ >>> dt = np.dtype('f8')
+ >>> dt.isbuiltin
+ 1
+ >>> dt = np.dtype([('field1', 'f8')])
+ >>> dt.isbuiltin
+ 0
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
+ """
+ Boolean indicating whether the byte order of this dtype is native
+ to the platform.
+
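+    Examples
+    --------
+    The swapped counterpart of a native dtype is, by construction,
+    non-native:
+
+    >>> np.dtype(np.int32).isnative
+    True
+    >>> np.dtype(np.int32).newbyteorder().isnative
+    False
+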
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
+ """
+ Boolean indicating whether the dtype is a struct which maintains
+ field alignment. This flag is sticky, so when combining multiple
+ structs together, it is preserved and produces new dtypes which
+ are also aligned.
+
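+    Examples
+    --------
+    Passing ``align=True`` at creation sets the flag:
+
+    >>> np.dtype([('a', np.int8), ('b', np.int32)]).isalignedstruct
+    False
+    >>> np.dtype([('a', np.int8), ('b', np.int32)], align=True).isalignedstruct
+    True
+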
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
+ """
+ The element size of this data-type object.
+
+ For 18 of the 21 types this number is fixed by the data-type.
+ For the flexible data-types, this number can be anything.
+
+ Examples
+ --------
+
+ >>> arr = np.array([[1, 2], [3, 4]])
+ >>> arr.dtype
+ dtype('int64')
+ >>> arr.itemsize
+ 8
+
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.itemsize
+ 80
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
+ """
+ A character code (one of 'biufcmMOSUV') identifying the general kind of data.
+
+ = ======================
+ b boolean
+ i signed integer
+ u unsigned integer
+ f floating-point
+ c complex floating-point
+ m timedelta
+ M datetime
+ O object
+ S (byte-)string
+ U Unicode
+ V void
+ = ======================
+
+ Examples
+ --------
+
+ >>> dt = np.dtype('i4')
+ >>> dt.kind
+ 'i'
+ >>> dt = np.dtype('f8')
+ >>> dt.kind
+ 'f'
+ >>> dt = np.dtype([('field1', 'f8')])
+ >>> dt.kind
+ 'V'
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
+ """
+ Either ``None`` or a readonly dictionary of metadata (mappingproxy).
+
+ The metadata field can be set using any dictionary at data-type
+ creation. NumPy currently has no uniform approach to propagating
+ metadata; although some array operations preserve it, there is no
+ guarantee that others will.
+
+ .. warning::
+
+ Although used in certain projects, this feature was long undocumented
+ and is not well supported. Some aspects of metadata propagation
+ are expected to change in the future.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype(float, metadata={"key": "value"})
+ >>> dt.metadata["key"]
+ 'value'
+ >>> arr = np.array([1, 2, 3], dtype=dt)
+ >>> arr.dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ Adding arrays with identical datatypes currently preserves the metadata:
+
+ >>> (arr + arr).dtype.metadata
+ mappingproxy({'key': 'value'})
+
+ But if the arrays have different dtype metadata, the metadata may be
+ dropped:
+
+ >>> dt2 = np.dtype(float, metadata={"key2": "value2"})
+ >>> arr2 = np.array([3, 2, 1], dtype=dt2)
+ >>> (arr + arr2).dtype.metadata is None
+ True # The metadata field is cleared so None is returned
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('name',
+ """
+ A bit-width name for this data-type.
+
+ Un-sized flexible data-type objects do not have this attribute.
+
+ Examples
+ --------
+
+ >>> x = np.dtype(float)
+ >>> x.name
+ 'float64'
+ >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+ >>> x.name
+ 'void640'
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('names',
+ """
+ Ordered list of field names, or ``None`` if there are no fields.
+
+ The names are ordered according to increasing byte offset. This can be
+ used, for example, to walk through all of the named fields in offset order.
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.names
+ ('name', 'grades')
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('num',
+ """
+ A unique number for each of the 21 different built-in types.
+
+ These are roughly ordered from least-to-most precision.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype(str)
+ >>> dt.num
+ 19
+
+ >>> dt = np.dtype(float)
+ >>> dt.num
+ 12
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
+ """
+ Shape tuple of the sub-array if this data type describes a sub-array,
+ and ``()`` otherwise.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype(('i4', 4))
+ >>> dt.shape
+ (4,)
+
+ >>> dt = np.dtype(('i4', (2, 3)))
+ >>> dt.shape
+ (2, 3)
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
+ """
+ Number of dimensions of the sub-array if this data type describes a
+ sub-array, and ``0`` otherwise.
+
+ .. versionadded:: 1.13.0
+
+ Examples
+ --------
+ >>> x = np.dtype(float)
+ >>> x.ndim
+ 0
+
+ >>> x = np.dtype((float, 8))
+ >>> x.ndim
+ 1
+
+ >>> x = np.dtype(('i4', (3, 4)))
+ >>> x.ndim
+ 2
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('str',
+    """
+    The array-protocol typestring of this data-type object.
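+
+    Examples
+    --------
+    A brief sketch; the leading ``<`` below assumes a little-endian
+    platform:
+
+    >>> np.dtype(np.float64).str
+    '<f8'
+
+    """))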
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
+ """
+ Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
+ None otherwise.
+
+ The *shape* is the fixed shape of the sub-array described by this
+ data type, and *item_dtype* the data type of the array.
+
+ If a field whose dtype object has this attribute is retrieved,
+ then the extra dimensions implied by *shape* are tacked on to
+ the end of the retrieved array.
+
+ See Also
+ --------
+ dtype.base
+
+ Examples
+ --------
+    >>> x = np.dtype('8f')
+ >>> x.subdtype
+ (dtype('float32'), (8,))
+
+    >>> x = np.dtype('i2')
+ >>> x.subdtype
+ >>>
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('base',
+ """
+ Returns dtype for the base element of the subarrays,
+ regardless of their dimension or shape.
+
+ See Also
+ --------
+ dtype.subdtype
+
+ Examples
+ --------
+    >>> x = np.dtype('8f')
+ >>> x.base
+ dtype('float32')
+
+    >>> x = np.dtype('i2')
+ >>> x.base
+ dtype('int16')
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('type',
+ """The type object used to instantiate a scalar of this data-type."""))
+
+##############################################################################
+#
+# dtype methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
+ """
+ newbyteorder(new_order='S', /)
+
+ Return a new dtype with a different byte order.
+
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Parameters
+ ----------
+ new_order : string, optional
+ Byte order to force; a value from the byte order specifications
+ below. The default value ('S') results in swapping the current
+ byte order. `new_order` codes can be any of:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'little'} - little endian
+ * {'>', 'big'} - big endian
+ * {'=', 'native'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ Returns
+ -------
+ new_dtype : dtype
+ New dtype object with the given change to the byte order.
+
+ Notes
+ -----
+ Changes are also made in all fields and sub-arrays of the data type.
+
+ Examples
+ --------
+ >>> import sys
+ >>> sys_is_le = sys.byteorder == 'little'
+ >>> native_code = sys_is_le and '<' or '>'
+ >>> swapped_code = sys_is_le and '>' or '<'
+ >>> native_dt = np.dtype(native_code+'i2')
+ >>> swapped_dt = np.dtype(swapped_code+'i2')
+ >>> native_dt.newbyteorder('S') == swapped_dt
+ True
+ >>> native_dt.newbyteorder() == swapped_dt
+ True
+ >>> native_dt == swapped_dt.newbyteorder('S')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('=')
+ True
+ >>> native_dt == swapped_dt.newbyteorder('N')
+ True
+ >>> native_dt == native_dt.newbyteorder('|')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+ True
+ >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+ True
+ >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+ True
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__class_getitem__',
+ """
+ __class_getitem__(item, /)
+
+ Return a parametrized wrapper around the `~numpy.dtype` type.
+
+ .. versionadded:: 1.22
+
+ Returns
+ -------
+ alias : types.GenericAlias
+ A parametrized `~numpy.dtype` type.
+
+ Examples
+ --------
+ >>> import numpy as np
+
+ >>> np.dtype[np.int64]
+ numpy.dtype[numpy.int64]
+
+ Notes
+ -----
+    This method is only available for Python 3.9 and later.
+
+ See Also
+ --------
+ :pep:`585` : Type hinting generics in standard collections.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__ge__',
+ """
+ __ge__(value, /)
+
+ Return ``self >= value``.
+
+ Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
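+    Examples
+    --------
+    For instance, a safe cast from ``float32`` to ``float64`` exists, but
+    not the reverse:
+
+    >>> np.dtype('float64') >= np.dtype('float32')
+    True
+    >>> np.dtype('float32') >= np.dtype('float64')
+    False
+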
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__le__',
+ """
+ __le__(value, /)
+
+ Return ``self <= value``.
+
+ Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__gt__',
+ """
+    __gt__(value, /)
+
+ Return ``self > value``.
+
+ Equivalent to
+ ``self != value and np.can_cast(value, self, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('__lt__',
+ """
+ __lt__(value, /)
+
+ Return ``self < value``.
+
+ Equivalent to
+ ``self != value and np.can_cast(self, value, casting="safe")``.
+
+ See Also
+ --------
+ can_cast : Returns True if cast between data types can occur according to
+ the casting rule.
+
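+    Examples
+    --------
+    The comparison is strict, so a dtype is never less than itself:
+
+    >>> np.dtype('float32') < np.dtype('float64')
+    True
+    >>> np.dtype('float64') < np.dtype('float64')
+    False
+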
+ """))
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar',
+ """
+ busdaycalendar(weekmask='1111100', holidays=None)
+
+ A business day calendar object that efficiently stores information
+ defining valid days for the busday family of functions.
+
+ The default valid days are Monday through Friday ("business days").
+    A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional set of "holiday" dates that will always
+    be invalid.
+
+ Once a busdaycalendar object is created, the weekmask and holidays
+ cannot be modified.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates, no matter which
+ weekday they fall upon. Holiday dates may be specified in any
+ order, and NaT (not-a-time) dates are ignored. This list is
+ saved in a normalized form that is suited for fast calculations
+ of valid days.
+
+ Returns
+ -------
+ out : busdaycalendar
+ A business day calendar object containing the specified
+ weekmask and holidays values.
+
+ See Also
+ --------
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Attributes
+ ----------
+ Note: once a busdaycalendar object is created, you cannot modify the
+ weekmask or holidays. The attributes return copies of internal data.
+ weekmask : (copy) seven-element array of bool
+ holidays : (copy) sorted array of datetime64[D]
+
+ Examples
+ --------
+ >>> # Some important days in July
+ ... bdd = np.busdaycalendar(
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ >>> # Default is Monday to Friday weekdays
+ ... bdd.weekmask
+ array([ True, True, True, True, True, False, False])
+ >>> # Any holidays already on the weekend are removed
+ ... bdd.holidays
+ array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+ """)
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
+ """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
+ """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
+ """
+ normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive
+    index into the shape of an array with `ndim` dimensions. Raises an
+    AxisError with an appropriate message if this is not possible.
+
+ Used internally by all axis-checking logic.
+
+ .. versionadded:: 1.13.0
+
+ Parameters
+ ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative.
+    ndim : int
+        The number of dimensions of the array that `axis` should be
+        normalized against.
+    msg_prefix : str
+        A prefix to put before the message, typically the name of the argument.
+
+ Returns
+ -------
+ normalized_axis : int
+ The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+ Raises
+ ------
+ AxisError
+ If the axis index is invalid, when `-ndim <= axis < ndim` is false.
+
+ Examples
+ --------
+ >>> normalize_axis_index(0, ndim=3)
+ 0
+ >>> normalize_axis_index(1, ndim=3)
+ 1
+ >>> normalize_axis_index(-1, ndim=3)
+ 2
+
+ >>> normalize_axis_index(3, ndim=3)
+ Traceback (most recent call last):
+ ...
+ AxisError: axis 3 is out of bounds for array of dimension 3
+ >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+ Traceback (most recent call last):
+ ...
+ AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
+ """)
+
+add_newdoc('numpy.core.multiarray', 'datetime_data',
+ """
+ datetime_data(dtype, /)
+
+ Get information about the step size of a date or time type.
+
+ The returned tuple can be passed as the second argument of `numpy.datetime64` and
+ `numpy.timedelta64`.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+ Returns
+ -------
+ unit : str
+ The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
+ is based.
+ count : int
+ The number of base units in a step.
+
+ Examples
+ --------
+ >>> dt_25s = np.dtype('timedelta64[25s]')
+ >>> np.datetime_data(dt_25s)
+ ('s', 25)
+ >>> np.array(10, dt_25s).astype('timedelta64[s]')
+ array(250, dtype='timedelta64[s]')
+
+ The result can be used to construct a datetime that uses the same units
+ as a timedelta
+
+ >>> np.datetime64('2010', np.datetime_data(dt_25s))
+ numpy.datetime64('2010-01-01T00:00:00','25s')
+ """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ """
+ Base class for numpy scalar types.
+
+ Class from which most (all?) numpy scalar types are derived. For
+ consistency, exposes the same API as `ndarray`, despite many
+ consequent attributes being either "get-only," or completely irrelevant.
+    It is strongly suggested that users derive custom scalar types from
+    this class.
+
+ """)
+
+# Attributes
+
+def refer_to_array_attribute(attr, method=True):
+ docstring = """
+ Scalar {} identical to the corresponding array attribute.
+
+ Please see `ndarray.{}`.
+ """
+
+ return attr, docstring.format("method" if method else "attribute", attr)
+
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('T', method=False))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('base', method=False))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('data',
+ """Pointer to start of data."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
+ """Get array data-descriptor."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
+ """The integer value of flags."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
+ """A 1-D view of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
+ """The imaginary part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
+ """The length of one element in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
+ """The length of the scalar in bytes."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
+ """The number of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('real',
+ """The real part of the scalar."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
+ """Tuple of array dimensions."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('size',
+ """The number of elements in the gentype."""))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
+ """Tuple of bytes steps in each dimension."""))
+
+# Methods
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('all'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('any'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('argmax'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('argmin'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('argsort'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('astype'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('byteswap'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('choose'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('clip'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('compress'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('conjugate'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('copy'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('cumprod'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('cumsum'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('diagonal'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('dump'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('dumps'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('fill'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('flatten'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('getfield'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('item'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('itemset'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('max'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('mean'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('min'))
+
+add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
+ """
+ newbyteorder(new_order='S', /)
+
+ Return a new `dtype` with a different byte order.
+
+ Changes are also made in all fields and sub-arrays of the data type.
+
+    The `new_order` code can be any of the following:
+
+ * 'S' - swap dtype from current to opposite endian
+ * {'<', 'little'} - little endian
+ * {'>', 'big'} - big endian
+ * {'=', 'native'} - native order
+ * {'|', 'I'} - ignore (no change to byte order)
+
+ Parameters
+ ----------
+ new_order : str, optional
+ Byte order to force; a value from the byte order specifications
+ above. The default value ('S') results in swapping the current
+ byte order.
+
+ Returns
+ -------
+ new_dtype : dtype
+ New `dtype` object with the given change to the byte order.
+
+ """))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('nonzero'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('prod'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('ptp'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('put'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('ravel'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('repeat'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('reshape'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('resize'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('round'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('searchsorted'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('setfield'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('setflags'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('sort'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('squeeze'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('std'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('sum'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('swapaxes'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('take'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('tofile'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('tolist'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('tostring'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('trace'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('transpose'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('var'))
+
+add_newdoc('numpy.core.numerictypes', 'generic',
+ refer_to_array_attribute('view'))
+
+add_newdoc('numpy.core.numerictypes', 'number', ('__class_getitem__',
+ """
+ __class_getitem__(item, /)
+
+ Return a parametrized wrapper around the `~numpy.number` type.
+
+ .. versionadded:: 1.22
+
+ Returns
+ -------
+ alias : types.GenericAlias
+ A parametrized `~numpy.number` type.
+
+ Examples
+ --------
+ >>> from typing import Any
+ >>> import numpy as np
+
+ >>> np.signedinteger[Any]
+ numpy.signedinteger[typing.Any]
+
+ Notes
+ -----
+    This method is only available for Python 3.9 and later.
+
+ See Also
+ --------
+ :pep:`585` : Type hinting generics in standard collections.
+
+ """))
+
+##############################################################################
+#
+# Documentation for scalar type abstract base classes in type hierarchy
+#
+##############################################################################
+
+
+add_newdoc('numpy.core.numerictypes', 'number',
+ """
+ Abstract base class of all numeric scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'integer',
+ """
+ Abstract base class of all integer scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'signedinteger',
+ """
+ Abstract base class of all signed integer scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
+ """
+ Abstract base class of all unsigned integer scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'inexact',
+ """
+ Abstract base class of all numeric scalar types with a (potentially)
+ inexact representation of the values in its range, such as
+ floating-point numbers.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'floating',
+ """
+ Abstract base class of all floating-point scalar types.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'complexfloating',
+ """
+ Abstract base class of all complex number scalar types that are made up of
+ floating-point numbers.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'flexible',
+ """
+ Abstract base class of all scalar types without predefined length.
+ The actual size of these types depends on the specific `np.dtype`
+ instantiation.
+
+ """)
+
+add_newdoc('numpy.core.numerictypes', 'character',
+ """
+ Abstract base class of all character string scalar types.
+
+ """)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py b/venv/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py
new file mode 100644
index 00000000..15d37522
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_add_newdocs_scalars.py
@@ -0,0 +1,368 @@
+"""
+This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
+our sphinx ``conf.py`` during doc builds, where we want to avoid showing
+platform-dependent information.
+"""
+import sys
+import os
+from numpy.core import dtype
+from numpy.core import numerictypes as _numerictypes
+from numpy.core.function_base import add_newdoc
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+ def type_aliases_gen():
+ for alias, doc in aliases:
+ try:
+ alias_type = getattr(_numerictypes, alias)
+ except AttributeError:
+ # The set of aliases that actually exist varies between platforms
+ pass
+ else:
+ yield (alias_type, alias, doc)
+ return list(type_aliases_gen())
+
+
+possible_aliases = numeric_type_aliases([
+ ('int8', '8-bit signed integer (``-128`` to ``127``)'),
+ ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
+ ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
+ ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+ ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
+ ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
+ ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
+ ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+    ('float64', '64-bit-precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+ ('float96', '96-bit extended-precision floating-point number type'),
+ ('float128', '128-bit extended-precision floating-point number type'),
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+ ])
+
+
+def _get_platform_and_machine():
+ try:
+ system, _, _, _, machine = os.uname()
+ except AttributeError:
+ system = sys.platform
+ if system == 'win32':
+ machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
+ or os.environ.get('PROCESSOR_ARCHITECTURE', '')
+ else:
+ machine = 'unknown'
+ return system, machine
+
+
+_system, _machine = _get_platform_and_machine()
+_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+ # note: `:field: value` is rST syntax which renders as field lists.
+ o = getattr(_numerictypes, obj)
+
+ character_code = dtype(o).char
+ canonical_name_doc = "" if obj == o.__name__ else \
+ f":Canonical name: `numpy.{obj}`\n "
+ if fixed_aliases:
+ alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
+ for alias in fixed_aliases)
+ else:
+ alias_doc = ''
+ alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+ docstring = f"""
+ {doc.strip()}
+
+ :Character code: ``'{character_code}'``
+ {canonical_name_doc}{alias_doc}
+ """
+
+ add_newdoc('numpy.core.numerictypes', obj, docstring)
+
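+# For illustration: on a typical Linux x86_64 build, the 'int_' entry below
+# would render a field list roughly like (the alias line is platform-dependent):
+#
+#     :Character code: ``'l'``
+#     :Alias on this platform (Linux x86_64): `numpy.int64`: 64-bit signed integer ...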
+
+add_newdoc_for_scalar_type('bool_', ['bool8'],
+ """
+ Boolean type (True or False), stored as a byte.
+
+ .. warning::
+
+ The :class:`bool_` type is not a subclass of the :class:`int_` type
+ (the :class:`bool_` is not even a number type). This is different
+ than Python's default implementation of :class:`bool` as a
+ sub-class of :class:`int`.
+ """)
+
+add_newdoc_for_scalar_type('byte', [],
+ """
+ Signed integer type, compatible with C ``char``.
+ """)
+
+add_newdoc_for_scalar_type('short', [],
+ """
+ Signed integer type, compatible with C ``short``.
+ """)
+
+add_newdoc_for_scalar_type('intc', [],
+ """
+ Signed integer type, compatible with C ``int``.
+ """)
+
+add_newdoc_for_scalar_type('int_', [],
+ """
+ Signed integer type, compatible with Python `int` and C ``long``.
+ """)
+
+add_newdoc_for_scalar_type('longlong', [],
+ """
+ Signed integer type, compatible with C ``long long``.
+ """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned char``.
+ """)
+
+add_newdoc_for_scalar_type('ushort', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned short``.
+ """)
+
+add_newdoc_for_scalar_type('uintc', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned int``.
+ """)
+
+add_newdoc_for_scalar_type('uint', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned long``.
+ """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+ """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+ """)
+
+add_newdoc_for_scalar_type('half', [],
+ """
+ Half-precision floating-point number type.
+ """)
+
+add_newdoc_for_scalar_type('single', [],
+ """
+ Single-precision floating-point number type, compatible with C ``float``.
+ """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+ """
+ Double-precision floating-point number type, compatible with Python `float`
+ and C ``double``.
+ """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+ """
+ Extended-precision floating-point number type, compatible with C
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+ """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+ """
+ Complex number type composed of two single-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+ """
+ Complex number type composed of two double-precision floating-point
+ numbers, compatible with Python `complex`.
+ """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+ """
+ Complex number type composed of two extended-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('object_', [],
+ """
+ Any Python object.
+ """)
+
+add_newdoc_for_scalar_type('str_', ['unicode_'],
+ r"""
+ A unicode string.
+
+ When used in arrays, this type strips trailing null codepoints.
+
+ Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its
+ contents as UCS4:
+
+ >>> m = memoryview(np.str_("abc"))
+ >>> m.format
+ '3w'
+ >>> m.tobytes()
+ b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+ """)
+
+add_newdoc_for_scalar_type('bytes_', ['string_'],
+ r"""
+ A byte string.
+
+ When used in arrays, this type strips trailing null bytes.
+ """)
+
+add_newdoc_for_scalar_type('void', [],
+ r"""
+ np.void(length_or_data, /, dtype=None)
+
+ Create a new structured or unstructured void scalar.
+
+ Parameters
+ ----------
+ length_or_data : int, array-like, bytes-like, object
+        This argument has multiple meanings (see notes): the length or
+        the byte data of an unstructured void, or, when `dtype` is
+        provided, the data to be stored in the new scalar. It can also
+        be an array-like, in which case an array may be returned.
+ dtype : dtype, optional
+ If provided the dtype of the new scalar. This dtype must
+ be "void" dtype (i.e. a structured or unstructured void,
+ see also :ref:`defining-structured-types`).
+
+        .. versionadded:: 1.24
+
+ Notes
+ -----
+ For historical reasons and because void scalars can represent both
+ arbitrary byte data and structured dtypes, the void constructor
+ has three calling conventions:
+
+ 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
+ ``\0`` bytes. The 5 can be a Python or NumPy integer.
+ 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
+ The dtype itemsize will match the byte string length, here ``"V10"``.
+    3. When a ``dtype=`` is passed, the call is roughly the same as an
+       array creation. However, a void scalar rather than an array is returned.
+
+ Please see the examples which show all three different conventions.
+
+ Examples
+ --------
+ >>> np.void(5)
+ void(b'\x00\x00\x00\x00\x00')
+ >>> np.void(b'abcd')
+ void(b'\x61\x62\x63\x64')
+ >>> np.void((5, 3.2, "eggs"), dtype="i,d,S5")
+ (5, 3.2, b'eggs') # looks like a tuple, but is `np.void`
+ >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
+ (3, 3) # looks like a tuple, but is `np.void`
+
+ """)
+
+add_newdoc_for_scalar_type('datetime64', [],
+ """
+ If created from a 64-bit integer, it represents an offset from
+ ``1970-01-01T00:00:00``.
+    If created from a string, the string can be in ISO 8601 date
+    or datetime format.
+
+ >>> np.datetime64(10, 'Y')
+ numpy.datetime64('1980')
+ >>> np.datetime64('1980', 'Y')
+ numpy.datetime64('1980')
+ >>> np.datetime64(10, 'D')
+ numpy.datetime64('1970-01-11')
+
+ See :ref:`arrays.datetime` for more information.
+ """)
+
+add_newdoc_for_scalar_type('timedelta64', [],
+ """
+ A timedelta stored as a 64-bit integer.
+
+ See :ref:`arrays.datetime` for more information.
+ """)
+
+add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
+ """
+ integer.is_integer() -> bool
+
+ Return ``True`` if the number is finite with integral value.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.int64(-2).is_integer()
+ True
+ >>> np.uint32(5).is_integer()
+ True
+ """))
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+ add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
+ """
+ {ftype}.as_integer_ratio() -> (int, int)
+
+ Return a pair of integers, whose ratio is exactly equal to the original
+ floating point number, and with a positive denominator.
+ Raise `OverflowError` on infinities and a `ValueError` on NaNs.
+
+ >>> np.{ftype}(10.0).as_integer_ratio()
+ (10, 1)
+ >>> np.{ftype}(0.0).as_integer_ratio()
+ (0, 1)
+ >>> np.{ftype}(-.25).as_integer_ratio()
+ (-1, 4)
+ """.format(ftype=float_name)))
+
+ add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
+ f"""
+ {float_name}.is_integer() -> bool
+
+ Return ``True`` if the floating point number is finite with integral
+ value, and ``False`` otherwise.
+
+ .. versionadded:: 1.22
+
+ Examples
+ --------
+ >>> np.{float_name}(-2.0).is_integer()
+ True
+ >>> np.{float_name}(3.2).is_integer()
+ False
+ """))
+
+for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
+                 'int64', 'uint64'):
+ # Add negative examples for signed cases by checking typecode
+ add_newdoc('numpy.core.numerictypes', int_name, ('bit_count',
+ f"""
+ {int_name}.bit_count() -> int
+
+ Computes the number of 1-bits in the absolute value of the input.
+ Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
+
+ Examples
+ --------
+ >>> np.{int_name}(127).bit_count()
+ 7""" +
+ (f"""
+ >>> np.{int_name}(-127).bit_count()
+ 7
+ """ if dtype(int_name).char.islower() else "")))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_asarray.py b/venv/lib/python3.9/site-packages/numpy/core/_asarray.py
new file mode 100644
index 00000000..28f1fe6f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_asarray.py
@@ -0,0 +1,140 @@
+"""
+Functions in the ``as*array`` family that promote array-likes into arrays.
+
+`require` fits this category despite its name not matching this pattern.
+"""
+from .overrides import (
+ array_function_dispatch,
+ set_array_function_like_doc,
+ set_module,
+)
+from .multiarray import array, asanyarray
+
+
+__all__ = ["require"]
+
+
+POSSIBLE_FLAGS = {
+ 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
+ 'A': 'A', 'ALIGNED': 'A',
+ 'W': 'W', 'WRITEABLE': 'W',
+ 'O': 'O', 'OWNDATA': 'O',
+ 'E': 'E', 'ENSUREARRAY': 'E'
+}
+
+
+def _require_dispatcher(a, dtype=None, requirements=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def require(a, dtype=None, requirements=None, *, like=None):
+ """
+ Return an ndarray of the provided type that satisfies requirements.
+
+    This function is useful for ensuring that an array with the correct flags
+ is returned for passing to compiled code (perhaps through ctypes).
+
+ Parameters
+ ----------
+ a : array_like
+ The object to be converted to a type-and-requirement-satisfying array.
+ dtype : data-type
+ The required data-type. If None preserve the current dtype. If your
+ application requires the data to be in native byteorder, include
+ a byteorder specification as a part of the dtype specification.
+ requirements : str or sequence of str
+ The requirements list can be any of the following
+
+ * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
+ * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
+ * 'ALIGNED' ('A') - ensure a data-type aligned array
+ * 'WRITEABLE' ('W') - ensure a writable array
+ * 'OWNDATA' ('O') - ensure an array that owns its own data
+        * 'ENSUREARRAY' ('E') - ensure a base array, instead of a subclass
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array with specified requirements and type if given.
+
+ See Also
+ --------
+ asarray : Convert input to an ndarray.
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Notes
+ -----
+ The returned array will be guaranteed to have the listed requirements
+ by making a copy if needed.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : False
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+ >>> y.flags
+ C_CONTIGUOUS : False
+ F_CONTIGUOUS : True
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+
+ """
+ if like is not None:
+ return _require_with_like(
+ a,
+ dtype=dtype,
+ requirements=requirements,
+ like=like,
+ )
+
+ if not requirements:
+ return asanyarray(a, dtype=dtype)
+
+ requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
+
+ if 'E' in requirements:
+ requirements.remove('E')
+ subok = False
+ else:
+ subok = True
+
+ order = 'A'
+ if requirements >= {'C', 'F'}:
+ raise ValueError('Cannot specify both "C" and "F" order')
+ elif 'F' in requirements:
+ order = 'F'
+ requirements.remove('F')
+ elif 'C' in requirements:
+ order = 'C'
+ requirements.remove('C')
+
+ arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
+
+ for prop in requirements:
+ if not arr.flags[prop]:
+ return arr.copy(order)
+ return arr
+
+
+_require_with_like = array_function_dispatch(
+ _require_dispatcher, use_like=True
+)(require)
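+# For illustration, `require` first collapses requirement spellings to
+# single-letter flags, so mixed spellings are accepted:
+#
+#     >>> sorted(POSSIBLE_FLAGS[x.upper()] for x in ['Aligned', 'W', 'fortran'])
+#     ['A', 'F', 'W']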
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_asarray.pyi b/venv/lib/python3.9/site-packages/numpy/core/_asarray.pyi
new file mode 100644
index 00000000..473bc037
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_asarray.pyi
@@ -0,0 +1,42 @@
+from collections.abc import Iterable
+from typing import TypeVar, Union, overload, Literal
+
+from numpy import ndarray
+from numpy._typing import DTypeLike, _SupportsArrayFunc
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray)
+
+_Requirements = Literal[
+ "C", "C_CONTIGUOUS", "CONTIGUOUS",
+ "F", "F_CONTIGUOUS", "FORTRAN",
+ "A", "ALIGNED",
+ "W", "WRITEABLE",
+ "O", "OWNDATA"
+]
+_E = Literal["E", "ENSUREARRAY"]
+_RequirementsWithE = Union[_Requirements, _E]
+
+@overload
+def require(
+ a: _ArrayType,
+ dtype: None = ...,
+ requirements: None | _Requirements | Iterable[_Requirements] = ...,
+ *,
+ like: _SupportsArrayFunc = ...
+) -> _ArrayType: ...
+@overload
+def require(
+ a: object,
+ dtype: DTypeLike = ...,
+ requirements: _E | Iterable[_RequirementsWithE] = ...,
+ *,
+ like: _SupportsArrayFunc = ...
+) -> ndarray: ...
+@overload
+def require(
+ a: object,
+ dtype: DTypeLike = ...,
+ requirements: None | _Requirements | Iterable[_Requirements] = ...,
+ *,
+ like: _SupportsArrayFunc = ...
+) -> ndarray: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_dtype.py b/venv/lib/python3.9/site-packages/numpy/core/_dtype.py
new file mode 100644
index 00000000..3db80c17
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_dtype.py
@@ -0,0 +1,365 @@
+"""
+A place for code to be called from the implementation of np.dtype
+
+String handling is much easier to do correctly in python.
+"""
+import numpy as np
+
+
+_kind_to_stem = {
+ 'u': 'uint',
+ 'i': 'int',
+ 'c': 'complex',
+ 'f': 'float',
+ 'b': 'bool',
+ 'V': 'void',
+ 'O': 'object',
+ 'M': 'datetime',
+ 'm': 'timedelta',
+ 'S': 'bytes',
+ 'U': 'str',
+}
+
+
+def _kind_name(dtype):
+ try:
+ return _kind_to_stem[dtype.kind]
+    except KeyError:
+ raise RuntimeError(
+ "internal dtype error, unknown kind {!r}"
+ .format(dtype.kind)
+ ) from None
+
+
+def __str__(dtype):
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=True)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
+ return dtype.str
+ else:
+ return dtype.name
+
+
+def __repr__(dtype):
+ arg_str = _construction_repr(dtype, include_align=False)
+ if dtype.isalignedstruct:
+ arg_str = arg_str + ", align=True"
+ return "dtype({})".format(arg_str)
+
+
+def _unpack_field(dtype, offset, title=None):
+ """
+ Helper function to normalize the items in dtype.fields.
+
+ Call as:
+
+ dtype, offset, title = _unpack_field(*dtype.fields[name])
+ """
+ return dtype, offset, title
+
+
+def _isunsized(dtype):
+ # PyDataType_ISUNSIZED
+ return dtype.itemsize == 0
+
+
+def _construction_repr(dtype, include_align=False, short=False):
+ """
+ Creates a string repr of the dtype, excluding the 'dtype()' part
+ surrounding the object. This object may be a string, a list, or
+ a dict depending on the nature of the dtype. This
+ is the object passed as the first parameter to the dtype
+ constructor, and if no additional constructor parameters are
+ given, will reproduce the exact memory layout.
+
+ Parameters
+ ----------
+ short : bool
+ If true, this creates a shorter repr using 'kind' and 'itemsize', instead
+ of the longer type name.
+
+ include_align : bool
+ If true, this includes the 'align=True' parameter
+ inside the struct dtype construction dict when needed. Use this flag
+ if you want a proper repr string without the 'dtype()' part around it.
+
+ If false, this does not preserve the
+ 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
+ struct arrays like the regular repr does, because the 'align'
+        flag is not part of the first dtype constructor parameter. This
+ mode is intended for a full 'repr', where the 'align=True' is
+ provided as the second parameter.
+ """
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=include_align)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ else:
+ return _scalar_str(dtype, short=short)
+
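+# For illustration (on a little-endian build), the three branches above give:
+#
+#     >>> _construction_repr(np.dtype('i4'))            # scalar path
+#     "'int32'"
+#     >>> _construction_repr(np.dtype(('i4', (2,))))    # subarray path
+#     "('<i4', (2,))"
+#     >>> _construction_repr(np.dtype([('a', 'i4')]))   # struct path
+#     "[('a', '<i4')]"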
+
+def _scalar_str(dtype, short):
+ byteorder = _byte_order_str(dtype)
+
+ if dtype.type == np.bool_:
+ if short:
+ return "'?'"
+ else:
+ return "'bool'"
+
+ elif dtype.type == np.object_:
+ # The object reference may be different sizes on different
+ # platforms, so it should never include the itemsize here.
+ return "'O'"
+
+ elif dtype.type == np.string_:
+ if _isunsized(dtype):
+ return "'S'"
+ else:
+ return "'S%d'" % dtype.itemsize
+
+ elif dtype.type == np.unicode_:
+ if _isunsized(dtype):
+ return "'%sU'" % byteorder
+ else:
+ return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+ # unlike the other types, subclasses of void are preserved - but
+ # historically the repr does not actually reveal the subclass
+ elif issubclass(dtype.type, np.void):
+ if _isunsized(dtype):
+ return "'V'"
+ else:
+ return "'V%d'" % dtype.itemsize
+
+ elif dtype.type == np.datetime64:
+ return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+ elif dtype.type == np.timedelta64:
+ return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+ elif np.issubdtype(dtype, np.number):
+ # Short repr with endianness, like '<f8'
+ if short or dtype.byteorder not in ('=', '|'):
+ return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+ # Longer repr, like 'float64'
+ else:
+ return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
+
+ elif dtype.isbuiltin == 2:
+ return dtype.type.__name__
+
+ else:
+ raise RuntimeError(
+ "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+ """ Normalize byteorder to '<' or '>' """
+ # hack to obtain the native and swapped byte order characters
+ swapped = np.dtype(int).newbyteorder('S')
+ native = swapped.newbyteorder('S')
+
+ byteorder = dtype.byteorder
+ if byteorder == '=':
+ return native.byteorder
+ if byteorder == 'S':
+ # TODO: this path can never be reached
+ return swapped.byteorder
+ elif byteorder == '|':
+ return ''
+ else:
+ return byteorder
+
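+# For illustration, on a little-endian host:
+#
+#     >>> _byte_order_str(np.dtype('=i4'))   # native order becomes explicit
+#     '<'
+#     >>> _byte_order_str(np.dtype('>i4'))   # explicit orders pass through
+#     '>'
+#     >>> _byte_order_str(np.dtype('S3'))    # '|' carries no order
+#     ''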
+
+def _datetime_metadata_str(dtype):
+ # TODO: this duplicates the C metastr_to_unicode functionality
+ unit, count = np.datetime_data(dtype)
+ if unit == 'generic':
+ return ''
+ elif count == 1:
+ return '[{}]'.format(unit)
+ else:
+ return '[{}{}]'.format(count, unit)
+
+
+def _struct_dict_str(dtype, includealignedflag):
+ # unpack the fields dictionary into ls
+ names = dtype.names
+ fld_dtypes = []
+ offsets = []
+ titles = []
+ for name in names:
+ fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+ fld_dtypes.append(fld_dtype)
+ offsets.append(offset)
+ titles.append(title)
+
+ # Build up a string to make the dictionary
+
+ if np.core.arrayprint._get_legacy_print_mode() <= 121:
+ colon = ":"
+ fieldsep = ","
+ else:
+ colon = ": "
+ fieldsep = ", "
+
+ # First, the names
+ ret = "{'names'%s[" % colon
+ ret += fieldsep.join(repr(name) for name in names)
+
+ # Second, the formats
+ ret += "], 'formats'%s[" % colon
+ ret += fieldsep.join(
+ _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+ # Third, the offsets
+ ret += "], 'offsets'%s[" % colon
+ ret += fieldsep.join("%d" % offset for offset in offsets)
+
+ # Fourth, the titles
+ if any(title is not None for title in titles):
+ ret += "], 'titles'%s[" % colon
+ ret += fieldsep.join(repr(title) for title in titles)
+
+ # Fifth, the itemsize
+ ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)
+
+ if (includealignedflag and dtype.isalignedstruct):
+ # Finally, the aligned flag
+ ret += ", 'aligned'%sTrue}" % colon
+ else:
+ ret += "}"
+
+ return ret
+
+
+def _aligned_offset(offset, alignment):
+ # round up offset:
+ return - (-offset // alignment) * alignment
+
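+# For illustration, the double negation rounds up with floor division:
+# offset 5, alignment 4 gives -(-5 // 4) * 4 == -(-2) * 4 == 8.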
+
+def _is_packed(dtype):
+ """
+ Checks whether the structured data type in 'dtype'
+ has a simple layout, where all the fields are in order,
+ and follow each other with no alignment padding.
+
+ When this returns true, the dtype can be reconstructed
+ from a list of the field names and dtypes with no additional
+ dtype parameters.
+
+ Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
+ """
+ align = dtype.isalignedstruct
+ max_alignment = 1
+ total_offset = 0
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+ if align:
+ total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
+ max_alignment = max(max_alignment, fld_dtype.alignment)
+
+ if fld_offset != total_offset:
+ return False
+ total_offset += fld_dtype.itemsize
+
+ if align:
+ total_offset = _aligned_offset(total_offset, max_alignment)
+
+ if total_offset != dtype.itemsize:
+ return False
+ return True
+
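+# For illustration, a contiguous struct is packed, one with a manual gap is not:
+#
+#     >>> _is_packed(np.dtype([('a', 'i1'), ('b', 'i4')]))
+#     True
+#     >>> _is_packed(np.dtype({'names': ['a', 'b'], 'formats': ['i1', 'i4'],
+#     ...                      'offsets': [0, 8]}))
+#     False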
+
+def _struct_list_str(dtype):
+ items = []
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+ item = "("
+ if title is not None:
+ item += "({!r}, {!r}), ".format(title, name)
+ else:
+ item += "{!r}, ".format(name)
+ # Special case subarray handling here
+ if fld_dtype.subdtype is not None:
+ base, shape = fld_dtype.subdtype
+ item += "{}, {}".format(
+ _construction_repr(base, short=True),
+ shape
+ )
+ else:
+ item += _construction_repr(fld_dtype, short=True)
+
+ item += ")"
+ items.append(item)
+
+ return "[" + ", ".join(items) + "]"
+
+
+def _struct_str(dtype, include_align):
+ # The list str representation can't include the 'align=' flag,
+ # so if it is requested and the struct has the aligned flag set,
+ # we must use the dict str instead.
+ if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
+ sub = _struct_list_str(dtype)
+
+ else:
+ sub = _struct_dict_str(dtype, include_align)
+
+ # If the data type isn't the default, void, show it
+ if dtype.type != np.void:
+ return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
+ else:
+ return sub
+
+
+def _subarray_str(dtype):
+ base, shape = dtype.subdtype
+ return "({}, {})".format(
+ _construction_repr(base, short=True),
+ shape
+ )
+
+
+def _name_includes_bit_suffix(dtype):
+ if dtype.type == np.object_:
+ # pointer size varies by system, best to omit it
+ return False
+ elif dtype.type == np.bool_:
+ # implied
+ return False
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
+ # unspecified
+ return False
+ else:
+ return True
+
+
+def _name_get(dtype):
+ # provides dtype.name.__get__, documented as returning a "bit name"
+
+ if dtype.isbuiltin == 2:
+ # user dtypes don't promise to do anything special
+ return dtype.type.__name__
+
+ if issubclass(dtype.type, np.void):
+ # historically, void subclasses preserve their name, eg `record64`
+ name = dtype.type.__name__
+ else:
+ name = _kind_name(dtype)
+
+ # append bit counts
+ if _name_includes_bit_suffix(dtype):
+ name += "{}".format(dtype.itemsize * 8)
+
+ # append metadata to datetimes
+ if dtype.type in (np.datetime64, np.timedelta64):
+ name += _datetime_metadata_str(dtype)
+
+ return name
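+# For illustration:
+#
+#     >>> _name_get(np.dtype('f8'))
+#     'float64'
+#     >>> _name_get(np.dtype('M8[ms]'))
+#     'datetime64[ms]'
+#     >>> _name_get(np.dtype('O'))      # no bit suffix for objects
+#     'object'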
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py b/venv/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py
new file mode 100644
index 00000000..6d7cbb24
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,117 @@
+"""
+Conversion from ctypes to dtype.
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+ def dtype_from_ctypes_type(t):
+ # needed to ensure that the shape of `t` is within memoryview.format
+ class DummyStruct(ctypes.Structure):
+ _fields_ = [('a', t)]
+
+ # empty to avoid memory allocation
+ ctype_0 = (DummyStruct * 0)()
+ mv = memoryview(ctype_0)
+
+ # convert the struct, and slice back out the field
+ return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+
+# We delay-import ctypes for distributions that do not include it.
+# While this module is not used unless the user passes in ctypes
+# members, it is eagerly imported from numpy/core/__init__.py.
+import numpy as np
+
+
+def _from_ctypes_array(t):
+ return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+ for item in t._fields_:
+ if len(item) > 2:
+ raise TypeError(
+ "ctypes bitfields have no dtype equivalent")
+
+ if hasattr(t, "_pack_"):
+ import ctypes
+ formats = []
+ offsets = []
+ names = []
+ current_offset = 0
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+            # Each type has a default offset; this is platform dependent for some types.
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+ current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
+ offsets.append(current_offset)
+ current_offset += ctypes.sizeof(ftyp)
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+ else:
+ fields = []
+ for fname, ftyp in t._fields_:
+ fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+ # by default, ctypes structs are aligned
+ return np.dtype(fields, align=True)
+
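+# For illustration, a plain (unpacked) ctypes struct takes the aligned branch
+# above; on a little-endian build:
+#
+#     >>> import ctypes
+#     >>> class Point(ctypes.Structure):
+#     ...     _fields_ = [('x', ctypes.c_int8), ('y', ctypes.c_int32)]
+#     >>> dtype_from_ctypes_type(Point)
+#     dtype([('x', 'i1'), ('y', '<i4')], align=True)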
+
+def _from_ctypes_scalar(t):
+ """
+ Return the dtype type with endianness included if it's the case
+ """
+ if getattr(t, '__ctype_be__', None) is t:
+ return np.dtype('>' + t._type_)
+ elif getattr(t, '__ctype_le__', None) is t:
+ return np.dtype('<' + t._type_)
+ else:
+ return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+ import ctypes
+ formats = []
+ offsets = []
+ names = []
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ offsets.append(0) # Union fields are offset to 0
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+
+
+def dtype_from_ctypes_type(t):
+ """
+ Construct a dtype object from a ctypes type
+ """
+ import _ctypes
+ if issubclass(t, _ctypes.Array):
+ return _from_ctypes_array(t)
+ elif issubclass(t, _ctypes._Pointer):
+ raise TypeError("ctypes pointers have no dtype equivalent")
+ elif issubclass(t, _ctypes.Structure):
+ return _from_ctypes_structure(t)
+ elif issubclass(t, _ctypes.Union):
+ return _from_ctypes_union(t)
+ elif isinstance(getattr(t, '_type_', None), str):
+ return _from_ctypes_scalar(t)
+ else:
+ raise NotImplementedError(
+ "Unknown ctypes type {}".format(t.__name__))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_exceptions.py b/venv/lib/python3.9/site-packages/numpy/core/_exceptions.py
new file mode 100644
index 00000000..be3aa8be
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_exceptions.py
@@ -0,0 +1,280 @@
+"""
+Various richly-typed exceptions, that also help us deal with string formatting
+in python where it's easier.
+
+By putting the formatting in `__str__`, we also avoid paying the cost for
+users who silence the exceptions.
+"""
+from numpy.core.overrides import set_module
+
+def _unpack_tuple(tup):
+ if len(tup) == 1:
+ return tup[0]
+ else:
+ return tup
+
+
+def _display_as_base(cls):
+ """
+ A decorator that makes an exception class look like its base.
+
+ We use this to hide subclasses that are implementation details - the user
+ should catch the base type, which is what the traceback will show them.
+
+ Classes decorated with this decorator are subject to removal without a
+ deprecation warning.
+ """
+ assert issubclass(cls, Exception)
+ cls.__name__ = cls.__base__.__name__
+ return cls
+
+
+class UFuncTypeError(TypeError):
+ """ Base class for all ufunc exceptions """
+ def __init__(self, ufunc):
+ self.ufunc = ufunc
+
+
+@_display_as_base
+class _UFuncBinaryResolutionError(UFuncTypeError):
+ """ Thrown when a binary resolution fails """
+ def __init__(self, ufunc, dtypes):
+ super().__init__(ufunc)
+ self.dtypes = tuple(dtypes)
+ assert len(self.dtypes) == 2
+
+ def __str__(self):
+ return (
+ "ufunc {!r} cannot use operands with types {!r} and {!r}"
+ ).format(
+ self.ufunc.__name__, *self.dtypes
+ )
+
+
+@_display_as_base
+class _UFuncNoLoopError(UFuncTypeError):
+ """ Thrown when a ufunc loop cannot be found """
+ def __init__(self, ufunc, dtypes):
+ super().__init__(ufunc)
+ self.dtypes = tuple(dtypes)
+
+ def __str__(self):
+ return (
+ "ufunc {!r} did not contain a loop with signature matching types "
+ "{!r} -> {!r}"
+ ).format(
+ self.ufunc.__name__,
+ _unpack_tuple(self.dtypes[:self.ufunc.nin]),
+ _unpack_tuple(self.dtypes[self.ufunc.nin:])
+ )
+
+
+@_display_as_base
+class _UFuncCastingError(UFuncTypeError):
+ def __init__(self, ufunc, casting, from_, to):
+ super().__init__(ufunc)
+ self.casting = casting
+ self.from_ = from_
+ self.to = to
+
+
+@_display_as_base
+class _UFuncInputCastingError(_UFuncCastingError):
+ """ Thrown when a ufunc input cannot be casted """
+ def __init__(self, ufunc, casting, from_, to, i):
+ super().__init__(ufunc, casting, from_, to)
+ self.in_i = i
+
+ def __str__(self):
+ # only show the number if more than one input exists
+ i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
+ return (
+ "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
+ "rule {!r}"
+ ).format(
+ self.ufunc.__name__, i_str, self.from_, self.to, self.casting
+ )
+
+
+@_display_as_base
+class _UFuncOutputCastingError(_UFuncCastingError):
+ """ Thrown when a ufunc output cannot be casted """
+ def __init__(self, ufunc, casting, from_, to, i):
+ super().__init__(ufunc, casting, from_, to)
+ self.out_i = i
+
+ def __str__(self):
+ # only show the number if more than one output exists
+ i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
+ return (
+ "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
+ "rule {!r}"
+ ).format(
+ self.ufunc.__name__, i_str, self.from_, self.to, self.casting
+ )
+
+
+# Exception used in shares_memory()
+@set_module('numpy')
+class TooHardError(RuntimeError):
+ """max_work was exceeded.
+
+    This is raised whenever the maximum number of candidate solutions
+    to consider, as specified by the ``max_work`` parameter, is exceeded.
+ Assigning a finite number to max_work may have caused the operation
+ to fail.
+
+ """
+
+ pass
+
+
+@set_module('numpy')
+class AxisError(ValueError, IndexError):
+ """Axis supplied was invalid.
+
+ This is raised whenever an ``axis`` parameter is specified that is larger
+ than the number of array dimensions.
+ For compatibility with code written against older numpy versions, which
+ raised a mixture of `ValueError` and `IndexError` for this situation, this
+ exception subclasses both to ensure that ``except ValueError`` and
+ ``except IndexError`` statements continue to catch `AxisError`.
+
+ .. versionadded:: 1.13
+
+ Parameters
+ ----------
+ axis : int or str
+ The out of bounds axis or a custom exception message.
+ If an axis is provided, then `ndim` should be specified as well.
+ ndim : int, optional
+ The number of array dimensions.
+ msg_prefix : str, optional
+ A prefix for the exception message.
+
+ Attributes
+ ----------
+ axis : int, optional
+ The out of bounds axis or ``None`` if a custom exception
+ message was provided. This should be the axis as passed by
+ the user, before any normalization to resolve negative indices.
+
+ .. versionadded:: 1.22
+ ndim : int, optional
+ The number of array dimensions or ``None`` if a custom exception
+ message was provided.
+
+ .. versionadded:: 1.22
+
+
+ Examples
+ --------
+ >>> array_1d = np.arange(10)
+ >>> np.cumsum(array_1d, axis=1)
+ Traceback (most recent call last):
+ ...
+ numpy.AxisError: axis 1 is out of bounds for array of dimension 1
+
+ Negative axes are preserved:
+
+ >>> np.cumsum(array_1d, axis=-2)
+ Traceback (most recent call last):
+ ...
+ numpy.AxisError: axis -2 is out of bounds for array of dimension 1
+
+ The class constructor generally takes the axis and arrays'
+ dimensionality as arguments:
+
+ >>> print(np.AxisError(2, 1, msg_prefix='error'))
+ error: axis 2 is out of bounds for array of dimension 1
+
+ Alternatively, a custom exception message can be passed:
+
+ >>> print(np.AxisError('Custom error message'))
+ Custom error message
+
+ """
+
+ __slots__ = ("axis", "ndim", "_msg")
+
+ def __init__(self, axis, ndim=None, msg_prefix=None):
+ if ndim is msg_prefix is None:
+ # single-argument form: directly set the error message
+ self._msg = axis
+ self.axis = None
+ self.ndim = None
+ else:
+ self._msg = msg_prefix
+ self.axis = axis
+ self.ndim = ndim
+
+ def __str__(self):
+ axis = self.axis
+ ndim = self.ndim
+
+ if axis is ndim is None:
+ return self._msg
+ else:
+ msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
+ if self._msg is not None:
+ msg = f"{self._msg}: {msg}"
+ return msg
+
+
+@_display_as_base
+class _ArrayMemoryError(MemoryError):
+ """ Thrown when an array cannot be allocated"""
+ def __init__(self, shape, dtype):
+ self.shape = shape
+ self.dtype = dtype
+
+ @property
+ def _total_size(self):
+ num_bytes = self.dtype.itemsize
+ for dim in self.shape:
+ num_bytes *= dim
+ return num_bytes
+
+ @staticmethod
+ def _size_to_string(num_bytes):
+ """ Convert a number of bytes into a binary size string """
+
+ # https://en.wikipedia.org/wiki/Binary_prefix
+ LOG2_STEP = 10
+ STEP = 1024
+ units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
+
+ unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
+ unit_val = 1 << (unit_i * LOG2_STEP)
+ n_units = num_bytes / unit_val
+ del unit_val
+
+ # ensure we pick a unit that is correct after rounding
+ if round(n_units) == STEP:
+ unit_i += 1
+ n_units /= STEP
+
+ # deal with sizes so large that we don't have units for them
+ if unit_i >= len(units):
+ new_unit_i = len(units) - 1
+ n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
+ unit_i = new_unit_i
+
+ unit_name = units[unit_i]
+ # format with a sensible number of digits
+ if unit_i == 0:
+ # no decimal point on bytes
+ return '{:.0f} {}'.format(n_units, unit_name)
+ elif round(n_units) < 1000:
+ # 3 significant figures, if none are dropped to the left of the .
+ return '{:#.3g} {}'.format(n_units, unit_name)
+ else:
+ # just give all the digits otherwise
+ return '{:#.0f} {}'.format(n_units, unit_name)
+
+ def __str__(self):
+ size_str = self._size_to_string(self._total_size)
+ return (
+ "Unable to allocate {} for an array with shape {} and data type {}"
+ .format(size_str, self.shape, self.dtype)
+ )
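+# For illustration of the size formatting above:
+#
+#     >>> _ArrayMemoryError._size_to_string(1023)
+#     '1023 bytes'
+#     >>> _ArrayMemoryError._size_to_string(90448)
+#     '88.3 KiB'
+#     >>> _ArrayMemoryError._size_to_string(2**64)
+#     '16.0 EiB'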
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_internal.py b/venv/lib/python3.9/site-packages/numpy/core/_internal.py
new file mode 100644
index 00000000..85076f3e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_internal.py
@@ -0,0 +1,932 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
+
+"""
+import ast
+import re
+import sys
+import warnings
+
+from .multiarray import dtype, array, ndarray, promote_types
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+IS_PYPY = sys.implementation.name == 'pypy'
+
+if sys.byteorder == 'little':
+ _nbo = '<'
+else:
+ _nbo = '>'
+
+def _makenames_list(adict, align):
+ allfields = []
+
+ for fname, obj in adict.items():
+ n = len(obj)
+ if not isinstance(obj, tuple) or n not in (2, 3):
+ raise ValueError("entry not a 2- or 3- tuple")
+ if n > 2 and obj[2] == fname:
+ continue
+ num = int(obj[1])
+ if num < 0:
+ raise ValueError("invalid offset.")
+ format = dtype(obj[0], align=align)
+ if n > 2:
+ title = obj[2]
+ else:
+ title = None
+ allfields.append((fname, format, num, title))
+ # sort by offsets
+ allfields.sort(key=lambda x: x[2])
+ names = [x[0] for x in allfields]
+ formats = [x[1] for x in allfields]
+ offsets = [x[2] for x in allfields]
+ titles = [x[3] for x in allfields]
+
+ return names, formats, offsets, titles
+
+# Called in PyArray_DescrConverter function when
+# a dictionary without "names" and "formats"
+# fields is used as a data-type descriptor.
+def _usefields(adict, align):
+ try:
+ names = adict[-1]
+ except KeyError:
+ names = None
+ if names is None:
+ names, formats, offsets, titles = _makenames_list(adict, align)
+ else:
+ formats = []
+ offsets = []
+ titles = []
+ for name in names:
+ res = adict[name]
+ formats.append(res[0])
+ offsets.append(res[1])
+ if len(res) > 2:
+ titles.append(res[2])
+ else:
+ titles.append(None)
+
+ return dtype({"names": names,
+ "formats": formats,
+ "offsets": offsets,
+ "titles": titles}, align)
+
+
+# construct an array_protocol descriptor list
+# from the fields attribute of a descriptor
+# This calls itself recursively but should eventually hit
+# a descriptor that has no fields and then return
+# a simple typestring
+
+def _array_descr(descriptor):
+ fields = descriptor.fields
+ if fields is None:
+ subdtype = descriptor.subdtype
+ if subdtype is None:
+ if descriptor.metadata is None:
+ return descriptor.str
+ else:
+ new = descriptor.metadata.copy()
+ if new:
+ return (descriptor.str, new)
+ else:
+ return descriptor.str
+ else:
+ return (_array_descr(subdtype[0]), subdtype[1])
+
+ names = descriptor.names
+ ordered_fields = [fields[x] + (x,) for x in names]
+ result = []
+ offset = 0
+ for field in ordered_fields:
+ if field[1] > offset:
+ num = field[1] - offset
+ result.append(('', f'|V{num}'))
+ offset += num
+ elif field[1] < offset:
+ raise ValueError(
+ "dtype.descr is not defined for types with overlapping or "
+ "out-of-order fields")
+ if len(field) > 3:
+ name = (field[2], field[3])
+ else:
+ name = field[2]
+ if field[0].subdtype:
+ tup = (name, _array_descr(field[0].subdtype[0]),
+ field[0].subdtype[1])
+ else:
+ tup = (name, _array_descr(field[0]))
+ offset += field[0].itemsize
+ result.append(tup)
+
+ if descriptor.itemsize > offset:
+ num = descriptor.itemsize - offset
+ result.append(('', f'|V{num}'))
+
+ return result
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+ return ndarray.__new__(subtype, shape, dtype)
+
+
+# format_re was originally from numarray by J. Todd Miller
+
+format_re = re.compile(r'(?P<order1>[<>|=]?)'
+ r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
+ r'(?P<order2>[<>|=]?)'
+ r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
+sep_re = re.compile(r'\s*,\s*')
+space_re = re.compile(r'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {'=': _nbo}
+
+def _commastring(astr):
+ startindex = 0
+ result = []
+ while startindex < len(astr):
+ mo = format_re.match(astr, pos=startindex)
+ try:
+ (order1, repeats, order2, dtype) = mo.groups()
+ except (TypeError, AttributeError):
+ raise ValueError(
+ f'format number {len(result)+1} of "{astr}" is not recognized'
+ ) from None
+ startindex = mo.end()
+ # Separator or ending padding
+ if startindex < len(astr):
+ if space_re.match(astr, pos=startindex):
+ startindex = len(astr)
+ else:
+ mo = sep_re.match(astr, pos=startindex)
+ if not mo:
+ raise ValueError(
+ 'format number %d of "%s" is not recognized' %
+ (len(result)+1, astr))
+ startindex = mo.end()
+
+ if order2 == '':
+ order = order1
+ elif order1 == '':
+ order = order2
+ else:
+ order1 = _convorder.get(order1, order1)
+ order2 = _convorder.get(order2, order2)
+ if (order1 != order2):
+ raise ValueError(
+ 'inconsistent byte-order specification %s and %s' %
+ (order1, order2))
+ order = order1
+
+ if order in ('|', '=', _nbo):
+ order = ''
+ dtype = order + dtype
+ if (repeats == ''):
+ newitem = dtype
+ else:
+ newitem = (dtype, ast.literal_eval(repeats))
+ result.append(newitem)
+
+ return result
+
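+# For illustration (on a little-endian host, where '>' is non-native and is
+# kept), a comma-separated format string parses into a list of dtype strings
+# and (dtype, shape) tuples:
+#
+#     >>> _commastring('>i4, (2,3)f8')
+#     ['>i4', ('f8', (2, 3))]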
+class dummy_ctype:
+ def __init__(self, cls):
+ self._cls = cls
+ def __mul__(self, other):
+ return self
+ def __call__(self, *other):
+ return self._cls(other)
+ def __eq__(self, other):
+ return self._cls == other._cls
+ def __ne__(self, other):
+ return self._cls != other._cls
+
+def _getintp_ctype():
+ val = _getintp_ctype.cache
+ if val is not None:
+ return val
+ if ctypes is None:
+ import numpy as np
+ val = dummy_ctype(np.intp)
+ else:
+ char = dtype('p').char
+ if char == 'i':
+ val = ctypes.c_int
+ elif char == 'l':
+ val = ctypes.c_long
+ elif char == 'q':
+ val = ctypes.c_longlong
+ else:
+ val = ctypes.c_long
+ _getintp_ctype.cache = val
+ return val
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes:
+ def cast(self, num, obj):
+ return num.value
+
+ class c_void_p:
+ def __init__(self, ptr):
+ self.value = ptr
+
+
+class _ctypes:
+ def __init__(self, array, ptr=None):
+ self._arr = array
+
+ if ctypes:
+ self._ctypes = ctypes
+ self._data = self._ctypes.c_void_p(ptr)
+ else:
+ # fake a pointer-like object that holds onto the reference
+ self._ctypes = _missing_ctypes()
+ self._data = self._ctypes.c_void_p(ptr)
+ self._data._objects = array
+
+ if self._arr.ndim == 0:
+ self._zerod = True
+ else:
+ self._zerod = False
+
+ def data_as(self, obj):
+ """
+ Return the data pointer cast to a particular c-types object.
+ For example, calling ``self._as_parameter_`` is equivalent to
+ ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
+ pointer to a ctypes array of floating-point data:
+ ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+ The returned pointer will keep a reference to the array.
+ """
+ # _ctypes.cast function causes a circular reference of self._data in
+ # self._data._objects. Attributes of self._data cannot be released
+ # until gc.collect is called. Make a copy of the pointer first then let
+ # it hold the array reference. This is a workaround to circumvent the
+ # CPython bug https://bugs.python.org/issue12836
+ ptr = self._ctypes.cast(self._data, obj)
+ ptr._arr = self._arr
+ return ptr
+
+ def shape_as(self, obj):
+ """
+ Return the shape tuple as an array of some other c-types
+ type. For example: ``self.shape_as(ctypes.c_short)``.
+ """
+ if self._zerod:
+ return None
+ return (obj*self._arr.ndim)(*self._arr.shape)
+
+ def strides_as(self, obj):
+ """
+ Return the strides tuple as an array of some other
+ c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
+ """
+ if self._zerod:
+ return None
+ return (obj*self._arr.ndim)(*self._arr.strides)
+
+ @property
+ def data(self):
+ """
+ A pointer to the memory area of the array as a Python integer.
+        This memory area may contain data that is not aligned, or not in the
+        correct byte order. The memory area may not even be writeable. The array
+ flags and data-type of this array should be respected when passing this
+ attribute to arbitrary C-code to avoid trouble that can include Python
+ crashing. User Beware! The value of this attribute is exactly the same
+ as ``self._array_interface_['data'][0]``.
+
+ Note that unlike ``data_as``, a reference will not be kept to the array:
+ code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
+        pointer to a deallocated array, and should be spelt
+        ``(a + b).ctypes.data_as(ctypes.c_void_p)``.
+ """
+ return self._data.value
+
+ @property
+ def shape(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the C-integer corresponding to ``dtype('p')`` on this
+ platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
+ `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
+ the platform. The ctypes array contains the shape of
+ the underlying array.
+ """
+ return self.shape_as(_getintp_ctype())
+
+ @property
+ def strides(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the same as for the shape attribute. This ctypes array
+ contains the strides information from the underlying array. This strides
+ information is important for showing how many bytes must be jumped to
+ get to the next element in the array.
+ """
+ return self.strides_as(_getintp_ctype())
+
+ @property
+ def _as_parameter_(self):
+ """
+ Overrides the ctypes semi-magic method
+
+ Enables `c_func(some_array.ctypes)`
+ """
+ return self.data_as(ctypes.c_void_p)
+
+ # Numpy 1.21.0, 2021-05-18
+
+ def get_data(self):
+ """Deprecated getter for the `_ctypes.data` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn('"get_data" is deprecated. Use "data" instead',
+ DeprecationWarning, stacklevel=2)
+ return self.data
+
+ def get_shape(self):
+ """Deprecated getter for the `_ctypes.shape` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn('"get_shape" is deprecated. Use "shape" instead',
+ DeprecationWarning, stacklevel=2)
+ return self.shape
+
+ def get_strides(self):
+ """Deprecated getter for the `_ctypes.strides` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn('"get_strides" is deprecated. Use "strides" instead',
+ DeprecationWarning, stacklevel=2)
+ return self.strides
+
+ def get_as_parameter(self):
+ """Deprecated getter for the `_ctypes._as_parameter_` property.
+
+ .. deprecated:: 1.21
+ """
+ warnings.warn(
+ '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
+ DeprecationWarning, stacklevel=2,
+ )
+ return self._as_parameter_
+
+
+def _newnames(datatype, order):
+ """
+ Given a datatype and an order object, return a new names tuple, with the
+ order indicated
+ """
+ oldnames = datatype.names
+ nameslist = list(oldnames)
+ if isinstance(order, str):
+ order = [order]
+ seen = set()
+ if isinstance(order, (list, tuple)):
+ for name in order:
+ try:
+ nameslist.remove(name)
+ except ValueError:
+ if name in seen:
+ raise ValueError(f"duplicate field name: {name}") from None
+ else:
+ raise ValueError(f"unknown field name: {name}") from None
+ seen.add(name)
+ return tuple(list(order) + nameslist)
+ raise ValueError(f"unsupported order value: {order}")
+
+def _copy_fields(ary):
+ """Return copy of structured array with padding between fields removed.
+
+ Parameters
+ ----------
+ ary : ndarray
+ Structured array from which to remove padding bytes
+
+ Returns
+ -------
+ ary_copy : ndarray
+ Copy of ary with padding bytes removed
+ """
+ dt = ary.dtype
+ copy_dtype = {'names': dt.names,
+ 'formats': [dt.fields[name][0] for name in dt.names]}
+ return array(ary, dtype=copy_dtype, copy=True)
+
+def _promote_fields(dt1, dt2):
+ """ Perform type promotion for two structured dtypes.
+
+ Parameters
+ ----------
+ dt1 : structured dtype
+ First dtype.
+ dt2 : structured dtype
+ Second dtype.
+
+ Returns
+ -------
+ out : dtype
+ The promoted dtype
+
+ Notes
+ -----
+ If one of the inputs is aligned, the result will be. The titles of
+ both descriptors must match (point to the same field).
+ """
+ # Both must be structured and have the same names in the same order
+ if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
+ raise TypeError("invalid type promotion")
+
+ # if both are identical, we can (maybe!) just return the same dtype.
+ identical = dt1 is dt2
+ new_fields = []
+ for name in dt1.names:
+ field1 = dt1.fields[name]
+ field2 = dt2.fields[name]
+ new_descr = promote_types(field1[0], field2[0])
+ identical = identical and new_descr is field1[0]
+
+ # Check that the titles match (if given):
+ if field1[2:] != field2[2:]:
+ raise TypeError("invalid type promotion")
+ if len(field1) == 2:
+ new_fields.append((name, new_descr))
+ else:
+ new_fields.append(((field1[2], name), new_descr))
+
+ res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
+
+ # Might as well preserve identity (and metadata) if the dtype is identical
+ # and the itemsize and offsets are also unmodified. This could probably
+ # be sped up, or perhaps removed entirely.
+ if identical and res.itemsize == dt1.itemsize:
+ for name in dt1.names:
+ if dt1.fields[name][1] != res.fields[name][1]:
+ return res # the dtype changed.
+ return dt1
+
+ return res
+
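+# Illustrative sketch: fields with matching names are promoted pairwise,
+# e.g. (little-endian shown):
+#
+#   >>> _promote_fields(dtype([('a', 'i4')]), dtype([('a', 'f4')]))
+#   dtype([('a', '<f8')])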
+
+def _getfield_is_safe(oldtype, newtype, offset):
+ """ Checks safety of getfield for object arrays.
+
+ As in _view_is_safe, we need to check that memory containing objects is not
+ reinterpreted as a non-object datatype and vice versa.
+
+ Parameters
+ ----------
+ oldtype : data-type
+ Data type of the original ndarray.
+ newtype : data-type
+ Data type of the field being accessed by ndarray.getfield
+ offset : int
+ Offset of the field being accessed by ndarray.getfield
+
+ Raises
+ ------
+ TypeError
+ If the field access is invalid
+
+ """
+ if newtype.hasobject or oldtype.hasobject:
+ if offset == 0 and newtype == oldtype:
+ return
+ if oldtype.names is not None:
+ for name in oldtype.names:
+ if (oldtype.fields[name][1] == offset and
+ oldtype.fields[name][0] == newtype):
+ return
+ raise TypeError("Cannot get/set field of an object array")
+ return
+
+def _view_is_safe(oldtype, newtype):
+ """ Checks safety of a view involving object arrays, for example when
+ doing::
+
+ np.zeros(10, dtype=oldtype).view(newtype)
+
+ Parameters
+ ----------
+ oldtype : data-type
+ Data type of original ndarray
+ newtype : data-type
+ Data type of the view
+
+ Raises
+ ------
+ TypeError
+ If the new type is incompatible with the old type.
+
+ """
+
+ # if the types are equivalent, there is no problem.
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
+ if oldtype == newtype:
+ return
+
+ if newtype.hasobject or oldtype.hasobject:
+ raise TypeError("Cannot change data-type for object array.")
+ return
+
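+# Illustrative sketch of both checks: views between non-object dtypes pass
+# silently, while any view into or out of an object dtype raises:
+#
+#   >>> _view_is_safe(dtype('i4'), dtype('f4'))   # returns None: safe
+#   >>> _view_is_safe(dtype('O'), dtype('i8'))
+#   Traceback (most recent call last):
+#       ...
+#   TypeError: Cannot change data-type for object array.
+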
+# Given a string containing a PEP 3118 format specifier,
+# construct a NumPy dtype
+
+_pep3118_native_map = {
+ '?': '?',
+ 'c': 'S1',
+ 'b': 'b',
+ 'B': 'B',
+ 'h': 'h',
+ 'H': 'H',
+ 'i': 'i',
+ 'I': 'I',
+ 'l': 'l',
+ 'L': 'L',
+ 'q': 'q',
+ 'Q': 'Q',
+ 'e': 'e',
+ 'f': 'f',
+ 'd': 'd',
+ 'g': 'g',
+ 'Zf': 'F',
+ 'Zd': 'D',
+ 'Zg': 'G',
+ 's': 'S',
+ 'w': 'U',
+ 'O': 'O',
+ 'x': 'V', # padding
+}
+_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
+
+_pep3118_standard_map = {
+ '?': '?',
+ 'c': 'S1',
+ 'b': 'b',
+ 'B': 'B',
+ 'h': 'i2',
+ 'H': 'u2',
+ 'i': 'i4',
+ 'I': 'u4',
+ 'l': 'i4',
+ 'L': 'u4',
+ 'q': 'i8',
+ 'Q': 'u8',
+ 'e': 'f2',
+ 'f': 'f',
+ 'd': 'd',
+ 'Zf': 'F',
+ 'Zd': 'D',
+ 's': 'S',
+ 'w': 'U',
+ 'O': 'O',
+ 'x': 'V', # padding
+}
+_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
+
+_pep3118_unsupported_map = {
+ 'u': 'UCS-2 strings',
+ '&': 'pointers',
+ 't': 'bitfields',
+ 'X': 'function pointers',
+}
+
+class _Stream:
+ def __init__(self, s):
+ self.s = s
+ self.byteorder = '@'
+
+ def advance(self, n):
+ res = self.s[:n]
+ self.s = self.s[n:]
+ return res
+
+ def consume(self, c):
+ if self.s[:len(c)] == c:
+ self.advance(len(c))
+ return True
+ return False
+
+ def consume_until(self, c):
+ if callable(c):
+ i = 0
+ while i < len(self.s) and not c(self.s[i]):
+ i = i + 1
+ return self.advance(i)
+ else:
+ i = self.s.index(c)
+ res = self.advance(i)
+ self.advance(len(c))
+ return res
+
+ @property
+ def next(self):
+ return self.s[0]
+
+ def __bool__(self):
+ return bool(self.s)
+
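+# Illustrative sketch of the tokenizer:
+#
+#   >>> s = _Stream('T{d:x:}')
+#   >>> s.consume('T{')
+#   True
+#   >>> s.consume_until(':')
+#   'd'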
+
+def _dtype_from_pep3118(spec):
+ stream = _Stream(spec)
+ dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
+ return dtype
+
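+# Illustrative sketch (results shown for a little-endian platform):
+#
+#   >>> _dtype_from_pep3118('d')     # native double
+#   dtype('float64')
+#   >>> _dtype_from_pep3118('3s')    # three-byte bytes field
+#   dtype('S3')
+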
+def __dtype_from_pep3118(stream, is_subdtype):
+ field_spec = dict(
+ names=[],
+ formats=[],
+ offsets=[],
+ itemsize=0
+ )
+ offset = 0
+ common_alignment = 1
+ is_padding = False
+
+ # Parse spec
+ while stream:
+ value = None
+
+ # End of structure, bail out to upper level
+ if stream.consume('}'):
+ break
+
+ # Sub-arrays (1)
+ shape = None
+ if stream.consume('('):
+ shape = stream.consume_until(')')
+ shape = tuple(map(int, shape.split(',')))
+
+ # Byte order
+ if stream.next in ('@', '=', '<', '>', '^', '!'):
+ byteorder = stream.advance(1)
+ if byteorder == '!':
+ byteorder = '>'
+ stream.byteorder = byteorder
+
+ # Byte order characters also control native vs. standard type sizes
+ if stream.byteorder in ('@', '^'):
+ type_map = _pep3118_native_map
+ type_map_chars = _pep3118_native_typechars
+ else:
+ type_map = _pep3118_standard_map
+ type_map_chars = _pep3118_standard_typechars
+
+ # Item sizes
+ itemsize_str = stream.consume_until(lambda c: not c.isdigit())
+ if itemsize_str:
+ itemsize = int(itemsize_str)
+ else:
+ itemsize = 1
+
+ # Data types
+ is_padding = False
+
+ if stream.consume('T{'):
+ value, align = __dtype_from_pep3118(
+ stream, is_subdtype=True)
+ elif stream.next in type_map_chars:
+ if stream.next == 'Z':
+ typechar = stream.advance(2)
+ else:
+ typechar = stream.advance(1)
+
+ is_padding = (typechar == 'x')
+ dtypechar = type_map[typechar]
+ if dtypechar in 'USV':
+ dtypechar += '%d' % itemsize
+ itemsize = 1
+ numpy_byteorder = {'@': '=', '^': '='}.get(
+ stream.byteorder, stream.byteorder)
+ value = dtype(numpy_byteorder + dtypechar)
+ align = value.alignment
+ elif stream.next in _pep3118_unsupported_map:
+ desc = _pep3118_unsupported_map[stream.next]
+ raise NotImplementedError(
+ "Unrepresentable PEP 3118 data type {!r} ({})"
+ .format(stream.next, desc))
+ else:
+ raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
+
+ #
+ # Native alignment may require padding
+ #
+ # Here we assume that the presence of a '@' character implies
+ # that the start of the array is *already* aligned.
+ #
+ extra_offset = 0
+ if stream.byteorder == '@':
+ start_padding = (-offset) % align
+ intra_padding = (-value.itemsize) % align
+
+ offset += start_padding
+
+ if intra_padding != 0:
+ if itemsize > 1 or (shape is not None and _prod(shape) > 1):
+ # Inject internal padding to the end of the sub-item
+ value = _add_trailing_padding(value, intra_padding)
+ else:
+ # We can postpone the injection of internal padding,
+ # as the item appears at most once
+ extra_offset += intra_padding
+
+ # Update common alignment
+ common_alignment = _lcm(align, common_alignment)
+
+ # Convert itemsize to sub-array
+ if itemsize != 1:
+ value = dtype((value, (itemsize,)))
+
+ # Sub-arrays (2)
+ if shape is not None:
+ value = dtype((value, shape))
+
+ # Field name
+ if stream.consume(':'):
+ name = stream.consume_until(':')
+ else:
+ name = None
+
+ if not (is_padding and name is None):
+ if name is not None and name in field_spec['names']:
+ raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
+ field_spec['names'].append(name)
+ field_spec['formats'].append(value)
+ field_spec['offsets'].append(offset)
+
+ offset += value.itemsize
+ offset += extra_offset
+
+ field_spec['itemsize'] = offset
+
+ # extra final padding for aligned types
+ if stream.byteorder == '@':
+ field_spec['itemsize'] += (-offset) % common_alignment
+
+ # Check if this was a simple 1-item type, and unwrap it
+ if (field_spec['names'] == [None]
+ and field_spec['offsets'][0] == 0
+ and field_spec['itemsize'] == field_spec['formats'][0].itemsize
+ and not is_subdtype):
+ ret = field_spec['formats'][0]
+ else:
+ _fix_names(field_spec)
+ ret = dtype(field_spec)
+
+ # Finished
+ return ret, common_alignment
+
+def _fix_names(field_spec):
+ """ Replace names which are None with the next unused f%d name """
+ names = field_spec['names']
+ for i, name in enumerate(names):
+ if name is not None:
+ continue
+
+ j = 0
+ while True:
+ name = f'f{j}'
+ if name not in names:
+ break
+ j = j + 1
+ names[i] = name
+
+def _add_trailing_padding(value, padding):
+ """Inject the specified number of padding bytes at the end of a dtype"""
+ if value.fields is None:
+ field_spec = dict(
+ names=['f0'],
+ formats=[value],
+ offsets=[0],
+ itemsize=value.itemsize
+ )
+ else:
+ fields = value.fields
+ names = value.names
+ field_spec = dict(
+ names=names,
+ formats=[fields[name][0] for name in names],
+ offsets=[fields[name][1] for name in names],
+ itemsize=value.itemsize
+ )
+
+ field_spec['itemsize'] += padding
+ return dtype(field_spec)
+
+def _prod(a):
+ p = 1
+ for x in a:
+ p *= x
+ return p
+
+def _gcd(a, b):
+ """Calculate the greatest common divisor of a and b"""
+ while b:
+ a, b = b, a % b
+ return a
+
+def _lcm(a, b):
+ return a // _gcd(a, b) * b
+
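+# Illustrative: _prod((2, 3, 4)) == 24, _gcd(12, 18) == 6, _lcm(4, 6) == 12.
+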
+def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
+ ['{}={!r}'.format(k, v)
+ for k, v in kwargs.items()])
+ args = inputs + kwargs.get('out', ())
+ types_string = ', '.join(repr(type(arg).__name__) for arg in args)
+ return ('operand type(s) all returned NotImplemented from '
+ '__array_ufunc__({!r}, {!r}, {}): {}'
+ .format(ufunc, method, args_string, types_string))
+
+
+def array_function_errmsg_formatter(public_api, types):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+ return ("no implementation found for '{}' on types that implement "
+ '__array_function__: {}'.format(func_name, list(types)))
+
+
+def _ufunc_doc_signature_formatter(ufunc):
+ """
+ Builds a signature string which resembles PEP 457
+
+ This is used to construct the first line of the docstring
+ """
+
+ # input arguments are simple
+ if ufunc.nin == 1:
+ in_args = 'x'
+ else:
+ in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
+
+ # output arguments are both keyword or positional
+ if ufunc.nout == 0:
+ out_args = ', /, out=()'
+ elif ufunc.nout == 1:
+ out_args = ', /, out=None'
+ else:
+ out_args = '[, {positional}], / [, out={default}]'.format(
+ positional=', '.join(
+ 'out{}'.format(i+1) for i in range(ufunc.nout)),
+ default=repr((None,)*ufunc.nout)
+ )
+
+ # keyword only args depend on whether this is a gufunc
+ kwargs = (
+ ", casting='same_kind'"
+ ", order='K'"
+ ", dtype=None"
+ ", subok=True"
+ )
+
+ # NOTE: gufuncs may or may not support the `axis` parameter
+ if ufunc.signature is None:
+ kwargs = f", where=True{kwargs}[, signature, extobj]"
+ else:
+ kwargs += "[, signature, extobj, axes, axis]"
+
+ # join all the parts together
+ return '{name}({in_args}{out_args}, *{kwargs})'.format(
+ name=ufunc.__name__,
+ in_args=in_args,
+ out_args=out_args,
+ kwargs=kwargs
+ )
+
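+# Illustrative sketch: for np.add (nin=2, nout=1, no gufunc signature) this
+# produces the first line of its docstring:
+#
+#   add(x1, x2, /, out=None, *, where=True, casting='same_kind',
+#       order='K', dtype=None, subok=True[, signature, extobj])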
+
+def npy_ctypes_check(cls):
+ # determine if a class comes from ctypes, in order to work around
+ # a bug in the buffer protocol for those objects, bpo-10746
+ try:
+ # ctypes classes are new-style, so they have an __mro__. This probably fails
+ # for ctypes classes with multiple inheritance.
+ if IS_PYPY:
+ # (..., _ctypes.basics._CData, Bufferable, object)
+ ctype_base = cls.__mro__[-3]
+ else:
+ # (..., _ctypes._CData, object)
+ ctype_base = cls.__mro__[-2]
+ # right now, they're part of the _ctypes module
+ return '_ctypes' in ctype_base.__module__
+ except Exception:
+ return False
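+# Illustrative sketch:
+#
+#   >>> import ctypes
+#   >>> npy_ctypes_check(ctypes.c_int), npy_ctypes_check(int)
+#   (True, False)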
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_internal.pyi b/venv/lib/python3.9/site-packages/numpy/core/_internal.pyi
new file mode 100644
index 00000000..8a25ef2c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_internal.pyi
@@ -0,0 +1,30 @@
+from typing import Any, TypeVar, overload, Generic
+import ctypes as ct
+
+from numpy import ndarray
+from numpy.ctypeslib import c_intp
+
+_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast`
+_CT = TypeVar("_CT", bound=ct._CData)
+_PT = TypeVar("_PT", bound=None | int)
+
+# TODO: Let the likes of `shape_as` and `strides_as` return `None`
+# for 0D arrays once we've got shape-support
+
+class _ctypes(Generic[_PT]):
+ @overload
+ def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]: ...
+ @overload
+ def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ...
+ @property
+ def data(self) -> _PT: ...
+ @property
+ def shape(self) -> ct.Array[c_intp]: ...
+ @property
+ def strides(self) -> ct.Array[c_intp]: ...
+ @property
+ def _as_parameter_(self) -> ct.c_void_p: ...
+
+ def data_as(self, obj: type[_CastT]) -> _CastT: ...
+ def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
+ def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_machar.py b/venv/lib/python3.9/site-packages/numpy/core/_machar.py
new file mode 100644
index 00000000..3cc7db27
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_machar.py
@@ -0,0 +1,357 @@
+"""
+Machine arithmetic - determine the parameters of the
+floating-point arithmetic system
+
+Author: Pearu Peterson, September 2003
+
+"""
+__all__ = ['MachAr']
+
+from numpy.core.fromnumeric import any
+from numpy.core._ufunc_config import errstate
+from numpy.core.overrides import set_module
+
+# Need to speed this up...especially for longfloat
+
+# Deprecated 2021-10-20, NumPy 1.22
+@set_module('numpy')
+class MachAr:
+ """
+ Diagnosing machine parameters.
+
+ Attributes
+ ----------
+ ibeta : int
+ Radix in which numbers are represented.
+ it : int
+ Number of base-`ibeta` digits in the floating point mantissa M.
+ machep : int
+ Exponent of the smallest (most negative) power of `ibeta` that,
+ added to 1.0, gives something different from 1.0.
+ eps : float
+ Floating-point number ``beta**machep`` (floating point precision)
+ negep : int
+ Exponent of the smallest power of `ibeta` that, subtracted
+ from 1.0, gives something different from 1.0.
+ epsneg : float
+ Floating-point number ``beta**negep``.
+ iexp : int
+ Number of bits in the exponent (including its sign and bias).
+ minexp : int
+ Smallest (most negative) power of `ibeta` consistent with there
+ being no leading zeros in the mantissa.
+ xmin : float
+ Floating-point number ``beta**minexp`` (the smallest [in
+ magnitude] positive floating point number with full precision).
+ maxexp : int
+ Smallest (positive) power of `ibeta` that causes overflow.
+ xmax : float
+ ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
+ usable floating value).
+ irnd : int
+ In ``range(6)``, information on the kind of rounding used in
+ addition, and on how underflow is handled.
+ ngrd : int
+ Number of 'guard digits' used when truncating the product
+ of two mantissas to fit the representation.
+ epsilon : float
+ Same as `eps`.
+ tiny : float
+ An alias for `smallest_normal`, kept for backwards compatibility.
+ huge : float
+ Same as `xmax`.
+ precision : float
+ ``- int(-log10(eps))``
+ resolution : float
+ ``- 10**(-precision)``
+ smallest_normal : float
+ The smallest positive floating point number with 1 as leading bit in
+ the mantissa following IEEE-754. Same as `xmin`.
+ smallest_subnormal : float
+ The smallest positive floating point number with 0 as leading bit in
+ the mantissa following IEEE-754.
+
+ Parameters
+ ----------
+ float_conv : function, optional
+ Function that converts an integer or integer array to a float
+ or float array. Default is `float`.
+ int_conv : function, optional
+ Function that converts a float or float array to an integer or
+ integer array. Default is `int`.
+ float_to_float : function, optional
+ Function that converts a float array to float. Default is `float`.
+ Note that this does not seem to do anything useful in the current
+ implementation.
+ float_to_str : function, optional
+ Function that converts a single float to a string. Default is
+ ``lambda v: '%24.16e' % v``.
+ title : str, optional
+ Title that is printed in the string representation of `MachAr`.
+
+ See Also
+ --------
+ finfo : Machine limits for floating point types.
+ iinfo : Machine limits for integer types.
+
+ References
+ ----------
+ .. [1] Press, Teukolsky, Vetterling and Flannery,
+ "Numerical Recipes in C++," 2nd ed,
+ Cambridge University Press, 2002, p. 31.
+
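+ Examples
+ --------
+ A minimal, illustrative invocation; the reported values depend on the
+ platform's floating-point hardware::
+
+ >>> from numpy.core._machar import MachAr
+ >>> print(MachAr()) # doctest: +SKIP
+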
+ """
+
+ def __init__(self, float_conv=float, int_conv=int,
+ float_to_float=float,
+ float_to_str=lambda v: '%24.16e' % v,
+ title='Python floating point number'):
+ """
+
+ float_conv - convert integer to float (array)
+ int_conv - convert float (array) to integer
+ float_to_float - convert float array to float
+ float_to_str - convert array float to str
+ title - description of used floating point numbers
+
+ """
+ # We ignore all errors here because we are purposely triggering
+ # underflow to detect the properties of the running arch.
+ with errstate(under='ignore'):
+ self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
+
+ def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
+ max_iterN = 10000
+ msg = "Did not converge after %d tries with %s"
+ one = float_conv(1)
+ two = one + one
+ zero = one - one
+
+ # Do we really need to do this? Aren't they 2 and 2.0?
+ # Determine ibeta and beta
+ a = one
+ for _ in range(max_iterN):
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ b = one
+ for _ in range(max_iterN):
+ b = b + b
+ temp = a + b
+ itemp = int_conv(temp-a)
+ if any(itemp != 0):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ ibeta = itemp
+ beta = float_conv(ibeta)
+
+ # Determine it and irnd
+ it = -1
+ b = one
+ for _ in range(max_iterN):
+ it = it + 1
+ b = b * beta
+ temp = b + one
+ temp1 = temp - b
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+
+ betah = beta / two
+ a = one
+ for _ in range(max_iterN):
+ a = a + a
+ temp = a + one
+ temp1 = temp - a
+ if any(temp1 - one != zero):
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ temp = a + betah
+ irnd = 0
+ if any(temp-a != zero):
+ irnd = 1
+ tempa = a + beta
+ temp = tempa + betah
+ if irnd == 0 and any(temp-tempa != zero):
+ irnd = 2
+
+ # Determine negep and epsneg
+ negep = it + 3
+ betain = one / beta
+ a = one
+ for i in range(negep):
+ a = a * betain
+ b = a
+ for _ in range(max_iterN):
+ temp = one - a
+ if any(temp-one != zero):
+ break
+ a = a * beta
+ negep = negep - 1
+ # Prevent infinite loop on PPC with gcc 4.0:
+ if negep < 0:
+ raise RuntimeError("could not determine machine tolerance "
+ "for 'negep', locals() -> %s" % (locals()))
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ negep = -negep
+ epsneg = a
+
+ # Determine machep and eps
+ machep = - it - 3
+ a = b
+
+ for _ in range(max_iterN):
+ temp = one + a
+ if any(temp-one != zero):
+ break
+ a = a * beta
+ machep = machep + 1
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ eps = a
+
+ # Determine ngrd
+ ngrd = 0
+ temp = one + eps
+ if irnd == 0 and any(temp*one - one != zero):
+ ngrd = 1
+
+ # Determine iexp
+ i = 0
+ k = 1
+ z = betain
+ t = one + eps
+ nxres = 0
+ for _ in range(max_iterN):
+ y = z
+ z = y*y
+ a = z*one # Check here for underflow
+ temp = z*t
+ if any(a+a == zero) or any(abs(z) >= y):
+ break
+ temp1 = temp * betain
+ if any(temp1*beta == z):
+ break
+ i = i + 1
+ k = k + k
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ if ibeta != 10:
+ iexp = i + 1
+ mx = k + k
+ else:
+ iexp = 2
+ iz = ibeta
+ while k >= iz:
+ iz = iz * ibeta
+ iexp = iexp + 1
+ mx = iz + iz - 1
+
+ # Determine minexp and xmin
+ for _ in range(max_iterN):
+ xmin = y
+ y = y * betain
+ a = y * one
+ temp = y * t
+ if any((a + a) != zero) and any(abs(y) < xmin):
+ k = k + 1
+ temp1 = temp * betain
+ if any(temp1*beta == y) and any(temp != y):
+ nxres = 3
+ xmin = y
+ break
+ else:
+ break
+ else:
+ raise RuntimeError(msg % (_, one.dtype))
+ minexp = -k
+
+ # Determine maxexp, xmax
+ if mx <= k + k - 3 and ibeta != 10:
+ mx = mx + mx
+ iexp = iexp + 1
+ maxexp = mx + minexp
+ irnd = irnd + nxres
+ if irnd >= 2:
+ maxexp = maxexp - 2
+ i = maxexp + minexp
+ if ibeta == 2 and not i:
+ maxexp = maxexp - 1
+ if i > 20:
+ maxexp = maxexp - 1
+ if any(a != y):
+ maxexp = maxexp - 2
+ xmax = one - epsneg
+ if any(xmax*one != xmax):
+ xmax = one - beta*epsneg
+ xmax = xmax / (xmin*beta*beta*beta)
+ i = maxexp + minexp + 3
+ for j in range(i):
+ if ibeta == 2:
+ xmax = xmax + xmax
+ else:
+ xmax = xmax * beta
+
+ smallest_subnormal = abs(xmin / beta ** (it))
+
+ self.ibeta = ibeta
+ self.it = it
+ self.negep = negep
+ self.epsneg = float_to_float(epsneg)
+ self._str_epsneg = float_to_str(epsneg)
+ self.machep = machep
+ self.eps = float_to_float(eps)
+ self._str_eps = float_to_str(eps)
+ self.ngrd = ngrd
+ self.iexp = iexp
+ self.minexp = minexp
+ self.xmin = float_to_float(xmin)
+ self._str_xmin = float_to_str(xmin)
+ self.maxexp = maxexp
+ self.xmax = float_to_float(xmax)
+ self._str_xmax = float_to_str(xmax)
+ self.irnd = irnd
+
+ self.title = title
+ # Commonly used parameters
+ self.epsilon = self.eps
+ self.tiny = self.xmin
+ self.huge = self.xmax
+ self.smallest_normal = self.xmin
+ self._str_smallest_normal = float_to_str(self.xmin)
+ self.smallest_subnormal = float_to_float(smallest_subnormal)
+ self._str_smallest_subnormal = float_to_str(smallest_subnormal)
+
+ import math
+ self.precision = int(-math.log10(float_to_float(self.eps)))
+ ten = two + two + two + two + two
+ resolution = ten ** (-self.precision)
+ self.resolution = float_to_float(resolution)
+ self._str_resolution = float_to_str(resolution)
+
+ def __str__(self):
+ fmt = (
+ 'Machine parameters for %(title)s\n'
+ '---------------------------------------------------------------------\n'
+ 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
+ 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
+ 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
+ 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
+ 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
+ 'smallest_normal=%(smallest_normal)s '
+ 'smallest_subnormal=%(smallest_subnormal)s\n'
+ '---------------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
+
+
+if __name__ == '__main__':
+ print(MachAr())
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_methods.py b/venv/lib/python3.9/site-packages/numpy/core/_methods.py
new file mode 100644
index 00000000..040f02a9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_methods.py
@@ -0,0 +1,297 @@
+"""
+Array methods which are called by both the C-code for the method
+and the Python code for the NumPy-namespace function
+
+"""
+import warnings
+from contextlib import nullcontext
+
+from numpy.core import multiarray as mu
+from numpy.core import umath as um
+from numpy.core.multiarray import asanyarray
+from numpy.core import numerictypes as nt
+from numpy.core import _exceptions
+from numpy.core._ufunc_config import _no_nep50_warning
+from numpy._globals import _NoValue
+from numpy.compat import pickle, os_fspath
+
+# save those O(100) nanoseconds!
+umr_maximum = um.maximum.reduce
+umr_minimum = um.minimum.reduce
+umr_sum = um.add.reduce
+umr_prod = um.multiply.reduce
+umr_any = um.logical_or.reduce
+umr_all = um.logical_and.reduce
+
+# Complex types to -> (2,)float view for fast-path computation in _var()
+_complex_to_float = {
+ nt.dtype(nt.csingle) : nt.dtype(nt.single),
+ nt.dtype(nt.cdouble) : nt.dtype(nt.double),
+}
+# Special case for Windows: ensure double takes precedence
+if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
+ _complex_to_float.update({
+ nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
+ })
+
+# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
+# small reductions
+def _amax(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_maximum(a, axis, None, out, keepdims, initial, where)
+
+def _amin(a, axis=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_minimum(a, axis, None, out, keepdims, initial, where)
+
+def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_sum(a, axis, dtype, out, keepdims, initial, where)
+
+def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
+ initial=_NoValue, where=True):
+ return umr_prod(a, axis, dtype, out, keepdims, initial, where)
+
+def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+ # Parsing keyword arguments is currently fairly slow, so avoid it for now
+ if where is True:
+ return umr_any(a, axis, dtype, out, keepdims)
+ return umr_any(a, axis, dtype, out, keepdims, where=where)
+
+def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+ # Parsing keyword arguments is currently fairly slow, so avoid it for now
+ if where is True:
+ return umr_all(a, axis, dtype, out, keepdims)
+ return umr_all(a, axis, dtype, out, keepdims, where=where)
+
+def _count_reduce_items(arr, axis, keepdims=False, where=True):
+ # fast-path for the default case
+ if where is True:
+ # no boolean mask given, calculate items according to axis
+ if axis is None:
+ axis = tuple(range(arr.ndim))
+ elif not isinstance(axis, tuple):
+ axis = (axis,)
+ items = 1
+ for ax in axis:
+ items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
+ items = nt.intp(items)
+ else:
+ # TODO: Optimize case when `where` is broadcast along a non-reduction
+ # axis and the full sum is more expensive than needed.
+
+ # guarded to protect circular imports
+ from numpy.lib.stride_tricks import broadcast_to
+ # count True values in (potentially broadcasted) boolean mask
+ items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
+ keepdims)
+ return items
+
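+# Illustrative sketch: for a 2x3 array `a`, _count_reduce_items(a, None)
+# gives 6 and _count_reduce_items(a, 1) gives 3; with a boolean `where`
+# mask, the True entries (broadcast to `a.shape`) are counted instead.
+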
+# Numpy 1.17.0, 2019-02-24
+# Various clip behavior deprecations, marked with _clip_dep as a prefix.
+
+def _clip_dep_is_scalar_nan(a):
+ # guarded to protect circular imports
+ from numpy.core.fromnumeric import ndim
+ if ndim(a) != 0:
+ return False
+ try:
+ return um.isnan(a)
+ except TypeError:
+ return False
+
+def _clip_dep_is_byte_swapped(a):
+ if isinstance(a, mu.ndarray):
+ return not a.dtype.isnative
+ return False
+
+def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
+ # normal path
+ if casting is not None:
+ return ufunc(*args, out=out, casting=casting, **kwargs)
+
+ # try to deal with broken casting rules
+ try:
+ return ufunc(*args, out=out, **kwargs)
+ except _exceptions._UFuncOutputCastingError as e:
+ # Numpy 1.17.0, 2019-02-24
+ warnings.warn(
+ "Converting the output of clip from {!r} to {!r} is deprecated. "
+ "Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
+ "correct the type of the variables.".format(e.from_, e.to),
+ DeprecationWarning,
+ stacklevel=2
+ )
+ return ufunc(*args, out=out, casting="unsafe", **kwargs)
+
+def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
+ if min is None and max is None:
+ raise ValueError("One of max or min must be given")
+
+ # Numpy 1.17.0, 2019-02-24
+ # This deprecation probably incurs a substantial slowdown for small arrays,
+ # it will be good to get rid of it.
+ if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
+ using_deprecated_nan = False
+ if _clip_dep_is_scalar_nan(min):
+ min = -float('inf')
+ using_deprecated_nan = True
+ if _clip_dep_is_scalar_nan(max):
+ max = float('inf')
+ using_deprecated_nan = True
+ if using_deprecated_nan:
+ warnings.warn(
+ "Passing `np.nan` to mean no clipping in np.clip has always "
+ "been unreliable, and is now deprecated. "
+ "In future, this will always return nan, like it already does "
+ "when min or max are arrays that contain nan. "
+ "To skip a bound, pass either None or an np.inf of an "
+ "appropriate sign.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ if min is None:
+ return _clip_dep_invoke_with_casting(
+ um.minimum, a, max, out=out, casting=casting, **kwargs)
+ elif max is None:
+ return _clip_dep_invoke_with_casting(
+ um.maximum, a, min, out=out, casting=casting, **kwargs)
+ else:
+ return _clip_dep_invoke_with_casting(
+ um.clip, a, min, max, out=out, casting=casting, **kwargs)
+
+def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
+ arr = asanyarray(a)
+
+ is_float16_result = False
+
+ rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
+ if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
+ warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None:
+ if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+ dtype = mu.dtype('f8')
+ elif issubclass(arr.dtype.type, nt.float16):
+ dtype = mu.dtype('f4')
+ is_float16_result = True
+
+ ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
+ if isinstance(ret, mu.ndarray):
+ with _no_nep50_warning():
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
+ if is_float16_result and out is None:
+ ret = arr.dtype.type(ret)
+ elif hasattr(ret, 'dtype'):
+ if is_float16_result:
+ ret = arr.dtype.type(ret / rcount)
+ else:
+ ret = ret.dtype.type(ret / rcount)
+ else:
+ ret = ret / rcount
+
+ return ret
+
+def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
+ where=True):
+ arr = asanyarray(a)
+
+ rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
+ # Make this warning show up on top.
+ if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
+ warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
+ stacklevel=2)
+
+ # Cast bool, unsigned int, and int to float64 by default
+ if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
+ dtype = mu.dtype('f8')
+
+ # Compute the mean.
+ # Note that if dtype is not of inexact type then arraymean will
+ # not be either.
+ arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
+ # The shape of rcount has to match arrmean to not change the shape of out
+ # in broadcasting. Otherwise, it cannot be stored back to arrmean.
+ if rcount.ndim == 0:
+ # fast-path for default case when where is True
+ div = rcount
+ else:
+ # matching rcount to arrmean when where is specified as array
+ div = rcount.reshape(arrmean.shape)
+ if isinstance(arrmean, mu.ndarray):
+ with _no_nep50_warning():
+ arrmean = um.true_divide(arrmean, div, out=arrmean,
+ casting='unsafe', subok=False)
+ elif hasattr(arrmean, "dtype"):
+ arrmean = arrmean.dtype.type(arrmean / rcount)
+ else:
+ arrmean = arrmean / rcount
+
+ # Compute sum of squared deviations from mean
+ # Note that x may not be inexact and that we need it to be an array,
+ # not a scalar.
+ x = asanyarray(arr - arrmean)
+
+ if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
+ x = um.multiply(x, x, out=x)
+ # Fast-paths for built-in complex types
+ elif x.dtype in _complex_to_float:
+ xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
+ um.multiply(xv, xv, out=xv)
+ x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
+ # Most general case; includes handling object arrays containing imaginary
+ # numbers and complex types with non-native byteorder
+ else:
+ x = um.multiply(x, um.conjugate(x), out=x).real
+
+ ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)
+
+ # Compute degrees of freedom and make sure it is not negative.
+ rcount = um.maximum(rcount - ddof, 0)
+
+ # divide by degrees of freedom
+ if isinstance(ret, mu.ndarray):
+ with _no_nep50_warning():
+ ret = um.true_divide(
+ ret, rcount, out=ret, casting='unsafe', subok=False)
+ elif hasattr(ret, 'dtype'):
+ ret = ret.dtype.type(ret / rcount)
+ else:
+ ret = ret / rcount
+
+ return ret
+
+def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
+ where=True):
+ ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims, where=where)
+
+ if isinstance(ret, mu.ndarray):
+ ret = um.sqrt(ret, out=ret)
+ elif hasattr(ret, 'dtype'):
+ ret = ret.dtype.type(um.sqrt(ret))
+ else:
+ ret = um.sqrt(ret)
+
+ return ret
+
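+# Illustrative sketch: for floating-point input these match the public
+# methods, e.g. _var(asanyarray([1., 2., 3.])) == 2/3 and _std of the
+# same data is sqrt(2/3) (ddof=0 by default).
+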
+def _ptp(a, axis=None, out=None, keepdims=False):
+ return um.subtract(
+ umr_maximum(a, axis, None, out, keepdims),
+ umr_minimum(a, axis, None, None, keepdims),
+ out
+ )
+
+def _dump(self, file, protocol=2):
+ if hasattr(file, 'write'):
+ ctx = nullcontext(file)
+ else:
+ ctx = open(os_fspath(file), "wb")
+ with ctx as f:
+ pickle.dump(self, f, protocol=protocol)
+
+def _dumps(self, protocol=2):
+ return pickle.dumps(self, protocol=protocol)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..9fa9dcd3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_multiarray_tests.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so
new file mode 100755
index 00000000..472b747c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_multiarray_umath.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..94c82e5e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_operand_flag_tests.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..f95ef36d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_rational_tests.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so
new file mode 100755
index 00000000..2ca0f1e0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_simd.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_string_helpers.py b/venv/lib/python3.9/site-packages/numpy/core/_string_helpers.py
new file mode 100644
index 00000000..45e6a739
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_string_helpers.py
@@ -0,0 +1,100 @@
+"""
+String-handling utilities to avoid locale-dependence.
+
+Used primarily to generate type name aliases.
+"""
+# "import string" is costly to import!
+# Construct the translation tables directly
+# "A" = chr(65), "a" = chr(97)
+_all_chars = [chr(_m) for _m in range(256)]
+_ascii_upper = _all_chars[65:65+26]
+_ascii_lower = _all_chars[97:97+26]
+LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
+UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
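+# Illustrative: "ABC".translate(LOWER_TABLE) -> "abc" and
+# "abc".translate(UPPER_TABLE) -> "ABC", independent of locale.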
+
+
+def english_lower(s):
+ """ Apply English case rules to convert ASCII strings to all lower case.
+
+ This is an internal utility function to replace calls to str.lower() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ lowered : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_lower
+ >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
+ >>> english_lower('')
+ ''
+ """
+ lowered = s.translate(LOWER_TABLE)
+ return lowered
+
+
+def english_upper(s):
+ """ Apply English case rules to convert ASCII strings to all upper case.
+
+ This is an internal utility function to replace calls to str.upper() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ uppered : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_upper
+ >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+ >>> english_upper('')
+ ''
+ """
+ uppered = s.translate(UPPER_TABLE)
+ return uppered
+
+
+def english_capitalize(s):
+ """ Apply English case rules to convert the first character of an ASCII
+ string to upper case.
+
+ This is an internal utility function to replace calls to str.capitalize()
+ such that we can avoid changing behavior with changing locales.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ capitalized : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_capitalize
+ >>> english_capitalize('int8')
+ 'Int8'
+ >>> english_capitalize('Int8')
+ 'Int8'
+ >>> english_capitalize('')
+ ''
+ """
+ if s:
+ return english_upper(s[0]) + s[1:]
+ else:
+ return s
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..23c5828a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_struct_ufunc_tests.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_type_aliases.py b/venv/lib/python3.9/site-packages/numpy/core/_type_aliases.py
new file mode 100644
index 00000000..38f1a099
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_type_aliases.py
@@ -0,0 +1,245 @@
+"""
+For backwards compatibility, numpy has a very large number of different naming
+conventions for the scalar types (those subclassing from `numpy.generic`).
+This file produces a convoluted set of dictionaries mapping names to types,
+and sometimes other mappings too.
+
+.. data:: allTypes
+ A dictionary of names to types that will be exposed as attributes through
+ ``np.core.numerictypes.*``
+
+.. data:: sctypeDict
+ Similar to `allTypes`, but maps a broader set of aliases to their types.
+
+.. data:: sctypes
+ A dictionary keyed by a "type group" string, providing a list of types
+ under that group.
+
+"""
+
+from numpy.compat import unicode
+from numpy.core._string_helpers import english_lower
+from numpy.core.multiarray import typeinfo, dtype
+from numpy.core._dtype import _kind_name
+
+
+sctypeDict = {} # Contains all leaf-node scalar types with aliases
+allTypes = {} # Collect the types we will add to the module
+
+
+# separate the actual type info from the abstract base classes
+_abstract_types = {}
+_concrete_typeinfo = {}
+for k, v in typeinfo.items():
+ # make all the keys lowercase too
+ k = english_lower(k)
+ if isinstance(v, type):
+ _abstract_types[k] = v
+ else:
+ _concrete_typeinfo[k] = v
+
+_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
+
+
+def _bits_of(obj):
+ try:
+ info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
+ except StopIteration:
+ if obj in _abstract_types.values():
+ msg = "Cannot count the bits of an abstract type"
+ raise ValueError(msg) from None
+
+ # some third-party type - make a best-guess
+ return dtype(obj).itemsize * 8
+ else:
+ return info.bits
+
+
+def bitname(obj):
+ """Return a bit-width name for a given type object"""
+ bits = _bits_of(obj)
+ dt = dtype(obj)
+ char = dt.kind
+ base = _kind_name(dt)
+
+ if base == 'object':
+ bits = 0
+
+ if bits != 0:
+ char = "%s%d" % (char, bits // 8)
+
+ return base, bits, char
+
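+# Illustrative: bitname(dtype('f8').type) gives ('float', 64, 'f8'), while
+# bitname(dtype('O').type) gives ('object', 0, 'O') since object types
+# carry no fixed bit width.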
+
+def _add_types():
+ for name, info in _concrete_typeinfo.items():
+ # define C-name and insert typenum and typechar references also
+ allTypes[name] = info.type
+ sctypeDict[name] = info.type
+ sctypeDict[info.char] = info.type
+ sctypeDict[info.num] = info.type
+
+ for name, cls in _abstract_types.items():
+ allTypes[name] = cls
+_add_types()
+
+# This is the priority order used to assign the bit-sized NPY_INTxx names, which
+# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
+# consistent.
+# If two C types have the same size, then the earliest one in this list is used
+# as the sized name.
+_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
+_uint_ctypes = list('u' + t for t in _int_ctypes)
+
+def _add_aliases():
+ for name, info in _concrete_typeinfo.items():
+ # these are handled by _add_integer_aliases
+ if name in _int_ctypes or name in _uint_ctypes:
+ continue
+
+ # insert bit-width version for this class (if relevant)
+ base, bit, char = bitname(info.type)
+
+ myname = "%s%d" % (base, bit)
+
+ # ensure that (c)longdouble does not overwrite the aliases assigned to
+ # (c)double
+ if name in ('longdouble', 'clongdouble') and myname in allTypes:
+ continue
+
+ # Add to the main namespace if desired:
+ if bit != 0 and base != "bool":
+ allTypes[myname] = info.type
+
+ # add forward, reverse, and string mapping to numarray
+ sctypeDict[char] = info.type
+
+ # add mapping for the bit-width name as well
+ sctypeDict[myname] = info.type
+
+
+_add_aliases()
+
+def _add_integer_aliases():
+ seen_bits = set()
+ for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
+ i_info = _concrete_typeinfo[i_ctype]
+ u_info = _concrete_typeinfo[u_ctype]
+ bits = i_info.bits # same for both
+
+ for info, charname, intname in [
+ (i_info,'i%d' % (bits//8,), 'int%d' % bits),
+ (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
+ if bits not in seen_bits:
+ # sometimes two different types have the same number of bits
+ # if so, the one iterated over first takes precedence
+ allTypes[intname] = info.type
+ sctypeDict[intname] = info.type
+ sctypeDict[charname] = info.type
+
+ seen_bits.add(bits)
+
+_add_integer_aliases()
+
+# We use these later
+void = allTypes['void']
+
+#
+# Rework the Python names (so that float and complex and int are consistent
+# with Python usage)
+#
+def _set_up_aliases():
+ type_pairs = [('complex_', 'cdouble'),
+ ('single', 'float'),
+ ('csingle', 'cfloat'),
+ ('singlecomplex', 'cfloat'),
+ ('float_', 'double'),
+ ('intc', 'int'),
+ ('uintc', 'uint'),
+ ('int_', 'long'),
+ ('uint', 'ulong'),
+ ('cfloat', 'cdouble'),
+ ('longfloat', 'longdouble'),
+ ('clongfloat', 'clongdouble'),
+ ('longcomplex', 'clongdouble'),
+ ('bool_', 'bool'),
+ ('bytes_', 'string'),
+ ('string_', 'string'),
+ ('str_', 'unicode'),
+ ('unicode_', 'unicode'),
+ ('object_', 'object')]
+ for alias, t in type_pairs:
+ allTypes[alias] = allTypes[t]
+ sctypeDict[alias] = sctypeDict[t]
+ # Remove aliases overriding python types and modules
+ to_remove = ['object', 'int', 'float',
+ 'complex', 'bool', 'string', 'datetime', 'timedelta',
+ 'bytes', 'str']
+
+ for t in to_remove:
+ try:
+ del allTypes[t]
+ del sctypeDict[t]
+ except KeyError:
+ pass
+
+ # Additional aliases in sctypeDict that should not be exposed as attributes
+ attrs_to_remove = ['ulong']
+
+ for t in attrs_to_remove:
+ try:
+ del allTypes[t]
+ except KeyError:
+ pass
+_set_up_aliases()
+
+
+sctypes = {'int': [],
+ 'uint':[],
+ 'float':[],
+ 'complex':[],
+ 'others':[bool, object, bytes, unicode, void]}
+
+def _add_array_type(typename, bits):
+ try:
+ t = allTypes['%s%d' % (typename, bits)]
+ except KeyError:
+ pass
+ else:
+ sctypes[typename].append(t)
+
+def _set_array_types():
+ ibytes = [1, 2, 4, 8, 16, 32, 64]
+ fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
+ for bytes in ibytes:
+ bits = 8*bytes
+ _add_array_type('int', bits)
+ _add_array_type('uint', bits)
+ for bytes in fbytes:
+ bits = 8*bytes
+ _add_array_type('float', bits)
+ _add_array_type('complex', 2*bits)
+ _gi = dtype('p')
+ if _gi.type not in sctypes['int']:
+ indx = 0
+ sz = _gi.itemsize
+ _lst = sctypes['int']
+ while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
+ indx += 1
+ sctypes['int'].insert(indx, _gi.type)
+ sctypes['uint'].insert(indx, dtype('P').type)
+_set_array_types()
+
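+# Illustrative result on a typical 64-bit platform:
+#   sctypes['int']   -> [int8, int16, int32, int64]
+#   sctypes['uint']  -> [uint8, uint16, uint32, uint64]
+#   sctypes['float'] -> [float16, float32, float64, longdouble]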
+
+# Add additional strings to the sctypeDict
+_toadd = ['int', 'float', 'complex', 'bool', 'object',
+ 'str', 'bytes', ('a', 'bytes_'),
+ ('int0', 'intp'), ('uint0', 'uintp')]
+
+for name in _toadd:
+ if isinstance(name, tuple):
+ sctypeDict[name[0]] = allTypes[name[1]]
+ else:
+ sctypeDict[name] = allTypes['%s_' % name]
+
+del _toadd, name
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi b/venv/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi
new file mode 100644
index 00000000..bbead0cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_type_aliases.pyi
@@ -0,0 +1,13 @@
+from typing import TypedDict
+
+from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
+
+class _SCTypes(TypedDict):
+ int: list[type[signedinteger]]
+ uint: list[type[unsignedinteger]]
+ float: list[type[floating]]
+ complex: list[type[complexfloating]]
+ others: list[type]
+
+sctypeDict: dict[int | str, type[generic]]
+sctypes: _SCTypes
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.py b/venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.py
new file mode 100644
index 00000000..5aac7ab0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.py
@@ -0,0 +1,466 @@
+"""
+Functions for changing global ufunc configuration
+
+This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
+"""
+import collections.abc
+import contextlib
+import contextvars
+
+from .overrides import set_module
+from .umath import (
+ UFUNC_BUFSIZE_DEFAULT,
+ ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
+ SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
+)
+from . import umath
+
+__all__ = [
+ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
+ "errstate", '_no_nep50_warning'
+]
+
+_errdict = {"ignore": ERR_IGNORE,
+ "warn": ERR_WARN,
+ "raise": ERR_RAISE,
+ "call": ERR_CALL,
+ "print": ERR_PRINT,
+ "log": ERR_LOG}
+
+_errdict_rev = {value: key for key, value in _errdict.items()}
+
+
+@set_module('numpy')
+def seterr(all=None, divide=None, over=None, under=None, invalid=None):
+ """
+ Set how floating-point errors are handled.
+
+ Note that operations on integer scalar types (such as `int16`) are
+ handled like floating point, and are affected by these settings.
+
+ Parameters
+ ----------
+ all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Set treatment for all types of floating-point errors at once:
+
+ - ignore: Take no action when the exception occurs.
+ - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
+ - raise: Raise a `FloatingPointError`.
+ - call: Call a function specified using the `seterrcall` function.
+ - print: Print a warning directly to ``stdout``.
+ - log: Record error in a Log object specified by `seterrcall`.
+
+ The default is not to change the current behavior.
+ divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for division by zero.
+ over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point overflow.
+ under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point underflow.
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for invalid floating-point operation.
+
+ Returns
+ -------
+ old_settings : dict
+ Dictionary containing the old settings.
+
+ See also
+ --------
+ seterrcall : Set a callback function for the 'call' mode.
+ geterr, geterrcall, errstate
+
+ Notes
+ -----
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
+
+ - Division by zero: infinite result obtained from finite numbers.
+ - Overflow: result too large to be expressed.
+ - Underflow: result so close to zero that some precision
+ was lost.
+ - Invalid operation: result is not an expressible number, typically
+ indicates that a NaN was produced.
+
+ .. [1] https://en.wikipedia.org/wiki/IEEE_754
+
+ Examples
+ --------
+ >>> old_settings = np.seterr(all='ignore') #seterr to known value
+ >>> np.seterr(over='raise')
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+ >>> np.seterr(**old_settings) # reset to default
+ {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
+
+ >>> np.int16(32000) * np.int16(3)
+ 30464
+ >>> old_settings = np.seterr(all='warn', over='raise')
+ >>> np.int16(32000) * np.int16(3)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ FloatingPointError: overflow encountered in scalar multiply
+
+ >>> old_settings = np.seterr(all='print')
+ >>> np.geterr()
+ {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
+ >>> np.int16(32000) * np.int16(3)
+ 30464
+
+ """
+
+ pyvals = umath.geterrobj()
+ old = geterr()
+
+ if divide is None:
+ divide = all or old['divide']
+ if over is None:
+ over = all or old['over']
+ if under is None:
+ under = all or old['under']
+ if invalid is None:
+ invalid = all or old['invalid']
+
+ maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
+ (_errdict[over] << SHIFT_OVERFLOW) +
+ (_errdict[under] << SHIFT_UNDERFLOW) +
+ (_errdict[invalid] << SHIFT_INVALID))
+
+ pyvals[1] = maskvalue
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def geterr():
+ """
+ Get the current way of handling floating-point errors.
+
+ Returns
+ -------
+ res : dict
+ A dictionary with keys "divide", "over", "under", and "invalid",
+ whose values are from the strings "ignore", "print", "log", "warn",
+ "raise", and "call". The keys represent possible floating-point
+ exceptions, and the values define how these exceptions are handled.
+
+ See Also
+ --------
+ geterrcall, seterr, seterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterr()
+ {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
+ >>> np.arange(3.) / np.arange(3.)
+ array([nan, 1., 1.])
+
+ >>> oldsettings = np.seterr(all='warn', over='raise')
+ >>> np.geterr()
+ {'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'}
+ >>> np.arange(3.) / np.arange(3.)
+ array([nan, 1., 1.])
+
+ """
+ maskvalue = umath.geterrobj()[1]
+ mask = 7
+ res = {}
+ val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
+ res['divide'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_OVERFLOW) & mask
+ res['over'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_UNDERFLOW) & mask
+ res['under'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_INVALID) & mask
+ res['invalid'] = _errdict_rev[val]
+ return res
+
+
+@set_module('numpy')
+def setbufsize(size):
+ """
+ Set the size of the buffer used in ufuncs.
+
+ Parameters
+ ----------
+ size : int
+ Size of buffer.
+
+ """
+ if size > 10e6:
+ raise ValueError("Buffer size, %s, is too big." % size)
+ if size < 5:
+ raise ValueError("Buffer size, %s, is too small." % size)
+ if size % 16 != 0:
+ raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
+
+ pyvals = umath.geterrobj()
+ old = getbufsize()
+ pyvals[0] = size
+ umath.seterrobj(pyvals)
+ return old
+
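+# Illustrative: old = setbufsize(2 ** 13) installs an 8192-byte ufunc
+# buffer and returns the previous size.
+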
+
+@set_module('numpy')
+def getbufsize():
+ """
+ Return the size of the buffer used in ufuncs.
+
+ Returns
+ -------
+ getbufsize : int
+ Size of ufunc buffer in bytes.
+
+ """
+ return umath.geterrobj()[0]
+
+
+@set_module('numpy')
+def seterrcall(func):
+ """
+ Set the floating-point error callback function or log object.
+
+ There are two ways to capture floating-point error messages. The first
+ is to set the error-handler to 'call', using `seterr`. Then, set
+ the function to call using this function.
+
+ The second is to set the error-handler to 'log', using `seterr`.
+ Floating-point errors then trigger a call to the 'write' method of
+ the provided object.
+
+ Parameters
+ ----------
+ func : callable f(err, flag) or object with write method
+ Function to call upon floating-point errors ('call'-mode) or
+ object whose 'write' method is used to log such messages ('log'-mode).
+
+ The call function takes two arguments. The first is a string describing
+ the type of error (such as "divide by zero", "overflow", "underflow",
+ or "invalid value"), and the second is the status flag. The flag is a
+ byte, whose four least-significant bits indicate the type of error, one
+ of "divide", "over", "under", "invalid"::
+
+ [0 0 0 0 divide over under invalid]
+
+ In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
+
+ If an object is provided, its write method should take one argument,
+ a string.
+
+ Returns
+ -------
+ h : callable, log instance or None
+ The old error handler.
+
+ See Also
+ --------
+ seterr, geterr, geterrcall
+
+ Examples
+ --------
+ Callback upon error:
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+
+ >>> saved_handler = np.seterrcall(err_handler)
+ >>> save_err = np.seterr(all='call')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([inf, inf, inf])
+
+ >>> np.seterrcall(saved_handler)
+ <function err_handler at 0x...>
+ >>> np.seterr(**save_err)
+ {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
+
+ Log error message:
+
+ >>> class Log:
+ ... def write(self, msg):
+ ... print("LOG: %s" % msg)
+ ...
+
+ >>> log = Log()
+ >>> saved_handler = np.seterrcall(log)
+ >>> save_err = np.seterr(all='log')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ LOG: Warning: divide by zero encountered in divide
+ array([inf, inf, inf])
+
+ >>> np.seterrcall(saved_handler)
+ <numpy.core.numeric.Log object at 0x...>
+ >>> np.seterr(**save_err)
+ {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
+
+ """
+ if func is not None and not isinstance(func, collections.abc.Callable):
+ if (not hasattr(func, 'write') or
+ not isinstance(func.write, collections.abc.Callable)):
+ raise ValueError("Only callable can be used as callback")
+ pyvals = umath.geterrobj()
+ old = geterrcall()
+ pyvals[2] = func
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def geterrcall():
+ """
+ Return the current callback function used on floating-point errors.
+
+ When the error handling for a floating-point error (one of "divide",
+ "over", "under", or "invalid") is set to 'call' or 'log', the function
+ that is called or the log instance that is written to is returned by
+ `geterrcall`. This function or log instance has been set with
+ `seterrcall`.
+
+ Returns
+ -------
+ errobj : callable, log instance or None
+ The current error handler. If no handler was set through `seterrcall`,
+ ``None`` is returned.
+
+ See Also
+ --------
+ seterrcall, seterr, geterr
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrcall() # we did not yet set a handler, returns None
+
+ >>> oldsettings = np.seterr(all='call')
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ >>> oldhandler = np.seterrcall(err_handler)
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([inf, inf, inf])
+
+ >>> cur_handler = np.geterrcall()
+ >>> cur_handler is err_handler
+ True
+
+ """
+ return umath.geterrobj()[2]
+
+
+class _unspecified:
+ pass
+
+
+_Unspecified = _unspecified()
+
+
+@set_module('numpy')
+class errstate(contextlib.ContextDecorator):
+ """
+ errstate(**kwargs)
+
+ Context manager for floating-point error handling.
+
+ Using an instance of `errstate` as a context manager allows statements in
+ that context to execute with a known error handling behavior. Upon entering
+ the context the error handling is set with `seterr` and `seterrcall`, and
+ upon exiting it is reset to what it was before.
+
+ .. versionchanged:: 1.17.0
+ `errstate` is also usable as a function decorator, saving
+ a level of indentation if an entire function is wrapped.
+ See :py:class:`contextlib.ContextDecorator` for more information.
+
+ Parameters
+ ----------
+ kwargs : {divide, over, under, invalid}
+ Keyword arguments. The valid keywords are the possible floating-point
+ exceptions. Each keyword should have a string value that defines the
+ treatment for the particular error. Possible values are
+ {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
+
+ See Also
+ --------
+ seterr, geterr, seterrcall, geterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> olderr = np.seterr(all='ignore') # Set error handling to known state.
+
+ >>> np.arange(3) / 0.
+ array([nan, inf, inf])
+ >>> with np.errstate(divide='warn'):
+ ... np.arange(3) / 0.
+ array([nan, inf, inf])
+
+ >>> np.sqrt(-1)
+ nan
+ >>> with np.errstate(invalid='raise'):
+ ... np.sqrt(-1)
+ Traceback (most recent call last):
+ File "<stdin>", line 2, in <module>
+ FloatingPointError: invalid value encountered in sqrt
+
+ Outside the context the error handling behavior has not changed:
+
+ >>> np.geterr()
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+
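+ An example of usage as a decorator:
+
+ >>> @np.errstate(invalid='ignore')
+ ... def safe_sqrt(x):
+ ... return np.sqrt(x)
+ >>> safe_sqrt(-1)
+ nan
+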
+ """
+
+ def __init__(self, *, call=_Unspecified, **kwargs):
+ self.call = call
+ self.kwargs = kwargs
+
+ def __enter__(self):
+ self.oldstate = seterr(**self.kwargs)
+ if self.call is not _Unspecified:
+ self.oldcall = seterrcall(self.call)
+
+ def __exit__(self, *exc_info):
+ seterr(**self.oldstate)
+ if self.call is not _Unspecified:
+ seterrcall(self.oldcall)
+
+
+def _setdef():
+ defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
+ umath.seterrobj(defval)
+
+
+# set the default values
+_setdef()
+
+
+NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False)
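+# context variable read where NEP 50 promotion warnings would be emitted; the
+# manager below sets it to temporarily silence those warnings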
+
+@set_module('numpy')
+@contextlib.contextmanager
+def _no_nep50_warning():
+ """
+ Context manager to disable NEP 50 warnings. This context manager is
+ only relevant if the NEP 50 warnings are enabled globally (which is not
+ thread/context safe).
+
+ This warning context manager itself is fully safe, however.
+ """
+ token = NO_NEP50_WARNING.set(True)
+ try:
+ yield
+ finally:
+ NO_NEP50_WARNING.reset(token)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi b/venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi
new file mode 100644
index 00000000..f5650450
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_ufunc_config.pyi
@@ -0,0 +1,37 @@
+from collections.abc import Callable
+from typing import Any, Literal, TypedDict
+
+from numpy import _SupportsWrite
+
+_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
+_ErrFunc = Callable[[str, int], Any]
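+# an error handler receives the error type name and the status-flag byte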
+
+class _ErrDict(TypedDict):
+ divide: _ErrKind
+ over: _ErrKind
+ under: _ErrKind
+ invalid: _ErrKind
+
+class _ErrDictOptional(TypedDict, total=False):
+ all: None | _ErrKind
+ divide: None | _ErrKind
+ over: None | _ErrKind
+ under: None | _ErrKind
+ invalid: None | _ErrKind
+
+def seterr(
+ all: None | _ErrKind = ...,
+ divide: None | _ErrKind = ...,
+ over: None | _ErrKind = ...,
+ under: None | _ErrKind = ...,
+ invalid: None | _ErrKind = ...,
+) -> _ErrDict: ...
+def geterr() -> _ErrDict: ...
+def setbufsize(size: int) -> int: ...
+def getbufsize() -> int: ...
+def seterrcall(
+ func: None | _ErrFunc | _SupportsWrite[str]
+) -> None | _ErrFunc | _SupportsWrite[str]: ...
+def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...
+
+# See `numpy/__init__.pyi` for the `errstate` class and `_no_nep50_warning`
diff --git a/venv/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so
new file mode 100755
index 00000000..1a714bd6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/_umath_tests.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/arrayprint.py b/venv/lib/python3.9/site-packages/numpy/core/arrayprint.py
new file mode 100644
index 00000000..957cecf1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/arrayprint.py
@@ -0,0 +1,1701 @@
+"""Array printing function
+
+$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
+
+"""
+__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
+ "set_printoptions", "get_printoptions", "printoptions",
+ "format_float_positional", "format_float_scientific"]
+__docformat__ = 'restructuredtext'
+
+#
+# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
+# last revision: 1996-3-13
+# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
+# and by Perry Greenfield 2000-4-1 for numarray
+# and by Travis Oliphant 2005-8-22 for numpy
+
+
+# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
+# scalars but for different purposes. scalartypes.c.src has str/reprs for when
+# the scalar is printed on its own, while arrayprint.py has strs for when
+# scalars are printed inside an ndarray. Only the latter strs are currently
+# user-customizable.
+
+import functools
+import numbers
+import sys
+try:
+ from _thread import get_ident
+except ImportError:
+ from _dummy_thread import get_ident
+
+import numpy as np
+from . import numerictypes as _nt
+from .umath import absolute, isinf, isfinite, isnat
+from . import multiarray
+from .multiarray import (array, dragon4_positional, dragon4_scientific,
+ datetime_as_string, datetime_data, ndarray,
+ set_legacy_print_mode)
+from .fromnumeric import any
+from .numeric import concatenate, asarray, errstate
+from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
+ flexible)
+from .overrides import array_function_dispatch, set_module
+import operator
+import warnings
+import contextlib
+
+_format_options = {
+ 'edgeitems': 3, # repr N leading and trailing items of each dimension
+ 'threshold': 1000, # total items > triggers array summarization
+ 'floatmode': 'maxprec',
+ 'precision': 8, # precision of floating point representations
+ 'suppress': False, # suppress printing small floating values in exp format
+ 'linewidth': 75,
+ 'nanstr': 'nan',
+ 'infstr': 'inf',
+ 'sign': '-',
+ 'formatter': None,
+ # Internally stored as an int to simplify comparisons; converted from/to
+ # str/False on the way in/out.
+ 'legacy': sys.maxsize}
+
+def _make_options_dict(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ sign=None, formatter=None, floatmode=None, legacy=None):
+ """
+ Make a dictionary out of the non-None arguments, plus conversion of
+ *legacy* and sanity checks.
+ """
+
+ options = {k: v for k, v in locals().items() if v is not None}
+
+ if suppress is not None:
+ options['suppress'] = bool(suppress)
+
+ modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
+ if floatmode not in modes + [None]:
+ raise ValueError("floatmode option must be one of " +
+ ", ".join('"{}"'.format(m) for m in modes))
+
+ if sign not in [None, '-', '+', ' ']:
+ raise ValueError("sign option must be one of ' ', '+', or '-'")
+
+ if legacy == False:
+ options['legacy'] = sys.maxsize
+ elif legacy == '1.13':
+ options['legacy'] = 113
+ elif legacy == '1.21':
+ options['legacy'] = 121
+ elif legacy is None:
+ pass # OK, do nothing.
+ else:
+ warnings.warn(
+ "legacy printing option can currently only be '1.13', '1.21', or "
+ "`False`", stacklevel=3)
+
+ if threshold is not None:
+ # forbid the bad threshold arg suggested by stack overflow, gh-12351
+ if not isinstance(threshold, numbers.Number):
+ raise TypeError("threshold must be numeric")
+ if np.isnan(threshold):
+ raise ValueError("threshold must be non-NAN, try "
+ "sys.maxsize for untruncated representation")
+
+ if precision is not None:
+ # forbid the bad precision arg as suggested by issue #18254
+ try:
+ options['precision'] = operator.index(precision)
+ except TypeError as e:
+ raise TypeError('precision must be an integer') from e
+
+ return options
+
+
+@set_module('numpy')
+def set_printoptions(precision=None, threshold=None, edgeitems=None,
+ linewidth=None, suppress=None, nanstr=None, infstr=None,
+ formatter=None, sign=None, floatmode=None, *, legacy=None):
+ """
+ Set printing options.
+
+ These options determine the way floating point numbers, arrays and
+ other NumPy objects are displayed.
+
+ Parameters
+ ----------
+ precision : int or None, optional
+ Number of digits of precision for floating point output (default 8).
+ May be None if `floatmode` is not `fixed`, to print as many digits as
+ necessary to uniquely specify the value.
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr (default 1000).
+ To always use the full repr without summarization, pass `sys.maxsize`.
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension (default 3).
+ linewidth : int, optional
+ The number of characters per line for the purpose of inserting
+ line breaks (default 75).
+ suppress : bool, optional
+ If True, always print floating point numbers using fixed point
+ notation, in which case numbers equal to zero in the current precision
+ will print as zero. If False, then scientific notation is used when
+ absolute value of the smallest number is < 1e-4 or the ratio of the
+ maximum absolute value to the minimum is > 1e3. The default is False.
+ nanstr : str, optional
+ String representation of floating point not-a-number (default nan).
+ infstr : str, optional
+ String representation of floating point infinity (default inf).
+ sign : string, either '-', '+', or ' ', optional
+ Controls printing of the sign of floating-point types. If '+', always
+ print the sign of positive values. If ' ', always print a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values. (default '-')
+ formatter : dict of callables, optional
+ If not None, the keys should indicate the type(s) that the respective
+ formatting function applies to. Callables should return a string.
+ Types that are not specified (by their corresponding keys) are handled
+ by the default formatters. Individual types for which a formatter
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+ - 'object' : `np.object_` arrays
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'numpystr'
+ floatmode : str, optional
+ Controls the interpretation of the `precision` option for
+ floating-point types. Can take the following values
+ (default maxprec_equal):
+
+ * 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ * 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ * 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ * 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
+ legacy : string or `False`, optional
+ If set to the string `'1.13'` enables 1.13 legacy printing mode. This
+ approximates numpy 1.13 print output by including a space in the sign
+ position of floats and different behavior for 0d arrays. This also
+ enables 1.21 legacy printing mode (described below).
+
+ If set to the string `'1.21'` enables 1.21 legacy printing mode. This
+ approximates numpy 1.21 print output of complex structured dtypes
+ by not inserting spaces after commas that separate fields and after
+ colons.
+
+ If set to `False`, disables legacy mode.
+
+ Unrecognized strings will be ignored with a warning for forward
+ compatibility.
+
+ .. versionadded:: 1.14.0
+ .. versionchanged:: 1.22.0
+
+ See Also
+ --------
+ get_printoptions, printoptions, set_string_function, array2string
+
+ Notes
+ -----
+ `formatter` is always reset with a call to `set_printoptions`.
+
+ Use `printoptions` as a context manager to set the values temporarily.
+
+ Examples
+ --------
+ Floating point precision can be set:
+
+ >>> np.set_printoptions(precision=4)
+ >>> np.array([1.123456789])
+ array([1.1235])
+
+ Long arrays can be summarized:
+
+ >>> np.set_printoptions(threshold=5)
+ >>> np.arange(10)
+ array([0, 1, 2, ..., 7, 8, 9])
+
+ Small results can be suppressed:
+
+ >>> eps = np.finfo(float).eps
+ >>> x = np.arange(4.)
+ >>> x**2 - (x + eps)**2
+ array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
+ >>> np.set_printoptions(suppress=True)
+ >>> x**2 - (x + eps)**2
+ array([-0., -0., 0., 0.])
+
+ A custom formatter can be used to display array elements as desired:
+
+ >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+ >>> x = np.arange(3)
+ >>> x
+ array([int: 0, int: -1, int: -2])
+ >>> np.set_printoptions() # formatter gets reset
+ >>> x
+ array([0, 1, 2])
+
+ To put back the default options, you can use:
+
+ >>> np.set_printoptions(edgeitems=3, infstr='inf',
+ ... linewidth=75, nanstr='nan', precision=8,
+ ... suppress=False, threshold=1000, formatter=None)
+
+ Also to temporarily override options, use `printoptions` as a context manager:
+
+ >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+ ... np.linspace(0, 10, 10)
+ array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
+
+ """
+ opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+ suppress, nanstr, infstr, sign, formatter,
+ floatmode, legacy)
+ # formatter is always reset
+ opt['formatter'] = formatter
+ _format_options.update(opt)
+
+ # set the C variable for legacy mode
+ if _format_options['legacy'] == 113:
+ set_legacy_print_mode(113)
+ # reset the sign option in legacy mode to avoid confusion
+ _format_options['sign'] = '-'
+ elif _format_options['legacy'] == 121:
+ set_legacy_print_mode(121)
+ elif _format_options['legacy'] == sys.maxsize:
+ set_legacy_print_mode(0)
+
+
+@set_module('numpy')
+def get_printoptions():
+ """
+ Return the current print options.
+
+ Returns
+ -------
+ print_opts : dict
+ Dictionary of current print options with keys
+
+ - precision : int
+ - threshold : int
+ - edgeitems : int
+ - linewidth : int
+ - suppress : bool
+ - nanstr : str
+ - infstr : str
+ - formatter : dict of callables
+ - sign : str
+
+ For a full description of these options, see `set_printoptions`.
+
+ See Also
+ --------
+ set_printoptions, printoptions, set_string_function
+
+ """
+ opts = _format_options.copy()
+ opts['legacy'] = {
+ 113: '1.13', 121: '1.21', sys.maxsize: False,
+ }[opts['legacy']]
+ return opts
+
+
+def _get_legacy_print_mode():
+ """Return the legacy print mode as an int."""
+ return _format_options['legacy']
+
+
+@set_module('numpy')
+@contextlib.contextmanager
+def printoptions(*args, **kwargs):
+ """Context manager for setting print options.
+
+ Set print options for the scope of the `with` block, and restore the old
+ options at the end. See `set_printoptions` for the full description of
+ available options.
+
+ Examples
+ --------
+
+ >>> from numpy.testing import assert_equal
+ >>> with np.printoptions(precision=2):
+ ... np.array([2.0]) / 3
+ array([0.67])
+
+ The `as`-clause of the `with`-statement gives the current print options:
+
+ >>> with np.printoptions(precision=2) as opts:
+ ... assert_equal(opts, np.get_printoptions())
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ """
+ opts = np.get_printoptions()
+ try:
+ np.set_printoptions(*args, **kwargs)
+ yield np.get_printoptions()
+ finally:
+ np.set_printoptions(**opts)
+
+
+def _leading_trailing(a, edgeitems, index=()):
+ """
+ Keep only the N-D corners (leading and trailing edges) of an array.
+
+ Should be passed a base-class ndarray, since it makes no guarantees about
+ preserving subclasses.
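+
+ For example, with ``edgeitems=2`` an input of shape (10,) is reduced to
+ its four corner elements, indices 0, 1, 8 and 9.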
+ """
+ axis = len(index)
+ if axis == a.ndim:
+ return a[index]
+
+ if a.shape[axis] > 2*edgeitems:
+ return concatenate((
+ _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
+ _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
+ ), axis=axis)
+ else:
+ return _leading_trailing(a, edgeitems, index + np.index_exp[:])
+
+
+def _object_format(o):
+ """ Object arrays containing lists should be printed unambiguously """
+ if type(o) is list:
+ fmt = 'list({!r})'
+ else:
+ fmt = '{!r}'
+ return fmt.format(o)
+
+def repr_format(x):
+ return repr(x)
+
+def str_format(x):
+ return str(x)
+
+def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
+ formatter, **kwargs):
+ # note: extra arguments in kwargs are ignored
+
+ # wrapped in lambdas to avoid taking a code path with the wrong type of data
+ formatdict = {
+ 'bool': lambda: BoolFormat(data),
+ 'int': lambda: IntegerFormat(data),
+ 'float': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longfloat': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'complexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longcomplexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'datetime': lambda: DatetimeFormat(data, legacy=legacy),
+ 'timedelta': lambda: TimedeltaFormat(data),
+ 'object': lambda: _object_format,
+ 'void': lambda: str_format,
+ 'numpystr': lambda: repr_format}
+
+ # we need to wrap values in `formatter` in a lambda, so that the interface
+ # is the same as the above values.
+ def indirect(x):
+ return lambda: x
+
+ if formatter is not None:
+ fkeys = [k for k in formatter.keys() if formatter[k] is not None]
+ if 'all' in fkeys:
+ for key in formatdict.keys():
+ formatdict[key] = indirect(formatter['all'])
+ if 'int_kind' in fkeys:
+ for key in ['int']:
+ formatdict[key] = indirect(formatter['int_kind'])
+ if 'float_kind' in fkeys:
+ for key in ['float', 'longfloat']:
+ formatdict[key] = indirect(formatter['float_kind'])
+ if 'complex_kind' in fkeys:
+ for key in ['complexfloat', 'longcomplexfloat']:
+ formatdict[key] = indirect(formatter['complex_kind'])
+ if 'str_kind' in fkeys:
+ formatdict['numpystr'] = indirect(formatter['str_kind'])
+ for key in formatdict.keys():
+ if key in fkeys:
+ formatdict[key] = indirect(formatter[key])
+
+ return formatdict
+
+def _get_format_function(data, **options):
+ """
+ Find the right formatting function for the array's dtype.
+ """
+ dtype_ = data.dtype
+ dtypeobj = dtype_.type
+ formatdict = _get_formatdict(data, **options)
+ if dtypeobj is None:
+ return formatdict["numpystr"]()
+ elif issubclass(dtypeobj, _nt.bool_):
+ return formatdict['bool']()
+ elif issubclass(dtypeobj, _nt.integer):
+ if issubclass(dtypeobj, _nt.timedelta64):
+ return formatdict['timedelta']()
+ else:
+ return formatdict['int']()
+ elif issubclass(dtypeobj, _nt.floating):
+ if issubclass(dtypeobj, _nt.longfloat):
+ return formatdict['longfloat']()
+ else:
+ return formatdict['float']()
+ elif issubclass(dtypeobj, _nt.complexfloating):
+ if issubclass(dtypeobj, _nt.clongfloat):
+ return formatdict['longcomplexfloat']()
+ else:
+ return formatdict['complexfloat']()
+ elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
+ return formatdict['numpystr']()
+ elif issubclass(dtypeobj, _nt.datetime64):
+ return formatdict['datetime']()
+ elif issubclass(dtypeobj, _nt.object_):
+ return formatdict['object']()
+ elif issubclass(dtypeobj, _nt.void):
+ if dtype_.names is not None:
+ return StructuredVoidFormat.from_data(data, **options)
+ else:
+ return formatdict['void']()
+ else:
+ return formatdict['numpystr']()
+
+
+def _recursive_guard(fillvalue='...'):
+ """
+ Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
+
+ Decorates a function such that if it calls itself with the same first
+ argument, it returns `fillvalue` instead of recursing.
+
+ Largely copied from reprlib.recursive_repr
+ """
+
+ def decorating_function(f):
+ repr_running = set()
+
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ return f(self, *args, **kwargs)
+ finally:
+ repr_running.discard(key)
+
+ return wrapper
+
+ return decorating_function
+
+
+# gracefully handle recursive calls, when object arrays contain themselves
+@_recursive_guard()
+def _array2string(a, options, separator=' ', prefix=""):
+ # The formatter __init__s in _get_format_function cannot deal with
+ # subclasses yet, and we also need to avoid recursion issues in
+ # _formatArray with subclasses which return 0d arrays in place of scalars
+ data = asarray(a)
+ if a.shape == ():
+ a = data
+
+ if a.size > options['threshold']:
+ summary_insert = "..."
+ data = _leading_trailing(data, options['edgeitems'])
+ else:
+ summary_insert = ""
+
+ # find the right formatting function for the array
+ format_function = _get_format_function(data, **options)
+
+ # skip over "["
+ next_line_prefix = " "
+ # skip over array(
+ next_line_prefix += " "*len(prefix)
+
+ lst = _formatArray(a, format_function, options['linewidth'],
+ next_line_prefix, separator, options['edgeitems'],
+ summary_insert, options['legacy'])
+ return lst
+
+
+def _array2string_dispatcher(
+ a, max_line_width=None, precision=None,
+ suppress_small=None, separator=None, prefix=None,
+ style=None, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix=None,
+ *, legacy=None):
+ return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
+def array2string(a, max_line_width=None, precision=None,
+ suppress_small=None, separator=' ', prefix="",
+ style=np._NoValue, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix="",
+ *, legacy=None):
+ """
+ Return a string representation of an array.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int or None, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+ separator : str, optional
+ Inserted between elements.
+ prefix : str, optional
+ suffix : str, optional
+ The lengths of the prefix and suffix strings are used to align and
+ wrap the output, respectively. An array is typically printed as::
+
+ prefix + array2string(a) + suffix
+
+ The output is left-padded by the length of the prefix string, and
+ wrapping is forced at the column ``max_line_width - len(suffix)``.
+ Note that the contents of the prefix and suffix strings are not
+ included in the output.
+ style : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.14.0
+ formatter : dict of callables, optional
+ If not None, the keys should indicate the type(s) that the respective
+ formatting function applies to. Callables should return a string.
+ Types that are not specified (by their corresponding keys) are handled
+ by the default formatters. Individual types for which a formatter
+ can be set are:
+
+ - 'bool'
+ - 'int'
+ - 'timedelta' : a `numpy.timedelta64`
+ - 'datetime' : a `numpy.datetime64`
+ - 'float'
+ - 'longfloat' : 128-bit floats
+ - 'complexfloat'
+ - 'longcomplexfloat' : composed of two 128-bit floats
+ - 'void' : type `numpy.void`
+ - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
+
+ Other keys that can be used to set a group of types at once are:
+
+ - 'all' : sets all types
+ - 'int_kind' : sets 'int'
+ - 'float_kind' : sets 'float' and 'longfloat'
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
+ - 'str_kind' : sets 'numpystr'
+ threshold : int, optional
+ Total number of array elements which trigger summarization
+ rather than full repr.
+ Defaults to ``numpy.get_printoptions()['threshold']``.
+ edgeitems : int, optional
+ Number of array items in summary at beginning and end of
+ each dimension.
+ Defaults to ``numpy.get_printoptions()['edgeitems']``.
+ sign : string, either '-', '+', or ' ', optional
+ Controls printing of the sign of floating-point types. If '+', always
+ print the sign of positive values. If ' ', always print a space
+ (whitespace character) in the sign position of positive values. If
+ '-', omit the sign character of positive values.
+ Defaults to ``numpy.get_printoptions()['sign']``.
+ floatmode : str, optional
+ Controls the interpretation of the `precision` option for
+ floating-point types.
+ Defaults to ``numpy.get_printoptions()['floatmode']``.
+ Can take the following values:
+
+ - 'fixed': Always print exactly `precision` fractional digits,
+ even if this would print more or fewer digits than
+ necessary to specify the value uniquely.
+ - 'unique': Print the minimum number of fractional digits necessary
+ to represent each value uniquely. Different elements may
+ have a different number of digits. The value of the
+ `precision` option is ignored.
+ - 'maxprec': Print at most `precision` fractional digits, but if
+ an element can be uniquely represented with fewer digits
+ only print it with that many.
+ - 'maxprec_equal': Print at most `precision` fractional digits,
+ but if every element in the array can be uniquely
+ represented with an equal number of fewer digits, use that
+ many digits for all elements.
+ legacy : string or `False`, optional
+ If set to the string `'1.13'` enables 1.13 legacy printing mode. This
+ approximates numpy 1.13 print output by including a space in the sign
+ position of floats and different behavior for 0d arrays. If set to
+ `False`, disables legacy mode. Unrecognized strings will be ignored
+ with a warning for forward compatibility.
+
+ .. versionadded:: 1.14.0
+
+ Returns
+ -------
+ array_str : str
+ String representation of the array.
+
+ Raises
+ ------
+ TypeError
+ if a callable in `formatter` does not return a string.
+
+ See Also
+ --------
+ array_str, array_repr, set_printoptions, get_printoptions
+
+ Notes
+ -----
+ If a formatter is specified for a certain type, the `precision` keyword is
+ ignored for that type.
+
+ This is a very flexible function; `array_repr` and `array_str` use
+ `array2string` internally, so keywords with the same name should work
+ identically in all three functions.
+
+ Examples
+ --------
+ >>> x = np.array([1e-16,1,2,3])
+ >>> np.array2string(x, precision=2, separator=',',
+ ... suppress_small=True)
+ '[0.,1.,2.,3.]'
+
+ >>> x = np.arange(3.)
+ >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
+ '[0.00 1.00 2.00]'
+
+ >>> x = np.arange(3)
+ >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
+ '[0x0 0x1 0x2]'
+
+ """
+
+ overrides = _make_options_dict(precision, threshold, edgeitems,
+ max_line_width, suppress_small, None, None,
+ sign, formatter, floatmode, legacy)
+ options = _format_options.copy()
+ options.update(overrides)
+
+ if options['legacy'] <= 113:
+ if style is np._NoValue:
+ style = repr
+
+ if a.shape == () and a.dtype.names is None:
+ return style(a.item())
+ elif style is not np._NoValue:
+ # Deprecation 11-9-2017 v1.14
+ warnings.warn("'style' argument is deprecated and no longer functional"
+ " except in 1.13 'legacy' mode",
+ DeprecationWarning, stacklevel=3)
+
+ if options['legacy'] > 113:
+ options['linewidth'] -= len(suffix)
+
+ # treat as a null array if any of shape elements == 0
+ if a.size == 0:
+ return "[]"
+
+ return _array2string(a, options, separator, prefix)
+
+
+def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
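+ # append `word` to `line`, first flushing `line` into `s` when the
+ # addition would overflow `line_width`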
+ needs_wrap = len(line) + len(word) > line_width
+ if legacy > 113:
+ # don't wrap lines if it won't help
+ if len(line) <= len(next_line_prefix):
+ needs_wrap = False
+
+ if needs_wrap:
+ s += line.rstrip() + "\n"
+ line = next_line_prefix
+ line += word
+ return s, line
+
+
+def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
+ """
+ Extends line with nicely formatted (possibly multi-line) string ``word``.
+ """
+ words = word.splitlines()
+ if len(words) == 1 or legacy <= 113:
+ return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
+
+ max_word_length = max(len(word) for word in words)
+ if (len(line) + max_word_length > line_width and
+ len(line) > len(next_line_prefix)):
+ s += line.rstrip() + '\n'
+ line = next_line_prefix + words[0]
+ indent = next_line_prefix
+ else:
+ indent = len(line)*' '
+ line += words[0]
+
+ for word in words[1::]:
+ s += line.rstrip() + '\n'
+ line = indent + word
+
+ suffix_length = max_word_length - len(words[-1])
+ line += suffix_length*' '
+
+ return s, line
+
+def _formatArray(a, format_function, line_width, next_line_prefix,
+ separator, edge_items, summary_insert, legacy):
+ """formatArray is designed for two modes of operation:
+
+ 1. Full output
+
+ 2. Summarized output
+
+ """
+ def recurser(index, hanging_indent, curr_width):
+ """
+ By using this local function, we don't need to recurse with all the
+ arguments. Since this function is not created recursively, the cost is
+ not significant
+ """
+ axis = len(index)
+ axes_left = a.ndim - axis
+
+ if axes_left == 0:
+ return format_function(a[index])
+
+ # when recursing, add a space to align with the [ added, and reduce the
+ # length of the line by 1
+ next_hanging_indent = hanging_indent + ' '
+ if legacy <= 113:
+ next_width = curr_width
+ else:
+ next_width = curr_width - len(']')
+
+ a_len = a.shape[axis]
+ show_summary = summary_insert and 2*edge_items < a_len
+ if show_summary:
+ leading_items = edge_items
+ trailing_items = edge_items
+ else:
+ leading_items = 0
+ trailing_items = a_len
+
+ # stringify the array with the hanging indent on the first line too
+ s = ''
+
+ # last axis (rows) - wrap elements if they would not fit on one line
+ if axes_left == 1:
+ # the length up until the beginning of the separator / bracket
+ if legacy <= 113:
+ elem_width = curr_width - len(separator.rstrip())
+ else:
+ elem_width = curr_width - max(len(separator.rstrip()), len(']'))
+
+ line = hanging_indent
+ for i in range(leading_items):
+ word = recurser(index + (i,), next_hanging_indent, next_width)
+ s, line = _extendLine_pretty(
+ s, line, word, elem_width, hanging_indent, legacy)
+ line += separator
+
+ if show_summary:
+ s, line = _extendLine(
+ s, line, summary_insert, elem_width, hanging_indent, legacy)
+ if legacy <= 113:
+ line += ", "
+ else:
+ line += separator
+
+ for i in range(trailing_items, 1, -1):
+ word = recurser(index + (-i,), next_hanging_indent, next_width)
+ s, line = _extendLine_pretty(
+ s, line, word, elem_width, hanging_indent, legacy)
+ line += separator
+
+ if legacy <= 113:
+ # width of the separator is not considered on 1.13
+ elem_width = curr_width
+ word = recurser(index + (-1,), next_hanging_indent, next_width)
+ s, line = _extendLine_pretty(
+ s, line, word, elem_width, hanging_indent, legacy)
+
+ s += line
+
+ # other axes - insert newlines between rows
+ else:
+ s = ''
+ line_sep = separator.rstrip() + '\n'*(axes_left - 1)
+
+ for i in range(leading_items):
+ nested = recurser(index + (i,), next_hanging_indent, next_width)
+ s += hanging_indent + nested + line_sep
+
+ if show_summary:
+ if legacy <= 113:
+ # trailing space, fixed nbr of newlines, and fixed separator
+ s += hanging_indent + summary_insert + ", \n"
+ else:
+ s += hanging_indent + summary_insert + line_sep
+
+ for i in range(trailing_items, 1, -1):
+ nested = recurser(index + (-i,), next_hanging_indent,
+ next_width)
+ s += hanging_indent + nested + line_sep
+
+ nested = recurser(index + (-1,), next_hanging_indent, next_width)
+ s += hanging_indent + nested
+
+ # remove the hanging indent, and wrap in []
+ s = '[' + s[len(hanging_indent):] + ']'
+ return s
+
+ try:
+ # invoke the recursive part with an initial index and prefix
+ return recurser(index=(),
+ hanging_indent=next_line_prefix,
+ curr_width=line_width)
+ finally:
+ # recursive closures have a cyclic reference to themselves, which
+ # requires gc to collect (gh-10620). To avoid this problem, for
+ # performance and PyPy friendliness, we break the cycle:
+ recurser = None
+
+def _none_or_positive_arg(x, name):
+ if x is None:
+ return -1
+ if x < 0:
+ raise ValueError("{} must be >= 0".format(name))
+ return x
+
+class FloatingFormat:
+ """ Formatter for subtypes of np.floating """
+ def __init__(self, data, precision, floatmode, suppress_small, sign=False,
+ *, legacy=None):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ self._legacy = legacy
+ if self._legacy <= 113:
+ # when not 0d, legacy does not support '-'
+ if data.shape != () and sign == '-':
+ sign = ' '
+
+ self.floatmode = floatmode
+ if floatmode == 'unique':
+ self.precision = None
+ else:
+ self.precision = precision
+
+ self.precision = _none_or_positive_arg(self.precision, 'precision')
+
+ self.suppress_small = suppress_small
+ self.sign = sign
+ self.exp_format = False
+ self.large_exponent = False
+
+ self.fillFormat(data)
+
+ def fillFormat(self, data):
+ # only the finite values are used to compute the number of digits
+ finite_vals = data[isfinite(data)]
+
+ # choose exponential mode based on the non-zero finite values:
+ abs_non_zero = absolute(finite_vals[finite_vals != 0])
+ if len(abs_non_zero) != 0:
+ max_val = np.max(abs_non_zero)
+ min_val = np.min(abs_non_zero)
+ with errstate(over='ignore'): # division can overflow
+ if max_val >= 1.e8 or (not self.suppress_small and
+ (min_val < 0.0001 or max_val/min_val > 1000.)):
+ self.exp_format = True
+
+ # do a first pass of printing all the numbers, to determine sizes
+ if len(finite_vals) == 0:
+ self.pad_left = 0
+ self.pad_right = 0
+ self.trim = '.'
+ self.exp_size = -1
+ self.unique = True
+ self.min_digits = None
+ elif self.exp_format:
+ trim, unique = '.', True
+ if self.floatmode == 'fixed' or self._legacy <= 113:
+ trim, unique = 'k', False
+ strs = (dragon4_scientific(x, precision=self.precision,
+ unique=unique, trim=trim, sign=self.sign == '+')
+ for x in finite_vals)
+ frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
+ int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
+ self.exp_size = max(len(s) for s in exp_strs) - 1
+
+ self.trim = 'k'
+ self.precision = max(len(s) for s in frac_part)
+ self.min_digits = self.precision
+ self.unique = unique
+
+ # for back-compat with np 1.13, use 2 spaces & sign and full prec
+ if self._legacy <= 113:
+ self.pad_left = 3
+ else:
+ # this should be only 1 or 2. Can be calculated from sign.
+ self.pad_left = max(len(s) for s in int_part)
+ # pad_right is only needed for nan length calculation
+ self.pad_right = self.exp_size + 2 + self.precision
+ else:
+ trim, unique = '.', True
+ if self.floatmode == 'fixed':
+ trim, unique = 'k', False
+ strs = (dragon4_positional(x, precision=self.precision,
+ fractional=True,
+ unique=unique, trim=trim,
+ sign=self.sign == '+')
+ for x in finite_vals)
+ int_part, frac_part = zip(*(s.split('.') for s in strs))
+ if self._legacy <= 113:
+ self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
+ else:
+ self.pad_left = max(len(s) for s in int_part)
+ self.pad_right = max(len(s) for s in frac_part)
+ self.exp_size = -1
+ self.unique = unique
+
+ if self.floatmode in ['fixed', 'maxprec_equal']:
+ self.precision = self.min_digits = self.pad_right
+ self.trim = 'k'
+ else:
+ self.trim = '.'
+ self.min_digits = 0
+
+ if self._legacy > 113:
+ # account for sign = ' ' by adding one to pad_left
+ if self.sign == ' ' and not any(np.signbit(finite_vals)):
+ self.pad_left += 1
+
+ # if there are non-finite values, may need to increase pad_left
+ if data.size != finite_vals.size:
+ neginf = self.sign != '-' or any(data[isinf(data)] < 0)
+ nanlen = len(_format_options['nanstr'])
+ inflen = len(_format_options['infstr']) + neginf
+ offset = self.pad_right + 1 # +1 for decimal pt
+ self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
+
+ def __call__(self, x):
+ if not np.isfinite(x):
+ with errstate(invalid='ignore'):
+ if np.isnan(x):
+ sign = '+' if self.sign == '+' else ''
+ ret = sign + _format_options['nanstr']
+ else: # isinf
+ sign = '-' if x < 0 else '+' if self.sign == '+' else ''
+ ret = sign + _format_options['infstr']
+ return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
+
+ if self.exp_format:
+ return dragon4_scientific(x,
+ precision=self.precision,
+ min_digits=self.min_digits,
+ unique=self.unique,
+ trim=self.trim,
+ sign=self.sign == '+',
+ pad_left=self.pad_left,
+ exp_digits=self.exp_size)
+ else:
+ return dragon4_positional(x,
+ precision=self.precision,
+ min_digits=self.min_digits,
+ unique=self.unique,
+ fractional=True,
+ trim=self.trim,
+ sign=self.sign == '+',
+ pad_left=self.pad_left,
+ pad_right=self.pad_right)
+
+
+@set_module('numpy')
+def format_float_scientific(x, precision=None, unique=True, trim='k',
+ sign=False, pad_left=None, exp_digits=None,
+ min_digits=None):
+ """
+ Format a floating-point scalar as a decimal string in scientific notation.
+
+ Provides control over rounding, trimming and padding. Uses and assumes
+ IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+ Parameters
+ ----------
+ x : python float or numpy floating scalar
+ Value to format.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
+ unique : boolean, optional
+ If `True`, use a digit-generation strategy which gives the shortest
+ representation which uniquely identifies the floating-point number from
+ other values of the same type, by judicious rounding. If `precision`
+ is given, fewer digits than necessary can be printed. If `min_digits`
+ is given, more digits can be printed, in which case the last digit is
+ rounded with unbiased rounding.
+ If `False`, digits are generated as if printing an infinite-precision
+ value and stopping after `precision` digits, rounding the remaining
+ value with unbiased rounding.
+ trim : one of 'k', '.', '0', '-', optional
+ Controls post-processing trimming of trailing digits, as follows:
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
+ sign : boolean, optional
+ Whether to show the sign for positive values.
+ pad_left : non-negative integer, optional
+ Pad the left side of the string with whitespace until at least that
+ many characters are to the left of the decimal point.
+ exp_digits : non-negative integer, optional
+ Pad the exponent with zeros until it contains at least this many digits.
+ If omitted, the exponent will be at least 2 digits.
+ min_digits : non-negative integer or None, optional
+ Minimum number of digits to print. This only has an effect for
+ `unique=True`. In that case more digits than necessary to uniquely
+ identify the value may be printed, with the last digit rounded with
+ unbiased rounding.
+
+ .. versionadded:: 1.21.0
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_positional
+
+ Examples
+ --------
+ >>> np.format_float_scientific(np.float32(np.pi))
+ '3.1415927e+00'
+ >>> s = np.float32(1.23e24)
+ >>> np.format_float_scientific(s, unique=False, precision=15)
+ '1.230000071797338e+24'
+ >>> np.format_float_scientific(s, exp_digits=4)
+ '1.23e+0024'
+ """
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
+ min_digits = _none_or_positive_arg(min_digits, 'min_digits')
+ if min_digits > 0 and precision > 0 and min_digits > precision:
+ raise ValueError("min_digits must be less than or equal to precision")
+ return dragon4_scientific(x, precision=precision, unique=unique,
+ trim=trim, sign=sign, pad_left=pad_left,
+ exp_digits=exp_digits, min_digits=min_digits)
+
+
+@set_module('numpy')
+def format_float_positional(x, precision=None, unique=True,
+ fractional=True, trim='k', sign=False,
+ pad_left=None, pad_right=None, min_digits=None):
+ """
+ Format a floating-point scalar as a decimal string in positional notation.
+
+ Provides control over rounding, trimming and padding. Uses and assumes
+ IEEE unbiased rounding. Uses the "Dragon4" algorithm.
+
+ Parameters
+ ----------
+ x : python float or numpy floating scalar
+ Value to format.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
+ unique : boolean, optional
+ If `True`, use a digit-generation strategy which gives the shortest
+ representation which uniquely identifies the floating-point number from
+ other values of the same type, by judicious rounding. If `precision`
+ is given, fewer digits than necessary can be printed, or if `min_digits`
+ is given, more digits can be printed, in which case the last digit is
+ rounded with unbiased rounding.
+ If `False`, digits are generated as if printing an infinite-precision
+ value and stopping after `precision` digits, rounding the remaining
+ value with unbiased rounding.
+ fractional : boolean, optional
+ If `True`, the cutoffs of `precision` and `min_digits` refer to the
+ total number of digits after the decimal point, including leading
+ zeros.
+ If `False`, `precision` and `min_digits` refer to the total number of
+ significant digits, before or after the decimal point, ignoring leading
+ zeros.
+ trim : one of 'k', '.', '0', '-', optional
+ Controls post-processing trimming of trailing digits, as follows:
+
+ * 'k' : keep trailing zeros, keep decimal point (no trimming)
+ * '.' : trim all trailing zeros, leave decimal point
+ * '0' : trim all but the zero before the decimal point. Insert the
+ zero if it is missing.
+ * '-' : trim trailing zeros and any trailing decimal point
+ sign : boolean, optional
+ Whether to show the sign for positive values.
+ pad_left : non-negative integer, optional
+ Pad the left side of the string with whitespace until at least that
+ many characters are to the left of the decimal point.
+ pad_right : non-negative integer, optional
+ Pad the right side of the string with whitespace until at least that
+ many characters are to the right of the decimal point.
+ min_digits : non-negative integer or None, optional
+ Minimum number of digits to print. Only has an effect if `unique=True`
+ in which case additional digits past those necessary to uniquely
+ identify the value may be printed, rounding the last additional digit.
+
+ .. versionadded:: 1.21.0
+
+ Returns
+ -------
+ rep : string
+ The string representation of the floating point value
+
+ See Also
+ --------
+ format_float_scientific
+
+ Examples
+ --------
+ >>> np.format_float_positional(np.float32(np.pi))
+ '3.1415927'
+ >>> np.format_float_positional(np.float16(np.pi))
+ '3.14'
+ >>> np.format_float_positional(np.float16(0.3))
+ '0.3'
+ >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
+ '0.3000488281'
+ """
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ pad_right = _none_or_positive_arg(pad_right, 'pad_right')
+ min_digits = _none_or_positive_arg(min_digits, 'min_digits')
+ if not fractional and precision == 0:
+ raise ValueError("precision must be greater than 0 if "
+ "fractional=False")
+ if min_digits > 0 and precision > 0 and min_digits > precision:
+ raise ValueError("min_digits must be less than or equal to precision")
+ return dragon4_positional(x, precision=precision, unique=unique,
+ fractional=fractional, trim=trim,
+ sign=sign, pad_left=pad_left,
+ pad_right=pad_right, min_digits=min_digits)
+
+
+class IntegerFormat:
+ def __init__(self, data):
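+ # pad to the widest decimal rendering so columns align,
+ # e.g. data [-10, 3] -> '%3d'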
+ if data.size > 0:
+ max_str_len = max(len(str(np.max(data))),
+ len(str(np.min(data))))
+ else:
+ max_str_len = 0
+ self.format = '%{}d'.format(max_str_len)
+
+ def __call__(self, x):
+ return self.format % x
+
+
+class BoolFormat:
+ def __init__(self, data, **kwargs):
+ # add an extra space so " True" and "False" have the same length and
+ # array elements align nicely when printed, except in 0d arrays
+ self.truestr = ' True' if data.shape != () else 'True'
+
+ def __call__(self, x):
+ return self.truestr if x else "False"
+
+
+class ComplexFloatingFormat:
+ """ Formatter for subtypes of np.complexfloating """
+ def __init__(self, x, precision, floatmode, suppress_small,
+ sign=False, *, legacy=None):
+ # for backcompatibility, accept bools
+ if isinstance(sign, bool):
+ sign = '+' if sign else '-'
+
+ floatmode_real = floatmode_imag = floatmode
+ if legacy <= 113:
+ floatmode_real = 'maxprec_equal'
+ floatmode_imag = 'maxprec'
+
+ self.real_format = FloatingFormat(
+ x.real, precision, floatmode_real, suppress_small,
+ sign=sign, legacy=legacy
+ )
+ self.imag_format = FloatingFormat(
+ x.imag, precision, floatmode_imag, suppress_small,
+ sign='+', legacy=legacy
+ )
+
+ def __call__(self, x):
+ r = self.real_format(x.real)
+ i = self.imag_format(x.imag)
+
+ # add the 'j' before the terminal whitespace in i
+ sp = len(i.rstrip())
+ i = i[:sp] + 'j' + i[sp:]
+
+ return r + i
+
+
+class _TimelikeFormat:
+ def __init__(self, data):
+ non_nat = data[~isnat(data)]
+ if len(non_nat) > 0:
+ # Max str length of non-NaT elements
+ max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
+ len(self._format_non_nat(np.min(non_nat))))
+ else:
+ max_str_len = 0
+ if len(non_nat) < data.size:
+ # data contains a NaT
+ max_str_len = max(max_str_len, 5)
+ self._format = '%{}s'.format(max_str_len)
+ self._nat = "'NaT'".rjust(max_str_len)
+
+ def _format_non_nat(self, x):
+ # override in subclass
+ raise NotImplementedError
+
+ def __call__(self, x):
+ if isnat(x):
+ return self._nat
+ else:
+ return self._format % self._format_non_nat(x)
+
+
+class DatetimeFormat(_TimelikeFormat):
+ def __init__(self, x, unit=None, timezone=None, casting='same_kind',
+ legacy=False):
+ # Get the unit from the dtype
+ if unit is None:
+ if x.dtype.kind == 'M':
+ unit = datetime_data(x.dtype)[0]
+ else:
+ unit = 's'
+
+ if timezone is None:
+ timezone = 'naive'
+ self.timezone = timezone
+ self.unit = unit
+ self.casting = casting
+ self.legacy = legacy
+
+ # must be called after the above are configured
+ super().__init__(x)
+
+ def __call__(self, x):
+ if self.legacy <= 113:
+ return self._format_non_nat(x)
+ return super().__call__(x)
+
+ def _format_non_nat(self, x):
+ return "'%s'" % datetime_as_string(x,
+ unit=self.unit,
+ timezone=self.timezone,
+ casting=self.casting)
+
+
+class TimedeltaFormat(_TimelikeFormat):
+ def _format_non_nat(self, x):
+ return str(x.astype('i8'))
+
+
+class SubArrayFormat:
+ def __init__(self, format_function):
+ self.format_function = format_function
+
+ def __call__(self, arr):
+ if arr.ndim <= 1:
+ return "[" + ", ".join(self.format_function(a) for a in arr) + "]"
+ return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
+
+
+class StructuredVoidFormat:
+ """
+ Formatter for structured np.void objects.
+
+ This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
+ as alias scalars lose their field information, and the implementation
+ relies upon np.void.__getitem__.
+ """
+ def __init__(self, format_functions):
+ self.format_functions = format_functions
+
+ @classmethod
+ def from_data(cls, data, **options):
+ """
+ This is a second way to initialize StructuredVoidFormat, using the raw data
+ as input. Added to avoid changing the signature of __init__.
+ """
+ format_functions = []
+ for field_name in data.dtype.names:
+ format_function = _get_format_function(data[field_name], **options)
+ if data.dtype[field_name].shape != ():
+ format_function = SubArrayFormat(format_function)
+ format_functions.append(format_function)
+ return cls(format_functions)
+
+ def __call__(self, x):
+ str_fields = [
+ format_function(field)
+ for field, format_function in zip(x, self.format_functions)
+ ]
+ if len(str_fields) == 1:
+ return "({},)".format(str_fields[0])
+ else:
+ return "({})".format(", ".join(str_fields))
+
+
+def _void_scalar_repr(x):
+ """
+ Implements the repr for structured-void scalars. It is called from the
+ scalartypes.c.src code, and is placed here because it uses the elementwise
+ formatters defined above.
+ """
+ return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
+
+
+_typelessdata = [int_, float_, complex_, bool_]
+
+
+def dtype_is_implied(dtype):
+ """
+ Determine if the given dtype is implied by the representation of its values.
+
+ Parameters
+ ----------
+ dtype : dtype
+ Data type
+
+ Returns
+ -------
+ implied : bool
+ True if the dtype is implied by the representation of its values.
+
+ Examples
+ --------
+ >>> np.core.arrayprint.dtype_is_implied(int)
+ True
+ >>> np.array([1, 2, 3], int)
+ array([1, 2, 3])
+ >>> np.core.arrayprint.dtype_is_implied(np.int8)
+ False
+ >>> np.array([1, 2, 3], np.int8)
+ array([1, 2, 3], dtype=int8)
+ """
+ dtype = np.dtype(dtype)
+ if _format_options['legacy'] <= 113 and dtype.type == bool_:
+ return False
+
+ # not just void types can be structured, and names are not part of the repr
+ if dtype.names is not None:
+ return False
+
+ return dtype.type in _typelessdata
+
+
+def dtype_short_repr(dtype):
+ """
+ Convert a dtype to a short form which evaluates to the same dtype.
+
+ The intent is roughly that the following holds
+
+ >>> from numpy import *
+ >>> dt = np.int64([1, 2]).dtype
+ >>> assert eval(dtype_short_repr(dt)) == dt
+ """
+ if type(dtype).__repr__ != np.dtype.__repr__:
+ # TODO: Custom repr for user DTypes, logic should likely move.
+ return repr(dtype)
+ if dtype.names is not None:
+ # structured dtypes give a list or tuple repr
+ return str(dtype)
+ elif issubclass(dtype.type, flexible):
+ # handle these separately so they don't give garbage like str256
+ return "'%s'" % str(dtype)
+
+ typename = dtype.name
+ # quote typenames which can't be represented as python variable names
+ if typename and not (typename[0].isalpha() and typename.isalnum()):
+ typename = repr(typename)
+
+ return typename
+
+
+def _array_repr_implementation(
+ arr, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_repr() that allows overriding array2string."""
+ if max_line_width is None:
+ max_line_width = _format_options['linewidth']
+
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
+
+ prefix = class_name + "("
+ suffix = ")" if skipdtype else ","
+
+ if (_format_options['legacy'] <= 113 and
+ arr.shape == () and not arr.dtype.names):
+ lst = repr(arr.item())
+ elif arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', prefix, suffix=suffix)
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ arr_str = prefix + lst + suffix
+
+ if skipdtype:
+ return arr_str
+
+ dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
+
+ # compute whether we should put dtype on a new line: Do so if adding the
+ # dtype would extend the last line past max_line_width.
+ # Note: This line gives the correct result even when rfind returns -1.
+ last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+ spacer = " "
+ if _format_options['legacy'] <= 113:
+ if issubclass(arr.dtype.type, flexible):
+ spacer = '\n' + ' '*len(class_name + "(")
+ elif last_line_len + len(dtype_str) + 1 > max_line_width:
+ spacer = '\n' + ' '*len(class_name + "(")
+
+ return arr_str + spacer + dtype_str
+
+
+def _array_repr_dispatcher(
+ arr, max_line_width=None, precision=None, suppress_small=None):
+ return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
+
+ See Also
+ --------
+ array_str, array2string, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([0.000001, 0. , 2. , 3. ])'
+
+ """
+ return _array_repr_implementation(
+ arr, max_line_width, precision, suppress_small)
+
+
+@_recursive_guard()
+def _guarded_repr_or_str(v):
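+ # bytes keep repr() so the b'...' prefix survives; everything else uses str()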
+ if isinstance(v, bytes):
+ return repr(v)
+ return str(v)
+
+
+def _array_str_implementation(
+ a, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_str() that allows overriding array2string."""
+ if (_format_options['legacy'] <= 113 and
+ a.shape == () and not a.dtype.names):
+ return str(a.item())
+
+ # the str of 0d arrays is a special case: It should appear like a scalar,
+ # so floats are not truncated by `precision`, and strings are not wrapped
+ # in quotes. So we return the str of the scalar value.
+ if a.shape == ():
+ # obtain a scalar and call str on it, avoiding problems for subclasses
+ # for which indexing with () returns a 0d instead of a scalar by using
+ # ndarray's getindex. Also guard against recursive 0d object arrays.
+ return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
+
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+
+def _array_str_dispatcher(
+ a, max_line_width=None, precision=None, suppress_small=None):
+ return (a,)
+
+
+@array_function_dispatch(_array_str_dispatcher, module='numpy')
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return _array_str_implementation(
+ a, max_line_width, precision, suppress_small)
+
+
+# needed if __array_function__ is disabled
+_array2string_impl = getattr(array2string, '__wrapped__', array2string)
+_default_array_str = functools.partial(_array_str_implementation,
+ array2string=_array2string_impl)
+_default_array_repr = functools.partial(_array_repr_implementation,
+ array2string=_array2string_impl)
+
+
+def set_string_function(f, repr=True):
+ """
+ Set a Python function to be used when pretty printing arrays.
+
+ Parameters
+ ----------
+ f : function or None
+ Function to be used to pretty print arrays. The function should expect
+ a single array argument and return a string of the representation of
+ the array. If None, the function is reset to the default NumPy function
+ to print arrays.
+ repr : bool, optional
+ If True (default), the function for pretty printing (``__repr__``)
+ is set, if False the function that returns the default string
+ representation (``__str__``) is set.
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ Examples
+ --------
+ >>> def pprint(arr):
+ ... return 'HA! - What are you going to do now?'
+ ...
+ >>> np.set_string_function(pprint)
+ >>> a = np.arange(10)
+ >>> a
+ HA! - What are you going to do now?
+ >>> _ = a
+ >>> # [0 1 2 3 4 5 6 7 8 9]
+
+ We can reset the function to the default:
+
+ >>> np.set_string_function(None)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    The `repr` flag determines whether the pretty-printing (``__repr__``)
+    or the plain string (``__str__``) function is replaced.
+ Note that ``__repr__`` is still affected by setting ``__str__``
+ because the width of each array element in the returned string becomes
+ equal to the length of the result of ``__str__()``.
+
+ >>> x = np.arange(4)
+ >>> np.set_string_function(lambda x:'random', repr=False)
+ >>> x.__str__()
+ 'random'
+ >>> x.__repr__()
+ 'array([0, 1, 2, 3])'
+
+ """
+ if f is None:
+ if repr:
+ return multiarray.set_string_function(_default_array_repr, 1)
+ else:
+ return multiarray.set_string_function(_default_array_str, 0)
+ else:
+ return multiarray.set_string_function(f, repr)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/arrayprint.pyi b/venv/lib/python3.9/site-packages/numpy/core/arrayprint.pyi
new file mode 100644
index 00000000..d8255387
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/arrayprint.pyi
@@ -0,0 +1,142 @@
+from types import TracebackType
+from collections.abc import Callable
+from typing import Any, Literal, TypedDict, SupportsIndex
+
+# Using a private class is by no means ideal, but it is simply a consequence
+# of `contextlib.contextmanager` returning an instance of the
+# aforementioned class
+from contextlib import _GeneratorContextManager
+
+from numpy import (
+ ndarray,
+ generic,
+ bool_,
+ integer,
+ timedelta64,
+ datetime64,
+ floating,
+ complexfloating,
+ void,
+ str_,
+ bytes_,
+ longdouble,
+ clongdouble,
+)
+from numpy._typing import ArrayLike, _CharLike_co, _FloatLike_co
+
+_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
+
+class _FormatDict(TypedDict, total=False):
+ bool: Callable[[bool_], str]
+ int: Callable[[integer[Any]], str]
+ timedelta: Callable[[timedelta64], str]
+ datetime: Callable[[datetime64], str]
+ float: Callable[[floating[Any]], str]
+ longfloat: Callable[[longdouble], str]
+ complexfloat: Callable[[complexfloating[Any, Any]], str]
+ longcomplexfloat: Callable[[clongdouble], str]
+ void: Callable[[void], str]
+ numpystr: Callable[[_CharLike_co], str]
+ object: Callable[[object], str]
+ all: Callable[[object], str]
+ int_kind: Callable[[integer[Any]], str]
+ float_kind: Callable[[floating[Any]], str]
+ complex_kind: Callable[[complexfloating[Any, Any]], str]
+ str_kind: Callable[[_CharLike_co], str]
+
+class _FormatOptions(TypedDict):
+ precision: int
+ threshold: int
+ edgeitems: int
+ linewidth: int
+ suppress: bool
+ nanstr: str
+ infstr: str
+ formatter: None | _FormatDict
+ sign: Literal["-", "+", " "]
+ floatmode: _FloatMode
+ legacy: Literal[False, "1.13", "1.21"]
+
+def set_printoptions(
+ precision: None | SupportsIndex = ...,
+ threshold: None | int = ...,
+ edgeitems: None | int = ...,
+ linewidth: None | int = ...,
+ suppress: None | bool = ...,
+ nanstr: None | str = ...,
+ infstr: None | str = ...,
+ formatter: None | _FormatDict = ...,
+ sign: Literal[None, "-", "+", " "] = ...,
+ floatmode: None | _FloatMode = ...,
+ *,
+ legacy: Literal[None, False, "1.13", "1.21"] = ...
+) -> None: ...
+def get_printoptions() -> _FormatOptions: ...
+def array2string(
+ a: ndarray[Any, Any],
+ max_line_width: None | int = ...,
+ precision: None | SupportsIndex = ...,
+ suppress_small: None | bool = ...,
+ separator: str = ...,
+ prefix: str = ...,
+ # NOTE: With the `style` argument being deprecated,
+ # all arguments between `formatter` and `suffix` are de facto
+    # keyword-only arguments
+ *,
+ formatter: None | _FormatDict = ...,
+ threshold: None | int = ...,
+ edgeitems: None | int = ...,
+ sign: Literal[None, "-", "+", " "] = ...,
+ floatmode: None | _FloatMode = ...,
+ suffix: str = ...,
+ legacy: Literal[None, False, "1.13", "1.21"] = ...,
+) -> str: ...
+def format_float_scientific(
+ x: _FloatLike_co,
+ precision: None | int = ...,
+ unique: bool = ...,
+ trim: Literal["k", ".", "0", "-"] = ...,
+ sign: bool = ...,
+ pad_left: None | int = ...,
+ exp_digits: None | int = ...,
+ min_digits: None | int = ...,
+) -> str: ...
+def format_float_positional(
+ x: _FloatLike_co,
+ precision: None | int = ...,
+ unique: bool = ...,
+ fractional: bool = ...,
+ trim: Literal["k", ".", "0", "-"] = ...,
+ sign: bool = ...,
+ pad_left: None | int = ...,
+ pad_right: None | int = ...,
+ min_digits: None | int = ...,
+) -> str: ...
+def array_repr(
+ arr: ndarray[Any, Any],
+ max_line_width: None | int = ...,
+ precision: None | SupportsIndex = ...,
+ suppress_small: None | bool = ...,
+) -> str: ...
+def array_str(
+ a: ndarray[Any, Any],
+ max_line_width: None | int = ...,
+ precision: None | SupportsIndex = ...,
+ suppress_small: None | bool = ...,
+) -> str: ...
+def set_string_function(
+ f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ...
+) -> None: ...
+def printoptions(
+ precision: None | SupportsIndex = ...,
+ threshold: None | int = ...,
+ edgeitems: None | int = ...,
+ linewidth: None | int = ...,
+ suppress: None | bool = ...,
+ nanstr: None | str = ...,
+ infstr: None | str = ...,
+ formatter: None | _FormatDict = ...,
+ sign: Literal[None, "-", "+", " "] = ...,
+ floatmode: None | _FloatMode = ...,
+ *,
+ legacy: Literal[None, False, "1.13", "1.21"] = ...
+) -> _GeneratorContextManager[_FormatOptions]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/cversions.py b/venv/lib/python3.9/site-packages/numpy/core/cversions.py
new file mode 100644
index 00000000..00159c3a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/cversions.py
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API has is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+ curdir = dirname(__file__)
+ print(fullapi_hash(full_api))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/defchararray.py b/venv/lib/python3.9/site-packages/numpy/core/defchararray.py
new file mode 100644
index 00000000..6750e497
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/defchararray.py
@@ -0,0 +1,2900 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+ The `chararray` class exists for backwards compatibility with
+    Numarray; it is not recommended for new development. Starting from numpy
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
+ `dtype` `object_`, `string_` or `unicode_`, and use the free functions
+ in the `numpy.char` module for fast vectorized string operations.
+
+Some methods will only be available if the corresponding string method is
+available in your version of Python.
+
+The preferred alias for `defchararray` is `numpy.char`.
+
+"""
+import functools
+from .numerictypes import (
+ string_, unicode_, integer, int_, object_, bool_, character)
+from .numeric import ndarray, compare_chararrays
+from .numeric import array as narray
+from numpy.core.multiarray import _vec_string
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.compat import asbytes
+import numpy
+
+__all__ = [
+ 'equal', 'not_equal', 'greater_equal', 'less_equal',
+ 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
+ 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
+ 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
+ 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
+ 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
+ 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
+ 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
+ 'array', 'asarray'
+ ]
+
+
+_globalvar = 0
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.char')
+
+
+def _use_unicode(*args):
+ """
+ Helper function for determining the output type of some string
+ operations.
+
+ For an operation on two ndarrays, if at least one is unicode, the
+ result should be unicode.
+ """
+ for x in args:
+ if (isinstance(x, str) or
+ issubclass(numpy.asarray(x).dtype.type, unicode_)):
+ return unicode_
+ return string_
+
+def _to_string_or_unicode_array(result):
+ """
+ Helper function to cast a result back into a string or unicode array
+ if an object array must be used as an intermediary.
+ """
+ return numpy.asarray(result.tolist())
+
+def _clean_args(*args):
+ """
+ Helper function for delegating arguments to Python string
+ functions.
+
+ Many of the Python string operations that have optional arguments
+ do not use 'None' to indicate a default value. In these cases,
+ we need to remove all None arguments, and those following them.
+ """
+ newargs = []
+ for chk in args:
+ if chk is None:
+ break
+ newargs.append(chk)
+ return newargs
+
+def _get_num_chars(a):
+ """
+ Helper function that returns the number of characters per field in
+ a string or unicode array. This is to abstract out the fact that
+ for a unicode array this is itemsize / 4.
+ """
+ if issubclass(a.dtype.type, unicode_):
+ return a.itemsize // 4
+ return a.itemsize
+
+
+def _binary_op_dispatcher(x1, x2):
+ return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def equal(x1, x2):
+ """
+ Return (x1 == x2) element-wise.
+
+ Unlike `numpy.equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ not_equal, greater_equal, less_equal, greater, less
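+
+    Examples
+    --------
+    A brief illustrative doctest (note that trailing whitespace is
+    stripped before comparing):
+
+    >>> np.char.equal(np.array(['a', 'b']), np.array(['a ', 'c']))
+    array([ True, False])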
+ """
+ return compare_chararrays(x1, x2, '==', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def not_equal(x1, x2):
+ """
+ Return (x1 != x2) element-wise.
+
+ Unlike `numpy.not_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, greater_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '!=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater_equal(x1, x2):
+ """
+ Return (x1 >= x2) element-wise.
+
+ Unlike `numpy.greater_equal`, this comparison is performed by
+ first stripping whitespace characters from the end of the string.
+ This behavior is provided for backward-compatibility with
+ numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, less_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+ """
+ Return (x1 <= x2) element-wise.
+
+ Unlike `numpy.less_equal`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, greater, less
+ """
+ return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+ """
+ Return (x1 > x2) element-wise.
+
+ Unlike `numpy.greater`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, less
+ """
+ return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+ """
+ Return (x1 < x2) element-wise.
+
+ Unlike `numpy.greater`, this comparison is performed by first
+ stripping whitespace characters from the end of the string. This
+ behavior is provided for backward-compatibility with numarray.
+
+ Parameters
+ ----------
+ x1, x2 : array_like of str or unicode
+ Input arrays of the same shape.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools.
+
+ See Also
+ --------
+ equal, not_equal, greater_equal, less_equal, greater
+ """
+ return compare_chararrays(x1, x2, '<', True)
+
+
+def _unary_op_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def str_len(a):
+ """
+ Return len(a) element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of integers
+
+ See Also
+ --------
+ builtins.len
+
+ Examples
+ --------
+ >>> a = np.array(['Grace Hopper Conference', 'Open Source Day'])
+ >>> np.char.str_len(a)
+ array([23, 15])
+ >>> a = np.array([u'\u0420', u'\u043e'])
+ >>> np.char.str_len(a)
+ array([1, 1])
+ >>> a = np.array([['hello', 'world'], [u'\u0420', u'\u043e']])
+ >>> np.char.str_len(a)
+ array([[5, 5], [1, 1]])
+ """
+ # Note: __len__, etc. currently return ints, which are not C-integers.
+ # Generally intp would be expected for lengths, although int is sufficient
+ # due to the dtype itemsize limitation.
+ return _vec_string(a, int_, '__len__')
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def add(x1, x2):
+ """
+ Return element-wise string concatenation for two arrays of str or unicode.
+
+ Arrays `x1` and `x2` must have the same shape.
+
+ Parameters
+ ----------
+ x1 : array_like of str or unicode
+ Input array.
+ x2 : array_like of str or unicode
+ Input array.
+
+ Returns
+ -------
+ add : ndarray
+ Output array of `string_` or `unicode_`, depending on input types
+ of the same shape as `x1` and `x2`.
+
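+    Examples
+    --------
+    A minimal illustrative doctest (the dtype widths shown follow from
+    these particular inputs):
+
+    >>> np.char.add(np.array(['a', 'b']), np.array(['cd', 'ef']))
+    array(['acd', 'bef'], dtype='<U3')
+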
+ """
+ arr1 = numpy.asarray(x1)
+ arr2 = numpy.asarray(x2)
+ out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
+ dtype = _use_unicode(arr1, arr2)
+ return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
+
+
+def _multiply_dispatcher(a, i):
+ return (a,)
+
+
+@array_function_dispatch(_multiply_dispatcher)
+def multiply(a, i):
+ """
+    Return (a * i), that is, each string repeated `i` times,
+    element-wise.
+
+ Values in `i` of less than 0 are treated as 0 (which yields an
+ empty string).
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ i : array_like of ints
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ Examples
+ --------
+ >>> a = np.array(["a", "b", "c"])
+    >>> np.char.multiply(a, 3)
+ array(['aaa', 'bbb', 'ccc'], dtype='<U3')
+ >>> i = np.array([1, 2, 3])
+ >>> np.char.multiply(a, i)
+ array(['a', 'bb', 'ccc'], dtype='<U3')
+ >>> np.char.multiply(np.array(['a']), i)
+ array(['a', 'aa', 'aaa'], dtype='<U3')
+ >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
+ >>> np.char.multiply(a, 3)
+ array([['aaa', 'bbb', 'ccc'],
+ ['ddd', 'eee', 'fff']], dtype='<U3')
+ >>> np.char.multiply(a, i)
+ array([['a', 'bb', 'ccc'],
+ ['d', 'ee', 'fff']], dtype='<U3')
+ """
+ a_arr = numpy.asarray(a)
+ i_arr = numpy.asarray(i)
+ if not issubclass(i_arr.dtype.type, integer):
+ raise ValueError("Can only multiply by integers")
+ out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
+
+
+def _mod_dispatcher(a, values):
+ return (a, values)
+
+
+@array_function_dispatch(_mod_dispatcher)
+def mod(a, values):
+ """
+    Return (a % values), that is, %-style string formatting
+ (interpolation), element-wise for a pair of array_likes of str
+ or unicode.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ values : array_like of values
+ These values will be element-wise interpolated into the string.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ See Also
+ --------
+ str.__mod__
+
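+    Examples
+    --------
+    A short illustrative doctest (the output dtype is sized to the
+    interpolated result):
+
+    >>> np.char.mod(np.array(['%d bottles']), np.array([99]))
+    array(['99 bottles'], dtype='<U10')
+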
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, '__mod__', (values,)))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def capitalize(a):
+ """
+ Return a copy of `a` with only the first character of each element
+ capitalized.
+
+ Calls `str.capitalize` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array of strings to capitalize.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input
+ types
+
+ See Also
+ --------
+ str.capitalize
+
+ Examples
+ --------
+ >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
+ array(['a1b2', '1b2a', 'b2a1', '2a1b'],
+ dtype='|S4')
+ >>> np.char.capitalize(c)
+ array(['A1b2', '1b2a', 'B2a1', '2a1b'],
+ dtype='|S4')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'capitalize')
+
+
+def _center_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_center_dispatcher)
+def center(a, width, fillchar=' '):
+ """
+ Return a copy of `a` with its elements centered in a string of
+ length `width`.
+
+ Calls `str.center` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The padding character to use (default is space).
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input
+ types
+
+ See Also
+ --------
+ str.center
+
+ Notes
+ -----
+ This function is intended to work with arrays of strings. The
+ fill character is not applied to numeric types.
+
+ Examples
+ --------
+ >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c
+ array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')
+ >>> np.char.center(c, width=9)
+    array(['   a1b2  ', '   1b2a  ', '   b2a1  ', '   2a1b  '], dtype='<U9')
+ >>> np.char.center(c, width=9, fillchar='*')
+ array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')
+ >>> np.char.center(c, width=1)
+ array(['a', '1', 'b', '2'], dtype='<U1')
+
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = int(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
+
+
+def _count_dispatcher(a, sub, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_dispatcher)
+def count(a, sub, start=0, end=None):
+ """
+ Returns an array with the number of non-overlapping occurrences of
+ substring `sub` in the range [`start`, `end`].
+
+ Calls `str.count` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+ The substring to search for.
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as slice
+ notation to specify the range in which to count.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints.
+
+ See Also
+ --------
+ str.count
+
+ Examples
+ --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+ >>> np.char.count(c, 'A')
+ array([3, 1, 1])
+ >>> np.char.count(c, 'aA')
+ array([3, 1, 0])
+ >>> np.char.count(c, 'A', start=1, end=4)
+ array([2, 1, 1])
+ >>> np.char.count(c, 'A', start=1, end=3)
+ array([1, 0, 0])
+
+ """
+ return _vec_string(a, int_, 'count', [sub, start] + _clean_args(end))
+
+
+def _code_dispatcher(a, encoding=None, errors=None):
+ return (a,)
+
+
+@array_function_dispatch(_code_dispatcher)
+def decode(a, encoding=None, errors=None):
+ r"""
+ Calls ``bytes.decode`` element-wise.
+
+ The set of available codecs comes from the Python standard library,
+ and may be extended at runtime. For more information, see the
+ :mod:`codecs` module.
+
+ Parameters
+ ----------
+    a : array_like of bytes
+
+ encoding : str, optional
+ The name of an encoding
+
+ errors : str, optional
+ Specifies how to handle encoding errors
+
+ Returns
+ -------
+ out : ndarray
+
+ See Also
+ --------
+ :py:meth:`bytes.decode`
+
+ Notes
+ -----
+ The type of the result will depend on the encoding specified.
+
+ Examples
+ --------
+ >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+ ... b'\x81\x82\xc2\xc1\xc2\x82\x81'])
+ >>> c
+ array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+           b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
+ >>> np.char.decode(c, encoding='cp037')
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+
+
+@array_function_dispatch(_code_dispatcher)
+def encode(a, encoding=None, errors=None):
+ """
+ Calls `str.encode` element-wise.
+
+ The set of available codecs comes from the Python standard library,
+ and may be extended at runtime. For more information, see the codecs
+ module.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ encoding : str, optional
+ The name of an encoding
+
+ errors : str, optional
+ Specifies how to handle encoding errors
+
+ Returns
+ -------
+ out : ndarray
+
+ See Also
+ --------
+ str.encode
+
+ Notes
+ -----
+ The type of the result will depend on the encoding specified.
+
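+    Examples
+    --------
+    An illustrative doctest, assuming the standard ``utf-8`` codec:
+
+    >>> np.char.encode(np.array(['aAaAaA']), encoding='utf-8')
+    array([b'aAaAaA'], dtype='|S6')
+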
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+
+
+def _endswith_dispatcher(a, suffix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_endswith_dispatcher)
+def endswith(a, suffix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `a` ends with `suffix`, otherwise `False`.
+
+ Calls `str.endswith` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ suffix : str
+
+ start, end : int, optional
+ With optional `start`, test beginning at that position. With
+ optional `end`, stop comparing at that position.
+
+ Returns
+ -------
+ out : ndarray
+ Outputs an array of bools.
+
+ See Also
+ --------
+ str.endswith
+
+ Examples
+ --------
+ >>> s = np.array(['foo', 'bar'])
+ >>> s[0] = 'foo'
+ >>> s[1] = 'bar'
+ >>> s
+ array(['foo', 'bar'], dtype='<U3')
+ >>> np.char.endswith(s, 'ar')
+    array([False,  True])
+ >>> np.char.endswith(s, 'a', start=1, end=2)
+    array([False,  True])
+
+ """
+ return _vec_string(
+ a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+
+
+def _expandtabs_dispatcher(a, tabsize=None):
+ return (a,)
+
+
+@array_function_dispatch(_expandtabs_dispatcher)
+def expandtabs(a, tabsize=8):
+ """
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces.
+
+ Calls `str.expandtabs` element-wise.
+
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces, depending on the current column
+ and the given `tabsize`. The column number is reset to zero after
+ each newline occurring in the string. This doesn't understand other
+ non-printing characters or escape sequences.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array
+ tabsize : int, optional
+ Replace tabs with `tabsize` number of spaces. If not given defaults
+ to 8 spaces.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.expandtabs
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'expandtabs', (tabsize,)))
+
+
+@array_function_dispatch(_count_dispatcher)
+def find(a, sub, start=0, end=None):
+ """
+ For each element, return the lowest index in the string where
+ substring `sub` is found.
+
+ Calls `str.find` element-wise.
+
+ For each element, return the lowest index in the string where
+ substring `sub` is found, such that `sub` is contained in the
+ range [`start`, `end`].
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as in
+ slice notation.
+
+ Returns
+ -------
+ out : ndarray or int
+ Output array of ints. Returns -1 if `sub` is not found.
+
+ See Also
+ --------
+ str.find
+
+ Examples
+ --------
+ >>> a = np.array(["NumPy is a Python library"])
+ >>> np.char.find(a, "Python", start=0, end=None)
+ array([11])
+
+ """
+ return _vec_string(
+ a, int_, 'find', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def index(a, sub, start=0, end=None):
+ """
+ Like `find`, but raises `ValueError` when the substring is not found.
+
+ Calls `str.index` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+
+ Returns
+ -------
+ out : ndarray
+        Output array of ints.
+
+ See Also
+ --------
+ find, str.find
+
+ Examples
+ --------
+ >>> a = np.array(["Computer Science"])
+ >>> np.char.index(a, "Science", start=0, end=None)
+ array([9])
+
+ """
+ return _vec_string(
+ a, int_, 'index', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalnum(a):
+ """
+ Returns true for each element if all characters in the string are
+ alphanumeric and there is at least one character, false otherwise.
+
+ Calls `str.isalnum` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+        Output array of bools
+
+ See Also
+ --------
+ str.isalnum
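+
+    Examples
+    --------
+    A brief illustrative doctest:
+
+    >>> np.char.isalnum(np.array(['abc123', 'abc 123', '']))
+    array([ True, False, False])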
+ """
+ return _vec_string(a, bool_, 'isalnum')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isalpha(a):
+ """
+ Returns true for each element if all characters in the string are
+ alphabetic and there is at least one character, false otherwise.
+
+ Calls `str.isalpha` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See Also
+ --------
+ str.isalpha
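+
+    Examples
+    --------
+    A brief illustrative doctest:
+
+    >>> np.char.isalpha(np.array(['abc', 'abc1', '']))
+    array([ True, False, False])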
+ """
+ return _vec_string(a, bool_, 'isalpha')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdigit(a):
+ """
+ Returns true for each element if all characters in the string are
+ digits and there is at least one character, false otherwise.
+
+ Calls `str.isdigit` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See Also
+ --------
+ str.isdigit
+
+ Examples
+ --------
+ >>> a = np.array(['a', 'b', '0'])
+ >>> np.char.isdigit(a)
+    array([False, False,  True])
+ >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']])
+ >>> np.char.isdigit(a)
+    array([[False, False,  True], [False,  True,  True]])
+ """
+ return _vec_string(a, bool_, 'isdigit')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def islower(a):
+ """
+ Returns true for each element if all cased characters in the
+ string are lowercase and there is at least one cased character,
+ false otherwise.
+
+ Calls `str.islower` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See Also
+ --------
+ str.islower
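+
+    Examples
+    --------
+    A brief illustrative doctest:
+
+    >>> np.char.islower(np.array(['abc', 'ABC', 'Abc']))
+    array([ True, False, False])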
+ """
+ return _vec_string(a, bool_, 'islower')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isspace(a):
+ """
+ Returns true for each element if there are only whitespace
+ characters in the string and there is at least one character,
+ false otherwise.
+
+ Calls `str.isspace` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See Also
+ --------
+ str.isspace
+ """
+ return _vec_string(a, bool_, 'isspace')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def istitle(a):
+ """
+ Returns true for each element if the element is a titlecased
+ string and there is at least one character, false otherwise.
+
+ Call `str.istitle` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See Also
+ --------
+ str.istitle
+ """
+ return _vec_string(a, bool_, 'istitle')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isupper(a):
+ """
+ Return true for each element if all cased characters in the
+    string are uppercase and there is at least one cased character, false
+ otherwise.
+
+ Call `str.isupper` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of bools
+
+ See Also
+ --------
+ str.isupper
+
+ Examples
+ --------
+ >>> str = "GHC"
+ >>> np.char.isupper(str)
+ array(True)
+ >>> a = np.array(["hello", "HELLO", "Hello"])
+ >>> np.char.isupper(a)
+    array([False,  True, False])
+
+ """
+ return _vec_string(a, bool_, 'isupper')
+
+
+def _join_dispatcher(sep, seq):
+ return (sep, seq)
+
+
+@array_function_dispatch(_join_dispatcher)
+def join(sep, seq):
+ """
+ Return a string which is the concatenation of the strings in the
+ sequence `seq`.
+
+ Calls `str.join` element-wise.
+
+ Parameters
+ ----------
+ sep : array_like of str or unicode
+ seq : array_like of str or unicode
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input types
+
+ See Also
+ --------
+ str.join
+
+ Examples
+ --------
+ >>> np.char.join('-', 'osd')
+ array('o-s-d', dtype='<U5')
+
+ >>> np.char.join(['-', '.'], ['ghc', 'osd'])
+ array(['g-h-c', 'o.s.d'], dtype='<U5')
+
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(sep, object_, 'join', (seq,)))
+
+
+def _just_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_just_dispatcher)
+def ljust(a, width, fillchar=' '):
+ """
+ Return an array with the elements of `a` left-justified in a
+ string of length `width`.
+
+ Calls `str.ljust` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The character to use for padding
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.ljust
+
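+    Examples
+    --------
+    A brief illustrative doctest (the result dtype widens to ``width``):
+
+    >>> np.char.ljust(np.array(['aa', 'b']), width=4, fillchar='*')
+    array(['aa**', 'b***'], dtype='<U4')
+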
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = int(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def lower(a):
+ """
+ Return an array with the elements converted to lowercase.
+
+ Call `str.lower` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.lower
+
+ Examples
+ --------
+ >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
+ array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
+ >>> np.char.lower(c)
+ array(['a1b c', '1bca', 'bca1'], dtype='<U5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'lower')
+
+
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def lstrip(a, chars=None):
+ """
+ For each element in `a`, return a copy with the leading characters
+ removed.
+
+ Calls `str.lstrip` element-wise.
+
+ Parameters
+ ----------
+ a : array-like, {str, unicode}
+ Input array.
+
+ chars : {str, unicode}, optional
+ The `chars` argument is a string specifying the set of
+ characters to be removed. If omitted or None, the `chars`
+ argument defaults to removing whitespace. The `chars` argument
+ is not a prefix; rather, all combinations of its values are
+ stripped.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.lstrip
+
+ Examples
+ --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+
+    The 'a' is not stripped from c[1] because of the leading whitespace.
+
+ >>> np.char.lstrip(c, 'a')
+    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')
+
+    >>> np.char.lstrip(c, 'A') # leaves c unchanged
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+ >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
+ ... # XXX: is this a regression? This used to return True
+ ... # np.char.lstrip(c,'') does not modify c at all.
+ False
+ >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
+ True
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+
+
+def _partition_dispatcher(a, sep):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, sep):
+ """
+ Partition each element in `a` around `sep`.
+
+ Calls `str.partition` element-wise.
+
+    For each element in `a`, split the element at the first
+ occurrence of `sep`, and return 3 strings containing the part
+ before the separator, the separator itself, and the part after
+ the separator. If the separator is not found, return 3 strings
+ containing the string itself, followed by two empty strings.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array
+ sep : {str, unicode}
+ Separator to split each string element in `a`.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type.
+ The output array will have an extra dimension with 3
+ elements per input element.
+
+ See Also
+ --------
+ str.partition
+
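+    Examples
+    --------
+    An illustrative doctest (the last axis holds the three parts):
+
+    >>> x = np.array(['Numpy is nice!'])
+    >>> np.char.partition(x, ' ')
+    array([['Numpy', ' ', 'is nice!']], dtype='<U8')
+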
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'partition', (sep,)))
+
+
+def _replace_dispatcher(a, old, new, count=None):
+ return (a,)
+
+
+@array_function_dispatch(_replace_dispatcher)
+def replace(a, old, new, count=None):
+ """
+ For each element in `a`, return a copy of the string with all
+ occurrences of substring `old` replaced by `new`.
+
+ Calls `str.replace` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ old, new : str or unicode
+
+ count : int, optional
+ If the optional argument `count` is given, only the first
+ `count` occurrences are replaced.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.replace
+
+ Examples
+ --------
+ >>> a = np.array(["That is a mango", "Monkeys eat mangos"])
+ >>> np.char.replace(a, 'mango', 'banana')
+ array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')
+
+ >>> a = np.array(["The dish is fresh", "This is it"])
+ >>> np.char.replace(a, 'is', 'was')
+ array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(
+ a, object_, 'replace', [old, new] + _clean_args(count)))
+
+
+@array_function_dispatch(_count_dispatcher)
+def rfind(a, sub, start=0, end=None):
+ """
+ For each element in `a`, return the highest index in the string
+ where substring `sub` is found, such that `sub` is contained
+ within [`start`, `end`].
+
+ Calls `str.rfind` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+ Optional arguments `start` and `end` are interpreted as in
+ slice notation.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints. Return -1 on failure.
+
+ See Also
+ --------
+ str.rfind
+
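+    Examples
+    --------
+    A brief illustrative doctest:
+
+    >>> a = np.array(['Computer Science'])
+    >>> np.char.rfind(a, 'Science')
+    array([9])
+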
+ """
+ return _vec_string(
+ a, int_, 'rfind', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_count_dispatcher)
+def rindex(a, sub, start=0, end=None):
+ """
+ Like `rfind`, but raises `ValueError` when the substring `sub` is
+ not found.
+
+ Calls `str.rindex` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ sub : str or unicode
+
+ start, end : int, optional
+
+ Returns
+ -------
+ out : ndarray
+ Output array of ints.
+
+ See Also
+ --------
+ rfind, str.rindex
+
+ """
+ return _vec_string(
+ a, int_, 'rindex', [sub, start] + _clean_args(end))
+
+
+@array_function_dispatch(_just_dispatcher)
+def rjust(a, width, fillchar=' '):
+ """
+ Return an array with the elements of `a` right-justified in a
+ string of length `width`.
+
+ Calls `str.rjust` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ width : int
+ The length of the resulting strings
+ fillchar : str or unicode, optional
+ The character to use for padding
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.rjust
+
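+    Examples
+    --------
+    A brief illustrative doctest, mirroring `ljust`:
+
+    >>> np.char.rjust(np.array(['aa', 'b']), width=4, fillchar='*')
+    array(['**aa', '***b'], dtype='<U4')
+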
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = int(numpy.max(width_arr.flat))
+ if numpy.issubdtype(a_arr.dtype, numpy.string_):
+ fillchar = asbytes(fillchar)
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
+
+
+@array_function_dispatch(_partition_dispatcher)
+def rpartition(a, sep):
+ """
+ Partition (split) each element around the right-most separator.
+
+ Calls `str.rpartition` element-wise.
+
+    For each element in `a`, split the element at the last
+ occurrence of `sep`, and return 3 strings containing the part
+ before the separator, the separator itself, and the part after
+ the separator. If the separator is not found, return 3 strings
+ containing the string itself, followed by two empty strings.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+ Input array
+ sep : str or unicode
+ Right-most separator to split each element in array.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of string or unicode, depending on input
+ type. The output array will have an extra dimension with
+ 3 elements per input element.
+
+ See Also
+ --------
+ str.rpartition
+
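+    Examples
+    --------
+    An illustrative doctest, mirroring `partition` but splitting at the
+    last occurrence of the separator:
+
+    >>> x = np.array(['aAaAaA'])
+    >>> np.char.rpartition(x, 'A')
+    array([['aAaAa', 'A', '']], dtype='<U5')
+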
+ """
+ return _to_string_or_unicode_array(
+ _vec_string(a, object_, 'rpartition', (sep,)))
+
+
+def _split_dispatcher(a, sep=None, maxsplit=None):
+ return (a,)
+
+
+@array_function_dispatch(_split_dispatcher)
+def rsplit(a, sep=None, maxsplit=None):
+ """
+ For each element in `a`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ Calls `str.rsplit` element-wise.
+
+ Except for splitting from the right, `rsplit`
+ behaves like `split`.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sep : str or unicode, optional
+ If `sep` is not specified or None, any whitespace string
+ is a separator.
+ maxsplit : int, optional
+ If `maxsplit` is given, at most `maxsplit` splits are done,
+ the rightmost ones.
+
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
+
+ See Also
+ --------
+ str.rsplit, split
+
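+    Examples
+    --------
+    A short illustrative doctest (the result is an object array of lists):
+
+    >>> a = np.array(['aAaAaA', 'abBABba'])
+    >>> np.char.rsplit(a, 'A')
+    array([list(['a', 'a', 'a', '']), list(['abB', 'Bba'])], dtype=object)
+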
+ """
+ # This will return an array of lists of different sizes, so we
+ # leave it as an object array
+ return _vec_string(
+ a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
+
+
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
+def rstrip(a, chars=None):
+ """
+ For each element in `a`, return a copy with the trailing
+ characters removed.
+
+ Calls `str.rstrip` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ chars : str or unicode, optional
+ The `chars` argument is a string specifying the set of
+ characters to be removed. If omitted or None, the `chars`
+ argument defaults to removing whitespace. The `chars` argument
+ is not a suffix; rather, all combinations of its values are
+ stripped.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.rstrip
+
+ Examples
+ --------
+ >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
+ array(['aAaAaA', 'abBABba'],
+ dtype='|S7')
+ >>> np.char.rstrip(c, b'a')
+ array(['aAaAaA', 'abBABb'],
+ dtype='|S7')
+ >>> np.char.rstrip(c, b'A')
+ array(['aAaAa', 'abBABba'],
+ dtype='|S7')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(a, sep=None, maxsplit=None):
+ """
+ For each element in `a`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ Calls `str.split` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ sep : str or unicode, optional
+ If `sep` is not specified or None, any whitespace string is a
+ separator.
+
+ maxsplit : int, optional
+ If `maxsplit` is given, at most `maxsplit` splits are done.
+
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
+
+ See Also
+ --------
+ str.split, rsplit
+
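+    Examples
+    --------
+    A short illustrative doctest (whitespace is the default separator):
+
+    >>> x = np.array(['Hello world'])
+    >>> np.char.split(x)
+    array([list(['Hello', 'world'])], dtype=object)
+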
+ """
+ # This will return an array of lists of different sizes, so we
+ # leave it as an object array
+ return _vec_string(
+ a, object_, 'split', [sep] + _clean_args(maxsplit))
+
+
+def _splitlines_dispatcher(a, keepends=None):
+ return (a,)
+
+
+@array_function_dispatch(_splitlines_dispatcher)
+def splitlines(a, keepends=None):
+ """
+ For each element in `a`, return a list of the lines in the
+ element, breaking at line boundaries.
+
+ Calls `str.splitlines` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ keepends : bool, optional
+ Line breaks are not included in the resulting list unless
+ keepends is given and true.
+
+ Returns
+ -------
+ out : ndarray
+ Array of list objects
+
+ See Also
+ --------
+ str.splitlines
+
+ """
+ return _vec_string(
+ a, object_, 'splitlines', _clean_args(keepends))
+
+
+def _startswith_dispatcher(a, prefix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_startswith_dispatcher)
+def startswith(a, prefix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `a` starts with `prefix`, otherwise `False`.
+
+ Calls `str.startswith` element-wise.
+
+ Parameters
+ ----------
+ a : array_like of str or unicode
+
+ prefix : str
+
+ start, end : int, optional
+ With optional `start`, test beginning at that position. With
+ optional `end`, stop comparing at that position.
+
+ Returns
+ -------
+ out : ndarray
+ Array of booleans
+
+ See Also
+ --------
+ str.startswith
+
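+    Examples
+    --------
+    A brief illustrative doctest:
+
+    >>> s = np.array(['foo', 'bar'])
+    >>> np.char.startswith(s, 'fo')
+    array([ True, False])
+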
+ """
+ return _vec_string(
+ a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+
+
+@array_function_dispatch(_strip_dispatcher)
+def strip(a, chars=None):
+ """
+ For each element in `a`, return a copy with the leading and
+ trailing characters removed.
+
+ Calls `str.strip` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ chars : str or unicode, optional
+ The `chars` argument is a string specifying the set of
+ characters to be removed. If omitted or None, the `chars`
+ argument defaults to removing whitespace. The `chars` argument
+ is not a prefix or suffix; rather, all combinations of its
+ values are stripped.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.strip
+
+ Examples
+ --------
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+ >>> np.char.strip(c)
+ array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
+    >>> np.char.strip(c, 'a')  # 'a' not stripped from c[1]: whitespace leads
+    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
+    >>> np.char.strip(c, 'A')  # 'A' not stripped from c[1]: whitespace trails
+    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def swapcase(a):
+ """
+ Return element-wise a copy of the string with
+ uppercase characters converted to lowercase and vice versa.
+
+ Calls `str.swapcase` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.swapcase
+
+ Examples
+ --------
+ >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
+ array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
+ dtype='|S5')
+ >>> np.char.swapcase(c)
+ array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
+ dtype='|S5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def title(a):
+ """
+ Return element-wise title cased version of string or unicode.
+
+ Title case words start with uppercase characters, all remaining cased
+ characters are lowercase.
+
+ Calls `str.title` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.title
+
+ Examples
+ --------
+ >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
+ array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
+ dtype='|S5')
+ >>> np.char.title(c)
+ array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
+ dtype='|S5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'title')
+
+
+def _translate_dispatcher(a, table, deletechars=None):
+ return (a,)
+
+
+@array_function_dispatch(_translate_dispatcher)
+def translate(a, table, deletechars=None):
+ """
+ For each element in `a`, return a copy of the string where all
+ characters occurring in the optional argument `deletechars` are
+ removed, and the remaining characters have been mapped through the
+ given translation table.
+
+ Calls `str.translate` element-wise.
+
+ Parameters
+ ----------
+ a : array-like of str or unicode
+
+ table : str of length 256
+
+ deletechars : str
+
+ Returns
+ -------
+ out : ndarray
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.translate
+
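+    Examples
+    --------
+    An illustrative doctest for the unicode path, with a table built by
+    ``str.maketrans``:
+
+    >>> table = str.maketrans('abc', 'xyz')
+    >>> np.char.translate(np.array(['abcabc']), table)
+    array(['xyzxyz'], dtype='<U6')
+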
+ """
+ a_arr = numpy.asarray(a)
+ if issubclass(a_arr.dtype.type, unicode_):
+ return _vec_string(
+ a_arr, a_arr.dtype, 'translate', (table,))
+ else:
+ return _vec_string(
+ a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def upper(a):
+ """
+ Return an array with the elements converted to uppercase.
+
+ Calls `str.upper` element-wise.
+
+ For 8-bit strings, this method is locale-dependent.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.upper
+
+ Examples
+ --------
+ >>> c = np.array(['a1b c', '1bca', 'bca1']); c
+ array(['a1b c', '1bca', 'bca1'], dtype='<U5')
+ >>> np.char.upper(c)
+ array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
+
+ """
+ a_arr = numpy.asarray(a)
+ return _vec_string(a_arr, a_arr.dtype, 'upper')
+
+
+def _zfill_dispatcher(a, width):
+ return (a,)
+
+
+@array_function_dispatch(_zfill_dispatcher)
+def zfill(a, width):
+ """
+ Return the numeric string left-filled with zeros
+
+ Calls `str.zfill` element-wise.
+
+ Parameters
+ ----------
+ a : array_like, {str, unicode}
+ Input array.
+ width : int
+ Width of string to left-fill elements in `a`.
+
+ Returns
+ -------
+ out : ndarray, {str, unicode}
+ Output array of str or unicode, depending on input type
+
+ See Also
+ --------
+ str.zfill
+
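+    Examples
+    --------
+    A brief illustrative doctest:
+
+    >>> np.char.zfill(np.array(['5', '-5']), 3)
+    array(['005', '-05'], dtype='<U3')
+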
+ """
+ a_arr = numpy.asarray(a)
+ width_arr = numpy.asarray(width)
+ size = int(numpy.max(width_arr.flat))
+ return _vec_string(
+ a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isnumeric(a):
+ """
+ For each element, return True if there are only numeric
+ characters in the element.
+
+ Calls `unicode.isnumeric` element-wise.
+
+ Numeric characters include digit characters, and all characters
+ that have the Unicode numeric value property, e.g. ``U+2155,
+ VULGAR FRACTION ONE FIFTH``.
+
+ Parameters
+ ----------
+ a : array_like, unicode
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, bool
+ Array of booleans of same shape as `a`.
+
+ See Also
+ --------
+ unicode.isnumeric
+
+ Examples
+ --------
+ >>> np.char.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII'])
+ array([ True, False, False, False, False])
+
+ """
+ if _use_unicode(a) != unicode_:
+ raise TypeError("isnumeric is only available for Unicode strings and arrays")
+ return _vec_string(a, bool_, 'isnumeric')
+
+
+@array_function_dispatch(_unary_op_dispatcher)
+def isdecimal(a):
+ """
+ For each element, return True if there are only decimal
+ characters in the element.
+
+ Calls `unicode.isdecimal` element-wise.
+
+ Decimal characters include digit characters, and all characters
+ that can be used to form decimal-radix numbers,
+ e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
+
+ Parameters
+ ----------
+ a : array_like, unicode
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, bool
+ Array of booleans identical in shape to `a`.
+
+ See Also
+ --------
+ unicode.isdecimal
+
+ Examples
+ --------
+ >>> np.char.isdecimal(['12345', '4.99', '123ABC', ''])
+ array([ True, False, False, False])
+
+ """
+ if _use_unicode(a) != unicode_:
+ raise TypeError("isnumeric is only available for Unicode strings and arrays")
+ return _vec_string(a, bool_, 'isdecimal')
+
+
+@set_module('numpy')
+class chararray(ndarray):
+ """
+ chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
+ strides=None, order=None)
+
+ Provides a convenient view on arrays of string and unicode values.
+
+ .. note::
+ The `chararray` class exists for backwards compatibility with
+        Numarray; it is not recommended for new development. Starting from numpy
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
+ `dtype` `object_`, `string_` or `unicode_`, and use the free functions
+ in the `numpy.char` module for fast vectorized string operations.
+
+ Versus a regular NumPy array of type `str` or `unicode`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
+
+ chararrays should be created using `numpy.char.array` or
+ `numpy.char.asarray`, rather than this constructor directly.
+
+ This constructor creates the array, using `buffer` (with `offset`
+ and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+ constructs a new array with `strides` in "C order", unless both
+ ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
+ is in "Fortran order".
+
+ Methods
+ -------
+ astype
+ argsort
+ copy
+ count
+ decode
+ dump
+ dumps
+ encode
+ endswith
+ expandtabs
+ fill
+ find
+ flatten
+ getfield
+ index
+ isalnum
+ isalpha
+ isdecimal
+ isdigit
+ islower
+ isnumeric
+ isspace
+ istitle
+ isupper
+ item
+ join
+ ljust
+ lower
+ lstrip
+ nonzero
+ put
+ ravel
+ repeat
+ replace
+ reshape
+ resize
+ rfind
+ rindex
+ rjust
+ rsplit
+ rstrip
+ searchsorted
+ setfield
+ setflags
+ sort
+ split
+ splitlines
+ squeeze
+ startswith
+ strip
+ swapaxes
+ swapcase
+ take
+ title
+ tofile
+ tolist
+ tostring
+ translate
+ transpose
+ upper
+ view
+ zfill
+
+ Parameters
+ ----------
+ shape : tuple
+ Shape of the array.
+ itemsize : int, optional
+ Length of each array element, in number of characters. Default is 1.
+ unicode : bool, optional
+ Are the array elements of type unicode (True) or string (False).
+ Default is False.
+ buffer : object exposing the buffer interface or str, optional
+ Memory address of the start of the array data. Default is None,
+ in which case a new array is created.
+ offset : int, optional
+        Offset of array data in buffer, in bytes.
+ Default is 0. Needs to be >=0.
+ strides : array_like of ints, optional
+ Strides for the array (see `ndarray.strides` for full description).
+ Default is None.
+ order : {'C', 'F'}, optional
+ The order in which the array data is stored in memory: 'C' ->
+ "row major" order (the default), 'F' -> "column major"
+ (Fortran) order.
+
+ Examples
+ --------
+ >>> charar = np.chararray((3, 3))
+ >>> charar[:] = 'a'
+ >>> charar
+ chararray([[b'a', b'a', b'a'],
+ [b'a', b'a', b'a'],
+ [b'a', b'a', b'a']], dtype='|S1')
+
+ >>> charar = np.chararray(charar.shape, itemsize=5)
+ >>> charar[:] = 'abc'
+ >>> charar
+ chararray([[b'abc', b'abc', b'abc'],
+ [b'abc', b'abc', b'abc'],
+ [b'abc', b'abc', b'abc']], dtype='|S5')
+
+ """
+ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+ offset=0, strides=None, order='C'):
+ global _globalvar
+
+ if unicode:
+ dtype = unicode_
+ else:
+ dtype = string_
+
+ # force itemsize to be a Python int, since using NumPy integer
+ # types results in itemsize.itemsize being used as the size of
+ # strings in the new array.
+ itemsize = int(itemsize)
+
+ if isinstance(buffer, str):
+ # unicode objects do not have the buffer interface
+ filler = buffer
+ buffer = None
+ else:
+ filler = None
+
+ _globalvar = 1
+ if buffer is None:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ order=order)
+ else:
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
+ buffer=buffer,
+ offset=offset, strides=strides,
+ order=order)
+ if filler is not None:
+ self[...] = filler
+ _globalvar = 0
+ return self
+
+ def __array_finalize__(self, obj):
+        # The 'b' dtype char is a special case because it is used for
+        # reconstructing.
+ if not _globalvar and self.dtype.char not in 'SUbc':
+ raise ValueError("Can only create a chararray from string data.")
+
+ def __getitem__(self, obj):
+ val = ndarray.__getitem__(self, obj)
+
+ if isinstance(val, character):
+ temp = val.rstrip()
+ if len(temp) == 0:
+ val = ''
+ else:
+ val = temp
+
+ return val
+
+ # IMPLEMENTATION NOTE: Most of the methods of this class are
+ # direct delegations to the free functions in this module.
+ # However, those that return an array of strings should instead
+ # return a chararray, so some extra wrapping is required.
+
+ def __eq__(self, other):
+ """
+ Return (self == other) element-wise.
+
+ See Also
+ --------
+ equal
+ """
+ return equal(self, other)
+
+ def __ne__(self, other):
+ """
+ Return (self != other) element-wise.
+
+ See Also
+ --------
+ not_equal
+ """
+ return not_equal(self, other)
+
+ def __ge__(self, other):
+ """
+ Return (self >= other) element-wise.
+
+ See Also
+ --------
+ greater_equal
+ """
+ return greater_equal(self, other)
+
+ def __le__(self, other):
+ """
+ Return (self <= other) element-wise.
+
+ See Also
+ --------
+ less_equal
+ """
+ return less_equal(self, other)
+
+ def __gt__(self, other):
+ """
+ Return (self > other) element-wise.
+
+ See Also
+ --------
+ greater
+ """
+ return greater(self, other)
+
+ def __lt__(self, other):
+ """
+ Return (self < other) element-wise.
+
+ See Also
+ --------
+ less
+ """
+ return less(self, other)
+
+ def __add__(self, other):
+ """
+ Return (self + other), that is string concatenation,
+ element-wise for a pair of array_likes of str or unicode.
+
+ See Also
+ --------
+ add
+ """
+ return asarray(add(self, other))
+
+ def __radd__(self, other):
+ """
+ Return (other + self), that is string concatenation,
+ element-wise for a pair of array_likes of `string_` or `unicode_`.
+
+ See Also
+ --------
+ add
+ """
+ return asarray(add(numpy.asarray(other), self))
+
+ def __mul__(self, i):
+ """
+ Return (self * i), that is string multiple concatenation,
+ element-wise.
+
+ See Also
+ --------
+ multiply
+ """
+ return asarray(multiply(self, i))
+
+ def __rmul__(self, i):
+ """
+ Return (i * self), that is string multiple concatenation,
+ element-wise.
+
+ See Also
+ --------
+ multiply
+ """
+ return asarray(multiply(self, i))
+
+ def __mod__(self, i):
+ """
+ Return (self % i), that is printf-style string formatting
+ (interpolation), element-wise for a pair of array_likes of `string_`
+ or `unicode_`.
+
+ See Also
+ --------
+ mod
+ """
+ return asarray(mod(self, i))
+
+ def __rmod__(self, other):
+ return NotImplemented
+
+ def argsort(self, axis=-1, kind=None, order=None):
+ """
+ Return the indices that sort the array lexicographically.
+
+ For full documentation see `numpy.argsort`, for which this method is
+ in fact merely a "thin wrapper."
+
+ Examples
+ --------
+ >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
+ >>> c = c.view(np.chararray); c
+ chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
+ dtype='|S5')
+ >>> c[c.argsort()]
+ chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
+ dtype='|S5')
+
+ """
+ return self.__array__().argsort(axis, kind, order)
+ argsort.__doc__ = ndarray.argsort.__doc__
+
+ def capitalize(self):
+ """
+ Return a copy of `self` with only the first character of each element
+ capitalized.
+
+ See Also
+ --------
+ char.capitalize
+
+ """
+ return asarray(capitalize(self))
+
+ def center(self, width, fillchar=' '):
+ """
+ Return a copy of `self` with its elements centered in a
+ string of length `width`.
+
+ See Also
+ --------
+ center
+ """
+ return asarray(center(self, width, fillchar))
+
+ def count(self, sub, start=0, end=None):
+ """
+ Returns an array with the number of non-overlapping occurrences of
+ substring `sub` in the range [`start`, `end`].
+
+ See Also
+ --------
+ char.count
+
+ """
+ return count(self, sub, start, end)
+
+ def decode(self, encoding=None, errors=None):
+ """
+ Calls ``bytes.decode`` element-wise.
+
+ See Also
+ --------
+ char.decode
+
+ """
+ return decode(self, encoding, errors)
+
+ def encode(self, encoding=None, errors=None):
+ """
+ Calls `str.encode` element-wise.
+
+ See Also
+ --------
+ char.encode
+
+ """
+ return encode(self, encoding, errors)
+
+ def endswith(self, suffix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `self` ends with `suffix`, otherwise `False`.
+
+ See Also
+ --------
+ char.endswith
+
+ """
+ return endswith(self, suffix, start, end)
+
+ def expandtabs(self, tabsize=8):
+ """
+ Return a copy of each string element where all tab characters are
+ replaced by one or more spaces.
+
+ See Also
+ --------
+ char.expandtabs
+
+ """
+ return asarray(expandtabs(self, tabsize))
+
+ def find(self, sub, start=0, end=None):
+ """
+ For each element, return the lowest index in the string where
+ substring `sub` is found.
+
+ See Also
+ --------
+ char.find
+
+ """
+ return find(self, sub, start, end)
+
+ def index(self, sub, start=0, end=None):
+ """
+ Like `find`, but raises `ValueError` when the substring is not found.
+
+ See Also
+ --------
+ char.index
+
+ """
+ return index(self, sub, start, end)
+
+ def isalnum(self):
+ """
+ Returns true for each element if all characters in the string
+ are alphanumeric and there is at least one character, false
+ otherwise.
+
+ See Also
+ --------
+ char.isalnum
+
+ """
+ return isalnum(self)
+
+ def isalpha(self):
+ """
+ Returns true for each element if all characters in the string
+ are alphabetic and there is at least one character, false
+ otherwise.
+
+ See Also
+ --------
+ char.isalpha
+
+ """
+ return isalpha(self)
+
+ def isdigit(self):
+ """
+ Returns true for each element if all characters in the string are
+ digits and there is at least one character, false otherwise.
+
+ See Also
+ --------
+ char.isdigit
+
+ """
+ return isdigit(self)
+
+ def islower(self):
+ """
+ Returns true for each element if all cased characters in the
+ string are lowercase and there is at least one cased character,
+ false otherwise.
+
+ See Also
+ --------
+ char.islower
+
+ """
+ return islower(self)
+
+ def isspace(self):
+ """
+ Returns true for each element if there are only whitespace
+ characters in the string and there is at least one character,
+ false otherwise.
+
+ See Also
+ --------
+ char.isspace
+
+ """
+ return isspace(self)
+
+ def istitle(self):
+ """
+ Returns true for each element if the element is a titlecased
+ string and there is at least one character, false otherwise.
+
+ See Also
+ --------
+ char.istitle
+
+ """
+ return istitle(self)
+
+ def isupper(self):
+ """
+ Returns true for each element if all cased characters in the
+ string are uppercase and there is at least one character, false
+ otherwise.
+
+ See Also
+ --------
+ char.isupper
+
+ """
+ return isupper(self)
+
+ def join(self, seq):
+ """
+ Return a string which is the concatenation of the strings in the
+ sequence `seq`.
+
+ See Also
+ --------
+ char.join
+
+ """
+ return join(self, seq)
+
+ def ljust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self` left-justified in a
+ string of length `width`.
+
+ See Also
+ --------
+ char.ljust
+
+ """
+ return asarray(ljust(self, width, fillchar))
+
+ def lower(self):
+ """
+ Return an array with the elements of `self` converted to
+ lowercase.
+
+ See Also
+ --------
+ char.lower
+
+ """
+ return asarray(lower(self))
+
+ def lstrip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the leading characters
+ removed.
+
+ See Also
+ --------
+ char.lstrip
+
+ """
+ return asarray(lstrip(self, chars))
+
+ def partition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See Also
+ --------
+ partition
+ """
+ return asarray(partition(self, sep))
+
+ def replace(self, old, new, count=None):
+ """
+ For each element in `self`, return a copy of the string with all
+ occurrences of substring `old` replaced by `new`.
+
+ See Also
+ --------
+ char.replace
+
+ """
+ return asarray(replace(self, old, new, count))
+
+ def rfind(self, sub, start=0, end=None):
+ """
+ For each element in `self`, return the highest index in the string
+ where substring `sub` is found, such that `sub` is contained
+ within [`start`, `end`].
+
+ See Also
+ --------
+ char.rfind
+
+ """
+ return rfind(self, sub, start, end)
+
+ def rindex(self, sub, start=0, end=None):
+ """
+ Like `rfind`, but raises `ValueError` when the substring `sub` is
+ not found.
+
+ See Also
+ --------
+ char.rindex
+
+ """
+ return rindex(self, sub, start, end)
+
+ def rjust(self, width, fillchar=' '):
+ """
+ Return an array with the elements of `self`
+ right-justified in a string of length `width`.
+
+ See Also
+ --------
+ char.rjust
+
+ """
+ return asarray(rjust(self, width, fillchar))
+
+ def rpartition(self, sep):
+ """
+ Partition each element in `self` around `sep`.
+
+ See Also
+ --------
+ rpartition
+ """
+ return asarray(rpartition(self, sep))
+
+ def rsplit(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in
+ the string, using `sep` as the delimiter string.
+
+ See Also
+ --------
+ char.rsplit
+
+ """
+ return rsplit(self, sep, maxsplit)
+
+ def rstrip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the trailing
+ characters removed.
+
+ See Also
+ --------
+ char.rstrip
+
+ """
+ return asarray(rstrip(self, chars))
+
+ def split(self, sep=None, maxsplit=None):
+ """
+ For each element in `self`, return a list of the words in the
+ string, using `sep` as the delimiter string.
+
+ See Also
+ --------
+ char.split
+
+ """
+ return split(self, sep, maxsplit)
+
+ def splitlines(self, keepends=None):
+ """
+ For each element in `self`, return a list of the lines in the
+ element, breaking at line boundaries.
+
+ See Also
+ --------
+ char.splitlines
+
+ """
+ return splitlines(self, keepends)
+
+ def startswith(self, prefix, start=0, end=None):
+ """
+ Returns a boolean array which is `True` where the string element
+ in `self` starts with `prefix`, otherwise `False`.
+
+ See Also
+ --------
+ char.startswith
+
+ """
+ return startswith(self, prefix, start, end)
+
+ def strip(self, chars=None):
+ """
+ For each element in `self`, return a copy with the leading and
+ trailing characters removed.
+
+ See Also
+ --------
+ char.strip
+
+ """
+ return asarray(strip(self, chars))
+
+ def swapcase(self):
+ """
+ For each element in `self`, return a copy of the string with
+ uppercase characters converted to lowercase and vice versa.
+
+ See Also
+ --------
+ char.swapcase
+
+ """
+ return asarray(swapcase(self))
+
+ def title(self):
+ """
+ For each element in `self`, return a titlecased version of the
+ string: words start with uppercase characters, all remaining cased
+ characters are lowercase.
+
+ See Also
+ --------
+ char.title
+
+ """
+ return asarray(title(self))
+
+ def translate(self, table, deletechars=None):
+ """
+ For each element in `self`, return a copy of the string where
+ all characters occurring in the optional argument
+ `deletechars` are removed, and the remaining characters have
+ been mapped through the given translation table.
+
+ See Also
+ --------
+ char.translate
+
+ """
+ return asarray(translate(self, table, deletechars))
+
+ def upper(self):
+ """
+ Return an array with the elements of `self` converted to
+ uppercase.
+
+ See Also
+ --------
+ char.upper
+
+ """
+ return asarray(upper(self))
+
+ def zfill(self, width):
+ """
+ Return the numeric string left-filled with zeros in a string of
+ length `width`.
+
+ See Also
+ --------
+ char.zfill
+
+ """
+ return asarray(zfill(self, width))
+
+ def isnumeric(self):
+ """
+ For each element in `self`, return True if there are only
+ numeric characters in the element.
+
+ See Also
+ --------
+ char.isnumeric
+
+ """
+ return isnumeric(self)
+
+ def isdecimal(self):
+ """
+ For each element in `self`, return True if there are only
+ decimal characters in the element.
+
+ See Also
+ --------
+ char.isdecimal
+
+ """
+ return isdecimal(self)
+
+
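Since most of the methods above delegate to the module-level functions and re-wrap the result as a `chararray`, they chain naturally; a small doctest-style sketch (reprs may vary):

    >>> a = np.char.array(['hello', 'world'])
    >>> a.upper()
    chararray(['HELLO', 'WORLD'], dtype='<U5')
    >>> a.replace('l', 'L').count('L')   # replace() returns a chararray
    array([2, 1])
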
+@set_module("numpy.char")
+def array(obj, itemsize=None, copy=True, unicode=None, order=None):
+ """
+ Create a `chararray`.
+
+ .. note::
+ This function is provided for numarray backward-compatibility.
+ New code (not concerned with numarray compatibility) should use
+ arrays of type `string_` or `unicode_` and use the free functions
+ in :mod:`numpy.char <numpy.core.defchararray>` for fast
+ vectorized string operations instead.
+
+ Versus a regular NumPy array of type `str` or `unicode`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
+
+ Parameters
+ ----------
+ obj : array of str or unicode-like
+
+ itemsize : int, optional
+ `itemsize` is the number of characters per scalar in the
+ resulting array. If `itemsize` is None, and `obj` is an
+ object array or a Python list, the `itemsize` will be
+ automatically determined. If `itemsize` is provided and `obj`
+ is of type str or unicode, then the `obj` string will be
+ chunked into `itemsize` pieces.
+
+ copy : bool, optional
+ If true (default), then the object is copied. Otherwise, a copy
+ will only be made if __array__ returns a copy, if obj is a
+ nested sequence, or if a copy is needed to satisfy any of the other
+ requirements (`itemsize`, unicode, `order`, etc.).
+
+ unicode : bool, optional
+ When true, the resulting `chararray` can contain Unicode
+ characters, when false only 8-bit characters. If unicode is
+ None and `obj` is one of the following:
+
+ - a `chararray`,
+ - an ndarray of type `str` or `unicode`
+ - a Python str or unicode object,
+
+ then the unicode setting of the output array will be
+ automatically determined.
+
+ order : {'C', 'F', 'A'}, optional
+ Specify the order of the array. If order is 'C' (default), then the
+ array will be in C-contiguous order (last-index varies the
+ fastest). If order is 'F', then the returned array
+ will be in Fortran-contiguous order (first-index varies the
+ fastest). If order is 'A', then the returned array may
+ be in any order (either C-, Fortran-contiguous, or even
+ discontiguous).
+ """
+ if isinstance(obj, (bytes, str)):
+ if unicode is None:
+ if isinstance(obj, str):
+ unicode = True
+ else:
+ unicode = False
+
+ if itemsize is None:
+ itemsize = len(obj)
+ shape = len(obj) // itemsize
+
+ return chararray(shape, itemsize=itemsize, unicode=unicode,
+ buffer=obj, order=order)
+
+ if isinstance(obj, (list, tuple)):
+ obj = numpy.asarray(obj)
+
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
+ # If we just have a vanilla string ndarray, create a chararray
+ # view around it.
+ if not isinstance(obj, chararray):
+ obj = obj.view(chararray)
+
+ if itemsize is None:
+ itemsize = obj.itemsize
+ # itemsize is in 8-bit chars, so for Unicode, we need
+ # to divide by the size of a single Unicode character,
+ # which for NumPy is always 4
+ if issubclass(obj.dtype.type, unicode_):
+ itemsize //= 4
+
+ if unicode is None:
+ if issubclass(obj.dtype.type, unicode_):
+ unicode = True
+ else:
+ unicode = False
+
+ if unicode:
+ dtype = unicode_
+ else:
+ dtype = string_
+
+ if order is not None:
+ obj = numpy.asarray(obj, order=order)
+ if (copy or
+ (itemsize != obj.itemsize) or
+ (not unicode and issubclass(obj.dtype.type, unicode_)) or
+ (unicode and issubclass(obj.dtype.type, string_))):
+ obj = obj.astype((dtype, int(itemsize)))
+ return obj
+
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
+ if itemsize is None:
+ # Since no itemsize was specified, convert the input array to
+ # a list so the ndarray constructor will automatically
+ # determine the itemsize for us.
+ obj = obj.tolist()
+ # Fall through to the default case
+
+ if unicode:
+ dtype = unicode_
+ else:
+ dtype = string_
+
+ if itemsize is None:
+ val = narray(obj, dtype=dtype, order=order, subok=True)
+ else:
+ val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
+ return val.view(chararray)
+
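A doctest-style sketch of the buffer path described above: a bytes object exposes the buffer interface, so it is chunked into ``itemsize`` pieces rather than used as an element-wise filler (repr may vary):

    >>> np.char.array(b'abcdef', itemsize=3)
    chararray([b'abc', b'def'], dtype='|S3')
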
+
+@set_module("numpy.char")
+def asarray(obj, itemsize=None, unicode=None, order=None):
+ """
+ Convert the input to a `chararray`, copying the data only if
+ necessary.
+
+ Versus a regular NumPy array of type `str` or `unicode`, this
+ class adds the following functionality:
+
+ 1) values automatically have whitespace removed from the end
+ when indexed
+
+ 2) comparison operators automatically remove whitespace from the
+ end when comparing values
+
+ 3) vectorized string operations are provided as methods
+ (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
+
+ Parameters
+ ----------
+ obj : array of str or unicode-like
+
+ itemsize : int, optional
+ `itemsize` is the number of characters per scalar in the
+ resulting array. If `itemsize` is None, and `obj` is an
+ object array or a Python list, the `itemsize` will be
+ automatically determined. If `itemsize` is provided and `obj`
+ is of type str or unicode, then the `obj` string will be
+ chunked into `itemsize` pieces.
+
+ unicode : bool, optional
+ When true, the resulting `chararray` can contain Unicode
+ characters, when false only 8-bit characters. If unicode is
+ None and `obj` is one of the following:
+
+ - a `chararray`,
+ - an ndarray of type `str` or `unicode`
+ - a Python str or unicode object,
+
+ then the unicode setting of the output array will be
+ automatically determined.
+
+ order : {'C', 'F'}, optional
+ Specify the order of the array. If order is 'C' (default), then the
+ array will be in C-contiguous order (last-index varies the
+ fastest). If order is 'F', then the returned array
+ will be in Fortran-contiguous order (first-index varies the
+ fastest).
+ """
+ return array(obj, itemsize, copy=False,
+ unicode=unicode, order=order)
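A doctest-style sketch contrasting the copy behaviour of ``array`` and ``asarray`` above (assuming ``import numpy as np``):

    >>> base = np.array([b'ab', b'cd'])
    >>> np.char.asarray(base).base is base   # no copy: a chararray view
    True
    >>> np.char.array(base).base is base     # copy=True by default
    False
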
diff --git a/venv/lib/python3.9/site-packages/numpy/core/defchararray.pyi b/venv/lib/python3.9/site-packages/numpy/core/defchararray.pyi
new file mode 100644
index 00000000..73d90bb2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/defchararray.pyi
@@ -0,0 +1,421 @@
+from typing import (
+ Literal as L,
+ overload,
+ TypeVar,
+ Any,
+)
+
+from numpy import (
+ chararray as chararray,
+ dtype,
+ str_,
+ bytes_,
+ int_,
+ bool_,
+ object_,
+ _OrderKACF,
+)
+
+from numpy._typing import (
+ NDArray,
+ _ArrayLikeStr_co as U_co,
+ _ArrayLikeBytes_co as S_co,
+ _ArrayLikeInt_co as i_co,
+ _ArrayLikeBool_co as b_co,
+)
+
+from numpy.core.multiarray import compare_chararrays as compare_chararrays
+
+_SCT = TypeVar("_SCT", str_, bytes_)
+_CharArray = chararray[Any, dtype[_SCT]]
+
+__all__: list[str]
+
+# Comparison
+@overload
+def equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def not_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def not_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def greater_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def greater_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def less_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def less_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def greater(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def greater(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+@overload
+def less(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
+@overload
+def less(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
+
+# String operations
+@overload
+def add(x1: U_co, x2: U_co) -> NDArray[str_]: ...
+@overload
+def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def multiply(a: U_co, i: i_co) -> NDArray[str_]: ...
+@overload
+def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ...
+
+@overload
+def mod(a: U_co, value: Any) -> NDArray[str_]: ...
+@overload
+def mod(a: S_co, value: Any) -> NDArray[bytes_]: ...
+
+@overload
+def capitalize(a: U_co) -> NDArray[str_]: ...
+@overload
+def capitalize(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+@overload
+def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+
+def decode(
+ a: S_co,
+ encoding: None | str = ...,
+ errors: None | str = ...,
+) -> NDArray[str_]: ...
+
+def encode(
+ a: U_co,
+ encoding: None | str = ...,
+ errors: None | str = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
+@overload
+def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
+@overload
+def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
+@overload
+def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def lower(a: U_co) -> NDArray[str_]: ...
+@overload
+def lower(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
+@overload
+def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
+@overload
+def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def replace(
+ a: U_co,
+ old: U_co,
+ new: U_co,
+ count: None | i_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def replace(
+ a: S_co,
+ old: S_co,
+ new: S_co,
+ count: None | i_co = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def rjust(
+ a: U_co,
+ width: i_co,
+ fillchar: U_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def rjust(
+ a: S_co,
+ width: i_co,
+ fillchar: S_co = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...
+@overload
+def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def rsplit(
+ a: U_co,
+ sep: None | U_co = ...,
+ maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+@overload
+def rsplit(
+ a: S_co,
+ sep: None | S_co = ...,
+ maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
+@overload
+def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def split(
+ a: U_co,
+ sep: None | U_co = ...,
+ maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+@overload
+def split(
+ a: S_co,
+ sep: None | S_co = ...,
+ maxsplit: None | i_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ...
+@overload
+def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ...
+
+@overload
+def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
+@overload
+def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
+
+@overload
+def swapcase(a: U_co) -> NDArray[str_]: ...
+@overload
+def swapcase(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def title(a: U_co) -> NDArray[str_]: ...
+@overload
+def title(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def translate(
+ a: U_co,
+ table: U_co,
+ deletechars: None | U_co = ...,
+) -> NDArray[str_]: ...
+@overload
+def translate(
+ a: S_co,
+ table: S_co,
+ deletechars: None | S_co = ...,
+) -> NDArray[bytes_]: ...
+
+@overload
+def upper(a: U_co) -> NDArray[str_]: ...
+@overload
+def upper(a: S_co) -> NDArray[bytes_]: ...
+
+@overload
+def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
+@overload
+def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
+
+# String information
+@overload
+def count(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def count(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def endswith(
+ a: U_co,
+ suffix: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+@overload
+def endswith(
+ a: S_co,
+ suffix: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+
+@overload
+def find(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def find(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def index(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def index(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+def isalpha(a: U_co | S_co) -> NDArray[bool_]: ...
+def isalnum(a: U_co | S_co) -> NDArray[bool_]: ...
+def isdecimal(a: U_co | S_co) -> NDArray[bool_]: ...
+def isdigit(a: U_co | S_co) -> NDArray[bool_]: ...
+def islower(a: U_co | S_co) -> NDArray[bool_]: ...
+def isnumeric(a: U_co | S_co) -> NDArray[bool_]: ...
+def isspace(a: U_co | S_co) -> NDArray[bool_]: ...
+def istitle(a: U_co | S_co) -> NDArray[bool_]: ...
+def isupper(a: U_co | S_co) -> NDArray[bool_]: ...
+
+@overload
+def rfind(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def rfind(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def rindex(
+ a: U_co,
+ sub: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+@overload
+def rindex(
+ a: S_co,
+ sub: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[int_]: ...
+
+@overload
+def startswith(
+ a: U_co,
+ prefix: U_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+@overload
+def startswith(
+ a: S_co,
+ prefix: S_co,
+ start: i_co = ...,
+ end: None | i_co = ...,
+) -> NDArray[bool_]: ...
+
+def str_len(A: U_co | S_co) -> NDArray[int_]: ...
+
+# Overloads 1 and 2: str- or bytes-based array-likes
+# Overload 3: arbitrary object with unicode=False (-> bytes_)
+# Overload 4: arbitrary object with unicode=True (-> str_)
+@overload
+def array(
+ obj: U_co,
+ itemsize: None | int = ...,
+ copy: bool = ...,
+ unicode: L[False] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def array(
+ obj: S_co,
+ itemsize: None | int = ...,
+ copy: bool = ...,
+ unicode: L[False] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: None | int = ...,
+ copy: bool = ...,
+ unicode: L[False] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def array(
+ obj: object,
+ itemsize: None | int = ...,
+ copy: bool = ...,
+ unicode: L[True] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+
+@overload
+def asarray(
+ obj: U_co,
+ itemsize: None | int = ...,
+ unicode: L[False] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
+@overload
+def asarray(
+ obj: S_co,
+ itemsize: None | int = ...,
+ unicode: L[False] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: None | int = ...,
+ unicode: L[False] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[bytes_]: ...
+@overload
+def asarray(
+ obj: object,
+ itemsize: None | int = ...,
+ unicode: L[True] = ...,
+ order: _OrderKACF = ...,
+) -> _CharArray[str_]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/einsumfunc.py b/venv/lib/python3.9/site-packages/numpy/core/einsumfunc.py
new file mode 100644
index 00000000..d6c5885b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/einsumfunc.py
@@ -0,0 +1,1443 @@
+"""
+Implementation of optimized einsum.
+
+"""
+import itertools
+import operator
+
+from numpy.core.multiarray import c_einsum
+from numpy.core.numeric import asanyarray, tensordot
+from numpy.core.overrides import array_function_dispatch
+
+__all__ = ['einsum', 'einsum_path']
+
+einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+einsum_symbols_set = set(einsum_symbols)
+
+
+def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
+ """
+ Computes the number of FLOPS in the contraction.
+
+ Parameters
+ ----------
+ idx_contraction : iterable
+ The indices involved in the contraction
+ inner : bool
+ Does this contraction require an inner product?
+ num_terms : int
+ The number of terms in a contraction
+ size_dictionary : dict
+ The size of each of the indices in idx_contraction
+
+ Returns
+ -------
+ flop_count : int
+ The total number of FLOPS required for the contraction.
+
+ Examples
+ --------
+
+ >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
+ 30
+
+ >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
+ 60
+
+ """
+
+ overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
+ op_factor = max(1, num_terms - 1)
+ if inner:
+ op_factor += 1
+
+ return overall_size * op_factor
+
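Working through the docstring examples above, a quick doctest-style check of the formula ``overall_size * op_factor``:

    >>> 2 * 3 * 5 * max(1, 1 - 1)        # one term, no inner product
    30
    >>> 2 * 3 * 5 * (max(1, 2 - 1) + 1)  # two terms; the inner product adds an op
    60
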
+def _compute_size_by_dict(indices, idx_dict):
+ """
+ Computes the product of the elements in indices based on the dictionary
+ idx_dict.
+
+ Parameters
+ ----------
+ indices : iterable
+ Indices to base the product on.
+ idx_dict : dictionary
+ Dictionary of index sizes
+
+ Returns
+ -------
+ ret : int
+ The resulting product.
+
+ Examples
+ --------
+ >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
+ 90
+
+ """
+ ret = 1
+ for i in indices:
+ ret *= idx_dict[i]
+ return ret
+
+
+def _find_contraction(positions, input_sets, output_set):
+ """
+ Finds the contraction for a given set of input and output sets.
+
+ Parameters
+ ----------
+ positions : iterable
+ Integer positions of terms used in the contraction.
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+
+ Returns
+ -------
+ new_result : set
+ The indices of the resulting contraction
+ remaining : list
+ List of sets that have not been contracted, the new set is appended to
+ the end of this list
+ idx_removed : set
+ Indices removed from the entire contraction
+ idx_contraction : set
+ The indices used in the current contraction
+
+ Examples
+ --------
+
+ # A simple dot product test case
+ >>> pos = (0, 1)
+ >>> isets = [set('ab'), set('bc')]
+ >>> oset = set('ac')
+ >>> _find_contraction(pos, isets, oset)
+ ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
+
+ # A more complex case with additional terms in the contraction
+ >>> pos = (0, 2)
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set('ac')
+ >>> _find_contraction(pos, isets, oset)
+ ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
+ """
+
+ idx_contract = set()
+ idx_remain = output_set.copy()
+ remaining = []
+ for ind, value in enumerate(input_sets):
+ if ind in positions:
+ idx_contract |= value
+ else:
+ remaining.append(value)
+ idx_remain |= value
+
+ new_result = idx_remain & idx_contract
+ idx_removed = (idx_contract - new_result)
+ remaining.append(new_result)
+
+ return (new_result, remaining, idx_removed, idx_contract)
+
+
+def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
+ """
+ Computes all possible pair contractions, sieves the results based
+ on ``memory_limit`` and returns the lowest-cost path. This algorithm
+ scales factorially with the number of elements in the list ``input_sets``.
+
+ Parameters
+ ----------
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+ idx_dict : dictionary
+ Dictionary of index sizes
+ memory_limit : int
+ The maximum number of elements in a temporary array
+
+ Returns
+ -------
+ path : list
+ The optimal contraction order within the memory limit constraint.
+
+ Examples
+ --------
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set()
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+ >>> _optimal_path(isets, oset, idx_sizes, 5000)
+ [(0, 2), (0, 1)]
+ """
+
+ full_results = [(0, [], input_sets)]
+ for iteration in range(len(input_sets) - 1):
+ iter_results = []
+
+ # Compute all unique pairs
+ for curr in full_results:
+ cost, positions, remaining = curr
+ for con in itertools.combinations(range(len(input_sets) - iteration), 2):
+
+ # Find the contraction
+ cont = _find_contraction(con, remaining, output_set)
+ new_result, new_input_sets, idx_removed, idx_contract = cont
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(new_result, idx_dict)
+ if new_size > memory_limit:
+ continue
+
+ # Build (total_cost, positions, indices_remaining)
+ total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
+ new_pos = positions + [con]
+ iter_results.append((total_cost, new_pos, new_input_sets))
+
+ # Update the combinatorial list; if we did not find anything, return
+ # the best path plus the remaining contractions
+ if iter_results:
+ full_results = iter_results
+ else:
+ path = min(full_results, key=lambda x: x[0])[1]
+ path += [tuple(range(len(input_sets) - iteration))]
+ return path
+
+ # If we have not found anything, return a single einsum contraction
+ if len(full_results) == 0:
+ return [tuple(range(len(input_sets)))]
+
+ path = min(full_results, key=lambda x: x[0])[1]
+ return path
+
+def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
+ """Compute the cost (removed size + flops) and resultant indices for
+ performing the contraction specified by ``positions``.
+
+ Parameters
+ ----------
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ input_sets : list of sets
+ The indices found on each tensors.
+ output_set : set
+ The output indices of the expression.
+ idx_dict : dict
+ Mapping of each index to its size.
+ memory_limit : int
+ The total allowed size for an intermediary tensor.
+ path_cost : int
+ The contraction cost so far.
+ naive_cost : int
+ The cost of the unoptimized expression.
+
+ Returns
+ -------
+ cost : (int, int)
+ A tuple containing the size of any indices removed, and the flop cost.
+ positions : tuple of int
+ The locations of the proposed tensors to contract.
+ new_input_sets : list of sets
+ The resulting new list of indices if this proposed contraction is performed.
+
+ """
+
+ # Find the contraction
+ contract = _find_contraction(positions, input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+
+ # Sieve the results based on memory_limit
+ new_size = _compute_size_by_dict(idx_result, idx_dict)
+ if new_size > memory_limit:
+ return None
+
+ # Build sort tuple
+ old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
+ removed_size = sum(old_sizes) - new_size
+
+ # NB: removed_size used to be just the size of any removed indices i.e.:
+ # helpers.compute_size_by_dict(idx_removed, idx_dict)
+ cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
+ sort = (-removed_size, cost)
+
+ # Sieve based on total cost as well
+ if (path_cost + cost) > naive_cost:
+ return None
+
+ # Add contraction to possible choices
+ return [sort, positions, new_input_sets]
+
+
+def _update_other_results(results, best):
+ """Update the positions and provisional input_sets of ``results`` based on
+ performing the contraction result ``best``. Remove any involving the tensors
+ contracted.
+
+ Parameters
+ ----------
+ results : list
+ List of contraction results produced by ``_parse_possible_contraction``.
+ best : list
+ The best contraction of ``results`` i.e. the one that will be performed.
+
+ Returns
+ -------
+ mod_results : list
+ The list of modified results, updated with outcome of ``best`` contraction.
+ """
+
+ best_con = best[1]
+ bx, by = best_con
+ mod_results = []
+
+ for cost, (x, y), con_sets in results:
+
+ # Ignore results involving tensors just contracted
+ if x in best_con or y in best_con:
+ continue
+
+ # Update the input_sets
+ del con_sets[by - int(by > x) - int(by > y)]
+ del con_sets[bx - int(bx > x) - int(bx > y)]
+ con_sets.insert(-1, best[2][-1])
+
+ # Update the position indices
+ mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
+ mod_results.append((cost, mod_con, con_sets))
+
+ return mod_results
+
+def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
+ """
+ Finds the path by contracting the best pair until the input list is
+ exhausted. The best pair is found by minimizing the tuple
+ ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
+ matrix multiplication or inner product operations, then Hadamard-like
+ operations, and finally outer operations. Outer products are limited by
+ ``memory_limit``. This algorithm scales cubically with respect to the
+ number of elements in the list ``input_sets``.
+
+ Parameters
+ ----------
+ input_sets : list
+ List of sets that represent the lhs side of the einsum subscript
+ output_set : set
+ Set that represents the rhs side of the overall einsum subscript
+ idx_dict : dictionary
+ Dictionary of index sizes
+ memory_limit : int
+ The maximum number of elements in a temporary array
+
+ Returns
+ -------
+ path : list
+ The greedy contraction order within the memory limit constraint.
+
+ Examples
+ --------
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
+ >>> oset = set()
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
+ >>> _greedy_path(isets, oset, idx_sizes, 5000)
+ [(0, 2), (0, 1)]
+ """
+
+ # Handle trivial cases that leaked through
+ if len(input_sets) == 1:
+ return [(0,)]
+ elif len(input_sets) == 2:
+ return [(0, 1)]
+
+ # Build up a naive cost
+ contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
+ naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
+
+ # Initially iterate over all pairs
+ comb_iter = itertools.combinations(range(len(input_sets)), 2)
+ known_contractions = []
+
+ path_cost = 0
+ path = []
+
+ for iteration in range(len(input_sets) - 1):
+
+ # Iterate over all pairs on first step, only previously found pairs on subsequent steps
+ for positions in comb_iter:
+
+ # Always initially ignore outer products
+ if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+ continue
+
+ result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
+ naive_cost)
+ if result is not None:
+ known_contractions.append(result)
+
+ # If we do not have an inner contraction, rescan pairs including outer products
+ if len(known_contractions) == 0:
+
+ # Then check the outer products
+ for positions in itertools.combinations(range(len(input_sets)), 2):
+ result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
+ path_cost, naive_cost)
+ if result is not None:
+ known_contractions.append(result)
+
+ # If we still did not find any remaining contractions, default back to einsum-like behavior
+ if len(known_contractions) == 0:
+ path.append(tuple(range(len(input_sets))))
+ break
+
+ # Sort based on first index
+ best = min(known_contractions, key=lambda x: x[0])
+
+ # Now propagate as many unused contractions as possible to next iteration
+ known_contractions = _update_other_results(known_contractions, best)
+
+ # Next iteration only compute contractions with the new tensor
+ # All other contractions have been accounted for
+ input_sets = best[2]
+ new_tensor_pos = len(input_sets) - 1
+ comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+ # Update path and total cost
+ path.append(best[1])
+ path_cost += best[0][1]
+
+ return path
+
+
+def _can_dot(inputs, result, idx_removed):
+ """
+ Checks whether we can use a BLAS (np.tensordot) call and whether it is beneficial to do so.
+
+ Parameters
+ ----------
+ inputs : list of str
+ Specifies the subscripts for summation.
+ result : str
+ Resulting summation.
+ idx_removed : set
+ Indices that are removed in the summation
+
+
+ Returns
+ -------
+ type : bool
+ Returns True if BLAS should and can be used, else False.
+
+ Notes
+ -----
+ If the operation is BLAS level 1 or 2 and the data is not already
+ aligned, we default back to einsum, as the memory movement required
+ to copy is more costly than the operation itself.
+
+
+ Examples
+ --------
+
+ # Standard GEMM operation
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
+ True
+
+ # Can use the standard BLAS, but requires odd data movement
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
+ False
+
+ # DDOT where the memory is not aligned
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
+ False
+
+ """
+
+ # All `dot` calls remove indices
+ if len(idx_removed) == 0:
+ return False
+
+ # BLAS can only handle two operands
+ if len(inputs) != 2:
+ return False
+
+ input_left, input_right = inputs
+
+ for c in set(input_left + input_right):
+ # can't deal with repeated indices on same input or more than 2 total
+ nl, nr = input_left.count(c), input_right.count(c)
+ if (nl > 1) or (nr > 1) or (nl + nr > 2):
+ return False
+
+ # can't do implicit summation or dimension collapse e.g.
+ # "ab,bc->c" (implicitly sum over 'a')
+ # "ab,ca->ca" (take diagonal of 'a')
+ if nl + nr - 1 == int(c in result):
+ return False
+
+ # Build a few temporaries
+ set_left = set(input_left)
+ set_right = set(input_right)
+ keep_left = set_left - idx_removed
+ keep_right = set_right - idx_removed
+ rs = len(idx_removed)
+
+ # At this point we are a DOT, GEMV, or GEMM operation
+
+ # Handle inner products
+
+ # DDOT with aligned data
+ if input_left == input_right:
+ return True
+
+ # DDOT without aligned data (better to use einsum)
+ if set_left == set_right:
+ return False
+
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
+
+ # GEMM or GEMV no transpose
+ if input_left[-rs:] == input_right[:rs]:
+ return True
+
+ # GEMM or GEMV transpose both
+ if input_left[:rs] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose right
+ if input_left[-rs:] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose left
+ if input_left[:rs] == input_right[:rs]:
+ return True
+
+ # Einsum is faster than GEMV if we have to copy data
+ if not keep_left or not keep_right:
+ return False
+
+ # We are a matrix-matrix product, but we need to copy data
+ return True
+
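When ``_can_dot`` reports True, the contraction can be dispatched to ``np.tensordot``; a doctest-style sanity check of the GEMM case from the examples above (shapes are illustrative):

    >>> a = np.ones((2, 3)); b = np.ones((3, 4))
    >>> np.allclose(np.einsum('ij,jk->ik', a, b),
    ...             np.tensordot(a, b, axes=([1], [0])))
    True
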
+
+def _parse_einsum_input(operands):
+ """
+ A reproduction of einsum's C-side parsing in Python.
+
+ Returns
+ -------
+ input_strings : str
+ Parsed input strings
+ output_string : str
+ Parsed output string
+ operands : list of array_like
+ The operands to use in the numpy contraction
+
+ Examples
+ --------
+ The operand list is simplified to reduce printing:
+
+ >>> np.random.seed(123)
+ >>> a = np.random.rand(4, 4)
+ >>> b = np.random.rand(4, 4, 4)
+ >>> _parse_einsum_input(('...a,...a->...', a, b))
+ ('za,xza', 'xz', [a, b]) # may vary
+
+ >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
+ ('za,xza', 'xz', [a, b]) # may vary
+ """
+
+ if len(operands) == 0:
+ raise ValueError("No input operands")
+
+ if isinstance(operands[0], str):
+ subscripts = operands[0].replace(" ", "")
+ operands = [asanyarray(v) for v in operands[1:]]
+
+ # Ensure all characters are valid
+ for s in subscripts:
+ if s in '.,->':
+ continue
+ if s not in einsum_symbols:
+ raise ValueError("Character %s is not a valid symbol." % s)
+
+ else:
+ tmp_operands = list(operands)
+ operand_list = []
+ subscript_list = []
+ for p in range(len(operands) // 2):
+ operand_list.append(tmp_operands.pop(0))
+ subscript_list.append(tmp_operands.pop(0))
+
+ output_list = tmp_operands[-1] if len(tmp_operands) else None
+ operands = [asanyarray(v) for v in operand_list]
+ subscripts = ""
+ last = len(subscript_list) - 1
+ for num, sub in enumerate(subscript_list):
+ for s in sub:
+ if s is Ellipsis:
+ subscripts += "..."
+ else:
+ try:
+ s = operator.index(s)
+ except TypeError as e:
+ raise TypeError("For this input type lists must contain "
+ "either int or Ellipsis") from e
+ subscripts += einsum_symbols[s]
+ if num != last:
+ subscripts += ","
+
+ if output_list is not None:
+ subscripts += "->"
+ for s in output_list:
+ if s is Ellipsis:
+ subscripts += "..."
+ else:
+ try:
+ s = operator.index(s)
+ except TypeError as e:
+ raise TypeError("For this input type lists must contain "
+ "either int or Ellipsis") from e
+ subscripts += einsum_symbols[s]
+ # Check for proper "->"
+ if ("-" in subscripts) or (">" in subscripts):
+ invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
+ if invalid or (subscripts.count("->") != 1):
+ raise ValueError("Subscripts can only contain one '->'.")
+
+ # Parse ellipses
+ if "." in subscripts:
+ used = subscripts.replace(".", "").replace(",", "").replace("->", "")
+ unused = list(einsum_symbols_set - set(used))
+ ellipse_inds = "".join(unused)
+ longest = 0
+
+ if "->" in subscripts:
+ input_tmp, output_sub = subscripts.split("->")
+ split_subscripts = input_tmp.split(",")
+ out_sub = True
+ else:
+ split_subscripts = subscripts.split(',')
+ out_sub = False
+
+ for num, sub in enumerate(split_subscripts):
+ if "." in sub:
+ if (sub.count(".") != 3) or (sub.count("...") != 1):
+ raise ValueError("Invalid Ellipses.")
+
+ # Take into account numerical values
+ if operands[num].shape == ():
+ ellipse_count = 0
+ else:
+ ellipse_count = max(operands[num].ndim, 1)
+ ellipse_count -= (len(sub) - 3)
+
+ if ellipse_count > longest:
+ longest = ellipse_count
+
+ if ellipse_count < 0:
+ raise ValueError("Ellipses lengths do not match.")
+ elif ellipse_count == 0:
+ split_subscripts[num] = sub.replace('...', '')
+ else:
+ rep_inds = ellipse_inds[-ellipse_count:]
+ split_subscripts[num] = sub.replace('...', rep_inds)
+
+ subscripts = ",".join(split_subscripts)
+ if longest == 0:
+ out_ellipse = ""
+ else:
+ out_ellipse = ellipse_inds[-longest:]
+
+ if out_sub:
+ subscripts += "->" + output_sub.replace("...", out_ellipse)
+ else:
+ # Special care for outputless ellipses
+ output_subscript = ""
+ tmp_subscripts = subscripts.replace(",", "")
+ for s in sorted(set(tmp_subscripts)):
+ if s not in (einsum_symbols):
+ raise ValueError("Character %s is not a valid symbol." % s)
+ if tmp_subscripts.count(s) == 1:
+ output_subscript += s
+ normal_inds = ''.join(sorted(set(output_subscript) -
+ set(out_ellipse)))
+
+ subscripts += "->" + out_ellipse + normal_inds
+
+ # Build output string if does not exist
+ if "->" in subscripts:
+ input_subscripts, output_subscript = subscripts.split("->")
+ else:
+ input_subscripts = subscripts
+ # Build output subscripts
+ tmp_subscripts = subscripts.replace(",", "")
+ output_subscript = ""
+ for s in sorted(set(tmp_subscripts)):
+ if s not in einsum_symbols:
+ raise ValueError("Character %s is not a valid symbol." % s)
+ if tmp_subscripts.count(s) == 1:
+ output_subscript += s
+
+ # Make sure output subscripts are in the input
+ for char in output_subscript:
+ if char not in input_subscripts:
+ raise ValueError("Output character %s did not appear in the input"
+ % char)
+
+ # Make sure number operands is equivalent to the number of terms
+ if len(input_subscripts.split(',')) != len(operands):
+ raise ValueError("Number of einsum subscripts must be equal to the "
+ "number of operands.")
+
+ return (input_subscripts, output_subscript, operands)
+
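A doctest-style sketch of implicit-mode output construction: labels that appear exactly once survive into the output subscript, in sorted order:

    >>> _parse_einsum_input(('ij,jk', np.ones((2, 3)), np.ones((3, 4))))[:2]
    ('ij,jk', 'ik')
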
+
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
+ # NOTE: technically, we should only dispatch on array-like arguments, not
+ # subscripts (given as strings). But separating operands into
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
+ # signatures), so as a practical shortcut we dispatch on everything.
+ # Strings will be ignored for dispatching since they don't define
+ # __array_function__.
+ return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
+ """
+ einsum_path(subscripts, *operands, optimize='greedy')
+
+ Evaluates the lowest cost contraction order for an einsum expression by
+ considering the creation of intermediate arrays.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation.
+ *operands : list of array_like
+ These are the arrays for the operation.
+ optimize : {bool, list, tuple, 'greedy', 'optimal'}
+ Choose the type of path. If a tuple is provided, the second argument is
+ assumed to be the maximum intermediate size created. If only a single
+ argument is provided, the largest input or output array size is used
+ as a maximum intermediate size.
+
+ * if a list is given that starts with ``einsum_path``, uses this as the
+ contraction path
+ * if False no optimization is taken
+ * if True defaults to the 'greedy' algorithm
+ * 'optimal' An algorithm that combinatorially explores all possible
+ ways of contracting the listed tensors and chooses the least costly
+ path. Scales exponentially with the number of terms in the
+ contraction.
+ * 'greedy' An algorithm that chooses the best pair contraction
+ at each step. Effectively, this algorithm searches the largest inner,
+ Hadamard, and then outer products at each step. Scales cubically with
+ the number of terms in the contraction. Equivalent to the 'optimal'
+ path for most contractions.
+
+ Default is 'greedy'.
+
+ Returns
+ -------
+ path : list of tuples
+ A list representation of the einsum path.
+ string_repr : str
+ A printable representation of the einsum path.
+
+ Notes
+ -----
+ The resulting path indicates which terms of the input contraction should be
+ contracted first, the result of this contraction is then appended to the
+ end of the contraction list. This list can then be iterated over until all
+ intermediate contractions are complete.
+
+ See Also
+ --------
+ einsum, linalg.multi_dot
+
+ Examples
+ --------
+
+ We can begin with a chain dot example. In this case, it is optimal to
+ contract the ``b`` and ``c`` tensors first as represented by the first
+ element of the path ``(1, 2)``. The resulting tensor is added to the end
+ of the contraction and the remaining contraction ``(0, 1)`` is then
+ completed.
+
+ >>> np.random.seed(123)
+ >>> a = np.random.rand(2, 2)
+ >>> b = np.random.rand(2, 5)
+ >>> c = np.random.rand(5, 2)
+ >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
+ >>> print(path_info[0])
+ ['einsum_path', (1, 2), (0, 1)]
+ >>> print(path_info[1])
+ Complete contraction: ij,jk,kl->il # may vary
+ Naive scaling: 4
+ Optimized scaling: 3
+ Naive FLOP count: 1.600e+02
+ Optimized FLOP count: 5.600e+01
+ Theoretical speedup: 2.857
+ Largest intermediate: 4.000e+00 elements
+ -------------------------------------------------------------------------
+ scaling current remaining
+ -------------------------------------------------------------------------
+ 3 kl,jk->jl ij,jl->il
+ 3 jl,ij->il il->il
+
+
+ A more complex index transformation example.
+
+ >>> I = np.random.rand(10, 10, 10, 10)
+ >>> C = np.random.rand(10, 10)
+ >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
+ ... optimize='greedy')
+
+ >>> print(path_info[0])
+ ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
+ >>> print(path_info[1])
+ Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
+ Naive scaling: 8
+ Optimized scaling: 5
+ Naive FLOP count: 8.000e+08
+ Optimized FLOP count: 8.000e+05
+ Theoretical speedup: 1000.000
+ Largest intermediate: 1.000e+04 elements
+ --------------------------------------------------------------------------
+ scaling current remaining
+ --------------------------------------------------------------------------
+ 5 abcd,ea->bcde fb,gc,hd,bcde->efgh
+ 5 bcde,fb->cdef gc,hd,cdef->efgh
+ 5 cdef,gc->defg hd,defg->efgh
+ 5 defg,hd->efgh efgh->efgh
+ """
+
+ # Figure out what the path really is
+ path_type = optimize
+ if path_type is True:
+ path_type = 'greedy'
+ if path_type is None:
+ path_type = False
+
+ explicit_einsum_path = False
+ memory_limit = None
+
+ # No optimization or a named path algorithm
+ if (path_type is False) or isinstance(path_type, str):
+ pass
+
+ # Given an explicit path
+ elif len(path_type) and (path_type[0] == 'einsum_path'):
+ explicit_einsum_path = True
+
+ # Path tuple with memory limit
+ elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
+ isinstance(path_type[1], (int, float))):
+ memory_limit = int(path_type[1])
+ path_type = path_type[0]
+
+ else:
+ raise TypeError("Did not understand the path: %s" % str(path_type))
+
+ # Hidden option, only einsum should call this
+ einsum_call_arg = einsum_call
+
+ # Python side parsing
+ input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
+
+ # Build a few useful list and sets
+ input_list = input_subscripts.split(',')
+ input_sets = [set(x) for x in input_list]
+ output_set = set(output_subscript)
+ indices = set(input_subscripts.replace(',', ''))
+
+ # Get length of each unique dimension and ensure all dimensions are correct
+ dimension_dict = {}
+ broadcast_indices = [[] for x in range(len(input_list))]
+ for tnum, term in enumerate(input_list):
+ sh = operands[tnum].shape
+ if len(sh) != len(term):
+ raise ValueError("Einstein sum subscript %s does not contain the "
+ "correct number of indices for operand %d."
+ % (input_list[tnum], tnum))
+ for cnum, char in enumerate(term):
+ dim = sh[cnum]
+
+ # Build out broadcast indices
+ if dim == 1:
+ broadcast_indices[tnum].append(char)
+
+ if char in dimension_dict.keys():
+ # For broadcasting cases we always want the largest dim size
+ if dimension_dict[char] == 1:
+ dimension_dict[char] = dim
+ elif dim not in (1, dimension_dict[char]):
+ raise ValueError("Size of label '%s' for operand %d (%d) "
+ "does not match previous terms (%d)."
+ % (char, tnum, dimension_dict[char], dim))
+ else:
+ dimension_dict[char] = dim
+
+ # Convert broadcast inds to sets
+ broadcast_indices = [set(x) for x in broadcast_indices]
+
+ # Compute size of each input array plus the output array
+ size_list = [_compute_size_by_dict(term, dimension_dict)
+ for term in input_list + [output_subscript]]
+ max_size = max(size_list)
+
+ if memory_limit is None:
+ memory_arg = max_size
+ else:
+ memory_arg = memory_limit
+
+ # Compute naive cost
+ # This isn't quite right, need to look into exactly how einsum does this
+ inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
+ naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
+
+ # Compute the path
+ if explicit_einsum_path:
+ path = path_type[1:]
+ elif (
+ (path_type is False)
+ or (len(input_list) in [1, 2])
+ or (indices == output_set)
+ ):
+ # Nothing to be optimized, leave it to einsum
+ path = [tuple(range(len(input_list)))]
+ elif path_type == "greedy":
+ path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
+ elif path_type == "optimal":
+ path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
+ else:
+ raise KeyError("Path name %s not found" % path_type)
+
+ cost_list, scale_list, size_list, contraction_list = [], [], [], []
+
+ # Build contraction tuple (positions, gemm, einsum_str, remaining)
+ for cnum, contract_inds in enumerate(path):
+ # Make sure we remove inds from right to left
+ contract_inds = tuple(sorted(list(contract_inds), reverse=True))
+
+ contract = _find_contraction(contract_inds, input_sets, output_set)
+ out_inds, input_sets, idx_removed, idx_contract = contract
+
+ cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
+ cost_list.append(cost)
+ scale_list.append(len(idx_contract))
+ size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
+
+ bcast = set()
+ tmp_inputs = []
+ for x in contract_inds:
+ tmp_inputs.append(input_list.pop(x))
+ bcast |= broadcast_indices.pop(x)
+
+ new_bcast_inds = bcast - idx_removed
+
+ # If we're broadcasting, nix blas
+ if not len(idx_removed & bcast):
+ do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+ else:
+ do_blas = False
+
+ # Last contraction
+ if (cnum - len(path)) == -1:
+ idx_result = output_subscript
+ else:
+ sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
+ idx_result = "".join([x[1] for x in sorted(sort_result)])
+
+ input_list.append(idx_result)
+ broadcast_indices.append(new_bcast_inds)
+ einsum_str = ",".join(tmp_inputs) + "->" + idx_result
+
+ contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
+ contraction_list.append(contraction)
+
+ opt_cost = sum(cost_list) + 1
+
+ if len(input_list) != 1:
+ # Explicit "einsum_path" is usually trusted, but we detect this kind of
+ # mistake in order to prevent returning an intermediate value.
+ raise RuntimeError(
+ "Invalid einsum_path is specified: {} more operands have to be "
+ "contracted.".format(len(input_list) - 1))
+
+ if einsum_call_arg:
+ return (operands, contraction_list)
+
+ # Return the path along with a nice string representation
+ overall_contraction = input_subscripts + "->" + output_subscript
+ header = ("scaling", "current", "remaining")
+
+ speedup = naive_cost / opt_cost
+ max_i = max(size_list)
+
+ path_print = " Complete contraction: %s\n" % overall_contraction
+ path_print += " Naive scaling: %d\n" % len(indices)
+ path_print += " Optimized scaling: %d\n" % max(scale_list)
+ path_print += " Naive FLOP count: %.3e\n" % naive_cost
+ path_print += " Optimized FLOP count: %.3e\n" % opt_cost
+ path_print += " Theoretical speedup: %3.3f\n" % speedup
+ path_print += " Largest intermediate: %.3e elements\n" % max_i
+ path_print += "-" * 74 + "\n"
+ path_print += "%6s %24s %40s\n" % header
+ path_print += "-" * 74
+
+ for n, contraction in enumerate(contraction_list):
+ inds, idx_rm, einsum_str, remaining, blas = contraction
+ remaining_str = ",".join(remaining) + "->" + output_subscript
+ path_run = (scale_list[n], einsum_str, remaining_str)
+ path_print += "\n%4d %24s %40s" % path_run
+
+ path = ['einsum_path'] + path
+ return (path, path_print)
+
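A short sketch of the round trip this return value enables: the ``path`` list (which starts with ``'einsum_path'``) can be fed back through ``optimize=`` to skip the path search on repeated calls:

    >>> a = np.random.rand(2, 2); b = np.random.rand(2, 5); c = np.random.rand(5, 2)
    >>> path, _ = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='optimal')
    >>> path[0]
    'einsum_path'
    >>> out = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)  # reuses the path
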
+
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
+ # Arguably we dispatch on more arguments than we really should; see note in
+ # _einsum_path_dispatcher for why.
+ yield from operands
+ yield out
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, out=None, optimize=False, **kwargs):
+ """
+ einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe', optimize=False)
+
+ Evaluates the Einstein summation convention on the operands.
+
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
+
+ Parameters
+ ----------
+ subscripts : str
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
+ operands : list of array_like
+ These are the arrays for the operation.
+ out : ndarray, optional
+ If provided, the calculation is done into this array.
+ dtype : {data-type, None}, optional
+ If provided, forces the calculation to use the data type specified.
+ Note that you may have to also give a more liberal `casting`
+ parameter to allow the conversions. Default is None.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the output. 'C' means it should
+ be C contiguous. 'F' means it should be Fortran contiguous,
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+ Default is 'K'.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Setting this to
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Default is 'safe'.
+ optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        occurs if False; if True, the 'greedy' algorithm is used by default.
+ Also accepts an explicit contraction list from the ``np.einsum_path``
+ function. See ``np.einsum_path`` for more details. Defaults to False.
+
+ Returns
+ -------
+ output : ndarray
+ The calculation based on the Einstein summation convention.
+
+ See Also
+ --------
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+ einops :
+        a similar verbose interface is provided by the
+        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+ additional operations: transpose, reshape/flatten, repeat/tile,
+ squeeze/unsqueeze and reductions.
+ opt_einsum :
+ `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+        optimizes contraction order for einsum-like expressions
+        in a backend-agnostic manner.
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
+
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
+
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul` and :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner` and :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
+
+ To enable and control broadcasting, use an ellipsis. Default
+ NumPy-style broadcasting is done by adding an ellipsis
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
+ To take the trace along the first and last axes,
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+ product with the left-most indices instead of rightmost, one can do
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+ When there is only one operand, no axes are summed, and no output
+ parameter is provided, a view into the operand is returned instead
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+ produces a view (changed in version 1.10.0).
+
+ `einsum` also provides an alternative way to provide the subscripts
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format, the calculation is
+    performed in implicit mode; otherwise it is performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
+
+ .. versionadded:: 1.10.0
+
+ Views returned from einsum are now writeable whenever the input array
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ of a 2D array.
+
+ .. versionadded:: 1.12.0
+
+ Added the ``optimize`` argument which will optimize the contraction order
+ of an einsum expression. For a contraction with three or more operands this
+ can greatly increase the computational efficiency at the cost of a larger
+ memory footprint during computation.
+
+    Typically a 'greedy' algorithm is applied, which empirical tests have shown
+    returns the optimal path in the majority of cases. In some cases, 'optimal'
+    will find a better path through a more expensive, exhaustive search.
+ For iterative calculations it may be advisable to calculate the optimal path
+ once and reuse that path by supplying it as an argument. An example is given
+ below.
+
+ See :py:func:`numpy.einsum_path` for more details.
+
+ Examples
+ --------
+ >>> a = np.arange(25).reshape(5,5)
+ >>> b = np.arange(5)
+ >>> c = np.arange(6).reshape(2,3)
+
+ Trace of a matrix:
+
+ >>> np.einsum('ii', a)
+ 60
+ >>> np.einsum(a, [0,0])
+ 60
+ >>> np.trace(a)
+ 60
+
+ Extract the diagonal (requires explicit form):
+
+ >>> np.einsum('ii->i', a)
+ array([ 0, 6, 12, 18, 24])
+ >>> np.einsum(a, [0,0], [0])
+ array([ 0, 6, 12, 18, 24])
+ >>> np.diag(a)
+ array([ 0, 6, 12, 18, 24])
+
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
+
+ >>> np.einsum('ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.einsum(c, [1,0])
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+ >>> np.transpose(c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
+
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
+ >>> np.einsum('..., ...', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+ >>> np.multiply(3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
+
+ Vector outer product:
+
+ >>> np.einsum('i,j', np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+ >>> np.outer(np.arange(2)+1, b)
+ array([[0, 1, 2, 3, 4],
+ [0, 2, 4, 6, 8]])
+
+ Tensor contraction:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> np.einsum('ijk,jil->kl', a, b)
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ Example of ellipsis use:
+
+ >>> a = np.arange(6).reshape((3,2))
+ >>> b = np.arange(12).reshape((4,3))
+ >>> np.einsum('ki,jk->ij', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('ki,...k->i...', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+ >>> np.einsum('k...,jk', a, b)
+ array([[10, 28, 46, 64],
+ [13, 40, 67, 94]])
+
+    Chained array operations. For more complicated contractions, speedups
+    can be achieved by repeatedly computing a 'greedy' path, or by
+    pre-computing the 'optimal' path with `einsum_path` and reusing it
+    (since version 1.12.0). Performance improvements can be particularly
+    significant with larger arrays:
+
+ >>> a = np.ones(64).reshape(2,4,8)
+
+    Basic `einsum`: ~1520ms (benchmarked on a 3.1GHz Intel i5)
+
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
+
+    Sub-optimal `einsum` (the 'optimal' path is recomputed on every call): ~330ms
+
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
+
+ Greedy `einsum` (faster optimal path approximation): ~160ms
+
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
+
+ Optimal `einsum` (best usage pattern in some use cases): ~110ms
+
+ >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
+ >>> for iteration in range(500):
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
+
+ """
+ # Special handling if out is specified
+ specified_out = out is not None
+
+ # If no optimization, run pure einsum
+ if optimize is False:
+ if specified_out:
+ kwargs['out'] = out
+ return c_einsum(*operands, **kwargs)
+
+ # Check the kwargs to avoid a more cryptic error later, without having to
+ # repeat default values here
+ valid_einsum_kwargs = ['dtype', 'order', 'casting']
+    unknown_kwargs = [k for k in kwargs if k not in valid_einsum_kwargs]
+    if unknown_kwargs:
+        raise TypeError("Did not understand the following kwargs: %s"
+                        % unknown_kwargs)
+
+ # Build the contraction list and operand
+ operands, contraction_list = einsum_path(*operands, optimize=optimize,
+ einsum_call=True)
+
+ # Handle order kwarg for output array, c_einsum allows mixed case
+ output_order = kwargs.pop('order', 'K')
+ if output_order.upper() == 'A':
+ if all(arr.flags.f_contiguous for arr in operands):
+ output_order = 'F'
+ else:
+ output_order = 'C'
+
+ # Start contraction loop
+ for num, contraction in enumerate(contraction_list):
+ inds, idx_rm, einsum_str, remaining, blas = contraction
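+        # `inds` was sorted in descending order when the contraction list was
+        # built, so popping operands left to right keeps the remaining
+        # positions valid.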
+ tmp_operands = [operands.pop(x) for x in inds]
+
+ # Do we need to deal with the output?
+ handle_out = specified_out and ((num + 1) == len(contraction_list))
+
+ # Call tensordot if still possible
+ if blas:
+ # Checks have already been handled
+ input_str, results_index = einsum_str.split('->')
+ input_left, input_right = input_str.split(',')
+
+ tensor_result = input_left + input_right
+ for s in idx_rm:
+ tensor_result = tensor_result.replace(s, "")
+
+ # Find indices to contract over
+ left_pos, right_pos = [], []
+ for s in sorted(idx_rm):
+ left_pos.append(input_left.find(s))
+ right_pos.append(input_right.find(s))
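+            # e.g. for einsum_str 'ij,jk->ik' with idx_rm == {'j'}, this
+            # yields left_pos == [1] and right_pos == [0], i.e. tensordot
+            # axes of ((1,), (0,)).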
+
+ # Contract!
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
+
+ # Build a new view if needed
+ if (tensor_result != results_index) or handle_out:
+ if handle_out:
+ kwargs["out"] = out
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
+
+ # Call einsum
+ else:
+ # If out was specified
+ if handle_out:
+ kwargs["out"] = out
+
+ # Do the contraction
+ new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
+
+ # Append new items and dereference what we can
+ operands.append(new_view)
+ del tmp_operands, new_view
+
+ if specified_out:
+ return out
+ else:
+ return asanyarray(operands[0], order=output_order)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi b/venv/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi
new file mode 100644
index 00000000..c811a578
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/einsumfunc.pyi
@@ -0,0 +1,144 @@
+from collections.abc import Sequence
+from typing import TypeVar, Any, overload, Union, Literal
+
+from numpy import (
+ ndarray,
+ dtype,
+ bool_,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ number,
+ _OrderKACF,
+)
+from numpy._typing import (
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _DTypeLikeBool,
+ _DTypeLikeUInt,
+ _DTypeLikeInt,
+ _DTypeLikeFloat,
+ _DTypeLikeComplex,
+ _DTypeLikeComplex_co,
+)
+
+_ArrayType = TypeVar(
+ "_ArrayType",
+ bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
+)
+
+_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any]
+_CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
+_CastingUnsafe = Literal["unsafe"]
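+# NOTE: An explicit contraction path as returned by `np.einsum_path`, e.g.
+# `['einsum_path', (0, 1), (0, 1)]`, is accepted via the `Sequence[Any]`
+# member of `_OptimizeKind`.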
+
+__all__: list[str]
+
+# TODO: Properly handle the `casting`-based combinatorics
+# TODO: We need to evaluate the content of `__subscripts` in order
+# to identify whether an array or a scalar is returned. At a cursory
+# glance this seems like something that can quite easily be done with
+# a mypy plugin.
+# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeBool_co,
+ out: None = ...,
+ dtype: None | _DTypeLikeBool = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeUInt_co,
+ out: None = ...,
+ dtype: None | _DTypeLikeUInt = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeInt_co,
+ out: None = ...,
+ dtype: None | _DTypeLikeInt = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeFloat_co,
+ out: None = ...,
+ dtype: None | _DTypeLikeFloat = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeComplex_co,
+ out: None = ...,
+ dtype: None | _DTypeLikeComplex = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ casting: _CastingUnsafe,
+ dtype: None | _DTypeLikeComplex_co = ...,
+ out: None = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> Any: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeComplex_co,
+ out: _ArrayType,
+ dtype: None | _DTypeLikeComplex_co = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingSafe = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+@overload
+def einsum(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: Any,
+ out: _ArrayType,
+ casting: _CastingUnsafe,
+ dtype: None | _DTypeLikeComplex_co = ...,
+ order: _OrderKACF = ...,
+ optimize: _OptimizeKind = ...,
+) -> _ArrayType: ...
+
+# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
+# It is therefore excluded from the signatures below.
+# NOTE: In practice the list consists of a `str` (first element)
+# and a variable number of integer tuples.
+def einsum_path(
+ subscripts: str | _ArrayLikeInt_co,
+ /,
+ *operands: _ArrayLikeComplex_co,
+ optimize: _OptimizeKind = ...,
+) -> tuple[list[Any], str]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.py b/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.py
new file mode 100644
index 00000000..ca9d55cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.py
@@ -0,0 +1,3813 @@
+"""Module containing non-deprecated functions borrowed from Numeric.
+
+"""
+import functools
+import types
+import warnings
+
+import numpy as np
+from . import multiarray as mu
+from . import overrides
+from . import umath as um
+from . import numerictypes as nt
+from .multiarray import asarray, array, asanyarray, concatenate
+from . import _methods
+
+_dt_ = nt.sctype2char
+
+# functions that are methods
+__all__ = [
+ 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
+ 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
+ 'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
+ 'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
+ 'ravel', 'repeat', 'reshape', 'resize', 'round_',
+ 'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
+ 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
+]
+
+_gentype = types.GeneratorType
+# save away Python sum
+_sum_ = sum
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+# functions that are now methods
+def _wrapit(obj, method, *args, **kwds):
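+    # Fallback for objects lacking the requested method: convert to ndarray,
+    # call the method there, and re-wrap the result via `__array_wrap__` when
+    # the original object provides it (preserving e.g. subclass return types).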
+ try:
+ wrap = obj.__array_wrap__
+ except AttributeError:
+ wrap = None
+ result = getattr(asarray(obj), method)(*args, **kwds)
+ if wrap:
+ if not isinstance(result, mu.ndarray):
+ result = asarray(result)
+ result = wrap(result)
+ return result
+
+
+def _wrapfunc(obj, method, *args, **kwds):
+ bound = getattr(obj, method, None)
+ if bound is None:
+ return _wrapit(obj, method, *args, **kwds)
+
+ try:
+ return bound(*args, **kwds)
+ except TypeError:
+ # A TypeError occurs if the object does have such a method in its
+ # class, but its signature is not identical to that of NumPy's. This
+ # situation has occurred in the case of a downstream library like
+ # 'pandas'.
+ #
+ # Call _wrapit from within the except clause to ensure a potential
+ # exception has a traceback chain.
+ return _wrapit(obj, method, *args, **kwds)
+
+
+def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
+ passkwargs = {k: v for k, v in kwargs.items()
+ if v is not np._NoValue}
+
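+    # For anything other than a plain ndarray, prefer the object's own
+    # reduction method (when present) so subclasses and duck arrays keep
+    # their specialized behaviour; otherwise fall back to `ufunc.reduce`.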
+ if type(obj) is not mu.ndarray:
+ try:
+ reduction = getattr(obj, method)
+ except AttributeError:
+ pass
+ else:
+ # This branch is needed for reductions like any which don't
+ # support a dtype.
+ if dtype is not None:
+ return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
+ else:
+ return reduction(axis=axis, out=out, **passkwargs)
+
+ return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
+
+
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+ return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
+def take(a, indices, axis=None, out=None, mode='raise'):
+ """
+ Take elements from an array along an axis.
+
+ When axis is not None, this function does the same thing as "fancy"
+ indexing (indexing arrays using arrays); however, it can be easier to use
+ if you need elements along a given axis. A call such as
+ ``np.take(arr, indices, axis=3)`` is equivalent to
+ ``arr[:,:,:,indices,...]``.
+
+ Explained without fancy indexing, this is equivalent to the following use
+ of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
+ indices::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ Nj = indices.shape
+ for ii in ndindex(Ni):
+ for jj in ndindex(Nj):
+ for kk in ndindex(Nk):
+ out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
+
+ Parameters
+ ----------
+ a : array_like (Ni..., M, Nk...)
+ The source array.
+ indices : array_like (Nj...)
+ The indices of the values to extract.
+
+ .. versionadded:: 1.8.0
+
+ Also allow scalars for indices.
+ axis : int, optional
+ The axis over which to select values. By default, the flattened
+ input array is used.
+ out : ndarray, optional (Ni..., Nj..., Nk...)
+ If provided, the result will be placed in this array. It should
+ be of the appropriate shape and dtype. Note that `out` is always
+ buffered if `mode='raise'`; use other modes for better performance.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ 'clip' mode means that all indices that are too large are replaced
+ by the index that addresses the last element along that axis. Note
+ that this disables indexing with negative numbers.
+
+ Returns
+ -------
+ out : ndarray (Ni..., Nj..., Nk...)
+ The returned array has the same type as `a`.
+
+ See Also
+ --------
+ compress : Take elements using a boolean mask
+ ndarray.take : equivalent method
+ take_along_axis : Take elements by matching the array and the index arrays
+
+ Notes
+ -----
+
+ By eliminating the inner loop in the description above, and using `s_` to
+ build simple slice objects, `take` can be expressed in terms of applying
+ fancy indexing to each 1-d slice::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+        for kk in ndindex(Nk):
+ out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
+
+ For this reason, it is equivalent to (but faster than) the following use
+ of `apply_along_axis`::
+
+ out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
+
+ Examples
+ --------
+ >>> a = [4, 3, 5, 7, 6, 8]
+ >>> indices = [0, 1, 4]
+ >>> np.take(a, indices)
+ array([4, 3, 6])
+
+ In this example if `a` is an ndarray, "fancy" indexing can be used.
+
+ >>> a = np.array(a)
+ >>> a[indices]
+ array([4, 3, 6])
+
+ If `indices` is not one dimensional, the output also has these dimensions.
+
+ >>> np.take(a, [[0, 1], [2, 3]])
+ array([[4, 3],
+ [5, 7]])
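+
+    Using the `axis` argument selects elements along the given axis (here,
+    columns of a 2-D array):
+
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> np.take(a, [0, 2], axis=1)
+    array([[1, 3],
+           [4, 6]])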
+ """
+ return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
+
+
+def _reshape_dispatcher(a, newshape, order=None):
+ return (a,)
+
+
+# not deprecated --- copy if necessary, view otherwise
+@array_function_dispatch(_reshape_dispatcher)
+def reshape(a, newshape, order='C'):
+ """
+ Gives a new shape to an array without changing its data.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be reshaped.
+ newshape : int or tuple of ints
+ The new shape should be compatible with the original shape. If
+ an integer, then the result will be a 1-D array of that length.
+ One shape dimension can be -1. In this case, the value is
+ inferred from the length of the array and remaining dimensions.
+ order : {'C', 'F', 'A'}, optional
+ Read the elements of `a` using this index order, and place the
+ elements into the reshaped array using this index order. 'C'
+ means to read / write the elements using C-like index order,
+ with the last axis index changing fastest, back to the first
+ axis index changing slowest. 'F' means to read / write the
+ elements using Fortran-like index order, with the first index
+ changing fastest, and the last index changing slowest. Note that
+ the 'C' and 'F' options take no account of the memory layout of
+ the underlying array, and only refer to the order of indexing.
+ 'A' means to read / write the elements in Fortran-like index
+ order if `a` is Fortran *contiguous* in memory, C-like order
+ otherwise.
+
+ Returns
+ -------
+ reshaped_array : ndarray
+ This will be a new view object if possible; otherwise, it will
+ be a copy. Note there is no guarantee of the *memory layout* (C- or
+ Fortran- contiguous) of the returned array.
+
+ See Also
+ --------
+ ndarray.reshape : Equivalent method.
+
+ Notes
+ -----
+ It is not always possible to change the shape of an array without
+ copying the data. If you want an error to be raised when the data is copied,
+ you should assign the new shape to the shape attribute of the array::
+
+ >>> a = np.zeros((10, 2))
+
+ # A transpose makes the array non-contiguous
+ >>> b = a.T
+
+ # Taking a view makes it possible to modify the shape without modifying
+ # the initial object.
+ >>> c = b.view()
+ >>> c.shape = (20)
+ Traceback (most recent call last):
+ ...
+ AttributeError: Incompatible shape for in-place modification. Use
+ `.reshape()` to make a copy with the desired shape.
+
+ The `order` keyword gives the index ordering both for *fetching* the values
+ from `a`, and then *placing* the values into the output array.
+ For example, let's say you have an array:
+
+ >>> a = np.arange(6).reshape((3, 2))
+ >>> a
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+
+ You can think of reshaping as first raveling the array (using the given
+ index order), then inserting the elements from the raveled array into the
+ new array using the same kind of index ordering as was used for the
+ raveling.
+
+ >>> np.reshape(a, (2, 3)) # C-like index ordering
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
+ array([[0, 4, 3],
+ [2, 1, 5]])
+ >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
+ array([[0, 4, 3],
+ [2, 1, 5]])
+
+ Examples
+ --------
+ >>> a = np.array([[1,2,3], [4,5,6]])
+ >>> np.reshape(a, 6)
+ array([1, 2, 3, 4, 5, 6])
+ >>> np.reshape(a, 6, order='F')
+ array([1, 4, 2, 5, 3, 6])
+
+ >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ """
+ return _wrapfunc(a, 'reshape', newshape, order=order)
+
+
+def _choose_dispatcher(a, choices, out=None, mode=None):
+ yield a
+ yield from choices
+ yield out
+
+
+@array_function_dispatch(_choose_dispatcher)
+def choose(a, choices, out=None, mode='raise'):
+ """
+ Construct an array from an index array and a list of arrays to choose from.
+
+ First of all, if confused or uncertain, definitely look at the Examples -
+ in its full generality, this function is less simple than it might
+ seem from the following code description (below ndi =
+ `numpy.lib.index_tricks`):
+
+ ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
+
+ But this omits some subtleties. Here is a fully general summary:
+
+ Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
+ (`choices`), `a` and each choice array are first broadcast, as necessary,
+ to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
+ 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
+ for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
+ follows:
+
+ * if ``mode='raise'`` (the default), then, first of all, each element of
+ ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
+ that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
+ position in ``Ba`` - then the value at the same position in the new array
+ is the value in ``Bchoices[i]`` at that same position;
+
+ * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
+ integer; modular arithmetic is used to map integers outside the range
+ `[0, n-1]` back into that range; and then the new array is constructed
+ as above;
+
+ * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
+ integer; negative integers are mapped to 0; values greater than ``n-1``
+ are mapped to ``n-1``; and then the new array is constructed as above.
+
+ Parameters
+ ----------
+ a : int array
+ This array must contain integers in ``[0, n-1]``, where ``n`` is the
+ number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
+ cases any integers are permissible.
+ choices : sequence of arrays
+ Choice arrays. `a` and all of the choices must be broadcastable to the
+ same shape. If `choices` is itself an array (not recommended), then
+ its outermost dimension (i.e., the one corresponding to
+ ``choices.shape[0]``) is taken as defining the "sequence".
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype. Note that `out` is always
+ buffered if ``mode='raise'``; use other modes for better performance.
+ mode : {'raise' (default), 'wrap', 'clip'}, optional
+ Specifies how indices outside ``[0, n-1]`` will be treated:
+
+ * 'raise' : an exception is raised
+ * 'wrap' : value becomes value mod ``n``
+ * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
+
+ Returns
+ -------
+ merged_array : array
+ The merged result.
+
+ Raises
+ ------
+ ValueError: shape mismatch
+ If `a` and each choice array are not all broadcastable to the same
+ shape.
+
+ See Also
+ --------
+ ndarray.choose : equivalent method
+ numpy.take_along_axis : Preferable if `choices` is an array
+
+ Notes
+ -----
+ To reduce the chance of misinterpretation, even though the following
+ "abuse" is nominally supported, `choices` should neither be, nor be
+ thought of as, a single array, i.e., the outermost sequence-like container
+ should be either a list or a tuple.
+
+ Examples
+ --------
+
+ >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ ... [20, 21, 22, 23], [30, 31, 32, 33]]
+ >>> np.choose([2, 3, 1, 0], choices
+ ... # the first element of the result will be the first element of the
+ ... # third (2+1) "array" in choices, namely, 20; the second element
+ ... # will be the second element of the fourth (3+1) choice array, i.e.,
+ ... # 31, etc.
+ ... )
+ array([20, 31, 12, 3])
+ >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
+ array([20, 31, 12, 3])
+ >>> # because there are 4 choice arrays
+ >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
+ array([20, 1, 12, 3])
+ >>> # i.e., 0
+
+ A couple examples illustrating how choose broadcasts:
+
+ >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+ >>> choices = [-10, 10]
+ >>> np.choose(a, choices)
+ array([[ 10, -10, 10],
+ [-10, 10, -10],
+ [ 10, -10, 10]])
+
+ >>> # With thanks to Anne Archibald
+ >>> a = np.array([0, 1]).reshape((2,1,1))
+ >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+ >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+ >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+ array([[[ 1, 1, 1, 1, 1],
+ [ 2, 2, 2, 2, 2],
+ [ 3, 3, 3, 3, 3]],
+ [[-1, -2, -3, -4, -5],
+ [-1, -2, -3, -4, -5],
+ [-1, -2, -3, -4, -5]]])
+
+ """
+ return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+
+
+def _repeat_dispatcher(a, repeats, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
+def repeat(a, repeats, axis=None):
+ """
+ Repeat elements of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ repeats : int or array of ints
+ The number of repetitions for each element. `repeats` is broadcasted
+ to fit the shape of the given axis.
+ axis : int, optional
+ The axis along which to repeat values. By default, use the
+ flattened input array, and return a flat output array.
+
+ Returns
+ -------
+ repeated_array : ndarray
+ Output array which has the same shape as `a`, except along
+ the given axis.
+
+ See Also
+ --------
+ tile : Tile an array.
+ unique : Find the unique elements of an array.
+
+ Examples
+ --------
+ >>> np.repeat(3, 4)
+ array([3, 3, 3, 3])
+ >>> x = np.array([[1,2],[3,4]])
+ >>> np.repeat(x, 2)
+ array([1, 1, 2, 2, 3, 3, 4, 4])
+ >>> np.repeat(x, 3, axis=1)
+ array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]])
+ >>> np.repeat(x, [1, 2], axis=0)
+ array([[1, 2],
+ [3, 4],
+ [3, 4]])
+
+ """
+ return _wrapfunc(a, 'repeat', repeats, axis=axis)
+
+
+def _put_dispatcher(a, ind, v, mode=None):
+ return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
+def put(a, ind, v, mode='raise'):
+ """
+ Replaces specified elements of an array with given values.
+
+ The indexing works on the flattened target array. `put` is roughly
+ equivalent to:
+
+ ::
+
+ a.flat[ind] = v
+
+ Parameters
+ ----------
+ a : ndarray
+ Target array.
+ ind : array_like
+ Target indices, interpreted as integers.
+ v : array_like
+ Values to place in `a` at target indices. If `v` is shorter than
+ `ind` it will be repeated as necessary.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ 'clip' mode means that all indices that are too large are replaced
+ by the index that addresses the last element along that axis. Note
+ that this disables indexing with negative numbers. In 'raise' mode,
+ if an exception occurs the target array may still be modified.
+
+ See Also
+ --------
+ putmask, place
+ put_along_axis : Put elements by matching the array and the index arrays
+
+ Examples
+ --------
+ >>> a = np.arange(5)
+ >>> np.put(a, [0, 2], [-44, -55])
+ >>> a
+ array([-44, 1, -55, 3, 4])
+
+ >>> a = np.arange(5)
+ >>> np.put(a, 22, -5, mode='clip')
+ >>> a
+ array([ 0, 1, 2, 3, -5])
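+
+    If `v` is shorter than `ind`, it is repeated as necessary:
+
+    >>> a = np.arange(5)
+    >>> np.put(a, [0, 1, 2, 3], [-1, -2])
+    >>> a
+    array([-1, -2, -1, -2,  4])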
+
+ """
+ try:
+ put = a.put
+ except AttributeError as e:
+ raise TypeError("argument 1 must be numpy.ndarray, "
+ "not {name}".format(name=type(a).__name__)) from e
+
+ return put(ind, v, mode=mode)
+
+
+def _swapaxes_dispatcher(a, axis1, axis2):
+ return (a,)
+
+
+@array_function_dispatch(_swapaxes_dispatcher)
+def swapaxes(a, axis1, axis2):
+ """
+ Interchange two axes of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis1 : int
+ First axis.
+ axis2 : int
+ Second axis.
+
+ Returns
+ -------
+ a_swapped : ndarray
+ For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
+ returned; otherwise a new array is created. For earlier NumPy
+ versions a view of `a` is returned only if the order of the
+ axes is changed, otherwise the input array is returned.
+
+ Examples
+ --------
+ >>> x = np.array([[1,2,3]])
+ >>> np.swapaxes(x,0,1)
+ array([[1],
+ [2],
+ [3]])
+
+ >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
+ >>> x
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ >>> np.swapaxes(x,0,2)
+ array([[[0, 4],
+ [2, 6]],
+ [[1, 5],
+ [3, 7]]])
+
+ """
+ return _wrapfunc(a, 'swapaxes', axis1, axis2)
+
+
+def _transpose_dispatcher(a, axes=None):
+ return (a,)
+
+
+@array_function_dispatch(_transpose_dispatcher)
+def transpose(a, axes=None):
+ """
+ Returns an array with axes transposed.
+
+ For a 1-D array, this returns an unchanged view of the original array, as a
+ transposed vector is simply the same vector.
+ To convert a 1-D array into a 2-D column vector, an additional dimension
+    must be added, e.g., ``np.atleast_2d(a).T`` achieves this, as does
+ ``a[:, np.newaxis]``.
+ For a 2-D array, this is the standard matrix transpose.
+ For an n-D array, if axes are given, their order indicates how the
+ axes are permuted (see Examples). If axes are not provided, then
+ ``transpose(a).shape == a.shape[::-1]``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axes : tuple or list of ints, optional
+ If specified, it must be a tuple or list which contains a permutation
+ of [0,1,...,N-1] where N is the number of axes of `a`. The `i`'th axis
+ of the returned array will correspond to the axis numbered ``axes[i]``
+ of the input. If not specified, defaults to ``range(a.ndim)[::-1]``,
+ which reverses the order of the axes.
+
+ Returns
+ -------
+ p : ndarray
+ `a` with its axes permuted. A view is returned whenever possible.
+
+ See Also
+ --------
+ ndarray.transpose : Equivalent method.
+ moveaxis : Move axes of an array to new positions.
+ argsort : Return the indices that would sort an array.
+
+ Notes
+ -----
+ Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors
+ when using the `axes` keyword argument.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> a
+ array([[1, 2],
+ [3, 4]])
+ >>> np.transpose(a)
+ array([[1, 3],
+ [2, 4]])
+
+ >>> a = np.array([1, 2, 3, 4])
+ >>> a
+ array([1, 2, 3, 4])
+ >>> np.transpose(a)
+ array([1, 2, 3, 4])
+
+ >>> a = np.ones((1, 2, 3))
+ >>> np.transpose(a, (1, 0, 2)).shape
+ (2, 1, 3)
+
+ >>> a = np.ones((2, 3, 4, 5))
+ >>> np.transpose(a).shape
+ (5, 4, 3, 2)
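+
+    Inverting a transposition, as noted above, using `argsort`:
+
+    >>> a = np.ones((1, 2, 3))
+    >>> axes = (1, 2, 0)
+    >>> np.transpose(np.transpose(a, axes), np.argsort(axes)).shape
+    (1, 2, 3)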
+
+ """
+ return _wrapfunc(a, 'transpose', axes)
+
+
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Return a partitioned copy of an array.
+
+ Creates a copy of the array with its elements rearranged in such a
+ way that the value of the element in k-th position is in the position
+ the value would be in a sorted array. In the partitioned array, all
+ elements before the k-th element are less than or equal to that
+ element, and all the elements after the k-th element are greater than
+ or equal to that element. The ordering of the elements in the two
+ partitions is undefined.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ kth : int or sequence of ints
+        Element index to partition by. The element at the k-th position
+        will be in its final sorted position, with all smaller elements
+        moved before it and all equal or greater elements behind it. The
+        order of all elements in the partitions is undefined. If provided
+        with a sequence of k-th values, all elements indexed by them are
+        partitioned into their sorted positions at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument
+ specifies which fields to compare first, second, etc. A single
+ field can be specified as a string. Not all fields need be
+ specified, but unspecified fields will still be used, in the
+ order in which they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ partitioned_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.partition : Method to sort an array in-place.
+ argpartition : Indirect partition.
+ sort : Full sorting
+
+ Notes
+ -----
+ The various selection algorithms are characterized by their average
+ speed, worst case performance, work space size, and whether they are
+ stable. A stable sort keeps items with the same key in the same
+ relative order. The available algorithms have the following
+ properties:
+
+ ================= ======= ============= ============ =======
+ kind speed worst case work space stable
+ ================= ======= ============= ============ =======
+ 'introselect' 1 O(n) 0 no
+ ================= ======= ============= ============ =======
+
+ All the partition algorithms make temporary copies of the data when
+ partitioning along any but the last axis. Consequently,
+ partitioning along the last axis is faster and uses less space than
+ partitioning along any other axis.
+
+ The sort order for complex numbers is lexicographic. If both the
+ real and imaginary parts are non-nan then the order is determined by
+ the real parts except when they are equal, in which case the order
+ is determined by the imaginary parts.
+
+ Examples
+ --------
+ >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0])
+ >>> p = np.partition(a, 4)
+ >>> p
+ array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7])
+
+ ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal
+ to ``p[4]``, and all elements in ``p[5:]`` are greater than or
+ equal to ``p[4]``. The partition is::
+
+ [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7]
+
+ The next example shows the use of multiple values passed to `kth`.
+
+ >>> p2 = np.partition(a, (4, 8))
+ >>> p2
+ array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7])
+
+ ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]``
+ are less than or equal to ``p2[4]``, all elements in ``p2[5:8]``
+ are greater than or equal to ``p2[4]`` and less than or equal to
+ ``p2[8]``, and all elements in ``p2[9:]`` are greater than or
+ equal to ``p2[8]``. The partition is::
+
+ [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7]
+ """
+ if axis is None:
+ # flatten returns (1, N) for np.matrix, so always use the last axis
+ a = asanyarray(a).flatten()
+ axis = -1
+ else:
+ a = asanyarray(a).copy(order="K")
+ a.partition(kth, axis=axis, kind=kind, order=order)
+ return a
+
+
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
+def argpartition(a, kth, axis=-1, kind='introselect', order=None):
+ """
+ Perform an indirect partition along the given axis using the
+ algorithm specified by the `kind` keyword. It returns an array of
+ indices of the same shape as `a` that index data along the given
+ axis in partitioned order.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array to sort.
+ kth : int or sequence of ints
+ Element index to partition by. The k-th element will be in its
+ final sorted position and all smaller elements will be moved
+ before it and all larger elements behind it. The order of all
+ elements in the partitions is undefined. If provided with a
+ sequence of k-th it will partition all of them into their sorted
+ position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
+ axis : int or None, optional
+ Axis along which to sort. The default is -1 (the last axis). If
+ None, the flattened array is used.
+ kind : {'introselect'}, optional
+ Selection algorithm. Default is 'introselect'
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument
+ specifies which fields to compare first, second, etc. A single
+ field can be specified as a string, and not all fields need be
+ specified, but unspecified fields will still be used, in the
+ order in which they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that partition `a` along the specified axis.
+ If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+ always yields the partitioned `a`, irrespective of dimensionality.
+
+ See Also
+ --------
+ partition : Describes partition algorithms used.
+ ndarray.partition : Inplace partition.
+ argsort : Full indirect sort.
+ take_along_axis : Apply ``index_array`` from argpartition
+ to an array as if by calling partition.
+
+ Notes
+ -----
+ See `partition` for notes on the different selection algorithms.
+
+ Examples
+ --------
+ One dimensional array:
+
+ >>> x = np.array([3, 4, 2, 1])
+ >>> x[np.argpartition(x, 3)]
+ array([2, 1, 3, 4])
+ >>> x[np.argpartition(x, (1, 3))]
+ array([1, 2, 3, 4])
+
+ >>> x = [3, 4, 2, 1]
+ >>> np.array(x)[np.argpartition(x, 3)]
+ array([2, 1, 3, 4])
+
+ Multi-dimensional array:
+
+ >>> x = np.array([[3, 4, 2], [1, 3, 1]])
+ >>> index_array = np.argpartition(x, kth=1, axis=-1)
+ >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
+ array([[2, 3, 4],
+ [1, 1, 3]])
+
+ """
+ return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
+
+
+def _sort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
+def sort(a, axis=-1, kind=None, order=None):
+ """
+ Return a sorted copy of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ axis : int or None, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort or radix sort under the covers and, in general,
+ the actual implementation will vary with data type. The 'mergesort' option
+ is retained for backwards compatibility.
+
+        .. versionchanged:: 1.15.0
+ The 'stable' option was added.
+
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ sorted_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ ndarray.sort : Method to sort an array in-place.
+ argsort : Indirect sort.
+ lexsort : Indirect stable sort on multiple keys.
+ searchsorted : Find elements in a sorted array.
+ partition : Partial sort.
+
+ Notes
+ -----
+ The various sorting algorithms are characterized by their average speed,
+ worst case performance, work space size, and whether they are stable. A
+ stable sort keeps items with the same key in the same relative
+ order. The four algorithms implemented in NumPy have the following
+ properties:
+
+ =========== ======= ============= ============ ========
+ kind speed worst case work space stable
+ =========== ======= ============= ============ ========
+ 'quicksort' 1 O(n^2) 0 no
+ 'heapsort' 3 O(n*log(n)) 0 no
+ 'mergesort' 2 O(n*log(n)) ~n/2 yes
+ 'timsort' 2 O(n*log(n)) ~n/2 yes
+ =========== ======= ============= ============ ========
+
+ .. note:: The datatype determines which of 'mergesort' or 'timsort'
+ is actually used, even if 'mergesort' is specified. User selection
+ at a finer scale is not currently available.
+
+ All the sort algorithms make temporary copies of the data when
+ sorting along any but the last axis. Consequently, sorting along
+ the last axis is faster and uses less space than sorting along
+ any other axis.
+
+ The sort order for complex numbers is lexicographic. If both the real
+ and imaginary parts are non-nan then the order is determined by the
+ real parts except when they are equal, in which case the order is
+ determined by the imaginary parts.
+
+ Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+ values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+ values are sorted to the end. The extended sort order is:
+
+ * Real: [R, nan]
+ * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+ where R is a non-nan real value. Complex values with the same nan
+ placements are sorted according to the non-nan part if it exists.
+ Non-nan values are sorted as before.
+
+ .. versionadded:: 1.12.0
+
+ quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+ When sorting does not make enough progress it switches to
+ `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+ This implementation makes quicksort O(n*log(n)) in the worst case.
+
+ 'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted.
+ It, along with 'mergesort' is currently mapped to
+ `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+ or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+ depending on the data type.
+ API forward compatibility currently limits the
+ ability to select the implementation and it is hardwired for the different
+ data types.
+
+ .. versionadded:: 1.17.0
+
+ Timsort is added for better performance on already or nearly
+ sorted data. On random data timsort is almost identical to
+ mergesort. It is now used for stable sort while quicksort is still the
+ default sort if none is chosen. For timsort details, refer to
+ `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+ 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
+ O(n) sort instead of O(n log n).
+
+ .. versionchanged:: 1.18.0
+
+ NaT now sorts to the end of arrays for consistency with NaN.
+
+ Examples
+ --------
+ >>> a = np.array([[1,4],[3,1]])
+ >>> np.sort(a) # sort along the last axis
+ array([[1, 4],
+ [1, 3]])
+ >>> np.sort(a, axis=None) # sort the flattened array
+ array([1, 1, 3, 4])
+ >>> np.sort(a, axis=0) # sort along the first axis
+ array([[1, 1],
+ [3, 4]])
+
+ Use the `order` keyword to specify a field to use when sorting a
+ structured array:
+
+ >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+ >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+ ... ('Galahad', 1.7, 38)]
+ >>> a = np.array(values, dtype=dtype) # create a structured array
+ >>> np.sort(a, order='height') # doctest: +SKIP
+ array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+ ('Lancelot', 1.8999999999999999, 38)],
+ dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+ Sort by age, then height if ages are equal:
+
+ >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
+ array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+ ('Arthur', 1.8, 41)],
+ dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+ """
+ if axis is None:
+ # flatten returns (1, N) for np.matrix, so always use the last axis
+ a = asanyarray(a).flatten()
+ axis = -1
+ else:
+ a = asanyarray(a).copy(order="K")
+ a.sort(axis=axis, kind=kind, order=order)
+ return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None):
+ """
+ Returns the indices that would sort an array.
+
+ Perform an indirect sort along the given axis using the algorithm specified
+ by the `kind` keyword. It returns an array of indices of the same shape as
+ `a` that index data along the given axis in sorted order.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to sort.
+ axis : int or None, optional
+ Axis along which to sort. The default is -1 (the last axis). If None,
+ the flattened array is used.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort under the covers and, in general, the
+ actual implementation will vary with data type. The 'mergesort' option
+ is retained for backwards compatibility.
+
+        .. versionchanged:: 1.15.0
+ The 'stable' option was added.
+ order : str or list of str, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need be specified,
+ but unspecified fields will still be used, in the order in which
+ they come up in the dtype, to break ties.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that sort `a` along the specified `axis`.
+ If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+ always yields the sorted `a`, irrespective of dimensionality.
+
+ See Also
+ --------
+ sort : Describes sorting algorithms used.
+ lexsort : Indirect stable sort with multiple keys.
+ ndarray.sort : Inplace sort.
+ argpartition : Indirect partial sort.
+ take_along_axis : Apply ``index_array`` from argsort
+ to an array as if by calling sort.
+
+ Notes
+ -----
+ See `sort` for notes on the different sorting algorithms.
+
+ As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+ nan values. The enhanced sort order is documented in `sort`.
+
+ Examples
+ --------
+ One dimensional array:
+
+ >>> x = np.array([3, 1, 2])
+ >>> np.argsort(x)
+ array([1, 2, 0])
+
+ Two-dimensional array:
+
+ >>> x = np.array([[0, 3], [2, 2]])
+ >>> x
+ array([[0, 3],
+ [2, 2]])
+
+ >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
+ >>> ind
+ array([[0, 1],
+ [1, 0]])
+ >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
+ array([[0, 2],
+ [2, 3]])
+
+ >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
+ >>> ind
+ array([[0, 1],
+ [0, 1]])
+ >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
+ array([[0, 3],
+ [2, 2]])
+
+ Indices of the sorted elements of a N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+ >>> ind
+ (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+ >>> x[ind] # same as np.sort(x, axis=None)
+ array([0, 2, 2, 3])
+
+ Sorting with keys:
+
+ >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+ >>> x
+ array([(1, 0), (0, 1)],
+ dtype=[('x', '<i4'), ('y', '<i4')])
+
+ >>> np.argsort(x, order=('x','y'))
+ array([1, 0])
+
+ >>> np.argsort(x, order=('y','x'))
+ array([0, 1])
+
+ """
+ return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
+
+
+def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+ return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Returns the indices of the maximum values along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ By default, the index is into the flattened array, otherwise
+ along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a`,
+        with the dimension along `axis` removed. If `keepdims` is set to
+        True, then the size of `axis` will be 1 and the resulting array
+        will have the same shape as `a`.
+
+ See Also
+ --------
+ ndarray.argmax, argmin
+ amax : The maximum value along a given axis.
+ unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmax to an array as if by calling max.
+
+ Notes
+ -----
+ In case of multiple occurrences of the maximum values, the indices
+ corresponding to the first occurrence are returned.
+
+ Examples
+ --------
+ >>> a = np.arange(6).reshape(2,3) + 10
+ >>> a
+ array([[10, 11, 12],
+ [13, 14, 15]])
+ >>> np.argmax(a)
+ 5
+ >>> np.argmax(a, axis=0)
+ array([1, 1, 1])
+ >>> np.argmax(a, axis=1)
+ array([2, 2])
+
+ Indices of the maximum elements of an N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
+ >>> ind
+ (1, 2)
+ >>> a[ind]
+ 15
+
+ >>> b = np.arange(6)
+ >>> b[1] = 5
+ >>> b
+ array([0, 5, 2, 3, 4, 5])
+ >>> np.argmax(b) # Only the first occurrence is returned.
+ 1
+
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmax(x, axis=-1)
+ >>> # Same as np.amax(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[4],
+ [3]])
+ >>> # Same as np.amax(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([4, 3])
+
+ Setting `keepdims` to `True`,
+
+ >>> x = np.arange(24).reshape((2, 3, 4))
+ >>> res = np.argmax(x, axis=1, keepdims=True)
+ >>> res.shape
+ (2, 1, 4)
+ """
+ kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+ return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds)
+
+
+def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+ return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
+def argmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Returns the indices of the minimum values along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ By default, the index is into the flattened array, otherwise
+ along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray of ints
+ Array of indices into the array. It has the same shape as `a`,
+ with the dimension along `axis` removed. If `keepdims` is set to True,
+ then the size of `axis` will be 1, and the resulting array will have
+ the same shape as `a`.
+
+ See Also
+ --------
+ ndarray.argmin, argmax
+ amin : The minimum value along a given axis.
+ unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmin to an array as if by calling min.
+
+ Notes
+ -----
+ In case of multiple occurrences of the minimum values, the indices
+ corresponding to the first occurrence are returned.
+
+ Examples
+ --------
+ >>> a = np.arange(6).reshape(2,3) + 10
+ >>> a
+ array([[10, 11, 12],
+ [13, 14, 15]])
+ >>> np.argmin(a)
+ 0
+ >>> np.argmin(a, axis=0)
+ array([0, 0, 0])
+ >>> np.argmin(a, axis=1)
+ array([0, 0])
+
+ Indices of the minimum elements of an N-dimensional array:
+
+ >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
+ >>> ind
+ (0, 0)
+ >>> a[ind]
+ 10
+
+ >>> b = np.arange(6) + 10
+ >>> b[4] = 10
+ >>> b
+ array([10, 11, 12, 13, 10, 15])
+ >>> np.argmin(b) # Only the first occurrence is returned.
+ 0
+
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmin(x, axis=-1)
+ >>> # Same as np.amin(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[2],
+ [0]])
+ >>> # Same as np.amin(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([2, 0])
+
+ Setting `keepdims` to `True`,
+
+ >>> x = np.arange(24).reshape((2, 3, 4))
+ >>> res = np.argmin(x, axis=1, keepdims=True)
+ >>> res.shape
+ (2, 1, 4)
+ """
+ kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+ return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+ return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+ """
+ Find indices where elements should be inserted to maintain order.
+
+ Find the indices into a sorted array `a` such that, if the
+ corresponding elements in `v` were inserted before the indices, the
+ order of `a` would be preserved.
+
+ Assuming that `a` is sorted:
+
+ ====== ============================
+ `side` returned index `i` satisfies
+ ====== ============================
+ left ``a[i-1] < v <= a[i]``
+ right ``a[i-1] <= v < a[i]``
+ ====== ============================
+
+ Parameters
+ ----------
+ a : 1-D array_like
+ Input array. If `sorter` is None, then it must be sorted in
+ ascending order, otherwise `sorter` must be an array of indices
+ that sort it.
+ v : array_like
+ Values to insert into `a`.
+ side : {'left', 'right'}, optional
+ If 'left', the index of the first suitable location found is given.
+ If 'right', return the last such index. If there is no suitable
+ index, return either 0 or N (where N is the length of `a`).
+ sorter : 1-D array_like, optional
+ Optional array of integer indices that sort array `a` into ascending
+ order. They are typically the result of `argsort`.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ indices : int or array of ints
+ Array of insertion points with the same shape as `v`,
+ or an integer if `v` is a scalar.
+
+ See Also
+ --------
+ sort : Return a sorted copy of an array.
+ histogram : Produce histogram from 1-D data.
+
+ Notes
+ -----
+ Binary search is used to find the required insertion points.
+
+ As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
+ `nan` values. The enhanced sort order is documented in `sort`.
+
+ This function uses the same algorithm as the built-in Python
+ `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right`
+ (``side='right'``) functions, but unlike them it is vectorized in the
+ `v` argument.
+
+ Examples
+ --------
+ >>> np.searchsorted([1,2,3,4,5], 3)
+ 2
+ >>> np.searchsorted([1,2,3,4,5], 3, side='right')
+ 3
+ >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
+ array([0, 5, 1, 2])
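+
+ The scalar lookups above agree with the standard-library `bisect` module,
+ as the Notes describe:
+
+ >>> import bisect
+ >>> bisect.bisect_left([1,2,3,4,5], 3)
+ 2
+ >>> bisect.bisect_right([1,2,3,4,5], 3)
+ 3
+
+ An unsorted array can be searched through a `sorter` produced by `argsort`:
+
+ >>> a = np.array([40, 10, 30, 20])
+ >>> np.searchsorted(a, 25, sorter=np.argsort(a))
+ 2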
+
+ """
+ return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
+
+
+def _resize_dispatcher(a, new_shape):
+ return (a,)
+
+
+@array_function_dispatch(_resize_dispatcher)
+def resize(a, new_shape):
+ """
+ Return a new array with the specified shape.
+
+ If the new array is larger than the original array, then the new
+ array is filled with repeated copies of `a`. Note that this behavior
+ is different from ``a.resize(new_shape)``, which fills with zeros
+ instead of repeated copies of `a`.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be resized.
+
+ new_shape : int or tuple of int
+ Shape of resized array.
+
+ Returns
+ -------
+ reshaped_array : ndarray
+ The new array is formed from the data in the old array, repeated
+ if necessary to fill out the required number of elements. The
+ data are repeated iterating over the array in C-order.
+
+ See Also
+ --------
+ numpy.reshape : Reshape an array without changing the total size.
+ numpy.pad : Enlarge and pad an array.
+ numpy.repeat : Repeat elements of an array.
+ ndarray.resize : resize an array in-place.
+
+ Notes
+ -----
+ When the total size of the array does not change, `~numpy.reshape` should
+ be used. In most other cases either indexing (to reduce the size)
+ or padding (to increase the size) may be a more appropriate solution.
+
+ Warning: This functionality does **not** consider axes separately,
+ i.e. it does not apply interpolation/extrapolation.
+ It fills the return array with the required number of elements, iterating
+ over `a` in C-order, disregarding axes (and cycling back from the start if
+ the new shape is larger). This functionality is therefore not suitable to
+ resize images, or data where each axis represents a separate and distinct
+ entity.
+
+ Examples
+ --------
+ >>> a=np.array([[0,1],[2,3]])
+ >>> np.resize(a,(2,3))
+ array([[0, 1, 2],
+ [3, 0, 1]])
+ >>> np.resize(a,(1,4))
+ array([[0, 1, 2, 3]])
+ >>> np.resize(a,(2,4))
+ array([[0, 1, 2, 3],
+ [0, 1, 2, 3]])
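+
+ Shrinking simply drops the trailing elements:
+
+ >>> np.resize(a,(1,3))
+ array([[0, 1, 2]])
+
+ By contrast, the in-place `ndarray.resize` pads with zeros rather than
+ repeating the data (``refcheck=False`` is passed here only so the resize
+ succeeds even if other references to the array exist):
+
+ >>> b = a.copy()
+ >>> b.resize((2, 3), refcheck=False)
+ >>> b
+ array([[0, 1, 2],
+ [3, 0, 0]])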
+
+ """
+ if isinstance(new_shape, (int, nt.integer)):
+ new_shape = (new_shape,)
+
+ a = ravel(a)
+
+ new_size = 1
+ for dim_length in new_shape:
+ new_size *= dim_length
+ if dim_length < 0:
+ raise ValueError('all elements of `new_shape` must be non-negative')
+
+ if a.size == 0 or new_size == 0:
+ # First case must zero fill. The second would have repeats == 0.
+ return np.zeros_like(a, shape=new_shape)
+
+ repeats = -(-new_size // a.size) # ceil division
+ a = concatenate((a,) * repeats)[:new_size]
+
+ return reshape(a, new_shape)
+
+
+def _squeeze_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_squeeze_dispatcher)
+def squeeze(a, axis=None):
+ """
+ Remove axes of length one from `a`.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ .. versionadded:: 1.7.0
+
+ Selects a subset of the entries of length one in the
+ shape. If an axis is selected with shape entry greater than
+ one, an error is raised.
+
+ Returns
+ -------
+ squeezed : ndarray
+ The input array, but with all or a subset of the
+ dimensions of length 1 removed. This is always `a` itself
+ or a view into `a`. Note that if all axes are squeezed,
+ the result is a 0d array and not a scalar.
+
+ Raises
+ ------
+ ValueError
+ If `axis` is not None, and an axis being squeezed is not of length 1
+
+ See Also
+ --------
+ expand_dims : The inverse operation, adding entries of length one
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
+
+ Examples
+ --------
+ >>> x = np.array([[[0], [1], [2]]])
+ >>> x.shape
+ (1, 3, 1)
+ >>> np.squeeze(x).shape
+ (3,)
+ >>> np.squeeze(x, axis=0).shape
+ (3, 1)
+ >>> np.squeeze(x, axis=1).shape
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot select an axis to squeeze out which has size not equal to one
+ >>> np.squeeze(x, axis=2).shape
+ (1, 3)
+ >>> x = np.array([[1234]])
+ >>> x.shape
+ (1, 1)
+ >>> np.squeeze(x)
+ array(1234) # 0d array
+ >>> np.squeeze(x).shape
+ ()
+ >>> np.squeeze(x)[()]
+ 1234
+
+ """
+ try:
+ squeeze = a.squeeze
+ except AttributeError:
+ return _wrapit(a, 'squeeze', axis=axis)
+ if axis is None:
+ return squeeze()
+ else:
+ return squeeze(axis=axis)
+
+
+def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
+ return (a,)
+
+
+@array_function_dispatch(_diagonal_dispatcher)
+def diagonal(a, offset=0, axis1=0, axis2=1):
+ """
+ Return specified diagonals.
+
+ If `a` is 2-D, returns the diagonal of `a` with the given offset,
+ i.e., the collection of elements of the form ``a[i, i+offset]``. If
+ `a` has more than two dimensions, then the axes specified by `axis1`
+ and `axis2` are used to determine the 2-D sub-array whose diagonal is
+ returned. The shape of the resulting array can be determined by
+ removing `axis1` and `axis2` and appending an index to the right equal
+ to the size of the resulting diagonals.
+
+ In versions of NumPy prior to 1.7, this function always returned a new,
+ independent array containing a copy of the values in the diagonal.
+
+ In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
+ but depending on this fact is deprecated. Writing to the resulting
+ array continues to work as it used to, but a FutureWarning is issued.
+
+ Starting in NumPy 1.9 it returns a read-only view on the original array.
+ Attempting to write to the resulting array will produce an error.
+
+ In some future release, it will return a read/write view and writing to
+ the returned array will alter your original array. The returned array
+ will have the same type as the input array.
+
+ If you don't write to the array returned by this function, then you can
+ just ignore all of the above.
+
+ If you depend on the current behavior, then we suggest copying the
+ returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
+ of just ``np.diagonal(a)``. This will work with both past and future
+ versions of NumPy.
+
+ Parameters
+ ----------
+ a : array_like
+ Array from which the diagonals are taken.
+ offset : int, optional
+ Offset of the diagonal from the main diagonal. Can be positive or
+ negative. Defaults to main diagonal (0).
+ axis1 : int, optional
+ Axis to be used as the first axis of the 2-D sub-arrays from which
+ the diagonals should be taken. Defaults to first axis (0).
+ axis2 : int, optional
+ Axis to be used as the second axis of the 2-D sub-arrays from
+ which the diagonals should be taken. Defaults to second axis (1).
+
+ Returns
+ -------
+ array_of_diagonals : ndarray
+ If `a` is 2-D, then a 1-D array containing the diagonal and of the
+ same type as `a` is returned unless `a` is a `matrix`, in which case
+ a 1-D array rather than a (2-D) `matrix` is returned in order to
+ maintain backward compatibility.
+
+ If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
+ are removed, and a new axis inserted at the end corresponding to the
+ diagonal.
+
+ Raises
+ ------
+ ValueError
+ If the dimension of `a` is less than 2.
+
+ See Also
+ --------
+ diag : MATLAB work-alike for 1-D and 2-D arrays.
+ diagflat : Create diagonal arrays.
+ trace : Sum along diagonals.
+
+ Examples
+ --------
+ >>> a = np.arange(4).reshape(2,2)
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> a.diagonal()
+ array([0, 3])
+ >>> a.diagonal(1)
+ array([1])
+
+ A 3-D example:
+
+ >>> a = np.arange(8).reshape(2,2,2); a
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> a.diagonal(0, # Main diagonals of two arrays created by skipping
+ ... 0, # across the outer(left)-most axis last and
+ ... 1) # the "middle" (row) axis first.
+ array([[0, 6],
+ [1, 7]])
+
+ The sub-arrays whose main diagonals we just obtained; note that each
+ corresponds to fixing the right-most (column) axis, and that the
+ diagonals are "packed" in rows.
+
+ >>> a[:,:,0] # main diagonal is [0 6]
+ array([[0, 2],
+ [4, 6]])
+ >>> a[:,:,1] # main diagonal is [1 7]
+ array([[1, 3],
+ [5, 7]])
+
+ The anti-diagonal can be obtained by reversing the order of elements
+ using either `numpy.flipud` or `numpy.fliplr`.
+
+ >>> a = np.arange(9).reshape(3, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> np.fliplr(a).diagonal() # Horizontal flip
+ array([2, 4, 6])
+ >>> np.flipud(a).diagonal() # Vertical flip
+ array([6, 4, 2])
+
+ Note that the order in which the diagonal is retrieved varies depending
+ on the flip function.
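+
+ For higher-dimensional input, the shape rule from the Returns section can
+ be verified directly:
+
+ >>> np.arange(24).reshape(2, 3, 4).diagonal(axis1=1, axis2=2).shape
+ (2, 3)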
+ """
+ if isinstance(a, np.matrix):
+ # Make diagonal of matrix 1-D to preserve backward compatibility.
+ return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+ else:
+ return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+
+
+def _trace_dispatcher(
+ a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_trace_dispatcher)
+def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+ """
+ Return the sum along diagonals of the array.
+
+ If `a` is 2-D, the sum along its diagonal with the given offset
+ is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
+
+ If `a` has more than two dimensions, then the axes specified by axis1 and
+ axis2 are used to determine the 2-D sub-arrays whose traces are returned.
+ The shape of the resulting array is the same as that of `a` with `axis1`
+ and `axis2` removed.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, from which the diagonals are taken.
+ offset : int, optional
+ Offset of the diagonal from the main diagonal. Can be both positive
+ and negative. Defaults to 0.
+ axis1, axis2 : int, optional
+ Axes to be used as the first and second axis of the 2-D sub-arrays
+ from which the diagonals should be taken. Defaults are the first two
+ axes of `a`.
+ dtype : dtype, optional
+ Determines the data-type of the returned array and of the accumulator
+ where the elements are summed. If dtype has the value None and `a` is
+ of integer type of precision less than the default integer
+ precision, then the default integer precision is used. Otherwise,
+ the precision is the same as that of `a`.
+ out : ndarray, optional
+ Array into which the output is placed. Its type is preserved and
+ it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ sum_along_diagonals : ndarray
+ If `a` is 2-D, the sum along the diagonal is returned. If `a` has
+ larger dimensions, then an array of sums along diagonals is returned.
+
+ See Also
+ --------
+ diag, diagonal, diagflat
+
+ Examples
+ --------
+ >>> np.trace(np.eye(3))
+ 3.0
+ >>> a = np.arange(8).reshape((2,2,2))
+ >>> np.trace(a)
+ array([6, 8])
+
+ >>> a = np.arange(24).reshape((2,2,2,3))
+ >>> np.trace(a).shape
+ (2, 3)
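+
+ A nonzero `offset` sums a shifted diagonal:
+
+ >>> a = np.arange(9).reshape((3,3))
+ >>> np.trace(a, offset=1) # a[0,1] + a[1,2]
+ 6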
+
+ """
+ if isinstance(a, np.matrix):
+ # Get trace of matrix via an array to preserve backward compatibility.
+ return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
+ else:
+ return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
+
+
+def _ravel_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_ravel_dispatcher)
+def ravel(a, order='C'):
+ """Return a contiguous flattened array.
+
+ A 1-D array, containing the elements of the input, is returned. A copy is
+ made only if needed.
+
+ As of NumPy 1.10, the returned array will have the same type as the input
+ array (for example, a masked array will be returned for a masked array
+ input).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array. The elements in `a` are read in the order specified by
+ `order`, and packed as a 1-D array.
+ order : {'C', 'F', 'A', 'K'}, optional
+
+ The elements of `a` are read using this index order. 'C' means
+ to index the elements in row-major, C-style order,
+ with the last axis index changing fastest, back to the first
+ axis index changing slowest. 'F' means to index the elements
+ in column-major, Fortran-style order, with the
+ first index changing fastest, and the last index changing
+ slowest. Note that the 'C' and 'F' options take no account of
+ the memory layout of the underlying array, and only refer to
+ the order of axis indexing. 'A' means to read the elements in
+ Fortran-like index order if `a` is Fortran *contiguous* in
+ memory, C-like order otherwise. 'K' means to read the
+ elements in the order they occur in memory, except for
+ reversing the data when strides are negative. By default, 'C'
+ index order is used.
+
+ Returns
+ -------
+ y : array_like
+ y is an array of the same subtype as `a`, with shape ``(a.size,)``.
+ Note that matrices are special-cased for backward compatibility: if `a`
+ is a matrix, then y is a 1-D ndarray.
+
+ See Also
+ --------
+ ndarray.flat : 1-D iterator over an array.
+ ndarray.flatten : 1-D array copy of the elements of an array
+ in row-major order.
+ ndarray.reshape : Change the shape of an array without changing its data.
+
+ Notes
+ -----
+ In row-major, C-style order, in two dimensions, the row index
+ varies the slowest, and the column index the quickest. This can
+ be generalized to multiple dimensions, where row-major order
+ implies that the index along the first axis varies slowest, and
+ the index along the last quickest. The opposite holds for
+ column-major, Fortran-style index ordering.
+
+ When a view is desired in as many cases as possible, ``arr.reshape(-1)``
+ may be preferable.
+
+ Examples
+ --------
+ It is equivalent to ``reshape(-1, order=order)``.
+
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.ravel(x)
+ array([1, 2, 3, 4, 5, 6])
+
+ >>> x.reshape(-1)
+ array([1, 2, 3, 4, 5, 6])
+
+ >>> np.ravel(x, order='F')
+ array([1, 4, 2, 5, 3, 6])
+
+ When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
+
+ >>> np.ravel(x.T)
+ array([1, 4, 2, 5, 3, 6])
+ >>> np.ravel(x.T, order='A')
+ array([1, 2, 3, 4, 5, 6])
+
+ When ``order`` is 'K', it will preserve orderings that are neither 'C'
+ nor 'F', but won't reverse axes:
+
+ >>> a = np.arange(3)[::-1]; a
+ array([2, 1, 0])
+ >>> a.ravel(order='C')
+ array([2, 1, 0])
+ >>> a.ravel(order='K')
+ array([2, 1, 0])
+
+ >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
+ array([[[ 0, 2, 4],
+ [ 1, 3, 5]],
+ [[ 6, 8, 10],
+ [ 7, 9, 11]]])
+ >>> a.ravel(order='C')
+ array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
+ >>> a.ravel(order='K')
+ array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+
+ """
+ if isinstance(a, np.matrix):
+ return asarray(a).ravel(order=order)
+ else:
+ return asanyarray(a).ravel(order=order)
+
+
+def _nonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_nonzero_dispatcher)
+def nonzero(a):
+ """
+ Return the indices of the elements that are non-zero.
+
+ Returns a tuple of arrays, one for each dimension of `a`,
+ containing the indices of the non-zero elements in that
+ dimension. The values in `a` are always tested and returned in
+ row-major, C-style order.
+
+ To group the indices by element, rather than dimension, use `argwhere`,
+ which returns a row for each non-zero element.
+
+ .. note::
+
+ When called on a zero-d array or scalar, ``nonzero(a)`` is treated
+ as ``nonzero(atleast_1d(a))``.
+
+ .. deprecated:: 1.17.0
+
+ Use `atleast_1d` explicitly if this behavior is deliberate.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ tuple_of_arrays : tuple
+ Indices of elements that are non-zero.
+
+ See Also
+ --------
+ flatnonzero :
+ Return indices that are non-zero in the flattened version of the input
+ array.
+ ndarray.nonzero :
+ Equivalent ndarray method.
+ count_nonzero :
+ Counts the number of non-zero elements in the input array.
+
+ Notes
+ -----
+ While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
+ recommended to use ``a[a.astype(bool)]`` or ``a[a != 0]`` instead, which
+ will correctly handle 0-d arrays.
+
+ Examples
+ --------
+ >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
+ >>> x
+ array([[3, 0, 0],
+ [0, 4, 0],
+ [5, 6, 0]])
+ >>> np.nonzero(x)
+ (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
+
+ >>> x[np.nonzero(x)]
+ array([3, 4, 5, 6])
+ >>> np.transpose(np.nonzero(x))
+ array([[0, 0],
+ [1, 1],
+ [2, 0],
+ [2, 1]])
+
+ A common use for ``nonzero`` is to find the indices of an array where
+ a condition is True. Given an array `a`, the condition ``a > 3`` is a
+ boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
+ yields the indices of `a` where the condition is true.
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> a > 3
+ array([[False, False, False],
+ [ True, True, True],
+ [ True, True, True]])
+ >>> np.nonzero(a > 3)
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ Using this result to index `a` is equivalent to using the mask directly:
+
+ >>> a[np.nonzero(a > 3)]
+ array([4, 5, 6, 7, 8, 9])
+ >>> a[a > 3] # prefer this spelling
+ array([4, 5, 6, 7, 8, 9])
+
+ ``nonzero`` can also be called as a method of the array.
+
+ >>> (a > 3).nonzero()
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ """
+ return _wrapfunc(a, 'nonzero')
+
+
+def _shape_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
+def shape(a):
+ """
+ Return the shape of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+
+ Returns
+ -------
+ shape : tuple of ints
+ The elements of the shape tuple give the lengths of the
+ corresponding array dimensions.
+
+ See Also
+ --------
+ len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
+ ``N>=1``.
+ ndarray.shape : Equivalent array method.
+
+ Examples
+ --------
+ >>> np.shape(np.eye(3))
+ (3, 3)
+ >>> np.shape([[1, 3]])
+ (1, 2)
+ >>> np.shape([0])
+ (1,)
+ >>> np.shape(0)
+ ()
+
+ >>> a = np.array([(1, 2), (3, 4), (5, 6)],
+ ... dtype=[('x', 'i4'), ('y', 'i4')])
+ >>> np.shape(a)
+ (3,)
+ >>> a.shape
+ (3,)
+
+ """
+ try:
+ result = a.shape
+ except AttributeError:
+ result = asarray(a).shape
+ return result
+
+
+def _compress_dispatcher(condition, a, axis=None, out=None):
+ return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
+def compress(condition, a, axis=None, out=None):
+ """
+ Return selected slices of an array along given axis.
+
+ When working along a given axis, a slice along that axis is returned in
+ `output` for each index where `condition` evaluates to True. When
+ working on a 1-D array, `compress` is equivalent to `extract`.
+
+ Parameters
+ ----------
+ condition : 1-D array of bools
+ Array that selects which entries to return. If len(condition)
+ is less than the size of `a` along the given axis, then output is
+ truncated to the length of the condition array.
+ a : array_like
+ Array from which to extract a part.
+ axis : int, optional
+ Axis along which to take slices. If None (default), work on the
+ flattened array.
+ out : ndarray, optional
+ Output array. Its type is preserved and it must be of the right
+ shape to hold the output.
+
+ Returns
+ -------
+ compressed_array : ndarray
+ A copy of `a` without the slices along axis for which `condition`
+ is false.
+
+ See Also
+ --------
+ take, choose, diag, diagonal, select
+ ndarray.compress : Equivalent method in ndarray
+ extract : Equivalent function when working on 1-D arrays
+ :ref:`ufuncs-output-type`
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> a
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.compress([0, 1], a, axis=0)
+ array([[3, 4]])
+ >>> np.compress([False, True, True], a, axis=0)
+ array([[3, 4],
+ [5, 6]])
+ >>> np.compress([False, True], a, axis=1)
+ array([[2],
+ [4],
+ [6]])
+
+ Working on the flattened array does not return slices along an axis but
+ selects elements.
+
+ >>> np.compress([False, True], a)
+ array([2])
+
+ """
+ return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
+
+
+def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
+ return (a, a_min, a_max)
+
+
+@array_function_dispatch(_clip_dispatcher)
+def clip(a, a_min, a_max, out=None, **kwargs):
+ """
+ Clip (limit) the values in an array.
+
+ Given an interval, values outside the interval are clipped to
+ the interval edges. For example, if an interval of ``[0, 1]``
+ is specified, values smaller than 0 become 0, and values larger
+ than 1 become 1.
+
+ Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
+
+ No check is performed to ensure ``a_min < a_max``.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing elements to clip.
+ a_min, a_max : array_like or None
+ Minimum and maximum value. If ``None``, clipping is not performed on
+ the corresponding edge. Only one of `a_min` and `a_max` may be
+ ``None``. Both are broadcast against `a`.
+ out : ndarray, optional
+ The results will be placed in this array. It may be the input
+ array for in-place clipping. `out` must be of the right shape
+ to hold the output. Its type is preserved.
+ **kwargs
+ For other keyword-only arguments, see the
+ :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ clipped_array : ndarray
+ An array with the elements of `a`, but where values
+ < `a_min` are replaced with `a_min`, and those > `a_max`
+ with `a_max`.
+
+ See Also
+ --------
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ When `a_min` is greater than `a_max`, `clip` returns an
+ array in which all values are equal to `a_max`,
+ as shown in the second example.
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.clip(a, 1, 8)
+ array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
+ >>> np.clip(a, 8, 1)
+ array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
+ >>> np.clip(a, 3, 6, out=a)
+ array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+ >>> a
+ array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
+ array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
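+
+ Either bound may be ``None`` to clip on one side only:
+
+ >>> np.clip(a, None, 6)
+ array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])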
+
+ """
+ return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
+
+
+def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_sum_dispatcher)
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+ Sum of array elements over a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Elements to sum.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a sum is performed. The default,
+ axis=None, will sum all of the elements of the input array. If
+ axis is negative it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If axis is a tuple of ints, a sum is performed on all of the axes
+ specified in the tuple instead of a single axis or all the axes as
+ before.
+ dtype : dtype, optional
+ The type of the returned array and of the accumulator in which the
+ elements are summed. The dtype of `a` is used by default unless `a`
+ has an integer dtype of less precision than the default platform
+ integer. In that case, if `a` is signed then the platform integer
+ is used while if `a` is unsigned then an unsigned integer of the
+ same precision as the platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `sum` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ where : array_like of bool, optional
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ sum_along_axis : ndarray
+ An array with the same shape as `a`, with the specified
+ axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
+ is returned. If an output array is specified, a reference to
+ `out` is returned.
+
+ See Also
+ --------
+ ndarray.sum : Equivalent method.
+
+ add.reduce : Equivalent functionality of `add`.
+
+ cumsum : Cumulative sum of array elements.
+
+ trapz : Integration of array values using the composite trapezoidal rule.
+
+ mean, average
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ The sum of an empty array is the neutral element 0:
+
+ >>> np.sum([])
+ 0.0
+
+ For floating point numbers, the numerical precision of sum (and
+ ``np.add.reduce``) is in general limited by directly adding each number
+ individually to the result, causing rounding errors in every step.
+ However, often numpy will use a numerically better approach (partial
+ pairwise summation) leading to improved precision in many use-cases.
+ This improved precision is always provided when no ``axis`` is given.
+ When ``axis`` is given, it will depend on which axis is summed.
+ Technically, to provide the best speed possible, the improved precision
+ is only used when the summation is along the fast axis in memory.
+ Note that the exact precision may vary depending on other parameters.
+ In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
+ more precise approach to summation.
+ Especially when summing a large number of lower precision floating point
+ numbers, such as ``float32``, numerical errors can become significant.
+ In such cases it can be advisable to use ``dtype="float64"`` to compute
+ the output at a higher precision.
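+
+ The precision trade-off is easy to see with plain Python floats, where
+ ``math.fsum`` compensates for the rounding that naive accumulation incurs:
+
+ >>> import math
+ >>> sum([0.1] * 10)
+ 0.9999999999999999
+ >>> math.fsum([0.1] * 10)
+ 1.0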
+
+ Examples
+ --------
+ >>> np.sum([0.5, 1.5])
+ 2.0
+ >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
+ 1
+ >>> np.sum([[0, 1], [0, 5]])
+ 6
+ >>> np.sum([[0, 1], [0, 5]], axis=0)
+ array([0, 6])
+ >>> np.sum([[0, 1], [0, 5]], axis=1)
+ array([1, 5])
+ >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
+ array([1., 5.])
+
+ If the accumulator is too small, overflow occurs:
+
+ >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
+ -128
+
+ You can also start the sum with a value other than zero:
+
+ >>> np.sum([10], initial=5)
+ 15
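+
+ With `keepdims`, the reduced axis is retained with size one, so the
+ result broadcasts against the input:
+
+ >>> np.sum([[0, 1], [0, 5]], axis=1, keepdims=True)
+ array([[1],
+ [5]])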
+ """
+ if isinstance(a, _gentype):
+ # 2018-02-25, 1.15.0
+ warnings.warn(
+ "Calling np.sum(generator) is deprecated, and in the future will give a different result. "
+ "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
+ DeprecationWarning, stacklevel=3)
+
+ res = _sum_(a)
+ if out is not None:
+ out[...] = res
+ return out
+ return res
+
+ return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
+ initial=initial, where=where)
+
+
+def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
+ where=np._NoValue):
+ return (a, where, out)
+
+
+@array_function_dispatch(_any_dispatcher)
+def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
+ """
+ Test whether any array element along a given axis evaluates to True.
+
+ Returns a single boolean if `axis` is ``None``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a logical OR reduction is performed.
+ The default (``axis=None``) is to perform a logical OR over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output and its type is preserved
+ (e.g., if it is of type float, then it will remain so, returning
+ 1.0 for True and 0.0 for False, regardless of the type of `a`).
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `any` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in checking for any `True` values.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ any : bool or ndarray
+ A new boolean or `ndarray` is returned unless `out` is specified,
+ in which case a reference to `out` is returned.
+
+ See Also
+ --------
+ ndarray.any : equivalent method
+
+ all : Test whether all elements along a given axis evaluate to True.
+
+ Notes
+ -----
+ Not a Number (NaN), positive infinity and negative infinity evaluate
+ to `True` because these are not equal to zero.
+
+ Examples
+ --------
+ >>> np.any([[True, False], [True, True]])
+ True
+
+ >>> np.any([[True, False], [False, False]], axis=0)
+ array([ True, False])
+
+ >>> np.any([-1, 0, 5])
+ True
+
+ >>> np.any(np.nan)
+ True
+
+ >>> np.any([[True, False], [False, False]], where=[[False], [True]])
+ False
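+
+ With `keepdims`, the reduced axis is retained with size one:
+
+ >>> np.any([[True, False], [False, False]], axis=0, keepdims=True)
+ array([[ True, False]])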
+
+ >>> o=np.array(False)
+ >>> z=np.any([-1, 4, 5], out=o)
+ >>> z, o
+ (array(True), array(True))
+ >>> # Check now that z is a reference to o
+ >>> z is o
+ True
+ >>> id(z), id(o) # identity of z and o # doctest: +SKIP
+ (191614240, 191614240)
+
+ """
+ return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
+ keepdims=keepdims, where=where)
+
+
+def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
+ where=None):
+ return (a, where, out)
+
+
+@array_function_dispatch(_all_dispatcher)
+def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
+ """
+ Test whether all array elements along a given axis evaluate to True.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a logical AND reduction is performed.
+ The default (``axis=None``) is to perform a logical AND over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternate output array in which to place the result.
+ It must have the same shape as the expected output and its
+ type is preserved (e.g., if ``dtype(out)`` is float, the result
+ will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
+ details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `all` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in checking for all `True` values.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ all : ndarray, bool
+ A new boolean or array is returned unless `out` is specified,
+ in which case a reference to `out` is returned.
+
+ See Also
+ --------
+ ndarray.all : equivalent method
+
+ any : Test whether any element along a given axis evaluates to True.
+
+ Notes
+ -----
+ Not a Number (NaN), positive infinity and negative infinity
+ evaluate to `True` because these are not equal to zero.
+
+ Examples
+ --------
+ >>> np.all([[True,False],[True,True]])
+ False
+
+ >>> np.all([[True,False],[True,True]], axis=0)
+ array([ True, False])
+
+ >>> np.all([-1, 4, 5])
+ True
+
+ >>> np.all([1.0, np.nan])
+ True
+
+ >>> np.all([[True, True], [False, True]], where=[[True], [False]])
+ True
+
+ >>> o=np.array(False)
+ >>> z=np.all([-1, 4, 5], out=o)
+ >>> id(z), id(o), z
+ (28293632, 28293632, array(True)) # may vary
+
+ """
+ return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
+ keepdims=keepdims, where=where)
+
+
+def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumsum_dispatcher)
+def cumsum(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative sum of the elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative sum is computed. The default
+ (None) is to compute the cumsum over the flattened array.
+ dtype : dtype, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If `dtype` is not specified, it defaults
+ to the dtype of `a`, unless `a` has an integer dtype with a
+ precision less than that of the default platform integer. In
+ that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
+ more details.
+
+ Returns
+ -------
+ cumsum_along_axis : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case a reference to `out` is returned. The
+ result has the same size as `a`, and the same shape as `a` if
+ `axis` is not None or `a` is a 1-d array.
+
+ See Also
+ --------
+ sum : Sum array elements.
+ trapz : Integration of array values using the composite trapezoidal rule.
+ diff : Calculate the n-th discrete difference along given axis.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
+ values since ``sum`` may use a pairwise summation routine, reducing
+ the roundoff-error. See `sum` for more information.
+
+ Examples
+ --------
+ >>> a = np.array([[1,2,3], [4,5,6]])
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.cumsum(a)
+ array([ 1, 3, 6, 10, 15, 21])
+ >>> np.cumsum(a, dtype=float) # specifies type of output value(s)
+ array([ 1., 3., 6., 10., 15., 21.])
+
+ >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
+ array([[1, 2, 3],
+ [5, 7, 9]])
+ >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
+ array([[ 1, 3, 6],
+ [ 4, 9, 15]])
+
+ ``cumsum(b)[-1]`` may not be equal to ``sum(b)``
+
+ >>> b = np.array([1, 2e-9, 3e-9] * 1000000)
+ >>> b.cumsum()[-1]
+ 1000000.0050045159
+ >>> b.sum()
+ 1000000.0050000029
+
+ """
+ return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
+
+
+def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_ptp_dispatcher)
+def ptp(a, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Range of values (maximum - minimum) along an axis.
+
+ The name of the function comes from the acronym for 'peak to peak'.
+
+ .. warning::
+ `ptp` preserves the data type of the array. This means the
+ return value for an input of signed integers with n bits
+ (e.g. `np.int8`, `np.int16`, etc) is also a signed integer
+ with n bits. In that case, peak-to-peak values greater than
+ ``2**(n-1)-1`` will be returned as negative values. An example
+ with a work-around is shown below.
+
+ Parameters
+ ----------
+ a : array_like
+ Input values.
+ axis : None or int or tuple of ints, optional
+ Axis along which to find the peaks. By default, flatten the
+ array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.15.0
+
+ If this is a tuple of ints, a reduction is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ out : array_like
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type of the output values will be cast if necessary.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `ptp` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ Returns
+ -------
+ ptp : ndarray or scalar
+ The range of a given array: a scalar if the array is one-dimensional,
+ or a new array holding the result along the given axis.
+
+ Examples
+ --------
+ >>> x = np.array([[4, 9, 2, 10],
+ ... [6, 9, 7, 12]])
+
+ >>> np.ptp(x, axis=1)
+ array([8, 6])
+
+ >>> np.ptp(x, axis=0)
+ array([2, 0, 5, 2])
+
+ >>> np.ptp(x)
+ 10
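+
+ A tuple of axes reduces over all of them at once:
+
+ >>> np.ptp(x, axis=(0, 1))
+ 10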
+
+ This example shows that a negative value can be returned when
+ the input is an array of signed integers.
+
+ >>> y = np.array([[1, 127],
+ ... [0, 127],
+ ... [-1, 127],
+ ... [-2, 127]], dtype=np.int8)
+ >>> np.ptp(y, axis=1)
+ array([ 126, 127, -128, -127], dtype=int8)
+
+ A work-around is to use the `view()` method to view the result as
+ unsigned integers with the same bit width:
+
+ >>> np.ptp(y, axis=1).view(np.uint8)
+ array([126, 127, 128, 129], dtype=uint8)
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if type(a) is not mu.ndarray:
+ try:
+ ptp = a.ptp
+ except AttributeError:
+ pass
+ else:
+ return ptp(axis=axis, out=out, **kwargs)
+ return _methods._ptp(a, axis=axis, out=out, **kwargs)
+
+
+def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+ where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amax_dispatcher)
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the maximum of an array or maximum along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, the maximum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `amax` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ initial : scalar, optional
+ The minimum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ where : array_like of bool, optional
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ amax : ndarray or scalar
+ Maximum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is an int, the result is an array of dimension
+ ``a.ndim - 1``. If `axis` is a tuple, the result is an array of
+ dimension ``a.ndim - len(axis)``.
+
+ See Also
+ --------
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ argmax :
+ Return the indices of the maximum values.
+
+ nanmin, minimum, fmin
+
+ Notes
+ -----
+ NaN values are propagated: if at least one item is NaN, the
+ corresponding max value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), please use `nanmax`.
+
+ Don't use `amax` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
+ ``amax(a, axis=0)``.
+
+ Examples
+ --------
+ >>> a = np.arange(4).reshape((2,2))
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> np.amax(a) # Maximum of the flattened array
+ 3
+ >>> np.amax(a, axis=0) # Maxima along the first axis
+ array([2, 3])
+ >>> np.amax(a, axis=1) # Maxima along the second axis
+ array([1, 3])
+ >>> np.amax(a, where=[False, True], initial=-1, axis=0)
+ array([-1, 3])
+ >>> b = np.arange(5, dtype=float)
+ >>> b[2] = np.NaN
+ >>> np.amax(b)
+ nan
+ >>> np.amax(b, where=~np.isnan(b), initial=-1)
+ 4.0
+ >>> np.nanmax(b)
+ 4.0
+
+ You can use an initial value to compute the maximum of an empty slice, or
+ to initialize it to a different value:
+
+ >>> np.amax([[-50], [10]], axis=-1, initial=0)
+ array([ 0, 10])
+
+ Notice that the initial value is used as one of the elements for which the
+ maximum is determined, unlike the ``default`` argument of Python's ``max``
+ function, which is only used for empty iterables.
+
+ >>> np.amax([5], initial=6)
+ 6
+ >>> max([5], default=6)
+ 5
+ """
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+ where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amin_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the minimum of an array or minimum along an axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, the minimum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
+ out : ndarray, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `amin` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+ computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ where : array_like of bool, optional
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ amin : ndarray or scalar
+ Minimum of `a`. If `axis` is None, the result is a scalar value.
+ If `axis` is an int, the result is an array of dimension
+ ``a.ndim - 1``. If `axis` is a tuple, the result is an array of
+ dimension ``a.ndim - len(axis)``.
+
+ See Also
+ --------
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ argmin :
+ Return the indices of the minimum values.
+
+ nanmax, maximum, fmax
+
+ Notes
+ -----
+ NaN values are propagated: if at least one item is NaN, the
+ corresponding min value will be NaN as well. To ignore NaN values
+ (MATLAB behavior), please use `nanmin`.
+
+ Don't use `amin` for element-wise comparison of 2 arrays; when
+ ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+ ``amin(a, axis=0)``.
+
+ Examples
+ --------
+ >>> a = np.arange(4).reshape((2,2))
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> np.amin(a) # Minimum of the flattened array
+ 0
+ >>> np.amin(a, axis=0) # Minima along the first axis
+ array([0, 1])
+ >>> np.amin(a, axis=1) # Minima along the second axis
+ array([0, 2])
+ >>> np.amin(a, where=[False, True], initial=10, axis=0)
+ array([10, 1])
+
+ >>> b = np.arange(5, dtype=float)
+ >>> b[2] = np.NaN
+ >>> np.amin(b)
+ nan
+ >>> np.amin(b, where=~np.isnan(b), initial=10)
+ 0.0
+ >>> np.nanmin(b)
+ 0.0
+
+ >>> np.amin([[-50], [10]], axis=-1, initial=0)
+ array([-50, 0])
+
+ Notice that the initial value is used as one of the elements for which the
+ minimum is determined, unlike the ``default`` argument of Python's ``min``
+ function, which is only used for empty iterables.
+
+ >>> np.amin([6], initial=5)
+ 5
+ >>> min([6], default=5)
+ 6
+ """
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+ Return the product of array elements over a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which a product is performed. The default,
+ axis=None, will calculate the product of all the elements in the
+ input array. If axis is negative it counts from the last to the
+ first axis.
+
+ .. versionadded:: 1.7.0
+
+ If axis is a tuple of ints, a product is performed on all of the
+ axes specified in the tuple instead of a single axis or all the
+ axes as before.
+ dtype : dtype, optional
+ The type of the returned array, as well as of the accumulator in
+ which the elements are multiplied. The dtype of `a` is used by
+ default unless `a` has an integer dtype of less precision than the
+ default platform integer. In that case, if `a` is signed then the
+ platform integer is used while if `a` is unsigned then an unsigned
+ integer of the same precision as the platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in the
+ result as dimensions with size one. With this option, the result
+ will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `prod` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-class' method does not implement `keepdims` any
+ exceptions will be raised.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.15.0
+
+ where : array_like of bool, optional
+ Elements to include in the product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ product_along_axis : ndarray, see `dtype` parameter above.
+ An array shaped as `a` but with the specified axis removed.
+ Returns a reference to `out` if specified.
+
+ See Also
+ --------
+ ndarray.prod : equivalent method
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow. That means that, on a 32-bit platform:
+
+ >>> x = np.array([536870910, 536870910, 536870910, 536870910])
+ >>> np.prod(x)
+ 16 # may vary
+
+ The product of an empty array is the neutral element 1:
+
+ >>> np.prod([])
+ 1.0
+
+ Examples
+ --------
+ By default, calculate the product of all elements:
+
+ >>> np.prod([1.,2.])
+ 2.0
+
+ Even when the input array is two-dimensional:
+
+ >>> a = np.array([[1., 2.], [3., 4.]])
+ >>> np.prod(a)
+ 24.0
+
+ But we can also specify the axis over which to multiply:
+
+ >>> np.prod(a, axis=1)
+ array([ 2., 12.])
+ >>> np.prod(a, axis=0)
+ array([3., 8.])
+
+ Or select specific elements to include:
+
+ >>> np.prod([1., np.nan, 3.], where=[True, False, True])
+ 3.0
+
+ If the type of `x` is unsigned, then the output type is
+ the unsigned platform integer:
+
+ >>> x = np.array([1, 2, 3], dtype=np.uint8)
+ >>> np.prod(x).dtype == np.uint
+ True
+
+ If `x` is of a signed integer type, then the output type
+ is the default platform integer:
+
+ >>> x = np.array([1, 2, 3], dtype=np.int8)
+ >>> np.prod(x).dtype == int
+ True
+
+ You can also start the product with a value other than one:
+
+ >>> np.prod([1, 2], initial=5)
+ 10
+ """
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+
+def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumprod_dispatcher)
+def cumprod(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative product of elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative product is computed. By default
+ the input is flattened.
+ dtype : dtype, optional
+ Type of the returned array, as well as of the accumulator in which
+ the elements are multiplied. If *dtype* is not specified, it
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
+ a precision less than that of the default platform integer. In
+ that case, the default platform integer is used instead.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type of the resulting values will be cast if necessary.
+
+ Returns
+ -------
+ cumprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case a reference to out is returned.
+
+ See Also
+ --------
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
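+
+ As a rough sketch of that wrap-around (the exact point of overflow
+ depends on the platform's default integer width):
+
+ >>> np.cumprod(np.full(64, 2))[-1] # 2**64 wraps around to 0
+ 0 # may vary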
+
+ Examples
+ --------
+ >>> a = np.array([1,2,3])
+ >>> np.cumprod(a) # intermediate results 1, 1*2
+ ... # total product 1*2*3 = 6
+ array([1, 2, 6])
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> np.cumprod(a, dtype=float) # specify type of output
+ array([ 1., 2., 6., 24., 120., 720.])
+
+ The cumulative product for each column (i.e., over the rows) of `a`:
+
+ >>> np.cumprod(a, axis=0)
+ array([[ 1, 2, 3],
+ [ 4, 10, 18]])
+
+ The cumulative product for each row (i.e. over the columns) of `a`:
+
+ >>> np.cumprod(a,axis=1)
+ array([[ 1, 2, 6],
+ [ 4, 20, 120]])
+
+ """
+ return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
+
+
+def _ndim_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_ndim_dispatcher)
+def ndim(a):
+ """
+ Return the number of dimensions of an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array. If it is not already an ndarray, a conversion is
+ attempted.
+
+ Returns
+ -------
+ number_of_dimensions : int
+ The number of dimensions in `a`. Scalars are zero-dimensional.
+
+ See Also
+ --------
+ ndarray.ndim : equivalent method
+ shape : dimensions of array
+ ndarray.shape : dimensions of array
+
+ Examples
+ --------
+ >>> np.ndim([[1,2,3],[4,5,6]])
+ 2
+ >>> np.ndim(np.array([[1,2,3],[4,5,6]]))
+ 2
+ >>> np.ndim(1)
+ 0
+
+ """
+ try:
+ return a.ndim
+ except AttributeError:
+ return asarray(a).ndim
+
+
+def _size_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_size_dispatcher)
+def size(a, axis=None):
+ """
+ Return the number of elements along a given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which the elements are counted. By default, the total
+ number of elements is returned.
+
+ Returns
+ -------
+ element_count : int
+ Number of elements along the specified axis.
+
+ See Also
+ --------
+ shape : dimensions of array
+ ndarray.shape : dimensions of array
+ ndarray.size : number of elements in array
+
+ Examples
+ --------
+ >>> a = np.array([[1,2,3],[4,5,6]])
+ >>> np.size(a)
+ 6
+ >>> np.size(a,1)
+ 3
+ >>> np.size(a,0)
+ 2
+
+ """
+ if axis is None:
+ try:
+ return a.size
+ except AttributeError:
+ return asarray(a).size
+ else:
+ try:
+ return a.shape[axis]
+ except AttributeError:
+ return asarray(a).shape[axis]
+
+
+def _around_dispatcher(a, decimals=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_around_dispatcher)
+def around(a, decimals=0, out=None):
+ """
+ Evenly round to the given number of decimals.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ decimals : int, optional
+ Number of decimal places to round to (default: 0). If
+ decimals is negative, it specifies the number of positions to
+ the left of the decimal point.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output, but the type of the output
+ values will be cast if necessary. See :ref:`ufuncs-output-type` for more
+ details.
+
+ Returns
+ -------
+ rounded_array : ndarray
+ An array of the same type as `a`, containing the rounded values.
+ Unless `out` was specified, a new array is created. A reference to
+ the result is returned.
+
+ The real and imaginary parts of complex numbers are rounded
+ separately. The result of rounding a float is a float.
+
+ See Also
+ --------
+ ndarray.round : equivalent method
+ ceil, fix, floor, rint, trunc
+
+
+ Notes
+ -----
+ `~numpy.round` is often used as an alias for `~numpy.around`.
+
+ For values exactly halfway between rounded decimal values, NumPy
+ rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
+ -0.5 and 0.5 round to 0.0, etc.
+
+ ``np.around`` uses a fast but sometimes inexact algorithm to round
+ floating-point datatypes. For positive `decimals` it is equivalent to
+ ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
+ error due to the inexact representation of decimal fractions in the IEEE
+ floating point standard [1]_ and errors introduced when scaling by powers
+ of ten. For instance, note the extra "1" in the following:
+
+ >>> np.round(56294995342131.5, 3)
+ 56294995342131.51
+
+ If your goal is to print such values with a fixed number of decimals, it is
+ preferable to use numpy's float printing routines to limit the number of
+ printed decimals:
+
+ >>> np.format_float_positional(56294995342131.5, precision=3)
+ '56294995342131.5'
+
+ The float printing routines use an accurate but much more computationally
+ demanding algorithm to compute the number of digits after the decimal
+ point.
+
+ Alternatively, Python's builtin `round` function uses a more accurate
+ but slower algorithm for 64-bit floating point values:
+
+ >>> round(56294995342131.5, 3)
+ 56294995342131.5
+ >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
+ (16.06, 16.05)
+
+
+ References
+ ----------
+ .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
+ https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
+
+ Examples
+ --------
+ >>> np.around([0.37, 1.64])
+ array([0., 2.])
+ >>> np.around([0.37, 1.64], decimals=1)
+ array([0.4, 1.6])
+ >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
+ array([0., 2., 2., 4., 4.])
+ >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
+ array([ 1, 2, 3, 11])
+ >>> np.around([1,2,3,11], decimals=-1)
+ array([ 0, 0, 0, 10])
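+
+ The real and imaginary parts of complex input are rounded separately,
+ as noted above (a small illustrative sketch):
+
+ >>> np.around([1.27 + 3.46j], decimals=1)
+ array([1.3+3.5j])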
+
+ """
+ return _wrapfunc(a, 'round', decimals=decimals, out=out)
+
+
+def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
+ where=None):
+ return (a, where, out)
+
+
+@array_function_dispatch(_mean_dispatcher)
+def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
+ where=np._NoValue):
+ """
+ Compute the arithmetic mean along the specified axis.
+
+ Returns the average of the array elements. The average is taken over
+ the flattened array by default, otherwise over the specified axis.
+ `float64` intermediate and return values are used for integer inputs.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the means are computed. The default is to
+ compute the mean of the flattened array.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a mean is performed over multiple axes,
+ instead of a single axis or all the axes as before.
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for floating point inputs, it is the same as the
+ input dtype.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+ See :ref:`ufuncs-output-type` for more details.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `mean` method of sub-classes of
+ `ndarray`; however, any non-default value will be. If the
+ sub-class's method does not implement `keepdims`, any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+ otherwise a reference to the output array is returned.
+
+ See Also
+ --------
+ average : Weighted average
+ std, var, nanmean, nanstd, nanvar
+
+ Notes
+ -----
+ The arithmetic mean is the sum of the elements along the axis divided
+ by the number of elements.
+
+ Note that for floating-point input, the mean is computed using the
+ same precision the input has. Depending on the input data, this can
+ cause the results to be inaccurate, especially for `float32` (see
+ example below). Specifying a higher-precision accumulator using the
+ `dtype` keyword can alleviate this issue.
+
+ By default, `float16` results are computed using `float32` intermediates
+ for extra precision.
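+
+ A tiny sketch of that note (the accumulation dtype is internal; only
+ the result dtype is visible):
+
+ >>> np.mean(np.ones(3, dtype=np.float16)).dtype
+ dtype('float16')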
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.mean(a)
+ 2.5
+ >>> np.mean(a, axis=0)
+ array([2., 3.])
+ >>> np.mean(a, axis=1)
+ array([1.5, 3.5])
+
+ In single precision, `mean` can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.mean(a)
+ 0.54999924
+
+ Computing the mean in float64 is more accurate:
+
+ >>> np.mean(a, dtype=np.float64)
+ 0.55000000074505806 # may vary
+
+ Specifying a where argument:
+
+ >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
+ >>> np.mean(a)
+ 12.0
+ >>> np.mean(a, where=[[True], [False], [False]])
+ 9.0
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if where is not np._NoValue:
+ kwargs['where'] = where
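+ # Prefer a ``mean`` method on non-ndarray inputs (e.g. ndarray
+ # sub-classes or duck arrays) when one is available; otherwise fall
+ # back to the internal implementation below.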
+ if type(a) is not mu.ndarray:
+ try:
+ mean = a.mean
+ except AttributeError:
+ pass
+ else:
+ return mean(axis=axis, dtype=dtype, out=out, **kwargs)
+
+ return _methods._mean(a, axis=axis, dtype=dtype,
+ out=out, **kwargs)
+
+
+def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None):
+ return (a, where, out)
+
+
+@array_function_dispatch(_std_dispatcher)
+def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
+ where=np._NoValue):
+ """
+ Compute the standard deviation along the specified axis.
+
+ Returns the standard deviation, a measure of the spread of a distribution,
+ of the array elements. The standard deviation is computed for the
+ flattened array by default, otherwise over the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Calculate the standard deviation of these values.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the standard deviation is computed. The
+ default is to compute the standard deviation of the flattened array.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a standard deviation is performed over
+ multiple axes, instead of a single axis or all the axes as before.
+ dtype : dtype, optional
+ Type to use in computing the standard deviation. For arrays of
+ integer type the default is float64, for arrays of float types it is
+ the same as the array type.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type (of the calculated
+ values) will be cast if necessary.
+ ddof : int, optional
+ Means Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of elements.
+ By default `ddof` is zero.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `std` method of sub-classes of
+ `ndarray`; however, any non-default value will be. If the
+ sub-class's method does not implement `keepdims`, any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in the standard deviation.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ standard_deviation : ndarray, see dtype parameter above.
+ If `out` is None, return a new array containing the standard deviation,
+ otherwise return a reference to the output array.
+
+ See Also
+ --------
+ var, mean, nanmean, nanstd, nanvar
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ The standard deviation is the square root of the average of the squared
+ deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
+ ``x = abs(a - a.mean())**2``.
+
+ The average squared deviation is typically calculated as ``x.sum() / N``,
+ where ``N = len(x)``. If, however, `ddof` is specified, the divisor
+ ``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
+ provides an unbiased estimator of the variance of the infinite population.
+ ``ddof=0`` provides a maximum likelihood estimate of the variance for
+ normally distributed variables. The standard deviation computed in this
+ function is the square root of the estimated variance, so even with
+ ``ddof=1``, it will not be an unbiased estimate of the standard deviation
+ per se.
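+
+ A short sketch of the `ddof` divisors described above (plain float64
+ inputs; printed digits may vary):
+
+ >>> np.std([1., 2., 3., 4.]) # divisor N = 4
+ 1.1180339887498949 # may vary
+ >>> np.std([1., 2., 3., 4.], ddof=1) # divisor N - 1 = 3
+ 1.2909944487358056 # may vary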
+
+ Note that, for complex numbers, `std` takes the absolute
+ value before squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the *std* is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for float32 (see example below).
+ Specifying a higher-accuracy accumulator using the `dtype` keyword can
+ alleviate this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.std(a)
+ 1.1180339887498949 # may vary
+ >>> np.std(a, axis=0)
+ array([1., 1.])
+ >>> np.std(a, axis=1)
+ array([0.5, 0.5])
+
+ In single precision, std() can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.std(a)
+ 0.45000005
+
+ Computing the standard deviation in float64 is more accurate:
+
+ >>> np.std(a, dtype=np.float64)
+ 0.44999999925494177 # may vary
+
+ Specifying a where argument:
+
+ >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+ >>> np.std(a)
+ 2.614064523559687 # may vary
+ >>> np.std(a, where=[[True], [True], [False]])
+ 2.0
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if where is not np._NoValue:
+ kwargs['where'] = where
+ if type(a) is not mu.ndarray:
+ try:
+ std = a.std
+ except AttributeError:
+ pass
+ else:
+ return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+ return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)
+
+
+def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None):
+ return (a, where, out)
+
+
+@array_function_dispatch(_var_dispatcher)
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
+ where=np._NoValue):
+ """
+ Compute the variance along the specified axis.
+
+ Returns the variance of the array elements, a measure of the spread of a
+ distribution. The variance is computed for the flattened array by
+ default, otherwise over the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose variance is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the variance is computed. The default is to
+ compute the variance of the flattened array.
+
+ .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a variance is performed over multiple axes,
+ instead of a single axis or all the axes as before.
+ dtype : data-type, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is `float64`; for arrays of float types it is the same as
+ the array type.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output, but the type is cast if
+ necessary.
+ ddof : int, optional
+ "Delta Degrees of Freedom": the divisor used in the calculation is
+ ``N - ddof``, where ``N`` represents the number of elements. By
+ default `ddof` is zero.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `var` method of sub-classes of
+ `ndarray`; however, any non-default value will be. If the
+ sub-class's method does not implement `keepdims`, any
+ exceptions will be raised.
+
+ where : array_like of bool, optional
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
+ details.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ variance : ndarray, see dtype parameter above
+ If ``out=None``, returns a new array containing the variance;
+ otherwise, a reference to the output array is returned.
+
+ See Also
+ --------
+ std, mean, nanmean, nanstd, nanvar
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ The variance is the average of the squared deviations from the mean,
+ i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
+
+ The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+ instead. In standard statistical practice, ``ddof=1`` provides an
+ unbiased estimator of the variance of a hypothetical infinite population.
+ ``ddof=0`` provides a maximum likelihood estimate of the variance for
+ normally distributed variables.
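+
+ A short sketch of the two divisors (plain float64 inputs):
+
+ >>> np.var([1., 2., 3., 4.]) # divisor N = 4
+ 1.25
+ >>> np.var([1., 2., 3., 4.], ddof=1) # divisor N - 1 = 3
+ 1.6666666666666667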
+
+ Note that for complex numbers, the absolute value is taken before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the variance is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32` (see example
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
+ keyword can alleviate this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.var(a)
+ 1.25
+ >>> np.var(a, axis=0)
+ array([1., 1.])
+ >>> np.var(a, axis=1)
+ array([0.25, 0.25])
+
+ In single precision, var() can be inaccurate:
+
+ >>> a = np.zeros((2, 512*512), dtype=np.float32)
+ >>> a[0, :] = 1.0
+ >>> a[1, :] = 0.1
+ >>> np.var(a)
+ 0.20250003
+
+ Computing the variance in float64 is more accurate:
+
+ >>> np.var(a, dtype=np.float64)
+ 0.20249999932944759 # may vary
+ >>> ((1-0.55)**2 + (0.1-0.55)**2)/2
+ 0.2025
+
+ Specifying a where argument:
+
+ >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
+ >>> np.var(a)
+ 6.833333333333333 # may vary
+ >>> np.var(a, where=[[True], [True], [False]])
+ 4.0
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
+ if type(a) is not mu.ndarray:
+ try:
+ var = a.var
+
+ except AttributeError:
+ pass
+ else:
+ return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
+
+ return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)
+
+
+# Aliases of other functions. These have their own definitions only so that
+# they can have unique docstrings.
+
+@array_function_dispatch(_around_dispatcher)
+def round_(a, decimals=0, out=None):
+ """
+ Round an array to the given number of decimals.
+
+ See Also
+ --------
+ around : equivalent function; see for details.
+ """
+ return around(a, decimals=decimals, out=out)
+
+
+@array_function_dispatch(_prod_dispatcher, verify=False)
+def product(*args, **kwargs):
+ """
+ Return the product of array elements over a given axis.
+
+ See Also
+ --------
+ prod : equivalent function; see for details.
+ """
+ return prod(*args, **kwargs)
+
+
+@array_function_dispatch(_cumprod_dispatcher, verify=False)
+def cumproduct(*args, **kwargs):
+ """
+ Return the cumulative product over the given axis.
+
+ See Also
+ --------
+ cumprod : equivalent function; see for details.
+ """
+ return cumprod(*args, **kwargs)
+
+
+@array_function_dispatch(_any_dispatcher, verify=False)
+def sometrue(*args, **kwargs):
+ """
+ Check whether some values are true.
+
+ Refer to `any` for full documentation.
+
+ See Also
+ --------
+ any : equivalent function; see for details.
+ """
+ return any(*args, **kwargs)
+
+
+@array_function_dispatch(_all_dispatcher, verify=False)
+def alltrue(*args, **kwargs):
+ """
+ Check if all elements of input array are true.
+
+ See Also
+ --------
+ numpy.all : Equivalent function; see for details.
+ """
+ return all(*args, **kwargs)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi b/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi
new file mode 100644
index 00000000..17b17819
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.pyi
@@ -0,0 +1,1049 @@
+import datetime as dt
+from collections.abc import Sequence
+from typing import Union, Any, overload, TypeVar, Literal, SupportsIndex
+
+from numpy import (
+ ndarray,
+ number,
+ uint64,
+ int_,
+ int64,
+ intp,
+ float16,
+ bool_,
+ floating,
+ complexfloating,
+ object_,
+ generic,
+ _OrderKACF,
+ _OrderACF,
+ _ModeKind,
+ _PartitionKind,
+ _SortKind,
+ _SortSide,
+)
+from numpy._typing import (
+ DTypeLike,
+ _DTypeLike,
+ ArrayLike,
+ _ArrayLike,
+ NDArray,
+ _ShapeLike,
+ _Shape,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
+ _IntLike_co,
+ _BoolLike_co,
+ _ComplexLike_co,
+ _NumberLike_co,
+ _ScalarLike_co,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+__all__: list[str]
+
+@overload
+def take(
+ a: _ArrayLike[_SCT],
+ indices: _IntLike_co,
+ axis: None = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> _SCT: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _IntLike_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> Any: ...
+@overload
+def take(
+ a: _ArrayLike[_SCT],
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def take(
+ a: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ out: _ArrayType = ...,
+ mode: _ModeKind = ...,
+) -> _ArrayType: ...
+
+@overload
+def reshape(
+ a: _ArrayLike[_SCT],
+ newshape: _ShapeLike,
+ order: _OrderACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def reshape(
+ a: ArrayLike,
+ newshape: _ShapeLike,
+ order: _OrderACF = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def choose(
+ a: _IntLike_co,
+ choices: ArrayLike,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> Any: ...
+@overload
+def choose(
+ a: _ArrayLikeInt_co,
+ choices: _ArrayLike[_SCT],
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def choose(
+ a: _ArrayLikeInt_co,
+ choices: ArrayLike,
+ out: None = ...,
+ mode: _ModeKind = ...,
+) -> NDArray[Any]: ...
+@overload
+def choose(
+ a: _ArrayLikeInt_co,
+ choices: ArrayLike,
+ out: _ArrayType = ...,
+ mode: _ModeKind = ...,
+) -> _ArrayType: ...
+
+@overload
+def repeat(
+ a: _ArrayLike[_SCT],
+ repeats: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def repeat(
+ a: ArrayLike,
+ repeats: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def put(
+ a: NDArray[Any],
+ ind: _ArrayLikeInt_co,
+ v: ArrayLike,
+ mode: _ModeKind = ...,
+) -> None: ...
+
+@overload
+def swapaxes(
+ a: _ArrayLike[_SCT],
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+) -> NDArray[_SCT]: ...
+@overload
+def swapaxes(
+ a: ArrayLike,
+ axis1: SupportsIndex,
+ axis2: SupportsIndex,
+) -> NDArray[Any]: ...
+
+@overload
+def transpose(
+ a: _ArrayLike[_SCT],
+ axes: None | _ShapeLike = ...
+) -> NDArray[_SCT]: ...
+@overload
+def transpose(
+ a: ArrayLike,
+ axes: None | _ShapeLike = ...
+) -> NDArray[Any]: ...
+
+@overload
+def partition(
+ a: _ArrayLike[_SCT],
+ kth: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ kind: _PartitionKind = ...,
+ order: None | str | Sequence[str] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def partition(
+ a: ArrayLike,
+ kth: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ kind: _PartitionKind = ...,
+ order: None | str | Sequence[str] = ...,
+) -> NDArray[Any]: ...
+
+def argpartition(
+ a: ArrayLike,
+ kth: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ kind: _PartitionKind = ...,
+ order: None | str | Sequence[str] = ...,
+) -> NDArray[intp]: ...
+
+@overload
+def sort(
+ a: _ArrayLike[_SCT],
+ axis: None | SupportsIndex = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def sort(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
+) -> NDArray[Any]: ...
+
+def argsort(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ kind: None | _SortKind = ...,
+ order: None | str | Sequence[str] = ...,
+) -> NDArray[intp]: ...
+
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: None = ...,
+ out: None = ...,
+ *,
+ keepdims: Literal[False] = ...,
+) -> intp: ...
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ *,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def argmax(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: _ArrayType = ...,
+ *,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: None = ...,
+ out: None = ...,
+ *,
+ keepdims: Literal[False] = ...,
+) -> intp: ...
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ *,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def argmin(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: _ArrayType = ...,
+ *,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+@overload
+def searchsorted(
+ a: ArrayLike,
+ v: _ScalarLike_co,
+ side: _SortSide = ...,
+ sorter: None | _ArrayLikeInt_co = ..., # 1D int array
+) -> intp: ...
+@overload
+def searchsorted(
+ a: ArrayLike,
+ v: ArrayLike,
+ side: _SortSide = ...,
+ sorter: None | _ArrayLikeInt_co = ..., # 1D int array
+) -> NDArray[intp]: ...
+
+@overload
+def resize(
+ a: _ArrayLike[_SCT],
+ new_shape: _ShapeLike,
+) -> NDArray[_SCT]: ...
+@overload
+def resize(
+ a: ArrayLike,
+ new_shape: _ShapeLike,
+) -> NDArray[Any]: ...
+
+@overload
+def squeeze(
+ a: _SCT,
+ axis: None | _ShapeLike = ...,
+) -> _SCT: ...
+@overload
+def squeeze(
+ a: _ArrayLike[_SCT],
+ axis: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def squeeze(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diagonal(
+ a: _ArrayLike[_SCT],
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ..., # >= 2D array
+) -> NDArray[_SCT]: ...
+@overload
+def diagonal(
+ a: ArrayLike,
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ..., # >= 2D array
+) -> NDArray[Any]: ...
+
+@overload
+def trace(
+ a: ArrayLike, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> Any: ...
+@overload
+def trace(
+ a: ArrayLike, # >= 2D array
+ offset: SupportsIndex = ...,
+ axis1: SupportsIndex = ...,
+ axis2: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]: ...
+
+def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]: ...
+
+def shape(a: ArrayLike) -> _Shape: ...
+
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: _ArrayLike[_SCT],
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def compress(
+ condition: _ArrayLikeBool_co, # 1D bool array
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def clip(
+ a: _SCT,
+ a_min: None | ArrayLike,
+ a_max: None | ArrayLike,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[None | str, ...] = ...,
+ extobj: list[Any] = ...,
+) -> _SCT: ...
+@overload
+def clip(
+ a: _ScalarLike_co,
+ a_min: None | ArrayLike,
+ a_max: None | ArrayLike,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[None | str, ...] = ...,
+ extobj: list[Any] = ...,
+) -> Any: ...
+@overload
+def clip(
+ a: _ArrayLike[_SCT],
+ a_min: None | ArrayLike,
+ a_max: None | ArrayLike,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[None | str, ...] = ...,
+ extobj: list[Any] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def clip(
+ a: ArrayLike,
+ a_min: None | ArrayLike,
+ a_max: None | ArrayLike,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[None | str, ...] = ...,
+ extobj: list[Any] = ...,
+) -> NDArray[Any]: ...
+@overload
+def clip(
+ a: ArrayLike,
+ a_min: None | ArrayLike,
+ a_max: None | ArrayLike,
+ out: _ArrayType = ...,
+ *,
+ dtype: DTypeLike,
+ where: None | _ArrayLikeBool_co = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[None | str, ...] = ...,
+ extobj: list[Any] = ...,
+) -> Any: ...
+@overload
+def clip(
+ a: ArrayLike,
+ a_min: None | ArrayLike,
+ a_max: None | ArrayLike,
+ out: _ArrayType,
+ *,
+ dtype: DTypeLike = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ signature: str | tuple[None | str, ...] = ...,
+ extobj: list[Any] = ...,
+) -> _ArrayType: ...
+
+@overload
+def sum(
+ a: _ArrayLike[_SCT],
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def sum(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def all(
+ a: ArrayLike,
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> bool_: ...
+@overload
+def all(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def all(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def any(
+ a: ArrayLike,
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> bool_: ...
+@overload
+def any(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def any(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def cumsum(
+ a: _ArrayLike[_SCT],
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumsum(
+ a: ArrayLike,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def ptp(
+ a: _ArrayLike[_SCT],
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+) -> _SCT: ...
+@overload
+def ptp(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def ptp(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+@overload
+def amax(
+ a: _ArrayLike[_SCT],
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def amax(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def amax(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def amin(
+ a: _ArrayLike[_SCT],
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def amin(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def amin(
+ a: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+# TODO: `np.prod()`: For object arrays `initial` does not necessarily
+# have to be a numerical scalar.
+# The only requirement is that it is compatible
+# with the `.__mul__()` method(s) of the passed array's elements.
+
+# Note that the same situation holds for all wrappers around
+# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
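+#
+# A sketch of that flexibility (illustrative only; any object whose
+# elements define ``__mul__`` works):
+#
+# >>> from fractions import Fraction
+# >>> np.prod(np.array([Fraction(1, 3)], dtype=object), initial=Fraction(3))
+# Fraction(1, 1)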
+@overload
+def prod(
+ a: _ArrayLikeBool_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> int_: ...
+@overload
+def prod(
+ a: _ArrayLikeUInt_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> uint64: ...
+@overload
+def prod(
+ a: _ArrayLikeInt_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> int64: ...
+@overload
+def prod(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None | DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def prod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None | DTypeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike_co = ...,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def cumprod(
+ a: _ArrayLikeBool_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[int_]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeUInt_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[uint64]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[int64]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeFloat_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: None = ...,
+ out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def cumprod(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+def ndim(a: ArrayLike) -> int: ...
+
+def size(a: ArrayLike, axis: None | int = ...) -> int: ...
+
+@overload
+def around(
+ a: _BoolLike_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> float16: ...
+@overload
+def around(
+ a: _SCT_uifcO,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> _SCT_uifcO: ...
+@overload
+def around(
+ a: _ComplexLike_co | object_,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> Any: ...
+@overload
+def around(
+ a: _ArrayLikeBool_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[float16]: ...
+@overload
+def around(
+ a: _ArrayLike[_SCT_uifcO],
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[_SCT_uifcO]: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex = ...,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def around(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ decimals: SupportsIndex = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def mean(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def mean(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def std(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def std(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
+
+@overload
+def var(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> floating[Any]: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: None = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: Literal[False] = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> Any: ...
+@overload
+def var(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: _ArrayType = ...,
+ ddof: float = ...,
+ keepdims: bool = ...,
+ *,
+ where: _ArrayLikeBool_co = ...,
+) -> _ArrayType: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/function_base.py b/venv/lib/python3.9/site-packages/numpy/core/function_base.py
new file mode 100644
index 00000000..3dc51a81
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/function_base.py
@@ -0,0 +1,537 @@
+import functools
+import warnings
+import operator
+import types
+
+from . import numeric as _nx
+from .numeric import result_type, NaN, asanyarray, ndim
+from numpy.core.multiarray import add_docstring
+from numpy.core import overrides
+
+__all__ = ['logspace', 'linspace', 'geomspace']
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_linspace_dispatcher)
+def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
+ axis=0):
+ """
+ Return evenly spaced numbers over a specified interval.
+
+ Returns `num` evenly spaced samples, calculated over the
+ interval [`start`, `stop`].
+
+ The endpoint of the interval can optionally be excluded.
+
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
+ .. versionchanged:: 1.20.0
+ Values are rounded towards ``-inf`` instead of ``0`` when an
+ integer ``dtype`` is specified. The old behavior can
+ still be obtained with ``np.linspace(start, stop, num).astype(int)``
+
+ Parameters
+ ----------
+ start : array_like
+ The starting value of the sequence.
+ stop : array_like
+ The end value of the sequence, unless `endpoint` is set to False.
+ In that case, the sequence consists of all but the last of ``num + 1``
+ evenly spaced samples, so that `stop` is excluded. Note that the step
+ size changes when `endpoint` is False.
+ num : int, optional
+ Number of samples to generate. Default is 50. Must be non-negative.
+ endpoint : bool, optional
+ If True, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ retstep : bool, optional
+ If True, return (`samples`, `step`), where `step` is the spacing
+ between samples.
+ dtype : dtype, optional
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred dtype will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
+
+ .. versionadded:: 1.9.0
+
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
+ Returns
+ -------
+ samples : ndarray
+ There are `num` equally spaced samples in the closed interval
+ ``[start, stop]`` or the half-open interval ``[start, stop)``
+ (depending on whether `endpoint` is True or False).
+ step : float, optional
+ Only returned if `retstep` is True
+
+ Size of spacing between samples.
+
+
+ See Also
+ --------
+ arange : Similar to `linspace`, but uses a step size (instead of the
+ number of samples).
+ geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+ scale (a geometric progression).
+ logspace : Similar to `geomspace`, but with the end points specified as
+ logarithms.
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> np.linspace(2.0, 3.0, num=5)
+ array([2. , 2.25, 2.5 , 2.75, 3. ])
+ >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
+ array([2. , 2.2, 2.4, 2.6, 2.8])
+ >>> np.linspace(2.0, 3.0, num=5, retstep=True)
+ (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
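+
+ With an integer `dtype`, values are rounded towards ``-inf`` (the
+ 1.20.0 behavior noted above), unlike plain truncation via ``astype``:
+
+ >>> np.linspace(-2.5, 2.5, num=3, dtype=int)
+ array([-3,  0,  2])
+ >>> np.linspace(-2.5, 2.5, num=3).astype(int)
+ array([-2,  0,  2])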
+
+ Graphical illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 8
+ >>> y = np.zeros(N)
+ >>> x1 = np.linspace(0, 10, N, endpoint=True)
+ >>> x2 = np.linspace(0, 10, N, endpoint=False)
+ >>> plt.plot(x1, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(x2, y + 0.5, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.ylim([-0.5, 1])
+ (-0.5, 1)
+ >>> plt.show()
+
+ """
+ num = operator.index(num)
+ if num < 0:
+ raise ValueError("Number of samples, %s, must be non-negative." % num)
+ div = (num - 1) if endpoint else num
+
+ # Convert float/complex array scalars to float, gh-3504
+ # and make sure one can use variables that have an __array_interface__, gh-6634
+ start = asanyarray(start) * 1.0
+ stop = asanyarray(stop) * 1.0
+
+ dt = result_type(start, stop, float(num))
+ if dtype is None:
+ dtype = dt
+ integer_dtype = False
+ else:
+ integer_dtype = _nx.issubdtype(dtype, _nx.integer)
+
+ delta = stop - start
+ y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
+ # In-place multiplication y *= delta/div is faster, but prevents the
+ # multiplicand from overriding what class is produced, and thus prevents,
+ # e.g., the use of Quantities; see gh-7142. Hence, we multiply in place
+ # only for standard scalar types.
+ if div > 0:
+ _mult_inplace = _nx.isscalar(delta)
+ step = delta / div
+ any_step_zero = (
+ step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())
+ if any_step_zero:
+ # Special handling for denormal numbers, gh-5437
+ y /= div
+ if _mult_inplace:
+ y *= delta
+ else:
+ y = y * delta
+ else:
+ if _mult_inplace:
+ y *= step
+ else:
+ y = y * step
+ else:
+ # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+ # have an undefined step
+ step = NaN
+ # Multiply with delta to allow possible override of output class.
+ y = y * delta
+
+ y += start
+
+ if endpoint and num > 1:
+ y[-1] = stop
+
+ if axis != 0:
+ y = _nx.moveaxis(y, 0, axis)
+
+ if integer_dtype:
+ _nx.floor(y, out=y)
+
+ if retstep:
+ return y.astype(dtype, copy=False), step
+ else:
+ return y.astype(dtype, copy=False)
+
+
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+ axis=0):
+ """
+ Return numbers spaced evenly on a log scale.
+
+ In linear space, the sequence starts at ``base ** start``
+ (`base` to the power of `start`) and ends with ``base ** stop``
+ (see `endpoint` below).
+
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
+ Parameters
+ ----------
+ start : array_like
+ ``base ** start`` is the starting value of the sequence.
+ stop : array_like
+ ``base ** stop`` is the final value of the sequence, unless `endpoint`
+ is False. In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ base : array_like, optional
+ The base of the log space. The step size between the elements in
+ ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
+ Default is 10.0.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred type will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples. Note that, when used with a float endpoint, the
+ endpoint may or may not be included.
+ linspace : Similar to logspace, but with the samples uniformly distributed
+ in linear space, instead of log space.
+ geomspace : Similar to logspace, but with endpoints specified directly.
+ :ref:`how-to-partition`
+
+ Notes
+ -----
+ Logspace is equivalent to the code
+
+ >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
+ ... # doctest: +SKIP
+ >>> power(base, y).astype(dtype)
+ ... # doctest: +SKIP
+
+ Examples
+ --------
+ >>> np.logspace(2.0, 3.0, num=4)
+ array([ 100. , 215.443469 , 464.15888336, 1000. ])
+ >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
+ array([100. , 177.827941 , 316.22776602, 562.34132519])
+ >>> np.logspace(2.0, 3.0, num=4, base=2.0)
+ array([4. , 5.0396842 , 6.34960421, 8. ])
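+
+ A requested `dtype` is applied after the power step (a small sketch):
+
+ >>> np.logspace(0.0, 2.0, num=3, dtype=np.float32)
+ array([  1.,  10., 100.], dtype=float32)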
+
+ Graphical illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> x1 = np.logspace(0.1, 1, N, endpoint=True)
+ >>> x2 = np.logspace(0.1, 1, N, endpoint=False)
+ >>> y = np.zeros(N)
+ >>> plt.plot(x1, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(x2, y + 0.5, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.ylim([-0.5, 1])
+ (-0.5, 1)
+ >>> plt.show()
+
+ """
+ y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
+ if dtype is None:
+ return _nx.power(base, y)
+ return _nx.power(base, y).astype(dtype, copy=False)
+
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+ axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
+ """
+ Return numbers spaced evenly on a log scale (a geometric progression).
+
+ This is similar to `logspace`, but with endpoints specified directly.
+ Each output sample is a constant multiple of the previous.
+
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
+ Parameters
+ ----------
+ start : array_like
+ The starting value of the sequence.
+ stop : array_like
+ The final value of the sequence, unless `endpoint` is False.
+ In that case, ``num + 1`` values are spaced over the
+ interval in log-space, of which all but the last (a sequence of
+ length `num`) are returned.
+ num : integer, optional
+ Number of samples to generate. Default is 50.
+ endpoint : boolean, optional
+ If true, `stop` is the last sample. Otherwise, it is not included.
+ Default is True.
+ dtype : dtype
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred dtype will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
+ Returns
+ -------
+ samples : ndarray
+ `num` samples, equally spaced on a log scale.
+
+ See Also
+ --------
+ logspace : Similar to geomspace, but with endpoints specified using log
+ and base.
+ linspace : Similar to geomspace, but with arithmetic instead of geometric
+ progression.
+ arange : Similar to linspace, with the step size specified instead of the
+ number of samples.
+ :ref:`how-to-partition`
+
+ Notes
+ -----
+ If the inputs or dtype are complex, the output will follow a logarithmic
+ spiral in the complex plane. (There are an infinite number of spirals
+ passing through two points; the output will follow the shortest such path.)
+
+ Examples
+ --------
+ >>> np.geomspace(1, 1000, num=4)
+ array([ 1., 10., 100., 1000.])
+ >>> np.geomspace(1, 1000, num=3, endpoint=False)
+ array([ 1., 10., 100.])
+ >>> np.geomspace(1, 1000, num=4, endpoint=False)
+ array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])
+ >>> np.geomspace(1, 256, num=9)
+ array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])
+
+ Note that the above may not produce exact integers:
+
+ >>> np.geomspace(1, 256, num=9, dtype=int)
+ array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])
+ >>> np.around(np.geomspace(1, 256, num=9)).astype(int)
+ array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])
+
+ Negative, decreasing, and complex inputs are allowed:
+
+ >>> np.geomspace(1000, 1, num=4)
+ array([1000., 100., 10., 1.])
+ >>> np.geomspace(-1000, -1, num=4)
+ array([-1000., -100., -10., -1.])
+ >>> np.geomspace(1j, 1000j, num=4) # Straight line
+ array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
+ >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
+ array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
+ 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,
+ 1.00000000e+00+0.00000000e+00j])
+
+ Graphical illustration of `endpoint` parameter:
+
+ >>> import matplotlib.pyplot as plt
+ >>> N = 10
+ >>> y = np.zeros(N)
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.axis([0.5, 2000, 0, 3])
+ [0.5, 2000, 0, 3]
+ >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
+ >>> plt.show()
+
+ """
+ start = asanyarray(start)
+ stop = asanyarray(stop)
+ if _nx.any(start == 0) or _nx.any(stop == 0):
+ raise ValueError('Geometric sequence cannot include zero')
+
+ dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
+ if dtype is None:
+ dtype = dt
+ else:
+ # complex to dtype('complex128'), for instance
+ dtype = _nx.dtype(dtype)
+
+ # Promote both arguments to the same dtype in case, for instance, one is
+ # complex and another is negative and log would produce NaN otherwise.
+ # Copy since we may change things in-place further down.
+ start = start.astype(dt, copy=True)
+ stop = stop.astype(dt, copy=True)
+
+ out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
+ # Avoid negligible real or imaginary parts in output by rotating to
+ # positive real, calculating, then undoing rotation
+ if _nx.issubdtype(dt, _nx.complexfloating):
+ all_imag = (start.real == 0.) & (stop.real == 0.)
+ if _nx.any(all_imag):
+ start[all_imag] = start[all_imag].imag
+ stop[all_imag] = stop[all_imag].imag
+ out_sign[all_imag] = 1j
+
+ both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
+ if _nx.any(both_negative):
+ _nx.negative(start, out=start, where=both_negative)
+ _nx.negative(stop, out=stop, where=both_negative)
+ _nx.negative(out_sign, out=out_sign, where=both_negative)
+
+ log_start = _nx.log10(start)
+ log_stop = _nx.log10(stop)
+ result = logspace(log_start, log_stop, num=num,
+ endpoint=endpoint, base=10.0, dtype=dtype)
+
+ # Make sure the endpoints match the start and stop arguments. This is
+ # necessary because np.exp(np.log(x)) is not necessarily equal to x.
+ if num > 0:
+ result[0] = start
+ if num > 1 and endpoint:
+ result[-1] = stop
+
+ result = out_sign * result
+
+ if axis != 0:
+ result = _nx.moveaxis(result, 0, axis)
+
+ return result.astype(dtype, copy=False)
+
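+# A minimal sketch (not part of NumPy) of the identity geomspace builds on:
+# apart from the sign/rotation handling above, geomspace(start, stop, num)
+# is logspace(log10(start), log10(stop), num); a geometric progression is an
+# arithmetic progression of the exponents. For example:
+#
+#   >>> import numpy as np
+#   >>> np.allclose(np.geomspace(2, 32, num=5), 2.0 ** np.linspace(1, 5, 5))
+#   True
+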
+
+def _needs_add_docstring(obj):
+ """
+    Returns true if the only way to set the docstring of `obj` from Python
+    is via add_docstring.
+
+ This function errs on the side of being overly conservative.
+ """
+ Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+ if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+ return False
+
+ if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+ return False
+
+ return True
+
+
+def _add_docstring(obj, doc, warn_on_python):
+ if warn_on_python and not _needs_add_docstring(obj):
+ warnings.warn(
+ "add_newdoc was used on a pure-python object {}. "
+ "Prefer to attach it directly to the source."
+ .format(obj),
+ UserWarning,
+ stacklevel=3)
+ try:
+ add_docstring(obj, doc)
+ except Exception:
+ pass
+
+
+def add_newdoc(place, obj, doc, warn_on_python=True):
+ """
+ Add documentation to an existing object, typically one defined in C
+
+ The purpose is to allow easier editing of the docstrings without requiring
+ a re-compile. This exists primarily for internal use within numpy itself.
+
+ Parameters
+ ----------
+ place : str
+ The absolute name of the module to import from
+ obj : str
+ The name of the object to add documentation to, typically a class or
+ function name
+ doc : {str, Tuple[str, str], List[Tuple[str, str]]}
+ If a string, the documentation to apply to `obj`
+
+ If a tuple, then the first element is interpreted as an attribute of
+ `obj` and the second as the docstring to apply - ``(method, docstring)``
+
+ If a list, then each element of the list should be a tuple of length
+ two - ``[(method1, docstring1), (method2, docstring2), ...]``
+ warn_on_python : bool
+ If True, the default, emit `UserWarning` if this is used to attach
+ documentation to a pure-python object.
+
+ Notes
+ -----
+ This routine never raises an error if the docstring can't be written, but
+ will raise an error if the object being documented does not exist.
+
+    This routine cannot modify read-only docstrings, such as those of
+    new-style classes or built-in functions. Because this routine never
+    raises an error, the caller must check manually that the docstrings
+    were changed.
+
+ Since this function grabs the ``char *`` from a c-level str object and puts
+ it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
+ C-API best-practices, by:
+
+ - modifying a `PyTypeObject` after calling `PyType_Ready`
+ - calling `Py_INCREF` on the str and losing the reference, so the str
+ will never be released
+
+ If possible it should be avoided.
+ """
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ _add_docstring(new, doc.strip(), warn_on_python)
+ elif isinstance(doc, tuple):
+ attr, docstring = doc
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
+ elif isinstance(doc, list):
+ for attr, docstring in doc:
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
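+
+
+# Illustrative usage sketch (the docstring text is hypothetical, not from
+# NumPy): attach documentation to a C-defined object, and to one of its
+# attributes via the tuple form:
+#
+#   add_newdoc('numpy.core.multiarray', 'flagsobj',
+#              "Hypothetical docstring for the flags object.")
+#   add_newdoc('numpy.core.multiarray', 'flagsobj',
+#              ('__getitem__', "Hypothetical docstring for __getitem__."))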
diff --git a/venv/lib/python3.9/site-packages/numpy/core/function_base.pyi b/venv/lib/python3.9/site-packages/numpy/core/function_base.pyi
new file mode 100644
index 00000000..2c2a277b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/function_base.pyi
@@ -0,0 +1,187 @@
+from typing import (
+ Literal as L,
+ overload,
+ Any,
+ SupportsIndex,
+ TypeVar,
+)
+
+from numpy import floating, complexfloating, generic
+from numpy._typing import (
+ NDArray,
+ DTypeLike,
+ _DTypeLike,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+__all__: list[str]
+
+@overload
+def linspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[False] = ...,
+ dtype: DTypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+@overload
+def linspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[floating[Any]], floating[Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[_SCT], _SCT]: ...
+@overload
+def linspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ retstep: L[True] = ...,
+ dtype: DTypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> tuple[NDArray[Any], Any]: ...
+
+@overload
+def logspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeFloat_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def logspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ base: _ArrayLikeComplex_co = ...,
+ dtype: DTypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def geomspace(
+ start: _ArrayLikeFloat_co,
+ stop: _ArrayLikeFloat_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: None = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def geomspace(
+ start: _ArrayLikeComplex_co,
+ stop: _ArrayLikeComplex_co,
+ num: SupportsIndex = ...,
+ endpoint: bool = ...,
+ dtype: DTypeLike = ...,
+ axis: SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+# Re-exported to `np.lib.function_base`
+def add_newdoc(
+ place: str,
+ obj: str,
+ doc: str | tuple[str, str] | list[tuple[str, str]],
+ warn_on_python: bool = ...,
+) -> None: ...
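+
+# Illustrative (assumed type-checker output, not part of the stubs): with
+# the overloads above a checker resolves, e.g.:
+#
+#   reveal_type(linspace(0.0, 1.0))                # NDArray[floating[Any]]
+#   reveal_type(linspace(0j, 1j))                  # NDArray[complexfloating[Any, Any]]
+#   reveal_type(linspace(0.0, 1.0, retstep=True))  # tuple[NDArray[floating[Any]], floating[Any]]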
diff --git a/venv/lib/python3.9/site-packages/numpy/core/generate_numpy_api.py b/venv/lib/python3.9/site-packages/numpy/core/generate_numpy_api.py
new file mode 100644
index 00000000..a57a36a7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/generate_numpy_api.py
@@ -0,0 +1,244 @@
+import os
+import genapi
+
+from genapi import \
+ TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
+
+import numpy_api
+
+# use annotated api when running under cpychecker
+h_template = r"""
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+%s
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
+
+%s
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+ int st;
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) {
+ return -1;
+ }
+ c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
+ return -1;
+ }
+
+ if (!PyCapsule_CheckExact(c_api)) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+ Py_DECREF(c_api);
+ if (PyArray_API == NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
+ return -1;
+ }
+
+ /* Perform runtime check of C API version */
+ if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "ABI version 0x%%x but this version of numpy is 0x%%x", \
+ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+ return -1;
+ }
+ if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+             "API version 0x%%x but this version of numpy is 0x%%x. "\
+             "Check the C-API incompatibility section of the "\
+             "Troubleshooting ImportError guide at "\
+             "https://numpy.org/devdocs/user/troubleshooting-importerror.html"\
+             "#c-api-incompatibility "\
+             "for indications on how to solve this problem.", \
+ (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
+ return -1;
+ }
+
+ /*
+ * Perform runtime check of endianness and check it matches the one set by
+ * the headers (npy_endian.h) as a safeguard
+ */
+ st = PyArray_GetEndianness();
+ if (st == NPY_CPU_UNKNOWN_ENDIAN) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as unknown endian");
+ return -1;
+ }
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+ if (st != NPY_CPU_BIG) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as big endian, but "
+ "detected different endianness at runtime");
+ return -1;
+ }
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ if (st != NPY_CPU_LITTLE) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as little endian, but "
+ "detected different endianness at runtime");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
+
+#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
+
+#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
+
+#endif
+
+#endif
+"""
+
+
+c_template = r"""
+/* These pointers will be stored in the C-object for use in other
+ extension modules
+*/
+
+void *PyArray_API[] = {
+%s
+};
+"""
+
+c_api_header = """
+===========
+NumPy C-API
+===========
+"""
+
+def generate_api(output_dir, force=False):
+ basename = 'multiarray_api'
+
+ h_file = os.path.join(output_dir, '__%s.h' % basename)
+ c_file = os.path.join(output_dir, '__%s.c' % basename)
+ d_file = os.path.join(output_dir, '%s.txt' % basename)
+ targets = (h_file, c_file, d_file)
+
+ sources = numpy_api.multiarray_api
+
+ if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
+ return targets
+ else:
+ do_generate_api(targets, sources)
+
+ return targets
+
+def do_generate_api(targets, sources):
+ header_file = targets[0]
+ c_file = targets[1]
+ doc_file = targets[2]
+
+ global_vars = sources[0]
+ scalar_bool_values = sources[1]
+ types_api = sources[2]
+ multiarray_funcs = sources[3]
+
+ multiarray_api = sources[:]
+
+ module_list = []
+ extension_list = []
+ init_list = []
+
+ # Check multiarray api indexes
+ multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
+ genapi.check_api_dict(multiarray_api_index)
+
+ numpyapi_list = genapi.get_api_functions('NUMPY_API',
+ multiarray_funcs)
+
+ # Create dict name -> *Api instance
+ api_name = 'PyArray_API'
+ multiarray_api_dict = {}
+ for f in numpyapi_list:
+ name = f.name
+ index = multiarray_funcs[name][0]
+ annotations = multiarray_funcs[name][1:]
+ multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
+ f.return_type,
+ f.args, api_name)
+
+ for name, val in global_vars.items():
+ index, type = val
+ multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
+
+ for name, val in scalar_bool_values.items():
+ index = val[0]
+ multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
+
+ for name, val in types_api.items():
+ index = val[0]
+ internal_type = None if len(val) == 1 else val[1]
+ multiarray_api_dict[name] = TypeApi(
+ name, index, 'PyTypeObject', api_name, internal_type)
+
+ if len(multiarray_api_dict) != len(multiarray_api_index):
+ keys_dict = set(multiarray_api_dict.keys())
+ keys_index = set(multiarray_api_index.keys())
+ raise AssertionError(
+ "Multiarray API size mismatch - "
+ "index has extra keys {}, dict has extra keys {}"
+ .format(keys_index - keys_dict, keys_dict - keys_index)
+ )
+
+ extension_list = []
+ for name, index in genapi.order_dict(multiarray_api_index):
+ api_item = multiarray_api_dict[name]
+ extension_list.append(api_item.define_from_array_api_string())
+ init_list.append(api_item.array_api_define())
+ module_list.append(api_item.internal_define())
+
+ # Write to header
+ s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
+ genapi.write_file(header_file, s)
+
+ # Write to c-code
+ s = c_template % ',\n'.join(init_list)
+ genapi.write_file(c_file, s)
+
+ # write to documentation
+ s = c_api_header
+ for func in numpyapi_list:
+ s += func.to_ReST()
+ s += '\n\n'
+ genapi.write_file(doc_file, s)
+
+ return targets
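+
+
+# Usage sketch (the output directory is illustrative): invoked during the
+# build to emit the header, the C pointer table and the API documentation:
+#
+#   targets = generate_api('build/src/numpy/core', force=True)
+#   # -> ('.../__multiarray_api.h', '.../__multiarray_api.c',
+#   #     '.../multiarray_api.txt')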
diff --git a/venv/lib/python3.9/site-packages/numpy/core/getlimits.py b/venv/lib/python3.9/site-packages/numpy/core/getlimits.py
new file mode 100644
index 00000000..2c0f462c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/getlimits.py
@@ -0,0 +1,718 @@
+"""Machine limits for Float32 and Float64 and (long double) if available...
+
+"""
+__all__ = ['finfo', 'iinfo']
+
+import warnings
+
+from ._machar import MachAr
+from .overrides import set_module
+from . import numeric
+from . import numerictypes as ntypes
+from .numeric import array, inf, NaN
+from .umath import log10, exp2, nextafter, isnan
+
+
+def _fr0(a):
+ """fix rank-0 --> rank-1"""
+ if a.ndim == 0:
+ a = a.copy()
+ a.shape = (1,)
+ return a
+
+
+def _fr1(a):
+ """fix rank > 0 --> rank-0"""
+ if a.size == 1:
+ a = a.copy()
+ a.shape = ()
+ return a
+
+
+class MachArLike:
+ """ Object to simulate MachAr instance """
+ def __init__(self, ftype, *, eps, epsneg, huge, tiny,
+ ibeta, smallest_subnormal=None, **kwargs):
+ self.params = _MACHAR_PARAMS[ftype]
+ self.ftype = ftype
+ self.title = self.params['title']
+ # Parameter types same as for discovered MachAr object.
+ if not smallest_subnormal:
+ self._smallest_subnormal = nextafter(
+ self.ftype(0), self.ftype(1), dtype=self.ftype)
+ else:
+ self._smallest_subnormal = smallest_subnormal
+ self.epsilon = self.eps = self._float_to_float(eps)
+ self.epsneg = self._float_to_float(epsneg)
+ self.xmax = self.huge = self._float_to_float(huge)
+ self.xmin = self._float_to_float(tiny)
+ self.smallest_normal = self.tiny = self._float_to_float(tiny)
+ self.ibeta = self.params['itype'](ibeta)
+ self.__dict__.update(kwargs)
+ self.precision = int(-log10(self.eps))
+ self.resolution = self._float_to_float(
+ self._float_conv(10) ** (-self.precision))
+ self._str_eps = self._float_to_str(self.eps)
+ self._str_epsneg = self._float_to_str(self.epsneg)
+ self._str_xmin = self._float_to_str(self.xmin)
+ self._str_xmax = self._float_to_str(self.xmax)
+ self._str_resolution = self._float_to_str(self.resolution)
+ self._str_smallest_normal = self._float_to_str(self.xmin)
+
+ @property
+ def smallest_subnormal(self):
+ """Return the value for the smallest subnormal.
+
+ Returns
+ -------
+ smallest_subnormal : float
+ value for the smallest subnormal.
+
+ Warns
+ -----
+ UserWarning
+ If the calculated value for the smallest subnormal is zero.
+ """
+        # Warn if the calculated value for the smallest subnormal is zero.
+ value = self._smallest_subnormal
+ if self.ftype(0) == value:
+ warnings.warn(
+ 'The value of the smallest subnormal for {} type '
+ 'is zero.'.format(self.ftype), UserWarning, stacklevel=2)
+
+ return self._float_to_float(value)
+
+ @property
+ def _str_smallest_subnormal(self):
+ """Return the string representation of the smallest subnormal."""
+ return self._float_to_str(self.smallest_subnormal)
+
+ def _float_to_float(self, value):
+        """Convert `value` to a rank-0 array of ``self.ftype``.
+
+ Parameters
+ ----------
+ value : float
+ value to be converted.
+ """
+ return _fr1(self._float_conv(value))
+
+ def _float_conv(self, value):
+        """Wrap `value` in a one-element array of ``self.ftype``.
+
+ Parameters
+ ----------
+ value : float
+ value to be converted.
+ """
+ return array([value], self.ftype)
+
+ def _float_to_str(self, value):
+        """Format `value` as a string using this type's display format.
+
+ Parameters
+ ----------
+ value : float
+ value to be converted.
+ """
+ return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
+
+
+_convert_to_float = {
+ ntypes.csingle: ntypes.single,
+ ntypes.complex_: ntypes.float_,
+ ntypes.clongfloat: ntypes.longfloat
+ }
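+
+# finfo uses this mapping so that a complex dtype reports the limits of its
+# real/imaginary component type (e.g. complex128 -> float64).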
+
+# Parameters for creating MachAr / MachAr-like objects
+_title_fmt = 'numpy {} precision floating point number'
+_MACHAR_PARAMS = {
+ ntypes.double: dict(
+ itype = ntypes.int64,
+ fmt = '%24.16e',
+ title = _title_fmt.format('double')),
+ ntypes.single: dict(
+ itype = ntypes.int32,
+ fmt = '%15.7e',
+ title = _title_fmt.format('single')),
+ ntypes.longdouble: dict(
+ itype = ntypes.longlong,
+ fmt = '%s',
+ title = _title_fmt.format('long double')),
+ ntypes.half: dict(
+ itype = ntypes.int16,
+ fmt = '%12.5e',
+ title = _title_fmt.format('half'))}
+
+# Key to identify the floating point type. Key is result of
+# ftype('-0.1').newbyteorder('<').tobytes()
+# See:
+# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
+_KNOWN_TYPES = {}
+def _register_type(machar, bytepat):
+ _KNOWN_TYPES[bytepat] = machar
+_float_ma = {}
+
+
+def _register_known_types():
+ # Known parameters for float16
+ # See docstring of MachAr class for description of parameters.
+ f16 = ntypes.float16
+ float16_ma = MachArLike(f16,
+ machep=-10,
+ negep=-11,
+ minexp=-14,
+ maxexp=16,
+ it=10,
+ iexp=5,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f16(-10)),
+ epsneg=exp2(f16(-11)),
+ huge=f16(65504),
+ tiny=f16(2 ** -14))
+ _register_type(float16_ma, b'f\xae')
+ _float_ma[16] = float16_ma
+
+ # Known parameters for float32
+ f32 = ntypes.float32
+ float32_ma = MachArLike(f32,
+ machep=-23,
+ negep=-24,
+ minexp=-126,
+ maxexp=128,
+ it=23,
+ iexp=8,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f32(-23)),
+ epsneg=exp2(f32(-24)),
+ huge=f32((1 - 2 ** -24) * 2**128),
+ tiny=exp2(f32(-126)))
+ _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
+ _float_ma[32] = float32_ma
+
+ # Known parameters for float64
+ f64 = ntypes.float64
+ epsneg_f64 = 2.0 ** -53.0
+ tiny_f64 = 2.0 ** -1022.0
+ float64_ma = MachArLike(f64,
+ machep=-52,
+ negep=-53,
+ minexp=-1022,
+ maxexp=1024,
+ it=52,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=2.0 ** -52.0,
+ epsneg=epsneg_f64,
+ huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
+ tiny=tiny_f64)
+ _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ _float_ma[64] = float64_ma
+
+ # Known parameters for IEEE 754 128-bit binary float
+ ld = ntypes.longdouble
+ epsneg_f128 = exp2(ld(-113))
+ tiny_f128 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f128
+ with numeric.errstate(all='ignore'):
+ huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
+ float128_ma = MachArLike(ld,
+ machep=-112,
+ negep=-113,
+ minexp=-16382,
+ maxexp=16384,
+ it=112,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-112)),
+ epsneg=epsneg_f128,
+ huge=huge_f128,
+ tiny=tiny_f128)
+    # IEEE 754 128-bit binary float
+    _register_type(float128_ma,
+        b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+ _float_ma[128] = float128_ma
+
+ # Known parameters for float80 (Intel 80-bit extended precision)
+ epsneg_f80 = exp2(ld(-64))
+ tiny_f80 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f80
+ with numeric.errstate(all='ignore'):
+ huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
+ float80_ma = MachArLike(ld,
+ machep=-63,
+ negep=-64,
+ minexp=-16382,
+ maxexp=16384,
+ it=63,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-63)),
+ epsneg=epsneg_f80,
+ huge=huge_f80,
+ tiny=tiny_f80)
+ # float80, first 10 bytes containing actual storage
+ _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+ _float_ma[80] = float80_ma
+
+ # Guessed / known parameters for double double; see:
+ # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+    # These numbers have the same exponent range as float64, but more digits
+    # in the significand.
+ huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
+    # The smallest_normal for double-double is hard to calculate, so it is
+    # set to NaN.
+    smallest_normal_dd = NaN
+    # Use the same value for the smallest subnormal as plain double.
+ smallest_subnormal_dd = ld(nextafter(0., 1.))
+ float_dd_ma = MachArLike(ld,
+ machep=-105,
+ negep=-106,
+ minexp=-1022,
+ maxexp=1024,
+ it=105,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-105)),
+ epsneg=exp2(ld(-106)),
+ huge=huge_dd,
+ tiny=smallest_normal_dd,
+ smallest_subnormal=smallest_subnormal_dd)
+ # double double; low, high order (e.g. PPC 64)
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ # double double; high, low order (e.g. PPC 64 le)
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+ _float_ma['dd'] = float_dd_ma
+
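+# Illustrative check (derivable from the registrations above): the float64
+# lookup key is
+#
+#   >>> np.float64('-0.1').newbyteorder('<').tobytes()
+#   b'\x9a\x99\x99\x99\x99\x99\xb9\xbf'
+#
+# which is exactly the byte pattern registered for float64_ma.
+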
+
+def _get_machar(ftype):
+ """ Get MachAr instance or MachAr-like instance
+
+    Get parameters for a floating point type by first trying the signatures
+    of various known floating point types and, if none match, attempting to
+    identify the parameters by analysis.
+
+ Parameters
+ ----------
+ ftype : class
+ Numpy floating point type class (e.g. ``np.float64``)
+
+ Returns
+ -------
+ ma_like : instance of :class:`MachAr` or :class:`MachArLike`
+ Object giving floating point parameters for `ftype`.
+
+ Warns
+ -----
+ UserWarning
+ If the binary signature of the float type is not in the dictionary of
+ known float types.
+ """
+ params = _MACHAR_PARAMS.get(ftype)
+ if params is None:
+ raise ValueError(repr(ftype))
+ # Detect known / suspected types
+ key = ftype('-0.1').newbyteorder('<').tobytes()
+ ma_like = None
+ if ftype == ntypes.longdouble:
+        # Could be 80-bit == 10-byte extended precision, where the last
+        # bytes can be random garbage. Compare only the first 10 bytes to
+        # the pattern, to avoid branching on that garbage.
+ ma_like = _KNOWN_TYPES.get(key[:10])
+ if ma_like is None:
+ ma_like = _KNOWN_TYPES.get(key)
+ if ma_like is not None:
+ return ma_like
+ # Fall back to parameter discovery
+ warnings.warn(
+ f'Signature {key} for {ftype} does not match any known type: '
+ 'falling back to type probe function.\n'
+        'This warning indicates broken support for the dtype!',
+ UserWarning, stacklevel=2)
+ return _discovered_machar(ftype)
+
+
+def _discovered_machar(ftype):
+ """ Create MachAr instance with found information on float types
+ """
+ params = _MACHAR_PARAMS[ftype]
+ return MachAr(lambda v: array([v], ftype),
+                  lambda v: _fr0(v.astype(params['itype']))[0],
+                  lambda v: array(_fr0(v)[0], ftype),
+ lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
+ params['title'])
+
+
+@set_module('numpy')
+class finfo:
+ """
+ finfo(dtype)
+
+ Machine limits for floating point types.
+
+ Attributes
+ ----------
+ bits : int
+ The number of bits occupied by the type.
+ dtype : dtype
+ Returns the dtype for which `finfo` returns information. For complex
+ input, the returned dtype is the associated ``float*`` dtype for its
+ real and complex components.
+ eps : float
+ The difference between 1.0 and the next smallest representable float
+ larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
+ standard, ``eps = 2**-52``, approximately 2.22e-16.
+ epsneg : float
+ The difference between 1.0 and the next smallest representable float
+ less than 1.0. For example, for 64-bit binary floats in the IEEE-754
+ standard, ``epsneg = 2**-53``, approximately 1.11e-16.
+ iexp : int
+ The number of bits in the exponent portion of the floating point
+ representation.
+ machar : MachAr
+ The object which calculated these parameters and holds more
+ detailed information.
+
+ .. deprecated:: 1.22
+ machep : int
+ The exponent that yields `eps`.
+ max : floating point number of the appropriate type
+ The largest representable number.
+ maxexp : int
+ The smallest positive power of the base (2) that causes overflow.
+ min : floating point number of the appropriate type
+ The smallest representable number, typically ``-max``.
+ minexp : int
+ The most negative power of the base (2) consistent with there
+ being no leading 0's in the mantissa.
+ negep : int
+ The exponent that yields `epsneg`.
+ nexp : int
+ The number of bits in the exponent including its sign and bias.
+ nmant : int
+ The number of bits in the mantissa.
+ precision : int
+ The approximate number of decimal digits to which this kind of
+ float is precise.
+ resolution : floating point number of the appropriate type
+ The approximate decimal resolution of this type, i.e.,
+ ``10**-precision``.
+ tiny : float
+ An alias for `smallest_normal`, kept for backwards compatibility.
+ smallest_normal : float
+ The smallest positive floating point number with 1 as leading bit in
+ the mantissa following IEEE-754 (see Notes).
+ smallest_subnormal : float
+ The smallest positive floating point number with 0 as leading bit in
+ the mantissa following IEEE-754.
+
+ Parameters
+ ----------
+ dtype : float, dtype, or instance
+ Kind of floating point or complex floating point
+ data-type about which to get information.
+
+ See Also
+ --------
+ MachAr : The implementation of the tests that produce this information.
+ iinfo : The equivalent for integer data types.
+ spacing : The distance between a value and the nearest adjacent number
+ nextafter : The next floating point value after x1 towards x2
+
+ Notes
+ -----
+ For developers of NumPy: do not instantiate this at the module level.
+ The initial calculation of these parameters is expensive and negatively
+ impacts import times. These objects are cached, so calling ``finfo()``
+ repeatedly inside your functions is not a problem.
+
+ Note that ``smallest_normal`` is not actually the smallest positive
+ representable value in a NumPy floating point type. As in the IEEE-754
+ standard [1]_, NumPy floating point types make use of subnormal numbers to
+ fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
+ may have significantly reduced precision [2]_.
+
+    This function can also be used for complex data types. In that case the
+    output is the same as for the corresponding real float type
+    (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single));
+    the limits apply to the real and imaginary components individually.
+
+ References
+ ----------
+ .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
+ pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935
+ .. [2] Wikipedia, "Denormal Numbers",
+ https://en.wikipedia.org/wiki/Denormal_number
+
+ Examples
+ --------
+ >>> np.finfo(np.float64).dtype
+ dtype('float64')
+ >>> np.finfo(np.complex64).dtype
+ dtype('float32')
+
+ """
+
+ _finfo_cache = {}
+
+ def __new__(cls, dtype):
+ try:
+ dtype = numeric.dtype(dtype)
+ except TypeError:
+ # In case a float instance was given
+ dtype = numeric.dtype(type(dtype))
+
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ return obj
+ dtypes = [dtype]
+ newdtype = numeric.obj2sctype(dtype)
+ if newdtype is not dtype:
+ dtypes.append(newdtype)
+ dtype = newdtype
+ if not issubclass(dtype, numeric.inexact):
+ raise ValueError("data type %r not inexact" % (dtype))
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ return obj
+ if not issubclass(dtype, numeric.floating):
+ newdtype = _convert_to_float[dtype]
+ if newdtype is not dtype:
+ dtypes.append(newdtype)
+ dtype = newdtype
+ obj = cls._finfo_cache.get(dtype, None)
+ if obj is not None:
+ return obj
+ obj = object.__new__(cls)._init(dtype)
+ for dt in dtypes:
+ cls._finfo_cache[dt] = obj
+ return obj
+
+ def _init(self, dtype):
+ self.dtype = numeric.dtype(dtype)
+ machar = _get_machar(dtype)
+
+ for word in ['precision', 'iexp',
+ 'maxexp', 'minexp', 'negep',
+ 'machep']:
+ setattr(self, word, getattr(machar, word))
+ for word in ['resolution', 'epsneg', 'smallest_subnormal']:
+ setattr(self, word, getattr(machar, word).flat[0])
+ self.bits = self.dtype.itemsize * 8
+ self.max = machar.huge.flat[0]
+ self.min = -self.max
+ self.eps = machar.eps.flat[0]
+ self.nexp = machar.iexp
+ self.nmant = machar.it
+ self._machar = machar
+ self._str_tiny = machar._str_xmin.strip()
+ self._str_max = machar._str_xmax.strip()
+ self._str_epsneg = machar._str_epsneg.strip()
+ self._str_eps = machar._str_eps.strip()
+ self._str_resolution = machar._str_resolution.strip()
+ self._str_smallest_normal = machar._str_smallest_normal.strip()
+ self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
+ return self
+
+ def __str__(self):
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'precision = %(precision)3s resolution = %(_str_resolution)s\n'
+ 'machep = %(machep)6s eps = %(_str_eps)s\n'
+ 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
+ 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
+ 'maxexp = %(maxexp)6s max = %(_str_max)s\n'
+ 'nexp = %(nexp)6s min = -max\n'
+ 'smallest_normal = %(_str_smallest_normal)s '
+ 'smallest_subnormal = %(_str_smallest_subnormal)s\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
+
+ def __repr__(self):
+ c = self.__class__.__name__
+ d = self.__dict__.copy()
+ d['klass'] = c
+ return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
+ " max=%(_str_max)s, dtype=%(dtype)s)") % d)
+
+ @property
+ def smallest_normal(self):
+ """Return the value for the smallest normal.
+
+ Returns
+ -------
+ smallest_normal : float
+ Value for the smallest normal.
+
+ Warns
+ -----
+ UserWarning
+ If the calculated value for the smallest normal is requested for
+ double-double.
+ """
+ # This check is necessary because the value for smallest_normal is
+ # platform dependent for longdouble types.
+ if isnan(self._machar.smallest_normal.flat[0]):
+ warnings.warn(
+ 'The value of smallest normal is undefined for double double',
+ UserWarning, stacklevel=2)
+ return self._machar.smallest_normal.flat[0]
+
+ @property
+ def tiny(self):
+ """Return the value for tiny, alias of smallest_normal.
+
+ Returns
+ -------
+ tiny : float
+ Value for the smallest normal, alias of smallest_normal.
+
+ Warns
+ -----
+ UserWarning
+ If the calculated value for the smallest normal is requested for
+ double-double.
+ """
+ return self.smallest_normal
+
+ @property
+ def machar(self):
+ """The object which calculated these parameters and holds more
+ detailed information.
+
+ .. deprecated:: 1.22
+ """
+ # Deprecated 2021-10-27, NumPy 1.22
+ warnings.warn(
+ "`finfo.machar` is deprecated (NumPy 1.22)",
+ DeprecationWarning, stacklevel=2,
+ )
+ return self._machar
+
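+# Quick usage sketch:
+#
+#   >>> np.finfo(np.float32).eps
+#   1.1920929e-07
+#   >>> np.finfo(np.float64).max
+#   1.7976931348623157e+308
+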
+
+@set_module('numpy')
+class iinfo:
+ """
+ iinfo(type)
+
+ Machine limits for integer types.
+
+ Attributes
+ ----------
+ bits : int
+ The number of bits occupied by the type.
+ dtype : dtype
+ Returns the dtype for which `iinfo` returns information.
+ min : int
+ The smallest integer expressible by the type.
+ max : int
+ The largest integer expressible by the type.
+
+ Parameters
+ ----------
+ int_type : integer type, dtype, or instance
+ The kind of integer data type to get information about.
+
+ See Also
+ --------
+ finfo : The equivalent for floating point data types.
+
+ Examples
+ --------
+ With types:
+
+ >>> ii16 = np.iinfo(np.int16)
+ >>> ii16.min
+ -32768
+ >>> ii16.max
+ 32767
+ >>> ii32 = np.iinfo(np.int32)
+ >>> ii32.min
+ -2147483648
+ >>> ii32.max
+ 2147483647
+
+ With instances:
+
+ >>> ii32 = np.iinfo(np.int32(10))
+ >>> ii32.min
+ -2147483648
+ >>> ii32.max
+ 2147483647
+
+ """
+
+ _min_vals = {}
+ _max_vals = {}
+
+ def __init__(self, int_type):
+ try:
+ self.dtype = numeric.dtype(int_type)
+ except TypeError:
+ self.dtype = numeric.dtype(type(int_type))
+ self.kind = self.dtype.kind
+ self.bits = self.dtype.itemsize * 8
+ self.key = "%s%d" % (self.kind, self.bits)
+ if self.kind not in 'iu':
+ raise ValueError("Invalid integer data type %r." % (self.kind,))
+
+ @property
+ def min(self):
+ """Minimum value of given dtype."""
+ if self.kind == 'u':
+ return 0
+ else:
+ try:
+ val = iinfo._min_vals[self.key]
+ except KeyError:
+ val = int(-(1 << (self.bits-1)))
+ iinfo._min_vals[self.key] = val
+ return val
+
+ @property
+ def max(self):
+ """Maximum value of given dtype."""
+ try:
+ val = iinfo._max_vals[self.key]
+ except KeyError:
+ if self.kind == 'u':
+ val = int((1 << self.bits) - 1)
+ else:
+ val = int((1 << (self.bits-1)) - 1)
+ iinfo._max_vals[self.key] = val
+ return val
+
+ def __str__(self):
+ """String representation."""
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'min = %(min)s\n'
+ 'max = %(max)s\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
+
+ def __repr__(self):
+ return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
+ self.min, self.max, self.dtype)
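+
+
+# Quick sanity check of the bounds formulas used above:
+#
+#   >>> np.iinfo(np.uint8).max == (1 << 8) - 1
+#   True
+#   >>> np.iinfo(np.int16).min == -(1 << 15)
+#   True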
diff --git a/venv/lib/python3.9/site-packages/numpy/core/getlimits.pyi b/venv/lib/python3.9/site-packages/numpy/core/getlimits.pyi
new file mode 100644
index 00000000..da5e3c23
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/getlimits.pyi
@@ -0,0 +1,6 @@
+from numpy import (
+ finfo as finfo,
+ iinfo as iinfo,
+)
+
+__all__: list[str]
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/.doxyfile b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/.doxyfile
new file mode 100644
index 00000000..ed2aefff
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/.doxyfile
@@ -0,0 +1,2 @@
+INCLUDE_PATH += @CUR_DIR
+PREDEFINED += NPY_INTERNAL_BUILD
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h
new file mode 100644
index 00000000..0de43df1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/__multiarray_api.h
@@ -0,0 +1,1561 @@
+
+#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
+
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \
+ (void);
+extern NPY_NO_EXPORT PyTypeObject PyBigArray_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArray_Type;
+
+extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull;
+#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull))
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type;
+
+extern NPY_NO_EXPORT int NPY_NUMUSERTYPES;
+
+extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type;
+
+extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
+
+extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type;
+
+NPY_NO_EXPORT int PyArray_SetNumericOps \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_GetNumericOps \
+ (void);
+NPY_NO_EXPORT int PyArray_INCREF \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_XDECREF \
+ (PyArrayObject *);
+NPY_NO_EXPORT void PyArray_SetStringFunction \
+ (PyObject *, int);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \
+ (int);
+NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \
+ (int);
+NPY_NO_EXPORT char * PyArray_Zero \
+ (PyArrayObject *);
+NPY_NO_EXPORT char * PyArray_One \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT int PyArray_CastTo \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CastAnyTo \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CanCastSafely \
+ (int, int);
+NPY_NO_EXPORT npy_bool PyArray_CanCastTo \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_ObjectType \
+ (PyObject *, int);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \
+ (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \
+ (PyObject *, int *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \
+ (PyObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \
+ (PyObject *);
+NPY_NO_EXPORT npy_intp PyArray_Size \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Scalar \
+ (void *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \
+ (PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT void PyArray_ScalarAsCtype \
+ (PyObject *, void *);
+NPY_NO_EXPORT int PyArray_CastScalarToCtype \
+ (PyObject *, void *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_CastScalarDirect \
+ (PyObject *, PyArray_Descr *, void *, int);
+NPY_NO_EXPORT PyObject * PyArray_ScalarFromObject \
+ (PyObject *);
+NPY_NO_EXPORT PyArray_VectorUnaryFunc * PyArray_GetCastFunc \
+ (PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_FromDims \
+ (int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type));
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_FromDimsAndDataAndDescr \
+ (int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data));
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \
+ (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \
+ (PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromFile \
+ (FILE *, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT PyObject * PyArray_FromString \
+ (char *, npy_intp, PyArray_Descr *, npy_intp, char *);
+NPY_NO_EXPORT PyObject * PyArray_FromBuffer \
+ (PyObject *, PyArray_Descr *, npy_intp, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \
+ (PyObject *, PyArray_Descr *, npy_intp);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \
+ (PyArrayObject *, PyArray_Descr *, int, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Byteswap \
+ (PyArrayObject *, npy_bool);
+NPY_NO_EXPORT PyObject * PyArray_Resize \
+ (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order));
+NPY_NO_EXPORT int PyArray_MoveInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyAnyInto \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_CopyObject \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_NewCopy \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_ToList \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_ToString \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT int PyArray_ToFile \
+ (PyArrayObject *, FILE *, char *, char *);
+NPY_NO_EXPORT int PyArray_Dump \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Dumps \
+ (PyObject *, int);
+NPY_NO_EXPORT int PyArray_ValidType \
+ (int);
+NPY_NO_EXPORT void PyArray_UpdateFlags \
+ (PyArrayObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_New \
+ (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \
+ (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \
+ (PyArray_Descr *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \
+ (int);
+NPY_NO_EXPORT double PyArray_GetPriority \
+ (PyObject *, double);
+NPY_NO_EXPORT PyObject * PyArray_IterNew \
+ (PyObject *);
+NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \
+ (int, ...);
+NPY_NO_EXPORT int PyArray_PyIntAsInt \
+ (PyObject *);
+NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \
+ (PyObject *);
+NPY_NO_EXPORT int PyArray_Broadcast \
+ (PyArrayMultiIterObject *);
+NPY_NO_EXPORT void PyArray_FillObjectArray \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT int PyArray_FillWithScalar \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT npy_bool PyArray_CheckStrides \
+ (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \
+ (PyArray_Descr *, char);
+NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \
+ (PyObject *, int *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \
+ (PyObject *, PyArray_Descr *, int, int, int, PyObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \
+ (PyArrayObject *, PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_FromInterface \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \
+ (PyObject *, PyArray_Descr *, PyObject *);
+NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \
+ (int, PyArrayObject **);
+NPY_NO_EXPORT int PyArray_CanCoerceScalar \
+ (int, int, NPY_SCALARKIND);
+NPY_NO_EXPORT PyObject * PyArray_NewFlagsObject \
+ (PyObject *);
+NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \
+ (PyTypeObject *, PyTypeObject *);
+NPY_NO_EXPORT int PyArray_CompareUCS4 \
+ (npy_ucs4 const *, npy_ucs4 const *, size_t);
+NPY_NO_EXPORT int PyArray_RemoveSmallest \
+ (PyArrayMultiIterObject *);
+NPY_NO_EXPORT int PyArray_ElementStrides \
+ (PyObject *);
+NPY_NO_EXPORT void PyArray_Item_INCREF \
+ (char *, PyArray_Descr *);
+NPY_NO_EXPORT void PyArray_Item_XDECREF \
+ (char *, PyArray_Descr *);
+NPY_NO_EXPORT PyObject * PyArray_FieldNames \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Transpose \
+ (PyArrayObject *, PyArray_Dims *);
+NPY_NO_EXPORT PyObject * PyArray_TakeFrom \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT PyObject * PyArray_PutTo \
+ (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT PyObject * PyArray_PutMask \
+ (PyArrayObject *, PyObject*, PyObject*);
+NPY_NO_EXPORT PyObject * PyArray_Repeat \
+ (PyArrayObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Choose \
+ (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE);
+NPY_NO_EXPORT int PyArray_Sort \
+ (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT PyObject * PyArray_ArgSort \
+ (PyArrayObject *, int, NPY_SORTKIND);
+NPY_NO_EXPORT PyObject * PyArray_SearchSorted \
+ (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_ArgMax \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_ArgMin \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Reshape \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Newshape \
+ (PyArrayObject *, PyArray_Dims *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_Squeeze \
+ (PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \
+ (PyArrayObject *, PyArray_Descr *, PyTypeObject *);
+NPY_NO_EXPORT PyObject * PyArray_SwapAxes \
+ (PyArrayObject *, int, int);
+NPY_NO_EXPORT PyObject * PyArray_Max \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Min \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Ptp \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Mean \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Trace \
+ (PyArrayObject *, int, int, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Diagonal \
+ (PyArrayObject *, int, int, int);
+NPY_NO_EXPORT PyObject * PyArray_Clip \
+ (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Conjugate \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Nonzero \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Std \
+ (PyArrayObject *, int, int, PyArrayObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Sum \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_CumSum \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Prod \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_CumProd \
+ (PyArrayObject *, int, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_All \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Any \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Compress \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyArray_Flatten \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT PyObject * PyArray_Ravel \
+ (PyArrayObject *, NPY_ORDER);
+NPY_NO_EXPORT npy_intp PyArray_MultiplyList \
+ (npy_intp const *, int);
+NPY_NO_EXPORT int PyArray_MultiplyIntList \
+ (int const *, int);
+NPY_NO_EXPORT void * PyArray_GetPtr \
+ (PyArrayObject *, npy_intp const*);
+NPY_NO_EXPORT int PyArray_CompareLists \
+ (npy_intp const *, npy_intp const *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \
+ (PyObject **, void *, npy_intp *, int, PyArray_Descr*);
+NPY_NO_EXPORT int PyArray_As1D \
+ (PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode));
+NPY_NO_EXPORT int PyArray_As2D \
+ (PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode));
+NPY_NO_EXPORT int PyArray_Free \
+ (PyObject *, void *);
+NPY_NO_EXPORT int PyArray_Converter \
+ (PyObject *, PyObject **);
+NPY_NO_EXPORT int PyArray_IntpFromSequence \
+ (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT PyObject * PyArray_Concatenate \
+ (PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_InnerProduct \
+ (PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \
+ (PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_CopyAndTranspose \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Correlate \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT int PyArray_TypestrConvert \
+ (int, int);
+NPY_NO_EXPORT int PyArray_DescrConverter \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_DescrConverter2 \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_IntpConverter \
+ (PyObject *, PyArray_Dims *);
+NPY_NO_EXPORT int PyArray_BufferConverter \
+ (PyObject *, PyArray_Chunk *);
+NPY_NO_EXPORT int PyArray_AxisConverter \
+ (PyObject *, int *);
+NPY_NO_EXPORT int PyArray_BoolConverter \
+ (PyObject *, npy_bool *);
+NPY_NO_EXPORT int PyArray_ByteorderConverter \
+ (PyObject *, char *);
+NPY_NO_EXPORT int PyArray_OrderConverter \
+ (PyObject *, NPY_ORDER *);
+NPY_NO_EXPORT unsigned char PyArray_EquivTypes \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \
+ (int, npy_intp const *, PyArray_Descr *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \
+ (int, npy_intp const *, PyArray_Descr *, int);
+NPY_NO_EXPORT PyObject * PyArray_Where \
+ (PyObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_Arange \
+ (double, double, double, int);
+NPY_NO_EXPORT PyObject * PyArray_ArangeObj \
+ (PyObject *, PyObject *, PyObject *, PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_SortkindConverter \
+ (PyObject *, NPY_SORTKIND *);
+NPY_NO_EXPORT PyObject * PyArray_LexSort \
+ (PyObject *, int);
+NPY_NO_EXPORT PyObject * PyArray_Round \
+ (PyArrayObject *, int, PyArrayObject *);
+NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \
+ (int, int);
+NPY_NO_EXPORT int PyArray_RegisterDataType \
+ (PyArray_Descr *);
+NPY_NO_EXPORT int PyArray_RegisterCastFunc \
+ (PyArray_Descr *, int, PyArray_VectorUnaryFunc *);
+NPY_NO_EXPORT int PyArray_RegisterCanCast \
+ (PyArray_Descr *, int, NPY_SCALARKIND);
+NPY_NO_EXPORT void PyArray_InitArrFuncs \
+ (PyArray_ArrFuncs *);
+NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \
+ (int, npy_intp const *);
+NPY_NO_EXPORT int PyArray_TypeNumFromName \
+ (char const *);
+NPY_NO_EXPORT int PyArray_ClipmodeConverter \
+ (PyObject *, NPY_CLIPMODE *);
+NPY_NO_EXPORT int PyArray_OutputConverter \
+ (PyObject *, PyArrayObject **);
+NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \
+ (PyObject *, npy_intp *, int);
+NPY_NO_EXPORT void _PyArray_SigintHandler \
+ (int);
+NPY_NO_EXPORT void* _PyArray_GetSigintBuf \
+ (void);
+NPY_NO_EXPORT int PyArray_DescrAlignConverter \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \
+ (PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyArray_SearchsideConverter \
+ (PyObject *, void *);
+NPY_NO_EXPORT PyObject * PyArray_CheckAxis \
+ (PyArrayObject *, int *, int);
+NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \
+ (npy_intp const *, int);
+NPY_NO_EXPORT int PyArray_CompareString \
+ (const char *, const char *, size_t);
+NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \
+ (PyObject **, int, int, ...);
+NPY_NO_EXPORT int PyArray_GetEndianness \
+ (void);
+NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \
+ (void);
+NPY_NO_EXPORT PyObject * PyArray_Correlate2 \
+ (PyObject *, PyObject *, int);
+NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \
+ (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*);
+extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type;
+
+extern NPY_NO_EXPORT PyTypeObject NpyIter_Type;
+
+NPY_NO_EXPORT void PyArray_SetDatetimeParseFunction \
+ (PyObject *NPY_UNUSED(op));
+NPY_NO_EXPORT void PyArray_DatetimeToDatetimeStruct \
+ (npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *);
+NPY_NO_EXPORT void PyArray_TimedeltaToTimedeltaStruct \
+ (npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *);
+NPY_NO_EXPORT npy_datetime PyArray_DatetimeStructToDatetime \
+ (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d));
+NPY_NO_EXPORT npy_datetime PyArray_TimedeltaStructToTimedelta \
+ (NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d));
+NPY_NO_EXPORT NpyIter * NpyIter_New \
+ (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*);
+NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \
+ (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **);
+NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \
+ (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp);
+NPY_NO_EXPORT NpyIter * NpyIter_Copy \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_Deallocate \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_EnableExternalLoop \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_Reset \
+ (NpyIter *, char **);
+NPY_NO_EXPORT int NpyIter_ResetBasePointers \
+ (NpyIter *, char **, char **);
+NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \
+ (NpyIter *, npy_intp, npy_intp, char **);
+NPY_NO_EXPORT int NpyIter_GetNDim \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GetNOp \
+ (NpyIter *);
+NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \
+ (NpyIter *, char **);
+NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \
+ (NpyIter *);
+NPY_NO_EXPORT void NpyIter_GetIterIndexRange \
+ (NpyIter *, npy_intp *, npy_intp *);
+NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GotoIterIndex \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GetShape \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \
+ (NpyIter *, char **);
+NPY_NO_EXPORT int NpyIter_GotoMultiIndex \
+ (NpyIter *, npy_intp const *);
+NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_HasIndex \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \
+ (NpyIter *);
+NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_GotoIndex \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \
+ (NpyIter *);
+NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \
+ (NpyIter *, npy_intp);
+NPY_NO_EXPORT void NpyIter_GetReadFlags \
+ (NpyIter *, char *);
+NPY_NO_EXPORT void NpyIter_GetWriteFlags \
+ (NpyIter *, char *);
+NPY_NO_EXPORT void NpyIter_DebugPrint \
+ (NpyIter *);
+NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \
+ (NpyIter *);
+NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \
+ (NpyIter *, npy_intp *);
+NPY_NO_EXPORT int NpyIter_RemoveAxis \
+ (NpyIter *, int);
+NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \
+ (NpyIter *, int);
+NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \
+ (NpyIter *);
+NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \
+ (NpyIter *);
+NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \
+ (NpyIter *, npy_intp, npy_intp *);
+NPY_NO_EXPORT int PyArray_CastingConverter \
+ (PyObject *, NPY_CASTING *);
+NPY_NO_EXPORT npy_intp PyArray_CountNonzero \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \
+ (PyArray_Descr *, PyArray_Descr *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \
+ (PyArrayObject *);
+NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \
+ (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]);
+NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \
+ (PyArrayObject *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \
+ (PyArray_Descr *, PyArray_Descr *, NPY_CASTING);
+NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \
+ (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \
+ (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int);
+NPY_NO_EXPORT int PyArray_GetArrayParamsFromObject \
+ (PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context));
+NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \
+ (PyObject *, NPY_CLIPMODE *, int);
+NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \
+ (PyObject *, PyObject *, PyArrayObject*);
+NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \
+ (NpyIter *, int);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \
+ (int, npy_intp const *, npy_stride_sort_item *);
+NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \
+ (PyArrayObject *, const npy_bool *);
+NPY_NO_EXPORT void PyArray_DebugPrint \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_FailUnlessWriteable \
+ (PyArrayObject *, const char *);
+NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT void * PyDataMem_NEW \
+ (size_t);
+NPY_NO_EXPORT void PyDataMem_FREE \
+ (void *);
+NPY_NO_EXPORT void * PyDataMem_RENEW \
+ (void *, size_t);
+NPY_NO_EXPORT PyDataMem_EventHookFunc * PyDataMem_SetEventHook \
+ (PyDataMem_EventHookFunc *, void *, void **);
+extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING;
+
+NPY_NO_EXPORT void PyArray_MapIterSwapAxes \
+ (PyArrayMapIterObject *, PyArrayObject **, int);
+NPY_NO_EXPORT PyObject * PyArray_MapIterArray \
+ (PyArrayObject *, PyObject *);
+NPY_NO_EXPORT void PyArray_MapIterNext \
+ (PyArrayMapIterObject *);
+NPY_NO_EXPORT int PyArray_Partition \
+ (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT PyObject * PyArray_ArgPartition \
+ (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND);
+NPY_NO_EXPORT int PyArray_SelectkindConverter \
+ (PyObject *, NPY_SELECTKIND *);
+NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \
+ (size_t, size_t);
+NPY_NO_EXPORT int PyArray_CheckAnyScalarExact \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyArray_MapIterArrayCopyIfOverlap \
+ (PyArrayObject *, PyObject *, int, PyArrayObject *);
+NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \
+ (PyArrayObject *);
+NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \
+ (PyArrayObject *, PyArrayObject *);
+NPY_NO_EXPORT PyObject * PyDataMem_SetHandler \
+ (PyObject *);
+NPY_NO_EXPORT PyObject * PyDataMem_GetHandler \
+ (void);
+extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler;
+
+
+#else
+
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+extern void **PyArray_API;
+#else
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+void **PyArray_API;
+#else
+static void **PyArray_API=NULL;
+#endif
+#endif
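+
+/*
+ * Hedged usage sketch (file names illustrative): when the PyArray_API
+ * table must be shared across several translation units, exactly one
+ * unit owns the table and imports it; the others only reference it.
+ *
+ *   // core.c -- defines the table and calls import_array()
+ *   #define PY_ARRAY_UNIQUE_SYMBOL mypkg_ARRAY_API
+ *   #include <numpy/arrayobject.h>
+ *
+ *   // helpers.c -- reuses the table defined in core.c
+ *   #define PY_ARRAY_UNIQUE_SYMBOL mypkg_ARRAY_API
+ *   #define NO_IMPORT_ARRAY
+ *   #include <numpy/arrayobject.h>
+ */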
+
+#define PyArray_GetNDArrayCVersion \
+ (*(unsigned int (*)(void)) \
+ PyArray_API[0])
+#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1])
+#define PyArray_Type (*(PyTypeObject *)PyArray_API[2])
+#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3])
+#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4])
+#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5])
+#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6])
+#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7])
+#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8])
+#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9])
+#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10])
+#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11])
+#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12])
+#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13])
+#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14])
+#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15])
+#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16])
+#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17])
+#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18])
+#define PyCharacterArrType_Type (*(PyTypeObject *)PyArray_API[19])
+#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20])
+#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21])
+#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22])
+#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23])
+#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24])
+#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25])
+#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26])
+#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27])
+#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28])
+#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29])
+#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30])
+#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31])
+#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32])
+#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33])
+#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34])
+#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35])
+#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36])
+#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37])
+#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38])
+#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39])
+#define PyArray_SetNumericOps \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[40])
+#define PyArray_GetNumericOps \
+ (*(PyObject * (*)(void)) \
+ PyArray_API[41])
+#define PyArray_INCREF \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[42])
+#define PyArray_XDECREF \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[43])
+#define PyArray_SetStringFunction \
+ (*(void (*)(PyObject *, int)) \
+ PyArray_API[44])
+#define PyArray_DescrFromType \
+ (*(PyArray_Descr * (*)(int)) \
+ PyArray_API[45])
+#define PyArray_TypeObjectFromType \
+ (*(PyObject * (*)(int)) \
+ PyArray_API[46])
+#define PyArray_Zero \
+ (*(char * (*)(PyArrayObject *)) \
+ PyArray_API[47])
+#define PyArray_One \
+ (*(char * (*)(PyArrayObject *)) \
+ PyArray_API[48])
+#define PyArray_CastToType \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[49])
+#define PyArray_CastTo \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[50])
+#define PyArray_CastAnyTo \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[51])
+#define PyArray_CanCastSafely \
+ (*(int (*)(int, int)) \
+ PyArray_API[52])
+#define PyArray_CanCastTo \
+ (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[53])
+#define PyArray_ObjectType \
+ (*(int (*)(PyObject *, int)) \
+ PyArray_API[54])
+#define PyArray_DescrFromObject \
+ (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \
+ PyArray_API[55])
+#define PyArray_ConvertToCommonType \
+ (*(PyArrayObject ** (*)(PyObject *, int *)) \
+ PyArray_API[56])
+#define PyArray_DescrFromScalar \
+ (*(PyArray_Descr * (*)(PyObject *)) \
+ PyArray_API[57])
+#define PyArray_DescrFromTypeObject \
+ (*(PyArray_Descr * (*)(PyObject *)) \
+ PyArray_API[58])
+#define PyArray_Size \
+ (*(npy_intp (*)(PyObject *)) \
+ PyArray_API[59])
+#define PyArray_Scalar \
+ (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \
+ PyArray_API[60])
+#define PyArray_FromScalar \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \
+ PyArray_API[61])
+#define PyArray_ScalarAsCtype \
+ (*(void (*)(PyObject *, void *)) \
+ PyArray_API[62])
+#define PyArray_CastScalarToCtype \
+ (*(int (*)(PyObject *, void *, PyArray_Descr *)) \
+ PyArray_API[63])
+#define PyArray_CastScalarDirect \
+ (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \
+ PyArray_API[64])
+#define PyArray_ScalarFromObject \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[65])
+#define PyArray_GetCastFunc \
+ (*(PyArray_VectorUnaryFunc * (*)(PyArray_Descr *, int)) \
+ PyArray_API[66])
+#define PyArray_FromDims \
+ (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))) \
+ PyArray_API[67])
+#define PyArray_FromDimsAndDataAndDescr \
+ (*(PyObject * (*)(int NPY_UNUSED(nd), int *NPY_UNUSED(d), PyArray_Descr *, char *NPY_UNUSED(data))) \
+ PyArray_API[68])
+#define PyArray_FromAny \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+ PyArray_API[69])
+#define PyArray_EnsureArray \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[70])
+#define PyArray_EnsureAnyArray \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[71])
+#define PyArray_FromFile \
+ (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \
+ PyArray_API[72])
+#define PyArray_FromString \
+ (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \
+ PyArray_API[73])
+#define PyArray_FromBuffer \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \
+ PyArray_API[74])
+#define PyArray_FromIter \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \
+ PyArray_API[75])
+#define PyArray_Return \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[76])
+#define PyArray_GetField \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[77])
+#define PyArray_SetField \
+ (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \
+ PyArray_API[78])
+#define PyArray_Byteswap \
+ (*(PyObject * (*)(PyArrayObject *, npy_bool)) \
+ PyArray_API[79])
+#define PyArray_Resize \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \
+ PyArray_API[80])
+#define PyArray_MoveInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[81])
+#define PyArray_CopyInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[82])
+#define PyArray_CopyAnyInto \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[83])
+#define PyArray_CopyObject \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[84])
+#define PyArray_NewCopy \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[85])
+#define PyArray_ToList \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[86])
+#define PyArray_ToString \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[87])
+#define PyArray_ToFile \
+ (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \
+ PyArray_API[88])
+#define PyArray_Dump \
+ (*(int (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[89])
+#define PyArray_Dumps \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[90])
+#define PyArray_ValidType \
+ (*(int (*)(int)) \
+ PyArray_API[91])
+#define PyArray_UpdateFlags \
+ (*(void (*)(PyArrayObject *, int)) \
+ PyArray_API[92])
+#define PyArray_New \
+ (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \
+ PyArray_API[93])
+#define PyArray_NewFromDescr \
+ (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \
+ PyArray_API[94])
+#define PyArray_DescrNew \
+ (*(PyArray_Descr * (*)(PyArray_Descr *)) \
+ PyArray_API[95])
+#define PyArray_DescrNewFromType \
+ (*(PyArray_Descr * (*)(int)) \
+ PyArray_API[96])
+#define PyArray_GetPriority \
+ (*(double (*)(PyObject *, double)) \
+ PyArray_API[97])
+#define PyArray_IterNew \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[98])
+#define PyArray_MultiIterNew \
+ (*(PyObject* (*)(int, ...)) \
+ PyArray_API[99])
+#define PyArray_PyIntAsInt \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[100])
+#define PyArray_PyIntAsIntp \
+ (*(npy_intp (*)(PyObject *)) \
+ PyArray_API[101])
+#define PyArray_Broadcast \
+ (*(int (*)(PyArrayMultiIterObject *)) \
+ PyArray_API[102])
+#define PyArray_FillObjectArray \
+ (*(void (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[103])
+#define PyArray_FillWithScalar \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[104])
+#define PyArray_CheckStrides \
+ (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \
+ PyArray_API[105])
+#define PyArray_DescrNewByteorder \
+ (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \
+ PyArray_API[106])
+#define PyArray_IterAllButAxis \
+ (*(PyObject * (*)(PyObject *, int *)) \
+ PyArray_API[107])
+#define PyArray_CheckFromAny \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \
+ PyArray_API[108])
+#define PyArray_FromArray \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \
+ PyArray_API[109])
+#define PyArray_FromInterface \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[110])
+#define PyArray_FromStructInterface \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[111])
+#define PyArray_FromArrayAttr \
+ (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \
+ PyArray_API[112])
+#define PyArray_ScalarKind \
+ (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \
+ PyArray_API[113])
+#define PyArray_CanCoerceScalar \
+ (*(int (*)(int, int, NPY_SCALARKIND)) \
+ PyArray_API[114])
+#define PyArray_NewFlagsObject \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[115])
+#define PyArray_CanCastScalar \
+ (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \
+ PyArray_API[116])
+#define PyArray_CompareUCS4 \
+ (*(int (*)(npy_ucs4 const *, npy_ucs4 const *, size_t)) \
+ PyArray_API[117])
+#define PyArray_RemoveSmallest \
+ (*(int (*)(PyArrayMultiIterObject *)) \
+ PyArray_API[118])
+#define PyArray_ElementStrides \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[119])
+#define PyArray_Item_INCREF \
+ (*(void (*)(char *, PyArray_Descr *)) \
+ PyArray_API[120])
+#define PyArray_Item_XDECREF \
+ (*(void (*)(char *, PyArray_Descr *)) \
+ PyArray_API[121])
+#define PyArray_FieldNames \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[122])
+#define PyArray_Transpose \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \
+ PyArray_API[123])
+#define PyArray_TakeFrom \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \
+ PyArray_API[124])
+#define PyArray_PutTo \
+ (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \
+ PyArray_API[125])
+#define PyArray_PutMask \
+ (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \
+ PyArray_API[126])
+#define PyArray_Repeat \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \
+ PyArray_API[127])
+#define PyArray_Choose \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \
+ PyArray_API[128])
+#define PyArray_Sort \
+ (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+ PyArray_API[129])
+#define PyArray_ArgSort \
+ (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \
+ PyArray_API[130])
+#define PyArray_SearchSorted \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \
+ PyArray_API[131])
+#define PyArray_ArgMax \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[132])
+#define PyArray_ArgMin \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[133])
+#define PyArray_Reshape \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[134])
+#define PyArray_Newshape \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \
+ PyArray_API[135])
+#define PyArray_Squeeze \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[136])
+#define PyArray_View \
+ (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \
+ PyArray_API[137])
+#define PyArray_SwapAxes \
+ (*(PyObject * (*)(PyArrayObject *, int, int)) \
+ PyArray_API[138])
+#define PyArray_Max \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[139])
+#define PyArray_Min \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[140])
+#define PyArray_Ptp \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[141])
+#define PyArray_Mean \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[142])
+#define PyArray_Trace \
+ (*(PyObject * (*)(PyArrayObject *, int, int, int, int, PyArrayObject *)) \
+ PyArray_API[143])
+#define PyArray_Diagonal \
+ (*(PyObject * (*)(PyArrayObject *, int, int, int)) \
+ PyArray_API[144])
+#define PyArray_Clip \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \
+ PyArray_API[145])
+#define PyArray_Conjugate \
+ (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[146])
+#define PyArray_Nonzero \
+ (*(PyObject * (*)(PyArrayObject *)) \
+ PyArray_API[147])
+#define PyArray_Std \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \
+ PyArray_API[148])
+#define PyArray_Sum \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[149])
+#define PyArray_CumSum \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[150])
+#define PyArray_Prod \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[151])
+#define PyArray_CumProd \
+ (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \
+ PyArray_API[152])
+#define PyArray_All \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[153])
+#define PyArray_Any \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[154])
+#define PyArray_Compress \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+ PyArray_API[155])
+#define PyArray_Flatten \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[156])
+#define PyArray_Ravel \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \
+ PyArray_API[157])
+#define PyArray_MultiplyList \
+ (*(npy_intp (*)(npy_intp const *, int)) \
+ PyArray_API[158])
+#define PyArray_MultiplyIntList \
+ (*(int (*)(int const *, int)) \
+ PyArray_API[159])
+#define PyArray_GetPtr \
+ (*(void * (*)(PyArrayObject *, npy_intp const*)) \
+ PyArray_API[160])
+#define PyArray_CompareLists \
+ (*(int (*)(npy_intp const *, npy_intp const *, int)) \
+ PyArray_API[161])
+#define PyArray_AsCArray \
+ (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \
+ PyArray_API[162])
+#define PyArray_As1D \
+ (*(int (*)(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))) \
+ PyArray_API[163])
+#define PyArray_As2D \
+ (*(int (*)(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr), int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))) \
+ PyArray_API[164])
+#define PyArray_Free \
+ (*(int (*)(PyObject *, void *)) \
+ PyArray_API[165])
+#define PyArray_Converter \
+ (*(int (*)(PyObject *, PyObject **)) \
+ PyArray_API[166])
+#define PyArray_IntpFromSequence \
+ (*(int (*)(PyObject *, npy_intp *, int)) \
+ PyArray_API[167])
+#define PyArray_Concatenate \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[168])
+#define PyArray_InnerProduct \
+ (*(PyObject * (*)(PyObject *, PyObject *)) \
+ PyArray_API[169])
+#define PyArray_MatrixProduct \
+ (*(PyObject * (*)(PyObject *, PyObject *)) \
+ PyArray_API[170])
+#define PyArray_CopyAndTranspose \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[171])
+#define PyArray_Correlate \
+ (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[172])
+#define PyArray_TypestrConvert \
+ (*(int (*)(int, int)) \
+ PyArray_API[173])
+#define PyArray_DescrConverter \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[174])
+#define PyArray_DescrConverter2 \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[175])
+#define PyArray_IntpConverter \
+ (*(int (*)(PyObject *, PyArray_Dims *)) \
+ PyArray_API[176])
+#define PyArray_BufferConverter \
+ (*(int (*)(PyObject *, PyArray_Chunk *)) \
+ PyArray_API[177])
+#define PyArray_AxisConverter \
+ (*(int (*)(PyObject *, int *)) \
+ PyArray_API[178])
+#define PyArray_BoolConverter \
+ (*(int (*)(PyObject *, npy_bool *)) \
+ PyArray_API[179])
+#define PyArray_ByteorderConverter \
+ (*(int (*)(PyObject *, char *)) \
+ PyArray_API[180])
+#define PyArray_OrderConverter \
+ (*(int (*)(PyObject *, NPY_ORDER *)) \
+ PyArray_API[181])
+#define PyArray_EquivTypes \
+ (*(unsigned char (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[182])
+#define PyArray_Zeros \
+ (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \
+ PyArray_API[183])
+#define PyArray_Empty \
+ (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \
+ PyArray_API[184])
+#define PyArray_Where \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \
+ PyArray_API[185])
+#define PyArray_Arange \
+ (*(PyObject * (*)(double, double, double, int)) \
+ PyArray_API[186])
+#define PyArray_ArangeObj \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \
+ PyArray_API[187])
+#define PyArray_SortkindConverter \
+ (*(int (*)(PyObject *, NPY_SORTKIND *)) \
+ PyArray_API[188])
+#define PyArray_LexSort \
+ (*(PyObject * (*)(PyObject *, int)) \
+ PyArray_API[189])
+#define PyArray_Round \
+ (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \
+ PyArray_API[190])
+#define PyArray_EquivTypenums \
+ (*(unsigned char (*)(int, int)) \
+ PyArray_API[191])
+#define PyArray_RegisterDataType \
+ (*(int (*)(PyArray_Descr *)) \
+ PyArray_API[192])
+#define PyArray_RegisterCastFunc \
+ (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \
+ PyArray_API[193])
+#define PyArray_RegisterCanCast \
+ (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \
+ PyArray_API[194])
+#define PyArray_InitArrFuncs \
+ (*(void (*)(PyArray_ArrFuncs *)) \
+ PyArray_API[195])
+#define PyArray_IntTupleFromIntp \
+ (*(PyObject * (*)(int, npy_intp const *)) \
+ PyArray_API[196])
+#define PyArray_TypeNumFromName \
+ (*(int (*)(char const *)) \
+ PyArray_API[197])
+#define PyArray_ClipmodeConverter \
+ (*(int (*)(PyObject *, NPY_CLIPMODE *)) \
+ PyArray_API[198])
+#define PyArray_OutputConverter \
+ (*(int (*)(PyObject *, PyArrayObject **)) \
+ PyArray_API[199])
+#define PyArray_BroadcastToShape \
+ (*(PyObject * (*)(PyObject *, npy_intp *, int)) \
+ PyArray_API[200])
+#define _PyArray_SigintHandler \
+ (*(void (*)(int)) \
+ PyArray_API[201])
+#define _PyArray_GetSigintBuf \
+ (*(void* (*)(void)) \
+ PyArray_API[202])
+#define PyArray_DescrAlignConverter \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[203])
+#define PyArray_DescrAlignConverter2 \
+ (*(int (*)(PyObject *, PyArray_Descr **)) \
+ PyArray_API[204])
+#define PyArray_SearchsideConverter \
+ (*(int (*)(PyObject *, void *)) \
+ PyArray_API[205])
+#define PyArray_CheckAxis \
+ (*(PyObject * (*)(PyArrayObject *, int *, int)) \
+ PyArray_API[206])
+#define PyArray_OverflowMultiplyList \
+ (*(npy_intp (*)(npy_intp const *, int)) \
+ PyArray_API[207])
+#define PyArray_CompareString \
+ (*(int (*)(const char *, const char *, size_t)) \
+ PyArray_API[208])
+#define PyArray_MultiIterFromObjects \
+ (*(PyObject* (*)(PyObject **, int, int, ...)) \
+ PyArray_API[209])
+#define PyArray_GetEndianness \
+ (*(int (*)(void)) \
+ PyArray_API[210])
+#define PyArray_GetNDArrayCFeatureVersion \
+ (*(unsigned int (*)(void)) \
+ PyArray_API[211])
+#define PyArray_Correlate2 \
+ (*(PyObject * (*)(PyObject *, PyObject *, int)) \
+ PyArray_API[212])
+#define PyArray_NeighborhoodIterNew \
+ (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \
+ PyArray_API[213])
+#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214])
+#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215])
+#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216])
+#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217])
+#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218])
+#define PyArray_SetDatetimeParseFunction \
+ (*(void (*)(PyObject *NPY_UNUSED(op))) \
+ PyArray_API[219])
+#define PyArray_DatetimeToDatetimeStruct \
+ (*(void (*)(npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *)) \
+ PyArray_API[220])
+#define PyArray_TimedeltaToTimedeltaStruct \
+ (*(void (*)(npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *)) \
+ PyArray_API[221])
+#define PyArray_DatetimeStructToDatetime \
+ (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))) \
+ PyArray_API[222])
+#define PyArray_TimedeltaStructToTimedelta \
+ (*(npy_datetime (*)(NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))) \
+ PyArray_API[223])
+#define NpyIter_New \
+ (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \
+ PyArray_API[224])
+#define NpyIter_MultiNew \
+ (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \
+ PyArray_API[225])
+#define NpyIter_AdvancedNew \
+ (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \
+ PyArray_API[226])
+#define NpyIter_Copy \
+ (*(NpyIter * (*)(NpyIter *)) \
+ PyArray_API[227])
+#define NpyIter_Deallocate \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[228])
+#define NpyIter_HasDelayedBufAlloc \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[229])
+#define NpyIter_HasExternalLoop \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[230])
+#define NpyIter_EnableExternalLoop \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[231])
+#define NpyIter_GetInnerStrideArray \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[232])
+#define NpyIter_GetInnerLoopSizePtr \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[233])
+#define NpyIter_Reset \
+ (*(int (*)(NpyIter *, char **)) \
+ PyArray_API[234])
+#define NpyIter_ResetBasePointers \
+ (*(int (*)(NpyIter *, char **, char **)) \
+ PyArray_API[235])
+#define NpyIter_ResetToIterIndexRange \
+ (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \
+ PyArray_API[236])
+#define NpyIter_GetNDim \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[237])
+#define NpyIter_GetNOp \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[238])
+#define NpyIter_GetIterNext \
+ (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \
+ PyArray_API[239])
+#define NpyIter_GetIterSize \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[240])
+#define NpyIter_GetIterIndexRange \
+ (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \
+ PyArray_API[241])
+#define NpyIter_GetIterIndex \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[242])
+#define NpyIter_GotoIterIndex \
+ (*(int (*)(NpyIter *, npy_intp)) \
+ PyArray_API[243])
+#define NpyIter_HasMultiIndex \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[244])
+#define NpyIter_GetShape \
+ (*(int (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[245])
+#define NpyIter_GetGetMultiIndex \
+ (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \
+ PyArray_API[246])
+#define NpyIter_GotoMultiIndex \
+ (*(int (*)(NpyIter *, npy_intp const *)) \
+ PyArray_API[247])
+#define NpyIter_RemoveMultiIndex \
+ (*(int (*)(NpyIter *)) \
+ PyArray_API[248])
+#define NpyIter_HasIndex \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[249])
+#define NpyIter_IsBuffered \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[250])
+#define NpyIter_IsGrowInner \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[251])
+#define NpyIter_GetBufferSize \
+ (*(npy_intp (*)(NpyIter *)) \
+ PyArray_API[252])
+#define NpyIter_GetIndexPtr \
+ (*(npy_intp * (*)(NpyIter *)) \
+ PyArray_API[253])
+#define NpyIter_GotoIndex \
+ (*(int (*)(NpyIter *, npy_intp)) \
+ PyArray_API[254])
+#define NpyIter_GetDataPtrArray \
+ (*(char ** (*)(NpyIter *)) \
+ PyArray_API[255])
+#define NpyIter_GetDescrArray \
+ (*(PyArray_Descr ** (*)(NpyIter *)) \
+ PyArray_API[256])
+#define NpyIter_GetOperandArray \
+ (*(PyArrayObject ** (*)(NpyIter *)) \
+ PyArray_API[257])
+#define NpyIter_GetIterView \
+ (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \
+ PyArray_API[258])
+#define NpyIter_GetReadFlags \
+ (*(void (*)(NpyIter *, char *)) \
+ PyArray_API[259])
+#define NpyIter_GetWriteFlags \
+ (*(void (*)(NpyIter *, char *)) \
+ PyArray_API[260])
+#define NpyIter_DebugPrint \
+ (*(void (*)(NpyIter *)) \
+ PyArray_API[261])
+#define NpyIter_IterationNeedsAPI \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[262])
+#define NpyIter_GetInnerFixedStrideArray \
+ (*(void (*)(NpyIter *, npy_intp *)) \
+ PyArray_API[263])
+#define NpyIter_RemoveAxis \
+ (*(int (*)(NpyIter *, int)) \
+ PyArray_API[264])
+#define NpyIter_GetAxisStrideArray \
+ (*(npy_intp * (*)(NpyIter *, int)) \
+ PyArray_API[265])
+#define NpyIter_RequiresBuffering \
+ (*(npy_bool (*)(NpyIter *)) \
+ PyArray_API[266])
+#define NpyIter_GetInitialDataPtrArray \
+ (*(char ** (*)(NpyIter *)) \
+ PyArray_API[267])
+#define NpyIter_CreateCompatibleStrides \
+ (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \
+ PyArray_API[268])
+#define PyArray_CastingConverter \
+ (*(int (*)(PyObject *, NPY_CASTING *)) \
+ PyArray_API[269])
+#define PyArray_CountNonzero \
+ (*(npy_intp (*)(PyArrayObject *)) \
+ PyArray_API[270])
+#define PyArray_PromoteTypes \
+ (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \
+ PyArray_API[271])
+#define PyArray_MinScalarType \
+ (*(PyArray_Descr * (*)(PyArrayObject *)) \
+ PyArray_API[272])
+#define PyArray_ResultType \
+ (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \
+ PyArray_API[273])
+#define PyArray_CanCastArrayTo \
+ (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \
+ PyArray_API[274])
+#define PyArray_CanCastTypeTo \
+ (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \
+ PyArray_API[275])
+#define PyArray_EinsteinSum \
+ (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \
+ PyArray_API[276])
+#define PyArray_NewLikeArray \
+ (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \
+ PyArray_API[277])
+#define PyArray_GetArrayParamsFromObject \
+ (*(int (*)(PyObject *NPY_UNUSED(op), PyArray_Descr *NPY_UNUSED(requested_dtype), npy_bool NPY_UNUSED(writeable), PyArray_Descr **NPY_UNUSED(out_dtype), int *NPY_UNUSED(out_ndim), npy_intp *NPY_UNUSED(out_dims), PyArrayObject **NPY_UNUSED(out_arr), PyObject *NPY_UNUSED(context))) \
+ PyArray_API[278])
+#define PyArray_ConvertClipmodeSequence \
+ (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \
+ PyArray_API[279])
+#define PyArray_MatrixProduct2 \
+ (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \
+ PyArray_API[280])
+#define NpyIter_IsFirstVisit \
+ (*(npy_bool (*)(NpyIter *, int)) \
+ PyArray_API[281])
+#define PyArray_SetBaseObject \
+ (*(int (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[282])
+#define PyArray_CreateSortedStridePerm \
+ (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \
+ PyArray_API[283])
+#define PyArray_RemoveAxesInPlace \
+ (*(void (*)(PyArrayObject *, const npy_bool *)) \
+ PyArray_API[284])
+#define PyArray_DebugPrint \
+ (*(void (*)(PyArrayObject *)) \
+ PyArray_API[285])
+#define PyArray_FailUnlessWriteable \
+ (*(int (*)(PyArrayObject *, const char *)) \
+ PyArray_API[286])
+#define PyArray_SetUpdateIfCopyBase \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[287])
+#define PyDataMem_NEW \
+ (*(void * (*)(size_t)) \
+ PyArray_API[288])
+#define PyDataMem_FREE \
+ (*(void (*)(void *)) \
+ PyArray_API[289])
+#define PyDataMem_RENEW \
+ (*(void * (*)(void *, size_t)) \
+ PyArray_API[290])
+#define PyDataMem_SetEventHook \
+ (*(PyDataMem_EventHookFunc * (*)(PyDataMem_EventHookFunc *, void *, void **)) \
+ PyArray_API[291])
+#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292])
+#define PyArray_MapIterSwapAxes \
+ (*(void (*)(PyArrayMapIterObject *, PyArrayObject **, int)) \
+ PyArray_API[293])
+#define PyArray_MapIterArray \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *)) \
+ PyArray_API[294])
+#define PyArray_MapIterNext \
+ (*(void (*)(PyArrayMapIterObject *)) \
+ PyArray_API[295])
+#define PyArray_Partition \
+ (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+ PyArray_API[296])
+#define PyArray_ArgPartition \
+ (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \
+ PyArray_API[297])
+#define PyArray_SelectkindConverter \
+ (*(int (*)(PyObject *, NPY_SELECTKIND *)) \
+ PyArray_API[298])
+#define PyDataMem_NEW_ZEROED \
+ (*(void * (*)(size_t, size_t)) \
+ PyArray_API[299])
+#define PyArray_CheckAnyScalarExact \
+ (*(int (*)(PyObject *)) \
+ PyArray_API[300])
+#define PyArray_MapIterArrayCopyIfOverlap \
+ (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \
+ PyArray_API[301])
+#define PyArray_ResolveWritebackIfCopy \
+ (*(int (*)(PyArrayObject *)) \
+ PyArray_API[302])
+#define PyArray_SetWritebackIfCopyBase \
+ (*(int (*)(PyArrayObject *, PyArrayObject *)) \
+ PyArray_API[303])
+#define PyDataMem_SetHandler \
+ (*(PyObject * (*)(PyObject *)) \
+ PyArray_API[304])
+#define PyDataMem_GetHandler \
+ (*(PyObject * (*)(void)) \
+ PyArray_API[305])
+#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306])
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
+static int
+_import_array(void)
+{
+ int st;
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) {
+ return -1;
+ }
+ c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
+ return -1;
+ }
+
+ if (!PyCapsule_CheckExact(c_api)) {
+        PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not a PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+ Py_DECREF(c_api);
+ if (PyArray_API == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is a NULL pointer");
+ return -1;
+ }
+
+ /* Perform runtime check of C API version */
+ if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
+ PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+ "ABI version 0x%x but this version of numpy is 0x%x", \
+ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
+ return -1;
+ }
+    if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
+      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
+             "API version 0x%x but this version of numpy is 0x%x. "\
+             "See the C-API incompatibility section of the "\
+             "Troubleshooting ImportError guide at "\
+             "https://numpy.org/devdocs/user/troubleshooting-importerror.html"\
+             "#c-api-incompatibility "\
+             "for guidance on how to solve this problem.", \
+             (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
+ return -1;
+ }
+
+ /*
+     * Perform a runtime check of endianness and verify that it matches the
+     * one set by the headers (npy_endian.h), as a safeguard
+ */
+ st = PyArray_GetEndianness();
+ if (st == NPY_CPU_UNKNOWN_ENDIAN) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as unknown endian");
+ return -1;
+ }
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+ if (st != NPY_CPU_BIG) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as big endian, but "
+ "detected different endianness at runtime");
+ return -1;
+ }
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
+ if (st != NPY_CPU_LITTLE) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "FATAL: module compiled as little endian, but "
+ "detected different endianness at runtime");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
+
+#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
+
+#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
+
+#endif
+
+#endif
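
The #else branch above is the consumer view of this header: every PyArray_*
name resolves through the PyArray_API capsule table that _import_array()
populates. A minimal sketch of an extension module built on top of it (the
module name demo and the function sum1d are illustrative, not part of numpy):

    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* Sum a 1-d double array through the capsule-dispatched API. */
    static PyObject *
    sum1d(PyObject *self, PyObject *arg)
    {
        /* PyArray_FROM_OTF resolves via PyArray_FromAny (PyArray_API[69]) */
        PyObject *arr = PyArray_FROM_OTF(arg, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
        if (arr == NULL) {
            return NULL;
        }
        double total = 0.0;
        npy_intp n = PyArray_SIZE((PyArrayObject *)arr);
        double *data = (double *)PyArray_DATA((PyArrayObject *)arr);
        for (npy_intp i = 0; i < n; i++) {
            total += data[i];
        }
        Py_DECREF(arr);
        return PyFloat_FromDouble(total);
    }

    static PyMethodDef methods[] = {
        {"sum1d", sum1d, METH_O, "Sum a 1-d double array."},
        {NULL, NULL, 0, NULL}
    };

    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT, "demo", NULL, -1, methods
    };

    PyMODINIT_FUNC
    PyInit_demo(void)
    {
        import_array();   /* runs the _import_array() checks above */
        return PyModule_Create(&moduledef);
    }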
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h
new file mode 100644
index 00000000..5b5a9f4d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/__ufunc_api.h
@@ -0,0 +1,311 @@
+
+#ifdef _UMATHMODULE
+
+extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
+
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int);
+NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \
+ (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *);
+NPY_NO_EXPORT int PyUFunc_GenericFunction \
+ (PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op));
+NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_d_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_f_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_g_g \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_F_F \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_D_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_G_G \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_O_O \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ff_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_dd_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_gg_g \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_DD_D \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_FF_F \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_GG_G \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_OO_O \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_O_O_method \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_OO_O_method \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_On_Om \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **);
+NPY_NO_EXPORT int PyUFunc_checkfperr \
+ (int, PyObject *, int *);
+NPY_NO_EXPORT void PyUFunc_clearfperr \
+ (void);
+NPY_NO_EXPORT int PyUFunc_getfperr \
+ (void);
+NPY_NO_EXPORT int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *);
+NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \
+ (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *);
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *);
+NPY_NO_EXPORT int PyUFunc_SetUsesArraysAsData \
+ (void **NPY_UNUSED(data), size_t NPY_UNUSED(i));
+NPY_NO_EXPORT void PyUFunc_e_e \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d \
+ (char **, npy_intp const *, npy_intp const *, void *);
+NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \
+ (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **);
+NPY_NO_EXPORT int PyUFunc_ValidateCasting \
+ (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **);
+NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \
+ (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *);
+NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *);
+
+#else
+
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
+#endif
+
+#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
+extern void **PyUFunc_API;
+#else
+#if defined(PY_UFUNC_UNIQUE_SYMBOL)
+void **PyUFunc_API;
+#else
+static void **PyUFunc_API=NULL;
+#endif
+#endif
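+
+/*
+ * PY_UFUNC_UNIQUE_SYMBOL and NO_IMPORT_UFUNC follow the same multi-file
+ * pattern as PY_ARRAY_UNIQUE_SYMBOL in __multiarray_api.h: one
+ * translation unit defines PyUFunc_API and calls import_umath(), every
+ * other unit defines NO_IMPORT_UFUNC and only references the table.
+ */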
+
+#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0])
+#define PyUFunc_FromFuncAndData \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int)) \
+ PyUFunc_API[1])
+#define PyUFunc_RegisterLoopForType \
+ (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \
+ PyUFunc_API[2])
+#define PyUFunc_GenericFunction \
+ (*(int (*)(PyUFuncObject *NPY_UNUSED(ufunc), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds), PyArrayObject **NPY_UNUSED(op))) \
+ PyUFunc_API[3])
+#define PyUFunc_f_f_As_d_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[4])
+#define PyUFunc_d_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[5])
+#define PyUFunc_f_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[6])
+#define PyUFunc_g_g \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[7])
+#define PyUFunc_F_F_As_D_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[8])
+#define PyUFunc_F_F \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[9])
+#define PyUFunc_D_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[10])
+#define PyUFunc_G_G \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[11])
+#define PyUFunc_O_O \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[12])
+#define PyUFunc_ff_f_As_dd_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[13])
+#define PyUFunc_ff_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[14])
+#define PyUFunc_dd_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[15])
+#define PyUFunc_gg_g \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[16])
+#define PyUFunc_FF_F_As_DD_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[17])
+#define PyUFunc_DD_D \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[18])
+#define PyUFunc_FF_F \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[19])
+#define PyUFunc_GG_G \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[20])
+#define PyUFunc_OO_O \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[21])
+#define PyUFunc_O_O_method \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[22])
+#define PyUFunc_OO_O_method \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[23])
+#define PyUFunc_On_Om \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[24])
+#define PyUFunc_GetPyValues \
+ (*(int (*)(char *, int *, int *, PyObject **)) \
+ PyUFunc_API[25])
+#define PyUFunc_checkfperr \
+ (*(int (*)(int, PyObject *, int *)) \
+ PyUFunc_API[26])
+#define PyUFunc_clearfperr \
+ (*(void (*)(void)) \
+ PyUFunc_API[27])
+#define PyUFunc_getfperr \
+ (*(int (*)(void)) \
+ PyUFunc_API[28])
+#define PyUFunc_handlefperr \
+ (*(int (*)(int, PyObject *, int, int *)) \
+ PyUFunc_API[29])
+#define PyUFunc_ReplaceLoopBySignature \
+ (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \
+ PyUFunc_API[30])
+#define PyUFunc_FromFuncAndDataAndSignature \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, int, const char *)) \
+ PyUFunc_API[31])
+#define PyUFunc_SetUsesArraysAsData \
+ (*(int (*)(void **NPY_UNUSED(data), size_t NPY_UNUSED(i))) \
+ PyUFunc_API[32])
+#define PyUFunc_e_e \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[33])
+#define PyUFunc_e_e_As_f_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[34])
+#define PyUFunc_e_e_As_d_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[35])
+#define PyUFunc_ee_e \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[36])
+#define PyUFunc_ee_e_As_ff_f \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[37])
+#define PyUFunc_ee_e_As_dd_d \
+ (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \
+ PyUFunc_API[38])
+#define PyUFunc_DefaultTypeResolver \
+ (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \
+ PyUFunc_API[39])
+#define PyUFunc_ValidateCasting \
+ (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr **)) \
+ PyUFunc_API[40])
+#define PyUFunc_RegisterLoopForDescr \
+ (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \
+ PyUFunc_API[41])
+#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \
+ (*(PyObject * (*)(PyUFuncGenericFunction *, void **, char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \
+ PyUFunc_API[42])
+
+static NPY_INLINE int
+_import_umath(void)
+{
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
+ PyObject *c_api = NULL;
+
+ if (numpy == NULL) {
+ PyErr_SetString(PyExc_ImportError,
+ "numpy.core._multiarray_umath failed to import");
+ return -1;
+ }
+ c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
+ Py_DECREF(numpy);
+ if (c_api == NULL) {
+ PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
+ return -1;
+ }
+
+ if (!PyCapsule_CheckExact(c_api)) {
+        PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not a PyCapsule object");
+ Py_DECREF(c_api);
+ return -1;
+ }
+ PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
+ Py_DECREF(c_api);
+ if (PyUFunc_API == NULL) {
+        PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is a NULL pointer");
+ return -1;
+ }
+ return 0;
+}
+
+#define import_umath() \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy.core.umath failed to import");\
+ return NULL;\
+ }\
+ } while(0)
+
+#define import_umath1(ret) \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy.core.umath failed to import");\
+ return ret;\
+ }\
+ } while(0)
+
+#define import_umath2(ret, msg) \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError, msg);\
+ return ret;\
+ }\
+ } while(0)
+
+#define import_ufunc() \
+ do {\
+ UFUNC_NOFPE\
+ if (_import_umath() < 0) {\
+ PyErr_Print();\
+ PyErr_SetString(PyExc_ImportError,\
+ "numpy.core.umath failed to import");\
+ }\
+ } while(0)
+
+#endif
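
The ufunc table works the same way: import_umath() fills PyUFunc_API, after
which PyUFunc_FromFuncAndData can stitch a C inner loop into a callable
ufunc. A hedged sketch (the loop double_square and the module name squarer
are illustrative):

    #include <Python.h>
    #include <numpy/arrayobject.h>
    #include <numpy/ufuncobject.h>

    /* Inner loop matching PyUFuncGenericFunction: square each double. */
    static void
    double_square(char **args, npy_intp const *dimensions,
                  npy_intp const *steps, void *data)
    {
        char *in = args[0], *out = args[1];
        npy_intp n = dimensions[0];
        (void)data;  /* unused */
        for (npy_intp i = 0; i < n; i++) {
            *(double *)out = (*(double *)in) * (*(double *)in);
            in += steps[0];
            out += steps[1];
        }
    }

    static PyUFuncGenericFunction funcs[1] = {&double_square};
    static char types[2] = {NPY_DOUBLE, NPY_DOUBLE};
    static void *data[1] = {NULL};

    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT, "squarer", NULL, -1, NULL
    };

    PyMODINIT_FUNC
    PyInit_squarer(void)
    {
        PyObject *m = PyModule_Create(&moduledef);
        if (m == NULL) {
            return NULL;
        }
        import_array();   /* the array API must be imported first */
        import_umath();   /* fills PyUFunc_API */

        /* 1 loop, 1 input, 1 output, no identity */
        PyObject *square = PyUFunc_FromFuncAndData(
            funcs, data, types, 1, 1, 1, PyUFunc_None,
            "square", "square(x) -> x*x, elementwise", 0);
        PyModule_AddObject(m, "square", square);
        return m;
    }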
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
new file mode 100644
index 00000000..07e2363d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h
@@ -0,0 +1,90 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+#error You should not include this header directly
+#endif
+/*
+ * Private API (declared here so it can be inlined)
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Advance to the next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, with the last
+ * dimension incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1, 0
+ * -1, -1, 1
+ * ....
+ * -1, 0, -1
+ * -1, 0, 0
+ * ....
+ * 0, -1, -1
+ * 0, -1, 0
+ * ....
+ */
+#define _UPDATE_COORD_ITER(c) \
+ wb = iter->coordinates[c] < iter->bounds[c][1]; \
+ if (wb) { \
+ iter->coordinates[c] += 1; \
+ return 0; \
+ } \
+ else { \
+ iter->coordinates[c] = iter->bounds[c][0]; \
+ }
+
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp i, wb;
+
+ for (i = iter->nd - 1; i >= 0; --i) {
+ _UPDATE_COORD_ITER(i)
+ }
+
+ return 0;
+}
+
+/*
+ * Version optimized for 2d arrays, manual loop unrolling
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp wb;
+
+ _UPDATE_COORD_ITER(1)
+ _UPDATE_COORD_ITER(0)
+
+ return 0;
+}
+#undef _UPDATE_COORD_ITER
+
+/*
+ * Advance to the next neighbour
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+{
+ _PyArrayNeighborhoodIter_IncrCoord (iter);
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
+
+/*
+ * Reset function
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+{
+ npy_intp i;
+
+ for (i = 0; i < iter->nd; ++i) {
+ iter->coordinates[i] = iter->bounds[i][0];
+ }
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
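
In use, the increment and reset helpers above are driven by a simple
reset-then-next visit loop. A minimal sketch, assuming iter was created
earlier with PyArray_NeighborhoodIterNew over an array of doubles (creation,
bounds, and error handling omitted):

    /* Sum the values in the neighborhood of the current base position. */
    static double
    sum_neighborhood(PyArrayNeighborhoodIterObject *iter)
    {
        double total = 0.0;
        npy_intp i;

        PyArrayNeighborhoodIter_Reset(iter);
        for (i = 0; i < iter->size; i++) {
            total += *(double *)iter->dataptr;  /* assumes NPY_DOUBLE data */
            PyArrayNeighborhoodIter_Next(iter);
        }
        return total;
    }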
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h
new file mode 100644
index 00000000..e46916f5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/_numpyconfig.h
@@ -0,0 +1,30 @@
+#define NPY_SIZEOF_SHORT SIZEOF_SHORT
+#define NPY_SIZEOF_INT SIZEOF_INT
+#define NPY_SIZEOF_LONG SIZEOF_LONG
+#define NPY_SIZEOF_FLOAT 4
+#define NPY_SIZEOF_COMPLEX_FLOAT 8
+#define NPY_SIZEOF_DOUBLE 8
+#define NPY_SIZEOF_COMPLEX_DOUBLE 16
+#define NPY_SIZEOF_LONGDOUBLE 16
+#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+#define NPY_SIZEOF_PY_INTPTR_T 8
+#define NPY_SIZEOF_OFF_T 8
+#define NPY_SIZEOF_PY_LONG_LONG 8
+#define NPY_SIZEOF_LONGLONG 8
+#define NPY_NO_SMP 0
+#define NPY_HAVE_DECL_ISNAN
+#define NPY_HAVE_DECL_ISINF
+#define NPY_HAVE_DECL_ISFINITE
+#define NPY_HAVE_DECL_SIGNBIT
+#define NPY_USE_C99_COMPLEX 1
+#define NPY_HAVE_COMPLEX_DOUBLE 1
+#define NPY_HAVE_COMPLEX_FLOAT 1
+#define NPY_HAVE_COMPLEX_LONG_DOUBLE 1
+#define NPY_USE_C99_FORMATS 1
+#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
+#define NPY_ABI_VERSION 0x01000009
+#define NPY_API_VERSION 0x00000010
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
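
These macros let extension code branch at compile time on the platform's
type sizes and feature set instead of probing at runtime. A small
illustrative sketch (the typedef name working_real is made up):

    #include <numpy/npy_common.h>  /* pulls in the _numpyconfig.h values */

    #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
    /* long double adds no precision on this platform; use the double path */
    typedef double working_real;
    #else
    typedef npy_longdouble working_real;
    #endif

    #if !NPY_USE_C99_COMPLEX
    #error "this extension requires C99 complex support"
    #endif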
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h
new file mode 100644
index 00000000..da47bb09
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayobject.h
@@ -0,0 +1,12 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_
+#define Py_ARRAYOBJECT_H
+
+#include "ndarrayobject.h"
+#include "npy_interrupt.h"
+
+#ifdef NPY_NO_PREFIX
+#include "noprefix.h"
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h
new file mode 100644
index 00000000..a20a6801
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/arrayscalars.h
@@ -0,0 +1,182 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_
+
+#ifndef _MULTIARRAYMODULE
+typedef struct {
+ PyObject_HEAD
+ npy_bool obval;
+} PyBoolScalarObject;
+#endif
+
+
+typedef struct {
+ PyObject_HEAD
+ signed char obval;
+} PyByteScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ short obval;
+} PyShortScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ int obval;
+} PyIntScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ long obval;
+} PyLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_longlong obval;
+} PyLongLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned char obval;
+} PyUByteScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned short obval;
+} PyUShortScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned int obval;
+} PyUIntScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ unsigned long obval;
+} PyULongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_ulonglong obval;
+} PyULongLongScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_half obval;
+} PyHalfScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ float obval;
+} PyFloatScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ double obval;
+} PyDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_longdouble obval;
+} PyLongDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_cfloat obval;
+} PyCFloatScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_cdouble obval;
+} PyCDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ npy_clongdouble obval;
+} PyCLongDoubleScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ PyObject * obval;
+} PyObjectScalarObject;
+
+typedef struct {
+ PyObject_HEAD
+ npy_datetime obval;
+ PyArray_DatetimeMetaData obmeta;
+} PyDatetimeScalarObject;
+
+typedef struct {
+ PyObject_HEAD
+ npy_timedelta obval;
+ PyArray_DatetimeMetaData obmeta;
+} PyTimedeltaScalarObject;
+
+
+typedef struct {
+ PyObject_HEAD
+ char obval;
+} PyScalarObject;
+
+#define PyStringScalarObject PyBytesObject
+typedef struct {
+ /* note that the PyObject_HEAD macro lives right here */
+ PyUnicodeObject base;
+ Py_UCS4 *obval;
+ char *buffer_fmt;
+} PyUnicodeScalarObject;
+
+
+typedef struct {
+ PyObject_VAR_HEAD
+ char *obval;
+ PyArray_Descr *descr;
+ int flags;
+ PyObject *base;
+ void *_buffer_info; /* private buffer info, tagged to allow warning */
+} PyVoidScalarObject;
+
+/* Macros
+ Py<Cls><bitsize>ScalarObject
+ Py<Cls><bitsize>ArrType_Type
+ are defined in ndarrayobject.h
+*/
+
+#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))
+#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))
+#define PyArrayScalar_FromLong(i) \
+ ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))
+#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \
+ return Py_INCREF(PyArrayScalar_FromLong(i)), \
+ PyArrayScalar_FromLong(i)
+#define PyArrayScalar_RETURN_FALSE \
+ return Py_INCREF(PyArrayScalar_False), \
+ PyArrayScalar_False
+#define PyArrayScalar_RETURN_TRUE \
+ return Py_INCREF(PyArrayScalar_True), \
+ PyArrayScalar_True
+
+#define PyArrayScalar_New(cls) \
+ Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)
+#define PyArrayScalar_VAL(obj, cls) \
+ ((Py##cls##ScalarObject *)obj)->obval
+#define PyArrayScalar_ASSIGN(obj, cls, val) \
+ PyArrayScalar_VAL(obj, cls) = val
+
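+/*
+ * Usage sketch (illustrative only; `Double` follows the
+ * Py<Cls><bitsize>ArrType_Type naming noted above):
+ *
+ *     PyObject *obj = PyArrayScalar_New(Double);
+ *     if (obj != NULL) {
+ *         PyArrayScalar_ASSIGN(obj, Double, 3.14);
+ *         double val = PyArrayScalar_VAL(obj, Double);
+ *     }
+ */
+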
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h
new file mode 100644
index 00000000..1fbd4198
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/experimental_dtype_api.h
@@ -0,0 +1,502 @@
+/*
+ * This header exports the new experimental DType API as proposed in
+ * NEPs 41 to 43. For background, please check these NEPs. Otherwise,
+ * this header also serves as documentation for the time being.
+ *
+ * Please do not hesitate to contact @seberg with questions. This is
+ * developed together with https://github.com/seberg/experimental_user_dtypes
+ * and those interested in experimenting are encouraged to contribute there.
+ *
+ * To use the functions defined in the header, call::
+ *
+ * if (import_experimental_dtype_api(version) < 0) {
+ * return NULL;
+ * }
+ *
+ * in your module init. (A version mismatch will be reported; just update
+ * to the correct one, and it will alert you to possible changes.)
+ *
+ * The following lists the main symbols currently exported. Please do not
+ * hesitate to ask for help or clarification:
+ *
+ * - PyUFunc_AddLoopFromSpec:
+ *
+ * Register a new loop for a ufunc. This uses the `PyArrayMethod_Spec`
+ * which must be filled in (see in-line comments).
+ *
+ * - PyUFunc_AddWrappingLoop:
+ *
+ * Register a new loop which reuses an existing one, but modifies the
+ * result dtypes. Please search the internal NumPy docs for more info
+ * at this point. (Used for physical units dtype.)
+ *
+ * - PyUFunc_AddPromoter:
+ *
+ * Register a new promoter for a ufunc. A promoter is a function stored
+ * in a PyCapsule (see in-line comments). It is passed the operation and
+ * requested DType signatures and can mutate them to attempt a new search
+ * for a matching loop/promoter.
+ * For instance, for Numba a promoter could even add the desired loop.
+ *
+ * - PyArrayInitDTypeMeta_FromSpec:
+ *
+ * Initialize a new DType. It must currently be a static Python C type
+ * that is declared as `PyArray_DTypeMeta` and not `PyTypeObject`.
+ * Further, it must subclass `np.dtype` and set its type to
+ * `PyArrayDTypeMeta_Type` (before calling `PyType_Ready()`).
+ *
+ * - PyArray_CommonDType:
+ *
+ * Find the common-dtype ("promotion") for two DType classes. Similar
+ * to `np.result_type`, but works on the classes and not instances.
+ *
+ * - PyArray_PromoteDTypeSequence:
+ *
+ * Same as CommonDType, but works with an arbitrary number of DTypes.
+ * This function is smarter and can often return successful and unambiguous
+ * results when `common_dtype(common_dtype(dt1, dt2), dt3)` would
+ * depend on the operation order or fail. Nevertheless, DTypes should
+ * aim to ensure that their common-dtype implementation is associative
+ * and commutative! (Mainly, unsigned and signed integers are not.)
+ *
+ * For guaranteed consistent results DTypes must implement common-dtype
+ * "transitively". If A promotes B and B promotes C, then A must generally
+ * also promote C; where "promotes" means implements the promotion.
+ * (There are some exceptions for abstract DTypes.)
+ *
+ * - PyArray_GetDefaultDescr:
+ *
+ * Given a DType class, returns the default instance (descriptor).
+ * This is an inline function that checks for `singleton` first and only
+ * calls the `default_descr` function if necessary.
+ *
+ * - PyArray_DoubleDType, etc.:
+ *
+ * Aliases to the DType classes for the builtin NumPy DTypes.
+ *
+ * WARNING
+ * =======
+ *
+ * By using this header, you understand that this is a fully experimental
+ * exposure. Details are expected to change, and some options may have no
+ * effect. (Please contact @seberg if you have questions!)
+ * If the exposure stops working, please file a bug report with NumPy.
+ * Further, a DType created using this API/header should still be expected
+ * to be incompatible with some functionality inside and outside of NumPy.
+ * In such cases crashes must be expected. Please report any such problems
+ * so that they can be fixed before final exposure.
+ * Furthermore, expect checks for programming errors to be missing here;
+ * the final API is expected to have them.
+ *
+ * Symbols with a leading underscore are likely to not be included in the
+ * first public version. If these are central to your use-case, please let
+ * us know, so that we can reconsider.
+ *
+ * "Array-like" consumer API not yet under considerations
+ * ======================================================
+ *
+ * The new DType API is designed in a way to make it potentially useful for
+ * alternative "array-like" implementations. This will require careful
+ * exposure of details and functions and is not part of this experimental API.
+ *
+ * Brief (incompatibility) changelog
+ * =================================
+ *
+ * 2. None (only additions).
+ * 3. New `npy_intp *view_offset` argument for `resolve_descriptors`.
+ * This replaces the `NPY_CAST_IS_VIEW` flag. It can be set to 0 if the
+ * operation is a view, and is pre-initialized to `NPY_MIN_INTP` indicating
+ * that the operation is not a view.
+ */
+
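+/*
+ * For example, a minimal module init (a sketch; `spam` and `spam_module`
+ * are hypothetical names, and `__EXPERIMENTAL_DTYPE_VERSION` is defined
+ * near the end of this header)::
+ *
+ *     PyMODINIT_FUNC
+ *     PyInit_spam(void)
+ *     {
+ *         if (import_experimental_dtype_api(__EXPERIMENTAL_DTYPE_VERSION) < 0) {
+ *             return NULL;
+ *         }
+ *         return PyModule_Create(&spam_module);
+ *     }
+ */
+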
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_
+
+#include <Python.h>
+#include "ndarraytypes.h"
+
+
+/*
+ * There must be a better way?! -- Oh well, this is experimental
+ * (my issue with it is that I cannot undef those helpers).
+ */
+#if defined(PY_ARRAY_UNIQUE_SYMBOL)
+ #define NPY_EXP_DTYPE_API_CONCAT_HELPER2(x, y) x ## y
+ #define NPY_EXP_DTYPE_API_CONCAT_HELPER(arg) NPY_EXP_DTYPE_API_CONCAT_HELPER2(arg, __experimental_dtype_api_table)
+ #define __experimental_dtype_api_table NPY_EXP_DTYPE_API_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL)
+#else
+ #define __experimental_dtype_api_table __experimental_dtype_api_table
+#endif
+
+/* Support for correct multi-file projects: */
+#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
+ extern void **__experimental_dtype_api_table;
+#else
+ /*
+     * Just a hack so I don't forget to import the API myself; I spent way
+     * too much time tracking that down the first time around :).
+ */
+ static void
+ __not_imported(void)
+ {
+ printf("*****\nCritical error, dtype API not imported\n*****\n");
+ }
+
+ static void *__uninitialized_table[] = {
+ &__not_imported, &__not_imported, &__not_imported, &__not_imported,
+ &__not_imported, &__not_imported, &__not_imported, &__not_imported};
+
+ #if defined(PY_ARRAY_UNIQUE_SYMBOL)
+ void **__experimental_dtype_api_table = __uninitialized_table;
+ #else
+ static void **__experimental_dtype_api_table = __uninitialized_table;
+ #endif
+#endif
+
+
+/*
+ * DTypeMeta struct, the content may be made fully opaque (except the size).
+ * We may also move everything into a single `void *dt_slots`.
+ */
+typedef struct {
+ PyHeapTypeObject super;
+ PyArray_Descr *singleton;
+ int type_num;
+ PyTypeObject *scalar_type;
+ npy_uint64 flags;
+ void *dt_slots;
+ void *reserved[3];
+} PyArray_DTypeMeta;
+
+
+/*
+ * ******************************************************
+ * ArrayMethod API (Casting and UFuncs)
+ * ******************************************************
+ */
+/*
+ * NOTE: Expected changes:
+ * * invert logic of floating point error flag
+ * * probably split runtime and general flags into two
+ * * should possibly not use an enum for the typedef, for a more stable ABI?
+ */
+typedef enum {
+ /* Flag for whether the GIL is required */
+ NPY_METH_REQUIRES_PYAPI = 1 << 1,
+ /*
+ * Some functions cannot set floating point error flags, this flag
+ * gives us the option (not requirement) to skip floating point error
+ * setup/check. No function should set error flags and ignore them
+ * since it would interfere with chaining operations (e.g. casting).
+ */
+ NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 2,
+ /* Whether the method supports unaligned access (not runtime) */
+ NPY_METH_SUPPORTS_UNALIGNED = 1 << 3,
+
+ /* All flags which can change at runtime */
+ NPY_METH_RUNTIME_FLAGS = (
+ NPY_METH_REQUIRES_PYAPI |
+ NPY_METH_NO_FLOATINGPOINT_ERRORS),
+} NPY_ARRAYMETHOD_FLAGS;
+
+
+/*
+ * The main object for creating a new ArrayMethod. We use the typical `slots`
+ * mechanism used by the Python limited API (see below for the slot defs).
+ */
+typedef struct {
+ const char *name;
+ int nin, nout;
+ NPY_CASTING casting;
+ NPY_ARRAYMETHOD_FLAGS flags;
+ PyArray_DTypeMeta **dtypes;
+ PyType_Slot *slots;
+} PyArrayMethod_Spec;
+
+
+typedef int _ufunc_addloop_fromspec_func(
+ PyObject *ufunc, PyArrayMethod_Spec *spec);
+/*
+ * The main ufunc registration function. This adds a new implementation/loop
+ * to a ufunc. It replaces `PyUFunc_RegisterLoopForType`.
+ */
+#define PyUFunc_AddLoopFromSpec \
+ (*(_ufunc_addloop_fromspec_func *)(__experimental_dtype_api_table[0]))
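+
+/*
+ * An illustrative sketch (all names hypothetical; the slot IDs and DType
+ * aliases used here are defined later in this header)::
+ *
+ *     PyType_Slot slots[] = {
+ *             {NPY_METH_strided_loop, &negate_strided_loop},
+ *             {0, NULL}};
+ *     PyArray_DTypeMeta *dtypes[] = {
+ *             &PyArray_DoubleDType, &PyArray_DoubleDType};
+ *     PyArrayMethod_Spec spec = {
+ *             .name = "negate_double", .nin = 1, .nout = 1,
+ *             .casting = NPY_NO_CASTING, .flags = 0,
+ *             .dtypes = dtypes, .slots = slots};
+ *     if (PyUFunc_AddLoopFromSpec(ufunc, &spec) < 0) {
+ *         return NULL;
+ *     }
+ */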
+
+
+/* Please see the NumPy definitions in `array_method.h` for details on these */
+typedef int translate_given_descrs_func(int nin, int nout,
+ PyArray_DTypeMeta *wrapped_dtypes[],
+ PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]);
+typedef int translate_loop_descrs_func(int nin, int nout,
+ PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[],
+ PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+typedef int _ufunc_wrapping_loop_func(PyObject *ufunc_obj,
+ PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[],
+ translate_given_descrs_func *translate_given_descrs,
+ translate_loop_descrs_func *translate_loop_descrs);
+#define PyUFunc_AddWrappingLoop \
+ (*(_ufunc_wrapping_loop_func *)(__experimental_dtype_api_table[7]))
+
+/*
+ * Type of the C promoter function, which must be wrapped into a
+ * PyCapsule with name "numpy._ufunc_promoter".
+ *
+ * Note that currently the output dtypes are always NULL unless they are
+ * also part of the signature. This is an implementation detail and could
+ * change in the future. However, in general promoters should not have a
+ * need for output dtypes.
+ * (There are potential use-cases, these are currently unsupported.)
+ */
+typedef int promoter_function(PyObject *ufunc,
+ PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[],
+ PyArray_DTypeMeta *new_op_dtypes[]);
+
+/*
+ * Function to register a promoter.
+ *
+ * @param ufunc The ufunc object to register the promoter with.
+ * @param DType_tuple A Python tuple containing DTypes or None matching the
+ * number of inputs and outputs of the ufunc.
+ * @param promoter A PyCapsule with name "numpy._ufunc_promoter" containing
+ * a pointer to a `promoter_function`.
+ */
+typedef int _ufunc_addpromoter_func(
+ PyObject *ufunc, PyObject *DType_tuple, PyObject *promoter);
+#define PyUFunc_AddPromoter \
+ (*(_ufunc_addpromoter_func *)(__experimental_dtype_api_table[1]))
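+
+/*
+ * A registration sketch (illustrative; `my_promoter` is a hypothetical
+ * function with the `promoter_function` signature above)::
+ *
+ *     PyObject *promoter = PyCapsule_New(
+ *             (void *)&my_promoter, "numpy._ufunc_promoter", NULL);
+ *     if (promoter == NULL
+ *             || PyUFunc_AddPromoter(ufunc, DType_tuple, promoter) < 0) {
+ *         return NULL;
+ *     }
+ */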
+
+
+/*
+ * The resolve descriptors function must be able to handle NULL values for
+ * all output (but not input) `given_descrs` and fill `loop_descrs`.
+ * Return -1 on error or 0 if the operation is not possible without an error
+ * set. (This may still be in flux.)
+ * Otherwise it must return the "casting safety"; for normal functions this is
+ * almost always "safe" (or even "equivalent"?).
+ *
+ * `resolve_descriptors` is optional if all output DTypes are non-parametric.
+ */
+#define NPY_METH_resolve_descriptors 1
+typedef NPY_CASTING (resolve_descriptors_function)(
+ /* "method" is currently opaque (necessary e.g. to wrap Python) */
+ PyObject *method,
+ /* DTypes the method was created for */
+ PyObject **dtypes,
+ /* Input descriptors (instances). Outputs may be NULL. */
+ PyArray_Descr **given_descrs,
+ /* Exact loop descriptors to use, must not hold references on error */
+ PyArray_Descr **loop_descrs,
+ npy_intp *view_offset);
+
+/* NOT public yet: Signature needs adapting as external API. */
+#define _NPY_METH_get_loop 2
+
+/*
+ * Current public API to define fast inner-loops. You must provide a
+ * strided loop. If this is a cast between two "versions" of the same dtype
+ * you must also provide an unaligned strided loop.
+ * Other loops are useful to optimize the very common contiguous case.
+ *
+ * NOTE: As of now, NumPy will NOT use unaligned loops in ufuncs!
+ */
+#define NPY_METH_strided_loop 3
+#define NPY_METH_contiguous_loop 4
+#define NPY_METH_unaligned_strided_loop 5
+#define NPY_METH_unaligned_contiguous_loop 6
+
+
+typedef struct {
+ PyObject *caller; /* E.g. the original ufunc, may be NULL */
+ PyObject *method; /* The method "self". Currently an opaque object */
+
+ /* Operand descriptors, filled in by resolve_descriptors */
+ PyArray_Descr **descriptors;
+ /* Structure may grow (this is harmless for DType authors) */
+} PyArrayMethod_Context;
+
+typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,
+ char *const *data, const npy_intp *dimensions, const npy_intp *strides,
+ NpyAuxData *transferdata);
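+
+/*
+ * A strided-loop sketch (illustrative; a unary double -> double method
+ * with nin=1, nout=1, so data[0] is the input and data[1] the output)::
+ *
+ *     static int
+ *     negate_strided_loop(PyArrayMethod_Context *context,
+ *             char *const data[], const npy_intp dimensions[],
+ *             const npy_intp strides[], NpyAuxData *transferdata)
+ *     {
+ *         npy_intp N = dimensions[0];
+ *         char *in = data[0], *out = data[1];
+ *         while (N--) {
+ *             *(double *)out = -*(double *)in;
+ *             in += strides[0];
+ *             out += strides[1];
+ *         }
+ *         return 0;  // 0 on success, -1 on error
+ *     }
+ */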
+
+
+
+/*
+ * ****************************
+ * DTYPE API
+ * ****************************
+ */
+
+#define NPY_DT_ABSTRACT 1 << 1
+#define NPY_DT_PARAMETRIC 1 << 2
+
+#define NPY_DT_discover_descr_from_pyobject 1
+#define _NPY_DT_is_known_scalar_type 2
+#define NPY_DT_default_descr 3
+#define NPY_DT_common_dtype 4
+#define NPY_DT_common_instance 5
+#define NPY_DT_ensure_canonical 6
+#define NPY_DT_setitem 7
+#define NPY_DT_getitem 8
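+
+/*
+ * E.g. a slots array for a DType (a sketch; the functions are hypothetical)::
+ *
+ *     PyType_Slot dt_slots[] = {
+ *             {NPY_DT_default_descr, &my_default_descr},
+ *             {NPY_DT_setitem, &my_setitem},
+ *             {0, NULL}};
+ */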
+
+
+// TODO: These slots probably still need some thought, and/or a way to "grow"?
+typedef struct {
+ PyTypeObject *typeobj; /* type of python scalar or NULL */
+ int flags; /* flags, including parametric and abstract */
+ /* NULL terminated cast definitions. Use NULL for the newly created DType */
+ PyArrayMethod_Spec **casts;
+ PyType_Slot *slots;
+ /* Baseclass or NULL (will always subclass `np.dtype`) */
+ PyTypeObject *baseclass;
+} PyArrayDTypeMeta_Spec;
+
+
+#define PyArrayDTypeMeta_Type \
+ (*(PyTypeObject *)__experimental_dtype_api_table[2])
+typedef int __dtypemeta_fromspec(
+ PyArray_DTypeMeta *DType, PyArrayDTypeMeta_Spec *dtype_spec);
+/*
+ * Finalize creation of a DTypeMeta. You must ensure that the DTypeMeta is
+ * a proper subclass. The DTypeMeta object has additional fields compared to
+ * a normal PyTypeObject!
+ * The only (easy) creation of a new DType is to create a static Type which
+ * inherits `PyArray_DescrType`, sets its type to `PyArrayDTypeMeta_Type` and
+ * uses `PyArray_DTypeMeta` defined above as the C-structure.
+ */
+#define PyArrayInitDTypeMeta_FromSpec \
+ ((__dtypemeta_fromspec *)(__experimental_dtype_api_table[3]))
+
+
+/*
+ * *************************************
+ * WORKING WITH DTYPES
+ * *************************************
+ */
+
+typedef PyArray_DTypeMeta *__common_dtype(
+ PyArray_DTypeMeta *DType1, PyArray_DTypeMeta *DType2);
+#define PyArray_CommonDType \
+ ((__common_dtype *)(__experimental_dtype_api_table[4]))
+
+
+typedef PyArray_DTypeMeta *__promote_dtype_sequence(
+ npy_intp num, PyArray_DTypeMeta *DTypes[]);
+#define PyArray_PromoteDTypeSequence \
+ ((__promote_dtype_sequence *)(__experimental_dtype_api_table[5]))
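+
+/*
+ * E.g. (sketch): promoting several DTypes at once::
+ *
+ *     PyArray_DTypeMeta *dtypes[3] = {
+ *             &PyArray_Int64DType, &PyArray_UInt64DType, &PyArray_DoubleDType};
+ *     PyArray_DTypeMeta *common = PyArray_PromoteDTypeSequence(3, dtypes);
+ *     if (common == NULL) {
+ *         return NULL;
+ *     }
+ */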
+
+
+typedef PyArray_Descr *__get_default_descr(
+ PyArray_DTypeMeta *DType);
+#define _PyArray_GetDefaultDescr \
+ ((__get_default_descr *)(__experimental_dtype_api_table[6]))
+
+static NPY_INLINE PyArray_Descr *
+PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType)
+{
+ if (DType->singleton != NULL) {
+ Py_INCREF(DType->singleton);
+ return DType->singleton;
+ }
+ return _PyArray_GetDefaultDescr(DType);
+}
+
+
+/*
+ * NumPy's builtin DTypes:
+ */
+#define PyArray_BoolDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[10])
+/* Integers */
+#define PyArray_ByteDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[11])
+#define PyArray_UByteDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[12])
+#define PyArray_ShortDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[13])
+#define PyArray_UShortDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[14])
+#define PyArray_IntDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[15])
+#define PyArray_UIntDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[16])
+#define PyArray_LongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[17])
+#define PyArray_ULongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[18])
+#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[19])
+#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[20])
+/* Integer aliases */
+#define PyArray_Int8DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[21])
+#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[22])
+#define PyArray_Int16DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[23])
+#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[24])
+#define PyArray_Int32DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[25])
+#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[26])
+#define PyArray_Int64DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[27])
+#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[28])
+#define PyArray_IntpDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[29])
+#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[30])
+/* Floats */
+#define PyArray_HalfDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[31])
+#define PyArray_FloatDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[32])
+#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[33])
+#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[34])
+/* Complex */
+#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[35])
+#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[36])
+#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[37])
+/* String/Bytes */
+#define PyArray_StringDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[38])
+#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[39])
+/* Datetime/Timedelta */
+#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[40])
+#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)__experimental_dtype_api_table[41])
+
+
+/*
+ * ********************************
+ * Initialization
+ * ********************************
+ *
+ * Import the experimental API, the version must match the one defined in
+ * the header to ensure changes are taken into account. NumPy will further
+ * runtime-check this.
+ * You must call this function to use the symbols defined in this file.
+ */
+#if !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY)
+
+#define __EXPERIMENTAL_DTYPE_VERSION 5
+
+static int
+import_experimental_dtype_api(int version)
+{
+ if (version != __EXPERIMENTAL_DTYPE_VERSION) {
+ PyErr_Format(PyExc_RuntimeError,
+ "DType API version %d did not match header version %d. Please "
+ "update the import statement and check for API changes.",
+ version, __EXPERIMENTAL_DTYPE_VERSION);
+ return -1;
+ }
+ if (__experimental_dtype_api_table != __uninitialized_table) {
+ /* already imported. */
+ return 0;
+ }
+
+ PyObject *multiarray = PyImport_ImportModule("numpy.core._multiarray_umath");
+ if (multiarray == NULL) {
+ return -1;
+ }
+
+ PyObject *api = PyObject_CallMethod(multiarray,
+ "_get_experimental_dtype_api", "i", version);
+ Py_DECREF(multiarray);
+ if (api == NULL) {
+ return -1;
+ }
+ __experimental_dtype_api_table = (void **)PyCapsule_GetPointer(api,
+ "experimental_dtype_api_table");
+ Py_DECREF(api);
+
+ if (__experimental_dtype_api_table == NULL) {
+ __experimental_dtype_api_table = __uninitialized_table;
+ return -1;
+ }
+ return 0;
+}
+
+#endif /* !defined(NO_IMPORT) && !defined(NO_IMPORT_ARRAY) */
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_EXPERIMENTAL_DTYPE_API_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h
new file mode 100644
index 00000000..95040166
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/halffloat.h
@@ -0,0 +1,70 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_
+
+#include <Python.h>
+#include <numpy/npy_math.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Half-precision routines
+ */
+
+/* Conversions */
+float npy_half_to_float(npy_half h);
+double npy_half_to_double(npy_half h);
+npy_half npy_float_to_half(float f);
+npy_half npy_double_to_half(double d);
+/* Comparisons */
+int npy_half_eq(npy_half h1, npy_half h2);
+int npy_half_ne(npy_half h1, npy_half h2);
+int npy_half_le(npy_half h1, npy_half h2);
+int npy_half_lt(npy_half h1, npy_half h2);
+int npy_half_ge(npy_half h1, npy_half h2);
+int npy_half_gt(npy_half h1, npy_half h2);
+/* faster *_nonan variants for when you know h1 and h2 are not NaN */
+int npy_half_eq_nonan(npy_half h1, npy_half h2);
+int npy_half_lt_nonan(npy_half h1, npy_half h2);
+int npy_half_le_nonan(npy_half h1, npy_half h2);
+/* Miscellaneous functions */
+int npy_half_iszero(npy_half h);
+int npy_half_isnan(npy_half h);
+int npy_half_isinf(npy_half h);
+int npy_half_isfinite(npy_half h);
+int npy_half_signbit(npy_half h);
+npy_half npy_half_copysign(npy_half x, npy_half y);
+npy_half npy_half_spacing(npy_half h);
+npy_half npy_half_nextafter(npy_half x, npy_half y);
+npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);
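+
+/*
+ * Usage sketch (illustrative): round-trip a value through half precision.
+ *
+ *     npy_half h = npy_float_to_half(1.5f);
+ *     float f = npy_half_to_float(h);      // 1.5f is exactly representable
+ *     int finite = npy_half_isfinite(h);   // 1
+ */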
+
+/*
+ * Half-precision constants
+ */
+
+#define NPY_HALF_ZERO (0x0000u)
+#define NPY_HALF_PZERO (0x0000u)
+#define NPY_HALF_NZERO (0x8000u)
+#define NPY_HALF_ONE (0x3c00u)
+#define NPY_HALF_NEGONE (0xbc00u)
+#define NPY_HALF_PINF (0x7c00u)
+#define NPY_HALF_NINF (0xfc00u)
+#define NPY_HALF_NAN (0x7e00u)
+
+#define NPY_MAX_HALF (0x7bffu)
+
+/*
+ * Bit-level conversions
+ */
+
+npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);
+npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);
+npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);
+npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt
new file mode 100644
index 00000000..d72a7c38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt
@@ -0,0 +1,21 @@
+ zlib License
+ ------------
+
+ Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+ Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/libdivide.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/libdivide.h
new file mode 100644
index 00000000..f4eb8039
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/libdivide/libdivide.h
@@ -0,0 +1,2079 @@
+// libdivide.h - Optimized integer division
+// https://libdivide.com
+//
+// Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+// Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+//
+// libdivide is dual-licensed under the Boost or zlib licenses.
+// You may use libdivide under the terms of either of these.
+// See LICENSE.txt for more details.
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+
+#define LIBDIVIDE_VERSION "3.0"
+#define LIBDIVIDE_VERSION_MAJOR 3
+#define LIBDIVIDE_VERSION_MINOR 0
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+ #include <cstdlib>
+ #include <cstdio>
+ #include <type_traits>
+#else
+ #include <stdlib.h>
+ #include <stdio.h>
+#endif
+
+#if defined(LIBDIVIDE_AVX512)
+ #include <immintrin.h>
+#elif defined(LIBDIVIDE_AVX2)
+ #include <immintrin.h>
+#elif defined(LIBDIVIDE_SSE2)
+ #include <emmintrin.h>
+#endif
+
+#if defined(_MSC_VER)
+ #include <intrin.h>
+ // disable warning C4146: unary minus operator applied
+ // to unsigned type, result still unsigned
+ #pragma warning(disable: 4146)
+ #define LIBDIVIDE_VC
+#endif
+
+#if !defined(__has_builtin)
+ #define __has_builtin(x) 0
+#endif
+
+#if defined(__SIZEOF_INT128__)
+ #define HAS_INT128_T
+ // clang-cl on Windows does not yet support 128-bit division
+ #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
+ #define HAS_INT128_DIV
+ #endif
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+ #define LIBDIVIDE_X86_64
+#endif
+
+#if defined(__i386__)
+ #define LIBDIVIDE_i386
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+ #define LIBDIVIDE_GCC_STYLE_ASM
+#endif
+
+#if defined(__cplusplus) || defined(LIBDIVIDE_VC)
+ #define LIBDIVIDE_FUNCTION __FUNCTION__
+#else
+ #define LIBDIVIDE_FUNCTION __func__
+#endif
+
+#define LIBDIVIDE_ERROR(msg) \
+ do { \
+ fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
+ __LINE__, LIBDIVIDE_FUNCTION, msg); \
+ abort(); \
+ } while (0)
+
+#if defined(LIBDIVIDE_ASSERTIONS_ON)
+ #define LIBDIVIDE_ASSERT(x) \
+ do { \
+ if (!(x)) { \
+ fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
+ __LINE__, LIBDIVIDE_FUNCTION, #x); \
+ abort(); \
+ } \
+ } while (0)
+#else
+ #define LIBDIVIDE_ASSERT(x)
+#endif
+
+#ifdef __cplusplus
+namespace libdivide {
+#endif
+
+// pack divider structs to prevent compilers from padding.
+// This reduces memory usage by up to 43% when using a large
+// array of libdivide dividers and improves performance
+// by up to 10% because of reduced memory bandwidth.
+#pragma pack(push, 1)
+
+struct libdivide_u32_t {
+ uint32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s32_t {
+ int32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_u64_t {
+ uint64_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s64_t {
+ int64_t magic;
+ uint8_t more;
+};
+
+struct libdivide_u32_branchfree_t {
+ uint32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s32_branchfree_t {
+ int32_t magic;
+ uint8_t more;
+};
+
+struct libdivide_u64_branchfree_t {
+ uint64_t magic;
+ uint8_t more;
+};
+
+struct libdivide_s64_branchfree_t {
+ int64_t magic;
+ uint8_t more;
+};
+
+#pragma pack(pop)
+
+// Explanation of the "more" field:
+//
+// * Bits 0-5 are the shift value (for shift path or mult path).
+// * Bit 6 is the add indicator for mult path.
+// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
+// divisor indicator so that we can efficiently use sign extension to
+// create a bitmask with all bits set to 1 (if the divisor is negative)
+// or 0 (if the divisor is positive).
+//
+// u32: [0-4] shift value
+// [5] ignored
+// [6] add indicator
+// magic number of 0 indicates shift path
+//
+// s32: [0-4] shift value
+// [5] ignored
+// [6] add indicator
+// [7] indicates negative divisor
+// magic number of 0 indicates shift path
+//
+// u64: [0-5] shift value
+// [6] add indicator
+// magic number of 0 indicates shift path
+//
+// s64: [0-5] shift value
+// [6] add indicator
+// [7] indicates negative divisor
+// magic number of 0 indicates shift path
+//
+// In s32 and s64 branchfull modes, the magic number is negated according to
+// whether the divisor is negated. In the branchfree strategy, it is not negated.
+
+enum {
+ LIBDIVIDE_32_SHIFT_MASK = 0x1F,
+ LIBDIVIDE_64_SHIFT_MASK = 0x3F,
+ LIBDIVIDE_ADD_MARKER = 0x40,
+ LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
+};
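+
+// Decoding the "more" field (a sketch of what the do/recover routines
+// below compute):
+//
+//     uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;  // or the _64_ mask
+//     int add_indicator = (more & LIBDIVIDE_ADD_MARKER) != 0;
+//     int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR) != 0;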
+
+static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
+static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
+static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
+static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
+
+static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
+static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
+static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
+static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
+
+static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
+
+static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom);
+
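+// Usage sketch (illustrative; `in`, `out` and `n` are hypothetical): generate
+// a divider once, then reuse it for many divisions by the same denominator:
+//
+//     struct libdivide_u32_t fast_d = libdivide_u32_gen(7);
+//     for (size_t i = 0; i < n; i++) {
+//         out[i] = libdivide_u32_do(in[i], &fast_d);  // same as in[i] / 7
+//     }
+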
+//////// Internal Utility Functions
+
+static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) {
+ uint64_t xl = x, yl = y;
+ uint64_t rl = xl * yl;
+ return (uint32_t)(rl >> 32);
+}
+
+static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) {
+ int64_t xl = x, yl = y;
+ int64_t rl = xl * yl;
+ // needs to be arithmetic shift
+ return (int32_t)(rl >> 32);
+}
+
+static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) {
+#if defined(LIBDIVIDE_VC) && \
+ defined(LIBDIVIDE_X86_64)
+ return __umulh(x, y);
+#elif defined(HAS_INT128_T)
+ __uint128_t xl = x, yl = y;
+ __uint128_t rl = xl * yl;
+ return (uint64_t)(rl >> 64);
+#else
+ // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+ uint32_t mask = 0xFFFFFFFF;
+ uint32_t x0 = (uint32_t)(x & mask);
+ uint32_t x1 = (uint32_t)(x >> 32);
+ uint32_t y0 = (uint32_t)(y & mask);
+ uint32_t y1 = (uint32_t)(y >> 32);
+ uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+ uint64_t x0y1 = x0 * (uint64_t)y1;
+ uint64_t x1y0 = x1 * (uint64_t)y0;
+ uint64_t x1y1 = x1 * (uint64_t)y1;
+ uint64_t temp = x1y0 + x0y0_hi;
+ uint64_t temp_lo = temp & mask;
+ uint64_t temp_hi = temp >> 32;
+
+ return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32);
+#endif
+}
+
+static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) {
+#if defined(LIBDIVIDE_VC) && \
+ defined(LIBDIVIDE_X86_64)
+ return __mulh(x, y);
+#elif defined(HAS_INT128_T)
+ __int128_t xl = x, yl = y;
+ __int128_t rl = xl * yl;
+ return (int64_t)(rl >> 64);
+#else
+ // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64)
+ uint32_t mask = 0xFFFFFFFF;
+ uint32_t x0 = (uint32_t)(x & mask);
+ uint32_t y0 = (uint32_t)(y & mask);
+ int32_t x1 = (int32_t)(x >> 32);
+ int32_t y1 = (int32_t)(y >> 32);
+ uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0);
+ int64_t t = x1 * (int64_t)y0 + x0y0_hi;
+ int64_t w1 = x0 * (int64_t)y1 + (t & mask);
+
+ return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32);
+#endif
+}
+
+static inline int32_t libdivide_count_leading_zeros32(uint32_t val) {
+#if defined(__GNUC__) || \
+ __has_builtin(__builtin_clz)
+ // Fast way to count leading zeros
+ return __builtin_clz(val);
+#elif defined(LIBDIVIDE_VC)
+ unsigned long result;
+ if (_BitScanReverse(&result, val)) {
+ return 31 - result;
+ }
+ return 0;
+#else
+ if (val == 0)
+ return 32;
+ int32_t result = 8;
+ uint32_t hi = 0xFFU << 24;
+ while ((val & hi) == 0) {
+ hi >>= 8;
+ result += 8;
+ }
+ while (val & hi) {
+ result -= 1;
+ hi <<= 1;
+ }
+ return result;
+#endif
+}
+
+static inline int32_t libdivide_count_leading_zeros64(uint64_t val) {
+#if defined(__GNUC__) || \
+ __has_builtin(__builtin_clzll)
+ // Fast way to count leading zeros
+ return __builtin_clzll(val);
+#elif defined(LIBDIVIDE_VC) && defined(_WIN64)
+ unsigned long result;
+ if (_BitScanReverse64(&result, val)) {
+ return 63 - result;
+ }
+ return 0;
+#else
+ uint32_t hi = val >> 32;
+ uint32_t lo = val & 0xFFFFFFFF;
+ if (hi != 0) return libdivide_count_leading_zeros32(hi);
+ return 32 + libdivide_count_leading_zeros32(lo);
+#endif
+}
+
+// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit
+// uint {v}. The result must fit in 32 bits.
+// Returns the quotient directly and the remainder in *r
+static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) {
+#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \
+ defined(LIBDIVIDE_GCC_STYLE_ASM)
+ uint32_t result;
+ __asm__("divl %[v]"
+ : "=a"(result), "=d"(*r)
+ : [v] "r"(v), "a"(u0), "d"(u1)
+ );
+ return result;
+#else
+ uint64_t n = ((uint64_t)u1 << 32) | u0;
+ uint32_t result = (uint32_t)(n / v);
+ *r = (uint32_t)(n - result * (uint64_t)v);
+ return result;
+#endif
+}
+
+// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit
+// uint {v}. The result must fit in 64 bits.
+// Returns the quotient directly and the remainder in *r
+static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) {
+#if defined(LIBDIVIDE_X86_64) && \
+ defined(LIBDIVIDE_GCC_STYLE_ASM)
+ uint64_t result;
+ __asm__("divq %[v]"
+ : "=a"(result), "=d"(*r)
+ : [v] "r"(v), "a"(u0), "d"(u1)
+ );
+ return result;
+#elif defined(HAS_INT128_T) && \
+ defined(HAS_INT128_DIV)
+ __uint128_t n = ((__uint128_t)u1 << 64) | u0;
+ uint64_t result = (uint64_t)(n / v);
+ *r = (uint64_t)(n - result * (__uint128_t)v);
+ return result;
+#else
+ // Code taken from Hacker's Delight:
+ // http://www.hackersdelight.org/HDcode/divlu.c.
+ // License permits inclusion here per:
+ // http://www.hackersdelight.org/permissions.htm
+
+ const uint64_t b = (1ULL << 32); // Number base (32 bits)
+ uint64_t un1, un0; // Norm. dividend LSD's
+ uint64_t vn1, vn0; // Norm. divisor digits
+ uint64_t q1, q0; // Quotient digits
+ uint64_t un64, un21, un10; // Dividend digit pairs
+ uint64_t rhat; // A remainder
+ int32_t s; // Shift amount for norm
+
+ // If overflow, set rem. to an impossible value,
+ // and return the largest possible quotient
+ if (u1 >= v) {
+ *r = (uint64_t) -1;
+ return (uint64_t) -1;
+ }
+
+ // count leading zeros
+ s = libdivide_count_leading_zeros64(v);
+ if (s > 0) {
+ // Normalize divisor
+ v = v << s;
+ un64 = (u1 << s) | (u0 >> (64 - s));
+ un10 = u0 << s; // Shift dividend left
+ } else {
+ // Avoid undefined behavior of (u0 >> 64).
+ // The behavior is undefined if the right operand is
+ // negative, or greater than or equal to the length
+ // in bits of the promoted left operand.
+ un64 = u1;
+ un10 = u0;
+ }
+
+ // Break divisor up into two 32-bit digits
+ vn1 = v >> 32;
+ vn0 = v & 0xFFFFFFFF;
+
+ // Break right half of dividend into two digits
+ un1 = un10 >> 32;
+ un0 = un10 & 0xFFFFFFFF;
+
+ // Compute the first quotient digit, q1
+ q1 = un64 / vn1;
+ rhat = un64 - q1 * vn1;
+
+ while (q1 >= b || q1 * vn0 > b * rhat + un1) {
+ q1 = q1 - 1;
+ rhat = rhat + vn1;
+ if (rhat >= b)
+ break;
+ }
+
+ // Multiply and subtract
+ un21 = un64 * b + un1 - q1 * v;
+
+ // Compute the second quotient digit
+ q0 = un21 / vn1;
+ rhat = un21 - q0 * vn1;
+
+ while (q0 >= b || q0 * vn0 > b * rhat + un0) {
+ q0 = q0 - 1;
+ rhat = rhat + vn1;
+ if (rhat >= b)
+ break;
+ }
+
+ *r = (un21 * b + un0 - q0 * v) >> s;
+ return q1 * b + q0;
+#endif
+}
+
+// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0)
+static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) {
+ if (signed_shift > 0) {
+ uint32_t shift = signed_shift;
+ *u1 <<= shift;
+ *u1 |= *u0 >> (64 - shift);
+ *u0 <<= shift;
+ }
+ else if (signed_shift < 0) {
+ uint32_t shift = -signed_shift;
+ *u0 >>= shift;
+ *u0 |= *u1 << (64 - shift);
+ *u1 >>= shift;
+ }
+}
+
+// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder.
+static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) {
+#if defined(HAS_INT128_T) && \
+ defined(HAS_INT128_DIV)
+ __uint128_t ufull = u_hi;
+ __uint128_t vfull = v_hi;
+ ufull = (ufull << 64) | u_lo;
+ vfull = (vfull << 64) | v_lo;
+ uint64_t res = (uint64_t)(ufull / vfull);
+ __uint128_t remainder = ufull - (vfull * res);
+ *r_lo = (uint64_t)remainder;
+ *r_hi = (uint64_t)(remainder >> 64);
+ return res;
+#else
+ // Adapted from "Unsigned Doubleword Division" in Hacker's Delight
+ // We want to compute u / v
+ typedef struct { uint64_t hi; uint64_t lo; } u128_t;
+ u128_t u = {u_hi, u_lo};
+ u128_t v = {v_hi, v_lo};
+
+ if (v.hi == 0) {
+ // divisor v is a 64 bit value, so we just need one 128/64 division
+ // Note that we are simpler than Hacker's Delight here, because we know
+ // the quotient fits in 64 bits whereas Hacker's Delight demands a full
+ // 128 bit quotient
+ *r_hi = 0;
+ return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo);
+ }
+ // Here v >= 2**64
+ // We know that v.hi != 0, so count leading zeros is OK
+ // We have 0 <= n <= 63
+ uint32_t n = libdivide_count_leading_zeros64(v.hi);
+
+ // Normalize the divisor so its MSB is 1
+ u128_t v1t = v;
+ libdivide_u128_shift(&v1t.hi, &v1t.lo, n);
+ uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64
+
+ // To ensure no overflow
+ u128_t u1 = u;
+ libdivide_u128_shift(&u1.hi, &u1.lo, -1);
+
+ // Get quotient from divide unsigned insn.
+ uint64_t rem_ignored;
+ uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored);
+
+ // Undo normalization and division of u by 2.
+ u128_t q0 = {0, q1};
+ libdivide_u128_shift(&q0.hi, &q0.lo, n);
+ libdivide_u128_shift(&q0.hi, &q0.lo, -63);
+
+ // Make q0 correct or too small by 1
+ // Equivalent to `if (q0 != 0) q0 = q0 - 1;`
+ if (q0.hi != 0 || q0.lo != 0) {
+ q0.hi -= (q0.lo == 0); // borrow
+ q0.lo -= 1;
+ }
+
+ // Now q0 is correct.
+ // Compute q0 * v as q0v
+ // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo)
+ // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) +
+    //   (q0.lo * v.hi << 64) + (q0.lo * v.lo)
+ // Each term is 128 bit
+ // High half of full product (upper 128 bits!) are dropped
+ u128_t q0v = {0, 0};
+ q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo);
+ q0v.lo = q0.lo*v.lo;
+
+ // Compute u - q0v as u_q0v
+ // This is the remainder
+ u128_t u_q0v = u;
+ u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow
+ u_q0v.lo -= q0v.lo;
+
+ // Check if u_q0v >= v
+ // This checks if our remainder is larger than the divisor
+ if ((u_q0v.hi > v.hi) ||
+ (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) {
+ // Increment q0
+ q0.lo += 1;
+ q0.hi += (q0.lo == 0); // carry
+
+ // Subtract v from remainder
+ u_q0v.hi -= v.hi + (u_q0v.lo < v.lo);
+ u_q0v.lo -= v.lo;
+ }
+
+ *r_hi = u_q0v.hi;
+ *r_lo = u_q0v.lo;
+
+ LIBDIVIDE_ASSERT(q0.hi == 0);
+ return q0.lo;
+#endif
+}
+
+////////// UINT32
+
+static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_u32_t result;
+ uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d);
+
+ // Power of 2
+ if ((d & (d - 1)) == 0) {
+ // We need to subtract 1 from the shift value in case of an unsigned
+ // branchfree divider because there is a hardcoded right shift by 1
+ // in its division algorithm. Because of this we also need to add back
+ // 1 in its recovery algorithm.
+ result.magic = 0;
+ result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+ } else {
+ uint8_t more;
+ uint32_t rem, proposed_m;
+ proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem);
+
+ LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+ const uint32_t e = d - rem;
+
+ // This power works if e < 2**floor_log_2_d.
+ if (!branchfree && (e < (1U << floor_log_2_d))) {
+ // This power works
+ more = floor_log_2_d;
+ } else {
+ // We have to use the general 33-bit algorithm. We need to compute
+ // (2**power) / d. However, we already have (2**(power-1))/d and
+ // its remainder. By doubling both, and then correcting the
+ // remainder, we can compute the larger division.
+ // don't care about overflow here - in fact, we expect it
+ proposed_m += proposed_m;
+ const uint32_t twice_rem = rem + rem;
+ if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+ result.magic = 1 + proposed_m;
+ result.more = more;
+ // result.more's shift should in general be ceil_log_2_d. But if we
+ // used the smaller power, we subtract one from the shift because we're
+ // using the smaller power. If we're using the larger power, we
+ // subtract one from the shift because it's taken care of by the add
+ // indicator. So floor_log_2_d happens to be correct in both cases.
+ }
+ return result;
+}
+
+struct libdivide_u32_t libdivide_u32_gen(uint32_t d) {
+ return libdivide_internal_u32_gen(d, 0);
+}
+
+struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) {
+ if (d == 1) {
+ LIBDIVIDE_ERROR("branchfree divider must be != 1");
+ }
+ struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1);
+ struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)};
+ return ret;
+}
+
+uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return numer >> more;
+ }
+ else {
+ uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ uint32_t t = ((numer - q) >> 1) + q;
+ return t >> (more & LIBDIVIDE_32_SHIFT_MASK);
+ }
+ else {
+ // All upper bits are 0,
+ // don't need to mask them off.
+ return q >> more;
+ }
+ }
+}
+
+uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
+ uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+ uint32_t t = ((numer - q) >> 1) + q;
+ return t >> denom->more;
+}
+
+uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1U << shift;
+ } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+ // We compute q = n/d = n*m / 2^(32 + shift)
+ // Therefore we have d = 2^(32 + shift) / m
+ // We need to ceil it.
+ // We know d is not a power of 2, so m is not a power of 2,
+ // so we can just add 1 to the floor
+ uint32_t hi_dividend = 1U << shift;
+ uint32_t rem_ignored;
+ return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+ } else {
+ // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+ // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+ // Also note that shift may be as high as 31, so shift + 1 will
+ // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+ // then double the quotient and remainder.
+ uint64_t half_n = 1ULL << (32 + shift);
+ uint64_t d = (1ULL << 32) | denom->magic;
+ // Note that the quotient is guaranteed <= 32 bits, but the remainder
+ // may need 33!
+ uint32_t half_q = (uint32_t)(half_n / d);
+ uint64_t rem = half_n % d;
+ // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+ // remainder would increase the quotient.
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+ uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+ // We rounded down in gen (hence +1)
+ return full_q + 1;
+ }
+}
+
+uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1U << (shift + 1);
+ } else {
+ // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+ // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+ // Also note that shift may be as high as 31, so shift + 1 will
+ // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+ // then double the quotient and remainder.
+ uint64_t half_n = 1ULL << (32 + shift);
+ uint64_t d = (1ULL << 32) | denom->magic;
+ // Note that the quotient is guaranteed <= 32 bits, but the remainder
+ // may need 33!
+ uint32_t half_q = (uint32_t)(half_n / d);
+ uint64_t rem = half_n % d;
+ // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+ // remainder would increase the quotient.
+ // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+ uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+ // We rounded down in gen (hence +1)
+ return full_q + 1;
+ }
+}
+
+/////////// UINT64
+
+static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_u64_t result;
+ uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
+
+ // Power of 2
+ if ((d & (d - 1)) == 0) {
+ // We need to subtract 1 from the shift value in case of an unsigned
+ // branchfree divider because there is a hardcoded right shift by 1
+ // in its division algorithm. Because of this we also need to add back
+ // 1 in its recovery algorithm.
+ result.magic = 0;
+ result.more = (uint8_t)(floor_log_2_d - (branchfree != 0));
+ } else {
+ uint64_t proposed_m, rem;
+ uint8_t more;
+ // (1 << (64 + floor_log_2_d)) / d
+ proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem);
+
+ LIBDIVIDE_ASSERT(rem > 0 && rem < d);
+ const uint64_t e = d - rem;
+
+ // This power works if e < 2**floor_log_2_d.
+ if (!branchfree && e < (1ULL << floor_log_2_d)) {
+ // This power works
+ more = floor_log_2_d;
+ } else {
+ // We have to use the general 65-bit algorithm. We need to compute
+ // (2**power) / d. However, we already have (2**(power-1))/d and
+ // its remainder. By doubling both, and then correcting the
+ // remainder, we can compute the larger division.
+ // don't care about overflow here - in fact, we expect it
+ proposed_m += proposed_m;
+ const uint64_t twice_rem = rem + rem;
+ if (twice_rem >= d || twice_rem < rem) proposed_m += 1;
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+ result.magic = 1 + proposed_m;
+ result.more = more;
+ // result.more's shift should in general be ceil_log_2_d. But if we
+ // used the smaller power, we subtract one from the shift because we're
+ // using the smaller power. If we're using the larger power, we
+ // subtract one from the shift because it's taken care of by the add
+ // indicator. So floor_log_2_d happens to be correct in both cases,
+ // which is why we do it outside of the if statement.
+ }
+ return result;
+}
+
+struct libdivide_u64_t libdivide_u64_gen(uint64_t d) {
+ return libdivide_internal_u64_gen(d, 0);
+}
+
+struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) {
+ if (d == 1) {
+ LIBDIVIDE_ERROR("branchfree divider must be != 1");
+ }
+ struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1);
+ struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)};
+ return ret;
+}
+
+uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return numer >> more;
+ }
+ else {
+ uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ uint64_t t = ((numer - q) >> 1) + q;
+ return t >> (more & LIBDIVIDE_64_SHIFT_MASK);
+ }
+ else {
+ // All upper bits are 0,
+ // don't need to mask them off.
+ return q >> more;
+ }
+ }
+}
+
+uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) {
+ uint64_t q = libdivide_mullhi_u64(denom->magic, numer);
+ uint64_t t = ((numer - q) >> 1) + q;
+ return t >> denom->more;
+}
+
+uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1ULL << shift;
+ } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+ // We compute q = n/d = n*m / 2^(64 + shift)
+ // Therefore we have d = 2^(64 + shift) / m
+ // We need to ceil it.
+ // We know d is not a power of 2, so m is not a power of 2,
+ // so we can just add 1 to the floor
+ uint64_t hi_dividend = 1ULL << shift;
+ uint64_t rem_ignored;
+ return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored);
+ } else {
+ // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+ // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+ // libdivide_u32_recover for more on what we do here.
+ // TODO: do something better than 128 bit math
+
+ // Full n is a (potentially) 129 bit value
+ // half_n is a 128 bit value
+ // Compute the hi half of half_n. Low half is 0.
+ uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+ // d is a 65 bit value. The high bit is always set to 1.
+ const uint64_t d_hi = 1, d_lo = denom->magic;
+ // Note that the quotient is guaranteed <= 64 bits,
+ // but the remainder may need 65!
+ uint64_t r_hi, r_lo;
+ uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+ // We computed 2^(64+shift)/(m+2^64)
+ // Double the remainder ('dr') and check if that is larger than d
+        // Note that d is a 65 bit value, so r_hi is small and so r_hi + r_hi
+ // cannot overflow
+ uint64_t dr_lo = r_lo + r_lo;
+ uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+ int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+ uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+ return full_q + 1;
+ }
+}
+
+uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+ if (!denom->magic) {
+ return 1ULL << (shift + 1);
+ } else {
+ // Here we wish to compute d = 2^(64+shift+1)/(m+2^64).
+ // Notice (m + 2^64) is a 65 bit number. This gets hairy. See
+ // libdivide_u32_recover for more on what we do here.
+ // TODO: do something better than 128 bit math
+
+ // Full n is a (potentially) 129 bit value
+ // half_n is a 128 bit value
+ // Compute the hi half of half_n. Low half is 0.
+ uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0;
+ // d is a 65 bit value. The high bit is always set to 1.
+ const uint64_t d_hi = 1, d_lo = denom->magic;
+ // Note that the quotient is guaranteed <= 64 bits,
+ // but the remainder may need 65!
+ uint64_t r_hi, r_lo;
+ uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo);
+ // We computed 2^(64+shift)/(m+2^64)
+ // Double the remainder ('dr') and check if that is larger than d
+        // Note that d is a 65 bit value, so r_hi is small and so r_hi + r_hi
+ // cannot overflow
+ uint64_t dr_lo = r_lo + r_lo;
+ uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry
+ int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo);
+ uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0);
+ return full_q + 1;
+ }
+}
+
+/////////// SINT32
+
+static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_s32_t result;
+
+    // If d is a power of 2, or the negative of a power of 2, we have to use a shift.
+ // This is especially important because the magic algorithm fails for -1.
+ // To check if d is a power of 2 or its inverse, it suffices to check
+ // whether its absolute value has exactly one bit set. This works even for
+ // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+ // and is a power of 2.
+ uint32_t ud = (uint32_t)d;
+ uint32_t absD = (d < 0) ? -ud : ud;
+ uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD);
+ // check if exactly one bit is set,
+ // don't care if absD is 0 since that's divide by zero
+ if ((absD & (absD - 1)) == 0) {
+ // Branchfree and normal paths are exactly the same
+ result.magic = 0;
+ result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+ } else {
+ LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
+
+ uint8_t more;
+ // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+ uint32_t rem, proposed_m;
+ proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
+ const uint32_t e = absD - rem;
+
+ // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+ if (!branchfree && e < (1U << floor_log_2_d)) {
+ // This power works
+ more = floor_log_2_d - 1;
+ } else {
+ // We need to go one higher. This should not make proposed_m
+ // overflow, but it will make it negative when interpreted as an
+ // int32_t.
+ proposed_m += proposed_m;
+ const uint32_t twice_rem = rem + rem;
+ if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+
+ proposed_m += 1;
+ int32_t magic = (int32_t)proposed_m;
+
+ // Mark if we are negative. Note we only negate the magic number in the
+ // branchfull case.
+ if (d < 0) {
+ more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+ if (!branchfree) {
+ magic = -magic;
+ }
+ }
+
+ result.more = more;
+ result.magic = magic;
+ }
+ return result;
+}
+
+struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
+ return libdivide_internal_s32_gen(d, 0);
+}
+
+struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
+ struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
+ struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
+ return result;
+}
+
+int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+ if (!denom->magic) {
+ uint32_t sign = (int8_t)more >> 7;
+ uint32_t mask = (1U << shift) - 1;
+ uint32_t uq = numer + ((numer >> 31) & mask);
+ int32_t q = (int32_t)uq;
+ q >>= shift;
+ q = (q ^ sign) - sign;
+ return q;
+ } else {
+ uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift and then sign extend
+ int32_t sign = (int8_t)more >> 7;
+ // q += (more < 0 ? -numer : numer)
+ // cast required to avoid UB
+ uq += ((uint32_t)numer ^ sign) - sign;
+ }
+ int32_t q = (int32_t)uq;
+ q >>= shift;
+ q += (q < 0);
+ return q;
+ }
+}
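+
+// Worked example (illustrative, continuing d = 7 from above: magic =
+// 0x92492493, shift = 2, ADD_MARKER set): for numer = 100,
+// libdivide_mullhi_s32 yields -43; adding numer gives 57, and
+// 57 >> 2 = 14 == 100 / 7.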
+
+int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift and then sign extend
+ int32_t sign = (int8_t)more >> 7;
+ int32_t magic = denom->magic;
+ int32_t q = libdivide_mullhi_s32(magic, numer);
+ q += numer;
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is a power of
+ // 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ uint32_t q_sign = (uint32_t)(q >> 31);
+ q += q_sign & ((1U << shift) - is_power_of_2);
+
+ // Now arithmetic right shift
+ q >>= shift;
+ // Negate if needed
+ q = (q ^ sign) - sign;
+
+ return q;
+}
+
+int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ if (!denom->magic) {
+ uint32_t absD = 1U << shift;
+ if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+ absD = -absD;
+ }
+ return (int32_t)absD;
+ } else {
+ // Unsigned math is much easier
+ // We negate the magic number only in the branchfull case, and we don't
+ // know which case we're in. However we have enough information to
+ // determine the correct sign of the magic number. The divisor was
+ // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set,
+ // the magic number's sign is opposite that of the divisor.
+ // We want to compute the positive magic number.
+ int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+ int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+ ? denom->magic > 0 : denom->magic < 0;
+
+ // Handle the power of 2 case (including branchfree)
+ if (denom->magic == 0) {
+ int32_t result = 1U << shift;
+ return negative_divisor ? -result : result;
+ }
+
+ uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic);
+ uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30
+ uint32_t q = (uint32_t)(n / d);
+ int32_t result = (int32_t)q;
+ result += 1;
+ return negative_divisor ? -result : result;
+ }
+}
+
+int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) {
+ return libdivide_s32_recover((const struct libdivide_s32_t *)denom);
+}
+
+///////////// SINT64
+
+static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) {
+ if (d == 0) {
+ LIBDIVIDE_ERROR("divider must be != 0");
+ }
+
+ struct libdivide_s64_t result;
+
+ // If d is a power of 2, or negative a power of 2, we have to use a shift.
+ // This is especially important because the magic algorithm fails for -1.
+ // To check if d is a power of 2 or its inverse, it suffices to check
+ // whether its absolute value has exactly one bit set. This works even for
+ // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set
+ // and is a power of 2.
+ uint64_t ud = (uint64_t)d;
+ uint64_t absD = (d < 0) ? -ud : ud;
+ uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD);
+ // check if exactly one bit is set,
+ // don't care if absD is 0 since that's divide by zero
+ if ((absD & (absD - 1)) == 0) {
+ // Branchfree and non-branchfree cases are the same
+ result.magic = 0;
+ result.more = floor_log_2_d | (d < 0 ? LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+ } else {
+ // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+ // is 0 and the high word is 2**(floor_log_2_d - 1)
+ uint8_t more;
+ uint64_t rem, proposed_m;
+ proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
+ const uint64_t e = absD - rem;
+
+ // We are going to start with a power of floor_log_2_d - 1.
+ // This works if e < 2**floor_log_2_d.
+ if (!branchfree && e < (1ULL << floor_log_2_d)) {
+ // This power works
+ more = floor_log_2_d - 1;
+ } else {
+ // We need to go one higher. This should not make proposed_m
+ // overflow, but it will make it negative when interpreted as an
+ // int64_t.
+ proposed_m += proposed_m;
+ const uint64_t twice_rem = rem + rem;
+ if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+ // Note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+ // also set ADD_MARKER; this is an annoying optimization that
+ // enables algorithm #4 to avoid the mask. However, we always set it
+ // in the branchfree case.
+ more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+ }
+ proposed_m += 1;
+ int64_t magic = (int64_t)proposed_m;
+
+ // Mark if we are negative
+ if (d < 0) {
+ more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+ if (!branchfree) {
+ magic = -magic;
+ }
+ }
+
+ result.more = more;
+ result.magic = magic;
+ }
+ return result;
+}
+
+struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
+ return libdivide_internal_s64_gen(d, 0);
+}
+
+struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
+ struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
+ struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
+ return ret;
+}
+
+int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+ if (!denom->magic) { // shift path
+ uint64_t mask = (1ULL << shift) - 1;
+ uint64_t uq = numer + ((numer >> 63) & mask);
+ int64_t q = (int64_t)uq;
+ q >>= shift;
+ // must be arithmetic shift and then sign-extend
+ int64_t sign = (int8_t)more >> 7;
+ q = (q ^ sign) - sign;
+ return q;
+ } else {
+ uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift and then sign extend
+ int64_t sign = (int8_t)more >> 7;
+ // q += (more < 0 ? -numer : numer)
+ // cast required to avoid UB
+ uq += ((uint64_t)numer ^ sign) - sign;
+ }
+ int64_t q = (int64_t)uq;
+ q >>= shift;
+ q += (q < 0);
+ return q;
+ }
+}
+
+int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift and then sign extend
+ int64_t sign = (int8_t)more >> 7;
+ int64_t magic = denom->magic;
+ int64_t q = libdivide_mullhi_s64(magic, numer);
+ q += numer;
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is a power of
+ // 2, or (2**shift) if it is not a power of 2.
+ uint64_t is_power_of_2 = (magic == 0);
+ uint64_t q_sign = (uint64_t)(q >> 63);
+ q += q_sign & ((1ULL << shift) - is_power_of_2);
+
+ // Arithmetic right shift
+ q >>= shift;
+ // Negate if needed
+ q = (q ^ sign) - sign;
+
+ return q;
+}
+
+int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ if (denom->magic == 0) { // shift path
+ uint64_t absD = 1ULL << shift;
+ if (more & LIBDIVIDE_NEGATIVE_DIVISOR) {
+ absD = -absD;
+ }
+ return (int64_t)absD;
+ } else {
+ // Unsigned math is much easier
+ int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR);
+ int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER)
+ ? denom->magic > 0 : denom->magic < 0;
+
+ uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic);
+ uint64_t n_hi = 1ULL << shift, n_lo = 0;
+ uint64_t rem_ignored;
+ uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored);
+ int64_t result = (int64_t)(q + 1);
+ if (negative_divisor) {
+ result = -result;
+ }
+ return result;
+ }
+}
+
+int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) {
+ return libdivide_s64_recover((const struct libdivide_s64_t *)denom);
+}
+
+#if defined(LIBDIVIDE_AVX512)
+
+static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom);
+static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom);
+static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom);
+static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom);
+
+static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+static inline __m512i libdivide_s64_signbits(__m512i v) {
+ return _mm512_srai_epi64(v, 63);
+}
+
+static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) {
+ return _mm512_srai_epi64(v, amt);
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) {
+ __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32);
+ __m512i a1X3X = _mm512_srli_epi64(a, 32);
+ __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+ __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask);
+ return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// b is one 32-bit value repeated.
+static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) {
+ __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32);
+ __m512i a1X3X = _mm512_srli_epi64(a, 32);
+ __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0);
+ __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask);
+ return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) {
+ __m512i lomask = _mm512_set1_epi64(0xffffffff);
+ __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1);
+ __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1);
+ __m512i w0 = _mm512_mul_epu32(x, y);
+ __m512i w1 = _mm512_mul_epu32(x, yh);
+ __m512i w2 = _mm512_mul_epu32(xh, y);
+ __m512i w3 = _mm512_mul_epu32(xh, yh);
+ __m512i w0h = _mm512_srli_epi64(w0, 32);
+ __m512i s1 = _mm512_add_epi64(w1, w0h);
+ __m512i s1l = _mm512_and_si512(s1, lomask);
+ __m512i s1h = _mm512_srli_epi64(s1, 32);
+ __m512i s2 = _mm512_add_epi64(w2, s1l);
+ __m512i s2h = _mm512_srli_epi64(s2, 32);
+ __m512i hi = _mm512_add_epi64(w3, s1h);
+ hi = _mm512_add_epi64(hi, s2h);
+
+ return hi;
+}
+
+// y is one 64-bit value repeated.
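+// The signed high half can be derived from the unsigned one via the two's
+// complement identity mullhi_s(x, y) = mullhi_u(x, y)
+// - (x < 0 ? y : 0) - (y < 0 ? x : 0), which is what the masked
+// subtractions below implement.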
+static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) {
+ __m512i p = libdivide_mullhi_u64_vector(x, y);
+ __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y);
+ __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x);
+ p = _mm512_sub_epi64(p, t1);
+ p = _mm512_sub_epi64(p, t2);
+ return p;
+}
+
+////////// UINT32
+
+__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm512_srli_epi32(numers, more);
+ }
+ else {
+ __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+ return _mm512_srli_epi32(t, shift);
+ }
+ else {
+ return _mm512_srli_epi32(q, more);
+ }
+ }
+}
+
+__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) {
+ __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic));
+ __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q);
+ return _mm512_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm512_srli_epi64(numers, more);
+ }
+ else {
+ __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+ return _mm512_srli_epi64(t, shift);
+ }
+ else {
+ return _mm512_srli_epi64(q, more);
+ }
+ }
+}
+
+__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) {
+ __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic));
+ __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q);
+ return _mm512_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ uint32_t mask = (1U << shift) - 1;
+ __m512i roundToZeroTweak = _mm512_set1_epi32(mask);
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
+ __m512i q = _mm512_add_epi32(numers, _mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak));
+ q = _mm512_srai_epi32(q, shift);
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign);
+ return q;
+ }
+ else {
+ __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign));
+ }
+ // q >>= shift
+ q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+ q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) {
+ int32_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic));
+ q = _mm512_add_epi32(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31
+ __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2);
+ q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+ q = _mm512_srai_epi32(q, shift); // q >>= shift
+ q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+////////// SINT64
+
+__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ int64_t magic = denom->magic;
+ if (magic == 0) { // shift path
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ uint64_t mask = (1ULL << shift) - 1;
+ __m512i roundToZeroTweak = _mm512_set1_epi64(mask);
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
+ __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak));
+ q = libdivide_s64_shift_right_vector(q, shift);
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign);
+ return q;
+ }
+ else {
+ __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign));
+ }
+ // q >>= shift
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+ q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) {
+ int64_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift
+ __m512i sign = _mm512_set1_epi32((int8_t)more >> 7);
+
+ // libdivide_mullhi_s64(numers, magic);
+ __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic));
+ q = _mm512_add_epi64(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2.
+ uint32_t is_power_of_2 = (magic == 0);
+ __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+ __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2);
+ q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask)
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+ q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+#elif defined(LIBDIVIDE_AVX2)
+
+static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom);
+static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom);
+static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom);
+static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom);
+
+static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm256_srai_epi64(v, 63) (from AVX512).
+static inline __m256i libdivide_s64_signbits(__m256i v) {
+ __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+ __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31);
+ return signBits;
+}
+
+// Implementation of _mm256_srai_epi64 (from AVX512).
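+// The emulation below uses a logical shift plus sign extension: after
+// v >> amt the sign bit sits at position 63 - amt, and (x ^ m) - m with
+// m = 1 << (63 - amt) sign-extends from that bit.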
+static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) {
+ const int b = 64 - amt;
+ __m256i m = _mm256_set1_epi64x(1ULL << (b - 1));
+ __m256i x = _mm256_srli_epi64(v, amt);
+ __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m);
+ return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) {
+ __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32);
+ __m256i a1X3X = _mm256_srli_epi64(a, 32);
+ __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+ __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask);
+ return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// b is one 32-bit value repeated.
+static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) {
+ __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32);
+ __m256i a1X3X = _mm256_srli_epi64(a, 32);
+ __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0);
+ __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask);
+ return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
+ __m256i lomask = _mm256_set1_epi64x(0xffffffff);
+ __m256i xh = _mm256_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
+ __m256i yh = _mm256_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
+ __m256i w0 = _mm256_mul_epu32(x, y); // x0l*y0l, x1l*y1l
+ __m256i w1 = _mm256_mul_epu32(x, yh); // x0l*y0h, x1l*y1h
+ __m256i w2 = _mm256_mul_epu32(xh, y); // x0h*y0l, x1h*y1l
+ __m256i w3 = _mm256_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h
+ __m256i w0h = _mm256_srli_epi64(w0, 32);
+ __m256i s1 = _mm256_add_epi64(w1, w0h);
+ __m256i s1l = _mm256_and_si256(s1, lomask);
+ __m256i s1h = _mm256_srli_epi64(s1, 32);
+ __m256i s2 = _mm256_add_epi64(w2, s1l);
+ __m256i s2h = _mm256_srli_epi64(s2, 32);
+ __m256i hi = _mm256_add_epi64(w3, s1h);
+ hi = _mm256_add_epi64(hi, s2h);
+
+ return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
+ __m256i p = libdivide_mullhi_u64_vector(x, y);
+ __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
+ __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
+ p = _mm256_sub_epi64(p, t1);
+ p = _mm256_sub_epi64(p, t2);
+ return p;
+}
+
+////////// UINT32
+
+__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm256_srli_epi32(numers, more);
+ }
+ else {
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+ return _mm256_srli_epi32(t, shift);
+ }
+ else {
+ return _mm256_srli_epi32(q, more);
+ }
+ }
+}
+
+__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
+ __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+ __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+ return _mm256_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm256_srli_epi64(numers, more);
+ }
+ else {
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+ return _mm256_srli_epi64(t, shift);
+ }
+ else {
+ return _mm256_srli_epi64(q, more);
+ }
+ }
+}
+
+__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
+ __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+ __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+ return _mm256_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ uint32_t mask = (1U << shift) - 1;
+ __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
+ __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak));
+ q = _mm256_srai_epi32(q, shift);
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign);
+ return q;
+ }
+ else {
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign));
+ }
+ // q >>= shift
+ q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+ q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) {
+ int32_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic));
+ q = _mm256_add_epi32(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31
+ __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2);
+ q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+ q = _mm256_srai_epi32(q, shift); // q >>= shift
+ q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+////////// SINT64
+
+__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ int64_t magic = denom->magic;
+ if (magic == 0) { // shift path
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ uint64_t mask = (1ULL << shift) - 1;
+ __m256i roundToZeroTweak = _mm256_set1_epi64x(mask);
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
+ __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak));
+ q = libdivide_s64_shift_right_vector(q, shift);
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign);
+ return q;
+ }
+ else {
+ __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign));
+ }
+ // q >>= shift
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+ q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) {
+ int64_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift
+ __m256i sign = _mm256_set1_epi32((int8_t)more >> 7);
+
+ // libdivide_mullhi_s64(numers, magic);
+ __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic));
+ q = _mm256_add_epi64(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2.
+ uint32_t is_power_of_2 = (magic == 0);
+ __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+ __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2);
+ q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask)
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+ q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+#elif defined(LIBDIVIDE_SSE2)
+
+static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom);
+static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom);
+static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom);
+static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom);
+
+static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom);
+static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom);
+static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom);
+static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom);
+
+//////// Internal Utility Functions
+
+// Implementation of _mm_srai_epi64(v, 63) (from AVX512).
+static inline __m128i libdivide_s64_signbits(__m128i v) {
+ __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1));
+ __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31);
+ return signBits;
+}
+
+// Implementation of _mm_srai_epi64 (from AVX512).
+static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) {
+ const int b = 64 - amt;
+ __m128i m = _mm_set1_epi64x(1ULL << (b - 1));
+ __m128i x = _mm_srli_epi64(v, amt);
+ __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m);
+ return result;
+}
+
+// Here, b is assumed to contain one 32-bit value repeated.
+static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) {
+ __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32);
+ __m128i a1X3X = _mm_srli_epi64(a, 32);
+ __m128i mask = _mm_set_epi32(-1, 0, -1, 0);
+ __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask);
+ return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3);
+}
+
+// SSE2 does not have a signed multiplication instruction, but we can convert
+// unsigned to signed pretty efficiently. Again, b is just a 32 bit value
+// repeated four times.
+static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) {
+ __m128i p = libdivide_mullhi_u32_vector(a, b);
+ // t1 = (a >> 31) & y, arithmetic shift
+ __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b);
+ __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a);
+ p = _mm_sub_epi32(p, t1);
+ p = _mm_sub_epi32(p, t2);
+ return p;
+}
+
+// Here, y is assumed to contain one 64-bit value repeated.
+// https://stackoverflow.com/a/28827013
+static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
+ __m128i lomask = _mm_set1_epi64x(0xffffffff);
+ __m128i xh = _mm_shuffle_epi32(x, 0xB1); // x0l, x0h, x1l, x1h
+ __m128i yh = _mm_shuffle_epi32(y, 0xB1); // y0l, y0h, y1l, y1h
+ __m128i w0 = _mm_mul_epu32(x, y); // x0l*y0l, x1l*y1l
+ __m128i w1 = _mm_mul_epu32(x, yh); // x0l*y0h, x1l*y1h
+ __m128i w2 = _mm_mul_epu32(xh, y); // x0h*y0l, x1h*y1l
+ __m128i w3 = _mm_mul_epu32(xh, yh); // x0h*y0h, x1h*y1h
+ __m128i w0h = _mm_srli_epi64(w0, 32);
+ __m128i s1 = _mm_add_epi64(w1, w0h);
+ __m128i s1l = _mm_and_si128(s1, lomask);
+ __m128i s1h = _mm_srli_epi64(s1, 32);
+ __m128i s2 = _mm_add_epi64(w2, s1l);
+ __m128i s2h = _mm_srli_epi64(s2, 32);
+ __m128i hi = _mm_add_epi64(w3, s1h);
+ hi = _mm_add_epi64(hi, s2h);
+
+ return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
+ __m128i p = libdivide_mullhi_u64_vector(x, y);
+ __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
+ __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
+ p = _mm_sub_epi64(p, t1);
+ p = _mm_sub_epi64(p, t2);
+ return p;
+}
+
+////////// UINT32
+
+__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm_srli_epi32(numers, more);
+ }
+ else {
+ __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+ return _mm_srli_epi32(t, shift);
+ }
+ else {
+ return _mm_srli_epi32(q, more);
+ }
+ }
+}
+
+__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
+ __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+ __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+ return _mm_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ return _mm_srli_epi64(numers, more);
+ }
+ else {
+ __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // uint32_t t = ((numer - q) >> 1) + q;
+ // return t >> denom->shift;
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+ return _mm_srli_epi64(t, shift);
+ }
+ else {
+ return _mm_srli_epi64(q, more);
+ }
+ }
+}
+
+__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
+ __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+ __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+ return _mm_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
+ uint8_t more = denom->more;
+ if (!denom->magic) {
+ uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ uint32_t mask = (1U << shift) - 1;
+ __m128i roundToZeroTweak = _mm_set1_epi32(mask);
+ // q = numer + ((numer >> 31) & roundToZeroTweak);
+ __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
+ q = _mm_srai_epi32(q, shift);
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign);
+ return q;
+ }
+ else {
+ __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign));
+ }
+ // q >>= shift
+ q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK);
+ q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) {
+ int32_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic));
+ q = _mm_add_epi32(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2
+ uint32_t is_power_of_2 = (magic == 0);
+ __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31
+ __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2);
+ q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+ q = _mm_srai_epi32(q, shift); // q >>= shift
+ q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+////////// SINT64
+
+__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) {
+ uint8_t more = denom->more;
+ int64_t magic = denom->magic;
+ if (magic == 0) { // shift path
+ uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ uint64_t mask = (1ULL << shift) - 1;
+ __m128i roundToZeroTweak = _mm_set1_epi64x(mask);
+ // q = numer + ((numer >> 63) & roundToZeroTweak);
+ __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak));
+ q = libdivide_s64_shift_right_vector(q, shift);
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q = (q ^ sign) - sign;
+ q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign);
+ return q;
+ }
+ else {
+ __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+ if (more & LIBDIVIDE_ADD_MARKER) {
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+ // q += ((numer ^ sign) - sign);
+ q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign));
+ }
+ // q >>= shift
+ q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK);
+ q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0)
+ return q;
+ }
+}
+
+__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) {
+ int64_t magic = denom->magic;
+ uint8_t more = denom->more;
+ uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+ // must be arithmetic shift
+ __m128i sign = _mm_set1_epi32((int8_t)more >> 7);
+
+ // libdivide_mullhi_s64(numers, magic);
+ __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic));
+ q = _mm_add_epi64(q, numers); // q += numers
+
+ // If q is non-negative, we have nothing to do.
+ // If q is negative, we want to add either (2**shift)-1 if d is
+ // a power of 2, or (2**shift) if it is not a power of 2.
+ uint32_t is_power_of_2 = (magic == 0);
+ __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+ __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+ q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+ q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+ q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+ return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+ BRANCHFULL,
+ BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+ #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+ #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+ #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+ #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+ #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+ return libdivide_##ALGO##_do_vector(n, &denom); \
+ }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+ libdivide_##ALGO##_t denom; \
+ dispatcher() { } \
+ dispatcher(T d) \
+ : denom(libdivide_##ALGO##_gen(d)) \
+ { } \
+ T divide(T n) const { \
+ return libdivide_##ALGO##_do(n, &denom); \
+ } \
+ LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+ T recover() const { \
+ return libdivide_##ALGO##_recover(&denom); \
+ }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+ // We leave the default constructor empty so that creating
+ // an array of dividers and then initializing them
+ // later doesn't slow us down.
+ divider() { }
+
+ // Constructor that takes the divisor as a parameter
+ divider(T d) : div(d) { }
+
+ // Divides n by the divisor
+ T divide(T n) const {
+ return div.divide(n);
+ }
+
+ // Recovers the divisor, returns the value that was
+ // used to initialize this divider object.
+ T recover() const {
+ return div.recover();
+ }
+
+ bool operator==(const divider<T, ALGO>& other) const {
+ return div.denom.magic == other.div.denom.magic &&
+ div.denom.more == other.div.denom.more;
+ }
+
+ bool operator!=(const divider<T, ALGO>& other) const {
+ return !(*this == other);
+ }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+ // Treats the vector as packed integer values with the same type as
+ // the divider (e.g. s32, u32, s64, u64) and divides each of
+ // them by the divider, returning the packed quotients.
+ LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+ return div.divide(n);
+ }
+#endif
+
+private:
+ // Storage for the actual divisor
+ dispatcher<std::is_integral<T>::value,
+ std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+ return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+ n = div.divide(n);
+ return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+ // Overload of operator / for vector division
+ template<typename T, int ALGO>
+ LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+ return div.divide(n);
+ }
+ // Overload of operator /= for vector division
+ template<typename T, int ALGO>
+ LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+ n = div.divide(n);
+ return n;
+ }
+#endif
+
+// libdivide::branchfree_divider<T>
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
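+// Usage sketch (illustrative): precompute a divider once, then reuse it
+// through the overloaded operators above.
+//
+// libdivide::divider<uint64_t> fast_d(7); // branchfull divider
+// libdivide::branchfree_divider<uint64_t> bf_d(7); // branchfree divider
+// uint64_t q1 = n / fast_d; // same result as n / 7
+// uint64_t q2 = n / bf_d;
+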
+} // namespace libdivide
+
+#endif // __cplusplus
+
+#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/multiarray_api.txt b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/multiarray_api.txt
new file mode 100644
index 00000000..3f56b437
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/multiarray_api.txt
@@ -0,0 +1,2501 @@
+
+===========
+NumPy C-API
+===========
+::
+
+ unsigned int
+ PyArray_GetNDArrayCVersion(void )
+
+
+Included at the very start so it is not auto-grabbed and thus not labeled.
+
+::
+
+ int
+ PyArray_SetNumericOps(PyObject *dict)
+
+Set internal structure with number functions that all arrays will use
+
+::
+
+ PyObject *
+ PyArray_GetNumericOps(void )
+
+Get dictionary showing number functions that all arrays will use
+
+::
+
+ int
+ PyArray_INCREF(PyArrayObject *mp)
+
+For object arrays, increment all internal references.
+
+::
+
+ int
+ PyArray_XDECREF(PyArrayObject *mp)
+
+Decrement all internal references for object arrays.
+(or arrays with object fields)
+
+::
+
+ void
+ PyArray_SetStringFunction(PyObject *op, int repr)
+
+Set the array print function to be a Python function.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromType(int type)
+
+Get the PyArray_Descr structure for a type.
+
+::
+
+ PyObject *
+ PyArray_TypeObjectFromType(int type)
+
+Get a typeobject from a type-number -- can return NULL.
+
+New reference
+
+::
+
+ char *
+ PyArray_Zero(PyArrayObject *arr)
+
+Get pointer to zero of correct type for array.
+
+::
+
+ char *
+ PyArray_One(PyArrayObject *arr)
+
+Get pointer to one of correct type for array
+
+::
+
+ PyObject *
+ PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int
+ is_f_order)
+
+For backward compatibility
+
+Cast an array using typecode structure.
+steals reference to dtype --- cannot be NULL
+
+This function always makes a copy of arr, even if the dtype
+doesn't change.
+
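+A minimal usage sketch (illustrative; error handling omitted)::
+
+ PyArray_Descr *dt = PyArray_DescrFromType(NPY_INT32);
+ /* the reference to dt is stolen by the call */
+ PyObject *out = PyArray_CastToType(arr, dt, 0);
+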
+::
+
+ int
+ PyArray_CastTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array.
+
+::
+
+ int
+ PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp)
+
+Cast to an already created array. Arrays don't have to be "broadcastable";
+the only requirement is that they have the same number of elements.
+
+::
+
+ int
+ PyArray_CanCastSafely(int fromtype, int totype)
+
+Check the type coercion rules.
+
+::
+
+ npy_bool
+ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to)
+
+leaves reference count alone --- cannot be NULL
+
+PyArray_CanCastTypeTo is equivalent to this, but adds a 'casting'
+parameter.
+
+::
+
+ int
+ PyArray_ObjectType(PyObject *op, int minimum_type)
+
+Return the typecode of the array a Python object would be converted to
+
+Returns the type number the result should have, or NPY_NOTYPE on error.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype)
+
+new reference -- accepts NULL for mintype
+
+::
+
+ PyArrayObject **
+ PyArray_ConvertToCommonType(PyObject *op, int *retn)
+
+
+This function is only used in one place within NumPy and should
+generally be avoided. It is provided mainly for backward compatibility.
+
+The user of the function has to free the returned array with PyDataMem_FREE.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromScalar(PyObject *sc)
+
+Return descr object from array scalar.
+
+New reference
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrFromTypeObject(PyObject *type)
+
+
+::
+
+ npy_intp
+ PyArray_Size(PyObject *op)
+
+Compute the size of an array (in number of items)
+
+::
+
+ PyObject *
+ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
+
+Get scalar-equivalent to a region of memory described by a descriptor.
+
+::
+
+ PyObject *
+ PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode)
+
+Get 0-dim array from scalar
+
+0-dim array from array-scalar object
+always contains a copy of the data
+unless outcode is NULL, it is of void type and the referrer does
+not own it either.
+
+steals reference to outcode
+
+::
+
+ void
+ PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr)
+
+Convert to c-type
+
+No error checking is performed -- ctypeptr must be the same type as the
+scalar. In the case of a flexible type, the data is not copied into
+ctypeptr; instead, ctypeptr is expected to be a pointer to pointer.
+
+::
+
+ int
+ PyArray_CastScalarToCtype(PyObject *scalar, void
+ *ctypeptr, PyArray_Descr *outcode)
+
+Cast Scalar to c-type
+
+The output buffer must be large enough to receive the value, even for
+flexible types. This is different from ScalarAsCtype, where only a
+reference is returned for flexible types.
+
+This may not work right on narrow builds for NumPy unicode scalars.
+
+::
+
+ int
+ PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr
+ *indescr, void *ctypeptr, int outtype)
+
+Cast Scalar to c-type
+
+::
+
+ PyObject *
+ PyArray_ScalarFromObject(PyObject *object)
+
+Get an Array Scalar From a Python Object
+
+Returns NULL if unsuccessful, but an error is only set if another error
+occurred. Currently only Numeric-like objects are supported.
+
+::
+
+ PyArray_VectorUnaryFunc *
+ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
+
+Get a cast function to cast from the input descriptor to the
+output type_number (must be a registered data-type).
+Returns NULL if unsuccessful.
+
+::
+
+ PyObject *
+ PyArray_FromDims(int NPY_UNUSED(nd) , int *NPY_UNUSED(d) , int
+ NPY_UNUSED(type) )
+
+Deprecated, use PyArray_SimpleNew instead.
+
+::
+
+ PyObject *
+ PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd) , int
+ *NPY_UNUSED(d) , PyArray_Descr
+ *descr, char *NPY_UNUSED(data) )
+
+Deprecated, use PyArray_NewFromDescr instead.
+
+::
+
+ PyObject *
+ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int
+ min_depth, int max_depth, int flags, PyObject
+ *context)
+
+Does not check for NPY_ARRAY_ENSURECOPY and NPY_ARRAY_NOTSWAPPED in flags
+Steals a reference to newtype --- which can be NULL
+
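+A minimal usage sketch (illustrative; error handling omitted)::
+
+ /* convert an arbitrary Python object to a contiguous double array */
+ PyObject *arr = PyArray_FromAny(obj, PyArray_DescrFromType(NPY_DOUBLE),
+ 0, 0, NPY_ARRAY_CARRAY, NULL);
+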
+::
+
+ PyObject *
+ PyArray_EnsureArray(PyObject *op)
+
+This is a quick wrapper around
+PyArray_FromAny(op, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL)
+that special cases Arrays and PyArray_Scalars up front
+It *steals a reference* to the object.
+It also guarantees that the result is of PyArray_Type.
+Because it decrefs op if any conversion needs to take place,
+it can be used like PyArray_EnsureArray(some_function(...)).
+
+::
+
+ PyObject *
+ PyArray_EnsureAnyArray(PyObject *op)
+
+
+::
+
+ PyObject *
+ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char
+ *sep)
+
+
+Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an
+array corresponding to the data encoded in that file.
+
+The reference to `dtype` is stolen (it is possible that the passed in
+dtype is not held on to).
+
+The number of elements to read is given as ``num``; if it is < 0, then
+as many as possible are read.
+
+If ``sep`` is NULL or empty, then binary data is assumed, else
+text data, with ``sep`` as the separator between elements. Whitespace in
+the separator matches any length of whitespace in the text, and a match
+for whitespace around the separator is added.
+
+For memory-mapped files, use the buffer interface. No more data than
+necessary is read by this routine.
+
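+A minimal usage sketch (illustrative; error handling omitted)::
+
+ FILE *fp = fopen("data.bin", "rb");
+ PyObject *arr = PyArray_FromFile(fp, PyArray_DescrFromType(NPY_FLOAT64),
+ -1, NULL); /* binary data, read as many as possible */
+ fclose(fp);
+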
+::
+
+ PyObject *
+ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr
+ *dtype, npy_intp num, char *sep)
+
+
+Given a pointer to a string ``data``, a string length ``slen``, and
+a ``PyArray_Descr``, return an array corresponding to the data
+encoded in that string.
+
+If the dtype is NULL, the default array type is used (double).
+If non-null, the reference is stolen.
+
+If ``slen`` is < 0, then the end of string is used for text data.
+It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs
+would be the norm).
+
+The number of elements to read is given as ``num``; if it is < 0, then
+as many as possible are read.
+
+If ``sep`` is NULL or empty, then binary data is assumed, else
+text data, with ``sep`` as the separator between elements. Whitespace in
+the separator matches any length of whitespace in the text, and a match
+for whitespace around the separator is added.
+
+::
+
+ PyObject *
+ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, npy_intp
+ count, npy_intp offset)
+
+
+::
+
+ PyObject *
+ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
+
+
+steals a reference to dtype (which cannot be NULL)
+
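+A minimal usage sketch (illustrative; error handling omitted)::
+
+ PyArray_Descr *dt = PyArray_DescrFromType(NPY_DOUBLE);
+ /* the reference to dt is stolen; -1 reads until exhaustion */
+ PyObject *arr = PyArray_FromIter(iterable, dt, -1);
+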
+::
+
+ PyObject *
+ PyArray_Return(PyArrayObject *mp)
+
+
+Return either an array or the appropriate Python object if the array
+is 0d and matches a Python type.
+steals reference to mp
+
+::
+
+ PyObject *
+ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int
+ offset)
+
+Get a subset of bytes from each element of the array
+steals reference to typed, must not be NULL
+
+::
+
+ int
+ PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int
+ offset, PyObject *val)
+
+Set a subset of bytes from each element of the array
+steals reference to dtype, must not be NULL
+
+::
+
+ PyObject *
+ PyArray_Byteswap(PyArrayObject *self, npy_bool inplace)
+
+
+::
+
+ PyObject *
+ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int
+ refcheck, NPY_ORDER NPY_UNUSED(order) )
+
+Resize (reallocate data). Only works if nothing else is referencing this
+array and it is contiguous. If refcheck is 0, then the reference count is
+not checked and assumed to be 1. You still must own this data and have no
+weak-references and no base object.
+
+::
+
+ int
+ PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src)
+
+Move the memory of one array into another, allowing for overlapping data.
+
+Returns 0 on success, negative on failure.
+
+::
+
+ int
+ PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src)
+
+Copy an Array into another array.
+Broadcast to the destination shape if necessary.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+ int
+ PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src)
+
+Copy an Array into another array -- memory must not overlap.
+Does not require src and dest to have "broadcastable" shapes
+(only the same number of elements).
+
+TODO: For NumPy 2.0, this could accept an order parameter which
+only allows NPY_CORDER and NPY_FORDER. Could also rename
+this to CopyAsFlat to make the name more intuitive.
+
+Returns 0 on success, -1 on error.
+
+::
+
+ int
+ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
+
+
+::
+
+ PyObject *
+ PyArray_NewCopy(PyArrayObject *obj, NPY_ORDER order)
+
+Copy an array.
+
+::
+
+ PyObject *
+ PyArray_ToList(PyArrayObject *self)
+
+To List
+
+::
+
+ PyObject *
+ PyArray_ToString(PyArrayObject *self, NPY_ORDER order)
+
+
+::
+
+ int
+ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
+
+To File
+
+::
+
+ int
+ PyArray_Dump(PyObject *self, PyObject *file, int protocol)
+
+
+::
+
+ PyObject *
+ PyArray_Dumps(PyObject *self, int protocol)
+
+
+::
+
+ int
+ PyArray_ValidType(int type)
+
+Is the typenum valid?
+
+::
+
+ void
+ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
+
+Update Several Flags at once.
+
+::
+
+ PyObject *
+ PyArray_New(PyTypeObject *subtype, int nd, npy_intp const *dims, int
+ type_num, npy_intp const *strides, void *data, int
+ itemsize, int flags, PyObject *obj)
+
+Generic new array creation routine.
+
+::
+
+ PyObject *
+ PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int
+ nd, npy_intp const *dims, npy_intp const
+ *strides, void *data, int flags, PyObject *obj)
+
+Generic new array creation routine.
+
+steals a reference to descr. On failure or when dtype->subarray is
+true, dtype will be decrefed.
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrNew(PyArray_Descr *base)
+
+base cannot be NULL
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrNewFromType(int type_num)
+
+
+::
+
+ double
+ PyArray_GetPriority(PyObject *obj, double default_)
+
+Get Priority from object
+
+::
+
+ PyObject *
+ PyArray_IterNew(PyObject *obj)
+
+Get Iterator.
+
+::
+
+ PyObject*
+ PyArray_MultiIterNew(int n, ... )
+
+Get MultiIterator.
+
+::
+
+ int
+ PyArray_PyIntAsInt(PyObject *o)
+
+
+::
+
+ npy_intp
+ PyArray_PyIntAsIntp(PyObject *o)
+
+
+::
+
+ int
+ PyArray_Broadcast(PyArrayMultiIterObject *mit)
+
+
+::
+
+ void
+ PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj)
+
+Assumes contiguous
+
+::
+
+ int
+ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
+
+
+::
+
+ npy_bool
+ PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp
+ offset, npy_intp const *dims, npy_intp const
+ *newstrides)
+
+
+::
+
+ PyArray_Descr *
+ PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian)
+
+
+Returns a copy of the PyArray_Descr structure with the byteorder
+altered:
+no arguments: the byteorder is swapped (in all subfields as well)
+single argument: the byteorder is forced to the given state
+(in all subfields as well)
+
+Valid states: ('big', '>'), ('little', '<'), ('native', '=')
+
+If a descr structure with byteorder '|' is encountered, its own
+byteorder is not changed but any fields are.
+
+Deep byteorder change of a data-type descriptor.
+Leaves the reference count of self unchanged --- does not DECREF self.
+
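+A minimal usage sketch (illustrative)::
+
+ /* NPY_SWAP swaps the byteorder; NPY_BIG, NPY_LITTLE and NPY_NATIVE
+ force a specific state */
+ PyArray_Descr *swapped = PyArray_DescrNewByteorder(descr, NPY_SWAP);
+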
+::
+
+ PyObject *
+ PyArray_IterAllButAxis(PyObject *obj, int *inaxis)
+
+Get an Iterator that iterates over all but one axis (don't use this with
+PyArray_ITER_GOTO1D). If the given axis is negative, it will be
+overwritten with the axis having the smallest stride.
+
+::
+
+ PyObject *
+ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int
+ min_depth, int max_depth, int requires, PyObject
+ *context)
+
+steals a reference to descr -- accepts NULL
+
+::
+
+ PyObject *
+ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int
+ flags)
+
+steals a reference to newtype --- accepts NULL
+
+::
+
+ PyObject *
+ PyArray_FromInterface(PyObject *origin)
+
+
+::
+
+ PyObject *
+ PyArray_FromStructInterface(PyObject *input)
+
+
+::
+
+ PyObject *
+ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject
+ *context)
+
+
+::
+
+ NPY_SCALARKIND
+ PyArray_ScalarKind(int typenum, PyArrayObject **arr)
+
+ScalarKind
+
+Returns the scalar kind of a type number, with an
+optional tweak based on the scalar value itself.
+If no scalar is provided, it returns INTPOS_SCALAR
+for both signed and unsigned integers, otherwise
+it checks the sign of any signed integer to choose
+INTNEG_SCALAR when appropriate.
+
+::
+
+ int
+ PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND
+ scalar)
+
+
+Determines whether the data type 'thistype', with
+scalar kind 'scalar', can be coerced into 'neededtype'.
+
+::
+
+ PyObject *
+ PyArray_NewFlagsObject(PyObject *obj)
+
+
+Get New ArrayFlagsObject
+
+::
+
+ npy_bool
+ PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to)
+
+See if array scalars can be cast.
+
+TODO: For NumPy 2.0, add a NPY_CASTING parameter.
+
+::
+
+ int
+ PyArray_CompareUCS4(npy_ucs4 const *s1, npy_ucs4 const *s2, size_t
+ len)
+
+
+::
+
+ int
+ PyArray_RemoveSmallest(PyArrayMultiIterObject *multi)
+
+Adjusts previously broadcasted iterators so that the axis with
+the smallest sum of iterator strides is not iterated over.
+Returns the dimension which is smallest in the range [0, multi->nd).
+-1 is returned if multi->nd == 0.
+
+Don't use this with PyArray_ITER_GOTO1D because the factors are not
+adjusted.
+
+::
+
+ int
+ PyArray_ElementStrides(PyObject *obj)
+
+
+::
+
+ void
+ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
+
+XINCREF all objects in a single array item. This is complicated for
+structured datatypes where the positions of the objects need to be extracted.
+The function is executed recursively for each nested field or subarray dtype,
+such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`.
+
+::
+
+ void
+ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
+
+
+XDECREF all objects in a single array item. This is complicated for
+structured datatypes where the positions of the objects need to be extracted.
+The function is executed recursively for each nested field or subarray dtype,
+such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`.
+
+::
+
+ PyObject *
+ PyArray_FieldNames(PyObject *fields)
+
+Return the tuple of ordered field names from a dictionary.
+
+::
+
+ PyObject *
+ PyArray_Transpose(PyArrayObject *ap, PyArray_Dims *permute)
+
+Return Transpose.
+
+::
+
+ PyObject *
+ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int
+ axis, PyArrayObject *out, NPY_CLIPMODE clipmode)
+
+Take
+
+::
+
+ PyObject *
+ PyArray_PutTo(PyArrayObject *self, PyObject*values0, PyObject
+ *indices0, NPY_CLIPMODE clipmode)
+
+Put values into an array
+
+::
+
+ PyObject *
+ PyArray_PutMask(PyArrayObject *self, PyObject*values0, PyObject*mask0)
+
+Put values into an array according to a mask.
+
+::
+
+ PyObject *
+ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
+
+Repeat the array.
+
+::
+
+ PyObject *
+ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject
+ *out, NPY_CLIPMODE clipmode)
+
+
+::
+
+ int
+ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+Sort an array in-place
+
+::
+
+ PyObject *
+ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
+
+ArgSort an array
+
+::
+
+ PyObject *
+ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE
+ side, PyObject *perm)
+
+
+Search the sorted array op1 for the location of the items in op2. The
+result is an array of indexes, one for each element in op2, such that if
+the item were to be inserted in op1 just before that index the array
+would still be in sorted order.
+
+Parameters
+----------
+op1 : PyArrayObject *
+Array to be searched, must be 1-D.
+op2 : PyObject *
+Array of items whose insertion indexes in op1 are wanted
+side : {NPY_SEARCHLEFT, NPY_SEARCHRIGHT}
+If NPY_SEARCHLEFT, return first valid insertion indexes
+If NPY_SEARCHRIGHT, return last valid insertion indexes
+perm : PyObject *
+Permutation array that sorts op1 (optional)
+
+Returns
+-------
+ret : PyObject *
+New reference to npy_intp array containing indexes where items in op2
+could be validly inserted into op1. NULL on error.
+
+Notes
+-----
+Binary search is used to find the indexes.
+
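+A minimal call sketch (hypothetical `sorted_arr`, a sorted 1-D array,
+and `values`)::
+
+    /* Indices at which `values` could be inserted into `sorted_arr`. */
+    PyObject *indices = PyArray_SearchSorted(sorted_arr, values,
+                                             NPY_SEARCHLEFT, NULL);
+    if (indices == NULL) {
+        return NULL;
+    }
+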
+::
+
+ PyObject *
+ PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out)
+
+ArgMax
+
+::
+
+ PyObject *
+ PyArray_ArgMin(PyArrayObject *op, int axis, PyArrayObject *out)
+
+ArgMin
+
+::
+
+ PyObject *
+ PyArray_Reshape(PyArrayObject *self, PyObject *shape)
+
+Reshape
+
+::
+
+ PyObject *
+ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER
+ order)
+
+New shape for an array
+
+::
+
+ PyObject *
+ PyArray_Squeeze(PyArrayObject *self)
+
+
+return a new view of the array object with all of its unit-length
+dimensions squeezed out if needed, otherwise
+return the same array.
+
+::
+
+ PyObject *
+ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject
+ *pytype)
+
+View
+steals a reference to type -- accepts NULL
+
+::
+
+ PyObject *
+ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2)
+
+SwapAxes
+
+::
+
+ PyObject *
+ PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out)
+
+Max
+
+::
+
+ PyObject *
+ PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out)
+
+Min
+
+::
+
+ PyObject *
+ PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out)
+
+Ptp
+
+::
+
+ PyObject *
+ PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+Mean
+
+::
+
+ PyObject *
+ PyArray_Trace(PyArrayObject *self, int offset, int axis1, int
+ axis2, int rtype, PyArrayObject *out)
+
+Trace
+
+::
+
+ PyObject *
+ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int
+ axis2)
+
+Diagonal
+
+In NumPy versions prior to 1.7, this function always returned a copy of
+the diagonal array. In 1.7, the code has been updated to compute a view
+onto 'self', but it still copies this array before returning, as well as
+setting the internal WARN_ON_WRITE flag. In a future version, it will
+simply return a view onto self.
+
+::
+
+ PyObject *
+ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject
+ *max, PyArrayObject *out)
+
+Clip
+
+::
+
+ PyObject *
+ PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out)
+
+Conjugate
+
+::
+
+ PyObject *
+ PyArray_Nonzero(PyArrayObject *self)
+
+Nonzero
+
+TODO: In NumPy 2.0, should make the iteration order a parameter.
+
+::
+
+ PyObject *
+ PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out, int variance)
+
+Std.
+Set the variance argument to 1 to bypass the square-root calculation
+and return the variance instead.
+
+::
+
+ PyObject *
+ PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+Sum
+
+::
+
+ PyObject *
+ PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+CumSum
+
+::
+
+ PyObject *
+ PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject
+ *out)
+
+Prod
+
+::
+
+ PyObject *
+ PyArray_CumProd(PyArrayObject *self, int axis, int
+ rtype, PyArrayObject *out)
+
+CumProd
+
+::
+
+ PyObject *
+ PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out)
+
+All
+
+::
+
+ PyObject *
+ PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out)
+
+Any
+
+::
+
+ PyObject *
+ PyArray_Compress(PyArrayObject *self, PyObject *condition, int
+ axis, PyArrayObject *out)
+
+Compress
+
+::
+
+ PyObject *
+ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
+
+Flatten
+
+::
+
+ PyObject *
+ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order)
+
+Ravel
+Returns a contiguous array
+
+::
+
+ npy_intp
+ PyArray_MultiplyList(npy_intp const *l1, int n)
+
+Multiply a List
+
+::
+
+ int
+ PyArray_MultiplyIntList(int const *l1, int n)
+
+Multiply a List of ints
+
+::
+
+ void *
+ PyArray_GetPtr(PyArrayObject *obj, npy_intp const*ind)
+
+Produce a pointer into array
+
+::
+
+ int
+ PyArray_CompareLists(npy_intp const *l1, npy_intp const *l2, int n)
+
+Compare Lists
+
+::
+
+ int
+ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int
+ nd, PyArray_Descr*typedescr)
+
+Simulate a C-array
+steals a reference to typedescr -- can be NULL
+
+::
+
+ int
+ PyArray_As1D(PyObject **NPY_UNUSED(op) , char **NPY_UNUSED(ptr) , int
+ *NPY_UNUSED(d1) , int NPY_UNUSED(typecode) )
+
+Convert to a 1D C-array
+
+::
+
+ int
+ PyArray_As2D(PyObject **NPY_UNUSED(op) , char ***NPY_UNUSED(ptr) , int
+ *NPY_UNUSED(d1) , int *NPY_UNUSED(d2) , int
+ NPY_UNUSED(typecode) )
+
+Convert to a 2D C-array
+
+::
+
+ int
+ PyArray_Free(PyObject *op, void *ptr)
+
+Free pointers created if As2D is called
+
+::
+
+ int
+ PyArray_Converter(PyObject *object, PyObject **address)
+
+
+Useful to pass as a converter function for O& processing in
+PyArg_ParseTuple.
+
+This conversion function can be used with the "O&" argument for
+PyArg_ParseTuple. It will immediately return an object of array type
+or will convert any other object to an NPY_ARRAY_CARRAY.
+
+If you use PyArray_Converter, you must DECREF the array when finished
+as you get a new reference to it.
+
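+A minimal sketch of the "O&" pattern (the module function `my_func` is
+hypothetical)::
+
+    static PyObject *
+    my_func(PyObject *self, PyObject *args)
+    {
+        PyObject *arr = NULL;
+
+        if (!PyArg_ParseTuple(args, "O&", PyArray_Converter, &arr)) {
+            return NULL;
+        }
+        /* ... use arr ... */
+        Py_DECREF(arr);  /* the converter handed back a new reference */
+        Py_RETURN_NONE;
+    }
+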
+::
+
+ int
+ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals)
+
+PyArray_IntpFromSequence
+Returns the number of integers converted or -1 if an error occurred.
+vals must be large enough to hold maxvals
+
+::
+
+ PyObject *
+ PyArray_Concatenate(PyObject *op, int axis)
+
+Concatenate
+
+Concatenate an arbitrary Python sequence into an array.
+op is a Python object supporting the sequence interface.
+Its elements will be concatenated together to form a single
+multidimensional array. If axis is NPY_MAXDIMS or bigger, each
+sequence object will be flattened before concatenation.
+
+::
+
+ PyObject *
+ PyArray_InnerProduct(PyObject *op1, PyObject *op2)
+
+Numeric.innerproduct(a,v)
+
+::
+
+ PyObject *
+ PyArray_MatrixProduct(PyObject *op1, PyObject *op2)
+
+Numeric.matrixproduct(a,v)
+just like inner product but does the swapaxes stuff on the fly
+
+::
+
+ PyObject *
+ PyArray_CopyAndTranspose(PyObject *op)
+
+Copy and Transpose
+
+Could deprecate this function, as there isn't a speed benefit over
+calling Transpose and then Copy.
+
+::
+
+ PyObject *
+ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode)
+
+Numeric.correlate(a1,a2,mode)
+
+::
+
+ int
+ PyArray_TypestrConvert(int itemsize, int gentype)
+
+Typestr converter
+
+::
+
+ int
+ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
+
+Get typenum from an object -- None goes to NPY_DEFAULT_TYPE.
+This function takes a Python object representing a type and converts it
+to the correct PyArray_Descr * structure to describe the type.
+
+Many objects can be used to represent a data-type which in NumPy is
+quite a flexible concept.
+
+This is the central code that converts Python objects to
+Type-descriptor objects that are used throughout numpy.
+
+Returns a new reference in *at, but the returned descriptor should not
+be modified as it may be one of the canonical immutable objects or
+a reference to the input obj.
+
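+A minimal conversion sketch (hypothetical `obj`, e.g. the string "f4"
+or a Python type object)::
+
+    PyArray_Descr *descr = NULL;
+
+    if (PyArray_DescrConverter(obj, &descr) != NPY_SUCCEED) {
+        return NULL;  /* conversion failed, exception already set */
+    }
+    /* ... use descr ... */
+    Py_DECREF(descr);  /* *at holds a new reference */
+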
+::
+
+ int
+ PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at)
+
+Get typenum from an object -- None goes to NULL
+
+::
+
+ int
+ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
+
+Get intp chunk from sequence
+
+This function takes a Python sequence object and allocates and
+fills in an intp array with the converted values.
+
+Remember to free the pointer seq.ptr when done using
+PyDimMem_FREE(seq.ptr).
+
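+A minimal sketch (hypothetical `obj`, e.g. a shape tuple)::
+
+    PyArray_Dims shape = {NULL, 0};
+
+    if (PyArray_IntpConverter(obj, &shape) != NPY_SUCCEED) {
+        return NULL;
+    }
+    /* ... use shape.ptr (npy_intp *) and shape.len ... */
+    PyDimMem_FREE(shape.ptr);  /* the caller owns this allocation */
+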
+::
+
+ int
+ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
+
+Get buffer chunk from object
+
+This function takes a Python object which exposes the (single-segment)
+buffer interface and returns a pointer to the data segment.
+
+You should increment the reference count of buf->base by one
+if you will hang on to the reference.
+
+You only get a borrowed reference to the object. Do not free the
+memory.
+
+::
+
+ int
+ PyArray_AxisConverter(PyObject *obj, int *axis)
+
+Get axis from an object (possibly None) -- a converter function.
+
+See also PyArray_ConvertMultiAxis, which also handles a tuple of axes.
+
+::
+
+ int
+ PyArray_BoolConverter(PyObject *object, npy_bool *val)
+
+Convert an object to true / false
+
+::
+
+ int
+ PyArray_ByteorderConverter(PyObject *obj, char *endian)
+
+Convert object to endian
+
+::
+
+ int
+ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val)
+
+Convert an object to FORTRAN / C / ANY / KEEP
+
+::
+
+ unsigned char
+ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+
+
+This function returns true if the two typecodes are
+equivalent (same basic kind and same itemsize).
+
+::
+
+ PyObject *
+ PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int
+ is_f_order)
+
+Zeros
+
+steals a reference to type. On failure or when type->subarray is
+true, type will be decrefed.
+accepts NULL type
+
+::
+
+ PyObject *
+ PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int
+ is_f_order)
+
+Empty
+
+accepts NULL type
+steals a reference to type
+
+::
+
+ PyObject *
+ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y)
+
+Where
+
+::
+
+ PyObject *
+ PyArray_Arange(double start, double stop, double step, int type_num)
+
+Arange.
+
+::
+
+ PyObject *
+ PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject
+ *step, PyArray_Descr *dtype)
+
+
+ArangeObj.
+
+This doesn't change the references.
+
+::
+
+ int
+ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
+
+Convert object to sort kind
+
+::
+
+ PyObject *
+ PyArray_LexSort(PyObject *sort_keys, int axis)
+
+LexSort an array providing indices that will sort a collection of arrays
+lexicographically. The first key is sorted on first, followed by the second key
+-- requires that a stable ("merge") argsort is available for each sort_key.
+
+Returns an index array that shows the indexes for the lexicographic sort along
+the given axis.
+
+::
+
+ PyObject *
+ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
+
+Round
+
+::
+
+ unsigned char
+ PyArray_EquivTypenums(int typenum1, int typenum2)
+
+
+::
+
+ int
+ PyArray_RegisterDataType(PyArray_Descr *descr)
+
+Register Data type
+Does not change the reference count of descr
+
+::
+
+ int
+ PyArray_RegisterCastFunc(PyArray_Descr *descr, int
+ totype, PyArray_VectorUnaryFunc *castfunc)
+
+Register Casting Function
+Replaces any function currently stored.
+
+::
+
+ int
+ PyArray_RegisterCanCast(PyArray_Descr *descr, int
+ totype, NPY_SCALARKIND scalar)
+
+Register a type number indicating that a descriptor can be cast
+to it safely
+
+::
+
+ void
+ PyArray_InitArrFuncs(PyArray_ArrFuncs *f)
+
+Initialize arrfuncs to NULL
+
+::
+
+ PyObject *
+ PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
+
+PyArray_IntTupleFromIntp
+
+::
+
+ int
+ PyArray_TypeNumFromName(char const *str)
+
+
+::
+
+ int
+ PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val)
+
+Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP
+
+::
+
+ int
+ PyArray_OutputConverter(PyObject *object, PyArrayObject **address)
+
+Useful to pass as a converter function for O& processing in
+PyArg_ParseTuple for output arrays.
+
+::
+
+ PyObject *
+ PyArray_BroadcastToShape(PyObject *obj, npy_intp *dims, int nd)
+
+Get Iterator broadcast to a particular shape
+
+::
+
+ void
+ _PyArray_SigintHandler(int signum)
+
+
+::
+
+ void*
+ _PyArray_GetSigintBuf(void )
+
+
+::
+
+ int
+ PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
+
+
+Get type-descriptor from an object forcing alignment if possible.
+None goes to DEFAULT type.
+
+Accepts any object with a .fields attribute and/or an .itemsize attribute
+(if the .fields attribute does not give the total size -- i.e. a partial
+record naming). If itemsize is given it must be >= the size computed from
+the fields.
+
+The .fields attribute must return a convertible dictionary if present.
+Result inherits from NPY_VOID.
+
+::
+
+ int
+ PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at)
+
+
+Get type-descriptor from an object forcing alignment if possible
+None goes to NULL.
+
+::
+
+ int
+ PyArray_SearchsideConverter(PyObject *obj, void *addr)
+
+Convert object to searchsorted side
+
+::
+
+ PyObject *
+ PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags)
+
+PyArray_CheckAxis
+
+check that axis is valid
+convert 0-d arrays to 1-d arrays
+
+::
+
+ npy_intp
+ PyArray_OverflowMultiplyList(npy_intp const *l1, int n)
+
+Multiply a List of Non-negative numbers with over-flow detection.
+
+::
+
+ int
+ PyArray_CompareString(const char *s1, const char *s2, size_t len)
+
+
+::
+
+ PyObject*
+ PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ... )
+
+Get MultiIterator from an array of Python objects and any additional
+arrays passed in as variadic arguments.
+
+PyObject **mps - array of PyObjects
+int n - number of PyObjects in the array
+int nadd - number of additional arrays to include in the iterator.
+
+Returns a multi-iterator object.
+
+::
+
+ int
+ PyArray_GetEndianness(void )
+
+
+::
+
+ unsigned int
+ PyArray_GetNDArrayCFeatureVersion(void )
+
+Returns the built-in (at compilation time) C API version
+
+::
+
+ PyObject *
+ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
+
+correlate(a1,a2,mode)
+
+This function computes the usual correlation (so that correlate(a1, a2) !=
+correlate(a2, a1)) and conjugates the second argument for complex inputs.
+
+::
+
+ PyObject*
+ PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp
+ *bounds, int mode, PyArrayObject*fill)
+
+A Neighborhood Iterator object.
+
+::
+
+ void
+ PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op) )
+
+This function is scheduled to be removed
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ void
+ PyArray_DatetimeToDatetimeStruct(npy_datetime NPY_UNUSED(val)
+ , NPY_DATETIMEUNIT NPY_UNUSED(fr)
+ , npy_datetimestruct *result)
+
+Fill the datetime struct from the value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ void
+ PyArray_TimedeltaToTimedeltaStruct(npy_timedelta NPY_UNUSED(val)
+ , NPY_DATETIMEUNIT NPY_UNUSED(fr)
+ , npy_timedeltastruct *result)
+
+Fill the timedelta struct from the timedelta value and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ npy_datetime
+ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT NPY_UNUSED(fr)
+ , npy_datetimestruct *NPY_UNUSED(d) )
+
+Create a datetime value from a filled datetime struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ npy_datetime
+ PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT NPY_UNUSED(fr)
+ , npy_timedeltastruct
+ *NPY_UNUSED(d) )
+
+Create a timedelta value from a filled timedelta struct and resolution unit.
+
+TO BE REMOVED - NOT USED INTERNALLY.
+
+::
+
+ NpyIter *
+ NpyIter_New(PyArrayObject *op, npy_uint32 flags, NPY_ORDER
+ order, NPY_CASTING casting, PyArray_Descr*dtype)
+
+Allocate a new iterator for one array object.
+
+::
+
+ NpyIter *
+ NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32
+ flags, NPY_ORDER order, NPY_CASTING
+ casting, npy_uint32 *op_flags, PyArray_Descr
+ **op_request_dtypes)
+
+Allocate a new iterator for more than one array object, using
+standard NumPy broadcasting rules and the default buffer size.
+
+::
+
+ NpyIter *
+ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32
+ flags, NPY_ORDER order, NPY_CASTING
+ casting, npy_uint32 *op_flags, PyArray_Descr
+ **op_request_dtypes, int oa_ndim, int
+ **op_axes, npy_intp *itershape, npy_intp
+ buffersize)
+
+Allocate a new iterator for multiple array objects, and advanced
+options for controlling the broadcasting, shape, and buffer size.
+
+::
+
+ NpyIter *
+ NpyIter_Copy(NpyIter *iter)
+
+Makes a copy of the iterator
+
+::
+
+ int
+ NpyIter_Deallocate(NpyIter *iter)
+
+Deallocate an iterator.
+
+To correctly work when an error is in progress, we have to check
+`PyErr_Occurred()`. This is necessary when buffers are not finalized
+or WritebackIfCopy is used. We could avoid that check by exposing a new
+function which is passed in whether or not a Python error is already set.
+
+::
+
+ npy_bool
+ NpyIter_HasDelayedBufAlloc(NpyIter *iter)
+
+Whether the buffer allocation is being delayed
+
+::
+
+ npy_bool
+ NpyIter_HasExternalLoop(NpyIter *iter)
+
+Whether the iterator handles the inner loop
+
+::
+
+ int
+ NpyIter_EnableExternalLoop(NpyIter *iter)
+
+Removes the inner loop handling (so HasExternalLoop returns true)
+
+::
+
+ npy_intp *
+ NpyIter_GetInnerStrideArray(NpyIter *iter)
+
+Get the array of strides for the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ npy_intp *
+ NpyIter_GetInnerLoopSizePtr(NpyIter *iter)
+
+Get a pointer to the size of the inner loop (when HasExternalLoop is true)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ int
+ NpyIter_Reset(NpyIter *iter, char **errmsg)
+
+Resets the iterator to its initial state
+
+The use of errmsg is discouraged; it cannot be guaranteed that the GIL
+will not be grabbed on casting errors even when this is passed.
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL. Note that cast errors may still lead to the GIL being
+grabbed temporarily.
+
+::
+
+ int
+ NpyIter_ResetBasePointers(NpyIter *iter, char **baseptrs, char
+ **errmsg)
+
+Resets the iterator to its initial state, with new base data pointers.
+This function requires great caution.
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL. Note that cast errors may still lead to the GIL being
+grabbed temporarily.
+
+::
+
+ int
+ NpyIter_ResetToIterIndexRange(NpyIter *iter, npy_intp istart, npy_intp
+ iend, char **errmsg)
+
+Resets the iterator to a new iterator index range
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL. Note that cast errors may still lead to the GIL being
+grabbed temporarily.
+
+::
+
+ int
+ NpyIter_GetNDim(NpyIter *iter)
+
+Gets the number of dimensions being iterated
+
+::
+
+ int
+ NpyIter_GetNOp(NpyIter *iter)
+
+Gets the number of operands being iterated
+
+::
+
+ NpyIter_IterNextFunc *
+ NpyIter_GetIterNext(NpyIter *iter, char **errmsg)
+
+Compute the specialized iteration function for an iterator
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
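+A condensed single-operand iteration sketch using the external loop
+(hypothetical `arr`; NULL checks on the helper results abbreviated)::
+
+    NpyIter *iter = NpyIter_New(arr,
+                                NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
+                                NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+    if (iter == NULL) {
+        return NULL;
+    }
+    NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+    char **dataptr = NpyIter_GetDataPtrArray(iter);
+    npy_intp *strideptr = NpyIter_GetInnerStrideArray(iter);
+    npy_intp *sizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+
+    do {
+        char *data = *dataptr;
+        npy_intp stride = *strideptr, count = *sizeptr;
+        while (count--) {
+            /* ... process the element at `data` ... */
+            data += stride;
+        }
+    } while (iternext(iter));
+    NpyIter_Deallocate(iter);
+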
+::
+
+ npy_intp
+ NpyIter_GetIterSize(NpyIter *iter)
+
+Gets the number of elements being iterated
+
+::
+
+ void
+ NpyIter_GetIterIndexRange(NpyIter *iter, npy_intp *istart, npy_intp
+ *iend)
+
+Gets the range of iteration indices being iterated
+
+::
+
+ npy_intp
+ NpyIter_GetIterIndex(NpyIter *iter)
+
+Gets the current iteration index
+
+::
+
+ int
+ NpyIter_GotoIterIndex(NpyIter *iter, npy_intp iterindex)
+
+Sets the iterator position to the specified iterindex,
+which matches the iteration order of the iterator.
+
+Returns NPY_SUCCEED on success, NPY_FAIL on failure.
+
+::
+
+ npy_bool
+ NpyIter_HasMultiIndex(NpyIter *iter)
+
+Whether the iterator is tracking a multi-index
+
+::
+
+ int
+ NpyIter_GetShape(NpyIter *iter, npy_intp *outshape)
+
+Gets the broadcast shape if a multi-index is being tracked by the iterator,
+otherwise gets the shape of the iteration as Fortran-order
+(fastest-changing index first).
+
+The reason Fortran-order is returned when a multi-index
+is not enabled is that this is providing a direct view into how
+the iterator traverses the n-dimensional space. The iterator organizes
+its memory from fastest index to slowest index, and when
+a multi-index is enabled, it uses a permutation to recover the original
+order.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ NpyIter_GetMultiIndexFunc *
+ NpyIter_GetGetMultiIndex(NpyIter *iter, char **errmsg)
+
+Compute a specialized get_multi_index function for the iterator
+
+If errmsg is non-NULL, it should point to a variable which will
+receive the error message, and no Python exception will be set.
+This is so that the function can be called from code not holding
+the GIL.
+
+::
+
+ int
+ NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp const *multi_index)
+
+Sets the iterator to the specified multi-index, which must have the
+correct number of entries for 'ndim'. It is only valid
+when NPY_ITER_MULTI_INDEX was passed to the constructor. This operation
+fails if the multi-index is out of bounds.
+
+Returns NPY_SUCCEED on success, NPY_FAIL on failure.
+
+::
+
+ int
+ NpyIter_RemoveMultiIndex(NpyIter *iter)
+
+Removes multi-index support from an iterator.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ npy_bool
+ NpyIter_HasIndex(NpyIter *iter)
+
+Whether the iterator is tracking an index
+
+::
+
+ npy_bool
+ NpyIter_IsBuffered(NpyIter *iter)
+
+Whether the iterator is buffered
+
+::
+
+ npy_bool
+ NpyIter_IsGrowInner(NpyIter *iter)
+
+Whether the inner loop can grow if buffering is unneeded
+
+::
+
+ npy_intp
+ NpyIter_GetBufferSize(NpyIter *iter)
+
+Gets the size of the buffer, or 0 if buffering is not enabled
+
+::
+
+ npy_intp *
+ NpyIter_GetIndexPtr(NpyIter *iter)
+
+Get a pointer to the index, if it is being tracked
+
+::
+
+ int
+ NpyIter_GotoIndex(NpyIter *iter, npy_intp flat_index)
+
+If the iterator is tracking an index, sets the iterator
+to the specified index.
+
+Returns NPY_SUCCEED on success, NPY_FAIL on failure.
+
+::
+
+ char **
+ NpyIter_GetDataPtrArray(NpyIter *iter)
+
+Get the array of data pointers (1 per object being iterated)
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ PyArray_Descr **
+ NpyIter_GetDescrArray(NpyIter *iter)
+
+Get the array of data type pointers (1 per object being iterated)
+
+::
+
+ PyArrayObject **
+ NpyIter_GetOperandArray(NpyIter *iter)
+
+Get the array of objects being iterated
+
+::
+
+ PyArrayObject *
+ NpyIter_GetIterView(NpyIter *iter, npy_intp i)
+
+Returns a view to the i-th object with the iterator's internal axes
+
+::
+
+ void
+ NpyIter_GetReadFlags(NpyIter *iter, char *outreadflags)
+
+Gets an array of read flags (1 per object being iterated)
+
+::
+
+ void
+ NpyIter_GetWriteFlags(NpyIter *iter, char *outwriteflags)
+
+Gets an array of write flags (1 per object being iterated)
+
+::
+
+ void
+ NpyIter_DebugPrint(NpyIter *iter)
+
+For debugging
+
+::
+
+ npy_bool
+ NpyIter_IterationNeedsAPI(NpyIter *iter)
+
+Whether the iteration loop, and in particular the iternext()
+function, needs API access. If this is true, the GIL must
+be retained while iterating.
+
+NOTE: Internally (currently), `NpyIter_GetTransferFlags` will
+additionally provide information on whether floating point errors
+may be raised during casts. The flags only reflect the API use
+necessary for buffering, though. So an iterator which does not require
+buffering may indicate `NpyIter_IterationNeedsAPI`, but not include
+the flag in `NpyIter_GetTransferFlags`.
+
+::
+
+ void
+ NpyIter_GetInnerFixedStrideArray(NpyIter *iter, npy_intp *out_strides)
+
+Get an array of strides which are fixed. Any strides which may
+change during iteration receive the value NPY_MAX_INTP. Once
+the iterator is ready to iterate, call this to get the strides
+which will always be fixed in the inner loop, then choose optimized
+inner loop functions which take advantage of those fixed strides.
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ int
+ NpyIter_RemoveAxis(NpyIter *iter, int axis)
+
+Removes an axis from iteration. This requires that NPY_ITER_MULTI_INDEX
+was set for iterator creation, and does not work if buffering is
+enabled. This function also resets the iterator to its initial state.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ npy_intp *
+ NpyIter_GetAxisStrideArray(NpyIter *iter, int axis)
+
+Gets the array of strides for the specified axis.
+If the iterator is tracking a multi-index, gets the strides
+for the axis specified, otherwise gets the strides for
+the iteration axis as Fortran order (fastest-changing axis first).
+
+Returns NULL if an error occurs.
+
+::
+
+ npy_bool
+ NpyIter_RequiresBuffering(NpyIter *iter)
+
+Whether the iteration could be done with no buffering.
+
+::
+
+ char **
+ NpyIter_GetInitialDataPtrArray(NpyIter *iter)
+
+Get the array of data pointers (1 per object being iterated),
+directly into the arrays (never pointing to a buffer), for starting
+unbuffered iteration. This always returns the addresses for the
+iterator position as reset to iterator index 0.
+
+These pointers are different from the pointers accepted by
+NpyIter_ResetBasePointers, because the direction along some
+axes may have been reversed, requiring base offsets.
+
+This function may be safely called without holding the Python GIL.
+
+::
+
+ int
+ NpyIter_CreateCompatibleStrides(NpyIter *iter, npy_intp
+ itemsize, npy_intp *outstrides)
+
+Builds a set of strides which are the same as the strides of an
+output array created using the NPY_ITER_ALLOCATE flag, where NULL
+was passed for op_axes. This is for data packed contiguously,
+but not necessarily in C or Fortran order. This should be used
+together with NpyIter_GetShape and NpyIter_GetNDim.
+
+A use case for this function is to match the shape and layout of
+the iterator and tack on one or more dimensions. For example,
+in order to generate a vector per input value for a numerical gradient,
+you pass in ndim*itemsize for itemsize, then add another dimension to
+the end with size ndim and stride itemsize. To do the Hessian matrix,
+you do the same thing but add two dimensions, or take advantage of
+the symmetry and pack it into 1 dimension with a particular encoding.
+
+This function may only be called if the iterator is tracking a multi-index
+and if NPY_ITER_DONT_NEGATE_STRIDES was used to prevent an axis from
+being iterated in reverse order.
+
+If an array is created with this method, simply adding 'itemsize'
+for each iteration will traverse the new array matching the
+iterator.
+
+Returns NPY_SUCCEED or NPY_FAIL.
+
+::
+
+ int
+ PyArray_CastingConverter(PyObject *obj, NPY_CASTING *casting)
+
+Convert any Python object, *obj*, to an NPY_CASTING enum.
+
+::
+
+ npy_intp
+ PyArray_CountNonzero(PyArrayObject *self)
+
+Counts the number of non-zero elements in the array.
+
+Returns -1 on error.
+
+::
+
+ PyArray_Descr *
+ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
+
+Produces the smallest size and lowest kind type to which both
+input types can be cast.
+
+::
+
+ PyArray_Descr *
+ PyArray_MinScalarType(PyArrayObject *arr)
+
+If arr is a scalar (has 0 dimensions) with a built-in number data type,
+finds the smallest type size/kind which can still represent its data.
+Otherwise, returns the array's data type.
+
+
+::
+
+ PyArray_Descr *
+ PyArray_ResultType(npy_intp narrs, PyArrayObject *arrs[] , npy_intp
+ ndtypes, PyArray_Descr *descrs[] )
+
+
+Produces the result type of a bunch of inputs, using the same rules
+as `np.result_type`.
+
+NOTE: This function is expected to go through a transitional period or
+change behaviour. DTypes should always be strictly enforced for
+0-D arrays, while "weak DTypes" will be used to represent Python
+integers, floats, and complex in all cases.
+(Within this function, these are currently flagged on the array
+object to work through `np.result_type`; this may change.)
+
+Until a time where this transition is complete, we probably cannot
+add new "weak DTypes" or allow users to create their own.
+
+::
+
+ npy_bool
+ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr
+ *to, NPY_CASTING casting)
+
+Returns 1 if the array object may be cast to the given data type using
+the casting rule, 0 otherwise. This differs from PyArray_CanCastTo in
+that it handles scalar arrays (0 dimensions) specially, by checking
+their value.
+
+::
+
+ npy_bool
+ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr
+ *to, NPY_CASTING casting)
+
+Returns true if data of type 'from' may be cast to data of type
+'to' according to the rule 'casting'.
+
+::
+
+ PyArrayObject *
+ PyArray_EinsteinSum(char *subscripts, npy_intp nop, PyArrayObject
+ **op_in, PyArray_Descr *dtype, NPY_ORDER
+ order, NPY_CASTING casting, PyArrayObject *out)
+
+This function provides summation of array elements according to
+the Einstein summation convention. For example:
+- trace(a) -> einsum("ii", a)
+- transpose(a) -> einsum("ji", a)
+- multiply(a,b) -> einsum(",", a, b)
+- inner(a,b) -> einsum("i,i", a, b)
+- outer(a,b) -> einsum("i,j", a, b)
+- matvec(a,b) -> einsum("ij,j", a, b)
+- matmat(a,b) -> einsum("ij,jk", a, b)
+
+subscripts: The string of subscripts for einstein summation.
+nop: The number of operands
+op_in: The array of operands
+dtype: Either NULL, or the data type to force the calculation as.
+order: The order for the calculation/the output axes.
+casting: What kind of casts should be permitted.
+out: Either NULL, or an array into which the output should be placed.
+
+By default, the labels get placed in alphabetical order
+at the end of the output. So, if c = einsum("i,j", a, b)
+then c[i,j] == a[i]*b[j], but if c = einsum("j,i", a, b)
+then c[i,j] == a[j]*b[i].
+
+Alternatively, you can control the output order or prevent
+an axis from being summed/force an axis to be summed by providing
+indices for the output. This allows us to turn 'trace' into
+'diag', for example.
+- diag(a) -> einsum("ii->i", a)
+- sum(a, axis=0) -> einsum("i...->", a)
+
+Subscripts at the beginning and end may be specified by
+putting an ellipsis "..." in the middle. For example,
+the function einsum("i...i", a) takes the diagonal of
+the first and last dimensions of the operand, and
+einsum("ij...,jk...->ik...") takes the matrix product using
+the first two indices of each operand instead of the last two.
+
+When there is only one operand, no axes being summed, and
+no output parameter, this function returns a view
+into the operand instead of making a copy.
+
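+A minimal matrix-product sketch (hypothetical operands `a` and `b`)::
+
+    /* c = einsum("ij,jk", a, b) */
+    PyArrayObject *ops[2] = {a, b};
+    PyArrayObject *c = PyArray_EinsteinSum("ij,jk", 2, ops, NULL,
+                                           NPY_KEEPORDER,
+                                           NPY_SAFE_CASTING, NULL);
+    if (c == NULL) {
+        return NULL;
+    }
+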
+::
+
+ PyObject *
+ PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER
+ order, PyArray_Descr *dtype, int subok)
+
+Creates a new array with the same shape as the provided one,
+with possible memory layout order and data type changes.
+
+prototype - The array the new one should be like.
+order     - NPY_CORDER - C-contiguous result.
+            NPY_FORTRANORDER - Fortran-contiguous result.
+            NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise.
+            NPY_KEEPORDER - Keeps the axis ordering of prototype.
+dtype     - If not NULL, overrides the data type of the result.
+subok     - If 1, use the prototype's array subtype, otherwise
+            always create a base-class array.
+
+NOTE: If dtype is not NULL, steals the dtype reference. On failure or when
+dtype->subarray is true, dtype will be decrefed.
+
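+A minimal sketch (hypothetical `prototype`)::
+
+    /* Same shape and layout as prototype; keep its dtype and subtype. */
+    PyObject *out = PyArray_NewLikeArray(prototype, NPY_KEEPORDER,
+                                         NULL, 1);
+    if (out == NULL) {
+        return NULL;
+    }
+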
+::
+
+ int
+ PyArray_GetArrayParamsFromObject(PyObject *NPY_UNUSED(op)
+ , PyArray_Descr
+ *NPY_UNUSED(requested_dtype)
+ , npy_bool NPY_UNUSED(writeable)
+ , PyArray_Descr
+ **NPY_UNUSED(out_dtype) , int
+ *NPY_UNUSED(out_ndim) , npy_intp
+ *NPY_UNUSED(out_dims) , PyArrayObject
+ **NPY_UNUSED(out_arr) , PyObject
+ *NPY_UNUSED(context) )
+
+
+::
+
+ int
+ PyArray_ConvertClipmodeSequence(PyObject *object, NPY_CLIPMODE
+ *modes, int n)
+
+Convert an object to an array of n NPY_CLIPMODE values.
+This is intended to be used in functions where a different mode
+could be applied to each axis, like in ravel_multi_index.
+
+::
+
+ PyObject *
+ PyArray_MatrixProduct2(PyObject *op1, PyObject
+ *op2, PyArrayObject*out)
+
+Numeric.matrixproduct2(a,v,out)
+just like inner product but does the swapaxes stuff on the fly
+
+::
+
+ npy_bool
+ NpyIter_IsFirstVisit(NpyIter *iter, int iop)
+
+Checks whether the elements of the specified reduction operand which
+the iterator points at are being seen for the first time. The function
+returns a reasonable answer for reduction operands and when buffering is
+disabled. The answer may be incorrect for buffered non-reduction
+operands.
+
+This function is intended to be used in EXTERNAL_LOOP mode only,
+and will produce some wrong answers when that mode is not enabled.
+
+If this function returns true, the caller should also
+check the inner loop stride of the operand, because if
+that stride is 0, then only the first element of the innermost
+external loop is being visited for the first time.
+
+WARNING: For performance reasons, 'iop' is not bounds-checked,
+it is not confirmed that 'iop' is actually a reduction
+operand, and it is not confirmed that EXTERNAL_LOOP
+mode is enabled. These checks are the responsibility of
+the caller, and should be done outside of any inner loops.
+
+::
+
+ int
+ PyArray_SetBaseObject(PyArrayObject *arr, PyObject *obj)
+
+Sets the 'base' attribute of the array. This steals a reference
+to 'obj'.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+ void
+ PyArray_CreateSortedStridePerm(int ndim, npy_intp const
+ *strides, npy_stride_sort_item
+ *out_strideperm)
+
+
+This function populates the first ndim elements of out_strideperm
+with the strides sorted descending by their absolute values.
+For example, the stride array (4, -2, 12) becomes
+[(2, 12), (0, 4), (1, -2)].
+
+::
+
+ void
+ PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)
+
+
+Removes the axes flagged as True from the array,
+modifying it in place. If an axis flagged for removal
+has a shape entry bigger than one, this effectively selects
+index zero for that axis.
+
+WARNING: If an axis flagged for removal has a shape equal to zero,
+the array will point to invalid memory. The caller must
+validate this!
+If an axis flagged for removal has a shape larger than one,
+the aligned flag (and in the future the contiguous flags)
+may need explicit update.
+
+For example, this can be used to remove the reduction axes
+from a reduction result once its computation is complete.
+
+::
+
+ void
+ PyArray_DebugPrint(PyArrayObject *obj)
+
+Prints the raw data of the ndarray in a form useful for debugging
+low-level C issues.
+
+::
+
+ int
+ PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
+
+
+This function does nothing and returns 0 if *obj* is writeable.
+It raises an exception and returns -1 if *obj* is not writeable.
+It may also do other house-keeping, such as issuing warnings on
+arrays which are transitioning to become views. Always call this
+function at some point before writing to an array.
+
+*name* is a name for the array, used to give better error messages.
+It can be something like "assignment destination", "output array",
+or even just "array".
+
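+A typical guard (hypothetical `out` array)::
+
+    if (PyArray_FailUnlessWriteable(out, "output array") < 0) {
+        return NULL;  /* exception already set */
+    }
+    /* ... safe to write to out ... */
+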
+::
+
+ int
+ PyArray_SetUpdateIfCopyBase(PyArrayObject *arr, PyArrayObject *base)
+
+
+::
+
+ void *
+ PyDataMem_NEW(size_t size)
+
+Allocates memory for array data.
+
+::
+
+ void
+ PyDataMem_FREE(void *ptr)
+
+Free memory for array data.
+
+::
+
+ void *
+ PyDataMem_RENEW(void *ptr, size_t size)
+
+Reallocate/resize memory for array data.
+
+::
+
+ PyDataMem_EventHookFunc *
+ PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void
+ *user_data, void **old_data)
+
+Sets the allocation event hook for numpy array data.
+Takes a PyDataMem_EventHookFunc *, which has the signature:
+void hook(void *old, void *new, size_t size, void *user_data).
+Also takes a void *user_data, and void **old_data.
+
+Returns a pointer to the previous hook or NULL. If old_data is
+non-NULL, the previous user_data pointer will be copied to it.
+
+If not NULL, hook will be called at the end of each PyDataMem_NEW/FREE/RENEW:
+result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data)
+PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data)
+result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)
+
+When the hook is called, the GIL will be held by the calling
+thread. The hook should be written to be reentrant if it performs
+operations that might cause new allocation events (such as the
+creation/destruction of numpy objects, or creating/destroying Python
+objects which might cause a gc).
+
+Deprecated in 1.23
+
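+A minimal sketch of installing a counting hook through this (deprecated)
+API; the hook and counter names are hypothetical::
+
+    static size_t n_events;
+
+    static void
+    my_alloc_hook(void *old, void *new_ptr, size_t size, void *user_data)
+    {
+        /* old == NULL: allocation; new_ptr == NULL: free; else realloc */
+        (void)size; (void)user_data;
+        n_events++;
+    }
+
+    /* Install it; the previous user_data pointer lands in old_data. */
+    void *old_data = NULL;
+    PyDataMem_SetEventHook(my_alloc_hook, NULL, &old_data);
+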
+::
+
+ void
+ PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject
+ **ret, int getmap)
+
+
+Swap the axes to or from their inserted form. MapIter always puts the
+advanced (array) indices first in the iteration, but if they are
+consecutive, it will insert/transpose them back before returning.
+This is stored as `mit->consec != 0` (the place where they are inserted).
+For assignments, the opposite happens: the values to be assigned are
+transposed (getmap=1 instead of getmap=0). `getmap=0` and `getmap=1`
+undo the other operation.
+
+::
+
+ PyObject *
+ PyArray_MapIterArray(PyArrayObject *a, PyObject *index)
+
+
+Use advanced indexing to iterate an array.
+
+::
+
+ void
+ PyArray_MapIterNext(PyArrayMapIterObject *mit)
+
+This function needs to update the state of the map iterator
+and point mit->dataptr to the memory location of the next object.
+
+Note that this function never handles an extra operand but provides
+compatibility for an old (exposed) API.
+
+::
+
+ int
+ PyArray_Partition(PyArrayObject *op, PyArrayObject *ktharray, int
+ axis, NPY_SELECTKIND which)
+
+Partition an array in-place
+
+::
+
+ PyObject *
+ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int
+ axis, NPY_SELECTKIND which)
+
+ArgPartition an array
+
+::
+
+ int
+ PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind)
+
+Convert object to select kind
+
+::
+
+ void *
+ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size)
+
+Allocates zeroed memory for array data.
+
+::
+
+ int
+ PyArray_CheckAnyScalarExact(PyObject *obj)
+
+return 1 if an object is exactly a numpy scalar
+
+::
+
+ PyObject *
+ PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, PyObject
+ *index, int
+ copy_if_overlap, PyArrayObject
+ *extra_op)
+
+
+Same as PyArray_MapIterArray, but:
+
+If copy_if_overlap != 0, check if `a` has memory overlap with any of the
+arrays in `index` and with `extra_op`. If yes, make copies as appropriate
+to avoid problems if `a` is modified during the iteration.
+`iter->array` may contain a copied array (WRITEBACKIFCOPY set).
+
+::
+
+ int
+ PyArray_ResolveWritebackIfCopy(PyArrayObject *self)
+
+
+If WRITEBACKIFCOPY and self has data, reset the base WRITEABLE flag,
+copy the local data to base, release the local data, and set flags
+appropriately. Return 0 if not relevant, 1 on success, < 0 on failure.
+
+::
+
+ int
+ PyArray_SetWritebackIfCopyBase(PyArrayObject *arr, PyArrayObject
+ *base)
+
+
+Precondition: 'arr' is a copy of 'base' (though possibly with different
+strides, ordering, etc.). This function sets the WRITEBACKIFCOPY flag and the
+->base pointer on 'arr'; call PyArray_ResolveWritebackIfCopy to copy any
+changes back to 'base' before deallocating the array.
+
+Steals a reference to 'base'.
+
+Returns 0 on success, -1 on failure.
+
+::
+
+ PyObject *
+ PyDataMem_SetHandler(PyObject *handler)
+
+Set a new allocation policy. If the input value is NULL, will reset
+the policy to the default. Return the previous policy, or
+return NULL if an error has occurred. We wrap the user-provided
+functions so they will still call the Python and numpy
+memory management callback hooks.
+
+::
+
+ PyObject *
+ PyDataMem_GetHandler()
+
+Return the policy that will be used to allocate data
+for the next PyArrayObject. On failure, return NULL.
+
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h
new file mode 100644
index 00000000..aaaefd7d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarrayobject.h
@@ -0,0 +1,251 @@
+/*
+ * DON'T INCLUDE THIS DIRECTLY.
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "ndarraytypes.h"
+
+/* Includes the "function" C-API -- these are all stored in a
+   list of pointers --- one for each file.
+   The two lists are concatenated into one in multiarray.
+
+   They are available as import_array()
+*/
+
+#include "__multiarray_api.h"
+
+
+/* C-API that requires previous API to be defined */
+
+#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
+
+#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
+#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
+
+#define PyArray_HasArrayInterfaceType(op, type, context, out) \
+ ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \
+ (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \
+ (((out)=PyArray_FromArrayAttr(op, type, context)) != \
+ Py_NotImplemented))
+
+#define PyArray_HasArrayInterface(op, out) \
+ PyArray_HasArrayInterfaceType(op, NULL, NULL, out)
+
+#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \
+ (PyArray_NDIM((PyArrayObject *)op) == 0))
+
+#define PyArray_IsScalar(obj, cls) \
+ (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))
+
+#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
+ PyArray_IsZeroDim(m))
+#define PyArray_IsPythonNumber(obj) \
+ (PyFloat_Check(obj) || PyComplex_Check(obj) || \
+ PyLong_Check(obj) || PyBool_Check(obj))
+#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \
+ || PyArray_IsScalar((obj), Integer))
+#define PyArray_IsPythonScalar(obj) \
+ (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
+ PyUnicode_Check(obj))
+
+#define PyArray_IsAnyScalar(obj) \
+ (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
+
+#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
+ PyArray_CheckScalar(obj))
+
+
+#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
+ Py_INCREF(m), (m) : \
+ (PyArrayObject *)(PyArray_Copy(m)))
+
+#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \
+ PyArray_CompareLists(PyArray_DIMS(a1), \
+ PyArray_DIMS(a2), \
+ PyArray_NDIM(a1)))
+
+#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))
+#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))
+#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)
+
+#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \
+ NULL)
+
+#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
+ PyArray_DescrFromType(type), 0, 0, 0, NULL)
+
+#define PyArray_FROM_OTF(m, type, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
+ (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+ ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)
+
+#define PyArray_FROMANY(m, type, min, max, flags) \
+ PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \
+ (((flags) & NPY_ARRAY_ENSURECOPY) ? \
+ (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)
+
+#define PyArray_ZEROS(m, dims, type, is_f_order) \
+ PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_EMPTY(m, dims, type, is_f_order) \
+ PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)
+
+#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \
+ PyArray_NBYTES(obj))
+#ifndef PYPY_VERSION
+#define PyArray_REFCOUNT(obj) (((PyObject *)(obj))->ob_refcnt)
+#define NPY_REFCOUNT PyArray_REFCOUNT
+#endif
+#define NPY_MAX_ELSIZE (2 * NPY_SIZEOF_LONGDOUBLE)
+
+#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_DEFAULT, NULL)
+
+#define PyArray_EquivArrTypes(a1, a2) \
+ PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))
+
+#define PyArray_EquivByteorders(b1, b2) \
+ (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))
+
+#define PyArray_SimpleNew(nd, dims, typenum) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)
+
+#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \
+ PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \
+ data, 0, NPY_ARRAY_CARRAY, NULL)
+
+#define PyArray_SimpleNewFromDescr(nd, dims, descr) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \
+ NULL, NULL, 0, NULL)
+
+#define PyArray_ToScalar(data, arr) \
+ PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)
+
+
+/* These might be faster without the dereferencing of obj
+   going on inside -- of course an optimizing compiler should
+   inline the constants inside a for loop, making it a moot point
+*/
+
+#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0]))
+
+#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1]))
+
+#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1] + \
+ (k)*PyArray_STRIDES(obj)[2]))
+
+#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \
+ (i)*PyArray_STRIDES(obj)[0] + \
+ (j)*PyArray_STRIDES(obj)[1] + \
+ (k)*PyArray_STRIDES(obj)[2] + \
+ (l)*PyArray_STRIDES(obj)[3]))
+
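+/* Example (illustrative sketch; `arr` is assumed to be a 2-D NPY_DOUBLE
+ * array): element-wise traversal with PyArray_GETPTR2.
+ *
+ *     npy_intp i, j;
+ *     for (i = 0; i < PyArray_DIM(arr, 0); ++i) {
+ *         for (j = 0; j < PyArray_DIM(arr, 1); ++j) {
+ *             double v = *(double *)PyArray_GETPTR2(arr, i, j);
+ *             ... use v ...
+ *         }
+ *     }
+ */
+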
+static NPY_INLINE void
+PyArray_DiscardWritebackIfCopy(PyArrayObject *arr)
+{
+ PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
+ if (fa && fa->base) {
+ if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {
+ PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);
+ Py_DECREF(fa->base);
+ fa->base = NULL;
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);
+ }
+ }
+}
+
+#define PyArray_DESCR_REPLACE(descr) do { \
+ PyArray_Descr *_new_; \
+ _new_ = PyArray_DescrNew(descr); \
+ Py_XDECREF(descr); \
+ descr = _new_; \
+ } while(0)
+
+/* Copy should always return contiguous array */
+#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)
+
+#define PyArray_FromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_BEHAVED | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_DEFAULT | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \
+ PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \
+ max_depth, NPY_ARRAY_ENSURECOPY | \
+ NPY_ARRAY_DEFAULT | \
+ NPY_ARRAY_ENSUREARRAY, NULL)
+
+#define PyArray_Cast(mp, type_num) \
+ PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)
+
+#define PyArray_Take(ap, items, axis) \
+ PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)
+
+#define PyArray_Put(ap, items, values) \
+ PyArray_PutTo(ap, items, values, NPY_RAISE)
+
+/* Compatibility with old Numeric stuff -- don't use in new code */
+
+#define PyArray_FromDimsAndData(nd, d, type, data) \
+ PyArray_FromDimsAndDataAndDescr(nd, d, PyArray_DescrFromType(type), \
+ data)
+
+
+/*
+ Check to see if this key in the dictionary is the "title"
+ entry of the tuple (i.e. a duplicate dictionary entry in the fields
+ dict).
+*/
+
+static NPY_INLINE int
+NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
+{
+ PyObject *title;
+ if (PyTuple_Size(value) != 3) {
+ return 0;
+ }
+ title = PyTuple_GetItem(value, 2);
+ if (key == title) {
+ return 1;
+ }
+#ifdef PYPY_VERSION
+ /*
+ * On PyPy, dictionary keys do not always preserve object identity.
+ * Fall back to comparison by value.
+ */
+ if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
+ return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
+ }
+#endif
+ return 0;
+}
+
+/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
+#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
+
+#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
+#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h
new file mode 100644
index 00000000..e894b08f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ndarraytypes.h
@@ -0,0 +1,1956 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_
+
+#include "npy_common.h"
+#include "npy_endian.h"
+#include "npy_cpu.h"
+#include "utils.h"
+
+#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN
+
+/* Only use thread if configured in config and python supports it */
+#if defined WITH_THREAD && !NPY_NO_SMP
+ #define NPY_ALLOW_THREADS 1
+#else
+ #define NPY_ALLOW_THREADS 0
+#endif
+
+#ifndef __has_extension
+#define __has_extension(x) 0
+#endif
+
+#if !defined(_NPY_NO_DEPRECATIONS) && \
+ ((defined(__GNUC__)&& __GNUC__ >= 6) || \
+ __has_extension(attribute_deprecated_with_message))
+#define NPY_ATTR_DEPRECATE(text) __attribute__ ((deprecated (text)))
+#else
+#define NPY_ATTR_DEPRECATE(text)
+#endif
+
+/*
+ * There are several places in the code where an array of dimensions
+ * is allocated statically. This is the size of that static
+ * allocation.
+ *
+ * The array creation itself could have arbitrary dimensions but all
+ * the places where static allocation is used would need to be changed
+ * to dynamic (including inside of several structures)
+ */
+
+#define NPY_MAXDIMS 32
+#define NPY_MAXARGS 32
+
+/* Used for Converter Functions "O&" code in ParseTuple */
+#define NPY_FAIL 0
+#define NPY_SUCCEED 1
+
+/*
+ * Binary compatibility version number. This number is increased
+ * whenever the C-API is changed such that binary compatibility is
+ * broken, i.e. whenever a recompile of extension modules is needed.
+ */
+#define NPY_VERSION NPY_ABI_VERSION
+
+/*
+ * Minor API version. This number is increased whenever a change is
+ * made to the C-API -- whether it breaks binary compatibility or not.
+ * Some changes, such as adding a function pointer to the end of the
+ * function table, can be made without breaking binary compatibility.
+ * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION)
+ * would be increased. Whenever binary compatibility is broken, both
+ * NPY_VERSION and NPY_FEATURE_VERSION should be increased.
+ */
+#define NPY_FEATURE_VERSION NPY_API_VERSION
+
+enum NPY_TYPES { NPY_BOOL=0,
+ NPY_BYTE, NPY_UBYTE,
+ NPY_SHORT, NPY_USHORT,
+ NPY_INT, NPY_UINT,
+ NPY_LONG, NPY_ULONG,
+ NPY_LONGLONG, NPY_ULONGLONG,
+ NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,
+ NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,
+ NPY_OBJECT=17,
+ NPY_STRING, NPY_UNICODE,
+ NPY_VOID,
+ /*
+ * New 1.6 types appended, may be integrated
+ * into the above in 2.0.
+ */
+ NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
+
+ NPY_NTYPES,
+ NPY_NOTYPE,
+ NPY_CHAR NPY_ATTR_DEPRECATE("Use NPY_STRING"),
+ NPY_USERDEF=256, /* leave room for characters */
+
+ /* The number of types not including the new 1.6 types */
+ NPY_NTYPES_ABI_COMPATIBLE=21
+};
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma deprecated(NPY_CHAR)
+#endif
+
+/* basetype array priority */
+#define NPY_PRIORITY 0.0
+
+/* default subtype priority */
+#define NPY_SUBTYPE_PRIORITY 1.0
+
+/* default scalar priority */
+#define NPY_SCALAR_PRIORITY -1000000.0
+
+/* How many floating point types are there (excluding half) */
+#define NPY_NUM_FLOATTYPE 3
+
+/*
+ * These characters correspond to the array type and the struct
+ * module
+ */
+
+enum NPY_TYPECHAR {
+ NPY_BOOLLTR = '?',
+ NPY_BYTELTR = 'b',
+ NPY_UBYTELTR = 'B',
+ NPY_SHORTLTR = 'h',
+ NPY_USHORTLTR = 'H',
+ NPY_INTLTR = 'i',
+ NPY_UINTLTR = 'I',
+ NPY_LONGLTR = 'l',
+ NPY_ULONGLTR = 'L',
+ NPY_LONGLONGLTR = 'q',
+ NPY_ULONGLONGLTR = 'Q',
+ NPY_HALFLTR = 'e',
+ NPY_FLOATLTR = 'f',
+ NPY_DOUBLELTR = 'd',
+ NPY_LONGDOUBLELTR = 'g',
+ NPY_CFLOATLTR = 'F',
+ NPY_CDOUBLELTR = 'D',
+ NPY_CLONGDOUBLELTR = 'G',
+ NPY_OBJECTLTR = 'O',
+ NPY_STRINGLTR = 'S',
+ NPY_STRINGLTR2 = 'a',
+ NPY_UNICODELTR = 'U',
+ NPY_VOIDLTR = 'V',
+ NPY_DATETIMELTR = 'M',
+ NPY_TIMEDELTALTR = 'm',
+ NPY_CHARLTR = 'c',
+
+ /*
+     * No Descriptor, just a define -- this lets
+ * Python users specify an array of integers
+ * large enough to hold a pointer on the
+ * platform
+ */
+ NPY_INTPLTR = 'p',
+ NPY_UINTPLTR = 'P',
+
+ /*
+ * These are for dtype 'kinds', not dtype 'typecodes'
+ * as the above are for.
+ */
+ NPY_GENBOOLLTR ='b',
+ NPY_SIGNEDLTR = 'i',
+ NPY_UNSIGNEDLTR = 'u',
+ NPY_FLOATINGLTR = 'f',
+ NPY_COMPLEXLTR = 'c'
+};
+
+/*
+ * Changing this may break Numpy API compatibility
+ * due to changing offsets in PyArray_ArrFuncs, so be
+ * careful. Here we have reused the mergesort slot for
+ * any kind of stable sort, the actual implementation will
+ * depend on the data type.
+ */
+typedef enum {
+ NPY_QUICKSORT=0,
+ NPY_HEAPSORT=1,
+ NPY_MERGESORT=2,
+ NPY_STABLESORT=2,
+} NPY_SORTKIND;
+#define NPY_NSORTS (NPY_STABLESORT + 1)
+
+
+typedef enum {
+ NPY_INTROSELECT=0
+} NPY_SELECTKIND;
+#define NPY_NSELECTS (NPY_INTROSELECT + 1)
+
+
+typedef enum {
+ NPY_SEARCHLEFT=0,
+ NPY_SEARCHRIGHT=1
+} NPY_SEARCHSIDE;
+#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1)
+
+
+typedef enum {
+ NPY_NOSCALAR=-1,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+} NPY_SCALARKIND;
+#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1)
+
+/* For specifying array memory layout or iteration order */
+typedef enum {
+ /* Fortran order if inputs are all Fortran, C otherwise */
+ NPY_ANYORDER=-1,
+ /* C order */
+ NPY_CORDER=0,
+ /* Fortran order */
+ NPY_FORTRANORDER=1,
+ /* An order as close to the inputs as possible */
+ NPY_KEEPORDER=2
+} NPY_ORDER;
+
+/* For specifying allowed casting in operations which support it */
+typedef enum {
+ _NPY_ERROR_OCCURRED_IN_CAST = -1,
+ /* Only allow identical types */
+ NPY_NO_CASTING=0,
+ /* Allow identical and byte swapped types */
+ NPY_EQUIV_CASTING=1,
+ /* Only allow safe casts */
+ NPY_SAFE_CASTING=2,
+ /* Allow safe casts or casts within the same kind */
+ NPY_SAME_KIND_CASTING=3,
+ /* Allow any casts */
+ NPY_UNSAFE_CASTING=4,
+} NPY_CASTING;
+
+typedef enum {
+ NPY_CLIP=0,
+ NPY_WRAP=1,
+ NPY_RAISE=2
+} NPY_CLIPMODE;
+
+typedef enum {
+ NPY_VALID=0,
+ NPY_SAME=1,
+ NPY_FULL=2
+} NPY_CORRELATEMODE;
+
+/* The special not-a-time (NaT) value */
+#define NPY_DATETIME_NAT NPY_MIN_INT64
+
+/*
+ * Upper bound on the length of a DATETIME ISO 8601 string
+ * YEAR: 21 (64-bit year)
+ * MONTH: 3
+ * DAY: 3
+ * HOURS: 3
+ * MINUTES: 3
+ * SECONDS: 3
+ * ATTOSECONDS: 1 + 3*6
+ * TIMEZONE: 5
+ * NULL TERMINATOR: 1
+ */
+#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1)
+
+/* The FR in the unit names stands for frequency */
+typedef enum {
+ /* Force signed enum type, must be -1 for code compatibility */
+ NPY_FR_ERROR = -1, /* error or undetermined */
+
+ /* Start of valid units */
+ NPY_FR_Y = 0, /* Years */
+ NPY_FR_M = 1, /* Months */
+ NPY_FR_W = 2, /* Weeks */
+ /* Gap where 1.6 NPY_FR_B (value 3) was */
+ NPY_FR_D = 4, /* Days */
+ NPY_FR_h = 5, /* hours */
+ NPY_FR_m = 6, /* minutes */
+ NPY_FR_s = 7, /* seconds */
+ NPY_FR_ms = 8, /* milliseconds */
+ NPY_FR_us = 9, /* microseconds */
+ NPY_FR_ns = 10, /* nanoseconds */
+ NPY_FR_ps = 11, /* picoseconds */
+ NPY_FR_fs = 12, /* femtoseconds */
+ NPY_FR_as = 13, /* attoseconds */
+ NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */
+} NPY_DATETIMEUNIT;
+
+/*
+ * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
+ * is technically one more than the actual number of units.
+ */
+#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
+#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
+
+/*
+ * Business day conventions for mapping invalid business
+ * days to valid business days.
+ */
+typedef enum {
+ /* Go forward in time to the following business day. */
+ NPY_BUSDAY_FORWARD,
+ NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD,
+ /* Go backward in time to the preceding business day. */
+ NPY_BUSDAY_BACKWARD,
+ NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD,
+ /*
+ * Go forward in time to the following business day, unless it
+ * crosses a month boundary, in which case go backward
+ */
+ NPY_BUSDAY_MODIFIEDFOLLOWING,
+ /*
+ * Go backward in time to the preceding business day, unless it
+ * crosses a month boundary, in which case go forward.
+ */
+ NPY_BUSDAY_MODIFIEDPRECEDING,
+ /* Produce a NaT for non-business days. */
+ NPY_BUSDAY_NAT,
+ /* Raise an exception for non-business days. */
+ NPY_BUSDAY_RAISE
+} NPY_BUSDAY_ROLL;
+
+/************************************************************
+ * NumPy Auxiliary Data for inner loops, sort functions, etc.
+ ************************************************************/
+
+/*
+ * When creating an auxiliary data struct, this should always appear
+ * as the first member, like this:
+ *
+ * typedef struct {
+ * NpyAuxData base;
+ * double constant;
+ * } constant_multiplier_aux_data;
+ */
+typedef struct NpyAuxData_tag NpyAuxData;
+
+/* Function pointers for freeing or cloning auxiliary data */
+typedef void (NpyAuxData_FreeFunc) (NpyAuxData *);
+typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *);
+
+struct NpyAuxData_tag {
+ NpyAuxData_FreeFunc *free;
+ NpyAuxData_CloneFunc *clone;
+ /* To allow for a bit of expansion without breaking the ABI */
+ void *reserved[2];
+};
+
+/* Macros to use for freeing and cloning auxiliary data */
+#define NPY_AUXDATA_FREE(auxdata) \
+ do { \
+ if ((auxdata) != NULL) { \
+ (auxdata)->free(auxdata); \
+ } \
+ } while(0)
+#define NPY_AUXDATA_CLONE(auxdata) \
+ ((auxdata)->clone(auxdata))
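+
+/*
+ * Editor's illustrative sketch (not part of NumPy): a minimal NpyAuxData
+ * subtype following the layout described above, with matching free and
+ * clone implementations. The struct and function names are hypothetical;
+ * PyArray_malloc/PyArray_free are defined further below in this header.
+ */
+#if 0   /* example only */
+typedef struct {
+    NpyAuxData base;
+    double constant;
+} constant_multiplier_aux_data;
+
+static void
+constant_multiplier_free(NpyAuxData *data)
+{
+    PyArray_free(data);
+}
+
+static NpyAuxData *
+constant_multiplier_clone(NpyAuxData *data)
+{
+    constant_multiplier_aux_data *ret =
+        (constant_multiplier_aux_data *)PyArray_malloc(sizeof(*ret));
+    if (ret != NULL) {
+        /* shallow copy suffices: the struct holds no owned pointers */
+        memcpy(ret, data, sizeof(*ret));
+    }
+    return (NpyAuxData *)ret;
+}
+#endif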
+
+#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr);
+#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr);
+
+/*
+ * Macros to define how array and dimension/strides data is
+ * allocated. These should be made private.
+ */
+
+#define NPY_USE_PYMEM 1
+
+
+#if NPY_USE_PYMEM == 1
+/* use the Raw versions which are safe to call with the GIL released */
+#define PyArray_malloc PyMem_RawMalloc
+#define PyArray_free PyMem_RawFree
+#define PyArray_realloc PyMem_RawRealloc
+#else
+#define PyArray_malloc malloc
+#define PyArray_free free
+#define PyArray_realloc realloc
+#endif
+
+/* Dimensions and strides */
+#define PyDimMem_NEW(size) \
+ ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp)))
+
+#define PyDimMem_FREE(ptr) PyArray_free(ptr)
+
+#define PyDimMem_RENEW(ptr,size) \
+ ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp)))
+
+/* forward declaration */
+struct _PyArray_Descr;
+
+/* These must deal with unaligned and swapped data if necessary */
+typedef PyObject * (PyArray_GetItemFunc) (void *, void *);
+typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);
+
+typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp,
+ npy_intp, int, void *);
+
+typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *);
+typedef npy_bool (PyArray_NonzeroFunc)(void *, void *);
+
+
+/*
+ * These assume aligned and not-swapped data -- a buffer will be used
+ * beforehand, or contiguous data will be obtained
+ */
+
+typedef int (PyArray_CompareFunc)(const void *, const void *, void *);
+typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *);
+
+typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *,
+ npy_intp, void *);
+
+typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *,
+ void *);
+
+/*
+ * XXX the ignore argument should be removed next time the API version
+ * is bumped. It used to be the separator.
+ */
+typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr,
+ char *ignore, struct _PyArray_Descr *);
+typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr,
+ struct _PyArray_Descr *);
+
+typedef int (PyArray_FillFunc)(void *, npy_intp, void *);
+
+typedef int (PyArray_SortFunc)(void *, npy_intp, void *);
+typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);
+typedef int (PyArray_PartitionFunc)(void *, npy_intp, npy_intp,
+ npy_intp *, npy_intp *,
+ void *);
+typedef int (PyArray_ArgPartitionFunc)(void *, npy_intp *, npy_intp, npy_intp,
+ npy_intp *, npy_intp *,
+ void *);
+
+typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);
+
+typedef int (PyArray_ScalarKindFunc)(void *);
+
+typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min,
+ void *max, void *out);
+typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in,
+ void *values, npy_intp nv);
+typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray,
+ npy_intp nindarray, npy_intp n_outer,
+ npy_intp m_middle, npy_intp nelem,
+ NPY_CLIPMODE clipmode);
+
+typedef struct {
+ npy_intp *ptr;
+ int len;
+} PyArray_Dims;
+
+typedef struct {
+ /*
+ * Functions to cast to most other standard types
+ * Can have some NULL entries. The types
+ * DATETIME, TIMEDELTA, and HALF go into the castdict
+ * even though they are built-in.
+ */
+ PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE];
+
+ /* The next four functions *cannot* be NULL */
+
+ /*
+ * Functions to get and set items with standard Python types
+ * -- not array scalars
+ */
+ PyArray_GetItemFunc *getitem;
+ PyArray_SetItemFunc *setitem;
+
+ /*
+ * Copy and/or swap data. Memory areas may not overlap.
+ * Use memmove first if they might.
+ */
+ PyArray_CopySwapNFunc *copyswapn;
+ PyArray_CopySwapFunc *copyswap;
+
+ /*
+ * Function to compare items
+ * Can be NULL
+ */
+ PyArray_CompareFunc *compare;
+
+ /*
+ * Function to select largest
+ * Can be NULL
+ */
+ PyArray_ArgFunc *argmax;
+
+ /*
+ * Function to compute dot product
+ * Can be NULL
+ */
+ PyArray_DotFunc *dotfunc;
+
+ /*
+ * Function to scan an ASCII file and
+ * place a single value plus possible separator
+ * Can be NULL
+ */
+ PyArray_ScanFunc *scanfunc;
+
+ /*
+ * Function to read a single value from a string
+ * and adjust the pointer; Can be NULL
+ */
+ PyArray_FromStrFunc *fromstr;
+
+ /*
+ * Function to determine if data is zero or not.
+ * If NULL, a default version is
+ * used at registration time.
+ */
+ PyArray_NonzeroFunc *nonzero;
+
+ /*
+ * Used for arange. Should return 0 on success
+ * and -1 on failure.
+ * Can be NULL.
+ */
+ PyArray_FillFunc *fill;
+
+ /*
+ * Function to fill arrays with scalar values
+ * Can be NULL
+ */
+ PyArray_FillWithScalarFunc *fillwithscalar;
+
+ /*
+ * Sorting functions
+ * Can be NULL
+ */
+ PyArray_SortFunc *sort[NPY_NSORTS];
+ PyArray_ArgSortFunc *argsort[NPY_NSORTS];
+
+ /*
+ * Dictionary of additional casting functions
+ * PyArray_VectorUnaryFuncs
+ * which can be populated to support casting
+ * to other registered types. Can be NULL
+ */
+ PyObject *castdict;
+
+ /*
+ * Functions useful for generalizing
+ * the casting rules.
+ * Can be NULL.
+ */
+ PyArray_ScalarKindFunc *scalarkind;
+ int **cancastscalarkindto;
+ int *cancastto;
+
+ PyArray_FastClipFunc *fastclip;
+ PyArray_FastPutmaskFunc *fastputmask;
+ PyArray_FastTakeFunc *fasttake;
+
+ /*
+ * Function to select smallest
+ * Can be NULL
+ */
+ PyArray_ArgFunc *argmin;
+
+} PyArray_ArrFuncs;
+
+/* The item must be reference counted when it is inserted or extracted. */
+#define NPY_ITEM_REFCOUNT 0x01
+/* Same as needing REFCOUNT */
+#define NPY_ITEM_HASOBJECT 0x01
+/* Convert to list for pickling */
+#define NPY_LIST_PICKLE 0x02
+/* The item is a POINTER */
+#define NPY_ITEM_IS_POINTER 0x04
+/* memory needs to be initialized for this data-type */
+#define NPY_NEEDS_INIT 0x08
+/* operations need Python C-API so don't give-up thread. */
+#define NPY_NEEDS_PYAPI 0x10
+/* Use f.getitem when extracting elements of this data-type */
+#define NPY_USE_GETITEM 0x20
+/* Use f.setitem when creating a 0-d array from this data-type. */
+#define NPY_USE_SETITEM 0x40
+/* A sticky flag specifically for structured arrays */
+#define NPY_ALIGNED_STRUCT 0x80
+
+/*
+ * These are inherited by the global data-type if any data-types in the
+ * fields have them
+ */
+#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \
+ NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI)
+
+#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \
+ NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \
+ NPY_NEEDS_INIT | NPY_NEEDS_PYAPI)
+
+#define PyDataType_FLAGCHK(dtype, flag) \
+ (((dtype)->flags & (flag)) == (flag))
+
+#define PyDataType_REFCHK(dtype) \
+ PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)
+
+typedef struct _PyArray_Descr {
+ PyObject_HEAD
+ /*
+ * the type object representing an
+ * instance of this type -- should not
+ * be two type_numbers with the same type
+ * object.
+ */
+ PyTypeObject *typeobj;
+ /* kind for this type */
+ char kind;
+ /* unique-character representing this type */
+ char type;
+ /*
+ * '>' (big), '<' (little), '|'
+ * (not-applicable), or '=' (native).
+ */
+ char byteorder;
+ /* flags describing data type */
+ char flags;
+ /* number representing this type */
+ int type_num;
+ /* element size (itemsize) for this type */
+ int elsize;
+ /* alignment needed for this type */
+ int alignment;
+ /*
+ * Non-NULL if this type
+ * is an array (C-contiguous)
+ * of some other type
+ */
+ struct _arr_descr *subarray;
+ /*
+ * The fields dictionary for this type
+ * For statically defined descr this
+ * is always Py_None
+ */
+ PyObject *fields;
+ /*
+ * An ordered tuple of field names or NULL
+ * if no fields are defined
+ */
+ PyObject *names;
+ /*
+ * a table of functions specific for each
+ * basic data descriptor
+ */
+ PyArray_ArrFuncs *f;
+ /* Metadata about this dtype */
+ PyObject *metadata;
+ /*
+ * Metadata specific to the C implementation
+ * of the particular dtype. This was added
+ * for NumPy 1.7.0.
+ */
+ NpyAuxData *c_metadata;
+ /* Cached hash value (-1 if not yet computed).
+ * This was added for NumPy 2.0.0.
+ */
+ npy_hash_t hash;
+} PyArray_Descr;
+
+typedef struct _arr_descr {
+ PyArray_Descr *base;
+ PyObject *shape; /* a tuple */
+} PyArray_ArrayDescr;
+
+/*
+ * Memory handler structure for array data.
+ */
+/* The declaration of free differs from PyMemAllocatorEx */
+typedef struct {
+ void *ctx;
+ void* (*malloc) (void *ctx, size_t size);
+ void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+ void* (*realloc) (void *ctx, void *ptr, size_t new_size);
+ void (*free) (void *ctx, void *ptr, size_t size);
+ /*
+ * This is the end of the version=1 struct. Only add new fields after
+ * this line
+ */
+} PyDataMemAllocator;
+
+typedef struct {
+ char name[127]; /* multiple of 64 to keep the struct aligned */
+ uint8_t version; /* currently 1 */
+ PyDataMemAllocator allocator;
+} PyDataMem_Handler;
+
+
+/*
+ * The main array object structure.
+ *
+ * For a number of releases it has been recommended to use the inline
+ * functions defined below (PyArray_DATA and friends) to access the
+ * fields here. Direct access to the members themselves is deprecated.
+ * To ensure that your code does not use deprecated access,
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ * (or NPY_1_8_API_VERSION or higher as required).
+ */
+/* This struct will be moved to a private header in a future release */
+typedef struct tagPyArrayObject_fields {
+ PyObject_HEAD
+ /* Pointer to the raw data buffer */
+ char *data;
+ /* The number of dimensions, also called 'ndim' */
+ int nd;
+ /* The size in each dimension, also called 'shape' */
+ npy_intp *dimensions;
+ /*
+ * Number of bytes to jump to get to the
+ * next element in each dimension
+ */
+ npy_intp *strides;
+ /*
+ * This object is decref'd upon
+ * deletion of array. Except in the
+ * case of WRITEBACKIFCOPY which has
+ * special handling.
+ *
+ * For views it points to the original
+ * array, collapsed so no chains of
+ * views occur.
+ *
+ * For creation from buffer object it
+ * points to an object that should be
+ * decref'd on deletion
+ *
+ * For WRITEBACKIFCOPY flag this is an
+ * array to-be-updated upon calling
+ * PyArray_ResolveWritebackIfCopy
+ */
+ PyObject *base;
+ /* Pointer to type structure */
+ PyArray_Descr *descr;
+ /* Flags describing array -- see below */
+ int flags;
+ /* For weak references */
+ PyObject *weakreflist;
+ void *_buffer_info; /* private buffer info, tagged to allow warning */
+ /*
+ * For malloc/calloc/realloc/free per object
+ */
+ PyObject *mem_handler;
+} PyArrayObject_fields;
+
+/*
+ * To hide the implementation details, we only expose
+ * the Python struct HEAD.
+ */
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+/*
+ * Can't put this in npy_deprecated_api.h like the others.
+ * PyArrayObject field access is deprecated as of NumPy 1.7.
+ */
+typedef PyArrayObject_fields PyArrayObject;
+#else
+typedef struct tagPyArrayObject {
+ PyObject_HEAD
+} PyArrayObject;
+#endif
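+
+/*
+ * Editor's illustrative sketch (not part of NumPy): a typical extension
+ * module opts in to the non-deprecated API, and thereby to the opaque
+ * PyArrayObject above, by defining the macro before any NumPy include:
+ *
+ *     #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ *     #include <numpy/arrayobject.h>
+ */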
+
+/*
+ * Removed 2020-Nov-25, NumPy 1.20
+ * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields))
+ *
+ * The above macro was removed as it gave a false sense of a stable ABI
+ * with respect to the structures size. If you require a runtime constant,
+ * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please
+ * see the PyArrayObject documentation or ask the NumPy developers for
+ * information on how to correctly replace the macro in a way that is
+ * compatible with multiple NumPy versions.
+ */
+
+
+/* Array Flags Object */
+typedef struct PyArrayFlagsObject {
+ PyObject_HEAD
+ PyObject *arr;
+ int flags;
+} PyArrayFlagsObject;
+
+/* Mirrors buffer object to ptr */
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *base;
+ void *ptr;
+ npy_intp len;
+ int flags;
+} PyArray_Chunk;
+
+typedef struct {
+ NPY_DATETIMEUNIT base;
+ int num;
+} PyArray_DatetimeMetaData;
+
+typedef struct {
+ NpyAuxData base;
+ PyArray_DatetimeMetaData meta;
+} PyArray_DatetimeDTypeMetaData;
+
+/*
+ * This structure contains an exploded view of a date-time value.
+ * NaT is represented by year == NPY_DATETIME_NAT.
+ */
+typedef struct {
+ npy_int64 year;
+ npy_int32 month, day, hour, min, sec, us, ps, as;
+} npy_datetimestruct;
+
+/* This is not used internally. */
+typedef struct {
+ npy_int64 day;
+ npy_int32 sec, us, ps, as;
+} npy_timedeltastruct;
+
+typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
+
+/*
+ * Means c-style contiguous (last index varies the fastest). The data
+ * elements right after each other.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_C_CONTIGUOUS 0x0001
+
+/*
+ * Set if array is a contiguous Fortran array: the first index varies
+ * the fastest in memory (strides array is reverse of C-contiguous
+ * array)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_F_CONTIGUOUS 0x0002
+
+/*
+ * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a
+ * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with
+ * more than one dimension can be C_CONTIGUOUS and F_CONTIGUOUS
+ * at the same time if they have either zero or one element.
+ * A higher dimensional array always has the same contiguity flags as
+ * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are
+ * effectively ignored when checking for contiguity.
+ */
+
+/*
+ * If set, the array owns the data: it will be free'd when the array
+ * is deleted.
+ *
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_OWNDATA 0x0004
+
+/*
+ * An array never has the next four set; they're only used as parameter
+ * flags to the various FromAny functions
+ *
+ * This flag may be requested in constructor functions.
+ */
+
+/* Cause a cast to occur regardless of whether or not it is safe. */
+#define NPY_ARRAY_FORCECAST 0x0010
+
+/*
+ * Always copy the array. Returned arrays are always CONTIGUOUS,
+ * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURECOPY 0x0020
+
+/*
+ * Make sure the returned array is a base-class ndarray
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSUREARRAY 0x0040
+
+/*
+ * Make sure that the strides are in units of the element size. Needed
+ * for some operations with record-arrays.
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ELEMENTSTRIDES 0x0080
+
+/*
+ * Array data is aligned on the appropriate memory address for the type
+ * stored according to how the compiler would align things (e.g., an
+ * array of integers (4 bytes each) starts on a memory address that's
+ * a multiple of 4)
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_ALIGNED 0x0100
+
+/*
+ * Array data has the native endianness
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_NOTSWAPPED 0x0200
+
+/*
+ * Array data is writeable
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEABLE 0x0400
+
+/*
+ * If this flag is set, then base contains a pointer to an array of
+ * the same size that should be updated with the current contents of
+ * this array when PyArray_ResolveWritebackIfCopy is called.
+ *
+ * This flag may be requested in constructor functions.
+ * This flag may be tested for in PyArray_FLAGS(arr).
+ */
+#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000
+
+/*
+ * No copy may be made while converting from an object/array (result is a view)
+ *
+ * This flag may be requested in constructor functions.
+ */
+#define NPY_ARRAY_ENSURENOCOPY 0x4000
+
+/*
+ * NOTE: there are also internal flags defined in multiarray/arrayobject.h,
+ * which start at bit 31 and work down.
+ */
+
+#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \
+ NPY_ARRAY_WRITEABLE)
+#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \
+ NPY_ARRAY_WRITEABLE | \
+ NPY_ARRAY_NOTSWAPPED)
+#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_BEHAVED)
+#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO)
+#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY)
+#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \
+ NPY_ARRAY_WRITEBACKIFCOPY)
+#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO)
+#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY)
+#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \
+ NPY_ARRAY_WRITEBACKIFCOPY)
+
+#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \
+ NPY_ARRAY_F_CONTIGUOUS | \
+ NPY_ARRAY_ALIGNED)
+
+/* This flag is for the array interface, not PyArrayObject */
+#define NPY_ARR_HAS_DESCR 0x0800
+
+
+
+
+/*
+ * Size of internal buffers used for alignment. Make NPY_BUFSIZE a
+ * multiple of sizeof(npy_cdouble) -- usually 16 -- so that ufunc buffers are aligned
+ */
+#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble))
+#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000)
+#define NPY_BUFSIZE 8192
+/* buffer stress test size: */
+/*#define NPY_BUFSIZE 17*/
+
+#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
+#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
+#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \
+ ((p).real < (q).real)))
+#define PyArray_CGT(p,q) ((((p).real==(q).real) ? ((p).imag > (q).imag) : \
+ ((p).real > (q).real)))
+#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \
+ ((p).real <= (q).real)))
+#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \
+ ((p).real >= (q).real)))
+#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag))
+#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag))
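+
+/*
+ * Editor's illustrative sketch (not part of NumPy): these macros impose a
+ * lexicographic order on complex values -- real parts first, imaginary
+ * parts as the tie-breaker.
+ */
+#if 0   /* example only */
+npy_cdouble p = {1.0, 2.0};
+npy_cdouble q = {1.0, 3.0};
+int lt = PyArray_CLT(p, q);   /* 1: real parts tie, and 2.0 < 3.0 */
+int eq = PyArray_CEQ(p, q);   /* 0: imaginary parts differ */
+#endif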
+
+/*
+ * C API: consists of Macros and functions. The MACROS are defined
+ * here.
+ */
+
+
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED)
+
+#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS)
+
+/* the variable is used in some places, so always define it */
+#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;
+#if NPY_ALLOW_THREADS
+#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);
+#define NPY_END_THREADS do { if (_save) \
+ { PyEval_RestoreThread(_save); _save = NULL;} } while (0);
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \
+ { _save = PyEval_SaveThread();} } while (0);
+
+#define NPY_BEGIN_THREADS_DESCR(dtype) \
+ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+ NPY_BEGIN_THREADS;} while (0);
+
+#define NPY_END_THREADS_DESCR(dtype) \
+ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+ NPY_END_THREADS; } while (0);
+
+#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__;
+#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0);
+#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0);
+#else
+#define NPY_BEGIN_ALLOW_THREADS
+#define NPY_END_ALLOW_THREADS
+#define NPY_BEGIN_THREADS
+#define NPY_END_THREADS
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size)
+#define NPY_BEGIN_THREADS_DESCR(dtype)
+#define NPY_END_THREADS_DESCR(dtype)
+#define NPY_ALLOW_C_API_DEF
+#define NPY_ALLOW_C_API
+#define NPY_DISABLE_C_API
+#endif
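+
+/*
+ * Editor's illustrative sketch (not part of NumPy): the canonical pattern
+ * for releasing the GIL around a pure-C loop with the macros above.
+ * `crunch_numbers` is a hypothetical worker that uses no Python C-API.
+ */
+#if 0   /* example only */
+static void
+example_without_gil(char *data, npy_intp n)
+{
+    NPY_BEGIN_THREADS_DEF;      /* declares the saved thread state */
+    NPY_BEGIN_THREADS;          /* releases the GIL */
+    crunch_numbers(data, n);    /* hypothetical, must not touch Python */
+    NPY_END_THREADS;            /* re-acquires the GIL */
+}
+#endif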
+
+/**********************************
+ * The nditer object, added in 1.6
+ **********************************/
+
+/* The actual structure of the iterator is an internal detail */
+typedef struct NpyIter_InternalOnly NpyIter;
+
+/* Iterator function pointers that may be specialized */
+typedef int (NpyIter_IterNextFunc)(NpyIter *iter);
+typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter,
+ npy_intp *outcoords);
+
+/*** Global flags that may be passed to the iterator constructors ***/
+
+/* Track an index representing C order */
+#define NPY_ITER_C_INDEX 0x00000001
+/* Track an index representing Fortran order */
+#define NPY_ITER_F_INDEX 0x00000002
+/* Track a multi-index */
+#define NPY_ITER_MULTI_INDEX 0x00000004
+/* User code external to the iterator does the 1-dimensional innermost loop */
+#define NPY_ITER_EXTERNAL_LOOP 0x00000008
+/* Convert all the operands to a common data type */
+#define NPY_ITER_COMMON_DTYPE 0x00000010
+/* Operands may hold references, requiring API access during iteration */
+#define NPY_ITER_REFS_OK 0x00000020
+/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */
+#define NPY_ITER_ZEROSIZE_OK 0x00000040
+/* Permits reductions (size-0 stride with dimension size > 1) */
+#define NPY_ITER_REDUCE_OK 0x00000080
+/* Enables sub-range iteration */
+#define NPY_ITER_RANGED 0x00000100
+/* Enables buffering */
+#define NPY_ITER_BUFFERED 0x00000200
+/* When buffering is enabled, grows the inner loop if possible */
+#define NPY_ITER_GROWINNER 0x00000400
+/* Delay allocation of buffers until first Reset* call */
+#define NPY_ITER_DELAY_BUFALLOC 0x00000800
+/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */
+#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000
+/*
+ * If output operands overlap with other operands (based on heuristics that
+ * have false positives but no false negatives), make temporary copies to
+ * eliminate overlap.
+ */
+#define NPY_ITER_COPY_IF_OVERLAP 0x00002000
+
+/*** Per-operand flags that may be passed to the iterator constructors ***/
+
+/* The operand will be read from and written to */
+#define NPY_ITER_READWRITE 0x00010000
+/* The operand will only be read from */
+#define NPY_ITER_READONLY 0x00020000
+/* The operand will only be written to */
+#define NPY_ITER_WRITEONLY 0x00040000
+/* The operand's data must be in native byte order */
+#define NPY_ITER_NBO 0x00080000
+/* The operand's data must be aligned */
+#define NPY_ITER_ALIGNED 0x00100000
+/* The operand's data must be contiguous (within the inner loop) */
+#define NPY_ITER_CONTIG 0x00200000
+/* The operand may be copied to satisfy requirements */
+#define NPY_ITER_COPY 0x00400000
+/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */
+#define NPY_ITER_UPDATEIFCOPY 0x00800000
+/* Allocate the operand if it is NULL */
+#define NPY_ITER_ALLOCATE 0x01000000
+/* If an operand is allocated, don't use any subtype */
+#define NPY_ITER_NO_SUBTYPE 0x02000000
+/* This is a virtual array slot, operand is NULL but temporary data is there */
+#define NPY_ITER_VIRTUAL 0x04000000
+/* Require that the dimension match the iterator dimensions exactly */
+#define NPY_ITER_NO_BROADCAST 0x08000000
+/* A mask is being used on this array, affects buffer -> array copy */
+#define NPY_ITER_WRITEMASKED 0x10000000
+/* This array is the mask for all WRITEMASKED operands */
+#define NPY_ITER_ARRAYMASK 0x20000000
+/* Assume iterator order data access for COPY_IF_OVERLAP */
+#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000
+
+#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff
+#define NPY_ITER_PER_OP_FLAGS 0xffff0000
+
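+/*
+ * Editor's illustrative sketch (not part of NumPy's headers): a readonly,
+ * external-loop use of the flags above via the public nditer C-API
+ * (NpyIter_New and friends, declared elsewhere). Error handling is
+ * abbreviated; this is a sketch, not a definitive implementation.
+ */
+#if 0   /* example only */
+static double
+example_sum_doubles(PyArrayObject *arr)
+{
+    double sum = 0.0;
+    NpyIter *iter = NpyIter_New(arr,
+                                NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
+                                NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+    NpyIter_IterNextFunc *iternext;
+    char **dataptr;
+    npy_intp *strideptr, *sizeptr;
+
+    if (iter == NULL) {
+        return -1.0;
+    }
+    iternext = NpyIter_GetIterNext(iter, NULL);
+    dataptr = NpyIter_GetDataPtrArray(iter);
+    strideptr = NpyIter_GetInnerStrideArray(iter);
+    sizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+    do {
+        /* with NPY_ITER_EXTERNAL_LOOP, we run the innermost loop ourselves */
+        char *data = *dataptr;
+        npy_intp stride = *strideptr, count = *sizeptr;
+        while (count--) {
+            sum += *(double *)data;
+            data += stride;
+        }
+    } while (iternext(iter));
+    NpyIter_Deallocate(iter);
+    return sum;
+}
+#endif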
+
+/*****************************
+ * Basic iterator object
+ *****************************/
+
+/* FWD declaration */
+typedef struct PyArrayIterObject_tag PyArrayIterObject;
+
+/*
+ * type of the function which translates a set of coordinates to a
+ * pointer to the data
+ */
+typedef char* (*npy_iter_get_dataptr_t)(
+ PyArrayIterObject* iter, const npy_intp*);
+
+struct PyArrayIterObject_tag {
+ PyObject_HEAD
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+} ;
+
+
+/* Iterator API */
+#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type)
+
+#define _PyAIT(it) ((PyArrayIterObject *)(it))
+#define PyArray_ITER_RESET(it) do { \
+ _PyAIT(it)->index = 0; \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ memset(_PyAIT(it)->coordinates, 0, \
+ (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \
+} while (0)
+
+#define _PyArray_ITER_NEXT1(it) do { \
+ (it)->dataptr += _PyAIT(it)->strides[0]; \
+ (it)->coordinates[0]++; \
+} while (0)
+
+#define _PyArray_ITER_NEXT2(it) do { \
+ if ((it)->coordinates[1] < (it)->dims_m1[1]) { \
+ (it)->coordinates[1]++; \
+ (it)->dataptr += (it)->strides[1]; \
+ } \
+ else { \
+ (it)->coordinates[1] = 0; \
+ (it)->coordinates[0]++; \
+ (it)->dataptr += (it)->strides[0] - \
+ (it)->backstrides[1]; \
+ } \
+} while (0)
+
+#define PyArray_ITER_NEXT(it) do { \
+ _PyAIT(it)->index++; \
+ if (_PyAIT(it)->nd_m1 == 0) { \
+ _PyArray_ITER_NEXT1(_PyAIT(it)); \
+ } \
+ else if (_PyAIT(it)->contiguous) \
+ _PyAIT(it)->dataptr += PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+ else if (_PyAIT(it)->nd_m1 == 1) { \
+ _PyArray_ITER_NEXT2(_PyAIT(it)); \
+ } \
+ else { \
+ int __npy_i; \
+ for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \
+ if (_PyAIT(it)->coordinates[__npy_i] < \
+ _PyAIT(it)->dims_m1[__npy_i]) { \
+ _PyAIT(it)->coordinates[__npy_i]++; \
+ _PyAIT(it)->dataptr += \
+ _PyAIT(it)->strides[__npy_i]; \
+ break; \
+ } \
+ else { \
+ _PyAIT(it)->coordinates[__npy_i] = 0; \
+ _PyAIT(it)->dataptr -= \
+ _PyAIT(it)->backstrides[__npy_i]; \
+ } \
+ } \
+ } \
+} while (0)
+
+#define PyArray_ITER_GOTO(it, destination) do { \
+ int __npy_i; \
+ _PyAIT(it)->index = 0; \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \
+ if (destination[__npy_i] < 0) { \
+ destination[__npy_i] += \
+ _PyAIT(it)->dims_m1[__npy_i]+1; \
+ } \
+ _PyAIT(it)->dataptr += destination[__npy_i] * \
+ _PyAIT(it)->strides[__npy_i]; \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ destination[__npy_i]; \
+ _PyAIT(it)->index += destination[__npy_i] * \
+ ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \
+ _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \
+ } \
+} while (0)
+
+#define PyArray_ITER_GOTO1D(it, ind) do { \
+ int __npy_i; \
+ npy_intp __npy_ind = (npy_intp)(ind); \
+ if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \
+ _PyAIT(it)->index = __npy_ind; \
+ if (_PyAIT(it)->nd_m1 == 0) { \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+ __npy_ind * _PyAIT(it)->strides[0]; \
+ } \
+ else if (_PyAIT(it)->contiguous) \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \
+ __npy_ind * PyArray_DESCR(_PyAIT(it)->ao)->elsize; \
+ else { \
+ _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \
+ for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \
+ __npy_i++) { \
+ _PyAIT(it)->coordinates[__npy_i] = \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]); \
+ _PyAIT(it)->dataptr += \
+ (__npy_ind / _PyAIT(it)->factors[__npy_i]) \
+ * _PyAIT(it)->strides[__npy_i]; \
+ __npy_ind %= _PyAIT(it)->factors[__npy_i]; \
+ } \
+ } \
+} while (0)
+
+#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr))
+
+#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size)
+
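+/*
+ * Editor's illustrative sketch (not part of NumPy's headers): walking an
+ * array element by element with the flat-iterator macros above.
+ * PyArray_IterNew is the public constructor declared elsewhere.
+ */
+#if 0   /* example only */
+static npy_intp
+example_count_elements(PyArrayObject *arr)
+{
+    npy_intp count = 0;
+    PyObject *it = PyArray_IterNew((PyObject *)arr);
+    if (it == NULL) {
+        return -1;
+    }
+    while (PyArray_ITER_NOTDONE(it)) {
+        /* PyArray_ITER_DATA(it) points at the current element */
+        count++;
+        PyArray_ITER_NEXT(it);
+    }
+    Py_DECREF(it);
+    return count;
+}
+#endif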
+
+/*
+ * Any object passed to PyArray_Broadcast must be binary compatible
+ * with this structure.
+ */
+
+typedef struct {
+ PyObject_HEAD
+ int numiter; /* number of iters */
+ npy_intp size; /* broadcasted size */
+ npy_intp index; /* current index */
+ int nd; /* number of dims */
+ npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */
+ PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */
+} PyArrayMultiIterObject;
+
+#define _PyMIT(m) ((PyArrayMultiIterObject *)(m))
+#define PyArray_MultiIter_RESET(multi) do { \
+ int __npy_mi; \
+ _PyMIT(multi)->index = 0; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \
+ } \
+} while (0)
+
+#define PyArray_MultiIter_NEXT(multi) do { \
+ int __npy_mi; \
+ _PyMIT(multi)->index++; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \
+ } \
+} while (0)
+
+#define PyArray_MultiIter_GOTO(multi, dest) do { \
+ int __npy_mi; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \
+ } \
+ _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \
+} while (0)
+
+#define PyArray_MultiIter_GOTO1D(multi, ind) do { \
+ int __npy_mi; \
+ for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \
+ PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \
+ } \
+ _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \
+} while (0)
+
+#define PyArray_MultiIter_DATA(multi, i) \
+ ((void *)(_PyMIT(multi)->iters[i]->dataptr))
+
+#define PyArray_MultiIter_NEXTi(multi, i) \
+ PyArray_ITER_NEXT(_PyMIT(multi)->iters[i])
+
+#define PyArray_MultiIter_NOTDONE(multi) \
+ (_PyMIT(multi)->index < _PyMIT(multi)->size)
+
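+/*
+ * Editor's illustrative sketch (not part of NumPy's headers): iterating
+ * two arrays in broadcast lockstep with the multi-iterator macros above.
+ * PyArray_MultiIterNew is the public constructor declared elsewhere;
+ * `a` and `b` are hypothetical PyObject * arrays.
+ */
+#if 0   /* example only */
+PyArrayMultiIterObject *multi = (PyArrayMultiIterObject *)
+        PyArray_MultiIterNew(2, (PyObject *)a, (PyObject *)b);
+if (multi != NULL) {
+    while (PyArray_MultiIter_NOTDONE(multi)) {
+        void *pa = PyArray_MultiIter_DATA(multi, 0);
+        void *pb = PyArray_MultiIter_DATA(multi, 1);
+        /* ... operate on the broadcast element pair (pa, pb) ... */
+        PyArray_MultiIter_NEXT(multi);
+    }
+    Py_DECREF(multi);
+}
+#endif
+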
+/*
+ * Store the information needed for fancy-indexing over an array. The
+ * fields are slightly unordered to keep consec, dataptr and subspace
+ * where they were originally.
+ */
+typedef struct {
+ PyObject_HEAD
+ /*
+ * Multi-iterator portion --- needs to be present in this
+ * order to work with PyArray_Broadcast
+ */
+
+ int numiter; /* number of index-array
+ iterators */
+ npy_intp size; /* size of broadcasted
+ result */
+ npy_intp index; /* current index */
+ int nd; /* number of dims */
+ npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */
+ NpyIter *outer; /* index objects
+ iterator */
+ void *unused[NPY_MAXDIMS - 2];
+ PyArrayObject *array;
+ /* Flat iterator for the indexed array. Solely for compatibility. */
+ PyArrayIterObject *ait;
+
+ /*
+ * Subspace array. For binary compatibility (was an iterator,
+ * but only the check for NULL should be used).
+ */
+ PyArrayObject *subspace;
+
+ /*
+ * if subspace iteration, then this is the array of axes in
+ * the underlying array represented by the index objects
+ */
+ int iteraxes[NPY_MAXDIMS];
+ npy_intp fancy_strides[NPY_MAXDIMS];
+
+ /* pointer when all fancy indices are 0 */
+ char *baseoffset;
+
+ /*
+ * after binding consec denotes at which axis the fancy axes
+ * are inserted.
+ */
+ int consec;
+ char *dataptr;
+
+ int nd_fancy;
+ npy_intp fancy_dims[NPY_MAXDIMS];
+
+ /*
+ * Whether the iterator (any of the iterators) requires API. This is
+ * unused by NumPy itself; ArrayMethod flags are more precise.
+ */
+ int needs_api;
+
+ /*
+ * Extra op information.
+ */
+ PyArrayObject *extra_op;
+ PyArray_Descr *extra_op_dtype; /* desired dtype */
+ npy_uint32 *extra_op_flags; /* Iterator flags */
+
+ NpyIter *extra_op_iter;
+ NpyIter_IterNextFunc *extra_op_next;
+ char **extra_op_ptrs;
+
+ /*
+ * Information about the iteration state.
+ */
+ NpyIter_IterNextFunc *outer_next;
+ char **outer_ptrs;
+ npy_intp *outer_strides;
+
+ /*
+ * Information about the subspace iterator.
+ */
+ NpyIter *subspace_iter;
+ NpyIter_IterNextFunc *subspace_next;
+ char **subspace_ptrs;
+ npy_intp *subspace_strides;
+
+ /* Count for the external loop (whichever it is) for API iteration */
+ npy_intp iter_count;
+
+} PyArrayMapIterObject;
+
+enum {
+ NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+ NPY_NEIGHBORHOOD_ITER_ONE_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,
+ NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+};
+
+typedef struct {
+ PyObject_HEAD
+
+ /*
+ * PyArrayIterObject part: keep this in this exact order
+ */
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+
+ /*
+ * New members
+ */
+ npy_intp nd;
+
+ /* dimensions is the shape of the array */
+ npy_intp dimensions[NPY_MAXDIMS];
+
+ /*
+ * Neighborhood point coordinates are computed relative to the
+ * point pointed to by _internal_iter
+ */
+ PyArrayIterObject* _internal_iter;
+ /*
+ * To keep a reference to the representation of the constant value
+ * for constant padding
+ */
+ char* constant;
+
+ int mode;
+} PyArrayNeighborhoodIterObject;
+
+/*
+ * Neighborhood iterator API
+ */
+
+/* General: those work for any mode */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
+#if 0
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
+#endif
+
+/*
+ * Include inline implementations - functions defined there are not
+ * considered public API
+ */
+#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+#include "_neighborhood_iterator_imp.h"
+#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_
+
+
+
+/* The default array type */
+#define NPY_DEFAULT_TYPE NPY_DOUBLE
+
+/*
+ * All sorts of useful ways to look into a PyArrayObject. It is recommended
+ * to use PyArrayObject * objects instead of always casting from PyObject *,
+ * for improved type checking.
+ *
+ * In many cases here the macro versions of the accessors are deprecated,
+ * but can't be immediately changed to inline functions because the
+ * preexisting macros accept PyObject * and do automatic casts. Inline
+ * functions accepting PyArrayObject * provide for some compile-time
+ * checking of correctness when working with these objects in C.
+ */
+
+#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \
+ PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS))
+
+#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \
+ (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)))
+
+#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \
+ NPY_ARRAY_F_CONTIGUOUS : 0))
+
+#if (defined(NPY_NO_DEPRECATED_API) && (NPY_1_7_API_VERSION <= NPY_NO_DEPRECATED_API))
+/*
+ * Changing access macros into functions, to allow for future hiding
+ * of the internal memory layout. This later hiding will allow the 2.x series
+ * to change the internal representation of arrays without affecting
+ * ABI compatibility.
+ */
+
+static NPY_INLINE int
+PyArray_NDIM(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->nd;
+}
+
+static NPY_INLINE void *
+PyArray_DATA(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
+static NPY_INLINE char *
+PyArray_BYTES(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->data;
+}
+
+static NPY_INLINE npy_intp *
+PyArray_DIMS(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+static NPY_INLINE npy_intp *
+PyArray_STRIDES(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->strides;
+}
+
+static NPY_INLINE npy_intp
+PyArray_DIM(const PyArrayObject *arr, int idim)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions[idim];
+}
+
+static NPY_INLINE npy_intp
+PyArray_STRIDE(const PyArrayObject *arr, int istride)
+{
+ return ((PyArrayObject_fields *)arr)->strides[istride];
+}
+
+static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject *
+PyArray_BASE(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->base;
+}
+
+static NPY_INLINE NPY_RETURNS_BORROWED_REF PyArray_Descr *
+PyArray_DESCR(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static NPY_INLINE int
+PyArray_FLAGS(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->flags;
+}
+
+static NPY_INLINE npy_intp
+PyArray_ITEMSIZE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->elsize;
+}
+
+static NPY_INLINE int
+PyArray_TYPE(const PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->type_num;
+}
+
+static NPY_INLINE int
+PyArray_CHKFLAGS(const PyArrayObject *arr, int flags)
+{
+ return (PyArray_FLAGS(arr) & flags) == flags;
+}
+
+static NPY_INLINE PyObject *
+PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
+{
+ return ((PyArrayObject_fields *)arr)->descr->f->getitem(
+ (void *)itemptr, (PyArrayObject *)arr);
+}
+
+/*
+ * SETITEM should only be used if it is known that the value is a scalar
+ * and of a type understood by the array's dtype.
+ * Use `PyArray_Pack` if the value may be of a different dtype.
+ */
+static NPY_INLINE int
+PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)
+{
+ return ((PyArrayObject_fields *)arr)->descr->f->setitem(v, itemptr, arr);
+}
+
+#else
+
+/* These macros are deprecated as of NumPy 1.7. */
+#define PyArray_NDIM(obj) (((PyArrayObject_fields *)(obj))->nd)
+#define PyArray_BYTES(obj) (((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DATA(obj) ((void *)((PyArrayObject_fields *)(obj))->data)
+#define PyArray_DIMS(obj) (((PyArrayObject_fields *)(obj))->dimensions)
+#define PyArray_STRIDES(obj) (((PyArrayObject_fields *)(obj))->strides)
+#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n])
+#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n])
+#define PyArray_BASE(obj) (((PyArrayObject_fields *)(obj))->base)
+#define PyArray_DESCR(obj) (((PyArrayObject_fields *)(obj))->descr)
+#define PyArray_FLAGS(obj) (((PyArrayObject_fields *)(obj))->flags)
+#define PyArray_CHKFLAGS(m, FLAGS) \
+ ((((PyArrayObject_fields *)(m))->flags & (FLAGS)) == (FLAGS))
+#define PyArray_ITEMSIZE(obj) \
+ (((PyArrayObject_fields *)(obj))->descr->elsize)
+#define PyArray_TYPE(obj) \
+ (((PyArrayObject_fields *)(obj))->descr->type_num)
+#define PyArray_GETITEM(obj,itemptr) \
+ PyArray_DESCR(obj)->f->getitem((char *)(itemptr), \
+ (PyArrayObject *)(obj))
+
+#define PyArray_SETITEM(obj,itemptr,v) \
+ PyArray_DESCR(obj)->f->setitem((PyObject *)(v), \
+ (char *)(itemptr), \
+ (PyArrayObject *)(obj))
+#endif
+
+static NPY_INLINE PyArray_Descr *
+PyArray_DTYPE(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->descr;
+}
+
+static NPY_INLINE npy_intp *
+PyArray_SHAPE(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->dimensions;
+}
+
+/*
+ * Enables the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static NPY_INLINE void
+PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)
+{
+ ((PyArrayObject_fields *)arr)->flags |= flags;
+}
+
+/*
+ * Clears the specified array flags. Does no checking,
+ * assumes you know what you're doing.
+ */
+static NPY_INLINE void
+PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
+{
+ ((PyArrayObject_fields *)arr)->flags &= ~flags;
+}
+
+static NPY_INLINE NPY_RETURNS_BORROWED_REF PyObject *
+PyArray_HANDLER(PyArrayObject *arr)
+{
+ return ((PyArrayObject_fields *)arr)->mem_handler;
+}
+
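+/*
+ * Editor's illustrative sketch (not part of NumPy's headers): computing an
+ * array's total byte size through the inline accessors above instead of
+ * touching struct fields directly.
+ */
+#if 0   /* example only */
+static npy_intp
+example_nbytes(PyArrayObject *arr)
+{
+    npy_intp nbytes = PyArray_ITEMSIZE(arr);
+    int i;
+
+    for (i = 0; i < PyArray_NDIM(arr); i++) {
+        nbytes *= PyArray_DIM(arr, i);
+    }
+    return nbytes;
+}
+#endif
+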
+#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)
+
+#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \
+ ((type) == NPY_USHORT) || \
+ ((type) == NPY_UINT) || \
+ ((type) == NPY_ULONG) || \
+ ((type) == NPY_ULONGLONG))
+
+#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \
+ ((type) == NPY_SHORT) || \
+ ((type) == NPY_INT) || \
+ ((type) == NPY_LONG) || \
+ ((type) == NPY_LONGLONG))
+
+#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \
+ ((type) <= NPY_ULONGLONG))
+
+#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \
+ ((type) <= NPY_LONGDOUBLE)) || \
+ ((type) == NPY_HALF))
+
+#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \
+ ((type) == NPY_HALF))
+
+#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \
+ ((type) == NPY_UNICODE))
+
+#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \
+ ((type) <= NPY_CLONGDOUBLE))
+
+#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \
+ ((type) == NPY_DOUBLE) || \
+ ((type) == NPY_CDOUBLE) || \
+ ((type) == NPY_BOOL) || \
+ ((type) == NPY_OBJECT ))
+
+#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \
+ ((type) <=NPY_VOID))
+
+#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \
+ ((type) <=NPY_TIMEDELTA))
+
+#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \
+ ((type) < NPY_USERDEF+ \
+ NPY_NUMUSERTYPES))
+
+#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \
+ PyTypeNum_ISUSERDEF(type))
+
+#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT)
+
+
+#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num )
+#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
+#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL)
+#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+ !PyDataType_HASFIELDS(dtype))
+#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
+
+#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
+#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj))
+#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj))
+#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj))
+#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))
+#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj))
+#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))
+#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj))
+#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))
+#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))
+#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))
+#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))
+#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))
+
+ /*
+ * FIXME: This should check for a flag on the data-type that
+ * states whether or not it is variable length, because the
+ * ISFLEXIBLE check is hard-coded to the built-in data-types.
+ */
+#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))
+
+#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))
+
+
+#define NPY_LITTLE '<'
+#define NPY_BIG '>'
+#define NPY_NATIVE '='
+#define NPY_SWAP 's'
+#define NPY_IGNORE '|'
+
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
+#define NPY_NATBYTE NPY_BIG
+#define NPY_OPPBYTE NPY_LITTLE
+#else
+#define NPY_NATBYTE NPY_LITTLE
+#define NPY_OPPBYTE NPY_BIG
+#endif
+
+#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)
+#define PyArray_IsNativeByteOrder PyArray_ISNBO
+#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)
+#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \
+ PyArray_ISNOTSWAPPED(m))
+
+#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)
+#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)
+#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)
+#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)
+#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)
+#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)
+
+
+#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)
+#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))
+
+/************************************************************
+ * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.
+ ************************************************************/
+
+typedef struct {
+ npy_intp perm, stride;
+} npy_stride_sort_item;
+
+/************************************************************
+ * This is the form of the struct that's stored in the
+ * PyCapsule returned by an array's __array_struct__ attribute. See
+ * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * documentation.
+ ************************************************************/
+typedef struct {
+ int two; /*
+ * contains the integer 2 as a sanity
+ * check
+ */
+
+ int nd; /* number of dimensions */
+
+ char typekind; /*
+ * kind in array --- character code of
+ * typestr
+ */
+
+ int itemsize; /* size of each element */
+
+ int flags; /*
+ * how the data should be interpreted. Valid
+ * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),
+ * ALIGNED (0x100), NOTSWAPPED (0x200), and
+ * WRITEABLE (0x400). ARR_HAS_DESCR (0x800)
+ * states that arrdescr field is present in
+ * structure
+ */
+
+ npy_intp *shape; /*
+ * A length-nd array of shape
+ * information
+ */
+
+ npy_intp *strides; /* A length-nd array of stride information */
+
+ void *data; /* A pointer to the first element of the array */
+
+ PyObject *descr; /*
+ * A list of fields or NULL (ignored if flags
+ * does not have ARR_HAS_DESCR flag set)
+ */
+} PyArrayInterface;
+
+/*
+ * This is a function for hooking into the PyDataMem_NEW/FREE/RENEW functions.
+ * See the documentation for PyDataMem_SetEventHook.
+ */
+typedef void (PyDataMem_EventHookFunc)(void *inp, void *outp, size_t size,
+ void *user_data);
+
+
+/*
+ * PyArray_DTypeMeta related definitions.
+ *
+ * As of now, this API is preliminary and will be extended as necessary.
+ */
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ /*
+ * The structures defined in this block are currently considered
+ * private API and may change without warning!
+ * Part of this (at least the size) is expected to be public API without
+ * further modifications.
+ */
+ /* TODO: Make this definition public in the API, as soon as its settled */
+ NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;
+
+ /*
+ * While NumPy DTypes would not need to be heap types, the plan is to
+ * make DTypes available in Python, at which point they will be heap types.
+ * Since we also wish to add fields to the DType class, this looks like
+ * a typical instance definition, but with PyHeapTypeObject instead of
+ * only the PyObject_HEAD.
+ * This must only be exposed after extremely careful consideration,
+ * since it is a fairly complex construct which may be better left
+ * open to refactoring.
+ */
+ typedef struct {
+ PyHeapTypeObject super;
+
+ /*
+ * Most DTypes will have a singleton default instance, for the
+ * parametric legacy DTypes (bytes, string, void, datetime) this
+ * may be a pointer to the *prototype* instance?
+ */
+ PyArray_Descr *singleton;
+ /* Copy of the legacy DType's type number, usually invalid. */
+ int type_num;
+
+ /* The type object of the scalar instances (may be NULL?) */
+ PyTypeObject *scalar_type;
+ /*
+ * DType flags to signal legacy, parametric, or
+ * abstract. But plenty of space for additional information/flags.
+ */
+ npy_uint64 flags;
+
+ /*
+ * Use indirection in order to allow a fixed size for this struct.
+ * A stable ABI size makes creating a static DType less painful
+ * while also ensuring flexibility for all opaque API (with one
+ * indirection due to the pointer lookup).
+ */
+ void *dt_slots;
+ void *reserved[3];
+ } PyArray_DTypeMeta;
+
+#endif /* NPY_INTERNAL_BUILD */
+
+
+/*
+ * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files
+ * npy_*_*_deprecated_api.h are only included from here and nowhere else.
+ */
+#ifdef NPY_DEPRECATED_INCLUDES
+#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES."
+#endif
+#define NPY_DEPRECATED_INCLUDES
+#if !defined(NPY_NO_DEPRECATED_API) || \
+ (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)
+#include "npy_1_7_deprecated_api.h"
+#endif
+/*
+ * There is no file npy_1_8_deprecated_api.h since there are no additional
+ * deprecated API features in NumPy 1.8.
+ *
+ * Note to maintainers: insert code like the following in future NumPy
+ * versions.
+ *
+ * #if !defined(NPY_NO_DEPRECATED_API) || \
+ * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION)
+ * #include "npy_1_9_deprecated_api.h"
+ * #endif
+ */
+#undef NPY_DEPRECATED_INCLUDES
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h
new file mode 100644
index 00000000..cea5b0d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/noprefix.h
@@ -0,0 +1,211 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_
+
+/*
+ * You can directly include noprefix.h as a backward
+ * compatibility measure
+ */
+#ifndef NPY_NO_PREFIX
+#include "ndarrayobject.h"
+#include "npy_interrupt.h"
+#endif
+
+#define SIGSETJMP NPY_SIGSETJMP
+#define SIGLONGJMP NPY_SIGLONGJMP
+#define SIGJMP_BUF NPY_SIGJMP_BUF
+
+#define MAX_DIMS NPY_MAXDIMS
+
+#define longlong npy_longlong
+#define ulonglong npy_ulonglong
+#define Bool npy_bool
+#define longdouble npy_longdouble
+#define byte npy_byte
+
+#ifndef _BSD_SOURCE
+#define ushort npy_ushort
+#define uint npy_uint
+#define ulong npy_ulong
+#endif
+
+#define ubyte npy_ubyte
+#define ushort npy_ushort
+#define uint npy_uint
+#define ulong npy_ulong
+#define cfloat npy_cfloat
+#define cdouble npy_cdouble
+#define clongdouble npy_clongdouble
+#define Int8 npy_int8
+#define UInt8 npy_uint8
+#define Int16 npy_int16
+#define UInt16 npy_uint16
+#define Int32 npy_int32
+#define UInt32 npy_uint32
+#define Int64 npy_int64
+#define UInt64 npy_uint64
+#define Int128 npy_int128
+#define UInt128 npy_uint128
+#define Int256 npy_int256
+#define UInt256 npy_uint256
+#define Float16 npy_float16
+#define Complex32 npy_complex32
+#define Float32 npy_float32
+#define Complex64 npy_complex64
+#define Float64 npy_float64
+#define Complex128 npy_complex128
+#define Float80 npy_float80
+#define Complex160 npy_complex160
+#define Float96 npy_float96
+#define Complex192 npy_complex192
+#define Float128 npy_float128
+#define Complex256 npy_complex256
+#define intp npy_intp
+#define uintp npy_uintp
+#define datetime npy_datetime
+#define timedelta npy_timedelta
+
+#define SIZEOF_LONGLONG NPY_SIZEOF_LONGLONG
+#define SIZEOF_INTP NPY_SIZEOF_INTP
+#define SIZEOF_UINTP NPY_SIZEOF_UINTP
+#define SIZEOF_HALF NPY_SIZEOF_HALF
+#define SIZEOF_LONGDOUBLE NPY_SIZEOF_LONGDOUBLE
+#define SIZEOF_DATETIME NPY_SIZEOF_DATETIME
+#define SIZEOF_TIMEDELTA NPY_SIZEOF_TIMEDELTA
+
+#define LONGLONG_FMT NPY_LONGLONG_FMT
+#define ULONGLONG_FMT NPY_ULONGLONG_FMT
+#define LONGLONG_SUFFIX NPY_LONGLONG_SUFFIX
+#define ULONGLONG_SUFFIX NPY_ULONGLONG_SUFFIX
+
+#define MAX_INT8 127
+#define MIN_INT8 -128
+#define MAX_UINT8 255
+#define MAX_INT16 32767
+#define MIN_INT16 -32768
+#define MAX_UINT16 65535
+#define MAX_INT32 2147483647
+#define MIN_INT32 (-MAX_INT32 - 1)
+#define MAX_UINT32 4294967295U
+#define MAX_INT64 LONGLONG_SUFFIX(9223372036854775807)
+#define MIN_INT64 (-MAX_INT64 - LONGLONG_SUFFIX(1))
+#define MAX_UINT64 ULONGLONG_SUFFIX(18446744073709551615)
+#define MAX_INT128 LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define MIN_INT128 (-MAX_INT128 - LONGLONG_SUFFIX(1))
+#define MAX_UINT128 ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define MAX_INT256 LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define MIN_INT256 (-MAX_INT256 - LONGLONG_SUFFIX(1))
+#define MAX_UINT256 ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+
+#define MAX_BYTE NPY_MAX_BYTE
+#define MIN_BYTE NPY_MIN_BYTE
+#define MAX_UBYTE NPY_MAX_UBYTE
+#define MAX_SHORT NPY_MAX_SHORT
+#define MIN_SHORT NPY_MIN_SHORT
+#define MAX_USHORT NPY_MAX_USHORT
+#define MAX_INT NPY_MAX_INT
+#define MIN_INT NPY_MIN_INT
+#define MAX_UINT NPY_MAX_UINT
+#define MAX_LONG NPY_MAX_LONG
+#define MIN_LONG NPY_MIN_LONG
+#define MAX_ULONG NPY_MAX_ULONG
+#define MAX_LONGLONG NPY_MAX_LONGLONG
+#define MIN_LONGLONG NPY_MIN_LONGLONG
+#define MAX_ULONGLONG NPY_MAX_ULONGLONG
+#define MIN_DATETIME NPY_MIN_DATETIME
+#define MAX_DATETIME NPY_MAX_DATETIME
+#define MIN_TIMEDELTA NPY_MIN_TIMEDELTA
+#define MAX_TIMEDELTA NPY_MAX_TIMEDELTA
+
+#define BITSOF_BOOL NPY_BITSOF_BOOL
+#define BITSOF_CHAR NPY_BITSOF_CHAR
+#define BITSOF_SHORT NPY_BITSOF_SHORT
+#define BITSOF_INT NPY_BITSOF_INT
+#define BITSOF_LONG NPY_BITSOF_LONG
+#define BITSOF_LONGLONG NPY_BITSOF_LONGLONG
+#define BITSOF_HALF NPY_BITSOF_HALF
+#define BITSOF_FLOAT NPY_BITSOF_FLOAT
+#define BITSOF_DOUBLE NPY_BITSOF_DOUBLE
+#define BITSOF_LONGDOUBLE NPY_BITSOF_LONGDOUBLE
+#define BITSOF_DATETIME NPY_BITSOF_DATETIME
+#define BITSOF_TIMEDELTA NPY_BITSOF_TIMEDELTA
+
+#define _pya_malloc PyArray_malloc
+#define _pya_free PyArray_free
+#define _pya_realloc PyArray_realloc
+
+#define BEGIN_THREADS_DEF NPY_BEGIN_THREADS_DEF
+#define BEGIN_THREADS NPY_BEGIN_THREADS
+#define END_THREADS NPY_END_THREADS
+#define ALLOW_C_API_DEF NPY_ALLOW_C_API_DEF
+#define ALLOW_C_API NPY_ALLOW_C_API
+#define DISABLE_C_API NPY_DISABLE_C_API
+
+#define PY_FAIL NPY_FAIL
+#define PY_SUCCEED NPY_SUCCEED
+
+#ifndef TRUE
+#define TRUE NPY_TRUE
+#endif
+
+#ifndef FALSE
+#define FALSE NPY_FALSE
+#endif
+
+#define LONGDOUBLE_FMT NPY_LONGDOUBLE_FMT
+
+#define CONTIGUOUS NPY_CONTIGUOUS
+#define C_CONTIGUOUS NPY_C_CONTIGUOUS
+#define FORTRAN NPY_FORTRAN
+#define F_CONTIGUOUS NPY_F_CONTIGUOUS
+#define OWNDATA NPY_OWNDATA
+#define FORCECAST NPY_FORCECAST
+#define ENSURECOPY NPY_ENSURECOPY
+#define ENSUREARRAY NPY_ENSUREARRAY
+#define ELEMENTSTRIDES NPY_ELEMENTSTRIDES
+#define ALIGNED NPY_ALIGNED
+#define NOTSWAPPED NPY_NOTSWAPPED
+#define WRITEABLE NPY_WRITEABLE
+#define WRITEBACKIFCOPY NPY_ARRAY_WRITEBACKIFCOPY
+#define ARR_HAS_DESCR NPY_ARR_HAS_DESCR
+#define BEHAVED NPY_BEHAVED
+#define BEHAVED_NS NPY_BEHAVED_NS
+#define CARRAY NPY_CARRAY
+#define CARRAY_RO NPY_CARRAY_RO
+#define FARRAY NPY_FARRAY
+#define FARRAY_RO NPY_FARRAY_RO
+#define DEFAULT NPY_DEFAULT
+#define IN_ARRAY NPY_IN_ARRAY
+#define OUT_ARRAY NPY_OUT_ARRAY
+#define INOUT_ARRAY NPY_INOUT_ARRAY
+#define IN_FARRAY NPY_IN_FARRAY
+#define OUT_FARRAY NPY_OUT_FARRAY
+#define INOUT_FARRAY NPY_INOUT_FARRAY
+#define UPDATE_ALL NPY_UPDATE_ALL
+
+#define OWN_DATA NPY_OWNDATA
+#define BEHAVED_FLAGS NPY_BEHAVED
+#define BEHAVED_FLAGS_NS NPY_BEHAVED_NS
+#define CARRAY_FLAGS_RO NPY_CARRAY_RO
+#define CARRAY_FLAGS NPY_CARRAY
+#define FARRAY_FLAGS NPY_FARRAY
+#define FARRAY_FLAGS_RO NPY_FARRAY_RO
+#define DEFAULT_FLAGS NPY_DEFAULT
+#define UPDATE_ALL_FLAGS NPY_UPDATE_ALL_FLAGS
+
+#ifndef MIN
+#define MIN PyArray_MIN
+#endif
+#ifndef MAX
+#define MAX PyArray_MAX
+#endif
+#define MAX_INTP NPY_MAX_INTP
+#define MIN_INTP NPY_MIN_INTP
+#define MAX_UINTP NPY_MAX_UINTP
+#define INTP_FMT NPY_INTP_FMT
+
+#ifndef PYPY_VERSION
+#define REFCOUNT PyArray_REFCOUNT
+#define MAX_ELSIZE NPY_MAX_ELSIZE
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NOPREFIX_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h
new file mode 100644
index 00000000..6455d40d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -0,0 +1,124 @@
+#ifndef NPY_DEPRECATED_INCLUDES
+#error "Should never include npy_*_*_deprecated_api directly."
+#endif
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_
+
+/* Emit a warning if the user did not specifically request the old API */
+#ifndef NPY_NO_DEPRECATED_API
+#if defined(_WIN32)
+#define _WARN___STR2__(x) #x
+#define _WARN___STR1__(x) _WARN___STR2__(x)
+#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
+#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \
+ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
+#else
+#warning "Using deprecated NumPy API, disable it with " \
+ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
+#endif
+#endif
+
+/*
+ * This header exists to collect all dangerous/deprecated NumPy API
+ * as of NumPy 1.7.
+ *
+ * This is an attempt to remove bad API, the proliferation of macros,
+ * and namespace pollution currently produced by the NumPy headers.
+ */
+
+/* These array flags are deprecated as of NumPy 1.7 */
+#define NPY_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
+#define NPY_FORTRAN NPY_ARRAY_F_CONTIGUOUS
+
+/*
+ * The consistent NPY_ARRAY_* names which don't pollute the NPY_*
+ * namespace were added in NumPy 1.7.
+ *
+ * These versions of the carray flags are deprecated, but
+ * probably should only be removed after two releases instead of one.
+ */
+#define NPY_C_CONTIGUOUS NPY_ARRAY_C_CONTIGUOUS
+#define NPY_F_CONTIGUOUS NPY_ARRAY_F_CONTIGUOUS
+#define NPY_OWNDATA NPY_ARRAY_OWNDATA
+#define NPY_FORCECAST NPY_ARRAY_FORCECAST
+#define NPY_ENSURECOPY NPY_ARRAY_ENSURECOPY
+#define NPY_ENSUREARRAY NPY_ARRAY_ENSUREARRAY
+#define NPY_ELEMENTSTRIDES NPY_ARRAY_ELEMENTSTRIDES
+#define NPY_ALIGNED NPY_ARRAY_ALIGNED
+#define NPY_NOTSWAPPED NPY_ARRAY_NOTSWAPPED
+#define NPY_WRITEABLE NPY_ARRAY_WRITEABLE
+#define NPY_BEHAVED NPY_ARRAY_BEHAVED
+#define NPY_BEHAVED_NS NPY_ARRAY_BEHAVED_NS
+#define NPY_CARRAY NPY_ARRAY_CARRAY
+#define NPY_CARRAY_RO NPY_ARRAY_CARRAY_RO
+#define NPY_FARRAY NPY_ARRAY_FARRAY
+#define NPY_FARRAY_RO NPY_ARRAY_FARRAY_RO
+#define NPY_DEFAULT NPY_ARRAY_DEFAULT
+#define NPY_IN_ARRAY NPY_ARRAY_IN_ARRAY
+#define NPY_OUT_ARRAY NPY_ARRAY_OUT_ARRAY
+#define NPY_INOUT_ARRAY NPY_ARRAY_INOUT_ARRAY
+#define NPY_IN_FARRAY NPY_ARRAY_IN_FARRAY
+#define NPY_OUT_FARRAY NPY_ARRAY_OUT_FARRAY
+#define NPY_INOUT_FARRAY NPY_ARRAY_INOUT_FARRAY
+#define NPY_UPDATE_ALL NPY_ARRAY_UPDATE_ALL
+
+/* This way of accessing the default type is deprecated as of NumPy 1.7 */
+#define PyArray_DEFAULT NPY_DEFAULT_TYPE
+
+/* These DATETIME bits aren't used internally */
+#define PyDataType_GetDatetimeMetaData(descr) \
+ ((descr->metadata == NULL) ? NULL : \
+ ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \
+ PyDict_GetItemString( \
+ descr->metadata, NPY_METADATA_DTSTR), NULL))))
+
+/*
+ * Deprecated as of NumPy 1.7, this kind of shortcut doesn't
+ * belong in the public API.
+ */
+#define NPY_AO PyArrayObject
+
+/*
+ * Deprecated as of NumPy 1.7, an all-lowercase macro doesn't
+ * belong in the public API.
+ */
+#define fortran fortran_
+
+/*
+ * Deprecated as of NumPy 1.7, as it is a namespace-polluting
+ * macro.
+ */
+#define FORTRAN_IF PyArray_FORTRAN_IF
+
+/* Deprecated as of NumPy 1.7, datetime64 uses c_metadata instead */
+#define NPY_METADATA_DTSTR "__timeunit__"
+
+/*
+ * Deprecated as of NumPy 1.7.
+ * The reasoning:
+ * - These are for datetime, but there's no datetime "namespace".
+ * - They just turn NPY_STR_<x> into "<x>", which merely adds
+ *   indirection to something simple.
+ */
+#define NPY_STR_Y "Y"
+#define NPY_STR_M "M"
+#define NPY_STR_W "W"
+#define NPY_STR_D "D"
+#define NPY_STR_h "h"
+#define NPY_STR_m "m"
+#define NPY_STR_s "s"
+#define NPY_STR_ms "ms"
+#define NPY_STR_us "us"
+#define NPY_STR_ns "ns"
+#define NPY_STR_ps "ps"
+#define NPY_STR_fs "fs"
+#define NPY_STR_as "as"
+
+/*
+ * The macros in old_defines.h are deprecated as of NumPy 1.7 and will be
+ * removed in the next major release.
+ */
+#include "old_defines.h"
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h
new file mode 100644
index 00000000..11cc4776
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_3kcompat.h
@@ -0,0 +1,597 @@
+/*
+ * This is a convenience header file providing compatibility utilities
+ * for supporting different minor versions of Python 3.
+ * It was originally used to support the transition from Python 2,
+ * hence the "3k" naming.
+ *
+ * If you want to use this for your own projects, it's recommended to make a
+ * copy of it. Although the stuff below is unlikely to change, we don't provide
+ * strong backwards compatibility guarantees at the moment.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_
+
+#include <Python.h>
+#include <stdio.h>
+
+#ifndef NPY_PY3K
+#define NPY_PY3K 1
+#endif
+
+#include "numpy/npy_common.h"
+#include "numpy/ndarrayobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PyInt -> PyLong
+ */
+
+
+/*
+ * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is
+ * included here because it is missing from the PyPy API. It completes the PyLong_As*
+ * group of functions and can be useful in replacing PyInt_Check.
+ */
+static NPY_INLINE int
+Npy__PyLong_AsInt(PyObject *obj)
+{
+ int overflow;
+ long result = PyLong_AsLongAndOverflow(obj, &overflow);
+
+ /* INT_MAX and INT_MIN are defined in Python.h */
+ if (overflow || result > INT_MAX || result < INT_MIN) {
+ /* XXX: could be cute and give a different
+ message for overflow == -1 */
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)result;
+}
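+
+/*
+ * A minimal usage sketch: as with the other PyLong_As* functions, -1 is
+ * both a valid result and the error value, so check PyErr_Occurred():
+ *
+ *     int v = Npy__PyLong_AsInt(obj);
+ *     if (v == -1 && PyErr_Occurred()) {
+ *         return NULL;
+ *     }
+ */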
+
+
+#if defined(NPY_PY3K)
+/* Return True only if the long fits in a C long */
+static NPY_INLINE int PyInt_Check(PyObject *op) {
+ int overflow = 0;
+ if (!PyLong_Check(op)) {
+ return 0;
+ }
+ PyLong_AsLongAndOverflow(op, &overflow);
+ return (overflow == 0);
+}
+
+
+#define PyInt_FromLong PyLong_FromLong
+#define PyInt_AsLong PyLong_AsLong
+#define PyInt_AS_LONG PyLong_AsLong
+#define PyInt_AsSsize_t PyLong_AsSsize_t
+#define PyNumber_Int PyNumber_Long
+
+/* NOTE:
+ *
+ * Since the PyLong type is very different from the fixed-range PyInt,
+ * we don't define PyInt_Type -> PyLong_Type.
+ */
+#endif /* NPY_PY3K */
+
+/* Py3 changes the type of PySlice_GetIndicesEx's first argument to PyObject* */
+#ifdef NPY_PY3K
+# define NpySlice_GetIndicesEx PySlice_GetIndicesEx
+#else
+# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \
+ PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
+#endif
+
+#if PY_VERSION_HEX < 0x030900a4
+ /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */
+ #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0)
+ /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */
+ #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0)
+ /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */
+ #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0)
+#endif
+
+
+#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
+
+/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
+#if PY_VERSION_HEX < 0x03050200
+ #define Py_SETREF(op, op2) \
+ do { \
+ PyObject *_py_tmp = (PyObject *)(op); \
+ (op) = (op2); \
+ Py_DECREF(_py_tmp); \
+ } while (0)
+#endif
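+
+/*
+ * Note the ordering above: Py_SETREF assigns the new value before the old
+ * reference is dropped, so code re-entered from a destructor never sees
+ * `op` pointing at freed memory, unlike the naive
+ *
+ *     Py_DECREF(op); op = op2;
+ */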
+
+/* introduced in https://github.com/python/cpython/commit/a24107b04c1277e3c1105f98aff5bfa3a98b33a0 */
+#if PY_VERSION_HEX < 0x030800A3
+ static NPY_INLINE PyObject *
+ _PyDict_GetItemStringWithError(PyObject *v, const char *key)
+ {
+ PyObject *kv, *rv;
+ kv = PyUnicode_FromString(key);
+ if (kv == NULL) {
+ return NULL;
+ }
+ rv = PyDict_GetItemWithError(v, kv);
+ Py_DECREF(kv);
+ return rv;
+ }
+#endif
+
+/*
+ * PyString -> PyBytes
+ */
+
+#if defined(NPY_PY3K)
+
+#define PyString_Type PyBytes_Type
+#define PyString_Check PyBytes_Check
+#define PyStringObject PyBytesObject
+#define PyString_FromString PyBytes_FromString
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_AsStringAndSize PyBytes_AsStringAndSize
+#define PyString_FromFormat PyBytes_FromFormat
+#define PyString_Concat PyBytes_Concat
+#define PyString_ConcatAndDel PyBytes_ConcatAndDel
+#define PyString_AsString PyBytes_AsString
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_Size PyBytes_Size
+
+#define PyUString_Type PyUnicode_Type
+#define PyUString_Check PyUnicode_Check
+#define PyUStringObject PyUnicodeObject
+#define PyUString_FromString PyUnicode_FromString
+#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize
+#define PyUString_FromFormat PyUnicode_FromFormat
+#define PyUString_Concat PyUnicode_Concat2
+#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel
+#define PyUString_GET_SIZE PyUnicode_GET_SIZE
+#define PyUString_Size PyUnicode_Size
+#define PyUString_InternFromString PyUnicode_InternFromString
+#define PyUString_Format PyUnicode_Format
+
+#define PyBaseString_Check(obj) (PyUnicode_Check(obj))
+
+#else
+
+#define PyBytes_Type PyString_Type
+#define PyBytes_Check PyString_Check
+#define PyBytesObject PyStringObject
+#define PyBytes_FromString PyString_FromString
+#define PyBytes_FromStringAndSize PyString_FromStringAndSize
+#define PyBytes_AS_STRING PyString_AS_STRING
+#define PyBytes_AsStringAndSize PyString_AsStringAndSize
+#define PyBytes_FromFormat PyString_FromFormat
+#define PyBytes_Concat PyString_Concat
+#define PyBytes_ConcatAndDel PyString_ConcatAndDel
+#define PyBytes_AsString PyString_AsString
+#define PyBytes_GET_SIZE PyString_GET_SIZE
+#define PyBytes_Size PyString_Size
+
+#define PyUString_Type PyString_Type
+#define PyUString_Check PyString_Check
+#define PyUStringObject PyStringObject
+#define PyUString_FromString PyString_FromString
+#define PyUString_FromStringAndSize PyString_FromStringAndSize
+#define PyUString_FromFormat PyString_FromFormat
+#define PyUString_Concat PyString_Concat
+#define PyUString_ConcatAndDel PyString_ConcatAndDel
+#define PyUString_GET_SIZE PyString_GET_SIZE
+#define PyUString_Size PyString_Size
+#define PyUString_InternFromString PyString_InternFromString
+#define PyUString_Format PyString_Format
+
+#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj))
+
+#endif /* NPY_PY3K */
+
+
+static NPY_INLINE void
+PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
+{
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
+ Py_DECREF(right);
+}
+
+static NPY_INLINE void
+PyUnicode_Concat2(PyObject **left, PyObject *right)
+{
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
+}
+
+/*
+ * PyFile_* compatibility
+ */
+
+/*
+ * Get a FILE* handle to the file represented by the Python object
+ */
+static NPY_INLINE FILE*
+npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
+{
+ int fd, fd2, unbuf;
+ Py_ssize_t fd2_tmp;
+ PyObject *ret, *os, *io, *io_raw;
+ npy_off_t pos;
+ FILE *handle;
+
+ /* For Python 2 PyFileObject, use PyFile_AsFile */
+#if !defined(NPY_PY3K)
+ if (PyFile_Check(file)) {
+ return PyFile_AsFile(file);
+ }
+#endif
+
+ /* Flush first to ensure things end up in the file in the correct order */
+ ret = PyObject_CallMethod(file, "flush", "");
+ if (ret == NULL) {
+ return NULL;
+ }
+ Py_DECREF(ret);
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return NULL;
+ }
+
+ /*
+ * The handle needs to be dup'd because we have to call fclose
+ * at the end
+ */
+ os = PyImport_ImportModule("os");
+ if (os == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallMethod(os, "dup", "i", fd);
+ Py_DECREF(os);
+ if (ret == NULL) {
+ return NULL;
+ }
+ fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError);
+ Py_DECREF(ret);
+ if (fd2_tmp == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+ if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) {
+ PyErr_SetString(PyExc_IOError,
+ "Getting an 'int' from os.dup() failed");
+ return NULL;
+ }
+ fd2 = (int)fd2_tmp;
+
+ /* Convert to FILE* handle */
+#ifdef _WIN32
+ handle = _fdopen(fd2, mode);
+#else
+ handle = fdopen(fd2, mode);
+#endif
+ if (handle == NULL) {
+ PyErr_SetString(PyExc_IOError,
+ "Getting a FILE* from a Python file object failed");
+ return NULL;
+ }
+
+ /* Record the original raw file handle position */
+ *orig_pos = npy_ftell(handle);
+ if (*orig_pos == -1) {
+ /* The io module is needed to determine if buffering is used */
+ io = PyImport_ImportModule("io");
+ if (io == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ /* File object instances of RawIOBase are unbuffered */
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
+ Py_DECREF(io);
+ if (io_raw == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ unbuf = PyObject_IsInstance(file, io_raw);
+ Py_DECREF(io_raw);
+ if (unbuf == 1) {
+ /* Succeed if the IO is unbuffered */
+ return handle;
+ }
+ else {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ fclose(handle);
+ return NULL;
+ }
+ }
+
+ /* Seek raw handle to the Python-side position */
+ ret = PyObject_CallMethod(file, "tell", "");
+ if (ret == NULL) {
+ fclose(handle);
+ return NULL;
+ }
+ pos = PyLong_AsLongLong(ret);
+ Py_DECREF(ret);
+ if (PyErr_Occurred()) {
+ fclose(handle);
+ return NULL;
+ }
+ if (npy_fseek(handle, pos, SEEK_SET) == -1) {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ fclose(handle);
+ return NULL;
+ }
+ return handle;
+}
+
+/*
+ * Close the dup-ed file handle, and seek the Python one to the current position
+ */
+static NPY_INLINE int
+npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
+{
+ int fd, unbuf;
+ PyObject *ret, *io, *io_raw;
+ npy_off_t position;
+
+ /* For Python 2 PyFileObject, do nothing */
+#if !defined(NPY_PY3K)
+ if (PyFile_Check(file)) {
+ return 0;
+ }
+#endif
+
+ position = npy_ftell(handle);
+
+ /* Close the FILE* handle */
+ fclose(handle);
+
+ /*
+ * Restore original file handle position, in order to not confuse
+ * Python-side data structures
+ */
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ return -1;
+ }
+
+ if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {
+
+ /* The io module is needed to determine if buffering is used */
+ io = PyImport_ImportModule("io");
+ if (io == NULL) {
+ return -1;
+ }
+ /* File object instances of RawIOBase are unbuffered */
+ io_raw = PyObject_GetAttrString(io, "RawIOBase");
+ Py_DECREF(io);
+ if (io_raw == NULL) {
+ return -1;
+ }
+ unbuf = PyObject_IsInstance(file, io_raw);
+ Py_DECREF(io_raw);
+ if (unbuf == 1) {
+ /* Succeed if the IO is unbuffered */
+ return 0;
+ }
+ else {
+ PyErr_SetString(PyExc_IOError, "seeking file failed");
+ return -1;
+ }
+ }
+
+ if (position == -1) {
+ PyErr_SetString(PyExc_IOError, "obtaining file position failed");
+ return -1;
+ }
+
+ /* Seek Python-side handle to the FILE* handle position */
+ ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);
+ if (ret == NULL) {
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
+}
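+
+/*
+ * A minimal usage sketch: the Dup2/DupClose2 pair brackets C-level stdio
+ * access to a Python file object, keeping both positions in sync:
+ *
+ *     npy_off_t orig_pos;
+ *     FILE *fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
+ *     if (fp == NULL) {
+ *         return NULL;
+ *     }
+ *     ... read from fp with fread() ...
+ *     if (npy_PyFile_DupClose2(file, fp, orig_pos) < 0) {
+ *         return NULL;
+ *     }
+ */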
+
+static NPY_INLINE int
+npy_PyFile_Check(PyObject *file)
+{
+ int fd;
+ /* For Python 2, check if it is a PyFileObject */
+#if !defined(NPY_PY3K)
+ if (PyFile_Check(file)) {
+ return 1;
+ }
+#endif
+ fd = PyObject_AsFileDescriptor(file);
+ if (fd == -1) {
+ PyErr_Clear();
+ return 0;
+ }
+ return 1;
+}
+
+static NPY_INLINE PyObject*
+npy_PyFile_OpenFile(PyObject *filename, const char *mode)
+{
+ PyObject *open;
+ open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");
+ if (open == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(open, "Os", filename, mode);
+}
+
+static NPY_INLINE int
+npy_PyFile_CloseFile(PyObject *file)
+{
+ PyObject *ret;
+
+ ret = PyObject_CallMethod(file, "close", NULL);
+ if (ret == NULL) {
+ return -1;
+ }
+ Py_DECREF(ret);
+ return 0;
+}
+
+
+/* This is a copy of _PyErr_ChainExceptions. */
+static NPY_INLINE void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetContext(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
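+
+/*
+ * A minimal usage sketch, with a hypothetical cleanup helper that may
+ * itself raise; the original error survives as the new one's __context__:
+ *
+ *     PyObject *exc, *val, *tb;
+ *     PyErr_Fetch(&exc, &val, &tb);      (stash the pending error)
+ *     do_cleanup(self);                  (hypothetical, may set an error)
+ *     npy_PyErr_ChainExceptions(exc, val, tb);
+ */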
+
+
+/* This is a copy of _PyErr_ChainExceptions, with:
+ * - a minimal implementation for Python 2
+ * - __cause__ used instead of __context__
+ */
+static NPY_INLINE void
+npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetCause(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+/*
+ * PyObject_Cmp
+ */
+#if defined(NPY_PY3K)
+static NPY_INLINE int
+PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
+{
+ int v;
+ v = PyObject_RichCompareBool(i1, i2, Py_LT);
+ if (v == 1) {
+ *cmp = -1;
+ return 1;
+ }
+ else if (v == -1) {
+ return -1;
+ }
+
+ v = PyObject_RichCompareBool(i1, i2, Py_GT);
+ if (v == 1) {
+ *cmp = 1;
+ return 1;
+ }
+ else if (v == -1) {
+ return -1;
+ }
+
+ v = PyObject_RichCompareBool(i1, i2, Py_EQ);
+ if (v == 1) {
+ *cmp = 0;
+ return 1;
+ }
+ else {
+ *cmp = 0;
+ return -1;
+ }
+}
+#endif
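+
+/*
+ * A minimal usage sketch: on success this returns 1 and stores -1, 0,
+ * or 1 in *cmp; on failure it returns -1:
+ *
+ *     int cmp;
+ *     if (PyObject_Cmp(a, b, &cmp) < 0) {
+ *         return NULL;
+ *     }
+ */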
+
+/*
+ * PyCObject functions adapted to PyCapsules.
+ *
+ * The main job here is to get rid of the improved error handling
+ * of PyCapsules. It's a shame...
+ */
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static NPY_INLINE PyObject *
+NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))
+{
+ PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);
+ if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {
+ PyErr_Clear();
+ Py_DECREF(ret);
+ ret = NULL;
+ }
+ return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_AsVoidPtr(PyObject *obj)
+{
+ void *ret = PyCapsule_GetPointer(obj, NULL);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+static NPY_INLINE void *
+NpyCapsule_GetDesc(PyObject *obj)
+{
+ return PyCapsule_GetContext(obj);
+}
+
+static NPY_INLINE int
+NpyCapsule_Check(PyObject *ptr)
+{
+ return PyCapsule_CheckExact(ptr);
+}
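+
+/*
+ * For example, round-tripping a C pointer through Python (`my_dtor` is a
+ * hypothetical destructor of type void (*)(PyObject *)):
+ *
+ *     PyObject *cap = NpyCapsule_FromVoidPtr(ptr, my_dtor);
+ *     ...
+ *     if (NpyCapsule_Check(obj)) {
+ *         void *back = NpyCapsule_AsVoidPtr(obj);
+ *     }
+ */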
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h
new file mode 100644
index 00000000..2bcc45e4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_common.h
@@ -0,0 +1,1122 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_
+
+/* need Python.h for npy_intp, npy_uintp */
+#include <Python.h>
+
+/* numpyconfig.h is auto-generated */
+#include "numpyconfig.h"
+#ifdef HAVE_NPY_CONFIG_H
+#include <npy_config.h>
+#endif
+
+/*
+ * using static inline modifiers when defining npy_math functions
+ * allows the compiler to make optimizations when possible
+ */
+#ifndef NPY_INLINE_MATH
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
+ #define NPY_INLINE_MATH 1
+#else
+ #define NPY_INLINE_MATH 0
+#endif
+#endif
+
+/*
+ * gcc does not unroll loops even with -O3.
+ * Use with care; unrolling on modern CPUs rarely speeds things up.
+ */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS
+#define NPY_GCC_UNROLL_LOOPS \
+ __attribute__((optimize("unroll-loops")))
+#else
+#define NPY_GCC_UNROLL_LOOPS
+#endif
+
+/* highest gcc optimization level, enables the autovectorizer */
+#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3
+#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))
+#else
+#define NPY_GCC_OPT_3
+#endif
+
+/* compile target attributes */
+#if defined HAVE_ATTRIBUTE_TARGET_AVX && defined HAVE_LINK_AVX
+#define NPY_GCC_TARGET_AVX __attribute__((target("avx")))
+#else
+#define NPY_GCC_TARGET_AVX
+#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
+#define HAVE_ATTRIBUTE_TARGET_FMA
+#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma")))
+#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2
+#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
+#else
+#define NPY_GCC_TARGET_AVX2
+#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F && defined HAVE_LINK_AVX512F
+#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f")))
+#elif defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS
+#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f")))
+#else
+#define NPY_GCC_TARGET_AVX512F
+#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX && defined HAVE_LINK_AVX512_SKX
+#define NPY_GCC_TARGET_AVX512_SKX __attribute__((target("avx512f,avx512dq,avx512vl,avx512bw,avx512cd")))
+#elif defined HAVE_ATTRIBUTE_TARGET_AVX512_SKX_WITH_INTRINSICS
+#define NPY_GCC_TARGET_AVX512_SKX __attribute__((target("avx512f,avx512dq,avx512vl,avx512bw,avx512cd")))
+#else
+#define NPY_GCC_TARGET_AVX512_SKX
+#endif
+/*
+ * Mark an argument (counting from 1) that must not be NULL and is not checked.
+ * DO NOT USE IF THE FUNCTION CHECKS FOR NULL!! The compiler will remove the check.
+ */
+#ifdef HAVE_ATTRIBUTE_NONNULL
+#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))
+#else
+#define NPY_GCC_NONNULL(n)
+#endif
+
+#if defined HAVE_XMMINTRIN_H && defined HAVE__MM_LOAD_PS
+#define NPY_HAVE_SSE_INTRINSICS
+#endif
+
+#if defined HAVE_EMMINTRIN_H && defined HAVE__MM_LOAD_PD
+#define NPY_HAVE_SSE2_INTRINSICS
+#endif
+
+#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX2
+#define NPY_HAVE_AVX2_INTRINSICS
+#endif
+
+#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX512F
+#define NPY_HAVE_AVX512F_INTRINSICS
+#endif
+/*
+ * give a hint to the compiler which branch is more likely or unlikely
+ * to occur, e.g. rare error cases:
+ *
+ * if (NPY_UNLIKELY(failure == 0))
+ * return NULL;
+ *
+ * the double !! is to cast the expression (e.g. NULL) to a boolean required by
+ * the intrinsic
+ */
+#ifdef HAVE___BUILTIN_EXPECT
+#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)
+#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define NPY_LIKELY(x) (x)
+#define NPY_UNLIKELY(x) (x)
+#endif
+
+#ifdef HAVE___BUILTIN_PREFETCH
+/* unlike _mm_prefetch, this also works on non-x86 */
+#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
+#else
+#ifdef HAVE__MM_PREFETCH
+/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
+#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
+ (loc == 1 ? _MM_HINT_T2 : \
+ (loc == 2 ? _MM_HINT_T1 : \
+ (loc == 3 ? _MM_HINT_T0 : -1))))
+#else
+#define NPY_PREFETCH(x, rw, loc)
+#endif
+#endif
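+
+/*
+ * For example, hint a read (rw = 0) of address p with maximum temporal
+ * locality (loc = 3, i.e. _MM_HINT_T0 on x86):
+ *
+ *     NPY_PREFETCH(p, 0, 3);
+ */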
+
+#if defined(_MSC_VER) && !defined(__clang__)
+ #define NPY_INLINE __inline
+/* clang included here to handle clang-cl on Windows */
+#elif defined(__GNUC__) || defined(__clang__)
+ #if defined(__STRICT_ANSI__)
+ #define NPY_INLINE __inline__
+ #else
+ #define NPY_INLINE inline
+ #endif
+#else
+ #define NPY_INLINE
+#endif
+
+#ifdef _MSC_VER
+ #define NPY_FINLINE static __forceinline
+#elif defined(__GNUC__)
+ #define NPY_FINLINE static NPY_INLINE __attribute__((always_inline))
+#else
+ #define NPY_FINLINE static
+#endif
+
+#ifdef HAVE___THREAD
+ #define NPY_TLS __thread
+#else
+ #ifdef HAVE___DECLSPEC_THREAD_
+ #define NPY_TLS __declspec(thread)
+ #else
+ #define NPY_TLS
+ #endif
+#endif
+
+#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE
+ #define NPY_RETURNS_BORROWED_REF \
+ __attribute__((cpychecker_returns_borrowed_ref))
+#else
+ #define NPY_RETURNS_BORROWED_REF
+#endif
+
+#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE
+ #define NPY_STEALS_REF_TO_ARG(n) \
+ __attribute__((cpychecker_steals_reference_to_arg(n)))
+#else
+ #define NPY_STEALS_REF_TO_ARG(n)
+#endif
+
+/* 64 bit file position support, also on win-amd64. Ticket #1660 */
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
+ defined(__MINGW32__) || defined(__MINGW64__)
+ #include <io.h>
+
+ #define npy_fseek _fseeki64
+ #define npy_ftell _ftelli64
+ #define npy_lseek _lseeki64
+ #define npy_off_t npy_int64
+
+ #if NPY_SIZEOF_INT == 8
+ #define NPY_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_LONG == 8
+ #define NPY_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_LONGLONG == 8
+ #define NPY_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#else
+#ifdef HAVE_FSEEKO
+ #define npy_fseek fseeko
+#else
+ #define npy_fseek fseek
+#endif
+#ifdef HAVE_FTELLO
+ #define npy_ftell ftello
+#else
+ #define npy_ftell ftell
+#endif
+ #include <sys/types.h>
+ #define npy_lseek lseek
+ #define npy_off_t off_t
+
+ #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
+ #define NPY_OFF_T_PYFMT "h"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
+ #define NPY_OFF_T_PYFMT "i"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
+ #define NPY_OFF_T_PYFMT "l"
+ #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
+ #define NPY_OFF_T_PYFMT "L"
+ #else
+ #error Unsupported size for type off_t
+ #endif
+#endif
+
+/* enums for detected endianness */
+enum {
+ NPY_CPU_UNKNOWN_ENDIAN,
+ NPY_CPU_LITTLE,
+ NPY_CPU_BIG
+};
+
+/*
+ * This is to typedef npy_intp to the appropriate pointer size for this
+ * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h.
+ */
+typedef Py_intptr_t npy_intp;
+typedef Py_uintptr_t npy_uintp;
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
+#define NPY_SIZEOF_CHAR 1
+#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
+#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T
+#define NPY_SIZEOF_HALF 2
+#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
+#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
+#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
+
+#ifdef constchar
+#undef constchar
+#endif
+
+#define NPY_SSIZE_T_PYFMT "n"
+#define constchar char
+
+/* NPY_INTP_FMT Note:
+ * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf,
+ * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those
+ * functions use different formatting codes that are portably specified
+ * according to the Python documentation. See issue gh-2388.
+ */
+#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT
+ #define NPY_INTP NPY_INT
+ #define NPY_UINTP NPY_UINT
+ #define PyIntpArrType_Type PyIntArrType_Type
+ #define PyUIntpArrType_Type PyUIntArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_INT
+ #define NPY_MIN_INTP NPY_MIN_INT
+ #define NPY_MAX_UINTP NPY_MAX_UINT
+ #define NPY_INTP_FMT "d"
+#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG
+ #define NPY_INTP NPY_LONG
+ #define NPY_UINTP NPY_ULONG
+ #define PyIntpArrType_Type PyLongArrType_Type
+ #define PyUIntpArrType_Type PyULongArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_LONG
+ #define NPY_MIN_INTP NPY_MIN_LONG
+ #define NPY_MAX_UINTP NPY_MAX_ULONG
+ #define NPY_INTP_FMT "ld"
+#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG)
+ #define NPY_INTP NPY_LONGLONG
+ #define NPY_UINTP NPY_ULONGLONG
+ #define PyIntpArrType_Type PyLongLongArrType_Type
+ #define PyUIntpArrType_Type PyULongLongArrType_Type
+ #define NPY_MAX_INTP NPY_MAX_LONGLONG
+ #define NPY_MIN_INTP NPY_MIN_LONGLONG
+ #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
+ #define NPY_INTP_FMT "lld"
+#endif
+
+/*
+ * We can only use C99 formats for npy_intp if it is the same as
+ * intptr_t, hence the condition on HAVE_UINTPTR_T.
+ */
+#if (NPY_USE_C99_FORMATS) == 1 \
+ && (defined HAVE_UINTPTR_T) \
+ && (defined HAVE_INTTYPES_H)
+ #include <inttypes.h>
+ #undef NPY_INTP_FMT
+ #define NPY_INTP_FMT PRIdPTR
+#endif
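+
+/*
+ * For example:
+ *
+ *     PyErr_Format(PyExc_IndexError,
+ *                  "index %" NPY_INTP_FMT " is out of bounds", i);
+ */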
+
+
+/*
+ * Some platforms don't define bool, long long, or long double.
+ * Handle that here.
+ */
+#define NPY_BYTE_FMT "hhd"
+#define NPY_UBYTE_FMT "hhu"
+#define NPY_SHORT_FMT "hd"
+#define NPY_USHORT_FMT "hu"
+#define NPY_INT_FMT "d"
+#define NPY_UINT_FMT "u"
+#define NPY_LONG_FMT "ld"
+#define NPY_ULONG_FMT "lu"
+#define NPY_HALF_FMT "g"
+#define NPY_FLOAT_FMT "g"
+#define NPY_DOUBLE_FMT "g"
+
+
+#ifdef PY_LONG_LONG
+typedef PY_LONG_LONG npy_longlong;
+typedef unsigned PY_LONG_LONG npy_ulonglong;
+# ifdef _MSC_VER
+# define NPY_LONGLONG_FMT "I64d"
+# define NPY_ULONGLONG_FMT "I64u"
+# else
+# define NPY_LONGLONG_FMT "lld"
+# define NPY_ULONGLONG_FMT "llu"
+# endif
+# ifdef _MSC_VER
+# define NPY_LONGLONG_SUFFIX(x) (x##i64)
+# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64)
+# else
+# define NPY_LONGLONG_SUFFIX(x) (x##LL)
+# define NPY_ULONGLONG_SUFFIX(x) (x##ULL)
+# endif
+#else
+typedef long npy_longlong;
+typedef unsigned long npy_ulonglong;
+# define NPY_LONGLONG_SUFFIX(x) (x##L)
+# define NPY_ULONGLONG_SUFFIX(x) (x##UL)
+#endif
+
+
+typedef unsigned char npy_bool;
+#define NPY_FALSE 0
+#define NPY_TRUE 1
+/*
+ * `NPY_SIZEOF_LONGDOUBLE` isn't always equal to sizeof(long double).
+ * In certain cases it may be forced to be equal to sizeof(double),
+ * even against the compiler implementation, and the same goes for
+ * `complex long double`.
+ *
+ * Therefore, avoid `long double`, use `npy_longdouble` instead,
+ * and when it comes to standard math functions make sure to use
+ * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`.
+ * For example:
+ * npy_longdouble *ptr, x;
+ * #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ * npy_longdouble r = modf(x, ptr);
+ * #else
+ * npy_longdouble r = modfl(x, ptr);
+ * #endif
+ *
+ * See https://github.com/numpy/numpy/issues/20348
+ */
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ #define NPY_LONGDOUBLE_FMT "g"
+ typedef double npy_longdouble;
+#else
+ #define NPY_LONGDOUBLE_FMT "Lg"
+ typedef long double npy_longdouble;
+#endif
+
+#ifndef Py_USING_UNICODE
+#error Must use Python with unicode enabled.
+#endif
+
+
+typedef signed char npy_byte;
+typedef unsigned char npy_ubyte;
+typedef unsigned short npy_ushort;
+typedef unsigned int npy_uint;
+typedef unsigned long npy_ulong;
+
+/* These are for completeness */
+typedef char npy_char;
+typedef short npy_short;
+typedef int npy_int;
+typedef long npy_long;
+typedef float npy_float;
+typedef double npy_double;
+
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+
+/*
+ * Disabling C99 complex usage: a lot of C code in numpy/scipy relies on
+ * being able to do .real/.imag. That code will have to be converted first.
+ */
+#if 0
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_DOUBLE)
+typedef complex npy_cdouble;
+#else
+typedef struct { double real, imag; } npy_cdouble;
+#endif
+
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_FLOAT)
+typedef complex float npy_cfloat;
+#else
+typedef struct { float real, imag; } npy_cfloat;
+#endif
+
+#if defined(NPY_USE_C99_COMPLEX) && defined(NPY_HAVE_COMPLEX_LONG_DOUBLE)
+typedef complex long double npy_clongdouble;
+#else
+typedef struct {npy_longdouble real, imag;} npy_clongdouble;
+#endif
+#endif
+#if NPY_SIZEOF_COMPLEX_DOUBLE != 2 * NPY_SIZEOF_DOUBLE
+#error npy_cdouble definition is not compatible with C99 complex definition ! \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform
+#endif
+typedef struct { double real, imag; } npy_cdouble;
+
+#if NPY_SIZEOF_COMPLEX_FLOAT != 2 * NPY_SIZEOF_FLOAT
+#error npy_cfloat definition is not compatible with C99 complex definition ! \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform
+#endif
+typedef struct { float real, imag; } npy_cfloat;
+
+#if NPY_SIZEOF_COMPLEX_LONGDOUBLE != 2 * NPY_SIZEOF_LONGDOUBLE
+#error npy_clongdouble definition is not compatible with C99 complex definition ! \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform
+#endif
+typedef struct { npy_longdouble real, imag; } npy_clongdouble;
+
+/*
+ * numarray-style bit-width typedefs
+ */
+#define NPY_MAX_INT8 127
+#define NPY_MIN_INT8 -128
+#define NPY_MAX_UINT8 255
+#define NPY_MAX_INT16 32767
+#define NPY_MIN_INT16 -32768
+#define NPY_MAX_UINT16 65535
+#define NPY_MAX_INT32 2147483647
+#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
+#define NPY_MAX_UINT32 4294967295U
+#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
+#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
+#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define NPY_MAX_INT256 NPY_LONGLONG_SUFFIX(57896044618658097711785492504343953926634992332820282019728792003956564819967)
+#define NPY_MIN_INT256 (-NPY_MAX_INT256 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT256 NPY_ULONGLONG_SUFFIX(115792089237316195423570985008687907853269984665640564039457584007913129639935)
+#define NPY_MIN_DATETIME NPY_MIN_INT64
+#define NPY_MAX_DATETIME NPY_MAX_INT64
+#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
+#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
+
+ /* We need to find the number of bits for each type and
+ make definitions accordingly.
+
+ C states that sizeof(char) == 1 by definition, so the
+ sizeof keyword alone won't help. Python itself also uses
+ sizeof(char) quite a bit, and by definition it is always 1.
+
+ Idea: use CHAR_BIT, which tells us how many bits there are
+ per character.
+ */
+
+ /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define NPY_MAX_BYTE SCHAR_MAX
+#define NPY_MIN_BYTE SCHAR_MIN
+#define NPY_MAX_UBYTE UCHAR_MAX
+#define NPY_MAX_SHORT SHRT_MAX
+#define NPY_MIN_SHORT SHRT_MIN
+#define NPY_MAX_USHORT USHRT_MAX
+#define NPY_MAX_INT INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define NPY_MIN_INT INT_MIN
+#define NPY_MAX_UINT UINT_MAX
+#define NPY_MAX_LONG LONG_MAX
+#define NPY_MIN_LONG LONG_MIN
+#define NPY_MAX_ULONG ULONG_MAX
+
+#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
+#define NPY_BITSOF_CHAR CHAR_BIT
+#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
+#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
+#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
+#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
+#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
+#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
+#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
+#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
+#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
+#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
+#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
+#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
+
+#if NPY_BITSOF_LONG == 8
+#define NPY_INT8 NPY_LONG
+#define NPY_UINT8 NPY_ULONG
+ typedef long npy_int8;
+ typedef unsigned long npy_uint8;
+#define PyInt8ScalarObject PyLongScalarObject
+#define PyInt8ArrType_Type PyLongArrType_Type
+#define PyUInt8ScalarObject PyULongScalarObject
+#define PyUInt8ArrType_Type PyULongArrType_Type
+#define NPY_INT8_FMT NPY_LONG_FMT
+#define NPY_UINT8_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 16
+#define NPY_INT16 NPY_LONG
+#define NPY_UINT16 NPY_ULONG
+ typedef long npy_int16;
+ typedef unsigned long npy_uint16;
+#define PyInt16ScalarObject PyLongScalarObject
+#define PyInt16ArrType_Type PyLongArrType_Type
+#define PyUInt16ScalarObject PyULongScalarObject
+#define PyUInt16ArrType_Type PyULongArrType_Type
+#define NPY_INT16_FMT NPY_LONG_FMT
+#define NPY_UINT16_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 32
+#define NPY_INT32 NPY_LONG
+#define NPY_UINT32 NPY_ULONG
+ typedef long npy_int32;
+ typedef unsigned long npy_uint32;
+ typedef unsigned long npy_ucs4;
+#define PyInt32ScalarObject PyLongScalarObject
+#define PyInt32ArrType_Type PyLongArrType_Type
+#define PyUInt32ScalarObject PyULongScalarObject
+#define PyUInt32ArrType_Type PyULongArrType_Type
+#define NPY_INT32_FMT NPY_LONG_FMT
+#define NPY_UINT32_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 64
+#define NPY_INT64 NPY_LONG
+#define NPY_UINT64 NPY_ULONG
+ typedef long npy_int64;
+ typedef unsigned long npy_uint64;
+#define PyInt64ScalarObject PyLongScalarObject
+#define PyInt64ArrType_Type PyLongArrType_Type
+#define PyUInt64ScalarObject PyULongScalarObject
+#define PyUInt64ArrType_Type PyULongArrType_Type
+#define NPY_INT64_FMT NPY_LONG_FMT
+#define NPY_UINT64_FMT NPY_ULONG_FMT
+#define MyPyLong_FromInt64 PyLong_FromLong
+#define MyPyLong_AsInt64 PyLong_AsLong
+#elif NPY_BITSOF_LONG == 128
+#define NPY_INT128 NPY_LONG
+#define NPY_UINT128 NPY_ULONG
+ typedef long npy_int128;
+ typedef unsigned long npy_uint128;
+#define PyInt128ScalarObject PyLongScalarObject
+#define PyInt128ArrType_Type PyLongArrType_Type
+#define PyUInt128ScalarObject PyULongScalarObject
+#define PyUInt128ArrType_Type PyULongArrType_Type
+#define NPY_INT128_FMT NPY_LONG_FMT
+#define NPY_UINT128_FMT NPY_ULONG_FMT
+#endif
+
+#if NPY_BITSOF_LONGLONG == 8
+# ifndef NPY_INT8
+# define NPY_INT8 NPY_LONGLONG
+# define NPY_UINT8 NPY_ULONGLONG
+ typedef npy_longlong npy_int8;
+ typedef npy_ulonglong npy_uint8;
+# define PyInt8ScalarObject PyLongLongScalarObject
+# define PyInt8ArrType_Type PyLongLongArrType_Type
+# define PyUInt8ScalarObject PyULongLongScalarObject
+# define PyUInt8ArrType_Type PyULongLongArrType_Type
+#define NPY_INT8_FMT NPY_LONGLONG_FMT
+#define NPY_UINT8_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT8
+# define NPY_MIN_LONGLONG NPY_MIN_INT8
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT8
+#elif NPY_BITSOF_LONGLONG == 16
+# ifndef NPY_INT16
+# define NPY_INT16 NPY_LONGLONG
+# define NPY_UINT16 NPY_ULONGLONG
+ typedef npy_longlong npy_int16;
+ typedef npy_ulonglong npy_uint16;
+# define PyInt16ScalarObject PyLongLongScalarObject
+# define PyInt16ArrType_Type PyLongLongArrType_Type
+# define PyUInt16ScalarObject PyULongLongScalarObject
+# define PyUInt16ArrType_Type PyULongLongArrType_Type
+#define NPY_INT16_FMT NPY_LONGLONG_FMT
+#define NPY_UINT16_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT16
+# define NPY_MIN_LONGLONG NPY_MIN_INT16
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT16
+#elif NPY_BITSOF_LONGLONG == 32
+# ifndef NPY_INT32
+# define NPY_INT32 NPY_LONGLONG
+# define NPY_UINT32 NPY_ULONGLONG
+ typedef npy_longlong npy_int32;
+ typedef npy_ulonglong npy_uint32;
+ typedef npy_ulonglong npy_ucs4;
+# define PyInt32ScalarObject PyLongLongScalarObject
+# define PyInt32ArrType_Type PyLongLongArrType_Type
+# define PyUInt32ScalarObject PyULongLongScalarObject
+# define PyUInt32ArrType_Type PyULongLongArrType_Type
+#define NPY_INT32_FMT NPY_LONGLONG_FMT
+#define NPY_UINT32_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT32
+# define NPY_MIN_LONGLONG NPY_MIN_INT32
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT32
+#elif NPY_BITSOF_LONGLONG == 64
+# ifndef NPY_INT64
+# define NPY_INT64 NPY_LONGLONG
+# define NPY_UINT64 NPY_ULONGLONG
+ typedef npy_longlong npy_int64;
+ typedef npy_ulonglong npy_uint64;
+# define PyInt64ScalarObject PyLongLongScalarObject
+# define PyInt64ArrType_Type PyLongLongArrType_Type
+# define PyUInt64ScalarObject PyULongLongScalarObject
+# define PyUInt64ArrType_Type PyULongLongArrType_Type
+#define NPY_INT64_FMT NPY_LONGLONG_FMT
+#define NPY_UINT64_FMT NPY_ULONGLONG_FMT
+# define MyPyLong_FromInt64 PyLong_FromLongLong
+# define MyPyLong_AsInt64 PyLong_AsLongLong
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT64
+# define NPY_MIN_LONGLONG NPY_MIN_INT64
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT64
+#elif NPY_BITSOF_LONGLONG == 128
+# ifndef NPY_INT128
+# define NPY_INT128 NPY_LONGLONG
+# define NPY_UINT128 NPY_ULONGLONG
+ typedef npy_longlong npy_int128;
+ typedef npy_ulonglong npy_uint128;
+# define PyInt128ScalarObject PyLongLongScalarObject
+# define PyInt128ArrType_Type PyLongLongArrType_Type
+# define PyUInt128ScalarObject PyULongLongScalarObject
+# define PyUInt128ArrType_Type PyULongLongArrType_Type
+#define NPY_INT128_FMT NPY_LONGLONG_FMT
+#define NPY_UINT128_FMT NPY_ULONGLONG_FMT
+# endif
+# define NPY_MAX_LONGLONG NPY_MAX_INT128
+# define NPY_MIN_LONGLONG NPY_MIN_INT128
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT128
+#elif NPY_BITSOF_LONGLONG == 256
+# define NPY_INT256 NPY_LONGLONG
+# define NPY_UINT256 NPY_ULONGLONG
+ typedef npy_longlong npy_int256;
+ typedef npy_ulonglong npy_uint256;
+# define PyInt256ScalarObject PyLongLongScalarObject
+# define PyInt256ArrType_Type PyLongLongArrType_Type
+# define PyUInt256ScalarObject PyULongLongScalarObject
+# define PyUInt256ArrType_Type PyULongLongArrType_Type
+#define NPY_INT256_FMT NPY_LONGLONG_FMT
+#define NPY_UINT256_FMT NPY_ULONGLONG_FMT
+# define NPY_MAX_LONGLONG NPY_MAX_INT256
+# define NPY_MIN_LONGLONG NPY_MIN_INT256
+# define NPY_MAX_ULONGLONG NPY_MAX_UINT256
+#endif
+
+#if NPY_BITSOF_INT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_INT
+#define NPY_UINT8 NPY_UINT
+ typedef int npy_int8;
+ typedef unsigned int npy_uint8;
+# define PyInt8ScalarObject PyIntScalarObject
+# define PyInt8ArrType_Type PyIntArrType_Type
+# define PyUInt8ScalarObject PyUIntScalarObject
+# define PyUInt8ArrType_Type PyUIntArrType_Type
+#define NPY_INT8_FMT NPY_INT_FMT
+#define NPY_UINT8_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_INT
+#define NPY_UINT16 NPY_UINT
+ typedef int npy_int16;
+ typedef unsigned int npy_uint16;
+# define PyInt16ScalarObject PyIntScalarObject
+# define PyInt16ArrType_Type PyIntArrType_Type
+# define PyUInt16ScalarObject PyIntUScalarObject
+# define PyUInt16ArrType_Type PyIntUArrType_Type
+#define NPY_INT16_FMT NPY_INT_FMT
+#define NPY_UINT16_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_INT
+#define NPY_UINT32 NPY_UINT
+ typedef int npy_int32;
+ typedef unsigned int npy_uint32;
+ typedef unsigned int npy_ucs4;
+# define PyInt32ScalarObject PyIntScalarObject
+# define PyInt32ArrType_Type PyIntArrType_Type
+# define PyUInt32ScalarObject PyUIntScalarObject
+# define PyUInt32ArrType_Type PyUIntArrType_Type
+#define NPY_INT32_FMT NPY_INT_FMT
+#define NPY_UINT32_FMT NPY_UINT_FMT
+#endif
+#elif NPY_BITSOF_INT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_INT
+#define NPY_UINT64 NPY_UINT
+ typedef int npy_int64;
+ typedef unsigned int npy_uint64;
+# define PyInt64ScalarObject PyIntScalarObject
+# define PyInt64ArrType_Type PyIntArrType_Type
+# define PyUInt64ScalarObject PyUIntScalarObject
+# define PyUInt64ArrType_Type PyUIntArrType_Type
+#define NPY_INT64_FMT NPY_INT_FMT
+#define NPY_UINT64_FMT NPY_UINT_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_INT == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_INT
+#define NPY_UINT128 NPY_UINT
+ typedef int npy_int128;
+ typedef unsigned int npy_uint128;
+# define PyInt128ScalarObject PyIntScalarObject
+# define PyInt128ArrType_Type PyIntArrType_Type
+# define PyUInt128ScalarObject PyUIntScalarObject
+# define PyUInt128ArrType_Type PyUIntArrType_Type
+#define NPY_INT128_FMT NPY_INT_FMT
+#define NPY_UINT128_FMT NPY_UINT_FMT
+#endif
+#endif
+
+#if NPY_BITSOF_SHORT == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_SHORT
+#define NPY_UINT8 NPY_USHORT
+ typedef short npy_int8;
+ typedef unsigned short npy_uint8;
+# define PyInt8ScalarObject PyShortScalarObject
+# define PyInt8ArrType_Type PyShortArrType_Type
+# define PyUInt8ScalarObject PyUShortScalarObject
+# define PyUInt8ArrType_Type PyUShortArrType_Type
+#define NPY_INT8_FMT NPY_SHORT_FMT
+#define NPY_UINT8_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_SHORT
+#define NPY_UINT16 NPY_USHORT
+ typedef short npy_int16;
+ typedef unsigned short npy_uint16;
+# define PyInt16ScalarObject PyShortScalarObject
+# define PyInt16ArrType_Type PyShortArrType_Type
+# define PyUInt16ScalarObject PyUShortScalarObject
+# define PyUInt16ArrType_Type PyUShortArrType_Type
+#define NPY_INT16_FMT NPY_SHORT_FMT
+#define NPY_UINT16_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_SHORT
+#define NPY_UINT32 NPY_USHORT
+ typedef short npy_int32;
+ typedef unsigned short npy_uint32;
+ typedef unsigned short npy_ucs4;
+# define PyInt32ScalarObject PyShortScalarObject
+# define PyInt32ArrType_Type PyShortArrType_Type
+# define PyUInt32ScalarObject PyUShortScalarObject
+# define PyUInt32ArrType_Type PyUShortArrType_Type
+#define NPY_INT32_FMT NPY_SHORT_FMT
+#define NPY_UINT32_FMT NPY_USHORT_FMT
+#endif
+#elif NPY_BITSOF_SHORT == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_SHORT
+#define NPY_UINT64 NPY_USHORT
+ typedef short npy_int64;
+ typedef unsigned short npy_uint64;
+# define PyInt64ScalarObject PyShortScalarObject
+# define PyInt64ArrType_Type PyShortArrType_Type
+# define PyUInt64ScalarObject PyUShortScalarObject
+# define PyUInt64ArrType_Type PyUShortArrType_Type
+#define NPY_INT64_FMT NPY_SHORT_FMT
+#define NPY_UINT64_FMT NPY_USHORT_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_SHORT == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_SHORT
+#define NPY_UINT128 NPY_USHORT
+ typedef short npy_int128;
+ typedef unsigned short npy_uint128;
+# define PyInt128ScalarObject PyShortScalarObject
+# define PyInt128ArrType_Type PyShortArrType_Type
+# define PyUInt128ScalarObject PyUShortScalarObject
+# define PyUInt128ArrType_Type PyUShortArrType_Type
+#define NPY_INT128_FMT NPY_SHORT_FMT
+#define NPY_UINT128_FMT NPY_USHORT_FMT
+#endif
+#endif
+
+
+#if NPY_BITSOF_CHAR == 8
+#ifndef NPY_INT8
+#define NPY_INT8 NPY_BYTE
+#define NPY_UINT8 NPY_UBYTE
+ typedef signed char npy_int8;
+ typedef unsigned char npy_uint8;
+# define PyInt8ScalarObject PyByteScalarObject
+# define PyInt8ArrType_Type PyByteArrType_Type
+# define PyUInt8ScalarObject PyUByteScalarObject
+# define PyUInt8ArrType_Type PyUByteArrType_Type
+#define NPY_INT8_FMT NPY_BYTE_FMT
+#define NPY_UINT8_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 16
+#ifndef NPY_INT16
+#define NPY_INT16 NPY_BYTE
+#define NPY_UINT16 NPY_UBYTE
+ typedef signed char npy_int16;
+ typedef unsigned char npy_uint16;
+# define PyInt16ScalarObject PyByteScalarObject
+# define PyInt16ArrType_Type PyByteArrType_Type
+# define PyUInt16ScalarObject PyUByteScalarObject
+# define PyUInt16ArrType_Type PyUByteArrType_Type
+#define NPY_INT16_FMT NPY_BYTE_FMT
+#define NPY_UINT16_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 32
+#ifndef NPY_INT32
+#define NPY_INT32 NPY_BYTE
+#define NPY_UINT32 NPY_UBYTE
+ typedef signed char npy_int32;
+ typedef unsigned char npy_uint32;
+ typedef unsigned char npy_ucs4;
+# define PyInt32ScalarObject PyByteScalarObject
+# define PyInt32ArrType_Type PyByteArrType_Type
+# define PyUInt32ScalarObject PyUByteScalarObject
+# define PyUInt32ArrType_Type PyUByteArrType_Type
+#define NPY_INT32_FMT NPY_BYTE_FMT
+#define NPY_UINT32_FMT NPY_UBYTE_FMT
+#endif
+#elif NPY_BITSOF_CHAR == 64
+#ifndef NPY_INT64
+#define NPY_INT64 NPY_BYTE
+#define NPY_UINT64 NPY_UBYTE
+ typedef signed char npy_int64;
+ typedef unsigned char npy_uint64;
+# define PyInt64ScalarObject PyByteScalarObject
+# define PyInt64ArrType_Type PyByteArrType_Type
+# define PyUInt64ScalarObject PyUByteScalarObject
+# define PyUInt64ArrType_Type PyUByteArrType_Type
+#define NPY_INT64_FMT NPY_BYTE_FMT
+#define NPY_UINT64_FMT NPY_UBYTE_FMT
+# define MyPyLong_FromInt64 PyLong_FromLong
+# define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+#elif NPY_BITSOF_CHAR == 128
+#ifndef NPY_INT128
+#define NPY_INT128 NPY_BYTE
+#define NPY_UINT128 NPY_UBYTE
+ typedef signed char npy_int128;
+ typedef unsigned char npy_uint128;
+# define PyInt128ScalarObject PyByteScalarObject
+# define PyInt128ArrType_Type PyByteArrType_Type
+# define PyUInt128ScalarObject PyUByteScalarObject
+# define PyUInt128ArrType_Type PyUByteArrType_Type
+#define NPY_INT128_FMT NPY_BYTE_FMT
+#define NPY_UINT128_FMT NPY_UBYTE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_DOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_DOUBLE
+#define NPY_COMPLEX64 NPY_CDOUBLE
+ typedef double npy_float32;
+ typedef npy_cdouble npy_complex64;
+# define PyFloat32ScalarObject PyDoubleScalarObject
+# define PyComplex64ScalarObject PyCDoubleScalarObject
+# define PyFloat32ArrType_Type PyDoubleArrType_Type
+# define PyComplex64ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_DOUBLE
+#define NPY_COMPLEX128 NPY_CDOUBLE
+ typedef double npy_float64;
+ typedef npy_cdouble npy_complex128;
+# define PyFloat64ScalarObject PyDoubleScalarObject
+# define PyComplex128ScalarObject PyCDoubleScalarObject
+# define PyFloat64ArrType_Type PyDoubleArrType_Type
+# define PyComplex128ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_DOUBLE
+#define NPY_COMPLEX160 NPY_CDOUBLE
+ typedef double npy_float80;
+ typedef npy_cdouble npy_complex160;
+# define PyFloat80ScalarObject PyDoubleScalarObject
+# define PyComplex160ScalarObject PyCDoubleScalarObject
+# define PyFloat80ArrType_Type PyDoubleArrType_Type
+# define PyComplex160ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_DOUBLE
+#define NPY_COMPLEX192 NPY_CDOUBLE
+ typedef double npy_float96;
+ typedef npy_cdouble npy_complex192;
+# define PyFloat96ScalarObject PyDoubleScalarObject
+# define PyComplex192ScalarObject PyCDoubleScalarObject
+# define PyFloat96ArrType_Type PyDoubleArrType_Type
+# define PyComplex192ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_DOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_DOUBLE
+#define NPY_COMPLEX256 NPY_CDOUBLE
+ typedef double npy_float128;
+ typedef npy_cdouble npy_complex256;
+# define PyFloat128ScalarObject PyDoubleScalarObject
+# define PyComplex256ScalarObject PyCDoubleScalarObject
+# define PyFloat128ArrType_Type PyDoubleArrType_Type
+# define PyComplex256ArrType_Type PyCDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT
+#endif
+#endif
+
+
+
+#if NPY_BITSOF_FLOAT == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_FLOAT
+#define NPY_COMPLEX64 NPY_CFLOAT
+ typedef float npy_float32;
+ typedef npy_cfloat npy_complex64;
+# define PyFloat32ScalarObject PyFloatScalarObject
+# define PyComplex64ScalarObject PyCFloatScalarObject
+# define PyFloat32ArrType_Type PyFloatArrType_Type
+# define PyComplex64ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT32_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_FLOAT
+#define NPY_COMPLEX128 NPY_CFLOAT
+ typedef float npy_float64;
+ typedef npy_cfloat npy_complex128;
+# define PyFloat64ScalarObject PyFloatScalarObject
+# define PyComplex128ScalarObject PyCFloatScalarObject
+# define PyFloat64ArrType_Type PyFloatArrType_Type
+# define PyComplex128ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT64_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_FLOAT
+#define NPY_COMPLEX160 NPY_CFLOAT
+ typedef float npy_float80;
+ typedef npy_cfloat npy_complex160;
+# define PyFloat80ScalarObject PyFloatScalarObject
+# define PyComplex160ScalarObject PyCFloatScalarObject
+# define PyFloat80ArrType_Type PyFloatArrType_Type
+# define PyComplex160ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT80_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_FLOAT
+#define NPY_COMPLEX192 NPY_CFLOAT
+ typedef float npy_float96;
+ typedef npy_cfloat npy_complex192;
+# define PyFloat96ScalarObject PyFloatScalarObject
+# define PyComplex192ScalarObject PyCFloatScalarObject
+# define PyFloat96ArrType_Type PyFloatArrType_Type
+# define PyComplex192ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT96_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT
+#endif
+#elif NPY_BITSOF_FLOAT == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_FLOAT
+#define NPY_COMPLEX256 NPY_CFLOAT
+ typedef float npy_float128;
+ typedef npy_cfloat npy_complex256;
+# define PyFloat128ScalarObject PyFloatScalarObject
+# define PyComplex256ScalarObject PyCFloatScalarObject
+# define PyFloat128ArrType_Type PyFloatArrType_Type
+# define PyComplex256ArrType_Type PyCFloatArrType_Type
+#define NPY_FLOAT128_FMT NPY_FLOAT_FMT
+#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT
+#endif
+#endif
+
+/* half/float16 isn't a floating-point type in C */
+#define NPY_FLOAT16 NPY_HALF
+typedef npy_uint16 npy_half;
+typedef npy_half npy_float16;
+
+#if NPY_BITSOF_LONGDOUBLE == 32
+#ifndef NPY_FLOAT32
+#define NPY_FLOAT32 NPY_LONGDOUBLE
+#define NPY_COMPLEX64 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float32;
+ typedef npy_clongdouble npy_complex64;
+# define PyFloat32ScalarObject PyLongDoubleScalarObject
+# define PyComplex64ScalarObject PyCLongDoubleScalarObject
+# define PyFloat32ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 64
+#ifndef NPY_FLOAT64
+#define NPY_FLOAT64 NPY_LONGDOUBLE
+#define NPY_COMPLEX128 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float64;
+ typedef npy_clongdouble npy_complex128;
+# define PyFloat64ScalarObject PyLongDoubleScalarObject
+# define PyComplex128ScalarObject PyCLongDoubleScalarObject
+# define PyFloat64ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_LONGDOUBLE
+#define NPY_COMPLEX160 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float80;
+ typedef npy_clongdouble npy_complex160;
+# define PyFloat80ScalarObject PyLongDoubleScalarObject
+# define PyComplex160ScalarObject PyCLongDoubleScalarObject
+# define PyFloat80ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_LONGDOUBLE
+#define NPY_COMPLEX192 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float96;
+ typedef npy_clongdouble npy_complex192;
+# define PyFloat96ScalarObject PyLongDoubleScalarObject
+# define PyComplex192ScalarObject PyCLongDoubleScalarObject
+# define PyFloat96ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_LONGDOUBLE
+#define NPY_COMPLEX256 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float128;
+ typedef npy_clongdouble npy_complex256;
+# define PyFloat128ScalarObject PyLongDoubleScalarObject
+# define PyComplex256ScalarObject PyCLongDoubleScalarObject
+# define PyFloat128ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 256
+#define NPY_FLOAT256 NPY_LONGDOUBLE
+#define NPY_COMPLEX512 NPY_CLONGDOUBLE
+ typedef npy_longdouble npy_float256;
+ typedef npy_clongdouble npy_complex512;
+# define PyFloat256ScalarObject PyLongDoubleScalarObject
+# define PyComplex512ScalarObject PyCLongDoubleScalarObject
+# define PyFloat256ArrType_Type PyLongDoubleArrType_Type
+# define PyComplex512ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT256_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX512_FMT NPY_CLONGDOUBLE_FMT
+#endif
+
+/* datetime typedefs */
+typedef npy_int64 npy_timedelta;
+typedef npy_int64 npy_datetime;
+#define NPY_DATETIME_FMT NPY_INT64_FMT
+#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
+
+/* End of typedefs for numarray style bit-width names */
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */
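
For illustration, a minimal sketch of how the bit-width aliases and NPY_*_FMT
macros above are consumed by downstream C code (`print_stamp` is a
hypothetical helper, not part of NumPy)::

    #include <stdio.h>
    #include <numpy/npy_common.h>

    /* npy_datetime is always a 64-bit signed integer, whichever platform
     * type npy_int64 resolved to above; NPY_DATETIME_FMT supplies the
     * matching printf conversion suffix. */
    static void print_stamp(npy_datetime stamp)
    {
        printf("stamp = %" NPY_DATETIME_FMT "\n", stamp);
    }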
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h
new file mode 100644
index 00000000..78d229e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_cpu.h
@@ -0,0 +1,129 @@
+/*
+ * This sets (target) CPU-specific macros:
+ * - Possible values:
+ * NPY_CPU_X86
+ * NPY_CPU_AMD64
+ * NPY_CPU_PPC
+ * NPY_CPU_PPC64
+ * NPY_CPU_PPC64LE
+ * NPY_CPU_SPARC
+ * NPY_CPU_S390
+ * NPY_CPU_IA64
+ * NPY_CPU_HPPA
+ * NPY_CPU_ALPHA
+ * NPY_CPU_ARMEL
+ * NPY_CPU_ARMEB
+ * NPY_CPU_SH_LE
+ * NPY_CPU_SH_BE
+ * NPY_CPU_ARCEL
+ * NPY_CPU_ARCEB
+ * NPY_CPU_RISCV64
+ * NPY_CPU_LOONGARCH
+ * NPY_CPU_WASM
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+
+#include "numpyconfig.h"
+
+#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
+ /*
+ * __i386__ is defined by gcc and Intel compiler on Linux,
+ * _M_IX86 by VS compiler,
+ * i386 by Sun compilers on opensolaris at least
+ */
+ #define NPY_CPU_X86
+#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
+ /*
+ * both __x86_64__ and __amd64__ are defined by gcc
+ * __x86_64 defined by sun compiler on opensolaris at least
+ * _M_AMD64 defined by MS compiler
+ */
+ #define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_PPC64
+#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
+ /*
+ * __ppc__ is defined by gcc; __powerpc__ has also been observed,
+ * though it is less common.
+ * _ARCH_PPC is used by at least gcc on AIX.
+ * Since __powerpc__ and _ARCH_PPC are also defined on PPC64, check
+ * for those specifically first before defaulting to ppc.
+ */
+ #define NPY_CPU_PPC
+#elif defined(__sparc__) || defined(__sparc)
+ /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
+ #define NPY_CPU_SPARC
+#elif defined(__s390__)
+ #define NPY_CPU_S390
+#elif defined(__ia64)
+ #define NPY_CPU_IA64
+#elif defined(__hppa)
+ #define NPY_CPU_HPPA
+#elif defined(__alpha__)
+ #define NPY_CPU_ALPHA
+#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
+ /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
+ #if defined(__ARMEB__) || defined(__AARCH64EB__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH64
+ #else
+ #define NPY_CPU_ARMEB
+ #endif
+ #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH32
+ #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64)
+ #define NPY_CPU_ARMEL_AARCH64
+ #else
+ #define NPY_CPU_ARMEL
+ #endif
+ #else
+ # error Unknown ARM CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+ #endif
+#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_SH_LE
+#elif defined(__sh__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_SH_BE
+#elif defined(__MIPSEL__)
+ #define NPY_CPU_MIPSEL
+#elif defined(__MIPSEB__)
+ #define NPY_CPU_MIPSEB
+#elif defined(__or1k__)
+ #define NPY_CPU_OR1K
+#elif defined(__mc68000__)
+ #define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_ARCEB
+#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
+ #define NPY_CPU_RISCV64
+#elif defined(__loongarch__)
+ #define NPY_CPU_LOONGARCH
+#elif defined(__EMSCRIPTEN__)
+ /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
+ #define NPY_CPU_WASM
+#else
+ #error Unknown CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+#endif
+
+/*
+ * Except for the architectures listed below, memory access is limited to the
+ * natural alignment of data types; violating it may lead to a bus error or a
+ * performance regression.
+ * For more details about unaligned access, see
+ * https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt.
+ */
+#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__)
+ #define NPY_ALIGNMENT_REQUIRED 0
+#endif
+#ifndef NPY_ALIGNMENT_REQUIRED
+ #define NPY_ALIGNMENT_REQUIRED 1
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
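
The NPY_ALIGNMENT_REQUIRED flag defined above is typically consumed as
follows (a minimal sketch assuming a caller-supplied byte buffer;
`load_double` is hypothetical)::

    #include <string.h>
    #include <numpy/npy_cpu.h>

    /* Read a double from a possibly unaligned buffer.  On strict-alignment
     * CPUs (NPY_ALIGNMENT_REQUIRED == 1) go through memcpy; elsewhere a
     * direct load is safe. */
    static double load_double(const char *buf)
    {
    #if NPY_ALIGNMENT_REQUIRED
        double d;
        memcpy(&d, buf, sizeof(d));
        return d;
    #else
        return *(const double *)buf;
    #endif
    }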
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h
new file mode 100644
index 00000000..5e58a7f5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_endian.h
@@ -0,0 +1,77 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+
+/*
+ * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
+ * endian.h
+ */
+
+#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
+ /* Use endian.h if available */
+
+ #if defined(NPY_HAVE_ENDIAN_H)
+ #include <endian.h>
+ #elif defined(NPY_HAVE_SYS_ENDIAN_H)
+ #include <sys/endian.h>
+ #endif
+
+ #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN BIG_ENDIAN
+ #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER _BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN _BIG_ENDIAN
+ #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+ #define NPY_BYTE_ORDER __BYTE_ORDER
+ #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN __BIG_ENDIAN
+ #endif
+#endif
+
+#ifndef NPY_BYTE_ORDER
+ /* Set endianness info using target CPU */
+ #include "npy_cpu.h"
+
+ #define NPY_LITTLE_ENDIAN 1234
+ #define NPY_BIG_ENDIAN 4321
+
+ #if defined(NPY_CPU_X86) \
+ || defined(NPY_CPU_AMD64) \
+ || defined(NPY_CPU_IA64) \
+ || defined(NPY_CPU_ALPHA) \
+ || defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_ARMEL_AARCH32) \
+ || defined(NPY_CPU_ARMEL_AARCH64) \
+ || defined(NPY_CPU_SH_LE) \
+ || defined(NPY_CPU_MIPSEL) \
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL) \
+ || defined(NPY_CPU_RISCV64) \
+ || defined(NPY_CPU_LOONGARCH) \
+ || defined(NPY_CPU_WASM)
+ #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+
+ #elif defined(NPY_CPU_PPC) \
+ || defined(NPY_CPU_SPARC) \
+ || defined(NPY_CPU_S390) \
+ || defined(NPY_CPU_HPPA) \
+ || defined(NPY_CPU_PPC64) \
+ || defined(NPY_CPU_ARMEB) \
+ || defined(NPY_CPU_ARMEB_AARCH32) \
+ || defined(NPY_CPU_ARMEB_AARCH64) \
+ || defined(NPY_CPU_SH_BE) \
+ || defined(NPY_CPU_MIPSEB) \
+ || defined(NPY_CPU_OR1K) \
+ || defined(NPY_CPU_M68K) \
+ || defined(NPY_CPU_ARCEB)
+ #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
+
+ #else
+ #error Unknown CPU: can not set endianness
+ #endif
+
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */
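
A short sketch of the usual compile-time use of NPY_BYTE_ORDER (illustrative
only; `to_le32` is a hypothetical helper)::

    #include <numpy/npy_common.h>
    #include <numpy/npy_endian.h>

    /* Convert a 32-bit value to little-endian wire order. */
    static npy_uint32 to_le32(npy_uint32 x)
    {
    #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
        return x;                    /* already little-endian */
    #else
        return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
               ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
    #endif
    }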
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h
new file mode 100644
index 00000000..69a0374d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_interrupt.h
@@ -0,0 +1,56 @@
+/*
+ * This API is only provided because it is part of publicly exported
+ * headers. Its use is considered DEPRECATED, and it will be removed
+ * eventually.
+ * (This includes the _PyArray_SigintHandler and _PyArray_GetSigintBuf
+ * functions, which are, however, part of the public API rather than
+ * of the headers.)
+ *
+ * Instead of using these non-threadsafe macros, consider periodically
+ * calling `PyErr_CheckSignals()` or `PyOS_InterruptOccurred()`.
+ * Both of these require holding the GIL, although CPython could add a
+ * version of `PyOS_InterruptOccurred()` which does not. Such a version
+ * actually exists as private API in Python 3.10 (backported to 3.9 and 3.8);
+ * see https://bugs.python.org/issue41037 and
+ * https://github.com/python/cpython/pull/20599.
+ */
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_
+
+#ifndef NPY_NO_SIGNAL
+
+#include <setjmp.h>
+#include <signal.h>
+
+#ifndef sigsetjmp
+
+#define NPY_SIGSETJMP(arg1, arg2) setjmp(arg1)
+#define NPY_SIGLONGJMP(arg1, arg2) longjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF jmp_buf
+
+#else
+
+#define NPY_SIGSETJMP(arg1, arg2) sigsetjmp(arg1, arg2)
+#define NPY_SIGLONGJMP(arg1, arg2) siglongjmp(arg1, arg2)
+#define NPY_SIGJMP_BUF sigjmp_buf
+
+#endif
+
+# define NPY_SIGINT_ON { \
+ PyOS_sighandler_t _npy_sig_save; \
+ _npy_sig_save = PyOS_setsig(SIGINT, _PyArray_SigintHandler); \
+ if (NPY_SIGSETJMP(*((NPY_SIGJMP_BUF *)_PyArray_GetSigintBuf()), \
+ 1) == 0) { \
+
+# define NPY_SIGINT_OFF } \
+ PyOS_setsig(SIGINT, _npy_sig_save); \
+ }
+
+#else /* NPY_NO_SIGNAL */
+
+#define NPY_SIGINT_ON
+#define NPY_SIGINT_OFF
+
+#endif /* NPY_NO_SIGNAL */
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_INTERRUPT_H_ */
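
For reference, the macro pair above brackets a long-running section like this
(a sketch only, given the deprecation notice; `heavy_kernel`, `data` and `n`
are hypothetical)::

    /* A Ctrl-C inside the block longjmps back to NPY_SIGINT_ON, after
     * which NPY_SIGINT_OFF restores the original SIGINT handler. */
    NPY_SIGINT_ON
    for (npy_intp i = 0; i < n; i++) {
        heavy_kernel(data, i);       /* hypothetical long-running work */
    }
    NPY_SIGINT_OFF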
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h
new file mode 100644
index 00000000..90050866
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_math.h
@@ -0,0 +1,590 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+
+#include <numpy/npy_common.h>
+
+#include <math.h>
+
+/* By adding static inline specifiers to npy_math function definitions when
+   appropriate, the compiler is given the opportunity to optimize. */
+#if NPY_INLINE_MATH
+#define NPY_INPLACE NPY_INLINE static
+#else
+#define NPY_INPLACE
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * NAN- and INFINITY-like macros (same behavior as glibc for NAN, same as C99
+ * for INFINITY).
+ *
+ * XXX: it remains to be tested whether INFINITY and NAN are available on the
+ * platform.
+ */
+NPY_INLINE static float __npy_inff(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
+ return __bint.__f;
+}
+
+NPY_INLINE static float __npy_nanf(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
+ return __bint.__f;
+}
+
+NPY_INLINE static float __npy_pzerof(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
+ return __bint.__f;
+}
+
+NPY_INLINE static float __npy_nzerof(void)
+{
+ const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
+ return __bint.__f;
+}
+
+#define NPY_INFINITYF __npy_inff()
+#define NPY_NANF __npy_nanf()
+#define NPY_PZEROF __npy_pzerof()
+#define NPY_NZEROF __npy_nzerof()
+
+#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
+#define NPY_NAN ((npy_double)NPY_NANF)
+#define NPY_PZERO ((npy_double)NPY_PZEROF)
+#define NPY_NZERO ((npy_double)NPY_NZEROF)
+
+#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
+#define NPY_NANL ((npy_longdouble)NPY_NANF)
+#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)
+#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)
+
+/*
+ * Useful constants
+ */
+#define NPY_E 2.718281828459045235360287471352662498 /* e */
+#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
+#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
+#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
+#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
+#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
+#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
+#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
+#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
+#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
+#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
+#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
+#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
+
+#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
+#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
+#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
+#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
+#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
+#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
+#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
+#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
+#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
+#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
+#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
+#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
+#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
+
+#define NPY_El 2.718281828459045235360287471352662498L /* e */
+#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
+#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
+#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
+#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
+#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
+#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
+#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
+#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
+#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
+#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
+#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
+#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
+
+/*
+ * Integer functions.
+ */
+NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a);
+NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a);
+NPY_INPLACE uint8_t npy_popcountu(npy_uint a);
+NPY_INPLACE uint8_t npy_popcountul(npy_ulong a);
+NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a);
+NPY_INPLACE uint8_t npy_popcounthh(npy_byte a);
+NPY_INPLACE uint8_t npy_popcounth(npy_short a);
+NPY_INPLACE uint8_t npy_popcount(npy_int a);
+NPY_INPLACE uint8_t npy_popcountl(npy_long a);
+NPY_INPLACE uint8_t npy_popcountll(npy_longlong a);
+
+/*
+ * C99 double math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE double npy_sin(double x);
+NPY_INPLACE double npy_cos(double x);
+NPY_INPLACE double npy_tan(double x);
+NPY_INPLACE double npy_hypot(double x, double y);
+NPY_INPLACE double npy_log2(double x);
+NPY_INPLACE double npy_atan2(double x, double y);
+
+/* Mandatory C99 double math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+#define npy_sinh sinh
+#define npy_cosh cosh
+#define npy_tanh tanh
+#define npy_asin asin
+#define npy_acos acos
+#define npy_atan atan
+#define npy_log log
+#define npy_log10 log10
+#define npy_cbrt cbrt
+#define npy_fabs fabs
+#define npy_ceil ceil
+#define npy_fmod fmod
+#define npy_floor floor
+#define npy_expm1 expm1
+#define npy_log1p log1p
+#define npy_acosh acosh
+#define npy_asinh asinh
+#define npy_atanh atanh
+#define npy_rint rint
+#define npy_trunc trunc
+#define npy_exp2 exp2
+#define npy_frexp frexp
+#define npy_ldexp ldexp
+#define npy_copysign copysign
+#define npy_exp exp
+#define npy_sqrt sqrt
+#define npy_pow pow
+#define npy_modf modf
+
+double npy_nextafter(double x, double y);
+
+double npy_spacing(double x);
+
+/*
+ * IEEE 754 FPU handling. These are guaranteed to be macros.
+ */
+
+/* Use builtins to avoid function calls in tight loops;
+ * only available if npy_config.h is available (= numpy's own build). */
+#ifdef HAVE___BUILTIN_ISNAN
+ #define npy_isnan(x) __builtin_isnan(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISNAN
+ #define npy_isnan(x) ((x) != (x))
+ #else
+ #define npy_isnan(x) isnan(x)
+ #endif
+#endif
+
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#ifdef HAVE___BUILTIN_ISFINITE
+ #define npy_isfinite(x) __builtin_isfinite(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISFINITE
+ #ifdef _MSC_VER
+ #define npy_isfinite(x) _finite((x))
+ #else
+ #define npy_isfinite(x) !npy_isnan((x) + (-x))
+ #endif
+ #else
+ #define npy_isfinite(x) isfinite((x))
+ #endif
+#endif
+
+/* only available if npy_config.h is available (= numpy's own build) */
+#ifdef HAVE___BUILTIN_ISINF
+ #define npy_isinf(x) __builtin_isinf(x)
+#else
+ #ifndef NPY_HAVE_DECL_ISINF
+ #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x))
+ #else
+ #define npy_isinf(x) isinf((x))
+ #endif
+#endif
+
+#ifndef NPY_HAVE_DECL_SIGNBIT
+ int _npy_signbit_f(float x);
+ int _npy_signbit_d(double x);
+ int _npy_signbit_ld(long double x);
+ #define npy_signbit(x) \
+ (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \
+ : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \
+ : _npy_signbit_f (x))
+#else
+ #define npy_signbit(x) signbit((x))
+#endif
+
+/*
+ * float C99 math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE float npy_sinf(float x);
+NPY_INPLACE float npy_cosf(float x);
+NPY_INPLACE float npy_tanf(float x);
+NPY_INPLACE float npy_expf(float x);
+NPY_INPLACE float npy_sqrtf(float x);
+NPY_INPLACE float npy_hypotf(float x, float y);
+NPY_INPLACE float npy_log2f(float x);
+NPY_INPLACE float npy_atan2f(float x, float y);
+NPY_INPLACE float npy_powf(float x, float y);
+NPY_INPLACE float npy_modff(float x, float* y);
+
+/* Mandatory C99 float math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+
+#define npy_sinhf sinhf
+#define npy_coshf coshf
+#define npy_tanhf tanhf
+#define npy_asinf asinf
+#define npy_acosf acosf
+#define npy_atanf atanf
+#define npy_logf logf
+#define npy_log10f log10f
+#define npy_cbrtf cbrtf
+#define npy_fabsf fabsf
+#define npy_ceilf ceilf
+#define npy_fmodf fmodf
+#define npy_floorf floorf
+#define npy_expm1f expm1f
+#define npy_log1pf log1pf
+#define npy_asinhf asinhf
+#define npy_acoshf acoshf
+#define npy_atanhf atanhf
+#define npy_rintf rintf
+#define npy_truncf truncf
+#define npy_exp2f exp2f
+#define npy_frexpf frexpf
+#define npy_ldexpf ldexpf
+#define npy_copysignf copysignf
+
+float npy_nextafterf(float x, float y);
+float npy_spacingf(float x);
+
+/*
+ * C99 long double math funcs that need fixups or are blocklist-able
+ */
+NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
+
+/* Mandatory C99 long double math funcs, no blocklisting or fixups */
+/* defined for legacy reasons, should be deprecated at some point */
+#define npy_sinhl sinhl
+#define npy_coshl coshl
+#define npy_tanhl tanhl
+#define npy_fabsl fabsl
+#define npy_floorl floorl
+#define npy_ceill ceill
+#define npy_rintl rintl
+#define npy_truncl truncl
+#define npy_cbrtl cbrtl
+#define npy_log10l log10l
+#define npy_logl logl
+#define npy_expm1l expm1l
+#define npy_asinl asinl
+#define npy_acosl acosl
+#define npy_atanl atanl
+#define npy_asinhl asinhl
+#define npy_acoshl acoshl
+#define npy_atanhl atanhl
+#define npy_log1pl log1pl
+#define npy_exp2l exp2l
+#define npy_fmodl fmodl
+#define npy_frexpl frexpl
+#define npy_ldexpl ldexpl
+#define npy_copysignl copysignl
+
+npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y);
+npy_longdouble npy_spacingl(npy_longdouble x);
+
+/*
+ * Non standard functions
+ */
+NPY_INPLACE double npy_deg2rad(double x);
+NPY_INPLACE double npy_rad2deg(double x);
+NPY_INPLACE double npy_logaddexp(double x, double y);
+NPY_INPLACE double npy_logaddexp2(double x, double y);
+NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
+NPY_INPLACE double npy_heaviside(double x, double h0);
+
+NPY_INPLACE float npy_deg2radf(float x);
+NPY_INPLACE float npy_rad2degf(float x);
+NPY_INPLACE float npy_logaddexpf(float x, float y);
+NPY_INPLACE float npy_logaddexp2f(float x, float y);
+NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
+NPY_INPLACE float npy_heavisidef(float x, float h0);
+
+NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
+NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
+NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
+ npy_longdouble *modulus);
+NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
+
+#define npy_degrees npy_rad2deg
+#define npy_degreesf npy_rad2degf
+#define npy_degreesl npy_rad2degl
+
+#define npy_radians npy_deg2rad
+#define npy_radiansf npy_deg2radf
+#define npy_radiansl npy_deg2radl
+
+/*
+ * Complex declarations
+ */
+
+/*
+ * C99 specifies that complex numbers have the same representation as
+ * an array of two elements, where the first element is the real part
+ * and the second element is the imaginary part.
+ */
+#define __NPY_CPACK_IMP(x, y, type, ctype) \
+ union { \
+ ctype z; \
+ type a[2]; \
+ } z1; \
+ \
+ z1.a[0] = (x); \
+ z1.a[1] = (y); \
+ \
+ return z1.z;
+
+static NPY_INLINE npy_cdouble npy_cpack(double x, double y)
+{
+ __NPY_CPACK_IMP(x, y, double, npy_cdouble);
+}
+
+static NPY_INLINE npy_cfloat npy_cpackf(float x, float y)
+{
+ __NPY_CPACK_IMP(x, y, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
+{
+ __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CPACK_IMP
+
+/*
+ * Same remark as above, but in the other direction: extract first/second
+ * member of complex number, assuming a C99-compatible representation
+ *
+ * These are defined as static inline, so a reasonable compiler will
+ * most likely compile them to one or two instructions (on CISC at least).
+ */
+#define __NPY_CEXTRACT_IMP(z, index, type, ctype) \
+ union { \
+ ctype z; \
+ type a[2]; \
+ } __z_repr; \
+ __z_repr.z = z; \
+ \
+ return __z_repr.a[index];
+
+static NPY_INLINE double npy_creal(npy_cdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
+}
+
+static NPY_INLINE double npy_cimag(npy_cdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
+}
+
+static NPY_INLINE float npy_crealf(npy_cfloat z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
+}
+
+static NPY_INLINE float npy_cimagf(npy_cfloat z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
+}
+
+static NPY_INLINE npy_longdouble npy_creall(npy_clongdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
+}
+
+static NPY_INLINE npy_longdouble npy_cimagl(npy_clongdouble z)
+{
+ __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
+}
+#undef __NPY_CEXTRACT_IMP
+
+/*
+ * Double precision complex functions
+ */
+double npy_cabs(npy_cdouble z);
+double npy_carg(npy_cdouble z);
+
+npy_cdouble npy_cexp(npy_cdouble z);
+npy_cdouble npy_clog(npy_cdouble z);
+npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);
+
+npy_cdouble npy_csqrt(npy_cdouble z);
+
+npy_cdouble npy_ccos(npy_cdouble z);
+npy_cdouble npy_csin(npy_cdouble z);
+npy_cdouble npy_ctan(npy_cdouble z);
+
+npy_cdouble npy_ccosh(npy_cdouble z);
+npy_cdouble npy_csinh(npy_cdouble z);
+npy_cdouble npy_ctanh(npy_cdouble z);
+
+npy_cdouble npy_cacos(npy_cdouble z);
+npy_cdouble npy_casin(npy_cdouble z);
+npy_cdouble npy_catan(npy_cdouble z);
+
+npy_cdouble npy_cacosh(npy_cdouble z);
+npy_cdouble npy_casinh(npy_cdouble z);
+npy_cdouble npy_catanh(npy_cdouble z);
+
+/*
+ * Single precision complex functions
+ */
+float npy_cabsf(npy_cfloat z);
+float npy_cargf(npy_cfloat z);
+
+npy_cfloat npy_cexpf(npy_cfloat z);
+npy_cfloat npy_clogf(npy_cfloat z);
+npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);
+
+npy_cfloat npy_csqrtf(npy_cfloat z);
+
+npy_cfloat npy_ccosf(npy_cfloat z);
+npy_cfloat npy_csinf(npy_cfloat z);
+npy_cfloat npy_ctanf(npy_cfloat z);
+
+npy_cfloat npy_ccoshf(npy_cfloat z);
+npy_cfloat npy_csinhf(npy_cfloat z);
+npy_cfloat npy_ctanhf(npy_cfloat z);
+
+npy_cfloat npy_cacosf(npy_cfloat z);
+npy_cfloat npy_casinf(npy_cfloat z);
+npy_cfloat npy_catanf(npy_cfloat z);
+
+npy_cfloat npy_cacoshf(npy_cfloat z);
+npy_cfloat npy_casinhf(npy_cfloat z);
+npy_cfloat npy_catanhf(npy_cfloat z);
+
+
+/*
+ * Extended precision complex functions
+ */
+npy_longdouble npy_cabsl(npy_clongdouble z);
+npy_longdouble npy_cargl(npy_clongdouble z);
+
+npy_clongdouble npy_cexpl(npy_clongdouble z);
+npy_clongdouble npy_clogl(npy_clongdouble z);
+npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);
+
+npy_clongdouble npy_csqrtl(npy_clongdouble z);
+
+npy_clongdouble npy_ccosl(npy_clongdouble z);
+npy_clongdouble npy_csinl(npy_clongdouble z);
+npy_clongdouble npy_ctanl(npy_clongdouble z);
+
+npy_clongdouble npy_ccoshl(npy_clongdouble z);
+npy_clongdouble npy_csinhl(npy_clongdouble z);
+npy_clongdouble npy_ctanhl(npy_clongdouble z);
+
+npy_clongdouble npy_cacosl(npy_clongdouble z);
+npy_clongdouble npy_casinl(npy_clongdouble z);
+npy_clongdouble npy_catanl(npy_clongdouble z);
+
+npy_clongdouble npy_cacoshl(npy_clongdouble z);
+npy_clongdouble npy_casinhl(npy_clongdouble z);
+npy_clongdouble npy_catanhl(npy_clongdouble z);
+
+
+/*
+ * Functions that set the floating point error
+ * status word.
+ */
+
+/*
+ * platform-dependent code translates floating point
+ * status to an integer sum of these values
+ */
+#define NPY_FPE_DIVIDEBYZERO 1
+#define NPY_FPE_OVERFLOW 2
+#define NPY_FPE_UNDERFLOW 4
+#define NPY_FPE_INVALID 8
+
+int npy_clear_floatstatus_barrier(char*);
+int npy_get_floatstatus_barrier(char*);
+/*
+ * Use caution with these: clang and gcc 8.1 are known to reorder calls
+ * to this form of the function, which can defeat the check. The _barrier
+ * form of the call is preferable, where the argument is
+ * (char*)&local_variable.
+ */
+int npy_clear_floatstatus(void);
+int npy_get_floatstatus(void);
+
+void npy_set_floatstatus_divbyzero(void);
+void npy_set_floatstatus_overflow(void);
+void npy_set_floatstatus_underflow(void);
+void npy_set_floatstatus_invalid(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#if NPY_INLINE_MATH
+#include "npy_math_internal.h"
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */
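
To make the floating-point-status section concrete, a minimal sketch of the
recommended _barrier pattern (`overflows` is a hypothetical helper)::

    #include <numpy/npy_math.h>

    static int overflows(double x)
    {
        char barrier;                 /* address defeats call reordering */
        volatile double y;
        npy_clear_floatstatus_barrier(&barrier);
        y = npy_exp(x);               /* may overflow for large x */
        (void)y;
        return npy_get_floatstatus_barrier(&barrier) & NPY_FPE_OVERFLOW;
    }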
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h
new file mode 100644
index 00000000..39658c0b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h
@@ -0,0 +1,20 @@
+/*
+ * This include file is provided for inclusion in Cython *.pyx files where
+ * one would like to define the NPY_NO_DEPRECATED_API macro. It can be
+ * included by
+ *
+ * cdef extern from "npy_no_deprecated_api.h": pass
+ *
+ */
+#ifndef NPY_NO_DEPRECATED_API
+
+/* put this check here since there may be multiple includes in C extensions. */
+#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \
+ defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \
+ defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)
+#error "npy_no_deprecated_api.h" must be first among numpy includes.
+#else
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+
+#endif /* NPY_NO_DEPRECATED_API */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h
new file mode 100644
index 00000000..6d8317d0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/npy_os.h
@@ -0,0 +1,36 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_
+
+#if defined(linux) || defined(__linux) || defined(__linux__)
+ #define NPY_OS_LINUX
+#elif defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__OpenBSD__) || defined(__DragonFly__)
+ #define NPY_OS_BSD
+ #ifdef __FreeBSD__
+ #define NPY_OS_FREEBSD
+ #elif defined(__NetBSD__)
+ #define NPY_OS_NETBSD
+ #elif defined(__OpenBSD__)
+ #define NPY_OS_OPENBSD
+ #elif defined(__DragonFly__)
+ #define NPY_OS_DRAGONFLY
+ #endif
+#elif defined(sun) || defined(__sun)
+ #define NPY_OS_SOLARIS
+#elif defined(__CYGWIN__)
+ #define NPY_OS_CYGWIN
+#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32)
+ #define NPY_OS_WIN32
+#elif defined(_WIN64) || defined(__WIN64__) || defined(WIN64)
+ #define NPY_OS_WIN64
+#elif defined(__MINGW32__) || defined(__MINGW64__)
+ #define NPY_OS_MINGW
+#elif defined(__APPLE__)
+ #define NPY_OS_DARWIN
+#elif defined(__HAIKU__)
+ #define NPY_OS_HAIKU
+#else
+ #define NPY_OS_UNKNOWN
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */
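
Downstream code typically branches on these OS macros as in the following
fragment (illustrative, not part of the header)::

    #include <numpy/npy_os.h>

    #if defined(NPY_OS_WIN32) || defined(NPY_OS_WIN64) || defined(NPY_OS_MINGW)
    #define PATH_SEP '\\'
    #else
    #define PATH_SEP '/'
    #endif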
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h
new file mode 100644
index 00000000..4b520983
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/numpyconfig.h
@@ -0,0 +1,84 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_
+
+#include "_numpyconfig.h"
+
+/*
+ * On Mac OS X, because there is only one configuration stage for all the archs
+ * in universal builds, any macro which depends on the arch needs to be
+ * hardcoded.
+ *
+ * Note that distutils/pip will attempt a universal2 build when Python itself
+ * is built as universal2, hence this hardcoding is needed even if we do not
+ * support universal2 wheels anymore (see gh-22796).
+ * This code block can be removed after we have dropped the setup.py based
+ * build completely.
+ */
+#ifdef __APPLE__
+ #undef NPY_SIZEOF_LONG
+ #undef NPY_SIZEOF_PY_INTPTR_T
+
+ #ifdef __LP64__
+ #define NPY_SIZEOF_LONG 8
+ #define NPY_SIZEOF_PY_INTPTR_T 8
+ #else
+ #define NPY_SIZEOF_LONG 4
+ #define NPY_SIZEOF_PY_INTPTR_T 4
+ #endif
+
+ #undef NPY_SIZEOF_LONGDOUBLE
+ #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE
+ #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE
+ #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE
+ #endif
+ #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+ #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
+ #endif
+
+ #if defined(__arm64__)
+ #define NPY_SIZEOF_LONGDOUBLE 8
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16
+ #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1
+ #elif defined(__x86_64)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1
+ #elif defined (__i386)
+ #define NPY_SIZEOF_LONGDOUBLE 12
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24
+ #elif defined(__ppc__) || defined (__ppc64__)
+ #define NPY_SIZEOF_LONGDOUBLE 16
+ #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32
+ #else
+ #error "unknown architecture"
+ #endif
+#endif
+
+
+/**
+ * To help with the NPY_NO_DEPRECATED_API macro, we include API version
+ * numbers for specific versions of NumPy. To exclude all API that was
+ * deprecated as of 1.7, add the following before #including any NumPy
+ * headers:
+ * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ */
+#define NPY_1_7_API_VERSION 0x00000007
+#define NPY_1_8_API_VERSION 0x00000008
+#define NPY_1_9_API_VERSION 0x00000008
+#define NPY_1_10_API_VERSION 0x00000008
+#define NPY_1_11_API_VERSION 0x00000008
+#define NPY_1_12_API_VERSION 0x00000008
+#define NPY_1_13_API_VERSION 0x00000008
+#define NPY_1_14_API_VERSION 0x00000008
+#define NPY_1_15_API_VERSION 0x00000008
+#define NPY_1_16_API_VERSION 0x00000008
+#define NPY_1_17_API_VERSION 0x00000008
+#define NPY_1_18_API_VERSION 0x00000008
+#define NPY_1_19_API_VERSION 0x00000008
+#define NPY_1_20_API_VERSION 0x0000000e
+#define NPY_1_21_API_VERSION 0x0000000e
+#define NPY_1_22_API_VERSION 0x0000000f
+#define NPY_1_23_API_VERSION 0x00000010
+#define NPY_1_24_API_VERSION 0x00000010
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
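
Putting the version constants above to use, the documented opt-out from
deprecated API looks like this at the top of an extension source file
(a sketch)::

    /* Reject all API deprecated as of NumPy 1.7; must appear before the
     * first NumPy include. */
    #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
    #include <numpy/arrayobject.h>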
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h
new file mode 100644
index 00000000..b3fa6775
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/old_defines.h
@@ -0,0 +1,187 @@
+/* This header is deprecated as of NumPy 1.7 */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_
+
+#if defined(NPY_NO_DEPRECATED_API) && NPY_NO_DEPRECATED_API >= NPY_1_7_API_VERSION
+#error The header "old_defines.h" is deprecated as of NumPy 1.7.
+#endif
+
+#define NDARRAY_VERSION NPY_VERSION
+
+#define PyArray_MIN_BUFSIZE NPY_MIN_BUFSIZE
+#define PyArray_MAX_BUFSIZE NPY_MAX_BUFSIZE
+#define PyArray_BUFSIZE NPY_BUFSIZE
+
+#define PyArray_PRIORITY NPY_PRIORITY
+#define PyArray_SUBTYPE_PRIORITY NPY_PRIORITY
+#define PyArray_NUM_FLOATTYPE NPY_NUM_FLOATTYPE
+
+#define NPY_MAX PyArray_MAX
+#define NPY_MIN PyArray_MIN
+
+#define PyArray_TYPES NPY_TYPES
+#define PyArray_BOOL NPY_BOOL
+#define PyArray_BYTE NPY_BYTE
+#define PyArray_UBYTE NPY_UBYTE
+#define PyArray_SHORT NPY_SHORT
+#define PyArray_USHORT NPY_USHORT
+#define PyArray_INT NPY_INT
+#define PyArray_UINT NPY_UINT
+#define PyArray_LONG NPY_LONG
+#define PyArray_ULONG NPY_ULONG
+#define PyArray_LONGLONG NPY_LONGLONG
+#define PyArray_ULONGLONG NPY_ULONGLONG
+#define PyArray_HALF NPY_HALF
+#define PyArray_FLOAT NPY_FLOAT
+#define PyArray_DOUBLE NPY_DOUBLE
+#define PyArray_LONGDOUBLE NPY_LONGDOUBLE
+#define PyArray_CFLOAT NPY_CFLOAT
+#define PyArray_CDOUBLE NPY_CDOUBLE
+#define PyArray_CLONGDOUBLE NPY_CLONGDOUBLE
+#define PyArray_OBJECT NPY_OBJECT
+#define PyArray_STRING NPY_STRING
+#define PyArray_UNICODE NPY_UNICODE
+#define PyArray_VOID NPY_VOID
+#define PyArray_DATETIME NPY_DATETIME
+#define PyArray_TIMEDELTA NPY_TIMEDELTA
+#define PyArray_NTYPES NPY_NTYPES
+#define PyArray_NOTYPE NPY_NOTYPE
+#define PyArray_CHAR NPY_CHAR
+#define PyArray_USERDEF NPY_USERDEF
+#define PyArray_NUMUSERTYPES NPY_NUMUSERTYPES
+
+#define PyArray_INTP NPY_INTP
+#define PyArray_UINTP NPY_UINTP
+
+#define PyArray_INT8 NPY_INT8
+#define PyArray_UINT8 NPY_UINT8
+#define PyArray_INT16 NPY_INT16
+#define PyArray_UINT16 NPY_UINT16
+#define PyArray_INT32 NPY_INT32
+#define PyArray_UINT32 NPY_UINT32
+
+#ifdef NPY_INT64
+#define PyArray_INT64 NPY_INT64
+#define PyArray_UINT64 NPY_UINT64
+#endif
+
+#ifdef NPY_INT128
+#define PyArray_INT128 NPY_INT128
+#define PyArray_UINT128 NPY_UINT128
+#endif
+
+#ifdef NPY_FLOAT16
+#define PyArray_FLOAT16 NPY_FLOAT16
+#define PyArray_COMPLEX32 NPY_COMPLEX32
+#endif
+
+#ifdef NPY_FLOAT80
+#define PyArray_FLOAT80 NPY_FLOAT80
+#define PyArray_COMPLEX160 NPY_COMPLEX160
+#endif
+
+#ifdef NPY_FLOAT96
+#define PyArray_FLOAT96 NPY_FLOAT96
+#define PyArray_COMPLEX192 NPY_COMPLEX192
+#endif
+
+#ifdef NPY_FLOAT128
+#define PyArray_FLOAT128 NPY_FLOAT128
+#define PyArray_COMPLEX256 NPY_COMPLEX256
+#endif
+
+#define PyArray_FLOAT32 NPY_FLOAT32
+#define PyArray_COMPLEX64 NPY_COMPLEX64
+#define PyArray_FLOAT64 NPY_FLOAT64
+#define PyArray_COMPLEX128 NPY_COMPLEX128
+
+
+#define PyArray_TYPECHAR NPY_TYPECHAR
+#define PyArray_BOOLLTR NPY_BOOLLTR
+#define PyArray_BYTELTR NPY_BYTELTR
+#define PyArray_UBYTELTR NPY_UBYTELTR
+#define PyArray_SHORTLTR NPY_SHORTLTR
+#define PyArray_USHORTLTR NPY_USHORTLTR
+#define PyArray_INTLTR NPY_INTLTR
+#define PyArray_UINTLTR NPY_UINTLTR
+#define PyArray_LONGLTR NPY_LONGLTR
+#define PyArray_ULONGLTR NPY_ULONGLTR
+#define PyArray_LONGLONGLTR NPY_LONGLONGLTR
+#define PyArray_ULONGLONGLTR NPY_ULONGLONGLTR
+#define PyArray_HALFLTR NPY_HALFLTR
+#define PyArray_FLOATLTR NPY_FLOATLTR
+#define PyArray_DOUBLELTR NPY_DOUBLELTR
+#define PyArray_LONGDOUBLELTR NPY_LONGDOUBLELTR
+#define PyArray_CFLOATLTR NPY_CFLOATLTR
+#define PyArray_CDOUBLELTR NPY_CDOUBLELTR
+#define PyArray_CLONGDOUBLELTR NPY_CLONGDOUBLELTR
+#define PyArray_OBJECTLTR NPY_OBJECTLTR
+#define PyArray_STRINGLTR NPY_STRINGLTR
+#define PyArray_STRINGLTR2 NPY_STRINGLTR2
+#define PyArray_UNICODELTR NPY_UNICODELTR
+#define PyArray_VOIDLTR NPY_VOIDLTR
+#define PyArray_DATETIMELTR NPY_DATETIMELTR
+#define PyArray_TIMEDELTALTR NPY_TIMEDELTALTR
+#define PyArray_CHARLTR NPY_CHARLTR
+#define PyArray_INTPLTR NPY_INTPLTR
+#define PyArray_UINTPLTR NPY_UINTPLTR
+#define PyArray_GENBOOLLTR NPY_GENBOOLLTR
+#define PyArray_SIGNEDLTR NPY_SIGNEDLTR
+#define PyArray_UNSIGNEDLTR NPY_UNSIGNEDLTR
+#define PyArray_FLOATINGLTR NPY_FLOATINGLTR
+#define PyArray_COMPLEXLTR NPY_COMPLEXLTR
+
+#define PyArray_QUICKSORT NPY_QUICKSORT
+#define PyArray_HEAPSORT NPY_HEAPSORT
+#define PyArray_MERGESORT NPY_MERGESORT
+#define PyArray_SORTKIND NPY_SORTKIND
+#define PyArray_NSORTS NPY_NSORTS
+
+#define PyArray_NOSCALAR NPY_NOSCALAR
+#define PyArray_BOOL_SCALAR NPY_BOOL_SCALAR
+#define PyArray_INTPOS_SCALAR NPY_INTPOS_SCALAR
+#define PyArray_INTNEG_SCALAR NPY_INTNEG_SCALAR
+#define PyArray_FLOAT_SCALAR NPY_FLOAT_SCALAR
+#define PyArray_COMPLEX_SCALAR NPY_COMPLEX_SCALAR
+#define PyArray_OBJECT_SCALAR NPY_OBJECT_SCALAR
+#define PyArray_SCALARKIND NPY_SCALARKIND
+#define PyArray_NSCALARKINDS NPY_NSCALARKINDS
+
+#define PyArray_ANYORDER NPY_ANYORDER
+#define PyArray_CORDER NPY_CORDER
+#define PyArray_FORTRANORDER NPY_FORTRANORDER
+#define PyArray_ORDER NPY_ORDER
+
+#define PyDescr_ISBOOL PyDataType_ISBOOL
+#define PyDescr_ISUNSIGNED PyDataType_ISUNSIGNED
+#define PyDescr_ISSIGNED PyDataType_ISSIGNED
+#define PyDescr_ISINTEGER PyDataType_ISINTEGER
+#define PyDescr_ISFLOAT PyDataType_ISFLOAT
+#define PyDescr_ISNUMBER PyDataType_ISNUMBER
+#define PyDescr_ISSTRING PyDataType_ISSTRING
+#define PyDescr_ISCOMPLEX PyDataType_ISCOMPLEX
+#define PyDescr_ISPYTHON PyDataType_ISPYTHON
+#define PyDescr_ISFLEXIBLE PyDataType_ISFLEXIBLE
+#define PyDescr_ISUSERDEF PyDataType_ISUSERDEF
+#define PyDescr_ISEXTENDED PyDataType_ISEXTENDED
+#define PyDescr_ISOBJECT PyDataType_ISOBJECT
+#define PyDescr_HASFIELDS PyDataType_HASFIELDS
+
+#define PyArray_LITTLE NPY_LITTLE
+#define PyArray_BIG NPY_BIG
+#define PyArray_NATIVE NPY_NATIVE
+#define PyArray_SWAP NPY_SWAP
+#define PyArray_IGNORE NPY_IGNORE
+
+#define PyArray_NATBYTE NPY_NATBYTE
+#define PyArray_OPPBYTE NPY_OPPBYTE
+
+#define PyArray_MAX_ELSIZE NPY_MAX_ELSIZE
+
+#define PyArray_USE_PYMEM NPY_USE_PYMEM
+
+#define PyArray_RemoveLargest PyArray_RemoveSmallest
+
+#define PyArray_UCS4 npy_ucs4
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_ */
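
The effect of these aliases in miniature (illustrative; new code should
prefer the NPY_ spellings directly)::

    /* Pre-1.7 spelling, kept compiling by old_defines.h: */
    int legacy_type = PyArray_DOUBLE;
    /* Modern equivalent: */
    int modern_type = NPY_DOUBLE;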
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/oldnumeric.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/oldnumeric.h
new file mode 100644
index 00000000..6604e8d1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/oldnumeric.h
@@ -0,0 +1,32 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_OLDNUMERIC_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_OLDNUMERIC_H_
+
+/* FIXME -- this file can be deleted? */
+
+#include "arrayobject.h"
+
+#ifndef PYPY_VERSION
+#ifndef REFCOUNT
+# define REFCOUNT NPY_REFCOUNT
+# define MAX_ELSIZE 16
+#endif
+#endif
+
+#define PyArray_UNSIGNED_TYPES
+#define PyArray_SBYTE NPY_BYTE
+#define PyArray_CopyArray PyArray_CopyInto
+#define _PyArray_multiply_list PyArray_MultiplyIntList
+#define PyArray_ISSPACESAVER(m) NPY_FALSE
+#define PyScalarArray_Check PyArray_CheckScalar
+
+#define CONTIGUOUS NPY_CONTIGUOUS
+#define OWN_DIMENSIONS 0
+#define OWN_STRIDES 0
+#define OWN_DATA NPY_OWNDATA
+#define SAVESPACE 0
+#define SAVESPACEBIT 0
+
+#undef import_array
+#define import_array() { if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); } }
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_OLDNUMERIC_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h
new file mode 100644
index 00000000..162dd5c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/bitgen.h
@@ -0,0 +1,20 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/<any>.pxd */
+
+typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+} bitgen_t;
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */
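
A sketch of what satisfying this function-pointer contract looks like, with
SplitMix64 as a stand-in generator (hypothetical code, not shipped with
NumPy)::

    #include <stdint.h>
    #include <numpy/random/bitgen.h>

    typedef struct { uint64_t s; } sm64_state;

    static uint64_t sm64_next64(void *st)
    {
        sm64_state *s = (sm64_state *)st;
        uint64_t z = (s->s += 0x9E3779B97F4A7C15ULL);
        z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
        z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
        return z ^ (z >> 31);
    }

    static uint32_t sm64_next32(void *st)
    {
        return (uint32_t)(sm64_next64(st) >> 32);
    }

    static double sm64_next_double(void *st)
    {
        /* top 53 bits give a double in [0, 1) */
        return (sm64_next64(st) >> 11) * (1.0 / 9007199254740992.0);
    }

    static void sm64_init_bitgen(bitgen_t *bg, sm64_state *s)
    {
        bg->state = s;
        bg->next_uint64 = sm64_next64;
        bg->next_uint32 = sm64_next32;
        bg->next_double = sm64_next_double;
        bg->next_raw = sm64_next64;  /* raw stream == uint64 stream here */
    }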
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h
new file mode 100644
index 00000000..78bd06ff
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/random/distributions.h
@@ -0,0 +1,209 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "numpy/random/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState, which
+ * used long in place of int64_t. When changing a distribution that uses
+ * RAND_INT_TYPE, the original unmodified copy must be retained for use in
+ * RandomState by copying it into the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
+
+#ifdef _MSC_VER
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#define MAX(x, y) (((x) > (y)) ? (x) : (y))
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+typedef struct s_binomial_t {
+ int has_binomial; /* !=0: following parameters initialized for binomial */
+ double psave;
+ RAND_INT_TYPE nsave;
+ double r;
+ double q;
+ double fm;
+ RAND_INT_TYPE m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+} binomial_t;
+
+DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
+DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
+DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
+DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
+DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
+
+DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
+DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
+
+DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
+DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
+DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
+DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
+DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
+
+DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
+
+DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
+DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
+
+DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
+DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
+DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
+DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
+DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
+DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
+DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
+DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
+DECLDIR double random_power(bitgen_t *bitgen_state, double a);
+DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
+DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
+DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
+DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc);
+DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc);
+DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
+DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
+DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right);
+
+DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
+DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
+ double p);
+
+DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+
+DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p);
+DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
+DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample);
+DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
+
+/* Generate random uint64 numbers in closed interval [off, off + rng]. */
+DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, uint64_t mask,
+ bool use_masked);
+
+/* Generate random uint32 numbers in closed interval [off, off + rng]. */
+DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, uint8_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_bool mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+
+DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, npy_intp cnt,
+ bool use_masked, uint64_t *out);
+DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
+ uint32_t rng, npy_intp cnt,
+ bool use_masked, uint32_t *out);
+DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
+ uint16_t rng, npy_intp cnt,
+ bool use_masked, uint16_t *out);
+DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, npy_intp cnt,
+ bool use_masked, uint8_t *out);
+DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_intp cnt,
+ bool use_masked, npy_bool *out);
+
+DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
+ double *pix, npy_intp d, binomial_t *binomial);
+
+/* multivariate hypergeometric, "count" method */
+DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* multivariate hypergeometric, "marginals" method */
+DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates);
+
+/* Common to legacy-distributions.c and distributions.c but not exported */
+
+RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+double random_loggam(double x);
+static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
+ return bitgen_state->next_double(bitgen_state->state);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */
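
The declarations above are consumed by the Cython layer of ``numpy.random``
rather than called directly from Python. A minimal sketch of reaching a few
of them through the public ``Generator`` API (the seed value is arbitrary)::

    import numpy as np

    rng = np.random.default_rng(12345)   # wraps a bitgen_t (PCG64 by default)
    rng.standard_t(df=5.0, size=3)       # backed by random_standard_t
    rng.binomial(n=10, p=0.3, size=3)    # backed by random_binomial
    rng.integers(0, 100, size=3)         # backed by the bounded-integer fills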
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufunc_api.txt b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufunc_api.txt
new file mode 100644
index 00000000..20a8a75f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufunc_api.txt
@@ -0,0 +1,335 @@
+
+=================
+NumPy Ufunc C-API
+=================
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void
+ **data, char *types, int ntypes, int nin, int
+ nout, int identity, const char *name, const
+ char *doc, int unused)
+
+
+::
+
+ int
+ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, int
+ usertype, PyUFuncGenericFunction
+ function, const int *arg_types, void
+ *data)
+
+
+::
+
+ int
+ PyUFunc_GenericFunction(PyUFuncObject *NPY_UNUSED(ufunc) , PyObject
+ *NPY_UNUSED(args) , PyObject *NPY_UNUSED(kwds)
+ , PyArrayObject **NPY_UNUSED(op) )
+
+
+::
+
+ void
+ PyUFunc_f_f_As_d_d(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_d_d(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_f_f(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_g_g(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_F_F_As_D_D(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_F_F(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_D_D(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_G_G(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_O_O(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ff_f_As_dd_d(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ff_f(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_dd_d(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_gg_g(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_FF_F_As_DD_D(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_DD_D(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_FF_F(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_GG_G(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_OO_O(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_O_O_method(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_OO_O_method(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_On_Om(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ int
+ PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject
+ **errobj)
+
+
+On return, if errobj is populated with a non-NULL value, the caller
+owns a new reference to errobj.
+
+::
+
+ int
+ PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first)
+
+
+::
+
+ void
+ PyUFunc_clearfperr()
+
+
+::
+
+ int
+ PyUFunc_getfperr(void )
+
+
+::
+
+ int
+ PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int
+ *first)
+
+
+::
+
+ int
+ PyUFunc_ReplaceLoopBySignature(PyUFuncObject
+ *func, PyUFuncGenericFunction
+ newfunc, const int
+ *signature, PyUFuncGenericFunction
+ *oldfunc)
+
+
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void
+ **data, char *types, int
+ ntypes, int nin, int nout, int
+ identity, const char *name, const
+ char *doc, int unused, const char
+ *signature)
+
+
+::
+
+ int
+ PyUFunc_SetUsesArraysAsData(void **NPY_UNUSED(data) , size_t
+ NPY_UNUSED(i) )
+
+
+::
+
+ void
+ PyUFunc_e_e(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_e_e_As_f_f(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_e_e_As_d_d(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e(char **args, npy_intp const *dimensions, npy_intp const
+ *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e_As_ff_f(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ void
+ PyUFunc_ee_e_As_dd_d(char **args, npy_intp const *dimensions, npy_intp
+ const *steps, void *func)
+
+
+::
+
+ int
+ PyUFunc_DefaultTypeResolver(PyUFuncObject *ufunc, NPY_CASTING
+ casting, PyArrayObject
+ **operands, PyObject
+ *type_tup, PyArray_Descr **out_dtypes)
+
+
+This function applies the default type resolution rules
+for the provided ufunc.
+
+Returns 0 on success, -1 on error.
+
+::
+
+ int
+ PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING
+ casting, PyArrayObject
+ **operands, PyArray_Descr **dtypes)
+
+
+Validates that the input operands can be cast to
+the input types, and the output types can be cast to
+the output operands where provided.
+
+Returns 0 on success, -1 (with exception raised) on validation failure.
+
+::
+
+ int
+ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc, PyArray_Descr
+ *user_dtype, PyUFuncGenericFunction
+ function, PyArray_Descr
+ **arg_dtypes, void *data)
+
+
+::
+
+ PyObject *
+ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction
+ *func, void
+ **data, char
+ *types, int ntypes, int
+ nin, int nout, int
+ identity, const char
+ *name, const char
+ *doc, const int
+ unused, const char
+ *signature, PyObject
+ *identity_value)
+
+
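These entry points are for C extension modules; pure-Python code cannot call
them directly. The closest Python-level analogue to ``PyUFunc_FromFuncAndData``
is ``np.frompyfunc``, which builds an object-dtype ufunc from a callable
(``nin=2``, ``nout=1`` in this sketch)::

    import numpy as np

    # A two-in, one-out Python callable wrapped as a ufunc.
    add3 = np.frompyfunc(lambda a, b: a + b + 3, 2, 1)
    add3(np.arange(3), 10)   # -> array([13, 14, 15], dtype=object)
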
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h
new file mode 100644
index 00000000..bb063310
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/ufuncobject.h
@@ -0,0 +1,357 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+ (char **args,
+ npy_intp const *dimensions,
+ npy_intp const *strides,
+ void *innerloopdata);
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
+ * doing calculations on any items for which the maskptr array has a true
+ * value.
+ */
+typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
+ char **dataptrs, npy_intp *strides,
+ char *maskptr, npy_intp mask_stride,
+ npy_intp count,
+ NpyAuxData *innerloopdata);
+
+/* Forward declaration for the type resolver and loop selector typedefs */
+struct _tagPyUFuncObject;
+
+/*
+ * Given the operands for calling a ufunc, should determine the
+ * calculation input and output data types and return an inner loop function.
+ * This function should validate that the casting rule is being followed,
+ * and fail if it is not.
+ *
+ * For backwards compatibility, the regular type resolution function does not
+ * support auxiliary data with object semantics. The type resolution call
+ * which returns a masked generic function returns a standard NpyAuxData
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
+ * work.
+ *
+ * ufunc: The ufunc object.
+ * casting: The 'casting' parameter provided to the ufunc.
+ * operands: An array of length (ufunc->nin + ufunc->nout),
+ * with the output parameters possibly NULL.
+ * type_tup: Either NULL, or the type_tup passed to the ufunc.
+ * out_dtypes: An array which should be populated with new
+ * references to (ufunc->nin + ufunc->nout) new
+ * dtypes, one for each input and output. These
+ * dtypes should all be in native-endian format.
+ *
+ * Should return 0 on success, -1 on failure (with exception set),
+ * or -2 if Py_NotImplemented should be returned.
+ */
+typedef int (PyUFunc_TypeResolutionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
+/*
+ * Legacy loop selector. (This should NOT normally be used and we can expect
+ * that only the `PyUFunc_DefaultLegacyInnerLoopSelector` is ever set).
+ * However, unlike the masked version, it probably still works.
+ *
+ * ufunc: The ufunc object.
+ * dtypes: An array which has been populated with dtypes,
+ * in most cases by the type resolution function
+ * for the same ufunc.
+ * out_innerloop: Should be populated with the correct ufunc inner
+ * loop for the given type.
+ * out_innerloopdata: Should be populated with the void* data to
+ * be passed into the out_innerloop function.
+ * out_needs_api: If the inner loop needs to use the Python API,
+ *                      it should set this to 1; otherwise it should
+ *                      leave this untouched.
+ */
+typedef int (PyUFunc_LegacyInnerLoopSelectionFunc)(
+ struct _tagPyUFuncObject *ufunc,
+ PyArray_Descr **dtypes,
+ PyUFuncGenericFunction *out_innerloop,
+ void **out_innerloopdata,
+ int *out_needs_api);
+
+
+typedef struct _tagPyUFuncObject {
+ PyObject_HEAD
+ /*
+ * nin: Number of inputs
+ * nout: Number of outputs
+ * nargs: Always nin + nout (Why is it stored?)
+ */
+ int nin, nout, nargs;
+
+ /*
+ * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
+ * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
+ * PyUFunc_IdentityValue.
+ */
+ int identity;
+
+ /* Array of one-dimensional core loops */
+ PyUFuncGenericFunction *functions;
+ /* Array of funcdata that gets passed into the functions */
+ void **data;
+ /* The number of elements in 'functions' and 'data' */
+ int ntypes;
+
+ /* Used to be unused field 'check_return' */
+ int reserved1;
+
+ /* The name of the ufunc */
+ const char *name;
+
+ /* Array of type numbers, of size ('nargs' * 'ntypes') */
+ char *types;
+
+ /* Documentation string */
+ const char *doc;
+
+ void *ptr;
+ PyObject *obj;
+ PyObject *userloops;
+
+ /* generalized ufunc parameters */
+
+ /* 0 for scalar ufunc; 1 for generalized ufunc */
+ int core_enabled;
+ /* number of distinct dimension names in signature */
+ int core_num_dim_ix;
+
+ /*
+ * dimension indices of input/output argument k are stored in
+ * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+ */
+
+ /* numbers of core dimensions of each argument */
+ int *core_num_dims;
+ /*
+     * dimension indices in a flattened form; indices
+ * are in the range of [0,core_num_dim_ix)
+ */
+ int *core_dim_ixs;
+ /*
+ * positions of 1st core dimensions of each
+ * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+ */
+ int *core_offsets;
+ /* signature string for printing purpose */
+ char *core_signature;
+
+ /*
+ * A function which resolves the types and fills an array
+ * with the dtypes for the inputs and outputs.
+ */
+ PyUFunc_TypeResolutionFunc *type_resolver;
+ /*
+ * A function which returns an inner loop written for
+ * NumPy 1.6 and earlier ufuncs. This is for backwards
+ * compatibility, and may be NULL if inner_loop_selector
+ * is specified.
+ */
+ PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
+ /*
+ * This was blocked off to be the "new" inner loop selector in 1.7,
+ * but this was never implemented. (This is also why the above
+ * selector is called the "legacy" selector.)
+ */
+ #ifndef Py_LIMITED_API
+ vectorcallfunc vectorcall;
+ #else
+ void *vectorcall;
+ #endif
+
+ /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
+ void *_always_null_previously_masked_innerloop_selector;
+
+ /*
+ * List of flags for each operand when ufunc is called by nditer object.
+ * These flags will be used in addition to the default flags for each
+ * operand set by nditer object.
+ */
+ npy_uint32 *op_flags;
+
+ /*
+ * List of global flags used when ufunc is called by nditer object.
+ * These flags will be used in addition to the default global flags
+ * set by nditer object.
+ */
+ npy_uint32 iter_flags;
+
+ /* New in NPY_API_VERSION 0x0000000D and above */
+
+ /*
+ * for each core_num_dim_ix distinct dimension names,
+ * the possible "frozen" size (-1 if not frozen).
+ */
+ npy_intp *core_dim_sizes;
+
+ /*
+ * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+ */
+ npy_uint32 *core_dim_flags;
+
+ /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+ PyObject *identity_value;
+
+ /* New in NPY_API_VERSION 0x0000000F and above */
+
+ /* New private fields related to dispatching */
+ void *_dispatch_cache;
+ /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
+ PyObject *_loops;
+} PyUFuncObject;
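
Several of these struct fields are exposed read-only on every ufunc object,
which makes them easy to inspect from Python::

    import numpy as np

    np.add.nin, np.add.nout, np.add.nargs   # (2, 1, 3)
    np.add.ntypes                           # number of registered inner loops
    np.add.types[:3]                        # e.g. ['??->?', 'bb->b', 'BB->B']
    np.add.signature                        # None for a non-generalized ufunc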
+
+#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands. */
+#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
+/* the core dimension may be absent */
+#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
+/* flags inferred during execution */
+#define UFUNC_CORE_DIM_MISSING 0x00040000
+
+#define UFUNC_ERR_IGNORE 0
+#define UFUNC_ERR_WARN 1
+#define UFUNC_ERR_RAISE 2
+#define UFUNC_ERR_CALL 3
+#define UFUNC_ERR_PRINT 4
+#define UFUNC_ERR_LOG 5
+
+ /* Python side integer mask */
+
+#define UFUNC_MASK_DIVIDEBYZERO 0x07
+#define UFUNC_MASK_OVERFLOW 0x3f
+#define UFUNC_MASK_UNDERFLOW 0x1ff
+#define UFUNC_MASK_INVALID 0xfff
+
+#define UFUNC_SHIFT_DIVIDEBYZERO 0
+#define UFUNC_SHIFT_OVERFLOW 3
+#define UFUNC_SHIFT_UNDERFLOW 6
+#define UFUNC_SHIFT_INVALID 9
+
+
+#define UFUNC_OBJ_ISOBJECT 1
+#define UFUNC_OBJ_NEEDS_API 2
+
+ /* Default user error mode */
+#define UFUNC_ERR_DEFAULT \
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_OVERFLOW) + \
+ (UFUNC_ERR_WARN << UFUNC_SHIFT_INVALID)
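
``UFUNC_ERR_DEFAULT`` packs ``warn`` into the divide/overflow/invalid fields
and leaves underflow at ``ignore``; ``np.seterr``/``np.geterr`` manipulate the
same packed mask from Python::

    import numpy as np

    np.geterr()   # defaults: divide/over/invalid 'warn', under 'ignore'
    old = np.seterr(divide='raise')   # UFUNC_ERR_RAISE in the divide field
    np.seterr(**old)                  # restore the previous error mode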
+
+#if NPY_ALLOW_THREADS
+#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
+#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
+#else
+#define NPY_LOOP_BEGIN_THREADS
+#define NPY_LOOP_END_THREADS
+#endif
+
+/*
+ * UFunc has unit of 0, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_Zero 0
+/*
+ * UFunc has unit of 1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_One 1
+/*
+ * UFunc has unit of -1, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once. Intended for
+ * bitwise_and reduction.
+ */
+#define PyUFunc_MinusOne 2
+/*
+ * UFunc has no unit, and the order of operations cannot be reordered.
+ * This case does not allow reduction with multiple axes at once.
+ */
+#define PyUFunc_None -1
+/*
+ * UFunc has no unit, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_ReorderableNone -2
+/*
+ * UFunc unit is an identity_value, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_IdentityValue -3
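
The identity constants surface in Python as the ``identity`` attribute of each
ufunc; the constant-to-ufunc mapping in the comments below is illustrative::

    import numpy as np

    np.add.identity          # 0    (PyUFunc_Zero)
    np.multiply.identity     # 1    (PyUFunc_One)
    np.bitwise_and.identity  # -1   (PyUFunc_MinusOne)
    print(np.maximum.identity)  # None (no unit; needs at least one element)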
+
+
+#define UFUNC_REDUCE 0
+#define UFUNC_ACCUMULATE 1
+#define UFUNC_REDUCEAT 2
+#define UFUNC_OUTER 3
+
+
+typedef struct {
+ int nin;
+ int nout;
+ PyObject *callable;
+} PyUFunc_PyFuncData;
+
+/* A linked-list of function information for
+ user-defined 1-d loops.
+ */
+typedef struct _loop1d_info {
+ PyUFuncGenericFunction func;
+ void *data;
+ int *arg_types;
+ struct _loop1d_info *next;
+ int nargs;
+ PyArray_Descr **arg_dtypes;
+} PyUFunc_Loop1d;
+
+
+#include "__ufunc_api.h"
+
+#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
+
+/*
+ * THESE MACROS ARE DEPRECATED.
+ * Use npy_set_floatstatus_* in the npymath library.
+ */
+#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
+#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
+#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
+#define UFUNC_FPE_INVALID NPY_FPE_INVALID
+
+#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
+#define generate_overflow_error() npy_set_floatstatus_overflow()
+
+ /* Make sure it gets defined if it isn't already */
+#ifndef UFUNC_NOFPE
+/* Clear the floating point exception default of Borland C++ */
+#if defined(__BORLANDC__)
+#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
+#else
+#define UFUNC_NOFPE
+#endif
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h
new file mode 100644
index 00000000..97f06092
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/include/numpy/utils.h
@@ -0,0 +1,37 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+
+#ifndef __COMP_NPY_UNUSED
+ #if defined(__GNUC__)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #elif defined(__ICC)
+ #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ #elif defined(__clang__)
+ #define __COMP_NPY_UNUSED __attribute__ ((unused))
+ #else
+ #define __COMP_NPY_UNUSED
+ #endif
+#endif
+
+#if defined(__GNUC__) || defined(__ICC) || defined(__clang__)
+ #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#elif defined(_MSC_VER)
+ #define NPY_DECL_ALIGNED(x) __declspec(align(x))
+#else
+ #define NPY_DECL_ALIGNED(x)
+#endif
+
+/* Use this to tag a variable as not used. It will remove the unused-variable
+ * warning on supported platforms (see __COMP_NPY_UNUSED) and mangle the
+ * variable to avoid accidental use */
+#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED
+#define NPY_EXPAND(x) x
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+#define NPY_CAT__(a, b) a ## b
+#define NPY_CAT_(a, b) NPY_CAT__(a, b)
+#define NPY_CAT(a, b) NPY_CAT_(a, b)
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */
diff --git a/venv/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a b/venv/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a
new file mode 100644
index 00000000..0f643546
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/lib/libnpymath.a
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 00000000..1e70ccb6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=
+Cflags=
+
+[msvc]
+Libs=
+Cflags=
diff --git a/venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 00000000..3e465ad2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy.core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
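
These npy-pkg-config entries are read by ``numpy.distutils`` when building C
extensions that link against ``npymath``. A sketch, assuming a setup.py-based
build with ``numpy.distutils`` available::

    from numpy.distutils.misc_util import get_info

    info = get_info('npymath')
    # expected keys include 'include_dirs', 'library_dirs' and 'libraries'
    # (containing 'npymath'); pass them through to Extension(**info)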
diff --git a/venv/lib/python3.9/site-packages/numpy/core/memmap.py b/venv/lib/python3.9/site-packages/numpy/core/memmap.py
new file mode 100644
index 00000000..b0d9cb3a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/memmap.py
@@ -0,0 +1,337 @@
+from contextlib import nullcontext
+
+import numpy as np
+from .numeric import uint8, ndarray, dtype
+from numpy.compat import os_fspath, is_pathlib_path
+from numpy.core.overrides import set_module
+
+__all__ = ['memmap']
+
+dtypedescr = dtype
+valid_filemodes = ["r", "c", "r+", "w+"]
+writeable_filemodes = ["r+", "w+"]
+
+mode_equivalents = {
+ "readonly":"r",
+ "copyonwrite":"c",
+ "readwrite":"r+",
+ "write":"w+"
+ }
+
+
+@set_module('numpy')
+class memmap(ndarray):
+ """Create a memory-map to an array stored in a *binary* file on disk.
+
+ Memory-mapped files are used for accessing small segments of large files
+ on disk, without reading the entire file into memory. NumPy's
+    memmaps are array-like objects. This differs from Python's ``mmap``
+ module, which uses file-like objects.
+
+ This subclass of ndarray has some unpleasant interactions with
+ some operations, because it doesn't quite fit properly as a subclass.
+ An alternative to using this subclass is to create the ``mmap``
+ object yourself, then create an ndarray with ndarray.__new__ directly,
+ passing the object created in its 'buffer=' parameter.
+
+ This class may at some point be turned into a factory function
+ which returns a view into an mmap buffer.
+
+ Flush the memmap instance to write the changes to the file. Currently there
+ is no API to close the underlying ``mmap``. It is tricky to ensure the
+ resource is actually closed, since it may be shared between different
+ memmap instances.
+
+
+ Parameters
+ ----------
+ filename : str, file-like object, or pathlib.Path instance
+ The file name or file object to be used as the array data buffer.
+ dtype : data-type, optional
+ The data-type used to interpret the file contents.
+ Default is `uint8`.
+ mode : {'r+', 'r', 'w+', 'c'}, optional
+ The file is opened in this mode:
+
+ +------+-------------------------------------------------------------+
+ | 'r' | Open existing file for reading only. |
+ +------+-------------------------------------------------------------+
+ | 'r+' | Open existing file for reading and writing. |
+ +------+-------------------------------------------------------------+
+ | 'w+' | Create or overwrite existing file for reading and writing. |
+ +------+-------------------------------------------------------------+
+ | 'c' | Copy-on-write: assignments affect data in memory, but |
+ | | changes are not saved to disk. The file on disk is |
+ | | read-only. |
+ +------+-------------------------------------------------------------+
+
+ Default is 'r+'.
+ offset : int, optional
+ In the file, array data starts at this offset. Since `offset` is
+ measured in bytes, it should normally be a multiple of the byte-size
+ of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
+ file are valid; The file will be extended to accommodate the
+ additional data. By default, ``memmap`` will start at the beginning of
+ the file, even if ``filename`` is a file pointer ``fp`` and
+ ``fp.tell() != 0``.
+ shape : tuple, optional
+ The desired shape of the array. If ``mode == 'r'`` and the number
+ of remaining bytes after `offset` is not a multiple of the byte-size
+ of `dtype`, you must specify `shape`. By default, the returned array
+ will be 1-D with the number of elements determined by file size
+ and data-type.
+ order : {'C', 'F'}, optional
+ Specify the order of the ndarray memory layout:
+ :term:`row-major`, C-style or :term:`column-major`,
+ Fortran-style. This only has an effect if the shape is
+ greater than 1-D. The default order is 'C'.
+
+ Attributes
+ ----------
+ filename : str or pathlib.Path instance
+ Path to the mapped file.
+ offset : int
+ Offset position in the file.
+ mode : str
+ File mode.
+
+ Methods
+ -------
+ flush
+ Flush any changes in memory to file on disk.
+ When you delete a memmap object, flush is called first to write
+ changes to disk.
+
+
+ See also
+ --------
+ lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+ Notes
+ -----
+ The memmap object can be used anywhere an ndarray is accepted.
+ Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
+ ``True``.
+
+ Memory-mapped files cannot be larger than 2GB on 32-bit systems.
+
+ When a memmap causes a file to be created or extended beyond its
+ current size in the filesystem, the contents of the new part are
+ unspecified. On systems with POSIX filesystem semantics, the extended
+ part will be filled with zero bytes.
+
+ Examples
+ --------
+ >>> data = np.arange(12, dtype='float32')
+ >>> data.resize((3,4))
+
+ This example uses a temporary file so that doctest doesn't write
+ files to your directory. You would use a 'normal' filename.
+
+ >>> from tempfile import mkdtemp
+ >>> import os.path as path
+ >>> filename = path.join(mkdtemp(), 'newfile.dat')
+
+ Create a memmap with dtype and shape that matches our data:
+
+ >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
+ >>> fp
+ memmap([[0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.]], dtype=float32)
+
+ Write data to memmap array:
+
+ >>> fp[:] = data[:]
+ >>> fp
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ >>> fp.filename == path.abspath(filename)
+ True
+
+ Flushes memory changes to disk in order to read them back
+
+ >>> fp.flush()
+
+ Load the memmap and verify data was stored:
+
+ >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+ >>> newfp
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ Read-only memmap:
+
+ >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
+ >>> fpr.flags.writeable
+ False
+
+ Copy-on-write memmap:
+
+ >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
+ >>> fpc.flags.writeable
+ True
+
+    It's possible to assign to a copy-on-write array, but values are only
+ written into the memory copy of the array, and not written to disk:
+
+ >>> fpc
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+ >>> fpc[0,:] = 0
+ >>> fpc
+ memmap([[ 0., 0., 0., 0.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ File on disk is unchanged:
+
+ >>> fpr
+ memmap([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]], dtype=float32)
+
+ Offset into a memmap:
+
+ >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
+ >>> fpo
+ memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
+
+ """
+
+ __array_priority__ = -100.0
+
+ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
+ shape=None, order='C'):
+ # Import here to minimize 'import numpy' overhead
+ import mmap
+ import os.path
+ try:
+ mode = mode_equivalents[mode]
+ except KeyError as e:
+ if mode not in valid_filemodes:
+ raise ValueError(
+ "mode must be one of {!r} (got {!r})"
+ .format(valid_filemodes + list(mode_equivalents.keys()), mode)
+ ) from None
+
+ if mode == 'w+' and shape is None:
+ raise ValueError("shape must be given")
+
+ if hasattr(filename, 'read'):
+ f_ctx = nullcontext(filename)
+ else:
+ f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
+
+ with f_ctx as fid:
+ fid.seek(0, 2)
+ flen = fid.tell()
+ descr = dtypedescr(dtype)
+ _dbytes = descr.itemsize
+
+ if shape is None:
+ bytes = flen - offset
+ if bytes % _dbytes:
+ raise ValueError("Size of available data is not a "
+ "multiple of the data-type size.")
+ size = bytes // _dbytes
+ shape = (size,)
+ else:
+ if not isinstance(shape, tuple):
+ shape = (shape,)
+ size = np.intp(1) # avoid default choice of np.int_, which might overflow
+ for k in shape:
+ size *= k
+
+ bytes = int(offset + size*_dbytes)
+
+ if mode in ('w+', 'r+') and flen < bytes:
+ fid.seek(bytes - 1, 0)
+ fid.write(b'\0')
+ fid.flush()
+
+ if mode == 'c':
+ acc = mmap.ACCESS_COPY
+ elif mode == 'r':
+ acc = mmap.ACCESS_READ
+ else:
+ acc = mmap.ACCESS_WRITE
+
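+            # mmap can only start at offsets that are multiples of
+            # ALLOCATIONGRANULARITY, so map from the aligned start and
+            # point the ndarray at the remaining bytes inside the mapping.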
+ start = offset - offset % mmap.ALLOCATIONGRANULARITY
+ bytes -= start
+ array_offset = offset - start
+ mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
+
+ self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
+ offset=array_offset, order=order)
+ self._mmap = mm
+ self.offset = offset
+ self.mode = mode
+
+ if is_pathlib_path(filename):
+ # special case - if we were constructed with a pathlib.path,
+ # then filename is a path object, not a string
+ self.filename = filename.resolve()
+ elif hasattr(fid, "name") and isinstance(fid.name, str):
+ # py3 returns int for TemporaryFile().name
+ self.filename = os.path.abspath(fid.name)
+ # same as memmap copies (e.g. memmap + 1)
+ else:
+ self.filename = None
+
+ return self
+
+ def __array_finalize__(self, obj):
+ if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
+ self._mmap = obj._mmap
+ self.filename = obj.filename
+ self.offset = obj.offset
+ self.mode = obj.mode
+ else:
+ self._mmap = None
+ self.filename = None
+ self.offset = None
+ self.mode = None
+
+ def flush(self):
+ """
+ Write any changes in the array to the file on disk.
+
+ For further information, see `memmap`.
+
+ Parameters
+ ----------
+ None
+
+ See Also
+ --------
+ memmap
+
+ """
+ if self.base is not None and hasattr(self.base, 'flush'):
+ self.base.flush()
+
+ def __array_wrap__(self, arr, context=None):
+ arr = super().__array_wrap__(arr, context)
+
+ # Return a memmap if a memmap was given as the output of the
+ # ufunc. Leave the arr class unchanged if self is not a memmap
+ # to keep original memmap subclasses behavior
+ if self is arr or type(self) is not memmap:
+ return arr
+ # Return scalar instead of 0d memmap, e.g. for np.sum with
+ # axis=None
+ if arr.shape == ():
+ return arr[()]
+ # Return ndarray otherwise
+ return arr.view(np.ndarray)
+
+ def __getitem__(self, index):
+ res = super().__getitem__(index)
+ if type(res) is memmap and res._mmap is None:
+ return res.view(type=ndarray)
+ return res
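
A consequence of ``__array_wrap__`` above, shown as a small sketch (the
temporary file path is illustrative)::

    import numpy as np
    import os.path as path
    from tempfile import mkdtemp

    filename = path.join(mkdtemp(), 'wrap.dat')
    fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3, 4))
    type(fp + 1)      # np.ndarray: ufunc results detach from the file
    type(np.sum(fp))  # np.float32: full reductions return scalars, not 0-d memmaps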
diff --git a/venv/lib/python3.9/site-packages/numpy/core/memmap.pyi b/venv/lib/python3.9/site-packages/numpy/core/memmap.pyi
new file mode 100644
index 00000000..03c6b772
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/memmap.pyi
@@ -0,0 +1,3 @@
+from numpy import memmap as memmap
+
+__all__: list[str]
diff --git a/venv/lib/python3.9/site-packages/numpy/core/multiarray.py b/venv/lib/python3.9/site-packages/numpy/core/multiarray.py
new file mode 100644
index 00000000..31b77978
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/multiarray.py
@@ -0,0 +1,1714 @@
+"""
+Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
+the multiarray and umath c-extension modules were merged into a single
+_multiarray_umath extension module. So we replicate the old namespace
+by importing from the extension module.
+
+"""
+
+import functools
+from . import overrides
+from . import _multiarray_umath
+from ._multiarray_umath import * # noqa: F403
+# These imports are needed for backward compatibility,
+# do not change them. issue gh-15518
+# _get_ndarray_c_version is semi-public, on purpose not added to __all__
+from ._multiarray_umath import (
+ fastCopyAndTranspose, _flagdict, from_dlpack, _insert, _reconstruct,
+ _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
+ _get_madvise_hugepage, _set_madvise_hugepage,
+ _get_promotion_state, _set_promotion_state,
+ )
+
+__all__ = [
+ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
+ 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
+ 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
+ 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP',
+ '_flagdict', 'from_dlpack', '_insert', '_reconstruct', '_vec_string',
+ '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
+ 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
+ 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
+ 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
+ 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
+ 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
+ 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring',
+ 'get_handler_name', 'get_handler_version', 'inner', 'interp',
+ 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory',
+ 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
+ 'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
+ 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
+ 'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function',
+ 'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo',
+ 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros',
+ '_get_promotion_state', '_set_promotion_state']
+
+# For backward compatibility, make sure pickle imports these functions from here
+_reconstruct.__module__ = 'numpy.core.multiarray'
+scalar.__module__ = 'numpy.core.multiarray'
+
+
+from_dlpack.__module__ = 'numpy'
+arange.__module__ = 'numpy'
+array.__module__ = 'numpy'
+asarray.__module__ = 'numpy'
+asanyarray.__module__ = 'numpy'
+ascontiguousarray.__module__ = 'numpy'
+asfortranarray.__module__ = 'numpy'
+datetime_data.__module__ = 'numpy'
+empty.__module__ = 'numpy'
+frombuffer.__module__ = 'numpy'
+fromfile.__module__ = 'numpy'
+fromiter.__module__ = 'numpy'
+frompyfunc.__module__ = 'numpy'
+fromstring.__module__ = 'numpy'
+geterrobj.__module__ = 'numpy'
+may_share_memory.__module__ = 'numpy'
+nested_iters.__module__ = 'numpy'
+promote_types.__module__ = 'numpy'
+set_numeric_ops.__module__ = 'numpy'
+seterrobj.__module__ = 'numpy'
+zeros.__module__ = 'numpy'
+_get_promotion_state.__module__ = 'numpy'
+_set_promotion_state.__module__ = 'numpy'
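
The effect of the reassignments above is that these C functions present
themselves as top-level ``numpy`` callables, which keeps pickles and
documentation references stable::

    import numpy as np

    np.arange.__module__                         # 'numpy'
    np.core.multiarray._reconstruct.__module__   # 'numpy.core.multiarray'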
+
+
+# We can't verify dispatcher signatures because NumPy's C functions don't
+# support introspection.
+array_function_from_c_func_and_dispatcher = functools.partial(
+ overrides.array_function_from_dispatcher,
+ module='numpy', docs_from_dispatcher=True, verify=False)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
+def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
+ """
+ empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
+
+ Return a new array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ prototype : array_like
+ The shape and data-type of `prototype` define these same attributes
+ of the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `prototype` is Fortran
+ contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
+ as closely as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of `prototype`, otherwise it will be a base-class array. Defaults
+ to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data with the same
+ shape and type as `prototype`.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+
+ Notes
+ -----
+ This function does *not* initialize the returned array; to do that use
+ `zeros_like` or `ones_like` instead. It may be marginally faster than
+ the functions that do set the array values.
+
+ Examples
+ --------
+ >>> a = ([1,2,3], [4,5,6]) # a is array-like
+ >>> np.empty_like(a)
+ array([[-1073741821, -1073741821, 3], # uninitialized
+ [ 0, 0, -1073741821]])
+ >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+ >>> np.empty_like(a)
+ array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
+ [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
+
+ """
+ return (prototype,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
+def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
+ """
+ concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
+
+ Join a sequence of arrays along an existing axis.
+
+ Parameters
+ ----------
+ a1, a2, ... : sequence of array_like
+ The arrays must have the same shape, except in the dimension
+ corresponding to `axis` (the first, by default).
+ axis : int, optional
+ The axis along which the arrays will be joined. If axis is None,
+ arrays are flattened before use. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
+ dtype : str or dtype
+ If provided, the destination array will have this dtype. Cannot be
+ provided together with `out`.
+
+ .. versionadded:: 1.20.0
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ res : ndarray
+ The concatenated array.
+
+ See Also
+ --------
+ ma.concatenate : Concatenate function that preserves input masks.
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size.
+ split : Split array into a list of multiple sub-arrays of equal size.
+ hsplit : Split array into multiple sub-arrays horizontally (column wise).
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ stack : Stack a sequence of arrays along a new axis.
+ block : Assemble arrays from blocks.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+
+ Notes
+ -----
+ When one or more of the arrays to be concatenated is a MaskedArray,
+ this function will return a MaskedArray object instead of an ndarray,
+ but the input masks are *not* preserved. In cases where a MaskedArray
+ is expected as input, use the ma.concatenate function from the masked
+ array module instead.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> b = np.array([[5, 6]])
+ >>> np.concatenate((a, b), axis=0)
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.concatenate((a, b.T), axis=1)
+ array([[1, 2, 5],
+ [3, 4, 6]])
+ >>> np.concatenate((a, b), axis=None)
+ array([1, 2, 3, 4, 5, 6])
+
+ This function will not preserve masking of MaskedArray inputs.
+
+ >>> a = np.ma.arange(3)
+ >>> a[1] = np.ma.masked
+ >>> b = np.arange(2, 5)
+ >>> a
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
+ >>> b
+ array([2, 3, 4])
+ >>> np.concatenate([a, b])
+ masked_array(data=[0, 1, 2, 2, 3, 4],
+ mask=False,
+ fill_value=999999)
+ >>> np.ma.concatenate([a, b])
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
+
+ """
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
+def inner(a, b):
+ """
+ inner(a, b, /)
+
+ Inner product of two arrays.
+
+ Ordinary inner product of vectors for 1-D arrays (without complex
+ conjugation), in higher dimensions a sum product over the last axes.
+
+ Parameters
+ ----------
+ a, b : array_like
+ If `a` and `b` are nonscalar, their last dimensions must match.
+
+ Returns
+ -------
+ out : ndarray
+ If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ ``out.shape = (*a.shape[:-1], *b.shape[:-1])``
+
+ Raises
+ ------
+ ValueError
+ If both `a` and `b` are nonscalar and their last dimensions have
+ different sizes.
+
+ See Also
+ --------
+ tensordot : Sum products over arbitrary axes.
+ dot : Generalised matrix product, using second last dimension of `b`.
+ einsum : Einstein summation convention.
+
+ Notes
+ -----
+ For vectors (1-D arrays) it computes the ordinary inner-product::
+
+ np.inner(a, b) = sum(a[:]*b[:])
+
+ More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::
+
+ np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+ or explicitly::
+
+ np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
+ = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
+
+ In addition `a` or `b` may be scalars, in which case::
+
+ np.inner(a,b) = a*b
+
+ Examples
+ --------
+ Ordinary inner product for vectors:
+
+ >>> a = np.array([1,2,3])
+ >>> b = np.array([0,1,0])
+ >>> np.inner(a, b)
+ 2
+
+ Some multidimensional examples:
+
+ >>> a = np.arange(24).reshape((2,3,4))
+ >>> b = np.arange(4)
+ >>> c = np.inner(a, b)
+ >>> c.shape
+ (2, 3)
+ >>> c
+ array([[ 14, 38, 62],
+ [ 86, 110, 134]])
+
+ >>> a = np.arange(2).reshape((1,1,2))
+ >>> b = np.arange(6).reshape((3,2))
+ >>> c = np.inner(a, b)
+ >>> c.shape
+ (1, 1, 3)
+ >>> c
+ array([[[1, 3, 5]]])
+
+ An example where `b` is a scalar:
+
+ >>> np.inner(np.eye(2), 7)
+ array([[7., 0.],
+ [0., 7.]])
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
+def where(condition, x=None, y=None):
+ """
+ where(condition, [x, y], /)
+
+ Return elements chosen from `x` or `y` depending on `condition`.
+
+ .. note::
+ When only `condition` is provided, this function is a shorthand for
+ ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+ preferred, as it behaves correctly for subclasses. The rest of this
+ documentation covers only the case where all three arguments are
+ provided.
+
+ Parameters
+ ----------
+ condition : array_like, bool
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like
+ Values from which to choose. `x`, `y` and `condition` need to be
+ broadcastable to some shape.
+
+ Returns
+ -------
+ out : ndarray
+ An array with elements from `x` where `condition` is True, and elements
+ from `y` elsewhere.
+
+ See Also
+ --------
+ choose
+ nonzero : The function that is called when x and y are omitted
+
+ Notes
+ -----
+ If all the arrays are 1-D, `where` is equivalent to::
+
+ [xv if c else yv
+ for c, xv, yv in zip(condition, x, y)]
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.where(a < 5, a, 10*a)
+ array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
+
+ This can be used on multidimensional arrays too:
+
+ >>> np.where([[True, False], [True, True]],
+ ... [[1, 2], [3, 4]],
+ ... [[9, 8], [7, 6]])
+ array([[1, 8],
+ [3, 4]])
+
+ The shapes of x, y, and the condition are broadcast together:
+
+ >>> x, y = np.ogrid[:3, :4]
+ >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
+ array([[10, 0, 0, 0],
+ [10, 11, 1, 1],
+ [10, 11, 12, 2]])
+
+ >>> a = np.array([[0, 1, 2],
+ ... [0, 2, 4],
+ ... [0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
+ """
+ return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+ """
+ lexsort(keys, axis=-1)
+
+ Perform an indirect stable sort using a sequence of keys.
+
+ Given multiple sorting keys, which can be interpreted as columns in a
+ spreadsheet, lexsort returns an array of integer indices that describes
+ the sort order by multiple columns. The last key in the sequence is used
+ for the primary sort order, the second-to-last key for the secondary sort
+ order, and so on. The keys argument must be a sequence of objects that
+ can be converted to arrays of the same shape. If a 2D array is provided
+ for the keys argument, its rows are interpreted as the sorting keys and
+ sorting is according to the last row, second last row etc.
+
+ Parameters
+ ----------
+ keys : (k, N) array or tuple containing k (N,)-shaped sequences
+ The `k` different "columns" to be sorted. The last column (or row if
+ `keys` is a 2D array) is the primary sort key.
+ axis : int, optional
+ Axis to be indirectly sorted. By default, sort over the last axis.
+
+ Returns
+ -------
+ indices : (N,) ndarray of ints
+ Array of indices that sort the keys along the specified axis.
+
+ See Also
+ --------
+ argsort : Indirect sort.
+ ndarray.sort : In-place sort.
+ sort : Return a sorted copy of an array.
+
+ Examples
+ --------
+ Sort names: first by surname, then by name.
+
+ >>> surnames = ('Hertz', 'Galilei', 'Hertz')
+ >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+ >>> ind = np.lexsort((first_names, surnames))
+ >>> ind
+ array([1, 2, 0])
+
+ >>> [surnames[i] + ", " + first_names[i] for i in ind]
+ ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+ Sort two columns of numbers:
+
+ >>> a = [1,5,1,4,3,4,4] # First column
+ >>> b = [9,4,0,4,0,2,1] # Second column
+ >>> ind = np.lexsort((b,a)) # Sort by a, then by b
+ >>> ind
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ >>> [(a[i],b[i]) for i in ind]
+ [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+ Note that sorting is first according to the elements of ``a``.
+ Secondary sorting is according to the elements of ``b``.
+
+ A normal ``argsort`` would have yielded:
+
+ >>> [(a[i],b[i]) for i in np.argsort(a)]
+ [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
+
+ Structured arrays are sorted lexically by ``argsort``:
+
+ >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
+ ... dtype=np.dtype([('x', int), ('y', int)]))
+
+ >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ """
+ if isinstance(keys, tuple):
+ return keys
+ else:
+ return (keys,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
+def can_cast(from_, to, casting=None):
+ """
+ can_cast(from_, to, casting='safe')
+
+ Returns True if cast between data types can occur according to the
+ casting rule. If from is a scalar or array scalar, also returns
+ True if the scalar value can be cast without overflow or truncation
+ to an integer.
+
+ Parameters
+ ----------
+ from_ : dtype, dtype specifier, scalar, or array
+ Data type, scalar, or array to cast from.
+ to : dtype or dtype specifier
+ Data type to cast to.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Returns
+ -------
+ out : bool
+ True if cast can occur according to the casting rule.
+
+ Notes
+ -----
+ .. versionchanged:: 1.17.0
+ Casting between a simple data type and a structured one is possible only
+ for "unsafe" casting. Casting to multiple fields is allowed, but
+ casting from multiple fields is not.
+
+ .. versionchanged:: 1.9.0
+ Casting from numeric to string types in 'safe' casting mode requires
+ that the string dtype length is long enough to store the maximum
+ integer/float value converted.
+
+ See also
+ --------
+ dtype, result_type
+
+ Examples
+ --------
+ Basic examples
+
+ >>> np.can_cast(np.int32, np.int64)
+ True
+ >>> np.can_cast(np.float64, complex)
+ True
+ >>> np.can_cast(complex, float)
+ False
+
+ >>> np.can_cast('i8', 'f8')
+ True
+ >>> np.can_cast('i8', 'f4')
+ False
+ >>> np.can_cast('i4', 'S4')
+ False
+
+ Casting scalars
+
+ >>> np.can_cast(100, 'i1')
+ True
+ >>> np.can_cast(150, 'i1')
+ False
+ >>> np.can_cast(150, 'u1')
+ True
+
+ >>> np.can_cast(3.5e100, np.float32)
+ False
+ >>> np.can_cast(1000.0, np.float32)
+ True
+
+ Array scalar checks the value, array does not
+
+ >>> np.can_cast(np.array(1000.0), np.float32)
+ True
+ >>> np.can_cast(np.array([1000.0]), np.float32)
+ False
+
+ Using the casting rules
+
+ >>> np.can_cast('i8', 'i8', 'no')
+ True
+ >>> np.can_cast('<i8', '>i8', 'no')
+ False
+
+ >>> np.can_cast('<i8', '>i8', 'equiv')
+ True
+ >>> np.can_cast('<i4', '>i8', 'equiv')
+ False
+
+ >>> np.can_cast('<i4', '>i8', 'safe')
+ True
+ >>> np.can_cast('<i8', '>i4', 'safe')
+ False
+
+ >>> np.can_cast('<i8', '>i4', 'same_kind')
+ True
+ >>> np.can_cast('<i8', '>u4', 'same_kind')
+ False
+
+ >>> np.can_cast('<i8', '>u4', 'unsafe')
+ True
+
+ """
+ return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+ """
+ min_scalar_type(a, /)
+
+ For scalar ``a``, returns the data type with the smallest size
+ and smallest scalar kind which can hold its value. For non-scalar
+ array ``a``, returns the vector's dtype unmodified.
+
+ Floating point values are not demoted to integers,
+ and complex values are not demoted to floats.
+
+ Parameters
+ ----------
+ a : scalar or array_like
+ The value whose minimal data type is to be found.
+
+ Returns
+ -------
+ out : dtype
+ The minimal data type.
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ See Also
+ --------
+ result_type, promote_types, dtype, can_cast
+
+ Examples
+ --------
+ >>> np.min_scalar_type(10)
+ dtype('uint8')
+
+ >>> np.min_scalar_type(-260)
+ dtype('int16')
+
+ >>> np.min_scalar_type(3.1)
+ dtype('float16')
+
+ >>> np.min_scalar_type(1e50)
+ dtype('float64')
+
+ >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+ dtype('float64')
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+ """
+ result_type(*arrays_and_dtypes)
+
+ Returns the type that results from applying the NumPy
+ type promotion rules to the arguments.
+
+ Type promotion in NumPy works similarly to the rules in languages
+ like C++, with some slight differences. When both scalars and
+ arrays are used, the array's type takes precedence and the actual value
+ of the scalar is taken into account.
+
+ For example, calculating 3*a, where a is an array of 32-bit floats,
+ intuitively should result in a 32-bit float output. If the 3 is a
+ 32-bit integer, the NumPy rules indicate it can't convert losslessly
+ into a 32-bit float, so a 64-bit float should be the result type.
+ By examining the value of the constant, '3', we see that it fits in
+ an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+ Parameters
+ ----------
+ arrays_and_dtypes : list of arrays and dtypes
+ The operands of some operation whose result type is needed.
+
+ Returns
+ -------
+ out : dtype
+ The result type.
+
+ See also
+ --------
+ dtype, promote_types, min_scalar_type, can_cast
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The specific algorithm used is as follows.
+
+ Categories are determined by first checking which of boolean,
+ integer (int/uint), or floating point (float/complex) the maximum
+ kind of all the arrays and the scalars are.
+
+ If there are only scalars or the maximum category of the scalars
+ is higher than the maximum category of the arrays,
+ the data types are combined with :func:`promote_types`
+ to produce the return value.
+
+ Otherwise, `min_scalar_type` is called on each array, and
+ the resulting data types are all combined with :func:`promote_types`
+ to produce the return value.
+
+ The set of int values is not a subset of the uint values for types
+ with the same number of bits, something not reflected in
+ :func:`min_scalar_type`, but handled as a special case in `result_type`.
+
+ Examples
+ --------
+ >>> np.result_type(3, np.arange(7, dtype='i1'))
+ dtype('int8')
+
+ >>> np.result_type('i4', 'c8')
+ dtype('complex128')
+
+ >>> np.result_type(3.0, -2)
+ dtype('float64')
+
+ """
+ return arrays_and_dtypes
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
+def dot(a, b, out=None):
+ """
+ dot(a, b, out=None)
+
+ Dot product of two arrays. Specifically,
+
+ - If both `a` and `b` are 1-D arrays, it is inner product of vectors
+ (without complex conjugation).
+
+ - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
+ but using :func:`matmul` or ``a @ b`` is preferred.
+
+ - If either `a` or `b` is 0-D (scalar), it is equivalent to
+ :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is
+ preferred.
+
+ - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
+ the last axis of `a` and `b`.
+
+ - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
+ sum product over the last axis of `a` and the second-to-last axis of
+ `b`::
+
+ dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
+
+ It uses an optimized BLAS library when possible (see `numpy.linalg`).
+
+ Parameters
+ ----------
+ a : array_like
+ First argument.
+ b : array_like
+ Second argument.
+ out : ndarray, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of `a` and `b`. If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ If `out` is given, then it is returned.
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ matmul : '@' operator as method with out parameter.
+ linalg.multi_dot : Chained dot product.
+
+ Examples
+ --------
+ >>> np.dot(3, 4)
+ 12
+
+ Neither argument is complex-conjugated:
+
+ >>> np.dot([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ For 2-D arrays it is the matrix product:
+
+ >>> a = [[1, 0], [0, 1]]
+ >>> b = [[4, 1], [2, 2]]
+ >>> np.dot(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+ >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+ >>> np.dot(a, b)[2,3,2,1,2,2]
+ 499128
+ >>> sum(a[2,3,2,:] * b[1,2,:,2])
+ 499128
+
+ """
+ return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+ """
+ vdot(a, b, /)
+
+ Return the dot product of two vectors.
+
+ The vdot(`a`, `b`) function handles complex numbers differently than
+ dot(`a`, `b`). If the first argument is complex the complex conjugate
+ of the first argument is used for the calculation of the dot product.
+
+ Note that `vdot` handles multidimensional arrays differently than `dot`:
+ it does *not* perform a matrix product, but flattens input arguments
+ to 1-D vectors first. Consequently, it should only be used for vectors.
+
+ Parameters
+ ----------
+ a : array_like
+ If `a` is complex the complex conjugate is taken before calculation
+ of the dot product.
+ b : array_like
+ Second argument to the dot product.
+
+ Returns
+ -------
+ output : ndarray
+ Dot product of `a` and `b`. Can be an int, float, or
+ complex depending on the types of `a` and `b`.
+
+ See Also
+ --------
+ dot : Return the dot product without using the complex conjugate of the
+ first argument.
+
+ Examples
+ --------
+ >>> a = np.array([1+2j,3+4j])
+ >>> b = np.array([5+6j,7+8j])
+ >>> np.vdot(a, b)
+ (70-8j)
+ >>> np.vdot(b, a)
+ (70+8j)
+
+ Note that higher-dimensional arrays are flattened!
+
+ >>> a = np.array([[1, 4], [5, 6]])
+ >>> b = np.array([[4, 1], [2, 2]])
+ >>> np.vdot(a, b)
+ 30
+ >>> np.vdot(b, a)
+ 30
+ >>> 1*4 + 4*1 + 5*2 + 6*2
+ 30
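+
+ For 1-D vectors, ``np.vdot(a, b)`` matches ``np.dot`` applied to the
+ complex conjugate of the first argument:
+
+ >>> a = np.array([1+2j, 3+4j])
+ >>> b = np.array([5+6j, 7+8j])
+ >>> np.dot(a.conj(), b)
+ (70-8j)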
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+ """
+ bincount(x, /, weights=None, minlength=0)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ A minimum number of bins for the output array.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is ``np.amax(x)+1``, or `minlength` if that is larger.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is negative.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=float))
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
+ according to the rule 'safe'
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
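+
+ ``minlength`` guarantees a lower bound on the output length, padding
+ the tail with zero counts:
+
+ >>> np.bincount(np.array([1, 1, 2]), minlength=5)
+ array([0, 2, 1, 0, 0])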
+
+ """
+ return (x, weights)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as
+ indexing in row-major (C-style) or column-major
+ (Fortran-style) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ Examples
+ --------
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
+ """
+ return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None):
+ """
+ unravel_index(indices, shape, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``shape``. Before version 1.6.0,
+ this function accepted just one index value.
+ shape : tuple of ints
+ The shape of the array to use for unraveling ``indices``.
+
+ .. versionchanged:: 1.16.0
+ Renamed from ``dims`` to ``shape``.
+
+ order : {'C', 'F'}, optional
+ Determines whether the indices should be viewed as indexing in
+ row-major (C-style) or column-major (Fortran-style) order.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ unraveled_coords : tuple of ndarray
+ Each array in the tuple has the same shape as the ``indices``
+ array.
+
+ See Also
+ --------
+ ravel_multi_index
+
+ Examples
+ --------
+ >>> np.unravel_index([22, 41, 37], (7,6))
+ (array([3, 6, 6]), array([4, 5, 1]))
+ >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+ (array([3, 6, 6]), array([4, 5, 1]))
+
+ >>> np.unravel_index(1621, (6,7,8,9))
+ (3, 1, 4, 1)
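+
+ For in-bounds indices, `ravel_multi_index` inverts this conversion:
+
+ >>> np.ravel_multi_index(np.unravel_index(1621, (6,7,8,9)), (6,7,8,9))
+ 1621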
+
+ """
+ return (indices,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
+def copyto(dst, src, casting=None, where=None):
+ """
+ copyto(dst, src, casting='same_kind', where=True)
+
+ Copies values from one array to another, broadcasting as necessary.
+
+ Raises a TypeError if the `casting` rule is violated, and if
+ `where` is provided, it selects which elements to copy.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dst : ndarray
+ The array into which values are copied.
+ src : array_like
+ The array from which values are copied.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when copying.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `dst`, and selects elements to copy from `src` to `dst`
+ wherever it contains the value True.
+
+ Examples
+ --------
+ >>> A = np.array([4, 5, 6])
+ >>> B = [1, 2, 3]
+ >>> np.copyto(A, B)
+ >>> A
+ array([1, 2, 3])
+
+ >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+ >>> B = [[4, 5, 6], [7, 8, 9]]
+ >>> np.copyto(A, B)
+ >>> A
+ array([[4, 5, 6],
+ [7, 8, 9]])
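+
+ With `where`, only the selected elements are overwritten:
+
+ >>> A = np.array([11, 22, 33])
+ >>> np.copyto(A, [10, 20, 30], where=[True, False, True])
+ >>> A
+ array([10, 22, 30])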
+
+ """
+ return (dst, src, where)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
+def putmask(a, mask, values):
+ """
+ putmask(a, mask, values)
+
+ Changes elements of an array based on conditional and input values.
+
+ Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+ If `values` is not the same size as `a` and `mask`, then it will repeat.
+ This gives behavior different from ``a[mask] = values``.
+
+ Parameters
+ ----------
+ a : ndarray
+ Target array.
+ mask : array_like
+ Boolean mask array. It has to be the same shape as `a`.
+ values : array_like
+ Values to put into `a` where `mask` is True. If `values` is smaller
+ than `a` it will be repeated.
+
+ See Also
+ --------
+ place, put, take, copyto
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.putmask(x, x>2, x**2)
+ >>> x
+ array([[ 0, 1, 2],
+ [ 9, 16, 25]])
+
+ If `values` is smaller than `a` it is repeated:
+
+ >>> x = np.arange(5)
+ >>> np.putmask(x, x>1, [-33, -44])
+ >>> x
+ array([ 0, 1, -33, -44, -33])
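+
+ By contrast, ``a[mask] = values`` does not repeat `values`; it requires
+ a scalar or an array broadcastable to the selection:
+
+ >>> x = np.arange(5)
+ >>> x[x > 1] = -1
+ >>> x
+ array([ 0, 1, -1, -1, -1])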
+
+ """
+ return (a, mask, values)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
+def packbits(a, axis=None, bitorder='big'):
+ """
+ packbits(a, /, axis=None, bitorder='big')
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ a : array_like
+ An array of integers or booleans whose elements should be packed to
+ bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+ bitorder : {'big', 'little'}, optional
+ The order of the input bits. 'big' will mimic bin(val),
+ ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
+ reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
+ Defaults to 'big'.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],
+ [ 64]],
+ [[192],
+ [ 32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
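+
+ With ``bitorder='little'`` the first array element becomes the least
+ significant bit, mirroring the description above:
+
+ >>> np.packbits([1, 1, 0, 0, 0, 0, 0, 0], bitorder='little')
+ array([3], dtype=uint8)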
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
+def unpackbits(a, axis=None, count=None, bitorder='big'):
+ """
+ unpackbits(a, /, axis=None, count=None, bitorder='big')
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
+
+ Each element of `a` represents a bit-field that should be unpacked
+ into a binary-valued output array. The shape of the output array is
+ either 1-D (if `axis` is ``None``) or the same shape as the input
+ array with unpacking done along the axis specified.
+
+ Parameters
+ ----------
+ a : ndarray, uint8 type
+ Input array.
+ axis : int, optional
+ The dimension over which bit-unpacking is done.
+ ``None`` implies unpacking the flattened array.
+ count : int or None, optional
+ The number of elements to unpack along `axis`, provided as a way
+ of undoing the effect of packing a size that is not a multiple
+ of eight. A non-negative number means to only unpack `count`
+ bits. A negative number means to trim off that many bits from
+ the end. ``None`` means to unpack the entire array (the
+ default). Counts larger than the available number of bits will
+ add zero padding to the output. Negative counts must not
+ exceed the available number of bits.
+
+ .. versionadded:: 1.17.0
+
+ bitorder : {'big', 'little'}, optional
+ The order of the returned bits. 'big' will mimic bin(val),
+ ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
+ the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
+ Defaults to 'big'.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ unpacked : ndarray, uint8 type
+ The elements are binary-valued (0 or 1).
+
+ See Also
+ --------
+ packbits : Packs the elements of a binary-valued array into bits in
+ a uint8 array.
+
+ Examples
+ --------
+ >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+ >>> a
+ array([[ 2],
+ [ 7],
+ [23]], dtype=uint8)
+ >>> b = np.unpackbits(a, axis=1)
+ >>> b
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+ >>> c = np.unpackbits(a, axis=1, count=-3)
+ >>> c
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0]], dtype=uint8)
+
+ >>> p = np.packbits(b, axis=0)
+ >>> np.unpackbits(p, axis=0)
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
+ >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
+ True
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
+def shares_memory(a, b, max_work=None):
+ """
+ shares_memory(a, b, /, max_work=None)
+
+ Determine if two arrays share memory.
+
+ .. warning::
+
+ This function can be exponentially slow for some inputs, unless
+ `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
+ If in doubt, use `numpy.may_share_memory` instead.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem (maximum number
+ of candidate solutions to consider). The following special
+ values are recognized:
+
+ max_work=MAY_SHARE_EXACT (default)
+ The problem is solved exactly. In this case, the function returns
+ True only if there is an element shared between the arrays. Finding
+ the exact solution may take extremely long in some cases.
+ max_work=MAY_SHARE_BOUNDS
+ Only the memory bounds of a and b are checked.
+
+ Raises
+ ------
+ numpy.TooHardError
+ Exceeded max_work.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ may_share_memory
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3, 4])
+ >>> np.shares_memory(x, np.array([5, 6, 7]))
+ False
+ >>> np.shares_memory(x[::2], x)
+ True
+ >>> np.shares_memory(x[::2], x[1::2])
+ False
+
+ Checking whether two arrays share memory is NP-complete, and
+ runtime may increase exponentially in the number of
+ dimensions. Hence, `max_work` should generally be set to a finite
+ number, as it is possible to construct examples that take
+ extremely long to run:
+
+ >>> from numpy.lib.stride_tricks import as_strided
+ >>> x = np.zeros([192163377], dtype=np.int8)
+ >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
+ >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
+ >>> np.shares_memory(x1, x2, max_work=1000)
+ Traceback (most recent call last):
+ ...
+ numpy.TooHardError: Exceeded max_work
+
+ Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
+ around 1 minute for this case. It is possible to construct problems
+ that take significantly longer still.
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
+def may_share_memory(a, b, max_work=None):
+ """
+ may_share_memory(a, b, /, max_work=None)
+
+ Determine if two arrays might share memory.
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
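+
+ Because only memory bounds are checked by default, interleaved but
+ element-wise disjoint views may still report True:
+
+ >>> x = np.arange(10)
+ >>> np.may_share_memory(x[::2], x[1::2])
+ True
+ >>> np.shares_memory(x[::2], x[1::2])
+ False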
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
+def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+ """
+ is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ Calculates which of the given dates are valid days, and which are not.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of bool, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of bool
+ An array with the same shape as ``dates``, containing True for
+ each valid day, and False for each invalid day.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # The weekdays are Friday, Saturday, and Monday
+ ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ array([False, False, True])
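+ >>> # A single date gives a scalar result (2011-07-15 was a Friday)
+ ... np.is_busday('2011-07-15')
+ True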
+ """
+ return (dates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
+def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ First adjusts the date to fall on a valid day according to
+ the ``roll`` rule, then applies offsets to the given dates
+ counted in valid days.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ offsets : array_like of int
+ The array of offsets, which is broadcast with ``dates``.
+ roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
+ How to treat dates that do not fall on a valid day. The default
+ is 'raise'.
+
+ * 'raise' means to raise an exception for an invalid day.
+ * 'nat' means to return a NaT (not-a-time) for an invalid day.
+ * 'forward' and 'following' mean to take the first valid day
+ later in time.
+ * 'backward' and 'preceding' mean to take the first valid day
+ earlier in time.
+ * 'modifiedfollowing' means to take the first valid day
+ later in time unless it is across a Month boundary, in which
+ case to take the first valid day earlier in time.
+ * 'modifiedpreceding' means to take the first valid day
+ earlier in time unless it is across a Month boundary, in which
+ case to take the first valid day later in time.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of datetime64[D], optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of datetime64[D]
+ An array with a shape from broadcasting ``dates`` and ``offsets``
+ together, containing the dates with offsets applied.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # First business day in October 2011 (not accounting for holidays)
+ ... np.busday_offset('2011-10', 0, roll='forward')
+ numpy.datetime64('2011-10-03')
+ >>> # Last business day in February 2012 (not accounting for holidays)
+ ... np.busday_offset('2012-03', -1, roll='forward')
+ numpy.datetime64('2012-02-29')
+ >>> # Third Wednesday in January 2011
+ ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
+ numpy.datetime64('2011-01-19')
+ >>> # 2012 Mother's Day in Canada and the U.S.
+ ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
+ numpy.datetime64('2012-05-13')
+
+ >>> # First business day on or after a date
+ ... np.busday_offset('2011-03-20', 0, roll='forward')
+ numpy.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 0, roll='forward')
+ numpy.datetime64('2011-03-22')
+ >>> # First business day after a date
+ ... np.busday_offset('2011-03-20', 1, roll='backward')
+ numpy.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 1, roll='backward')
+ numpy.datetime64('2011-03-23')
+ """
+ return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
+
+ Counts the number of valid days between `begindates` and
+ `enddates`, not including the day of `enddates`.
+
+ If ``enddates`` specifies a date value that is earlier than the
+ corresponding ``begindates`` date value, the count will be negative.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ begindates : array_like of datetime64[D]
+ The array of the first dates for counting.
+ enddates : array_like of datetime64[D]
+ The array of the end dates for counting, which are excluded
+ from the count themselves.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of int, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of int
+ An array with a shape from broadcasting ``begindates`` and ``enddates``
+ together, containing the number of valid days between
+ the begin and end dates.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+
+ Examples
+ --------
+ >>> # Number of weekdays in January 2011
+ ... np.busday_count('2011-01', '2011-02')
+ 21
+ >>> # Number of weekdays in 2011
+ >>> np.busday_count('2011', '2012')
+ 260
+ >>> # Number of Saturdays in 2011
+ ... np.busday_count('2011', '2012', weekmask='Sat')
+ 53
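+ >>> # Reversed date ranges give a negative count
+ ... np.busday_count('2011-02', '2011-01')
+ -21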
+ """
+ return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+ _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+ """
+ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+ Convert an array of datetimes into an array of strings.
+
+ Parameters
+ ----------
+ arr : array_like of datetime64
+ The array of UTC timestamps to format.
+ unit : str
+ One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+ timezone : {'naive', 'UTC', 'local'} or tzinfo
+ Timezone information to use when displaying the datetime. If 'UTC', end
+ with a Z to indicate UTC time. If 'local', convert to the local timezone
+ first, and suffix with a +-#### timezone offset. If a tzinfo object,
+ then do as with 'local', but use the specified timezone.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+ Casting to allow when changing between datetime units.
+
+ Returns
+ -------
+ str_arr : ndarray
+ An array of strings the same shape as `arr`.
+
+ Examples
+ --------
+ >>> import pytz
+ >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+ >>> d
+ array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+ '2002-10-27T07:30'], dtype='datetime64[m]')
+
+ Setting the timezone to UTC shows the same information, but with a Z suffix
+
+ >>> np.datetime_as_string(d, timezone='UTC')
+ array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+ '2002-10-27T07:30Z'], dtype='<U35')
+
+ Note that we picked datetimes that cross a DST boundary. Passing in a
+ ``pytz`` timezone object will print the appropriate offset
+
+ >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+ array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+ '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+ Passing in a unit will change the precision
+
+ >>> np.datetime_as_string(d, unit='h')
+ array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+ dtype='<U32')
+ >>> np.datetime_as_string(d, unit='s')
+ array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+ '2002-10-27T07:30:00'], dtype='<U38')
+
+ 'casting' can be used to specify whether precision can be changed
+
+ >>> np.datetime_as_string(d, unit='h', casting='safe')
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot create a datetime string as units 'h' from a NumPy
+ datetime with units 'm' according to the rule 'safe'
+ """
+ return (arr,)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/multiarray.pyi b/venv/lib/python3.9/site-packages/numpy/core/multiarray.pyi
new file mode 100644
index 00000000..0701085b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/multiarray.pyi
@@ -0,0 +1,1021 @@
+# TODO: Sort out any and all missing functions in this namespace
+
+import os
+import datetime as dt
+from collections.abc import Sequence, Callable, Iterable
+from typing import (
+ Literal as L,
+ Any,
+ overload,
+ TypeVar,
+ SupportsIndex,
+ final,
+ Final,
+ Protocol,
+ ClassVar,
+)
+
+from numpy import (
+ # Re-exports
+ busdaycalendar as busdaycalendar,
+ broadcast as broadcast,
+ dtype as dtype,
+ ndarray as ndarray,
+ nditer as nditer,
+
+ # The rest
+ ufunc,
+ str_,
+ bool_,
+ uint8,
+ intp,
+ int_,
+ float64,
+ timedelta64,
+ datetime64,
+ generic,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ _OrderKACF,
+ _OrderCF,
+ _CastingKind,
+ _ModeKind,
+ _SupportsBuffer,
+ _IOProtocol,
+ _CopyMode,
+ _NDIterFlagsKind,
+ _NDIterOpFlagsKind,
+)
+
+from numpy._typing import (
+ # Shapes
+ _ShapeLike,
+
+ # DTypes
+ DTypeLike,
+ _DTypeLike,
+
+ # Arrays
+ NDArray,
+ ArrayLike,
+ _ArrayLike,
+ _SupportsArrayFunc,
+ _NestedSequence,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeStr_co,
+ _ArrayLikeBytes_co,
+ _ScalarLike_co,
+ _IntLike_co,
+ _FloatLike_co,
+ _TD64Like_co,
+)
+
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+# Valid time units
+_UnitKind = L[
+ "Y",
+ "M",
+ "D",
+ "h",
+ "m",
+ "s",
+ "ms",
+ "us", "μs",
+ "ns",
+ "ps",
+ "fs",
+ "as",
+]
+_RollKind = L[ # `raise` is deliberately excluded
+ "nat",
+ "forward",
+ "following",
+ "backward",
+ "preceding",
+ "modifiedfollowing",
+ "modifiedpreceding",
+]
+
+class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]):
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
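+# (i.e. any object with ``__len__`` and an int-accepting ``__getitem__``,
+# e.g. a list or tuple of array-likes, as accepted by `concatenate` below)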
+
+__all__: list[str]
+
+ALLOW_THREADS: Final[int] # 0 or 1 (system-specific)
+BUFSIZE: L[8192]
+CLIP: L[0]
+WRAP: L[1]
+RAISE: L[2]
+MAXDIMS: L[32]
+MAY_SHARE_BOUNDS: L[0]
+MAY_SHARE_EXACT: L[-1]
+tracemalloc_domain: L[389047]
+
+@overload
+def empty_like(
+ prototype: _ArrayType,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> _ArrayType: ...
+@overload
+def empty_like(
+ prototype: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def empty_like(
+ prototype: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+@overload
+def empty_like(
+ prototype: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def empty_like(
+ prototype: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def array(
+ object: _ArrayType,
+ dtype: None = ...,
+ *,
+ copy: bool | _CopyMode = ...,
+ order: _OrderKACF = ...,
+ subok: L[True],
+ ndmin: int = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> _ArrayType: ...
+@overload
+def array(
+ object: _ArrayLike[_SCT],
+ dtype: None = ...,
+ *,
+ copy: bool | _CopyMode = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ ndmin: int = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def array(
+ object: object,
+ dtype: None = ...,
+ *,
+ copy: bool | _CopyMode = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ ndmin: int = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def array(
+ object: Any,
+ dtype: _DTypeLike[_SCT],
+ *,
+ copy: bool | _CopyMode = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ ndmin: int = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def array(
+ object: Any,
+ dtype: DTypeLike,
+ *,
+ copy: bool | _CopyMode = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ ndmin: int = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def zeros(
+ shape: _ShapeLike,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def zeros(
+ shape: _ShapeLike,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def zeros(
+ shape: _ShapeLike,
+ dtype: DTypeLike,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def empty(
+ shape: _ShapeLike,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def empty(
+ shape: _ShapeLike,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def empty(
+ shape: _ShapeLike,
+ dtype: DTypeLike,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def unravel_index( # type: ignore[misc]
+ indices: _IntLike_co,
+ shape: _ShapeLike,
+ order: _OrderCF = ...,
+) -> tuple[intp, ...]: ...
+@overload
+def unravel_index(
+ indices: _ArrayLikeInt_co,
+ shape: _ShapeLike,
+ order: _OrderCF = ...,
+) -> tuple[NDArray[intp], ...]: ...
+
+@overload
+def ravel_multi_index( # type: ignore[misc]
+ multi_index: Sequence[_IntLike_co],
+ dims: Sequence[SupportsIndex],
+ mode: _ModeKind | tuple[_ModeKind, ...] = ...,
+ order: _OrderCF = ...,
+) -> intp: ...
+@overload
+def ravel_multi_index(
+ multi_index: Sequence[_ArrayLikeInt_co],
+ dims: Sequence[SupportsIndex],
+ mode: _ModeKind | tuple[_ModeKind, ...] = ...,
+ order: _OrderCF = ...,
+) -> NDArray[intp]: ...
+
+# NOTE: Allow any sequence of array-like objects
+@overload
+def concatenate( # type: ignore[misc]
+ arrays: _ArrayLike[_SCT],
+ /,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ casting: None | _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def concatenate( # type: ignore[misc]
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
+ /,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ casting: None | _CastingKind = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate( # type: ignore[misc]
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
+ /,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: _DTypeLike[_SCT],
+ casting: None | _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def concatenate( # type: ignore[misc]
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
+ /,
+ axis: None | SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: DTypeLike,
+ casting: None | _CastingKind = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate(
+ arrays: _SupportsLenAndGetItem[int, ArrayLike],
+ /,
+ axis: None | SupportsIndex = ...,
+ out: _ArrayType = ...,
+ *,
+ dtype: DTypeLike = ...,
+ casting: None | _CastingKind = ...
+) -> _ArrayType: ...
+
+def inner(
+ a: ArrayLike,
+ b: ArrayLike,
+ /,
+) -> Any: ...
+
+@overload
+def where(
+ condition: ArrayLike,
+ /,
+) -> tuple[NDArray[intp], ...]: ...
+@overload
+def where(
+ condition: ArrayLike,
+ x: ArrayLike,
+ y: ArrayLike,
+ /,
+) -> NDArray[Any]: ...
+
+def lexsort(
+ keys: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> Any: ...
+
+def can_cast(
+ from_: ArrayLike | DTypeLike,
+ to: DTypeLike,
+ casting: None | _CastingKind = ...,
+) -> bool: ...
+
+def min_scalar_type(
+ a: ArrayLike, /,
+) -> dtype[Any]: ...
+
+def result_type(
+ *arrays_and_dtypes: ArrayLike | DTypeLike,
+) -> dtype[Any]: ...
+
+@overload
+def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...
+@overload
+def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ...
+
+@overload
+def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> bool_: ... # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc]
+@overload
+def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...
+@overload
+def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...
+@overload
+def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
+
+def bincount(
+ x: ArrayLike,
+ /,
+ weights: None | ArrayLike = ...,
+ minlength: SupportsIndex = ...,
+) -> NDArray[intp]: ...
+
+def copyto(
+ dst: NDArray[Any],
+ src: ArrayLike,
+ casting: None | _CastingKind = ...,
+ where: None | _ArrayLikeBool_co = ...,
+) -> None: ...
+
+def putmask(
+ a: NDArray[Any],
+ mask: _ArrayLikeBool_co,
+ values: ArrayLike,
+) -> None: ...
+
+def packbits(
+ a: _ArrayLikeInt_co,
+ /,
+ axis: None | SupportsIndex = ...,
+ bitorder: L["big", "little"] = ...,
+) -> NDArray[uint8]: ...
+
+def unpackbits(
+ a: _ArrayLike[uint8],
+ /,
+ axis: None | SupportsIndex = ...,
+ count: None | SupportsIndex = ...,
+ bitorder: L["big", "little"] = ...,
+) -> NDArray[uint8]: ...
+
+def shares_memory(
+ a: object,
+ b: object,
+ /,
+ max_work: None | int = ...,
+) -> bool: ...
+
+def may_share_memory(
+ a: object,
+ b: object,
+ /,
+ max_work: None | int = ...,
+) -> bool: ...
+
+@overload
+def asarray(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def asanyarray(
+ a: _ArrayType, # Preserve subclass-information
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> _ArrayType: ...
+@overload
+def asanyarray(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asanyarray(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def asanyarray(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asanyarray(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def ascontiguousarray(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ascontiguousarray(
+ a: object,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def ascontiguousarray(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ascontiguousarray(
+ a: Any,
+ dtype: DTypeLike,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def asfortranarray(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asfortranarray(
+ a: object,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def asfortranarray(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asfortranarray(
+ a: Any,
+ dtype: DTypeLike,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+# In practice `list[Any]` is a list with an int, an int, and a valid
+# `np.seterrcall()` object
+def geterrobj() -> list[Any]: ...
+def seterrobj(errobj: list[Any], /) -> None: ...
+
+def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
+
+# `sep` is a de facto mandatory argument, as its default value is deprecated
+@overload
+def fromstring(
+ string: str | bytes,
+ dtype: None = ...,
+ count: SupportsIndex = ...,
+ *,
+ sep: str,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def fromstring(
+ string: str | bytes,
+ dtype: _DTypeLike[_SCT],
+ count: SupportsIndex = ...,
+ *,
+ sep: str,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def fromstring(
+ string: str | bytes,
+ dtype: DTypeLike,
+ count: SupportsIndex = ...,
+ *,
+ sep: str,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+def frompyfunc(
+ func: Callable[..., Any], /,
+ nin: SupportsIndex,
+ nout: SupportsIndex,
+ *,
+ identity: Any = ...,
+) -> ufunc: ...
+
+@overload
+def fromfile(
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
+ dtype: None = ...,
+ count: SupportsIndex = ...,
+ sep: str = ...,
+ offset: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def fromfile(
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
+ dtype: _DTypeLike[_SCT],
+ count: SupportsIndex = ...,
+ sep: str = ...,
+ offset: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def fromfile(
+ file: str | bytes | os.PathLike[Any] | _IOProtocol,
+ dtype: DTypeLike,
+ count: SupportsIndex = ...,
+ sep: str = ...,
+ offset: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def fromiter(
+ iter: Iterable[Any],
+ dtype: _DTypeLike[_SCT],
+ count: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def fromiter(
+ iter: Iterable[Any],
+ dtype: DTypeLike,
+ count: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def frombuffer(
+ buffer: _SupportsBuffer,
+ dtype: None = ...,
+ count: SupportsIndex = ...,
+ offset: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def frombuffer(
+ buffer: _SupportsBuffer,
+ dtype: _DTypeLike[_SCT],
+ count: SupportsIndex = ...,
+ offset: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def frombuffer(
+ buffer: _SupportsBuffer,
+ dtype: DTypeLike,
+ count: SupportsIndex = ...,
+ offset: SupportsIndex = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def arange( # type: ignore[misc]
+ stop: _IntLike_co,
+ /, *,
+ dtype: None = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def arange( # type: ignore[misc]
+ start: _IntLike_co,
+ stop: _IntLike_co,
+ step: _IntLike_co = ...,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def arange( # type: ignore[misc]
+ stop: _FloatLike_co,
+ /, *,
+ dtype: None = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def arange( # type: ignore[misc]
+ start: _FloatLike_co,
+ stop: _FloatLike_co,
+ step: _FloatLike_co = ...,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def arange(
+ stop: _TD64Like_co,
+ /, *,
+ dtype: None = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def arange(
+ start: _TD64Like_co,
+ stop: _TD64Like_co,
+ step: _TD64Like_co = ...,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def arange( # both start and stop must always be specified for datetime64
+ start: datetime64,
+ stop: datetime64,
+ step: datetime64 = ...,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def arange(
+ stop: Any,
+ /, *,
+ dtype: _DTypeLike[_SCT],
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def arange(
+ start: Any,
+ stop: Any,
+ step: Any = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def arange(
+ stop: Any, /,
+ *,
+ dtype: DTypeLike,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def arange(
+ start: Any,
+ stop: Any,
+ step: Any = ...,
+ dtype: DTypeLike = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+def datetime_data(
+ dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
+) -> tuple[str, int]: ...
+
+# The datetime functions perform unsafe casts to `datetime64[D]`,
+# so a lot of different argument types are allowed here
+
+@overload
+def busday_count( # type: ignore[misc]
+ begindates: _ScalarLike_co | dt.date,
+ enddates: _ScalarLike_co | dt.date,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> int_: ...
+@overload
+def busday_count( # type: ignore[misc]
+ begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> NDArray[int_]: ...
+@overload
+def busday_count(
+ begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+# `roll="raise"` is (more or less?) equivalent to `casting="safe"`
+@overload
+def busday_offset( # type: ignore[misc]
+ dates: datetime64 | dt.date,
+ offsets: _TD64Like_co | dt.timedelta,
+ roll: L["raise"] = ...,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> datetime64: ...
+@overload
+def busday_offset( # type: ignore[misc]
+ dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
+ offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
+ roll: L["raise"] = ...,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def busday_offset( # type: ignore[misc]
+ dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
+ offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
+ roll: L["raise"] = ...,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+@overload
+def busday_offset( # type: ignore[misc]
+ dates: _ScalarLike_co | dt.date,
+ offsets: _ScalarLike_co | dt.timedelta,
+ roll: _RollKind,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> datetime64: ...
+@overload
+def busday_offset( # type: ignore[misc]
+ dates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
+ roll: _RollKind,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def busday_offset(
+ dates: ArrayLike | dt.date | _NestedSequence[dt.date],
+ offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
+ roll: _RollKind,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def is_busday( # type: ignore[misc]
+ dates: _ScalarLike_co | dt.date,
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> bool_: ...
+@overload
+def is_busday( # type: ignore[misc]
+ dates: ArrayLike | _NestedSequence[dt.date],
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def is_busday(
+ dates: ArrayLike | _NestedSequence[dt.date],
+ weekmask: ArrayLike = ...,
+ holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+ busdaycal: None | busdaycalendar = ...,
+ out: _ArrayType = ...,
+) -> _ArrayType: ...
+
+@overload
+def datetime_as_string( # type: ignore[misc]
+ arr: datetime64 | dt.date,
+ unit: None | L["auto"] | _UnitKind = ...,
+ timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
+ casting: _CastingKind = ...,
+) -> str_: ...
+@overload
+def datetime_as_string(
+ arr: _ArrayLikeDT64_co | _NestedSequence[dt.date],
+ unit: None | L["auto"] | _UnitKind = ...,
+ timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
+ casting: _CastingKind = ...,
+) -> NDArray[str_]: ...
+
+@overload
+def compare_chararrays(
+ a1: _ArrayLikeStr_co,
+ a2: _ArrayLikeStr_co,
+ cmp: L["<", "<=", "==", ">=", ">", "!="],
+ rstrip: bool,
+) -> NDArray[bool_]: ...
+@overload
+def compare_chararrays(
+ a1: _ArrayLikeBytes_co,
+ a2: _ArrayLikeBytes_co,
+ cmp: L["<", "<=", "==", ">=", ">", "!="],
+ rstrip: bool,
+) -> NDArray[bool_]: ...
+
+def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ...
+
+_GetItemKeys = L[
+ "C", "CONTIGUOUS", "C_CONTIGUOUS",
+ "F", "FORTRAN", "F_CONTIGUOUS",
+ "W", "WRITEABLE",
+ "B", "BEHAVED",
+ "O", "OWNDATA",
+ "A", "ALIGNED",
+ "X", "WRITEBACKIFCOPY",
+ "CA", "CARRAY",
+ "FA", "FARRAY",
+ "FNC",
+ "FORC",
+]
+_SetItemKeys = L[
+ "A", "ALIGNED",
+ "W", "WRITEABLE",
+ "X", "WRITEBACKIFCOPY",
+]
+
+@final
+class flagsobj:
+ __hash__: ClassVar[None] # type: ignore[assignment]
+ aligned: bool
+ # NOTE: deprecated
+ # updateifcopy: bool
+ writeable: bool
+ writebackifcopy: bool
+ @property
+ def behaved(self) -> bool: ...
+ @property
+ def c_contiguous(self) -> bool: ...
+ @property
+ def carray(self) -> bool: ...
+ @property
+ def contiguous(self) -> bool: ...
+ @property
+ def f_contiguous(self) -> bool: ...
+ @property
+ def farray(self) -> bool: ...
+ @property
+ def fnc(self) -> bool: ...
+ @property
+ def forc(self) -> bool: ...
+ @property
+ def fortran(self) -> bool: ...
+ @property
+ def num(self) -> int: ...
+ @property
+ def owndata(self) -> bool: ...
+ def __getitem__(self, key: _GetItemKeys) -> bool: ...
+ def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ...
+
+def nested_iters(
+ op: ArrayLike | Sequence[ArrayLike],
+ axes: Sequence[Sequence[SupportsIndex]],
+ flags: None | Sequence[_NDIterFlagsKind] = ...,
+ op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ...,
+ op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ buffersize: SupportsIndex = ...,
+) -> tuple[nditer, ...]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/numeric.py b/venv/lib/python3.9/site-packages/numpy/core/numeric.py
new file mode 100644
index 00000000..fdd81a20
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/numeric.py
@@ -0,0 +1,2559 @@
+import functools
+import itertools
+import operator
+import sys
+import warnings
+import numbers
+
+import numpy as np
+from . import multiarray
+from .multiarray import (
+ fastCopyAndTranspose, ALLOW_THREADS,
+ BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
+ WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
+ asfortranarray, broadcast, can_cast, compare_chararrays,
+ concatenate, copyto, dot, dtype, empty,
+ empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter,
+ fromstring, inner, lexsort, matmul, may_share_memory,
+ min_scalar_type, ndarray, nditer, nested_iters, promote_types,
+ putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
+ zeros, normalize_axis_index, _get_promotion_state, _set_promotion_state)
+
+from . import overrides
+from . import umath
+from . import shape_base
+from .overrides import set_array_function_like_doc, set_module
+from .umath import (multiply, invert, sin, PINF, NAN)
+from . import numerictypes
+from .numerictypes import longlong, intc, int_, float_, complex_, bool_
+from ._exceptions import TooHardError, AxisError
+from ._ufunc_config import errstate, _no_nep50_warning
+
+bitwise_not = invert
+ufunc = type(sin)
+newaxis = None
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
+ 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
+ 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
+ 'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where',
+ 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+ 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
+ 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
+ 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
+ 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'identity', 'allclose', 'compare_chararrays', 'putmask',
+ 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+ 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
+ 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
+ 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
+ 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError',
+ '_get_promotion_state', '_set_promotion_state']
+
+
+@set_module('numpy')
+class ComplexWarning(RuntimeWarning):
+ """
+ The warning raised when casting a complex dtype to a real dtype.
+
+ As implemented, casting a complex number to a real dtype discards its imaginary
+ part, but this behavior may not be what the user actually wants.
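+
+ For example, ``np.array([1+2j]).astype(float)`` emits this warning
+ and returns ``array([1.])``.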
+
+ """
+ pass
+
+
+def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
+ return (a,)
+
+
+@array_function_dispatch(_zeros_like_dispatcher)
+def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
+ """
+ Return an array of zeros with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of `a`, otherwise it will be a base-class array. Defaults
+ to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of zeros with the same shape and type as `a`.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ zeros : Return a new array setting values to zero.
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> x = x.reshape((2, 3))
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.zeros_like(x)
+ array([[0, 0, 0],
+ [0, 0, 0]])
+
+ >>> y = np.arange(3, dtype=float)
+ >>> y
+ array([0., 1., 2.])
+ >>> np.zeros_like(y)
+ array([0., 0., 0.])
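+
+ For string dtypes the "zero" fill value is the empty string, matching
+ `zeros`:
+
+ >>> np.zeros_like(np.array(['a', 'bc']))
+ array(['', ''], dtype='<U2')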
+
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
+ # a zeros array (not a scalar 0) is needed so string dtypes match `zeros`
+ z = zeros(1, dtype=res.dtype)
+ multiarray.copyto(res, z, casting='unsafe')
+ return res
+
+
+def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def ones(shape, dtype=None, order='C', *, like=None):
+ """
+ Return a new array of given shape and type, filled with ones.
+
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ dtype : data-type, optional
+ The desired data-type for the array, e.g., `numpy.int8`. Default is
+ `numpy.float64`.
+ order : {'C', 'F'}, optional, default: C
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of ones with the given shape, dtype, and order.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ empty : Return a new uninitialized array.
+ zeros : Return a new array setting values to zero.
+ full : Return a new array of given shape filled with value.
+
+
+ Examples
+ --------
+ >>> np.ones(5)
+ array([1., 1., 1., 1., 1.])
+
+ >>> np.ones((5,), dtype=int)
+ array([1, 1, 1, 1, 1])
+
+ >>> np.ones((2, 1))
+ array([[1.],
+ [1.]])
+
+ >>> s = (2,2)
+ >>> np.ones(s)
+ array([[1., 1.],
+ [1., 1.]])
+
+ """
+ if like is not None:
+ return _ones_with_like(shape, dtype=dtype, order=order, like=like)
+
+ a = empty(shape, dtype, order)
+ multiarray.copyto(a, 1, casting='unsafe')
+ return a
+
+
+_ones_with_like = array_function_dispatch(
+ _ones_dispatcher, use_like=True
+)(ones)
+
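+# Dispatch sketch (hypothetical ``Capture`` class, not library code): the
+# ``like=`` argument routes creation through the reference object's
+# ``__array_function__`` (NEP 35), so duck arrays can intercept it:
+#
+#     >>> class Capture:
+#     ...     def __array_function__(self, func, types, args, kwargs):
+#     ...         return func.__name__    # just report what was dispatched
+#     >>> np.ones(3, like=Capture())
+#     'ones'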
+
+def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
+ return (a,)
+
+
+@array_function_dispatch(_ones_like_dispatcher)
+def ones_like(a, dtype=None, order='K', subok=True, shape=None):
+ """
+ Return an array of ones with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of `a`, otherwise it will be a base-class array. Defaults
+ to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of ones with the same shape and type as `a`.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ ones : Return a new array setting values to one.
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> x = x.reshape((2, 3))
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.ones_like(x)
+ array([[1, 1, 1],
+ [1, 1, 1]])
+
+ >>> y = np.arange(3, dtype=float)
+ >>> y
+ array([0., 1., 2.])
+ >>> np.ones_like(y)
+ array([1., 1., 1.])
+
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
+ multiarray.copyto(res, 1, casting='unsafe')
+ return res
+
+
+def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def full(shape, fill_value, dtype=None, order='C', *, like=None):
+ """
+ Return a new array of given shape and type, filled with `fill_value`.
+
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the new array, e.g., ``(2, 3)`` or ``2``.
+ fill_value : scalar or array_like
+ Fill value.
+ dtype : data-type, optional
+ The desired data-type for the array. The default, None, means
+ ``np.array(fill_value).dtype``.
+ order : {'C', 'F'}, optional
+ Whether to store multidimensional data in C- or Fortran-contiguous
+ (row- or column-wise) order in memory.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of `fill_value` with the given shape, dtype, and order.
+
+ See Also
+ --------
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+ ones : Return a new array setting values to one.
+ zeros : Return a new array setting values to zero.
+
+ Examples
+ --------
+ >>> np.full((2, 2), np.inf)
+ array([[inf, inf],
+ [inf, inf]])
+ >>> np.full((2, 2), 10)
+ array([[10, 10],
+ [10, 10]])
+
+ >>> np.full((2, 2), [1, 2])
+ array([[1, 2],
+ [1, 2]])
+
+ """
+ if like is not None:
+ return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
+
+ if dtype is None:
+ fill_value = asarray(fill_value)
+ dtype = fill_value.dtype
+ a = empty(shape, dtype, order)
+ multiarray.copyto(a, fill_value, casting='unsafe')
+ return a
+
+
+_full_with_like = array_function_dispatch(
+ _full_dispatcher, use_like=True
+)(full)
+
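+# Illustrative sketch (not part of the library source): with ``dtype=None``
+# the fill value determines the result type:
+#
+#     >>> np.full(3, 7).dtype      # platform default integer, e.g. int64
+#     dtype('int64')
+#     >>> np.full(3, 7.0).dtype
+#     dtype('float64')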
+
+def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
+ return (a,)
+
+
+@array_function_dispatch(_full_like_dispatcher)
+def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
+ """
+ Return a full array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ a : array_like
+ The shape and data-type of `a` define these same attributes of
+ the returned array.
+ fill_value : array_like
+ Fill value.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible.
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of `a`, otherwise it will be a base-class array. Defaults
+ to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of `fill_value` with the same shape and type as `a`.
+
+ See Also
+ --------
+ empty_like : Return an empty array with shape and type of input.
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full : Return a new array of given shape filled with value.
+
+ Examples
+ --------
+ >>> x = np.arange(6, dtype=int)
+ >>> np.full_like(x, 1)
+ array([1, 1, 1, 1, 1, 1])
+ >>> np.full_like(x, 0.1)
+ array([0, 0, 0, 0, 0, 0])
+ >>> np.full_like(x, 0.1, dtype=np.double)
+ array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ >>> np.full_like(x, np.nan, dtype=np.double)
+ array([nan, nan, nan, nan, nan, nan])
+
+ >>> y = np.arange(6, dtype=np.double)
+ >>> np.full_like(y, 0.1)
+ array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+
+ >>> y = np.zeros([2, 2, 3], dtype=int)
+ >>> np.full_like(y, [0, 0, 255])
+ array([[[ 0, 0, 255],
+ [ 0, 0, 255]],
+ [[ 0, 0, 255],
+ [ 0, 0, 255]]])
+ """
+ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
+ multiarray.copyto(res, fill_value, casting='unsafe')
+ return res
+
+
+def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_nonzero_dispatcher)
+def count_nonzero(a, axis=None, *, keepdims=False):
+ """
+ Counts the number of non-zero values in the array ``a``.
+
+ The word "non-zero" is in reference to the Python 2.x
+ built-in method ``__nonzero__()`` (renamed ``__bool__()``
+ in Python 3.x) of Python objects that tests an object's
+ "truthfulness". For example, any number is considered
+ truthful if it is nonzero, whereas any string is considered
+ truthful if it is not the empty string. Thus, this function
+ (recursively) counts how many elements in ``a`` (and in
+ sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
+ method evaluate to ``True``.
+
+ Parameters
+ ----------
+ a : array_like
+ The array for which to count non-zeros.
+ axis : int or tuple, optional
+ Axis or tuple of axes along which to count non-zeros.
+ Default is None, meaning that non-zeros will be counted
+ along a flattened version of ``a``.
+
+ .. versionadded:: 1.12.0
+
+ keepdims : bool, optional
+ If this is set to True, the axes that are counted are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ .. versionadded:: 1.19.0
+
+ Returns
+ -------
+ count : int or array of int
+ Number of non-zero values in the array along a given axis.
+ Otherwise, the total number of non-zero values in the array
+ is returned.
+
+ See Also
+ --------
+ nonzero : Return the coordinates of all the non-zero values.
+
+ Examples
+ --------
+ >>> np.count_nonzero(np.eye(4))
+ 4
+ >>> a = np.array([[0, 1, 7, 0],
+ ... [3, 0, 2, 19]])
+ >>> np.count_nonzero(a)
+ 5
+ >>> np.count_nonzero(a, axis=0)
+ array([1, 1, 2, 1])
+ >>> np.count_nonzero(a, axis=1)
+ array([2, 3])
+ >>> np.count_nonzero(a, axis=1, keepdims=True)
+ array([[2],
+ [3]])
+ """
+ if axis is None and not keepdims:
+ return multiarray.count_nonzero(a)
+
+ a = asanyarray(a)
+
+ # TODO: this works around .astype(bool) not working properly (gh-9847)
+ if np.issubdtype(a.dtype, np.character):
+ a_bool = a != a.dtype.type()
+ else:
+ a_bool = a.astype(np.bool_, copy=False)
+
+ return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
+
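+# Illustrative sketch (not part of the library source): for character dtypes
+# the workaround above compares against the empty string, the "zero" of
+# those dtypes:
+#
+#     >>> a = np.array(['a', '', 'b'])
+#     >>> a != a.dtype.type()
+#     array([ True, False,  True])
+#     >>> np.count_nonzero(a, axis=0)
+#     2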
+
+@set_module('numpy')
+def isfortran(a):
+ """
+ Check if the array is Fortran contiguous but *not* C contiguous.
+
+ This function is obsolete and, because of changes due to relaxed stride
+ checking, its return value for the same array may differ for versions
+ of NumPy >= 1.10.0 and previous versions. If you only want to check if an
+ array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+
+ Returns
+ -------
+ isfortran : bool
+ Returns True if the array is Fortran contiguous but *not* C contiguous.
+
+
+ Examples
+ --------
+
+ np.array allows you to specify whether the array is written in C-contiguous
+ order (last index varies the fastest), or FORTRAN-contiguous order in
+ memory (first index varies the fastest).
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.isfortran(a)
+ False
+
+ >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ >>> b
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.isfortran(b)
+ True
+
+
+ The transpose of a C-ordered array is a FORTRAN-ordered array.
+
+ >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
+ >>> a
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> np.isfortran(a)
+ False
+ >>> b = a.T
+ >>> b
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> np.isfortran(b)
+ True
+
+ C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
+
+ >>> np.isfortran(np.array([1, 2], order='F'))
+ False
+
+ """
+ return a.flags.fnc
+
+
+def _argwhere_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_argwhere_dispatcher)
+def argwhere(a):
+ """
+ Find the indices of array elements that are non-zero, grouped by element.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+
+ Returns
+ -------
+ index_array : (N, a.ndim) ndarray
+ Indices of elements that are non-zero. Indices are grouped by element.
+ This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
+ non-zero items.
+
+ See Also
+ --------
+ where, nonzero
+
+ Notes
+ -----
+ ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
+ but produces a result of the correct shape for a 0D array.
+
+ The output of ``argwhere`` is not suitable for indexing arrays.
+ For this purpose use ``nonzero(a)`` instead.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.argwhere(x>1)
+ array([[0, 2],
+ [1, 0],
+ [1, 1],
+ [1, 2]])
+
+ """
+ # nonzero does not behave well on 0d, so promote to 1d
+ if np.ndim(a) == 0:
+ a = shape_base.atleast_1d(a)
+ # then remove the added dimension
+ return argwhere(a)[:,:0]
+ return transpose(nonzero(a))
+
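+# Illustrative sketch (not part of the library source): the 0-d branch above
+# yields one zero-length index row per non-zero scalar:
+#
+#     >>> np.argwhere(np.array(5)).shape
+#     (1, 0)
+#     >>> np.argwhere(np.array(0)).shape
+#     (0, 0)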
+
+def _flatnonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_flatnonzero_dispatcher)
+def flatnonzero(a):
+ """
+ Return indices that are non-zero in the flattened version of a.
+
+ This is equivalent to ``np.nonzero(np.ravel(a))[0]``.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+
+ Returns
+ -------
+ res : ndarray
+ Output array, containing the indices of the elements of ``a.ravel()``
+ that are non-zero.
+
+ See Also
+ --------
+ nonzero : Return the indices of the non-zero elements of the input array.
+ ravel : Return a 1-D array containing the elements of the input array.
+
+ Examples
+ --------
+ >>> x = np.arange(-2, 3)
+ >>> x
+ array([-2, -1, 0, 1, 2])
+ >>> np.flatnonzero(x)
+ array([0, 1, 3, 4])
+
+ Use the indices of the non-zero elements as an index array to extract
+ these elements:
+
+ >>> x.ravel()[np.flatnonzero(x)]
+ array([-2, -1, 1, 2])
+
+ """
+ return np.nonzero(np.ravel(a))[0]
+
+
+def _correlate_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_correlate_dispatcher)
+def correlate(a, v, mode='valid'):
+ r"""
+ Cross-correlation of two 1-dimensional sequences.
+
+ This function computes the correlation as generally defined in signal
+ processing texts:
+
+ .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n
+
+ with a and v sequences being zero-padded where necessary and
+ :math:`\overline x` denoting complex conjugation.
+
+ Parameters
+ ----------
+ a, v : array_like
+ Input sequences.
+ mode : {'valid', 'same', 'full'}, optional
+ Refer to the `convolve` docstring. Note that the default
+ is 'valid', unlike `convolve`, which uses 'full'.
+ old_behavior : bool
+ `old_behavior` was removed in NumPy 1.10. If you need the old
+ behavior, use `multiarray.correlate`.
+
+ Returns
+ -------
+ out : ndarray
+ Discrete cross-correlation of `a` and `v`.
+
+ See Also
+ --------
+ convolve : Discrete, linear convolution of two one-dimensional sequences.
+ multiarray.correlate : Old, no conjugate, version of correlate.
+ scipy.signal.correlate : uses FFT, which has superior performance for large arrays.
+
+ Notes
+ -----
+ The definition of correlation above is not unique and sometimes correlation
+ may be defined differently. Another common definition is:
+
+ .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}}
+
+ which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.
+
+ `numpy.correlate` may perform slowly for large arrays (e.g. n = 1e5) because
+ it does not use the FFT to compute the convolution; in that case,
+ `scipy.signal.correlate` might be preferable.
+
+
+ Examples
+ --------
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5])
+ array([3.5])
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
+ array([2. , 3.5, 3. ])
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
+ array([0.5, 2. , 3.5, 3. , 0. ])
+
+ Using complex sequences:
+
+ >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
+ array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
+
+ Note that you get the time reversed, complex conjugated result
+ (:math:`\overline{c_{-k}}`) when the two input sequences a and v change
+ places:
+
+ >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
+ array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
+
+ """
+ return multiarray.correlate2(a, v, mode)
+
+
+def _convolve_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_convolve_dispatcher)
+def convolve(a, v, mode='full'):
+ """
+ Returns the discrete, linear convolution of two one-dimensional sequences.
+
+ The convolution operator is often seen in signal processing, where it
+ models the effect of a linear time-invariant system on a signal [1]_. In
+ probability theory, the sum of two independent random variables is
+ distributed according to the convolution of their individual
+ distributions.
+
+ If `v` is longer than `a`, the arrays are swapped before computation.
+
+ Parameters
+ ----------
+ a : (N,) array_like
+ First one-dimensional input array.
+ v : (M,) array_like
+ Second one-dimensional input array.
+ mode : {'full', 'valid', 'same'}, optional
+ 'full':
+ By default, mode is 'full'. This returns the convolution
+ at each point of overlap, with an output shape of (N+M-1,). At
+ the end-points of the convolution, the signals do not overlap
+ completely, and boundary effects may be seen.
+
+ 'same':
+ Mode 'same' returns output of length ``max(M, N)``. Boundary
+ effects are still visible.
+
+ 'valid':
+ Mode 'valid' returns output of length
+ ``max(M, N) - min(M, N) + 1``. The convolution product is only given
+ for points where the signals overlap completely. Values outside
+ the signal boundary have no effect.
+
+ Returns
+ -------
+ out : ndarray
+ Discrete, linear convolution of `a` and `v`.
+
+ See Also
+ --------
+ scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
+ Transform.
+ scipy.linalg.toeplitz : Used to construct the convolution operator.
+ polymul : Polynomial multiplication. Same output as convolve, but also
+ accepts poly1d objects as input.
+
+ Notes
+ -----
+ The discrete convolution operation is defined as
+
+ .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m}
+
+ It can be shown that a convolution :math:`x(t) * y(t)` in time/space
+ is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
+ domain, after appropriate padding (padding is necessary to prevent
+ circular convolution). Since multiplication is more efficient (faster)
+ than convolution, the function `scipy.signal.fftconvolve` exploits the
+ FFT to calculate the convolution of large data-sets.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Convolution",
+ https://en.wikipedia.org/wiki/Convolution
+
+ Examples
+ --------
+ Note how the convolution operator flips the second array
+ before "sliding" the two across one another:
+
+ >>> np.convolve([1, 2, 3], [0, 1, 0.5])
+ array([0. , 1. , 2.5, 4. , 1.5])
+
+ Only return the middle values of the convolution.
+ Contains boundary effects, where zeros are taken
+ into account:
+
+ >>> np.convolve([1,2,3],[0,1,0.5], 'same')
+ array([1. , 2.5, 4. ])
+
+ The two arrays are of the same length, so there
+ is only one position where they completely overlap:
+
+ >>> np.convolve([1,2,3],[0,1,0.5], 'valid')
+ array([2.5])
+
+ """
+ a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
+ if (len(v) > len(a)):
+ a, v = v, a
+ if len(a) == 0:
+ raise ValueError('a cannot be empty')
+ if len(v) == 0:
+ raise ValueError('v cannot be empty')
+ return multiarray.correlate(a, v[::-1], mode)
+
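+# Illustrative sketch (not part of the library source): as implemented above,
+# convolution is correlation with the second sequence reversed:
+#
+#     >>> a, v = [1, 2, 3], [0, 1, 0.5]
+#     >>> np.array_equal(np.convolve(a, v), np.correlate(a, v[::-1], 'full'))
+#     True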
+
+def _outer_dispatcher(a, b, out=None):
+ return (a, b, out)
+
+
+@array_function_dispatch(_outer_dispatcher)
+def outer(a, b, out=None):
+ """
+ Compute the outer product of two vectors.
+
+ Given two vectors, ``a = [a0, a1, ..., aM]`` and
+ ``b = [b0, b1, ..., bN]``,
+ the outer product [1]_ is::
+
+ [[a0*b0 a0*b1 ... a0*bN ]
+ [a1*b0 .
+ [ ... .
+ [aM*b0 aM*bN ]]
+
+ Parameters
+ ----------
+ a : (M,) array_like
+ First input vector. Input is flattened if
+ not already 1-dimensional.
+ b : (N,) array_like
+ Second input vector. Input is flattened if
+ not already 1-dimensional.
+ out : (M, N) ndarray, optional
+ A location where the result is stored.
+
+ .. versionadded:: 1.9.0
+
+ Returns
+ -------
+ out : (M, N) ndarray
+ ``out[i, j] = a[i] * b[j]``
+
+ See Also
+ --------
+ inner
+ einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
+ ufunc.outer : A generalization to dimensions other than 1D and other
+ operations. ``np.multiply.outer(a.ravel(), b.ravel())``
+ is the equivalent.
+ tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
+ is the equivalent.
+
+ References
+ ----------
+ .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
+ ed., Baltimore, MD, Johns Hopkins University Press, 1996,
+ pg. 8.
+
+ Examples
+ --------
+ Make a (*very* coarse) grid for computing a Mandelbrot set:
+
+ >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
+ >>> rl
+ array([[-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.],
+ [-2., -1., 0., 1., 2.]])
+ >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
+ >>> im
+ array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
+ [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
+ [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
+ [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
+ [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
+ >>> grid = rl + im
+ >>> grid
+ array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
+ [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
+ [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
+ [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
+ [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
+
+ An example using a "vector" of letters:
+
+ >>> x = np.array(['a', 'b', 'c'], dtype=object)
+ >>> np.outer(x, [1, 2, 3])
+ array([['a', 'aa', 'aaa'],
+ ['b', 'bb', 'bbb'],
+ ['c', 'cc', 'ccc']], dtype=object)
+
+ """
+ a = asarray(a)
+ b = asarray(b)
+ return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
+
+
+def _tensordot_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensordot_dispatcher)
+def tensordot(a, b, axes=2):
+ """
+ Compute tensor dot product along specified axes.
+
+ Given two tensors, `a` and `b`, and an array_like object containing
+ two array_like objects, ``(a_axes, b_axes)``, sum the products of
+ `a`'s and `b`'s elements (components) over the axes specified by
+ ``a_axes`` and ``b_axes``. The third argument can be a single non-negative
+ integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
+ of `a` and the first ``N`` dimensions of `b` are summed over.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Tensors to "dot".
+
+ axes : int or (2,) array_like
+ * integer_like
+ If an int N, sum over the last N axes of `a` and the first N axes
+ of `b` in order. The sizes of the corresponding axes must match.
+ * (2,) array_like
+ Or, a list of axes to be summed over, first sequence applying to `a`,
+ second to `b`. Both sequences must be of the same length.
+
+ Returns
+ -------
+ output : ndarray
+ The tensor dot product of the inputs.
+
+ See Also
+ --------
+ dot, einsum
+
+ Notes
+ -----
+ Three common use cases are:
+ * ``axes = 0`` : tensor product :math:`a\\otimes b`
+ * ``axes = 1`` : tensor dot product :math:`a\\cdot b`
+ * ``axes = 2`` : (default) tensor double contraction :math:`a:b`
+
+ When `axes` is integer_like, the sequence of axes for evaluation will be:
+ first the -Nth axis in `a` and 0th axis in `b`, and finally the -1th axis
+ in `a` and (N-1)th axis in `b`.
+
+ When there is more than one axis to sum over - and they are not the last
+ (first) axes of `a` (`b`) - the argument `axes` should consist of
+ two sequences of the same length, with the first axis to sum over given
+ first in both sequences, the second axis second, and so forth.
+
+ The shape of the result consists of the non-contracted axes of the
+ first tensor, followed by the non-contracted axes of the second.
+
+ Examples
+ --------
+ A "traditional" example:
+
+ >>> a = np.arange(60.).reshape(3,4,5)
+ >>> b = np.arange(24.).reshape(4,3,2)
+ >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
+ >>> c.shape
+ (5, 2)
+ >>> c
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
+ >>> # A slower but equivalent way of computing the same...
+ >>> d = np.zeros((5,2))
+ >>> for i in range(5):
+ ... for j in range(2):
+ ... for k in range(3):
+ ... for n in range(4):
+ ... d[i,j] += a[k,n,i] * b[n,k,j]
+ >>> c == d
+ array([[ True, True],
+ [ True, True],
+ [ True, True],
+ [ True, True],
+ [ True, True]])
+
+ An extended example taking advantage of the overloading of + and \\*:
+
+ >>> a = np.array(range(1, 9))
+ >>> a.shape = (2, 2, 2)
+ >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
+ >>> A.shape = (2, 2)
+ >>> a; A
+ array([[[1, 2],
+ [3, 4]],
+ [[5, 6],
+ [7, 8]]])
+ array([['a', 'b'],
+ ['c', 'd']], dtype=object)
+
+ >>> np.tensordot(a, A) # third argument default is 2 for double-contraction
+ array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
+
+ >>> np.tensordot(a, A, 1)
+ array([[['acc', 'bdd'],
+ ['aaacccc', 'bbbdddd']],
+ [['aaaaacccccc', 'bbbbbdddddd'],
+ ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
+
+ >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
+ array([[[[['a', 'b'],
+ ['c', 'd']],
+ ...
+
+ >>> np.tensordot(a, A, (0, 1))
+ array([[['abbbbb', 'cddddd'],
+ ['aabbbbbb', 'ccdddddd']],
+ [['aaabbbbbbb', 'cccddddddd'],
+ ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
+
+ >>> np.tensordot(a, A, (2, 1))
+ array([[['abb', 'cdd'],
+ ['aaabbbb', 'cccdddd']],
+ [['aaaaabbbbbb', 'cccccdddddd'],
+ ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
+
+ >>> np.tensordot(a, A, ((0, 1), (0, 1)))
+ array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
+
+ >>> np.tensordot(a, A, ((2, 1), (1, 0)))
+ array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
+
+ """
+ try:
+ iter(axes)
+ except Exception:
+ axes_a = list(range(-axes, 0))
+ axes_b = list(range(0, axes))
+ else:
+ axes_a, axes_b = axes
+ try:
+ na = len(axes_a)
+ axes_a = list(axes_a)
+ except TypeError:
+ axes_a = [axes_a]
+ na = 1
+ try:
+ nb = len(axes_b)
+ axes_b = list(axes_b)
+ except TypeError:
+ axes_b = [axes_b]
+ nb = 1
+
+ a, b = asarray(a), asarray(b)
+ as_ = a.shape
+ nda = a.ndim
+ bs = b.shape
+ ndb = b.ndim
+ equal = True
+ if na != nb:
+ equal = False
+ else:
+ for k in range(na):
+ if as_[axes_a[k]] != bs[axes_b[k]]:
+ equal = False
+ break
+ if axes_a[k] < 0:
+ axes_a[k] += nda
+ if axes_b[k] < 0:
+ axes_b[k] += ndb
+ if not equal:
+ raise ValueError("shape-mismatch for sum")
+
+ # Move the axes to sum over to the end of "a"
+ # and to the front of "b"
+ notin = [k for k in range(nda) if k not in axes_a]
+ newaxes_a = notin + axes_a
+ N2 = 1
+ for axis in axes_a:
+ N2 *= as_[axis]
+ newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
+ olda = [as_[axis] for axis in notin]
+
+ notin = [k for k in range(ndb) if k not in axes_b]
+ newaxes_b = axes_b + notin
+ N2 = 1
+ for axis in axes_b:
+ N2 *= bs[axis]
+ newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
+ oldb = [bs[axis] for axis in notin]
+
+ at = a.transpose(newaxes_a).reshape(newshape_a)
+ bt = b.transpose(newaxes_b).reshape(newshape_b)
+ res = dot(at, bt)
+ return res.reshape(olda + oldb)
+
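+# Illustrative sketch (not part of the library source): for 2-D inputs,
+# ``axes=1`` reduces to an ordinary matrix product, which is what the
+# transpose-reshape-dot pipeline above computes in general:
+#
+#     >>> a = np.arange(6.).reshape(2, 3)
+#     >>> b = np.arange(12.).reshape(3, 4)
+#     >>> np.array_equal(np.tensordot(a, b, axes=1), a @ b)
+#     True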
+
+def _roll_dispatcher(a, shift, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_roll_dispatcher)
+def roll(a, shift, axis=None):
+ """
+ Roll array elements along a given axis.
+
+ Elements that roll beyond the last position are re-introduced at
+ the first.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ shift : int or tuple of ints
+ The number of places by which elements are shifted. If a tuple,
+ then `axis` must be a tuple of the same size, and each of the
+ given axes is shifted by the corresponding number. If an int
+ while `axis` is a tuple of ints, then the same value is used for
+ all given axes.
+ axis : int or tuple of ints, optional
+ Axis or axes along which elements are shifted. By default, the
+ array is flattened before shifting, after which the original
+ shape is restored.
+
+ Returns
+ -------
+ res : ndarray
+ Output array, with the same shape as `a`.
+
+ See Also
+ --------
+ rollaxis : Roll the specified axis backwards, until it lies in a
+ given position.
+
+ Notes
+ -----
+ .. versionadded:: 1.12.0
+
+ Supports rolling over multiple dimensions simultaneously.
+
+ Examples
+ --------
+ >>> x = np.arange(10)
+ >>> np.roll(x, 2)
+ array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
+ >>> np.roll(x, -2)
+ array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
+
+ >>> x2 = np.reshape(x, (2, 5))
+ >>> x2
+ array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]])
+ >>> np.roll(x2, 1)
+ array([[9, 0, 1, 2, 3],
+ [4, 5, 6, 7, 8]])
+ >>> np.roll(x2, -1)
+ array([[1, 2, 3, 4, 5],
+ [6, 7, 8, 9, 0]])
+ >>> np.roll(x2, 1, axis=0)
+ array([[5, 6, 7, 8, 9],
+ [0, 1, 2, 3, 4]])
+ >>> np.roll(x2, -1, axis=0)
+ array([[5, 6, 7, 8, 9],
+ [0, 1, 2, 3, 4]])
+ >>> np.roll(x2, 1, axis=1)
+ array([[4, 0, 1, 2, 3],
+ [9, 5, 6, 7, 8]])
+ >>> np.roll(x2, -1, axis=1)
+ array([[1, 2, 3, 4, 0],
+ [6, 7, 8, 9, 5]])
+ >>> np.roll(x2, (1, 1), axis=(1, 0))
+ array([[9, 5, 6, 7, 8],
+ [4, 0, 1, 2, 3]])
+ >>> np.roll(x2, (2, 1), axis=(1, 0))
+ array([[8, 9, 5, 6, 7],
+ [3, 4, 0, 1, 2]])
+
+ """
+ a = asanyarray(a)
+ if axis is None:
+ return roll(a.ravel(), shift, 0).reshape(a.shape)
+
+ else:
+ axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
+ broadcasted = broadcast(shift, axis)
+ if broadcasted.ndim > 1:
+ raise ValueError(
+ "'shift' and 'axis' should be scalars or 1D sequences")
+ shifts = {ax: 0 for ax in range(a.ndim)}
+ for sh, ax in broadcasted:
+ shifts[ax] += sh
+
+ rolls = [((slice(None), slice(None)),)] * a.ndim
+ for ax, offset in shifts.items():
+ offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
+ if offset:
+ # (original, result), (original, result)
+ rolls[ax] = ((slice(None, -offset), slice(offset, None)),
+ (slice(-offset, None), slice(None, offset)))
+
+ result = empty_like(a)
+ for indices in itertools.product(*rolls):
+ arr_index, res_index = zip(*indices)
+ result[res_index] = a[arr_index]
+
+ return result
+
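+# Illustrative sketch (not part of the library source): for a single axis,
+# the slice-pair reassembly above is equivalent to splitting and
+# concatenating:
+#
+#     >>> x = np.arange(5)
+#     >>> np.array_equal(np.roll(x, 2), np.concatenate((x[-2:], x[:-2])))
+#     True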
+
+def _rollaxis_dispatcher(a, axis, start=None):
+ return (a,)
+
+
+@array_function_dispatch(_rollaxis_dispatcher)
+def rollaxis(a, axis, start=0):
+ """
+ Roll the specified axis backwards, until it lies in a given position.
+
+ This function continues to be supported for backward compatibility, but you
+ should prefer `moveaxis`. The `moveaxis` function was added in NumPy
+ 1.11.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ axis : int
+ The axis to be rolled. The positions of the other axes do not
+ change relative to one another.
+ start : int, optional
+ When ``start <= axis``, the axis is rolled back until it lies in
+ this position. When ``start > axis``, the axis is rolled until it
+ lies before this position. The default, 0, results in a "complete"
+ roll. The following table describes how negative values of ``start``
+ are interpreted:
+
+ .. table::
+ :align: left
+
+ +-------------------+----------------------+
+ | ``start`` | Normalized ``start`` |
+ +===================+======================+
+ | ``-(arr.ndim+1)`` | raise ``AxisError`` |
+ +-------------------+----------------------+
+ | ``-arr.ndim`` | 0 |
+ +-------------------+----------------------+
+ | |vdots| | |vdots| |
+ +-------------------+----------------------+
+ | ``-1`` | ``arr.ndim-1`` |
+ +-------------------+----------------------+
+ | ``0`` | ``0`` |
+ +-------------------+----------------------+
+ | |vdots| | |vdots| |
+ +-------------------+----------------------+
+ | ``arr.ndim`` | ``arr.ndim`` |
+ +-------------------+----------------------+
+ | ``arr.ndim + 1`` | raise ``AxisError`` |
+ +-------------------+----------------------+
+
+ .. |vdots| unicode:: U+22EE .. Vertical Ellipsis
+
+ Returns
+ -------
+ res : ndarray
+ For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
+ NumPy versions a view of `a` is returned only if the order of the
+ axes is changed, otherwise the input array is returned.
+
+ See Also
+ --------
+ moveaxis : Move array axes to new positions.
+ roll : Roll the elements of an array by a number of positions along a
+ given axis.
+
+ Examples
+ --------
+ >>> a = np.ones((3,4,5,6))
+ >>> np.rollaxis(a, 3, 1).shape
+ (3, 6, 4, 5)
+ >>> np.rollaxis(a, 2).shape
+ (5, 3, 4, 6)
+ >>> np.rollaxis(a, 1, 4).shape
+ (3, 5, 6, 4)
+
+ """
+ n = a.ndim
+ axis = normalize_axis_index(axis, n)
+ if start < 0:
+ start += n
+ msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
+ if not (0 <= start < n + 1):
+ raise AxisError(msg % ('start', -n, 'start', n + 1, start))
+ if axis < start:
+ # it's been removed
+ start -= 1
+ if axis == start:
+ return a[...]
+ axes = list(range(0, n))
+ axes.remove(axis)
+ axes.insert(start, axis)
+ return a.transpose(axes)
+
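+# Illustrative sketch (not part of the library source): the recommended
+# `moveaxis` reproduces a "complete" roll to the front:
+#
+#     >>> a = np.ones((3, 4, 5, 6))
+#     >>> np.rollaxis(a, 2).shape == np.moveaxis(a, 2, 0).shape
+#     True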
+
+def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
+ """
+ Normalizes an axis argument into a tuple of non-negative integer axes.
+
+ This handles shorthands such as ``1`` and converts them to ``(1,)``,
+ as well as performing the handling of negative indices covered by
+ `normalize_axis_index`.
+
+ By default, this forbids axes from being specified multiple times.
+
+ Used internally by multi-axis-checking logic.
+
+ .. versionadded:: 1.13.0
+
+ Parameters
+ ----------
+ axis : int, iterable of int
+ The un-normalized index or indices of the axis.
+ ndim : int
+ The number of dimensions of the array that `axis` should be normalized
+ against.
+ argname : str, optional
+ A prefix to put before the error message, typically the name of the
+ argument.
+ allow_duplicate : bool, optional
+ If False, the default, disallow an axis from being specified twice.
+
+ Returns
+ -------
+ normalized_axes : tuple of int
+ The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+ Raises
+ ------
+ AxisError
+ If any axis provided is out of range
+ ValueError
+ If an axis is repeated
+
+ See Also
+ --------
+ normalize_axis_index : normalizing a single scalar axis
+ """
+ # Optimization to speed-up the most common cases.
+ if type(axis) not in (tuple, list):
+ try:
+ axis = [operator.index(axis)]
+ except TypeError:
+ pass
+ # Going via an iterator directly is slower than via list comprehension.
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
+ if not allow_duplicate and len(set(axis)) != len(axis):
+ if argname:
+ raise ValueError('repeated axis in `{}` argument'.format(argname))
+ else:
+ raise ValueError('repeated axis')
+ return axis
+
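+# Usage sketch (not part of the library source):
+#
+#     >>> normalize_axis_tuple(-1, 3)
+#     (2,)
+#     >>> normalize_axis_tuple((0, -2), 3, argname='axis')
+#     (0, 1)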
+
+def _moveaxis_dispatcher(a, source, destination):
+ return (a,)
+
+
+@array_function_dispatch(_moveaxis_dispatcher)
+def moveaxis(a, source, destination):
+ """
+ Move axes of an array to new positions.
+
+ Other axes remain in their original order.
+
+ .. versionadded:: 1.11.0
+
+ Parameters
+ ----------
+ a : np.ndarray
+ The array whose axes should be reordered.
+ source : int or sequence of int
+ Original positions of the axes to move. These must be unique.
+ destination : int or sequence of int
+ Destination positions for each of the original axes. These must also be
+ unique.
+
+ Returns
+ -------
+ result : np.ndarray
+ Array with moved axes. This array is a view of the input array.
+
+ See Also
+ --------
+ transpose : Permute the dimensions of an array.
+ swapaxes : Interchange two axes of an array.
+
+ Examples
+ --------
+ >>> x = np.zeros((3, 4, 5))
+ >>> np.moveaxis(x, 0, -1).shape
+ (4, 5, 3)
+ >>> np.moveaxis(x, -1, 0).shape
+ (5, 3, 4)
+
+ These all achieve the same result:
+
+ >>> np.transpose(x).shape
+ (5, 4, 3)
+ >>> np.swapaxes(x, 0, -1).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
+ (5, 4, 3)
+
+ """
+ try:
+ # allow duck-array types if they define transpose
+ transpose = a.transpose
+ except AttributeError:
+ a = asarray(a)
+ transpose = a.transpose
+
+ source = normalize_axis_tuple(source, a.ndim, 'source')
+ destination = normalize_axis_tuple(destination, a.ndim, 'destination')
+ if len(source) != len(destination):
+ raise ValueError('`source` and `destination` arguments must have '
+ 'the same number of elements')
+
+ order = [n for n in range(a.ndim) if n not in source]
+
+ for dest, src in sorted(zip(destination, source)):
+ order.insert(dest, src)
+
+ result = transpose(order)
+ return result
+
+
+def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
+ return (a, b)
+
+
+@array_function_dispatch(_cross_dispatcher)
+def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
+ """
+ Return the cross product of two (arrays of) vectors.
+
+ The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
+ to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
+ are defined by the last axis of `a` and `b` by default, and these axes
+ can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
+ 2, the third component of the input vector is assumed to be zero and the
+ cross product calculated accordingly. In cases where both input vectors
+ have dimension 2, the z-component of the cross product is returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Components of the first vector(s).
+ b : array_like
+ Components of the second vector(s).
+ axisa : int, optional
+ Axis of `a` that defines the vector(s). By default, the last axis.
+ axisb : int, optional
+ Axis of `b` that defines the vector(s). By default, the last axis.
+ axisc : int, optional
+ Axis of `c` containing the cross product vector(s). Ignored if
+ both input vectors have dimension 2, as the return is scalar.
+ By default, the last axis.
+ axis : int, optional
+ If defined, the axis of `a`, `b` and `c` that defines the vector(s)
+ and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
+
+ Returns
+ -------
+ c : ndarray
+ Vector cross product(s).
+
+ Raises
+ ------
+ ValueError
+ When the dimension of the vector(s) in `a` and/or `b` does not
+ equal 2 or 3.
+
+ See Also
+ --------
+ inner : Inner product.
+ outer : Outer product.
+ ix_ : Construct index arrays.
+
+ Notes
+ -----
+ .. versionadded:: 1.9.0
+
+ Supports full broadcasting of the inputs.
+
+ Examples
+ --------
+ Vector cross-product.
+
+ >>> x = [1, 2, 3]
+ >>> y = [4, 5, 6]
+ >>> np.cross(x, y)
+ array([-3, 6, -3])
+
+ One vector with dimension 2.
+
+ >>> x = [1, 2]
+ >>> y = [4, 5, 6]
+ >>> np.cross(x, y)
+ array([12, -6, -3])
+
+ Equivalently:
+
+ >>> x = [1, 2, 0]
+ >>> y = [4, 5, 6]
+ >>> np.cross(x, y)
+ array([12, -6, -3])
+
+ Both vectors with dimension 2.
+
+ >>> x = [1,2]
+ >>> y = [4,5]
+ >>> np.cross(x, y)
+ array(-3)
+
+ Multiple vector cross-products. Note that the direction of the cross
+ product vector is defined by the *right-hand rule*.
+
+ >>> x = np.array([[1,2,3], [4,5,6]])
+ >>> y = np.array([[4,5,6], [1,2,3]])
+ >>> np.cross(x, y)
+ array([[-3, 6, -3],
+ [ 3, -6, 3]])
+
+ The orientation of `c` can be changed using the `axisc` keyword.
+
+ >>> np.cross(x, y, axisc=0)
+ array([[-3, 3],
+ [ 6, -6],
+ [-3, 3]])
+
+ Change the vector definition of `x` and `y` using `axisa` and `axisb`.
+
+ >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
+ >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
+ >>> np.cross(x, y)
+ array([[ -6, 12, -6],
+ [ 0, 0, 0],
+ [ 6, -12, 6]])
+ >>> np.cross(x, y, axisa=0, axisb=0)
+ array([[-24, 48, -24],
+ [-30, 60, -30],
+ [-36, 72, -36]])
+
+ """
+ if axis is not None:
+ axisa, axisb, axisc = (axis,) * 3
+ a = asarray(a)
+ b = asarray(b)
+ # Check axisa and axisb are within bounds
+ axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
+ axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
+
+ # Move working axis to the end of the shape
+ a = moveaxis(a, axisa, -1)
+ b = moveaxis(b, axisb, -1)
+ msg = ("incompatible dimensions for cross product\n"
+ "(dimension must be 2 or 3)")
+ if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
+ raise ValueError(msg)
+
+ # Create the output array
+ shape = broadcast(a[..., 0], b[..., 0]).shape
+ if a.shape[-1] == 3 or b.shape[-1] == 3:
+ shape += (3,)
+ # Check axisc is within bounds
+ axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
+ dtype = promote_types(a.dtype, b.dtype)
+ cp = empty(shape, dtype)
+
+ # recast arrays as dtype
+ a = a.astype(dtype)
+ b = b.astype(dtype)
+
+ # create local aliases for readability
+ a0 = a[..., 0]
+ a1 = a[..., 1]
+ if a.shape[-1] == 3:
+ a2 = a[..., 2]
+ b0 = b[..., 0]
+ b1 = b[..., 1]
+ if b.shape[-1] == 3:
+ b2 = b[..., 2]
+ if cp.ndim != 0 and cp.shape[-1] == 3:
+ cp0 = cp[..., 0]
+ cp1 = cp[..., 1]
+ cp2 = cp[..., 2]
+
+ if a.shape[-1] == 2:
+ if b.shape[-1] == 2:
+ # a0 * b1 - a1 * b0
+ multiply(a0, b1, out=cp)
+ cp -= a1 * b0
+ return cp
+ else:
+ assert b.shape[-1] == 3
+ # cp0 = a1 * b2 - 0 (a2 = 0)
+ # cp1 = 0 - a0 * b2 (a2 = 0)
+ # cp2 = a0 * b1 - a1 * b0
+ multiply(a1, b2, out=cp0)
+ multiply(a0, b2, out=cp1)
+ negative(cp1, out=cp1)
+ multiply(a0, b1, out=cp2)
+ cp2 -= a1 * b0
+ else:
+ assert a.shape[-1] == 3
+ if b.shape[-1] == 3:
+ # cp0 = a1 * b2 - a2 * b1
+ # cp1 = a2 * b0 - a0 * b2
+ # cp2 = a0 * b1 - a1 * b0
+ multiply(a1, b2, out=cp0)
+ tmp = array(a2 * b1)
+ cp0 -= tmp
+ multiply(a2, b0, out=cp1)
+ multiply(a0, b2, out=tmp)
+ cp1 -= tmp
+ multiply(a0, b1, out=cp2)
+ multiply(a1, b0, out=tmp)
+ cp2 -= tmp
+ else:
+ assert b.shape[-1] == 2
+ # cp0 = 0 - a2 * b1 (b2 = 0)
+ # cp1 = a2 * b0 - 0 (b2 = 0)
+ # cp2 = a0 * b1 - a1 * b0
+ multiply(a2, b1, out=cp0)
+ negative(cp0, out=cp0)
+ multiply(a2, b0, out=cp1)
+ multiply(a0, b1, out=cp2)
+ cp2 -= a1 * b0
+
+ return moveaxis(cp, -1, axisc)
+
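+# Illustrative sketch (not part of the library source): the component
+# formulas above encode the right-hand rule, e.g. e1 x e2 = e3:
+#
+#     >>> np.cross([1, 0, 0], [0, 1, 0])
+#     array([0, 0, 1])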
+
+little_endian = (sys.byteorder == 'little')
+
+
+@set_module('numpy')
+def indices(dimensions, dtype=int, sparse=False):
+ """
+ Return an array representing the indices of a grid.
+
+ Compute an array where the subarrays contain index values 0, 1, ...
+ varying only along the corresponding axis.
+
+ Parameters
+ ----------
+ dimensions : sequence of ints
+ The shape of the grid.
+ dtype : dtype, optional
+ Data type of the result.
+ sparse : boolean, optional
+ Return a sparse representation of the grid instead of a dense
+ representation. Default is False.
+
+ .. versionadded:: 1.17
+
+ Returns
+ -------
+ grid : one ndarray or tuple of ndarrays
+ If sparse is False:
+ Returns one array of grid indices,
+ ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+ If sparse is True:
+ Returns a tuple of arrays, with
+ ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
+ dimensions[i] in the ith place
+
+ See Also
+ --------
+ mgrid, ogrid, meshgrid
+
+ Notes
+ -----
+ The output shape in the dense case is obtained by prepending the number
+ of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
+ is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+ ``(N, r0, ..., rN-1)``.
+
+ Each subarray ``grid[k]`` contains the N-D array of indices along the
+ ``k-th`` axis. Explicitly::
+
+ grid[k, i0, i1, ..., iN-1] = ik
+
+ Examples
+ --------
+ >>> grid = np.indices((2, 3))
+ >>> grid.shape
+ (2, 2, 3)
+ >>> grid[0] # row indices
+ array([[0, 0, 0],
+ [1, 1, 1]])
+ >>> grid[1] # column indices
+ array([[0, 1, 2],
+ [0, 1, 2]])
+
+ The indices can be used as an index into an array.
+
+ >>> x = np.arange(20).reshape(5, 4)
+ >>> row, col = np.indices((2, 3))
+ >>> x[row, col]
+ array([[0, 1, 2],
+ [4, 5, 6]])
+
+ Note that it would be more straightforward in the above example to
+ extract the required elements directly with ``x[:2, :3]``.
+
+ If sparse is set to true, the grid will be returned in a sparse
+ representation.
+
+ >>> i, j = np.indices((2, 3), sparse=True)
+ >>> i.shape
+ (2, 1)
+ >>> j.shape
+ (1, 3)
+ >>> i # row indices
+ array([[0],
+ [1]])
+ >>> j # column indices
+ array([[0, 1, 2]])
+
+ """
+ dimensions = tuple(dimensions)
+ N = len(dimensions)
+ shape = (1,)*N
+ if sparse:
+ res = tuple()
+ else:
+ res = empty((N,)+dimensions, dtype=dtype)
+ for i, dim in enumerate(dimensions):
+ idx = arange(dim, dtype=dtype).reshape(
+ shape[:i] + (dim,) + shape[i+1:]
+ )
+ if sparse:
+ res = res + (idx,)
+ else:
+ res[i] = idx
+ return res
+
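+# Illustrative sketch (not part of the library source): the sparse grids
+# broadcast against each other to recover the dense result:
+#
+#     >>> i, j = np.indices((2, 3), sparse=True)
+#     >>> i + j
+#     array([[0, 1, 2],
+#            [1, 2, 3]])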
+
+def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
+ """
+ Construct an array by executing a function over each coordinate.
+
+ The resulting array therefore has a value ``fn(x, y, z)`` at
+ coordinate ``(x, y, z)``.
+
+ Parameters
+ ----------
+ function : callable
+ The function is called with N parameters, where N is the rank of
+ `shape`. Each parameter represents the coordinates of the array
+ varying along a specific axis. For example, if `shape`
+ were ``(2, 2)``, then the parameters would be
+ ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
+ shape : (N,) tuple of ints
+ Shape of the output array, which also determines the shape of
+ the coordinate arrays passed to `function`.
+ dtype : data-type, optional
+ Data-type of the coordinate arrays passed to `function`.
+ By default, `dtype` is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ fromfunction : any
+ The result of the call to `function` is passed back directly.
+ Therefore the shape of `fromfunction` is completely determined by
+ `function`. If `function` returns a scalar value, the shape of
+ `fromfunction` would not match the `shape` parameter.
+
+ See Also
+ --------
+ indices, meshgrid
+
+ Notes
+ -----
+ Keywords other than `dtype` and `like` are passed to `function`.
+
+ Examples
+ --------
+ >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)
+ array([[0., 0.],
+ [1., 1.]])
+
+ >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)
+ array([[0., 1.],
+ [0., 1.]])
+
+ >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
+ array([[ True, False, False],
+ [False, True, False],
+ [False, False, True]])
+
+ >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
+ array([[0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 4]])
+
+ """
+ if like is not None:
+ return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
+
+ args = indices(shape, dtype=dtype)
+ return function(*args, **kwargs)
+
+
+_fromfunction_with_like = array_function_dispatch(
+ _fromfunction_dispatcher, use_like=True
+)(fromfunction)
+
+
+def _frombuffer(buf, dtype, shape, order):
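+ # Rebuild an array from raw bytes with the given dtype, shape and memory
+ # order (an internal helper, used e.g. when reconstructing pickled arrays).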
+ return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
+
+
+@set_module('numpy')
+def isscalar(element):
+ """
+ Returns True if the type of `element` is a scalar type.
+
+ Parameters
+ ----------
+ element : any
+ Input argument, can be of any type and shape.
+
+ Returns
+ -------
+ val : bool
+ True if `element` is a scalar type, False if it is not.
+
+ See Also
+ --------
+ ndim : Get the number of dimensions of an array
+
+ Notes
+ -----
+ If you need a stricter way to identify a *numerical* scalar, use
+ ``isinstance(x, numbers.Number)``, as that returns ``False`` for most
+ non-numerical elements such as strings.
+
+ In most cases ``np.ndim(x) == 0`` should be used instead of this function,
+ as that will also return true for 0d arrays. This is how numpy overloads
+ functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
+ argument to `histogram`. Some key differences:
+
+ +--------------------------------------+---------------+-------------------+
+ | x |``isscalar(x)``|``np.ndim(x) == 0``|
+ +======================================+===============+===================+
+ | PEP 3141 numeric objects (including | ``True`` | ``True`` |
+ | builtins) | | |
+ +--------------------------------------+---------------+-------------------+
+ | builtin string and buffer objects | ``True`` | ``True`` |
+ +--------------------------------------+---------------+-------------------+
+ | other builtin objects, like | ``False`` | ``True`` |
+ | `pathlib.Path`, `Exception`, | | |
+ | the result of `re.compile` | | |
+ +--------------------------------------+---------------+-------------------+
+ | third-party objects like | ``False`` | ``True`` |
+ | `matplotlib.figure.Figure` | | |
+ +--------------------------------------+---------------+-------------------+
+ | zero-dimensional numpy arrays | ``False`` | ``True`` |
+ +--------------------------------------+---------------+-------------------+
+ | other numpy arrays | ``False`` | ``False`` |
+ +--------------------------------------+---------------+-------------------+
+ | `list`, `tuple`, and other sequence | ``False`` | ``False`` |
+ | objects | | |
+ +--------------------------------------+---------------+-------------------+
+
+ Examples
+ --------
+ >>> np.isscalar(3.1)
+ True
+ >>> np.isscalar(np.array(3.1))
+ False
+ >>> np.isscalar([3.1])
+ False
+ >>> np.isscalar(False)
+ True
+ >>> np.isscalar('numpy')
+ True
+
+ NumPy supports PEP 3141 numbers:
+
+ >>> from fractions import Fraction
+ >>> np.isscalar(Fraction(5, 17))
+ True
+ >>> from numbers import Number
+ >>> np.isscalar(Number())
+ True
+
+ """
+ return (isinstance(element, generic)
+ or type(element) in ScalarType
+ or isinstance(element, numbers.Number))
+
+
+@set_module('numpy')
+def binary_repr(num, width=None):
+ """
+ Return the binary representation of the input number as a string.
+
+ For negative numbers, if width is not given, a minus sign is added to the
+ front. If width is given, the two's complement of the number is
+ returned, with respect to that width.
+
+ In a two's-complement system negative numbers are represented by the two's
+ complement of the absolute value. This is the most common method of
+ representing signed integers on computers [1]_. An N-bit two's-complement
+ system can represent every integer in the range
+ :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
+
+ Parameters
+ ----------
+ num : int
+ Only an integer decimal number can be used.
+ width : int, optional
+ The length of the returned string if `num` is positive, or the length
+ of the two's complement if `num` is negative, provided that `width` is
+ at least a sufficient number of bits for `num` to be represented in the
+ designated form.
+
+ If the `width` value is insufficient, it will be ignored, and `num` will
+ be returned in binary (`num` > 0) or two's complement (`num` < 0) form
+ with its width equal to the minimum number of bits needed to represent
+ the number in the designated form. This behavior is deprecated and will
+ later raise an error.
+
+ .. deprecated:: 1.12.0
+
+ Returns
+ -------
+ bin : str
+ Binary representation of `num` or two's complement of `num`.
+
+ See Also
+ --------
+ base_repr: Return a string representation of a number in the given base
+ system.
+ bin: Python's built-in binary representation generator of an integer.
+
+ Notes
+ -----
+ `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
+ faster.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Two's complement",
+ https://en.wikipedia.org/wiki/Two's_complement
+
+ Examples
+ --------
+ >>> np.binary_repr(3)
+ '11'
+ >>> np.binary_repr(-3)
+ '-11'
+ >>> np.binary_repr(3, width=4)
+ '0011'
+
+ The two's complement is returned when the input number is negative and
+ width is specified:
+
+ >>> np.binary_repr(-3, width=3)
+ '101'
+ >>> np.binary_repr(-3, width=5)
+ '11101'
+
+ """
+ def warn_if_insufficient(width, binwidth):
+ if width is not None and width < binwidth:
+ warnings.warn(
+ "Insufficient bit width provided. This behavior "
+ "will raise an error in the future.", DeprecationWarning,
+ stacklevel=3)
+
+ # Ensure that num is a Python integer to avoid overflow or unwanted
+ # casts to floating point.
+ num = operator.index(num)
+
+ if num == 0:
+ return '0' * (width or 1)
+
+ elif num > 0:
+ binary = bin(num)[2:]
+ binwidth = len(binary)
+ outwidth = (binwidth if width is None
+ else max(binwidth, width))
+ warn_if_insufficient(width, binwidth)
+ return binary.zfill(outwidth)
+
+ else:
+ if width is None:
+ return '-' + bin(-num)[2:]
+
+ else:
+ poswidth = len(bin(-num)[2:])
+
+ # See gh-8679: remove extra digit
+ # for numbers at boundaries.
+ if 2**(poswidth - 1) == -num:
+ poswidth -= 1
+
+ twocomp = 2**(poswidth + 1) + num
+ binary = bin(twocomp)[2:]
+ binwidth = len(binary)
+
+ outwidth = max(binwidth, width)
+ warn_if_insufficient(width, binwidth)
+ return '1' * (outwidth - binwidth) + binary
+
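+# Illustrative sketch (not part of the library source): the two's complement
+# computed above satisfies ``2**width + num`` for in-range negative ``num``:
+#
+#     >>> np.binary_repr(-3, width=5)
+#     '11101'
+#     >>> bin(2**5 - 3)
+#     '0b11101'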
+
+@set_module('numpy')
+def base_repr(number, base=2, padding=0):
+ """
+ Return a string representation of a number in the given base system.
+
+ Parameters
+ ----------
+ number : int
+ The value to convert. Positive and negative values are handled.
+ base : int, optional
+ Convert `number` to the `base` number system. The valid range is 2-36;
+ the default value is 2.
+ padding : int, optional
+ Number of zeros padded on the left. Default is 0 (no padding).
+
+ Returns
+ -------
+ out : str
+ String representation of `number` in `base` system.
+
+ See Also
+ --------
+ binary_repr : Faster version of `base_repr` for base 2.
+
+ Examples
+ --------
+ >>> np.base_repr(5)
+ '101'
+ >>> np.base_repr(6, 5)
+ '11'
+ >>> np.base_repr(7, base=5, padding=3)
+ '00012'
+
+ >>> np.base_repr(10, base=16)
+ 'A'
+ >>> np.base_repr(32, base=16)
+ '20'
+
+ """
+ digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ if base > len(digits):
+ raise ValueError("Bases greater than 36 not handled in base_repr.")
+ elif base < 2:
+ raise ValueError("Bases less than 2 not handled in base_repr.")
+
+ num = abs(number)
+ res = []
+ while num:
+ res.append(digits[num % base])
+ num //= base
+ if padding:
+ res.append('0' * padding)
+ if number < 0:
+ res.append('-')
+ return ''.join(reversed(res or '0'))
+
+
+# These are all essentially abbreviations
+# These might wind up in a special abbreviations module
+
+
+def _maketup(descr, val):
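+ # Recursively build a nested tuple with the layout of the structured
+ # dtype `descr`, placing `val` in every scalar field.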
+ dt = dtype(descr)
+ # Place val in all scalar tuples:
+ fields = dt.fields
+ if fields is None:
+ return val
+ else:
+ res = [_maketup(fields[name][0], val) for name in dt.names]
+ return tuple(res)
+
+
+def _identity_dispatcher(n, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def identity(n, dtype=None, *, like=None):
+ """
+ Return the identity array.
+
+ The identity array is a square array with ones on
+ the main diagonal.
+
+ Parameters
+ ----------
+ n : int
+ Number of rows (and columns) in `n` x `n` output.
+ dtype : data-type, optional
+ Data-type of the output. Defaults to ``float``.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ `n` x `n` array with its main diagonal set to one,
+ and all other elements 0.
+
+ Examples
+ --------
+ >>> np.identity(3)
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ """
+ if like is not None:
+ return _identity_with_like(n, dtype=dtype, like=like)
+
+ from numpy import eye
+ return eye(n, dtype=dtype, like=like)
+
+
+_identity_with_like = array_function_dispatch(
+ _identity_dispatcher, use_like=True
+)(identity)
+
+
+def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_allclose_dispatcher)
+def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+ """
+ Returns True if two arrays are element-wise equal within a tolerance.
+
+ The tolerance values are positive, typically very small numbers. The
+ relative difference (`rtol` * abs(`b`)) and the absolute difference
+ `atol` are added together to compare against the absolute difference
+ between `a` and `b`.
+
+ NaNs are treated as equal if they are in the same place and if
+ ``equal_nan=True``. Infs are treated as equal if they are in the same
+ place and of the same sign in both arrays.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to compare.
+ rtol : float
+ The relative tolerance parameter (see Notes).
+ atol : float
+ The absolute tolerance parameter (see Notes).
+ equal_nan : bool
+ Whether to compare NaN's as equal. If True, NaN's in `a` will be
+ considered equal to NaN's in `b` in the output array.
+
+ .. versionadded:: 1.10.0
+
+ Returns
+ -------
+ allclose : bool
+ Returns True if the two arrays are equal within the given
+ tolerance; False otherwise.
+
+ See Also
+ --------
+ isclose, all, any, equal
+
+ Notes
+ -----
+ If the following equation is element-wise True, then allclose returns
+ True.
+
+ absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+ The above equation is not symmetric in `a` and `b`, so that
+ ``allclose(a, b)`` might be different from ``allclose(b, a)`` in
+ some rare cases.
+
+ The comparison of `a` and `b` uses standard broadcasting, which
+ means that `a` and `b` need not have the same shape in order for
+ ``allclose(a, b)`` to evaluate to True. The same is true for
+ `equal` but not `array_equal`.
+
+ `allclose` is not defined for non-numeric data types.
+ `bool` is considered a numeric data-type for this purpose.
+
+ Examples
+ --------
+ >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
+ False
+ >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
+ True
+ >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
+ False
+ >>> np.allclose([1.0, np.nan], [1.0, np.nan])
+ False
+ >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+ True
+
+ """
+ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
+ return bool(res)
+
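+# Illustrative sketch (not part of the library source): because the tolerance
+# scales with ``b`` only, swapping the arguments can change the answer near
+# the tolerance boundary:
+#
+#     >>> np.isclose(1.0, 1.5, rtol=0.4, atol=0.0)
+#     True
+#     >>> np.isclose(1.5, 1.0, rtol=0.4, atol=0.0)
+#     False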
+
+def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_isclose_dispatcher)
+def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
+ """
+ Returns a boolean array where two arrays are element-wise equal within a
+ tolerance.
+
+ The tolerance values are positive, typically very small numbers. The
+ relative difference (`rtol` * abs(`b`)) and the absolute difference
+ `atol` are added together to compare against the absolute difference
+ between `a` and `b`.
+
+ .. warning:: The default `atol` is not appropriate for comparing numbers
+ that are much smaller than one (see Notes).
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to compare.
+ rtol : float
+ The relative tolerance parameter (see Notes).
+ atol : float
+ The absolute tolerance parameter (see Notes).
+ equal_nan : bool
+ Whether to compare NaN's as equal. If True, NaN's in `a` will be
+ considered equal to NaN's in `b` in the output array.
+
+ Returns
+ -------
+ y : array_like
+ Returns a boolean array of where `a` and `b` are equal within the
+ given tolerance. If both `a` and `b` are scalars, returns a single
+ boolean value.
+
+ See Also
+ --------
+ allclose
+ math.isclose
+
+ Notes
+ -----
+ .. versionadded:: 1.7.0
+
+ For finite values, isclose uses the following equation to test whether
+ two floating point values are equivalent.
+
+ absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+ Unlike the built-in `math.isclose`, the above equation is not symmetric
+ in `a` and `b` -- it assumes `b` is the reference value -- so that
+ `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
+ the default value of atol is not zero, and is used to determine what
+ small values should be considered close to zero. The default value is
+ appropriate for expected values of order unity: if the expected values
+ are significantly smaller than one, it can result in false positives.
+ `atol` should be carefully selected for the use case at hand. A zero value
+ for `atol` will result in `False` if either `a` or `b` is zero.
+
+ `isclose` is not defined for non-numeric data types.
+ `bool` is considered a numeric data-type for this purpose.
+
+ Examples
+ --------
+ >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
+ array([ True, False])
+ >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
+ array([ True, True])
+ >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
+ array([False, True])
+ >>> np.isclose([1.0, np.nan], [1.0, np.nan])
+ array([ True, False])
+ >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+ array([ True, True])
+ >>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
+ array([ True, False])
+ >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
+ array([False, False])
+ >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
+ array([ True, True])
+ >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
+ array([False, True])
+ """
+ def within_tol(x, y, atol, rtol):
+ with errstate(invalid='ignore'), _no_nep50_warning():
+ return less_equal(abs(x-y), atol + rtol * abs(y))
+
+ x = asanyarray(a)
+ y = asanyarray(b)
+
+ # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
+ # This will cause casting of x later. Also, make sure to allow subclasses
+ # (e.g., for numpy.ma).
+ # NOTE: We explicitly allow timedelta, which used to work. This could
+ # possibly be deprecated. See also gh-18286.
+ # timedelta works if `atol` is an integer or also a timedelta.
+ # Although, the default tolerances are unlikely to be useful
+ if y.dtype.kind != "m":
+ dt = multiarray.result_type(y, 1.)
+ y = asanyarray(y, dtype=dt)
+
+ xfin = isfinite(x)
+ yfin = isfinite(y)
+ if all(xfin) and all(yfin):
+ return within_tol(x, y, atol, rtol)
+ else:
+ finite = xfin & yfin
+ cond = zeros_like(finite, subok=True)
+ # Because we're using boolean indexing, x & y must be the same shape.
+ # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
+ # lib.stride_tricks, though, so we can't import it here.
+ x = x * ones_like(cond)
+ y = y * ones_like(cond)
+ # Avoid subtraction with infinite/nan values...
+ cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
+ # Check for equality of infinite values...
+ cond[~finite] = (x[~finite] == y[~finite])
+ if equal_nan:
+ # Make NaN == NaN
+ both_nan = isnan(x) & isnan(y)
+
+            # Needed to treat masked arrays correctly; assigning True
+            # directly would not work.
+ cond[both_nan] = both_nan[both_nan]
+
+ return cond[()] # Flatten 0d arrays to scalars
+
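For all-finite inputs the implementation reduces to the documented inequality; a quick self-check of that equivalence (a sketch, not part of the module)::

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.normal(size=100)
    b = a + rng.normal(scale=1e-7, size=100)

    rtol, atol = 1e-5, 1e-8
    expected = np.abs(a - b) <= atol + rtol * np.abs(b)
    assert np.array_equal(np.isclose(a, b, rtol=rtol, atol=atol), expected)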
+
+def _array_equal_dispatcher(a1, a2, equal_nan=None):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equal_dispatcher)
+def array_equal(a1, a2, equal_nan=False):
+ """
+ True if two arrays have the same shape and elements, False otherwise.
+
+ Parameters
+ ----------
+ a1, a2 : array_like
+ Input arrays.
+ equal_nan : bool
+ Whether to compare NaN's as equal. If the dtype of a1 and a2 is
+ complex, values will be considered equal if either the real or the
+ imaginary component of a given value is ``nan``.
+
+ .. versionadded:: 1.19.0
+
+ Returns
+ -------
+ b : bool
+ Returns True if the arrays are equal.
+
+ See Also
+ --------
+ allclose: Returns True if two arrays are element-wise equal within a
+ tolerance.
+ array_equiv: Returns True if input arrays are shape consistent and all
+ elements equal.
+
+ Examples
+ --------
+ >>> np.array_equal([1, 2], [1, 2])
+ True
+ >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
+ True
+ >>> np.array_equal([1, 2], [1, 2, 3])
+ False
+ >>> np.array_equal([1, 2], [1, 4])
+ False
+ >>> a = np.array([1, np.nan])
+ >>> np.array_equal(a, a)
+ False
+ >>> np.array_equal(a, a, equal_nan=True)
+ True
+
+ When ``equal_nan`` is True, complex values with nan components are
+ considered equal if either the real *or* the imaginary components are nan.
+
+ >>> a = np.array([1 + 1j])
+ >>> b = a.copy()
+ >>> a.real = np.nan
+ >>> b.imag = np.nan
+ >>> np.array_equal(a, b, equal_nan=True)
+ True
+ """
+ try:
+ a1, a2 = asarray(a1), asarray(a2)
+ except Exception:
+ return False
+ if a1.shape != a2.shape:
+ return False
+ if not equal_nan:
+ return bool(asarray(a1 == a2).all())
+ # Handling NaN values if equal_nan is True
+ a1nan, a2nan = isnan(a1), isnan(a2)
+ # NaN's occur at different locations
+ if not (a1nan == a2nan).all():
+ return False
+ # Shapes of a1, a2 and masks are guaranteed to be consistent by this point
+ return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
+
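The ``equal_nan`` branch first demands that the NaN *masks* agree and only then compares the remaining entries; the same logic written out in user code::

    import numpy as np

    a = np.array([1.0, np.nan, 3.0])
    b = np.array([1.0, np.nan, 3.0])

    mask_a, mask_b = np.isnan(a), np.isnan(b)
    same = (mask_a == mask_b).all() and (a[~mask_a] == b[~mask_a]).all()
    assert bool(same) == np.array_equal(a, b, equal_nan=True)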
+
+def _array_equiv_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equiv_dispatcher)
+def array_equiv(a1, a2):
+ """
+ Returns True if input arrays are shape consistent and all elements equal.
+
+ Shape consistent means they are either the same shape, or one input array
+ can be broadcasted to create the same shape as the other one.
+
+ Parameters
+ ----------
+ a1, a2 : array_like
+ Input arrays.
+
+ Returns
+ -------
+ out : bool
+ True if equivalent, False otherwise.
+
+ Examples
+ --------
+ >>> np.array_equiv([1, 2], [1, 2])
+ True
+ >>> np.array_equiv([1, 2], [1, 3])
+ False
+
+ Showing the shape equivalence:
+
+ >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
+ True
+ >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
+ False
+
+ >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
+ False
+
+ """
+ try:
+ a1, a2 = asarray(a1), asarray(a2)
+ except Exception:
+ return False
+ try:
+ multiarray.broadcast(a1, a2)
+ except Exception:
+ return False
+
+ return bool(asarray(a1 == a2).all())
+
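The only difference from ``array_equal`` is the shape rule: ``array_equiv`` accepts any pair that broadcasts to a common shape::

    import numpy as np

    a = np.array([1, 2])
    b = np.array([[1, 2], [1, 2]])
    print(np.array_equal(a, b))   # False -- shapes differ
    print(np.array_equiv(a, b))   # True  -- a broadcasts to b's shape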
+
+Inf = inf = infty = Infinity = PINF
+nan = NaN = NAN
+False_ = bool_(False)
+True_ = bool_(True)
+
+
+def extend_all(module):
+ existing = set(__all__)
+ mall = getattr(module, '__all__')
+ for a in mall:
+ if a not in existing:
+ __all__.append(a)
+
+
+from .umath import *
+from .numerictypes import *
+from . import fromnumeric
+from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
+from . import _asarray
+from ._asarray import *
+from . import _ufunc_config
+from ._ufunc_config import *
+extend_all(fromnumeric)
+extend_all(umath)
+extend_all(numerictypes)
+extend_all(arrayprint)
+extend_all(_asarray)
+extend_all(_ufunc_config)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/numeric.pyi b/venv/lib/python3.9/site-packages/numpy/core/numeric.pyi
new file mode 100644
index 00000000..98d3789a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/numeric.pyi
@@ -0,0 +1,657 @@
+from collections.abc import Callable, Sequence
+from typing import (
+ Any,
+ overload,
+ TypeVar,
+ Literal,
+ SupportsAbs,
+ SupportsIndex,
+ NoReturn,
+)
+from typing_extensions import TypeGuard
+
+from numpy import (
+ ComplexWarning as ComplexWarning,
+ generic,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ bool_,
+ int_,
+ intp,
+ float64,
+ timedelta64,
+ object_,
+ _OrderKACF,
+ _OrderCF,
+)
+
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ DTypeLike,
+ _ShapeLike,
+ _DTypeLike,
+ _ArrayLike,
+ _SupportsArrayFunc,
+ _ScalarLike_co,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeUnknown,
+)
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_CorrelateMode = Literal["valid", "same", "full"]
+
+__all__: list[str]
+
+@overload
+def zeros_like(
+ a: _ArrayType,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: Literal[True] = ...,
+ shape: None = ...,
+) -> _ArrayType: ...
+@overload
+def zeros_like(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def zeros_like(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[Any]: ...
+@overload
+def zeros_like(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[_SCT]: ...
+@overload
+def zeros_like(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[Any]: ...
+
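Read top to bottom, these overloads encode a precedence: an array subclass with the default ``dtype``/``subok`` keeps its exact type, a typed array-like keeps its scalar type, and anything else falls back to ``NDArray[Any]``. The runtime behavior matches the annotations::

    import numpy as np

    sub = np.zeros_like(np.ma.masked_array([1, 2]))   # first overload:
    assert isinstance(sub, np.ma.MaskedArray)         #   subclass preserved

    typed = np.zeros_like([1, 2], dtype=np.int16)     # dtype overload:
    assert typed.dtype == np.int16                    #   NDArray[int16]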
+@overload
+def ones(
+ shape: _ShapeLike,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def ones(
+ shape: _ShapeLike,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderCF = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ones(
+ shape: _ShapeLike,
+ dtype: DTypeLike,
+ order: _OrderCF = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def ones_like(
+ a: _ArrayType,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: Literal[True] = ...,
+ shape: None = ...,
+) -> _ArrayType: ...
+@overload
+def ones_like(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ones_like(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[Any]: ...
+@overload
+def ones_like(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[_SCT]: ...
+@overload
+def ones_like(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[Any]: ...
+
+@overload
+def full(
+ shape: _ShapeLike,
+ fill_value: Any,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def full(
+ shape: _ShapeLike,
+ fill_value: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderCF = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def full(
+ shape: _ShapeLike,
+ fill_value: Any,
+ dtype: DTypeLike,
+ order: _OrderCF = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def full_like(
+ a: _ArrayType,
+ fill_value: Any,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: Literal[True] = ...,
+ shape: None = ...,
+) -> _ArrayType: ...
+@overload
+def full_like(
+ a: _ArrayLike[_SCT],
+ fill_value: Any,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def full_like(
+ a: object,
+ fill_value: Any,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[Any]: ...
+@overload
+def full_like(
+ a: Any,
+ fill_value: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[_SCT]: ...
+@overload
+def full_like(
+ a: Any,
+ fill_value: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: None | _ShapeLike= ...,
+) -> NDArray[Any]: ...
+
+@overload
+def count_nonzero(
+ a: ArrayLike,
+ axis: None = ...,
+ *,
+ keepdims: Literal[False] = ...,
+) -> int: ...
+@overload
+def count_nonzero(
+ a: ArrayLike,
+ axis: _ShapeLike = ...,
+ *,
+ keepdims: bool = ...,
+) -> Any: ... # TODO: np.intp or ndarray[np.intp]
+
+def isfortran(a: NDArray[Any] | generic) -> bool: ...
+
+def argwhere(a: ArrayLike) -> NDArray[intp]: ...
+
+def flatnonzero(a: ArrayLike) -> NDArray[intp]: ...
+
+@overload
+def correlate(
+ a: _ArrayLikeUnknown,
+ v: _ArrayLikeUnknown,
+ mode: _CorrelateMode = ...,
+) -> NDArray[Any]: ...
+@overload
+def correlate(
+ a: _ArrayLikeBool_co,
+ v: _ArrayLikeBool_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[bool_]: ...
+@overload
+def correlate(
+ a: _ArrayLikeUInt_co,
+ v: _ArrayLikeUInt_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def correlate(
+ a: _ArrayLikeInt_co,
+ v: _ArrayLikeInt_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def correlate(
+ a: _ArrayLikeFloat_co,
+ v: _ArrayLikeFloat_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def correlate(
+ a: _ArrayLikeComplex_co,
+ v: _ArrayLikeComplex_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def correlate(
+ a: _ArrayLikeTD64_co,
+ v: _ArrayLikeTD64_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def correlate(
+ a: _ArrayLikeObject_co,
+ v: _ArrayLikeObject_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def convolve(
+ a: _ArrayLikeUnknown,
+ v: _ArrayLikeUnknown,
+ mode: _CorrelateMode = ...,
+) -> NDArray[Any]: ...
+@overload
+def convolve(
+ a: _ArrayLikeBool_co,
+ v: _ArrayLikeBool_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[bool_]: ...
+@overload
+def convolve(
+ a: _ArrayLikeUInt_co,
+ v: _ArrayLikeUInt_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def convolve(
+ a: _ArrayLikeInt_co,
+ v: _ArrayLikeInt_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def convolve(
+ a: _ArrayLikeFloat_co,
+ v: _ArrayLikeFloat_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def convolve(
+ a: _ArrayLikeComplex_co,
+ v: _ArrayLikeComplex_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def convolve(
+ a: _ArrayLikeTD64_co,
+ v: _ArrayLikeTD64_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def convolve(
+ a: _ArrayLikeObject_co,
+ v: _ArrayLikeObject_co,
+ mode: _CorrelateMode = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def outer(
+ a: _ArrayLikeUnknown,
+ b: _ArrayLikeUnknown,
+ out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def outer(
+ a: _ArrayLikeBool_co,
+ b: _ArrayLikeBool_co,
+ out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def outer(
+ a: _ArrayLikeUInt_co,
+ b: _ArrayLikeUInt_co,
+ out: None = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def outer(
+ a: _ArrayLikeInt_co,
+ b: _ArrayLikeInt_co,
+ out: None = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def outer(
+ a: _ArrayLikeFloat_co,
+ b: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def outer(
+ a: _ArrayLikeComplex_co,
+ b: _ArrayLikeComplex_co,
+ out: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def outer(
+ a: _ArrayLikeTD64_co,
+ b: _ArrayLikeTD64_co,
+ out: None = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def outer(
+ a: _ArrayLikeObject_co,
+ b: _ArrayLikeObject_co,
+ out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def outer(
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def tensordot(
+ a: _ArrayLikeUnknown,
+ b: _ArrayLikeUnknown,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[Any]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeBool_co,
+ b: _ArrayLikeBool_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[bool_]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeUInt_co,
+ b: _ArrayLikeUInt_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeInt_co,
+ b: _ArrayLikeInt_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeFloat_co,
+ b: _ArrayLikeFloat_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeComplex_co,
+ b: _ArrayLikeComplex_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeTD64_co,
+ b: _ArrayLikeTD64_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def tensordot(
+ a: _ArrayLikeObject_co,
+ b: _ArrayLikeObject_co,
+ axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def roll(
+ a: _ArrayLike[_SCT],
+ shift: _ShapeLike,
+ axis: None | _ShapeLike = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def roll(
+ a: ArrayLike,
+ shift: _ShapeLike,
+ axis: None | _ShapeLike = ...,
+) -> NDArray[Any]: ...
+
+def rollaxis(
+ a: NDArray[_SCT],
+ axis: int,
+ start: int = ...,
+) -> NDArray[_SCT]: ...
+
+def moveaxis(
+ a: NDArray[_SCT],
+ source: _ShapeLike,
+ destination: _ShapeLike,
+) -> NDArray[_SCT]: ...
+
+@overload
+def cross(
+ a: _ArrayLikeUnknown,
+ b: _ArrayLikeUnknown,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NDArray[Any]: ...
+@overload
+def cross(
+ a: _ArrayLikeBool_co,
+ b: _ArrayLikeBool_co,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NoReturn: ...
+@overload
+def cross(
+ a: _ArrayLikeUInt_co,
+ b: _ArrayLikeUInt_co,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def cross(
+ a: _ArrayLikeInt_co,
+ b: _ArrayLikeInt_co,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def cross(
+ a: _ArrayLikeFloat_co,
+ b: _ArrayLikeFloat_co,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cross(
+ a: _ArrayLikeComplex_co,
+ b: _ArrayLikeComplex_co,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cross(
+ a: _ArrayLikeObject_co,
+ b: _ArrayLikeObject_co,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: None | int = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def indices(
+ dimensions: Sequence[int],
+ dtype: type[int] = ...,
+ sparse: Literal[False] = ...,
+) -> NDArray[int_]: ...
+@overload
+def indices(
+ dimensions: Sequence[int],
+ dtype: type[int] = ...,
+ sparse: Literal[True] = ...,
+) -> tuple[NDArray[int_], ...]: ...
+@overload
+def indices(
+ dimensions: Sequence[int],
+ dtype: _DTypeLike[_SCT],
+ sparse: Literal[False] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def indices(
+ dimensions: Sequence[int],
+ dtype: _DTypeLike[_SCT],
+ sparse: Literal[True],
+) -> tuple[NDArray[_SCT], ...]: ...
+@overload
+def indices(
+ dimensions: Sequence[int],
+ dtype: DTypeLike,
+ sparse: Literal[False] = ...,
+) -> NDArray[Any]: ...
+@overload
+def indices(
+ dimensions: Sequence[int],
+ dtype: DTypeLike,
+ sparse: Literal[True],
+) -> tuple[NDArray[Any], ...]: ...
+
+def fromfunction(
+ function: Callable[..., _T],
+ shape: Sequence[int],
+ *,
+ dtype: DTypeLike = ...,
+ like: _SupportsArrayFunc = ...,
+ **kwargs: Any,
+) -> _T: ...
+
+def isscalar(element: object) -> TypeGuard[
+ generic | bool | int | float | complex | str | bytes | memoryview
+]: ...
+
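The ``TypeGuard`` return type lets a static checker narrow ``element`` to the listed union after an ``isscalar`` check. Note the runtime semantics it mirrors: zero-dimensional arrays are *not* scalars::

    import numpy as np

    values = [1.5, "text", np.float64(2.0), [1, 2], np.array(3.0)]
    print([np.isscalar(v) for v in values])
    # [True, True, True, False, False] -- the 0-d array fails the check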
+def binary_repr(num: int, width: None | int = ...) -> str: ...
+
+def base_repr(
+ number: SupportsAbs[float],
+ base: float = ...,
+ padding: SupportsIndex = ...,
+) -> str: ...
+
+@overload
+def identity(
+ n: int,
+ dtype: None = ...,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def identity(
+ n: int,
+ dtype: _DTypeLike[_SCT],
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def identity(
+ n: int,
+ dtype: DTypeLike,
+ *,
+ like: _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+def allclose(
+ a: ArrayLike,
+ b: ArrayLike,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+) -> bool: ...
+
+@overload
+def isclose(
+ a: _ScalarLike_co,
+ b: _ScalarLike_co,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+) -> bool_: ...
+@overload
+def isclose(
+ a: ArrayLike,
+ b: ArrayLike,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+) -> NDArray[bool_]: ...
+
+def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...
+
+def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/numerictypes.py b/venv/lib/python3.9/site-packages/numpy/core/numerictypes.py
new file mode 100644
index 00000000..21a00ac6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/numerictypes.py
@@ -0,0 +1,670 @@
+"""
+numerictypes: Define the numeric type objects
+
+This module is designed so "from numerictypes import \\*" is safe.
+Exported symbols include:
+
+ Dictionary with all registered number types (including aliases):
+ sctypeDict
+
+ Type objects (not all will be available, depends on platform):
+ see variable sctypes for which ones you have
+
+ Bit-width names
+
+ int8 int16 int32 int64 int128
+ uint8 uint16 uint32 uint64 uint128
+ float16 float32 float64 float96 float128 float256
+ complex32 complex64 complex128 complex192 complex256 complex512
+ datetime64 timedelta64
+
+ c-based names
+
+ bool_
+
+ object_
+
+ void, str_, unicode_
+
+ byte, ubyte,
+ short, ushort
+ intc, uintc,
+ intp, uintp,
+ int_, uint,
+ longlong, ulonglong,
+
+ single, csingle,
+ float_, complex_,
+ longfloat, clongfloat,
+
+ As part of the type-hierarchy: xx -- is bit-width
+
+ generic
+ +-> bool_ (kind=b)
+ +-> number
+ | +-> integer
+ | | +-> signedinteger (intxx) (kind=i)
+ | | | byte
+ | | | short
+ | | | intc
+ | | | intp
+ | | | int_
+ | | | longlong
+ | | \\-> unsignedinteger (uintxx) (kind=u)
+ | | ubyte
+ | | ushort
+ | | uintc
+ | | uintp
+ | | uint_
+ | | ulonglong
+ | +-> inexact
+ | +-> floating (floatxx) (kind=f)
+ | | half
+ | | single
+ | | float_ (double)
+ | | longfloat
+ | \\-> complexfloating (complexxx) (kind=c)
+ | csingle (singlecomplex)
+ | complex_ (cfloat, cdouble)
+ | clongfloat (longcomplex)
+ +-> flexible
+ | +-> character
+ | | str_ (string_, bytes_) (kind=S) [Python 2]
+ | | unicode_ (kind=U) [Python 2]
+ | |
+ | | bytes_ (string_) (kind=S) [Python 3]
+ | | str_ (unicode_) (kind=U) [Python 3]
+ | |
+ | \\-> void (kind=V)
+ \\-> object_ (not used much) (kind=O)
+
+"""
+import numbers
+
+from numpy.core.multiarray import (
+ ndarray, array, dtype, datetime_data, datetime_as_string,
+ busday_offset, busday_count, is_busday, busdaycalendar
+ )
+from numpy.core.overrides import set_module
+
+# we add more at the bottom
+__all__ = ['sctypeDict', 'sctypes',
+ 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
+ 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
+ 'issubdtype', 'datetime_data', 'datetime_as_string',
+ 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
+ ]
+
+# we don't need all these imports, but we need to keep them for compatibility
+# for users using np.core.numerictypes.UPPER_TABLE
+from ._string_helpers import (
+ english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
+)
+
+from ._type_aliases import (
+ sctypeDict,
+ allTypes,
+ bitname,
+ sctypes,
+ _concrete_types,
+ _concrete_typeinfo,
+ _bits_of,
+)
+from ._dtype import _kind_name
+
+# we don't export these for import *, but we do want them accessible
+# as numerictypes.bool, etc.
+from builtins import bool, int, float, complex, object, str, bytes
+from numpy.compat import long, unicode
+
+
+# We use this later
+generic = allTypes['generic']
+
+genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
+ 'int32', 'uint32', 'int64', 'uint64', 'int128',
+ 'uint128', 'float16',
+ 'float32', 'float64', 'float80', 'float96', 'float128',
+ 'float256',
+ 'complex32', 'complex64', 'complex128', 'complex160',
+ 'complex192', 'complex256', 'complex512', 'object']
+
+@set_module('numpy')
+def maximum_sctype(t):
+ """
+ Return the scalar type of highest precision of the same kind as the input.
+
+ Parameters
+ ----------
+ t : dtype or dtype specifier
+ The input data type. This can be a `dtype` object or an object that
+ is convertible to a `dtype`.
+
+ Returns
+ -------
+ out : dtype
+ The highest precision data type of the same kind (`dtype.kind`) as `t`.
+
+ See Also
+ --------
+ obj2sctype, mintypecode, sctype2char
+ dtype
+
+ Examples
+ --------
+ >>> np.maximum_sctype(int)
+ <class 'numpy.int64'>
+ >>> np.maximum_sctype(np.uint8)
+ <class 'numpy.uint64'>
+ >>> np.maximum_sctype(complex)
+ <class 'numpy.complex256'> # may vary
+
+ >>> np.maximum_sctype(str)
+ <class 'numpy.str_'>
+
+ >>> np.maximum_sctype('i2')
+ <class 'numpy.int64'>
+ >>> np.maximum_sctype('f4')
+ <class 'numpy.float128'> # may vary
+
+ """
+ g = obj2sctype(t)
+ if g is None:
+ return t
+ t = g
+ base = _kind_name(dtype(t))
+ if base in sctypes:
+ return sctypes[base][-1]
+ else:
+ return t
+
+
+@set_module('numpy')
+def issctype(rep):
+ """
+ Determines whether the given object represents a scalar data-type.
+
+ Parameters
+ ----------
+ rep : any
+ If `rep` is an instance of a scalar dtype, True is returned. If not,
+ False is returned.
+
+ Returns
+ -------
+ out : bool
+        Boolean result of the check whether `rep` is a scalar dtype.
+
+ See Also
+ --------
+ issubsctype, issubdtype, obj2sctype, sctype2char
+
+ Examples
+ --------
+ >>> np.issctype(np.int32)
+ True
+ >>> np.issctype(list)
+ False
+ >>> np.issctype(1.1)
+ False
+
+ Strings are also a scalar type:
+
+ >>> np.issctype(np.dtype('str'))
+ True
+
+ """
+ if not isinstance(rep, (type, dtype)):
+ return False
+ try:
+ res = obj2sctype(rep)
+ if res and res != object_:
+ return True
+ return False
+ except Exception:
+ return False
+
+
+@set_module('numpy')
+def obj2sctype(rep, default=None):
+ """
+ Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+ Parameters
+ ----------
+ rep : any
+ The object of which the type is returned.
+ default : any, optional
+ If given, this is returned for objects whose types can not be
+ determined. If not given, None is returned for those objects.
+
+ Returns
+ -------
+ dtype : dtype or Python type
+ The data type of `rep`.
+
+ See Also
+ --------
+ sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
+
+ Examples
+ --------
+ >>> np.obj2sctype(np.int32)
+ <class 'numpy.int32'>
+ >>> np.obj2sctype(np.array([1., 2.]))
+ <class 'numpy.float64'>
+ >>> np.obj2sctype(np.array([1.j]))
+ <class 'numpy.complex128'>
+
+ >>> np.obj2sctype(dict)
+ <class 'numpy.object_'>
+ >>> np.obj2sctype('string')
+
+ >>> np.obj2sctype(1, default=list)
+ <class 'list'>
+
+ """
+ # prevent abstract classes being upcast
+ if isinstance(rep, type) and issubclass(rep, generic):
+ return rep
+ # extract dtype from arrays
+ if isinstance(rep, ndarray):
+ return rep.dtype.type
+ # fall back on dtype to convert
+ try:
+ res = dtype(rep)
+ except Exception:
+ return default
+ else:
+ return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+ """
+ Determine if a class is a subclass of a second class.
+
+ `issubclass_` is equivalent to the Python built-in ``issubclass``,
+ except that it returns False instead of raising a TypeError if one
+ of the arguments is not a class.
+
+ Parameters
+ ----------
+ arg1 : class
+ Input class. True is returned if `arg1` is a subclass of `arg2`.
+ arg2 : class or tuple of classes.
+ Input class. If a tuple of classes, True is returned if `arg1` is a
+ subclass of any of the tuple elements.
+
+ Returns
+ -------
+ out : bool
+ Whether `arg1` is a subclass of `arg2` or not.
+
+ See Also
+ --------
+ issubsctype, issubdtype, issctype
+
+ Examples
+ --------
+ >>> np.issubclass_(np.int32, int)
+ False
+ >>> np.issubclass_(np.int32, float)
+ False
+ >>> np.issubclass_(np.float64, float)
+ True
+
+ """
+ try:
+ return issubclass(arg1, arg2)
+ except TypeError:
+ return False
+
+
+@set_module('numpy')
+def issubsctype(arg1, arg2):
+ """
+ Determine if the first argument is a subclass of the second argument.
+
+ Parameters
+ ----------
+ arg1, arg2 : dtype or dtype specifier
+ Data-types.
+
+ Returns
+ -------
+ out : bool
+ The result.
+
+ See Also
+ --------
+ issctype, issubdtype, obj2sctype
+
+ Examples
+ --------
+ >>> np.issubsctype('S8', str)
+ False
+ >>> np.issubsctype(np.array([1]), int)
+ True
+ >>> np.issubsctype(np.array([1]), float)
+ False
+
+ """
+ return issubclass(obj2sctype(arg1), obj2sctype(arg2))
+
+
+@set_module('numpy')
+def issubdtype(arg1, arg2):
+ r"""
+    Returns True if the first argument is a typecode lower/equal in the
+    type hierarchy.
+
+ This is like the builtin :func:`issubclass`, but for `dtype`\ s.
+
+ Parameters
+ ----------
+ arg1, arg2 : dtype_like
+ `dtype` or object coercible to one
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ :ref:`arrays.scalars` : Overview of the numpy type hierarchy.
+ issubsctype, issubclass_
+
+ Examples
+ --------
+ `issubdtype` can be used to check the type of arrays:
+
+ >>> ints = np.array([1, 2, 3], dtype=np.int32)
+ >>> np.issubdtype(ints.dtype, np.integer)
+ True
+ >>> np.issubdtype(ints.dtype, np.floating)
+ False
+
+ >>> floats = np.array([1, 2, 3], dtype=np.float32)
+ >>> np.issubdtype(floats.dtype, np.integer)
+ False
+ >>> np.issubdtype(floats.dtype, np.floating)
+ True
+
+ Similar types of different sizes are not subdtypes of each other:
+
+ >>> np.issubdtype(np.float64, np.float32)
+ False
+ >>> np.issubdtype(np.float32, np.float64)
+ False
+
+ but both are subtypes of `floating`:
+
+ >>> np.issubdtype(np.float64, np.floating)
+ True
+ >>> np.issubdtype(np.float32, np.floating)
+ True
+
+ For convenience, dtype-like objects are allowed too:
+
+ >>> np.issubdtype('S1', np.string_)
+ True
+ >>> np.issubdtype('i4', np.signedinteger)
+ True
+
+ """
+ if not issubclass_(arg1, generic):
+ arg1 = dtype(arg1).type
+ if not issubclass_(arg2, generic):
+ arg2 = dtype(arg2).type
+
+ return issubclass(arg1, arg2)
+
+
+# This dictionary allows look up based on any alias for an array data-type
+class _typedict(dict):
+ """
+ Base object for a dictionary for look-up with any alias for an array dtype.
+
+    Instances of `_typedict` cannot be used as dictionaries directly;
+    first they have to be populated.
+
+ """
+
+ def __getitem__(self, obj):
+ return dict.__getitem__(self, obj2sctype(obj))
+
+nbytes = _typedict()
+_alignment = _typedict()
+_maxvals = _typedict()
+_minvals = _typedict()
+def _construct_lookups():
+ for name, info in _concrete_typeinfo.items():
+ obj = info.type
+ nbytes[obj] = info.bits // 8
+ _alignment[obj] = info.alignment
+ if len(info) > 5:
+ _maxvals[obj] = info.max
+ _minvals[obj] = info.min
+ else:
+ _maxvals[obj] = None
+ _minvals[obj] = None
+
+_construct_lookups()
+
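Because ``_typedict.__getitem__`` routes every key through ``obj2sctype``, these lookup tables accept any alias of a type, not just the concrete scalar class::

    import numpy as np
    from numpy.core.numerictypes import nbytes

    # All three keys resolve to numpy.float64 before the lookup:
    print(nbytes[np.float64], nbytes['float64'], nbytes[float])   # 8 8 8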
+
+@set_module('numpy')
+def sctype2char(sctype):
+ """
+ Return the string representation of a scalar dtype.
+
+ Parameters
+ ----------
+ sctype : scalar dtype or object
+ If a scalar dtype, the corresponding string character is
+ returned. If an object, `sctype2char` tries to infer its scalar type
+ and then return the corresponding string character.
+
+ Returns
+ -------
+ typechar : str
+ The string character corresponding to the scalar type.
+
+ Raises
+ ------
+ ValueError
+ If `sctype` is an object for which the type can not be inferred.
+
+ See Also
+ --------
+ obj2sctype, issctype, issubsctype, mintypecode
+
+ Examples
+ --------
+ >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
+ ... print(np.sctype2char(sctype))
+ l # may vary
+ d
+ D
+ S
+ O
+
+ >>> x = np.array([1., 2-1.j])
+ >>> np.sctype2char(x)
+ 'D'
+ >>> np.sctype2char(list)
+ 'O'
+
+ """
+ sctype = obj2sctype(sctype)
+ if sctype is None:
+ raise ValueError("unrecognized type")
+ if sctype not in _concrete_types:
+ # for compatibility
+ raise KeyError(sctype)
+ return dtype(sctype).char
+
+# Create dictionary of casting functions that wrap sequences
+# indexed by type or type character
+cast = _typedict()
+for key in _concrete_types:
+ cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
+
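The ``k=key`` default argument freezes the loop variable for each entry; a bare ``lambda x: array(x).astype(key)`` would make every slot cast to whichever type the loop visited last. Usage (note that ``astype`` truncates floats toward zero)::

    import numpy as np

    out = np.cast[np.int32]([1.6, 2.4])
    print(out, out.dtype)    # [1 2] int32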
+
+def _scalar_type_key(typ):
+ """A ``key`` function for `sorted`."""
+ dt = dtype(typ)
+ return (dt.kind.lower(), dt.itemsize)
+
+
+ScalarType = [int, float, complex, bool, bytes, str, memoryview]
+ScalarType += sorted(_concrete_types, key=_scalar_type_key)
+ScalarType = tuple(ScalarType)
+
+
+# Now add the types we've determined to this module
+for key in allTypes:
+ globals()[key] = allTypes[key]
+ __all__.append(key)
+
+del key
+
+typecodes = {'Character':'c',
+ 'Integer':'bhilqp',
+ 'UnsignedInteger':'BHILQP',
+ 'Float':'efdg',
+ 'Complex':'FDG',
+ 'AllInteger':'bBhHiIlLqQpP',
+ 'AllFloat':'efdgFDG',
+ 'Datetime': 'Mm',
+ 'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
+
+# backwards compatibility --- deprecated name
+# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
+typeDict = sctypeDict
+
+# b -> boolean
+# u -> unsigned integer
+# i -> signed integer
+# f -> floating point
+# c -> complex
+# M -> datetime
+# m -> timedelta
+# S -> string
+# U -> Unicode string
+# V -> record
+# O -> Python object
+_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
+
+__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
+__len_test_types = len(__test_types)
+
+# Keep incrementing until a common type that both can be coerced to
+# is found. Otherwise, return None.
+def _find_common_coerce(a, b):
+ if a > b:
+ return a
+ try:
+ thisind = __test_types.index(a.char)
+ except ValueError:
+ return None
+ return _can_coerce_all([a, b], start=thisind)
+
+# Find a data-type that all data-types in a list can be coerced to
+def _can_coerce_all(dtypelist, start=0):
+ N = len(dtypelist)
+ if N == 0:
+ return None
+ if N == 1:
+ return dtypelist[0]
+ thisind = start
+ while thisind < __len_test_types:
+ newdtype = dtype(__test_types[thisind])
+ numcoerce = len([x for x in dtypelist if newdtype >= x])
+ if numcoerce == N:
+ return newdtype
+ thisind += 1
+ return None
+
+def _register_types():
+ numbers.Integral.register(integer)
+ numbers.Complex.register(inexact)
+ numbers.Real.register(floating)
+ numbers.Number.register(number)
+
+_register_types()
+
+
+@set_module('numpy')
+def find_common_type(array_types, scalar_types):
+ """
+ Determine common type following standard coercion rules.
+
+ Parameters
+ ----------
+ array_types : sequence
+ A list of dtypes or dtype convertible objects representing arrays.
+ scalar_types : sequence
+ A list of dtypes or dtype convertible objects representing scalars.
+
+ Returns
+ -------
+ datatype : dtype
+ The common data type, which is the maximum of `array_types` ignoring
+ `scalar_types`, unless the maximum of `scalar_types` is of a
+ different kind (`dtype.kind`). If the kind is not understood, then
+ None is returned.
+
+ See Also
+ --------
+ dtype, common_type, can_cast, mintypecode
+
+ Examples
+ --------
+ >>> np.find_common_type([], [np.int64, np.float32, complex])
+ dtype('complex128')
+ >>> np.find_common_type([np.int64, np.float32], [])
+ dtype('float64')
+
+ The standard casting rules ensure that a scalar cannot up-cast an
+ array unless the scalar is of a fundamentally different kind of data
+    (i.e. under a different branch of the data type hierarchy) than
+    the array:
+
+ >>> np.find_common_type([np.float32], [np.int64, np.float64])
+ dtype('float32')
+
+ Complex is of a different type, so it up-casts the float in the
+ `array_types` argument:
+
+ >>> np.find_common_type([np.float32], [complex])
+ dtype('complex128')
+
+ Type specifier strings are convertible to dtypes and can therefore
+ be used instead of dtypes:
+
+ >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
+ dtype('complex128')
+
+ """
+ array_types = [dtype(x) for x in array_types]
+ scalar_types = [dtype(x) for x in scalar_types]
+
+ maxa = _can_coerce_all(array_types)
+ maxsc = _can_coerce_all(scalar_types)
+
+ if maxa is None:
+ return maxsc
+
+ if maxsc is None:
+ return maxa
+
+ try:
+ index_a = _kind_list.index(maxa.kind)
+ index_sc = _kind_list.index(maxsc.kind)
+ except ValueError:
+ return None
+
+ if index_sc > index_a:
+ return _find_common_coerce(maxsc, maxa)
+ else:
+ return maxa
diff --git a/venv/lib/python3.9/site-packages/numpy/core/numerictypes.pyi b/venv/lib/python3.9/site-packages/numpy/core/numerictypes.pyi
new file mode 100644
index 00000000..d10e4822
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/numerictypes.pyi
@@ -0,0 +1,161 @@
+import sys
+import types
+from collections.abc import Iterable
+from typing import (
+ Literal as L,
+ Union,
+ overload,
+ Any,
+ TypeVar,
+ Protocol,
+ TypedDict,
+)
+
+from numpy import (
+ ndarray,
+ dtype,
+ generic,
+ bool_,
+ ubyte,
+ ushort,
+ uintc,
+ uint,
+ ulonglong,
+ byte,
+ short,
+ intc,
+ int_,
+ longlong,
+ half,
+ single,
+ double,
+ longdouble,
+ csingle,
+ cdouble,
+ clongdouble,
+ datetime64,
+ timedelta64,
+ object_,
+ str_,
+ bytes_,
+ void,
+)
+
+from numpy.core._type_aliases import (
+ sctypeDict as sctypeDict,
+ sctypes as sctypes,
+)
+
+from numpy._typing import DTypeLike, ArrayLike, _DTypeLike
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+
+class _CastFunc(Protocol):
+ def __call__(
+ self, x: ArrayLike, k: DTypeLike = ...
+ ) -> ndarray[Any, dtype[Any]]: ...
+
+class _TypeCodes(TypedDict):
+ Character: L['c']
+ Integer: L['bhilqp']
+ UnsignedInteger: L['BHILQP']
+ Float: L['efdg']
+ Complex: L['FDG']
+ AllInteger: L['bBhHiIlLqQpP']
+ AllFloat: L['efdgFDG']
+ Datetime: L['Mm']
+ All: L['?bhilqpBHILQPefdgFDGSUVOMm']
+
+class _typedict(dict[type[generic], _T]):
+ def __getitem__(self, key: DTypeLike) -> _T: ...
+
+if sys.version_info >= (3, 10):
+ _TypeTuple = Union[
+ type[Any],
+ types.UnionType,
+ tuple[Union[type[Any], types.UnionType, tuple[Any, ...]], ...],
+ ]
+else:
+ _TypeTuple = Union[
+ type[Any],
+ tuple[Union[type[Any], tuple[Any, ...]], ...],
+ ]
+
+__all__: list[str]
+
+@overload
+def maximum_sctype(t: _DTypeLike[_SCT]) -> type[_SCT]: ...
+@overload
+def maximum_sctype(t: DTypeLike) -> type[Any]: ...
+
+@overload
+def issctype(rep: dtype[Any] | type[Any]) -> bool: ...
+@overload
+def issctype(rep: object) -> L[False]: ...
+
+@overload
+def obj2sctype(rep: _DTypeLike[_SCT], default: None = ...) -> None | type[_SCT]: ...
+@overload
+def obj2sctype(rep: _DTypeLike[_SCT], default: _T) -> _T | type[_SCT]: ...
+@overload
+def obj2sctype(rep: DTypeLike, default: None = ...) -> None | type[Any]: ...
+@overload
+def obj2sctype(rep: DTypeLike, default: _T) -> _T | type[Any]: ...
+@overload
+def obj2sctype(rep: object, default: None = ...) -> None: ...
+@overload
+def obj2sctype(rep: object, default: _T) -> _T: ...
+
+@overload
+def issubclass_(arg1: type[Any], arg2: _TypeTuple) -> bool: ...
+@overload
+def issubclass_(arg1: object, arg2: object) -> L[False]: ...
+
+def issubsctype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
+
+def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ...
+
+def sctype2char(sctype: DTypeLike) -> str: ...
+
+def find_common_type(
+ array_types: Iterable[DTypeLike],
+ scalar_types: Iterable[DTypeLike],
+) -> dtype[Any]: ...
+
+cast: _typedict[_CastFunc]
+nbytes: _typedict[int]
+typecodes: _TypeCodes
+ScalarType: tuple[
+ type[int],
+ type[float],
+ type[complex],
+ type[bool],
+ type[bytes],
+ type[str],
+ type[memoryview],
+ type[bool_],
+ type[csingle],
+ type[cdouble],
+ type[clongdouble],
+ type[half],
+ type[single],
+ type[double],
+ type[longdouble],
+ type[byte],
+ type[short],
+ type[intc],
+ type[int_],
+ type[longlong],
+ type[timedelta64],
+ type[datetime64],
+ type[object_],
+ type[bytes_],
+ type[str_],
+ type[ubyte],
+ type[ushort],
+ type[uintc],
+ type[uint],
+ type[ulonglong],
+ type[void],
+]
diff --git a/venv/lib/python3.9/site-packages/numpy/core/overrides.py b/venv/lib/python3.9/site-packages/numpy/core/overrides.py
new file mode 100644
index 00000000..c567dfef
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/overrides.py
@@ -0,0 +1,225 @@
+"""Implementation of __array_function__ overrides from NEP-18."""
+import collections
+import functools
+import os
+
+from numpy.core._multiarray_umath import (
+ add_docstring, implement_array_function, _get_implementing_args)
+from numpy.compat._inspect import getargspec
+
+
+ARRAY_FUNCTION_ENABLED = bool(
+ int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
+
+array_function_like_doc = (
+ """like : array_like, optional
+ Reference object to allow the creation of arrays which are not
+ NumPy arrays. If an array-like passed in as ``like`` supports
+ the ``__array_function__`` protocol, the result will be defined
+ by it. In this case, it ensures the creation of an array object
+ compatible with that passed in via this argument."""
+)
+
+def set_array_function_like_doc(public_api):
+ if public_api.__doc__ is not None:
+ public_api.__doc__ = public_api.__doc__.replace(
+ "${ARRAY_FUNCTION_LIKE}",
+ array_function_like_doc,
+ )
+ return public_api
+
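This helper is what fills in the ``${ARRAY_FUNCTION_LIKE}`` placeholder seen in docstrings such as ``identity`` above. Applying it to a hypothetical function shows the substitution::

    from numpy.core.overrides import set_array_function_like_doc

    @set_array_function_like_doc
    def my_creator(shape, *, like=None):
        """Create something.

        ${ARRAY_FUNCTION_LIKE}
        """

    assert "${ARRAY_FUNCTION_LIKE}" not in my_creator.__doc__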
+
+add_docstring(
+ implement_array_function,
+ """
+ Implement a function with checks for __array_function__ overrides.
+
+ All arguments are required, and can only be passed by position.
+
+ Parameters
+ ----------
+ implementation : function
+ Function that implements the operation on NumPy array without
+ overrides when called like ``implementation(*args, **kwargs)``.
+ public_api : function
+ Function exposed by NumPy's public API originally called like
+ ``public_api(*args, **kwargs)`` on which arguments are now being
+ checked.
+ relevant_args : iterable
+ Iterable of arguments to check for __array_function__ methods.
+ args : tuple
+ Arbitrary positional arguments originally passed into ``public_api``.
+ kwargs : dict
+ Arbitrary keyword arguments originally passed into ``public_api``.
+
+ Returns
+ -------
+ Result from calling ``implementation()`` or an ``__array_function__``
+ method, as appropriate.
+
+ Raises
+ ------
+ TypeError : if no implementation is found.
+ """)
+
+
+# exposed for testing purposes; used internally by implement_array_function
+add_docstring(
+ _get_implementing_args,
+ """
+ Collect arguments on which to call __array_function__.
+
+ Parameters
+ ----------
+ relevant_args : iterable of array-like
+ Iterable of possibly array-like arguments to check for
+ __array_function__ methods.
+
+ Returns
+ -------
+ Sequence of arguments with __array_function__ methods, in the order in
+ which they should be called.
+ """)
+
+
+ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+
+def verify_matching_signatures(implementation, dispatcher):
+ """Verify that a dispatcher function has the right signature."""
+ implementation_spec = ArgSpec(*getargspec(implementation))
+ dispatcher_spec = ArgSpec(*getargspec(dispatcher))
+
+ if (implementation_spec.args != dispatcher_spec.args or
+ implementation_spec.varargs != dispatcher_spec.varargs or
+ implementation_spec.keywords != dispatcher_spec.keywords or
+ (bool(implementation_spec.defaults) !=
+ bool(dispatcher_spec.defaults)) or
+ (implementation_spec.defaults is not None and
+ len(implementation_spec.defaults) !=
+ len(dispatcher_spec.defaults))):
+ raise RuntimeError('implementation and dispatcher for %s have '
+ 'different function signatures' % implementation)
+
+ if implementation_spec.defaults is not None:
+ if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
+ raise RuntimeError('dispatcher functions can only use None for '
+ 'default argument values')
+
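Concretely, a dispatcher must mirror the implementation's signature while using ``None`` for every default; ``pad`` here is a hypothetical example, not NumPy's::

    def _pad_dispatcher(a, width, mode=None):   # OK: same names, None default
        return (a,)

    def pad(a, width, mode="constant"):         # implementation keeps the
        ...                                     # real default value

    # verify_matching_signatures(pad, _pad_dispatcher) passes silently;
    # giving the dispatcher ``mode="constant"`` instead would raise the
    # RuntimeError about None-only default values.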
+
+def set_module(module):
+ """Decorator for overriding __module__ on a function or class.
+
+ Example usage::
+
+ @set_module('numpy')
+ def example():
+ pass
+
+ assert example.__module__ == 'numpy'
+ """
+ def decorator(func):
+ if module is not None:
+ func.__module__ = module
+ return func
+ return decorator
+
+
+def array_function_dispatch(dispatcher, module=None, verify=True,
+ docs_from_dispatcher=False, use_like=False):
+ """Decorator for adding dispatch with the __array_function__ protocol.
+
+ See NEP-18 for example usage.
+
+ Parameters
+ ----------
+ dispatcher : callable
+ Function that when called like ``dispatcher(*args, **kwargs)`` with
+ arguments from the NumPy function call returns an iterable of
+ array-like arguments to check for ``__array_function__``.
+ module : str, optional
+ __module__ attribute to set on new function, e.g., ``module='numpy'``.
+ By default, module is copied from the decorated function.
+ verify : bool, optional
+        If True, verify that the signatures of the dispatcher and the
+        decorated function match exactly: all required and optional arguments
+ should appear in order with the same names, but the default values for
+ all optional arguments should be ``None``. Only disable verification
+ if the dispatcher's signature needs to deviate for some particular
+ reason, e.g., because the function has a signature like
+ ``func(*args, **kwargs)``.
+ docs_from_dispatcher : bool, optional
+ If True, copy docs from the dispatcher function onto the dispatched
+ function, rather than from the implementation. This is useful for
+ functions defined in C, which otherwise don't have docstrings.
+
+ Returns
+ -------
+ Function suitable for decorating the implementation of a NumPy function.
+ """
+
+ if not ARRAY_FUNCTION_ENABLED:
+ def decorator(implementation):
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+ if module is not None:
+ implementation.__module__ = module
+ return implementation
+ return decorator
+
+ def decorator(implementation):
+ if verify:
+ verify_matching_signatures(implementation, dispatcher)
+
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ try:
+ relevant_args = dispatcher(*args, **kwargs)
+ except TypeError as exc:
+ # Try to clean up a signature related TypeError. Such an
+ # error will be something like:
+ # dispatcher.__name__() got an unexpected keyword argument
+ #
+ # So replace the dispatcher name in this case. In principle
+ # TypeErrors may be raised from _within_ the dispatcher, so
+ # we check that the traceback contains a string that starts
+ # with the name. (In principle we could also check the
+ # traceback length, as it would be deeper.)
+ msg = exc.args[0]
+ disp_name = dispatcher.__name__
+ if not isinstance(msg, str) or not msg.startswith(disp_name):
+ raise
+
+ # Replace with the correct name and re-raise:
+ new_msg = msg.replace(disp_name, public_api.__name__)
+ raise TypeError(new_msg) from None
+
+ return implement_array_function(
+ implementation, public_api, relevant_args, args, kwargs,
+ use_like)
+
+ public_api.__code__ = public_api.__code__.replace(
+ co_name=implementation.__name__,
+ co_filename='<__array_function__ internals>')
+ if module is not None:
+ public_api.__module__ = module
+
+ public_api._implementation = implementation
+
+ return public_api
+
+ return decorator
+
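End to end, the decorator turns an ordinary function into an override point. A minimal sketch with a hypothetical ``double`` function and a container that intercepts it::

    import numpy as np
    from numpy.core.overrides import array_function_dispatch

    def _double_dispatcher(a):
        return (a,)

    @array_function_dispatch(_double_dispatcher)
    def double(a):
        return np.asarray(a) * 2

    class MyArray:
        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_function__(self, func, types, args, kwargs):
            if func is double:                  # unwrap, recompute, re-wrap
                return MyArray(func(args[0].data))
            return NotImplemented

    print(double([1, 2]))                       # plain path: [2 4]
    print(type(double(MyArray([1, 2]))))        # <class '__main__.MyArray'>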
+
+def array_function_from_dispatcher(
+ implementation, module=None, verify=True, docs_from_dispatcher=True):
+ """Like array_function_dispatcher, but with function arguments flipped."""
+
+ def decorator(dispatcher):
+ return array_function_dispatch(
+ dispatcher, module, verify=verify,
+ docs_from_dispatcher=docs_from_dispatcher)(implementation)
+ return decorator
diff --git a/venv/lib/python3.9/site-packages/numpy/core/records.py b/venv/lib/python3.9/site-packages/numpy/core/records.py
new file mode 100644
index 00000000..c014bc97
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/records.py
@@ -0,0 +1,1099 @@
+"""
+Record Arrays
+=============
+Record arrays expose the fields of structured arrays as properties.
+
+Most commonly, ndarrays contain elements of a single type, e.g. floats,
+integers, bools etc. However, it is possible for elements to be combinations
+of these using structured types, such as::
+
+ >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
+ >>> a
+ array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')])
+
+Here, each element consists of two fields: x (an int), and y (a float).
+This is known as a structured array. The different fields are analogous
+to columns in a spreadsheet, and can be accessed as one would a
+dictionary::
+
+ >>> a['x']
+ array([1, 1])
+
+ >>> a['y']
+ array([2., 2.])
+
+Record arrays allow us to access fields as properties::
+
+ >>> ar = np.rec.array(a)
+
+ >>> ar.x
+ array([1, 1])
+
+ >>> ar.y
+ array([2., 2.])
+
+"""
+import warnings
+from collections import Counter
+from contextlib import nullcontext
+
+from . import numeric as sb
+from . import numerictypes as nt
+from numpy.compat import os_fspath
+from numpy.core.overrides import set_module
+from .arrayprint import _get_legacy_print_mode
+
+# All of the functions allow formats to be a dtype
+__all__ = [
+ 'record', 'recarray', 'format_parser',
+ 'fromarrays', 'fromrecords', 'fromstring', 'fromfile', 'array',
+]
+
+
+ndarray = sb.ndarray
+
+_byteorderconv = {'b':'>',
+ 'l':'<',
+ 'n':'=',
+ 'B':'>',
+ 'L':'<',
+ 'N':'=',
+ 'S':'s',
+ 's':'s',
+ '>':'>',
+ '<':'<',
+ '=':'=',
+ '|':'|',
+ 'I':'|',
+ 'i':'|'}
+
+# formats regular expression
+# allows multidimensional spec with a tuple syntax in front
+# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
+# are equally allowed
+
+numfmt = nt.sctypeDict
+
+
+def find_duplicate(list):
+ """Find duplication in a list, return a list of duplicated elements"""
+ return [
+ item
+ for item, counts in Counter(list).items()
+ if counts > 1
+ ]
+
+
+@set_module('numpy')
+class format_parser:
+ """
+ Class to convert formats, names, titles description to a dtype.
+
+ After constructing the format_parser object, the dtype attribute is
+ the converted data-type:
+ ``dtype = format_parser(formats, names, titles).dtype``
+
+ Attributes
+ ----------
+ dtype : dtype
+ The converted data-type.
+
+ Parameters
+ ----------
+ formats : str or list of str
+ The format description, either specified as a string with
+ comma-separated format descriptions in the form ``'f8, i4, a5'``, or
+ a list of format description strings in the form
+ ``['f8', 'i4', 'a5']``.
+ names : str or list/tuple of str
+ The field names, either specified as a comma-separated string in the
+ form ``'col1, col2, col3'``, or as a list or tuple of strings in the
+ form ``['col1', 'col2', 'col3']``.
+ An empty list can be used, in that case default field names
+ ('f0', 'f1', ...) are used.
+ titles : sequence
+ Sequence of title strings. An empty list can be used to leave titles
+ out.
+ aligned : bool, optional
+ If True, align the fields by padding as the C-compiler would.
+ Default is False.
+ byteorder : str, optional
+ If specified, all the fields will be changed to the
+ provided byte-order. Otherwise, the default byte-order is
+ used. For all available string specifiers, see `dtype.newbyteorder`.
+
+ See Also
+ --------
+ dtype, typename, sctype2char
+
+ Examples
+ --------
+ >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
+ ... ['T1', 'T2', 'T3']).dtype
+ dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
+
+ `names` and/or `titles` can be empty lists. If `titles` is an empty list,
+ titles will simply not appear. If `names` is empty, default field names
+ will be used.
+
+ >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+ ... []).dtype
+ dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')])
+ >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
+ dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
+
+ """
+
+ def __init__(self, formats, names, titles, aligned=False, byteorder=None):
+ self._parseFormats(formats, aligned)
+ self._setfieldnames(names, titles)
+ self._createdtype(byteorder)
+
+ def _parseFormats(self, formats, aligned=False):
+ """ Parse the field formats """
+
+ if formats is None:
+ raise ValueError("Need formats argument")
+ if isinstance(formats, list):
+ dtype = sb.dtype(
+ [('f{}'.format(i), format_) for i, format_ in enumerate(formats)],
+ aligned,
+ )
+ else:
+ dtype = sb.dtype(formats, aligned)
+ fields = dtype.fields
+ if fields is None:
+ dtype = sb.dtype([('f1', dtype)], aligned)
+ fields = dtype.fields
+ keys = dtype.names
+ self._f_formats = [fields[key][0] for key in keys]
+ self._offsets = [fields[key][1] for key in keys]
+ self._nfields = len(keys)
+
+ def _setfieldnames(self, names, titles):
+ """convert input field names into a list and assign to the _names
+ attribute """
+
+ if names:
+ if type(names) in [list, tuple]:
+ pass
+ elif isinstance(names, str):
+ names = names.split(',')
+ else:
+ raise NameError("illegal input names %s" % repr(names))
+
+ self._names = [n.strip() for n in names[:self._nfields]]
+ else:
+ self._names = []
+
+ # if the names are not specified, they will be assigned as
+ # "f0, f1, f2,..."
+ # if not enough names are specified, they will be assigned as "f[n],
+ # f[n+1],..." etc. where n is the number of specified names..."
+ self._names += ['f%d' % i for i in range(len(self._names),
+ self._nfields)]
+ # check for redundant names
+ _dup = find_duplicate(self._names)
+ if _dup:
+ raise ValueError("Duplicate field names: %s" % _dup)
+
+ if titles:
+ self._titles = [n.strip() for n in titles[:self._nfields]]
+ else:
+ self._titles = []
+ titles = []
+
+ if self._nfields > len(titles):
+ self._titles += [None] * (self._nfields - len(titles))
+
+ def _createdtype(self, byteorder):
+ dtype = sb.dtype({
+ 'names': self._names,
+ 'formats': self._f_formats,
+ 'offsets': self._offsets,
+ 'titles': self._titles,
+ })
+ if byteorder is not None:
+ byteorder = _byteorderconv[byteorder[0]]
+ dtype = dtype.newbyteorder(byteorder)
+
+ self.dtype = dtype
+
+
+class record(nt.void):
+ """A data-type scalar that allows field access as attribute lookup.
+ """
+
+ # manually set name and module so that this class's type shows up
+ # as numpy.record when printed
+ __name__ = 'record'
+ __module__ = 'numpy'
+
+ def __repr__(self):
+ if _get_legacy_print_mode() <= 113:
+ return self.__str__()
+ return super().__repr__()
+
+ def __str__(self):
+ if _get_legacy_print_mode() <= 113:
+ return str(self.item())
+ return super().__str__()
+
+ def __getattribute__(self, attr):
+ if attr in ('setfield', 'getfield', 'dtype'):
+ return nt.void.__getattribute__(self, attr)
+ try:
+ return nt.void.__getattribute__(self, attr)
+ except AttributeError:
+ pass
+ fielddict = nt.void.__getattribute__(self, 'dtype').fields
+ res = fielddict.get(attr, None)
+ if res:
+ obj = self.getfield(*res[:2])
+ # if it has fields return a record,
+ # otherwise return the object
+ try:
+ dt = obj.dtype
+ except AttributeError:
+ #happens if field is Object type
+ return obj
+ if dt.names is not None:
+ return obj.view((self.__class__, obj.dtype))
+ return obj
+ else:
+ raise AttributeError("'record' object has no "
+ "attribute '%s'" % attr)
+
+ def __setattr__(self, attr, val):
+ if attr in ('setfield', 'getfield', 'dtype'):
+ raise AttributeError("Cannot set '%s' attribute" % attr)
+ fielddict = nt.void.__getattribute__(self, 'dtype').fields
+ res = fielddict.get(attr, None)
+ if res:
+ return self.setfield(val, *res[:2])
+ else:
+ if getattr(self, attr, None):
+ return nt.void.__setattr__(self, attr, val)
+ else:
+ raise AttributeError("'record' object has no "
+ "attribute '%s'" % attr)
+
+ def __getitem__(self, indx):
+ obj = nt.void.__getitem__(self, indx)
+
+ # copy behavior of record.__getattribute__,
+ if isinstance(obj, nt.void) and obj.dtype.names is not None:
+ return obj.view((self.__class__, obj.dtype))
+ else:
+ # return a single element
+ return obj
+
+ def pprint(self):
+ """Pretty-print all fields."""
+ # pretty-print all fields
+ names = self.dtype.names
+ maxlen = max(len(name) for name in names)
+ fmt = '%% %ds: %%s' % maxlen
+ rows = [fmt % (name, getattr(self, name)) for name in names]
+ return "\n".join(rows)
+
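+# Illustrative sketch of attribute access on a `record` scalar (assumed
+# example, not part of the module):
+#
+# >>> r = np.rec.array([(1, 2.0)], dtype=[('a', 'i4'), ('b', 'f8')])
+# >>> r[0].a
+# 1
+# >>> print(r[0].pprint())
+# a: 1
+# b: 2.0
+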
+# The recarray is almost identical to a standard array (which already
+# supports named fields). The biggest difference is that it can use
+# attribute lookup to find the fields, and that it is constructed using
+# a record.
+
+# If byteorder is given it forces a particular byteorder on all
+# the fields (and any subfields)
+
+class recarray(ndarray):
+ """Construct an ndarray that allows field access using attributes.
+
+    Arrays may have a data-type containing fields, analogous
+    to columns in a spreadsheet. An example is ``[(x, int), (y, float)]``,
+ where each entry in the array is a pair of ``(int, float)``. Normally,
+ these attributes are accessed using dictionary lookups such as ``arr['x']``
+ and ``arr['y']``. Record arrays allow the fields to be accessed as members
+ of the array, using ``arr.x`` and ``arr.y``.
+
+ Parameters
+ ----------
+ shape : tuple
+ Shape of output array.
+ dtype : data-type, optional
+ The desired data-type. By default, the data-type is determined
+ from `formats`, `names`, `titles`, `aligned` and `byteorder`.
+ formats : list of data-types, optional
+ A list containing the data-types for the different columns, e.g.
+ ``['i4', 'f8', 'i4']``. `formats` does *not* support the new
+ convention of using types directly, i.e. ``(int, float, int)``.
+ Note that `formats` must be a list, not a tuple.
+ Given that `formats` is somewhat limited, we recommend specifying
+ `dtype` instead.
+ names : tuple of str, optional
+ The name of each column, e.g. ``('x', 'y', 'z')``.
+ buf : buffer, optional
+ By default, a new array is created of the given shape and data-type.
+ If `buf` is specified and is an object exposing the buffer interface,
+ the array will use the memory from the existing buffer. In this case,
+ the `offset` and `strides` keywords are available.
+
+ Other Parameters
+ ----------------
+ titles : tuple of str, optional
+ Aliases for column names. For example, if `names` were
+ ``('x', 'y', 'z')`` and `titles` is
+ ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
+ ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
+ byteorder : {'<', '>', '='}, optional
+ Byte-order for all fields.
+ aligned : bool, optional
+ Align the fields in memory as the C-compiler would.
+ strides : tuple of ints, optional
+ Buffer (`buf`) is interpreted according to these strides (strides
+ define how many bytes each array element, row, column, etc.
+ occupy in memory).
+ offset : int, optional
+ Start reading buffer (`buf`) from this offset onwards.
+ order : {'C', 'F'}, optional
+ Row-major (C-style) or column-major (Fortran-style) order.
+
+ Returns
+ -------
+ rec : recarray
+ Empty array of the given shape and type.
+
+ See Also
+ --------
+ core.records.fromrecords : Construct a record array from data.
+ record : fundamental data-type for `recarray`.
+ format_parser : determine a data-type from formats, names, titles.
+
+ Notes
+ -----
+ This constructor can be compared to ``empty``: it creates a new record
+ array but does not fill it with data. To create a record array from data,
+ use one of the following methods:
+
+ 1. Create a standard ndarray and convert it to a record array,
+ using ``arr.view(np.recarray)``
+ 2. Use the `buf` keyword.
+ 3. Use `np.rec.fromrecords`.
+
+ Examples
+ --------
+ Create an array with two fields, ``x`` and ``y``:
+
+ >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')])
+ >>> x
+ array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])
+
+ >>> x['x']
+ array([1., 3.])
+
+ View the array as a record array:
+
+ >>> x = x.view(np.recarray)
+
+ >>> x.x
+ array([1., 3.])
+
+ >>> x.y
+ array([2, 4])
+
+ Create a new, empty record array:
+
+ >>> np.recarray((2,),
+ ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
+ rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
+ (3471280, 1.2134086255804012e-316, 0)],
+ dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
+
+ """
+
+ # manually set name and module so that this class's type shows
+ # up as "numpy.recarray" when printed
+ __name__ = 'recarray'
+ __module__ = 'numpy'
+
+ def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
+ formats=None, names=None, titles=None,
+ byteorder=None, aligned=False, order='C'):
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+ if buf is None:
+ self = ndarray.__new__(subtype, shape, (record, descr), order=order)
+ else:
+ self = ndarray.__new__(subtype, shape, (record, descr),
+ buffer=buf, offset=offset,
+ strides=strides, order=order)
+ return self
+
+ def __array_finalize__(self, obj):
+ if self.dtype.type is not record and self.dtype.names is not None:
+ # if self.dtype is not np.record, invoke __setattr__ which will
+ # convert it to a record if it is a void dtype.
+ self.dtype = self.dtype
+
+ def __getattribute__(self, attr):
+ # See if ndarray has this attr, and return it if so. (note that this
+ # means a field with the same name as an ndarray attr cannot be
+ # accessed by attribute).
+ try:
+ return object.__getattribute__(self, attr)
+ except AttributeError: # attr must be a fieldname
+ pass
+
+ # look for a field with this name
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields
+ try:
+ res = fielddict[attr][:2]
+ except (TypeError, KeyError) as e:
+ raise AttributeError("recarray has no attribute %s" % attr) from e
+ obj = self.getfield(*res)
+
+ # At this point obj will always be a recarray, since (see
+ # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
+ # non-structured, convert it to an ndarray. Then if obj is structured
+ # with void type convert it to the same dtype.type (eg to preserve
+ # numpy.record type if present), since nested structured fields do not
+ # inherit type. Don't do this for non-void structures though.
+ if obj.dtype.names is not None:
+ if issubclass(obj.dtype.type, nt.void):
+ return obj.view(dtype=(self.dtype.type, obj.dtype))
+ return obj
+ else:
+ return obj.view(ndarray)
+
+    # Save the dictionary. If the attr is a field name and not in the saved
+    # dictionary, undo any "setting" of the attribute and do a setfield
+    # instead. Thus, you can't create attributes on-the-fly that are field
+    # names.
+ def __setattr__(self, attr, val):
+
+ # Automatically convert (void) structured types to records
+ # (but not non-void structures, subarrays, or non-structured voids)
+ if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
+ val = sb.dtype((record, val))
+
+ newattr = attr not in self.__dict__
+ try:
+ ret = object.__setattr__(self, attr, val)
+ except Exception:
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
+ if attr not in fielddict:
+ raise
+ else:
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
+ if attr not in fielddict:
+ return ret
+ if newattr:
+ # We just added this one or this setattr worked on an
+ # internal attribute.
+ try:
+ object.__delattr__(self, attr)
+ except Exception:
+ return ret
+ try:
+ res = fielddict[attr][:2]
+ except (TypeError, KeyError) as e:
+ raise AttributeError(
+ "record array has no attribute %s" % attr
+ ) from e
+ return self.setfield(val, *res)
+
+ def __getitem__(self, indx):
+ obj = super().__getitem__(indx)
+
+ # copy behavior of getattr, except that here
+ # we might also be returning a single element
+ if isinstance(obj, ndarray):
+ if obj.dtype.names is not None:
+ obj = obj.view(type(self))
+ if issubclass(obj.dtype.type, nt.void):
+ return obj.view(dtype=(self.dtype.type, obj.dtype))
+ return obj
+ else:
+ return obj.view(type=ndarray)
+ else:
+ # return a single element
+ return obj
+
+ def __repr__(self):
+
+ repr_dtype = self.dtype
+ if self.dtype.type is record or not issubclass(self.dtype.type, nt.void):
+ # If this is a full record array (has numpy.record dtype),
+ # or if it has a scalar (non-void) dtype with no records,
+ # represent it using the rec.array function. Since rec.array
+ # converts dtype to a numpy.record for us, convert back
+ # to non-record before printing
+ if repr_dtype.type is record:
+ repr_dtype = sb.dtype((nt.void, repr_dtype))
+ prefix = "rec.array("
+ fmt = 'rec.array(%s,%sdtype=%s)'
+ else:
+ # otherwise represent it using np.array plus a view
+ # This should only happen if the user is playing
+ # strange games with dtypes.
+ prefix = "array("
+ fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'
+
+ # get data/shape string. logic taken from numeric.array_repr
+ if self.size > 0 or self.shape == (0,):
+ lst = sb.array2string(
+ self, separator=', ', prefix=prefix, suffix=',')
+ else:
+ # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(self.shape),)
+
+ lf = '\n'+' '*len(prefix)
+ if _get_legacy_print_mode() <= 113:
+ lf = ' ' + lf # trailing space
+ return fmt % (lst, lf, repr_dtype)
+
+ def field(self, attr, val=None):
+ if isinstance(attr, int):
+ names = ndarray.__getattribute__(self, 'dtype').names
+ attr = names[attr]
+
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields
+
+ res = fielddict[attr][:2]
+
+ if val is None:
+ obj = self.getfield(*res)
+ if obj.dtype.names is not None:
+ return obj
+ return obj.view(ndarray)
+ else:
+ return self.setfield(val, *res)
+
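+# Illustrative sketch of the attribute-lookup rules implemented above
+# (assumed example): an ndarray attribute always wins over a field of the
+# same name, but indexing still reaches the field.
+#
+# >>> a = np.array([(1, 2.0)], dtype=[('x', 'i4'), ('shape', 'f8')])
+# >>> r = a.view(np.recarray)
+# >>> r.x
+# array([1], dtype=int32)
+# >>> r.shape          # the ndarray attribute, not the field
+# (1,)
+# >>> r['shape']       # the field is still reachable by indexing
+# array([2.])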
+
+def _deprecate_shape_0_as_None(shape):
+ if shape == 0:
+ warnings.warn(
+ "Passing `shape=0` to have the shape be inferred is deprecated, "
+ "and in future will be equivalent to `shape=(0,)`. To infer "
+ "the shape and suppress this warning, pass `shape=None` instead.",
+ FutureWarning, stacklevel=3)
+ return None
+ else:
+ return shape
+
+
+@set_module("numpy.rec")
+def fromarrays(arrayList, dtype=None, shape=None, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None):
+    """Create a record array from a (flat) list of arrays.
+
+ Parameters
+ ----------
+ arrayList : list or tuple
+ List of array-like objects (such as lists, tuples,
+ and ndarrays).
+ dtype : data-type, optional
+        Valid dtype for all arrays.
+ shape : int or tuple of ints, optional
+ Shape of the resulting array. If not provided, inferred from
+ ``arrayList[0]``.
+ formats, names, titles, aligned, byteorder :
+ If `dtype` is ``None``, these arguments are passed to
+ `numpy.format_parser` to construct a dtype. See that function for
+ detailed documentation.
+
+ Returns
+ -------
+ np.recarray
+ Record array consisting of given arrayList columns.
+
+ Examples
+ --------
+ >>> x1=np.array([1,2,3,4])
+ >>> x2=np.array(['a','dd','xyz','12'])
+ >>> x3=np.array([1.1,2,3,4])
+ >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
+ >>> print(r[1])
+ (2, 'dd', 2.0) # may vary
+ >>> x1[1]=34
+ >>> r.a
+ array([1, 2, 3, 4])
+
+ >>> x1 = np.array([1, 2, 3, 4])
+ >>> x2 = np.array(['a', 'dd', 'xyz', '12'])
+ >>> x3 = np.array([1.1, 2, 3,4])
+ >>> r = np.core.records.fromarrays(
+ ... [x1, x2, x3],
+ ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)]))
+ >>> r
+ rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ),
+ (4, b'12', 4. )],
+ dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')])
+ """
+
+ arrayList = [sb.asarray(x) for x in arrayList]
+
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+
+ if shape is None:
+ shape = arrayList[0].shape
+ elif isinstance(shape, int):
+ shape = (shape,)
+
+ if formats is None and dtype is None:
+ # go through each object in the list to see if it is an ndarray
+ # and determine the formats.
+ formats = [obj.dtype for obj in arrayList]
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+ _names = descr.names
+
+ # Determine shape from data-type.
+ if len(descr) != len(arrayList):
+ raise ValueError("mismatch between the number of fields "
+ "and the number of arrays")
+
+ d0 = descr[0].shape
+ nn = len(d0)
+ if nn > 0:
+ shape = shape[:-nn]
+
+ _array = recarray(shape, descr)
+
+ # populate the record array (makes a copy)
+ for k, obj in enumerate(arrayList):
+ nn = descr[k].ndim
+ testshape = obj.shape[:obj.ndim - nn]
+ name = _names[k]
+ if testshape != shape:
+ raise ValueError(f'array-shape mismatch in array {k} ("{name}")')
+
+ _array[name] = obj
+
+ return _array
+
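+# Illustrative sketch of the subarray-shape trimming above (assumed
+# example): when a field has its own subarray shape, the trailing
+# dimensions of the input array belong to the field, not to the records.
+#
+# >>> xy = np.zeros((5, 2))
+# >>> r = np.rec.fromarrays([xy], formats=['(2,)f8'], names='xy')
+# >>> r.shape
+# (5,)
+# >>> r['xy'].shape
+# (5, 2)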
+
+@set_module("numpy.rec")
+def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
+ titles=None, aligned=False, byteorder=None):
+ """Create a recarray from a list of records in text form.
+
+ Parameters
+ ----------
+ recList : sequence
+        Data in the same field may be heterogeneous; it will be promoted
+        to the highest data type.
+ dtype : data-type, optional
+        Valid dtype for all arrays.
+    shape : int or tuple of ints, optional
+        Shape of each array.
+ formats, names, titles, aligned, byteorder :
+ If `dtype` is ``None``, these arguments are passed to
+ `numpy.format_parser` to construct a dtype. See that function for
+ detailed documentation.
+
+        If both `formats` and `dtype` are None, then this will auto-detect
+        formats. Use a list of tuples rather than a list of lists for faster
+        processing.
+
+ Returns
+ -------
+ np.recarray
+        Record array consisting of the given recList rows.
+
+ Examples
+ --------
+ >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
+ ... names='col1,col2,col3')
+ >>> print(r[0])
+ (456, 'dbe', 1.2)
+ >>> r.col1
+ array([456, 2])
+ >>> r.col2
+ array(['dbe', 'de'], dtype='<U3')
+ >>> import pickle
+ >>> pickle.loads(pickle.dumps(r))
+ rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)],
+ dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
+ """
+
+ if formats is None and dtype is None: # slower
+ obj = sb.array(recList, dtype=object)
+ arrlist = [sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])]
+ return fromarrays(arrlist, formats=formats, shape=shape, names=names,
+ titles=titles, aligned=aligned, byteorder=byteorder)
+
+ if dtype is not None:
+ descr = sb.dtype((record, dtype))
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+ try:
+ retval = sb.array(recList, dtype=descr)
+ except (TypeError, ValueError):
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+ if shape is None:
+ shape = len(recList)
+ if isinstance(shape, int):
+ shape = (shape,)
+ if len(shape) > 1:
+ raise ValueError("Can only deal with 1-d array.")
+ _array = recarray(shape, descr)
+ for k in range(_array.size):
+ _array[k] = tuple(recList[k])
+        # list of lists instead of list of tuples?
+ # 2018-02-07, 1.14.1
+ warnings.warn(
+ "fromrecords expected a list of tuples, may have received a list "
+ "of lists instead. In the future that will raise an error",
+ FutureWarning, stacklevel=2)
+ return _array
+ else:
+ if shape is not None and retval.shape != shape:
+ retval.shape = shape
+
+ res = retval.view(recarray)
+
+ return res
+
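+# Illustrative sketch of the two paths above (assumed example): a list of
+# tuples is assigned directly, while a list of lists falls back to the
+# element-by-element loop and emits the FutureWarning.
+#
+# >>> np.rec.fromrecords([(1, 1.5)], formats='i4,f8')   # fast path
+# >>> np.rec.fromrecords([[1, 1.5]], formats='i4,f8')   # slow path, warns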
+
+@set_module("numpy.rec")
+def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None):
+    r"""Create a record array from binary data.
+
+ Note that despite the name of this function it does not accept `str`
+ instances.
+
+ Parameters
+ ----------
+ datastring : bytes-like
+ Buffer of binary data
+ dtype : data-type, optional
+ Valid dtype for all arrays
+ shape : int or tuple of ints, optional
+ Shape of each array.
+ offset : int, optional
+ Position in the buffer to start reading from.
+ formats, names, titles, aligned, byteorder :
+ If `dtype` is ``None``, these arguments are passed to
+ `numpy.format_parser` to construct a dtype. See that function for
+ detailed documentation.
+
+ Returns
+ -------
+ np.recarray
+ Record array view into the data in datastring. This will be readonly
+ if `datastring` is readonly.
+
+ See Also
+ --------
+ numpy.frombuffer
+
+ Examples
+ --------
+ >>> a = b'\x01\x02\x03abc'
+ >>> np.core.records.fromstring(a, dtype='u1,u1,u1,S3')
+ rec.array([(1, 2, 3, b'abc')],
+ dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')])
+
+ >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64),
+ ... ('GradeLevel', np.int32)]
+ >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5),
+ ... ('Aadi', 66.6, 6)], dtype=grades_dtype)
+ >>> np.core.records.fromstring(grades_array.tobytes(), dtype=grades_dtype)
+ rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)],
+ dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')])
+
+ >>> s = '\x01\x02\x03abc'
+ >>> np.core.records.fromstring(s, dtype='u1,u1,u1,S3')
+    Traceback (most recent call last):
+ ...
+ TypeError: a bytes-like object is required, not 'str'
+ """
+
+ if dtype is None and formats is None:
+ raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+ itemsize = descr.itemsize
+
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+
+ if shape in (None, -1):
+ shape = (len(datastring) - offset) // itemsize
+
+ _array = recarray(shape, descr, buf=datastring, offset=offset)
+ return _array
+
+def get_remaining_size(fd):
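+    """Return the number of bytes from the current file position to the
+    end of the file, restoring the original position afterwards."""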
+ pos = fd.tell()
+ try:
+ fd.seek(0, 2)
+ return fd.tell() - pos
+ finally:
+ fd.seek(pos, 0)
+
+
+@set_module("numpy.rec")
+def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None):
+    """Create an array from binary file data.
+
+ Parameters
+ ----------
+ fd : str or file type
+        If `fd` is a string or a path-like object, then that file is opened;
+        otherwise it is assumed to be a file object. The file object must
+ support random access (i.e. it must have tell and seek methods).
+ dtype : data-type, optional
+        Valid dtype for all arrays.
+    shape : int or tuple of ints, optional
+        Shape of each array.
+ offset : int, optional
+ Position in the file to start reading from.
+ formats, names, titles, aligned, byteorder :
+ If `dtype` is ``None``, these arguments are passed to
+ `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+
+ Returns
+ -------
+ np.recarray
+        Record array consisting of the data enclosed in the file.
+
+ Examples
+ --------
+ >>> from tempfile import TemporaryFile
+ >>> a = np.empty(10,dtype='f8,i4,a5')
+ >>> a[5] = (0.5,10,'abcde')
+ >>>
+ >>> fd=TemporaryFile()
+ >>> a = a.newbyteorder('<')
+ >>> a.tofile(fd)
+ >>>
+ >>> _ = fd.seek(0)
+ >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
+ ... byteorder='<')
+ >>> print(r[5])
+ (0.5, 10, 'abcde')
+ >>> r.shape
+ (10,)
+ """
+
+ if dtype is None and formats is None:
+ raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
+
+ # NumPy 1.19.0, 2020-01-01
+ shape = _deprecate_shape_0_as_None(shape)
+
+ if shape is None:
+ shape = (-1,)
+ elif isinstance(shape, int):
+ shape = (shape,)
+
+ if hasattr(fd, 'readinto'):
+        # GH issue 2504: fd supports the io.RawIOBase or io.BufferedIOBase
+        # interface (e.g. gzip, BytesIO, BufferedReader); the file is
+        # already opened.
+ ctx = nullcontext(fd)
+ else:
+ # open file
+ ctx = open(os_fspath(fd), 'rb')
+
+ with ctx as fd:
+ if offset > 0:
+ fd.seek(offset, 1)
+ size = get_remaining_size(fd)
+
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+
+ itemsize = descr.itemsize
+
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
+ shapesize = shapeprod * itemsize
+ if shapesize < 0:
+ shape = list(shape)
+ shape[shape.index(-1)] = size // -shapesize
+ shape = tuple(shape)
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
+
+ nbytes = shapeprod * itemsize
+
+ if nbytes > size:
+ raise ValueError(
+ "Not enough bytes left in file for specified shape and type")
+
+ # create the array
+ _array = recarray(shape, descr)
+ nbytesread = fd.readinto(_array.data)
+ if nbytesread != nbytes:
+ raise OSError("Didn't read as many bytes as expected")
+
+ return _array
+
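+# Illustrative sketch of the shape inference above (assumed example): a
+# -1 entry is filled in from the number of bytes left in the file.
+#
+# >>> import io
+# >>> buf = io.BytesIO(np.zeros(6, dtype='i4,f4').tobytes())
+# >>> np.rec.fromfile(buf, formats='i4,f4', shape=(-1,)).shape
+# (6,)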
+
+@set_module("numpy.rec")
+def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None, copy=True):
+ """
+    Construct a record array from a wide variety of objects.
+
+ A general-purpose record array constructor that dispatches to the
+ appropriate `recarray` creation function based on the inputs (see Notes).
+
+ Parameters
+ ----------
+ obj : any
+ Input object. See Notes for details on how various input types are
+ treated.
+ dtype : data-type, optional
+ Valid dtype for array.
+ shape : int or tuple of ints, optional
+ Shape of each array.
+ offset : int, optional
+ Position in the file or buffer to start reading from.
+ strides : tuple of ints, optional
+ Buffer (`buf`) is interpreted according to these strides (strides
+ define how many bytes each array element, row, column, etc.
+ occupy in memory).
+ formats, names, titles, aligned, byteorder :
+ If `dtype` is ``None``, these arguments are passed to
+ `numpy.format_parser` to construct a dtype. See that function for
+ detailed documentation.
+ copy : bool, optional
+ Whether to copy the input object (True), or to use a reference instead.
+ This option only applies when the input is an ndarray or recarray.
+ Defaults to True.
+
+ Returns
+ -------
+ np.recarray
+ Record array created from the specified object.
+
+ Notes
+ -----
+ If `obj` is ``None``, then call the `~numpy.recarray` constructor. If
+ `obj` is a string, then call the `fromstring` constructor. If `obj` is a
+ list or a tuple, then if the first object is an `~numpy.ndarray`, call
+ `fromarrays`, otherwise call `fromrecords`. If `obj` is a
+ `~numpy.recarray`, then make a copy of the data in the recarray
+ (if ``copy=True``) and use the new formats, names, and titles. If `obj`
+ is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then
+ return ``obj.view(recarray)``, making a copy of the data if ``copy=True``.
+
+ Examples
+ --------
+    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+    >>> a
+    array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ >>> np.core.records.array(a)
+ rec.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]],
+ dtype=int32)
+
+ >>> b = [(1, 1), (2, 4), (3, 9)]
+ >>> c = np.core.records.array(b, formats = ['i2', 'f2'], names = ('x', 'y'))
+ >>> c
+ rec.array([(1, 1.0), (2, 4.0), (3, 9.0)],
+ dtype=[('x', '<i2'), ('y', '<f2')])
+
+    >>> c.x
+    array([1, 2, 3], dtype=int16)
+
+    >>> c.y
+    array([1., 4., 9.], dtype=float16)
+
+ >>> r = np.rec.array(['abc','def'], names=['col1','col2'])
+ >>> print(r.col1)
+ abc
+
+ >>> r.col1
+ array('abc', dtype='<U3')
+
+ >>> r.col2
+ array('def', dtype='<U3')
+ """
+
+ if ((isinstance(obj, (type(None), str)) or hasattr(obj, 'readinto')) and
+ formats is None and dtype is None):
+ raise ValueError("Must define formats (or dtype) if object is "
+ "None, string, or an open file")
+
+ kwds = {}
+ if dtype is not None:
+ dtype = sb.dtype(dtype)
+ elif formats is not None:
+ dtype = format_parser(formats, names, titles,
+ aligned, byteorder).dtype
+ else:
+ kwds = {'formats': formats,
+ 'names': names,
+ 'titles': titles,
+ 'aligned': aligned,
+ 'byteorder': byteorder
+ }
+
+ if obj is None:
+ if shape is None:
+ raise ValueError("Must define a shape if obj is None")
+ return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
+
+ elif isinstance(obj, bytes):
+ return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
+
+ elif isinstance(obj, (list, tuple)):
+ if isinstance(obj[0], (tuple, list)):
+ return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
+ else:
+ return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
+
+ elif isinstance(obj, recarray):
+ if dtype is not None and (obj.dtype != dtype):
+ new = obj.view(dtype)
+ else:
+ new = obj
+ if copy:
+ new = new.copy()
+ return new
+
+ elif hasattr(obj, 'readinto'):
+ return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
+
+ elif isinstance(obj, ndarray):
+ if dtype is not None and (obj.dtype != dtype):
+ new = obj.view(dtype)
+ else:
+ new = obj
+ if copy:
+ new = new.copy()
+ return new.view(recarray)
+
+ else:
+ interface = getattr(obj, "__array_interface__", None)
+ if interface is None or not isinstance(interface, dict):
+ raise ValueError("Unknown input type")
+ obj = sb.array(obj)
+ if dtype is not None and (obj.dtype != dtype):
+ obj = obj.view(dtype)
+ return obj.view(recarray)
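+
+# Dispatch sketch for the constructor above (illustrative, assumed
+# examples):
+#
+# >>> np.rec.array(None, dtype='i4,f8', shape=3)       # recarray(...)
+# >>> np.rec.array(b'\x00' * 12, formats='i4,f4,i4')   # fromstring(...)
+# >>> np.rec.array([(1, 2.0)], formats='i4,f8')        # fromrecords(...)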
diff --git a/venv/lib/python3.9/site-packages/numpy/core/records.pyi b/venv/lib/python3.9/site-packages/numpy/core/records.pyi
new file mode 100644
index 00000000..d3bbe0e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/records.pyi
@@ -0,0 +1,234 @@
+import os
+from collections.abc import Sequence, Iterable
+from typing import (
+ Any,
+ TypeVar,
+ overload,
+ Protocol,
+)
+
+from numpy import (
+ format_parser as format_parser,
+ record as record,
+ recarray as recarray,
+ dtype,
+ generic,
+ void,
+ _ByteOrder,
+ _SupportsBuffer,
+)
+
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _ShapeLike,
+ _ArrayLikeVoid_co,
+ _NestedSequence,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+_RecArray = recarray[Any, dtype[_SCT]]
+
+class _SupportsReadInto(Protocol):
+ def seek(self, offset: int, whence: int, /) -> object: ...
+ def tell(self, /) -> int: ...
+ def readinto(self, buffer: memoryview, /) -> int: ...
+
+__all__: list[str]
+
+@overload
+def fromarrays(
+ arrayList: Iterable[ArrayLike],
+ dtype: DTypeLike = ...,
+ shape: None | _ShapeLike = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+) -> _RecArray[Any]: ...
+@overload
+def fromarrays(
+ arrayList: Iterable[ArrayLike],
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def fromrecords(
+ recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
+ dtype: DTypeLike = ...,
+ shape: None | _ShapeLike = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+) -> _RecArray[record]: ...
+@overload
+def fromrecords(
+ recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def fromstring(
+ datastring: _SupportsBuffer,
+ dtype: DTypeLike,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+) -> _RecArray[record]: ...
+@overload
+def fromstring(
+ datastring: _SupportsBuffer,
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def fromfile(
+ fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto,
+ dtype: DTypeLike,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+) -> _RecArray[Any]: ...
+@overload
+def fromfile(
+ fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto,
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+) -> _RecArray[record]: ...
+
+@overload
+def array(
+ obj: _SCT | NDArray[_SCT],
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+ copy: bool = ...,
+) -> _RecArray[_SCT]: ...
+@overload
+def array(
+ obj: ArrayLike,
+ dtype: DTypeLike,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+ copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+ obj: ArrayLike,
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+ copy: bool = ...,
+) -> _RecArray[record]: ...
+@overload
+def array(
+ obj: None,
+ dtype: DTypeLike,
+ shape: _ShapeLike,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+ copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+ obj: None,
+ dtype: None = ...,
+ *,
+ shape: _ShapeLike,
+ offset: int = ...,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+ copy: bool = ...,
+) -> _RecArray[record]: ...
+@overload
+def array(
+ obj: _SupportsReadInto,
+ dtype: DTypeLike,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ formats: None = ...,
+ names: None = ...,
+ titles: None = ...,
+ aligned: bool = ...,
+ byteorder: None = ...,
+ copy: bool = ...,
+) -> _RecArray[Any]: ...
+@overload
+def array(
+ obj: _SupportsReadInto,
+ dtype: None = ...,
+ shape: None | _ShapeLike = ...,
+ offset: int = ...,
+ *,
+ formats: DTypeLike,
+ names: None | str | Sequence[str] = ...,
+ titles: None | str | Sequence[str] = ...,
+ aligned: bool = ...,
+ byteorder: None | _ByteOrder = ...,
+ copy: bool = ...,
+) -> _RecArray[record]: ...
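+
+# Overload resolution sketch (illustrative, assumed behaviour under a type
+# checker such as mypy): an ndarray argument with no `dtype` preserves the
+# scalar type, while the `formats` overloads produce an array of `record`:
+#
+#     reveal_type(array(np.zeros(3, np.float64)))
+#     # -> recarray[Any, dtype[float64]]
+#     reveal_type(array([(1,)], formats="i4", names="a"))
+#     # -> recarray[Any, dtype[record]]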
diff --git a/venv/lib/python3.9/site-packages/numpy/core/setup.py b/venv/lib/python3.9/site-packages/numpy/core/setup.py
new file mode 100644
index 00000000..927defa0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/setup.py
@@ -0,0 +1,1197 @@
+import os
+import sys
+import sysconfig
+import pickle
+import copy
+import warnings
+import textwrap
+import glob
+from os.path import join
+
+from numpy.distutils import log
+from numpy.distutils.msvccompiler import lib_opts_if_msvc
+from distutils.dep_util import newer
+from sysconfig import get_config_var
+from numpy.compat import npy_load_module
+from setup_common import * # noqa: F403
+
+# Set to True to enable relaxed strides checking. This (mostly) means
+# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
+NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
+if not NPY_RELAXED_STRIDES_CHECKING:
+ raise SystemError(
+        "Support for NPY_RELAXED_STRIDES_CHECKING=0 has been removed as of "
+ "NumPy 1.23. This error will eventually be removed entirely.")
+
+# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a
+# bogus value for affected strides in order to help smoke out bad stride usage
+# when relaxed stride checking is enabled.
+NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
+NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
+
+# Set NPY_DISABLE_SVML=1 in the environment to disable the vendored SVML
+# library. This option only has significance on a Linux x86_64 host and is most
+# useful to avoid improperly requiring SVML when cross compiling.
+NPY_DISABLE_SVML = (os.environ.get('NPY_DISABLE_SVML', "0") == "1")
+
+# XXX: ugly, we use a class to avoid calling some expensive functions twice in
+# config.h/numpyconfig.h. I don't see a better way because distutils forces
+# config.h generation inside an Extension class, and as such sharing
+# configuration information between extensions is not easy.
+# Using a pickle-based memoize does not work because config_cmd is an instance
+# method, which cPickle does not like.
+#
+# Use pickle in all cases, as cPickle is gone in python3 and the difference
+# in time is only in build. -- Charles Harris, 2013-03-30
+
+class CallOnceOnly:
+ def __init__(self):
+ self._check_types = None
+ self._check_ieee_macros = None
+ self._check_complex = None
+
+ def check_types(self, *a, **kw):
+ if self._check_types is None:
+ out = check_types(*a, **kw)
+ self._check_types = pickle.dumps(out)
+ else:
+ out = copy.deepcopy(pickle.loads(self._check_types))
+ return out
+
+ def check_ieee_macros(self, *a, **kw):
+ if self._check_ieee_macros is None:
+ out = check_ieee_macros(*a, **kw)
+ self._check_ieee_macros = pickle.dumps(out)
+ else:
+ out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
+ return out
+
+ def check_complex(self, *a, **kw):
+ if self._check_complex is None:
+ out = check_complex(*a, **kw)
+ self._check_complex = pickle.dumps(out)
+ else:
+ out = copy.deepcopy(pickle.loads(self._check_complex))
+ return out
+
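+# A minimal sketch of the pickle-based memoization used above
+# (illustrative, standalone; `memo_once` is a hypothetical helper):
+#
+#     def memo_once(fn):
+#         cache = {}
+#         def wrapper(*a, **kw):
+#             if 'out' not in cache:
+#                 cache['out'] = pickle.dumps(fn(*a, **kw))
+#             return copy.deepcopy(pickle.loads(cache['out']))
+#         return wrapper
+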
+def can_link_svml():
+    """The SVML library is supported only on the x86_64 architecture, and
+    currently only on Linux.
+    """
+ if NPY_DISABLE_SVML:
+ return False
+ platform = sysconfig.get_platform()
+ return ("x86_64" in platform
+ and "linux" in platform
+ and sys.maxsize > 2**31)
+
+def check_svml_submodule(svmlpath):
+ if not os.path.exists(svmlpath + "/README.md"):
+ raise RuntimeError("Missing `SVML` submodule! Run `git submodule "
+ "update --init` to fix this.")
+ return True
+
+def pythonlib_dir():
+ """return path where libpython* is."""
+ if sys.platform == 'win32':
+ return os.path.join(sys.prefix, "libs")
+ else:
+ return get_config_var('LIBDIR')
+
+def is_npy_no_signal():
+ """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
+ header."""
+ return sys.platform == 'win32'
+
+def is_npy_no_smp():
+ """Return True if the NPY_NO_SMP symbol must be defined in public
+ header (when SMP support cannot be reliably enabled)."""
+    # Perhaps a fancier check is in order here, so that threads are only
+    # enabled if there are actually multiple CPUs -- but threaded code can
+    # be nice even on a single CPU, so that long-running calculations don't
+    # block.
+ return 'NPY_NOSMP' in os.environ
+
+def win32_checks(deflist):
+ from numpy.distutils.misc_util import get_build_architecture
+ a = get_build_architecture()
+
+ # Distutils hack on AMD64 on windows
+ print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
+ (a, os.name, sys.platform))
+ if a == 'AMD64':
+ deflist.append('DISTUTILS_USE_SDK')
+
+ # On win32, force long double format string to be 'g', not
+ # 'Lg', since the MS runtime does not support long double whose
+ # size is > sizeof(double)
+ if a == "Intel" or a == "AMD64":
+ deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
+
+def check_math_capabilities(config, ext, moredefs, mathlibs):
+ def check_func(
+ func_name,
+ decl=False,
+ headers=["feature_detection_math.h", "feature_detection_cmath.h"],
+ ):
+ return config.check_func(
+ func_name,
+ libraries=mathlibs,
+ decl=decl,
+ call=True,
+ call_args=FUNC_CALL_ARGS[func_name],
+ headers=headers,
+ )
+
+ def check_funcs_once(
+ funcs_name,
+ headers=["feature_detection_math.h", "feature_detection_cmath.h"],
+ add_to_moredefs=True):
+ call = dict([(f, True) for f in funcs_name])
+ call_args = dict([(f, FUNC_CALL_ARGS[f]) for f in funcs_name])
+ st = config.check_funcs_once(
+ funcs_name,
+ libraries=mathlibs,
+ decl=False,
+ call=call,
+ call_args=call_args,
+ headers=headers,
+ )
+ if st and add_to_moredefs:
+ moredefs.extend([(fname2def(f), 1) for f in funcs_name])
+ return st
+
+ def check_funcs(
+ funcs_name,
+ headers=["feature_detection_math.h", "feature_detection_cmath.h"]):
+ # Use check_funcs_once first, and if it does not work, test func per
+ # func. Return success only if all the functions are available
+ if not check_funcs_once(funcs_name, headers=headers):
+ # Global check failed, check func per func
+ for f in funcs_name:
+ if check_func(f, headers=headers):
+ moredefs.append((fname2def(f), 1))
+ return 0
+ else:
+ return 1
+
+ # GH-14787: Work around GCC<8.4 bug when compiling with AVX512
+ # support on Windows-based platforms
+ def check_gh14787(fn):
+ if fn == 'attribute_target_avx512f':
+ if (sys.platform in ('win32', 'cygwin') and
+ config.check_compiler_gcc() and
+ not config.check_gcc_version_at_least(8, 4)):
+ ext.extra_compile_args.extend(
+ ['-ffixed-xmm%s' % n for n in range(16, 32)])
+
+ #use_msvc = config.check_decl("_MSC_VER")
+ if not check_funcs_once(MANDATORY_FUNCS, add_to_moredefs=False):
+        raise SystemError("One of the required functions to build numpy is not"
+                          " available (the list is %s)." % str(MANDATORY_FUNCS))
+
+ # Standard functions which may not be available and for which we have a
+ # replacement implementation. Note that some of these are C99 functions.
+
+    # XXX: hack to circumvent cpp pollution from python: python puts its
+    # config.h in the public namespace, so we have a clash for the common
+    # functions we test. We remove every function tested by python's
+    # autoconf, hoping their own tests are correct.
+ for f in OPTIONAL_FUNCS_MAYBE:
+ if config.check_decl(fname2def(f), headers=["Python.h"]):
+ OPTIONAL_FILE_FUNCS.remove(f)
+
+ check_funcs(OPTIONAL_FILE_FUNCS, headers=["feature_detection_stdio.h"])
+ check_funcs(OPTIONAL_MISC_FUNCS, headers=["feature_detection_misc.h"])
+
+ for h in OPTIONAL_HEADERS:
+ if config.check_func("", decl=False, call=False, headers=[h]):
+ h = h.replace(".", "_").replace(os.path.sep, "_")
+ moredefs.append((fname2def(h), 1))
+
+ # Try with both "locale.h" and "xlocale.h"
+ locale_headers = [
+ "stdlib.h",
+ "xlocale.h",
+ "feature_detection_locale.h",
+ ]
+ if not check_funcs(OPTIONAL_LOCALE_FUNCS, headers=locale_headers):
+ # It didn't work with xlocale.h, maybe it will work with locale.h?
+ locale_headers[1] = "locale.h"
+ check_funcs(OPTIONAL_LOCALE_FUNCS, headers=locale_headers)
+
+ for tup in OPTIONAL_INTRINSICS:
+ headers = None
+ if len(tup) == 2:
+ f, args, m = tup[0], tup[1], fname2def(tup[0])
+ elif len(tup) == 3:
+ f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])
+ else:
+ f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])
+ if config.check_func(f, decl=False, call=True, call_args=args,
+ headers=headers):
+ moredefs.append((m, 1))
+
+ for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
+ if config.check_gcc_function_attribute(dec, fn):
+ moredefs.append((fname2def(fn), 1))
+ check_gh14787(fn)
+
+ platform = sysconfig.get_platform()
+ if ("x86_64" in platform):
+ for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES_AVX:
+ if config.check_gcc_function_attribute(dec, fn):
+ moredefs.append((fname2def(fn), 1))
+ check_gh14787(fn)
+ for dec, fn, code, header in (
+ OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS_AVX):
+ if config.check_gcc_function_attribute_with_intrinsics(
+ dec, fn, code, header):
+ moredefs.append((fname2def(fn), 1))
+
+ for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
+ if config.check_gcc_variable_attribute(fn):
+ m = fn.replace("(", "_").replace(")", "_")
+ moredefs.append((fname2def(m), 1))
+
+def check_complex(config, mathlibs):
+ priv = []
+ pub = []
+
+ # Check for complex support
+ st = config.check_header('complex.h')
+ if st:
+ priv.append(('HAVE_COMPLEX_H', 1))
+ pub.append(('NPY_USE_C99_COMPLEX', 1))
+
+ for t in C99_COMPLEX_TYPES:
+ st = config.check_type(t, headers=["complex.h"])
+ if st:
+ pub.append(('NPY_HAVE_%s' % type2def(t), 1))
+
+ def check_prec(prec):
+ flist = [f + prec for f in C99_COMPLEX_FUNCS]
+ decl = dict([(f, True) for f in flist])
+ if not config.check_funcs_once(flist, call=decl, decl=decl,
+ libraries=mathlibs):
+ for f in flist:
+ if config.check_func(f, call=True, decl=True,
+ libraries=mathlibs):
+ priv.append((fname2def(f), 1))
+ else:
+ priv.extend([(fname2def(f), 1) for f in flist])
+
+ check_prec('')
+ check_prec('f')
+ check_prec('l')
+
+ return priv, pub
+
+def check_ieee_macros(config):
+ priv = []
+ pub = []
+
+ macros = []
+
+ def _add_decl(f):
+ priv.append(fname2def("decl_%s" % f))
+ pub.append('NPY_%s' % fname2def("decl_%s" % f))
+
+    # XXX: hack to circumvent cpp pollution from python: python puts its
+    # config.h in the public namespace, so we have a clash for the common
+    # functions we test. We remove every function tested by python's
+    # autoconf, hoping their own tests are correct.
+ _macros = ["isnan", "isinf", "signbit", "isfinite"]
+ for f in _macros:
+ py_symbol = fname2def("decl_%s" % f)
+ already_declared = config.check_decl(py_symbol,
+ headers=["Python.h", "math.h"])
+ if already_declared:
+ if config.check_macro_true(py_symbol,
+ headers=["Python.h", "math.h"]):
+ pub.append('NPY_%s' % fname2def("decl_%s" % f))
+ else:
+ macros.append(f)
+    # Normally, isnan and isinf are macros (C99), but some platforms have only
+    # the function, or both the function and the macro version. Check for the
+    # macro only, and define replacement ones if not found.
+ # Note: including Python.h is necessary because it modifies some math.h
+ # definitions
+ for f in macros:
+ st = config.check_decl(f, headers=["Python.h", "math.h"])
+ if st:
+ _add_decl(f)
+
+ return priv, pub
+
+def check_types(config_cmd, ext, build_dir):
+ private_defines = []
+ public_defines = []
+
+ # Expected size (in number of bytes) for each type. This is an
+ # optimization: those are only hints, and an exhaustive search for the size
+ # is done if the hints are wrong.
+ expected = {'short': [2], 'int': [4], 'long': [8, 4],
+ 'float': [4], 'double': [8], 'long double': [16, 12, 8],
+ 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
+ 'off_t': [8, 4]}
+
+ # Check we have the python header (-dev* packages on Linux)
+ result = config_cmd.check_header('Python.h')
+ if not result:
+ python = 'python'
+ if '__pypy__' in sys.builtin_module_names:
+ python = 'pypy'
+ raise SystemError(
+ "Cannot compile 'Python.h'. Perhaps you need to "
+ "install {0}-dev|{0}-devel.".format(python))
+ res = config_cmd.check_header("endian.h")
+ if res:
+ private_defines.append(('HAVE_ENDIAN_H', 1))
+ public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
+ res = config_cmd.check_header("sys/endian.h")
+ if res:
+ private_defines.append(('HAVE_SYS_ENDIAN_H', 1))
+ public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))
+
+ # Check basic types sizes
+ for type in ('short', 'int', 'long'):
+ res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
+ if res:
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
+ else:
+ res = config_cmd.check_type_size(type, expected=expected[type])
+ if res >= 0:
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % type)
+
+ for type in ('float', 'double', 'long double'):
+ already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
+ headers=["Python.h"])
+ res = config_cmd.check_type_size(type, expected=expected[type])
+ if res >= 0:
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
+ if not already_declared and not type == 'long double':
+ private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % type)
+
+ # Compute size of corresponding complex type: used to check that our
+ # definition is binary compatible with C99 complex type (check done at
+ # build time in npy_common.h)
+ complex_def = "struct {%s __x; %s __y;}" % (type, type)
+ res = config_cmd.check_type_size(complex_def,
+ expected=[2 * x for x in expected[type]])
+ if res >= 0:
+ public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % complex_def)
+
+ for type in ('Py_intptr_t', 'off_t'):
+ res = config_cmd.check_type_size(type, headers=["Python.h"],
+ library_dirs=[pythonlib_dir()],
+ expected=expected[type])
+
+ if res >= 0:
+ private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % type)
+
+ # We check declaration AND type because that's how distutils does it.
+ if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
+ res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
+ library_dirs=[pythonlib_dir()],
+ expected=expected['PY_LONG_LONG'])
+ if res >= 0:
+ private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
+
+ res = config_cmd.check_type_size('long long',
+ expected=expected['long long'])
+ if res >= 0:
+ #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
+ public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
+ else:
+ raise SystemError("Checking sizeof (%s) failed !" % 'long long')
+
+ if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
+ raise RuntimeError(
+            "Configuration without CHAR_BIT is not supported"
+            "; please contact the maintainers")
+
+ return private_defines, public_defines
+
+def check_mathlib(config_cmd):
+ # Testing the C math library
+ mathlibs = []
+ mathlibs_choices = [[], ["m"], ["cpml"]]
+ mathlib = os.environ.get("MATHLIB")
+ if mathlib:
+ mathlibs_choices.insert(0, mathlib.split(","))
+ for libs in mathlibs_choices:
+ if config_cmd.check_func(
+ "log",
+ libraries=libs,
+ call_args="0",
+ decl="double log(double);",
+ call=True
+ ):
+ mathlibs = libs
+ break
+ else:
+ raise RuntimeError(
+ "math library missing; rerun setup.py after setting the "
+ "MATHLIB env variable"
+ )
+ return mathlibs
+
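+# Usage note (illustrative): the MATHLIB environment variable takes
+# precedence over the built-in candidates, e.g.
+#
+#     MATHLIB=m,cpml python setup.py build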
+
+def visibility_define(config):
+ """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
+ string)."""
+ hide = '__attribute__((visibility("hidden")))'
+ if config.check_gcc_function_attribute(hide, 'hideme'):
+ return hide
+ else:
+ return ''
+
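+# The probe above effectively checks that a declaration like the following
+# compiles (illustrative C snippet):
+#
+#     __attribute__((visibility("hidden"))) int hideme(void);
+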
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import (Configuration, dot_join,
+ exec_mod_from_location)
+ from numpy.distutils.system_info import (get_info, blas_opt_info,
+ lapack_opt_info)
+ from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
+ from numpy.version import release as is_released
+
+ config = Configuration('core', parent_package, top_path)
+ local_dir = config.local_path
+ codegen_dir = join(local_dir, 'code_generators')
+
+ # Check whether we have a mismatch between the set C API VERSION and the
+ # actual C API VERSION. Will raise a MismatchCAPIError if so.
+ check_api_version(C_API_VERSION, codegen_dir)
+
+ generate_umath_py = join(codegen_dir, 'generate_umath.py')
+ n = dot_join(config.name, 'generate_umath')
+ generate_umath = exec_mod_from_location('_'.join(n.split('.')),
+ generate_umath_py)
+
+ header_dir = 'include/numpy' # this is relative to config.path_in_package
+
+ cocache = CallOnceOnly()
+
+ def generate_config_h(ext, build_dir):
+ target = join(build_dir, header_dir, 'config.h')
+ d = os.path.dirname(target)
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ if newer(__file__, target):
+ config_cmd = config.get_config_cmd()
+ log.info('Generating %s', target)
+
+ # Check sizeof
+ moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
+
+ # Check math library and C99 math funcs availability
+ mathlibs = check_mathlib(config_cmd)
+ moredefs.append(('MATHLIB', ','.join(mathlibs)))
+
+ check_math_capabilities(config_cmd, ext, moredefs, mathlibs)
+ moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
+ moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
+
+ # Signal check
+ if is_npy_no_signal():
+ moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
+
+ # Windows checks
+ if sys.platform == 'win32' or os.name == 'nt':
+ win32_checks(moredefs)
+
+ # C99 restrict keyword
+ moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
+
+ # Inline check
+ inline = config_cmd.check_inline()
+
+ if can_link_svml():
+ moredefs.append(('NPY_CAN_LINK_SVML', 1))
+
+ # Use bogus stride debug aid to flush out bugs where users use
+ # strides of dimensions with length 1 to index a full contiguous
+ # array.
+ if NPY_RELAXED_STRIDES_DEBUG:
+ moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
+ else:
+ moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 0))
+
+ # Get long double representation
+ rep = check_long_double_representation(config_cmd)
+ moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
+
+ if check_for_right_shift_internal_compiler_error(config_cmd):
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
+
+ # Generate the config.h file from moredefs
+ with open(target, 'w') as target_f:
+ if sys.platform == 'darwin':
+ target_f.write(
+ "/* may be overridden by numpyconfig.h on darwin */\n"
+ )
+ for d in moredefs:
+ if isinstance(d, str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0], d[1]))
+
+ # define inline to our keyword, or nothing
+ target_f.write('#ifndef __cplusplus\n')
+ if inline == 'inline':
+ target_f.write('/* #undef inline */\n')
+ else:
+ target_f.write('#define inline %s\n' % inline)
+ target_f.write('#endif\n')
+
+ # add the guard to make sure config.h is never included directly,
+ # but always through npy_config.h
+ target_f.write(textwrap.dedent("""
+ #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_
+ #error config.h should never be included directly, include npy_config.h instead
+ #endif
+ """))
+
+ log.info('File: %s' % target)
+ with open(target) as target_f:
+ log.info(target_f.read())
+ log.info('EOF')
+ else:
+ mathlibs = []
+ with open(target) as target_f:
+ for line in target_f:
+ s = '#define MATHLIB'
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
+
+            # Ugly: this can be called within a library and not an extension,
+            # in which case there is no libraries attribute (and none is
+            # needed).
+ if hasattr(ext, 'libraries'):
+ ext.libraries.extend(mathlibs)
+
+ incl_dir = os.path.dirname(target)
+ if incl_dir not in config.numpy_include_dirs:
+ config.numpy_include_dirs.append(incl_dir)
+
+ return target
+
+ def generate_numpyconfig_h(ext, build_dir):
+        """Depends on config.h: generate_config_h has to be called first."""
+ # put common include directory in build_dir on search path
+ # allows using code generation in headers
+ config.add_include_dirs(join(build_dir, "src", "common"))
+ config.add_include_dirs(join(build_dir, "src", "npymath"))
+
+ target = join(build_dir, header_dir, '_numpyconfig.h')
+ d = os.path.dirname(target)
+ if not os.path.exists(d):
+ os.makedirs(d)
+ if newer(__file__, target):
+ config_cmd = config.get_config_cmd()
+ log.info('Generating %s', target)
+
+ # Check sizeof
+ ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
+
+ if is_npy_no_signal():
+ moredefs.append(('NPY_NO_SIGNAL', 1))
+
+ if is_npy_no_smp():
+ moredefs.append(('NPY_NO_SMP', 1))
+ else:
+ moredefs.append(('NPY_NO_SMP', 0))
+
+ mathlibs = check_mathlib(config_cmd)
+ moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
+ moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
+
+ if NPY_RELAXED_STRIDES_DEBUG:
+ moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))
+
+ # Check whether we can use inttypes (C99) formats
+ if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
+ moredefs.append(('NPY_USE_C99_FORMATS', 1))
+
+ # visibility check
+ hidden_visibility = visibility_define(config_cmd)
+ moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
+
+ # Add the C API/ABI versions
+ moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
+ moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
+
+ # Add moredefs to header
+ with open(target, 'w') as target_f:
+ for d in moredefs:
+ if isinstance(d, str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0], d[1]))
+
+ # Define __STDC_FORMAT_MACROS
+ target_f.write(textwrap.dedent("""
+ #ifndef __STDC_FORMAT_MACROS
+ #define __STDC_FORMAT_MACROS 1
+ #endif
+ """))
+
+ # Dump the numpyconfig.h header to stdout
+ log.info('File: %s' % target)
+ with open(target) as target_f:
+ log.info(target_f.read())
+ log.info('EOF')
+ config.add_data_files((header_dir, target))
+ return target
+
+ def generate_api_func(module_name):
+ def generate_api(ext, build_dir):
+ script = join(codegen_dir, module_name + '.py')
+ sys.path.insert(0, codegen_dir)
+ try:
+ m = __import__(module_name)
+ log.info('executing %s', script)
+ h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
+ finally:
+ del sys.path[0]
+ config.add_data_files((header_dir, h_file),
+ (header_dir, doc_file))
+ return (h_file,)
+ return generate_api
+
+ generate_numpy_api = generate_api_func('generate_numpy_api')
+ generate_ufunc_api = generate_api_func('generate_ufunc_api')
+
+ config.add_include_dirs(join(local_dir, "src", "common"))
+ config.add_include_dirs(join(local_dir, "src"))
+ config.add_include_dirs(join(local_dir))
+
+ config.add_data_dir('include/numpy')
+ config.add_include_dirs(join('src', 'npymath'))
+ config.add_include_dirs(join('src', 'multiarray'))
+ config.add_include_dirs(join('src', 'umath'))
+ config.add_include_dirs(join('src', 'npysort'))
+ config.add_include_dirs(join('src', '_simd'))
+
+    config.add_define_macros([("NPY_INTERNAL_BUILD", "1")])  # this macro indicates that a NumPy build is in progress
+ config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
+ if sys.platform[:3] == "aix":
+ config.add_define_macros([("_LARGE_FILES", None)])
+ else:
+ config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
+ config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
+ config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
+
+ config.numpy_include_dirs.extend(config.paths('include'))
+
+ deps = [join('src', 'npymath', '_signbit.c'),
+ join('include', 'numpy', '*object.h'),
+ join(codegen_dir, 'genapi.py'),
+ ]
+
+ #######################################################################
+ # npymath library #
+ #######################################################################
+
+ subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
+
+ def get_mathlib_info(*args):
+ # Another ugly hack: the mathlib info is known once build_src is run,
+ # but we cannot use add_installed_pkg_config here either, so we only
+ # update the substitution dictionary during npymath build
+ config_cmd = config.get_config_cmd()
+ # Check that the toolchain works, to fail early if it doesn't
+ # (avoid late errors with MATHLIB which are confusing if the
+ # compiler does not work).
+ for lang, test_code, note in (
+ ('c', 'int main(void) { return 0;}', ''),
+ ('c++', (
+ 'int main(void)'
+ '{ auto x = 0.0; return static_cast<int>(x); }'
+ ), (
+ 'note: A compiler with support for C++11 language '
+ 'features is required.'
+ )
+ ),
+ ):
+ is_cpp = lang == 'c++'
+ if is_cpp:
+                # This is a workaround to get rid of invalid C++ flags
+                # without making big changes to config. C is tested first,
+                # so the compiler should be available here.
+ bk_c = config_cmd.compiler
+ config_cmd.compiler = bk_c.cxx_compiler()
+
+                # Check that the Linux compiler actually supports the default flags
+ if hasattr(config_cmd.compiler, 'compiler'):
+ config_cmd.compiler.compiler.extend(NPY_CXX_FLAGS)
+ config_cmd.compiler.compiler_so.extend(NPY_CXX_FLAGS)
+
+ st = config_cmd.try_link(test_code, lang=lang)
+ if not st:
+ # rerun the failing command in verbose mode
+ config_cmd.compiler.verbose = True
+ config_cmd.try_link(test_code, lang=lang)
+ raise RuntimeError(
+ f"Broken toolchain: cannot link a simple {lang.upper()} "
+ f"program. {note}"
+ )
+ if is_cpp:
+ config_cmd.compiler = bk_c
+ mlibs = check_mathlib(config_cmd)
+
+ posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
+ msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
+ subst_dict["posix_mathlib"] = posix_mlib
+ subst_dict["msvc_mathlib"] = msvc_mlib
+
+ npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),
+ join('src', 'npymath', 'npy_math.c'),
+ # join('src', 'npymath', 'ieee754.cpp'),
+ join('src', 'npymath', 'ieee754.c.src'),
+ join('src', 'npymath', 'npy_math_complex.c.src'),
+ join('src', 'npymath', 'halffloat.c'),
+ ]
+
+ config.add_installed_library('npymath',
+ sources=npymath_sources + [get_mathlib_info],
+ install_dir='lib',
+ build_info={
+ 'include_dirs' : [], # empty list required for creating npy_math_internal.h
+ 'extra_compiler_args': [lib_opts_if_msvc],
+ })
+ config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
+ subst_dict)
+ config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
+ subst_dict)
+
+ #######################################################################
+ # multiarray_tests module #
+ #######################################################################
+
+ config.add_extension('_multiarray_tests',
+ sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
+ join('src', 'common', 'mem_overlap.c'),
+ join('src', 'common', 'npy_argparse.c'),
+ join('src', 'common', 'npy_hashtable.c')],
+ depends=[join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_argparse.h'),
+ join('src', 'common', 'npy_hashtable.h'),
+ join('src', 'common', 'npy_extint128.h')],
+ libraries=['npymath'])
+
+ #######################################################################
+ # _multiarray_umath module - common part #
+ #######################################################################
+
+ common_deps = [
+ join('src', 'common', 'dlpack', 'dlpack.h'),
+ join('src', 'common', 'array_assign.h'),
+ join('src', 'common', 'binop_override.h'),
+ join('src', 'common', 'cblasfuncs.h'),
+ join('src', 'common', 'lowlevel_strided_loops.h'),
+ join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_argparse.h'),
+ join('src', 'common', 'npy_cblas.h'),
+ join('src', 'common', 'npy_config.h'),
+ join('src', 'common', 'npy_ctypes.h'),
+ join('src', 'common', 'npy_dlpack.h'),
+ join('src', 'common', 'npy_extint128.h'),
+ join('src', 'common', 'npy_import.h'),
+ join('src', 'common', 'npy_hashtable.h'),
+ join('src', 'common', 'npy_longdouble.h'),
+ join('src', 'common', 'npy_svml.h'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'common', 'ucsnarrow.h'),
+ join('src', 'common', 'ufunc_override.h'),
+ join('src', 'common', 'umathmodule.h'),
+ join('src', 'common', 'numpyos.h'),
+ join('src', 'common', 'npy_cpu_dispatch.h'),
+ join('src', 'common', 'simd', 'simd.h'),
+ ]
+
+ common_src = [
+ join('src', 'common', 'array_assign.c'),
+ join('src', 'common', 'mem_overlap.c'),
+ join('src', 'common', 'npy_argparse.c'),
+ join('src', 'common', 'npy_hashtable.c'),
+ join('src', 'common', 'npy_longdouble.c'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'common', 'ucsnarrow.c'),
+ join('src', 'common', 'ufunc_override.c'),
+ join('src', 'common', 'numpyos.c'),
+ join('src', 'common', 'npy_cpu_features.c'),
+ ]
+
+ if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
+ blas_info = get_info('blas_ilp64_opt', 2)
+ else:
+ blas_info = get_info('blas_opt', 0)
+
+ have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
+
+ if have_blas:
+ extra_info = blas_info
+ # These files are also in MANIFEST.in so that they are always in
+ # the source distribution independently of HAVE_CBLAS.
+ common_src.extend([join('src', 'common', 'cblasfuncs.c'),
+ join('src', 'common', 'python_xerbla.c'),
+ ])
+ else:
+ extra_info = {}
+
+ #######################################################################
+ # _multiarray_umath module - multiarray part #
+ #######################################################################
+
+ multiarray_deps = [
+ join('src', 'multiarray', 'abstractdtypes.h'),
+ join('src', 'multiarray', 'arrayobject.h'),
+ join('src', 'multiarray', 'arraytypes.h.src'),
+ join('src', 'multiarray', 'arrayfunction_override.h'),
+ join('src', 'multiarray', 'array_coercion.h'),
+ join('src', 'multiarray', 'array_method.h'),
+ join('src', 'multiarray', 'npy_buffer.h'),
+ join('src', 'multiarray', 'calculation.h'),
+ join('src', 'multiarray', 'common.h'),
+ join('src', 'multiarray', 'common_dtype.h'),
+ join('src', 'multiarray', 'convert_datatype.h'),
+ join('src', 'multiarray', 'convert.h'),
+ join('src', 'multiarray', 'conversion_utils.h'),
+ join('src', 'multiarray', 'ctors.h'),
+ join('src', 'multiarray', 'descriptor.h'),
+ join('src', 'multiarray', 'dtypemeta.h'),
+ join('src', 'multiarray', 'dtype_transfer.h'),
+ join('src', 'multiarray', 'dragon4.h'),
+ join('src', 'multiarray', 'einsum_debug.h'),
+ join('src', 'multiarray', 'einsum_sumprod.h'),
+ join('src', 'multiarray', 'experimental_public_dtype_api.h'),
+ join('src', 'multiarray', 'getset.h'),
+ join('src', 'multiarray', 'hashdescr.h'),
+ join('src', 'multiarray', 'iterators.h'),
+ join('src', 'multiarray', 'legacy_dtype_implementation.h'),
+ join('src', 'multiarray', 'mapping.h'),
+ join('src', 'multiarray', 'methods.h'),
+ join('src', 'multiarray', 'multiarraymodule.h'),
+ join('src', 'multiarray', 'nditer_impl.h'),
+ join('src', 'multiarray', 'number.h'),
+ join('src', 'multiarray', 'refcount.h'),
+ join('src', 'multiarray', 'scalartypes.h'),
+ join('src', 'multiarray', 'sequence.h'),
+ join('src', 'multiarray', 'shape.h'),
+ join('src', 'multiarray', 'strfuncs.h'),
+ join('src', 'multiarray', 'typeinfo.h'),
+ join('src', 'multiarray', 'usertypes.h'),
+ join('src', 'multiarray', 'vdot.h'),
+ join('src', 'multiarray', 'textreading', 'readtext.h'),
+ join('include', 'numpy', 'arrayobject.h'),
+ join('include', 'numpy', '_neighborhood_iterator_imp.h'),
+ join('include', 'numpy', 'npy_endian.h'),
+ join('include', 'numpy', 'arrayscalars.h'),
+ join('include', 'numpy', 'noprefix.h'),
+ join('include', 'numpy', 'npy_interrupt.h'),
+ join('include', 'numpy', 'npy_3kcompat.h'),
+ join('include', 'numpy', 'npy_math.h'),
+ join('include', 'numpy', 'halffloat.h'),
+ join('include', 'numpy', 'npy_common.h'),
+ join('include', 'numpy', 'npy_os.h'),
+ join('include', 'numpy', 'utils.h'),
+ join('include', 'numpy', 'ndarrayobject.h'),
+ join('include', 'numpy', 'npy_cpu.h'),
+ join('include', 'numpy', 'numpyconfig.h'),
+ join('include', 'numpy', 'ndarraytypes.h'),
+ join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
+ # add library sources as distutils does not consider library
+ # dependencies
+ ] + npymath_sources
+
+ multiarray_src = [
+ join('src', 'multiarray', 'abstractdtypes.c'),
+ join('src', 'multiarray', 'alloc.c'),
+ join('src', 'multiarray', 'arrayobject.c'),
+ join('src', 'multiarray', 'arraytypes.h.src'),
+ join('src', 'multiarray', 'arraytypes.c.src'),
+ join('src', 'multiarray', 'argfunc.dispatch.c.src'),
+ join('src', 'multiarray', 'array_coercion.c'),
+ join('src', 'multiarray', 'array_method.c'),
+ join('src', 'multiarray', 'array_assign_scalar.c'),
+ join('src', 'multiarray', 'array_assign_array.c'),
+ join('src', 'multiarray', 'arrayfunction_override.c'),
+ join('src', 'multiarray', 'buffer.c'),
+ join('src', 'multiarray', 'calculation.c'),
+ join('src', 'multiarray', 'compiled_base.c'),
+ join('src', 'multiarray', 'common.c'),
+ join('src', 'multiarray', 'common_dtype.c'),
+ join('src', 'multiarray', 'convert.c'),
+ join('src', 'multiarray', 'convert_datatype.c'),
+ join('src', 'multiarray', 'conversion_utils.c'),
+ join('src', 'multiarray', 'ctors.c'),
+ join('src', 'multiarray', 'datetime.c'),
+ join('src', 'multiarray', 'datetime_strings.c'),
+ join('src', 'multiarray', 'datetime_busday.c'),
+ join('src', 'multiarray', 'datetime_busdaycal.c'),
+ join('src', 'multiarray', 'descriptor.c'),
+ join('src', 'multiarray', 'dlpack.c'),
+ join('src', 'multiarray', 'dtypemeta.c'),
+ join('src', 'multiarray', 'dragon4.c'),
+ join('src', 'multiarray', 'dtype_transfer.c'),
+ join('src', 'multiarray', 'einsum.c.src'),
+ join('src', 'multiarray', 'einsum_sumprod.c.src'),
+ join('src', 'multiarray', 'experimental_public_dtype_api.c'),
+ join('src', 'multiarray', 'flagsobject.c'),
+ join('src', 'multiarray', 'getset.c'),
+ join('src', 'multiarray', 'hashdescr.c'),
+ join('src', 'multiarray', 'item_selection.c'),
+ join('src', 'multiarray', 'iterators.c'),
+ join('src', 'multiarray', 'legacy_dtype_implementation.c'),
+ join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
+ join('src', 'multiarray', 'mapping.c'),
+ join('src', 'multiarray', 'methods.c'),
+ join('src', 'multiarray', 'multiarraymodule.c'),
+ join('src', 'multiarray', 'nditer_templ.c.src'),
+ join('src', 'multiarray', 'nditer_api.c'),
+ join('src', 'multiarray', 'nditer_constr.c'),
+ join('src', 'multiarray', 'nditer_pywrap.c'),
+ join('src', 'multiarray', 'number.c'),
+ join('src', 'multiarray', 'refcount.c'),
+ join('src', 'multiarray', 'sequence.c'),
+ join('src', 'multiarray', 'shape.c'),
+ join('src', 'multiarray', 'scalarapi.c'),
+ join('src', 'multiarray', 'scalartypes.c.src'),
+ join('src', 'multiarray', 'strfuncs.c'),
+ join('src', 'multiarray', 'temp_elide.c'),
+ join('src', 'multiarray', 'typeinfo.c'),
+ join('src', 'multiarray', 'usertypes.c'),
+ join('src', 'multiarray', 'vdot.c'),
+ join('src', 'common', 'npy_sort.h.src'),
+ join('src', 'npysort', 'x86-qsort.dispatch.cpp'),
+ join('src', 'npysort', 'quicksort.cpp'),
+ join('src', 'npysort', 'mergesort.cpp'),
+ join('src', 'npysort', 'timsort.cpp'),
+ join('src', 'npysort', 'heapsort.cpp'),
+ join('src', 'npysort', 'radixsort.cpp'),
+ join('src', 'common', 'npy_partition.h'),
+ join('src', 'npysort', 'selection.cpp'),
+ join('src', 'common', 'npy_binsearch.h'),
+ join('src', 'npysort', 'binsearch.cpp'),
+ join('src', 'multiarray', 'textreading', 'conversions.c'),
+ join('src', 'multiarray', 'textreading', 'field_types.c'),
+ join('src', 'multiarray', 'textreading', 'growth.c'),
+ join('src', 'multiarray', 'textreading', 'readtext.c'),
+ join('src', 'multiarray', 'textreading', 'rows.c'),
+ join('src', 'multiarray', 'textreading', 'stream_pyobject.c'),
+ join('src', 'multiarray', 'textreading', 'str_to_int.c'),
+ join('src', 'multiarray', 'textreading', 'tokenize.cpp'),
+ # Remove this once the scipy macOS arm64 build correctly
+ # links to the arm64 npymath library,
+ # see gh-22673
+ join('src', 'npymath', 'arm64_exports.c'),
+ ]
+
+ #######################################################################
+ # _multiarray_umath module - umath part #
+ #######################################################################
+
+ def generate_umath_c(ext, build_dir):
+ target = join(build_dir, header_dir, '__umath_generated.c')
+ dir = os.path.dirname(target)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ script = generate_umath_py
+ if newer(script, target):
+ with open(target, 'w') as f:
+ f.write(generate_umath.make_code(generate_umath.defdict,
+ generate_umath.__file__))
+ return []
+
+ def generate_umath_doc_header(ext, build_dir):
+ from numpy.distutils.misc_util import exec_mod_from_location
+
+ target = join(build_dir, header_dir, '_umath_doc_generated.h')
+ dir = os.path.dirname(target)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+
+ generate_umath_doc_py = join(codegen_dir, 'generate_umath_doc.py')
+ if newer(generate_umath_doc_py, target):
+ n = dot_join(config.name, 'generate_umath_doc')
+ generate_umath_doc = exec_mod_from_location(
+ '_'.join(n.split('.')), generate_umath_doc_py)
+ generate_umath_doc.write_code(target)
+
+ umath_src = [
+ join('src', 'umath', 'umathmodule.c'),
+ join('src', 'umath', 'reduction.c'),
+ join('src', 'umath', 'funcs.inc.src'),
+ join('src', 'umath', 'simd.inc.src'),
+ join('src', 'umath', 'loops.h.src'),
+ join('src', 'umath', 'loops_utils.h.src'),
+ join('src', 'umath', 'loops.c.src'),
+ join('src', 'umath', 'loops_unary_fp.dispatch.c.src'),
+ join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'),
+ join('src', 'umath', 'loops_arithmetic.dispatch.c.src'),
+ join('src', 'umath', 'loops_minmax.dispatch.c.src'),
+ join('src', 'umath', 'loops_trigonometric.dispatch.c.src'),
+ join('src', 'umath', 'loops_umath_fp.dispatch.c.src'),
+ join('src', 'umath', 'loops_exponent_log.dispatch.c.src'),
+ join('src', 'umath', 'loops_hyperbolic.dispatch.c.src'),
+ join('src', 'umath', 'loops_modulo.dispatch.c.src'),
+ join('src', 'umath', 'loops_comparison.dispatch.c.src'),
+ join('src', 'umath', 'matmul.h.src'),
+ join('src', 'umath', 'matmul.c.src'),
+ join('src', 'umath', 'clip.h'),
+ join('src', 'umath', 'clip.cpp'),
+ join('src', 'umath', 'dispatching.c'),
+ join('src', 'umath', 'legacy_array_method.c'),
+ join('src', 'umath', 'wrapping_array_method.c'),
+ join('src', 'umath', 'ufunc_object.c'),
+ join('src', 'umath', 'extobj.c'),
+ join('src', 'umath', 'scalarmath.c.src'),
+ join('src', 'umath', 'ufunc_type_resolution.c'),
+ join('src', 'umath', 'override.c'),
+ join('src', 'umath', 'string_ufuncs.cpp'),
+ # For testing. Eventually, should use public API and be separate:
+ join('src', 'umath', '_scaled_float_dtype.c'),
+ ]
+
+ umath_deps = [
+ generate_umath_py,
+ join('include', 'numpy', 'npy_math.h'),
+ join('include', 'numpy', 'halffloat.h'),
+ join('src', 'multiarray', 'common.h'),
+ join('src', 'multiarray', 'number.h'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'umath', 'simd.inc.src'),
+ join('src', 'umath', 'override.h'),
+ join(codegen_dir, 'generate_ufunc_api.py'),
+ join(codegen_dir, 'ufunc_docstrings.py'),
+ ]
+
+ svml_path = join('numpy', 'core', 'src', 'umath', 'svml')
+ svml_objs = []
+ # we have converted the following into universal intrinsics, bringing
+ # their performance benefits to all platforms, not just AVX-512 on
+ # Linux, with no performance/accuracy regression; in fact the result
+ # is better performance and, above all, more maintainable code.
+ svml_filter = (
+ )
+ if can_link_svml() and check_svml_submodule(svml_path):
+ svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True)
+ svml_objs = [o for o in svml_objs if not o.endswith(svml_filter)]
+
+ # The ordering of names returned by glob is undefined, so we sort
+ # to make builds reproducible.
+ svml_objs.sort()
+
+ config.add_extension('_multiarray_umath',
+ # Forcing C language even though we have C++ sources.
+ # This forces use of the C linker and avoids linking the C++ runtime.
+ language = 'c',
+ sources=multiarray_src + umath_src +
+ common_src +
+ [generate_config_h,
+ generate_numpyconfig_h,
+ generate_numpy_api,
+ join(codegen_dir, 'generate_numpy_api.py'),
+ join('*.py'),
+ generate_umath_c,
+ generate_umath_doc_header,
+ generate_ufunc_api,
+ ],
+ depends=deps + multiarray_deps + umath_deps +
+ common_deps,
+ libraries=['npymath'],
+ extra_objects=svml_objs,
+ extra_info=extra_info,
+ extra_cxx_compile_args=NPY_CXX_FLAGS)
+
+ #######################################################################
+ # umath_tests module #
+ #######################################################################
+
+ config.add_extension('_umath_tests', sources=[
+ join('src', 'umath', '_umath_tests.c.src'),
+ join('src', 'umath', '_umath_tests.dispatch.c'),
+ join('src', 'common', 'npy_cpu_features.c'),
+ ])
+
+ #######################################################################
+ # custom rational dtype module #
+ #######################################################################
+
+ config.add_extension('_rational_tests',
+ sources=[join('src', 'umath', '_rational_tests.c')])
+
+ #######################################################################
+ # struct_ufunc_test module #
+ #######################################################################
+
+ config.add_extension('_struct_ufunc_tests',
+ sources=[join('src', 'umath', '_struct_ufunc_tests.c')])
+
+
+ #######################################################################
+ # operand_flag_tests module #
+ #######################################################################
+
+ config.add_extension('_operand_flag_tests',
+ sources=[join('src', 'umath', '_operand_flag_tests.c')])
+
+ #######################################################################
+ # SIMD module #
+ #######################################################################
+
+ config.add_extension('_simd',
+ sources=[
+ join('src', 'common', 'npy_cpu_features.c'),
+ join('src', '_simd', '_simd.c'),
+ join('src', '_simd', '_simd_inc.h.src'),
+ join('src', '_simd', '_simd_data.inc.src'),
+ join('src', '_simd', '_simd.dispatch.c.src'),
+ ], depends=[
+ join('src', 'common', 'npy_cpu_dispatch.h'),
+ join('src', 'common', 'simd', 'simd.h'),
+ join('src', '_simd', '_simd.h'),
+ join('src', '_simd', '_simd_inc.h.src'),
+ join('src', '_simd', '_simd_data.inc.src'),
+ join('src', '_simd', '_simd_arg.inc'),
+ join('src', '_simd', '_simd_convert.inc'),
+ join('src', '_simd', '_simd_easyintrin.inc'),
+ join('src', '_simd', '_simd_vector.inc'),
+ ],
+ libraries=['npymath']
+ )
+
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
+ config.add_data_dir('tests/examples')
+ config.add_data_files('*.pyi')
+
+ config.make_svn_version_py()
+
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/setup_common.py b/venv/lib/python3.9/site-packages/numpy/core/setup_common.py
new file mode 100644
index 00000000..0512457f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/setup_common.py
@@ -0,0 +1,476 @@
+# Code common to build tools
+import copy
+import pathlib
+import sys
+import textwrap
+
+from numpy.distutils.misc_util import mingw32
+
+
+#-------------------
+# Versioning support
+#-------------------
+# How to change C_API_VERSION?
+# - increase the C_API_VERSION value
+# - record the hash for the new C API with the cversions.py script
+# and add the hash to cversions.txt
+# The hash values are used to remind developers when the C API number was
+# not updated: a mismatch raises a MismatchCAPIError during the build.
+
+# Binary compatibility version number. This number is increased whenever the
+# C-API is changed such that binary compatibility is broken, i.e. whenever a
+# recompile of extension modules is needed.
+C_ABI_VERSION = 0x01000009
+
+# Minor API version. This number is increased whenever a change is made to the
+# C-API -- whether it breaks binary compatibility or not. Some changes, such
+# as adding a function pointer to the end of the function table, can be made
+# without breaking binary compatibility. In this case, only the C_API_VERSION
+# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
+# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
+#
+# The version needs to be kept in sync with that in cversions.txt.
+#
+# 0x00000008 - 1.7.x
+# 0x00000009 - 1.8.x
+# 0x00000009 - 1.9.x
+# 0x0000000a - 1.10.x
+# 0x0000000a - 1.11.x
+# 0x0000000a - 1.12.x
+# 0x0000000b - 1.13.x
+# 0x0000000c - 1.14.x
+# 0x0000000c - 1.15.x
+# 0x0000000d - 1.16.x
+# 0x0000000d - 1.19.x
+# 0x0000000e - 1.20.x
+# 0x0000000e - 1.21.x
+# 0x0000000f - 1.22.x
+# 0x00000010 - 1.23.x
+# 0x00000010 - 1.24.x
+C_API_VERSION = 0x00000010
+
+class MismatchCAPIError(ValueError):
+ pass
+
+
+def get_api_versions(apiversion, codegen_dir):
+ """
+ Return current C API checksum and the recorded checksum.
+
+ Return current C API checksum and the recorded checksum for the given
+ version of the C API version.
+
+ """
+ # Compute the hash of the current API as defined in the .txt files in
+ # code_generators
+ sys.path.insert(0, codegen_dir)
+ try:
+ m = __import__('genapi')
+ numpy_api = __import__('numpy_api')
+ curapi_hash = m.fullapi_hash(numpy_api.full_api)
+ apis_hash = m.get_versions_hash()
+ finally:
+ del sys.path[0]
+
+ return curapi_hash, apis_hash[apiversion]
+
+def check_api_version(apiversion, codegen_dir):
+ """Emits a MismatchCAPIWarning if the C API version needs updating."""
+ curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
+
+ # If different hash, it means that the api .txt files in
+ # codegen_dir have been updated without the API version being
+ # updated. Any modification in those .txt files should be reflected
+ # in the api and eventually abi versions.
+ # To compute the checksum of the current API, use numpy/core/cversions.py
+ if curapi_hash != api_hash:
+ msg = ("API mismatch detected, the C API version "
+ "numbers have to be updated. Current C API version is "
+ f"{apiversion}, with checksum {curapi_hash}, but recorded "
+ f"checksum in core/codegen_dir/cversions.txt is {api_hash}. If "
+ "functions were added in the C API, you have to update "
+ f"C_API_VERSION in {__file__}."
+ )
+ raise MismatchCAPIError(msg)
+
+
+FUNC_CALL_ARGS = {}
+
+def set_sig(sig):
+ prefix, _, args = sig.partition("(")
+ args = args.rpartition(")")[0]
+ funcname = prefix.rpartition(" ")[-1]
+ args = [arg.strip() for arg in args.split(",")]
+ # We use {0} because 0 alone cannot be cast to complex on MSVC in C:
+ FUNC_CALL_ARGS[funcname] = ", ".join("(%s){0}" % arg for arg in args)
+
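+# For illustration (a hypothetical signature, not necessarily one that
+# appears in the feature_detection_*.h headers read below): calling
+# set_sig("double expm1(double)") records
+# FUNC_CALL_ARGS["expm1"] == "(double){0}", i.e. one zero-initialized
+# compound literal per argument type, ready to be pasted into a test
+# call such as expm1((double){0}).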
+
+for file in [
+ "feature_detection_locale.h",
+ "feature_detection_math.h",
+ "feature_detection_cmath.h",
+ "feature_detection_misc.h",
+ "feature_detection_stdio.h",
+]:
+ with open(pathlib.Path(__file__).parent / file) as f:
+ for line in f:
+ if line.startswith("#"):
+ continue
+ if not line.strip():
+ continue
+ set_sig(line)
+
+# Mandatory functions: if not found, fail the build
+# Some of these can still be blocklisted if the C99 implementation
+# is buggy, see numpy/core/src/common/npy_config.h
+MANDATORY_FUNCS = [
+ "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
+ "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
+ "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp',
+ "expm1", "log1p", "acosh", "asinh", "atanh",
+ "rint", "trunc", "exp2",
+ "copysign", "nextafter", "strtoll", "strtoull", "cbrt",
+ "log2", "pow", "hypot", "atan2",
+ "creal", "cimag", "conj"
+]
+
+OPTIONAL_LOCALE_FUNCS = ["strtold_l"]
+OPTIONAL_FILE_FUNCS = ["ftello", "fseeko", "fallocate"]
+OPTIONAL_MISC_FUNCS = ["backtrace", "madvise"]
+
+# variable attributes tested via "int %s a" % attribute
+OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
+
+# Subset of OPTIONAL_*_FUNCS which may already have HAVE_* defined by Python.h
+OPTIONAL_FUNCS_MAYBE = [
+ "ftello", "fseeko"
+ ]
+
+C99_COMPLEX_TYPES = [
+ 'complex double', 'complex float', 'complex long double'
+ ]
+C99_COMPLEX_FUNCS = [
+ "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
+ "catanh", "cexp", "clog", "cpow", "csqrt",
+ # The long double variants (like csinl) should be mandatory on C11,
+ # but are missing in FreeBSD. Issue gh-22850
+ "csin", "csinh", "ccos", "ccosh", "ctan", "ctanh",
+ ]
+
+OPTIONAL_HEADERS = [
+# sse headers only enabled automatically on amd64/x32 builds
+ "xmmintrin.h", # SSE
+ "emmintrin.h", # SSE2
+ "immintrin.h", # AVX
+ "features.h", # for glibc version linux
+ "xlocale.h", # see GH#8367
+ "dlfcn.h", # dladdr
+ "execinfo.h", # backtrace
+ "libunwind.h", # backtrace for LLVM/Clang using libunwind
+ "sys/mman.h", #madvise
+]
+
+# optional gcc compiler builtins and their call arguments, plus an
+# optional required header and a definition name (HAVE_ is prepended);
+# call arguments are required as the compiler will do strict signature checking
+OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
+ ("__builtin_isinf", '5.'),
+ ("__builtin_isfinite", '5.'),
+ ("__builtin_bswap32", '5u'),
+ ("__builtin_bswap64", '5u'),
+ ("__builtin_expect", '5, 0'),
+ # Test `long long` for arm+clang 13 (gh-22811); the
+ # check covers all versions of __builtin_mul_overflow:
+ ("__builtin_mul_overflow", '(long long)5, 5, (int*)5'),
+ # MMX only needed for icc, but some clangs don't have it
+ ("_m_from_int64", '0', "emmintrin.h"),
+ ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
+ ("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
+ "xmmintrin.h"), # SSE
+ ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
+ ("__builtin_prefetch", "(float*)0, 0, 3"),
+ # check that the linker can handle avx
+ ("__asm__ volatile", '"vpand %xmm1, %xmm2, %xmm3"',
+ "stdio.h", "LINK_AVX"),
+ ("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
+ "stdio.h", "LINK_AVX2"),
+ ("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"',
+ "stdio.h", "LINK_AVX512F"),
+ ("__asm__ volatile", '"vfpclasspd $0x40, %zmm15, %k6\\n"\
+ "vmovdqu8 %xmm0, %xmm1\\n"\
+ "vpbroadcastmb2q %k0, %xmm0\\n"',
+ "stdio.h", "LINK_AVX512_SKX"),
+ ("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
+ ]
+
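+# As a sketch of how these entries are exercised (the exact harness lives
+# in numpy.distutils, so the program below is illustrative only): an entry
+# like ("__builtin_isnan", '5.') is checked by compiling and linking
+# something along the lines of
+#
+#     int main(void) { __builtin_isnan(5.); return 0; }
+#
+# with an optional third element emitted as an #include and an optional
+# fourth element overriding the HAVE_ macro name.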
+# function attributes
+# tested via "int %s %s(void *);" % (attribute, name)
+# function name will be converted to HAVE_<upper-case-name> preprocessor macro
+OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
+ 'attribute_optimize_unroll_loops'),
+ ('__attribute__((optimize("O3")))',
+ 'attribute_optimize_opt_3'),
+ ('__attribute__((optimize("O2")))',
+ 'attribute_optimize_opt_2'),
+ ('__attribute__((nonnull (1)))',
+ 'attribute_nonnull'),
+ ]
+
+OPTIONAL_FUNCTION_ATTRIBUTES_AVX = [('__attribute__((target ("avx")))',
+ 'attribute_target_avx'),
+ ('__attribute__((target ("avx2")))',
+ 'attribute_target_avx2'),
+ ('__attribute__((target ("avx512f")))',
+ 'attribute_target_avx512f'),
+ ('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))',
+ 'attribute_target_avx512_skx'),
+ ]
+
+# function attributes with intrinsics
+# To ensure your compiler can compile avx intrinsics with just the
+# attributes: gcc 4.8.4 supports the attributes but not the intrinsics.
+# tested via "#include<%s> int %s %s(void *){ %s; return 0; };" % (header, attribute, name, code)
+# function name will be converted to HAVE_<upper-case-name> preprocessor macro
+# The _mm512_castps_si512 intrinsic is a specific check for AVX-512F
+# support in gcc-4.9, which is missing a subset of intrinsics. See
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61878
+OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS_AVX = [
+ ('__attribute__((target("avx2,fma")))',
+ 'attribute_target_avx2_with_intrinsics',
+ '__m256 temp = _mm256_set1_ps(1.0); temp = \
+ _mm256_fmadd_ps(temp, temp, temp)',
+ 'immintrin.h'),
+ ('__attribute__((target("avx512f")))',
+ 'attribute_target_avx512f_with_intrinsics',
+ '__m512i temp = _mm512_castps_si512(_mm512_set1_ps(1.0))',
+ 'immintrin.h'),
+ ('__attribute__((target ("avx512f,avx512dq,avx512bw,avx512vl,avx512cd")))',
+ 'attribute_target_avx512_skx_with_intrinsics',
+ '__mmask8 temp = _mm512_fpclass_pd_mask(_mm512_set1_pd(1.0), 0x01);\
+ __m512i unused_temp = \
+ _mm512_castps_si512(_mm512_set1_ps(1.0));\
+ _mm_mask_storeu_epi8(NULL, 0xFF, _mm_broadcastmb_epi64(temp))',
+ 'immintrin.h'),
+ ]
+
+def fname2def(name):
+ return "HAVE_%s" % name.upper()
+
+def sym2def(symbol):
+ define = symbol.replace(' ', '')
+ return define.upper()
+
+def type2def(symbol):
+ define = symbol.replace(' ', '_')
+ return define.upper()
+
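+# For example: fname2def("expm1") -> "HAVE_EXPM1",
+# sym2def("complex double") -> "COMPLEXDOUBLE", and
+# type2def("long double") -> "LONG_DOUBLE".
+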
+# Code to detect long double representation taken from MPFR m4 macro
+def check_long_double_representation(cmd):
+ cmd._check_compiler()
+ body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
+
+ # Disable whole program optimization (the default on vs2015, with python 3.5+)
+ # which generates intermediary object files and prevents checking the
+ # float representation.
+ if sys.platform == "win32" and not mingw32():
+ try:
+ cmd.compiler.compile_options.remove("/GL")
+ except (AttributeError, ValueError):
+ pass
+
+ # Disable multi-file interprocedural optimization in the Intel compiler on Linux
+ # which generates intermediary object files and prevents checking the
+ # float representation.
+ elif (sys.platform != "win32"
+ and cmd.compiler.compiler_type.startswith('intel')
+ and '-ipo' in cmd.compiler.cc_exe):
+ newcompiler = cmd.compiler.cc_exe.replace(' -ipo', '')
+ cmd.compiler.set_executables(
+ compiler=newcompiler,
+ compiler_so=newcompiler,
+ compiler_cxx=newcompiler,
+ linker_exe=newcompiler,
+ linker_so=newcompiler + ' -shared'
+ )
+
+ # We need to use _compile because we need the object filename
+ src, obj = cmd._compile(body, None, None, 'c')
+ try:
+ ltype = long_double_representation(pyod(obj))
+ return ltype
+ except ValueError:
+ # try linking to support CC="gcc -flto" or icc -ipo
+ # struct needs to be volatile so it isn't optimized away
+ # additionally "clang -flto" requires the foo struct to be used
+ body = body.replace('struct', 'volatile struct')
+ body += "int main(void) { return foo.before[0]; }\n"
+ src, obj = cmd._compile(body, None, None, 'c')
+ cmd.temp_files.append("_configtest")
+ cmd.compiler.link_executable([obj], "_configtest")
+ ltype = long_double_representation(pyod("_configtest"))
+ return ltype
+ finally:
+ cmd._clean()
+
+LONG_DOUBLE_REPRESENTATION_SRC = r"""
+/* "before" is 16 bytes to ensure there's no padding between it and "x".
+ * We're not expecting any "long double" bigger than 16 bytes or with
+ * alignment requirements stricter than 16 bytes. */
+typedef %(type)s test_type;
+
+struct {
+ char before[16];
+ test_type x;
+ char after[8];
+} foo = {
+ { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
+ '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
+ -123456789.0,
+ { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
+};
+"""
+
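+# Worked example (assuming a platform where long double is plain
+# little-endian IEEE double, for illustration): -123456789.0 then occupies
+# the octal bytes 000 000 000 124 064 157 235 301 (_IEEE_DOUBLE_BE
+# reversed), and long_double_representation() below reports
+# 'IEEE_DOUBLE_LE' for the resulting dump.
+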
+def pyod(filename):
+ """Python implementation of the od UNIX utility (od -b, more exactly).
+
+ Parameters
+ ----------
+ filename : str
+ name of the file to get the dump from.
+
+ Returns
+ -------
+ out : seq
+ list of lines of od output
+
+ Notes
+ -----
+ We only implement enough to get the necessary information for long double
+ representation; this is not intended as a compatible replacement for od.
+ """
+ out = []
+ with open(filename, 'rb') as fid:
+ yo2 = [oct(o)[2:] for o in fid.read()]
+ for i in range(0, len(yo2), 16):
+ line = ['%07d' % int(oct(i)[2:])]
+ line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
+ out.append(" ".join(line))
+ return out
+
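+# Example of the output format (byte values illustrative): each line is a
+# seven-digit octal offset followed by up to sixteen three-digit octal
+# bytes, e.g.
+# "0000000 000 000 000 000 000 000 000 000 001 043 105 147 211 253 315 357"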
+
+_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
+ '001', '043', '105', '147', '211', '253', '315', '357']
+_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
+
+_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
+_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
+_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
+ '031', '300', '000', '000']
+_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
+ '031', '300', '000', '000', '000', '000', '000', '000']
+_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
+ '242', '240', '000', '000', '000', '000']
+_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
+ '000', '000', '000', '000', '000', '000', '000', '000']
+_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
+_IBM_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
+ ['000'] * 8)
+_IBM_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
+ ['000'] * 8)
+
+def long_double_representation(lines):
+ """Given a binary dump as given by GNU od -b, look for long double
+ representation."""
+
+ # `read` contains a list of 32 items, each item a byte (in octal
+ # representation, as a string). We 'slide' over the output until read
+ # has the form before_seq + content + after_seq, where content is the
+ # long double representation:
+ # - content is 12 bytes: 80-bit Intel extended representation
+ # - content is 16 bytes: 80-bit Intel extended (padded to 16 bytes),
+ # IEEE quad precision, or IBM double-double
+ # - content is 8 bytes: the same representation as double
+ read = [''] * 32
+ saw = None
+ for line in lines:
+ # we skip the first word, as od -b outputs an index at the beginning
+ # of each line
+ for w in line.split()[1:]:
+ read.pop(0)
+ read.append(w)
+
+ # If the end of read is equal to the after_sequence, read contains
+ # the long double
+ if read[-8:] == _AFTER_SEQ:
+ saw = copy.copy(read)
+ # if the content was 12 bytes, we only have 32 - 8 - 12 = 12
+ # "before" bytes. In other words the first 4 "before" bytes went
+ # past the sliding window.
+ if read[:12] == _BEFORE_SEQ[4:]:
+ if read[12:-8] == _INTEL_EXTENDED_12B:
+ return 'INTEL_EXTENDED_12_BYTES_LE'
+ if read[12:-8] == _MOTOROLA_EXTENDED_12B:
+ return 'MOTOROLA_EXTENDED_12_BYTES_BE'
+ # if the content was 16 bytes, we are left with 32-8-16 = 16
+ # "before" bytes, so 8 went past the sliding window.
+ elif read[:8] == _BEFORE_SEQ[8:]:
+ if read[8:-8] == _INTEL_EXTENDED_16B:
+ return 'INTEL_EXTENDED_16_BYTES_LE'
+ elif read[8:-8] == _IEEE_QUAD_PREC_BE:
+ return 'IEEE_QUAD_BE'
+ elif read[8:-8] == _IEEE_QUAD_PREC_LE:
+ return 'IEEE_QUAD_LE'
+ elif read[8:-8] == _IBM_DOUBLE_DOUBLE_LE:
+ return 'IBM_DOUBLE_DOUBLE_LE'
+ elif read[8:-8] == _IBM_DOUBLE_DOUBLE_BE:
+ return 'IBM_DOUBLE_DOUBLE_BE'
+ # if the content was 8 bytes, left with 32-8-8 = 16 bytes
+ elif read[:16] == _BEFORE_SEQ:
+ if read[16:-8] == _IEEE_DOUBLE_LE:
+ return 'IEEE_DOUBLE_LE'
+ elif read[16:-8] == _IEEE_DOUBLE_BE:
+ return 'IEEE_DOUBLE_BE'
+
+ if saw is not None:
+ raise ValueError("Unrecognized format (%s)" % saw)
+ else:
+ # We never detected the after_sequence
+ raise ValueError("Could not lock sequences (%s)" % saw)
+
+
+def check_for_right_shift_internal_compiler_error(cmd):
+ """
+ On our arm CI, this fails with an internal compilation error
+
+ The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
+
+ <source>: In function 'right_shift':
+ <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
+ ip1[i] = ip1[i] >> in2;
+ ^
+ Please submit a full bug report,
+ with preprocessed source if appropriate.
+ See <http://gcc.gnu.org/bugs.html> for instructions.
+ Compiler returned: 1
+
+ This function returns True if this compiler bug is present, in which
+ case optimization must be turned off for the affected function.
+ """
+ cmd._check_compiler()
+ has_optimize = cmd.try_compile(textwrap.dedent("""\
+ __attribute__((optimize("O3"))) void right_shift() {}
+ """), None, None)
+ if not has_optimize:
+ return False
+
+ no_err = cmd.try_compile(textwrap.dedent("""\
+ typedef long the_type; /* fails also for unsigned and long long */
+ __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
+ for (int i = 0; i < n; i++) {
+ if (in2 < (the_type)sizeof(the_type) * 8) {
+ ip1[i] = ip1[i] >> in2;
+ }
+ }
+ }
+ """), None, None)
+ return not no_err
diff --git a/venv/lib/python3.9/site-packages/numpy/core/shape_base.py b/venv/lib/python3.9/site-packages/numpy/core/shape_base.py
new file mode 100644
index 00000000..84a6bd67
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/shape_base.py
@@ -0,0 +1,938 @@
+__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
+ 'stack', 'vstack']
+
+import functools
+import itertools
+import operator
+import warnings
+
+from . import numeric as _nx
+from . import overrides
+from .multiarray import array, asanyarray, normalize_axis_index
+from . import fromnumeric as _from_nx
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _atleast_1d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_1d_dispatcher)
+def atleast_1d(*arys):
+ """
+ Convert inputs to arrays with at least one dimension.
+
+ Scalar inputs are converted to 1-dimensional arrays, whilst
+ higher-dimensional inputs are preserved.
+
+ Parameters
+ ----------
+ arys1, arys2, ... : array_like
+ One or more input arrays.
+
+ Returns
+ -------
+ ret : ndarray
+ An array, or list of arrays, each with ``a.ndim >= 1``.
+ Copies are made only if necessary.
+
+ See Also
+ --------
+ atleast_2d, atleast_3d
+
+ Examples
+ --------
+ >>> np.atleast_1d(1.0)
+ array([1.])
+
+ >>> x = np.arange(9.0).reshape(3,3)
+ >>> np.atleast_1d(x)
+ array([[0., 1., 2.],
+ [3., 4., 5.],
+ [6., 7., 8.]])
+ >>> np.atleast_1d(x) is x
+ True
+
+ >>> np.atleast_1d(1, [3, 4])
+ [array([1]), array([3, 4])]
+
+ """
+ res = []
+ for ary in arys:
+ ary = asanyarray(ary)
+ if ary.ndim == 0:
+ result = ary.reshape(1)
+ else:
+ result = ary
+ res.append(result)
+ if len(res) == 1:
+ return res[0]
+ else:
+ return res
+
+
+def _atleast_2d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_2d_dispatcher)
+def atleast_2d(*arys):
+ """
+ View inputs as arrays with at least two dimensions.
+
+ Parameters
+ ----------
+ arys1, arys2, ... : array_like
+ One or more array-like sequences. Non-array inputs are converted
+ to arrays. Arrays that already have two or more dimensions are
+ preserved.
+
+ Returns
+ -------
+ res1, res2, ... : ndarray
+ An array, or list of arrays, each with ``a.ndim >= 2``.
+ Copies are avoided where possible, and views with two or more
+ dimensions are returned.
+
+ See Also
+ --------
+ atleast_1d, atleast_3d
+
+ Examples
+ --------
+ >>> np.atleast_2d(3.0)
+ array([[3.]])
+
+ >>> x = np.arange(3.0)
+ >>> np.atleast_2d(x)
+ array([[0., 1., 2.]])
+ >>> np.atleast_2d(x).base is x
+ True
+
+ >>> np.atleast_2d(1, [1, 2], [[1, 2]])
+ [array([[1]]), array([[1, 2]]), array([[1, 2]])]
+
+ """
+ res = []
+ for ary in arys:
+ ary = asanyarray(ary)
+ if ary.ndim == 0:
+ result = ary.reshape(1, 1)
+ elif ary.ndim == 1:
+ result = ary[_nx.newaxis, :]
+ else:
+ result = ary
+ res.append(result)
+ if len(res) == 1:
+ return res[0]
+ else:
+ return res
+
+
+def _atleast_3d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_3d_dispatcher)
+def atleast_3d(*arys):
+ """
+ View inputs as arrays with at least three dimensions.
+
+ Parameters
+ ----------
+ arys1, arys2, ... : array_like
+ One or more array-like sequences. Non-array inputs are converted to
+ arrays. Arrays that already have three or more dimensions are
+ preserved.
+
+ Returns
+ -------
+ res1, res2, ... : ndarray
+ An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
+ avoided where possible, and views with three or more dimensions are
+ returned. For example, a 1-D array of shape ``(N,)`` becomes a view
+ of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
+ view of shape ``(M, N, 1)``.
+
+ See Also
+ --------
+ atleast_1d, atleast_2d
+
+ Examples
+ --------
+ >>> np.atleast_3d(3.0)
+ array([[[3.]]])
+
+ >>> x = np.arange(3.0)
+ >>> np.atleast_3d(x).shape
+ (1, 3, 1)
+
+ >>> x = np.arange(12.0).reshape(4,3)
+ >>> np.atleast_3d(x).shape
+ (4, 3, 1)
+ >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
+ True
+
+ >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
+ ... print(arr, arr.shape) # doctest: +SKIP
+ ...
+ [[[1]
+ [2]]] (1, 2, 1)
+ [[[1]
+ [2]]] (1, 2, 1)
+ [[[1 2]]] (1, 1, 2)
+
+ """
+ res = []
+ for ary in arys:
+ ary = asanyarray(ary)
+ if ary.ndim == 0:
+ result = ary.reshape(1, 1, 1)
+ elif ary.ndim == 1:
+ result = ary[_nx.newaxis, :, _nx.newaxis]
+ elif ary.ndim == 2:
+ result = ary[:, :, _nx.newaxis]
+ else:
+ result = ary
+ res.append(result)
+ if len(res) == 1:
+ return res[0]
+ else:
+ return res
+
+
+def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
+ if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
+ warnings.warn('arrays to stack must be passed as a "sequence" type '
+ 'such as list or tuple. Support for non-sequence '
+ 'iterables such as generators is deprecated as of '
+ 'NumPy 1.16 and will raise an error in the future.',
+ FutureWarning, stacklevel=stacklevel)
+ return ()
+ return arrays
+
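+# For example (illustrative): np.vstack(x for x in ([1, 2], [3, 4]))
+# reaches the branch above, emits the FutureWarning, and returns (),
+# whereas a list or tuple of arrays is passed through unchanged.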
+
+def _vhstack_dispatcher(tup, *,
+ dtype=None, casting=None):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
+def vstack(tup, *, dtype=None, casting="same_kind"):
+ """
+ Stack arrays in sequence vertically (row wise).
+
+ This is equivalent to concatenation along the first axis after 1-D arrays
+ of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
+ `vsplit`.
+
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
+
+ ``np.row_stack`` is an alias for `vstack`. They are the same function.
+
+ Parameters
+ ----------
+ tup : sequence of ndarrays
+ The arrays must have the same shape along all but the first axis.
+ 1-D arrays must have the same length.
+
+ dtype : str or dtype
+ If provided, the destination array will have this dtype.
+
+ .. versionadded:: 1.24
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+ .. versionadded:: 1.24
+
+ Returns
+ -------
+ stacked : ndarray
+ The array formed by stacking the given arrays, will be at least 2-D.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ dstack : Stack arrays in sequence depth wise (along third axis).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ vsplit : Split an array into multiple sub-arrays vertically (row-wise).
+
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([4, 5, 6])
+ >>> np.vstack((a,b))
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> a = np.array([[1], [2], [3]])
+ >>> b = np.array([[4], [5], [6]])
+ >>> np.vstack((a,b))
+ array([[1],
+ [2],
+ [3],
+ [4],
+ [5],
+ [6]])
+
+ """
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+ arrs = atleast_2d(*tup)
+ if not isinstance(arrs, list):
+ arrs = [arrs]
+ return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
+def hstack(tup, *, dtype=None, casting="same_kind"):
+ """
+ Stack arrays in sequence horizontally (column wise).
+
+ This is equivalent to concatenation along the second axis, except for 1-D
+ arrays where it concatenates along the first axis. Rebuilds arrays divided
+ by `hsplit`.
+
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
+
+ Parameters
+ ----------
+ tup : sequence of ndarrays
+ The arrays must have the same shape along all but the second axis,
+ except 1-D arrays which can be any length.
+
+ dtype : str or dtype
+ If provided, the destination array will have this dtype.
+
+ .. versionadded:: 1.24
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+ .. versionadded:: 1.24
+
+ Returns
+ -------
+ stacked : ndarray
+ The array formed by stacking the given arrays.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third axis).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ hsplit : Split an array into multiple sub-arrays horizontally (column-wise).
+
+ Examples
+ --------
+ >>> a = np.array((1,2,3))
+ >>> b = np.array((4,5,6))
+ >>> np.hstack((a,b))
+ array([1, 2, 3, 4, 5, 6])
+ >>> a = np.array([[1],[2],[3]])
+ >>> b = np.array([[4],[5],[6]])
+ >>> np.hstack((a,b))
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+
+ """
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+
+ arrs = atleast_1d(*tup)
+ if not isinstance(arrs, list):
+ arrs = [arrs]
+ # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
+ if arrs and arrs[0].ndim == 1:
+ return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
+ else:
+ return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting)
+
+
+def _stack_dispatcher(arrays, axis=None, out=None, *,
+ dtype=None, casting=None):
+ arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_dispatch(_stack_dispatcher)
+def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
+ """
+ Join a sequence of arrays along a new axis.
+
+ The ``axis`` parameter specifies the index of the new axis in the
+ dimensions of the result. For example, if ``axis=0`` it will be the first
+ dimension and if ``axis=-1`` it will be the last dimension.
+
+ .. versionadded:: 1.10.0
+
+ Parameters
+ ----------
+ arrays : sequence of array_like
+ Each array must have the same shape.
+
+ axis : int, optional
+ The axis in the result array along which the input arrays are stacked.
+
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what stack would have returned if no
+ out argument were specified.
+
+ dtype : str or dtype
+ If provided, the destination array will have this dtype. Cannot be
+ provided together with `out`.
+
+ .. versionadded:: 1.24
+
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur. Defaults to 'same_kind'.
+
+ .. versionadded:: 1.24
+
+
+ Returns
+ -------
+ stacked : ndarray
+ The stacked array has one more dimension than the input arrays.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ block : Assemble an nd-array from nested lists of blocks.
+ split : Split array into a list of multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
+ >>> np.stack(arrays, axis=0).shape
+ (10, 3, 4)
+
+ >>> np.stack(arrays, axis=1).shape
+ (3, 10, 4)
+
+ >>> np.stack(arrays, axis=2).shape
+ (3, 4, 10)
+
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([4, 5, 6])
+ >>> np.stack((a, b))
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> np.stack((a, b), axis=-1)
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+
+ """
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(arrays, stacklevel=2)
+
+ arrays = [asanyarray(arr) for arr in arrays]
+ if not arrays:
+ raise ValueError('need at least one array to stack')
+
+ shapes = {arr.shape for arr in arrays}
+ if len(shapes) != 1:
+ raise ValueError('all input arrays must have the same shape')
+
+ result_ndim = arrays[0].ndim + 1
+ axis = normalize_axis_index(axis, result_ndim)
+
+ sl = (slice(None),) * axis + (_nx.newaxis,)
+ expanded_arrays = [arr[sl] for arr in arrays]
+ return _nx.concatenate(expanded_arrays, axis=axis, out=out,
+ dtype=dtype, casting=casting)
+
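+# For example (illustrative): with axis == 1, ``sl`` above is
+# (slice(None), np.newaxis), so a 2-D input of shape (3, 4) is viewed as
+# (3, 1, 4) before the expanded arrays are concatenated along the new
+# axis.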
+
+# Internal functions to eliminate the overhead of repeated dispatch in one of
+# the two possible paths inside np.block.
+# Use getattr to protect against __array_function__ being disabled.
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
+_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
+_concatenate = getattr(_from_nx.concatenate,
+ '__wrapped__', _from_nx.concatenate)
+
+
+def _block_format_index(index):
+ """
+ Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
+ """
+ idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
+ return 'arrays' + idx_str
+
+
+def _block_check_depths_match(arrays, parent_index=[]):
+ """
+ Recursive function checking that the depths of nested lists in `arrays`
+ all match. Mismatch raises a ValueError as described in the block
+ docstring below.
+
+ The entire index (rather than just the depth) needs to be calculated
+ for each innermost list, in case an error needs to be raised, so that
+ the index of the offending list can be printed as part of the error.
+
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ parent_index : list of int
+ The full index of `arrays` within the nested lists passed to
+ `_block_check_depths_match` at the top of the recursion.
+
+ Returns
+ -------
+ first_index : list of int
+ The full index of an element from the bottom of the nesting in
+ `arrays`. If any element at the bottom is an empty list, this will
+ refer to it, and the last index along the empty axis will be None.
+ max_arr_ndim : int
+ The maximum of the ndims of the arrays nested in `arrays`.
+ final_size : int
+ The number of elements in the final array. This is used to motivate
+ the choice of algorithm, based on benchmarking wisdom.
+
+ """
+ if type(arrays) is tuple:
+ # not strictly necessary, but saves us from:
+ # - more than one way to do things - no point treating tuples like
+ # lists
+ # - horribly confusing behaviour that results when tuples are
+ # treated like ndarray
+ raise TypeError(
+ '{} is a tuple. '
+ 'Only lists can be used to arrange blocks, and np.block does '
+ 'not allow implicit conversion from tuple to ndarray.'.format(
+ _block_format_index(parent_index)
+ )
+ )
+ elif type(arrays) is list and len(arrays) > 0:
+ idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
+ for i, arr in enumerate(arrays))
+
+ first_index, max_arr_ndim, final_size = next(idxs_ndims)
+ for index, ndim, size in idxs_ndims:
+ final_size += size
+ if ndim > max_arr_ndim:
+ max_arr_ndim = ndim
+ if len(index) != len(first_index):
+ raise ValueError(
+ "List depths are mismatched. First element was at depth "
+ "{}, but there is an element at depth {} ({})".format(
+ len(first_index),
+ len(index),
+ _block_format_index(index)
+ )
+ )
+ # propagate our flag that indicates an empty list at the bottom
+ if index[-1] is None:
+ first_index = index
+
+ return first_index, max_arr_ndim, final_size
+ elif type(arrays) is list and len(arrays) == 0:
+ # We've 'bottomed out' on an empty list
+ return parent_index + [None], 0, 0
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ size = _size(arrays)
+ return parent_index, _ndim(arrays), size
+
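+# For example (illustrative): for arrays == [[a, b], [c]] with 1-D a, b
+# and c, every leaf sits at depth 2, so the call returns the index of the
+# first leaf ([0, 0]), the maximum ndim (1) and the total element count,
+# while a ragged input such as [[a, b], c] raises ValueError.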
+
+def _atleast_nd(a, ndim):
+ # Ensures `a` has at least `ndim` dimensions by prepending
+ # ones to `a.shape` as necessary
+ return array(a, ndmin=ndim, copy=False, subok=True)
+
+
+def _accumulate(values):
+ return list(itertools.accumulate(values))
+
+
+def _concatenate_shapes(shapes, axis):
+ """Given array shapes, return the resulting shape and slices prefixes.
+
+ These help in nested concatenation.
+
+ Returns
+ -------
+ shape: tuple of int
+ This tuple satisfies::
+
+ shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+ shape == concatenate(arrs, axis).shape
+
+ slice_prefixes: tuple of (slice(start, end), )
+ For a list of arrays being concatenated, this returns the slice
+ in the larger array at axis that needs to be sliced into.
+
+ For example, the following holds::
+
+ ret = concatenate([a, b, c], axis)
+ _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
+
+ ret[(slice(None),) * axis + sl_a] == a
+ ret[(slice(None),) * axis + sl_b] == b
+ ret[(slice(None),) * axis + sl_c] == c
+
+ These are called slice prefixes since they are used in the recursive
+ blocking algorithm to compute the left-most slices during the
+ recursion. Therefore, they must be prepended to the rest of the slice
+ that was computed deeper in the recursion.
+
+ These are returned as tuples to ensure that they can quickly be added
+ to an existing slice tuple without creating a new tuple every time.
+
+ """
+ # Cache a result that will be reused.
+ shape_at_axis = [shape[axis] for shape in shapes]
+
+ # Take a shape, any shape
+ first_shape = shapes[0]
+ first_shape_pre = first_shape[:axis]
+ first_shape_post = first_shape[axis+1:]
+
+ if any(shape[:axis] != first_shape_pre or
+ shape[axis+1:] != first_shape_post for shape in shapes):
+ raise ValueError(
+ 'Mismatched array shapes in block along axis {}.'.format(axis))
+
+ shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape_post)
+
+ offsets_at_axis = _accumulate(shape_at_axis)
+ slice_prefixes = [(slice(start, end),)
+ for start, end in zip([0] + offsets_at_axis,
+ offsets_at_axis)]
+ return shape, slice_prefixes
+
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+ """
+ Returns the shape of the final array, along with a list
+ of slices and a list of arrays that can be used for assignment inside the
+ new array
+
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ max_depth : list of int
+ The number of nested lists
+ result_ndim : int
+ The number of dimensions in the final array.
+
+ Returns
+ -------
+ shape : tuple of int
+ The shape that the final array will take on.
+ slices: list of tuple of slices
+ The slices into the full array required for assignment. These are
+ required to be prepended with ``(Ellipsis, )`` to obtain the correct
+ final index.
+ arrays: list of ndarray
+ The data to assign to each slice of the full array
+
+ """
+ if depth < max_depth:
+ shapes, slices, arrays = zip(
+ *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays])
+
+ axis = result_ndim - max_depth + depth
+ shape, slice_prefixes = _concatenate_shapes(shapes, axis)
+
+ # Prepend the slice prefix and flatten the slices
+ slices = [slice_prefix + the_slice
+ for slice_prefix, inner_slices in zip(slice_prefixes, slices)
+ for the_slice in inner_slices]
+
+ # Flatten the array list
+ arrays = functools.reduce(operator.add, arrays)
+
+ return shape, slices, arrays
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ # type(arrays) is not list
+ # Return the slice and the array inside a list to be consistent with
+ # the recursive case.
+ arr = _atleast_nd(arrays, result_ndim)
+ return arr.shape, [()], [arr]
+
+
+def _block(arrays, max_depth, result_ndim, depth=0):
+ """
+ Internal implementation of block based on repeated concatenation.
+ `arrays` is the argument passed to
+ block. `max_depth` is the depth of nested lists within `arrays` and
+ `result_ndim` is the greatest of the dimensions of the arrays in
+ `arrays` and the depth of the lists in `arrays` (see block docstring
+ for details).
+ """
+ if depth < max_depth:
+ arrs = [_block(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays]
+ return _concatenate(arrs, axis=-(max_depth-depth))
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ # type(arrays) is not list
+ return _atleast_nd(arrays, result_ndim)
+
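+# For example (illustrative): _block([[a, b], [c, d]], max_depth=2,
+# result_ndim=2) concatenates each inner list along axis -1 (at depth 1,
+# axis == -(2 - 1)) and then joins the two results along axis -2 (at
+# depth 0, axis == -(2 - 0)), matching the "last axis first" rule in the
+# block() docstring.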
+
+def _block_dispatcher(arrays):
+ # Use type(...) is list to match the behavior of np.block(), which special
+ # cases list specifically rather than allowing for generic iterables or
+ # tuple. Also, we know that list.__array_function__ will never exist.
+ if type(arrays) is list:
+ for subarrays in arrays:
+ yield from _block_dispatcher(subarrays)
+ else:
+ yield arrays
+
+
+@array_function_dispatch(_block_dispatcher)
+def block(arrays):
+ """
+ Assemble an nd-array from nested lists of blocks.
+
+ Blocks in the innermost lists are concatenated (see `concatenate`) along
+ the last dimension (-1), then these are concatenated along the
+ second-last dimension (-2), and so on until the outermost list is reached.
+
+ Blocks can be of any dimension, but will not be broadcasted using the normal
+ rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
+ the same for all blocks. This is primarily useful for working with scalars,
+ and means that code like ``np.block([v, 1])`` is valid, where
+ ``v.ndim == 1``.
+
+ When the nested list is two levels deep, this allows block matrices to be
+ constructed from their components.
+
+ .. versionadded:: 1.13.0
+
+ Parameters
+ ----------
+ arrays : nested list of array_like or scalars (but not tuples)
+ If passed a single ndarray or scalar (a nested list of depth 0), this
+ is returned unmodified (and not copied).
+
+ Elements shapes must match along the appropriate axes (without
+ broadcasting), but leading 1s will be prepended to the shape as
+ necessary to make the dimensions match.
+
+ Returns
+ -------
+ block_array : ndarray
+ The array assembled from the given blocks.
+
+ The dimensionality of the output is equal to the greatest of:
+ * the dimensionality of all the inputs
+ * the depth to which the input list is nested
+
+ Raises
+ ------
+ ValueError
+ * If list depths are mismatched - for instance, ``[[a, b], c]`` is
+ illegal, and should be spelt ``[[a, b], [c]]``
+ * If lists are empty - for instance, ``[[a, b], []]``
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ vstack : Stack arrays in sequence vertically (row wise).
+ hstack : Stack arrays in sequence horizontally (column wise).
+ dstack : Stack arrays in sequence depth wise (along third axis).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ vsplit : Split an array into multiple sub-arrays vertically (row-wise).
+
+ Notes
+ -----
+
+ When called with only scalars, ``np.block`` is equivalent to an ndarray
+ call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
+ ``np.array([[1, 2], [3, 4]])``.
+
+ This function does not enforce that the blocks lie on a fixed grid.
+ ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
+
+ AAAbb
+ AAAbb
+ cccDD
+
+ But is also allowed to produce, for some ``a, b, c, d``::
+
+ AAAbb
+ AAAbb
+ cDDDD
+
+ Since concatenation happens along the last axis first, `block` is _not_
+ capable of producing the following directly::
+
+ AAAbb
+ cccbb
+ cccDD
+
+ Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
+ equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
+
+ Examples
+ --------
+ The most common use of this function is to build a block matrix
+
+ >>> A = np.eye(2) * 2
+ >>> B = np.eye(3) * 3
+ >>> np.block([
+ ... [A, np.zeros((2, 3))],
+ ... [np.ones((3, 2)), B ]
+ ... ])
+ array([[2., 0., 0., 0., 0.],
+ [0., 2., 0., 0., 0.],
+ [1., 1., 3., 0., 0.],
+ [1., 1., 0., 3., 0.],
+ [1., 1., 0., 0., 3.]])
+
+ With a list of depth 1, `block` can be used as `hstack`
+
+ >>> np.block([1, 2, 3]) # hstack([1, 2, 3])
+ array([1, 2, 3])
+
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([4, 5, 6])
+ >>> np.block([a, b, 10]) # hstack([a, b, 10])
+ array([ 1, 2, 3, 4, 5, 6, 10])
+
+ >>> A = np.ones((2, 2), int)
+ >>> B = 2 * A
+ >>> np.block([A, B]) # hstack([A, B])
+ array([[1, 1, 2, 2],
+ [1, 1, 2, 2]])
+
+ With a list of depth 2, `block` can be used in place of `vstack`:
+
+ >>> a = np.array([1, 2, 3])
+ >>> b = np.array([4, 5, 6])
+ >>> np.block([[a], [b]]) # vstack([a, b])
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> A = np.ones((2, 2), int)
+ >>> B = 2 * A
+ >>> np.block([[A], [B]]) # vstack([A, B])
+ array([[1, 1],
+ [1, 1],
+ [2, 2],
+ [2, 2]])
+
+    It can also be used in place of `atleast_1d` and `atleast_2d`:
+
+ >>> a = np.array(0)
+ >>> b = np.array([1])
+ >>> np.block([a]) # atleast_1d(a)
+ array([0])
+ >>> np.block([b]) # atleast_1d(b)
+ array([1])
+
+ >>> np.block([[a]]) # atleast_2d(a)
+ array([[0]])
+ >>> np.block([[b]]) # atleast_2d(b)
+ array([[1]])
+
+ """
+ arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
+
+    # Benchmarking (on an i7-7700HQ processor with dual-channel 2400 MHz RAM)
+    # found that building a final array of size up to around 256x256 was
+    # faster by straight concatenation.
+    # The dtype used did not seem to matter much.
+    #
+    # A 2D array using repeated concatenation requires 2 copies of the array.
+    #
+    # The fastest algorithm will depend on the ratio of CPU power to memory
+    # speed.
+    # One can monitor the results of the benchmark
+    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
+    # to tune this parameter until a C version of the `_block_info_recursion`
+    # algorithm is implemented, which would likely be faster than the Python
+    # version.
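+    # For example, a depth-2 nested list (list_ndim == 2) assembling a
+    # 600x600 result gives 2 * 360000 = 720000 > 524288, so the slicing path
+    # is taken; a 256x256 result gives 2 * 65536 = 131072, which falls
+    # through to repeated concatenation.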
+ if list_ndim * final_size > (2 * 512 * 512):
+ return _block_slicing(arrays, list_ndim, result_ndim)
+ else:
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+
+# These helper functions are mostly used for testing.
+# They allow us to write tests that directly call `_block_slicing`
+# or `_block_concatenate` without blocking large arrays to force the
+# desired path.
+def _block_setup(arrays):
+ """
+ Returns
+ (`arrays`, list_ndim, result_ndim, final_size)
+ """
+ bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
+ list_ndim = len(bottom_index)
+ if bottom_index and bottom_index[-1] is None:
+ raise ValueError(
+ 'List at {} cannot be empty'.format(
+ _block_format_index(bottom_index)
+ )
+ )
+ result_ndim = max(arr_ndim, list_ndim)
+ return arrays, list_ndim, result_ndim, final_size
+
+
+def _block_slicing(arrays, list_ndim, result_ndim):
+ shape, slices, arrays = _block_info_recursion(
+ arrays, list_ndim, result_ndim)
+ dtype = _nx.result_type(*[arr.dtype for arr in arrays])
+
+ # Test preferring F only in the case that all input arrays are F
+ F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
+ C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
+ order = 'F' if F_order and not C_order else 'C'
+ result = _nx.empty(shape=shape, dtype=dtype, order=order)
+    # Note: In a C implementation, the function
+ # PyArray_CreateMultiSortedStridePerm could be used for more advanced
+ # guessing of the desired order.
+
+ for the_slice, arr in zip(slices, arrays):
+ result[(Ellipsis,) + the_slice] = arr
+ return result
+
+
+def _block_concatenate(arrays, list_ndim, result_ndim):
+ result = _block(arrays, list_ndim, result_ndim)
+ if list_ndim == 0:
+ # Catch an edge case where _block returns a view because
+ # `arrays` is a single numpy array and not a list of numpy arrays.
+        # This might copy scalars or lists twice, but this isn't a likely
+        # use case for those interested in performance.
+ result = result.copy()
+ return result
diff --git a/venv/lib/python3.9/site-packages/numpy/core/shape_base.pyi b/venv/lib/python3.9/site-packages/numpy/core/shape_base.pyi
new file mode 100644
index 00000000..10116f1e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/shape_base.pyi
@@ -0,0 +1,128 @@
+from collections.abc import Sequence
+from typing import TypeVar, overload, Any, SupportsIndex
+
+from numpy import generic, _CastingKind
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _ArrayLike,
+ _DTypeLike,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+__all__: list[str]
+
+@overload
+def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
+@overload
+def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
+@overload
+def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
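+# atleast_1d/_2d/_3d share this overload pattern: the first overload
+# preserves the input's scalar type, the second accepts any array_like, and
+# the third covers the multi-argument form, which returns a list of arrays.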
+
+@overload
+def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
+@overload
+def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
+@overload
+def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
+
+@overload
+def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
+@overload
+def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
+@overload
+def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
+
+@overload
+def vstack(
+ tup: Sequence[_ArrayLike[_SCT]],
+ *,
+ dtype: None = ...,
+ casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def vstack(
+ tup: Sequence[ArrayLike],
+ *,
+ dtype: _DTypeLike[_SCT],
+ casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def vstack(
+ tup: Sequence[ArrayLike],
+ *,
+ dtype: DTypeLike = ...,
+ casting: _CastingKind = ...
+) -> NDArray[Any]: ...
+
+@overload
+def hstack(
+ tup: Sequence[_ArrayLike[_SCT]],
+ *,
+ dtype: None = ...,
+ casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def hstack(
+ tup: Sequence[ArrayLike],
+ *,
+ dtype: _DTypeLike[_SCT],
+ casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def hstack(
+ tup: Sequence[ArrayLike],
+ *,
+ dtype: DTypeLike = ...,
+ casting: _CastingKind = ...
+) -> NDArray[Any]: ...
+
+@overload
+def stack(
+ arrays: Sequence[_ArrayLike[_SCT]],
+ axis: SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: None = ...,
+ casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def stack(
+ arrays: Sequence[ArrayLike],
+ axis: SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: _DTypeLike[_SCT],
+ casting: _CastingKind = ...
+) -> NDArray[_SCT]: ...
+@overload
+def stack(
+ arrays: Sequence[ArrayLike],
+ axis: SupportsIndex = ...,
+ out: None = ...,
+ *,
+ dtype: DTypeLike = ...,
+ casting: _CastingKind = ...
+) -> NDArray[Any]: ...
+@overload
+def stack(
+ arrays: Sequence[ArrayLike],
+ axis: SupportsIndex = ...,
+ out: _ArrayType = ...,
+ *,
+ dtype: DTypeLike = ...,
+ casting: _CastingKind = ...
+) -> _ArrayType: ...
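+# When an ``out`` array is passed, ``stack`` returns that same array, so the
+# final overload propagates its exact type through ``_ArrayType``.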
+
+@overload
+def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def block(arrays: ArrayLike) -> NDArray[Any]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/core/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/_locales.py b/venv/lib/python3.9/site-packages/numpy/core/tests/_locales.py
new file mode 100644
index 00000000..b1dc55a9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/_locales.py
@@ -0,0 +1,78 @@
+"""Provide class for testing in French locale
+
+"""
+import sys
+import locale
+
+import pytest
+
+__all__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+    """See if the platform has a comma-as-decimal-point locale.
+
+ Find a locale that uses a comma instead of a period as the
+ decimal point.
+
+ Returns
+ -------
+    old_locale: str
+        Locale in effect when the function was called.
+    new_locale: {str, None}
+        First comma-decimal-point locale found, or None if none was found.
+
+ """
+ if sys.platform == 'win32':
+ locales = ['FRENCH']
+ else:
+ locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+ old_locale = locale.getlocale(locale.LC_NUMERIC)
+ new_locale = None
+ try:
+ for loc in locales:
+ try:
+ locale.setlocale(locale.LC_NUMERIC, loc)
+ new_locale = loc
+ break
+ except locale.Error:
+ pass
+ finally:
+ locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+ return old_locale, new_locale
+
+
+class CommaDecimalPointLocale:
+ """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+ Classes derived from this class have setup and teardown methods that run
+ tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+ the decimal point instead of periods ('.'). On exit the locale is restored
+    to the initial locale. It also serves as a context manager with the same
+    effect. If no such locale is available, the test is skipped.
+
+ .. versionadded:: 1.15.0
+
+ """
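+    # Usage sketch (assuming a comma-decimal locale is installed):
+    #
+    #     with CommaDecimalPointLocale():
+    #         assert locale.str(1.5) == '1,5'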
+ (cur_locale, tst_locale) = find_comma_decimal_point_locale()
+
+ def setup_method(self):
+ if self.tst_locale is None:
+ pytest.skip("No French locale available")
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+ def teardown_method(self):
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
+
+ def __enter__(self):
+ if self.tst_locale is None:
+ pytest.skip("No French locale available")
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+ def __exit__(self, type, value, traceback):
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/astype_copy.pkl b/venv/lib/python3.9/site-packages/numpy/core/tests/data/astype_copy.pkl
new file mode 100644
index 00000000..7397c978
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/astype_copy.pkl
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp b/venv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp
new file mode 100644
index 00000000..51ee1250
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/generate_umath_validation_data.cpp
@@ -0,0 +1,182 @@
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <random>
+#include <string>
+#include <vector>
+
+struct ufunc {
+    std::string name;                     // NumPy ufunc name
+    double (*f32func)(double);            // reference for float32, computed in double
+    long double (*f64func)(long double);  // reference for float64, computed in long double
+    float f32ulp;                         // max allowed ulp error, float32
+    float f64ulp;                         // max allowed ulp error, float64
+};
+
+template <typename T>
+T
+RandomFloat(T a, T b)
+{
+ T random = ((T)rand()) / (T)RAND_MAX;
+ T diff = b - a;
+ T r = random * diff;
+ return a + r;
+}
+
+template <typename T>
+void
+append_random_array(std::vector<T> &arr, T min, T max, size_t N)
+{
+ for (size_t ii = 0; ii < N; ++ii)
+ arr.emplace_back(RandomFloat<T>(min, max));
+}
+
+template <typename T1, typename T2>
+std::vector<T1>
+computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2))
+{
+ std::vector<T1> out;
+ for (T1 elem : in) {
+ T2 elem_d = (T2)elem;
+ T1 out_elem = (T1)mathfunc(elem_d);
+ out.emplace_back(out_elem);
+ }
+ return out;
+}
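+// Reference outputs are deliberately computed in a wider type (double for
+// float inputs, long double for double inputs) and then cast back, so the
+// CSV "expected output" column is correctly rounded.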
+
+/*
+ * FP range:
+ * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf]
+ */
+
+#define MINDEN std::numeric_limits<T>::denorm_min()
+#define MINFLT std::numeric_limits<T>::min()
+#define MAXFLT std::numeric_limits<T>::max()
+#define INF std::numeric_limits<T>::infinity()
+#define qNAN std::numeric_limits<T>::quiet_NaN()
+#define sNAN std::numeric_limits<T>::signaling_NaN()
+
+template <typename T>
+std::vector<T>
+generate_input_vector(const std::string &func)
+{
+ std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT,
+ -MAXFLT, INF, -INF, qNAN, sNAN,
+ -1.0, 1.0, 0.0, -0.0};
+
+ // [-1.0, 1.0]
+ if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) {
+ append_random_array<T>(input, -1.0, 1.0, 700);
+ }
+ // (0.0, INF]
+ else if ((func == "log2") || (func == "log10")) {
+ append_random_array<T>(input, 0.0, 1.0, 200);
+ append_random_array<T>(input, MINDEN, MINFLT, 200);
+ append_random_array<T>(input, MINFLT, 1.0, 200);
+ append_random_array<T>(input, 1.0, MAXFLT, 200);
+ }
+ // (-1.0, INF]
+ else if (func == "log1p") {
+ append_random_array<T>(input, -1.0, 1.0, 200);
+ append_random_array<T>(input, -MINFLT, -MINDEN, 100);
+ append_random_array<T>(input, -1.0, -MINFLT, 100);
+ append_random_array<T>(input, MINDEN, MINFLT, 100);
+ append_random_array<T>(input, MINFLT, 1.0, 100);
+ append_random_array<T>(input, 1.0, MAXFLT, 100);
+ }
+ // [1.0, INF]
+ else if (func == "arccosh") {
+ append_random_array<T>(input, 1.0, 2.0, 400);
+ append_random_array<T>(input, 2.0, MAXFLT, 300);
+ }
+ // [-INF, INF]
+ else {
+ append_random_array<T>(input, -1.0, 1.0, 100);
+ append_random_array<T>(input, MINDEN, MINFLT, 100);
+ append_random_array<T>(input, -MINFLT, -MINDEN, 100);
+ append_random_array<T>(input, MINFLT, 1.0, 100);
+ append_random_array<T>(input, -1.0, -MINFLT, 100);
+ append_random_array<T>(input, 1.0, MAXFLT, 100);
+ append_random_array<T>(input, -MAXFLT, -100.0, 100);
+ }
+
+    // std::random_shuffle was removed in C++17; std::shuffle with a fixed
+    // seed keeps the ordering reproducible.
+    std::shuffle(input.begin(), input.end(), std::mt19937(0));
+ return input;
+}
+
+int
+main()
+{
+ srand(42);
+ std::vector<struct ufunc> umathfunc = {
+ {"sin", sin, sin, 2.37, 3.3},
+ {"cos", cos, cos, 2.36, 3.38},
+ {"tan", tan, tan, 3.91, 3.93},
+ {"arcsin", asin, asin, 3.12, 2.55},
+ {"arccos", acos, acos, 2.1, 1.67},
+ {"arctan", atan, atan, 2.3, 2.52},
+ {"sinh", sinh, sinh, 1.55, 1.89},
+ {"cosh", cosh, cosh, 2.48, 1.97},
+ {"tanh", tanh, tanh, 1.38, 1.19},
+ {"arcsinh", asinh, asinh, 1.01, 1.48},
+ {"arccosh", acosh, acosh, 1.16, 1.05},
+ {"arctanh", atanh, atanh, 1.45, 1.46},
+ {"cbrt", cbrt, cbrt, 1.94, 1.82},
+ //{"exp",exp,exp,3.76,1.53},
+ {"exp2", exp2, exp2, 1.01, 1.04},
+ {"expm1", expm1, expm1, 2.62, 2.1},
+ //{"log",log,log,1.84,1.67},
+ {"log10", log10, log10, 3.5, 1.92},
+ {"log1p", log1p, log1p, 1.96, 1.93},
+ {"log2", log2, log2, 2.12, 1.84},
+ };
+
+    for (size_t ii = 0; ii < umathfunc.size(); ++ii) {
+ // ignore sin/cos
+ if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) {
+ std::string fileName =
+ "umath-validation-set-" + umathfunc[ii].name + ".csv";
+ std::ofstream txtOut;
+ txtOut.open(fileName, std::ofstream::trunc);
+ txtOut << "dtype,input,output,ulperrortol" << std::endl;
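+            // Inputs and outputs are written as raw IEEE-754 bit patterns
+            // (via reinterpret_cast) so that values round-trip exactly
+            // through the CSV.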
+
+ // Single Precision
+ auto f32in = generate_input_vector<float>(umathfunc[ii].name);
+ auto f32out = computeTrueVal<float, double>(f32in,
+ umathfunc[ii].f32func);
+            for (size_t jj = 0; jj < f32in.size(); ++jj) {
+ txtOut << "np.float32" << std::hex << ",0x"
+ << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x"
+ << *reinterpret_cast<uint32_t *>(&f32out[jj]) << ","
+ << ceil(umathfunc[ii].f32ulp) << std::endl;
+ }
+
+ // Double Precision
+ auto f64in = generate_input_vector<double>(umathfunc[ii].name);
+ auto f64out = computeTrueVal<double, long double>(
+ f64in, umathfunc[ii].f64func);
+            for (size_t jj = 0; jj < f64in.size(); ++jj) {
+ txtOut << "np.float64" << std::hex << ",0x"
+ << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x"
+ << *reinterpret_cast<uint64_t *>(&f64out[jj]) << ","
+ << ceil(umathfunc[ii].f64ulp) << std::endl;
+ }
+ txtOut.close();
+ }
+ }
+ return 0;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/recarray_from_file.fits b/venv/lib/python3.9/site-packages/numpy/core/tests/data/recarray_from_file.fits
new file mode 100644
index 00000000..ca48ee85
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/recarray_from_file.fits
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-README.txt b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-README.txt
new file mode 100644
index 00000000..cfc9e414
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-README.txt
@@ -0,0 +1,17 @@
+Steps to validate transcendental functions:
+1) Add a file 'umath-validation-set-<ufuncname>.csv', where <ufuncname> is the
+    name of the NumPy function you want to validate.
+2) The file should contain 4 columns: dtype,input,expected output,ulperror
+    a. dtype: one of np.float16, np.float32, np.float64
+    b. input: floating point input to the ufunc in hex. Example: 0x414570a4
+        represents 12.340000152587890625
+    c. expected output: floating point output for the corresponding input, in hex.
+        This should be computed using a high(er) precision library and then rounded
+        to the same format as the input.
+    d. ulperror: expected maximum ulp error of the function. This should be the
+        same across all rows of the same dtype. Otherwise, the function is tested
+        for the maximum ulp error among all entries of that dtype.
+3) Add the file umath-validation-set-<ufuncname>.csv to the test file
+    test_umath_accuracy.py, which will then validate your ufunc.
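+For example, the float32 row for arccos(1.0) == 0.0 in
+umath-validation-set-arccos.csv is: np.float32,0x3f800000,0x0,3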
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv
new file mode 100644
index 00000000..6697ae95
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccos.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbddd7f50,0x3fd6eec2,3
+np.float32,0xbe32a20c,0x3fdf8182,3
+np.float32,0xbf607c09,0x4028f84f,3
+np.float32,0x3f25d906,0x3f5db544,3
+np.float32,0x3f01cec8,0x3f84febf,3
+np.float32,0x3f1d5c6e,0x3f68a735,3
+np.float32,0xbf0cab89,0x4009c36d,3
+np.float32,0xbf176b40,0x400d0941,3
+np.float32,0x3f3248b2,0x3f4ce6d4,3
+np.float32,0x3f390b48,0x3f434e0d,3
+np.float32,0xbe261698,0x3fddea43,3
+np.float32,0x3f0e1154,0x3f7b848b,3
+np.float32,0xbf379a3c,0x4017b764,3
+np.float32,0xbeda6f2c,0x4000bd62,3
+np.float32,0xbf6a0c3f,0x402e5d5a,3
+np.float32,0x3ef1d700,0x3f8a17b7,3
+np.float32,0xbf6f4f65,0x4031d30d,3
+np.float32,0x3f2c9eee,0x3f54adfd,3
+np.float32,0x3f3cfb18,0x3f3d8a1e,3
+np.float32,0x3ba80800,0x3fc867d2,3
+np.float32,0x3e723b08,0x3faa7e4d,3
+np.float32,0xbf65820f,0x402bb054,3
+np.float32,0xbee64e7a,0x40026410,3
+np.float32,0x3cb15140,0x3fc64a87,3
+np.float32,0x3f193660,0x3f6ddf2a,3
+np.float32,0xbf0e5b52,0x400a44f7,3
+np.float32,0x3ed55f14,0x3f920a4b,3
+np.float32,0x3dd11a80,0x3fbbf85c,3
+np.float32,0xbf4f5c4b,0x4020f4f9,3
+np.float32,0x3f787532,0x3e792e87,3
+np.float32,0x3f40e6ac,0x3f37a74f,3
+np.float32,0x3f1c1318,0x3f6a47b6,3
+np.float32,0xbe3c48d8,0x3fe0bb70,3
+np.float32,0xbe94d4bc,0x3feed08e,3
+np.float32,0xbe5c3688,0x3fe4ce26,3
+np.float32,0xbf6fe026,0x403239cb,3
+np.float32,0x3ea5983c,0x3f9ee7bf,3
+np.float32,0x3f1471e6,0x3f73c5bb,3
+np.float32,0x3f0e2622,0x3f7b6b87,3
+np.float32,0xbf597180,0x40257ad1,3
+np.float32,0xbeb5321c,0x3ff75d34,3
+np.float32,0x3f5afcd2,0x3f0b6012,3
+np.float32,0xbef2ff88,0x40042e14,3
+np.float32,0xbedc747e,0x400104f5,3
+np.float32,0xbee0c2f4,0x40019dfc,3
+np.float32,0xbf152cd8,0x400c57dc,3
+np.float32,0xbf6cf9e2,0x40303bbe,3
+np.float32,0x3ed9cd74,0x3f90d1a1,3
+np.float32,0xbf754406,0x4036767f,3
+np.float32,0x3f59c5c2,0x3f0db42f,3
+np.float32,0x3f2eefd8,0x3f518684,3
+np.float32,0xbf156bf9,0x400c6b49,3
+np.float32,0xbd550790,0x3fcfb8dc,3
+np.float32,0x3ede58fc,0x3f8f8f77,3
+np.float32,0xbf00ac19,0x40063c4b,3
+np.float32,0x3f4d25ba,0x3f24280e,3
+np.float32,0xbe9568be,0x3feef73c,3
+np.float32,0x3f67d154,0x3ee05547,3
+np.float32,0x3f617226,0x3efcb4f4,3
+np.float32,0xbf3ab41a,0x4018d6cc,3
+np.float32,0xbf3186fe,0x401592cd,3
+np.float32,0x3de3ba50,0x3fbacca9,3
+np.float32,0x3e789f98,0x3fa9ab97,3
+np.float32,0x3f016e08,0x3f8536d8,3
+np.float32,0x3e8b618c,0x3fa5c571,3
+np.float32,0x3eff97bc,0x3f8628a9,3
+np.float32,0xbf6729f0,0x402ca32f,3
+np.float32,0xbebec146,0x3ff9eddc,3
+np.float32,0x3ddb2e60,0x3fbb563a,3
+np.float32,0x3caa8e40,0x3fc66595,3
+np.float32,0xbf5973f2,0x40257bfa,3
+np.float32,0xbdd82c70,0x3fd69916,3
+np.float32,0xbedf4c82,0x400169ef,3
+np.float32,0x3ef8f22c,0x3f881184,3
+np.float32,0xbf1d74d4,0x400eedc9,3
+np.float32,0x3f2e10a6,0x3f52b790,3
+np.float32,0xbf08ecc0,0x4008a628,3
+np.float32,0x3ecb7db4,0x3f94be9f,3
+np.float32,0xbf052ded,0x40078bfc,3
+np.float32,0x3f2ee78a,0x3f5191e4,3
+np.float32,0xbf56f4e1,0x40245194,3
+np.float32,0x3f600a3e,0x3f014a25,3
+np.float32,0x3f3836f8,0x3f44808b,3
+np.float32,0x3ecabfbc,0x3f94f25c,3
+np.float32,0x3c70f500,0x3fc72dec,3
+np.float32,0x3f17c444,0x3f6fabf0,3
+np.float32,0xbf4c22a5,0x401f9a09,3
+np.float32,0xbe4205dc,0x3fe1765a,3
+np.float32,0x3ea49138,0x3f9f2d36,3
+np.float32,0xbece0082,0x3ffe106b,3
+np.float32,0xbe387578,0x3fe03eef,3
+np.float32,0xbf2b6466,0x40137a30,3
+np.float32,0xbe9dadb2,0x3ff12204,3
+np.float32,0xbf56b3f2,0x402433bb,3
+np.float32,0xbdf9b4d8,0x3fd8b51f,3
+np.float32,0x3f58a596,0x3f0fd4b4,3
+np.float32,0xbedf5748,0x40016b6e,3
+np.float32,0x3f446442,0x3f32476f,3
+np.float32,0x3f5be886,0x3f099658,3
+np.float32,0x3ea1e44c,0x3f9fe1de,3
+np.float32,0xbf11e9b8,0x400b585f,3
+np.float32,0xbf231f8f,0x4010befb,3
+np.float32,0xbf4395ea,0x401c2dd0,3
+np.float32,0x3e9e7784,0x3fa0c8a6,3
+np.float32,0xbe255184,0x3fddd14c,3
+np.float32,0x3f70d25e,0x3eb13148,3
+np.float32,0x3f220cdc,0x3f62a722,3
+np.float32,0xbd027bf0,0x3fcd23e7,3
+np.float32,0x3e4ef8b8,0x3faf02d2,3
+np.float32,0xbf76fc6b,0x40380728,3
+np.float32,0xbf57e761,0x4024c1cd,3
+np.float32,0x3ed4fc20,0x3f922580,3
+np.float32,0xbf09b64a,0x4008e1db,3
+np.float32,0x3f21ca62,0x3f62fcf5,3
+np.float32,0xbe55f610,0x3fe40170,3
+np.float32,0xbc0def80,0x3fca2bbb,3
+np.float32,0xbebc8764,0x3ff9547b,3
+np.float32,0x3ec1b200,0x3f9766d1,3
+np.float32,0xbf4ee44e,0x4020c1ee,3
+np.float32,0xbea85852,0x3ff3f22a,3
+np.float32,0xbf195c0c,0x400da3d3,3
+np.float32,0xbf754b5d,0x40367ce8,3
+np.float32,0xbdcbfe50,0x3fd5d52b,3
+np.float32,0xbf1adb87,0x400e1be3,3
+np.float32,0xbf6f8491,0x4031f898,3
+np.float32,0xbf6f9ae7,0x4032086e,3
+np.float32,0xbf52b3f0,0x40226790,3
+np.float32,0xbf698452,0x402e09f4,3
+np.float32,0xbf43dc9a,0x401c493a,3
+np.float32,0xbf165f7f,0x400cb664,3
+np.float32,0x3e635468,0x3fac682f,3
+np.float32,0xbe8cf2b6,0x3fecc28a,3
+np.float32,0x7f7fffff,0x7fc00000,3
+np.float32,0xbf4c6513,0x401fb597,3
+np.float32,0xbf02b8f8,0x4006d47e,3
+np.float32,0x3ed3759c,0x3f9290c8,3
+np.float32,0xbf2a7a5f,0x40132b98,3
+np.float32,0xbae65000,0x3fc9496f,3
+np.float32,0x3f65f5ea,0x3ee8ef07,3
+np.float32,0xbe7712fc,0x3fe84106,3
+np.float32,0xbb9ff700,0x3fc9afd2,3
+np.float32,0x3d8d87a0,0x3fc03592,3
+np.float32,0xbefc921c,0x40058c23,3
+np.float32,0xbf286566,0x401279d8,3
+np.float32,0x3f53857e,0x3f192eaf,3
+np.float32,0xbee9b0f4,0x4002dd90,3
+np.float32,0x3f4041f8,0x3f38a14a,3
+np.float32,0x3f54ea96,0x3f16b02d,3
+np.float32,0x3ea50ef8,0x3f9f0c01,3
+np.float32,0xbeaad2dc,0x3ff49a4a,3
+np.float32,0xbec428c8,0x3ffb636f,3
+np.float32,0xbda46178,0x3fd358c7,3
+np.float32,0xbefacfc4,0x40054b7f,3
+np.float32,0xbf7068f9,0x40329c85,3
+np.float32,0x3f70b850,0x3eb1caa7,3
+np.float32,0x7fa00000,0x7fe00000,3
+np.float32,0x80000000,0x3fc90fdb,3
+np.float32,0x3f68d5c8,0x3edb7cf3,3
+np.float32,0x3d9443d0,0x3fbfc98a,3
+np.float32,0xff7fffff,0x7fc00000,3
+np.float32,0xbeee7ba8,0x40038a5e,3
+np.float32,0xbf0aaaba,0x40092a73,3
+np.float32,0x3f36a4e8,0x3f46c0ee,3
+np.float32,0x3ed268e4,0x3f92da82,3
+np.float32,0xbee6002c,0x4002591b,3
+np.float32,0xbe8f2752,0x3fed5576,3
+np.float32,0x3f525912,0x3f1b40e0,3
+np.float32,0xbe8e151e,0x3fed0e16,3
+np.float32,0x1,0x3fc90fdb,3
+np.float32,0x3ee23b84,0x3f8e7ae1,3
+np.float32,0xbf5961ca,0x40257361,3
+np.float32,0x3f6bbca0,0x3ecd14cd,3
+np.float32,0x3e27b230,0x3fb4014d,3
+np.float32,0xbf183bb8,0x400d49fc,3
+np.float32,0x3f57759c,0x3f120b68,3
+np.float32,0xbd6994c0,0x3fd05d84,3
+np.float32,0xbf1dd684,0x400f0cc8,3
+np.float32,0xbececc1c,0x3ffe480a,3
+np.float32,0xbf48855f,0x401e206d,3
+np.float32,0x3f28c922,0x3f59d382,3
+np.float32,0xbf65c094,0x402bd3b0,3
+np.float32,0x3f657d42,0x3eeb11dd,3
+np.float32,0xbed32d4e,0x3fff7b15,3
+np.float32,0xbf31af02,0x4015a0b1,3
+np.float32,0x3d89eb00,0x3fc06f7f,3
+np.float32,0x3dac2830,0x3fbe4a17,3
+np.float32,0x3f7f7cb6,0x3d81a7df,3
+np.float32,0xbedbb570,0x4000ea82,3
+np.float32,0x3db37830,0x3fbdd4a8,3
+np.float32,0xbf376f48,0x4017a7fd,3
+np.float32,0x3f319f12,0x3f4dd2c9,3
+np.float32,0x7fc00000,0x7fc00000,3
+np.float32,0x3f1b4f70,0x3f6b3e31,3
+np.float32,0x3e33c880,0x3fb278d1,3
+np.float32,0x3f2796e0,0x3f5b69bd,3
+np.float32,0x3f4915d6,0x3f2ad4d0,3
+np.float32,0x3e4db120,0x3faf2ca0,3
+np.float32,0x3ef03dd4,0x3f8a8ba9,3
+np.float32,0x3e96ca88,0x3fa2cbf7,3
+np.float32,0xbeb136ce,0x3ff64d2b,3
+np.float32,0xbf2f3938,0x4014c75e,3
+np.float32,0x3f769dde,0x3e8b0d76,3
+np.float32,0x3f67cec8,0x3ee06148,3
+np.float32,0x3f0a1ade,0x3f80204e,3
+np.float32,0x3e4b9718,0x3faf7144,3
+np.float32,0x3cccb480,0x3fc5dcf3,3
+np.float32,0x3caeb740,0x3fc654f0,3
+np.float32,0x3f684e0e,0x3ede0678,3
+np.float32,0x3f0ba93c,0x3f7e6663,3
+np.float32,0xbf12bbc4,0x400b985e,3
+np.float32,0xbf2a8e1a,0x40133235,3
+np.float32,0x3f42029c,0x3f35f5c5,3
+np.float32,0x3eed1728,0x3f8b6f9c,3
+np.float32,0xbe5779ac,0x3fe432fd,3
+np.float32,0x3f6ed8b8,0x3ebc7e4b,3
+np.float32,0x3eea25b0,0x3f8c43c7,3
+np.float32,0x3f1988a4,0x3f6d786b,3
+np.float32,0xbe751674,0x3fe7ff8a,3
+np.float32,0xbe9f7418,0x3ff1997d,3
+np.float32,0x3dca11d0,0x3fbc6979,3
+np.float32,0x3f795226,0x3e6a6cab,3
+np.float32,0xbea780e0,0x3ff3b926,3
+np.float32,0xbed92770,0x4000901e,3
+np.float32,0xbf3e9f8c,0x401a49f8,3
+np.float32,0x3f0f7054,0x3f79ddb2,3
+np.float32,0x3a99d400,0x3fc8e966,3
+np.float32,0xbef082b0,0x4003d3c6,3
+np.float32,0xbf0d0790,0x4009defb,3
+np.float32,0xbf1649da,0x400cafb4,3
+np.float32,0xbea5aca8,0x3ff33d5c,3
+np.float32,0xbf4e1843,0x40206ba1,3
+np.float32,0xbe3d7d5c,0x3fe0e2ad,3
+np.float32,0xbf0e802d,0x400a500e,3
+np.float32,0xbf0de8f0,0x400a2295,3
+np.float32,0xbf3016ba,0x4015137e,3
+np.float32,0x3f36b1ea,0x3f46ae5d,3
+np.float32,0xbd27f170,0x3fce4fc7,3
+np.float32,0x3e96ec54,0x3fa2c31f,3
+np.float32,0x3eb4dfdc,0x3f9ad87d,3
+np.float32,0x3f5cac6c,0x3f0815cc,3
+np.float32,0xbf0489aa,0x40075bf1,3
+np.float32,0x3df010c0,0x3fba05f5,3
+np.float32,0xbf229f4a,0x4010956a,3
+np.float32,0x3f75e474,0x3e905a99,3
+np.float32,0xbcece6a0,0x3fccc397,3
+np.float32,0xbdb41528,0x3fd454e7,3
+np.float32,0x3ec8b2f8,0x3f958118,3
+np.float32,0x3f5eaa70,0x3f041a1d,3
+np.float32,0xbf32e1cc,0x40160b91,3
+np.float32,0xbe8e6026,0x3fed219c,3
+np.float32,0x3e6b3160,0x3fab65e3,3
+np.float32,0x3e6d7460,0x3fab1b81,3
+np.float32,0xbf13fbde,0x400bfa3b,3
+np.float32,0xbe8235ec,0x3fe9f9e3,3
+np.float32,0x3d71c4a0,0x3fc18096,3
+np.float32,0x3eb769d0,0x3f9a2aa0,3
+np.float32,0xbf68cb3b,0x402d99e4,3
+np.float32,0xbd917610,0x3fd22932,3
+np.float32,0x3d3cba60,0x3fc3297f,3
+np.float32,0xbf383cbe,0x4017f1cc,3
+np.float32,0xbeee96d0,0x40038e34,3
+np.float32,0x3ec89cb4,0x3f958725,3
+np.float32,0x3ebf92d8,0x3f97f95f,3
+np.float32,0x3f30f3da,0x3f4ec021,3
+np.float32,0xbd26b560,0x3fce45e4,3
+np.float32,0xbec0eb12,0x3ffa8330,3
+np.float32,0x3f6d592a,0x3ec4a6c1,3
+np.float32,0x3ea6d39c,0x3f9e9463,3
+np.float32,0x3e884184,0x3fa6951e,3
+np.float32,0x3ea566c4,0x3f9ef4d1,3
+np.float32,0x3f0c8f4c,0x3f7d5380,3
+np.float32,0x3f28e1ba,0x3f59b2cb,3
+np.float32,0x3f798538,0x3e66e1c3,3
+np.float32,0xbe2889b8,0x3fde39b8,3
+np.float32,0x3f3da05e,0x3f3c949c,3
+np.float32,0x3f24d700,0x3f5f073e,3
+np.float32,0xbe5b5768,0x3fe4b198,3
+np.float32,0xbed3b03a,0x3fff9f05,3
+np.float32,0x3e8a1c4c,0x3fa619eb,3
+np.float32,0xbf075d24,0x40083030,3
+np.float32,0x3f765648,0x3e8d1f52,3
+np.float32,0xbf70fc5e,0x403308bb,3
+np.float32,0x3f557ae8,0x3f15ab76,3
+np.float32,0x3f02f7ea,0x3f84521c,3
+np.float32,0x3f7ebbde,0x3dcbc5c5,3
+np.float32,0xbefbdfc6,0x40057285,3
+np.float32,0x3ec687ac,0x3f9617d9,3
+np.float32,0x3e4831c8,0x3fafe01b,3
+np.float32,0x3e25cde0,0x3fb43ea8,3
+np.float32,0x3e4f2ab8,0x3faefc70,3
+np.float32,0x3ea60ae4,0x3f9ec973,3
+np.float32,0xbf1ed55f,0x400f5dde,3
+np.float32,0xbf5ad4aa,0x40262479,3
+np.float32,0x3e8b3594,0x3fa5d0de,3
+np.float32,0x3f3a77aa,0x3f413c80,3
+np.float32,0xbf07512b,0x40082ca9,3
+np.float32,0x3f33d990,0x3f4ab5e5,3
+np.float32,0x3f521556,0x3f1bb78f,3
+np.float32,0xbecf6036,0x3ffe7086,3
+np.float32,0x3db91bd0,0x3fbd7a11,3
+np.float32,0x3ef63a74,0x3f88d839,3
+np.float32,0xbf2f1116,0x4014b99c,3
+np.float32,0xbf17fdc0,0x400d36b9,3
+np.float32,0xbe87df2c,0x3feb7117,3
+np.float32,0x80800000,0x3fc90fdb,3
+np.float32,0x3ee24c1c,0x3f8e7641,3
+np.float32,0x3f688dce,0x3edcd644,3
+np.float32,0xbf0f4e1c,0x400a8e1b,3
+np.float32,0x0,0x3fc90fdb,3
+np.float32,0x3f786eba,0x3e7999d4,3
+np.float32,0xbf404f80,0x401aeca8,3
+np.float32,0xbe9ffb6a,0x3ff1bd18,3
+np.float32,0x3f146bfc,0x3f73ccfd,3
+np.float32,0xbe47d630,0x3fe233ee,3
+np.float32,0xbe95847c,0x3feefe7c,3
+np.float32,0xbf135df0,0x400bc9e5,3
+np.float32,0x3ea19f3c,0x3f9ff411,3
+np.float32,0x3f235e20,0x3f60f247,3
+np.float32,0xbec789ec,0x3ffc4def,3
+np.float32,0x3f04b656,0x3f834db6,3
+np.float32,0x3dfaf440,0x3fb95679,3
+np.float32,0xbe4a7f28,0x3fe28abe,3
+np.float32,0x3ed4850c,0x3f92463b,3
+np.float32,0x3ec4ba5c,0x3f9694dd,3
+np.float32,0xbce24ca0,0x3fcc992b,3
+np.float32,0xbf5b7c6e,0x402675a0,3
+np.float32,0xbea3ce2a,0x3ff2bf04,3
+np.float32,0x3db02c60,0x3fbe0998,3
+np.float32,0x3c47b780,0x3fc78069,3
+np.float32,0x3ed33b20,0x3f92a0d5,3
+np.float32,0xbf4556d7,0x401cdcde,3
+np.float32,0xbe1b6e28,0x3fdc90ec,3
+np.float32,0xbf3289b7,0x4015ecd0,3
+np.float32,0x3df3f240,0x3fb9c76d,3
+np.float32,0x3eefa7d0,0x3f8ab61d,3
+np.float32,0xbe945838,0x3feeb006,3
+np.float32,0xbf0b1386,0x400949a3,3
+np.float32,0x3f77e546,0x3e812cc1,3
+np.float32,0x3e804ba0,0x3fa8a480,3
+np.float32,0x3f43dcea,0x3f331a06,3
+np.float32,0x3eb87450,0x3f99e33c,3
+np.float32,0x3e5f4898,0x3facecea,3
+np.float32,0x3f646640,0x3eeff10e,3
+np.float32,0x3f1aa832,0x3f6c1051,3
+np.float32,0xbebf6bfa,0x3ffa1bdc,3
+np.float32,0xbb77f300,0x3fc98bd4,3
+np.float32,0x3f3587fe,0x3f485645,3
+np.float32,0x3ef85f34,0x3f883b8c,3
+np.float32,0x3f50e584,0x3f1dc82c,3
+np.float32,0x3f1d30a8,0x3f68deb0,3
+np.float32,0x3ee75a78,0x3f8d0c86,3
+np.float32,0x3f2c023a,0x3f5581e1,3
+np.float32,0xbf074e34,0x40082bca,3
+np.float32,0xbead71f0,0x3ff54c6d,3
+np.float32,0xbf39ed88,0x40188e69,3
+np.float32,0x3f5d2fe6,0x3f07118b,3
+np.float32,0xbf1f79f8,0x400f9267,3
+np.float32,0x3e900c58,0x3fa48e99,3
+np.float32,0xbf759cb2,0x4036c47b,3
+np.float32,0x3f63329c,0x3ef5359c,3
+np.float32,0xbf5d6755,0x40276709,3
+np.float32,0x3f2ce31c,0x3f54519a,3
+np.float32,0x7f800000,0x7fc00000,3
+np.float32,0x3f1bf50e,0x3f6a6d9a,3
+np.float32,0x3f258334,0x3f5e25d8,3
+np.float32,0xbf661a3f,0x402c06ac,3
+np.float32,0x3d1654c0,0x3fc45cef,3
+np.float32,0xbef14a36,0x4003f009,3
+np.float32,0xbf356051,0x4016ec3a,3
+np.float32,0x3f6ccc42,0x3ec79193,3
+np.float32,0xbf2fe3d6,0x401501f9,3
+np.float32,0x3deedc80,0x3fba195b,3
+np.float32,0x3f2e5a28,0x3f52533e,3
+np.float32,0x3e6b68b8,0x3fab5ec8,3
+np.float32,0x3e458240,0x3fb037b7,3
+np.float32,0xbf24bab0,0x401144cb,3
+np.float32,0x3f600f4c,0x3f013fb2,3
+np.float32,0x3f021a04,0x3f84d316,3
+np.float32,0x3f741732,0x3e9cc948,3
+np.float32,0x3f0788aa,0x3f81a5b0,3
+np.float32,0x3f28802c,0x3f5a347c,3
+np.float32,0x3c9eb400,0x3fc69500,3
+np.float32,0x3e5d11e8,0x3fad357a,3
+np.float32,0x3d921250,0x3fbfecb9,3
+np.float32,0x3f354866,0x3f48b066,3
+np.float32,0xbf72cf43,0x40346d84,3
+np.float32,0x3eecdbb8,0x3f8b805f,3
+np.float32,0xbee585d0,0x400247fd,3
+np.float32,0x3e3607a8,0x3fb22fc6,3
+np.float32,0xbf0cb7d6,0x4009c71c,3
+np.float32,0xbf56b230,0x402432ec,3
+np.float32,0xbf4ced02,0x401fee29,3
+np.float32,0xbf3a325c,0x4018a776,3
+np.float32,0x3ecae8bc,0x3f94e732,3
+np.float32,0xbe48c7e8,0x3fe252bd,3
+np.float32,0xbe175d7c,0x3fdc0d5b,3
+np.float32,0x3ea78dac,0x3f9e632d,3
+np.float32,0xbe7434a8,0x3fe7e279,3
+np.float32,0x3f1f9e02,0x3f65c7b9,3
+np.float32,0xbe150f2c,0x3fdbc2c2,3
+np.float32,0x3ee13480,0x3f8ec423,3
+np.float32,0x3ecb7d54,0x3f94beb9,3
+np.float32,0x3f1cef42,0x3f693181,3
+np.float32,0xbf1ec06a,0x400f5730,3
+np.float32,0xbe112acc,0x3fdb44e8,3
+np.float32,0xbe77b024,0x3fe85545,3
+np.float32,0x3ec86fe0,0x3f959353,3
+np.float32,0x3f36b326,0x3f46ac9a,3
+np.float32,0x3e581a70,0x3fadd829,3
+np.float32,0xbf032c0c,0x4006f5f9,3
+np.float32,0xbf43b1fd,0x401c38b1,3
+np.float32,0x3f3701b4,0x3f463c5c,3
+np.float32,0x3f1a995a,0x3f6c22f1,3
+np.float32,0xbf05de0b,0x4007bf97,3
+np.float32,0x3d4bd960,0x3fc2b063,3
+np.float32,0x3f0e1618,0x3f7b7ed0,3
+np.float32,0x3edfd420,0x3f8f2628,3
+np.float32,0xbf6662fe,0x402c3047,3
+np.float32,0x3ec0690c,0x3f97bf9b,3
+np.float32,0xbeaf4146,0x3ff5c7a0,3
+np.float32,0x3f5e7764,0x3f04816d,3
+np.float32,0xbedd192c,0x40011bc5,3
+np.float32,0x3eb76350,0x3f9a2c5e,3
+np.float32,0xbed8108c,0x400069a5,3
+np.float32,0xbe59f31c,0x3fe48401,3
+np.float32,0xbea3e1e6,0x3ff2c439,3
+np.float32,0x3e26d1f8,0x3fb41db5,3
+np.float32,0x3f3a0a7c,0x3f41dba5,3
+np.float32,0x3ebae068,0x3f993ce4,3
+np.float32,0x3f2d8e30,0x3f536942,3
+np.float32,0xbe838bbe,0x3fea5247,3
+np.float32,0x3ebe4420,0x3f98538f,3
+np.float32,0xbcc59b80,0x3fcc265c,3
+np.float32,0x3eebb5c8,0x3f8bd334,3
+np.float32,0xbafc3400,0x3fc94ee8,3
+np.float32,0xbf63ddc1,0x402ac683,3
+np.float32,0xbeabdf80,0x3ff4e18f,3
+np.float32,0x3ea863f0,0x3f9e2a78,3
+np.float32,0x3f45b292,0x3f303bc1,3
+np.float32,0xbe68aa60,0x3fe666bf,3
+np.float32,0x3eb9de18,0x3f998239,3
+np.float32,0xbf719d85,0x4033815e,3
+np.float32,0x3edef9a8,0x3f8f62db,3
+np.float32,0xbd7781c0,0x3fd0cd1e,3
+np.float32,0x3f0b3b90,0x3f7ee92a,3
+np.float32,0xbe3eb3b4,0x3fe10a27,3
+np.float32,0xbf31a4c4,0x40159d23,3
+np.float32,0x3e929434,0x3fa3e5b0,3
+np.float32,0xbeb1a90e,0x3ff66b9e,3
+np.float32,0xbeba9b5e,0x3ff8d048,3
+np.float32,0xbf272a84,0x4012119e,3
+np.float32,0x3f1ebbd0,0x3f66e889,3
+np.float32,0x3ed3cdc8,0x3f927893,3
+np.float32,0xbf50dfce,0x40219b58,3
+np.float32,0x3f0c02de,0x3f7dfb62,3
+np.float32,0xbf694de3,0x402de8d2,3
+np.float32,0xbeaeb13e,0x3ff5a14f,3
+np.float32,0xbf61aa7a,0x40299702,3
+np.float32,0xbf13d159,0x400bed35,3
+np.float32,0xbeecd034,0x40034e0b,3
+np.float32,0xbe50c2e8,0x3fe35761,3
+np.float32,0x3f714406,0x3eae8e57,3
+np.float32,0xbf1ca486,0x400eabd8,3
+np.float32,0x3f5858cc,0x3f106497,3
+np.float32,0x3f670288,0x3ee41c84,3
+np.float32,0xbf20bd2c,0x400ff9f5,3
+np.float32,0xbe29afd8,0x3fde5eff,3
+np.float32,0xbf635e6a,0x402a80f3,3
+np.float32,0x3e82b7b0,0x3fa80446,3
+np.float32,0x3e982e7c,0x3fa26ece,3
+np.float32,0x3d9f0e00,0x3fbf1c6a,3
+np.float32,0x3e8299b4,0x3fa80c07,3
+np.float32,0xbf0529c1,0x40078ac3,3
+np.float32,0xbf403b8a,0x401ae519,3
+np.float32,0xbe57e09c,0x3fe44027,3
+np.float32,0x3ea1c8f4,0x3f9fe913,3
+np.float32,0xbe216a94,0x3fdd52d0,3
+np.float32,0x3f59c442,0x3f0db709,3
+np.float32,0xbd636260,0x3fd02bdd,3
+np.float32,0xbdbbc788,0x3fd4d08d,3
+np.float32,0x3dd19560,0x3fbbf0a3,3
+np.float32,0x3f060ad4,0x3f828641,3
+np.float32,0x3b102e00,0x3fc8c7c4,3
+np.float32,0x3f42b3b8,0x3f34e5a6,3
+np.float32,0x3f0255ac,0x3f84b071,3
+np.float32,0xbf014898,0x40066996,3
+np.float32,0x3e004dc0,0x3fb8fb51,3
+np.float32,0xbf594ff8,0x40256af2,3
+np.float32,0x3efafddc,0x3f877b80,3
+np.float32,0xbf5f0780,0x40283899,3
+np.float32,0x3ee95e54,0x3f8c7bcc,3
+np.float32,0x3eba2f0c,0x3f996c80,3
+np.float32,0x3f37721c,0x3f459b68,3
+np.float32,0x3e2be780,0x3fb378bf,3
+np.float32,0x3e550270,0x3fae3d69,3
+np.float32,0x3e0f9500,0x3fb70e0a,3
+np.float32,0xbf51974a,0x4021eaf4,3
+np.float32,0x3f393832,0x3f430d05,3
+np.float32,0x3f3df16a,0x3f3c1bd8,3
+np.float32,0xbd662340,0x3fd041ed,3
+np.float32,0x3f7e8418,0x3ddc9fce,3
+np.float32,0xbf392734,0x40184672,3
+np.float32,0x3ee3b278,0x3f8e124e,3
+np.float32,0x3eed4808,0x3f8b61d2,3
+np.float32,0xbf6fccbd,0x40322beb,3
+np.float32,0x3e3ecdd0,0x3fb1123b,3
+np.float32,0x3f4419e0,0x3f32bb45,3
+np.float32,0x3f595e00,0x3f0e7914,3
+np.float32,0xbe8c1486,0x3fec88c6,3
+np.float32,0xbf800000,0x40490fdb,3
+np.float32,0xbdaf5020,0x3fd4084d,3
+np.float32,0xbf407660,0x401afb63,3
+np.float32,0x3f0c3aa8,0x3f7db8b8,3
+np.float32,0xbcdb5980,0x3fcc7d5b,3
+np.float32,0x3f4738d4,0x3f2dd1ed,3
+np.float32,0x3f4d7064,0x3f23ab14,3
+np.float32,0xbeb1d576,0x3ff67774,3
+np.float32,0xbf507166,0x40216bb3,3
+np.float32,0x3e86484c,0x3fa71813,3
+np.float32,0x3f09123e,0x3f80bd35,3
+np.float32,0xbe9abe0e,0x3ff05cb2,3
+np.float32,0x3f3019dc,0x3f4fed21,3
+np.float32,0xbe99e00e,0x3ff0227d,3
+np.float32,0xbf155ec5,0x400c6739,3
+np.float32,0x3f5857ba,0x3f106698,3
+np.float32,0x3edf619c,0x3f8f45fb,3
+np.float32,0xbf5ab76a,0x40261664,3
+np.float32,0x3e54b5a8,0x3fae4738,3
+np.float32,0xbee92772,0x4002ca40,3
+np.float32,0x3f2fd610,0x3f504a7a,3
+np.float32,0xbf38521c,0x4017f97e,3
+np.float32,0xff800000,0x7fc00000,3
+np.float32,0x3e2da348,0x3fb34077,3
+np.float32,0x3f2f85fa,0x3f50b894,3
+np.float32,0x3e88f9c8,0x3fa66551,3
+np.float32,0xbf61e570,0x4029b648,3
+np.float32,0xbeab362c,0x3ff4b4a1,3
+np.float32,0x3ec6c310,0x3f9607bd,3
+np.float32,0x3f0d7bda,0x3f7c3810,3
+np.float32,0xbeba5d36,0x3ff8bf99,3
+np.float32,0x3f4b0554,0x3f27adda,3
+np.float32,0x3f60f5dc,0x3efebfb3,3
+np.float32,0x3f36ce2c,0x3f468603,3
+np.float32,0xbe70afac,0x3fe76e8e,3
+np.float32,0x3f673350,0x3ee339b5,3
+np.float32,0xbe124cf0,0x3fdb698c,3
+np.float32,0xbf1243dc,0x400b73d0,3
+np.float32,0x3f3c8850,0x3f3e3407,3
+np.float32,0x3ea02f24,0x3fa05500,3
+np.float32,0xbeffed34,0x400607db,3
+np.float32,0x3f5c75c2,0x3f08817c,3
+np.float32,0x3f4b2fbe,0x3f27682d,3
+np.float32,0x3ee47c34,0x3f8dd9f9,3
+np.float32,0x3f50d48c,0x3f1de584,3
+np.float32,0x3f12dc5e,0x3f75b628,3
+np.float32,0xbefe7e4a,0x4005d2f4,3
+np.float32,0xbec2e846,0x3ffb0cbc,3
+np.float32,0xbedc3036,0x4000fb80,3
+np.float32,0xbf48aedc,0x401e311f,3
+np.float32,0x3f6e032e,0x3ec11363,3
+np.float32,0xbf60de15,0x40292b72,3
+np.float32,0x3f06585e,0x3f8258ba,3
+np.float32,0x3ef49b98,0x3f894e66,3
+np.float32,0x3cc5fe00,0x3fc5f7cf,3
+np.float32,0xbf7525c5,0x40365c2c,3
+np.float32,0x3f64f9f8,0x3eed5fb2,3
+np.float32,0x3e8849c0,0x3fa692fb,3
+np.float32,0x3e50c878,0x3faec79e,3
+np.float32,0x3ed61530,0x3f91d831,3
+np.float32,0xbf54872e,0x40233724,3
+np.float32,0xbf52ee7f,0x4022815e,3
+np.float32,0xbe708c24,0x3fe769fc,3
+np.float32,0xbf26fc54,0x40120260,3
+np.float32,0x3f226e8a,0x3f6228db,3
+np.float32,0xbef30406,0x40042eb8,3
+np.float32,0x3f5d996c,0x3f063f5f,3
+np.float32,0xbf425f9c,0x401bb618,3
+np.float32,0x3e4bb260,0x3faf6dc9,3
+np.float32,0xbe52d5a4,0x3fe39b29,3
+np.float32,0xbe169cf0,0x3fdbf505,3
+np.float32,0xbedfc422,0x40017a8e,3
+np.float32,0x3d8ffef0,0x3fc00e05,3
+np.float32,0xbf12bdab,0x400b98f2,3
+np.float32,0x3f295d0a,0x3f590e88,3
+np.float32,0x3f49d8e4,0x3f2998aa,3
+np.float32,0xbef914f4,0x40050c12,3
+np.float32,0xbf4ea2b5,0x4020a61e,3
+np.float32,0xbf3a89e5,0x4018c762,3
+np.float32,0x3e8707b4,0x3fa6e67a,3
+np.float32,0x3ac55400,0x3fc8de86,3
+np.float32,0x800000,0x3fc90fdb,3
+np.float32,0xbeb9762c,0x3ff8819b,3
+np.float32,0xbebbe23c,0x3ff92815,3
+np.float32,0xbf598c88,0x402587a1,3
+np.float32,0x3e95d864,0x3fa30b4a,3
+np.float32,0x3f7f6f40,0x3d882486,3
+np.float32,0xbf53658c,0x4022b604,3
+np.float32,0xbf2a35f2,0x401314ad,3
+np.float32,0x3eb14380,0x3f9bcf28,3
+np.float32,0x3f0e0c64,0x3f7b8a7a,3
+np.float32,0x3d349920,0x3fc36a9a,3
+np.float32,0xbec2092c,0x3ffad071,3
+np.float32,0xbe1d08e8,0x3fdcc4e0,3
+np.float32,0xbf008968,0x40063243,3
+np.float32,0xbefad582,0x40054c51,3
+np.float32,0xbe52d010,0x3fe39a72,3
+np.float32,0x3f4afdac,0x3f27ba6b,3
+np.float32,0x3f6c483c,0x3eca4408,3
+np.float32,0xbef3cb68,0x40044b0c,3
+np.float32,0x3e94687c,0x3fa36b6f,3
+np.float32,0xbf64ae5c,0x402b39bb,3
+np.float32,0xbf0022b4,0x40061497,3
+np.float32,0x80000001,0x3fc90fdb,3
+np.float32,0x3f25bcd0,0x3f5dda4b,3
+np.float32,0x3ed91b40,0x3f9102d7,3
+np.float32,0x3f800000,0x0,3
+np.float32,0xbebc6aca,0x3ff94cca,3
+np.float32,0x3f239e9a,0x3f609e7d,3
+np.float32,0xbf7312be,0x4034a305,3
+np.float32,0x3efd16d0,0x3f86e148,3
+np.float32,0x3f52753a,0x3f1b0f72,3
+np.float32,0xbde58960,0x3fd7702c,3
+np.float32,0x3ef88580,0x3f883099,3
+np.float32,0x3eebaefc,0x3f8bd51e,3
+np.float32,0x3e877d2c,0x3fa6c807,3
+np.float32,0x3f1a0324,0x3f6cdf32,3
+np.float32,0xbedfe20a,0x40017eb6,3
+np.float32,0x3f205a3c,0x3f64d69d,3
+np.float32,0xbeed5b7c,0x400361b0,3
+np.float32,0xbf69ba10,0x402e2ad0,3
+np.float32,0x3c4fe200,0x3fc77014,3
+np.float32,0x3f043310,0x3f839a69,3
+np.float32,0xbeaf359a,0x3ff5c485,3
+np.float32,0x3db3f110,0x3fbdcd12,3
+np.float32,0x3e24af88,0x3fb462ed,3
+np.float32,0xbf34e858,0x4016c1c8,3
+np.float32,0x3f3334f2,0x3f4b9cd0,3
+np.float32,0xbf145882,0x400c16a2,3
+np.float32,0xbf541c38,0x40230748,3
+np.float32,0x3eba7e10,0x3f99574b,3
+np.float32,0xbe34c6e0,0x3fdfc731,3
+np.float32,0xbe957abe,0x3feefbf0,3
+np.float32,0xbf595a59,0x40256fdb,3
+np.float32,0xbdedc7b8,0x3fd7f4f0,3
+np.float32,0xbf627c02,0x402a06a9,3
+np.float32,0x3f339b78,0x3f4b0d18,3
+np.float32,0xbf2df6d2,0x40145929,3
+np.float32,0x3f617726,0x3efc9fd8,3
+np.float32,0xbee3a8fc,0x40020561,3
+np.float32,0x3efe9f68,0x3f867043,3
+np.float32,0xbf2c3e76,0x4013c3ba,3
+np.float32,0xbf218f28,0x40103d84,3
+np.float32,0xbf1ea847,0x400f4f7f,3
+np.float32,0x3ded9160,0x3fba2e31,3
+np.float32,0x3bce1b00,0x3fc841bf,3
+np.float32,0xbe90566e,0x3feda46a,3
+np.float32,0xbf5ea2ba,0x4028056b,3
+np.float32,0x3f538e62,0x3f191ee6,3
+np.float32,0xbf59e054,0x4025af74,3
+np.float32,0xbe8c98ba,0x3fecab24,3
+np.float32,0x3ee7bdb0,0x3f8cf0b7,3
+np.float32,0xbf2eb828,0x40149b2b,3
+np.float32,0xbe5eb904,0x3fe52068,3
+np.float32,0xbf16b422,0x400cd08d,3
+np.float32,0x3f1ab9b4,0x3f6bfa58,3
+np.float32,0x3dc23040,0x3fbce82a,3
+np.float32,0xbf29d9e7,0x4012f5e5,3
+np.float32,0xbf38f30a,0x40183393,3
+np.float32,0x3e88e798,0x3fa66a09,3
+np.float32,0x3f1d07e6,0x3f69124f,3
+np.float32,0xbe1d3d34,0x3fdccb7e,3
+np.float32,0xbf1715be,0x400ceec2,3
+np.float32,0x3f7a0eac,0x3e5d11f7,3
+np.float32,0xbe764924,0x3fe82707,3
+np.float32,0xbf01a1f8,0x4006837c,3
+np.float32,0x3f2be730,0x3f55a661,3
+np.float32,0xbf7bb070,0x403d4ce5,3
+np.float32,0xbd602110,0x3fd011c9,3
+np.float32,0x3f5d080c,0x3f07609d,3
+np.float32,0xbda20400,0x3fd332d1,3
+np.float32,0x3f1c62da,0x3f69e308,3
+np.float32,0xbf2c6916,0x4013d223,3
+np.float32,0xbf44f8fd,0x401cb816,3
+np.float32,0x3f4da392,0x3f235539,3
+np.float32,0x3e9e8aa0,0x3fa0c3a0,3
+np.float32,0x3e9633c4,0x3fa2f366,3
+np.float32,0xbf0422ab,0x40073ddd,3
+np.float32,0x3f518386,0x3f1cb603,3
+np.float32,0x3f24307a,0x3f5fe096,3
+np.float32,0xbdfb4220,0x3fd8ce24,3
+np.float32,0x3f179d28,0x3f6fdc7d,3
+np.float32,0xbecc2df0,0x3ffd911e,3
+np.float32,0x3f3dff0c,0x3f3c0782,3
+np.float32,0xbf58c4d8,0x4025295b,3
+np.float32,0xbdcf8438,0x3fd60dd3,3
+np.float32,0xbeeaf1b2,0x40030aa7,3
+np.float32,0xbf298a28,0x4012db45,3
+np.float32,0x3f6c4dec,0x3eca2678,3
+np.float32,0x3f4d1ac8,0x3f243a59,3
+np.float32,0x3f62cdfa,0x3ef6e8f8,3
+np.float32,0xbee8acce,0x4002b909,3
+np.float32,0xbd5f2af0,0x3fd00a15,3
+np.float32,0x3f5fde8e,0x3f01a453,3
+np.float32,0x3e95233c,0x3fa33aa4,3
+np.float32,0x3ecd2a60,0x3f9449be,3
+np.float32,0x3f10aa86,0x3f78619d,3
+np.float32,0x3f3888e8,0x3f440a70,3
+np.float32,0x3eeb5bfc,0x3f8bec7d,3
+np.float32,0xbe12d654,0x3fdb7ae6,3
+np.float32,0x3eca3110,0x3f951931,3
+np.float32,0xbe2d1b7c,0x3fdece05,3
+np.float32,0xbf29e9db,0x4012fb3a,3
+np.float32,0xbf0c50b8,0x4009a845,3
+np.float32,0xbed9f0e4,0x4000abef,3
+np.float64,0x3fd078ec5ba0f1d8,0x3ff4f7c00595a4d3,2
+np.float64,0xbfdbc39743b7872e,0x400027f85bce43b2,2
+np.float64,0xbfacd2707c39a4e0,0x3ffa08ae1075d766,2
+np.float64,0xbfc956890f32ad14,0x3ffc52308e7285fd,2
+np.float64,0xbf939c2298273840,0x3ff9706d18e6ea6b,2
+np.float64,0xbfe0d7048961ae09,0x4000fff4406bd395,2
+np.float64,0xbfe9d19b86f3a337,0x4004139bc683a69f,2
+np.float64,0x3fd35c7f90a6b900,0x3ff437220e9123f8,2
+np.float64,0x3fdddca171bbb944,0x3ff15da61e61ec08,2
+np.float64,0x3feb300de9f6601c,0x3fe1c6fadb68cdca,2
+np.float64,0xbfef1815327e302a,0x400739808fc6f964,2
+np.float64,0xbfe332d78e6665af,0x4001b6c4ef922f7c,2
+np.float64,0xbfedbf4dfb7b7e9c,0x40061cefed62a58b,2
+np.float64,0xbfd8dcc7e3b1b990,0x3fff84307713c2c3,2
+np.float64,0xbfedaf161c7b5e2c,0x400612027c1b2b25,2
+np.float64,0xbfed9bde897b37bd,0x4006053f05bd7d26,2
+np.float64,0xbfe081ebc26103d8,0x4000e70755eb66e0,2
+np.float64,0xbfe0366f9c606cdf,0x4000d11212f29afd,2
+np.float64,0xbfc7c115212f822c,0x3ffc1e8c9d58f7db,2
+np.float64,0x3fd8dd9a78b1bb34,0x3ff2bf8d0f4c9376,2
+np.float64,0xbfe54eff466a9dfe,0x4002655950b611f4,2
+np.float64,0xbfe4aad987e955b3,0x40022efb19882518,2
+np.float64,0x3f70231ca0204600,0x3ff911d834e7abf4,2
+np.float64,0x3fede01d047bc03a,0x3fd773cecbd8561b,2
+np.float64,0xbfd6a00d48ad401a,0x3ffee9fd7051633f,2
+np.float64,0x3fd44f3d50a89e7c,0x3ff3f74dd0fc9c91,2
+np.float64,0x3fe540f0d0ea81e2,0x3feb055a7c7d43d6,2
+np.float64,0xbf3ba2e200374800,0x3ff923b582650c6c,2
+np.float64,0x3fe93b2d3f72765a,0x3fe532fa15331072,2
+np.float64,0x3fee8ce5a17d19cc,0x3fd35666eefbe336,2
+np.float64,0x3fe55d5f8feabac0,0x3feadf3dcfe251d4,2
+np.float64,0xbfd1d2ede8a3a5dc,0x3ffda600041ac884,2
+np.float64,0xbfee41186e7c8231,0x40067a625cc6f64d,2
+np.float64,0x3fe521a8b9ea4352,0x3feb2f1a6c8084e5,2
+np.float64,0x3fc65378ef2ca6f0,0x3ff653dfe81ee9f2,2
+np.float64,0x3fdaba0fbcb57420,0x3ff23d630995c6ba,2
+np.float64,0xbfe6b7441d6d6e88,0x4002e182539a2994,2
+np.float64,0x3fda00b6dcb4016c,0x3ff2703d516f28e7,2
+np.float64,0xbfe8699f01f0d33e,0x400382326920ea9e,2
+np.float64,0xbfef5889367eb112,0x4007832af5983793,2
+np.float64,0x3fefb57c8aff6afa,0x3fc14700ab38dcef,2
+np.float64,0xbfda0dfdaab41bfc,0x3fffd75b6fd497f6,2
+np.float64,0xbfb059c36620b388,0x3ffa27c528b97a42,2
+np.float64,0xbfdd450ab1ba8a16,0x40005dcac6ab50fd,2
+np.float64,0xbfe54d6156ea9ac2,0x400264ce9f3f0fb9,2
+np.float64,0xbfe076e94760edd2,0x4000e3d1374884da,2
+np.float64,0xbfc063286720c650,0x3ffb2fd1d6bff0ef,2
+np.float64,0xbfe24680f2e48d02,0x40016ddfbb5bcc0e,2
+np.float64,0xbfdc9351d2b926a4,0x400044e3756fb765,2
+np.float64,0x3fefb173d8ff62e8,0x3fc1bd5626f80850,2
+np.float64,0x3fe77c117a6ef822,0x3fe7e57089bad2ec,2
+np.float64,0xbfddbcebf7bb79d8,0x40006eadb60406b3,2
+np.float64,0xbfecf6625ff9ecc5,0x40059e6c6961a6db,2
+np.float64,0x3fdc8950b8b912a0,0x3ff1bcfb2e27795b,2
+np.float64,0xbfeb2fa517765f4a,0x4004b00aee3e6888,2
+np.float64,0x3fd0efc88da1df90,0x3ff4d8f7cbd8248a,2
+np.float64,0xbfe6641a2becc834,0x4002c43362c1bd0f,2
+np.float64,0xbfe28aec0fe515d8,0x400182c91d4df039,2
+np.float64,0xbfd5ede8d0abdbd2,0x3ffeba7baef05ae8,2
+np.float64,0xbfbd99702a3b32e0,0x3ffafca21c1053f1,2
+np.float64,0x3f96f043f82de080,0x3ff8c6384d5eb610,2
+np.float64,0xbfe5badbc9eb75b8,0x400289c8cd5873d1,2
+np.float64,0x3fe5c6bf95eb8d80,0x3fea5093e9a3e43e,2
+np.float64,0x3fb1955486232ab0,0x3ff8086d4c3e71d5,2
+np.float64,0xbfea145f397428be,0x4004302237a35871,2
+np.float64,0xbfdabe685db57cd0,0x400003e2e29725fb,2
+np.float64,0xbfefc79758ff8f2f,0x400831814e23bfc8,2
+np.float64,0x3fd7edb66cafdb6c,0x3ff3006c5123bfaf,2
+np.float64,0xbfeaf7644bf5eec8,0x400495a7963ce4ed,2
+np.float64,0x3fdf838d78bf071c,0x3ff0e527eed73800,2
+np.float64,0xbfd1a0165ba3402c,0x3ffd98c5ab76d375,2
+np.float64,0x3fd75b67a9aeb6d0,0x3ff327c8d80b17cf,2
+np.float64,0x3fc2aa9647255530,0x3ff6ca854b157df1,2
+np.float64,0xbfe0957fd4612b00,0x4000ecbf3932becd,2
+np.float64,0x3fda1792c0b42f24,0x3ff269fbb2360487,2
+np.float64,0x3fd480706ca900e0,0x3ff3ea53a6aa3ae8,2
+np.float64,0xbfd0780ed9a0f01e,0x3ffd4bfd544c7d47,2
+np.float64,0x3feeec0cd77dd81a,0x3fd0a8a241fdb441,2
+np.float64,0x3fcfa933e93f5268,0x3ff5223478621a6b,2
+np.float64,0x3fdad2481fb5a490,0x3ff236b86c6b2b49,2
+np.float64,0x3fe03b129de07626,0x3ff09f21fb868451,2
+np.float64,0xbfc01212cd202424,0x3ffb259a07159ae9,2
+np.float64,0x3febdb912df7b722,0x3fe0768e20dac8c9,2
+np.float64,0xbfbf2148763e4290,0x3ffb154c361ce5bf,2
+np.float64,0xbfb1a7eb1e234fd8,0x3ffa3cb37ac4a176,2
+np.float64,0xbfe26ad1ec64d5a4,0x400178f480ecce8d,2
+np.float64,0x3fe6d1cd1b6da39a,0x3fe8dc20ec4dad3b,2
+np.float64,0xbfede0e53dfbc1ca,0x4006340d3bdd7c97,2
+np.float64,0xbfe8fd1bd9f1fa38,0x4003bc3477f93f40,2
+np.float64,0xbfe329d0f26653a2,0x4001b3f345af5648,2
+np.float64,0xbfe4bb20eee97642,0x40023451404d6d08,2
+np.float64,0x3fb574832e2ae900,0x3ff7ca4bed0c7110,2
+np.float64,0xbfdf3c098fbe7814,0x4000a525bb72d659,2
+np.float64,0x3fa453e6d428a7c0,0x3ff87f512bb9b0c6,2
+np.float64,0x3faaec888435d920,0x3ff84a7d9e4def63,2
+np.float64,0xbfcdc240df3b8480,0x3ffce30ece754e7f,2
+np.float64,0xbf8c3220f0386440,0x3ff95a600ae6e157,2
+np.float64,0x3fe806076c700c0e,0x3fe71784a96c76eb,2
+np.float64,0x3fedf9b0e17bf362,0x3fd6e35fc0a7b6c3,2
+np.float64,0xbfe1b48422636908,0x400141bd8ed251bc,2
+np.float64,0xbfe82e2817705c50,0x40036b5a5556d021,2
+np.float64,0xbfc8ef8ff931df20,0x3ffc450ffae7ce58,2
+np.float64,0xbfe919fa94f233f5,0x4003c7cce4697fe8,2
+np.float64,0xbfc3ace4a72759c8,0x3ffb9a197bb22651,2
+np.float64,0x3fe479f71ee8f3ee,0x3fec0bd2f59097aa,2
+np.float64,0xbfeeb54a967d6a95,0x4006da12c83649c5,2
+np.float64,0x3fe5e74ea8ebce9e,0x3fea2407cef0f08c,2
+np.float64,0x3fb382baf2270570,0x3ff7e98213b921ba,2
+np.float64,0xbfdd86fd3cbb0dfa,0x40006712952ddbcf,2
+np.float64,0xbfd250eb52a4a1d6,0x3ffdc6d56253b1cd,2
+np.float64,0x3fea30c4ed74618a,0x3fe3962deba4f30e,2
+np.float64,0x3fc895963d312b30,0x3ff60a5d52fcbccc,2
+np.float64,0x3fe9cc4f6273989e,0x3fe442740942c80f,2
+np.float64,0xbfe8769f5cf0ed3f,0x4003873b4cb5bfce,2
+np.float64,0xbfe382f3726705e7,0x4001cfeb3204d110,2
+np.float64,0x3fbfe9a9163fd350,0x3ff7220bd2b97c8f,2
+np.float64,0xbfca6162bb34c2c4,0x3ffc743f939358f1,2
+np.float64,0x3fe127a014e24f40,0x3ff0147c4bafbc39,2
+np.float64,0x3fee9cdd2a7d39ba,0x3fd2e9ef45ab122f,2
+np.float64,0x3fa9ffb97c33ff80,0x3ff851e69fa3542c,2
+np.float64,0x3fd378f393a6f1e8,0x3ff42faafa77de56,2
+np.float64,0xbfe4df1e1669be3c,0x400240284df1c321,2
+np.float64,0x3fed0ed79bfa1db0,0x3fdba89060aa96fb,2
+np.float64,0x3fdef2ee52bde5dc,0x3ff10e942244f4f1,2
+np.float64,0xbfdab38f3ab5671e,0x40000264d8d5b49b,2
+np.float64,0x3fbe95a96e3d2b50,0x3ff73774cb59ce2d,2
+np.float64,0xbfe945653af28aca,0x4003d9657bf129c2,2
+np.float64,0xbfb18f3f2a231e80,0x3ffa3b27cba23f50,2
+np.float64,0xbfef50bf22fea17e,0x40077998a850082c,2
+np.float64,0xbfc52b8c212a5718,0x3ffbca8d6560a2da,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0x3fc1e3a02d23c740,0x3ff6e3a5fcac12a4,2
+np.float64,0xbfeb5e4ea5f6bc9d,0x4004c65abef9426f,2
+np.float64,0xbfe425b132684b62,0x400203c29608b00d,2
+np.float64,0xbfbfa1c19e3f4380,0x3ffb1d6367711158,2
+np.float64,0x3fbba2776e3744f0,0x3ff766f6df586fad,2
+np.float64,0xbfb5d0951e2ba128,0x3ffa7f712480b25e,2
+np.float64,0xbfe949fdab7293fb,0x4003db4530a18507,2
+np.float64,0xbfcf13519b3e26a4,0x3ffd0e6f0a6c38ee,2
+np.float64,0x3f91e6d72823cdc0,0x3ff8da5f08909b6e,2
+np.float64,0x3f78a2e360314600,0x3ff909586727caef,2
+np.float64,0xbfe1ae7e8fe35cfd,0x40013fef082caaa3,2
+np.float64,0x3fe97a6dd1f2f4dc,0x3fe4cb4b99863478,2
+np.float64,0xbfcc1e1e69383c3c,0x3ffcad250a949843,2
+np.float64,0x3faccb797c399700,0x3ff83b8066b49330,2
+np.float64,0x3fe7a2647a6f44c8,0x3fe7acceae6ec425,2
+np.float64,0xbfec3bfcf0f877fa,0x4005366af5a7175b,2
+np.float64,0xbfe2310b94646217,0x400167588fceb228,2
+np.float64,0x3feb167372762ce6,0x3fe1f74c0288fad8,2
+np.float64,0xbfb722b4ee2e4568,0x3ffa94a81b94dfca,2
+np.float64,0x3fc58da9712b1b50,0x3ff66cf8f072aa14,2
+np.float64,0xbfe7fff9d6effff4,0x400359d01b8141de,2
+np.float64,0xbfd56691c5aacd24,0x3ffe9686697797e8,2
+np.float64,0x3fe3ab0557e7560a,0x3fed1593959ef8e8,2
+np.float64,0x3fdd458995ba8b14,0x3ff1883d6f22a322,2
+np.float64,0x3fe7bbed2cef77da,0x3fe786d618094cda,2
+np.float64,0x3fa31a30c4263460,0x3ff88920b936fd79,2
+np.float64,0x8010000000000000,0x3ff921fb54442d18,2
+np.float64,0xbfdc5effbdb8be00,0x40003d95fe0dff11,2
+np.float64,0x3febfdad7e77fb5a,0x3fe030b5297dbbdd,2
+np.float64,0x3fe4f3f3b2e9e7e8,0x3feb6bc59eeb2be2,2
+np.float64,0xbfe44469fd6888d4,0x40020daa5488f97a,2
+np.float64,0xbfe19fddb0e33fbc,0x40013b8c902b167b,2
+np.float64,0x3fa36ad17c26d5a0,0x3ff8869b3e828134,2
+np.float64,0x3fcf23e6c93e47d0,0x3ff5336491a65d1e,2
+np.float64,0xffefffffffffffff,0x7ff8000000000000,2
+np.float64,0xbfe375f4cee6ebea,0x4001cbd2ba42e8b5,2
+np.float64,0xbfaef1215c3de240,0x3ffa19ab02081189,2
+np.float64,0xbfec39c59c78738b,0x4005353dc38e3d78,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0xbfec09bb7b781377,0x40051c0a5754cb3a,2
+np.float64,0x3fe8301f2870603e,0x3fe6d783c5ef0944,2
+np.float64,0xbfed418c987a8319,0x4005cbae1b8693d1,2
+np.float64,0xbfdc16e7adb82dd0,0x4000338b634eaf03,2
+np.float64,0x3fd5d361bdaba6c4,0x3ff390899300a54c,2
+np.float64,0xbff0000000000000,0x400921fb54442d18,2
+np.float64,0x3fd5946232ab28c4,0x3ff3a14767813f29,2
+np.float64,0x3fe833e5fef067cc,0x3fe6d1be720edf2d,2
+np.float64,0x3fedf746a67bee8e,0x3fd6f127fdcadb7b,2
+np.float64,0x3fd90353d3b206a8,0x3ff2b54f7d369ba9,2
+np.float64,0x3fec4b4b72f89696,0x3fdf1b38d2e93532,2
+np.float64,0xbfe9c67596f38ceb,0x40040ee5f524ce03,2
+np.float64,0x3fd350d91aa6a1b4,0x3ff43a303c0da27f,2
+np.float64,0x3fd062603ba0c4c0,0x3ff4fd9514b935d8,2
+np.float64,0xbfe24c075f64980e,0x40016f8e9f2663b3,2
+np.float64,0x3fdaa546eeb54a8c,0x3ff2431a88fef1d5,2
+np.float64,0x3fe92b8151f25702,0x3fe54c67e005cbf9,2
+np.float64,0xbfe1be8b8a637d17,0x400144c078f67c6e,2
+np.float64,0xbfe468a1d7e8d144,0x40021964b118cbf4,2
+np.float64,0xbfdc6de4fab8dbca,0x40003fa9e27893d8,2
+np.float64,0xbfe3c2788ae784f1,0x4001e407ba3aa956,2
+np.float64,0xbfe2bf1542e57e2a,0x400192d4a9072016,2
+np.float64,0xbfe6982f4c6d305e,0x4002d681b1991bbb,2
+np.float64,0x3fdbceb1c4b79d64,0x3ff1f0f117b9d354,2
+np.float64,0x3fdb3705e7b66e0c,0x3ff21af01ca27ace,2
+np.float64,0x3fe3e6358ee7cc6c,0x3fecca4585053983,2
+np.float64,0xbfe16d6a9a62dad5,0x40012c7988aee247,2
+np.float64,0xbfce66e4413ccdc8,0x3ffcf83b08043a0c,2
+np.float64,0xbfeb6cd46876d9a9,0x4004cd61733bfb79,2
+np.float64,0xbfdb1cdd64b639ba,0x400010e6cf087cb7,2
+np.float64,0xbfe09e4e30e13c9c,0x4000ef5277c47721,2
+np.float64,0xbfee88dd127d11ba,0x4006b3cd443643ac,2
+np.float64,0xbf911e06c8223c00,0x3ff966744064fb05,2
+np.float64,0xbfe8f22bc471e458,0x4003b7d5513af295,2
+np.float64,0x3fe3d7329567ae66,0x3fecdd6c241f83ee,2
+np.float64,0x3fc8a9404b315280,0x3ff607dc175edf3f,2
+np.float64,0x3fe7eb80ad6fd702,0x3fe73f8fdb3e6a6c,2
+np.float64,0x3fef0931e37e1264,0x3fcf7fde80a3c5ab,2
+np.float64,0x3fe2ed3c3fe5da78,0x3fee038334cd1860,2
+np.float64,0x3fe251fdb8e4a3fc,0x3feec26dc636ac31,2
+np.float64,0x3feb239436764728,0x3fe1de9462455da7,2
+np.float64,0xbfe63fd7eeec7fb0,0x4002b78cfa3d2fa6,2
+np.float64,0x3fdd639cb5bac738,0x3ff17fc7d92b3eee,2
+np.float64,0x3fd0a7a13fa14f44,0x3ff4eba95c559c84,2
+np.float64,0x3fe804362d70086c,0x3fe71a44cd91ffa4,2
+np.float64,0xbfe0fecf6e61fd9f,0x40010bac8edbdc4f,2
+np.float64,0x3fcb74acfd36e958,0x3ff5ac84437f1b7c,2
+np.float64,0x3fe55053e1eaa0a8,0x3feaf0bf76304c30,2
+np.float64,0x3fc06b508d20d6a0,0x3ff7131da17f3902,2
+np.float64,0x3fdd78750fbaf0ec,0x3ff179e97fbf7f65,2
+np.float64,0x3fe44cb946689972,0x3fec46859b5da6be,2
+np.float64,0xbfeb165a7ff62cb5,0x4004a41c9cc9589e,2
+np.float64,0x3fe01ffb2b603ff6,0x3ff0aed52bf1c3c1,2
+np.float64,0x3f983c60a83078c0,0x3ff8c107805715ab,2
+np.float64,0x3fd8b5ff13b16c00,0x3ff2ca4a837a476a,2
+np.float64,0x3fc80510a1300a20,0x3ff61cc3b4af470b,2
+np.float64,0xbfd3935b06a726b6,0x3ffe1b3a2066f473,2
+np.float64,0xbfdd4a1f31ba943e,0x40005e81979ed445,2
+np.float64,0xbfa76afdd42ed600,0x3ff9dd63ffba72d2,2
+np.float64,0x3fe7e06d496fc0da,0x3fe7503773566707,2
+np.float64,0xbfea5fbfe874bf80,0x40045106af6c538f,2
+np.float64,0x3fee000c487c0018,0x3fd6bef1f8779d88,2
+np.float64,0xbfb39f4ee2273ea0,0x3ffa5c3f2b3888ab,2
+np.float64,0x3feb9247b0772490,0x3fe1092d2905efce,2
+np.float64,0x3fdaa39b4cb54738,0x3ff243901da0da17,2
+np.float64,0x3fcd5b2b493ab658,0x3ff56e262e65b67d,2
+np.float64,0x3fcf82512f3f04a0,0x3ff52738847c55f2,2
+np.float64,0x3fe2af5e0c655ebc,0x3fee4ffab0c82348,2
+np.float64,0xbfec0055d0f800ac,0x4005172d325933e8,2
+np.float64,0x3fe71da9336e3b52,0x3fe86f2e12f6e303,2
+np.float64,0x3fbefab0723df560,0x3ff731188ac716ec,2
+np.float64,0xbfe11dca28623b94,0x400114d3d4ad370d,2
+np.float64,0x3fbcbda8ca397b50,0x3ff755281078abd4,2
+np.float64,0x3fe687c7126d0f8e,0x3fe945099a7855cc,2
+np.float64,0xbfecde510579bca2,0x400590606e244591,2
+np.float64,0xbfd72de681ae5bce,0x3fff0ff797ad1755,2
+np.float64,0xbfe7c0f7386f81ee,0x40034226e0805309,2
+np.float64,0x3fd8d55619b1aaac,0x3ff2c1cb3267b14e,2
+np.float64,0x3fecd7a2ad79af46,0x3fdcabbffeaa279e,2
+np.float64,0x3fee7fb1a8fcff64,0x3fd3ae620286fe19,2
+np.float64,0xbfc5f3a3592be748,0x3ffbe3ed204d9842,2
+np.float64,0x3fec9e5527793caa,0x3fddb00bc8687e4b,2
+np.float64,0x3fc35dc70f26bb90,0x3ff6b3ded7191e33,2
+np.float64,0x3fda91c07ab52380,0x3ff24878848fec8f,2
+np.float64,0xbfe12cde1fe259bc,0x4001194ab99d5134,2
+np.float64,0xbfd35ab736a6b56e,0x3ffe0c5ce8356d16,2
+np.float64,0x3fc9c94123339280,0x3ff5e3239f3ad795,2
+np.float64,0xbfe72f54926e5ea9,0x40030c95d1d02b56,2
+np.float64,0xbfee283186fc5063,0x40066786bd0feb79,2
+np.float64,0xbfe7b383f56f6708,0x40033d23ef0e903d,2
+np.float64,0x3fd6037327ac06e8,0x3ff383bf2f311ddb,2
+np.float64,0x3fe0e344b561c68a,0x3ff03cd90fd4ba65,2
+np.float64,0xbfef0ff54b7e1feb,0x400730fa5fce381e,2
+np.float64,0x3fd269929da4d324,0x3ff476b230136d32,2
+np.float64,0xbfbc5fb9f638bf70,0x3ffae8e63a4e3234,2
+np.float64,0xbfe2e8bc84e5d179,0x40019fb5874f4310,2
+np.float64,0xbfd7017413ae02e8,0x3fff040d843c1531,2
+np.float64,0x3fefd362fa7fa6c6,0x3fbababc3ddbb21d,2
+np.float64,0x3fecb62ed3f96c5e,0x3fdd44ba77ccff94,2
+np.float64,0xbfb16fad5222df58,0x3ffa392d7f02b522,2
+np.float64,0x3fbcf4abc639e950,0x3ff751b23c40e27f,2
+np.float64,0x3fe128adbce2515c,0x3ff013dc91db04b5,2
+np.float64,0x3fa5dd9d842bbb40,0x3ff87300c88d512f,2
+np.float64,0xbfe61efcaf6c3dfa,0x4002ac27117f87c9,2
+np.float64,0x3feffe1233fffc24,0x3f9638d3796a4954,2
+np.float64,0xbfe78548b66f0a92,0x40032c0447b7bfe2,2
+np.float64,0x3fe7bd38416f7a70,0x3fe784e86d6546b6,2
+np.float64,0x3fe0d6bc5961ad78,0x3ff0443899e747ac,2
+np.float64,0xbfd0bb6e47a176dc,0x3ffd5d6dff390d41,2
+np.float64,0xbfec1d16b8f83a2e,0x40052620378d3b78,2
+np.float64,0x3fe9bbec20f377d8,0x3fe45e167c7a3871,2
+np.float64,0xbfeed81d9dfdb03b,0x4006f9dec2db7310,2
+np.float64,0xbfe1e35179e3c6a3,0x40014fd1b1186ac0,2
+np.float64,0xbfc9c7e605338fcc,0x3ffc60a6bd1a7126,2
+np.float64,0x3feec92810fd9250,0x3fd1afde414ab338,2
+np.float64,0xbfeb9f1d90773e3b,0x4004e606b773f5b0,2
+np.float64,0x3fcbabdf6b3757c0,0x3ff5a573866404af,2
+np.float64,0x3fe9f4e1fff3e9c4,0x3fe3fd7b6712dd7b,2
+np.float64,0xbfe6c0175ded802e,0x4002e4a4dc12f3fe,2
+np.float64,0xbfeefc96f37df92e,0x40071d367cd721ff,2
+np.float64,0xbfeaab58dc7556b2,0x400472ce37e31e50,2
+np.float64,0xbfc62668772c4cd0,0x3ffbea5e6c92010a,2
+np.float64,0x3fafe055fc3fc0a0,0x3ff822ce6502519a,2
+np.float64,0x3fd7b648ffaf6c90,0x3ff30f5a42f11418,2
+np.float64,0xbfe934fe827269fd,0x4003d2b9fed9e6ad,2
+np.float64,0xbfe6d691f2edad24,0x4002eca6a4b1797b,2
+np.float64,0x3fc7e62ced2fcc58,0x3ff620b1f44398b7,2
+np.float64,0xbfc89be9f33137d4,0x3ffc3a67a497f59c,2
+np.float64,0xbfe7793d536ef27a,0x40032794bf14dd64,2
+np.float64,0x3fde55a02dbcab40,0x3ff13b5f82d223e4,2
+np.float64,0xbfc8eabd7b31d57c,0x3ffc4472a81cb6d0,2
+np.float64,0x3fddcb5468bb96a8,0x3ff162899c381f2e,2
+np.float64,0xbfec7554d8f8eaaa,0x40055550e18ec463,2
+np.float64,0x3fd0b6e8b6a16dd0,0x3ff4e7b4781a50e3,2
+np.float64,0x3fedaae01b7b55c0,0x3fd8964916cdf53d,2
+np.float64,0x3fe0870f8a610e20,0x3ff072e7db95c2a2,2
+np.float64,0xbfec3e3ce2787c7a,0x4005379d0f6be873,2
+np.float64,0xbfe65502586caa04,0x4002beecff89147f,2
+np.float64,0xbfe0df39a961be74,0x4001025e36d1c061,2
+np.float64,0xbfb5d8edbe2bb1d8,0x3ffa7ff72b7d6a2b,2
+np.float64,0xbfde89574bbd12ae,0x40008ba4cd74544d,2
+np.float64,0xbfe72938f0ee5272,0x40030a5efd1acb6d,2
+np.float64,0xbfcd500d133aa01c,0x3ffcd462f9104689,2
+np.float64,0x3fe0350766606a0e,0x3ff0a2a3664e2c14,2
+np.float64,0xbfc892fb573125f8,0x3ffc3944641cc69d,2
+np.float64,0xbfba7dc7c634fb90,0x3ffaca9a6a0ffe61,2
+np.float64,0xbfeac94478759289,0x40048068a8b83e45,2
+np.float64,0xbfe8f60c1af1ec18,0x4003b961995b6e51,2
+np.float64,0x3fea1c0817743810,0x3fe3ba28c1643cf7,2
+np.float64,0xbfe42a0fefe85420,0x4002052aadd77f01,2
+np.float64,0x3fd2c61c56a58c38,0x3ff45e84cb9a7fa9,2
+np.float64,0xbfd83fb7cdb07f70,0x3fff59ab4790074c,2
+np.float64,0x3fd95e630fb2bcc8,0x3ff29c8bee1335ad,2
+np.float64,0x3feee88f387dd11e,0x3fd0c3ad3ded4094,2
+np.float64,0x3fe061291160c252,0x3ff0890010199bbc,2
+np.float64,0xbfdc7db3b5b8fb68,0x400041dea3759443,2
+np.float64,0x3fee23b320fc4766,0x3fd5ee73d7aa5c56,2
+np.float64,0xbfdc25c590b84b8c,0x4000359cf98a00b4,2
+np.float64,0xbfd63cbfd2ac7980,0x3ffecf7b9cf99b3c,2
+np.float64,0xbfbeb3c29a3d6788,0x3ffb0e66ecc0fc3b,2
+np.float64,0xbfd2f57fd6a5eb00,0x3ffdf1d7c79e1532,2
+np.float64,0xbfab3eda9c367db0,0x3ff9fc0c875f42e9,2
+np.float64,0xbfe12df1c6e25be4,0x4001199c673e698c,2
+np.float64,0x3fef8ab23a7f1564,0x3fc5aff358c59f1c,2
+np.float64,0x3fe562f50feac5ea,0x3fead7bce205f7d9,2
+np.float64,0x3fdc41adbeb8835c,0x3ff1d0f71341b8f2,2
+np.float64,0x3fe2748967e4e912,0x3fee9837f970ff9e,2
+np.float64,0xbfdaa89d57b5513a,0x400000e3889ba4cf,2
+np.float64,0x3fdf2a137dbe5428,0x3ff0fecfbecbbf86,2
+np.float64,0xbfea1fdcd2f43fba,0x4004351974b32163,2
+np.float64,0xbfe34a93a3e69528,0x4001be323946a3e0,2
+np.float64,0x3fe929bacff25376,0x3fe54f47bd7f4cf2,2
+np.float64,0xbfd667fbd6accff8,0x3ffedb04032b3a1a,2
+np.float64,0xbfeb695796f6d2af,0x4004cbb08ec6f525,2
+np.float64,0x3fd204df2ea409c0,0x3ff490f51e6670f5,2
+np.float64,0xbfd89a2757b1344e,0x3fff722127b988c4,2
+np.float64,0xbfd0787187a0f0e4,0x3ffd4c16dbe94f32,2
+np.float64,0x3fd44239bfa88474,0x3ff3fabbfb24b1fa,2
+np.float64,0xbfeb0b3489f61669,0x40049ee33d811d33,2
+np.float64,0x3fdcf04eaab9e09c,0x3ff1a02a29996c4e,2
+np.float64,0x3fd4c51e4fa98a3c,0x3ff3d8302c68fc9a,2
+np.float64,0x3fd1346645a268cc,0x3ff4c72b4970ecaf,2
+np.float64,0x3fd6a89d09ad513c,0x3ff357af6520afac,2
+np.float64,0xbfba0f469a341e90,0x3ffac3a8f41bed23,2
+np.float64,0xbfe13f8ddce27f1c,0x40011ed557719fd6,2
+np.float64,0x3fd43e5e26a87cbc,0x3ff3fbc040fc30dc,2
+np.float64,0x3fe838125a707024,0x3fe6cb5c987248f3,2
+np.float64,0x3fe128c30c625186,0x3ff013cff238dd1b,2
+np.float64,0xbfcd4718833a8e30,0x3ffcd33c96bde6f9,2
+np.float64,0x3fe43fcd08e87f9a,0x3fec573997456ec1,2
+np.float64,0xbfe9a29104734522,0x4003ffd502a1b57f,2
+np.float64,0xbfe4709d7968e13b,0x40021bfc5cd55af4,2
+np.float64,0x3fd21c3925a43874,0x3ff48adf48556cbb,2
+np.float64,0x3fe9a521b2734a44,0x3fe4844fc054e839,2
+np.float64,0xbfdfa6a912bf4d52,0x4000b4730ad8521e,2
+np.float64,0x3fe3740702e6e80e,0x3fed5b106283b6ed,2
+np.float64,0x3fd0a3aa36a14754,0x3ff4ecb02a5e3f49,2
+np.float64,0x3fdcb903d0b97208,0x3ff1afa5d692c5b9,2
+np.float64,0xbfe7d67839efacf0,0x40034a3146abf6f2,2
+np.float64,0x3f9981c6d8330380,0x3ff8bbf1853d7b90,2
+np.float64,0xbfe9d4191673a832,0x400414a9ab453c5d,2
+np.float64,0x3fef0a1e5c7e143c,0x3fcf70b02a54c415,2
+np.float64,0xbfd996dee6b32dbe,0x3fffb6cf707ad8e4,2
+np.float64,0x3fe19bef17e337de,0x3fef9e70d4fcedae,2
+np.float64,0x3fe34a59716694b2,0x3fed8f6d5cfba474,2
+np.float64,0x3fdf27e27cbe4fc4,0x3ff0ff70500e0c7c,2
+np.float64,0xbfe19df87fe33bf1,0x40013afb401de24c,2
+np.float64,0xbfbdfd97ba3bfb30,0x3ffb02ef8c225e57,2
+np.float64,0xbfe3d3417267a683,0x4001e95ed240b0f8,2
+np.float64,0x3fe566498b6acc94,0x3fead342957d4910,2
+np.float64,0x3ff0000000000000,0x0,2
+np.float64,0x3feb329bd8766538,0x3fe1c2225aafe3b4,2
+np.float64,0xbfc19ca703233950,0x3ffb575b5df057b9,2
+np.float64,0x3fe755027d6eaa04,0x3fe81eb99c262e00,2
+np.float64,0xbfe6c2b8306d8570,0x4002e594199f9eec,2
+np.float64,0x3fd69438e6ad2870,0x3ff35d2275ae891d,2
+np.float64,0x3fda3e7285b47ce4,0x3ff25f5573dd47ae,2
+np.float64,0x3fe7928a166f2514,0x3fe7c4490ef4b9a9,2
+np.float64,0xbfd4eb71b9a9d6e4,0x3ffe75e8ccb74be1,2
+np.float64,0xbfcc3a07f1387410,0x3ffcb0b8af914a5b,2
+np.float64,0xbfe6e80225edd004,0x4002f2e26eae8999,2
+np.float64,0xbfb347728a268ee8,0x3ffa56bd526a12db,2
+np.float64,0x3fe5140ead6a281e,0x3feb4132c9140a1c,2
+np.float64,0xbfc147f125228fe4,0x3ffb4cab18b9050f,2
+np.float64,0xbfcb9145b537228c,0x3ffc9b1b6227a8c9,2
+np.float64,0xbfda84ef4bb509de,0x3ffff7f8a674e17d,2
+np.float64,0x3fd2eb6bbfa5d6d8,0x3ff454c225529d7e,2
+np.float64,0x3fe18c95f1e3192c,0x3fefb0cf0efba75a,2
+np.float64,0x3fe78606efef0c0e,0x3fe7d6c3a092d64c,2
+np.float64,0x3fbad5119a35aa20,0x3ff773dffe3ce660,2
+np.float64,0x3fd0cf5903a19eb4,0x3ff4e15fd21fdb42,2
+np.float64,0xbfd85ce90bb0b9d2,0x3fff618ee848e974,2
+np.float64,0x3fe90e11b9f21c24,0x3fe57be62f606f4a,2
+np.float64,0x3fd7a2040faf4408,0x3ff314ce85457ec2,2
+np.float64,0xbfd73fba69ae7f74,0x3fff14bff3504811,2
+np.float64,0x3fa04b4bd42096a0,0x3ff89f9b52f521a2,2
+np.float64,0xbfd7219ce5ae433a,0x3fff0cac0b45cc18,2
+np.float64,0xbfe0cf4661e19e8d,0x4000fdadb14e3c22,2
+np.float64,0x3fd07469fea0e8d4,0x3ff4f8eaa9b2394a,2
+np.float64,0x3f9b05c5d8360b80,0x3ff8b5e10672db5c,2
+np.float64,0x3fe4c25b916984b8,0x3febad29bd0e25e2,2
+np.float64,0xbfde8b4891bd1692,0x40008beb88d5c409,2
+np.float64,0xbfe199a7efe33350,0x400139b089aee21c,2
+np.float64,0x3fecdad25cf9b5a4,0x3fdc9d062867e8c3,2
+np.float64,0xbfe979b277f2f365,0x4003eedb061e25a4,2
+np.float64,0x3fc8c7311f318e60,0x3ff6040b9aeaad9d,2
+np.float64,0x3fd2b605b8a56c0c,0x3ff462b9a955c224,2
+np.float64,0x3fc073b6ad20e770,0x3ff7120e9f2fd63c,2
+np.float64,0xbfec60ede678c1dc,0x40054a3863e24dc2,2
+np.float64,0x3fe225171be44a2e,0x3feef910dca420ea,2
+np.float64,0xbfd7529762aea52e,0x3fff19d00661f650,2
+np.float64,0xbfd781783daf02f0,0x3fff2667b90be461,2
+np.float64,0x3fe3f6ec6d67edd8,0x3fecb4e814a2e33a,2
+np.float64,0x3fece6702df9cce0,0x3fdc6719d92a50d2,2
+np.float64,0xbfb5c602ce2b8c08,0x3ffa7ec761ba856a,2
+np.float64,0xbfd61f0153ac3e02,0x3ffec78e3b1a6c4d,2
+np.float64,0xbfec3462b2f868c5,0x400532630bbd7050,2
+np.float64,0xbfdd248485ba490a,0x400059391c07c1bb,2
+np.float64,0xbfd424921fa84924,0x3ffe416a85d1dcdf,2
+np.float64,0x3fbb23a932364750,0x3ff76eef79209f7f,2
+np.float64,0x3fca248b0f344918,0x3ff5d77c5c1b4e5e,2
+np.float64,0xbfe69af4a4ed35ea,0x4002d77c2e4fbd4e,2
+np.float64,0x3fdafe3cdcb5fc78,0x3ff22a9be6efbbf2,2
+np.float64,0xbfebba3377f77467,0x4004f3836e1fe71a,2
+np.float64,0xbfe650fae06ca1f6,0x4002bd851406377c,2
+np.float64,0x3fda630007b4c600,0x3ff2554f1832bd94,2
+np.float64,0xbfda8107d9b50210,0x3ffff6e6209659f3,2
+np.float64,0x3fea759a02f4eb34,0x3fe31d1a632c9aae,2
+np.float64,0x3fbf88149e3f1030,0x3ff728313aa12ccb,2
+np.float64,0x3f7196d2a0232e00,0x3ff910647e1914c1,2
+np.float64,0x3feeae51d17d5ca4,0x3fd2709698d31f6f,2
+np.float64,0xbfd73cd663ae79ac,0x3fff13f96300b55a,2
+np.float64,0x3fd4fc5f06a9f8c0,0x3ff3c99359854b97,2
+np.float64,0x3fb29f5d6e253ec0,0x3ff7f7c20e396b20,2
+np.float64,0xbfd757c82aaeaf90,0x3fff1b34c6141e98,2
+np.float64,0x3fc56fd4cf2adfa8,0x3ff670c145122909,2
+np.float64,0x3fc609a2f52c1348,0x3ff65d3ef3cade2c,2
+np.float64,0xbfe1de631163bcc6,0x40014e5528fadb73,2
+np.float64,0xbfe7eb4a726fd695,0x40035202f49d95c4,2
+np.float64,0xbfc9223771324470,0x3ffc4b84d5e263b9,2
+np.float64,0x3fee91a8a87d2352,0x3fd3364befde8de6,2
+np.float64,0x3fbc9784fe392f10,0x3ff7578e29f6a1b2,2
+np.float64,0xbfec627c2c78c4f8,0x40054b0ff2cb9c55,2
+np.float64,0xbfb8b406a6316810,0x3ffaadd97062fb8c,2
+np.float64,0xbfecf98384f9f307,0x4005a043d9110d79,2
+np.float64,0xbfe5834bab6b0698,0x400276f114aebee4,2
+np.float64,0xbfd90f391eb21e72,0x3fff91e26a8f48f3,2
+np.float64,0xbfee288ce2fc511a,0x400667cb09aa04b3,2
+np.float64,0x3fd5aa5e32ab54bc,0x3ff39b7080a52214,2
+np.float64,0xbfee7ef907fcfdf2,0x4006ab96a8eba4c5,2
+np.float64,0x3fd6097973ac12f4,0x3ff3822486978bd1,2
+np.float64,0xbfe02d14b8e05a2a,0x4000ce5be53047b1,2
+np.float64,0xbf9c629a6838c540,0x3ff993897728c3f9,2
+np.float64,0xbfee2024667c4049,0x40066188782fb1f0,2
+np.float64,0xbfa42a88fc285510,0x3ff9c35a4bbce104,2
+np.float64,0x3fa407af5c280f60,0x3ff881b360d8eea1,2
+np.float64,0x3fed0ba42cfa1748,0x3fdbb7d55609175f,2
+np.float64,0xbfdd0b5844ba16b0,0x400055b0bb59ebb2,2
+np.float64,0x3fd88d97e6b11b30,0x3ff2d53c1ecb8f8c,2
+np.float64,0xbfeb7a915ef6f523,0x4004d410812eb84c,2
+np.float64,0xbfb5f979ca2bf2f0,0x3ffa8201d73cd4ca,2
+np.float64,0x3fb3b65dd6276cc0,0x3ff7e64576199505,2
+np.float64,0x3fcd47a7793a8f50,0x3ff570a7b672f160,2
+np.float64,0xbfa41dd30c283ba0,0x3ff9c2f488127eb3,2
+np.float64,0x3fe4b1ea1f6963d4,0x3febc2bed7760427,2
+np.float64,0xbfdd0f81d2ba1f04,0x400056463724b768,2
+np.float64,0x3fd15d93f7a2bb28,0x3ff4bc7a24eacfd7,2
+np.float64,0xbfe3213af8e64276,0x4001b14579dfded3,2
+np.float64,0x3fd90dfbeab21bf8,0x3ff2b26a6c2c3bb3,2
+np.float64,0xbfd02d54bca05aaa,0x3ffd38ab3886b203,2
+np.float64,0x3fc218dcad2431b8,0x3ff6dced56d5b417,2
+np.float64,0x3fea5edf71f4bdbe,0x3fe3455ee09f27e6,2
+np.float64,0x3fa74319042e8640,0x3ff867d224545438,2
+np.float64,0x3fd970ad92b2e15c,0x3ff2979084815dc1,2
+np.float64,0x3fce0a4bf73c1498,0x3ff557a4df32df3e,2
+np.float64,0x3fef5c8e10feb91c,0x3fc99ca0eeaaebe4,2
+np.float64,0xbfedae997ffb5d33,0x400611af18f407ab,2
+np.float64,0xbfbcf07d6239e0f8,0x3ffaf201177a2d36,2
+np.float64,0xbfc3c52541278a4c,0x3ffb9d2af0408e4a,2
+np.float64,0x3fe4ef44e4e9de8a,0x3feb71f7331255e5,2
+np.float64,0xbfccd9f5f539b3ec,0x3ffcc53a99339592,2
+np.float64,0xbfda32c745b4658e,0x3fffe16e8727ef89,2
+np.float64,0xbfef54932a7ea926,0x40077e4605e61ca1,2
+np.float64,0x3fe9d4ae3573a95c,0x3fe4344a069a3fd0,2
+np.float64,0x3fda567e73b4acfc,0x3ff258bd77a663c7,2
+np.float64,0xbfd5bcac5eab7958,0x3ffead6379c19c52,2
+np.float64,0xbfee5e56f97cbcae,0x40069131fc54018d,2
+np.float64,0x3fc2d4413925a880,0x3ff6c54163816298,2
+np.float64,0xbfe9ddf6e873bbee,0x400418d8c722f7c5,2
+np.float64,0x3fdaf2a683b5e54c,0x3ff22dcda599d69c,2
+np.float64,0xbfca69789f34d2f0,0x3ffc7547ff10b1a6,2
+np.float64,0x3fed076f62fa0ede,0x3fdbcbda03c1d72a,2
+np.float64,0xbfcb38326f367064,0x3ffc8fb55dadeae5,2
+np.float64,0x3fe1938705e3270e,0x3fefa88130c5adda,2
+np.float64,0x3feaffae3b75ff5c,0x3fe221e3da537c7e,2
+np.float64,0x3fefc94acb7f9296,0x3fbd9a360ace67b4,2
+np.float64,0xbfe8bddeb0f17bbe,0x4003a316685c767e,2
+np.float64,0x3fbe10fbee3c21f0,0x3ff73fceb10650f5,2
+np.float64,0x3fde9126c1bd224c,0x3ff12a742f734d0a,2
+np.float64,0xbfe9686c91f2d0d9,0x4003e7bc6ee77906,2
+np.float64,0xbfb1ba4892237490,0x3ffa3dda064c2509,2
+np.float64,0xbfe2879100e50f22,0x400181c1a5b16f0f,2
+np.float64,0x3fd1cd40b6a39a80,0x3ff49f70e3064e95,2
+np.float64,0xbfc965869132cb0c,0x3ffc5419f3b43701,2
+np.float64,0x3fea7a6f2874f4de,0x3fe31480fb2dd862,2
+np.float64,0x3fc3bc56892778b0,0x3ff6a7e8fa0e8b0e,2
+np.float64,0x3fec1ed451f83da8,0x3fdfd78e564b8ad7,2
+np.float64,0x3feb77d16df6efa2,0x3fe13d083344e45e,2
+np.float64,0xbfe822e7c67045d0,0x400367104a830cf6,2
+np.float64,0x8000000000000001,0x3ff921fb54442d18,2
+np.float64,0xbfd4900918a92012,0x3ffe5dc0e19737b4,2
+np.float64,0x3fed184187fa3084,0x3fdb7b7a39f234f4,2
+np.float64,0x3fecef846179df08,0x3fdc3cb2228c3682,2
+np.float64,0xbfe2d2aed165a55e,0x400198e21c5b861b,2
+np.float64,0x7ff0000000000000,0x7ff8000000000000,2
+np.float64,0xbfee9409a07d2813,0x4006bd358232d073,2
+np.float64,0xbfecedc2baf9db86,0x4005995df566fc21,2
+np.float64,0x3fe6d857396db0ae,0x3fe8d2cb8794aa99,2
+np.float64,0xbf9a579e7834af40,0x3ff98b5cc8021e1c,2
+np.float64,0x3fc664fefb2cca00,0x3ff651a664ccf8fa,2
+np.float64,0xbfe8a7aa0e714f54,0x40039a5b4df938a0,2
+np.float64,0xbfdf27d380be4fa8,0x4000a241074dbae6,2
+np.float64,0x3fe00ddf55e01bbe,0x3ff0b94eb1ea1851,2
+np.float64,0x3feb47edbff68fdc,0x3fe199822d075959,2
+np.float64,0x3fb4993822293270,0x3ff7d80c838186d0,2
+np.float64,0xbfca2cd1473459a4,0x3ffc6d88c8de3d0d,2
+np.float64,0xbfea7d9c7674fb39,0x40045e4559e9e52d,2
+np.float64,0x3fe0dce425e1b9c8,0x3ff04099cab23289,2
+np.float64,0x3fd6bb7e97ad76fc,0x3ff352a30434499c,2
+np.float64,0x3fd4a4f16da949e4,0x3ff3e0b07432c9aa,2
+np.float64,0x8000000000000000,0x3ff921fb54442d18,2
+np.float64,0x3fe688f5b56d11ec,0x3fe9435f63264375,2
+np.float64,0xbfdf5a427ebeb484,0x4000a97a6c5d4abc,2
+np.float64,0xbfd1f3483fa3e690,0x3ffdae6c8a299383,2
+np.float64,0xbfeac920db759242,0x4004805862be51ec,2
+np.float64,0x3fef5bc711feb78e,0x3fc9ac40fba5b93b,2
+np.float64,0x3fe4bd9e12e97b3c,0x3febb363c787d381,2
+np.float64,0x3fef6a59ab7ed4b4,0x3fc880f1324eafce,2
+np.float64,0x3fc07a362120f470,0x3ff7113cf2c672b3,2
+np.float64,0xbfe4d6dbe2e9adb8,0x40023d6f6bea44b7,2
+np.float64,0xbfec2d6a15785ad4,0x40052eb425cc37a2,2
+np.float64,0x3fc90dae05321b60,0x3ff5fb10015d2934,2
+np.float64,0xbfa9239f74324740,0x3ff9eb2d057068ea,2
+np.float64,0xbfeb4fc8baf69f92,0x4004bf5e17fb08a4,2
+np.float64,0x0,0x3ff921fb54442d18,2
+np.float64,0x3faaf1884c35e320,0x3ff84a5591dbe1f3,2
+np.float64,0xbfed842561fb084b,0x4005f5c0a19116ce,2
+np.float64,0xbfc64850c32c90a0,0x3ffbeeac2ee70f9a,2
+np.float64,0x3fd7d879f5afb0f4,0x3ff306254c453436,2
+np.float64,0xbfdabaa586b5754c,0x4000035e6ac83a2b,2
+np.float64,0xbfebfeefa977fddf,0x4005167446fb9faf,2
+np.float64,0xbfe9383462727069,0x4003d407aa6a1577,2
+np.float64,0x3fe108dfb6e211c0,0x3ff026ac924b281d,2
+np.float64,0xbf85096df02a12c0,0x3ff94c0e60a22ede,2
+np.float64,0xbfe3121cd566243a,0x4001ac8f90db5882,2
+np.float64,0xbfd227f62aa44fec,0x3ffdbc26bb175dcc,2
+np.float64,0x3fd931af2cb26360,0x3ff2a8b62dfe003c,2
+np.float64,0xbfd9b794e3b36f2a,0x3fffbfbc89ec013d,2
+np.float64,0x3fc89b2e6f313660,0x3ff609a6e67f15f2,2
+np.float64,0x3fc0b14a8f216298,0x3ff70a4b6905aad2,2
+np.float64,0xbfeda11a657b4235,0x400608b3f9fff574,2
+np.float64,0xbfed2ee9ec7a5dd4,0x4005c040b7c02390,2
+np.float64,0xbfef7819d8fef034,0x4007ac6bf75cf09d,2
+np.float64,0xbfcc4720fb388e40,0x3ffcb2666a00b336,2
+np.float64,0xbfe05dec4be0bbd8,0x4000dc8a25ca3760,2
+np.float64,0x3fb093416e212680,0x3ff81897b6d8b374,2
+np.float64,0xbfc6ab89332d5714,0x3ffbfb4559d143e7,2
+np.float64,0x3fc51948512a3290,0x3ff67bb9df662c0a,2
+np.float64,0x3fed4d94177a9b28,0x3fda76c92f0c0132,2
+np.float64,0x3fdd195fbeba32c0,0x3ff194a5586dd18e,2
+np.float64,0x3fe3f82799e7f050,0x3fecb354c2faf55c,2
+np.float64,0x3fecac2169f95842,0x3fdd7222296cb7a7,2
+np.float64,0x3fe3d3f36fe7a7e6,0x3fece18f45e30dd7,2
+np.float64,0x3fe31ff63d663fec,0x3fedc46c77d30c6a,2
+np.float64,0xbfe3120c83e62419,0x4001ac8a7c4aa742,2
+np.float64,0x3fe7c1a7976f8350,0x3fe77e4a9307c9f8,2
+np.float64,0x3fe226fe9de44dfe,0x3feef6c0f3cb00fa,2
+np.float64,0x3fd5c933baab9268,0x3ff3933e8a37de42,2
+np.float64,0x3feaa98496f5530a,0x3fe2c003832ebf21,2
+np.float64,0xbfc6f80a2f2df014,0x3ffc04fd54cb1317,2
+np.float64,0x3fde5e18d0bcbc30,0x3ff138f7b32a2ca3,2
+np.float64,0xbfe30c8dd566191c,0x4001aad4af935a78,2
+np.float64,0x3fbe8d196e3d1a30,0x3ff737fec8149ecc,2
+np.float64,0x3feaee6731f5dcce,0x3fe241fa42cce22d,2
+np.float64,0x3fef9cc46cff3988,0x3fc3f17b708dbdbb,2
+np.float64,0xbfdb181bdeb63038,0x4000103ecf405602,2
+np.float64,0xbfc58de0ed2b1bc0,0x3ffbd704c14e15cd,2
+np.float64,0xbfee05d5507c0bab,0x40064e480faba6d8,2
+np.float64,0x3fe27d0ffa64fa20,0x3fee8dc71ef79f2c,2
+np.float64,0xbfe4f7ad4c69ef5a,0x400248456cd09a07,2
+np.float64,0xbfe4843e91e9087d,0x4002225f3e139c84,2
+np.float64,0x3fe7158b9c6e2b18,0x3fe87ae845c5ba96,2
+np.float64,0xbfea64316074c863,0x400452fd2bc23a44,2
+np.float64,0xbfc9f3ae4133e75c,0x3ffc663d482afa42,2
+np.float64,0xbfd5e18513abc30a,0x3ffeb72fc76d7071,2
+np.float64,0xbfd52f6438aa5ec8,0x3ffe87e5b18041e5,2
+np.float64,0xbfea970650f52e0d,0x400469a4a6758154,2
+np.float64,0xbfe44321b7e88644,0x40020d404a2141b1,2
+np.float64,0x3fdf5a39bbbeb474,0x3ff0f10453059dbd,2
+np.float64,0xbfa1d4069423a810,0x3ff9b0a2eacd2ce2,2
+np.float64,0xbfc36d16a326da2c,0x3ffb92077d41d26a,2
+np.float64,0x1,0x3ff921fb54442d18,2
+np.float64,0x3feb232a79764654,0x3fe1df5beeb249d0,2
+np.float64,0xbfed2003d5fa4008,0x4005b737c2727583,2
+np.float64,0x3fd5b093a3ab6128,0x3ff399ca2db1d96d,2
+np.float64,0x3fca692c3d34d258,0x3ff5ceb86b79223e,2
+np.float64,0x3fd6bbdf89ad77c0,0x3ff3528916df652d,2
+np.float64,0xbfefdadd46ffb5bb,0x40085ee735e19f19,2
+np.float64,0x3feb69fb2676d3f6,0x3fe157ee0c15691e,2
+np.float64,0x3fe44c931f689926,0x3fec46b6f5e3f265,2
+np.float64,0xbfc43ddbcb287bb8,0x3ffbac71d268d74d,2
+np.float64,0x3fe6e16d43edc2da,0x3fe8c5cf0f0daa66,2
+np.float64,0x3fe489efc76913e0,0x3febf704ca1ac2a6,2
+np.float64,0xbfe590aadceb2156,0x40027b764205cf78,2
+np.float64,0xbf782e8aa0305d00,0x3ff93a29e81928ab,2
+np.float64,0x3fedcb80cffb9702,0x3fd7e5d1f98a418b,2
+np.float64,0x3fe075858060eb0c,0x3ff07d23ab46b60f,2
+np.float64,0x3fe62a68296c54d0,0x3fe9c77f7068043b,2
+np.float64,0x3feff16a3c7fe2d4,0x3fae8e8a739cc67a,2
+np.float64,0xbfd6ed93e3addb28,0x3ffefebab206fa99,2
+np.float64,0x3fe40d8ccf681b1a,0x3fec97e9cd29966d,2
+np.float64,0x3fd6408210ac8104,0x3ff3737a7d374107,2
+np.float64,0x3fec8023b8f90048,0x3fde35ebfb2b3afd,2
+np.float64,0xbfe13babd4627758,0x40011dae5c07c56b,2
+np.float64,0xbfd2183e61a4307c,0x3ffdb80dd747cfbe,2
+np.float64,0x3feae8eb1d75d1d6,0x3fe24c1f6e42ae77,2
+np.float64,0xbfea559b9c74ab37,0x40044c8e5e123b20,2
+np.float64,0xbfd12c9d57a2593a,0x3ffd7ac6222f561c,2
+np.float64,0x3fe32eb697e65d6e,0x3fedb202693875b6,2
+np.float64,0xbfde0808c3bc1012,0x4000794bd8616ea3,2
+np.float64,0x3fe14958a06292b2,0x3ff0007b40ac648a,2
+np.float64,0x3fe3d388a6e7a712,0x3fece21751a6dd7c,2
+np.float64,0x3fe7ad7897ef5af2,0x3fe79c5b3da302a7,2
+np.float64,0x3fec75527e78eaa4,0x3fde655de0cf0508,2
+np.float64,0x3fea920d4c75241a,0x3fe2ea48f031d908,2
+np.float64,0x7fefffffffffffff,0x7ff8000000000000,2
+np.float64,0xbfc17a68cb22f4d0,0x3ffb530925f41aa0,2
+np.float64,0xbfe1c93166e39263,0x400147f3cb435dec,2
+np.float64,0x3feb97c402f72f88,0x3fe0fe5b561bf869,2
+np.float64,0x3fb58ff5162b1ff0,0x3ff7c8933fa969dc,2
+np.float64,0x3fe68e2beded1c58,0x3fe93c075283703b,2
+np.float64,0xbf94564cc828aca0,0x3ff97355e5ee35db,2
+np.float64,0x3fd31061c9a620c4,0x3ff44b150ec96998,2
+np.float64,0xbfc7d0c89f2fa190,0x3ffc208bf4eddc4d,2
+np.float64,0x3fe5736f1d6ae6de,0x3feac18f84992d1e,2
+np.float64,0x3fdb62e480b6c5c8,0x3ff20ecfdc4afe7c,2
+np.float64,0xbfc417228b282e44,0x3ffba78afea35979,2
+np.float64,0x3f8f5ba1303eb780,0x3ff8e343714630ff,2
+np.float64,0x3fe8e99126f1d322,0x3fe5b6511d4c0798,2
+np.float64,0xbfe2ec08a1e5d812,0x4001a0bb28a85875,2
+np.float64,0x3fea3b46cf74768e,0x3fe383dceaa74296,2
+np.float64,0xbfe008b5ed60116c,0x4000c3d62c275d40,2
+np.float64,0xbfcd9f8a4b3b3f14,0x3ffcde98d6484202,2
+np.float64,0xbfdb5fb112b6bf62,0x40001a22137ef1c9,2
+np.float64,0xbfe9079565f20f2b,0x4003c0670c92e401,2
+np.float64,0xbfce250dc53c4a1c,0x3ffcefc2b3dc3332,2
+np.float64,0x3fe9ba85d373750c,0x3fe4607131b28773,2
+np.float64,0x10000000000000,0x3ff921fb54442d18,2
+np.float64,0xbfeb9ef42c773de8,0x4004e5f239203ad8,2
+np.float64,0xbfd6bf457dad7e8a,0x3ffef2563d87b18d,2
+np.float64,0x3fe4de9aa5e9bd36,0x3feb87f97defb04a,2
+np.float64,0x3fedb4f67cfb69ec,0x3fd8603c465bffac,2
+np.float64,0x3fe7b6d9506f6db2,0x3fe78e670c7bdb67,2
+np.float64,0x3fe071717460e2e2,0x3ff07f84472d9cc5,2
+np.float64,0xbfed2e79dbfa5cf4,0x4005bffc6f9ad24f,2
+np.float64,0x3febb8adc377715c,0x3fe0bcebfbd45900,2
+np.float64,0xbfee2cffd87c5a00,0x40066b20a037c478,2
+np.float64,0x3fef7e358d7efc6c,0x3fc6d0ba71a542a8,2
+np.float64,0xbfef027eef7e04fe,0x400723291cb00a7a,2
+np.float64,0x3fac96da34392dc0,0x3ff83d260a936c6a,2
+np.float64,0x3fe9dba94a73b752,0x3fe428736b94885e,2
+np.float64,0x3fed37581efa6eb0,0x3fdae49dcadf1d90,2
+np.float64,0xbfe6e61037edcc20,0x4002f23031b8d522,2
+np.float64,0xbfdea7204dbd4e40,0x40008fe1f37918b7,2
+np.float64,0x3feb9f8edb773f1e,0x3fe0eef20bd4387b,2
+np.float64,0x3feeb0b6ed7d616e,0x3fd25fb3b7a525d6,2
+np.float64,0xbfd7ce9061af9d20,0x3fff3b25d531aa2b,2
+np.float64,0xbfc806b509300d6c,0x3ffc2768743a8360,2
+np.float64,0xbfa283882c250710,0x3ff9b61fda28914a,2
+np.float64,0x3fdec70050bd8e00,0x3ff11b1d769b578f,2
+np.float64,0xbfc858a44930b148,0x3ffc31d6758b4721,2
+np.float64,0x3fdc321150b86424,0x3ff1d5504c3c91e4,2
+np.float64,0x3fd9416870b282d0,0x3ff2a46f3a850f5b,2
+np.float64,0x3fdd756968baead4,0x3ff17ac510a5573f,2
+np.float64,0xbfedfd632cfbfac6,0x400648345a2f89b0,2
+np.float64,0x3fd6874285ad0e84,0x3ff36098ebff763f,2
+np.float64,0x3fe6daacc9edb55a,0x3fe8cf75fae1e35f,2
+np.float64,0x3fe53f19766a7e32,0x3feb07d0e97cd55b,2
+np.float64,0x3fd13cc36ca27988,0x3ff4c4ff801b1faa,2
+np.float64,0x3fe4f21cbce9e43a,0x3feb6e34a72ef529,2
+np.float64,0xbfc21c1cc9243838,0x3ffb67726394ca89,2
+np.float64,0x3fe947a3f2728f48,0x3fe51eae4660e23c,2
+np.float64,0xbfce78cd653cf19c,0x3ffcfa89194b3f5e,2
+np.float64,0x3fe756f049eeade0,0x3fe81be7f2d399e2,2
+np.float64,0xbfcc727cf138e4f8,0x3ffcb7f547841bb0,2
+np.float64,0xbfc2d8d58f25b1ac,0x3ffb7f496cc72458,2
+np.float64,0xbfcfd0e4653fa1c8,0x3ffd26e1309bc80b,2
+np.float64,0xbfe2126c106424d8,0x40015e0e01db6a4a,2
+np.float64,0x3fe580e4306b01c8,0x3feaaf683ce51aa5,2
+np.float64,0x3fcea8a1b93d5140,0x3ff543456c0d28c7,2
+np.float64,0xfff0000000000000,0x7ff8000000000000,2
+np.float64,0xbfd9d5da72b3abb4,0x3fffc8013113f968,2
+np.float64,0xbfe1fdfcea63fbfa,0x400157def2e4808d,2
+np.float64,0xbfc0022e0720045c,0x3ffb239963e7cbf2,2
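The rows above close out the float64 portion of the preceding validation set; the diff below adds the matching arccosh set. Each CSV row has the form `dtype,input,output,ulperrortol`: `input` and `output` are the raw IEEE-754 bit patterns of the argument and of the expected function value, and the final column is the allowed error in units in the last place (ULP). The sketch below shows one way such a row could be decoded and checked. It is a minimal illustration, not NumPy's actual test harness, and the `check_row` helper is a hypothetical name introduced here.

```python
import numpy as np

def check_row(dtype_name, in_hex, out_hex, ulp_tol, func=np.arccosh):
    """Check one validation-CSV row: |func(input) - output| <= ulp_tol ULP.

    Hypothetical helper for illustration; not NumPy's actual test harness.
    """
    dt = np.dtype(dtype_name.replace("np.", ""))      # "np.float32" -> float32
    ui = np.dtype(f"uint{dt.itemsize * 8}")           # matching-width integer type
    x = np.array([int(in_hex, 16)], dtype=ui).view(dt)[0]        # bits -> float
    expected = np.array([int(out_hex, 16)], dtype=ui).view(dt)[0]
    with np.errstate(invalid="ignore"):               # domain-edge rows yield nan
        got = func(x)
    if np.isnan(expected) or np.isinf(expected):      # nan/inf rows match in kind
        return np.isnan(got) if np.isnan(expected) else got == expected
    # Measure the error in ULP via the floating-point spacing at `expected`.
    ulp_err = abs(float(got) - float(expected)) / abs(np.spacing(expected))
    return ulp_err <= ulp_tol

# The row "np.float32,0x3f800000,0x0,2" asserts arccosh(1.0) == 0.0 within 2 ULP:
print(check_row("np.float32", "0x3f800000", "0x0", 2))   # True
```

The same helper applies unchanged to the float64 rows, since the dtype name in the first column selects the bit width used to reinterpret the hex patterns.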
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv
new file mode 100644
index 00000000..0defe50b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arccosh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3f83203f,0x3e61d9d6,2
+np.float32,0x3f98dea1,0x3f1d1af6,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0x7eba99af,0x42b0d032,2
+np.float32,0x3fc95a13,0x3f833650,2
+np.float32,0x3fce9a45,0x3f8771e1,2
+np.float32,0x3fc1bd96,0x3f797811,2
+np.float32,0x7eba2391,0x42b0ceed,2
+np.float32,0x7d4e8f15,0x42acdb8c,2
+np.float32,0x3feca42e,0x3f9cc88e,2
+np.float32,0x7e2b314e,0x42af412e,2
+np.float32,0x7f7fffff,0x42b2d4fc,2
+np.float32,0x3f803687,0x3d6c4380,2
+np.float32,0x3fa0edbd,0x3f33e706,2
+np.float32,0x3faa8074,0x3f4b3d3c,2
+np.float32,0x3fa0c49e,0x3f337af3,2
+np.float32,0x3f8c9ec4,0x3ee18812,2
+np.float32,0x7efef78e,0x42b17006,2
+np.float32,0x3fc75720,0x3f818aa4,2
+np.float32,0x7f52d4c8,0x42b27198,2
+np.float32,0x3f88f21e,0x3ebe52b0,2
+np.float32,0x3ff7a042,0x3fa3a07a,2
+np.float32,0x7f52115c,0x42b26fbd,2
+np.float32,0x3fc6bf6f,0x3f810b42,2
+np.float32,0x3fd105d0,0x3f895649,2
+np.float32,0x3fee7c2a,0x3f9df66e,2
+np.float32,0x7f0ff9a5,0x42b1ae4f,2
+np.float32,0x7e81f075,0x42b016e7,2
+np.float32,0x3fa57d65,0x3f3f70c6,2
+np.float32,0x80800000,0xffc00000,2
+np.float32,0x7da239f5,0x42adc2bf,2
+np.float32,0x3f9e432c,0x3f2cbd80,2
+np.float32,0x3ff2839b,0x3fa07ee4,2
+np.float32,0x3fec8aef,0x3f9cb850,2
+np.float32,0x7d325893,0x42ac905b,2
+np.float32,0x3fa27431,0x3f37dade,2
+np.float32,0x3fce7408,0x3f8753ae,2
+np.float32,0x3fde6684,0x3f93353f,2
+np.float32,0x3feb9a3e,0x3f9c1cff,2
+np.float32,0x7deb34bb,0x42ae80f0,2
+np.float32,0x3fed9300,0x3f9d61b7,2
+np.float32,0x7f35e253,0x42b225fb,2
+np.float32,0x7e6db57f,0x42afe93f,2
+np.float32,0x3fa41f08,0x3f3c10bc,2
+np.float32,0x3fb0d4da,0x3f590de3,2
+np.float32,0x3fb5c690,0x3f632351,2
+np.float32,0x3fcde9ce,0x3f86e638,2
+np.float32,0x3f809c7b,0x3dc81161,2
+np.float32,0x3fd77291,0x3f8e3226,2
+np.float32,0x3fc21a06,0x3f7a1a82,2
+np.float32,0x3fba177e,0x3f6b8139,2
+np.float32,0x7f370dff,0x42b22944,2
+np.float32,0x3fe5bfcc,0x3f9841c1,2
+np.float32,0x3feb0caa,0x3f9bc139,2
+np.float32,0x7f4fe5c3,0x42b26a6c,2
+np.float32,0x7f1e1419,0x42b1de28,2
+np.float32,0x7f5e3c96,0x42b28c92,2
+np.float32,0x3f8cd313,0x3ee3521e,2
+np.float32,0x3fa97824,0x3f48e049,2
+np.float32,0x7d8ca281,0x42ad799e,2
+np.float32,0x3f96b51b,0x3f165193,2
+np.float32,0x3f81328a,0x3e0bf504,2
+np.float32,0x3ff60bf3,0x3fa2ab45,2
+np.float32,0x3ff9b629,0x3fa4e107,2
+np.float32,0x3fecacfc,0x3f9cce37,2
+np.float32,0x3fba8804,0x3f6c5600,2
+np.float32,0x3f81f752,0x3e333fdd,2
+np.float32,0x3fb5b262,0x3f62fb46,2
+np.float32,0x3fa21bc0,0x3f36f7e6,2
+np.float32,0x3fbc87bb,0x3f7011dc,2
+np.float32,0x3fe18b32,0x3f9565ae,2
+np.float32,0x7dfb6dd5,0x42aea316,2
+np.float32,0x3fb7c602,0x3f670ee3,2
+np.float32,0x7efeb6a2,0x42b16f84,2
+np.float32,0x3fa56180,0x3f3f2ca4,2
+np.float32,0x3f8dcaff,0x3eeb9ac0,2
+np.float32,0x7e876238,0x42b02beb,2
+np.float32,0x7f0bb67d,0x42b19eec,2
+np.float32,0x3faca01c,0x3f4fffa5,2
+np.float32,0x3fdb57ee,0x3f9108b8,2
+np.float32,0x3fe3bade,0x3f96e4b7,2
+np.float32,0x7f7aa2dd,0x42b2ca25,2
+np.float32,0x3fed92ec,0x3f9d61aa,2
+np.float32,0x7eb789b1,0x42b0c7b9,2
+np.float32,0x7f7f16e4,0x42b2d329,2
+np.float32,0x3fb6647e,0x3f645b84,2
+np.float32,0x3f99335e,0x3f1e1d96,2
+np.float32,0x7e690a11,0x42afdf17,2
+np.float32,0x7dff2f95,0x42aeaaae,2
+np.float32,0x7f70adfd,0x42b2b564,2
+np.float32,0x3fe92252,0x3f9a80fe,2
+np.float32,0x3fef54ce,0x3f9e7fe5,2
+np.float32,0x3ff24eaa,0x3fa05df9,2
+np.float32,0x7f04565a,0x42b18328,2
+np.float32,0x3fcb8b80,0x3f85007f,2
+np.float32,0x3fcd4d0a,0x3f866983,2
+np.float32,0x3fbe7d82,0x3f73a911,2
+np.float32,0x3f8a7a8a,0x3ecdc8f6,2
+np.float32,0x3f912441,0x3f030d56,2
+np.float32,0x3f9b29d6,0x3f23f663,2
+np.float32,0x3fab7f36,0x3f4d7c6c,2
+np.float32,0x7dfedafc,0x42aeaa04,2
+np.float32,0x3fe190c0,0x3f956982,2
+np.float32,0x3f927515,0x3f07e0bb,2
+np.float32,0x3ff6442a,0x3fa2cd7e,2
+np.float32,0x7f6656d0,0x42b29ee8,2
+np.float32,0x3fe29aa0,0x3f96201f,2
+np.float32,0x3fa4a247,0x3f3d5687,2
+np.float32,0x3fa1cf19,0x3f363226,2
+np.float32,0x3fc20037,0x3f79ed36,2
+np.float32,0x7cc1241a,0x42ab5645,2
+np.float32,0x3fafd540,0x3f56f25a,2
+np.float32,0x7e5b3f5f,0x42afbfdb,2
+np.float32,0x7f48de5f,0x42b258d0,2
+np.float32,0x3fce1ca0,0x3f870e85,2
+np.float32,0x7ee40bb2,0x42b136e4,2
+np.float32,0x7ecdb133,0x42b10212,2
+np.float32,0x3f9f181c,0x3f2f02ca,2
+np.float32,0x3f936cbf,0x3f0b4f63,2
+np.float32,0x3fa4f8ea,0x3f3e2c2f,2
+np.float32,0x3fcc03e2,0x3f8561ac,2
+np.float32,0x3fb801f2,0x3f67831b,2
+np.float32,0x7e141dad,0x42aef70c,2
+np.float32,0x3fe8c04e,0x3f9a4087,2
+np.float32,0x3f8548d5,0x3e929f37,2
+np.float32,0x7f148d7d,0x42b1be56,2
+np.float32,0x3fd2c9a2,0x3f8ab1ed,2
+np.float32,0x7eb374fd,0x42b0bc36,2
+np.float32,0x7f296d36,0x42b201a7,2
+np.float32,0x3ff138e2,0x3f9fb09d,2
+np.float32,0x3ff42898,0x3fa18347,2
+np.float32,0x7da8c5e1,0x42add700,2
+np.float32,0x7dcf72c4,0x42ae40a4,2
+np.float32,0x7ea571fc,0x42b09296,2
+np.float32,0x3fc0953d,0x3f776ba3,2
+np.float32,0x7f1773dd,0x42b1c83c,2
+np.float32,0x7ef53b68,0x42b15c17,2
+np.float32,0x3f85d69f,0x3e9a0f3a,2
+np.float32,0x7e8b9a05,0x42b03ba0,2
+np.float32,0x3ff07d20,0x3f9f3ad2,2
+np.float32,0x7e8da32c,0x42b0430a,2
+np.float32,0x7ef96004,0x42b164ab,2
+np.float32,0x3fdfaa62,0x3f941837,2
+np.float32,0x7f0057c5,0x42b17377,2
+np.float32,0x3fb2663f,0x3f5c5065,2
+np.float32,0x3fd3d8c3,0x3f8b8055,2
+np.float32,0x1,0xffc00000,2
+np.float32,0x3fd536c1,0x3f8c8862,2
+np.float32,0x3f91b953,0x3f053619,2
+np.float32,0x3fb3305c,0x3f5deee1,2
+np.float32,0x7ecd86b9,0x42b101a8,2
+np.float32,0x3fbf71c5,0x3f75624d,2
+np.float32,0x3ff5f0f4,0x3fa29ad2,2
+np.float32,0x3fe50389,0x3f97c328,2
+np.float32,0x3fa325a1,0x3f399e69,2
+np.float32,0x3fe4397a,0x3f973a9f,2
+np.float32,0x3f8684c6,0x3ea2b784,2
+np.float32,0x7f25ae00,0x42b1f634,2
+np.float32,0x3ff7cbf7,0x3fa3badb,2
+np.float32,0x7f73f0e0,0x42b2bc48,2
+np.float32,0x3fc88b70,0x3f828b92,2
+np.float32,0x3fb01c16,0x3f578886,2
+np.float32,0x7e557623,0x42afb229,2
+np.float32,0x3fcbcd5b,0x3f8535b4,2
+np.float32,0x7f7157e4,0x42b2b6cd,2
+np.float32,0x7f51d9d4,0x42b26f36,2
+np.float32,0x7f331a3b,0x42b21e17,2
+np.float32,0x7f777fb5,0x42b2c3b2,2
+np.float32,0x3f832001,0x3e61d11f,2
+np.float32,0x7f2cd055,0x42b20bca,2
+np.float32,0x3f89831f,0x3ec42f76,2
+np.float32,0x7f21da33,0x42b1ea3d,2
+np.float32,0x3f99e416,0x3f20330a,2
+np.float32,0x7f2c8ea1,0x42b20b07,2
+np.float32,0x7f462c98,0x42b251e6,2
+np.float32,0x7f4fdb3f,0x42b26a52,2
+np.float32,0x3fcc1338,0x3f856e07,2
+np.float32,0x3f823673,0x3e3e20da,2
+np.float32,0x7dbfe89d,0x42ae18c6,2
+np.float32,0x3fc9b04c,0x3f837d38,2
+np.float32,0x7dba3213,0x42ae094d,2
+np.float32,0x7ec5a483,0x42b0eda1,2
+np.float32,0x3fbc4d14,0x3f6fa543,2
+np.float32,0x3fc85ce2,0x3f8264f1,2
+np.float32,0x7f77c816,0x42b2c447,2
+np.float32,0x3f9c9281,0x3f280492,2
+np.float32,0x7f49b3e2,0x42b25aef,2
+np.float32,0x3fa7e4da,0x3f45347c,2
+np.float32,0x7e0c9df5,0x42aedc72,2
+np.float32,0x7f21fd1a,0x42b1eaab,2
+np.float32,0x7f7c63ad,0x42b2cdb6,2
+np.float32,0x7f4eb80a,0x42b26783,2
+np.float32,0x7e98038c,0x42b0673c,2
+np.float32,0x7e89ba08,0x42b034b4,2
+np.float32,0x3ffc06ba,0x3fa64094,2
+np.float32,0x3fae63f6,0x3f53db36,2
+np.float32,0x3fbc2d30,0x3f6f6a1c,2
+np.float32,0x7de0e5e5,0x42ae69fe,2
+np.float32,0x7e09ed18,0x42aed28d,2
+np.float32,0x3fea78f8,0x3f9b6129,2
+np.float32,0x7dfe0bcc,0x42aea863,2
+np.float32,0x7ee21d03,0x42b13289,2
+np.float32,0x3fcc3aed,0x3f858dfc,2
+np.float32,0x3fe6b3ba,0x3f98e4ea,2
+np.float32,0x3f90f25f,0x3f025225,2
+np.float32,0x7f1bcaf4,0x42b1d6b3,2
+np.float32,0x3f83ac81,0x3e74c20e,2
+np.float32,0x3f98681d,0x3f1bae16,2
+np.float32,0x3fe1f2d9,0x3f95ad08,2
+np.float32,0x3fa279d7,0x3f37e951,2
+np.float32,0x3feb922a,0x3f9c17c4,2
+np.float32,0x7f1c72e8,0x42b1d8da,2
+np.float32,0x3fea156b,0x3f9b2038,2
+np.float32,0x3fed6bda,0x3f9d48aa,2
+np.float32,0x3fa86142,0x3f46589c,2
+np.float32,0x3ff16bc2,0x3f9fd072,2
+np.float32,0x3fbebf65,0x3f74207b,2
+np.float32,0x7e7b78b5,0x42b00610,2
+np.float32,0x3ff51ab8,0x3fa217f0,2
+np.float32,0x3f8361bb,0x3e6adf07,2
+np.float32,0x7edbceed,0x42b1240e,2
+np.float32,0x7f10e2c0,0x42b1b18a,2
+np.float32,0x3fa7bc58,0x3f44d4ef,2
+np.float32,0x3f813bde,0x3e0e1138,2
+np.float32,0x7f30d5b9,0x42b21791,2
+np.float32,0x3fb4f450,0x3f61806a,2
+np.float32,0x7eee02c4,0x42b14cca,2
+np.float32,0x7ec74b62,0x42b0f1e4,2
+np.float32,0x3ff96bca,0x3fa4b498,2
+np.float32,0x7f50e304,0x42b26cda,2
+np.float32,0x7eb14c57,0x42b0b603,2
+np.float32,0x7c3f0733,0x42a9edbf,2
+np.float32,0x7ea57acb,0x42b092b1,2
+np.float32,0x7f2788dc,0x42b1fbe7,2
+np.float32,0x3fa39f14,0x3f3ad09b,2
+np.float32,0x3fc3a7e0,0x3f7ccfa0,2
+np.float32,0x3fe70a73,0x3f991eb0,2
+np.float32,0x7f4831f7,0x42b25718,2
+np.float32,0x3fe947d0,0x3f9a999c,2
+np.float32,0x7ef2b1c7,0x42b156c4,2
+np.float32,0x3fede0ea,0x3f9d937f,2
+np.float32,0x3f9fef8e,0x3f314637,2
+np.float32,0x3fc313c5,0x3f7bcebd,2
+np.float32,0x7ee99337,0x42b14328,2
+np.float32,0x7eb9042e,0x42b0cbd5,2
+np.float32,0x3fc9d3dc,0x3f839a69,2
+np.float32,0x3fb2c018,0x3f5d091d,2
+np.float32,0x3fcc4e8f,0x3f859dc5,2
+np.float32,0x3fa9363b,0x3f484819,2
+np.float32,0x7f72ce2e,0x42b2b9e4,2
+np.float32,0x7e639326,0x42afd2f1,2
+np.float32,0x7f4595d3,0x42b25060,2
+np.float32,0x7f6d0ac4,0x42b2ad97,2
+np.float32,0x7f1bda0d,0x42b1d6e5,2
+np.float32,0x3fd85ffd,0x3f8ee0ed,2
+np.float32,0x3f91d53f,0x3f059c8e,2
+np.float32,0x7d06e103,0x42ac0155,2
+np.float32,0x3fb83126,0x3f67de6e,2
+np.float32,0x7d81ce1f,0x42ad5097,2
+np.float32,0x7f79cb3b,0x42b2c86b,2
+np.float32,0x7f800000,0x7f800000,2
+np.float32,0x3fdbfffd,0x3f918137,2
+np.float32,0x7f4ecb1c,0x42b267b2,2
+np.float32,0x3fc2c122,0x3f7b3ed3,2
+np.float32,0x7f415854,0x42b24544,2
+np.float32,0x7e3d988b,0x42af7575,2
+np.float32,0x3f83ca99,0x3e789fcb,2
+np.float32,0x7f274f70,0x42b1fb38,2
+np.float32,0x7f0d20e6,0x42b1a416,2
+np.float32,0x3fdf3a1d,0x3f93c9c1,2
+np.float32,0x7efaa13e,0x42b1673d,2
+np.float32,0x3fb20b15,0x3f5b9434,2
+np.float32,0x3f86af9f,0x3ea4c664,2
+np.float32,0x3fe4fcb0,0x3f97be8a,2
+np.float32,0x3f920683,0x3f065085,2
+np.float32,0x3fa4b278,0x3f3d7e8b,2
+np.float32,0x3f8077a8,0x3daef77f,2
+np.float32,0x7e865be4,0x42b02807,2
+np.float32,0x3fcea7e2,0x3f877c9f,2
+np.float32,0x7e7e9db1,0x42b00c6d,2
+np.float32,0x3f9819aa,0x3f1aba7e,2
+np.float32,0x7f2b6c4b,0x42b207a7,2
+np.float32,0x7ef85e3e,0x42b16299,2
+np.float32,0x3fbd8290,0x3f71df8b,2
+np.float32,0x3fbbb615,0x3f6e8c8c,2
+np.float32,0x7f1bc7f5,0x42b1d6a9,2
+np.float32,0x3fbb4fea,0x3f6dcdad,2
+np.float32,0x3fb67e09,0x3f648dd1,2
+np.float32,0x3fc83495,0x3f824374,2
+np.float32,0x3fe52980,0x3f97dcbc,2
+np.float32,0x3f87d893,0x3eb25d7c,2
+np.float32,0x3fdb805a,0x3f9125c0,2
+np.float32,0x3fb33f0f,0x3f5e0ce1,2
+np.float32,0x3facc524,0x3f50516b,2
+np.float32,0x3ff40484,0x3fa16d0e,2
+np.float32,0x3ff078bf,0x3f9f3811,2
+np.float32,0x7f736747,0x42b2bb27,2
+np.float32,0x7f55768b,0x42b277f3,2
+np.float32,0x80000001,0xffc00000,2
+np.float32,0x7f6463d1,0x42b29a8e,2
+np.float32,0x3f8f8b59,0x3ef9d792,2
+np.float32,0x3f8a6f4d,0x3ecd5bf4,2
+np.float32,0x3fe958d9,0x3f9aa4ca,2
+np.float32,0x7f1e2ce2,0x42b1de78,2
+np.float32,0x3fb8584a,0x3f682a05,2
+np.float32,0x7dea3dc6,0x42ae7ed5,2
+np.float32,0x7f53a815,0x42b27399,2
+np.float32,0x7e0cf986,0x42aeddbf,2
+np.float32,0x7f3afb71,0x42b23422,2
+np.float32,0x3fd87d6e,0x3f8ef685,2
+np.float32,0x3ffcaa46,0x3fa6a0d7,2
+np.float32,0x7eecd276,0x42b14a3a,2
+np.float32,0x3ffc30b4,0x3fa65951,2
+np.float32,0x7e9c85e2,0x42b07634,2
+np.float32,0x3f95d862,0x3f1383de,2
+np.float32,0x7ef21410,0x42b15577,2
+np.float32,0x3fbfa1b5,0x3f75b86e,2
+np.float32,0x3fd6d90f,0x3f8dc086,2
+np.float32,0x0,0xffc00000,2
+np.float32,0x7e885dcd,0x42b02f9f,2
+np.float32,0x3fb3e057,0x3f5f54bf,2
+np.float32,0x7f40afdd,0x42b24385,2
+np.float32,0x3fb795c2,0x3f66b120,2
+np.float32,0x3fba7c11,0x3f6c3f73,2
+np.float32,0x3ffef620,0x3fa7f828,2
+np.float32,0x7d430508,0x42acbe1e,2
+np.float32,0x3f8d2892,0x3ee6369f,2
+np.float32,0x3fbea139,0x3f73e9d5,2
+np.float32,0x3ffaa928,0x3fa571b9,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0x7f16f9ce,0x42b1c69f,2
+np.float32,0x3fa8f753,0x3f47b657,2
+np.float32,0x3fd48a63,0x3f8c06ac,2
+np.float32,0x7f13419e,0x42b1b9d9,2
+np.float32,0x3fdf1526,0x3f93afde,2
+np.float32,0x3f903c8b,0x3eff3be8,2
+np.float32,0x7f085323,0x42b1925b,2
+np.float32,0x7cdbe309,0x42ab98ac,2
+np.float32,0x3fba2cfd,0x3f6ba9f1,2
+np.float32,0x7f5a805d,0x42b283e4,2
+np.float32,0x7f6753dd,0x42b2a119,2
+np.float32,0x3fed9f02,0x3f9d6964,2
+np.float32,0x3f96422c,0x3f14ddba,2
+np.float32,0x7f22f2a9,0x42b1edb1,2
+np.float32,0x3fe3fcfd,0x3f97119d,2
+np.float32,0x7e018ad0,0x42aeb271,2
+np.float32,0x7db896f5,0x42ae04de,2
+np.float32,0x7e55c795,0x42afb2ec,2
+np.float32,0x7f58ef8d,0x42b28036,2
+np.float32,0x7f24a16a,0x42b1f2f3,2
+np.float32,0x3fcf714c,0x3f881b09,2
+np.float32,0x3fcdd056,0x3f86d200,2
+np.float32,0x7f02fad0,0x42b17de0,2
+np.float32,0x7eeab877,0x42b145a9,2
+np.float32,0x3fd6029d,0x3f8d20f7,2
+np.float32,0x3fd4f8cd,0x3f8c59d6,2
+np.float32,0x3fb29d4a,0x3f5cc1a5,2
+np.float32,0x3fb11e2d,0x3f59a77a,2
+np.float32,0x7eded576,0x42b12b0e,2
+np.float32,0x7f26c2a5,0x42b1f988,2
+np.float32,0x3fb6165b,0x3f63c151,2
+np.float32,0x7f3bca47,0x42b23657,2
+np.float32,0x7d8c93bf,0x42ad7968,2
+np.float32,0x3f8ede02,0x3ef47176,2
+np.float32,0x3fbef762,0x3f7485b9,2
+np.float32,0x7f1419af,0x42b1bcc6,2
+np.float32,0x7d9e8c79,0x42adb701,2
+np.float32,0x3fa26336,0x3f37af63,2
+np.float32,0x7f5f5590,0x42b28f18,2
+np.float32,0x3fddc93a,0x3f92c651,2
+np.float32,0x3ff0a5fc,0x3f9f547f,2
+np.float32,0x3fb2f6b8,0x3f5d790e,2
+np.float32,0x3ffe59a4,0x3fa79d2c,2
+np.float32,0x7e4df848,0x42af9fde,2
+np.float32,0x3fb0ab3b,0x3f58b678,2
+np.float32,0x7ea54d47,0x42b09225,2
+np.float32,0x3fdd6404,0x3f927eb2,2
+np.float32,0x3f846dc0,0x3e864caa,2
+np.float32,0x7d046aee,0x42abf7e7,2
+np.float32,0x7f7c5a05,0x42b2cda3,2
+np.float32,0x3faf6126,0x3f55fb21,2
+np.float32,0x7f36a910,0x42b22829,2
+np.float32,0x3fdc7b36,0x3f91d938,2
+np.float32,0x3fff443e,0x3fa82577,2
+np.float32,0x7ee7154a,0x42b13daa,2
+np.float32,0x3f944742,0x3f0e435c,2
+np.float32,0x7f5b510a,0x42b285cc,2
+np.float32,0x3f9bc940,0x3f25c4d2,2
+np.float32,0x3fee4782,0x3f9dd4ea,2
+np.float32,0x3fcfc2dd,0x3f885aea,2
+np.float32,0x7eab65cf,0x42b0a4af,2
+np.float32,0x3f9cf908,0x3f292689,2
+np.float32,0x7ed35501,0x42b10feb,2
+np.float32,0x7dabb70a,0x42addfd9,2
+np.float32,0x7f348919,0x42b2222b,2
+np.float32,0x3fb137d4,0x3f59dd17,2
+np.float32,0x7e7b36c9,0x42b0058a,2
+np.float32,0x7e351fa4,0x42af5e0d,2
+np.float32,0x3f973c0c,0x3f18011e,2
+np.float32,0xff800000,0xffc00000,2
+np.float32,0x3f9b0a4b,0x3f239a33,2
+np.float32,0x3f87c4cf,0x3eb17e7e,2
+np.float32,0x7ef67760,0x42b15eaa,2
+np.float32,0x3fc4d2c8,0x3f7ed20f,2
+np.float32,0x7e940dac,0x42b059b8,2
+np.float32,0x7f6e6a52,0x42b2b08d,2
+np.float32,0x3f838752,0x3e6fe4b2,2
+np.float32,0x3fd8f046,0x3f8f4a94,2
+np.float32,0x3fa82112,0x3f45c223,2
+np.float32,0x3fd49b16,0x3f8c1345,2
+np.float32,0x7f02a941,0x42b17ca1,2
+np.float32,0x3f8a9d2c,0x3ecf1768,2
+np.float32,0x7c9372e3,0x42aacc0f,2
+np.float32,0x3fd260b3,0x3f8a619a,2
+np.float32,0x3f8a1b88,0x3eca27cb,2
+np.float32,0x7d25d510,0x42ac6b1c,2
+np.float32,0x7ef5a578,0x42b15cf5,2
+np.float32,0x3fe6625d,0x3f98ae9a,2
+np.float32,0x3ff53240,0x3fa22658,2
+np.float32,0x3f8bb2e6,0x3ed944cf,2
+np.float32,0x7f4679b1,0x42b252ad,2
+np.float32,0x3fa8db30,0x3f4774fc,2
+np.float32,0x7ee5fafd,0x42b13b37,2
+np.float32,0x3fc405e0,0x3f7d71fb,2
+np.float32,0x3f9303cd,0x3f09ddfd,2
+np.float32,0x7f486e67,0x42b257b2,2
+np.float32,0x7e73f12b,0x42aff680,2
+np.float32,0x3fe80f8b,0x3f99cbe4,2
+np.float32,0x3f84200a,0x3e81a3f3,2
+np.float32,0x3fa14e5c,0x3f34e3ce,2
+np.float32,0x3fda22ec,0x3f9029bb,2
+np.float32,0x3f801772,0x3d1aef98,2
+np.float32,0x7eaa1428,0x42b0a0bb,2
+np.float32,0x3feae0b3,0x3f9ba4aa,2
+np.float32,0x7ea439b4,0x42b08ecc,2
+np.float32,0x3fa28b1c,0x3f381579,2
+np.float32,0x7e8af247,0x42b03937,2
+np.float32,0x3fd19216,0x3f89c2b7,2
+np.float32,0x7f6ea033,0x42b2b100,2
+np.float32,0x3fad4fbf,0x3f518224,2
+np.float32,0x3febd940,0x3f9c45bd,2
+np.float32,0x7f4643a3,0x42b25221,2
+np.float32,0x7ec34478,0x42b0e771,2
+np.float32,0x7f18c83b,0x42b1ccb5,2
+np.float32,0x3fc665ad,0x3f80bf94,2
+np.float32,0x3ff0a999,0x3f9f56c4,2
+np.float32,0x3faf1cd2,0x3f5568fe,2
+np.float32,0x7ecd9dc6,0x42b101e1,2
+np.float32,0x3faad282,0x3f4bf754,2
+np.float32,0x3ff905a0,0x3fa47771,2
+np.float32,0x7f596481,0x42b28149,2
+np.float32,0x7f1cb31f,0x42b1d9ac,2
+np.float32,0x7e266719,0x42af32a6,2
+np.float32,0x7eccce06,0x42b0ffdb,2
+np.float32,0x3f9b6f71,0x3f24c102,2
+np.float32,0x3f80e4ba,0x3df1d6bc,2
+np.float32,0x3f843d51,0x3e836a60,2
+np.float32,0x7f70bd88,0x42b2b585,2
+np.float32,0x3fe4cc96,0x3f979e18,2
+np.float32,0x3ff737c7,0x3fa36151,2
+np.float32,0x3ff1197e,0x3f9f9cf4,2
+np.float32,0x7f08e190,0x42b19471,2
+np.float32,0x3ff1542e,0x3f9fc1b2,2
+np.float32,0x3ff6673c,0x3fa2e2d2,2
+np.float32,0xbf800000,0xffc00000,2
+np.float32,0x7e3f9ba7,0x42af7add,2
+np.float32,0x7f658ff6,0x42b29d2d,2
+np.float32,0x3f93441c,0x3f0ac0d9,2
+np.float32,0x7f526a74,0x42b27096,2
+np.float32,0x7f5b00c8,0x42b28511,2
+np.float32,0x3ff212f8,0x3fa038cf,2
+np.float32,0x7e0bd60d,0x42aed998,2
+np.float32,0x7f71ef7f,0x42b2b80e,2
+np.float32,0x7f7a897e,0x42b2c9f1,2
+np.float32,0x7e8b76a6,0x42b03b1e,2
+np.float32,0x7efa0da3,0x42b1660f,2
+np.float32,0x3fce9166,0x3f876ae0,2
+np.float32,0x3fc4163d,0x3f7d8e30,2
+np.float32,0x3fdb3784,0x3f90f16b,2
+np.float32,0x7c5f177b,0x42aa3d30,2
+np.float32,0x3fc6276d,0x3f808af5,2
+np.float32,0x7bac9cc2,0x42a856f4,2
+np.float32,0x3fe5876f,0x3f981bea,2
+np.float32,0x3fef60e3,0x3f9e878a,2
+np.float32,0x3fb23cd8,0x3f5bfb06,2
+np.float32,0x3fe114e2,0x3f951402,2
+np.float32,0x7ca8ef04,0x42ab11b4,2
+np.float32,0x7d93c2ad,0x42ad92ec,2
+np.float32,0x3fe5bb8a,0x3f983ee6,2
+np.float32,0x7f0182fd,0x42b1781b,2
+np.float32,0x7da63bb2,0x42adcf3d,2
+np.float32,0x3fac46b7,0x3f4f399e,2
+np.float32,0x7f7a5d8f,0x42b2c997,2
+np.float32,0x7f76572e,0x42b2c14b,2
+np.float32,0x7f42d53e,0x42b24931,2
+np.float32,0x7f7ffd00,0x42b2d4f6,2
+np.float32,0x3fc346c3,0x3f7c2756,2
+np.float32,0x7f1f6ae3,0x42b1e27a,2
+np.float32,0x3f87fb56,0x3eb3e2ee,2
+np.float32,0x3fed17a2,0x3f9d12b4,2
+np.float32,0x7f5ea903,0x42b28d8c,2
+np.float32,0x3f967f82,0x3f15a4ab,2
+np.float32,0x7d3b540c,0x42aca984,2
+np.float32,0x7f56711a,0x42b27a4a,2
+np.float32,0x7f122223,0x42b1b5ee,2
+np.float32,0x3fd6fa34,0x3f8dd919,2
+np.float32,0x3fadd62e,0x3f52a7b3,2
+np.float32,0x3fb7bf0c,0x3f67015f,2
+np.float32,0x7edf4ba7,0x42b12c1d,2
+np.float32,0x7e33cc65,0x42af5a4b,2
+np.float32,0x3fa6be17,0x3f427831,2
+np.float32,0x3fa07aa8,0x3f32b7d4,2
+np.float32,0x3fa4a3af,0x3f3d5a01,2
+np.float32,0x3fdbb267,0x3f9149a8,2
+np.float32,0x7ed45e25,0x42b1126c,2
+np.float32,0x3fe3f432,0x3f970ba6,2
+np.float32,0x7f752080,0x42b2bec3,2
+np.float32,0x3f872747,0x3eaa62ea,2
+np.float32,0x7e52175d,0x42afaa03,2
+np.float32,0x3fdc766c,0x3f91d5ce,2
+np.float32,0x7ecd6841,0x42b1015c,2
+np.float32,0x7f3d6c40,0x42b23ac6,2
+np.float32,0x3fb80c14,0x3f6796b9,2
+np.float32,0x3ff6ad56,0x3fa30d68,2
+np.float32,0x3fda44c3,0x3f90423e,2
+np.float32,0x3fdcba0c,0x3f9205fc,2
+np.float32,0x7e14a720,0x42aef8e6,2
+np.float32,0x3fe9e489,0x3f9b0047,2
+np.float32,0x7e69f933,0x42afe123,2
+np.float32,0x3ff3ee6d,0x3fa15f71,2
+np.float32,0x3f8538cd,0x3e91c1a7,2
+np.float32,0x3fdc3f07,0x3f91ae46,2
+np.float32,0x3fba2ef0,0x3f6bada2,2
+np.float32,0x7da64cd8,0x42adcf71,2
+np.float32,0x3fc34bd2,0x3f7c301d,2
+np.float32,0x3fa273aa,0x3f37d984,2
+np.float32,0x3ff0338c,0x3f9f0c86,2
+np.float32,0x7ed62cef,0x42b116c3,2
+np.float32,0x3f911e7e,0x3f02f7c6,2
+np.float32,0x7c8514c9,0x42aa9792,2
+np.float32,0x3fea2a74,0x3f9b2df5,2
+np.float32,0x3fe036f8,0x3f947a25,2
+np.float32,0x7c5654bf,0x42aa28ad,2
+np.float32,0x3fd9e423,0x3f8ffc32,2
+np.float32,0x7eec0439,0x42b1487b,2
+np.float32,0x3fc580f4,0x3f7ffb62,2
+np.float32,0x3fb0e316,0x3f592bbe,2
+np.float32,0x7c4cfb7d,0x42aa11d8,2
+np.float32,0x3faf9704,0x3f566e00,2
+np.float32,0x3fa7cf8a,0x3f45023d,2
+np.float32,0x7f7b724d,0x42b2cbcc,2
+np.float32,0x7f05bfe3,0x42b18897,2
+np.float32,0x3f90bde3,0x3f018bf3,2
+np.float32,0x7c565479,0x42aa28ad,2
+np.float32,0x3f94b517,0x3f0fb8e5,2
+np.float32,0x3fd6aadd,0x3f8d9e3c,2
+np.float32,0x7f09b37c,0x42b1977f,2
+np.float32,0x7f2b45ea,0x42b20734,2
+np.float32,0x3ff1d15e,0x3fa00fe9,2
+np.float32,0x3f99bce6,0x3f1fbd6c,2
+np.float32,0x7ecd1f76,0x42b100a7,2
+np.float32,0x7f443e2b,0x42b24ce2,2
+np.float32,0x7da7d6a5,0x42add428,2
+np.float32,0x7ebe0193,0x42b0d975,2
+np.float32,0x7ee13c43,0x42b1308b,2
+np.float32,0x3f8adf1b,0x3ed18e0c,2
+np.float32,0x7f76ce65,0x42b2c242,2
+np.float32,0x7e34f43d,0x42af5d92,2
+np.float32,0x7f306b76,0x42b2165d,2
+np.float32,0x7e1fd07f,0x42af1df7,2
+np.float32,0x3fab9a41,0x3f4db909,2
+np.float32,0x3fc23d1a,0x3f7a5803,2
+np.float32,0x3f8b7403,0x3ed70245,2
+np.float32,0x3f8c4dd6,0x3edebbae,2
+np.float32,0x3fe5f411,0x3f9864cd,2
+np.float32,0x3f88128b,0x3eb4e508,2
+np.float32,0x3fcb09de,0x3f84976f,2
+np.float32,0x7f32f2f5,0x42b21da6,2
+np.float32,0x3fe75610,0x3f9950f6,2
+np.float32,0x3f993edf,0x3f1e408d,2
+np.float32,0x3fc4a9d7,0x3f7e8be9,2
+np.float32,0x7f74551a,0x42b2bd1a,2
+np.float32,0x7de87129,0x42ae7ae2,2
+np.float32,0x7f18bbbd,0x42b1cc8c,2
+np.float32,0x7e7e1dd4,0x42b00b6c,2
+np.float32,0x3ff6e55b,0x3fa32f64,2
+np.float32,0x3fa634c8,0x3f412df3,2
+np.float32,0x3fd0fb7c,0x3f894e49,2
+np.float32,0x3ff4f6a6,0x3fa201d7,2
+np.float32,0x7f69d418,0x42b2a69a,2
+np.float32,0x7cb9632d,0x42ab414a,2
+np.float32,0x3fc57d36,0x3f7ff503,2
+np.float32,0x7e9e2ed7,0x42b07b9b,2
+np.float32,0x7f2e6868,0x42b2107d,2
+np.float32,0x3fa3169a,0x3f39785d,2
+np.float32,0x7f03cde0,0x42b18117,2
+np.float32,0x7f6d75d2,0x42b2ae7f,2
+np.float32,0x3ff483f2,0x3fa1bb75,2
+np.float32,0x7f1b39f7,0x42b1d4d6,2
+np.float32,0x3f8c7a7d,0x3ee0481e,2
+np.float32,0x3f989095,0x3f1c2b19,2
+np.float32,0x3fa4cbfd,0x3f3dbd87,2
+np.float32,0x7f75b00f,0x42b2bfef,2
+np.float32,0x3f940724,0x3f0d6756,2
+np.float32,0x7f5e5a1a,0x42b28cd6,2
+np.float32,0x800000,0xffc00000,2
+np.float32,0x7edd1d29,0x42b12716,2
+np.float32,0x3fa3e9e4,0x3f3b8c16,2
+np.float32,0x7e46d70e,0x42af8dd5,2
+np.float32,0x3f824745,0x3e40ec1e,2
+np.float32,0x3fd67623,0x3f8d770a,2
+np.float32,0x3fe9a6f3,0x3f9ad7fa,2
+np.float32,0x3fdda67c,0x3f92adc1,2
+np.float32,0x7ccb6c9a,0x42ab70d4,2
+np.float32,0x3ffd364a,0x3fa6f2fe,2
+np.float32,0x7e02424c,0x42aeb545,2
+np.float32,0x3fb6d2f2,0x3f6534a1,2
+np.float32,0x3fe1fe26,0x3f95b4cc,2
+np.float32,0x7e93ac57,0x42b05867,2
+np.float32,0x7f7b3433,0x42b2cb4d,2
+np.float32,0x3fb76803,0x3f66580d,2
+np.float32,0x3f9af881,0x3f23661b,2
+np.float32,0x3fd58062,0x3f8cbf98,2
+np.float32,0x80000000,0xffc00000,2
+np.float32,0x7f1af8f4,0x42b1d3ff,2
+np.float32,0x3fe66bba,0x3f98b4dc,2
+np.float32,0x7f6bd7bf,0x42b2aaff,2
+np.float32,0x3f84f79a,0x3e8e2e49,2
+np.float32,0x7e475b06,0x42af8f28,2
+np.float32,0x3faff89b,0x3f573d5e,2
+np.float32,0x7de5aa77,0x42ae74bb,2
+np.float32,0x3f8e9e42,0x3ef26cd2,2
+np.float32,0x3fb1cec3,0x3f5b1740,2
+np.float32,0x3f8890d6,0x3eba4821,2
+np.float32,0x3f9b39e9,0x3f242547,2
+np.float32,0x3fc895a4,0x3f829407,2
+np.float32,0x7f77943c,0x42b2c3dc,2
+np.float32,0x7f390d58,0x42b22ed2,2
+np.float32,0x3fe7e160,0x3f99ad58,2
+np.float32,0x3f93d2a0,0x3f0cb205,2
+np.float32,0x7f29499b,0x42b2013c,2
+np.float32,0x3f8c11b2,0x3edca10f,2
+np.float32,0x7e898ef8,0x42b03413,2
+np.float32,0x3fdff942,0x3f944f34,2
+np.float32,0x7f3d602f,0x42b23aa5,2
+np.float32,0x3f8a50f3,0x3ecc345b,2
+np.float32,0x3fa1f86d,0x3f369ce4,2
+np.float32,0x3f97ad95,0x3f19681d,2
+np.float32,0x3ffad1e0,0x3fa589e5,2
+np.float32,0x3fa70590,0x3f432311,2
+np.float32,0x7e6840cb,0x42afdd5c,2
+np.float32,0x3fd4036d,0x3f8ba0aa,2
+np.float32,0x7f7cc953,0x42b2ce84,2
+np.float32,0x7f228e1e,0x42b1ec74,2
+np.float32,0x7e37a866,0x42af652a,2
+np.float32,0x3fda22d0,0x3f9029a7,2
+np.float32,0x7f736bff,0x42b2bb31,2
+np.float32,0x3f9833b6,0x3f1b0b8e,2
+np.float32,0x7f466001,0x42b2526a,2
+np.float32,0xff7fffff,0xffc00000,2
+np.float32,0x7dd62bcd,0x42ae50f8,2
+np.float32,0x7f1d2bfe,0x42b1db36,2
+np.float32,0x7ecffe9e,0x42b107c5,2
+np.float32,0x7ebefe0a,0x42b0dc1b,2
+np.float32,0x7f45c63d,0x42b250dd,2
+np.float32,0x7f601af0,0x42b290db,2
+np.float32,0x3fcbb88a,0x3f8524e5,2
+np.float32,0x7ede55ff,0x42b129e8,2
+np.float32,0x7ea5dd5a,0x42b093e2,2
+np.float32,0x3ff53857,0x3fa22a12,2
+np.float32,0x3f8dbd6a,0x3eeb28a4,2
+np.float32,0x3fd1b467,0x3f89dd2c,2
+np.float32,0x3fe0423f,0x3f9481fc,2
+np.float32,0x3f84b421,0x3e8a6174,2
+np.float32,0x7f4efc97,0x42b2682c,2
+np.float32,0x7f601b33,0x42b290dc,2
+np.float32,0x3f94f240,0x3f108719,2
+np.float32,0x7decd251,0x42ae8471,2
+np.float32,0x3fdc457c,0x3f91b2e2,2
+np.float32,0x3f92a966,0x3f089c5a,2
+np.float32,0x3fc9732f,0x3f834afc,2
+np.float32,0x3f97948f,0x3f19194e,2
+np.float32,0x7f0824a1,0x42b191ac,2
+np.float32,0x7f0365a5,0x42b17f81,2
+np.float32,0x3f800000,0x0,2
+np.float32,0x7f0054c6,0x42b1736b,2
+np.float32,0x3fe86544,0x3f9a0484,2
+np.float32,0x7e95f844,0x42b0604e,2
+np.float32,0x3fce8602,0x3f8761e2,2
+np.float32,0x3fc726c8,0x3f81621d,2
+np.float32,0x3fcf6b03,0x3f88161b,2
+np.float32,0x3fceb843,0x3f87898a,2
+np.float32,0x3fe2f8b2,0x3f966071,2
+np.float32,0x7f3c8e7f,0x42b2386d,2
+np.float32,0x3fcee13a,0x3f87a9d2,2
+np.float32,0x3fc4df27,0x3f7ee73c,2
+np.float32,0x3ffde486,0x3fa758e3,2
+np.float32,0x3fa91be0,0x3f480b17,2
+np.float32,0x7f2a5a7d,0x42b20472,2
+np.float32,0x7e278d80,0x42af362d,2
+np.float32,0x3f96d091,0x3f16a9d5,2
+np.float32,0x7e925225,0x42b053b2,2
+np.float32,0x7f7ef83a,0x42b2d2ec,2
+np.float32,0x7eb4923a,0x42b0bf61,2
+np.float32,0x7e98bf19,0x42b069b3,2
+np.float32,0x3fac93a2,0x3f4fe410,2
+np.float32,0x7f46389c,0x42b25205,2
+np.float32,0x3f9fd447,0x3f30fd54,2
+np.float32,0x3fef42d4,0x3f9e7483,2
+np.float32,0x7f482174,0x42b256ed,2
+np.float32,0x3f97aedb,0x3f196c1e,2
+np.float32,0x7f764edd,0x42b2c13a,2
+np.float32,0x3f9117b5,0x3f02de5c,2
+np.float32,0x3fc7984e,0x3f81c12d,2
+np.float64,0x3ff1e2cb7463c597,0x3fdec6caf39e0c0e,2
+np.float64,0x3ffe4f89789c9f13,0x3ff40f4b1da0f3e9,2
+np.float64,0x7f6a5c9ac034b935,0x408605e51703c145,2
+np.float64,0x7fdcb6ece3b96dd9,0x40862d6521e16d60,2
+np.float64,0x3ff6563e182cac7c,0x3feb9d8210f3fa88,2
+np.float64,0x7fde32025f3c6404,0x40862dcc1d1a9b7f,2
+np.float64,0x7fd755ed35aeabd9,0x40862bbc5522b779,2
+np.float64,0x3ff5c81f4bcb903e,0x3fea71f10b954ea3,2
+np.float64,0x3fffe805d35fd00c,0x3ff50463a1ba2938,2
+np.float64,0x7fd045a1c1a08b43,0x408628d9f431f2f5,2
+np.float64,0x3ff49f7dd9893efc,0x3fe7c6736e17ea8e,2
+np.float64,0x7fccfbc1fd39f783,0x408627eca79acf51,2
+np.float64,0x3ff1af0a00035e14,0x3fdd1c0e7d5706ea,2
+np.float64,0x7fe7bd17162f7a2d,0x4086316af683502b,2
+np.float64,0x3ff0941b8d012837,0x3fd128d274065ac0,2
+np.float64,0x3ffa0c5d98b418bb,0x3ff11af9c8edd17f,2
+np.float64,0x3ffad9733355b2e6,0x3ff1b6d1307acb42,2
+np.float64,0x3ffabb2a33d57654,0x3ff1a0442b034e50,2
+np.float64,0x3ff36118b0c6c231,0x3fe472b7dfb23516,2
+np.float64,0x3ff2441d3664883a,0x3fe0d61145608f0c,2
+np.float64,0x7fe039862d20730b,0x40862e5f8ed752d3,2
+np.float64,0x7fb1dde24023bbc4,0x40861e824cdb0664,2
+np.float64,0x7face6335839cc66,0x40861ccf90a26e16,2
+np.float64,0x3ffb5d0e1af6ba1c,0x3ff2170f6f42fafe,2
+np.float64,0x3ff5c2c6a50b858d,0x3fea665aabf04407,2
+np.float64,0x3ffabb409db57681,0x3ff1a054ea32bfc3,2
+np.float64,0x3ff1e054e983c0aa,0x3fdeb30c17286cb6,2
+np.float64,0x7fe467f73268cfed,0x4086303529e52e9b,2
+np.float64,0x7fe0e86bf961d0d7,0x40862eb40788b04a,2
+np.float64,0x3ffb743542f6e86a,0x3ff227b4ea5acee0,2
+np.float64,0x3ff2de6826e5bcd0,0x3fe2e31fcde0a96c,2
+np.float64,0x7fd6b27ccfad64f9,0x40862b8385697c31,2
+np.float64,0x7fe0918e8d21231c,0x40862e8a82d9517a,2
+np.float64,0x7fd0ca0395a19406,0x4086291a0696ed33,2
+np.float64,0x3ffb042496960849,0x3ff1d658c928abfc,2
+np.float64,0x3ffcd0409799a081,0x3ff31877df0cb245,2
+np.float64,0x7fe429bd06685379,0x4086301c9f259934,2
+np.float64,0x3ff933076092660f,0x3ff06d2e5f4d9ab7,2
+np.float64,0x7feaefcb28f5df95,0x4086326dccf88e6f,2
+np.float64,0x7fb5f2c1f82be583,0x40862027ac02a39d,2
+np.float64,0x3ffb5d9e3bd6bb3c,0x3ff21777501d097e,2
+np.float64,0x10000000000000,0xfff8000000000000,2
+np.float64,0x3ff70361596e06c3,0x3fecf675ceda7e19,2
+np.float64,0x3ff71a21b5ee3444,0x3fed224fa048d9a9,2
+np.float64,0x3ffb102b86762057,0x3ff1df2cc9390518,2
+np.float64,0x7feaaeb35c355d66,0x4086325a60704a90,2
+np.float64,0x7fd9a3d0a93347a0,0x40862c7d300fc076,2
+np.float64,0x7fabcf159c379e2a,0x40861c80cdbbff27,2
+np.float64,0x7fd1c066ec2380cd,0x4086298c3006fee6,2
+np.float64,0x3ff3d5ae2d67ab5c,0x3fe5bc16447428db,2
+np.float64,0x3ff4b76add696ed6,0x3fe800f5bbf21376,2
+np.float64,0x3ff60d89ee0c1b14,0x3feb063fdebe1a68,2
+np.float64,0x7f1d2648003a4c8f,0x4085eaf9238af95a,2
+np.float64,0x7fe8b45f6df168be,0x408631bca5abf6d6,2
+np.float64,0x7fe9ea5308f3d4a5,0x4086321ea2bd3af9,2
+np.float64,0x7fcb6ba5a636d74a,0x4086277b208075ed,2
+np.float64,0x3ff621cfd74c43a0,0x3feb30d59baf5919,2
+np.float64,0x3ff7bc8ca0af7919,0x3fee524da8032896,2
+np.float64,0x7fda22dd0c3445b9,0x40862ca47326d063,2
+np.float64,0x7fd02ed4b2a05da8,0x408628ceb6919421,2
+np.float64,0x3ffe64309fdcc861,0x3ff41c1b18940709,2
+np.float64,0x3ffee4042abdc808,0x3ff46a6005bccb41,2
+np.float64,0x3ff078145b00f029,0x3fceeb3d6bfae0eb,2
+np.float64,0x7fda20fd20b441f9,0x40862ca3e03b990b,2
+np.float64,0x3ffa9e9e9af53d3d,0x3ff18ade3cbee789,2
+np.float64,0x3ff0a1062501420c,0x3fd1e32de6d18c0d,2
+np.float64,0x3ff3bdf118477be2,0x3fe57ad89b7fdf8b,2
+np.float64,0x3ff101c0d5c20382,0x3fd6965d3539be47,2
+np.float64,0x7feba3b53b774769,0x408632a28c7aca4d,2
+np.float64,0x3ff598db5d4b31b7,0x3fea0aa65c0b421a,2
+np.float64,0x3ff5fdfbb72bfbf8,0x3feae55accde4a5e,2
+np.float64,0x7fe5bae53aab75c9,0x408630b5e7a5b92a,2
+np.float64,0x3ff8f668afd1ecd2,0x3ff03af686666c9c,2
+np.float64,0x3ff5ba72dd2b74e6,0x3fea5441f223c093,2
+np.float64,0x3ff8498147109302,0x3fef4e45d501601d,2
+np.float64,0x7feddcfa5efbb9f4,0x4086334106a6e76b,2
+np.float64,0x7fd1a30200234603,0x4086297ee5cc562c,2
+np.float64,0x3ffffa8ee07ff51e,0x3ff50f1dc46f1303,2
+np.float64,0x7fef7ed00ebefd9f,0x408633ae01dabe52,2
+np.float64,0x3ffb6e062276dc0c,0x3ff22344c58c2016,2
+np.float64,0x7fcf2b59943e56b2,0x4086288190dd5eeb,2
+np.float64,0x3ffa589f9254b13f,0x3ff155cc081eee0b,2
+np.float64,0x3ff05415ca60a82c,0x3fc9e45565baef0a,2
+np.float64,0x7feb34bed576697d,0x408632822d5a178c,2
+np.float64,0x3ff3993845c73270,0x3fe51423baf246c3,2
+np.float64,0x3ff88367aaf106d0,0x3fefb2d9ca9f1192,2
+np.float64,0x7fef364304fe6c85,0x4086339b7ed82997,2
+np.float64,0x7fcba2c317374585,0x4086278b24e42934,2
+np.float64,0x3ff1aef885e35df1,0x3fdd1b79f55b20c0,2
+np.float64,0x7fe19367886326ce,0x40862f035f867445,2
+np.float64,0x3ff3c8295e279053,0x3fe5970aa670d32e,2
+np.float64,0x3ff6edda164ddbb4,0x3feccca9eb59d6b9,2
+np.float64,0x7fdeaea940bd5d52,0x40862dece02d151b,2
+np.float64,0x7fea9d6324353ac5,0x408632552ddf0d4f,2
+np.float64,0x7fe60e39e66c1c73,0x408630d45b1ad0c4,2
+np.float64,0x7fde06325abc0c64,0x40862dc07910038c,2
+np.float64,0x7f9ec89d303d9139,0x408617c55ea4c576,2
+np.float64,0x3ff9801930530032,0x3ff0abe5be046051,2
+np.float64,0x3ff4d5859689ab0b,0x3fe849a7f7a19fa3,2
+np.float64,0x3ff38afbc48715f8,0x3fe4ebb7710cbab9,2
+np.float64,0x3ffd88a0e77b1142,0x3ff3916964407e21,2
+np.float64,0x1,0xfff8000000000000,2
+np.float64,0x3ff5db59e58bb6b4,0x3fea9b6b5ccc116f,2
+np.float64,0x3ffd4b05b15a960c,0x3ff369792f661a90,2
+np.float64,0x7fdcebc4fb39d789,0x40862d73cd623378,2
+np.float64,0x3ff5b56f944b6adf,0x3fea4955d6b06ca3,2
+np.float64,0x7fd4e4abf2a9c957,0x40862ad9e9da3c61,2
+np.float64,0x7fe08e0d6aa11c1a,0x40862e88d17ef277,2
+np.float64,0x3ff0dfc97da1bf93,0x3fd50f9004136d8f,2
+np.float64,0x7fdec38eaebd871c,0x40862df2511e26b4,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0x3ff21865504430cb,0x3fe033fe3cf3947a,2
+np.float64,0x7fdc139708b8272d,0x40862d371cfbad03,2
+np.float64,0x7fe1fe3be3a3fc77,0x40862f336e3ba63a,2
+np.float64,0x7fd9fa2493b3f448,0x40862c97f2960be9,2
+np.float64,0x3ff0a027db414050,0x3fd1d6e54a707c87,2
+np.float64,0x3ff568b16f4ad163,0x3fe99f5c6d7b6e18,2
+np.float64,0x3ffe2f82877c5f05,0x3ff3fb54bd0da753,2
+np.float64,0x7fbaf5778435eaee,0x408621ccc9e2c1be,2
+np.float64,0x7fc5aaf8362b55ef,0x40862598e7072a49,2
+np.float64,0x7fe0ebfdd4a1d7fb,0x40862eb5b7bf99d5,2
+np.float64,0x7fd8efeb5931dfd6,0x40862c444636f408,2
+np.float64,0x3ff361a308c6c346,0x3fe4744cae63e6df,2
+np.float64,0x7fef287d39be50f9,0x40863397f65c807e,2
+np.float64,0x7fe72c4a14ae5893,0x4086313992e52082,2
+np.float64,0x3ffd1be44cba37c8,0x3ff34a9a45239eb9,2
+np.float64,0x3ff50369c18a06d4,0x3fe8b69319f091f1,2
+np.float64,0x3ffb333c25766678,0x3ff1f8c78eeb28f1,2
+np.float64,0x7fe12050416240a0,0x40862ece4e2f2f24,2
+np.float64,0x7fe348f5526691ea,0x40862fc16fbe7b6c,2
+np.float64,0x3ff343cc4d068799,0x3fe41c2a30cab7d2,2
+np.float64,0x7fd1b0daaa2361b4,0x408629852b3104ff,2
+np.float64,0x3ff6a41f37ad483e,0x3fec3b36ee6c6d4a,2
+np.float64,0x3ffad9439435b287,0x3ff1b6add9a1b3d7,2
+np.float64,0x7fbeb9a2f23d7345,0x408622d89ac1eaba,2
+np.float64,0x3ffab3d39fb567a7,0x3ff19ac75b4427f3,2
+np.float64,0x3ff890003ed12000,0x3fefc8844471c6ad,2
+np.float64,0x3ffc9f595e593eb2,0x3ff2f7a8699f06d8,2
+np.float64,0x7fe2224ef6e4449d,0x40862f43684a154a,2
+np.float64,0x3ffa67ba08d4cf74,0x3ff161525778df99,2
+np.float64,0x7fe87e24b570fc48,0x408631ab02b159fb,2
+np.float64,0x7fd6e99be92dd337,0x40862b96dba73685,2
+np.float64,0x7fe90f39fdf21e73,0x408631d9dbd36c1e,2
+np.float64,0x3ffb7806abd6f00e,0x3ff22a719b0f4c46,2
+np.float64,0x3ffa511ba3d4a238,0x3ff1500c124f6e17,2
+np.float64,0x3ff5d7a569abaf4b,0x3fea937391c280e8,2
+np.float64,0x7fc4279d20284f39,0x40862504a5cdcb96,2
+np.float64,0x3ffe8791b1fd0f24,0x3ff431f1ed7eaba0,2
+np.float64,0x7fe3b2f5276765e9,0x40862fecf15e2535,2
+np.float64,0x7feeab0e7abd561c,0x408633778044cfbc,2
+np.float64,0x7fdba88531375109,0x40862d1860306d7a,2
+np.float64,0x7fe7b19b3def6335,0x4086316716d6890b,2
+np.float64,0x3ff9e9437413d287,0x3ff0ff89431c748c,2
+np.float64,0x3ff960716a52c0e3,0x3ff092498028f802,2
+np.float64,0x3ff271bf56a4e37f,0x3fe1786fc8dd775d,2
+np.float64,0x3fff2a6578be54cb,0x3ff494bbe303eeb5,2
+np.float64,0x3ffd842eb5fb085e,0x3ff38e8b7ba42bc5,2
+np.float64,0x3ff91600e5d22c02,0x3ff0553c6a6b3d93,2
+np.float64,0x3ff9153f45f22a7e,0x3ff0549c0eaecf95,2
+np.float64,0x7fe0ab319da15662,0x40862e96da3b19f9,2
+np.float64,0x3ff06acd1f60d59a,0x3fcd2aca543d2772,2
+np.float64,0x3ffb3e7a54d67cf4,0x3ff200f288cd391b,2
+np.float64,0x3ffd01356f1a026b,0x3ff339003462a56c,2
+np.float64,0x3ffacd35def59a6c,0x3ff1adb8d32b3ec0,2
+np.float64,0x3ff6f953264df2a6,0x3fece2f992948d6e,2
+np.float64,0x3ff0fa91f5a1f524,0x3fd64609a28f1590,2
+np.float64,0x7fd1b7610ca36ec1,0x408629881e03dc7d,2
+np.float64,0x3ff4317fb7c86300,0x3fe6b086ed265887,2
+np.float64,0x3ff3856198070ac3,0x3fe4dbb6bc88b9e3,2
+np.float64,0x7fed7fc4573aff88,0x40863327e7013a81,2
+np.float64,0x3ffe53cbbf5ca798,0x3ff411f07a29b1f4,2
+np.float64,0x3ff092195b012433,0x3fd10b1c0b4b14fe,2
+np.float64,0x3ff1a3171163462e,0x3fdcb5c301d5d40d,2
+np.float64,0x3ffa1401f1742804,0x3ff120eb319e9faa,2
+np.float64,0x7fd352f6f426a5ed,0x40862a3a048feb6d,2
+np.float64,0x7fd4ee246fa9dc48,0x40862add895d808f,2
+np.float64,0x3ff0675cfa00ceba,0x3fccb2222c5493ca,2
+np.float64,0x3ffe5cb38f3cb967,0x3ff417773483d161,2
+np.float64,0x7fe11469ea2228d3,0x40862ec8bd3e497f,2
+np.float64,0x3fff13cba67e2798,0x3ff4872fe2c26104,2
+np.float64,0x3ffb73d3d316e7a8,0x3ff2276f08612ea2,2
+np.float64,0x7febfb70f237f6e1,0x408632bbc9450721,2
+np.float64,0x3ff84a0d87b0941b,0x3fef4f3b707e3145,2
+np.float64,0x7fd71fd5082e3fa9,0x40862ba9b4091172,2
+np.float64,0x3ff560737d8ac0e7,0x3fe98cc9c9ba2f61,2
+np.float64,0x3ff46a266ae8d44d,0x3fe74190e5234822,2
+np.float64,0x7fe8cc9225719923,0x408631c477db9708,2
+np.float64,0x3ff871de5930e3bc,0x3fef948f7d00fbef,2
+np.float64,0x3ffd0bc7895a178f,0x3ff33ffc18357721,2
+np.float64,0x3ff66099f9ccc134,0x3febb2bc775b4720,2
+np.float64,0x7fe91f1be9723e37,0x408631deec3a5c9e,2
+np.float64,0x7fd60462f12c08c5,0x40862b4537e1c1c6,2
+np.float64,0x3ff053100ba0a620,0x3fc9bc0c21e2284f,2
+np.float64,0x7fd864c611b0c98b,0x40862c1724506255,2
+np.float64,0x7fd191decb2323bd,0x408629771bfb68cc,2
+np.float64,0x3ff792a1656f2543,0x3fee054f2e135fcf,2
+np.float64,0x7fd03625cea06c4b,0x408628d253b840e3,2
+np.float64,0x7fc3967716272ced,0x408624ca35451042,2
+np.float64,0x7fe6636cb32cc6d8,0x408630f3073a22a7,2
+np.float64,0x3ffc2d3976585a73,0x3ff2a9d4c0dae607,2
+np.float64,0x3fffd10ee79fa21e,0x3ff4f70db69888be,2
+np.float64,0x3ff1d4fcae23a9f9,0x3fde57675007b23c,2
+np.float64,0x3ffa5da19e14bb43,0x3ff1599f74d1c113,2
+np.float64,0x3ff7f4eb0d6fe9d6,0x3feeb85189659e99,2
+np.float64,0x7fbcca44d8399489,0x408622536234f7c1,2
+np.float64,0x7fef5f97ec3ebf2f,0x408633a60fdde0d7,2
+np.float64,0x7fde4a66da3c94cd,0x40862dd290ebc184,2
+np.float64,0x3ff072957a40e52b,0x3fce34d913d87613,2
+np.float64,0x3ff2bc4c9dc57899,0x3fe27497e6ebe27d,2
+np.float64,0x7fd7d152b4afa2a4,0x40862be63469eecd,2
+np.float64,0x3ff957d768f2afaf,0x3ff08b4ad8062a73,2
+np.float64,0x7fe4bc5f45a978be,0x40863055fd66e4eb,2
+np.float64,0x7fc90de345321bc6,0x408626c24ce7e370,2
+np.float64,0x3ff2d7a37d85af47,0x3fe2cd6a40b544a0,2
+np.float64,0x7fe536ea1f6a6dd3,0x40863084bade76a3,2
+np.float64,0x3fff970c9cdf2e19,0x3ff4d524572356dd,2
+np.float64,0x3ffe173ae63c2e76,0x3ff3ec1ee35ad28c,2
+np.float64,0x3ff714025cce2805,0x3fed168aedff4a2b,2
+np.float64,0x7fce7b414c3cf682,0x40862853dcdd19d4,2
+np.float64,0x3ff019623f2032c4,0x3fbc7c602df0bbaf,2
+np.float64,0x3ff72f57fd0e5eb0,0x3fed4ae75f697432,2
+np.float64,0x3ff283778e8506ef,0x3fe1b5c5725b0dfd,2
+np.float64,0x3ff685a29aed0b45,0x3febfdfdedd581e2,2
+np.float64,0x3ff942d24fb285a4,0x3ff07a224c3ecfaf,2
+np.float64,0x3ff2e4a9f465c954,0x3fe2f71905399e8f,2
+np.float64,0x7fdfa1c7fa3f438f,0x40862e2b4e06f098,2
+np.float64,0x3ff49b59c26936b4,0x3fe7bc41c8c1e59d,2
+np.float64,0x3ff2102d3704205a,0x3fe014bf7e28924e,2
+np.float64,0x3ff88de3b8311bc8,0x3fefc4e3e0a15a89,2
+np.float64,0x7fea5ba25374b744,0x40863241519c9b66,2
+np.float64,0x3fffe5df637fcbbf,0x3ff5032488f570f9,2
+np.float64,0x7fe67cfefe6cf9fd,0x408630fc25333cb4,2
+np.float64,0x3ff090bf2b01217e,0x3fd0f6fcf1092b4a,2
+np.float64,0x7fecd75bc5f9aeb7,0x408632f9b6c2e013,2
+np.float64,0x7fe15df38c62bbe6,0x40862eeae5ac944b,2
+np.float64,0x3ff4757875a8eaf1,0x3fe75e0eafbe28ce,2
+np.float64,0x7fecca8a51b99514,0x408632f627c23923,2
+np.float64,0x3ff91ca529d2394a,0x3ff05abb327fd1ca,2
+np.float64,0x3ffb962993b72c53,0x3ff23ff831717579,2
+np.float64,0x3ffd548a2c7aa914,0x3ff36fac7f56d716,2
+np.float64,0x7fbafb5cb035f6b8,0x408621ce898a02fb,2
+np.float64,0x3ff1d86daca3b0db,0x3fde73536c29218c,2
+np.float64,0x7fa8d0f8f431a1f1,0x40861b97a03c3a18,2
+np.float64,0x3ff44f1067489e21,0x3fe6fcbd8144ab2a,2
+np.float64,0x7fec062b07380c55,0x408632bed9c6ce85,2
+np.float64,0x3ff7e11e0fcfc23c,0x3fee94ada7efaac4,2
+np.float64,0x7fe77505c1aeea0b,0x4086315287dda0ba,2
+np.float64,0x7fc465af2728cb5d,0x4086251d236107f7,2
+np.float64,0x3ffe811c4a7d0238,0x3ff42df7e8b6cf2d,2
+np.float64,0x7fe05a471260b48d,0x40862e6fa502738b,2
+np.float64,0x7fec32cd9778659a,0x408632cb8d98c5a3,2
+np.float64,0x7fd203a220a40743,0x408629aa43b010c0,2
+np.float64,0x7fed71f7d17ae3ef,0x4086332428207101,2
+np.float64,0x3ff3918999e72313,0x3fe4fe5e8991402f,2
+np.float64,0x3ff3ecae38c7d95c,0x3fe5fa787d887981,2
+np.float64,0x7fd65345b82ca68a,0x40862b61aed8c64e,2
+np.float64,0x3ff1efdd01c3dfba,0x3fdf2eae36139204,2
+np.float64,0x3ffba9344f375268,0x3ff24d7fdcfc313b,2
+np.float64,0x7fd0469b35208d35,0x408628da6ed24bdd,2
+np.float64,0x7fe525782daa4aef,0x4086307e240c8b30,2
+np.float64,0x3ff8e473d371c8e8,0x3ff02beebd4171c7,2
+np.float64,0x3ff59a43898b3487,0x3fea0dc0a6acea0a,2
+np.float64,0x7fef50c7263ea18d,0x408633a247d7cd42,2
+np.float64,0x7fe8b5a301f16b45,0x408631bd0e71c855,2
+np.float64,0x3ff209369de4126d,0x3fdff4264334446b,2
+np.float64,0x3ffbe2ff4437c5fe,0x3ff2763b356814c7,2
+np.float64,0x3ff55938156ab270,0x3fe97c70514f91bf,2
+np.float64,0x3fff5d8bf81ebb18,0x3ff4b333b230672a,2
+np.float64,0x3ff16a317bc2d463,0x3fdab84e7faa468f,2
+np.float64,0x3ff7e64f8dafcc9f,0x3fee9e0bd57e9566,2
+np.float64,0x7fef4dc065be9b80,0x408633a181e25abb,2
+np.float64,0x3ff64a24a62c9449,0x3feb849ced76437e,2
+np.float64,0x7fc3cb85ef27970b,0x408624dfc39c8f74,2
+np.float64,0x7fec2162a77842c4,0x408632c69b0d43b6,2
+np.float64,0x7feccee6dc399dcd,0x408632f75de98c46,2
+np.float64,0x7faff4f5f43fe9eb,0x40861d9d89be14c9,2
+np.float64,0x7fee82df60fd05be,0x4086336cfdeb7317,2
+np.float64,0x3ffe54588d9ca8b1,0x3ff41247eb2f75ca,2
+np.float64,0x3ffe5615b55cac2c,0x3ff4135c4eb11620,2
+np.float64,0x3ffdaf9a6a1b5f35,0x3ff3aa70e50d1692,2
+np.float64,0x3ff69c045f4d3809,0x3fec2b00734e2cde,2
+np.float64,0x7fd049239aa09246,0x408628dbad6dd995,2
+np.float64,0x3ff2acbe8465597d,0x3fe24138652195e1,2
+np.float64,0x3ffb288302365106,0x3ff1f0f86ca7e5d1,2
+np.float64,0x3fff6fe8d87edfd2,0x3ff4be136acf53c5,2
+np.float64,0x3ffc87c8bfb90f92,0x3ff2e7bbd65867cb,2
+np.float64,0x3ff173327ca2e665,0x3fdb0b945abb00d7,2
+np.float64,0x3ff9a5cf7a134b9f,0x3ff0ca2450f07c78,2
+np.float64,0x7faf782b043ef055,0x40861d7e0e9b35ef,2
+np.float64,0x3ffa0874975410e9,0x3ff117ee3dc8f5ba,2
+np.float64,0x7fc710fc7f2e21f8,0x40862618fed167fb,2
+np.float64,0x7feb73f4c876e7e9,0x40863294ae3ac1eb,2
+np.float64,0x8000000000000000,0xfff8000000000000,2
+np.float64,0x7fb46615c028cc2b,0x40861f91bade4dad,2
+np.float64,0x7fc26b064624d60c,0x4086244c1b76c938,2
+np.float64,0x3ff06ab9fa40d574,0x3fcd282fd971d1b4,2
+np.float64,0x3ff61da7410c3b4e,0x3feb28201031af02,2
+np.float64,0x3ffec7ba1b9d8f74,0x3ff459342511f952,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x7fe5d570422baae0,0x408630bfa75008c9,2
+np.float64,0x3ffa895832f512b0,0x3ff17ad41555dccb,2
+np.float64,0x7fd343ac21a68757,0x40862a33ad59947a,2
+np.float64,0x3ffc1eeb37383dd6,0x3ff29ff29e55a006,2
+np.float64,0x7fee3c5c507c78b8,0x4086335a6b768090,2
+np.float64,0x7fe96d774a32daee,0x408631f7b9937e36,2
+np.float64,0x7fb878362430f06b,0x40862106603497b6,2
+np.float64,0x7fec0a79c03814f3,0x408632c01479905e,2
+np.float64,0x3ffa2f143c145e28,0x3ff135e25d902e1a,2
+np.float64,0x3ff14ccff80299a0,0x3fd9a0cd3397b14c,2
+np.float64,0x3ff97980dcb2f302,0x3ff0a6942a8133ab,2
+np.float64,0x3ff872e2d1f0e5c6,0x3fef96526eb2f756,2
+np.float64,0x7fdf1c9b46be3936,0x40862e0957fee329,2
+np.float64,0x7fcab6525d356ca4,0x408627458791f029,2
+np.float64,0x3ff964e74a52c9ce,0x3ff095e8845d523c,2
+np.float64,0x3ffb3aa23c967544,0x3ff1fe282d897c13,2
+np.float64,0x7fdd8a36afbb146c,0x40862d9f2b05f61b,2
+np.float64,0x3ffea39f42fd473e,0x3ff4432a48176399,2
+np.float64,0x7fea614f68b4c29e,0x408632430a750385,2
+np.float64,0x7feeafb86abd5f70,0x40863378b79f70cf,2
+np.float64,0x3ff80bc94eb01792,0x3feee138e9d626bd,2
+np.float64,0x7fcaca74743594e8,0x4086274b8ce4d1e1,2
+np.float64,0x3ff8b14815316290,0x3ff000b3526c8321,2
+np.float64,0x7fc698eb5f2d31d6,0x408625eeec86cd2b,2
+np.float64,0x7fe15429a3e2a852,0x40862ee6621205b8,2
+np.float64,0x7fee37f81b7c6fef,0x4086335941ed80dd,2
+np.float64,0x3ff8097ab3f012f6,0x3feedd1bafc3196e,2
+np.float64,0x7fe7c889ceaf9113,0x4086316ed13f2394,2
+np.float64,0x7fceca94513d9528,0x4086286893a06824,2
+np.float64,0x3ff593a103cb2742,0x3fe9ff1af4f63cc9,2
+np.float64,0x7fee237d24bc46f9,0x40863353d4142c87,2
+np.float64,0x3ffbf71e4777ee3c,0x3ff2844c0ed9f4d9,2
+np.float64,0x3ff490c65c09218d,0x3fe7a2216d9f69fd,2
+np.float64,0x3fff5ceaf1feb9d6,0x3ff4b2d430a90110,2
+np.float64,0x3ff55baecceab75e,0x3fe98203980666c4,2
+np.float64,0x3ff511bc306a2378,0x3fe8d81ce7be7b50,2
+np.float64,0x3ff38f83dcc71f08,0x3fe4f89f130d5f87,2
+np.float64,0x3ff73a3676ee746d,0x3fed5f98a65107ee,2
+np.float64,0x7fc27e50c824fca1,0x408624547828bc49,2
+np.float64,0xfff0000000000000,0xfff8000000000000,2
+np.float64,0x3fff38959ebe712b,0x3ff49d362c7ba16a,2
+np.float64,0x3ffad6d23a75ada4,0x3ff1b4dda6394ed0,2
+np.float64,0x3ffe77c6c2dcef8e,0x3ff4283698835ecb,2
+np.float64,0x3fff5feb413ebfd6,0x3ff4b49bcbdb3aa9,2
+np.float64,0x3ff0d30aa161a615,0x3fd4751bcdd7d727,2
+np.float64,0x3ff51e07e00a3c10,0x3fe8f4bd1408d694,2
+np.float64,0x8010000000000000,0xfff8000000000000,2
+np.float64,0x7fd231d2fe2463a5,0x408629beaceafcba,2
+np.float64,0x3fff6b4aee1ed696,0x3ff4bb58544bf8eb,2
+np.float64,0x3ff91fcd2f323f9a,0x3ff05d56e33db6b3,2
+np.float64,0x3ff3b889ab477113,0x3fe56bdeab74cce5,2
+np.float64,0x3ff99bfe30d337fc,0x3ff0c24bbf265561,2
+np.float64,0x3ffbe9e5eaf7d3cc,0x3ff27b0fe60f827a,2
+np.float64,0x7fd65678e92cacf1,0x40862b62d44fe8b6,2
+np.float64,0x7fd9cc477233988e,0x40862c89c638ee48,2
+np.float64,0x3ffc123c72d82479,0x3ff297294d05cbc0,2
+np.float64,0x3ff58abad58b1576,0x3fe9eb65da2a867a,2
+np.float64,0x7fe534887b2a6910,0x40863083d4ec2877,2
+np.float64,0x7fe1d3dcb123a7b8,0x40862f208116c55e,2
+np.float64,0x7fd4d570dba9aae1,0x40862ad412c413cd,2
+np.float64,0x3fffce7d3fdf9cfa,0x3ff4f58f02451928,2
+np.float64,0x3ffa76901c74ed20,0x3ff16c9a5851539c,2
+np.float64,0x7fdd88ffa23b11fe,0x40862d9ed6c6f426,2
+np.float64,0x3ff09fdbb9e13fb7,0x3fd1d2ae4fcbf713,2
+np.float64,0x7fe64567772c8ace,0x408630e845dbc290,2
+np.float64,0x7fb1a849ba235092,0x40861e6a291535b2,2
+np.float64,0x3ffaddb105f5bb62,0x3ff1b9f68f4c419b,2
+np.float64,0x7fd2fc3d5025f87a,0x40862a15cbc1df75,2
+np.float64,0x7fdea7d872bd4fb0,0x40862deb190b2c50,2
+np.float64,0x7fd50ea97eaa1d52,0x40862ae9edc4c812,2
+np.float64,0x3fff659c245ecb38,0x3ff4b7fb18b31aea,2
+np.float64,0x3ff3f1fbb7c7e3f7,0x3fe608bd9d76268c,2
+np.float64,0x3ff76869d9aed0d4,0x3fedb6c23d3a317b,2
+np.float64,0x7fedd4efe93ba9df,0x4086333edeecaa43,2
+np.float64,0x3ff9a5bd4eb34b7a,0x3ff0ca15d02bc960,2
+np.float64,0x3ffd9359cc5b26b4,0x3ff39850cb1a6b6c,2
+np.float64,0x7fe912d0427225a0,0x408631db00e46272,2
+np.float64,0x3ffb3802fe567006,0x3ff1fc4093646465,2
+np.float64,0x3ff02cc38a205987,0x3fc2e8182802a07b,2
+np.float64,0x3ffda953dd1b52a8,0x3ff3a66c504cf207,2
+np.float64,0x7fe0a487e4a1490f,0x40862e93a6f20152,2
+np.float64,0x7fed265ed1fa4cbd,0x4086330f838ae431,2
+np.float64,0x7fd0000114200001,0x408628b76ec48b5c,2
+np.float64,0x3ff2c262786584c5,0x3fe288860d354b0f,2
+np.float64,0x8000000000000001,0xfff8000000000000,2
+np.float64,0x3ffdae9f075b5d3e,0x3ff3a9d006ae55c1,2
+np.float64,0x3ffb69c72156d38e,0x3ff22037cbb85e5b,2
+np.float64,0x7feeae255f7d5c4a,0x408633784e89bc05,2
+np.float64,0x7feb13927c362724,0x408632786630c55d,2
+np.float64,0x7fef49e072be93c0,0x408633a08451d476,2
+np.float64,0x3fff23d6337e47ac,0x3ff490ceb6e634ae,2
+np.float64,0x3ffba82cf8f7505a,0x3ff24cc51c73234d,2
+np.float64,0x7fe948719ef290e2,0x408631ec0b36476e,2
+np.float64,0x3ff41926c5e8324e,0x3fe670e14bbda8cd,2
+np.float64,0x3ff91f09c1523e14,0x3ff05cb5731878da,2
+np.float64,0x3ff6ae6afccd5cd6,0x3fec4fbeca764086,2
+np.float64,0x3ff927f7e0f24ff0,0x3ff06413eeb8eb1e,2
+np.float64,0x3ff19dd2b9e33ba5,0x3fdc882f97994600,2
+np.float64,0x7fe8e502c5b1ca05,0x408631cc56526fff,2
+np.float64,0x7feb49f70fb693ed,0x4086328868486fcd,2
+np.float64,0x3ffd942d535b285a,0x3ff398d8d89f52ca,2
+np.float64,0x7fc3b9c5c627738b,0x408624d893e692ca,2
+np.float64,0x7fea0780ff340f01,0x408632279fa46704,2
+np.float64,0x7fe4c90066a99200,0x4086305adb47a598,2
+np.float64,0x7fdb209113364121,0x40862cf0ab64fd7d,2
+np.float64,0x3ff38617e5470c30,0x3fe4ddc0413b524f,2
+np.float64,0x7fea1b5b803436b6,0x4086322db767f091,2
+np.float64,0x7fe2004898e40090,0x40862f3457795dc5,2
+np.float64,0x3ff3c4360ac7886c,0x3fe58c29843a4c75,2
+np.float64,0x3ff504bc168a0978,0x3fe8b9ada7f698e6,2
+np.float64,0x3ffd3e936fda7d27,0x3ff3615912c5b4ac,2
+np.float64,0x3ffbdc52fb97b8a6,0x3ff2718dae5f1f2b,2
+np.float64,0x3fffef6d84ffdedb,0x3ff508adbc8556cf,2
+np.float64,0x3ff23b65272476ca,0x3fe0b646ed2579eb,2
+np.float64,0x7fe4633068a8c660,0x408630334a4b7ff7,2
+np.float64,0x3ff769b754aed36f,0x3fedb932af0223f9,2
+np.float64,0x7fe7482d92ee905a,0x408631432de1b057,2
+np.float64,0x3ff5dd682aabbad0,0x3fea9fd5e506a86d,2
+np.float64,0x7fd68399a2ad0732,0x40862b72ed89805d,2
+np.float64,0x3ffad7acc3d5af5a,0x3ff1b57fe632c948,2
+np.float64,0x3ffc68e43698d1c8,0x3ff2d2be6f758761,2
+np.float64,0x3ff4e517fbc9ca30,0x3fe86eddf5e63a58,2
+np.float64,0x3ff34c63c56698c8,0x3fe435b74ccd6a13,2
+np.float64,0x7fea9456c17528ad,0x4086325275237015,2
+np.float64,0x7fee6573f2fccae7,0x4086336543760346,2
+np.float64,0x7fd5496fb9aa92de,0x40862b0023235667,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0x3ffb70e31256e1c6,0x3ff22552f54b13e0,2
+np.float64,0x3ff66a33988cd467,0x3febc656da46a1ca,2
+np.float64,0x3fff0af2eb1e15e6,0x3ff481dec325f5c8,2
+np.float64,0x3ff6a0233d0d4046,0x3fec33400958eda1,2
+np.float64,0x7fdb11e2d5b623c5,0x40862cec55e405f9,2
+np.float64,0x3ffb8a015ad71402,0x3ff2374d7b563a72,2
+np.float64,0x3ff1807d8ce300fb,0x3fdb849e4bce8335,2
+np.float64,0x3ffefd535e3dfaa6,0x3ff479aaac6ffe79,2
+np.float64,0x3ff701e23a6e03c4,0x3fecf39072d96fc7,2
+np.float64,0x3ff4ac809f895901,0x3fe7e6598f2335a5,2
+np.float64,0x3ff0309f26a0613e,0x3fc3b3f4b2783690,2
+np.float64,0x3ff241dd0ce483ba,0x3fe0cde2cb639144,2
+np.float64,0x3ffabce63fb579cc,0x3ff1a18fe2a2da59,2
+np.float64,0x3ffd84b967db0973,0x3ff38ee4f240645d,2
+np.float64,0x7fc3f88b9a27f116,0x408624f1e10cdf3f,2
+np.float64,0x7fe1d5fd5923abfa,0x40862f2175714a3a,2
+np.float64,0x7fe487b145690f62,0x4086304190700183,2
+np.float64,0x7fe7997feaef32ff,0x4086315eeefdddd2,2
+np.float64,0x3ff8f853b671f0a8,0x3ff03c907353a8da,2
+np.float64,0x7fca4c23b5349846,0x408627257ace5778,2
+np.float64,0x7fe0c9bf3a21937d,0x40862ea576c3ea43,2
+np.float64,0x7fc442b389288566,0x4086250f5f126ec9,2
+np.float64,0x7fc6d382ed2da705,0x40862603900431b0,2
+np.float64,0x7fe40b069068160c,0x4086301066468124,2
+np.float64,0x3ff7f62a146fec54,0x3feeba8dfc4363fe,2
+np.float64,0x3ff721e8e94e43d2,0x3fed313a6755d34f,2
+np.float64,0x7fe579feaf2af3fc,0x4086309ddefb6112,2
+np.float64,0x3ffe2c6bde5c58d8,0x3ff3f9665dc9a16e,2
+np.float64,0x7fcf9998ed3f3331,0x4086289dab274788,2
+np.float64,0x7fdb03af2236075d,0x40862ce82252e490,2
+np.float64,0x7fe72799392e4f31,0x40863137f428ee71,2
+np.float64,0x7f9f2190603e4320,0x408617dc5b3b3c3c,2
+np.float64,0x3ff69c56d52d38ae,0x3fec2ba59fe938b2,2
+np.float64,0x7fdcde27bf39bc4e,0x40862d70086cd06d,2
+np.float64,0x3ff654d6b8eca9ae,0x3feb9aa0107609a6,2
+np.float64,0x7fdf69d967bed3b2,0x40862e1d1c2b94c2,2
+np.float64,0xffefffffffffffff,0xfff8000000000000,2
+np.float64,0x7fedfd073f3bfa0d,0x40863349980c2c8b,2
+np.float64,0x7f7c1856803830ac,0x40860bf312b458c7,2
+np.float64,0x7fe9553f1bb2aa7d,0x408631f0173eadd5,2
+np.float64,0x3ff6e92efc2dd25e,0x3fecc38f98e7e1a7,2
+np.float64,0x7fe9719ac532e335,0x408631f906cd79c3,2
+np.float64,0x3ff60e56ae4c1cad,0x3feb07ef8637ec7e,2
+np.float64,0x3ff0d0803501a100,0x3fd455c0af195a9c,2
+np.float64,0x7fe75248a3eea490,0x40863146a614aec1,2
+np.float64,0x7fdff61ead3fec3c,0x40862e408643d7aa,2
+np.float64,0x7fed4ac7a4fa958e,0x408633197b5cf6ea,2
+np.float64,0x7fe58d44562b1a88,0x408630a5098d1bbc,2
+np.float64,0x7fd89dcdb1b13b9a,0x40862c29c2979288,2
+np.float64,0x3ff205deda240bbe,0x3fdfda67c84fd3a8,2
+np.float64,0x7fdf84c15abf0982,0x40862e23f361923d,2
+np.float64,0x3ffe012b3afc0256,0x3ff3de3dfa5f47ce,2
+np.float64,0x3ffe2f3512dc5e6a,0x3ff3fb245206398e,2
+np.float64,0x7fed6174c2bac2e9,0x4086331faa699617,2
+np.float64,0x3ff1f30f8783e61f,0x3fdf47e06f2c40d1,2
+np.float64,0x3ff590da9eab21b5,0x3fe9f8f7b4baf3c2,2
+np.float64,0x3ffb3ca1eb967944,0x3ff1ff9baf66d704,2
+np.float64,0x7fe50ba9a5aa1752,0x408630745ab7fd3c,2
+np.float64,0x3ff43743a4a86e87,0x3fe6bf7ae80b1dda,2
+np.float64,0x3ff47e1a24e8fc34,0x3fe773acca44c7d6,2
+np.float64,0x3ff589ede9eb13dc,0x3fe9e99f28fab3a4,2
+np.float64,0x3ff72f2cbf8e5e5a,0x3fed4a94e7edbf24,2
+np.float64,0x3ffa4f9bbc549f38,0x3ff14ee60aea45d3,2
+np.float64,0x3ff975dae732ebb6,0x3ff0a3a1fbd7284a,2
+np.float64,0x7fbcf14ee039e29d,0x4086225e33f3793e,2
+np.float64,0x3ff10e027f621c05,0x3fd71cce2452b4e0,2
+np.float64,0x3ff33ea193067d43,0x3fe40cbac4daaddc,2
+np.float64,0x7fbef8f2263df1e3,0x408622e905c8e1b4,2
+np.float64,0x3fff7f5bfe3efeb8,0x3ff4c732e83df253,2
+np.float64,0x3ff5700a6b4ae015,0x3fe9afdd7b8b82b0,2
+np.float64,0x3ffd5099da5aa134,0x3ff36d1bf26e55bf,2
+np.float64,0x3ffed8e0f89db1c2,0x3ff4639ff065107a,2
+np.float64,0x3fff9d0c463f3a18,0x3ff4d8a9f297cf52,2
+np.float64,0x3ff23db5b2e47b6b,0x3fe0bebdd48f961a,2
+np.float64,0x3ff042bff1e08580,0x3fc713bf24cc60ef,2
+np.float64,0x7feb4fe97a769fd2,0x4086328a26675646,2
+np.float64,0x3ffeafbfeedd5f80,0x3ff44a955a553b1c,2
+np.float64,0x3ff83fb524507f6a,0x3fef3d1729ae0976,2
+np.float64,0x3ff1992294433245,0x3fdc5f5ce53dd197,2
+np.float64,0x7fe89fe629b13fcb,0x408631b601a83867,2
+np.float64,0x7fe53e4d74aa7c9a,0x40863087839b52f1,2
+np.float64,0x3ff113713e6226e2,0x3fd757631ca7cd09,2
+np.float64,0x7fd4a0b7a629416e,0x40862abfba27a09b,2
+np.float64,0x3ff184c6e2a3098e,0x3fdbab2e3966ae57,2
+np.float64,0x3ffafbbf77f5f77f,0x3ff1d02bb331d9f9,2
+np.float64,0x3ffc6099a358c134,0x3ff2cd16941613d1,2
+np.float64,0x3ffb7c441ef6f888,0x3ff22d7b12e31432,2
+np.float64,0x3ff625ba5eec4b75,0x3feb39060e55fb79,2
+np.float64,0x7fde879acbbd0f35,0x40862de2aab4d72d,2
+np.float64,0x7f930aed982615da,0x408613edb6df8528,2
+np.float64,0x7fa4b82dac29705a,0x40861a261c0a9aae,2
+np.float64,0x7fced5c16b3dab82,0x4086286b7a73e611,2
+np.float64,0x7fe133749d2266e8,0x40862ed73a41b112,2
+np.float64,0x3ff2d8146ea5b029,0x3fe2ced55dbf997d,2
+np.float64,0x3ff60dac77ac1b59,0x3feb0688b0e54c7b,2
+np.float64,0x3ff275d9b024ebb3,0x3fe186b87258b834,2
+np.float64,0x3ff533e6500a67cd,0x3fe92746c8b50ddd,2
+np.float64,0x7fe370896666e112,0x40862fd1ca144736,2
+np.float64,0x7fee7695357ced29,0x40863369c459420e,2
+np.float64,0x7fd1e0528023c0a4,0x4086299a85caffd0,2
+np.float64,0x7fd05c7b24a0b8f5,0x408628e52824386f,2
+np.float64,0x3ff11dcc3b023b98,0x3fd7c56c8cef1be1,2
+np.float64,0x7fc9d9fae933b3f5,0x408627027404bc5f,2
+np.float64,0x7fe2359981246b32,0x40862f4be675e90d,2
+np.float64,0x3ffb10a949962152,0x3ff1df88f83b8cde,2
+np.float64,0x3ffa65b53654cb6a,0x3ff15fc8956ccc87,2
+np.float64,0x3ff0000000000000,0x0,2
+np.float64,0x7fad97ef703b2fde,0x40861d002f3d02da,2
+np.float64,0x3ff57aaf93aaf55f,0x3fe9c7b01f194edb,2
+np.float64,0x7fe9ecd73f33d9ad,0x4086321f69917205,2
+np.float64,0x3ff0dcb79c61b96f,0x3fd4eac86a7a9c38,2
+np.float64,0x7fee9c12ffbd3825,0x4086337396cd706d,2
+np.float64,0x3ff52c40af4a5881,0x3fe915a8a7de8f00,2
+np.float64,0x3ffbcfff59779ffe,0x3ff268e523fe8dda,2
+np.float64,0x7fe014cb4b602996,0x40862e4d5de42a03,2
+np.float64,0x7fae2370e83c46e1,0x40861d258dd5b3ee,2
+np.float64,0x7fe9e33602f3c66b,0x4086321c704ac2bb,2
+np.float64,0x3ff648acd74c915a,0x3feb8195ca53bcaa,2
+np.float64,0x7fe385f507670be9,0x40862fda95ebaf44,2
+np.float64,0x3ffb0e382c361c70,0x3ff1ddbea963e0a7,2
+np.float64,0x3ff47d6b6ae8fad7,0x3fe771f80ad37cd2,2
+np.float64,0x3ffca7d538f94faa,0x3ff2fd5f62e851ac,2
+np.float64,0x3ff83e949c107d29,0x3fef3b1c5bbac99b,2
+np.float64,0x7fc6fb933a2df725,0x408626118e51a286,2
+np.float64,0x7fe43a1454e87428,0x4086302318512d9b,2
+np.float64,0x7fe51fe32aaa3fc5,0x4086307c07271348,2
+np.float64,0x3ff35e563966bcac,0x3fe46aa2856ef85f,2
+np.float64,0x3ff84dd4e4909baa,0x3fef55d86d1d5c2e,2
+np.float64,0x7febe3d84077c7b0,0x408632b507686f03,2
+np.float64,0x3ff6aca2e32d5946,0x3fec4c32a2368ee3,2
+np.float64,0x7fe7070e3e6e0e1b,0x4086312caddb0454,2
+np.float64,0x7fd3657f2aa6cafd,0x40862a41acf47e70,2
+np.float64,0x3ff61534456c2a68,0x3feb1663900af13b,2
+np.float64,0x3ff8bc556eb178ab,0x3ff00a16b5403f88,2
+np.float64,0x3ffa7782e3f4ef06,0x3ff16d529c94a438,2
+np.float64,0x7fc15785ed22af0b,0x408623d0cd94fb86,2
+np.float64,0x3ff2e3eeb6e5c7dd,0x3fe2f4c4876d3edf,2
+np.float64,0x3ff2e4e17e85c9c3,0x3fe2f7c9e437b22e,2
+np.float64,0x7feb3aaf67f6755e,0x40863283ec4a0d76,2
+np.float64,0x7fe89efcf7313df9,0x408631b5b5e41263,2
+np.float64,0x3ffcc6fad4f98df6,0x3ff31245778dff6d,2
+np.float64,0x3ff356114466ac22,0x3fe45253d040a024,2
+np.float64,0x3ff81c70d2d038e2,0x3feefed71ebac776,2
+np.float64,0x7fdb75c96136eb92,0x40862d09a603f03e,2
+np.float64,0x3ff340f91b8681f2,0x3fe413bb6e6d4a54,2
+np.float64,0x3fff906079df20c1,0x3ff4d13869d16bc7,2
+np.float64,0x3ff226a42d644d48,0x3fe0698d316f1ac0,2
+np.float64,0x3ff948abc3b29158,0x3ff07eeb0b3c81ba,2
+np.float64,0x3ffc25df1fb84bbe,0x3ff2a4c13ad4edad,2
+np.float64,0x7fe07ea3b960fd46,0x40862e815b4cf43d,2
+np.float64,0x3ff497d3dae92fa8,0x3fe7b3917bf10311,2
+np.float64,0x7fea561db1f4ac3a,0x4086323fa4aef2a9,2
+np.float64,0x7fd1b49051236920,0x40862986d8759ce5,2
+np.float64,0x7f7ba3bd6037477a,0x40860bd19997fd90,2
+np.float64,0x3ff01126dd00224e,0x3fb76b67938dfb11,2
+np.float64,0x3ff29e1105053c22,0x3fe2102a4c5fa102,2
+np.float64,0x3ff9de2a6553bc55,0x3ff0f6cfe4dea30e,2
+np.float64,0x7fc558e7d42ab1cf,0x4086257a608fc055,2
+np.float64,0x3ff79830a74f3061,0x3fee0f93db153d65,2
+np.float64,0x7fe2661648e4cc2c,0x40862f6117a71eb2,2
+np.float64,0x3ff140cf4262819e,0x3fd92aefedae1ab4,2
+np.float64,0x3ff5f36251abe6c5,0x3feaced481ceaee3,2
+np.float64,0x7fc80911d5301223,0x4086266d4757f768,2
+np.float64,0x3ff9079a6c320f35,0x3ff04949d21ebe1e,2
+np.float64,0x3ffde8d2e09bd1a6,0x3ff3cedca8a5db5d,2
+np.float64,0x3ffadd1de375ba3c,0x3ff1b989790e8d93,2
+np.float64,0x3ffdbc40ee1b7882,0x3ff3b286b1c7da57,2
+np.float64,0x3ff8ff514771fea2,0x3ff04264add00971,2
+np.float64,0x7fefd7d0e63fafa1,0x408633c47d9f7ae4,2
+np.float64,0x3ffc47798c588ef3,0x3ff2bbe441fa783a,2
+np.float64,0x7fe6ebc55b6dd78a,0x408631232d9abf31,2
+np.float64,0xbff0000000000000,0xfff8000000000000,2
+np.float64,0x7fd378e4afa6f1c8,0x40862a49a8f98cb4,2
+np.float64,0x0,0xfff8000000000000,2
+np.float64,0x3ffe88ed7efd11db,0x3ff432c7ecb95492,2
+np.float64,0x3ff4f5509289eaa1,0x3fe8955a11656323,2
+np.float64,0x7fda255b41344ab6,0x40862ca53676a23e,2
+np.float64,0x3ffebe85b9bd7d0c,0x3ff453992cd55dea,2
+np.float64,0x3ff5d6180b8bac30,0x3fea901c2160c3bc,2
+np.float64,0x3ffcdfb8fcf9bf72,0x3ff322c83b3bc735,2
+np.float64,0x3ff3c91c26679238,0x3fe599a652b7cf59,2
+np.float64,0x7fc389f7a62713ee,0x408624c518edef93,2
+np.float64,0x3ffe1245ba1c248c,0x3ff3e901b2c4a47a,2
+np.float64,0x7fe1e76e95e3cedc,0x40862f29446f9eff,2
+np.float64,0x3ff02ae4f92055ca,0x3fc28221abd63daa,2
+np.float64,0x7fbf648a143ec913,0x40862304a0619d03,2
+np.float64,0x3ff2be7ef8657cfe,0x3fe27bcc6c97522e,2
+np.float64,0x3ffa7595e514eb2c,0x3ff16bdc64249ad1,2
+np.float64,0x3ff4ee130049dc26,0x3fe884354cbad8c9,2
+np.float64,0x3ff19211fc232424,0x3fdc2160bf3eae40,2
+np.float64,0x3ffec215aedd842c,0x3ff455c4cdd50c32,2
+np.float64,0x7fe7cb50ffaf96a1,0x4086316fc06a53af,2
+np.float64,0x3fffa679161f4cf2,0x3ff4de30ba7ac5b8,2
+np.float64,0x7fdcb459763968b2,0x40862d646a21011d,2
+np.float64,0x3ff9f338d6d3e672,0x3ff1075835d8f64e,2
+np.float64,0x3ff8de3319d1bc66,0x3ff026ae858c0458,2
+np.float64,0x7fee0199d33c0333,0x4086334ad03ac683,2
+np.float64,0x3ffc06076c380c0f,0x3ff28eaec3814faa,2
+np.float64,0x3ffe9e2e235d3c5c,0x3ff43fd4d2191a7f,2
+np.float64,0x3ffd93b06adb2761,0x3ff398888239cde8,2
+np.float64,0x7fefe4b71cffc96d,0x408633c7ba971b92,2
+np.float64,0x7fb2940352252806,0x40861ed244bcfed6,2
+np.float64,0x3ffba4647e3748c9,0x3ff24a15f02e11b9,2
+np.float64,0x7fd2d9543725b2a7,0x40862a0708446596,2
+np.float64,0x7fc04997f120932f,0x4086235055d35251,2
+np.float64,0x3ff6d14313ada286,0x3fec94b177f5d3fc,2
+np.float64,0x3ff279fc8684f3f9,0x3fe19511c3e5b9a8,2
+np.float64,0x3ff42f4609085e8c,0x3fe6aabe526ce2bc,2
+np.float64,0x7fc1c6c62a238d8b,0x408624037de7f6ec,2
+np.float64,0x7fe31ff4b8e63fe8,0x40862fb05b40fd16,2
+np.float64,0x7fd2a8825fa55104,0x408629f234d460d6,2
+np.float64,0x3ffe8c1d725d183b,0x3ff434bdc444143f,2
+np.float64,0x3ff0e9dc3e21d3b8,0x3fd58676e2c13fc9,2
+np.float64,0x3ffed03172fda063,0x3ff45e59f7aa6c8b,2
+np.float64,0x7fd74621962e8c42,0x40862bb6e90d66f8,2
+np.float64,0x3ff1faa29663f545,0x3fdf833a2c5efde1,2
+np.float64,0x7fda02834db40506,0x40862c9a860d6747,2
+np.float64,0x7f709b2fc021365f,0x408607be328eb3eb,2
+np.float64,0x7fec0d58aa381ab0,0x408632c0e61a1af6,2
+np.float64,0x3ff524d1720a49a3,0x3fe90479968d40fd,2
+np.float64,0x7fd64cb3b32c9966,0x40862b5f53c4b0b4,2
+np.float64,0x3ff9593e3ed2b27c,0x3ff08c6eea5f6e8b,2
+np.float64,0x3ff7de8b1f6fbd16,0x3fee9007abcfdf7b,2
+np.float64,0x7fe8d816d6b1b02d,0x408631c82e38a894,2
+np.float64,0x7fd726bbe22e4d77,0x40862bac16ee8d52,2
+np.float64,0x7fa70b07d42e160f,0x40861affcc4265e2,2
+np.float64,0x7fe18b4091e31680,0x40862effa8bce66f,2
+np.float64,0x3ff830253010604a,0x3fef21b2eaa75758,2
+np.float64,0x3fffcade407f95bc,0x3ff4f3734b24c419,2
+np.float64,0x3ff8c17cecb182fa,0x3ff00e75152d7bda,2
+np.float64,0x7fdad9b9d035b373,0x40862cdbabb793ba,2
+np.float64,0x3ff9f9e154f3f3c2,0x3ff10c8dfdbd2510,2
+np.float64,0x3ff465e162e8cbc3,0x3fe736c751c75b73,2
+np.float64,0x3ff9b4cd8493699b,0x3ff0d616235544b8,2
+np.float64,0x7fe557c4a56aaf88,0x4086309114ed12d9,2
+np.float64,0x7fe5999133eb3321,0x408630a9991a9b54,2
+np.float64,0x7fe7c9009e2f9200,0x4086316ef9359a47,2
+np.float64,0x3ff8545cabd0a8ba,0x3fef6141f1030c36,2
+np.float64,0x3ffa1f1712943e2e,0x3ff129849d492ce3,2
+np.float64,0x7fea803a14750073,0x4086324c652c276c,2
+np.float64,0x3ff5b6f97fcb6df3,0x3fea4cb0b97b18e9,2
+np.float64,0x7fc2efdfc425dfbf,0x40862485036a5c6e,2
+np.float64,0x7fe2c78e5be58f1c,0x40862f8b0a5e7baf,2
+np.float64,0x7fe80d7fff301aff,0x40863185e234060a,2
+np.float64,0x3ffd895d457b12ba,0x3ff391e2cac7a3f8,2
+np.float64,0x3ff44c9764a8992f,0x3fe6f6690396c232,2
+np.float64,0x3ff731688b8e62d1,0x3fed4ed70fac3839,2
+np.float64,0x3ff060200460c040,0x3fcbad4a07d97f0e,2
+np.float64,0x3ffbd2f70a17a5ee,0x3ff26afb46ade929,2
+np.float64,0x7febe9e841f7d3d0,0x408632b6c465ddd9,2
+np.float64,0x3ff2532f8be4a65f,0x3fe10c6cd8d64cf4,2
+np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,2
+np.float64,0x3ff3a1ae3a47435c,0x3fe52c00210cc459,2
+np.float64,0x7fe9c34ae6b38695,0x408632128d150149,2
+np.float64,0x3fff311029fe6220,0x3ff498b852f30bff,2
+np.float64,0x3ffd4485a1ba890c,0x3ff3653b6fa701cd,2
+np.float64,0x7fd52718b1aa4e30,0x40862af330d9c68c,2
+np.float64,0x3ff10b695a4216d3,0x3fd7009294e367b7,2
+np.float64,0x3ffdf73de59bee7c,0x3ff3d7fa96d2c1ae,2
+np.float64,0x3ff2f1c75965e38f,0x3fe320aaff3db882,2
+np.float64,0x3ff2a56a5a854ad5,0x3fe228cc4ad7e7a5,2
+np.float64,0x7fe60cd1cf6c19a3,0x408630d3d87a04b3,2
+np.float64,0x3ff89fa65c113f4c,0x3fefe3543773180c,2
+np.float64,0x3ffd253130ba4a62,0x3ff350b76ba692a0,2
+np.float64,0x7feaad7051f55ae0,0x40863259ff932d62,2
+np.float64,0x7fd9cc37cf33986f,0x40862c89c15f963b,2
+np.float64,0x3ff8c08de771811c,0x3ff00daa9c17acd7,2
+np.float64,0x7fea58b25d34b164,0x408632406d54cc6f,2
+np.float64,0x7fe5f161fd2be2c3,0x408630c9ddf272a5,2
+np.float64,0x3ff5840dbf8b081c,0x3fe9dc9117b4cbc7,2
+np.float64,0x3ff3fd762307faec,0x3fe6277cd530c640,2
+np.float64,0x3ff9095c98b212b9,0x3ff04abff170ac24,2
+np.float64,0x7feaac66017558cb,0x40863259afb4f8ce,2
+np.float64,0x7fd78f96bcaf1f2c,0x40862bd00175fdf9,2
+np.float64,0x3ffaca27e0959450,0x3ff1ab72b8f8633e,2
+np.float64,0x3ffb7f18cb96fe32,0x3ff22f81bcb8907b,2
+np.float64,0x3ffcce48d1199c92,0x3ff317276f62c0b2,2
+np.float64,0x3ffcb9a7f3797350,0x3ff30958e0d6a34d,2
+np.float64,0x7fda569ef6b4ad3d,0x40862cb43b33275a,2
+np.float64,0x7fde9f0893bd3e10,0x40862de8cc036283,2
+np.float64,0x3ff428be3928517c,0x3fe699bb5ab58904,2
+np.float64,0x7fa4d3344029a668,0x40861a3084989291,2
+np.float64,0x3ff03607bd006c0f,0x3fc4c4840cf35f48,2
+np.float64,0x3ff2b1335c056267,0x3fe25000846b75a2,2
+np.float64,0x7fe0cb8bd8e19717,0x40862ea65237d496,2
+np.float64,0x3fff4b1b7b9e9637,0x3ff4a83fb08e7b24,2
+np.float64,0x7fe7526140aea4c2,0x40863146ae86069c,2
+np.float64,0x7fbfcfb7c23f9f6f,0x4086231fc246ede5,2
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv
new file mode 100644
index 00000000..cb94c93c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsin.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbe7d3a7c,0xbe7fe217,4
+np.float32,0x3dc102f0,0x3dc14c60,4
+np.float32,0xbe119c28,0xbe121aef,4
+np.float32,0xbe51cd68,0xbe534c75,4
+np.float32,0x3c04a300,0x3c04a35f,4
+np.float32,0xbf4f0b62,0xbf712a69,4
+np.float32,0x3ef61a5c,0x3f005cf6,4
+np.float32,0xbf13024c,0xbf1c97df,4
+np.float32,0x3e93b580,0x3e95d6b5,4
+np.float32,0x3e44e7b8,0x3e4623a5,4
+np.float32,0xbe35df20,0xbe36d773,4
+np.float32,0x3eecd2c0,0x3ef633cf,4
+np.float32,0x3f2772ba,0x3f36862a,4
+np.float32,0x3e211ea8,0x3e21cac5,4
+np.float32,0x3e3b3d90,0x3e3c4cc6,4
+np.float32,0x3f37c962,0x3f4d018c,4
+np.float32,0x3e92ad88,0x3e94c31a,4
+np.float32,0x3f356ffc,0x3f49a766,4
+np.float32,0x3f487ba2,0x3f665254,4
+np.float32,0x3f061c46,0x3f0d27ae,4
+np.float32,0xbee340a2,0xbeeb7722,4
+np.float32,0xbe85aede,0xbe874026,4
+np.float32,0x3f34cf9a,0x3f48c474,4
+np.float32,0x3e29a690,0x3e2a6fbd,4
+np.float32,0xbeb29428,0xbeb669d1,4
+np.float32,0xbe606d40,0xbe624370,4
+np.float32,0x3dae6860,0x3dae9e85,4
+np.float32,0xbf04872b,0xbf0b4d25,4
+np.float32,0x3f2080e2,0x3f2d7ab0,4
+np.float32,0xbec77dcc,0xbecceb27,4
+np.float32,0x3e0dda10,0x3e0e4f38,4
+np.float32,0xbefaf970,0xbf03262c,4
+np.float32,0x3f576a0c,0x3f7ffee6,4
+np.float32,0x3f222382,0x3f2f95d6,4
+np.float32,0x7fc00000,0x7fc00000,4
+np.float32,0x3e41c468,0x3e42f14e,4
+np.float32,0xbf2f64dd,0xbf4139a8,4
+np.float32,0xbf60ef90,0xbf895956,4
+np.float32,0xbf67c855,0xbf90eff0,4
+np.float32,0xbed35aee,0xbed9df00,4
+np.float32,0xbf2c7d92,0xbf3d448f,4
+np.float32,0x3f7b1604,0x3faff122,4
+np.float32,0xbf7c758b,0xbfb3bf87,4
+np.float32,0x3ecda1c8,0x3ed39acf,4
+np.float32,0x3f3af8ae,0x3f519fcb,4
+np.float32,0xbf16e6a3,0xbf2160fd,4
+np.float32,0x3f0c97d2,0x3f14d668,4
+np.float32,0x3f0a8060,0x3f1257b9,4
+np.float32,0x3f27905a,0x3f36ad57,4
+np.float32,0x3eeaeba4,0x3ef40efe,4
+np.float32,0x3e58dde0,0x3e5a8580,4
+np.float32,0xbf0cabe2,0xbf14ee6b,4
+np.float32,0xbe805ca8,0xbe81bf03,4
+np.float32,0x3f5462ba,0x3f7a7b85,4
+np.float32,0xbee235d0,0xbeea4d8b,4
+np.float32,0xbe880cb0,0xbe89b426,4
+np.float32,0x80000001,0x80000001,4
+np.float32,0x3f208c00,0x3f2d88f6,4
+np.float32,0xbf34f3d2,0xbf48f7a2,4
+np.float32,0x3f629428,0x3f8b1763,4
+np.float32,0xbf52a900,0xbf776b4a,4
+np.float32,0xbd17f8d0,0xbd1801be,4
+np.float32,0xbef7cada,0xbf0153d1,4
+np.float32,0x3f7d3b90,0x3fb63967,4
+np.float32,0xbd6a20b0,0xbd6a4160,4
+np.float32,0x3f740496,0x3fa1beb7,4
+np.float32,0x3ed8762c,0x3edf7dd9,4
+np.float32,0x3f53b066,0x3f793d42,4
+np.float32,0xbe9de718,0xbea084f9,4
+np.float32,0x3ea3ae90,0x3ea69b4b,4
+np.float32,0x3f1b8f00,0x3f273183,4
+np.float32,0x3f5cd6ac,0x3f852ead,4
+np.float32,0x3f29d510,0x3f39b169,4
+np.float32,0x3ee2a934,0x3eeace33,4
+np.float32,0x3eecac94,0x3ef608c2,4
+np.float32,0xbea915e2,0xbeac5203,4
+np.float32,0xbd316e90,0xbd317cc8,4
+np.float32,0xbf70b495,0xbf9c97b6,4
+np.float32,0xbe80d976,0xbe823ff3,4
+np.float32,0x3e9205f8,0x3e94143f,4
+np.float32,0x3f49247e,0x3f676296,4
+np.float32,0x3d9030c0,0x3d904f50,4
+np.float32,0x3e4df058,0x3e4f5a5c,4
+np.float32,0xbe1fd360,0xbe207b58,4
+np.float32,0xbf69dc7c,0xbf937006,4
+np.float32,0x3f36babe,0x3f4b7df3,4
+np.float32,0xbe8c9758,0xbe8e6bb7,4
+np.float32,0xbf4de72d,0xbf6f3c20,4
+np.float32,0xbecdad68,0xbed3a780,4
+np.float32,0xbf73e2cf,0xbfa18702,4
+np.float32,0xbece16a8,0xbed41a75,4
+np.float32,0x3f618a96,0x3f89fc6d,4
+np.float32,0xbf325853,0xbf454ea9,4
+np.float32,0x3f138568,0x3f1d3828,4
+np.float32,0xbf56a6e9,0xbf7e9748,4
+np.float32,0x3ef5d594,0x3f0035bf,4
+np.float32,0xbf408220,0xbf59dfaa,4
+np.float32,0xbed120e6,0xbed76dd5,4
+np.float32,0xbf6dbda5,0xbf986cee,4
+np.float32,0x3f744a38,0x3fa23282,4
+np.float32,0xbe4b56d8,0xbe4cb329,4
+np.float32,0x3f54c5f2,0x3f7b2d97,4
+np.float32,0xbd8b1c90,0xbd8b3801,4
+np.float32,0x3ee19a48,0x3ee9a03b,4
+np.float32,0x3f48460e,0x3f65fc3d,4
+np.float32,0x3eb541c0,0x3eb9461e,4
+np.float32,0xbea7d098,0xbeaaf98c,4
+np.float32,0xbda99e40,0xbda9d00c,4
+np.float32,0xbefb2ca6,0xbf03438d,4
+np.float32,0x3f4256be,0x3f5cab0b,4
+np.float32,0xbdbdb198,0xbdbdf74d,4
+np.float32,0xbf325b5f,0xbf4552e9,4
+np.float32,0xbf704d1a,0xbf9c00b4,4
+np.float32,0x3ebb1d04,0x3ebf8cf8,4
+np.float32,0xbed03566,0xbed66bf1,4
+np.float32,0x3e8fcee8,0x3e91c501,4
+np.float32,0xbf2e1eec,0xbf3f7b9d,4
+np.float32,0x3f33c4d2,0x3f474cac,4
+np.float32,0x3f598ef4,0x3f8201b4,4
+np.float32,0x3e09bb30,0x3e0a2660,4
+np.float32,0x3ed4e228,0x3edb8cdb,4
+np.float32,0x3eb7a190,0x3ebbd0a1,4
+np.float32,0xbd9ae630,0xbd9b0c18,4
+np.float32,0x3f43020e,0x3f5db2d7,4
+np.float32,0xbec06ac0,0xbec542d4,4
+np.float32,0x3f3dfde0,0x3f561674,4
+np.float32,0xbf64084a,0xbf8cabe6,4
+np.float32,0xbd6f95b0,0xbd6fb8b7,4
+np.float32,0x3f268640,0x3f354e2d,4
+np.float32,0xbe72b4bc,0xbe7509b2,4
+np.float32,0xbf3414fa,0xbf47bd5a,4
+np.float32,0xbf375218,0xbf4c566b,4
+np.float32,0x3f203c1a,0x3f2d2273,4
+np.float32,0xbd503530,0xbd504c2b,4
+np.float32,0xbc45e540,0xbc45e67b,4
+np.float32,0xbf175c4f,0xbf21f2c6,4
+np.float32,0x3f7432a6,0x3fa20b2b,4
+np.float32,0xbf43367f,0xbf5e03d8,4
+np.float32,0x3eb3997c,0x3eb780c4,4
+np.float32,0x3e5574c8,0x3e570878,4
+np.float32,0xbf04b57b,0xbf0b8349,4
+np.float32,0x3f6216d8,0x3f8a914b,4
+np.float32,0xbf57a237,0xbf80337d,4
+np.float32,0xbee1403a,0xbee93bee,4
+np.float32,0xbeaf9b9a,0xbeb33f3b,4
+np.float32,0xbf109374,0xbf19a223,4
+np.float32,0xbeae6824,0xbeb1f810,4
+np.float32,0xbcff9320,0xbcff9dbe,4
+np.float32,0x3ed205c0,0x3ed868a9,4
+np.float32,0x3d897c30,0x3d8996ad,4
+np.float32,0xbf2899d2,0xbf380d4c,4
+np.float32,0xbf54cb0b,0xbf7b36c2,4
+np.float32,0x3ea8e8ec,0x3eac2262,4
+np.float32,0x3ef5e1a0,0x3f003c9d,4
+np.float32,0xbf00c81e,0xbf06f1e2,4
+np.float32,0xbf346775,0xbf483181,4
+np.float32,0x3f7a4fe4,0x3fae077c,4
+np.float32,0x3f00776e,0x3f06948f,4
+np.float32,0xbe0a3078,0xbe0a9cbc,4
+np.float32,0xbeba0b06,0xbebe66be,4
+np.float32,0xbdff4e38,0xbdfff8b2,4
+np.float32,0xbe927f70,0xbe9492ff,4
+np.float32,0x3ebb07e0,0x3ebf7642,4
+np.float32,0x3ebcf8e0,0x3ec18c95,4
+np.float32,0x3f49bdfc,0x3f685b51,4
+np.float32,0x3cbc29c0,0x3cbc2dfd,4
+np.float32,0xbe9e951a,0xbea13bf1,4
+np.float32,0xbe8c237c,0xbe8df33d,4
+np.float32,0x3e17f198,0x3e1881c4,4
+np.float32,0xbd0b5220,0xbd0b5902,4
+np.float32,0xbf34c4a2,0xbf48b4f5,4
+np.float32,0xbedaa814,0xbee1ea94,4
+np.float32,0x3ebf5d6c,0x3ec42053,4
+np.float32,0x3cd04b40,0x3cd050ff,4
+np.float32,0xbec33fe0,0xbec85244,4
+np.float32,0xbf00b27a,0xbf06d8d8,4
+np.float32,0x3f15d7be,0x3f201243,4
+np.float32,0xbe3debd0,0xbe3f06f7,4
+np.float32,0xbea81704,0xbeab4418,4
+np.float32,0x1,0x1,4
+np.float32,0x3f49e6ba,0x3f689d8b,4
+np.float32,0x3f351030,0x3f491fc0,4
+np.float32,0x3e607de8,0x3e625482,4
+np.float32,0xbe8dbbe4,0xbe8f9c0e,4
+np.float32,0x3edbf350,0x3ee35924,4
+np.float32,0xbf0c84c4,0xbf14bf9c,4
+np.float32,0x3eb218b0,0x3eb5e61a,4
+np.float32,0x3e466dd0,0x3e47b138,4
+np.float32,0xbe8ece94,0xbe90ba01,4
+np.float32,0xbe82ec2a,0xbe84649a,4
+np.float32,0xbf7e1f10,0xbfb98b9e,4
+np.float32,0xbf2d00ea,0xbf3df688,4
+np.float32,0x3db7cdd0,0x3db80d36,4
+np.float32,0xbe388b98,0xbe398f25,4
+np.float32,0xbd86cb40,0xbd86e436,4
+np.float32,0x7f7fffff,0x7fc00000,4
+np.float32,0x3f472a60,0x3f6436c6,4
+np.float32,0xbf5b2c1d,0xbf838d87,4
+np.float32,0x3f0409ea,0x3f0abad8,4
+np.float32,0x3f47dd0e,0x3f6553f0,4
+np.float32,0x3e3eab00,0x3e3fc98a,4
+np.float32,0xbf7c2a7f,0xbfb2e19b,4
+np.float32,0xbeda0048,0xbee13112,4
+np.float32,0x3f46600a,0x3f62f5b2,4
+np.float32,0x3f45aef4,0x3f61de43,4
+np.float32,0x3dd40a50,0x3dd46bc4,4
+np.float32,0xbf6cdd0b,0xbf974191,4
+np.float32,0x3f78de4c,0x3faac725,4
+np.float32,0x3f3c39a4,0x3f53777f,4
+np.float32,0xbe2a30ec,0xbe2afc0b,4
+np.float32,0xbf3c0ef0,0xbf533887,4
+np.float32,0x3ecb6548,0x3ed12a53,4
+np.float32,0x3eb994e8,0x3ebde7fc,4
+np.float32,0x3d4c1ee0,0x3d4c3487,4
+np.float32,0xbf52cb6d,0xbf77a7eb,4
+np.float32,0x3eb905d4,0x3ebd4e80,4
+np.float32,0x3e712428,0x3e736d72,4
+np.float32,0xbf79ee6e,0xbfad22be,4
+np.float32,0x3de6f8b0,0x3de776c1,4
+np.float32,0x3e9b2898,0x3e9da325,4
+np.float32,0x3ea09b20,0x3ea35d20,4
+np.float32,0x3d0ea9a0,0x3d0eb103,4
+np.float32,0xbd911500,0xbd913423,4
+np.float32,0x3e004618,0x3e009c97,4
+np.float32,0x3f5e0e5a,0x3f86654c,4
+np.float32,0x3f2e6300,0x3f3fd88b,4
+np.float32,0x3e0cf5d0,0x3e0d68c3,4
+np.float32,0x3d6a16c0,0x3d6a376c,4
+np.float32,0x3f7174aa,0x3f9db53c,4
+np.float32,0xbe04bba0,0xbe051b81,4
+np.float32,0xbe6fdcb4,0xbe721c92,4
+np.float32,0x3f4379f0,0x3f5e6c31,4
+np.float32,0xbf680098,0xbf913257,4
+np.float32,0xbf3c31ca,0xbf536bea,4
+np.float32,0x3f59db58,0x3f824a4e,4
+np.float32,0xbf3ffc84,0xbf591554,4
+np.float32,0x3d1d5160,0x3d1d5b48,4
+np.float32,0x3f6c64ae,0x3f96a3da,4
+np.float32,0xbf1b49fd,0xbf26daaa,4
+np.float32,0x3ec80be0,0x3ecd8576,4
+np.float32,0x3f3becc0,0x3f530629,4
+np.float32,0xbea93890,0xbeac76c1,4
+np.float32,0x3f5b3acc,0x3f839bbd,4
+np.float32,0xbf5d6818,0xbf85bef9,4
+np.float32,0x3f794266,0x3fab9fa6,4
+np.float32,0xbee8eb7c,0xbef1cf3b,4
+np.float32,0xbf360a06,0xbf4a821e,4
+np.float32,0x3f441cf6,0x3f5f693d,4
+np.float32,0x3e60de40,0x3e62b742,4
+np.float32,0xbebb3d7e,0xbebfafdc,4
+np.float32,0x3e56a3a0,0x3e583e28,4
+np.float32,0x3f375bfe,0x3f4c6499,4
+np.float32,0xbf384d7d,0xbf4dbf9a,4
+np.float32,0x3efb03a4,0x3f032c06,4
+np.float32,0x3f1d5d10,0x3f29794d,4
+np.float32,0xbe25f7dc,0xbe26b41d,4
+np.float32,0x3f6d2f88,0x3f97aebb,4
+np.float32,0xbe9fa100,0xbea255cb,4
+np.float32,0xbf21dafa,0xbf2f382a,4
+np.float32,0x3d3870e0,0x3d3880d9,4
+np.float32,0x3eeaf00c,0x3ef413f4,4
+np.float32,0xbc884ea0,0xbc88503c,4
+np.float32,0xbf7dbdad,0xbfb80b6d,4
+np.float32,0xbf4eb713,0xbf709b46,4
+np.float32,0xbf1c0ad4,0xbf27cd92,4
+np.float32,0x3f323088,0x3f451737,4
+np.float32,0x3e405d88,0x3e4183e1,4
+np.float32,0x3d7ad580,0x3d7afdb4,4
+np.float32,0xbf207338,0xbf2d6927,4
+np.float32,0xbecf7948,0xbed59e1a,4
+np.float32,0x3f16ff94,0x3f217fde,4
+np.float32,0xbdf19588,0xbdf225dd,4
+np.float32,0xbf4d9654,0xbf6eb442,4
+np.float32,0xbf390b9b,0xbf4ed220,4
+np.float32,0xbe155a74,0xbe15e354,4
+np.float32,0x3f519e4c,0x3f759850,4
+np.float32,0xbee3f08c,0xbeec3b84,4
+np.float32,0xbf478be7,0xbf64d23b,4
+np.float32,0xbefdee50,0xbf04d92a,4
+np.float32,0x3e8def78,0x3e8fd1bc,4
+np.float32,0x3e3df2a8,0x3e3f0dee,4
+np.float32,0xbf413e22,0xbf5afd97,4
+np.float32,0xbf1b8bc4,0xbf272d71,4
+np.float32,0xbf31e5be,0xbf44af22,4
+np.float32,0x3de7e080,0x3de86010,4
+np.float32,0xbf5ddf7e,0xbf863645,4
+np.float32,0x3f3eba6a,0x3f57306e,4
+np.float32,0xff7fffff,0x7fc00000,4
+np.float32,0x3ec22d5c,0x3ec72973,4
+np.float32,0x80800000,0x80800000,4
+np.float32,0x3f032e0c,0x3f09ba82,4
+np.float32,0x3d74bd60,0x3d74e2b7,4
+np.float32,0xbea0d61e,0xbea39b42,4
+np.float32,0xbefdfa78,0xbf04e02a,4
+np.float32,0x3e5cb220,0x3e5e70ec,4
+np.float32,0xbe239e54,0xbe2452a4,4
+np.float32,0x3f452738,0x3f61090e,4
+np.float32,0x3e99a2e0,0x3e9c0a66,4
+np.float32,0x3e4394d8,0x3e44ca5f,4
+np.float32,0x3f4472e2,0x3f5fef14,4
+np.float32,0xbf46bc70,0xbf638814,4
+np.float32,0xbf0b910f,0xbf139c7a,4
+np.float32,0x3f36b4a6,0x3f4b753f,4
+np.float32,0x3e0bf478,0x3e0c64f6,4
+np.float32,0x3ce02480,0x3ce02ba9,4
+np.float32,0xbd904b10,0xbd9069b1,4
+np.float32,0xbf7f5d72,0xbfc00b70,4
+np.float32,0x3f62127e,0x3f8a8ca8,4
+np.float32,0xbf320253,0xbf44d6e4,4
+np.float32,0x3f2507be,0x3f335833,4
+np.float32,0x3f299284,0x3f395887,4
+np.float32,0xbd8211b0,0xbd82281d,4
+np.float32,0xbd3374c0,0xbd338376,4
+np.float32,0x3f36c56a,0x3f4b8d30,4
+np.float32,0xbf51f704,0xbf76331f,4
+np.float32,0xbe9871ca,0xbe9acab2,4
+np.float32,0xbe818d8c,0xbe82fa0f,4
+np.float32,0x3f08b958,0x3f103c18,4
+np.float32,0x3f22559a,0x3f2fd698,4
+np.float32,0xbf11f388,0xbf1b4db8,4
+np.float32,0x3ebe1990,0x3ec2c359,4
+np.float32,0xbe75ab38,0xbe7816b6,4
+np.float32,0x3e96102c,0x3e984c99,4
+np.float32,0xbe80d9d2,0xbe824052,4
+np.float32,0x3ef47588,0x3efeda7f,4
+np.float32,0xbe45e524,0xbe4725ea,4
+np.float32,0x3f7f9e7a,0x3fc213ff,4
+np.float32,0x3f1d3c36,0x3f294faa,4
+np.float32,0xbf3c58db,0xbf53a591,4
+np.float32,0x3f0d3d20,0x3f159c69,4
+np.float32,0x3f744be6,0x3fa23552,4
+np.float32,0x3f2e0cea,0x3f3f630e,4
+np.float32,0x3e193c10,0x3e19cff7,4
+np.float32,0xbf4150ac,0xbf5b19dd,4
+np.float32,0xbf145f72,0xbf1e4355,4
+np.float32,0xbb76cc00,0xbb76cc26,4
+np.float32,0x3f756780,0x3fa41b3e,4
+np.float32,0x3ea9b868,0x3eacfe3c,4
+np.float32,0x3d07c920,0x3d07cf7f,4
+np.float32,0xbf2263d4,0xbf2fe8ff,4
+np.float32,0x3e53b3f8,0x3e553daa,4
+np.float32,0xbf785be8,0xbfa9b5ba,4
+np.float32,0x3f324f7a,0x3f454254,4
+np.float32,0xbf2188f2,0xbf2ece5b,4
+np.float32,0xbe33781c,0xbe3466a2,4
+np.float32,0xbd3cf120,0xbd3d024c,4
+np.float32,0x3f06b18a,0x3f0dd70f,4
+np.float32,0x3f40d63e,0x3f5a5f6a,4
+np.float32,0x3f752340,0x3fa3a41e,4
+np.float32,0xbe1cf1c0,0xbe1d90bc,4
+np.float32,0xbf02d948,0xbf0957d7,4
+np.float32,0x3f73bed0,0x3fa14bf7,4
+np.float32,0x3d914920,0x3d916864,4
+np.float32,0x7fa00000,0x7fe00000,4
+np.float32,0xbe67a5d8,0xbe69aba7,4
+np.float32,0x3f689c4a,0x3f91eb9f,4
+np.float32,0xbf196e00,0xbf248601,4
+np.float32,0xbf50dacb,0xbf7444fe,4
+np.float32,0x3f628b86,0x3f8b0e1e,4
+np.float32,0x3f6ee2f2,0x3f99fe7f,4
+np.float32,0x3ee5df40,0x3eee6492,4
+np.float32,0x3f501746,0x3f72f41b,4
+np.float32,0xbf1f0f18,0xbf2ba164,4
+np.float32,0xbf1a8bfd,0xbf25ec01,4
+np.float32,0xbd4926f0,0xbd493ba9,4
+np.float32,0xbf4e364f,0xbf6fc17b,4
+np.float32,0x3e50c578,0x3e523ed4,4
+np.float32,0x3f65bf10,0x3f8e95ce,4
+np.float32,0xbe8d75a2,0xbe8f52f2,4
+np.float32,0xbf3f557e,0xbf581962,4
+np.float32,0xbeff2bfc,0xbf05903a,4
+np.float32,0x3f5e8bde,0x3f86e3d8,4
+np.float32,0xbf7a0012,0xbfad4b9b,4
+np.float32,0x3edefce0,0x3ee6b790,4
+np.float32,0xbf0003de,0xbf060f09,4
+np.float32,0x3efc4650,0x3f03e548,4
+np.float32,0x3f4582e4,0x3f6198f5,4
+np.float32,0x3f10086c,0x3f18f9d0,4
+np.float32,0x3f1cd304,0x3f28ca77,4
+np.float32,0x3f683366,0x3f916e8d,4
+np.float32,0xbed49392,0xbedb3675,4
+np.float32,0xbf6fe5f6,0xbf9b6c0e,4
+np.float32,0xbf59b416,0xbf8224f6,4
+np.float32,0x3d20c960,0x3d20d3f4,4
+np.float32,0x3f6b00d6,0x3f94dbe7,4
+np.float32,0x3f6c26ae,0x3f965352,4
+np.float32,0xbf370ea6,0xbf4bf5dd,4
+np.float32,0x3dfe7230,0x3dff1af1,4
+np.float32,0xbefc21a8,0xbf03d038,4
+np.float32,0x3f16a990,0x3f21156a,4
+np.float32,0xbef8ac0c,0xbf01d48f,4
+np.float32,0x3f170de8,0x3f21919d,4
+np.float32,0x3db9ef80,0x3dba3122,4
+np.float32,0x3d696400,0x3d698461,4
+np.float32,0x3f007aa2,0x3f069843,4
+np.float32,0x3f22827c,0x3f3010a9,4
+np.float32,0x3f3650dc,0x3f4ae6f1,4
+np.float32,0xbf1d8037,0xbf29a5e1,4
+np.float32,0xbf08fdc4,0xbf108d0e,4
+np.float32,0xbd8df350,0xbd8e1079,4
+np.float32,0xbf36bb32,0xbf4b7e98,4
+np.float32,0x3f2e3756,0x3f3f9ced,4
+np.float32,0x3d5a6f20,0x3d5a89aa,4
+np.float32,0x3f55d568,0x3f7d1889,4
+np.float32,0x3e1ed110,0x3e1f75d9,4
+np.float32,0x3e7386b8,0x3e75e1dc,4
+np.float32,0x3f48ea0e,0x3f670434,4
+np.float32,0x3e921fb0,0x3e942f14,4
+np.float32,0xbf0d4d0b,0xbf15af7f,4
+np.float32,0x3f179ed2,0x3f224549,4
+np.float32,0xbf3a328e,0xbf507e6d,4
+np.float32,0xbf74591a,0xbfa24b6e,4
+np.float32,0x3ec7d1c4,0x3ecd4657,4
+np.float32,0xbf6ecbed,0xbf99de85,4
+np.float32,0x3db0bd00,0x3db0f559,4
+np.float32,0x7f800000,0x7fc00000,4
+np.float32,0x3e0373b8,0x3e03d0d6,4
+np.float32,0xbf439784,0xbf5e9a04,4
+np.float32,0xbef97a9e,0xbf024ac6,4
+np.float32,0x3e4d71a8,0x3e4ed90a,4
+np.float32,0xbf14d868,0xbf1ed7e3,4
+np.float32,0xbf776870,0xbfa7ce37,4
+np.float32,0xbe32a500,0xbe339038,4
+np.float32,0xbf326d8a,0xbf456c3d,4
+np.float32,0xbe9b758c,0xbe9df3e7,4
+np.float32,0x3d9515a0,0x3d95376a,4
+np.float32,0x3e3f7320,0x3e40953e,4
+np.float32,0xbee57e7e,0xbeedf84f,4
+np.float32,0x3e821e94,0x3e838ffd,4
+np.float32,0x3f74beaa,0x3fa2f721,4
+np.float32,0xbe9b7672,0xbe9df4d9,4
+np.float32,0x3f4041fc,0x3f597e71,4
+np.float32,0xbe9ea7c4,0xbea14f92,4
+np.float32,0xbf800000,0xbfc90fdb,4
+np.float32,0x3e04fb90,0x3e055bfd,4
+np.float32,0xbf14d3d6,0xbf1ed245,4
+np.float32,0xbe84ebec,0xbe86763e,4
+np.float32,0x3f08e568,0x3f107039,4
+np.float32,0x3d8dc9e0,0x3d8de6ef,4
+np.float32,0x3ea4549c,0x3ea74a94,4
+np.float32,0xbebd2806,0xbec1bf51,4
+np.float32,0x3f311a26,0x3f439498,4
+np.float32,0xbf3d2222,0xbf54cf7e,4
+np.float32,0x3e00c500,0x3e011c81,4
+np.float32,0xbe35ed1c,0xbe36e5a9,4
+np.float32,0xbd4ec020,0xbd4ed6a0,4
+np.float32,0x3e1eb088,0x3e1f54eb,4
+np.float32,0x3cf94840,0x3cf9521a,4
+np.float32,0xbf010c5d,0xbf0740e0,4
+np.float32,0xbf3bd63b,0xbf52e502,4
+np.float32,0x3f233f30,0x3f310542,4
+np.float32,0x3ea24128,0x3ea519d7,4
+np.float32,0x3f478b38,0x3f64d124,4
+np.float32,0x3f1e0c6c,0x3f2a57ec,4
+np.float32,0xbf3ad294,0xbf51680a,4
+np.float32,0x3ede0554,0x3ee5a4b4,4
+np.float32,0x3e451a98,0x3e46577d,4
+np.float32,0x3f520164,0x3f764542,4
+np.float32,0x0,0x0,4
+np.float32,0xbd056cd0,0xbd0572db,4
+np.float32,0xbf58b018,0xbf812f5e,4
+np.float32,0x3e036eb0,0x3e03cbc3,4
+np.float32,0x3d1377a0,0x3d137fc9,4
+np.float32,0xbf692d3a,0xbf929a2c,4
+np.float32,0xbec60fb8,0xbecb5dea,4
+np.float32,0x3ed23340,0x3ed89a8e,4
+np.float32,0x3c87f040,0x3c87f1d9,4
+np.float32,0x3dac62f0,0x3dac9737,4
+np.float32,0xbed97c16,0xbee09f02,4
+np.float32,0xbf2d5f3c,0xbf3e769c,4
+np.float32,0xbc3b7c40,0xbc3b7d4c,4
+np.float32,0x3ed998ec,0x3ee0bedd,4
+np.float32,0x3dd86630,0x3dd8cdcb,4
+np.float32,0x3e8b4304,0x3e8d09ea,4
+np.float32,0x3f51e6b0,0x3f761697,4
+np.float32,0x3ec51f24,0x3eca5923,4
+np.float32,0xbf647430,0xbf8d2307,4
+np.float32,0x3f253d9c,0x3f339eb2,4
+np.float32,0x3dc969d0,0x3dc9bd4b,4
+np.float32,0xbc2f1300,0xbc2f13da,4
+np.float32,0xbf170007,0xbf21806d,4
+np.float32,0x3f757d10,0x3fa4412e,4
+np.float32,0xbe7864ac,0xbe7ae564,4
+np.float32,0x3f2ffe90,0x3f420cfb,4
+np.float32,0xbe576138,0xbe590012,4
+np.float32,0xbf517a21,0xbf755959,4
+np.float32,0xbf159cfe,0xbf1fc9d5,4
+np.float32,0xbf638b2a,0xbf8c22cf,4
+np.float32,0xff800000,0x7fc00000,4
+np.float32,0x3ed19ca0,0x3ed7f569,4
+np.float32,0x3f7c4460,0x3fb32d26,4
+np.float32,0x3ebfae6c,0x3ec477ab,4
+np.float32,0x3dd452d0,0x3dd4b4a8,4
+np.float32,0x3f471482,0x3f6413fb,4
+np.float32,0xbf49d704,0xbf6883fe,4
+np.float32,0xbd42c4e0,0xbd42d7af,4
+np.float32,0xbeb02994,0xbeb3d668,4
+np.float32,0x3f4d1fd8,0x3f6dedd2,4
+np.float32,0x3efb591c,0x3f035d11,4
+np.float32,0x80000000,0x80000000,4
+np.float32,0xbf50f782,0xbf7476ad,4
+np.float32,0x3d7232c0,0x3d7256f0,4
+np.float32,0x3f649460,0x3f8d46bb,4
+np.float32,0x3f5561bc,0x3f7c46a9,4
+np.float32,0x3e64f6a0,0x3e66ea5d,4
+np.float32,0x3e5b0470,0x3e5cb8f9,4
+np.float32,0xbe9b6b2c,0xbe9de904,4
+np.float32,0x3f6c33f4,0x3f966486,4
+np.float32,0x3f5cee54,0x3f854613,4
+np.float32,0x3ed3e044,0x3eda716e,4
+np.float32,0xbf3cac7f,0xbf542131,4
+np.float32,0x3c723500,0x3c723742,4
+np.float32,0x3de59900,0x3de614d3,4
+np.float32,0xbdf292f8,0xbdf32517,4
+np.float32,0x3f05c8b2,0x3f0cc59b,4
+np.float32,0xbf1ab182,0xbf261b14,4
+np.float32,0xbda396f0,0xbda3c39a,4
+np.float32,0xbf270ed0,0xbf360231,4
+np.float32,0x3f2063e6,0x3f2d557e,4
+np.float32,0x3c550280,0x3c550409,4
+np.float32,0xbe103b48,0xbe10b679,4
+np.float32,0xbebae390,0xbebf4f40,4
+np.float32,0x3f3bc868,0x3f52d0aa,4
+np.float32,0xbd62f880,0xbd631647,4
+np.float32,0xbe7a38f4,0xbe7cc833,4
+np.float32,0x3f09d796,0x3f118f39,4
+np.float32,0xbf5fa558,0xbf8802d0,4
+np.float32,0x3f111cc8,0x3f1a48b0,4
+np.float32,0x3e831958,0x3e849356,4
+np.float32,0xbf614dbd,0xbf89bc3b,4
+np.float32,0xbd521510,0xbd522cac,4
+np.float32,0x3f05af22,0x3f0ca7a0,4
+np.float32,0xbf1ac60e,0xbf2634df,4
+np.float32,0xbf6bd05e,0xbf95e3fe,4
+np.float32,0xbd1fa6e0,0xbd1fb13b,4
+np.float32,0xbeb82f7a,0xbebc68b1,4
+np.float32,0xbd92aaf8,0xbd92cb23,4
+np.float32,0xbe073a54,0xbe079fbf,4
+np.float32,0xbf198655,0xbf24a468,4
+np.float32,0x3f62f6d8,0x3f8b81ba,4
+np.float32,0x3eef4310,0x3ef8f4f9,4
+np.float32,0x3e8988e0,0x3e8b3eae,4
+np.float32,0xbf3ddba5,0xbf55e367,4
+np.float32,0x3dc6d2e0,0x3dc7232b,4
+np.float32,0xbf31040e,0xbf437601,4
+np.float32,0x3f1bb74a,0x3f276442,4
+np.float32,0xbf0075d2,0xbf0692b3,4
+np.float32,0xbf606ce0,0xbf88d0ff,4
+np.float32,0xbf083856,0xbf0fa39d,4
+np.float32,0xbdb25b20,0xbdb2950a,4
+np.float32,0xbeb86860,0xbebca5ae,4
+np.float32,0x3de83160,0x3de8b176,4
+np.float32,0xbf33a98f,0xbf472664,4
+np.float32,0x3e7795f8,0x3e7a1058,4
+np.float32,0x3e0ca6f8,0x3e0d192a,4
+np.float32,0xbf1aef60,0xbf2668c3,4
+np.float32,0xbda53b58,0xbda5695e,4
+np.float32,0xbf178096,0xbf221fc5,4
+np.float32,0xbf0a4159,0xbf120ccf,4
+np.float32,0x3f7bca36,0x3fb1d0df,4
+np.float32,0xbef94360,0xbf022b26,4
+np.float32,0xbef16f36,0xbefb6ad6,4
+np.float32,0x3f53a7e6,0x3f792e25,4
+np.float32,0xbf7c536f,0xbfb35993,4
+np.float32,0xbe84aaa0,0xbe8632a2,4
+np.float32,0x3ecb3998,0x3ed0fab9,4
+np.float32,0x3f539304,0x3f79090a,4
+np.float32,0xbf3c7816,0xbf53d3b3,4
+np.float32,0xbe7a387c,0xbe7cc7b7,4
+np.float32,0x3f7000e4,0x3f9b92b1,4
+np.float32,0x3e08fd70,0x3e0966e5,4
+np.float32,0x3db97ba0,0x3db9bcc8,4
+np.float32,0xbee99056,0xbef2886a,4
+np.float32,0xbf0668da,0xbf0d819e,4
+np.float32,0x3e58a408,0x3e5a4a51,4
+np.float32,0x3f3440b8,0x3f47faed,4
+np.float32,0xbf19a2ce,0xbf24c7ff,4
+np.float32,0xbe75e990,0xbe7856ee,4
+np.float32,0x3f3c865c,0x3f53e8cb,4
+np.float32,0x3e5e03d0,0x3e5fcac9,4
+np.float32,0x3edb8e34,0x3ee2e932,4
+np.float32,0xbf7e1f5f,0xbfb98ce4,4
+np.float32,0xbf7372ff,0xbfa0d0ae,4
+np.float32,0xbf3ee850,0xbf577548,4
+np.float32,0x3ef19658,0x3efb9737,4
+np.float32,0xbe8088de,0xbe81ecaf,4
+np.float32,0x800000,0x800000,4
+np.float32,0xbde39dd8,0xbde4167a,4
+np.float32,0xbf065d7a,0xbf0d7441,4
+np.float32,0xbde52c78,0xbde5a79b,4
+np.float32,0xbe3a28c0,0xbe3b333e,4
+np.float32,0x3f6e8b3c,0x3f998516,4
+np.float32,0x3f3485c2,0x3f485c39,4
+np.float32,0x3e6f2c68,0x3e71673e,4
+np.float32,0xbe4ec9cc,0xbe50385e,4
+np.float32,0xbf1c3bb0,0xbf280b39,4
+np.float32,0x3ec8ea18,0x3ece76f7,4
+np.float32,0x3e26b5f8,0x3e2774c9,4
+np.float32,0x3e1e4a38,0x3e1eed5c,4
+np.float32,0xbee7a106,0xbef05c6b,4
+np.float32,0xbf305928,0xbf4289d8,4
+np.float32,0x3f0c431c,0x3f147118,4
+np.float32,0xbe57ba6c,0xbe595b52,4
+np.float32,0x3eabc9cc,0x3eaf2fc7,4
+np.float32,0xbef1ed24,0xbefbf9ae,4
+np.float32,0xbf61b576,0xbf8a29cc,4
+np.float32,0x3e9c1ff4,0x3e9ea6cb,4
+np.float32,0x3f6c53b2,0x3f968dbe,4
+np.float32,0x3e2d1b80,0x3e2df156,4
+np.float32,0x3e9f2f70,0x3ea1de4a,4
+np.float32,0xbf5861ee,0xbf80e61a,4
+np.float32,0x3f429144,0x3f5d0505,4
+np.float32,0x3e235cc8,0x3e24103e,4
+np.float32,0xbf354879,0xbf496f6a,4
+np.float32,0xbf20a146,0xbf2da447,4
+np.float32,0x3e8d8968,0x3e8f6785,4
+np.float32,0x3f3fbc94,0x3f58b4c1,4
+np.float32,0x3f2c5f50,0x3f3d1b9f,4
+np.float32,0x3f7bf0f8,0x3fb23d23,4
+np.float32,0xbf218282,0xbf2ec60f,4
+np.float32,0x3f2545aa,0x3f33a93e,4
+np.float32,0xbf4b17be,0xbf6a9018,4
+np.float32,0xbb9df700,0xbb9df728,4
+np.float32,0x3f685d54,0x3f91a06c,4
+np.float32,0x3efdfe2c,0x3f04e24c,4
+np.float32,0x3ef1c5a0,0x3efbccd9,4
+np.float32,0xbf41d731,0xbf5be76e,4
+np.float32,0x3ebd1360,0x3ec1a919,4
+np.float32,0xbf706bd4,0xbf9c2d58,4
+np.float32,0x3ea525e4,0x3ea8279d,4
+np.float32,0xbe51f1b0,0xbe537186,4
+np.float32,0x3f5e8cf6,0x3f86e4f4,4
+np.float32,0xbdad2520,0xbdad5a19,4
+np.float32,0xbf5c5704,0xbf84b0e5,4
+np.float32,0x3f47b54e,0x3f65145e,4
+np.float32,0x3eb4fc78,0x3eb8fc0c,4
+np.float32,0x3dca1450,0x3dca68a1,4
+np.float32,0x3eb02a74,0x3eb3d757,4
+np.float32,0x3f74ae6a,0x3fa2db75,4
+np.float32,0x3f800000,0x3fc90fdb,4
+np.float32,0xbdb46a00,0xbdb4a5f2,4
+np.float32,0xbe9f2ba6,0xbea1da4e,4
+np.float32,0x3f0afa70,0x3f12e8f7,4
+np.float32,0xbf677b20,0xbf909547,4
+np.float32,0x3eff9188,0x3f05cacf,4
+np.float32,0x3f720562,0x3f9e911b,4
+np.float32,0xbf7180d8,0xbf9dc794,4
+np.float32,0xbee7d076,0xbef0919d,4
+np.float32,0x3f0432ce,0x3f0aea95,4
+np.float32,0x3f3bc4c8,0x3f52cb54,4
+np.float32,0xbea72f30,0xbeaa4ebe,4
+np.float32,0x3e90ed00,0x3e92ef33,4
+np.float32,0xbda63670,0xbda6654a,4
+np.float32,0xbf5a6f85,0xbf82d7e0,4
+np.float32,0x3e6e8808,0x3e70be34,4
+np.float32,0xbf4f3822,0xbf71768f,4
+np.float32,0x3e5c8a68,0x3e5e483f,4
+np.float32,0xbf0669d4,0xbf0d82c4,4
+np.float32,0xbf79f77c,0xbfad37b0,4
+np.float32,0x3f25c82c,0x3f345453,4
+np.float32,0x3f1b2948,0x3f26b188,4
+np.float32,0x3ef7e288,0x3f016159,4
+np.float32,0x3c274280,0x3c27433e,4
+np.float32,0xbf4c8fa0,0xbf6cfd5e,4
+np.float32,0x3ea4ccb4,0x3ea7c966,4
+np.float32,0xbf7b157e,0xbfafefca,4
+np.float32,0xbee4c2b0,0xbeed264d,4
+np.float32,0xbc1fd640,0xbc1fd6e6,4
+np.float32,0x3e892308,0x3e8ad4f6,4
+np.float32,0xbf3f69c7,0xbf5837ed,4
+np.float32,0x3ec879e8,0x3ecdfd05,4
+np.float32,0x3f07a8c6,0x3f0efa30,4
+np.float32,0x3f67b880,0x3f90dd4d,4
+np.float32,0x3e8a11c8,0x3e8bccd5,4
+np.float32,0x3f7df6fc,0x3fb8e935,4
+np.float32,0xbef3e498,0xbefe3599,4
+np.float32,0xbf18ad7d,0xbf2395d8,4
+np.float32,0x3f2bce74,0x3f3c57f5,4
+np.float32,0xbf38086e,0xbf4d5c2e,4
+np.float32,0x3f772d7a,0x3fa75c35,4
+np.float32,0xbf3b6e24,0xbf524c00,4
+np.float32,0xbdd39108,0xbdd3f1d4,4
+np.float32,0xbf691f6b,0xbf928974,4
+np.float32,0x3f146188,0x3f1e45e4,4
+np.float32,0xbf56045b,0xbf7d6e03,4
+np.float32,0xbf4b2ee4,0xbf6ab622,4
+np.float32,0xbf3fa3f6,0xbf588f9d,4
+np.float32,0x3f127bb0,0x3f1bf398,4
+np.float32,0x3ed858a0,0x3edf5d3e,4
+np.float32,0xbd6de3b0,0xbd6e05fa,4
+np.float32,0xbecc662c,0xbed24261,4
+np.float32,0xbd6791d0,0xbd67b170,4
+np.float32,0xbf146016,0xbf1e441e,4
+np.float32,0xbf61f04c,0xbf8a6841,4
+np.float32,0xbe7f16d0,0xbe80e6e7,4
+np.float32,0xbebf93e6,0xbec45b10,4
+np.float32,0xbe8a59fc,0xbe8c17d1,4
+np.float32,0xbebc7a0c,0xbec10426,4
+np.float32,0xbf2a682e,0xbf3a7649,4
+np.float32,0xbe18d0cc,0xbe19637b,4
+np.float32,0x3d7f5100,0x3d7f7b66,4
+np.float32,0xbf10f5fa,0xbf1a1998,4
+np.float32,0x3f25e956,0x3f347fdc,4
+np.float32,0x3e6e8658,0x3e70bc78,4
+np.float32,0x3f21a5de,0x3f2ef3a5,4
+np.float32,0xbf4e71d4,0xbf702607,4
+np.float32,0xbf49d6b6,0xbf688380,4
+np.float32,0xbdb729c0,0xbdb7687c,4
+np.float32,0xbf63e1f4,0xbf8c81c7,4
+np.float32,0x3dda6cb0,0x3ddad73e,4
+np.float32,0x3ee1bc40,0x3ee9c612,4
+np.float32,0x3ebdb5f8,0x3ec2581b,4
+np.float32,0x3f7d9576,0x3fb77646,4
+np.float32,0x3e087140,0x3e08d971,4
+np.float64,0xbfdba523cfb74a48,0xbfdc960ddd9c0506,3
+np.float64,0x3fb51773622a2ee0,0x3fb51d93f77089d5,3
+np.float64,0x3fc839f6d33073f0,0x3fc85f9a47dfe8e6,3
+np.float64,0xbfecba2d82f9745b,0xbff1d55416c6c993,3
+np.float64,0x3fd520fe47aa41fc,0x3fd58867f1179634,3
+np.float64,0x3fe1b369c56366d4,0x3fe2c1ac9dd2c45a,3
+np.float64,0xbfec25a7cd784b50,0xbff133417389b12d,3
+np.float64,0xbfd286342ea50c68,0xbfd2cb0bca22e66d,3
+np.float64,0x3fd5f6fe5eabedfc,0x3fd66bad16680d08,3
+np.float64,0xbfe863a87570c751,0xbfebbb9b637eb6dc,3
+np.float64,0x3fc97f5b4d32feb8,0x3fc9ab5066d8eaec,3
+np.float64,0xbfcb667af936ccf4,0xbfcb9d3017047a1d,3
+np.float64,0xbfd1b7b9afa36f74,0xbfd1f3c175706154,3
+np.float64,0x3fef97385b7f2e70,0x3ff6922a1a6c709f,3
+np.float64,0xbfd13e4205a27c84,0xbfd1757c993cdb74,3
+np.float64,0xbfd18d88aca31b12,0xbfd1c7dd75068f7d,3
+np.float64,0x3fe040ce0f60819c,0x3fe10c59d2a27089,3
+np.float64,0xbfddc7deddbb8fbe,0xbfdef9de5baecdda,3
+np.float64,0xbfcf6e96193edd2c,0xbfcfc1bb7396b9a3,3
+np.float64,0x3fd544f494aa89e8,0x3fd5ae850e2b37dd,3
+np.float64,0x3fe15b381fe2b670,0x3fe25841c7bfe2af,3
+np.float64,0xbfde793420bcf268,0xbfdfc2ddc7b4a341,3
+np.float64,0x3fd0d5db30a1abb8,0x3fd1092cef4aa4fb,3
+np.float64,0x3fe386a08c670d42,0x3fe50059bbf7f491,3
+np.float64,0xbfe0aae3a96155c8,0xbfe1880ef13e95ce,3
+np.float64,0xbfe80eeb03f01dd6,0xbfeb39e9f107e944,3
+np.float64,0xbfd531af3caa635e,0xbfd59a178f17552a,3
+np.float64,0x3fcced14ab39da28,0x3fcd2d9a806337ef,3
+np.float64,0xbfdb4c71bcb698e4,0xbfdc33d9d9daf708,3
+np.float64,0xbfde7375ecbce6ec,0xbfdfbc5611bc48ff,3
+np.float64,0x3fecc5707a798ae0,0x3ff1e2268d778017,3
+np.float64,0x3fe8f210a1f1e422,0x3fec9b3349a5baa2,3
+np.float64,0x3fe357f9b8e6aff4,0x3fe4c5a0b89a9228,3
+np.float64,0xbfe0f863b761f0c8,0xbfe1e3283494c3d4,3
+np.float64,0x3fd017c395a02f88,0x3fd044761f2f4a66,3
+np.float64,0x3febeb4746f7d68e,0x3ff0f6b955e7feb6,3
+np.float64,0xbfbdaaeeae3b55e0,0xbfbdbc0950109261,3
+np.float64,0xbfea013095f40261,0xbfee5b8fe8ad8593,3
+np.float64,0xbfe9f87b7973f0f7,0xbfee4ca3a8438d72,3
+np.float64,0x3fd37f77cfa6fef0,0x3fd3d018c825f057,3
+np.float64,0x3fb0799cee20f340,0x3fb07c879e7cb63f,3
+np.float64,0xbfdcfd581cb9fab0,0xbfde15e35314b52d,3
+np.float64,0xbfd49781b8a92f04,0xbfd4f6fa1516fefc,3
+np.float64,0x3fb3fcb6d627f970,0x3fb401ed44a713a8,3
+np.float64,0x3fd5737ef8aae6fc,0x3fd5dfe42d4416c7,3
+np.float64,0x7ff4000000000000,0x7ffc000000000000,3
+np.float64,0xbfe56ae780ead5cf,0xbfe776ea5721b900,3
+np.float64,0x3fd4567786a8acf0,0x3fd4b255421c161a,3
+np.float64,0x3fef6fb58cfedf6c,0x3ff62012dfcf0a33,3
+np.float64,0xbfd1dbcd3da3b79a,0xbfd2194fd628f74d,3
+np.float64,0x3fd9350016b26a00,0x3fd9e8b01eb023e9,3
+np.float64,0xbfe4fb3a69e9f675,0xbfe6e1d2c9eca56c,3
+np.float64,0x3fe9fe0f73f3fc1e,0x3fee5631cfd39772,3
+np.float64,0xbfd51c1bc6aa3838,0xbfd5833b3bd53543,3
+np.float64,0x3fc64158e12c82b0,0x3fc65e7352f237d7,3
+np.float64,0x3fd0d8ee1ba1b1dc,0x3fd10c5c99a16f0e,3
+np.float64,0x3fd5554e15aaaa9c,0x3fd5bfdb9ec9e873,3
+np.float64,0x3fe61ce209ec39c4,0x3fe869bc4c28437d,3
+np.float64,0xbfe4e42c8c69c859,0xbfe6c356dac7e2db,3
+np.float64,0xbfe157021062ae04,0xbfe2533ed39f4212,3
+np.float64,0x3fe844066cf0880c,0x3feb8aea0b7bd0a4,3
+np.float64,0x3fe55016586aa02c,0x3fe752e4b2a67b9f,3
+np.float64,0x3fdabce619b579cc,0x3fdb95809bc789d9,3
+np.float64,0x3fee03bae37c0776,0x3ff3778ba38ca882,3
+np.float64,0xbfeb2f5844f65eb0,0xbff03dd1b767d3c8,3
+np.float64,0x3fedcfdbaffb9fb8,0x3ff32e81d0639164,3
+np.float64,0x3fe06fc63ee0df8c,0x3fe142fc27f92eaf,3
+np.float64,0x3fe7ce90fd6f9d22,0x3fead8f832bbbf5d,3
+np.float64,0xbfbc0015ce380028,0xbfbc0e7470e06e86,3
+np.float64,0xbfe9b3de90f367bd,0xbfedd857931dfc6b,3
+np.float64,0xbfcb588f5936b120,0xbfcb8ef0124a4f21,3
+np.float64,0x3f8d376a503a6f00,0x3f8d37ab43e7988d,3
+np.float64,0xbfdb123a40b62474,0xbfdbf38b6cf5db92,3
+np.float64,0xbfee7da6be7cfb4e,0xbff433042cd9d5eb,3
+np.float64,0xbfc4c9e01b2993c0,0xbfc4e18dbafe37ef,3
+np.float64,0x3fedd42faffba860,0x3ff334790cd18a19,3
+np.float64,0x3fe9cdf772f39bee,0x3fee044f87b856ab,3
+np.float64,0x3fe0245881e048b2,0x3fe0eb5a1f739c8d,3
+np.float64,0xbfe4712bd9e8e258,0xbfe62cb3d82034aa,3
+np.float64,0x3fe9a16b46f342d6,0x3fedb972b2542551,3
+np.float64,0xbfe57ab4536af568,0xbfe78c34b03569c2,3
+np.float64,0x3fb6d6ceb22dada0,0x3fb6de976964d6dd,3
+np.float64,0x3fc3ac23a3275848,0x3fc3c02de53919b8,3
+np.float64,0xbfccb531e7396a64,0xbfccf43ec69f6281,3
+np.float64,0xbfd2f07fc8a5e100,0xbfd33a35a8c41b62,3
+np.float64,0xbfe3e5dd04e7cbba,0xbfe57940157c27ba,3
+np.float64,0x3feefe40757dfc80,0x3ff51bc72b846af6,3
+np.float64,0x8000000000000001,0x8000000000000001,3
+np.float64,0x3fecb7b766796f6e,0x3ff1d28972a0fc7e,3
+np.float64,0xbfea1bf1357437e2,0xbfee89a6532bfd71,3
+np.float64,0xbfca3983b7347308,0xbfca696463b791ef,3
+np.float64,0x10000000000000,0x10000000000000,3
+np.float64,0xbf886b45d030d680,0xbf886b6bbc04314b,3
+np.float64,0x3fd5224bb5aa4498,0x3fd589c92e82218f,3
+np.float64,0xbfec799874f8f331,0xbff18d5158b8e640,3
+np.float64,0xbf88124410302480,0xbf88126863350a16,3
+np.float64,0xbfe37feaaa66ffd6,0xbfe4f7e24382e79d,3
+np.float64,0x3fd777eca1aeefd8,0x3fd8076ead6d55dc,3
+np.float64,0x3fecaaeb3af955d6,0x3ff1c4159fa3e965,3
+np.float64,0xbfeb81e4e6f703ca,0xbff08d4e4c77fada,3
+np.float64,0xbfd7d0a0edafa142,0xbfd866e37010312e,3
+np.float64,0x3feda48c00fb4918,0x3ff2f3fd33c36307,3
+np.float64,0x3feb87ecc4770fda,0x3ff09336e490deda,3
+np.float64,0xbfefd78ad27faf16,0xbff78abbafb50ac1,3
+np.float64,0x3fe58e918c6b1d24,0x3fe7a70b38cbf016,3
+np.float64,0x3fda163b95b42c78,0x3fdade86b88ba4ee,3
+np.float64,0x3fe8fc1aaf71f836,0x3fecab3f93b59df5,3
+np.float64,0xbf8de56f903bcac0,0xbf8de5b527cec797,3
+np.float64,0xbfec112db2f8225b,0xbff11dd648de706f,3
+np.float64,0x3fc3214713264290,0x3fc333b1c862f7d0,3
+np.float64,0xbfeb5e5836f6bcb0,0xbff06ac364b49177,3
+np.float64,0x3fc23d9777247b30,0x3fc24d8ae3bcb615,3
+np.float64,0xbfdf0eed65be1dda,0xbfe036cea9b9dfb6,3
+np.float64,0xbfb2d5c85a25ab90,0xbfb2da24bb409ff3,3
+np.float64,0xbfecdda0c3f9bb42,0xbff1fdf94fc6e89e,3
+np.float64,0x3fdfe79154bfcf24,0x3fe0b338e0476a9d,3
+np.float64,0xbfd712ac6bae2558,0xbfd79abde21f287b,3
+np.float64,0x3fea3f148a747e2a,0x3feec6bed9d4fa04,3
+np.float64,0x3fd4879e4ca90f3c,0x3fd4e632fa4e2edd,3
+np.float64,0x3fe9137a9e7226f6,0x3fecd0c441088d6a,3
+np.float64,0xbfc75bf4ef2eb7e8,0xbfc77da8347d742d,3
+np.float64,0xbfd94090a0b28122,0xbfd9f5458816ed5a,3
+np.float64,0x3fde439cbcbc8738,0x3fdf85fbf496b61f,3
+np.float64,0xbfe18bacdce3175a,0xbfe29210e01237f7,3
+np.float64,0xbfd58ec413ab1d88,0xbfd5fcd838f0a934,3
+np.float64,0xbfeae5af2d75cb5e,0xbfeff1de1b4a06be,3
+np.float64,0x3fb64d1a162c9a30,0x3fb65458fb831354,3
+np.float64,0x3fc18b1e15231640,0x3fc1994c6ffd7a6a,3
+np.float64,0xbfd7b881bcaf7104,0xbfd84ce89a9ee8c7,3
+np.float64,0x3feb916a40f722d4,0x3ff09c8aa851d7c4,3
+np.float64,0x3fdab5fbb5b56bf8,0x3fdb8de43961bbde,3
+np.float64,0x3fe4f35402e9e6a8,0x3fe6d75dc5082894,3
+np.float64,0x3fe2fdb2e5e5fb66,0x3fe454e32a5d2182,3
+np.float64,0x3fe8607195f0c0e4,0x3febb6a4c3bf6a5c,3
+np.float64,0x3fd543ca9aaa8794,0x3fd5ad49203ae572,3
+np.float64,0x3fe8e05ca1f1c0ba,0x3fec7eff123dcc58,3
+np.float64,0x3fe298b6ca65316e,0x3fe3d81d2927c4dd,3
+np.float64,0x3fcfecea733fd9d8,0x3fd0220f1d0faf78,3
+np.float64,0xbfe2e739f065ce74,0xbfe439004e73772a,3
+np.float64,0xbfd1ae6b82a35cd8,0xbfd1ea129a5ee756,3
+np.float64,0xbfeb7edff576fdc0,0xbff08a5a638b8a8b,3
+np.float64,0x3fe5b645ff6b6c8c,0x3fe7dcee1faefe3f,3
+np.float64,0xbfd478427ba8f084,0xbfd4d5fc7c239e60,3
+np.float64,0xbfe39904e3e7320a,0xbfe517972b30b1e5,3
+np.float64,0xbfd3b75b6ba76eb6,0xbfd40acf20a6e074,3
+np.float64,0x3fd596267aab2c4c,0x3fd604b01faeaf75,3
+np.float64,0x3fe134463762688c,0x3fe229fc36784a72,3
+np.float64,0x3fd25dadf7a4bb5c,0x3fd2a0b9e04ea060,3
+np.float64,0xbfc05d3e0b20ba7c,0xbfc068bd2bb9966f,3
+np.float64,0x3f8cf517b039ea00,0x3f8cf556ed74b163,3
+np.float64,0x3fda87361cb50e6c,0x3fdb5a75af897e7f,3
+np.float64,0x3fe53e1926ea7c32,0x3fe73acf01b8ff31,3
+np.float64,0x3fe2e94857e5d290,0x3fe43b8cc820f9c7,3
+np.float64,0x3fd81fe6acb03fcc,0x3fd8bc623c0068cf,3
+np.float64,0xbfddf662c3bbecc6,0xbfdf2e76dc90786e,3
+np.float64,0x3fece174fbf9c2ea,0x3ff2026a1a889580,3
+np.float64,0xbfdc83c5b8b9078c,0xbfdd8dcf6ee3b7da,3
+np.float64,0x3feaf5448f75ea8a,0x3ff0075b108bcd0d,3
+np.float64,0xbfebf32f7ef7e65f,0xbff0fed42aaa826a,3
+np.float64,0x3fe389e5e8e713cc,0x3fe5047ade055ccb,3
+np.float64,0x3f635cdcc026ba00,0x3f635cddeea082ce,3
+np.float64,0x3fae580f543cb020,0x3fae5c9d5108a796,3
+np.float64,0x3fec9fafce793f60,0x3ff1b77bec654f00,3
+np.float64,0x3fb19d226e233a40,0x3fb1a0b32531f7ee,3
+np.float64,0xbfdf9a71e7bf34e4,0xbfe086cef88626c7,3
+np.float64,0x8010000000000000,0x8010000000000000,3
+np.float64,0xbfef170ba2fe2e17,0xbff54ed4675f5b8a,3
+np.float64,0xbfcc6e2f8f38dc60,0xbfccab65fc34d183,3
+np.float64,0x3fee756c4bfcead8,0x3ff4258782c137e6,3
+np.float64,0xbfd461c218a8c384,0xbfd4be3e391f0ff4,3
+np.float64,0xbfe3b64686e76c8d,0xbfe53caa16d6c90f,3
+np.float64,0xbfc1c65d8d238cbc,0xbfc1d51e58f82403,3
+np.float64,0x3fe6e06c63edc0d8,0x3fe97cb832eeb6a2,3
+np.float64,0xbfc9fc20b933f840,0xbfca2ab004312d85,3
+np.float64,0xbfe29aa6df65354e,0xbfe3da7ecf3ba466,3
+np.float64,0x3fea4df7d1749bf0,0x3feee0d448bd4746,3
+np.float64,0xbfedec6161fbd8c3,0xbff3563e1d943aa2,3
+np.float64,0x3fdb6f0437b6de08,0x3fdc5a1888b1213d,3
+np.float64,0xbfe270cbd3e4e198,0xbfe3a72ac27a0b0c,3
+np.float64,0xbfdfff8068bfff00,0xbfe0c1088e3b8983,3
+np.float64,0xbfd28edbe6a51db8,0xbfd2d416c8ed363e,3
+np.float64,0xbfb4e35f9229c6c0,0xbfb4e9531d2a737f,3
+np.float64,0xbfee6727e97cce50,0xbff40e7717576e46,3
+np.float64,0xbfddb5fbddbb6bf8,0xbfdee5aad78f5361,3
+np.float64,0xbfdf9d3e9dbf3a7e,0xbfe0886b191f2957,3
+np.float64,0x3fa57e77042afce0,0x3fa5801518ea9342,3
+np.float64,0x3f95c4e4882b89c0,0x3f95c55003c8e714,3
+np.float64,0x3fd9b10f61b36220,0x3fda6fe5d635a8aa,3
+np.float64,0xbfe2973411652e68,0xbfe3d641fe9885fd,3
+np.float64,0xbfee87bd5a7d0f7b,0xbff443bea81b3fff,3
+np.float64,0x3f9ea064c83d40c0,0x3f9ea19025085b2f,3
+np.float64,0xbfe4b823dfe97048,0xbfe689623d30dc75,3
+np.float64,0xbfa06a326c20d460,0xbfa06aeacbcd3eb8,3
+np.float64,0x3fe1e5c4c1e3cb8a,0x3fe2fe44b822f20e,3
+np.float64,0x3f99dafaa833b600,0x3f99dbaec10a1a0a,3
+np.float64,0xbfed7cb3877af967,0xbff2bfe9e556aaf9,3
+np.float64,0x3fd604f2e2ac09e4,0x3fd67a89408ce6ba,3
+np.float64,0x3fec57b60f78af6c,0x3ff16881f46d60f7,3
+np.float64,0xbfea2e3a17745c74,0xbfeea95c7190fd42,3
+np.float64,0xbfd60a7c37ac14f8,0xbfd6806ed642de35,3
+np.float64,0xbfe544b9726a8973,0xbfe743ac399d81d7,3
+np.float64,0xbfd13520faa26a42,0xbfd16c02034a8fe0,3
+np.float64,0xbfea9ea59ff53d4b,0xbfef70538ee12e00,3
+np.float64,0x3fd66633f8accc68,0x3fd6e23c13ab0e9e,3
+np.float64,0xbfe4071bd3e80e38,0xbfe5a3c9ba897d81,3
+np.float64,0xbfbe1659fa3c2cb0,0xbfbe2831d4fed196,3
+np.float64,0xbfd3312777a6624e,0xbfd37df09b9baeba,3
+np.float64,0x3fd13997caa27330,0x3fd170a4900c8907,3
+np.float64,0xbfe7cbc235ef9784,0xbfead4c4d6cbf129,3
+np.float64,0xbfe1456571628acb,0xbfe23e4ec768c8e2,3
+np.float64,0xbfedf1a044fbe340,0xbff35da96773e176,3
+np.float64,0x3fce38b1553c7160,0x3fce8270709774f9,3
+np.float64,0xbfecb01761f9602f,0xbff1c9e9d382f1f8,3
+np.float64,0xbfe0a03560e1406b,0xbfe17b8d5a1ca662,3
+np.float64,0x3fe50f37cbea1e70,0x3fe6fc55e1ae7da6,3
+np.float64,0xbfe12d64a0625aca,0xbfe221d3a7834e43,3
+np.float64,0xbf6fb288403f6500,0xbf6fb28d6f389db6,3
+np.float64,0x3fda831765b50630,0x3fdb55eecae58ca9,3
+np.float64,0x3fe1a0fe4c6341fc,0x3fe2ab9564304425,3
+np.float64,0xbfef2678a77e4cf1,0xbff56ff42b2797bb,3
+np.float64,0xbfab269c1c364d40,0xbfab29df1cd48779,3
+np.float64,0x3fe8ec82a271d906,0x3fec92567d7a6675,3
+np.float64,0xbfc235115f246a24,0xbfc244ee567682ea,3
+np.float64,0x3feef5bf8d7deb80,0x3ff50ad4875ee9bd,3
+np.float64,0x3fe768b5486ed16a,0x3fea421356160e65,3
+np.float64,0xbfd4255684a84aae,0xbfd47e8baf7ec7f6,3
+np.float64,0x3fc7f67f2b2fed00,0x3fc81ae83cf92dd5,3
+np.float64,0x3fe9b1b19a736364,0x3fedd4b0e24ee741,3
+np.float64,0x3fb27eb9e624fd70,0x3fb282dacd89ce28,3
+np.float64,0xbfd490b710a9216e,0xbfd4efcdeb213458,3
+np.float64,0xbfd1347b2ca268f6,0xbfd16b55dece2d38,3
+np.float64,0x3fc6a5668d2d4ad0,0x3fc6c41452c0c087,3
+np.float64,0xbfca7b209f34f640,0xbfcaac710486f6bd,3
+np.float64,0x3fc23a1a47247438,0x3fc24a047fd4c27a,3
+np.float64,0x3fdb1413a8b62828,0x3fdbf595e2d994bc,3
+np.float64,0xbfea69b396f4d367,0xbfef11bdd2b0709a,3
+np.float64,0x3fd14c9958a29934,0x3fd1846161b10422,3
+np.float64,0xbfe205f44be40be8,0xbfe325283aa3c6a8,3
+np.float64,0x3fecd03c9ef9a07a,0x3ff1ee85aaf52a01,3
+np.float64,0x3fe34281d7e68504,0x3fe4aab63e6de816,3
+np.float64,0xbfe120e2376241c4,0xbfe213023ab03939,3
+np.float64,0xbfe951edc4f2a3dc,0xbfed3615e38576f8,3
+np.float64,0x3fe5a2286f6b4450,0x3fe7c196e0ec10ed,3
+np.float64,0xbfed7a3e1f7af47c,0xbff2bcc0793555d2,3
+np.float64,0x3fe050274960a04e,0x3fe11e2e256ea5cc,3
+np.float64,0xbfcfa71f653f4e40,0xbfcffc11483d6a06,3
+np.float64,0x3f6ead2e403d5a00,0x3f6ead32f314c052,3
+np.float64,0x3fe3a2a026674540,0x3fe523bfe085f6ec,3
+np.float64,0xbfe294a62e65294c,0xbfe3d31ebd0b4ca2,3
+np.float64,0xbfb4894d06291298,0xbfb48ef4b8e256b8,3
+np.float64,0xbfc0c042c1218084,0xbfc0cc98ac2767c4,3
+np.float64,0xbfc6a32cb52d4658,0xbfc6c1d1597ed06b,3
+np.float64,0xbfd30f7777a61eee,0xbfd35aa39fee34eb,3
+np.float64,0x3fe7fc2c2eeff858,0x3feb1d8a558b5537,3
+np.float64,0x7fefffffffffffff,0x7ff8000000000000,3
+np.float64,0xbfdadf917bb5bf22,0xbfdbbbae9a9f67a0,3
+np.float64,0xbfcf0395e13e072c,0xbfcf5366015f7362,3
+np.float64,0xbfe8644c9170c899,0xbfebbc98e74a227d,3
+np.float64,0x3fc3b2d8e52765b0,0x3fc3c6f7d44cffaa,3
+np.float64,0x3fc57407b92ae810,0x3fc58e12ccdd47a1,3
+np.float64,0x3fd56a560daad4ac,0x3fd5d62b8dfcc058,3
+np.float64,0x3fd595deefab2bbc,0x3fd6046420b2f79b,3
+np.float64,0xbfd5360f50aa6c1e,0xbfd59ebaacd815b8,3
+np.float64,0x3fdfb6aababf6d54,0x3fe0970b8aac9f61,3
+np.float64,0x3ff0000000000000,0x3ff921fb54442d18,3
+np.float64,0xbfeb3a8958f67513,0xbff04872e8278c79,3
+np.float64,0x3f9e1ea6683c3d40,0x3f9e1fc326186705,3
+np.float64,0x3fe6b6d5986d6dac,0x3fe94175bd60b19d,3
+np.float64,0xbfee4d90b77c9b21,0xbff3e60e9134edc2,3
+np.float64,0x3fd806ce0cb00d9c,0x3fd8a14c4855a8f5,3
+np.float64,0x3fd54acc75aa9598,0x3fd5b4b72fcbb5df,3
+np.float64,0xbfe59761f16b2ec4,0xbfe7b2fa5d0244ac,3
+np.float64,0xbfcd4fa3513a9f48,0xbfcd92d0814a5383,3
+np.float64,0xbfdc827523b904ea,0xbfdd8c577b53053c,3
+np.float64,0xbfd4bb7f34a976fe,0xbfd51d00d9a99360,3
+np.float64,0xbfe818bc87f03179,0xbfeb48d1ea0199c5,3
+np.float64,0xbfa8a2e15c3145c0,0xbfa8a5510ba0e45c,3
+np.float64,0xbfb6d15f422da2c0,0xbfb6d922689da015,3
+np.float64,0x3fcd04eaab3a09d8,0x3fcd46131746ef08,3
+np.float64,0x3fcfb5cfbb3f6ba0,0x3fd0059d308237f3,3
+np.float64,0x3fe8dcf609f1b9ec,0x3fec7997973010b6,3
+np.float64,0xbfdf1834d7be306a,0xbfe03c1d4e2b48f0,3
+np.float64,0x3fee82ae50fd055c,0x3ff43b545066fe1a,3
+np.float64,0xbfde039c08bc0738,0xbfdf3d6ed4d2ee5c,3
+np.float64,0x3fec07389bf80e72,0x3ff1137ed0acd161,3
+np.float64,0xbfef44c010fe8980,0xbff5b488ad22a4c5,3
+np.float64,0x3f76e722e02dce00,0x3f76e72ab2759d88,3
+np.float64,0xbfcaa9e6053553cc,0xbfcadc41125fca93,3
+np.float64,0x3fed6088147ac110,0x3ff29c06c4ef35fc,3
+np.float64,0x3fd32bd836a657b0,0x3fd3785fdb75909f,3
+np.float64,0xbfeedbb1d97db764,0xbff4d87f6c82a93c,3
+np.float64,0xbfe40f31d5e81e64,0xbfe5ae292cf258a2,3
+np.float64,0x7ff8000000000000,0x7ff8000000000000,3
+np.float64,0xbfeb2b25bc76564c,0xbff039d81388550c,3
+np.float64,0x3fec5008fa78a012,0x3ff1604195801da3,3
+np.float64,0x3fce2d4f293c5aa0,0x3fce76b99c2db4da,3
+np.float64,0xbfdc435412b886a8,0xbfdd45e7b7813f1e,3
+np.float64,0x3fdf2c9d06be593c,0x3fe047cb03c141b6,3
+np.float64,0x3fddefc61ebbdf8c,0x3fdf26fb8fad9fae,3
+np.float64,0x3fab50218436a040,0x3fab537395eaf3bb,3
+np.float64,0xbfd5b95a8fab72b6,0xbfd62a191a59343a,3
+np.float64,0x3fdbf803b4b7f008,0x3fdcf211578e98c3,3
+np.float64,0xbfec8c255979184b,0xbff1a1bee108ed30,3
+np.float64,0x3fe33cdaffe679b6,0x3fe4a3a318cd994f,3
+np.float64,0x3fd8cf585cb19eb0,0x3fd97a408bf3c38c,3
+np.float64,0x3fe919dde07233bc,0x3fecdb0ea13a2455,3
+np.float64,0xbfd5ba35e4ab746c,0xbfd62b024805542d,3
+np.float64,0x3fd2f933e7a5f268,0x3fd343527565e97c,3
+np.float64,0xbfe5b9f8ddeb73f2,0xbfe7e1f772c3e438,3
+np.float64,0x3fe843cd92f0879c,0x3feb8a92d68eae3e,3
+np.float64,0xbfd096b234a12d64,0xbfd0c7beca2c6605,3
+np.float64,0xbfef3363da7e66c8,0xbff58c98dde6c27c,3
+np.float64,0x3fd51b01ddaa3604,0x3fd582109d89ead1,3
+np.float64,0x3fea0f10ff741e22,0x3fee736c2d2a2067,3
+np.float64,0x3fc276e7b724edd0,0x3fc28774520bc6d4,3
+np.float64,0xbfef9abc9f7f3579,0xbff69d49762b1889,3
+np.float64,0x3fe1539ec0e2a73e,0x3fe24f370b7687d0,3
+np.float64,0x3fad72350c3ae460,0x3fad765e7766682a,3
+np.float64,0x3fa289a47c251340,0x3fa28aae12f41646,3
+np.float64,0xbfe5c488e5eb8912,0xbfe7f05d7e7dcddb,3
+np.float64,0xbfc22ef1d7245de4,0xbfc23ebeb990a1b8,3
+np.float64,0x3fe59a0b80eb3418,0x3fe7b695fdcba1de,3
+np.float64,0xbfe9cad619f395ac,0xbfedff0514d91e2c,3
+np.float64,0x3fc8bc74eb3178e8,0x3fc8e48cb22da666,3
+np.float64,0xbfc5389a3f2a7134,0xbfc551cd6febc544,3
+np.float64,0x3fce82feb33d0600,0x3fceceecce2467ef,3
+np.float64,0x3fda346791b468d0,0x3fdaff95154a4ca6,3
+np.float64,0x3fd04501fea08a04,0x3fd073397b32607e,3
+np.float64,0xbfb6be498a2d7c90,0xbfb6c5f93aeb0e57,3
+np.float64,0x3fe1f030dd63e062,0x3fe30ad8fb97cce0,3
+np.float64,0xbfee3fb36dfc7f67,0xbff3d0a5e380b86f,3
+np.float64,0xbfa876773c30ecf0,0xbfa878d9d3df6a3f,3
+np.float64,0x3fdb58296eb6b054,0x3fdc40ceffb17f82,3
+np.float64,0xbfea16b5d8742d6c,0xbfee809b99fd6adc,3
+np.float64,0xbfdc5062b6b8a0c6,0xbfdd547623275fdb,3
+np.float64,0x3fef6db242fedb64,0x3ff61ab4cdaef467,3
+np.float64,0xbfc9f778f933eef0,0xbfca25eef1088167,3
+np.float64,0xbfd22063eba440c8,0xbfd260c8766c69cf,3
+np.float64,0x3fdd2379f2ba46f4,0x3fde40b025cb1ffa,3
+np.float64,0xbfea967af2f52cf6,0xbfef61a178774636,3
+np.float64,0x3fe4f5b49fe9eb6a,0x3fe6da8311a5520e,3
+np.float64,0x3feccde17b799bc2,0x3ff1ebd0ea228b71,3
+np.float64,0x3fe1bb76506376ec,0x3fe2cb56fca01840,3
+np.float64,0xbfef94e583ff29cb,0xbff68aeab8ba75a2,3
+np.float64,0x3fed024a55fa0494,0x3ff228ea5d456e9d,3
+np.float64,0xbfe877b2a8f0ef65,0xbfebdaa1a4712459,3
+np.float64,0x3fef687a8d7ed0f6,0x3ff60cf5fef8d448,3
+np.float64,0xbfeeb2dc8afd65b9,0xbff48dda6a906cd6,3
+np.float64,0x3fdb2e28aeb65c50,0x3fdc12620655eb7a,3
+np.float64,0x3fedc1863afb830c,0x3ff31ae823315e83,3
+np.float64,0xbfe6b1bb546d6376,0xbfe93a38163e3a59,3
+np.float64,0x3fe479c78468f390,0x3fe637e5c0fc5730,3
+np.float64,0x3fbad1fade35a3f0,0x3fbade9a43ca05cf,3
+np.float64,0xbfe2d1c563e5a38b,0xbfe41e712785900c,3
+np.float64,0xbfc08c33ed211868,0xbfc09817a752d500,3
+np.float64,0xbfecce0935f99c12,0xbff1ebfe84524037,3
+np.float64,0x3fce4ef0e73c9de0,0x3fce995638a3dc48,3
+np.float64,0xbfd2fb2343a5f646,0xbfd345592517ca18,3
+np.float64,0x3fd848f7cdb091f0,0x3fd8e8bee5f7b49a,3
+np.float64,0x3fe532b7d2ea6570,0x3fe72b9ac747926a,3
+np.float64,0x3fd616aadcac2d54,0x3fd68d692c5cad42,3
+np.float64,0x3fd7720eb3aee41c,0x3fd801206a0e1e43,3
+np.float64,0x3fee835a35fd06b4,0x3ff43c7175eb7a54,3
+np.float64,0xbfe2e8f70b65d1ee,0xbfe43b2800a947a7,3
+np.float64,0xbfed38f45d7a71e9,0xbff26acd6bde7174,3
+np.float64,0xbfc0c62661218c4c,0xbfc0d28964d66120,3
+np.float64,0x3fe97940bef2f282,0x3fed76b986a74ee3,3
+np.float64,0x3fc96f7dc532def8,0x3fc99b20044c8fcf,3
+np.float64,0xbfd60201eeac0404,0xbfd677675efaaedc,3
+np.float64,0x3fe63c0867ec7810,0x3fe894f060200140,3
+np.float64,0xbfef6144b37ec289,0xbff5fa589a515ba8,3
+np.float64,0xbfde2da0c8bc5b42,0xbfdf6d0b59e3232a,3
+np.float64,0xbfd7401612ae802c,0xbfd7cb74ddd413b9,3
+np.float64,0x3fe41c012de83802,0x3fe5be9d87da3f82,3
+np.float64,0x3fdf501609bea02c,0x3fe05c1d96a2270b,3
+np.float64,0x3fcf9fa1233f3f40,0x3fcff45598e72f07,3
+np.float64,0x3fd4e3895ea9c714,0x3fd547580d8392a2,3
+np.float64,0x3fe1e8ff5fe3d1fe,0x3fe3022a0b86a2ab,3
+np.float64,0xbfe0aa55956154ab,0xbfe18768823da589,3
+np.float64,0x3fb2a0aa26254150,0x3fb2a4e1faff1c93,3
+np.float64,0x3fd3823417a70468,0x3fd3d2f808dbb167,3
+np.float64,0xbfaed323643da640,0xbfaed7e9bef69811,3
+np.float64,0x3fe661e8c4ecc3d2,0x3fe8c9c535f43c16,3
+np.float64,0xbfa429777c2852f0,0xbfa42acd38ba02a6,3
+np.float64,0x3fb5993ea22b3280,0x3fb59fd353e47397,3
+np.float64,0x3fee62d21efcc5a4,0x3ff40788f9278ade,3
+np.float64,0xbf813fb810227f80,0xbf813fc56d8f3c53,3
+np.float64,0x3fd56205deaac40c,0x3fd5cd59671ef193,3
+np.float64,0x3fd31a4de5a6349c,0x3fd365fe401b66e8,3
+np.float64,0xbfec7cc7a478f98f,0xbff190cf69703ca4,3
+np.float64,0xbf755881a02ab100,0xbf755887f52e7794,3
+np.float64,0x3fdd1c92e6ba3924,0x3fde38efb4e8605c,3
+np.float64,0x3fdf49da80be93b4,0x3fe0588af8dd4a34,3
+np.float64,0x3fe1fcdbf2e3f9b8,0x3fe31a27b9d273f2,3
+np.float64,0x3fe2a0f18be541e4,0x3fe3e23b159ce20f,3
+np.float64,0xbfed0f1561fa1e2b,0xbff23820fc0a54ca,3
+np.float64,0x3fe34a006c669400,0x3fe4b419b9ed2b83,3
+np.float64,0xbfd51be430aa37c8,0xbfd583005a4d62e7,3
+np.float64,0x3fe5ec4e336bd89c,0x3fe826caad6b0f65,3
+np.float64,0xbfdad71b1fb5ae36,0xbfdbb25bef8b53d8,3
+np.float64,0xbfe8eac2d871d586,0xbfec8f8cac7952f9,3
+np.float64,0xbfe1d5aef663ab5e,0xbfe2eae14b7ccdfd,3
+np.float64,0x3fec11d3157823a6,0x3ff11e8279506753,3
+np.float64,0xbfe67ff1166cffe2,0xbfe8f3e61c1dfd32,3
+np.float64,0xbfd101eecda203de,0xbfd136e0e9557022,3
+np.float64,0x3fde6c9e5cbcd93c,0x3fdfb48ee7efe134,3
+np.float64,0x3fec3ede9c787dbe,0x3ff14dead1e5cc1c,3
+np.float64,0x3fe7a022086f4044,0x3fea93ce2980b161,3
+np.float64,0xbfc3b2b1b7276564,0xbfc3c6d02d60bb21,3
+np.float64,0x7ff0000000000000,0x7ff8000000000000,3
+np.float64,0x3fe60b5647ec16ac,0x3fe8517ef0544b40,3
+np.float64,0xbfd20ab654a4156c,0xbfd24a2f1b8e4932,3
+np.float64,0xbfe4aa1e2f69543c,0xbfe677005cbd2646,3
+np.float64,0xbfc831cc0b306398,0xbfc8574910d0b86d,3
+np.float64,0xbfc3143495262868,0xbfc3267961b79198,3
+np.float64,0x3fc14d64c1229ac8,0x3fc15afea90a319d,3
+np.float64,0x3fc0a5a207214b48,0x3fc0b1bd2f15c1b0,3
+np.float64,0xbfc0b8351521706c,0xbfc0c4792672d6db,3
+np.float64,0xbfdc383600b8706c,0xbfdd398429e163bd,3
+np.float64,0x3fd9e17321b3c2e8,0x3fdaa4c4d140a622,3
+np.float64,0xbfd44f079ea89e10,0xbfd4aa7d6deff4ab,3
+np.float64,0xbfc3de52a927bca4,0xbfc3f2f8f65f4c3f,3
+np.float64,0x3fe7779d566eef3a,0x3fea57f8592dbaad,3
+np.float64,0xbfe309039e661207,0xbfe462f47f9a64e5,3
+np.float64,0x3fd8e06d08b1c0dc,0x3fd98cc946e440a6,3
+np.float64,0x3fdde66c9ebbccd8,0x3fdf1c68009a8dc1,3
+np.float64,0x3fd4369c6ba86d38,0x3fd490bf460a69e4,3
+np.float64,0xbfe132252fe2644a,0xbfe22775e109cc2e,3
+np.float64,0x3fee15483c7c2a90,0x3ff39111de89036f,3
+np.float64,0xbfc1d5ee8123abdc,0xbfc1e4d66c6871a5,3
+np.float64,0x3fc851c52b30a388,0x3fc877d93fb4ae1a,3
+np.float64,0x3fdaade707b55bd0,0x3fdb85001661fffe,3
+np.float64,0xbfe79fb7f96f3f70,0xbfea9330ec27ac10,3
+np.float64,0xbfe8b0f725f161ee,0xbfec3411c0e4517a,3
+np.float64,0xbfea79f5f374f3ec,0xbfef2e9dd9270488,3
+np.float64,0x3fe0b5fe5b616bfc,0x3fe19512a36a4534,3
+np.float64,0xbfad7c622c3af8c0,0xbfad808fea96a804,3
+np.float64,0xbfe3e24dbce7c49c,0xbfe574b4c1ea9818,3
+np.float64,0xbfe80b038af01607,0xbfeb33fec279576a,3
+np.float64,0xbfef69e2ea7ed3c6,0xbff610a5593a18bc,3
+np.float64,0x3fdcc0bb39b98178,0x3fddd1f8c9a46430,3
+np.float64,0xbfba39976a347330,0xbfba4563bb5369a4,3
+np.float64,0xbfebf9768ef7f2ed,0xbff10548ab725f74,3
+np.float64,0xbfec21c066f84381,0xbff12f2803ba052f,3
+np.float64,0xbfca216a6b3442d4,0xbfca50c5e1e5748e,3
+np.float64,0x3fd5e40da4abc81c,0x3fd65783f9a22946,3
+np.float64,0x3fc235ca17246b98,0x3fc245a8f453173f,3
+np.float64,0x3fecb5b867796b70,0x3ff1d046a0bfda69,3
+np.float64,0x3fcb457fef368b00,0x3fcb7b6daa8165a7,3
+np.float64,0xbfa5ed6f7c2bdae0,0xbfa5ef27244e2e42,3
+np.float64,0x3fecf618a1f9ec32,0x3ff21a86cc104542,3
+np.float64,0x3fe9d95413f3b2a8,0x3fee178dcafa11fc,3
+np.float64,0xbfe93a5357f274a7,0xbfed0f9a565da84a,3
+np.float64,0xbfeb9e45ff773c8c,0xbff0a93cab8e258d,3
+np.float64,0x3fcbd9d0bd37b3a0,0x3fcc134e87cae241,3
+np.float64,0x3fe55d4db76aba9c,0x3fe764a0e028475a,3
+np.float64,0xbfc8a6fc71314df8,0xbfc8ceaafbfc59a7,3
+np.float64,0x3fe0615fa660c2c0,0x3fe1323611c4cbc2,3
+np.float64,0x3fb965558632cab0,0x3fb9700b84de20ab,3
+np.float64,0x8000000000000000,0x8000000000000000,3
+np.float64,0x3fe76776c6eeceee,0x3fea40403e24a9f1,3
+np.float64,0x3fe3b7f672676fec,0x3fe53ece71a1a1b1,3
+np.float64,0xbfa9b82ba4337050,0xbfa9baf15394ca64,3
+np.float64,0xbfe31faf49663f5e,0xbfe47f31b1ca73dc,3
+np.float64,0xbfcc4c6beb3898d8,0xbfcc88c5f814b2c1,3
+np.float64,0x3fd481530aa902a8,0x3fd4df8df03bc155,3
+np.float64,0x3fd47593b8a8eb28,0x3fd4d327ab78a1a8,3
+np.float64,0x3fd70e6ccbae1cd8,0x3fd7962fe8b63d46,3
+np.float64,0x3fd25191f7a4a324,0x3fd2941623c88e02,3
+np.float64,0x3fd0603ef0a0c07c,0x3fd08f64e97588dc,3
+np.float64,0xbfc653bae52ca774,0xbfc6711e5e0d8ea9,3
+np.float64,0xbfd11db8fea23b72,0xbfd153b63c6e8812,3
+np.float64,0xbfea9bde25f537bc,0xbfef6b52268e139a,3
+np.float64,0x1,0x1,3
+np.float64,0xbfefd3806d7fa701,0xbff776dcef9583ca,3
+np.float64,0xbfe0fb8cfde1f71a,0xbfe1e6e2e774a8f8,3
+np.float64,0x3fea384534f4708a,0x3feebadaa389be0d,3
+np.float64,0x3feff761c97feec4,0x3ff866157b9d072d,3
+np.float64,0x3fe7131ccb6e263a,0x3fe9c58b4389f505,3
+np.float64,0x3fe9084f7872109e,0x3fecbed0355dbc8f,3
+np.float64,0x3f708e89e0211d00,0x3f708e8cd4946b9e,3
+np.float64,0xbfe39185f067230c,0xbfe50e1cd178244d,3
+np.float64,0x3fd67cc1a9acf984,0x3fd6fa514784b48c,3
+np.float64,0xbfecaef005f95de0,0xbff1c89c9c3ef94a,3
+np.float64,0xbfe12eec81e25dd9,0xbfe223a4285bba9a,3
+np.float64,0x3fbe7f9faa3cff40,0x3fbe92363525068d,3
+np.float64,0xbfe1950b2b632a16,0xbfe29d45fc1e4ce9,3
+np.float64,0x3fe45049e6e8a094,0x3fe6020de759e383,3
+np.float64,0x3fe4d10c8969a21a,0x3fe6aa1fe42cbeb9,3
+np.float64,0xbfe9d04658f3a08d,0xbfee08370a0dbf0c,3
+np.float64,0x3fe14fb314e29f66,0x3fe24a8d73663521,3
+np.float64,0xbfef4abfe4fe9580,0xbff5c2c1ff1250ca,3
+np.float64,0xbfe6162b366c2c56,0xbfe86073ac3c6243,3
+np.float64,0x3feffe781e7ffcf0,0x3ff8d2cbedd6a1b5,3
+np.float64,0xbff0000000000000,0xbff921fb54442d18,3
+np.float64,0x3fc1dc45ad23b888,0x3fc1eb3d9bddda58,3
+np.float64,0xbfe793f6fcef27ee,0xbfea81c93d65aa64,3
+np.float64,0x3fdef6d2bbbdeda4,0x3fe029079d42efb5,3
+np.float64,0xbfdf0ac479be1588,0xbfe0346dbc95963f,3
+np.float64,0xbfd33927d7a67250,0xbfd38653f90a5b73,3
+np.float64,0xbfe248b072e49161,0xbfe37631ef6572e1,3
+np.float64,0xbfc8ceb6af319d6c,0xbfc8f7288657f471,3
+np.float64,0x3fdd7277fcbae4f0,0x3fde99886e6766ef,3
+np.float64,0xbfe0d30c6561a619,0xbfe1b72f90bf53d6,3
+np.float64,0xbfcb0fe07d361fc0,0xbfcb448e2eae9542,3
+np.float64,0xbfe351f57fe6a3eb,0xbfe4be13eef250f2,3
+np.float64,0x3fe85ec02cf0bd80,0x3febb407e2e52e4c,3
+np.float64,0x3fc8bc59b53178b0,0x3fc8e470f65800ec,3
+np.float64,0xbfd278d447a4f1a8,0xbfd2bd133c9c0620,3
+np.float64,0x3feda5cfd87b4ba0,0x3ff2f5ab4324f43f,3
+np.float64,0xbfd2b32a36a56654,0xbfd2fa09c36afd34,3
+np.float64,0xbfed4a81cb7a9504,0xbff28077a4f4fff4,3
+np.float64,0x3fdf079bf9be0f38,0x3fe0329f7fb13f54,3
+np.float64,0x3fd14097f6a28130,0x3fd177e9834ec23f,3
+np.float64,0xbfaeab11843d5620,0xbfaeafc5531eb6b5,3
+np.float64,0xbfac3f8c14387f20,0xbfac433893d53360,3
+np.float64,0xbfc139d7ed2273b0,0xbfc14743adbbe660,3
+np.float64,0x3fe78cb02cef1960,0x3fea7707f76edba9,3
+np.float64,0x3fefe16b41ffc2d6,0x3ff7bff36a7aa7b8,3
+np.float64,0x3fec5260d378a4c2,0x3ff162c588b0da38,3
+np.float64,0x3fedb146f17b628e,0x3ff304f90d3a15d1,3
+np.float64,0x3fd1fd45f7a3fa8c,0x3fd23c2dc3929e20,3
+np.float64,0x3fe0898a5ee11314,0x3fe1610c63e726eb,3
+np.float64,0x3fe7719946eee332,0x3fea4f205eecb59f,3
+np.float64,0x3fe955218972aa44,0x3fed3b530c1f7651,3
+np.float64,0x3fe0ccbf4461997e,0x3fe1afc7b4587836,3
+np.float64,0xbfe9204314f24086,0xbfece5605780e346,3
+np.float64,0xbfe552017feaa403,0xbfe755773cbd74d5,3
+np.float64,0x3fd8ce4b32b19c98,0x3fd9791c8dd44eae,3
+np.float64,0x3fef89acd9ff135a,0x3ff668f78adf7ced,3
+np.float64,0x3fc9d713ad33ae28,0x3fca04da6c293bbd,3
+np.float64,0xbfe22d9c4de45b38,0xbfe3553effadcf92,3
+np.float64,0x3fa5cda38c2b9b40,0x3fa5cf53c5787482,3
+np.float64,0x3fa878ebdc30f1e0,0x3fa87b4f2bf1d4c3,3
+np.float64,0x3fe8030353700606,0x3feb27e196928789,3
+np.float64,0x3fb50607222a0c10,0x3fb50c188ce391e6,3
+np.float64,0x3fd9ba4ab4b37494,0x3fda79fa8bd40f45,3
+np.float64,0x3fb564598e2ac8b0,0x3fb56abe42d1ba13,3
+np.float64,0xbfd1177c83a22efa,0xbfd14d3d7ef30cc4,3
+np.float64,0xbfd952cec7b2a59e,0xbfda09215d17c0ac,3
+np.float64,0x3fe1d8066663b00c,0x3fe2edb35770b8dd,3
+np.float64,0xbfc89427a3312850,0xbfc8bb7a7c389497,3
+np.float64,0xbfe86ebfd3f0dd80,0xbfebccc2ba0f506c,3
+np.float64,0x3fc390578b2720b0,0x3fc3a40cb7f5f728,3
+np.float64,0xbfd122f9b8a245f4,0xbfd15929dc57a897,3
+np.float64,0x3f8d0636d03a0c80,0x3f8d06767de576df,3
+np.float64,0xbfe4b55d8b696abb,0xbfe685be537a9637,3
+np.float64,0xbfdfd51cf9bfaa3a,0xbfe0a894fcff0c76,3
+np.float64,0xbfd37c1f52a6f83e,0xbfd3cc9593c37aad,3
+np.float64,0x3fd0e8283ea1d050,0x3fd11c25c800785a,3
+np.float64,0x3fd3160784a62c10,0x3fd36183a6c2880c,3
+np.float64,0x3fd4c66e57a98cdc,0x3fd5288fe3394eff,3
+np.float64,0x3fee2f7e3afc5efc,0x3ff3b8063eb30cdc,3
+np.float64,0xbfe526773a6a4cee,0xbfe71b4364215b18,3
+np.float64,0x3fea01181e740230,0x3fee5b65eccfd130,3
+np.float64,0xbfe51c03f76a3808,0xbfe70d5919d37587,3
+np.float64,0x3fd97e1375b2fc28,0x3fda3845da40b22b,3
+np.float64,0x3fd5c14a14ab8294,0x3fd632890d07ed03,3
+np.float64,0xbfec9b474279368e,0xbff1b28f50584fe3,3
+np.float64,0x3fe0139ca860273a,0x3fe0d7fc377f001c,3
+np.float64,0x3fdb080c9db61018,0x3fdbe85056358fa0,3
+np.float64,0xbfdd72ceb1bae59e,0xbfde99ea171661eb,3
+np.float64,0xbfe64e934fec9d26,0xbfe8aec2ef24be63,3
+np.float64,0x3fd1036a93a206d4,0x3fd1386adabe01bd,3
+np.float64,0x3febc9d4a5f793aa,0x3ff0d4c069f1e67d,3
+np.float64,0xbfe547a16fea8f43,0xbfe747902fe6fb4d,3
+np.float64,0x3fc289b0f9251360,0x3fc29a709de6bdd9,3
+np.float64,0xbfe694494a6d2892,0xbfe9108f3dc133e2,3
+np.float64,0x3fd827dfe4b04fc0,0x3fd8c4fe40532b91,3
+np.float64,0xbfe8b89418f17128,0xbfec400c5a334b2e,3
+np.float64,0x3fed5605147aac0a,0x3ff28ed1f612814a,3
+np.float64,0xbfed36af31fa6d5e,0xbff26804e1f71af0,3
+np.float64,0x3fdbb01c02b76038,0x3fdca2381558bbf0,3
+np.float64,0x3fe2a951666552a2,0x3fe3ec88f780f9e6,3
+np.float64,0x3fe662defbecc5be,0x3fe8cb1dbfca98ab,3
+np.float64,0x3fd098b1b3a13164,0x3fd0c9d064e4eaf2,3
+np.float64,0x3fefa10edeff421e,0x3ff6b1c6187b18a8,3
+np.float64,0xbfec4feb7a789fd7,0xbff16021ef37a219,3
+np.float64,0x3fd8e415bbb1c82c,0x3fd990c1f8b786bd,3
+np.float64,0xbfead5a09275ab41,0xbfefd44fab5b4f6e,3
+np.float64,0xbfe8666c16f0ccd8,0xbfebbfe0c9f2a9ae,3
+np.float64,0x3fdc962132b92c44,0x3fdda2525a6f406c,3
+np.float64,0xbfe2037f03e406fe,0xbfe3222ec2a3449e,3
+np.float64,0xbfec82c27e790585,0xbff197626ea9df1e,3
+np.float64,0x3fd2b4e03ca569c0,0x3fd2fbd3c7fda23e,3
+np.float64,0xbfe9b0dee5f361be,0xbfedd34f6d3dfe8a,3
+np.float64,0x3feef45cd17de8ba,0x3ff508180687b591,3
+np.float64,0x3f82c39bf0258700,0x3f82c3ad24c3b3f1,3
+np.float64,0xbfca848cfd350918,0xbfcab612ce258546,3
+np.float64,0x3fd6442aaaac8854,0x3fd6bdea54016e48,3
+np.float64,0x3fe550799e6aa0f4,0x3fe75369c9ea5b1e,3
+np.float64,0xbfe0e9d5a361d3ac,0xbfe1d20011139d89,3
+np.float64,0x3fbfc9ff1e3f9400,0x3fbfdf0ea6885c80,3
+np.float64,0xbfa187e8b4230fd0,0xbfa188c95072092e,3
+np.float64,0x3fcd28c9533a5190,0x3fcd6ae879c21b47,3
+np.float64,0x3fc6227ec52c4500,0x3fc63f1fbb441d29,3
+np.float64,0x3fe9b7a2ed736f46,0x3feddeab49b2d176,3
+np.float64,0x3fd4aee93da95dd4,0x3fd50fb3b71e0339,3
+np.float64,0xbfe164dacf62c9b6,0xbfe263bb2f7dd5d9,3
+np.float64,0x3fec62e525f8c5ca,0x3ff17496416d9921,3
+np.float64,0x3fdd363ee0ba6c7c,0x3fde55c6a49a5f86,3
+np.float64,0x3fe65cbf75ecb97e,0x3fe8c28d31ff3ebd,3
+np.float64,0xbfe76d27ca6eda50,0xbfea4899e3661425,3
+np.float64,0xbfc305738d260ae8,0xbfc3178dcfc9d30f,3
+np.float64,0xbfd3aa2a54a75454,0xbfd3fcf1e1ce8328,3
+np.float64,0x3fd1609fc9a2c140,0x3fd1992efa539b9f,3
+np.float64,0xbfac1291bc382520,0xbfac162cc7334b4d,3
+np.float64,0xbfedb461ea7b68c4,0xbff309247850455d,3
+np.float64,0xbfe8d2adf8f1a55c,0xbfec6947be90ba92,3
+np.float64,0xbfd7128965ae2512,0xbfd79a9855bcfc5a,3
+np.float64,0x3fe8deb09471bd62,0x3fec7c56b3aee531,3
+np.float64,0xbfe5f4d329ebe9a6,0xbfe8327ea8189af8,3
+np.float64,0xbfd3b46ac9a768d6,0xbfd407b80b12ff17,3
+np.float64,0x3fec899d7cf9133a,0x3ff19ef26baca36f,3
+np.float64,0xbfec192fd5783260,0xbff126306e507fd0,3
+np.float64,0x3fe945bdaef28b7c,0x3fed222f787310bf,3
+np.float64,0xbfeff9635d7ff2c7,0xbff87d6773f318eb,3
+np.float64,0xbfd604b81cac0970,0xbfd67a4aa852559a,3
+np.float64,0x3fcd1cc9d53a3990,0x3fcd5e962e237c24,3
+np.float64,0xbfed77b0fffaef62,0xbff2b97a1c9b6483,3
+np.float64,0xbfc9c69325338d28,0xbfc9f401500402fb,3
+np.float64,0xbfdf97e246bf2fc4,0xbfe0855601ea9db3,3
+np.float64,0x3fc7e6304f2fcc60,0x3fc80a4e718504cd,3
+np.float64,0x3fec3b599e7876b4,0x3ff14a2d1b9c68e6,3
+np.float64,0xbfe98618e1f30c32,0xbfed8bfbb31c394a,3
+np.float64,0xbfe59b3c0feb3678,0xbfe7b832d6df81de,3
+np.float64,0xbfe54ce2fe6a99c6,0xbfe74e9a85be4116,3
+np.float64,0x3fc9db49cb33b690,0x3fca092737ef500a,3
+np.float64,0xbfb4a922ae295248,0xbfb4aee4e39078a9,3
+np.float64,0xbfd0e542e0a1ca86,0xbfd11925208d66af,3
+np.float64,0x3fd70543f2ae0a88,0x3fd78c5e9238a3ee,3
+np.float64,0x3fd67f7a7facfef4,0x3fd6fd3998df8545,3
+np.float64,0xbfe40b643d6816c8,0xbfe5a947e427f298,3
+np.float64,0xbfcd85f69b3b0bec,0xbfcdcaa24b75f1a3,3
+np.float64,0x3fec705fb4f8e0c0,0x3ff1833c82163ee2,3
+np.float64,0x3fb37650ea26eca0,0x3fb37b20c16fb717,3
+np.float64,0x3fe5ebfa55ebd7f4,0x3fe826578d716e70,3
+np.float64,0x3fe991dfe5f323c0,0x3fed9f8a4bf1f588,3
+np.float64,0xbfd658bd0aacb17a,0xbfd6d3dd06e54900,3
+np.float64,0xbfc24860252490c0,0xbfc258701a0b9290,3
+np.float64,0xbfefb8d763ff71af,0xbff705b6ea4a569d,3
+np.float64,0x3fb8fcb4ae31f970,0x3fb906e809e7899f,3
+np.float64,0x3fce6343cb3cc688,0x3fceae41d1629625,3
+np.float64,0xbfd43d5a11a87ab4,0xbfd497da25687e07,3
+np.float64,0xbfe9568851f2ad11,0xbfed3d9e5fe83a76,3
+np.float64,0x3fe1b66153e36cc2,0x3fe2c53c7e016271,3
+np.float64,0x3fef27452bfe4e8a,0x3ff571b3486ed416,3
+np.float64,0x3fca87c0a7350f80,0x3fcab958a7bb82d4,3
+np.float64,0xbfd8776a8fb0eed6,0xbfd91afaf2f50edf,3
+np.float64,0x3fe9522a76f2a454,0x3fed3679264e1525,3
+np.float64,0x3fea14ff2cf429fe,0x3fee7da6431cc316,3
+np.float64,0x3fe970618bf2e0c4,0x3fed68154d54dd97,3
+np.float64,0x3fd3410cfca68218,0x3fd38e9b21792240,3
+np.float64,0xbf6a8070c0350100,0xbf6a8073c7c34517,3
+np.float64,0xbfbe449de23c8938,0xbfbe56c8e5e4d98b,3
+np.float64,0x3fedbc92e27b7926,0x3ff314313216d8e6,3
+np.float64,0xbfe3be4706677c8e,0xbfe546d3ceb85aea,3
+np.float64,0x3fe30cd6d76619ae,0x3fe467b6f2664a8d,3
+np.float64,0x3fd7d69b21afad38,0x3fd86d54284d05ad,3
+np.float64,0xbfe501001fea0200,0xbfe6e978afcff4d9,3
+np.float64,0xbfe44ba3d8e89748,0xbfe5fc0a31cd1e3e,3
+np.float64,0x3fec52f7c078a5f0,0x3ff16367acb209b2,3
+np.float64,0xbfcb19efcb3633e0,0xbfcb4ed9235a7d47,3
+np.float64,0xbfab86796c370cf0,0xbfab89df7bf15710,3
+np.float64,0xbfb962feda32c600,0xbfb96db1e1679c98,3
+np.float64,0x3fe0dd14e861ba2a,0x3fe1c2fc72810567,3
+np.float64,0x3fe41bcc6de83798,0x3fe5be59b7f9003b,3
+np.float64,0x3fc82f4c4f305e98,0x3fc854bd9798939f,3
+np.float64,0xbfcd143a613a2874,0xbfcd55cbd1619d84,3
+np.float64,0xbfd52da61baa5b4c,0xbfd595d0b3543439,3
+np.float64,0xbfb71b4a8e2e3698,0xbfb7235a4ab8432f,3
+np.float64,0xbfec141a19782834,0xbff120e1e39fc856,3
+np.float64,0xbfdba9319db75264,0xbfdc9a8ca2578bb2,3
+np.float64,0xbfbce5d74639cbb0,0xbfbcf5a4878cfa51,3
+np.float64,0x3fde67f7b3bccff0,0x3fdfaf45a9f843ad,3
+np.float64,0xbfe12d87bc625b10,0xbfe221fd4476eb71,3
+np.float64,0x3fe35b8f6be6b71e,0x3fe4ca20f65179e1,3
+np.float64,0xbfdbada1d3b75b44,0xbfdc9f78b19f93d1,3
+np.float64,0xbfc60159c52c02b4,0xbfc61d79b879f598,3
+np.float64,0x3fd6b81c38ad7038,0x3fd739c27bfa16d8,3
+np.float64,0xbfd646a253ac8d44,0xbfd6c08c19612bbb,3
+np.float64,0xbfe6babef0ed757e,0xbfe94703d0bfa311,3
+np.float64,0xbfed5671f1faace4,0xbff28f5a3f3683d0,3
+np.float64,0x3fc01d1e85203a40,0x3fc02817ec0dfd38,3
+np.float64,0xbfe9188a61f23115,0xbfecd8eb5da84223,3
+np.float64,0x3fdca3bab9b94774,0x3fddb1868660c239,3
+np.float64,0xbfa255750c24aaf0,0xbfa25675f7b36343,3
+np.float64,0x3fb3602db626c060,0x3fb364ed2d5b2876,3
+np.float64,0xbfd30a14bda6142a,0xbfd354ff703b8862,3
+np.float64,0xbfe1cfe381639fc7,0xbfe2e3e720b968c8,3
+np.float64,0xbfd2af6a4fa55ed4,0xbfd2f61e190bcd1f,3
+np.float64,0xbfe93c50937278a1,0xbfed12d64bb10d73,3
+np.float64,0x3fddd8bc44bbb178,0x3fdf0ced7f9005cc,3
+np.float64,0x3fdb2bc73cb65790,0x3fdc0fc0e18e425e,3
+np.float64,0xbfd073f6aba0e7ee,0xbfd0a3cb5468a961,3
+np.float64,0x3fed4bad7b7a975a,0x3ff281ebeb75e414,3
+np.float64,0xbfdc75b50bb8eb6a,0xbfdd7e1a7631cb22,3
+np.float64,0x3fd458a90fa8b154,0x3fd4b4a5817248ce,3
+np.float64,0x3feead5db57d5abc,0x3ff484286fab55ff,3
+np.float64,0x3fb3894382271280,0x3fb38e217b4e7905,3
+np.float64,0xffefffffffffffff,0x7ff8000000000000,3
+np.float64,0xbfe428212ae85042,0xbfe5ce36f226bea8,3
+np.float64,0xbfc08b39f7211674,0xbfc0971b93ebc7ad,3
+np.float64,0xbfc2e7cf5525cfa0,0xbfc2f994eb72b623,3
+np.float64,0xbfdb0d85afb61b0c,0xbfdbee5a2de3c5db,3
+np.float64,0xfff0000000000000,0x7ff8000000000000,3
+np.float64,0xbfd0d36af7a1a6d6,0xbfd106a5f05ef6ff,3
+np.float64,0xbfc333d0912667a0,0xbfc3467162b7289a,3
+np.float64,0x3fcdababc53b5758,0x3fcdf16458c20fa8,3
+np.float64,0x3fd0821b38a10438,0x3fd0b26e3e0b9185,3
+np.float64,0x0,0x0,3
+np.float64,0x3feb7f70edf6fee2,0x3ff08ae81854bf20,3
+np.float64,0x3fe6e075716dc0ea,0x3fe97cc5254be6ff,3
+np.float64,0x3fea13b682f4276e,0x3fee7b6f18073b5b,3
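
(Aside, not part of the patch: the rows in these umath-validation-set files follow the format declared in each file's header line, dtype,input,output,ulperrortol. The second and third columns are the IEEE-754 bit patterns of the input value and the expected function output, and the last column is the allowed error in units-in-the-last-place. A minimal sketch of a reader for this format is below; it is not the repository's own test harness, and the file path in the final comment is only an assumption for illustration.)

    import csv
    import numpy as np

    def bits_to_float(token, dtype):
        # Reinterpret a hex bit-pattern string (e.g. "0x3f800000") as a float.
        uint = np.uint32 if np.dtype(dtype).itemsize == 4 else np.uint64
        return uint(int(token, 16)).view(dtype)

    def ulp_diff(a, b, dtype):
        # Bit-pattern distance; this equals the ULP count when a and b share
        # a sign, which holds for the small tolerances used in these files.
        uint = np.uint32 if np.dtype(dtype).itemsize == 4 else np.uint64
        ia = int(np.array(a, dtype=dtype).view(uint))
        ib = int(np.array(b, dtype=dtype).view(uint))
        return abs(ia - ib)

    def check_file(path, func):
        with open(path) as fh:
            for row in csv.reader(fh):
                if not row or row[0] == "dtype":   # skip header / blank lines
                    continue
                dtype = np.float32 if row[0] == "np.float32" else np.float64
                x = bits_to_float(row[1], dtype)
                expected = bits_to_float(row[2], dtype)
                tol = int(row[3])
                with np.errstate(all="ignore"):
                    got = func(x)
                if np.isnan(expected):             # any NaN matches a NaN row
                    assert np.isnan(got)
                else:
                    assert ulp_diff(expected, got, dtype) <= tol

    # Hypothetical usage against the file added below:
    # check_file("umath-validation-set-arcsinh.csv", np.arcsinh)

(End of aside; the patch continues with the next file.)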
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv
new file mode 100644
index 00000000..1da29c82
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arcsinh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbf24142a,0xbf1a85ef,2
+np.float32,0x3e71cf91,0x3e6f9e37,2
+np.float32,0xe52a7,0xe52a7,2
+np.float32,0x3ef1e074,0x3ee9add9,2
+np.float32,0x806160ac,0x806160ac,2
+np.float32,0x7e2d59a2,0x42af4798,2
+np.float32,0xbf32cac9,0xbf26bf96,2
+np.float32,0x3f081701,0x3f026142,2
+np.float32,0x3f23cc88,0x3f1a499c,2
+np.float32,0xbf090d94,0xbf033ad0,2
+np.float32,0x803af2fc,0x803af2fc,2
+np.float32,0x807eb17e,0x807eb17e,2
+np.float32,0x5c0d8e,0x5c0d8e,2
+np.float32,0x3f7b79d2,0x3f5e6b1d,2
+np.float32,0x806feeae,0x806feeae,2
+np.float32,0x3e4b423a,0x3e49f274,2
+np.float32,0x3f49e5ac,0x3f394a41,2
+np.float32,0x3f18cd4e,0x3f10ef35,2
+np.float32,0xbed75734,0xbed17322,2
+np.float32,0x7f591151,0x42b28085,2
+np.float32,0xfefe9da6,0xc2b16f51,2
+np.float32,0xfeac90fc,0xc2b0a82a,2
+np.float32,0x805c198e,0x805c198e,2
+np.float32,0x7f66d6df,0x42b2a004,2
+np.float32,0x505438,0x505438,2
+np.float32,0xbf39a209,0xbf2c5255,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0xc84cb,0xc84cb,2
+np.float32,0x7f07d6f5,0x42b19088,2
+np.float32,0x79d7e4,0x79d7e4,2
+np.float32,0xff32f6a0,0xc2b21db1,2
+np.float32,0x7c005c05,0x42a9222e,2
+np.float32,0x3ec449aa,0x3ebfc5ae,2
+np.float32,0x800ec323,0x800ec323,2
+np.float32,0xff1c904c,0xc2b1d93a,2
+np.float32,0x7f4eca52,0x42b267b0,2
+np.float32,0x3ee06540,0x3ed9c514,2
+np.float32,0x6aab4,0x6aab4,2
+np.float32,0x3e298d8c,0x3e28c99e,2
+np.float32,0xbf38d162,0xbf2ba94a,2
+np.float32,0x2d9083,0x2d9083,2
+np.float32,0x7eae5032,0x42b0ad52,2
+np.float32,0x3ead5b3c,0x3eaa3443,2
+np.float32,0x806fef66,0x806fef66,2
+np.float32,0x3f5b614e,0x3f46ca71,2
+np.float32,0xbf4c906a,0xbf3b60fc,2
+np.float32,0x8049453e,0x8049453e,2
+np.float32,0x3d305220,0x3d304432,2
+np.float32,0x2e1a89,0x2e1a89,2
+np.float32,0xbf4e74ec,0xbf3cdacf,2
+np.float32,0x807a827a,0x807a827a,2
+np.float32,0x80070745,0x80070745,2
+np.float32,0xbe1ba2fc,0xbe1b0b28,2
+np.float32,0xbe5131d0,0xbe4fc421,2
+np.float32,0x5bfd98,0x5bfd98,2
+np.float32,0xbd8e1a48,0xbd8dfd27,2
+np.float32,0x8006c160,0x8006c160,2
+np.float32,0x346490,0x346490,2
+np.float32,0xbdbdf060,0xbdbdaaf0,2
+np.float32,0x3ea9d0c4,0x3ea6d8c7,2
+np.float32,0xbf2aaa28,0xbf200916,2
+np.float32,0xbf160c26,0xbf0e9047,2
+np.float32,0x80081fd4,0x80081fd4,2
+np.float32,0x7db44283,0x42adf8b6,2
+np.float32,0xbf1983f8,0xbf118bf5,2
+np.float32,0x2c4a35,0x2c4a35,2
+np.float32,0x6165a7,0x6165a7,2
+np.float32,0xbe776b44,0xbe75129f,2
+np.float32,0xfe81841a,0xc2b0153b,2
+np.float32,0xbf7d1b2f,0xbf5f9461,2
+np.float32,0x80602d36,0x80602d36,2
+np.float32,0xfe8d5046,0xc2b041dd,2
+np.float32,0xfe5037bc,0xc2afa56d,2
+np.float32,0x4bbea6,0x4bbea6,2
+np.float32,0xfea039de,0xc2b0822d,2
+np.float32,0x7ea627a4,0x42b094c7,2
+np.float32,0x3f556198,0x3f423591,2
+np.float32,0xfedbae04,0xc2b123c1,2
+np.float32,0xbe30432c,0xbe2f6744,2
+np.float32,0x80202c77,0x80202c77,2
+np.float32,0xff335cc1,0xc2b21ed5,2
+np.float32,0x3e1e1ebe,0x3e1d7f95,2
+np.float32,0x8021c9c0,0x8021c9c0,2
+np.float32,0x7dc978,0x7dc978,2
+np.float32,0xff6cfabc,0xc2b2ad75,2
+np.float32,0x7f2bd542,0x42b208e0,2
+np.float32,0x53bf33,0x53bf33,2
+np.float32,0x804e04bb,0x804e04bb,2
+np.float32,0x3f30d2f9,0x3f2521ca,2
+np.float32,0x3dfde876,0x3dfd4316,2
+np.float32,0x46f8b1,0x46f8b1,2
+np.float32,0xbd5f9e20,0xbd5f81ba,2
+np.float32,0x807d6a22,0x807d6a22,2
+np.float32,0xff3881da,0xc2b22d50,2
+np.float32,0x1b1cb5,0x1b1cb5,2
+np.float32,0x3f75f2d0,0x3f5a7435,2
+np.float32,0xfee39c1a,0xc2b135e9,2
+np.float32,0x7f79f14a,0x42b2c8b9,2
+np.float32,0x8000e2d1,0x8000e2d1,2
+np.float32,0xab779,0xab779,2
+np.float32,0xbede6690,0xbed7f102,2
+np.float32,0x76e20d,0x76e20d,2
+np.float32,0x3ed714cb,0x3ed135e9,2
+np.float32,0xbeaa6f44,0xbea76f31,2
+np.float32,0x7f7dc8b1,0x42b2d089,2
+np.float32,0x108cb2,0x108cb2,2
+np.float32,0x7d37ba82,0x42ac9f94,2
+np.float32,0x3f31d068,0x3f25f221,2
+np.float32,0x8010a331,0x8010a331,2
+np.float32,0x3f2fdc7c,0x3f2456cd,2
+np.float32,0x7f7a9a67,0x42b2ca13,2
+np.float32,0x3f2acb31,0x3f202492,2
+np.float32,0x7f54fa94,0x42b276c9,2
+np.float32,0x3ebf8a70,0x3ebb553c,2
+np.float32,0x7f75b1a7,0x42b2bff2,2
+np.float32,0x7daebe07,0x42ade8cc,2
+np.float32,0xbd3a3ef0,0xbd3a2e86,2
+np.float32,0x8078ec9e,0x8078ec9e,2
+np.float32,0x3eda206a,0x3ed403ec,2
+np.float32,0x3f7248f2,0x3f57cd77,2
+np.float32,0x805d55ba,0x805d55ba,2
+np.float32,0xff30dc3e,0xc2b217a3,2
+np.float32,0xbe12b27c,0xbe123333,2
+np.float32,0xbf6ed9cf,0xbf554cd0,2
+np.float32,0xbed9eb5c,0xbed3d31c,2
+np.float32,0xbf1c9aea,0xbf14307b,2
+np.float32,0x3f540ac4,0x3f412de2,2
+np.float32,0x800333ac,0x800333ac,2
+np.float32,0x3f74cdb4,0x3f59a09a,2
+np.float32,0xbf41dc41,0xbf32ee6f,2
+np.float32,0xff2c7804,0xc2b20ac4,2
+np.float32,0x514493,0x514493,2
+np.float32,0xbddf1220,0xbddea1cf,2
+np.float32,0xfeaf74de,0xc2b0b0ab,2
+np.float32,0xfe5dfb30,0xc2afc633,2
+np.float32,0xbf4785c4,0xbf376bdb,2
+np.float32,0x80191cd3,0x80191cd3,2
+np.float32,0xfe44f708,0xc2af88fb,2
+np.float32,0x3d4cd8a0,0x3d4cc2ca,2
+np.float32,0x7f572eff,0x42b27c0f,2
+np.float32,0x8031bacb,0x8031bacb,2
+np.float32,0x7f2ea684,0x42b21133,2
+np.float32,0xbea1976a,0xbe9f05bb,2
+np.float32,0x3d677b41,0x3d675bc1,2
+np.float32,0x3f61bf24,0x3f4b9870,2
+np.float32,0x7ef55ddf,0x42b15c5f,2
+np.float32,0x3eabcb20,0x3ea8b91c,2
+np.float32,0xff73d9ec,0xc2b2bc18,2
+np.float32,0x77b9f5,0x77b9f5,2
+np.float32,0x4c6c6c,0x4c6c6c,2
+np.float32,0x7ed09c94,0x42b10949,2
+np.float32,0xdeeec,0xdeeec,2
+np.float32,0x7eac5858,0x42b0a782,2
+np.float32,0x7e190658,0x42af07bd,2
+np.float32,0xbe3c8980,0xbe3b7ce2,2
+np.float32,0x8059e86e,0x8059e86e,2
+np.float32,0xff201836,0xc2b1e4a5,2
+np.float32,0xbeac109c,0xbea8fafb,2
+np.float32,0x7edd1e2b,0x42b12718,2
+np.float32,0x639cd8,0x639cd8,2
+np.float32,0x3f5e4cae,0x3f490059,2
+np.float32,0x3d84c185,0x3d84a9c4,2
+np.float32,0xbe8c1130,0xbe8a605b,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x3f1da5e4,0x3f151404,2
+np.float32,0x7f75a873,0x42b2bfdf,2
+np.float32,0xbd873540,0xbd871c28,2
+np.float32,0xbe8e5e10,0xbe8c9808,2
+np.float32,0x7f004bf2,0x42b17347,2
+np.float32,0x800000,0x800000,2
+np.float32,0xbf6d6b79,0xbf544095,2
+np.float32,0x7ed7b563,0x42b11a6a,2
+np.float32,0x80693745,0x80693745,2
+np.float32,0x3ee0f608,0x3eda49a8,2
+np.float32,0xfe1285a4,0xc2aef181,2
+np.float32,0x72d946,0x72d946,2
+np.float32,0x6a0dca,0x6a0dca,2
+np.float32,0x3f5c9df6,0x3f47ba99,2
+np.float32,0xff002af6,0xc2b172c4,2
+np.float32,0x3f4ac98f,0x3f39fd0a,2
+np.float32,0x8066acf7,0x8066acf7,2
+np.float32,0xbcaa4e60,0xbcaa4b3c,2
+np.float32,0x80162813,0x80162813,2
+np.float32,0xff34b318,0xc2b222a2,2
+np.float32,0x7f1ce33c,0x42b1da49,2
+np.float32,0x3f0e55ab,0x3f07ddb0,2
+np.float32,0x7c75d996,0x42aa6eec,2
+np.float32,0xbf221bc6,0xbf18dc89,2
+np.float32,0x3f5a1a4c,0x3f45d1d4,2
+np.float32,0x7f2451b8,0x42b1f1fb,2
+np.float32,0x3ec55ca0,0x3ec0c655,2
+np.float32,0x3f752dc2,0x3f59e600,2
+np.float32,0xbe33f638,0xbe330c4d,2
+np.float32,0x3e2a9148,0x3e29c9d8,2
+np.float32,0x3f3362a1,0x3f273c01,2
+np.float32,0x5f83b3,0x5f83b3,2
+np.float32,0x3e362488,0x3e353216,2
+np.float32,0x140bcf,0x140bcf,2
+np.float32,0x7e3e96df,0x42af7822,2
+np.float32,0xbebc7082,0xbeb86ce6,2
+np.float32,0xbe92a92e,0xbe90b9d2,2
+np.float32,0xff3d8afc,0xc2b23b19,2
+np.float32,0x804125e3,0x804125e3,2
+np.float32,0x3f3675d1,0x3f29bedb,2
+np.float32,0xff70bb09,0xc2b2b57f,2
+np.float32,0x3f29681c,0x3f1efcd2,2
+np.float32,0xbdc70380,0xbdc6b3a8,2
+np.float32,0x54e0dd,0x54e0dd,2
+np.float32,0x3d545de0,0x3d54458c,2
+np.float32,0x7f800000,0x7f800000,2
+np.float32,0x8014a4c2,0x8014a4c2,2
+np.float32,0xbe93f58a,0xbe91f938,2
+np.float32,0x17de33,0x17de33,2
+np.float32,0xfefb679a,0xc2b168d2,2
+np.float32,0xbf23423e,0xbf19d511,2
+np.float32,0x7e893fa1,0x42b032ec,2
+np.float32,0x3f44fe2d,0x3f356bda,2
+np.float32,0xbebb2e78,0xbeb73e8f,2
+np.float32,0x3f5632e0,0x3f42d633,2
+np.float32,0x3ddd8698,0x3ddd1896,2
+np.float32,0x80164ea7,0x80164ea7,2
+np.float32,0x80087b37,0x80087b37,2
+np.float32,0xbf06ab1e,0xbf011f95,2
+np.float32,0x3db95524,0x3db9149f,2
+np.float32,0x7aa1fbb3,0x42a570a1,2
+np.float32,0xbd84fc48,0xbd84e467,2
+np.float32,0x3d65c6f5,0x3d65a826,2
+np.float32,0xfe987800,0xc2b068c4,2
+np.float32,0x7ec59532,0x42b0ed7a,2
+np.float32,0x3ea0232c,0x3e9da29a,2
+np.float32,0x80292a08,0x80292a08,2
+np.float32,0x734cfe,0x734cfe,2
+np.float32,0x3f3b6d63,0x3f2dc596,2
+np.float32,0x3f27bcc1,0x3f1d97e6,2
+np.float32,0xfe1da554,0xc2af16f9,2
+np.float32,0x7c91f5,0x7c91f5,2
+np.float32,0xfe4e78cc,0xc2afa11e,2
+np.float32,0x7e4b4e08,0x42af9933,2
+np.float32,0xfe0949ec,0xc2aed02e,2
+np.float32,0x7e2f057f,0x42af4c81,2
+np.float32,0xbf200ae0,0xbf171ce1,2
+np.float32,0x3ebcc244,0x3eb8b99e,2
+np.float32,0xbf68f58d,0xbf50f7aa,2
+np.float32,0x4420b1,0x4420b1,2
+np.float32,0x3f5b61bf,0x3f46cac7,2
+np.float32,0x3fec78,0x3fec78,2
+np.float32,0x7f4183c8,0x42b245b7,2
+np.float32,0xbf10587c,0xbf099ee2,2
+np.float32,0x0,0x0,2
+np.float32,0x7ec84dc3,0x42b0f47a,2
+np.float32,0x3f5fbd7b,0x3f4a166d,2
+np.float32,0xbd884eb8,0xbd883502,2
+np.float32,0xfe3f10a4,0xc2af7969,2
+np.float32,0xff3f4920,0xc2b23fc9,2
+np.float32,0x8013900f,0x8013900f,2
+np.float32,0x8003529d,0x8003529d,2
+np.float32,0xbf032384,0xbefbfb3c,2
+np.float32,0xff418c7c,0xc2b245ce,2
+np.float32,0xbec0aad0,0xbebc633b,2
+np.float32,0xfdbff178,0xc2ae18de,2
+np.float32,0x68ab15,0x68ab15,2
+np.float32,0xbdfc4a88,0xbdfba848,2
+np.float32,0xbf5adec6,0xbf466747,2
+np.float32,0x807d5dcc,0x807d5dcc,2
+np.float32,0x61d144,0x61d144,2
+np.float32,0x807e3a03,0x807e3a03,2
+np.float32,0x1872f2,0x1872f2,2
+np.float32,0x7f2a272c,0x42b203d8,2
+np.float32,0xfe7f8314,0xc2b00e3a,2
+np.float32,0xbe42aeac,0xbe418737,2
+np.float32,0x8024b614,0x8024b614,2
+np.float32,0xbe41b6b8,0xbe40939a,2
+np.float32,0xa765c,0xa765c,2
+np.float32,0x7ea74f4b,0x42b09853,2
+np.float32,0x7f7ef631,0x42b2d2e7,2
+np.float32,0x7eaef5e6,0x42b0af38,2
+np.float32,0xff733d85,0xc2b2bacf,2
+np.float32,0x537ac0,0x537ac0,2
+np.float32,0xbeca4790,0xbec55b1d,2
+np.float32,0x80117314,0x80117314,2
+np.float32,0xfe958536,0xc2b05ec5,2
+np.float32,0x8066ecc2,0x8066ecc2,2
+np.float32,0xbf56baf3,0xbf433e82,2
+np.float32,0x1f7fd7,0x1f7fd7,2
+np.float32,0x3e942104,0x3e9222fc,2
+np.float32,0xfeaffe82,0xc2b0b23c,2
+np.float32,0xfe0e02b0,0xc2aee17e,2
+np.float32,0xbf800000,0xbf61a1b3,2
+np.float32,0x800b7e49,0x800b7e49,2
+np.float32,0x6c514f,0x6c514f,2
+np.float32,0xff800000,0xff800000,2
+np.float32,0x7f7d9a45,0x42b2d02b,2
+np.float32,0x800c9c69,0x800c9c69,2
+np.float32,0x274b14,0x274b14,2
+np.float32,0xbf4b22b0,0xbf3a42e2,2
+np.float32,0x63e5ae,0x63e5ae,2
+np.float32,0xbe18facc,0xbe186a90,2
+np.float32,0x7e137351,0x42aef4bd,2
+np.float32,0x80518ffd,0x80518ffd,2
+np.float32,0xbf0a8ffc,0xbf048f0d,2
+np.float32,0x841d,0x841d,2
+np.float32,0x7edfdc9e,0x42b12d69,2
+np.float32,0xfd1092b0,0xc2ac24de,2
+np.float32,0x7e2c9bdf,0x42af4566,2
+np.float32,0x7f7fffff,0x42b2d4fc,2
+np.float32,0x3f4954a6,0x3f38d853,2
+np.float32,0xbe83efd2,0xbe8284c3,2
+np.float32,0x800e8e02,0x800e8e02,2
+np.float32,0x78ad39,0x78ad39,2
+np.float32,0x7eb0f967,0x42b0b514,2
+np.float32,0xbe39aa94,0xbe38a9ee,2
+np.float32,0x80194e7b,0x80194e7b,2
+np.float32,0x3cf3a340,0x3cf39a0f,2
+np.float32,0x3ed3117a,0x3ecd8173,2
+np.float32,0x7f530b11,0x42b2721c,2
+np.float32,0xff756ba2,0xc2b2bf60,2
+np.float32,0x15ea25,0x15ea25,2
+np.float32,0x803cbb64,0x803cbb64,2
+np.float32,0x3f34722d,0x3f281a2c,2
+np.float32,0x3ddd88e0,0x3ddd1adb,2
+np.float32,0x3f54244c,0x3f41418b,2
+np.float32,0x3e0adb98,0x3e0a6f8b,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0x58902b,0x58902b,2
+np.float32,0xfe3b50b8,0xc2af6f43,2
+np.float32,0xfe0846d0,0xc2aecc64,2
+np.float32,0xbe0299d0,0xbe023fd4,2
+np.float32,0x18dde6,0x18dde6,2
+np.float32,0x8039fe8b,0x8039fe8b,2
+np.float32,0x8015d179,0x8015d179,2
+np.float32,0x3f551322,0x3f41f947,2
+np.float32,0x2ab387,0x2ab387,2
+np.float32,0xbf7e311e,0xbf6059d0,2
+np.float32,0xbdba58a8,0xbdba1713,2
+np.float32,0xbf1d008a,0xbf148724,2
+np.float32,0xbf6b9c97,0xbf52ec98,2
+np.float32,0x802acf04,0x802acf04,2
+np.float32,0x1,0x1,2
+np.float32,0xbe9e16d6,0xbe9bade3,2
+np.float32,0xbf048a14,0xbefe78c7,2
+np.float32,0x7e432ad3,0x42af8449,2
+np.float32,0xbdcc7fe0,0xbdcc2944,2
+np.float32,0x6dfc27,0x6dfc27,2
+np.float32,0xfef6eed8,0xc2b15fa1,2
+np.float32,0xbeeff6e8,0xbee7f2e4,2
+np.float32,0x7e3a6ca8,0x42af6cd2,2
+np.float32,0xff2c82e8,0xc2b20ae4,2
+np.float32,0x3e9f8d74,0x3e9d13b0,2
+np.float32,0x7ea36191,0x42b08c29,2
+np.float32,0x7f734bed,0x42b2baed,2
+np.float32,0x7f2df96d,0x42b20f37,2
+np.float32,0x5036fd,0x5036fd,2
+np.float32,0x806eab38,0x806eab38,2
+np.float32,0xbe9db90e,0xbe9b5446,2
+np.float32,0xfeef6fac,0xc2b14fd9,2
+np.float32,0xc2bf7,0xc2bf7,2
+np.float32,0xff53ec3d,0xc2b2743d,2
+np.float32,0x7e837637,0x42b01cde,2
+np.float32,0xbefb5934,0xbef23662,2
+np.float32,0x3f6cec80,0x3f53e371,2
+np.float32,0x3e86e7de,0x3e85643f,2
+np.float32,0x3f09cb42,0x3f03e1ef,2
+np.float32,0xbec3d236,0xbebf5620,2
+np.float32,0xfedef246,0xc2b12b50,2
+np.float32,0xbf08d6a8,0xbf030a62,2
+np.float32,0x8036cbf9,0x8036cbf9,2
+np.float32,0x3f74d3e3,0x3f59a512,2
+np.float32,0x6a600c,0x6a600c,2
+np.float32,0xfd1295b0,0xc2ac2bf1,2
+np.float32,0xbeb61142,0xbeb26efa,2
+np.float32,0x80216556,0x80216556,2
+np.float32,0xbf1fa0f6,0xbf16c30a,2
+np.float32,0x3e0af8e1,0x3e0a8c90,2
+np.float32,0x80434709,0x80434709,2
+np.float32,0x49efd9,0x49efd9,2
+np.float32,0x7f7cce6c,0x42b2ce8f,2
+np.float32,0x6e5450,0x6e5450,2
+np.float32,0x7f0fc115,0x42b1ad86,2
+np.float32,0x632db0,0x632db0,2
+np.float32,0x3f6f4c2a,0x3f55a064,2
+np.float32,0x7ec4f273,0x42b0ebd3,2
+np.float32,0x61ae1e,0x61ae1e,2
+np.float32,0x5f47c4,0x5f47c4,2
+np.float32,0xbf3c8f62,0xbf2eaf54,2
+np.float32,0xfca38900,0xc2ab0113,2
+np.float32,0x3ec89d52,0x3ec3ce78,2
+np.float32,0xbe0e3f70,0xbe0dcb53,2
+np.float32,0x805d3156,0x805d3156,2
+np.float32,0x3eee33f8,0x3ee65a4e,2
+np.float32,0xbeda7e9a,0xbed45a90,2
+np.float32,0x7e2fac7b,0x42af4e69,2
+np.float32,0x7efd0e28,0x42b16c2c,2
+np.float32,0x3f0c7b17,0x3f063e46,2
+np.float32,0xbf395bec,0xbf2c198f,2
+np.float32,0xfdf1c3f8,0xc2ae8f05,2
+np.float32,0xbe11f4e4,0xbe117783,2
+np.float32,0x7eddc901,0x42b128a3,2
+np.float32,0x3f4bad09,0x3f3aaf33,2
+np.float32,0xfefb5d76,0xc2b168bd,2
+np.float32,0x3ed3a4cf,0x3ece09a3,2
+np.float32,0x7ec582e4,0x42b0ed4a,2
+np.float32,0x3dc2268a,0x3dc1dc64,2
+np.float32,0x3ef9b17c,0x3ef0b9c9,2
+np.float32,0x2748ac,0x2748ac,2
+np.float32,0xfed6a602,0xc2b117e4,2
+np.float32,0xbefc9c36,0xbef35832,2
+np.float32,0x7e0476,0x7e0476,2
+np.float32,0x804be1a0,0x804be1a0,2
+np.float32,0xbefbc1c2,0xbef2943a,2
+np.float32,0xbd4698f0,0xbd46850a,2
+np.float32,0x688627,0x688627,2
+np.float32,0x3f7f7685,0x3f61406f,2
+np.float32,0x827fb,0x827fb,2
+np.float32,0x3f503264,0x3f3e34fd,2
+np.float32,0x7f5458d1,0x42b27543,2
+np.float32,0x800ac01f,0x800ac01f,2
+np.float32,0x6188dd,0x6188dd,2
+np.float32,0x806ac0ba,0x806ac0ba,2
+np.float32,0xbe14493c,0xbe13c5cc,2
+np.float32,0x3f77542c,0x3f5b72ae,2
+np.float32,0xfeaacab6,0xc2b0a2df,2
+np.float32,0x7f2893d5,0x42b1ff15,2
+np.float32,0x66b528,0x66b528,2
+np.float32,0xbf653e24,0xbf4e3573,2
+np.float32,0x801a2853,0x801a2853,2
+np.float32,0x3f3d8c98,0x3f2f7b04,2
+np.float32,0xfdffbad8,0xc2aeabc5,2
+np.float32,0x3dd50f,0x3dd50f,2
+np.float32,0x3f325a4c,0x3f266353,2
+np.float32,0xfcc48ec0,0xc2ab5f3f,2
+np.float32,0x3e6f5b9a,0x3e6d3ae5,2
+np.float32,0x3dbcd62b,0x3dbc91ee,2
+np.float32,0xbf7458d9,0xbf594c1c,2
+np.float32,0xff5adb24,0xc2b284b9,2
+np.float32,0x807b246d,0x807b246d,2
+np.float32,0x3f800000,0x3f61a1b3,2
+np.float32,0x231a28,0x231a28,2
+np.float32,0xbdc66258,0xbdc61341,2
+np.float32,0x3c84b4b4,0x3c84b338,2
+np.float32,0xbf215894,0xbf183783,2
+np.float32,0xff4ee298,0xc2b267ec,2
+np.float32,0x801ef52e,0x801ef52e,2
+np.float32,0x1040b0,0x1040b0,2
+np.float32,0xff545582,0xc2b2753b,2
+np.float32,0x3f3b9dda,0x3f2decaf,2
+np.float32,0x730f99,0x730f99,2
+np.float32,0xff7fffff,0xc2b2d4fc,2
+np.float32,0xff24cc5e,0xc2b1f379,2
+np.float32,0xbe9b456a,0xbe98fc0b,2
+np.float32,0x188fb,0x188fb,2
+np.float32,0x3f5c7ce2,0x3f47a18a,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0x806ea4da,0x806ea4da,2
+np.float32,0xfe810570,0xc2b01345,2
+np.float32,0x8036af89,0x8036af89,2
+np.float32,0x8043cec6,0x8043cec6,2
+np.float32,0x80342bb3,0x80342bb3,2
+np.float32,0x1a2bd4,0x1a2bd4,2
+np.float32,0x3f6248c2,0x3f4bff9a,2
+np.float32,0x8024eb35,0x8024eb35,2
+np.float32,0x7ea55872,0x42b09247,2
+np.float32,0x806d6e56,0x806d6e56,2
+np.float32,0x25c21a,0x25c21a,2
+np.float32,0x3f4e95f3,0x3f3cf483,2
+np.float32,0x15ca38,0x15ca38,2
+np.float32,0x803f01b2,0x803f01b2,2
+np.float32,0xbe731634,0xbe70dc10,2
+np.float32,0x3e80cee4,0x3e7ef933,2
+np.float32,0x3ef6dda5,0x3eee2e7b,2
+np.float32,0x3f3dfdc2,0x3f2fd5ed,2
+np.float32,0xff0492a7,0xc2b18411,2
+np.float32,0xbf1d0adf,0xbf148ff3,2
+np.float32,0xfcf75460,0xc2abd4e3,2
+np.float32,0x3f46fca6,0x3f36ffa6,2
+np.float32,0xbe63b5c0,0xbe61dfb3,2
+np.float32,0xff019bec,0xc2b1787d,2
+np.float32,0x801f14a9,0x801f14a9,2
+np.float32,0x3f176cfa,0x3f0fc051,2
+np.float32,0x3f69d976,0x3f51a015,2
+np.float32,0x3f4917cb,0x3f38a87a,2
+np.float32,0x3b2a0bea,0x3b2a0bdd,2
+np.float32,0xbf41d857,0xbf32eb50,2
+np.float32,0xbf08841a,0xbf02c18f,2
+np.float32,0x7ec86f14,0x42b0f4d0,2
+np.float32,0xbf7d15d1,0xbf5f9090,2
+np.float32,0xbd080550,0xbd07feea,2
+np.float32,0xbf6f1bef,0xbf557d26,2
+np.float32,0xfebc282c,0xc2b0d473,2
+np.float32,0x3e68d2f5,0x3e66dd03,2
+np.float32,0x3f3ed8fe,0x3f3085d5,2
+np.float32,0xff2f78ae,0xc2b2139a,2
+np.float32,0xff647a70,0xc2b29ac1,2
+np.float32,0xfd0859a0,0xc2ac06e2,2
+np.float32,0x3ea578a8,0x3ea2b7e1,2
+np.float32,0x6c58c6,0x6c58c6,2
+np.float32,0xff23f26a,0xc2b1f0d2,2
+np.float32,0x800902a4,0x800902a4,2
+np.float32,0xfe8ba64e,0xc2b03bcd,2
+np.float32,0x3f091143,0x3f033e0f,2
+np.float32,0x8017c4bd,0x8017c4bd,2
+np.float32,0xbf708fd4,0xbf568c8c,2
+np.float32,0x3be1d8,0x3be1d8,2
+np.float32,0x80091f07,0x80091f07,2
+np.float32,0x68eabe,0x68eabe,2
+np.float32,0xfe9ab2c8,0xc2b07033,2
+np.float32,0x3eabe752,0x3ea8d3d7,2
+np.float32,0xbf7adcb2,0xbf5dfaf5,2
+np.float32,0x801ecc01,0x801ecc01,2
+np.float32,0xbf5570a9,0xbf424123,2
+np.float32,0x3e89eecd,0x3e88510e,2
+np.float32,0xfeb2feee,0xc2b0bae4,2
+np.float32,0xbeb25ec2,0xbeaef22b,2
+np.float32,0x201e49,0x201e49,2
+np.float32,0x800a35f6,0x800a35f6,2
+np.float32,0xbf02d449,0xbefb6e2a,2
+np.float32,0x3f062bea,0x3f00aef6,2
+np.float32,0x7f5219ff,0x42b26fd2,2
+np.float32,0xbd4561d0,0xbd454e47,2
+np.float32,0x3f6c4789,0x3f536a4b,2
+np.float32,0x7f58b06d,0x42b27fa1,2
+np.float32,0x7f132f39,0x42b1b999,2
+np.float32,0x3e05dcb4,0x3e057bd8,2
+np.float32,0x7f526045,0x42b2707d,2
+np.float32,0x3f6117d0,0x3f4b1adb,2
+np.float32,0xbf21f47d,0xbf18bb57,2
+np.float32,0x1a26d6,0x1a26d6,2
+np.float32,0x46b114,0x46b114,2
+np.float32,0x3eb24518,0x3eaed9ef,2
+np.float32,0xfe2139c8,0xc2af2278,2
+np.float32,0xbf7c36fb,0xbf5ef1f6,2
+np.float32,0x3f193834,0x3f114af7,2
+np.float32,0xff3ea650,0xc2b23e14,2
+np.float32,0xfeeb3bca,0xc2b146c7,2
+np.float32,0x7e8b8ca0,0x42b03b6f,2
+np.float32,0x3eed903d,0x3ee5c5d2,2
+np.float32,0xbdc73740,0xbdc6e72a,2
+np.float32,0x7e500307,0x42afa4ec,2
+np.float32,0xe003c,0xe003c,2
+np.float32,0x3e612bb4,0x3e5f64fd,2
+np.float32,0xfd81e248,0xc2ad50e6,2
+np.float32,0x766a4f,0x766a4f,2
+np.float32,0x3e8708c9,0x3e858414,2
+np.float32,0xbf206c58,0xbf176f7f,2
+np.float32,0x7e93aeb0,0x42b0586f,2
+np.float32,0xfd9d36b8,0xc2adb2ad,2
+np.float32,0xff1f4e0e,0xc2b1e21d,2
+np.float32,0x3f22bd5a,0x3f1964f8,2
+np.float32,0x7f6a517a,0x42b2a7ad,2
+np.float32,0xff6ca773,0xc2b2acc1,2
+np.float32,0x7f6bf453,0x42b2ab3d,2
+np.float32,0x3edfdd64,0x3ed9489f,2
+np.float32,0xbeafc5ba,0xbeac7daa,2
+np.float32,0x7d862039,0x42ad615b,2
+np.float32,0xbe9d2002,0xbe9ac1fc,2
+np.float32,0xbdcc54c0,0xbdcbfe5b,2
+np.float32,0xbf1bc0aa,0xbf13762a,2
+np.float32,0xbf4679ce,0xbf36984b,2
+np.float32,0x3ef45696,0x3eebe713,2
+np.float32,0xff6eb999,0xc2b2b137,2
+np.float32,0xbe4b2e4c,0xbe49dee8,2
+np.float32,0x3f498951,0x3f3901b7,2
+np.float32,0xbe9692f4,0xbe947be1,2
+np.float32,0xbf44ce26,0xbf3545c8,2
+np.float32,0x805787a8,0x805787a8,2
+np.float32,0xbf342650,0xbf27dc26,2
+np.float32,0x3edafbf0,0x3ed4cdd2,2
+np.float32,0x3f6fb858,0x3f55ef63,2
+np.float32,0xff227d0a,0xc2b1ec3f,2
+np.float32,0xfeb9a202,0xc2b0cd89,2
+np.float32,0x7f5b12c1,0x42b2853b,2
+np.float32,0x584578,0x584578,2
+np.float32,0x7ec0b76f,0x42b0e0b5,2
+np.float32,0x3f57f54b,0x3f442f10,2
+np.float32,0x7eef3620,0x42b14f5d,2
+np.float32,0x4525b5,0x4525b5,2
+np.float32,0x801bd407,0x801bd407,2
+np.float32,0xbed1f166,0xbecc7703,2
+np.float32,0x3f57e732,0x3f442449,2
+np.float32,0x80767cd5,0x80767cd5,2
+np.float32,0xbef1a7d2,0xbee97aa3,2
+np.float32,0x3dd5b1af,0x3dd54ee6,2
+np.float32,0x960c,0x960c,2
+np.float32,0x7c392d41,0x42a9ddd1,2
+np.float32,0x3f5c9a34,0x3f47b7c1,2
+np.float32,0x3f5cecee,0x3f47f667,2
+np.float32,0xbee482ce,0xbedd8899,2
+np.float32,0x8066ba7e,0x8066ba7e,2
+np.float32,0x7ed76127,0x42b119a2,2
+np.float32,0x805ca40b,0x805ca40b,2
+np.float32,0x7f5ed5d1,0x42b28df3,2
+np.float32,0xfe9e1b1e,0xc2b07b5b,2
+np.float32,0x3f0201a2,0x3ef9f6c4,2
+np.float32,0xbf2e6430,0xbf232039,2
+np.float32,0x80326b4d,0x80326b4d,2
+np.float32,0x3f11dc7c,0x3f0af06e,2
+np.float32,0xbe89c42e,0xbe8827e6,2
+np.float32,0x3f3c69f8,0x3f2e9133,2
+np.float32,0x806326a9,0x806326a9,2
+np.float32,0x3f1c5286,0x3f13f2b6,2
+np.float32,0xff5c0ead,0xc2b28786,2
+np.float32,0xff32b952,0xc2b21d01,2
+np.float32,0x7dd27c4e,0x42ae4815,2
+np.float32,0xbf7a6816,0xbf5da7a2,2
+np.float32,0xfeac72f8,0xc2b0a7d1,2
+np.float32,0x335ad7,0x335ad7,2
+np.float32,0xbe682da4,0xbe663bcc,2
+np.float32,0x3f2df244,0x3f22c208,2
+np.float32,0x80686e8e,0x80686e8e,2
+np.float32,0x7f50120f,0x42b26ad9,2
+np.float32,0x3dbc596a,0x3dbc15b3,2
+np.float32,0xbf4f2868,0xbf3d666d,2
+np.float32,0x80000001,0x80000001,2
+np.float32,0xff66c059,0xc2b29fd2,2
+np.float32,0xfe8bbcaa,0xc2b03c1f,2
+np.float32,0x3ece6a51,0x3ec93271,2
+np.float32,0x7f06cd26,0x42b18c9a,2
+np.float32,0x7e41e6dc,0x42af80f5,2
+np.float32,0x7d878334,0x42ad669f,2
+np.float32,0xfe8c5c4c,0xc2b03e67,2
+np.float32,0x337a05,0x337a05,2
+np.float32,0x3e63801d,0x3e61ab58,2
+np.float32,0x62c315,0x62c315,2
+np.float32,0x802aa888,0x802aa888,2
+np.float32,0x80038b43,0x80038b43,2
+np.float32,0xff5c1271,0xc2b2878f,2
+np.float32,0xff4184a5,0xc2b245b9,2
+np.float32,0x7ef58f4b,0x42b15cc6,2
+np.float32,0x7f42d8ac,0x42b2493a,2
+np.float32,0x806609f2,0x806609f2,2
+np.float32,0x801e763b,0x801e763b,2
+np.float32,0x7f2bc073,0x42b208a2,2
+np.float32,0x801d7d7f,0x801d7d7f,2
+np.float32,0x7d415dc1,0x42acb9c2,2
+np.float32,0xbf624ff9,0xbf4c0502,2
+np.float32,0xbf603afd,0xbf4a74e2,2
+np.float32,0x8007fe42,0x8007fe42,2
+np.float32,0x800456db,0x800456db,2
+np.float32,0x620871,0x620871,2
+np.float32,0x3e9c6c1e,0x3e9a15fa,2
+np.float32,0x4245d,0x4245d,2
+np.float32,0x8035bde9,0x8035bde9,2
+np.float32,0xbf597418,0xbf45533c,2
+np.float32,0x3c730f80,0x3c730d38,2
+np.float32,0x3f7cd8ed,0x3f5f6540,2
+np.float32,0x807e49c3,0x807e49c3,2
+np.float32,0x3d6584c0,0x3d65660c,2
+np.float32,0xff42a744,0xc2b248b8,2
+np.float32,0xfedc6f56,0xc2b12583,2
+np.float32,0x806263a4,0x806263a4,2
+np.float32,0x175a17,0x175a17,2
+np.float32,0x3f1e8537,0x3f15d208,2
+np.float32,0x4055b5,0x4055b5,2
+np.float32,0x438aa6,0x438aa6,2
+np.float32,0x8038507f,0x8038507f,2
+np.float32,0xbed75348,0xbed16f85,2
+np.float32,0x7f07b7d6,0x42b19012,2
+np.float32,0xfe8b9d30,0xc2b03bac,2
+np.float32,0x805c501c,0x805c501c,2
+np.float32,0x3ef22b1d,0x3ee9f159,2
+np.float32,0x802b6759,0x802b6759,2
+np.float32,0x45281a,0x45281a,2
+np.float32,0xbf7e9970,0xbf60a3cf,2
+np.float32,0xbf14d152,0xbf0d8062,2
+np.float32,0x3d9ff950,0x3d9fcfc8,2
+np.float32,0x7865d9,0x7865d9,2
+np.float32,0xbee67fa4,0xbedf58eb,2
+np.float32,0x7dc822d1,0x42ae2e44,2
+np.float32,0x3f3af0fe,0x3f2d612c,2
+np.float32,0xbefea106,0xbef5274e,2
+np.float32,0xbf758a3f,0xbf5a28c5,2
+np.float32,0xbf331bdd,0xbf270209,2
+np.float32,0x7f51c901,0x42b26f0d,2
+np.float32,0x3f67c33b,0x3f5014d8,2
+np.float32,0xbbc9d980,0xbbc9d92c,2
+np.float32,0xbc407540,0xbc40741e,2
+np.float32,0x7eed9a3c,0x42b14be9,2
+np.float32,0x1be0fe,0x1be0fe,2
+np.float32,0xbf6b4913,0xbf52af1f,2
+np.float32,0xbda8eba8,0xbda8bac6,2
+np.float32,0x8004bcea,0x8004bcea,2
+np.float32,0xff6f6afe,0xc2b2b2b3,2
+np.float32,0xbf205810,0xbf175e50,2
+np.float32,0x80651944,0x80651944,2
+np.float32,0xbec73016,0xbec27a3f,2
+np.float32,0x5701b9,0x5701b9,2
+np.float32,0xbf1062ce,0xbf09a7df,2
+np.float32,0x3e0306ae,0x3e02abd1,2
+np.float32,0x7bfc62,0x7bfc62,2
+np.float32,0xbf48dd3c,0xbf387a6b,2
+np.float32,0x8009573e,0x8009573e,2
+np.float32,0x660a2c,0x660a2c,2
+np.float32,0xff2280da,0xc2b1ec4b,2
+np.float32,0xbf7034fe,0xbf564a54,2
+np.float32,0xbeeb448e,0xbee3b045,2
+np.float32,0xff4e949c,0xc2b2672b,2
+np.float32,0xbf3c4486,0xbf2e7309,2
+np.float32,0x7eb086d8,0x42b0b3c8,2
+np.float32,0x7eac8aca,0x42b0a817,2
+np.float32,0xfd3d2d60,0xc2acae8b,2
+np.float32,0xbf363226,0xbf2987bd,2
+np.float32,0x7f02e524,0x42b17d8c,2
+np.float32,0x8049a148,0x8049a148,2
+np.float32,0x147202,0x147202,2
+np.float32,0x8031d3f6,0x8031d3f6,2
+np.float32,0xfe78bf68,0xc2b0007d,2
+np.float32,0x7ebd16d0,0x42b0d6fb,2
+np.float32,0xbdaed2e8,0xbdae9cbb,2
+np.float32,0x802833ae,0x802833ae,2
+np.float32,0x7f62adf6,0x42b296b5,2
+np.float32,0xff2841c0,0xc2b1fe1b,2
+np.float32,0xbeb2c47e,0xbeaf523b,2
+np.float32,0x7e42a36e,0x42af82e6,2
+np.float32,0x41ea29,0x41ea29,2
+np.float32,0xbcaaa800,0xbcaaa4d7,2
+np.float64,0x3fed71f27ebae3e5,0x3fea5c6095012ca6,2
+np.float64,0x224dc392449b9,0x224dc392449b9,2
+np.float64,0x3fdf897a7d3f12f5,0x3fde620339360992,2
+np.float64,0xbfe1f99a5123f334,0xbfe124a57cfaf556,2
+np.float64,0xbfd9725c3bb2e4b8,0xbfd8d1e3f75110c7,2
+np.float64,0x3fe38977546712ee,0x3fe27d9d37f4b91f,2
+np.float64,0xbfc36c29e526d854,0xbfc3594743ee45c4,2
+np.float64,0xbfe5cbec332b97d8,0xbfe4638802316849,2
+np.float64,0x2ff35efe5fe6d,0x2ff35efe5fe6d,2
+np.float64,0x7fd3f828e227f051,0x40862a7d4a40b1e0,2
+np.float64,0xffd06fc11620df82,0xc08628ee8f1bf6c8,2
+np.float64,0x3fe5321bf4aa6438,0x3fe3e3d9fa453199,2
+np.float64,0xffd07a323ca0f464,0xc08628f3a2930f8c,2
+np.float64,0x3fdf7abe7abef57c,0x3fde54cb193d49cb,2
+np.float64,0x40941f1881285,0x40941f1881285,2
+np.float64,0xffef18defc7e31bd,0xc0863393f2c9f061,2
+np.float64,0xbfe379f871e6f3f1,0xbfe270620cb68347,2
+np.float64,0xffec829848f90530,0xc08632e210edaa2b,2
+np.float64,0x80070c00574e1801,0x80070c00574e1801,2
+np.float64,0xffce7654b23ceca8,0xc086285291e89975,2
+np.float64,0x7fc9932daa33265a,0x408626ec6cc2b807,2
+np.float64,0x355ee98c6abde,0x355ee98c6abde,2
+np.float64,0x3fac54962c38a920,0x3fac50e40b6c19f2,2
+np.float64,0x800857984af0af31,0x800857984af0af31,2
+np.float64,0x7fea6a3d55f4d47a,0x40863245bf39f179,2
+np.float64,0x3fdb8fab33371f56,0x3fdac5ffc9e1c347,2
+np.float64,0x800a887a7bf510f5,0x800a887a7bf510f5,2
+np.float64,0xbfbdbda3c63b7b48,0xbfbdac9dd5a2d3e8,2
+np.float64,0xbfd4a2457b29448a,0xbfd44acb3b316d6d,2
+np.float64,0x7fd5329a502a6534,0x40862af789b528b5,2
+np.float64,0x3fd96a7bceb2d4f8,0x3fd8ca92104d6cd6,2
+np.float64,0x3fde6a0cd6bcd41a,0x3fdd5f4b85abf749,2
+np.float64,0xbfc7faaff32ff560,0xbfc7d7560b8c4a52,2
+np.float64,0x7fec381b2f787035,0x408632cd0e9c095c,2
+np.float64,0x1fc2eb543f85e,0x1fc2eb543f85e,2
+np.float64,0x7ac6000af58c1,0x7ac6000af58c1,2
+np.float64,0xffe060a87920c150,0xc0862e72c37d5a4e,2
+np.float64,0xbfb7d8c89e2fb190,0xbfb7cffd3c3f8e3a,2
+np.float64,0x3fd91033deb22068,0x3fd87695b067aa1e,2
+np.float64,0x3fec1aff01b835fe,0x3fe95d5cbd729af7,2
+np.float64,0x7fb97f69ec32fed3,0x4086215aaae5c697,2
+np.float64,0x7feaf1e4e5f5e3c9,0x4086326e6ca6a2bb,2
+np.float64,0x800537e44d0a6fc9,0x800537e44d0a6fc9,2
+np.float64,0x800b2a0d0d36541a,0x800b2a0d0d36541a,2
+np.float64,0x3fe2193846e43270,0x3fe140308550138e,2
+np.float64,0x5e2a0a32bc542,0x5e2a0a32bc542,2
+np.float64,0xffe5888b09eb1116,0xc08630a348783aa3,2
+np.float64,0xbfceb9b5033d736c,0xbfce701049c10435,2
+np.float64,0x7fe5d68589abad0a,0x408630c00ce63f23,2
+np.float64,0x8009b5457ff36a8b,0x8009b5457ff36a8b,2
+np.float64,0xbfb5518c2e2aa318,0xbfb54b42638ca718,2
+np.float64,0x3f9c58469838b080,0x3f9c575974fbcd7b,2
+np.float64,0x3fe8db4b4731b697,0x3fe6dc9231587966,2
+np.float64,0x8007d0f77f4fa1f0,0x8007d0f77f4fa1f0,2
+np.float64,0x7fe79eef542f3dde,0x40863160c673c67f,2
+np.float64,0xffbdc0b6163b8170,0xc0862296be4bf032,2
+np.float64,0x3fbb8d3312371a66,0x3fbb7fa76fb4cf8d,2
+np.float64,0xffd8a0eedbb141de,0xc0862c2ac6e512f0,2
+np.float64,0x7fee99d8d87d33b1,0x4086337301c4c8df,2
+np.float64,0xffe7479b552e8f36,0xc0863142fba0f0ec,2
+np.float64,0xffedf8ef4abbf1de,0xc08633488068fe69,2
+np.float64,0x895c4d9f12b8a,0x895c4d9f12b8a,2
+np.float64,0x29b4caf05369a,0x29b4caf05369a,2
+np.float64,0xbfefb90d657f721b,0xbfec01efa2425b35,2
+np.float64,0xde07c3bdbc0f9,0xde07c3bdbc0f9,2
+np.float64,0x7feae9fd02f5d3f9,0x4086326c1368ed5a,2
+np.float64,0x3feab792da756f26,0x3fe84f6e15338ed7,2
+np.float64,0xbfeff8ed72fff1db,0xbfec2f35da06daaf,2
+np.float64,0x8004b2c132896583,0x8004b2c132896583,2
+np.float64,0xbf9fcb00103f9600,0xbf9fc9b1751c569e,2
+np.float64,0x4182b72e83058,0x4182b72e83058,2
+np.float64,0x90820d812105,0x90820d812105,2
+np.float64,0xbfdec9a0ba3d9342,0xbfddb585df607ce1,2
+np.float64,0x7fdc0a69a03814d2,0x40862d347f201b63,2
+np.float64,0xbfef0708937e0e11,0xbfeb82d27f8ea97f,2
+np.float64,0xffda57e4ddb4afca,0xc0862cb49e2e0c4c,2
+np.float64,0xbfa30b9af4261730,0xbfa30a7b4a633060,2
+np.float64,0x7feb57fcc4b6aff9,0x4086328c83957a0b,2
+np.float64,0x7fe6759153eceb22,0x408630f980433963,2
+np.float64,0x7fdd3278c8ba64f1,0x40862d87445243e9,2
+np.float64,0xd3b8e6b9a771d,0xd3b8e6b9a771d,2
+np.float64,0x6267dc88c4cfc,0x6267dc88c4cfc,2
+np.float64,0x7fedd3cf00bba79d,0x4086333e91712ff5,2
+np.float64,0xffbe512ce03ca258,0xc08622bd39314cea,2
+np.float64,0xbfe71742ca6e2e86,0xbfe572ccbf2d010d,2
+np.float64,0x8002fb048c65f60a,0x8002fb048c65f60a,2
+np.float64,0x800d9d9ddf7b3b3c,0x800d9d9ddf7b3b3c,2
+np.float64,0xbfeaf6230df5ec46,0xbfe87f5d751ec3d5,2
+np.float64,0xbfe69973a42d32e8,0xbfe50c680f7002fe,2
+np.float64,0x3fe309cf87e613a0,0x3fe21048714ce1ac,2
+np.float64,0x800435d17a286ba4,0x800435d17a286ba4,2
+np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,2
+np.float64,0x3fe36ade1766d5bc,0x3fe26379fb285dde,2
+np.float64,0x3f98d8d94831b1c0,0x3f98d839885dc527,2
+np.float64,0xbfd08f7ae5211ef6,0xbfd0618ab5293e1e,2
+np.float64,0xbfcf630bd53ec618,0xbfcf14a0cd20704d,2
+np.float64,0xbfe58f0ca6eb1e1a,0xbfe4312225df8e28,2
+np.float64,0xffef4f6406be9ec7,0xc08633a1ed1d27e5,2
+np.float64,0x7fe10120b3e20240,0x40862ebfaf94e6e8,2
+np.float64,0xffe96c52fbb2d8a5,0xc08631f75d9a59a0,2
+np.float64,0xbfe448a333e89146,0xbfe31fee44c3ec43,2
+np.float64,0x80045ff4e788bfeb,0x80045ff4e788bfeb,2
+np.float64,0x7fefaa2f823f545e,0x408633b8fea29524,2
+np.float64,0xffea6b8bf234d717,0xc0863246248e5960,2
+np.float64,0xbfdb085d80b610bc,0xbfda498b15b43eec,2
+np.float64,0xbfd5e12da3abc25c,0xbfd57970e2b8aecc,2
+np.float64,0x3fcc84928a390925,0x3fcc497c417a89f3,2
+np.float64,0xbfdcb713bf396e28,0xbfdbd46c5e731fd9,2
+np.float64,0xffdf50c0453ea180,0xc0862e16b5562f25,2
+np.float64,0x800342c2f7268587,0x800342c2f7268587,2
+np.float64,0x7feb8b6d743716da,0x4086329b8248de2c,2
+np.float64,0x800a9b18b4953632,0x800a9b18b4953632,2
+np.float64,0xffedaf0d12fb5e19,0xc0863334af82de1a,2
+np.float64,0x800aebda4ab5d7b5,0x800aebda4ab5d7b5,2
+np.float64,0xbfa9f5848433eb10,0xbfa9f2ac7ac065d4,2
+np.float64,0x3fea375928f46eb2,0x3fe7ec9f10eeac7d,2
+np.float64,0x3fd6c213fead8428,0x3fd64dcc1eff5f1b,2
+np.float64,0xbfa0476f44208ee0,0xbfa046bb986007ac,2
+np.float64,0x6c8e18aed91c4,0x6c8e18aed91c4,2
+np.float64,0x8000000000000001,0x8000000000000001,2
+np.float64,0x7fea86b5ba350d6a,0x4086324e59f13027,2
+np.float64,0x2316c3b0462d9,0x2316c3b0462d9,2
+np.float64,0x3fec4e3281389c65,0x3fe983c5c9d65940,2
+np.float64,0x3fbb87c47f772,0x3fbb87c47f772,2
+np.float64,0x8004af00fdc95e03,0x8004af00fdc95e03,2
+np.float64,0xbfd316db9ba62db8,0xbfd2d12765b9d155,2
+np.float64,0x3fec1a7a99f834f6,0x3fe95cf941889b3d,2
+np.float64,0x3feff7e1477fefc3,0x3fec2e782392d4b9,2
+np.float64,0xbfc683ea042d07d4,0xbfc66698cfa5026e,2
+np.float64,0x3fdbc8aaa9b79154,0x3fdafa50e6fc3fff,2
+np.float64,0xfb3b630ff676d,0xfb3b630ff676d,2
+np.float64,0x7fe715ef8eae2bde,0x40863131d794b41f,2
+np.float64,0x7fefa06c11bf40d7,0x408633b686c7996a,2
+np.float64,0x80002a40f5205483,0x80002a40f5205483,2
+np.float64,0x7fe95f3c74b2be78,0x408631f33e37bf76,2
+np.float64,0x3fb2977b32252ef0,0x3fb2934eaf5a4be8,2
+np.float64,0x3fc0f3dbc821e7b8,0x3fc0e745288c84c3,2
+np.float64,0x3fda98da56b531b5,0x3fd9e2b19447dacc,2
+np.float64,0x3f95b9d5202b73aa,0x3f95b96a53282949,2
+np.float64,0x3fdc1ace7738359d,0x3fdb4597d31df7ff,2
+np.float64,0xffeac5bb2e358b76,0xc0863261452ab66c,2
+np.float64,0xbfefb1b78f7f636f,0xbfebfcb9be100ced,2
+np.float64,0xf5c9e191eb93c,0xf5c9e191eb93c,2
+np.float64,0x3fe83a977630752f,0x3fe65d0df90ff6ef,2
+np.float64,0x3fc317515d262ea0,0x3fc3056072b719f0,2
+np.float64,0x7fe2dcfab225b9f4,0x40862f94257c28a2,2
+np.float64,0xca2b115794562,0xca2b115794562,2
+np.float64,0x3fd495301aa92a60,0x3fd43e57108761d5,2
+np.float64,0x800ccc4293199885,0x800ccc4293199885,2
+np.float64,0xc8d3173d91a63,0xc8d3173d91a63,2
+np.float64,0xbf2541bb7e4a8,0xbf2541bb7e4a8,2
+np.float64,0xbfe9a330df334662,0xbfe779816573f5be,2
+np.float64,0xffd5e4c8252bc990,0xc0862b39b3ca5d72,2
+np.float64,0x3fe90f3a53721e75,0x3fe70585ae09531d,2
+np.float64,0xbfe2b5ddc7a56bbc,0xbfe1c7fa91a675ed,2
+np.float64,0xbf981a0360303400,0xbf9819719345073a,2
+np.float64,0x19174b0e322ea,0x19174b0e322ea,2
+np.float64,0xbfd2f71a1725ee34,0xbfd2b2b6f7cd10b1,2
+np.float64,0x80056e83236add07,0x80056e83236add07,2
+np.float64,0x7fe4bc41d9697883,0x40863055f20ce0cb,2
+np.float64,0xffe76e06c46edc0d,0xc086315024b25559,2
+np.float64,0x3fe3c4f0f96789e2,0x3fe2b04b584609bf,2
+np.float64,0x3fe6cfc533ed9f8a,0x3fe538b4d784d5ee,2
+np.float64,0x7fd234a640a4694c,0x408629bfead4f0b2,2
+np.float64,0x3fdbc49c9ab78939,0x3fdaf698a83d08e2,2
+np.float64,0x3fe4c5336ee98a66,0x3fe388c6ddb60e0a,2
+np.float64,0xf4b9497be9729,0xf4b9497be9729,2
+np.float64,0x3fb312be12262580,0x3fb30e3c847c1d16,2
+np.float64,0x3fe9554218f2aa84,0x3fe73c8b311c7a98,2
+np.float64,0xff899816a0333040,0xc08610bfb2cd8559,2
+np.float64,0x8006008ad52c0116,0x8006008ad52c0116,2
+np.float64,0x3fd7d47be4afa8f8,0x3fd74fa71ec17fd0,2
+np.float64,0x8010000000000000,0x8010000000000000,2
+np.float64,0xdf2a9943be553,0xdf2a9943be553,2
+np.float64,0xbfeb86bf1eb70d7e,0xbfe8ed797580ba5c,2
+np.float64,0x800e2c0c28bc5818,0x800e2c0c28bc5818,2
+np.float64,0xbfe2be65d4657ccc,0xbfe1cf578dec2323,2
+np.float64,0xbfedea3a5afbd475,0xbfeab490bf05e585,2
+np.float64,0xbfe04b1583a0962b,0xbfdf523dfd7be25c,2
+np.float64,0x75929bb4eb254,0x75929bb4eb254,2
+np.float64,0x3fd7b4968caf692d,0x3fd731c0938ff97c,2
+np.float64,0x60bd8fd2c17b3,0x60bd8fd2c17b3,2
+np.float64,0xbfdaf15e70b5e2bc,0xbfda345a95ce18fe,2
+np.float64,0x7fdd7c35c2baf86b,0x40862d9b5f40c6b2,2
+np.float64,0x7feeb4d2ab7d69a4,0x4086337a0c0dffaf,2
+np.float64,0xffe65b5a1decb6b4,0xc08630f024420efb,2
+np.float64,0x7feb272b30764e55,0x4086327e2e553aa2,2
+np.float64,0x3fd27513e8a4ea28,0x3fd235ea49670f6a,2
+np.float64,0x3fe6541a6aeca834,0x3fe4d3a5b69fd1b6,2
+np.float64,0xbfe0c6ca0f618d94,0xbfe017058259efdb,2
+np.float64,0x7fc1bf07b7237e0e,0x4086240000fa5a52,2
+np.float64,0x7fe96af9c0f2d5f3,0x408631f6f0f4faa2,2
+np.float64,0x3fe0728be7a0e518,0x3fdf9881a5869de9,2
+np.float64,0xffe8ea4441b1d488,0xc08631ce0685ae7e,2
+np.float64,0xffd0b973f02172e8,0xc08629121e7fdf85,2
+np.float64,0xffe37b907a26f720,0xc0862fd6529401a0,2
+np.float64,0x3fe0ee826461dd05,0x3fe03a2a424a1b40,2
+np.float64,0xbfe8073c92300e79,0xbfe6340cbd179ac1,2
+np.float64,0x800768383f8ed071,0x800768383f8ed071,2
+np.float64,0x8002e467c7c5c8d0,0x8002e467c7c5c8d0,2
+np.float64,0xbfd8d53ea5b1aa7e,0xbfd83fa7243289d7,2
+np.float64,0xffebefce2bb7df9c,0xc08632b874f4f8dc,2
+np.float64,0xffe3be9eb9277d3d,0xc0862ff1ac70ad0b,2
+np.float64,0xffe2f8a82e65f150,0xc0862f9fd9e77d86,2
+np.float64,0xbfa01d151c203a30,0xbfa01c66dc13a70a,2
+np.float64,0x800877062d30ee0d,0x800877062d30ee0d,2
+np.float64,0xaade16a755bc3,0xaade16a755bc3,2
+np.float64,0xbfeb1abc70363579,0xbfe89b52c3b003aa,2
+np.float64,0x80097d0b2ad2fa17,0x80097d0b2ad2fa17,2
+np.float64,0x8001499907429333,0x8001499907429333,2
+np.float64,0x3fe8db2aaf71b656,0x3fe6dc7873f1b235,2
+np.float64,0x5cfeadc4b9fd6,0x5cfeadc4b9fd6,2
+np.float64,0xff3f77d1fe7ef,0xff3f77d1fe7ef,2
+np.float64,0xffeecd56f9bd9aad,0xc08633806cb1163d,2
+np.float64,0xbf96f3ca582de7a0,0xbf96f34c6b8e1c85,2
+np.float64,0x7ed6b44afdad7,0x7ed6b44afdad7,2
+np.float64,0x80071808da4e3012,0x80071808da4e3012,2
+np.float64,0x3feb8aee2bf715dc,0x3fe8f0a55516615c,2
+np.float64,0x800038f62e2071ed,0x800038f62e2071ed,2
+np.float64,0x3fb13f9af2227f30,0x3fb13c456ced8e08,2
+np.float64,0xffd584d1812b09a4,0xc0862b165558ec0c,2
+np.float64,0x800b20c30fb64186,0x800b20c30fb64186,2
+np.float64,0x80024f9646e49f2d,0x80024f9646e49f2d,2
+np.float64,0xffefffffffffffff,0xc08633ce8fb9f87e,2
+np.float64,0x3fdddbcb5bbbb797,0x3fdcde981111f650,2
+np.float64,0xffed14077f3a280e,0xc086330a795ad634,2
+np.float64,0x800fec2da7ffd85b,0x800fec2da7ffd85b,2
+np.float64,0x3fe8205ffc7040c0,0x3fe6482318d217f9,2
+np.float64,0x3013e5226027d,0x3013e5226027d,2
+np.float64,0xffe4e5aad469cb55,0xc0863065dc2fb4e3,2
+np.float64,0x5cb0f7b2b9620,0x5cb0f7b2b9620,2
+np.float64,0xbfeb4537d2768a70,0xbfe8bbb2c1d3bff9,2
+np.float64,0xbfd859e297b0b3c6,0xbfd7cc807948bf9d,2
+np.float64,0x71f00b8ce3e02,0x71f00b8ce3e02,2
+np.float64,0xf5c1b875eb837,0xf5c1b875eb837,2
+np.float64,0xa0f35c8141e8,0xa0f35c8141e8,2
+np.float64,0xffe24860b42490c1,0xc0862f54222f616e,2
+np.float64,0xffcd9ae8583b35d0,0xc08628181e643a42,2
+np.float64,0x7fe9b710c7736e21,0x4086320ec033490f,2
+np.float64,0x3fd2b9ca1d257394,0x3fd277e631f0c0b3,2
+np.float64,0x23559bfc46ab4,0x23559bfc46ab4,2
+np.float64,0x8002adf75e455bef,0x8002adf75e455bef,2
+np.float64,0xbfefa4d75cbf49af,0xbfebf392e51d6a1a,2
+np.float64,0xffcfef263e3fde4c,0xc08628b336adb611,2
+np.float64,0x80061acaa8ec3596,0x80061acaa8ec3596,2
+np.float64,0x7fc1b33be0236677,0x408623faaddcc17e,2
+np.float64,0x7fe3a84083675080,0x40862fe8972e41e1,2
+np.float64,0xbfe756c1276ead82,0xbfe5a6318b061e1b,2
+np.float64,0xbfae4b71b43c96e0,0xbfae46ed0b6203a4,2
+np.float64,0x800421c6d0a8438e,0x800421c6d0a8438e,2
+np.float64,0x8009ad56fe335aae,0x8009ad56fe335aae,2
+np.float64,0xbfe71afc976e35f9,0xbfe575d21f3d7193,2
+np.float64,0x7fec0bbe4c38177c,0x408632c0710f1d8a,2
+np.float64,0x750e1daeea1c4,0x750e1daeea1c4,2
+np.float64,0x800501d4240a03a9,0x800501d4240a03a9,2
+np.float64,0x800794955cef292b,0x800794955cef292b,2
+np.float64,0x3fdf8a87f5bf1510,0x3fde62f4f00cfa19,2
+np.float64,0xbfebebdbc7f7d7b8,0xbfe939e51ba1340c,2
+np.float64,0xbfe3a16217a742c4,0xbfe292039dd08a71,2
+np.float64,0x3fed6cd04c3ad9a1,0x3fea58995973f74b,2
+np.float64,0xffcad8787335b0f0,0xc086274fbb35dd37,2
+np.float64,0x3fcb178e3d362f1c,0x3fcae4c9f3e6dddc,2
+np.float64,0xbfcadc669435b8cc,0xbfcaaae7cf075420,2
+np.float64,0x7fe0e3906321c720,0x40862eb1bacc5c43,2
+np.float64,0xff8ad5edb035abc0,0xc0861120b6404d0b,2
+np.float64,0x3fe175a21562eb44,0x3fe0b13120a46549,2
+np.float64,0xbfeb4c4a5f769895,0xbfe8c1147f1c9d8f,2
+np.float64,0x7fca22f4e63445e9,0x40862718e9b4094e,2
+np.float64,0x3fe4269d0c684d3a,0x3fe3032aa2015c53,2
+np.float64,0x3fef551c09beaa38,0x3febbabe03f49c83,2
+np.float64,0xffd843df9fb087c0,0xc0862c0c52d5e5d9,2
+np.float64,0x7fc497e2ca292fc5,0x40862530bbd9fcc7,2
+np.float64,0x3fee02919efc0523,0x3feac655588a4acd,2
+np.float64,0x7fed1e52c0fa3ca5,0x4086330d4ddd8a2c,2
+np.float64,0xba04d4ef7409b,0xba04d4ef7409b,2
+np.float64,0x3fee22d0937c45a2,0x3feaddd4ca66b447,2
+np.float64,0xffeb2558cf764ab1,0xc086327da4e84053,2
+np.float64,0xbfe103d987e207b3,0xbfe04d04818ad1ff,2
+np.float64,0x3f9fd7fed03faffe,0x3f9fd6ae9a45be84,2
+np.float64,0x800a53ec4c34a7d9,0x800a53ec4c34a7d9,2
+np.float64,0xbfe2feb17f65fd63,0xbfe206b9d33a78a2,2
+np.float64,0x989bdd613139,0x989bdd613139,2
+np.float64,0xbfdd0ad3fb3a15a8,0xbfdc20c32a530741,2
+np.float64,0xbfc4222163284444,0xbfc40d1c612784b5,2
+np.float64,0xc30cf5c78619f,0xc30cf5c78619f,2
+np.float64,0x3fe913bd6732277b,0x3fe70912f76bad71,2
+np.float64,0x98f175f531e2f,0x98f175f531e2f,2
+np.float64,0x3fed8c1f717b183f,0x3fea6f9fb3af3423,2
+np.float64,0x7fee46b085bc8d60,0x4086335d269eb7e9,2
+np.float64,0x8007480f564e901f,0x8007480f564e901f,2
+np.float64,0xc9b96e179372e,0xc9b96e179372e,2
+np.float64,0x3fe44deac4289bd6,0x3fe32463a74a69e7,2
+np.float64,0x80021d6c5c243ad9,0x80021d6c5c243ad9,2
+np.float64,0xbfebc805a6f7900b,0xbfe91edcf65a1c19,2
+np.float64,0x80044748adc88e92,0x80044748adc88e92,2
+np.float64,0x4007ee44800fe,0x4007ee44800fe,2
+np.float64,0xbfe24307a4648610,0xbfe1648ad5c47b6f,2
+np.float64,0xbfee6d3a93fcda75,0xbfeb13e1a3196e78,2
+np.float64,0x3fe49a287f293451,0x3fe364a11b9f0068,2
+np.float64,0x80052b37ceaa5670,0x80052b37ceaa5670,2
+np.float64,0xbfd42be893a857d2,0xbfd3da05dac7c286,2
+np.float64,0xffb4bbe4ac2977c8,0xc0861fb31bda6956,2
+np.float64,0xbfc732a4142e6548,0xbfc7129a4eafa399,2
+np.float64,0x7fd0696791a0d2ce,0x408628eb7756cb9c,2
+np.float64,0x3fe46c8f8d68d91f,0x3fe33e3df16187c1,2
+np.float64,0x3fe3a28f1ce7451e,0x3fe293043238d08c,2
+np.float64,0xffedc4eb723b89d6,0xc086333a92258c15,2
+np.float64,0x8000d15b4c41a2b7,0x8000d15b4c41a2b7,2
+np.float64,0xffeb73450236e689,0xc08632947b0148ab,2
+np.float64,0xffe68cf4722d19e8,0xc0863101d08d77bd,2
+np.float64,0x800c70eb4698e1d7,0x800c70eb4698e1d7,2
+np.float64,0xffa94387ff529,0xffa94387ff529,2
+np.float64,0x7fe3835d996706ba,0x40862fd985ff8e7d,2
+np.float64,0x3fe55e476feabc8e,0x3fe408a15594ec52,2
+np.float64,0xffc69672222d2ce4,0xc08625ee0c4c0f6a,2
+np.float64,0xbf9d900b883b2020,0xbf9d8efe811d36df,2
+np.float64,0xbfdb9b9755b7372e,0xbfdad0f2aa2cb110,2
+np.float64,0xffeade6073b5bcc0,0xc08632689f17a25d,2
+np.float64,0xffd1d6a6baa3ad4e,0xc086299630a93a7b,2
+np.float64,0x7fd05ba25620b744,0x408628e4be1ef845,2
+np.float64,0xbfc7d422d52fa844,0xbfc7b170a61531bf,2
+np.float64,0x3fd5196797aa32d0,0x3fd4bc0f0e7d8e1d,2
+np.float64,0x617594a4c2eb3,0x617594a4c2eb3,2
+np.float64,0x7fd779bc4caef378,0x40862bc89271b882,2
+np.float64,0xffd2fb262ba5f64c,0xc0862a15561e9524,2
+np.float64,0x72fd661ae5fad,0x72fd661ae5fad,2
+np.float64,0x3fecf441f339e884,0x3fe9ff880d584f64,2
+np.float64,0x7fc3a8968827512c,0x408624d198b05c61,2
+np.float64,0x3fe7a25c56ef44b9,0x3fe5e32509a7c32d,2
+np.float64,0x7fd117d514222fa9,0x4086293ec640d5f2,2
+np.float64,0x3fe37dfe5ee6fbfc,0x3fe273d1bcaa1ef0,2
+np.float64,0xbfed4cd19d7a99a3,0xbfea41064cba4c8b,2
+np.float64,0x8003ff12aaa7fe26,0x8003ff12aaa7fe26,2
+np.float64,0x3fcbc3d1193787a2,0x3fcb8d39e3e88264,2
+np.float64,0xe9ba1a91d3744,0xe9ba1a91d3744,2
+np.float64,0x8002ab71998556e4,0x8002ab71998556e4,2
+np.float64,0x800110057922200c,0x800110057922200c,2
+np.float64,0xbfe3b7af19a76f5e,0xbfe2a502fc0a2882,2
+np.float64,0x7fd9de9d5e33bd3a,0x40862c8f73cccabf,2
+np.float64,0xbfba0f0a86341e18,0xbfba0392f44c2771,2
+np.float64,0x8000000000000000,0x8000000000000000,2
+np.float64,0x7fe5d162e96ba2c5,0x408630be2b15e01b,2
+np.float64,0x800b7f0eac76fe1e,0x800b7f0eac76fe1e,2
+np.float64,0xff98bed150317da0,0xc086160633164f5f,2
+np.float64,0x3fef91fd70ff23fb,0x3febe629709d0ae7,2
+np.float64,0x7fe5bea7f16b7d4f,0x408630b749f445e9,2
+np.float64,0xbfe3dc428467b885,0xbfe2c41ea93fab07,2
+np.float64,0xbfeba1fbfcf743f8,0xbfe9021b52851bb9,2
+np.float64,0x7fd2fb2108a5f641,0x40862a1553f45830,2
+np.float64,0x7feb8199a4370332,0x40863298a7169dad,2
+np.float64,0x800f97ff8d7f2fff,0x800f97ff8d7f2fff,2
+np.float64,0x3fd5e20b6b2bc417,0x3fd57a42bd1c0993,2
+np.float64,0x8006b4072dad680f,0x8006b4072dad680f,2
+np.float64,0x605dccf2c0bba,0x605dccf2c0bba,2
+np.float64,0x3fc705ed142e0bda,0x3fc6e69971d86f73,2
+np.float64,0xffd2ba1aad257436,0xc08629f9bc918f8b,2
+np.float64,0x8002954e23c52a9d,0x8002954e23c52a9d,2
+np.float64,0xbfecc65da7798cbb,0xbfe9dd745be18562,2
+np.float64,0x7fc66110482cc220,0x408625db0db57ef8,2
+np.float64,0x3fcd09446d3a1289,0x3fcccaf2dd0a41ea,2
+np.float64,0x3febe7095437ce13,0x3fe93642d1e73b2a,2
+np.float64,0x8004773c7da8ee7a,0x8004773c7da8ee7a,2
+np.float64,0x8001833241230665,0x8001833241230665,2
+np.float64,0x3fe6a262db6d44c6,0x3fe513b3dab5adce,2
+np.float64,0xe6282cc1cc506,0xe6282cc1cc506,2
+np.float64,0x800b9d8553973b0b,0x800b9d8553973b0b,2
+np.float64,0x3fdfbe0c7b3f7c19,0x3fde912375d867a8,2
+np.float64,0x7fd5ac11ebab5823,0x40862b24dfc6d08e,2
+np.float64,0x800e4b7cb1fc96f9,0x800e4b7cb1fc96f9,2
+np.float64,0x3fe14706da628e0e,0x3fe0883aec2a917a,2
+np.float64,0x7fc963f97532c7f2,0x408626dd9b0cafe1,2
+np.float64,0xbfe9c250b5b384a2,0xbfe791c5eabcb05d,2
+np.float64,0x3fe8d16e6c71a2dd,0x3fe6d4c7a33a0bf4,2
+np.float64,0x3fe474ae4628e95d,0x3fe34515c93f4733,2
+np.float64,0x3fbf3257ee3e64b0,0x3fbf1eb530e126ea,2
+np.float64,0x8005f089b3abe114,0x8005f089b3abe114,2
+np.float64,0x3fece07bccf9c0f8,0x3fe9f0dc228124d5,2
+np.float64,0xbfc52521632a4a44,0xbfc50ccebdf59c2c,2
+np.float64,0x7fdf53beb13ea77c,0x40862e177918195e,2
+np.float64,0x8003d9f6ad07b3ee,0x8003d9f6ad07b3ee,2
+np.float64,0xffeacf96bbb59f2d,0xc086326436b38b1a,2
+np.float64,0xdccaea29b995e,0xdccaea29b995e,2
+np.float64,0x5948d21eb291b,0x5948d21eb291b,2
+np.float64,0x10000000000000,0x10000000000000,2
+np.float64,0x7fef6d2c543eda58,0x408633a98593cdf5,2
+np.float64,0x7feda454f47b48a9,0x40863331cb6dc9f7,2
+np.float64,0x3fdd377cecba6ef8,0x3fdc4968f74a9c83,2
+np.float64,0x800644096d4c8814,0x800644096d4c8814,2
+np.float64,0xbfe33ca15ae67942,0xbfe23be5de832bd8,2
+np.float64,0xffce9582bd3d2b04,0xc086285abdf9bf9d,2
+np.float64,0x3fe6621e86acc43d,0x3fe4df231bfa93e1,2
+np.float64,0xee7d19e9dcfa3,0xee7d19e9dcfa3,2
+np.float64,0x800be5997277cb33,0x800be5997277cb33,2
+np.float64,0x82069041040e,0x82069041040e,2
+np.float64,0x800d6efdc19addfc,0x800d6efdc19addfc,2
+np.float64,0x7fb27770ee24eee1,0x40861ec5ed91b839,2
+np.float64,0x3fd506064caa0c0d,0x3fd4a9a66353fefd,2
+np.float64,0xbfeca9b36bf95367,0xbfe9c81f03ba37b8,2
+np.float64,0xffeab1b7bab5636f,0xc086325b47f61f2b,2
+np.float64,0xffc99f5b2e333eb8,0xc08626f03b08b412,2
+np.float64,0x3fbf1a71bc3e34e3,0x3fbf06fbcaa5de58,2
+np.float64,0x3fe75015736ea02b,0x3fe5a0cd8d763d8d,2
+np.float64,0xffe6a7442fad4e88,0xc086310b20addba4,2
+np.float64,0x3fe5d62ff86bac60,0x3fe46c033195bf28,2
+np.float64,0x7fd0b1f0362163df,0x4086290e857dc1be,2
+np.float64,0xbe0353737c06b,0xbe0353737c06b,2
+np.float64,0x7fec912d8739225a,0x408632e627704635,2
+np.float64,0xded8ba2fbdb18,0xded8ba2fbdb18,2
+np.float64,0x7fec0b53fdf816a7,0x408632c052bc1bd2,2
+np.float64,0x7fe9640d12b2c819,0x408631f4c2ba54d8,2
+np.float64,0x800be714eeb7ce2a,0x800be714eeb7ce2a,2
+np.float64,0xbfcf444a793e8894,0xbfcef6c126b54853,2
+np.float64,0xffeb20cf1bf6419e,0xc086327c4e6ffe80,2
+np.float64,0xc07de22180fd,0xc07de22180fd,2
+np.float64,0xffed129d387a253a,0xc086330a15ad0adb,2
+np.float64,0x3fd9e94fedb3d2a0,0x3fd94049924706a8,2
+np.float64,0x7fe6ba488c2d7490,0x40863111d51e7861,2
+np.float64,0xbfebbdf25db77be5,0xbfe91740ad7ba521,2
+np.float64,0x7fbc6c3c4838d878,0x40862239160cb613,2
+np.float64,0xbfefa82ecebf505e,0xbfebf5f31957dffd,2
+np.float64,0x800bebeb7ad7d7d7,0x800bebeb7ad7d7d7,2
+np.float64,0x7fecccc6f8f9998d,0x408632f6c6da8aac,2
+np.float64,0xcbe4926197ca,0xcbe4926197ca,2
+np.float64,0x2c5d9fd858bb5,0x2c5d9fd858bb5,2
+np.float64,0xbfe9fb021073f604,0xbfe7bddc61f1151a,2
+np.float64,0xbfebb18572f7630b,0xbfe90ddc5002313f,2
+np.float64,0x13bb0d3227763,0x13bb0d3227763,2
+np.float64,0x3feefa5e5cbdf4bd,0x3feb79b9e8ce16bf,2
+np.float64,0x3fc97f086132fe10,0x3fc9549fc8e15ecb,2
+np.float64,0xffe70887c06e110f,0xc086312d30fd31cf,2
+np.float64,0xa00c113540182,0xa00c113540182,2
+np.float64,0x800950984772a131,0x800950984772a131,2
+np.float64,0x1,0x1,2
+np.float64,0x3fd83b4026b07680,0x3fd7afdc659d9a34,2
+np.float64,0xbfe32348fbe64692,0xbfe226292a706a1a,2
+np.float64,0x800b894dcc77129c,0x800b894dcc77129c,2
+np.float64,0xeb2ca419d6595,0xeb2ca419d6595,2
+np.float64,0xbff0000000000000,0xbfec34366179d427,2
+np.float64,0x3feb269e99f64d3d,0x3fe8a4634b927a21,2
+np.float64,0xbfe83149d7706294,0xbfe655a2b245254e,2
+np.float64,0xbfe6eef3ca6ddde8,0xbfe5521310e24d16,2
+np.float64,0x3fea89a4b7b51349,0x3fe82c1fc69edcec,2
+np.float64,0x800f2a8bf17e5518,0x800f2a8bf17e5518,2
+np.float64,0x800f71fac29ee3f6,0x800f71fac29ee3f6,2
+np.float64,0xe7cb31f1cf966,0xe7cb31f1cf966,2
+np.float64,0x3b0f8752761f2,0x3b0f8752761f2,2
+np.float64,0x3fea27dea3744fbd,0x3fe7e0a4705476b2,2
+np.float64,0xbfa97c019c32f800,0xbfa97950c1257b92,2
+np.float64,0xffeff13647ffe26c,0xc08633cadc7105ed,2
+np.float64,0x3feee162353dc2c4,0x3feb67c2da0fbce8,2
+np.float64,0x80088c0807911810,0x80088c0807911810,2
+np.float64,0x3fe936ab1db26d56,0x3fe72489bc69719d,2
+np.float64,0xa2f84bd545f0a,0xa2f84bd545f0a,2
+np.float64,0xbfed445ed27a88be,0xbfea3acac0aaf482,2
+np.float64,0x800faf3e69df5e7d,0x800faf3e69df5e7d,2
+np.float64,0x3fc145a330228b46,0x3fc13853f11b1c90,2
+np.float64,0xbfe25ec5abe4bd8c,0xbfe17c9e9b486f07,2
+np.float64,0x3fe119b160e23363,0x3fe0604b10178966,2
+np.float64,0x7fe0cbf2836197e4,0x40862ea6831e5f4a,2
+np.float64,0x3fe75dd3b4eebba8,0x3fe5abe80fd628fb,2
+np.float64,0x3f7c391000387220,0x3f7c39015d8f3a36,2
+np.float64,0x899d9cad133b4,0x899d9cad133b4,2
+np.float64,0x3fe5f0e34febe1c6,0x3fe4820cefe138fc,2
+np.float64,0x7fe060dfdba0c1bf,0x40862e72de8afcd0,2
+np.float64,0xbfae42f7103c85f0,0xbfae3e7630819c60,2
+np.float64,0x35f1f2c06be5,0x35f1f2c06be5,2
+np.float64,0xffc5194d362a329c,0xc086256266c8b7ad,2
+np.float64,0xbfda034f1b34069e,0xbfd95860a44c43ad,2
+np.float64,0x32bcebca6579e,0x32bcebca6579e,2
+np.float64,0xbfd1751ebca2ea3e,0xbfd13f79f45bf75c,2
+np.float64,0x3fee4fa1e5bc9f44,0x3feafe69e0d6c1c7,2
+np.float64,0x7f9c03cd5038079a,0x4086170459172900,2
+np.float64,0x7fc5fb6d6d2bf6da,0x408625b6651cfc73,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xffd1a8162ca3502c,0xc0862981333931ad,2
+np.float64,0x7fc415c198282b82,0x408624fd8c155d1b,2
+np.float64,0xffda37fbe7b46ff8,0xc0862caae7865c43,2
+np.float64,0xbfef4312257e8624,0xbfebadd89f3ee31c,2
+np.float64,0xbfec45e1fd788bc4,0xbfe97d8b14db6274,2
+np.float64,0xbfe6fdcfd26dfba0,0xbfe55e25b770d00a,2
+np.float64,0x7feb66d424f6cda7,0x40863290d9ff7ea2,2
+np.float64,0x8b08a29916115,0x8b08a29916115,2
+np.float64,0xffe12ca25c625944,0xc0862ed40d769f72,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x804925e100925,0x804925e100925,2
+np.float64,0xcebf3e019d9,0xcebf3e019d9,2
+np.float64,0xbfd5d75d4aabaeba,0xbfd57027671dedf7,2
+np.float64,0x800b829ecd37053e,0x800b829ecd37053e,2
+np.float64,0x800b1205daf6240c,0x800b1205daf6240c,2
+np.float64,0x3fdf7e9889befd31,0x3fde583fdff406c3,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0x3fdc09760d3812ec,0x3fdb35b55c8090c6,2
+np.float64,0x800c4d99e4f89b34,0x800c4d99e4f89b34,2
+np.float64,0xffbaa6772e354cf0,0xc08621b535badb2f,2
+np.float64,0xbfc91188fd322310,0xbfc8e933b5d25ea7,2
+np.float64,0xffc1b947f4237290,0xc08623fd69164251,2
+np.float64,0x3fc6ab3b252d5678,0x3fc68d50bbac106d,2
+np.float64,0xffac8eb968391d70,0xc0861cb734833355,2
+np.float64,0xffe29a35c365346b,0xc0862f77a1aed6d8,2
+np.float64,0x3fde14b9543c2973,0x3fdd122697779015,2
+np.float64,0xbf10f5400021e000,0xbf10f53fffef1383,2
+np.float64,0xffe0831aa3e10635,0xc0862e838553d0ca,2
+np.float64,0x3fccbadbcf3975b8,0x3fcc7e768d0154ec,2
+np.float64,0x3fe092ef66e125df,0x3fdfd212a7116c9b,2
+np.float64,0xbfd727f039ae4fe0,0xbfd6adad040b2334,2
+np.float64,0xbfe4223b93a84477,0xbfe2ff7587364db4,2
+np.float64,0x3f4e5c3a003cb874,0x3f4e5c39b75c70f7,2
+np.float64,0x800e76b1a87ced63,0x800e76b1a87ced63,2
+np.float64,0x3fed2b7368fa56e7,0x3fea2863b9131b8c,2
+np.float64,0xffadb76ec43b6ee0,0xc0861d08ae79f20c,2
+np.float64,0x800b6a0cd1f6d41a,0x800b6a0cd1f6d41a,2
+np.float64,0xffee6aa943fcd552,0xc0863366a24250d5,2
+np.float64,0xbfe68cbc4e6d1978,0xbfe502040591aa5b,2
+np.float64,0xff859a38002b3480,0xc0860f64726235cc,2
+np.float64,0x3474d13e68e9b,0x3474d13e68e9b,2
+np.float64,0xffc11d49f6223a94,0xc08623b5c2df9712,2
+np.float64,0x800d82d019bb05a0,0x800d82d019bb05a0,2
+np.float64,0xbfe2af0192255e03,0xbfe1c20e38106388,2
+np.float64,0x3fe97d13c032fa28,0x3fe75bba11a65f86,2
+np.float64,0x7fcd457e133a8afb,0x40862800e80f5863,2
+np.float64,0x9d7254cf3ae4b,0x9d7254cf3ae4b,2
+np.float64,0x8003047675a608ee,0x8003047675a608ee,2
+np.float64,0x3fead6cd7d75ad9a,0x3fe8676138e5ff93,2
+np.float64,0x3fea6ee3b0f4ddc7,0x3fe817838a2bcbe3,2
+np.float64,0x3feed0edea7da1dc,0x3feb5bea3cb12fe2,2
+np.float64,0x88003fe510008,0x88003fe510008,2
+np.float64,0x3fe64cadc56c995c,0x3fe4cd8ead87fc79,2
+np.float64,0xaae30c5955c62,0xaae30c5955c62,2
+np.float64,0x7fc8c97cae3192f8,0x408626ac579f4fc5,2
+np.float64,0xbfc2bc0e8b25781c,0xbfc2ab188fdab7dc,2
+np.float64,0xc8f8e5e791f1d,0xc8f8e5e791f1d,2
+np.float64,0x3fecfaa5d6f9f54c,0x3fea0444dabe5a15,2
+np.float64,0xbfeb93740ff726e8,0xbfe8f71a9ab13baf,2
+np.float64,0xffd951236c32a246,0xc0862c633a4661eb,2
+np.float64,0x3fddbc5fcd3b78c0,0x3fdcc21c1a0a9246,2
+np.float64,0xbfd242443da48488,0xbfd20512d91f7924,2
+np.float64,0x2a3689b2546d2,0x2a3689b2546d2,2
+np.float64,0xffe24c67382498ce,0xc0862f55e4ea6283,2
+np.float64,0x800cbfce22197f9c,0x800cbfce22197f9c,2
+np.float64,0x8002269428044d29,0x8002269428044d29,2
+np.float64,0x7fd44babbd289756,0x40862a9e79b51c3b,2
+np.float64,0x3feea056a27d40ad,0x3feb38dcddb682f0,2
+np.float64,0xffeca8174b39502e,0xc08632ec8f88a5b2,2
+np.float64,0x7fbe0853a03c10a6,0x408622a9e8d53a9e,2
+np.float64,0xbfa9704b2432e090,0xbfa96d9dfc8c0cc2,2
+np.float64,0x800bda28fab7b452,0x800bda28fab7b452,2
+np.float64,0xbfb0ffa2f621ff48,0xbfb0fc71f405e82a,2
+np.float64,0xbfe66c04216cd808,0xbfe4e73ea3b58cf6,2
+np.float64,0x3fe336ea5d266dd5,0x3fe236ffcf078c62,2
+np.float64,0xbfe7729ae6aee536,0xbfe5bcad4b8ac62d,2
+np.float64,0x558cfc96ab1a0,0x558cfc96ab1a0,2
+np.float64,0xbfe7d792aaefaf26,0xbfe60de1b8f0279d,2
+np.float64,0xffd19ef6bda33dee,0xc086297d0ffee3c7,2
+np.float64,0x666b3ab4ccd68,0x666b3ab4ccd68,2
+np.float64,0xffa3d89e3c27b140,0xc08619cdeb2c1e49,2
+np.float64,0xbfb1728f7f62f,0xbfb1728f7f62f,2
+np.float64,0x3fc76319f32ec634,0x3fc74247bd005e20,2
+np.float64,0xbfbf1caee23e3960,0xbfbf0934c13d70e2,2
+np.float64,0x7fe79626f32f2c4d,0x4086315dcc68a5cb,2
+np.float64,0xffee78c4603cf188,0xc086336a572c05c2,2
+np.float64,0x3fce546eda3ca8de,0x3fce0d8d737fd31d,2
+np.float64,0xa223644d4446d,0xa223644d4446d,2
+np.float64,0x3fecea878b79d510,0x3fe9f850d50973f6,2
+np.float64,0x3fc20e0ea1241c1d,0x3fc1fedda87c5e75,2
+np.float64,0xffd1c5a99ca38b54,0xc086298e8e94cd47,2
+np.float64,0x7feb2c299d765852,0x4086327fa6db2808,2
+np.float64,0xcaf9d09595f3a,0xcaf9d09595f3a,2
+np.float64,0xbfe293bf21e5277e,0xbfe1aa7f6ac274ef,2
+np.float64,0xbfbaa3c8ce354790,0xbfba97891df19c01,2
+np.float64,0x3faf5784543eaf09,0x3faf5283acc7d71d,2
+np.float64,0x7fc014f8f62029f1,0x40862336531c662d,2
+np.float64,0xbfe0d9ac2d61b358,0xbfe027bce36699ca,2
+np.float64,0x8003e112ff27c227,0x8003e112ff27c227,2
+np.float64,0xffec0d4151381a82,0xc08632c0df718dd0,2
+np.float64,0x7fa2156fb0242ade,0x4086190f7587d708,2
+np.float64,0xd698358dad307,0xd698358dad307,2
+np.float64,0xbfed8d1b0efb1a36,0xbfea70588ef9ba18,2
+np.float64,0xbfd2cae6a92595ce,0xbfd28851e2185dee,2
+np.float64,0xffe7a36764ef46ce,0xc086316249c9287a,2
+np.float64,0xbfdb8ad8e5b715b2,0xbfdac19213c14315,2
+np.float64,0x3b5dba6076bc,0x3b5dba6076bc,2
+np.float64,0x800e6e8347bcdd07,0x800e6e8347bcdd07,2
+np.float64,0x800bea9f3fb7d53f,0x800bea9f3fb7d53f,2
+np.float64,0x7fb6d0e5fc2da1cb,0x4086207714c4ab85,2
+np.float64,0x0,0x0,2
+np.float64,0xbfe2aa1e1465543c,0xbfe1bdd550ef2966,2
+np.float64,0x7fd3f6a47fa7ed48,0x40862a7caea33055,2
+np.float64,0x800094e292c129c6,0x800094e292c129c6,2
+np.float64,0x800e1500ecbc2a02,0x800e1500ecbc2a02,2
+np.float64,0xbfd8ff6f97b1fee0,0xbfd866f84346ecdc,2
+np.float64,0x681457d0d028c,0x681457d0d028c,2
+np.float64,0x3feed0b5987da16b,0x3feb5bc1ab424984,2
+np.float64,0x3fdbcb34cdb79668,0x3fdafca540f32c06,2
+np.float64,0xbfdc9eacdcb93d5a,0xbfdbbe274aa8aeb0,2
+np.float64,0xffe6e35d526dc6ba,0xc08631203df38ed2,2
+np.float64,0x3fcac1cc65358398,0x3fca90de41889613,2
+np.float64,0xbfebf07a55b7e0f5,0xbfe93d6007db0c67,2
+np.float64,0xbfd7a7b1e7af4f64,0xbfd725a9081c22cb,2
+np.float64,0x800232bd7de4657c,0x800232bd7de4657c,2
+np.float64,0x7fb1dae43c23b5c7,0x40861e80f5c0a64e,2
+np.float64,0x8013ded70027c,0x8013ded70027c,2
+np.float64,0x7fc4373a59286e74,0x4086250ad60575d0,2
+np.float64,0xbfe9980fd6733020,0xbfe770d1352d0ed3,2
+np.float64,0x8008a66b8dd14cd7,0x8008a66b8dd14cd7,2
+np.float64,0xbfaebc67f83d78d0,0xbfaeb7b015848478,2
+np.float64,0xffd0c52762218a4e,0xc0862917b564afc6,2
+np.float64,0xbfd503860aaa070c,0xbfd4a74618441561,2
+np.float64,0x5bdacabcb7b5a,0x5bdacabcb7b5a,2
+np.float64,0xf3623cffe6c48,0xf3623cffe6c48,2
+np.float64,0x7fe16c6c7ea2d8d8,0x40862ef18d90201f,2
+np.float64,0x3ff0000000000000,0x3fec34366179d427,2
+np.float64,0x7fe19cbc84233978,0x40862f079dcbc169,2
+np.float64,0x3fcfd3d6933fa7ad,0x3fcf822187907f6b,2
+np.float64,0x8007d65d672facbc,0x8007d65d672facbc,2
+np.float64,0xffca6115aa34c22c,0xc086272bd7728750,2
+np.float64,0xbfe77ab1556ef562,0xbfe5c332fb55b66e,2
+np.float64,0x8001ed797c23daf4,0x8001ed797c23daf4,2
+np.float64,0x7fdd3d16cb3a7a2d,0x40862d8a2c869281,2
+np.float64,0x75f36beaebe6e,0x75f36beaebe6e,2
+np.float64,0xffda3c2798b47850,0xc0862cac2d3435df,2
+np.float64,0xbfa37cc3c426f980,0xbfa37b8f9d3ec4b7,2
+np.float64,0x80030ea8bd061d52,0x80030ea8bd061d52,2
+np.float64,0xffe41f7617683eec,0xc08630188a3e135e,2
+np.float64,0x800e40590dfc80b2,0x800e40590dfc80b2,2
+np.float64,0x3fea950d80f52a1c,0x3fe834e74481e66f,2
+np.float64,0xffec95e39a792bc6,0xc08632e779150084,2
+np.float64,0xbfd54310ecaa8622,0xbfd4e39c4d767002,2
+np.float64,0xffd40c9971a81932,0xc0862a85764eb2f4,2
+np.float64,0xb0a2230761445,0xb0a2230761445,2
+np.float64,0x80092973661252e7,0x80092973661252e7,2
+np.float64,0x7fb13b030a227605,0x40861e380aeb5549,2
+np.float64,0x3fbd5d8db23abb1b,0x3fbd4d2a0b94af36,2
+np.float64,0xbfd6cb8567ad970a,0xbfd656b19ab8fa61,2
+np.float64,0xbfe7c0fd346f81fa,0xbfe5fbc28807c794,2
+np.float64,0xffd586579eab0cb0,0xc0862b16e65c0754,2
+np.float64,0x8000e52da461ca5c,0x8000e52da461ca5c,2
+np.float64,0x3fc69d17112d3a2e,0x3fc67f63fe1fea1c,2
+np.float64,0x3fd36ba892a6d750,0x3fd3225be1fa87af,2
+np.float64,0x7fe2850598e50a0a,0x40862f6e7fcd6c1a,2
+np.float64,0x80074a4dacce949c,0x80074a4dacce949c,2
+np.float64,0x3fe25eea4d64bdd5,0x3fe17cbe5fefbd4e,2
+np.float64,0xbfe250c08be4a181,0xbfe17074c520e5de,2
+np.float64,0x8000f5665481eacd,0x8000f5665481eacd,2
+np.float64,0x7fdb3172f83662e5,0x40862cf5a46764f1,2
+np.float64,0x7fd8ed82d631db05,0x40862c4380658afa,2
+np.float64,0xffec5163feb8a2c7,0xc08632d4366aab06,2
+np.float64,0x800ff14ac6ffe296,0x800ff14ac6ffe296,2
+np.float64,0xbfc7cc7aea2f98f4,0xbfc7a9e9cb38f023,2
+np.float64,0xbfd50cdfc32a19c0,0xbfd4b0282b452fb2,2
+np.float64,0xbfec256d75b84adb,0xbfe965328c1860b2,2
+np.float64,0xffe860c4cdb0c189,0xc08631a164b7059a,2
+np.float64,0xbfe23de164247bc3,0xbfe16011bffa4651,2
+np.float64,0xcc96b39d992d7,0xcc96b39d992d7,2
+np.float64,0xbfec43acf938875a,0xbfe97be3a13b50c3,2
+np.float64,0xc4f587bb89eb1,0xc4f587bb89eb1,2
+np.float64,0xbfcd971d9a3b2e3c,0xbfcd5537ad15dab4,2
+np.float64,0xffcaf00d8035e01c,0xc0862756bf2cdf8f,2
+np.float64,0x8008c26f93f184e0,0x8008c26f93f184e0,2
+np.float64,0xfff0000000000000,0xfff0000000000000,2
+np.float64,0xbfd13552c3a26aa6,0xbfd101e5e252eb7b,2
+np.float64,0x7fe497235e292e46,0x4086304792fb423a,2
+np.float64,0x7fd6dc0192adb802,0x40862b921a5e935d,2
+np.float64,0xf16d49a1e2da9,0xf16d49a1e2da9,2
+np.float64,0xffef6b1b71bed636,0xc08633a8feed0178,2
+np.float64,0x7fe15ec62f62bd8b,0x40862eeb46b193dc,2
+np.float64,0x3fef4369ec7e86d4,0x3febae1768be52cc,2
+np.float64,0x4f84e8e89f09e,0x4f84e8e89f09e,2
+np.float64,0xbfe19e71ade33ce4,0xbfe0d4fad05e0ebc,2
+np.float64,0xbfe7e1df1defc3be,0xbfe616233e15b3d0,2
+np.float64,0x7fe9349afdb26935,0x408631e5c1c5c6cd,2
+np.float64,0xff90c35ac82186c0,0xc08612e896a06467,2
+np.float64,0xbfe88bf8807117f1,0xbfe69dc786464422,2
+np.float64,0x3feaf9ff6475f3fe,0x3fe8825132410d18,2
+np.float64,0x9ff487a33fe91,0x9ff487a33fe91,2
+np.float64,0x7fedb30159bb6602,0x40863335c0419322,2
+np.float64,0x800bddf6ed77bbee,0x800bddf6ed77bbee,2
+np.float64,0x3fd919df133233be,0x3fd87f963b9584ce,2
+np.float64,0x7fd64da3b52c9b46,0x40862b5fa9dd3b6d,2
+np.float64,0xbfce288db43c511c,0xbfcde2d953407ae8,2
+np.float64,0x3fe88bc72771178e,0x3fe69da05e9e9b4e,2
+np.float64,0x800feafe259fd5fc,0x800feafe259fd5fc,2
+np.float64,0x3febbbff4a7777ff,0x3fe915c78f6a280f,2
+np.float64,0xbfefbde4417f7bc9,0xbfec055f4fb2cd21,2
+np.float64,0xf13ca103e2794,0xf13ca103e2794,2
+np.float64,0x3fe6423884ec8471,0x3fe4c4f97eaa876a,2
+np.float64,0x800ca01c8cb94039,0x800ca01c8cb94039,2
+np.float64,0x3fbc5073f638a0e0,0x3fbc41c163ac0001,2
+np.float64,0xbfda0d83cfb41b08,0xbfd961d4cacc82cf,2
+np.float64,0x800f37b8f17e6f72,0x800f37b8f17e6f72,2
+np.float64,0x7fe0b08cd7216119,0x40862e996becb771,2
+np.float64,0xffd4222a40a84454,0xc0862a8e0c984917,2
+np.float64,0x7feb3df98ff67bf2,0x40863284e3a86ee6,2
+np.float64,0x8001d5d291e3aba6,0x8001d5d291e3aba6,2
+np.float64,0xbfd3c21629a7842c,0xbfd3750095a5894a,2
+np.float64,0xbfd069eb48a0d3d6,0xbfd03d2b1c2ae9db,2
+np.float64,0xffeb1be2973637c4,0xc086327ada954662,2
+np.float64,0x3fc659f97e2cb3f3,0x3fc63d497a451f10,2
+np.float64,0xbfeb624bc776c498,0xbfe8d1cf7c0626ca,2
+np.float64,0xffeedf26e23dbe4d,0xc08633850baab425,2
+np.float64,0xffe70da48a6e1b48,0xc086312ef75d5036,2
+np.float64,0x2b4f4830569ea,0x2b4f4830569ea,2
+np.float64,0xffe82e7fcfb05cff,0xc0863190d4771f75,2
+np.float64,0x3fcc2c1fd5385840,0x3fcbf3211ddc5123,2
+np.float64,0x7fe22ced5a6459da,0x40862f481629ee6a,2
+np.float64,0x7fe13d2895e27a50,0x40862edbbc411899,2
+np.float64,0x3fd54c4280aa9884,0x3fd4ec55a946c5d7,2
+np.float64,0xffd75b8e01aeb71c,0xc0862bbe42d76e5e,2
+np.float64,0x7f1d5376fe3ab,0x7f1d5376fe3ab,2
+np.float64,0x3fe6ec6c902dd8d9,0x3fe55004f35192bd,2
+np.float64,0x5634504aac68b,0x5634504aac68b,2
+np.float64,0x3feedb0d83bdb61b,0x3feb633467467ce6,2
+np.float64,0x3fddb1c0dcbb6380,0x3fdcb87a02daf1fa,2
+np.float64,0xbfa832da443065b0,0xbfa8308c70257209,2
+np.float64,0x87a9836b0f531,0x87a9836b0f531,2
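
For orientation between the two data files: each row of these umath-validation CSVs gives a dtype, the raw IEEE-754 bit pattern of the input, the expected output bit pattern, and the permitted error in units in the last place (the `ulperrortol` column declared in the header below). The following is a minimal sketch of how such a row could be checked; it is not NumPy's actual test harness, and the helper names `bits_to_float` and `ulp_diff` are illustrative. The ULP distance here is the simple same-sign integer-representation distance, which is sufficient for these finite, matching-sign cases.

import numpy as np

def bits_to_float(hexstr, dtype):
    # Interpret a hex literal as the raw bit pattern of a float.
    itype = np.uint32 if dtype == np.float32 else np.uint64
    return np.array([int(hexstr, 16)], dtype=itype).view(dtype)[0]

def ulp_diff(actual, expected):
    # ULP distance via the integer representation (valid for
    # same-sign finite values, as in these tables).
    itype = np.uint32 if actual.dtype == np.float32 else np.uint64
    a = np.array([actual]).view(itype).astype(np.int64)[0]
    b = np.array([expected]).view(itype).astype(np.int64)[0]
    return abs(int(a) - int(b))

# e.g. the row "np.float32,0x7f800000,0x3fc90fdb,3" from the arctan
# set below: arctan(+inf) should be pi/2 to within 3 ulp.
x = bits_to_float("0x7f800000", np.float32)     # +inf
want = bits_to_float("0x3fc90fdb", np.float32)  # pi/2 as float32
got = np.arctan(x).astype(np.float32)
assert ulp_diff(got, want) <= 3
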
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv
new file mode 100644
index 00000000..1e92073d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctan.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3f338252,0x3f1c8d9c,3
+np.float32,0x7e569df2,0x3fc90fdb,3
+np.float32,0xbf347e25,0xbf1d361f,3
+np.float32,0xbf0a654e,0xbefdbfd2,3
+np.float32,0x8070968e,0x8070968e,3
+np.float32,0x803cfb27,0x803cfb27,3
+np.float32,0x8024362e,0x8024362e,3
+np.float32,0xfd55dca0,0xbfc90fdb,3
+np.float32,0x592b82,0x592b82,3
+np.float32,0x802eb8e1,0x802eb8e1,3
+np.float32,0xbc5fef40,0xbc5febae,3
+np.float32,0x3f1f6ce8,0x3f0e967c,3
+np.float32,0x20bedc,0x20bedc,3
+np.float32,0xbf058860,0xbef629c7,3
+np.float32,0x311504,0x311504,3
+np.float32,0xbd23f560,0xbd23defa,3
+np.float32,0x800ff4e8,0x800ff4e8,3
+np.float32,0x355009,0x355009,3
+np.float32,0x3f7be42e,0x3f46fdb3,3
+np.float32,0xbf225f7c,0xbf10b364,3
+np.float32,0x8074fa9e,0x8074fa9e,3
+np.float32,0xbea4b418,0xbe9f59ce,3
+np.float32,0xbe909c14,0xbe8cf045,3
+np.float32,0x80026bee,0x80026bee,3
+np.float32,0x3d789c20,0x3d784e25,3
+np.float32,0x7f56a4ba,0x3fc90fdb,3
+np.float32,0xbf70d141,0xbf413db7,3
+np.float32,0xbf2c4886,0xbf17a505,3
+np.float32,0x7e2993bf,0x3fc90fdb,3
+np.float32,0xbe2c8a30,0xbe2aef28,3
+np.float32,0x803f82d9,0x803f82d9,3
+np.float32,0x3f062fbc,0x3ef730a1,3
+np.float32,0x3f349ee0,0x3f1d4bfa,3
+np.float32,0x3eccfb69,0x3ec2f9e8,3
+np.float32,0x7e8a85dd,0x3fc90fdb,3
+np.float32,0x25331,0x25331,3
+np.float32,0x464f19,0x464f19,3
+np.float32,0x8035c818,0x8035c818,3
+np.float32,0x802e5799,0x802e5799,3
+np.float32,0x64e1c0,0x64e1c0,3
+np.float32,0x701cc2,0x701cc2,3
+np.float32,0x265c57,0x265c57,3
+np.float32,0x807a053f,0x807a053f,3
+np.float32,0x3bd2c412,0x3bd2c354,3
+np.float32,0xff28f1c8,0xbfc90fdb,3
+np.float32,0x7f08f08b,0x3fc90fdb,3
+np.float32,0x800c50e4,0x800c50e4,3
+np.float32,0x369674,0x369674,3
+np.float32,0xbf5b7db3,0xbf3571bf,3
+np.float32,0x7edcf5e2,0x3fc90fdb,3
+np.float32,0x800e5d4b,0x800e5d4b,3
+np.float32,0x80722554,0x80722554,3
+np.float32,0x693f33,0x693f33,3
+np.float32,0x800844e4,0x800844e4,3
+np.float32,0xbf111b82,0xbf0402ec,3
+np.float32,0x7df9c9ac,0x3fc90fdb,3
+np.float32,0xbf6619a6,0xbf3b6f57,3
+np.float32,0x8002fafe,0x8002fafe,3
+np.float32,0xfe1e67f8,0xbfc90fdb,3
+np.float32,0x3f7f4bf8,0x3f48b5b7,3
+np.float32,0x7f017b20,0x3fc90fdb,3
+np.float32,0x2d9b07,0x2d9b07,3
+np.float32,0x803aa174,0x803aa174,3
+np.float32,0x7d530336,0x3fc90fdb,3
+np.float32,0x80662195,0x80662195,3
+np.float32,0xfd5ebcf0,0xbfc90fdb,3
+np.float32,0xbe7b8dcc,0xbe76ab59,3
+np.float32,0x7f2bacaf,0x3fc90fdb,3
+np.float32,0x3f194fc4,0x3f0a229e,3
+np.float32,0x7ee21cdf,0x3fc90fdb,3
+np.float32,0x3f5a17fc,0x3f34a307,3
+np.float32,0x7f100c58,0x3fc90fdb,3
+np.float32,0x7e9128f5,0x3fc90fdb,3
+np.float32,0xbf2107c6,0xbf0fbdb4,3
+np.float32,0xbd29c800,0xbd29af22,3
+np.float32,0xbf5af499,0xbf3522a6,3
+np.float32,0x801bde44,0x801bde44,3
+np.float32,0xfeb4761a,0xbfc90fdb,3
+np.float32,0x3d88aa1b,0x3d887650,3
+np.float32,0x7eba5e0b,0x3fc90fdb,3
+np.float32,0x803906bd,0x803906bd,3
+np.float32,0x80101512,0x80101512,3
+np.float32,0x7e898f83,0x3fc90fdb,3
+np.float32,0x806406d3,0x806406d3,3
+np.float32,0x7ed20fc0,0x3fc90fdb,3
+np.float32,0x20827d,0x20827d,3
+np.float32,0x3f361359,0x3f1e43fe,3
+np.float32,0xfe4ef8d8,0xbfc90fdb,3
+np.float32,0x805e7d2d,0x805e7d2d,3
+np.float32,0xbe4316b0,0xbe40c745,3
+np.float32,0xbf0a1c06,0xbefd4e5a,3
+np.float32,0x3e202860,0x3e1edee1,3
+np.float32,0xbeb32a2c,0xbeac5899,3
+np.float32,0xfe528838,0xbfc90fdb,3
+np.float32,0x2f73e2,0x2f73e2,3
+np.float32,0xbe16e010,0xbe15cc27,3
+np.float32,0x3f50d6c5,0x3f2f2d75,3
+np.float32,0xbe88a6a2,0xbe8589c7,3
+np.float32,0x3ee36060,0x3ed5fb36,3
+np.float32,0x6c978b,0x6c978b,3
+np.float32,0x7f1b735f,0x3fc90fdb,3
+np.float32,0x3dad8256,0x3dad1885,3
+np.float32,0x807f5094,0x807f5094,3
+np.float32,0x65c358,0x65c358,3
+np.float32,0xff315ce4,0xbfc90fdb,3
+np.float32,0x7411a6,0x7411a6,3
+np.float32,0x80757b04,0x80757b04,3
+np.float32,0x3eec73a6,0x3edd82f4,3
+np.float32,0xfe9f69e8,0xbfc90fdb,3
+np.float32,0x801f4fa8,0x801f4fa8,3
+np.float32,0xbf6f2fae,0xbf405f79,3
+np.float32,0xfea206b6,0xbfc90fdb,3
+np.float32,0x3f257301,0x3f12e1ee,3
+np.float32,0x7ea6a506,0x3fc90fdb,3
+np.float32,0x80800000,0x80800000,3
+np.float32,0xff735c2d,0xbfc90fdb,3
+np.float32,0x80197f95,0x80197f95,3
+np.float32,0x7f4a354f,0x3fc90fdb,3
+np.float32,0xff320c00,0xbfc90fdb,3
+np.float32,0x3f2659de,0x3f138484,3
+np.float32,0xbe5451bc,0xbe515a52,3
+np.float32,0x3f6e228c,0x3f3fcf7c,3
+np.float32,0x66855a,0x66855a,3
+np.float32,0x8034b3a3,0x8034b3a3,3
+np.float32,0xbe21a2fc,0xbe20505d,3
+np.float32,0x7f79e2dc,0x3fc90fdb,3
+np.float32,0xbe19a8e0,0xbe18858c,3
+np.float32,0x10802c,0x10802c,3
+np.float32,0xfeee579e,0xbfc90fdb,3
+np.float32,0x3f3292c8,0x3f1becc0,3
+np.float32,0xbf595a71,0xbf34350a,3
+np.float32,0xbf7c3373,0xbf4725f4,3
+np.float32,0xbdd30938,0xbdd24b36,3
+np.float32,0x153a17,0x153a17,3
+np.float32,0x807282a0,0x807282a0,3
+np.float32,0xfe817322,0xbfc90fdb,3
+np.float32,0x3f1b3628,0x3f0b8771,3
+np.float32,0x41be8f,0x41be8f,3
+np.float32,0x7f4a8343,0x3fc90fdb,3
+np.float32,0x3dc4ea2b,0x3dc44fae,3
+np.float32,0x802aac25,0x802aac25,3
+np.float32,0xbf20e1d7,0xbf0fa284,3
+np.float32,0xfd91a1b0,0xbfc90fdb,3
+np.float32,0x3f0d5476,0x3f012265,3
+np.float32,0x21c916,0x21c916,3
+np.float32,0x807df399,0x807df399,3
+np.float32,0x7e207b4c,0x3fc90fdb,3
+np.float32,0x8055f8ff,0x8055f8ff,3
+np.float32,0x7edf3b01,0x3fc90fdb,3
+np.float32,0x803a8df3,0x803a8df3,3
+np.float32,0x3ce3b002,0x3ce3a101,3
+np.float32,0x3f62dd54,0x3f39a248,3
+np.float32,0xff33ae10,0xbfc90fdb,3
+np.float32,0x7e3de69d,0x3fc90fdb,3
+np.float32,0x8024581e,0x8024581e,3
+np.float32,0xbf4ac99d,0xbf2b807a,3
+np.float32,0x3f157d19,0x3f074d8c,3
+np.float32,0xfed383f4,0xbfc90fdb,3
+np.float32,0xbf5a39fa,0xbf34b6b8,3
+np.float32,0x800d757d,0x800d757d,3
+np.float32,0x807d606b,0x807d606b,3
+np.float32,0x3e828f89,0x3e7fac2d,3
+np.float32,0x7a6604,0x7a6604,3
+np.float32,0x7dc7e72b,0x3fc90fdb,3
+np.float32,0x80144146,0x80144146,3
+np.float32,0x7c2eed69,0x3fc90fdb,3
+np.float32,0x3f5b4d8c,0x3f3555fc,3
+np.float32,0xfd8b7778,0xbfc90fdb,3
+np.float32,0xfc9d9140,0xbfc90fdb,3
+np.float32,0xbea265d4,0xbe9d4232,3
+np.float32,0xbe9344d0,0xbe8f65da,3
+np.float32,0x3f71f19a,0x3f41d65b,3
+np.float32,0x804a3f59,0x804a3f59,3
+np.float32,0x3e596290,0x3e563476,3
+np.float32,0x3e994ee4,0x3e94f546,3
+np.float32,0xbc103e00,0xbc103d0c,3
+np.float32,0xbf1cd896,0xbf0cb889,3
+np.float32,0x7f52b080,0x3fc90fdb,3
+np.float32,0xff584452,0xbfc90fdb,3
+np.float32,0x58b26b,0x58b26b,3
+np.float32,0x3f23cd4c,0x3f11b799,3
+np.float32,0x707d7,0x707d7,3
+np.float32,0xff732cff,0xbfc90fdb,3
+np.float32,0x3e41c2a6,0x3e3f7f0f,3
+np.float32,0xbf7058e9,0xbf40fdcf,3
+np.float32,0x7dca9857,0x3fc90fdb,3
+np.float32,0x7f0eb44b,0x3fc90fdb,3
+np.float32,0x8000405c,0x8000405c,3
+np.float32,0x4916ab,0x4916ab,3
+np.float32,0x4811a8,0x4811a8,3
+np.float32,0x3d69bf,0x3d69bf,3
+np.float32,0xfeadcf1e,0xbfc90fdb,3
+np.float32,0x3e08dbbf,0x3e080d58,3
+np.float32,0xff031f88,0xbfc90fdb,3
+np.float32,0xbe09cab8,0xbe08f818,3
+np.float32,0x21d7cd,0x21d7cd,3
+np.float32,0x3f23230d,0x3f113ea9,3
+np.float32,0x7e8a48d4,0x3fc90fdb,3
+np.float32,0x413869,0x413869,3
+np.float32,0x7e832990,0x3fc90fdb,3
+np.float32,0x800f5c09,0x800f5c09,3
+np.float32,0x7f5893b6,0x3fc90fdb,3
+np.float32,0x7f06b5b1,0x3fc90fdb,3
+np.float32,0xbe1cbee8,0xbe1b89d6,3
+np.float32,0xbf279f14,0xbf1468a8,3
+np.float32,0xfea86060,0xbfc90fdb,3
+np.float32,0x3e828174,0x3e7f91bb,3
+np.float32,0xff682c82,0xbfc90fdb,3
+np.float32,0x4e20f3,0x4e20f3,3
+np.float32,0x7f17d7e9,0x3fc90fdb,3
+np.float32,0x80671f92,0x80671f92,3
+np.float32,0x7f6dd100,0x3fc90fdb,3
+np.float32,0x3f219a4d,0x3f102695,3
+np.float32,0x803c9808,0x803c9808,3
+np.float32,0x3c432ada,0x3c43287d,3
+np.float32,0xbd3db450,0xbd3d91a2,3
+np.float32,0x3baac135,0x3baac0d0,3
+np.float32,0xff7fffe1,0xbfc90fdb,3
+np.float32,0xfe38a6f4,0xbfc90fdb,3
+np.float32,0x3dfb0a04,0x3df9cb04,3
+np.float32,0x800b05c2,0x800b05c2,3
+np.float32,0x644163,0x644163,3
+np.float32,0xff03a025,0xbfc90fdb,3
+np.float32,0x3f7d506c,0x3f47b641,3
+np.float32,0xff0e682a,0xbfc90fdb,3
+np.float32,0x3e09b7b0,0x3e08e567,3
+np.float32,0x7f72a216,0x3fc90fdb,3
+np.float32,0x7f800000,0x3fc90fdb,3
+np.float32,0x8050a281,0x8050a281,3
+np.float32,0x7edafa2f,0x3fc90fdb,3
+np.float32,0x3f4e0df6,0x3f2d7f2f,3
+np.float32,0xbf6728e0,0xbf3c050f,3
+np.float32,0x3e904ce4,0x3e8ca6eb,3
+np.float32,0x0,0x0,3
+np.float32,0xfd215070,0xbfc90fdb,3
+np.float32,0x7e406b15,0x3fc90fdb,3
+np.float32,0xbf2803c9,0xbf14af18,3
+np.float32,0x5950c8,0x5950c8,3
+np.float32,0xbeddcec8,0xbed14faa,3
+np.float32,0xbec6457e,0xbebd2aa5,3
+np.float32,0xbf42843c,0xbf2656db,3
+np.float32,0x3ee9cba8,0x3edb5163,3
+np.float32,0xbe30c954,0xbe2f0f90,3
+np.float32,0xbeee6b44,0xbedf216f,3
+np.float32,0xbe35d818,0xbe33f7cd,3
+np.float32,0xbe47c630,0xbe454bc6,3
+np.float32,0x801b146f,0x801b146f,3
+np.float32,0x7f6788da,0x3fc90fdb,3
+np.float32,0x3eaef088,0x3ea8927d,3
+np.float32,0x3eb5983e,0x3eae81fc,3
+np.float32,0x40b51d,0x40b51d,3
+np.float32,0xfebddd04,0xbfc90fdb,3
+np.float32,0x3e591aee,0x3e55efea,3
+np.float32,0xbe2b6b48,0xbe29d81f,3
+np.float32,0xff4a8826,0xbfc90fdb,3
+np.float32,0x3e791df0,0x3e745eac,3
+np.float32,0x7c8f681f,0x3fc90fdb,3
+np.float32,0xfe7a15c4,0xbfc90fdb,3
+np.float32,0x3c8963,0x3c8963,3
+np.float32,0x3f0afa0a,0x3efea5cc,3
+np.float32,0xbf0d2680,0xbf00ff29,3
+np.float32,0x3dc306b0,0x3dc27096,3
+np.float32,0x7f4cf105,0x3fc90fdb,3
+np.float32,0xbe196060,0xbe183ea4,3
+np.float32,0x5caf1c,0x5caf1c,3
+np.float32,0x801f2852,0x801f2852,3
+np.float32,0xbe01aa0c,0xbe00fa53,3
+np.float32,0x3f0cfd32,0x3f00df7a,3
+np.float32,0x7d82038e,0x3fc90fdb,3
+np.float32,0x7f7b927f,0x3fc90fdb,3
+np.float32,0xbe93b2e4,0xbe8fcb7f,3
+np.float32,0x1ffe8c,0x1ffe8c,3
+np.float32,0x3faaf6,0x3faaf6,3
+np.float32,0x3e32b1b8,0x3e30e9ab,3
+np.float32,0x802953c0,0x802953c0,3
+np.float32,0xfe5d9844,0xbfc90fdb,3
+np.float32,0x3e1a59d0,0x3e193292,3
+np.float32,0x801c6edc,0x801c6edc,3
+np.float32,0x1ecf41,0x1ecf41,3
+np.float32,0xfe56b09c,0xbfc90fdb,3
+np.float32,0x7e878351,0x3fc90fdb,3
+np.float32,0x3f401e2c,0x3f24cfcb,3
+np.float32,0xbf204a40,0xbf0f35bb,3
+np.float32,0x3e155a98,0x3e144ee1,3
+np.float32,0xbf34f929,0xbf1d8838,3
+np.float32,0x801bbf70,0x801bbf70,3
+np.float32,0x7e7c9730,0x3fc90fdb,3
+np.float32,0x7cc23432,0x3fc90fdb,3
+np.float32,0xbf351638,0xbf1d9b97,3
+np.float32,0x80152094,0x80152094,3
+np.float32,0x3f2d731c,0x3f187219,3
+np.float32,0x804ab0b7,0x804ab0b7,3
+np.float32,0x37d6db,0x37d6db,3
+np.float32,0xbf3ccc56,0xbf22acbf,3
+np.float32,0x3e546f8c,0x3e5176e7,3
+np.float32,0xbe90e87e,0xbe8d3707,3
+np.float32,0x48256c,0x48256c,3
+np.float32,0x7e2468d0,0x3fc90fdb,3
+np.float32,0x807af47e,0x807af47e,3
+np.float32,0x3ed4b221,0x3ec996f0,3
+np.float32,0x3d3b1956,0x3d3af811,3
+np.float32,0xbe69d93c,0xbe65e7f0,3
+np.float32,0xff03ff14,0xbfc90fdb,3
+np.float32,0x801e79dc,0x801e79dc,3
+np.float32,0x3f467c53,0x3f28d63d,3
+np.float32,0x3eab6baa,0x3ea56a1c,3
+np.float32,0xbf15519c,0xbf072d1c,3
+np.float32,0x7f0bd8e8,0x3fc90fdb,3
+np.float32,0xbe1e0d1c,0xbe1cd053,3
+np.float32,0x8016edab,0x8016edab,3
+np.float32,0x7ecaa09b,0x3fc90fdb,3
+np.float32,0x3f72e6d9,0x3f4257a8,3
+np.float32,0xbefe787e,0xbeec29a4,3
+np.float32,0xbee989e8,0xbedb1af9,3
+np.float32,0xbe662db0,0xbe626a45,3
+np.float32,0x495bf7,0x495bf7,3
+np.float32,0x26c379,0x26c379,3
+np.float32,0x7f54d41a,0x3fc90fdb,3
+np.float32,0x801e7dd9,0x801e7dd9,3
+np.float32,0x80000000,0x80000000,3
+np.float32,0xfa3d3000,0xbfc90fdb,3
+np.float32,0xfa3cb800,0xbfc90fdb,3
+np.float32,0x264894,0x264894,3
+np.float32,0xff6de011,0xbfc90fdb,3
+np.float32,0x7e9045b2,0x3fc90fdb,3
+np.float32,0x3f2253a8,0x3f10aaf4,3
+np.float32,0xbd462bf0,0xbd460469,3
+np.float32,0x7f1796af,0x3fc90fdb,3
+np.float32,0x3e718858,0x3e6d3279,3
+np.float32,0xff437d7e,0xbfc90fdb,3
+np.float32,0x805ae7cb,0x805ae7cb,3
+np.float32,0x807e32e9,0x807e32e9,3
+np.float32,0x3ee0bafc,0x3ed3c453,3
+np.float32,0xbf721dee,0xbf41edc3,3
+np.float32,0xfec9f792,0xbfc90fdb,3
+np.float32,0x7f050720,0x3fc90fdb,3
+np.float32,0x182261,0x182261,3
+np.float32,0x3e39e678,0x3e37e5be,3
+np.float32,0x7e096e4b,0x3fc90fdb,3
+np.float32,0x103715,0x103715,3
+np.float32,0x3f7e7741,0x3f484ae4,3
+np.float32,0x3e29aea5,0x3e28277c,3
+np.float32,0x58c183,0x58c183,3
+np.float32,0xff72fdb2,0xbfc90fdb,3
+np.float32,0xbd9a9420,0xbd9a493c,3
+np.float32,0x7f1e07e7,0x3fc90fdb,3
+np.float32,0xff79f522,0xbfc90fdb,3
+np.float32,0x7c7d0e96,0x3fc90fdb,3
+np.float32,0xbeba9e8e,0xbeb2f504,3
+np.float32,0xfd880a80,0xbfc90fdb,3
+np.float32,0xff7f2a33,0xbfc90fdb,3
+np.float32,0x3e861ae0,0x3e83289c,3
+np.float32,0x7f0161c1,0x3fc90fdb,3
+np.float32,0xfe844ff8,0xbfc90fdb,3
+np.float32,0xbebf4b98,0xbeb7128e,3
+np.float32,0x652bee,0x652bee,3
+np.float32,0xff188a4b,0xbfc90fdb,3
+np.float32,0xbf800000,0xbf490fdb,3
+np.float32,0x80418711,0x80418711,3
+np.float32,0xbeb712d4,0xbeafd1f6,3
+np.float32,0xbf7cee28,0xbf478491,3
+np.float32,0xfe66c59c,0xbfc90fdb,3
+np.float32,0x4166a2,0x4166a2,3
+np.float32,0x3dfa1a2c,0x3df8deb5,3
+np.float32,0xbdbfbcb8,0xbdbf2e0f,3
+np.float32,0xfe60ef70,0xbfc90fdb,3
+np.float32,0xfe009444,0xbfc90fdb,3
+np.float32,0xfeb27aa0,0xbfc90fdb,3
+np.float32,0xbe99f7bc,0xbe95902b,3
+np.float32,0x8043d28d,0x8043d28d,3
+np.float32,0xfe5328c4,0xbfc90fdb,3
+np.float32,0x8017b27e,0x8017b27e,3
+np.float32,0x3ef1d2cf,0x3ee1ebd7,3
+np.float32,0x805ddd90,0x805ddd90,3
+np.float32,0xbf424263,0xbf262d17,3
+np.float32,0xfc99dde0,0xbfc90fdb,3
+np.float32,0xbf7ec13b,0xbf487015,3
+np.float32,0xbef727ea,0xbee64377,3
+np.float32,0xff15ce95,0xbfc90fdb,3
+np.float32,0x1fbba4,0x1fbba4,3
+np.float32,0x3f3b2368,0x3f2198a9,3
+np.float32,0xfefda26e,0xbfc90fdb,3
+np.float32,0x801519ad,0x801519ad,3
+np.float32,0x80473fa2,0x80473fa2,3
+np.float32,0x7e7a8bc1,0x3fc90fdb,3
+np.float32,0x3e8a9289,0x3e87548a,3
+np.float32,0x3ed68987,0x3ecb2872,3
+np.float32,0x805bca66,0x805bca66,3
+np.float32,0x8079c4e3,0x8079c4e3,3
+np.float32,0x3a2510,0x3a2510,3
+np.float32,0x7eedc598,0x3fc90fdb,3
+np.float32,0x80681956,0x80681956,3
+np.float32,0xff64c778,0xbfc90fdb,3
+np.float32,0x806bbc46,0x806bbc46,3
+np.float32,0x433643,0x433643,3
+np.float32,0x705b92,0x705b92,3
+np.float32,0xff359392,0xbfc90fdb,3
+np.float32,0xbee78672,0xbed96fa7,3
+np.float32,0x3e21717b,0x3e202010,3
+np.float32,0xfea13c34,0xbfc90fdb,3
+np.float32,0x2c8895,0x2c8895,3
+np.float32,0x3ed33290,0x3ec84f7c,3
+np.float32,0x3e63031e,0x3e5f662e,3
+np.float32,0x7e30907b,0x3fc90fdb,3
+np.float32,0xbe293708,0xbe27b310,3
+np.float32,0x3ed93738,0x3ecd6ea3,3
+np.float32,0x9db7e,0x9db7e,3
+np.float32,0x3f7cd1b8,0x3f47762c,3
+np.float32,0x3eb5143c,0x3eae0cb0,3
+np.float32,0xbe69b234,0xbe65c2d7,3
+np.float32,0x3f6e74de,0x3f3ffb97,3
+np.float32,0x5d0559,0x5d0559,3
+np.float32,0x3e1e8c30,0x3e1d4c70,3
+np.float32,0xbf2d1878,0xbf1833ef,3
+np.float32,0xff2adf82,0xbfc90fdb,3
+np.float32,0x8012e2c1,0x8012e2c1,3
+np.float32,0x7f031be3,0x3fc90fdb,3
+np.float32,0x805ff94e,0x805ff94e,3
+np.float32,0x3e9d5b27,0x3e98aa31,3
+np.float32,0x3f56d5cf,0x3f32bc9e,3
+np.float32,0x3eaa0412,0x3ea4267f,3
+np.float32,0xbe899ea4,0xbe86712f,3
+np.float32,0x800f2f48,0x800f2f48,3
+np.float32,0x3f1c2269,0x3f0c33ea,3
+np.float32,0x3f4a5f64,0x3f2b3f28,3
+np.float32,0x80739318,0x80739318,3
+np.float32,0x806e9b47,0x806e9b47,3
+np.float32,0x3c8cd300,0x3c8ccf73,3
+np.float32,0x7f39a39d,0x3fc90fdb,3
+np.float32,0x3ec95d61,0x3ebfd9dc,3
+np.float32,0xff351ff8,0xbfc90fdb,3
+np.float32,0xff3a8f58,0xbfc90fdb,3
+np.float32,0x7f313ec0,0x3fc90fdb,3
+np.float32,0x803aed13,0x803aed13,3
+np.float32,0x7f771d9b,0x3fc90fdb,3
+np.float32,0x8045a6d6,0x8045a6d6,3
+np.float32,0xbc85f280,0xbc85ef72,3
+np.float32,0x7e9c68f5,0x3fc90fdb,3
+np.float32,0xbf0f9379,0xbf02d975,3
+np.float32,0x7e97bcb1,0x3fc90fdb,3
+np.float32,0x804a07d5,0x804a07d5,3
+np.float32,0x802e6117,0x802e6117,3
+np.float32,0x7ed5e388,0x3fc90fdb,3
+np.float32,0x80750455,0x80750455,3
+np.float32,0xff4a8325,0xbfc90fdb,3
+np.float32,0xbedb6866,0xbecf497c,3
+np.float32,0x52ea3b,0x52ea3b,3
+np.float32,0xff773172,0xbfc90fdb,3
+np.float32,0xbeaa8ff0,0xbea4a46e,3
+np.float32,0x7eef2058,0x3fc90fdb,3
+np.float32,0x3f712472,0x3f4169d3,3
+np.float32,0xff6c8608,0xbfc90fdb,3
+np.float32,0xbf6eaa41,0xbf40182a,3
+np.float32,0x3eb03c24,0x3ea9bb34,3
+np.float32,0xfe118cd4,0xbfc90fdb,3
+np.float32,0x3e5b03b0,0x3e57c378,3
+np.float32,0x7f34d92d,0x3fc90fdb,3
+np.float32,0x806c3418,0x806c3418,3
+np.float32,0x7f3074e3,0x3fc90fdb,3
+np.float32,0x8002df02,0x8002df02,3
+np.float32,0x3f6df63a,0x3f3fb7b7,3
+np.float32,0xfd2b4100,0xbfc90fdb,3
+np.float32,0x80363d5c,0x80363d5c,3
+np.float32,0xbeac1f98,0xbea60bd6,3
+np.float32,0xff7fffff,0xbfc90fdb,3
+np.float32,0x80045097,0x80045097,3
+np.float32,0xfe011100,0xbfc90fdb,3
+np.float32,0x80739ef5,0x80739ef5,3
+np.float32,0xff3976ed,0xbfc90fdb,3
+np.float32,0xbe18e3a0,0xbe17c49e,3
+np.float32,0xbe289294,0xbe2712f6,3
+np.float32,0x3f1d41e7,0x3f0d050e,3
+np.float32,0x39364a,0x39364a,3
+np.float32,0x8072b77e,0x8072b77e,3
+np.float32,0x3f7cfec0,0x3f478cf6,3
+np.float32,0x2f68f6,0x2f68f6,3
+np.float32,0xbf031fb8,0xbef25c84,3
+np.float32,0xbf0b842c,0xbeff7afc,3
+np.float32,0x3f081e7e,0x3efa3676,3
+np.float32,0x7f7fffff,0x3fc90fdb,3
+np.float32,0xff15da0e,0xbfc90fdb,3
+np.float32,0x3d2001b2,0x3d1fece1,3
+np.float32,0x7f76efef,0x3fc90fdb,3
+np.float32,0x3f2405dd,0x3f11dfb7,3
+np.float32,0xa0319,0xa0319,3
+np.float32,0x3e23d2bd,0x3e227255,3
+np.float32,0xbd4d4c50,0xbd4d205e,3
+np.float32,0x382344,0x382344,3
+np.float32,0x21bbf,0x21bbf,3
+np.float32,0xbf209e82,0xbf0f7239,3
+np.float32,0xff03bf9f,0xbfc90fdb,3
+np.float32,0x7b1789,0x7b1789,3
+np.float32,0xff314944,0xbfc90fdb,3
+np.float32,0x1a63eb,0x1a63eb,3
+np.float32,0x803dc983,0x803dc983,3
+np.float32,0x3f0ff558,0x3f0323dc,3
+np.float32,0x3f544f2c,0x3f313f58,3
+np.float32,0xff032948,0xbfc90fdb,3
+np.float32,0x7f4933cc,0x3fc90fdb,3
+np.float32,0x7f14c5ed,0x3fc90fdb,3
+np.float32,0x803aeebf,0x803aeebf,3
+np.float32,0xbf0d4c0f,0xbf011bf5,3
+np.float32,0xbeaf8de2,0xbea91f57,3
+np.float32,0xff3ae030,0xbfc90fdb,3
+np.float32,0xbb362d00,0xbb362ce1,3
+np.float32,0x3d1f79e0,0x3d1f6544,3
+np.float32,0x3f56e9d9,0x3f32c860,3
+np.float32,0x3f723e5e,0x3f41fee2,3
+np.float32,0x4c0179,0x4c0179,3
+np.float32,0xfee36132,0xbfc90fdb,3
+np.float32,0x619ae6,0x619ae6,3
+np.float32,0xfde5d670,0xbfc90fdb,3
+np.float32,0xff079ac5,0xbfc90fdb,3
+np.float32,0x3e974fbd,0x3e931fae,3
+np.float32,0x8020ae6b,0x8020ae6b,3
+np.float32,0x6b5af1,0x6b5af1,3
+np.float32,0xbeb57cd6,0xbeae69a3,3
+np.float32,0x806e7eb2,0x806e7eb2,3
+np.float32,0x7e666edb,0x3fc90fdb,3
+np.float32,0xbf458c18,0xbf283ff0,3
+np.float32,0x3e50518e,0x3e4d8399,3
+np.float32,0x3e9ce224,0x3e983b98,3
+np.float32,0x3e6bc067,0x3e67b6c6,3
+np.float32,0x13783d,0x13783d,3
+np.float32,0xff3d518c,0xbfc90fdb,3
+np.float32,0xfeba5968,0xbfc90fdb,3
+np.float32,0xbf0b9f76,0xbeffa50f,3
+np.float32,0xfe174900,0xbfc90fdb,3
+np.float32,0x3f38bb0a,0x3f200527,3
+np.float32,0x7e94a77d,0x3fc90fdb,3
+np.float32,0x29d776,0x29d776,3
+np.float32,0xbf4e058d,0xbf2d7a15,3
+np.float32,0xbd94abc8,0xbd946923,3
+np.float32,0xbee62db0,0xbed85124,3
+np.float32,0x800000,0x800000,3
+np.float32,0xbef1df7e,0xbee1f636,3
+np.float32,0xbcf3cd20,0xbcf3bab5,3
+np.float32,0x80007b05,0x80007b05,3
+np.float32,0x3d9b3f2e,0x3d9af351,3
+np.float32,0xbf714a68,0xbf417dee,3
+np.float32,0xbf2a2d37,0xbf163069,3
+np.float32,0x8055104f,0x8055104f,3
+np.float32,0x7f5c40d7,0x3fc90fdb,3
+np.float32,0x1,0x1,3
+np.float32,0xff35f3a6,0xbfc90fdb,3
+np.float32,0xd9c7c,0xd9c7c,3
+np.float32,0xbf440cfc,0xbf274f22,3
+np.float32,0x8050ac43,0x8050ac43,3
+np.float32,0x63ee16,0x63ee16,3
+np.float32,0x7d90419b,0x3fc90fdb,3
+np.float32,0xfee22198,0xbfc90fdb,3
+np.float32,0xc2ead,0xc2ead,3
+np.float32,0x7f5cd6a6,0x3fc90fdb,3
+np.float32,0x3f6fab7e,0x3f40a184,3
+np.float32,0x3ecf998c,0x3ec53a73,3
+np.float32,0x7e5271f0,0x3fc90fdb,3
+np.float32,0x67c016,0x67c016,3
+np.float32,0x2189c8,0x2189c8,3
+np.float32,0x27d892,0x27d892,3
+np.float32,0x3f0d02c4,0x3f00e3c0,3
+np.float32,0xbf69ebca,0xbf3d8862,3
+np.float32,0x3e60c0d6,0x3e5d3ebb,3
+np.float32,0x3f45206c,0x3f27fc66,3
+np.float32,0xbf6b47dc,0xbf3e4592,3
+np.float32,0xfe9be2e2,0xbfc90fdb,3
+np.float32,0x7fa00000,0x7fe00000,3
+np.float32,0xff271562,0xbfc90fdb,3
+np.float32,0x3e2e5270,0x3e2caaaf,3
+np.float32,0x80222934,0x80222934,3
+np.float32,0xbd01d220,0xbd01c701,3
+np.float32,0x223aa0,0x223aa0,3
+np.float32,0x3f4b5a7e,0x3f2bd967,3
+np.float32,0x3f217d85,0x3f101200,3
+np.float32,0xbf57663a,0xbf331144,3
+np.float32,0x3f219862,0x3f102536,3
+np.float32,0x28a28c,0x28a28c,3
+np.float32,0xbf3f55f4,0xbf244f86,3
+np.float32,0xbf3de287,0xbf236092,3
+np.float32,0xbf1c1ce2,0xbf0c2fe3,3
+np.float32,0x80000001,0x80000001,3
+np.float32,0x3db695d0,0x3db61a90,3
+np.float32,0x6c39bf,0x6c39bf,3
+np.float32,0x7e33a12f,0x3fc90fdb,3
+np.float32,0x67623a,0x67623a,3
+np.float32,0x3e45dc54,0x3e4373b6,3
+np.float32,0x7f62fa68,0x3fc90fdb,3
+np.float32,0x3f0e1d01,0x3f01bbe5,3
+np.float32,0x3f13dc69,0x3f0615f5,3
+np.float32,0x246703,0x246703,3
+np.float32,0xbf1055b5,0xbf036d07,3
+np.float32,0x7f46d3d0,0x3fc90fdb,3
+np.float32,0x3d2b8086,0x3d2b66e5,3
+np.float32,0xbf03be44,0xbef35776,3
+np.float32,0x3f800000,0x3f490fdb,3
+np.float32,0xbec8d226,0xbebf613d,3
+np.float32,0x3d8faf00,0x3d8f72d4,3
+np.float32,0x170c4e,0x170c4e,3
+np.float32,0xff14c0f0,0xbfc90fdb,3
+np.float32,0xff16245d,0xbfc90fdb,3
+np.float32,0x7f44ce6d,0x3fc90fdb,3
+np.float32,0xbe8175d8,0xbe7d9aeb,3
+np.float32,0x3df7a4a1,0x3df67254,3
+np.float32,0xfe2cc46c,0xbfc90fdb,3
+np.float32,0x3f284e63,0x3f14e335,3
+np.float32,0x7e46e5d6,0x3fc90fdb,3
+np.float32,0x397be4,0x397be4,3
+np.float32,0xbf2560bc,0xbf12d50b,3
+np.float32,0x3ed9b8c1,0x3ecddc60,3
+np.float32,0xfec18c5a,0xbfc90fdb,3
+np.float32,0x64894d,0x64894d,3
+np.float32,0x36a65d,0x36a65d,3
+np.float32,0x804ffcd7,0x804ffcd7,3
+np.float32,0x800f79e4,0x800f79e4,3
+np.float32,0x5d45ac,0x5d45ac,3
+np.float32,0x6cdda0,0x6cdda0,3
+np.float32,0xbf7f2077,0xbf489fe5,3
+np.float32,0xbf152f78,0xbf0713a1,3
+np.float32,0x807bf344,0x807bf344,3
+np.float32,0x3f775023,0x3f44a4d8,3
+np.float32,0xbf3edf67,0xbf240365,3
+np.float32,0x7eed729c,0x3fc90fdb,3
+np.float32,0x14cc29,0x14cc29,3
+np.float32,0x7edd7b6b,0x3fc90fdb,3
+np.float32,0xbf3c6e2c,0xbf226fb7,3
+np.float32,0x51b9ad,0x51b9ad,3
+np.float32,0x3f617ee8,0x3f38dd7c,3
+np.float32,0xff800000,0xbfc90fdb,3
+np.float32,0x7f440ea0,0x3fc90fdb,3
+np.float32,0x3e639893,0x3e5ff49e,3
+np.float32,0xbd791bb0,0xbd78cd3c,3
+np.float32,0x8059fcbc,0x8059fcbc,3
+np.float32,0xbf7d1214,0xbf4796bd,3
+np.float32,0x3ef368fa,0x3ee33788,3
+np.float32,0xbecec0f4,0xbec48055,3
+np.float32,0xbc83d940,0xbc83d656,3
+np.float32,0xbce01220,0xbce003d4,3
+np.float32,0x803192a5,0x803192a5,3
+np.float32,0xbe40e0c0,0xbe3ea4f0,3
+np.float32,0xfb692600,0xbfc90fdb,3
+np.float32,0x3f1bec65,0x3f0c0c88,3
+np.float32,0x7f042798,0x3fc90fdb,3
+np.float32,0xbe047374,0xbe03b83b,3
+np.float32,0x7f7c6630,0x3fc90fdb,3
+np.float32,0x7f58dae3,0x3fc90fdb,3
+np.float32,0x80691c92,0x80691c92,3
+np.float32,0x7dbe76,0x7dbe76,3
+np.float32,0xbf231384,0xbf11339d,3
+np.float32,0xbef4acf8,0xbee43f8b,3
+np.float32,0x3ee9f9d0,0x3edb7793,3
+np.float32,0x3f0064f6,0x3eee04a8,3
+np.float32,0x313732,0x313732,3
+np.float32,0xfd58cf80,0xbfc90fdb,3
+np.float32,0x3f7a2bc9,0x3f461d30,3
+np.float32,0x7f7681af,0x3fc90fdb,3
+np.float32,0x7f504211,0x3fc90fdb,3
+np.float32,0xfeae0c00,0xbfc90fdb,3
+np.float32,0xbee14396,0xbed436d1,3
+np.float32,0x7fc00000,0x7fc00000,3
+np.float32,0x693406,0x693406,3
+np.float32,0x3eb4a679,0x3eadab1b,3
+np.float32,0x550505,0x550505,3
+np.float32,0xfd493d10,0xbfc90fdb,3
+np.float32,0x3f4fc907,0x3f2e8b2c,3
+np.float32,0x80799aa4,0x80799aa4,3
+np.float32,0xff1ea89b,0xbfc90fdb,3
+np.float32,0xff424510,0xbfc90fdb,3
+np.float32,0x7f68d026,0x3fc90fdb,3
+np.float32,0xbea230ca,0xbe9d1200,3
+np.float32,0x7ea585da,0x3fc90fdb,3
+np.float32,0x3f3db211,0x3f23414c,3
+np.float32,0xfea4d964,0xbfc90fdb,3
+np.float32,0xbf17fe18,0xbf092984,3
+np.float32,0x7cc8a2,0x7cc8a2,3
+np.float32,0xff0330ba,0xbfc90fdb,3
+np.float32,0x3f769835,0x3f444592,3
+np.float32,0xeb0ac,0xeb0ac,3
+np.float32,0x7f7e45de,0x3fc90fdb,3
+np.float32,0xbdb510a8,0xbdb49873,3
+np.float32,0x3ebf900b,0x3eb74e9c,3
+np.float32,0xbf21bbce,0xbf103e89,3
+np.float32,0xbf3f4682,0xbf24459d,3
+np.float32,0x7eb6e9c8,0x3fc90fdb,3
+np.float32,0xbf42532d,0xbf2637be,3
+np.float32,0xbd3b2600,0xbd3b04b4,3
+np.float32,0x3f1fa9aa,0x3f0ec23e,3
+np.float32,0x7ed6a0f1,0x3fc90fdb,3
+np.float32,0xff4759a1,0xbfc90fdb,3
+np.float32,0x6d26e3,0x6d26e3,3
+np.float32,0xfe1108e0,0xbfc90fdb,3
+np.float32,0xfdf76900,0xbfc90fdb,3
+np.float32,0xfec66f22,0xbfc90fdb,3
+np.float32,0xbf3d097f,0xbf22d458,3
+np.float32,0x3d85be25,0x3d858d99,3
+np.float32,0x7f36739f,0x3fc90fdb,3
+np.float32,0x7bc0a304,0x3fc90fdb,3
+np.float32,0xff48dd90,0xbfc90fdb,3
+np.float32,0x48cab0,0x48cab0,3
+np.float32,0x3ed3943c,0x3ec8a2ef,3
+np.float32,0xbf61488e,0xbf38bede,3
+np.float32,0x3f543df5,0x3f313525,3
+np.float32,0x5cf2ca,0x5cf2ca,3
+np.float32,0x572686,0x572686,3
+np.float32,0x80369c7c,0x80369c7c,3
+np.float32,0xbd2c1d20,0xbd2c0338,3
+np.float32,0x3e255428,0x3e23ea0b,3
+np.float32,0xbeba9ee0,0xbeb2f54c,3
+np.float32,0x8015c165,0x8015c165,3
+np.float32,0x3d31f488,0x3d31d7e6,3
+np.float32,0x3f68591c,0x3f3cac43,3
+np.float32,0xf5ed5,0xf5ed5,3
+np.float32,0xbf3b1d34,0xbf21949e,3
+np.float32,0x1f0343,0x1f0343,3
+np.float32,0x3f0e52b5,0x3f01e4ef,3
+np.float32,0x7f57c596,0x3fc90fdb,3
+np.float64,0x7fd8e333ddb1c667,0x3ff921fb54442d18,3
+np.float64,0x800bcc9cdad7993a,0x800bcc9cdad7993a,3
+np.float64,0x3fcd6f81df3adf00,0x3fcceebbafc5d55e,3
+np.float64,0x3fed7338a57ae671,0x3fe7ce3e5811fc0a,3
+np.float64,0x7fe64994fcac9329,0x3ff921fb54442d18,3
+np.float64,0xfa5a6345f4b4d,0xfa5a6345f4b4d,3
+np.float64,0xe9dcd865d3b9b,0xe9dcd865d3b9b,3
+np.float64,0x7fea6cffabf4d9fe,0x3ff921fb54442d18,3
+np.float64,0xa9e1de6153c3c,0xa9e1de6153c3c,3
+np.float64,0xab6bdc5356d7c,0xab6bdc5356d7c,3
+np.float64,0x80062864a02c50ca,0x80062864a02c50ca,3
+np.float64,0xbfdac03aa7b58076,0xbfd9569f3230128d,3
+np.float64,0xbfe61b77752c36ef,0xbfe3588f51b8be8f,3
+np.float64,0x800bc854c8d790aa,0x800bc854c8d790aa,3
+np.float64,0x3feed1a2da3da346,0x3fe887f9b8ea031f,3
+np.float64,0x3fe910d3697221a7,0x3fe54365a53d840e,3
+np.float64,0x7fe7ab4944ef5692,0x3ff921fb54442d18,3
+np.float64,0x3fa462f1a028c5e3,0x3fa460303a6a4e69,3
+np.float64,0x800794f1a3af29e4,0x800794f1a3af29e4,3
+np.float64,0x3fee6fe7fafcdfd0,0x3fe854f863816d55,3
+np.float64,0x8000000000000000,0x8000000000000000,3
+np.float64,0x7f336472fe66d,0x7f336472fe66d,3
+np.float64,0xffb1623ac822c478,0xbff921fb54442d18,3
+np.float64,0x3fbacd68ce359ad2,0x3fbab480b3638846,3
+np.float64,0xffd5c02706ab804e,0xbff921fb54442d18,3
+np.float64,0xbfd4daf03d29b5e0,0xbfd42928f069c062,3
+np.float64,0x800c6e85dbd8dd0c,0x800c6e85dbd8dd0c,3
+np.float64,0x800e3599c5bc6b34,0x800e3599c5bc6b34,3
+np.float64,0x2c0d654c581ad,0x2c0d654c581ad,3
+np.float64,0xbfdd3eb13fba7d62,0xbfdb6e8143302de7,3
+np.float64,0x800b60cb8776c197,0x800b60cb8776c197,3
+np.float64,0x80089819ad113034,0x80089819ad113034,3
+np.float64,0x29fe721453fcf,0x29fe721453fcf,3
+np.float64,0x3fe8722f4df0e45f,0x3fe4e026d9eadb4d,3
+np.float64,0xffd1fbcd01a3f79a,0xbff921fb54442d18,3
+np.float64,0x7fc74e1e982e9c3c,0x3ff921fb54442d18,3
+np.float64,0x800c09d3d15813a8,0x800c09d3d15813a8,3
+np.float64,0xbfeee4578b3dc8af,0xbfe891ab3d6c3ce4,3
+np.float64,0xffdd01a6f33a034e,0xbff921fb54442d18,3
+np.float64,0x7fcc130480382608,0x3ff921fb54442d18,3
+np.float64,0xffcbb6bd1d376d7c,0xbff921fb54442d18,3
+np.float64,0xc068a53780d15,0xc068a53780d15,3
+np.float64,0xbfc974f15532e9e4,0xbfc92100b355f3e7,3
+np.float64,0x3fe6da79442db4f3,0x3fe3d87393b082e7,3
+np.float64,0xd9d9be4db3b38,0xd9d9be4db3b38,3
+np.float64,0x5ea50a20bd4a2,0x5ea50a20bd4a2,3
+np.float64,0xbfe5597f7d2ab2ff,0xbfe2d3ccc544b52b,3
+np.float64,0x80019364e4e326cb,0x80019364e4e326cb,3
+np.float64,0x3fed2902c3fa5206,0x3fe7a5e1df07e5c1,3
+np.float64,0xbfa7b72b5c2f6e50,0xbfa7b2d545b3cc1f,3
+np.float64,0xffdb60dd43b6c1ba,0xbff921fb54442d18,3
+np.float64,0x81a65d8b034cc,0x81a65d8b034cc,3
+np.float64,0x8000c30385818608,0x8000c30385818608,3
+np.float64,0x6022f5f4c045f,0x6022f5f4c045f,3
+np.float64,0x8007a2bb810f4578,0x8007a2bb810f4578,3
+np.float64,0x7fdc68893238d111,0x3ff921fb54442d18,3
+np.float64,0x7fd443454ea8868a,0x3ff921fb54442d18,3
+np.float64,0xffe6b04209ed6084,0xbff921fb54442d18,3
+np.float64,0x7fcd9733d13b2e67,0x3ff921fb54442d18,3
+np.float64,0xf5ee80a9ebdd0,0xf5ee80a9ebdd0,3
+np.float64,0x3fe3788e8de6f11e,0x3fe17dec7e6843a0,3
+np.float64,0x3fee36f62f7c6dec,0x3fe836f832515b43,3
+np.float64,0xf6cb49aded969,0xf6cb49aded969,3
+np.float64,0x3fd2b15ea4a562bc,0x3fd22fdc09920e67,3
+np.float64,0x7fccf6aef139ed5d,0x3ff921fb54442d18,3
+np.float64,0x3fd396b8ce272d72,0x3fd3026118857bd4,3
+np.float64,0x7fe53d3c80ea7a78,0x3ff921fb54442d18,3
+np.float64,0x3feae88fc4f5d120,0x3fe65fb04b18ef7a,3
+np.float64,0x3fedc643747b8c86,0x3fe7fafa6c20e25a,3
+np.float64,0xffdb2dc0df365b82,0xbff921fb54442d18,3
+np.float64,0xbfa2af3658255e70,0xbfa2ad17348f4253,3
+np.float64,0x3f8aa77b30354ef6,0x3f8aa71892336a69,3
+np.float64,0xbfdd1b1efbba363e,0xbfdb510dcd186820,3
+np.float64,0x800f50d99c5ea1b3,0x800f50d99c5ea1b3,3
+np.float64,0xff6ed602403dac00,0xbff921fb54442d18,3
+np.float64,0x800477d71aa8efaf,0x800477d71aa8efaf,3
+np.float64,0xbfe729a9e86e5354,0xbfe40ca78d9eefcf,3
+np.float64,0x3fd81ab2d4303566,0x3fd70d7e3937ea22,3
+np.float64,0xb617cbab6c2fa,0xb617cbab6c2fa,3
+np.float64,0x7fefffffffffffff,0x3ff921fb54442d18,3
+np.float64,0xffa40933ac281260,0xbff921fb54442d18,3
+np.float64,0xbfe1ede621e3dbcc,0xbfe057bb2b341ced,3
+np.float64,0xbfec700f03b8e01e,0xbfe73fb190bc722e,3
+np.float64,0x6e28af02dc517,0x6e28af02dc517,3
+np.float64,0x3fe37ad37ae6f5a7,0x3fe17f94674818a9,3
+np.float64,0x8000cbdeeae197bf,0x8000cbdeeae197bf,3
+np.float64,0x3fe8fd1f01f1fa3e,0x3fe5372bbec5d72c,3
+np.float64,0x3f8f9229103f2452,0x3f8f918531894256,3
+np.float64,0x800536858e0a6d0c,0x800536858e0a6d0c,3
+np.float64,0x7fe82bb4f9f05769,0x3ff921fb54442d18,3
+np.float64,0xffc1c2fb592385f8,0xbff921fb54442d18,3
+np.float64,0x7f924ddfc0249bbf,0x3ff921fb54442d18,3
+np.float64,0xffd5e125c52bc24c,0xbff921fb54442d18,3
+np.float64,0xbfef0d8738be1b0e,0xbfe8a6ef17b16c10,3
+np.float64,0x3fc9c8875233910f,0x3fc9715e708503cb,3
+np.float64,0xbfe2d926f4e5b24e,0xbfe108956e61cbb3,3
+np.float64,0x7fd61c496dac3892,0x3ff921fb54442d18,3
+np.float64,0x7fed545c6b7aa8b8,0x3ff921fb54442d18,3
+np.float64,0x8003746fea86e8e1,0x8003746fea86e8e1,3
+np.float64,0x3fdf515e75bea2bd,0x3fdd201a5585caa3,3
+np.float64,0xffda87c8ee350f92,0xbff921fb54442d18,3
+np.float64,0xffc675d8e22cebb0,0xbff921fb54442d18,3
+np.float64,0xffcdc173433b82e8,0xbff921fb54442d18,3
+np.float64,0xffed9df1517b3be2,0xbff921fb54442d18,3
+np.float64,0x3fd6a2eec72d45de,0x3fd5c1f1d7dcddcf,3
+np.float64,0xffec116a66f822d4,0xbff921fb54442d18,3
+np.float64,0x8007c2a2458f8545,0x8007c2a2458f8545,3
+np.float64,0x3fe4ee80d969dd02,0x3fe2895076094668,3
+np.float64,0x3fe3cae7116795ce,0x3fe1b9c07e0d03a7,3
+np.float64,0xbfd81bf8d8b037f2,0xbfd70e9bbbb4ca57,3
+np.float64,0x800c88ccd1f9119a,0x800c88ccd1f9119a,3
+np.float64,0xffdab2aee2b5655e,0xbff921fb54442d18,3
+np.float64,0x3fe743d227ee87a4,0x3fe41dcaef186d96,3
+np.float64,0x3fb060fd0220c1fa,0x3fb05b47f56ebbb4,3
+np.float64,0xbfd3f03772a7e06e,0xbfd3541522377291,3
+np.float64,0x190a5ae03216,0x190a5ae03216,3
+np.float64,0x3fe48c71916918e4,0x3fe24442f45b3183,3
+np.float64,0x800862470590c48e,0x800862470590c48e,3
+np.float64,0x7fd3ced89d279db0,0x3ff921fb54442d18,3
+np.float64,0x3feb3d9b4ab67b37,0x3fe69140cf2623f7,3
+np.float64,0xbc3f296b787e5,0xbc3f296b787e5,3
+np.float64,0xbfed6b905dfad721,0xbfe7ca1881a8c0fd,3
+np.float64,0xbfe621c2aaac4386,0xbfe35cd1969a82db,3
+np.float64,0x8009e7b17593cf63,0x8009e7b17593cf63,3
+np.float64,0x80045f580ca8beb1,0x80045f580ca8beb1,3
+np.float64,0xbfea2e177e745c2f,0xbfe5f13971633339,3
+np.float64,0x3fee655787fccab0,0x3fe84f6b98b6de26,3
+np.float64,0x3fc9cde92f339bd0,0x3fc9768a88b2c97c,3
+np.float64,0x3fc819c3b3303388,0x3fc7d25e1526e731,3
+np.float64,0x3fd3e848d2a7d090,0x3fd34cd9e6af558f,3
+np.float64,0x3fe19dacac633b5a,0x3fe01a6b4d27adc2,3
+np.float64,0x800b190da316321c,0x800b190da316321c,3
+np.float64,0xd5c69711ab8d3,0xd5c69711ab8d3,3
+np.float64,0xbfdc31bed7b8637e,0xbfda8ea3c1309d6d,3
+np.float64,0xbfd02ba007a05740,0xbfcfad86f0d756dc,3
+np.float64,0x3fe874473d70e88e,0x3fe4e1793cd82123,3
+np.float64,0xffb465585c28cab0,0xbff921fb54442d18,3
+np.float64,0xbfb5d8e13e2bb1c0,0xbfb5cb5c7807fc4d,3
+np.float64,0xffe80f933bf01f26,0xbff921fb54442d18,3
+np.float64,0x7feea783f5fd4f07,0x3ff921fb54442d18,3
+np.float64,0xbfae6665f43cccd0,0xbfae5d45b0a6f90a,3
+np.float64,0x800bd6ef5a77addf,0x800bd6ef5a77addf,3
+np.float64,0x800d145babda28b8,0x800d145babda28b8,3
+np.float64,0x39de155473bc3,0x39de155473bc3,3
+np.float64,0x3fefbd6bb1ff7ad8,0x3fe9008e73a3296e,3
+np.float64,0x3fc40bca3d281798,0x3fc3e2710e167007,3
+np.float64,0x3fcae0918335c120,0x3fca7e09e704a678,3
+np.float64,0x51287fbea2511,0x51287fbea2511,3
+np.float64,0x7fa6bc33a82d7866,0x3ff921fb54442d18,3
+np.float64,0xe72a2bebce546,0xe72a2bebce546,3
+np.float64,0x3fe1c8fd686391fa,0x3fe03b9622aeb4e3,3
+np.float64,0x3fe2a73ac3654e76,0x3fe0e36bc1ee4ac4,3
+np.float64,0x59895218b312b,0x59895218b312b,3
+np.float64,0xc6dc25c78db85,0xc6dc25c78db85,3
+np.float64,0xbfc06cfac520d9f4,0xbfc0561f85d2c907,3
+np.float64,0xbfea912dc4f5225c,0xbfe62c3b1c01c793,3
+np.float64,0x3fb78ce89a2f19d0,0x3fb77bfcb65a67d3,3
+np.float64,0xbfece5cdea39cb9c,0xbfe78103d24099e5,3
+np.float64,0x30d3054e61a61,0x30d3054e61a61,3
+np.float64,0xbfd3fe26fba7fc4e,0xbfd360c8447c4f7a,3
+np.float64,0x800956072a92ac0f,0x800956072a92ac0f,3
+np.float64,0x7fe639b3b6ec7366,0x3ff921fb54442d18,3
+np.float64,0x800ee30240bdc605,0x800ee30240bdc605,3
+np.float64,0x7fef6af0d2bed5e1,0x3ff921fb54442d18,3
+np.float64,0xffefce8725ff9d0d,0xbff921fb54442d18,3
+np.float64,0x3fe2e311da65c624,0x3fe10ff1623089dc,3
+np.float64,0xbfe7e5cbe56fcb98,0xbfe486c3daeda67c,3
+np.float64,0x80095bc14472b783,0x80095bc14472b783,3
+np.float64,0xffef0cb4553e1968,0xbff921fb54442d18,3
+np.float64,0xe3e60567c7cc1,0xe3e60567c7cc1,3
+np.float64,0xffde919f06bd233e,0xbff921fb54442d18,3
+np.float64,0x3fe3f9632e27f2c6,0x3fe1db49ebd21c4e,3
+np.float64,0x9dee9a233bdd4,0x9dee9a233bdd4,3
+np.float64,0xbfe3bb0602e7760c,0xbfe1ae41b6d4c488,3
+np.float64,0x3fc46945a128d288,0x3fc43da54c6c6a2a,3
+np.float64,0x7fdef149ac3de292,0x3ff921fb54442d18,3
+np.float64,0x800a96c76d752d8f,0x800a96c76d752d8f,3
+np.float64,0x3f971a32382e3464,0x3f9719316b9e9baf,3
+np.float64,0x7fe97bcf15b2f79d,0x3ff921fb54442d18,3
+np.float64,0x7fea894558f5128a,0x3ff921fb54442d18,3
+np.float64,0x3fc9e3be1933c780,0x3fc98b847c3923eb,3
+np.float64,0x3f7accac40359959,0x3f7acc9330741b64,3
+np.float64,0xa80c136950183,0xa80c136950183,3
+np.float64,0x3fe408732b2810e6,0x3fe1e61e7cbc8824,3
+np.float64,0xffa775bc042eeb80,0xbff921fb54442d18,3
+np.float64,0x3fbf04bd223e0980,0x3fbede37b8fc697e,3
+np.float64,0x7fd999b34c333366,0x3ff921fb54442d18,3
+np.float64,0xe72146dfce429,0xe72146dfce429,3
+np.float64,0x4f511ee49ea24,0x4f511ee49ea24,3
+np.float64,0xffb3e6e58827cdc8,0xbff921fb54442d18,3
+np.float64,0x3fd1f180cfa3e300,0x3fd17e85b2871de2,3
+np.float64,0x97c8e45b2f91d,0x97c8e45b2f91d,3
+np.float64,0xbfeeb20e88fd641d,0xbfe8778f878440bf,3
+np.float64,0xbfe1fc6dee23f8dc,0xbfe062c815a93cde,3
+np.float64,0xab4bf71f5697f,0xab4bf71f5697f,3
+np.float64,0xa9675a2952cec,0xa9675a2952cec,3
+np.float64,0xbfef3ea4a33e7d49,0xbfe8c02743ebc1b6,3
+np.float64,0x3fe22a2eafa4545d,0x3fe08577afca52a9,3
+np.float64,0x3fe8a08daaf1411c,0x3fe4fd5a34f05305,3
+np.float64,0xbfc6cda77b2d9b50,0xbfc6910bcfa0cf4f,3
+np.float64,0x3fec398394387307,0x3fe7211dd5276500,3
+np.float64,0x3fe36c95c626d92c,0x3fe1752e5aa2357b,3
+np.float64,0xffd8b9e7073173ce,0xbff921fb54442d18,3
+np.float64,0xffe19f043ae33e08,0xbff921fb54442d18,3
+np.float64,0x800e3640709c6c81,0x800e3640709c6c81,3
+np.float64,0x3fe7d6c20aafad84,0x3fe47d1a3307d9c8,3
+np.float64,0x80093fd63b727fad,0x80093fd63b727fad,3
+np.float64,0xffe1a671a4634ce3,0xbff921fb54442d18,3
+np.float64,0xbfe53a6b386a74d6,0xbfe2be41859cb10d,3
+np.float64,0xbfed149a097a2934,0xbfe79ab7e3e93c1c,3
+np.float64,0x7fc2769a5724ed34,0x3ff921fb54442d18,3
+np.float64,0xffd01e4e99a03c9e,0xbff921fb54442d18,3
+np.float64,0xa61f38434c3e7,0xa61f38434c3e7,3
+np.float64,0x800ad4ac5195a959,0x800ad4ac5195a959,3
+np.float64,0x7ff8000000000000,0x7ff8000000000000,3
+np.float64,0x80034a45b6c6948c,0x80034a45b6c6948c,3
+np.float64,0x6350b218c6a17,0x6350b218c6a17,3
+np.float64,0xfff0000000000000,0xbff921fb54442d18,3
+np.float64,0x3fe363e759e6c7cf,0x3fe16ed58d80f9ce,3
+np.float64,0xffe3b98e59e7731c,0xbff921fb54442d18,3
+np.float64,0x3fdbf7b40337ef68,0x3fda5df7ad3c80f9,3
+np.float64,0xbfe9cdf784739bef,0xbfe5b74f346ef93d,3
+np.float64,0xbfc321bea326437c,0xbfc2fdc0d4ff7561,3
+np.float64,0xbfe40f77d2a81ef0,0xbfe1eb28c4ae4dde,3
+np.float64,0x7fe071806960e300,0x3ff921fb54442d18,3
+np.float64,0x7fd269006ea4d200,0x3ff921fb54442d18,3
+np.float64,0x80017a56e0e2f4af,0x80017a56e0e2f4af,3
+np.float64,0x8004b4ea09a969d5,0x8004b4ea09a969d5,3
+np.float64,0xbfedbb01e63b7604,0xbfe7f4f0e84297df,3
+np.float64,0x3fe44454826888a9,0x3fe210ff6d005706,3
+np.float64,0xbfe0e77e6ea1cefd,0xbfdf1a977da33402,3
+np.float64,0xbfed6d4c8c3ada99,0xbfe7cb0932093f60,3
+np.float64,0x1d74cb9e3ae9a,0x1d74cb9e3ae9a,3
+np.float64,0x80082a785d1054f1,0x80082a785d1054f1,3
+np.float64,0x3fe58393266b0726,0x3fe2f0d8e91d4887,3
+np.float64,0xffe4028899680510,0xbff921fb54442d18,3
+np.float64,0x783a2e5af0746,0x783a2e5af0746,3
+np.float64,0x7fcdce88e73b9d11,0x3ff921fb54442d18,3
+np.float64,0x3fc58672a72b0ce5,0x3fc5535e090e56e2,3
+np.float64,0x800889c839b11391,0x800889c839b11391,3
+np.float64,0xffe5e05c466bc0b8,0xbff921fb54442d18,3
+np.float64,0xbfcbef6ebe37dedc,0xbfcb810752468f49,3
+np.float64,0xffe9408563b2810a,0xbff921fb54442d18,3
+np.float64,0xbfee4738367c8e70,0xbfe83f8e5dd7602f,3
+np.float64,0xbfe4aeb587295d6b,0xbfe25c7a0c76a454,3
+np.float64,0xffc9aea0a7335d40,0xbff921fb54442d18,3
+np.float64,0xe1e02199c3c04,0xe1e02199c3c04,3
+np.float64,0xbfbd9400783b2800,0xbfbd729345d1d14f,3
+np.float64,0x7a5418bcf4a84,0x7a5418bcf4a84,3
+np.float64,0x3fdc1c2fa5b83860,0x3fda7c935965ae72,3
+np.float64,0x80076a9f58ced53f,0x80076a9f58ced53f,3
+np.float64,0x3fedc4bf957b897f,0x3fe7fa2a83148f1c,3
+np.float64,0x800981b8a9d30372,0x800981b8a9d30372,3
+np.float64,0xffe1082311621046,0xbff921fb54442d18,3
+np.float64,0xe0091f89c0124,0xe0091f89c0124,3
+np.float64,0xbfce8d674f3d1ad0,0xbfcdfdbf2ddaa0ca,3
+np.float64,0x800516e72eaa2dcf,0x800516e72eaa2dcf,3
+np.float64,0xffe61ee64c6c3dcc,0xbff921fb54442d18,3
+np.float64,0x7fed2683cafa4d07,0x3ff921fb54442d18,3
+np.float64,0xffd4faf27729f5e4,0xbff921fb54442d18,3
+np.float64,0x7fe308fa842611f4,0x3ff921fb54442d18,3
+np.float64,0x3fc612a62b2c2550,0x3fc5db9ddbd4e159,3
+np.float64,0xbfe5b01e766b603d,0xbfe30f72a875e988,3
+np.float64,0x3fc2dd8b9a25bb17,0x3fc2bb06246b9f78,3
+np.float64,0x8170908102e12,0x8170908102e12,3
+np.float64,0x800c1c8a8a583915,0x800c1c8a8a583915,3
+np.float64,0xffe5d91e8b6bb23c,0xbff921fb54442d18,3
+np.float64,0xffd140adee22815c,0xbff921fb54442d18,3
+np.float64,0xbfe2f1f5f8e5e3ec,0xbfe11afa5d749952,3
+np.float64,0xbfed6d1d587ada3b,0xbfe7caef9ecf7651,3
+np.float64,0x3fe9b85e67f370bd,0x3fe5aa3474768982,3
+np.float64,0x7fdc8932edb91265,0x3ff921fb54442d18,3
+np.float64,0x7fd136bc54a26d78,0x3ff921fb54442d18,3
+np.float64,0x800a1ea12a343d43,0x800a1ea12a343d43,3
+np.float64,0x3fec6a5c1b78d4b8,0x3fe73c82235c3f8f,3
+np.float64,0x800fbf6a00df7ed4,0x800fbf6a00df7ed4,3
+np.float64,0xbfd0e6e0cda1cdc2,0xbfd0864bf8cad294,3
+np.float64,0x3fc716df482e2dbf,0x3fc6d7fbfd4a8470,3
+np.float64,0xbfe75990936eb321,0xbfe42bffec3fa0d7,3
+np.float64,0x3fd58e54a02b1ca9,0x3fd4cace1107a5cc,3
+np.float64,0xbfc9c04136338084,0xbfc9696ad2591d54,3
+np.float64,0xdd1f0147ba3e0,0xdd1f0147ba3e0,3
+np.float64,0x5c86a940b90e,0x5c86a940b90e,3
+np.float64,0xbfecae3b8e795c77,0xbfe7624d4988c612,3
+np.float64,0xffd0370595206e0c,0xbff921fb54442d18,3
+np.float64,0xbfdc26d443384da8,0xbfda857ecd33ba9f,3
+np.float64,0xbfd1c849d9a39094,0xbfd15849449cc378,3
+np.float64,0xffee04acdb3c0959,0xbff921fb54442d18,3
+np.float64,0xbfded1056dbda20a,0xbfdcb83b30e1528c,3
+np.float64,0x7fb7b826622f704c,0x3ff921fb54442d18,3
+np.float64,0xbfee4df8ae7c9bf1,0xbfe8431df9dfd05d,3
+np.float64,0x7fe7f3670e2fe6cd,0x3ff921fb54442d18,3
+np.float64,0x8008ac9ae0d15936,0x8008ac9ae0d15936,3
+np.float64,0x800dce9f3b3b9d3f,0x800dce9f3b3b9d3f,3
+np.float64,0x7fbb19db203633b5,0x3ff921fb54442d18,3
+np.float64,0x3fe56c7f302ad8fe,0x3fe2e0eec3ad45fd,3
+np.float64,0x7fe82c05c570580b,0x3ff921fb54442d18,3
+np.float64,0xc0552b7780aa6,0xc0552b7780aa6,3
+np.float64,0x39d40e3073a83,0x39d40e3073a83,3
+np.float64,0x3fd8db54d731b6aa,0x3fd7b589b3ee9b20,3
+np.float64,0xffcdd355233ba6ac,0xbff921fb54442d18,3
+np.float64,0x3fbe97b3a43d2f67,0x3fbe72bca9be0348,3
+np.float64,0xbff0000000000000,0xbfe921fb54442d18,3
+np.float64,0xbfb4f55e6229eac0,0xbfb4e96df18a75a7,3
+np.float64,0xbfc66399ba2cc734,0xbfc62a3298bd96fc,3
+np.float64,0x3fd00988bb201311,0x3fcf6d67a9374c38,3
+np.float64,0x7fe471867d28e30c,0x3ff921fb54442d18,3
+np.float64,0xbfe38e0e64271c1d,0xbfe18d9888b7523b,3
+np.float64,0x8009dc127573b825,0x8009dc127573b825,3
+np.float64,0x800047bde4608f7d,0x800047bde4608f7d,3
+np.float64,0xffeede42c77dbc85,0xbff921fb54442d18,3
+np.float64,0xd8cf6d13b19ee,0xd8cf6d13b19ee,3
+np.float64,0xbfd08fb302a11f66,0xbfd034b1f8235e23,3
+np.float64,0x7fdb404c0b368097,0x3ff921fb54442d18,3
+np.float64,0xbfd6ba0438ad7408,0xbfd5d673e3276ec1,3
+np.float64,0xffd9568027b2ad00,0xbff921fb54442d18,3
+np.float64,0xbfb313b73e262770,0xbfb30ab4acb4fa67,3
+np.float64,0xbfe2dc1a15e5b834,0xbfe10ac5f8f3acd3,3
+np.float64,0xbfee426bf4bc84d8,0xbfe83d061df91edd,3
+np.float64,0xd9142c2fb2286,0xd9142c2fb2286,3
+np.float64,0x7feb0d11dff61a23,0x3ff921fb54442d18,3
+np.float64,0x800fea5b509fd4b7,0x800fea5b509fd4b7,3
+np.float64,0x3fe1a8818da35103,0x3fe022ba1bdf366e,3
+np.float64,0x8010000000000000,0x8010000000000000,3
+np.float64,0xbfd8fc6de6b1f8dc,0xbfd7d24726ed8dcc,3
+np.float64,0xf4b3dc2de967c,0xf4b3dc2de967c,3
+np.float64,0x8af0409b15e08,0x8af0409b15e08,3
+np.float64,0x3fb21e6934243cd2,0x3fb216b065f8709a,3
+np.float64,0x3fc53069392a60d2,0x3fc4ffa931211fb9,3
+np.float64,0xffc955812c32ab04,0xbff921fb54442d18,3
+np.float64,0xbfe3de42b1a7bc86,0xbfe1c7bd1324de75,3
+np.float64,0x1dc149a03b82a,0x1dc149a03b82a,3
+np.float64,0x8001bc5a24a378b5,0x8001bc5a24a378b5,3
+np.float64,0x3da14c407b44,0x3da14c407b44,3
+np.float64,0x80025e8da924bd1c,0x80025e8da924bd1c,3
+np.float64,0xbfcb0141c9360284,0xbfca9d572ea5e1f3,3
+np.float64,0xc90036fd92007,0xc90036fd92007,3
+np.float64,0x138312c427063,0x138312c427063,3
+np.float64,0x800dda3a963bb475,0x800dda3a963bb475,3
+np.float64,0x3fe9339934f26732,0x3fe558e723291f78,3
+np.float64,0xbfea8357027506ae,0xbfe6240826faaf48,3
+np.float64,0x7fe04735cae08e6b,0x3ff921fb54442d18,3
+np.float64,0x3fe29aca3c653594,0x3fe0da214c8bc6a4,3
+np.float64,0x3fbe1f09a03c3e13,0x3fbdfbbefef0155b,3
+np.float64,0x816ee4ad02ddd,0x816ee4ad02ddd,3
+np.float64,0xffddd1b31d3ba366,0xbff921fb54442d18,3
+np.float64,0x3fe2e01e0625c03c,0x3fe10dc0bd6677c2,3
+np.float64,0x3fec6bcf1978d79e,0x3fe73d518cddeb7c,3
+np.float64,0x7fe01aaaf8603555,0x3ff921fb54442d18,3
+np.float64,0xdf300cc5be602,0xdf300cc5be602,3
+np.float64,0xbfe71c01a36e3804,0xbfe403af80ce47b8,3
+np.float64,0xffa5be00ac2b7c00,0xbff921fb54442d18,3
+np.float64,0xbfda9ba711b5374e,0xbfd93775e3ac6bda,3
+np.float64,0xbfe56d8a27eadb14,0xbfe2e1a7185e8e6d,3
+np.float64,0x800f1bc937be3792,0x800f1bc937be3792,3
+np.float64,0x800a61d93c74c3b3,0x800a61d93c74c3b3,3
+np.float64,0x7fe71a52fcae34a5,0x3ff921fb54442d18,3
+np.float64,0x7fb4aef256295de4,0x3ff921fb54442d18,3
+np.float64,0x3fe6c1e861ed83d1,0x3fe3c828f281a7ef,3
+np.float64,0x3fba128402342508,0x3fb9fb94cf141860,3
+np.float64,0x3fee55a7ecfcab50,0x3fe8472a9af893ee,3
+np.float64,0x3fe586f31b2b0de6,0x3fe2f32bce9e91bc,3
+np.float64,0xbfbb1d1442363a28,0xbfbb034c7729d5f2,3
+np.float64,0xc78b4d3f8f16a,0xc78b4d3f8f16a,3
+np.float64,0x7fdbc277d4b784ef,0x3ff921fb54442d18,3
+np.float64,0xbfa728ca2c2e5190,0xbfa724c04e73ccbd,3
+np.float64,0x7fefc7b2143f8f63,0x3ff921fb54442d18,3
+np.float64,0x3fd153a3dda2a748,0x3fd0ebccd33a4dca,3
+np.float64,0xbfe18a6eace314de,0xbfe00ba32ec89d30,3
+np.float64,0x7feef518537dea30,0x3ff921fb54442d18,3
+np.float64,0x8005f007cd4be010,0x8005f007cd4be010,3
+np.float64,0x7fd890b840b12170,0x3ff921fb54442d18,3
+np.float64,0x7feed0582ebda0af,0x3ff921fb54442d18,3
+np.float64,0x1013f53220280,0x1013f53220280,3
+np.float64,0xbfe77273986ee4e7,0xbfe43c375a8bf6de,3
+np.float64,0x7fe3ab8918675711,0x3ff921fb54442d18,3
+np.float64,0xbfc6ad515b2d5aa4,0xbfc671b2f7f86624,3
+np.float64,0x7fcd86231d3b0c45,0x3ff921fb54442d18,3
+np.float64,0xffe2523299a4a464,0xbff921fb54442d18,3
+np.float64,0x7fcadc5a1b35b8b3,0x3ff921fb54442d18,3
+np.float64,0x3fe5e020c4ebc042,0x3fe330418eec75bd,3
+np.float64,0x7fe332a9dc266553,0x3ff921fb54442d18,3
+np.float64,0xfa11dc21f425,0xfa11dc21f425,3
+np.float64,0xbec800177d900,0xbec800177d900,3
+np.float64,0x3fcadd057835ba0b,0x3fca7aa42face8bc,3
+np.float64,0xbfe6b9a206ad7344,0xbfe3c2a9719803de,3
+np.float64,0x3fbb4250b63684a0,0x3fbb281e9cefc519,3
+np.float64,0x7fef8787517f0f0e,0x3ff921fb54442d18,3
+np.float64,0x8001315c2d6262b9,0x8001315c2d6262b9,3
+np.float64,0xbfd94e3cf2b29c7a,0xbfd819257d36f56c,3
+np.float64,0xf1f325abe3e65,0xf1f325abe3e65,3
+np.float64,0x7fd6c07079ad80e0,0x3ff921fb54442d18,3
+np.float64,0x7fe328b075a65160,0x3ff921fb54442d18,3
+np.float64,0x7fe7998f812f331e,0x3ff921fb54442d18,3
+np.float64,0xffe026bb65604d76,0xbff921fb54442d18,3
+np.float64,0xffd6c06de8ad80dc,0xbff921fb54442d18,3
+np.float64,0x3fcd5a37bf3ab46f,0x3fccda82935d98ce,3
+np.float64,0xffc3e5a45227cb48,0xbff921fb54442d18,3
+np.float64,0x3febf7dd8177efbc,0x3fe6fc0bb999883e,3
+np.float64,0x7fd7047ea92e08fc,0x3ff921fb54442d18,3
+np.float64,0x35b3fc406b680,0x35b3fc406b680,3
+np.float64,0x7fd52e97632a5d2e,0x3ff921fb54442d18,3
+np.float64,0x3fd464d401a8c9a8,0x3fd3be2967fc97c3,3
+np.float64,0x800e815b2ebd02b6,0x800e815b2ebd02b6,3
+np.float64,0x3fca8428af350850,0x3fca257b466b8970,3
+np.float64,0x8007b7526f6f6ea6,0x8007b7526f6f6ea6,3
+np.float64,0x82f60a8f05ec2,0x82f60a8f05ec2,3
+np.float64,0x3fb71a5d0a2e34c0,0x3fb70a629ef8e2a2,3
+np.float64,0x7fc8570c7d30ae18,0x3ff921fb54442d18,3
+np.float64,0x7fe5528e77eaa51c,0x3ff921fb54442d18,3
+np.float64,0xffc20dbbf1241b78,0xbff921fb54442d18,3
+np.float64,0xeb13368fd6267,0xeb13368fd6267,3
+np.float64,0x7fe7d529056faa51,0x3ff921fb54442d18,3
+np.float64,0x3fecd02eabf9a05d,0x3fe77516f0ba1ac4,3
+np.float64,0x800fcba6a09f974d,0x800fcba6a09f974d,3
+np.float64,0x7fe7e8e015afd1bf,0x3ff921fb54442d18,3
+np.float64,0xbfd271a382a4e348,0xbfd1f513a191c595,3
+np.float64,0x9f1014013e21,0x9f1014013e21,3
+np.float64,0x3fc05da47f20bb49,0x3fc04708a13a3a47,3
+np.float64,0x3fe0f427dda1e850,0x3fdf2e60ba8678b9,3
+np.float64,0xbfecb29fa539653f,0xbfe764bc791c45dd,3
+np.float64,0x45881ec68b104,0x45881ec68b104,3
+np.float64,0x8000000000000001,0x8000000000000001,3
+np.float64,0x3fe9c67ee1338cfe,0x3fe5b2c7b3df6ce8,3
+np.float64,0x7fedb8fef6bb71fd,0x3ff921fb54442d18,3
+np.float64,0x3fe54f6aaaea9ed6,0x3fe2ccd1df2abaa9,3
+np.float64,0x7feff58a1bbfeb13,0x3ff921fb54442d18,3
+np.float64,0x7fe3b62827276c4f,0x3ff921fb54442d18,3
+np.float64,0x3fe5feb682ebfd6d,0x3fe345105bc6d980,3
+np.float64,0x3fe49f38d9693e72,0x3fe2518b2824757f,3
+np.float64,0x8006bfd27c6d7fa6,0x8006bfd27c6d7fa6,3
+np.float64,0x3fc13409e2226814,0x3fc119ce0c01a5a2,3
+np.float64,0x95f8c7212bf19,0x95f8c7212bf19,3
+np.float64,0x3fd9f0fa6133e1f5,0x3fd8a567515edecf,3
+np.float64,0x3fef95cbe5ff2b98,0x3fe8ec88c768ba0b,3
+np.float64,0x3fbed28bba3da510,0x3fbeacbf136e51c2,3
+np.float64,0xbfd3987aeca730f6,0xbfd303fca58e3e60,3
+np.float64,0xbfed0f90cbfa1f22,0xbfe797f59249410d,3
+np.float64,0xffe55d8cbf2abb19,0xbff921fb54442d18,3
+np.float64,0x3feb4d9fc6769b40,0x3fe69a88131a1f1f,3
+np.float64,0x80085569acd0aad4,0x80085569acd0aad4,3
+np.float64,0x20557a6e40ab0,0x20557a6e40ab0,3
+np.float64,0x3fead2fd5df5a5fb,0x3fe653091f33b27f,3
+np.float64,0x3fe7b9983eaf7330,0x3fe46a50c4b5235e,3
+np.float64,0xffdad237ffb5a470,0xbff921fb54442d18,3
+np.float64,0xbfe5cc39a4eb9874,0xbfe322ad3a903f93,3
+np.float64,0x800ad6eecb35adde,0x800ad6eecb35adde,3
+np.float64,0xffec620f6438c41e,0xbff921fb54442d18,3
+np.float64,0xbfe5ef29122bde52,0xbfe33a7dfcc255e2,3
+np.float64,0x3fd451e7d0a8a3d0,0x3fd3acfa4939af10,3
+np.float64,0x8003ea93c127d528,0x8003ea93c127d528,3
+np.float64,0x800b48d37c9691a7,0x800b48d37c9691a7,3
+np.float64,0x3fe7e202acafc405,0x3fe484558246069b,3
+np.float64,0x80070c9b686e1938,0x80070c9b686e1938,3
+np.float64,0xbfda90bbc6352178,0xbfd92e25fcd12288,3
+np.float64,0x800e1ffebb1c3ffe,0x800e1ffebb1c3ffe,3
+np.float64,0x3ff0000000000000,0x3fe921fb54442d18,3
+np.float64,0xffd8cfdd46319fba,0xbff921fb54442d18,3
+np.float64,0x7fd8cd4182319a82,0x3ff921fb54442d18,3
+np.float64,0x3fed8bb778bb176f,0x3fe7db7c77c4c694,3
+np.float64,0x3fc74a70302e94e0,0x3fc709e95d6defec,3
+np.float64,0x3fe87269d070e4d4,0x3fe4e04bcc4a2137,3
+np.float64,0x7fb48223f6290447,0x3ff921fb54442d18,3
+np.float64,0xffe8ec444b71d888,0xbff921fb54442d18,3
+np.float64,0x7fde17d280bc2fa4,0x3ff921fb54442d18,3
+np.float64,0x3fd1cbde01a397bc,0x3fd15b9bb7b3147b,3
+np.float64,0x800883a64451074d,0x800883a64451074d,3
+np.float64,0x7fe3160a3f262c13,0x3ff921fb54442d18,3
+np.float64,0xbfe051d4d9a0a3aa,0xbfde2ecf14dc75fb,3
+np.float64,0xbfd89de689b13bce,0xbfd780176d1a28a3,3
+np.float64,0x3fecde2bf779bc58,0x3fe77ccf10bdd8e2,3
+np.float64,0xffe75774dc6eaee9,0xbff921fb54442d18,3
+np.float64,0x7fe834414d706882,0x3ff921fb54442d18,3
+np.float64,0x1,0x1,3
+np.float64,0xbfea5e4e4a74bc9c,0xbfe60e0601711835,3
+np.float64,0xffec248d4cb8491a,0xbff921fb54442d18,3
+np.float64,0xffd9942c2c332858,0xbff921fb54442d18,3
+np.float64,0xa9db36a553b67,0xa9db36a553b67,3
+np.float64,0x7fec630718b8c60d,0x3ff921fb54442d18,3
+np.float64,0xbfd062188f20c432,0xbfd009ecd652be89,3
+np.float64,0x8001b84e3023709d,0x8001b84e3023709d,3
+np.float64,0xbfe9e26d7cb3c4db,0xbfe5c3b157ecf668,3
+np.float64,0xbfef66ddf33ecdbc,0xbfe8d4b1f6410a24,3
+np.float64,0x3fd8d7109431ae21,0x3fd7b1d4860719a2,3
+np.float64,0xffee0f53107c1ea5,0xbff921fb54442d18,3
+np.float64,0x80000b4fd60016a0,0x80000b4fd60016a0,3
+np.float64,0xbfd99ff6e5333fee,0xbfd85fb3cbdaa049,3
+np.float64,0xbfe9cfd268339fa5,0xbfe5b86ef021a1b1,3
+np.float64,0xe32eace1c65d6,0xe32eace1c65d6,3
+np.float64,0xffc81f6627303ecc,0xbff921fb54442d18,3
+np.float64,0x7fe98dadde331b5b,0x3ff921fb54442d18,3
+np.float64,0xbfbcebd11e39d7a0,0xbfbccc8ec47883c7,3
+np.float64,0x7fe164880f22c90f,0x3ff921fb54442d18,3
+np.float64,0x800467c0cae8cf82,0x800467c0cae8cf82,3
+np.float64,0x800071e4b140e3ca,0x800071e4b140e3ca,3
+np.float64,0xbfc87a7eae30f4fc,0xbfc82fbc55bb0f24,3
+np.float64,0xffb2e0e23225c1c8,0xbff921fb54442d18,3
+np.float64,0x20ef338041df,0x20ef338041df,3
+np.float64,0x7fe6de71ca6dbce3,0x3ff921fb54442d18,3
+np.float64,0x5d1fa026ba3f5,0x5d1fa026ba3f5,3
+np.float64,0xffd112a9ce222554,0xbff921fb54442d18,3
+np.float64,0x3fb351f66626a3ed,0x3fb3489ab578c452,3
+np.float64,0x7fef7b2bd3bef657,0x3ff921fb54442d18,3
+np.float64,0xffe144f5d4e289eb,0xbff921fb54442d18,3
+np.float64,0xffd63a6750ac74ce,0xbff921fb54442d18,3
+np.float64,0x7fd2d8bb25a5b175,0x3ff921fb54442d18,3
+np.float64,0x3fec5920a078b242,0x3fe732dcffcf6521,3
+np.float64,0x80009a8b7f813518,0x80009a8b7f813518,3
+np.float64,0x3fdea220893d4441,0x3fdc921edf6bf3d8,3
+np.float64,0x8006cee2208d9dc5,0x8006cee2208d9dc5,3
+np.float64,0xdd0b0081ba17,0xdd0b0081ba17,3
+np.float64,0x7ff4000000000000,0x7ffc000000000000,3
+np.float64,0xbfdac33955358672,0xbfd9592bce7daf1f,3
+np.float64,0x7fe8301d7170603a,0x3ff921fb54442d18,3
+np.float64,0xbfc1d34d8523a69c,0xbfc1b62449af9684,3
+np.float64,0x800c62239458c447,0x800c62239458c447,3
+np.float64,0xffd398c009a73180,0xbff921fb54442d18,3
+np.float64,0xbfe0c6d9ee218db4,0xbfdee777557f4401,3
+np.float64,0x3feccdd373799ba7,0x3fe773c9c2263f89,3
+np.float64,0xbfd21898bda43132,0xbfd1a2be8545fcc5,3
+np.float64,0x3fd77019b62ee033,0x3fd67793cabdf267,3
+np.float64,0x7fa609cad42c1395,0x3ff921fb54442d18,3
+np.float64,0x7fb4eaea5a29d5d4,0x3ff921fb54442d18,3
+np.float64,0x3fc570dc9a2ae1b9,0x3fc53e5f6218a799,3
+np.float64,0x800344ae8466895e,0x800344ae8466895e,3
+np.float64,0xbfc7c985252f930c,0xbfc784d60fa27bac,3
+np.float64,0xffaa2929fc345250,0xbff921fb54442d18,3
+np.float64,0xffe63e5ee9ac7cbe,0xbff921fb54442d18,3
+np.float64,0x73f0280ce7e06,0x73f0280ce7e06,3
+np.float64,0xffc525f8822a4bf0,0xbff921fb54442d18,3
+np.float64,0x7fd744d00aae899f,0x3ff921fb54442d18,3
+np.float64,0xbfe0fe590761fcb2,0xbfdf3e493e8b1f32,3
+np.float64,0xfae04ae7f5c0a,0xfae04ae7f5c0a,3
+np.float64,0xef821939df043,0xef821939df043,3
+np.float64,0x7fef6135843ec26a,0x3ff921fb54442d18,3
+np.float64,0xbfebf34dcbf7e69c,0xbfe6f97588a8f911,3
+np.float64,0xbfeec0b498fd8169,0xbfe87f2eceeead12,3
+np.float64,0x7fb67161b42ce2c2,0x3ff921fb54442d18,3
+np.float64,0x3fdcfd998639fb33,0x3fdb38934927c096,3
+np.float64,0xffda5960bc34b2c2,0xbff921fb54442d18,3
+np.float64,0xbfe11f8c71223f19,0xbfdf71fe770c96ab,3
+np.float64,0x3fe4ac1bab695838,0x3fe25aa4517b8322,3
+np.float64,0x3f730458a02608b1,0x3f73044fabb5e999,3
+np.float64,0x3fdb14ffcdb62a00,0x3fd99ea6c241a3ed,3
+np.float64,0xbfc93208cd326410,0xbfc8e09d78b6d4db,3
+np.float64,0x19e734dc33ce8,0x19e734dc33ce8,3
+np.float64,0x3fe5e98428abd308,0x3fe336a6a085eb55,3
+np.float64,0x7fec672a1378ce53,0x3ff921fb54442d18,3
+np.float64,0x800f8bd8d4ff17b2,0x800f8bd8d4ff17b2,3
+np.float64,0xbfe5a12e4e6b425c,0xbfe30533f99d5d06,3
+np.float64,0x75a34cb0eb46a,0x75a34cb0eb46a,3
+np.float64,0x7fe1d21d16a3a439,0x3ff921fb54442d18,3
+np.float64,0x7ff0000000000000,0x3ff921fb54442d18,3
+np.float64,0xffe0f50db261ea1b,0xbff921fb54442d18,3
+np.float64,0xbfd9dc22feb3b846,0xbfd8937ec965a501,3
+np.float64,0x8009d68e48d3ad1d,0x8009d68e48d3ad1d,3
+np.float64,0xbfe2eba620e5d74c,0xbfe1164d7d273c60,3
+np.float64,0x992efa09325e0,0x992efa09325e0,3
+np.float64,0x3fdab640ea356c82,0x3fd94e20cab88db2,3
+np.float64,0x69a6f04ad34df,0x69a6f04ad34df,3
+np.float64,0x3fe397df25272fbe,0x3fe194bd1a3a6192,3
+np.float64,0xebcce9fdd799d,0xebcce9fdd799d,3
+np.float64,0x3fbb49490c369292,0x3fbb2f02eccc497d,3
+np.float64,0xffd871f980b0e3f4,0xbff921fb54442d18,3
+np.float64,0x800348f6966691ee,0x800348f6966691ee,3
+np.float64,0xbfebc270a7f784e1,0xbfe6dda8d0d80f26,3
+np.float64,0xffd6d559b1adaab4,0xbff921fb54442d18,3
+np.float64,0x3fec3635c0b86c6c,0x3fe71f420256e43e,3
+np.float64,0x7fbc82ad7039055a,0x3ff921fb54442d18,3
+np.float64,0x7f873050602e60a0,0x3ff921fb54442d18,3
+np.float64,0x3fca44b8c3348970,0x3fc9e8a1a1a2d96e,3
+np.float64,0x3fe0fc308fe1f861,0x3fdf3aeb469ea225,3
+np.float64,0x7fefc27de8bf84fb,0x3ff921fb54442d18,3
+np.float64,0x8005f3f3916be7e8,0x8005f3f3916be7e8,3
+np.float64,0xbfd4278c7c284f18,0xbfd38678988873b6,3
+np.float64,0x435eafc486bd7,0x435eafc486bd7,3
+np.float64,0xbfd01f5199203ea4,0xbfcf96631f2108a3,3
+np.float64,0xffd5ee9185abdd24,0xbff921fb54442d18,3
+np.float64,0xffedb363257b66c5,0xbff921fb54442d18,3
+np.float64,0x800d68e6e11ad1ce,0x800d68e6e11ad1ce,3
+np.float64,0xbfcf687f8e3ed100,0xbfceccb771b0d39a,3
+np.float64,0x7feb3b9ef2f6773d,0x3ff921fb54442d18,3
+np.float64,0x3fe15ec5ca62bd8c,0x3fdfd3fab9d96f81,3
+np.float64,0x10000000000000,0x10000000000000,3
+np.float64,0xd2386f81a470e,0xd2386f81a470e,3
+np.float64,0xb9feed4573fde,0xb9feed4573fde,3
+np.float64,0x3fe7ed25c9efda4c,0x3fe48b7b72db4014,3
+np.float64,0xbfe01478726028f1,0xbfddcd1f5a2efc59,3
+np.float64,0x9946d02f328da,0x9946d02f328da,3
+np.float64,0xbfe3bb67f06776d0,0xbfe1ae88aa81c5a6,3
+np.float64,0xbfd3fd8a4c27fb14,0xbfd3603982e3b78d,3
+np.float64,0xffd5c3ab912b8758,0xbff921fb54442d18,3
+np.float64,0xffd5f502b12bea06,0xbff921fb54442d18,3
+np.float64,0xbfc64981ec2c9304,0xbfc610e0382b1fa6,3
+np.float64,0xffec42e3413885c6,0xbff921fb54442d18,3
+np.float64,0x80084eb4ed109d6a,0x80084eb4ed109d6a,3
+np.float64,0xbfd17cac9fa2f95a,0xbfd112020588a4b3,3
+np.float64,0xbfd06c1359a0d826,0xbfd0134a28aa9a66,3
+np.float64,0x7fdc3d7c03b87af7,0x3ff921fb54442d18,3
+np.float64,0x7bdf5aaaf7bec,0x7bdf5aaaf7bec,3
+np.float64,0xbfee3cd966fc79b3,0xbfe83a14bc07ac3b,3
+np.float64,0x7fec910da3f9221a,0x3ff921fb54442d18,3
+np.float64,0xffb4ea667029d4d0,0xbff921fb54442d18,3
+np.float64,0x800103d7cce207b0,0x800103d7cce207b0,3
+np.float64,0x7fbb229a6c364534,0x3ff921fb54442d18,3
+np.float64,0x0,0x0,3
+np.float64,0xffd8fccd0331f99a,0xbff921fb54442d18,3
+np.float64,0xbfd0784ae1a0f096,0xbfd01ebff62e39ad,3
+np.float64,0xbfed2ec9b3ba5d93,0xbfe7a9099410bc76,3
+np.float64,0x800690b8d16d2172,0x800690b8d16d2172,3
+np.float64,0x7fc061b26520c364,0x3ff921fb54442d18,3
+np.float64,0x8007ec47054fd88f,0x8007ec47054fd88f,3
+np.float64,0x775546b6eeaa9,0x775546b6eeaa9,3
+np.float64,0x8005e00fb56bc020,0x8005e00fb56bc020,3
+np.float64,0xbfe510f8d0ea21f2,0xbfe2a16862b5a37f,3
+np.float64,0xffd87a6bf3b0f4d8,0xbff921fb54442d18,3
+np.float64,0x800906e3d0520dc8,0x800906e3d0520dc8,3
+np.float64,0x2296f000452f,0x2296f000452f,3
+np.float64,0xbfe3189fa2e63140,0xbfe1378c0e005be4,3
+np.float64,0xb4d2447f69a49,0xb4d2447f69a49,3
+np.float64,0xffd056a24a20ad44,0xbff921fb54442d18,3
+np.float64,0xbfe3b23fe4e76480,0xbfe1a7e5840fcbeb,3
+np.float64,0x80018ee270831dc6,0x80018ee270831dc6,3
+np.float64,0x800df89f245bf13e,0x800df89f245bf13e,3
+np.float64,0x3fee1409d7bc2814,0x3fe824779d133232,3
+np.float64,0xbfef8d81667f1b03,0xbfe8e85523620368,3
+np.float64,0xffd8a6519b314ca4,0xbff921fb54442d18,3
+np.float64,0x7fc7bc86f32f790d,0x3ff921fb54442d18,3
+np.float64,0xffea6159e674c2b3,0xbff921fb54442d18,3
+np.float64,0x3fe153c3fba2a788,0x3fdfc2f74769d300,3
+np.float64,0xffc4261ef3284c3c,0xbff921fb54442d18,3
+np.float64,0x7fe8a8961ff1512b,0x3ff921fb54442d18,3
+np.float64,0xbfe3fb1fd167f640,0xbfe1dc89dcb7ecdf,3
+np.float64,0x3fd88577c2b10af0,0x3fd76acc09660704,3
+np.float64,0x3fe128ec27e251d8,0x3fdf808fc7ebcd8f,3
+np.float64,0xbfed6ca7c4fad950,0xbfe7caafe9a3e213,3
+np.float64,0xbf9a3912b8347220,0xbf9a379b3349352e,3
+np.float64,0xbfd724d7bcae49b0,0xbfd6351efa2a5fc5,3
+np.float64,0xbfed59700a7ab2e0,0xbfe7c043014c694c,3
+np.float64,0x8002ad435bc55a87,0x8002ad435bc55a87,3
+np.float64,0xffe46ed345a8dda6,0xbff921fb54442d18,3
+np.float64,0x7fd2f1d1d825e3a3,0x3ff921fb54442d18,3
+np.float64,0xbfea0265e23404cc,0xbfe5d6fb3fd30464,3
+np.float64,0xbfd17e049122fc0a,0xbfd113421078bbae,3
+np.float64,0xffea03b986b40772,0xbff921fb54442d18,3
+np.float64,0x800b55331a16aa67,0x800b55331a16aa67,3
+np.float64,0xbfc6fcafbf2df960,0xbfc6be9ecd0ebc1f,3
+np.float64,0xd6a36017ad46c,0xd6a36017ad46c,3
+np.float64,0xbfe9ba86dfb3750e,0xbfe5ab840cb0ef86,3
+np.float64,0x75c4a108eb895,0x75c4a108eb895,3
+np.float64,0x8008d6bc8051ad79,0x8008d6bc8051ad79,3
+np.float64,0xbfd3dc5984a7b8b4,0xbfd341f78e0528ec,3
+np.float64,0xffe1cbb01aa39760,0xbff921fb54442d18,3
+np.float64,0x3fc7e292f52fc526,0x3fc79d0ce9365767,3
+np.float64,0xbfcbeae2bd37d5c4,0xbfcb7cb034f82467,3
+np.float64,0x8000f0c62e21e18d,0x8000f0c62e21e18d,3
+np.float64,0xbfe23d8bc6247b18,0xbfe09418ee35c3c7,3
+np.float64,0x717394bae2e73,0x717394bae2e73,3
+np.float64,0xffa2ef1cc425de40,0xbff921fb54442d18,3
+np.float64,0x3fd938c229b27184,0x3fd806900735c99d,3
+np.float64,0x800bf3ec8a77e7d9,0x800bf3ec8a77e7d9,3
+np.float64,0xffeef41dd57de83b,0xbff921fb54442d18,3
+np.float64,0x8008df97e5b1bf30,0x8008df97e5b1bf30,3
+np.float64,0xffe9ab9d0db35739,0xbff921fb54442d18,3
+np.float64,0x99ff391333fe7,0x99ff391333fe7,3
+np.float64,0x3fb864b4a630c969,0x3fb851e883ea2cf9,3
+np.float64,0x22c1230a45825,0x22c1230a45825,3
+np.float64,0xff2336fbfe467,0xff2336fbfe467,3
+np.float64,0xbfd488f4cea911ea,0xbfd3def0490f5414,3
+np.float64,0x3fa379c78426f38f,0x3fa377607370800b,3
+np.float64,0xbfb0873302210e68,0xbfb08155b78dfd53,3
+np.float64,0xbfdf9ff7c2bf3ff0,0xbfdd5f658e357ad2,3
+np.float64,0x800978719192f0e4,0x800978719192f0e4,3
+np.float64,0xbfba8759ea350eb0,0xbfba6f325013b9e5,3
+np.float64,0xbfdd3e6b06ba7cd6,0xbfdb6e472b6091b0,3
+np.float64,0x7fe0c334a7a18668,0x3ff921fb54442d18,3
+np.float64,0xbfeb971feb772e40,0xbfe6c4e0f61404d1,3
+np.float64,0x3fe2a50968e54a13,0x3fe0e1c8b8d96e85,3
+np.float64,0x800fa9c5515f538b,0x800fa9c5515f538b,3
+np.float64,0x800f8532fbbf0a66,0x800f8532fbbf0a66,3
+np.float64,0x167d6f1e2cfaf,0x167d6f1e2cfaf,3
+np.float64,0xffee88e769fd11ce,0xbff921fb54442d18,3
+np.float64,0xbfeecc8529fd990a,0xbfe885520cdad8ea,3
+np.float64,0xffefffffffffffff,0xbff921fb54442d18,3
+np.float64,0xbfef6a566afed4ad,0xbfe8d6767b4c4235,3
+np.float64,0xffec12415af82482,0xbff921fb54442d18,3
+np.float64,0x3678a20a6cf15,0x3678a20a6cf15,3
+np.float64,0xffe468d54ee8d1aa,0xbff921fb54442d18,3
+np.float64,0x800ad6006795ac01,0x800ad6006795ac01,3
+np.float64,0x8001d5b61063ab6d,0x8001d5b61063ab6d,3
+np.float64,0x800dfcd1863bf9a3,0x800dfcd1863bf9a3,3
+np.float64,0xc9fbff6f93f80,0xc9fbff6f93f80,3
+np.float64,0xffe55c20f9eab842,0xbff921fb54442d18,3
+np.float64,0xbfcb596b6536b2d8,0xbfcaf1b339c5c615,3
+np.float64,0xbfe092689ea124d1,0xbfde94fa58946e51,3
+np.float64,0x3fe9ec733af3d8e6,0x3fe5c9bf5dee2623,3
+np.float64,0x3fe30f3d83261e7b,0x3fe1309fd6620e03,3
+np.float64,0xffd31d7f84263b00,0xbff921fb54442d18,3
+np.float64,0xbfe88d2d3e711a5a,0xbfe4f12b5a136178,3
+np.float64,0xffc81e4ce1303c98,0xbff921fb54442d18,3
+np.float64,0xffe5b96ebfab72dd,0xbff921fb54442d18,3
+np.float64,0x512f0502a25e1,0x512f0502a25e1,3
+np.float64,0x7fa3a376982746ec,0x3ff921fb54442d18,3
+np.float64,0x80005b5f2f60b6bf,0x80005b5f2f60b6bf,3
+np.float64,0xc337cc69866fa,0xc337cc69866fa,3
+np.float64,0x3fe7719c4caee339,0x3fe43bab42b19e64,3
+np.float64,0x7fde7ec1d93cfd83,0x3ff921fb54442d18,3
+np.float64,0x3fd2f38f3825e71e,0x3fd26cc7b1dd0acb,3
+np.float64,0x7fce298b993c5316,0x3ff921fb54442d18,3
+np.float64,0x56ae3b2cad5c8,0x56ae3b2cad5c8,3
+np.float64,0x3fe9299f2bf2533e,0x3fe552bddd999e72,3
+np.float64,0x7feff3a4823fe748,0x3ff921fb54442d18,3
+np.float64,0xbfd05c670aa0b8ce,0xbfd00494d78e9e97,3
+np.float64,0xffe745323eae8a64,0xbff921fb54442d18,3
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv
new file mode 100644
index 00000000..a655269d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-arctanh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3ee82930,0x3efa60fd,2
+np.float32,0x3f0aa640,0x3f1b3e13,2
+np.float32,0x3ec1a21c,0x3ecbbf8d,2
+np.float32,0x3cdb1740,0x3cdb24a1,2
+np.float32,0xbf28b6f3,0xbf4a86ac,2
+np.float32,0xbe490dcc,0xbe4bb2eb,2
+np.float32,0x80000001,0x80000001,2
+np.float32,0xbf44f9dd,0xbf826ce1,2
+np.float32,0xbf1d66c4,0xbf37786b,2
+np.float32,0x3f0ad26a,0x3f1b7c9b,2
+np.float32,0x3f7b6c54,0x4016aab0,2
+np.float32,0xbf715bb8,0xbfe1a0bc,2
+np.float32,0xbee8a562,0xbefafd6a,2
+np.float32,0x3db94d00,0x3db9cf16,2
+np.float32,0x3ee2970c,0x3ef368b3,2
+np.float32,0x3f3f8614,0x3f77fdca,2
+np.float32,0xbf1fb5f0,0xbf3b3789,2
+np.float32,0x3f798dc0,0x400b96bb,2
+np.float32,0x3e975d64,0x3e9c0573,2
+np.float32,0xbe3f1908,0xbe415d1f,2
+np.float32,0x3f2cea38,0x3f52192e,2
+np.float32,0x3e82f1ac,0x3e85eaa1,2
+np.float32,0x3eab6b30,0x3eb24acd,2
+np.float32,0xbe9bb90c,0xbea0cf5f,2
+np.float32,0xbf43e847,0xbf81202f,2
+np.float32,0xbd232fa0,0xbd2345c0,2
+np.float32,0xbbabbc00,0xbbabbc67,2
+np.float32,0xbf0b2975,0xbf1bf808,2
+np.float32,0xbef5ab0a,0xbf05d305,2
+np.float32,0x3f2cad16,0x3f51a8e2,2
+np.float32,0xbef75940,0xbf06eb08,2
+np.float32,0xbf0c1216,0xbf1d4325,2
+np.float32,0x3e7bdc08,0x3e8090c2,2
+np.float32,0x3da14e10,0x3da1a3c5,2
+np.float32,0x3f627412,0x3fb2bf21,2
+np.float32,0xbd6d08c0,0xbd6d4ca0,2
+np.float32,0x3f3e2368,0x3f74df8b,2
+np.float32,0xbe0df104,0xbe0edc77,2
+np.float32,0x3e8a265c,0x3e8da833,2
+np.float32,0xbdccdbb0,0xbdcd8ba8,2
+np.float32,0x3eb080c4,0x3eb80a44,2
+np.float32,0x3e627800,0x3e6645fe,2
+np.float32,0xbd8be0b0,0xbd8c1886,2
+np.float32,0xbf3282ac,0xbf5cae8c,2
+np.float32,0xbe515910,0xbe545707,2
+np.float32,0xbf2e64ac,0xbf54d637,2
+np.float32,0x3e0fc230,0x3e10b6de,2
+np.float32,0x3eb13ca0,0x3eb8df94,2
+np.float32,0x3f07a3ca,0x3f170572,2
+np.float32,0x3f2c7026,0x3f513935,2
+np.float32,0x3f3c4ec8,0x3f70d67c,2
+np.float32,0xbee9cce8,0xbefc724f,2
+np.float32,0xbe53ca60,0xbe56e3f3,2
+np.float32,0x3dd9e9a0,0x3ddabd98,2
+np.float32,0x3f38b8d4,0x3f69319b,2
+np.float32,0xbe176dc4,0xbe188c1d,2
+np.float32,0xbf322f2e,0xbf5c0c51,2
+np.float32,0xbe9b8676,0xbea097a2,2
+np.float32,0xbca44280,0xbca44823,2
+np.float32,0xbe2b0248,0xbe2ca036,2
+np.float32,0x3d101e80,0x3d102dbd,2
+np.float32,0xbf4eb610,0xbf8f526d,2
+np.float32,0xbec32a50,0xbecd89d1,2
+np.float32,0x3d549100,0x3d54c1ee,2
+np.float32,0x3f78e55e,0x40087025,2
+np.float32,0x3e592798,0x3e5c802d,2
+np.float32,0x3de045d0,0x3de12cfb,2
+np.float32,0xbdad28e0,0xbdad92f7,2
+np.float32,0x3e9a69e0,0x3e9f5e59,2
+np.float32,0x3e809778,0x3e836716,2
+np.float32,0xbf3278d9,0xbf5c9b6d,2
+np.float32,0x3f39fa00,0x3f6bd4a5,2
+np.float32,0xbec8143c,0xbed34ffa,2
+np.float32,0x3ddb7f40,0x3ddc57e6,2
+np.float32,0x3f0e8342,0x3f20c634,2
+np.float32,0x3f353dda,0x3f6213a4,2
+np.float32,0xbe96b400,0xbe9b4bea,2
+np.float32,0x3e626580,0x3e66328a,2
+np.float32,0xbde091c8,0xbde179df,2
+np.float32,0x3eb47b5c,0x3ebc91ca,2
+np.float32,0xbf282182,0xbf497f2f,2
+np.float32,0x3ea9f64c,0x3eb0a748,2
+np.float32,0x3f28dd4e,0x3f4aca86,2
+np.float32,0xbf71de18,0xbfe3f587,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0xbf6696a6,0xbfbcf11a,2
+np.float32,0xbc853ae0,0xbc853de2,2
+np.float32,0xbeced246,0xbedb51b8,2
+np.float32,0x3f3472a4,0x3f607e00,2
+np.float32,0xbee90124,0xbefb7117,2
+np.float32,0x3eb45b90,0x3ebc6d7c,2
+np.float32,0xbe53ead0,0xbe5705d6,2
+np.float32,0x3f630c80,0x3fb420e2,2
+np.float32,0xbf408cd0,0xbf7a56a2,2
+np.float32,0x3dda4ed0,0x3ddb23f1,2
+np.float32,0xbf37ae88,0xbf67096b,2
+np.float32,0xbdd48c28,0xbdd550c9,2
+np.float32,0xbf5745b0,0xbf9cb4a4,2
+np.float32,0xbf44e6fc,0xbf8255c1,2
+np.float32,0x3f5c8e6a,0x3fa65020,2
+np.float32,0xbea45fe8,0xbeaa6630,2
+np.float32,0x3f08bdee,0x3f188ef5,2
+np.float32,0x3ec77e74,0x3ed29f4b,2
+np.float32,0xbf1a1d3c,0xbf324029,2
+np.float32,0x3cad7340,0x3cad79e3,2
+np.float32,0xbf4fac2e,0xbf90b72a,2
+np.float32,0x3f58516e,0x3f9e8330,2
+np.float32,0x3f442008,0x3f816391,2
+np.float32,0xbf6e0c6c,0xbfd42854,2
+np.float32,0xbf266f7a,0xbf4689b2,2
+np.float32,0x3eb7e2f0,0x3ec077ba,2
+np.float32,0xbf320fd0,0xbf5bcf83,2
+np.float32,0xbf6a76b9,0xbfc80a11,2
+np.float32,0xbf2a91b4,0xbf4dd526,2
+np.float32,0x3f176e30,0x3f2e150e,2
+np.float32,0xbdcccad0,0xbdcd7a9c,2
+np.float32,0x3f60a8a4,0x3faebbf7,2
+np.float32,0x3d9706f0,0x3d974d40,2
+np.float32,0x3ef3cd34,0x3f049d58,2
+np.float32,0xbf73c615,0xbfed79fe,2
+np.float32,0x3df1b170,0x3df2d31b,2
+np.float32,0x3f632a46,0x3fb466c7,2
+np.float32,0xbf3ea18e,0xbf75f9ce,2
+np.float32,0xbf3ea05c,0xbf75f71f,2
+np.float32,0xbdd76750,0xbdd83403,2
+np.float32,0xbca830c0,0xbca836cd,2
+np.float32,0x3f1d4162,0x3f373c59,2
+np.float32,0x3c115700,0x3c1157fa,2
+np.float32,0x3dae8ab0,0x3daef758,2
+np.float32,0xbcad5020,0xbcad56bf,2
+np.float32,0x3ee299c4,0x3ef36c15,2
+np.float32,0xbf7f566c,0xc054c3bd,2
+np.float32,0x3f0cc698,0x3f1e4557,2
+np.float32,0xbe75c648,0xbe7aaa04,2
+np.float32,0x3ea29238,0x3ea86417,2
+np.float32,0x3f09d9c0,0x3f1a1d61,2
+np.float32,0x3f67275c,0x3fbe74b3,2
+np.float32,0x3e1a4e18,0x3e1b7d3a,2
+np.float32,0xbef6e3fc,0xbf069e98,2
+np.float32,0xbf6038ac,0xbfadc9fd,2
+np.float32,0xbe46bdd4,0xbe494b7f,2
+np.float32,0xbf4df1f4,0xbf8e3a98,2
+np.float32,0x3d094dc0,0x3d095aed,2
+np.float32,0x3f44c7d2,0x3f822fa3,2
+np.float32,0xbea30816,0xbea8e737,2
+np.float32,0xbe3c27c4,0xbe3e511b,2
+np.float32,0x3f3bb47c,0x3f6f8789,2
+np.float32,0xbe423760,0xbe4498c3,2
+np.float32,0x3ece1a74,0x3eda7634,2
+np.float32,0x3f14d1f6,0x3f2a1a89,2
+np.float32,0xbf4d9e8f,0xbf8dc4c1,2
+np.float32,0xbe92968e,0xbe96cd7f,2
+np.float32,0x3e99e6c0,0x3e9ece26,2
+np.float32,0xbf397361,0xbf6ab878,2
+np.float32,0xbf4fcea4,0xbf90e99f,2
+np.float32,0x3de37640,0x3de46779,2
+np.float32,0x3eb1b604,0x3eb9698c,2
+np.float32,0xbf52d0a2,0xbf957361,2
+np.float32,0xbe20435c,0xbe21975a,2
+np.float32,0x3f437a58,0x3f809bf1,2
+np.float32,0x3f27d1cc,0x3f48f335,2
+np.float32,0x3f7d4ff2,0x4027d1e2,2
+np.float32,0xbef732e4,0xbf06d205,2
+np.float32,0x3f4a0ae6,0x3f88e18e,2
+np.float32,0x3f800000,0x7f800000,2
+np.float32,0x3e3e56a0,0x3e4093ba,2
+np.float32,0xbed2fcfa,0xbee0517d,2
+np.float32,0xbe0e0114,0xbe0eecd7,2
+np.float32,0xbe808574,0xbe8353db,2
+np.float32,0x3f572e2a,0x3f9c8c86,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0x3f3f3c82,0x3f775703,2
+np.float32,0xbf6e2482,0xbfd4818b,2
+np.float32,0xbf3943b0,0xbf6a5439,2
+np.float32,0x3f6e42ac,0x3fd4f1ea,2
+np.float32,0x3eb676c4,0x3ebed619,2
+np.float32,0xbe5e56c4,0xbe61ef6c,2
+np.float32,0x3eea200c,0x3efcdb65,2
+np.float32,0x3e3d2c78,0x3e3f5ef8,2
+np.float32,0xbdfd8fb0,0xbdfede71,2
+np.float32,0xbee69c8a,0xbef86e89,2
+np.float32,0x3e9efca0,0x3ea46a1c,2
+np.float32,0x3e4c2498,0x3e4ee9ee,2
+np.float32,0xbf3cc93c,0xbf71e21d,2
+np.float32,0x3ee0d77c,0x3ef13d2b,2
+np.float32,0xbefbcd2a,0xbf09d6a3,2
+np.float32,0x3f6dbe5c,0x3fd30a3e,2
+np.float32,0x3dae63e0,0x3daed03f,2
+np.float32,0xbd5001e0,0xbd502fb9,2
+np.float32,0x3f59632a,0x3fa067c8,2
+np.float32,0x3f0d355a,0x3f1ee452,2
+np.float32,0x3f2cbe5c,0x3f51c896,2
+np.float32,0x3c5e6e80,0x3c5e7200,2
+np.float32,0xbe8ac49c,0xbe8e52f0,2
+np.float32,0x3f54e576,0x3f98c0e6,2
+np.float32,0xbeaa0762,0xbeb0ba7c,2
+np.float32,0x3ec81e88,0x3ed35c21,2
+np.float32,0x3f5a6738,0x3fa23fb6,2
+np.float32,0xbf24a682,0xbf43784a,2
+np.float32,0x1,0x1,2
+np.float32,0x3ee6bc24,0x3ef89630,2
+np.float32,0x3f19444a,0x3f30ecf5,2
+np.float32,0x3ec1fc70,0x3ecc28fc,2
+np.float32,0xbf706e14,0xbfdd92fb,2
+np.float32,0x3eccb630,0x3ed8cd98,2
+np.float32,0xbcdf7aa0,0xbcdf88d3,2
+np.float32,0xbe450da8,0xbe478a8e,2
+np.float32,0x3ec9c210,0x3ed54c0b,2
+np.float32,0xbf3b86ca,0xbf6f24d1,2
+np.float32,0x3edcc7a0,0x3eec3a5c,2
+np.float32,0x3f075d5c,0x3f16a39a,2
+np.float32,0xbf5719ce,0xbf9c69de,2
+np.float32,0x3f62cb22,0x3fb3885a,2
+np.float32,0x3f639216,0x3fb55c93,2
+np.float32,0xbf473ee7,0xbf85413a,2
+np.float32,0xbf01b66c,0xbf0eea86,2
+np.float32,0x3e872d80,0x3e8a74f8,2
+np.float32,0xbf60957e,0xbfae925c,2
+np.float32,0xbf6847b2,0xbfc1929b,2
+np.float32,0x3f78bb94,0x4007b363,2
+np.float32,0xbf47efdb,0xbf8622db,2
+np.float32,0xbe1f2308,0xbe206fd6,2
+np.float32,0xbf414926,0xbf7c0a7e,2
+np.float32,0x3eecc268,0x3f00194d,2
+np.float32,0x3eb086d0,0x3eb81120,2
+np.float32,0xbef1af80,0xbf033ff5,2
+np.float32,0xbf454e56,0xbf82d4aa,2
+np.float32,0x3e622560,0x3e65ef20,2
+np.float32,0x3f50d2b2,0x3f926a83,2
+np.float32,0x3eb2c45c,0x3eba9d2c,2
+np.float32,0x3e42d1a0,0x3e4538c9,2
+np.float32,0xbf24cc5c,0xbf43b8e3,2
+np.float32,0x3e8c6464,0x3e90141a,2
+np.float32,0xbf3abff2,0xbf6d79c5,2
+np.float32,0xbec8f2e6,0xbed456fa,2
+np.float32,0xbf787b38,0xc00698b4,2
+np.float32,0xbf58d5cd,0xbf9f6c03,2
+np.float32,0x3df4ee20,0x3df61ba8,2
+np.float32,0xbf34581e,0xbf604951,2
+np.float32,0xbeba5cf4,0xbec35119,2
+np.float32,0xbf76c22d,0xbfffc51c,2
+np.float32,0x3ef63b2c,0x3f0630b4,2
+np.float32,0x3eeadb64,0x3efdc877,2
+np.float32,0x3dfd8c70,0x3dfedb24,2
+np.float32,0x3f441600,0x3f81576d,2
+np.float32,0x3f23a0d8,0x3f41bbf6,2
+np.float32,0x3cb84d40,0x3cb85536,2
+np.float32,0xbf25cb5c,0xbf456e38,2
+np.float32,0xbc108540,0xbc108636,2
+np.float32,0xbc5b9140,0xbc5b949e,2
+np.float32,0xbf62ff40,0xbfb401dd,2
+np.float32,0x3e8e0710,0x3e91d93e,2
+np.float32,0x3f1b6ae0,0x3f344dfd,2
+np.float32,0xbf4dbbbe,0xbf8dedea,2
+np.float32,0x3f1a5fb2,0x3f32a880,2
+np.float32,0xbe56bd00,0xbe59f8cb,2
+np.float32,0xbf490a5c,0xbf87902d,2
+np.float32,0xbf513072,0xbf92f717,2
+np.float32,0x3e73ee28,0x3e78b542,2
+np.float32,0x3f0a4c7a,0x3f1abf2c,2
+np.float32,0x3e10d5c8,0x3e11d00b,2
+np.float32,0xbf771aac,0xc001207e,2
+np.float32,0x3efe2f54,0x3f0b6a46,2
+np.float32,0xbea5f3ea,0xbeac291f,2
+np.float32,0xbf1a73e8,0xbf32c845,2
+np.float32,0x3ebcc82c,0x3ec61c4f,2
+np.float32,0xbf24f492,0xbf43fd9a,2
+np.float32,0x3ecbd908,0x3ed7c691,2
+np.float32,0x3f461c5e,0x3f83d3f0,2
+np.float32,0x3eed0524,0x3f0043c1,2
+np.float32,0x3d06e840,0x3d06f4bf,2
+np.float32,0x3eb6c974,0x3ebf34d7,2
+np.float32,0xbf1c85e1,0xbf36100f,2
+np.float32,0x3ed697d0,0x3ee4ad04,2
+np.float32,0x3eab0484,0x3eb1d733,2
+np.float32,0xbf3b02f2,0xbf6e0935,2
+np.float32,0xbeeab154,0xbefd9334,2
+np.float32,0xbf695372,0xbfc49881,2
+np.float32,0x3e8aaa7c,0x3e8e36be,2
+np.float32,0xbf208754,0xbf3c8f7b,2
+np.float32,0xbe0dbf28,0xbe0ea9a1,2
+np.float32,0x3ca780c0,0x3ca786ba,2
+np.float32,0xbeb320b4,0xbebb065e,2
+np.float32,0x3f13c698,0x3f288821,2
+np.float32,0xbe8cbbec,0xbe9072c4,2
+np.float32,0x3f1ed534,0x3f39c8df,2
+np.float32,0x3e1ca450,0x3e1de190,2
+np.float32,0x3f54be1c,0x3f988134,2
+np.float32,0x3f34e4ee,0x3f6161b4,2
+np.float32,0xbf7e6913,0xc038b246,2
+np.float32,0x3d3c3f20,0x3d3c6119,2
+np.float32,0x3ca9dc80,0x3ca9e2bc,2
+np.float32,0xbf577ea2,0xbf9d161a,2
+np.float32,0xbedb22c8,0xbeea3644,2
+np.float32,0x3f22a044,0x3f400bfa,2
+np.float32,0xbe214b8c,0xbe22a637,2
+np.float32,0x3e8cd300,0x3e908bbc,2
+np.float32,0xbec4d214,0xbecf7a58,2
+np.float32,0x3e9399a4,0x3e97e7e4,2
+np.float32,0xbee6a1a2,0xbef874ed,2
+np.float32,0xbf323742,0xbf5c1bfd,2
+np.float32,0x3f48b882,0x3f8725ac,2
+np.float32,0xbf4d4dba,0xbf8d532e,2
+np.float32,0xbf59640a,0xbfa0695a,2
+np.float32,0xbf2ad562,0xbf4e4f03,2
+np.float32,0x3e317d98,0x3e334d03,2
+np.float32,0xbf6a5b71,0xbfc7b5a2,2
+np.float32,0x3e87b434,0x3e8b05cf,2
+np.float32,0xbf1c344c,0xbf358dee,2
+np.float32,0x3e449428,0x3e470c65,2
+np.float32,0xbf2c0f2f,0xbf508808,2
+np.float32,0xbec5b5ac,0xbed0859c,2
+np.float32,0xbf4aa956,0xbf89b4b1,2
+np.float32,0x3f6dd374,0x3fd35717,2
+np.float32,0x3f45f76c,0x3f83a5ef,2
+np.float32,0xbed1fba8,0xbedf1bd5,2
+np.float32,0xbd26b2d0,0xbd26ca66,2
+np.float32,0xbe9817c2,0xbe9cd1c3,2
+np.float32,0x3e725988,0x3e770875,2
+np.float32,0xbf1a8ded,0xbf32f132,2
+np.float32,0xbe695860,0xbe6d83d3,2
+np.float32,0x3d8cecd0,0x3d8d25ea,2
+np.float32,0x3f574706,0x3f9cb6ec,2
+np.float32,0xbf5c5a1f,0xbfa5eaf3,2
+np.float32,0x3e7a7c88,0x3e7fab83,2
+np.float32,0xff800000,0xffc00000,2
+np.float32,0x3f66396a,0x3fbbfbb0,2
+np.float32,0x3ed6e588,0x3ee50b53,2
+np.float32,0xbb56d500,0xbb56d532,2
+np.float32,0x3ebd23fc,0x3ec6869a,2
+np.float32,0xbf70d490,0xbfdf4af5,2
+np.float32,0x3e514f88,0x3e544d15,2
+np.float32,0x3e660f98,0x3e6a0dac,2
+np.float32,0xbf034da1,0xbf1110bb,2
+np.float32,0xbf60d9be,0xbfaf2714,2
+np.float32,0x3df67b10,0x3df7ae64,2
+np.float32,0xbeeedc0a,0xbf017010,2
+np.float32,0xbe149224,0xbe15a072,2
+np.float32,0x3f455084,0x3f82d759,2
+np.float32,0x3f210f9e,0x3f3d7093,2
+np.float32,0xbeaea3e0,0xbeb5edd3,2
+np.float32,0x3e0724b0,0x3e07efad,2
+np.float32,0x3f09a784,0x3f19d6ac,2
+np.float32,0xbf044340,0xbf125ee8,2
+np.float32,0xbf71adc9,0xbfe315fe,2
+np.float32,0x3efd3870,0x3f0ac6a8,2
+np.float32,0xbf53c7a6,0xbf96f6df,2
+np.float32,0xbf3cf784,0xbf7247af,2
+np.float32,0x3e0ce9e0,0x3e0dd035,2
+np.float32,0xbd3051a0,0xbd306d89,2
+np.float32,0x3ecab804,0x3ed66f77,2
+np.float32,0x3e984350,0x3e9d0189,2
+np.float32,0x3edd1c00,0x3eeca20b,2
+np.float32,0xbe8e22a0,0xbe91f71b,2
+np.float32,0x3ebebc18,0x3ec85fd6,2
+np.float32,0xba275c00,0xba275c01,2
+np.float32,0x3f1d8190,0x3f37a385,2
+np.float32,0x3f17343e,0x3f2dbbfe,2
+np.float32,0x3caa8000,0x3caa864e,2
+np.float32,0x3e7a7308,0x3e7fa168,2
+np.float32,0x3f7359a6,0x3feb3e1a,2
+np.float32,0xbf7ad15a,0xc012a743,2
+np.float32,0xbf122efb,0xbf262812,2
+np.float32,0xbf03ba04,0xbf11a3fa,2
+np.float32,0x3ed7a90c,0x3ee5f8d4,2
+np.float32,0xbe23e318,0xbe254eed,2
+np.float32,0xbe2866f4,0xbe29f20a,2
+np.float32,0xbeaedff2,0xbeb631d0,2
+np.float32,0x0,0x0,2
+np.float32,0x3ef2a034,0x3f03dafd,2
+np.float32,0x3f35806c,0x3f62994e,2
+np.float32,0xbf655e19,0xbfb9c718,2
+np.float32,0x3f5d54ce,0x3fa7d4f4,2
+np.float32,0x3f33e64a,0x3f5f67e3,2
+np.float32,0x3ebf4010,0x3ec8f923,2
+np.float32,0xbe050dc8,0xbe05cf70,2
+np.float32,0x3f61693e,0x3fb063b0,2
+np.float32,0xbd94ac00,0xbd94ef12,2
+np.float32,0x3e9de008,0x3ea32f61,2
+np.float32,0xbe3d042c,0xbe3f3540,2
+np.float32,0x3e8fdfc0,0x3e93d9e4,2
+np.float32,0x3f28bc48,0x3f4a9019,2
+np.float32,0x3edea928,0x3eee8b09,2
+np.float32,0xbf05f673,0xbf14b362,2
+np.float32,0xbf360730,0xbf63a914,2
+np.float32,0xbe3fb454,0xbe41fe0a,2
+np.float32,0x3f6d99a8,0x3fd28552,2
+np.float32,0xbf3ae866,0xbf6dd052,2
+np.float32,0x3f5b1164,0x3fa37aec,2
+np.float32,0xbf64a451,0xbfb7f61b,2
+np.float32,0xbdd79bd0,0xbdd86919,2
+np.float32,0x3e89fc00,0x3e8d7a85,2
+np.float32,0x3f4bf690,0x3f8b77ea,2
+np.float32,0x3cbdf280,0x3cbdfb38,2
+np.float32,0x3f138f98,0x3f2835b4,2
+np.float32,0xbe33967c,0xbe3576bc,2
+np.float32,0xbf298164,0xbf4bedda,2
+np.float32,0x3e9955cc,0x3e9e2edb,2
+np.float32,0xbf79b383,0xc00c56c0,2
+np.float32,0x3ea0834c,0x3ea61aea,2
+np.float32,0xbf511184,0xbf92c89a,2
+np.float32,0x3f4d9fba,0x3f8dc666,2
+np.float32,0x3f3387c2,0x3f5ead80,2
+np.float32,0x3e3f7360,0x3e41babb,2
+np.float32,0xbf3cc4d6,0xbf71d879,2
+np.float32,0x3f2e4402,0x3f54994e,2
+np.float32,0x3e6a7118,0x3e6eabff,2
+np.float32,0xbf05d83e,0xbf1489cc,2
+np.float32,0xbdce4fd8,0xbdcf039a,2
+np.float32,0xbf03e2f4,0xbf11dbaf,2
+np.float32,0x3f1ea0a0,0x3f397375,2
+np.float32,0x3f7aff54,0x4013cb1b,2
+np.float32,0x3f5ef158,0x3fab1801,2
+np.float32,0xbe33bcc8,0xbe359e40,2
+np.float32,0xbf04dd0e,0xbf133111,2
+np.float32,0xbf14f887,0xbf2a54d1,2
+np.float32,0x3f75c37a,0x3ff9196e,2
+np.float32,0x3f35c3c8,0x3f6320f2,2
+np.float32,0x3f53bb94,0x3f96e3c3,2
+np.float32,0x3f4d473e,0x3f8d4a19,2
+np.float32,0xbdfe19e0,0xbdff6ac9,2
+np.float32,0xbf7f0cc4,0xc049342d,2
+np.float32,0xbdbfc778,0xbdc057bb,2
+np.float32,0xbf7575b7,0xbff73067,2
+np.float32,0xbe9df488,0xbea34609,2
+np.float32,0xbefbd3c6,0xbf09daff,2
+np.float32,0x3f19962c,0x3f316cbd,2
+np.float32,0x3f7acec6,0x40129732,2
+np.float32,0xbf5db7de,0xbfa89a21,2
+np.float32,0x3f62f444,0x3fb3e830,2
+np.float32,0xbf522adb,0xbf94737f,2
+np.float32,0xbef6ceb2,0xbf0690ba,2
+np.float32,0xbf57c41e,0xbf9d8db0,2
+np.float32,0x3eb3360c,0x3ebb1eb0,2
+np.float32,0x3f29327e,0x3f4b618e,2
+np.float32,0xbf08d099,0xbf18a916,2
+np.float32,0x3ea21014,0x3ea7d369,2
+np.float32,0x3f39e516,0x3f6ba861,2
+np.float32,0x3e7c4f28,0x3e80ce08,2
+np.float32,0xbec5a7f8,0xbed07582,2
+np.float32,0xbf0b1b46,0xbf1be3e7,2
+np.float32,0xbef0e0ec,0xbf02bb2e,2
+np.float32,0x3d835a30,0x3d838869,2
+np.float32,0x3f08aa40,0x3f18736e,2
+np.float32,0x3eb0e4c8,0x3eb87bcd,2
+np.float32,0x3eb3821c,0x3ebb7564,2
+np.float32,0xbe3a7320,0xbe3c8d5a,2
+np.float32,0x3e43f8c0,0x3e466b10,2
+np.float32,0x3e914288,0x3e955b69,2
+np.float32,0x3ec7d800,0x3ed308e7,2
+np.float32,0x3e603df8,0x3e63eef2,2
+np.float32,0x3f225cac,0x3f3f9ac6,2
+np.float32,0x3e3db8f0,0x3e3ff06b,2
+np.float32,0x3f358d78,0x3f62b38c,2
+np.float32,0xbed9bd64,0xbee88158,2
+np.float32,0x800000,0x800000,2
+np.float32,0x3f1adfce,0x3f337230,2
+np.float32,0xbefdc346,0xbf0b229d,2
+np.float32,0xbf091018,0xbf190208,2
+np.float32,0xbf800000,0xff800000,2
+np.float32,0x3f27c2c4,0x3f48d8db,2
+np.float32,0x3ef59c80,0x3f05c993,2
+np.float32,0x3e18a340,0x3e19c893,2
+np.float32,0x3f209610,0x3f3ca7c5,2
+np.float32,0x3f69cc22,0x3fc60087,2
+np.float32,0xbf66cf07,0xbfbd8721,2
+np.float32,0xbf768098,0xbffdfcc4,2
+np.float32,0x3df27a40,0x3df39ec4,2
+np.float32,0x3daf5bd0,0x3dafca02,2
+np.float32,0x3f53f2be,0x3f973b41,2
+np.float32,0xbf7edcbc,0xc0436ce3,2
+np.float32,0xbdf61db8,0xbdf74fae,2
+np.float32,0x3e2c9328,0x3e2e3cb2,2
+np.float32,0x3f1a4570,0x3f327f41,2
+np.float32,0xbf766306,0xbffd32f1,2
+np.float32,0xbf468b9d,0xbf845f0f,2
+np.float32,0x3e398970,0x3e3b9bb1,2
+np.float32,0xbbefa900,0xbbefaa18,2
+np.float32,0xbf54c989,0xbf9893ad,2
+np.float32,0x3f262cf6,0x3f46169d,2
+np.float32,0x3f638a8a,0x3fb54a98,2
+np.float32,0xbeb36c78,0xbebb5cb8,2
+np.float32,0xbeac4d42,0xbeb34993,2
+np.float32,0x3f1d1942,0x3f36fbf2,2
+np.float32,0xbf5d49ba,0xbfa7bf07,2
+np.float32,0xbf182b5c,0xbf2f38d0,2
+np.float32,0x3f41a742,0x3f7ce5ef,2
+np.float32,0x3f0b9a6c,0x3f1c9898,2
+np.float32,0x3e847494,0x3e8788f3,2
+np.float32,0xbde41608,0xbde50941,2
+np.float32,0x3f693944,0x3fc44b5a,2
+np.float32,0x3f0386b2,0x3f115e37,2
+np.float32,0x3f3a08b0,0x3f6bf3c1,2
+np.float32,0xbf78ee64,0xc0089977,2
+np.float32,0xbf013a11,0xbf0e436e,2
+np.float32,0x3f00668e,0x3f0d2836,2
+np.float32,0x3e6d9850,0x3e720081,2
+np.float32,0x3eacf578,0x3eb4075d,2
+np.float32,0x3f18aef8,0x3f3004b4,2
+np.float32,0x3de342f0,0x3de43385,2
+np.float32,0x3e56cee8,0x3e5a0b85,2
+np.float32,0xbf287912,0xbf4a1966,2
+np.float32,0x3e92c948,0x3e9704c2,2
+np.float32,0x3c07d080,0x3c07d14c,2
+np.float32,0xbe90f6a0,0xbe9508e0,2
+np.float32,0x3e8b4f28,0x3e8ee884,2
+np.float32,0xbf35b56c,0xbf6303ff,2
+np.float32,0xbef512b8,0xbf057027,2
+np.float32,0x3e36c630,0x3e38c0cd,2
+np.float32,0x3f0b3ca8,0x3f1c134a,2
+np.float32,0x3e4cd610,0x3e4fa2c5,2
+np.float32,0xbf5a8372,0xbfa273a3,2
+np.float32,0xbecaad3c,0xbed662ae,2
+np.float32,0xbec372d2,0xbecddeac,2
+np.float32,0x3f6fb2b2,0x3fda8a22,2
+np.float32,0x3f365f28,0x3f645b5a,2
+np.float32,0xbecd00fa,0xbed926a4,2
+np.float32,0xbebafa32,0xbec40672,2
+np.float32,0xbf235b73,0xbf4146c4,2
+np.float32,0x3f7a4658,0x400f6e2c,2
+np.float32,0x3f35e824,0x3f636a54,2
+np.float32,0x3cb87640,0x3cb87e3c,2
+np.float32,0xbf296288,0xbf4bb6ee,2
+np.float32,0x7f800000,0xffc00000,2
+np.float32,0xbf4de86e,0xbf8e2d1a,2
+np.float32,0xbf4ace12,0xbf89e5f3,2
+np.float32,0x3d65a300,0x3d65e0b5,2
+np.float32,0xbe10c534,0xbe11bf21,2
+np.float32,0xbeba3c1c,0xbec32b3e,2
+np.float32,0x3e87eaf8,0x3e8b40b8,2
+np.float32,0x3d5c3bc0,0x3d5c722d,2
+np.float32,0x3e8c14b8,0x3e8fbdf8,2
+np.float32,0xbf06c6f0,0xbf15d327,2
+np.float32,0xbe0f1e30,0xbe100f96,2
+np.float32,0xbee244b0,0xbef30251,2
+np.float32,0x3f2a21b0,0x3f4d0c1d,2
+np.float32,0xbf5f7f81,0xbfac408e,2
+np.float32,0xbe3dba2c,0xbe3ff1b2,2
+np.float32,0x3f3ffc22,0x3f790abf,2
+np.float32,0x3edc3dac,0x3eeb90fd,2
+np.float32,0x7f7fffff,0xffc00000,2
+np.float32,0x3ecfaaac,0x3edc5485,2
+np.float32,0x3f0affbe,0x3f1bbcd9,2
+np.float32,0x3f5f2264,0x3fab7dca,2
+np.float32,0x3f37394c,0x3f66186c,2
+np.float32,0xbe6b2f6c,0xbe6f74e3,2
+np.float32,0x3f284772,0x3f49c1f1,2
+np.float32,0xbdf27bc8,0xbdf3a051,2
+np.float32,0xbc8b14e0,0xbc8b184c,2
+np.float32,0x3f6a867c,0x3fc83b07,2
+np.float32,0x3f1ec876,0x3f39b429,2
+np.float32,0x3f6fd9a8,0x3fdb28d6,2
+np.float32,0xbf473cca,0xbf853e8c,2
+np.float32,0x3e23eff8,0x3e255c23,2
+np.float32,0x3ebefdfc,0x3ec8ac5d,2
+np.float32,0x3f6c8c22,0x3fced2b1,2
+np.float32,0x3f168388,0x3f2cad44,2
+np.float32,0xbece2410,0xbeda81ac,2
+np.float32,0x3f5532f0,0x3f993eea,2
+np.float32,0x3ef1938c,0x3f032dfa,2
+np.float32,0xbef05268,0xbf025fba,2
+np.float32,0x3f552e4a,0x3f993754,2
+np.float32,0x3e9ed068,0x3ea4392d,2
+np.float32,0xbe1a0c24,0xbe1b39be,2
+np.float32,0xbf2623aa,0xbf46068c,2
+np.float32,0xbe1cc300,0xbe1e00fc,2
+np.float32,0xbe9c0576,0xbea12397,2
+np.float32,0xbd827338,0xbd82a07e,2
+np.float32,0x3f0fc31a,0x3f229786,2
+np.float32,0x3e577810,0x3e5abc7d,2
+np.float32,0x3e0e1cb8,0x3e0f0906,2
+np.float32,0x3e84d344,0x3e87ee73,2
+np.float32,0xbf39c45e,0xbf6b6337,2
+np.float32,0x3edfb25c,0x3eefd273,2
+np.float32,0x3e016398,0x3e021596,2
+np.float32,0xbefeb1be,0xbf0bc0de,2
+np.float32,0x3f37e104,0x3f677196,2
+np.float32,0x3f545316,0x3f97d500,2
+np.float32,0xbefc165a,0xbf0a06ed,2
+np.float32,0xbf0923e6,0xbf191dcd,2
+np.float32,0xbf386508,0xbf68831f,2
+np.float32,0xbf3d4630,0xbf72f4e1,2
+np.float32,0x3f3dbe82,0x3f73ff13,2
+np.float32,0xbf703de4,0xbfdcc7e2,2
+np.float32,0xbf531482,0xbf95dd1a,2
+np.float32,0xbf0af1b6,0xbf1ba8f4,2
+np.float32,0xbec8fd9c,0xbed463a4,2
+np.float32,0xbe230320,0xbe24691a,2
+np.float32,0xbf7de541,0xc02faf38,2
+np.float32,0x3efd2360,0x3f0ab8b7,2
+np.float32,0x3db7f350,0x3db87291,2
+np.float32,0x3e74c510,0x3e799924,2
+np.float32,0x3da549c0,0x3da5a5fc,2
+np.float32,0x3e8a3bc4,0x3e8dbf4a,2
+np.float32,0xbf69f086,0xbfc66e84,2
+np.float32,0x3f323f8e,0x3f5c2c17,2
+np.float32,0x3ec0ae3c,0x3ecaa334,2
+np.float32,0xbebe8966,0xbec824fc,2
+np.float32,0x3f34691e,0x3f606b13,2
+np.float32,0x3f13790e,0x3f2813f5,2
+np.float32,0xbf61c027,0xbfb12618,2
+np.float32,0x3e90c690,0x3e94d4a1,2
+np.float32,0xbefce8f0,0xbf0a920e,2
+np.float32,0xbf5c0e8a,0xbfa559a7,2
+np.float32,0x3f374f60,0x3f6645b6,2
+np.float32,0x3f25f6fa,0x3f45b967,2
+np.float32,0x3f2421aa,0x3f42963a,2
+np.float32,0x3ebfa328,0x3ec96c57,2
+np.float32,0x3e3bef28,0x3e3e1685,2
+np.float32,0x3ea3fa3c,0x3ea9f4dd,2
+np.float32,0x3f362b8e,0x3f63f2b2,2
+np.float32,0xbedcef18,0xbeec6ada,2
+np.float32,0xbdd29c88,0xbdd35bd0,2
+np.float32,0x3f261aea,0x3f45f76f,2
+np.float32,0xbe62c470,0xbe66965e,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0xbee991aa,0xbefc277b,2
+np.float32,0xbf571960,0xbf9c6923,2
+np.float32,0xbe6fb410,0xbe743b41,2
+np.float32,0x3eb1bed0,0x3eb9738d,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x3eddcbe4,0x3eed7a69,2
+np.float32,0xbf2a81ba,0xbf4db86d,2
+np.float32,0x3f74da54,0x3ff38737,2
+np.float32,0xbeb6bff4,0xbebf29f4,2
+np.float32,0x3f445752,0x3f81a698,2
+np.float32,0x3ed081b4,0x3edd5618,2
+np.float32,0xbee73802,0xbef931b4,2
+np.float32,0xbd13f2a0,0xbd14031c,2
+np.float32,0xbb4d1200,0xbb4d122c,2
+np.float32,0xbee8777a,0xbefac393,2
+np.float32,0x3f42047c,0x3f7dc06c,2
+np.float32,0xbd089270,0xbd089f67,2
+np.float32,0xbf628c16,0xbfb2f66b,2
+np.float32,0x3e72e098,0x3e77978d,2
+np.float32,0x3ed967cc,0x3ee818e4,2
+np.float32,0x3e284c80,0x3e29d6d9,2
+np.float32,0x3f74e8ba,0x3ff3dbef,2
+np.float32,0x3f013e86,0x3f0e4969,2
+np.float32,0xbf610d4f,0xbfaf983c,2
+np.float32,0xbf3c8d36,0xbf715eba,2
+np.float32,0xbedbc756,0xbeeaffdb,2
+np.float32,0x3e143ec8,0x3e154b4c,2
+np.float32,0xbe1c9808,0xbe1dd4fc,2
+np.float32,0xbe887a1e,0xbe8bdac5,2
+np.float32,0xbe85c4bc,0xbe88f17a,2
+np.float32,0x3f35967e,0x3f62c5b4,2
+np.float32,0x3ea2c4a4,0x3ea89c2d,2
+np.float32,0xbc8703c0,0xbc8706e1,2
+np.float32,0xbf13d52c,0xbf289dff,2
+np.float32,0xbf63bb56,0xbfb5bf29,2
+np.float32,0xbf61c5ef,0xbfb13319,2
+np.float32,0xbf128410,0xbf26a675,2
+np.float32,0x3f03fcf2,0x3f11ff13,2
+np.float32,0xbe49c924,0xbe4c75cd,2
+np.float32,0xbf211a9c,0xbf3d82c5,2
+np.float32,0x3f7e9d52,0x403d1b42,2
+np.float32,0x3edfefd4,0x3ef01e71,2
+np.float32,0x3ebc5bd8,0x3ec59efb,2
+np.float32,0x3d7b02e0,0x3d7b537f,2
+np.float32,0xbf1163ba,0xbf24fb43,2
+np.float32,0x3f5072f2,0x3f91dbf1,2
+np.float32,0xbee700ce,0xbef8ec60,2
+np.float32,0x3f534168,0x3f962359,2
+np.float32,0x3e6d6c40,0x3e71d1ef,2
+np.float32,0x3def9d70,0x3df0b7a8,2
+np.float32,0x3e89cf80,0x3e8d4a8a,2
+np.float32,0xbf687ca7,0xbfc2290f,2
+np.float32,0x3f35e134,0x3f635c51,2
+np.float32,0x3e59eef8,0x3e5d50fa,2
+np.float32,0xbf65c9e1,0xbfbada61,2
+np.float32,0xbf759292,0xbff7e43d,2
+np.float32,0x3f4635a0,0x3f83f372,2
+np.float32,0x3f29baaa,0x3f4c53f1,2
+np.float32,0x3f6b15a6,0x3fc9fe04,2
+np.float32,0x3edabc88,0x3ee9b922,2
+np.float32,0x3ef382e0,0x3f046d4d,2
+np.float32,0xbe351310,0xbe36ff7f,2
+np.float32,0xbf05c935,0xbf14751c,2
+np.float32,0xbf0e7c50,0xbf20bc24,2
+np.float32,0xbf69bc94,0xbfc5d1b8,2
+np.float32,0xbed41aca,0xbee1aa23,2
+np.float32,0x3f518c08,0x3f938162,2
+np.float32,0xbf3d7974,0xbf73661a,2
+np.float32,0x3f1951a6,0x3f3101c9,2
+np.float32,0xbeb3f436,0xbebbf787,2
+np.float32,0xbf77a190,0xc0031d43,2
+np.float32,0x3eb5b3cc,0x3ebdf6e7,2
+np.float32,0xbed534b4,0xbee2fed2,2
+np.float32,0xbe53e1b8,0xbe56fc56,2
+np.float32,0x3f679e20,0x3fbfb91c,2
+np.float32,0xff7fffff,0xffc00000,2
+np.float32,0xbf7b9bcb,0xc0180073,2
+np.float32,0xbf5635e8,0xbf9aea15,2
+np.float32,0xbe5a3318,0xbe5d9856,2
+np.float32,0xbe003284,0xbe00df9a,2
+np.float32,0x3eb119a4,0x3eb8b7d6,2
+np.float32,0xbf3bccf8,0xbf6fbc84,2
+np.float32,0x3f36f600,0x3f658ea8,2
+np.float32,0x3f1ea834,0x3f397fc2,2
+np.float32,0xbe7cfb54,0xbe8129b3,2
+np.float32,0xbe9b3746,0xbea0406a,2
+np.float32,0x3edc0f90,0x3eeb586c,2
+np.float32,0x3e1842e8,0x3e19660c,2
+np.float32,0xbd8f10b0,0xbd8f4c70,2
+np.float32,0xbf064aca,0xbf1527a2,2
+np.float32,0x3e632e58,0x3e6705be,2
+np.float32,0xbef28ba4,0xbf03cdbb,2
+np.float32,0x3f27b21e,0x3f48bbaf,2
+np.float32,0xbe6f30d4,0xbe73b06e,2
+np.float32,0x3f3e6cb0,0x3f75834b,2
+np.float32,0xbf264aa5,0xbf4649f0,2
+np.float32,0xbf690775,0xbfc3b978,2
+np.float32,0xbf3e4a38,0xbf753632,2
+np.float64,0x3fe12bbe8c62577e,0x3fe32de8e5f961b0,2
+np.float64,0x3fc9b8909b337120,0x3fca1366da00efff,2
+np.float64,0x3feaee4245f5dc84,0x3ff3a011ea0432f3,2
+np.float64,0xbfe892c000f12580,0xbff03e5adaed6f0c,2
+np.float64,0xbf9be8de4837d1c0,0xbf9beaa367756bd1,2
+np.float64,0x3fe632e58fec65cc,0x3feb5ccc5114ca38,2
+np.float64,0x3fe78a0ef7ef141e,0x3fee1b4521d8eb6c,2
+np.float64,0x3feec27a65fd84f4,0x3fff643c8318e81e,2
+np.float64,0x3fbed6efce3dade0,0x3fbefd76cff00111,2
+np.float64,0xbfe3a05fab6740c0,0xbfe6db078aeeb0ca,2
+np.float64,0x3fdca11a56b94234,0x3fdece9e6eacff1b,2
+np.float64,0x3fe0fb15aae1f62c,0x3fe2e9e095ec2089,2
+np.float64,0x3fede12abf7bc256,0x3ffafd0ff4142807,2
+np.float64,0x3feb919edcf7233e,0x3ff4c9aa0bc2432f,2
+np.float64,0x3fd39633b5a72c68,0x3fd43c2e6d5f441c,2
+np.float64,0x3fd9efcbfeb3df98,0x3fdb83f03e58f91c,2
+np.float64,0x3fe2867a36650cf4,0x3fe525858c8ce72e,2
+np.float64,0x3fdacbb8f3b59770,0x3fdc8cd431b6e3ff,2
+np.float64,0x3fcc120503382408,0x3fcc88a8fa43e1c6,2
+np.float64,0xbfd99ff4eab33fea,0xbfdb24a20ae3687d,2
+np.float64,0xbfe8caf0157195e0,0xbff083b8dd0941d3,2
+np.float64,0x3fddc9bf92bb9380,0x3fe022aac0f761d5,2
+np.float64,0x3fe2dbb66e65b76c,0x3fe5a6e7caf3f1f2,2
+np.float64,0x3fe95f5c4a72beb8,0x3ff1444697e96138,2
+np.float64,0xbfc6b163d92d62c8,0xbfc6ef6e006658a1,2
+np.float64,0x3fdf1b2616be364c,0x3fe0fcbd2848c9e8,2
+np.float64,0xbfdca1ccf7b9439a,0xbfdecf7dc0eaa663,2
+np.float64,0x3fe078d6a260f1ae,0x3fe236a7c66ef6c2,2
+np.float64,0x3fdf471bb9be8e38,0x3fe11990ec74e704,2
+np.float64,0xbfe417626be82ec5,0xbfe79c9aa5ed2e2f,2
+np.float64,0xbfeb9cf5677739eb,0xbff4dfc24c012c90,2
+np.float64,0x3f8d9142b03b2280,0x3f8d91c9559d4779,2
+np.float64,0x3fb052c67220a590,0x3fb05873c90d1cd6,2
+np.float64,0x3fd742e2c7ae85c4,0x3fd860128947d15d,2
+np.float64,0x3fec2e2a2bf85c54,0x3ff60eb554bb8d71,2
+np.float64,0xbfeb2b8bc8f65718,0xbff40b734679497a,2
+np.float64,0x3fe25f8e0d64bf1c,0x3fe4eb381d077803,2
+np.float64,0x3fe56426256ac84c,0x3fe9dafbe79370f0,2
+np.float64,0x3feecc1e5d7d983c,0x3fffa49bedc7aa25,2
+np.float64,0xbfc88ce94b3119d4,0xbfc8dbba0fdee2d2,2
+np.float64,0xbfabcf51ac379ea0,0xbfabd6552aa63da3,2
+np.float64,0xbfccc8b849399170,0xbfcd48d6ff057a4d,2
+np.float64,0x3fd2f831e8a5f064,0x3fd38e67b0dda905,2
+np.float64,0x3fcafdcd6135fb98,0x3fcb670ae2ef4d36,2
+np.float64,0x3feda6042efb4c08,0x3ffa219442ac4ea5,2
+np.float64,0x3fed382b157a7056,0x3ff8bc01bc6d10bc,2
+np.float64,0x3fed858a50fb0b14,0x3ff9b1c05cb6cc0f,2
+np.float64,0x3fcc3960653872c0,0x3fccb2045373a3d1,2
+np.float64,0xbfec5177e478a2f0,0xbff65eb4557d94eb,2
+np.float64,0x3feafe0d5e75fc1a,0x3ff3bb4a260a0dcb,2
+np.float64,0x3fe08bc87ee11790,0x3fe25078aac99d31,2
+np.float64,0xffefffffffffffff,0xfff8000000000000,2
+np.float64,0x3f79985ce0333100,0x3f799872b591d1cb,2
+np.float64,0xbfd4001cf9a8003a,0xbfd4b14b9035b94f,2
+np.float64,0x3fe54a17e6ea9430,0x3fe9ac0f18682343,2
+np.float64,0xbfb4e07fea29c100,0xbfb4ec6520dd0689,2
+np.float64,0xbfed2b6659fa56cd,0xbff895ed57dc1450,2
+np.float64,0xbfe81fc8b5f03f92,0xbfef6b95e72a7a7c,2
+np.float64,0xbfe6aced16ed59da,0xbfec4ce131ee3704,2
+np.float64,0xbfe599f30ceb33e6,0xbfea3d07c1cd78e2,2
+np.float64,0xbfe0ff278b61fe4f,0xbfe2ef8b5efa89ed,2
+np.float64,0xbfe3e9406467d281,0xbfe750e43e841736,2
+np.float64,0x3fcc6b52cf38d6a8,0x3fcce688f4fb2cf1,2
+np.float64,0xbfc890e8133121d0,0xbfc8dfdfee72d258,2
+np.float64,0x3fe46e81dbe8dd04,0x3fe82e09783811a8,2
+np.float64,0x3fd94455e5b288ac,0x3fdab7cef2de0b1f,2
+np.float64,0xbfe82151fff042a4,0xbfef6f254c9696ca,2
+np.float64,0x3fcee1ac1d3dc358,0x3fcf80a6ed07070a,2
+np.float64,0x3fcce8f90939d1f0,0x3fcd6ad18d34f8b5,2
+np.float64,0x3fd6afe56fad5fcc,0x3fd7b7567526b1fb,2
+np.float64,0x3fb1a77092234ee0,0x3fb1ae9fe0d176fc,2
+np.float64,0xbfeb758b0d76eb16,0xbff493d105652edc,2
+np.float64,0xbfb857c24e30af88,0xbfb86aa4da3be53f,2
+np.float64,0x3fe89064eff120ca,0x3ff03b7c5b3339a8,2
+np.float64,0xbfc1bd2fef237a60,0xbfc1da99893473ed,2
+np.float64,0xbfe5ad6e2eeb5adc,0xbfea60ed181b5c05,2
+np.float64,0x3fd5a66358ab4cc8,0x3fd6899e640aeb1f,2
+np.float64,0xbfe198e832e331d0,0xbfe3c8c9496d0de5,2
+np.float64,0xbfdaa5c0d7b54b82,0xbfdc5ed7d3c5ce49,2
+np.float64,0x3fcceccb6939d998,0x3fcd6ed88c2dd3a5,2
+np.float64,0xbfe44413eae88828,0xbfe7e6cd32b34046,2
+np.float64,0xbfc7cbeccf2f97d8,0xbfc8139a2626edae,2
+np.float64,0x3fbf31e4fa3e63d0,0x3fbf59c6e863255e,2
+np.float64,0x3fdf03fa05be07f4,0x3fe0ed953f7989ad,2
+np.float64,0x3fe7f4eaceefe9d6,0x3fef092ca7e2ac39,2
+np.float64,0xbfc084e9d92109d4,0xbfc09ca10fd6aaea,2
+np.float64,0xbf88cfbf70319f80,0xbf88d00effa6d897,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0xbfa0176e9c202ee0,0xbfa018ca0a6ceef3,2
+np.float64,0xbfd88d0815b11a10,0xbfd9dfc6c6bcbe4e,2
+np.float64,0x3fe89f7730713eee,0x3ff04de52fb536f3,2
+np.float64,0xbfedc9707bfb92e1,0xbffaa25fcf9dd6da,2
+np.float64,0x3fe936d1a6726da4,0x3ff10e40c2d94bc9,2
+np.float64,0x3fdb64aec7b6c95c,0x3fdd473177317b3f,2
+np.float64,0xbfee4f9aaefc9f35,0xbffcdd212667003c,2
+np.float64,0x3fe3730067e6e600,0x3fe692b0a0babf5f,2
+np.float64,0xbfc257e58924afcc,0xbfc27871f8c218d7,2
+np.float64,0x3fe62db12dec5b62,0x3feb52c61b97d9f6,2
+np.float64,0xbfe3ff491367fe92,0xbfe774f1b3a96fd6,2
+np.float64,0x3fea43255274864a,0x3ff28b0c4b7b8d21,2
+np.float64,0xbfea37923c746f24,0xbff27962159f2072,2
+np.float64,0x3fcd0ac3c73a1588,0x3fcd8e6f8de41755,2
+np.float64,0xbfdccafde6b995fc,0xbfdf030fea8a0630,2
+np.float64,0x3fdba35268b746a4,0x3fdd94094f6f50c1,2
+np.float64,0x3fc68ea1d92d1d40,0x3fc6cb8d07cbb0e4,2
+np.float64,0xbfb88b1f6e311640,0xbfb89e7af4e58778,2
+np.float64,0xbfedc7cadffb8f96,0xbffa9c3766227956,2
+np.float64,0x3fe7928d3eef251a,0x3fee2dcf2ac7961b,2
+np.float64,0xbfeff42ede7fe85e,0xc00cef6b0f1e8323,2
+np.float64,0xbfebf07fa477e0ff,0xbff5893f99e15236,2
+np.float64,0x3fe3002ab9660056,0x3fe5defba550c583,2
+np.float64,0x3feb8f4307f71e86,0x3ff4c517ec8d6de9,2
+np.float64,0x3fd3c16f49a782e0,0x3fd46becaacf74da,2
+np.float64,0x3fc7613df12ec278,0x3fc7a52b2a3c3368,2
+np.float64,0xbfe33af560e675eb,0xbfe63a6528ff1587,2
+np.float64,0xbfde86495abd0c92,0xbfe09bd7ba05b461,2
+np.float64,0x3fe1e7fb4ee3cff6,0x3fe43b04311c0ab6,2
+np.float64,0xbfc528b6bd2a516c,0xbfc55ae0a0c184c8,2
+np.float64,0xbfd81025beb0204c,0xbfd94dd72d804613,2
+np.float64,0x10000000000000,0x10000000000000,2
+np.float64,0x3fc1151c47222a38,0x3fc12f5aad80a6bf,2
+np.float64,0x3feafa136775f426,0x3ff3b46854da0b3a,2
+np.float64,0x3fed2da0747a5b40,0x3ff89c85b658459e,2
+np.float64,0x3fda2a4b51b45498,0x3fdbca0d908ddbbd,2
+np.float64,0xbfd04cf518a099ea,0xbfd0aae0033b9e4c,2
+np.float64,0xbfb9065586320ca8,0xbfb91adb7e31f322,2
+np.float64,0xbfd830b428b06168,0xbfd973ca3c484d8d,2
+np.float64,0x3fc952f7ed32a5f0,0x3fc9a9994561fc1a,2
+np.float64,0xbfeb06c83c760d90,0xbff3ca77b326df20,2
+np.float64,0xbfeb1c98ac763931,0xbff3f0d0900f6149,2
+np.float64,0x3fdf061dbebe0c3c,0x3fe0eefb32b48d17,2
+np.float64,0xbf9acbaf28359760,0xbf9acd4024be9fec,2
+np.float64,0x3fec0adde2f815bc,0x3ff5c1628423794d,2
+np.float64,0xbfc4bc750d2978ec,0xbfc4eba43f590b94,2
+np.float64,0x3fdbe47878b7c8f0,0x3fdde44a2b500d73,2
+np.float64,0x3fe160d18162c1a4,0x3fe378cff08f18f0,2
+np.float64,0x3fc3b58dfd276b18,0x3fc3de01d3802de9,2
+np.float64,0x3fa860343430c060,0x3fa864ecd07ec962,2
+np.float64,0x3fcaebfb4b35d7f8,0x3fcb546512d1b4c7,2
+np.float64,0x3fe3fda558e7fb4a,0x3fe772412e5776de,2
+np.float64,0xbfe8169f2c702d3e,0xbfef5666c9a10f6d,2
+np.float64,0x3feda78e9efb4f1e,0x3ffa270712ded769,2
+np.float64,0xbfda483161b49062,0xbfdbedfbf2e850ba,2
+np.float64,0x3fd7407cf3ae80f8,0x3fd85d4f52622743,2
+np.float64,0xbfd63de4d4ac7bca,0xbfd73550a33e3c32,2
+np.float64,0xbfd9c30b90b38618,0xbfdb4e7695c856f3,2
+np.float64,0x3fcd70c00b3ae180,0x3fcdfa0969e0a119,2
+np.float64,0x3feb4f127f769e24,0x3ff44bf42514e0f4,2
+np.float64,0xbfec1db44af83b69,0xbff5ea54aed1f8e9,2
+np.float64,0x3fd68ff051ad1fe0,0x3fd792d0ed6d6122,2
+np.float64,0x3fe0a048a5614092,0x3fe26c80a826b2a2,2
+np.float64,0x3fd59f3742ab3e70,0x3fd6818563fcaf80,2
+np.float64,0x3fca26ecf9344dd8,0x3fca867ceb5d7ba8,2
+np.float64,0x3fdc1d547ab83aa8,0x3fde2a9cea866484,2
+np.float64,0xbfc78df6312f1bec,0xbfc7d3719b698a39,2
+np.float64,0x3fe754e72b6ea9ce,0x3feda89ea844a2e5,2
+np.float64,0x3fe740c1a4ee8184,0x3fed7dc56ec0c425,2
+np.float64,0x3fe77566a9eeeace,0x3fedee6f408df6de,2
+np.float64,0xbfbbf5bf8e37eb80,0xbfbc126a223781b4,2
+np.float64,0xbfe0acb297615965,0xbfe27d86681ca2b5,2
+np.float64,0xbfc20a0487241408,0xbfc228f5f7d52ce8,2
+np.float64,0xfff0000000000000,0xfff8000000000000,2
+np.float64,0x3fef98a4dbff314a,0x40043cfb60bd46fa,2
+np.float64,0x3fd059102ca0b220,0x3fd0b7d2be6d7822,2
+np.float64,0x3fe89f18a1f13e32,0x3ff04d714bbbf400,2
+np.float64,0x3fd45b6275a8b6c4,0x3fd516a44a276a4b,2
+np.float64,0xbfe04463e86088c8,0xbfe1ef9dfc9f9a53,2
+np.float64,0xbfe086e279610dc5,0xbfe249c9c1040a13,2
+np.float64,0x3f89c9b110339380,0x3f89ca0a641454b5,2
+np.float64,0xbfb5f5b4322beb68,0xbfb6038dc3fd1516,2
+np.float64,0x3fe6eae76f6dd5ce,0x3feccabae04d5c14,2
+np.float64,0x3fa9ef6c9c33dee0,0x3fa9f51c9a8c8a2f,2
+np.float64,0xbfe171b45f62e368,0xbfe390ccc4c01bf6,2
+np.float64,0x3fb2999442253330,0x3fb2a1fc006804b5,2
+np.float64,0x3fd124bf04a24980,0x3fd1927abb92472d,2
+np.float64,0xbfe6e05938edc0b2,0xbfecb519ba78114f,2
+np.float64,0x3fed466ee6fa8cde,0x3ff8e75405b50490,2
+np.float64,0xbfb999aa92333358,0xbfb9afa4f19f80a2,2
+np.float64,0xbfe98969ed7312d4,0xbff17d887b0303e7,2
+np.float64,0x3fe782843e6f0508,0x3fee0adbeebe3486,2
+np.float64,0xbfe232fcc26465fa,0xbfe4a90a68d46040,2
+np.float64,0x3fd190a90fa32154,0x3fd206f56ffcdca2,2
+np.float64,0xbfc4f8b75929f170,0xbfc5298b2d4e7740,2
+np.float64,0xbfba3a63d63474c8,0xbfba520835c2fdc2,2
+np.float64,0xbfb7708eea2ee120,0xbfb781695ec17846,2
+np.float64,0x3fed9fb7a5fb3f70,0x3ffa0b717bcd1609,2
+np.float64,0xbfc1b158cd2362b0,0xbfc1ce87345f3473,2
+np.float64,0x3f963478082c6900,0x3f96355c3000953b,2
+np.float64,0x3fc5050e532a0a20,0x3fc536397f38f616,2
+np.float64,0x3fe239f9eee473f4,0x3fe4b360da3b2faa,2
+np.float64,0xbfd66bd80eacd7b0,0xbfd769a29fd784c0,2
+np.float64,0x3fc57cdad52af9b8,0x3fc5b16b937f5f72,2
+np.float64,0xbfd3c36a0aa786d4,0xbfd46e1cd0b4eddc,2
+np.float64,0x3feff433487fe866,0x400cf0ea1def3161,2
+np.float64,0xbfed5577807aaaef,0xbff915e8f6bfdf22,2
+np.float64,0xbfca0dd3eb341ba8,0xbfca6c4d11836cb6,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xbf974deaa82e9be0,0xbf974ef26a3130d1,2
+np.float64,0xbfe7f425e1efe84c,0xbfef076cb00d649d,2
+np.float64,0xbfe4413605e8826c,0xbfe7e20448b8a4b1,2
+np.float64,0xbfdfad202cbf5a40,0xbfe15cd9eb2be707,2
+np.float64,0xbfe43261ee6864c4,0xbfe7c952c951fe33,2
+np.float64,0xbfec141225782824,0xbff5d54d33861d98,2
+np.float64,0x3fd0f47abaa1e8f4,0x3fd15e8691a7f1c2,2
+np.float64,0x3fd378f0baa6f1e0,0x3fd41bea4a599081,2
+np.float64,0xbfb52523462a4a48,0xbfb5317fa7f436e2,2
+np.float64,0x3fcb30797d3660f0,0x3fcb9c174ea401ff,2
+np.float64,0xbfd48480dea90902,0xbfd5446e02c8b329,2
+np.float64,0xbfee4ae3ab7c95c7,0xbffcc650340ba274,2
+np.float64,0xbfeab086d075610e,0xbff3387f4e83ae26,2
+np.float64,0x3fa17cddf422f9c0,0x3fa17e9bf1b25736,2
+np.float64,0xbfe3064536e60c8a,0xbfe5e86aa5244319,2
+np.float64,0x3feb2882c5765106,0x3ff40604c7d97d44,2
+np.float64,0xbfa6923ff42d2480,0xbfa695ff57b2fc3f,2
+np.float64,0xbfa8bdbdcc317b80,0xbfa8c2ada0d94aa7,2
+np.float64,0x3fe7f16b8e6fe2d8,0x3fef013948c391a6,2
+np.float64,0x3fe4e7169f69ce2e,0x3fe8fceef835050a,2
+np.float64,0x3fed877638fb0eec,0x3ff9b83694127959,2
+np.float64,0xbfe0cc9ecf61993e,0xbfe2a978234cbde5,2
+np.float64,0xbfe977e79672efcf,0xbff16589ea494a38,2
+np.float64,0xbfe240130ae48026,0xbfe4bc69113e0d7f,2
+np.float64,0x3feb1e9b70763d36,0x3ff3f4615938a491,2
+np.float64,0xbfdf197dfcbe32fc,0xbfe0fba78a0fc816,2
+np.float64,0xbfee0f8543fc1f0a,0xbffbb9d9a4ee5387,2
+np.float64,0x3fe88d2191f11a44,0x3ff037843b5b6313,2
+np.float64,0xbfd11bb850a23770,0xbfd188c1cef40007,2
+np.float64,0xbfa1b36e9c2366e0,0xbfa1b53d1d8a8bc4,2
+np.float64,0xbfea2d70d9f45ae2,0xbff26a0629e36b3e,2
+np.float64,0xbfd9188703b2310e,0xbfda83f9ddc18348,2
+np.float64,0xbfee194894fc3291,0xbffbe3c83b61e7cb,2
+np.float64,0xbfe093b4a9e1276a,0xbfe25b4ad6f8f83d,2
+np.float64,0x3fea031489f4062a,0x3ff22accc000082e,2
+np.float64,0xbfc6c0827b2d8104,0xbfc6ff0a94326381,2
+np.float64,0x3fef5cd340feb9a6,0x4002659c5a1b34af,2
+np.float64,0x8010000000000000,0x8010000000000000,2
+np.float64,0x3fd97cb533b2f96c,0x3fdafab28aaae8e3,2
+np.float64,0x3fe2123334642466,0x3fe478bd83a8ce02,2
+np.float64,0xbfd9a69637b34d2c,0xbfdb2c87c6b6fb8c,2
+np.float64,0x3fc58def7f2b1be0,0x3fc5c2ff724a9f61,2
+np.float64,0xbfedd5da1f7babb4,0xbffad15949b7fb22,2
+np.float64,0x3fe90e92a0721d26,0x3ff0d9b64323efb8,2
+np.float64,0x3fd34b9442a69728,0x3fd3e9f8fe80654e,2
+np.float64,0xbfc5f509ab2bea14,0xbfc62d2ad325c59f,2
+np.float64,0x3feb245634f648ac,0x3ff3fe91a46acbe1,2
+np.float64,0x3fd101e539a203cc,0x3fd16cf52ae6d203,2
+np.float64,0xbfc51e9ba72a3d38,0xbfc5507d00521ba3,2
+np.float64,0x3fe5fe1683ebfc2e,0x3feaf7dd8b1f92b0,2
+np.float64,0x3fc362e59126c5c8,0x3fc389601814170b,2
+np.float64,0x3fea34dbd77469b8,0x3ff27542eb721e7e,2
+np.float64,0xbfc13ed241227da4,0xbfc159d42c0a35a9,2
+np.float64,0xbfe6df118cedbe23,0xbfecb27bb5d3f784,2
+np.float64,0x3fd92895f6b2512c,0x3fda96f5f94b625e,2
+np.float64,0xbfe7ea3aa76fd476,0xbfeef0e93939086e,2
+np.float64,0xbfc855498330aa94,0xbfc8a1ff690c9533,2
+np.float64,0x3fd9f27b3ab3e4f8,0x3fdb8726979afc3b,2
+np.float64,0x3fc65d52232cbaa8,0x3fc698ac4367afba,2
+np.float64,0x3fd1271dd0a24e3c,0x3fd195087649d54e,2
+np.float64,0xbfe983445df30689,0xbff175158b773b90,2
+np.float64,0xbfe0d9b13261b362,0xbfe2bb8908fc9e6e,2
+np.float64,0x3fd7671f2aaece40,0x3fd889dccbf21629,2
+np.float64,0x3fe748aebfee915e,0x3fed8e970d94c17d,2
+np.float64,0x3fea756e4e74eadc,0x3ff2d947ef3a54f4,2
+np.float64,0x3fde22311cbc4464,0x3fe05b4ce9df1fdd,2
+np.float64,0x3fe2b55ec1e56abe,0x3fe56c6849e3985a,2
+np.float64,0x3fed7b47437af68e,0x3ff98f8e82de99a0,2
+np.float64,0x3fec8184b179030a,0x3ff6d03aaf0135ba,2
+np.float64,0x3fc9ea825533d508,0x3fca4776d7190e71,2
+np.float64,0xbfe8ddd58b71bbab,0xbff09b770ed7bc9a,2
+np.float64,0xbfed41741bfa82e8,0xbff8d81c2a9fc615,2
+np.float64,0x3fe0a73888e14e72,0x3fe27602ad9a3726,2
+np.float64,0xbfe9d0a565f3a14b,0xbff1e1897b628f66,2
+np.float64,0x3fda12b381b42568,0x3fdbadbec22fbd5a,2
+np.float64,0x3fef0081187e0102,0x4000949eff8313c2,2
+np.float64,0x3fef6942b67ed286,0x4002b7913eb1ee76,2
+np.float64,0x3fda10f882b421f0,0x3fdbababa2d6659d,2
+np.float64,0x3fe5828971eb0512,0x3fea122b5088315a,2
+np.float64,0x3fe9d4b53ff3a96a,0x3ff1e75c148bda01,2
+np.float64,0x3fe95d246bf2ba48,0x3ff1414a61a136ec,2
+np.float64,0x3f9e575eb83caec0,0x3f9e59a4f17179e3,2
+np.float64,0x3fdb0a20b5b61440,0x3fdcd8a56178a17f,2
+np.float64,0xbfdef425e3bde84c,0xbfe0e33eeacf3861,2
+np.float64,0x3fd6afcf6bad5fa0,0x3fd7b73d47288347,2
+np.float64,0x3fe89256367124ac,0x3ff03dd9f36ce40e,2
+np.float64,0x3fe7e560fcefcac2,0x3feee5ef8688b60b,2
+np.float64,0x3fedef55e1fbdeac,0x3ffb350ee1df986b,2
+np.float64,0xbfe44b926de89725,0xbfe7f3539910c41f,2
+np.float64,0x3fc58310f32b0620,0x3fc5b7cfdba15bd0,2
+np.float64,0x3f736d256026da00,0x3f736d2eebe91a90,2
+np.float64,0x3feb012d2076025a,0x3ff3c0b5d21a7259,2
+np.float64,0xbfe466a6c468cd4e,0xbfe820c9c197601f,2
+np.float64,0x3fe1aba8aa635752,0x3fe3e3b73920f64c,2
+np.float64,0x3fe5597c336ab2f8,0x3fe9c7bc4b765b15,2
+np.float64,0x3fe1004ac5e20096,0x3fe2f12116e99821,2
+np.float64,0x3fecbc67477978ce,0x3ff76377434dbdad,2
+np.float64,0x3fe0e64515e1cc8a,0x3fe2ccf5447c1579,2
+np.float64,0x3febcfa874f79f50,0x3ff54528f0822144,2
+np.float64,0x3fc36915ed26d228,0x3fc38fb5b28d3f72,2
+np.float64,0xbfe01213e5e02428,0xbfe1ac0e1e7418f1,2
+np.float64,0x3fcd97875b3b2f10,0x3fce22fe3fc98702,2
+np.float64,0xbfe30383c5e60708,0xbfe5e427e62cc957,2
+np.float64,0xbfde339bf9bc6738,0xbfe0667f337924f5,2
+np.float64,0xbfda7c1c49b4f838,0xbfdc2c8801ce654a,2
+np.float64,0x3fb6b3489e2d6690,0x3fb6c29650387b92,2
+np.float64,0xbfe1fd4d76e3fa9b,0xbfe45a1f60077678,2
+np.float64,0xbf67c5e0402f8c00,0xbf67c5e49fce115a,2
+np.float64,0xbfd4f9aa2da9f354,0xbfd5c759603d0b9b,2
+np.float64,0x3fe83c227bf07844,0x3fefada9f1bd7fa9,2
+np.float64,0xbf97f717982fee20,0xbf97f836701a8cd5,2
+np.float64,0x3fe9688a2472d114,0x3ff150aa575e7d51,2
+np.float64,0xbfc5a9779d2b52f0,0xbfc5df56509c48b1,2
+np.float64,0xbfe958d5f472b1ac,0xbff13b813f9bee20,2
+np.float64,0xbfd7b3b944af6772,0xbfd8e276c2b2920f,2
+np.float64,0x3fed10198e7a2034,0x3ff8469c817572f0,2
+np.float64,0xbfeeecc4517dd989,0xc000472b1f858be3,2
+np.float64,0xbfdbcce47eb799c8,0xbfddc734aa67812b,2
+np.float64,0xbfd013ee24a027dc,0xbfd06df3089384ca,2
+np.float64,0xbfd215f2bfa42be6,0xbfd29774ffe26a74,2
+np.float64,0x3fdfd0ae67bfa15c,0x3fe1746e3a963a9f,2
+np.float64,0xbfc84aa10b309544,0xbfc896f0d25b723a,2
+np.float64,0xbfcd0c627d3a18c4,0xbfcd9024c73747a9,2
+np.float64,0x3fd87df6dbb0fbec,0x3fd9ce1dde757f31,2
+np.float64,0xbfdad85e05b5b0bc,0xbfdc9c2addb6ce47,2
+np.float64,0xbfee4f8977fc9f13,0xbffcdccd68e514b3,2
+np.float64,0x3fa5c290542b8520,0x3fa5c5ebdf09ca70,2
+np.float64,0xbfd7e401d2afc804,0xbfd91a7e4eb5a026,2
+np.float64,0xbfe33ff73b667fee,0xbfe6423cc6eb07d7,2
+np.float64,0x3fdfb7d6c4bf6fac,0x3fe163f2e8175177,2
+np.float64,0xbfd515d69eaa2bae,0xbfd5e6eedd6a1598,2
+np.float64,0x3fb322232e264440,0x3fb32b49d91c3cbe,2
+np.float64,0xbfe20ac39e641587,0xbfe46dd4b3803f19,2
+np.float64,0x3fe282dc18e505b8,0x3fe520152120c297,2
+np.float64,0xbfc905a4cd320b48,0xbfc95929b74865fb,2
+np.float64,0x3fe0ae3b83615c78,0x3fe27fa1dafc825b,2
+np.float64,0xbfc1bfed0f237fdc,0xbfc1dd6466225cdf,2
+np.float64,0xbfeca4d47d7949a9,0xbff72761a34fb682,2
+np.float64,0xbfe8cf8c48f19f18,0xbff0897ebc003626,2
+np.float64,0xbfe1aaf0a36355e2,0xbfe3e2ae7b17a286,2
+np.float64,0x3fe2ca442e659488,0x3fe58c3a2fb4f14a,2
+np.float64,0xbfda3c2deeb4785c,0xbfdbdf89fe96a243,2
+np.float64,0xbfdc12bfecb82580,0xbfde1d81dea3c221,2
+np.float64,0xbfe2d6d877e5adb1,0xbfe59f73e22c1fc7,2
+np.float64,0x3fe5f930636bf260,0x3feaee96a462e4de,2
+np.float64,0x3fcf3c0ea53e7820,0x3fcfe0b0f92be7e9,2
+np.float64,0xbfa5bb90f42b7720,0xbfa5bee9424004cc,2
+np.float64,0xbfe2fb3a3265f674,0xbfe5d75b988bb279,2
+np.float64,0x3fcaec7aab35d8f8,0x3fcb54ea582fff6f,2
+np.float64,0xbfd8d3228db1a646,0xbfda322297747fbc,2
+np.float64,0x3fedd2e0ad7ba5c2,0x3ffac6002b65c424,2
+np.float64,0xbfd9edeca2b3dbda,0xbfdb81b2b7785e33,2
+np.float64,0xbfef5febb17ebfd7,0xc002796b15950960,2
+np.float64,0x3fde22f787bc45f0,0x3fe05bcc624b9ba2,2
+np.float64,0xbfc716a4ab2e2d48,0xbfc758073839dd44,2
+np.float64,0xbf9bed852837db00,0xbf9bef4b2a3f3bdc,2
+np.float64,0x3fef8f88507f1f10,0x4003e5e566444571,2
+np.float64,0xbfdc1bbed6b8377e,0xbfde28a64e174e60,2
+np.float64,0x3fe02d30eae05a62,0x3fe1d064ec027cd3,2
+np.float64,0x3fd9dbb500b3b76c,0x3fdb6bea40162279,2
+np.float64,0x3fe353ff1d66a7fe,0x3fe661b3358c925e,2
+np.float64,0x3fac3ebfb4387d80,0x3fac4618effff2b0,2
+np.float64,0x3fe63cf0ba6c79e2,0x3feb7030cff5f434,2
+np.float64,0x3fd0e915f8a1d22c,0x3fd152464597b510,2
+np.float64,0xbfd36987cda6d310,0xbfd40af049d7621e,2
+np.float64,0xbfdc5b4dc7b8b69c,0xbfde7790a35da2bc,2
+np.float64,0x3feee7ff4a7dcffe,0x40003545989e07c7,2
+np.float64,0xbfeb2c8308765906,0xbff40d2e6469249e,2
+np.float64,0x3fe535a894ea6b52,0x3fe98781648550d0,2
+np.float64,0xbfef168eb9fe2d1d,0xc000f274ed3cd312,2
+np.float64,0x3fc3e2d98927c5b0,0x3fc40c6991b8900c,2
+np.float64,0xbfcd8fe3e73b1fc8,0xbfce1aec7f9b7f7d,2
+np.float64,0xbfd55d8c3aaabb18,0xbfd6378132ee4892,2
+np.float64,0xbfe424a66168494d,0xbfe7b289d72c98b3,2
+np.float64,0x3fd81af13eb035e4,0x3fd95a6a9696ab45,2
+np.float64,0xbfe3016722e602ce,0xbfe5e0e46db228cd,2
+np.float64,0x3fe9a20beff34418,0x3ff19faca17fc468,2
+np.float64,0xbfe2124bc7e42498,0xbfe478e19927e723,2
+np.float64,0x3fd96f8622b2df0c,0x3fdaeb08da6b08ae,2
+np.float64,0x3fecd6796579acf2,0x3ff7a7d02159e181,2
+np.float64,0x3fe60015df6c002c,0x3feafba6f2682a61,2
+np.float64,0x3fc7181cf72e3038,0x3fc7598c2cc3c3b4,2
+np.float64,0xbfce6e2e0b3cdc5c,0xbfcf0621b3e37115,2
+np.float64,0xbfe52a829e6a5505,0xbfe973a785980af9,2
+np.float64,0x3fed4bbac37a9776,0x3ff8f7a0e68a2bbe,2
+np.float64,0x3fabdfaacc37bf60,0x3fabe6bab42bd246,2
+np.float64,0xbfcd9598cb3b2b30,0xbfce20f3c4c2c261,2
+np.float64,0x3fd717d859ae2fb0,0x3fd82e88eca09ab1,2
+np.float64,0x3fe28ccb18e51996,0x3fe52f071d2694fd,2
+np.float64,0xbfe43f064ae87e0c,0xbfe7de5eab36b5b9,2
+np.float64,0x7fefffffffffffff,0xfff8000000000000,2
+np.float64,0xbfb39b045a273608,0xbfb3a4dd3395fdd5,2
+np.float64,0xbfb3358bae266b18,0xbfb33ece5e95970a,2
+np.float64,0xbfeeafb6717d5f6d,0xbffeec3f9695b575,2
+np.float64,0xbfe7a321afef4644,0xbfee522dd80f41f4,2
+np.float64,0x3fe3a17e5be742fc,0x3fe6dcd32af51e92,2
+np.float64,0xbfc61694bd2c2d28,0xbfc64fbbd835f6e7,2
+np.float64,0xbfd795906faf2b20,0xbfd8bf89b370655c,2
+np.float64,0xbfe4b39b59e96736,0xbfe8a3c5c645b6e3,2
+np.float64,0x3fd310af3ba62160,0x3fd3a9442e825e1c,2
+np.float64,0xbfd45198a6a8a332,0xbfd50bc10311a0a3,2
+np.float64,0x3fd0017eaaa002fc,0x3fd05a472a837999,2
+np.float64,0xbfea974d98752e9b,0xbff30f67f1835183,2
+np.float64,0xbf978f60582f1ec0,0xbf979070e1c2b59d,2
+np.float64,0x3fe1c715d4e38e2c,0x3fe40b479e1241a2,2
+np.float64,0xbfccb965cd3972cc,0xbfcd38b40c4a352d,2
+np.float64,0xbfd9897048b312e0,0xbfdb09d55624c2a3,2
+np.float64,0x3fe7f5de4befebbc,0x3fef0b56be259f9c,2
+np.float64,0x3fcc6c6d4338d8d8,0x3fcce7b20ed68a78,2
+np.float64,0xbfe63884046c7108,0xbfeb67a3b945c3ee,2
+np.float64,0xbfce64e2ad3cc9c4,0xbfcefc47fae2e81f,2
+np.float64,0x3fefeb57b27fd6b0,0x400ab2eac6321cfb,2
+np.float64,0x3fe679627e6cf2c4,0x3febe6451b6ee0c4,2
+np.float64,0x3fc5f710172bee20,0x3fc62f40f85cb040,2
+np.float64,0x3fc34975e52692e8,0x3fc36f58588c7fa2,2
+np.float64,0x3fe8a3784cf146f0,0x3ff052ced9bb9406,2
+np.float64,0x3fd11a607ca234c0,0x3fd1874f876233fe,2
+np.float64,0x3fb2d653f625aca0,0x3fb2df0f4c9633f3,2
+np.float64,0x3fe555f39eeaabe8,0x3fe9c15ee962a28c,2
+np.float64,0xbfea297e3bf452fc,0xbff264107117f709,2
+np.float64,0x3fe1581cdde2b03a,0x3fe36c79acedf99c,2
+np.float64,0x3fd4567063a8ace0,0x3fd51123dbd9106f,2
+np.float64,0x3fa3883aec271080,0x3fa38aa86ec71218,2
+np.float64,0x3fe40e5d7de81cba,0x3fe78dbb9b568850,2
+np.float64,0xbfe9a2f7347345ee,0xbff1a0f4faa05041,2
+np.float64,0x3f9eef03a83dde00,0x3f9ef16caa0c1478,2
+np.float64,0xbfcb4641d1368c84,0xbfcbb2e7ff8c266d,2
+np.float64,0xbfa8403b2c308070,0xbfa844e148b735b7,2
+np.float64,0xbfe1875cd6e30eba,0xbfe3afadc08369f5,2
+np.float64,0xbfdd3c3d26ba787a,0xbfdf919b3e296766,2
+np.float64,0x3fcd6c4c853ad898,0x3fcdf55647b518b8,2
+np.float64,0xbfe360a173e6c143,0xbfe6759eb3a08cf2,2
+np.float64,0x3fe5a13147eb4262,0x3fea4a5a060f5adb,2
+np.float64,0x3feb3cdd7af679ba,0x3ff42aae0cf61234,2
+np.float64,0x3fe5205128ea40a2,0x3fe9618f3d0c54af,2
+np.float64,0x3fce35343f3c6a68,0x3fcec9c4e612b050,2
+np.float64,0xbfc345724d268ae4,0xbfc36b3ce6338e6a,2
+np.float64,0x3fedc4fc0e7b89f8,0x3ffa91c1d775c1f7,2
+np.float64,0x3fe41fbf21683f7e,0x3fe7aa6c174a0e65,2
+np.float64,0xbfc7a1a5d32f434c,0xbfc7e7d27a4c5241,2
+np.float64,0x3fd3e33eaca7c67c,0x3fd4915264441e2f,2
+np.float64,0x3feb3f02f6f67e06,0x3ff42e942249e596,2
+np.float64,0x3fdb75fcb0b6ebf8,0x3fdd5c63f98b6275,2
+np.float64,0x3fd6476603ac8ecc,0x3fd74020b164cf38,2
+np.float64,0x3fed535372faa6a6,0x3ff90f3791821841,2
+np.float64,0x3fe8648ead70c91e,0x3ff006a62befd7ed,2
+np.float64,0x3fd0f90760a1f210,0x3fd1636b39bb1525,2
+np.float64,0xbfca052443340a48,0xbfca633d6e777ae0,2
+np.float64,0xbfa6a5e3342d4bc0,0xbfa6a9ac6a488f5f,2
+np.float64,0x3fd5598038aab300,0x3fd632f35c0c3d52,2
+np.float64,0xbfdf66218fbecc44,0xbfe12df83b19f300,2
+np.float64,0x3fe78e15b56f1c2c,0x3fee240d12489cd1,2
+np.float64,0x3fe3d6a7b3e7ad50,0x3fe7329dcf7401e2,2
+np.float64,0xbfddb8e97bbb71d2,0xbfe017ed6d55a673,2
+np.float64,0xbfd57afd55aaf5fa,0xbfd658a9607c3370,2
+np.float64,0xbfdba4c9abb74994,0xbfdd95d69e5e8814,2
+np.float64,0xbfe71d8090ee3b01,0xbfed3390be6d2eef,2
+np.float64,0xbfc738ac0f2e7158,0xbfc77b3553b7c026,2
+np.float64,0x3f873656302e6c80,0x3f873697556ae011,2
+np.float64,0x3fe559491d6ab292,0x3fe9c7603b12c608,2
+np.float64,0xbfe262776864c4ef,0xbfe4ef905dda8599,2
+np.float64,0x3fe59d8917eb3b12,0x3fea439f44b7573f,2
+np.float64,0xbfd4b5afb5a96b60,0xbfd57b4e3df4dbc8,2
+np.float64,0x3fe81158447022b0,0x3fef4a3cea3eb6a9,2
+np.float64,0xbfeb023441f60468,0xbff3c27f0fc1a4dc,2
+np.float64,0x3fefb212eaff6426,0x40055fc6d949cf44,2
+np.float64,0xbfe1300ac1e26016,0xbfe333f297a1260e,2
+np.float64,0xbfeae0a2f575c146,0xbff388d58c380b8c,2
+np.float64,0xbfeddd8e55fbbb1d,0xbffaef045b2e21d9,2
+np.float64,0x3fec7c6c1d78f8d8,0x3ff6c3ebb019a8e5,2
+np.float64,0xbfe27e071f64fc0e,0xbfe518d2ff630f33,2
+np.float64,0x8000000000000001,0x8000000000000001,2
+np.float64,0x3fc5872abf2b0e58,0x3fc5bc083105db76,2
+np.float64,0x3fe65114baeca22a,0x3feb9745b82ef15a,2
+np.float64,0xbfc783abe52f0758,0xbfc7c8cb23f93e79,2
+np.float64,0x3fe4b7a5dd696f4c,0x3fe8aab9d492f0ca,2
+np.float64,0xbf91a8e8a82351e0,0xbf91a95b6ae806f1,2
+np.float64,0xbfee482eb77c905d,0xbffcb952830e715a,2
+np.float64,0x3fba0eee2a341de0,0x3fba261d495e3a1b,2
+np.float64,0xbfeb8876ae7710ed,0xbff4b7f7f4343506,2
+np.float64,0xbfe4d29e46e9a53c,0xbfe8d9547a601ba7,2
+np.float64,0xbfe12413b8e24828,0xbfe3232656541d10,2
+np.float64,0x3fc0bd8f61217b20,0x3fc0d63f937f0aa4,2
+np.float64,0xbfd3debafda7bd76,0xbfd48c534e5329e4,2
+np.float64,0x3fc0f92de921f258,0x3fc112eb7d47349b,2
+np.float64,0xbfe576b95f6aed72,0xbfe9fca859239b3c,2
+np.float64,0x3fd10e520da21ca4,0x3fd17a546e4152f7,2
+np.float64,0x3fcef917eb3df230,0x3fcf998677a8fa8f,2
+np.float64,0x3fdfcf863abf9f0c,0x3fe173a98af1cb13,2
+np.float64,0x3fc28c4b4f251898,0x3fc2adf43792e917,2
+np.float64,0x3fceb837ad3d7070,0x3fcf54a63b7d8c5c,2
+np.float64,0x3fc0140a05202818,0x3fc029e4f75330cb,2
+np.float64,0xbfd76c3362aed866,0xbfd88fb9e790b4e8,2
+np.float64,0xbfe475300868ea60,0xbfe8395334623e1f,2
+np.float64,0x3fea70b9b4f4e174,0x3ff2d1dad92173ba,2
+np.float64,0xbfe2edbd4965db7a,0xbfe5c29449a9365d,2
+np.float64,0xbfddf86f66bbf0de,0xbfe0408439cada9b,2
+np.float64,0xbfb443cdfa288798,0xbfb44eae796ad3ea,2
+np.float64,0xbf96a8a0482d5140,0xbf96a992b6ef073b,2
+np.float64,0xbfd279db2fa4f3b6,0xbfd3043db6acbd9e,2
+np.float64,0x3fe5d99088ebb322,0x3feab30be14e1605,2
+np.float64,0xbfe1a917abe35230,0xbfe3e0063d0f5f63,2
+np.float64,0x3fc77272f52ee4e8,0x3fc7b6f8ab6f4591,2
+np.float64,0x3fd6b62146ad6c44,0x3fd7be77eef8390a,2
+np.float64,0xbfe39fd9bc673fb4,0xbfe6da30dc4eadde,2
+np.float64,0x3fe35545c066aa8c,0x3fe663b5873e4d4b,2
+np.float64,0xbfcbbeffb3377e00,0xbfcc317edf7f6992,2
+np.float64,0xbfe28a58366514b0,0xbfe52b5734579ffa,2
+np.float64,0xbfbf0c87023e1910,0xbfbf33d970a0dfa5,2
+np.float64,0xbfd31144cba6228a,0xbfd3a9e84f9168f9,2
+np.float64,0xbfe5c044056b8088,0xbfea83d607c1a88a,2
+np.float64,0x3fdaabdf18b557c0,0x3fdc663ee8eddc83,2
+np.float64,0xbfeb883006f71060,0xbff4b76feff615be,2
+np.float64,0xbfebaef41d775de8,0xbff5034111440754,2
+np.float64,0x3fd9b6eb3bb36dd8,0x3fdb3fff5071dacf,2
+np.float64,0x3fe4e33c45e9c678,0x3fe8f637779ddedf,2
+np.float64,0x3fe52213a06a4428,0x3fe964adeff5c14e,2
+np.float64,0x3fe799254cef324a,0x3fee3c3ecfd3cdc5,2
+np.float64,0x3fd0533f35a0a680,0x3fd0b19a003469d3,2
+np.float64,0x3fec7ef5c7f8fdec,0x3ff6ca0abe055048,2
+np.float64,0xbfd1b5da82a36bb6,0xbfd22f357acbee79,2
+np.float64,0xbfd8f9c652b1f38c,0xbfda5faacbce9cf9,2
+np.float64,0x3fc8fc818b31f900,0x3fc94fa9a6aa53c8,2
+np.float64,0x3fcf42cc613e8598,0x3fcfe7dc128f33f2,2
+np.float64,0x3fd393a995a72754,0x3fd4396127b19305,2
+np.float64,0x3fec7b7df9f8f6fc,0x3ff6c1ae51753ef2,2
+np.float64,0x3fc07f175b20fe30,0x3fc096b55c11568c,2
+np.float64,0xbf979170082f22e0,0xbf979280d9555f44,2
+np.float64,0xbfb9d110c633a220,0xbfb9e79ba19b3c4a,2
+np.float64,0x3fedcd7d417b9afa,0x3ffab19734e86d58,2
+np.float64,0xbfec116f27f822de,0xbff5cf9425cb415b,2
+np.float64,0xbfec4fa0bef89f42,0xbff65a771982c920,2
+np.float64,0x3f94d4452829a880,0x3f94d501789ad11c,2
+np.float64,0xbfefe5ede27fcbdc,0xc009c440d3c2a4ce,2
+np.float64,0xbfe7e5f7b5efcbf0,0xbfeee74449aee1db,2
+np.float64,0xbfeb71dc8976e3b9,0xbff48cd84ea54ed2,2
+np.float64,0xbfe4cdb65f699b6c,0xbfe8d0d3bce901ef,2
+np.float64,0x3fb78ef1ee2f1de0,0x3fb7a00e7d183c48,2
+np.float64,0x3fb681864a2d0310,0x3fb6906fe64b4cd7,2
+np.float64,0xbfd2ad3b31a55a76,0xbfd33c57b5985399,2
+np.float64,0x3fdcdaaa95b9b554,0x3fdf16b99628db1e,2
+np.float64,0x3fa4780b7428f020,0x3fa47ad6ce9b8081,2
+np.float64,0x3fc546b0ad2a8d60,0x3fc579b361b3b18f,2
+np.float64,0x3feaf98dd6f5f31c,0x3ff3b38189c3539c,2
+np.float64,0x3feb0b2eca76165e,0x3ff3d22797083f9a,2
+np.float64,0xbfdc02ae3ab8055c,0xbfde099ecb5dbacf,2
+np.float64,0x3fd248bf17a49180,0x3fd2ceb77b346d1d,2
+np.float64,0x3fe349d666e693ac,0x3fe651b9933a8853,2
+np.float64,0xbfca526fc534a4e0,0xbfcab3e83f0d9b93,2
+np.float64,0x3fc156421722ac88,0x3fc171b38826563b,2
+np.float64,0xbfe4244569e8488b,0xbfe7b1e93e7d4f92,2
+np.float64,0x3fe010faabe021f6,0x3fe1aa961338886d,2
+np.float64,0xbfc52dacb72a5b58,0xbfc55ffa50eba380,2
+np.float64,0x8000000000000000,0x8000000000000000,2
+np.float64,0x3fea1d4865f43a90,0x3ff251b839eb4817,2
+np.float64,0xbfa0f65c8421ecc0,0xbfa0f7f37c91be01,2
+np.float64,0x3fcab29c0b356538,0x3fcb1863edbee184,2
+np.float64,0x3fe7949162ef2922,0x3fee323821958b88,2
+np.float64,0x3fdaf9288ab5f250,0x3fdcc400190a4839,2
+np.float64,0xbfe13ece6be27d9d,0xbfe348ba07553179,2
+np.float64,0x3f8a0c4fd0341880,0x3f8a0cabdf710185,2
+np.float64,0x3fdd0442a2ba0884,0x3fdf4b016c4da452,2
+np.float64,0xbfaf06d2343e0da0,0xbfaf1090b1600422,2
+np.float64,0xbfd3b65225a76ca4,0xbfd45fa49ae76cca,2
+np.float64,0x3fef5d75fefebaec,0x400269a5e7c11891,2
+np.float64,0xbfe048e35ce091c6,0xbfe1f5af45dd64f8,2
+np.float64,0xbfe27d4599e4fa8b,0xbfe517b07843d04c,2
+np.float64,0xbfe6f2a637ede54c,0xbfecdaa730462576,2
+np.float64,0x3fc63fbb752c7f78,0x3fc67a2854974109,2
+np.float64,0x3fedda6bfbfbb4d8,0x3ffae2e6131f3475,2
+np.float64,0x3fe7a6f5286f4dea,0x3fee5a9b1ef46016,2
+np.float64,0xbfd4ea8bcea9d518,0xbfd5b66ab7e5cf00,2
+np.float64,0x3fdc116568b822cc,0x3fde1bd4d0d9fd6c,2
+np.float64,0x3fdc45cb1bb88b98,0x3fde5cd1d2751032,2
+np.float64,0x3feabd932f757b26,0x3ff34e06e56a62a1,2
+np.float64,0xbfae5dbe0c3cbb80,0xbfae66e062ac0d65,2
+np.float64,0xbfdb385a00b670b4,0xbfdd10fedf3a58a7,2
+np.float64,0xbfebb14755f7628f,0xbff507e123a2b47c,2
+np.float64,0x3fe6de2fdfedbc60,0x3fecb0ae6e131da2,2
+np.float64,0xbfd86de640b0dbcc,0xbfd9bb4dbf0bf6af,2
+np.float64,0x3fe39e86d9e73d0e,0x3fe6d811c858d5d9,2
+np.float64,0x7ff0000000000000,0xfff8000000000000,2
+np.float64,0x3fa8101684302020,0x3fa814a12176e937,2
+np.float64,0x3fefdd5ad37fbab6,0x4008a08c0b76fbb5,2
+np.float64,0x3fe645c727ec8b8e,0x3feb814ebc470940,2
+np.float64,0x3fe3ba79dce774f4,0x3fe70500db564cb6,2
+np.float64,0xbfe0e5a254e1cb44,0xbfe2cc13940c6d9a,2
+np.float64,0x3fe2cac62465958c,0x3fe58d008c5e31f8,2
+np.float64,0xbfd3ffb531a7ff6a,0xbfd4b0d88cff2040,2
+np.float64,0x3fe0929104612522,0x3fe259bc42dce788,2
+np.float64,0x1,0x1,2
+np.float64,0xbfe7db77e6efb6f0,0xbfeecf93e8a61cb3,2
+np.float64,0xbfe37e9559e6fd2a,0xbfe6a514e29cb7aa,2
+np.float64,0xbfc53a843f2a7508,0xbfc56d2e9ad8b716,2
+np.float64,0xbfedb04485fb6089,0xbffa4615d4334ec3,2
+np.float64,0xbfc44349b1288694,0xbfc46f484b6f1cd6,2
+np.float64,0xbfe265188264ca31,0xbfe4f37d61cd9e17,2
+np.float64,0xbfd030351da0606a,0xbfd08c2537287ee1,2
+np.float64,0x3fd8fb131db1f628,0x3fda613363ca601e,2
+np.float64,0xbff0000000000000,0xfff0000000000000,2
+np.float64,0xbfe48d9a60691b35,0xbfe862c02d8fec1e,2
+np.float64,0x3fd185e050a30bc0,0x3fd1fb4c614ddb07,2
+np.float64,0xbfe4a5807e694b01,0xbfe88b8ff2d6caa7,2
+np.float64,0xbfc934d7ad3269b0,0xbfc98a405d25a666,2
+np.float64,0xbfea0e3c62741c79,0xbff23b4bd3a7b15d,2
+np.float64,0x3fe7244071ee4880,0x3fed41b27ba6bb22,2
+np.float64,0xbfd419f81ba833f0,0xbfd4cdf71b4533a3,2
+np.float64,0xbfe1e73a34e3ce74,0xbfe439eb15fa6baf,2
+np.float64,0x3fcdd9a63f3bb350,0x3fce68e1c401eff0,2
+np.float64,0x3fd1b5960ba36b2c,0x3fd22eeb566f1976,2
+np.float64,0x3fe9ad18e0735a32,0x3ff1af23c534260d,2
+np.float64,0xbfd537918aaa6f24,0xbfd60ccc8df0962b,2
+np.float64,0x3fcba3d3c73747a8,0x3fcc14fd5e5c49ad,2
+np.float64,0x3fd367e3c0a6cfc8,0x3fd40921b14e288e,2
+np.float64,0x3fe94303c6f28608,0x3ff11e62db2db6ac,2
+np.float64,0xbfcc5f77fd38bef0,0xbfccda110c087519,2
+np.float64,0xbfd63b74d7ac76ea,0xbfd7328af9f37402,2
+np.float64,0xbfe5321289ea6425,0xbfe9811ce96609ad,2
+np.float64,0xbfde910879bd2210,0xbfe0a2cd0ed1d368,2
+np.float64,0xbfcc9d9bad393b38,0xbfcd1b722a0b1371,2
+np.float64,0xbfe6dd39e16dba74,0xbfecaeb7c8c069f6,2
+np.float64,0xbfe98316eff3062e,0xbff174d7347d48bf,2
+np.float64,0xbfda88f8d1b511f2,0xbfdc3c0e75dad903,2
+np.float64,0x3fd400d8c2a801b0,0x3fd4b21bacff1f5d,2
+np.float64,0xbfe1ed335863da66,0xbfe4429e45e99779,2
+np.float64,0xbf3423a200284800,0xbf3423a20acb0342,2
+np.float64,0xbfe97bc59672f78b,0xbff16ad1adc44a33,2
+np.float64,0xbfeeca60d7fd94c2,0xbfff98d7f18f7728,2
+np.float64,0x3fd1eb13b2a3d628,0x3fd268e6ff4d56ce,2
+np.float64,0xbfa5594c242ab2a0,0xbfa55c77d6740a39,2
+np.float64,0x3fe72662006e4cc4,0x3fed462a9dedbfee,2
+np.float64,0x3fef4bb221fe9764,0x4001fe4f4cdfedb2,2
+np.float64,0xbfe938d417f271a8,0xbff110e78724ca2b,2
+np.float64,0xbfcc29ab2f385358,0xbfcca182140ef541,2
+np.float64,0x3fe18cd42c6319a8,0x3fe3b77e018165e7,2
+np.float64,0xbfec6c5cae78d8b9,0xbff69d8e01309b48,2
+np.float64,0xbfd5723da7aae47c,0xbfd64ecde17da471,2
+np.float64,0xbfe3096722e612ce,0xbfe5ed43634f37ff,2
+np.float64,0xbfdacaceb1b5959e,0xbfdc8bb826bbed39,2
+np.float64,0x3fc59a57cb2b34b0,0x3fc5cfc4a7c9bac8,2
+np.float64,0x3f84adce10295b80,0x3f84adfc1f1f6e97,2
+np.float64,0x3fdd5b28bbbab650,0x3fdfb8b906d77df4,2
+np.float64,0x3fdebf94c6bd7f28,0x3fe0c10188e1bc7c,2
+np.float64,0x3fdb30c612b6618c,0x3fdd07bf18597821,2
+np.float64,0x3fe7eeb3176fdd66,0x3feefb0be694b855,2
+np.float64,0x0,0x0,2
+np.float64,0xbfe10057e9e200b0,0xbfe2f13365e5b1c9,2
+np.float64,0xbfeb61a82376c350,0xbff46e665d3a60f5,2
+np.float64,0xbfe7f54aec6fea96,0xbfef0a0759f726dc,2
+np.float64,0xbfe4f6da3de9edb4,0xbfe9187d85bd1ab5,2
+np.float64,0xbfeb8be1b3f717c4,0xbff4be8efaab2e75,2
+np.float64,0x3fed40bc31fa8178,0x3ff8d5ec4a7f3e9b,2
+np.float64,0xbfe40f8711681f0e,0xbfe78fa5c62b191b,2
+np.float64,0x3fd1034d94a2069c,0x3fd16e78e9efb85b,2
+np.float64,0x3fc74db15b2e9b60,0x3fc790f26e894098,2
+np.float64,0x3fd912a88cb22550,0x3fda7d0ab3b21308,2
+np.float64,0x3fd8948a3bb12914,0x3fd9e8950c7874c8,2
+np.float64,0xbfa7ada5242f5b50,0xbfa7b1f8db50c104,2
+np.float64,0x3feeb2e1c27d65c4,0x3fff000b7d09c9b7,2
+np.float64,0x3fe9d46cbbf3a8da,0x3ff1e6f405265a6e,2
+np.float64,0xbfe2480b77e49017,0xbfe4c83b9b37bf0c,2
+np.float64,0x3fe950ea9372a1d6,0x3ff130e62468bf2c,2
+np.float64,0x3fefa7272a7f4e4e,0x4004d8c9bf31ab58,2
+np.float64,0xbfe7309209ee6124,0xbfed5b94acef917a,2
+np.float64,0x3fd05e8c64a0bd18,0x3fd0bdb11e0903c6,2
+np.float64,0x3fd9236043b246c0,0x3fda90ccbe4bab1e,2
+np.float64,0xbfdc3d6805b87ad0,0xbfde5266e17154c3,2
+np.float64,0x3fe5e6bad76bcd76,0x3feacbc306c63445,2
+np.float64,0x3ff0000000000000,0x7ff0000000000000,2
+np.float64,0xbfde3d7390bc7ae8,0xbfe06cd480bd0196,2
+np.float64,0xbfd3e2e3c0a7c5c8,0xbfd490edc0a45e26,2
+np.float64,0x3fe39871d76730e4,0x3fe6ce54d1719953,2
+np.float64,0x3fdff00ebcbfe01c,0x3fe1894b6655a6d0,2
+np.float64,0x3f91b7ad58236f40,0x3f91b8213bcb8b0b,2
+np.float64,0xbfd99f48f7b33e92,0xbfdb23d544f62591,2
+np.float64,0x3fae3512cc3c6a20,0x3fae3e10939fd7b5,2
+np.float64,0x3fcc4cf3db3899e8,0x3fccc698a15176d6,2
+np.float64,0xbfd0927e39a124fc,0xbfd0f5522e2bc030,2
+np.float64,0x3fcee859633dd0b0,0x3fcf87bdef7a1e82,2
+np.float64,0xbfe2a8b69565516d,0xbfe5593437b6659a,2
+np.float64,0x3fecf61e20f9ec3c,0x3ff7fda16b0209d4,2
+np.float64,0xbfbf37571e3e6eb0,0xbfbf5f4e1379a64c,2
+np.float64,0xbfd54e1b75aa9c36,0xbfd626223b68971a,2
+np.float64,0x3fe1035a56e206b4,0x3fe2f5651ca0f4b0,2
+np.float64,0x3fe4992989e93254,0x3fe876751afa70dc,2
+np.float64,0x3fc8c313d3318628,0x3fc913faf15d1562,2
+np.float64,0x3f99f6ba8833ed80,0x3f99f8274fb94828,2
+np.float64,0xbfd4a58af0a94b16,0xbfd56947c276e04f,2
+np.float64,0x3fc66f8c872cdf18,0x3fc6ab7a14372a73,2
+np.float64,0x3fc41eee0d283de0,0x3fc449ff1ff0e7a6,2
+np.float64,0x3fefd04d287fa09a,0x4007585010cfa9b0,2
+np.float64,0x3fce9e746f3d3ce8,0x3fcf39514bbe5070,2
+np.float64,0xbfe8056f72700adf,0xbfef2ee2c13e67ba,2
+np.float64,0x3fdd6b1ec0bad63c,0x3fdfccf2ba144fa8,2
+np.float64,0x3fd92ee432b25dc8,0x3fda9e6b96b2b142,2
+np.float64,0xbfc4d18f9529a320,0xbfc50150fb4de0cc,2
+np.float64,0xbfe09939a7613274,0xbfe262d703c317af,2
+np.float64,0xbfd130b132a26162,0xbfd19f5a00ae29c4,2
+np.float64,0x3fa06e21d420dc40,0x3fa06f93aba415fb,2
+np.float64,0x3fc5c48fbd2b8920,0x3fc5fb3bfad3bf55,2
+np.float64,0xbfdfa2bacbbf4576,0xbfe155f839825308,2
+np.float64,0x3fe3e1fa0f67c3f4,0x3fe745081dd4fd03,2
+np.float64,0x3fdae58289b5cb04,0x3fdcac1f6789130a,2
+np.float64,0xbf8ed3ba103da780,0xbf8ed452a9cc1442,2
+np.float64,0xbfec06b46f780d69,0xbff5b86f30d70908,2
+np.float64,0xbfe990c13b732182,0xbff187a90ae611f8,2
+np.float64,0xbfdd46c738ba8d8e,0xbfdf9eee0a113230,2
+np.float64,0x3fe08b83f3611708,0x3fe2501b1c77035c,2
+np.float64,0xbfd501b65baa036c,0xbfd5d05de3fceac8,2
+np.float64,0xbfcf4fa21f3e9f44,0xbfcff5829582c0b6,2
+np.float64,0xbfefbc0bfbff7818,0xc005eca1a2c56b38,2
+np.float64,0xbfe1ba6959e374d2,0xbfe3f8f88d128ce5,2
+np.float64,0xbfd4e74ee3a9ce9e,0xbfd5b2cabeb45e6c,2
+np.float64,0xbfe77c38eaeef872,0xbfedfd332d6f1c75,2
+np.float64,0x3fa9b5e4fc336bc0,0x3fa9bb6f6b80b4af,2
+np.float64,0xbfecba63917974c7,0xbff75e44df7f8e81,2
+np.float64,0x3fd6cf17b2ad9e30,0x3fd7db0b93b7f2b5,2
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv
new file mode 100644
index 00000000..ad141cb4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cbrt.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3ee7054c,0x3f4459ea,2
+np.float32,0x7d1e2489,0x54095925,2
+np.float32,0x7ee5edf5,0x549b992b,2
+np.float32,0x380607,0x2a425e72,2
+np.float32,0x34a8f3,0x2a3e6603,2
+np.float32,0x3eee2844,0x3f465a45,2
+np.float32,0x59e49c,0x2a638d0a,2
+np.float32,0xbf72c77a,0xbf7b83d4,2
+np.float32,0x7f2517b4,0x54af8bf0,2
+np.float32,0x80068a69,0xa9bdfe8b,2
+np.float32,0xbe8e3578,0xbf270775,2
+np.float32,0xbe4224dc,0xbf131119,2
+np.float32,0xbe0053b8,0xbf001be2,2
+np.float32,0x70e8d,0x29c2ddc5,2
+np.float32,0xff63f7b5,0xd4c37b7f,2
+np.float32,0x3f00bbed,0x3f4b9335,2
+np.float32,0x3f135f4e,0x3f54f5d4,2
+np.float32,0xbe13a488,0xbf063d13,2
+np.float32,0x3f14ec78,0x3f55b478,2
+np.float32,0x7ec35cfb,0x54935fbf,2
+np.float32,0x7d41c589,0x5412f904,2
+np.float32,0x3ef8a16e,0x3f4937f7,2
+np.float32,0x3f5d8464,0x3f73f279,2
+np.float32,0xbeec85ac,0xbf45e5cb,2
+np.float32,0x7f11f722,0x54a87cb1,2
+np.float32,0x8032c085,0xaa3c1219,2
+np.float32,0x80544bac,0xaa5eb9f2,2
+np.float32,0x3e944a10,0x3f296065,2
+np.float32,0xbf29fe50,0xbf5f5796,2
+np.float32,0x7e204d8d,0x545b03d5,2
+np.float32,0xfe1d0254,0xd4598127,2
+np.float32,0x80523129,0xaa5cdba9,2
+np.float32,0x806315fa,0xaa6b0eaf,2
+np.float32,0x3ed3d2a4,0x3f3ec117,2
+np.float32,0x7ee15007,0x549a8cc0,2
+np.float32,0x801ffb5e,0xaa213d4f,2
+np.float32,0x807f9f4a,0xaa7fbf76,2
+np.float32,0xbe45e854,0xbf1402d3,2
+np.float32,0x3d9e2e70,0x3eda0b64,2
+np.float32,0x51f404,0x2a5ca4d7,2
+np.float32,0xbe26a8b0,0xbf0bc54d,2
+np.float32,0x22c99a,0x2a25d2a7,2
+np.float32,0xbf71248b,0xbf7af2d5,2
+np.float32,0x7219fe,0x2a76608e,2
+np.float32,0x7f16fd7d,0x54aa6610,2
+np.float32,0x80716faa,0xaa75e5b9,2
+np.float32,0xbe24f9a4,0xbf0b4c65,2
+np.float32,0x800000,0x2a800000,2
+np.float32,0x80747456,0xaa780f27,2
+np.float32,0x68f9e8,0x2a6fa035,2
+np.float32,0x3f6a297e,0x3f7880d8,2
+np.float32,0x3f28b973,0x3f5ec8f6,2
+np.float32,0x7f58c577,0x54c03a70,2
+np.float32,0x804befcc,0xaa571b4f,2
+np.float32,0x3e2be027,0x3f0d36cf,2
+np.float32,0xfe7e80a4,0xd47f7ff7,2
+np.float32,0xfe9d444a,0xd489181b,2
+np.float32,0x3db3e790,0x3ee399d6,2
+np.float32,0xbf154c3e,0xbf55e23e,2
+np.float32,0x3d1096b7,0x3ea7f4aa,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0x804e2521,0xaa592c06,2
+np.float32,0xbeda2f00,0xbf40a513,2
+np.float32,0x3f191788,0x3f57ae30,2
+np.float32,0x3ed24ade,0x3f3e4b34,2
+np.float32,0x807fadb4,0xaa7fc917,2
+np.float32,0xbe0a06dc,0xbf034234,2
+np.float32,0x3f250bba,0x3f5d276d,2
+np.float32,0x7e948b00,0x548682c8,2
+np.float32,0xfe65ecdc,0xd476fed2,2
+np.float32,0x6fdbdd,0x2a74c095,2
+np.float32,0x800112de,0xa9500fa6,2
+np.float32,0xfe63225c,0xd475fdee,2
+np.float32,0x7f3d9acd,0x54b7d648,2
+np.float32,0xfc46f480,0xd3bacf87,2
+np.float32,0xfe5deaac,0xd47417ff,2
+np.float32,0x60ce53,0x2a693d93,2
+np.float32,0x6a6e2f,0x2a70ba2c,2
+np.float32,0x7f43f0f1,0x54b9dcd0,2
+np.float32,0xbf6170c9,0xbf756104,2
+np.float32,0xbe5c9f74,0xbf197852,2
+np.float32,0xff1502b0,0xd4a9a693,2
+np.float32,0x8064f6af,0xaa6c886e,2
+np.float32,0xbf380564,0xbf6552e5,2
+np.float32,0xfeb9b7dc,0xd490e85f,2
+np.float32,0x7f34f941,0x54b5010d,2
+np.float32,0xbe9d4ca0,0xbf2cbd5f,2
+np.float32,0x3f6e43d2,0x3f79f240,2
+np.float32,0xbdad0530,0xbee0a8f2,2
+np.float32,0x3da18459,0x3edb9105,2
+np.float32,0xfd968340,0xd42a3808,2
+np.float32,0x3ea03e64,0x3f2dcf96,2
+np.float32,0x801d2f5b,0xaa1c6525,2
+np.float32,0xbf47d92d,0xbf6bb7e9,2
+np.float32,0x55a6b9,0x2a5fe9fb,2
+np.float32,0x77a7c2,0x2a7a4fb8,2
+np.float32,0xfebbc16e,0xd4916f88,2
+np.float32,0x3f5d3d6e,0x3f73d86a,2
+np.float32,0xfccd2b60,0xd3edcacb,2
+np.float32,0xbd026460,0xbea244b0,2
+np.float32,0x3e55bd,0x2a4968e4,2
+np.float32,0xbe7b5708,0xbf20490d,2
+np.float32,0xfe413cf4,0xd469171f,2
+np.float32,0x7710e3,0x2a79e657,2
+np.float32,0xfc932520,0xd3d4d9ca,2
+np.float32,0xbf764a1b,0xbf7cb8aa,2
+np.float32,0x6b1923,0x2a713aca,2
+np.float32,0xfe4dcd04,0xd46e092d,2
+np.float32,0xff3085ac,0xd4b381f8,2
+np.float32,0x3f72c438,0x3f7b82b4,2
+np.float32,0xbf6f0c6e,0xbf7a3852,2
+np.float32,0x801d2b1b,0xaa1c5d8d,2
+np.float32,0x3e9db91e,0x3f2ce50d,2
+np.float32,0x3f684f9d,0x3f77d8c5,2
+np.float32,0x7dc784,0x2a7e82cc,2
+np.float32,0x7d2c88e9,0x540d64f8,2
+np.float32,0x807fb708,0xaa7fcf51,2
+np.float32,0x8003c49a,0xa99e16e0,2
+np.float32,0x3ee4f5b8,0x3f43c3ff,2
+np.float32,0xfe992c5e,0xd487e4ec,2
+np.float32,0x4b4dfa,0x2a568216,2
+np.float32,0x3d374c80,0x3eb5c6a8,2
+np.float32,0xbd3a4700,0xbeb6c15c,2
+np.float32,0xbf13cb80,0xbf5529e5,2
+np.float32,0xbe7306d4,0xbf1e7f91,2
+np.float32,0xbf800000,0xbf800000,2
+np.float32,0xbea42efe,0xbf2f394e,2
+np.float32,0x3e1981d0,0x3f07fe2c,2
+np.float32,0x3f17ea1d,0x3f572047,2
+np.float32,0x7dc1e0,0x2a7e7efe,2
+np.float32,0x80169c08,0xaa0fa320,2
+np.float32,0x3f3e1972,0x3f67d248,2
+np.float32,0xfe5d3c88,0xd473d815,2
+np.float32,0xbf677448,0xbf778aac,2
+np.float32,0x7e799b7d,0x547dd9e4,2
+np.float32,0x3f00bb2c,0x3f4b92cf,2
+np.float32,0xbeb29f9c,0xbf343798,2
+np.float32,0xbd6b7830,0xbec59a86,2
+np.float32,0x807a524a,0xaa7c282a,2
+np.float32,0xbe0a7a04,0xbf0366ab,2
+np.float32,0x80237470,0xaa26e061,2
+np.float32,0x3ccbc0f6,0x3e95744f,2
+np.float32,0x3edec6bc,0x3f41fcb6,2
+np.float32,0x3f635198,0x3f760efa,2
+np.float32,0x800eca4f,0xa9f960d8,2
+np.float32,0x3f800000,0x3f800000,2
+np.float32,0xff4eeb9e,0xd4bd456a,2
+np.float32,0x56f4e,0x29b29e70,2
+np.float32,0xff5383a0,0xd4bea95c,2
+np.float32,0x3f4c3a77,0x3f6d6d94,2
+np.float32,0x3f6c324a,0x3f79388c,2
+np.float32,0xbebdc092,0xbf37e27c,2
+np.float32,0xff258956,0xd4afb42e,2
+np.float32,0xdc78c,0x29f39012,2
+np.float32,0xbf2db06a,0xbf60f2f5,2
+np.float32,0xbe3c5808,0xbf119660,2
+np.float32,0xbf1ba866,0xbf58e0f4,2
+np.float32,0x80377640,0xaa41b79d,2
+np.float32,0x4fdc4d,0x2a5abfea,2
+np.float32,0x7f5e7560,0x54c1e516,2
+np.float32,0xfeb4d3f2,0xd48f9fde,2
+np.float32,0x3f12a622,0x3f549c7d,2
+np.float32,0x7f737ed7,0x54c7d2dc,2
+np.float32,0xa0ddc,0x29db456d,2
+np.float32,0xfe006740,0xd44b6689,2
+np.float32,0x3f17dfd4,0x3f571b6c,2
+np.float32,0x67546e,0x2a6e5dd1,2
+np.float32,0xff0d0f11,0xd4a693e2,2
+np.float32,0xbd170090,0xbeaa6738,2
+np.float32,0x5274a0,0x2a5d1806,2
+np.float32,0x3e154fe0,0x3f06be1a,2
+np.float32,0x7ddb302e,0x5440f0a7,2
+np.float32,0x3f579d10,0x3f71c2af,2
+np.float32,0xff2bc5bb,0xd4b1e20c,2
+np.float32,0xfee8fa6a,0xd49c4872,2
+np.float32,0xbea551b0,0xbf2fa07b,2
+np.float32,0xfeabc75c,0xd48d3004,2
+np.float32,0x7f50a5a8,0x54bdcbd1,2
+np.float32,0x50354b,0x2a5b110d,2
+np.float32,0x7d139f13,0x54063b6b,2
+np.float32,0xbeee1b08,0xbf465699,2
+np.float32,0xfe5e1650,0xd47427fe,2
+np.float32,0x7f7fffff,0x54cb2ff5,2
+np.float32,0xbf52ede8,0xbf6fff35,2
+np.float32,0x804bba81,0xaa56e8f1,2
+np.float32,0x6609e2,0x2a6d5e94,2
+np.float32,0x692621,0x2a6fc1d6,2
+np.float32,0xbf288bb6,0xbf5eb4d3,2
+np.float32,0x804f28c4,0xaa5a1b82,2
+np.float32,0xbdaad2a8,0xbedfb46e,2
+np.float32,0x5e04f8,0x2a66fb13,2
+np.float32,0x804c10da,0xaa573a81,2
+np.float32,0xbe412764,0xbf12d0fd,2
+np.float32,0x801c35cc,0xaa1aa250,2
+np.float32,0x6364d4,0x2a6b4cf9,2
+np.float32,0xbf6d3cea,0xbf79962f,2
+np.float32,0x7e5a9935,0x5472defb,2
+np.float32,0xbe73a38c,0xbf1ea19c,2
+np.float32,0xbd35e950,0xbeb550f2,2
+np.float32,0x46cc16,0x2a5223d6,2
+np.float32,0x3f005288,0x3f4b5b97,2
+np.float32,0x8034e8b7,0xaa3eb2be,2
+np.float32,0xbea775fc,0xbf3061cf,2
+np.float32,0xea0e9,0x29f87751,2
+np.float32,0xbf38faaf,0xbf65b89d,2
+np.float32,0xbedf3184,0xbf421bb0,2
+np.float32,0xbe04250c,0xbf015def,2
+np.float32,0x7f56dae8,0x54bfa901,2
+np.float32,0xfebe3e04,0xd492132e,2
+np.float32,0x3e4dc326,0x3f15f19e,2
+np.float32,0x803da197,0xaa48a621,2
+np.float32,0x7eeb35aa,0x549cc7c6,2
+np.float32,0xfebb3eb6,0xd4914dc0,2
+np.float32,0xfed17478,0xd496d5e2,2
+np.float32,0x80243694,0xaa280ed2,2
+np.float32,0x8017e666,0xaa1251d3,2
+np.float32,0xbf07e942,0xbf4f4a3e,2
+np.float32,0xbf578fa6,0xbf71bdab,2
+np.float32,0x7ed8d80f,0x549896b6,2
+np.float32,0x3f2277ae,0x3f5bff11,2
+np.float32,0x7e6f195b,0x547a3cd4,2
+np.float32,0xbf441559,0xbf6a3a91,2
+np.float32,0x7f1fb427,0x54ad9d8d,2
+np.float32,0x71695f,0x2a75e12d,2
+np.float32,0xbd859588,0xbece19a1,2
+np.float32,0x7f5702fc,0x54bfb4eb,2
+np.float32,0x3f040008,0x3f4d4842,2
+np.float32,0x3de00ca5,0x3ef4df89,2
+np.float32,0x3eeabb03,0x3f45658c,2
+np.float32,0x3dfe5e65,0x3eff7480,2
+np.float32,0x1,0x26a14518,2
+np.float32,0x8065e400,0xaa6d4130,2
+np.float32,0xff50e1bb,0xd4bdde07,2
+np.float32,0xbe88635a,0xbf24b7e9,2
+np.float32,0x3f46bfab,0x3f6b4908,2
+np.float32,0xbd85c3c8,0xbece3168,2
+np.float32,0xbe633f64,0xbf1afdb1,2
+np.float32,0xff2c7706,0xd4b21f2a,2
+np.float32,0xbf02816c,0xbf4c812a,2
+np.float32,0x80653aeb,0xaa6cbdab,2
+np.float32,0x3eef1d10,0x3f469e24,2
+np.float32,0x3d9944bf,0x3ed7c36a,2
+np.float32,0x1b03d4,0x2a186b2b,2
+np.float32,0x3f251b7c,0x3f5d2e76,2
+np.float32,0x3edebab0,0x3f41f937,2
+np.float32,0xfefc2148,0xd4a073ff,2
+np.float32,0x7448ee,0x2a77f051,2
+np.float32,0x3bb8a400,0x3e3637ee,2
+np.float32,0x57df36,0x2a61d527,2
+np.float32,0xfd8b9098,0xd425fccb,2
+np.float32,0x7f67627e,0x54c4744d,2
+np.float32,0x801165d7,0xaa039fba,2
+np.float32,0x53aae5,0x2a5e2bfd,2
+np.float32,0x8014012b,0xaa09e4f1,2
+np.float32,0x3f7a2d53,0x3f7e0b4b,2
+np.float32,0x3f5fb700,0x3f74c052,2
+np.float32,0x7f192a06,0x54ab366c,2
+np.float32,0x3f569611,0x3f71603b,2
+np.float32,0x25e2dc,0x2a2a9b65,2
+np.float32,0x8036465e,0xaa405342,2
+np.float32,0x804118e1,0xaa4c5785,2
+np.float32,0xbef08d3e,0xbf4703e1,2
+np.float32,0x3447e2,0x2a3df0be,2
+np.float32,0xbf2a350b,0xbf5f6f8c,2
+np.float32,0xbec87e3e,0xbf3b4a73,2
+np.float32,0xbe99a4a8,0xbf2b6412,2
+np.float32,0x2ea2ae,0x2a36d77e,2
+np.float32,0xfcb69600,0xd3e4b9e3,2
+np.float32,0x717700,0x2a75eb06,2
+np.float32,0xbf4e81ce,0xbf6e4ecc,2
+np.float32,0xbe2021ac,0xbf09ebee,2
+np.float32,0xfef94eee,0xd49fda31,2
+np.float32,0x8563e,0x29ce0015,2
+np.float32,0x7f5d0ca5,0x54c17c0f,2
+np.float32,0x3f16459a,0x3f56590f,2
+np.float32,0xbe12f7bc,0xbf0608a0,2
+np.float32,0x3f10fd3d,0x3f53ce5f,2
+np.float32,0x3ca5e1b0,0x3e8b8d96,2
+np.float32,0xbe5288e0,0xbf17181f,2
+np.float32,0xbf7360f6,0xbf7bb8c9,2
+np.float32,0x7e989d33,0x5487ba88,2
+np.float32,0x3ea7b5dc,0x3f307839,2
+np.float32,0x7e8da0c9,0x548463f0,2
+np.float32,0xfeaf7888,0xd48e3122,2
+np.float32,0x7d90402d,0x5427d321,2
+np.float32,0x72e309,0x2a76f0ee,2
+np.float32,0xbe1faa34,0xbf09c998,2
+np.float32,0xbf2b1652,0xbf5fd1f4,2
+np.float32,0x8051eb0c,0xaa5c9cca,2
+np.float32,0x7edf02bf,0x549a058e,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0x3f67f873,0x3f77b9c1,2
+np.float32,0x3f276b63,0x3f5e358c,2
+np.float32,0x7eeb4bf2,0x549cccb9,2
+np.float32,0x3bfa2c,0x2a46d675,2
+np.float32,0x3e133c50,0x3f061d75,2
+np.float32,0x3ca302c0,0x3e8abe4a,2
+np.float32,0x802e152e,0xaa361dd5,2
+np.float32,0x3f504810,0x3f6efd0a,2
+np.float32,0xbf43e0b5,0xbf6a2599,2
+np.float32,0x80800000,0xaa800000,2
+np.float32,0x3f1c0980,0x3f590e03,2
+np.float32,0xbf0084f6,0xbf4b7638,2
+np.float32,0xfee72d32,0xd49be10d,2
+np.float32,0x3f3c00ed,0x3f66f763,2
+np.float32,0x80511e81,0xaa5be492,2
+np.float32,0xfdd1b8a0,0xd43e1f0d,2
+np.float32,0x7d877474,0x54245785,2
+np.float32,0x7f110bfe,0x54a82207,2
+np.float32,0xff800000,0xff800000,2
+np.float32,0x6b6a2,0x29bfa706,2
+np.float32,0xbf5bdfd9,0xbf7357b7,2
+np.float32,0x8025bfa3,0xaa2a6676,2
+np.float32,0x3a3581,0x2a44dd3a,2
+np.float32,0x542c2a,0x2a5e9e2f,2
+np.float32,0xbe1d5650,0xbf091d57,2
+np.float32,0x3e97760d,0x3f2a935e,2
+np.float32,0x7f5dcde2,0x54c1b460,2
+np.float32,0x800bde1e,0xa9e7bbaf,2
+np.float32,0x3e6b9e61,0x3f1cdf07,2
+np.float32,0x7d46c003,0x54143884,2
+np.float32,0x80073fbb,0xa9c49e67,2
+np.float32,0x503c23,0x2a5b1748,2
+np.float32,0x7eb7b070,0x549060c8,2
+np.float32,0xe9d8f,0x29f86456,2
+np.float32,0xbeedd4f0,0xbf464320,2
+np.float32,0x3f40d5d6,0x3f68eda1,2
+np.float32,0xff201f28,0xd4adc44b,2
+np.float32,0xbdf61e98,0xbefca9c7,2
+np.float32,0x3e8a0dc9,0x3f2562e3,2
+np.float32,0xbc0c0c80,0xbe515f61,2
+np.float32,0x2b3c15,0x2a3248e3,2
+np.float32,0x42a7bb,0x2a4df592,2
+np.float32,0x7f337947,0x54b480af,2
+np.float32,0xfec21db4,0xd4930f4b,2
+np.float32,0x7f4fdbf3,0x54bd8e94,2
+np.float32,0x1e2253,0x2a1e1286,2
+np.float32,0x800c4c80,0xa9ea819e,2
+np.float32,0x7e96f5b7,0x54873c88,2
+np.float32,0x7ce4e131,0x53f69ed4,2
+np.float32,0xbead8372,0xbf327b63,2
+np.float32,0x3e15ca7e,0x3f06e2f3,2
+np.float32,0xbf63e17b,0xbf7642da,2
+np.float32,0xff5bdbdb,0xd4c122f9,2
+np.float32,0x3f44411e,0x3f6a4bfd,2
+np.float32,0xfd007da0,0xd40029d2,2
+np.float32,0xbe940168,0xbf2944b7,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x3d28e356,0x3eb0e1b8,2
+np.float32,0x3eb9fcd8,0x3f36a918,2
+np.float32,0x4f6410,0x2a5a51eb,2
+np.float32,0xbdf18e30,0xbefb1775,2
+np.float32,0x32edbd,0x2a3c49e3,2
+np.float32,0x801f70a5,0xaa2052da,2
+np.float32,0x8045a045,0xaa50f98c,2
+np.float32,0xbdd6cb00,0xbef17412,2
+np.float32,0x3f118f2c,0x3f541557,2
+np.float32,0xbe65c378,0xbf1b8f95,2
+np.float32,0xfd9a9060,0xd42bbb8b,2
+np.float32,0x3f04244f,0x3f4d5b0f,2
+np.float32,0xff05214b,0xd4a3656f,2
+np.float32,0xfe342cd0,0xd463b706,2
+np.float32,0x3f3409a8,0x3f63a836,2
+np.float32,0x80205db2,0xaa21e1e5,2
+np.float32,0xbf37c982,0xbf653a03,2
+np.float32,0x3f36ce8f,0x3f64d17e,2
+np.float32,0x36ffda,0x2a412d61,2
+np.float32,0xff569752,0xd4bf94e6,2
+np.float32,0x802fdb0f,0xaa386c3a,2
+np.float32,0x7ec55a87,0x5493df71,2
+np.float32,0x7f2234c7,0x54ae847e,2
+np.float32,0xbf02df76,0xbf4cb23d,2
+np.float32,0x3d68731a,0x3ec4c156,2
+np.float32,0x8146,0x2921cd8e,2
+np.float32,0x80119364,0xaa041235,2
+np.float32,0xfe6c1c00,0xd47930b5,2
+np.float32,0x8070da44,0xaa757996,2
+np.float32,0xfefbf50c,0xd4a06a9d,2
+np.float32,0xbf01b6a8,0xbf4c170a,2
+np.float32,0x110702,0x2a02aedb,2
+np.float32,0xbf063cd4,0xbf4e6f87,2
+np.float32,0x3f1ff178,0x3f5ad9dd,2
+np.float32,0xbf76dcd4,0xbf7cead0,2
+np.float32,0x80527281,0xaa5d1620,2
+np.float32,0xfea96df8,0xd48c8a7f,2
+np.float32,0x68db02,0x2a6f88b0,2
+np.float32,0x62d971,0x2a6adec7,2
+np.float32,0x3e816fe0,0x3f21df04,2
+np.float32,0x3f586379,0x3f720cc0,2
+np.float32,0x804a3718,0xaa5577ff,2
+np.float32,0x2e2506,0x2a3632b2,2
+np.float32,0x3f297d,0x2a4a4bf3,2
+np.float32,0xbe37aba8,0xbf105f88,2
+np.float32,0xbf18b264,0xbf577ea7,2
+np.float32,0x7f50d02d,0x54bdd8b5,2
+np.float32,0xfee296dc,0xd49ad757,2
+np.float32,0x7ec5137e,0x5493cdb1,2
+np.float32,0x3f4811f4,0x3f6bce3a,2
+np.float32,0xfdff32a0,0xd44af991,2
+np.float32,0x3f6ef140,0x3f7a2ed6,2
+np.float32,0x250838,0x2a2950b5,2
+np.float32,0x25c28e,0x2a2a6ada,2
+np.float32,0xbe875e50,0xbf244e90,2
+np.float32,0x3e3bdff8,0x3f11776a,2
+np.float32,0x3e9fe493,0x3f2daf17,2
+np.float32,0x804d8599,0xaa5897d9,2
+np.float32,0x3f0533da,0x3f4de759,2
+np.float32,0xbe63023c,0xbf1aefc8,2
+np.float32,0x80636e5e,0xaa6b547f,2
+np.float32,0xff112958,0xd4a82d5d,2
+np.float32,0x3e924112,0x3f28991f,2
+np.float32,0xbe996ffc,0xbf2b507a,2
+np.float32,0x802a7cda,0xaa314081,2
+np.float32,0x8022b524,0xaa25b21e,2
+np.float32,0x3f0808c8,0x3f4f5a43,2
+np.float32,0xbef0ec2a,0xbf471e0b,2
+np.float32,0xff4c2345,0xd4bc6b3c,2
+np.float32,0x25ccc8,0x2a2a7a3b,2
+np.float32,0x7f4467d6,0x54ba0260,2
+np.float32,0x7f506539,0x54bdb846,2
+np.float32,0x412ab4,0x2a4c6a2a,2
+np.float32,0x80672c4a,0xaa6e3ef0,2
+np.float32,0xbddfb7f8,0xbef4c0ac,2
+np.float32,0xbf250bb9,0xbf5d276c,2
+np.float32,0x807dca65,0xaa7e84bd,2
+np.float32,0xbf63b8e0,0xbf763438,2
+np.float32,0xbeed1b0c,0xbf460f6b,2
+np.float32,0x8021594f,0xaa238136,2
+np.float32,0xbebc74c8,0xbf377710,2
+np.float32,0x3e9f8e3b,0x3f2d8fce,2
+np.float32,0x7f50ca09,0x54bdd6d8,2
+np.float32,0x805797c1,0xaa6197df,2
+np.float32,0x3de198f9,0x3ef56f98,2
+np.float32,0xf154d,0x29fb0392,2
+np.float32,0xff7fffff,0xd4cb2ff5,2
+np.float32,0xfed22fa8,0xd49702c4,2
+np.float32,0xbf733736,0xbf7baa64,2
+np.float32,0xbf206a8a,0xbf5b1108,2
+np.float32,0xbca49680,0xbe8b3078,2
+np.float32,0xfecba794,0xd4956e1a,2
+np.float32,0x80126582,0xaa061886,2
+np.float32,0xfee5cc82,0xd49b919f,2
+np.float32,0xbf7ad6ae,0xbf7e4491,2
+np.float32,0x7ea88c81,0x548c4c0c,2
+np.float32,0xbf493a0d,0xbf6c4255,2
+np.float32,0xbf06dda0,0xbf4ec1d4,2
+np.float32,0xff3f6e84,0xd4b86cf6,2
+np.float32,0x3e4fe093,0x3f1674b0,2
+np.float32,0x8048ad60,0xaa53fbde,2
+np.float32,0x7ebb7112,0x54915ac5,2
+np.float32,0x5bd191,0x2a652a0d,2
+np.float32,0xfe3121d0,0xd4626cfb,2
+np.float32,0x7e4421c6,0x546a3f83,2
+np.float32,0x19975b,0x2a15b14f,2
+np.float32,0x801c8087,0xaa1b2a64,2
+np.float32,0xfdf6e950,0xd448c0f6,2
+np.float32,0x74e711,0x2a786083,2
+np.float32,0xbf2b2f2e,0xbf5fdccb,2
+np.float32,0x7ed19ece,0x5496e00b,2
+np.float32,0x7f6f8322,0x54c6ba63,2
+np.float32,0x3e90316d,0x3f27cd69,2
+np.float32,0x7ecb42ce,0x54955571,2
+np.float32,0x3f6d49be,0x3f799aaf,2
+np.float32,0x8053d327,0xaa5e4f9a,2
+np.float32,0x7ebd7361,0x5491df3e,2
+np.float32,0xfdb6eed0,0xd435a7aa,2
+np.float32,0x7f3e79f4,0x54b81e4b,2
+np.float32,0xfe83afa6,0xd4813794,2
+np.float32,0x37c443,0x2a421246,2
+np.float32,0xff075a10,0xd4a44cd8,2
+np.float32,0x3ebc5fe0,0x3f377047,2
+np.float32,0x739694,0x2a77714e,2
+np.float32,0xfe832946,0xd4810b91,2
+np.float32,0x7f2638e6,0x54aff235,2
+np.float32,0xfe87f7a6,0xd4829a3f,2
+np.float32,0x3f50f3f8,0x3f6f3eb8,2
+np.float32,0x3eafa3d0,0x3f333548,2
+np.float32,0xbec26ee6,0xbf39626f,2
+np.float32,0x7e6f924f,0x547a66ff,2
+np.float32,0x7f0baa46,0x54a606f8,2
+np.float32,0xbf6dfc49,0xbf79d939,2
+np.float32,0x7f005709,0x54a1699d,2
+np.float32,0x7ee3d7ef,0x549b2057,2
+np.float32,0x803709a4,0xaa4138d7,2
+np.float32,0x3f7bf49a,0x3f7ea509,2
+np.float32,0x509db7,0x2a5b6ff5,2
+np.float32,0x7eb1b0d4,0x548ec9ff,2
+np.float32,0x7eb996ec,0x5490dfce,2
+np.float32,0xbf1fcbaa,0xbf5ac89e,2
+np.float32,0x3e2c9a98,0x3f0d69cc,2
+np.float32,0x3ea77994,0x3f306312,2
+np.float32,0x3f3cbfe4,0x3f67457c,2
+np.float32,0x8422a,0x29cd5a30,2
+np.float32,0xbd974558,0xbed6d264,2
+np.float32,0xfecee77a,0xd496387f,2
+np.float32,0x3f51876b,0x3f6f76f1,2
+np.float32,0x3b1a25,0x2a45ddad,2
+np.float32,0xfe9912f0,0xd487dd67,2
+np.float32,0x3f3ab13d,0x3f666d99,2
+np.float32,0xbf35565a,0xbf64341b,2
+np.float32,0x7d4e84aa,0x54162091,2
+np.float32,0x4c2570,0x2a574dea,2
+np.float32,0x7e82dca6,0x5480f26b,2
+np.float32,0x7f5503e7,0x54bf1c8d,2
+np.float32,0xbeb85034,0xbf361c59,2
+np.float32,0x80460a69,0xaa516387,2
+np.float32,0x805fbbab,0xaa68602c,2
+np.float32,0x7d4b4c1b,0x541557b8,2
+np.float32,0xbefa9a0a,0xbf49bfbc,2
+np.float32,0x3dbd233f,0x3ee76e09,2
+np.float32,0x58b6df,0x2a628d50,2
+np.float32,0xfcdcc180,0xd3f3aad9,2
+np.float32,0x423a37,0x2a4d8487,2
+np.float32,0xbed8b32a,0xbf403507,2
+np.float32,0x3f68e85d,0x3f780f0b,2
+np.float32,0x7ee13c4b,0x549a883d,2
+np.float32,0xff2ed4c5,0xd4b2eec1,2
+np.float32,0xbf54dadc,0xbf70b99a,2
+np.float32,0x3f78b0af,0x3f7d8a32,2
+np.float32,0x3f377372,0x3f651635,2
+np.float32,0xfdaa6178,0xd43166bc,2
+np.float32,0x8060c337,0xaa6934a6,2
+np.float32,0x7ec752c2,0x54945cf6,2
+np.float32,0xbd01a760,0xbea1f624,2
+np.float32,0x6f6599,0x2a746a35,2
+np.float32,0x3f6315b0,0x3f75f95b,2
+np.float32,0x7f2baf32,0x54b1da44,2
+np.float32,0x3e400353,0x3f1286d8,2
+np.float32,0x40d3bf,0x2a4c0f15,2
+np.float32,0x7f733aca,0x54c7c03d,2
+np.float32,0x7e5c5407,0x5473828b,2
+np.float32,0x80191703,0xaa14b56a,2
+np.float32,0xbf4fc144,0xbf6ec970,2
+np.float32,0xbf1137a7,0xbf53eacd,2
+np.float32,0x80575410,0xaa615db3,2
+np.float32,0xbd0911d0,0xbea4fe07,2
+np.float32,0x3e98534a,0x3f2ae643,2
+np.float32,0x3f3b089a,0x3f669185,2
+np.float32,0x4fc752,0x2a5aacc1,2
+np.float32,0xbef44ddc,0xbf480b6e,2
+np.float32,0x80464217,0xaa519af4,2
+np.float32,0x80445fae,0xaa4fb6de,2
+np.float32,0x80771cf4,0xaa79eec8,2
+np.float32,0xfd9182e8,0xd4284fed,2
+np.float32,0xff0a5d16,0xd4a58288,2
+np.float32,0x3f33e169,0x3f63973e,2
+np.float32,0x8021a247,0xaa23f820,2
+np.float32,0xbf362522,0xbf648ab8,2
+np.float32,0x3f457cd7,0x3f6ac95e,2
+np.float32,0xbcadf400,0xbe8dc7e2,2
+np.float32,0x80237210,0xaa26dca7,2
+np.float32,0xbf1293c9,0xbf54939f,2
+np.float32,0xbc5e73c0,0xbe744a37,2
+np.float32,0x3c03f980,0x3e4d44df,2
+np.float32,0x7da46f,0x2a7e6b20,2
+np.float32,0x5d4570,0x2a665dd0,2
+np.float32,0x3e93fbac,0x3f294287,2
+np.float32,0x7e6808fd,0x5477bfa4,2
+np.float32,0xff5aa9a6,0xd4c0c925,2
+np.float32,0xbf5206ba,0xbf6fa767,2
+np.float32,0xbf6e513e,0xbf79f6f1,2
+np.float32,0x3ed01c0f,0x3f3da20f,2
+np.float32,0xff47d93d,0xd4bb1704,2
+np.float32,0x7f466cfd,0x54baa514,2
+np.float32,0x665e10,0x2a6d9fc8,2
+np.float32,0x804d0629,0xaa5820e8,2
+np.float32,0x7e0beaa0,0x54514e7e,2
+np.float32,0xbf7fcb6c,0xbf7fee78,2
+np.float32,0x3f6c5b03,0x3f7946dd,2
+np.float32,0x3e941504,0x3f294c30,2
+np.float32,0xbf2749ad,0xbf5e26a1,2
+np.float32,0xfec2a00a,0xd493302d,2
+np.float32,0x3f15a358,0x3f560bce,2
+np.float32,0x3f15c4e7,0x3f561bcd,2
+np.float32,0xfedc8692,0xd499728c,2
+np.float32,0x7e8f6902,0x5484f180,2
+np.float32,0x7f663d62,0x54c42136,2
+np.float32,0x8027ea62,0xaa2d99b4,2
+np.float32,0x3f3d093d,0x3f67636d,2
+np.float32,0x7f118c33,0x54a85382,2
+np.float32,0x803e866a,0xaa499d43,2
+np.float32,0x80053632,0xa9b02407,2
+np.float32,0xbf36dd66,0xbf64d7af,2
+np.float32,0xbf560358,0xbf71292b,2
+np.float32,0x139a8,0x29596bc0,2
+np.float32,0xbe04f75c,0xbf01a26c,2
+np.float32,0xfe1c3268,0xd45920fa,2
+np.float32,0x7ec77f72,0x5494680c,2
+np.float32,0xbedde724,0xbf41bbba,2
+np.float32,0x3e81dbe0,0x3f220bfd,2
+np.float32,0x800373ac,0xa99989d4,2
+np.float32,0x3f7f859a,0x3f7fd72d,2
+np.float32,0x3eb9dc7e,0x3f369e80,2
+np.float32,0xff5f8eb7,0xd4c236b1,2
+np.float32,0xff1c03cb,0xd4ac44ac,2
+np.float32,0x18cfe1,0x2a14285b,2
+np.float32,0x7f21b075,0x54ae54fd,2
+np.float32,0xff490bd8,0xd4bb7680,2
+np.float32,0xbf15dc22,0xbf5626de,2
+np.float32,0xfe1d5a10,0xd459a9a3,2
+np.float32,0x750544,0x2a7875e4,2
+np.float32,0x8023d5df,0xaa2778b3,2
+np.float32,0x3e42aa08,0x3f1332b2,2
+np.float32,0x3ecaa751,0x3f3bf60d,2
+np.float32,0x0,0x0,2
+np.float32,0x80416da6,0xaa4cb011,2
+np.float32,0x3f4ea9ae,0x3f6e5e22,2
+np.float32,0x2113f4,0x2a230f8e,2
+np.float32,0x3f35c2e6,0x3f64619a,2
+np.float32,0xbf50db8a,0xbf6f3564,2
+np.float32,0xff4d5cea,0xd4bccb8a,2
+np.float32,0x7ee54420,0x549b72d2,2
+np.float32,0x64ee68,0x2a6c81f7,2
+np.float32,0x5330da,0x2a5dbfc2,2
+np.float32,0x80047f88,0xa9a7b467,2
+np.float32,0xbda01078,0xbedae800,2
+np.float32,0xfe96d05a,0xd487315f,2
+np.float32,0x8003cc10,0xa99e7ef4,2
+np.float32,0x8007b4ac,0xa9c8aa3d,2
+np.float32,0x5d4bcf,0x2a66630e,2
+np.float32,0xfdd0c0b0,0xd43dd403,2
+np.float32,0xbf7a1d82,0xbf7e05f0,2
+np.float32,0x74ca33,0x2a784c0f,2
+np.float32,0x804f45e5,0xaa5a3640,2
+np.float32,0x7e6d16aa,0x547988c4,2
+np.float32,0x807d5762,0xaa7e3714,2
+np.float32,0xfecf93d0,0xd4966229,2
+np.float32,0xfecbd25c,0xd4957890,2
+np.float32,0xff7db31c,0xd4ca93b0,2
+np.float32,0x3dac9e18,0x3ee07c4a,2
+np.float32,0xbf4b2d28,0xbf6d0509,2
+np.float32,0xbd4f4c50,0xbebd62e0,2
+np.float32,0xbd2eac40,0xbeb2e0ee,2
+np.float32,0x3d01b69b,0x3ea1fc7b,2
+np.float32,0x7ec63902,0x549416ed,2
+np.float32,0xfcc47700,0xd3ea616d,2
+np.float32,0xbf5ddec2,0xbf7413a1,2
+np.float32,0xff6a6110,0xd4c54c52,2
+np.float32,0xfdfae2a0,0xd449d335,2
+np.float32,0x7e54868c,0x547099cd,2
+np.float32,0x802b5b88,0xaa327413,2
+np.float32,0x80440e72,0xaa4f647a,2
+np.float32,0x3e313c94,0x3f0eaad5,2
+np.float32,0x3ebb492a,0x3f3715a2,2
+np.float32,0xbef56286,0xbf4856d5,2
+np.float32,0x3f0154ba,0x3f4be3a0,2
+np.float32,0xff2df86c,0xd4b2a376,2
+np.float32,0x3ef6a850,0x3f48af57,2
+np.float32,0x3d8d33e1,0x3ed1f22d,2
+np.float32,0x4dd9b9,0x2a58e615,2
+np.float32,0x7f1caf83,0x54ac83c9,2
+np.float32,0xbf7286b3,0xbf7b6d73,2
+np.float32,0x80064f88,0xa9bbbd9f,2
+np.float32,0xbf1f55fa,0xbf5a92db,2
+np.float32,0x546a81,0x2a5ed516,2
+np.float32,0xbe912880,0xbf282d0a,2
+np.float32,0x5df587,0x2a66ee6e,2
+np.float32,0x801f706c,0xaa205279,2
+np.float32,0x58cb6d,0x2a629ece,2
+np.float32,0xfe754f8c,0xd47c62da,2
+np.float32,0xbefb6f4c,0xbf49f8e7,2
+np.float32,0x80000001,0xa6a14518,2
+np.float32,0xbf067837,0xbf4e8df4,2
+np.float32,0x3e8e715c,0x3f271ee4,2
+np.float32,0x8009de9b,0xa9d9ebc8,2
+np.float32,0xbf371ff1,0xbf64f36e,2
+np.float32,0x7f5ce661,0x54c170e4,2
+np.float32,0x3f3c47d1,0x3f671467,2
+np.float32,0xfea5e5a6,0xd48b8eb2,2
+np.float32,0xff62b17f,0xd4c31e15,2
+np.float32,0xff315932,0xd4b3c98f,2
+np.float32,0xbf1c3ca8,0xbf5925b9,2
+np.float32,0x7f800000,0x7f800000,2
+np.float32,0xfdf20868,0xd4476c3b,2
+np.float32,0x5b790e,0x2a64e052,2
+np.float32,0x3f5ddf4e,0x3f7413d4,2
+np.float32,0x7f1a3182,0x54ab9861,2
+np.float32,0x3f4b906e,0x3f6d2b9d,2
+np.float32,0x7ebac760,0x54912edb,2
+np.float32,0x7f626d3f,0x54c30a7e,2
+np.float32,0x3e27b058,0x3f0c0edc,2
+np.float32,0x8041e69c,0xaa4d2de8,2
+np.float32,0x3f42cee0,0x3f69b84a,2
+np.float32,0x7ec5fe83,0x5494085b,2
+np.float32,0x9d3e6,0x29d99cde,2
+np.float32,0x3edc50c0,0x3f41452d,2
+np.float32,0xbf2c463a,0xbf60562c,2
+np.float32,0x800bfa33,0xa9e871e8,2
+np.float32,0x7c9f2c,0x2a7dba4d,2
+np.float32,0x7f2ef9fd,0x54b2fb73,2
+np.float32,0x80741847,0xaa77cdb9,2
+np.float32,0x7e9c462a,0x5488ce1b,2
+np.float32,0x3ea47ec1,0x3f2f55a9,2
+np.float32,0x7f311c43,0x54b3b4f5,2
+np.float32,0x3d8f4c73,0x3ed2facd,2
+np.float32,0x806d7bd2,0xaa7301ef,2
+np.float32,0xbf633d24,0xbf760799,2
+np.float32,0xff4f9a3f,0xd4bd7a99,2
+np.float32,0x3f6021ca,0x3f74e73d,2
+np.float32,0x7e447015,0x546a5eac,2
+np.float32,0x6bff3c,0x2a71e711,2
+np.float32,0xe9c9f,0x29f85f06,2
+np.float32,0x8009fe14,0xa9dad277,2
+np.float32,0x807cf79c,0xaa7df644,2
+np.float32,0xff440e1b,0xd4b9e608,2
+np.float32,0xbddf9a50,0xbef4b5db,2
+np.float32,0x7f3b1c39,0x54b706fc,2
+np.float32,0x3c7471a0,0x3e7c16a7,2
+np.float32,0x8065b02b,0xaa6d18ee,2
+np.float32,0x7f63a3b2,0x54c36379,2
+np.float32,0xbe9c9d92,0xbf2c7d33,2
+np.float32,0x3d93aad3,0x3ed51a2e,2
+np.float32,0xbf41b040,0xbf694571,2
+np.float32,0x80396b9e,0xaa43f899,2
+np.float64,0x800fa025695f404b,0xaaa4000ff64bb00c,2
+np.float64,0xbfecc00198f98003,0xbfeee0b623fbd94b,2
+np.float64,0x7f9eeb60b03dd6c0,0x55291bf8554bb303,2
+np.float64,0x3fba74485634e890,0x3fde08710bdb148d,2
+np.float64,0xbfdd9a75193b34ea,0xbfe8bf711660a2f5,2
+np.float64,0xbfcf92e17a3f25c4,0xbfe4119eda6f3773,2
+np.float64,0xbfe359e2ba66b3c6,0xbfeb0f7ae97ea142,2
+np.float64,0x20791a5640f24,0x2a9441f13d262bed,2
+np.float64,0x3fe455fbfae8abf8,0x3feb830d63e1022c,2
+np.float64,0xbd112b7b7a226,0x2aa238c097ec269a,2
+np.float64,0x93349ba126694,0x2aa0c363cd74465a,2
+np.float64,0x20300cd440602,0x2a9432b4f4081209,2
+np.float64,0x3fdcfae677b9f5cc,0x3fe892a9ee56fe8d,2
+np.float64,0xbfefaae3f7bf55c8,0xbfefe388066132c4,2
+np.float64,0x1a7d6eb634faf,0x2a92ed9851d29ab5,2
+np.float64,0x7fd5308d39aa6119,0x553be444e30326c6,2
+np.float64,0xff811c7390223900,0xd5205cb404952fa7,2
+np.float64,0x80083d24aff07a4a,0xaaa0285cf764d898,2
+np.float64,0x800633810ccc6703,0xaa9d65341419586b,2
+np.float64,0x800ff456223fe8ac,0xaaa423bbcc24dff1,2
+np.float64,0x7fde5c99aebcb932,0x553f71be7d6d9daa,2
+np.float64,0x3fed961c4b3b2c39,0x3fef2ca146270cac,2
+np.float64,0x7fe744d30c6e89a5,0x554220a4cdc78e62,2
+np.float64,0x3fd8f527c7b1ea50,0x3fe76101085be1cb,2
+np.float64,0xbfc96a14b232d428,0xbfe2ab1a8962606c,2
+np.float64,0xffe85f540cf0bea7,0xd54268dff964519a,2
+np.float64,0x800e3be0fe7c77c2,0xaaa3634efd7f020b,2
+np.float64,0x3feb90d032f721a0,0x3fee72a4579e8b12,2
+np.float64,0xffe05674aaa0ace9,0xd5401c9e3fb4abcf,2
+np.float64,0x3fefc2e32c3f85c6,0x3fefeb940924bf42,2
+np.float64,0xbfecfd89e9f9fb14,0xbfeef6addf73ee49,2
+np.float64,0xf5862717eb0c5,0x2aa3e1428780382d,2
+np.float64,0xffc3003b32260078,0xd53558f92202dcdb,2
+np.float64,0x3feb4c152c36982a,0x3fee5940f7da0825,2
+np.float64,0x3fe7147b002e28f6,0x3fecb2948f46d1e3,2
+np.float64,0x7fe00ad9b4a015b2,0x5540039d15e1da54,2
+np.float64,0x8010000000000000,0xaaa428a2f98d728b,2
+np.float64,0xbfd3a41bfea74838,0xbfe595ab45b1be91,2
+np.float64,0x7fdbfd6e5537fadc,0x553e9a6e1107b8d0,2
+np.float64,0x800151d9d9a2a3b4,0xaa918cd8fb63f40f,2
+np.float64,0x7fe6828401ad0507,0x5541eda05dcd1fcf,2
+np.float64,0x3fdae1e7a1b5c3d0,0x3fe7f711e72ecc35,2
+np.float64,0x7fdf4936133e926b,0x553fc29c8d5edea3,2
+np.float64,0x80079de12d4f3bc3,0xaa9f7b06a9286da4,2
+np.float64,0x3fe1261cade24c39,0x3fe9fe09488e417a,2
+np.float64,0xbfc20dce21241b9c,0xbfe0a842fb207a28,2
+np.float64,0x3fe3285dfa2650bc,0x3feaf85215f59ef9,2
+np.float64,0x7fe42b93aea85726,0x554148c3c3bb35e3,2
+np.float64,0xffe6c74e7f6d8e9c,0xd541ffd13fa36dbd,2
+np.float64,0x3fe73ea139ee7d42,0x3fecc402242ab7d3,2
+np.float64,0xffbd4b46be3a9690,0xd53392de917c72e4,2
+np.float64,0x800caed8df395db2,0xaaa2a811a02e6be4,2
+np.float64,0x800aacdb6c9559b7,0xaaa19d6fbc8feebf,2
+np.float64,0x839fb4eb073f7,0x2aa0264b98327c12,2
+np.float64,0xffd0157ba9a02af8,0xd5397157a11c0d05,2
+np.float64,0x7fddc8ff173b91fd,0x553f3e7663fb2ac7,2
+np.float64,0x67b365facf66d,0x2a9dd4d838b0d853,2
+np.float64,0xffe12e7fc7225cff,0xd5406272a83a8e1b,2
+np.float64,0x7fea5b19a034b632,0x5542e567658b3e36,2
+np.float64,0x124989d824932,0x2a90ba8dc7a39532,2
+np.float64,0xffe12ef098225de0,0xd54062968450a078,2
+np.float64,0x3fea2f44a3f45e8a,0x3fedee3c461f4716,2
+np.float64,0x3fe6b033e66d6068,0x3fec88c8035e06b1,2
+np.float64,0x3fe928a2ccf25146,0x3fed88d4cde7a700,2
+np.float64,0x3feead27e97d5a50,0x3fef8d7537d82e60,2
+np.float64,0x8003ab80b6875702,0xaa98adfedd7715a9,2
+np.float64,0x45a405828b481,0x2a9a1fa99a4eff1e,2
+np.float64,0x8002ddebad85bbd8,0xaa96babfda4e0031,2
+np.float64,0x3fc278c32824f186,0x3fe0c8e7c979fbd5,2
+np.float64,0x2e10fffc5c221,0x2a96c30a766d06fa,2
+np.float64,0xffd6ba8c2ead7518,0xd53c8d1d92bc2788,2
+np.float64,0xbfeb5ec3a036bd87,0xbfee602bbf0a0d01,2
+np.float64,0x3fed5bd58f7ab7ab,0x3fef181bf591a4a7,2
+np.float64,0x7feb5274a5b6a4e8,0x55431fcf81876218,2
+np.float64,0xaf8fd6cf5f1fb,0x2aa1c6edbb1e2aaf,2
+np.float64,0x7fece718f179ce31,0x55437c74efb90933,2
+np.float64,0xbfa3c42d0c278860,0xbfd5a16407c77e73,2
+np.float64,0x800b5cff0576b9fe,0xaaa1fc4ecb0dec4f,2
+np.float64,0x800be89ae557d136,0xaaa244d115fc0963,2
+np.float64,0x800d2578f5ba4af2,0xaaa2e18a3a3fc134,2
+np.float64,0x80090ff93e321ff3,0xaaa0add578e3cc3c,2
+np.float64,0x28c5a240518c,0x2a81587cccd7e202,2
+np.float64,0x7fec066929780cd1,0x55434971435d1069,2
+np.float64,0x7fc84d4d15309a99,0x55372c204515694f,2
+np.float64,0xffe070a75de0e14e,0xd54025365046dad2,2
+np.float64,0x7fe5b27cc36b64f9,0x5541b5b822f0b6ca,2
+np.float64,0x3fdea35ac8bd46b6,0x3fe9086a0fb792c2,2
+np.float64,0xbfe79996f7af332e,0xbfece9571d37a5b3,2
+np.float64,0xffdfb47f943f6900,0xd53fe6c14c3366db,2
+np.float64,0xc015cf63802ba,0x2aa2517164d075f4,2
+np.float64,0x7feba98948375312,0x5543340b5b1f1181,2
+np.float64,0x8008678e6550cf1d,0xaaa043e7cea90da5,2
+np.float64,0x3fb11b92fa223726,0x3fd9f8b53be4d90b,2
+np.float64,0x7fc9b18cf0336319,0x55379b42da882047,2
+np.float64,0xbfe5043e736a087d,0xbfebd0c67db7a8e3,2
+np.float64,0x7fde88546a3d10a8,0x553f80cfe5bcf5fe,2
+np.float64,0x8006a6c82dcd4d91,0xaa9e171d182ba049,2
+np.float64,0xbfa0f707ac21ee10,0xbfd48e5d3faa1699,2
+np.float64,0xbfe7716bffaee2d8,0xbfecd8e6abfb8964,2
+np.float64,0x9511ccab2a23a,0x2aa0d56d748f0313,2
+np.float64,0x8003ddb9b847bb74,0xaa991ca06fd9d308,2
+np.float64,0x80030710fac60e23,0xaa9725845ac95fe8,2
+np.float64,0xffece5bbaeb9cb76,0xd5437c2670f894f4,2
+np.float64,0x3fd9be5c72b37cb9,0x3fe79f2e932a5708,2
+np.float64,0x1f050cca3e0a3,0x2a93f36499fe5228,2
+np.float64,0x3fd5422becaa8458,0x3fe6295d6150df58,2
+np.float64,0xffd72c050e2e580a,0xd53cbc52d73b495f,2
+np.float64,0xbfe66d5235ecdaa4,0xbfec6ca27e60bf23,2
+np.float64,0x17ac49a42f58a,0x2a923b5b757087a0,2
+np.float64,0xffd39edc40273db8,0xd53b2f7bb99b96bf,2
+np.float64,0x7fde6cf009bcd9df,0x553f77614eb30d75,2
+np.float64,0x80042b4c3fa85699,0xaa99c05fbdd057db,2
+np.float64,0xbfde5547f8bcaa90,0xbfe8f3147d67a940,2
+np.float64,0xbfdd02f9bf3a05f4,0xbfe894f2048aa3fe,2
+np.float64,0xbfa20ec82c241d90,0xbfd4fd02ee55aac7,2
+np.float64,0x8002f670f8c5ece3,0xaa96fad7e53dd479,2
+np.float64,0x80059f24d7eb3e4a,0xaa9c7312dae0d7bc,2
+np.float64,0x7fe6ae7423ad5ce7,0x5541f9430be53062,2
+np.float64,0xe135ea79c26be,0x2aa350d8f8c526e1,2
+np.float64,0x3fec188ce4f8311a,0x3feea44d21c23f68,2
+np.float64,0x800355688286aad2,0xaa97e6ca51eb8357,2
+np.float64,0xa2d6530b45acb,0x2aa15635bbd366e8,2
+np.float64,0x600e0150c01c1,0x2a9d1456ea6c239c,2
+np.float64,0x8009c30863338611,0xaaa118f94b188bcf,2
+np.float64,0x3fe7e4c0dfefc982,0x3fed07e8480b8c07,2
+np.float64,0xbfddac6407bb58c8,0xbfe8c46f63a50225,2
+np.float64,0xbc85e977790bd,0x2aa2344636ed713d,2
+np.float64,0xfff0000000000000,0xfff0000000000000,2
+np.float64,0xffcd1570303a2ae0,0xd5389a27d5148701,2
+np.float64,0xbf937334d026e660,0xbfd113762e4e29a7,2
+np.float64,0x3fdbfdaa9b37fb55,0x3fe84a425fdff7df,2
+np.float64,0xffc10800f5221000,0xd5349535ffe12030,2
+np.float64,0xaf40f3755e81f,0x2aa1c443af16cd27,2
+np.float64,0x800f7da34f7efb47,0xaaa3f14bf25fc89f,2
+np.float64,0xffe4a60125a94c02,0xd5416b764a294128,2
+np.float64,0xbf8e25aa903c4b40,0xbfcf5ebc275b4789,2
+np.float64,0x3fca681bbb34d038,0x3fe2e882bcaee320,2
+np.float64,0xbfd0f3c9c1a1e794,0xbfe48d0df7b47572,2
+np.float64,0xffeb99b49d373368,0xd5433060dc641910,2
+np.float64,0x3fe554fb916aa9f8,0x3febf437cf30bd67,2
+np.float64,0x80079518d0af2a32,0xaa9f6ee87044745a,2
+np.float64,0x5e01a8a0bc036,0x2a9cdf0badf222c3,2
+np.float64,0xbfea9831b3f53064,0xbfee1601ee953ab3,2
+np.float64,0xbfc369d1a826d3a4,0xbfe110b675c311e0,2
+np.float64,0xa82e640d505cd,0x2aa1863d4e523b9c,2
+np.float64,0x3fe506d70a2a0dae,0x3febd1eba3aa83fa,2
+np.float64,0xcbacba7197598,0x2aa2adeb9927f1f2,2
+np.float64,0xc112d6038225b,0x2aa25978f12038b0,2
+np.float64,0xffa7f5f44c2febf0,0xd52d0ede02d4e18b,2
+np.float64,0x8006f218e34de433,0xaa9e870cf373b4eb,2
+np.float64,0xffe6d9a5d06db34b,0xd54204a4adc608c7,2
+np.float64,0x7fe717210eae2e41,0x554214bf3e2b5228,2
+np.float64,0xbfdd4b45cdba968c,0xbfe8a94c7f225f8e,2
+np.float64,0x883356571066b,0x2aa055ab0b2a8833,2
+np.float64,0x3fe307fc02a60ff8,0x3feae9175053288f,2
+np.float64,0x3fefa985f77f530c,0x3fefe31289446615,2
+np.float64,0x8005698a98aad316,0xaa9c17814ff7d630,2
+np.float64,0x3fea77333c74ee66,0x3fee098ba70e10fd,2
+np.float64,0xbfd1d00b0023a016,0xbfe4e497fd1cbea1,2
+np.float64,0x80009b0c39813619,0xaa8b130a6909cc3f,2
+np.float64,0x3fdbeb896fb7d714,0x3fe84502ba5437f8,2
+np.float64,0x3fb6e7e3562dcfc7,0x3fdca00d35c389ad,2
+np.float64,0xb2d46ebf65a8e,0x2aa1e2fe158d0838,2
+np.float64,0xbfd5453266aa8a64,0xbfe62a6a74c8ef6e,2
+np.float64,0x7fe993aa07732753,0x5542b5438bf31cb7,2
+np.float64,0xbfda5a098cb4b414,0xbfe7ce6d4d606203,2
+np.float64,0xbfe40c3ce068187a,0xbfeb61a32c57a6d0,2
+np.float64,0x3fcf17671d3e2ed0,0x3fe3f753170ab686,2
+np.float64,0xbfe4f814b6e9f02a,0xbfebcb67c60b7b08,2
+np.float64,0x800efedf59fdfdbf,0xaaa3ba4ed44ad45a,2
+np.float64,0x800420b556e8416b,0xaa99aa7fb14edeab,2
+np.float64,0xbf6e4ae6403c9600,0xbfc3cb2b29923989,2
+np.float64,0x3fda5c760a34b8ec,0x3fe7cf2821c52391,2
+np.float64,0x7f898faac0331f55,0x5522b44a01408188,2
+np.float64,0x3fd55af4b7aab5e9,0x3fe631f6d19503b3,2
+np.float64,0xbfa30a255c261450,0xbfd55caf0826361d,2
+np.float64,0x7fdfb801343f7001,0x553fe7ee50b9199a,2
+np.float64,0x7fa89ee91c313dd1,0x552d528ca2a4d659,2
+np.float64,0xffea72921d34e524,0xd542eb01af2e470d,2
+np.float64,0x3feddf0f33fbbe1e,0x3fef462b67fc0a91,2
+np.float64,0x3fe36700b566ce01,0x3feb1596caa8eff7,2
+np.float64,0x7fe6284a25ac5093,0x5541d58be3956601,2
+np.float64,0xffda16f7c8b42df0,0xd53de4f722485205,2
+np.float64,0x7f9355b94026ab72,0x552578cdeb41d2ca,2
+np.float64,0xffd3a9b022275360,0xd53b347b02dcea21,2
+np.float64,0x3fcb7f4f4a36fe9f,0x3fe32a40e9f6c1aa,2
+np.float64,0x7fdb958836372b0f,0x553e746103f92111,2
+np.float64,0x3fd37761c0a6eec4,0x3fe5853c5654027e,2
+np.float64,0x3fe449f1a2e893e4,0x3feb7d9e4eacc356,2
+np.float64,0x80077dfbef0efbf9,0xaa9f4ed788d2fadd,2
+np.float64,0x4823aa7890476,0x2a9a6eb4b653bad5,2
+np.float64,0xbfede01a373bc034,0xbfef468895fbcd29,2
+np.float64,0xbfe2bac5f125758c,0xbfeac4811c4dd66f,2
+np.float64,0x3fec10373af8206e,0x3feea14529e0f178,2
+np.float64,0x3fe305e30ca60bc6,0x3feae81a2f9d0302,2
+np.float64,0xa9668c5f52cd2,0x2aa1910e3a8f2113,2
+np.float64,0xbfd98b1717b3162e,0xbfe78f75995335d2,2
+np.float64,0x800fa649c35f4c94,0xaaa402ae79026a8f,2
+np.float64,0xbfb07dacf620fb58,0xbfd9a7d33d93a30f,2
+np.float64,0x80015812f382b027,0xaa91a843e9c85c0e,2
+np.float64,0x3fc687d96c2d0fb3,0x3fe1ef0ac16319c5,2
+np.float64,0xbfecad2ecd795a5e,0xbfeed9f786697af0,2
+np.float64,0x1608c1242c119,0x2a91cd11e9b4ccd2,2
+np.float64,0x6df775e8dbeef,0x2a9e6ba8c71130eb,2
+np.float64,0xffe96e9332b2dd26,0xd542ac342d06299b,2
+np.float64,0x7fecb6a3b8396d46,0x5543718af8162472,2
+np.float64,0x800d379f893a6f3f,0xaaa2ea36bbcb9308,2
+np.float64,0x3f924cdb202499b6,0x3fd0bb90af8d1f79,2
+np.float64,0x0,0x0,2
+np.float64,0x7feaf3b365f5e766,0x5543099a160e2427,2
+np.float64,0x3fea169ed0742d3e,0x3fede4d526e404f8,2
+np.float64,0x7feaf5f2f775ebe5,0x55430a2196c5f35a,2
+np.float64,0xbfc80d4429301a88,0xbfe2541f2ddd3334,2
+np.float64,0xffc75203b32ea408,0xd536db2837068689,2
+np.float64,0xffed2850e63a50a1,0xd5438b1217b72b8a,2
+np.float64,0x7fc16b0e7f22d61c,0x5534bcd0bfddb6f0,2
+np.float64,0x7feee8ed09fdd1d9,0x5543ed5b3ca483ab,2
+np.float64,0x7fb6c7ee662d8fdc,0x5531fffb5d46dafb,2
+np.float64,0x3fd77cebf8aef9d8,0x3fe6e9242e2bd29d,2
+np.float64,0x3f81c33f70238680,0x3fca4c7f3c9848f7,2
+np.float64,0x3fd59fea92ab3fd5,0x3fe649c1558cadd5,2
+np.float64,0xffeba82d4bf7505a,0xd54333bad387f7bd,2
+np.float64,0xffd37630e1a6ec62,0xd53b1ca62818c670,2
+np.float64,0xffec2c1e70b8583c,0xd5435213dcd27c22,2
+np.float64,0x7fec206971f840d2,0x55434f6660a8ae41,2
+np.float64,0x3fed2964adba52c9,0x3fef0642fe72e894,2
+np.float64,0xffd08e30d6211c62,0xd539b060e0ae02da,2
+np.float64,0x3e5f976c7cbf4,0x2a992e6ff991a122,2
+np.float64,0xffe6eee761adddce,0xd5420a393c67182f,2
+np.float64,0xbfe8ec9a31f1d934,0xbfed714426f58147,2
+np.float64,0x7fefffffffffffff,0x554428a2f98d728b,2
+np.float64,0x3fb3ae8b2c275d16,0x3fdb36b81b18a546,2
+np.float64,0x800f73df4dfee7bf,0xaaa3ed1a3e2cf49c,2
+np.float64,0xffd0c8873b21910e,0xd539ce6a3eab5dfd,2
+np.float64,0x3facd6c49439ad80,0x3fd8886f46335df1,2
+np.float64,0x3935859c726b2,0x2a98775f6438dbb1,2
+np.float64,0x7feed879fbfdb0f3,0x5543e9d1ac239469,2
+np.float64,0xbfe84dd990f09bb3,0xbfed323af09543b1,2
+np.float64,0xbfe767cc5a6ecf98,0xbfecd4f39aedbacb,2
+np.float64,0xffd8bd91d5b17b24,0xd53d5eb3734a2609,2
+np.float64,0xbfe13edeb2a27dbe,0xbfea0a856f0b9656,2
+np.float64,0xd933dd53b267c,0x2aa3158784e428c9,2
+np.float64,0xbfef6fef987edfdf,0xbfefcfb1c160462b,2
+np.float64,0x8009eeda4893ddb5,0xaaa13268a41045b1,2
+np.float64,0xab48c7a156919,0x2aa1a1a9c124c87d,2
+np.float64,0xa997931d532f3,0x2aa192bfe5b7bbb4,2
+np.float64,0xffe39ce8b1e739d1,0xd5411fa1c5c2cbd8,2
+np.float64,0x7e7ac2f6fcf59,0x2a9fdf6f263a9e9f,2
+np.float64,0xbfee1e35a6fc3c6b,0xbfef5c25d32b4047,2
+np.float64,0xffe5589c626ab138,0xd5419d220cc9a6da,2
+np.float64,0x7fe12509bf224a12,0x55405f7036dc5932,2
+np.float64,0xa6f15ba94de2c,0x2aa17b3367b1fc1b,2
+np.float64,0x3fca8adbfa3515b8,0x3fe2f0ca775749e5,2
+np.float64,0xbfcb03aa21360754,0xbfe30d5b90ca41f7,2
+np.float64,0x3fefafb2da7f5f66,0x3fefe5251aead4e7,2
+np.float64,0xffd90a59d23214b4,0xd53d7cf63a644f0e,2
+np.float64,0x3fba499988349333,0x3fddf84154fab7e5,2
+np.float64,0x800a76a0bc54ed42,0xaaa17f68cf67f2fa,2
+np.float64,0x3fea33d15bb467a3,0x3fedeff7f445b2ff,2
+np.float64,0x8005d9b0726bb362,0xaa9cd48624afeca9,2
+np.float64,0x7febf42e9a77e85c,0x55434541d8073376,2
+np.float64,0xbfedfc4469bbf889,0xbfef505989f7ee7d,2
+np.float64,0x8001211f1422423f,0xaa90a9889d865349,2
+np.float64,0x800e852f7fdd0a5f,0xaaa3845f11917f8e,2
+np.float64,0xffefd613c87fac27,0xd5441fd17ec669b4,2
+np.float64,0x7fed2a74543a54e8,0x55438b8c637da8b8,2
+np.float64,0xb83d50ff707aa,0x2aa210b4fc11e4b2,2
+np.float64,0x10000000000000,0x2aa428a2f98d728b,2
+np.float64,0x474ad9208e97,0x2a84e5a31530368a,2
+np.float64,0xffd0c5498ea18a94,0xd539ccc0e5cb425e,2
+np.float64,0x8001a8e9c82351d4,0xaa92f1aee6ca5b7c,2
+np.float64,0xd28db1e5a51b6,0x2aa2e328c0788f4a,2
+np.float64,0x3bf734ac77ee7,0x2a98da65c014b761,2
+np.float64,0x3fe56e17c96adc30,0x3febff2b6b829b7a,2
+np.float64,0x7783113eef063,0x2a9f46c3f09eb42c,2
+np.float64,0x3fd69d4e42ad3a9d,0x3fe69f83a21679f4,2
+np.float64,0x3fd34f4841a69e90,0x3fe5766b3c771616,2
+np.float64,0x3febb49895b76931,0x3fee7fcb603416c9,2
+np.float64,0x7fe8d6cb55f1ad96,0x554286c3b3bf4313,2
+np.float64,0xbfe67c6ba36cf8d8,0xbfec730218f2e284,2
+np.float64,0xffef9d97723f3b2e,0xd54413e38b6c29be,2
+np.float64,0x12d8cd2a25b1b,0x2a90e5ccd37b8563,2
+np.float64,0x81fe019103fc0,0x2aa01524155e73c5,2
+np.float64,0x7fe95d546f72baa8,0x5542a7fabfd425ff,2
+np.float64,0x800e742f1f9ce85e,0xaaa37cbe09e1f874,2
+np.float64,0xffd96bd3a732d7a8,0xd53da3086071264a,2
+np.float64,0x4ef2691e9de4e,0x2a9b3d316047fd6d,2
+np.float64,0x1a91684c3522e,0x2a92f25913c213de,2
+np.float64,0x3d5151b87aa2b,0x2a9909dbd9a44a84,2
+np.float64,0x800d9049435b2093,0xaaa31424e32d94a2,2
+np.float64,0xffe5b25fcc2b64bf,0xd541b5b0416b40b5,2
+np.float64,0xffe0eb784c21d6f0,0xd5404d083c3d6bc6,2
+np.float64,0x8007ceefbf0f9de0,0xaa9fbe0d739368b4,2
+np.float64,0xb78529416f0b,0x2a8ca3b29b5b3f18,2
+np.float64,0x7fba61130034c225,0x5532e6d4ca0f2918,2
+np.float64,0x3fba8d67ae351acf,0x3fde11efd6239b09,2
+np.float64,0x3fe7f24c576fe498,0x3fed0d63947a854d,2
+np.float64,0x2bb58dec576b3,0x2a965de7fca12aff,2
+np.float64,0xbfe86ceec4f0d9de,0xbfed3ea7f1d084e2,2
+np.float64,0x7fd1a7f7bca34fee,0x553a3f01b67fad2a,2
+np.float64,0x3fd9a43acfb34874,0x3fe7972dc5d8dfd6,2
+np.float64,0x7fd9861acdb30c35,0x553dad3b1bbb3b4d,2
+np.float64,0xffecc0c388398186,0xd54373d3b903deec,2
+np.float64,0x3fa6f86e9c2df0e0,0x3fd6bdbe40fcf710,2
+np.float64,0x800ddd99815bbb33,0xaaa33820d2f889bb,2
+np.float64,0x7fe087089b610e10,0x55402c868348a6d3,2
+np.float64,0x3fdf43d249be87a5,0x3fe933d29fbf7c23,2
+np.float64,0x7fe4f734c7a9ee69,0x5541822e56c40725,2
+np.float64,0x3feb39a9d3b67354,0x3fee526bf1f69f0e,2
+np.float64,0x3fe61454a0ec28a9,0x3fec46d7c36f7566,2
+np.float64,0xbfeafaa0a375f541,0xbfee3af2e49d457a,2
+np.float64,0x3fda7378e1b4e6f0,0x3fe7d613a3f92c40,2
+np.float64,0xe3e31c5fc7c64,0x2aa3645c12e26171,2
+np.float64,0xbfe97a556df2f4ab,0xbfeda8aa84cf3544,2
+np.float64,0xff612f9c80225f00,0xd514a51e5a2a8a97,2
+np.float64,0x800c51c8a0f8a391,0xaaa279fe7d40b50b,2
+np.float64,0xffd6f9d2312df3a4,0xd53ca783a5f8d110,2
+np.float64,0xbfead48bd7f5a918,0xbfee2cb2f89c5e57,2
+np.float64,0x800f5949e89eb294,0xaaa3e1a67a10cfef,2
+np.float64,0x800faf292b7f5e52,0xaaa40675e0c96cfd,2
+np.float64,0xbfedc238453b8470,0xbfef3c179d2d0209,2
+np.float64,0x3feb0443c5760888,0x3fee3e8bf29089c2,2
+np.float64,0xb26f69e164ded,0x2aa1df9f3dd7d765,2
+np.float64,0x3fcacdc053359b80,0x3fe300a67765b667,2
+np.float64,0x3fe8b274647164e8,0x3fed5a4cd4da8155,2
+np.float64,0x291e6782523ce,0x2a95ea7ac1b13a68,2
+np.float64,0xbfc4fc094e29f814,0xbfe1838671fc8513,2
+np.float64,0x3fbf1301f23e2600,0x3fdfb03a6f13e597,2
+np.float64,0xffeb36554ab66caa,0xd543193d8181e4f9,2
+np.float64,0xbfd969a52db2d34a,0xbfe78528ae61f16d,2
+np.float64,0x800cccd04d3999a1,0xaaa2b6b7a2d2d2d6,2
+np.float64,0x808eb4cb011d7,0x2aa005effecb2b4a,2
+np.float64,0x7fe839b3f9b07367,0x55425f61e344cd6d,2
+np.float64,0xbfeb25b6ed764b6e,0xbfee4b0234fee365,2
+np.float64,0xffefffffffffffff,0xd54428a2f98d728b,2
+np.float64,0xbfe01305da60260c,0xbfe9700b784af7e9,2
+np.float64,0xffcbf36b0a37e6d8,0xd538474b1d74ffe1,2
+np.float64,0xffaeebe3e83dd7c0,0xd52fa2e8dabf7209,2
+np.float64,0xbfd9913bf0b32278,0xbfe7915907aab13c,2
+np.float64,0xbfe7d125d9efa24c,0xbfecfff563177706,2
+np.float64,0xbfee98d23cbd31a4,0xbfef867ae393e446,2
+np.float64,0x3fe30efb67e61df6,0x3feaec6344633d11,2
+np.float64,0x1,0x2990000000000000,2
+np.float64,0x7fd5524fd3aaa49f,0x553bf30d18ab877e,2
+np.float64,0xc98b403f93168,0x2aa29d2fadb13c07,2
+np.float64,0xffe57080046ae100,0xd541a3b1b687360e,2
+np.float64,0x7fe20bade5e4175b,0x5540a79b94294f40,2
+np.float64,0x3fe155400a22aa80,0x3fea15c45f5b5837,2
+np.float64,0x7fe428dc8f6851b8,0x554147fd2ce93cc1,2
+np.float64,0xffefb77eb67f6efc,0xd544195dcaff4980,2
+np.float64,0x3fe49e733b293ce6,0x3feba394b833452a,2
+np.float64,0x38e01e3e71c05,0x2a986b2c955bad21,2
+np.float64,0x7fe735eb376e6bd5,0x55421cc51290d92d,2
+np.float64,0xbfd81d8644b03b0c,0xbfe71ce6d6fbd51a,2
+np.float64,0x8009a32325134647,0xaaa10645d0e6b0d7,2
+np.float64,0x56031ab8ac064,0x2a9c074be40b1f80,2
+np.float64,0xff8989aa30331340,0xd522b2d319a0ac6e,2
+np.float64,0xbfd6c183082d8306,0xbfe6ab8ffb3a8293,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xbfe17b68b1e2f6d2,0xbfea28dac8e0c457,2
+np.float64,0x3fbb50e42236a1c8,0x3fde5b090d51e3bd,2
+np.float64,0xffc2bb7cbf2576f8,0xd5353f1b3571c17f,2
+np.float64,0xbfe7576bca6eaed8,0xbfecce388241f47c,2
+np.float64,0x3fe7b52b04ef6a56,0x3fecf495bef99e7e,2
+np.float64,0xffe5511af82aa236,0xd5419b11524e8350,2
+np.float64,0xbfe66d5edf2cdabe,0xbfec6ca7d7b5be8c,2
+np.float64,0xc84a0ba790942,0x2aa29346f16a2cb4,2
+np.float64,0x6db5e7a0db6be,0x2a9e659c0e8244a0,2
+np.float64,0x7fef8f7b647f1ef6,0x554410e67af75d27,2
+np.float64,0xbfe2b4ada7e5695c,0xbfeac1997ec5a064,2
+np.float64,0xbfe99372e03326e6,0xbfedb2662b287543,2
+np.float64,0x3fa45d352428ba6a,0x3fd5d8a895423abb,2
+np.float64,0x3fa029695c2052d3,0x3fd439f858998886,2
+np.float64,0xffe0a9bd3261537a,0xd54037d0cd8bfcda,2
+np.float64,0xbfef83e09a7f07c1,0xbfefd66a4070ce73,2
+np.float64,0x7fee3dcc31fc7b97,0x5543c8503869407e,2
+np.float64,0xffbd16f1603a2de0,0xd533872fa5be978b,2
+np.float64,0xbfe8173141b02e62,0xbfed1c478614c6f4,2
+np.float64,0xbfef57aa277eaf54,0xbfefc77fdab27771,2
+np.float64,0x7fe883a02f31073f,0x554271ff0e3208da,2
+np.float64,0xe3adb63bc75b7,0x2aa362d833d0e41c,2
+np.float64,0x8001c430bac38862,0xaa93575026d26510,2
+np.float64,0x12fb347225f67,0x2a90f00eb9edb3fe,2
+np.float64,0x3fe53f83cbaa7f08,0x3febead40de452c2,2
+np.float64,0xbfe7f67227efece4,0xbfed0f10e32ad220,2
+np.float64,0xb8c5b45d718b7,0x2aa2152912cda86d,2
+np.float64,0x3fd23bb734a4776e,0x3fe50e5d3008c095,2
+np.float64,0x8001fd558ee3faac,0xaa941faa1f7ed450,2
+np.float64,0xffe6bbeda9ed77db,0xd541fcd185a63afa,2
+np.float64,0x4361d79086c3c,0x2a99d692237c30b7,2
+np.float64,0xbfd012f004a025e0,0xbfe43093e290fd0d,2
+np.float64,0xffe1d8850423b10a,0xd54097cf79d8d01e,2
+np.float64,0x3fccf4df7939e9bf,0x3fe37f8cf8be6436,2
+np.float64,0x8000546bc6c0a8d8,0xaa861bb3588556f2,2
+np.float64,0xbfecb4d6ba7969ae,0xbfeedcb6239135fe,2
+np.float64,0xbfaeb425cc3d6850,0xbfd90cfc103bb896,2
+np.float64,0x800ec037ec7d8070,0xaaa39eae8bde9774,2
+np.float64,0xbfeeaf863dfd5f0c,0xbfef8e4514772a8a,2
+np.float64,0xffec67c6c4b8cf8d,0xd5435fad89f900cf,2
+np.float64,0x3fda4498da348932,0x3fe7c7f6b3f84048,2
+np.float64,0xbfd05fd3dea0bfa8,0xbfe4509265a9b65f,2
+np.float64,0x3fe42cc713a8598e,0x3feb706ba9cd533c,2
+np.float64,0xec22d4d7d845b,0x2aa39f8cccb9711c,2
+np.float64,0x7fda30606c3460c0,0x553deea865065196,2
+np.float64,0xbfd58cba8bab1976,0xbfe64327ce32d611,2
+np.float64,0xadd521c75baa4,0x2aa1b7efce201a98,2
+np.float64,0x7fed43c1027a8781,0x55439131832b6429,2
+np.float64,0x800bee278fb7dc4f,0xaaa247a71e776db4,2
+np.float64,0xbfe9be5dd2737cbc,0xbfedc2f9501755b0,2
+np.float64,0x8003f4854447e90b,0xaa994d9b5372b13b,2
+np.float64,0xbfe5d0f867eba1f1,0xbfec29f8dd8b33a4,2
+np.float64,0x3fd79102d5af2206,0x3fe6efaa7a1efddb,2
+np.float64,0xbfeae783c835cf08,0xbfee33cdb4a44e81,2
+np.float64,0x3fcf1713e83e2e28,0x3fe3f7414753ddfb,2
+np.float64,0xffe5ab3cff2b567a,0xd541b3bf0213274a,2
+np.float64,0x7fe0fc65d8a1f8cb,0x554052761ac96386,2
+np.float64,0x7e81292efd026,0x2a9fdff8c01ae86f,2
+np.float64,0x80091176039222ec,0xaaa0aebf0565dfa6,2
+np.float64,0x800d2bf5ab5a57ec,0xaaa2e4a4c31e7e29,2
+np.float64,0xffd1912ea923225e,0xd53a33b2856726ab,2
+np.float64,0x800869918ed0d323,0xaaa0453408e1295d,2
+np.float64,0xffba0898fa341130,0xd532d19b202a9646,2
+np.float64,0xbfe09fac29613f58,0xbfe9b9687b5811a1,2
+np.float64,0xbfbd4ae82e3a95d0,0xbfdf1220f6f0fdfa,2
+np.float64,0xffea11d27bb423a4,0xd542d3d3e1522474,2
+np.float64,0xbfe6b05705ad60ae,0xbfec88d6bcab2683,2
+np.float64,0x3fe624a3f2ec4948,0x3fec4dcc78ddf871,2
+np.float64,0x53483018a6907,0x2a9bba8f92006b69,2
+np.float64,0xbfec0a6eeb7814de,0xbfee9f2a741248d7,2
+np.float64,0x3fe8c8ce6371919d,0x3fed63250c643482,2
+np.float64,0xbfe26b0ef964d61e,0xbfea9e511db83437,2
+np.float64,0xffa0408784208110,0xd52987f62c369ae9,2
+np.float64,0xffc153abc322a758,0xd534b384b5c5fe63,2
+np.float64,0xbfbdce88a63b9d10,0xbfdf4065ef0b01d4,2
+np.float64,0xffed4a4136fa9482,0xd54392a450f8b0af,2
+np.float64,0x8007aa18748f5432,0xaa9f8bd2226d4299,2
+np.float64,0xbfdab4d3e8b569a8,0xbfe7e9a5402540e5,2
+np.float64,0x7fe68914f92d1229,0x5541ef5e78fa35de,2
+np.float64,0x800a538bb1b4a718,0xaaa16bc487711295,2
+np.float64,0xffe02edbc8605db7,0xd5400f8f713df890,2
+np.float64,0xffe8968053712d00,0xd54276b9cc7f460a,2
+np.float64,0x800a4ce211d499c5,0xaaa1680491deb40c,2
+np.float64,0x3f988080f8310102,0x3fd2713691e99329,2
+np.float64,0xf64e42a7ec9c9,0x2aa3e6a7af780878,2
+np.float64,0xff73cc7100279900,0xd51b4478c3409618,2
+np.float64,0x71e6722ce3ccf,0x2a9ec76ddf296ce0,2
+np.float64,0x8006ca16ab0d942e,0xaa9e4bfd862af570,2
+np.float64,0x8000000000000000,0x8000000000000000,2
+np.float64,0xbfed373e02ba6e7c,0xbfef0b2b7bb767b3,2
+np.float64,0xa6cb0f694d962,0x2aa179dd16b0242b,2
+np.float64,0x7fec14626cf828c4,0x55434ca55b7c85d5,2
+np.float64,0x3fcda404513b4808,0x3fe3a68e8d977752,2
+np.float64,0xbfeb94995f772933,0xbfee74091d288b81,2
+np.float64,0x3fce2299a13c4530,0x3fe3c2603f28d23b,2
+np.float64,0xffd07f4534a0fe8a,0xd539a8a6ebc5a603,2
+np.float64,0x7fdb1c651e3638c9,0x553e478a6385c86b,2
+np.float64,0x3fec758336f8eb06,0x3feec5f3b92c8b28,2
+np.float64,0x796fc87cf2dfa,0x2a9f7184a4ad8c49,2
+np.float64,0x3fef9ba866ff3750,0x3fefde6a446fc2cd,2
+np.float64,0x964d26c72c9a5,0x2aa0e143f1820179,2
+np.float64,0xbfef6af750bed5ef,0xbfefce04870a97bd,2
+np.float64,0x3fe2f3961aa5e72c,0x3feadf769321a3ff,2
+np.float64,0xbfd6b706e9ad6e0e,0xbfe6a8141c5c3b5d,2
+np.float64,0x7fe0ecc40a21d987,0x55404d72c2b46a82,2
+np.float64,0xbfe560d19deac1a3,0xbfebf962681a42a4,2
+np.float64,0xbfea37170ab46e2e,0xbfedf136ee9df02b,2
+np.float64,0xbfebf78947b7ef12,0xbfee9847ef160257,2
+np.float64,0x800551f8312aa3f1,0xaa9bee7d3aa5491b,2
+np.float64,0xffed2513897a4a26,0xd5438a58c4ae28ec,2
+np.float64,0x7fd962d75cb2c5ae,0x553d9f8a0c2016f3,2
+np.float64,0x3fefdd8512bfbb0a,0x3feff47d8da7424d,2
+np.float64,0xbfefa5b43bff4b68,0xbfefe1ca42867af0,2
+np.float64,0xbfc8a2853531450c,0xbfe279bb7b965729,2
+np.float64,0x800c8843bc391088,0xaaa2951344e7b29b,2
+np.float64,0x7fe22587bae44b0e,0x5540af8bb58cfe86,2
+np.float64,0xbfe159fae822b3f6,0xbfea182394eafd8d,2
+np.float64,0xbfe6fdfd50edfbfa,0xbfeca93f2a3597d0,2
+np.float64,0xbfe5cd5afaeb9ab6,0xbfec286a8ce0470f,2
+np.float64,0xbfc84bb97f309774,0xbfe263ef0f8f1f6e,2
+np.float64,0x7fd9c1e548b383ca,0x553dc4556874ecb9,2
+np.float64,0x7fda43d33bb487a5,0x553df60f61532fc0,2
+np.float64,0xbfe774bd25eee97a,0xbfecda42e8578c1f,2
+np.float64,0x800df1f5ab9be3ec,0xaaa34184712e69db,2
+np.float64,0xbff0000000000000,0xbff0000000000000,2
+np.float64,0x3fe14ec21b629d84,0x3fea128244215713,2
+np.float64,0x7fc1ce7843239cf0,0x5534e3fa8285b7b8,2
+np.float64,0xbfe922b204724564,0xbfed86818687d649,2
+np.float64,0x3fc58924fb2b1248,0x3fe1aa715ff6ebbf,2
+np.float64,0x8008b637e4d16c70,0xaaa0760b53abcf46,2
+np.float64,0xffbf55bd4c3eab78,0xd53404a23091a842,2
+np.float64,0x9f6b4a753ed6a,0x2aa136ef9fef9596,2
+np.float64,0xbfd11da7f8a23b50,0xbfe49deb493710d8,2
+np.float64,0x800a2f07fcd45e10,0xaaa157237c98b4f6,2
+np.float64,0x3fdd4defa4ba9bdf,0x3fe8aa0bcf895f4f,2
+np.float64,0x7fe9b0ab05f36155,0x5542bc5335414473,2
+np.float64,0x3fe89c97de313930,0x3fed51a1189b8982,2
+np.float64,0x3fdd45c8773a8b91,0x3fe8a7c2096fbf5a,2
+np.float64,0xbfeb6f64daf6deca,0xbfee665167ef43ad,2
+np.float64,0xffdf9da1c4bf3b44,0xd53fdf141944a983,2
+np.float64,0x3fde092ed0bc125c,0x3fe8de25bfbfc2db,2
+np.float64,0xbfcb21f96b3643f4,0xbfe3147904c258cf,2
+np.float64,0x800c9c934f993927,0xaaa29f17c43f021b,2
+np.float64,0x9b91814d37230,0x2aa11329e59bf6b0,2
+np.float64,0x3fe28a7e0b6514fc,0x3feaad6d23e2eadd,2
+np.float64,0xffecf38395f9e706,0xd5437f3ee1cd61e4,2
+np.float64,0x3fcade92a935bd25,0x3fe3049f4c1da1d0,2
+np.float64,0x800ab25d95d564bc,0xaaa1a076d7c66e04,2
+np.float64,0xffc0989e1e21313c,0xd53467f3b8158298,2
+np.float64,0x3fd81523eeb02a48,0x3fe71a38d2da8a82,2
+np.float64,0x7fe5b9dd402b73ba,0x5541b7b9b8631010,2
+np.float64,0x2c160d94582c3,0x2a966e51b503a3d1,2
+np.float64,0x2c416ffa5882f,0x2a9675aaef8b29c4,2
+np.float64,0x7fefe2ff01bfc5fd,0x55442289faf22b86,2
+np.float64,0xbfd469bf5d28d37e,0xbfe5dd239ffdc7eb,2
+np.float64,0xbfdd56f3eabaade8,0xbfe8ac93244ca17b,2
+np.float64,0xbfe057b89160af71,0xbfe9941557340bb3,2
+np.float64,0x800c50e140b8a1c3,0xaaa2798ace9097ee,2
+np.float64,0xbfda5a8984b4b514,0xbfe7ce93d65a56b0,2
+np.float64,0xbfcd6458323ac8b0,0xbfe39872514127bf,2
+np.float64,0x3fefb1f5ebff63ec,0x3fefe5e761b49b89,2
+np.float64,0x3fea3abc1df47578,0x3fedf29a1c997863,2
+np.float64,0x7fcb4a528e3694a4,0x553815f169667213,2
+np.float64,0x8c77da7b18efc,0x2aa080e52bdedb54,2
+np.float64,0x800e5dde4c5cbbbd,0xaaa372b16fd8b1ad,2
+np.float64,0x3fd2976038a52ec0,0x3fe5316b4f79fdbc,2
+np.float64,0x69413a0ed2828,0x2a9dfacd9cb44286,2
+np.float64,0xbfebbac0bdb77582,0xbfee820d9288b631,2
+np.float64,0x1a12aa7c34256,0x2a92d407e073bbfe,2
+np.float64,0xbfc41a27c3283450,0xbfe143c8665b0d3c,2
+np.float64,0xffe4faa41369f548,0xd54183230e0ce613,2
+np.float64,0xbfdeae81f23d5d04,0xbfe90b734bf35b68,2
+np.float64,0x3fc984ba58330975,0x3fe2b19e9052008e,2
+np.float64,0x7fe6e51b8d2dca36,0x554207a74ae2bb39,2
+np.float64,0x80081a58a81034b2,0xaaa0117d4aff11c8,2
+np.float64,0x7fde3fddfe3c7fbb,0x553f67d0082acc67,2
+np.float64,0x3fac7c999038f933,0x3fd86ec2f5dc3aa4,2
+np.float64,0x7fa26b4c4c24d698,0x552a9e6ea8545c18,2
+np.float64,0x3fdacd06e6b59a0e,0x3fe7f0dc0e8f9c6d,2
+np.float64,0x80064b62cbec96c6,0xaa9d8ac0506fdd05,2
+np.float64,0xb858116170b1,0x2a8caea703d9ccc8,2
+np.float64,0xbfe8d94ccef1b29a,0xbfed69a8782cbf3d,2
+np.float64,0x8005607d6a6ac0fc,0xaa9c07cf8620b037,2
+np.float64,0xbfe66a52daacd4a6,0xbfec6b5e403e6864,2
+np.float64,0x7fc398c2e0273185,0x5535918245894606,2
+np.float64,0x74b2d7dce965c,0x2a9f077020defdbc,2
+np.float64,0x7fe8f7a4d9b1ef49,0x55428eeae210e8eb,2
+np.float64,0x80027deddc84fbdc,0xaa95b11ff9089745,2
+np.float64,0xffeba2a94e774552,0xd5433273f6568902,2
+np.float64,0x80002f8259405f05,0xaa8240b68d7b9dc4,2
+np.float64,0xbfdf0d84883e1b0a,0xbfe92532c69c5802,2
+np.float64,0xbfcdfa7b6b3bf4f8,0xbfe3b997a84d0914,2
+np.float64,0x800c18b04e183161,0xaaa25d46d60b15c6,2
+np.float64,0xffeaf1e37c35e3c6,0xd543092cd929ac19,2
+np.float64,0xbfc5aa07752b5410,0xbfe1b36ab5ec741f,2
+np.float64,0x3fe5c491d1eb8924,0x3fec24a1c3f6a178,2
+np.float64,0xbfeb736937f6e6d2,0xbfee67cd296e6fa9,2
+np.float64,0xffec3d5718787aad,0xd5435602e1a2cc43,2
+np.float64,0x7fe71e1da86e3c3a,0x55421691ead882cb,2
+np.float64,0x3fdd6ed0c93adda2,0x3fe8b341d066c43c,2
+np.float64,0x7fbe3d7a203c7af3,0x5533c83e53283430,2
+np.float64,0x3fdc20cb56384197,0x3fe854676360aba9,2
+np.float64,0xb7a1ac636f436,0x2aa20b9d40d66e78,2
+np.float64,0x3fb1491bb8229237,0x3fda0fabad1738ee,2
+np.float64,0xbfdf9c0ce73f381a,0xbfe94b716dbe35ee,2
+np.float64,0xbfbd4f0ad23a9e18,0xbfdf1397329a2dce,2
+np.float64,0xbfe4e0caac69c196,0xbfebc119b8a181cd,2
+np.float64,0x5753641aaea6d,0x2a9c2ba3e92b0cd2,2
+np.float64,0x72bb814ae5771,0x2a9eda92fada66de,2
+np.float64,0x57ed8f5aafdb3,0x2a9c3c2e1d42e609,2
+np.float64,0xffec33359c38666a,0xd54353b2acd0daf1,2
+np.float64,0x3fa5fe6e8c2bfce0,0x3fd66a0b3bf2720a,2
+np.float64,0xffe2dc8d7ca5b91a,0xd540e6ebc097d601,2
+np.float64,0x7fd99d260eb33a4b,0x553db626c9c75f78,2
+np.float64,0xbfe2dd73e425bae8,0xbfead4fc4b93a727,2
+np.float64,0xdcd4a583b9a95,0x2aa33094c9a17ad7,2
+np.float64,0x7fb0af6422215ec7,0x553039a606e8e64f,2
+np.float64,0x7fdfab6227bf56c3,0x553fe3b26164aeda,2
+np.float64,0x1e4d265e3c9a6,0x2a93cba8a1a8ae6d,2
+np.float64,0xbfdc7d097238fa12,0xbfe86ee2f24fd473,2
+np.float64,0x7fe5d35d29eba6b9,0x5541bea5878bce2b,2
+np.float64,0xffcb886a903710d4,0xd53828281710aab5,2
+np.float64,0xffe058c7ffe0b190,0xd5401d61e9a7cbcf,2
+np.float64,0x3ff0000000000000,0x3ff0000000000000,2
+np.float64,0xffd5b1c1132b6382,0xd53c1c839c098340,2
+np.float64,0x3fe2e7956725cf2b,0x3fead9c907b9d041,2
+np.float64,0x800a8ee293951dc6,0xaaa18ce3f079f118,2
+np.float64,0x7febcd3085b79a60,0x55433c47e1f822ad,2
+np.float64,0x3feb0e14cd761c2a,0x3fee423542102546,2
+np.float64,0x3fb45e6d0628bcda,0x3fdb86db67d0c992,2
+np.float64,0x7fa836e740306dce,0x552d2907cb8118b2,2
+np.float64,0x3fd15ba25b22b745,0x3fe4b6b018409d78,2
+np.float64,0xbfb59980ce2b3300,0xbfdc1206274cb51d,2
+np.float64,0x3fdef1b87fbde371,0x3fe91dafc62124a1,2
+np.float64,0x7fed37a4337a6f47,0x55438e7e0b50ae37,2
+np.float64,0xffe6c87633ad90ec,0xd542001f216ab448,2
+np.float64,0x8008d2548ab1a4a9,0xaaa087ad272d8e17,2
+np.float64,0xbfd1d6744da3ace8,0xbfe4e71965adda74,2
+np.float64,0xbfb27f751224fee8,0xbfdaa82132775406,2
+np.float64,0x3fe2b336ae65666d,0x3feac0e6b13ec2d2,2
+np.float64,0xffc6bac2262d7584,0xd536a951a2eecb49,2
+np.float64,0x7fdb661321b6cc25,0x553e62dfd7fcd3f3,2
+np.float64,0xffe83567d5706acf,0xd5425e4bb5027568,2
+np.float64,0xbf7f0693e03e0d00,0xbfc9235314d53f82,2
+np.float64,0x3feb32b218766564,0x3fee4fd5847f3722,2
+np.float64,0x3fec25d33df84ba6,0x3feea91fcd4aebab,2
+np.float64,0x7fe17abecb22f57d,0x55407a8ba661207c,2
+np.float64,0xbfe5674b1eeace96,0xbfebfc351708dc70,2
+np.float64,0xbfe51a2d2f6a345a,0xbfebda702c9d302a,2
+np.float64,0x3fec05584af80ab0,0x3fee9d502a7bf54d,2
+np.float64,0xffda8871dcb510e4,0xd53e10105f0365b5,2
+np.float64,0xbfc279c31824f388,0xbfe0c9354d871484,2
+np.float64,0x1cbed61e397dc,0x2a937364712cd518,2
+np.float64,0x800787d198af0fa4,0xaa9f5c847affa1d2,2
+np.float64,0x80079f6d65af3edc,0xaa9f7d2863368bbd,2
+np.float64,0xb942f1e97285e,0x2aa2193e0c513b7f,2
+np.float64,0x7fe9078263320f04,0x554292d85dee2c18,2
+np.float64,0xbfe4de0761a9bc0f,0xbfebbfe04116b829,2
+np.float64,0xbfdbe6f3fc37cde8,0xbfe843aea59a0749,2
+np.float64,0xffcb6c0de136d81c,0xd5381fd9c525b813,2
+np.float64,0x9b6bda9336d7c,0x2aa111c924c35386,2
+np.float64,0x3fe17eece422fdda,0x3fea2a9bacd78607,2
+np.float64,0xd8011c49b0024,0x2aa30c87574fc0c6,2
+np.float64,0xbfc0a08b3f214118,0xbfe034d48f0d8dc0,2
+np.float64,0x3fd60adb1eac15b8,0x3fe66e42e4e7e6b5,2
+np.float64,0x80011d68ea023ad3,0xaa909733befbb962,2
+np.float64,0xffb35ac32426b588,0xd5310c4be1c37270,2
+np.float64,0x3fee8b56c9bd16ae,0x3fef81d8d15f6939,2
+np.float64,0x3fdc10a45e382149,0x3fe84fbe4cf11e68,2
+np.float64,0xbfc85dc45e30bb88,0xbfe2687b5518abde,2
+np.float64,0x3fd53b85212a770a,0x3fe6270d6d920d0f,2
+np.float64,0x800fc158927f82b1,0xaaa40e303239586f,2
+np.float64,0x11af5e98235ed,0x2a908b04a790083f,2
+np.float64,0xbfe2a097afe54130,0xbfeab80269eece99,2
+np.float64,0xbfd74ac588ae958c,0xbfe6d8ca3828d0b8,2
+np.float64,0xffea18ab2ef43156,0xd542d579ab31df1e,2
+np.float64,0xbfecda7058f9b4e1,0xbfeeea29c33b7913,2
+np.float64,0x3fc4ac56ed2958b0,0x3fe16d3e2bd7806d,2
+np.float64,0x3feccc898cb99913,0x3feee531f217dcfa,2
+np.float64,0xffeb3a64c5b674c9,0xd5431a30a41f0905,2
+np.float64,0x3fe5a7ee212b4fdc,0x3fec1844af9076fc,2
+np.float64,0x80080fdb52301fb7,0xaaa00a8b4274db67,2
+np.float64,0x800b3e7e47d67cfd,0xaaa1ec2876959852,2
+np.float64,0x80063fb8ee2c7f73,0xaa9d7875c9f20d6f,2
+np.float64,0x7fdacf80d0b59f01,0x553e2acede4c62a8,2
+np.float64,0x401e9b24803d4,0x2a996a0a75d0e093,2
+np.float64,0x3fe6c29505ed852a,0x3fec907a6d8c10af,2
+np.float64,0x8005c04ee2cb809f,0xaa9caa9813faef46,2
+np.float64,0xbfe1360f21e26c1e,0xbfea06155d6985b6,2
+np.float64,0xffc70606682e0c0c,0xd536c239b9d4be0a,2
+np.float64,0x800e639afefcc736,0xaaa37547d0229a26,2
+np.float64,0x3fe5589290aab125,0x3febf5c925c4e6db,2
+np.float64,0x8003b59330276b27,0xaa98c47e44524335,2
+np.float64,0x800d67ec22dacfd8,0xaaa301251b6a730a,2
+np.float64,0x7fdaeb5025b5d69f,0x553e35397dfe87eb,2
+np.float64,0x3fdae32a24b5c654,0x3fe7f771bc108f6c,2
+np.float64,0xffe6c1fc93ad83f8,0xd541fe6a6a716756,2
+np.float64,0xbfd7b9c1d32f7384,0xbfe6fcdae563d638,2
+np.float64,0x800e1bea06fc37d4,0xaaa354c0bf61449c,2
+np.float64,0xbfd78f097aaf1e12,0xbfe6ef068329bdf4,2
+np.float64,0x7fea6a400874d47f,0x5542e905978ad722,2
+np.float64,0x8008b4377cb1686f,0xaaa074c87eee29f9,2
+np.float64,0x8002f3fb8d45e7f8,0xaa96f47ac539b614,2
+np.float64,0xbfcf2b3fd13e5680,0xbfe3fb91c0cc66ad,2
+np.float64,0xffecca2f5279945e,0xd54375f361075927,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0x7f84d5a5a029ab4a,0x552178d1d4e8640e,2
+np.float64,0x3fea8a4b64351497,0x3fee10c332440eb2,2
+np.float64,0x800fe01ac1dfc036,0xaaa41b34d91a4bee,2
+np.float64,0x3fc0b3d8872167b1,0x3fe03b178d354f8d,2
+np.float64,0x5ee8b0acbdd17,0x2a9cf69f2e317729,2
+np.float64,0x8006ef0407adde09,0xaa9e82888f3dd83e,2
+np.float64,0x7fdbb08a07b76113,0x553e7e4e35b938b9,2
+np.float64,0x49663f9c92cc9,0x2a9a95e0affe5108,2
+np.float64,0x7fd9b87e79b370fc,0x553dc0b5cff3dc7d,2
+np.float64,0xbfd86ae657b0d5cc,0xbfe73584d02bdd2b,2
+np.float64,0x3fd4d4a13729a942,0x3fe6030a962aaaf8,2
+np.float64,0x7fcc246bcb3848d7,0x5538557309449bba,2
+np.float64,0xbfdc86a7d5b90d50,0xbfe871a2983c2a29,2
+np.float64,0xd2a6e995a54dd,0x2aa2e3e9c0fdd6c0,2
+np.float64,0x3f92eb447825d680,0x3fd0eb4fd2ba16d2,2
+np.float64,0x800d4001697a8003,0xaaa2ee358661b75c,2
+np.float64,0x3fd3705fd1a6e0c0,0x3fe582a6f321d7d6,2
+np.float64,0xbfcfdf51533fbea4,0xbfe421c3bdd9f2a3,2
+np.float64,0x3fe268e87964d1d1,0x3fea9d47e08aad8a,2
+np.float64,0x24b8901e49713,0x2a951adeefe7b31b,2
+np.float64,0x3fedb35d687b66bb,0x3fef36e440850bf8,2
+np.float64,0x3fb7ab5cbe2f56c0,0x3fdcf097380721c6,2
+np.float64,0x3f8c4eaa10389d54,0x3fceb7ecb605b73b,2
+np.float64,0xbfed831ed6fb063e,0xbfef25f462a336f1,2
+np.float64,0x7fd8c52112318a41,0x553d61b0ee609f58,2
+np.float64,0xbfe71c4ff76e38a0,0xbfecb5d32e789771,2
+np.float64,0xbfe35fb7b166bf70,0xbfeb12328e75ee6b,2
+np.float64,0x458e1a3a8b1c4,0x2a9a1cebadc81342,2
+np.float64,0x8003c1b3ad478368,0xaa98df5ed060b28c,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x7fe17098c162e131,0x5540775a9a3a104f,2
+np.float64,0xbfd95cb71732b96e,0xbfe7812acf7ea511,2
+np.float64,0x8000000000000001,0xa990000000000000,2
+np.float64,0xbfde0e7d9ebc1cfc,0xbfe8df9ca9e49a5b,2
+np.float64,0xffef4f67143e9ecd,0xd5440348a6a2f231,2
+np.float64,0x7fe37d23c826fa47,0x5541165de17caa03,2
+np.float64,0xbfcc0e5f85381cc0,0xbfe34b44b0deefe9,2
+np.float64,0x3fe858f1c470b1e4,0x3fed36ab90557d89,2
+np.float64,0x800e857278fd0ae5,0xaaa3847d13220545,2
+np.float64,0x3febd31a66f7a635,0x3fee8af90e66b043,2
+np.float64,0x7fd3fde1b127fbc2,0x553b5b186a49b968,2
+np.float64,0x3fd3dabb8b27b577,0x3fe5a99b446bed26,2
+np.float64,0xffeb4500f1768a01,0xd5431cab828e254a,2
+np.float64,0xffccca8fc6399520,0xd53884f8b505e79e,2
+np.float64,0xffeee9406b7dd280,0xd543ed6d27a1a899,2
+np.float64,0xffecdde0f0f9bbc1,0xd5437a6258b14092,2
+np.float64,0xe6b54005cd6a8,0x2aa378c25938dfda,2
+np.float64,0x7fe610f1022c21e1,0x5541cf460b972925,2
+np.float64,0xbfe5a170ec6b42e2,0xbfec1576081e3232,2
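
Each row of these umath validation CSVs has the form dtype,input,output,ulperrortol: the input and output columns are IEEE-754 bit patterns written in hex, and the last column is the permitted error in units in the last place (ULP). NumPy ships its own runner for these files alongside the data directory in numpy/core/tests; the following is only a minimal sketch illustrating the row format, checked here against np.cos (the function for the file that follows). The helper names bits_to_float, ulp_diff and check_row are hypothetical, not part of NumPy.

# Minimal, self-contained sketch of checking one validation row.
# Assumption: this is an illustration, not NumPy's actual test harness.
import numpy as np

_UINT = {np.float32: np.uint32, np.float64: np.uint64}
_SIGNBIT = {np.float32: 1 << 31, np.float64: 1 << 63}

def bits_to_float(token, dtype):
    # "0x3f0a5140" -> reinterpret that bit pattern as a float of this dtype.
    return _UINT[dtype](int(token, 16)).view(dtype)

def _ordered(value, dtype):
    # Map the sign-magnitude IEEE-754 bit pattern onto an integer scale that
    # is monotone in the float's value, so ULP distance is a plain difference.
    bits = int(value.view(_UINT[dtype]))
    sign = _SIGNBIT[dtype]
    return bits if bits < sign else sign - bits

def ulp_diff(a, b, dtype):
    return abs(_ordered(a, dtype) - _ordered(b, dtype))

def check_row(line, func=np.cos):
    # line is one CSV row, e.g. "np.float32,0x3f800000,0x3f0a5140,2",
    # which encodes cos(1.0) ~= 0.5403023, required to match within 2 ULP.
    name, in_hex, out_hex, tol = line.strip().split(',')
    dtype = {'np.float32': np.float32, 'np.float64': np.float64}[name]
    x = bits_to_float(in_hex, dtype)
    expected = bits_to_float(out_hex, dtype)
    with np.errstate(invalid='ignore'):  # e.g. cos(inf) legitimately yields nan
        got = func(x)
    if np.isnan(expected) or np.isnan(got):
        return bool(np.isnan(expected) and np.isnan(got))
    return ulp_diff(got, expected, dtype) <= int(tol)

# Spot check against a row that appears in the file below:
assert check_row("np.float32,0x3f800000,0x3f0a5140,2")
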
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv
new file mode 100644
index 00000000..e315c28b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cos.csv
@@ -0,0 +1,1375 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,2
+np.float32,0x007b2490,0x3f800000,2
+np.float32,0x007c99fa,0x3f800000,2
+np.float32,0x00734a0c,0x3f800000,2
+np.float32,0x0070de24,0x3f800000,2
+np.float32,0x007fffff,0x3f800000,2
+np.float32,0x00000001,0x3f800000,2
+## -ve denormals ##
+np.float32,0x80495d65,0x3f800000,2
+np.float32,0x806894f6,0x3f800000,2
+np.float32,0x80555a76,0x3f800000,2
+np.float32,0x804e1fb8,0x3f800000,2
+np.float32,0x80687de9,0x3f800000,2
+np.float32,0x807fffff,0x3f800000,2
+np.float32,0x80000001,0x3f800000,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x3f800000,2
+np.float32,0x80000000,0x3f800000,2
+np.float32,0x00800000,0x3f800000,2
+np.float32,0x80800000,0x3f800000,2
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x3f0a5140,2
+np.float32,0x3f800001,0x3f0a513f,2
+np.float32,0x3f800002,0x3f0a513d,2
+np.float32,0xc090a8b0,0xbe4332ce,2
+np.float32,0x41ce3184,0x3f4d1de1,2
+np.float32,0xc1d85848,0xbeaa8980,2
+np.float32,0x402b8820,0xbf653aa3,2
+np.float32,0x42b4e454,0xbf4a338b,2
+np.float32,0x42a67a60,0x3c58202e,2
+np.float32,0x41d92388,0xbed987c7,2
+np.float32,0x422dd66c,0x3f5dcab3,2
+np.float32,0xc28f5be6,0xbf5688d8,2
+np.float32,0x41ab2674,0xbf53aa3b,2
+np.float32,0x3f490fdb,0x3f3504f3,2
+np.float32,0xbf490fdb,0x3f3504f3,2
+np.float32,0x3fc90fdb,0xb33bbd2e,2
+np.float32,0xbfc90fdb,0xb33bbd2e,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x3fc90fdb,0xb33bbd2e,2
+np.float32,0xbfc90fdb,0xb33bbd2e,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x4016cbe4,0xbf3504f3,2
+np.float32,0xc016cbe4,0xbf3504f3,2
+np.float32,0x4096cbe4,0x324cde2e,2
+np.float32,0xc096cbe4,0x324cde2e,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x407b53d2,0xbf3504f1,2
+np.float32,0xc07b53d2,0xbf3504f1,2
+np.float32,0x40fb53d2,0xb4b5563d,2
+np.float32,0xc0fb53d2,0xb4b5563d,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x4096cbe4,0x324cde2e,2
+np.float32,0xc096cbe4,0x324cde2e,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x40afede0,0x3f3504f7,2
+np.float32,0xc0afede0,0x3f3504f7,2
+np.float32,0x412fede0,0x353222c4,2
+np.float32,0xc12fede0,0x353222c4,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x40e231d6,0x3f3504f3,2
+np.float32,0xc0e231d6,0x3f3504f3,2
+np.float32,0x416231d6,0xb319a6a2,2
+np.float32,0xc16231d6,0xb319a6a2,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x40fb53d2,0xb4b5563d,2
+np.float32,0xc0fb53d2,0xb4b5563d,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x410a3ae7,0xbf3504fb,2
+np.float32,0xc10a3ae7,0xbf3504fb,2
+np.float32,0x418a3ae7,0x35b08908,2
+np.float32,0xc18a3ae7,0x35b08908,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x41235ce2,0xbf3504ef,2
+np.float32,0xc1235ce2,0xbf3504ef,2
+np.float32,0x41a35ce2,0xb53889b6,2
+np.float32,0xc1a35ce2,0xb53889b6,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x412fede0,0x353222c4,2
+np.float32,0xc12fede0,0x353222c4,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x413c7edd,0x3f3504f4,2
+np.float32,0xc13c7edd,0x3f3504f4,2
+np.float32,0x41bc7edd,0x33800add,2
+np.float32,0xc1bc7edd,0x33800add,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x4155a0d9,0x3f3504eb,2
+np.float32,0xc155a0d9,0x3f3504eb,2
+np.float32,0x41d5a0d9,0xb5b3bc81,2
+np.float32,0xc1d5a0d9,0xb5b3bc81,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x416231d6,0xb319a6a2,2
+np.float32,0xc16231d6,0xb319a6a2,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x416ec2d4,0xbf3504f7,2
+np.float32,0xc16ec2d4,0xbf3504f7,2
+np.float32,0x41eec2d4,0x353ef0a7,2
+np.float32,0xc1eec2d4,0x353ef0a7,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x4183f268,0xbf3504e7,2
+np.float32,0xc183f268,0xbf3504e7,2
+np.float32,0x4203f268,0xb6059a13,2
+np.float32,0xc203f268,0xb6059a13,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x418a3ae7,0x35b08908,2
+np.float32,0xc18a3ae7,0x35b08908,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x41908365,0x3f3504f0,2
+np.float32,0xc1908365,0x3f3504f0,2
+np.float32,0x42108365,0xb512200d,2
+np.float32,0xc2108365,0xb512200d,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x419d1463,0x3f3504ef,2
+np.float32,0xc19d1463,0x3f3504ef,2
+np.float32,0x421d1463,0xb5455799,2
+np.float32,0xc21d1463,0xb5455799,2
+np.float32,0x429d1463,0xbf800000,2
+np.float32,0xc29d1463,0xbf800000,2
+np.float32,0x41a35ce2,0xb53889b6,2
+np.float32,0xc1a35ce2,0xb53889b6,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x42a35ce2,0x3f800000,2
+np.float32,0xc2a35ce2,0x3f800000,2
+np.float32,0x41a9a561,0xbf3504ff,2
+np.float32,0xc1a9a561,0xbf3504ff,2
+np.float32,0x4229a561,0x360733d0,2
+np.float32,0xc229a561,0x360733d0,2
+np.float32,0x42a9a561,0xbf800000,2
+np.float32,0xc2a9a561,0xbf800000,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x42afede0,0x3f800000,2
+np.float32,0xc2afede0,0x3f800000,2
+np.float32,0x41b6365e,0xbf3504f6,2
+np.float32,0xc1b6365e,0xbf3504f6,2
+np.float32,0x4236365e,0x350bb91c,2
+np.float32,0xc236365e,0x350bb91c,2
+np.float32,0x42b6365e,0xbf800000,2
+np.float32,0xc2b6365e,0xbf800000,2
+np.float32,0x41bc7edd,0x33800add,2
+np.float32,0xc1bc7edd,0x33800add,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x42bc7edd,0x3f800000,2
+np.float32,0xc2bc7edd,0x3f800000,2
+np.float32,0x41c2c75c,0x3f3504f8,2
+np.float32,0xc1c2c75c,0x3f3504f8,2
+np.float32,0x4242c75c,0x354bbe8a,2
+np.float32,0xc242c75c,0x354bbe8a,2
+np.float32,0x42c2c75c,0xbf800000,2
+np.float32,0xc2c2c75c,0xbf800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x42c90fdb,0x3f800000,2
+np.float32,0xc2c90fdb,0x3f800000,2
+np.float32,0x41cf585a,0x3f3504e7,2
+np.float32,0xc1cf585a,0x3f3504e7,2
+np.float32,0x424f585a,0xb608cd8c,2
+np.float32,0xc24f585a,0xb608cd8c,2
+np.float32,0x42cf585a,0xbf800000,2
+np.float32,0xc2cf585a,0xbf800000,2
+np.float32,0x41d5a0d9,0xb5b3bc81,2
+np.float32,0xc1d5a0d9,0xb5b3bc81,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x42d5a0d9,0x3f800000,2
+np.float32,0xc2d5a0d9,0x3f800000,2
+np.float32,0x41dbe958,0xbf350507,2
+np.float32,0xc1dbe958,0xbf350507,2
+np.float32,0x425be958,0x365eab75,2
+np.float32,0xc25be958,0x365eab75,2
+np.float32,0x42dbe958,0xbf800000,2
+np.float32,0xc2dbe958,0xbf800000,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x42e231d6,0x3f800000,2
+np.float32,0xc2e231d6,0x3f800000,2
+np.float32,0x41e87a55,0xbf3504ef,2
+np.float32,0xc1e87a55,0xbf3504ef,2
+np.float32,0x42687a55,0xb552257b,2
+np.float32,0xc2687a55,0xb552257b,2
+np.float32,0x42e87a55,0xbf800000,2
+np.float32,0xc2e87a55,0xbf800000,2
+np.float32,0x41eec2d4,0x353ef0a7,2
+np.float32,0xc1eec2d4,0x353ef0a7,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x42eec2d4,0x3f800000,2
+np.float32,0xc2eec2d4,0x3f800000,2
+np.float32,0x41f50b53,0x3f3504ff,2
+np.float32,0xc1f50b53,0x3f3504ff,2
+np.float32,0x42750b53,0x360a6748,2
+np.float32,0xc2750b53,0x360a6748,2
+np.float32,0x42f50b53,0xbf800000,2
+np.float32,0xc2f50b53,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x42fb53d2,0x3f800000,2
+np.float32,0xc2fb53d2,0x3f800000,2
+np.float32,0x4200ce28,0x3f3504f6,2
+np.float32,0xc200ce28,0x3f3504f6,2
+np.float32,0x4280ce28,0x34fdd672,2
+np.float32,0xc280ce28,0x34fdd672,2
+np.float32,0x4300ce28,0xbf800000,2
+np.float32,0xc300ce28,0xbf800000,2
+np.float32,0x4203f268,0xb6059a13,2
+np.float32,0xc203f268,0xb6059a13,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x4303f268,0x3f800000,2
+np.float32,0xc303f268,0x3f800000,2
+np.float32,0x420716a7,0xbf3504f8,2
+np.float32,0xc20716a7,0xbf3504f8,2
+np.float32,0x428716a7,0x35588c6d,2
+np.float32,0xc28716a7,0x35588c6d,2
+np.float32,0x430716a7,0xbf800000,2
+np.float32,0xc30716a7,0xbf800000,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x430a3ae7,0x3f800000,2
+np.float32,0xc30a3ae7,0x3f800000,2
+np.float32,0x420d5f26,0xbf3504e7,2
+np.float32,0xc20d5f26,0xbf3504e7,2
+np.float32,0x428d5f26,0xb60c0105,2
+np.float32,0xc28d5f26,0xb60c0105,2
+np.float32,0x430d5f26,0xbf800000,2
+np.float32,0xc30d5f26,0xbf800000,2
+np.float32,0x42108365,0xb512200d,2
+np.float32,0xc2108365,0xb512200d,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x43108365,0x3f800000,2
+np.float32,0xc3108365,0x3f800000,2
+np.float32,0x4213a7a5,0x3f350507,2
+np.float32,0xc213a7a5,0x3f350507,2
+np.float32,0x4293a7a5,0x3661deee,2
+np.float32,0xc293a7a5,0x3661deee,2
+np.float32,0x4313a7a5,0xbf800000,2
+np.float32,0xc313a7a5,0xbf800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x4316cbe4,0x3f800000,2
+np.float32,0xc316cbe4,0x3f800000,2
+np.float32,0x4219f024,0x3f3504d8,2
+np.float32,0xc219f024,0x3f3504d8,2
+np.float32,0x4299f024,0xb69bde6c,2
+np.float32,0xc299f024,0xb69bde6c,2
+np.float32,0x4319f024,0xbf800000,2
+np.float32,0xc319f024,0xbf800000,2
+np.float32,0x421d1463,0xb5455799,2
+np.float32,0xc21d1463,0xb5455799,2
+np.float32,0x429d1463,0xbf800000,2
+np.float32,0xc29d1463,0xbf800000,2
+np.float32,0x431d1463,0x3f800000,2
+np.float32,0xc31d1463,0x3f800000,2
+np.float32,0x422038a3,0xbf350516,2
+np.float32,0xc22038a3,0xbf350516,2
+np.float32,0x42a038a3,0x36c6cd61,2
+np.float32,0xc2a038a3,0x36c6cd61,2
+np.float32,0x432038a3,0xbf800000,2
+np.float32,0xc32038a3,0xbf800000,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x42a35ce2,0x3f800000,2
+np.float32,0xc2a35ce2,0x3f800000,2
+np.float32,0x43235ce2,0x3f800000,2
+np.float32,0xc3235ce2,0x3f800000,2
+np.float32,0x42268121,0xbf3504f6,2
+np.float32,0xc2268121,0xbf3504f6,2
+np.float32,0x42a68121,0x34e43aac,2
+np.float32,0xc2a68121,0x34e43aac,2
+np.float32,0x43268121,0xbf800000,2
+np.float32,0xc3268121,0xbf800000,2
+np.float32,0x4229a561,0x360733d0,2
+np.float32,0xc229a561,0x360733d0,2
+np.float32,0x42a9a561,0xbf800000,2
+np.float32,0xc2a9a561,0xbf800000,2
+np.float32,0x4329a561,0x3f800000,2
+np.float32,0xc329a561,0x3f800000,2
+np.float32,0x422cc9a0,0x3f3504f8,2
+np.float32,0xc22cc9a0,0x3f3504f8,2
+np.float32,0x42acc9a0,0x35655a50,2
+np.float32,0xc2acc9a0,0x35655a50,2
+np.float32,0x432cc9a0,0xbf800000,2
+np.float32,0xc32cc9a0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x42afede0,0x3f800000,2
+np.float32,0xc2afede0,0x3f800000,2
+np.float32,0x432fede0,0x3f800000,2
+np.float32,0xc32fede0,0x3f800000,2
+np.float32,0x4233121f,0x3f3504e7,2
+np.float32,0xc233121f,0x3f3504e7,2
+np.float32,0x42b3121f,0xb60f347d,2
+np.float32,0xc2b3121f,0xb60f347d,2
+np.float32,0x4333121f,0xbf800000,2
+np.float32,0xc333121f,0xbf800000,2
+np.float32,0x4236365e,0x350bb91c,2
+np.float32,0xc236365e,0x350bb91c,2
+np.float32,0x42b6365e,0xbf800000,2
+np.float32,0xc2b6365e,0xbf800000,2
+np.float32,0x4336365e,0x3f800000,2
+np.float32,0xc336365e,0x3f800000,2
+np.float32,0x42395a9e,0xbf350507,2
+np.float32,0xc2395a9e,0xbf350507,2
+np.float32,0x42b95a9e,0x36651267,2
+np.float32,0xc2b95a9e,0x36651267,2
+np.float32,0x43395a9e,0xbf800000,2
+np.float32,0xc3395a9e,0xbf800000,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x42bc7edd,0x3f800000,2
+np.float32,0xc2bc7edd,0x3f800000,2
+np.float32,0x433c7edd,0x3f800000,2
+np.float32,0xc33c7edd,0x3f800000,2
+np.float32,0x423fa31d,0xbf3504d7,2
+np.float32,0xc23fa31d,0xbf3504d7,2
+np.float32,0x42bfa31d,0xb69d7828,2
+np.float32,0xc2bfa31d,0xb69d7828,2
+np.float32,0x433fa31d,0xbf800000,2
+np.float32,0xc33fa31d,0xbf800000,2
+np.float32,0x4242c75c,0x354bbe8a,2
+np.float32,0xc242c75c,0x354bbe8a,2
+np.float32,0x42c2c75c,0xbf800000,2
+np.float32,0xc2c2c75c,0xbf800000,2
+np.float32,0x4342c75c,0x3f800000,2
+np.float32,0xc342c75c,0x3f800000,2
+np.float32,0x4245eb9c,0x3f350517,2
+np.float32,0xc245eb9c,0x3f350517,2
+np.float32,0x42c5eb9c,0x36c8671d,2
+np.float32,0xc2c5eb9c,0x36c8671d,2
+np.float32,0x4345eb9c,0xbf800000,2
+np.float32,0xc345eb9c,0xbf800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x42c90fdb,0x3f800000,2
+np.float32,0xc2c90fdb,0x3f800000,2
+np.float32,0x43490fdb,0x3f800000,2
+np.float32,0xc3490fdb,0x3f800000,2
+np.float32,0x424c341a,0x3f3504f5,2
+np.float32,0xc24c341a,0x3f3504f5,2
+np.float32,0x42cc341a,0x34ca9ee6,2
+np.float32,0xc2cc341a,0x34ca9ee6,2
+np.float32,0x434c341a,0xbf800000,2
+np.float32,0xc34c341a,0xbf800000,2
+np.float32,0x424f585a,0xb608cd8c,2
+np.float32,0xc24f585a,0xb608cd8c,2
+np.float32,0x42cf585a,0xbf800000,2
+np.float32,0xc2cf585a,0xbf800000,2
+np.float32,0x434f585a,0x3f800000,2
+np.float32,0xc34f585a,0x3f800000,2
+np.float32,0x42527c99,0xbf3504f9,2
+np.float32,0xc2527c99,0xbf3504f9,2
+np.float32,0x42d27c99,0x35722833,2
+np.float32,0xc2d27c99,0x35722833,2
+np.float32,0x43527c99,0xbf800000,2
+np.float32,0xc3527c99,0xbf800000,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x42d5a0d9,0x3f800000,2
+np.float32,0xc2d5a0d9,0x3f800000,2
+np.float32,0x4355a0d9,0x3f800000,2
+np.float32,0xc355a0d9,0x3f800000,2
+np.float32,0x4258c518,0xbf3504e6,2
+np.float32,0xc258c518,0xbf3504e6,2
+np.float32,0x42d8c518,0xb61267f6,2
+np.float32,0xc2d8c518,0xb61267f6,2
+np.float32,0x4358c518,0xbf800000,2
+np.float32,0xc358c518,0xbf800000,2
+np.float32,0x425be958,0x365eab75,2
+np.float32,0xc25be958,0x365eab75,2
+np.float32,0x42dbe958,0xbf800000,2
+np.float32,0xc2dbe958,0xbf800000,2
+np.float32,0x435be958,0x3f800000,2
+np.float32,0xc35be958,0x3f800000,2
+np.float32,0x425f0d97,0x3f350508,2
+np.float32,0xc25f0d97,0x3f350508,2
+np.float32,0x42df0d97,0x366845e0,2
+np.float32,0xc2df0d97,0x366845e0,2
+np.float32,0x435f0d97,0xbf800000,2
+np.float32,0xc35f0d97,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x42e231d6,0x3f800000,2
+np.float32,0xc2e231d6,0x3f800000,2
+np.float32,0x436231d6,0x3f800000,2
+np.float32,0xc36231d6,0x3f800000,2
+np.float32,0x42655616,0x3f3504d7,2
+np.float32,0xc2655616,0x3f3504d7,2
+np.float32,0x42e55616,0xb69f11e5,2
+np.float32,0xc2e55616,0xb69f11e5,2
+np.float32,0x43655616,0xbf800000,2
+np.float32,0xc3655616,0xbf800000,2
+np.float32,0x42687a55,0xb552257b,2
+np.float32,0xc2687a55,0xb552257b,2
+np.float32,0x42e87a55,0xbf800000,2
+np.float32,0xc2e87a55,0xbf800000,2
+np.float32,0x43687a55,0x3f800000,2
+np.float32,0xc3687a55,0x3f800000,2
+np.float32,0x426b9e95,0xbf350517,2
+np.float32,0xc26b9e95,0xbf350517,2
+np.float32,0x42eb9e95,0x36ca00d9,2
+np.float32,0xc2eb9e95,0x36ca00d9,2
+np.float32,0x436b9e95,0xbf800000,2
+np.float32,0xc36b9e95,0xbf800000,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x42eec2d4,0x3f800000,2
+np.float32,0xc2eec2d4,0x3f800000,2
+np.float32,0x436ec2d4,0x3f800000,2
+np.float32,0xc36ec2d4,0x3f800000,2
+np.float32,0x4271e713,0xbf3504f5,2
+np.float32,0xc271e713,0xbf3504f5,2
+np.float32,0x42f1e713,0x34b10321,2
+np.float32,0xc2f1e713,0x34b10321,2
+np.float32,0x4371e713,0xbf800000,2
+np.float32,0xc371e713,0xbf800000,2
+np.float32,0x42750b53,0x360a6748,2
+np.float32,0xc2750b53,0x360a6748,2
+np.float32,0x42f50b53,0xbf800000,2
+np.float32,0xc2f50b53,0xbf800000,2
+np.float32,0x43750b53,0x3f800000,2
+np.float32,0xc3750b53,0x3f800000,2
+np.float32,0x42782f92,0x3f3504f9,2
+np.float32,0xc2782f92,0x3f3504f9,2
+np.float32,0x42f82f92,0x357ef616,2
+np.float32,0xc2f82f92,0x357ef616,2
+np.float32,0x43782f92,0xbf800000,2
+np.float32,0xc3782f92,0xbf800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x42fb53d2,0x3f800000,2
+np.float32,0xc2fb53d2,0x3f800000,2
+np.float32,0x437b53d2,0x3f800000,2
+np.float32,0xc37b53d2,0x3f800000,2
+np.float32,0x427e7811,0x3f3504e6,2
+np.float32,0xc27e7811,0x3f3504e6,2
+np.float32,0x42fe7811,0xb6159b6f,2
+np.float32,0xc2fe7811,0xb6159b6f,2
+np.float32,0x437e7811,0xbf800000,2
+np.float32,0xc37e7811,0xbf800000,2
+np.float32,0x4280ce28,0x34fdd672,2
+np.float32,0xc280ce28,0x34fdd672,2
+np.float32,0x4300ce28,0xbf800000,2
+np.float32,0xc300ce28,0xbf800000,2
+np.float32,0x4380ce28,0x3f800000,2
+np.float32,0xc380ce28,0x3f800000,2
+np.float32,0x42826048,0xbf350508,2
+np.float32,0xc2826048,0xbf350508,2
+np.float32,0x43026048,0x366b7958,2
+np.float32,0xc3026048,0x366b7958,2
+np.float32,0x43826048,0xbf800000,2
+np.float32,0xc3826048,0xbf800000,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x4303f268,0x3f800000,2
+np.float32,0xc303f268,0x3f800000,2
+np.float32,0x4383f268,0x3f800000,2
+np.float32,0xc383f268,0x3f800000,2
+np.float32,0x42858487,0xbf350504,2
+np.float32,0xc2858487,0xbf350504,2
+np.float32,0x43058487,0x363ea8be,2
+np.float32,0xc3058487,0x363ea8be,2
+np.float32,0x43858487,0xbf800000,2
+np.float32,0xc3858487,0xbf800000,2
+np.float32,0x428716a7,0x35588c6d,2
+np.float32,0xc28716a7,0x35588c6d,2
+np.float32,0x430716a7,0xbf800000,2
+np.float32,0xc30716a7,0xbf800000,2
+np.float32,0x438716a7,0x3f800000,2
+np.float32,0xc38716a7,0x3f800000,2
+np.float32,0x4288a8c7,0x3f350517,2
+np.float32,0xc288a8c7,0x3f350517,2
+np.float32,0x4308a8c7,0x36cb9a96,2
+np.float32,0xc308a8c7,0x36cb9a96,2
+np.float32,0x4388a8c7,0xbf800000,2
+np.float32,0xc388a8c7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x430a3ae7,0x3f800000,2
+np.float32,0xc30a3ae7,0x3f800000,2
+np.float32,0x438a3ae7,0x3f800000,2
+np.float32,0xc38a3ae7,0x3f800000,2
+np.float32,0x428bcd06,0x3f3504f5,2
+np.float32,0xc28bcd06,0x3f3504f5,2
+np.float32,0x430bcd06,0x3497675b,2
+np.float32,0xc30bcd06,0x3497675b,2
+np.float32,0x438bcd06,0xbf800000,2
+np.float32,0xc38bcd06,0xbf800000,2
+np.float32,0x428d5f26,0xb60c0105,2
+np.float32,0xc28d5f26,0xb60c0105,2
+np.float32,0x430d5f26,0xbf800000,2
+np.float32,0xc30d5f26,0xbf800000,2
+np.float32,0x438d5f26,0x3f800000,2
+np.float32,0xc38d5f26,0x3f800000,2
+np.float32,0x428ef146,0xbf350526,2
+np.float32,0xc28ef146,0xbf350526,2
+np.float32,0x430ef146,0x3710bc40,2
+np.float32,0xc30ef146,0x3710bc40,2
+np.float32,0x438ef146,0xbf800000,2
+np.float32,0xc38ef146,0xbf800000,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x43108365,0x3f800000,2
+np.float32,0xc3108365,0x3f800000,2
+np.float32,0x43908365,0x3f800000,2
+np.float32,0xc3908365,0x3f800000,2
+np.float32,0x42921585,0xbf3504e6,2
+np.float32,0xc2921585,0xbf3504e6,2
+np.float32,0x43121585,0xb618cee8,2
+np.float32,0xc3121585,0xb618cee8,2
+np.float32,0x43921585,0xbf800000,2
+np.float32,0xc3921585,0xbf800000,2
+np.float32,0x4293a7a5,0x3661deee,2
+np.float32,0xc293a7a5,0x3661deee,2
+np.float32,0x4313a7a5,0xbf800000,2
+np.float32,0xc313a7a5,0xbf800000,2
+np.float32,0x4393a7a5,0x3f800000,2
+np.float32,0xc393a7a5,0x3f800000,2
+np.float32,0x429539c5,0x3f350536,2
+np.float32,0xc29539c5,0x3f350536,2
+np.float32,0x431539c5,0x373bab34,2
+np.float32,0xc31539c5,0x373bab34,2
+np.float32,0x439539c5,0xbf800000,2
+np.float32,0xc39539c5,0xbf800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x4316cbe4,0x3f800000,2
+np.float32,0xc316cbe4,0x3f800000,2
+np.float32,0x4396cbe4,0x3f800000,2
+np.float32,0xc396cbe4,0x3f800000,2
+np.float32,0x42985e04,0x3f3504d7,2
+np.float32,0xc2985e04,0x3f3504d7,2
+np.float32,0x43185e04,0xb6a2455d,2
+np.float32,0xc3185e04,0xb6a2455d,2
+np.float32,0x43985e04,0xbf800000,2
+np.float32,0xc3985e04,0xbf800000,2
+np.float32,0x4299f024,0xb69bde6c,2
+np.float32,0xc299f024,0xb69bde6c,2
+np.float32,0x4319f024,0xbf800000,2
+np.float32,0xc319f024,0xbf800000,2
+np.float32,0x4399f024,0x3f800000,2
+np.float32,0xc399f024,0x3f800000,2
+np.float32,0x429b8243,0xbf3504ea,2
+np.float32,0xc29b8243,0xbf3504ea,2
+np.float32,0x431b8243,0xb5cb2eb8,2
+np.float32,0xc31b8243,0xb5cb2eb8,2
+np.float32,0x439b8243,0xbf800000,2
+np.float32,0xc39b8243,0xbf800000,2
+np.float32,0x435b2047,0x3f3504c1,2
+np.float32,0x42a038a2,0xb5e4ca7e,2
+np.float32,0x432038a2,0xbf800000,2
+np.float32,0x4345eb9b,0xbf800000,2
+np.float32,0x42c5eb9b,0xb5de638c,2
+np.float32,0x42eb9e94,0xb5d7fc9b,2
+np.float32,0x4350ea79,0x3631dadb,2
+np.float32,0x42dbe957,0xbf800000,2
+np.float32,0x425be957,0xb505522a,2
+np.float32,0x435be957,0x3f800000,2
+np.float32,0x46027eb2,0x3e7d94c9,2
+np.float32,0x4477baed,0xbe7f1824,2
+np.float32,0x454b8024,0x3e7f5268,2
+np.float32,0x455d2c09,0x3e7f40cb,2
+np.float32,0x4768d3de,0xba14b4af,2
+np.float32,0x46c1e7cd,0x3e7fb102,2
+np.float32,0x44a52949,0xbe7dc9d5,2
+np.float32,0x4454633a,0x3e7dbc7d,2
+np.float32,0x4689810b,0x3e7eb02b,2
+np.float32,0x473473cd,0xbe7eef6f,2
+np.float32,0x44a5193f,0x3e7e1b1f,2
+np.float32,0x46004b36,0x3e7dac59,2
+np.float32,0x467f604b,0x3d7ffd3a,2
+np.float32,0x45ea1805,0x3dffd2e0,2
+np.float32,0x457b6af3,0x3dff7831,2
+np.float32,0x44996159,0xbe7d85f4,2
+np.float32,0x47883553,0xbb80584e,2
+np.float32,0x44e19f0c,0xbdffcfe6,2
+np.float32,0x472b3bf6,0xbe7f7a82,2
+np.float32,0x4600bb4e,0x3a135e33,2
+np.float32,0x449f4556,0x3e7e42e5,2
+np.float32,0x474e9420,0x3dff77b2,2
+np.float32,0x45cbdb23,0x3dff7240,2
+np.float32,0x44222747,0x3dffb039,2
+np.float32,0x4772e419,0xbdff74b8,2
+np.float64,0x1,0x3ff0000000000000,4
+np.float64,0x8000000000000001,0x3ff0000000000000,4
+np.float64,0x10000000000000,0x3ff0000000000000,4
+np.float64,0x8010000000000000,0x3ff0000000000000,4
+np.float64,0x7fefffffffffffff,0xbfefffe62ecfab75,4
+np.float64,0xffefffffffffffff,0xbfefffe62ecfab75,4
+np.float64,0x7ff0000000000000,0xfff8000000000000,4
+np.float64,0xfff0000000000000,0xfff8000000000000,4
+np.float64,0x7ff8000000000000,0x7ff8000000000000,4
+np.float64,0x7ff4000000000000,0x7ffc000000000000,4
+np.float64,0xbfc28bd9dd2517b4,0x3fefaa28ba13a702,4
+np.float64,0x3fb673c62e2ce790,0x3fefe083847a717f,4
+np.float64,0xbfe3e1dac7e7c3b6,0x3fea0500ba099f3a,4
+np.float64,0xbfbe462caa3c8c58,0x3fefc6c8b9c1c87c,4
+np.float64,0xbfb9353576326a68,0x3fefd8513e50e6b1,4
+np.float64,0xbfc05e798520bcf4,0x3fefbd1ad81cf089,4
+np.float64,0xbfe3ca3be2e79478,0x3fea12b995ea6574,4
+np.float64,0xbfde875d46bd0eba,0x3fec6d888662a824,4
+np.float64,0x3fafc4e02c3f89c0,0x3feff03c34bffd69,4
+np.float64,0xbf98855848310ac0,0x3feffda6c1588bdb,4
+np.float64,0x3fe66c51186cd8a2,0x3fe875c61c630ecb,4
+np.float64,0xbfedff1c3b7bfe38,0x3fe2f0c8c9e8fa39,4
+np.float64,0x3fd6082267ac1044,0x3fee1f6023695050,4
+np.float64,0xbfe78449b06f0894,0x3fe7bda2b223850e,4
+np.float64,0x3feedb8e63fdb71c,0x3fe23d5dfd2dd33f,4
+np.float64,0xbfc0a9de3d2153bc,0x3fefbaadf5e5285e,4
+np.float64,0x3fc04c67432098d0,0x3fefbdae07b7de8d,4
+np.float64,0xbfeeef84c4fddf0a,0x3fe22cf37f309d88,4
+np.float64,0x3fc04bb025209760,0x3fefbdb3d7d34ecf,4
+np.float64,0x3fd6b84d48ad709c,0x3fee013403da6e2a,4
+np.float64,0x3fec1ae25d7835c4,0x3fe46e62195cf274,4
+np.float64,0xbfdc6fdf9bb8dfc0,0x3fece48dc78bbb2e,4
+np.float64,0x3fb4db2c9229b660,0x3fefe4d42f79bf49,4
+np.float64,0xbfc0ed698521dad4,0x3fefb8785ea658c9,4
+np.float64,0xbfee82772b7d04ee,0x3fe2864a80efe8e9,4
+np.float64,0x3fd575b664aaeb6c,0x3fee37c669a12879,4
+np.float64,0x3fe4afb1c5e95f64,0x3fe98b177194439c,4
+np.float64,0x3fd93962f9b272c4,0x3fed8bef61876294,4
+np.float64,0x3fd97ae025b2f5c0,0x3fed7f4cfbf4d300,4
+np.float64,0xbfd9afdb1bb35fb6,0x3fed74fdc44dabb1,4
+np.float64,0x3f8ae65e3035cc80,0x3fefff4b1a0ea62b,4
+np.float64,0xbfe7e58664efcb0d,0x3fe77c02a1cbb670,4
+np.float64,0x3fe5f68b37ebed16,0x3fe8c10f849a5d4d,4
+np.float64,0x3fd9137d61b226fc,0x3fed9330eb4815a1,4
+np.float64,0x3fc146d019228da0,0x3fefb57e2d4d52f8,4
+np.float64,0xbfda6036edb4c06e,0x3fed521b2b578679,4
+np.float64,0xbfe78ddfb0ef1bc0,0x3fe7b734319a77e4,4
+np.float64,0x3fe0877823610ef0,0x3febd33a993dd786,4
+np.float64,0x3fbc61af2e38c360,0x3fefcdb4f889756d,4
+np.float64,0x3fd4dcdca4a9b9b8,0x3fee50962ffea5ae,4
+np.float64,0xbfe03cb29f607965,0x3febf7dbf640a75a,4
+np.float64,0xbfc81de407303bc8,0x3fef6f066cef64bc,4
+np.float64,0x3fd8dea42db1bd48,0x3fed9d3e00dbe0b3,4
+np.float64,0x3feac75e94f58ebe,0x3fe56f1f47f97896,4
+np.float64,0x3fb3a1ea6e2743d0,0x3fefe7ec1247cdaa,4
+np.float64,0x3fd695c0f4ad2b80,0x3fee0730bd40883d,4
+np.float64,0xbfd2c631f5a58c64,0x3feea20cbd1105d7,4
+np.float64,0xbfe978a8e1f2f152,0x3fe663014d40ad7a,4
+np.float64,0x3fd8b6b76ab16d70,0x3feda4c879aacc19,4
+np.float64,0x3feaafd30e755fa6,0x3fe5809514c28453,4
+np.float64,0x3fe1e37dc263c6fc,0x3feb20f9ad1f3f5c,4
+np.float64,0x3fd0ec7c24a1d8f8,0x3feee34048f43b75,4
+np.float64,0xbfe3881cbf67103a,0x3fea38d7886e6f53,4
+np.float64,0xbfd7023957ae0472,0x3fedf4471c765a1c,4
+np.float64,0xbfebc51c4ef78a38,0x3fe4b01c424e297b,4
+np.float64,0xbfe20a93eae41528,0x3feb0c2aa321d2e0,4
+np.float64,0x3fef39be867e737e,0x3fe1efaba9164d27,4
+np.float64,0x3fe8ea9576f1d52a,0x3fe6c7a8826ce1be,4
+np.float64,0x3fea921d91f5243c,0x3fe5968c6cf78963,4
+np.float64,0x3fd7ee5d31afdcbc,0x3fedc9f19d43fe61,4
+np.float64,0xbfe3ed581767dab0,0x3fe9fe4ee2f2b1cd,4
+np.float64,0xbfc40923d5281248,0x3fef9bd8ee9f6e68,4
+np.float64,0x3fe411a834682350,0x3fe9e9103854f057,4
+np.float64,0xbfedf6ccdf7bed9a,0x3fe2f77ad6543246,4
+np.float64,0xbfe8788a44f0f114,0x3fe7172f3aa0c742,4
+np.float64,0xbfce728f173ce520,0x3fef1954083bea04,4
+np.float64,0xbfd64dd0acac9ba2,0x3fee138c3293c246,4
+np.float64,0xbfe00669f5600cd4,0x3fec121443945350,4
+np.float64,0xbfe7152ba2ee2a58,0x3fe8079465d09846,4
+np.float64,0x3fe8654d8f70ca9c,0x3fe7247c94f09596,4
+np.float64,0x3fea68045cf4d008,0x3fe5b58cfe81a243,4
+np.float64,0xbfcd4779073a8ef4,0x3fef2a9d78153fa5,4
+np.float64,0xbfdb4456e5b688ae,0x3fed23b11614203f,4
+np.float64,0x3fcb5d59cd36bab0,0x3fef45818216a515,4
+np.float64,0xbfd914ff5ab229fe,0x3fed92e73746fea8,4
+np.float64,0x3fe4d211db69a424,0x3fe97653f433d15f,4
+np.float64,0xbfdbbb9224b77724,0x3fed0adb593dde80,4
+np.float64,0x3fd424ceafa8499c,0x3fee6d9124795d33,4
+np.float64,0x3feb5968f976b2d2,0x3fe501d116efbf54,4
+np.float64,0x3fee7d92a2fcfb26,0x3fe28a479b6a9dcf,4
+np.float64,0x3fc308e9972611d0,0x3fefa595f4df0c89,4
+np.float64,0x3fda79cd77b4f39c,0x3fed4cf8e69ba1f8,4
+np.float64,0x3fcbcf42d5379e88,0x3fef3f6a6a77c187,4
+np.float64,0x3fe13a1da662743c,0x3feb79504faea888,4
+np.float64,0xbfee4435f07c886c,0x3fe2b8ea98d2fc29,4
+np.float64,0x3fd65d68ccacbad0,0x3fee10e1ac7ada89,4
+np.float64,0x3fef2f89bb7e5f14,0x3fe1f81e882cc3f4,4
+np.float64,0xbfef0a7769fe14ef,0x3fe216bf384fc646,4
+np.float64,0x3fc065277320ca50,0x3fefbce44835c193,4
+np.float64,0x3fe9c1a74d73834e,0x3fe62e9ee0c2f2bf,4
+np.float64,0x3fd9d96e5db3b2dc,0x3fed6cd88eb51f6a,4
+np.float64,0x3fe02bf1c56057e4,0x3febfffc24b5a7ba,4
+np.float64,0xbfd6814350ad0286,0x3fee0ab9ad318b84,4
+np.float64,0x3f9fcbec583f97c0,0x3feffc0d0f1d8e75,4
+np.float64,0x3fe23524e5e46a4a,0x3feaf55372949a06,4
+np.float64,0xbfbdc95f6a3b92c0,0x3fefc89c21d44995,4
+np.float64,0x3fe961bb9cf2c378,0x3fe6735d6e1cca58,4
+np.float64,0xbfe8f1c370f1e387,0x3fe6c29d1be8bee9,4
+np.float64,0x3fd880d43ab101a8,0x3fedaee3c7ccfc96,4
+np.float64,0xbfedb37005fb66e0,0x3fe32d91ef2e3bd3,4
+np.float64,0xfdce287bfb9c5,0x3ff0000000000000,4
+np.float64,0x9aa1b9e735437,0x3ff0000000000000,4
+np.float64,0x6beac6e0d7d59,0x3ff0000000000000,4
+np.float64,0x47457aae8e8b0,0x3ff0000000000000,4
+np.float64,0x35ff13b46bfe3,0x3ff0000000000000,4
+np.float64,0xb9c0c82b73819,0x3ff0000000000000,4
+np.float64,0x1a8dc21a351b9,0x3ff0000000000000,4
+np.float64,0x7e87ef6afd0ff,0x3ff0000000000000,4
+np.float64,0x620a6588c414d,0x3ff0000000000000,4
+np.float64,0x7f366000fe6e,0x3ff0000000000000,4
+np.float64,0x787e39f4f0fc8,0x3ff0000000000000,4
+np.float64,0xf5134f1fea26a,0x3ff0000000000000,4
+np.float64,0xbce700ef79ce0,0x3ff0000000000000,4
+np.float64,0x144d7cc8289b1,0x3ff0000000000000,4
+np.float64,0xb9fbc5b973f79,0x3ff0000000000000,4
+np.float64,0xc3d6292d87ac5,0x3ff0000000000000,4
+np.float64,0xc1084e618210a,0x3ff0000000000000,4
+np.float64,0xb6b9eca56d73e,0x3ff0000000000000,4
+np.float64,0xc7ac4b858f58a,0x3ff0000000000000,4
+np.float64,0x516d75d2a2daf,0x3ff0000000000000,4
+np.float64,0x9dc089d93b811,0x3ff0000000000000,4
+np.float64,0x7b5f2840f6be6,0x3ff0000000000000,4
+np.float64,0x121d3ce8243a9,0x3ff0000000000000,4
+np.float64,0xf0be0337e17c1,0x3ff0000000000000,4
+np.float64,0xff58a5cbfeb15,0x3ff0000000000000,4
+np.float64,0xdaf1d07fb5e3a,0x3ff0000000000000,4
+np.float64,0x61d95382c3b2b,0x3ff0000000000000,4
+np.float64,0xe4df943fc9bf3,0x3ff0000000000000,4
+np.float64,0xf72ac2bdee559,0x3ff0000000000000,4
+np.float64,0x12dafbf625b60,0x3ff0000000000000,4
+np.float64,0xee11d427dc23b,0x3ff0000000000000,4
+np.float64,0xf4f8eb37e9f1e,0x3ff0000000000000,4
+np.float64,0xad7cb5df5af97,0x3ff0000000000000,4
+np.float64,0x59fc9b06b3f94,0x3ff0000000000000,4
+np.float64,0x3c3e65e4787ce,0x3ff0000000000000,4
+np.float64,0xe37bc993c6f79,0x3ff0000000000000,4
+np.float64,0x13bd6330277ad,0x3ff0000000000000,4
+np.float64,0x56cc2800ad986,0x3ff0000000000000,4
+np.float64,0x6203b8fcc4078,0x3ff0000000000000,4
+np.float64,0x75c7c8b8eb8fa,0x3ff0000000000000,4
+np.float64,0x5ebf8e00bd7f2,0x3ff0000000000000,4
+np.float64,0xda81f2f1b503f,0x3ff0000000000000,4
+np.float64,0x6adb17d6d5b64,0x3ff0000000000000,4
+np.float64,0x1ba68eee374d3,0x3ff0000000000000,4
+np.float64,0xeecf6fbbdd9ee,0x3ff0000000000000,4
+np.float64,0x24d6dd8e49add,0x3ff0000000000000,4
+np.float64,0xdf7cb81bbef97,0x3ff0000000000000,4
+np.float64,0xafd7be1b5faf8,0x3ff0000000000000,4
+np.float64,0xdb90ca35b721a,0x3ff0000000000000,4
+np.float64,0xa72903a14e521,0x3ff0000000000000,4
+np.float64,0x14533ee028a7,0x3ff0000000000000,4
+np.float64,0x7951540cf2a2b,0x3ff0000000000000,4
+np.float64,0x22882be045106,0x3ff0000000000000,4
+np.float64,0x136270d626c4f,0x3ff0000000000000,4
+np.float64,0x6a0f5744d41ec,0x3ff0000000000000,4
+np.float64,0x21e0d1aa43c1b,0x3ff0000000000000,4
+np.float64,0xee544155dca88,0x3ff0000000000000,4
+np.float64,0xcbe8aac797d16,0x3ff0000000000000,4
+np.float64,0x6c065e80d80e,0x3ff0000000000000,4
+np.float64,0xe57f0411cafe1,0x3ff0000000000000,4
+np.float64,0xdec3a6bdbd875,0x3ff0000000000000,4
+np.float64,0xf4d23a0fe9a48,0x3ff0000000000000,4
+np.float64,0xda77ef47b4efe,0x3ff0000000000000,4
+np.float64,0x8c405c9b1880c,0x3ff0000000000000,4
+np.float64,0x4eced5149d9db,0x3ff0000000000000,4
+np.float64,0x16b6552c2d6cc,0x3ff0000000000000,4
+np.float64,0x6fbc262cdf785,0x3ff0000000000000,4
+np.float64,0x628c3844c5188,0x3ff0000000000000,4
+np.float64,0x6d827d2cdb050,0x3ff0000000000000,4
+np.float64,0xd1bfdf29a37fc,0x3ff0000000000000,4
+np.float64,0xd85400fdb0a80,0x3ff0000000000000,4
+np.float64,0xcc420b2d98842,0x3ff0000000000000,4
+np.float64,0xac41d21b5883b,0x3ff0000000000000,4
+np.float64,0x432f18d4865e4,0x3ff0000000000000,4
+np.float64,0xe7e89a1bcfd14,0x3ff0000000000000,4
+np.float64,0x9b1141d536228,0x3ff0000000000000,4
+np.float64,0x6805f662d00bf,0x3ff0000000000000,4
+np.float64,0xc76552358ecab,0x3ff0000000000000,4
+np.float64,0x4ae8ffee95d21,0x3ff0000000000000,4
+np.float64,0x4396c096872d9,0x3ff0000000000000,4
+np.float64,0x6e8e55d4dd1cb,0x3ff0000000000000,4
+np.float64,0x4c2e33dc985c7,0x3ff0000000000000,4
+np.float64,0xbce814a579d03,0x3ff0000000000000,4
+np.float64,0x911681b5222d0,0x3ff0000000000000,4
+np.float64,0x5f90a4b2bf215,0x3ff0000000000000,4
+np.float64,0x26f76be84deee,0x3ff0000000000000,4
+np.float64,0xb2f7536165eeb,0x3ff0000000000000,4
+np.float64,0x4de4e6089bc9d,0x3ff0000000000000,4
+np.float64,0xf2e016afe5c03,0x3ff0000000000000,4
+np.float64,0xb9b7b949736f7,0x3ff0000000000000,4
+np.float64,0x3363ea1866c7e,0x3ff0000000000000,4
+np.float64,0xd1a3bd6ba3478,0x3ff0000000000000,4
+np.float64,0xae89f3595d13f,0x3ff0000000000000,4
+np.float64,0xddbd9601bb7c,0x3ff0000000000000,4
+np.float64,0x5de41a06bbc84,0x3ff0000000000000,4
+np.float64,0xfd58c86dfab19,0x3ff0000000000000,4
+np.float64,0x24922e8c49247,0x3ff0000000000000,4
+np.float64,0xcda040339b408,0x3ff0000000000000,4
+np.float64,0x5fe500b2bfca1,0x3ff0000000000000,4
+np.float64,0x9214abb924296,0x3ff0000000000000,4
+np.float64,0x800609fe0a2c13fd,0x3ff0000000000000,4
+np.float64,0x800c7c6fe518f8e0,0x3ff0000000000000,4
+np.float64,0x800a1a9491b4352a,0x3ff0000000000000,4
+np.float64,0x800b45e0e8968bc2,0x3ff0000000000000,4
+np.float64,0x8008497e57d092fd,0x3ff0000000000000,4
+np.float64,0x800b9c0af0173816,0x3ff0000000000000,4
+np.float64,0x800194cccb43299a,0x3ff0000000000000,4
+np.float64,0x8001c91ef183923f,0x3ff0000000000000,4
+np.float64,0x800f25b5ccde4b6c,0x3ff0000000000000,4
+np.float64,0x800ce63ccc79cc7a,0x3ff0000000000000,4
+np.float64,0x800d8fb2e83b1f66,0x3ff0000000000000,4
+np.float64,0x80083cd06f7079a1,0x3ff0000000000000,4
+np.float64,0x800823598e9046b3,0x3ff0000000000000,4
+np.float64,0x8001c1319de38264,0x3ff0000000000000,4
+np.float64,0x800f2b68543e56d1,0x3ff0000000000000,4
+np.float64,0x80022a4f4364549f,0x3ff0000000000000,4
+np.float64,0x800f51badf7ea376,0x3ff0000000000000,4
+np.float64,0x8003fbf31e27f7e7,0x3ff0000000000000,4
+np.float64,0x800d4c00e2fa9802,0x3ff0000000000000,4
+np.float64,0x800023b974804774,0x3ff0000000000000,4
+np.float64,0x800860778990c0ef,0x3ff0000000000000,4
+np.float64,0x800a15c241542b85,0x3ff0000000000000,4
+np.float64,0x8003097d9dc612fc,0x3ff0000000000000,4
+np.float64,0x800d77d8541aefb1,0x3ff0000000000000,4
+np.float64,0x80093804ab52700a,0x3ff0000000000000,4
+np.float64,0x800d2b3bfd7a5678,0x3ff0000000000000,4
+np.float64,0x800da24bcd5b4498,0x3ff0000000000000,4
+np.float64,0x8006eee1c28dddc4,0x3ff0000000000000,4
+np.float64,0x80005137fa40a271,0x3ff0000000000000,4
+np.float64,0x8007a3fbc22f47f8,0x3ff0000000000000,4
+np.float64,0x800dcd97071b9b2e,0x3ff0000000000000,4
+np.float64,0x80065b36048cb66d,0x3ff0000000000000,4
+np.float64,0x8004206ba72840d8,0x3ff0000000000000,4
+np.float64,0x8007e82b98cfd058,0x3ff0000000000000,4
+np.float64,0x8001a116ed23422f,0x3ff0000000000000,4
+np.float64,0x800c69e9ff18d3d4,0x3ff0000000000000,4
+np.float64,0x8003843688e7086e,0x3ff0000000000000,4
+np.float64,0x800335e3b8866bc8,0x3ff0000000000000,4
+np.float64,0x800e3308f0bc6612,0x3ff0000000000000,4
+np.float64,0x8002a9ec55c553d9,0x3ff0000000000000,4
+np.float64,0x80001c2084e03842,0x3ff0000000000000,4
+np.float64,0x800bc2bbd8d78578,0x3ff0000000000000,4
+np.float64,0x800ae6bcc555cd7a,0x3ff0000000000000,4
+np.float64,0x80083f7a13907ef5,0x3ff0000000000000,4
+np.float64,0x800d83ed76db07db,0x3ff0000000000000,4
+np.float64,0x800a12251974244b,0x3ff0000000000000,4
+np.float64,0x800a69c95714d393,0x3ff0000000000000,4
+np.float64,0x800cd5a85639ab51,0x3ff0000000000000,4
+np.float64,0x800e0e1837bc1c31,0x3ff0000000000000,4
+np.float64,0x8007b5ca39ef6b95,0x3ff0000000000000,4
+np.float64,0x800cf961cad9f2c4,0x3ff0000000000000,4
+np.float64,0x80066e8fc14cdd20,0x3ff0000000000000,4
+np.float64,0x8001cb8c7b43971a,0x3ff0000000000000,4
+np.float64,0x800002df68a005c0,0x3ff0000000000000,4
+np.float64,0x8003e6681567ccd1,0x3ff0000000000000,4
+np.float64,0x800b039126b60723,0x3ff0000000000000,4
+np.float64,0x800d2e1b663a5c37,0x3ff0000000000000,4
+np.float64,0x800188b3e2a31169,0x3ff0000000000000,4
+np.float64,0x8001f272e943e4e7,0x3ff0000000000000,4
+np.float64,0x800d7f53607afea7,0x3ff0000000000000,4
+np.float64,0x80092cafa4f25960,0x3ff0000000000000,4
+np.float64,0x800fc009f07f8014,0x3ff0000000000000,4
+np.float64,0x8003da896507b514,0x3ff0000000000000,4
+np.float64,0x800d4d1b4c3a9a37,0x3ff0000000000000,4
+np.float64,0x8007a835894f506c,0x3ff0000000000000,4
+np.float64,0x80057ba0522af741,0x3ff0000000000000,4
+np.float64,0x8009b7054b336e0b,0x3ff0000000000000,4
+np.float64,0x800b2c6c125658d9,0x3ff0000000000000,4
+np.float64,0x8008b1840ad16308,0x3ff0000000000000,4
+np.float64,0x8007ea0e3befd41d,0x3ff0000000000000,4
+np.float64,0x800dd658683bacb1,0x3ff0000000000000,4
+np.float64,0x8008cda48fd19b49,0x3ff0000000000000,4
+np.float64,0x8003acca14c75995,0x3ff0000000000000,4
+np.float64,0x8008bd152d717a2b,0x3ff0000000000000,4
+np.float64,0x80010d1ea3621a3e,0x3ff0000000000000,4
+np.float64,0x800130b78b826170,0x3ff0000000000000,4
+np.float64,0x8002cf3a46e59e75,0x3ff0000000000000,4
+np.float64,0x800b76e7fa76edd0,0x3ff0000000000000,4
+np.float64,0x800e065fe1dc0cc0,0x3ff0000000000000,4
+np.float64,0x8000dd527ea1baa6,0x3ff0000000000000,4
+np.float64,0x80032cb234665965,0x3ff0000000000000,4
+np.float64,0x800affc1acb5ff84,0x3ff0000000000000,4
+np.float64,0x80074be23fee97c5,0x3ff0000000000000,4
+np.float64,0x8004f83eafc9f07e,0x3ff0000000000000,4
+np.float64,0x800b02a115560543,0x3ff0000000000000,4
+np.float64,0x800b324a55766495,0x3ff0000000000000,4
+np.float64,0x800ffbcfd69ff7a0,0x3ff0000000000000,4
+np.float64,0x800830bc7b906179,0x3ff0000000000000,4
+np.float64,0x800cbafe383975fd,0x3ff0000000000000,4
+np.float64,0x8001ee42bfe3dc86,0x3ff0000000000000,4
+np.float64,0x8005b00fdc0b6020,0x3ff0000000000000,4
+np.float64,0x8005e7addd0bcf5c,0x3ff0000000000000,4
+np.float64,0x8001ae4cb0635c9a,0x3ff0000000000000,4
+np.float64,0x80098a9941131533,0x3ff0000000000000,4
+np.float64,0x800334c929466993,0x3ff0000000000000,4
+np.float64,0x8009568239d2ad05,0x3ff0000000000000,4
+np.float64,0x800f0639935e0c73,0x3ff0000000000000,4
+np.float64,0x800cebce7499d79d,0x3ff0000000000000,4
+np.float64,0x800482ee4c2905dd,0x3ff0000000000000,4
+np.float64,0x8007b7bd9e2f6f7c,0x3ff0000000000000,4
+np.float64,0x3fe654469f2ca88d,0x3fe8853f6c01ffb3,4
+np.float64,0x3feb4d7297369ae5,0x3fe50ad5bb621408,4
+np.float64,0x3feef53ba43dea77,0x3fe2283f356f8658,4
+np.float64,0x3fddf564eabbeaca,0x3fec8ec0e0dead9c,4
+np.float64,0x3fd3a69078274d21,0x3fee80e05c320000,4
+np.float64,0x3fecdafe5d39b5fd,0x3fe3d91a5d440fd9,4
+np.float64,0x3fd93286bc32650d,0x3fed8d40696cd10e,4
+np.float64,0x3fc0d34eb821a69d,0x3fefb954023d4284,4
+np.float64,0x3fc7b4b9a02f6973,0x3fef73e8739787ce,4
+np.float64,0x3fe08c839a611907,0x3febd0bc6f5641cd,4
+np.float64,0x3fb3d1758627a2eb,0x3fefe776f6183f96,4
+np.float64,0x3fef93c9ff3f2794,0x3fe1a4d2f622627d,4
+np.float64,0x3fea8d0041351a01,0x3fe59a52a1c78c9e,4
+np.float64,0x3fe3e26a30e7c4d4,0x3fea04ad3e0bbf8d,4
+np.float64,0x3fe5a34c9f6b4699,0x3fe8f57c5ccd1eab,4
+np.float64,0x3fc21ef859243df1,0x3fefae0b68a3a2e7,4
+np.float64,0x3fed7dd585fafbab,0x3fe35860041e5b0d,4
+np.float64,0x3fe5abacf22b575a,0x3fe8f03d8b6ef0f2,4
+np.float64,0x3fe426451f284c8a,0x3fe9dcf21f13205b,4
+np.float64,0x3fc01f6456203ec9,0x3fefbf19e2a8e522,4
+np.float64,0x3fe1cf2772239e4f,0x3feb2bbd645c7697,4
+np.float64,0x3fd18c4ace231896,0x3feecdfdd086c110,4
+np.float64,0x3fe8387d5b7070fb,0x3fe74358f2ec4910,4
+np.float64,0x3fdce51c2239ca38,0x3feccb2ae5459632,4
+np.float64,0x3fe5b0f2e4eb61e6,0x3fe8ecef4dbe4277,4
+np.float64,0x3fe1ceeb08a39dd6,0x3feb2bdd4dcfb3df,4
+np.float64,0x3febc5899d778b13,0x3fe4afc8dd8ad228,4
+np.float64,0x3fe7a47fbe2f48ff,0x3fe7a7fd9b352ea5,4
+np.float64,0x3fe7f74e1fafee9c,0x3fe76feb2755b247,4
+np.float64,0x3fe2bfad04e57f5a,0x3feaa9b46adddaeb,4
+np.float64,0x3fd06a090320d412,0x3feef40c334f8fba,4
+np.float64,0x3fdc97297d392e53,0x3fecdc16a3e22fcb,4
+np.float64,0x3fdc1a3f3838347e,0x3fecf6db2769d404,4
+np.float64,0x3fcca90096395201,0x3fef338156fcd218,4
+np.float64,0x3fed464733fa8c8e,0x3fe38483f0465d91,4
+np.float64,0x3fe7e067d82fc0d0,0x3fe77f7c8c9de896,4
+np.float64,0x3fc014fa0b2029f4,0x3fefbf6d84c933f8,4
+np.float64,0x3fd3bf1524277e2a,0x3fee7d2997b74dec,4
+np.float64,0x3fec153b86782a77,0x3fe472bb5497bb2a,4
+np.float64,0x3fd3e4d9d5a7c9b4,0x3fee776842691902,4
+np.float64,0x3fea6c0e2c74d81c,0x3fe5b2954cb458d9,4
+np.float64,0x3fee8f6a373d1ed4,0x3fe27bb9e348125b,4
+np.float64,0x3fd30c6dd42618dc,0x3fee97d2cab2b0bc,4
+np.float64,0x3fe4f90e6d69f21d,0x3fe95ea3dd4007f2,4
+np.float64,0x3fe271d467e4e3a9,0x3fead470d6d4008b,4
+np.float64,0x3fef2983897e5307,0x3fe1fd1a4debe33b,4
+np.float64,0x3fe980cc83b30199,0x3fe65d2fb8a0eb46,4
+np.float64,0x3fdfdf53db3fbea8,0x3fec1cf95b2a1cc7,4
+np.float64,0x3fe4d5307ba9aa61,0x3fe974701b4156cb,4
+np.float64,0x3fdb4e2345b69c47,0x3fed21aa6c146512,4
+np.float64,0x3fe3f7830327ef06,0x3fe9f85f6c88c2a8,4
+np.float64,0x3fca915fb63522bf,0x3fef502b73a52ecf,4
+np.float64,0x3fe66d3709ecda6e,0x3fe87531d7372d7a,4
+np.float64,0x3fd86000bcb0c001,0x3fedb5018dd684ca,4
+np.float64,0x3fe516e5feea2dcc,0x3fe94c68b111404e,4
+np.float64,0x3fd83c53dd3078a8,0x3fedbb9e5dd9e165,4
+np.float64,0x3fedfeeb673bfdd7,0x3fe2f0f0253c5d5d,4
+np.float64,0x3fe0dc6f9c21b8df,0x3feba8e2452410c2,4
+np.float64,0x3fbe154d643c2a9b,0x3fefc780a9357457,4
+np.float64,0x3fe5f63986abec73,0x3fe8c1434951a40a,4
+np.float64,0x3fbce0e50839c1ca,0x3fefcbeeaa27de75,4
+np.float64,0x3fd7ef5c5c2fdeb9,0x3fedc9c3022495b3,4
+np.float64,0x3fc1073914220e72,0x3fefb79de80fc0fd,4
+np.float64,0x3fe1a93c3d235278,0x3feb3fb21f86ac67,4
+np.float64,0x3fe321ee53e643dd,0x3fea72e2999f1e22,4
+np.float64,0x3fa881578c3102af,0x3feff69e6e51e0d6,4
+np.float64,0x3fd313482a262690,0x3fee96d161199495,4
+np.float64,0x3fe7272cd6ae4e5a,0x3fe7fbacbd0d8f43,4
+np.float64,0x3fd6cf4015ad9e80,0x3fedfd3513d544b8,4
+np.float64,0x3fc67b7e6d2cf6fd,0x3fef81f5c16923a4,4
+np.float64,0x3fa1999c14233338,0x3feffb2913a14184,4
+np.float64,0x3fc74eb8dd2e9d72,0x3fef78909a138e3c,4
+np.float64,0x3fc0b9274921724f,0x3fefba2ebd5f3e1c,4
+np.float64,0x3fd53fa156aa7f43,0x3fee40a18e952e88,4
+np.float64,0x3feaccbca4b59979,0x3fe56b22b33eb713,4
+np.float64,0x3fe6a01e3a2d403c,0x3fe8543fbd820ecc,4
+np.float64,0x3fd392a869a72551,0x3fee83e0ffe0e8de,4
+np.float64,0x3fe44d8928689b12,0x3fe9c5bf3c8fffdb,4
+np.float64,0x3fca3f209f347e41,0x3fef5461b6fa0924,4
+np.float64,0x3fee9e84b07d3d09,0x3fe26f638f733549,4
+np.float64,0x3faf49acb03e9359,0x3feff0b583cd8c48,4
+np.float64,0x3fea874b2af50e96,0x3fe59e882fa6febf,4
+np.float64,0x3fc50b72772a16e5,0x3fef918777dc41be,4
+np.float64,0x3fe861d1d4f0c3a4,0x3fe726e44d9d42c2,4
+np.float64,0x3fcadd2e2535ba5c,0x3fef4c3e2b56da38,4
+np.float64,0x3fea59c29cb4b385,0x3fe5c0043e586439,4
+np.float64,0x3fc1ffef0d23ffde,0x3fefaf22be452d13,4
+np.float64,0x3fc2d8dbc125b1b8,0x3fefa75b646d8e4e,4
+np.float64,0x3fd66c6471acd8c9,0x3fee0e5038b895c0,4
+np.float64,0x3fd0854adfa10a96,0x3feef0945bcc5c99,4
+np.float64,0x3feaac7076f558e1,0x3fe58316c23a82ad,4
+np.float64,0x3fdda49db3bb493b,0x3feca0e347c0ad6f,4
+np.float64,0x3fe43a539de874a7,0x3fe9d11d722d4822,4
+np.float64,0x3feeee3ebbfddc7d,0x3fe22dffd251e9af,4
+np.float64,0x3f8ee2c5b03dc58b,0x3fefff11855a7b6c,4
+np.float64,0x3fcd7107c63ae210,0x3fef2840bb55ca52,4
+np.float64,0x3f8d950d203b2a1a,0x3fefff253a08e40e,4
+np.float64,0x3fd40a5e57a814bd,0x3fee71a633c761fc,4
+np.float64,0x3fee836ec83d06de,0x3fe28580975be2fd,4
+np.float64,0x3fd7bbe87f2f77d1,0x3fedd31f661890cc,4
+np.float64,0xbfe05bf138a0b7e2,0x3febe8a000d96e47,4
+np.float64,0xbf88bddd90317bc0,0x3fefff66f6e2ff26,4
+np.float64,0xbfdc9cbb12393976,0x3fecdae2982335db,4
+np.float64,0xbfd85b4eccb0b69e,0x3fedb5e0dd87f702,4
+np.float64,0xbfe5c326cb2b864e,0x3fe8e180f525fa12,4
+np.float64,0xbfe381a0e4a70342,0x3fea3c8e5e3ab78e,4
+np.float64,0xbfe58d892c2b1b12,0x3fe9031551617aed,4
+np.float64,0xbfd7f3a52cafe74a,0x3fedc8fa97edd080,4
+np.float64,0xbfef3417bc7e682f,0x3fe1f45989f6a009,4
+np.float64,0xbfddfb8208bbf704,0x3fec8d5fa9970773,4
+np.float64,0xbfdab69bcc356d38,0x3fed40b2f6c347c6,4
+np.float64,0xbfed3f7cf17a7efa,0x3fe389e4ff4d9235,4
+np.float64,0xbfe47675d9a8ecec,0x3fe9ad6829a69e94,4
+np.float64,0xbfd030e2902061c6,0x3feefb3f811e024f,4
+np.float64,0xbfc376ac7226ed58,0x3fefa1798712b37e,4
+np.float64,0xbfdb7e54a0b6fcaa,0x3fed17a974c4bc28,4
+np.float64,0xbfdb7d5d5736faba,0x3fed17dcf31a8d84,4
+np.float64,0xbf876bd6502ed7c0,0x3fefff76dce6232c,4
+np.float64,0xbfd211e6c02423ce,0x3feebba41f0a1764,4
+np.float64,0xbfb443e3962887c8,0x3fefe658953629d4,4
+np.float64,0xbfe81b09e9b03614,0x3fe757882e4fdbae,4
+np.float64,0xbfdcb905d2b9720c,0x3fecd4c22cfe84e5,4
+np.float64,0xbfe3b62d99276c5b,0x3fea1e5520b3098d,4
+np.float64,0xbfbf05b25c3e0b68,0x3fefc3ecc04bca8e,4
+np.float64,0xbfdedc885b3db910,0x3fec59e22feb49f3,4
+np.float64,0xbfe33aa282667545,0x3fea64f2d55ec471,4
+np.float64,0xbfec84745a3908e9,0x3fe41cb3214e7044,4
+np.float64,0xbfddefdff1bbdfc0,0x3fec8fff88d4d0ec,4
+np.float64,0xbfd26ae6aca4d5ce,0x3feeaf208c7fedf6,4
+np.float64,0xbfee010591fc020b,0x3fe2ef3e57211a5e,4
+np.float64,0xbfb8cfddca319fb8,0x3fefd98d8f7918ed,4
+np.float64,0xbfe991648f3322c9,0x3fe6514e54670bae,4
+np.float64,0xbfee63fd087cc7fa,0x3fe29f1bfa3297cc,4
+np.float64,0xbfe1685942a2d0b2,0x3feb617f5f839eee,4
+np.float64,0xbfc6fc2fd62df860,0x3fef7c4698fd58cf,4
+np.float64,0xbfe42723d3a84e48,0x3fe9dc6ef7243e90,4
+np.float64,0xbfc3a7e89d274fd0,0x3fef9f99e3314e77,4
+np.float64,0xbfeb4c9521f6992a,0x3fe50b7c919bc6d8,4
+np.float64,0xbf707b34e020f680,0x3fefffef05e30264,4
+np.float64,0xbfc078478e20f090,0x3fefbc479305d5aa,4
+np.float64,0xbfd494ac4ca92958,0x3fee5c11f1cd8269,4
+np.float64,0xbfdaf888a035f112,0x3fed3346ae600469,4
+np.float64,0xbfa5d8ed502bb1e0,0x3feff88b0f262609,4
+np.float64,0xbfeec0cbfffd8198,0x3fe253543b2371cb,4
+np.float64,0xbfe594b5986b296b,0x3fe8fe9b39fb3940,4
+np.float64,0xbfc8ece7c631d9d0,0x3fef652bd0611ac7,4
+np.float64,0xbfd8ffeca0b1ffda,0x3fed96ebdf9b65cb,4
+np.float64,0xbfba9b221e353648,0x3fefd3cc21e2f15c,4
+np.float64,0xbfca63a52c34c74c,0x3fef52848eb9ed3b,4
+np.float64,0xbfe588e9b06b11d4,0x3fe905f7403e8881,4
+np.float64,0xbfc76f82db2edf04,0x3fef77138fe9bbc2,4
+np.float64,0xbfeeb3f334bd67e6,0x3fe25ddadb1096d6,4
+np.float64,0xbfbf2b64ce3e56c8,0x3fefc35a9555f6df,4
+np.float64,0xbfe9920e4ff3241c,0x3fe650d4ab8f5c42,4
+np.float64,0xbfb4a54c02294a98,0x3fefe55fc85ae5e9,4
+np.float64,0xbfe353b0c766a762,0x3fea56c02d17e4b7,4
+np.float64,0xbfd99961a4b332c4,0x3fed795fcd00dbf9,4
+np.float64,0xbfef191ddabe323c,0x3fe20aa79524f636,4
+np.float64,0xbfb25d060224ba10,0x3fefeaeee5cc8c0b,4
+np.float64,0xbfe6022428ec0448,0x3fe8b9b46e776194,4
+np.float64,0xbfed1a236cba3447,0x3fe3a76bee0d9861,4
+np.float64,0xbfc59671e72b2ce4,0x3fef8bc4daef6f14,4
+np.float64,0xbfdf2711703e4e22,0x3fec4886a8c9ceb5,4
+np.float64,0xbfeb7e207536fc41,0x3fe4e610c783f168,4
+np.float64,0xbfe6cdf5bcad9bec,0x3fe8365f8a59bc81,4
+np.float64,0xbfe55294adaaa52a,0x3fe927b0af5ccd09,4
+np.float64,0xbfdf4a88913e9512,0x3fec4036df58ba74,4
+np.float64,0xbfebb7efe4376fe0,0x3fe4ba276006992d,4
+np.float64,0xbfe09f29cfa13e54,0x3febc77f4f9c95e7,4
+np.float64,0xbfdf8c75653f18ea,0x3fec30ac924e4f46,4
+np.float64,0xbfefd601c7ffac04,0x3fe16d6f21bcb9c1,4
+np.float64,0xbfeae97ff5f5d300,0x3fe555bb5b87efe9,4
+np.float64,0xbfed427f02fa84fe,0x3fe387830db093bc,4
+np.float64,0xbfa33909cc267210,0x3feffa3a1bcb50dd,4
+np.float64,0xbfe9aa4bf5f35498,0x3fe63f6e98f6aa0f,4
+np.float64,0xbfe2d7349b25ae69,0x3fea9caa7c331e7e,4
+np.float64,0xbfcdbb2a3a3b7654,0x3fef2401c9659e4b,4
+np.float64,0xbfc8a90919315214,0x3fef686fe7fc0513,4
+np.float64,0xbfe62a98df2c5532,0x3fe89ff22a02cc6b,4
+np.float64,0xbfdc0f67b3b81ed0,0x3fecf928b637798f,4
+np.float64,0xbfebb32bf6f76658,0x3fe4bdc893c09698,4
+np.float64,0xbfec067996380cf3,0x3fe47e132741db97,4
+np.float64,0xbfd9774e1d32ee9c,0x3fed7ffe1e87c434,4
+np.float64,0xbfef989890bf3131,0x3fe1a0d025c80cf4,4
+np.float64,0xbfe59887e62b3110,0x3fe8fc382a3d4197,4
+np.float64,0xbfdea0a11e3d4142,0x3fec67b987e236ec,4
+np.float64,0xbfe2ec495825d892,0x3fea90efb231602d,4
+np.float64,0xbfb329c5c2265388,0x3fefe90f1b8209c3,4
+np.float64,0xbfdcd2dcd339a5ba,0x3feccf24c60b1478,4
+np.float64,0xbfe537ea18aa6fd4,0x3fe938237e217fe0,4
+np.float64,0xbfe8675ce170ceba,0x3fe723105925ce3a,4
+np.float64,0xbfd70723acae0e48,0x3fedf369ac070e65,4
+np.float64,0xbfea9d8692b53b0d,0x3fe58e1ee42e3fdb,4
+np.float64,0xbfcfeb96653fd72c,0x3fef029770033bdc,4
+np.float64,0xbfcc06c92d380d94,0x3fef3c69797d9b0a,4
+np.float64,0xbfe16b7c4f62d6f8,0x3feb5fdf9f0a9a07,4
+np.float64,0xbfed4d7a473a9af4,0x3fe37ecee27b1eb7,4
+np.float64,0xbfe6a6f6942d4ded,0x3fe84fccdf762b19,4
+np.float64,0xbfda46d867348db0,0x3fed572d928fa657,4
+np.float64,0xbfdbd9482db7b290,0x3fed049b5f907b52,4
+np.float64,0x7fe992ceb933259c,0xbfeb15af92aad70e,4
+np.float64,0x7fe3069204a60d23,0xbfe5eeff454240e9,4
+np.float64,0x7fe729dbf32e53b7,0xbfefe0528a330e4c,4
+np.float64,0x7fec504fb638a09e,0x3fd288e95dbedf65,4
+np.float64,0x7fe1d30167a3a602,0xbfeffc41f946fd02,4
+np.float64,0x7fed7f8ffd3aff1f,0x3fefe68ec604a19d,4
+np.float64,0x7fd2f23635a5e46b,0x3fea63032efbb447,4
+np.float64,0x7fd4c86db1a990da,0x3fdf6b9f7888db5d,4
+np.float64,0x7fe7554db6eeaa9a,0x3fe1b41476861bb0,4
+np.float64,0x7fe34e823ba69d03,0x3fefc435532e6294,4
+np.float64,0x7fec5c82fef8b905,0x3fef8f0c6473034f,4
+np.float64,0x7feba221bff74442,0xbfea95b81eb19b47,4
+np.float64,0x7fe74808a5ae9010,0xbfd3aa322917c3e5,4
+np.float64,0x7fdf41b7e0be836f,0x3fd14283c7147282,4
+np.float64,0x7fec09892f381311,0x3fe5240376ae484b,4
+np.float64,0x7faaf80bf435f017,0x3fe20227fa811423,4
+np.float64,0x7f8422d8402845b0,0x3fe911714593b8a0,4
+np.float64,0x7fd23a7fada474fe,0x3feff9f40aa37e9c,4
+np.float64,0x7fef4a4806fe948f,0x3fec6eca89cb4a62,4
+np.float64,0x7fe1e71cf763ce39,0xbfea6ac63f9ba457,4
+np.float64,0x7fe3e555be27caaa,0xbfe75b305d0dbbfd,4
+np.float64,0x7fcb8bac96371758,0xbfe8b126077f9d4c,4
+np.float64,0x7fc98e2c84331c58,0x3fef9092eb0bc85a,4
+np.float64,0x7fe947cf2b728f9d,0xbfebfff2c5b7d198,4
+np.float64,0x7feee8058c3dd00a,0xbfef21ebaae2eb17,4
+np.float64,0x7fef61d8d5bec3b1,0xbfdf1a032fb1c864,4
+np.float64,0x7fcf714b6f3ee296,0x3fe6fc89a8084098,4
+np.float64,0x7fa9a8b44c335168,0xbfeb16c149cea943,4
+np.float64,0x7fd175c482a2eb88,0xbfef64d341e73f88,4
+np.float64,0x7feab8e6a87571cc,0x3feb10069c397464,4
+np.float64,0x7fe3ade72de75bcd,0x3fd1753e333d5790,4
+np.float64,0x7fb26d87d224db0f,0xbfe753d36b18f4ca,4
+np.float64,0x7fdb7ef159b6fde2,0x3fe5c0a6044d3607,4
+np.float64,0x7fd5af86422b5f0c,0x3fe77193c95f6484,4
+np.float64,0x7fee9e00b07d3c00,0x3fe864d494596845,4
+np.float64,0x7fef927a147f24f3,0xbfe673b14715693d,4
+np.float64,0x7fd0aea63c215d4b,0xbfeff435f119fce9,4
+np.float64,0x7fd02e3796a05c6e,0x3fe4f7e3706e9a3d,4
+np.float64,0x7fd3ed61da27dac3,0xbfefef2f057f168c,4
+np.float64,0x7fefaca0d4ff5941,0x3fd3e8ad205cd4ab,4
+np.float64,0x7feb659e06f6cb3b,0x3fd64d803203e027,4
+np.float64,0x7fc94ccfaf32999e,0x3fee04922209369a,4
+np.float64,0x7feb4ec294f69d84,0xbfd102763a056c89,4
+np.float64,0x7fe2ada6ac655b4c,0x3fef4f6792aa6093,4
+np.float64,0x7fe5f40fdc2be81f,0xbfb4a6327186eee8,4
+np.float64,0x7fe7584bc3eeb097,0xbfd685b8ff94651d,4
+np.float64,0x7fe45d276be8ba4e,0x3fee53b13f7e442f,4
+np.float64,0x7fe6449b3d6c8935,0xbfe7e08bafa75251,4
+np.float64,0x7f8d62e6b03ac5cc,0x3fe73d30762f38fd,4
+np.float64,0x7fe3a76f72a74ede,0xbfeb48a28bc60968,4
+np.float64,0x7fd057706920aee0,0x3fdece8fa06f626c,4
+np.float64,0x7fe45ae158e8b5c2,0x3fe7a70f47b4d349,4
+np.float64,0x7fea8a5a983514b4,0x3fefb053d5f9ddd7,4
+np.float64,0x7fdd1e86ab3a3d0c,0x3fe3cded1b93816b,4
+np.float64,0x7fdb456108b68ac1,0xbfe37574c0b9bf8f,4
+np.float64,0x7fe972602432e4bf,0x3fef9a26e65ec01c,4
+np.float64,0x7fdbe2385637c470,0x3fed541df57969e1,4
+np.float64,0x7fe57f03602afe06,0x3fbd90f595cbbd94,4
+np.float64,0x7feb0ceb68f619d6,0xbfeae9cb8ee5261f,4
+np.float64,0x7fe6abfe6c6d57fc,0xbfef40a6edaca26f,4
+np.float64,0x7fe037ea08606fd3,0xbfda817d75858597,4
+np.float64,0x7fdd75a52dbaeb49,0x3feef2a0d91d6aa1,4
+np.float64,0x7fe8f9af66b1f35e,0xbfedfceef2a3bfc9,4
+np.float64,0x7fedf762b53beec4,0x3fd8b4f21ef69ee3,4
+np.float64,0x7fe99295b7f3252a,0x3feffc24d970383e,4
+np.float64,0x7fe797b0172f2f5f,0x3fee089aa56f7ce8,4
+np.float64,0x7fed89dcc97b13b9,0xbfcfa2bb0c3ea41f,4
+np.float64,0x7fae9e8d5c3d3d1a,0xbfe512ffe16c6b08,4
+np.float64,0x7fefaecbe27f5d97,0x3fbfc718a5e972f1,4
+np.float64,0x7fce0236d93c046d,0xbfa9b7cd790db256,4
+np.float64,0x7fa9689aac32d134,0x3feced501946628a,4
+np.float64,0x7feb1469e93628d3,0x3fef2a988e7673ed,4
+np.float64,0x7fdba78344b74f06,0xbfe092e78965b30c,4
+np.float64,0x7fece54c3fb9ca97,0x3fd3cfd184bed2e6,4
+np.float64,0x7fdb84212b370841,0xbfe25ebf2db6ee55,4
+np.float64,0x7fbe3e8bf23c7d17,0x3fe2ee72df573345,4
+np.float64,0x7fe43d9803687b2f,0xbfed2eff6a9e66a0,4
+np.float64,0x7fb0f9c00a21f37f,0x3feff70f3276fdb7,4
+np.float64,0x7fea0c6cbbb418d8,0xbfefa612494798b2,4
+np.float64,0x7fe4b3239e296646,0xbfe74dd959af8cdc,4
+np.float64,0x7fe5c6a773eb8d4e,0xbfd06944048f8d2b,4
+np.float64,0x7fb1c1278223824e,0xbfeb533a34655bde,4
+np.float64,0x7fd21c09ee243813,0xbfe921ccbc9255c3,4
+np.float64,0x7fe051020c20a203,0x3fbd519d700c1f2f,4
+np.float64,0x7fe0c76845e18ed0,0x3fefb9595191a31b,4
+np.float64,0x7fe6b0b57b6d616a,0xbf8c59a8ba5fcd9a,4
+np.float64,0x7fd386c460270d88,0x3fe8ffea5d1a5c46,4
+np.float64,0x7feeb884713d7108,0x3fee9b2247ef6c0d,4
+np.float64,0x7fd85f71b6b0bee2,0xbfefc30ec3e28f07,4
+np.float64,0x7fc341366426826c,0x3fd4234d35386d3b,4
+np.float64,0x7fe56482dd6ac905,0x3fe7189de6a50668,4
+np.float64,0x7fec67a2e3f8cf45,0xbfef86d0b940f37f,4
+np.float64,0x7fe38b202fe7163f,0x3feb90b75caa2030,4
+np.float64,0x7fdcbc64883978c8,0x3fed4f758fbf64d4,4
+np.float64,0x7fea5f0598f4be0a,0x3fdd503a417b3d4d,4
+np.float64,0x7fda3b6bcf3476d7,0x3fea6e9af3f7f9f5,4
+np.float64,0x7fc7d7896c2faf12,0x3fda2bebc36a2363,4
+np.float64,0x7fe7e8e2626fd1c4,0xbfe7d5e390c4cc3f,4
+np.float64,0x7fde0f3d7abc1e7a,0xbfede7a0ecfa3606,4
+np.float64,0x7fc692b8f52d2571,0x3feff0cd7ab6f61b,4
+np.float64,0xff92d1fce825a400,0xbfc921c36fc014fa,4
+np.float64,0xffdec3af2fbd875e,0xbfed6a77e6a0364e,4
+np.float64,0xffef46e7d9be8dcf,0xbfed7d39476f7e27,4
+np.float64,0xffe2c2ce4525859c,0x3fe1757261316bc9,4
+np.float64,0xffe27c8b5864f916,0xbfefe017c0d43457,4
+np.float64,0xffe184d7442309ae,0x3fa1fb8c49dba596,4
+np.float64,0xffddf5f98d3bebf4,0x3fee4f8eaa5f847e,4
+np.float64,0xffee3ef354fc7de6,0xbfebfd60fa51b2ba,4
+np.float64,0xffdecb3e85bd967e,0x3fbfad2667a8b468,4
+np.float64,0xffe4ee900b29dd20,0xbfdc02dc626f91cd,4
+np.float64,0xffd3179f6da62f3e,0xbfe2cfe442511776,4
+np.float64,0xffe99ef7cef33def,0x3f50994542a7f303,4
+np.float64,0xffe2b66b1ae56cd6,0xbfefe3e066eb6329,4
+np.float64,0xff8f72aff03ee540,0x3fe9c46224cf5003,4
+np.float64,0xffd29beb85a537d8,0x3fefcb0b6166be71,4
+np.float64,0xffaef02d4c3de060,0xbfef5fb71028fc72,4
+np.float64,0xffd39a2a89273456,0x3fe6d4b183205dca,4
+np.float64,0xffef8a9392ff1526,0x3fedb99fbf402468,4
+np.float64,0xffb9b3f31e3367e8,0x3fee1005270fcf80,4
+np.float64,0xffed9d5c693b3ab8,0x3fd110f4b02365d5,4
+np.float64,0xffeaba45f9f5748b,0x3fe499e0a6f4afb2,4
+np.float64,0xffdba3f70d3747ee,0xbfca0c30493ae519,4
+np.float64,0xffa35b985426b730,0xbfdb625df56bcf45,4
+np.float64,0xffccbc9728397930,0x3fc53cbc59020704,4
+np.float64,0xffef73c942bee792,0xbfdc647a7a5e08be,4
+np.float64,0xffcb5acfb236b5a0,0x3feeb4ec038c39fc,4
+np.float64,0xffea116fe2b422df,0x3fefe03b6ae0b435,4
+np.float64,0xffe97de6e7b2fbcd,0xbfd2025698fab9eb,4
+np.float64,0xffdddba314bbb746,0x3fd31f0fdb8f93be,4
+np.float64,0xffd613a24a2c2744,0xbfebbb1efae884b3,4
+np.float64,0xffe3d938aa67b271,0xbfc2099cead3d3be,4
+np.float64,0xffdf08c2e33e1186,0xbfefd236839b900d,4
+np.float64,0xffea6ba8bd34d751,0x3fe8dfc032114719,4
+np.float64,0xffe3202083e64040,0x3fed513b81432a22,4
+np.float64,0xffb2397db62472f8,0xbfee7d7fe1c3f76c,4
+np.float64,0xffd9d0682ab3a0d0,0x3fe0bcf9e531ad79,4
+np.float64,0xffc293df202527c0,0xbfe58d0bdece5e64,4
+np.float64,0xffe1422c7da28458,0xbf81bd72595f2341,4
+np.float64,0xffd64e4ed4ac9c9e,0x3fa4334cc011c703,4
+np.float64,0xffe40a970ae8152e,0x3fead3d258b55b7d,4
+np.float64,0xffc8c2f2223185e4,0xbfef685f07c8b9fd,4
+np.float64,0xffe4b2f7216965ee,0x3fe3861d3d896a83,4
+np.float64,0xffdb531db3b6a63c,0x3fe18cb8332dd59d,4
+np.float64,0xffe8e727a3b1ce4e,0xbfe57b15abb677b9,4
+np.float64,0xffe530c1e12a6184,0xbfb973ea5535e48f,4
+np.float64,0xffe6f7849cedef08,0x3fd39a37ec5af4b6,4
+np.float64,0xffead62a78b5ac54,0x3fe69b3f6c7aa24b,4
+np.float64,0xffeefdd725fdfbad,0xbfc08a456111fdd5,4
+np.float64,0xffe682182fed0430,0x3fecc7c1292761d2,4
+np.float64,0xffee0ca8dcbc1951,0x3fef6cc361ef2c19,4
+np.float64,0xffec9b338f393666,0x3fefa9ab8e0471b5,4
+np.float64,0xffe13c5e29a278bc,0xbfef8da74ad83398,4
+np.float64,0xffd7bd48c62f7a92,0x3fe3468cd4ac9d34,4
+np.float64,0xffedd0ed14bba1d9,0xbfd563a83477077b,4
+np.float64,0xffe86b83f3f0d707,0x3fe9eb3c658e4b2d,4
+np.float64,0xffd6a4db4bad49b6,0xbfc7e11276166e17,4
+np.float64,0xffc29e8404253d08,0x3fd35971961c789f,4
+np.float64,0xffe27cf3d664f9e7,0xbfeca0f73c72f810,4
+np.float64,0xffc34152352682a4,0x3fef384e564c002c,4
+np.float64,0xffe395728ba72ae4,0x3f8fe18c2de86eba,4
+np.float64,0xffed86c4fbbb0d89,0x3fef709db881c672,4
+np.float64,0xffe8a98d37f1531a,0x3fd4879c8f73c3dc,4
+np.float64,0xffb8ce9fea319d40,0xbfb853c8fe46b08d,4
+np.float64,0xffe7f26db8efe4db,0xbfec1cfd3e5c2ac1,4
+np.float64,0xffd7935b77af26b6,0x3fb7368c89b2a460,4
+np.float64,0xffc5840ed02b081c,0x3fd92220b56631f3,4
+np.float64,0xffc36a873926d510,0x3fa84d61baf61811,4
+np.float64,0xffe06ea583e0dd4a,0x3feb647e348b9e39,4
+np.float64,0xffe6a33031ed4660,0xbfe096b851dc1a0a,4
+np.float64,0xffe001c938e00392,0x3fe4eece77623e7a,4
+np.float64,0xffc1e4f23b23c9e4,0xbfdb9bb1f83f6ac4,4
+np.float64,0xffecd3ecbab9a7d9,0x3fbafb1f800f177d,4
+np.float64,0xffc2d3016825a604,0xbfef650e8b0d6afb,4
+np.float64,0xffe222cb68e44596,0x3fde3690e44de5bd,4
+np.float64,0xffe5bb145e2b7628,0x3fedbb98e23c9dc1,4
+np.float64,0xffe9e5823b73cb04,0xbfee41661016c03c,4
+np.float64,0xffd234a00ba46940,0x3fda0312cda580c2,4
+np.float64,0xffe0913ed6e1227d,0xbfed508bb529bd23,4
+np.float64,0xffe8e3596171c6b2,0xbfdc33e1c1d0310e,4
+np.float64,0xffef9c6835ff38cf,0x3fea8ce6d27dfba3,4
+np.float64,0xffdd3bcf66ba779e,0x3fe50523d2b6470e,4
+np.float64,0xffe57e8cf06afd1a,0xbfee600933347247,4
+np.float64,0xffe0d8c65fa1b18c,0x3fe75091f93d5e4c,4
+np.float64,0xffea7c8c16b4f918,0x3fee681724795198,4
+np.float64,0xffe34f7a05269ef4,0xbfe3c3e179676f13,4
+np.float64,0xffd28894a6a5112a,0xbfe5d1027aee615d,4
+np.float64,0xffc73be6f22e77cc,0x3fe469bbc08b472a,4
+np.float64,0xffe7f71b066fee36,0x3fe7ed136c8fdfaa,4
+np.float64,0xffebc13e29f7827c,0x3fefcdc6e677d314,4
+np.float64,0xffd53e9c942a7d3a,0x3fea5a02c7341749,4
+np.float64,0xffd7191b23ae3236,0x3fea419b66023443,4
+np.float64,0xffe9480325b29006,0xbfefeaff5fa38cd5,4
+np.float64,0xffba46dc0e348db8,0xbfefa54f4de28eba,4
+np.float64,0xffdd4cc31eba9986,0x3fe60bb41fe1c4da,4
+np.float64,0xffe13a70dea274e1,0xbfaa9192f7bd6c9b,4
+np.float64,0xffde25127bbc4a24,0x3f7c75f45e29be7d,4
+np.float64,0xffe4076543a80eca,0x3fea5aad50d2f687,4
+np.float64,0xffe61512acec2a25,0xbfefffeb67401649,4
+np.float64,0xffef812ec1ff025d,0xbfe919c7c073c766,4
+np.float64,0xffd5552aeaaaaa56,0x3fc89d38ab047396,4
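The file added below, umath-validation-set-cosh.csv, uses the same four-column layout as the other umath validation sets: the target dtype, the input and the expected output given as raw IEEE-754 bit patterns in hex, and the allowed error in ULPs. A minimal sketch of how one of these rows could be checked against np.cosh follows — the helper names are illustrative, not the ones numpy's own test driver uses:

    import struct
    import numpy as np

    def bits_to_float(hex_bits, dtype):
        # Reinterpret a hex bit pattern (e.g. "0x3fc583ab") as a float of the given dtype.
        value = int(hex_bits, 16)
        if dtype == "np.float32":
            return np.frombuffer(struct.pack("<I", value), dtype=np.float32)[0]
        if dtype == "np.float64":
            return np.frombuffer(struct.pack("<Q", value), dtype=np.float64)[0]
        raise ValueError("unsupported dtype: " + dtype)

    def check_row(row, func=np.cosh):
        # Validate one "dtype,input,output,ulperrortol" row against func.
        dtype, in_bits, out_bits, tol = row.split(",")
        x = bits_to_float(in_bits, dtype)
        expected = bits_to_float(out_bits, dtype)
        got = func(x)
        if np.isnan(expected):   # NaN rows only require a NaN result
            return np.isnan(got)
        if np.isinf(expected):   # overflow rows must hit the same signed infinity
            return got == expected
        # np.spacing(expected) is one ULP at expected, in expected's own dtype.
        ulp_err = abs(float(got) - float(expected)) / float(np.spacing(np.abs(expected)))
        return ulp_err <= int(tol)

    # e.g. the cosh(1.0f) row from the table below:
    assert check_row("np.float32,0x3f800000,0x3fc583ab,3")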
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv
new file mode 100644
index 00000000..c9e446c3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-cosh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xfe0ac238,0x7f800000,3
+np.float32,0xbf553b86,0x3faf079b,3
+np.float32,0xff4457da,0x7f800000,3
+np.float32,0xff7253f3,0x7f800000,3
+np.float32,0x5a5802,0x3f800000,3
+np.float32,0x3db03413,0x3f80795b,3
+np.float32,0x7f6795c9,0x7f800000,3
+np.float32,0x805b9142,0x3f800000,3
+np.float32,0xfeea581a,0x7f800000,3
+np.float32,0x3f7e2dba,0x3fc472f6,3
+np.float32,0x3d9c4d74,0x3f805f7a,3
+np.float32,0x7f18c665,0x7f800000,3
+np.float32,0x7f003e23,0x7f800000,3
+np.float32,0x3d936fa0,0x3f8054f3,3
+np.float32,0x3f32034f,0x3fa0368e,3
+np.float32,0xff087604,0x7f800000,3
+np.float32,0x380a5,0x3f800000,3
+np.float32,0x3f59694e,0x3fb10077,3
+np.float32,0x3e63e648,0x3f832ee4,3
+np.float32,0x80712f42,0x3f800000,3
+np.float32,0x3e169908,0x3f816302,3
+np.float32,0x3f2d766e,0x3f9e8692,3
+np.float32,0x3d6412e0,0x3f8032d0,3
+np.float32,0xbde689e8,0x3f80cfd4,3
+np.float32,0x483e2e,0x3f800000,3
+np.float32,0xff1ba2d0,0x7f800000,3
+np.float32,0x80136bff,0x3f800000,3
+np.float32,0x3f72534c,0x3fbdc1d4,3
+np.float32,0x3e9eb381,0x3f8632c6,3
+np.float32,0x3e142892,0x3f815795,3
+np.float32,0x0,0x3f800000,3
+np.float32,0x2f2528,0x3f800000,3
+np.float32,0x7f38be13,0x7f800000,3
+np.float32,0xfeee6896,0x7f800000,3
+np.float32,0x7f09095d,0x7f800000,3
+np.float32,0xbe94d,0x3f800000,3
+np.float32,0xbedcf8d4,0x3f8c1b74,3
+np.float32,0xbf694c02,0x3fb8ef07,3
+np.float32,0x3e2261f8,0x3f819cde,3
+np.float32,0xbf01d3ce,0x3f90d0e0,3
+np.float32,0xbeb7b3a2,0x3f8853de,3
+np.float32,0x8046de7b,0x3f800000,3
+np.float32,0xbcb45ea0,0x3f8007f1,3
+np.float32,0x3eef14af,0x3f8e35dd,3
+np.float32,0xbf047316,0x3f91846e,3
+np.float32,0x801cef45,0x3f800000,3
+np.float32,0x3e9ad891,0x3f85e609,3
+np.float32,0xff20e9cf,0x7f800000,3
+np.float32,0x80068434,0x3f800000,3
+np.float32,0xbe253020,0x3f81ab49,3
+np.float32,0x3f13f4b8,0x3f95fac9,3
+np.float32,0x804accd1,0x3f800000,3
+np.float32,0x3dee3e10,0x3f80ddf7,3
+np.float32,0xbe6c4690,0x3f836c29,3
+np.float32,0xff30d431,0x7f800000,3
+np.float32,0xbec82416,0x3f89e791,3
+np.float32,0x3f30bbcb,0x3f9fbbcc,3
+np.float32,0x3f5620a2,0x3faf72b8,3
+np.float32,0x807a8130,0x3f800000,3
+np.float32,0x3e3cb02d,0x3f822de0,3
+np.float32,0xff4839ac,0x7f800000,3
+np.float32,0x800a3e9c,0x3f800000,3
+np.float32,0x3dffd65b,0x3f810002,3
+np.float32,0xbf2b1492,0x3f9da987,3
+np.float32,0xbf21602c,0x3f9a48fe,3
+np.float32,0x512531,0x3f800000,3
+np.float32,0x24b99a,0x3f800000,3
+np.float32,0xbf53e345,0x3fae67b1,3
+np.float32,0xff2126ec,0x7f800000,3
+np.float32,0x7e79b49d,0x7f800000,3
+np.float32,0x3ea3cf04,0x3f869b6f,3
+np.float32,0x7f270059,0x7f800000,3
+np.float32,0x3f625b2f,0x3fb561e1,3
+np.float32,0xbf59947e,0x3fb11519,3
+np.float32,0xfe0d1c64,0x7f800000,3
+np.float32,0xbf3f3eae,0x3fa568e2,3
+np.float32,0x7c04d1,0x3f800000,3
+np.float32,0x7e66bd,0x3f800000,3
+np.float32,0x8011880d,0x3f800000,3
+np.float32,0x3f302f07,0x3f9f8759,3
+np.float32,0x4e3375,0x3f800000,3
+np.float32,0xfe67a134,0x7f800000,3
+np.float32,0xff670249,0x7f800000,3
+np.float32,0x7e19f27d,0x7f800000,3
+np.float32,0xbf36ce12,0x3fa20b81,3
+np.float32,0xbe6bcfc4,0x3f8368b5,3
+np.float32,0x76fcba,0x3f800000,3
+np.float32,0x7f30abaf,0x7f800000,3
+np.float32,0x3f4c1f6d,0x3faae43c,3
+np.float32,0x7f61f44a,0x7f800000,3
+np.float32,0xbf4bb3c9,0x3faab4af,3
+np.float32,0xbda15ee0,0x3f8065c6,3
+np.float32,0xfbb4e800,0x7f800000,3
+np.float32,0x7fa00000,0x7fe00000,3
+np.float32,0x80568501,0x3f800000,3
+np.float32,0xfeb285e4,0x7f800000,3
+np.float32,0x804423a7,0x3f800000,3
+np.float32,0x7e6c0f21,0x7f800000,3
+np.float32,0x7f136b3c,0x7f800000,3
+np.float32,0x3f2d08e6,0x3f9e5e9c,3
+np.float32,0xbf6b454e,0x3fb9f7e6,3
+np.float32,0x3e6bceb0,0x3f8368ad,3
+np.float32,0xff1ad16a,0x7f800000,3
+np.float32,0x7cce1a04,0x7f800000,3
+np.float32,0xff7bcf95,0x7f800000,3
+np.float32,0x8049788d,0x3f800000,3
+np.float32,0x7ec45918,0x7f800000,3
+np.float32,0xff7fffff,0x7f800000,3
+np.float32,0x8039a1a0,0x3f800000,3
+np.float32,0x7e90cd72,0x7f800000,3
+np.float32,0xbf7dfd53,0x3fc456cc,3
+np.float32,0x3eeeb664,0x3f8e2a76,3
+np.float32,0x8055ef9b,0x3f800000,3
+np.float32,0x7ee06ddd,0x7f800000,3
+np.float32,0xba2cc000,0x3f800002,3
+np.float32,0x806da632,0x3f800000,3
+np.float32,0x7ecfaaf5,0x7f800000,3
+np.float32,0x3ddd12e6,0x3f80bf19,3
+np.float32,0xbf754394,0x3fbf60b1,3
+np.float32,0x6f3f19,0x3f800000,3
+np.float32,0x800a9af0,0x3f800000,3
+np.float32,0xfeef13ea,0x7f800000,3
+np.float32,0x7f74841f,0x7f800000,3
+np.float32,0xbeb9a2f0,0x3f888181,3
+np.float32,0x77cbb,0x3f800000,3
+np.float32,0xbf587f84,0x3fb0911b,3
+np.float32,0x210ba5,0x3f800000,3
+np.float32,0x3ee60a28,0x3f8d2367,3
+np.float32,0xbe3731ac,0x3f820dc7,3
+np.float32,0xbee8cfee,0x3f8d765e,3
+np.float32,0x7b2ef179,0x7f800000,3
+np.float32,0xfe81377c,0x7f800000,3
+np.float32,0x6ac98c,0x3f800000,3
+np.float32,0x3f51f144,0x3fad8288,3
+np.float32,0x80785750,0x3f800000,3
+np.float32,0x3f46615a,0x3fa864ff,3
+np.float32,0xbf35ac9e,0x3fa19b8e,3
+np.float32,0x7f0982ac,0x7f800000,3
+np.float32,0x1b2610,0x3f800000,3
+np.float32,0x3ed8bb25,0x3f8ba3df,3
+np.float32,0xbeb41bac,0x3f88006d,3
+np.float32,0xff48e89d,0x7f800000,3
+np.float32,0x3ed0ab8c,0x3f8ac755,3
+np.float32,0xbe64671c,0x3f833282,3
+np.float32,0x64bce4,0x3f800000,3
+np.float32,0x284f79,0x3f800000,3
+np.float32,0x7e09faa7,0x7f800000,3
+np.float32,0x4376c1,0x3f800000,3
+np.float32,0x805ca8c0,0x3f800000,3
+np.float32,0xff0859d5,0x7f800000,3
+np.float32,0xbed2f3b2,0x3f8b04dd,3
+np.float32,0x8045bd0c,0x3f800000,3
+np.float32,0x3f0e6216,0x3f94503f,3
+np.float32,0x3f41e3ae,0x3fa68035,3
+np.float32,0x80088ccc,0x3f800000,3
+np.float32,0x3f37fc19,0x3fa2812f,3
+np.float32,0x71c87d,0x3f800000,3
+np.float32,0x8024f4b2,0x3f800000,3
+np.float32,0xff78dd88,0x7f800000,3
+np.float32,0xbda66c90,0x3f806c40,3
+np.float32,0x7f33ef0d,0x7f800000,3
+np.float32,0x46a343,0x3f800000,3
+np.float32,0xff1dce38,0x7f800000,3
+np.float32,0x1b935d,0x3f800000,3
+np.float32,0x3ebec598,0x3f88fd0e,3
+np.float32,0xff115530,0x7f800000,3
+np.float32,0x803916aa,0x3f800000,3
+np.float32,0xff60a3e2,0x7f800000,3
+np.float32,0x3b8ddd48,0x3f80004f,3
+np.float32,0x3f761b6e,0x3fbfd8ea,3
+np.float32,0xbdf55b88,0x3f80eb70,3
+np.float32,0x37374,0x3f800000,3
+np.float32,0x3de150e0,0x3f80c682,3
+np.float32,0x3f343278,0x3fa10a83,3
+np.float32,0xbe9baefa,0x3f85f68b,3
+np.float32,0x3d8d43,0x3f800000,3
+np.float32,0x3e80994b,0x3f840f0c,3
+np.float32,0xbe573c6c,0x3f82d685,3
+np.float32,0x805b83b4,0x3f800000,3
+np.float32,0x683d88,0x3f800000,3
+np.float32,0x692465,0x3f800000,3
+np.float32,0xbdc345f8,0x3f809511,3
+np.float32,0x3f7c1c5a,0x3fc3406f,3
+np.float32,0xbf40bef3,0x3fa606df,3
+np.float32,0xff1e25b9,0x7f800000,3
+np.float32,0x3e4481e0,0x3f825d37,3
+np.float32,0x75d188,0x3f800000,3
+np.float32,0x3ea53cec,0x3f86b956,3
+np.float32,0xff105a54,0x7f800000,3
+np.float32,0x7f800000,0x7f800000,3
+np.float32,0x7f11f0b0,0x7f800000,3
+np.float32,0xbf58a57d,0x3fb0a328,3
+np.float32,0xbdd11e38,0x3f80aaf8,3
+np.float32,0xbea94adc,0x3f870fa0,3
+np.float32,0x3e9dd780,0x3f862180,3
+np.float32,0xff1786b9,0x7f800000,3
+np.float32,0xfec46aa2,0x7f800000,3
+np.float32,0x7f4300c1,0x7f800000,3
+np.float32,0x29ba2b,0x3f800000,3
+np.float32,0x3f4112e2,0x3fa62993,3
+np.float32,0xbe6c9224,0x3f836e5d,3
+np.float32,0x7f0e42a3,0x7f800000,3
+np.float32,0xff6390ad,0x7f800000,3
+np.float32,0x3f54e374,0x3faede94,3
+np.float32,0x7f2642a2,0x7f800000,3
+np.float32,0x7f46b2be,0x7f800000,3
+np.float32,0xfe59095c,0x7f800000,3
+np.float32,0x7146a0,0x3f800000,3
+np.float32,0x3f07763d,0x3f925786,3
+np.float32,0x3d172780,0x3f801651,3
+np.float32,0xff66f1c5,0x7f800000,3
+np.float32,0xff025349,0x7f800000,3
+np.float32,0x6ce99d,0x3f800000,3
+np.float32,0xbf7e4f50,0x3fc48685,3
+np.float32,0xbeff8ca2,0x3f904708,3
+np.float32,0x3e6c8,0x3f800000,3
+np.float32,0x7f7153dc,0x7f800000,3
+np.float32,0xbedcf612,0x3f8c1b26,3
+np.float32,0xbbc2f180,0x3f800094,3
+np.float32,0xbf397399,0x3fa314b8,3
+np.float32,0x6c6e35,0x3f800000,3
+np.float32,0x7f50a88b,0x7f800000,3
+np.float32,0xfe84093e,0x7f800000,3
+np.float32,0x3f737b9d,0x3fbe6478,3
+np.float32,0x7f6a5340,0x7f800000,3
+np.float32,0xbde83c20,0x3f80d2e7,3
+np.float32,0xff769ce9,0x7f800000,3
+np.float32,0xfdd33c30,0x7f800000,3
+np.float32,0xbc95cb60,0x3f80057a,3
+np.float32,0x8007a40d,0x3f800000,3
+np.float32,0x3f55d90c,0x3faf5132,3
+np.float32,0x80282082,0x3f800000,3
+np.float32,0xbf43b1f2,0x3fa7418c,3
+np.float32,0x3f1dc7cb,0x3f991731,3
+np.float32,0xbd4346a0,0x3f80253f,3
+np.float32,0xbf5aa82a,0x3fb19946,3
+np.float32,0x3f4b8c22,0x3faaa333,3
+np.float32,0x3d13468c,0x3f80152f,3
+np.float32,0x7db77097,0x7f800000,3
+np.float32,0x4a00df,0x3f800000,3
+np.float32,0xbedea5e0,0x3f8c4b64,3
+np.float32,0x80482543,0x3f800000,3
+np.float32,0xbef344fe,0x3f8eb8dd,3
+np.float32,0x7ebd4044,0x7f800000,3
+np.float32,0xbf512c0e,0x3fad287e,3
+np.float32,0x3db28cce,0x3f807c9c,3
+np.float32,0xbd0f5ae0,0x3f801412,3
+np.float32,0xfe7ed9ac,0x7f800000,3
+np.float32,0x3eb1aa82,0x3f87c8b4,3
+np.float32,0xfef1679e,0x7f800000,3
+np.float32,0xff3629f2,0x7f800000,3
+np.float32,0xff3562b4,0x7f800000,3
+np.float32,0x3dcafe1d,0x3f80a118,3
+np.float32,0xfedf242a,0x7f800000,3
+np.float32,0xbf43102a,0x3fa6fda4,3
+np.float32,0x8028834e,0x3f800000,3
+np.float32,0x805c8513,0x3f800000,3
+np.float32,0x3f59306a,0x3fb0e550,3
+np.float32,0x3eda2c9c,0x3f8bcc4a,3
+np.float32,0x80023524,0x3f800000,3
+np.float32,0x7ef72879,0x7f800000,3
+np.float32,0x661c8a,0x3f800000,3
+np.float32,0xfec3ba6c,0x7f800000,3
+np.float32,0x805aaca6,0x3f800000,3
+np.float32,0xff5c1f13,0x7f800000,3
+np.float32,0x3f6ab3f4,0x3fb9ab6b,3
+np.float32,0x3f014896,0x3f90ac20,3
+np.float32,0x3f030584,0x3f91222a,3
+np.float32,0xbf74853d,0x3fbef71d,3
+np.float32,0xbf534ee0,0x3fae2323,3
+np.float32,0x2c90c3,0x3f800000,3
+np.float32,0x7f62ad25,0x7f800000,3
+np.float32,0x1c8847,0x3f800000,3
+np.float32,0x7e2a8d43,0x7f800000,3
+np.float32,0x807a09cd,0x3f800000,3
+np.float32,0x413871,0x3f800000,3
+np.float32,0x80063692,0x3f800000,3
+np.float32,0x3edaf29b,0x3f8be211,3
+np.float32,0xbf64a7ab,0x3fb68b2d,3
+np.float32,0xfe56a720,0x7f800000,3
+np.float32,0xbf54a8d4,0x3faec350,3
+np.float32,0x3ecbaef7,0x3f8a4350,3
+np.float32,0x3f413714,0x3fa63890,3
+np.float32,0x7d3aa8,0x3f800000,3
+np.float32,0xbea9a13c,0x3f8716e7,3
+np.float32,0x7ef7553e,0x7f800000,3
+np.float32,0x8056f29f,0x3f800000,3
+np.float32,0xff1f7ffe,0x7f800000,3
+np.float32,0x3f41953b,0x3fa65f9c,3
+np.float32,0x3daa2f,0x3f800000,3
+np.float32,0xff0893e4,0x7f800000,3
+np.float32,0xbefc7ec6,0x3f8fe207,3
+np.float32,0xbb026800,0x3f800011,3
+np.float32,0x341e4f,0x3f800000,3
+np.float32,0x3e7b708a,0x3f83e0d1,3
+np.float32,0xa18cb,0x3f800000,3
+np.float32,0x7e290239,0x7f800000,3
+np.float32,0xbf4254f2,0x3fa6af62,3
+np.float32,0x80000000,0x3f800000,3
+np.float32,0x3f0a6c,0x3f800000,3
+np.float32,0xbec44d28,0x3f898609,3
+np.float32,0xf841f,0x3f800000,3
+np.float32,0x7f01a693,0x7f800000,3
+np.float32,0x8053340b,0x3f800000,3
+np.float32,0xfd4e7990,0x7f800000,3
+np.float32,0xbf782f1f,0x3fc10356,3
+np.float32,0xbe962118,0x3f858acc,3
+np.float32,0xfe8cd702,0x7f800000,3
+np.float32,0x7ecd986f,0x7f800000,3
+np.float32,0x3ebe775f,0x3f88f59b,3
+np.float32,0x8065524f,0x3f800000,3
+np.float32,0x3ede7fc4,0x3f8c471e,3
+np.float32,0x7f5e15ea,0x7f800000,3
+np.float32,0xbe871ada,0x3f847b78,3
+np.float32,0x3f21958b,0x3f9a5af7,3
+np.float32,0x3f64d480,0x3fb6a1fa,3
+np.float32,0xff18b0e9,0x7f800000,3
+np.float32,0xbf0840dd,0x3f928fd9,3
+np.float32,0x80104f5d,0x3f800000,3
+np.float32,0x643b94,0x3f800000,3
+np.float32,0xbc560a80,0x3f8002cc,3
+np.float32,0x3f5c75d6,0x3fb2786e,3
+np.float32,0x7f365fc9,0x7f800000,3
+np.float32,0x54e965,0x3f800000,3
+np.float32,0x6dcd4d,0x3f800000,3
+np.float32,0x3f2057a0,0x3f99f04d,3
+np.float32,0x272fa3,0x3f800000,3
+np.float32,0xff423dc9,0x7f800000,3
+np.float32,0x80273463,0x3f800000,3
+np.float32,0xfe21cc78,0x7f800000,3
+np.float32,0x7fc00000,0x7fc00000,3
+np.float32,0x802feb65,0x3f800000,3
+np.float32,0x3dc733d0,0x3f809b21,3
+np.float32,0x65d56b,0x3f800000,3
+np.float32,0x80351d8e,0x3f800000,3
+np.float32,0xbf244247,0x3f9b43dd,3
+np.float32,0x7f328e7e,0x7f800000,3
+np.float32,0x7f4d9712,0x7f800000,3
+np.float32,0x2c505d,0x3f800000,3
+np.float32,0xbf232ebe,0x3f9ae5a0,3
+np.float32,0x804a363a,0x3f800000,3
+np.float32,0x80417102,0x3f800000,3
+np.float32,0xbf48b170,0x3fa963d4,3
+np.float32,0x7ea3e3b6,0x7f800000,3
+np.float32,0xbf41415b,0x3fa63cd2,3
+np.float32,0xfe3af7c8,0x7f800000,3
+np.float32,0x7f478010,0x7f800000,3
+np.float32,0x80143113,0x3f800000,3
+np.float32,0x3f7626a7,0x3fbfdf2e,3
+np.float32,0xfea20b0a,0x7f800000,3
+np.float32,0x80144d64,0x3f800000,3
+np.float32,0x7db9ba47,0x7f800000,3
+np.float32,0x7f7fffff,0x7f800000,3
+np.float32,0xbe410834,0x3f8247ef,3
+np.float32,0x14a7af,0x3f800000,3
+np.float32,0x7eaebf9e,0x7f800000,3
+np.float32,0xff800000,0x7f800000,3
+np.float32,0x3f0a7d8e,0x3f9330fd,3
+np.float32,0x3ef780,0x3f800000,3
+np.float32,0x3f62253e,0x3fb546d1,3
+np.float32,0x3f4cbeac,0x3fab2acc,3
+np.float32,0x25db1,0x3f800000,3
+np.float32,0x65c54a,0x3f800000,3
+np.float32,0x800f0645,0x3f800000,3
+np.float32,0x3ed28c78,0x3f8af9f0,3
+np.float32,0x8040c6ce,0x3f800000,3
+np.float32,0x5e4e9a,0x3f800000,3
+np.float32,0xbd3fd2b0,0x3f8023f1,3
+np.float32,0xbf5d2d3f,0x3fb2d1b6,3
+np.float32,0x7ead999f,0x7f800000,3
+np.float32,0xbf30dc86,0x3f9fc805,3
+np.float32,0xff2b0a62,0x7f800000,3
+np.float32,0x3d5180e9,0x3f802adf,3
+np.float32,0x3f62716f,0x3fb56d0d,3
+np.float32,0x7e82ae9c,0x7f800000,3
+np.float32,0xfe2d4bdc,0x7f800000,3
+np.float32,0x805cc7d4,0x3f800000,3
+np.float32,0xfb50f700,0x7f800000,3
+np.float32,0xff57b684,0x7f800000,3
+np.float32,0x80344f01,0x3f800000,3
+np.float32,0x7f2af372,0x7f800000,3
+np.float32,0xfeab6204,0x7f800000,3
+np.float32,0x30b251,0x3f800000,3
+np.float32,0x3eed8cc4,0x3f8e0698,3
+np.float32,0x7eeb1c6a,0x7f800000,3
+np.float32,0x3f17ece6,0x3f9735b0,3
+np.float32,0x21e985,0x3f800000,3
+np.float32,0x3f3a7df3,0x3fa37e34,3
+np.float32,0x802a14a2,0x3f800000,3
+np.float32,0x807d4d5b,0x3f800000,3
+np.float32,0x7f6093ce,0x7f800000,3
+np.float32,0x3f800000,0x3fc583ab,3
+np.float32,0x3da2c26e,0x3f806789,3
+np.float32,0xfe05f278,0x7f800000,3
+np.float32,0x800000,0x3f800000,3
+np.float32,0xbee63342,0x3f8d282e,3
+np.float32,0xbf225586,0x3f9a9bd4,3
+np.float32,0xbed60e86,0x3f8b59ba,3
+np.float32,0xbec99484,0x3f8a0ca3,3
+np.float32,0x3e967c71,0x3f859199,3
+np.float32,0x7f26ab62,0x7f800000,3
+np.float32,0xca7f4,0x3f800000,3
+np.float32,0xbf543790,0x3fae8ebc,3
+np.float32,0x3e4c1ed9,0x3f828d2d,3
+np.float32,0xbdf37f88,0x3f80e7e1,3
+np.float32,0xff0cc44e,0x7f800000,3
+np.float32,0x5dea48,0x3f800000,3
+np.float32,0x31023c,0x3f800000,3
+np.float32,0x3ea10733,0x3f866208,3
+np.float32,0x3e11e6f2,0x3f814d2e,3
+np.float32,0x80641960,0x3f800000,3
+np.float32,0x3ef779a8,0x3f8f3edb,3
+np.float32,0x3f2a5062,0x3f9d632a,3
+np.float32,0x2b7d34,0x3f800000,3
+np.float32,0x3eeb95c5,0x3f8dca67,3
+np.float32,0x805c1357,0x3f800000,3
+np.float32,0x3db3a79d,0x3f807e29,3
+np.float32,0xfded1900,0x7f800000,3
+np.float32,0x45f362,0x3f800000,3
+np.float32,0x451f38,0x3f800000,3
+np.float32,0x801d3ae5,0x3f800000,3
+np.float32,0x458d45,0x3f800000,3
+np.float32,0xfda9d298,0x7f800000,3
+np.float32,0x467439,0x3f800000,3
+np.float32,0x7f66554a,0x7f800000,3
+np.float32,0xfef2375a,0x7f800000,3
+np.float32,0xbf33fc47,0x3fa0f5d7,3
+np.float32,0x3f75ba69,0x3fbfa2d0,3
+np.float32,0xfeb625b2,0x7f800000,3
+np.float32,0x8066b371,0x3f800000,3
+np.float32,0x3f5cb4e9,0x3fb29718,3
+np.float32,0x7f3b6a58,0x7f800000,3
+np.float32,0x7f6b35ea,0x7f800000,3
+np.float32,0xbf6ee555,0x3fbbe5be,3
+np.float32,0x3d836e21,0x3f804380,3
+np.float32,0xff43cd0c,0x7f800000,3
+np.float32,0xff55c1fa,0x7f800000,3
+np.float32,0xbf0dfccc,0x3f9432a6,3
+np.float32,0x3ed92121,0x3f8baf00,3
+np.float32,0x80068cc1,0x3f800000,3
+np.float32,0xff0103f9,0x7f800000,3
+np.float32,0x7e51b175,0x7f800000,3
+np.float32,0x8012f214,0x3f800000,3
+np.float32,0x62d298,0x3f800000,3
+np.float32,0xbf3e1525,0x3fa4ef8d,3
+np.float32,0x806b4882,0x3f800000,3
+np.float32,0xbf38c146,0x3fa2ce7c,3
+np.float32,0xbed59c30,0x3f8b4d70,3
+np.float32,0x3d1910c0,0x3f8016e2,3
+np.float32,0x7f33d55b,0x7f800000,3
+np.float32,0x7f5800e3,0x7f800000,3
+np.float32,0x5b2c5d,0x3f800000,3
+np.float32,0x807be750,0x3f800000,3
+np.float32,0x7eb297c1,0x7f800000,3
+np.float32,0x7dafee62,0x7f800000,3
+np.float32,0x7d9e23f0,0x7f800000,3
+np.float32,0x3e580537,0x3f82dbd8,3
+np.float32,0xbf800000,0x3fc583ab,3
+np.float32,0x7f40f880,0x7f800000,3
+np.float32,0x775ad3,0x3f800000,3
+np.float32,0xbedacd36,0x3f8bddf3,3
+np.float32,0x2138f6,0x3f800000,3
+np.float32,0x52c3b7,0x3f800000,3
+np.float32,0x8041cfdd,0x3f800000,3
+np.float32,0x7bf16791,0x7f800000,3
+np.float32,0xbe95869c,0x3f857f55,3
+np.float32,0xbf199796,0x3f97bcaf,3
+np.float32,0x3ef8da38,0x3f8f6b45,3
+np.float32,0x803f3648,0x3f800000,3
+np.float32,0x80026fd2,0x3f800000,3
+np.float32,0x7eb3ac26,0x7f800000,3
+np.float32,0x3e49921b,0x3f827ce8,3
+np.float32,0xbf689aed,0x3fb892de,3
+np.float32,0x3f253509,0x3f9b9779,3
+np.float32,0xff17894a,0x7f800000,3
+np.float32,0x3cd12639,0x3f800aae,3
+np.float32,0x1db14b,0x3f800000,3
+np.float32,0x39a0bf,0x3f800000,3
+np.float32,0xfdfe1d08,0x7f800000,3
+np.float32,0xff416cd2,0x7f800000,3
+np.float32,0x8070d818,0x3f800000,3
+np.float32,0x3e516e12,0x3f82afb8,3
+np.float32,0x80536651,0x3f800000,3
+np.float32,0xbf2903d2,0x3f9cecb7,3
+np.float32,0x3e896ae4,0x3f84a353,3
+np.float32,0xbd6ba2c0,0x3f80363d,3
+np.float32,0x80126d3e,0x3f800000,3
+np.float32,0xfd9d43d0,0x7f800000,3
+np.float32,0x7b56b6,0x3f800000,3
+np.float32,0xff04718e,0x7f800000,3
+np.float32,0x31440f,0x3f800000,3
+np.float32,0xbf7a1313,0x3fc215c9,3
+np.float32,0x7f43d6a0,0x7f800000,3
+np.float32,0x3f566503,0x3faf92cc,3
+np.float32,0xbf39eb0e,0x3fa343f1,3
+np.float32,0xbe35fd70,0x3f8206df,3
+np.float32,0x800c36ac,0x3f800000,3
+np.float32,0x60d061,0x3f800000,3
+np.float32,0x80453e12,0x3f800000,3
+np.float32,0xfe17c36c,0x7f800000,3
+np.float32,0x3d8c72,0x3f800000,3
+np.float32,0xfe8e9134,0x7f800000,3
+np.float32,0xff5d89de,0x7f800000,3
+np.float32,0x7f45020e,0x7f800000,3
+np.float32,0x3f28225e,0x3f9c9d01,3
+np.float32,0xbf3b6900,0x3fa3dbdd,3
+np.float32,0x80349023,0x3f800000,3
+np.float32,0xbf14d780,0x3f964042,3
+np.float32,0x3f56b5d2,0x3fafb8c3,3
+np.float32,0x800c639c,0x3f800000,3
+np.float32,0x7f7a19c8,0x7f800000,3
+np.float32,0xbf7a0815,0x3fc20f86,3
+np.float32,0xbec55926,0x3f89a06e,3
+np.float32,0x4b2cd2,0x3f800000,3
+np.float32,0xbf271eb2,0x3f9c41c8,3
+np.float32,0xff26e168,0x7f800000,3
+np.float32,0x800166b2,0x3f800000,3
+np.float32,0xbde97e38,0x3f80d532,3
+np.float32,0xbf1f93ec,0x3f99af1a,3
+np.float32,0x7f2896ed,0x7f800000,3
+np.float32,0x3da7d96d,0x3f806e1d,3
+np.float32,0x802b7237,0x3f800000,3
+np.float32,0xfdca6bc0,0x7f800000,3
+np.float32,0xbed2e300,0x3f8b0318,3
+np.float32,0x8079d9e8,0x3f800000,3
+np.float32,0x3f388c81,0x3fa2b9c2,3
+np.float32,0x3ed2607c,0x3f8af54a,3
+np.float32,0xff287de6,0x7f800000,3
+np.float32,0x3f55ed89,0x3faf5ac9,3
+np.float32,0x7f5b6af7,0x7f800000,3
+np.float32,0xbeb24730,0x3f87d698,3
+np.float32,0x1,0x3f800000,3
+np.float32,0x3f3a2350,0x3fa35a3b,3
+np.float32,0x8013b422,0x3f800000,3
+np.float32,0x3e9a6560,0x3f85dd35,3
+np.float32,0x80510631,0x3f800000,3
+np.float32,0xfeae39d6,0x7f800000,3
+np.float32,0x7eb437ad,0x7f800000,3
+np.float32,0x8047545b,0x3f800000,3
+np.float32,0x806a1c71,0x3f800000,3
+np.float32,0xbe5543f0,0x3f82c93b,3
+np.float32,0x40e8d,0x3f800000,3
+np.float32,0x63d18b,0x3f800000,3
+np.float32,0x1fa1ea,0x3f800000,3
+np.float32,0x801944e0,0x3f800000,3
+np.float32,0xbf4c7ac6,0x3fab0cae,3
+np.float32,0x7f2679d4,0x7f800000,3
+np.float32,0x3f0102fc,0x3f9099d0,3
+np.float32,0x7e44bdc1,0x7f800000,3
+np.float32,0xbf2072f6,0x3f99f970,3
+np.float32,0x5c7d38,0x3f800000,3
+np.float32,0x30a2e6,0x3f800000,3
+np.float32,0x805b9ca3,0x3f800000,3
+np.float32,0x7cc24ad5,0x7f800000,3
+np.float32,0x3f4f7920,0x3fac6357,3
+np.float32,0x111d62,0x3f800000,3
+np.float32,0xbf4de40a,0x3fabad77,3
+np.float32,0x805d0354,0x3f800000,3
+np.float32,0xbb3d2b00,0x3f800023,3
+np.float32,0x3ef229e7,0x3f8e960b,3
+np.float32,0x3f15754e,0x3f9670e0,3
+np.float32,0xbf689c6b,0x3fb893a5,3
+np.float32,0xbf3796c6,0x3fa2599b,3
+np.float32,0xbe95303c,0x3f8578f2,3
+np.float32,0xfee330de,0x7f800000,3
+np.float32,0xff0d9705,0x7f800000,3
+np.float32,0xbeb0ebd0,0x3f87b7dd,3
+np.float32,0xbf4d5a13,0x3fab6fe7,3
+np.float32,0x80142f5a,0x3f800000,3
+np.float32,0x7e01a87b,0x7f800000,3
+np.float32,0xbe45e5ec,0x3f8265d7,3
+np.float32,0x7f4ac255,0x7f800000,3
+np.float32,0x3ebf6a60,0x3f890ccb,3
+np.float32,0x7f771e16,0x7f800000,3
+np.float32,0x3f41834e,0x3fa6582b,3
+np.float32,0x3f7f6f98,0x3fc52ef0,3
+np.float32,0x7e4ad775,0x7f800000,3
+np.float32,0x3eb39991,0x3f87f4c4,3
+np.float32,0x1e3f4,0x3f800000,3
+np.float32,0x7e84ba19,0x7f800000,3
+np.float32,0x80640be4,0x3f800000,3
+np.float32,0x3f459fc8,0x3fa81272,3
+np.float32,0x3f554ed0,0x3faf109b,3
+np.float32,0x3c6617,0x3f800000,3
+np.float32,0x7f441158,0x7f800000,3
+np.float32,0x7f66e6d8,0x7f800000,3
+np.float32,0x7f565152,0x7f800000,3
+np.float32,0x7f16d550,0x7f800000,3
+np.float32,0xbd4f1950,0x3f8029e5,3
+np.float32,0xcf722,0x3f800000,3
+np.float32,0x3f37d6fd,0x3fa272ad,3
+np.float32,0xff7324ea,0x7f800000,3
+np.float32,0x804bc246,0x3f800000,3
+np.float32,0x7f099ef8,0x7f800000,3
+np.float32,0x5f838b,0x3f800000,3
+np.float32,0x80523534,0x3f800000,3
+np.float32,0x3f595e84,0x3fb0fb50,3
+np.float32,0xfdef8ac8,0x7f800000,3
+np.float32,0x3d9a07,0x3f800000,3
+np.float32,0x410f61,0x3f800000,3
+np.float32,0xbf715dbb,0x3fbd3bcb,3
+np.float32,0xbedd4734,0x3f8c242f,3
+np.float32,0x7e86739a,0x7f800000,3
+np.float32,0x3e81f144,0x3f8424fe,3
+np.float32,0x7f6342d1,0x7f800000,3
+np.float32,0xff6919a3,0x7f800000,3
+np.float32,0xff051878,0x7f800000,3
+np.float32,0x800ba28f,0x3f800000,3
+np.float32,0xfefab3d8,0x7f800000,3
+np.float32,0xff612a84,0x7f800000,3
+np.float32,0x800cd5ab,0x3f800000,3
+np.float32,0x802a07ae,0x3f800000,3
+np.float32,0xfef6ee3a,0x7f800000,3
+np.float32,0x8037e896,0x3f800000,3
+np.float32,0x3ef2d86f,0x3f8eab7d,3
+np.float32,0x3eafe53d,0x3f87a0cb,3
+np.float32,0xba591c00,0x3f800003,3
+np.float32,0x3e9ed028,0x3f863508,3
+np.float32,0x4a12a8,0x3f800000,3
+np.float32,0xbee55c84,0x3f8d0f45,3
+np.float32,0x8038a8d3,0x3f800000,3
+np.float32,0xff055243,0x7f800000,3
+np.float32,0xbf659067,0x3fb701ca,3
+np.float32,0xbee36a86,0x3f8cd5e0,3
+np.float32,0x7f1d74c1,0x7f800000,3
+np.float32,0xbf7657df,0x3fbffaad,3
+np.float32,0x7e37ee34,0x7f800000,3
+np.float32,0xff04bc74,0x7f800000,3
+np.float32,0x806d194e,0x3f800000,3
+np.float32,0x7f5596c3,0x7f800000,3
+np.float32,0xbe09d268,0x3f81293e,3
+np.float32,0x79ff75,0x3f800000,3
+np.float32,0xbf55479c,0x3faf0d3e,3
+np.float32,0xbe5428ec,0x3f82c1d4,3
+np.float32,0x3f624134,0x3fb554d7,3
+np.float32,0x2ccb8a,0x3f800000,3
+np.float32,0xfc082040,0x7f800000,3
+np.float32,0xff315467,0x7f800000,3
+np.float32,0x3e6ea2d2,0x3f837dd5,3
+np.float32,0x8020fdd1,0x3f800000,3
+np.float32,0x7f0416a1,0x7f800000,3
+np.float32,0x710a1b,0x3f800000,3
+np.float32,0x3dfcd050,0x3f80f9fc,3
+np.float32,0xfe995e96,0x7f800000,3
+np.float32,0x3f020d00,0x3f90e006,3
+np.float32,0x8064263e,0x3f800000,3
+np.float32,0xfcee4160,0x7f800000,3
+np.float32,0x801b3a18,0x3f800000,3
+np.float32,0x3f62c984,0x3fb59955,3
+np.float32,0x806e8355,0x3f800000,3
+np.float32,0x7e94f65d,0x7f800000,3
+np.float32,0x1173de,0x3f800000,3
+np.float32,0x3e3ff3b7,0x3f824166,3
+np.float32,0x803b4aea,0x3f800000,3
+np.float32,0x804c5bcc,0x3f800000,3
+np.float32,0x509fe5,0x3f800000,3
+np.float32,0xbf33b5ee,0x3fa0db0b,3
+np.float32,0x3f2ac15c,0x3f9d8ba4,3
+np.float32,0x7f2c54f8,0x7f800000,3
+np.float32,0x7f33d933,0x7f800000,3
+np.float32,0xbf09b2b4,0x3f92f795,3
+np.float32,0x805db8d6,0x3f800000,3
+np.float32,0x6d6e66,0x3f800000,3
+np.float32,0x3ddfea92,0x3f80c40c,3
+np.float32,0xfda719b8,0x7f800000,3
+np.float32,0x5d657f,0x3f800000,3
+np.float32,0xbf005ba3,0x3f906df6,3
+np.float32,0xbf45e606,0x3fa8305c,3
+np.float32,0x5e9fd1,0x3f800000,3
+np.float32,0x8079dc45,0x3f800000,3
+np.float32,0x7e9c40e3,0x7f800000,3
+np.float32,0x6bd5f6,0x3f800000,3
+np.float32,0xbea14a0e,0x3f866761,3
+np.float32,0x7e7323f3,0x7f800000,3
+np.float32,0x7f0c0a79,0x7f800000,3
+np.float32,0xbf7d7aeb,0x3fc40b0f,3
+np.float32,0x437588,0x3f800000,3
+np.float32,0xbf356376,0x3fa17f63,3
+np.float32,0x7f129921,0x7f800000,3
+np.float32,0x7f47a52e,0x7f800000,3
+np.float32,0xba8cb400,0x3f800005,3
+np.float32,0x802284e0,0x3f800000,3
+np.float32,0xbe820f56,0x3f8426ec,3
+np.float32,0x7f2ef6cf,0x7f800000,3
+np.float32,0xbf70a090,0x3fbcd501,3
+np.float32,0xbf173fea,0x3f96ff6d,3
+np.float32,0x3e19c489,0x3f817224,3
+np.float32,0x7f429b30,0x7f800000,3
+np.float32,0xbdae4118,0x3f8076af,3
+np.float32,0x3e70ad30,0x3f838d41,3
+np.float32,0x335fed,0x3f800000,3
+np.float32,0xff5359cf,0x7f800000,3
+np.float32,0xbf17e42b,0x3f9732f1,3
+np.float32,0xff3a950b,0x7f800000,3
+np.float32,0xbcca70c0,0x3f800a02,3
+np.float32,0x3f2cda62,0x3f9e4dad,3
+np.float32,0x3f50c185,0x3facf805,3
+np.float32,0x80000001,0x3f800000,3
+np.float32,0x807b86d2,0x3f800000,3
+np.float32,0x8010c2cf,0x3f800000,3
+np.float32,0x3f130fb8,0x3f95b519,3
+np.float32,0x807dc546,0x3f800000,3
+np.float32,0xbee20740,0x3f8cad3f,3
+np.float32,0x80800000,0x3f800000,3
+np.float32,0x3cbd90c0,0x3f8008c6,3
+np.float32,0x3e693488,0x3f835571,3
+np.float32,0xbe70cd44,0x3f838e35,3
+np.float32,0xbe348dc8,0x3f81feb1,3
+np.float32,0x3f31ea90,0x3fa02d3f,3
+np.float32,0xfcd7e180,0x7f800000,3
+np.float32,0xbe30a75c,0x3f81e8d0,3
+np.float32,0x3e552c5a,0x3f82c89d,3
+np.float32,0xff513f74,0x7f800000,3
+np.float32,0xbdb16248,0x3f807afd,3
+np.float64,0x7fbbf954e437f2a9,0x7ff0000000000000,2
+np.float64,0x581bbf0cb0379,0x3ff0000000000000,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xffb959a2a632b348,0x7ff0000000000000,2
+np.float64,0xbfdbd6baebb7ad76,0x3ff189a5ca25a6e1,2
+np.float64,0xbfd094ec9aa129da,0x3ff08a3f6b918065,2
+np.float64,0x3fe236753f646cea,0x3ff2a982660b8b43,2
+np.float64,0xbfe537fadfaa6ff6,0x3ff3a5f1c49c31bf,2
+np.float64,0xbfe31fa7dc663f50,0x3ff2f175374aef0e,2
+np.float64,0x3fc4b6569f296cb0,0x3ff035bde801bb53,2
+np.float64,0x800ce3c00f99c780,0x3ff0000000000000,2
+np.float64,0xbfebcde33e779bc6,0x3ff66de82cd30fc5,2
+np.float64,0x800dc09d3b7b813b,0x3ff0000000000000,2
+np.float64,0x80067d4c450cfa99,0x3ff0000000000000,2
+np.float64,0x1f6ade203ed7,0x3ff0000000000000,2
+np.float64,0xbfd4e311eca9c624,0x3ff0dc1383d6c3db,2
+np.float64,0x800649b3a54c9368,0x3ff0000000000000,2
+np.float64,0xcc14d1ab9829a,0x3ff0000000000000,2
+np.float64,0x3fc290c5bb25218b,0x3ff02b290f46dd6d,2
+np.float64,0x3fe78eb8376f1d70,0x3ff488f3bc259537,2
+np.float64,0xffc60f58e82c1eb0,0x7ff0000000000000,2
+np.float64,0x3fd35666ad26accd,0x3ff0bc6573da6bcd,2
+np.float64,0x7fc20257a62404ae,0x7ff0000000000000,2
+np.float64,0x80076d842e0edb09,0x3ff0000000000000,2
+np.float64,0x3fd8e44b08b1c898,0x3ff139b9a1f8428e,2
+np.float64,0x7fd6f6fc7a2dedf8,0x7ff0000000000000,2
+np.float64,0x3fa01b9f0820373e,0x3ff00206f8ad0f1b,2
+np.float64,0x69ed190ed3da4,0x3ff0000000000000,2
+np.float64,0xbfd997eb34b32fd6,0x3ff14be65a5db4a0,2
+np.float64,0x7feada2d0935b459,0x7ff0000000000000,2
+np.float64,0xbf80987120213100,0x3ff000226d29a9fc,2
+np.float64,0xbfef203e37fe407c,0x3ff82f51f04e8821,2
+np.float64,0xffe3dcf91fa7b9f2,0x7ff0000000000000,2
+np.float64,0x9a367283346cf,0x3ff0000000000000,2
+np.float64,0x800feb09f7bfd614,0x3ff0000000000000,2
+np.float64,0xbfe0319f9520633f,0x3ff217c5205c403f,2
+np.float64,0xbfa91eabd4323d50,0x3ff004ee4347f627,2
+np.float64,0x3fd19cbf7d23397f,0x3ff09c13e8e43571,2
+np.float64,0xffeb8945f0b7128b,0x7ff0000000000000,2
+np.float64,0x800a0eb4f2141d6a,0x3ff0000000000000,2
+np.float64,0xffe83e7312f07ce6,0x7ff0000000000000,2
+np.float64,0xffca53fee834a7fc,0x7ff0000000000000,2
+np.float64,0x800881cbf1710398,0x3ff0000000000000,2
+np.float64,0x80003e6abbe07cd6,0x3ff0000000000000,2
+np.float64,0xbfef6a998afed533,0x3ff859b7852d1b4d,2
+np.float64,0x3fd4eb7577a9d6eb,0x3ff0dcc601261aab,2
+np.float64,0xbfc9c12811338250,0x3ff05331268b05c8,2
+np.float64,0x7fddf84e5e3bf09c,0x7ff0000000000000,2
+np.float64,0xbfd4d6fbbc29adf8,0x3ff0db12db19d187,2
+np.float64,0x80077892bfaef126,0x3ff0000000000000,2
+np.float64,0xffae9d49543d3a90,0x7ff0000000000000,2
+np.float64,0xbfd8bef219317de4,0x3ff136034e5d2f1b,2
+np.float64,0xffe89c74ddb138e9,0x7ff0000000000000,2
+np.float64,0x8003b6bbb7e76d78,0x3ff0000000000000,2
+np.float64,0x315a4e8462b4b,0x3ff0000000000000,2
+np.float64,0x800ee616edddcc2e,0x3ff0000000000000,2
+np.float64,0xdfb27f97bf650,0x3ff0000000000000,2
+np.float64,0x8004723dc328e47c,0x3ff0000000000000,2
+np.float64,0xbfe529500daa52a0,0x3ff3a0b9b33fc84c,2
+np.float64,0xbfe4e46a7ce9c8d5,0x3ff3886ce0f92612,2
+np.float64,0xbf52003680240000,0x3ff00000a203d61a,2
+np.float64,0xffd3400458268008,0x7ff0000000000000,2
+np.float64,0x80076deb444edbd7,0x3ff0000000000000,2
+np.float64,0xa612f6c14c27,0x3ff0000000000000,2
+np.float64,0xbfd41c74c9a838ea,0x3ff0cbe61e16aecf,2
+np.float64,0x43f464a887e8d,0x3ff0000000000000,2
+np.float64,0x800976e748b2edcf,0x3ff0000000000000,2
+np.float64,0xffc79d6ba12f3ad8,0x7ff0000000000000,2
+np.float64,0xffd6dbcb022db796,0x7ff0000000000000,2
+np.float64,0xffd6a9672a2d52ce,0x7ff0000000000000,2
+np.float64,0x3fe95dcfa632bb9f,0x3ff54bbad2ee919e,2
+np.float64,0x3febadd2e1375ba6,0x3ff65e336c47c018,2
+np.float64,0x7fd47c37d828f86f,0x7ff0000000000000,2
+np.float64,0xbfd4ea59e0a9d4b4,0x3ff0dcae6af3e443,2
+np.float64,0x2c112afc58226,0x3ff0000000000000,2
+np.float64,0x8008122bced02458,0x3ff0000000000000,2
+np.float64,0x7fe7105ab3ee20b4,0x7ff0000000000000,2
+np.float64,0x80089634df312c6a,0x3ff0000000000000,2
+np.float64,0x68e9fbc8d1d40,0x3ff0000000000000,2
+np.float64,0xbfec1e1032f83c20,0x3ff69590b9f18ea8,2
+np.float64,0xbfedf181623be303,0x3ff787ef48935dc6,2
+np.float64,0xffe8600457f0c008,0x7ff0000000000000,2
+np.float64,0x7a841ec6f5084,0x3ff0000000000000,2
+np.float64,0x459a572e8b34c,0x3ff0000000000000,2
+np.float64,0x3fe8a232bef14465,0x3ff4fac1780f731e,2
+np.float64,0x3fcb37597d366eb3,0x3ff05cf08ab14ebd,2
+np.float64,0xbfb0261d00204c38,0x3ff00826fb86ca8a,2
+np.float64,0x3fc6e7a6dd2dcf4e,0x3ff041c1222ffa79,2
+np.float64,0xee65dd03dccbc,0x3ff0000000000000,2
+np.float64,0xffe26fdc23e4dfb8,0x7ff0000000000000,2
+np.float64,0x7fe8d6c8cab1ad91,0x7ff0000000000000,2
+np.float64,0xbfeb64bf2676c97e,0x3ff63abb8607828c,2
+np.float64,0x3fd28417b425082f,0x3ff0ac9eb22a732b,2
+np.float64,0xbfd26835b3a4d06c,0x3ff0aa94c48fb6d2,2
+np.float64,0xffec617a01b8c2f3,0x7ff0000000000000,2
+np.float64,0xe1bfff01c3800,0x3ff0000000000000,2
+np.float64,0x3fd4def913a9bdf4,0x3ff0dbbc7271046f,2
+np.float64,0x94f4c17129e98,0x3ff0000000000000,2
+np.float64,0x8009b2eaa33365d6,0x3ff0000000000000,2
+np.float64,0x3fd9633b41b2c678,0x3ff1468388bdfb65,2
+np.float64,0xffe0ae5c80e15cb8,0x7ff0000000000000,2
+np.float64,0x7fdfc35996bf86b2,0x7ff0000000000000,2
+np.float64,0x3fcfc5bdc23f8b7c,0x3ff07ed5caa4545c,2
+np.float64,0xd48b4907a9169,0x3ff0000000000000,2
+np.float64,0xbfe0a2cc52614598,0x3ff2361665895d95,2
+np.float64,0xbfe9068f90720d1f,0x3ff525b82491a1a5,2
+np.float64,0x4238b9208472,0x3ff0000000000000,2
+np.float64,0x800e6b2bf69cd658,0x3ff0000000000000,2
+np.float64,0x7fb638b6ae2c716c,0x7ff0000000000000,2
+np.float64,0x7fe267641764cec7,0x7ff0000000000000,2
+np.float64,0xffc0933d3521267c,0x7ff0000000000000,2
+np.float64,0x7fddfdfb533bfbf6,0x7ff0000000000000,2
+np.float64,0xced2a8e99da55,0x3ff0000000000000,2
+np.float64,0x2a80d5165501b,0x3ff0000000000000,2
+np.float64,0xbfeead2ab63d5a55,0x3ff7eeb5cbcfdcab,2
+np.float64,0x80097f6f92f2fee0,0x3ff0000000000000,2
+np.float64,0x3fee1f29b77c3e54,0x3ff7a0a58c13df62,2
+np.float64,0x3f9d06b8383a0d70,0x3ff001a54a2d8cf8,2
+np.float64,0xbfc8b41d3f31683c,0x3ff04c85379dd6b0,2
+np.float64,0xffd2a04c1e254098,0x7ff0000000000000,2
+np.float64,0xbfb71c01e02e3800,0x3ff010b34220e838,2
+np.float64,0xbfe69249ef6d2494,0x3ff425e48d1e938b,2
+np.float64,0xffefffffffffffff,0x7ff0000000000000,2
+np.float64,0x3feb1d52fbf63aa6,0x3ff618813ae922d7,2
+np.float64,0x7fb8d1a77e31a34e,0x7ff0000000000000,2
+np.float64,0xffc3cfc4ed279f88,0x7ff0000000000000,2
+np.float64,0x2164b9fc42c98,0x3ff0000000000000,2
+np.float64,0x3fbb868cee370d1a,0x3ff017b31b0d4d27,2
+np.float64,0x3fcd6dea583adbd5,0x3ff06cbd16bf44a0,2
+np.float64,0xbfecd041d479a084,0x3ff6efb25f61012d,2
+np.float64,0xbfb0552e6e20aa60,0x3ff00856ca83834a,2
+np.float64,0xe6293cbfcc528,0x3ff0000000000000,2
+np.float64,0x7fba58394034b072,0x7ff0000000000000,2
+np.float64,0x33bc96d467794,0x3ff0000000000000,2
+np.float64,0xffe90ea86bf21d50,0x7ff0000000000000,2
+np.float64,0xbfc626ea6d2c4dd4,0x3ff03d7e01ec3849,2
+np.float64,0x65b56fe4cb6af,0x3ff0000000000000,2
+np.float64,0x3fea409fb7f4813f,0x3ff5b171deab0ebd,2
+np.float64,0x3fe849c1df709384,0x3ff4d59063ff98c4,2
+np.float64,0x169073082d20f,0x3ff0000000000000,2
+np.float64,0xcc8b6add9916e,0x3ff0000000000000,2
+np.float64,0xbfef3d78d5fe7af2,0x3ff83fecc26abeea,2
+np.float64,0x3fe8c65a4a718cb4,0x3ff50a23bfeac7df,2
+np.float64,0x3fde9fa5c8bd3f4c,0x3ff1ddeb12b9d623,2
+np.float64,0xffe2af536da55ea6,0x7ff0000000000000,2
+np.float64,0x800186d0b0c30da2,0x3ff0000000000000,2
+np.float64,0x3fe9ba3c1d737478,0x3ff574ab2bf3a560,2
+np.float64,0xbfe1489c46a29138,0x3ff2641d36b30e21,2
+np.float64,0xbfe4b6b7c0e96d70,0x3ff37880ac8b0540,2
+np.float64,0x800e66ad82fccd5b,0x3ff0000000000000,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0x7febb0fd477761fa,0x7ff0000000000000,2
+np.float64,0xbfdc433f2eb8867e,0x3ff195ec2a6cce27,2
+np.float64,0x3fe12c5a172258b4,0x3ff25c225b8a34bb,2
+np.float64,0xbfef6f116c3ede23,0x3ff85c47eaed49a0,2
+np.float64,0x800af6f60f35edec,0x3ff0000000000000,2
+np.float64,0xffe567999a2acf32,0x7ff0000000000000,2
+np.float64,0xbfc5ac5ae72b58b4,0x3ff03adb50ec04f3,2
+np.float64,0x3fea1b57e23436b0,0x3ff5a06f98541767,2
+np.float64,0x7fcc3e36fb387c6d,0x7ff0000000000000,2
+np.float64,0x8000c8dc698191ba,0x3ff0000000000000,2
+np.float64,0x3fee5085ed7ca10c,0x3ff7bb92f61245b8,2
+np.float64,0x7fbb9f803a373eff,0x7ff0000000000000,2
+np.float64,0xbfe1e5e806e3cbd0,0x3ff2918f2d773007,2
+np.float64,0x8008f8c3f3b1f188,0x3ff0000000000000,2
+np.float64,0x7fe53df515ea7be9,0x7ff0000000000000,2
+np.float64,0x7fdbb87fb3b770fe,0x7ff0000000000000,2
+np.float64,0x3fefcc0f50ff981f,0x3ff89210a6a04e6b,2
+np.float64,0x3fe33f87d0267f10,0x3ff2fb989ea4f2bc,2
+np.float64,0x1173992022e8,0x3ff0000000000000,2
+np.float64,0x3fef534632bea68c,0x3ff84c5ca9713ff9,2
+np.float64,0x3fc5991d552b3238,0x3ff03a72bfdb6e5f,2
+np.float64,0x3fdad90dc1b5b21c,0x3ff16db868180034,2
+np.float64,0xffe20b8078e41700,0x7ff0000000000000,2
+np.float64,0x7fdf409a82be8134,0x7ff0000000000000,2
+np.float64,0x3fccb7e691396fcd,0x3ff06786b6ccdbcb,2
+np.float64,0xffe416e0b7282dc1,0x7ff0000000000000,2
+np.float64,0xffe3a8a981275152,0x7ff0000000000000,2
+np.float64,0x3fd9c8bd31b3917c,0x3ff150ee6f5f692f,2
+np.float64,0xffeab6fef6356dfd,0x7ff0000000000000,2
+np.float64,0x3fe9c5e3faf38bc8,0x3ff579e18c9bd548,2
+np.float64,0x800b173e44762e7d,0x3ff0000000000000,2
+np.float64,0xffe2719db764e33b,0x7ff0000000000000,2
+np.float64,0x3fd1fcf31223f9e6,0x3ff0a2da7ad99856,2
+np.float64,0x80082c4afcd05896,0x3ff0000000000000,2
+np.float64,0xa56e5e4b4adcc,0x3ff0000000000000,2
+np.float64,0xffbbbddab2377bb8,0x7ff0000000000000,2
+np.float64,0x3b3927c076726,0x3ff0000000000000,2
+np.float64,0x3fec03fd58f807fb,0x3ff6889b8a774728,2
+np.float64,0xbfaa891fb4351240,0x3ff00580987bd914,2
+np.float64,0x7fb4800c4a290018,0x7ff0000000000000,2
+np.float64,0xffbb5d2b6036ba58,0x7ff0000000000000,2
+np.float64,0x7fd6608076acc100,0x7ff0000000000000,2
+np.float64,0x31267e4c624d1,0x3ff0000000000000,2
+np.float64,0x33272266664e5,0x3ff0000000000000,2
+np.float64,0x47bb37f28f768,0x3ff0000000000000,2
+np.float64,0x3fe134bb4ee26977,0x3ff25e7ea647a928,2
+np.float64,0xbfe2b5f42ba56be8,0x3ff2d05cbdc7344b,2
+np.float64,0xbfe0e013fd61c028,0x3ff246dfce572914,2
+np.float64,0x7fecedcda4f9db9a,0x7ff0000000000000,2
+np.float64,0x8001816c2da302d9,0x3ff0000000000000,2
+np.float64,0xffced8b65b3db16c,0x7ff0000000000000,2
+np.float64,0xffdc1d4a0b383a94,0x7ff0000000000000,2
+np.float64,0x7fe94e7339f29ce5,0x7ff0000000000000,2
+np.float64,0x33fb846667f71,0x3ff0000000000000,2
+np.float64,0x800a1380e9542702,0x3ff0000000000000,2
+np.float64,0x800b74eaa776e9d6,0x3ff0000000000000,2
+np.float64,0x5681784aad030,0x3ff0000000000000,2
+np.float64,0xbfee0eb7917c1d6f,0x3ff797b949f7f6b4,2
+np.float64,0xffe4ec5fd2a9d8bf,0x7ff0000000000000,2
+np.float64,0xbfcd7401dd3ae804,0x3ff06cea52c792c0,2
+np.float64,0x800587563beb0ead,0x3ff0000000000000,2
+np.float64,0x3fc15c6f3322b8de,0x3ff025bbd030166d,2
+np.float64,0x7feb6b4caf76d698,0x7ff0000000000000,2
+np.float64,0x7fe136ef82a26dde,0x7ff0000000000000,2
+np.float64,0xf592dac3eb25c,0x3ff0000000000000,2
+np.float64,0x7fd300baf6a60175,0x7ff0000000000000,2
+np.float64,0x7fc880de9e3101bc,0x7ff0000000000000,2
+np.float64,0x7fe7a1aa5caf4354,0x7ff0000000000000,2
+np.float64,0x2f9b8e0e5f373,0x3ff0000000000000,2
+np.float64,0xffcc9071993920e4,0x7ff0000000000000,2
+np.float64,0x8009e151b313c2a4,0x3ff0000000000000,2
+np.float64,0xbfd46e2d18a8dc5a,0x3ff0d27a7b37c1ae,2
+np.float64,0x3fe65c7961acb8f3,0x3ff4116946062a4c,2
+np.float64,0x7fd31b371626366d,0x7ff0000000000000,2
+np.float64,0x98dc924d31b93,0x3ff0000000000000,2
+np.float64,0x268bef364d17f,0x3ff0000000000000,2
+np.float64,0x7fd883ba56310774,0x7ff0000000000000,2
+np.float64,0x3fc53f01a32a7e03,0x3ff0388dea9cd63e,2
+np.float64,0xffe1ea8c0563d518,0x7ff0000000000000,2
+np.float64,0x3fd0bf0e63a17e1d,0x3ff08d0577f5ffa6,2
+np.float64,0x7fef42418f7e8482,0x7ff0000000000000,2
+np.float64,0x8000bccd38c1799b,0x3ff0000000000000,2
+np.float64,0xbfe6c48766ed890f,0x3ff43936fa4048c8,2
+np.float64,0xbfb2a38f3a254720,0x3ff00adc7f7b2822,2
+np.float64,0x3fd5262b2eaa4c56,0x3ff0e1af492c08f5,2
+np.float64,0x80065b4691ecb68e,0x3ff0000000000000,2
+np.float64,0xfb6b9e9ff6d74,0x3ff0000000000000,2
+np.float64,0x8006c71e6ecd8e3e,0x3ff0000000000000,2
+np.float64,0x3fd0a3e43ca147c8,0x3ff08b3ad7b42485,2
+np.float64,0xbfc82d8607305b0c,0x3ff04949d6733ef6,2
+np.float64,0xde048c61bc092,0x3ff0000000000000,2
+np.float64,0xffcf73e0fa3ee7c0,0x7ff0000000000000,2
+np.float64,0xbfe8639d7830c73b,0x3ff4e05f97948376,2
+np.float64,0x8010000000000000,0x3ff0000000000000,2
+np.float64,0x67f01a2acfe04,0x3ff0000000000000,2
+np.float64,0x3fe222e803e445d0,0x3ff2a3a75e5f29d8,2
+np.float64,0xffef84c6387f098b,0x7ff0000000000000,2
+np.float64,0x3fe5969c1e6b2d38,0x3ff3c80130462bb2,2
+np.float64,0x8009f56953d3ead3,0x3ff0000000000000,2
+np.float64,0x3fe05c9b6360b937,0x3ff2232e1cba5617,2
+np.float64,0x3fd8888d63b1111b,0x3ff130a5b788d52f,2
+np.float64,0xffe3a9e6f26753ce,0x7ff0000000000000,2
+np.float64,0x800e2aaa287c5554,0x3ff0000000000000,2
+np.float64,0x3fea8d6c82351ad9,0x3ff5d4d8cde9a11d,2
+np.float64,0x7feef700723dee00,0x7ff0000000000000,2
+np.float64,0x3fa5cb77242b96e0,0x3ff003b62b3e50f1,2
+np.float64,0x7fb68f0a862d1e14,0x7ff0000000000000,2
+np.float64,0x7fb97ee83432fdcf,0x7ff0000000000000,2
+np.float64,0x7fd74a78632e94f0,0x7ff0000000000000,2
+np.float64,0x7fcfe577713fcaee,0x7ff0000000000000,2
+np.float64,0xffe192ee5ea325dc,0x7ff0000000000000,2
+np.float64,0x477d6ae48efae,0x3ff0000000000000,2
+np.float64,0xffe34d5237669aa4,0x7ff0000000000000,2
+np.float64,0x7fe3ce8395a79d06,0x7ff0000000000000,2
+np.float64,0x80019c01ffa33805,0x3ff0000000000000,2
+np.float64,0x74b5b56ce96b7,0x3ff0000000000000,2
+np.float64,0x7fe05ecdeda0bd9b,0x7ff0000000000000,2
+np.float64,0xffe9693eb232d27d,0x7ff0000000000000,2
+np.float64,0xffd2be2c7da57c58,0x7ff0000000000000,2
+np.float64,0x800dbd5cbc1b7aba,0x3ff0000000000000,2
+np.float64,0xbfa36105d426c210,0x3ff002ef2e3a87f7,2
+np.float64,0x800b2d69fb765ad4,0x3ff0000000000000,2
+np.float64,0xbfdb81c9a9370394,0x3ff1802d409cbf7a,2
+np.float64,0x7fd481d014a9039f,0x7ff0000000000000,2
+np.float64,0xffe66c3c1fecd878,0x7ff0000000000000,2
+np.float64,0x3fc55865192ab0c8,0x3ff03915b51e8839,2
+np.float64,0xd6a78987ad4f1,0x3ff0000000000000,2
+np.float64,0x800c6cc80d58d990,0x3ff0000000000000,2
+np.float64,0x979435a12f29,0x3ff0000000000000,2
+np.float64,0xbfbd971e7a3b2e40,0x3ff01b647e45f5a6,2
+np.float64,0x80067565bfeceacc,0x3ff0000000000000,2
+np.float64,0x8001ad689ce35ad2,0x3ff0000000000000,2
+np.float64,0x7fa43253dc2864a7,0x7ff0000000000000,2
+np.float64,0xbfe3dda307e7bb46,0x3ff32ef99a2efe1d,2
+np.float64,0x3fe5d7b395ebaf68,0x3ff3dfd33cdc8ef4,2
+np.float64,0xd94cc9c3b2999,0x3ff0000000000000,2
+np.float64,0x3fee5a513fbcb4a2,0x3ff7c0f17b876ce5,2
+np.float64,0xffe27761fa64eec4,0x7ff0000000000000,2
+np.float64,0x3feb788119b6f102,0x3ff64446f67f4efa,2
+np.float64,0xbfed6e10dffadc22,0x3ff741d5ef610ca0,2
+np.float64,0x7fe73cf98b2e79f2,0x7ff0000000000000,2
+np.float64,0x7847d09af08fb,0x3ff0000000000000,2
+np.float64,0x29ded2da53bdb,0x3ff0000000000000,2
+np.float64,0xbfe51c1ec1aa383e,0x3ff39c0b7cf832e2,2
+np.float64,0xbfeafd5e65f5fabd,0x3ff609548a787f57,2
+np.float64,0x3fd872a26fb0e545,0x3ff12e7fbd95505c,2
+np.float64,0x7fed6b7c1b7ad6f7,0x7ff0000000000000,2
+np.float64,0xffe7ba9ec16f753d,0x7ff0000000000000,2
+np.float64,0x7f89b322f0336645,0x7ff0000000000000,2
+np.float64,0xbfad1677383a2cf0,0x3ff0069ca67e7baa,2
+np.float64,0x3fe0906d04a120da,0x3ff2311b04b7bfef,2
+np.float64,0xffe4b3c9d4296793,0x7ff0000000000000,2
+np.float64,0xbfe476bb0ce8ed76,0x3ff36277d2921a74,2
+np.float64,0x7fc35655cf26acab,0x7ff0000000000000,2
+np.float64,0x7fe9980f0373301d,0x7ff0000000000000,2
+np.float64,0x9e6e04cb3cdc1,0x3ff0000000000000,2
+np.float64,0x800b89e0afb713c2,0x3ff0000000000000,2
+np.float64,0x800bd951a3f7b2a4,0x3ff0000000000000,2
+np.float64,0x29644a9e52c8a,0x3ff0000000000000,2
+np.float64,0x3fe1be2843637c51,0x3ff285e90d8387e4,2
+np.float64,0x7fa233cce4246799,0x7ff0000000000000,2
+np.float64,0xbfcfb7bc2d3f6f78,0x3ff07e657de3e2ed,2
+np.float64,0xffd7c953e7af92a8,0x7ff0000000000000,2
+np.float64,0xbfc5bbaf772b7760,0x3ff03b2ee4febb1e,2
+np.float64,0x8007b7315a6f6e63,0x3ff0000000000000,2
+np.float64,0xbfe906d902320db2,0x3ff525d7e16acfe0,2
+np.float64,0x3fde33d8553c67b1,0x3ff1d09faa19aa53,2
+np.float64,0x61fe76a0c3fcf,0x3ff0000000000000,2
+np.float64,0xa75e355b4ebc7,0x3ff0000000000000,2
+np.float64,0x3fc9e6d86033cdb1,0x3ff05426299c7064,2
+np.float64,0x7fd83f489eb07e90,0x7ff0000000000000,2
+np.float64,0x8000000000000001,0x3ff0000000000000,2
+np.float64,0x80014434ae62886a,0x3ff0000000000000,2
+np.float64,0xbfe21af9686435f3,0x3ff2a149338bdefe,2
+np.float64,0x9354e6cd26a9d,0x3ff0000000000000,2
+np.float64,0xb42b95f768573,0x3ff0000000000000,2
+np.float64,0xbfecb4481bb96890,0x3ff6e15d269dd651,2
+np.float64,0x3f97842ae82f0840,0x3ff0011485156f28,2
+np.float64,0xffdef63d90bdec7c,0x7ff0000000000000,2
+np.float64,0x7fe511a8d36a2351,0x7ff0000000000000,2
+np.float64,0xbf8cb638a0396c80,0x3ff000670c318fb6,2
+np.float64,0x3fe467e1f668cfc4,0x3ff35d65f93ccac6,2
+np.float64,0xbfce7d88f03cfb10,0x3ff074c22475fe5b,2
+np.float64,0x6d0a4994da14a,0x3ff0000000000000,2
+np.float64,0xbfb3072580260e48,0x3ff00b51d3913e9f,2
+np.float64,0x8008fcde36b1f9bd,0x3ff0000000000000,2
+np.float64,0x3fd984df66b309c0,0x3ff149f29125eca4,2
+np.float64,0xffee2a10fe7c5421,0x7ff0000000000000,2
+np.float64,0x80039168ace722d2,0x3ff0000000000000,2
+np.float64,0xffda604379b4c086,0x7ff0000000000000,2
+np.float64,0xffdc6a405bb8d480,0x7ff0000000000000,2
+np.float64,0x3fe62888b26c5111,0x3ff3fdda754c4372,2
+np.float64,0x8008b452cb5168a6,0x3ff0000000000000,2
+np.float64,0x6165d540c2cbb,0x3ff0000000000000,2
+np.float64,0xbfee0c04d17c180a,0x3ff796431c64bcbe,2
+np.float64,0x800609b8448c1371,0x3ff0000000000000,2
+np.float64,0x800fc3fca59f87f9,0x3ff0000000000000,2
+np.float64,0x77f64848efeca,0x3ff0000000000000,2
+np.float64,0x8007cf522d8f9ea5,0x3ff0000000000000,2
+np.float64,0xbfe9fb0b93f3f617,0x3ff591cb0052e22c,2
+np.float64,0x7fd569d5f0aad3ab,0x7ff0000000000000,2
+np.float64,0x7fe5cf489d6b9e90,0x7ff0000000000000,2
+np.float64,0x7fd6e193e92dc327,0x7ff0000000000000,2
+np.float64,0xf78988a5ef131,0x3ff0000000000000,2
+np.float64,0x3fe8f97562b1f2eb,0x3ff5201080fbc12d,2
+np.float64,0x7febfd69d7b7fad3,0x7ff0000000000000,2
+np.float64,0xffc07b5c1720f6b8,0x7ff0000000000000,2
+np.float64,0xbfd966926832cd24,0x3ff146da9adf492e,2
+np.float64,0x7fef5bd9edfeb7b3,0x7ff0000000000000,2
+np.float64,0xbfd2afbc96255f7a,0x3ff0afd601febf44,2
+np.float64,0x7fdd4ea6293a9d4b,0x7ff0000000000000,2
+np.float64,0xbfe8a1e916b143d2,0x3ff4faa23c2793e5,2
+np.float64,0x800188fcd8c311fa,0x3ff0000000000000,2
+np.float64,0xbfe30803f1661008,0x3ff2e9fc729baaee,2
+np.float64,0x7fefffffffffffff,0x7ff0000000000000,2
+np.float64,0x3fd287bec3250f7e,0x3ff0ace34d3102f6,2
+np.float64,0x1f0ee9443e1de,0x3ff0000000000000,2
+np.float64,0xbfd92f73da325ee8,0x3ff14143e4fa2c5a,2
+np.float64,0x3fed7c9bdffaf938,0x3ff74984168734d3,2
+np.float64,0x8002c4d1696589a4,0x3ff0000000000000,2
+np.float64,0xfe03011bfc060,0x3ff0000000000000,2
+np.float64,0x7f7a391e6034723c,0x7ff0000000000000,2
+np.float64,0xffd6fd46f82dfa8e,0x7ff0000000000000,2
+np.float64,0xbfd7520a742ea414,0x3ff112f1ba5d4f91,2
+np.float64,0x8009389d8812713b,0x3ff0000000000000,2
+np.float64,0x7fefb846aaff708c,0x7ff0000000000000,2
+np.float64,0x3fd98a0983331413,0x3ff14a79efb8adbf,2
+np.float64,0xbfd897158db12e2c,0x3ff132137902cf3e,2
+np.float64,0xffc4048d5928091c,0x7ff0000000000000,2
+np.float64,0x80036ae46046d5ca,0x3ff0000000000000,2
+np.float64,0x7faba7ed3c374fd9,0x7ff0000000000000,2
+np.float64,0xbfec4265e1f884cc,0x3ff6a7b8602422c9,2
+np.float64,0xaa195e0b5432c,0x3ff0000000000000,2
+np.float64,0x3feac15d317582ba,0x3ff5ed115758145f,2
+np.float64,0x6c13a5bcd8275,0x3ff0000000000000,2
+np.float64,0xbfed20b8883a4171,0x3ff7194dbd0dc988,2
+np.float64,0x800cde65c899bccc,0x3ff0000000000000,2
+np.float64,0x7c72912af8e53,0x3ff0000000000000,2
+np.float64,0x3fe49d2bb4e93a57,0x3ff36fab3aba15d4,2
+np.float64,0xbfd598fa02ab31f4,0x3ff0eb72fc472025,2
+np.float64,0x8007a191712f4324,0x3ff0000000000000,2
+np.float64,0xbfdeb14872bd6290,0x3ff1e01ca83f35fd,2
+np.float64,0xbfe1da46b3e3b48e,0x3ff28e23ad2f5615,2
+np.float64,0x800a2f348e745e69,0x3ff0000000000000,2
+np.float64,0xbfee66928afccd25,0x3ff7c7ac7dbb3273,2
+np.float64,0xffd78a0a2b2f1414,0x7ff0000000000000,2
+np.float64,0x7fc5fa80b82bf500,0x7ff0000000000000,2
+np.float64,0x800e6d7260dcdae5,0x3ff0000000000000,2
+np.float64,0xbfd6cff2aaad9fe6,0x3ff106f78ee61642,2
+np.float64,0x7fe1041d1d220839,0x7ff0000000000000,2
+np.float64,0xbfdf75586cbeeab0,0x3ff1f8dbaa7e57f0,2
+np.float64,0xffdcaae410b955c8,0x7ff0000000000000,2
+np.float64,0x800fe5e0d1ffcbc2,0x3ff0000000000000,2
+np.float64,0x800d7999527af333,0x3ff0000000000000,2
+np.float64,0xbfe62c233bac5846,0x3ff3ff34220a204c,2
+np.float64,0x7fe99bbff8f3377f,0x7ff0000000000000,2
+np.float64,0x7feeaf471d3d5e8d,0x7ff0000000000000,2
+np.float64,0xd5904ff5ab20a,0x3ff0000000000000,2
+np.float64,0x3fd07aae3320f55c,0x3ff08888c227c968,2
+np.float64,0x7fea82b8dff50571,0x7ff0000000000000,2
+np.float64,0xffef2db9057e5b71,0x7ff0000000000000,2
+np.float64,0xbfe2077fef640f00,0x3ff29b7dd0d39d36,2
+np.float64,0xbfe09a4d7c61349b,0x3ff233c7e88881f4,2
+np.float64,0x3fda50c4cbb4a188,0x3ff15f28a71deee7,2
+np.float64,0x7fe7d9ee6b2fb3dc,0x7ff0000000000000,2
+np.float64,0x3febbf6faeb77edf,0x3ff666d13682ea93,2
+np.float64,0xc401a32988035,0x3ff0000000000000,2
+np.float64,0xbfeab30aa8f56615,0x3ff5e65dcc6603f8,2
+np.float64,0x92c8cea32591a,0x3ff0000000000000,2
+np.float64,0xbff0000000000000,0x3ff8b07551d9f550,2
+np.float64,0xbfbddfb4dc3bbf68,0x3ff01bebaec38faa,2
+np.float64,0xbfd8de3e2a31bc7c,0x3ff1391f4830d20b,2
+np.float64,0xffc83a8f8a307520,0x7ff0000000000000,2
+np.float64,0x3fee026ef53c04de,0x3ff7911337085827,2
+np.float64,0x7fbaf380b235e700,0x7ff0000000000000,2
+np.float64,0xffe5b89fa62b713f,0x7ff0000000000000,2
+np.float64,0xbfdc1ff54ab83fea,0x3ff191e8c0b60bb2,2
+np.float64,0x6ae3534cd5c6b,0x3ff0000000000000,2
+np.float64,0xbfea87e558750fcb,0x3ff5d24846013794,2
+np.float64,0xffe0f467bee1e8cf,0x7ff0000000000000,2
+np.float64,0x7fee3b0dc7bc761b,0x7ff0000000000000,2
+np.float64,0x3fed87521afb0ea4,0x3ff74f2f5cd36a5c,2
+np.float64,0x7b3c9882f6794,0x3ff0000000000000,2
+np.float64,0x7fdd1a62243a34c3,0x7ff0000000000000,2
+np.float64,0x800f1dc88d3e3b91,0x3ff0000000000000,2
+np.float64,0x7fc3213cfa264279,0x7ff0000000000000,2
+np.float64,0x3fe40e0f3d681c1e,0x3ff33f135e9d5ded,2
+np.float64,0x7febf14e51f7e29c,0x7ff0000000000000,2
+np.float64,0xffe96c630c72d8c5,0x7ff0000000000000,2
+np.float64,0x7fdd82fbe7bb05f7,0x7ff0000000000000,2
+np.float64,0xbf9a6a0b1034d420,0x3ff0015ce009f7d8,2
+np.float64,0xbfceb4f8153d69f0,0x3ff0766e3ecc77df,2
+np.float64,0x3fd9de31e633bc64,0x3ff15327b794a16e,2
+np.float64,0x3faa902a30352054,0x3ff00583848d1969,2
+np.float64,0x0,0x3ff0000000000000,2
+np.float64,0x3fbe3459c43c68b4,0x3ff01c8af6710ef6,2
+np.float64,0xbfa8df010031be00,0x3ff004d5632dc9f5,2
+np.float64,0x7fbcf6cf2a39ed9d,0x7ff0000000000000,2
+np.float64,0xffe4236202a846c4,0x7ff0000000000000,2
+np.float64,0x3fd35ed52e26bdaa,0x3ff0bd0b231f11f7,2
+np.float64,0x7fe7a2df532f45be,0x7ff0000000000000,2
+np.float64,0xffe32f8315665f06,0x7ff0000000000000,2
+np.float64,0x7fe1a69f03e34d3d,0x7ff0000000000000,2
+np.float64,0x7fa5542b742aa856,0x7ff0000000000000,2
+np.float64,0x3fe84e9f8ef09d3f,0x3ff4d79816359765,2
+np.float64,0x29076fe6520ef,0x3ff0000000000000,2
+np.float64,0xffd70894f7ae112a,0x7ff0000000000000,2
+np.float64,0x800188edcbe311dc,0x3ff0000000000000,2
+np.float64,0x3fe2c7acda258f5a,0x3ff2d5dad4617703,2
+np.float64,0x3f775d41a02ebb00,0x3ff000110f212445,2
+np.float64,0x7fe8a084d1714109,0x7ff0000000000000,2
+np.float64,0x3fe31562d8a62ac6,0x3ff2ee35055741cd,2
+np.float64,0xbfd195d4d1a32baa,0x3ff09b98a50c151b,2
+np.float64,0xffaae9ff0c35d400,0x7ff0000000000000,2
+np.float64,0xff819866502330c0,0x7ff0000000000000,2
+np.float64,0x7fddc64815bb8c8f,0x7ff0000000000000,2
+np.float64,0xbfd442b428288568,0x3ff0cef70aa73ae6,2
+np.float64,0x8002e7625aa5cec5,0x3ff0000000000000,2
+np.float64,0x7fe8d4f70e71a9ed,0x7ff0000000000000,2
+np.float64,0xbfc3bd015f277a04,0x3ff030cbf16f29d9,2
+np.float64,0x3fd315d5baa62bab,0x3ff0b77a551a5335,2
+np.float64,0x7fa638b4642c7168,0x7ff0000000000000,2
+np.float64,0x3fdea8b795bd516f,0x3ff1df0bb70cdb79,2
+np.float64,0xbfd78754762f0ea8,0x3ff117ee0f29abed,2
+np.float64,0x8009f6a37633ed47,0x3ff0000000000000,2
+np.float64,0x3fea1daf75343b5f,0x3ff5a1804789bf13,2
+np.float64,0x3fd044b6c0a0896e,0x3ff0850b7297d02f,2
+np.float64,0x8003547a9c86a8f6,0x3ff0000000000000,2
+np.float64,0x3fa6c2cd782d859b,0x3ff0040c4ac8f44a,2
+np.float64,0x3fe225baaae44b76,0x3ff2a47f5e1f5e85,2
+np.float64,0x8000000000000000,0x3ff0000000000000,2
+np.float64,0x3fcb53da8736a7b8,0x3ff05db45af470ac,2
+np.float64,0x80079f8f140f3f1f,0x3ff0000000000000,2
+np.float64,0xbfcd1d7e2b3a3afc,0x3ff06a6b6845d05f,2
+np.float64,0x96df93672dbf3,0x3ff0000000000000,2
+np.float64,0xdef86e43bdf0e,0x3ff0000000000000,2
+np.float64,0xbfec05a09db80b41,0x3ff6896b768eea08,2
+np.float64,0x7fe3ff91d267ff23,0x7ff0000000000000,2
+np.float64,0xffea3eaa07347d53,0x7ff0000000000000,2
+np.float64,0xbfebde1cc1f7bc3a,0x3ff675e34ac2afc2,2
+np.float64,0x629bcde8c537a,0x3ff0000000000000,2
+np.float64,0xbfdde4fcff3bc9fa,0x3ff1c7061d21f0fe,2
+np.float64,0x3fee60fd003cc1fa,0x3ff7c49af3878a51,2
+np.float64,0x3fe5c92ac32b9256,0x3ff3da7a7929588b,2
+np.float64,0xbfe249c78f64938f,0x3ff2af52a06f1a50,2
+np.float64,0xbfc6de9dbe2dbd3c,0x3ff0418d284ee29f,2
+np.float64,0xffc8ef094631de14,0x7ff0000000000000,2
+np.float64,0x3fdef05f423de0bf,0x3ff1e800caba8ab5,2
+np.float64,0xffc1090731221210,0x7ff0000000000000,2
+np.float64,0xbfedec9b5fbbd937,0x3ff7854b6792a24a,2
+np.float64,0xbfb873507630e6a0,0x3ff012b23b3b7a67,2
+np.float64,0xbfe3cd6692679acd,0x3ff3299d6936ec4b,2
+np.float64,0xbfb107c890220f90,0x3ff0091122162472,2
+np.float64,0xbfe4e6ee48e9cddc,0x3ff3894e5a5e70a6,2
+np.float64,0xffe6fa3413edf468,0x7ff0000000000000,2
+np.float64,0x3fe2faf79b65f5ef,0x3ff2e5e11fae8b54,2
+np.float64,0xbfdfeb8df9bfd71c,0x3ff208189691b15f,2
+np.float64,0x75d2d03ceba5b,0x3ff0000000000000,2
+np.float64,0x3feb48c182b69183,0x3ff62d4462eba6cb,2
+np.float64,0xffcda9f7ff3b53f0,0x7ff0000000000000,2
+np.float64,0x7fcafbdcbd35f7b8,0x7ff0000000000000,2
+np.float64,0xbfd1895523a312aa,0x3ff09aba642a78d9,2
+np.float64,0x3fe3129c3f662538,0x3ff2ed546bbfafcf,2
+np.float64,0x3fb444dee02889be,0x3ff00cd86273b964,2
+np.float64,0xbf73b32d7ee77,0x3ff0000000000000,2
+np.float64,0x3fae19904c3c3321,0x3ff00714865c498a,2
+np.float64,0x7fefbfaef5bf7f5d,0x7ff0000000000000,2
+np.float64,0x8000dc3816e1b871,0x3ff0000000000000,2
+np.float64,0x8003f957ba47f2b0,0x3ff0000000000000,2
+np.float64,0xbfe3563c7ea6ac79,0x3ff302dcebc92856,2
+np.float64,0xbfdc80fbae3901f8,0x3ff19cfe73e58092,2
+np.float64,0x8009223b04524476,0x3ff0000000000000,2
+np.float64,0x3fd95f431c32be86,0x3ff1461c21cb03f0,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0xbfe7c12ed3ef825e,0x3ff49d59c265efcd,2
+np.float64,0x10000000000000,0x3ff0000000000000,2
+np.float64,0x7fc5e2632f2bc4c5,0x7ff0000000000000,2
+np.float64,0xffd8f6b4c7b1ed6a,0x7ff0000000000000,2
+np.float64,0x80034b93d4069728,0x3ff0000000000000,2
+np.float64,0xffdf5d4c1dbeba98,0x7ff0000000000000,2
+np.float64,0x800bc63d70178c7b,0x3ff0000000000000,2
+np.float64,0xbfeba31ea0f7463d,0x3ff658fa27073d2b,2
+np.float64,0xbfeebeede97d7ddc,0x3ff7f89a8e80dec4,2
+np.float64,0x7feb0f1f91361e3e,0x7ff0000000000000,2
+np.float64,0xffec3158d0b862b1,0x7ff0000000000000,2
+np.float64,0x3fde51cbfbbca398,0x3ff1d44c2ff15b3d,2
+np.float64,0xd58fb2b3ab1f7,0x3ff0000000000000,2
+np.float64,0x80028b9e32e5173d,0x3ff0000000000000,2
+np.float64,0x7fea77a56c74ef4a,0x7ff0000000000000,2
+np.float64,0x3fdaabbd4a35577b,0x3ff168d82edf2fe0,2
+np.float64,0xbfe69c39cc2d3874,0x3ff429b2f4cdb362,2
+np.float64,0x3b78f5d876f20,0x3ff0000000000000,2
+np.float64,0x7fa47d116428fa22,0x7ff0000000000000,2
+np.float64,0xbfe4118b0ce82316,0x3ff3403d989f780f,2
+np.float64,0x800482e793c905d0,0x3ff0000000000000,2
+np.float64,0xbfe48e5728e91cae,0x3ff36a9020bf9d20,2
+np.float64,0x7fe078ba8860f174,0x7ff0000000000000,2
+np.float64,0x3fd80843e5b01088,0x3ff1242f401e67da,2
+np.float64,0x3feb1f6965f63ed3,0x3ff6197fc590e143,2
+np.float64,0xffa41946d8283290,0x7ff0000000000000,2
+np.float64,0xffe30de129661bc2,0x7ff0000000000000,2
+np.float64,0x3fec9c8e1ab9391c,0x3ff6d542ea2f49b4,2
+np.float64,0x3fdc3e4490387c89,0x3ff1955ae18cac37,2
+np.float64,0xffef49d9c77e93b3,0x7ff0000000000000,2
+np.float64,0xfff0000000000000,0x7ff0000000000000,2
+np.float64,0x3fe0442455608849,0x3ff21cab90067d5c,2
+np.float64,0xbfed86aebd3b0d5e,0x3ff74ed8d4b75f50,2
+np.float64,0xffe4600d2b28c01a,0x7ff0000000000000,2
+np.float64,0x7fc1e8ccff23d199,0x7ff0000000000000,2
+np.float64,0x8008d49b0091a936,0x3ff0000000000000,2
+np.float64,0xbfe4139df028273c,0x3ff340ef3c86227c,2
+np.float64,0xbfe9ab4542b3568a,0x3ff56dfe32061247,2
+np.float64,0xbfd76dd365aedba6,0x3ff11589bab5fe71,2
+np.float64,0x3fd42cf829a859f0,0x3ff0cd3844bb0e11,2
+np.float64,0x7fd077cf2e20ef9d,0x7ff0000000000000,2
+np.float64,0x3fd7505760aea0b0,0x3ff112c937b3f088,2
+np.float64,0x1f93341a3f267,0x3ff0000000000000,2
+np.float64,0x7fe3c3c1b0678782,0x7ff0000000000000,2
+np.float64,0x800f85cec97f0b9e,0x3ff0000000000000,2
+np.float64,0xd93ab121b2756,0x3ff0000000000000,2
+np.float64,0xbfef8066fd7f00ce,0x3ff8663ed7d15189,2
+np.float64,0xffe31dd4af663ba9,0x7ff0000000000000,2
+np.float64,0xbfd7ff05a6affe0c,0x3ff1234c09bb686d,2
+np.float64,0xbfe718c31fee3186,0x3ff45a0c2d0ef7b0,2
+np.float64,0x800484bf33e9097f,0x3ff0000000000000,2
+np.float64,0xffd409dad02813b6,0x7ff0000000000000,2
+np.float64,0x3fe59679896b2cf4,0x3ff3c7f49e4fbbd3,2
+np.float64,0xbfd830c54d30618a,0x3ff1281729861390,2
+np.float64,0x1d4fc81c3a9fa,0x3ff0000000000000,2
+np.float64,0x3fd334e4272669c8,0x3ff0b9d5d82894f0,2
+np.float64,0xffc827e65c304fcc,0x7ff0000000000000,2
+np.float64,0xffe2d1814aa5a302,0x7ff0000000000000,2
+np.float64,0xffd7b5b8d32f6b72,0x7ff0000000000000,2
+np.float64,0xbfdbc9f077b793e0,0x3ff18836b9106ad0,2
+np.float64,0x7fc724c2082e4983,0x7ff0000000000000,2
+np.float64,0x3fa39ed72c273da0,0x3ff00302051ce17e,2
+np.float64,0xbfe3c4c209678984,0x3ff326c4fd16b5cd,2
+np.float64,0x7fe91f6d00f23ed9,0x7ff0000000000000,2
+np.float64,0x8004ee93fea9dd29,0x3ff0000000000000,2
+np.float64,0xbfe7c32d0eaf865a,0x3ff49e290ed2ca0e,2
+np.float64,0x800ea996b29d532d,0x3ff0000000000000,2
+np.float64,0x2df9ec1c5bf3e,0x3ff0000000000000,2
+np.float64,0xabb175df5762f,0x3ff0000000000000,2
+np.float64,0xffe3fc9c8e27f938,0x7ff0000000000000,2
+np.float64,0x7fb358a62826b14b,0x7ff0000000000000,2
+np.float64,0x800aedcccaf5db9a,0x3ff0000000000000,2
+np.float64,0xffca530c5234a618,0x7ff0000000000000,2
+np.float64,0x40f91e9681f24,0x3ff0000000000000,2
+np.float64,0x80098f4572f31e8b,0x3ff0000000000000,2
+np.float64,0xbfdc58c21fb8b184,0x3ff1986115f8fe92,2
+np.float64,0xbfebeafd40b7d5fa,0x3ff67c3cf34036e3,2
+np.float64,0x7fd108861a22110b,0x7ff0000000000000,2
+np.float64,0xff8e499ae03c9340,0x7ff0000000000000,2
+np.float64,0xbfd2f58caa25eb1a,0x3ff0b50b1bffafdf,2
+np.float64,0x3fa040c9bc208193,0x3ff002105e95aefa,2
+np.float64,0xbfd2ebc0a5a5d782,0x3ff0b44ed5a11584,2
+np.float64,0xffe237bc93a46f78,0x7ff0000000000000,2
+np.float64,0x3fd557c5eeaaaf8c,0x3ff0e5e0a575e1ba,2
+np.float64,0x7abb419ef5769,0x3ff0000000000000,2
+np.float64,0xffefa1fe353f43fb,0x7ff0000000000000,2
+np.float64,0x3fa6f80ba02df017,0x3ff0041f51fa0d76,2
+np.float64,0xbfdce79488b9cf2a,0x3ff1a8e32877beb4,2
+np.float64,0x2285f3e4450bf,0x3ff0000000000000,2
+np.float64,0x3bf7eb7277efe,0x3ff0000000000000,2
+np.float64,0xbfd5925fd3ab24c0,0x3ff0eae1c2ac2e78,2
+np.float64,0xbfed6325227ac64a,0x3ff73c14a2ad5bfe,2
+np.float64,0x8000429c02408539,0x3ff0000000000000,2
+np.float64,0xb67c21e76cf84,0x3ff0000000000000,2
+np.float64,0x3fec3d3462f87a69,0x3ff6a51e4c027eb7,2
+np.float64,0x3feae69cbcf5cd3a,0x3ff5fe9387314afd,2
+np.float64,0x7fd0c9a0ec219341,0x7ff0000000000000,2
+np.float64,0x8004adb7f6295b71,0x3ff0000000000000,2
+np.float64,0xffd61fe8bb2c3fd2,0x7ff0000000000000,2
+np.float64,0xffe7fb3834aff670,0x7ff0000000000000,2
+np.float64,0x7fd1eef163a3dde2,0x7ff0000000000000,2
+np.float64,0x2e84547a5d08b,0x3ff0000000000000,2
+np.float64,0x8002d8875ee5b10f,0x3ff0000000000000,2
+np.float64,0x3fe1d1c5f763a38c,0x3ff28ba524fb6de8,2
+np.float64,0x8001dea0bc43bd42,0x3ff0000000000000,2
+np.float64,0xfecfad91fd9f6,0x3ff0000000000000,2
+np.float64,0xffed7965fa3af2cb,0x7ff0000000000000,2
+np.float64,0xbfe6102ccc2c205a,0x3ff3f4c082506686,2
+np.float64,0x3feff75b777feeb6,0x3ff8ab6222578e0c,2
+np.float64,0x3fb8a97bd43152f8,0x3ff013057f0a9d89,2
+np.float64,0xffe234b5e964696c,0x7ff0000000000000,2
+np.float64,0x984d9137309b2,0x3ff0000000000000,2
+np.float64,0xbfe42e9230e85d24,0x3ff349fb7d1a7560,2
+np.float64,0xbfecc8b249f99165,0x3ff6ebd0fea0ea72,2
+np.float64,0x8000840910410813,0x3ff0000000000000,2
+np.float64,0xbfd81db9e7303b74,0x3ff126402d3539ec,2
+np.float64,0x800548eb7fea91d8,0x3ff0000000000000,2
+np.float64,0xbfe4679ad0e8cf36,0x3ff35d4db89296a3,2
+np.float64,0x3fd4c55b5a298ab7,0x3ff0d99da31081f9,2
+np.float64,0xbfa8f5b38c31eb60,0x3ff004de3a23b32d,2
+np.float64,0x80005d348e80ba6a,0x3ff0000000000000,2
+np.float64,0x800c348d6118691b,0x3ff0000000000000,2
+np.float64,0xffd6b88f84ad7120,0x7ff0000000000000,2
+np.float64,0x3fc1aaaa82235555,0x3ff027136afd08e0,2
+np.float64,0x7fca7d081b34fa0f,0x7ff0000000000000,2
+np.float64,0x1,0x3ff0000000000000,2
+np.float64,0xbfdc810d1139021a,0x3ff19d007408cfe3,2
+np.float64,0xbfe5dce05f2bb9c0,0x3ff3e1bb9234617b,2
+np.float64,0xffecfe2c32b9fc58,0x7ff0000000000000,2
+np.float64,0x95b2891b2b651,0x3ff0000000000000,2
+np.float64,0x8000b60c6c616c1a,0x3ff0000000000000,2
+np.float64,0x4944f0889289f,0x3ff0000000000000,2
+np.float64,0x3fe6e508696dca10,0x3ff445d1b94863e9,2
+np.float64,0xbfe63355d0ec66ac,0x3ff401e74f16d16f,2
+np.float64,0xbfe9b9595af372b3,0x3ff57445e1b4d670,2
+np.float64,0x800e16f7313c2dee,0x3ff0000000000000,2
+np.float64,0xffe898f5f0b131eb,0x7ff0000000000000,2
+np.float64,0x3fe91ac651f2358d,0x3ff52e787c21c004,2
+np.float64,0x7fbfaac6783f558c,0x7ff0000000000000,2
+np.float64,0xd8ef3dfbb1de8,0x3ff0000000000000,2
+np.float64,0xbfc58c13a52b1828,0x3ff03a2c19d65019,2
+np.float64,0xbfbde55e8a3bcac0,0x3ff01bf648a3e0a7,2
+np.float64,0xffc3034930260694,0x7ff0000000000000,2
+np.float64,0xea77a64dd4ef5,0x3ff0000000000000,2
+np.float64,0x800cfe7e7739fcfd,0x3ff0000000000000,2
+np.float64,0x4960f31a92c1f,0x3ff0000000000000,2
+np.float64,0x3fd9552c94b2aa58,0x3ff14515a29add09,2
+np.float64,0xffe8b3244c316648,0x7ff0000000000000,2
+np.float64,0x3fe8201e6a70403d,0x3ff4c444fa679cce,2
+np.float64,0xffe9ab7c20f356f8,0x7ff0000000000000,2
+np.float64,0x3fed8bba5f7b1774,0x3ff751853c4c95c5,2
+np.float64,0x8007639cb76ec73a,0x3ff0000000000000,2
+np.float64,0xbfe396db89672db7,0x3ff317bfd1d6fa8c,2
+np.float64,0xbfeb42f888f685f1,0x3ff62a7e0eee56b1,2
+np.float64,0x3fe894827c712904,0x3ff4f4f561d9ea13,2
+np.float64,0xb66b3caf6cd68,0x3ff0000000000000,2
+np.float64,0x800f8907fdbf1210,0x3ff0000000000000,2
+np.float64,0x7fe9b0cddb73619b,0x7ff0000000000000,2
+np.float64,0xbfda70c0e634e182,0x3ff1628c6fdffc53,2
+np.float64,0x3fe0b5f534a16bea,0x3ff23b4ed4c2b48e,2
+np.float64,0xbfe8eee93671ddd2,0x3ff51b85b3c50ae4,2
+np.float64,0xbfe8c22627f1844c,0x3ff50858787a3bfe,2
+np.float64,0x37bb83c86f771,0x3ff0000000000000,2
+np.float64,0xffb7827ffe2f0500,0x7ff0000000000000,2
+np.float64,0x64317940c864,0x3ff0000000000000,2
+np.float64,0x800430ecee6861db,0x3ff0000000000000,2
+np.float64,0x3fa4291fbc285240,0x3ff0032d0204f6dd,2
+np.float64,0xffec69f76af8d3ee,0x7ff0000000000000,2
+np.float64,0x3ff0000000000000,0x3ff8b07551d9f550,2
+np.float64,0x3fc4cf3c42299e79,0x3ff0363fb1d3c254,2
+np.float64,0x7fe0223a77e04474,0x7ff0000000000000,2
+np.float64,0x800a3d4fa4347aa0,0x3ff0000000000000,2
+np.float64,0x3fdd273f94ba4e7f,0x3ff1b05b686e6879,2
+np.float64,0x3feca79052f94f20,0x3ff6dadedfa283aa,2
+np.float64,0x5e7f6f80bcfef,0x3ff0000000000000,2
+np.float64,0xbfef035892fe06b1,0x3ff81efb39cbeba2,2
+np.float64,0x3fee6c08e07cd812,0x3ff7caad952860a1,2
+np.float64,0xffeda715877b4e2a,0x7ff0000000000000,2
+np.float64,0x800580286b0b0052,0x3ff0000000000000,2
+np.float64,0x800703a73fee074f,0x3ff0000000000000,2
+np.float64,0xbfccf96a6639f2d4,0x3ff0696330a60832,2
+np.float64,0x7feb408442368108,0x7ff0000000000000,2
+np.float64,0x3fedc87a46fb90f5,0x3ff771e3635649a9,2
+np.float64,0x3fd8297b773052f7,0x3ff12762bc0cea76,2
+np.float64,0x3fee41bb03fc8376,0x3ff7b37b2da48ab4,2
+np.float64,0xbfe2b05a226560b4,0x3ff2cea17ae7c528,2
+np.float64,0xbfd2e92cf2a5d25a,0x3ff0b41d605ced61,2
+np.float64,0x4817f03a902ff,0x3ff0000000000000,2
+np.float64,0x8c9d4f0d193aa,0x3ff0000000000000,2
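
Each row of these validation tables is one test vector: a dtype tag, the input's raw IEEE-754 bits in hex, the expected output bits, and the allowed error in ULPs (units in the last place). A minimal sketch of how one row can be decoded and scored follows; this is illustrative, not numpy's own test harness. The rows above map the bits of ±1.0 to roughly 1.5430806, which is consistent with an even function like cosh(1), so np.cosh is assumed here.

import numpy as np

def ulp_distance(func, dtype_name, in_hex, out_hex):
    # "np.float64" -> np.dtype("float64"); pick the matching unsigned int type
    ftype = np.dtype(dtype_name.replace("np.", ""))
    utype = np.uint64 if ftype.itemsize == 8 else np.uint32
    x = np.array(int(in_hex, 16), dtype=utype).view(ftype)        # bits -> float
    expected = np.array(int(out_hex, 16), dtype=utype).view(ftype)
    actual = func(x)
    # For finite results of the same sign, the difference of the raw bit
    # patterns is exactly the distance in ULPs.
    return abs(int(actual.view(utype)) - int(expected.view(utype)))

# Row "np.float64,0x3ff0000000000000,0x3ff8b07551d9f550,2", i.e. f(1.0) with
# a 2-ULP tolerance; on most platforms this prints 0.
print(ulp_distance(np.cosh, "np.float64",
                   "0x3ff0000000000000", "0x3ff8b07551d9f550"))
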
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv
new file mode 100644
index 00000000..071fb312
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp.csv
@@ -0,0 +1,412 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,3
+np.float32,0x007b2490,0x3f800000,3
+np.float32,0x007c99fa,0x3f800000,3
+np.float32,0x00734a0c,0x3f800000,3
+np.float32,0x0070de24,0x3f800000,3
+np.float32,0x00495d65,0x3f800000,3
+np.float32,0x006894f6,0x3f800000,3
+np.float32,0x00555a76,0x3f800000,3
+np.float32,0x004e1fb8,0x3f800000,3
+np.float32,0x00687de9,0x3f800000,3
+## -ve denormals ##
+np.float32,0x805b59af,0x3f800000,3
+np.float32,0x807ed8ed,0x3f800000,3
+np.float32,0x807142ad,0x3f800000,3
+np.float32,0x80772002,0x3f800000,3
+np.float32,0x8062abcb,0x3f800000,3
+np.float32,0x8045e31c,0x3f800000,3
+np.float32,0x805f01c2,0x3f800000,3
+np.float32,0x80506432,0x3f800000,3
+np.float32,0x8060089d,0x3f800000,3
+np.float32,0x8071292f,0x3f800000,3
+## floats that output a denormal ##
+np.float32,0xc2cf3fc1,0x00000001,3
+np.float32,0xc2c79726,0x00000021,3
+np.float32,0xc2cb295d,0x00000005,3
+np.float32,0xc2b49e6b,0x00068c4c,3
+np.float32,0xc2ca8116,0x00000008,3
+np.float32,0xc2c23f82,0x000001d7,3
+np.float32,0xc2cb69c0,0x00000005,3
+np.float32,0xc2cc1f4d,0x00000003,3
+np.float32,0xc2ae094e,0x00affc4c,3
+np.float32,0xc2c86c44,0x00000015,3
+## random floats between -87.0f and 88.0f ##
+np.float32,0x4030d7e0,0x417d9a05,3
+np.float32,0x426f60e8,0x6aa1be2c,3
+np.float32,0x41a1b220,0x4e0efc11,3
+np.float32,0xc20cc722,0x26159da7,3
+np.float32,0x41c492bc,0x512ec79d,3
+np.float32,0x40980210,0x42e73a0e,3
+np.float32,0xbf1f7b80,0x3f094de3,3
+np.float32,0x42a678a4,0x7b87a383,3
+np.float32,0xc20f3cfd,0x25a1c304,3
+np.float32,0x423ff34c,0x6216467f,3
+np.float32,0x00000000,0x3f800000,3
+## floats that cause an overflow ##
+np.float32,0x7f06d8c1,0x7f800000,3
+np.float32,0x7f451912,0x7f800000,3
+np.float32,0x7ecceac3,0x7f800000,3
+np.float32,0x7f643b45,0x7f800000,3
+np.float32,0x7e910ea0,0x7f800000,3
+np.float32,0x7eb4756b,0x7f800000,3
+np.float32,0x7f4ec708,0x7f800000,3
+np.float32,0x7f6b4551,0x7f800000,3
+np.float32,0x7d8edbda,0x7f800000,3
+np.float32,0x7f730718,0x7f800000,3
+np.float32,0x42b17217,0x7f7fff84,3
+np.float32,0x42b17218,0x7f800000,3
+np.float32,0x42b17219,0x7f800000,3
+np.float32,0xfef2b0bc,0x00000000,3
+np.float32,0xff69f83e,0x00000000,3
+np.float32,0xff4ecb12,0x00000000,3
+np.float32,0xfeac6d86,0x00000000,3
+np.float32,0xfde0cdb8,0x00000000,3
+np.float32,0xff26aef4,0x00000000,3
+np.float32,0xff6f9277,0x00000000,3
+np.float32,0xff7adfc4,0x00000000,3
+np.float32,0xff0ad40e,0x00000000,3
+np.float32,0xff6fd8f3,0x00000000,3
+np.float32,0xc2cff1b4,0x00000001,3
+np.float32,0xc2cff1b5,0x00000000,3
+np.float32,0xc2cff1b6,0x00000000,3
+np.float32,0x7f800000,0x7f800000,3
+np.float32,0xff800000,0x00000000,3
+np.float32,0x4292f27c,0x7480000a,3
+np.float32,0x42a920be,0x7c7fff94,3
+np.float32,0x41c214c9,0x50ffffd9,3
+np.float32,0x41abe686,0x4effffd9,3
+np.float32,0x4287db5a,0x707fffd3,3
+np.float32,0x41902cbb,0x4c800078,3
+np.float32,0x42609466,0x67ffffeb,3
+np.float32,0x41a65af5,0x4e7fffd1,3
+np.float32,0x417f13ff,0x4affffc9,3
+np.float32,0x426d0e6c,0x6a3504f2,3
+np.float32,0x41bc8934,0x507fff51,3
+np.float32,0x42a7bdde,0x7c0000d6,3
+np.float32,0x4120cf66,0x46b504f6,3
+np.float32,0x4244da8f,0x62ffff1a,3
+np.float32,0x41a0cf69,0x4e000034,3
+np.float32,0x41cd2bec,0x52000005,3
+np.float32,0x42893e41,0x7100009e,3
+np.float32,0x41b437e1,0x4fb50502,3
+np.float32,0x41d8430f,0x5300001d,3
+np.float32,0x4244da92,0x62ffffda,3
+np.float32,0x41a0cf63,0x4dffffa9,3
+np.float32,0x3eb17218,0x3fb504f3,3
+np.float32,0x428729e8,0x703504dc,3
+np.float32,0x41a0cf67,0x4e000014,3
+np.float32,0x4252b77d,0x65800011,3
+np.float32,0x41902cb9,0x4c800058,3
+np.float32,0x42a0cf67,0x79800052,3
+np.float32,0x4152b77b,0x48ffffe9,3
+np.float32,0x41265af3,0x46ffffc8,3
+np.float32,0x42187e0b,0x5affff9a,3
+np.float32,0xc0d2b77c,0x3ab504f6,3
+np.float32,0xc283b2ac,0x10000072,3
+np.float32,0xc1cff1b4,0x2cb504f5,3
+np.float32,0xc05dce9e,0x3d000000,3
+np.float32,0xc28ec9d2,0x0bfffea5,3
+np.float32,0xc23c893a,0x1d7fffde,3
+np.float32,0xc2a920c0,0x027fff6c,3
+np.float32,0xc1f9886f,0x2900002b,3
+np.float32,0xc2c42920,0x000000b5,3
+np.float32,0xc2893e41,0x0dfffec5,3
+np.float32,0xc2c4da93,0x00000080,3
+np.float32,0xc17f1401,0x3400000c,3
+np.float32,0xc1902cb6,0x327fffaf,3
+np.float32,0xc27c4e3b,0x11ffffc5,3
+np.float32,0xc268e5c5,0x157ffe9d,3
+np.float32,0xc2b4e953,0x0005a826,3
+np.float32,0xc287db5a,0x0e800016,3
+np.float32,0xc207db5a,0x2700000b,3
+np.float32,0xc2b2d4fe,0x000ffff1,3
+np.float32,0xc268e5c0,0x157fffdd,3
+np.float32,0xc22920bd,0x2100003b,3
+np.float32,0xc2902caf,0x0b80011e,3
+np.float32,0xc1902cba,0x327fff2f,3
+np.float32,0xc2ca6625,0x00000008,3
+np.float32,0xc280ece8,0x10fffeb5,3
+np.float32,0xc2918f94,0x0b0000ea,3
+np.float32,0xc29b43d5,0x077ffffc,3
+np.float32,0xc1e61ff7,0x2ab504f5,3
+np.float32,0xc2867878,0x0effff15,3
+np.float32,0xc2a2324a,0x04fffff4,3
+#float64
+## near zero ##
+np.float64,0x8000000000000000,0x3ff0000000000000,2
+np.float64,0x8010000000000000,0x3ff0000000000000,2
+np.float64,0x8000000000000001,0x3ff0000000000000,2
+np.float64,0x8360000000000000,0x3ff0000000000000,2
+np.float64,0x9a70000000000000,0x3ff0000000000000,2
+np.float64,0xb9b0000000000000,0x3ff0000000000000,2
+np.float64,0xb810000000000000,0x3ff0000000000000,2
+np.float64,0xbc30000000000000,0x3ff0000000000000,2
+np.float64,0xb6a0000000000000,0x3ff0000000000000,2
+np.float64,0x0000000000000000,0x3ff0000000000000,2
+np.float64,0x0010000000000000,0x3ff0000000000000,2
+np.float64,0x0000000000000001,0x3ff0000000000000,2
+np.float64,0x0360000000000000,0x3ff0000000000000,2
+np.float64,0x1a70000000000000,0x3ff0000000000000,2
+np.float64,0x3c30000000000000,0x3ff0000000000000,2
+np.float64,0x36a0000000000000,0x3ff0000000000000,2
+np.float64,0x39b0000000000000,0x3ff0000000000000,2
+np.float64,0x3810000000000000,0x3ff0000000000000,2
+## underflow ##
+np.float64,0xc0c6276800000000,0x0000000000000000,2
+np.float64,0xc0c62d918ce2421d,0x0000000000000000,2
+np.float64,0xc0c62d918ce2421e,0x0000000000000000,2
+np.float64,0xc0c62d91a0000000,0x0000000000000000,2
+np.float64,0xc0c62d9180000000,0x0000000000000000,2
+np.float64,0xc0c62dea45ee3e06,0x0000000000000000,2
+np.float64,0xc0c62dea45ee3e07,0x0000000000000000,2
+np.float64,0xc0c62dea40000000,0x0000000000000000,2
+np.float64,0xc0c62dea60000000,0x0000000000000000,2
+np.float64,0xc0875f1120000000,0x0000000000000000,2
+np.float64,0xc0875f113c30b1c8,0x0000000000000000,2
+np.float64,0xc0875f1140000000,0x0000000000000000,2
+np.float64,0xc093480000000000,0x0000000000000000,2
+np.float64,0xffefffffffffffff,0x0000000000000000,2
+np.float64,0xc7efffffe0000000,0x0000000000000000,2
+## overflow ##
+np.float64,0x40862e52fefa39ef,0x7ff0000000000000,2
+np.float64,0x40872e42fefa39ef,0x7ff0000000000000,2
+## +/- INF, +/- NAN ##
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0xfff0000000000000,0x0000000000000000,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xfff8000000000000,0xfff8000000000000,2
+## output denormal ##
+np.float64,0xc087438520000000,0x0000000000000001,2
+np.float64,0xc08743853f2f4461,0x0000000000000001,2
+np.float64,0xc08743853f2f4460,0x0000000000000001,2
+np.float64,0xc087438540000000,0x0000000000000001,2
+## between -745.13321910 and 709.78271289 ##
+np.float64,0xbff760cd14774bd9,0x3fcdb14ced00ceb6,2
+np.float64,0xbff760cd20000000,0x3fcdb14cd7993879,2
+np.float64,0xbff760cd00000000,0x3fcdb14d12fbd264,2
+np.float64,0xc07f1cf360000000,0x130c1b369af14fda,2
+np.float64,0xbeb0000000000000,0x3feffffe00001000,2
+np.float64,0xbd70000000000000,0x3fefffffffffe000,2
+np.float64,0xc084fd46e5c84952,0x0360000000000139,2
+np.float64,0xc084fd46e5c84953,0x035ffffffffffe71,2
+np.float64,0xc084fd46e0000000,0x0360000b9096d32c,2
+np.float64,0xc084fd4700000000,0x035fff9721d12104,2
+np.float64,0xc086232bc0000000,0x0010003af5e64635,2
+np.float64,0xc086232bdd7abcd2,0x001000000000007c,2
+np.float64,0xc086232bdd7abcd3,0x000ffffffffffe7c,2
+np.float64,0xc086232be0000000,0x000ffffaf57a6fc9,2
+np.float64,0xc086233920000000,0x000fe590e3b45eb0,2
+np.float64,0xc086233938000000,0x000fe56133493c57,2
+np.float64,0xc086233940000000,0x000fe5514deffbbc,2
+np.float64,0xc086234c98000000,0x000fbf1024c32ccb,2
+np.float64,0xc086234ca0000000,0x000fbf0065bae78d,2
+np.float64,0xc086234c80000000,0x000fbf3f623a7724,2
+np.float64,0xc086234ec0000000,0x000fbad237c846f9,2
+np.float64,0xc086234ec8000000,0x000fbac27cfdec97,2
+np.float64,0xc086234ee0000000,0x000fba934cfd3dc2,2
+np.float64,0xc086234ef0000000,0x000fba73d7f618d9,2
+np.float64,0xc086234f00000000,0x000fba54632dddc0,2
+np.float64,0xc0862356e0000000,0x000faae0945b761a,2
+np.float64,0xc0862356f0000000,0x000faac13eb9a310,2
+np.float64,0xc086235700000000,0x000faaa1e9567b0a,2
+np.float64,0xc086236020000000,0x000f98cd75c11ed7,2
+np.float64,0xc086236ca0000000,0x000f8081b4d93f89,2
+np.float64,0xc086236cb0000000,0x000f8062b3f4d6c5,2
+np.float64,0xc086236cc0000000,0x000f8043b34e6f8c,2
+np.float64,0xc086238d98000000,0x000f41220d9b0d2c,2
+np.float64,0xc086238da0000000,0x000f4112cc80a01f,2
+np.float64,0xc086238d80000000,0x000f414fd145db5b,2
+np.float64,0xc08624fd00000000,0x000cbfce8ea1e6c4,2
+np.float64,0xc086256080000000,0x000c250747fcd46e,2
+np.float64,0xc08626c480000000,0x000a34f4bd975193,2
+np.float64,0xbf50000000000000,0x3feff800ffeaac00,2
+np.float64,0xbe10000000000000,0x3fefffffff800000,2
+np.float64,0xbcd0000000000000,0x3feffffffffffff8,2
+np.float64,0xc055d589e0000000,0x38100004bf94f63e,2
+np.float64,0xc055d58a00000000,0x380ffff97f292ce8,2
+np.float64,0xbfd962d900000000,0x3fe585a4b00110e1,2
+np.float64,0x3ff4bed280000000,0x400d411e7a58a303,2
+np.float64,0x3fff0b3620000000,0x401bd7737ffffcf3,2
+np.float64,0x3ff0000000000000,0x4005bf0a8b145769,2
+np.float64,0x3eb0000000000000,0x3ff0000100000800,2
+np.float64,0x3d70000000000000,0x3ff0000000001000,2
+np.float64,0x40862e42e0000000,0x7fefff841808287f,2
+np.float64,0x40862e42fefa39ef,0x7fefffffffffff2a,2
+np.float64,0x40862e0000000000,0x7feef85a11e73f2d,2
+np.float64,0x4000000000000000,0x401d8e64b8d4ddae,2
+np.float64,0x4009242920000000,0x40372a52c383a488,2
+np.float64,0x4049000000000000,0x44719103e4080b45,2
+np.float64,0x4008000000000000,0x403415e5bf6fb106,2
+np.float64,0x3f50000000000000,0x3ff00400800aab55,2
+np.float64,0x3e10000000000000,0x3ff0000000400000,2
+np.float64,0x3cd0000000000000,0x3ff0000000000004,2
+np.float64,0x40562e40a0000000,0x47effed088821c3f,2
+np.float64,0x40562e42e0000000,0x47effff082e6c7ff,2
+np.float64,0x40562e4300000000,0x47f00000417184b8,2
+np.float64,0x3fe8000000000000,0x4000ef9db467dcf8,2
+np.float64,0x402b12e8d4f33589,0x412718f68c71a6fe,2
+np.float64,0x402b12e8d4f3358a,0x412718f68c71a70a,2
+np.float64,0x402b12e8c0000000,0x412718f59a7f472e,2
+np.float64,0x402b12e8e0000000,0x412718f70c0eac62,2
+#use 1st entry
+np.float64,0x40631659AE147CB4,0x4db3a95025a4890f,2
+np.float64,0xC061B87D2E85A4E2,0x332640c8e2de2c51,2
+np.float64,0x405A4A50BE243AF4,0x496a45e4b7f0339a,2
+np.float64,0xC0839898B98EC5C6,0x0764027828830df4,2
+#use 2nd entry
+np.float64,0xC072428C44B6537C,0x2596ade838b96f3e,2
+np.float64,0xC053057C5E1AE9BF,0x3912c8fad18fdadf,2
+np.float64,0x407E89C78328BAA3,0x6bfe35d5b9a1a194,2
+np.float64,0x4083501B6DD87112,0x77a855503a38924e,2
+#use 3rd entry
+np.float64,0x40832C6195F24540,0x7741e73c80e5eb2f,2
+np.float64,0xC083D4CD557C2EC9,0x06b61727c2d2508e,2
+np.float64,0x400C48F5F67C99BD,0x404128820f02b92e,2
+np.float64,0x4056E36D9B2DF26A,0x4830f52ff34a8242,2
+#use 4th entry
+np.float64,0x4080FF700D8CBD06,0x70fa70df9bc30f20,2
+np.float64,0x406C276D39E53328,0x543eb8e20a8f4741,2
+np.float64,0xC070D6159BBD8716,0x27a4a0548c904a75,2
+np.float64,0xC052EBCF8ED61F83,0x391c0e92368d15e4,2
+#use 5th entry
+np.float64,0xC061F892A8AC5FBE,0x32f807a89efd3869,2
+np.float64,0x4021D885D2DBA085,0x40bd4dc86d3e3270,2
+np.float64,0x40767AEEEE7D4FCF,0x605e22851ee2afb7,2
+np.float64,0xC0757C5D75D08C80,0x20f0751599b992a2,2
+#use 6th entry
+np.float64,0x405ACF7A284C4CE3,0x499a4e0b7a27027c,2
+np.float64,0xC085A6C9E80D7AF5,0x0175914009d62ec2,2
+np.float64,0xC07E4C02F86F1DAE,0x1439269b29a9231e,2
+np.float64,0x4080D80F9691CC87,0x7088a6cdafb041de,2
+#use 7th entry
+np.float64,0x407FDFD84FBA0AC1,0x6deb1ae6f9bc4767,2
+np.float64,0x40630C06A1A2213D,0x4dac7a9d51a838b7,2
+np.float64,0x40685FDB30BB8B4F,0x5183f5cc2cac9e79,2
+np.float64,0x408045A2208F77F4,0x6ee299e08e2aa2f0,2
+#use 8th entry
+np.float64,0xC08104E391F5078B,0x0ed397b7cbfbd230,2
+np.float64,0xC031501CAEFAE395,0x3e6040fd1ea35085,2
+np.float64,0xC079229124F6247C,0x1babf4f923306b1e,2
+np.float64,0x407FB65F44600435,0x6db03beaf2512b8a,2
+#use 9th entry
+np.float64,0xC07EDEE8E8E8A5AC,0x136536cec9cbef48,2
+np.float64,0x4072BB4086099A14,0x5af4d3c3008b56cc,2
+np.float64,0x4050442A2EC42CB4,0x45cd393bd8fad357,2
+np.float64,0xC06AC28FB3D419B4,0x2ca1b9d3437df85f,2
+#use 10th entry
+np.float64,0x40567FC6F0A68076,0x480c977fd5f3122e,2
+np.float64,0x40620A2F7EDA59BB,0x4cf278e96f4ce4d7,2
+np.float64,0xC085044707CD557C,0x034aad6c968a045a,2
+np.float64,0xC07374EA5AC516AA,0x23dd6afdc03e83d5,2
+#use 11th entry
+np.float64,0x4073CC95332619C1,0x5c804b1498bbaa54,2
+np.float64,0xC0799FEBBE257F31,0x1af6a954c43b87d2,2
+np.float64,0x408159F19EA424F6,0x7200858efcbfc84d,2
+np.float64,0x404A81F6F24C0792,0x44b664a07ce5bbfa,2
+#use 12th entry
+np.float64,0x40295FF1EFB9A741,0x4113c0e74c52d7b0,2
+np.float64,0x4073975F4CC411DA,0x5c32be40b4fec2c1,2
+np.float64,0x406E9DE52E82A77E,0x56049c9a3f1ae089,2
+np.float64,0x40748C2F52560ED9,0x5d93bc14fd4cd23b,2
+#use 13th entry
+np.float64,0x4062A553CDC4D04C,0x4d6266bfde301318,2
+np.float64,0xC079EC1D63598AB7,0x1a88cb184dab224c,2
+np.float64,0xC0725C1CB3167427,0x25725b46f8a081f6,2
+np.float64,0x407888771D9B45F9,0x6353b1ec6bd7ce80,2
+#use 14th entry
+np.float64,0xC082CBA03AA89807,0x09b383723831ce56,2
+np.float64,0xC083A8961BB67DD7,0x0735b118d5275552,2
+np.float64,0xC076BC6ECA12E7E3,0x1f2222679eaef615,2
+np.float64,0xC072752503AA1A5B,0x254eb832242c77e1,2
+#use 15th entry
+np.float64,0xC058800792125DEC,0x371882372a0b48d4,2
+np.float64,0x4082909FD863E81C,0x7580d5f386920142,2
+np.float64,0xC071616F8FB534F9,0x26dbe20ef64a412b,2
+np.float64,0x406D1AB571CAA747,0x54ee0d55cb38ac20,2
+#use 16th entry
+np.float64,0x406956428B7DAD09,0x52358682c271237f,2
+np.float64,0xC07EFC2D9D17B621,0x133b3e77c27a4d45,2
+np.float64,0xC08469BAC5BA3CCA,0x050863e5f42cc52f,2
+np.float64,0x407189D9626386A5,0x593cb1c0b3b5c1d3,2
+#use 17th entry
+np.float64,0x4077E652E3DEB8C6,0x6269a10dcbd3c752,2
+np.float64,0x407674C97DB06878,0x605485dcc2426ec2,2
+np.float64,0xC07CE9969CF4268D,0x16386cf8996669f2,2
+np.float64,0x40780EE32D5847C4,0x62a436bd1abe108d,2
+#use 18th entry
+np.float64,0x4076C3AA5E1E8DA1,0x60c62f56a5e72e24,2
+np.float64,0xC0730AFC7239B9BE,0x24758ead095cec1e,2
+np.float64,0xC085CC2B9C420DDB,0x0109cdaa2e5694c1,2
+np.float64,0x406D0765CB6D7AA4,0x54e06f8dd91bd945,2
+#use 19th entry
+np.float64,0xC082D011F3B495E7,0x09a6647661d279c2,2
+np.float64,0xC072826AF8F6AFBC,0x253acd3cd224507e,2
+np.float64,0x404EB9C4810CEA09,0x457933dbf07e8133,2
+np.float64,0x408284FBC97C58CE,0x755f6eb234aa4b98,2
+#use 20th entry
+np.float64,0x40856008CF6EDC63,0x7d9c0b3c03f4f73c,2
+np.float64,0xC077CB2E9F013B17,0x1d9b3d3a166a55db,2
+np.float64,0xC0479CA3C20AD057,0x3bad40e081555b99,2
+np.float64,0x40844CD31107332A,0x7a821d70aea478e2,2
+#use 21st entry
+np.float64,0xC07C8FCC0BFCC844,0x16ba1cc8c539d19b,2
+np.float64,0xC085C4E9A3ABA488,0x011ff675ba1a2217,2
+np.float64,0x4074D538B32966E5,0x5dfd9d78043c6ad9,2
+np.float64,0xC0630CA16902AD46,0x3231a446074cede6,2
+#use 22nd entry
+np.float64,0xC06C826733D7D0B7,0x2b5f1078314d41e1,2
+np.float64,0xC0520DF55B2B907F,0x396c13a6ce8e833e,2
+np.float64,0xC080712072B0F437,0x107eae02d11d98ea,2
+np.float64,0x40528A6150E19EFB,0x469fdabda02228c5,2
+#use 23rd entry
+np.float64,0xC07B1D74B6586451,0x18d1253883ae3b48,2
+np.float64,0x4045AFD7867DAEC0,0x43d7d634fc4c5d98,2
+np.float64,0xC07A08B91F9ED3E2,0x1a60973e6397fc37,2
+np.float64,0x407B3ECF0AE21C8C,0x673e03e9d98d7235,2
+#use 24th entry
+np.float64,0xC078AEB6F30CEABF,0x1c530b93ab54a1b3,2
+np.float64,0x4084495006A41672,0x7a775b6dc7e63064,2
+np.float64,0x40830B1C0EBF95DD,0x76e1e6eed77cfb89,2
+np.float64,0x407D93E8F33D8470,0x6a9adbc9e1e4f1e5,2
+#use 25th entry
+np.float64,0x4066B11A09EFD9E8,0x504dd528065c28a7,2
+np.float64,0x408545823723AEEB,0x7d504a9b1844f594,2
+np.float64,0xC068C711F2CA3362,0x2e104f3496ea118e,2
+np.float64,0x407F317FCC3CA873,0x6cf0732c9948ebf4,2
+#use 26th entry
+np.float64,0x407AFB3EBA2ED50F,0x66dc28a129c868d5,2
+np.float64,0xC075377037708ADE,0x21531a329f3d793e,2
+np.float64,0xC07C30066A1F3246,0x174448baa16ded2b,2
+np.float64,0xC06689A75DE2ABD3,0x2fad70662fae230b,2
+#use 27th entry
+np.float64,0x4081514E9FCCF1E0,0x71e673b9efd15f44,2
+np.float64,0xC0762C710AF68460,0x1ff1ed7d8947fe43,2
+np.float64,0xC0468102FF70D9C4,0x3be0c3a8ff3419a3,2
+np.float64,0xC07EA4CEEF02A83E,0x13b908f085102c61,2
+#use 28th entry
+np.float64,0xC06290B04AE823C4,0x328a83da3c2e3351,2
+np.float64,0xC0770EB1D1C395FB,0x1eab281c1f1db5fe,2
+np.float64,0xC06F5D4D838A5BAE,0x29500ea32fb474ea,2
+np.float64,0x40723B3133B54C5D,0x5a3c82c7c3a2b848,2
+#use 29th entry
+np.float64,0x4085E6454CE3B4AA,0x7f20319b9638d06a,2
+np.float64,0x408389F2A0585D4B,0x7850667c58aab3d0,2
+np.float64,0xC0382798F9C8AE69,0x3dc1c79fe8739d6d,2
+np.float64,0xC08299D827608418,0x0a4335f76cdbaeb5,2
+#use 30th entry
+np.float64,0xC06F3DED43301BF1,0x2965670ae46750a8,2
+np.float64,0xC070CAF6BDD577D9,0x27b4aa4ffdd29981,2
+np.float64,0x4078529AD4B2D9F2,0x6305c12755d5e0a6,2
+np.float64,0xC055B14E75A31B96,0x381c2eda6d111e5d,2
+#use 31st entry
+np.float64,0x407B13EE414FA931,0x6700772c7544564d,2
+np.float64,0x407EAFDE9DE3EC54,0x6c346a0e49724a3c,2
+np.float64,0xC08362F398B9530D,0x07ffeddbadf980cb,2
+np.float64,0x407E865CDD9EEB86,0x6bf866cac5e0d126,2
+#use 32th entry
+np.float64,0x407FB62DBC794C86,0x6db009f708ac62cb,2
+np.float64,0xC063D0BAA68CDDDE,0x31a3b2a51ce50430,2
+np.float64,0xC05E7706A2231394,0x34f24bead6fab5c9,2
+np.float64,0x4083E3A06FDE444E,0x79527b7a386d1937,2
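
The exp table above groups its vectors by regime: denormal inputs, the float32 overflow threshold near 88.7228 (0x42b17217 is the last finite case, 0x42b17218 overflows to inf), underflow to zero, and the float64 bounds noted in its `## between -745.13321910 and 709.78271289 ##` comment. A hedged sketch of a bulk checker for a whole file of this format follows; the file path and the choice of ufunc are assumptions for the example, not numpy's actual runner.

import csv
import numpy as np

def check_validation_csv(path, func):
    """Check every data row of a umath-validation-set CSV against its ULP tol."""
    worst = 0
    with open(path) as fh:
        for row in csv.reader(fh):
            # skip blank lines, '#'/'##' comment rows, and the header row
            if not row or row[0].startswith("#") or row[0] == "dtype":
                continue
            dtype_name, in_hex, out_hex, tol = row
            ftype = np.dtype(dtype_name.replace("np.", ""))
            utype = {4: np.uint32, 8: np.uint64}[ftype.itemsize]
            x = np.array(int(in_hex, 16), dtype=utype).view(ftype)
            ref = np.array(int(out_hex, 16), dtype=utype).view(ftype)
            got = func(x)
            if np.isnan(ref) and np.isnan(got):
                continue                      # any NaN satisfies a NaN row
            err = abs(int(got.view(utype)) - int(ref.view(utype)))
            worst = max(worst, err)
            assert err <= int(tol), (row, err)
    return worst

# e.g. check_validation_csv("umath-validation-set-exp.csv", np.exp)
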
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv
new file mode 100644
index 00000000..e19e9ebd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-exp2.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbdfe94b0,0x3f6adda6,2
+np.float32,0x3f20f8f8,0x3fc5ec69,2
+np.float32,0x7040b5,0x3f800000,2
+np.float32,0x30ec5,0x3f800000,2
+np.float32,0x3eb63070,0x3fa3ce29,2
+np.float32,0xff4dda3d,0x0,2
+np.float32,0x805b832f,0x3f800000,2
+np.float32,0x3e883fb7,0x3f99ed8c,2
+np.float32,0x3f14d71f,0x3fbf8708,2
+np.float32,0xff7b1e55,0x0,2
+np.float32,0xbf691ac6,0x3f082fa2,2
+np.float32,0x7ee3e6ab,0x7f800000,2
+np.float32,0xbec6e2b4,0x3f439248,2
+np.float32,0xbf5f5ec2,0x3f0bd2c0,2
+np.float32,0x8025cc2c,0x3f800000,2
+np.float32,0x7e0d7672,0x7f800000,2
+np.float32,0xff4bbc5c,0x0,2
+np.float32,0xbd94fb30,0x3f73696b,2
+np.float32,0x6cc079,0x3f800000,2
+np.float32,0x803cf080,0x3f800000,2
+np.float32,0x71d418,0x3f800000,2
+np.float32,0xbf24a442,0x3f23ec1e,2
+np.float32,0xbe6c9510,0x3f5a1e1d,2
+np.float32,0xbe8fb284,0x3f52be38,2
+np.float32,0x7ea64754,0x7f800000,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0x80620cfd,0x3f800000,2
+np.float32,0x3f3e20e8,0x3fd62e72,2
+np.float32,0x3f384600,0x3fd2d00e,2
+np.float32,0xff362150,0x0,2
+np.float32,0xbf349fa8,0x3f1cfaef,2
+np.float32,0xbf776cf2,0x3f0301a6,2
+np.float32,0x8021fc60,0x3f800000,2
+np.float32,0xbdb75280,0x3f70995c,2
+np.float32,0x7e9363a6,0x7f800000,2
+np.float32,0x7e728422,0x7f800000,2
+np.float32,0xfe91edc2,0x0,2
+np.float32,0x3f5f438c,0x3fea491d,2
+np.float32,0x3f2afae9,0x3fcb5c1f,2
+np.float32,0xbef8e766,0x3f36c448,2
+np.float32,0xba522c00,0x3f7fdb97,2
+np.float32,0xff18ee8c,0x0,2
+np.float32,0xbee8c5f4,0x3f3acd44,2
+np.float32,0x3e790448,0x3f97802c,2
+np.float32,0x3e8c9541,0x3f9ad571,2
+np.float32,0xbf03fa9f,0x3f331460,2
+np.float32,0x801ee053,0x3f800000,2
+np.float32,0xbf773230,0x3f03167f,2
+np.float32,0x356fd9,0x3f800000,2
+np.float32,0x8009cd88,0x3f800000,2
+np.float32,0x7f2bac51,0x7f800000,2
+np.float32,0x4d9eeb,0x3f800000,2
+np.float32,0x3133,0x3f800000,2
+np.float32,0x7f4290e0,0x7f800000,2
+np.float32,0xbf5e6523,0x3f0c3161,2
+np.float32,0x3f19182e,0x3fc1bf10,2
+np.float32,0x7e1248bb,0x7f800000,2
+np.float32,0xff5f7aae,0x0,2
+np.float32,0x7e8557b5,0x7f800000,2
+np.float32,0x26fc7f,0x3f800000,2
+np.float32,0x80397d61,0x3f800000,2
+np.float32,0x3cb1825d,0x3f81efe0,2
+np.float32,0x3ed808d0,0x3fab7c45,2
+np.float32,0xbf6f668a,0x3f05e259,2
+np.float32,0x3e3c7802,0x3f916abd,2
+np.float32,0xbd5ac5a0,0x3f76b21b,2
+np.float32,0x805aa6c9,0x3f800000,2
+np.float32,0xbe4d6f68,0x3f5ec3e1,2
+np.float32,0x3f3108b2,0x3fceb87f,2
+np.float32,0x3ec385cc,0x3fa6c9fb,2
+np.float32,0xbe9fc1ce,0x3f4e35e8,2
+np.float32,0x43b68,0x3f800000,2
+np.float32,0x3ef0cdcc,0x3fb15557,2
+np.float32,0x3e3f729b,0x3f91b5e1,2
+np.float32,0x7f52a4df,0x7f800000,2
+np.float32,0xbf56da96,0x3f0f15b9,2
+np.float32,0xbf161d2b,0x3f2a7faf,2
+np.float32,0x3e8df763,0x3f9b1fbe,2
+np.float32,0xff4f0780,0x0,2
+np.float32,0x8048f594,0x3f800000,2
+np.float32,0x3e62bb1d,0x3f953b7e,2
+np.float32,0xfe58e764,0x0,2
+np.float32,0x3dd2c922,0x3f897718,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0xff07b4b2,0x0,2
+np.float32,0x7f6231a0,0x7f800000,2
+np.float32,0xb8d1d,0x3f800000,2
+np.float32,0x3ee01d24,0x3fad5f16,2
+np.float32,0xbf43f59f,0x3f169869,2
+np.float32,0x801f5257,0x3f800000,2
+np.float32,0x803c15d8,0x3f800000,2
+np.float32,0x3f171a08,0x3fc0b42a,2
+np.float32,0x127aef,0x3f800000,2
+np.float32,0xfd1c6,0x3f800000,2
+np.float32,0x3f1ed13e,0x3fc4c59a,2
+np.float32,0x57fd4f,0x3f800000,2
+np.float32,0x6e8c61,0x3f800000,2
+np.float32,0x804019ab,0x3f800000,2
+np.float32,0x3ef4e5c6,0x3fb251a1,2
+np.float32,0x5044c3,0x3f800000,2
+np.float32,0x3f04460f,0x3fb7204b,2
+np.float32,0x7e326b47,0x7f800000,2
+np.float32,0x800a7e4c,0x3f800000,2
+np.float32,0xbf47ec82,0x3f14fccc,2
+np.float32,0xbedb1b3e,0x3f3e4a4d,2
+np.float32,0x3f741d86,0x3ff7e4b0,2
+np.float32,0xbe249d20,0x3f6501a6,2
+np.float32,0xbf2ea152,0x3f1f8c68,2
+np.float32,0x3ec6dbcc,0x3fa78b3f,2
+np.float32,0x7ebd9bb4,0x7f800000,2
+np.float32,0x3f61b574,0x3febd77a,2
+np.float32,0x3f3dfb2b,0x3fd61891,2
+np.float32,0x3c7d95,0x3f800000,2
+np.float32,0x8071e840,0x3f800000,2
+np.float32,0x15c6fe,0x3f800000,2
+np.float32,0xbf096601,0x3f307893,2
+np.float32,0x7f5c2ef9,0x7f800000,2
+np.float32,0xbe79f750,0x3f582689,2
+np.float32,0x1eb692,0x3f800000,2
+np.float32,0xbd8024f0,0x3f75226d,2
+np.float32,0xbf5a8be8,0x3f0da950,2
+np.float32,0xbf4d28f3,0x3f12e3e1,2
+np.float32,0x7f800000,0x7f800000,2
+np.float32,0xfea8a758,0x0,2
+np.float32,0x8075d2cf,0x3f800000,2
+np.float32,0xfd99af58,0x0,2
+np.float32,0x9e6a,0x3f800000,2
+np.float32,0x2fa19f,0x3f800000,2
+np.float32,0x3e9f4206,0x3f9ecc56,2
+np.float32,0xbee0b666,0x3f3cd9fc,2
+np.float32,0xbec558c4,0x3f43fab1,2
+np.float32,0x7e9a77df,0x7f800000,2
+np.float32,0xff3a9694,0x0,2
+np.float32,0x3f3b3708,0x3fd47f9a,2
+np.float32,0x807cd6d4,0x3f800000,2
+np.float32,0x804aa422,0x3f800000,2
+np.float32,0xfead7a70,0x0,2
+np.float32,0x3f08c610,0x3fb95efe,2
+np.float32,0xff390126,0x0,2
+np.float32,0x5d2d47,0x3f800000,2
+np.float32,0x8006849c,0x3f800000,2
+np.float32,0x654f6e,0x3f800000,2
+np.float32,0xff478a16,0x0,2
+np.float32,0x3f480b0c,0x3fdc024c,2
+np.float32,0xbc3b96c0,0x3f7df9f4,2
+np.float32,0xbcc96460,0x3f7bacb5,2
+np.float32,0x7f349f30,0x7f800000,2
+np.float32,0xbe08fa98,0x3f6954a1,2
+np.float32,0x4f3a13,0x3f800000,2
+np.float32,0x7f6a5ab4,0x7f800000,2
+np.float32,0x7eb85247,0x7f800000,2
+np.float32,0xbf287246,0x3f223e08,2
+np.float32,0x801584d0,0x3f800000,2
+np.float32,0x7ec25371,0x7f800000,2
+np.float32,0x3f002165,0x3fb51552,2
+np.float32,0x3e1108a8,0x3f8d3429,2
+np.float32,0x4f0f88,0x3f800000,2
+np.float32,0x7f67c1ce,0x7f800000,2
+np.float32,0xbf4348f8,0x3f16dedf,2
+np.float32,0xbe292b64,0x3f644d24,2
+np.float32,0xbf2bfa36,0x3f20b2d6,2
+np.float32,0xbf2a6e58,0x3f215f71,2
+np.float32,0x3e97d5d3,0x3f9d35df,2
+np.float32,0x31f597,0x3f800000,2
+np.float32,0x100544,0x3f800000,2
+np.float32,0x10a197,0x3f800000,2
+np.float32,0x3f44df50,0x3fda20d2,2
+np.float32,0x59916d,0x3f800000,2
+np.float32,0x707472,0x3f800000,2
+np.float32,0x8054194e,0x3f800000,2
+np.float32,0x80627b01,0x3f800000,2
+np.float32,0x7f4d5a5b,0x7f800000,2
+np.float32,0xbcecad00,0x3f7aeca5,2
+np.float32,0xff69c541,0x0,2
+np.float32,0xbe164e20,0x3f673c3a,2
+np.float32,0x3dd321de,0x3f897b39,2
+np.float32,0x3c9c4900,0x3f81b431,2
+np.float32,0x7f0efae3,0x7f800000,2
+np.float32,0xbf1b3ee6,0x3f282567,2
+np.float32,0x3ee858ac,0x3faf5083,2
+np.float32,0x3f0e6a39,0x3fbc3965,2
+np.float32,0x7f0c06d8,0x7f800000,2
+np.float32,0x801dd236,0x3f800000,2
+np.float32,0x564245,0x3f800000,2
+np.float32,0x7e99d3ad,0x7f800000,2
+np.float32,0xff3b0164,0x0,2
+np.float32,0x3f386f18,0x3fd2e785,2
+np.float32,0x7f603c39,0x7f800000,2
+np.float32,0x3cbd9b00,0x3f8211f0,2
+np.float32,0x2178e2,0x3f800000,2
+np.float32,0x5db226,0x3f800000,2
+np.float32,0xfec78d62,0x0,2
+np.float32,0x7f40bc1e,0x7f800000,2
+np.float32,0x80325064,0x3f800000,2
+np.float32,0x3f6068dc,0x3feb0377,2
+np.float32,0xfe8b95c6,0x0,2
+np.float32,0xbe496894,0x3f5f5f87,2
+np.float32,0xbf18722a,0x3f296cf4,2
+np.float32,0x332d0e,0x3f800000,2
+np.float32,0x3f6329dc,0x3fecc5c0,2
+np.float32,0x807d1802,0x3f800000,2
+np.float32,0x3e8afcee,0x3f9a7ff1,2
+np.float32,0x26a0a7,0x3f800000,2
+np.float32,0x7f13085d,0x7f800000,2
+np.float32,0x68d547,0x3f800000,2
+np.float32,0x7e9b04ae,0x7f800000,2
+np.float32,0x3f3ecdfe,0x3fd692ea,2
+np.float32,0x805256f4,0x3f800000,2
+np.float32,0x3f312dc8,0x3fcecd42,2
+np.float32,0x23ca15,0x3f800000,2
+np.float32,0x3f53c455,0x3fe31ad6,2
+np.float32,0xbf21186c,0x3f2580fd,2
+np.float32,0x803b9bb1,0x3f800000,2
+np.float32,0xff6ae1fc,0x0,2
+np.float32,0x2103cf,0x3f800000,2
+np.float32,0xbedcec6c,0x3f3dd29d,2
+np.float32,0x7f520afa,0x7f800000,2
+np.float32,0x7e8b44f2,0x7f800000,2
+np.float32,0xfef7f6ce,0x0,2
+np.float32,0xbd5e7c30,0x3f768a6f,2
+np.float32,0xfeb36848,0x0,2
+np.float32,0xff49effb,0x0,2
+np.float32,0xbec207c0,0x3f44dc74,2
+np.float32,0x3e91147f,0x3f9bc77f,2
+np.float32,0xfe784cd4,0x0,2
+np.float32,0xfd1a7250,0x0,2
+np.float32,0xff3b3f48,0x0,2
+np.float32,0x3f685db5,0x3ff0219f,2
+np.float32,0x3f370976,0x3fd21bae,2
+np.float32,0xfed4cc20,0x0,2
+np.float32,0xbf41e337,0x3f17714a,2
+np.float32,0xbf4e8638,0x3f12593a,2
+np.float32,0x3edaf0f1,0x3fac295e,2
+np.float32,0x803cbb4f,0x3f800000,2
+np.float32,0x7f492043,0x7f800000,2
+np.float32,0x2cabcf,0x3f800000,2
+np.float32,0x17f8ac,0x3f800000,2
+np.float32,0x3e846478,0x3f99205a,2
+np.float32,0x76948f,0x3f800000,2
+np.float32,0x1,0x3f800000,2
+np.float32,0x7ea6419e,0x7f800000,2
+np.float32,0xa5315,0x3f800000,2
+np.float32,0xff3a8e32,0x0,2
+np.float32,0xbe5714e8,0x3f5d50b7,2
+np.float32,0xfeadf960,0x0,2
+np.float32,0x3ebbd1a8,0x3fa50efc,2
+np.float32,0x7f31dce7,0x7f800000,2
+np.float32,0x80314999,0x3f800000,2
+np.float32,0x8017f41b,0x3f800000,2
+np.float32,0x7ed6d051,0x7f800000,2
+np.float32,0x7f525688,0x7f800000,2
+np.float32,0x7f7fffff,0x7f800000,2
+np.float32,0x3e8b0461,0x3f9a8180,2
+np.float32,0x3d9fe46e,0x3f871e1f,2
+np.float32,0x5e6d8f,0x3f800000,2
+np.float32,0xbf09ae55,0x3f305608,2
+np.float32,0xfe7028c4,0x0,2
+np.float32,0x7f3ade56,0x7f800000,2
+np.float32,0xff4c9ef9,0x0,2
+np.float32,0x7e3199cf,0x7f800000,2
+np.float32,0x8048652f,0x3f800000,2
+np.float32,0x805e1237,0x3f800000,2
+np.float32,0x189ed8,0x3f800000,2
+np.float32,0xbea7c094,0x3f4bfd98,2
+np.float32,0xbf2f109c,0x3f1f5c5c,2
+np.float32,0xbf0e7f4c,0x3f2e0d2c,2
+np.float32,0x8005981f,0x3f800000,2
+np.float32,0xbf762005,0x3f0377f3,2
+np.float32,0xbf0f60ab,0x3f2da317,2
+np.float32,0xbf4aa3e7,0x3f13e54e,2
+np.float32,0xbf348fd2,0x3f1d01aa,2
+np.float32,0x3e530b50,0x3f93a7fb,2
+np.float32,0xbf0b05a4,0x3f2fb26a,2
+np.float32,0x3eea416c,0x3fafc4aa,2
+np.float32,0x805ad04d,0x3f800000,2
+np.float32,0xbf6328d8,0x3f0a655e,2
+np.float32,0x3f7347b9,0x3ff75558,2
+np.float32,0xfda3ca68,0x0,2
+np.float32,0x80497d21,0x3f800000,2
+np.float32,0x3e740452,0x3f96fd22,2
+np.float32,0x3e528e57,0x3f939b7e,2
+np.float32,0x3e9e19fa,0x3f9e8cbd,2
+np.float32,0x8078060b,0x3f800000,2
+np.float32,0x3f3fea7a,0x3fd73872,2
+np.float32,0xfcfa30a0,0x0,2
+np.float32,0x7f4eb4bf,0x7f800000,2
+np.float32,0x3f712618,0x3ff5e900,2
+np.float32,0xbf668f0e,0x3f0920c6,2
+np.float32,0x3f3001e9,0x3fce259d,2
+np.float32,0xbe9b6fac,0x3f4f6b9c,2
+np.float32,0xbf61fcf3,0x3f0ad5ec,2
+np.float32,0xff08a55c,0x0,2
+np.float32,0x3e805014,0x3f984872,2
+np.float32,0x6ce04c,0x3f800000,2
+np.float32,0x7f7cbc07,0x7f800000,2
+np.float32,0x3c87dc,0x3f800000,2
+np.float32,0x3f2ee498,0x3fcd869a,2
+np.float32,0x4b1116,0x3f800000,2
+np.float32,0x3d382d06,0x3f840d5f,2
+np.float32,0xff7de21e,0x0,2
+np.float32,0x3f2f1d6d,0x3fcda63c,2
+np.float32,0xbf1c1618,0x3f27c38a,2
+np.float32,0xff4264b1,0x0,2
+np.float32,0x8026e5e7,0x3f800000,2
+np.float32,0xbe6fa180,0x3f59ab02,2
+np.float32,0xbe923c02,0x3f52053b,2
+np.float32,0xff3aa453,0x0,2
+np.float32,0x3f77a7ac,0x3ffa47d0,2
+np.float32,0xbed15f36,0x3f40d08a,2
+np.float32,0xa62d,0x3f800000,2
+np.float32,0xbf342038,0x3f1d3123,2
+np.float32,0x7f2f7f80,0x7f800000,2
+np.float32,0x7f2b6fc1,0x7f800000,2
+np.float32,0xff323540,0x0,2
+np.float32,0x3f1a2b6e,0x3fc24faa,2
+np.float32,0x800cc1d2,0x3f800000,2
+np.float32,0xff38fa01,0x0,2
+np.float32,0x80800000,0x3f800000,2
+np.float32,0xbf3d22e0,0x3f196745,2
+np.float32,0x7f40fd62,0x7f800000,2
+np.float32,0x7e1785c7,0x7f800000,2
+np.float32,0x807408c4,0x3f800000,2
+np.float32,0xbf300192,0x3f1ef485,2
+np.float32,0x351e3d,0x3f800000,2
+np.float32,0x7f5ab736,0x7f800000,2
+np.float32,0x2f1696,0x3f800000,2
+np.float32,0x806ac5d7,0x3f800000,2
+np.float32,0x42ec59,0x3f800000,2
+np.float32,0x7f79f52d,0x7f800000,2
+np.float32,0x44ad28,0x3f800000,2
+np.float32,0xbf49dc9c,0x3f143532,2
+np.float32,0x3f6c1f1f,0x3ff295e7,2
+np.float32,0x1589b3,0x3f800000,2
+np.float32,0x3f49b44e,0x3fdd0031,2
+np.float32,0x7f5942c9,0x7f800000,2
+np.float32,0x3f2dab28,0x3fccd877,2
+np.float32,0xff7fffff,0x0,2
+np.float32,0x80578eb2,0x3f800000,2
+np.float32,0x3f39ba67,0x3fd3a50b,2
+np.float32,0x8020340d,0x3f800000,2
+np.float32,0xbf6025b2,0x3f0b8783,2
+np.float32,0x8015ccfe,0x3f800000,2
+np.float32,0x3f6b9762,0x3ff23cd0,2
+np.float32,0xfeeb0c86,0x0,2
+np.float32,0x802779bc,0x3f800000,2
+np.float32,0xbf32bf64,0x3f1dc796,2
+np.float32,0xbf577eb6,0x3f0ed631,2
+np.float32,0x0,0x3f800000,2
+np.float32,0xfe99de6c,0x0,2
+np.float32,0x7a4e53,0x3f800000,2
+np.float32,0x1a15d3,0x3f800000,2
+np.float32,0x8035fe16,0x3f800000,2
+np.float32,0x3e845784,0x3f991dab,2
+np.float32,0x43d688,0x3f800000,2
+np.float32,0xbd447cc0,0x3f77a0b7,2
+np.float32,0x3f83fa,0x3f800000,2
+np.float32,0x3f141df2,0x3fbf2719,2
+np.float32,0x805c586a,0x3f800000,2
+np.float32,0x14c47e,0x3f800000,2
+np.float32,0x3d3bed00,0x3f8422d4,2
+np.float32,0x7f6f4ecd,0x7f800000,2
+np.float32,0x3f0a5e5a,0x3fba2c5c,2
+np.float32,0x523ecf,0x3f800000,2
+np.float32,0xbef4a6e8,0x3f37d262,2
+np.float32,0xff54eb58,0x0,2
+np.float32,0xff3fc875,0x0,2
+np.float32,0x8067c392,0x3f800000,2
+np.float32,0xfedae910,0x0,2
+np.float32,0x80595979,0x3f800000,2
+np.float32,0x3ee87d1d,0x3faf5929,2
+np.float32,0x7f5bad33,0x7f800000,2
+np.float32,0xbf45b868,0x3f15e109,2
+np.float32,0x3ef2277d,0x3fb1a868,2
+np.float32,0x3ca5a950,0x3f81ce8c,2
+np.float32,0x3e70f4e6,0x3f96ad25,2
+np.float32,0xfe3515bc,0x0,2
+np.float32,0xfe4af088,0x0,2
+np.float32,0xff3c78b2,0x0,2
+np.float32,0x7f50f51a,0x7f800000,2
+np.float32,0x3e3a232a,0x3f913009,2
+np.float32,0x7dfec6ff,0x7f800000,2
+np.float32,0x3e1bbaec,0x3f8e3ad6,2
+np.float32,0xbd658fa0,0x3f763ee7,2
+np.float32,0xfe958684,0x0,2
+np.float32,0x503670,0x3f800000,2
+np.float32,0x3f800000,0x40000000,2
+np.float32,0x1bbec6,0x3f800000,2
+np.float32,0xbea7bb7c,0x3f4bff00,2
+np.float32,0xff3a24a2,0x0,2
+np.float32,0xbf416240,0x3f17a635,2
+np.float32,0xbf800000,0x3f000000,2
+np.float32,0xff0c965c,0x0,2
+np.float32,0x80000000,0x3f800000,2
+np.float32,0xbec2c69a,0x3f44a99e,2
+np.float32,0x5b68d4,0x3f800000,2
+np.float32,0xb9a93000,0x3f7ff158,2
+np.float32,0x3d5a0dd8,0x3f84cfbc,2
+np.float32,0xbeaf7a28,0x3f49de4e,2
+np.float32,0x3ee83555,0x3faf4820,2
+np.float32,0xfd320330,0x0,2
+np.float32,0xe1af2,0x3f800000,2
+np.float32,0x7cf28caf,0x7f800000,2
+np.float32,0x80781009,0x3f800000,2
+np.float32,0xbf1e0baf,0x3f26e04d,2
+np.float32,0x7edb05b1,0x7f800000,2
+np.float32,0x3de004,0x3f800000,2
+np.float32,0xff436af6,0x0,2
+np.float32,0x802a9408,0x3f800000,2
+np.float32,0x7ed82205,0x7f800000,2
+np.float32,0x3e3f8212,0x3f91b767,2
+np.float32,0x16a2b2,0x3f800000,2
+np.float32,0xff1e5af3,0x0,2
+np.float32,0xbf1c860c,0x3f2790b7,2
+np.float32,0x3f3bc5da,0x3fd4d1d6,2
+np.float32,0x7f5f7085,0x7f800000,2
+np.float32,0x7f68e409,0x7f800000,2
+np.float32,0x7f4b3388,0x7f800000,2
+np.float32,0x7ecaf440,0x7f800000,2
+np.float32,0x80078785,0x3f800000,2
+np.float32,0x3ebd800d,0x3fa56f45,2
+np.float32,0xbe39a140,0x3f61c58e,2
+np.float32,0x803b587e,0x3f800000,2
+np.float32,0xbeaaa418,0x3f4b31c4,2
+np.float32,0xff7e2b9f,0x0,2
+np.float32,0xff5180a3,0x0,2
+np.float32,0xbf291394,0x3f21f73c,2
+np.float32,0x7f7b9698,0x7f800000,2
+np.float32,0x4218da,0x3f800000,2
+np.float32,0x7f135262,0x7f800000,2
+np.float32,0x804c10e8,0x3f800000,2
+np.float32,0xbf1c2a54,0x3f27ba5a,2
+np.float32,0x7f41fd32,0x7f800000,2
+np.float32,0x3e5cc464,0x3f94a195,2
+np.float32,0xff7a2fa7,0x0,2
+np.float32,0x3e05dc30,0x3f8c23c9,2
+np.float32,0x7f206d99,0x7f800000,2
+np.float32,0xbe9ae520,0x3f4f9287,2
+np.float32,0xfe4f4d58,0x0,2
+np.float32,0xbf44db42,0x3f163ae3,2
+np.float32,0x3f65ac48,0x3fee6300,2
+np.float32,0x3ebfaf36,0x3fa5ecb0,2
+np.float32,0x3f466719,0x3fdb08b0,2
+np.float32,0x80000001,0x3f800000,2
+np.float32,0xff4b3c7b,0x0,2
+np.float32,0x3df44374,0x3f8b0819,2
+np.float32,0xfea4b540,0x0,2
+np.float32,0x7f358e3d,0x7f800000,2
+np.float32,0x801f5e63,0x3f800000,2
+np.float32,0x804ae77e,0x3f800000,2
+np.float32,0xdbb5,0x3f800000,2
+np.float32,0x7f0a7e3b,0x7f800000,2
+np.float32,0xbe4152e4,0x3f609953,2
+np.float32,0x4b9579,0x3f800000,2
+np.float32,0x3ece0bd4,0x3fa92ea5,2
+np.float32,0x7e499d9a,0x7f800000,2
+np.float32,0x80637d8a,0x3f800000,2
+np.float32,0x3e50a425,0x3f936a8b,2
+np.float32,0xbf0e8cb0,0x3f2e06dd,2
+np.float32,0x802763e2,0x3f800000,2
+np.float32,0xff73041b,0x0,2
+np.float32,0xfea466da,0x0,2
+np.float32,0x80064c73,0x3f800000,2
+np.float32,0xbef29222,0x3f385728,2
+np.float32,0x8029c215,0x3f800000,2
+np.float32,0xbd3994e0,0x3f7815d1,2
+np.float32,0xbe6ac9e4,0x3f5a61f3,2
+np.float32,0x804b58b0,0x3f800000,2
+np.float32,0xbdb83be0,0x3f70865c,2
+np.float32,0x7ee18da2,0x7f800000,2
+np.float32,0xfd4ca010,0x0,2
+np.float32,0x807c668b,0x3f800000,2
+np.float32,0xbd40ed90,0x3f77c6e9,2
+np.float32,0x7efc6881,0x7f800000,2
+np.float32,0xfe633bfc,0x0,2
+np.float32,0x803ce363,0x3f800000,2
+np.float32,0x7ecba81e,0x7f800000,2
+np.float32,0xfdcb2378,0x0,2
+np.float32,0xbebc5524,0x3f4662b2,2
+np.float32,0xfaa30000,0x0,2
+np.float32,0x805d451b,0x3f800000,2
+np.float32,0xbee85600,0x3f3ae996,2
+np.float32,0xfefb0a54,0x0,2
+np.float32,0xbdfc6690,0x3f6b0a08,2
+np.float32,0x58a57,0x3f800000,2
+np.float32,0x3b41b7,0x3f800000,2
+np.float32,0x7c99812d,0x7f800000,2
+np.float32,0xbd3ae740,0x3f78079d,2
+np.float32,0xbf4a48a7,0x3f1409dd,2
+np.float32,0xfdeaad58,0x0,2
+np.float32,0xbe9aa65a,0x3f4fa42c,2
+np.float32,0x3f79d78c,0x3ffbc458,2
+np.float32,0x805e7389,0x3f800000,2
+np.float32,0x7ebb3612,0x7f800000,2
+np.float32,0x2e27dc,0x3f800000,2
+np.float32,0x80726dec,0x3f800000,2
+np.float32,0xfe8fb738,0x0,2
+np.float32,0xff1ff3bd,0x0,2
+np.float32,0x7f5264a2,0x7f800000,2
+np.float32,0x3f5a6893,0x3fe739ca,2
+np.float32,0xbec4029c,0x3f44558d,2
+np.float32,0xbef65cfa,0x3f37657e,2
+np.float32,0x63aba1,0x3f800000,2
+np.float32,0xfbb6e200,0x0,2
+np.float32,0xbf3466fc,0x3f1d1307,2
+np.float32,0x3f258844,0x3fc861d7,2
+np.float32,0xbf5f29a7,0x3f0be6dc,2
+np.float32,0x802b51cd,0x3f800000,2
+np.float32,0xbe9094dc,0x3f527dae,2
+np.float32,0xfec2e68c,0x0,2
+np.float32,0x807b38bd,0x3f800000,2
+np.float32,0xbf594662,0x3f0e2663,2
+np.float32,0x7cbcf747,0x7f800000,2
+np.float32,0xbe4b88f0,0x3f5f0d47,2
+np.float32,0x3c53c4,0x3f800000,2
+np.float32,0xbe883562,0x3f54e3f7,2
+np.float32,0xbf1efaf0,0x3f267456,2
+np.float32,0x3e22cd3e,0x3f8ee98b,2
+np.float32,0x80434875,0x3f800000,2
+np.float32,0xbf000b44,0x3f34ff6e,2
+np.float32,0x7f311c3a,0x7f800000,2
+np.float32,0x802f7f3f,0x3f800000,2
+np.float32,0x805155fe,0x3f800000,2
+np.float32,0x7f5d7485,0x7f800000,2
+np.float32,0x80119197,0x3f800000,2
+np.float32,0x3f445b8b,0x3fd9d30d,2
+np.float32,0xbf638eb3,0x3f0a3f38,2
+np.float32,0x402410,0x3f800000,2
+np.float32,0xbc578a40,0x3f7dad1d,2
+np.float32,0xbeecbf8a,0x3f39cc9e,2
+np.float32,0x7f2935a4,0x7f800000,2
+np.float32,0x3f570fea,0x3fe523e2,2
+np.float32,0xbf06bffa,0x3f31bdb6,2
+np.float32,0xbf2afdfd,0x3f2120ba,2
+np.float32,0x7f76f7ab,0x7f800000,2
+np.float32,0xfee2d1e8,0x0,2
+np.float32,0x800b026d,0x3f800000,2
+np.float32,0xff0eda75,0x0,2
+np.float32,0x3d4c,0x3f800000,2
+np.float32,0xbed538a2,0x3f3fcffb,2
+np.float32,0x3f73f4f9,0x3ff7c979,2
+np.float32,0x2aa9fc,0x3f800000,2
+np.float32,0x806a45b3,0x3f800000,2
+np.float32,0xff770d35,0x0,2
+np.float32,0x7e999be3,0x7f800000,2
+np.float32,0x80741128,0x3f800000,2
+np.float32,0xff6aac34,0x0,2
+np.float32,0x470f74,0x3f800000,2
+np.float32,0xff423b7b,0x0,2
+np.float32,0x17dfdd,0x3f800000,2
+np.float32,0x7f029e12,0x7f800000,2
+np.float32,0x803fcb9d,0x3f800000,2
+np.float32,0x3f3dc3,0x3f800000,2
+np.float32,0x7f3a27bc,0x7f800000,2
+np.float32,0x3e473108,0x3f9279ec,2
+np.float32,0x7f4add5d,0x7f800000,2
+np.float32,0xfd9736e0,0x0,2
+np.float32,0x805f1df2,0x3f800000,2
+np.float32,0x6c49c1,0x3f800000,2
+np.float32,0x7ec733c7,0x7f800000,2
+np.float32,0x804c1abf,0x3f800000,2
+np.float32,0x3de2e887,0x3f8a37a5,2
+np.float32,0x3f51630a,0x3fe1a561,2
+np.float32,0x3de686a8,0x3f8a62ff,2
+np.float32,0xbedb3538,0x3f3e439c,2
+np.float32,0xbf3aa892,0x3f1a6f9e,2
+np.float32,0x7ee5fb32,0x7f800000,2
+np.float32,0x7e916c9b,0x7f800000,2
+np.float32,0x3f033f1c,0x3fb69e19,2
+np.float32,0x25324b,0x3f800000,2
+np.float32,0x3f348d1d,0x3fd0b2e2,2
+np.float32,0x3f5797e8,0x3fe57851,2
+np.float32,0xbf69c316,0x3f07f1a0,2
+np.float32,0xbe8b7fb0,0x3f53f1bf,2
+np.float32,0xbdbbc190,0x3f703d00,2
+np.float32,0xff6c4fc0,0x0,2
+np.float32,0x7f29fcbe,0x7f800000,2
+np.float32,0x3f678d19,0x3fef9a23,2
+np.float32,0x73d140,0x3f800000,2
+np.float32,0x3e25bdd2,0x3f8f326b,2
+np.float32,0xbeb775ec,0x3f47b2c6,2
+np.float32,0xff451c4d,0x0,2
+np.float32,0x8072c466,0x3f800000,2
+np.float32,0x3f65e836,0x3fee89b2,2
+np.float32,0x52ca7a,0x3f800000,2
+np.float32,0x62cfed,0x3f800000,2
+np.float32,0xbf583dd0,0x3f0e8c5c,2
+np.float32,0xbf683842,0x3f088342,2
+np.float32,0x3f1a7828,0x3fc2780c,2
+np.float32,0x800ea979,0x3f800000,2
+np.float32,0xbeb9133c,0x3f474328,2
+np.float32,0x3ef09fc7,0x3fb14a4b,2
+np.float32,0x7ebbcb75,0x7f800000,2
+np.float32,0xff316c0e,0x0,2
+np.float32,0x805b84e3,0x3f800000,2
+np.float32,0x3d6a55e0,0x3f852d8a,2
+np.float32,0x3e755788,0x3f971fd1,2
+np.float32,0x3ee7aacb,0x3faf2743,2
+np.float32,0x7f714039,0x7f800000,2
+np.float32,0xff70bad8,0x0,2
+np.float32,0xbe0b74c8,0x3f68f08c,2
+np.float32,0xbf6cb170,0x3f06de86,2
+np.float32,0x7ec1fbff,0x7f800000,2
+np.float32,0x8014b1f6,0x3f800000,2
+np.float32,0xfe8b45fe,0x0,2
+np.float32,0x6e2220,0x3f800000,2
+np.float32,0x3ed1777d,0x3fa9f7ab,2
+np.float32,0xff48e467,0x0,2
+np.float32,0xff76c5aa,0x0,2
+np.float32,0x3e9bd330,0x3f9e0fd7,2
+np.float32,0x3f17de4f,0x3fc11aae,2
+np.float32,0x7eeaa2fd,0x7f800000,2
+np.float32,0xbf572746,0x3f0ef806,2
+np.float32,0x7e235554,0x7f800000,2
+np.float32,0xfe24fc1c,0x0,2
+np.float32,0x7daf71ad,0x7f800000,2
+np.float32,0x800d4a6b,0x3f800000,2
+np.float32,0xbf6fc31d,0x3f05c0ce,2
+np.float32,0x1c4d93,0x3f800000,2
+np.float32,0x7ee9200c,0x7f800000,2
+np.float32,0x3f54b4da,0x3fe3aeec,2
+np.float32,0x2b37b1,0x3f800000,2
+np.float32,0x3f7468bd,0x3ff81731,2
+np.float32,0x3f2850ea,0x3fc9e5f4,2
+np.float32,0xbe0d47ac,0x3f68a6f9,2
+np.float32,0x314877,0x3f800000,2
+np.float32,0x802700c3,0x3f800000,2
+np.float32,0x7e2c915f,0x7f800000,2
+np.float32,0x800d0059,0x3f800000,2
+np.float32,0x3f7f3c25,0x3fff7862,2
+np.float32,0xff735d31,0x0,2
+np.float32,0xff7e339e,0x0,2
+np.float32,0xbef96cf0,0x3f36a340,2
+np.float32,0x3db6ea21,0x3f882cb2,2
+np.float32,0x67cb3d,0x3f800000,2
+np.float32,0x801f349d,0x3f800000,2
+np.float32,0x3f1390ec,0x3fbede29,2
+np.float32,0x7f13644a,0x7f800000,2
+np.float32,0x804a369b,0x3f800000,2
+np.float32,0x80262666,0x3f800000,2
+np.float32,0x7e850fbc,0x7f800000,2
+np.float32,0x18b002,0x3f800000,2
+np.float32,0x8051f1ed,0x3f800000,2
+np.float32,0x3eba48f6,0x3fa4b753,2
+np.float32,0xbf3f4130,0x3f1886a9,2
+np.float32,0xbedac006,0x3f3e61cf,2
+np.float32,0xbf097c70,0x3f306ddc,2
+np.float32,0x4aba6d,0x3f800000,2
+np.float32,0x580078,0x3f800000,2
+np.float32,0x3f64d82e,0x3fedda40,2
+np.float32,0x7f781fd6,0x7f800000,2
+np.float32,0x6aff3d,0x3f800000,2
+np.float32,0xff25e074,0x0,2
+np.float32,0x7ea9ec89,0x7f800000,2
+np.float32,0xbf63b816,0x3f0a2fbb,2
+np.float32,0x133f07,0x3f800000,2
+np.float32,0xff800000,0x0,2
+np.float32,0x8013dde7,0x3f800000,2
+np.float32,0xff770b95,0x0,2
+np.float32,0x806154e8,0x3f800000,2
+np.float32,0x3f1e7bce,0x3fc4981a,2
+np.float32,0xff262c78,0x0,2
+np.float32,0x3f59a652,0x3fe6c04c,2
+np.float32,0x7f220166,0x7f800000,2
+np.float32,0x7eb24939,0x7f800000,2
+np.float32,0xbed58bb0,0x3f3fba6a,2
+np.float32,0x3c2ad000,0x3f80eda7,2
+np.float32,0x2adb2e,0x3f800000,2
+np.float32,0xfe8b213e,0x0,2
+np.float32,0xbf2e0c1e,0x3f1fccea,2
+np.float32,0x7e1716be,0x7f800000,2
+np.float32,0x80184e73,0x3f800000,2
+np.float32,0xbf254743,0x3f23a3d5,2
+np.float32,0x8063a722,0x3f800000,2
+np.float32,0xbe50adf0,0x3f5e46c7,2
+np.float32,0x3f614158,0x3feb8d60,2
+np.float32,0x8014bbc8,0x3f800000,2
+np.float32,0x283bc7,0x3f800000,2
+np.float32,0x3ffb5c,0x3f800000,2
+np.float32,0xfe8de6bc,0x0,2
+np.float32,0xbea6e086,0x3f4c3b82,2
+np.float32,0xfee64b92,0x0,2
+np.float32,0x506c1a,0x3f800000,2
+np.float32,0xff342af8,0x0,2
+np.float32,0x6b6f4c,0x3f800000,2
+np.float32,0xfeb42b1e,0x0,2
+np.float32,0x3e49384a,0x3f92ad71,2
+np.float32,0x152d08,0x3f800000,2
+np.float32,0x804c8f09,0x3f800000,2
+np.float32,0xff5e927d,0x0,2
+np.float32,0x6374da,0x3f800000,2
+np.float32,0x3f48f011,0x3fdc8ae4,2
+np.float32,0xbf446a30,0x3f1668e8,2
+np.float32,0x3ee77073,0x3faf196e,2
+np.float32,0xff4caa40,0x0,2
+np.float32,0x7efc9363,0x7f800000,2
+np.float32,0xbf706dcc,0x3f05830d,2
+np.float32,0xfe29c7e8,0x0,2
+np.float32,0x803cfe58,0x3f800000,2
+np.float32,0x3ec34c7c,0x3fa6bd0a,2
+np.float32,0x3eb85b62,0x3fa44968,2
+np.float32,0xfda1b9d8,0x0,2
+np.float32,0x802932cd,0x3f800000,2
+np.float32,0xbf5cde78,0x3f0cc5fa,2
+np.float32,0x3f31bf44,0x3fcf1ec8,2
+np.float32,0x803a0882,0x3f800000,2
+np.float32,0x800000,0x3f800000,2
+np.float32,0x3f54110e,0x3fe34a08,2
+np.float32,0x80645ea9,0x3f800000,2
+np.float32,0xbd8c1070,0x3f7425c3,2
+np.float32,0x801a006a,0x3f800000,2
+np.float32,0x7f5d161e,0x7f800000,2
+np.float32,0x805b5df3,0x3f800000,2
+np.float32,0xbf71a7c0,0x3f0511be,2
+np.float32,0xbe9a55c0,0x3f4fbad6,2
+np.float64,0xde7e2fd9bcfc6,0x3ff0000000000000,2
+np.float64,0xbfd8cd88eb319b12,0x3fe876349efbfa2b,2
+np.float64,0x3fe4fa13ace9f428,0x3ff933fbb117d196,2
+np.float64,0x475b3d048eb68,0x3ff0000000000000,2
+np.float64,0x7fef39ed07be73d9,0x7ff0000000000000,2
+np.float64,0x80026b84d904d70a,0x3ff0000000000000,2
+np.float64,0xebd60627d7ac1,0x3ff0000000000000,2
+np.float64,0xbfd7cbefdbaf97e0,0x3fe8bad30f6cf8e1,2
+np.float64,0x7fc17c605a22f8c0,0x7ff0000000000000,2
+np.float64,0x8cdac05119b58,0x3ff0000000000000,2
+np.float64,0x3fc45cd60a28b9ac,0x3ff1dd8028ec3f41,2
+np.float64,0x7fef4fce137e9f9b,0x7ff0000000000000,2
+np.float64,0xe5a2b819cb457,0x3ff0000000000000,2
+np.float64,0xe3bcfd4dc77a0,0x3ff0000000000000,2
+np.float64,0x68f0b670d1e17,0x3ff0000000000000,2
+np.float64,0xae69a6455cd35,0x3ff0000000000000,2
+np.float64,0xffe7007a0c6e00f4,0x0,2
+np.float64,0x59fc57a8b3f8c,0x3ff0000000000000,2
+np.float64,0xbfeee429c0bdc854,0x3fe0638fa62bed9f,2
+np.float64,0x80030bb6e206176f,0x3ff0000000000000,2
+np.float64,0x8006967a36ad2cf5,0x3ff0000000000000,2
+np.float64,0x3fe128176a22502f,0x3ff73393301e5dc8,2
+np.float64,0x218de20c431bd,0x3ff0000000000000,2
+np.float64,0x3fe7dbc48aafb789,0x3ffad38989b5955c,2
+np.float64,0xffda1ef411343de8,0x0,2
+np.float64,0xc6b392838d673,0x3ff0000000000000,2
+np.float64,0x7fe6d080c1ada101,0x7ff0000000000000,2
+np.float64,0xbfed36dd67fa6dbb,0x3fe0fec342c4ee89,2
+np.float64,0x3fee2bb6a3fc576e,0x3ffec1c149f1f092,2
+np.float64,0xbfd1f785eb23ef0c,0x3fea576eb01233cb,2
+np.float64,0x7fdad29a1f35a533,0x7ff0000000000000,2
+np.float64,0xffe8928c4fb12518,0x0,2
+np.float64,0x7fb123160022462b,0x7ff0000000000000,2
+np.float64,0x8007ab56cfaf56ae,0x3ff0000000000000,2
+np.float64,0x7fda342d6634685a,0x7ff0000000000000,2
+np.float64,0xbfe3b7e42c676fc8,0x3fe4e05cf8685b8a,2
+np.float64,0xffa708be7c2e1180,0x0,2
+np.float64,0xbfe8ffbece31ff7e,0x3fe29eb84077a34a,2
+np.float64,0xbf91002008220040,0x3fefa245058f05cb,2
+np.float64,0x8000281f0ee0503f,0x3ff0000000000000,2
+np.float64,0x8005617adc2ac2f6,0x3ff0000000000000,2
+np.float64,0x7fa84fec60309fd8,0x7ff0000000000000,2
+np.float64,0x8d00c0231a018,0x3ff0000000000000,2
+np.float64,0xbfdfe52ca63fca5a,0x3fe6a7324cc00d57,2
+np.float64,0x7fcc81073d39020d,0x7ff0000000000000,2
+np.float64,0x800134ff5a6269ff,0x3ff0000000000000,2
+np.float64,0xffc7fff98d2ffff4,0x0,2
+np.float64,0x8000925ce50124bb,0x3ff0000000000000,2
+np.float64,0xffe2530c66a4a618,0x0,2
+np.float64,0x7fc99070673320e0,0x7ff0000000000000,2
+np.float64,0xbfddd5c1f13bab84,0x3fe72a0c80f8df39,2
+np.float64,0x3fe1c220fee38442,0x3ff7817ec66aa55b,2
+np.float64,0x3fb9a1e1043343c2,0x3ff1265e575e6404,2
+np.float64,0xffef72e0833ee5c0,0x0,2
+np.float64,0x3fe710c0416e2181,0x3ffa5e93588aaa69,2
+np.float64,0xbfd8d23cbab1a47a,0x3fe874f5b9d99885,2
+np.float64,0x7fe9628ebd72c51c,0x7ff0000000000000,2
+np.float64,0xdd5fa611babf5,0x3ff0000000000000,2
+np.float64,0x8002bafac86575f6,0x3ff0000000000000,2
+np.float64,0x68acea44d159e,0x3ff0000000000000,2
+np.float64,0xffd776695eaeecd2,0x0,2
+np.float64,0x80059b59bb4b36b4,0x3ff0000000000000,2
+np.float64,0xbdcdd2af7b9bb,0x3ff0000000000000,2
+np.float64,0x8002b432ee856867,0x3ff0000000000000,2
+np.float64,0xcbc72f09978e6,0x3ff0000000000000,2
+np.float64,0xbfee8f4bf6fd1e98,0x3fe081cc0318b170,2
+np.float64,0xffc6e2892d2dc514,0x0,2
+np.float64,0x7feb682e4db6d05c,0x7ff0000000000000,2
+np.float64,0x8004b70a04296e15,0x3ff0000000000000,2
+np.float64,0x42408a4284812,0x3ff0000000000000,2
+np.float64,0xbfe9b8b197f37163,0x3fe254b4c003ce0a,2
+np.float64,0x3fcaadf5f5355bec,0x3ff27ca7876a8d20,2
+np.float64,0xfff0000000000000,0x0,2
+np.float64,0x7fea8376d33506ed,0x7ff0000000000000,2
+np.float64,0xffef73c2d63ee785,0x0,2
+np.float64,0xffe68b2bae2d1657,0x0,2
+np.float64,0x3fd8339cb2306739,0x3ff4cb774d616f90,2
+np.float64,0xbfc6d1db4d2da3b8,0x3fec47bb873a309c,2
+np.float64,0x7fe858016230b002,0x7ff0000000000000,2
+np.float64,0x7fe74cb99d2e9972,0x7ff0000000000000,2
+np.float64,0xffec2e96dc385d2d,0x0,2
+np.float64,0xb762a9876ec55,0x3ff0000000000000,2
+np.float64,0x3feca230c5794462,0x3ffdbfe62a572f52,2
+np.float64,0xbfb5ebad3a2bd758,0x3fee27eed86dcc39,2
+np.float64,0x471c705a8e38f,0x3ff0000000000000,2
+np.float64,0x7fc79bb5cf2f376b,0x7ff0000000000000,2
+np.float64,0xbfe53d6164ea7ac3,0x3fe4331b3beb73bd,2
+np.float64,0xbfe375a3f766eb48,0x3fe4fe67edb516e6,2
+np.float64,0x3fe1c7686ca38ed1,0x3ff7842f04770ba9,2
+np.float64,0x242e74dc485cf,0x3ff0000000000000,2
+np.float64,0x8009c06ab71380d6,0x3ff0000000000000,2
+np.float64,0x3fd08505efa10a0c,0x3ff3227b735b956d,2
+np.float64,0xffe3dfcecda7bf9d,0x0,2
+np.float64,0x8001f079bbc3e0f4,0x3ff0000000000000,2
+np.float64,0x3fddc706b6bb8e0c,0x3ff616d927987363,2
+np.float64,0xbfd151373ea2a26e,0x3fea870ba53ec126,2
+np.float64,0x7fe89533bfb12a66,0x7ff0000000000000,2
+np.float64,0xffed302cbc3a6059,0x0,2
+np.float64,0x3fd871cc28b0e398,0x3ff4d97d58c16ae2,2
+np.float64,0x7fbe9239683d2472,0x7ff0000000000000,2
+np.float64,0x848a445909149,0x3ff0000000000000,2
+np.float64,0x8007b104ce2f620a,0x3ff0000000000000,2
+np.float64,0x7fc2cd6259259ac4,0x7ff0000000000000,2
+np.float64,0xbfeadb640df5b6c8,0x3fe1e2b068de10af,2
+np.float64,0x800033b2f1a06767,0x3ff0000000000000,2
+np.float64,0x7fe54e5b7caa9cb6,0x7ff0000000000000,2
+np.float64,0x4f928f209f26,0x3ff0000000000000,2
+np.float64,0x8003c3dc6f2787ba,0x3ff0000000000000,2
+np.float64,0xbfd55a59daaab4b4,0x3fe9649d57b32b5d,2
+np.float64,0xffe3e2968d67c52c,0x0,2
+np.float64,0x80087434d550e86a,0x3ff0000000000000,2
+np.float64,0xffdde800083bd000,0x0,2
+np.float64,0xffe291f0542523e0,0x0,2
+np.float64,0xbfe1419bc3e28338,0x3fe6051d4f95a34a,2
+np.float64,0x3fd9d00ee1b3a01e,0x3ff5292bb8d5f753,2
+np.float64,0x3fdb720b60b6e417,0x3ff589d133625374,2
+np.float64,0xbfe3e21f0967c43e,0x3fe4cd4d02e3ef9a,2
+np.float64,0x7fd7e27f3dafc4fd,0x7ff0000000000000,2
+np.float64,0x3fd1cc2620a3984c,0x3ff366befbc38e3e,2
+np.float64,0x3fe78d05436f1a0b,0x3ffaa5ee4ea54b79,2
+np.float64,0x7e2acc84fc55a,0x3ff0000000000000,2
+np.float64,0x800ffb861c5ff70c,0x3ff0000000000000,2
+np.float64,0xffb2b0db1a2561b8,0x0,2
+np.float64,0xbfe80c2363701847,0x3fe301fdfe789576,2
+np.float64,0x7fe383c1c3e70783,0x7ff0000000000000,2
+np.float64,0xbfeefc02e6fdf806,0x3fe05b1a8528bf6c,2
+np.float64,0xbfe42c9268285925,0x3fe4abdc14793cb8,2
+np.float64,0x1,0x3ff0000000000000,2
+np.float64,0xa71c7ce94e390,0x3ff0000000000000,2
+np.float64,0x800ed4e6777da9cd,0x3ff0000000000000,2
+np.float64,0x3fde11b35d3c2367,0x3ff628bdc6dd1b78,2
+np.float64,0x3fef3964dbfe72ca,0x3fff777cae357608,2
+np.float64,0x3fefe369b7ffc6d4,0x3fffec357be508a3,2
+np.float64,0xbfdef1855f3de30a,0x3fe6e348c58e3fed,2
+np.float64,0x3fee0e2bc13c1c58,0x3ffeae1909c1b973,2
+np.float64,0xbfd31554ffa62aaa,0x3fea06628b2f048a,2
+np.float64,0x800dc56bcc7b8ad8,0x3ff0000000000000,2
+np.float64,0x7fbba01b8e374036,0x7ff0000000000000,2
+np.float64,0x7fd9737a92b2e6f4,0x7ff0000000000000,2
+np.float64,0x3feeae0fac3d5c1f,0x3fff1913705f1f07,2
+np.float64,0x3fdcc64fcdb98ca0,0x3ff5d9c3e5862972,2
+np.float64,0x3fdad9f83db5b3f0,0x3ff56674e81c1bd1,2
+np.float64,0x32b8797065710,0x3ff0000000000000,2
+np.float64,0x3fd20deae6241bd6,0x3ff37495bc057394,2
+np.float64,0x7fc899f0763133e0,0x7ff0000000000000,2
+np.float64,0x80045805fc08b00d,0x3ff0000000000000,2
+np.float64,0xbfcd8304cb3b0608,0x3feb4611f1eaa30c,2
+np.float64,0x3fd632a2fcac6544,0x3ff4592e1ea14fb0,2
+np.float64,0xffeeb066007d60cb,0x0,2
+np.float64,0x800bb12a42b76255,0x3ff0000000000000,2
+np.float64,0xbfe060fe1760c1fc,0x3fe6714640ab2574,2
+np.float64,0x80067ed737acfdaf,0x3ff0000000000000,2
+np.float64,0x3fd5ec3211abd864,0x3ff449adea82e73e,2
+np.float64,0x7fc4b2fdc22965fb,0x7ff0000000000000,2
+np.float64,0xff656afd002ad600,0x0,2
+np.float64,0xffeadefcdcb5bdf9,0x0,2
+np.float64,0x80052f18610a5e32,0x3ff0000000000000,2
+np.float64,0xbfd5b75c78ab6eb8,0x3fe94b15e0f39194,2
+np.float64,0xa4d3de2b49a7c,0x3ff0000000000000,2
+np.float64,0xbfe321c93de64392,0x3fe524ac7bbee401,2
+np.float64,0x3feb32f5def665ec,0x3ffcd6e4e5f9c271,2
+np.float64,0x7fe6b07e4ced60fc,0x7ff0000000000000,2
+np.float64,0x3fe013bb2de02776,0x3ff6aa4c32ab5ba4,2
+np.float64,0xbfeadd81d375bb04,0x3fe1e1de89b4aebf,2
+np.float64,0xffece7678079cece,0x0,2
+np.float64,0x3fe3d87b8467b0f8,0x3ff897cf22505e4d,2
+np.float64,0xffc4e3a05129c740,0x0,2
+np.float64,0xbfddee6b03bbdcd6,0x3fe723dd83ab49bd,2
+np.float64,0x3fcc4e2672389c4d,0x3ff2a680db769116,2
+np.float64,0x3fd8ed221ab1da44,0x3ff4f569aec8b850,2
+np.float64,0x80000a3538a0146b,0x3ff0000000000000,2
+np.float64,0x8004832eb109065e,0x3ff0000000000000,2
+np.float64,0xffdca83c60395078,0x0,2
+np.float64,0xffef551cda3eaa39,0x0,2
+np.float64,0x800fd95dd65fb2bc,0x3ff0000000000000,2
+np.float64,0x3ff0000000000000,0x4000000000000000,2
+np.float64,0xbfc06f5c4f20deb8,0x3fed466c17305ad8,2
+np.float64,0xbfeb01b5f476036c,0x3fe1d3de0f4211f4,2
+np.float64,0xbfdb2b9284365726,0x3fe7d7b02f790b05,2
+np.float64,0xff76ba83202d7500,0x0,2
+np.float64,0x3fd3f1c59ea7e38c,0x3ff3db96b3a0aaad,2
+np.float64,0x8b99ff6d17340,0x3ff0000000000000,2
+np.float64,0xbfeb383aa0f67075,0x3fe1bedcf2531c08,2
+np.float64,0x3fe321e35fa643c7,0x3ff83749a5d686ee,2
+np.float64,0xbfd863eb2130c7d6,0x3fe8923fcc39bac7,2
+np.float64,0x9e71dd333ce3c,0x3ff0000000000000,2
+np.float64,0x9542962b2a853,0x3ff0000000000000,2
+np.float64,0xba2c963b74593,0x3ff0000000000000,2
+np.float64,0x80019f4d0ca33e9b,0x3ff0000000000000,2
+np.float64,0xffde3e39a73c7c74,0x0,2
+np.float64,0x800258ae02c4b15d,0x3ff0000000000000,2
+np.float64,0xbfd99a535a3334a6,0x3fe8402f3a0662a5,2
+np.float64,0xe6c62143cd8c4,0x3ff0000000000000,2
+np.float64,0x7fbcc828f0399051,0x7ff0000000000000,2
+np.float64,0xbfe42e3596285c6b,0x3fe4ab2066d66071,2
+np.float64,0xffe2ee42d365dc85,0x0,2
+np.float64,0x3fe1f98abea3f315,0x3ff79dc68002a80b,2
+np.float64,0x7fd7225891ae44b0,0x7ff0000000000000,2
+np.float64,0x477177408ee30,0x3ff0000000000000,2
+np.float64,0xbfe16a7e2162d4fc,0x3fe5f1a5c745385d,2
+np.float64,0xbf98aaee283155e0,0x3fef785952e9c089,2
+np.float64,0x7fd7c14a8daf8294,0x7ff0000000000000,2
+np.float64,0xf7e7713defcee,0x3ff0000000000000,2
+np.float64,0x800769aa11aed355,0x3ff0000000000000,2
+np.float64,0xbfed30385e3a6071,0x3fe10135a3bd9ae6,2
+np.float64,0x3fe6dd7205edbae4,0x3ffa4155899efd70,2
+np.float64,0x800d705d26bae0ba,0x3ff0000000000000,2
+np.float64,0xa443ac1f48876,0x3ff0000000000000,2
+np.float64,0xbfec8cfec43919fe,0x3fe13dbf966e6633,2
+np.float64,0x7fd246efaa248dde,0x7ff0000000000000,2
+np.float64,0x800f2ad14afe55a3,0x3ff0000000000000,2
+np.float64,0x800487a894c90f52,0x3ff0000000000000,2
+np.float64,0x80014c4f19e2989f,0x3ff0000000000000,2
+np.float64,0x3fc11f265f223e4d,0x3ff18def05c971e5,2
+np.float64,0xffeb6d565776daac,0x0,2
+np.float64,0x7fd5ca5df8ab94bb,0x7ff0000000000000,2
+np.float64,0xbfe33de4fde67bca,0x3fe517d0e212cd1c,2
+np.float64,0xbfd1c738e5a38e72,0x3fea6539e9491693,2
+np.float64,0xbfec1d8c33b83b18,0x3fe16790fbca0c65,2
+np.float64,0xbfeecb464b7d968d,0x3fe06c67e2aefa55,2
+np.float64,0xbfd621dbf1ac43b8,0x3fe92dfa32d93846,2
+np.float64,0x80069a02860d3406,0x3ff0000000000000,2
+np.float64,0xbfe84f650e309eca,0x3fe2e661300f1975,2
+np.float64,0x7fc1d2cec523a59d,0x7ff0000000000000,2
+np.float64,0x3fd7706d79aee0db,0x3ff49fb033353dfe,2
+np.float64,0xffd94ba458329748,0x0,2
+np.float64,0x7fea98ba1a753173,0x7ff0000000000000,2
+np.float64,0xbfe756ba092ead74,0x3fe34d428d1857bc,2
+np.float64,0xffecfbd836b9f7b0,0x0,2
+np.float64,0x3fd211fbe5a423f8,0x3ff375711a3641e0,2
+np.float64,0x7fee24f7793c49ee,0x7ff0000000000000,2
+np.float64,0x7fe6a098886d4130,0x7ff0000000000000,2
+np.float64,0xbfd4ade909a95bd2,0x3fe99436524db1f4,2
+np.float64,0xbfeb704e6476e09d,0x3fe1a95be4a21bc6,2
+np.float64,0xffefc0f6627f81ec,0x0,2
+np.float64,0x7feff3f896ffe7f0,0x7ff0000000000000,2
+np.float64,0xa3f74edb47eea,0x3ff0000000000000,2
+np.float64,0xbfe0a551cf214aa4,0x3fe65027a7ff42e3,2
+np.float64,0x3fe164b23622c964,0x3ff7521c6225f51d,2
+np.float64,0x7fc258752324b0e9,0x7ff0000000000000,2
+np.float64,0x4739b3348e737,0x3ff0000000000000,2
+np.float64,0xb0392b1d60726,0x3ff0000000000000,2
+np.float64,0x7fe26f42e5e4de85,0x7ff0000000000000,2
+np.float64,0x8004601f87e8c040,0x3ff0000000000000,2
+np.float64,0xffe92ce37b3259c6,0x0,2
+np.float64,0x3fe620da3a6c41b4,0x3ff9d6ee3d005466,2
+np.float64,0x3fd850cfa2b0a1a0,0x3ff4d20bd249d411,2
+np.float64,0xffdcdfdfb5b9bfc0,0x0,2
+np.float64,0x800390297d672054,0x3ff0000000000000,2
+np.float64,0x3fde5864f6bcb0ca,0x3ff639bb9321f5ef,2
+np.float64,0x3fee484cec7c909a,0x3ffed4d2c6274219,2
+np.float64,0x7fe9b9a064b37340,0x7ff0000000000000,2
+np.float64,0xffe50028b8aa0051,0x0,2
+np.float64,0x3fe37774ade6eee9,0x3ff864558498a9a8,2
+np.float64,0x7fef83c724bf078d,0x7ff0000000000000,2
+np.float64,0xbfeb58450fb6b08a,0x3fe1b290556be73d,2
+np.float64,0x7fd7161475ae2c28,0x7ff0000000000000,2
+np.float64,0x3fece09621f9c12c,0x3ffde836a583bbdd,2
+np.float64,0x3fd045790ea08af2,0x3ff31554778fd4e2,2
+np.float64,0xbfe7c7dd6cef8fbb,0x3fe31e2eeda857fc,2
+np.float64,0xffe9632f5372c65e,0x0,2
+np.float64,0x800d4f3a703a9e75,0x3ff0000000000000,2
+np.float64,0xffea880e4df5101c,0x0,2
+np.float64,0xbfeb7edc4ff6fdb8,0x3fe1a3cb5dc33594,2
+np.float64,0xbfcaae4bab355c98,0x3febb1ee65e16b58,2
+np.float64,0xbfde598a19bcb314,0x3fe709145eafaaf8,2
+np.float64,0x3feefb6d78fdf6db,0x3fff4d5c8c68e39a,2
+np.float64,0x13efc75427dfa,0x3ff0000000000000,2
+np.float64,0xffe26f65c064decb,0x0,2
+np.float64,0xbfed5c1addfab836,0x3fe0f1133bd2189a,2
+np.float64,0x7fe7a7cf756f4f9e,0x7ff0000000000000,2
+np.float64,0xffc681702e2d02e0,0x0,2
+np.float64,0x8003d6ab5067ad57,0x3ff0000000000000,2
+np.float64,0xffa695f1342d2be0,0x0,2
+np.float64,0xbfcf8857db3f10b0,0x3feafa14da8c29a4,2
+np.float64,0xbfe8ca06be71940e,0x3fe2b46f6d2c64b4,2
+np.float64,0x3451c74468a3a,0x3ff0000000000000,2
+np.float64,0x3fde47d5f6bc8fac,0x3ff635bf8e024716,2
+np.float64,0xffda159d5db42b3a,0x0,2
+np.float64,0x7fef9fecaa3f3fd8,0x7ff0000000000000,2
+np.float64,0x3fd4e745e3a9ce8c,0x3ff410a9cb6fd8bf,2
+np.float64,0xffef57019b3eae02,0x0,2
+np.float64,0xbfe6604f4f6cc09e,0x3fe3b55de43c626d,2
+np.float64,0xffe066a424a0cd48,0x0,2
+np.float64,0x3fd547de85aa8fbc,0x3ff425b2a7a16675,2
+np.float64,0xffb3c69280278d28,0x0,2
+np.float64,0xffebe0b759f7c16e,0x0,2
+np.float64,0x3fefc84106ff9082,0x3fffd973687337d8,2
+np.float64,0x501c42a4a0389,0x3ff0000000000000,2
+np.float64,0x7feb45d13eb68ba1,0x7ff0000000000000,2
+np.float64,0xbfb16a8c2e22d518,0x3fee86a9c0f9291a,2
+np.float64,0x3be327b877c66,0x3ff0000000000000,2
+np.float64,0x7fe4a58220694b03,0x7ff0000000000000,2
+np.float64,0x3fe0286220a050c4,0x3ff6b472157ab8f2,2
+np.float64,0x3fc9381825327030,0x3ff2575fbea2bf5d,2
+np.float64,0xbfd1af7ee8a35efe,0x3fea6c032cf7e669,2
+np.float64,0xbfea9b0f39b5361e,0x3fe1fbae14b40b4d,2
+np.float64,0x39efe4aa73dfd,0x3ff0000000000000,2
+np.float64,0xffeb06fdc8360dfb,0x0,2
+np.float64,0xbfda481e72b4903c,0x3fe812b4b08d4884,2
+np.float64,0xbfd414ba5ba82974,0x3fe9bec9474bdfe6,2
+np.float64,0x7fe707177b6e0e2e,0x7ff0000000000000,2
+np.float64,0x8000000000000001,0x3ff0000000000000,2
+np.float64,0xbfede6a75bbbcd4f,0x3fe0be874cccd399,2
+np.float64,0x8006cdb577cd9b6c,0x3ff0000000000000,2
+np.float64,0x800051374f20a26f,0x3ff0000000000000,2
+np.float64,0x3fe5cba8c96b9752,0x3ff9a76b3adcc122,2
+np.float64,0xbfee3933487c7267,0x3fe0a0b190f9609a,2
+np.float64,0x3fd574b8d8aae970,0x3ff42f7e83de1af9,2
+np.float64,0xba5db72b74bb7,0x3ff0000000000000,2
+np.float64,0x3fa9bf512c337ea0,0x3ff0914a7f743a94,2
+np.float64,0xffe8cb736c3196e6,0x0,2
+np.float64,0x3761b2f06ec37,0x3ff0000000000000,2
+np.float64,0x8b4d4433169a9,0x3ff0000000000000,2
+np.float64,0x800f0245503e048b,0x3ff0000000000000,2
+np.float64,0x7fb20d54ac241aa8,0x7ff0000000000000,2
+np.float64,0x3fdf26666b3e4ccd,0x3ff66b8995142017,2
+np.float64,0xbfcbf2a83737e550,0x3feb8173a7b9d6b5,2
+np.float64,0x3fd31572a0a62ae5,0x3ff3ac6c94313dcd,2
+np.float64,0x7fb6c2807a2d8500,0x7ff0000000000000,2
+np.float64,0x800799758f2f32ec,0x3ff0000000000000,2
+np.float64,0xe72f1f6bce5e4,0x3ff0000000000000,2
+np.float64,0x3fe0e0f223a1c1e4,0x3ff70fed5b761673,2
+np.float64,0x3fe6d4f133eda9e2,0x3ffa3c8000c169eb,2
+np.float64,0xbfe1ccc3d8639988,0x3fe5c32148bedbda,2
+np.float64,0x3fea71c53574e38a,0x3ffc5f31201fe9be,2
+np.float64,0x9e0323eb3c065,0x3ff0000000000000,2
+np.float64,0x8005cc79a5cb98f4,0x3ff0000000000000,2
+np.float64,0x1dace1f83b59d,0x3ff0000000000000,2
+np.float64,0x10000000000000,0x3ff0000000000000,2
+np.float64,0xbfdef50830bdea10,0x3fe6e269fc17ebef,2
+np.float64,0x8010000000000000,0x3ff0000000000000,2
+np.float64,0xbfdfa82192bf5044,0x3fe6b6313ee0a095,2
+np.float64,0x3fd9398fe2b27320,0x3ff506ca2093c060,2
+np.float64,0x8002721fe664e441,0x3ff0000000000000,2
+np.float64,0x800c04166ad8082d,0x3ff0000000000000,2
+np.float64,0xffec3918b3387230,0x0,2
+np.float64,0x3fec62d5dfb8c5ac,0x3ffd972ea4a54b32,2
+np.float64,0x3fe7e42a0b6fc854,0x3ffad86b0443181d,2
+np.float64,0x3fc0aff5f3215fec,0x3ff1836058d4d210,2
+np.float64,0xbf82ff68a025fec0,0x3fefcb7f06862dce,2
+np.float64,0xae2e35195c5c7,0x3ff0000000000000,2
+np.float64,0x3fece3bddf79c77c,0x3ffdea41fb1ba8fa,2
+np.float64,0xbfa97b947832f730,0x3feeea34ebedbbd2,2
+np.float64,0xbfdfb1b1ce3f6364,0x3fe6b3d72871335c,2
+np.float64,0xbfe61a4f24ac349e,0x3fe3d356bf991b06,2
+np.float64,0x7fe23117a5e4622e,0x7ff0000000000000,2
+np.float64,0x800552a8cccaa552,0x3ff0000000000000,2
+np.float64,0x625b4d0ac4b6a,0x3ff0000000000000,2
+np.float64,0x3f86cf15702d9e00,0x3ff01fbe0381676d,2
+np.float64,0x800d7d1b685afa37,0x3ff0000000000000,2
+np.float64,0x3fe2cb6e40a596dd,0x3ff80a1a562f7fc9,2
+np.float64,0x3fe756eb8e2eadd7,0x3ffa86c638aad07d,2
+np.float64,0x800dc9a5513b934b,0x3ff0000000000000,2
+np.float64,0xbfbbdd118a37ba20,0x3fedacb4624f3cee,2
+np.float64,0x800de01f8efbc03f,0x3ff0000000000000,2
+np.float64,0x800da1a3fe9b4348,0x3ff0000000000000,2
+np.float64,0xbf87d8c7602fb180,0x3fefbe2614998ab6,2
+np.float64,0xbfdfff6141bffec2,0x3fe6a0c54d9f1bc8,2
+np.float64,0xee8fbba5dd1f8,0x3ff0000000000000,2
+np.float64,0x3fe79dc93e6f3b92,0x3ffaaf9d7d955b2c,2
+np.float64,0xffedd4b3d07ba967,0x0,2
+np.float64,0x800905dfc1720bc0,0x3ff0000000000000,2
+np.float64,0x3fd9e483b8b3c907,0x3ff52ddc6c950e7f,2
+np.float64,0xe34ffefdc6a00,0x3ff0000000000000,2
+np.float64,0x2168e62242d1e,0x3ff0000000000000,2
+np.float64,0x800349950e26932b,0x3ff0000000000000,2
+np.float64,0x7fc50da8532a1b50,0x7ff0000000000000,2
+np.float64,0xae1a4d115c34a,0x3ff0000000000000,2
+np.float64,0xa020f0b74041e,0x3ff0000000000000,2
+np.float64,0x3fd2aa2f77a5545f,0x3ff3959f09519a25,2
+np.float64,0x3fbfefc3223fdf86,0x3ff171f3df2d408b,2
+np.float64,0xbfea9fc340b53f86,0x3fe1f9d92b712654,2
+np.float64,0xffe9b920a5337240,0x0,2
+np.float64,0xbfe2eb0265e5d605,0x3fe53dd195782de3,2
+np.float64,0x7fb932c70e32658d,0x7ff0000000000000,2
+np.float64,0x3fda816bfcb502d8,0x3ff551f8d5c84c82,2
+np.float64,0x3fed68cbe9fad198,0x3ffe40f6692d5693,2
+np.float64,0x32df077665be2,0x3ff0000000000000,2
+np.float64,0x7fdc9c2f3539385d,0x7ff0000000000000,2
+np.float64,0x7fe71091a2ee2122,0x7ff0000000000000,2
+np.float64,0xbfe68106c46d020e,0x3fe3a76b56024c2c,2
+np.float64,0xffcf0572823e0ae4,0x0,2
+np.float64,0xbfeeab341fbd5668,0x3fe077d496941cda,2
+np.float64,0x7fe7ada0d2af5b41,0x7ff0000000000000,2
+np.float64,0xffacdef2a439bde0,0x0,2
+np.float64,0x3fe4200f3128401e,0x3ff8be0ddf30fd1e,2
+np.float64,0xffd9022a69320454,0x0,2
+np.float64,0xbfe8e06914f1c0d2,0x3fe2ab5fe7fffb5a,2
+np.float64,0x3fc4b976602972ed,0x3ff1e6786fa7a890,2
+np.float64,0xbfd784c105af0982,0x3fe8cdeb1cdbd57e,2
+np.float64,0x7feb20a20eb64143,0x7ff0000000000000,2
+np.float64,0xbfc87dd83630fbb0,0x3fec067c1e7e6983,2
+np.float64,0x7fe5400cbe6a8018,0x7ff0000000000000,2
+np.float64,0xbfb4a1f5e22943e8,0x3fee42e6c81559a9,2
+np.float64,0x3fe967c575f2cf8a,0x3ffbbd8bc0d5c50d,2
+np.float64,0xbfeb059cf4760b3a,0x3fe1d25c592c4dab,2
+np.float64,0xbfeef536d5bdea6e,0x3fe05d832c15c64a,2
+np.float64,0x3fa90b3f6432167f,0x3ff08d410dd732cc,2
+np.float64,0xbfeaff265e75fe4d,0x3fe1d4db3fb3208d,2
+np.float64,0x6d93d688db27b,0x3ff0000000000000,2
+np.float64,0x800ab9b4ea55736a,0x3ff0000000000000,2
+np.float64,0x3fd444b39d288967,0x3ff3ed749d48d444,2
+np.float64,0xbfd5f2c0d0abe582,0x3fe93ad6124d88e7,2
+np.float64,0x3fea8fd915f51fb2,0x3ffc71b32cb92d60,2
+np.float64,0xbfd23d6491a47aca,0x3fea43875709b0f0,2
+np.float64,0xffe76f75ce6edeeb,0x0,2
+np.float64,0x1f5670da3eacf,0x3ff0000000000000,2
+np.float64,0x8000d89c9621b13a,0x3ff0000000000000,2
+np.float64,0x3fedb51c52bb6a39,0x3ffe732279c228ff,2
+np.float64,0x7f99215ac83242b5,0x7ff0000000000000,2
+np.float64,0x742a6864e854e,0x3ff0000000000000,2
+np.float64,0xbfe02fb340205f66,0x3fe689495f9164e3,2
+np.float64,0x7fef4c12b0fe9824,0x7ff0000000000000,2
+np.float64,0x3fd40e17c2a81c30,0x3ff3e1aee8ed972f,2
+np.float64,0x7fdcd264e939a4c9,0x7ff0000000000000,2
+np.float64,0x3fdb675838b6ceb0,0x3ff587526241c550,2
+np.float64,0x3fdf1a4081be3480,0x3ff66896a18c2385,2
+np.float64,0xbfea5082b874a106,0x3fe218cf8f11be13,2
+np.float64,0xffe1a0ebf7e341d8,0x0,2
+np.float64,0x3fed0a2222ba1444,0x3ffe032ce928ae7d,2
+np.float64,0xffeae036da75c06d,0x0,2
+np.float64,0x5b05fc8ab60c0,0x3ff0000000000000,2
+np.float64,0x7fd8aae5f03155cb,0x7ff0000000000000,2
+np.float64,0xbfd0b4d9fda169b4,0x3feab41e58b6ccb7,2
+np.float64,0xffdcaffa57395ff4,0x0,2
+np.float64,0xbfcbf1455437e28c,0x3feb81a884182c5d,2
+np.float64,0x3f9d6700b83ace01,0x3ff0525657db35d4,2
+np.float64,0x4fd5b0b29fab7,0x3ff0000000000000,2
+np.float64,0x3fe9af2df5b35e5c,0x3ffbe895684df916,2
+np.float64,0x800dfd41f9dbfa84,0x3ff0000000000000,2
+np.float64,0xbf2a30457e546,0x3ff0000000000000,2
+np.float64,0x7fc6be37182d7c6d,0x7ff0000000000000,2
+np.float64,0x800e0f9788dc1f2f,0x3ff0000000000000,2
+np.float64,0x8006890c704d121a,0x3ff0000000000000,2
+np.float64,0xffecb1a7cbb9634f,0x0,2
+np.float64,0xffb35c330426b868,0x0,2
+np.float64,0x7fe8f2ba8a71e574,0x7ff0000000000000,2
+np.float64,0xf3ccff8fe79a0,0x3ff0000000000000,2
+np.float64,0x3fdf19a84e3e3351,0x3ff66871b17474c1,2
+np.float64,0x80049a662d0934cd,0x3ff0000000000000,2
+np.float64,0xdf5bb4bbbeb77,0x3ff0000000000000,2
+np.float64,0x8005eca030cbd941,0x3ff0000000000000,2
+np.float64,0xffe5f239586be472,0x0,2
+np.float64,0xbfc4526a0728a4d4,0x3fecaa52fbf5345e,2
+np.float64,0xbfe8f1ecda31e3da,0x3fe2a44c080848b3,2
+np.float64,0x3feebd32f4bd7a66,0x3fff234788938c3e,2
+np.float64,0xffd6ca04e9ad940a,0x0,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0xbfd4c560a9a98ac2,0x3fe98db6d97442fc,2
+np.float64,0x8005723471cae46a,0x3ff0000000000000,2
+np.float64,0xbfeb278299764f05,0x3fe1c54b48f8ba4b,2
+np.float64,0x8007907b376f20f7,0x3ff0000000000000,2
+np.float64,0x7fe9c2fd01b385f9,0x7ff0000000000000,2
+np.float64,0x7fdaa37368b546e6,0x7ff0000000000000,2
+np.float64,0xbfe6d0f3786da1e7,0x3fe38582271cada7,2
+np.float64,0xbfea9b77823536ef,0x3fe1fb8575cd1b7d,2
+np.float64,0xbfe90ac38bf21587,0x3fe29a471b47a2e8,2
+np.float64,0xbfe9c51844738a30,0x3fe24fc8de03ea84,2
+np.float64,0x3fe45a9013a8b520,0x3ff8dd7c80f1cf75,2
+np.float64,0xbfe5780551eaf00a,0x3fe419832a6a4c56,2
+np.float64,0xffefffffffffffff,0x0,2
+np.float64,0x7fe3778c84a6ef18,0x7ff0000000000000,2
+np.float64,0xbfdc8a60413914c0,0x3fe77dc55b85028f,2
+np.float64,0xef47ae2fde8f6,0x3ff0000000000000,2
+np.float64,0x8001269fa4c24d40,0x3ff0000000000000,2
+np.float64,0x3fe9d2d39e73a5a7,0x3ffbfe2a66c4148e,2
+np.float64,0xffee61f528fcc3e9,0x0,2
+np.float64,0x3fe8a259ab7144b3,0x3ffb47e797a34bd2,2
+np.float64,0x3f906d610820dac0,0x3ff02dccda8e1a75,2
+np.float64,0x3fe70739f32e0e74,0x3ffa59232f4fcd07,2
+np.float64,0x3fe6b7f5e6ad6fec,0x3ffa2c0cc54f2c16,2
+np.float64,0x95a91a792b524,0x3ff0000000000000,2
+np.float64,0xbfedf6fcf57bedfa,0x3fe0b89bb40081cc,2
+np.float64,0xbfa4d2de9c29a5c0,0x3fef1c485678d657,2
+np.float64,0x3fe130470d22608e,0x3ff737b0be409a38,2
+np.float64,0x3fcf8035423f006b,0x3ff2f9d7c3c6a302,2
+np.float64,0xffe5995a3eab32b4,0x0,2
+np.float64,0xffca68c63034d18c,0x0,2
+np.float64,0xff9d53af903aa760,0x0,2
+np.float64,0x800563f1de6ac7e4,0x3ff0000000000000,2
+np.float64,0x7fce284fa63c509e,0x7ff0000000000000,2
+np.float64,0x7fb2a3959a25472a,0x7ff0000000000000,2
+np.float64,0x7fdbe2652f37c4c9,0x7ff0000000000000,2
+np.float64,0x800d705bbc1ae0b8,0x3ff0000000000000,2
+np.float64,0x7fd9bd2347b37a46,0x7ff0000000000000,2
+np.float64,0x3fcac3c0fb358782,0x3ff27ed62d6c8221,2
+np.float64,0x800110691ec220d3,0x3ff0000000000000,2
+np.float64,0x3fef79a8157ef350,0x3fffa368513eb909,2
+np.float64,0x7fe8bd2f0e317a5d,0x7ff0000000000000,2
+np.float64,0x7fd3040e60a6081c,0x7ff0000000000000,2
+np.float64,0xffea50723234a0e4,0x0,2
+np.float64,0xbfe6220054ac4400,0x3fe3d00961238a93,2
+np.float64,0x3f9eddd8c83dbbc0,0x3ff0567b0c73005a,2
+np.float64,0xbfa4a062c42940c0,0x3fef1e68badde324,2
+np.float64,0xbfd077ad4720ef5a,0x3feac5d577581d07,2
+np.float64,0x7fdfd4b025bfa95f,0x7ff0000000000000,2
+np.float64,0xd00d3cf3a01a8,0x3ff0000000000000,2
+np.float64,0x7fe3010427260207,0x7ff0000000000000,2
+np.float64,0x22ea196645d44,0x3ff0000000000000,2
+np.float64,0x7fd747e8cd2e8fd1,0x7ff0000000000000,2
+np.float64,0xd50665e7aa0cd,0x3ff0000000000000,2
+np.float64,0x7fe1da580ae3b4af,0x7ff0000000000000,2
+np.float64,0xffeb218ecfb6431d,0x0,2
+np.float64,0xbf887d0dd030fa00,0x3fefbc6252c8b354,2
+np.float64,0x3fcaa31067354621,0x3ff27b904c07e07f,2
+np.float64,0x7fe698cc4ded3198,0x7ff0000000000000,2
+np.float64,0x1c40191a38804,0x3ff0000000000000,2
+np.float64,0x80086fd20e30dfa4,0x3ff0000000000000,2
+np.float64,0x7fed34d5eaba69ab,0x7ff0000000000000,2
+np.float64,0xffd00b52622016a4,0x0,2
+np.float64,0x3f80abcdb021579b,0x3ff0172d27945851,2
+np.float64,0x3fe614cfd66c29a0,0x3ff9d031e1839191,2
+np.float64,0x80021d71c8843ae4,0x3ff0000000000000,2
+np.float64,0x800bc2adc657855c,0x3ff0000000000000,2
+np.float64,0x6b9fec1cd73fe,0x3ff0000000000000,2
+np.float64,0xffd9093b5f321276,0x0,2
+np.float64,0x800d3c6c77fa78d9,0x3ff0000000000000,2
+np.float64,0xffe80fc1cbf01f83,0x0,2
+np.float64,0xffbffbaf2a3ff760,0x0,2
+np.float64,0x3fea1ed29eb43da5,0x3ffc2c64ec0e17a3,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x3fd944a052328941,0x3ff5094f4c43ecca,2
+np.float64,0x800b1f9416163f29,0x3ff0000000000000,2
+np.float64,0x800f06bf33de0d7e,0x3ff0000000000000,2
+np.float64,0xbfdbf0d226b7e1a4,0x3fe7a4f73793d95b,2
+np.float64,0xffe7306c30ae60d8,0x0,2
+np.float64,0x7fe991accfb32359,0x7ff0000000000000,2
+np.float64,0x3fcc0040d2380082,0x3ff29ea47e4f07d4,2
+np.float64,0x7fefffffffffffff,0x7ff0000000000000,2
+np.float64,0x0,0x3ff0000000000000,2
+np.float64,0x3fe1423f7be2847e,0x3ff740bc1d3b20f8,2
+np.float64,0xbfeae3a3cab5c748,0x3fe1df7e936f8504,2
+np.float64,0x800b2da7d6165b50,0x3ff0000000000000,2
+np.float64,0x800b2404fcd6480a,0x3ff0000000000000,2
+np.float64,0x6fcbcf88df97b,0x3ff0000000000000,2
+np.float64,0xa248c0e14492,0x3ff0000000000000,2
+np.float64,0xffd255776824aaee,0x0,2
+np.float64,0x80057b3effeaf67f,0x3ff0000000000000,2
+np.float64,0x3feb0b07d7761610,0x3ffcbdfe1be5a594,2
+np.float64,0x924e1019249c2,0x3ff0000000000000,2
+np.float64,0x80074307e80e8611,0x3ff0000000000000,2
+np.float64,0xffb207fa46240ff8,0x0,2
+np.float64,0x95ac388d2b587,0x3ff0000000000000,2
+np.float64,0xbff0000000000000,0x3fe0000000000000,2
+np.float64,0x3fd38b6a492716d5,0x3ff3c59f62b5add5,2
+np.float64,0x7fe49362c3e926c5,0x7ff0000000000000,2
+np.float64,0x7fe842889db08510,0x7ff0000000000000,2
+np.float64,0xbfba6003e834c008,0x3fedcb620a2d9856,2
+np.float64,0xffe7e782bd6fcf05,0x0,2
+np.float64,0x7fd9b93d9433727a,0x7ff0000000000000,2
+np.float64,0x7fc8fcb61d31f96b,0x7ff0000000000000,2
+np.float64,0xbfef9be8db3f37d2,0x3fe022d603b81dc2,2
+np.float64,0x6f4fc766de9fa,0x3ff0000000000000,2
+np.float64,0xbfe93016f132602e,0x3fe28b42d782d949,2
+np.float64,0x3fe10e52b8e21ca5,0x3ff726a38b0bb895,2
+np.float64,0x3fbbba0ae6377416,0x3ff13f56084a9da3,2
+np.float64,0x3fe09e42ece13c86,0x3ff6eeb57e775e24,2
+np.float64,0x800942e39fb285c8,0x3ff0000000000000,2
+np.float64,0xffe5964370eb2c86,0x0,2
+np.float64,0x3fde479f32bc8f3e,0x3ff635b2619ba53a,2
+np.float64,0x3fe826e187f04dc3,0x3ffaff52b79c3a08,2
+np.float64,0x3febcbf1eab797e4,0x3ffd37152e5e2598,2
+np.float64,0x3fa0816a202102d4,0x3ff05c8e6a8b00d5,2
+np.float64,0xbd005ccb7a00c,0x3ff0000000000000,2
+np.float64,0x44c12fdc89827,0x3ff0000000000000,2
+np.float64,0xffc8fdffa431fc00,0x0,2
+np.float64,0xffeb4f5a87b69eb4,0x0,2
+np.float64,0xbfb07e7f8420fd00,0x3fee9a32924fe6a0,2
+np.float64,0xbfbd9d1bb63b3a38,0x3fed88ca81e5771c,2
+np.float64,0x8008682a74f0d055,0x3ff0000000000000,2
+np.float64,0x3fdeedbc7b3ddb79,0x3ff65dcb7c55f4dc,2
+np.float64,0x8009e889c613d114,0x3ff0000000000000,2
+np.float64,0x3faea831f43d5064,0x3ff0ad935e890e49,2
+np.float64,0xf0af1703e15e3,0x3ff0000000000000,2
+np.float64,0xffec06c4a5f80d88,0x0,2
+np.float64,0x53a1cc0ca743a,0x3ff0000000000000,2
+np.float64,0x7fd10c9eea22193d,0x7ff0000000000000,2
+np.float64,0xbfd48a6bf0a914d8,0x3fe99e0d109f2bac,2
+np.float64,0x3fd6dfe931adbfd4,0x3ff47f81c2dfc5d3,2
+np.float64,0x3fed20e86b7a41d0,0x3ffe11fecc7bc686,2
+np.float64,0xbfea586818b4b0d0,0x3fe215b7747d5cb8,2
+np.float64,0xbfd4ad3e20295a7c,0x3fe99465ab8c3275,2
+np.float64,0x3fd6619ee4acc33e,0x3ff4638b7b80c08a,2
+np.float64,0x3fdf6fcb63bedf97,0x3ff67d62fd3d560c,2
+np.float64,0x800a9191e7152324,0x3ff0000000000000,2
+np.float64,0x3fd2ff3c0da5fe78,0x3ff3a7b17e892a28,2
+np.float64,0x8003dbf1f327b7e5,0x3ff0000000000000,2
+np.float64,0xffea6b89a934d712,0x0,2
+np.float64,0x7fcfb879043f70f1,0x7ff0000000000000,2
+np.float64,0xea6a84dbd4d51,0x3ff0000000000000,2
+np.float64,0x800ec97a815d92f5,0x3ff0000000000000,2
+np.float64,0xffe304c3a8660987,0x0,2
+np.float64,0xbfefe24dd3ffc49c,0x3fe00a4e065be96d,2
+np.float64,0xffd3cc8c00a79918,0x0,2
+np.float64,0x95be8b7b2b7d2,0x3ff0000000000000,2
+np.float64,0x7fe20570cba40ae1,0x7ff0000000000000,2
+np.float64,0x7f97a06da02f40da,0x7ff0000000000000,2
+np.float64,0xffe702b9522e0572,0x0,2
+np.float64,0x3fada2d8543b45b1,0x3ff0a7adc4201e08,2
+np.float64,0x235e6acc46bce,0x3ff0000000000000,2
+np.float64,0x3fea6bc28ef4d786,0x3ffc5b7fc68fddac,2
+np.float64,0xffdbc9f505b793ea,0x0,2
+np.float64,0xffe98b137ff31626,0x0,2
+np.float64,0x800e26c6721c4d8d,0x3ff0000000000000,2
+np.float64,0x80080de445301bc9,0x3ff0000000000000,2
+np.float64,0x37e504a86fca1,0x3ff0000000000000,2
+np.float64,0x8002f5f60325ebed,0x3ff0000000000000,2
+np.float64,0x5c8772feb90ef,0x3ff0000000000000,2
+np.float64,0xbfe021abb4604358,0x3fe69023a51d22b8,2
+np.float64,0x3fde744f8fbce8a0,0x3ff64074dc84edd7,2
+np.float64,0xbfdd92899f3b2514,0x3fe73aefd9701858,2
+np.float64,0x7fc1ad5c51235ab8,0x7ff0000000000000,2
+np.float64,0xaae2f98955c5f,0x3ff0000000000000,2
+np.float64,0x7f9123d5782247aa,0x7ff0000000000000,2
+np.float64,0xbfe3f8e94b67f1d2,0x3fe4c30ab28e9cb7,2
+np.float64,0x7fdaba8b4cb57516,0x7ff0000000000000,2
+np.float64,0x7fefc85cfeff90b9,0x7ff0000000000000,2
+np.float64,0xffb83b4f523076a0,0x0,2
+np.float64,0xbfe888a68c71114d,0x3fe2ceff17c203d1,2
+np.float64,0x800de1dac4bbc3b6,0x3ff0000000000000,2
+np.float64,0xbfe4f27f09e9e4fe,0x3fe453f9af407eac,2
+np.float64,0xffe3d2713467a4e2,0x0,2
+np.float64,0xbfebaab840375570,0x3fe1931131b98842,2
+np.float64,0x93892a1b27126,0x3ff0000000000000,2
+np.float64,0x1e8e7f983d1d1,0x3ff0000000000000,2
+np.float64,0x3fecc950627992a0,0x3ffdd926f036add0,2
+np.float64,0xbfd41dfb1aa83bf6,0x3fe9bc34ece35b94,2
+np.float64,0x800aebfc6555d7f9,0x3ff0000000000000,2
+np.float64,0x7fe33ba52ca67749,0x7ff0000000000000,2
+np.float64,0xffe57c9b3feaf936,0x0,2
+np.float64,0x3fdd12464fba248c,0x3ff5ebc5598e6bd0,2
+np.float64,0xffe06d7f0fe0dafe,0x0,2
+np.float64,0x800e55b7fe9cab70,0x3ff0000000000000,2
+np.float64,0x3fd33803c8267008,0x3ff3b3cb78b2d642,2
+np.float64,0xe9cab8a1d3957,0x3ff0000000000000,2
+np.float64,0x3fb38ac166271580,0x3ff0de906947c0f0,2
+np.float64,0xbfd67aa552acf54a,0x3fe915cf64a389fd,2
+np.float64,0x1db96daa3b72f,0x3ff0000000000000,2
+np.float64,0xbfee9f08f4fd3e12,0x3fe07c2c615add3c,2
+np.float64,0xf14f6d65e29ee,0x3ff0000000000000,2
+np.float64,0x800bce089e179c12,0x3ff0000000000000,2
+np.float64,0xffc42dcc37285b98,0x0,2
+np.float64,0x7fd5f37063abe6e0,0x7ff0000000000000,2
+np.float64,0xbfd943c2cbb28786,0x3fe856f6452ec753,2
+np.float64,0x8ddfbc091bbf8,0x3ff0000000000000,2
+np.float64,0xbfe153491e22a692,0x3fe5fcb075dbbd5d,2
+np.float64,0xffe7933999ef2672,0x0,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0x8000000000000000,0x3ff0000000000000,2
+np.float64,0xbfe9154580b22a8b,0x3fe2960bac3a8220,2
+np.float64,0x800dc6dda21b8dbb,0x3ff0000000000000,2
+np.float64,0xbfb26225a824c448,0x3fee7239a457df81,2
+np.float64,0xbfd7b68c83af6d1a,0x3fe8c08e351ab468,2
+np.float64,0xffde01f7213c03ee,0x0,2
+np.float64,0x3fe54cbe0faa997c,0x3ff9614527191d72,2
+np.float64,0xbfd6bec3732d7d86,0x3fe90354909493de,2
+np.float64,0xbfef3c85bd7e790b,0x3fe0444f8c489ca6,2
+np.float64,0x899501b7132a0,0x3ff0000000000000,2
+np.float64,0xbfe17a456462f48b,0x3fe5ea2719a9a84b,2
+np.float64,0xffe34003b8668007,0x0,2
+np.float64,0x7feff6a3633fed46,0x7ff0000000000000,2
+np.float64,0x3fba597ecc34b2fe,0x3ff12ee72e4de474,2
+np.float64,0x4084c7b68109a,0x3ff0000000000000,2
+np.float64,0x3fad23bf4c3a4780,0x3ff0a4d06193ff6d,2
+np.float64,0xffd0fe2707a1fc4e,0x0,2
+np.float64,0xb96cb43f72d97,0x3ff0000000000000,2
+np.float64,0x7fc4d684d829ad09,0x7ff0000000000000,2
+np.float64,0x7fdc349226b86923,0x7ff0000000000000,2
+np.float64,0x7fd82851cd3050a3,0x7ff0000000000000,2
+np.float64,0x800cde0041b9bc01,0x3ff0000000000000,2
+np.float64,0x4e8caa1e9d196,0x3ff0000000000000,2
+np.float64,0xbfed06a6d2fa0d4e,0x3fe1108c3682b05a,2
+np.float64,0xffe8908122312102,0x0,2
+np.float64,0xffe56ed6d9aaddad,0x0,2
+np.float64,0x3fedd6db00fbadb6,0x3ffe896c68c4b26e,2
+np.float64,0x3fde31f9b4bc63f4,0x3ff6307e08f8b6ba,2
+np.float64,0x6bb963c2d772d,0x3ff0000000000000,2
+np.float64,0x787b7142f0f6f,0x3ff0000000000000,2
+np.float64,0x3fe6e4147c6dc829,0x3ffa451bbdece240,2
+np.float64,0x8003857401470ae9,0x3ff0000000000000,2
+np.float64,0xbfeae82c3c75d058,0x3fe1ddbd66e65aab,2
+np.float64,0x7fe174707c62e8e0,0x7ff0000000000000,2
+np.float64,0x80008d2545e11a4b,0x3ff0000000000000,2
+np.float64,0xbfecc2dce17985ba,0x3fe129ad4325985a,2
+np.float64,0xbfe1fa1daf63f43c,0x3fe5adcb0731a44b,2
+np.float64,0x7fcf2530203e4a5f,0x7ff0000000000000,2
+np.float64,0xbfea5cefe874b9e0,0x3fe213f134b61f4a,2
+np.float64,0x800103729f2206e6,0x3ff0000000000000,2
+np.float64,0xbfe8442ff7708860,0x3fe2eaf850faa169,2
+np.float64,0x8006c78e19ed8f1d,0x3ff0000000000000,2
+np.float64,0x3fc259589c24b2b1,0x3ff1abe6a4d28816,2
+np.float64,0xffed02b7b5ba056e,0x0,2
+np.float64,0xbfce0aa4fe3c1548,0x3feb32115d92103e,2
+np.float64,0x7fec06e78bf80dce,0x7ff0000000000000,2
+np.float64,0xbfe0960bbc612c18,0x3fe6578ab29b70d4,2
+np.float64,0x3fee45841cbc8b08,0x3ffed2f6ca808ad3,2
+np.float64,0xbfeb0f8ebef61f1e,0x3fe1ce86003044cd,2
+np.float64,0x8002c357358586af,0x3ff0000000000000,2
+np.float64,0x3fe9aa10cc735422,0x3ffbe57e294ce68b,2
+np.float64,0x800256c0a544ad82,0x3ff0000000000000,2
+np.float64,0x4de6e1449bcdd,0x3ff0000000000000,2
+np.float64,0x65e9bc9ccbd38,0x3ff0000000000000,2
+np.float64,0xbfe53b0fa9aa7620,0x3fe4341f0aa29bbc,2
+np.float64,0xbfcdd94cd13bb298,0x3feb3956acd2e2dd,2
+np.float64,0x8004a49b65a94938,0x3ff0000000000000,2
+np.float64,0x800d3d05deba7a0c,0x3ff0000000000000,2
+np.float64,0x3fe4e05bce69c0b8,0x3ff925f55602a7e0,2
+np.float64,0xffe391e3256723c6,0x0,2
+np.float64,0xbfe92f0f37b25e1e,0x3fe28bacc76ae753,2
+np.float64,0x3f990238d8320472,0x3ff045edd36e2d62,2
+np.float64,0xffed8d15307b1a2a,0x0,2
+np.float64,0x3fee82e01afd05c0,0x3ffefc09e8b9c2b7,2
+np.float64,0xffb2d94b2225b298,0x0,2
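Each row in these umath-validation-set CSVs follows the header "dtype,input,output,ulperrortol": the dtype under test, the input as a raw IEEE-754 bit pattern in hex, the expected ufunc output as a bit pattern, and the allowed error in units in the last place (ulp). Below is a minimal sketch of how a single row can be checked; the helper names (bits_to_float, ulp_distance) are illustrative rather than NumPy's, the example row is copied verbatim from the expm1 table that follows, and the bit-pattern subtraction used for the ulp distance is only valid for finite values of the same sign. NumPy's own test harness does the comparison along these lines via np.testing.assert_array_max_ulp.

import numpy as np

def bits_to_float(dtype, bits):
    # Reinterpret an integer bit pattern as an IEEE-754 float of `dtype`.
    itype = np.uint32 if dtype == np.float32 else np.uint64
    return itype(bits).view(dtype)

def ulp_distance(dtype, a, b):
    # For finite, same-signed floats, consecutive representable values
    # have consecutive bit patterns, so the difference of the raw bit
    # patterns is the distance in units in the last place.
    itype = np.uint32 if dtype == np.float32 else np.uint64
    return abs(int(dtype(a).view(itype)) - int(dtype(b).view(itype)))

# Row copied from the expm1 table below: np.float32,0x3f800000,0x3fdbf0a9,3
dtype, x_bits, want_bits, tol = np.float32, 0x3f800000, 0x3fdbf0a9, 3
x = bits_to_float(dtype, x_bits)        # 1.0
want = bits_to_float(dtype, want_bits)  # ~1.7182817, i.e. e - 1
assert ulp_distance(dtype, np.expm1(x), want) <= tol

The same decoding applies to the float64 rows (8-byte patterns viewed through np.uint64); special-value rows such as NaN and infinity inputs need the sign/NaN handling that the bit-difference shortcut above deliberately omits.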
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv
new file mode 100644
index 00000000..732ae865
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-expm1.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x80606724,0x80606724,3
+np.float32,0xbf16790f,0xbee38e14,3
+np.float32,0xbf1778a1,0xbee4a97f,3
+np.float32,0x7d4fc610,0x7f800000,3
+np.float32,0xbec30a20,0xbea230d5,3
+np.float32,0x3eae8a36,0x3ecffac5,3
+np.float32,0xbf1f08f1,0xbeece93c,3
+np.float32,0x80374376,0x80374376,3
+np.float32,0x3f2e04ca,0x3f793115,3
+np.float32,0x7e2c7e36,0x7f800000,3
+np.float32,0xbf686cae,0xbf18bcf0,3
+np.float32,0xbf5518cd,0xbf10a3da,3
+np.float32,0x807e233c,0x807e233c,3
+np.float32,0x7f4edd54,0x7f800000,3
+np.float32,0x7ed70088,0x7f800000,3
+np.float32,0x801675da,0x801675da,3
+np.float32,0x806735d5,0x806735d5,3
+np.float32,0xfe635fec,0xbf800000,3
+np.float32,0xfed88a0a,0xbf800000,3
+np.float32,0xff52c052,0xbf800000,3
+np.float32,0x7fc00000,0x7fc00000,3
+np.float32,0xff4f65f9,0xbf800000,3
+np.float32,0xfe0f6c20,0xbf800000,3
+np.float32,0x80322b30,0x80322b30,3
+np.float32,0xfb757000,0xbf800000,3
+np.float32,0x3c81e0,0x3c81e0,3
+np.float32,0x79d56a,0x79d56a,3
+np.float32,0x8029d7af,0x8029d7af,3
+np.float32,0x8058a593,0x8058a593,3
+np.float32,0x3f3a13c7,0x3f88c75c,3
+np.float32,0x2a6b05,0x2a6b05,3
+np.float32,0xbd64c960,0xbd5e83ae,3
+np.float32,0x80471052,0x80471052,3
+np.float32,0xbe5dd950,0xbe47766c,3
+np.float32,0xfd8f88f0,0xbf800000,3
+np.float32,0x75a4b7,0x75a4b7,3
+np.float32,0x3f726f2e,0x3fc9fb7d,3
+np.float32,0x3ed6795c,0x3f053115,3
+np.float32,0x17d7f5,0x17d7f5,3
+np.float32,0xbf4cf19b,0xbf0d094f,3
+np.float32,0x3e0ec532,0x3e1933c6,3
+np.float32,0xff084016,0xbf800000,3
+np.float32,0x800829aa,0x800829aa,3
+np.float32,0x806d7302,0x806d7302,3
+np.float32,0x7f59d9da,0x7f800000,3
+np.float32,0x15f8b9,0x15f8b9,3
+np.float32,0x803befb3,0x803befb3,3
+np.float32,0x525043,0x525043,3
+np.float32,0x51a647,0x51a647,3
+np.float32,0xbf1cfce4,0xbeeab3d9,3
+np.float32,0x3f1f27a4,0x3f5cb1d2,3
+np.float32,0xbebc3a04,0xbe9d8142,3
+np.float32,0xbeea548c,0xbebc07e5,3
+np.float32,0x3f47401c,0x3f96c2a3,3
+np.float32,0x806b1ea3,0x806b1ea3,3
+np.float32,0x3ea56bb8,0x3ec3450c,3
+np.float32,0x3f7b4963,0x3fd597b5,3
+np.float32,0x7f051fa0,0x7f800000,3
+np.float32,0x1d411c,0x1d411c,3
+np.float32,0xff0b6a35,0xbf800000,3
+np.float32,0xbead63c0,0xbe9314f7,3
+np.float32,0x3738be,0x3738be,3
+np.float32,0x3f138cc8,0x3f479155,3
+np.float32,0x800a539f,0x800a539f,3
+np.float32,0x801b0ebd,0x801b0ebd,3
+np.float32,0x318fcd,0x318fcd,3
+np.float32,0x3ed67556,0x3f052e06,3
+np.float32,0x702886,0x702886,3
+np.float32,0x80000001,0x80000001,3
+np.float32,0x70a174,0x70a174,3
+np.float32,0x4f9c66,0x4f9c66,3
+np.float32,0x3e3e1927,0x3e50e351,3
+np.float32,0x7eac9a4d,0x7f800000,3
+np.float32,0x4b7407,0x4b7407,3
+np.float32,0x7f5bd2fd,0x7f800000,3
+np.float32,0x3eaafc58,0x3ecaffbd,3
+np.float32,0xbc989360,0xbc9729e2,3
+np.float32,0x3f470e5c,0x3f968c7b,3
+np.float32,0x4c5672,0x4c5672,3
+np.float32,0xff2b2ee2,0xbf800000,3
+np.float32,0xbf28a104,0xbef7079b,3
+np.float32,0x2c6175,0x2c6175,3
+np.float32,0x3d7e4fb0,0x3d832f9f,3
+np.float32,0x763276,0x763276,3
+np.float32,0x3cf364,0x3cf364,3
+np.float32,0xbf7ace75,0xbf1fe48c,3
+np.float32,0xff19e858,0xbf800000,3
+np.float32,0x80504c70,0x80504c70,3
+np.float32,0xff390210,0xbf800000,3
+np.float32,0x8046a743,0x8046a743,3
+np.float32,0x80000000,0x80000000,3
+np.float32,0x806c51da,0x806c51da,3
+np.float32,0x806ab38f,0x806ab38f,3
+np.float32,0x3f3de863,0x3f8cc538,3
+np.float32,0x7f6d45bb,0x7f800000,3
+np.float32,0xfd16ec60,0xbf800000,3
+np.float32,0x80513cba,0x80513cba,3
+np.float32,0xbf68996b,0xbf18cefa,3
+np.float32,0xfe039f2c,0xbf800000,3
+np.float32,0x3f013207,0x3f280c55,3
+np.float32,0x7ef4bc07,0x7f800000,3
+np.float32,0xbe8b65ac,0xbe741069,3
+np.float32,0xbf7a8186,0xbf1fc7a6,3
+np.float32,0x802532e5,0x802532e5,3
+np.float32,0x32c7df,0x32c7df,3
+np.float32,0x3ce4dceb,0x3ce81701,3
+np.float32,0xfe801118,0xbf800000,3
+np.float32,0x3d905f20,0x3d9594fb,3
+np.float32,0xbe11ed28,0xbe080168,3
+np.float32,0x59e773,0x59e773,3
+np.float32,0x3e9a2547,0x3eb3dd57,3
+np.float32,0x7ecb7c67,0x7f800000,3
+np.float32,0x7f69a67e,0x7f800000,3
+np.float32,0xff121e11,0xbf800000,3
+np.float32,0x3f7917cb,0x3fd2ad8c,3
+np.float32,0xbf1a7da8,0xbee7fc0c,3
+np.float32,0x3f077e66,0x3f329c40,3
+np.float32,0x3ce8e040,0x3cec37b3,3
+np.float32,0xbf3f0b8e,0xbf069f4d,3
+np.float32,0x3f52f194,0x3fa3c9d6,3
+np.float32,0xbf0e7422,0xbeda80f2,3
+np.float32,0xfd67e230,0xbf800000,3
+np.float32,0xff14d9a9,0xbf800000,3
+np.float32,0x3f3546e3,0x3f83dc2b,3
+np.float32,0x3e152e3a,0x3e20983d,3
+np.float32,0x4a89a3,0x4a89a3,3
+np.float32,0x63217,0x63217,3
+np.float32,0xbeb9e2a8,0xbe9be153,3
+np.float32,0x7e9fa049,0x7f800000,3
+np.float32,0x7f58110c,0x7f800000,3
+np.float32,0x3e88290c,0x3e9bfba9,3
+np.float32,0xbf2cb206,0xbefb3494,3
+np.float32,0xff5880c4,0xbf800000,3
+np.float32,0x7ecff3ac,0x7f800000,3
+np.float32,0x3f4b3de6,0x3f9b23fd,3
+np.float32,0xbebd2048,0xbe9e208c,3
+np.float32,0xff08f7a2,0xbf800000,3
+np.float32,0xff473330,0xbf800000,3
+np.float32,0x1,0x1,3
+np.float32,0xbf5dc239,0xbf14584b,3
+np.float32,0x458e3f,0x458e3f,3
+np.float32,0xbdb8a650,0xbdb091f8,3
+np.float32,0xff336ffc,0xbf800000,3
+np.float32,0x3c60bd00,0x3c624966,3
+np.float32,0xbe16a4f8,0xbe0c1664,3
+np.float32,0x3f214246,0x3f60a0f0,3
+np.float32,0x7fa00000,0x7fe00000,3
+np.float32,0x7e08737e,0x7f800000,3
+np.float32,0x3f70574c,0x3fc74b8e,3
+np.float32,0xbed5745c,0xbeae8c77,3
+np.float32,0x361752,0x361752,3
+np.float32,0x3eb276d6,0x3ed584ea,3
+np.float32,0x3f03fc1e,0x3f2cb1a5,3
+np.float32,0x3fafd1,0x3fafd1,3
+np.float32,0x7e50d74c,0x7f800000,3
+np.float32,0x3eeca5,0x3eeca5,3
+np.float32,0x5dc963,0x5dc963,3
+np.float32,0x7f0e63ae,0x7f800000,3
+np.float32,0x8021745f,0x8021745f,3
+np.float32,0xbf5881a9,0xbf121d07,3
+np.float32,0x7dadc7fd,0x7f800000,3
+np.float32,0xbf2c0798,0xbefa86bb,3
+np.float32,0x3e635f50,0x3e7e97a9,3
+np.float32,0xbf2053fa,0xbeee4c0e,3
+np.float32,0x3e8eee2b,0x3ea4dfcc,3
+np.float32,0xfc8a03c0,0xbf800000,3
+np.float32,0xfd9e4948,0xbf800000,3
+np.float32,0x801e817e,0x801e817e,3
+np.float32,0xbf603a27,0xbf1560c3,3
+np.float32,0x7f729809,0x7f800000,3
+np.float32,0x3f5a1864,0x3fac0e04,3
+np.float32,0x3e7648b8,0x3e8b3677,3
+np.float32,0x3edade24,0x3f088bc1,3
+np.float32,0x65e16e,0x65e16e,3
+np.float32,0x3f24aa50,0x3f671117,3
+np.float32,0x803cb1d0,0x803cb1d0,3
+np.float32,0xbe7b1858,0xbe5eadcc,3
+np.float32,0xbf19bb27,0xbee726fb,3
+np.float32,0xfd1f6e60,0xbf800000,3
+np.float32,0xfeb0de60,0xbf800000,3
+np.float32,0xff511a52,0xbf800000,3
+np.float32,0xff7757f7,0xbf800000,3
+np.float32,0x463ff5,0x463ff5,3
+np.float32,0x3f770d12,0x3fcffcc2,3
+np.float32,0xbf208562,0xbeee80dc,3
+np.float32,0x6df204,0x6df204,3
+np.float32,0xbf62d24f,0xbf1673fb,3
+np.float32,0x3dfcf210,0x3e069d5f,3
+np.float32,0xbef26002,0xbec114d7,3
+np.float32,0x7f800000,0x7f800000,3
+np.float32,0x7f30fb85,0x7f800000,3
+np.float32,0x7ee5dfef,0x7f800000,3
+np.float32,0x3f317829,0x3f800611,3
+np.float32,0x3f4b0bbd,0x3f9aec88,3
+np.float32,0x7edf708c,0x7f800000,3
+np.float32,0xff071260,0xbf800000,3
+np.float32,0x3e7b8c30,0x3e8e9198,3
+np.float32,0x3f33778b,0x3f82077f,3
+np.float32,0x3e8cd11d,0x3ea215fd,3
+np.float32,0x8004483d,0x8004483d,3
+np.float32,0x801633e3,0x801633e3,3
+np.float32,0x7e76eb15,0x7f800000,3
+np.float32,0x3c1571,0x3c1571,3
+np.float32,0x7de3de52,0x7f800000,3
+np.float32,0x804ae906,0x804ae906,3
+np.float32,0x7f3a2616,0x7f800000,3
+np.float32,0xff7fffff,0xbf800000,3
+np.float32,0xff5d17e4,0xbf800000,3
+np.float32,0xbeaa6704,0xbe90f252,3
+np.float32,0x7e6a43af,0x7f800000,3
+np.float32,0x2a0f35,0x2a0f35,3
+np.float32,0xfd8fece0,0xbf800000,3
+np.float32,0xfeef2e2a,0xbf800000,3
+np.float32,0xff800000,0xbf800000,3
+np.float32,0xbeefcc52,0xbebf78e4,3
+np.float32,0x3db6c490,0x3dbf2bd5,3
+np.float32,0x8290f,0x8290f,3
+np.float32,0xbeace648,0xbe92bb7f,3
+np.float32,0x801fea79,0x801fea79,3
+np.float32,0x3ea6c230,0x3ec51ebf,3
+np.float32,0x3e5f2ca3,0x3e795c8a,3
+np.float32,0x3eb6f634,0x3edbeb9f,3
+np.float32,0xff790b45,0xbf800000,3
+np.float32,0x3d82e240,0x3d872816,3
+np.float32,0x3f0d6a57,0x3f3cc7db,3
+np.float32,0x7f08531a,0x7f800000,3
+np.float32,0x702b6d,0x702b6d,3
+np.float32,0x7d3a3c38,0x7f800000,3
+np.float32,0x3d0a7fb3,0x3d0cddf3,3
+np.float32,0xff28084c,0xbf800000,3
+np.float32,0xfeee8804,0xbf800000,3
+np.float32,0x804094eb,0x804094eb,3
+np.float32,0x7acb39,0x7acb39,3
+np.float32,0x3f01c07a,0x3f28f88c,3
+np.float32,0x3e05c500,0x3e0ee674,3
+np.float32,0xbe6f7c38,0xbe558ac1,3
+np.float32,0x803b1f4b,0x803b1f4b,3
+np.float32,0xbf76561f,0xbf1e332b,3
+np.float32,0xff30d368,0xbf800000,3
+np.float32,0x7e2e1f38,0x7f800000,3
+np.float32,0x3ee085b8,0x3f0ce7c0,3
+np.float32,0x8064c4a7,0x8064c4a7,3
+np.float32,0xa7c1d,0xa7c1d,3
+np.float32,0x3f27498a,0x3f6c14bc,3
+np.float32,0x137ca,0x137ca,3
+np.float32,0x3d0a5c60,0x3d0cb969,3
+np.float32,0x80765f1f,0x80765f1f,3
+np.float32,0x80230a71,0x80230a71,3
+np.float32,0x3f321ed2,0x3f80acf4,3
+np.float32,0x7d61e7f4,0x7f800000,3
+np.float32,0xbf39f7f2,0xbf0430f7,3
+np.float32,0xbe2503f8,0xbe1867e8,3
+np.float32,0x29333d,0x29333d,3
+np.float32,0x7edc5a0e,0x7f800000,3
+np.float32,0xbe81a8a2,0xbe651663,3
+np.float32,0x7f76ab6d,0x7f800000,3
+np.float32,0x7f46111f,0x7f800000,3
+np.float32,0xff0fc888,0xbf800000,3
+np.float32,0x805ece89,0x805ece89,3
+np.float32,0xc390b,0xc390b,3
+np.float32,0xff64bdee,0xbf800000,3
+np.float32,0x3dd07e4e,0x3ddb79bd,3
+np.float32,0xfecc1f10,0xbf800000,3
+np.float32,0x803f5177,0x803f5177,3
+np.float32,0x802a24d2,0x802a24d2,3
+np.float32,0x7f27d0cc,0x7f800000,3
+np.float32,0x3ef57c98,0x3f1d7e88,3
+np.float32,0x7b848d,0x7b848d,3
+np.float32,0x7f7fffff,0x7f800000,3
+np.float32,0xfe889c46,0xbf800000,3
+np.float32,0xff2d6dc5,0xbf800000,3
+np.float32,0x3f53a186,0x3fa492a6,3
+np.float32,0xbf239c94,0xbef1c90c,3
+np.float32,0xff7c0f4e,0xbf800000,3
+np.float32,0x3e7c69a9,0x3e8f1f3a,3
+np.float32,0xbf47c9e9,0xbf0ab2a9,3
+np.float32,0xbc1eaf00,0xbc1deae9,3
+np.float32,0x3f4a6d39,0x3f9a3d8e,3
+np.float32,0x3f677930,0x3fbc26eb,3
+np.float32,0x3f45eea1,0x3f955418,3
+np.float32,0x7f61a1f8,0x7f800000,3
+np.float32,0xff58c7c6,0xbf800000,3
+np.float32,0x80239801,0x80239801,3
+np.float32,0xff56e616,0xbf800000,3
+np.float32,0xff62052c,0xbf800000,3
+np.float32,0x8009b615,0x8009b615,3
+np.float32,0x293d6b,0x293d6b,3
+np.float32,0xfe9e585c,0xbf800000,3
+np.float32,0x7f58ff4b,0x7f800000,3
+np.float32,0x10937c,0x10937c,3
+np.float32,0x7f5cc13f,0x7f800000,3
+np.float32,0x110c5d,0x110c5d,3
+np.float32,0x805e51fc,0x805e51fc,3
+np.float32,0xbedcf70a,0xbeb3766c,3
+np.float32,0x3f4d5e42,0x3f9d8091,3
+np.float32,0xff5925a0,0xbf800000,3
+np.float32,0x7e87cafa,0x7f800000,3
+np.float32,0xbf6474b2,0xbf171fee,3
+np.float32,0x4b39b2,0x4b39b2,3
+np.float32,0x8020cc28,0x8020cc28,3
+np.float32,0xff004ed8,0xbf800000,3
+np.float32,0xbf204cf5,0xbeee448d,3
+np.float32,0x3e30cf10,0x3e40fdb1,3
+np.float32,0x80202bee,0x80202bee,3
+np.float32,0xbf55a985,0xbf10e2bc,3
+np.float32,0xbe297dd8,0xbe1c351c,3
+np.float32,0x5780d9,0x5780d9,3
+np.float32,0x7ef729fa,0x7f800000,3
+np.float32,0x8039a3b5,0x8039a3b5,3
+np.float32,0x7cdd3f,0x7cdd3f,3
+np.float32,0x7ef0145a,0x7f800000,3
+np.float32,0x807ad7ae,0x807ad7ae,3
+np.float32,0x7f6c2643,0x7f800000,3
+np.float32,0xbec56124,0xbea3c929,3
+np.float32,0x512c3b,0x512c3b,3
+np.float32,0xbed3effe,0xbead8c1e,3
+np.float32,0x7f5e0a4d,0x7f800000,3
+np.float32,0x3f315316,0x3f7fc200,3
+np.float32,0x7eca5727,0x7f800000,3
+np.float32,0x7f4834f3,0x7f800000,3
+np.float32,0x8004af6d,0x8004af6d,3
+np.float32,0x3f223ca4,0x3f6277e3,3
+np.float32,0x7eea4fdd,0x7f800000,3
+np.float32,0x3e7143e8,0x3e880763,3
+np.float32,0xbf737008,0xbf1d160e,3
+np.float32,0xfc408b00,0xbf800000,3
+np.float32,0x803912ca,0x803912ca,3
+np.float32,0x7db31f4e,0x7f800000,3
+np.float32,0xff578b54,0xbf800000,3
+np.float32,0x3f068ec4,0x3f31062b,3
+np.float32,0x35f64f,0x35f64f,3
+np.float32,0x80437df4,0x80437df4,3
+np.float32,0x568059,0x568059,3
+np.float32,0x8005f8ba,0x8005f8ba,3
+np.float32,0x6824ad,0x6824ad,3
+np.float32,0xff3fdf30,0xbf800000,3
+np.float32,0xbf6f7682,0xbf1b89d6,3
+np.float32,0x3dcea8a0,0x3dd971f5,3
+np.float32,0x3ee32a62,0x3f0ef5a9,3
+np.float32,0xbf735bcd,0xbf1d0e3d,3
+np.float32,0x7e8c7c28,0x7f800000,3
+np.float32,0x3ed552bc,0x3f045161,3
+np.float32,0xfed90a8a,0xbf800000,3
+np.float32,0xbe454368,0xbe336d2a,3
+np.float32,0xbf171d26,0xbee4442d,3
+np.float32,0x80652bf9,0x80652bf9,3
+np.float32,0xbdbaaa20,0xbdb26914,3
+np.float32,0x3f56063d,0x3fa7522e,3
+np.float32,0x3d3d4fd3,0x3d41c13f,3
+np.float32,0x80456040,0x80456040,3
+np.float32,0x3dc15586,0x3dcac0ef,3
+np.float32,0x7f753060,0x7f800000,3
+np.float32,0x7f7d8039,0x7f800000,3
+np.float32,0xfdebf280,0xbf800000,3
+np.float32,0xbf1892c3,0xbee5e116,3
+np.float32,0xbf0f1468,0xbedb3878,3
+np.float32,0x40d85c,0x40d85c,3
+np.float32,0x3f93dd,0x3f93dd,3
+np.float32,0xbf5730fd,0xbf118c24,3
+np.float32,0xfe17aa44,0xbf800000,3
+np.float32,0x3dc0baf4,0x3dca1716,3
+np.float32,0xbf3433d8,0xbf015efb,3
+np.float32,0x1c59f5,0x1c59f5,3
+np.float32,0x802b1540,0x802b1540,3
+np.float32,0xbe47df6c,0xbe35936e,3
+np.float32,0xbe8e7070,0xbe78af32,3
+np.float32,0xfe7057f4,0xbf800000,3
+np.float32,0x80668b69,0x80668b69,3
+np.float32,0xbe677810,0xbe4f2c2d,3
+np.float32,0xbe7a2f1c,0xbe5df733,3
+np.float32,0xfeb79e3c,0xbf800000,3
+np.float32,0xbeb6e320,0xbe99c9e8,3
+np.float32,0xfea188f2,0xbf800000,3
+np.float32,0x7dcaeb15,0x7f800000,3
+np.float32,0x1be567,0x1be567,3
+np.float32,0xbf4041cc,0xbf07320d,3
+np.float32,0x3f721aa7,0x3fc98e9a,3
+np.float32,0x7f5aa835,0x7f800000,3
+np.float32,0x15180e,0x15180e,3
+np.float32,0x3f73d739,0x3fcbccdb,3
+np.float32,0xbeecd380,0xbebd9b36,3
+np.float32,0x3f2caec7,0x3f768fea,3
+np.float32,0xbeaf65f2,0xbe9482bb,3
+np.float32,0xfe6aa384,0xbf800000,3
+np.float32,0xbf4f2c0a,0xbf0e085e,3
+np.float32,0xbf2b5907,0xbef9d431,3
+np.float32,0x3e855e0d,0x3e985960,3
+np.float32,0x8056cc64,0x8056cc64,3
+np.float32,0xff746bb5,0xbf800000,3
+np.float32,0x3e0332f6,0x3e0bf986,3
+np.float32,0xff637720,0xbf800000,3
+np.float32,0xbf330676,0xbf00c990,3
+np.float32,0x3ec449a1,0x3eef3862,3
+np.float32,0x766541,0x766541,3
+np.float32,0xfe2edf6c,0xbf800000,3
+np.float32,0xbebb28ca,0xbe9cc3e2,3
+np.float32,0x3f16c930,0x3f4d5ce4,3
+np.float32,0x7f1a9a4a,0x7f800000,3
+np.float32,0x3e9ba1,0x3e9ba1,3
+np.float32,0xbf73d5f6,0xbf1d3d69,3
+np.float32,0xfdc8a8b0,0xbf800000,3
+np.float32,0x50f051,0x50f051,3
+np.float32,0xff0add02,0xbf800000,3
+np.float32,0x1e50bf,0x1e50bf,3
+np.float32,0x3f04d287,0x3f2e1948,3
+np.float32,0x7f1e50,0x7f1e50,3
+np.float32,0x2affb3,0x2affb3,3
+np.float32,0x80039f07,0x80039f07,3
+np.float32,0x804ba79e,0x804ba79e,3
+np.float32,0x7b5a8eed,0x7f800000,3
+np.float32,0x3e1a8b28,0x3e26d0a7,3
+np.float32,0x3ea95f29,0x3ec8bfa4,3
+np.float32,0x7e09fa55,0x7f800000,3
+np.float32,0x7eacb1b3,0x7f800000,3
+np.float32,0x3e8ad7c0,0x3e9f7dec,3
+np.float32,0x7e0e997c,0x7f800000,3
+np.float32,0x3f4422b4,0x3f936398,3
+np.float32,0x806bd222,0x806bd222,3
+np.float32,0x677ae6,0x677ae6,3
+np.float32,0x62cf68,0x62cf68,3
+np.float32,0x7e4e594e,0x7f800000,3
+np.float32,0x80445fd1,0x80445fd1,3
+np.float32,0xff3a0d04,0xbf800000,3
+np.float32,0x8052b256,0x8052b256,3
+np.float32,0x3cb34440,0x3cb53e11,3
+np.float32,0xbf0e3865,0xbeda3c6d,3
+np.float32,0x3f49f5df,0x3f99ba17,3
+np.float32,0xbed75a22,0xbeafcc09,3
+np.float32,0xbf7aec64,0xbf1fefc8,3
+np.float32,0x7f35a62d,0x7f800000,3
+np.float32,0xbf787b03,0xbf1f03fc,3
+np.float32,0x8006a62a,0x8006a62a,3
+np.float32,0x3f6419e7,0x3fb803c7,3
+np.float32,0x3ecea2e5,0x3efe8f01,3
+np.float32,0x80603577,0x80603577,3
+np.float32,0xff73198c,0xbf800000,3
+np.float32,0x7def110a,0x7f800000,3
+np.float32,0x544efd,0x544efd,3
+np.float32,0x3f052340,0x3f2ea0fc,3
+np.float32,0xff306666,0xbf800000,3
+np.float32,0xbf800000,0xbf21d2a7,3
+np.float32,0xbed3e150,0xbead826a,3
+np.float32,0x3f430c99,0x3f92390f,3
+np.float32,0xbf4bffa4,0xbf0c9c73,3
+np.float32,0xfd97a710,0xbf800000,3
+np.float32,0x3cadf0fe,0x3cafcd1a,3
+np.float32,0x807af7b4,0x807af7b4,3
+np.float32,0xbc508600,0xbc4f33bc,3
+np.float32,0x7f3e0ec7,0x7f800000,3
+np.float32,0xbe51334c,0xbe3d36f7,3
+np.float32,0xfe7b7fb4,0xbf800000,3
+np.float32,0xfed9c45e,0xbf800000,3
+np.float32,0x3da024eb,0x3da6926a,3
+np.float32,0x7eed9e76,0x7f800000,3
+np.float32,0xbf2b8f1f,0xbefa0b91,3
+np.float32,0x3f2b9286,0x3f746318,3
+np.float32,0xfe8af49c,0xbf800000,3
+np.float32,0x9c4f7,0x9c4f7,3
+np.float32,0x801d7543,0x801d7543,3
+np.float32,0xbf66474a,0xbf17de66,3
+np.float32,0xbf562155,0xbf1116b1,3
+np.float32,0x46a8de,0x46a8de,3
+np.float32,0x8053fe6b,0x8053fe6b,3
+np.float32,0xbf6ee842,0xbf1b51f3,3
+np.float32,0xbf6ad78e,0xbf19b565,3
+np.float32,0xbf012574,0xbecad7ff,3
+np.float32,0x748364,0x748364,3
+np.float32,0x8073f59b,0x8073f59b,3
+np.float32,0xff526825,0xbf800000,3
+np.float32,0xfeb02dc4,0xbf800000,3
+np.float32,0x8033eb1c,0x8033eb1c,3
+np.float32,0x3f3685ea,0x3f8520cc,3
+np.float32,0x7f657902,0x7f800000,3
+np.float32,0xbf75eac4,0xbf1e0a1f,3
+np.float32,0xfe67f384,0xbf800000,3
+np.float32,0x3f56d3cc,0x3fa83faf,3
+np.float32,0x44a4ce,0x44a4ce,3
+np.float32,0x1dc4b3,0x1dc4b3,3
+np.float32,0x4fb3b2,0x4fb3b2,3
+np.float32,0xbea904a4,0xbe8ff3ed,3
+np.float32,0x7e668f16,0x7f800000,3
+np.float32,0x7f538378,0x7f800000,3
+np.float32,0x80541709,0x80541709,3
+np.float32,0x80228040,0x80228040,3
+np.float32,0x7ef9694e,0x7f800000,3
+np.float32,0x3f5fca9b,0x3fb2ce54,3
+np.float32,0xbe9c43c2,0xbe86ab84,3
+np.float32,0xfecee000,0xbf800000,3
+np.float32,0x5a65c2,0x5a65c2,3
+np.float32,0x3f736572,0x3fcb3985,3
+np.float32,0xbf2a03f7,0xbef87600,3
+np.float32,0xfe96b488,0xbf800000,3
+np.float32,0xfedd8800,0xbf800000,3
+np.float32,0x80411804,0x80411804,3
+np.float32,0x7edcb0a6,0x7f800000,3
+np.float32,0x2bb882,0x2bb882,3
+np.float32,0x3f800000,0x3fdbf0a9,3
+np.float32,0x764b27,0x764b27,3
+np.float32,0x7e92035d,0x7f800000,3
+np.float32,0x3e80facb,0x3e92ae1d,3
+np.float32,0x8040b81a,0x8040b81a,3
+np.float32,0x7f487fe4,0x7f800000,3
+np.float32,0xbc641780,0xbc6282ed,3
+np.float32,0x804b0bb9,0x804b0bb9,3
+np.float32,0x7d0b7c39,0x7f800000,3
+np.float32,0xff072080,0xbf800000,3
+np.float32,0xbed7aff8,0xbeb00462,3
+np.float32,0x35e247,0x35e247,3
+np.float32,0xbf7edd19,0xbf216766,3
+np.float32,0x8004a539,0x8004a539,3
+np.float32,0xfdfc1790,0xbf800000,3
+np.float32,0x8037a841,0x8037a841,3
+np.float32,0xfed0a8a8,0xbf800000,3
+np.float32,0x7f1f1697,0x7f800000,3
+np.float32,0x3f2ccc6e,0x3f76ca23,3
+np.float32,0x35eada,0x35eada,3
+np.float32,0xff111f42,0xbf800000,3
+np.float32,0x3ee1ab7f,0x3f0dcbbe,3
+np.float32,0xbf6e89ee,0xbf1b2cd4,3
+np.float32,0x3f58611c,0x3faa0cdc,3
+np.float32,0x1ac6a6,0x1ac6a6,3
+np.float32,0xbf1286fa,0xbedf2312,3
+np.float32,0x7e451137,0x7f800000,3
+np.float32,0xbe92c326,0xbe7f3405,3
+np.float32,0x3f2fdd16,0x3f7cd87b,3
+np.float32,0xbe5c0ea0,0xbe4604c2,3
+np.float32,0xbdb29968,0xbdab0883,3
+np.float32,0x3964,0x3964,3
+np.float32,0x3f0dc236,0x3f3d60a0,3
+np.float32,0x7c3faf06,0x7f800000,3
+np.float32,0xbef41f7a,0xbec22b16,3
+np.float32,0x3f4c0289,0x3f9bfdcc,3
+np.float32,0x806084e9,0x806084e9,3
+np.float32,0x3ed1d8dd,0x3f01b0c1,3
+np.float32,0x806d8d8b,0x806d8d8b,3
+np.float32,0x3f052180,0x3f2e9e0a,3
+np.float32,0x803d85d5,0x803d85d5,3
+np.float32,0x3e0afd70,0x3e14dd48,3
+np.float32,0x2fbc63,0x2fbc63,3
+np.float32,0x2e436f,0x2e436f,3
+np.float32,0xbf7b19e6,0xbf2000da,3
+np.float32,0x3f34022e,0x3f829362,3
+np.float32,0x3d2b40e0,0x3d2ee246,3
+np.float32,0x3f5298b4,0x3fa3649b,3
+np.float32,0xbdb01328,0xbda8b7de,3
+np.float32,0x7f693c81,0x7f800000,3
+np.float32,0xbeb1abc0,0xbe961edc,3
+np.float32,0x801d9b5d,0x801d9b5d,3
+np.float32,0x80628668,0x80628668,3
+np.float32,0x800f57dd,0x800f57dd,3
+np.float32,0x8017c94f,0x8017c94f,3
+np.float32,0xbf16f5f4,0xbee418b8,3
+np.float32,0x3e686476,0x3e827022,3
+np.float32,0xbf256796,0xbef3abd9,3
+np.float32,0x7f1b4485,0x7f800000,3
+np.float32,0xbea0b3cc,0xbe89ed21,3
+np.float32,0xfee08b2e,0xbf800000,3
+np.float32,0x523cb4,0x523cb4,3
+np.float32,0x3daf2cb2,0x3db6e273,3
+np.float32,0xbd531c40,0xbd4dc323,3
+np.float32,0x80078fe5,0x80078fe5,3
+np.float32,0x80800000,0x80800000,3
+np.float32,0x3f232438,0x3f642d1a,3
+np.float32,0x3ec29446,0x3eecb7c0,3
+np.float32,0x3dbcd2a4,0x3dc5cd1d,3
+np.float32,0x7f045b0d,0x7f800000,3
+np.float32,0x7f22e6d1,0x7f800000,3
+np.float32,0xbf5d3430,0xbf141c80,3
+np.float32,0xbe03ec70,0xbdf78ee6,3
+np.float32,0x3e93ec9a,0x3eab822f,3
+np.float32,0x7f3b9262,0x7f800000,3
+np.float32,0x65ac6a,0x65ac6a,3
+np.float32,0x3db9a8,0x3db9a8,3
+np.float32,0xbf37ab59,0xbf031306,3
+np.float32,0x33c40e,0x33c40e,3
+np.float32,0x7f7a478f,0x7f800000,3
+np.float32,0xbe8532d0,0xbe6a906f,3
+np.float32,0x801c081d,0x801c081d,3
+np.float32,0xbe4212a0,0xbe30ca73,3
+np.float32,0xff0b603e,0xbf800000,3
+np.float32,0x4554dc,0x4554dc,3
+np.float32,0x3dd324be,0x3dde695e,3
+np.float32,0x3f224c44,0x3f629557,3
+np.float32,0x8003cd79,0x8003cd79,3
+np.float32,0xbf31351c,0xbeffc2fd,3
+np.float32,0x8034603a,0x8034603a,3
+np.float32,0xbf6fcb70,0xbf1bab24,3
+np.float32,0x804eb67e,0x804eb67e,3
+np.float32,0xff05c00e,0xbf800000,3
+np.float32,0x3eb5b36f,0x3eda1ec7,3
+np.float32,0x3f1ed7f9,0x3f5c1d90,3
+np.float32,0x3f052d8a,0x3f2eb24b,3
+np.float32,0x5ddf51,0x5ddf51,3
+np.float32,0x7e50c11c,0x7f800000,3
+np.float32,0xff74f55a,0xbf800000,3
+np.float32,0x4322d,0x4322d,3
+np.float32,0x3f16f8a9,0x3f4db27a,3
+np.float32,0x3f4f23d6,0x3f9f7c2c,3
+np.float32,0xbf706c1e,0xbf1bea0a,3
+np.float32,0x3f2cbd52,0x3f76ac77,3
+np.float32,0xf3043,0xf3043,3
+np.float32,0xfee79de0,0xbf800000,3
+np.float32,0x7e942f69,0x7f800000,3
+np.float32,0x180139,0x180139,3
+np.float32,0xff69c678,0xbf800000,3
+np.float32,0x3f46773f,0x3f95e840,3
+np.float32,0x804aae1c,0x804aae1c,3
+np.float32,0x3eb383b4,0x3ed7024c,3
+np.float32,0x8032624e,0x8032624e,3
+np.float32,0xbd0a0f80,0xbd07c27d,3
+np.float32,0xbf1c9b98,0xbeea4a61,3
+np.float32,0x7f370999,0x7f800000,3
+np.float32,0x801931f9,0x801931f9,3
+np.float32,0x3f6f45ce,0x3fc5eea0,3
+np.float32,0xff0ab4cc,0xbf800000,3
+np.float32,0x4c043d,0x4c043d,3
+np.float32,0x8002a599,0x8002a599,3
+np.float32,0xbc4a6080,0xbc4921d7,3
+np.float32,0x3f008d14,0x3f26fb72,3
+np.float32,0x7f48b3d9,0x7f800000,3
+np.float32,0x7cb2ec7e,0x7f800000,3
+np.float32,0xbf1338bd,0xbedfeb61,3
+np.float32,0x0,0x0,3
+np.float32,0xbf2f5b64,0xbefde71c,3
+np.float32,0xbe422974,0xbe30dd56,3
+np.float32,0x3f776be8,0x3fd07950,3
+np.float32,0xbf3e97a1,0xbf06684a,3
+np.float32,0x7d28cb26,0x7f800000,3
+np.float32,0x801618d2,0x801618d2,3
+np.float32,0x807e4f83,0x807e4f83,3
+np.float32,0x8006b07d,0x8006b07d,3
+np.float32,0xfea1c042,0xbf800000,3
+np.float32,0xff24ef74,0xbf800000,3
+np.float32,0xfef7ab16,0xbf800000,3
+np.float32,0x70b771,0x70b771,3
+np.float32,0x7daeb64e,0x7f800000,3
+np.float32,0xbe66e378,0xbe4eb59c,3
+np.float32,0xbead1534,0xbe92dcf7,3
+np.float32,0x7e6769b8,0x7f800000,3
+np.float32,0x7ecd0890,0x7f800000,3
+np.float32,0xbe7380d8,0xbe58b747,3
+np.float32,0x3efa6f2f,0x3f218265,3
+np.float32,0x3f59dada,0x3fabc5eb,3
+np.float32,0xff0f2d20,0xbf800000,3
+np.float32,0x8060210e,0x8060210e,3
+np.float32,0x3ef681e8,0x3f1e51c8,3
+np.float32,0x77a6dd,0x77a6dd,3
+np.float32,0xbebfdd0e,0xbea00399,3
+np.float32,0xfe889b72,0xbf800000,3
+np.float32,0x8049ed2c,0x8049ed2c,3
+np.float32,0x3b089dc4,0x3b08c23e,3
+np.float32,0xbf13c7c4,0xbee08c28,3
+np.float32,0x3efa13b9,0x3f2137d7,3
+np.float32,0x3e9385dc,0x3eaaf914,3
+np.float32,0x7e0e6a43,0x7f800000,3
+np.float32,0x7df6d63f,0x7f800000,3
+np.float32,0x3f3efead,0x3f8dea03,3
+np.float32,0xff52548c,0xbf800000,3
+np.float32,0x803ff9d8,0x803ff9d8,3
+np.float32,0x3c825823,0x3c836303,3
+np.float32,0xfc9e97a0,0xbf800000,3
+np.float32,0xfe644f48,0xbf800000,3
+np.float32,0x802f5017,0x802f5017,3
+np.float32,0x3d5753b9,0x3d5d1661,3
+np.float32,0x7f2a55d2,0x7f800000,3
+np.float32,0x7f4dabfe,0x7f800000,3
+np.float32,0x3f49492a,0x3f98fc47,3
+np.float32,0x3f4d1589,0x3f9d2f82,3
+np.float32,0xff016208,0xbf800000,3
+np.float32,0xbf571cb7,0xbf118365,3
+np.float32,0xbf1ef297,0xbeecd136,3
+np.float32,0x36266b,0x36266b,3
+np.float32,0xbed07b0e,0xbeab4129,3
+np.float32,0x7f553365,0x7f800000,3
+np.float32,0xfe9bb8c6,0xbf800000,3
+np.float32,0xbeb497d6,0xbe982e19,3
+np.float32,0xbf27af6c,0xbef60d16,3
+np.float32,0x55cf51,0x55cf51,3
+np.float32,0x3eab1db0,0x3ecb2e4f,3
+np.float32,0x3e777603,0x3e8bf62f,3
+np.float32,0x7f10e374,0x7f800000,3
+np.float32,0xbf1f6480,0xbeed4b8d,3
+np.float32,0x40479d,0x40479d,3
+np.float32,0x156259,0x156259,3
+np.float32,0x3d852e30,0x3d899b2d,3
+np.float32,0x80014ff3,0x80014ff3,3
+np.float32,0xbd812fa8,0xbd7a645c,3
+np.float32,0x800ab780,0x800ab780,3
+np.float32,0x3ea02ff4,0x3ebc13bd,3
+np.float32,0x7e858b8e,0x7f800000,3
+np.float32,0x75d63b,0x75d63b,3
+np.float32,0xbeb15c94,0xbe95e6e3,3
+np.float32,0x3da0cee0,0x3da74a39,3
+np.float32,0xff21c01c,0xbf800000,3
+np.float32,0x8049b5eb,0x8049b5eb,3
+np.float32,0x80177ab0,0x80177ab0,3
+np.float32,0xff137a50,0xbf800000,3
+np.float32,0x3f7febba,0x3fdbd51c,3
+np.float32,0x8041e4dd,0x8041e4dd,3
+np.float32,0x99b8c,0x99b8c,3
+np.float32,0x5621ba,0x5621ba,3
+np.float32,0x14b534,0x14b534,3
+np.float32,0xbe2eb3a8,0xbe209c95,3
+np.float32,0x7e510c28,0x7f800000,3
+np.float32,0x804ec2f2,0x804ec2f2,3
+np.float32,0x3f662406,0x3fba82b0,3
+np.float32,0x800000,0x800000,3
+np.float32,0x3f3120d6,0x3f7f5d96,3
+np.float32,0x7f179b8e,0x7f800000,3
+np.float32,0x7f65278e,0x7f800000,3
+np.float32,0xfeb50f52,0xbf800000,3
+np.float32,0x7f051bd1,0x7f800000,3
+np.float32,0x7ea0558d,0x7f800000,3
+np.float32,0xbd0a96c0,0xbd08453f,3
+np.float64,0xee82da5ddd05c,0xee82da5ddd05c,3
+np.float64,0x800c3a22d7f87446,0x800c3a22d7f87446,3
+np.float64,0xbfd34b20eaa69642,0xbfd0a825e7688d3e,3
+np.float64,0x3fd6a0f2492d41e5,0x3fdb253b906057b3,3
+np.float64,0xbfda13d8783427b0,0xbfd56b1d76684332,3
+np.float64,0xbfe50b5a99ea16b5,0xbfded7dd82c6f746,3
+np.float64,0x3f82468fc0248d20,0x3f825b7fa9378ee9,3
+np.float64,0x7ff0000000000000,0x7ff0000000000000,3
+np.float64,0x856e50290adca,0x856e50290adca,3
+np.float64,0x7fde55a5fa3cab4b,0x7ff0000000000000,3
+np.float64,0x7fcf2c8dd93e591b,0x7ff0000000000000,3
+np.float64,0x8001b3a0e3236743,0x8001b3a0e3236743,3
+np.float64,0x8000fdb14821fb63,0x8000fdb14821fb63,3
+np.float64,0xbfe3645e08e6c8bc,0xbfdd161362a5e9ef,3
+np.float64,0x7feb34d28b3669a4,0x7ff0000000000000,3
+np.float64,0x80099dd810933bb1,0x80099dd810933bb1,3
+np.float64,0xbfedbcc1097b7982,0xbfe35d86414d53dc,3
+np.float64,0x7fdc406fbdb880de,0x7ff0000000000000,3
+np.float64,0x800c4bf85ab897f1,0x800c4bf85ab897f1,3
+np.float64,0x3fd8f7b0e0b1ef60,0x3fde89b497ae20d8,3
+np.float64,0xffe4fced5c69f9da,0xbff0000000000000,3
+np.float64,0xbfe54d421fea9a84,0xbfdf1be0cbfbfcba,3
+np.float64,0x800af72f3535ee5f,0x800af72f3535ee5f,3
+np.float64,0x3fe24e6570e49ccb,0x3fe8b3a86d970411,3
+np.float64,0xbfdd7b22d0baf646,0xbfd79fac2e4f7558,3
+np.float64,0xbfe6a7654c6d4eca,0xbfe03c1f13f3b409,3
+np.float64,0x3fe2c3eb662587d7,0x3fe98566e625d4f5,3
+np.float64,0x3b1ef71e763e0,0x3b1ef71e763e0,3
+np.float64,0xffed03c6baba078d,0xbff0000000000000,3
+np.float64,0x3febac19d0b75834,0x3ff5fdacc9d51bcd,3
+np.float64,0x800635d6794c6bae,0x800635d6794c6bae,3
+np.float64,0xbfe8cafc827195f9,0xbfe1411438608ae1,3
+np.float64,0x7feeb616a83d6c2c,0x7ff0000000000000,3
+np.float64,0x3fd52d62a2aa5ac5,0x3fd91a07a7f18f44,3
+np.float64,0x80036996b8a6d32e,0x80036996b8a6d32e,3
+np.float64,0x2b1945965632a,0x2b1945965632a,3
+np.float64,0xbfecb5e8c9796bd2,0xbfe2f40fca276aa2,3
+np.float64,0x3fe8669ed4f0cd3e,0x3ff24c89fc9cdbff,3
+np.float64,0x71e9f65ee3d3f,0x71e9f65ee3d3f,3
+np.float64,0xbfd5ab262bab564c,0xbfd261ae108ef79e,3
+np.float64,0xbfe7091342ee1226,0xbfe06bf5622d75f6,3
+np.float64,0x49e888d093d12,0x49e888d093d12,3
+np.float64,0x2272f3dc44e5f,0x2272f3dc44e5f,3
+np.float64,0x7fe98736e0b30e6d,0x7ff0000000000000,3
+np.float64,0x30fa9cde61f54,0x30fa9cde61f54,3
+np.float64,0x7fdc163fc0382c7f,0x7ff0000000000000,3
+np.float64,0xffb40d04ee281a08,0xbff0000000000000,3
+np.float64,0xffe624617f2c48c2,0xbff0000000000000,3
+np.float64,0x3febb582bd376b05,0x3ff608da584d1716,3
+np.float64,0xfc30a5a5f8615,0xfc30a5a5f8615,3
+np.float64,0x3fef202efd7e405e,0x3ffa52009319b069,3
+np.float64,0x8004d0259829a04c,0x8004d0259829a04c,3
+np.float64,0x800622dc71ec45ba,0x800622dc71ec45ba,3
+np.float64,0xffefffffffffffff,0xbff0000000000000,3
+np.float64,0x800e89113c9d1223,0x800e89113c9d1223,3
+np.float64,0x7fba7fde3034ffbb,0x7ff0000000000000,3
+np.float64,0xbfeea31e807d463d,0xbfe3b7369b725915,3
+np.float64,0x3feb7c9589f6f92c,0x3ff5c56cf71b0dff,3
+np.float64,0x3fd52d3b59aa5a77,0x3fd919d0f683fd07,3
+np.float64,0x800de90a43fbd215,0x800de90a43fbd215,3
+np.float64,0x3fe7eb35a9efd66b,0x3ff1c940dbfc6ef9,3
+np.float64,0xbda0adcb7b416,0xbda0adcb7b416,3
+np.float64,0x7fc5753e3a2aea7b,0x7ff0000000000000,3
+np.float64,0xffdd101d103a203a,0xbff0000000000000,3
+np.float64,0x7fcb54f56836a9ea,0x7ff0000000000000,3
+np.float64,0xbfd61c8d6eac391a,0xbfd2b23bc0a2cef4,3
+np.float64,0x3feef55de37deabc,0x3ffa198639a0161d,3
+np.float64,0x7fe4ffbfaea9ff7e,0x7ff0000000000000,3
+np.float64,0x9d1071873a20e,0x9d1071873a20e,3
+np.float64,0x3fef1ecb863e3d97,0x3ffa502a81e09cfc,3
+np.float64,0xad2da12b5a5b4,0xad2da12b5a5b4,3
+np.float64,0xffe614b74c6c296e,0xbff0000000000000,3
+np.float64,0xffe60d3f286c1a7e,0xbff0000000000000,3
+np.float64,0x7fda7d91f4b4fb23,0x7ff0000000000000,3
+np.float64,0x800023f266a047e6,0x800023f266a047e6,3
+np.float64,0x7fdf5f9ad23ebf35,0x7ff0000000000000,3
+np.float64,0x3fa7459f002e8b3e,0x3fa7cf178dcf0af6,3
+np.float64,0x3fe9938d61f3271b,0x3ff39516a13caec3,3
+np.float64,0xbfd59314c3ab262a,0xbfd250830f73efd2,3
+np.float64,0xbfc7e193f72fc328,0xbfc5c924339dd7a8,3
+np.float64,0x7fec1965f17832cb,0x7ff0000000000000,3
+np.float64,0xbfd932908eb26522,0xbfd4d4312d272580,3
+np.float64,0xbfdf2d08e2be5a12,0xbfd8add1413b0b1b,3
+np.float64,0x7fdcf7cc74b9ef98,0x7ff0000000000000,3
+np.float64,0x7fc79300912f2600,0x7ff0000000000000,3
+np.float64,0xffd4bd8f23297b1e,0xbff0000000000000,3
+np.float64,0x41869ce0830e,0x41869ce0830e,3
+np.float64,0x3fe5dcec91ebb9da,0x3fef5e213598cbd4,3
+np.float64,0x800815d9c2902bb4,0x800815d9c2902bb4,3
+np.float64,0x800ba1a4b877434a,0x800ba1a4b877434a,3
+np.float64,0x80069d7bdc4d3af8,0x80069d7bdc4d3af8,3
+np.float64,0xcf00d4339e01b,0xcf00d4339e01b,3
+np.float64,0x80072b71bd4e56e4,0x80072b71bd4e56e4,3
+np.float64,0x80059ca6fbab394f,0x80059ca6fbab394f,3
+np.float64,0x3fe522fc092a45f8,0x3fedf212682bf894,3
+np.float64,0x7fe17f384ea2fe70,0x7ff0000000000000,3
+np.float64,0x0,0x0,3
+np.float64,0x3f72bb4c20257698,0x3f72c64766b52069,3
+np.float64,0x7fbc97c940392f92,0x7ff0000000000000,3
+np.float64,0xffc5904ebd2b209c,0xbff0000000000000,3
+np.float64,0xbfe34fb55b669f6a,0xbfdcff81dd30a49d,3
+np.float64,0x8007ccda006f99b5,0x8007ccda006f99b5,3
+np.float64,0x3fee50e4c8fca1ca,0x3ff9434c7750ad0f,3
+np.float64,0x7fee7b07c67cf60f,0x7ff0000000000000,3
+np.float64,0x3fdcce4a5a399c95,0x3fe230c83f28218a,3
+np.float64,0x7fee5187b37ca30e,0x7ff0000000000000,3
+np.float64,0x3fc48f6a97291ed8,0x3fc64db6200a9833,3
+np.float64,0xc7fec3498ffd9,0xc7fec3498ffd9,3
+np.float64,0x800769c59d2ed38c,0x800769c59d2ed38c,3
+np.float64,0xffe69ede782d3dbc,0xbff0000000000000,3
+np.float64,0x3fecd9770979b2ee,0x3ff76a1f2f0f08f2,3
+np.float64,0x5aa358a8b546c,0x5aa358a8b546c,3
+np.float64,0xbfe795a0506f2b40,0xbfe0afcc52c0166b,3
+np.float64,0xffd4ada1e8a95b44,0xbff0000000000000,3
+np.float64,0xffcac1dc213583b8,0xbff0000000000000,3
+np.float64,0xffe393c15fa72782,0xbff0000000000000,3
+np.float64,0xbfcd6a3c113ad478,0xbfca47a2157b9cdd,3
+np.float64,0xffedde20647bbc40,0xbff0000000000000,3
+np.float64,0x3fd0d011b1a1a024,0x3fd33a57945559f4,3
+np.float64,0x3fef27e29f7e4fc6,0x3ffa5c314e0e3d69,3
+np.float64,0xffe96ff71f72dfee,0xbff0000000000000,3
+np.float64,0xffe762414f2ec482,0xbff0000000000000,3
+np.float64,0x3fc2dcfd3d25b9fa,0x3fc452f41682a12e,3
+np.float64,0xbfbdb125b63b6248,0xbfbc08e6553296d4,3
+np.float64,0x7b915740f724,0x7b915740f724,3
+np.float64,0x60b502b2c16a1,0x60b502b2c16a1,3
+np.float64,0xbfeb38b0be367162,0xbfe254f6782cfc47,3
+np.float64,0x800dc39a3edb8735,0x800dc39a3edb8735,3
+np.float64,0x3fea4fb433349f68,0x3ff468b97cf699f5,3
+np.float64,0xbfd49967962932d0,0xbfd19ceb41ff4cd0,3
+np.float64,0xbfebf75cd377eeba,0xbfe2a576bdbccccc,3
+np.float64,0xbfb653d65c2ca7b0,0xbfb561ab8fcb3f26,3
+np.float64,0xffe3f34b8727e696,0xbff0000000000000,3
+np.float64,0x3fdd798064baf301,0x3fe2b7c130a6fc63,3
+np.float64,0x3febe027e6b7c050,0x3ff63bac1b22e12d,3
+np.float64,0x7fcaa371af3546e2,0x7ff0000000000000,3
+np.float64,0xbfe6ee980a2ddd30,0xbfe05f0bc5dc80d2,3
+np.float64,0xc559c33f8ab39,0xc559c33f8ab39,3
+np.float64,0x84542c2b08a86,0x84542c2b08a86,3
+np.float64,0xbfe5645e046ac8bc,0xbfdf3398dc3cc1bd,3
+np.float64,0x3fee8c48ae7d1892,0x3ff9902899480526,3
+np.float64,0x3fb706471c2e0c8e,0x3fb817787aace8db,3
+np.float64,0x7fefe78f91ffcf1e,0x7ff0000000000000,3
+np.float64,0xbfcf6d560b3edaac,0xbfcbddc72a2130df,3
+np.float64,0x7fd282bfd925057f,0x7ff0000000000000,3
+np.float64,0x3fb973dbee32e7b8,0x3fbac2c87cbd0215,3
+np.float64,0x3fd1ce38ff239c72,0x3fd4876de5164420,3
+np.float64,0x8008ac2e3c31585d,0x8008ac2e3c31585d,3
+np.float64,0x3fa05e06dc20bc00,0x3fa0a1b7de904dce,3
+np.float64,0x7fd925f215324be3,0x7ff0000000000000,3
+np.float64,0x3f949d95d0293b2c,0x3f94d31197d51874,3
+np.float64,0xffdded9e67bbdb3c,0xbff0000000000000,3
+np.float64,0x3fed390dcfba721c,0x3ff7e08c7a709240,3
+np.float64,0x7fe6e62300adcc45,0x7ff0000000000000,3
+np.float64,0xbfd779bc312ef378,0xbfd3a6cb64bb0181,3
+np.float64,0x3fe43e9877287d31,0x3fec3e100ef935fd,3
+np.float64,0x210b68e44216e,0x210b68e44216e,3
+np.float64,0x3fcdffc1e73bff84,0x3fd0e729d02ec539,3
+np.float64,0xcea10c0f9d422,0xcea10c0f9d422,3
+np.float64,0x7feb97a82d772f4f,0x7ff0000000000000,3
+np.float64,0x9b4b4d953696a,0x9b4b4d953696a,3
+np.float64,0x3fd1bd8e95237b1d,0x3fd4716dd34cf828,3
+np.float64,0x800fc273841f84e7,0x800fc273841f84e7,3
+np.float64,0xbfd2aef167255de2,0xbfd0340f30d82f18,3
+np.float64,0x800d021a551a0435,0x800d021a551a0435,3
+np.float64,0xffebf934a8b7f268,0xbff0000000000000,3
+np.float64,0x3fd819849fb03308,0x3fdd43bca0aac749,3
+np.float64,0x7ff8000000000000,0x7ff8000000000000,3
+np.float64,0x27c34b064f86a,0x27c34b064f86a,3
+np.float64,0x7fef4f5a373e9eb3,0x7ff0000000000000,3
+np.float64,0x7fd92fccce325f99,0x7ff0000000000000,3
+np.float64,0x800520869d6a410e,0x800520869d6a410e,3
+np.float64,0x3fccbcaddf397958,0x3fd01bf6b0c4d97f,3
+np.float64,0x80039ebfc4273d80,0x80039ebfc4273d80,3
+np.float64,0xbfed1f0b3c7a3e16,0xbfe31ea6e4c69141,3
+np.float64,0x7fee1bb7c4bc376f,0x7ff0000000000000,3
+np.float64,0xbfa8bee1d8317dc0,0xbfa8283b7dbf95a9,3
+np.float64,0x3fe797db606f2fb6,0x3ff171b1c2bc8fe5,3
+np.float64,0xbfee2ecfdbbc5da0,0xbfe38a3f0a43d14e,3
+np.float64,0x3fe815c7f1302b90,0x3ff1f65165c45d71,3
+np.float64,0xbfbb265c94364cb8,0xbfb9c27ec61a9a1d,3
+np.float64,0x3fcf1cab5d3e3957,0x3fd19c07444642f9,3
+np.float64,0xbfe6ae753f6d5cea,0xbfe03f99666dbe17,3
+np.float64,0xbfd18a2a73a31454,0xbfceaee204aca016,3
+np.float64,0x3fb8a1dffc3143c0,0x3fb9db38341ab1a3,3
+np.float64,0x7fd2a0376025406e,0x7ff0000000000000,3
+np.float64,0x7fe718c0e3ae3181,0x7ff0000000000000,3
+np.float64,0x3fb264d42424c9a8,0x3fb3121f071d4db4,3
+np.float64,0xd27190a7a4e32,0xd27190a7a4e32,3
+np.float64,0xbfe467668c68cecd,0xbfde2c4616738d5e,3
+np.float64,0x800ab9a2b9357346,0x800ab9a2b9357346,3
+np.float64,0x7fcbd108d537a211,0x7ff0000000000000,3
+np.float64,0x3fb79bba6e2f3770,0x3fb8bb2c140d3445,3
+np.float64,0xffefa7165e3f4e2c,0xbff0000000000000,3
+np.float64,0x7fb40185a428030a,0x7ff0000000000000,3
+np.float64,0xbfe9e3d58e73c7ab,0xbfe1c04d51c83d69,3
+np.float64,0x7fef5b97b17eb72e,0x7ff0000000000000,3
+np.float64,0x800a2957683452af,0x800a2957683452af,3
+np.float64,0x800f54f1925ea9e3,0x800f54f1925ea9e3,3
+np.float64,0xeffa4e77dff4a,0xeffa4e77dff4a,3
+np.float64,0xffbe501aa03ca038,0xbff0000000000000,3
+np.float64,0x8006c651bced8ca4,0x8006c651bced8ca4,3
+np.float64,0x3fe159faff22b3f6,0x3fe708f78efbdbed,3
+np.float64,0x800e7d59a31cfab3,0x800e7d59a31cfab3,3
+np.float64,0x3fe6ac2f272d585e,0x3ff07ee5305385c3,3
+np.float64,0x7fd014c054202980,0x7ff0000000000000,3
+np.float64,0xbfe4800b11e90016,0xbfde4648c6f29ce5,3
+np.float64,0xbfe6738470ece709,0xbfe0227b5b42b713,3
+np.float64,0x3fed052add3a0a56,0x3ff7a01819e65c6e,3
+np.float64,0xffe03106f120620e,0xbff0000000000000,3
+np.float64,0x7fe11df4d4e23be9,0x7ff0000000000000,3
+np.float64,0xbfcea25d7b3d44bc,0xbfcb3e808e7ce852,3
+np.float64,0xd0807b03a1010,0xd0807b03a1010,3
+np.float64,0x8004eda4fec9db4b,0x8004eda4fec9db4b,3
+np.float64,0x3fceb5c98d3d6b90,0x3fd15a894b15dd9f,3
+np.float64,0xbfee27228afc4e45,0xbfe38741702f3c0b,3
+np.float64,0xbfe606278c6c0c4f,0xbfdfd7cb6093652d,3
+np.float64,0xbfd66f59bc2cdeb4,0xbfd2ecb2297f6afc,3
+np.float64,0x4aee390095dc8,0x4aee390095dc8,3
+np.float64,0xbfe391355d67226a,0xbfdd46ddc0997014,3
+np.float64,0xffd27765e7a4eecc,0xbff0000000000000,3
+np.float64,0xbfe795e20a2f2bc4,0xbfe0afebc66c4dbd,3
+np.float64,0x7fc9a62e81334c5c,0x7ff0000000000000,3
+np.float64,0xffe4e57e52a9cafc,0xbff0000000000000,3
+np.float64,0x7fac326c8c3864d8,0x7ff0000000000000,3
+np.float64,0x3fe8675f6370cebf,0x3ff24d5863029c15,3
+np.float64,0x7fcf4745e73e8e8b,0x7ff0000000000000,3
+np.float64,0x7fcc9aec9f3935d8,0x7ff0000000000000,3
+np.float64,0x3fec2e8fcab85d20,0x3ff699ccd0b2fed6,3
+np.float64,0x3fd110a968222153,0x3fd38e81a88c2d13,3
+np.float64,0xffb3a68532274d08,0xbff0000000000000,3
+np.float64,0xf0e562bbe1cad,0xf0e562bbe1cad,3
+np.float64,0xbfe815b9e5f02b74,0xbfe0ec9f5023aebc,3
+np.float64,0xbf5151d88022a400,0xbf514f80c465feea,3
+np.float64,0x2547e3144a8fd,0x2547e3144a8fd,3
+np.float64,0x3fedcc0c28fb9818,0x3ff899612fbeb4c5,3
+np.float64,0x3fdc3d1c0f387a38,0x3fe1bf6e2d39bd75,3
+np.float64,0x7fe544dbe62a89b7,0x7ff0000000000000,3
+np.float64,0x8001500e48e2a01d,0x8001500e48e2a01d,3
+np.float64,0xbfed3b2b09fa7656,0xbfe329f3e7bada64,3
+np.float64,0xbfe76a943aeed528,0xbfe09b24e3aa3f79,3
+np.float64,0x3fe944330e328866,0x3ff33d472dee70c5,3
+np.float64,0x8004bbbd6cc9777c,0x8004bbbd6cc9777c,3
+np.float64,0xbfe28133fb650268,0xbfdc1ac230ac4ef5,3
+np.float64,0xc1370af7826e2,0xc1370af7826e2,3
+np.float64,0x7fcfa47f5f3f48fe,0x7ff0000000000000,3
+np.float64,0xbfa3002a04260050,0xbfa2a703a538b54e,3
+np.float64,0xffef44f3903e89e6,0xbff0000000000000,3
+np.float64,0xc32cce298659a,0xc32cce298659a,3
+np.float64,0x7b477cc2f68f0,0x7b477cc2f68f0,3
+np.float64,0x40a7f4ec814ff,0x40a7f4ec814ff,3
+np.float64,0xffee38edf67c71db,0xbff0000000000000,3
+np.float64,0x3fe23f6f1ce47ede,0x3fe8992b8bb03499,3
+np.float64,0x7fc8edfe7f31dbfc,0x7ff0000000000000,3
+np.float64,0x800bb8e6fb3771ce,0x800bb8e6fb3771ce,3
+np.float64,0xbfe11d364ee23a6c,0xbfda82a0c2ef9e46,3
+np.float64,0xbfeb993cb4b7327a,0xbfe27df565da85dc,3
+np.float64,0x10000000000000,0x10000000000000,3
+np.float64,0x3fc1f997d723f330,0x3fc34c5cff060af1,3
+np.float64,0x6e326fa0dc64f,0x6e326fa0dc64f,3
+np.float64,0x800fa30c2c5f4618,0x800fa30c2c5f4618,3
+np.float64,0x7fed16ad603a2d5a,0x7ff0000000000000,3
+np.float64,0x9411cf172823a,0x9411cf172823a,3
+np.float64,0xffece51d4cb9ca3a,0xbff0000000000000,3
+np.float64,0x3fdda3d1453b47a3,0x3fe2d954f7849890,3
+np.float64,0xffd58330172b0660,0xbff0000000000000,3
+np.float64,0xbfc6962ae52d2c54,0xbfc4b4bdf0069f17,3
+np.float64,0xbfb4010a8e280218,0xbfb33e1236f7efa0,3
+np.float64,0x7fd0444909208891,0x7ff0000000000000,3
+np.float64,0xbfe027a24de04f44,0xbfd95e9064101e7c,3
+np.float64,0xa6f3f3214de9,0xa6f3f3214de9,3
+np.float64,0xbfe112eb0fe225d6,0xbfda768f7cbdf346,3
+np.float64,0xbfe99e90d4b33d22,0xbfe1a153e45a382a,3
+np.float64,0xffecb34f8e79669e,0xbff0000000000000,3
+np.float64,0xbfdf32c9653e6592,0xbfd8b159caf5633d,3
+np.float64,0x3fe9519829b2a330,0x3ff34c0a8152e20f,3
+np.float64,0xffd08ec8a7a11d92,0xbff0000000000000,3
+np.float64,0xffd19b71b6a336e4,0xbff0000000000000,3
+np.float64,0x7feda6b9377b4d71,0x7ff0000000000000,3
+np.float64,0x800fda2956bfb453,0x800fda2956bfb453,3
+np.float64,0x3fe54f601bea9ec0,0x3fee483cb03cbde4,3
+np.float64,0xbfe2a8ad5ee5515a,0xbfdc46ee7a10bf0d,3
+np.float64,0xbfd336c8bd266d92,0xbfd09916d432274a,3
+np.float64,0xfff0000000000000,0xbff0000000000000,3
+np.float64,0x3fd9a811a9b35024,0x3fdf8fa68cc048e3,3
+np.float64,0x3fe078c68520f18d,0x3fe58aecc1f9649b,3
+np.float64,0xbfc6d5aa3a2dab54,0xbfc4e9ea84f3d73c,3
+np.float64,0xf9682007f2d04,0xf9682007f2d04,3
+np.float64,0x3fee54523dbca8a4,0x3ff947b826de81f4,3
+np.float64,0x80461e5d008c4,0x80461e5d008c4,3
+np.float64,0x3fdd6d12d5bada26,0x3fe2ade8dee2fa02,3
+np.float64,0x3fcd5f0dfd3abe18,0x3fd081d6cd25731d,3
+np.float64,0x7fa36475c826c8eb,0x7ff0000000000000,3
+np.float64,0xbfdf3ce052be79c0,0xbfd8b78baccfb908,3
+np.float64,0x7fcd890dd13b121b,0x7ff0000000000000,3
+np.float64,0x8000000000000001,0x8000000000000001,3
+np.float64,0x800ec0f4281d81e8,0x800ec0f4281d81e8,3
+np.float64,0xbfba960116352c00,0xbfb94085424496d9,3
+np.float64,0x3fdddedc9bbbbdb8,0x3fe30853fe4ef5ce,3
+np.float64,0x238092a847013,0x238092a847013,3
+np.float64,0xbfe38d4803271a90,0xbfdd429a955c46af,3
+np.float64,0xbfd4c9067329920c,0xbfd1bf6255ed91a4,3
+np.float64,0xbfbee213923dc428,0xbfbd17ce1bda6088,3
+np.float64,0xffd5a2d337ab45a6,0xbff0000000000000,3
+np.float64,0x7fe21bfcf82437f9,0x7ff0000000000000,3
+np.float64,0x3fe2a2714da544e3,0x3fe949594a74ea25,3
+np.float64,0x800e05cf8ebc0b9f,0x800e05cf8ebc0b9f,3
+np.float64,0x559a1526ab343,0x559a1526ab343,3
+np.float64,0xffe6a1b7906d436e,0xbff0000000000000,3
+np.float64,0xffef27d6253e4fab,0xbff0000000000000,3
+np.float64,0xbfe0f90ab0a1f216,0xbfda5828a1edde48,3
+np.float64,0x9675d2ab2cebb,0x9675d2ab2cebb,3
+np.float64,0xffee0f7eecfc1efd,0xbff0000000000000,3
+np.float64,0x2ec005625d801,0x2ec005625d801,3
+np.float64,0x7fde35ff14bc6bfd,0x7ff0000000000000,3
+np.float64,0xffe03f36d9e07e6d,0xbff0000000000000,3
+np.float64,0x7fe09ff7c4213fef,0x7ff0000000000000,3
+np.float64,0xffeac29dd1b5853b,0xbff0000000000000,3
+np.float64,0x3fb63120aa2c6241,0x3fb72ea3de98a853,3
+np.float64,0xffd079eb84a0f3d8,0xbff0000000000000,3
+np.float64,0xbfd3c2cc75a78598,0xbfd1005996880b3f,3
+np.float64,0x7fb80507ee300a0f,0x7ff0000000000000,3
+np.float64,0xffe8006105f000c1,0xbff0000000000000,3
+np.float64,0x8009138b0ab22716,0x8009138b0ab22716,3
+np.float64,0xbfd6dfb40b2dbf68,0xbfd33b8e4008e3b0,3
+np.float64,0xbfe7c2cf9bef859f,0xbfe0c55c807460df,3
+np.float64,0xbfe75fe4da6ebfca,0xbfe09600256d3b81,3
+np.float64,0xffd662fc73acc5f8,0xbff0000000000000,3
+np.float64,0x20b99dbc41735,0x20b99dbc41735,3
+np.float64,0x3fe10b38ade21671,0x3fe68229a9bbeefc,3
+np.float64,0x3743b99c6e878,0x3743b99c6e878,3
+np.float64,0xff9eb5ed903d6be0,0xbff0000000000000,3
+np.float64,0x3ff0000000000000,0x3ffb7e151628aed3,3
+np.float64,0xffb9e0569e33c0b0,0xbff0000000000000,3
+np.float64,0x7fd39c804fa73900,0x7ff0000000000000,3
+np.float64,0x3fe881ef67f103df,0x3ff269dd704b7129,3
+np.float64,0x1b6eb40236dd7,0x1b6eb40236dd7,3
+np.float64,0xbfe734ea432e69d4,0xbfe0813e6355d02f,3
+np.float64,0xffcf48f3743e91e8,0xbff0000000000000,3
+np.float64,0xffed10bcf6fa2179,0xbff0000000000000,3
+np.float64,0x3fef07723b7e0ee4,0x3ffa3156123f3c15,3
+np.float64,0xffe45c704aa8b8e0,0xbff0000000000000,3
+np.float64,0xb7b818d96f703,0xb7b818d96f703,3
+np.float64,0x42fcc04085f99,0x42fcc04085f99,3
+np.float64,0xbfda7ced01b4f9da,0xbfd5b0ce1e5524ae,3
+np.float64,0xbfe1e5963d63cb2c,0xbfdb6a87b6c09185,3
+np.float64,0x7fdfa18003bf42ff,0x7ff0000000000000,3
+np.float64,0xbfe3790a43e6f214,0xbfdd2c9a38b4f089,3
+np.float64,0xffe0ff5b9ae1feb6,0xbff0000000000000,3
+np.float64,0x80085a7d3110b4fb,0x80085a7d3110b4fb,3
+np.float64,0xffd6bfa6622d7f4c,0xbff0000000000000,3
+np.float64,0xbfef5ddc7cfebbb9,0xbfe3fe170521593e,3
+np.float64,0x3fc21773fa242ee8,0x3fc36ebda1f91a72,3
+np.float64,0x7fc04d98da209b31,0x7ff0000000000000,3
+np.float64,0xbfeba3b535b7476a,0xbfe282602e3c322e,3
+np.float64,0xffd41fb5c1a83f6c,0xbff0000000000000,3
+np.float64,0xf87d206df0fa4,0xf87d206df0fa4,3
+np.float64,0x800060946fc0c12a,0x800060946fc0c12a,3
+np.float64,0x3fe69d5f166d3abe,0x3ff06fdddcf4ca93,3
+np.float64,0x7fe9b5793b336af1,0x7ff0000000000000,3
+np.float64,0x7fe0dd4143e1ba82,0x7ff0000000000000,3
+np.float64,0xbfa8eaea3c31d5d0,0xbfa8522e397da3bd,3
+np.float64,0x119f0078233e1,0x119f0078233e1,3
+np.float64,0xbfd78a207aaf1440,0xbfd3b225bbf2ab4f,3
+np.float64,0xc66a6d4d8cd4e,0xc66a6d4d8cd4e,3
+np.float64,0xe7fc4b57cff8a,0xe7fc4b57cff8a,3
+np.float64,0x800883e8091107d0,0x800883e8091107d0,3
+np.float64,0x3fa6520c842ca419,0x3fa6d06e1041743a,3
+np.float64,0x3fa563182c2ac630,0x3fa5d70e27a84c97,3
+np.float64,0xe6a30b61cd462,0xe6a30b61cd462,3
+np.float64,0x3fee85dac37d0bb6,0x3ff987cfa41a9778,3
+np.float64,0x3fe8f621db71ec44,0x3ff2e7b768a2e9d0,3
+np.float64,0x800f231d861e463b,0x800f231d861e463b,3
+np.float64,0xbfe22eb07c645d61,0xbfdbbdbb853ab4c6,3
+np.float64,0x7fd2dda2dea5bb45,0x7ff0000000000000,3
+np.float64,0xbfd09b79a0a136f4,0xbfcd4147606ffd27,3
+np.float64,0xca039cc394074,0xca039cc394074,3
+np.float64,0x8000000000000000,0x8000000000000000,3
+np.float64,0xcb34575d9668b,0xcb34575d9668b,3
+np.float64,0x3fea62c1f3f4c584,0x3ff47e6dc67ec89f,3
+np.float64,0x7fe544c8606a8990,0x7ff0000000000000,3
+np.float64,0xffe0a980c4615301,0xbff0000000000000,3
+np.float64,0x3fdd67d5f8bacfac,0x3fe2a9c3421830f1,3
+np.float64,0xffe41d3dda283a7b,0xbff0000000000000,3
+np.float64,0xffeed59e5ffdab3c,0xbff0000000000000,3
+np.float64,0xffeeae8326fd5d05,0xbff0000000000000,3
+np.float64,0x800d70b4fa7ae16a,0x800d70b4fa7ae16a,3
+np.float64,0xffec932e6839265c,0xbff0000000000000,3
+np.float64,0xee30b185dc616,0xee30b185dc616,3
+np.float64,0x7fc3cf4397279e86,0x7ff0000000000000,3
+np.float64,0xbfeab34f1875669e,0xbfe21b868229de7d,3
+np.float64,0xf45f5f7de8bec,0xf45f5f7de8bec,3
+np.float64,0x3fad2c4b203a5896,0x3fae0528b568f3cf,3
+np.float64,0xbfe2479543e48f2a,0xbfdbd9e57cf64028,3
+np.float64,0x3fd41a1473283429,0x3fd79df2bc60debb,3
+np.float64,0x3febb5155ef76a2a,0x3ff608585afd698b,3
+np.float64,0xffe21f5303e43ea6,0xbff0000000000000,3
+np.float64,0x7fe9ef390833de71,0x7ff0000000000000,3
+np.float64,0xffe8ee873d71dd0e,0xbff0000000000000,3
+np.float64,0x7fd7cbc55e2f978a,0x7ff0000000000000,3
+np.float64,0x80081f9080d03f21,0x80081f9080d03f21,3
+np.float64,0x7fecbafc8b3975f8,0x7ff0000000000000,3
+np.float64,0x800b6c4b0b16d896,0x800b6c4b0b16d896,3
+np.float64,0xbfaa0fc2d4341f80,0xbfa968cdf32b98ad,3
+np.float64,0x3fec79fe4078f3fc,0x3ff6f5361a4a5d93,3
+np.float64,0xbfb14b79de2296f0,0xbfb0b93b75ecec11,3
+np.float64,0x800009d084c013a2,0x800009d084c013a2,3
+np.float64,0x4a4cdfe29499d,0x4a4cdfe29499d,3
+np.float64,0xbfe721c2d56e4386,0xbfe077f541987d76,3
+np.float64,0x3e5f539e7cbeb,0x3e5f539e7cbeb,3
+np.float64,0x3fd23f044c247e09,0x3fd51ceafcdd64aa,3
+np.float64,0x3fc70785b02e0f0b,0x3fc93b2a37eb342a,3
+np.float64,0xbfe7ab4ec7af569e,0xbfe0ba28eecbf6b0,3
+np.float64,0x800c1d4134583a83,0x800c1d4134583a83,3
+np.float64,0xffd9a73070334e60,0xbff0000000000000,3
+np.float64,0x68a4bf24d1499,0x68a4bf24d1499,3
+np.float64,0x7feba9d9507753b2,0x7ff0000000000000,3
+np.float64,0xbfe9d747db73ae90,0xbfe1bab53d932010,3
+np.float64,0x800a9a4aed953496,0x800a9a4aed953496,3
+np.float64,0xffcb89b0ad371360,0xbff0000000000000,3
+np.float64,0xbfc62388b82c4710,0xbfc4547be442a38c,3
+np.float64,0x800a006d187400db,0x800a006d187400db,3
+np.float64,0x3fcef2fbd33de5f8,0x3fd18177b2150148,3
+np.float64,0x8000b74e3da16e9d,0x8000b74e3da16e9d,3
+np.float64,0x25be536e4b7cb,0x25be536e4b7cb,3
+np.float64,0x3fa86e189430dc31,0x3fa905b4684c9f01,3
+np.float64,0xa7584b114eb0a,0xa7584b114eb0a,3
+np.float64,0x800331133c866227,0x800331133c866227,3
+np.float64,0x3fb52b48142a5690,0x3fb611a6f6e7c664,3
+np.float64,0x3fe825797cf04af2,0x3ff206fd60e98116,3
+np.float64,0x3fd0bec4e5217d8a,0x3fd323db3ffd59b2,3
+np.float64,0x907b43a120f7,0x907b43a120f7,3
+np.float64,0x3fed31eb1d3a63d6,0x3ff7d7a91c6930a4,3
+np.float64,0x7f97a13d782f427a,0x7ff0000000000000,3
+np.float64,0xffc7121a702e2434,0xbff0000000000000,3
+np.float64,0xbfe8bb4cbbf1769a,0xbfe139d7f46f1fb1,3
+np.float64,0xbfe3593cc5a6b27a,0xbfdd09ec91d6cd48,3
+np.float64,0x7fcff218ff9ff,0x7fcff218ff9ff,3
+np.float64,0x3fe73651d4ae6ca4,0x3ff10c5c1d21d127,3
+np.float64,0x80054e396eaa9c74,0x80054e396eaa9c74,3
+np.float64,0x3fe527d5f9aa4fac,0x3fedfb7743db9b53,3
+np.float64,0x7fec6f28c5f8de51,0x7ff0000000000000,3
+np.float64,0x3fcd2bbff53a5780,0x3fd061987416b49b,3
+np.float64,0xffd1f0046423e008,0xbff0000000000000,3
+np.float64,0x80034d97fac69b31,0x80034d97fac69b31,3
+np.float64,0x3faa803f14350080,0x3fab32e3f8073be4,3
+np.float64,0x3fcf8da0163f1b40,0x3fd1e42ba2354c8e,3
+np.float64,0x3fd573c2632ae785,0x3fd97c37609d18d7,3
+np.float64,0x7f922960482452c0,0x7ff0000000000000,3
+np.float64,0x800ebd0c5d3d7a19,0x800ebd0c5d3d7a19,3
+np.float64,0xbfee63b7807cc76f,0xbfe39ec7981035db,3
+np.float64,0xffdc023f8e380480,0xbff0000000000000,3
+np.float64,0x3fe3ffa02c67ff40,0x3febc7f8b900ceba,3
+np.float64,0x36c508b86d8a2,0x36c508b86d8a2,3
+np.float64,0x3fc9fbb0f133f760,0x3fcccee9f6ba801c,3
+np.float64,0x3fd75c1d5faeb83b,0x3fdc3150f9eff99e,3
+np.float64,0x3fe9a8d907b351b2,0x3ff3accc78a31df8,3
+np.float64,0x3fdd8fdcafbb1fb8,0x3fe2c97c97757994,3
+np.float64,0x3fb10c34ca22186a,0x3fb1a0cc42c76b86,3
+np.float64,0xbff0000000000000,0xbfe43a54e4e98864,3
+np.float64,0xffd046aefda08d5e,0xbff0000000000000,3
+np.float64,0x80067989758cf314,0x80067989758cf314,3
+np.float64,0x3fee9d77763d3aef,0x3ff9a67ff0841ba5,3
+np.float64,0xffe4d3cbf8e9a798,0xbff0000000000000,3
+np.float64,0x800f9cab273f3956,0x800f9cab273f3956,3
+np.float64,0x800a5c84f9f4b90a,0x800a5c84f9f4b90a,3
+np.float64,0x4fd377009fa8,0x4fd377009fa8,3
+np.float64,0xbfe7ba26af6f744e,0xbfe0c13ce45d6f95,3
+np.float64,0x609c8a86c1392,0x609c8a86c1392,3
+np.float64,0x7fe4d0296ea9a052,0x7ff0000000000000,3
+np.float64,0x59847bccb3090,0x59847bccb3090,3
+np.float64,0xbfdf944157bf2882,0xbfd8ed092bacad43,3
+np.float64,0xbfe7560a632eac15,0xbfe091405ec34973,3
+np.float64,0x3fea0699f4340d34,0x3ff415eb72089230,3
+np.float64,0x800a5533f374aa68,0x800a5533f374aa68,3
+np.float64,0xbf8e8cdb103d19c0,0xbf8e52cffcb83774,3
+np.float64,0x3fe87d9e52f0fb3d,0x3ff2653952344b81,3
+np.float64,0x7fca3950f73472a1,0x7ff0000000000000,3
+np.float64,0xffd5d1068aaba20e,0xbff0000000000000,3
+np.float64,0x3fd1a5f169a34be4,0x3fd4524b6ef17f91,3
+np.float64,0x3fdc4b95a8b8972c,0x3fe1caafd8652bf7,3
+np.float64,0x3fe333f65a6667ed,0x3fea502fb1f8a578,3
+np.float64,0xbfc117aaac222f54,0xbfc00018a4b84b6e,3
+np.float64,0x7fecf2efdf39e5df,0x7ff0000000000000,3
+np.float64,0x4e99d83e9d33c,0x4e99d83e9d33c,3
+np.float64,0x800d18937bda3127,0x800d18937bda3127,3
+np.float64,0x3fd6c67778ad8cef,0x3fdb5aba70a3ea9e,3
+np.float64,0x3fdbb71770b76e2f,0x3fe157ae8da20bc5,3
+np.float64,0xbfe9faf6ebf3f5ee,0xbfe1ca963d83f17f,3
+np.float64,0x80038850ac0710a2,0x80038850ac0710a2,3
+np.float64,0x8006beb72f8d7d6f,0x8006beb72f8d7d6f,3
+np.float64,0x3feead67bffd5acf,0x3ff9bb43e8b15e2f,3
+np.float64,0xbfd1174b89222e98,0xbfcdff9972799907,3
+np.float64,0x7fee2c077cfc580e,0x7ff0000000000000,3
+np.float64,0xbfbdbd904e3b7b20,0xbfbc13f4916ed466,3
+np.float64,0xffee47b8fe3c8f71,0xbff0000000000000,3
+np.float64,0xffd161884222c310,0xbff0000000000000,3
+np.float64,0xbfd42f27c4a85e50,0xbfd14fa8d67ba5ee,3
+np.float64,0x7fefffffffffffff,0x7ff0000000000000,3
+np.float64,0x8008151791b02a30,0x8008151791b02a30,3
+np.float64,0xbfba79029234f208,0xbfb926616cf41755,3
+np.float64,0x8004c486be29890e,0x8004c486be29890e,3
+np.float64,0x7fe5325a252a64b3,0x7ff0000000000000,3
+np.float64,0x5a880f04b5103,0x5a880f04b5103,3
+np.float64,0xbfe6f4b7702de96f,0xbfe06209002dd72c,3
+np.float64,0xbfdf8b3739bf166e,0xbfd8e783efe3c30f,3
+np.float64,0xbfe32571c8e64ae4,0xbfdcd128b9aa49a1,3
+np.float64,0xbfe97c98c172f932,0xbfe1920ac0fc040f,3
+np.float64,0x3fd0b513a2a16a28,0x3fd31744e3a1bf0a,3
+np.float64,0xffe3ab70832756e0,0xbff0000000000000,3
+np.float64,0x80030f055ce61e0b,0x80030f055ce61e0b,3
+np.float64,0xffd5f3b21b2be764,0xbff0000000000000,3
+np.float64,0x800c1f2d6c783e5b,0x800c1f2d6c783e5b,3
+np.float64,0x80075f4f148ebe9f,0x80075f4f148ebe9f,3
+np.float64,0xbfa5a046f42b4090,0xbfa52cfbf8992256,3
+np.float64,0xffd6702583ace04c,0xbff0000000000000,3
+np.float64,0x800dc0a5cf1b814c,0x800dc0a5cf1b814c,3
+np.float64,0x14f2203a29e45,0x14f2203a29e45,3
+np.float64,0x800421a40ee84349,0x800421a40ee84349,3
+np.float64,0xbfea7c279df4f84f,0xbfe2037fff3ed877,3
+np.float64,0xbfe9b41ddcf3683c,0xbfe1aafe18a44bf8,3
+np.float64,0xffe7b037022f606e,0xbff0000000000000,3
+np.float64,0x800bafb648775f6d,0x800bafb648775f6d,3
+np.float64,0x800b81681d5702d1,0x800b81681d5702d1,3
+np.float64,0x3fe29f8dc8653f1c,0x3fe9442da1c32c6b,3
+np.float64,0xffef9a05dc7f340b,0xbff0000000000000,3
+np.float64,0x800c8c65a65918cb,0x800c8c65a65918cb,3
+np.float64,0xffe99df0d5f33be1,0xbff0000000000000,3
+np.float64,0x9afeb22535fd7,0x9afeb22535fd7,3
+np.float64,0x7fc620dd822c41ba,0x7ff0000000000000,3
+np.float64,0x29c2cdf25385b,0x29c2cdf25385b,3
+np.float64,0x2d92284e5b246,0x2d92284e5b246,3
+np.float64,0xffc794aa942f2954,0xbff0000000000000,3
+np.float64,0xbfe7ed907eafdb21,0xbfe0d9a7b1442497,3
+np.float64,0xbfd4e0d4aea9c1aa,0xbfd1d09366dba2a7,3
+np.float64,0xa70412c34e083,0xa70412c34e083,3
+np.float64,0x41dc0ee083b9,0x41dc0ee083b9,3
+np.float64,0x8000ece20da1d9c5,0x8000ece20da1d9c5,3
+np.float64,0x3fdf3dae103e7b5c,0x3fe42314bf826bc5,3
+np.float64,0x3fe972533c72e4a6,0x3ff3703761e70f04,3
+np.float64,0xffba1d2b82343a58,0xbff0000000000000,3
+np.float64,0xe0086c83c010e,0xe0086c83c010e,3
+np.float64,0x3fe6fb0dde6df61c,0x3ff0cf5fae01aa08,3
+np.float64,0x3fcfaf057e3f5e0b,0x3fd1f98c1fd20139,3
+np.float64,0xbfdca19d9239433c,0xbfd7158745192ca9,3
+np.float64,0xffb17f394e22fe70,0xbff0000000000000,3
+np.float64,0x7fe40f05c7681e0b,0x7ff0000000000000,3
+np.float64,0x800b3c575d5678af,0x800b3c575d5678af,3
+np.float64,0x7fa4ab20ac295640,0x7ff0000000000000,3
+np.float64,0xbfd2fff4f6a5ffea,0xbfd07069bb50e1a6,3
+np.float64,0xbfef81b9147f0372,0xbfe40b845a749787,3
+np.float64,0x7fd7400e54ae801c,0x7ff0000000000000,3
+np.float64,0x3fd4401a17a88034,0x3fd7d20fb76a4f3d,3
+np.float64,0xbfd3e907fd27d210,0xbfd11c64b7577fc5,3
+np.float64,0x7fe34bed9ae697da,0x7ff0000000000000,3
+np.float64,0x80039119c0472234,0x80039119c0472234,3
+np.float64,0xbfe2e36ac565c6d6,0xbfdc88454ee997b3,3
+np.float64,0xbfec57204478ae40,0xbfe2cd3183de1d2d,3
+np.float64,0x7fed7e2a12fafc53,0x7ff0000000000000,3
+np.float64,0x7fd5c5fa7d2b8bf4,0x7ff0000000000000,3
+np.float64,0x3fdcf368d6b9e6d0,0x3fe24decce1ebd35,3
+np.float64,0xbfe0ebfcf2e1d7fa,0xbfda48c9247ae8cf,3
+np.float64,0xbfe10dbea2e21b7e,0xbfda707d68b59674,3
+np.float64,0xbfdf201b6ebe4036,0xbfd8a5df27742fdf,3
+np.float64,0xffe16555be62caab,0xbff0000000000000,3
+np.float64,0xffc23a5db22474bc,0xbff0000000000000,3
+np.float64,0xffe1cbb3f8a39768,0xbff0000000000000,3
+np.float64,0x8007b823be0f7048,0x8007b823be0f7048,3
+np.float64,0xbfa5d1f3042ba3e0,0xbfa55c97cd77bf6e,3
+np.float64,0xbfe316a074662d41,0xbfdcc0da4e7334d0,3
+np.float64,0xbfdfab2bf2bf5658,0xbfd8fb046b88b51f,3
+np.float64,0xfacc9dabf5994,0xfacc9dabf5994,3
+np.float64,0xffe7e420a4efc841,0xbff0000000000000,3
+np.float64,0x800bb986cd57730e,0x800bb986cd57730e,3
+np.float64,0xbfe314fa38e629f4,0xbfdcbf09302c3bf5,3
+np.float64,0x7fc56b17772ad62e,0x7ff0000000000000,3
+np.float64,0x8006a87d54ad50fb,0x8006a87d54ad50fb,3
+np.float64,0xbfe6633e4a6cc67c,0xbfe01a67c3b3ff32,3
+np.float64,0x3fe0ff56eb21feae,0x3fe66df01defb0fb,3
+np.float64,0xffc369cfc126d3a0,0xbff0000000000000,3
+np.float64,0x7fe8775d9a30eeba,0x7ff0000000000000,3
+np.float64,0x3fb53db13e2a7b60,0x3fb625a7279cdac3,3
+np.float64,0xffee76e7e6fcedcf,0xbff0000000000000,3
+np.float64,0xb45595b568ab3,0xb45595b568ab3,3
+np.float64,0xffa09a1d50213440,0xbff0000000000000,3
+np.float64,0x7d11dc16fa23c,0x7d11dc16fa23c,3
+np.float64,0x7fd4cc2928299851,0x7ff0000000000000,3
+np.float64,0x6a30e0ead461d,0x6a30e0ead461d,3
+np.float64,0x7fd3ee735a27dce6,0x7ff0000000000000,3
+np.float64,0x8008d7084b31ae11,0x8008d7084b31ae11,3
+np.float64,0x3fe469353fe8d26a,0x3fec8e7e2df38590,3
+np.float64,0x3fcecef2743d9de5,0x3fd16a888b715dfd,3
+np.float64,0x460130d68c027,0x460130d68c027,3
+np.float64,0xbfd76510c62eca22,0xbfd398766b741d6e,3
+np.float64,0x800ec88c2a5d9118,0x800ec88c2a5d9118,3
+np.float64,0x3fac969c6c392d40,0x3fad66ca6a1e583c,3
+np.float64,0x3fe5c616bf6b8c2e,0x3fef30f931e8dde5,3
+np.float64,0xb4cb6cd56996e,0xb4cb6cd56996e,3
+np.float64,0xffc3eacf8827d5a0,0xbff0000000000000,3
+np.float64,0x3fe1ceaf60e39d5f,0x3fe7d31e0a627cf9,3
+np.float64,0xffea69b42ff4d368,0xbff0000000000000,3
+np.float64,0x800ff8aef99ff15e,0x800ff8aef99ff15e,3
+np.float64,0x6c3953f0d872b,0x6c3953f0d872b,3
+np.float64,0x8007ca5a0d0f94b5,0x8007ca5a0d0f94b5,3
+np.float64,0x800993ce3ad3279d,0x800993ce3ad3279d,3
+np.float64,0x3fe5a4d1516b49a2,0x3feeef67b22ac65b,3
+np.float64,0x8003d7512a67aea3,0x8003d7512a67aea3,3
+np.float64,0x33864430670c9,0x33864430670c9,3
+np.float64,0xbfdbf477e3b7e8f0,0xbfd6a63f1b36f424,3
+np.float64,0x3fb5da92582bb525,0x3fb6d04ef1a1d31a,3
+np.float64,0xe38aae71c7156,0xe38aae71c7156,3
+np.float64,0x3fcaf5590a35eab2,0x3fce01ed6eb6188e,3
+np.float64,0x800deba9b05bd754,0x800deba9b05bd754,3
+np.float64,0x7fee0cde287c19bb,0x7ff0000000000000,3
+np.float64,0xbfe0c2ae70e1855d,0xbfda17fa64d84fcf,3
+np.float64,0x518618faa30c4,0x518618faa30c4,3
+np.float64,0xbfeb4c49b8769894,0xbfe25d52cd7e529f,3
+np.float64,0xbfeb3aa21b367544,0xbfe255cae1df4cfd,3
+np.float64,0xffd23f1c5d247e38,0xbff0000000000000,3
+np.float64,0xff9a75132034ea20,0xbff0000000000000,3
+np.float64,0xbfef9d96307f3b2c,0xbfe415e8b6ce0e50,3
+np.float64,0x8004046f2f0808df,0x8004046f2f0808df,3
+np.float64,0x3fe15871aea2b0e3,0x3fe706532ea5c770,3
+np.float64,0x7fd86b1576b0d62a,0x7ff0000000000000,3
+np.float64,0xbfc240a5c724814c,0xbfc102c7971ca455,3
+np.float64,0xffd8ea670bb1d4ce,0xbff0000000000000,3
+np.float64,0xbfeb1ddd1ff63bba,0xbfe2497c4e27bb8e,3
+np.float64,0x3fcd47e0a33a8fc1,0x3fd0734444150d83,3
+np.float64,0xe00b6a65c016e,0xe00b6a65c016e,3
+np.float64,0xbfc7d582142fab04,0xbfc5bf1fbe755a4c,3
+np.float64,0x8cc91ca11993,0x8cc91ca11993,3
+np.float64,0x7fdbc530e3b78a61,0x7ff0000000000000,3
+np.float64,0x7fee437522bc86e9,0x7ff0000000000000,3
+np.float64,0xffe9e09ae2b3c135,0xbff0000000000000,3
+np.float64,0x8002841cada5083a,0x8002841cada5083a,3
+np.float64,0x3fd6b485f8ad690c,0x3fdb412135932699,3
+np.float64,0x80070e8d0b0e1d1b,0x80070e8d0b0e1d1b,3
+np.float64,0x7fed5df165babbe2,0x7ff0000000000000,3
+np.float64,0x7ff4000000000000,0x7ffc000000000000,3
+np.float64,0x7fe99d08cd333a11,0x7ff0000000000000,3
+np.float64,0xdfff4201bfff,0xdfff4201bfff,3
+np.float64,0x800ccf7aaf999ef6,0x800ccf7aaf999ef6,3
+np.float64,0x3fddb05aad3b60b5,0x3fe2e34bdd1dd9d5,3
+np.float64,0xbfe5e1c60e6bc38c,0xbfdfb3275cc1675f,3
+np.float64,0x8004fe674269fccf,0x8004fe674269fccf,3
+np.float64,0x7fe9280363325006,0x7ff0000000000000,3
+np.float64,0xf605b9f1ec0b7,0xf605b9f1ec0b7,3
+np.float64,0x800c7c214018f843,0x800c7c214018f843,3
+np.float64,0x7fd97eb6b9b2fd6c,0x7ff0000000000000,3
+np.float64,0x7fd03f8fb6207f1e,0x7ff0000000000000,3
+np.float64,0x7fc526b64d2a4d6c,0x7ff0000000000000,3
+np.float64,0xbfef1a7c42fe34f9,0xbfe3e4b4399e0fcf,3
+np.float64,0xffdde10a2fbbc214,0xbff0000000000000,3
+np.float64,0xbfdd274f72ba4e9e,0xbfd76aa73788863c,3
+np.float64,0xbfecf7f77af9efef,0xbfe30ee2ae03fed1,3
+np.float64,0xffde709322bce126,0xbff0000000000000,3
+np.float64,0x268b5dac4d16d,0x268b5dac4d16d,3
+np.float64,0x8005c099606b8134,0x8005c099606b8134,3
+np.float64,0xffcf54c1593ea984,0xbff0000000000000,3
+np.float64,0xbfee9b8ebabd371d,0xbfe3b44f2663139d,3
+np.float64,0x3faf0330643e0661,0x3faff88fab74b447,3
+np.float64,0x7fe1c6011be38c01,0x7ff0000000000000,3
+np.float64,0xbfe9d58053b3ab01,0xbfe1b9ea12242485,3
+np.float64,0xbfe15a80fee2b502,0xbfdaca2aa7d1231a,3
+np.float64,0x7fe0d766d8a1aecd,0x7ff0000000000000,3
+np.float64,0x800f65e6a21ecbcd,0x800f65e6a21ecbcd,3
+np.float64,0x7fc85e45a530bc8a,0x7ff0000000000000,3
+np.float64,0x3fcc240e5438481d,0x3fcf7954fc080ac3,3
+np.float64,0xffddd49da2bba93c,0xbff0000000000000,3
+np.float64,0x1376f36c26edf,0x1376f36c26edf,3
+np.float64,0x3feffb7af17ff6f6,0x3ffb77f0ead2f881,3
+np.float64,0x3fd9354ea9b26a9d,0x3fdee4e4c8db8239,3
+np.float64,0xffdf7beed4bef7de,0xbff0000000000000,3
+np.float64,0xbfdef256ecbde4ae,0xbfd889b0e213a019,3
+np.float64,0x800d78bd1e7af17a,0x800d78bd1e7af17a,3
+np.float64,0xb66d66276cdad,0xb66d66276cdad,3
+np.float64,0x7fd8f51138b1ea21,0x7ff0000000000000,3
+np.float64,0xffe8c9c302b19385,0xbff0000000000000,3
+np.float64,0x8000be4cf5417c9b,0x8000be4cf5417c9b,3
+np.float64,0xbfe2293a25645274,0xbfdbb78a8c547c68,3
+np.float64,0xce8392c19d08,0xce8392c19d08,3
+np.float64,0xbfe075736b60eae7,0xbfd9bc0f6e34a283,3
+np.float64,0xbfe8d6fe6a71adfd,0xbfe1469ba80b4915,3
+np.float64,0xffe0c7993fa18f32,0xbff0000000000000,3
+np.float64,0x3fce5210fd3ca422,0x3fd11b40a1270a95,3
+np.float64,0x6c0534a8d80a7,0x6c0534a8d80a7,3
+np.float64,0x23c1823647831,0x23c1823647831,3
+np.float64,0x3fc901253732024a,0x3fcb9d264accb07c,3
+np.float64,0x3fe42b8997685714,0x3fec1a39e207b6e4,3
+np.float64,0x3fec4fd00fb89fa0,0x3ff6c1fdd0c262c8,3
+np.float64,0x8007b333caaf6668,0x8007b333caaf6668,3
+np.float64,0x800f9275141f24ea,0x800f9275141f24ea,3
+np.float64,0xffbba361a23746c0,0xbff0000000000000,3
+np.float64,0xbfee4effa9fc9dff,0xbfe396c11d0cd524,3
+np.float64,0x3e47e84c7c8fe,0x3e47e84c7c8fe,3
+np.float64,0x3fe80eb7b1301d6f,0x3ff1eed318a00153,3
+np.float64,0x7fd3f4c5b4a7e98a,0x7ff0000000000000,3
+np.float64,0x158abab02b158,0x158abab02b158,3
+np.float64,0x1,0x1,3
+np.float64,0x1f1797883e2f4,0x1f1797883e2f4,3
+np.float64,0x3feec055d03d80ac,0x3ff9d3fb0394de33,3
+np.float64,0x8010000000000000,0x8010000000000000,3
+np.float64,0xbfd070860ea0e10c,0xbfccfeec2828efef,3
+np.float64,0x80015c8b3e82b917,0x80015c8b3e82b917,3
+np.float64,0xffef9956d9ff32ad,0xbff0000000000000,3
+np.float64,0x7fe7f087dd2fe10f,0x7ff0000000000000,3
+np.float64,0x8002e7718665cee4,0x8002e7718665cee4,3
+np.float64,0x3fdfb9adb2bf735c,0x3fe4887a86214c1e,3
+np.float64,0xffc7747dfb2ee8fc,0xbff0000000000000,3
+np.float64,0x3fec309bb5386137,0x3ff69c44e1738547,3
+np.float64,0xffdbe2bf9ab7c580,0xbff0000000000000,3
+np.float64,0xbfe6a274daed44ea,0xbfe039aff2be9d48,3
+np.float64,0x7fd5a4e4efab49c9,0x7ff0000000000000,3
+np.float64,0xffbe6aaeb03cd560,0xbff0000000000000,3
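Each row above packs IEEE-754 bit patterns as hex: the second and third columns are the raw bits of an input and its expected result in the dtype named by the first column, and the final column is the permitted error in units in the last place (ulp). As a minimal decoding sketch (illustrative only; this is not the harness that consumes these files), take the float64 row 0x3ff0000000000000 -> 0x3ffb7e151628aed3 from the table above:

import numpy as np

# Verbatim row from the table above:
#   np.float64,0x3ff0000000000000,0x3ffb7e151628aed3,3
inp = np.uint64(0x3ff0000000000000).view(np.float64)  # reinterpret bits -> 1.0
out = np.uint64(0x3ffb7e151628aed3).view(np.float64)  # -> 1.7182818284590453

# 1.0 maps to e - 1, consistent with expm1 reference data (the header
# naming this particular file falls outside this excerpt, so that is
# an inference, not a fact stated here).
print(inp, out)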
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log.csv
new file mode 100644
index 00000000..7717745d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log.csv
@@ -0,0 +1,271 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0xc2afbc1b,4
+np.float32,0x007b2490,0xc2aec01e,4
+np.float32,0x007c99fa,0xc2aeba17,4
+np.float32,0x00734a0c,0xc2aee1dc,4
+np.float32,0x0070de24,0xc2aeecba,4
+np.float32,0x007fffff,0xc2aeac50,4
+np.float32,0x00000001,0xc2ce8ed0,4
+## -ve denormals ##
+np.float32,0x80495d65,0xffc00000,4
+np.float32,0x806894f6,0xffc00000,4
+np.float32,0x80555a76,0xffc00000,4
+np.float32,0x804e1fb8,0xffc00000,4
+np.float32,0x80687de9,0xffc00000,4
+np.float32,0x807fffff,0xffc00000,4
+np.float32,0x80000001,0xffc00000,4
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0xff800000,4
+np.float32,0x80000000,0xff800000,4
+np.float32,0x7f7fffff,0x42b17218,4
+np.float32,0x80800000,0xffc00000,4
+np.float32,0xff7fffff,0xffc00000,4
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x00000000,4
+np.float32,0x3f800001,0x33ffffff,4
+np.float32,0x3f800002,0x347ffffe,4
+np.float32,0x3f7fffff,0xb3800000,4
+np.float32,0x3f7ffffe,0xb4000000,4
+np.float32,0x3f7ffffd,0xb4400001,4
+np.float32,0x402df853,0x3f7ffffe,4
+np.float32,0x402df854,0x3f7fffff,4
+np.float32,0x402df855,0x3f800000,4
+np.float32,0x402df856,0x3f800001,4
+np.float32,0x3ebc5ab0,0xbf800001,4
+np.float32,0x3ebc5ab1,0xbf800000,4
+np.float32,0x3ebc5ab2,0xbf800000,4
+np.float32,0x3ebc5ab3,0xbf7ffffe,4
+np.float32,0x423ef575,0x407768ab,4
+np.float32,0x427b8c61,0x408485dd,4
+np.float32,0x4211e9ee,0x406630b0,4
+np.float32,0x424d5c41,0x407c0fed,4
+np.float32,0x42be722a,0x4091cc91,4
+np.float32,0x42b73d30,0x4090908b,4
+np.float32,0x427e48e2,0x4084de7f,4
+np.float32,0x428f759b,0x4088bba3,4
+np.float32,0x41629069,0x4029a0cc,4
+np.float32,0x4272c99d,0x40836379,4
+np.float32,0x4d1b7458,0x4197463d,4
+np.float32,0x4f10c594,0x41ace2b2,4
+np.float32,0x4ea397c2,0x41a85171,4
+np.float32,0x4fefa9d1,0x41b6769c,4
+np.float32,0x4ebac6ab,0x41a960dc,4
+np.float32,0x4f6efb42,0x41b0e535,4
+np.float32,0x4e9ab8e7,0x41a7df44,4
+np.float32,0x4e81b5d1,0x41a67625,4
+np.float32,0x5014d9f2,0x41b832bd,4
+np.float32,0x4f02175c,0x41ac07b8,4
+np.float32,0x7f034f89,0x42b01c47,4
+np.float32,0x7f56d00e,0x42b11849,4
+np.float32,0x7f1cd5f6,0x42b0773a,4
+np.float32,0x7e979174,0x42af02d7,4
+np.float32,0x7f23369f,0x42b08ba2,4
+np.float32,0x7f0637ae,0x42b0277d,4
+np.float32,0x7efcb6e8,0x42b00897,4
+np.float32,0x7f7907c8,0x42b163f6,4
+np.float32,0x7e95c4c2,0x42aefcba,4
+np.float32,0x7f4577b2,0x42b0ed2d,4
+np.float32,0x3f49c92e,0xbe73ae84,4
+np.float32,0x3f4a23d1,0xbe71e2f8,4
+np.float32,0x3f4abb67,0xbe6ee430,4
+np.float32,0x3f48169a,0xbe7c5532,4
+np.float32,0x3f47f5fa,0xbe7cfc37,4
+np.float32,0x3f488309,0xbe7a2ad8,4
+np.float32,0x3f479df4,0xbe7ebf5f,4
+np.float32,0x3f47cfff,0xbe7dbec9,4
+np.float32,0x3f496704,0xbe75a125,4
+np.float32,0x3f478ee8,0xbe7f0c92,4
+np.float32,0x3f4a763b,0xbe7041ce,4
+np.float32,0x3f47a108,0xbe7eaf94,4
+np.float32,0x3f48136c,0xbe7c6578,4
+np.float32,0x3f481c17,0xbe7c391c,4
+np.float32,0x3f47cd28,0xbe7dcd56,4
+np.float32,0x3f478be8,0xbe7f1bf7,4
+np.float32,0x3f4c1f8e,0xbe67e367,4
+np.float32,0x3f489b0c,0xbe79b03f,4
+np.float32,0x3f4934cf,0xbe76a08a,4
+np.float32,0x3f4954df,0xbe75fd6a,4
+np.float32,0x3f47a3f5,0xbe7ea093,4
+np.float32,0x3f4ba4fc,0xbe6a4b02,4
+np.float32,0x3f47a0e1,0xbe7eb05c,4
+np.float32,0x3f48c30a,0xbe78e42f,4
+np.float32,0x3f48cab8,0xbe78bd05,4
+np.float32,0x3f4b0569,0xbe6d6ea4,4
+np.float32,0x3f47de32,0xbe7d7607,4
+np.float32,0x3f477328,0xbe7f9b00,4
+np.float32,0x3f496dab,0xbe757f52,4
+np.float32,0x3f47662c,0xbe7fddac,4
+np.float32,0x3f48ddd8,0xbe785b80,4
+np.float32,0x3f481866,0xbe7c4bff,4
+np.float32,0x3f48b119,0xbe793fb6,4
+np.float32,0x3f48c7e8,0xbe78cb5c,4
+np.float32,0x3f4985f6,0xbe7503da,4
+np.float32,0x3f483fdf,0xbe7b8212,4
+np.float32,0x3f4b1c76,0xbe6cfa67,4
+np.float32,0x3f480b2e,0xbe7c8fa8,4
+np.float32,0x3f48745f,0xbe7a75bf,4
+np.float32,0x3f485bda,0xbe7af308,4
+np.float32,0x3f47a660,0xbe7e942c,4
+np.float32,0x3f47d4d5,0xbe7da600,4
+np.float32,0x3f4b0a26,0xbe6d56be,4
+np.float32,0x3f4a4883,0xbe712924,4
+np.float32,0x3f4769e7,0xbe7fca84,4
+np.float32,0x3f499702,0xbe74ad3f,4
+np.float32,0x3f494ab1,0xbe763131,4
+np.float32,0x3f476b69,0xbe7fc2c6,4
+np.float32,0x3f4884e8,0xbe7a214a,4
+np.float32,0x3f486945,0xbe7aae76,4
+#float64
+## +ve denormal ##
+np.float64,0x0000000000000001,0xc0874385446d71c3,2
+np.float64,0x0001000000000000,0xc086395a2079b70c,2
+np.float64,0x000fffffffffffff,0xc086232bdd7abcd2,2
+np.float64,0x0007ad63e2168cb6,0xc086290bc0b2980f,2
+## -ve denormal ##
+np.float64,0x8000000000000001,0xfff8000000000001,2
+np.float64,0x8001000000000000,0xfff8000000000001,2
+np.float64,0x800fffffffffffff,0xfff8000000000001,2
+np.float64,0x8007ad63e2168cb6,0xfff8000000000001,2
+## +/-0.0f, MAX, MIN ##
+np.float64,0x0000000000000000,0xfff0000000000000,2
+np.float64,0x8000000000000000,0xfff0000000000000,2
+np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,2
+np.float64,0xffefffffffffffff,0xfff8000000000001,2
+## near 1.0f ##
+np.float64,0x3ff0000000000000,0x0000000000000000,2
+np.float64,0x3fe8000000000000,0xbfd269621134db92,2
+np.float64,0x3ff0000000000001,0x3cafffffffffffff,2
+np.float64,0x3ff0000020000000,0x3e7fffffe000002b,2
+np.float64,0x3ff0000000000001,0x3cafffffffffffff,2
+np.float64,0x3fefffffe0000000,0xbe70000008000005,2
+np.float64,0x3fefffffffffffff,0xbca0000000000000,2
+## random numbers ##
+np.float64,0x02500186f3d9da56,0xc0855b8abf135773,2
+np.float64,0x09200815a3951173,0xc082ff1ad7131bdc,2
+np.float64,0x0da029623b0243d4,0xc0816fc994695bb5,2
+np.float64,0x48703b8ac483a382,0x40579213a313490b,2
+np.float64,0x09207b74c87c9860,0xc082fee20ff349ef,2
+np.float64,0x62c077698e8df947,0x407821c996d110f0,2
+np.float64,0x2350b45e87c3cfb0,0xc073d6b16b51d072,2
+np.float64,0x3990a23f9ff2b623,0xc051aa60eadd8c61,2
+np.float64,0x0d011386a116c348,0xc081a6cc7ea3b8fb,2
+np.float64,0x1fe0f0303ebe273a,0xc0763870b78a81ca,2
+np.float64,0x0cd1260121d387da,0xc081b7668d61a9d1,2
+np.float64,0x1e6135a8f581d422,0xc077425ac10f08c2,2
+np.float64,0x622168db5fe52d30,0x4077b3c669b9fadb,2
+np.float64,0x69f188e1ec6d1718,0x407d1e2f18c63889,2
+np.float64,0x3aa1bf1d9c4dd1a3,0xc04d682e24bde479,2
+np.float64,0x6c81c4011ce4f683,0x407ee5190e8a8e6a,2
+np.float64,0x2191fa55aa5a5095,0xc0750c0c318b5e2d,2
+np.float64,0x32a1f602a32bf360,0xc06270caa493fc17,2
+np.float64,0x16023c90ba93249b,0xc07d0f88e0801638,2
+np.float64,0x1c525fe6d71fa9ff,0xc078af49c66a5d63,2
+np.float64,0x1a927675815d65b7,0xc079e5bdd7fe376e,2
+np.float64,0x41227b8fe70da028,0x402aa0c9f9a84c71,2
+np.float64,0x4962bb6e853fe87d,0x405a34aa04c83747,2
+np.float64,0x23d2cda00b26b5a4,0xc0737c13a06d00ea,2
+np.float64,0x2d13083fd62987fa,0xc06a25055aeb474e,2
+np.float64,0x10e31e4c9b4579a1,0xc0804e181929418e,2
+np.float64,0x26d3247d556a86a9,0xc0716774171da7e8,2
+np.float64,0x6603379398d0d4ac,0x407a64f51f8a887b,2
+np.float64,0x02d38af17d9442ba,0xc0852d955ac9dd68,2
+np.float64,0x6a2382b4818dd967,0x407d4129d688e5d4,2
+np.float64,0x2ee3c403c79b3934,0xc067a091fefaf8b6,2
+np.float64,0x6493a699acdbf1a4,0x4079663c8602bfc5,2
+np.float64,0x1c8413c4f0de3100,0xc0788c99697059b6,2
+np.float64,0x4573f1ed350d9622,0x404e9bd1e4c08920,2
+np.float64,0x2f34265c9200b69c,0xc067310cfea4e986,2
+np.float64,0x19b43e65fa22029b,0xc07a7f8877de22d6,2
+np.float64,0x0af48ab7925ed6bc,0xc0825c4fbc0e5ade,2
+np.float64,0x4fa49699cad82542,0x4065c76d2a318235,2
+np.float64,0x7204a15e56ade492,0x40815bb87484dffb,2
+np.float64,0x4734aa08a230982d,0x40542a4bf7a361a9,2
+np.float64,0x1ae4ed296c2fd749,0xc079ac4921f20abb,2
+np.float64,0x472514ea4370289c,0x4053ff372bd8f18f,2
+np.float64,0x53a54b3f73820430,0x406b5411fc5f2e33,2
+np.float64,0x64754de5a15684fa,0x407951592e99a5ab,2
+np.float64,0x69358e279868a7c3,0x407c9c671a882c31,2
+np.float64,0x284579ec61215945,0xc0706688e55f0927,2
+np.float64,0x68b5c58806447adc,0x407c43d6f4eff760,2
+np.float64,0x1945a83f98b0e65d,0xc07acc15eeb032cc,2
+np.float64,0x0fc5eb98a16578bf,0xc080b0d02eddca0e,2
+np.float64,0x6a75e208f5784250,0x407d7a7383bf8f05,2
+np.float64,0x0fe63a029c47645d,0xc080a59ca1e98866,2
+np.float64,0x37963ac53f065510,0xc057236281f7bdb6,2
+np.float64,0x135661bb07067ff7,0xc07ee924930c21e4,2
+np.float64,0x4b4699469d458422,0x405f73843756e887,2
+np.float64,0x1a66d73e4bf4881b,0xc07a039ba1c63adf,2
+np.float64,0x12a6b9b119a7da59,0xc07f62e49c6431f3,2
+np.float64,0x24c719aa8fd1bdb5,0xc072d26da4bf84d3,2
+np.float64,0x0fa6ff524ffef314,0xc080bb8514662e77,2
+np.float64,0x1db751d66fdd4a9a,0xc077b77cb50d7c92,2
+np.float64,0x4947374c516da82c,0x4059e9acfc7105bf,2
+np.float64,0x1b1771ab98f3afc8,0xc07989326b8e1f66,2
+np.float64,0x25e78805baac8070,0xc0720a818e6ef080,2
+np.float64,0x4bd7a148225d3687,0x406082d004ea3ee7,2
+np.float64,0x53d7d6b2bbbda00a,0x406b9a398967cbd5,2
+np.float64,0x6997fb9f4e1c685f,0x407ce0a703413eba,2
+np.float64,0x069802c2ff71b951,0xc083df39bf7acddc,2
+np.float64,0x4d683ac9890f66d8,0x4062ae21d8c2acf0,2
+np.float64,0x5a2825863ec14f4c,0x40722d718d549552,2
+np.float64,0x0398799a88f4db80,0xc084e93dab8e2158,2
+np.float64,0x5ed87a8b77e135a5,0x40756d7051777b33,2
+np.float64,0x5828cd6d79b9bede,0x4070cafb22fc6ca1,2
+np.float64,0x7b18ba2a5ec6f068,0x408481386b3ed6fe,2
+np.float64,0x4938fd60922198fe,0x4059c206b762ea7e,2
+np.float64,0x31b8f44fcdd1a46e,0xc063b2faa8b6434e,2
+np.float64,0x5729341c0d918464,0x407019cac0c4a7d7,2
+np.float64,0x13595e9228ee878e,0xc07ee7235a7d8088,2
+np.float64,0x17698b0dc9dd4135,0xc07c1627e3a5ad5f,2
+np.float64,0x63b977c283abb0cc,0x4078cf1ec6ed65be,2
+np.float64,0x7349cc0d4dc16943,0x4081cc697ce4cb53,2
+np.float64,0x4e49a80b732fb28d,0x4063e67e3c5cbe90,2
+np.float64,0x07ba14b848a8ae02,0xc0837ac032a094e0,2
+np.float64,0x3da9f17b691bfddc,0xc03929c25366acda,2
+np.float64,0x02ea39aa6c3ac007,0xc08525af6f21e1c4,2
+np.float64,0x3a6a42f04ed9563d,0xc04e98e825dca46b,2
+np.float64,0x1afa877cd7900be7,0xc0799d6648cb34a9,2
+np.float64,0x58ea986649e052c6,0x4071512e939ad790,2
+np.float64,0x691abbc04647f536,0x407c89aaae0fcb83,2
+np.float64,0x43aabc5063e6f284,0x4044b45d18106fd2,2
+np.float64,0x488b003c893e0bea,0x4057df012a2dafbe,2
+np.float64,0x77eb076ed67caee5,0x40836720de94769e,2
+np.float64,0x5c1b46974aba46f4,0x40738731ba256007,2
+np.float64,0x1a5b29ecb5d3c261,0xc07a0becc77040d6,2
+np.float64,0x5d8b6ccf868c6032,0x4074865c1865e2db,2
+np.float64,0x4cfb6690b4aaf5af,0x406216cd8c7e8ddb,2
+np.float64,0x76cbd8eb5c5fc39e,0x4083038dc66d682b,2
+np.float64,0x28bbd1fec5012814,0xc07014c2dd1b9711,2
+np.float64,0x33dc1b3a4fd6bf7a,0xc060bd0756e07d8a,2
+np.float64,0x52bbe89b37de99f3,0x406a10041aa7d343,2
+np.float64,0x07bc479d15eb2dd3,0xc0837a1a6e3a3b61,2
+np.float64,0x18fc5275711a901d,0xc07aff3e9d62bc93,2
+np.float64,0x114c9758e247dc71,0xc080299a7cf15b05,2
+np.float64,0x25ac8f6d60755148,0xc07233c4c0c511d4,2
+np.float64,0x260cae2bb9e9fd7e,0xc071f128c7e82eac,2
+np.float64,0x572ccdfe0241de82,0x40701bedc84bb504,2
+np.float64,0x0ddcef6c8d41f5ee,0xc0815a7e16d07084,2
+np.float64,0x6dad1d59c988af68,0x407fb4a0bc0142b1,2
+np.float64,0x025d200580d8b6d1,0xc08556c0bc32b1b2,2
+np.float64,0x7aad344b6aa74c18,0x40845bbc453f22be,2
+np.float64,0x5b5d9d6ad9d14429,0x4073036d2d21f382,2
+np.float64,0x49cd8d8dcdf19954,0x405b5c034f5c7353,2
+np.float64,0x63edb9483335c1e6,0x4078f2dd21378786,2
+np.float64,0x7b1dd64c9d2c26bd,0x408482b922017bc9,2
+np.float64,0x782e13e0b574be5f,0x40837e2a0090a5ad,2
+np.float64,0x592dfe18b9d6db2f,0x40717f777fbcb1ec,2
+np.float64,0x654e3232ac60d72c,0x4079e71a95a70446,2
+np.float64,0x7b8e42ad22091456,0x4084a9a6f1e61722,2
+np.float64,0x570e88dfd5860ae6,0x407006ae6c0d137a,2
+np.float64,0x294e98346cb98ef1,0xc06f5edaac12bd44,2
+np.float64,0x1adeaa4ab792e642,0xc079b1431d5e2633,2
+np.float64,0x7b6ead3377529ac8,0x40849eabc8c7683c,2
+np.float64,0x2b8eedae8a9b2928,0xc06c400054deef11,2
+np.float64,0x65defb45b2dcf660,0x407a4b53f181c05a,2
+np.float64,0x1baf582d475e7701,0xc07920bcad4a502c,2
+np.float64,0x461f39cf05a0f15a,0x405126368f984fa1,2
+np.float64,0x7e5f6f5dcfff005b,0x4085a37d610439b4,2
+np.float64,0x136f66e4d09bd662,0xc07ed8a2719f2511,2
+np.float64,0x65afd8983fb6ca1f,0x407a2a7f48bf7fc1,2
+np.float64,0x572fa7f95ed22319,0x40701d706cf82e6f,2
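The ulperrortol column bounds how far a computed result may drift from the reference bits, measured on the integer encoding of the float. A minimal sketch of such a check for one float32 row of the log table above (an illustration under stated assumptions, not NumPy's actual validation code):

import numpy as np

# Verbatim row from the log table above:
#   np.float32,0x3f49c92e,0xbe73ae84,4
x        = np.uint32(0x3f49c92e).view(np.float32)  # ~0.78823
ref_bits = np.uint32(0xbe73ae84).view(np.int32)    # expected bits of log(x)

comp_bits = np.log(x).view(np.int32)
# For two finite floats of the same sign, the distance between their
# integer encodings is the ulp distance; a full harness would also
# handle sign crossings, NaNs and infinities.
ulp_err = abs(int(comp_bits) - int(ref_bits))
assert ulp_err <= 4, f"log({x}) off by {ulp_err} ulp"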
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv
new file mode 100644
index 00000000..7f5241a2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log10.csv
@@ -0,0 +1,1629 @@
+dtype,input,output,ulperrortol
+np.float32,0x3f6fd5c8,0xbce80e8e,4
+np.float32,0x3ea4ab17,0xbefc3deb,4
+np.float32,0x3e87a133,0xbf13b0b7,4
+np.float32,0x3f0d9069,0xbe83bb19,4
+np.float32,0x3f7b9269,0xbbf84f47,4
+np.float32,0x3f7a9ffa,0xbc16fd97,4
+np.float32,0x7f535d34,0x4219cb66,4
+np.float32,0x3e79ad7c,0xbf1ce857,4
+np.float32,0x7e8bfd3b,0x4217dfe9,4
+np.float32,0x3f2d2ee9,0xbe2dcec6,4
+np.float32,0x572e04,0xc21862e4,4
+np.float32,0x7f36f8,0xc217bad5,4
+np.float32,0x3f7982fb,0xbc36aaed,4
+np.float32,0x45b019,0xc218c67c,4
+np.float32,0x3f521c46,0xbdafb3e3,4
+np.float32,0x80000001,0x7fc00000,4
+np.float32,0x3f336c81,0xbe1e107f,4
+np.float32,0x3eac92d7,0xbef1d0bb,4
+np.float32,0x47bdfc,0xc218b990,4
+np.float32,0x7f2d94c8,0x421973d1,4
+np.float32,0x7d53ff8d,0x4214fbb6,4
+np.float32,0x3f581e4e,0xbd96a079,4
+np.float32,0x7ddaf20d,0x42163e4e,4
+np.float32,0x3f341d3c,0xbe1c5b4c,4
+np.float32,0x7ef04ba9,0x4218d032,4
+np.float32,0x620ed2,0xc2182e99,4
+np.float32,0x507850,0xc2188682,4
+np.float32,0x7d08f9,0xc217c284,4
+np.float32,0x7f0cf2aa,0x42191734,4
+np.float32,0x3f109a17,0xbe7e04fe,4
+np.float32,0x7f426152,0x4219a625,4
+np.float32,0x7f32d5a3,0x42198113,4
+np.float32,0x2e14b2,0xc2197e6f,4
+np.float32,0x3a5acd,0xc219156a,4
+np.float32,0x50a565,0xc2188589,4
+np.float32,0x5b751c,0xc2184d97,4
+np.float32,0x7e4149f6,0x42173b22,4
+np.float32,0x3dc34bf9,0xbf82a42a,4
+np.float32,0x3d12bc28,0xbfb910d6,4
+np.float32,0x7ebd2584,0x421865c1,4
+np.float32,0x7f6b3375,0x4219faeb,4
+np.float32,0x7fa00000,0x7fe00000,4
+np.float32,0x3f35fe7d,0xbe17bd33,4
+np.float32,0x7db45c87,0x4215e818,4
+np.float32,0x3efff366,0xbe9a2b8d,4
+np.float32,0x3eb331d0,0xbee971a3,4
+np.float32,0x3f259d5f,0xbe41ae2e,4
+np.float32,0x3eab85ec,0xbef32c4a,4
+np.float32,0x7f194b8a,0x42193c8c,4
+np.float32,0x3f11a614,0xbe7acfc7,4
+np.float32,0x5b17,0xc221f16b,4
+np.float32,0x3f33dadc,0xbe1cff4d,4
+np.float32,0x3cda1506,0xbfc9920f,4
+np.float32,0x3f6856f1,0xbd2c8290,4
+np.float32,0x7f3357fb,0x42198257,4
+np.float32,0x7f56f329,0x4219d2e1,4
+np.float32,0x3ef84108,0xbea0f595,4
+np.float32,0x3f72340f,0xbcc51916,4
+np.float32,0x3daf28,0xc218fcbd,4
+np.float32,0x131035,0xc21b06f4,4
+np.float32,0x3f275c3b,0xbe3d0487,4
+np.float32,0x3ef06130,0xbea82069,4
+np.float32,0x3f57f3b0,0xbd974fef,4
+np.float32,0x7f6c4a78,0x4219fcfa,4
+np.float32,0x7e8421d0,0x4217c639,4
+np.float32,0x3f17a479,0xbe68e08e,4
+np.float32,0x7f03774e,0x4218f83b,4
+np.float32,0x441a33,0xc218d0b8,4
+np.float32,0x539158,0xc21875b6,4
+np.float32,0x3e8fcc75,0xbf0d3018,4
+np.float32,0x7ef74130,0x4218dce4,4
+np.float32,0x3ea6f4fa,0xbef92c38,4
+np.float32,0x7f3948ab,0x421990d5,4
+np.float32,0x7db6f8f5,0x4215ee7c,4
+np.float32,0x3ee44a2f,0xbeb399e5,4
+np.float32,0x156c59,0xc21ad30d,4
+np.float32,0x3f21ee53,0xbe4baf16,4
+np.float32,0x3f2c08f4,0xbe30c424,4
+np.float32,0x3f49885c,0xbdd4c6a9,4
+np.float32,0x3eae0b9c,0xbeefed54,4
+np.float32,0x1b5c1f,0xc21a6646,4
+np.float32,0x3e7330e2,0xbf1fd592,4
+np.float32,0x3ebbeb4c,0xbededf82,4
+np.float32,0x427154,0xc218dbb1,4
+np.float32,0x3f6b8b4b,0xbd142498,4
+np.float32,0x8e769,0xc21c5981,4
+np.float32,0x3e9db557,0xbf02ec1c,4
+np.float32,0x3f001bef,0xbe99f019,4
+np.float32,0x3e58b48c,0xbf2ca77a,4
+np.float32,0x3d46c16b,0xbfa8327c,4
+np.float32,0x7eeeb305,0x4218cd3b,4
+np.float32,0x3e3f163d,0xbf3aa446,4
+np.float32,0x3f66c872,0xbd3877d9,4
+np.float32,0x7f7162f8,0x421a0677,4
+np.float32,0x3edca3bc,0xbebb2e28,4
+np.float32,0x3dc1055b,0xbf834afa,4
+np.float32,0x12b16f,0xc21b0fad,4
+np.float32,0x3f733898,0xbcb62e16,4
+np.float32,0x3e617af8,0xbf283db0,4
+np.float32,0x7e86577a,0x4217cd99,4
+np.float32,0x3f0ba3c7,0xbe86c633,4
+np.float32,0x3f4cad25,0xbdc70247,4
+np.float32,0xb6cdf,0xc21bea9f,4
+np.float32,0x3f42971a,0xbdf3f49e,4
+np.float32,0x3e6ccad2,0xbf22cc78,4
+np.float32,0x7f2121b2,0x421952b8,4
+np.float32,0x3f6d3f55,0xbd075366,4
+np.float32,0x3f524f,0xc218f117,4
+np.float32,0x3e95b5d9,0xbf08b56a,4
+np.float32,0x7f6ae47d,0x4219fa56,4
+np.float32,0x267539,0xc219ceda,4
+np.float32,0x3ef72f6d,0xbea1eb2e,4
+np.float32,0x2100b2,0xc21a12e2,4
+np.float32,0x3d9777d1,0xbf90c4e7,4
+np.float32,0x44c6f5,0xc218cc56,4
+np.float32,0x7f2a613d,0x42196b8a,4
+np.float32,0x390a25,0xc2191f8d,4
+np.float32,0x3f1de5ad,0xbe56e703,4
+np.float32,0x2f59ce,0xc2197258,4
+np.float32,0x7f3b12a1,0x4219951b,4
+np.float32,0x3ecb66d4,0xbecd44ca,4
+np.float32,0x7e74ff,0xc217bd7d,4
+np.float32,0x7ed83f78,0x4218a14d,4
+np.float32,0x685994,0xc21812f1,4
+np.float32,0xbf800000,0x7fc00000,4
+np.float32,0x736f47,0xc217e60b,4
+np.float32,0x7f09c371,0x42190d0a,4
+np.float32,0x3f7ca51d,0xbbbbbce0,4
+np.float32,0x7f4b4d3b,0x4219ba1a,4
+np.float32,0x3f6c4471,0xbd0eb076,4
+np.float32,0xd944e,0xc21b9dcf,4
+np.float32,0x7cb06ffc,0x421375cd,4
+np.float32,0x586187,0xc2185cce,4
+np.float32,0x3f3cbf5b,0xbe078911,4
+np.float32,0x3f30b504,0xbe24d983,4
+np.float32,0x3f0a16ba,0xbe8941fd,4
+np.float32,0x5c43b0,0xc21849af,4
+np.float32,0x3dad74f6,0xbf893bd5,4
+np.float32,0x3c586958,0xbff087a6,4
+np.float32,0x3e8307a8,0xbf1786ba,4
+np.float32,0x7dcd1776,0x4216213d,4
+np.float32,0x3f44d107,0xbde9d662,4
+np.float32,0x3e2e6823,0xbf44cbec,4
+np.float32,0x3d87ea27,0xbf96caca,4
+np.float32,0x3e0c715b,0xbf5ce07e,4
+np.float32,0x7ec9cd5a,0x4218828e,4
+np.float32,0x3e26c0b4,0xbf49c93e,4
+np.float32,0x75b94e,0xc217dd50,4
+np.float32,0x3df7b9f5,0xbf6ad7f4,4
+np.float32,0x0,0xff800000,4
+np.float32,0x3f284795,0xbe3a94da,4
+np.float32,0x7ee49092,0x4218b9f0,4
+np.float32,0x7f4c20e0,0x4219bbe8,4
+np.float32,0x3efbbce8,0xbe9ddc4b,4
+np.float32,0x12274a,0xc21b1cb4,4
+np.float32,0x5fa1b1,0xc21839be,4
+np.float32,0x7f0b210e,0x4219116d,4
+np.float32,0x3f67092a,0xbd368545,4
+np.float32,0x3d572721,0xbfa3ca5b,4
+np.float32,0x3f7913ce,0xbc431028,4
+np.float32,0x3b0613,0xc2191059,4
+np.float32,0x3e1d16c0,0xbf506c6f,4
+np.float32,0xab130,0xc21c081d,4
+np.float32,0x3e23ac97,0xbf4bdb9d,4
+np.float32,0x7ef52368,0x4218d911,4
+np.float32,0x7f38e686,0x42198fe9,4
+np.float32,0x3f106a21,0xbe7e9897,4
+np.float32,0x3ecef8d5,0xbec96644,4
+np.float32,0x3ec37e02,0xbed61683,4
+np.float32,0x3efbd063,0xbe9dcb17,4
+np.float32,0x3f318fe3,0xbe22b402,4
+np.float32,0x7e5e5228,0x4217795d,4
+np.float32,0x72a046,0xc217e92c,4
+np.float32,0x7f6f970b,0x421a0324,4
+np.float32,0x3ed871b4,0xbebf72fb,4
+np.float32,0x7a2eaa,0xc217ccc8,4
+np.float32,0x3e819655,0xbf18c1d7,4
+np.float32,0x80800000,0x7fc00000,4
+np.float32,0x7eab0719,0x421838f9,4
+np.float32,0x7f0763cb,0x4219054f,4
+np.float32,0x3f191672,0xbe64a8af,4
+np.float32,0x7d4327,0xc217c1b6,4
+np.float32,0x3f724ba6,0xbcc3bea3,4
+np.float32,0x60fe06,0xc2183375,4
+np.float32,0x48cd59,0xc218b30b,4
+np.float32,0x3f7fec2b,0xb909d3f3,4
+np.float32,0x1c7bb9,0xc21a5460,4
+np.float32,0x24d8a8,0xc219e1e4,4
+np.float32,0x3e727c52,0xbf20283c,4
+np.float32,0x4bc460,0xc218a14a,4
+np.float32,0x63e313,0xc2182661,4
+np.float32,0x7f625581,0x4219e9d4,4
+np.float32,0x3eeb3e77,0xbeacedc0,4
+np.float32,0x7ef27a47,0x4218d437,4
+np.float32,0x27105a,0xc219c7e6,4
+np.float32,0x22a10b,0xc219fd7d,4
+np.float32,0x3f41e907,0xbdf711ab,4
+np.float32,0x7c1fbf95,0x4212155b,4
+np.float32,0x7e5acceb,0x42177244,4
+np.float32,0x3e0892fa,0xbf5ffb83,4
+np.float32,0x3ea0e51d,0xbf00b2c0,4
+np.float32,0x3e56fc29,0xbf2d8a51,4
+np.float32,0x7ee724ed,0x4218beed,4
+np.float32,0x7ebf142b,0x42186a46,4
+np.float32,0x7f6cf35c,0x4219fe37,4
+np.float32,0x3f11abf7,0xbe7abdcd,4
+np.float32,0x588d7a,0xc2185bf1,4
+np.float32,0x3f6e81d2,0xbcfbcf97,4
+np.float32,0x3f1b6be8,0xbe5dee2b,4
+np.float32,0x7f3815e0,0x42198df2,4
+np.float32,0x3f5bfc88,0xbd86d93d,4
+np.float32,0x3f3775d0,0xbe142bbc,4
+np.float32,0x78a958,0xc217d25a,4
+np.float32,0x2ff7c3,0xc2196c96,4
+np.float32,0x4b9c0,0xc21d733c,4
+np.float32,0x3ec025af,0xbed9ecf3,4
+np.float32,0x6443f0,0xc21824b3,4
+np.float32,0x3f754e28,0xbc97d299,4
+np.float32,0x3eaa91d3,0xbef4699d,4
+np.float32,0x3e5f2837,0xbf296478,4
+np.float32,0xe5676,0xc21b85a4,4
+np.float32,0x3f6859f2,0xbd2c6b90,4
+np.float32,0x3f68686b,0xbd2bfcc6,4
+np.float32,0x4b39b8,0xc218a47b,4
+np.float32,0x630ac4,0xc2182a28,4
+np.float32,0x160980,0xc21ac67d,4
+np.float32,0x3ed91c4d,0xbebec3fd,4
+np.float32,0x7ec27b0d,0x4218721f,4
+np.float32,0x3f3c0a5f,0xbe09344b,4
+np.float32,0x3dbff9c1,0xbf839841,4
+np.float32,0x7f0e8ea7,0x42191c40,4
+np.float32,0x3f36b162,0xbe1608e4,4
+np.float32,0x228bb3,0xc219fe90,4
+np.float32,0x2fdd30,0xc2196d8c,4
+np.float32,0x3e8fce8e,0xbf0d2e79,4
+np.float32,0x3f36acc7,0xbe16141a,4
+np.float32,0x7f44b51c,0x4219ab70,4
+np.float32,0x3ec3371c,0xbed66736,4
+np.float32,0x4388a2,0xc218d473,4
+np.float32,0x3f5aa6c3,0xbd8c4344,4
+np.float32,0x7f09fce4,0x42190dc3,4
+np.float32,0x7ed7854a,0x42189fce,4
+np.float32,0x7f4da83a,0x4219bf3a,4
+np.float32,0x3db8da28,0xbf85b25a,4
+np.float32,0x7f449686,0x4219ab2b,4
+np.float32,0x2eb25,0xc21e498c,4
+np.float32,0x3f2bcc08,0xbe3161bd,4
+np.float32,0x36c923,0xc219317b,4
+np.float32,0x3d52a866,0xbfa4f6d2,4
+np.float32,0x3f7d6688,0xbb913e4e,4
+np.float32,0x3f5a6ba4,0xbd8d33e3,4
+np.float32,0x719740,0xc217ed35,4
+np.float32,0x78a472,0xc217d26c,4
+np.float32,0x7ee33d0c,0x4218b759,4
+np.float32,0x7f668c1d,0x4219f208,4
+np.float32,0x3e29c600,0xbf47ca46,4
+np.float32,0x3f3cefc3,0xbe071712,4
+np.float32,0x3e224ebd,0xbf4cca41,4
+np.float32,0x7f1417be,0x42192d31,4
+np.float32,0x7f29d7d5,0x42196a23,4
+np.float32,0x3338ce,0xc2194f65,4
+np.float32,0x2a7897,0xc219a2b6,4
+np.float32,0x3d6bc3d8,0xbf9eb468,4
+np.float32,0x3f6bd7bf,0xbd11e392,4
+np.float32,0x7f6d26bf,0x4219fe98,4
+np.float32,0x3f52d378,0xbdacadb5,4
+np.float32,0x3efac453,0xbe9eb84a,4
+np.float32,0x3f692eb7,0xbd261184,4
+np.float32,0x3f6a0bb5,0xbd1f7ec1,4
+np.float32,0x3f037a49,0xbe942aa8,4
+np.float32,0x3f465bd4,0xbde2e530,4
+np.float32,0x7ef0f47b,0x4218d16a,4
+np.float32,0x637127,0xc218285e,4
+np.float32,0x3f41e511,0xbdf723d7,4
+np.float32,0x7f800000,0x7f800000,4
+np.float32,0x3f3342d5,0xbe1e77d5,4
+np.float32,0x7f57cfe6,0x4219d4a9,4
+np.float32,0x3e4358ed,0xbf3830a7,4
+np.float32,0x3ce25f15,0xbfc77f2b,4
+np.float32,0x7ed057e7,0x421890be,4
+np.float32,0x7ce154d9,0x4213e295,4
+np.float32,0x3ee91984,0xbeaef703,4
+np.float32,0x7e4e919c,0x421758af,4
+np.float32,0x6830e7,0xc218139e,4
+np.float32,0x3f12f08e,0xbe76e328,4
+np.float32,0x7f0a7a32,0x42190f56,4
+np.float32,0x7f38e,0xc21c8bd3,4
+np.float32,0x3e01def9,0xbf6593e3,4
+np.float32,0x3f5c8c6d,0xbd849432,4
+np.float32,0x3eed8747,0xbeaac7a3,4
+np.float32,0x3cadaa0e,0xbfd63b21,4
+np.float32,0x3f7532a9,0xbc996178,4
+np.float32,0x31f3ac,0xc2195a8f,4
+np.float32,0x3f0e0f97,0xbe82f3af,4
+np.float32,0x3f2a1f35,0xbe35bd3f,4
+np.float32,0x3f4547b2,0xbde7bebd,4
+np.float32,0x3f7988a6,0xbc36094c,4
+np.float32,0x74464c,0xc217e2d2,4
+np.float32,0x7f7518be,0x421a0d3f,4
+np.float32,0x7e97fa0a,0x42180473,4
+np.float32,0x584e3a,0xc2185d2f,4
+np.float32,0x3e7291f3,0xbf201e52,4
+np.float32,0xc0a05,0xc21bd359,4
+np.float32,0x3a3177,0xc21916a6,4
+np.float32,0x4f417f,0xc2188d45,4
+np.float32,0x263fce,0xc219d145,4
+np.float32,0x7e1d58,0xc217beb1,4
+np.float32,0x7f056af3,0x4218fec9,4
+np.float32,0x3f21c181,0xbe4c2a3f,4
+np.float32,0x7eca4956,0x4218839f,4
+np.float32,0x3e58afa8,0xbf2ca9fd,4
+np.float32,0x3f40d583,0xbdfc04ef,4
+np.float32,0x7f432fbb,0x4219a7fc,4
+np.float32,0x43aaa4,0xc218d393,4
+np.float32,0x7f2c9b62,0x42197150,4
+np.float32,0x5c3876,0xc21849e5,4
+np.float32,0x7f2034e8,0x42195029,4
+np.float32,0x7e5be772,0x42177481,4
+np.float32,0x80000000,0xff800000,4
+np.float32,0x3f5be03b,0xbd874bb0,4
+np.float32,0x3e32494f,0xbf4259be,4
+np.float32,0x3e1f4671,0xbf4ee30b,4
+np.float32,0x4606cc,0xc218c454,4
+np.float32,0x425cbc,0xc218dc3b,4
+np.float32,0x7dd9b8bf,0x42163bd0,4
+np.float32,0x3f0465d0,0xbe929db7,4
+np.float32,0x3f735077,0xbcb4d0fa,4
+np.float32,0x4d6a43,0xc21897b8,4
+np.float32,0x3e27d600,0xbf4910f5,4
+np.float32,0x3f06e0cc,0xbe8e7d24,4
+np.float32,0x3f3fd064,0xbe005e45,4
+np.float32,0x176f1,0xc21f7c2d,4
+np.float32,0x3eb64e6f,0xbee59d9c,4
+np.float32,0x7f0f075d,0x42191db8,4
+np.float32,0x3f718cbe,0xbcceb621,4
+np.float32,0x3ead7bda,0xbef0a54a,4
+np.float32,0x7f77c1a8,0x421a120c,4
+np.float32,0x3f6a79c5,0xbd1c3afd,4
+np.float32,0x3e992d1f,0xbf062a02,4
+np.float32,0x3e6f6335,0xbf219639,4
+np.float32,0x7f6d9a3e,0x4219ff70,4
+np.float32,0x557ed1,0xc2186b91,4
+np.float32,0x3f13a456,0xbe74c457,4
+np.float32,0x15c2dc,0xc21acc17,4
+np.float32,0x71f36f,0xc217ebcc,4
+np.float32,0x748dea,0xc217e1c1,4
+np.float32,0x7f0f32e0,0x42191e3f,4
+np.float32,0x5b1da8,0xc2184f41,4
+np.float32,0x3d865d3a,0xbf976e11,4
+np.float32,0x3f800000,0x0,4
+np.float32,0x7f67b56d,0x4219f444,4
+np.float32,0x6266a1,0xc2182d0c,4
+np.float32,0x3ec9c5e4,0xbecf0e6b,4
+np.float32,0x6a6a0e,0xc2180a3b,4
+np.float32,0x7e9db6fd,0x421814ef,4
+np.float32,0x3e7458f7,0xbf1f4e88,4
+np.float32,0x3ead8016,0xbef09fdc,4
+np.float32,0x3e263d1c,0xbf4a211e,4
+np.float32,0x7f6b3329,0x4219faeb,4
+np.float32,0x800000,0xc217b818,4
+np.float32,0x3f0654c7,0xbe8f6471,4
+np.float32,0x3f281b71,0xbe3b0990,4
+np.float32,0x7c4c8e,0xc217c524,4
+np.float32,0x7d113a87,0x4214537d,4
+np.float32,0x734b5f,0xc217e696,4
+np.float32,0x7f079d05,0x4219060b,4
+np.float32,0x3ee830b1,0xbeafd58b,4
+np.float32,0x3f1c3b8b,0xbe5b9d96,4
+np.float32,0x3f2bf0c6,0xbe3102aa,4
+np.float32,0x7ddffe22,0x42164871,4
+np.float32,0x3f1e58b4,0xbe55a37f,4
+np.float32,0x5f3edf,0xc2183b8a,4
+np.float32,0x7f1fb6ec,0x42194eca,4
+np.float32,0x3f78718e,0xbc55311e,4
+np.float32,0x3e574b7d,0xbf2d6152,4
+np.float32,0x7eab27c6,0x4218394e,4
+np.float32,0x7f34603c,0x421984e5,4
+np.float32,0x3f3a8b57,0xbe0cc1ca,4
+np.float32,0x3f744181,0xbca7134e,4
+np.float32,0x3f7e3bc4,0xbb45156b,4
+np.float32,0x93ab4,0xc21c498b,4
+np.float32,0x7ed5541e,0x42189b42,4
+np.float32,0x6bf8ec,0xc21803c4,4
+np.float32,0x757395,0xc217de58,4
+np.float32,0x7f177214,0x42193726,4
+np.float32,0x59935f,0xc21856d6,4
+np.float32,0x2cd9ba,0xc2198a78,4
+np.float32,0x3ef6fd5c,0xbea2183c,4
+np.float32,0x3ebb6c63,0xbedf75e0,4
+np.float32,0x7f43272c,0x4219a7e9,4
+np.float32,0x7f42e67d,0x4219a755,4
+np.float32,0x3f3f744f,0xbe0133f6,4
+np.float32,0x7f5fddaa,0x4219e4f4,4
+np.float32,0x3dc9874f,0xbf80e529,4
+np.float32,0x3f2efe64,0xbe292ec8,4
+np.float32,0x3e0406a6,0xbf63bf7c,4
+np.float32,0x3cdbb0aa,0xbfc92984,4
+np.float32,0x3e6597e7,0xbf263b30,4
+np.float32,0x3f0c1153,0xbe861807,4
+np.float32,0x7fce16,0xc217b8c6,4
+np.float32,0x3f5f4e5f,0xbd730dc6,4
+np.float32,0x3ed41ffa,0xbec3ee69,4
+np.float32,0x3f216c78,0xbe4d1446,4
+np.float32,0x3f123ed7,0xbe78fe4b,4
+np.float32,0x7f7e0ca9,0x421a1d34,4
+np.float32,0x7e318af4,0x42171558,4
+np.float32,0x7f1e1659,0x42194a3d,4
+np.float32,0x34d12a,0xc21941c2,4
+np.float32,0x3d9566ad,0xbf918870,4
+np.float32,0x3e799a47,0xbf1cf0e5,4
+np.float32,0x3e89dd6f,0xbf11df76,4
+np.float32,0x32f0d3,0xc21951d8,4
+np.float32,0x7e89d17e,0x4217d8f6,4
+np.float32,0x1f3b38,0xc21a2b6b,4
+np.float32,0x7ee9e060,0x4218c427,4
+np.float32,0x31a673,0xc2195d41,4
+np.float32,0x5180f1,0xc21880d5,4
+np.float32,0x3cd36f,0xc21902f8,4
+np.float32,0x3bb63004,0xc01050cb,4
+np.float32,0x3e8ee9d1,0xbf0ddfde,4
+np.float32,0x3d2a7da3,0xbfb0b970,4
+np.float32,0x3ea58107,0xbefb1dc3,4
+np.float32,0x7f6760b0,0x4219f3a2,4
+np.float32,0x7f7f9e08,0x421a1ff0,4
+np.float32,0x37e7f1,0xc219287b,4
+np.float32,0x3ef7eb53,0xbea14267,4
+np.float32,0x3e2eb581,0xbf449aa5,4
+np.float32,0x3da7671c,0xbf8b3568,4
+np.float32,0x7af36f7b,0x420f33ee,4
+np.float32,0x3eb3602c,0xbee93823,4
+np.float32,0x3f68bcff,0xbd2975de,4
+np.float32,0x3ea7cefb,0xbef80a9d,4
+np.float32,0x3f329689,0xbe202414,4
+np.float32,0x7f0c7c80,0x421915be,4
+np.float32,0x7f4739b8,0x4219b118,4
+np.float32,0x73af58,0xc217e515,4
+np.float32,0x7f13eb2a,0x42192cab,4
+np.float32,0x30f2d9,0xc2196395,4
+np.float32,0x7ea7066c,0x42182e71,4
+np.float32,0x669fec,0xc2181a5b,4
+np.float32,0x3f7d6876,0xbb90d1ef,4
+np.float32,0x3f08a4ef,0xbe8b9897,4
+np.float32,0x7f2a906c,0x42196c05,4
+np.float32,0x3ed3ca42,0xbec44856,4
+np.float32,0x9d27,0xc220fee2,4
+np.float32,0x3e4508a1,0xbf373c03,4
+np.float32,0x3e41f8de,0xbf38f9bb,4
+np.float32,0x3e912714,0xbf0c255b,4
+np.float32,0xff800000,0x7fc00000,4
+np.float32,0x7eefd13d,0x4218cf4f,4
+np.float32,0x3f491674,0xbdd6bded,4
+np.float32,0x3ef49512,0xbea445c9,4
+np.float32,0x3f045b79,0xbe92af15,4
+np.float32,0x3ef6c412,0xbea24bd5,4
+np.float32,0x3e6f3c28,0xbf21a85d,4
+np.float32,0x3ef71839,0xbea2000e,4
+np.float32,0x1,0xc23369f4,4
+np.float32,0x3e3fcfe4,0xbf3a3876,4
+np.float32,0x3e9d7a65,0xbf0315b2,4
+np.float32,0x20b7c4,0xc21a16bd,4
+np.float32,0x7f707b10,0x421a04cb,4
+np.float32,0x7fc00000,0x7fc00000,4
+np.float32,0x3f285ebd,0xbe3a57ac,4
+np.float32,0x74c9ea,0xc217e0dc,4
+np.float32,0x3f6501f2,0xbd4634ab,4
+np.float32,0x3f248959,0xbe4495cc,4
+np.float32,0x7e915ff0,0x4217f0b3,4
+np.float32,0x7edbb910,0x4218a864,4
+np.float32,0x3f7042dd,0xbce1bddb,4
+np.float32,0x6f08c9,0xc217f754,4
+np.float32,0x7f423993,0x4219a5ca,4
+np.float32,0x3f125704,0xbe78b4cd,4
+np.float32,0x7ef7f5ae,0x4218de28,4
+np.float32,0x3f2dd940,0xbe2c1a33,4
+np.float32,0x3f1ca78e,0xbe5a6a8b,4
+np.float32,0x244863,0xc219e8be,4
+np.float32,0x3f2614fe,0xbe406d6b,4
+np.float32,0x3e75e7a3,0xbf1e99b5,4
+np.float32,0x2bdd6e,0xc2199459,4
+np.float32,0x7e49e279,0x42174e7b,4
+np.float32,0x3e3bb09a,0xbf3ca2cd,4
+np.float32,0x649f06,0xc2182320,4
+np.float32,0x7f4a44e1,0x4219b7d6,4
+np.float32,0x400473,0xc218ec3a,4
+np.float32,0x3edb19ad,0xbebcbcad,4
+np.float32,0x3d8ee956,0xbf94006c,4
+np.float32,0x7e91c603,0x4217f1eb,4
+np.float32,0x221384,0xc21a04a6,4
+np.float32,0x7f7dd660,0x421a1cd5,4
+np.float32,0x7ef34609,0x4218d5ac,4
+np.float32,0x7f5ed529,0x4219e2e5,4
+np.float32,0x7f1bf685,0x42194438,4
+np.float32,0x3cdd094a,0xbfc8d294,4
+np.float32,0x7e87fc8e,0x4217d303,4
+np.float32,0x7f53d971,0x4219cc6b,4
+np.float32,0xabc8b,0xc21c0646,4
+np.float32,0x7f5011e6,0x4219c46a,4
+np.float32,0x7e460638,0x421745e5,4
+np.float32,0xa8126,0xc21c0ffd,4
+np.float32,0x3eec2a66,0xbeac0f2d,4
+np.float32,0x3f3a1213,0xbe0de340,4
+np.float32,0x7f5908db,0x4219d72c,4
+np.float32,0x7e0ad3c5,0x4216a7f3,4
+np.float32,0x3f2de40e,0xbe2bfe90,4
+np.float32,0x3d0463c5,0xbfbec8e4,4
+np.float32,0x7c7cde0b,0x4212e19a,4
+np.float32,0x74c24f,0xc217e0f9,4
+np.float32,0x3f14b4cb,0xbe71929b,4
+np.float32,0x3e94e192,0xbf09537f,4
+np.float32,0x3eebde71,0xbeac56bd,4
+np.float32,0x3f65e413,0xbd3f5b8a,4
+np.float32,0x7e109199,0x4216b9f9,4
+np.float32,0x3f22f5d0,0xbe48ddc0,4
+np.float32,0x3e22d3bc,0xbf4c6f4d,4
+np.float32,0x3f7a812f,0xbc1a680b,4
+np.float32,0x3f67f361,0xbd2f7d7c,4
+np.float32,0x3f1caa63,0xbe5a6281,4
+np.float32,0x3f306fde,0xbe2587ab,4
+np.float32,0x3e8df9d3,0xbf0e9b2f,4
+np.float32,0x3eaaccc4,0xbef41cd4,4
+np.float32,0x7f3f65ec,0x42199f45,4
+np.float32,0x3dc706e0,0xbf8196ec,4
+np.float32,0x3e14eaba,0xbf565cf6,4
+np.float32,0xcc60,0xc2208a09,4
+np.float32,0x358447,0xc2193be7,4
+np.float32,0x3dcecade,0xbf7eec70,4
+np.float32,0x3f20b4f8,0xbe4f0ef0,4
+np.float32,0x7e7c979f,0x4217b222,4
+np.float32,0x7f2387b9,0x4219594a,4
+np.float32,0x3f6f6e5c,0xbcee0e05,4
+np.float32,0x7f19ad81,0x42193da8,4
+np.float32,0x5635e1,0xc21867dd,4
+np.float32,0x4c5e97,0xc2189dc4,4
+np.float32,0x7f35f97f,0x421988d1,4
+np.float32,0x7f685224,0x4219f571,4
+np.float32,0x3eca0616,0xbecec7b8,4
+np.float32,0x3f436d0d,0xbdf024ca,4
+np.float32,0x12a97d,0xc21b106a,4
+np.float32,0x7f0fdc93,0x4219204d,4
+np.float32,0x3debfb42,0xbf703e65,4
+np.float32,0x3c6c54d2,0xbfeba291,4
+np.float32,0x7e5d7491,0x421777a1,4
+np.float32,0x3f4bd2f0,0xbdcab87d,4
+np.float32,0x3f7517f4,0xbc9ae510,4
+np.float32,0x3f71a59a,0xbccd480d,4
+np.float32,0x3f514653,0xbdb33f61,4
+np.float32,0x3f4e6ea4,0xbdbf694b,4
+np.float32,0x3eadadec,0xbef06526,4
+np.float32,0x3f3b41c1,0xbe0b0fbf,4
+np.float32,0xc35a,0xc2209e1e,4
+np.float32,0x384982,0xc2192575,4
+np.float32,0x3464c3,0xc2194556,4
+np.float32,0x7f5e20d9,0x4219e17d,4
+np.float32,0x3ea18b62,0xbf004016,4
+np.float32,0x63a02b,0xc218278c,4
+np.float32,0x7ef547ba,0x4218d953,4
+np.float32,0x3f2496fb,0xbe4470f4,4
+np.float32,0x7ea0c8c6,0x42181d81,4
+np.float32,0x3f42ba60,0xbdf35372,4
+np.float32,0x7e40d9,0xc217be34,4
+np.float32,0x3e95883b,0xbf08d750,4
+np.float32,0x3e0cddf3,0xbf5c8aa8,4
+np.float32,0x3f2305d5,0xbe48b20a,4
+np.float32,0x7f0d0941,0x4219177b,4
+np.float32,0x3f7b98d3,0xbbf6e477,4
+np.float32,0x3f687cdc,0xbd2b6057,4
+np.float32,0x3f42ce91,0xbdf2f73d,4
+np.float32,0x3ee00fc0,0xbeb7c217,4
+np.float32,0x7f3d483a,0x42199a53,4
+np.float32,0x3e1e08eb,0xbf4fc18d,4
+np.float32,0x7e202ff5,0x4216e798,4
+np.float32,0x582898,0xc2185ded,4
+np.float32,0x3e3552b1,0xbf40790c,4
+np.float32,0x3d3f7c87,0xbfaa44b6,4
+np.float32,0x669d8e,0xc2181a65,4
+np.float32,0x3f0e21b4,0xbe82d757,4
+np.float32,0x686f95,0xc2181293,4
+np.float32,0x3f48367f,0xbdda9ead,4
+np.float32,0x3dc27802,0xbf82e0a0,4
+np.float32,0x3f6ac40c,0xbd1a07d4,4
+np.float32,0x3bba6d,0xc2190b12,4
+np.float32,0x3ec7b6b0,0xbed15665,4
+np.float32,0x3f1f9ca4,0xbe521955,4
+np.float32,0x3ef2f147,0xbea5c4b8,4
+np.float32,0x7c65f769,0x4212b762,4
+np.float32,0x7e98e162,0x42180716,4
+np.float32,0x3f0f0c09,0xbe8169ea,4
+np.float32,0x3d67f03b,0xbf9f9d48,4
+np.float32,0x7f3751e4,0x42198c18,4
+np.float32,0x7f1fac61,0x42194ead,4
+np.float32,0x3e9b698b,0xbf048d89,4
+np.float32,0x7e66507b,0x42178913,4
+np.float32,0x7f5cb680,0x4219dea5,4
+np.float32,0x234700,0xc219f53e,4
+np.float32,0x3d9984ad,0xbf900591,4
+np.float32,0x3f33a3f2,0xbe1d872a,4
+np.float32,0x3eaf52b6,0xbeee4cf4,4
+np.float32,0x7f078930,0x421905ca,4
+np.float32,0x3f083b39,0xbe8c44df,4
+np.float32,0x3e3823f8,0xbf3ec231,4
+np.float32,0x3eef6f5d,0xbea9008c,4
+np.float32,0x6145e1,0xc218322c,4
+np.float32,0x16d9ae,0xc21ab65f,4
+np.float32,0x7e543376,0x421764a5,4
+np.float32,0x3ef77ccb,0xbea1a5a0,4
+np.float32,0x3f4a443f,0xbdd18af5,4
+np.float32,0x8f209,0xc21c5770,4
+np.float32,0x3ecac126,0xbecdfa33,4
+np.float32,0x3e8662f9,0xbf14b6c7,4
+np.float32,0x23759a,0xc219f2f4,4
+np.float32,0xf256d,0xc21b6d3f,4
+np.float32,0x3f579f93,0xbd98aaa2,4
+np.float32,0x3ed4cc8e,0xbec339cb,4
+np.float32,0x3ed25400,0xbec5d2a1,4
+np.float32,0x3ed6f8ba,0xbec0f795,4
+np.float32,0x7f36efd9,0x42198b2a,4
+np.float32,0x7f5169dd,0x4219c746,4
+np.float32,0x7de18a20,0x42164b80,4
+np.float32,0x3e8de526,0xbf0eab61,4
+np.float32,0x3de0cbcd,0xbf75a47e,4
+np.float32,0xe265f,0xc21b8b82,4
+np.float32,0x3df3cdbd,0xbf6c9e40,4
+np.float32,0x3f38a25a,0xbe115589,4
+np.float32,0x7f01f2c0,0x4218f311,4
+np.float32,0x3da7d5f4,0xbf8b10a5,4
+np.float32,0x4d4fe8,0xc2189850,4
+np.float32,0x3cc96d9d,0xbfcdfc8d,4
+np.float32,0x259a88,0xc219d8d7,4
+np.float32,0x7f1d5102,0x42194810,4
+np.float32,0x7e17ca91,0x4216cfa7,4
+np.float32,0x3f73d110,0xbcad7a8f,4
+np.float32,0x3f009383,0xbe9920ed,4
+np.float32,0x7e22af,0xc217be9f,4
+np.float32,0x3f7de2ce,0xbb6c0394,4
+np.float32,0x3edd0cd2,0xbebac45a,4
+np.float32,0x3ec9b5c1,0xbecf2035,4
+np.float32,0x3168c5,0xc2195f6b,4
+np.float32,0x3e935522,0xbf0a7d18,4
+np.float32,0x3e494077,0xbf34e120,4
+np.float32,0x3f52ed06,0xbdac41ec,4
+np.float32,0x3f73d51e,0xbcad3f65,4
+np.float32,0x3f03d453,0xbe939295,4
+np.float32,0x7ef4ee68,0x4218d8b1,4
+np.float32,0x3ed0e2,0xc218f4a7,4
+np.float32,0x4efab8,0xc2188ed3,4
+np.float32,0x3dbd5632,0xbf845d3b,4
+np.float32,0x7eecad4f,0x4218c972,4
+np.float32,0x9d636,0xc21c2d32,4
+np.float32,0x3e5f3b6b,0xbf295ae7,4
+np.float32,0x7f4932df,0x4219b57a,4
+np.float32,0x4b59b5,0xc218a3be,4
+np.float32,0x3e5de97f,0xbf2a03b4,4
+np.float32,0x3f1c479d,0xbe5b7b3c,4
+np.float32,0x3f42e7e4,0xbdf283a5,4
+np.float32,0x2445,0xc2238af2,4
+np.float32,0x7aa71b43,0x420e8c9e,4
+np.float32,0x3ede6e4e,0xbeb961e1,4
+np.float32,0x7f05dd3b,0x42190045,4
+np.float32,0x3ef5b55c,0xbea3404b,4
+np.float32,0x7f738624,0x421a0a62,4
+np.float32,0x3e7d50a1,0xbf1b4cb4,4
+np.float32,0x3f44cc4a,0xbde9ebcc,4
+np.float32,0x7e1a7b0b,0x4216d777,4
+np.float32,0x3f1d9868,0xbe57c0da,4
+np.float32,0x1ebee2,0xc21a3263,4
+np.float32,0x31685f,0xc2195f6e,4
+np.float32,0x368a8e,0xc2193379,4
+np.float32,0xa9847,0xc21c0c2e,4
+np.float32,0x3bd3b3,0xc2190a56,4
+np.float32,0x3961e4,0xc2191ce3,4
+np.float32,0x7e13a243,0x4216c34e,4
+np.float32,0x7f7b1790,0x421a17ff,4
+np.float32,0x3e55f020,0xbf2e1545,4
+np.float32,0x3f513861,0xbdb37aa8,4
+np.float32,0x3dd9e754,0xbf791ad2,4
+np.float32,0x5e8d86,0xc2183ec9,4
+np.float32,0x26b796,0xc219cbdd,4
+np.float32,0x429daa,0xc218da89,4
+np.float32,0x3f477caa,0xbdddd9ba,4
+np.float32,0x3f0e5114,0xbe828d45,4
+np.float32,0x3f54f362,0xbda3c286,4
+np.float32,0x6eac1c,0xc217f8c8,4
+np.float32,0x3f04c479,0xbe91fef5,4
+np.float32,0x3e993765,0xbf06228e,4
+np.float32,0x3eafd99f,0xbeeda21b,4
+np.float32,0x3f2a759e,0xbe34db96,4
+np.float32,0x3f05adfb,0xbe907937,4
+np.float32,0x3f6e2dfc,0xbd005980,4
+np.float32,0x3f2f2daa,0xbe28b6b5,4
+np.float32,0x15e746,0xc21ac931,4
+np.float32,0x7d34ca26,0x4214b4e5,4
+np.float32,0x7ebd175c,0x4218659f,4
+np.float32,0x7f1ed26b,0x42194c4c,4
+np.float32,0x2588b,0xc21eaab0,4
+np.float32,0x3f0065e3,0xbe996fe2,4
+np.float32,0x3f610376,0xbd658122,4
+np.float32,0x451995,0xc218ca41,4
+np.float32,0x70e083,0xc217f002,4
+np.float32,0x7e19821a,0x4216d4a8,4
+np.float32,0x3e7cd9a0,0xbf1b80fb,4
+np.float32,0x7f1a8f18,0x42194033,4
+np.float32,0x3f008fee,0xbe99271f,4
+np.float32,0xff7fffff,0x7fc00000,4
+np.float32,0x7f31d826,0x42197e9b,4
+np.float32,0x3f18cf12,0xbe657838,4
+np.float32,0x3e5c1bc7,0xbf2aebf9,4
+np.float32,0x3e3d3993,0xbf3bbaf8,4
+np.float32,0x68457a,0xc2181347,4
+np.float32,0x7ddf7561,0x42164761,4
+np.float32,0x7f47341b,0x4219b10c,4
+np.float32,0x4d3ecd,0xc21898b2,4
+np.float32,0x7f43dee8,0x4219a98b,4
+np.float32,0x3f0def7c,0xbe8325f5,4
+np.float32,0x3d5a551f,0xbfa2f994,4
+np.float32,0x7ed26602,0x4218951b,4
+np.float32,0x3ee7fa5b,0xbeb0099a,4
+np.float32,0x7ef74ea8,0x4218dcfc,4
+np.float32,0x6a3bb2,0xc2180afd,4
+np.float32,0x7f4c1e6e,0x4219bbe3,4
+np.float32,0x3e26f625,0xbf49a5a2,4
+np.float32,0xb8482,0xc21be70b,4
+np.float32,0x3f32f077,0xbe1f445b,4
+np.float32,0x7dd694b6,0x4216355a,4
+np.float32,0x7f3d62fd,0x42199a92,4
+np.float32,0x3f48e41a,0xbdd79cbf,4
+np.float32,0x338fc3,0xc2194c75,4
+np.float32,0x3e8355f0,0xbf174462,4
+np.float32,0x7f487e83,0x4219b3eb,4
+np.float32,0x2227f7,0xc21a039b,4
+np.float32,0x7e4383dd,0x4217403a,4
+np.float32,0x52d28b,0xc21879b2,4
+np.float32,0x12472c,0xc21b19a9,4
+np.float32,0x353530,0xc2193e7b,4
+np.float32,0x3f4e4728,0xbdc0137a,4
+np.float32,0x3bf169,0xc2190979,4
+np.float32,0x3eb3ee2e,0xbee8885f,4
+np.float32,0x3f03e3c0,0xbe937892,4
+np.float32,0x3c9f8408,0xbfdaf47f,4
+np.float32,0x40e792,0xc218e61b,4
+np.float32,0x5a6b29,0xc21852ab,4
+np.float32,0x7f268b83,0x4219616a,4
+np.float32,0x3ee25997,0xbeb57fa7,4
+np.float32,0x3f175324,0xbe69cf53,4
+np.float32,0x3f781d91,0xbc5e9827,4
+np.float32,0x7dba5210,0x4215f68c,4
+np.float32,0x7f1e66,0xc217bb2b,4
+np.float32,0x7f7fffff,0x421a209b,4
+np.float32,0x3f646202,0xbd4b10b8,4
+np.float32,0x575248,0xc218622b,4
+np.float32,0x7c67faa1,0x4212bb42,4
+np.float32,0x7f1683f2,0x42193469,4
+np.float32,0x1a3864,0xc21a7931,4
+np.float32,0x7f30ad75,0x42197bae,4
+np.float32,0x7f1c9d05,0x42194612,4
+np.float32,0x3e791795,0xbf1d2b2c,4
+np.float32,0x7e9ebc19,0x421817cd,4
+np.float32,0x4999b7,0xc218ae31,4
+np.float32,0x3d130e2c,0xbfb8f1cc,4
+np.float32,0x3f7e436f,0xbb41bb07,4
+np.float32,0x3ee00241,0xbeb7cf7d,4
+np.float32,0x7e496181,0x42174d5f,4
+np.float32,0x7efe58be,0x4218e978,4
+np.float32,0x3f5e5b0c,0xbd7aa43f,4
+np.float32,0x7ee4c6ab,0x4218ba59,4
+np.float32,0x3f6da8c6,0xbd043d7e,4
+np.float32,0x3e3e6e0f,0xbf3b064b,4
+np.float32,0x3f0143b3,0xbe97f10a,4
+np.float32,0x79170f,0xc217d0c6,4
+np.float32,0x517645,0xc218810f,4
+np.float32,0x3f1f9960,0xbe52226e,4
+np.float32,0x2a8df9,0xc219a1d6,4
+np.float32,0x2300a6,0xc219f8b8,4
+np.float32,0x3ee31355,0xbeb4c97a,4
+np.float32,0x3f20b05f,0xbe4f1ba9,4
+np.float32,0x3ee64249,0xbeb1b0ff,4
+np.float32,0x3a94b7,0xc21913b2,4
+np.float32,0x7ef7ef43,0x4218de1d,4
+np.float32,0x3f1abb5d,0xbe5fe872,4
+np.float32,0x7f65360b,0x4219ef72,4
+np.float32,0x3d315d,0xc219004c,4
+np.float32,0x3f26bbc4,0xbe3eafb9,4
+np.float32,0x3ee8c6e9,0xbeaf45de,4
+np.float32,0x7e5f1452,0x42177ae1,4
+np.float32,0x3f32e777,0xbe1f5aba,4
+np.float32,0x4d39a1,0xc21898d0,4
+np.float32,0x3e59ad15,0xbf2c2841,4
+np.float32,0x3f4be746,0xbdca5fc4,4
+np.float32,0x72e4fd,0xc217e821,4
+np.float32,0x1af0b8,0xc21a6d25,4
+np.float32,0x3f311147,0xbe23f18d,4
+np.float32,0x3f1ecebb,0xbe545880,4
+np.float32,0x7e90d293,0x4217ef02,4
+np.float32,0x3e3b366a,0xbf3ceb46,4
+np.float32,0x3f133239,0xbe761c96,4
+np.float32,0x7541ab,0xc217df15,4
+np.float32,0x3d8c8275,0xbf94f1a1,4
+np.float32,0x483b92,0xc218b689,4
+np.float32,0x3eb0dbed,0xbeec5c6b,4
+np.float32,0x3f00c676,0xbe98c8e2,4
+np.float32,0x3f445ac2,0xbdebed7c,4
+np.float32,0x3d2af4,0xc219007a,4
+np.float32,0x7f196ee1,0x42193cf2,4
+np.float32,0x290c94,0xc219b1db,4
+np.float32,0x3f5dbdc9,0xbd7f9019,4
+np.float32,0x3e80c62e,0xbf1974fc,4
+np.float32,0x3ec9ed2c,0xbecee326,4
+np.float32,0x7f469d60,0x4219afbb,4
+np.float32,0x3f698413,0xbd2386ce,4
+np.float32,0x42163f,0xc218de14,4
+np.float32,0x67a554,0xc21815f4,4
+np.float32,0x3f4bff74,0xbdc9f651,4
+np.float32,0x16a743,0xc21aba39,4
+np.float32,0x2eb8b0,0xc219784b,4
+np.float32,0x3eed9be1,0xbeaab45b,4
+np.float64,0x7fe0d76873e1aed0,0x40733f9d783bad7a,2
+np.float64,0x3fe22626bb244c4d,0xbfcf86a59864eea2,2
+np.float64,0x7f874113d02e8227,0x407324f54c4015b8,2
+np.float64,0x3fe40a46a9e8148d,0xbfca0411f533fcb9,2
+np.float64,0x3fd03932eea07266,0xbfe312bc9cf5649e,2
+np.float64,0x7fee5d2a1b3cba53,0x407343b5f56367a0,2
+np.float64,0x3feb7bda4a76f7b5,0xbfb0ea2c6edc784a,2
+np.float64,0x3fd6cd831a2d9b06,0xbfdcaf2e1a5faf51,2
+np.float64,0x98324e273064a,0xc0733e0e4c6d11c6,2
+np.float64,0x7fe1dd63b363bac6,0x4073400667c405c3,2
+np.float64,0x3fec5971f178b2e4,0xbfaaef32a7d94563,2
+np.float64,0x17abc07e2f579,0xc0734afca4da721e,2
+np.float64,0x3feec6ab5cfd8d57,0xbf9157f3545a8235,2
+np.float64,0x3fe3ae9622a75d2c,0xbfcb04b5ad254581,2
+np.float64,0x7fea73d854b4e7b0,0x407342c0a548f4c5,2
+np.float64,0x7fe29babf4653757,0x4073404eeb5fe714,2
+np.float64,0x7fd3a55d85a74aba,0x40733bde72e86c27,2
+np.float64,0x3fe83ce305f079c6,0xbfbee3511e85e0f1,2
+np.float64,0x3fd72087ea2e4110,0xbfdc4ab30802d7c2,2
+np.float64,0x7feb54ddab76a9ba,0x407342facb6f3ede,2
+np.float64,0xc57e34a18afd,0xc0734f82ec815baa,2
+np.float64,0x7a8cb97ef5198,0xc0733f8fb3777a67,2
+np.float64,0x7fe801032c300205,0x40734213dbe4eda9,2
+np.float64,0x3aefb1f475df7,0xc07344a5f08a0584,2
+np.float64,0x7fee85f1dd3d0be3,0x407343bf4441c2a7,2
+np.float64,0x3fdc7f1055b8fe21,0xbfd67d300630e893,2
+np.float64,0xe8ecddb3d1d9c,0xc0733b194f18f466,2
+np.float64,0x3fdf2b23c73e5648,0xbfd3ff6872c1f887,2
+np.float64,0x3fdba4aef2b7495e,0xbfd7557205e18b7b,2
+np.float64,0x3fe2ac34c6e5586a,0xbfcdf1dac69bfa08,2
+np.float64,0x3fc9852628330a4c,0xbfe66914f0fb9b0a,2
+np.float64,0x7fda211acf344235,0x40733dd9c2177aeb,2
+np.float64,0x3fe9420eb432841d,0xbfba4dd969a32575,2
+np.float64,0xb2f9d1ed65f3a,0xc0733cedfb6527ff,2
+np.float64,0x3fe9768a68f2ed15,0xbfb967c39c35c435,2
+np.float64,0x7fe8268462b04d08,0x4073421eaed32734,2
+np.float64,0x3fcf331f063e663e,0xbfe39e2f4b427ca9,2
+np.float64,0x7fd4eb9e2b29d73b,0x40733c4e4141418d,2
+np.float64,0x7fd2bba658a5774c,0x40733b89cd53d5b1,2
+np.float64,0x3fdfdf04913fbe09,0xbfd360c7fd9d251b,2
+np.float64,0x3fca5bfd0534b7fa,0xbfe5f5f844b2b20c,2
+np.float64,0x3feacd5032f59aa0,0xbfb3b5234ba8bf7b,2
+np.float64,0x7fe9241cec724839,0x4073426631362cec,2
+np.float64,0x3fe57aca20eaf594,0xbfc628e3ac2c6387,2
+np.float64,0x3fec6553ca38caa8,0xbfaa921368d3b222,2
+np.float64,0x3fe1e9676563d2cf,0xbfd020f866ba9b24,2
+np.float64,0x3fd5590667aab20d,0xbfde8458af5a4fd6,2
+np.float64,0x3fdf7528f43eea52,0xbfd3bdb438d6ba5e,2
+np.float64,0xb8dddc5571bbc,0xc0733cb4601e5bb2,2
+np.float64,0xe6d4e1fbcda9c,0xc0733b295ef4a4ba,2
+np.float64,0x3fe7019d962e033b,0xbfc257c0a6e8de16,2
+np.float64,0x3f94ef585029deb1,0xbffb07e5dfb0e936,2
+np.float64,0x7fc863b08030c760,0x4073388e28d7b354,2
+np.float64,0xf684443bed089,0xc0733ab46cfbff9a,2
+np.float64,0x7fe00e901d201d1f,0x40733f489c05a0f0,2
+np.float64,0x9e5c0a273cb82,0xc0733dc7af797e19,2
+np.float64,0x7fe49734f0692e69,0x4073410303680df0,2
+np.float64,0x7fb7b584442f6b08,0x4073338acff72502,2
+np.float64,0x3f99984c30333098,0xbff9a2642a6ed8cc,2
+np.float64,0x7fea2fcda8745f9a,0x407342aeae7f5e64,2
+np.float64,0xe580caadcb01a,0xc0733b33a3639217,2
+np.float64,0x1899ab3831336,0xc0734ab823729417,2
+np.float64,0x39bd4c76737aa,0xc07344ca6fac6d21,2
+np.float64,0xd755b2dbaeab7,0xc0733ba4fe19f2cc,2
+np.float64,0x3f952bebf82a57d8,0xbffaf3e7749c2512,2
+np.float64,0x3fe62ee5d72c5dcc,0xbfc45e3cb5baad08,2
+np.float64,0xb1264a7d624ca,0xc0733d003a1d0a66,2
+np.float64,0x3fc4bd1bcd297a38,0xbfe94b3058345c46,2
+np.float64,0x7fc5758bb32aeb16,0x407337aa7805497f,2
+np.float64,0x3fb0edcaf421db96,0xbff2dfb09c405294,2
+np.float64,0x3fd240fceaa481fa,0xbfe16f356bb36134,2
+np.float64,0x38c0c62a7181a,0xc07344e916d1e9b7,2
+np.float64,0x3fe98f2b3bf31e56,0xbfb8fc6eb622a820,2
+np.float64,0x3fe2bdf99c257bf3,0xbfcdbd0dbbae4d0b,2
+np.float64,0xce4b390d9c967,0xc0733bf14ada3134,2
+np.float64,0x3fd2ad607ba55ac1,0xbfe11da15167b37b,2
+np.float64,0x3fd8154f11b02a9e,0xbfdb2a6fabb9a026,2
+np.float64,0xf37849fde6f09,0xc0733aca8c64344c,2
+np.float64,0x3fcbae43b2375c87,0xbfe547f267c8e570,2
+np.float64,0x3fcd46fd7d3a8dfb,0xbfe48070f7232929,2
+np.float64,0x7fcdd245273ba489,0x407339f3d907b101,2
+np.float64,0x3fac75cd0838eb9a,0xbff4149d177b057b,2
+np.float64,0x7fe8ff3fd7f1fe7f,0x4073425bf968ba6f,2
+np.float64,0x7febadaa4df75b54,0x407343113a91f0e9,2
+np.float64,0x7fd5e4649c2bc8c8,0x40733c9f0620b065,2
+np.float64,0x903429812069,0xc07351b255e27887,2
+np.float64,0x3fe1d8c51c63b18a,0xbfd03ad448c1f1ee,2
+np.float64,0x3fe573ea646ae7d5,0xbfc63ab0bfd0e601,2
+np.float64,0x3f83b3f3c02767e8,0xc00022677e310649,2
+np.float64,0x7fd15d1582a2ba2a,0x40733b02c469c1d6,2
+np.float64,0x3fe63d3dabec7a7b,0xbfc43a56ee97b27e,2
+np.float64,0x7fe3a452fb2748a5,0x407340af1973c228,2
+np.float64,0x3fafac6b303f58d6,0xbff35651703ae9f2,2
+np.float64,0x513ddd24a27bc,0xc073426af96aaebb,2
+np.float64,0x3fef152246be2a45,0xbf89df79d7719282,2
+np.float64,0x3fe8c923e9f19248,0xbfbc67228e8db5f6,2
+np.float64,0x3fd6e2325fadc465,0xbfdc9602fb0b950f,2
+np.float64,0x3fe9616815f2c2d0,0xbfb9c4311a3b415b,2
+np.float64,0x2fe4e4005fc9d,0xc0734616fe294395,2
+np.float64,0x3fbceb02dc39d606,0xbfee4e68f1c7886f,2
+np.float64,0x7fe35e843d66bd07,0x407340963b066ad6,2
+np.float64,0x7fecd6c648f9ad8c,0x4073435a4c176e94,2
+np.float64,0x7fcbd72bf437ae57,0x4073397994b85665,2
+np.float64,0x3feff6443b3fec88,0xbf40eb380d5318ae,2
+np.float64,0x7fb9373cf6326e79,0x407333f869edef08,2
+np.float64,0x63790d9cc6f22,0xc0734102d4793cda,2
+np.float64,0x3f9de6efe83bcde0,0xbff88db6f0a6b56e,2
+np.float64,0xe00f2dc1c01f,0xc0734ea26ab84ff2,2
+np.float64,0xd7a9aa8baf536,0xc0733ba248fa33ab,2
+np.float64,0x3fee0089ea7c0114,0xbf9cab936ac31c4b,2
+np.float64,0x3fdec0d51cbd81aa,0xbfd45ed8878c5860,2
+np.float64,0x7fe91bf5e9f237eb,0x40734263f005081d,2
+np.float64,0x34ea7d1e69d50,0xc07345659dde7444,2
+np.float64,0x7fe67321a3ace642,0x4073419cc8130d95,2
+np.float64,0x9d1aeb2f3a35e,0xc0733dd5d506425c,2
+np.float64,0x7fbb01df003603bd,0x4073347282f1391d,2
+np.float64,0x42b945b285729,0xc07343c92d1bbef9,2
+np.float64,0x7fc92799b8324f32,0x407338c51e3f0733,2
+np.float64,0x3fe119c19b223383,0xbfd16ab707f65686,2
+np.float64,0x3fc9f9ac5333f359,0xbfe62a2f91ec0dff,2
+np.float64,0x3fd820d5a8b041ab,0xbfdb1d2586fe7b18,2
+np.float64,0x10000000000000,0xc0733a7146f72a42,2
+np.float64,0x3fe7e1543eafc2a8,0xbfc045362889592d,2
+np.float64,0xcbc0e1819783,0xc0734f4b68e05b1c,2
+np.float64,0xeb57e411d6afd,0xc0733b06efec001a,2
+np.float64,0xa9b74b47536ea,0xc0733d4c7bd06ddc,2
+np.float64,0x3fe56d4022eada80,0xbfc64bf8c7e3dd59,2
+np.float64,0x3fd445ca27288b94,0xbfdff40aecd0f882,2
+np.float64,0x3fe5af1cf5ab5e3a,0xbfc5a21d83699a04,2
+np.float64,0x7fed3431eb7a6863,0x40734370aa6131e1,2
+np.float64,0x3fd878dea1b0f1bd,0xbfdab8730dc00517,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0x3feba9fcc1f753fa,0xbfb03027dcecbf65,2
+np.float64,0x7fca4feed6349fdd,0x4073391526327eb0,2
+np.float64,0x3fe7748ddbaee91c,0xbfc144b438218065,2
+np.float64,0x3fb5fbd94c2bf7b3,0xbff10ee6342c21a0,2
+np.float64,0x3feb603b97f6c077,0xbfb15a1f99d6d25e,2
+np.float64,0x3fe2e6fc8ce5cdf9,0xbfcd43edd7f3b4e6,2
+np.float64,0x7feb2b31f7765663,0x407342f02b306688,2
+np.float64,0x3fe290e2282521c4,0xbfce436deb8dbcf3,2
+np.float64,0x3fe3d5adf9e7ab5c,0xbfca96b8aa55d942,2
+np.float64,0x691899f2d2314,0xc07340a1026897c8,2
+np.float64,0x7fe468b008e8d15f,0x407340f33eadc628,2
+np.float64,0x3fb3a4c416274988,0xbff1d71da539a56e,2
+np.float64,0x3fe2442b29e48856,0xbfcf2b0037322661,2
+np.float64,0x3f376fbc7e6ef,0xc073442939a84643,2
+np.float64,0x3fe7c78d65ef8f1b,0xbfc08157cff411de,2
+np.float64,0xd4f27acba9e50,0xc0733bb8d38daa50,2
+np.float64,0x5198919ea3313,0xc07342633ba7cbea,2
+np.float64,0x7fd09f66f0a13ecd,0x40733ab5310b4385,2
+np.float64,0x3fdfe5531dbfcaa6,0xbfd35b487c7e739f,2
+np.float64,0x3fc4b0fecc2961fe,0xbfe95350c38c1640,2
+np.float64,0x7fd5ae21962b5c42,0x40733c8db78b7250,2
+np.float64,0x3fa4a8fcd42951fa,0xbff64e62fe602b72,2
+np.float64,0x7fc8e0e25831c1c4,0x407338b179b91223,2
+np.float64,0x7fdde1df6f3bc3be,0x40733ec87f9f027e,2
+np.float64,0x3fd8b9ad86b1735b,0xbfda6f385532c41b,2
+np.float64,0x3fd9f20ee933e41e,0xbfd91872fd858597,2
+np.float64,0x7feb35332df66a65,0x407342f2b9c715f0,2
+np.float64,0x7fe783dc7eaf07b8,0x407341ef41873706,2
+np.float64,0x7fceee929f3ddd24,0x40733a34e3c660fd,2
+np.float64,0x985b58d730b6b,0xc0733e0c6cfbb6f8,2
+np.float64,0x3fef4bb55cfe976b,0xbf83cb246c6f2a78,2
+np.float64,0x3fe218014f243003,0xbfcfb20ac683e1f6,2
+np.float64,0x7fe43b9fbea8773e,0x407340e3d5d5d29e,2
+np.float64,0x7fe148c74c62918e,0x40733fcba4367b8b,2
+np.float64,0x3feea4ad083d495a,0xbf93443917f3c991,2
+np.float64,0x8bcf6311179ed,0xc0733ea54d59dd31,2
+np.float64,0xf4b7a2dbe96f5,0xc0733ac175182401,2
+np.float64,0x543338baa8668,0xc073422b59165fe4,2
+np.float64,0x3fdb467317368ce6,0xbfd7b4d515929635,2
+np.float64,0x7fe3bbbc89e77778,0x407340b75cdf3de7,2
+np.float64,0x7fe693377aad266e,0x407341a6af60a0f1,2
+np.float64,0x3fc66210502cc421,0xbfe83bb940610a24,2
+np.float64,0x7fa75638982eac70,0x40732e9da476b816,2
+np.float64,0x3fe0d72a4761ae55,0xbfd1d7c82c479fab,2
+np.float64,0x97dec0dd2fbd8,0xc0733e121e072804,2
+np.float64,0x3fef33ec8c7e67d9,0xbf86701be6be8df1,2
+np.float64,0x7fcfca9b423f9536,0x40733a65a51efb94,2
+np.float64,0x9f2215633e443,0xc0733dbf043de9ed,2
+np.float64,0x2469373e48d28,0xc07347fe9e904b77,2
+np.float64,0x7fecc2e18cb985c2,0x407343557f58dfa2,2
+np.float64,0x3fde4acbfdbc9598,0xbfd4ca559e575e74,2
+np.float64,0x3fd6b11cf1ad623a,0xbfdcd1e17ef36114,2
+np.float64,0x3fc19ec494233d89,0xbfeb8ef228e8826a,2
+np.float64,0x4c89ee389913e,0xc07342d50c904f61,2
+np.float64,0x88c2046f11841,0xc0733ecc91369431,2
+np.float64,0x7fc88c13fd311827,0x40733899a125b392,2
+np.float64,0x3fcebd893a3d7b12,0xbfe3d2f35ab93765,2
+np.float64,0x3feb582a1476b054,0xbfb17ae8ec6a0465,2
+np.float64,0x7fd4369e5da86d3c,0x40733c1118b8cd67,2
+np.float64,0x3fda013fc1340280,0xbfd90831b85e98b2,2
+np.float64,0x7fed33d73fba67ad,0x4073437094ce1bd9,2
+np.float64,0x3fed3191053a6322,0xbfa468cc26a8f685,2
+np.float64,0x3fc04ed51c209daa,0xbfeca24a6f093bca,2
+np.float64,0x3fee4ac8763c9591,0xbf986458abbb90b5,2
+np.float64,0xa2d39dd145a74,0xc0733d9633651fbc,2
+np.float64,0x3fe7d9f86f2fb3f1,0xbfc0565a0b059f1c,2
+np.float64,0x3fe3250144e64a03,0xbfcc8eb2b9ae494b,2
+np.float64,0x7fe2b29507a56529,0x4073405774492075,2
+np.float64,0x7fdcdfcbe2b9bf97,0x40733e8b736b1bd8,2
+np.float64,0x3fc832730f3064e6,0xbfe7267ac9b2e7c3,2
+np.float64,0x3fc7e912e52fd226,0xbfe750dfc0aeae57,2
+np.float64,0x7fc960472f32c08d,0x407338d4b4cb3957,2
+np.float64,0x3fbdf182ea3be306,0xbfedd27150283ffb,2
+np.float64,0x3fd1e9359823d26b,0xbfe1b2ac7fd25f8d,2
+np.float64,0x7fbcf75f6039eebe,0x407334ef13eb16f8,2
+np.float64,0x3fe5a3c910eb4792,0xbfc5bf2f57c5d643,2
+np.float64,0x3fcf4f2a6e3e9e55,0xbfe391b6f065c4b8,2
+np.float64,0x3fee067873fc0cf1,0xbf9c53af0373fc0e,2
+np.float64,0xd3f08b85a7e12,0xc0733bc14357e686,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0x3fc8635f6430c6bf,0xbfe70a7dc77749a7,2
+np.float64,0x3fe3ff5c52a7feb9,0xbfca22617c6636d5,2
+np.float64,0x3fbbae91fa375d24,0xbfeee9d4c300543f,2
+np.float64,0xe3f71b59c7ee4,0xc0733b3f99187375,2
+np.float64,0x7fca93d3be3527a6,0x40733926fd48ecd6,2
+np.float64,0x3fcd29f7223a53ee,0xbfe48e3edf32fe57,2
+np.float64,0x7fdc4ef6f8389ded,0x40733e68401cf2a6,2
+np.float64,0xe009bc81c014,0xc0734ea295ee3e5b,2
+np.float64,0x61f56c78c3eae,0xc073411e1dbd7c54,2
+np.float64,0x3fde131928bc2632,0xbfd4fda024f6927c,2
+np.float64,0x3fb21ee530243dca,0xbff266aaf0358129,2
+np.float64,0x7feaac82a4f55904,0x407342cf7809d9f9,2
+np.float64,0x3fe66ab177ecd563,0xbfc3c92d4d522819,2
+np.float64,0xfe9f9c2bfd3f4,0xc0733a7ade3a88a7,2
+np.float64,0x7fd0c5217c218a42,0x40733ac4e4c6dfa5,2
+np.float64,0x430f4ae6861ea,0xc07343c03d8a9442,2
+np.float64,0x494bff2a92981,0xc073432209d2fd16,2
+np.float64,0x3f8860e9d030c1d4,0xbffeca059ebf5e89,2
+np.float64,0x3fe43732dc286e66,0xbfc98800388bad2e,2
+np.float64,0x6443b60ec8877,0xc07340f4bab11827,2
+np.float64,0x3feda9be6d7b537d,0xbfa0dcb9a6914069,2
+np.float64,0x3fc5ceb6772b9d6d,0xbfe89868c881db70,2
+np.float64,0x3fbdf153023be2a6,0xbfedd2878c3b4949,2
+np.float64,0x7fe8f6b8e8f1ed71,0x407342599a30b273,2
+np.float64,0x3fea6fbdb8b4df7b,0xbfb53bf66f71ee96,2
+np.float64,0xc7ac3dbb8f588,0xc0733c2b525b7963,2
+np.float64,0x3fef3a91f77e7524,0xbf85b2bd3adbbe31,2
+np.float64,0x3f887cb97030f973,0xbffec21ccbb5d22a,2
+np.float64,0x8b2f1c9f165e4,0xc0733ead49300951,2
+np.float64,0x2c1cb32058397,0xc07346a951bd8d2b,2
+np.float64,0x3fe057edd620afdc,0xbfd2acf1881b7e99,2
+np.float64,0x7f82e9530025d2a5,0x4073238591dd52ce,2
+np.float64,0x3fe4e03dff69c07c,0xbfc7be96c5c006fc,2
+np.float64,0x52727b4aa4e50,0xc0734250c58ebbc1,2
+np.float64,0x3f99a62160334c43,0xbff99ea3ca09d8f9,2
+np.float64,0x3fd5314b4faa6297,0xbfdeb843daf01e03,2
+np.float64,0x3fefde89e13fbd14,0xbf5d1facb7a1e9de,2
+np.float64,0x7fb460f1a228c1e2,0x4073327d8cbc5f86,2
+np.float64,0xeb93efb3d727e,0xc0733b052a4990e4,2
+np.float64,0x3fe884baecf10976,0xbfbd9ba9cfe23713,2
+np.float64,0x7fefffffffffffff,0x40734413509f79ff,2
+np.float64,0x149dc7c6293ba,0xc0734bf26b1df025,2
+np.float64,0x64188f88c8313,0xc07340f7b8e6f4b5,2
+np.float64,0x3fdfac314abf5863,0xbfd38d3e9dba1b0e,2
+np.float64,0x3fd72052a42e40a5,0xbfdc4af30ee0b245,2
+np.float64,0x7fdd951f743b2a3e,0x40733eb68fafa838,2
+np.float64,0x65a2dd5acb45c,0xc07340dc8ed625e1,2
+np.float64,0x7fe89a79997134f2,0x4073423fbceb1cbe,2
+np.float64,0x3fe70a000d6e1400,0xbfc24381e09d02f7,2
+np.float64,0x3fe2cec160259d83,0xbfcd8b5e92354129,2
+np.float64,0x3feb9ef77a773def,0xbfb05c7b2ee6f388,2
+np.float64,0xe0d66689c1acd,0xc0733b582c779620,2
+np.float64,0x3fee86bd0ffd0d7a,0xbf94f7870502c325,2
+np.float64,0x186afc6230d60,0xc0734ac55fb66d5d,2
+np.float64,0xc0631f4b80c64,0xc0733c6d7149d373,2
+np.float64,0x3fdad1b87735a371,0xbfd82cca73ec663b,2
+np.float64,0x7fe7f6d313efeda5,0x40734210e84576ab,2
+np.float64,0x7fd7b7fce6af6ff9,0x40733d2d92ffdaaf,2
+np.float64,0x3fe6f35a28ade6b4,0xbfc27a4239b540c3,2
+np.float64,0x7fdb0b834eb61706,0x40733e17073a61f3,2
+np.float64,0x82f4661105e8d,0xc0733f19b34adeed,2
+np.float64,0x3fc77230112ee460,0xbfe796a7603c0d16,2
+np.float64,0x8000000000000000,0xfff0000000000000,2
+np.float64,0x7fb8317bc63062f7,0x407333aec761a739,2
+np.float64,0x7fd165609a22cac0,0x40733b061541ff15,2
+np.float64,0x3fed394768fa728f,0xbfa42e1596e1faf6,2
+np.float64,0x7febab693d7756d1,0x40734310a9ac828e,2
+np.float64,0x7fe809a69230134c,0x407342165b9acb69,2
+np.float64,0x3fc091d38f2123a7,0xbfec69a70fc23548,2
+np.float64,0x3fb2a8f5dc2551ec,0xbff2327f2641dd0d,2
+np.float64,0x7fc60b6fe02c16df,0x407337da5adc342c,2
+np.float64,0x3fefa53c3bbf4a78,0xbf73d1be15b73b00,2
+np.float64,0x7fee09c1717c1382,0x407343a2c479e1cb,2
+np.float64,0x8000000000000001,0x7ff8000000000000,2
+np.float64,0x3fede0b2733bc165,0xbf9e848ac2ecf604,2
+np.float64,0x3fee2ac331bc5586,0xbf9a3b699b721c9a,2
+np.float64,0x3fd4db12d829b626,0xbfdf2a413d1e453a,2
+np.float64,0x7fe605230dec0a45,0x4073417a67db06be,2
+np.float64,0x3fe378b2bf26f165,0xbfcb9dbb2b6d6832,2
+np.float64,0xc1d4c1ab83a98,0xc0733c60244cadbf,2
+np.float64,0x3feb15500e762aa0,0xbfb28c071d5efc22,2
+np.float64,0x3fe36225a626c44b,0xbfcbde4259e9047e,2
+np.float64,0x3fe7c586a72f8b0d,0xbfc08614b13ed4b2,2
+np.float64,0x7fb0f2d8cc21e5b1,0x40733135b2c7dd99,2
+np.float64,0x5957f3feb2aff,0xc07341c1df75638c,2
+np.float64,0x3fca4851bd3490a3,0xbfe6005ae5279485,2
+np.float64,0x824217d904843,0xc0733f232fd58f0f,2
+np.float64,0x4f9332269f267,0xc073428fd8e9cb32,2
+np.float64,0x3fea6f087374de11,0xbfb53ef0d03918b2,2
+np.float64,0x3fd9409ab4328135,0xbfd9d9231381e2b8,2
+np.float64,0x3fdba03b00374076,0xbfd759ec94a7ab5b,2
+np.float64,0x3fe0ce3766619c6f,0xbfd1e6912582ccf0,2
+np.float64,0x3fabd45ddc37a8bc,0xbff43c78d3188423,2
+np.float64,0x3fc3cadd592795bb,0xbfe9f1576c9b2c79,2
+np.float64,0x3fe10df049621be1,0xbfd17df2f2c28022,2
+np.float64,0x945b5d1328b6c,0xc0733e3bc06f1e75,2
+np.float64,0x7fc1c3742b2386e7,0x4073365a403d1051,2
+np.float64,0x7fdc957138b92ae1,0x40733e7977717586,2
+np.float64,0x7f943fa1a0287f42,0x407328d01de143f5,2
+np.float64,0x3fec9631c4392c64,0xbfa914b176d8f9d2,2
+np.float64,0x3fd8e7c008b1cf80,0xbfda3b9d9b6da8f4,2
+np.float64,0x7222f9fee4460,0xc073400e371516cc,2
+np.float64,0x3fe890e43eb121c8,0xbfbd64921462e823,2
+np.float64,0x3fcfd7fe2a3faffc,0xbfe3557e2f207800,2
+np.float64,0x3fed5dd1c1babba4,0xbfa318bb20db64e6,2
+np.float64,0x3fe6aa34c66d546a,0xbfc32c8a8991c11e,2
+np.float64,0x8ca79801196,0xc0736522bd5adf6a,2
+np.float64,0x3feb274079364e81,0xbfb2427b24b0ca20,2
+np.float64,0x7fe04927e4a0924f,0x40733f61c96f7f89,2
+np.float64,0x7c05f656f80bf,0xc0733f7a70555b4e,2
+np.float64,0x7fe97819eff2f033,0x4073427d4169b0f8,2
+np.float64,0x9def86e33bdf1,0xc0733dcc740b7175,2
+np.float64,0x7fedd1ef3f3ba3dd,0x40734395ceab8238,2
+np.float64,0x77bed86cef7dc,0xc0733fb8e0e9bf73,2
+np.float64,0x9274b41b24e97,0xc0733e52b16dff71,2
+np.float64,0x8010000000000000,0x7ff8000000000000,2
+np.float64,0x9c977855392ef,0xc0733ddba7d421d9,2
+np.float64,0xfb4560a3f68ac,0xc0733a9271e6a118,2
+np.float64,0xa67d9f394cfb4,0xc0733d6e9d58cc94,2
+np.float64,0x3fbfa766b03f4ecd,0xbfed0cccfecfc900,2
+np.float64,0x3fe177417522ee83,0xbfd0d45803bff01a,2
+np.float64,0x7fe85e077bb0bc0e,0x4073422e957a4aa3,2
+np.float64,0x7feeb0a6883d614c,0x407343c8f6568f7c,2
+np.float64,0xbab82edb75706,0xc0733ca2a2b20094,2
+np.float64,0xfadb44bdf5b69,0xc0733a9561b7ec04,2
+np.float64,0x3fefb9b82b3f7370,0xbf6ea776b2dcc3a9,2
+np.float64,0x7fe080ba8a610174,0x40733f795779b220,2
+np.float64,0x3f87faa1c02ff544,0xbffee76acafc92b7,2
+np.float64,0x7fed474108fa8e81,0x4073437531d4313e,2
+np.float64,0x3fdb7b229336f645,0xbfd77f583a4a067f,2
+np.float64,0x256dbf0c4adb9,0xc07347cd94e6fa81,2
+np.float64,0x3fd034ae25a0695c,0xbfe3169c15decdac,2
+np.float64,0x3a72177274e44,0xc07344b4cf7d68cd,2
+np.float64,0x7fa2522d5c24a45a,0x40732cef2f793470,2
+np.float64,0x3fb052bdde20a57c,0xbff3207fd413c848,2
+np.float64,0x3fdccfecbbb99fd9,0xbfd62ec04a1a687a,2
+np.float64,0x3fd403ac53280759,0xbfe027a31df2c8cc,2
+np.float64,0x3fab708e4036e11d,0xbff45591df4f2e8b,2
+np.float64,0x7fcfc001993f8002,0x40733a63539acf9d,2
+np.float64,0x3fd2b295dfa5652c,0xbfe119c1b476c536,2
+np.float64,0x7fe8061262b00c24,0x4073421552ae4538,2
+np.float64,0xffefffffffffffff,0x7ff8000000000000,2
+np.float64,0x7fed52093ffaa411,0x40734377c072a7e8,2
+np.float64,0xf3df902fe7bf2,0xc0733ac79a75ff7a,2
+np.float64,0x7fe13d382e227a6f,0x40733fc6fd0486bd,2
+np.float64,0x3621d5086c43b,0xc073453d31effbcd,2
+np.float64,0x3ff0000000000000,0x0,2
+np.float64,0x3fdaffea27b5ffd4,0xbfd7fd139dc1c2c5,2
+np.float64,0x7fea6536dc34ca6d,0x407342bccc564fdd,2
+np.float64,0x7fd478f00c28f1df,0x40733c27c0072fde,2
+np.float64,0x7fa72ef0502e5de0,0x40732e91e83db75c,2
+np.float64,0x7fd302970626052d,0x40733ba3ec6775f6,2
+np.float64,0x7fbb57ab0036af55,0x407334887348e613,2
+np.float64,0x3fda0ff722b41fee,0xbfd8f87b77930330,2
+np.float64,0x1e983ce23d309,0xc073493438f57e61,2
+np.float64,0x7fc90de97c321bd2,0x407338be01ffd4bd,2
+np.float64,0x7fe074b09c20e960,0x40733f7443f0dbe1,2
+np.float64,0x3fed5dec9fbabbd9,0xbfa317efb1fe8a95,2
+np.float64,0x7fdb877632b70eeb,0x40733e3697c88ba8,2
+np.float64,0x7fe4fb0067e9f600,0x40734124604b99e8,2
+np.float64,0x7fd447dc96288fb8,0x40733c1703ab2cce,2
+np.float64,0x3feb2d1e64f65a3d,0xbfb22a781df61c05,2
+np.float64,0xb6c8e6676d91d,0xc0733cc8859a0b91,2
+np.float64,0x3fdc3c2418387848,0xbfd6bec3a3c3cdb5,2
+np.float64,0x3fdecb9ccdbd973a,0xbfd4551c05721a8e,2
+np.float64,0x3feb1100e7762202,0xbfb29db911fe6768,2
+np.float64,0x3fe0444bc2a08898,0xbfd2ce69582e78c1,2
+np.float64,0x7fda403218b48063,0x40733de201d8340c,2
+np.float64,0x3fdc70421238e084,0xbfd68ba4bd48322b,2
+np.float64,0x3fe06e747c60dce9,0xbfd286bcac34a981,2
+np.float64,0x7fc1931d9623263a,0x407336473da54de4,2
+np.float64,0x229914da45323,0xc073485979ff141c,2
+np.float64,0x3fe142f92da285f2,0xbfd1280909992cb6,2
+np.float64,0xf1d02fa9e3a06,0xc0733ad6b19d71a0,2
+np.float64,0x3fb1fe9b0023fd36,0xbff27317d8252c16,2
+np.float64,0x3fa544b9242a8972,0xbff61ac38569bcfc,2
+np.float64,0x3feeb129d4fd6254,0xbf928f23ad20c1ee,2
+np.float64,0xa2510b7f44a22,0xc0733d9bc81ea0a1,2
+np.float64,0x3fca75694d34ead3,0xbfe5e8975b3646c2,2
+np.float64,0x7fece10621b9c20b,0x4073435cc3dd9a1b,2
+np.float64,0x7fe98a57d3b314af,0x4073428239b6a135,2
+np.float64,0x3fe259c62a64b38c,0xbfcee96682a0f355,2
+np.float64,0x3feaaa9b9d755537,0xbfb445779f3359af,2
+np.float64,0xdaadecfdb55be,0xc0733b899338432a,2
+np.float64,0x3fed00eae4fa01d6,0xbfa5dc8d77be5991,2
+np.float64,0x7fcc96c773392d8e,0x407339a8c5cd786e,2
+np.float64,0x3fef7b8b203ef716,0xbf7cff655ecb6424,2
+np.float64,0x7fd4008113a80101,0x40733bfe6552acb7,2
+np.float64,0x7fe99ff035b33fdf,0x407342881753ee2e,2
+np.float64,0x3ee031e87dc07,0xc0734432d736e492,2
+np.float64,0x3fddfe390f3bfc72,0xbfd510f1d9ec3e36,2
+np.float64,0x3fd9ddce74b3bb9d,0xbfd92e2d75a061bb,2
+np.float64,0x7fe5f742edebee85,0x40734176058e3a77,2
+np.float64,0x3fdb04185b360831,0xbfd7f8c63aa5e1c4,2
+np.float64,0xea2b0f43d4562,0xc0733b0fd77c8118,2
+np.float64,0x7fc3f4973527e92d,0x407337293bbb22c4,2
+np.float64,0x3fb9adfb38335bf6,0xbfeff4f3ea85821a,2
+np.float64,0x87fb98750ff73,0xc0733ed6ad83c269,2
+np.float64,0x3fe005721a200ae4,0xbfd33a9f1ebfb0ac,2
+np.float64,0xd9e04fe7b3c0a,0xc0733b901ee257f3,2
+np.float64,0x2c39102658723,0xc07346a4db63bf55,2
+np.float64,0x3f7dc28e003b851c,0xc0011c1d1233d948,2
+np.float64,0x3430fd3868620,0xc073457e24e0b70d,2
+np.float64,0xbff0000000000000,0x7ff8000000000000,2
+np.float64,0x3fd23e45e0247c8c,0xbfe17146bcf87b57,2
+np.float64,0x6599df3ecb33d,0xc07340dd2c41644c,2
+np.float64,0x3fdf074f31be0e9e,0xbfd41f6e9dbb68a5,2
+np.float64,0x7fdd6233f3bac467,0x40733eaa8f674b72,2
+np.float64,0x7fe03e8481607d08,0x40733f5d3df3b087,2
+np.float64,0x3fcc3b79f13876f4,0xbfe501bf3b379b77,2
+np.float64,0xe5d97ae3cbb30,0xc0733b30f47cbd12,2
+np.float64,0x8acbc4a115979,0xc0733eb240a4d2c6,2
+np.float64,0x3fedbdbc48bb7b79,0xbfa0470fd70c4359,2
+np.float64,0x3fde1611103c2c22,0xbfd4fae1fa8e7e5e,2
+np.float64,0x3fe09478bd2128f1,0xbfd246b7e85711dc,2
+np.float64,0x3fd6dfe8f3adbfd2,0xbfdc98ca2f32c1ad,2
+np.float64,0x72ccf274e599f,0xc0734003e5b0da63,2
+np.float64,0xe27c7265c4f8f,0xc0733b4b2d808566,2
+np.float64,0x7fee3161703c62c2,0x407343abe90f5649,2
+np.float64,0xf54fb5c1eaa0,0xc0734e01384fcf78,2
+np.float64,0xcde5924d9bcb3,0xc0733bf4b83c66c2,2
+np.float64,0x3fc46fdbe528dfb8,0xbfe97f55ef5e9683,2
+np.float64,0x7fe513528a2a26a4,0x4073412c69baceca,2
+np.float64,0x3fd29eca4aa53d95,0xbfe128801cd33ed0,2
+np.float64,0x7febb21718b7642d,0x4073431256def857,2
+np.float64,0x3fcab536c0356a6e,0xbfe5c73c59f41578,2
+np.float64,0x7fc7e9f0d82fd3e1,0x4073386b213e5dfe,2
+np.float64,0xb5b121276b624,0xc0733cd33083941c,2
+np.float64,0x7e0dd9bcfc1bc,0xc0733f5d8bf35050,2
+np.float64,0x3fd1c75106238ea2,0xbfe1cd11cccda0f4,2
+np.float64,0x9f060e673e0c2,0xc0733dc03da71909,2
+np.float64,0x7fd915a2f3322b45,0x40733d912af07189,2
+np.float64,0x3fd8cbae4431975d,0xbfda5b02ca661139,2
+np.float64,0x3fde8b411f3d1682,0xbfd48f6f710a53b6,2
+np.float64,0x3fc17a780622f4f0,0xbfebabb10c55255f,2
+np.float64,0x3fde5cbe5f3cb97d,0xbfd4b9e2e0101fb1,2
+np.float64,0x7fd859036530b206,0x40733d5c2252ff81,2
+np.float64,0xb0f5040f61ea1,0xc0733d02292f527b,2
+np.float64,0x3fde5c49ae3cb893,0xbfd4ba4db3ce2cf3,2
+np.float64,0x3fecc4518df988a3,0xbfa7af0bfc98bc65,2
+np.float64,0x3feffee03cbffdc0,0xbf0f3ede6ca7d695,2
+np.float64,0xbc5eac9b78bd6,0xc0733c92fb51c8ae,2
+np.float64,0x3fe2bb4ef765769e,0xbfcdc4f70a65dadc,2
+np.float64,0x5089443ca1129,0xc073427a7d0cde4a,2
+np.float64,0x3fd0d6e29121adc5,0xbfe28e28ece1db86,2
+np.float64,0xbe171e397c2e4,0xc0733c82cede5d02,2
+np.float64,0x4ede27be9dbc6,0xc073429fba1a4af1,2
+np.float64,0x3fe2aff3af655fe7,0xbfcde6b52a8ed3c1,2
+np.float64,0x7fd85ca295b0b944,0x40733d5d2adcccf1,2
+np.float64,0x24919bba49234,0xc07347f6ed704a6f,2
+np.float64,0x7fd74bc1eeae9783,0x40733d0d94a89011,2
+np.float64,0x3fc1cd12cb239a26,0xbfeb6a9c25c2a11d,2
+np.float64,0x3fdafbc0ac35f781,0xbfd8015ccf1f1b51,2
+np.float64,0x3fee01327c3c0265,0xbf9ca1d0d762dc18,2
+np.float64,0x3fe65bd7702cb7af,0xbfc3ee0de5c36b8d,2
+np.float64,0x7349c82ee693a,0xc0733ffc5b6eccf2,2
+np.float64,0x3fdc5906f738b20e,0xbfd6a26288eb5933,2
+np.float64,0x1,0xc07434e6420f4374,2
+np.float64,0x3fb966128a32cc25,0xbff00e0aa7273838,2
+np.float64,0x3fd501ff9a2a03ff,0xbfdef69133482121,2
+np.float64,0x194d4f3c329ab,0xc0734a861b44cfbe,2
+np.float64,0x3fec5d34f8f8ba6a,0xbfaad1b31510e70b,2
+np.float64,0x1635e4c22c6be,0xc0734b6dec650943,2
+np.float64,0x3fead2f8edb5a5f2,0xbfb39dac30a962cf,2
+np.float64,0x3f7dfa4ce03bf49a,0xc00115a112141aa7,2
+np.float64,0x3fef6827223ed04e,0xbf80a42c9edebfe9,2
+np.float64,0xe771f303cee3f,0xc0733b24a6269fe4,2
+np.float64,0x1160ccc622c1b,0xc0734d22604eacb9,2
+np.float64,0x3fc485cd08290b9a,0xbfe970723008c8c9,2
+np.float64,0x7fef99c518bf3389,0x407343fcf9ed202f,2
+np.float64,0x7fd8c1447a318288,0x40733d79a440b44d,2
+np.float64,0xaf219f955e434,0xc0733d149c13f440,2
+np.float64,0xcf45f6239e8bf,0xc0733be8ddda045d,2
+np.float64,0x7599394aeb328,0xc0733fd90fdbb0ea,2
+np.float64,0xc7f6390f8fec7,0xc0733c28bfbc66a3,2
+np.float64,0x3fd39ae96c2735d3,0xbfe0712274a8742b,2
+np.float64,0xa4d6c18f49ad8,0xc0733d805a0528f7,2
+np.float64,0x7fd9ea78d7b3d4f1,0x40733dcb2b74802a,2
+np.float64,0x3fecd251cb39a4a4,0xbfa742ed41d4ae57,2
+np.float64,0x7fed7a07cd7af40f,0x407343813476027e,2
+np.float64,0x3fd328ae7f26515d,0xbfe0c30b56a83c64,2
+np.float64,0x7fc937ff7a326ffe,0x407338c9a45b9140,2
+np.float64,0x3fcf1d31143e3a62,0xbfe3a7f760fbd6a8,2
+np.float64,0x7fb911dcbc3223b8,0x407333ee158cccc7,2
+np.float64,0x3fd352fc83a6a5f9,0xbfe0a47d2f74d283,2
+np.float64,0x7fd310753fa620e9,0x40733ba8fc4300dd,2
+np.float64,0x3febd64b4577ac97,0xbfaefd4a79f95c4b,2
+np.float64,0x6a6961a4d4d2d,0xc073408ae1687943,2
+np.float64,0x3fe4ba73d16974e8,0xbfc8239341b9e457,2
+np.float64,0x3fed8e7cac3b1cf9,0xbfa1a96a0cc5fcdc,2
+np.float64,0x7fd505ec04aa0bd7,0x40733c56f86e3531,2
+np.float64,0x3fdf166e9abe2cdd,0xbfd411e5f8569d70,2
+np.float64,0x7fe1bc6434e378c7,0x40733ff9861bdabb,2
+np.float64,0x3fd3b0b175a76163,0xbfe061ba5703f3c8,2
+np.float64,0x7fed75d7ffbaebaf,0x4073438037ba6f19,2
+np.float64,0x5a9e109cb53c3,0xc07341a8b04819c8,2
+np.float64,0x3fe14786b4e28f0d,0xbfd120b541bb880e,2
+np.float64,0x3fed4948573a9291,0xbfa3b471ff91614b,2
+np.float64,0x66aac5d8cd559,0xc07340ca9b18af46,2
+np.float64,0x3fdb48efd23691e0,0xbfd7b24c5694838b,2
+np.float64,0x7fe6da7d1eadb4f9,0x407341bc7d1fae43,2
+np.float64,0x7feb702cf336e059,0x40734301b96cc3c0,2
+np.float64,0x3fd1e60987a3cc13,0xbfe1b522cfcc3d0e,2
+np.float64,0x3feca57f50794aff,0xbfa89dc90625d39c,2
+np.float64,0x7fdc46dc56b88db8,0x40733e664294a0f9,2
+np.float64,0x8dc8fd811b920,0xc0733e8c5955df06,2
+np.float64,0xf01634abe02c7,0xc0733ae370a76d0c,2
+np.float64,0x3fc6f8d8ab2df1b1,0xbfe7df5093829464,2
+np.float64,0xda3d7597b47af,0xc0733b8d2702727a,2
+np.float64,0x7feefd53227dfaa5,0x407343da3d04db28,2
+np.float64,0x3fe2fbca3525f794,0xbfcd06e134417c08,2
+np.float64,0x7fd36d3ce226da79,0x40733bca7c322df1,2
+np.float64,0x7fec37e00b786fbf,0x4073433397b48a5b,2
+np.float64,0x3fbf133f163e267e,0xbfed4e72f1362a77,2
+np.float64,0x3fc11efbb9223df7,0xbfebf53002a561fe,2
+np.float64,0x3fc89c0e5431381d,0xbfe6ea562364bf81,2
+np.float64,0x3f9cd45da839a8bb,0xbff8ceb14669ee4b,2
+np.float64,0x23dc8fa647b93,0xc0734819aaa9b0ee,2
+np.float64,0x3fe829110d305222,0xbfbf3e60c45e2399,2
+np.float64,0x7fed8144e57b0289,0x40734382e917a02a,2
+np.float64,0x7fe033fbf7a067f7,0x40733f58bb00b20f,2
+np.float64,0xe3807f45c7010,0xc0733b43379415d1,2
+np.float64,0x3fd708fb342e11f6,0xbfdc670ef9793782,2
+np.float64,0x3fe88c924b311925,0xbfbd78210d9e7164,2
+np.float64,0x3fe0a2a7c7614550,0xbfd22efaf0472c4a,2
+np.float64,0x7fe3a37501a746e9,0x407340aecaeade41,2
+np.float64,0x3fd05077ec20a0f0,0xbfe2fedbf07a5302,2
+np.float64,0x7fd33bf61da677eb,0x40733bb8c58912aa,2
+np.float64,0x3feb29bdae76537b,0xbfb2384a8f61b5f9,2
+np.float64,0x3fec0fc14ff81f83,0xbfad3423e7ade174,2
+np.float64,0x3fd0f8b1a1a1f163,0xbfe2725dd4ccea8b,2
+np.float64,0x3fe382d26a6705a5,0xbfcb80dba4218bdf,2
+np.float64,0x3fa873f2cc30e7e6,0xbff522911cb34279,2
+np.float64,0x7fed7fd7377affad,0x4073438292f6829b,2
+np.float64,0x3feeacd8067d59b0,0xbf92cdbeda94b35e,2
+np.float64,0x7fe464d62228c9ab,0x407340f1eee19aa9,2
+np.float64,0xe997648bd32ed,0xc0733b143aa0fad3,2
+np.float64,0x7fea4869f13490d3,0x407342b5333b54f7,2
+np.float64,0x935b871926b71,0xc0733e47c6683319,2
+np.float64,0x28a9d0c05155,0xc0735a7e3532af83,2
+np.float64,0x79026548f204d,0xc0733fa6339ffa2f,2
+np.float64,0x3fdb1daaabb63b55,0xbfd7de839c240ace,2
+np.float64,0x3fc0db73b421b6e7,0xbfec2c6e36c4f416,2
+np.float64,0xb8b50ac1716b,0xc0734ff9fc60ebce,2
+np.float64,0x7fdf13e0c6be27c1,0x40733f0e44f69437,2
+np.float64,0x3fcd0cb97b3a1973,0xbfe49c34ff531273,2
+np.float64,0x3fcbac034b375807,0xbfe54913d73f180d,2
+np.float64,0x3fe091d2a2e123a5,0xbfd24b290a9218de,2
+np.float64,0xede43627dbc87,0xc0733af3c7c7f716,2
+np.float64,0x7fc037e7ed206fcf,0x407335b85fb0fedb,2
+np.float64,0x3fce7ae4c63cf5ca,0xbfe3f1350fe03f28,2
+np.float64,0x7fcdd862263bb0c3,0x407339f5458bb20e,2
+np.float64,0x4d7adf709af5d,0xc07342bf4edfadb2,2
+np.float64,0xdc6c03f3b8d81,0xc0733b7b74d6a635,2
+np.float64,0x3fe72ae0a4ee55c1,0xbfc1f4665608b21f,2
+np.float64,0xcd62f19d9ac5e,0xc0733bf92235e4d8,2
+np.float64,0xe3a7b8fdc74f7,0xc0733b4204f8e166,2
+np.float64,0x3fdafd35adb5fa6b,0xbfd7ffdca0753b36,2
+np.float64,0x3fa023e8702047d1,0xbff8059150ea1464,2
+np.float64,0x99ff336933fe7,0xc0733df961197517,2
+np.float64,0x7feeb365b9bd66ca,0x407343c995864091,2
+np.float64,0x7fe449b49f689368,0x407340e8aa3369e3,2
+np.float64,0x7faf5843043eb085,0x407330aa700136ca,2
+np.float64,0x3fd47b2922a8f652,0xbfdfab3de86f09ee,2
+np.float64,0x7fd9fc3248b3f864,0x40733dcfea6f9b3e,2
+np.float64,0xe20b0d8dc4162,0xc0733b4ea8fe7b3f,2
+np.float64,0x7feff8e0e23ff1c1,0x40734411c490ed70,2
+np.float64,0x7fa58382d02b0705,0x40732e0cf28e14fe,2
+np.float64,0xb8ad9a1b715b4,0xc0733cb630b8f2d4,2
+np.float64,0xe90abcf1d2158,0xc0733b186b04eeee,2
+np.float64,0x7fd6aa6f32ad54dd,0x40733cdccc636604,2
+np.float64,0x3fd8f84eedb1f09e,0xbfda292909a5298a,2
+np.float64,0x7fecd6b1d9f9ad63,0x4073435a472b05b5,2
+np.float64,0x3fd9f47604b3e8ec,0xbfd915e028cbf4a6,2
+np.float64,0x3fd20d9398241b27,0xbfe19691363dd508,2
+np.float64,0x3fe5ed09bbabda13,0xbfc5043dfc9c8081,2
+np.float64,0x7fbe5265363ca4c9,0x407335406f8e4fac,2
+np.float64,0xac2878af5850f,0xc0733d3311be9786,2
+np.float64,0xac2074555840f,0xc0733d3364970018,2
+np.float64,0x3fcd49b96b3a9373,0xbfe47f24c8181d9c,2
+np.float64,0x3fd10caca6a21959,0xbfe2620ae5594f9a,2
+np.float64,0xec5b87e9d8b71,0xc0733aff499e72ca,2
+np.float64,0x9d5e9fad3abd4,0xc0733dd2d70eeb4a,2
+np.float64,0x7fe3d3a24227a744,0x407340bfc2072fdb,2
+np.float64,0x3fc5f7a77c2bef4f,0xbfe87e69d502d784,2
+np.float64,0x33161a66662c4,0xc07345a436308244,2
+np.float64,0xa27acdc744f5a,0xc0733d99feb3d8ea,2
+np.float64,0x3fe2d9301565b260,0xbfcd6c914e204437,2
+np.float64,0x7fd5d111e12ba223,0x40733c98e14a6fd0,2
+np.float64,0x6c3387bed8672,0xc073406d3648171a,2
+np.float64,0x24d89fe849b15,0xc07347e97bec008c,2
+np.float64,0x3fefd763677faec7,0xbf61ae69caa9cad9,2
+np.float64,0x7fe0a4684ba148d0,0x40733f884d32c464,2
+np.float64,0x3fd5c3c939ab8792,0xbfddfaaefc1c7fca,2
+np.float64,0x3fec9b87a6b9370f,0xbfa8eb34efcc6b9b,2
+np.float64,0x3feb062431f60c48,0xbfb2ca6036698877,2
+np.float64,0x3fef97f6633f2fed,0xbf76bc742860a340,2
+np.float64,0x74477490e88ef,0xc0733fed220986bc,2
+np.float64,0x3fe4bea67ce97d4d,0xbfc818525292b0f6,2
+np.float64,0x3fc6add3a92d5ba7,0xbfe80cfdc9a90bda,2
+np.float64,0x847c9ce308f94,0xc0733f05026f5965,2
+np.float64,0x7fea53fd2eb4a7f9,0x407342b841fc4723,2
+np.float64,0x3fc55a16fc2ab42e,0xbfe8e3849130da34,2
+np.float64,0x3fbdf7d07c3befa1,0xbfedcf84b9c6c161,2
+np.float64,0x3fe5fb25aa6bf64b,0xbfc4e083ff96b116,2
+np.float64,0x61c776a8c38ef,0xc0734121611d84d7,2
+np.float64,0x3fec413164f88263,0xbfabadbd05131546,2
+np.float64,0x9bf06fe137e0e,0xc0733de315469ee0,2
+np.float64,0x2075eefc40ebf,0xc07348cae84de924,2
+np.float64,0x3fdd42e0143a85c0,0xbfd5c0b6f60b3cea,2
+np.float64,0xdbb1ab45b7636,0xc0733b8157329daf,2
+np.float64,0x3feac6d56bf58dab,0xbfb3d00771b28621,2
+np.float64,0x7fb2dc825025b904,0x407331f3e950751a,2
+np.float64,0x3fecea6efd79d4de,0xbfa689309cc0e3fe,2
+np.float64,0x3fd83abec7b0757e,0xbfdaff5c674a9c59,2
+np.float64,0x3fd396f7c0272df0,0xbfe073ee75c414ba,2
+np.float64,0x3fe10036c162006e,0xbfd1945a38342ae1,2
+np.float64,0x3fd5bbded52b77be,0xbfde04cca40d4156,2
+np.float64,0x3fe870945ab0e129,0xbfbdf72f0e6206fa,2
+np.float64,0x3fef72fddcbee5fc,0xbf7ee2dba88b1bad,2
+np.float64,0x4e111aa09c224,0xc07342b1e2b29643,2
+np.float64,0x3fd926d8b5b24db1,0xbfd9f58b78d6b061,2
+np.float64,0x3fc55679172aacf2,0xbfe8e5df687842e2,2
+np.float64,0x7f5f1749803e2e92,0x40731886e16cfc4d,2
+np.float64,0x7fea082b53b41056,0x407342a42227700e,2
+np.float64,0x3fece1d1d039c3a4,0xbfa6cb780988a469,2
+np.float64,0x3b2721d8764e5,0xc073449f6a5a4832,2
+np.float64,0x365cb7006cba,0xc0735879ba5f0b6e,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x7fe606ce92ac0d9c,0x4073417aeebe97e8,2
+np.float64,0x3fe237b544a46f6b,0xbfcf50f8f76d7df9,2
+np.float64,0x3fe7265e5eee4cbd,0xbfc1ff39089ec8d0,2
+np.float64,0x7fe2bb3c5ea57678,0x4073405aaad81cf2,2
+np.float64,0x3fd811df84b023bf,0xbfdb2e670ea8d8de,2
+np.float64,0x3f6a0efd00341dfa,0xc003fac1ae831241,2
+np.float64,0x3fd0d214afa1a429,0xbfe2922080a91c72,2
+np.float64,0x3feca6a350b94d47,0xbfa894eea3a96809,2
+np.float64,0x7fe23e5c76247cb8,0x4073402bbaaf71c7,2
+np.float64,0x3fe739a1fdae7344,0xbfc1d109f66efb5d,2
+np.float64,0x3fdf4b8e283e971c,0xbfd3e28f46169cc5,2
+np.float64,0x38f2535271e4b,0xc07344e3085219fa,2
+np.float64,0x7fd263a0f9a4c741,0x40733b68d945dae0,2
+np.float64,0x7fdd941863bb2830,0x40733eb651e3dca9,2
+np.float64,0xace7279159ce5,0xc0733d2b63b5947e,2
+np.float64,0x7fe34670b2268ce0,0x4073408d92770cb5,2
+np.float64,0x7fd11fa6dfa23f4d,0x40733aea02e76ea3,2
+np.float64,0x3fe6d9cbca6db398,0xbfc2b84b5c8c7eab,2
+np.float64,0x3fd69a0274ad3405,0xbfdcee3c7e52c463,2
+np.float64,0x3feb5af671f6b5ed,0xbfb16f88d739477f,2
+np.float64,0x3feea400163d4800,0xbf934e071c64fd0b,2
+np.float64,0x3fefd6bcf17fad7a,0xbf61f711c392b119,2
+np.float64,0x3fe148d43da291a8,0xbfd11e9cd3f91cd3,2
+np.float64,0x7fedf1308b7be260,0x4073439d135656da,2
+np.float64,0x3fe614c99c6c2993,0xbfc49fd1984dfd6d,2
+np.float64,0xd6e8d4e5add1b,0xc0733ba88256026e,2
+np.float64,0xfff0000000000000,0x7ff8000000000000,2
+np.float64,0x3fb530b5562a616b,0xbff1504bcc5c8f73,2
+np.float64,0xb7da68396fb4d,0xc0733cbe2790f52e,2
+np.float64,0x7fad78e26c3af1c4,0x4073303cdbfb0a15,2
+np.float64,0x7fee5698447cad30,0x407343b474573a8b,2
+np.float64,0x3fd488325c291065,0xbfdf999296d901e7,2
+np.float64,0x2669283a4cd26,0xc073479f823109a4,2
+np.float64,0x7fef3b090afe7611,0x407343e805a3b264,2
+np.float64,0x7fe8b96ae0f172d5,0x4073424874a342ab,2
+np.float64,0x7fef409f56fe813e,0x407343e943c3cd44,2
+np.float64,0x3fed28073dfa500e,0xbfa4b17e4cd31a3a,2
+np.float64,0x7f87ecc4802fd988,0x40732527e027b24b,2
+np.float64,0x3fdda24da0bb449b,0xbfd566a43ac035af,2
+np.float64,0x179fc9e62f3fa,0xc0734b0028c80fc1,2
+np.float64,0x3fef85b0927f0b61,0xbf7ac27565d5ab4f,2
+np.float64,0x5631501aac62b,0xc0734201be12c5d4,2
+np.float64,0x3fd782e424af05c8,0xbfdbd57544f8a7c3,2
+np.float64,0x3fe603a9a6ac0753,0xbfc4caff04dc3caf,2
+np.float64,0x7fbd5225163aa449,0x40733504b88f0a56,2
+np.float64,0x3fecd27506b9a4ea,0xbfa741dd70e6b08c,2
+np.float64,0x9c99603b3932c,0xc0733ddb922dc5db,2
+np.float64,0x3fbeb57f1a3d6afe,0xbfed789ff217aa08,2
+np.float64,0x3fef9c0f85bf381f,0xbf75d5c3d6cb281a,2
+np.float64,0x3fde4afb613c95f7,0xbfd4ca2a231c9005,2
+np.float64,0x396233d472c47,0xc07344d56ee70631,2
+np.float64,0x3fb31ea1c6263d44,0xbff207356152138d,2
+np.float64,0x3fe50bdf78aa17bf,0xbfc74ae0cbffb735,2
+np.float64,0xef74c701dee99,0xc0733ae81e4bb443,2
+np.float64,0x9a3e13a1347c3,0xc0733df68b60afc7,2
+np.float64,0x33ba4f886774b,0xc073458e03f0c13e,2
+np.float64,0x3fe8ba0e9931741d,0xbfbcaadf974e8f64,2
+np.float64,0x3fe090a4cd61214a,0xbfd24d236cf365d6,2
+np.float64,0x7fd87d992930fb31,0x40733d668b73b820,2
+np.float64,0x3fe6422b296c8456,0xbfc42e070b695d01,2
+np.float64,0x3febe9334677d267,0xbfae667864606cfe,2
+np.float64,0x771a3ce4ee348,0xc0733fc274d12c97,2
+np.float64,0x3fe0413542e0826b,0xbfd2d3b08fb5b8a6,2
+np.float64,0x3fd00870ea2010e2,0xbfe33cc04cbd42e0,2
+np.float64,0x3fe74fb817ae9f70,0xbfc19c45dbf919e1,2
+np.float64,0x40382fa08071,0xc07357514ced5577,2
+np.float64,0xa14968474292d,0xc0733da71a990f3a,2
+np.float64,0x5487c740a90fa,0xc0734224622d5801,2
+np.float64,0x3fed7d8d14fafb1a,0xbfa228f7ecc2ac03,2
+np.float64,0x3fe39bb485e73769,0xbfcb3a235a722960,2
+np.float64,0x3fd01090b2202121,0xbfe335b752589a22,2
+np.float64,0x3fd21a3e7da4347d,0xbfe18cd435a7c582,2
+np.float64,0x3fe7fa855a2ff50b,0xbfc00ab0665709fe,2
+np.float64,0x3fedc0d4577b81a9,0xbfa02fef3ff553fc,2
+np.float64,0x3fe99d4906333a92,0xbfb8bf18220e5e8e,2
+np.float64,0x3fd944ee3c3289dc,0xbfd9d46071675e73,2
+np.float64,0x3fe3ed8d52e7db1b,0xbfca53f8d4aef484,2
+np.float64,0x7fe748623a6e90c3,0x407341dd97c9dd79,2
+np.float64,0x3fea1b4b98343697,0xbfb6a1560a56927f,2
+np.float64,0xe1215715c242b,0xc0733b55dbf1f0a8,2
+np.float64,0x3fd0d5bccca1ab7a,0xbfe28f1b66d7a470,2
+np.float64,0x881a962710353,0xc0733ed51848a30d,2
+np.float64,0x3fcf022afe3e0456,0xbfe3b40eabf24501,2
+np.float64,0x3fdf1ac6bbbe358d,0xbfd40e03e888288d,2
+np.float64,0x3fa51a5eac2a34bd,0xbff628a7c34d51b3,2
+np.float64,0x3fdbaf408d375e81,0xbfd74ad39d97c92a,2
+np.float64,0x3fcd2418ea3a4832,0xbfe4910b009d8b11,2
+np.float64,0x3fc7b3062a2f660c,0xbfe7706dc47993e1,2
+np.float64,0x7fb8232218304643,0x407333aaa7041a9f,2
+np.float64,0x7fd5f186362be30b,0x40733ca32fdf9cc6,2
+np.float64,0x3fe57ef1d6aafde4,0xbfc61e23d00210c7,2
+np.float64,0x7c6830baf8d07,0xc0733f74f19e9dad,2
+np.float64,0xcacbfd5595980,0xc0733c0fb49edca7,2
+np.float64,0x3fdfdeac873fbd59,0xbfd36114c56bed03,2
+np.float64,0x3fd31f0889263e11,0xbfe0ca0cc1250169,2
+np.float64,0x3fe839fbe47073f8,0xbfbef0a2abc3d63f,2
+np.float64,0x3fc36af57e26d5eb,0xbfea3553f38770b7,2
+np.float64,0x3fe73dbc44ee7b79,0xbfc1c738f8fa6b3d,2
+np.float64,0x3fd3760e4da6ec1d,0xbfe08b5b609d11e5,2
+np.float64,0x3fee1cfa297c39f4,0xbf9b06d081bc9d5b,2
+np.float64,0xdfb01561bf61,0xc0734ea55e559888,2
+np.float64,0x687bd01cd0f7b,0xc07340ab67fe1816,2
+np.float64,0x3fefc88f4cbf911f,0xbf6828c359cf19dc,2
+np.float64,0x8ad34adb15a6a,0xc0733eb1e03811e5,2
+np.float64,0x3fe2b49c12e56938,0xbfcdd8dbdbc0ce59,2
+np.float64,0x6e05037adc0a1,0xc073404f91261635,2
+np.float64,0x3fe2fd737fe5fae7,0xbfcd020407ef4d78,2
+np.float64,0x3fd0f3c0dc21e782,0xbfe2766a1ab02eae,2
+np.float64,0x28564d9850acb,0xc073474875f87c5e,2
+np.float64,0x3fe4758015a8eb00,0xbfc8ddb45134a1bd,2
+np.float64,0x7fe7f19306efe325,0x4073420f626141a7,2
+np.float64,0x7fd27f34c0a4fe69,0x40733b733d2a5b50,2
+np.float64,0x92c2366325847,0xc0733e4f04f8195a,2
+np.float64,0x3fc21f8441243f09,0xbfeb2ad23bc1ab0b,2
+np.float64,0x3fc721d3e42e43a8,0xbfe7c69bb47b40c2,2
+np.float64,0x3fe2f11a1625e234,0xbfcd26363b9c36c3,2
+np.float64,0x3fdcb585acb96b0b,0xbfd648446237cb55,2
+np.float64,0x3fd4060bf2280c18,0xbfe025fd4c8a658b,2
+np.float64,0x7fb8ae2750315c4e,0x407333d23b025d08,2
+np.float64,0x3fe3a03119a74062,0xbfcb2d6c91b38552,2
+np.float64,0x7fdd2af92bba55f1,0x40733e9d737e16e6,2
+np.float64,0x3fe50b05862a160b,0xbfc74d20815fe36b,2
+np.float64,0x164409f82c882,0xc0734b6980e19c03,2
+np.float64,0x3fe4093712a8126e,0xbfca070367fda5e3,2
+np.float64,0xae3049935c609,0xc0733d1e3608797b,2
+np.float64,0x3fd71df4b4ae3be9,0xbfdc4dcb7637600d,2
+np.float64,0x7fca01e8023403cf,0x407339006c521c49,2
+np.float64,0x3fb0c5c43e218b88,0xbff2f03211c63f25,2
+np.float64,0x3fee757af83ceaf6,0xbf95f33a6e56b454,2
+np.float64,0x3f865f1f402cbe3f,0xbfff62d9c9072bd7,2
+np.float64,0x89864e95130ca,0xc0733ec29f1e32c6,2
+np.float64,0x3fe51482bcea2905,0xbfc73414ddc8f1b7,2
+np.float64,0x7fd802f8fa3005f1,0x40733d43684e460a,2
+np.float64,0x3fbeb86ca63d70d9,0xbfed774ccca9b8f5,2
+np.float64,0x3fb355dcc826abba,0xbff1f33f9339e7a3,2
+np.float64,0x3fe506c61eaa0d8c,0xbfc7585a3f7565a6,2
+np.float64,0x7fe393f25ba727e4,0x407340a94bcea73b,2
+np.float64,0xf66f532decdeb,0xc0733ab5041feb0f,2
+np.float64,0x3fe26e872be4dd0e,0xbfceaaab466f32e0,2
+np.float64,0x3fefd9e290bfb3c5,0xbf60977d24496295,2
+np.float64,0x7fe19c5f692338be,0x40733fecef53ad95,2
+np.float64,0x3fe80365ab3006cb,0xbfbfec4090ef76ec,2
+np.float64,0x3fe88ab39eb11567,0xbfbd8099388d054d,2
+np.float64,0x3fe68fb09fad1f61,0xbfc36db9de38c2c0,2
+np.float64,0x3fe9051883b20a31,0xbfbb5b75b8cb8f24,2
+np.float64,0x3fd4708683a8e10d,0xbfdfb9b085dd8a83,2
+np.float64,0x3fe00ac11a601582,0xbfd3316af3e43500,2
+np.float64,0xd16af30ba2d5f,0xc0733bd68e8252f9,2
+np.float64,0x3fb97d654632facb,0xbff007ac1257f575,2
+np.float64,0x7fd637c10fac6f81,0x40733cb949d76546,2
+np.float64,0x7fed2cab6dba5956,0x4073436edfc3764e,2
+np.float64,0x3fed04afbbba095f,0xbfa5bfaa5074b7f4,2
+np.float64,0x0,0xfff0000000000000,2
+np.float64,0x389a1dc671345,0xc07344edd4206338,2
+np.float64,0x3fbc9ba25a393745,0xbfee74c34f49b921,2
+np.float64,0x3feee749947dce93,0xbf8f032d9cf6b5ae,2
+np.float64,0xedc4cf89db89a,0xc0733af4b2a57920,2
+np.float64,0x3fe41629eba82c54,0xbfc9e321faf79e1c,2
+np.float64,0x3feb0bcbf7b61798,0xbfb2b31e5d952869,2
+np.float64,0xad60654b5ac0d,0xc0733d26860df676,2
+np.float64,0x3fe154e1ff22a9c4,0xbfd10b416e58c867,2
+np.float64,0x7fb20e9c8a241d38,0x407331a66453b8bc,2
+np.float64,0x7fcbbaaf7d37755e,0x4073397274f28008,2
+np.float64,0x187d0fbc30fa3,0xc0734ac03cc98cc9,2
+np.float64,0x7fd153afeaa2a75f,0x40733aff00b4311d,2
+np.float64,0x3fe05310a5e0a621,0xbfd2b5386aeecaac,2
+np.float64,0x7fea863b2b750c75,0x407342c57807f700,2
+np.float64,0x3fed5f0c633abe19,0xbfa30f6cfbc4bf94,2
+np.float64,0xf227c8b3e44f9,0xc0733ad42daaec9f,2
+np.float64,0x3fe956524772aca5,0xbfb9f4cabed7081d,2
+np.float64,0xefd11af7dfa24,0xc0733ae570ed2552,2
+np.float64,0x1690fff02d221,0xc0734b51a56c2980,2
+np.float64,0x7fd2e547a825ca8e,0x40733b992d6d9635,2
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv
new file mode 100644
index 00000000..6e4f88b3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log1p.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3e10aca8,0x3e075347,2
+np.float32,0x3f776e66,0x3f2d2003,2
+np.float32,0xbf34e8ce,0xbf9cfd5c,2
+np.float32,0xbf0260ee,0xbf363f69,2
+np.float32,0x3ed285e8,0x3eb05870,2
+np.float32,0x262b88,0x262b88,2
+np.float32,0x3eeffd6c,0x3ec4cfdb,2
+np.float32,0x3ee86808,0x3ebf9f54,2
+np.float32,0x3f36eba8,0x3f0a0524,2
+np.float32,0xbf1c047a,0xbf70afc7,2
+np.float32,0x3ead2916,0x3e952902,2
+np.float32,0x61c9c9,0x61c9c9,2
+np.float32,0xff7fffff,0xffc00000,2
+np.float32,0x7f64ee52,0x42b138e0,2
+np.float32,0x7ed00b1e,0x42afa4ff,2
+np.float32,0x3db53340,0x3dada0b2,2
+np.float32,0x3e6b0a4a,0x3e5397a4,2
+np.float32,0x7ed5d64f,0x42afb310,2
+np.float32,0xbf12bc5f,0xbf59f5ee,2
+np.float32,0xbda12710,0xbda7d8b5,2
+np.float32,0xbe2e89d8,0xbe3f5a9f,2
+np.float32,0x3f5bee75,0x3f1ebea4,2
+np.float32,0x9317a,0x9317a,2
+np.float32,0x7ee00130,0x42afcad8,2
+np.float32,0x7ef0d16d,0x42afefe7,2
+np.float32,0xbec7463a,0xbefc6a44,2
+np.float32,0xbf760ecc,0xc04fe59c,2
+np.float32,0xbecacb3c,0xbf011ae3,2
+np.float32,0x3ead92be,0x3e9577f0,2
+np.float32,0xbf41510d,0xbfb41b3a,2
+np.float32,0x7f71d489,0x42b154f1,2
+np.float32,0x8023bcd5,0x8023bcd5,2
+np.float32,0x801d33d8,0x801d33d8,2
+np.float32,0x3f3f545d,0x3f0ee0d4,2
+np.float32,0xbf700682,0xc0318c25,2
+np.float32,0xbe54e990,0xbe6eb0a3,2
+np.float32,0x7f0289bf,0x42b01941,2
+np.float32,0xbd61ac90,0xbd682113,2
+np.float32,0xbf2ff310,0xbf94cd6f,2
+np.float32,0x7f10064a,0x42b04b98,2
+np.float32,0x804d0d6d,0x804d0d6d,2
+np.float32,0x80317b0a,0x80317b0a,2
+np.float32,0xbddfef18,0xbded2640,2
+np.float32,0x3f00c9ab,0x3ed0a5bd,2
+np.float32,0x7f04b905,0x42b021c1,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0x6524c4,0x6524c4,2
+np.float32,0x3da08ae0,0x3d9a8f88,2
+np.float32,0x293ea9,0x293ea9,2
+np.float32,0x71499e,0x71499e,2
+np.float32,0xbf14f54d,0xbf5f38a5,2
+np.float32,0x806e60f5,0x806e60f5,2
+np.float32,0x3f5f34bb,0x3f207fff,2
+np.float32,0x80513427,0x80513427,2
+np.float32,0x7f379670,0x42b0c7dc,2
+np.float32,0x3efba888,0x3eccb20b,2
+np.float32,0x3eeadd1b,0x3ec14f4b,2
+np.float32,0x7ec5a27f,0x42af8ab8,2
+np.float32,0x3f2afe4e,0x3f02f7a2,2
+np.float32,0x5591c8,0x5591c8,2
+np.float32,0x3dbb7240,0x3db35bab,2
+np.float32,0x805b911b,0x805b911b,2
+np.float32,0x800000,0x800000,2
+np.float32,0x7e784c04,0x42ae9cab,2
+np.float32,0x7ebaae14,0x42af6d86,2
+np.float32,0xbec84f7a,0xbefe1d42,2
+np.float32,0x7cea8281,0x42aa56bf,2
+np.float32,0xbf542cf6,0xbfe1eb1b,2
+np.float32,0xbf6bfb13,0xc0231a5b,2
+np.float32,0x7d6eeaef,0x42abc32c,2
+np.float32,0xbf062f6b,0xbf3e2000,2
+np.float32,0x8073d8e9,0x8073d8e9,2
+np.float32,0xbea4db14,0xbec6f485,2
+np.float32,0x7d7e8d62,0x42abe3a0,2
+np.float32,0x7e8fc34e,0x42aee7c6,2
+np.float32,0x7dcbb0c3,0x42acd464,2
+np.float32,0x7e123c,0x7e123c,2
+np.float32,0x3d77af62,0x3d707c34,2
+np.float32,0x498cc8,0x498cc8,2
+np.float32,0x7f4e2206,0x42b1032a,2
+np.float32,0x3f734e0a,0x3f2b04a1,2
+np.float32,0x8053a9d0,0x8053a9d0,2
+np.float32,0xbe8a67e0,0xbea15be9,2
+np.float32,0xbf78e0ea,0xc065409e,2
+np.float32,0x352bdd,0x352bdd,2
+np.float32,0x3ee42be7,0x3ebcb38a,2
+np.float32,0x7f482d10,0x42b0f427,2
+np.float32,0xbf23155e,0xbf81b993,2
+np.float32,0x594920,0x594920,2
+np.float32,0x63f53f,0x63f53f,2
+np.float32,0x363592,0x363592,2
+np.float32,0x7dafbb78,0x42ac88cc,2
+np.float32,0x7f69516c,0x42b14298,2
+np.float32,0x3e1d5be2,0x3e126131,2
+np.float32,0x410c23,0x410c23,2
+np.float32,0x7ec9563c,0x42af9439,2
+np.float32,0xbedd3a0e,0xbf10d705,2
+np.float32,0x7f7c4f1f,0x42b16aa8,2
+np.float32,0xbe99b34e,0xbeb6c2d3,2
+np.float32,0x6cdc84,0x6cdc84,2
+np.float32,0x5b3bbe,0x5b3bbe,2
+np.float32,0x252178,0x252178,2
+np.float32,0x7d531865,0x42ab83c8,2
+np.float32,0xbf565b44,0xbfe873bf,2
+np.float32,0x5977ce,0x5977ce,2
+np.float32,0x588a58,0x588a58,2
+np.float32,0x3eae7054,0x3e961d51,2
+np.float32,0x725049,0x725049,2
+np.float32,0x7f2b9386,0x42b0a538,2
+np.float32,0xbe674714,0xbe831245,2
+np.float32,0x8044f0d8,0x8044f0d8,2
+np.float32,0x800a3c21,0x800a3c21,2
+np.float32,0x807b275b,0x807b275b,2
+np.float32,0xbf2463b6,0xbf83896e,2
+np.float32,0x801cca42,0x801cca42,2
+np.float32,0xbf28f2d0,0xbf8a121a,2
+np.float32,0x3f4168c2,0x3f1010ce,2
+np.float32,0x6f91a1,0x6f91a1,2
+np.float32,0xbf2b9eeb,0xbf8e0fc5,2
+np.float32,0xbea4c858,0xbec6d8e4,2
+np.float32,0xbf7abba0,0xc0788e88,2
+np.float32,0x802f18f7,0x802f18f7,2
+np.float32,0xbf7f6c75,0xc0c3145c,2
+np.float32,0xbe988210,0xbeb50f5e,2
+np.float32,0xbf219b7e,0xbf7f6a3b,2
+np.float32,0x7f800000,0x7f800000,2
+np.float32,0x7f7fffff,0x42b17218,2
+np.float32,0xbdca8d90,0xbdd5487e,2
+np.float32,0xbef683b0,0xbf2821b0,2
+np.float32,0x8043e648,0x8043e648,2
+np.float32,0xbf4319a4,0xbfb7cd1b,2
+np.float32,0x62c2b2,0x62c2b2,2
+np.float32,0xbf479ccd,0xbfc1a7b1,2
+np.float32,0x806c8a32,0x806c8a32,2
+np.float32,0x7f004447,0x42b01045,2
+np.float32,0x3f737d36,0x3f2b1ccf,2
+np.float32,0x3ee71f24,0x3ebebced,2
+np.float32,0x3ea0b6b4,0x3e8bc606,2
+np.float32,0x358fd7,0x358fd7,2
+np.float32,0xbe69780c,0xbe847d17,2
+np.float32,0x7f6bed18,0x42b14849,2
+np.float32,0xbf6a5113,0xc01dfe1d,2
+np.float32,0xbf255693,0xbf84de88,2
+np.float32,0x7f34acac,0x42b0bfac,2
+np.float32,0xbe8a3b6a,0xbea11efe,2
+np.float32,0x3f470d84,0x3f1342ab,2
+np.float32,0xbf2cbde3,0xbf8fc602,2
+np.float32,0x47c103,0x47c103,2
+np.float32,0xe3c94,0xe3c94,2
+np.float32,0xbec07afa,0xbef1693a,2
+np.float32,0x6a9cfe,0x6a9cfe,2
+np.float32,0xbe4339e0,0xbe5899da,2
+np.float32,0x7ea9bf1e,0x42af3cd6,2
+np.float32,0x3f6378b4,0x3f22c4c4,2
+np.float32,0xbd989ff0,0xbd9e9c77,2
+np.float32,0xbe6f2f50,0xbe88343d,2
+np.float32,0x3f7f2ac5,0x3f310764,2
+np.float32,0x3f256704,0x3eff2fb2,2
+np.float32,0x80786aca,0x80786aca,2
+np.float32,0x65d02f,0x65d02f,2
+np.float32,0x50d1c3,0x50d1c3,2
+np.float32,0x3f4a9d76,0x3f1541b4,2
+np.float32,0x802cf491,0x802cf491,2
+np.float32,0x3e935cec,0x3e81829b,2
+np.float32,0x3e2ad478,0x3e1dfd81,2
+np.float32,0xbf107cbd,0xbf54bef2,2
+np.float32,0xbf58c02e,0xbff007fe,2
+np.float32,0x80090808,0x80090808,2
+np.float32,0x805d1f66,0x805d1f66,2
+np.float32,0x6aec95,0x6aec95,2
+np.float32,0xbee3fc6e,0xbf16dc73,2
+np.float32,0x7f63314b,0x42b134f9,2
+np.float32,0x550443,0x550443,2
+np.float32,0xbefa8174,0xbf2c026e,2
+np.float32,0x3f7fb380,0x3f314bd5,2
+np.float32,0x80171f2c,0x80171f2c,2
+np.float32,0x3f2f56ae,0x3f058f2d,2
+np.float32,0x3eacaecb,0x3e94cd97,2
+np.float32,0xbe0c4f0c,0xbe16e69d,2
+np.float32,0x3f48e4cb,0x3f144b42,2
+np.float32,0x7f03efe2,0x42b01eb7,2
+np.float32,0xbf1019ac,0xbf53dbe9,2
+np.float32,0x3e958524,0x3e832eb5,2
+np.float32,0xbf1b23c6,0xbf6e72f2,2
+np.float32,0x12c554,0x12c554,2
+np.float32,0x7dee588c,0x42ad24d6,2
+np.float32,0xbe8c216c,0xbea3ba70,2
+np.float32,0x804553cb,0x804553cb,2
+np.float32,0xbe446324,0xbe5a0966,2
+np.float32,0xbef7150a,0xbf28adff,2
+np.float32,0xbf087282,0xbf42ec6e,2
+np.float32,0x3eeef15c,0x3ec41937,2
+np.float32,0x61bbd2,0x61bbd2,2
+np.float32,0x3e51b28d,0x3e3ec538,2
+np.float32,0x57e869,0x57e869,2
+np.float32,0x7e5e7711,0x42ae646c,2
+np.float32,0x8050b173,0x8050b173,2
+np.float32,0xbf63c90c,0xc00d2438,2
+np.float32,0xbeba774c,0xbee7dcf8,2
+np.float32,0x8016faac,0x8016faac,2
+np.float32,0xbe8b448c,0xbea28aaf,2
+np.float32,0x3e8cd448,0x3e78d29e,2
+np.float32,0x80484e02,0x80484e02,2
+np.float32,0x3f63ba68,0x3f22e78c,2
+np.float32,0x2e87bb,0x2e87bb,2
+np.float32,0x230496,0x230496,2
+np.float32,0x1327b2,0x1327b2,2
+np.float32,0xbf046c56,0xbf3a72d2,2
+np.float32,0x3ecefe60,0x3eadd69a,2
+np.float32,0x49c56e,0x49c56e,2
+np.float32,0x3df22d60,0x3de4e550,2
+np.float32,0x3f67c19d,0x3f250707,2
+np.float32,0x3f20eb9c,0x3ef9b624,2
+np.float32,0x3f05ca75,0x3ed742fa,2
+np.float32,0xbe8514f8,0xbe9a1d45,2
+np.float32,0x8070a003,0x8070a003,2
+np.float32,0x7e49650e,0x42ae317a,2
+np.float32,0x3de16ce9,0x3dd5dc3e,2
+np.float32,0xbf4ae952,0xbfc95f1f,2
+np.float32,0xbe44dd84,0xbe5aa0db,2
+np.float32,0x803c3bc0,0x803c3bc0,2
+np.float32,0x3eebb9e8,0x3ec1e692,2
+np.float32,0x80588275,0x80588275,2
+np.float32,0xbea1e69a,0xbec29d86,2
+np.float32,0x3f7b4bf8,0x3f2f154c,2
+np.float32,0x7eb47ecc,0x42af5c46,2
+np.float32,0x3d441e00,0x3d3f911a,2
+np.float32,0x7f54d40e,0x42b11388,2
+np.float32,0xbf47f17e,0xbfc26882,2
+np.float32,0x3ea7da57,0x3e912db4,2
+np.float32,0x3f59cc7b,0x3f1d984e,2
+np.float32,0x570e08,0x570e08,2
+np.float32,0x3e99560c,0x3e8620a2,2
+np.float32,0x3ecfbd14,0x3eae5e55,2
+np.float32,0x7e86be08,0x42aec698,2
+np.float32,0x3f10f28a,0x3ee5b5d3,2
+np.float32,0x7f228722,0x42b0897a,2
+np.float32,0x3f4b979b,0x3f15cd30,2
+np.float32,0xbf134283,0xbf5b30f9,2
+np.float32,0x3f2ae16a,0x3f02e64f,2
+np.float32,0x3e98e158,0x3e85c6cc,2
+np.float32,0x7ec39f27,0x42af857a,2
+np.float32,0x3effedb0,0x3ecf8cea,2
+np.float32,0xbd545620,0xbd5a09c1,2
+np.float32,0x503a28,0x503a28,2
+np.float32,0x3f712744,0x3f29e9a1,2
+np.float32,0x3edc6194,0x3eb748b1,2
+np.float32,0xbf4ec1e5,0xbfd2ff5f,2
+np.float32,0x3f46669e,0x3f12e4b5,2
+np.float32,0xabad3,0xabad3,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x803f2e6d,0x803f2e6d,2
+np.float32,0xbf431542,0xbfb7c3e6,2
+np.float32,0x3f6f2d53,0x3f28e496,2
+np.float32,0x546bd8,0x546bd8,2
+np.float32,0x25c80a,0x25c80a,2
+np.float32,0x3e50883c,0x3e3dcd7e,2
+np.float32,0xbf5fa2ba,0xc0045c14,2
+np.float32,0x80271c07,0x80271c07,2
+np.float32,0x8043755d,0x8043755d,2
+np.float32,0xbf3c5cea,0xbfaa5ee9,2
+np.float32,0x3f2fea38,0x3f05e6af,2
+np.float32,0x6da3dc,0x6da3dc,2
+np.float32,0xbf095945,0xbf44dc70,2
+np.float32,0xbe33d584,0xbe45c1f5,2
+np.float32,0x7eb41b2e,0x42af5b2b,2
+np.float32,0xbf0feb74,0xbf537242,2
+np.float32,0xbe96225a,0xbeb1b0b1,2
+np.float32,0x3f63b95f,0x3f22e700,2
+np.float32,0x0,0x0,2
+np.float32,0x3e20b0cc,0x3e154374,2
+np.float32,0xbf79880c,0xc06b6801,2
+np.float32,0xbea690b6,0xbec97b93,2
+np.float32,0xbf3e11ca,0xbfada449,2
+np.float32,0x7e7e6292,0x42aea912,2
+np.float32,0x3e793350,0x3e5f0b7b,2
+np.float32,0x802e7183,0x802e7183,2
+np.float32,0x3f1b3695,0x3ef2a788,2
+np.float32,0x801efa20,0x801efa20,2
+np.float32,0x3f1ec43a,0x3ef70f42,2
+np.float32,0xbf12c5ed,0xbf5a0c52,2
+np.float32,0x8005e99c,0x8005e99c,2
+np.float32,0xbf79f5e7,0xc06fcca5,2
+np.float32,0x3ecbaf50,0x3eab7a03,2
+np.float32,0x46b0fd,0x46b0fd,2
+np.float32,0x3edb9023,0x3eb6b631,2
+np.float32,0x7f24bc41,0x42b09063,2
+np.float32,0xbd8d9328,0xbd92b4c6,2
+np.float32,0x3f2c5d7f,0x3f03c9d9,2
+np.float32,0x807bebc9,0x807bebc9,2
+np.float32,0x7f797a99,0x42b164e2,2
+np.float32,0x756e3c,0x756e3c,2
+np.float32,0x80416f8a,0x80416f8a,2
+np.float32,0x3e0d512a,0x3e04611a,2
+np.float32,0x3f7be3e6,0x3f2f61ec,2
+np.float32,0x80075c41,0x80075c41,2
+np.float32,0xbe850294,0xbe9a046c,2
+np.float32,0x684679,0x684679,2
+np.float32,0x3eb393c4,0x3e99eed2,2
+np.float32,0x3f4177c6,0x3f10195b,2
+np.float32,0x3dd1f402,0x3dc7dfe5,2
+np.float32,0x3ef484d4,0x3ec7e2e1,2
+np.float32,0x53eb8f,0x53eb8f,2
+np.float32,0x7f072cb6,0x42b02b20,2
+np.float32,0xbf1b6b55,0xbf6f28d4,2
+np.float32,0xbd8a98d8,0xbd8f827d,2
+np.float32,0x3eafb418,0x3e970e96,2
+np.float32,0x6555af,0x6555af,2
+np.float32,0x7dd5118e,0x42aceb6f,2
+np.float32,0x800a13f7,0x800a13f7,2
+np.float32,0x331a9d,0x331a9d,2
+np.float32,0x8063773f,0x8063773f,2
+np.float32,0x3e95e068,0x3e837553,2
+np.float32,0x80654b32,0x80654b32,2
+np.float32,0x3dabe0e0,0x3da50bb3,2
+np.float32,0xbf6283c3,0xc00a5280,2
+np.float32,0x80751cc5,0x80751cc5,2
+np.float32,0x3f668eb6,0x3f2465c0,2
+np.float32,0x3e13c058,0x3e0a048c,2
+np.float32,0x77780c,0x77780c,2
+np.float32,0x3f7d6e48,0x3f302868,2
+np.float32,0x7e31f9e3,0x42adf22f,2
+np.float32,0x246c7b,0x246c7b,2
+np.float32,0xbe915bf0,0xbeaafa6c,2
+np.float32,0xbf800000,0xff800000,2
+np.float32,0x3f698f42,0x3f25f8e0,2
+np.float32,0x7e698885,0x42ae7d48,2
+np.float32,0x3f5bbd42,0x3f1ea42c,2
+np.float32,0x5b8444,0x5b8444,2
+np.float32,0xbf6065f6,0xc005e2c6,2
+np.float32,0xbeb95036,0xbee60dad,2
+np.float32,0xbf44f846,0xbfbbcade,2
+np.float32,0xc96e5,0xc96e5,2
+np.float32,0xbf213e90,0xbf7e6eae,2
+np.float32,0xbeb309cc,0xbedc4fe6,2
+np.float32,0xbe781cf4,0xbe8e0fe6,2
+np.float32,0x7f0cf0db,0x42b04083,2
+np.float32,0xbf7b6143,0xc08078f9,2
+np.float32,0x80526fc6,0x80526fc6,2
+np.float32,0x3f092bf3,0x3edbaeec,2
+np.float32,0x3ecdf154,0x3ead16df,2
+np.float32,0x2fe85b,0x2fe85b,2
+np.float32,0xbf5100a0,0xbfd8f871,2
+np.float32,0xbec09d40,0xbef1a028,2
+np.float32,0x5e6a85,0x5e6a85,2
+np.float32,0xbec0e2a0,0xbef20f6b,2
+np.float32,0x3f72e788,0x3f2ad00d,2
+np.float32,0x880a6,0x880a6,2
+np.float32,0x3d9e90bf,0x3d98b9fc,2
+np.float32,0x15cf25,0x15cf25,2
+np.float32,0x10171b,0x10171b,2
+np.float32,0x805cf1aa,0x805cf1aa,2
+np.float32,0x3f19bd36,0x3ef0d0d2,2
+np.float32,0x3ebe2bda,0x3ea1b774,2
+np.float32,0xbecd8192,0xbf035c49,2
+np.float32,0x3e2ce508,0x3e1fc21b,2
+np.float32,0x290f,0x290f,2
+np.float32,0x803b679f,0x803b679f,2
+np.float32,0x1,0x1,2
+np.float32,0x807a9c76,0x807a9c76,2
+np.float32,0xbf65fced,0xc01257f8,2
+np.float32,0x3f783414,0x3f2d8475,2
+np.float32,0x3f2d9d92,0x3f0488da,2
+np.float32,0xbddb5798,0xbde80018,2
+np.float32,0x3e91afb8,0x3e8034e7,2
+np.float32,0xbf1b775a,0xbf6f476d,2
+np.float32,0xbf73a32c,0xc041f3ba,2
+np.float32,0xbea39364,0xbec5121b,2
+np.float32,0x80375b94,0x80375b94,2
+np.float32,0x3f331252,0x3f07c3e9,2
+np.float32,0xbf285774,0xbf892e74,2
+np.float32,0x3e699bb8,0x3e526d55,2
+np.float32,0x3f08208a,0x3eda523a,2
+np.float32,0xbf42fb4a,0xbfb78d60,2
+np.float32,0x8029c894,0x8029c894,2
+np.float32,0x3e926c0c,0x3e80c76e,2
+np.float32,0x801e4715,0x801e4715,2
+np.float32,0x3e4b36d8,0x3e395ffd,2
+np.float32,0x8041556b,0x8041556b,2
+np.float32,0xbf2d99ba,0xbf9119bd,2
+np.float32,0x3ed83ea8,0x3eb46250,2
+np.float32,0xbe94a280,0xbeaf92b4,2
+np.float32,0x7f4c7a64,0x42b0ff0a,2
+np.float32,0x806d4022,0x806d4022,2
+np.float32,0xbed382f8,0xbf086d26,2
+np.float32,0x1846fe,0x1846fe,2
+np.float32,0xbe702558,0xbe88d4d8,2
+np.float32,0xbe650ee0,0xbe81a3cc,2
+np.float32,0x3ee9d088,0x3ec0970c,2
+np.float32,0x7f6d4498,0x42b14b30,2
+np.float32,0xbef9f9e6,0xbf2b7ddb,2
+np.float32,0xbf70c384,0xc0349370,2
+np.float32,0xbeff9e9e,0xbf3110c8,2
+np.float32,0xbef06372,0xbf224aa9,2
+np.float32,0xbf15a692,0xbf60e1fa,2
+np.float32,0x8058c117,0x8058c117,2
+np.float32,0xbd9f74b8,0xbda6017b,2
+np.float32,0x801bf130,0x801bf130,2
+np.float32,0x805da84c,0x805da84c,2
+np.float32,0xff800000,0xffc00000,2
+np.float32,0xbeb01de2,0xbed7d6d6,2
+np.float32,0x8077de08,0x8077de08,2
+np.float32,0x3e327668,0x3e2482c1,2
+np.float32,0xbe7add88,0xbe8fe1ab,2
+np.float32,0x805a3c2e,0x805a3c2e,2
+np.float32,0x80326a73,0x80326a73,2
+np.float32,0x800b8a34,0x800b8a34,2
+np.float32,0x8048c83a,0x8048c83a,2
+np.float32,0xbf3799d6,0xbfa1a975,2
+np.float32,0x807649c7,0x807649c7,2
+np.float32,0x3dfdbf90,0x3def3798,2
+np.float32,0xbf1b538a,0xbf6eec4c,2
+np.float32,0xbf1e5989,0xbf76baa0,2
+np.float32,0xc7a80,0xc7a80,2
+np.float32,0x8001be54,0x8001be54,2
+np.float32,0x3f435bbc,0x3f112c6d,2
+np.float32,0xbeabcff8,0xbed151d1,2
+np.float32,0x7de20c78,0x42ad09b7,2
+np.float32,0x3f0e6d2e,0x3ee27b1e,2
+np.float32,0xbf0cb352,0xbf4c3267,2
+np.float32,0x7f6ec06f,0x42b14e61,2
+np.float32,0x7f6fa8ef,0x42b15053,2
+np.float32,0xbf3d2a6a,0xbfabe623,2
+np.float32,0x7f077a4c,0x42b02c46,2
+np.float32,0xbf2a68dc,0xbf8c3cc4,2
+np.float32,0x802a5dbe,0x802a5dbe,2
+np.float32,0x807f631c,0x807f631c,2
+np.float32,0x3dc9b8,0x3dc9b8,2
+np.float32,0x3ebdc1b7,0x3ea16a0a,2
+np.float32,0x7ef29dab,0x42aff3b5,2
+np.float32,0x3e8ab1cc,0x3e757806,2
+np.float32,0x3f27e88e,0x3f011c6d,2
+np.float32,0x3cfd1455,0x3cf93fb5,2
+np.float32,0x7f7eebf5,0x42b16fef,2
+np.float32,0x3c9b2140,0x3c99ade9,2
+np.float32,0x7e928601,0x42aef183,2
+np.float32,0xbd7d2db0,0xbd82abae,2
+np.float32,0x3e6f0df3,0x3e56da20,2
+np.float32,0x7d36a2fc,0x42ab39a3,2
+np.float32,0xbf49d3a2,0xbfc6c859,2
+np.float32,0x7ee541d3,0x42afd6b6,2
+np.float32,0x80753dc0,0x80753dc0,2
+np.float32,0x3f4ce486,0x3f16865d,2
+np.float32,0x39e701,0x39e701,2
+np.float32,0x3f3d9ede,0x3f0de5fa,2
+np.float32,0x7fafb2,0x7fafb2,2
+np.float32,0x3e013fdc,0x3df37090,2
+np.float32,0x807b6a2c,0x807b6a2c,2
+np.float32,0xbe86800a,0xbe9c08c7,2
+np.float32,0x7f40f080,0x42b0e14d,2
+np.float32,0x7eef5afe,0x42afecc8,2
+np.float32,0x7ec30052,0x42af83da,2
+np.float32,0x3eacf768,0x3e9503e1,2
+np.float32,0x7f13ef0e,0x42b0594e,2
+np.float32,0x80419f4a,0x80419f4a,2
+np.float32,0xbf485932,0xbfc3562a,2
+np.float32,0xbe8a24d6,0xbea10011,2
+np.float32,0xbda791c0,0xbdaed2bc,2
+np.float32,0x3e9b5169,0x3e87a67d,2
+np.float32,0x807dd882,0x807dd882,2
+np.float32,0x7f40170e,0x42b0df0a,2
+np.float32,0x7f02f7f9,0x42b01af1,2
+np.float32,0x3ea38bf9,0x3e8decde,2
+np.float32,0x3e2e7ce8,0x3e211ed4,2
+np.float32,0x70a7a6,0x70a7a6,2
+np.float32,0x7d978592,0x42ac3ce7,2
+np.float32,0x804d12d0,0x804d12d0,2
+np.float32,0x80165dc8,0x80165dc8,2
+np.float32,0x80000001,0x80000001,2
+np.float32,0x3e325da0,0x3e246da6,2
+np.float32,0xbe063bb8,0xbe0fe281,2
+np.float32,0x160b8,0x160b8,2
+np.float32,0xbe5687a4,0xbe70bbef,2
+np.float32,0x7f11ab34,0x42b05168,2
+np.float32,0xc955c,0xc955c,2
+np.float32,0xbea0003a,0xbebfd826,2
+np.float32,0x3f7fbdd9,0x3f315102,2
+np.float32,0xbe61aefc,0xbe7ef121,2
+np.float32,0xbf1b9873,0xbf6f9bc3,2
+np.float32,0x3a6d14,0x3a6d14,2
+np.float32,0xbf1ad3b4,0xbf6da808,2
+np.float32,0x3ed2dd24,0x3eb0963d,2
+np.float32,0xbe81a4ca,0xbe957d52,2
+np.float32,0x7f1be3e9,0x42b07421,2
+np.float32,0x7f5ce943,0x42b1269e,2
+np.float32,0x7eebcbdf,0x42afe51d,2
+np.float32,0x807181b5,0x807181b5,2
+np.float32,0xbecb03ba,0xbf0149ad,2
+np.float32,0x42edb8,0x42edb8,2
+np.float32,0xbf3aeec8,0xbfa7b13f,2
+np.float32,0xbd0c4f00,0xbd0ec4a0,2
+np.float32,0x3e48d260,0x3e376070,2
+np.float32,0x1a9731,0x1a9731,2
+np.float32,0x7f323be4,0x42b0b8b5,2
+np.float32,0x1a327f,0x1a327f,2
+np.float32,0x17f1fc,0x17f1fc,2
+np.float32,0xbf2f4f9b,0xbf93c91a,2
+np.float32,0x3ede8934,0x3eb8c9c3,2
+np.float32,0xbf56aaac,0xbfe968bb,2
+np.float32,0x3e22cb5a,0x3e17148c,2
+np.float32,0x7d9def,0x7d9def,2
+np.float32,0x8045b963,0x8045b963,2
+np.float32,0x77404f,0x77404f,2
+np.float32,0x7e2c9efb,0x42ade28b,2
+np.float32,0x8058ad89,0x8058ad89,2
+np.float32,0x7f4139,0x7f4139,2
+np.float32,0x8020e12a,0x8020e12a,2
+np.float32,0x800c9daa,0x800c9daa,2
+np.float32,0x7f2c5ac5,0x42b0a789,2
+np.float32,0x3f04a47b,0x3ed5c043,2
+np.float32,0x804692d5,0x804692d5,2
+np.float32,0xbf6e7fa4,0xc02bb493,2
+np.float32,0x80330756,0x80330756,2
+np.float32,0x7f3e29ad,0x42b0d9e1,2
+np.float32,0xbebf689a,0xbeefb24d,2
+np.float32,0x3f29a86c,0x3f022a56,2
+np.float32,0x3e3bd1c0,0x3e2c72b3,2
+np.float32,0x3f78f2e8,0x3f2de546,2
+np.float32,0x3f3709be,0x3f0a16af,2
+np.float32,0x3e11f150,0x3e086f97,2
+np.float32,0xbf5867ad,0xbfeee8a0,2
+np.float32,0xbebfb328,0xbef0296c,2
+np.float32,0x2f7f15,0x2f7f15,2
+np.float32,0x805cfe84,0x805cfe84,2
+np.float32,0xbf504e01,0xbfd71589,2
+np.float32,0x3ee0903c,0x3eba330c,2
+np.float32,0xbd838990,0xbd87f399,2
+np.float32,0x3f14444e,0x3ee9ee7d,2
+np.float32,0x7e352583,0x42adfb3a,2
+np.float32,0x7e76f824,0x42ae99ec,2
+np.float32,0x3f772d00,0x3f2cfebf,2
+np.float32,0x801f7763,0x801f7763,2
+np.float32,0x3f760bf5,0x3f2c6b87,2
+np.float32,0xbf0bb696,0xbf4a03a5,2
+np.float32,0x3f175d2c,0x3eedd6d2,2
+np.float32,0xbf5723f8,0xbfeae288,2
+np.float32,0x24de0a,0x24de0a,2
+np.float32,0x3cd73f80,0x3cd47801,2
+np.float32,0x7f013305,0x42b013fa,2
+np.float32,0x3e3ad425,0x3e2b9c50,2
+np.float32,0x7d3d16,0x7d3d16,2
+np.float32,0x3ef49738,0x3ec7ef54,2
+np.float32,0x3f5b8612,0x3f1e8678,2
+np.float32,0x7f0eeb5c,0x42b047a7,2
+np.float32,0x7e9d7cb0,0x42af1675,2
+np.float32,0xbdd1cfb0,0xbddd5aa0,2
+np.float32,0xbf645dba,0xc00e78fe,2
+np.float32,0x3f511174,0x3f18d56c,2
+np.float32,0x3d91ad00,0x3d8cba62,2
+np.float32,0x805298da,0x805298da,2
+np.float32,0xbedb6af4,0xbf0f4090,2
+np.float32,0x3d23b1ba,0x3d208205,2
+np.float32,0xbea5783e,0xbec7dc87,2
+np.float32,0x79d191,0x79d191,2
+np.float32,0x3e894413,0x3e7337da,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0xbf34a8d3,0xbf9c907b,2
+np.float32,0x3bae779a,0x3bae011f,2
+np.float32,0x8049284d,0x8049284d,2
+np.float32,0x3eb42cc4,0x3e9a600b,2
+np.float32,0x3da1e2d0,0x3d9bce5f,2
+np.float32,0x3f364b8a,0x3f09a7af,2
+np.float32,0x3d930b10,0x3d8e0118,2
+np.float32,0x8061f8d7,0x8061f8d7,2
+np.float32,0x3f473213,0x3f13573b,2
+np.float32,0x3f1e2a38,0x3ef65102,2
+np.float32,0x8068f7d9,0x8068f7d9,2
+np.float32,0x3f181ef8,0x3eeeca2c,2
+np.float32,0x3eeb6168,0x3ec1a9f5,2
+np.float32,0xc2db6,0xc2db6,2
+np.float32,0x3ef7b578,0x3eca0a69,2
+np.float32,0xbf5b5a84,0xbff8d075,2
+np.float32,0x7f479d5f,0x42b0f2b7,2
+np.float32,0x3e6f3c24,0x3e56ff92,2
+np.float32,0x3f45543a,0x3f1249f0,2
+np.float32,0xbea7c1fa,0xbecb40d2,2
+np.float32,0x7de082,0x7de082,2
+np.float32,0x383729,0x383729,2
+np.float32,0xbd91cb90,0xbd973eb3,2
+np.float32,0x7f320218,0x42b0b80f,2
+np.float32,0x5547f2,0x5547f2,2
+np.float32,0x291fe4,0x291fe4,2
+np.float32,0xbe078ba0,0xbe11655f,2
+np.float32,0x7e0c0658,0x42ad7764,2
+np.float32,0x7e129a2b,0x42ad8ee5,2
+np.float32,0x3f7c96d4,0x3f2fbc0c,2
+np.float32,0x3f800000,0x3f317218,2
+np.float32,0x7f131754,0x42b05662,2
+np.float32,0x15f833,0x15f833,2
+np.float32,0x80392ced,0x80392ced,2
+np.float32,0x3f7c141a,0x3f2f7a36,2
+np.float32,0xbf71c03f,0xc038dcfd,2
+np.float32,0xbe14fb2c,0xbe20fff3,2
+np.float32,0xbee0bac6,0xbf13f14c,2
+np.float32,0x801a32dd,0x801a32dd,2
+np.float32,0x8e12d,0x8e12d,2
+np.float32,0x3f48c606,0x3f143a04,2
+np.float32,0x7f418af5,0x42b0e2e6,2
+np.float32,0x3f1f2918,0x3ef78bb7,2
+np.float32,0x11141b,0x11141b,2
+np.float32,0x3e9fc9e8,0x3e8b11ad,2
+np.float32,0xbea5447a,0xbec79010,2
+np.float32,0xbe31d904,0xbe4359db,2
+np.float32,0x80184667,0x80184667,2
+np.float32,0xbf00503c,0xbf3212c2,2
+np.float32,0x3e0328cf,0x3df6d425,2
+np.float32,0x7ee8e1b7,0x42afdebe,2
+np.float32,0xbef95e24,0xbf2ae5db,2
+np.float32,0x7f3e4eed,0x42b0da45,2
+np.float32,0x3f43ee85,0x3f117fa0,2
+np.float32,0xbcfa2ac0,0xbcfe10fe,2
+np.float32,0x80162774,0x80162774,2
+np.float32,0x372e8b,0x372e8b,2
+np.float32,0x3f263802,0x3f0016b0,2
+np.float32,0x8008725f,0x8008725f,2
+np.float32,0x800beb40,0x800beb40,2
+np.float32,0xbe93308e,0xbead8a77,2
+np.float32,0x3d8a4240,0x3d85cab8,2
+np.float32,0x80179de0,0x80179de0,2
+np.float32,0x7f4a98f2,0x42b0fa4f,2
+np.float32,0x3f0d214e,0x3ee0cff1,2
+np.float32,0x80536c2c,0x80536c2c,2
+np.float32,0x7e7038ed,0x42ae8bbe,2
+np.float32,0x7f345af9,0x42b0bec4,2
+np.float32,0xbf243219,0xbf83442f,2
+np.float32,0x7e0d5555,0x42ad7c27,2
+np.float32,0x762e95,0x762e95,2
+np.float32,0x7ebf4548,0x42af79f6,2
+np.float32,0x8079639e,0x8079639e,2
+np.float32,0x3ef925c0,0x3ecb0260,2
+np.float32,0x3f708695,0x3f2996d6,2
+np.float32,0xfca9f,0xfca9f,2
+np.float32,0x8060dbf4,0x8060dbf4,2
+np.float32,0x4c8840,0x4c8840,2
+np.float32,0xbea922ee,0xbecd4ed5,2
+np.float32,0xbf4f28a9,0xbfd40b98,2
+np.float32,0xbe25ad48,0xbe34ba1b,2
+np.float32,0x3f2fb254,0x3f05c58c,2
+np.float32,0x3f73bcc2,0x3f2b3d5f,2
+np.float32,0xbf479a07,0xbfc1a165,2
+np.float32,0xbeb9a808,0xbee69763,2
+np.float32,0x7eb16a65,0x42af5376,2
+np.float32,0xbeb3e442,0xbedda042,2
+np.float32,0x3d8f439c,0x3d8a79ac,2
+np.float32,0x80347516,0x80347516,2
+np.float32,0x3e8a0c5d,0x3e74738c,2
+np.float32,0xbf0383a4,0xbf389289,2
+np.float32,0x806be8f5,0x806be8f5,2
+np.float32,0x8023f0c5,0x8023f0c5,2
+np.float32,0x2060e9,0x2060e9,2
+np.float32,0xbf759eba,0xc04d239f,2
+np.float32,0x3d84cc5a,0x3d80ab96,2
+np.float32,0xbf57746b,0xbfebdf87,2
+np.float32,0x3e418417,0x3e31401f,2
+np.float32,0xaecce,0xaecce,2
+np.float32,0x3cd1766f,0x3cced45c,2
+np.float32,0x53724a,0x53724a,2
+np.float32,0x3f773710,0x3f2d03de,2
+np.float32,0x8013d040,0x8013d040,2
+np.float32,0x4d0eb2,0x4d0eb2,2
+np.float32,0x8014364a,0x8014364a,2
+np.float32,0x7f3c56c9,0x42b0d4f2,2
+np.float32,0x3eee1e1c,0x3ec3891a,2
+np.float32,0xbdda3eb8,0xbde6c5a0,2
+np.float32,0x26ef4a,0x26ef4a,2
+np.float32,0x7ed3370c,0x42afacbf,2
+np.float32,0xbf06e31b,0xbf3f9ab7,2
+np.float32,0xbe3185f0,0xbe42f556,2
+np.float32,0x3dcf9abe,0x3dc5be41,2
+np.float32,0xbf3696d9,0xbf9fe2bd,2
+np.float32,0x3e68ee50,0x3e51e01a,2
+np.float32,0x3f3d4cc2,0x3f0db6ca,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0xbf03070c,0xbf3792d0,2
+np.float32,0x3ea79e6c,0x3e910092,2
+np.float32,0xbf1a393a,0xbf6c2251,2
+np.float32,0x3f41eb0e,0x3f105afc,2
+np.float32,0x3ceadb2f,0x3ce78d79,2
+np.float32,0xbf5dc105,0xc000be2c,2
+np.float32,0x7ebb5a0e,0x42af6f5c,2
+np.float32,0xbf7c44eb,0xc0875058,2
+np.float32,0x6aaaf4,0x6aaaf4,2
+np.float32,0x807d8f23,0x807d8f23,2
+np.float32,0xbee6b142,0xbf194fef,2
+np.float32,0xbe83f256,0xbe989526,2
+np.float32,0x7d588e,0x7d588e,2
+np.float32,0x7cc80131,0x42aa0542,2
+np.float32,0x3e0ab198,0x3e02124f,2
+np.float32,0xbf6e64db,0xc02b52eb,2
+np.float32,0x3d238b56,0x3d205d1b,2
+np.float32,0xbeb408e2,0xbeddd8bc,2
+np.float32,0x3f78340d,0x3f2d8471,2
+np.float32,0x806162a3,0x806162a3,2
+np.float32,0x804e484f,0x804e484f,2
+np.float32,0xbeb8c576,0xbee53466,2
+np.float32,0x807aab15,0x807aab15,2
+np.float32,0x3f523e20,0x3f197ab8,2
+np.float32,0xbf009190,0xbf3295de,2
+np.float32,0x3df43da5,0x3de6bd82,2
+np.float32,0x7f639aea,0x42b135e6,2
+np.float32,0x3f1e638a,0x3ef697da,2
+np.float32,0xbf4884de,0xbfc3bac3,2
+np.float32,0xbe9336b6,0xbead931b,2
+np.float32,0x6daf7f,0x6daf7f,2
+np.float32,0xbf1fc152,0xbf7a70b1,2
+np.float32,0x3f103720,0x3ee4c649,2
+np.float32,0x3eeaa227,0x3ec126df,2
+np.float32,0x7f7ea945,0x42b16f69,2
+np.float32,0x3d3cd800,0x3d389ead,2
+np.float32,0x3f3d7268,0x3f0dcc6e,2
+np.float32,0xbf3c1b41,0xbfa9e2e3,2
+np.float32,0x3ecf3818,0x3eadffb2,2
+np.float32,0x3f1af312,0x3ef25372,2
+np.float32,0x48fae4,0x48fae4,2
+np.float64,0x7fedaa1ee4fb543d,0x40862da7ca7c308e,2
+np.float64,0x8007d2d810efa5b1,0x8007d2d810efa5b1,2
+np.float64,0x3fc385e069270bc0,0x3fc22b8884cf2c3b,2
+np.float64,0x68ed4130d1da9,0x68ed4130d1da9,2
+np.float64,0x8008e93e58d1d27d,0x8008e93e58d1d27d,2
+np.float64,0xbfd3d62852a7ac50,0xbfd7be3a7ad1af02,2
+np.float64,0xbfc1fa0ba923f418,0xbfc35f0f19447df7,2
+np.float64,0xbfe01b8cec20371a,0xbfe6658c7e6c8e50,2
+np.float64,0xbfeda81a147b5034,0xc004e9c94f2b91c1,2
+np.float64,0xbfe1c36a97e386d5,0xbfe9ead4d6beaa92,2
+np.float64,0x3fe50be51f2a17ca,0x3fe02c8067d9e5c5,2
+np.float64,0x3febed4d3337da9a,0x3fe413956466134f,2
+np.float64,0x80068ea59ced1d4c,0x80068ea59ced1d4c,2
+np.float64,0x3febe77d5877cefb,0x3fe4107ac088bc71,2
+np.float64,0x800ae77617d5ceed,0x800ae77617d5ceed,2
+np.float64,0x3fd0546b60a0a8d7,0x3fcd16c2e995ab23,2
+np.float64,0xbfe33e1476667c29,0xbfed6d7faec4db2f,2
+np.float64,0x3fe9d2fd51b3a5fb,0x3fe2eef834310219,2
+np.float64,0x8004249878284932,0x8004249878284932,2
+np.float64,0xbfd5b485c72b690c,0xbfda828ccc6a7a5c,2
+np.float64,0x7fcd6e6b6b3adcd6,0x408622807f04768e,2
+np.float64,0x3fd7f9c32caff386,0x3fd45d024514b8da,2
+np.float64,0x7f87eb9d702fd73a,0x40860aa99fcff27f,2
+np.float64,0xbfc5d1f6fb2ba3ec,0xbfc7ec367cb3fecc,2
+np.float64,0x8008316a44d062d5,0x8008316a44d062d5,2
+np.float64,0xbfd54e4358aa9c86,0xbfd9e889d2998a4a,2
+np.float64,0xda65facdb4cc0,0xda65facdb4cc0,2
+np.float64,0x3fc5b4f6f32b69f0,0x3fc40d13aa8e248b,2
+np.float64,0x3fd825a5d5b04b4c,0x3fd47ce73e04d3ff,2
+np.float64,0x7ac9d56ef593b,0x7ac9d56ef593b,2
+np.float64,0xbfd0a51977214a32,0xbfd34702071428be,2
+np.float64,0x3fd21f620b243ec4,0x3fcfea0c02193640,2
+np.float64,0x3fe6fb3f1b2df67e,0x3fe151ffb18c983b,2
+np.float64,0x700de022e01bd,0x700de022e01bd,2
+np.float64,0xbfbb76b81236ed70,0xbfbd0d31deea1ec7,2
+np.float64,0x3fecfc3856f9f870,0x3fe4a2fcadf221e0,2
+np.float64,0x3fede286517bc50c,0x3fe51af2fbd6ef63,2
+np.float64,0x7fdc8da96c391b52,0x408627ce09cfef2b,2
+np.float64,0x8000edfcfb81dbfb,0x8000edfcfb81dbfb,2
+np.float64,0x8009ebc42af3d789,0x8009ebc42af3d789,2
+np.float64,0x7fd658aaf8acb155,0x408625d80cd1ccc9,2
+np.float64,0x3feea584a37d4b09,0x3fe57f29a73729cd,2
+np.float64,0x4cfe494699fca,0x4cfe494699fca,2
+np.float64,0xbfe9d96460b3b2c9,0xbffa62ecfa026c77,2
+np.float64,0x7fdb3852c3b670a5,0x4086276c191dc9b1,2
+np.float64,0xbfe4d1fc9ee9a3f9,0xbff0d37ce37cf479,2
+np.float64,0xffefffffffffffff,0xfff8000000000000,2
+np.float64,0xbfd1c43d7fa3887a,0xbfd4cfbefb5f2c43,2
+np.float64,0x3fec4a8e0d78951c,0x3fe4453a82ca2570,2
+np.float64,0x7fafed74583fdae8,0x4086181017b8dac9,2
+np.float64,0x80076c4ebcced89e,0x80076c4ebcced89e,2
+np.float64,0x8001a9aa7b235356,0x8001a9aa7b235356,2
+np.float64,0x121260fe2424d,0x121260fe2424d,2
+np.float64,0x3fddd028e3bba052,0x3fd87998c4c43c5b,2
+np.float64,0x800ed1cf4a9da39f,0x800ed1cf4a9da39f,2
+np.float64,0xbfef2e63d7fe5cc8,0xc00d53480b16971b,2
+np.float64,0xbfedde3309fbbc66,0xc005ab55b7a7c127,2
+np.float64,0x3fda3e1e85b47c3d,0x3fd5fddafd8d6729,2
+np.float64,0x8007c6443c6f8c89,0x8007c6443c6f8c89,2
+np.float64,0xbfe101705f2202e0,0xbfe8420817665121,2
+np.float64,0x7fe0bff3c1e17fe7,0x4086291539c56d80,2
+np.float64,0x7fe6001dab6c003a,0x40862b43aa7cb060,2
+np.float64,0x7fbdecf7de3bd9ef,0x40861d170b1c51a5,2
+np.float64,0xbfc0fd508c21faa0,0xbfc23a5876e99fa3,2
+np.float64,0xbfcf6eb14f3edd64,0xbfd208cbf742c8ea,2
+np.float64,0x3f6d40ea403a81d5,0x3f6d33934ab8e799,2
+np.float64,0x7fc32600b6264c00,0x40861f10302357e0,2
+np.float64,0x3fd05870baa0b0e0,0x3fcd1d2af420fac7,2
+np.float64,0x80051d5120aa3aa3,0x80051d5120aa3aa3,2
+np.float64,0x3fdb783fcfb6f080,0x3fd6db229658c083,2
+np.float64,0x3fe0b61199e16c24,0x3fdae41e277be2eb,2
+np.float64,0x3daf62167b5ed,0x3daf62167b5ed,2
+np.float64,0xbfec3c53b6f878a7,0xc0011f0ce7a78a2a,2
+np.float64,0x800fc905161f920a,0x800fc905161f920a,2
+np.float64,0x3fdc7b9cc138f73a,0x3fd78f9c2360e661,2
+np.float64,0x7fe4079e97a80f3c,0x40862a83795f2443,2
+np.float64,0x8010000000000000,0x8010000000000000,2
+np.float64,0x7fe6da5345adb4a6,0x40862b9183c1e4b0,2
+np.float64,0xbfd0a76667214ecc,0xbfd34a1e0c1f6186,2
+np.float64,0x37fb0b906ff62,0x37fb0b906ff62,2
+np.float64,0x7fe170e59fa2e1ca,0x408629680a55e5c5,2
+np.float64,0x3fea900c77752019,0x3fe356eec75aa345,2
+np.float64,0x3fc575c63a2aeb8c,0x3fc3d701167d76b5,2
+np.float64,0x3fe8b45da87168bc,0x3fe24ecbb778fd44,2
+np.float64,0xbfcb990ab5373214,0xbfcf1596c076813c,2
+np.float64,0xf146fdfbe28e0,0xf146fdfbe28e0,2
+np.float64,0x8001fcd474c3f9aa,0x8001fcd474c3f9aa,2
+np.float64,0xbfe9b555eeb36aac,0xbffa0630c3bb485b,2
+np.float64,0x800f950be83f2a18,0x800f950be83f2a18,2
+np.float64,0x7feb0e03ab761c06,0x40862ceb30e36887,2
+np.float64,0x7fca51bd4a34a37a,0x4086219b9dfd35c9,2
+np.float64,0xbfdc27c34cb84f86,0xbfe28ccde8d6bc08,2
+np.float64,0x80009ce1714139c4,0x80009ce1714139c4,2
+np.float64,0x8005290fb1ea5220,0x8005290fb1ea5220,2
+np.float64,0xbfee81e6473d03cd,0xc00885972ca1699b,2
+np.float64,0x7fcfb11a373f6233,0x408623180b8f75d9,2
+np.float64,0xbfcb9c4bfd373898,0xbfcf19bd25881928,2
+np.float64,0x7feaec5885f5d8b0,0x40862ce136050e6c,2
+np.float64,0x8009e17a4a53c2f5,0x8009e17a4a53c2f5,2
+np.float64,0xbfe1cceb9e6399d7,0xbfea0038bd3def20,2
+np.float64,0x8009170bd7122e18,0x8009170bd7122e18,2
+np.float64,0xb2b6f7f1656df,0xb2b6f7f1656df,2
+np.float64,0x3fc75bfd1f2eb7f8,0x3fc574c858332265,2
+np.float64,0x3fa24c06ec249800,0x3fa1fa462ffcb8ec,2
+np.float64,0xaa9a4d2d5534a,0xaa9a4d2d5534a,2
+np.float64,0xbfd7b76208af6ec4,0xbfdda0c3200dcc9f,2
+np.float64,0x7f8cbab73039756d,0x40860c20cba57a94,2
+np.float64,0x3fdbcf9f48b79f3f,0x3fd71827a60e8b6d,2
+np.float64,0xbfdd60f71a3ac1ee,0xbfe3a94bc8cf134d,2
+np.float64,0xb9253589724a7,0xb9253589724a7,2
+np.float64,0xbfcf28e37e3e51c8,0xbfd1da9977b741e3,2
+np.float64,0x80011457f7e228b1,0x80011457f7e228b1,2
+np.float64,0x7fec33df737867be,0x40862d404a897122,2
+np.float64,0xae55f8f95cabf,0xae55f8f95cabf,2
+np.float64,0xbfc1ab9397235728,0xbfc303e5533d4a5f,2
+np.float64,0x7fef0f84b3be1f08,0x40862e05f9ba7118,2
+np.float64,0x7fdc94f328b929e5,0x408627d01449d825,2
+np.float64,0x3fee1b598c7c36b3,0x3fe53847be166834,2
+np.float64,0x3fee8326f37d064e,0x3fe56d96f3fbcf43,2
+np.float64,0x3fe7b18a83ef6316,0x3fe1bb6a6d48c675,2
+np.float64,0x3fe5db969c6bb72e,0x3fe0a8d7d151996c,2
+np.float64,0x3e3391d27c673,0x3e3391d27c673,2
+np.float64,0x3fe79a46d76f348e,0x3fe1ae09a96ea628,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x7fe57d6505aafac9,0x40862b13925547f1,2
+np.float64,0x3fc433371d28666e,0x3fc2c196a764c47b,2
+np.float64,0x8008dbf69cd1b7ee,0x8008dbf69cd1b7ee,2
+np.float64,0xbfe744f459ee89e8,0xbff4c847ad3ee152,2
+np.float64,0x80098aa245331545,0x80098aa245331545,2
+np.float64,0x6747112ece8e3,0x6747112ece8e3,2
+np.float64,0x5d342a40ba69,0x5d342a40ba69,2
+np.float64,0xf7a17739ef42f,0xf7a17739ef42f,2
+np.float64,0x3fe1b34a9d236695,0x3fdc2d7c4e2c347a,2
+np.float64,0x7fb53bf5ec2a77eb,0x40861a585ec8f7ff,2
+np.float64,0xbfe6256f1cec4ade,0xbff2d89a36be65ae,2
+np.float64,0xb783bc9b6f078,0xb783bc9b6f078,2
+np.float64,0xbfedf74a3bfbee94,0xc0060bb6f2bc11ef,2
+np.float64,0x3fda2a5eccb454be,0x3fd5efd7f18b8e81,2
+np.float64,0xbfb3838ab2270718,0xbfb44c337fbca3c3,2
+np.float64,0x3fb4ac6dc22958e0,0x3fb3e194ca01a502,2
+np.float64,0x76c11aaaed824,0x76c11aaaed824,2
+np.float64,0x80025bb1af04b764,0x80025bb1af04b764,2
+np.float64,0x3fdc02740ab804e8,0x3fd73b8cd6f95f19,2
+np.float64,0x3fe71856f5ee30ae,0x3fe162e9fafb4428,2
+np.float64,0x800236f332646de7,0x800236f332646de7,2
+np.float64,0x7fe13fd9d2e27fb3,0x408629516b42a317,2
+np.float64,0x7fdf6bbd34bed779,0x40862892069d805c,2
+np.float64,0x3fd4727beba8e4f8,0x3fd1be5b48d9e282,2
+np.float64,0x800e0fac9e5c1f59,0x800e0fac9e5c1f59,2
+np.float64,0xfb54423ff6a89,0xfb54423ff6a89,2
+np.float64,0x800fbf7ed47f7efe,0x800fbf7ed47f7efe,2
+np.float64,0x3fe9d41fa2f3a840,0x3fe2ef98dc1fd463,2
+np.float64,0x800d733e805ae67d,0x800d733e805ae67d,2
+np.float64,0x3feebe4c46fd7c98,0x3fe58bcf7f47264e,2
+np.float64,0x7fe1ab77b5e356ee,0x40862982bb3dce34,2
+np.float64,0xbfdddac05abbb580,0xbfe41aa45f72d5a2,2
+np.float64,0x3fe14219dee28434,0x3fdb9b137d1f1220,2
+np.float64,0x3fe25d3d5a24ba7b,0x3fdd06e1cf32d35a,2
+np.float64,0x8000fa4fbe81f4a0,0x8000fa4fbe81f4a0,2
+np.float64,0x3fe303e23e6607c4,0x3fddd94982efa9f1,2
+np.float64,0x3fe89cf5d83139ec,0x3fe24193a2e12f75,2
+np.float64,0x3fe9b36ef87366de,0x3fe2dd7cdc25a4a5,2
+np.float64,0xbfdb8b38f8371672,0xbfe2023ba7e002bb,2
+np.float64,0xafc354955f86b,0xafc354955f86b,2
+np.float64,0xbfe2f3d49e65e7a9,0xbfecb557a94123d3,2
+np.float64,0x800496617c092cc4,0x800496617c092cc4,2
+np.float64,0x32db0cfa65b62,0x32db0cfa65b62,2
+np.float64,0xbfd893bfa2b12780,0xbfdf02a8c1e545aa,2
+np.float64,0x7fd5ac927d2b5924,0x408625997e7c1f9b,2
+np.float64,0x3fde9defb8bd3be0,0x3fd9056190986349,2
+np.float64,0x80030cfeb54619fe,0x80030cfeb54619fe,2
+np.float64,0x3fcba85b273750b8,0x3fc90a5ca976594f,2
+np.float64,0x3fe98f6f5cf31edf,0x3fe2c97fcb4eca25,2
+np.float64,0x3fe33dbf90667b80,0x3fde21b83321b993,2
+np.float64,0x3fe4686636e8d0cc,0x3fdf928cdca751b3,2
+np.float64,0x80018ade6ce315be,0x80018ade6ce315be,2
+np.float64,0x7fa9af70c8335ee1,0x408616528cd5a906,2
+np.float64,0x3fbeb460aa3d68c0,0x3fbcff96b00a2193,2
+np.float64,0x7fa82c869830590c,0x408615d6598d9368,2
+np.float64,0xd08c0e6fa1182,0xd08c0e6fa1182,2
+np.float64,0x3fef4eb750fe9d6f,0x3fe5d522fd4e7f64,2
+np.float64,0xbfc586f5492b0dec,0xbfc791eaae92aad1,2
+np.float64,0x7fede64ac7bbcc95,0x40862db7f444fa7b,2
+np.float64,0x3fe540003d6a8000,0x3fe04bdfc2916a0b,2
+np.float64,0x8009417fe6f28300,0x8009417fe6f28300,2
+np.float64,0x3fe6959cf16d2b3a,0x3fe116a1ce01887b,2
+np.float64,0x3fb0a40036214800,0x3fb01f447778219a,2
+np.float64,0x3feff26e91ffe4dd,0x3fe627798fc859a7,2
+np.float64,0x7fed8e46cd7b1c8d,0x40862da044a1d102,2
+np.float64,0x7fec4eb774f89d6e,0x40862d47e43edb53,2
+np.float64,0x3fe800e5e07001cc,0x3fe1e8e2b9105fc2,2
+np.float64,0x800f4eb2f9be9d66,0x800f4eb2f9be9d66,2
+np.float64,0x800611659bcc22cc,0x800611659bcc22cc,2
+np.float64,0x3fd66e65d2acdccc,0x3fd33ad63a5e1000,2
+np.float64,0x800a9085b7f5210c,0x800a9085b7f5210c,2
+np.float64,0x7fdf933a3fbf2673,0x4086289c0e292f2b,2
+np.float64,0x1cd1ba7a39a38,0x1cd1ba7a39a38,2
+np.float64,0xbfefd0b10fffa162,0xc0149ded900ed851,2
+np.float64,0xbfe8c63485b18c69,0xbff7cf3078b1574f,2
+np.float64,0x3fecde56ca79bcae,0x3fe4934afbd7dda9,2
+np.float64,0x8006cd6888cd9ad2,0x8006cd6888cd9ad2,2
+np.float64,0x3fd7a391c2af4724,0x3fd41e2f74df2329,2
+np.float64,0x3fe6a8ad58ed515a,0x3fe121ccfb28e6f5,2
+np.float64,0x7fe18a80dd631501,0x40862973c09086b9,2
+np.float64,0xbf74fd6d8029fb00,0xbf750b3e368ebe6b,2
+np.float64,0x3fdd35e93dba6bd4,0x3fd810071faaffad,2
+np.float64,0x3feb0d8f57361b1f,0x3fe39b3abdef8b7a,2
+np.float64,0xbfd5ec7288abd8e6,0xbfdad764df0d2ca1,2
+np.float64,0x7fdc848272b90904,0x408627cb78f3fb9e,2
+np.float64,0x800ed3eda91da7db,0x800ed3eda91da7db,2
+np.float64,0x3fefac64857f58c9,0x3fe60459dbaad1ba,2
+np.float64,0x3fd1df7a5ba3bef4,0x3fcf864a39b926ff,2
+np.float64,0xfe26ca4bfc4da,0xfe26ca4bfc4da,2
+np.float64,0xbfd1099f8da21340,0xbfd3cf6e6efe934b,2
+np.float64,0xbfe15de9a7a2bbd4,0xbfe909cc895f8795,2
+np.float64,0x3fe89714ed712e2a,0x3fe23e40d31242a4,2
+np.float64,0x800387113e470e23,0x800387113e470e23,2
+np.float64,0x3fe4f80730e9f00e,0x3fe0208219314cf1,2
+np.float64,0x2f95a97c5f2b6,0x2f95a97c5f2b6,2
+np.float64,0x800ea7cdd87d4f9c,0x800ea7cdd87d4f9c,2
+np.float64,0xbf64b967c0297300,0xbf64c020a145b7a5,2
+np.float64,0xbfc5a91a342b5234,0xbfc7bafd77a61d81,2
+np.float64,0xbfe2226fe76444e0,0xbfeac33eb1d1b398,2
+np.float64,0x3fc6aaa8d42d5552,0x3fc4de79f5c68cd4,2
+np.float64,0x3fe54fd4c1ea9faa,0x3fe05561a9a5922b,2
+np.float64,0x80029c1f75653840,0x80029c1f75653840,2
+np.float64,0xbfcb4a84a2369508,0xbfceb1a23bac3995,2
+np.float64,0x80010abeff02157f,0x80010abeff02157f,2
+np.float64,0x7f92d12cf825a259,0x40860e49bde3a5b6,2
+np.float64,0x800933e7027267ce,0x800933e7027267ce,2
+np.float64,0x3fc022b12e204562,0x3fbe64acc53ed887,2
+np.float64,0xbfe35f938de6bf27,0xbfedc1f3e443c016,2
+np.float64,0x1f8d9bae3f1b4,0x1f8d9bae3f1b4,2
+np.float64,0x3fe552f22ceaa5e4,0x3fe057404072350f,2
+np.float64,0xbfa73753442e6ea0,0xbfa7c24a100190f1,2
+np.float64,0x7fb3e2982827c52f,0x408619d1efa676b6,2
+np.float64,0xbfd80cb7a5301970,0xbfde28e65f344f33,2
+np.float64,0xbfcde835973bd06c,0xbfd10806fba46c8f,2
+np.float64,0xbfd4e3c749a9c78e,0xbfd949aff65de39c,2
+np.float64,0x3fcb4b9d6f36973b,0x3fc8be02ad6dc0d3,2
+np.float64,0x1a63000034c7,0x1a63000034c7,2
+np.float64,0x7fdc9c751e3938e9,0x408627d22df71959,2
+np.float64,0x3fd74f3f712e9e7f,0x3fd3e07df0c37ec1,2
+np.float64,0xbfceab74d33d56e8,0xbfd187e99bf82903,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0xbfb2cca466259948,0xbfb3868208e8de30,2
+np.float64,0x800204688b8408d2,0x800204688b8408d2,2
+np.float64,0x3e4547407c8aa,0x3e4547407c8aa,2
+np.float64,0xbfe4668846e8cd10,0xbff03c85189f3818,2
+np.float64,0x800dd350245ba6a0,0x800dd350245ba6a0,2
+np.float64,0xbfbc13c160382780,0xbfbdbd56ce996d16,2
+np.float64,0x7fe25a628a24b4c4,0x408629d06eb2d64d,2
+np.float64,0x3fd19dabbc233b57,0x3fcf1f3ed1d34c8c,2
+np.float64,0x547e20faa8fc5,0x547e20faa8fc5,2
+np.float64,0xbfe19392c6232726,0xbfe97ffe4f303335,2
+np.float64,0x3f87f9f6702ff400,0x3f87d64fb471bb04,2
+np.float64,0x9dfc52db3bf8b,0x9dfc52db3bf8b,2
+np.float64,0x800e1f5a9adc3eb5,0x800e1f5a9adc3eb5,2
+np.float64,0xbfddbd09c8bb7a14,0xbfe3fed7d7cffc70,2
+np.float64,0xbfeda71af87b4e36,0xc004e6631c514544,2
+np.float64,0xbfdbfcfe1bb7f9fc,0xbfe266b5d4a56265,2
+np.float64,0x3fe4ee78cd69dcf2,0x3fe01abba4e81fc9,2
+np.float64,0x800f13b820de2770,0x800f13b820de2770,2
+np.float64,0x3f861e09702c3c00,0x3f85ffae83b02c4f,2
+np.float64,0xbfc0972479212e48,0xbfc1c4bf70b30cbc,2
+np.float64,0x7fef057ef57e0afd,0x40862e036479f6a9,2
+np.float64,0x8bdbabe517b76,0x8bdbabe517b76,2
+np.float64,0xbfec495417f892a8,0xc0013ade88746d18,2
+np.float64,0x3fec680ab3f8d015,0x3fe454dd304b560d,2
+np.float64,0xbfae7ce60c3cf9d0,0xbfaf6eef15bbe56b,2
+np.float64,0x3fec314124786282,0x3fe437ca06294f5a,2
+np.float64,0x7fd5ed05b82bda0a,0x408625b125518e58,2
+np.float64,0x3feac9f02f3593e0,0x3fe3768104dd5cb7,2
+np.float64,0x0,0x0,2
+np.float64,0xbfddd2abd5bba558,0xbfe41312b8ea20de,2
+np.float64,0xbfedf9558c7bf2ab,0xc00613c53e0bb33a,2
+np.float64,0x3fef245ffefe48c0,0x3fe5bfb4dfe3b7a5,2
+np.float64,0x7fe178604922f0c0,0x4086296b77d5eaef,2
+np.float64,0x10000000000000,0x10000000000000,2
+np.float64,0x7fed026766ba04ce,0x40862d7a0dc45643,2
+np.float64,0xbfde27d8c3bc4fb2,0xbfe46336b6447697,2
+np.float64,0x3fe9485d9cb290bb,0x3fe2a1e4b6419423,2
+np.float64,0xbfe27b8a7464f715,0xbfeb9382f5b16f65,2
+np.float64,0x5c34d274b869b,0x5c34d274b869b,2
+np.float64,0xbfeee0b7453dc16f,0xc00acdb46459b6e6,2
+np.float64,0x7fe3dfb4d4e7bf69,0x40862a73785fdf12,2
+np.float64,0xb4635eef68c6c,0xb4635eef68c6c,2
+np.float64,0xbfe522a2c82a4546,0xbff148912a59a1d6,2
+np.float64,0x8009ba38a9737472,0x8009ba38a9737472,2
+np.float64,0xbfc056ff3820ae00,0xbfc17b2205fa180d,2
+np.float64,0x7fe1c8b8a0239170,0x4086298feeee6133,2
+np.float64,0x3fe2d2c6b9e5a58e,0x3fdd9b907471031b,2
+np.float64,0x3fa0a161bc2142c0,0x3fa05db36f6a073b,2
+np.float64,0x3fdef4268ebde84c,0x3fd93f980794d1e7,2
+np.float64,0x800ecd9fe2fd9b40,0x800ecd9fe2fd9b40,2
+np.float64,0xbfc9fbd45e33f7a8,0xbfcd0afc47c340f6,2
+np.float64,0x3fe8c3035b718606,0x3fe2570eb65551a1,2
+np.float64,0xbfe78c4ad2ef1896,0xbff54d25b3328742,2
+np.float64,0x8006f5dcf8adebbb,0x8006f5dcf8adebbb,2
+np.float64,0x800301dca2a603ba,0x800301dca2a603ba,2
+np.float64,0xad4289e55a851,0xad4289e55a851,2
+np.float64,0x80037764f9e6eecb,0x80037764f9e6eecb,2
+np.float64,0xbfe73575b26e6aec,0xbff4abfb5e985c62,2
+np.float64,0xbfc6cb91652d9724,0xbfc91a8001b33ec2,2
+np.float64,0xbfe3a918ffe75232,0xbfee7e6e4fd34c53,2
+np.float64,0x9bc84e2b3790a,0x9bc84e2b3790a,2
+np.float64,0x7fdeec303cbdd85f,0x408628714a49d996,2
+np.float64,0x3fe1d1dcb763a3ba,0x3fdc54ce060dc7f4,2
+np.float64,0x8008ae6432b15cc9,0x8008ae6432b15cc9,2
+np.float64,0x3fd8022fa2b00460,0x3fd46322bf02a609,2
+np.float64,0xbfc55b64472ab6c8,0xbfc75d9568f462e0,2
+np.float64,0xbfe8b165437162ca,0xbff7a15e2ead645f,2
+np.float64,0x7f759330feeb3,0x7f759330feeb3,2
+np.float64,0xbfd504f68eaa09ee,0xbfd97b06c01d7473,2
+np.float64,0x54702d5aa8e06,0x54702d5aa8e06,2
+np.float64,0xbfed1779337a2ef2,0xc0032f7109ef5a51,2
+np.float64,0xe248bd4dc4918,0xe248bd4dc4918,2
+np.float64,0xbfd8c59150318b22,0xbfdf53bca6ca8b1e,2
+np.float64,0xbfe3b9d942e773b2,0xbfeea9fcad277ba7,2
+np.float64,0x800934ec127269d9,0x800934ec127269d9,2
+np.float64,0xbfbb7f535a36fea8,0xbfbd16d61b6c52b8,2
+np.float64,0xccb185a199631,0xccb185a199631,2
+np.float64,0x3fe3dda76fe7bb4e,0x3fdee83bc6094301,2
+np.float64,0xbfe0c902f5e19206,0xbfe7ca7c0e888006,2
+np.float64,0xbfefeed08cbfdda1,0xc018aadc483c8724,2
+np.float64,0x7fd0c05c52a180b8,0x40862389daf64aac,2
+np.float64,0xbfd28e3323a51c66,0xbfd5e9ba278fb685,2
+np.float64,0xbef4103b7de82,0xbef4103b7de82,2
+np.float64,0x3fe7661fd12ecc40,0x3fe18ff7dfb696e2,2
+np.float64,0x3fddd5f2f0bbabe4,0x3fd87d8bb6719c3b,2
+np.float64,0x800b3914cfd6722a,0x800b3914cfd6722a,2
+np.float64,0xf3f09a97e7e14,0xf3f09a97e7e14,2
+np.float64,0x7f97092b502e1256,0x40860fe8054cf54e,2
+np.float64,0xbfdbec7917b7d8f2,0xbfe2580b4b792c79,2
+np.float64,0x7fe7ff215aaffe42,0x40862bf5887fa062,2
+np.float64,0x80080186e570030e,0x80080186e570030e,2
+np.float64,0xbfc27f05e624fe0c,0xbfc3fa214be4adc4,2
+np.float64,0x3fe4481be1689038,0x3fdf6b11e9c4ca72,2
+np.float64,0x3fd642cc9cac8598,0x3fd31a857fe70227,2
+np.float64,0xbef8782d7df0f,0xbef8782d7df0f,2
+np.float64,0x8003077dc2e60efc,0x8003077dc2e60efc,2
+np.float64,0x80083eb5a2507d6c,0x80083eb5a2507d6c,2
+np.float64,0x800e8d1eb77d1a3e,0x800e8d1eb77d1a3e,2
+np.float64,0xbfc7737cd22ee6f8,0xbfc9e7716f03f1fc,2
+np.float64,0xbfe9a2b4ddf3456a,0xbff9d71664a8fc78,2
+np.float64,0x7fe67c7d322cf8f9,0x40862b7066465194,2
+np.float64,0x3fec080ce2b8101a,0x3fe421dac225be46,2
+np.float64,0xbfe6d27beb6da4f8,0xbff3fbb1add521f7,2
+np.float64,0x3fdd4f96ceba9f2e,0x3fd821a638986dbe,2
+np.float64,0x3fbd89f1303b13e2,0x3fbbf49223a9d002,2
+np.float64,0xbfe94e2b9d329c57,0xbff907e549c534f5,2
+np.float64,0x3fe2f2cc51e5e599,0x3fddc3d6b4a834a1,2
+np.float64,0xfdcb5b49fb96c,0xfdcb5b49fb96c,2
+np.float64,0xbfea7108fa74e212,0xbffc01b392f4897b,2
+np.float64,0x3fd38baef7a7175c,0x3fd10e7fd3b958dd,2
+np.float64,0x3fa75bf9cc2eb800,0x3fa6d792ecdedb8e,2
+np.float64,0x7fd19fd20aa33fa3,0x408623f1e2cd04c3,2
+np.float64,0x3fd62c708dac58e0,0x3fd309ec7818d16e,2
+np.float64,0x3fdf489047be9120,0x3fd978640617c758,2
+np.float64,0x1,0x1,2
+np.float64,0xbfe21e7c3ea43cf8,0xbfeaba21320697d3,2
+np.float64,0xbfd3649047a6c920,0xbfd71a6f14223744,2
+np.float64,0xbfd68ca68c2d194e,0xbfdbcce6784e5d44,2
+np.float64,0x3fdb26b0ea364d62,0x3fd6a1f86f64ff74,2
+np.float64,0xbfd843821cb08704,0xbfde80e90805ab3f,2
+np.float64,0x3fd508a27aaa1144,0x3fd22fc203a7b9d8,2
+np.float64,0xbfdb951c7eb72a38,0xbfe20aeaec13699b,2
+np.float64,0x3fef556ba57eaad7,0x3fe5d8865cce0a6d,2
+np.float64,0x3fd0d224b3a1a448,0x3fcdde7be5d7e21e,2
+np.float64,0x8007ff272baffe4f,0x8007ff272baffe4f,2
+np.float64,0x3fe1c7bddf638f7c,0x3fdc47cc6cf2f5cd,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0x2016d560402f,0x2016d560402f,2
+np.float64,0xbfcca10be9394218,0xbfd033f36b94fc54,2
+np.float64,0xbfdb833628b7066c,0xbfe1fb344b840c70,2
+np.float64,0x3fd8529cb3b0a539,0x3fd49d847fe77218,2
+np.float64,0xbfc0b0ebab2161d8,0xbfc1e260c60ffd1b,2
+np.float64,0xbfea8b9a79f51735,0xbffc4ee6be8a0fa2,2
+np.float64,0x7feca8fab7f951f4,0x40862d613e454646,2
+np.float64,0x7fd8c52d82318a5a,0x408626aaf37423a3,2
+np.float64,0xbfe364ad4526c95a,0xbfedcee39bc93ff5,2
+np.float64,0x800b78161256f02d,0x800b78161256f02d,2
+np.float64,0xbfd55f0153aabe02,0xbfda01a78f72d494,2
+np.float64,0x800315a5f0662b4d,0x800315a5f0662b4d,2
+np.float64,0x7fe4c0dca02981b8,0x40862acc27e4819f,2
+np.float64,0x8009825c703304b9,0x8009825c703304b9,2
+np.float64,0x3fe6e94e1cadd29c,0x3fe1478ccc634f49,2
+np.float64,0x7fe622d8586c45b0,0x40862b504177827e,2
+np.float64,0x3fe4458600688b0c,0x3fdf67e79a84b953,2
+np.float64,0xbfdd75d8a1baebb2,0xbfe3bc9e6ca1bbb5,2
+np.float64,0x3fde789c6bbcf138,0x3fd8ec1d435531b3,2
+np.float64,0x3fe7052b94ee0a58,0x3fe157c5c4418dc1,2
+np.float64,0x7fef31652abe62c9,0x40862e0eaeabcfc0,2
+np.float64,0x3fe279691ee4f2d2,0x3fdd2aa41eb43cd4,2
+np.float64,0xbfd533fa95aa67f6,0xbfd9c12f516d29d7,2
+np.float64,0x3fe6d057f96da0b0,0x3fe138fd96693a6a,2
+np.float64,0x800bad984f775b31,0x800bad984f775b31,2
+np.float64,0x7fdd6fdba4badfb6,0x4086280c73d8ef97,2
+np.float64,0x7fe9b5c0eef36b81,0x40862c82c6f57a53,2
+np.float64,0x8000bc02ece17807,0x8000bc02ece17807,2
+np.float64,0xbff0000000000000,0xfff0000000000000,2
+np.float64,0xbfed430be3fa8618,0xc003aaf338c75b3c,2
+np.float64,0x3fee17b759fc2f6f,0x3fe53668696bf48b,2
+np.float64,0x3f8d4cf9d03a9a00,0x3f8d17d2f532afdc,2
+np.float64,0x8005d6257b8bac4c,0x8005d6257b8bac4c,2
+np.float64,0xbfd17a6df9a2f4dc,0xbfd469e3848adc6e,2
+np.float64,0xb28a293965145,0xb28a293965145,2
+np.float64,0xbfe7d011e42fa024,0xbff5cf818998c8ec,2
+np.float64,0xbfe74f0f136e9e1e,0xbff4dad6ebb0443c,2
+np.float64,0x800f249fc9be4940,0x800f249fc9be4940,2
+np.float64,0x2542f8fe4a860,0x2542f8fe4a860,2
+np.float64,0xc48d40cd891a8,0xc48d40cd891a8,2
+np.float64,0x3fe4e64bc8e9cc98,0x3fe015c9eb3caa53,2
+np.float64,0x3fd33881eca67104,0x3fd0cea886be2457,2
+np.float64,0xbfd01748fba02e92,0xbfd28875959e6901,2
+np.float64,0x7fb7ab01f22f5603,0x40861b369927bf53,2
+np.float64,0xbfe340274ce6804e,0xbfed72b39f0ebb24,2
+np.float64,0x7fc16c0c3422d817,0x40861e4eaf1a286c,2
+np.float64,0x3fc26944a324d288,0x3fc133a77b356ac4,2
+np.float64,0xa149d7134293b,0xa149d7134293b,2
+np.float64,0x800837382d106e71,0x800837382d106e71,2
+np.float64,0x797d1740f2fa4,0x797d1740f2fa4,2
+np.float64,0xc3f15b7787e2c,0xc3f15b7787e2c,2
+np.float64,0x80cad1b90195a,0x80cad1b90195a,2
+np.float64,0x3fdd8f1142bb1e23,0x3fd84d21490d1ce6,2
+np.float64,0xbfbde6c9123bcd90,0xbfbfcc030a86836a,2
+np.float64,0x8007f77e032feefd,0x8007f77e032feefd,2
+np.float64,0x3fe74fed1c6e9fda,0x3fe18322cf19cb61,2
+np.float64,0xbfd8a40bbcb14818,0xbfdf1d23520ba74b,2
+np.float64,0xbfeb7a0e6076f41d,0xbfff4ddfb926efa5,2
+np.float64,0xbfcb8c5f663718c0,0xbfcf0570f702bda9,2
+np.float64,0xf668cd97ecd1a,0xf668cd97ecd1a,2
+np.float64,0xbfe92accf572559a,0xbff8b4393878ffdb,2
+np.float64,0xbfeaa955567552ab,0xbffca70c7d73eee5,2
+np.float64,0xbfe083a14f610742,0xbfe739d84bc35077,2
+np.float64,0x78290568f0521,0x78290568f0521,2
+np.float64,0x3fe94bae2372975c,0x3fe2a3beac5c9858,2
+np.float64,0x3fca4fbab9349f78,0x3fc7edbca2492acb,2
+np.float64,0x8000000000000000,0x8000000000000000,2
+np.float64,0x7fb9eb505433d6a0,0x40861bf0adedb74d,2
+np.float64,0x7fdc66f72a38cded,0x408627c32aeecf0f,2
+np.float64,0x2e8e6f445d1cf,0x2e8e6f445d1cf,2
+np.float64,0xbfec43195af88633,0xc0012d7e3f91b7e8,2
+np.float64,0x7fcdb971e93b72e3,0x40862294c9e3a7bc,2
+np.float64,0x800cabc461195789,0x800cabc461195789,2
+np.float64,0x2c79709c58f2f,0x2c79709c58f2f,2
+np.float64,0x8005d772d3cbaee6,0x8005d772d3cbaee6,2
+np.float64,0x3fe84d8c03709b18,0x3fe21490ce3673dd,2
+np.float64,0x7fe5578adc2aaf15,0x40862b056e8437d4,2
+np.float64,0xbf91298c58225320,0xbf914ec86c32d11f,2
+np.float64,0xc7ed2b6d8fda6,0xc7ed2b6d8fda6,2
+np.float64,0x2761404c4ec29,0x2761404c4ec29,2
+np.float64,0x3fbad3c48835a789,0x3fb9833c02385305,2
+np.float64,0x3fa46fee5428dfe0,0x3fa40a357fb24c23,2
+np.float64,0xbfe3900c6fe72019,0xbfee3dba29dd9d43,2
+np.float64,0x3fe7a9e41a6f53c8,0x3fe1b704dfb9884b,2
+np.float64,0xbfe74a7a1eee94f4,0xbff4d269cacb1f29,2
+np.float64,0xbfee609c72fcc139,0xc007da8499d34123,2
+np.float64,0x3fef2d5fc23e5ac0,0x3fe5c44414e59cb4,2
+np.float64,0xbfd7bdc0402f7b80,0xbfddaae1e7bb78fb,2
+np.float64,0xd71ee01dae3dc,0xd71ee01dae3dc,2
+np.float64,0x3fe98cbcdef3197a,0x3fe2c7ffe33c4541,2
+np.float64,0x8000f8dbb3a1f1b8,0x8000f8dbb3a1f1b8,2
+np.float64,0x3fe3e98ad567d316,0x3fdef6e58058313f,2
+np.float64,0x41ad0bfc835a2,0x41ad0bfc835a2,2
+np.float64,0x7fdcc2dc0d3985b7,0x408627dce39f77af,2
+np.float64,0xbfe47b980de8f730,0xbff059acdccd6e2b,2
+np.float64,0xbfef49b6577e936d,0xc00e714f46b2ccc1,2
+np.float64,0x3fac31816c386300,0x3fab71cb92b0db8f,2
+np.float64,0x3fe59097e76b2130,0x3fe07c299fd1127c,2
+np.float64,0xbfecf0df5cf9e1bf,0xc002c7ebdd65039c,2
+np.float64,0x3fd2b7d0b6a56fa1,0x3fd06b638990ae02,2
+np.float64,0xbfeb68deecf6d1be,0xbfff1187e042d3e4,2
+np.float64,0x3fd44a9771a8952f,0x3fd1a01867c5e302,2
+np.float64,0xf79a9dedef354,0xf79a9dedef354,2
+np.float64,0x800c25a170d84b43,0x800c25a170d84b43,2
+np.float64,0x3ff0000000000000,0x3fe62e42fefa39ef,2
+np.float64,0x3fbff4f7623fe9f0,0x3fbe1d3878f4c417,2
+np.float64,0xd284c845a5099,0xd284c845a5099,2
+np.float64,0xbfe3c7815f678f02,0xbfeecdab5ca2e651,2
+np.float64,0x3fc19c934e233927,0x3fc08036104b1f23,2
+np.float64,0x800b6096de16c12e,0x800b6096de16c12e,2
+np.float64,0xbfe962a67e32c54d,0xbff9392313a112a1,2
+np.float64,0x2b9d0116573a1,0x2b9d0116573a1,2
+np.float64,0x3fcab269ed3564d4,0x3fc83f7e1c3095b7,2
+np.float64,0x3fc8c78d86318f1b,0x3fc6a6cde5696f99,2
+np.float64,0xd5b1e9b5ab63d,0xd5b1e9b5ab63d,2
+np.float64,0xbfed802a47fb0054,0xc00465cad3b5b0ef,2
+np.float64,0xbfd73aaf08ae755e,0xbfdcdbd62b8af271,2
+np.float64,0xbfd4f13c0229e278,0xbfd95dacff79e570,2
+np.float64,0xbfe9622808f2c450,0xbff937f13c397e8d,2
+np.float64,0xbfeddfa62efbbf4c,0xc005b0c835eed829,2
+np.float64,0x3fd65663d4acacc8,0x3fd3290cd0e675dc,2
+np.float64,0x8005e890f1abd123,0x8005e890f1abd123,2
+np.float64,0xbfe924919fb24923,0xbff8a5a827a28756,2
+np.float64,0x3fe8cdf490719be9,0x3fe25d39535e8366,2
+np.float64,0x7fc229e6ff2453cd,0x40861ea40ef87a5a,2
+np.float64,0x3fe5cf53ceeb9ea8,0x3fe0a18e0b65f27e,2
+np.float64,0xa79cf6fb4f39f,0xa79cf6fb4f39f,2
+np.float64,0x7fddbb3c0f3b7677,0x40862820d5edf310,2
+np.float64,0x3e1011de7c203,0x3e1011de7c203,2
+np.float64,0x3fc0b59a83216b38,0x3fbf6916510ff411,2
+np.float64,0x8647f98d0c8ff,0x8647f98d0c8ff,2
+np.float64,0x8005dad33ecbb5a7,0x8005dad33ecbb5a7,2
+np.float64,0x8a80d0631501a,0x8a80d0631501a,2
+np.float64,0xbfe18f7d6ee31efb,0xbfe976f06713afc1,2
+np.float64,0xbfe06eaed560dd5e,0xbfe70eac696933e6,2
+np.float64,0xbfed8ef93c7b1df2,0xc00495bfa3195b53,2
+np.float64,0x3febe9c24677d385,0x3fe411b10db16c42,2
+np.float64,0x7fd5d80c1fabb017,0x408625a97a7787ba,2
+np.float64,0x3fca79b59334f368,0x3fc8108a521341dc,2
+np.float64,0xbfccf8db4339f1b8,0xbfd06c9a5424aadb,2
+np.float64,0xbfea5ac5a574b58b,0xbffbc21d1405d840,2
+np.float64,0x800ce2bf4b19c57f,0x800ce2bf4b19c57f,2
+np.float64,0xbfe8df896d31bf13,0xbff807ab38ac41ab,2
+np.float64,0x3feab83da9f5707c,0x3fe36cdd827c0eff,2
+np.float64,0x3fee717683bce2ed,0x3fe564879171719b,2
+np.float64,0x80025e5577c4bcac,0x80025e5577c4bcac,2
+np.float64,0x3fe3e5378e67ca70,0x3fdef1902c5d1efd,2
+np.float64,0x3fa014bb7c202980,0x3f9faacf9238d499,2
+np.float64,0x3fddbf5e16bb7ebc,0x3fd86e2311cb0f6d,2
+np.float64,0x3fd24e50e6a49ca0,0x3fd0198f04f82186,2
+np.float64,0x656b5214cad6b,0x656b5214cad6b,2
+np.float64,0x8b0a4bfd1614a,0x8b0a4bfd1614a,2
+np.float64,0xbfeeb6bd9e7d6d7b,0xc009b669285e319e,2
+np.float64,0x8000000000000001,0x8000000000000001,2
+np.float64,0xbfe719feceee33fe,0xbff47a4c8cbf0cca,2
+np.float64,0xbfd14fa8c8a29f52,0xbfd42f27b1aced39,2
+np.float64,0x7fec9dcb80f93b96,0x40862d5e1e70bbb9,2
+np.float64,0x7fecacb826f9596f,0x40862d6249746915,2
+np.float64,0x973459f52e68b,0x973459f52e68b,2
+np.float64,0x7f40a59e00214b3b,0x4085f194f45f82b1,2
+np.float64,0x7fc5dbaec32bb75d,0x4086201f3e7065d9,2
+np.float64,0x82d0801305a10,0x82d0801305a10,2
+np.float64,0x7fec81c0f4790381,0x40862d5643c0fc85,2
+np.float64,0xbfe2d81e9ee5b03d,0xbfec71a8e864ea40,2
+np.float64,0x6c545c9ad8a8c,0x6c545c9ad8a8c,2
+np.float64,0x3f9be95a5037d2b5,0x3f9b89b48ac8f5d8,2
+np.float64,0x8000cae9702195d4,0x8000cae9702195d4,2
+np.float64,0xbfd375f45126ebe8,0xbfd733677e54a80d,2
+np.float64,0x3fd29a5b81a534b7,0x3fd05494bf200278,2
+np.float64,0xfff0000000000000,0xfff8000000000000,2
+np.float64,0x7fca8fc195351f82,0x408621ae61aa6c13,2
+np.float64,0x1b28e2ae3651d,0x1b28e2ae3651d,2
+np.float64,0x3fe7fdbd14effb7a,0x3fe1e714884b46a8,2
+np.float64,0x3fdf1ce068be39c0,0x3fd95b054e0fad3d,2
+np.float64,0x3fe79f9a636f3f34,0x3fe1b11a40c00b3e,2
+np.float64,0x3fe60eb7036c1d6e,0x3fe0c72a02176874,2
+np.float64,0x229da17e453b5,0x229da17e453b5,2
+np.float64,0x3fc1a921b5235240,0x3fc08b3f35e47fb1,2
+np.float64,0xbb92d2af7725b,0xbb92d2af7725b,2
+np.float64,0x3fe4110cb1e8221a,0x3fdf2787de6c73f7,2
+np.float64,0xbfbc87771a390ef0,0xbfbe3f6e95622363,2
+np.float64,0xbfe74025dfee804c,0xbff4bf7b1895e697,2
+np.float64,0x964eb6592c9d7,0x964eb6592c9d7,2
+np.float64,0x3f951689b82a2d00,0x3f94dfb38d746fdf,2
+np.float64,0x800356271be6ac4f,0x800356271be6ac4f,2
+np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,2
+np.float64,0xbfed5ce250fab9c5,0xc003f7ddfeb94345,2
+np.float64,0x3fec3d5dc1387abc,0x3fe43e39c02d86f4,2
+np.float64,0x3999897e73332,0x3999897e73332,2
+np.float64,0xbfdcb57744b96aee,0xbfe30c4b98f3d088,2
+np.float64,0x7f961fb0b82c3f60,0x40860f9549c3a380,2
+np.float64,0x67d6efcacfadf,0x67d6efcacfadf,2
+np.float64,0x8002c9498f859294,0x8002c9498f859294,2
+np.float64,0xbfa3033800260670,0xbfa35fe3bf43e188,2
+np.float64,0xbfeab2fc157565f8,0xbffcc413c486b4eb,2
+np.float64,0x3fe25e62f364bcc6,0x3fdd0856e19e3430,2
+np.float64,0x7fb2f42dda25e85b,0x4086196fb34a65fd,2
+np.float64,0x3fe0f1a5af61e34c,0x3fdb3235a1786efb,2
+np.float64,0x800a340ca1f4681a,0x800a340ca1f4681a,2
+np.float64,0x7c20b9def8418,0x7c20b9def8418,2
+np.float64,0xdf0842a1be109,0xdf0842a1be109,2
+np.float64,0x3fe9f22cc2f3e45a,0x3fe300359b842bf0,2
+np.float64,0x3fe389ed73e713da,0x3fde809780fe4432,2
+np.float64,0x9500fb932a020,0x9500fb932a020,2
+np.float64,0x3fd8a21ffdb14440,0x3fd4d70862345d86,2
+np.float64,0x800d99c15cbb3383,0x800d99c15cbb3383,2
+np.float64,0x3fd96c98c932d932,0x3fd568959c9b028f,2
+np.float64,0x7fc228483a24508f,0x40861ea358420976,2
+np.float64,0x7fc6737bef2ce6f7,0x408620560ffc6a98,2
+np.float64,0xbfb2c27cee2584f8,0xbfb37b8cc7774b5f,2
+np.float64,0xbfd18409f9230814,0xbfd4771d1a9a24fb,2
+np.float64,0x3fb53cb3f42a7968,0x3fb466f06f88044b,2
+np.float64,0x3fef61d0187ec3a0,0x3fe5dec8a9d13dd9,2
+np.float64,0x3fe59a6ffd2b34e0,0x3fe0820a99c6143d,2
+np.float64,0x3fce18aff43c3160,0x3fcb07c7b523f0d1,2
+np.float64,0xbfb1319a62226338,0xbfb1cc62f31b2b40,2
+np.float64,0xa00cce6d4019a,0xa00cce6d4019a,2
+np.float64,0x80068ae8e0ed15d3,0x80068ae8e0ed15d3,2
+np.float64,0x3fecef353239de6a,0x3fe49c280adc607b,2
+np.float64,0x3fdf1a7fb0be34ff,0x3fd9596bafe2d766,2
+np.float64,0x3feb5e12eeb6bc26,0x3fe3c6be3ede8d07,2
+np.float64,0x3fdeff5cd43dfeba,0x3fd947262ec96b05,2
+np.float64,0x3f995e75e832bd00,0x3f990f511f4c7f1c,2
+np.float64,0xbfeb5b3ed0b6b67e,0xbffee24fc0fc2881,2
+np.float64,0x7fb82aad0a305559,0x40861b614d901182,2
+np.float64,0xbfe5c3a4926b8749,0xbff23cd0ad144fe6,2
+np.float64,0x3fef47da373e8fb4,0x3fe5d1aaa4031993,2
+np.float64,0x7fc6a8c3872d5186,0x40862068f5ca84be,2
+np.float64,0x7fc0c2276221844e,0x40861dff2566d001,2
+np.float64,0x7fc9ce7d28339cf9,0x40862173541f84d1,2
+np.float64,0x3fce2c34933c5869,0x3fcb179428ad241d,2
+np.float64,0xbfcf864c293f0c98,0xbfd21872c4821cfc,2
+np.float64,0x3fc51fd1f82a3fa4,0x3fc38d4f1685c166,2
+np.float64,0xbfe2707b70a4e0f7,0xbfeb795fbd5bb444,2
+np.float64,0x46629b568cc54,0x46629b568cc54,2
+np.float64,0x7fe5f821f32bf043,0x40862b40c2cdea3f,2
+np.float64,0x3fedd2c9457ba592,0x3fe512ce92394526,2
+np.float64,0x7fe6dcb8ceadb971,0x40862b925a7dc05d,2
+np.float64,0x3fd1b983b4a37307,0x3fcf4ae2545cf64e,2
+np.float64,0xbfe1c93104639262,0xbfe9f7d28e4c0c82,2
+np.float64,0x995ebc2932bd8,0x995ebc2932bd8,2
+np.float64,0x800a4c3ee614987e,0x800a4c3ee614987e,2
+np.float64,0x3fbb58766e36b0f0,0x3fb9fb3b9810ec16,2
+np.float64,0xbfe36d636666dac7,0xbfede5080f69053c,2
+np.float64,0x3f4feee1003fddc2,0x3f4feae5f05443d1,2
+np.float64,0x3fed0b772ffa16ee,0x3fe4aafb924903c6,2
+np.float64,0x800bb3faef3767f6,0x800bb3faef3767f6,2
+np.float64,0x3fe285cda5e50b9c,0x3fdd3a58df06c427,2
+np.float64,0x7feb9d560bb73aab,0x40862d152362bb94,2
+np.float64,0x3fecd1f447f9a3e9,0x3fe48cc78288cb3f,2
+np.float64,0x3fca927b0c3524f6,0x3fc8250f49ba28df,2
+np.float64,0x7fcc19944e383328,0x40862221b02fcf43,2
+np.float64,0xbfd8ddf41db1bbe8,0xbfdf7b92073ff2fd,2
+np.float64,0x80006fe736e0dfcf,0x80006fe736e0dfcf,2
+np.float64,0x800bbeb66d577d6d,0x800bbeb66d577d6d,2
+np.float64,0xbfe4329353e86526,0xbfefeaf19ab92b42,2
+np.float64,0x2fad72805f5af,0x2fad72805f5af,2
+np.float64,0x3fe1b827aa637050,0x3fdc33bf46012c0d,2
+np.float64,0x3fc3f3f8e227e7f2,0x3fc28aeb86d65278,2
+np.float64,0x3fec018933780312,0x3fe41e619aa4285c,2
+np.float64,0xbfd92428e0b24852,0xbfdfeecb08d154df,2
+np.float64,0x2d7046845ae0a,0x2d7046845ae0a,2
+np.float64,0x7fde7fd2233cffa3,0x408628550f8a948f,2
+np.float64,0x8000a32cd241465a,0x8000a32cd241465a,2
+np.float64,0x8004267a45084cf5,0x8004267a45084cf5,2
+np.float64,0xbfe6b422556d6844,0xbff3c71f67661e6e,2
+np.float64,0x3fe3a37d922746fb,0x3fdea04e04d6195c,2
+np.float64,0xbfddcc54b53b98aa,0xbfe40d2389cdb848,2
+np.float64,0x3fe18b4b92a31697,0x3fdbf9e68cbf5794,2
+np.float64,0x7fc9c5b2ee338b65,0x408621709a17a47a,2
+np.float64,0x1ebd1ce03d7b,0x1ebd1ce03d7b,2
+np.float64,0x8008a6fc39d14df9,0x8008a6fc39d14df9,2
+np.float64,0x3fec11384c782270,0x3fe426bdaedd2965,2
+np.float64,0x3fefc28344ff8507,0x3fe60f75d34fc3d2,2
+np.float64,0xc35f379786be7,0xc35f379786be7,2
+np.float64,0x3feef51f4a7dea3e,0x3fe5a7b95d7786b5,2
+np.float64,0x3fec9b9f0379373e,0x3fe4702477abbb63,2
+np.float64,0x3fde94f8cdbd29f0,0x3fd8ff50f7df0a6f,2
+np.float64,0xbfed32d1cdfa65a4,0xc0037c1470f6f979,2
+np.float64,0x800d3ba44f5a7749,0x800d3ba44f5a7749,2
+np.float64,0x3fe3c56c8fe78ad9,0x3fdeca4eb9bb8918,2
+np.float64,0xbfe7c97242ef92e4,0xbff5c2950dfd6f69,2
+np.float64,0xbd9440057b288,0xbd9440057b288,2
+np.float64,0x7feb2fc111f65f81,0x40862cf524bd2001,2
+np.float64,0x800a431e2df4863d,0x800a431e2df4863d,2
+np.float64,0x80038a3b79e71478,0x80038a3b79e71478,2
+np.float64,0x80000c93d4601928,0x80000c93d4601928,2
+np.float64,0x7fe9fec022f3fd7f,0x40862c995db8ada0,2
+np.float64,0x3fead0129c35a025,0x3fe379d7a92c8f79,2
+np.float64,0x3fdd8cbaf7bb1974,0x3fd84b87ff0c26c7,2
+np.float64,0x3fe8fb7c60b1f6f9,0x3fe276d5339e7135,2
+np.float64,0x85a255e10b44b,0x85a255e10b44b,2
+np.float64,0xbfe507c23fea0f84,0xbff1212d2260022a,2
+np.float64,0x3fc5487c7b2a90f9,0x3fc3b03222d3d148,2
+np.float64,0x7fec0bdcb8f817b8,0x40862d34e8fd11e7,2
+np.float64,0xbfc5f34b4f2be698,0xbfc8146a899c7a0c,2
+np.float64,0xbfa2a49c14254940,0xbfa2fdab2eae3826,2
+np.float64,0x800ec52f15dd8a5e,0x800ec52f15dd8a5e,2
+np.float64,0xbfe3ba4b12a77496,0xbfeeab256b3e9422,2
+np.float64,0x80034d6c7ba69ada,0x80034d6c7ba69ada,2
+np.float64,0x7fd394d4202729a7,0x408624c98a216742,2
+np.float64,0xbfd4493a38289274,0xbfd865d67af2de91,2
+np.float64,0xe47d6203c8fad,0xe47d6203c8fad,2
+np.float64,0x98eb4e4b31d6a,0x98eb4e4b31d6a,2
+np.float64,0x4507fb128a100,0x4507fb128a100,2
+np.float64,0xbfc77032e42ee064,0xbfc9e36ab747a14d,2
+np.float64,0xa1f8a03b43f14,0xa1f8a03b43f14,2
+np.float64,0xbfc3d4da8527a9b4,0xbfc58c27af2476b0,2
+np.float64,0x3fc0eb7d6921d6fb,0x3fbfc858a077ed61,2
+np.float64,0x7fddb2e9403b65d2,0x4086281e98443709,2
+np.float64,0xbfa7ea62942fd4c0,0xbfa87dfd06b05d2a,2
+np.float64,0xbfe7d5c5426fab8a,0xbff5daa969c6d9e5,2
+np.float64,0x3fbf7cba0c3ef974,0x3fbdb23cd8fe875b,2
+np.float64,0x7fe92021eb324043,0x40862c53aee8b154,2
+np.float64,0x7fefbaa1827f7542,0x40862e3194737072,2
+np.float64,0x3fc6f82c402df059,0x3fc520432cbc533f,2
+np.float64,0x7fb37679a826ecf2,0x408619a5f857e27f,2
+np.float64,0x79ec1528f3d83,0x79ec1528f3d83,2
+np.float64,0x3fbefe1d0c3dfc3a,0x3fbd41650ba2c893,2
+np.float64,0x3fc3e5e11827cbc2,0x3fc27eb9b47c9c42,2
+np.float64,0x16aed1922d5db,0x16aed1922d5db,2
+np.float64,0x800124f7e58249f1,0x800124f7e58249f1,2
+np.float64,0x8004f7d12489efa3,0x8004f7d12489efa3,2
+np.float64,0x3fef80b8e27f0172,0x3fe5ee5fd43322c6,2
+np.float64,0xbfe7740c88eee819,0xbff51f823c8da14d,2
+np.float64,0xbfe6e1f1f6edc3e4,0xbff416bcb1302e7c,2
+np.float64,0x8001a2c4a7e3458a,0x8001a2c4a7e3458a,2
+np.float64,0x3fe861e155f0c3c2,0x3fe2201d3000c329,2
+np.float64,0x3fd00a101a201420,0x3fcca01087dbd728,2
+np.float64,0x7fdf0eb1133e1d61,0x4086287a327839b8,2
+np.float64,0x95e3ffdb2bc80,0x95e3ffdb2bc80,2
+np.float64,0x3fd87a1e8230f43d,0x3fd4ba1eb9be1270,2
+np.float64,0x3fedc4792afb88f2,0x3fe50b6529080f73,2
+np.float64,0x7fc9e81fa833d03e,0x4086217b428cc6ff,2
+np.float64,0xbfd21f1ba5a43e38,0xbfd54e048b988e09,2
+np.float64,0xbfbf52af5a3ea560,0xbfc0b4ab3b81fafc,2
+np.float64,0x7fe475f8e268ebf1,0x40862aaf14fee029,2
+np.float64,0x3fcf56899f3ead10,0x3fcc081de28ae9cf,2
+np.float64,0x917d407122fa8,0x917d407122fa8,2
+np.float64,0x22e23e3245c49,0x22e23e3245c49,2
+np.float64,0xbfeec2814f3d8503,0xc00a00ecca27b426,2
+np.float64,0xbfd97fee1c32ffdc,0xbfe04351dfe306ec,2
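Each row in these validation files encodes a dtype, an input, a reference output, and a tolerance; the two hex columns are raw IEEE-754 bit patterns, not numeric literals. As a minimal, hedged sketch of how one such row could be checked against the ufunc under test (np.log2 for the file added below) — check_row is a hypothetical helper for illustration, not NumPy's actual test harness:

    import numpy as np

    # Map the dtype column to a float type plus a same-width unsigned integer
    # type, used to reinterpret the hex columns as raw bit patterns.
    TYPES = {
        "np.float32": (np.float32, np.uint32),
        "np.float64": (np.float64, np.uint64),
    }

    def check_row(line, func=np.log2):
        # Hypothetical helper: parse one data row (not the CSV header line).
        dtype_name, in_hex, out_hex, tol = line.strip().split(",")
        ftype, itype = TYPES[dtype_name]
        x = np.array([int(in_hex, 16)], dtype=itype).view(ftype)[0]
        expected = np.array([int(out_hex, 16)], dtype=itype).view(ftype)[0]
        with np.errstate(all="ignore"):  # inputs include zeros and subnormals
            actual = func(x)
        if np.isnan(expected):
            return np.isnan(actual)
        if np.isinf(expected):
            return actual == expected
        # Approximate ULP distance: np.spacing(expected) is one ULP there.
        return abs(float(actual) - float(expected)) <= int(tol) * np.spacing(expected)

    # e.g. the row "np.float32,0x1,0xc3150000,3" below encodes
    # log2(smallest float32 subnormal) == -149.0, to within 3 ULP.
    assert check_row("np.float32,0x1,0xc3150000,3")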
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv
new file mode 100644
index 00000000..179c6519
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-log2.csv
@@ -0,0 +1,1629 @@
+dtype,input,output,ulperrortol
+np.float32,0x80000000,0xff800000,3
+np.float32,0x7f12870a,0x42fe63db,3
+np.float32,0x3ef29cf5,0xbf89eb12,3
+np.float32,0x3d6ba8fb,0xc083d26c,3
+np.float32,0x3d9907e8,0xc06f8230,3
+np.float32,0x4ee592,0xc2fd656e,3
+np.float32,0x58d8b1,0xc2fd0db3,3
+np.float32,0x7ba103,0xc2fc19aa,3
+np.float32,0x7f52e90e,0x42ff70e4,3
+np.float32,0x7fcb15,0xc2fc0132,3
+np.float32,0x7cb7129f,0x42f50855,3
+np.float32,0x9faba,0xc301ae59,3
+np.float32,0x7f300a,0xc2fc04b4,3
+np.float32,0x3f0bf047,0xbf5f10cb,3
+np.float32,0x2fb1fb,0xc2fed934,3
+np.float32,0x3eedb0d1,0xbf8db417,3
+np.float32,0x3d7a0b40,0xc0811638,3
+np.float32,0x2e0bac,0xc2fef334,3
+np.float32,0x6278c1,0xc2fcc1b9,3
+np.float32,0x7f61ab2e,0x42ffa2d9,3
+np.float32,0x8fe7c,0xc301d4be,3
+np.float32,0x3f25e6ee,0xbf203536,3
+np.float32,0x7efc78f0,0x42fdf5c0,3
+np.float32,0x6d7304,0xc2fc73a7,3
+np.float32,0x7f1a472a,0x42fe89ed,3
+np.float32,0x7dd029a6,0x42f96734,3
+np.float32,0x3e9b9327,0xbfdbf8f7,3
+np.float32,0x3f4eefc1,0xbe9d2942,3
+np.float32,0x7f5b9b64,0x42ff8ebc,3
+np.float32,0x3e458ee1,0xc017ed6e,3
+np.float32,0x3f7b766b,0xbcd35acf,3
+np.float32,0x3e616070,0xc00bc378,3
+np.float32,0x7f20e633,0x42fea8f8,3
+np.float32,0x3ee3b461,0xbf95a126,3
+np.float32,0x7e7722ba,0x42fbe5f8,3
+np.float32,0x3f0873d7,0xbf6861fa,3
+np.float32,0x7b4cb2,0xc2fc1ba3,3
+np.float32,0x3f0b6b02,0xbf60712e,3
+np.float32,0x9bff4,0xc301b6f2,3
+np.float32,0x3f07be25,0xbf6a4f0c,3
+np.float32,0x3ef10e57,0xbf8b1b75,3
+np.float32,0x46ad75,0xc2fdb6b1,3
+np.float32,0x3f7bc542,0xbcc4e3a9,3
+np.float32,0x3f6673d4,0xbe1b509c,3
+np.float32,0x7f19fe59,0x42fe8890,3
+np.float32,0x7f800000,0x7f800000,3
+np.float32,0x7f2fe696,0x42feead0,3
+np.float32,0x3dc9432d,0xc0563655,3
+np.float32,0x3ee47623,0xbf950446,3
+np.float32,0x3f1f8817,0xbf2eab51,3
+np.float32,0x7f220ec5,0x42feae44,3
+np.float32,0x2325e3,0xc2ffbab1,3
+np.float32,0x29dfc8,0xc2ff395a,3
+np.float32,0x7f524950,0x42ff6eb3,3
+np.float32,0x3e2234e0,0xc02a21c8,3
+np.float32,0x7f1c6f5a,0x42fe942f,3
+np.float32,0x3b6a61,0xc2fe36e7,3
+np.float32,0x3f1df90e,0xbf324ba9,3
+np.float32,0xb57f0,0xc3017f07,3
+np.float32,0x7d0eba,0xc2fc112e,3
+np.float32,0x403aa9,0xc2fdfd5c,3
+np.float32,0x3e74ecc7,0xc004155f,3
+np.float32,0x17509c,0xc30074f2,3
+np.float32,0x7f62196b,0x42ffa442,3
+np.float32,0x3ecef9a9,0xbfa7417a,3
+np.float32,0x7f14b158,0x42fe6eb1,3
+np.float32,0x3ede12be,0xbf9a40fe,3
+np.float32,0x42cfaa,0xc2fde03f,3
+np.float32,0x3f407b0f,0xbed2a6f5,3
+np.float32,0x7f7fffff,0x43000000,3
+np.float32,0x5467c6,0xc2fd3394,3
+np.float32,0x7ea6b80f,0x42fcc336,3
+np.float32,0x3f21e7b2,0xbf293704,3
+np.float32,0x3dc7e9eb,0xc056d542,3
+np.float32,0x7f3e6e67,0x42ff2571,3
+np.float32,0x3e3e809d,0xc01b4911,3
+np.float32,0x3f800000,0x0,3
+np.float32,0x3d8fd238,0xc0753d52,3
+np.float32,0x3f74aa65,0xbd85cd0e,3
+np.float32,0x7ec30305,0x42fd36ff,3
+np.float32,0x3e97bb93,0xbfe0971d,3
+np.float32,0x3e109d9c,0xc034bb1b,3
+np.float32,0x3f4a0b67,0xbeaed537,3
+np.float32,0x3f25a7aa,0xbf20c228,3
+np.float32,0x3ebc05eb,0xbfb8fd6b,3
+np.float32,0x3eebe749,0xbf8f18e5,3
+np.float32,0x3e9dc479,0xbfd96356,3
+np.float32,0x7f245200,0x42feb882,3
+np.float32,0x1573a8,0xc30093b5,3
+np.float32,0x3e66c4b9,0xc00994a6,3
+np.float32,0x3e73bffc,0xc0048709,3
+np.float32,0x3dfef8e5,0xc0405f16,3
+np.float32,0x403750,0xc2fdfd83,3
+np.float32,0x3ebedf17,0xbfb636a4,3
+np.float32,0x15cae6,0xc3008de2,3
+np.float32,0x3edf4d4e,0xbf993c24,3
+np.float32,0x3f7cc41e,0xbc963fb3,3
+np.float32,0x3e9e12a4,0xbfd907ee,3
+np.float32,0x7ded7b59,0x42f9c889,3
+np.float32,0x7f034878,0x42fe12b5,3
+np.float32,0x7ddce43f,0x42f9930b,3
+np.float32,0x3d82b257,0xc07e1333,3
+np.float32,0x3dae89c1,0xc0635dd4,3
+np.float32,0x6b1d00,0xc2fc8396,3
+np.float32,0x449a5a,0xc2fdccb3,3
+np.float32,0x4e89d2,0xc2fd68cb,3
+np.float32,0x7e1ae83f,0x42fa8cef,3
+np.float32,0x7e4bb22c,0x42fb572e,3
+np.float32,0x3de308ea,0xc04b1634,3
+np.float32,0x7f238c7a,0x42feb508,3
+np.float32,0x3f6c62a3,0xbdeb86f3,3
+np.float32,0x3e58cba6,0xc00f5908,3
+np.float32,0x7f7dd91f,0x42fff9c4,3
+np.float32,0x3d989376,0xc06fc88d,3
+np.float32,0x3dd013c5,0xc0532339,3
+np.float32,0x4b17e6,0xc2fd89ed,3
+np.float32,0x7f67f287,0x42ffb71e,3
+np.float32,0x3f69365e,0xbe09ba3c,3
+np.float32,0x3e4b8b21,0xc0152bf1,3
+np.float32,0x3a75b,0xc3032171,3
+np.float32,0x7f303676,0x42feec1f,3
+np.float32,0x7f6570e5,0x42ffaf18,3
+np.float32,0x3f5ed61e,0xbe4cf676,3
+np.float32,0x3e9b22f9,0xbfdc7e4f,3
+np.float32,0x2c095e,0xc2ff1428,3
+np.float32,0x3f1b17c1,0xbf391754,3
+np.float32,0x422dc6,0xc2fde746,3
+np.float32,0x3f677c8d,0xbe14b365,3
+np.float32,0x3ef85d0c,0xbf8597a9,3
+np.float32,0x3ecaaa6b,0xbfab2430,3
+np.float32,0x3f0607d1,0xbf6eff3d,3
+np.float32,0x3f011fdb,0xbf7cc50d,3
+np.float32,0x6ed7c1,0xc2fc6a4e,3
+np.float32,0x7ec2d1a2,0x42fd3644,3
+np.float32,0x3f75b7fe,0xbd7238a2,3
+np.float32,0x3ef2d146,0xbf89c344,3
+np.float32,0x7ec2cd27,0x42fd3633,3
+np.float32,0x7ee1e55a,0x42fda397,3
+np.float32,0x7f464d6a,0x42ff435c,3
+np.float32,0x7f469a93,0x42ff447b,3
+np.float32,0x7ece752f,0x42fd6121,3
+np.float32,0x2ed878,0xc2fee67b,3
+np.float32,0x75b23,0xc3021eff,3
+np.float32,0x3e0f4be4,0xc03593b8,3
+np.float32,0x2778e1,0xc2ff64fc,3
+np.float32,0x5fe2b7,0xc2fcd561,3
+np.float32,0x19b8a9,0xc30050ab,3
+np.float32,0x7df303e5,0x42f9d98d,3
+np.float32,0x608b8d,0xc2fcd051,3
+np.float32,0x588f46,0xc2fd1017,3
+np.float32,0x3eec6a11,0xbf8eb2a1,3
+np.float32,0x3f714121,0xbdaf4906,3
+np.float32,0x7f4f7b9e,0x42ff64c9,3
+np.float32,0x3c271606,0xc0d3b29c,3
+np.float32,0x3f002fe0,0xbf7f75f6,3
+np.float32,0x7efa4798,0x42fdef4f,3
+np.float32,0x3f61a865,0xbe3a601a,3
+np.float32,0x7e8087aa,0x42fc030d,3
+np.float32,0x3f70f0c7,0xbdb321ba,3
+np.float32,0x5db898,0xc2fce63f,3
+np.float32,0x7a965f,0xc2fc1fea,3
+np.float32,0x7f68b112,0x42ffb97c,3
+np.float32,0x7ef0ed3d,0x42fdd32d,3
+np.float32,0x7f3156a1,0x42fef0d3,3
+np.float32,0x3f1d405f,0xbf33fc6e,3
+np.float32,0x3e3494cf,0xc0203945,3
+np.float32,0x6018de,0xc2fcd3c1,3
+np.float32,0x623e49,0xc2fcc370,3
+np.float32,0x3ea29f0f,0xbfd3cad4,3
+np.float32,0xa514,0xc305a20c,3
+np.float32,0x3e1b2ab1,0xc02e3a8f,3
+np.float32,0x3f450b6f,0xbec1578f,3
+np.float32,0x7eb12908,0x42fcf015,3
+np.float32,0x3f10b720,0xbf52ab48,3
+np.float32,0x3e0a93,0xc2fe16f6,3
+np.float32,0x93845,0xc301cb96,3
+np.float32,0x7f4e9ce3,0x42ff61af,3
+np.float32,0x3f6d4296,0xbde09ceb,3
+np.float32,0x6ddede,0xc2fc70d0,3
+np.float32,0x3f4fb6fd,0xbe9a636d,3
+np.float32,0x3f6d08de,0xbde36c0b,3
+np.float32,0x3f56f057,0xbe8122ad,3
+np.float32,0x334e95,0xc2fea349,3
+np.float32,0x7efadbcd,0x42fdf104,3
+np.float32,0x3db02e88,0xc0628046,3
+np.float32,0x3f3309d1,0xbf041066,3
+np.float32,0x2d8722,0xc2fefb8f,3
+np.float32,0x7e926cac,0x42fc6356,3
+np.float32,0x3e3674ab,0xc01f452e,3
+np.float32,0x1b46ce,0xc3003afc,3
+np.float32,0x3f06a338,0xbf6d53fc,3
+np.float32,0x1b1ba7,0xc3003d46,3
+np.float32,0x319dfb,0xc2febc06,3
+np.float32,0x3e2f126a,0xc02315a5,3
+np.float32,0x3f40fe65,0xbed0af9e,3
+np.float32,0x3f1d842f,0xbf335d4b,3
+np.float32,0x3d044e4f,0xc09e78f8,3
+np.float32,0x7f272674,0x42fec51f,3
+np.float32,0x3cda6d8f,0xc0a753db,3
+np.float32,0x3eb92f12,0xbfbbccbb,3
+np.float32,0x7e4318f4,0x42fb3752,3
+np.float32,0x3c5890,0xc2fe2b6d,3
+np.float32,0x3d1993c9,0xc09796f8,3
+np.float32,0x7f18ef24,0x42fe8377,3
+np.float32,0x3e30c3a0,0xc0223244,3
+np.float32,0x3f27cd27,0xbf1c00ef,3
+np.float32,0x3f150957,0xbf47cd6c,3
+np.float32,0x7e7178a3,0x42fbd4d8,3
+np.float32,0x3f298db8,0xbf182ac3,3
+np.float32,0x7cb3be,0xc2fc1348,3
+np.float32,0x3ef64266,0xbf8729de,3
+np.float32,0x3eeb06ce,0xbf8fc8f2,3
+np.float32,0x3f406e36,0xbed2d845,3
+np.float32,0x7f1e1bd3,0x42fe9c0b,3
+np.float32,0x478dcc,0xc2fdad97,3
+np.float32,0x7f7937b5,0x42ffec2b,3
+np.float32,0x3f20f350,0xbf2b6624,3
+np.float32,0x7f13661a,0x42fe683c,3
+np.float32,0x208177,0xc2fff46b,3
+np.float32,0x263cfb,0xc2ff7c72,3
+np.float32,0x7f0bd28c,0x42fe4141,3
+np.float32,0x7230d8,0xc2fc5453,3
+np.float32,0x3f261bbf,0xbf1fbfb4,3
+np.float32,0x737b56,0xc2fc4c05,3
+np.float32,0x3ef88f33,0xbf857263,3
+np.float32,0x7e036464,0x42fa1352,3
+np.float32,0x4b5c4f,0xc2fd874d,3
+np.float32,0x3f77984d,0xbd454596,3
+np.float32,0x3f674202,0xbe162932,3
+np.float32,0x3e7157d9,0xc0057197,3
+np.float32,0x3f3f21da,0xbed7d861,3
+np.float32,0x7f1fb40f,0x42fea375,3
+np.float32,0x7ef0157f,0x42fdd096,3
+np.float32,0x3f71e88d,0xbda74962,3
+np.float32,0x3f174855,0xbf424728,3
+np.float32,0x3f3fdd2c,0xbed505d5,3
+np.float32,0x7b95d1,0xc2fc19ed,3
+np.float32,0x7f23f4e5,0x42feb6df,3
+np.float32,0x7d741925,0x42f7dcd6,3
+np.float32,0x60f81d,0xc2fccd14,3
+np.float32,0x3f17d267,0xbf40f6ae,3
+np.float32,0x3f036fc8,0xbf7636f8,3
+np.float32,0x167653,0xc30082b5,3
+np.float32,0x256d05,0xc2ff8c4f,3
+np.float32,0x3eccc63d,0xbfa93adb,3
+np.float32,0x7f6c91ea,0x42ffc5b2,3
+np.float32,0x2ee52a,0xc2fee5b3,3
+np.float32,0x3dc3579e,0xc058f80d,3
+np.float32,0x4c7170,0xc2fd7cc4,3
+np.float32,0x7f737f20,0x42ffdb03,3
+np.float32,0x3f2f9dbf,0xbf0b3119,3
+np.float32,0x3f4d0c54,0xbea3eec5,3
+np.float32,0x7e380862,0x42fb0c32,3
+np.float32,0x5d637f,0xc2fce8df,3
+np.float32,0x3f0aa623,0xbf627c27,3
+np.float32,0x3e4d5896,0xc0145b88,3
+np.float32,0x3f6cacdc,0xbde7e7ca,3
+np.float32,0x63a2c3,0xc2fcb90a,3
+np.float32,0x6c138c,0xc2fc7cfa,3
+np.float32,0x2063c,0xc303fb88,3
+np.float32,0x7e9e5a3e,0x42fc9d2f,3
+np.float32,0x56ec64,0xc2fd1ddd,3
+np.float32,0x7f1d6a35,0x42fe98cc,3
+np.float32,0x73dc96,0xc2fc4998,3
+np.float32,0x3e5d74e5,0xc00d6238,3
+np.float32,0x7f033cbb,0x42fe1273,3
+np.float32,0x3f5143fc,0xbe94e4e7,3
+np.float32,0x1d56d9,0xc3002010,3
+np.float32,0x2bf3e4,0xc2ff1591,3
+np.float32,0x3f2a6ef1,0xbf164170,3
+np.float32,0x3f33238b,0xbf03db58,3
+np.float32,0x22780e,0xc2ffc91a,3
+np.float32,0x7f00b873,0x42fe0425,3
+np.float32,0x3f7f6145,0xbb654706,3
+np.float32,0x7fc00000,0x7fc00000,3
+np.float32,0x63895a,0xc2fcb9c7,3
+np.float32,0x18a1b2,0xc30060a8,3
+np.float32,0x7e43c6a6,0x42fb39e3,3
+np.float32,0x78676e,0xc2fc2d30,3
+np.float32,0x3f16d839,0xbf435940,3
+np.float32,0x7eff78ba,0x42fdfe79,3
+np.float32,0x3f2e152c,0xbf0e6e54,3
+np.float32,0x3db20ced,0xc06186e1,3
+np.float32,0x3f0cd1d8,0xbf5cbf57,3
+np.float32,0x3fd7a8,0xc2fe01d2,3
+np.float32,0x3ebb075e,0xbfb9f816,3
+np.float32,0x7f94ef,0xc2fc026b,3
+np.float32,0x3d80ba0e,0xc07f7a2b,3
+np.float32,0x7f227e15,0x42feb03f,3
+np.float32,0x792264bf,0x42e6afcc,3
+np.float32,0x7f501576,0x42ff66ec,3
+np.float32,0x223629,0xc2ffcea3,3
+np.float32,0x40a79e,0xc2fdf87b,3
+np.float32,0x449483,0xc2fdccf2,3
+np.float32,0x3f4fa978,0xbe9a9382,3
+np.float32,0x7f148c53,0x42fe6df9,3
+np.float32,0x3ec98b3c,0xbfac2a98,3
+np.float32,0x3e4da320,0xc0143a0a,3
+np.float32,0x3d1d94bb,0xc09666d0,3
+np.float32,0x3c8e624e,0xc0bb155b,3
+np.float32,0x66a9af,0xc2fca2ef,3
+np.float32,0x3ec76ed7,0xbfae1c57,3
+np.float32,0x3f4b52f3,0xbeaa2b81,3
+np.float32,0x7e99bbb5,0x42fc8750,3
+np.float32,0x3f69a46b,0xbe0701be,3
+np.float32,0x3f775400,0xbd4ba495,3
+np.float32,0x131e56,0xc300be3c,3
+np.float32,0x3f30abb4,0xbf08fb10,3
+np.float32,0x7f7e528c,0x42fffb25,3
+np.float32,0x3eb89515,0xbfbc668a,3
+np.float32,0x7e9191b6,0x42fc5f02,3
+np.float32,0x7e80c7e9,0x42fc047e,3
+np.float32,0x3f77ef58,0xbd3d2995,3
+np.float32,0x7ddb1f8a,0x42f98d1b,3
+np.float32,0x7ebc6c4f,0x42fd1d9c,3
+np.float32,0x3f6638e0,0xbe1ccab8,3
+np.float32,0x7f4c45,0xc2fc0410,3
+np.float32,0x3e7d8aad,0xc000e414,3
+np.float32,0x3f4d148b,0xbea3d12e,3
+np.float32,0x3e98c45c,0xbfdf55f4,3
+np.float32,0x3d754c78,0xc081f8a9,3
+np.float32,0x17e4cf,0xc3006be3,3
+np.float32,0x7eb65814,0x42fd0563,3
+np.float32,0x3f65e0d8,0xbe1f0008,3
+np.float32,0x3e99541f,0xbfdea87e,3
+np.float32,0x3f3cb80e,0xbee13b27,3
+np.float32,0x3e99f0c0,0xbfddec3b,3
+np.float32,0x3f43903e,0xbec6ea66,3
+np.float32,0x7e211cd4,0x42faa9f2,3
+np.float32,0x824af,0xc301f971,3
+np.float32,0x3e16a56e,0xc030f56c,3
+np.float32,0x542b3b,0xc2fd35a6,3
+np.float32,0x3eeea2d1,0xbf8cf873,3
+np.float32,0x232e93,0xc2ffb9fa,3
+np.float32,0x3e8c52b9,0xbfef06aa,3
+np.float32,0x7f69c7e3,0x42ffbcef,3
+np.float32,0x3f573e43,0xbe801714,3
+np.float32,0x43b009,0xc2fdd69f,3
+np.float32,0x3ee571ab,0xbf943966,3
+np.float32,0x3ee3d5d8,0xbf958604,3
+np.float32,0x338b12,0xc2fe9fe4,3
+np.float32,0x29cb1f,0xc2ff3ac6,3
+np.float32,0x3f0892b4,0xbf680e7a,3
+np.float32,0x3e8c4f7f,0xbfef0ae9,3
+np.float32,0x7c9d3963,0x42f497e6,3
+np.float32,0x3f26ba84,0xbf1e5f59,3
+np.float32,0x3dd0acc0,0xc052df6f,3
+np.float32,0x3e43fbda,0xc018aa8c,3
+np.float32,0x3ec4fd0f,0xbfb0635d,3
+np.float32,0x3f52c8c6,0xbe8f8d85,3
+np.float32,0x3f5fdc5d,0xbe462fdb,3
+np.float32,0x3f461920,0xbebd6743,3
+np.float32,0x6161ff,0xc2fcc9ef,3
+np.float32,0x7f7ed306,0x42fffc9a,3
+np.float32,0x3d212263,0xc0955f46,3
+np.float32,0x3eca5826,0xbfab6f36,3
+np.float32,0x7d6317ac,0x42f7a77e,3
+np.float32,0x3eb02063,0xbfc50f60,3
+np.float32,0x7f71a6f8,0x42ffd565,3
+np.float32,0x1a3efe,0xc3004935,3
+np.float32,0x3dc599c9,0xc057e856,3
+np.float32,0x3f3e1301,0xbedbf205,3
+np.float32,0xf17d4,0xc301158d,3
+np.float32,0x3f615f84,0xbe3c3d85,3
+np.float32,0x3de63be1,0xc049cb77,3
+np.float32,0x3e8d2f51,0xbfede541,3
+np.float32,0x3a5cdd,0xc2fe441c,3
+np.float32,0x3f443ec0,0xbec4586a,3
+np.float32,0x3eacbd00,0xbfc8a5ad,3
+np.float32,0x3f600f6a,0xbe44df1b,3
+np.float32,0x5f77a6,0xc2fcd89c,3
+np.float32,0x476706,0xc2fdaf28,3
+np.float32,0x2f469,0xc3036fde,3
+np.float32,0x7dc4ba24,0x42f93d77,3
+np.float32,0x3e2d6080,0xc023fb9b,3
+np.float32,0x7e8d7135,0x42fc49c3,3
+np.float32,0x3f589065,0xbe77247b,3
+np.float32,0x3f59e210,0xbe6e2c05,3
+np.float32,0x7f51d388,0x42ff6d15,3
+np.float32,0x7d9a5fda,0x42f88a63,3
+np.float32,0x3e67d5bc,0xc00927ab,3
+np.float32,0x61d72c,0xc2fcc679,3
+np.float32,0x3ef3351d,0xbf897766,3
+np.float32,0x1,0xc3150000,3
+np.float32,0x7f653429,0x42ffae54,3
+np.float32,0x7e1ad3e5,0x42fa8c8e,3
+np.float32,0x3f4ca01d,0xbea57500,3
+np.float32,0x3f7606db,0xbd6ad13e,3
+np.float32,0x7ec4a27d,0x42fd3d1f,3
+np.float32,0x3efe4fd5,0xbf8138c7,3
+np.float32,0x77c2f1,0xc2fc3124,3
+np.float32,0x7e4d3251,0x42fb5c9a,3
+np.float32,0x3f543ac7,0xbe8a8154,3
+np.float32,0x7c3dbe29,0x42f322c4,3
+np.float32,0x408e01,0xc2fdf9a0,3
+np.float32,0x45069b,0xc2fdc829,3
+np.float32,0x3d7ecab7,0xc08037e8,3
+np.float32,0xf8c22,0xc3010a99,3
+np.float32,0x7f69af63,0x42ffbca2,3
+np.float32,0x7ec7d228,0x42fd48fe,3
+np.float32,0xff800000,0xffc00000,3
+np.float32,0xdd7c5,0xc301357c,3
+np.float32,0x143f38,0xc300a90e,3
+np.float32,0x7e65c176,0x42fbb01b,3
+np.float32,0x2c1a9e,0xc2ff1307,3
+np.float32,0x7f6e9224,0x42ffcbeb,3
+np.float32,0x3d32ab39,0xc0909a77,3
+np.float32,0x3e150b42,0xc031f22b,3
+np.float32,0x1f84b4,0xc300059a,3
+np.float32,0x3f71ce21,0xbda88c2a,3
+np.float32,0x2625c4,0xc2ff7e33,3
+np.float32,0x3dd0b293,0xc052dcdc,3
+np.float32,0x625c11,0xc2fcc290,3
+np.float32,0x3f610297,0xbe3e9f24,3
+np.float32,0x7ebdd5e5,0x42fd2320,3
+np.float32,0x3e883458,0xbff486ff,3
+np.float32,0x782313,0xc2fc2ed4,3
+np.float32,0x7f39c843,0x42ff132f,3
+np.float32,0x7f326aa7,0x42fef54d,3
+np.float32,0x4d2c71,0xc2fd75be,3
+np.float32,0x3f55747c,0xbe86409e,3
+np.float32,0x7f7f0867,0x42fffd34,3
+np.float32,0x321316,0xc2feb53f,3
+np.float32,0x3e1b37ed,0xc02e32b0,3
+np.float32,0x80edf,0xc301fd54,3
+np.float32,0x3f0b08ad,0xbf617607,3
+np.float32,0x7f3f4174,0x42ff28a2,3
+np.float32,0x3d79306d,0xc0813eb0,3
+np.float32,0x3f5f657a,0xbe49413d,3
+np.float32,0x3f56c63a,0xbe81b376,3
+np.float32,0x7f667123,0x42ffb24f,3
+np.float32,0x3f71021b,0xbdb24d43,3
+np.float32,0x7f434ab1,0x42ff380f,3
+np.float32,0x3dcae496,0xc055779c,3
+np.float32,0x3f5a7d88,0xbe6a0f5b,3
+np.float32,0x3cdf5c32,0xc0a64bf5,3
+np.float32,0x3e56222c,0xc0107d11,3
+np.float32,0x561a3a,0xc2fd24df,3
+np.float32,0x7ddd953c,0x42f9955a,3
+np.float32,0x7e35d839,0x42fb035c,3
+np.float32,0x3ec1816c,0xbfb3aeb2,3
+np.float32,0x7c87cfcd,0x42f42bc2,3
+np.float32,0xd9cd,0xc3053baf,3
+np.float32,0x3f388234,0xbef1e5b7,3
+np.float32,0x3edfcaca,0xbf98d47b,3
+np.float32,0x3ef28852,0xbf89fac8,3
+np.float32,0x7f7525df,0x42ffe001,3
+np.float32,0x7f6c33ef,0x42ffc48c,3
+np.float32,0x3ea4a881,0xbfd17e61,3
+np.float32,0x3f3e379f,0xbedb63c6,3
+np.float32,0x3f0524c1,0xbf717301,3
+np.float32,0x3db3e7f0,0xc06091d3,3
+np.float32,0x800000,0xc2fc0000,3
+np.float32,0x3f2f2897,0xbf0c27ce,3
+np.float32,0x7eb1776d,0x42fcf15c,3
+np.float32,0x3f039018,0xbf75dc37,3
+np.float32,0x3c4055,0xc2fe2c96,3
+np.float32,0x3f603653,0xbe43dea5,3
+np.float32,0x7f700d24,0x42ffd07c,3
+np.float32,0x3f4741a3,0xbeb918dc,3
+np.float32,0x3f5fe959,0xbe45da2d,3
+np.float32,0x3f3e4401,0xbedb33b1,3
+np.float32,0x7f0705ff,0x42fe2775,3
+np.float32,0x3ea85662,0xbfcd69b0,3
+np.float32,0x3f15f49f,0xbf458829,3
+np.float32,0x3f17c50e,0xbf411728,3
+np.float32,0x3e483f60,0xc016add2,3
+np.float32,0x3f1ab9e5,0xbf39f71b,3
+np.float32,0x3de0b6fb,0xc04c08fe,3
+np.float32,0x7e671225,0x42fbb452,3
+np.float32,0x80800000,0xffc00000,3
+np.float32,0xe2df3,0xc3012c9d,3
+np.float32,0x3ede1e3c,0xbf9a3770,3
+np.float32,0x3df2ffde,0xc044cfec,3
+np.float32,0x3eed8da5,0xbf8dcf6c,3
+np.float32,0x3ead15c3,0xbfc846e1,3
+np.float32,0x7ef3750a,0x42fddae4,3
+np.float32,0x7e6ab7c0,0x42fbbfe4,3
+np.float32,0x7ea4bbe5,0x42fcba5d,3
+np.float32,0x3f227706,0xbf27f0a1,3
+np.float32,0x3ef39bfd,0xbf89295a,3
+np.float32,0x3f289a20,0xbf1a3edd,3
+np.float32,0x7f225f82,0x42feafb4,3
+np.float32,0x768963,0xc2fc38bc,3
+np.float32,0x3f493c00,0xbeb1ccfc,3
+np.float32,0x3f4e7249,0xbe9ee9a7,3
+np.float32,0x1d0c3a,0xc30023c0,3
+np.float32,0x7f3c5f78,0x42ff1d6a,3
+np.float32,0xff7fffff,0xffc00000,3
+np.float32,0x3ee7896a,0xbf928c2a,3
+np.float32,0x3e788479,0xc002bd2e,3
+np.float32,0x3ee4df17,0xbf94af84,3
+np.float32,0x5e06d7,0xc2fce3d7,3
+np.float32,0x3d7b2776,0xc080e1dc,3
+np.float32,0x3e3d39d3,0xc01be7fd,3
+np.float32,0x7c81dece,0x42f40ab7,3
+np.float32,0x3f7d2085,0xbc856255,3
+np.float32,0x7f7f6627,0x42fffe44,3
+np.float32,0x7f5f2e94,0x42ff9aaa,3
+np.float32,0x7f5835f2,0x42ff8339,3
+np.float32,0x3f6a0e32,0xbe046580,3
+np.float32,0x7e16f586,0x42fa79dd,3
+np.float32,0x3f04a2f2,0xbf72dbc5,3
+np.float32,0x3f35e334,0xbefc7740,3
+np.float32,0x3f0d056e,0xbf5c3824,3
+np.float32,0x7ebeb95e,0x42fd2693,3
+np.float32,0x3c6192,0xc2fe2aff,3
+np.float32,0x3e892b4f,0xbff33958,3
+np.float32,0x3f61d694,0xbe3931df,3
+np.float32,0x29d183,0xc2ff3a56,3
+np.float32,0x7f0b0598,0x42fe3d04,3
+np.float32,0x7f743b28,0x42ffdd3d,3
+np.float32,0x3a2ed6,0xc2fe4663,3
+np.float32,0x3e27403a,0xc0274de8,3
+np.float32,0x3f58ee78,0xbe74a349,3
+np.float32,0x3eaa4b,0xc2fe0f92,3
+np.float32,0x3ecb613b,0xbfaa7de8,3
+np.float32,0x7f637d81,0x42ffa8c9,3
+np.float32,0x3f026e96,0xbf790c73,3
+np.float32,0x386cdf,0xc2fe5d0c,3
+np.float32,0x35abd1,0xc2fe8202,3
+np.float32,0x3eac3cd1,0xbfc92ee8,3
+np.float32,0x3f567869,0xbe82bf47,3
+np.float32,0x3f65c643,0xbe1faae6,3
+np.float32,0x7f5422b9,0x42ff752b,3
+np.float32,0x7c26e9,0xc2fc168c,3
+np.float32,0x7eff5cfd,0x42fdfe29,3
+np.float32,0x3f728e7f,0xbd9f6142,3
+np.float32,0x3f10fd43,0xbf51f874,3
+np.float32,0x7e7ada08,0x42fbf0fe,3
+np.float32,0x3e82a611,0xbffc37be,3
+np.float32,0xbf800000,0xffc00000,3
+np.float32,0x3dbe2e12,0xc05b711c,3
+np.float32,0x7e768fa9,0x42fbe440,3
+np.float32,0x5e44e8,0xc2fce1f0,3
+np.float32,0x7f25071a,0x42febbae,3
+np.float32,0x3f54db5e,0xbe885339,3
+np.float32,0x3f0f2c26,0xbf56a0b8,3
+np.float32,0x22f9a7,0xc2ffbe55,3
+np.float32,0x7ed63dcb,0x42fd7c77,3
+np.float32,0x7ea4fae2,0x42fcbb78,3
+np.float32,0x3f1d7766,0xbf337b47,3
+np.float32,0x7f16d59f,0x42fe7941,3
+np.float32,0x3f3a1bb6,0xbeeb855c,3
+np.float32,0x3ef57128,0xbf87c709,3
+np.float32,0xb24ff,0xc3018591,3
+np.float32,0x3ef99e27,0xbf84a983,3
+np.float32,0x3eac2ccf,0xbfc94013,3
+np.float32,0x3e9d3e1e,0xbfda00dc,3
+np.float32,0x718213,0xc2fc58c1,3
+np.float32,0x7edbf509,0x42fd8fea,3
+np.float32,0x70c7f1,0xc2fc5d80,3
+np.float32,0x3f7012f5,0xbdbdc6cd,3
+np.float32,0x12cba,0xc304c487,3
+np.float32,0x7f5d445d,0x42ff944c,3
+np.float32,0x7f3e30bd,0x42ff2481,3
+np.float32,0x63b110,0xc2fcb8a0,3
+np.float32,0x3f39f728,0xbeec1680,3
+np.float32,0x3f5bea58,0xbe6074b1,3
+np.float32,0x3f350749,0xbefff679,3
+np.float32,0x3e91ab2c,0xbfe81f3e,3
+np.float32,0x7ec53fe0,0x42fd3f6d,3
+np.float32,0x3f6cbbdc,0xbde72c8e,3
+np.float32,0x3f4df49f,0xbea0abcf,3
+np.float32,0x3e9c9638,0xbfdac674,3
+np.float32,0x7f3b82ec,0x42ff1a07,3
+np.float32,0x7f612a09,0x42ffa132,3
+np.float32,0x7ea26650,0x42fcafd3,3
+np.float32,0x3a615138,0xc122f26d,3
+np.float32,0x3f1108bd,0xbf51db39,3
+np.float32,0x6f80f6,0xc2fc65ea,3
+np.float32,0x3f7cb578,0xbc98ecb1,3
+np.float32,0x7f54d31a,0x42ff7790,3
+np.float32,0x196868,0xc3005532,3
+np.float32,0x3f01ee0a,0xbf7a7925,3
+np.float32,0x3e184013,0xc02ffb11,3
+np.float32,0xadde3,0xc3018ee3,3
+np.float32,0x252a91,0xc2ff9173,3
+np.float32,0x3f0382c2,0xbf7601a9,3
+np.float32,0x6d818c,0xc2fc7345,3
+np.float32,0x3bfbfd,0xc2fe2fdd,3
+np.float32,0x7f3cad19,0x42ff1e9a,3
+np.float32,0x4169a7,0xc2fdefdf,3
+np.float32,0x3f615d96,0xbe3c4a2b,3
+np.float32,0x3f036480,0xbf7656ac,3
+np.float32,0x7f5fbda3,0x42ff9c83,3
+np.float32,0x3d202d,0xc2fe21f1,3
+np.float32,0x3d0f5e5d,0xc09ac3e9,3
+np.float32,0x3f0fff6e,0xbf548142,3
+np.float32,0x7f11ed32,0x42fe60d2,3
+np.float32,0x3e6f856b,0xc00624b6,3
+np.float32,0x7f7c4dd7,0x42fff542,3
+np.float32,0x3e76fb86,0xc0034fa0,3
+np.float32,0x3e8a0d6e,0xbff209e7,3
+np.float32,0x3eacad19,0xbfc8b6ad,3
+np.float32,0xa7776,0xc3019cbe,3
+np.float32,0x3dc84d74,0xc056a754,3
+np.float32,0x3efb8052,0xbf834626,3
+np.float32,0x3f0e55fc,0xbf58cacc,3
+np.float32,0x7e0e71e3,0x42fa4efb,3
+np.float32,0x3ed5a800,0xbfa1639c,3
+np.float32,0x3f33335b,0xbf03babf,3
+np.float32,0x38cad7,0xc2fe5842,3
+np.float32,0x3bc21256,0xc0ecc927,3
+np.float32,0x3f09522d,0xbf660a19,3
+np.float32,0xcbd5d,0xc3015428,3
+np.float32,0x492752,0xc2fd9d42,3
+np.float32,0x3f2b9b32,0xbf13b904,3
+np.float32,0x6544ac,0xc2fcad09,3
+np.float32,0x52eb12,0xc2fd40b5,3
+np.float32,0x3f66a7c0,0xbe1a03e8,3
+np.float32,0x7ab289,0xc2fc1f41,3
+np.float32,0x62af5e,0xc2fcc020,3
+np.float32,0x7f73e9cf,0x42ffdc46,3
+np.float32,0x3e5eca,0xc2fe130e,3
+np.float32,0x3e3a10f4,0xc01d7602,3
+np.float32,0x3f04db46,0xbf723f0d,3
+np.float32,0x18fc4a,0xc3005b63,3
+np.float32,0x525bcb,0xc2fd45b6,3
+np.float32,0x3f6b9108,0xbdf5c769,3
+np.float32,0x3e992e8c,0xbfded5c5,3
+np.float32,0x7efea647,0x42fdfc18,3
+np.float32,0x7e8371db,0x42fc139e,3
+np.float32,0x3f397cfb,0xbeedfc69,3
+np.float32,0x7e46d233,0x42fb454a,3
+np.float32,0x7d5281ad,0x42f76f79,3
+np.float32,0x7f4c1878,0x42ff58a1,3
+np.float32,0x3e96ca5e,0xbfe1bd97,3
+np.float32,0x6a2743,0xc2fc8a3d,3
+np.float32,0x7f688781,0x42ffb8f8,3
+np.float32,0x7814b7,0xc2fc2f2d,3
+np.float32,0x3f2ffdc9,0xbf0a6756,3
+np.float32,0x3f766fa8,0xbd60fe24,3
+np.float32,0x4dc64e,0xc2fd7003,3
+np.float32,0x3a296f,0xc2fe46a8,3
+np.float32,0x3f2af942,0xbf15162e,3
+np.float32,0x7f702c32,0x42ffd0dc,3
+np.float32,0x7e61e318,0x42fba390,3
+np.float32,0x7f7d3bdb,0x42fff7fa,3
+np.float32,0x3ee87f3f,0xbf91c881,3
+np.float32,0x2bbc28,0xc2ff193c,3
+np.float32,0x3e01f918,0xc03e966e,3
+np.float32,0x7f0b39f4,0x42fe3e1a,3
+np.float32,0x3eaa4d64,0xbfcb4516,3
+np.float32,0x3e53901e,0xc0119a88,3
+np.float32,0x603cb,0xc3026957,3
+np.float32,0x7e81f926,0x42fc0b4d,3
+np.float32,0x5dab7c,0xc2fce6a6,3
+np.float32,0x3f46fefd,0xbeba1018,3
+np.float32,0x648448,0xc2fcb28a,3
+np.float32,0x3ec49470,0xbfb0c58b,3
+np.float32,0x3e8a5393,0xbff1ac2b,3
+np.float32,0x3f27ccfc,0xbf1c014e,3
+np.float32,0x3ed886e6,0xbf9eeca8,3
+np.float32,0x7cfbe06e,0x42f5f401,3
+np.float32,0x3f5aa7ba,0xbe68f229,3
+np.float32,0x9500d,0xc301c7e3,3
+np.float32,0x3f4861,0xc2fe0853,3
+np.float32,0x3e5ae104,0xc00e76f5,3
+np.float32,0x71253a,0xc2fc5b1e,3
+np.float32,0xcf7b8,0xc3014d9c,3
+np.float32,0x7f7edd2d,0x42fffcb7,3
+np.float32,0x3e9039ee,0xbfe9f5ab,3
+np.float32,0x2fd54e,0xc2fed712,3
+np.float32,0x3f600752,0xbe45147a,3
+np.float32,0x3f4da8f6,0xbea1bb5c,3
+np.float32,0x3f2d34a9,0xbf104bd9,3
+np.float32,0x3e1e66dd,0xc02c52d2,3
+np.float32,0x798276,0xc2fc2670,3
+np.float32,0xd55e2,0xc3014347,3
+np.float32,0x80000001,0xffc00000,3
+np.float32,0x3e7a5ead,0xc0020da6,3
+np.float32,0x7ec4c744,0x42fd3da9,3
+np.float32,0x597e00,0xc2fd085a,3
+np.float32,0x3dff6bf4,0xc0403575,3
+np.float32,0x5d6f1a,0xc2fce883,3
+np.float32,0x7e21faff,0x42faadea,3
+np.float32,0x3e570fea,0xc01016c6,3
+np.float32,0x28e6b6,0xc2ff4ab7,3
+np.float32,0x7e77062d,0x42fbe5a3,3
+np.float32,0x74cac4,0xc2fc43b0,3
+np.float32,0x3f707273,0xbdb93078,3
+np.float32,0x228e96,0xc2ffc737,3
+np.float32,0x686ac1,0xc2fc966b,3
+np.float32,0x3d76400d,0xc081cae8,3
+np.float32,0x3e9f502f,0xbfd7966b,3
+np.float32,0x3f6bc656,0xbdf32b1f,3
+np.float32,0x3edb828b,0xbf9c65d4,3
+np.float32,0x6c6e56,0xc2fc7a8e,3
+np.float32,0x3f04552e,0xbf73b48f,3
+np.float32,0x3f39cb69,0xbeecc457,3
+np.float32,0x7f681c44,0x42ffb7a3,3
+np.float32,0x7f5b44ee,0x42ff8d99,3
+np.float32,0x3e71430a,0xc005798d,3
+np.float32,0x3edcfde3,0xbf9b27c6,3
+np.float32,0x3f616a5a,0xbe3bf67f,3
+np.float32,0x3f523936,0xbe918548,3
+np.float32,0x3f39ce3a,0xbeecb925,3
+np.float32,0x3eac589a,0xbfc91120,3
+np.float32,0x7efc8d3d,0x42fdf5fc,3
+np.float32,0x5704b0,0xc2fd1d0f,3
+np.float32,0x7e7972e9,0x42fbecda,3
+np.float32,0x3eb0811c,0xbfc4aa13,3
+np.float32,0x7f1efcbb,0x42fea023,3
+np.float32,0x3e0b9e32,0xc037fa6b,3
+np.float32,0x7eef6a48,0x42fdce87,3
+np.float32,0x3cc0a373,0xc0ad20c0,3
+np.float32,0x3f2a75bb,0xbf1632ba,3
+np.float32,0x0,0xff800000,3
+np.float32,0x7ecdb6f4,0x42fd5e77,3
+np.float32,0x7f2e2dfd,0x42fee38d,3
+np.float32,0x3ee17f6e,0xbf976d8c,3
+np.float32,0x3f51e7ee,0xbe92a319,3
+np.float32,0x3f06942f,0xbf6d7d3c,3
+np.float32,0x3f7ba528,0xbccac6f1,3
+np.float32,0x3f413787,0xbecfd513,3
+np.float32,0x3e085e48,0xc03a2716,3
+np.float32,0x7e4c5e0e,0x42fb599c,3
+np.float32,0x306f76,0xc2fecdd4,3
+np.float32,0x7f5c2203,0x42ff9081,3
+np.float32,0x3d5355b4,0xc088da05,3
+np.float32,0x9a2a,0xc305bb4f,3
+np.float32,0x3db93a1f,0xc05de0db,3
+np.float32,0x4e50c6,0xc2fd6ae4,3
+np.float32,0x7ec4afed,0x42fd3d51,3
+np.float32,0x3a8f27,0xc2fe41a0,3
+np.float32,0x7f213caf,0x42feaa84,3
+np.float32,0x7e7b5f00,0x42fbf286,3
+np.float32,0x7e367194,0x42fb05ca,3
+np.float32,0x7f56e6de,0x42ff7ebd,3
+np.float32,0x3ed7383e,0xbfa00aef,3
+np.float32,0x7e844752,0x42fc184a,3
+np.float32,0x15157,0xc3049a19,3
+np.float32,0x3f78cd92,0xbd28824a,3
+np.float32,0x7ecddb16,0x42fd5ef9,3
+np.float32,0x3e479f16,0xc016f7d8,3
+np.float32,0x3f5cb418,0xbe5b2bd3,3
+np.float32,0x7c0934cb,0x42f2334e,3
+np.float32,0x3ebe5505,0xbfb6bc69,3
+np.float32,0x3eb1335a,0xbfc3eff5,3
+np.float32,0x3f2488a3,0xbf234444,3
+np.float32,0x642906,0xc2fcb52a,3
+np.float32,0x3da635fa,0xc067e15a,3
+np.float32,0x7e0d80db,0x42fa4a15,3
+np.float32,0x4f0b9d,0xc2fd640a,3
+np.float32,0x7e083806,0x42fa2df8,3
+np.float32,0x7f77f8c6,0x42ffe877,3
+np.float32,0x3e7bb46a,0xc0018ff5,3
+np.float32,0x3f06eb2e,0xbf6c8eca,3
+np.float32,0x7eae8f7c,0x42fce52a,3
+np.float32,0x3de481a0,0xc04a7d7f,3
+np.float32,0x3eed4311,0xbf8e096f,3
+np.float32,0x3f7b0300,0xbce8903d,3
+np.float32,0x3811b,0xc30330dd,3
+np.float32,0x3eb6f8e1,0xbfbe04bc,3
+np.float32,0x3ec35210,0xbfb1f55a,3
+np.float32,0x3d386916,0xc08f24a5,3
+np.float32,0x3f1fa197,0xbf2e704d,3
+np.float32,0x7f2020a5,0x42fea56a,3
+np.float32,0x7e1ea53f,0x42fa9e8c,3
+np.float32,0x3f148903,0xbf490bf9,3
+np.float32,0x3f2f56a0,0xbf0bc6c9,3
+np.float32,0x7da9fc,0xc2fc0d9b,3
+np.float32,0x3d802134,0xc07fe810,3
+np.float32,0x3f6cb927,0xbde74e57,3
+np.float32,0x7e05b125,0x42fa2023,3
+np.float32,0x3f3307f9,0xbf041433,3
+np.float32,0x5666bf,0xc2fd2250,3
+np.float32,0x3f51c93b,0xbe930f28,3
+np.float32,0x3eb5dcfe,0xbfbf241e,3
+np.float32,0xb2773,0xc301853f,3
+np.float32,0x7f4dee96,0x42ff5f3f,3
+np.float32,0x3e3f5c33,0xc01adee1,3
+np.float32,0x3f2ed29a,0xbf0cdd4a,3
+np.float32,0x3e3c01ef,0xc01c80ab,3
+np.float32,0x3ec2236e,0xbfb31458,3
+np.float32,0x7e841dc4,0x42fc1761,3
+np.float32,0x3df2cd8e,0xc044e30c,3
+np.float32,0x3f010901,0xbf7d0670,3
+np.float32,0x3c05ceaa,0xc0ddf39b,3
+np.float32,0x3f517226,0xbe944206,3
+np.float32,0x3f23c83d,0xbf24f522,3
+np.float32,0x7fc9da,0xc2fc0139,3
+np.float32,0x7f1bde53,0x42fe9181,3
+np.float32,0x3ea3786c,0xbfd2d4a5,3
+np.float32,0x3e83a71b,0xbffacdd2,3
+np.float32,0x3f6f0d4f,0xbdca61d5,3
+np.float32,0x7f5ab613,0x42ff8bb7,3
+np.float32,0x3ab1ec,0xc2fe3fea,3
+np.float32,0x4fbf58,0xc2fd5d82,3
+np.float32,0x3dea141b,0xc0484403,3
+np.float32,0x7d86ad3b,0x42f8258f,3
+np.float32,0x7f345315,0x42fefd29,3
+np.float32,0x3f3752fe,0xbef6a780,3
+np.float32,0x64830d,0xc2fcb293,3
+np.float32,0x3d9dc1eb,0xc06cb32a,3
+np.float32,0x3f2f935a,0xbf0b46f6,3
+np.float32,0xb90a4,0xc30177e3,3
+np.float32,0x4111dd,0xc2fdf3c1,3
+np.float32,0x3d4cd078,0xc08a4c68,3
+np.float32,0x3e95c3f1,0xbfe30011,3
+np.float32,0x3ec9f356,0xbfabcb4e,3
+np.float32,0x1b90d5,0xc3003717,3
+np.float32,0xee70f,0xc3011a3e,3
+np.float32,0x7fa00000,0x7fe00000,3
+np.float32,0x3f74cdb6,0xbd8422af,3
+np.float32,0x3d9b56fe,0xc06e2037,3
+np.float32,0x3f1853df,0xbf3fbc40,3
+np.float32,0x7d86a011,0x42f82547,3
+np.float32,0x3dff9629,0xc0402634,3
+np.float32,0x46f8c9,0xc2fdb39f,3
+np.float32,0x3e9b410b,0xbfdc5a87,3
+np.float32,0x3f5aed42,0xbe671cac,3
+np.float32,0x3b739886,0xc101257f,3
+np.float64,0x3fe2f58d6565eb1b,0xbfe82a641138e19a,2
+np.float64,0x3fee7f0642fcfe0d,0xbfb1c702f6974932,2
+np.float64,0x25b71f244b6e5,0xc090030d3b3c5d2b,2
+np.float64,0x8c9cc8e1193b,0xc0900b752a678fa8,2
+np.float64,0x3fd329b5d326536c,0xbffbd607f6db945c,2
+np.float64,0x3fb5109b3a2a2136,0xc00cd36bd15dfb18,2
+np.float64,0x3fd5393ae12a7276,0xbff97a7e4a157154,2
+np.float64,0x3fd374d1b926e9a3,0xbffb7c3e1a3a7ed3,2
+np.float64,0x3fe2c7f4e2658fea,0xbfe899f15ca78fcb,2
+np.float64,0x7fe3d6b81ee7ad6f,0x408ffa7b63d407ee,2
+np.float64,0x3fe086d097e10da1,0xbfee81456ce8dd03,2
+np.float64,0x7fd374a64ca6e94c,0x408ff241c7306d39,2
+np.float64,0x3fc0709a5b20e135,0xc007afdede31b29c,2
+np.float64,0x3fd4218f4b28431f,0xbffab2c696966e2d,2
+np.float64,0x143134c828628,0xc09006a8372c4d8a,2
+np.float64,0x3f8bd0aa0037a154,0xc018cf0e8b9c3107,2
+np.float64,0x7fe0ce905ee19d20,0x408ff8915e71bd67,2
+np.float64,0x3fda0f5f32b41ebe,0xbff4bd5e0869e820,2
+np.float64,0x7fe9ae63d0b35cc7,0x408ffd760ca4f292,2
+np.float64,0x3fe75abd9eeeb57b,0xbfdd1476fc8b3089,2
+np.float64,0x786c3110f0d87,0xc08ff8b44cedbeea,2
+np.float64,0x22c5fe80458d,0xc09013853591c2f2,2
+np.float64,0x3fdc250797384a0f,0xbff2f6a02c961f0b,2
+np.float64,0x3fa2b367b02566cf,0xc013199238485054,2
+np.float64,0x3fd26a910ca4d522,0xbffcc0e2089b1c0c,2
+np.float64,0x8068d3b300d1b,0xc08ff7f690210aac,2
+np.float64,0x3fe663bfa9ecc77f,0xbfe07cd95a43a5ce,2
+np.float64,0x3fd0ddb07321bb61,0xbffec886665e895e,2
+np.float64,0x3f91c730b0238e61,0xc0176452badc8d22,2
+np.float64,0x4dd10d309ba22,0xc08ffdbe738b1d8d,2
+np.float64,0x7fe322afa4a6455e,0x408ffa10c038f9de,2
+np.float64,0x7fdf7f7c42befef8,0x408ff7d147ddaad5,2
+np.float64,0x7fd673f386ace7e6,0x408ff3e920d00eef,2
+np.float64,0x3feaebfcadb5d7f9,0xbfcfe8ec27083478,2
+np.float64,0x3fdc6dc23738db84,0xbff2bb46794f07b8,2
+np.float64,0xcd8819599b103,0xc08ff288c5b2cf0f,2
+np.float64,0xfda00e77fb402,0xc08ff01b895d2236,2
+np.float64,0x840b02ff08161,0xc08ff7a41e41114c,2
+np.float64,0x3fbdce3a383b9c74,0xc008d1e61903a289,2
+np.float64,0x3fd24ed3c4a49da8,0xbffce3c12136b6d3,2
+np.float64,0x3fe8d0834131a107,0xbfd77b194e7051d4,2
+np.float64,0x3fdd0cb11aba1962,0xbff23b9dbd554455,2
+np.float64,0x1a32d97e3465c,0xc090052781a37271,2
+np.float64,0x3fdb09d2b1b613a5,0xbff3e396b862bd83,2
+np.float64,0x3fe04c848aa09909,0xbfef2540dd90103a,2
+np.float64,0x3fce0c48613c1891,0xc000b9f76877d744,2
+np.float64,0x3fc37109a226e213,0xc005c05d8b2b9a2f,2
+np.float64,0x81cf3837039e7,0xc08ff7d686517dff,2
+np.float64,0xd9342c29b2686,0xc08ff1e591c9a895,2
+np.float64,0x7fec731b0638e635,0x408ffea4884550a9,2
+np.float64,0x3fba0fc138341f82,0xc00a5e839b085f64,2
+np.float64,0x7fdda893b03b5126,0x408ff71f7c5a2797,2
+np.float64,0xd2a4bb03a5498,0xc08ff2402f7a907c,2
+np.float64,0x3fea61fb0d34c3f6,0xbfd1d293fbe76183,2
+np.float64,0x3fed5cf486fab9e9,0xbfbfc2e01a7ffff1,2
+np.float64,0x3fcbabc2bf375785,0xc001ad7750c9dbdf,2
+np.float64,0x3fdb5fff53b6bfff,0xbff39a7973a0c6a5,2
+np.float64,0x7feef05a00bde0b3,0x408fff9c5cbc8651,2
+np.float64,0xb1cf24f1639e5,0xc08ff434de10fffb,2
+np.float64,0x3fa583989c2b0731,0xc0124a8a3bbf18ce,2
+np.float64,0x7feae90bf9f5d217,0x408ffe002e7bbbea,2
+np.float64,0x3fe9ef41c4b3de84,0xbfd367878ae4528e,2
+np.float64,0x9be24ce337c4a,0xc08ff5b9b1c31cf9,2
+np.float64,0x3fe916894cb22d13,0xbfd677f915d58503,2
+np.float64,0x3fec1bab20f83756,0xbfc7f2777aabe8ee,2
+np.float64,0x3feaabf2873557e5,0xbfd0d11f28341233,2
+np.float64,0x3fd4d3c3b529a787,0xbff9e9e47acc8ca9,2
+np.float64,0x3fe4cfe96c699fd3,0xbfe3dc53fa739169,2
+np.float64,0xccfdb97399fb7,0xc08ff2908d893400,2
+np.float64,0x3fec7598be78eb31,0xbfc5a750f8f3441a,2
+np.float64,0x355be5fc6ab7e,0xc090010ca315b50b,2
+np.float64,0x3fba9f9074353f21,0xc00a1f80eaf5e581,2
+np.float64,0x7fdcaff189395fe2,0x408ff6bd1c5b90d9,2
+np.float64,0x3fd94d3b64b29a77,0xbff56be1b43d25f3,2
+np.float64,0x4e5f29949cbe6,0xc08ffda972da1d73,2
+np.float64,0x3fe654e2d9aca9c6,0xbfe09b88dcd8f15d,2
+np.float64,0x7fdc130190b82602,0x408ff67d496c1a27,2
+np.float64,0x3fbcd4701e39a8e0,0xc009343e36627e80,2
+np.float64,0x7fdaa4d38f3549a6,0x408ff5e2c6d8678f,2
+np.float64,0x3febe95e5237d2bd,0xbfc93e16d453fe3a,2
+np.float64,0x9ef5ca553deba,0xc08ff57ff4f7883d,2
+np.float64,0x7fe878e91170f1d1,0x408ffce795868fc8,2
+np.float64,0x3fe63dff466c7bff,0xbfe0caf2b79c9e5f,2
+np.float64,0x6561446ccac29,0xc08ffab0e383834c,2
+np.float64,0x30c6c2ae618d9,0xc09001914b30381b,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0x3fe5c9daf1ab93b6,0xbfe1be81baf4dbdb,2
+np.float64,0x3fe0a03e24a1407c,0xbfee3a73c4c0e8f8,2
+np.float64,0xff2a2cf3fe546,0xc08ff009a7e6e782,2
+np.float64,0x7fcf0332213e0663,0x408fefa36235e210,2
+np.float64,0x3fb612affc2c2560,0xc00c494be9c8c33b,2
+np.float64,0x3fd2b259702564b3,0xbffc67967f077e75,2
+np.float64,0x7fcb63685d36c6d0,0x408fee343343f913,2
+np.float64,0x3fe369f1d5a6d3e4,0xbfe71251139939ad,2
+np.float64,0x3fdd17c618ba2f8c,0xbff232d11c986251,2
+np.float64,0x3f92cc8040259901,0xc01711d8e06b52ee,2
+np.float64,0x69a81dc2d3504,0xc08ffa36cdaf1141,2
+np.float64,0x3fea0fad99b41f5b,0xbfd2f4625a652645,2
+np.float64,0xd1cd5799a39ab,0xc08ff24c02b90d26,2
+np.float64,0x324e59ce649cc,0xc0900163ad091c76,2
+np.float64,0x3fc3d460a227a8c1,0xc00585f903dc7a7f,2
+np.float64,0xa7185ec74e30c,0xc08ff4ec7d65ccd9,2
+np.float64,0x3fa254eaac24a9d5,0xc01337053963321a,2
+np.float64,0x3feaeb112435d622,0xbfcfef3be17f81f6,2
+np.float64,0x60144c3ac028a,0xc08ffb4f8eb94595,2
+np.float64,0x7fa4d2ec6829a5d8,0x408fdb0a9670ab83,2
+np.float64,0x3fed1372f97a26e6,0xbfc1b1fe50d48a55,2
+np.float64,0x3fd5ade5972b5bcb,0xbff8fcf28f525031,2
+np.float64,0x7fe72e335bee5c66,0x408ffc4759236437,2
+np.float64,0x7fdfafab143f5f55,0x408ff7e2e22a8129,2
+np.float64,0x3fe90d0db9321a1b,0xbfd69ae5fe10eb9e,2
+np.float64,0x7fe20a59072414b1,0x408ff962a2492484,2
+np.float64,0x3fed853690bb0a6d,0xbfbdc9dc5f199d2b,2
+np.float64,0x3fd709d469ae13a9,0xbff795a218deb700,2
+np.float64,0x3fe21c35f5e4386c,0xbfea47d71789329b,2
+np.float64,0x9ea5ec053d4be,0xc08ff585c2f6b7a3,2
+np.float64,0x3fc0580f9e20b01f,0xc007c1268f49d037,2
+np.float64,0xd99127abb3225,0xc08ff1e0a1ff339d,2
+np.float64,0x3fdc8c9bbfb91937,0xbff2a2478354effb,2
+np.float64,0x3fe15fc6b162bf8d,0xbfec323ac358e008,2
+np.float64,0xffefffffffffffff,0x7ff8000000000000,2
+np.float64,0x3fee341afb3c6836,0xbfb556b6faee9a84,2
+np.float64,0x3fe4b64c56296c99,0xbfe4154835ad2afe,2
+np.float64,0x85de22810bbc5,0xc08ff77b914fe5b5,2
+np.float64,0x3fd22c72e3a458e6,0xbffd0f4269d20bb9,2
+np.float64,0xc090e5218123,0xc09009a4a65a8a8f,2
+np.float64,0x7fd9641692b2c82c,0x408ff5547782bdfc,2
+np.float64,0x3fd9b9cb28b37396,0xbff509a8fb59a9f1,2
+np.float64,0x3fcd2726f93a4e4e,0xc001135059a22117,2
+np.float64,0x3fa4b493d4296928,0xc0128323c7a55f4a,2
+np.float64,0x47455e788e8ac,0xc08ffec2101c1e82,2
+np.float64,0x3fe0d7e2e261afc6,0xbfeda0f1e2d0f4bd,2
+np.float64,0x3fe860fc5b70c1f9,0xbfd91dc42eaf72c2,2
+np.float64,0xa5d7805b4baf0,0xc08ff502bc819ff6,2
+np.float64,0xd83395b1b0673,0xc08ff1f33c3f94c2,2
+np.float64,0x3f865972e02cb2e6,0xc01a1243651565c8,2
+np.float64,0x52fc6952a5f8e,0xc08ffd006b158179,2
+np.float64,0x7fecac6c793958d8,0x408ffebbb1c09a70,2
+np.float64,0x7fe621ff606c43fe,0x408ffbbeb2b1473a,2
+np.float64,0x3fdb9f3f9db73e7f,0xbff365610c52bda7,2
+np.float64,0x7feab92992757252,0x408ffdeb92a04813,2
+np.float64,0xcc46c79f988d9,0xc08ff29adf03fb7c,2
+np.float64,0x3fe3156a03262ad4,0xbfe7dd0f598781c7,2
+np.float64,0x3fc00e3a61201c75,0xc007f5c121a87302,2
+np.float64,0x3fdce8e9f739d1d4,0xbff2581d41ef50ef,2
+np.float64,0x0,0xfff0000000000000,2
+np.float64,0x7d373ac4fa6e8,0xc08ff840fa8beaec,2
+np.float64,0x3fee41e0653c83c1,0xbfb4ae786f2a0d54,2
+np.float64,0x3ff0000000000000,0x0,2
+np.float64,0x7feca6fff9794dff,0x408ffeb982a70556,2
+np.float64,0x7fc532716d2a64e2,0x408feb3f0f6c095b,2
+np.float64,0x3fe4ec2954a9d853,0xbfe39dd44aa5a040,2
+np.float64,0x7fd3321d52a6643a,0x408ff21a0ab9cd85,2
+np.float64,0x7fd8f1b2dfb1e365,0x408ff52001fa7922,2
+np.float64,0x3fee5e58cabcbcb2,0xbfb3539734a24d8b,2
+np.float64,0x3feebf6e7dfd7edd,0xbfad7c648f025102,2
+np.float64,0x6008026ec0101,0xc08ffb5108b54a93,2
+np.float64,0x3fea06f5e2340dec,0xbfd3134a48283360,2
+np.float64,0x41cad13c8395b,0xc08fffae654b2426,2
+np.float64,0x7fedb5c9353b6b91,0x408fff249f1f32b6,2
+np.float64,0xe00c5af9c018c,0xc08ff189e68c655f,2
+np.float64,0x7feac398ddf58731,0x408ffdf01374de9f,2
+np.float64,0x3fed21127c7a4225,0xbfc15b8cf55628fa,2
+np.float64,0x3fd3446711a688ce,0xbffbb5f7252a9fa3,2
+np.float64,0x7fe75fa07a6ebf40,0x408ffc5fdb096018,2
+np.float64,0x3feeb1618cbd62c3,0xbfaece3bd0863070,2
+np.float64,0x7f5226e180244dc2,0x408fb174d506e52f,2
+np.float64,0x3fcd67deca3acfbe,0xc000f9cd7a490749,2
+np.float64,0xdc6f30efb8de6,0xc08ff1b9f2a22d2e,2
+np.float64,0x9c14931338293,0xc08ff5b5f975ec5d,2
+np.float64,0x7fe93e802df27cff,0x408ffd4354eba0e0,2
+np.float64,0x3feb92ae5077255d,0xbfcb7f2084e44dbb,2
+np.float64,0xd78dbfddaf1b8,0xc08ff1fc19fa5a13,2
+np.float64,0x7fe14c301fa2985f,0x408ff8e666cb6592,2
+np.float64,0xbda3d8b77b47b,0xc08ff37689f4b2e5,2
+np.float64,0x8a42953b14853,0xc08ff71c2db3b8cf,2
+np.float64,0x7fe4ca7e186994fb,0x408ffb05e94254a7,2
+np.float64,0x7fe92ffc5e325ff8,0x408ffd3cb0265b12,2
+np.float64,0x91b262912364d,0xc08ff681619be214,2
+np.float64,0x33fe2b0667fc6,0xc0900132f3fab55e,2
+np.float64,0x3fde10e9183c21d2,0xbff17060fb4416c7,2
+np.float64,0xb6b811cb6d702,0xc08ff3e46303b541,2
+np.float64,0x3fe4a7bda0a94f7b,0xbfe435c6481cd0e3,2
+np.float64,0x7fd9fe6057b3fcc0,0x408ff599c79a822c,2
+np.float64,0x3fef44bf917e897f,0xbfa11484e351a6e9,2
+np.float64,0x3fe57d701daafae0,0xbfe2618ab40fc01b,2
+np.float64,0x7fe52d2adbaa5a55,0x408ffb3c2fb1c99d,2
+np.float64,0xb432f66d6865f,0xc08ff40d6b4084fe,2
+np.float64,0xbff0000000000000,0x7ff8000000000000,2
+np.float64,0x7fecd2292bf9a451,0x408ffecad860de6f,2
+np.float64,0x3fddd2ae153ba55c,0xbff1a059adaca33e,2
+np.float64,0x3fee55d6e5bcabae,0xbfb3bb1c6179d820,2
+np.float64,0x7fc1d0085623a010,0x408fe93d16ada7a7,2
+np.float64,0x829b000105360,0xc08ff7c47629a68f,2
+np.float64,0x7fe1e0257523c04a,0x408ff94782cf0717,2
+np.float64,0x7fd652f9ad2ca5f2,0x408ff3d820ec892e,2
+np.float64,0x3fef2246203e448c,0xbfa444ab6209d8cd,2
+np.float64,0x3fec6c0ae178d816,0xbfc5e559ebd4e790,2
+np.float64,0x3fe6ddfee92dbbfe,0xbfdf06dd7d3fa7a8,2
+np.float64,0x3fb7fbcbea2ff798,0xc00b5404d859d148,2
+np.float64,0x7feb9a154d37342a,0x408ffe4b26c29e55,2
+np.float64,0x3fe4db717aa9b6e3,0xbfe3c2c6b3ef13bc,2
+np.float64,0x3fbae17dda35c2fc,0xc00a030f7f4b37e7,2
+np.float64,0x7fd632b9082c6571,0x408ff3c76826ef19,2
+np.float64,0x7fc4184a15283093,0x408feaa14adf00be,2
+np.float64,0x3fe052d19920a5a3,0xbfef136b5df81a3e,2
+np.float64,0x7fe38b872b67170d,0x408ffa4f51aafc86,2
+np.float64,0x3fef9842d03f3086,0xbf92d3d2a21d4be2,2
+np.float64,0x9cea662139d4d,0xc08ff5a634810daa,2
+np.float64,0x3fe35f0855e6be11,0xbfe72c4b564e62aa,2
+np.float64,0x3fecee3d3779dc7a,0xbfc29ee942f8729e,2
+np.float64,0x3fe7903fd72f2080,0xbfdc41db9b5f4048,2
+np.float64,0xb958889572b11,0xc08ff3ba366cf84b,2
+np.float64,0x3fcb3a67c53674d0,0xc001dd21081ad1ea,2
+np.float64,0xe3b1b53fc7637,0xc08ff15a3505e1ce,2
+np.float64,0xe5954ae9cb2aa,0xc08ff141cbbf0ae4,2
+np.float64,0x3fe394af74e7295f,0xbfe6ad1d13f206e8,2
+np.float64,0x7fe21dd704643bad,0x408ff96f13f80c1a,2
+np.float64,0x3fd23a7cf02474fa,0xbffcfd7454117a05,2
+np.float64,0x7fe257515e24aea2,0x408ff99378764d52,2
+np.float64,0x7fe4c5d0a6e98ba0,0x408ffb03503cf939,2
+np.float64,0x3fadc2c1603b8583,0xc0106b2c17550e3a,2
+np.float64,0x3fc0f7f02421efe0,0xc007525ac446864c,2
+np.float64,0x3feaf0b27275e165,0xbfcfc8a03eaa32ad,2
+np.float64,0x5ce7503cb9ceb,0xc08ffbb2de365fa8,2
+np.float64,0x2a0014f654003,0xc090026e41761a0d,2
+np.float64,0x7fe2c848a8e59090,0x408ff9d9b723ee89,2
+np.float64,0x7f66f54bc02dea97,0x408fbc2ae0ec5623,2
+np.float64,0xa35a890146b6,0xc0900a97b358ddbd,2
+np.float64,0x7fee267ded7c4cfb,0x408fff501560c9f5,2
+np.float64,0x3fe07c328520f865,0xbfee9ef7c3435b58,2
+np.float64,0x3fe67122cf6ce246,0xbfe06147001932ba,2
+np.float64,0x3fdacc8925359912,0xbff41824cece219e,2
+np.float64,0xffa3047fff461,0xc08ff00431ec9be3,2
+np.float64,0x3e1af43e7c35f,0xc090002c6573d29b,2
+np.float64,0x86fa94590df53,0xc08ff7632525ed92,2
+np.float64,0x7fec4c76227898eb,0x408ffe94d032c657,2
+np.float64,0x7fe2274ce1e44e99,0x408ff975194cfdff,2
+np.float64,0x7fe670e1b4ace1c2,0x408ffbe78cc451de,2
+np.float64,0x7fe853871db0a70d,0x408ffcd5e6a6ff47,2
+np.float64,0x3fcbf265db37e4cc,0xc0019026336e1176,2
+np.float64,0x3fef033cef3e067a,0xbfa726712eaae7f0,2
+np.float64,0x5d74973abae94,0xc08ffba15e6bb992,2
+np.float64,0x7fdd9c99b6bb3932,0x408ff71ad24a7ae0,2
+np.float64,0xbdc8e09b7b91c,0xc08ff3744939e9a3,2
+np.float64,0xdbfcff71b7fa0,0xc08ff1bfeecc9dfb,2
+np.float64,0xf9b38cf5f3672,0xc08ff0499af34a43,2
+np.float64,0x3fea820aa6b50415,0xbfd162a38e1927b1,2
+np.float64,0x3fe67f59a12cfeb3,0xbfe04412adca49dc,2
+np.float64,0x3feb301d9c76603b,0xbfce17e6edeb92d5,2
+np.float64,0x828ce00b0519c,0xc08ff7c5b5c57cde,2
+np.float64,0x4f935e229f26c,0xc08ffd7c67c1c54f,2
+np.float64,0x7fcd139e023a273b,0x408feee4f12ff11e,2
+np.float64,0x666a9944ccd54,0xc08ffa92d5e5cd64,2
+np.float64,0x3fe792f0fa6f25e2,0xbfdc374fda28f470,2
+np.float64,0xe996029bd32c1,0xc08ff10eb9b47a11,2
+np.float64,0x3fe7b0dd1eef61ba,0xbfdbc2676dc77db0,2
+np.float64,0x7fd3ec0127a7d801,0x408ff287bf47e27d,2
+np.float64,0x3fe793a8ea6f2752,0xbfdc347f7717e48d,2
+np.float64,0x7fdb89d15e3713a2,0x408ff64457a13ea2,2
+np.float64,0x3fe35b3cbbe6b679,0xbfe73557c8321b70,2
+np.float64,0x66573c94ccae8,0xc08ffa9504af7eb5,2
+np.float64,0x3fc620a2302c4144,0xc00442036b944a67,2
+np.float64,0x49b2fe0693660,0xc08ffe5f131c3c7e,2
+np.float64,0x7fda936cdfb526d9,0x408ff5db3ab3f701,2
+np.float64,0xc774ceef8ee9a,0xc08ff2e16d082fa1,2
+np.float64,0x4da9f8a09b55,0xc0900ee2206d0c88,2
+np.float64,0x3fe2ca5d5ae594bb,0xbfe89406611a5f1a,2
+np.float64,0x7fe0832497e10648,0x408ff85d1de6056e,2
+np.float64,0x3fe6a9e3222d53c6,0xbfdfda35a9bc2de1,2
+np.float64,0x3fed3d92c8ba7b26,0xbfc0a73620db8b98,2
+np.float64,0x3fdd2ec093ba5d81,0xbff2209cf78ce3f1,2
+np.float64,0x62fcb968c5f98,0xc08ffaf775a593c7,2
+np.float64,0xfcfb019ff9f60,0xc08ff0230e95bd16,2
+np.float64,0x3fd7a63e8f2f4c7d,0xbff6faf4fff7dbe0,2
+np.float64,0x3fef23b0ec3e4762,0xbfa4230cb176f917,2
+np.float64,0x340d1e6a681a5,0xc09001314b68a0a2,2
+np.float64,0x7fc0b85ba02170b6,0x408fe8821487b802,2
+np.float64,0x7fe9976e84f32edc,0x408ffd6bb6aaf467,2
+np.float64,0x329a0e9e65343,0xc090015b044e3270,2
+np.float64,0x3fea4928d3f49252,0xbfd2299b05546eab,2
+np.float64,0x3f188c70003118e0,0xc02ac3ce23bc5d5a,2
+np.float64,0x3fecce5020b99ca0,0xbfc36b23153d5f50,2
+np.float64,0x3fe203873e24070e,0xbfea86edb3690830,2
+np.float64,0x3fe02d9eaa205b3d,0xbfef7d18c54a76d2,2
+np.float64,0xef7537ebdeea7,0xc08ff0c55e9d89e7,2
+np.float64,0x3fedf7572efbeeae,0xbfb840af357cf07c,2
+np.float64,0xd1a97a61a354,0xc0900926fdfb96cc,2
+np.float64,0x7fe6a0daeced41b5,0x408ffc001edf1407,2
+np.float64,0x3fe5063625aa0c6c,0xbfe3647cfb949d62,2
+np.float64,0x7fe9b28d31736519,0x408ffd77eb4a922b,2
+np.float64,0x7feea90d033d5219,0x408fff81a4bbff62,2
+np.float64,0x3fe9494d17f2929a,0xbfd5bde02eb5287a,2
+np.float64,0x7feee17a8cbdc2f4,0x408fff96cf0dc16a,2
+np.float64,0xb2ad18ef655a3,0xc08ff4267eda8af8,2
+np.float64,0x3fad3b52683a76a5,0xc01085ab75b797ce,2
+np.float64,0x2300a65846016,0xc090037b81ce9500,2
+np.float64,0x3feb1041f9b62084,0xbfcef0c87d8b3249,2
+np.float64,0x3fdd887d3e3b10fa,0xbff1da0e1ede6db2,2
+np.float64,0x3fd3e410eb27c822,0xbffaf9b5fc9cc8cc,2
+np.float64,0x3fe0aa53e3e154a8,0xbfee1e7b5c486578,2
+np.float64,0x7fe33e389aa67c70,0x408ffa214fe50961,2
+np.float64,0x3fd27e3a43a4fc75,0xbffca84a79e8adeb,2
+np.float64,0x3fb309e0082613c0,0xc00dfe407b77a508,2
+np.float64,0x7feaf2ed8cf5e5da,0x408ffe046a9d1ba9,2
+np.float64,0x1e76167a3cec4,0xc0900448cd35ec67,2
+np.float64,0x3fe0a18e1721431c,0xbfee36cf1165a0d4,2
+np.float64,0x3fa73b78c02e76f2,0xc011d9069823b172,2
+np.float64,0x3fef6d48287eda90,0xbf9ab2d08722c101,2
+np.float64,0x8fdf0da31fbe2,0xc08ff6a6a2accaa1,2
+np.float64,0x3fc3638db826c71b,0xc005c86191688826,2
+np.float64,0xaa9c09c555381,0xc08ff4aefe1d9473,2
+np.float64,0x7fccb0f4523961e8,0x408feebd84773f23,2
+np.float64,0xede75dcfdbcec,0xc08ff0d89ba887d1,2
+np.float64,0x7f8a051520340a29,0x408fcd9cc17f0d95,2
+np.float64,0x3fef5ca2babeb945,0xbf9dc221f3618e6a,2
+np.float64,0x7fea0ff4bcf41fe8,0x408ffda193359f22,2
+np.float64,0x7fe05c53fd20b8a7,0x408ff841dc7123e8,2
+np.float64,0x3fc625664b2c4acd,0xc0043f8749b9a1d8,2
+np.float64,0x7fed58f98f7ab1f2,0x408fff00585f48c2,2
+np.float64,0x3fb3e5e51427cbca,0xc00d7bcb6528cafe,2
+np.float64,0x3fe728bd3d6e517a,0xbfdddafa72bd0f60,2
+np.float64,0x3fe3f005dd27e00c,0xbfe5d7b3ec93bca0,2
+np.float64,0x3fd74fbd1a2e9f7a,0xbff750001b63ce81,2
+np.float64,0x3fd3af6d85a75edb,0xbffb371d678d11b4,2
+np.float64,0x7fa690ad8c2d215a,0x408fdbf7db9c7640,2
+np.float64,0x3fbdfd38e23bfa72,0xc008bfc1c5c9b89e,2
+np.float64,0x3fe2374684a46e8d,0xbfea030c4595dfba,2
+np.float64,0x7fc0806c372100d7,0x408fe85b36fee334,2
+np.float64,0x3fef3ac47b7e7589,0xbfa2007195c5213f,2
+np.float64,0x3fb55473922aa8e7,0xc00cae7af8230e0c,2
+np.float64,0x7fe018dc152031b7,0x408ff811e0d712fa,2
+np.float64,0x3fe3b3fca56767f9,0xbfe6638ae2c99c62,2
+np.float64,0x7fac79818c38f302,0x408fdea720b39c3c,2
+np.float64,0x7fefffffffffffff,0x4090000000000000,2
+np.float64,0xd2b290cba5652,0xc08ff23f6d7152a6,2
+np.float64,0x7fc5848eb52b091c,0x408feb6b6f8b77d0,2
+np.float64,0xf399f62de733f,0xc08ff092ae319ad8,2
+np.float64,0x7fdec56c12bd8ad7,0x408ff78c4ddbc667,2
+np.float64,0x3fca640f1e34c81e,0xc0023969c5cbfa4c,2
+np.float64,0x3fd55225db2aa44c,0xbff95f7442a2189e,2
+np.float64,0x7fefa009a97f4012,0x408fffdd2f42ef9f,2
+np.float64,0x4a3b70609478,0xc0900f24e449bc3d,2
+np.float64,0x7fe3738b1ba6e715,0x408ffa411f2cb5e7,2
+np.float64,0x7fe5e53f0b6bca7d,0x408ffb9ed8d95cea,2
+np.float64,0x3fe274dd24a4e9ba,0xbfe967fb114b2a83,2
+np.float64,0x3fcbc58b8c378b17,0xc001a2bb1e158bcc,2
+np.float64,0x3fefc2c0043f8580,0xbf862c9b464dcf38,2
+np.float64,0xc2c4fafd858a0,0xc08ff327aecc409b,2
+np.float64,0x3fd8bc39a9b17873,0xbff5f1ad46e5a51c,2
+np.float64,0x3fdf341656be682d,0xbff094f41e7cb4c4,2
+np.float64,0x3fef8495c13f092c,0xbf966cf6313bae4c,2
+np.float64,0x3fe14e0f05229c1e,0xbfec6166f26b7161,2
+np.float64,0x3fed42d3b2ba85a7,0xbfc0860b773d35d8,2
+np.float64,0x7fd92bbac5b25775,0x408ff53abcb3fe0c,2
+np.float64,0xb1635b6f62c6c,0xc08ff43bdf47accf,2
+np.float64,0x4a3a2dbc94746,0xc08ffe49fabddb36,2
+np.float64,0x87d831290fb06,0xc08ff750419dc6fb,2
+np.float64,0x3fec4713f7f88e28,0xbfc6d6217c9f5cf9,2
+np.float64,0x7fed43ba2d3a8773,0x408ffef7fa2fc303,2
+np.float64,0x7fd1ec5b56a3d8b6,0x408ff14f62615f1e,2
+np.float64,0x3fee534b6c7ca697,0xbfb3da1951aa3e68,2
+np.float64,0x3febb564c2b76aca,0xbfca9737062e55e7,2
+np.float64,0x943e6b0f287ce,0xc08ff64e2d09335c,2
+np.float64,0xf177d957e2efb,0xc08ff0acab2999fa,2
+np.float64,0x7fb5b881a82b7102,0x408fe3872b4fde5e,2
+np.float64,0x3fdb2b4a97b65695,0xbff3c715c91359bc,2
+np.float64,0x3fac0a17e4381430,0xc010c330967309fb,2
+np.float64,0x7fd8057990b00af2,0x408ff4b0a287a348,2
+np.float64,0x1f9026a23f206,0xc09004144f3a19dd,2
+np.float64,0x3fdb2977243652ee,0xbff3c8a2fd05803d,2
+np.float64,0x3fe0f6e74b21edcf,0xbfed4c3bb956bae0,2
+np.float64,0xde9cc3bbbd399,0xc08ff19ce5c1e762,2
+np.float64,0x3fe72ce106ae59c2,0xbfddca7ab14ceba2,2
+np.float64,0x3fa8ee14e031dc2a,0xc01170d54ca88e86,2
+np.float64,0x3fe0b09bbb216137,0xbfee0d189a95b877,2
+np.float64,0x7fdfdcb157bfb962,0x408ff7f33cf2afea,2
+np.float64,0x3fef84d5f53f09ac,0xbf966134e2a154f4,2
+np.float64,0x3fea0e0b1bb41c16,0xbfd2fa2d36637d19,2
+np.float64,0x1ab76fd6356ef,0xc090050a9616ffbd,2
+np.float64,0x7fd0ccf79a2199ee,0x408ff09045af2dee,2
+np.float64,0x7fea929345f52526,0x408ffddadc322b07,2
+np.float64,0x3fe9ef629cf3dec5,0xbfd367129c166838,2
+np.float64,0x3feedf0ea2fdbe1d,0xbfaa862afca44c00,2
+np.float64,0x7fce725f723ce4be,0x408fef6cfd2769a8,2
+np.float64,0x7fe4313b3ca86275,0x408ffaaf9557ef8c,2
+np.float64,0xe2d46463c5a8d,0xc08ff165725c6b08,2
+np.float64,0x7fbacb4ace359695,0x408fe5f3647bd0d5,2
+np.float64,0x3fbafd009635fa01,0xc009f745a7a5c5d5,2
+np.float64,0x3fe3cea66ce79d4d,0xbfe6253b895e2838,2
+np.float64,0x7feaa71484354e28,0x408ffde3c0bad2a6,2
+np.float64,0x3fd755b8b42eab71,0xbff74a1444c6e654,2
+np.float64,0x3fc313e2172627c4,0xc005f830e77940c3,2
+np.float64,0x12d699a225ad4,0xc090070ec00f2338,2
+np.float64,0x3fa975fe8432ebfd,0xc01151b3da48b3f9,2
+np.float64,0x7fdce3103b39c61f,0x408ff6d19b3326fa,2
+np.float64,0x7fd341cbba268396,0x408ff2237490fdca,2
+np.float64,0x3fd8405885b080b1,0xbff6666d8802a7d5,2
+np.float64,0x3fe0f0cca3a1e199,0xbfed5cdb3e600791,2
+np.float64,0x7fbd56680c3aaccf,0x408fe6ff55bf378d,2
+np.float64,0x3f939c4f3027389e,0xc016d364dd6313fb,2
+np.float64,0x3fe9e87fac73d0ff,0xbfd37f9a2be4fe38,2
+np.float64,0x7fc93c6a883278d4,0x408fed4260e614f1,2
+np.float64,0x7fa88c0ff031181f,0x408fdcf09a46bd3a,2
+np.float64,0xd5487f99aa910,0xc08ff21b6390ab3b,2
+np.float64,0x3fe34acc96e69599,0xbfe75c9d290428fb,2
+np.float64,0x3fd17f5964a2feb3,0xbffdef50b524137b,2
+np.float64,0xe23dec0dc47be,0xc08ff16d1ce61dcb,2
+np.float64,0x3fec8bd64fb917ad,0xbfc5173941614b8f,2
+np.float64,0x3fc81d97d7303b30,0xc00343ccb791401d,2
+np.float64,0x7fe79ad18e2f35a2,0x408ffc7cf0ab0f2a,2
+np.float64,0x3f96306b402c60d7,0xc0161ce54754cac1,2
+np.float64,0xfb09fc97f6140,0xc08ff039d1d30123,2
+np.float64,0x3fec9c4afa793896,0xbfc4ace43ee46079,2
+np.float64,0x3f9262dac824c5b6,0xc01732a3a7eeb598,2
+np.float64,0x3fa5cd33f42b9a68,0xc01236ed4d315a3a,2
+np.float64,0x3fe7bb336caf7667,0xbfdb9a268a82e267,2
+np.float64,0xc6c338f98d867,0xc08ff2ebb8475bbc,2
+np.float64,0x3fd50714482a0e29,0xbff9b14a9f84f2c2,2
+np.float64,0xfff0000000000000,0x7ff8000000000000,2
+np.float64,0x3fde2cd0f93c59a2,0xbff15afe35a43a37,2
+np.float64,0xf1719cb9e2e34,0xc08ff0acf77b06d3,2
+np.float64,0xfd3caaf9fa796,0xc08ff020101771bd,2
+np.float64,0x7f750d63a02a1ac6,0x408fc32ad0caa362,2
+np.float64,0x7fcc50f4e238a1e9,0x408fee96a5622f1a,2
+np.float64,0x421d1da0843a4,0xc08fff9ffe62d869,2
+np.float64,0x3fd9e17023b3c2e0,0xbff4e631d687ee8e,2
+np.float64,0x3fe4999a09693334,0xbfe4556b3734c215,2
+np.float64,0xd619ef03ac33e,0xc08ff21013c85529,2
+np.float64,0x3fc4da522229b4a4,0xc004f150b2c573aa,2
+np.float64,0x3feb04b053b60961,0xbfcf3fc9e00ebc40,2
+np.float64,0x3fbedec5ea3dbd8c,0xc0086a33dc22fab5,2
+np.float64,0x7fec3b217ab87642,0x408ffe8dbc8ca041,2
+np.float64,0xdb257d33b64b0,0xc08ff1cb42d3c182,2
+np.float64,0x7fa2d92ec025b25d,0x408fd9e414d11cb0,2
+np.float64,0x3fa425c550284b8b,0xc012ab7cbf83be12,2
+np.float64,0x10b4869021692,0xc09007c0487d648a,2
+np.float64,0x7f97918c902f2318,0x408fd47867806574,2
+np.float64,0x3fe4f91238e9f224,0xbfe38160b4e99919,2
+np.float64,0x3fc2b1af6125635f,0xc00634343bc58461,2
+np.float64,0x3fc2a98071255301,0xc0063942bc8301be,2
+np.float64,0x3fe4cfc585299f8b,0xbfe3dca39f114f34,2
+np.float64,0x3fd1ea75b3a3d4eb,0xbffd63acd02c5406,2
+np.float64,0x3fd6bf48492d7e91,0xbff7e0cd249f80f9,2
+np.float64,0x76643d36ecc88,0xc08ff8e68f13b38c,2
+np.float64,0x7feeabab3e7d5755,0x408fff82a0fd4501,2
+np.float64,0x46c0d4a68d81b,0xc08ffed79abaddc9,2
+np.float64,0x3fd088d57ca111ab,0xbfff3dd0ed7128ea,2
+np.float64,0x3fed25887cba4b11,0xbfc13f47639bd645,2
+np.float64,0x7fd90984b4b21308,0x408ff52b022c7fb4,2
+np.float64,0x3fe6ef31daadde64,0xbfdec185760cbf21,2
+np.float64,0x3fe48dbe83291b7d,0xbfe47005b99920bd,2
+np.float64,0x3fdce8422f39d084,0xbff258a33a96cc8e,2
+np.float64,0xb8ecdef771d9c,0xc08ff3c0eca61b10,2
+np.float64,0x3fe9bbf9a03377f3,0xbfd41ecfdcc336b9,2
+np.float64,0x7fe2565339a4aca5,0x408ff992d8851eaf,2
+np.float64,0x3fe1693e3822d27c,0xbfec1919da2ca697,2
+np.float64,0x3fd3680488a6d009,0xbffb8b7330275947,2
+np.float64,0x7fbe4f3d2c3c9e79,0x408fe75fa3f4e600,2
+np.float64,0x7fd4cfef3ca99fdd,0x408ff308ee3ab50f,2
+np.float64,0x3fd9c9a51cb3934a,0xbff4fb7440055ce6,2
+np.float64,0x3fe08a9640a1152d,0xbfee76bd1bfbf5c2,2
+np.float64,0x3fef012c41fe0259,0xbfa757a2da7f9707,2
+np.float64,0x3fee653fe2fcca80,0xbfb2ffae0c95025c,2
+np.float64,0x7fd0776933a0eed1,0x408ff054e7b43d41,2
+np.float64,0x4c94e5c09929d,0xc08ffdedb7f49e5e,2
+np.float64,0xca3e3d17947c8,0xc08ff2b86dce2f7a,2
+np.float64,0x3fb528e1342a51c2,0xc00cc626c8e2d9ba,2
+np.float64,0xd774df81aee9c,0xc08ff1fd6f0a7548,2
+np.float64,0x3fc47a9b6128f537,0xc00526c577b80849,2
+np.float64,0x3fe29a6f6a6534df,0xbfe90a5f83644911,2
+np.float64,0x3fecda4f59f9b49f,0xbfc31e4a80c4cbb6,2
+np.float64,0x7fe51d44f5aa3a89,0x408ffb3382437426,2
+np.float64,0x3fd677fc412ceff9,0xbff82999086977e7,2
+np.float64,0x3fe2a3c7e7254790,0xbfe8f33415cdba9d,2
+np.float64,0x3fe6d8d1dc6db1a4,0xbfdf1bc61bc24dff,2
+np.float64,0x7febb32d8ef7665a,0x408ffe55a043ded1,2
+np.float64,0x60677860c0d0,0xc0900da2caa7d571,2
+np.float64,0x7390c2e0e7219,0xc08ff92df18bb5d2,2
+np.float64,0x3fca53711b34a6e2,0xc00240b07a9b529b,2
+np.float64,0x7fe7ce6dd8ef9cdb,0x408ffc961164ead9,2
+np.float64,0x7fc0c9de0d2193bb,0x408fe88e245767f6,2
+np.float64,0xc0ee217981dc4,0xc08ff343b77ea770,2
+np.float64,0x72bd4668e57a9,0xc08ff94323fd74fc,2
+np.float64,0x7fd6970e252d2e1b,0x408ff3fb1e2fead2,2
+np.float64,0x7fdcb61040396c20,0x408ff6bf926bc98f,2
+np.float64,0xda4faa25b49f6,0xc08ff1d68b3877f0,2
+np.float64,0x3feb344749f6688f,0xbfcdfba2d66c72c5,2
+np.float64,0x3fe2aa4284e55485,0xbfe8e32ae0683f57,2
+np.float64,0x3f8e8fcfd03d1fa0,0xc01843efb2129908,2
+np.float64,0x8000000000000000,0xfff0000000000000,2
+np.float64,0x3fd8e01155b1c023,0xbff5d0529dae9515,2
+np.float64,0x3fe8033f3370067e,0xbfda837c80b87e7c,2
+np.float64,0x7fc5bf831e2b7f05,0x408feb8ae3b039a0,2
+np.float64,0x3fd8dcdf5331b9bf,0xbff5d349e1ed422a,2
+np.float64,0x3fe58b4e302b169c,0xbfe243c9cbccde44,2
+np.float64,0x3fea8a2e47b5145d,0xbfd1464e37221894,2
+np.float64,0x75cd1e88eb9a4,0xc08ff8f553ef0475,2
+np.float64,0x7fcfc876e23f90ed,0x408fefebe6cc95e6,2
+np.float64,0x7f51aceb002359d5,0x408fb1263f9003fb,2
+np.float64,0x7fc2a1b877254370,0x408fe9c1ec52f8b9,2
+np.float64,0x7fd495810e292b01,0x408ff2e859414d31,2
+np.float64,0x7fd72048632e4090,0x408ff440690cebdb,2
+np.float64,0x7fd7aafaffaf6,0xc08ff803a390779f,2
+np.float64,0x7fe18067d4a300cf,0x408ff9090a02693f,2
+np.float64,0x3fdc1080f8b82102,0xbff3077bf44a89bd,2
+np.float64,0x3fc34a462f26948c,0xc005d777b3cdf139,2
+np.float64,0x3fe21e4a1fe43c94,0xbfea428acfbc6ea9,2
+np.float64,0x1f0d79083e1b0,0xc090042c65a7abf2,2
+np.float64,0x3fe8d0d15931a1a3,0xbfd779f6bbd4db78,2
+np.float64,0x3fe74578022e8af0,0xbfdd68b6c15e9f5e,2
+np.float64,0x50995dd0a132c,0xc08ffd56a5c8accf,2
+np.float64,0x3f9a6342b034c685,0xc0151ce1973c62bd,2
+np.float64,0x3f30856a00210ad4,0xc027e852f4d1fcbc,2
+np.float64,0x3febcf7646b79eed,0xbfc9e9cc9d12425c,2
+np.float64,0x8010000000000000,0x7ff8000000000000,2
+np.float64,0x3fdf520c02bea418,0xbff07ed5013f3062,2
+np.float64,0x3fe5433ecbea867e,0xbfe2df38968b6d14,2
+np.float64,0x3fb933a84e326751,0xc00ac1a144ad26c5,2
+np.float64,0x7b6d72c2f6daf,0xc08ff86b7a67f962,2
+np.float64,0xaef5dae75debc,0xc08ff46496bb2932,2
+np.float64,0x522d869aa45b1,0xc08ffd1d55281e98,2
+np.float64,0xa2462b05448c6,0xc08ff542fe0ac5fd,2
+np.float64,0x3fe2b71dd6e56e3c,0xbfe8c3690cf15415,2
+np.float64,0x3fe5778231aaef04,0xbfe26e495d09b783,2
+np.float64,0x3fe9b8d564f371ab,0xbfd42a161132970d,2
+np.float64,0x3f89ebc34033d787,0xc019373f90bfc7f1,2
+np.float64,0x3fe438ddc6e871bc,0xbfe53039341b0a93,2
+np.float64,0x873c75250e78f,0xc08ff75d8478dccd,2
+np.float64,0x807134cb00e27,0xc08ff7f5cf59c57a,2
+np.float64,0x3fac459878388b31,0xc010b6fe803bcdc2,2
+np.float64,0xca9dc7eb953b9,0xc08ff2b2fb480784,2
+np.float64,0x7feb38587bb670b0,0x408ffe21ff6d521e,2
+np.float64,0x7fd70e9b782e1d36,0x408ff437936b393a,2
+np.float64,0x3fa4037bbc2806f7,0xc012b55744c65ab2,2
+np.float64,0x3fd3d4637427a8c7,0xbffb0beebf4311ef,2
+np.float64,0x7fdabbda5db577b4,0x408ff5ecbc0d4428,2
+np.float64,0x7fda9be0a2b537c0,0x408ff5dee5d03d5a,2
+np.float64,0x7fe9c74396338e86,0x408ffd813506a18a,2
+np.float64,0x3fd058243e20b048,0xbfff822ffd8a7f21,2
+np.float64,0x3fe6aa6ca9ed54d9,0xbfdfd805629ff49e,2
+np.float64,0x3fd91431d5322864,0xbff5a025eea8c78b,2
+np.float64,0x7fe4d7f02329afdf,0x408ffb0d5d9b7878,2
+np.float64,0x3fe2954a12252a94,0xbfe917266e3e22d5,2
+np.float64,0x3fb25f7c8224bef9,0xc00e6764c81b3718,2
+np.float64,0x3fda4bddeeb497bc,0xbff4880638908c81,2
+np.float64,0x55dfd12eabbfb,0xc08ffc9b54ff4002,2
+np.float64,0x3fe8f399e031e734,0xbfd6f8e5c4dcd93f,2
+np.float64,0x3fd954a24832a945,0xbff56521f4707a06,2
+np.float64,0x3fdea911f2bd5224,0xbff0fcb2d0c2b2e2,2
+np.float64,0x3fe6b4ff8a2d69ff,0xbfdfacfc85cafeab,2
+np.float64,0x3fc7fa02042ff404,0xc00354e13b0767ad,2
+np.float64,0x3fe955088c72aa11,0xbfd593130f29949e,2
+np.float64,0xd7e74ec1afcea,0xc08ff1f74f61721c,2
+np.float64,0x3fe9d69c1ab3ad38,0xbfd3bf710a337e06,2
+np.float64,0x3fd85669a2b0acd3,0xbff65176143ccc1e,2
+np.float64,0x3fea99b285353365,0xbfd11062744783f2,2
+np.float64,0x3fe2c79f80a58f3f,0xbfe89ac33f990289,2
+np.float64,0x3f8332ba30266574,0xc01af2cb7b635783,2
+np.float64,0x30d0150061a1,0xc090119030f74c5d,2
+np.float64,0x3fdbf4cb06b7e996,0xbff31e5207aaa754,2
+np.float64,0x3fe6b56c216d6ad8,0xbfdfab42fb2941c5,2
+np.float64,0x7fc4dc239829b846,0x408feb0fb0e13fbe,2
+np.float64,0x3fd0ab85ef21570c,0xbfff0d95d6c7a35c,2
+np.float64,0x7fe13d75e5e27aeb,0x408ff8dc8efa476b,2
+np.float64,0x3fece3b832f9c770,0xbfc2e21b165d583f,2
+np.float64,0x3fe3a279c4e744f4,0xbfe68ca4fbb55dbf,2
+np.float64,0x3feb64659ef6c8cb,0xbfccb6204b6bf724,2
+np.float64,0x2279a6bc44f36,0xc0900391eeeb3e7c,2
+np.float64,0xb88046d571009,0xc08ff3c7b5b45300,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x3fe49af059a935e1,0xbfe4526c294f248f,2
+np.float64,0xa3e5508147cc,0xc0900a92ce5924b1,2
+np.float64,0x7fc56def3d2adbdd,0x408feb5f46c360e8,2
+np.float64,0x7fd99f3574333e6a,0x408ff56f3807987c,2
+np.float64,0x3fdc38d56fb871ab,0xbff2e667cad8f36a,2
+np.float64,0xd0b03507a1607,0xc08ff25bbcf8aa9d,2
+np.float64,0xc493f9078927f,0xc08ff30c5fa4e759,2
+np.float64,0x3fc86ddbcb30dbb8,0xc0031da1fcb56d75,2
+np.float64,0x7fe75dc395aebb86,0x408ffc5eef841491,2
+np.float64,0x1647618a2c8ed,0xc0900616ef9479c1,2
+np.float64,0xdf144763be289,0xc08ff196b527f3c9,2
+np.float64,0x3fe0b29da6a1653b,0xbfee078b5f4d7744,2
+np.float64,0x3feb055852b60ab1,0xbfcf3b4db5779a7a,2
+np.float64,0x3fe8bc1625f1782c,0xbfd7c739ade904bc,2
+np.float64,0x7fd19bfb8ea337f6,0x408ff11b2b55699c,2
+np.float64,0x3fed1d80d1ba3b02,0xbfc1722e8d3ce094,2
+np.float64,0x2d9c65925b38e,0xc09001f46bcd3bc5,2
+np.float64,0x7fed6f4d857ade9a,0x408fff091cf6a3b4,2
+np.float64,0x3fd070cd6ba0e19b,0xbfff5f7609ca29e8,2
+np.float64,0x7fea3508b8f46a10,0x408ffdb1f30bd6be,2
+np.float64,0x508b897ca1172,0xc08ffd58a0eb3583,2
+np.float64,0x7feba367b07746ce,0x408ffe4f0bf4bd4e,2
+np.float64,0x3fefebd5c4bfd7ac,0xbf6d20b4fcf21b69,2
+np.float64,0x3fd8ef07b8b1de0f,0xbff5c2745c0795a5,2
+np.float64,0x3fd38ed518271daa,0xbffb5d75f00f6900,2
+np.float64,0x6de0fecedbc20,0xc08ff9c307bbc647,2
+np.float64,0xafc0ffc35f820,0xc08ff45737e5d6b4,2
+np.float64,0x7fd282097ca50412,0x408ff1ae3b27bf3b,2
+np.float64,0x3fe2f2d50b65e5aa,0xbfe831042e6a1e99,2
+np.float64,0x3faa437bac3486f7,0xc01123d8d962205a,2
+np.float64,0x3feea54434fd4a88,0xbfaff202cc456647,2
+np.float64,0x3fc9e65b8633ccb7,0xc00270e77ffd19da,2
+np.float64,0x7fee15af61fc2b5e,0x408fff49a49154a3,2
+np.float64,0x7fefe670a73fcce0,0x408ffff6c44c1005,2
+np.float64,0x3fc0832d0f21065a,0xc007a2dc2f25384a,2
+np.float64,0x3fecfc96bcb9f92d,0xbfc24367c3912620,2
+np.float64,0x3feb705682b6e0ad,0xbfcc65b1bb16f9c5,2
+np.float64,0x3fe185c4f9630b8a,0xbfebcdb401af67a4,2
+np.float64,0x3fb0a5a9f6214b54,0xc00f8ada2566a047,2
+np.float64,0x7fe2908cdda52119,0x408ff9b744861fb1,2
+np.float64,0x7fee776e183ceedb,0x408fff6ee7c2f86e,2
+np.float64,0x3fce1d608f3c3ac1,0xc000b3685d006474,2
+np.float64,0x7fecf92aa339f254,0x408ffeda6c998267,2
+np.float64,0xce13cb519c27a,0xc08ff280f02882a9,2
+np.float64,0x1,0xc090c80000000000,2
+np.float64,0x3fe485a8afa90b51,0xbfe4823265d5a50a,2
+np.float64,0x3feea60908bd4c12,0xbfafdf7ad7fe203f,2
+np.float64,0x3fd2253033a44a60,0xbffd187d0ec8d5b9,2
+np.float64,0x435338fc86a68,0xc08fff6a591059dd,2
+np.float64,0x7fce8763a73d0ec6,0x408fef74f1e715ff,2
+np.float64,0x3fbe5ddb783cbbb7,0xc0089acc5afa794b,2
+np.float64,0x7fe4cf19ada99e32,0x408ffb0877ca302b,2
+np.float64,0x3fe94c9ea1b2993d,0xbfd5b1c2e867b911,2
+np.float64,0x3fe75541c72eaa84,0xbfdd2a27aa117699,2
+np.float64,0x8000000000000001,0x7ff8000000000000,2
+np.float64,0x7fdbec7f2c37d8fd,0x408ff66d69a7f818,2
+np.float64,0x8ef10d091de22,0xc08ff6b9ca5094f8,2
+np.float64,0x3fea69025b74d205,0xbfd1b9fe2c252c70,2
+np.float64,0x562376d0ac46f,0xc08ffc924111cd31,2
+np.float64,0x8e8097ab1d013,0xc08ff6c2e2706f67,2
+np.float64,0x3fca6803ed34d008,0xc00237aef808825b,2
+np.float64,0x7fe8fe9067b1fd20,0x408ffd25f459a7d1,2
+np.float64,0x3f918e8c7f233,0xc0900009fe011d54,2
+np.float64,0x3fdfe773833fcee7,0xbff011bc1af87bb9,2
+np.float64,0xefffef6fdfffe,0xc08ff0beb0f09eb0,2
+np.float64,0x7fe64610282c8c1f,0x408ffbd17209db18,2
+np.float64,0xe66be8c1ccd7d,0xc08ff13706c056e1,2
+np.float64,0x2837e570506fd,0xc09002ae4dae0c1a,2
+np.float64,0x3febe3a081f7c741,0xbfc964171f2a5a47,2
+np.float64,0x3fe21ed09a243da1,0xbfea41342d29c3ff,2
+np.float64,0x3fe1596c8162b2d9,0xbfec431eee30823a,2
+np.float64,0x8f2b9a131e574,0xc08ff6b51104ed4e,2
+np.float64,0x3fe88ed179711da3,0xbfd870d08a4a4b0c,2
+np.float64,0x34159bc2682b4,0xc09001305a885f94,2
+np.float64,0x1ed31e543da65,0xc0900437481577f8,2
+np.float64,0x3feafbe9de75f7d4,0xbfcf7bcdbacf1c61,2
+np.float64,0xfb16fb27f62e0,0xc08ff03938e682a2,2
+np.float64,0x3fe5cd5ba7eb9ab7,0xbfe1b7165771af3c,2
+np.float64,0x7fe72905e76e520b,0x408ffc44c4e7e80c,2
+np.float64,0x7fb7136e2e2e26db,0x408fe439fd383fb7,2
+np.float64,0x8fa585e11f4c,0xc0900b55a08a486b,2
+np.float64,0x7fed985ce47b30b9,0x408fff192b596821,2
+np.float64,0x3feaaf0869755e11,0xbfd0c671571b3764,2
+np.float64,0x3fa40fd4ec281faa,0xc012b1c8dc0b9e5f,2
+np.float64,0x7fda2a70993454e0,0x408ff5ad47b0c68a,2
+np.float64,0x3fe5f7e931abefd2,0xbfe15d52b3605abf,2
+np.float64,0x3fe9fc6d3533f8da,0xbfd338b06a790994,2
+np.float64,0x3fe060649420c0c9,0xbfeeed1756111891,2
+np.float64,0x3fce8435e33d086c,0xc0008c41cea9ed40,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0x617820aec2f05,0xc08ffb251e9af0f0,2
+np.float64,0x7fcc4ab6ee38956d,0x408fee9419c8f77d,2
+np.float64,0x7fdefda2fc3dfb45,0x408ff7a15063bc05,2
+np.float64,0x7fe5138ccaaa2719,0x408ffb2e30f3a46e,2
+np.float64,0x3fe3817a836702f5,0xbfe6da7c2b25e35a,2
+np.float64,0x3fb8a7dafa314fb6,0xc00b025bc0784ebe,2
+np.float64,0x349dc420693d,0xc09011215825d2c8,2
+np.float64,0x6b0e504ad61cb,0xc08ffa0fee9c5cd6,2
+np.float64,0x273987644e732,0xc09002d34294ed79,2
+np.float64,0x3fc0bd8a6e217b15,0xc0077a5828b4d2f5,2
+np.float64,0x758b48c4eb16a,0xc08ff8fbc8fbe46a,2
+np.float64,0x3fc8a9a52631534a,0xc00301854ec0ef81,2
+np.float64,0x7fe79d29a76f3a52,0x408ffc7e1607a4c1,2
+np.float64,0x3fd7d3ebce2fa7d8,0xbff6ce8a94aebcda,2
+np.float64,0x7fd1cb68a52396d0,0x408ff13a17533b2b,2
+np.float64,0x7fda514a5d34a294,0x408ff5be5e081578,2
+np.float64,0x3fc40b4382281687,0xc0056632c8067228,2
+np.float64,0x7feff1208c3fe240,0x408ffffaa180fa0d,2
+np.float64,0x8f58739f1eb0f,0xc08ff6b17402689d,2
+np.float64,0x1fdbe9a23fb7e,0xc090040685b2d24f,2
+np.float64,0xcb1d0e87963a2,0xc08ff2abbd903b82,2
+np.float64,0x3fc45a6a1a28b4d4,0xc00538f86c4aeaee,2
+np.float64,0x3fe61885b1ac310b,0xbfe118fd2251d2ec,2
+np.float64,0x3fedf584c8fbeb0a,0xbfb8572433ff67a9,2
+np.float64,0x7fb0bddd1a217bb9,0x408fe085e0d621db,2
+np.float64,0x72d8d3e0e5b3,0xc0900ca02f68c7a1,2
+np.float64,0x5cca6ff6b994f,0xc08ffbb6751fda01,2
+np.float64,0x7fe3197839a632ef,0x408ffa0b2fccfb68,2
+np.float64,0x3fcce4d9c139c9b4,0xc0012dae05baa91b,2
+np.float64,0x3fe76d00f62eda02,0xbfdccc5f12799be1,2
+np.float64,0x3fc53c22f72a7846,0xc004bbaa9cbc7958,2
+np.float64,0x7fdda02f1ebb405d,0x408ff71c37c71659,2
+np.float64,0x3fe0844eaba1089d,0xbfee884722762583,2
+np.float64,0x3febb438dc776872,0xbfca9f05e1c691f1,2
+np.float64,0x3fdf4170cdbe82e2,0xbff08b1561c8d848,2
+np.float64,0x3fce1b8d6f3c371b,0xc000b41b69507671,2
+np.float64,0x8370e60706e1d,0xc08ff7b19ea0b4ca,2
+np.float64,0x7fa5bf92382b7f23,0x408fdb8aebb3df87,2
+np.float64,0x7fe4a59979a94b32,0x408ffaf15c1358cd,2
+np.float64,0x3faa66086034cc11,0xc0111c466b7835d6,2
+np.float64,0x7fb7a958262f52af,0x408fe48408b1e093,2
+np.float64,0x3fdaacc5f635598c,0xbff43390d06b5614,2
+np.float64,0x3fd2825b9e2504b7,0xbffca3234264f109,2
+np.float64,0x3fcede160a3dbc2c,0xc0006a759e29060c,2
+np.float64,0x7fd3b19603a7632b,0x408ff265b528371c,2
+np.float64,0x7fcf8a86ea3f150d,0x408fefd552e7f3b2,2
+np.float64,0xedbcc0f7db798,0xc08ff0daad12096b,2
+np.float64,0xf1e1683de3c2d,0xc08ff0a7a0a37e00,2
+np.float64,0xb6ebd9bf6dd7b,0xc08ff3e11e28378d,2
+np.float64,0x3fec8090d6f90122,0xbfc56031b72194cc,2
+np.float64,0x3fd3e10e37a7c21c,0xbffafd34a3ebc933,2
+np.float64,0x7fbb1c96aa36392c,0x408fe616347b3342,2
+np.float64,0x3fe2f3996f25e733,0xbfe82f25bc5d1bbd,2
+np.float64,0x7fe8709da870e13a,0x408ffce3ab6ce59a,2
+np.float64,0x7fea3233d1b46467,0x408ffdb0b3bbc6de,2
+np.float64,0x65fa4112cbf49,0xc08ffa9f85eb72b9,2
+np.float64,0x3fca2cae9f34595d,0xc00251bb275afb87,2
+np.float64,0x8135fd9f026c0,0xc08ff7e42e14dce7,2
+np.float64,0x7fe0a6f057e14de0,0x408ff876081a4bfe,2
+np.float64,0x10000000000000,0xc08ff00000000000,2
+np.float64,0x3fe1fd506263faa1,0xbfea96dd8c543b72,2
+np.float64,0xa5532c554aa66,0xc08ff50bf5bfc66d,2
+np.float64,0xc239d00b8473a,0xc08ff32ff0ea3f92,2
+np.float64,0x7fdb5314e336a629,0x408ff62d4ff60d82,2
+np.float64,0x3fe5f506e2abea0e,0xbfe16362a4682120,2
+np.float64,0x3fa20c60202418c0,0xc0134e08d82608b6,2
+np.float64,0x7fe03864b22070c8,0x408ff82866d65e9a,2
+np.float64,0x3fe72cf5656e59eb,0xbfddca298969effa,2
+np.float64,0x5c295386b852b,0xc08ffbca90b136c9,2
+np.float64,0x7fd71e5020ae3c9f,0x408ff43f6d58eb7c,2
+np.float64,0x3fd1905a842320b5,0xbffdd8ecd288159c,2
+np.float64,0x3fe6bddb256d7bb6,0xbfdf88fee1a820bb,2
+np.float64,0xe061b967c0c37,0xc08ff18581951561,2
+np.float64,0x3fe534f65cea69ed,0xbfe2fe45fe7d3040,2
+np.float64,0xdc7dae07b8fb6,0xc08ff1b93074ea76,2
+np.float64,0x3fd0425082a084a1,0xbfffa11838b21633,2
+np.float64,0xba723fc974e48,0xc08ff3a8b8d01c58,2
+np.float64,0x3fce42ffc73c8600,0xc000a5062678406e,2
+np.float64,0x3f2e6d3c7e5ce,0xc090001304cfd1c7,2
+np.float64,0x3fd4b2e5f7a965cc,0xbffa0e6e6bae0a68,2
+np.float64,0x3fe6db1d18edb63a,0xbfdf128158ee92d9,2
+np.float64,0x7fe4e5792f29caf1,0x408ffb14d9dbf133,2
+np.float64,0x3fc11cdf992239bf,0xc00739569619cd77,2
+np.float64,0x3fc05ea11220bd42,0xc007bc841b48a890,2
+np.float64,0x4bd592d497ab3,0xc08ffe0ab1c962e2,2
+np.float64,0x280068fc5000e,0xc09002b64955e865,2
+np.float64,0x7fe2f2637065e4c6,0x408ff9f379c1253a,2
+np.float64,0x3fefc38467ff8709,0xbf85e53e64b9a424,2
+np.float64,0x2d78ec5a5af1e,0xc09001f8ea8601e0,2
+np.float64,0x7feeef2b957dde56,0x408fff9bebe995f7,2
+np.float64,0x2639baf44c738,0xc09002f9618d623b,2
+np.float64,0x3fc562964d2ac52d,0xc004a6d76959ef78,2
+np.float64,0x3fe21b071fe4360e,0xbfea4adb2cd96ade,2
+np.float64,0x7fe56aa6802ad54c,0x408ffb5d81d1a898,2
+np.float64,0x4296b452852d7,0xc08fff8ad7fbcbe1,2
+np.float64,0x7fe3fac4ff27f589,0x408ffa9049eec479,2
+np.float64,0x7fe7a83e6caf507c,0x408ffc837f436604,2
+np.float64,0x3fc4ac5b872958b7,0xc0050add72381ac3,2
+np.float64,0x3fd6d697c02dad30,0xbff7c931a3eefb01,2
+np.float64,0x3f61e391c023c724,0xc021ad91e754f94b,2
+np.float64,0x10817f9c21031,0xc09007d20434d7bc,2
+np.float64,0x3fdb9c4c4cb73899,0xbff367d8615c5ece,2
+np.float64,0x3fe26ead6b64dd5b,0xbfe977771def5989,2
+np.float64,0x3fc43ea5c3287d4c,0xc00548c2163ae631,2
+np.float64,0x3fe05bd8bba0b7b1,0xbfeef9ea0db91abc,2
+np.float64,0x3feac78369358f07,0xbfd071e2b0aeab39,2
+np.float64,0x7fe254922ca4a923,0x408ff991bdd4e5d3,2
+np.float64,0x3fe5a2f5842b45eb,0xbfe21135c9a71666,2
+np.float64,0x3fd5daf98c2bb5f3,0xbff8cd24f7c07003,2
+np.float64,0x3fcb2a1384365427,0xc001e40f0d04299a,2
+np.float64,0x3fe073974360e72f,0xbfeeb7183a9930b7,2
+np.float64,0xcf3440819e688,0xc08ff270d3a71001,2
+np.float64,0x3fd35656cda6acae,0xbffba083fba4939d,2
+np.float64,0x7fe6c59b4ded8b36,0x408ffc12ce725425,2
+np.float64,0x3fba896f943512df,0xc00a291cb6947701,2
+np.float64,0x7fe54917e86a922f,0x408ffb4b5e0fb848,2
+np.float64,0x7fed2a3f51ba547e,0x408ffeede945a948,2
+np.float64,0x3fdc72bd5038e57b,0xbff2b73b7e93e209,2
+np.float64,0x7fefdb3f9f3fb67e,0x408ffff2b702a768,2
+np.float64,0x3fb0184430203088,0xc00fee8c1351763c,2
+np.float64,0x7d6c3668fad87,0xc08ff83c195f2cca,2
+np.float64,0x3fd5aa254aab544b,0xbff900f16365991b,2
+np.float64,0x3f963daab02c7b55,0xc0161974495b1b71,2
+np.float64,0x3fa7a9c5982f538b,0xc011bde0f6052a89,2
+np.float64,0xb3a5a74b674b5,0xc08ff4167bc97c81,2
+np.float64,0x7fad0c14503a1828,0x408fdee1f2d56cd7,2
+np.float64,0x43e0e9d887c1e,0xc08fff522837b13b,2
+np.float64,0x3fe513b20aea2764,0xbfe346ea994100e6,2
+np.float64,0x7fe4e10393e9c206,0x408ffb12630f6a06,2
+np.float64,0x68b286e2d1651,0xc08ffa51c0d795d4,2
+np.float64,0x7fe8de453331bc89,0x408ffd17012b75ac,2
+np.float64,0x1b3d77d4367b0,0xc09004edea60aa36,2
+np.float64,0x3fd351cbc326a398,0xbffba5f0f4d5fdba,2
+np.float64,0x3fd264951b24c92a,0xbffcc8636788b9bf,2
+np.float64,0xd2465761a48cb,0xc08ff2455c9c53e5,2
+np.float64,0x7fe46a0ef028d41d,0x408ffacfe32c6f5d,2
+np.float64,0x3fafd8ac4c3fb159,0xc010071bf33195d0,2
+np.float64,0x902aec5d2055e,0xc08ff6a08e28aabc,2
+np.float64,0x3fcea61bb03d4c37,0xc0007f76e509b657,2
+np.float64,0x7fe8d90f9571b21e,0x408ffd1495f952e7,2
+np.float64,0x7fa650c9442ca192,0x408fdbd6ff22fdd8,2
+np.float64,0x3fe8ecfdf171d9fc,0xbfd7115df40e8580,2
+np.float64,0x7fd4e6fe7f29cdfc,0x408ff315b0dae183,2
+np.float64,0x77df4c52efbea,0xc08ff8c1d5c1df33,2
+np.float64,0xe200b0cfc4016,0xc08ff1703cfb8e79,2
+np.float64,0x3fe230ea7e2461d5,0xbfea132d2385160e,2
+np.float64,0x7fd1f7ced723ef9d,0x408ff156bfbf92a4,2
+np.float64,0x3fea762818f4ec50,0xbfd18c12a88e5f79,2
+np.float64,0x7feea4ba7c7d4974,0x408fff8004164054,2
+np.float64,0x833ec605067d9,0xc08ff7b606383841,2
+np.float64,0x7fd0c2d7fea185af,0x408ff0894f3a0cf4,2
+np.float64,0x3fe1d7d61d23afac,0xbfeaf76fee875d3e,2
+np.float64,0x65adecb0cb5be,0xc08ffaa82cb09d68,2
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv
new file mode 100644
index 00000000..3b913ccd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sin.csv
@@ -0,0 +1,1370 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x004b4716,2
+np.float32,0x007b2490,0x007b2490,2
+np.float32,0x007c99fa,0x007c99fa,2
+np.float32,0x00734a0c,0x00734a0c,2
+np.float32,0x0070de24,0x0070de24,2
+np.float32,0x007fffff,0x007fffff,2
+np.float32,0x00000001,0x00000001,2
+## -ve denormals ##
+np.float32,0x80495d65,0x80495d65,2
+np.float32,0x806894f6,0x806894f6,2
+np.float32,0x80555a76,0x80555a76,2
+np.float32,0x804e1fb8,0x804e1fb8,2
+np.float32,0x80687de9,0x80687de9,2
+np.float32,0x807fffff,0x807fffff,2
+np.float32,0x80000001,0x80000001,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x00000000,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x00800000,0x00800000,2
+np.float32,0x80800000,0x80800000,2
+## 1.00f ##
+np.float32,0x3f800000,0x3f576aa4,2
+np.float32,0x3f800001,0x3f576aa6,2
+np.float32,0x3f800002,0x3f576aa7,2
+np.float32,0xc090a8b0,0x3f7b4e48,2
+np.float32,0x41ce3184,0x3f192d43,2
+np.float32,0xc1d85848,0xbf7161cb,2
+np.float32,0x402b8820,0x3ee3f29f,2
+np.float32,0x42b4e454,0x3f1d0151,2
+np.float32,0x42a67a60,0x3f7ffa4c,2
+np.float32,0x41d92388,0x3f67beef,2
+np.float32,0x422dd66c,0xbeffb0c1,2
+np.float32,0xc28f5be6,0xbf0bae79,2
+np.float32,0x41ab2674,0x3f0ffe2b,2
+np.float32,0x3f490fdb,0x3f3504f3,2
+np.float32,0xbf490fdb,0xbf3504f3,2
+np.float32,0x3fc90fdb,0x3f800000,2
+np.float32,0xbfc90fdb,0xbf800000,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x3fc90fdb,0x3f800000,2
+np.float32,0xbfc90fdb,0xbf800000,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x4016cbe4,0x3f3504f3,2
+np.float32,0xc016cbe4,0xbf3504f3,2
+np.float32,0x4096cbe4,0xbf800000,2
+np.float32,0xc096cbe4,0x3f800000,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x407b53d2,0xbf3504f5,2
+np.float32,0xc07b53d2,0x3f3504f5,2
+np.float32,0x40fb53d2,0x3f800000,2
+np.float32,0xc0fb53d2,0xbf800000,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x4096cbe4,0xbf800000,2
+np.float32,0xc096cbe4,0x3f800000,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x40afede0,0xbf3504ef,2
+np.float32,0xc0afede0,0x3f3504ef,2
+np.float32,0x412fede0,0xbf800000,2
+np.float32,0xc12fede0,0x3f800000,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x40e231d6,0x3f3504f3,2
+np.float32,0xc0e231d6,0xbf3504f3,2
+np.float32,0x416231d6,0x3f800000,2
+np.float32,0xc16231d6,0xbf800000,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x40fb53d2,0x3f800000,2
+np.float32,0xc0fb53d2,0xbf800000,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x410a3ae7,0x3f3504eb,2
+np.float32,0xc10a3ae7,0xbf3504eb,2
+np.float32,0x418a3ae7,0xbf800000,2
+np.float32,0xc18a3ae7,0x3f800000,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x41235ce2,0xbf3504f7,2
+np.float32,0xc1235ce2,0x3f3504f7,2
+np.float32,0x41a35ce2,0x3f800000,2
+np.float32,0xc1a35ce2,0xbf800000,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x412fede0,0xbf800000,2
+np.float32,0xc12fede0,0x3f800000,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x413c7edd,0xbf3504f3,2
+np.float32,0xc13c7edd,0x3f3504f3,2
+np.float32,0x41bc7edd,0xbf800000,2
+np.float32,0xc1bc7edd,0x3f800000,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x4155a0d9,0x3f3504fb,2
+np.float32,0xc155a0d9,0xbf3504fb,2
+np.float32,0x41d5a0d9,0x3f800000,2
+np.float32,0xc1d5a0d9,0xbf800000,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x416231d6,0x3f800000,2
+np.float32,0xc16231d6,0xbf800000,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x416ec2d4,0x3f3504ef,2
+np.float32,0xc16ec2d4,0xbf3504ef,2
+np.float32,0x41eec2d4,0xbf800000,2
+np.float32,0xc1eec2d4,0x3f800000,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x4183f268,0xbf3504ff,2
+np.float32,0xc183f268,0x3f3504ff,2
+np.float32,0x4203f268,0x3f800000,2
+np.float32,0xc203f268,0xbf800000,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x418a3ae7,0xbf800000,2
+np.float32,0xc18a3ae7,0x3f800000,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x41908365,0xbf3504f6,2
+np.float32,0xc1908365,0x3f3504f6,2
+np.float32,0x42108365,0xbf800000,2
+np.float32,0xc2108365,0x3f800000,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x419d1463,0x3f3504f8,2
+np.float32,0xc19d1463,0xbf3504f8,2
+np.float32,0x421d1463,0x3f800000,2
+np.float32,0xc21d1463,0xbf800000,2
+np.float32,0x429d1463,0xb5c55799,2
+np.float32,0xc29d1463,0x35c55799,2
+np.float32,0x41a35ce2,0x3f800000,2
+np.float32,0xc1a35ce2,0xbf800000,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x42a35ce2,0x363889b6,2
+np.float32,0xc2a35ce2,0xb63889b6,2
+np.float32,0x41a9a561,0x3f3504e7,2
+np.float32,0xc1a9a561,0xbf3504e7,2
+np.float32,0x4229a561,0xbf800000,2
+np.float32,0xc229a561,0x3f800000,2
+np.float32,0x42a9a561,0xb68733d0,2
+np.float32,0xc2a9a561,0x368733d0,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x42afede0,0x36b222c4,2
+np.float32,0xc2afede0,0xb6b222c4,2
+np.float32,0x41b6365e,0xbf3504f0,2
+np.float32,0xc1b6365e,0x3f3504f0,2
+np.float32,0x4236365e,0x3f800000,2
+np.float32,0xc236365e,0xbf800000,2
+np.float32,0x42b6365e,0x358bb91c,2
+np.float32,0xc2b6365e,0xb58bb91c,2
+np.float32,0x41bc7edd,0xbf800000,2
+np.float32,0xc1bc7edd,0x3f800000,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x42bc7edd,0x34800add,2
+np.float32,0xc2bc7edd,0xb4800add,2
+np.float32,0x41c2c75c,0xbf3504ef,2
+np.float32,0xc1c2c75c,0x3f3504ef,2
+np.float32,0x4242c75c,0xbf800000,2
+np.float32,0xc242c75c,0x3f800000,2
+np.float32,0x42c2c75c,0xb5cbbe8a,2
+np.float32,0xc2c2c75c,0x35cbbe8a,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x42c90fdb,0x363bbd2e,2
+np.float32,0xc2c90fdb,0xb63bbd2e,2
+np.float32,0x41cf585a,0x3f3504ff,2
+np.float32,0xc1cf585a,0xbf3504ff,2
+np.float32,0x424f585a,0x3f800000,2
+np.float32,0xc24f585a,0xbf800000,2
+np.float32,0x42cf585a,0xb688cd8c,2
+np.float32,0xc2cf585a,0x3688cd8c,2
+np.float32,0x41d5a0d9,0x3f800000,2
+np.float32,0xc1d5a0d9,0xbf800000,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x42d5a0d9,0x36b3bc81,2
+np.float32,0xc2d5a0d9,0xb6b3bc81,2
+np.float32,0x41dbe958,0x3f3504e0,2
+np.float32,0xc1dbe958,0xbf3504e0,2
+np.float32,0x425be958,0xbf800000,2
+np.float32,0xc25be958,0x3f800000,2
+np.float32,0x42dbe958,0xb6deab75,2
+np.float32,0xc2dbe958,0x36deab75,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x42e231d6,0x3499a6a2,2
+np.float32,0xc2e231d6,0xb499a6a2,2
+np.float32,0x41e87a55,0xbf3504f8,2
+np.float32,0xc1e87a55,0x3f3504f8,2
+np.float32,0x42687a55,0x3f800000,2
+np.float32,0xc2687a55,0xbf800000,2
+np.float32,0x42e87a55,0xb5d2257b,2
+np.float32,0xc2e87a55,0x35d2257b,2
+np.float32,0x41eec2d4,0xbf800000,2
+np.float32,0xc1eec2d4,0x3f800000,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x42eec2d4,0x363ef0a7,2
+np.float32,0xc2eec2d4,0xb63ef0a7,2
+np.float32,0x41f50b53,0xbf3504e7,2
+np.float32,0xc1f50b53,0x3f3504e7,2
+np.float32,0x42750b53,0xbf800000,2
+np.float32,0xc2750b53,0x3f800000,2
+np.float32,0x42f50b53,0xb68a6748,2
+np.float32,0xc2f50b53,0x368a6748,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x42fb53d2,0x36b5563d,2
+np.float32,0xc2fb53d2,0xb6b5563d,2
+np.float32,0x4200ce28,0x3f3504f0,2
+np.float32,0xc200ce28,0xbf3504f0,2
+np.float32,0x4280ce28,0x3f800000,2
+np.float32,0xc280ce28,0xbf800000,2
+np.float32,0x4300ce28,0x357dd672,2
+np.float32,0xc300ce28,0xb57dd672,2
+np.float32,0x4203f268,0x3f800000,2
+np.float32,0xc203f268,0xbf800000,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x4303f268,0x37059a13,2
+np.float32,0xc303f268,0xb7059a13,2
+np.float32,0x420716a7,0x3f3504ee,2
+np.float32,0xc20716a7,0xbf3504ee,2
+np.float32,0x428716a7,0xbf800000,2
+np.float32,0xc28716a7,0x3f800000,2
+np.float32,0x430716a7,0xb5d88c6d,2
+np.float32,0xc30716a7,0x35d88c6d,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x430a3ae7,0x37308908,2
+np.float32,0xc30a3ae7,0xb7308908,2
+np.float32,0x420d5f26,0xbf350500,2
+np.float32,0xc20d5f26,0x3f350500,2
+np.float32,0x428d5f26,0x3f800000,2
+np.float32,0xc28d5f26,0xbf800000,2
+np.float32,0x430d5f26,0xb68c0105,2
+np.float32,0xc30d5f26,0x368c0105,2
+np.float32,0x42108365,0xbf800000,2
+np.float32,0xc2108365,0x3f800000,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x43108365,0xb612200d,2
+np.float32,0xc3108365,0x3612200d,2
+np.float32,0x4213a7a5,0xbf3504df,2
+np.float32,0xc213a7a5,0x3f3504df,2
+np.float32,0x4293a7a5,0xbf800000,2
+np.float32,0xc293a7a5,0x3f800000,2
+np.float32,0x4313a7a5,0xb6e1deee,2
+np.float32,0xc313a7a5,0x36e1deee,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x4316cbe4,0x34ccde2e,2
+np.float32,0xc316cbe4,0xb4ccde2e,2
+np.float32,0x4219f024,0x3f35050f,2
+np.float32,0xc219f024,0xbf35050f,2
+np.float32,0x4299f024,0x3f800000,2
+np.float32,0xc299f024,0xbf800000,2
+np.float32,0x4319f024,0xb71bde6c,2
+np.float32,0xc319f024,0x371bde6c,2
+np.float32,0x421d1463,0x3f800000,2
+np.float32,0xc21d1463,0xbf800000,2
+np.float32,0x429d1463,0xb5c55799,2
+np.float32,0xc29d1463,0x35c55799,2
+np.float32,0x431d1463,0x36455799,2
+np.float32,0xc31d1463,0xb6455799,2
+np.float32,0x422038a3,0x3f3504d0,2
+np.float32,0xc22038a3,0xbf3504d0,2
+np.float32,0x42a038a3,0xbf800000,2
+np.float32,0xc2a038a3,0x3f800000,2
+np.float32,0x432038a3,0xb746cd61,2
+np.float32,0xc32038a3,0x3746cd61,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x42a35ce2,0x363889b6,2
+np.float32,0xc2a35ce2,0xb63889b6,2
+np.float32,0x43235ce2,0x36b889b6,2
+np.float32,0xc3235ce2,0xb6b889b6,2
+np.float32,0x42268121,0xbf3504f1,2
+np.float32,0xc2268121,0x3f3504f1,2
+np.float32,0x42a68121,0x3f800000,2
+np.float32,0xc2a68121,0xbf800000,2
+np.float32,0x43268121,0x35643aac,2
+np.float32,0xc3268121,0xb5643aac,2
+np.float32,0x4229a561,0xbf800000,2
+np.float32,0xc229a561,0x3f800000,2
+np.float32,0x42a9a561,0xb68733d0,2
+np.float32,0xc2a9a561,0x368733d0,2
+np.float32,0x4329a561,0x370733d0,2
+np.float32,0xc329a561,0xb70733d0,2
+np.float32,0x422cc9a0,0xbf3504ee,2
+np.float32,0xc22cc9a0,0x3f3504ee,2
+np.float32,0x42acc9a0,0xbf800000,2
+np.float32,0xc2acc9a0,0x3f800000,2
+np.float32,0x432cc9a0,0xb5e55a50,2
+np.float32,0xc32cc9a0,0x35e55a50,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x42afede0,0x36b222c4,2
+np.float32,0xc2afede0,0xb6b222c4,2
+np.float32,0x432fede0,0x373222c4,2
+np.float32,0xc32fede0,0xb73222c4,2
+np.float32,0x4233121f,0x3f350500,2
+np.float32,0xc233121f,0xbf350500,2
+np.float32,0x42b3121f,0x3f800000,2
+np.float32,0xc2b3121f,0xbf800000,2
+np.float32,0x4333121f,0xb68f347d,2
+np.float32,0xc333121f,0x368f347d,2
+np.float32,0x4236365e,0x3f800000,2
+np.float32,0xc236365e,0xbf800000,2
+np.float32,0x42b6365e,0x358bb91c,2
+np.float32,0xc2b6365e,0xb58bb91c,2
+np.float32,0x4336365e,0xb60bb91c,2
+np.float32,0xc336365e,0x360bb91c,2
+np.float32,0x42395a9e,0x3f3504df,2
+np.float32,0xc2395a9e,0xbf3504df,2
+np.float32,0x42b95a9e,0xbf800000,2
+np.float32,0xc2b95a9e,0x3f800000,2
+np.float32,0x43395a9e,0xb6e51267,2
+np.float32,0xc3395a9e,0x36e51267,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x42bc7edd,0x34800add,2
+np.float32,0xc2bc7edd,0xb4800add,2
+np.float32,0x433c7edd,0x35000add,2
+np.float32,0xc33c7edd,0xb5000add,2
+np.float32,0x423fa31d,0xbf35050f,2
+np.float32,0xc23fa31d,0x3f35050f,2
+np.float32,0x42bfa31d,0x3f800000,2
+np.float32,0xc2bfa31d,0xbf800000,2
+np.float32,0x433fa31d,0xb71d7828,2
+np.float32,0xc33fa31d,0x371d7828,2
+np.float32,0x4242c75c,0xbf800000,2
+np.float32,0xc242c75c,0x3f800000,2
+np.float32,0x42c2c75c,0xb5cbbe8a,2
+np.float32,0xc2c2c75c,0x35cbbe8a,2
+np.float32,0x4342c75c,0x364bbe8a,2
+np.float32,0xc342c75c,0xb64bbe8a,2
+np.float32,0x4245eb9c,0xbf3504d0,2
+np.float32,0xc245eb9c,0x3f3504d0,2
+np.float32,0x42c5eb9c,0xbf800000,2
+np.float32,0xc2c5eb9c,0x3f800000,2
+np.float32,0x4345eb9c,0xb748671d,2
+np.float32,0xc345eb9c,0x3748671d,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x42c90fdb,0x363bbd2e,2
+np.float32,0xc2c90fdb,0xb63bbd2e,2
+np.float32,0x43490fdb,0x36bbbd2e,2
+np.float32,0xc3490fdb,0xb6bbbd2e,2
+np.float32,0x424c341a,0x3f3504f1,2
+np.float32,0xc24c341a,0xbf3504f1,2
+np.float32,0x42cc341a,0x3f800000,2
+np.float32,0xc2cc341a,0xbf800000,2
+np.float32,0x434c341a,0x354a9ee6,2
+np.float32,0xc34c341a,0xb54a9ee6,2
+np.float32,0x424f585a,0x3f800000,2
+np.float32,0xc24f585a,0xbf800000,2
+np.float32,0x42cf585a,0xb688cd8c,2
+np.float32,0xc2cf585a,0x3688cd8c,2
+np.float32,0x434f585a,0x3708cd8c,2
+np.float32,0xc34f585a,0xb708cd8c,2
+np.float32,0x42527c99,0x3f3504ee,2
+np.float32,0xc2527c99,0xbf3504ee,2
+np.float32,0x42d27c99,0xbf800000,2
+np.float32,0xc2d27c99,0x3f800000,2
+np.float32,0x43527c99,0xb5f22833,2
+np.float32,0xc3527c99,0x35f22833,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x42d5a0d9,0x36b3bc81,2
+np.float32,0xc2d5a0d9,0xb6b3bc81,2
+np.float32,0x4355a0d9,0x3733bc81,2
+np.float32,0xc355a0d9,0xb733bc81,2
+np.float32,0x4258c518,0xbf350500,2
+np.float32,0xc258c518,0x3f350500,2
+np.float32,0x42d8c518,0x3f800000,2
+np.float32,0xc2d8c518,0xbf800000,2
+np.float32,0x4358c518,0xb69267f6,2
+np.float32,0xc358c518,0x369267f6,2
+np.float32,0x425be958,0xbf800000,2
+np.float32,0xc25be958,0x3f800000,2
+np.float32,0x42dbe958,0xb6deab75,2
+np.float32,0xc2dbe958,0x36deab75,2
+np.float32,0x435be958,0x375eab75,2
+np.float32,0xc35be958,0xb75eab75,2
+np.float32,0x425f0d97,0xbf3504df,2
+np.float32,0xc25f0d97,0x3f3504df,2
+np.float32,0x42df0d97,0xbf800000,2
+np.float32,0xc2df0d97,0x3f800000,2
+np.float32,0x435f0d97,0xb6e845e0,2
+np.float32,0xc35f0d97,0x36e845e0,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x42e231d6,0x3499a6a2,2
+np.float32,0xc2e231d6,0xb499a6a2,2
+np.float32,0x436231d6,0x3519a6a2,2
+np.float32,0xc36231d6,0xb519a6a2,2
+np.float32,0x42655616,0x3f35050f,2
+np.float32,0xc2655616,0xbf35050f,2
+np.float32,0x42e55616,0x3f800000,2
+np.float32,0xc2e55616,0xbf800000,2
+np.float32,0x43655616,0xb71f11e5,2
+np.float32,0xc3655616,0x371f11e5,2
+np.float32,0x42687a55,0x3f800000,2
+np.float32,0xc2687a55,0xbf800000,2
+np.float32,0x42e87a55,0xb5d2257b,2
+np.float32,0xc2e87a55,0x35d2257b,2
+np.float32,0x43687a55,0x3652257b,2
+np.float32,0xc3687a55,0xb652257b,2
+np.float32,0x426b9e95,0x3f3504cf,2
+np.float32,0xc26b9e95,0xbf3504cf,2
+np.float32,0x42eb9e95,0xbf800000,2
+np.float32,0xc2eb9e95,0x3f800000,2
+np.float32,0x436b9e95,0xb74a00d9,2
+np.float32,0xc36b9e95,0x374a00d9,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x42eec2d4,0x363ef0a7,2
+np.float32,0xc2eec2d4,0xb63ef0a7,2
+np.float32,0x436ec2d4,0x36bef0a7,2
+np.float32,0xc36ec2d4,0xb6bef0a7,2
+np.float32,0x4271e713,0xbf3504f1,2
+np.float32,0xc271e713,0x3f3504f1,2
+np.float32,0x42f1e713,0x3f800000,2
+np.float32,0xc2f1e713,0xbf800000,2
+np.float32,0x4371e713,0x35310321,2
+np.float32,0xc371e713,0xb5310321,2
+np.float32,0x42750b53,0xbf800000,2
+np.float32,0xc2750b53,0x3f800000,2
+np.float32,0x42f50b53,0xb68a6748,2
+np.float32,0xc2f50b53,0x368a6748,2
+np.float32,0x43750b53,0x370a6748,2
+np.float32,0xc3750b53,0xb70a6748,2
+np.float32,0x42782f92,0xbf3504ee,2
+np.float32,0xc2782f92,0x3f3504ee,2
+np.float32,0x42f82f92,0xbf800000,2
+np.float32,0xc2f82f92,0x3f800000,2
+np.float32,0x43782f92,0xb5fef616,2
+np.float32,0xc3782f92,0x35fef616,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x42fb53d2,0x36b5563d,2
+np.float32,0xc2fb53d2,0xb6b5563d,2
+np.float32,0x437b53d2,0x3735563d,2
+np.float32,0xc37b53d2,0xb735563d,2
+np.float32,0x427e7811,0x3f350500,2
+np.float32,0xc27e7811,0xbf350500,2
+np.float32,0x42fe7811,0x3f800000,2
+np.float32,0xc2fe7811,0xbf800000,2
+np.float32,0x437e7811,0xb6959b6f,2
+np.float32,0xc37e7811,0x36959b6f,2
+np.float32,0x4280ce28,0x3f800000,2
+np.float32,0xc280ce28,0xbf800000,2
+np.float32,0x4300ce28,0x357dd672,2
+np.float32,0xc300ce28,0xb57dd672,2
+np.float32,0x4380ce28,0xb5fdd672,2
+np.float32,0xc380ce28,0x35fdd672,2
+np.float32,0x42826048,0x3f3504de,2
+np.float32,0xc2826048,0xbf3504de,2
+np.float32,0x43026048,0xbf800000,2
+np.float32,0xc3026048,0x3f800000,2
+np.float32,0x43826048,0xb6eb7958,2
+np.float32,0xc3826048,0x36eb7958,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x4303f268,0x37059a13,2
+np.float32,0xc303f268,0xb7059a13,2
+np.float32,0x4383f268,0x37859a13,2
+np.float32,0xc383f268,0xb7859a13,2
+np.float32,0x42858487,0xbf3504e2,2
+np.float32,0xc2858487,0x3f3504e2,2
+np.float32,0x43058487,0x3f800000,2
+np.float32,0xc3058487,0xbf800000,2
+np.float32,0x43858487,0x36bea8be,2
+np.float32,0xc3858487,0xb6bea8be,2
+np.float32,0x428716a7,0xbf800000,2
+np.float32,0xc28716a7,0x3f800000,2
+np.float32,0x430716a7,0xb5d88c6d,2
+np.float32,0xc30716a7,0x35d88c6d,2
+np.float32,0x438716a7,0x36588c6d,2
+np.float32,0xc38716a7,0xb6588c6d,2
+np.float32,0x4288a8c7,0xbf3504cf,2
+np.float32,0xc288a8c7,0x3f3504cf,2
+np.float32,0x4308a8c7,0xbf800000,2
+np.float32,0xc308a8c7,0x3f800000,2
+np.float32,0x4388a8c7,0xb74b9a96,2
+np.float32,0xc388a8c7,0x374b9a96,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x430a3ae7,0x37308908,2
+np.float32,0xc30a3ae7,0xb7308908,2
+np.float32,0x438a3ae7,0x37b08908,2
+np.float32,0xc38a3ae7,0xb7b08908,2
+np.float32,0x428bcd06,0x3f3504f2,2
+np.float32,0xc28bcd06,0xbf3504f2,2
+np.float32,0x430bcd06,0x3f800000,2
+np.float32,0xc30bcd06,0xbf800000,2
+np.float32,0x438bcd06,0x3517675b,2
+np.float32,0xc38bcd06,0xb517675b,2
+np.float32,0x428d5f26,0x3f800000,2
+np.float32,0xc28d5f26,0xbf800000,2
+np.float32,0x430d5f26,0xb68c0105,2
+np.float32,0xc30d5f26,0x368c0105,2
+np.float32,0x438d5f26,0x370c0105,2
+np.float32,0xc38d5f26,0xb70c0105,2
+np.float32,0x428ef146,0x3f3504c0,2
+np.float32,0xc28ef146,0xbf3504c0,2
+np.float32,0x430ef146,0xbf800000,2
+np.float32,0xc30ef146,0x3f800000,2
+np.float32,0x438ef146,0xb790bc40,2
+np.float32,0xc38ef146,0x3790bc40,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x43108365,0xb612200d,2
+np.float32,0xc3108365,0x3612200d,2
+np.float32,0x43908365,0xb692200d,2
+np.float32,0xc3908365,0x3692200d,2
+np.float32,0x42921585,0xbf350501,2
+np.float32,0xc2921585,0x3f350501,2
+np.float32,0x43121585,0x3f800000,2
+np.float32,0xc3121585,0xbf800000,2
+np.float32,0x43921585,0xb698cee8,2
+np.float32,0xc3921585,0x3698cee8,2
+np.float32,0x4293a7a5,0xbf800000,2
+np.float32,0xc293a7a5,0x3f800000,2
+np.float32,0x4313a7a5,0xb6e1deee,2
+np.float32,0xc313a7a5,0x36e1deee,2
+np.float32,0x4393a7a5,0x3761deee,2
+np.float32,0xc393a7a5,0xb761deee,2
+np.float32,0x429539c5,0xbf3504b1,2
+np.float32,0xc29539c5,0x3f3504b1,2
+np.float32,0x431539c5,0xbf800000,2
+np.float32,0xc31539c5,0x3f800000,2
+np.float32,0x439539c5,0xb7bbab34,2
+np.float32,0xc39539c5,0x37bbab34,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x4316cbe4,0x34ccde2e,2
+np.float32,0xc316cbe4,0xb4ccde2e,2
+np.float32,0x4396cbe4,0x354cde2e,2
+np.float32,0xc396cbe4,0xb54cde2e,2
+np.float32,0x42985e04,0x3f350510,2
+np.float32,0xc2985e04,0xbf350510,2
+np.float32,0x43185e04,0x3f800000,2
+np.float32,0xc3185e04,0xbf800000,2
+np.float32,0x43985e04,0xb722455d,2
+np.float32,0xc3985e04,0x3722455d,2
+np.float32,0x4299f024,0x3f800000,2
+np.float32,0xc299f024,0xbf800000,2
+np.float32,0x4319f024,0xb71bde6c,2
+np.float32,0xc319f024,0x371bde6c,2
+np.float32,0x4399f024,0x379bde6c,2
+np.float32,0xc399f024,0xb79bde6c,2
+np.float32,0x429b8243,0x3f3504fc,2
+np.float32,0xc29b8243,0xbf3504fc,2
+np.float32,0x431b8243,0xbf800000,2
+np.float32,0xc31b8243,0x3f800000,2
+np.float32,0x439b8243,0x364b2eb8,2
+np.float32,0xc39b8243,0xb64b2eb8,2
+np.float32,0x435b2047,0xbf350525,2
+np.float32,0x42a038a2,0xbf800000,2
+np.float32,0x432038a2,0x3664ca7e,2
+np.float32,0x4345eb9b,0x365e638c,2
+np.float32,0x42c5eb9b,0xbf800000,2
+np.float32,0x42eb9e94,0xbf800000,2
+np.float32,0x4350ea79,0x3f800000,2
+np.float32,0x42dbe957,0x3585522a,2
+np.float32,0x425be957,0xbf800000,2
+np.float32,0x435be957,0xb605522a,2
+np.float32,0x476362a2,0xbd7ff911,2
+np.float32,0x464c99a4,0x3e7f4d41,2
+np.float32,0x4471f73d,0x3e7fe1b0,2
+np.float32,0x445a6752,0x3e7ef367,2
+np.float32,0x474fa400,0x3e7f9fcd,2
+np.float32,0x45c1e72f,0xbe7fc7af,2
+np.float32,0x4558c91d,0x3e7e9f31,2
+np.float32,0x43784f94,0xbdff6654,2
+np.float32,0x466e8500,0xbe7ea0a3,2
+np.float32,0x468e1c25,0x3e7e22fb,2
+np.float32,0x44ea6cfc,0x3dff70c3,2
+np.float32,0x4605126c,0x3e7f89ef,2
+np.float32,0x4788b3c6,0xbb87d853,2
+np.float32,0x4531b042,0x3dffd163,2
+np.float32,0x43f1f71d,0x3dfff387,2
+np.float32,0x462c3fa5,0xbd7fe13d,2
+np.float32,0x441c5354,0xbdff76b4,2
+np.float32,0x44908b69,0x3e7dcf0d,2
+np.float32,0x478813ad,0xbe7e9d80,2
+np.float32,0x441c4351,0x3dff937b,2
+np.float64,0x1,0x1,4
+np.float64,0x8000000000000001,0x8000000000000001,4
+np.float64,0x10000000000000,0x10000000000000,4
+np.float64,0x8010000000000000,0x8010000000000000,4
+np.float64,0x7fefffffffffffff,0x3f7452fc98b34e97,4
+np.float64,0xffefffffffffffff,0xbf7452fc98b34e97,4
+np.float64,0x7ff0000000000000,0xfff8000000000000,4
+np.float64,0xfff0000000000000,0xfff8000000000000,4
+np.float64,0x7ff8000000000000,0x7ff8000000000000,4
+np.float64,0x7ff4000000000000,0x7ffc000000000000,4
+np.float64,0xbfda51b226b4a364,0xbfd9956328ff876c,4
+np.float64,0xbfb4a65aee294cb8,0xbfb4a09fd744f8a5,4
+np.float64,0xbfd73b914fae7722,0xbfd6b9cce55af379,4
+np.float64,0xbfd90c12b4b21826,0xbfd869a3867b51c2,4
+np.float64,0x3fe649bb3d6c9376,0x3fe48778d9b48a21,4
+np.float64,0xbfd5944532ab288a,0xbfd52c30e1951b42,4
+np.float64,0x3fb150c45222a190,0x3fb14d633eb8275d,4
+np.float64,0x3fe4a6ffa9e94e00,0x3fe33f8a95c33299,4
+np.float64,0x3fe8d2157171a42a,0x3fe667d904ac95a6,4
+np.float64,0xbfa889f52c3113f0,0xbfa8878d90a23fa5,4
+np.float64,0x3feb3234bef6646a,0x3fe809d541d9017a,4
+np.float64,0x3fc6de266f2dbc50,0x3fc6bf0ee80a0d86,4
+np.float64,0x3fe8455368f08aa6,0x3fe6028254338ed5,4
+np.float64,0xbfe5576079eaaec1,0xbfe3cb4a8f6bc3f5,4
+np.float64,0xbfe9f822ff73f046,0xbfe7360d7d5cb887,4
+np.float64,0xbfb1960e7e232c20,0xbfb1928438258602,4
+np.float64,0xbfca75938d34eb28,0xbfca4570979bf2fa,4
+np.float64,0x3fd767dd15aecfbc,0x3fd6e33039018bab,4
+np.float64,0xbfe987750ef30eea,0xbfe6e7ed30ce77f0,4
+np.float64,0xbfe87f95a1f0ff2b,0xbfe62ca7e928bb2a,4
+np.float64,0xbfd2465301a48ca6,0xbfd2070245775d76,4
+np.float64,0xbfb1306ed22260e0,0xbfb12d2088eaa4f9,4
+np.float64,0xbfd8089010b01120,0xbfd778f9db77f2f3,4
+np.float64,0x3fbf9cf4ee3f39f0,0x3fbf88674fde1ca2,4
+np.float64,0x3fe6d8468a6db08e,0x3fe4f403f38b7bec,4
+np.float64,0xbfd9e5deefb3cbbe,0xbfd932692c722351,4
+np.float64,0x3fd1584d55a2b09c,0x3fd122253eeecc2e,4
+np.float64,0x3fe857979cf0af30,0x3fe60fc12b5ba8db,4
+np.float64,0x3fe3644149e6c882,0x3fe239f47013cfe6,4
+np.float64,0xbfe22ea62be45d4c,0xbfe13834c17d56fe,4
+np.float64,0xbfe8d93e1df1b27c,0xbfe66cf4ee467fd2,4
+np.float64,0xbfe9c497c9f38930,0xbfe7127417da4204,4
+np.float64,0x3fd6791cecacf238,0x3fd6039ccb5a7fde,4
+np.float64,0xbfc1dc1b1523b838,0xbfc1cd48edd9ae19,4
+np.float64,0xbfc92a8491325508,0xbfc901176e0158a5,4
+np.float64,0x3fa8649b3430c940,0x3fa8623e82d9504f,4
+np.float64,0x3fe0bed6a1617dae,0x3fdffbb307fb1abe,4
+np.float64,0x3febdf7765f7beee,0x3fe87ad01a89b74a,4
+np.float64,0xbfd3a56d46a74ada,0xbfd356cf41bf83cd,4
+np.float64,0x3fd321d824a643b0,0x3fd2d93846a224b3,4
+np.float64,0xbfc6a49fb52d4940,0xbfc686704906e7d3,4
+np.float64,0xbfdd4103c9ba8208,0xbfdc3ef0c03615b4,4
+np.float64,0xbfe0b78a51e16f14,0xbfdfef0d9ffc38b5,4
+np.float64,0xbfdac7a908b58f52,0xbfda0158956ceecf,4
+np.float64,0xbfbfbf12f23f7e28,0xbfbfaa428989258c,4
+np.float64,0xbfd55f5aa2aabeb6,0xbfd4fa39de65f33a,4
+np.float64,0x3fe06969abe0d2d4,0x3fdf6744fafdd9cf,4
+np.float64,0x3fe56ab8be6ad572,0x3fe3da7a1986d543,4
+np.float64,0xbfeefbbec67df77e,0xbfea5d426132f4aa,4
+np.float64,0x3fe6e1f49cedc3ea,0x3fe4fb53f3d8e3d5,4
+np.float64,0x3feceb231c79d646,0x3fe923d3efa55414,4
+np.float64,0xbfd03dd08ea07ba2,0xbfd011549aa1998a,4
+np.float64,0xbfd688327aad1064,0xbfd611c61b56adbe,4
+np.float64,0xbfde3249d8bc6494,0xbfdd16a7237a39d5,4
+np.float64,0x3febd4b65677a96c,0x3fe873e1a401ef03,4
+np.float64,0xbfe46bd2b368d7a6,0xbfe31023c2467749,4
+np.float64,0x3fbf9f5cde3f3ec0,0x3fbf8aca8ec53c45,4
+np.float64,0x3fc20374032406e8,0x3fc1f43f1f2f4d5e,4
+np.float64,0xbfec143b16f82876,0xbfe89caa42582381,4
+np.float64,0xbfd14fa635a29f4c,0xbfd119ced11da669,4
+np.float64,0x3fe25236d4e4a46e,0x3fe156242d644b7a,4
+np.float64,0xbfe4ed793469daf2,0xbfe377a88928fd77,4
+np.float64,0xbfb363572626c6b0,0xbfb35e98d8fe87ae,4
+np.float64,0xbfb389d5aa2713a8,0xbfb384fae55565a7,4
+np.float64,0x3fca6e001934dc00,0x3fca3e0661eaca84,4
+np.float64,0x3fe748f3f76e91e8,0x3fe548ab2168aea6,4
+np.float64,0x3fef150efdfe2a1e,0x3fea6b92d74f60d3,4
+np.float64,0xbfd14b52b1a296a6,0xbfd115a387c0fa93,4
+np.float64,0x3fe3286b5ce650d6,0x3fe208a6469a7527,4
+np.float64,0xbfd57b4f4baaf69e,0xbfd514a12a9f7ab0,4
+np.float64,0xbfef14bd467e297b,0xbfea6b64bbfd42ce,4
+np.float64,0xbfe280bc90650179,0xbfe17d2c49955dba,4
+np.float64,0x3fca8759d7350eb0,0x3fca56d5c17bbc14,4
+np.float64,0xbfdf988f30bf311e,0xbfde53f96f69b05f,4
+np.float64,0x3f6b6eeb4036de00,0x3f6b6ee7e3f86f9a,4
+np.float64,0xbfed560be8faac18,0xbfe9656c5cf973d8,4
+np.float64,0x3fc6102c592c2058,0x3fc5f43efad5396d,4
+np.float64,0xbfdef64ed2bdec9e,0xbfddc4b7fbd45aea,4
+np.float64,0x3fe814acd570295a,0x3fe5df183d543bfe,4
+np.float64,0x3fca21313f344260,0x3fc9f2d47f64fbe2,4
+np.float64,0xbfe89932cc713266,0xbfe63f186a2f60ce,4
+np.float64,0x3fe4ffcff169ffa0,0x3fe386336115ee21,4
+np.float64,0x3fee6964087cd2c8,0x3fea093d31e2c2c5,4
+np.float64,0xbfbeea604e3dd4c0,0xbfbed72734852669,4
+np.float64,0xbfea1954fb7432aa,0xbfe74cdad8720032,4
+np.float64,0x3fea3e1a5ef47c34,0x3fe765ffba65a11d,4
+np.float64,0x3fcedb850b3db708,0x3fce8f39d92f00ba,4
+np.float64,0x3fd3b52d41a76a5c,0x3fd365d22b0003f9,4
+np.float64,0xbfa4108a0c282110,0xbfa40f397fcd844f,4
+np.float64,0x3fd7454c57ae8a98,0x3fd6c2e5542c6c83,4
+np.float64,0xbfeecd3c7a7d9a79,0xbfea42ca943a1695,4
+np.float64,0xbfdddda397bbbb48,0xbfdccb27283d4c4c,4
+np.float64,0x3fe6b52cf76d6a5a,0x3fe4d96ff32925ff,4
+np.float64,0xbfa39a75ec2734f0,0xbfa3993c0da84f87,4
+np.float64,0x3fdd3fe6fdba7fcc,0x3fdc3df12fe9e525,4
+np.float64,0xbfb57a98162af530,0xbfb5742525d5fbe2,4
+np.float64,0xbfd3e166cfa7c2ce,0xbfd38ff2891be9b0,4
+np.float64,0x3fdb6a04f9b6d408,0x3fda955e5018e9dc,4
+np.float64,0x3fe4ab03a4e95608,0x3fe342bfa76e1aa8,4
+np.float64,0xbfe6c8480b6d9090,0xbfe4e7eaa935b3f5,4
+np.float64,0xbdd6b5a17bae,0xbdd6b5a17bae,4
+np.float64,0xd6591979acb23,0xd6591979acb23,4
+np.float64,0x5adbed90b5b7e,0x5adbed90b5b7e,4
+np.float64,0xa664c5314cc99,0xa664c5314cc99,4
+np.float64,0x1727fb162e500,0x1727fb162e500,4
+np.float64,0xdb49a93db6935,0xdb49a93db6935,4
+np.float64,0xb10c958d62193,0xb10c958d62193,4
+np.float64,0xad38276f5a705,0xad38276f5a705,4
+np.float64,0x1d5d0b983aba2,0x1d5d0b983aba2,4
+np.float64,0x915f48e122be9,0x915f48e122be9,4
+np.float64,0x475958ae8eb2c,0x475958ae8eb2c,4
+np.float64,0x3af8406675f09,0x3af8406675f09,4
+np.float64,0x655e88a4cabd2,0x655e88a4cabd2,4
+np.float64,0x40fee8ce81fde,0x40fee8ce81fde,4
+np.float64,0xab83103f57062,0xab83103f57062,4
+np.float64,0x7cf934b8f9f27,0x7cf934b8f9f27,4
+np.float64,0x29f7524853eeb,0x29f7524853eeb,4
+np.float64,0x4a5e954894bd3,0x4a5e954894bd3,4
+np.float64,0x24638f3a48c73,0x24638f3a48c73,4
+np.float64,0xa4f32fc749e66,0xa4f32fc749e66,4
+np.float64,0xf8e92df7f1d26,0xf8e92df7f1d26,4
+np.float64,0x292e9d50525d4,0x292e9d50525d4,4
+np.float64,0xe937e897d26fd,0xe937e897d26fd,4
+np.float64,0xd3bde1d5a77bc,0xd3bde1d5a77bc,4
+np.float64,0xa447ffd548900,0xa447ffd548900,4
+np.float64,0xa3b7b691476f7,0xa3b7b691476f7,4
+np.float64,0x490095c892013,0x490095c892013,4
+np.float64,0xfc853235f90a7,0xfc853235f90a7,4
+np.float64,0x5a8bc082b5179,0x5a8bc082b5179,4
+np.float64,0x1baca45a37595,0x1baca45a37595,4
+np.float64,0x2164120842c83,0x2164120842c83,4
+np.float64,0x66692bdeccd26,0x66692bdeccd26,4
+np.float64,0xf205bdd3e40b8,0xf205bdd3e40b8,4
+np.float64,0x7c3fff98f8801,0x7c3fff98f8801,4
+np.float64,0xccdf10e199bf,0xccdf10e199bf,4
+np.float64,0x92db8e8125b8,0x92db8e8125b8,4
+np.float64,0x5789a8d6af136,0x5789a8d6af136,4
+np.float64,0xbdda869d7bb51,0xbdda869d7bb51,4
+np.float64,0xb665e0596ccbc,0xb665e0596ccbc,4
+np.float64,0x74e6b46ee9cd7,0x74e6b46ee9cd7,4
+np.float64,0x4f39cf7c9e73b,0x4f39cf7c9e73b,4
+np.float64,0xfdbf3907fb7e7,0xfdbf3907fb7e7,4
+np.float64,0xafdef4d55fbdf,0xafdef4d55fbdf,4
+np.float64,0xb49858236930b,0xb49858236930b,4
+np.float64,0x3ebe21d47d7c5,0x3ebe21d47d7c5,4
+np.float64,0x5b620512b6c41,0x5b620512b6c41,4
+np.float64,0x31918cda63232,0x31918cda63232,4
+np.float64,0x68b5741ed16af,0x68b5741ed16af,4
+np.float64,0xa5c09a5b4b814,0xa5c09a5b4b814,4
+np.float64,0x55f51c14abea4,0x55f51c14abea4,4
+np.float64,0xda8a3e41b515,0xda8a3e41b515,4
+np.float64,0x9ea9c8513d539,0x9ea9c8513d539,4
+np.float64,0x7f23b964fe478,0x7f23b964fe478,4
+np.float64,0xf6e08c7bedc12,0xf6e08c7bedc12,4
+np.float64,0x7267aa24e4cf6,0x7267aa24e4cf6,4
+np.float64,0x236bb93a46d78,0x236bb93a46d78,4
+np.float64,0x9a98430b35309,0x9a98430b35309,4
+np.float64,0xbb683fef76d08,0xbb683fef76d08,4
+np.float64,0x1ff0eb6e3fe1e,0x1ff0eb6e3fe1e,4
+np.float64,0xf524038fea481,0xf524038fea481,4
+np.float64,0xd714e449ae29d,0xd714e449ae29d,4
+np.float64,0x4154fd7682aa0,0x4154fd7682aa0,4
+np.float64,0x5b8d2f6cb71a7,0x5b8d2f6cb71a7,4
+np.float64,0xc91aa21d92355,0xc91aa21d92355,4
+np.float64,0xbd94fd117b2a0,0xbd94fd117b2a0,4
+np.float64,0x685b207ad0b65,0x685b207ad0b65,4
+np.float64,0xd2485b05a490c,0xd2485b05a490c,4
+np.float64,0x151ea5e62a3d6,0x151ea5e62a3d6,4
+np.float64,0x2635a7164c6b6,0x2635a7164c6b6,4
+np.float64,0x88ae3b5d115c8,0x88ae3b5d115c8,4
+np.float64,0x8a055a55140ac,0x8a055a55140ac,4
+np.float64,0x756f7694eadef,0x756f7694eadef,4
+np.float64,0x866d74630cdaf,0x866d74630cdaf,4
+np.float64,0x39e44f2873c8b,0x39e44f2873c8b,4
+np.float64,0x2a07ceb6540fb,0x2a07ceb6540fb,4
+np.float64,0xc52b96398a573,0xc52b96398a573,4
+np.float64,0x9546543b2a8cb,0x9546543b2a8cb,4
+np.float64,0x5b995b90b732c,0x5b995b90b732c,4
+np.float64,0x2de10a565bc22,0x2de10a565bc22,4
+np.float64,0x3b06ee94760df,0x3b06ee94760df,4
+np.float64,0xb18e77a5631cf,0xb18e77a5631cf,4
+np.float64,0x3b89ae3a77137,0x3b89ae3a77137,4
+np.float64,0xd9b0b6e5b3617,0xd9b0b6e5b3617,4
+np.float64,0x30b2310861647,0x30b2310861647,4
+np.float64,0x326a3ab464d48,0x326a3ab464d48,4
+np.float64,0x4c18610a9830d,0x4c18610a9830d,4
+np.float64,0x541dea42a83be,0x541dea42a83be,4
+np.float64,0xcd027dbf9a050,0xcd027dbf9a050,4
+np.float64,0x780a0f80f015,0x780a0f80f015,4
+np.float64,0x740ed5b2e81db,0x740ed5b2e81db,4
+np.float64,0xc226814d844d0,0xc226814d844d0,4
+np.float64,0xde958541bd2b1,0xde958541bd2b1,4
+np.float64,0xb563d3296ac7b,0xb563d3296ac7b,4
+np.float64,0x1db3b0b83b677,0x1db3b0b83b677,4
+np.float64,0xa7b0275d4f605,0xa7b0275d4f605,4
+np.float64,0x72f8d038e5f1b,0x72f8d038e5f1b,4
+np.float64,0x860ed1350c1da,0x860ed1350c1da,4
+np.float64,0x79f88262f3f11,0x79f88262f3f11,4
+np.float64,0x8817761f102ef,0x8817761f102ef,4
+np.float64,0xac44784b5888f,0xac44784b5888f,4
+np.float64,0x800fd594241fab28,0x800fd594241fab28,4
+np.float64,0x800ede32f8ddbc66,0x800ede32f8ddbc66,4
+np.float64,0x800de4c1121bc982,0x800de4c1121bc982,4
+np.float64,0x80076ebcddcedd7a,0x80076ebcddcedd7a,4
+np.float64,0x800b3fee06567fdc,0x800b3fee06567fdc,4
+np.float64,0x800b444426b68889,0x800b444426b68889,4
+np.float64,0x800b1c037a563807,0x800b1c037a563807,4
+np.float64,0x8001eb88c2a3d712,0x8001eb88c2a3d712,4
+np.float64,0x80058aae6dab155e,0x80058aae6dab155e,4
+np.float64,0x80083df2d4f07be6,0x80083df2d4f07be6,4
+np.float64,0x800e3b19d97c7634,0x800e3b19d97c7634,4
+np.float64,0x800a71c6f374e38e,0x800a71c6f374e38e,4
+np.float64,0x80048557f1490ab1,0x80048557f1490ab1,4
+np.float64,0x8000a00e6b01401e,0x8000a00e6b01401e,4
+np.float64,0x800766a3e2cecd49,0x800766a3e2cecd49,4
+np.float64,0x80015eb44602bd69,0x80015eb44602bd69,4
+np.float64,0x800bde885a77bd11,0x800bde885a77bd11,4
+np.float64,0x800224c53ea4498b,0x800224c53ea4498b,4
+np.float64,0x80048e8c6a291d1a,0x80048e8c6a291d1a,4
+np.float64,0x800b667e4af6ccfd,0x800b667e4af6ccfd,4
+np.float64,0x800ae3d7e395c7b0,0x800ae3d7e395c7b0,4
+np.float64,0x80086c245550d849,0x80086c245550d849,4
+np.float64,0x800d7d25f6fafa4c,0x800d7d25f6fafa4c,4
+np.float64,0x800f8d9ab0ff1b35,0x800f8d9ab0ff1b35,4
+np.float64,0x800690e949cd21d3,0x800690e949cd21d3,4
+np.float64,0x8003022381060448,0x8003022381060448,4
+np.float64,0x80085e0dad70bc1c,0x80085e0dad70bc1c,4
+np.float64,0x800e2ffc369c5ff9,0x800e2ffc369c5ff9,4
+np.float64,0x800b629b5af6c537,0x800b629b5af6c537,4
+np.float64,0x800fdc964b7fb92d,0x800fdc964b7fb92d,4
+np.float64,0x80036bb4b1c6d76a,0x80036bb4b1c6d76a,4
+np.float64,0x800b382f7f16705f,0x800b382f7f16705f,4
+np.float64,0x800ebac9445d7593,0x800ebac9445d7593,4
+np.float64,0x80015075c3e2a0ec,0x80015075c3e2a0ec,4
+np.float64,0x8002a6ec5ce54dd9,0x8002a6ec5ce54dd9,4
+np.float64,0x8009fab74a93f56f,0x8009fab74a93f56f,4
+np.float64,0x800c94b9ea992974,0x800c94b9ea992974,4
+np.float64,0x800dc2efd75b85e0,0x800dc2efd75b85e0,4
+np.float64,0x800be6400d57cc80,0x800be6400d57cc80,4
+np.float64,0x80021f6858443ed1,0x80021f6858443ed1,4
+np.float64,0x800600e2ac4c01c6,0x800600e2ac4c01c6,4
+np.float64,0x800a2159e6b442b4,0x800a2159e6b442b4,4
+np.float64,0x800c912f4bb9225f,0x800c912f4bb9225f,4
+np.float64,0x800a863a9db50c76,0x800a863a9db50c76,4
+np.float64,0x800ac16851d582d1,0x800ac16851d582d1,4
+np.float64,0x8003f7d32e87efa7,0x8003f7d32e87efa7,4
+np.float64,0x800be4eee3d7c9de,0x800be4eee3d7c9de,4
+np.float64,0x80069ff0ac4d3fe2,0x80069ff0ac4d3fe2,4
+np.float64,0x80061c986d4c3932,0x80061c986d4c3932,4
+np.float64,0x8000737b4de0e6f7,0x8000737b4de0e6f7,4
+np.float64,0x8002066ef7440cdf,0x8002066ef7440cdf,4
+np.float64,0x8001007050c200e1,0x8001007050c200e1,4
+np.float64,0x8008df9fa351bf40,0x8008df9fa351bf40,4
+np.float64,0x800f8394ee5f072a,0x800f8394ee5f072a,4
+np.float64,0x80008e0b01c11c17,0x80008e0b01c11c17,4
+np.float64,0x800f7088ed3ee112,0x800f7088ed3ee112,4
+np.float64,0x800285b86f650b72,0x800285b86f650b72,4
+np.float64,0x8008ec18af51d832,0x8008ec18af51d832,4
+np.float64,0x800da08523bb410a,0x800da08523bb410a,4
+np.float64,0x800de853ca7bd0a8,0x800de853ca7bd0a8,4
+np.float64,0x8008c8aefad1915e,0x8008c8aefad1915e,4
+np.float64,0x80010c39d5821874,0x80010c39d5821874,4
+np.float64,0x8009208349724107,0x8009208349724107,4
+np.float64,0x800783783f0f06f1,0x800783783f0f06f1,4
+np.float64,0x80025caf9984b960,0x80025caf9984b960,4
+np.float64,0x800bc76fa6778ee0,0x800bc76fa6778ee0,4
+np.float64,0x80017e2f89a2fc60,0x80017e2f89a2fc60,4
+np.float64,0x800ef169843de2d3,0x800ef169843de2d3,4
+np.float64,0x80098a5f7db314bf,0x80098a5f7db314bf,4
+np.float64,0x800d646f971ac8df,0x800d646f971ac8df,4
+np.float64,0x800110d1dc6221a4,0x800110d1dc6221a4,4
+np.float64,0x800f8b422a1f1684,0x800f8b422a1f1684,4
+np.float64,0x800785c97dcf0b94,0x800785c97dcf0b94,4
+np.float64,0x800da201283b4403,0x800da201283b4403,4
+np.float64,0x800a117cc7b422fa,0x800a117cc7b422fa,4
+np.float64,0x80024731cfa48e64,0x80024731cfa48e64,4
+np.float64,0x800199d456c333a9,0x800199d456c333a9,4
+np.float64,0x8005f66bab8becd8,0x8005f66bab8becd8,4
+np.float64,0x8008e7227c11ce45,0x8008e7227c11ce45,4
+np.float64,0x8007b66cc42f6cda,0x8007b66cc42f6cda,4
+np.float64,0x800669e6f98cd3cf,0x800669e6f98cd3cf,4
+np.float64,0x800aed917375db23,0x800aed917375db23,4
+np.float64,0x8008b6dd15116dbb,0x8008b6dd15116dbb,4
+np.float64,0x800f49869cfe930d,0x800f49869cfe930d,4
+np.float64,0x800a712661b4e24d,0x800a712661b4e24d,4
+np.float64,0x800944e816f289d1,0x800944e816f289d1,4
+np.float64,0x800eba0f8a1d741f,0x800eba0f8a1d741f,4
+np.float64,0x800cf6ded139edbe,0x800cf6ded139edbe,4
+np.float64,0x80023100c6246202,0x80023100c6246202,4
+np.float64,0x800c5a94add8b52a,0x800c5a94add8b52a,4
+np.float64,0x800adf329b95be66,0x800adf329b95be66,4
+np.float64,0x800af9afc115f360,0x800af9afc115f360,4
+np.float64,0x800d66ce837acd9d,0x800d66ce837acd9d,4
+np.float64,0x8003ffb5e507ff6d,0x8003ffb5e507ff6d,4
+np.float64,0x80027d280024fa51,0x80027d280024fa51,4
+np.float64,0x800fc37e1d1f86fc,0x800fc37e1d1f86fc,4
+np.float64,0x800fc7258b9f8e4b,0x800fc7258b9f8e4b,4
+np.float64,0x8003fb5789e7f6b0,0x8003fb5789e7f6b0,4
+np.float64,0x800eb4e7a13d69cf,0x800eb4e7a13d69cf,4
+np.float64,0x800951850952a30a,0x800951850952a30a,4
+np.float64,0x3fed4071be3a80e3,0x3fe95842074431df,4
+np.float64,0x3f8d2341203a4682,0x3f8d2300b453bd9f,4
+np.float64,0x3fdc8ce332b919c6,0x3fdb9cdf1440c28f,4
+np.float64,0x3fdc69bd84b8d37b,0x3fdb7d25c8166b7b,4
+np.float64,0x3fc4c22ad0298456,0x3fc4aae73e231b4f,4
+np.float64,0x3fea237809f446f0,0x3fe753cc6ca96193,4
+np.float64,0x3fd34cf6462699ed,0x3fd30268909bb47e,4
+np.float64,0x3fafce20643f9c41,0x3fafc8e41a240e35,4
+np.float64,0x3fdc6d416538da83,0x3fdb805262292863,4
+np.float64,0x3fe7d8362aefb06c,0x3fe5b2ce659db7fd,4
+np.float64,0x3fe290087de52011,0x3fe189f9a3eb123d,4
+np.float64,0x3fa62d2bf82c5a58,0x3fa62b65958ca2b8,4
+np.float64,0x3fafd134403fa269,0x3fafcbf670f8a6f3,4
+np.float64,0x3fa224e53c2449ca,0x3fa223ec5de1631b,4
+np.float64,0x3fb67e2c2c2cfc58,0x3fb676c445fb70a0,4
+np.float64,0x3fda358d01346b1a,0x3fd97b9441666eb2,4
+np.float64,0x3fdd30fc4bba61f9,0x3fdc308da423778d,4
+np.float64,0x3fc56e99c52add34,0x3fc5550004492621,4
+np.float64,0x3fe32d08de265a12,0x3fe20c761a73cec2,4
+np.float64,0x3fd46cf932a8d9f2,0x3fd414a7f3db03df,4
+np.float64,0x3fd94cfa2b3299f4,0x3fd8a5961b3e4bdd,4
+np.float64,0x3fed6ea3a6fadd47,0x3fe9745b2f6c9204,4
+np.float64,0x3fe4431d1768863a,0x3fe2ef61d0481de0,4
+np.float64,0x3fe1d8e00ea3b1c0,0x3fe0efab5050ee78,4
+np.float64,0x3fe56f37dcaade70,0x3fe3de00b0f392e0,4
+np.float64,0x3fde919a2dbd2334,0x3fdd6b6d2dcf2396,4
+np.float64,0x3fe251e3d4a4a3c8,0x3fe155de69605d60,4
+np.float64,0x3fe5e0ecc5abc1da,0x3fe436a5de5516cf,4
+np.float64,0x3fcd48780c3a90f0,0x3fcd073fa907ba9b,4
+np.float64,0x3fe4e8149229d029,0x3fe37360801d5b66,4
+np.float64,0x3fb9ef159633de2b,0x3fb9e3bc05a15d1d,4
+np.float64,0x3fc24a3f0424947e,0x3fc23a5432ca0e7c,4
+np.float64,0x3fe55ca196aab943,0x3fe3cf6b3143435a,4
+np.float64,0x3fe184544c2308a9,0x3fe0a7b49fa80aec,4
+np.float64,0x3fe2c76e83658edd,0x3fe1b8355c1ea771,4
+np.float64,0x3fea8d2c4ab51a59,0x3fe79ba85aabc099,4
+np.float64,0x3fd74f98abae9f31,0x3fd6cc85005d0593,4
+np.float64,0x3fec6de9a678dbd3,0x3fe8d59a1d23cdd1,4
+np.float64,0x3fec8a0e50f9141d,0x3fe8e7500f6f6a00,4
+np.float64,0x3fe9de6d08b3bcda,0x3fe7245319508767,4
+np.float64,0x3fe4461fd1688c40,0x3fe2f1cf0b93aba6,4
+np.float64,0x3fde342d9d3c685b,0x3fdd185609d5719d,4
+np.float64,0x3feb413fc8368280,0x3fe813c091d2519a,4
+np.float64,0x3fe64333156c8666,0x3fe48275b9a6a358,4
+np.float64,0x3fe03c65226078ca,0x3fdf18b26786be35,4
+np.float64,0x3fee11054dbc220b,0x3fe9d579a1cfa7ad,4
+np.float64,0x3fbaefccae35df99,0x3fbae314fef7c7ea,4
+np.float64,0x3feed4e3487da9c7,0x3fea4729241c8811,4
+np.float64,0x3fbb655df836cabc,0x3fbb57fcf9a097be,4
+np.float64,0x3fe68b0273ed1605,0x3fe4b96109afdf76,4
+np.float64,0x3fd216bfc3242d80,0x3fd1d957363f6a43,4
+np.float64,0x3fe01328d4a02652,0x3fded083bbf94aba,4
+np.float64,0x3fe3f9a61ae7f34c,0x3fe2b3f701b79028,4
+np.float64,0x3fed4e7cf8fa9cfa,0x3fe960d27084fb40,4
+np.float64,0x3faec08e343d811c,0x3faebbd2aa07ac1f,4
+np.float64,0x3fd2d1bbeea5a378,0x3fd28c9aefcf48ad,4
+np.float64,0x3fd92e941fb25d28,0x3fd889857f88410d,4
+np.float64,0x3fe43decb7e87bd9,0x3fe2eb32b4ee4667,4
+np.float64,0x3fef49cabcfe9395,0x3fea892f9a233f76,4
+np.float64,0x3fe3e96812e7d2d0,0x3fe2a6c6b45dd6ee,4
+np.float64,0x3fd24c0293a49805,0x3fd20c76d54473cb,4
+np.float64,0x3fb43d6b7e287ad7,0x3fb438060772795a,4
+np.float64,0x3fe87bf7d3f0f7f0,0x3fe62a0c47411c62,4
+np.float64,0x3fee82a2e07d0546,0x3fea17e27e752b7b,4
+np.float64,0x3fe40c01bbe81803,0x3fe2c2d9483f44d8,4
+np.float64,0x3fd686ccae2d0d99,0x3fd610763fb61097,4
+np.float64,0x3fe90fcf2af21f9e,0x3fe693c12df59ba9,4
+np.float64,0x3fefb3ce11ff679c,0x3feac3dd4787529d,4
+np.float64,0x3fcec53ff63d8a80,0x3fce79992af00c58,4
+np.float64,0x3fe599dd7bab33bb,0x3fe3ff5da7575d85,4
+np.float64,0x3fe9923b1a732476,0x3fe6ef71d13db456,4
+np.float64,0x3febf76fcef7eee0,0x3fe88a3952e11373,4
+np.float64,0x3fc2cfd128259fa2,0x3fc2be7fd47fd811,4
+np.float64,0x3fe4d37ae269a6f6,0x3fe36300d45e3745,4
+np.float64,0x3fe23aa2e4247546,0x3fe1424e172f756f,4
+np.float64,0x3fe4f0596ca9e0b3,0x3fe379f0c49de7ef,4
+np.float64,0x3fe2e4802fe5c900,0x3fe1d062a8812601,4
+np.float64,0x3fe5989c79eb3139,0x3fe3fe6308552dec,4
+np.float64,0x3fe3c53cb4e78a79,0x3fe28956e573aca4,4
+np.float64,0x3fe6512beeeca258,0x3fe48d2d5ece979f,4
+np.float64,0x3fd8473ddb308e7c,0x3fd7b33e38adc6ad,4
+np.float64,0x3fecd09c9679a139,0x3fe91361fa0c5bcb,4
+np.float64,0x3fc991530e3322a6,0x3fc965e2c514a9e9,4
+np.float64,0x3f6d4508403a8a11,0x3f6d45042b68acc5,4
+np.float64,0x3fea1f198f743e33,0x3fe750ce918d9330,4
+np.float64,0x3fd0a0bb4da14177,0x3fd07100f9c71e1c,4
+np.float64,0x3fd30c45ffa6188c,0x3fd2c499f9961f66,4
+np.float64,0x3fcad98e7c35b31d,0x3fcaa74293cbc52e,4
+np.float64,0x3fec8e4a5eb91c95,0x3fe8e9f898d118db,4
+np.float64,0x3fd19fdb79233fb7,0x3fd1670c00febd24,4
+np.float64,0x3fea9fcbb1f53f97,0x3fe7a836b29c4075,4
+np.float64,0x3fc6d12ea12da25d,0x3fc6b24bd2f89f59,4
+np.float64,0x3fd6af3658ad5e6d,0x3fd636613e08df3f,4
+np.float64,0x3fe31bc385a63787,0x3fe1fe3081621213,4
+np.float64,0x3fc0dbba2221b774,0x3fc0cf42c9313dba,4
+np.float64,0x3fef639ce87ec73a,0x3fea9795454f1036,4
+np.float64,0x3fee5f29dcbcbe54,0x3fea0349b288f355,4
+np.float64,0x3fed46bdb37a8d7b,0x3fe95c199f5aa569,4
+np.float64,0x3fef176afa3e2ed6,0x3fea6ce78b2aa3aa,4
+np.float64,0x3fc841e7683083cf,0x3fc81cccb84848cc,4
+np.float64,0xbfda3ec9a2347d94,0xbfd9840d180e9de3,4
+np.float64,0xbfcd5967ae3ab2d0,0xbfcd17be13142bb9,4
+np.float64,0xbfedf816573bf02d,0xbfe9c6bb06476c60,4
+np.float64,0xbfd0d6e10e21adc2,0xbfd0a54f99d2f3dc,4
+np.float64,0xbfe282df096505be,0xbfe17ef5e2e80760,4
+np.float64,0xbfd77ae6e62ef5ce,0xbfd6f4f6b603ad8a,4
+np.float64,0xbfe37b171aa6f62e,0xbfe24cb4b2d0ade4,4
+np.float64,0xbfef9e5ed9bf3cbe,0xbfeab817b41000bd,4
+np.float64,0xbfe624d6f96c49ae,0xbfe46b1e9c9aff86,4
+np.float64,0xbfefb5da65ff6bb5,0xbfeac4fc9c982772,4
+np.float64,0xbfd29a65d52534cc,0xbfd2579df8ff87b9,4
+np.float64,0xbfd40270172804e0,0xbfd3af6471104aef,4
+np.float64,0xbfb729ee7a2e53e0,0xbfb721d7dbd2705e,4
+np.float64,0xbfb746f1382e8de0,0xbfb73ebc1207f8e3,4
+np.float64,0xbfd3c7e606a78fcc,0xbfd377a8aa1b0dd9,4
+np.float64,0xbfd18c4880231892,0xbfd1543506584ad5,4
+np.float64,0xbfea988080753101,0xbfe7a34cba0d0fa1,4
+np.float64,0xbf877400e02ee800,0xbf8773df47fa7e35,4
+np.float64,0xbfb07e050820fc08,0xbfb07b198d4a52c9,4
+np.float64,0xbfee0a3621fc146c,0xbfe9d1745a05ba77,4
+np.float64,0xbfe78de246ef1bc4,0xbfe57bf2baab91c8,4
+np.float64,0xbfcdbfd3bd3b7fa8,0xbfcd7b728a955a06,4
+np.float64,0xbfe855ea79b0abd5,0xbfe60e8a4a17b921,4
+np.float64,0xbfd86c8e3530d91c,0xbfd7d5e36c918dc1,4
+np.float64,0xbfe4543169e8a863,0xbfe2fd23d42f552e,4
+np.float64,0xbfe41efbf1283df8,0xbfe2d235a2faed1a,4
+np.float64,0xbfd9a55464b34aa8,0xbfd8f7083f7281e5,4
+np.float64,0xbfe5f5078d6bea0f,0xbfe44637d910c270,4
+np.float64,0xbfe6d83e3dedb07c,0xbfe4f3fdadd10552,4
+np.float64,0xbfdb767e70b6ecfc,0xbfdaa0b6c17f3fb1,4
+np.float64,0xbfdfc91b663f9236,0xbfde7eb0dfbeaa26,4
+np.float64,0xbfbfbd18783f7a30,0xbfbfa84bf2fa1c8d,4
+np.float64,0xbfe51199242a2332,0xbfe39447dbe066ae,4
+np.float64,0xbfdbb94814b77290,0xbfdadd63bd796972,4
+np.float64,0xbfd8c6272cb18c4e,0xbfd828f2d9e8607e,4
+np.float64,0xbfce51e0b63ca3c0,0xbfce097ee908083a,4
+np.float64,0xbfe99a177d73342f,0xbfe6f4ec776a57ae,4
+np.float64,0xbfefde2ab0ffbc55,0xbfeadafdcbf54733,4
+np.float64,0xbfcccb5c1c3996b8,0xbfcc8d586a73d126,4
+np.float64,0xbfdf7ddcedbefbba,0xbfde3c749a906de7,4
+np.float64,0xbfef940516ff280a,0xbfeab26429e89f4b,4
+np.float64,0xbfe08009f1e10014,0xbfdf8eab352997eb,4
+np.float64,0xbfe9c02682b3804d,0xbfe70f5fd05f79ee,4
+np.float64,0xbfb3ca1732279430,0xbfb3c50bec5b453a,4
+np.float64,0xbfe368e81926d1d0,0xbfe23dc704d0887c,4
+np.float64,0xbfbd20cc2e3a4198,0xbfbd10b7e6d81c6c,4
+np.float64,0xbfd67ece4d2cfd9c,0xbfd608f527dcc5e7,4
+np.float64,0xbfdc02d1333805a2,0xbfdb20104454b79f,4
+np.float64,0xbfc007a626200f4c,0xbfbff9dc9dc70193,4
+np.float64,0xbfda9e4f8fb53ca0,0xbfd9db8af35dc630,4
+np.float64,0xbfd8173d77302e7a,0xbfd786a0cf3e2914,4
+np.float64,0xbfeb8fcbd0b71f98,0xbfe84734debc10fb,4
+np.float64,0xbfe4bf1cb7697e3a,0xbfe352c891113f29,4
+np.float64,0xbfc18624d5230c48,0xbfc178248e863b64,4
+np.float64,0xbfcf184bac3e3098,0xbfceca3b19be1ebe,4
+np.float64,0xbfd2269c42a44d38,0xbfd1e8920d72b694,4
+np.float64,0xbfe8808526b1010a,0xbfe62d5497292495,4
+np.float64,0xbfe498bd1da9317a,0xbfe334245eadea93,4
+np.float64,0xbfef0855aebe10ab,0xbfea6462f29aeaf9,4
+np.float64,0xbfdeb186c93d630e,0xbfdd87c37943c602,4
+np.float64,0xbfb29fe2ae253fc8,0xbfb29bae3c87efe4,4
+np.float64,0xbfddd9c6c3bbb38e,0xbfdcc7b400bf384b,4
+np.float64,0xbfe3506673e6a0cd,0xbfe2299f26295553,4
+np.float64,0xbfe765957a2ecb2b,0xbfe55e03cf22edab,4
+np.float64,0xbfecc9876c79930f,0xbfe90efaf15b6207,4
+np.float64,0xbfefb37a0a7f66f4,0xbfeac3af3898e7c2,4
+np.float64,0xbfeefa0da7bdf41b,0xbfea5c4cde53c1c3,4
+np.float64,0xbfe6639ee9ecc73e,0xbfe49b4e28a72482,4
+np.float64,0xbfef91a4bb7f2349,0xbfeab114ac9e25dd,4
+np.float64,0xbfc8b392bb316724,0xbfc88c657f4441a3,4
+np.float64,0xbfc88a358231146c,0xbfc863cb900970fe,4
+np.float64,0xbfef25a9d23e4b54,0xbfea74eda432aabe,4
+np.float64,0xbfe6aceea0ed59de,0xbfe4d32e54a3fd01,4
+np.float64,0xbfefe2b3e37fc568,0xbfeadd74f4605835,4
+np.float64,0xbfa9eecb8833dd90,0xbfa9ebf4f4cb2591,4
+np.float64,0xbfd42bad7428575a,0xbfd3d69de8e52d0a,4
+np.float64,0xbfbc366b4a386cd8,0xbfbc27ceee8f3019,4
+np.float64,0xbfd9bca7be337950,0xbfd90c80e6204e57,4
+np.float64,0xbfe8173f53f02e7f,0xbfe5e0f8d8ed329c,4
+np.float64,0xbfce22dbcb3c45b8,0xbfcddbc8159b63af,4
+np.float64,0xbfea2d7ba7345af7,0xbfe75aa62ad5b80a,4
+np.float64,0xbfc08b783e2116f0,0xbfc07faf8d501558,4
+np.float64,0xbfb8c4161c318830,0xbfb8ba33950748ec,4
+np.float64,0xbfddd930bcbbb262,0xbfdcc72dffdf51bb,4
+np.float64,0xbfd108ce8a22119e,0xbfd0d5801e7698bd,4
+np.float64,0xbfd5bd2b5dab7a56,0xbfd552c52c468c76,4
+np.float64,0xbfe7ffe67fefffcd,0xbfe5cfe96e35e6e5,4
+np.float64,0xbfa04ec6bc209d90,0xbfa04e120a2c25cc,4
+np.float64,0xbfef7752cc7eeea6,0xbfeaa28715addc4f,4
+np.float64,0xbfe7083c2eae1078,0xbfe5182bf8ddfc8e,4
+np.float64,0xbfe05dafd0a0bb60,0xbfdf52d397cfe5f6,4
+np.float64,0xbfacb4f2243969e0,0xbfacb118991ea235,4
+np.float64,0xbfc7d47e422fa8fc,0xbfc7b1504714a4fd,4
+np.float64,0xbfbd70b2243ae168,0xbfbd60182efb61de,4
+np.float64,0xbfe930e49cb261c9,0xbfe6ab272b3f9cfc,4
+np.float64,0xbfb5f537e62bea70,0xbfb5ee540dcdc635,4
+np.float64,0xbfbb0c8278361908,0xbfbaffa1f7642a87,4
+np.float64,0xbfe82af2447055e4,0xbfe5ef54ca8db9e8,4
+np.float64,0xbfe92245e6f2448c,0xbfe6a0d32168040b,4
+np.float64,0xbfb799a8522f3350,0xbfb7911a7ada3640,4
+np.float64,0x7faa8290c8350521,0x3fe5916f67209cd6,4
+np.float64,0x7f976597082ecb2d,0x3fcf94dce396bd37,4
+np.float64,0x7fede721237bce41,0x3fe3e7b1575b005f,4
+np.float64,0x7fd5f674d72bece9,0x3fe3210628eba199,4
+np.float64,0x7f9b0f1aa0361e34,0x3feffd34d15d1da7,4
+np.float64,0x7fec48346ab89068,0x3fe93dd84253d9a2,4
+np.float64,0x7f9cac76283958eb,0xbfec4cd999653868,4
+np.float64,0x7fed51ab6bbaa356,0x3fecc27fb5f37bca,4
+np.float64,0x7fded3c116bda781,0xbfda473efee47cf1,4
+np.float64,0x7fd19c48baa33890,0xbfe25700cbfc0326,4
+np.float64,0x7fe5c8f478ab91e8,0xbfee4ab6d84806be,4
+np.float64,0x7fe53c64e46a78c9,0x3fee19c3f227f4e1,4
+np.float64,0x7fc2ad1936255a31,0xbfe56db9b877f807,4
+np.float64,0x7fe2b071b52560e2,0xbfce3990a8d390a9,4
+np.float64,0x7fc93f3217327e63,0xbfd1f6d7ef838d2b,4
+np.float64,0x7fec26df08784dbd,0x3fd5397be41c93d9,4
+np.float64,0x7fcf4770183e8edf,0x3fe6354f5a785016,4
+np.float64,0x7fdc9fcc0bb93f97,0xbfeeeae952e8267d,4
+np.float64,0x7feb21f29c7643e4,0x3fec20122e33f1bf,4
+np.float64,0x7fd0b51273216a24,0x3fefb09f8daba00b,4
+np.float64,0x7fe747a9d76e8f53,0x3feb46a3232842a4,4
+np.float64,0x7fd58885972b110a,0xbfce5ea57c186221,4
+np.float64,0x7fca3ce85c3479d0,0x3fef93a24548e8ca,4
+np.float64,0x7fe1528a46a2a514,0xbfb54bb578d9da91,4
+np.float64,0x7fcc58b21b38b163,0x3feffb5b741ffc2d,4
+np.float64,0x7fdabcaaf5357955,0x3fecbf855db524d1,4
+np.float64,0x7fdd27c6933a4f8c,0xbfef2f41bb80144b,4
+np.float64,0x7fbda4e1be3b49c2,0x3fdb9b33f84f5381,4
+np.float64,0x7fe53363362a66c5,0x3fe4daff3a6a4ed0,4
+np.float64,0x7fe5719d62eae33a,0xbfef761d98f625d5,4
+np.float64,0x7f982ce5a83059ca,0x3fd0b27c3365f0a8,4
+np.float64,0x7fe6db8c42edb718,0x3fe786f4b1fe11a6,4
+np.float64,0x7fe62cca1b2c5993,0x3fd425b6c4c9714a,4
+np.float64,0x7feea88850bd5110,0xbfd7bbb432017175,4
+np.float64,0x7fad6c6ae43ad8d5,0x3fe82e49098bc6de,4
+np.float64,0x7fe70542f02e0a85,0x3fec3017960b4822,4
+np.float64,0x7feaf0bcbb35e178,0xbfc3aac74dd322d5,4
+np.float64,0x7fb5e152fe2bc2a5,0x3fd4b27a4720614c,4
+np.float64,0x7fe456ee5be8addc,0xbfe9e15ab5cff229,4
+np.float64,0x7fd4b53a8d296a74,0xbfefff450f503326,4
+np.float64,0x7fd7149d7a2e293a,0x3fef4ef0a9009096,4
+np.float64,0x7fd43fc5a8a87f8a,0x3fe0c929fee9dce7,4
+np.float64,0x7fef97022aff2e03,0x3fd4ea52a813da20,4
+np.float64,0x7fe035950ae06b29,0x3fef4e125394fb05,4
+np.float64,0x7fecd0548979a0a8,0x3fe89d226244037b,4
+np.float64,0x7fc79b3ac22f3675,0xbfee9c9cf78c8270,4
+np.float64,0x7fd8b8e8263171cf,0x3fe8e24437961db0,4
+np.float64,0x7fc288c23e251183,0xbfbaf8eca50986ca,4
+np.float64,0x7fe436b4b6686d68,0xbfecd661741931c4,4
+np.float64,0x7fcdf99abe3bf334,0x3feaa75c90830b92,4
+np.float64,0x7fd9f9739233f2e6,0xbfebbfcb301b0da5,4
+np.float64,0x7fd6fcbd1b2df979,0xbfccf2c77cb65f56,4
+np.float64,0x7fe242a97b248552,0xbfe5b0f13bcbabc8,4
+np.float64,0x7fe38bf3e06717e7,0x3fbc8fa9004d2668,4
+np.float64,0x7fecd0e8d479a1d1,0xbfe886a6b4f73a4a,4
+np.float64,0x7fe958d60232b1ab,0xbfeb7c4cf0cee2dd,4
+np.float64,0x7f9d492b583a9256,0xbfebe975d00221cb,4
+np.float64,0x7fd6c9983bad932f,0xbfefe817621a31f6,4
+np.float64,0x7fed0d7239fa1ae3,0x3feac7e1b6455b4b,4
+np.float64,0x7fe61dac90ec3b58,0x3fef845b9efe8421,4
+np.float64,0x7f9acd3010359a5f,0xbfe460d376200130,4
+np.float64,0x7fedced9673b9db2,0xbfeeaf23445e1944,4
+np.float64,0x7fd9f271a733e4e2,0xbfd41544535ecb78,4
+np.float64,0x7fe703339bee0666,0x3fef93334626b56c,4
+np.float64,0x7fec7761b7b8eec2,0xbfe6da9179e8e714,4
+np.float64,0x7fdd9fff043b3ffd,0xbfc0761dfb8d94f9,4
+np.float64,0x7fdc10ed17b821d9,0x3fe1481e2a26c77f,4
+np.float64,0x7fe7681e72aed03c,0x3fefff94a6d47c84,4
+np.float64,0x7fe18c29e1e31853,0x3fe86ebd2fd89456,4
+np.float64,0x7fb2fb273c25f64d,0xbfefc136f57e06de,4
+np.float64,0x7fac2bbb90385776,0x3fe25d8e3cdae7e3,4
+np.float64,0x7fed16789efa2cf0,0x3fe94555091fdfd9,4
+np.float64,0x7fd8fe8f7831fd1e,0xbfed58d520361902,4
+np.float64,0x7fa59bde3c2b37bb,0x3fef585391c077ff,4
+np.float64,0x7fda981b53353036,0x3fde02ca08737b5f,4
+np.float64,0x7fd29f388aa53e70,0xbfe04f5499246df2,4
+np.float64,0x7fcd0232513a0464,0xbfd9737f2f565829,4
+np.float64,0x7fe9a881bcf35102,0xbfe079cf285b35dd,4
+np.float64,0x7fdbe399a9b7c732,0x3fe965bc4220f340,4
+np.float64,0x7feb77414af6ee82,0xbfb7df2fcd491f55,4
+np.float64,0x7fa26e86c424dd0d,0xbfea474c3d65b9be,4
+np.float64,0x7feaee869e35dd0c,0xbfd7b333a888cd14,4
+np.float64,0x7fcbd67f6137acfe,0xbfe15a7a15dfcee6,4
+np.float64,0x7fe36991e766d323,0xbfeb288077c4ed9f,4
+np.float64,0x7fdcf4f4fcb9e9e9,0xbfea331ef7a75e7b,4
+np.float64,0x7fbe3445643c688a,0x3fedf21b94ae8e37,4
+np.float64,0x7fd984cfd2b3099f,0x3fc0d3ade71c395e,4
+np.float64,0x7fdec987b23d930e,0x3fe4af5e48f6c26e,4
+np.float64,0x7fde56a9953cad52,0x3fc8e7762cefb8b0,4
+np.float64,0x7fd39fb446273f68,0xbfe6c3443208f44d,4
+np.float64,0x7fc609c1a72c1382,0x3fe884e639571baa,4
+np.float64,0x7fe001be4b20037c,0xbfed0d90cbcb6010,4
+np.float64,0x7fce7ace283cf59b,0xbfd0303792e51f49,4
+np.float64,0x7fe27ba93da4f751,0x3fe548b5ce740d71,4
+np.float64,0x7fcc13c79b38278e,0xbfe2e14f5b64a1e9,4
+np.float64,0x7fc058550620b0a9,0x3fe44bb55ebd0590,4
+np.float64,0x7fa4ba8bf8297517,0x3fee59b39f9d08c4,4
+np.float64,0x7fe50d6872ea1ad0,0xbfea1eaa2d059e13,4
+np.float64,0x7feb7e33b476fc66,0xbfeff28a4424dd3e,4
+np.float64,0x7fe2d7d2a165afa4,0xbfdbaff0ba1ea460,4
+np.float64,0xffd126654b224cca,0xbfef0cd3031fb97c,4
+np.float64,0xffb5f884942bf108,0x3fe0de589bea2e4c,4
+np.float64,0xffe011b4bfe02369,0xbfe805a0edf1e1f2,4
+np.float64,0xffec13eae9b827d5,0x3fb5f30347d78447,4
+np.float64,0xffa6552ae82caa50,0x3fb1ecee60135f2f,4
+np.float64,0xffb62d38b02c5a70,0x3fbd35903148fd12,4
+np.float64,0xffe2c44ea425889d,0xbfd7616547f99a7d,4
+np.float64,0xffea24c61a74498c,0x3fef4a1b15ae9005,4
+np.float64,0xffd23a4ab2a47496,0x3fe933bfaa569ae9,4
+np.float64,0xffc34a073d269410,0xbfeec0f510bb7474,4
+np.float64,0xffeead84cfbd5b09,0x3feb2d635e5a78bd,4
+np.float64,0xffcfd8f3b43fb1e8,0xbfdd59625801771b,4
+np.float64,0xffd3c7f662a78fec,0x3f9cf3209edfbc4e,4
+np.float64,0xffe7b7e4f72f6fca,0xbfefdcff4925632c,4
+np.float64,0xffe48cab05e91956,0x3fe6b41217948423,4
+np.float64,0xffeb6980b336d301,0xbfca5de148f69324,4
+np.float64,0xffe3f15c4aa7e2b8,0xbfeb18efae892081,4
+np.float64,0xffcf290c713e5218,0x3fefe6f1a513ed26,4
+np.float64,0xffd80979b43012f4,0xbfde6c8df91af976,4
+np.float64,0xffc3181e0026303c,0x3fe7448f681def38,4
+np.float64,0xffedfa68f97bf4d1,0xbfeca6efb802d109,4
+np.float64,0xffca0931c0341264,0x3fe31b9f073b08cd,4
+np.float64,0xffe4c44934e98892,0x3feda393a2e8a0f7,4
+np.float64,0xffe65bb56f2cb76a,0xbfeffaf638a4b73e,4
+np.float64,0xffe406a332a80d46,0x3fe8151dadb853c1,4
+np.float64,0xffdb7eae9c36fd5e,0xbfeff89abf5ab16e,4
+np.float64,0xffe245a02da48b40,0x3fef1fb43e85f4b8,4
+np.float64,0xffe2bafa732575f4,0x3fcbab115c6fd86e,4
+np.float64,0xffe8b1eedb7163dd,0x3feff263df6f6b12,4
+np.float64,0xffe6c76c796d8ed8,0xbfe61a8668511293,4
+np.float64,0xffefe327d1ffc64f,0xbfd9b92887a84827,4
+np.float64,0xffa452180c28a430,0xbfa9b9e578a4e52f,4
+np.float64,0xffe9867d0bf30cf9,0xbfca577867588408,4
+np.float64,0xffdfe9b923bfd372,0x3fdab5c15f085c2d,4
+np.float64,0xffed590c6abab218,0xbfd7e7b6c5a120e6,4
+np.float64,0xffeaebcfbab5d79f,0x3fed58be8a9e2c3b,4
+np.float64,0xffe2ba83a8257507,0x3fe6c42a4ac1d4d9,4
+np.float64,0xffe01d5b0ee03ab6,0xbfe5dad6c9247db7,4
+np.float64,0xffe51095d52a212b,0x3fef822cebc32d8e,4
+np.float64,0xffebd7a901b7af51,0xbfe5e63f3e3b1185,4
+np.float64,0xffe4efdcde29dfb9,0xbfe811294dfa758f,4
+np.float64,0xffe3be1aa4a77c35,0x3fdd8dcfcd409bb1,4
+np.float64,0xffbe6f2f763cde60,0x3fd13766e43bd622,4
+np.float64,0xffeed3d80fbda7af,0x3fec10a23c1b7a4a,4
+np.float64,0xffd6ebff37add7fe,0xbfe6177411607c86,4
+np.float64,0xffe85a90f4b0b521,0x3fc09fdd66c8fde9,4
+np.float64,0xffea3d58c2b47ab1,0x3feb5bd4a04b3562,4
+np.float64,0xffef675be6beceb7,0x3fecd840683d1044,4
+np.float64,0xff726a088024d400,0x3feff2b4f47b5214,4
+np.float64,0xffc90856733210ac,0xbfe3c6ffbf6840a5,4
+np.float64,0xffc0b58d9a216b1c,0xbfe10314267d0611,4
+np.float64,0xffee1f3d0abc3e79,0xbfd12ea7efea9067,4
+np.float64,0xffd988c41a331188,0x3febe83802d8a32e,4
+np.float64,0xffe8f1ac9bb1e358,0xbfdbf5fa7e84f2f2,4
+np.float64,0xffe47af279e8f5e4,0x3fef11e339e5fa78,4
+np.float64,0xff9960a7f832c140,0xbfa150363f8ec5b2,4
+np.float64,0xffcac40fa7358820,0xbfec3d5847a3df1d,4
+np.float64,0xffcb024a9d360494,0xbfd060fa31fd6b6a,4
+np.float64,0xffe385ffb3270bff,0xbfee6859e8dcd9e8,4
+np.float64,0xffef62f2c53ec5e5,0x3fe0a71ffddfc718,4
+np.float64,0xffed87ff20fb0ffd,0xbfe661db7c4098e3,4
+np.float64,0xffe369278526d24e,0x3fd64d89a41822fc,4
+np.float64,0xff950288c02a0520,0x3fe1df91d1ad7d5c,4
+np.float64,0xffe70e7c2cee1cf8,0x3fc9fece08df2fd8,4
+np.float64,0xffbaf020b635e040,0xbfc68c43ff9911a7,4
+np.float64,0xffee0120b0fc0240,0x3f9f792e17b490b0,4
+np.float64,0xffe1fa4be7a3f498,0xbfef4b18ab4b319e,4
+np.float64,0xffe61887bf2c310f,0x3fe846714826cb32,4
+np.float64,0xffdc3cf77f3879ee,0x3fe033b948a36125,4
+np.float64,0xffcc2b86f238570c,0xbfefdcceac3f220f,4
+np.float64,0xffe1f030c0a3e061,0x3fef502a808c359a,4
+np.float64,0xffb872c4ee30e588,0x3fef66ed8d3e6175,4
+np.float64,0xffeac8fc617591f8,0xbfe5d8448602aac9,4
+np.float64,0xffe5be16afab7c2d,0x3fee75ccde3cd14d,4
+np.float64,0xffae230ad83c4610,0xbfe49bbe6074d459,4
+np.float64,0xffc8fbeff531f7e0,0x3f77201e0c927f97,4
+np.float64,0xffdc314f48b8629e,0x3fef810dfc5db118,4
+np.float64,0xffec1f8970783f12,0x3fe15567102e042a,4
+np.float64,0xffc6995f902d32c0,0xbfecd5d2eedf342c,4
+np.float64,0xffdc7af76b38f5ee,0xbfd6e754476ab320,4
+np.float64,0xffb30cf8682619f0,0x3fd5ac3dfc4048d0,4
+np.float64,0xffd3a77695a74eee,0xbfefb5d6889e36e9,4
+np.float64,0xffd8b971803172e4,0xbfeb7f62f0b6c70b,4
+np.float64,0xffde4c0234bc9804,0xbfed50ba9e16d5e0,4
+np.float64,0xffb62b3f342c5680,0xbfeabc0de4069b84,4
+np.float64,0xff9af5674035eac0,0xbfed6c198b6b1bd8,4
+np.float64,0xffdfe20cb43fc41a,0x3fb11f8238f66306,4
+np.float64,0xffd2ecd7a0a5d9b0,0xbfec17ef1a62b1e3,4
+np.float64,0xffce60f7863cc1f0,0x3fe6dbcad3e3a006,4
+np.float64,0xffbbb8306a377060,0xbfbfd0fbef485c4c,4
+np.float64,0xffd1b2bd2b23657a,0xbfda3e046d987b99,4
+np.float64,0xffc480f4092901e8,0xbfeeff0427f6897b,4
+np.float64,0xffe6e02d926dc05a,0xbfcd59552778890b,4
+np.float64,0xffd302e5b7a605cc,0xbfee7c08641366b0,4
+np.float64,0xffec2eb92f785d72,0xbfef5c9c7f771050,4
+np.float64,0xffea3e31a9747c62,0xbfc49cd54755faf0,4
+np.float64,0xffce0a4e333c149c,0x3feeb9a6d0db4aee,4
+np.float64,0xffdc520a2db8a414,0x3fefc7b72613dcd0,4
+np.float64,0xffe056b968a0ad72,0xbfe47a9fe1f827fb,4
+np.float64,0xffe5a10f4cab421e,0x3fec2b1f74b73dec,4
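The sinh validation set added below uses the same four-column layout as the other umath CSVs: dtype, input bit pattern (hex), expected output bit pattern (hex), and an error tolerance in ULPs. The following is a minimal sketch of how one of these rows could be checked outside the test suite; it is not NumPy's actual harness, the helper names are invented for illustration, and the signed-integer ULP distance is a simplification that is only exact when both values share a sign.

    import csv
    import struct

    import numpy as np

    # struct format pairs for reinterpreting a hex bit pattern as a float:
    # (float format, unsigned integer format of the same width).
    _FORMATS = {"np.float32": ("f", "I"), "np.float64": ("d", "Q")}

    def bits_to_float(dtype, hexstr):
        """Reinterpret e.g. '0x3f800000' as the float it encodes."""
        flt_fmt, int_fmt = _FORMATS[dtype]
        return struct.unpack(flt_fmt, struct.pack(int_fmt, int(hexstr, 16)))[0]

    def check_row(dtype, inp_hex, out_hex, ulp_tol, func=np.sinh):
        # Illustrative check only; NumPy's real validation harness differs.
        np_dtype = np.float32 if dtype == "np.float32" else np.float64
        int_dtype = np.int32 if dtype == "np.float32" else np.int64
        x = np_dtype(bits_to_float(dtype, inp_hex))
        expected = np_dtype(bits_to_float(dtype, out_hex))
        actual = func(x)
        if np.isnan(expected):
            return bool(np.isnan(actual))
        if np.isinf(expected):
            return bool(actual == expected)
        # ULP distance: difference of the integer views of the bit patterns.
        ulp_err = abs(int(actual.view(int_dtype)) - int(expected.view(int_dtype)))
        return ulp_err <= int(ulp_tol)

    with open("umath-validation-set-sinh.csv") as fh:
        for row in csv.reader(fh):
            if not row or row[0] == "dtype":  # skip the header line
                continue
            assert check_row(*row), row

Note that the rows as they appear in this diff carry a leading "+"; the raw CSV in the repository does not.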
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv
new file mode 100644
index 00000000..5888c91c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-sinh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xfee27582,0xff800000,2
+np.float32,0xff19f092,0xff800000,2
+np.float32,0xbf393576,0xbf49cb31,2
+np.float32,0x8020fdea,0x8020fdea,2
+np.float32,0x455f4e,0x455f4e,2
+np.float32,0xff718c35,0xff800000,2
+np.float32,0x3f3215e3,0x3f40cce5,2
+np.float32,0x19e833,0x19e833,2
+np.float32,0xff2dcd49,0xff800000,2
+np.float32,0x7e8f6c95,0x7f800000,2
+np.float32,0xbf159dac,0xbf1e47a5,2
+np.float32,0x100d3d,0x100d3d,2
+np.float32,0xff673441,0xff800000,2
+np.float32,0x80275355,0x80275355,2
+np.float32,0x4812d0,0x4812d0,2
+np.float32,0x8072b956,0x8072b956,2
+np.float32,0xff3bb918,0xff800000,2
+np.float32,0x0,0x0,2
+np.float32,0xfe327798,0xff800000,2
+np.float32,0x41d4e2,0x41d4e2,2
+np.float32,0xfe34b1b8,0xff800000,2
+np.float32,0x80199f72,0x80199f72,2
+np.float32,0x807242ce,0x807242ce,2
+np.float32,0x3ef4202d,0x3efd7b48,2
+np.float32,0x763529,0x763529,2
+np.float32,0x4f6662,0x4f6662,2
+np.float32,0x3f18efe9,0x3f2232b5,2
+np.float32,0x80701846,0x80701846,2
+np.float32,0x3f599948,0x3f74c393,2
+np.float32,0x5a3d69,0x5a3d69,2
+np.float32,0xbf4a7e65,0xbf6047a3,2
+np.float32,0xff0d4c82,0xff800000,2
+np.float32,0x7a74db,0x7a74db,2
+np.float32,0x803388e6,0x803388e6,2
+np.float32,0x7f4430bb,0x7f800000,2
+np.float32,0x14c5b1,0x14c5b1,2
+np.float32,0xfa113400,0xff800000,2
+np.float32,0x7f4b3209,0x7f800000,2
+np.float32,0x8038d88c,0x8038d88c,2
+np.float32,0xbef2f9de,0xbefc330b,2
+np.float32,0xbe147b38,0xbe15008f,2
+np.float32,0x2b61e6,0x2b61e6,2
+np.float32,0x80000001,0x80000001,2
+np.float32,0x8060456c,0x8060456c,2
+np.float32,0x3f30fa82,0x3f3f6a99,2
+np.float32,0xfd1f0220,0xff800000,2
+np.float32,0xbf2b7555,0xbf389151,2
+np.float32,0xff100b7a,0xff800000,2
+np.float32,0x70d3cd,0x70d3cd,2
+np.float32,0x2a8d4a,0x2a8d4a,2
+np.float32,0xbf7b733f,0xbf92f05f,2
+np.float32,0x3f7106dc,0x3f8b1fc6,2
+np.float32,0x3f39da7a,0x3f4a9d79,2
+np.float32,0x3f5dd73f,0x3f7aaab5,2
+np.float32,0xbe8c8754,0xbe8e4cba,2
+np.float32,0xbf6c74c9,0xbf87c556,2
+np.float32,0x800efbbb,0x800efbbb,2
+np.float32,0xff054ab5,0xff800000,2
+np.float32,0x800b4b46,0x800b4b46,2
+np.float32,0xff77fd74,0xff800000,2
+np.float32,0x257d0,0x257d0,2
+np.float32,0x7caa0c,0x7caa0c,2
+np.float32,0x8025d24d,0x8025d24d,2
+np.float32,0x3d9f1b60,0x3d9f445c,2
+np.float32,0xbe3bf6e8,0xbe3d0595,2
+np.float32,0x54bb93,0x54bb93,2
+np.float32,0xbf3e6a45,0xbf507716,2
+np.float32,0x3f4bb26e,0x3f61e1cd,2
+np.float32,0x3f698edc,0x3f85aac5,2
+np.float32,0xff7bd0ef,0xff800000,2
+np.float32,0xbed07b68,0xbed64a8e,2
+np.float32,0xbf237c72,0xbf2ed3d2,2
+np.float32,0x27b0fa,0x27b0fa,2
+np.float32,0x3f7606d1,0x3f8ed7d6,2
+np.float32,0x790dc0,0x790dc0,2
+np.float32,0x7f68f3ac,0x7f800000,2
+np.float32,0xbed39288,0xbed9a52f,2
+np.float32,0x3f6f8266,0x3f8a0187,2
+np.float32,0x3fbdca,0x3fbdca,2
+np.float32,0xbf7c3e5d,0xbf938b2c,2
+np.float32,0x802321a8,0x802321a8,2
+np.float32,0x3eecab66,0x3ef53031,2
+np.float32,0x62b324,0x62b324,2
+np.float32,0x3f13afac,0x3f1c03fe,2
+np.float32,0xff315ad7,0xff800000,2
+np.float32,0xbf1fac0d,0xbf2a3a63,2
+np.float32,0xbf543984,0xbf6d61d6,2
+np.float32,0x71a212,0x71a212,2
+np.float32,0x114fbe,0x114fbe,2
+np.float32,0x3f5b6ff2,0x3f77505f,2
+np.float32,0xff6ff89e,0xff800000,2
+np.float32,0xff4527a1,0xff800000,2
+np.float32,0x22cb3,0x22cb3,2
+np.float32,0x7f53bb6b,0x7f800000,2
+np.float32,0xff3d2dea,0xff800000,2
+np.float32,0xfd21dac0,0xff800000,2
+np.float32,0xfc486140,0xff800000,2
+np.float32,0x7e2b693a,0x7f800000,2
+np.float32,0x8022a9fb,0x8022a9fb,2
+np.float32,0x80765de0,0x80765de0,2
+np.float32,0x13d299,0x13d299,2
+np.float32,0x7ee53713,0x7f800000,2
+np.float32,0xbde1c770,0xbde23c96,2
+np.float32,0xbd473fc0,0xbd4753de,2
+np.float32,0x3f1cb455,0x3f26acf3,2
+np.float32,0x683e49,0x683e49,2
+np.float32,0x3ed5a9fc,0x3edbeb79,2
+np.float32,0x3f4fe3f6,0x3f67814f,2
+np.float32,0x802a2bce,0x802a2bce,2
+np.float32,0x7e951b4c,0x7f800000,2
+np.float32,0xbe6eb260,0xbe70dd44,2
+np.float32,0xbe3daca8,0xbe3ec2cb,2
+np.float32,0xbe9c38b2,0xbe9ea822,2
+np.float32,0xff2e29dc,0xff800000,2
+np.float32,0x7f62c7cc,0x7f800000,2
+np.float32,0xbf6799a4,0xbf84416c,2
+np.float32,0xbe30a7f0,0xbe318898,2
+np.float32,0xc83d9,0xc83d9,2
+np.float32,0x3f05abf4,0x3f0bd447,2
+np.float32,0x7e9b018a,0x7f800000,2
+np.float32,0xbf0ed72e,0xbf165e5b,2
+np.float32,0x8011ac8c,0x8011ac8c,2
+np.float32,0xbeb7c706,0xbebbbfcb,2
+np.float32,0x803637f9,0x803637f9,2
+np.float32,0xfe787cc8,0xff800000,2
+np.float32,0x3f533d4b,0x3f6c0a50,2
+np.float32,0x3f5c0f1c,0x3f782dde,2
+np.float32,0x3f301f36,0x3f3e590d,2
+np.float32,0x2dc929,0x2dc929,2
+np.float32,0xff15018a,0xff800000,2
+np.float32,0x3f4d0c56,0x3f63afeb,2
+np.float32,0xbf7a2ae3,0xbf91f6e4,2
+np.float32,0xbe771b84,0xbe798346,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0x7f5689ba,0x7f800000,2
+np.float32,0x3f1c3177,0x3f2610df,2
+np.float32,0x3f1b9664,0x3f255825,2
+np.float32,0x3f7e5066,0x3f9520d4,2
+np.float32,0xbf1935f8,0xbf2285ab,2
+np.float32,0x3f096cc7,0x3f101ef9,2
+np.float32,0x8030c180,0x8030c180,2
+np.float32,0x6627ed,0x6627ed,2
+np.float32,0x454595,0x454595,2
+np.float32,0x7de66a33,0x7f800000,2
+np.float32,0xbf800000,0xbf966cfe,2
+np.float32,0xbf35c0a8,0xbf456939,2
+np.float32,0x3f6a6266,0x3f8643e0,2
+np.float32,0x3f0cbcee,0x3f13ef6a,2
+np.float32,0x7efd1e58,0x7f800000,2
+np.float32,0xfe9a74c6,0xff800000,2
+np.float32,0x807ebe6c,0x807ebe6c,2
+np.float32,0x80656736,0x80656736,2
+np.float32,0x800e0608,0x800e0608,2
+np.float32,0xbf30e39a,0xbf3f4e00,2
+np.float32,0x802015fd,0x802015fd,2
+np.float32,0x3e3ce26d,0x3e3df519,2
+np.float32,0x7ec142ac,0x7f800000,2
+np.float32,0xbf68c9ce,0xbf851c78,2
+np.float32,0xfede8356,0xff800000,2
+np.float32,0xbf1507ce,0xbf1d978d,2
+np.float32,0x3e53914c,0x3e551374,2
+np.float32,0x7f3e1c14,0x7f800000,2
+np.float32,0x8070d2ba,0x8070d2ba,2
+np.float32,0xbf4eb793,0xbf65ecee,2
+np.float32,0x7365a6,0x7365a6,2
+np.float32,0x8045cba2,0x8045cba2,2
+np.float32,0x7e4af521,0x7f800000,2
+np.float32,0xbf228625,0xbf2da9e1,2
+np.float32,0x7ee0536c,0x7f800000,2
+np.float32,0x3e126607,0x3e12e5d5,2
+np.float32,0x80311d92,0x80311d92,2
+np.float32,0xbf386b8b,0xbf48ca54,2
+np.float32,0x7f800000,0x7f800000,2
+np.float32,0x8049ec7a,0x8049ec7a,2
+np.float32,0xbf1dfde4,0xbf2836be,2
+np.float32,0x7e719a8c,0x7f800000,2
+np.float32,0x3eb9c856,0x3ebde2e6,2
+np.float32,0xfe3efda8,0xff800000,2
+np.float32,0xbe89d60c,0xbe8b81d1,2
+np.float32,0x3eaad338,0x3eae0317,2
+np.float32,0x7f4e5217,0x7f800000,2
+np.float32,0x3e9d0f40,0x3e9f88ce,2
+np.float32,0xbe026708,0xbe02c155,2
+np.float32,0x5fc22f,0x5fc22f,2
+np.float32,0x1c4572,0x1c4572,2
+np.float32,0xbed89d96,0xbedf22c5,2
+np.float32,0xbf3debee,0xbf4fd441,2
+np.float32,0xbf465520,0xbf5ac6e5,2
+np.float32,0x3f797081,0x3f9169b3,2
+np.float32,0xbf250734,0xbf30b2aa,2
+np.float32,0x7f5068e9,0x7f800000,2
+np.float32,0x3f1b814e,0x3f253f0c,2
+np.float32,0xbf27c5d3,0xbf340b05,2
+np.float32,0x3f1b78ae,0x3f2534c8,2
+np.float32,0x8059b51a,0x8059b51a,2
+np.float32,0x8059f182,0x8059f182,2
+np.float32,0xbf1bb36e,0xbf257ab8,2
+np.float32,0x41ac35,0x41ac35,2
+np.float32,0x68f41f,0x68f41f,2
+np.float32,0xbea504dc,0xbea7e40f,2
+np.float32,0x1,0x1,2
+np.float32,0x3e96b5b0,0x3e98e542,2
+np.float32,0x7f7fffff,0x7f800000,2
+np.float32,0x3c557a80,0x3c557c0c,2
+np.float32,0x800ca3ec,0x800ca3ec,2
+np.float32,0x8077d4aa,0x8077d4aa,2
+np.float32,0x3f000af0,0x3f0572d6,2
+np.float32,0x3e0434dd,0x3e0492f8,2
+np.float32,0x7d1a710a,0x7f800000,2
+np.float32,0x3f70f996,0x3f8b15f8,2
+np.float32,0x8033391d,0x8033391d,2
+np.float32,0x11927c,0x11927c,2
+np.float32,0x7f7784be,0x7f800000,2
+np.float32,0x7acb22af,0x7f800000,2
+np.float32,0x7e8b153c,0x7f800000,2
+np.float32,0x66d402,0x66d402,2
+np.float32,0xfed6e7b0,0xff800000,2
+np.float32,0x7f6872d3,0x7f800000,2
+np.float32,0x1bd49c,0x1bd49c,2
+np.float32,0xfdc4f1b8,0xff800000,2
+np.float32,0xbed8a466,0xbedf2a33,2
+np.float32,0x7ee789,0x7ee789,2
+np.float32,0xbece94b4,0xbed43b52,2
+np.float32,0x3cf3f734,0x3cf4006f,2
+np.float32,0x7e44aa00,0x7f800000,2
+np.float32,0x7f19e99c,0x7f800000,2
+np.float32,0x806ff1bc,0x806ff1bc,2
+np.float32,0x80296934,0x80296934,2
+np.float32,0x7f463363,0x7f800000,2
+np.float32,0xbf212ac3,0xbf2c06bb,2
+np.float32,0x3dc63778,0x3dc686ba,2
+np.float32,0x7f1b4328,0x7f800000,2
+np.float32,0x6311f6,0x6311f6,2
+np.float32,0xbf6b6fb6,0xbf870751,2
+np.float32,0xbf2c44cf,0xbf399155,2
+np.float32,0x3e7a67bc,0x3e7ce887,2
+np.float32,0x7f57c5f7,0x7f800000,2
+np.float32,0x7f2bb4ff,0x7f800000,2
+np.float32,0xbe9d448e,0xbe9fc0a4,2
+np.float32,0xbf4840f0,0xbf5d4f6b,2
+np.float32,0x7f1e1176,0x7f800000,2
+np.float32,0xff76638e,0xff800000,2
+np.float32,0xff055555,0xff800000,2
+np.float32,0x3f32b82b,0x3f419834,2
+np.float32,0xff363aa8,0xff800000,2
+np.float32,0x7f737fd0,0x7f800000,2
+np.float32,0x3da5d798,0x3da60602,2
+np.float32,0x3f1cc126,0x3f26bc3e,2
+np.float32,0x7eb07541,0x7f800000,2
+np.float32,0x3f7b2ff2,0x3f92bd2a,2
+np.float32,0x474f7,0x474f7,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0xff2b0a4e,0xff800000,2
+np.float32,0xfeb24f16,0xff800000,2
+np.float32,0x2cb9fc,0x2cb9fc,2
+np.float32,0x67189d,0x67189d,2
+np.float32,0x8033d854,0x8033d854,2
+np.float32,0xbe85e94c,0xbe87717a,2
+np.float32,0x80767c6c,0x80767c6c,2
+np.float32,0x7ea84d65,0x7f800000,2
+np.float32,0x3f024bc7,0x3f07fead,2
+np.float32,0xbdcb0100,0xbdcb5625,2
+np.float32,0x3f160a9e,0x3f1ec7c9,2
+np.float32,0xff1734c8,0xff800000,2
+np.float32,0x7f424d5e,0x7f800000,2
+np.float32,0xbf75b215,0xbf8e9862,2
+np.float32,0x3f262a42,0x3f3214c4,2
+np.float32,0xbf4cfb53,0xbf639927,2
+np.float32,0x3f4ac8b8,0x3f60aa7c,2
+np.float32,0x3e90e593,0x3e92d6b3,2
+np.float32,0xbf66bccf,0xbf83a2d8,2
+np.float32,0x7d3d851a,0x7f800000,2
+np.float32,0x7bac783c,0x7f800000,2
+np.float32,0x8001c626,0x8001c626,2
+np.float32,0xbdffd480,0xbe003f7b,2
+np.float32,0x7f6680bf,0x7f800000,2
+np.float32,0xbecf448e,0xbed4f9bb,2
+np.float32,0x584c7,0x584c7,2
+np.float32,0x3f3e8ea0,0x3f50a5fb,2
+np.float32,0xbf5a5f04,0xbf75d56e,2
+np.float32,0x8065ae47,0x8065ae47,2
+np.float32,0xbf48dce3,0xbf5e1dba,2
+np.float32,0xbe8dae2e,0xbe8f7ed8,2
+np.float32,0x3f7ca6ab,0x3f93dace,2
+np.float32,0x4c3e81,0x4c3e81,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x3ee1f7d9,0x3ee96033,2
+np.float32,0x80588c6f,0x80588c6f,2
+np.float32,0x5ba34e,0x5ba34e,2
+np.float32,0x80095d28,0x80095d28,2
+np.float32,0xbe7ba198,0xbe7e2bdd,2
+np.float32,0xbe0bdcb4,0xbe0c4c22,2
+np.float32,0x1776f7,0x1776f7,2
+np.float32,0x80328b2a,0x80328b2a,2
+np.float32,0x3e978d37,0x3e99c63e,2
+np.float32,0x7ed50906,0x7f800000,2
+np.float32,0x3f776a54,0x3f8fe2bd,2
+np.float32,0xbed624c4,0xbedc7120,2
+np.float32,0x7f0b6a31,0x7f800000,2
+np.float32,0x7eb13913,0x7f800000,2
+np.float32,0xbe733684,0xbe758190,2
+np.float32,0x80016474,0x80016474,2
+np.float32,0x7a51ee,0x7a51ee,2
+np.float32,0x3f6cb91e,0x3f87f729,2
+np.float32,0xbd99b050,0xbd99d540,2
+np.float32,0x7c6e3cba,0x7f800000,2
+np.float32,0xbf00179a,0xbf05811e,2
+np.float32,0x3e609b29,0x3e626954,2
+np.float32,0xff3fd71a,0xff800000,2
+np.float32,0x5d8c2,0x5d8c2,2
+np.float32,0x7ee93662,0x7f800000,2
+np.float32,0x4b0b31,0x4b0b31,2
+np.float32,0x3ec243b7,0x3ec6f594,2
+np.float32,0x804d60f1,0x804d60f1,2
+np.float32,0xbf0cb784,0xbf13e929,2
+np.float32,0x3f13b74d,0x3f1c0cee,2
+np.float32,0xfe37cb64,0xff800000,2
+np.float32,0x1a88,0x1a88,2
+np.float32,0x3e22a472,0x3e2353ba,2
+np.float32,0x7f07d6a0,0x7f800000,2
+np.float32,0x3f78f435,0x3f910bb5,2
+np.float32,0x555a4a,0x555a4a,2
+np.float32,0x3e306c1f,0x3e314be3,2
+np.float32,0x8005877c,0x8005877c,2
+np.float32,0x4df389,0x4df389,2
+np.float32,0x8069ffc7,0x8069ffc7,2
+np.float32,0x3f328f24,0x3f4164c6,2
+np.float32,0x53a31b,0x53a31b,2
+np.float32,0xbe4d6768,0xbe4ec8be,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0x3f484c1b,0x3f5d5e2f,2
+np.float32,0x8038be05,0x8038be05,2
+np.float32,0x58ac0f,0x58ac0f,2
+np.float32,0x7ed7fb72,0x7f800000,2
+np.float32,0x5a22e1,0x5a22e1,2
+np.float32,0xbebb7394,0xbebfaad6,2
+np.float32,0xbda98160,0xbda9b2ef,2
+np.float32,0x7f3e5c42,0x7f800000,2
+np.float32,0xfed204ae,0xff800000,2
+np.float32,0xbf5ef782,0xbf7c3ec5,2
+np.float32,0xbef7a0a8,0xbf00b292,2
+np.float32,0xfee6e176,0xff800000,2
+np.float32,0xfe121140,0xff800000,2
+np.float32,0xfe9e13be,0xff800000,2
+np.float32,0xbf3c98b1,0xbf4e2003,2
+np.float32,0x77520d,0x77520d,2
+np.float32,0xf17b2,0xf17b2,2
+np.float32,0x724d2f,0x724d2f,2
+np.float32,0x7eb326f5,0x7f800000,2
+np.float32,0x3edd6bf2,0x3ee4636e,2
+np.float32,0x350f57,0x350f57,2
+np.float32,0xff7d4435,0xff800000,2
+np.float32,0x802b2b9d,0x802b2b9d,2
+np.float32,0xbf7fbeee,0xbf963acf,2
+np.float32,0x804f3100,0x804f3100,2
+np.float32,0x7c594a71,0x7f800000,2
+np.float32,0x3ef49340,0x3efdfbb6,2
+np.float32,0x2e0659,0x2e0659,2
+np.float32,0x8006d5fe,0x8006d5fe,2
+np.float32,0xfd2a00b0,0xff800000,2
+np.float32,0xbee1c016,0xbee922ed,2
+np.float32,0x3e3b7de8,0x3e3c8a8b,2
+np.float32,0x805e6bba,0x805e6bba,2
+np.float32,0x1a7da2,0x1a7da2,2
+np.float32,0x6caba4,0x6caba4,2
+np.float32,0x802f7eab,0x802f7eab,2
+np.float32,0xff68b16b,0xff800000,2
+np.float32,0x8064f5e5,0x8064f5e5,2
+np.float32,0x2e39b4,0x2e39b4,2
+np.float32,0x800000,0x800000,2
+np.float32,0xfd0334c0,0xff800000,2
+np.float32,0x3e952fc4,0x3e974e7e,2
+np.float32,0x80057d33,0x80057d33,2
+np.float32,0x3ed3ddc4,0x3ed9f6f1,2
+np.float32,0x3f74ce18,0x3f8dedf4,2
+np.float32,0xff6bb7c0,0xff800000,2
+np.float32,0xff43bc21,0xff800000,2
+np.float32,0x80207570,0x80207570,2
+np.float32,0x7e1dda75,0x7f800000,2
+np.float32,0x3efe335c,0x3f0462ff,2
+np.float32,0xbf252c0c,0xbf30df70,2
+np.float32,0x3ef4b8e3,0x3efe25ba,2
+np.float32,0x7c33938d,0x7f800000,2
+np.float32,0x3eb1593c,0x3eb4ea95,2
+np.float32,0xfe1d0068,0xff800000,2
+np.float32,0xbf10da9b,0xbf18b551,2
+np.float32,0xfeb65748,0xff800000,2
+np.float32,0xfe8c6014,0xff800000,2
+np.float32,0x3f0503e2,0x3f0b14e3,2
+np.float32,0xfe5e5248,0xff800000,2
+np.float32,0xbd10afa0,0xbd10b754,2
+np.float32,0xff64b609,0xff800000,2
+np.float32,0xbf674a96,0xbf84089c,2
+np.float32,0x7f5d200d,0x7f800000,2
+np.float32,0x3cf44900,0x3cf45245,2
+np.float32,0x8044445a,0x8044445a,2
+np.float32,0xff35b676,0xff800000,2
+np.float32,0x806452cd,0x806452cd,2
+np.float32,0xbf2930fb,0xbf35c7b4,2
+np.float32,0x7e500617,0x7f800000,2
+np.float32,0x543719,0x543719,2
+np.float32,0x3ed11068,0x3ed6ec1d,2
+np.float32,0xbd8db068,0xbd8dcd59,2
+np.float32,0x3ede62c8,0x3ee571d0,2
+np.float32,0xbf00a410,0xbf061f9c,2
+np.float32,0xbf44fa39,0xbf58ff5b,2
+np.float32,0x3f1c3114,0x3f261069,2
+np.float32,0xbdea6210,0xbdeae521,2
+np.float32,0x80059f6d,0x80059f6d,2
+np.float32,0xbdba15f8,0xbdba578c,2
+np.float32,0x6d8a61,0x6d8a61,2
+np.float32,0x6f5428,0x6f5428,2
+np.float32,0x18d0e,0x18d0e,2
+np.float32,0x50e131,0x50e131,2
+np.float32,0x3f2f52be,0x3f3d5a7e,2
+np.float32,0x7399d8,0x7399d8,2
+np.float32,0x106524,0x106524,2
+np.float32,0x7ebf1c53,0x7f800000,2
+np.float32,0x80276458,0x80276458,2
+np.float32,0x3ebbde67,0x3ec01ceb,2
+np.float32,0x80144d9d,0x80144d9d,2
+np.float32,0x8017ea6b,0x8017ea6b,2
+np.float32,0xff38f201,0xff800000,2
+np.float32,0x7f2daa82,0x7f800000,2
+np.float32,0x3f3cb7c7,0x3f4e47ed,2
+np.float32,0x7f08c779,0x7f800000,2
+np.float32,0xbecc907a,0xbed20cec,2
+np.float32,0x7d440002,0x7f800000,2
+np.float32,0xbd410d80,0xbd411fcd,2
+np.float32,0x3d63ae07,0x3d63cc0c,2
+np.float32,0x805a9c13,0x805a9c13,2
+np.float32,0x803bdcdc,0x803bdcdc,2
+np.float32,0xbe88b354,0xbe8a5497,2
+np.float32,0x3f4eaf43,0x3f65e1c2,2
+np.float32,0x3f15e5b8,0x3f1e9c60,2
+np.float32,0x3e8a870c,0x3e8c394e,2
+np.float32,0x7e113de9,0x7f800000,2
+np.float32,0x7ee5ba41,0x7f800000,2
+np.float32,0xbe73d178,0xbe7620eb,2
+np.float32,0xfe972e6a,0xff800000,2
+np.float32,0xbf65567d,0xbf82a25a,2
+np.float32,0x3f38247e,0x3f487010,2
+np.float32,0xbece1c62,0xbed3b918,2
+np.float32,0x442c8d,0x442c8d,2
+np.float32,0x2dc52,0x2dc52,2
+np.float32,0x802ed923,0x802ed923,2
+np.float32,0x788cf8,0x788cf8,2
+np.float32,0x8024888e,0x8024888e,2
+np.float32,0x3f789bde,0x3f90c8fc,2
+np.float32,0x3f5de620,0x3f7abf88,2
+np.float32,0x3f0ffc45,0x3f17b2a7,2
+np.float32,0xbf709678,0xbf8accd4,2
+np.float32,0x12181f,0x12181f,2
+np.float32,0xfe54bbe4,0xff800000,2
+np.float32,0x7f1daba0,0x7f800000,2
+np.float32,0xbf6226df,0xbf805e3c,2
+np.float32,0xbd120610,0xbd120dfb,2
+np.float32,0x7f75e951,0x7f800000,2
+np.float32,0x80068048,0x80068048,2
+np.float32,0x45f04a,0x45f04a,2
+np.float32,0xff4c4f58,0xff800000,2
+np.float32,0x311604,0x311604,2
+np.float32,0x805e809c,0x805e809c,2
+np.float32,0x3d1d62c0,0x3d1d6caa,2
+np.float32,0x7f14ccf9,0x7f800000,2
+np.float32,0xff10017c,0xff800000,2
+np.float32,0xbf43ec48,0xbf579df4,2
+np.float32,0xff64da57,0xff800000,2
+np.float32,0x7f0622c5,0x7f800000,2
+np.float32,0x7f5460cd,0x7f800000,2
+np.float32,0xff0ef1c6,0xff800000,2
+np.float32,0xbece1146,0xbed3ad13,2
+np.float32,0x3f4d457f,0x3f63fc70,2
+np.float32,0xbdc1da28,0xbdc2244b,2
+np.float32,0xbe46d3f4,0xbe481463,2
+np.float32,0xff36b3d6,0xff800000,2
+np.float32,0xbec2e76c,0xbec7a540,2
+np.float32,0x8078fb81,0x8078fb81,2
+np.float32,0x7ec819cb,0x7f800000,2
+np.float32,0x39c4d,0x39c4d,2
+np.float32,0xbe8cddc2,0xbe8ea670,2
+np.float32,0xbf36dffb,0xbf46d48b,2
+np.float32,0xbf2302a3,0xbf2e4065,2
+np.float32,0x3e7b34a2,0x3e7dbb9a,2
+np.float32,0x3e3d87e1,0x3e3e9d62,2
+np.float32,0x7f3c94b1,0x7f800000,2
+np.float32,0x80455a85,0x80455a85,2
+np.float32,0xfd875568,0xff800000,2
+np.float32,0xbf618103,0xbf7fd1c8,2
+np.float32,0xbe332e3c,0xbe3418ac,2
+np.float32,0x80736b79,0x80736b79,2
+np.float32,0x3f705d9a,0x3f8aa2e6,2
+np.float32,0xbf3a36d2,0xbf4b134b,2
+np.float32,0xfddc55c0,0xff800000,2
+np.float32,0x805606fd,0x805606fd,2
+np.float32,0x3f4f0bc4,0x3f665e25,2
+np.float32,0xfebe7494,0xff800000,2
+np.float32,0xff0c541b,0xff800000,2
+np.float32,0xff0b8e7f,0xff800000,2
+np.float32,0xbcc51640,0xbcc51b1e,2
+np.float32,0x7ec1c4d0,0x7f800000,2
+np.float32,0xfc5c8e00,0xff800000,2
+np.float32,0x7f48d682,0x7f800000,2
+np.float32,0x7d5c7d8d,0x7f800000,2
+np.float32,0x8052ed03,0x8052ed03,2
+np.float32,0x7d4db058,0x7f800000,2
+np.float32,0xff3a65ee,0xff800000,2
+np.float32,0x806eeb93,0x806eeb93,2
+np.float32,0x803f9733,0x803f9733,2
+np.float32,0xbf2d1388,0xbf3a90e3,2
+np.float32,0x68e260,0x68e260,2
+np.float32,0x3e47a69f,0x3e48eb0e,2
+np.float32,0x3f0c4623,0x3f136646,2
+np.float32,0x3f37a831,0x3f47d249,2
+np.float32,0xff153a0c,0xff800000,2
+np.float32,0x2e8086,0x2e8086,2
+np.float32,0xc3f5e,0xc3f5e,2
+np.float32,0x7f31dc14,0x7f800000,2
+np.float32,0xfee37d68,0xff800000,2
+np.float32,0x711d4,0x711d4,2
+np.float32,0x7ede2ce4,0x7f800000,2
+np.float32,0xbf5d76d0,0xbf7a23d0,2
+np.float32,0xbe2b9eb4,0xbe2c6cac,2
+np.float32,0x2b14d7,0x2b14d7,2
+np.float32,0x3ea1db72,0x3ea4910e,2
+np.float32,0x7f3f03f7,0x7f800000,2
+np.float32,0x92de5,0x92de5,2
+np.float32,0x80322e1b,0x80322e1b,2
+np.float32,0xbf5eb214,0xbf7bdd55,2
+np.float32,0xbf21bf87,0xbf2cba14,2
+np.float32,0xbf5d4b78,0xbf79e73a,2
+np.float32,0xbc302840,0xbc30291e,2
+np.float32,0xfee567c6,0xff800000,2
+np.float32,0x7f70ee14,0x7f800000,2
+np.float32,0x7e5c4b33,0x7f800000,2
+np.float32,0x3f1e7b64,0x3f28ccfd,2
+np.float32,0xbf6309f7,0xbf80ff3e,2
+np.float32,0x1c2fe3,0x1c2fe3,2
+np.float32,0x8e78d,0x8e78d,2
+np.float32,0x7f2fce73,0x7f800000,2
+np.float32,0x7f25f690,0x7f800000,2
+np.float32,0x8074cba5,0x8074cba5,2
+np.float32,0x16975f,0x16975f,2
+np.float32,0x8012cf5c,0x8012cf5c,2
+np.float32,0x7da72138,0x7f800000,2
+np.float32,0xbf563f35,0xbf7025be,2
+np.float32,0x3f69d3f5,0x3f85dcbe,2
+np.float32,0xbf15c148,0xbf1e7184,2
+np.float32,0xbe7a077c,0xbe7c8564,2
+np.float32,0x3ebb6ef1,0x3ebfa5e3,2
+np.float32,0xbe41fde4,0xbe43277b,2
+np.float32,0x7f10b479,0x7f800000,2
+np.float32,0x3e021ace,0x3e02747d,2
+np.float32,0x3e93d984,0x3e95e9be,2
+np.float32,0xfe17e924,0xff800000,2
+np.float32,0xfe21a7cc,0xff800000,2
+np.float32,0x8019b660,0x8019b660,2
+np.float32,0x7e954631,0x7f800000,2
+np.float32,0x7e7330d1,0x7f800000,2
+np.float32,0xbe007d98,0xbe00d3fb,2
+np.float32,0x3ef3870e,0x3efcd077,2
+np.float32,0x7f5bbde8,0x7f800000,2
+np.float32,0x14a5b3,0x14a5b3,2
+np.float32,0x3e84d23f,0x3e8650e8,2
+np.float32,0x80763017,0x80763017,2
+np.float32,0xfe871f36,0xff800000,2
+np.float32,0x7ed43150,0x7f800000,2
+np.float32,0x3cc44547,0x3cc44a16,2
+np.float32,0x3ef0c0fa,0x3ef9b97d,2
+np.float32,0xbede9944,0xbee5ad86,2
+np.float32,0xbf10f0b2,0xbf18cf0a,2
+np.float32,0x3ecdaa78,0x3ed33dd9,2
+np.float32,0x3f7cc058,0x3f93ee6b,2
+np.float32,0x2d952f,0x2d952f,2
+np.float32,0x3f2cf2de,0x3f3a687a,2
+np.float32,0x8029b33c,0x8029b33c,2
+np.float32,0xbf22c737,0xbf2df888,2
+np.float32,0xff53c84a,0xff800000,2
+np.float32,0x40a509,0x40a509,2
+np.float32,0x56abce,0x56abce,2
+np.float32,0xff7fffff,0xff800000,2
+np.float32,0xbf3e67f6,0xbf50741c,2
+np.float32,0xfde67580,0xff800000,2
+np.float32,0x3f103e9b,0x3f17ffc7,2
+np.float32,0x3f3f7232,0x3f51cbe2,2
+np.float32,0x803e6d78,0x803e6d78,2
+np.float32,0x3a61da,0x3a61da,2
+np.float32,0xbc04de80,0xbc04dedf,2
+np.float32,0x7f1e7c52,0x7f800000,2
+np.float32,0x8058ee88,0x8058ee88,2
+np.float32,0x806dd660,0x806dd660,2
+np.float32,0x7e4af9,0x7e4af9,2
+np.float32,0x80702d27,0x80702d27,2
+np.float32,0x802cdad1,0x802cdad1,2
+np.float32,0x3e9b5c23,0x3e9dc149,2
+np.float32,0x7f076e89,0x7f800000,2
+np.float32,0x7f129d68,0x7f800000,2
+np.float32,0x7f6f0b0a,0x7f800000,2
+np.float32,0x7eafafb5,0x7f800000,2
+np.float32,0xbf2ef2ca,0xbf3ce332,2
+np.float32,0xff34c000,0xff800000,2
+np.float32,0x7f559274,0x7f800000,2
+np.float32,0xfed08556,0xff800000,2
+np.float32,0xbf014621,0xbf06d6ad,2
+np.float32,0xff23086a,0xff800000,2
+np.float32,0x6cb33f,0x6cb33f,2
+np.float32,0xfe6e3ffc,0xff800000,2
+np.float32,0x3e6bbec0,0x3e6dd546,2
+np.float32,0x8036afa6,0x8036afa6,2
+np.float32,0xff800000,0xff800000,2
+np.float32,0x3e0ed05c,0x3e0f46ff,2
+np.float32,0x3ec9215c,0x3ece57e6,2
+np.float32,0xbf449fa4,0xbf5888aa,2
+np.float32,0xff2c6640,0xff800000,2
+np.float32,0x7f08f4a7,0x7f800000,2
+np.float32,0xbf4f63e5,0xbf66d4c1,2
+np.float32,0x3f800000,0x3f966cfe,2
+np.float32,0xfe86c7d2,0xff800000,2
+np.float32,0x3f63f969,0x3f81a970,2
+np.float32,0xbd7022d0,0xbd704609,2
+np.float32,0xbead906c,0xbeb0e853,2
+np.float32,0x7ef149ee,0x7f800000,2
+np.float32,0xff0b9ff7,0xff800000,2
+np.float32,0x3f38380d,0x3f4888e7,2
+np.float32,0x3ef3a3e2,0x3efcf09e,2
+np.float32,0xff616477,0xff800000,2
+np.float32,0x3f3f83e4,0x3f51e2c3,2
+np.float32,0xbf79963c,0xbf918642,2
+np.float32,0x801416f4,0x801416f4,2
+np.float32,0xff75ce6d,0xff800000,2
+np.float32,0xbdbf3588,0xbdbf7cad,2
+np.float32,0xbe6ea938,0xbe70d3dc,2
+np.float32,0x8066f977,0x8066f977,2
+np.float32,0x3f5b5362,0x3f7728aa,2
+np.float32,0xbf72052c,0xbf8bdbd8,2
+np.float32,0xbe21ed74,0xbe229a6f,2
+np.float32,0x8062d19c,0x8062d19c,2
+np.float32,0x3ed8d01f,0x3edf59e6,2
+np.float32,0x803ed42b,0x803ed42b,2
+np.float32,0xbe099a64,0xbe0a0481,2
+np.float32,0xbe173eb4,0xbe17cba2,2
+np.float32,0xbebdcf02,0xbec22faf,2
+np.float32,0x7e3ff29e,0x7f800000,2
+np.float32,0x367c92,0x367c92,2
+np.float32,0xbf5c9db8,0xbf78f4a4,2
+np.float32,0xff0b49ea,0xff800000,2
+np.float32,0x3f4f9bc4,0x3f672001,2
+np.float32,0x85d4a,0x85d4a,2
+np.float32,0x80643e33,0x80643e33,2
+np.float32,0x8013aabd,0x8013aabd,2
+np.float32,0xff6997c3,0xff800000,2
+np.float32,0x3f4dd43c,0x3f64bbb6,2
+np.float32,0xff13bbb9,0xff800000,2
+np.float32,0x3f34efa2,0x3f446187,2
+np.float32,0x3e4b2f10,0x3e4c850d,2
+np.float32,0xfef695c6,0xff800000,2
+np.float32,0x7f7e0057,0x7f800000,2
+np.float32,0x3f6e1b9c,0x3f88fa40,2
+np.float32,0x806e46cf,0x806e46cf,2
+np.float32,0x3f15a88a,0x3f1e546c,2
+np.float32,0xbd2de7d0,0xbd2df530,2
+np.float32,0xbf63cae0,0xbf818854,2
+np.float32,0xbdc3e1a0,0xbdc42e1e,2
+np.float32,0xbf11a038,0xbf199b98,2
+np.float32,0xbec13706,0xbec5d56b,2
+np.float32,0x3f1c5f54,0x3f26478d,2
+np.float32,0x3e9ea97e,0x3ea136b4,2
+np.float32,0xfeb5a508,0xff800000,2
+np.float32,0x7f4698f4,0x7f800000,2
+np.float32,0xff51ee2c,0xff800000,2
+np.float32,0xff5994df,0xff800000,2
+np.float32,0x4b9fb9,0x4b9fb9,2
+np.float32,0xfda10d98,0xff800000,2
+np.float32,0x525555,0x525555,2
+np.float32,0x7ed571ef,0x7f800000,2
+np.float32,0xbf600d18,0xbf7dc50c,2
+np.float32,0x3ec674ca,0x3ecb768b,2
+np.float32,0x3cb69115,0x3cb694f3,2
+np.float32,0x7eac75f2,0x7f800000,2
+np.float32,0x804d4d75,0x804d4d75,2
+np.float32,0xfed5292e,0xff800000,2
+np.float32,0x800ed06a,0x800ed06a,2
+np.float32,0xfec37584,0xff800000,2
+np.float32,0x3ef96ac7,0x3f01b326,2
+np.float32,0x42f743,0x42f743,2
+np.float32,0x3f56f442,0x3f711e39,2
+np.float32,0xbf7ea726,0xbf956375,2
+np.float32,0x806c7202,0x806c7202,2
+np.float32,0xbd8ee980,0xbd8f0733,2
+np.float32,0xbdf2e930,0xbdf37b18,2
+np.float32,0x3f103910,0x3f17f955,2
+np.float32,0xff123e8f,0xff800000,2
+np.float32,0x806e4b5d,0x806e4b5d,2
+np.float32,0xbf4f3bfc,0xbf669f07,2
+np.float32,0xbf070c16,0xbf0d6609,2
+np.float32,0xff00e0ba,0xff800000,2
+np.float32,0xff49d828,0xff800000,2
+np.float32,0x7e47f04a,0x7f800000,2
+np.float32,0x7e984dac,0x7f800000,2
+np.float32,0x3f77473c,0x3f8fc858,2
+np.float32,0x3f017439,0x3f070ac8,2
+np.float32,0x118417,0x118417,2
+np.float32,0xbcf7a2c0,0xbcf7ac68,2
+np.float32,0xfee46fee,0xff800000,2
+np.float32,0x3e42a648,0x3e43d2e9,2
+np.float32,0x80131916,0x80131916,2
+np.float32,0x806209d3,0x806209d3,2
+np.float32,0x807c1f12,0x807c1f12,2
+np.float32,0x2f3696,0x2f3696,2
+np.float32,0xff28722b,0xff800000,2
+np.float32,0x7f1416a1,0x7f800000,2
+np.float32,0x8054e7a1,0x8054e7a1,2
+np.float32,0xbddc39a0,0xbddca656,2
+np.float32,0x7dc60175,0x7f800000,2
+np.float64,0x7fd0ae584da15cb0,0x7ff0000000000000,2
+np.float64,0x7fd41d68e5283ad1,0x7ff0000000000000,2
+np.float64,0x7fe93073bb7260e6,0x7ff0000000000000,2
+np.float64,0x3fb4fd19d229fa34,0x3fb5031f57dbac0f,2
+np.float64,0x85609ce10ac2,0x85609ce10ac2,2
+np.float64,0xbfd7aa12ccaf5426,0xbfd8351003a320e2,2
+np.float64,0x8004487c9b4890fa,0x8004487c9b4890fa,2
+np.float64,0x7fe7584cfd2eb099,0x7ff0000000000000,2
+np.float64,0x800ea8edc6dd51dc,0x800ea8edc6dd51dc,2
+np.float64,0x3fe0924aa5a12495,0x3fe15276e271c6dc,2
+np.float64,0x3feb1abf6d36357f,0x3fee76b4d3d06964,2
+np.float64,0x3fa8c14534318280,0x3fa8c3bd5ce5923c,2
+np.float64,0x800b9f5915d73eb3,0x800b9f5915d73eb3,2
+np.float64,0xffc05aaa7820b554,0xfff0000000000000,2
+np.float64,0x800157eda8c2afdc,0x800157eda8c2afdc,2
+np.float64,0xffe8d90042b1b200,0xfff0000000000000,2
+np.float64,0x3feda02ea93b405d,0x3ff1057e61d08d59,2
+np.float64,0xffd03b7361a076e6,0xfff0000000000000,2
+np.float64,0x3fe1a8ecd7e351da,0x3fe291eda9080847,2
+np.float64,0xffc5bfdff82b7fc0,0xfff0000000000000,2
+np.float64,0xbfe6fb3d386df67a,0xbfe9022c05df0565,2
+np.float64,0x7fefffffffffffff,0x7ff0000000000000,2
+np.float64,0x7fa10c340c221867,0x7ff0000000000000,2
+np.float64,0x3fe55cbf1daab97e,0x3fe6fc1648258b75,2
+np.float64,0xbfddeb5f60bbd6be,0xbfdf056d4fb5825f,2
+np.float64,0xffddb1a8213b6350,0xfff0000000000000,2
+np.float64,0xbfb20545e4240a88,0xbfb2091579375176,2
+np.float64,0x3f735ded2026bbda,0x3f735df1dad4ee3a,2
+np.float64,0xbfd1eb91efa3d724,0xbfd227c044dead61,2
+np.float64,0xffd737c588ae6f8c,0xfff0000000000000,2
+np.float64,0x3fc46818ec28d032,0x3fc47e416c4237a6,2
+np.float64,0x0,0x0,2
+np.float64,0xffb632097a2c6410,0xfff0000000000000,2
+np.float64,0xbfcb5ae84b36b5d0,0xbfcb905613af55b8,2
+np.float64,0xbfe7b926402f724c,0xbfe9f4f0be6aacc3,2
+np.float64,0x80081840b3f03082,0x80081840b3f03082,2
+np.float64,0x3fe767a656eecf4d,0x3fe98c53b4779de7,2
+np.float64,0x8005834c088b0699,0x8005834c088b0699,2
+np.float64,0x80074e92658e9d26,0x80074e92658e9d26,2
+np.float64,0x80045d60c268bac2,0x80045d60c268bac2,2
+np.float64,0xffb9aecfe8335da0,0xfff0000000000000,2
+np.float64,0x7fcad3e1cd35a7c3,0x7ff0000000000000,2
+np.float64,0xbf881853d03030c0,0xbf8818783e28fc87,2
+np.float64,0xe18c6d23c318e,0xe18c6d23c318e,2
+np.float64,0x7fcb367b8f366cf6,0x7ff0000000000000,2
+np.float64,0x5c13436cb8269,0x5c13436cb8269,2
+np.float64,0xffe5399938aa7332,0xfff0000000000000,2
+np.float64,0xbfdc45dbc3b88bb8,0xbfdd33958222c27e,2
+np.float64,0xbfd714691bae28d2,0xbfd7954edbef810b,2
+np.float64,0xbfdf18b02b3e3160,0xbfe02ad13634c651,2
+np.float64,0x8003e6f276e7cde6,0x8003e6f276e7cde6,2
+np.float64,0x3febb6b412776d68,0x3fef4f753def31f9,2
+np.float64,0x7fe016a3b4a02d46,0x7ff0000000000000,2
+np.float64,0x3fdc899ac7b91336,0x3fdd7e1cee1cdfc8,2
+np.float64,0x800219271e24324f,0x800219271e24324f,2
+np.float64,0x1529d93e2a53c,0x1529d93e2a53c,2
+np.float64,0x800d5bc827fab790,0x800d5bc827fab790,2
+np.float64,0x3e1495107c293,0x3e1495107c293,2
+np.float64,0x3fe89da0f2b13b42,0x3feb1dc1f3015ad7,2
+np.float64,0x800ba8c17b975183,0x800ba8c17b975183,2
+np.float64,0x8002dacf0265b59f,0x8002dacf0265b59f,2
+np.float64,0xffe6d0a4cc2da149,0xfff0000000000000,2
+np.float64,0x3fdf23fe82be47fc,0x3fe03126d8e2b309,2
+np.float64,0xffe41b1f1c28363e,0xfff0000000000000,2
+np.float64,0xbfd635c634ac6b8c,0xbfd6a8966da6adaa,2
+np.float64,0x800755bc08eeab79,0x800755bc08eeab79,2
+np.float64,0x800ba4c47c374989,0x800ba4c47c374989,2
+np.float64,0x7fec9f7649793eec,0x7ff0000000000000,2
+np.float64,0x7fdbf45738b7e8ad,0x7ff0000000000000,2
+np.float64,0x3f5597f07eab4,0x3f5597f07eab4,2
+np.float64,0xbfbf4599183e8b30,0xbfbf5985d8c65097,2
+np.float64,0xbf5b200580364000,0xbf5b2006501b21ae,2
+np.float64,0x7f91868370230d06,0x7ff0000000000000,2
+np.float64,0x3838e2a67071d,0x3838e2a67071d,2
+np.float64,0xffefe3ff5d3fc7fe,0xfff0000000000000,2
+np.float64,0xffe66b26d06cd64d,0xfff0000000000000,2
+np.float64,0xbfd830a571b0614a,0xbfd8c526927c742c,2
+np.float64,0x7fe8442122f08841,0x7ff0000000000000,2
+np.float64,0x800efa8c637df519,0x800efa8c637df519,2
+np.float64,0xf0026835e004d,0xf0026835e004d,2
+np.float64,0xffb11beefe2237e0,0xfff0000000000000,2
+np.float64,0x3fef9bbb327f3776,0x3ff2809f10641c32,2
+np.float64,0x350595306a0b3,0x350595306a0b3,2
+np.float64,0xf7f6538befecb,0xf7f6538befecb,2
+np.float64,0xffe36379c4a6c6f3,0xfff0000000000000,2
+np.float64,0x28b1d82e5163c,0x28b1d82e5163c,2
+np.float64,0x70a3d804e147c,0x70a3d804e147c,2
+np.float64,0xffd96c1bc9b2d838,0xfff0000000000000,2
+np.float64,0xffce8e00893d1c00,0xfff0000000000000,2
+np.float64,0x800f2bdcb25e57b9,0x800f2bdcb25e57b9,2
+np.float64,0xbfe0d9c63361b38c,0xbfe1a3eb02192b76,2
+np.float64,0xbfdc7b8711b8f70e,0xbfdd6e9db3a01e51,2
+np.float64,0x99e22ec133c46,0x99e22ec133c46,2
+np.float64,0xffeaef6ddab5dedb,0xfff0000000000000,2
+np.float64,0x7fe89c22c0f13845,0x7ff0000000000000,2
+np.float64,0x8002d5207de5aa42,0x8002d5207de5aa42,2
+np.float64,0x3fd1b13353236267,0x3fd1eb1b9345dfca,2
+np.float64,0x800ccae0a41995c1,0x800ccae0a41995c1,2
+np.float64,0x3fdbdaba38b7b574,0x3fdcbdfcbca37ce6,2
+np.float64,0x5b06d12cb60db,0x5b06d12cb60db,2
+np.float64,0xffd52262752a44c4,0xfff0000000000000,2
+np.float64,0x5a17f050b42ff,0x5a17f050b42ff,2
+np.float64,0x3d24205e7a485,0x3d24205e7a485,2
+np.float64,0x7fbed4dec63da9bd,0x7ff0000000000000,2
+np.float64,0xbfe56e9776aadd2f,0xbfe71212863c284f,2
+np.float64,0x7fea0bc952341792,0x7ff0000000000000,2
+np.float64,0x800f692d139ed25a,0x800f692d139ed25a,2
+np.float64,0xffdb63feab36c7fe,0xfff0000000000000,2
+np.float64,0x3fe1c2297fe38452,0x3fe2af21293c9571,2
+np.float64,0x7fede384747bc708,0x7ff0000000000000,2
+np.float64,0x800440169288802e,0x800440169288802e,2
+np.float64,0xffe3241eeb26483e,0xfff0000000000000,2
+np.float64,0xffe28f3879651e70,0xfff0000000000000,2
+np.float64,0xa435cbc1486d,0xa435cbc1486d,2
+np.float64,0x7fe55e08db6abc11,0x7ff0000000000000,2
+np.float64,0x1405e624280be,0x1405e624280be,2
+np.float64,0x3fd861bdf0b0c37c,0x3fd8f9d2e33e45e5,2
+np.float64,0x3feeb67cdc3d6cfa,0x3ff1d337d81d1c14,2
+np.float64,0x3fd159a10e22b342,0x3fd1903be7c2ea0c,2
+np.float64,0x3fd84626bc308c4d,0x3fd8dc373645e65b,2
+np.float64,0xffd3da81d9a7b504,0xfff0000000000000,2
+np.float64,0xbfd4a768b8294ed2,0xbfd503aa7c240051,2
+np.float64,0x3fe3059f2a660b3e,0x3fe42983e0c6bb2e,2
+np.float64,0x3fe3b8353827706a,0x3fe4fdd635c7269b,2
+np.float64,0xbfe4af0399695e07,0xbfe6277d9002b46c,2
+np.float64,0xbfd7e18a92afc316,0xbfd87066b54c4fe6,2
+np.float64,0x800432bcab48657a,0x800432bcab48657a,2
+np.float64,0x80033d609d267ac2,0x80033d609d267ac2,2
+np.float64,0x7fef5f758e7ebeea,0x7ff0000000000000,2
+np.float64,0xbfed7833dbfaf068,0xbff0e85bf45a5ebc,2
+np.float64,0x3fe2283985a45073,0x3fe325b0a9099c74,2
+np.float64,0xe820b4b3d0417,0xe820b4b3d0417,2
+np.float64,0x8003ecb72aa7d96f,0x8003ecb72aa7d96f,2
+np.float64,0xbfeab2c755b5658f,0xbfede7c83e92a625,2
+np.float64,0xbfc7b287f72f6510,0xbfc7d53ef2ffe9dc,2
+np.float64,0xffd9a41d0f33483a,0xfff0000000000000,2
+np.float64,0x3fd3a5b6e3a74b6c,0x3fd3f516f39a4725,2
+np.float64,0x800bc72091578e42,0x800bc72091578e42,2
+np.float64,0x800ff405ce9fe80c,0x800ff405ce9fe80c,2
+np.float64,0x57918600af24,0x57918600af24,2
+np.float64,0x2a5be7fa54b7e,0x2a5be7fa54b7e,2
+np.float64,0xbfdca7886bb94f10,0xbfdd9f142b5b43e4,2
+np.float64,0xbfe216993ee42d32,0xbfe3112936590995,2
+np.float64,0xbfe06bd9cf20d7b4,0xbfe126cd353ab42f,2
+np.float64,0x8003e6c31827cd87,0x8003e6c31827cd87,2
+np.float64,0x8005f37d810be6fc,0x8005f37d810be6fc,2
+np.float64,0x800715b081ae2b62,0x800715b081ae2b62,2
+np.float64,0x3fef94c35bff2986,0x3ff27b4bed2f4051,2
+np.float64,0x6f5798e0deb0,0x6f5798e0deb0,2
+np.float64,0x3fcef1f05c3de3e1,0x3fcf3f557550598f,2
+np.float64,0xbf9a91c400352380,0xbf9a92876273b85c,2
+np.float64,0x3fc9143f7f322880,0x3fc93d678c05d26b,2
+np.float64,0x78ad847af15b1,0x78ad847af15b1,2
+np.float64,0x8000fdc088c1fb82,0x8000fdc088c1fb82,2
+np.float64,0x800200fd304401fb,0x800200fd304401fb,2
+np.float64,0x7fb8ab09dc315613,0x7ff0000000000000,2
+np.float64,0x3fe949771b7292ee,0x3fec00891c3fc5a2,2
+np.float64,0xbfc54cae0e2a995c,0xbfc565e0f3d0e3af,2
+np.float64,0xffd546161e2a8c2c,0xfff0000000000000,2
+np.float64,0x800fe1d1279fc3a2,0x800fe1d1279fc3a2,2
+np.float64,0x3fd9c45301b388a8,0x3fda77fa1f4c79bf,2
+np.float64,0x7fe10ff238221fe3,0x7ff0000000000000,2
+np.float64,0xbfbc2181ae384300,0xbfbc3002229155c4,2
+np.float64,0xbfe7bbfae4ef77f6,0xbfe9f895e91f468d,2
+np.float64,0x800d3d994f7a7b33,0x800d3d994f7a7b33,2
+np.float64,0xffe6e15a896dc2b4,0xfff0000000000000,2
+np.float64,0x800e6b6c8abcd6d9,0x800e6b6c8abcd6d9,2
+np.float64,0xbfd862c938b0c592,0xbfd8faf1cdcb09db,2
+np.float64,0xffe2411f8464823e,0xfff0000000000000,2
+np.float64,0xffd0b32efaa1665e,0xfff0000000000000,2
+np.float64,0x3ac4ace475896,0x3ac4ace475896,2
+np.float64,0xf9c3a7ebf3875,0xf9c3a7ebf3875,2
+np.float64,0xdb998ba5b7332,0xdb998ba5b7332,2
+np.float64,0xbfe438a14fe87142,0xbfe5981751e4c5cd,2
+np.float64,0xbfbcf48cbc39e918,0xbfbd045d60e65d3a,2
+np.float64,0x7fde499615bc932b,0x7ff0000000000000,2
+np.float64,0x800bba269057744e,0x800bba269057744e,2
+np.float64,0x3fc9bb1ba3337638,0x3fc9e78fdb6799c1,2
+np.float64,0xffd9f974fbb3f2ea,0xfff0000000000000,2
+np.float64,0x7fcf1ad1693e35a2,0x7ff0000000000000,2
+np.float64,0x7fe5dcedd32bb9db,0x7ff0000000000000,2
+np.float64,0xeb06500bd60ca,0xeb06500bd60ca,2
+np.float64,0x7fd73e7b592e7cf6,0x7ff0000000000000,2
+np.float64,0xbfe9d91ae873b236,0xbfecc08482849bcd,2
+np.float64,0xffc85338b730a670,0xfff0000000000000,2
+np.float64,0x7fbba41eee37483d,0x7ff0000000000000,2
+np.float64,0x3fed5624fb7aac4a,0x3ff0cf9f0de1fd54,2
+np.float64,0xffe566d80d6acdb0,0xfff0000000000000,2
+np.float64,0x3fd4477884a88ef1,0x3fd49ec7acdd25a0,2
+np.float64,0x3fcb98c5fd37318c,0x3fcbcfa20e2c2712,2
+np.float64,0xffdeba71d5bd74e4,0xfff0000000000000,2
+np.float64,0x8001edc59dc3db8c,0x8001edc59dc3db8c,2
+np.float64,0x3fe6b09e896d613e,0x3fe8a3bb541ec0e3,2
+np.float64,0x3fe8694b4970d296,0x3fead94d271d05cf,2
+np.float64,0xb52c27bf6a585,0xb52c27bf6a585,2
+np.float64,0x7fcb0a21d9361443,0x7ff0000000000000,2
+np.float64,0xbfd9efc68cb3df8e,0xbfdaa7058c0ccbd1,2
+np.float64,0x8007cd170fef9a2f,0x8007cd170fef9a2f,2
+np.float64,0x3fe83325e770664c,0x3fea92c55c9d567e,2
+np.float64,0x800bd0085537a011,0x800bd0085537a011,2
+np.float64,0xffe05b9e7820b73c,0xfff0000000000000,2
+np.float64,0x3fea4ce4347499c8,0x3fed5cea9fdc541b,2
+np.float64,0x7fe08aae1921155b,0x7ff0000000000000,2
+np.float64,0x3fe7a5e7deef4bd0,0x3fe9dc2e20cfb61c,2
+np.float64,0xbfe0ccc8e6e19992,0xbfe195175f32ee3f,2
+np.float64,0xbfe8649717f0c92e,0xbfead3298974dcf0,2
+np.float64,0x7fed6c5308bad8a5,0x7ff0000000000000,2
+np.float64,0xffdbd8c7af37b190,0xfff0000000000000,2
+np.float64,0xbfb2bc4d06257898,0xbfb2c09569912839,2
+np.float64,0x3fc62eca512c5d95,0x3fc64b4251bce8f9,2
+np.float64,0xbfcae2ddbd35c5bc,0xbfcb15971fc61312,2
+np.float64,0x18d26ce831a4f,0x18d26ce831a4f,2
+np.float64,0x7fe38b279267164e,0x7ff0000000000000,2
+np.float64,0x97e1d9ab2fc3b,0x97e1d9ab2fc3b,2
+np.float64,0xbfee8e4785fd1c8f,0xbff1b52d16807627,2
+np.float64,0xbfb189b4a6231368,0xbfb18d37e83860ee,2
+np.float64,0xffd435761ea86aec,0xfff0000000000000,2
+np.float64,0x3fe6c48ebced891e,0x3fe8bcea189c3867,2
+np.float64,0x7fdadd3678b5ba6c,0x7ff0000000000000,2
+np.float64,0x7fea8f15b7b51e2a,0x7ff0000000000000,2
+np.float64,0xbff0000000000000,0xbff2cd9fc44eb982,2
+np.float64,0x80004c071120980f,0x80004c071120980f,2
+np.float64,0x8005367adfea6cf6,0x8005367adfea6cf6,2
+np.float64,0x3fbdc9139a3b9220,0x3fbdda4aba667ce5,2
+np.float64,0x7fed5ee3ad7abdc6,0x7ff0000000000000,2
+np.float64,0x51563fb2a2ac9,0x51563fb2a2ac9,2
+np.float64,0xbfba7d26ce34fa50,0xbfba894229c50ea1,2
+np.float64,0x6c10db36d821c,0x6c10db36d821c,2
+np.float64,0xbfbdaec0d03b5d80,0xbfbdbfca6ede64f4,2
+np.float64,0x800a1cbe7414397d,0x800a1cbe7414397d,2
+np.float64,0x800ae6e7f2d5cdd0,0x800ae6e7f2d5cdd0,2
+np.float64,0x3fea63d3fef4c7a8,0x3fed7c1356688ddc,2
+np.float64,0xbfde1e3a88bc3c76,0xbfdf3dfb09cc2260,2
+np.float64,0xbfd082d75a2105ae,0xbfd0b1e28c84877b,2
+np.float64,0x7fea1e5e85f43cbc,0x7ff0000000000000,2
+np.float64,0xffe2237a1a6446f4,0xfff0000000000000,2
+np.float64,0x3fd1e2be8523c57d,0x3fd21e93dfd1bbc4,2
+np.float64,0x3fd1acd428a359a8,0x3fd1e6916a42bc3a,2
+np.float64,0x61a152f0c342b,0x61a152f0c342b,2
+np.float64,0xbfc61a6b902c34d8,0xbfc6369557690ba0,2
+np.float64,0x7fd1a84b1f235095,0x7ff0000000000000,2
+np.float64,0x1c5cc7e638b9a,0x1c5cc7e638b9a,2
+np.float64,0x8008039755f0072f,0x8008039755f0072f,2
+np.float64,0x80097532d6f2ea66,0x80097532d6f2ea66,2
+np.float64,0xbfc6d979a12db2f4,0xbfc6f89777c53f8f,2
+np.float64,0x8004293ab1085276,0x8004293ab1085276,2
+np.float64,0x3fc2af5c21255eb8,0x3fc2c05dc0652554,2
+np.float64,0xbfd9a5ab87b34b58,0xbfda56d1076abc98,2
+np.float64,0xbfebd360ba77a6c2,0xbfef779fd6595f9b,2
+np.float64,0xffd5313c43aa6278,0xfff0000000000000,2
+np.float64,0xbfe994a262b32945,0xbfec64b969852ed5,2
+np.float64,0x3fce01a52e3c034a,0x3fce48324eb29c31,2
+np.float64,0x56bd74b2ad7af,0x56bd74b2ad7af,2
+np.float64,0xb84093ff70813,0xb84093ff70813,2
+np.float64,0x7fe776df946eedbe,0x7ff0000000000000,2
+np.float64,0xbfe294ac2e652958,0xbfe3a480938afa26,2
+np.float64,0x7fe741b4d0ee8369,0x7ff0000000000000,2
+np.float64,0x800b7e8a1056fd15,0x800b7e8a1056fd15,2
+np.float64,0x7fd28f1269251e24,0x7ff0000000000000,2
+np.float64,0x8009d4492e73a893,0x8009d4492e73a893,2
+np.float64,0x3fe3f27fca67e500,0x3fe543aff825e244,2
+np.float64,0x3fd12447e5a24890,0x3fd158efe43c0452,2
+np.float64,0xbfd58df0f2ab1be2,0xbfd5f6d908e3ebce,2
+np.float64,0xffc0a8e4642151c8,0xfff0000000000000,2
+np.float64,0xbfedb197787b632f,0xbff112367ec9d3e7,2
+np.float64,0xffdde07a7f3bc0f4,0xfff0000000000000,2
+np.float64,0x3fe91f3e5b723e7d,0x3febc886a1d48364,2
+np.float64,0x3fe50415236a082a,0x3fe68f43a5468d8c,2
+np.float64,0xd9a0c875b3419,0xd9a0c875b3419,2
+np.float64,0xbfee04ccf4bc099a,0xbff14f4740a114cf,2
+np.float64,0xbfd2bcc6a125798e,0xbfd30198b1e7d7ed,2
+np.float64,0xbfeb3c16f8f6782e,0xbfeea4ce47d09f58,2
+np.float64,0xffd3ba19e4a77434,0xfff0000000000000,2
+np.float64,0x8010000000000000,0x8010000000000000,2
+np.float64,0x3fdef0a642bde14d,0x3fe0146677b3a488,2
+np.float64,0x3fdc3dd0a2b87ba0,0x3fdd2abe65651487,2
+np.float64,0x3fdbb1fd47b763fb,0x3fdc915a2fd19f4b,2
+np.float64,0x7fbaa375e63546eb,0x7ff0000000000000,2
+np.float64,0x433ef8ee867e0,0x433ef8ee867e0,2
+np.float64,0xf5345475ea68b,0xf5345475ea68b,2
+np.float64,0xa126419b424c8,0xa126419b424c8,2
+np.float64,0x3fe0057248200ae5,0x3fe0b2f488339709,2
+np.float64,0xffc5e3b82f2bc770,0xfff0000000000000,2
+np.float64,0xffb215c910242b90,0xfff0000000000000,2
+np.float64,0xbfeba4ae0837495c,0xbfef3642e4b54aac,2
+np.float64,0xffbb187ebe363100,0xfff0000000000000,2
+np.float64,0x3fe4c6a496a98d49,0x3fe64440cdf06aab,2
+np.float64,0x800767a28f6ecf46,0x800767a28f6ecf46,2
+np.float64,0x3fdbed63b1b7dac8,0x3fdcd27318c0b683,2
+np.float64,0x80006d8339e0db07,0x80006d8339e0db07,2
+np.float64,0x8000b504f0416a0b,0x8000b504f0416a0b,2
+np.float64,0xbfe88055bfb100ac,0xbfeaf767bd2767b9,2
+np.float64,0x3fefe503317fca06,0x3ff2b8d4057240c8,2
+np.float64,0x7fe307538b660ea6,0x7ff0000000000000,2
+np.float64,0x944963c12892d,0x944963c12892d,2
+np.float64,0xbfd2c20b38a58416,0xbfd30717900f8233,2
+np.float64,0x7feed04e3e3da09b,0x7ff0000000000000,2
+np.float64,0x3fe639619cac72c3,0x3fe80de7b8560a8d,2
+np.float64,0x3fde066c66bc0cd9,0x3fdf237fb759a652,2
+np.float64,0xbfc56b22b52ad644,0xbfc584c267a47ebd,2
+np.float64,0x3fc710d5b12e21ab,0x3fc730d817ba0d0c,2
+np.float64,0x3fee1dfc347c3bf8,0x3ff161d9c3e15f68,2
+np.float64,0x3fde400954bc8013,0x3fdf639e5cc9e7a9,2
+np.float64,0x56e701f8adce1,0x56e701f8adce1,2
+np.float64,0xbfe33bbc89e67779,0xbfe46996b39381fe,2
+np.float64,0x7fec89e2f87913c5,0x7ff0000000000000,2
+np.float64,0xbfdad58b40b5ab16,0xbfdba098cc0ad5d3,2
+np.float64,0x3fe99c76a13338ed,0x3fec6f31bae613e7,2
+np.float64,0x3fe4242a29a84854,0x3fe57f6b45e5c0ef,2
+np.float64,0xbfe79d3199ef3a63,0xbfe9d0fb96c846ba,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xbfeb35a6cf766b4e,0xbfee9be4e7e943f7,2
+np.float64,0x3e047f267c091,0x3e047f267c091,2
+np.float64,0x4bf1376a97e28,0x4bf1376a97e28,2
+np.float64,0x800ef419685de833,0x800ef419685de833,2
+np.float64,0x3fe0efa61a21df4c,0x3fe1bce98baf2f0f,2
+np.float64,0x3fcc13c4d738278a,0x3fcc4d8c778bcaf7,2
+np.float64,0x800f1d291afe3a52,0x800f1d291afe3a52,2
+np.float64,0x3fd3f10e6da7e21d,0x3fd444106761ea1d,2
+np.float64,0x800706d6d76e0dae,0x800706d6d76e0dae,2
+np.float64,0xffa1ffbc9023ff80,0xfff0000000000000,2
+np.float64,0xbfe098f26d6131e5,0xbfe15a08a5f3eac0,2
+np.float64,0x3fe984f9cc7309f4,0x3fec4fcdbdb1cb9b,2
+np.float64,0x7fd7c2f1eaaf85e3,0x7ff0000000000000,2
+np.float64,0x800a8adb64f515b7,0x800a8adb64f515b7,2
+np.float64,0x80060d3ffc8c1a81,0x80060d3ffc8c1a81,2
+np.float64,0xbfec37e4aef86fc9,0xbff0029a6a1d61e2,2
+np.float64,0x800b21bcfcf6437a,0x800b21bcfcf6437a,2
+np.float64,0xbfc08facc1211f58,0xbfc09b8380ea8032,2
+np.float64,0xffebb4b52577696a,0xfff0000000000000,2
+np.float64,0x800b08096df61013,0x800b08096df61013,2
+np.float64,0x8000000000000000,0x8000000000000000,2
+np.float64,0xffd2f0c9c8a5e194,0xfff0000000000000,2
+np.float64,0xffe78b2299af1644,0xfff0000000000000,2
+np.float64,0x7fd0444794a0888e,0x7ff0000000000000,2
+np.float64,0x307c47b460f8a,0x307c47b460f8a,2
+np.float64,0xffe6b4c851ad6990,0xfff0000000000000,2
+np.float64,0xffe1877224a30ee4,0xfff0000000000000,2
+np.float64,0x48d7b5c091af7,0x48d7b5c091af7,2
+np.float64,0xbfa1dc6b1c23b8d0,0xbfa1dd5889e1b7da,2
+np.float64,0x3fe5004737ea008e,0x3fe68a9c310b08c1,2
+np.float64,0x7fec5f0742b8be0e,0x7ff0000000000000,2
+np.float64,0x3fd0a86285a150c5,0x3fd0d8b238d557fa,2
+np.float64,0x7fed60380efac06f,0x7ff0000000000000,2
+np.float64,0xeeca74dfdd94f,0xeeca74dfdd94f,2
+np.float64,0x3fda05aaa8b40b54,0x3fdabebdbf405e84,2
+np.float64,0x800e530ceb1ca61a,0x800e530ceb1ca61a,2
+np.float64,0x800b3866379670cd,0x800b3866379670cd,2
+np.float64,0xffedb3e7fa3b67cf,0xfff0000000000000,2
+np.float64,0xffdfa4c0713f4980,0xfff0000000000000,2
+np.float64,0x7fe4679e0728cf3b,0x7ff0000000000000,2
+np.float64,0xffe978611ef2f0c2,0xfff0000000000000,2
+np.float64,0x7fc9f4601f33e8bf,0x7ff0000000000000,2
+np.float64,0x3fd4942de6a9285c,0x3fd4ef6e089357dd,2
+np.float64,0x3faafe064435fc00,0x3fab0139cd6564dc,2
+np.float64,0x800d145a519a28b5,0x800d145a519a28b5,2
+np.float64,0xbfd82636f2304c6e,0xbfd8b9f75ddd2f02,2
+np.float64,0xbfdf2e975e3e5d2e,0xbfe037174280788c,2
+np.float64,0x7fd7051d7c2e0a3a,0x7ff0000000000000,2
+np.float64,0x8007933d452f267b,0x8007933d452f267b,2
+np.float64,0xb2043beb64088,0xb2043beb64088,2
+np.float64,0x3febfd9708f7fb2e,0x3fefb2ef090f18d2,2
+np.float64,0xffd9bc6bc83378d8,0xfff0000000000000,2
+np.float64,0xc10f9fd3821f4,0xc10f9fd3821f4,2
+np.float64,0x3fe3c83413a79068,0x3fe510fa1dd8edf7,2
+np.float64,0x3fbe26ccda3c4da0,0x3fbe38a892279975,2
+np.float64,0x3fcc1873103830e6,0x3fcc5257a6ae168d,2
+np.float64,0xe7e000e9cfc00,0xe7e000e9cfc00,2
+np.float64,0xffda73852bb4e70a,0xfff0000000000000,2
+np.float64,0xbfe831be19f0637c,0xbfea90f1b34da3e5,2
+np.float64,0xbfeb568f3076ad1e,0xbfeec97eebfde862,2
+np.float64,0x510a6ad0a214e,0x510a6ad0a214e,2
+np.float64,0x3fe6ba7e35ed74fc,0x3fe8b032a9a28c6a,2
+np.float64,0xffeb5cdcff76b9b9,0xfff0000000000000,2
+np.float64,0x4f0a23e89e145,0x4f0a23e89e145,2
+np.float64,0x446ec20288dd9,0x446ec20288dd9,2
+np.float64,0x7fe2521b02e4a435,0x7ff0000000000000,2
+np.float64,0x8001cd2969e39a54,0x8001cd2969e39a54,2
+np.float64,0x3fdfe90600bfd20c,0x3fe09fdcca10001c,2
+np.float64,0x7fd660c5762cc18a,0x7ff0000000000000,2
+np.float64,0xbfb11b23aa223648,0xbfb11e661949b377,2
+np.float64,0x800e025285fc04a5,0x800e025285fc04a5,2
+np.float64,0xffb180bb18230178,0xfff0000000000000,2
+np.float64,0xaaf590df55eb2,0xaaf590df55eb2,2
+np.float64,0xbfe8637d9df0c6fb,0xbfead1ba429462ec,2
+np.float64,0x7fd2577866a4aef0,0x7ff0000000000000,2
+np.float64,0xbfcfb2ab5a3f6558,0xbfd002ee87f272b9,2
+np.float64,0x7fdd64ae2f3ac95b,0x7ff0000000000000,2
+np.float64,0xffd1a502c9234a06,0xfff0000000000000,2
+np.float64,0x7fc4be4b60297c96,0x7ff0000000000000,2
+np.float64,0xbfb46b712a28d6e0,0xbfb470fca9919172,2
+np.float64,0xffdef913033df226,0xfff0000000000000,2
+np.float64,0x3fd94a3545b2946b,0x3fd9f40431ce9f9c,2
+np.float64,0x7fef88a0b6ff1140,0x7ff0000000000000,2
+np.float64,0xbfbcc81876399030,0xbfbcd7a0ab6cb388,2
+np.float64,0x800a4acfdd9495a0,0x800a4acfdd9495a0,2
+np.float64,0xffe270b3d5e4e167,0xfff0000000000000,2
+np.float64,0xbfd23f601e247ec0,0xbfd27eeca50a49eb,2
+np.float64,0x7fec6e796a78dcf2,0x7ff0000000000000,2
+np.float64,0x3fb85e0c9630bc19,0x3fb867791ccd6c72,2
+np.float64,0x7fe49fc424a93f87,0x7ff0000000000000,2
+np.float64,0xbfe75a99fbaeb534,0xbfe97ba37663de4c,2
+np.float64,0xffe85011b630a023,0xfff0000000000000,2
+np.float64,0xffe5962e492b2c5c,0xfff0000000000000,2
+np.float64,0x6f36ed4cde6de,0x6f36ed4cde6de,2
+np.float64,0x3feb72170af6e42e,0x3feeefbe6f1a2084,2
+np.float64,0x80014d8d60629b1c,0x80014d8d60629b1c,2
+np.float64,0xbfe0eb40d321d682,0xbfe1b7e31f252bf1,2
+np.float64,0x31fe305663fc7,0x31fe305663fc7,2
+np.float64,0x3fd2cd6381a59ac7,0x3fd312edc9868a4d,2
+np.float64,0xffcf0720793e0e40,0xfff0000000000000,2
+np.float64,0xbfeef1ef133de3de,0xbff1ffd5e1a3b648,2
+np.float64,0xbfd01c787aa038f0,0xbfd0482be3158a01,2
+np.float64,0x3fda3607c5b46c10,0x3fdaf3301e217301,2
+np.float64,0xffda9a9911b53532,0xfff0000000000000,2
+np.float64,0x3fc0b37c392166f8,0x3fc0bfa076f3c43e,2
+np.float64,0xbfe06591c760cb24,0xbfe11fad179ea12c,2
+np.float64,0x8006e369c20dc6d4,0x8006e369c20dc6d4,2
+np.float64,0x3fdf2912a8be5224,0x3fe033ff74b92f4d,2
+np.float64,0xffc0feb07821fd60,0xfff0000000000000,2
+np.float64,0xa4b938c949727,0xa4b938c949727,2
+np.float64,0x8008fe676571fccf,0x8008fe676571fccf,2
+np.float64,0xbfdda68459bb4d08,0xbfdeb8faab34fcbc,2
+np.float64,0xbfda18b419343168,0xbfdad360ca52ec7c,2
+np.float64,0x3febcbae35b7975c,0x3fef6cd51c9ebc15,2
+np.float64,0x3fbec615f63d8c30,0x3fbed912ba729926,2
+np.float64,0x7f99a831c8335063,0x7ff0000000000000,2
+np.float64,0x3fe663e8826cc7d1,0x3fe84330bd9aada8,2
+np.float64,0x70a9f9e6e1540,0x70a9f9e6e1540,2
+np.float64,0x8a13a5db14275,0x8a13a5db14275,2
+np.float64,0x7fc4330a3b286613,0x7ff0000000000000,2
+np.float64,0xbfe580c6136b018c,0xbfe728806cc7a99a,2
+np.float64,0x8000000000000001,0x8000000000000001,2
+np.float64,0xffec079d5df80f3a,0xfff0000000000000,2
+np.float64,0x8e1173c31c22f,0x8e1173c31c22f,2
+np.float64,0x3fe088456d21108b,0x3fe14712ca414103,2
+np.float64,0x3fe1b76f73636edf,0x3fe2a2b658557112,2
+np.float64,0xbfd4a1dd162943ba,0xbfd4fdd45cae8fb8,2
+np.float64,0x7fd60b46c8ac168d,0x7ff0000000000000,2
+np.float64,0xffe36cc3b166d987,0xfff0000000000000,2
+np.float64,0x3fdc2ae0cfb855c0,0x3fdd15f026773151,2
+np.float64,0xbfc41aa203283544,0xbfc42fd1b145fdd5,2
+np.float64,0xffed90c55fbb218a,0xfff0000000000000,2
+np.float64,0x3fe67e3a9aecfc75,0x3fe86440db65b4f6,2
+np.float64,0x7fd12dbeaba25b7c,0x7ff0000000000000,2
+np.float64,0xbfe1267c0de24cf8,0xbfe1fbb611bdf1e9,2
+np.float64,0x22e5619645cad,0x22e5619645cad,2
+np.float64,0x7fe327c72ea64f8d,0x7ff0000000000000,2
+np.float64,0x7fd2c3f545a587ea,0x7ff0000000000000,2
+np.float64,0x7fc7b689372f6d11,0x7ff0000000000000,2
+np.float64,0xc5e140bd8bc28,0xc5e140bd8bc28,2
+np.float64,0x3fccb3627a3966c5,0x3fccf11b44fa4102,2
+np.float64,0xbfd2cf725c259ee4,0xbfd315138d0e5dca,2
+np.float64,0x10000000000000,0x10000000000000,2
+np.float64,0xbfd3dfa8b627bf52,0xbfd431d17b235477,2
+np.float64,0xbfb82124e6304248,0xbfb82a4b6d9c2663,2
+np.float64,0x3fdcd590d9b9ab22,0x3fddd1d548806347,2
+np.float64,0x7fdee0cd1b3dc199,0x7ff0000000000000,2
+np.float64,0x8004ebfc60a9d7fa,0x8004ebfc60a9d7fa,2
+np.float64,0x3fe8eb818b71d704,0x3feb842679806108,2
+np.float64,0xffdd5e8fe63abd20,0xfff0000000000000,2
+np.float64,0xbfe3efcbd9e7df98,0xbfe54071436645ee,2
+np.float64,0x3fd5102557aa204b,0x3fd57203d31a05b8,2
+np.float64,0x3fe6318af7ec6316,0x3fe8041a177cbf96,2
+np.float64,0x3fdf3cecdabe79da,0x3fe03f2084ffbc78,2
+np.float64,0x7fe0ab6673a156cc,0x7ff0000000000000,2
+np.float64,0x800037d5c6c06fac,0x800037d5c6c06fac,2
+np.float64,0xffce58b86a3cb170,0xfff0000000000000,2
+np.float64,0xbfe3455d6ce68abb,0xbfe475034cecb2b8,2
+np.float64,0x991b663d3236d,0x991b663d3236d,2
+np.float64,0x3fda82d37c3505a7,0x3fdb46973da05c12,2
+np.float64,0x3f9b736fa036e6df,0x3f9b74471c234411,2
+np.float64,0x8001c96525e392cb,0x8001c96525e392cb,2
+np.float64,0x7ff0000000000000,0x7ff0000000000000,2
+np.float64,0xbfaf59122c3eb220,0xbfaf5e15f8b272b0,2
+np.float64,0xbf9aa7d288354fa0,0xbf9aa897d2a40cb5,2
+np.float64,0x8004a43428694869,0x8004a43428694869,2
+np.float64,0x7feead476dbd5a8e,0x7ff0000000000000,2
+np.float64,0xffca150f81342a20,0xfff0000000000000,2
+np.float64,0x80047ec3bc88fd88,0x80047ec3bc88fd88,2
+np.float64,0xbfee3e5b123c7cb6,0xbff179c8b8334278,2
+np.float64,0x3fd172359f22e46b,0x3fd1a9ba6b1420a1,2
+np.float64,0x3fe8e5e242f1cbc5,0x3feb7cbcaefc4d5c,2
+np.float64,0x8007fb059a6ff60c,0x8007fb059a6ff60c,2
+np.float64,0xe3899e71c7134,0xe3899e71c7134,2
+np.float64,0x7fe3b98326a77305,0x7ff0000000000000,2
+np.float64,0x7fec4e206cb89c40,0x7ff0000000000000,2
+np.float64,0xbfa3b012c4276020,0xbfa3b150c13b3cc5,2
+np.float64,0xffefffffffffffff,0xfff0000000000000,2
+np.float64,0xffe28a5b9aa514b6,0xfff0000000000000,2
+np.float64,0xbfd76a6cc2aed4da,0xbfd7f10f4d04e7f6,2
+np.float64,0xbc2b1c0178564,0xbc2b1c0178564,2
+np.float64,0x6d9d444adb3a9,0x6d9d444adb3a9,2
+np.float64,0xbfdcadd368395ba6,0xbfdda6037b5c429c,2
+np.float64,0x3fe11891fde23124,0x3fe1ebc1c204b14b,2
+np.float64,0x3fdd66c3eebacd88,0x3fde72526b5304c4,2
+np.float64,0xbfe79d85612f3b0b,0xbfe9d1673bd1f6d6,2
+np.float64,0x3fed60abdabac158,0x3ff0d7426b3800a2,2
+np.float64,0xbfb0ffa54021ff48,0xbfb102d81073a9f0,2
+np.float64,0xd2452af5a48a6,0xd2452af5a48a6,2
+np.float64,0xf4b835c1e971,0xf4b835c1e971,2
+np.float64,0x7e269cdafc4d4,0x7e269cdafc4d4,2
+np.float64,0x800097a21d812f45,0x800097a21d812f45,2
+np.float64,0x3fdfcc85e8bf990c,0x3fe08fcf770fd456,2
+np.float64,0xd8d53155b1aa6,0xd8d53155b1aa6,2
+np.float64,0x7fb8ed658831daca,0x7ff0000000000000,2
+np.float64,0xbfec865415b90ca8,0xbff03a4584d719f9,2
+np.float64,0xffd8cda62a319b4c,0xfff0000000000000,2
+np.float64,0x273598d84e6b4,0x273598d84e6b4,2
+np.float64,0x7fd566b5c32acd6b,0x7ff0000000000000,2
+np.float64,0xff61d9d48023b400,0xfff0000000000000,2
+np.float64,0xbfec5c3bf4f8b878,0xbff01c594243337c,2
+np.float64,0x7fd1be0561a37c0a,0x7ff0000000000000,2
+np.float64,0xffeaee3271b5dc64,0xfff0000000000000,2
+np.float64,0x800c0e1931b81c33,0x800c0e1931b81c33,2
+np.float64,0xbfad1171583a22e0,0xbfad1570e5c466d2,2
+np.float64,0x7fd783b0fe2f0761,0x7ff0000000000000,2
+np.float64,0x7fc39903e6273207,0x7ff0000000000000,2
+np.float64,0xffe00003c5600007,0xfff0000000000000,2
+np.float64,0x35a7b9c06b50,0x35a7b9c06b50,2
+np.float64,0x7fee441a22bc8833,0x7ff0000000000000,2
+np.float64,0xff6e47fbc03c9000,0xfff0000000000000,2
+np.float64,0xbfd3c3c9c8a78794,0xbfd41499b1912534,2
+np.float64,0x82c9c87f05939,0x82c9c87f05939,2
+np.float64,0xbfedeb0fe4fbd620,0xbff13c573ce9d3d0,2
+np.float64,0x2b79298656f26,0x2b79298656f26,2
+np.float64,0xbf5ee44f003dc800,0xbf5ee4503353c0ba,2
+np.float64,0xbfe1dd264e63ba4c,0xbfe2ce68116c7bf6,2
+np.float64,0x3fece10b7579c217,0x3ff07b21b11799c6,2
+np.float64,0x3fba47143a348e28,0x3fba52e601adf24c,2
+np.float64,0xffe9816e7a7302dc,0xfff0000000000000,2
+np.float64,0x8009a8047fd35009,0x8009a8047fd35009,2
+np.float64,0x800ac28e4e95851d,0x800ac28e4e95851d,2
+np.float64,0x80093facf4f27f5a,0x80093facf4f27f5a,2
+np.float64,0x3ff0000000000000,0x3ff2cd9fc44eb982,2
+np.float64,0x3fe76a9857eed530,0x3fe99018a5895a4f,2
+np.float64,0xbfd13c59a3a278b4,0xbfd171e133df0b16,2
+np.float64,0x7feb43bc83368778,0x7ff0000000000000,2
+np.float64,0xbfe2970c5fa52e18,0xbfe3a74a434c6efe,2
+np.float64,0xffd091c380212388,0xfff0000000000000,2
+np.float64,0x3febb3b9d2f76774,0x3fef4b4af2bd8580,2
+np.float64,0x7fec66787ef8ccf0,0x7ff0000000000000,2
+np.float64,0xbf935e185826bc40,0xbf935e640557a354,2
+np.float64,0x979df1552f3be,0x979df1552f3be,2
+np.float64,0x7fc096ee73212ddc,0x7ff0000000000000,2
+np.float64,0xbfe9de88faf3bd12,0xbfecc7d1ae691d1b,2
+np.float64,0x7fdc733f06b8e67d,0x7ff0000000000000,2
+np.float64,0xffd71be1a0ae37c4,0xfff0000000000000,2
+np.float64,0xb50dabd36a1b6,0xb50dabd36a1b6,2
+np.float64,0x7fce3d94d63c7b29,0x7ff0000000000000,2
+np.float64,0x7fbaf95e4435f2bc,0x7ff0000000000000,2
+np.float64,0x81a32a6f03466,0x81a32a6f03466,2
+np.float64,0xa99b5b4d5336c,0xa99b5b4d5336c,2
+np.float64,0x7f97c1eeb82f83dc,0x7ff0000000000000,2
+np.float64,0x3fe761636d6ec2c6,0x3fe98451160d2ffb,2
+np.float64,0xbfe3224ef5e6449e,0xbfe44b73eeadac52,2
+np.float64,0x7fde6feb0dbcdfd5,0x7ff0000000000000,2
+np.float64,0xbfee87f9ca7d0ff4,0xbff1b079e9d7f706,2
+np.float64,0x3fe46f4c9828de99,0x3fe5da2ab9609ea5,2
+np.float64,0xffb92fe882325fd0,0xfff0000000000000,2
+np.float64,0x80054bc63cea978d,0x80054bc63cea978d,2
+np.float64,0x3d988bea7b312,0x3d988bea7b312,2
+np.float64,0x3fe6468e1d6c8d1c,0x3fe81e64d37d39a8,2
+np.float64,0x3fd68eefc22d1de0,0x3fd7074264faeead,2
+np.float64,0xffb218a074243140,0xfff0000000000000,2
+np.float64,0x3fdbcb3b6cb79678,0x3fdcad011de40b7d,2
+np.float64,0x7fe3c161772782c2,0x7ff0000000000000,2
+np.float64,0x25575c904aaec,0x25575c904aaec,2
+np.float64,0x800fa43a8f5f4875,0x800fa43a8f5f4875,2
+np.float64,0x3fe41fc9e1e83f94,0x3fe57a25dd1a37f1,2
+np.float64,0x3fd895f4a7b12be9,0x3fd931e7b721a08a,2
+np.float64,0xce31469f9c629,0xce31469f9c629,2
+np.float64,0xffea0f55ca341eab,0xfff0000000000000,2
+np.float64,0xffe831c9ba306393,0xfff0000000000000,2
+np.float64,0x7fe2056f03a40add,0x7ff0000000000000,2
+np.float64,0x7fd6b075e02d60eb,0x7ff0000000000000,2
+np.float64,0x3fdfbef4273f7de8,0x3fe0882c1f59efc0,2
+np.float64,0x8005b9e094ab73c2,0x8005b9e094ab73c2,2
+np.float64,0x3fea881ac6351036,0x3fedad7a319b887c,2
+np.float64,0xbfe2c61c7ee58c39,0xbfe3de9a99d8a9c6,2
+np.float64,0x30b0d3786161b,0x30b0d3786161b,2
+np.float64,0x3fa51d56a02a3aad,0x3fa51edee2d2ecef,2
+np.float64,0x79745732f2e8c,0x79745732f2e8c,2
+np.float64,0x800d55b4907aab69,0x800d55b4907aab69,2
+np.float64,0xbfbe8fcf0a3d1fa0,0xbfbea267fbb5bfdf,2
+np.float64,0xbfd04e2756a09c4e,0xbfd07b74d079f9a2,2
+np.float64,0x3fc65170552ca2e1,0x3fc66e6eb00c82ed,2
+np.float64,0xbfb0674b8020ce98,0xbfb06a2b4771b64c,2
+np.float64,0x2059975840b34,0x2059975840b34,2
+np.float64,0x33d1385467a28,0x33d1385467a28,2
+np.float64,0x3fea41b74ff4836f,0x3fed4dc1a09e53cc,2
+np.float64,0xbfe8e08c9d71c119,0xbfeb75b4c59a6bec,2
+np.float64,0x7fdbbf14d6377e29,0x7ff0000000000000,2
+np.float64,0x3fcd8b71513b16e0,0x3fcdcec80174f9ad,2
+np.float64,0x5c50bc94b8a18,0x5c50bc94b8a18,2
+np.float64,0x969a18f52d343,0x969a18f52d343,2
+np.float64,0x3fd7ae44462f5c89,0x3fd8398bc34e395c,2
+np.float64,0xffdd0f8617ba1f0c,0xfff0000000000000,2
+np.float64,0xfff0000000000000,0xfff0000000000000,2
+np.float64,0xbfe2f9badb65f376,0xbfe41b771320ece8,2
+np.float64,0x3fd140bc7fa29,0x3fd140bc7fa29,2
+np.float64,0xbfe14523b5628a48,0xbfe21ee850972043,2
+np.float64,0x3feedd0336bdba06,0x3ff1f01afc1f3a06,2
+np.float64,0x800de423ad7bc848,0x800de423ad7bc848,2
+np.float64,0x4cef857c99df1,0x4cef857c99df1,2
+np.float64,0xbfea55e0e374abc2,0xbfed691e41d648dd,2
+np.float64,0x3fe70d7a18ae1af4,0x3fe91955a34d8094,2
+np.float64,0xbfc62fc3032c5f88,0xbfc64c3ec25decb8,2
+np.float64,0x3fc915abb5322b58,0x3fc93edac5cc73fe,2
+np.float64,0x69aaff66d3561,0x69aaff66d3561,2
+np.float64,0x5c6a90f2b8d53,0x5c6a90f2b8d53,2
+np.float64,0x3fefe30dc1bfc61c,0x3ff2b752257bdacd,2
+np.float64,0x3fef15db15fe2bb6,0x3ff21aea05601396,2
+np.float64,0xbfe353e5ac66a7cc,0xbfe48644e6553d1a,2
+np.float64,0x3fe6d30cffada61a,0x3fe8cf3e4c61ddac,2
+np.float64,0x7fb7857eb62f0afc,0x7ff0000000000000,2
+np.float64,0xbfdd9b53d23b36a8,0xbfdeac91a7af1340,2
+np.float64,0x3fd1456357228ac7,0x3fd17b3f7d39b27a,2
+np.float64,0x3fb57d10ae2afa21,0x3fb5838702b806f4,2
+np.float64,0x800c59c96c98b393,0x800c59c96c98b393,2
+np.float64,0x7fc1f2413823e481,0x7ff0000000000000,2
+np.float64,0xbfa3983624273070,0xbfa3996fa26c419a,2
+np.float64,0x7fb28874ae2510e8,0x7ff0000000000000,2
+np.float64,0x3fe826d02a304da0,0x3fea82bec50bc0b6,2
+np.float64,0x8008d6f0d3d1ade2,0x8008d6f0d3d1ade2,2
+np.float64,0xffe7c970ca2f92e1,0xfff0000000000000,2
+np.float64,0x7fcf42bcaa3e8578,0x7ff0000000000000,2
+np.float64,0x7fda1ab517343569,0x7ff0000000000000,2
+np.float64,0xbfe7926a65ef24d5,0xbfe9c323dd890d5b,2
+np.float64,0xbfcaf6282d35ec50,0xbfcb294f36a0a33d,2
+np.float64,0x800ca49df8d9493c,0x800ca49df8d9493c,2
+np.float64,0xffea18d26af431a4,0xfff0000000000000,2
+np.float64,0x3fb72f276e2e5e50,0x3fb7374539fd1221,2
+np.float64,0xffa6b613842d6c20,0xfff0000000000000,2
+np.float64,0xbfeb3c7263f678e5,0xbfeea54cdb60b54c,2
+np.float64,0x3fc976d2ba32eda5,0x3fc9a1e83a058de4,2
+np.float64,0xbfe4acd4b0e959aa,0xbfe624d5d4f9b9a6,2
+np.float64,0x7fca410a0f348213,0x7ff0000000000000,2
+np.float64,0xbfde368f77bc6d1e,0xbfdf5910c8c8bcb0,2
+np.float64,0xbfed7412937ae825,0xbff0e55afc428453,2
+np.float64,0xffef6b7b607ed6f6,0xfff0000000000000,2
+np.float64,0xbfb936f17e326de0,0xbfb941629a53c694,2
+np.float64,0x800dbb0c469b7619,0x800dbb0c469b7619,2
+np.float64,0x800f68b0581ed161,0x800f68b0581ed161,2
+np.float64,0x3fe25b2aad64b656,0x3fe361266fa9c5eb,2
+np.float64,0xbfb87e445a30fc88,0xbfb887d676910c3f,2
+np.float64,0x6e6ba9b6dcd76,0x6e6ba9b6dcd76,2
+np.float64,0x3fad27ce583a4f9d,0x3fad2bd72782ffdb,2
+np.float64,0xbfec0bc5d638178c,0xbfefc6e8c8f9095f,2
+np.float64,0x7fcba4a296374944,0x7ff0000000000000,2
+np.float64,0x8004ca237cc99448,0x8004ca237cc99448,2
+np.float64,0xffe85b8c3270b718,0xfff0000000000000,2
+np.float64,0x7fe7ee3eddafdc7d,0x7ff0000000000000,2
+np.float64,0xffd275967ca4eb2c,0xfff0000000000000,2
+np.float64,0xbfa95bc3a032b780,0xbfa95e6b288ecf43,2
+np.float64,0x3fc9e3214b33c643,0x3fca10667e7e7ff4,2
+np.float64,0x8001b89c5d837139,0x8001b89c5d837139,2
+np.float64,0xbf8807dfc0300fc0,0xbf880803e3badfbd,2
+np.float64,0x800aca94b895952a,0x800aca94b895952a,2
+np.float64,0x7fd79534a02f2a68,0x7ff0000000000000,2
+np.float64,0x3fe1b81179e37023,0x3fe2a371d8cc26f0,2
+np.float64,0x800699539d6d32a8,0x800699539d6d32a8,2
+np.float64,0xffe51dfbb3aa3bf7,0xfff0000000000000,2
+np.float64,0xbfdfb775abbf6eec,0xbfe083f48be2f98f,2
+np.float64,0x3fe87979d7b0f2f4,0x3feaee701d959079,2
+np.float64,0x3fd8e4e6a731c9cd,0x3fd986d29f25f982,2
+np.float64,0x3fe3dadaaf67b5b6,0x3fe527520fb02920,2
+np.float64,0x8003c2262bc7844d,0x8003c2262bc7844d,2
+np.float64,0x800c930add392616,0x800c930add392616,2
+np.float64,0xffb7a152a22f42a8,0xfff0000000000000,2
+np.float64,0x80028fe03dc51fc1,0x80028fe03dc51fc1,2
+np.float64,0xffe32ae60c6655cc,0xfff0000000000000,2
+np.float64,0x3fea3527e4746a50,0x3fed3cbbf47f18eb,2
+np.float64,0x800a53059e14a60c,0x800a53059e14a60c,2
+np.float64,0xbfd79e3b202f3c76,0xbfd828672381207b,2
+np.float64,0xffeed7e2eb7dafc5,0xfff0000000000000,2
+np.float64,0x3fec51ed6778a3db,0x3ff01509e34df61d,2
+np.float64,0xbfd84bc577b0978a,0xbfd8e23ec55e42e8,2
+np.float64,0x2483aff849077,0x2483aff849077,2
+np.float64,0x6f57883adeaf2,0x6f57883adeaf2,2
+np.float64,0xffd3fd74d927faea,0xfff0000000000000,2
+np.float64,0x7fca49ec773493d8,0x7ff0000000000000,2
+np.float64,0x7fd08fe2e8211fc5,0x7ff0000000000000,2
+np.float64,0x800852086db0a411,0x800852086db0a411,2
+np.float64,0x3fe5b1f2c9eb63e6,0x3fe7654f511bafc6,2
+np.float64,0xbfe01e2a58e03c54,0xbfe0cedb68f021e6,2
+np.float64,0x800988421d331085,0x800988421d331085,2
+np.float64,0xffd5038b18aa0716,0xfff0000000000000,2
+np.float64,0x8002c9264c85924d,0x8002c9264c85924d,2
+np.float64,0x3fd21ca302243946,0x3fd25ac653a71aab,2
+np.float64,0xbfea60d6e6f4c1ae,0xbfed78031d9dfa2b,2
+np.float64,0xffef97b6263f2f6b,0xfff0000000000000,2
+np.float64,0xbfd524732faa48e6,0xbfd5876ecc415dcc,2
+np.float64,0x660387e8cc072,0x660387e8cc072,2
+np.float64,0x7fcfc108a33f8210,0x7ff0000000000000,2
+np.float64,0x7febe5b0f877cb61,0x7ff0000000000000,2
+np.float64,0xbfa55fdfac2abfc0,0xbfa56176991851a8,2
+np.float64,0x25250f4c4a4a3,0x25250f4c4a4a3,2
+np.float64,0xffe2f6a2f2a5ed46,0xfff0000000000000,2
+np.float64,0x7fa754fcc02ea9f9,0x7ff0000000000000,2
+np.float64,0x3febd19dea37a33c,0x3fef75279f75d3b8,2
+np.float64,0xc5ed55218bdab,0xc5ed55218bdab,2
+np.float64,0x3fe72ff6b3ee5fed,0x3fe945388b979882,2
+np.float64,0xbfe16b854e22d70a,0xbfe24b10fc0dff14,2
+np.float64,0xffb22cbe10245980,0xfff0000000000000,2
+np.float64,0xa54246b54a849,0xa54246b54a849,2
+np.float64,0x3fe7f4cda76fe99c,0x3fea41edc74888b6,2
+np.float64,0x1,0x1,2
+np.float64,0x800d84acce9b095a,0x800d84acce9b095a,2
+np.float64,0xb0eef04761dde,0xb0eef04761dde,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0xffecaf1dbb795e3b,0xfff0000000000000,2
+np.float64,0x90dbab8d21b76,0x90dbab8d21b76,2
+np.float64,0x3fe79584a9ef2b09,0x3fe9c71fa9e40eb5,2
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv
new file mode 100644
index 00000000..083cdb2f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tan.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
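(Each row above gives a dtype, an input, the expected output, and the allowed error in ULPs; the input and output columns are the raw IEEE-754 bit patterns of the values written in hex. A minimal sketch of how one such row could be checked, assuming only numpy — the helper names below are illustrative and this is not numpy's actual test harness:)

import numpy as np

def bits_to_float32(hexstr):
    # "0x3f800000" -> the float32 whose raw IEEE-754 bit pattern that is
    return np.uint32(int(hexstr, 16)).view(np.float32)

def ordered_key(f):
    # Map a float32's sign-magnitude bit pattern onto a monotonic integer
    # scale, so adjacent representable floats differ by exactly 1 ULP.
    bits = int(np.float32(f).view(np.uint32))
    return bits if bits < 0x80000000 else -(bits - 0x80000000)

def check_row(line):
    dtype, inp, out, tol = line.strip().split(',')
    if dtype != 'np.float32':        # sketch covers float32 rows only
        return True
    x = bits_to_float32(inp)
    expected = bits_to_float32(out)
    got = np.tan(x)                  # per the filename, np.tan is under test here
    if not np.isfinite(expected):    # inf/nan rows must match in kind exactly
        return bool(np.isnan(got)) if np.isnan(expected) else bool(got == expected)
    return abs(ordered_key(got) - ordered_key(expected)) <= int(tol)

# e.g. check_row("np.float32,0x3f800000,0x3fc75923,4") verifies tan(1.0)
# against the expected bit pattern to within 4 ULPs.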
+np.float32,0xfd97ece0,0xc11186e9,4
+np.float32,0x8013bb34,0x8013bb34,4
+np.float32,0x316389,0x316389,4
+np.float32,0x7f7fffff,0xbf1c9eca,4
+np.float32,0x3f7674bb,0x3fb7e450,4
+np.float32,0x80800000,0x80800000,4
+np.float32,0x7f5995e8,0xbf94106c,4
+np.float32,0x74527,0x74527,4
+np.float32,0x7f08caea,0xbeceddb6,4
+np.float32,0x2d49b2,0x2d49b2,4
+np.float32,0x3f74e5e4,0x3fb58695,4
+np.float32,0x3f3fcd51,0x3f6e1e81,4
+np.float32,0xbf4f3608,0xbf864d3d,4
+np.float32,0xbed974a0,0xbee78c70,4
+np.float32,0xff5f483c,0x3ecf3cb2,4
+np.float32,0x7f4532f4,0xc0b96f7b,4
+np.float32,0x3f0a4f7c,0x3f198cc0,4
+np.float32,0x210193,0x210193,4
+np.float32,0xfeebad7a,0xbf92eba8,4
+np.float32,0xfed29f74,0xc134cab6,4
+np.float32,0x803433a0,0x803433a0,4
+np.float32,0x64eb46,0x64eb46,4
+np.float32,0xbf54ef22,0xbf8c757b,4
+np.float32,0x3f3d5fdd,0x3f69a17b,4
+np.float32,0x80000001,0x80000001,4
+np.float32,0x800a837a,0x800a837a,4
+np.float32,0x6ff0be,0x6ff0be,4
+np.float32,0xfe8f1186,0x3f518820,4
+np.float32,0x804963e5,0x804963e5,4
+np.float32,0xfebaa59a,0x3fa1dbb0,4
+np.float32,0x637970,0x637970,4
+np.float32,0x3e722a6b,0x3e76c89a,4
+np.float32,0xff2b0478,0xbddccb5f,4
+np.float32,0xbf7bd85b,0xbfc06821,4
+np.float32,0x3ec33600,0x3ecd4126,4
+np.float32,0x3e0a43b9,0x3e0b1c69,4
+np.float32,0x7f7511b6,0xbe427083,4
+np.float32,0x3f28c114,0x3f465a73,4
+np.float32,0x3f179e1c,0x3f2c3e7c,4
+np.float32,0x7b2963,0x7b2963,4
+np.float32,0x3f423d06,0x3f72b442,4
+np.float32,0x3f5a24c6,0x3f925508,4
+np.float32,0xff18c834,0xbf79b5c8,4
+np.float32,0x3f401ece,0x3f6eb6ac,4
+np.float32,0x7b8a3013,0xbffab968,4
+np.float32,0x80091ff0,0x80091ff0,4
+np.float32,0x3f389c51,0x3f610b47,4
+np.float32,0x5ea174,0x5ea174,4
+np.float32,0x807a9eb2,0x807a9eb2,4
+np.float32,0x806ce61e,0x806ce61e,4
+np.float32,0xbe956acc,0xbe99cefc,4
+np.float32,0x7e60e247,0xbf5e64a5,4
+np.float32,0x7f398e24,0x404d12ed,4
+np.float32,0x3d9049f8,0x3d908735,4
+np.float32,0x7db17ffc,0xbf5b3d87,4
+np.float32,0xff453f78,0xc0239c9f,4
+np.float32,0x3f024aac,0x3f0ed802,4
+np.float32,0xbe781c30,0xbe7d1508,4
+np.float32,0x3f77962a,0x3fb9a28e,4
+np.float32,0xff7fffff,0x3f1c9eca,4
+np.float32,0x3f7152e3,0x3fb03f9d,4
+np.float32,0xff7cb167,0x3f9ce831,4
+np.float32,0x3e763e30,0x3e7b1a10,4
+np.float32,0xbf126527,0xbf24c253,4
+np.float32,0x803f6660,0x803f6660,4
+np.float32,0xbf79de38,0xbfbd38b1,4
+np.float32,0x8046c2f0,0x8046c2f0,4
+np.float32,0x6dc74e,0x6dc74e,4
+np.float32,0xbec9c45e,0xbed4e768,4
+np.float32,0x3f0eedb6,0x3f1fe610,4
+np.float32,0x7e031999,0xbcc13026,4
+np.float32,0x7efc2fd7,0x41e4b284,4
+np.float32,0xbeab7454,0xbeb22a1b,4
+np.float32,0x805ee67b,0x805ee67b,4
+np.float32,0x7f76e58e,0xc2436659,4
+np.float32,0xbe62b024,0xbe667718,4
+np.float32,0x3eea0808,0x3efbd182,4
+np.float32,0xbf7fd00c,0xbfc70719,4
+np.float32,0x7f27b640,0xbf0d97e0,4
+np.float32,0x3f1b58a4,0x3f31b6f4,4
+np.float32,0x252a9f,0x252a9f,4
+np.float32,0x7f65f95a,0xbead5de3,4
+np.float32,0xfc6ea780,0x42d15801,4
+np.float32,0x7eac4c52,0xc0682424,4
+np.float32,0xbe8a3f5a,0xbe8db54d,4
+np.float32,0xbf1644e2,0xbf2a4abd,4
+np.float32,0x3fc96a,0x3fc96a,4
+np.float32,0x7f38c0e4,0x3cc04af8,4
+np.float32,0x3f623d75,0x3f9c065d,4
+np.float32,0x3ee6a51a,0x3ef7a058,4
+np.float32,0x3dd11020,0x3dd1cacf,4
+np.float32,0xb6918,0xb6918,4
+np.float32,0xfdd7a540,0x3f22f081,4
+np.float32,0x80798563,0x80798563,4
+np.float32,0x3e9a8b7a,0x3e9f6a7e,4
+np.float32,0xbea515d4,0xbeab0df5,4
+np.float32,0xbea9b9f4,0xbeb03abe,4
+np.float32,0xbf11a5fa,0xbf23b478,4
+np.float32,0xfd6cadf0,0xbfa2a878,4
+np.float32,0xbf6edd07,0xbfacbb78,4
+np.float32,0xff5c5328,0x3e2d1552,4
+np.float32,0xbea2f788,0xbea8b3f5,4
+np.float32,0x802efaeb,0x802efaeb,4
+np.float32,0xff1c85e5,0x41f8560e,4
+np.float32,0x3f53b123,0x3f8b18e1,4
+np.float32,0xff798c4a,0x4092e66f,4
+np.float32,0x7f2e6fe7,0xbdcbd58f,4
+np.float32,0xfe8a8196,0x3fd7fc56,4
+np.float32,0x5e7ad4,0x5e7ad4,4
+np.float32,0xbf23a02d,0xbf3e4533,4
+np.float32,0x3f31c55c,0x3f5531bf,4
+np.float32,0x80331be3,0x80331be3,4
+np.float32,0x8056960a,0x8056960a,4
+np.float32,0xff1c06ae,0xbfd26992,4
+np.float32,0xbe0cc4b0,0xbe0da96c,4
+np.float32,0x7e925ad5,0xbf8dba54,4
+np.float32,0x2c8cec,0x2c8cec,4
+np.float32,0x8011951e,0x8011951e,4
+np.float32,0x3f2caf84,0x3f4cb89f,4
+np.float32,0xbd32c220,0xbd32df33,4
+np.float32,0xbec358d6,0xbecd6996,4
+np.float32,0x3f6e4930,0x3fabeb92,4
+np.float32,0xbf6a3afd,0xbfa65a3a,4
+np.float32,0x80067764,0x80067764,4
+np.float32,0x3d8df1,0x3d8df1,4
+np.float32,0x7ee51cf2,0x409e4061,4
+np.float32,0x435f5d,0x435f5d,4
+np.float32,0xbf5b17f7,0xbf936ebe,4
+np.float32,0x3ecaacb5,0x3ed5f81f,4
+np.float32,0x807b0aa5,0x807b0aa5,4
+np.float32,0x52b40b,0x52b40b,4
+np.float32,0x146a97,0x146a97,4
+np.float32,0x7f42b952,0xbfdcb413,4
+np.float32,0xbf1a1af2,0xbf2fe1bb,4
+np.float32,0x3f312034,0x3f541aa2,4
+np.float32,0x3f281d60,0x3f4554f9,4
+np.float32,0x50e451,0x50e451,4
+np.float32,0xbe45838c,0xbe480016,4
+np.float32,0xff7d0aeb,0x3eb0746e,4
+np.float32,0x7f32a489,0xbf96af6d,4
+np.float32,0xbf1b4e27,0xbf31a769,4
+np.float32,0x3f242936,0x3f3f1a44,4
+np.float32,0xbf7482ff,0xbfb4f201,4
+np.float32,0x4bda38,0x4bda38,4
+np.float32,0xbf022208,0xbf0ea2bb,4
+np.float32,0x7d08ca95,0xbe904602,4
+np.float32,0x7ed2f356,0xc02b55ad,4
+np.float32,0xbf131204,0xbf25b734,4
+np.float32,0xff3464b4,0x3fb23706,4
+np.float32,0x5a97cf,0x5a97cf,4
+np.float32,0xbe52db70,0xbe55e388,4
+np.float32,0x3f52934f,0x3f89e2aa,4
+np.float32,0xfeea866a,0x40a2b33f,4
+np.float32,0x80333925,0x80333925,4
+np.float32,0xfef5d13e,0xc00139ec,4
+np.float32,0x3f4750ab,0x3f7c87ad,4
+np.float32,0x3e41bfdd,0x3e44185a,4
+np.float32,0xbf5b0572,0xbf935935,4
+np.float32,0xbe93c9da,0xbe9808d8,4
+np.float32,0x7f501f33,0xc0f9973c,4
+np.float32,0x800af035,0x800af035,4
+np.float32,0x3f29faf8,0x3f4852a8,4
+np.float32,0xbe1e4c20,0xbe1f920c,4
+np.float32,0xbf7e8616,0xbfc4d79d,4
+np.float32,0x43ffbf,0x43ffbf,4
+np.float32,0x7f28e8a9,0xbfa1ac24,4
+np.float32,0xbf1f9f92,0xbf3820bc,4
+np.float32,0x3f07e004,0x3f1641c4,4
+np.float32,0x3ef7ea7f,0x3f06a64a,4
+np.float32,0x7e013101,0x3f6080e6,4
+np.float32,0x7f122a4f,0xbf0a796f,4
+np.float32,0xfe096960,0x3ed7273a,4
+np.float32,0x3f06abf1,0x3f14a4b2,4
+np.float32,0x3e50ded3,0x3e53d0f1,4
+np.float32,0x7f50b346,0x3eabb536,4
+np.float32,0xff5adb0f,0xbd441972,4
+np.float32,0xbecefe46,0xbedb0f66,4
+np.float32,0x7da70bd4,0xbec66273,4
+np.float32,0x169811,0x169811,4
+np.float32,0xbee4dfee,0xbef5721a,4
+np.float32,0x3efbeae3,0x3f0936e6,4
+np.float32,0x8031bd61,0x8031bd61,4
+np.float32,0x8048e443,0x8048e443,4
+np.float32,0xff209aa6,0xbeb364cb,4
+np.float32,0xff477499,0x3c1b0041,4
+np.float32,0x803fe929,0x803fe929,4
+np.float32,0x3f70158b,0x3fae7725,4
+np.float32,0x7f795723,0x3e8e850a,4
+np.float32,0x3cba99,0x3cba99,4
+np.float32,0x80588d2a,0x80588d2a,4
+np.float32,0x805d1f05,0x805d1f05,4
+np.float32,0xff4ac09a,0xbefe614d,4
+np.float32,0x804af084,0x804af084,4
+np.float32,0x7c64ae63,0xc1a8b563,4
+np.float32,0x8078d793,0x8078d793,4
+np.float32,0x7f3e2436,0xbf8bf9d3,4
+np.float32,0x7ccec1,0x7ccec1,4
+np.float32,0xbf6462c7,0xbf9eb830,4
+np.float32,0x3f1002ca,0x3f216843,4
+np.float32,0xfe878ca6,0x409e73a5,4
+np.float32,0x3bd841d9,0x3bd842a7,4
+np.float32,0x7d406f41,0xbd9dcfa3,4
+np.float32,0x7c6d6,0x7c6d6,4
+np.float32,0x3f4ef360,0x3f86074b,4
+np.float32,0x805f534a,0x805f534a,4
+np.float32,0x1,0x1,4
+np.float32,0x3f739ee2,0x3fb39db2,4
+np.float32,0x3d0c2352,0x3d0c3153,4
+np.float32,0xfe8a4f2c,0x3edd8add,4
+np.float32,0x3e52eaa0,0x3e55f362,4
+np.float32,0x7bde9758,0xbf5ba5cf,4
+np.float32,0xff422654,0xbf41e487,4
+np.float32,0x385e5b,0x385e5b,4
+np.float32,0x5751dd,0x5751dd,4
+np.float32,0xff6c671c,0xc03e2d6d,4
+np.float32,0x1458be,0x1458be,4
+np.float32,0x80153d4d,0x80153d4d,4
+np.float32,0x7efd2adb,0x3e25458f,4
+np.float32,0xbe161880,0xbe172e12,4
+np.float32,0x7ecea1aa,0x40a66d79,4
+np.float32,0xbf5b02a2,0xbf9355f0,4
+np.float32,0x15d9ab,0x15d9ab,4
+np.float32,0x2dc7c7,0x2dc7c7,4
+np.float32,0xfebbf81a,0x4193f6e6,4
+np.float32,0xfe8e3594,0xc00a6695,4
+np.float32,0x185aa8,0x185aa8,4
+np.float32,0x3daea156,0x3daf0e00,4
+np.float32,0x3e071688,0x3e07e08e,4
+np.float32,0x802db9e6,0x802db9e6,4
+np.float32,0x7f7be2c4,0x3f1363dd,4
+np.float32,0x7eba3f5e,0xc13eb497,4
+np.float32,0x3de04a00,0x3de130a9,4
+np.float32,0xbf1022bc,0xbf2194eb,4
+np.float32,0xbf5b547e,0xbf93b53b,4
+np.float32,0x3e867bd6,0x3e89aa10,4
+np.float32,0xbea5eb5c,0xbeabfb73,4
+np.float32,0x7f1efae9,0x3ffca038,4
+np.float32,0xff5d0344,0xbe55dbbb,4
+np.float32,0x805167e7,0x805167e7,4
+np.float32,0xbdb3a020,0xbdb41667,4
+np.float32,0xbedea6b4,0xbeedd5fd,4
+np.float32,0x8053b45c,0x8053b45c,4
+np.float32,0x7ed370e9,0x3d90eba5,4
+np.float32,0xbefcd7da,0xbf09cf91,4
+np.float32,0x78b9ac,0x78b9ac,4
+np.float32,0xbf2f6dc0,0xbf5141ef,4
+np.float32,0x802d3a7b,0x802d3a7b,4
+np.float32,0xfd45d120,0x3fec31cc,4
+np.float32,0xbf7e7020,0xbfc4b2af,4
+np.float32,0xf04da,0xf04da,4
+np.float32,0xbe9819d4,0xbe9cbd35,4
+np.float32,0x8075ab35,0x8075ab35,4
+np.float32,0xbf052fdc,0xbf12aa2c,4
+np.float32,0x3f1530d0,0x3f28bd9f,4
+np.float32,0x80791881,0x80791881,4
+np.float32,0x67f309,0x67f309,4
+np.float32,0x3f12f16a,0x3f2588f5,4
+np.float32,0x3ecdac47,0x3ed97ff8,4
+np.float32,0xbf297fb7,0xbf478c39,4
+np.float32,0x8069fa80,0x8069fa80,4
+np.float32,0x807f940e,0x807f940e,4
+np.float32,0xbf648dc8,0xbf9eeecb,4
+np.float32,0x3de873b0,0x3de9748d,4
+np.float32,0x3f1aa645,0x3f30af1f,4
+np.float32,0xff227a62,0x3d8283cc,4
+np.float32,0xbf37187d,0xbf5e5f4c,4
+np.float32,0x803b1b1f,0x803b1b1f,4
+np.float32,0x3f58142a,0x3f8ff8da,4
+np.float32,0x8004339e,0x8004339e,4
+np.float32,0xbf0f5654,0xbf2077a4,4
+np.float32,0x3f17e509,0x3f2ca598,4
+np.float32,0x3f800000,0x3fc75923,4
+np.float32,0xfdf79980,0x42f13047,4
+np.float32,0x7f111381,0x3f13c4c9,4
+np.float32,0xbea40c70,0xbea9e724,4
+np.float32,0x110520,0x110520,4
+np.float32,0x60490d,0x60490d,4
+np.float32,0x3f6703ec,0x3fa21951,4
+np.float32,0xbf098256,0xbf187652,4
+np.float32,0x658951,0x658951,4
+np.float32,0x3f53bf16,0x3f8b2818,4
+np.float32,0xff451811,0xc0026068,4
+np.float32,0x80777ee0,0x80777ee0,4
+np.float32,0x3e4fcc19,0x3e52b286,4
+np.float32,0x7f387ee0,0x3ce93eb6,4
+np.float32,0xff51181f,0xbfca3ee4,4
+np.float32,0xbf5655ae,0xbf8e0304,4
+np.float32,0xff2f1dcd,0x40025471,4
+np.float32,0x7f6e58e5,0xbe9930d5,4
+np.float32,0x7adf11,0x7adf11,4
+np.float32,0xbe9a2bc2,0xbe9f0185,4
+np.float32,0x8065d3a0,0x8065d3a0,4
+np.float32,0x3ed6e826,0x3ee47c45,4
+np.float32,0x80598ea0,0x80598ea0,4
+np.float32,0x7f10b90a,0x40437bd0,4
+np.float32,0x27b447,0x27b447,4
+np.float32,0x7ecd861c,0x3fce250f,4
+np.float32,0x0,0x0,4
+np.float32,0xbeba82d6,0xbec3394c,4
+np.float32,0xbf4958b0,0xbf8048ea,4
+np.float32,0x7c643e,0x7c643e,4
+np.float32,0x580770,0x580770,4
+np.float32,0x805bf54a,0x805bf54a,4
+np.float32,0x7f1f3cee,0xbe1a54d6,4
+np.float32,0xfefefdea,0x3fa84576,4
+np.float32,0x7f007b7a,0x3e8a6d25,4
+np.float32,0xbf177959,0xbf2c0919,4
+np.float32,0xbf30fda0,0xbf53e058,4
+np.float32,0x3f0576be,0x3f130861,4
+np.float32,0x3f49380e,0x3f80283a,4
+np.float32,0xebc56,0xebc56,4
+np.float32,0x654e3b,0x654e3b,4
+np.float32,0x14a4d8,0x14a4d8,4
+np.float32,0xff69b3cb,0xbf822a88,4
+np.float32,0xbe9b6c1c,0xbea06109,4
+np.float32,0xbefddd7e,0xbf0a787b,4
+np.float32,0x4c4ebb,0x4c4ebb,4
+np.float32,0x7d0a74,0x7d0a74,4
+np.float32,0xbebb5f80,0xbec43635,4
+np.float32,0x7ee79723,0xc1c7f3f3,4
+np.float32,0x7f2be4c7,0xbfa6c693,4
+np.float32,0x805bc7d5,0x805bc7d5,4
+np.float32,0x8042f12c,0x8042f12c,4
+np.float32,0x3ef91be8,0x3f07697b,4
+np.float32,0x3cf37ac0,0x3cf38d1c,4
+np.float32,0x800000,0x800000,4
+np.float32,0xbe1ebf4c,0xbe200806,4
+np.float32,0x7f380862,0xbeb512e8,4
+np.float32,0xbe320064,0xbe33d0fc,4
+np.float32,0xff300b0c,0xbfadb805,4
+np.float32,0x308a06,0x308a06,4
+np.float32,0xbf084f6e,0xbf16d7b6,4
+np.float32,0xff47cab6,0x3f892b65,4
+np.float32,0xbed99f4a,0xbee7bfd5,4
+np.float32,0xff7d74c0,0x3ee88c9a,4
+np.float32,0x3c3d23,0x3c3d23,4
+np.float32,0x8074bde8,0x8074bde8,4
+np.float32,0x80042164,0x80042164,4
+np.float32,0x3e97c92a,0x3e9c6500,4
+np.float32,0x3b80e0,0x3b80e0,4
+np.float32,0xbf16646a,0xbf2a783d,4
+np.float32,0x7f3b4cb1,0xc01339be,4
+np.float32,0xbf31f36e,0xbf557fd0,4
+np.float32,0x7f540618,0xbe5f6fc1,4
+np.float32,0x7eee47d0,0x40a27e94,4
+np.float32,0x7f12f389,0xbebed654,4
+np.float32,0x56cff5,0x56cff5,4
+np.float32,0x8056032b,0x8056032b,4
+np.float32,0x3ed34e40,0x3ee02e38,4
+np.float32,0x7d51a908,0xbf19a90e,4
+np.float32,0x80000000,0x80000000,4
+np.float32,0xfdf73fd0,0xbf0f8cad,4
+np.float32,0x7ee4fe6d,0xbf1ea7e4,4
+np.float32,0x1f15ba,0x1f15ba,4
+np.float32,0xd18c3,0xd18c3,4
+np.float32,0x80797705,0x80797705,4
+np.float32,0x7ef07091,0x3f2f3b9a,4
+np.float32,0x7f552f41,0x3faf608c,4
+np.float32,0x3f779977,0x3fb9a7ad,4
+np.float32,0xfe1a7a50,0xbdadc4d1,4
+np.float32,0xbf449cf0,0xbf7740db,4
+np.float32,0xbe44e620,0xbe475cad,4
+np.float32,0x3f63a098,0x3f9dc2b5,4
+np.float32,0xfed40a12,0x4164533a,4
+np.float32,0x7a2bbb,0x7a2bbb,4
+np.float32,0xff7f7b9e,0xbeee8740,4
+np.float32,0x7ee27f8b,0x4233f53b,4
+np.float32,0xbf044c06,0xbf117c28,4
+np.float32,0xbeffde54,0xbf0bc49f,4
+np.float32,0xfeaef2e8,0x3ff258fe,4
+np.float32,0x527451,0x527451,4
+np.float32,0xbcef8d00,0xbcef9e7c,4
+np.float32,0xbf0e20c0,0xbf1ec9b2,4
+np.float32,0x8024afda,0x8024afda,4
+np.float32,0x7ef6cb3e,0x422cad0b,4
+np.float32,0x3c120,0x3c120,4
+np.float32,0xbf125c8f,0xbf24b62c,4
+np.float32,0x7e770a93,0x402c9d86,4
+np.float32,0xbd30a4e0,0xbd30c0ee,4
+np.float32,0xbf4d3388,0xbf843530,4
+np.float32,0x3f529072,0x3f89df92,4
+np.float32,0xff0270b1,0xbf81be9a,4
+np.float32,0x5e07e7,0x5e07e7,4
+np.float32,0x7bec32,0x7bec32,4
+np.float32,0x7fc00000,0x7fc00000,4
+np.float32,0x3e3ba5e0,0x3e3dc6e9,4
+np.float32,0x3ecb62d4,0x3ed6ce2c,4
+np.float32,0x3eb3dde8,0x3ebba68f,4
+np.float32,0x8063f952,0x8063f952,4
+np.float32,0x7f204aeb,0x3e88614e,4
+np.float32,0xbeae1ddc,0xbeb5278e,4
+np.float32,0x6829e9,0x6829e9,4
+np.float32,0xbf361a99,0xbf5ca354,4
+np.float32,0xbf24fbe6,0xbf406326,4
+np.float32,0x3f329d41,0x3f56a061,4
+np.float32,0xfed6d666,0x3e8f71a5,4
+np.float32,0x337f92,0x337f92,4
+np.float32,0xbe1c4970,0xbe1d8305,4
+np.float32,0xbe6b7e18,0xbe6fbbde,4
+np.float32,0x3f2267b9,0x3f3c61da,4
+np.float32,0xbee1ee94,0xbef1d628,4
+np.float32,0x7ecffc1a,0x3f02987e,4
+np.float32,0xbe9b1306,0xbe9fff3b,4
+np.float32,0xbeffacae,0xbf0ba468,4
+np.float32,0x7f800000,0xffc00000,4
+np.float32,0xfefc9aa8,0xc19de2a3,4
+np.float32,0x7d7185bb,0xbf9090ec,4
+np.float32,0x7edfbafd,0x3fe9352f,4
+np.float32,0x4ef2ec,0x4ef2ec,4
+np.float32,0x7f4cab2e,0xbff4e5dd,4
+np.float32,0xff3b1788,0x3e3c22e9,4
+np.float32,0x4e15ee,0x4e15ee,4
+np.float32,0xbf5451e6,0xbf8bc8a7,4
+np.float32,0x3f7f6d2e,0x3fc65e8b,4
+np.float32,0xbf1d9184,0xbf35071b,4
+np.float32,0xbf3a81cf,0xbf646d9b,4
+np.float32,0xbe71acc4,0xbe7643ab,4
+np.float32,0x528b7d,0x528b7d,4
+np.float32,0x2cb1d0,0x2cb1d0,4
+np.float32,0x3f324bf8,0x3f56161a,4
+np.float32,0x80709a21,0x80709a21,4
+np.float32,0x4bc448,0x4bc448,4
+np.float32,0x3e8bd600,0x3e8f6b7a,4
+np.float32,0xbeb97d30,0xbec20dd6,4
+np.float32,0x2a5669,0x2a5669,4
+np.float32,0x805f2689,0x805f2689,4
+np.float32,0xfe569f50,0x3fc51952,4
+np.float32,0x1de44c,0x1de44c,4
+np.float32,0x3ec7036c,0x3ed1ae67,4
+np.float32,0x8052b8e5,0x8052b8e5,4
+np.float32,0xff740a6b,0x3f4981a8,4
+np.float32,0xfee9bb70,0xc05e23be,4
+np.float32,0xff4e12c9,0x4002b4ad,4
+np.float32,0x803de0c2,0x803de0c2,4
+np.float32,0xbf433a07,0xbf74966f,4
+np.float32,0x803e60ca,0x803e60ca,4
+np.float32,0xbf19ee98,0xbf2fa07a,4
+np.float32,0x92929,0x92929,4
+np.float32,0x7f709c27,0x4257ba2d,4
+np.float32,0x803167c6,0x803167c6,4
+np.float32,0xbf095ead,0xbf184607,4
+np.float32,0x617060,0x617060,4
+np.float32,0x2d85b3,0x2d85b3,4
+np.float32,0x53d20b,0x53d20b,4
+np.float32,0x3e046838,0x3e052666,4
+np.float32,0xbe7c5fdc,0xbe80ce4b,4
+np.float32,0x3d18d060,0x3d18e289,4
+np.float32,0x804dc031,0x804dc031,4
+np.float32,0x3f224166,0x3f3c26cd,4
+np.float32,0x7d683e3c,0xbea24f25,4
+np.float32,0xbf3a92aa,0xbf648be4,4
+np.float32,0x8072670b,0x8072670b,4
+np.float32,0xbe281aec,0xbe29a1bc,4
+np.float32,0x7f09d918,0xc0942490,4
+np.float32,0x7ca9fd07,0x4018b990,4
+np.float32,0x7d36ac5d,0x3cf57184,4
+np.float32,0x8039b62f,0x8039b62f,4
+np.float32,0x6cad7b,0x6cad7b,4
+np.float32,0x3c0fd9ab,0x3c0fda9d,4
+np.float32,0x80299883,0x80299883,4
+np.float32,0x3c2d0e3e,0x3c2d0fe4,4
+np.float32,0x8002cf62,0x8002cf62,4
+np.float32,0x801dde97,0x801dde97,4
+np.float32,0x80411856,0x80411856,4
+np.float32,0x6ebce8,0x6ebce8,4
+np.float32,0x7b7d1a,0x7b7d1a,4
+np.float32,0x8031d3de,0x8031d3de,4
+np.float32,0x8005c4ab,0x8005c4ab,4
+np.float32,0xbf7dd803,0xbfc3b3ef,4
+np.float32,0x8017ae60,0x8017ae60,4
+np.float32,0xfe9316ce,0xbfe0544a,4
+np.float32,0x3f136bfe,0x3f2636ff,4
+np.float32,0x3df87b80,0x3df9b57d,4
+np.float32,0xff44c356,0xbf11c7ad,4
+np.float32,0x4914ae,0x4914ae,4
+np.float32,0x80524c21,0x80524c21,4
+np.float32,0x805c7dc8,0x805c7dc8,4
+np.float32,0xfed3c0aa,0xbff0c0ab,4
+np.float32,0x7eb2bfbb,0xbf4600bc,4
+np.float32,0xfec8df84,0x3f5bd350,4
+np.float32,0x3e5431a4,0x3e5748c3,4
+np.float32,0xbee6a3a0,0xbef79e86,4
+np.float32,0xbf6cc9b2,0xbfa9d61a,4
+np.float32,0x3f132bd5,0x3f25dbd9,4
+np.float32,0x7e6d2e48,0x3f9d025b,4
+np.float32,0x3edf430c,0x3eee942d,4
+np.float32,0x3f0d1b8a,0x3f1d60e1,4
+np.float32,0xbdf2f688,0xbdf41bfb,4
+np.float32,0xbe47a284,0xbe4a33ff,4
+np.float32,0x3eaa9fbc,0x3eb13be7,4
+np.float32,0xfe98d45e,0x3eb84517,4
+np.float32,0x7efc23b3,0x3dcc1c99,4
+np.float32,0x3ca36242,0x3ca367ce,4
+np.float32,0x3f76a944,0x3fb834e3,4
+np.float32,0xbf45207c,0xbf783f9b,4
+np.float32,0x3e7c1220,0x3e80a4f8,4
+np.float32,0x3f018200,0x3f0dd14e,4
+np.float32,0x3f53cdde,0x3f8b3839,4
+np.float32,0xbdbacb58,0xbdbb5063,4
+np.float32,0x804af68d,0x804af68d,4
+np.float32,0x3e2c12fc,0x3e2db65b,4
+np.float32,0x3f039433,0x3f10895a,4
+np.float32,0x7ef5193d,0x3f4115f7,4
+np.float32,0x8030afbe,0x8030afbe,4
+np.float32,0x3f06fa2a,0x3f150d5d,4
+np.float32,0x3f124442,0x3f2493d2,4
+np.float32,0xbeb5b792,0xbebdc090,4
+np.float32,0xbedc90a4,0xbeeb4de9,4
+np.float32,0x3f3ff8,0x3f3ff8,4
+np.float32,0x3ee75bc5,0x3ef881e4,4
+np.float32,0xfe80e3de,0xbf5cd535,4
+np.float32,0xf52eb,0xf52eb,4
+np.float32,0x80660ee8,0x80660ee8,4
+np.float32,0x3e173a58,0x3e185648,4
+np.float32,0xfe49520c,0xbf728d7c,4
+np.float32,0xbecbb8ec,0xbed73373,4
+np.float32,0xbf027ae0,0xbf0f173e,4
+np.float32,0xbcab6740,0xbcab6da8,4
+np.float32,0xbf2a15e2,0xbf487e11,4
+np.float32,0x3b781b,0x3b781b,4
+np.float32,0x44f559,0x44f559,4
+np.float32,0xff6a0ca6,0xc174d7c3,4
+np.float32,0x6460ef,0x6460ef,4
+np.float32,0xfe58009c,0x3ee2bb30,4
+np.float32,0xfec3c038,0x3e30d617,4
+np.float32,0x7f0687c0,0xbf62c820,4
+np.float32,0xbf44655e,0xbf76d589,4
+np.float32,0xbf42968c,0xbf735e78,4
+np.float32,0x80385503,0x80385503,4
+np.float32,0xbea7e3a2,0xbeae2d59,4
+np.float32,0x3dd0b770,0x3dd17131,4
+np.float32,0xbf4bc185,0xbf82b907,4
+np.float32,0xfefd7d64,0xbee05650,4
+np.float32,0xfaac3c00,0xbff23bc9,4
+np.float32,0xbf562f0d,0xbf8dd7f4,4
+np.float32,0x7fa00000,0x7fe00000,4
+np.float32,0x3e01bdb8,0x3e027098,4
+np.float32,0x3e2868ab,0x3e29f19e,4
+np.float32,0xfec55f2e,0x3f39f304,4
+np.float32,0xed4e,0xed4e,4
+np.float32,0x3e2b7330,0x3e2d11fa,4
+np.float32,0x7f738542,0x40cbbe16,4
+np.float32,0x3f123521,0x3f247e71,4
+np.float32,0x73572c,0x73572c,4
+np.float32,0x804936c8,0x804936c8,4
+np.float32,0x803b80d8,0x803b80d8,4
+np.float32,0x7f566c57,0xbee2855a,4
+np.float32,0xff0e3bd8,0xbff0543f,4
+np.float32,0x7d2b2fe7,0xbf94ba4c,4
+np.float32,0xbf0da470,0xbf1e1dc2,4
+np.float32,0xbd276500,0xbd277ce0,4
+np.float32,0xfcd15dc0,0x403ccc2a,4
+np.float32,0x80071e59,0x80071e59,4
+np.float32,0xbe9b0c34,0xbe9ff7be,4
+np.float32,0x3f4f9069,0x3f86ac50,4
+np.float32,0x80042a95,0x80042a95,4
+np.float32,0x7de28e39,0x3bc9b7f4,4
+np.float32,0xbf641935,0xbf9e5af8,4
+np.float32,0x8034f068,0x8034f068,4
+np.float32,0xff33a3d2,0xbf408e75,4
+np.float32,0xbcc51540,0xbcc51efc,4
+np.float32,0xff6d1ddf,0x3ef58f0e,4
+np.float32,0xbf64dfc4,0xbf9f5725,4
+np.float32,0xff068a06,0x3eea8987,4
+np.float32,0xff01c0af,0x3f24cdfe,4
+np.float32,0x3f4def7e,0x3f84f802,4
+np.float32,0xbf1b4ae7,0xbf31a299,4
+np.float32,0x8077df2d,0x8077df2d,4
+np.float32,0x3f0155c5,0x3f0d9785,4
+np.float32,0x5a54b2,0x5a54b2,4
+np.float32,0x7f271f9e,0x3efb2ef3,4
+np.float32,0xbf0ff2ec,0xbf215217,4
+np.float32,0x7f500130,0xbf8a7fdd,4
+np.float32,0xfed9891c,0xbf65c872,4
+np.float32,0xfecbfaae,0x403bdbc2,4
+np.float32,0x3f3a5aba,0x3f642772,4
+np.float32,0x7ebc681e,0xbd8df059,4
+np.float32,0xfe05e400,0xbfe35d74,4
+np.float32,0xbf295ace,0xbf4750ea,4
+np.float32,0x7ea055b2,0x3f62d6be,4
+np.float32,0xbd00b520,0xbd00bff9,4
+np.float32,0xbf7677aa,0xbfb7e8cf,4
+np.float32,0x3e83f788,0x3e86f816,4
+np.float32,0x801f6710,0x801f6710,4
+np.float32,0x801133cc,0x801133cc,4
+np.float32,0x41da2a,0x41da2a,4
+np.float32,0xff1622fd,0x3f023650,4
+np.float32,0x806c7a72,0x806c7a72,4
+np.float32,0x3f10779c,0x3f220bb4,4
+np.float32,0xbf08cf94,0xbf17848d,4
+np.float32,0xbecb55b4,0xbed6bebd,4
+np.float32,0xbf0a1528,0xbf193d7b,4
+np.float32,0x806a16bd,0x806a16bd,4
+np.float32,0xc222a,0xc222a,4
+np.float32,0x3930de,0x3930de,4
+np.float32,0x3f5c3588,0x3f94bca2,4
+np.float32,0x1215ad,0x1215ad,4
+np.float32,0x3ed15030,0x3eddcf67,4
+np.float32,0x7da83b2e,0x3fce0d39,4
+np.float32,0x32b0a8,0x32b0a8,4
+np.float32,0x805aed6b,0x805aed6b,4
+np.float32,0x3ef8e02f,0x3f074346,4
+np.float32,0xbdeb6780,0xbdec7250,4
+np.float32,0x3f6e3cec,0x3fabda61,4
+np.float32,0xfefd467a,0x3ef7821a,4
+np.float32,0xfef090fe,0x3bb752a2,4
+np.float32,0x8019c538,0x8019c538,4
+np.float32,0x3e8cf284,0x3e909e81,4
+np.float32,0xbe6c6618,0xbe70b0a2,4
+np.float32,0x7f50a539,0x3f367be1,4
+np.float32,0x8019fe2f,0x8019fe2f,4
+np.float32,0x800c3f48,0x800c3f48,4
+np.float32,0xfd054cc0,0xc0f52802,4
+np.float32,0x3d0cca20,0x3d0cd853,4
+np.float32,0xbf4a7c44,0xbf816e74,4
+np.float32,0x3f46fc40,0x3f7be153,4
+np.float32,0x807c5849,0x807c5849,4
+np.float32,0xd7e41,0xd7e41,4
+np.float32,0x70589b,0x70589b,4
+np.float32,0x80357b95,0x80357b95,4
+np.float32,0x3de239f0,0x3de326a5,4
+np.float32,0x800b08e3,0x800b08e3,4
+np.float32,0x807ec946,0x807ec946,4
+np.float32,0x3e2e4b83,0x3e2fff76,4
+np.float32,0x3f198e0f,0x3f2f12a6,4
+np.float32,0xbecb1aca,0xbed67979,4
+np.float32,0x80134082,0x80134082,4
+np.float32,0x3f3a269f,0x3f63ca05,4
+np.float32,0x3f1381e4,0x3f265622,4
+np.float32,0xff293080,0xbf10be6f,4
+np.float32,0xff800000,0xffc00000,4
+np.float32,0x37d196,0x37d196,4
+np.float32,0x7e57eea7,0x3e7d8138,4
+np.float32,0x804b1dae,0x804b1dae,4
+np.float32,0x7d9508f9,0xc1075b35,4
+np.float32,0x3f7bf468,0x3fc095e0,4
+np.float32,0x55472c,0x55472c,4
+np.float32,0x3ecdcd86,0x3ed9a738,4
+np.float32,0x3ed9be0f,0x3ee7e4e9,4
+np.float32,0x3e7e0ddb,0x3e81b2fe,4
+np.float32,0x7ee6c1d3,0x3f850634,4
+np.float32,0x800f6fad,0x800f6fad,4
+np.float32,0xfefb3bd6,0xbff68ecc,4
+np.float32,0x8013d6e2,0x8013d6e2,4
+np.float32,0x3f3a2cb6,0x3f63d4ee,4
+np.float32,0xff383c84,0x3e7854bb,4
+np.float32,0x3f21946e,0x3f3b1cea,4
+np.float32,0xff322ea2,0x3fb22f31,4
+np.float32,0x8065a024,0x8065a024,4
+np.float32,0x7f395e30,0xbefe0de1,4
+np.float32,0x5b52db,0x5b52db,4
+np.float32,0x7f7caea7,0x3dac8ded,4
+np.float32,0xbf0431f8,0xbf1159b2,4
+np.float32,0x7f15b25b,0xc02a3833,4
+np.float32,0x80131abc,0x80131abc,4
+np.float32,0x7e829d81,0xbeb2e93d,4
+np.float32,0x3f2c64d7,0x3f4c3e4d,4
+np.float32,0x7f228d48,0xc1518c74,4
+np.float32,0xfc3c6f40,0xbf00d585,4
+np.float32,0x7f754f0f,0x3e2152f5,4
+np.float32,0xff65d32b,0xbe8bd56c,4
+np.float32,0xfea6b8c0,0x41608655,4
+np.float32,0x3f7d4b05,0x3fc2c96a,4
+np.float32,0x3f463230,0x3f7a54da,4
+np.float32,0x805117bb,0x805117bb,4
+np.float32,0xbf2ad4f7,0xbf49b30e,4
+np.float32,0x3eaa01ff,0x3eb08b56,4
+np.float32,0xff7a02bb,0x3f095f73,4
+np.float32,0x759176,0x759176,4
+np.float32,0x803c18d5,0x803c18d5,4
+np.float32,0xbe0722d8,0xbe07ed16,4
+np.float32,0x3f4b4a99,0x3f823fc6,4
+np.float32,0x3f7d0451,0x3fc25463,4
+np.float32,0xfee31e40,0xbfb41091,4
+np.float32,0xbf733d2c,0xbfb30cf1,4
+np.float32,0x7ed81015,0x417c380c,4
+np.float32,0x7daafc3e,0xbe2a37ed,4
+np.float32,0x3e44f82b,0x3e476f67,4
+np.float32,0x7c8d99,0x7c8d99,4
+np.float32,0x3f7aec5a,0x3fbee991,4
+np.float32,0xff09fd55,0x3e0709d3,4
+np.float32,0xff4ba4df,0x4173c01f,4
+np.float32,0x3f43d944,0x3f75c7bd,4
+np.float32,0xff6a9106,0x40a10eff,4
+np.float32,0x3bc8341c,0x3bc834bf,4
+np.float32,0x3eea82,0x3eea82,4
+np.float32,0xfea36a3c,0x435729b2,4
+np.float32,0x7dcc1fb0,0x3e330053,4
+np.float32,0x3f616ae6,0x3f9b01ae,4
+np.float32,0x8030963f,0x8030963f,4
+np.float32,0x10d1e2,0x10d1e2,4
+np.float32,0xfeb9a8a6,0x40e6daac,4
+np.float32,0xbe1aba00,0xbe1bea3a,4
+np.float32,0x3cb6b4ea,0x3cb6bcac,4
+np.float32,0x3d8b0b64,0x3d8b422f,4
+np.float32,0x7b6894,0x7b6894,4
+np.float32,0x3e89dcde,0x3e8d4b4b,4
+np.float32,0x3f12b952,0x3f253974,4
+np.float32,0x1c316c,0x1c316c,4
+np.float32,0x7e2da535,0x3f95fe6b,4
+np.float32,0x3ae9a494,0x3ae9a4a4,4
+np.float32,0xbc5f5500,0xbc5f588b,4
+np.float32,0x3e7850fc,0x3e7d4d0e,4
+np.float32,0xbf800000,0xbfc75923,4
+np.float32,0x3e652d69,0x3e691502,4
+np.float32,0xbf6bdd26,0xbfa89129,4
+np.float32,0x3f441cfc,0x3f764a02,4
+np.float32,0x7f5445ff,0xc0906191,4
+np.float32,0x807b2ee3,0x807b2ee3,4
+np.float32,0xbeb6cab8,0xbebef9c0,4
+np.float32,0xff737277,0xbf327011,4
+np.float32,0xfc832aa0,0x402fd52e,4
+np.float32,0xbf0c7538,0xbf1c7c0f,4
+np.float32,0x7e1301c7,0xbf0ee63e,4
+np.float64,0xbfe0ef7df7a1defc,0xbfe2b76a8d8aeb35,4
+np.float64,0x7fdd9c2eae3b385c,0xbfc00d6885485039,4
+np.float64,0xbfb484c710290990,0xbfb4900e0a527555,4
+np.float64,0x7fe73e5d6cee7cba,0x3fefbf70a56b60d3,4
+np.float64,0x800a110aa8d42216,0x800a110aa8d42216,4
+np.float64,0xffedd4f3f3bba9e7,0xbff076f8c4124919,4
+np.float64,0x800093407f812682,0x800093407f812682,4
+np.float64,0x800a23150e54462a,0x800a23150e54462a,4
+np.float64,0xbfb1076864220ed0,0xbfb10dd95a74b733,4
+np.float64,0x3fed1f8b37fa3f16,0x3ff496100985211f,4
+np.float64,0x3fdf762f84beec5f,0x3fe1223eb04a17e0,4
+np.float64,0x53fd4e0aa7faa,0x53fd4e0aa7faa,4
+np.float64,0x3fdbd283bdb7a507,0x3fddb7ec9856a546,4
+np.float64,0xbfe43f449d687e89,0xbfe77724a0d3072b,4
+np.float64,0x618b73bcc316f,0x618b73bcc316f,4
+np.float64,0x67759424ceeb3,0x67759424ceeb3,4
+np.float64,0xbfe4b6f7d9a96df0,0xbfe831371f3bd7a8,4
+np.float64,0x800a531b8b74a637,0x800a531b8b74a637,4
+np.float64,0xffeeffd5c37dffab,0x3fea140cbc2c3726,4
+np.float64,0x3fe648e2002c91c4,0x3feac1b8816f972a,4
+np.float64,0x800f16242a1e2c48,0x800f16242a1e2c48,4
+np.float64,0xffeeff8e1dbdff1b,0xc000b555f117dce7,4
+np.float64,0x3fdf1cf73fbe39f0,0x3fe0e9032401135b,4
+np.float64,0x7fe19c388b633870,0x3fd5271b69317d5b,4
+np.float64,0x918f226d231e5,0x918f226d231e5,4
+np.float64,0x4cc19ab499834,0x4cc19ab499834,4
+np.float64,0xbd3121d57a624,0xbd3121d57a624,4
+np.float64,0xbfd145d334a28ba6,0xbfd1b468866124d6,4
+np.float64,0x8bdbf41517b7f,0x8bdbf41517b7f,4
+np.float64,0x3fd1b8cb3ea37198,0x3fd2306b13396cae,4
+np.float64,0xbfd632a959ac6552,0xbfd7220fcfb5ef78,4
+np.float64,0x1cdaafc639b57,0x1cdaafc639b57,4
+np.float64,0x3febdcce1577b99c,0x3ff2fe076195a2bc,4
+np.float64,0x7fca6e945934dd28,0x3ff43040df7024e8,4
+np.float64,0x3fbe08e78e3c11cf,0x3fbe2c60e6b48f75,4
+np.float64,0x7fc1ed0d0523da19,0x3ff55f8dcad9440f,4
+np.float64,0xbfdc729b8cb8e538,0xbfde7b6e15dd60c4,4
+np.float64,0x3fd219404f243281,0x3fd298d7b3546531,4
+np.float64,0x3fe715c3f56e2b88,0x3fec255b5a59456e,4
+np.float64,0x7fe8b88e74b1711c,0x3ff60efd2c81d13d,4
+np.float64,0xa1d2b9fd43a57,0xa1d2b9fd43a57,4
+np.float64,0xffc1818223230304,0xbfb85c6c1e8018e7,4
+np.float64,0x3fde38ac8b3c7159,0x3fe0580c7e228576,4
+np.float64,0x8008faf7b491f5f0,0x8008faf7b491f5f0,4
+np.float64,0xffe7a1d751af43ae,0xbf7114cd7bbcd981,4
+np.float64,0xffec2db1b4b85b62,0xbff5cae759667f83,4
+np.float64,0x7fefce1ae27f9c35,0x3ff4b8b88f4876cf,4
+np.float64,0x7fd1ff56a523feac,0xbff342ce192f14dd,4
+np.float64,0x80026b3e3f84d67d,0x80026b3e3f84d67d,4
+np.float64,0xffedee5879bbdcb0,0xc02fae11508b2be0,4
+np.float64,0x8003c0dc822781ba,0x8003c0dc822781ba,4
+np.float64,0xffe38a79eca714f4,0xc008aa23b7a63980,4
+np.float64,0xbfda70411eb4e082,0xbfdc0d7e29c89010,4
+np.float64,0x800a5e34f574bc6a,0x800a5e34f574bc6a,4
+np.float64,0x3fc19fac6e233f59,0x3fc1bc66ac0d73d4,4
+np.float64,0x3a8a61ea7514d,0x3a8a61ea7514d,4
+np.float64,0x3fb57b536e2af6a0,0x3fb588451f72f44c,4
+np.float64,0x7fd68c6d082d18d9,0xc032ac926b665c9a,4
+np.float64,0xd5b87cfdab710,0xd5b87cfdab710,4
+np.float64,0xfe80b20bfd017,0xfe80b20bfd017,4
+np.float64,0x3fef8781e37f0f04,0x3ff8215fe2c1315a,4
+np.float64,0xffedddbb9c3bbb76,0x3fd959b82258a32a,4
+np.float64,0x3fc7d41f382fa83e,0x3fc81b94c3a091ba,4
+np.float64,0xffc3275dcf264ebc,0x3fb2b3d4985c6078,4
+np.float64,0x7fe34d2b7ba69a56,0x40001f3618e3c7c9,4
+np.float64,0x3fd64ae35fac95c7,0x3fd73d77e0b730f8,4
+np.float64,0x800e53bf6b3ca77f,0x800e53bf6b3ca77f,4
+np.float64,0xbfddf7c9083bef92,0xbfe02f392744d2d1,4
+np.float64,0x1c237cc038471,0x1c237cc038471,4
+np.float64,0x3fe4172beea82e58,0x3fe739b4bf16bc7e,4
+np.float64,0xfa950523f52a1,0xfa950523f52a1,4
+np.float64,0xffc839a2c5307344,0xbff70ff8a3c9247f,4
+np.float64,0x264f828c4c9f1,0x264f828c4c9f1,4
+np.float64,0x148a650a2914e,0x148a650a2914e,4
+np.float64,0x3fe8d255c0b1a4ac,0x3fef623c3ea8d6e3,4
+np.float64,0x800f4fbb28be9f76,0x800f4fbb28be9f76,4
+np.float64,0x7fdca57bcfb94af7,0x3ff51207563fb6cb,4
+np.float64,0x3fe4944107692882,0x3fe7fad593235364,4
+np.float64,0x800119b4f1a2336b,0x800119b4f1a2336b,4
+np.float64,0xbfe734075e6e680e,0xbfec5b35381069f2,4
+np.float64,0xffeb3c00db767801,0xbfbbd7d22df7b4b3,4
+np.float64,0xbfe95c658cb2b8cb,0xbff03ad5e0bc888a,4
+np.float64,0xffeefeb58fbdfd6a,0xbfd5c9264deb0e11,4
+np.float64,0x7fccc80fde39901f,0xc012c60f914f3ca2,4
+np.float64,0x3fe5da289c2bb451,0x3fea07ad00a0ca63,4
+np.float64,0x800e364b0a5c6c96,0x800e364b0a5c6c96,4
+np.float64,0x3fcf9ea7d23f3d50,0x3fd023b72e8c9dcf,4
+np.float64,0x800a475cfc948eba,0x800a475cfc948eba,4
+np.float64,0xffd4e0d757a9c1ae,0xbfa89d573352e011,4
+np.float64,0xbfd4dbec8229b7da,0xbfd5a165f12c7c40,4
+np.float64,0xffe307ab51260f56,0x3fe6b1639da58c3f,4
+np.float64,0xbfe6955a546d2ab4,0xbfeb44ae2183fee9,4
+np.float64,0xbfca1f18f5343e30,0xbfca7d804ccccdf4,4
+np.float64,0xe9f4dfebd3e9c,0xe9f4dfebd3e9c,4
+np.float64,0xfff0000000000000,0xfff8000000000000,4
+np.float64,0x8008e69c0fb1cd38,0x8008e69c0fb1cd38,4
+np.float64,0xbfead1ccf975a39a,0xbff1c84b3db8ca93,4
+np.float64,0x25a982424b531,0x25a982424b531,4
+np.float64,0x8010000000000000,0x8010000000000000,4
+np.float64,0x80056204ea0ac40b,0x80056204ea0ac40b,4
+np.float64,0x800d1442d07a2886,0x800d1442d07a2886,4
+np.float64,0xbfaef3dadc3de7b0,0xbfaefd85ae6205f0,4
+np.float64,0x7fe969ce4b32d39c,0xbff3c4364fc6778f,4
+np.float64,0x7fe418bac0a83175,0x402167d16b1efe0b,4
+np.float64,0x3fd7c82a25af9054,0x3fd8f0c701315672,4
+np.float64,0x80013782a7826f06,0x80013782a7826f06,4
+np.float64,0x7fc031c7ee20638f,0x400747ab705e6904,4
+np.float64,0x3fe8cf327ff19e65,0x3fef5c14f8aafa89,4
+np.float64,0xbfe331a416a66348,0xbfe5e2290a098dd4,4
+np.float64,0x800607b2116c0f65,0x800607b2116c0f65,4
+np.float64,0x7fb40448f0280891,0xbfd43d4f0ffa1d64,4
+np.float64,0x7fefffffffffffff,0xbf74530cfe729484,4
+np.float64,0x3fe39b5444a736a9,0x3fe67eaa0b6acf27,4
+np.float64,0x3fee4733c4fc8e68,0x3ff631eabeef9696,4
+np.float64,0xbfec840f3b79081e,0xbff3cc8563ab2e74,4
+np.float64,0xbfc8f6854c31ed0c,0xbfc948caacb3bba0,4
+np.float64,0xffbcf754a639eea8,0xbfc88d17cad3992b,4
+np.float64,0x8000bd3163417a64,0x8000bd3163417a64,4
+np.float64,0x3fe766d0eaeecda2,0x3fecb660882f7024,4
+np.float64,0xb6cc30156d986,0xb6cc30156d986,4
+np.float64,0xffc0161f9f202c40,0x3fe19bdefe5cf8b1,4
+np.float64,0xffe1e462caa3c8c5,0x3fe392c47feea17b,4
+np.float64,0x30a36a566146e,0x30a36a566146e,4
+np.float64,0x3fa996f580332deb,0x3fa99c6b4f2abebe,4
+np.float64,0x3fba71716e34e2e0,0x3fba899f35edba1d,4
+np.float64,0xbfe8f7e5e971efcc,0xbfefac431a0e3d55,4
+np.float64,0xf48f1803e91e3,0xf48f1803e91e3,4
+np.float64,0x7fe3edc0a127db80,0xc03d1a579a5d74a8,4
+np.float64,0xffeba82056375040,0x3fdfd701308700db,4
+np.float64,0xbfeb5a924cf6b524,0xbff2640de7cd107f,4
+np.float64,0xfa4cd1a9f499a,0xfa4cd1a9f499a,4
+np.float64,0x800de1be7b9bc37d,0x800de1be7b9bc37d,4
+np.float64,0xffd44e56ad289cae,0x3fdf4b8085db9b67,4
+np.float64,0xbfe4fb3aea69f676,0xbfe89d2cc46fcc50,4
+np.float64,0xbfe596495d6b2c92,0xbfe997a589a1f632,4
+np.float64,0x6f55a2b8deab5,0x6f55a2b8deab5,4
+np.float64,0x7fe72dc4712e5b88,0x4039c4586b28c2bc,4
+np.float64,0x89348bd712692,0x89348bd712692,4
+np.float64,0xffe062156120c42a,0x4005f0580973bc77,4
+np.float64,0xbfeabc714d7578e2,0xbff1b07e2fa57dc0,4
+np.float64,0x8003a56b3e874ad7,0x8003a56b3e874ad7,4
+np.float64,0x800eeadfb85dd5c0,0x800eeadfb85dd5c0,4
+np.float64,0x46d77a4c8daf0,0x46d77a4c8daf0,4
+np.float64,0x8000c06e7dc180de,0x8000c06e7dc180de,4
+np.float64,0x3fe428d211e851a4,0x3fe754b1c00a89bc,4
+np.float64,0xc5be11818b7c2,0xc5be11818b7c2,4
+np.float64,0x7fefc244893f8488,0x401133dc54f52de5,4
+np.float64,0x3fde30eee93c61de,0x3fe0532b827543a6,4
+np.float64,0xbfd447f48b288fea,0xbfd4fd0654f90718,4
+np.float64,0xbfde98dc7b3d31b8,0xbfe094df12f84a06,4
+np.float64,0x3fed2c1a1dfa5834,0x3ff4a6c4f3470a65,4
+np.float64,0xbfe992165073242d,0xbff071ab039c9177,4
+np.float64,0x3fd0145d1b2028ba,0x3fd06d3867b703dc,4
+np.float64,0x3fe179457362f28b,0x3fe3722f1d045fda,4
+np.float64,0x800e28964fbc512d,0x800e28964fbc512d,4
+np.float64,0x8004a5d785294bb0,0x8004a5d785294bb0,4
+np.float64,0xbfd652f2272ca5e4,0xbfd7469713125120,4
+np.float64,0x7fe61f49036c3e91,0xbf9b6ccdf2d87e70,4
+np.float64,0xffb7d47dd02fa8f8,0xc004449a82320b13,4
+np.float64,0x3feb82f996b705f3,0x3ff29336c738a4c5,4
+np.float64,0x3fbb7fceea36ffa0,0x3fbb9b02c8ad7f93,4
+np.float64,0x80004519fb208a35,0x80004519fb208a35,4
+np.float64,0xbfe0539114e0a722,0xbfe1e86dc5aa039c,4
+np.float64,0x0,0x0,4
+np.float64,0xbfe99d1125f33a22,0xbff07cf8ec04300f,4
+np.float64,0xffd4fbeecc29f7de,0x3ffab76775a8455f,4
+np.float64,0xbfbf1c618e3e38c0,0xbfbf43d2764a8333,4
+np.float64,0x800cae02a9d95c06,0x800cae02a9d95c06,4
+np.float64,0x3febc47d3bf788fa,0x3ff2e0d7cf8ef509,4
+np.float64,0x3fef838f767f071f,0x3ff81aeac309bca0,4
+np.float64,0xbfd5e70716abce0e,0xbfd6ccb033ef7a35,4
+np.float64,0x3f9116fa60222df5,0x3f9117625f008e0b,4
+np.float64,0xffe02b1e5f20563c,0xbfe6b2ec293520b7,4
+np.float64,0xbf9b5aec3036b5e0,0xbf9b5c96c4c7f951,4
+np.float64,0xfdb0169bfb603,0xfdb0169bfb603,4
+np.float64,0x7fcdd1d51c3ba3a9,0x401f0e12fa0b7570,4
+np.float64,0xbfd088103fa11020,0xbfd0e8c4a333ffb2,4
+np.float64,0x3fe22df82ee45bf0,0x3fe46d03a7c14de2,4
+np.float64,0xbfd57b0c28aaf618,0xbfd65349a6191de5,4
+np.float64,0x3fe0a42f50a1485f,0x3fe252e26775d9a4,4
+np.float64,0x800fab4e363f569c,0x800fab4e363f569c,4
+np.float64,0xffe9f0ed63f3e1da,0xbfe278c341b171d5,4
+np.float64,0x7fe26c244664d848,0xbfb325269dad1996,4
+np.float64,0xffe830410bf06081,0xc00181a39f606e96,4
+np.float64,0x800c548a0c78a914,0x800c548a0c78a914,4
+np.float64,0x800f94761ebf28ec,0x800f94761ebf28ec,4
+np.float64,0x3fe5984845eb3091,0x3fe99aeb653c666d,4
+np.float64,0x7fe93e5bf8f27cb7,0xc010d159fa27396a,4
+np.float64,0xffefffffffffffff,0x3f74530cfe729484,4
+np.float64,0x4c83f1269907f,0x4c83f1269907f,4
+np.float64,0x3fde0065a8bc00cc,0x3fe034a1cdf026d4,4
+np.float64,0x800743810d6e8703,0x800743810d6e8703,4
+np.float64,0x80040662d5280cc6,0x80040662d5280cc6,4
+np.float64,0x3fed20b2c5ba4166,0x3ff497988519d7aa,4
+np.float64,0xffe8fa15e5f1f42b,0x3fff82ca76d797b4,4
+np.float64,0xbb72e22f76e5d,0xbb72e22f76e5d,4
+np.float64,0x7fc18ffa7c231ff4,0xbff4b8b4c3315026,4
+np.float64,0xbfe8d1ac44f1a358,0xbfef60efc4f821e3,4
+np.float64,0x3fd38c1fe8271840,0x3fd42dc37ff7262b,4
+np.float64,0xe577bee5caef8,0xe577bee5caef8,4
+np.float64,0xbff0000000000000,0xbff8eb245cbee3a6,4
+np.float64,0xffcb3a9dd436753c,0x3fcd1a3aff1c3fc7,4
+np.float64,0x7fe44bf2172897e3,0x3ff60bfe82a379f4,4
+np.float64,0x8009203823924071,0x8009203823924071,4
+np.float64,0x7fef8e0abc7f1c14,0x3fe90e4962d47ce5,4
+np.float64,0xffda50004434a000,0x3fb50dee03e1418b,4
+np.float64,0x7fe2ff276ea5fe4e,0xc0355b7d2a0a8d9d,4
+np.float64,0x3fd0711ba5a0e238,0x3fd0d03823d2d259,4
+np.float64,0xe7625b03cec4c,0xe7625b03cec4c,4
+np.float64,0xbfd492c8d7a92592,0xbfd55006cde8d300,4
+np.float64,0x8001fee99f23fdd4,0x8001fee99f23fdd4,4
+np.float64,0x7ff4000000000000,0x7ffc000000000000,4
+np.float64,0xfa15df97f42bc,0xfa15df97f42bc,4
+np.float64,0xbfec3fdca9787fb9,0xbff377164b13c7a9,4
+np.float64,0xbcec10e579d82,0xbcec10e579d82,4
+np.float64,0xbfc3b4e2132769c4,0xbfc3dd1fcc7150a6,4
+np.float64,0x80045b149ee8b62a,0x80045b149ee8b62a,4
+np.float64,0xffe044554c2088aa,0xbff741436d558785,4
+np.float64,0xffcc65f09f38cbe0,0xc0172b4adc2d317d,4
+np.float64,0xf68b2d3bed166,0xf68b2d3bed166,4
+np.float64,0x7fc7f44c572fe898,0x3fec69f3b1eca790,4
+np.float64,0x3fac51f61438a3ec,0x3fac595d34156002,4
+np.float64,0xbfeaa9f256f553e5,0xbff19bfdf5984326,4
+np.float64,0x800e4742149c8e84,0x800e4742149c8e84,4
+np.float64,0xbfc493df132927c0,0xbfc4c1ba4268ead9,4
+np.float64,0xbfbf0c56383e18b0,0xbfbf3389fcf50c72,4
+np.float64,0xbf978a0e082f1420,0xbf978b1dd1da3d3c,4
+np.float64,0xbfe04375356086ea,0xbfe1d34c57314dd1,4
+np.float64,0x3feaeeb29b75dd65,0x3ff1e8b772374979,4
+np.float64,0xbfe15e42c3a2bc86,0xbfe34d45d56c5c15,4
+np.float64,0x3fe507429a6a0e85,0x3fe8b058176b3225,4
+np.float64,0x3feee2b26c3dc565,0x3ff71b73203de921,4
+np.float64,0xbfd496577aa92cae,0xbfd553fa7fe15a5f,4
+np.float64,0x7fe2c10953e58212,0x3fc8ead6a0d14bbf,4
+np.float64,0x800035b77aa06b70,0x800035b77aa06b70,4
+np.float64,0x2329201e46525,0x2329201e46525,4
+np.float64,0xbfe6225c9a6c44b9,0xbfea80861590fa02,4
+np.float64,0xbfd6925030ad24a0,0xbfd78e70b1c2215d,4
+np.float64,0xbfd82225c4b0444c,0xbfd958a60f845b39,4
+np.float64,0xbb03d8a17609,0xbb03d8a17609,4
+np.float64,0x7fc33967b12672ce,0x40001e00c9af4002,4
+np.float64,0xff9373c6d026e780,0xbff308654a459d3d,4
+np.float64,0x3feab1f9c5f563f4,0x3ff1a4e0fd2f093d,4
+np.float64,0xbf993ef768327de0,0xbf994046b64e308b,4
+np.float64,0xffb87382fc30e708,0xbfde0accb83c891b,4
+np.float64,0x800bb3a118176743,0x800bb3a118176743,4
+np.float64,0x800c810250d90205,0x800c810250d90205,4
+np.float64,0xbfd2c4eb9ba589d8,0xbfd3539508b4a4a8,4
+np.float64,0xbee1f5437dc3f,0xbee1f5437dc3f,4
+np.float64,0x3fc07aeab520f5d8,0x3fc0926272f9d8e2,4
+np.float64,0xbfe23747a3246e90,0xbfe47a20a6e98687,4
+np.float64,0x3fde1296debc252c,0x3fe0401143ff6b5c,4
+np.float64,0xbfcec8c2f73d9184,0xbfcf644e25ed3b74,4
+np.float64,0xff9314f2c82629e0,0x40559a0f9099dfd1,4
+np.float64,0xbfe27487afa4e910,0xbfe4d0e01200bde6,4
+np.float64,0xffb3d6637627acc8,0x3fe326d4b1e1834f,4
+np.float64,0xffe6f84d642df09a,0x3fc73fa9f57c3acb,4
+np.float64,0xffe67cf76fecf9ee,0xc01cf48c97937ef9,4
+np.float64,0x7fdc73fc12b8e7f7,0xbfcfcecde9331104,4
+np.float64,0xffdcf8789239f0f2,0x3fe345e3b8e28776,4
+np.float64,0x800a70af5314e15f,0x800a70af5314e15f,4
+np.float64,0xffc862300730c460,0x3fc4e9ea813beca7,4
+np.float64,0xbfcc6961bd38d2c4,0xbfcce33bfa6c6bd1,4
+np.float64,0xbfc9b76bbf336ed8,0xbfca117456ac37e5,4
+np.float64,0x7fb86e829430dd04,0x400a5bd7a18e302d,4
+np.float64,0x7fb9813ef833027d,0xbfe5a6494f143625,4
+np.float64,0x8005085e2c2a10bd,0x8005085e2c2a10bd,4
+np.float64,0xffe5af099d6b5e12,0x40369bbe31e03e06,4
+np.float64,0xffde03b1fd3c0764,0x3ff061120aa1f52a,4
+np.float64,0x7fa4eb6cdc29d6d9,0x3fe9defbe9010322,4
+np.float64,0x800803f4b11007ea,0x800803f4b11007ea,4
+np.float64,0x7febd50f6df7aa1e,0xbffcf540ccf220dd,4
+np.float64,0x7fed454f08fa8a9d,0xbffc2a8b81079403,4
+np.float64,0xbfed7e8c69bafd19,0xbff5161e51ba6634,4
+np.float64,0xffef92e78eff25ce,0xbffefeecddae0ad3,4
+np.float64,0x7fe5b9b413ab7367,0xbfc681ba29704176,4
+np.float64,0x29284e805252,0x29284e805252,4
+np.float64,0xffed3955bcfa72ab,0xbfc695acb5f468de,4
+np.float64,0x3fe464ee1ca8c9dc,0x3fe7b140ce50fdca,4
+np.float64,0xffe522ae4bea455c,0x3feb957c146e66ef,4
+np.float64,0x8000000000000000,0x8000000000000000,4
+np.float64,0x3fd0c353a2a186a8,0x3fd1283aaa43a411,4
+np.float64,0x3fdb30a749b6614f,0x3fdcf40df006ed10,4
+np.float64,0x800109213cc21243,0x800109213cc21243,4
+np.float64,0xbfe72aa0c5ee5542,0xbfec4a713f513bc5,4
+np.float64,0x800865344ad0ca69,0x800865344ad0ca69,4
+np.float64,0x7feb7df60eb6fbeb,0x3fb1df06a67aa22f,4
+np.float64,0x3fe83a5dd93074bc,0x3fee3d63cda72636,4
+np.float64,0xbfde70e548bce1ca,0xbfe07b8e19c9dac6,4
+np.float64,0xbfeea38d537d471b,0xbff6bb18c230c0be,4
+np.float64,0x3fefeebbc47fdd78,0x3ff8cdaa53b7c7b4,4
+np.float64,0x7fe6512e20eca25b,0xbff623cee44a22b5,4
+np.float64,0xf8fa5ca3f1f4c,0xf8fa5ca3f1f4c,4
+np.float64,0x7fd12d00ed225a01,0xbfe90d518ea61faf,4
+np.float64,0x80027db43504fb69,0x80027db43504fb69,4
+np.float64,0xffc10a01aa221404,0x3fcc2065b3d0157b,4
+np.float64,0xbfef8286e87f050e,0xbff8193a54449b59,4
+np.float64,0xbfc73178092e62f0,0xbfc7735072ba4593,4
+np.float64,0x3fc859d70630b3ae,0x3fc8a626522af1c0,4
+np.float64,0x3fe4654c4268ca99,0x3fe7b1d2913eda1a,4
+np.float64,0xbfce93cd843d279c,0xbfcf2c2ef16a0957,4
+np.float64,0xffbcaa16d4395430,0xbfd511ced032d784,4
+np.float64,0xbfe91f980e723f30,0xbfeffb39cf8c7746,4
+np.float64,0x800556fb6f0aadf8,0x800556fb6f0aadf8,4
+np.float64,0xffd009cde520139c,0x3fe4fa83b1e93d28,4
+np.float64,0x7febc0675e3780ce,0x3feb53930c004dae,4
+np.float64,0xbfe7f975bdeff2ec,0xbfedc36e6729b010,4
+np.float64,0x45aff57c8b5ff,0x45aff57c8b5ff,4
+np.float64,0xbfec7ebd0138fd7a,0xbff3c5cab680aae0,4
+np.float64,0x8009448003b28900,0x8009448003b28900,4
+np.float64,0x3fca4b992d349732,0x3fcaabebcc86aa9c,4
+np.float64,0x3fca069161340d20,0x3fca63ecc742ff3a,4
+np.float64,0x80063bc80bec7791,0x80063bc80bec7791,4
+np.float64,0xbfe1764bffe2ec98,0xbfe36e1cb30cec94,4
+np.float64,0xffd0dba72f21b74e,0x3fb1834964d57ef6,4
+np.float64,0xbfe31848fc263092,0xbfe5bd066445cbc3,4
+np.float64,0xbfd1fb227323f644,0xbfd278334e27f02d,4
+np.float64,0xffdc59069fb8b20e,0xbfdfc363f559ea2c,4
+np.float64,0x3fdea52a52bd4a55,0x3fe09cada4e5344c,4
+np.float64,0x3f715e55a022bd00,0x3f715e5c72a2809e,4
+np.float64,0x1d1ac6023a35a,0x1d1ac6023a35a,4
+np.float64,0x7feacc71627598e2,0x400486b82121da19,4
+np.float64,0xa0287fa340510,0xa0287fa340510,4
+np.float64,0xffe352c5abe6a58b,0xc002623346060543,4
+np.float64,0x7fed577a23baaef3,0x3fda19bc8fa3b21f,4
+np.float64,0x3fde8dd5263d1baa,0x3fe08de0fedf7029,4
+np.float64,0x3feddd3be2bbba78,0x3ff599b2f3e018cc,4
+np.float64,0xc7a009f58f401,0xc7a009f58f401,4
+np.float64,0xbfef03d5a4fe07ab,0xbff74ee08681f47b,4
+np.float64,0x7fe2cf60eea59ec1,0x3fe905fb44f8cc60,4
+np.float64,0xbfe498fcab6931fa,0xbfe8023a6ff8becf,4
+np.float64,0xbfef7142acfee285,0xbff7fd196133a595,4
+np.float64,0xd214ffdba42a0,0xd214ffdba42a0,4
+np.float64,0x8006de7d78cdbcfc,0x8006de7d78cdbcfc,4
+np.float64,0xb247d34f648fb,0xb247d34f648fb,4
+np.float64,0xbfdd5bece6bab7da,0xbfdf9ba63ca2c5b2,4
+np.float64,0x7fe874650af0e8c9,0x3fe74204e122c10f,4
+np.float64,0x800768c49baed18a,0x800768c49baed18a,4
+np.float64,0x3fb4c0a192298140,0x3fb4cc4c8aa43300,4
+np.float64,0xbfa740531c2e80a0,0xbfa7446b7c74ae8e,4
+np.float64,0x7fe10d6edf221add,0x3fedbcd2eae26657,4
+np.float64,0xbfe9175d0f722eba,0xbfefeaca7f32c6e3,4
+np.float64,0x953e11d32a7c2,0x953e11d32a7c2,4
+np.float64,0x80032df90c465bf3,0x80032df90c465bf3,4
+np.float64,0xffec5b799638b6f2,0xbfe95cd2c69be12c,4
+np.float64,0xffe0c3cfa9a1879f,0x3fe20b99b0c108ce,4
+np.float64,0x3fb610d8e22c21b2,0x3fb61ee0d6c16df8,4
+np.float64,0xffe16bb39962d766,0xc016d370381b6b42,4
+np.float64,0xbfdc72edb238e5dc,0xbfde7bd2de10717a,4
+np.float64,0xffed52dee3baa5bd,0xc01994c08899129a,4
+np.float64,0xffa92aab08325550,0xbff2b881ce363cbd,4
+np.float64,0x7fe028282de0504f,0xc0157ff96c69a9c7,4
+np.float64,0xbfdb2151bf3642a4,0xbfdce196fcc35857,4
+np.float64,0x3fcffbd13c3ff7a2,0x3fd0554b5f0371ac,4
+np.float64,0x800d206bff1a40d8,0x800d206bff1a40d8,4
+np.float64,0x458f818c8b1f1,0x458f818c8b1f1,4
+np.float64,0x800a7b56a234f6ae,0x800a7b56a234f6ae,4
+np.float64,0xffe3d86161e7b0c2,0xbff58d0dbde9f188,4
+np.float64,0xe8ed82e3d1db1,0xe8ed82e3d1db1,4
+np.float64,0x3fe234e0176469c0,0x3fe476bd36b96a75,4
+np.float64,0xbfc7cb9c132f9738,0xbfc812c46e185e0b,4
+np.float64,0xbfeba116c1f7422e,0xbff2b6b7563ad854,4
+np.float64,0x7fe7041de62e083b,0x3f5d2b42aca47274,4
+np.float64,0xbfcf60f4ff3ec1e8,0xbfd002eb83406436,4
+np.float64,0xbfc06067a520c0d0,0xbfc0776e5839ecda,4
+np.float64,0x4384965a87093,0x4384965a87093,4
+np.float64,0xd2ed9d01a5db4,0xd2ed9d01a5db4,4
+np.float64,0x3fbea88cb63d5119,0x3fbece49cc34a379,4
+np.float64,0x3fe7e982ebefd306,0x3feda5bd4c435d43,4
+np.float64,0xffdb60a3e036c148,0xbfcb7ed21e7a8f49,4
+np.float64,0x7fdba9231eb75245,0xbfd750cab1536398,4
+np.float64,0x800d593534dab26b,0x800d593534dab26b,4
+np.float64,0xffdf15fb683e2bf6,0x3fb3aaea23357f06,4
+np.float64,0xbfd6f8a2e5adf146,0xbfd802e509d67c67,4
+np.float64,0x3feeaa31513d5463,0x3ff6c52147dc053c,4
+np.float64,0xf2f6dfd3e5edc,0xf2f6dfd3e5edc,4
+np.float64,0x7fd58d8279ab1b04,0x403243f23d02af2a,4
+np.float64,0x8000000000000001,0x8000000000000001,4
+np.float64,0x3fdffb8e0ebff71c,0x3fe1786cb0a6b0f3,4
+np.float64,0xc999826b93331,0xc999826b93331,4
+np.float64,0xffc4966f19292ce0,0x3ff0836c75c56cc7,4
+np.float64,0x7fef95a4b2ff2b48,0xbfbbe2c27c78154f,4
+np.float64,0xb8f1307f71e26,0xb8f1307f71e26,4
+np.float64,0x3fe807bc7eb00f79,0x3fedde19f2d3c42d,4
+np.float64,0x5e4b6580bc98,0x5e4b6580bc98,4
+np.float64,0xffe19353576326a6,0xc0278c51fee07d36,4
+np.float64,0xbfb0ca6f3e2194e0,0xbfb0d09be673fa72,4
+np.float64,0x3fea724211b4e484,0x3ff15ee06f0a0a13,4
+np.float64,0xbfda21e1c4b443c4,0xbfdbb041f3c86832,4
+np.float64,0x8008082b24901057,0x8008082b24901057,4
+np.float64,0xbfd031aa4ea06354,0xbfd08c77729634bb,4
+np.float64,0xbfc407e153280fc4,0xbfc432275711df5f,4
+np.float64,0xbb4fa4b5769f5,0xbb4fa4b5769f5,4
+np.float64,0x7fed6d1daffada3a,0xc037a14bc7b41fab,4
+np.float64,0xffeee589943dcb12,0x3ff2abfe47037778,4
+np.float64,0x301379d260270,0x301379d260270,4
+np.float64,0xbfec2fefc2b85fe0,0xbff36362c0363e06,4
+np.float64,0xbfe0b1c82e216390,0xbfe264f503f7c22c,4
+np.float64,0xbfea2bce78f4579d,0xbff112d6f07935ea,4
+np.float64,0x18508ef230a13,0x18508ef230a13,4
+np.float64,0x800667a74d6ccf4f,0x800667a74d6ccf4f,4
+np.float64,0x79ce5c8cf39cc,0x79ce5c8cf39cc,4
+np.float64,0x3feda61c8efb4c39,0x3ff54c9ade076f54,4
+np.float64,0x3fe27e06b0e4fc0d,0x3fe4de665c1dc3ca,4
+np.float64,0xbfd15fea2722bfd4,0xbfd1d081c55813b0,4
+np.float64,0xbfe5222c4cea4458,0xbfe8db62deb7d2ad,4
+np.float64,0xbfe8a16c33b142d8,0xbfef02d5831592a8,4
+np.float64,0x3fdb60e7c4b6c1d0,0x3fdd2e4265c4c3b6,4
+np.float64,0x800076d62b60edad,0x800076d62b60edad,4
+np.float64,0xbfec8f1527791e2a,0xbff3da7ed3641e8d,4
+np.float64,0x2af03bfe55e08,0x2af03bfe55e08,4
+np.float64,0xa862ee0950c5e,0xa862ee0950c5e,4
+np.float64,0x7fea5a7c1eb4b4f7,0xbffa6f07d28ef211,4
+np.float64,0x90e118fb21c23,0x90e118fb21c23,4
+np.float64,0xbfead0721bf5a0e4,0xbff1c6c7a771a128,4
+np.float64,0x3f63f4a4c027e94a,0x3f63f4a75665da67,4
+np.float64,0x3fece0efa579c1e0,0x3ff443bec52f021e,4
+np.float64,0xbfdbe743b737ce88,0xbfddd129bff89c15,4
+np.float64,0x3fd48c9b8fa91938,0x3fd5492a630a8cb5,4
+np.float64,0x3ff0000000000000,0x3ff8eb245cbee3a6,4
+np.float64,0xbfd51ea33baa3d46,0xbfd5ebd5dc710204,4
+np.float64,0x3fcfbab0183f7560,0x3fd032a054580b00,4
+np.float64,0x8007abce13cf579d,0x8007abce13cf579d,4
+np.float64,0xbfef0f4723be1e8e,0xbff760c7008e8913,4
+np.float64,0x8006340f524c681f,0x8006340f524c681f,4
+np.float64,0x87b7d7010f71,0x87b7d7010f71,4
+np.float64,0x3fe9422da9b2845b,0x3ff02052e6148c45,4
+np.float64,0x7fddd259b93ba4b2,0xc000731aa33d84b6,4
+np.float64,0x3fe0156d12202ada,0x3fe1972ba309cb29,4
+np.float64,0x8004f1264b89e24d,0x8004f1264b89e24d,4
+np.float64,0x3fececdcacb9d9b9,0x3ff4534d5861f731,4
+np.float64,0x3fd1790ab822f215,0x3fd1eb97b1bb6fb4,4
+np.float64,0xffce5d11863cba24,0xbfcb4f38c17210da,4
+np.float64,0x800a30c32a546187,0x800a30c32a546187,4
+np.float64,0x3fa58cc61c2b198c,0x3fa59008add7233e,4
+np.float64,0xbfe0ac77d62158f0,0xbfe25de3dba0bc4a,4
+np.float64,0xeb8c5753d718b,0xeb8c5753d718b,4
+np.float64,0x3fee5438dafca872,0x3ff644fef7e7adb5,4
+np.float64,0x3faad1eb2c35a3e0,0x3faad83499f94057,4
+np.float64,0x3fe39152c46722a6,0x3fe66fba0b96ab6e,4
+np.float64,0xffd6fd17712dfa2e,0xc010d697d1ab8731,4
+np.float64,0x5214a888a4296,0x5214a888a4296,4
+np.float64,0x8000127a5da024f5,0x8000127a5da024f5,4
+np.float64,0x7feb3a366cb6746c,0x3fbe49bd8d5f213a,4
+np.float64,0xca479501948f3,0xca479501948f3,4
+np.float64,0x7fe7c799ce6f8f33,0xbfd796cd98dc620c,4
+np.float64,0xffe20bcf30a4179e,0xbff8ca5453fa088f,4
+np.float64,0x3fe624638a6c48c7,0x3fea83f123832c3c,4
+np.float64,0xbfe5f1377c6be26f,0xbfea2e143a2d522c,4
+np.float64,0x7fd193f9f8a327f3,0xbfb04ee2602574d4,4
+np.float64,0xbfe7419d2fee833a,0xbfec737f140d363d,4
+np.float64,0x1,0x1,4
+np.float64,0x7fe2ac246c655848,0x3fd14fee3237727a,4
+np.float64,0xa459b42948b37,0xa459b42948b37,4
+np.float64,0x3fb26155ae24c2ab,0x3fb2696fc446d4c6,4
+np.float64,0xbfdd7b332e3af666,0xbfdfc296c21f1aa8,4
+np.float64,0xbfe00dbda4a01b7c,0xbfe18d2b060f0506,4
+np.float64,0x8003bb22d3e77646,0x8003bb22d3e77646,4
+np.float64,0x3fee21b0a57c4361,0x3ff5fb6a21dc911c,4
+np.float64,0x80ca69270194d,0x80ca69270194d,4
+np.float64,0xbfd6d80350adb006,0xbfd7ddb501edbde0,4
+np.float64,0xd2f8b801a5f2,0xd2f8b801a5f2,4
+np.float64,0xbfe856b3f170ad68,0xbfee7334fdc49296,4
+np.float64,0x3fed5c1b20bab836,0x3ff4e73ee5d5c7f3,4
+np.float64,0xbfd58085a5ab010c,0xbfd6596ddc381ffa,4
+np.float64,0x3fe4f0134b29e027,0x3fe88b70602fbd21,4
+np.float64,0xffc9098fdc321320,0x4011c334a74a92cf,4
+np.float64,0x794749bef28ea,0x794749bef28ea,4
+np.float64,0xbfc86b547f30d6a8,0xbfc8b84a4fafe0af,4
+np.float64,0x7fe1356b9da26ad6,0x3fd270bca208d899,4
+np.float64,0x7fca0ef1aa341de2,0xbff851044c0734fa,4
+np.float64,0x80064cb8b62c9972,0x80064cb8b62c9972,4
+np.float64,0xffd3a09a83a74136,0x3ffb66dae0accdf5,4
+np.float64,0x800e301aa15c6035,0x800e301aa15c6035,4
+np.float64,0x800e51f323bca3e6,0x800e51f323bca3e6,4
+np.float64,0x7ff0000000000000,0xfff8000000000000,4
+np.float64,0x800c4278c87884f2,0x800c4278c87884f2,4
+np.float64,0xbfe8481649f0902c,0xbfee576772695096,4
+np.float64,0xffe2344e3fa4689c,0x3fb10442ec0888de,4
+np.float64,0xbfeada313d75b462,0xbff1d1aee3fab3a9,4
+np.float64,0x8009ddfb1333bbf7,0x8009ddfb1333bbf7,4
+np.float64,0x7fed3314c93a6629,0x3ff7a9b12dc1cd37,4
+np.float64,0x3fd55c26da2ab84e,0x3fd630a7b8aac78a,4
+np.float64,0x800cdb5203f9b6a4,0x800cdb5203f9b6a4,4
+np.float64,0xffd04a875da0950e,0x4009a13810ab121d,4
+np.float64,0x800f1acb527e3597,0x800f1acb527e3597,4
+np.float64,0xbf9519bf282a3380,0xbf951a82e9b955ff,4
+np.float64,0x3fcd7a42fa3af486,0x3fce028f3c51072d,4
+np.float64,0xbfdd3e21b73a7c44,0xbfdf769f2ff2480b,4
+np.float64,0xffd4361e2aa86c3c,0xbfc211ce8e9f792c,4
+np.float64,0x7fccf97f6939f2fe,0xbff8464bad830f06,4
+np.float64,0x800ce47fb939c900,0x800ce47fb939c900,4
+np.float64,0xffe9e51df173ca3b,0xbfceaf990d652c4e,4
+np.float64,0x3fe05bba5b20b775,0x3fe1f326e4455442,4
+np.float64,0x800a29b4b134536a,0x800a29b4b134536a,4
+np.float64,0xe6f794b7cdef3,0xe6f794b7cdef3,4
+np.float64,0xffb5b688ce2b6d10,0x3ff924bb97ae2f6d,4
+np.float64,0x7fa74105d82e820b,0x3fd49643aaa9eee4,4
+np.float64,0x80020d15f7a41a2d,0x80020d15f7a41a2d,4
+np.float64,0x3fd6a983d5ad5308,0x3fd7a8cc8835b5b8,4
+np.float64,0x7fcd9798f03b2f31,0x3fc534c2f7bf4721,4
+np.float64,0xffdd31873a3a630e,0xbfe3171fcdffb3f7,4
+np.float64,0x80075183234ea307,0x80075183234ea307,4
+np.float64,0x82f3132505e63,0x82f3132505e63,4
+np.float64,0x3febfd9cb837fb39,0x3ff325bbf812515d,4
+np.float64,0xbfb4630fda28c620,0xbfb46e1f802ec278,4
+np.float64,0x3feeed7c89fddafa,0x3ff72c20ce5a9ee4,4
+np.float64,0x7fd3dcb3c127b967,0x40123d27ec9ec31d,4
+np.float64,0xbfe923450c72468a,0xbff00149c5742725,4
+np.float64,0x7fdef7f91abdeff1,0xbfe02ceb21f7923d,4
+np.float64,0x7fdd70d28fbae1a4,0xbfefcc5c9d10cdfd,4
+np.float64,0x800ca445a8d9488c,0x800ca445a8d9488c,4
+np.float64,0x7fec2754e1f84ea9,0x40173f6c1c97f825,4
+np.float64,0x7fcbca31f7379463,0x401e26bd2667075b,4
+np.float64,0x8003fa1d0847f43b,0x8003fa1d0847f43b,4
+np.float64,0xffe95cf85932b9f0,0xc01308e60278aa11,4
+np.float64,0x8009c53948f38a73,0x8009c53948f38a73,4
+np.float64,0x3fdcca9226b99524,0x3fdee7a008f75d41,4
+np.float64,0xbfe9ee241f33dc48,0xbff0d16bfff6c8e9,4
+np.float64,0xbfb3365058266ca0,0xbfb33f9176ebb51d,4
+np.float64,0x7fa98e10f4331c21,0x3fdee04ffd31314e,4
+np.float64,0xbfe1a11aea634236,0xbfe3a8e3d84fda38,4
+np.float64,0xbfd8df051131be0a,0xbfda342805d1948b,4
+np.float64,0x3d49a2407a935,0x3d49a2407a935,4
+np.float64,0xfc51eefff8a3e,0xfc51eefff8a3e,4
+np.float64,0xda63950bb4c73,0xda63950bb4c73,4
+np.float64,0x80050f3d4fea1e7b,0x80050f3d4fea1e7b,4
+np.float64,0x3fcdbd6e453b7ae0,0x3fce497478c28e77,4
+np.float64,0x7ebd4932fd7aa,0x7ebd4932fd7aa,4
+np.float64,0x7fa3904eac27209c,0xc0015f3125efc151,4
+np.float64,0x7fc59f956b2b3f2a,0xc00c012e7a2c281f,4
+np.float64,0xbfd436d716a86dae,0xbfd4ea13533a942b,4
+np.float64,0x9347ae3d268f6,0x9347ae3d268f6,4
+np.float64,0xffd001764d2002ec,0xbffab3462e515623,4
+np.float64,0x3fe6f406662de80d,0x3febe9bac3954999,4
+np.float64,0x3f943ecaf8287d96,0x3f943f77dee5e77f,4
+np.float64,0x3fd6250efcac4a1c,0x3fd712afa947d56f,4
+np.float64,0xbfe849ff777093ff,0xbfee5b089d03391f,4
+np.float64,0xffd3b8ef8f2771e0,0x4000463ff7f29214,4
+np.float64,0xbfc3bae9252775d4,0xbfc3e34c133f1933,4
+np.float64,0xbfea93943df52728,0xbff18355e4fc341d,4
+np.float64,0x3fc4d922ad29b245,0x3fc508d66869ef29,4
+np.float64,0x4329694a8652e,0x4329694a8652e,4
+np.float64,0x8834f1a71069e,0x8834f1a71069e,4
+np.float64,0xe0e5be8dc1cb8,0xe0e5be8dc1cb8,4
+np.float64,0x7fef4d103afe9a1f,0xc0047b88b94554fe,4
+np.float64,0x3fe9b57af4f36af6,0x3ff0963831d51c3f,4
+np.float64,0x3fe081e2fa6103c6,0x3fe22572e41be655,4
+np.float64,0x3fd78cf7b42f19ef,0x3fd8acafa1ad776a,4
+np.float64,0x7fbffd58d43ffab1,0x3fb16092c7de6036,4
+np.float64,0xbfe1e8bfae23d180,0xbfe40c1c6277dd52,4
+np.float64,0x800a9f59fb153eb4,0x800a9f59fb153eb4,4
+np.float64,0xffebe14e33b7c29c,0x3fe0ec532f4deedd,4
+np.float64,0xffc36ca00426d940,0xc000806a712d6e83,4
+np.float64,0xbfcc2be82d3857d0,0xbfcca2a7d372ec64,4
+np.float64,0x800c03b908780772,0x800c03b908780772,4
+np.float64,0xf315a64be62b5,0xf315a64be62b5,4
+np.float64,0xbfe644043cec8808,0xbfeab974d3dc6d80,4
+np.float64,0x3fedb7de3cbb6fbc,0x3ff56549a5acd324,4
+np.float64,0xbfb1a875522350e8,0xbfb1afa41dee338d,4
+np.float64,0xffee8d4a407d1a94,0x3fead1749a636ff6,4
+np.float64,0x8004061c13080c39,0x8004061c13080c39,4
+np.float64,0x3fe650ae7feca15c,0x3feacefb8bc25f64,4
+np.float64,0x3fda8340e6b50682,0x3fdc24275cab1df8,4
+np.float64,0x8009084344321087,0x8009084344321087,4
+np.float64,0x7fdd19cb823a3396,0xbfd1d8fb35d89e3f,4
+np.float64,0xbfe893172571262e,0xbfeee716b592b93c,4
+np.float64,0x8ff5acc11fec,0x8ff5acc11fec,4
+np.float64,0xbfdca0c57cb9418a,0xbfdeb42465a1b59e,4
+np.float64,0xffd77bd2a3aef7a6,0x4012cd69e85b82d8,4
+np.float64,0xbfe6ea78982dd4f1,0xbfebd8ec61fb9e1f,4
+np.float64,0x7fe14b1d80a2963a,0xc02241642102cf71,4
+np.float64,0x3fe712bf286e257e,0x3fec20012329a7fb,4
+np.float64,0x7fcb6fa4d636df49,0x400b899d14a886b3,4
+np.float64,0x3fb82cb39a305960,0x3fb83f29c5f0822e,4
+np.float64,0x7fed694c8b3ad298,0xbfe2724373c69808,4
+np.float64,0xbfcd21229f3a4244,0xbfcda497fc3e1245,4
+np.float64,0x564d3770ac9a8,0x564d3770ac9a8,4
+np.float64,0xf4409e13e8814,0xf4409e13e8814,4
+np.float64,0x80068dca9a8d1b96,0x80068dca9a8d1b96,4
+np.float64,0xbfe13f82afe27f06,0xbfe3236ddded353f,4
+np.float64,0x80023f8114647f03,0x80023f8114647f03,4
+np.float64,0xeafba7dfd5f75,0xeafba7dfd5f75,4
+np.float64,0x3feca74ddeb94e9c,0x3ff3f95dcce5a227,4
+np.float64,0x10000000000000,0x10000000000000,4
+np.float64,0xbfebdb4141f7b682,0xbff2fc29823ac64a,4
+np.float64,0xbfcd75ee2f3aebdc,0xbfcdfdfd87cc6a29,4
+np.float64,0x7fc010cda420219a,0x3fae4ca2cf1f2657,4
+np.float64,0x1a90209e35205,0x1a90209e35205,4
+np.float64,0x8008057d01900afa,0x8008057d01900afa,4
+np.float64,0x3f9cb5f280396be5,0x3f9cb7dfb4e4be4e,4
+np.float64,0xffe1bbb60b63776c,0xc00011b1ffcb2561,4
+np.float64,0xffda883f6fb5107e,0x4044238ef4e2a198,4
+np.float64,0x3fc07c0b4a20f817,0x3fc09387de9eebcf,4
+np.float64,0x8003a9ebc0c753d8,0x8003a9ebc0c753d8,4
+np.float64,0x1d7fd5923affc,0x1d7fd5923affc,4
+np.float64,0xbfe9cd8cf9b39b1a,0xbff0af43e567ba4a,4
+np.float64,0x11285cb42250c,0x11285cb42250c,4
+np.float64,0xffe81ae1ccb035c3,0xbfe038be7eb563a6,4
+np.float64,0xbfe56473b1eac8e8,0xbfe94654d8ab9e75,4
+np.float64,0x3fee904619fd208c,0x3ff69e198152fe17,4
+np.float64,0xbfeeb9a2cbfd7346,0xbff6dc8d96da78cd,4
+np.float64,0x8006cdfa59ed9bf5,0x8006cdfa59ed9bf5,4
+np.float64,0x8008f2366d31e46d,0x8008f2366d31e46d,4
+np.float64,0x8008d5f91e31abf3,0x8008d5f91e31abf3,4
+np.float64,0x3fe85886f8b0b10e,0x3fee76af16f5a126,4
+np.float64,0x3fefb9b2b73f7365,0x3ff8745128fa3e3b,4
+np.float64,0x7fdf3e721f3e7ce3,0xbfb19381541ca2a8,4
+np.float64,0x3fd2768c41a4ed18,0x3fd2fe2f85a3f3a6,4
+np.float64,0xbfcabe3c6a357c78,0xbfcb239fb88bc260,4
+np.float64,0xffdffb6a3dbff6d4,0xbff7af4759fd557c,4
+np.float64,0x800817f75f302fef,0x800817f75f302fef,4
+np.float64,0xbfe6a1d1762d43a3,0xbfeb5a399a095ef3,4
+np.float64,0x7fd6f32f912de65e,0x40016dedc51aabd0,4
+np.float64,0x3fc6cb26652d964d,0x3fc7099f047d924a,4
+np.float64,0x3fe8b975d67172ec,0x3fef31946123c0e7,4
+np.float64,0xffe44a09d1e89413,0x3fdee9e5eac6e540,4
+np.float64,0xbfece76d4cb9cedb,0xbff44c34849d07ba,4
+np.float64,0x7feb76027036ec04,0x3fe08595a5e263ac,4
+np.float64,0xffe194f591a329ea,0x3fbe5bd626400a70,4
+np.float64,0xbfc170698122e0d4,0xbfc18c3de8b63565,4
+np.float64,0x3fc82b2c0f305658,0x3fc875c3b5fbcd08,4
+np.float64,0x3fd5015634aa02ac,0x3fd5cb1df07213c3,4
+np.float64,0x7fe640884b6c8110,0xbff66255a420abb5,4
+np.float64,0x5a245206b448b,0x5a245206b448b,4
+np.float64,0xffe9d9fa2f73b3f4,0xc0272b0dd34ab9bf,4
+np.float64,0x3fd990e8aab321d0,0x3fdb04cd3a29bcc3,4
+np.float64,0xde9dda8bbd3bc,0xde9dda8bbd3bc,4
+np.float64,0xbfe81b32b4703666,0xbfee029937fa9f5a,4
+np.float64,0xbfe68116886d022d,0xbfeb21c62081cb73,4
+np.float64,0x3fb8da191231b432,0x3fb8ee28c71507d3,4
+np.float64,0x3fb111395a222273,0x3fb117b57de3dea4,4
+np.float64,0xffbafadc6a35f5b8,0x3ffcc6d2370297b9,4
+np.float64,0x8002ca475b05948f,0x8002ca475b05948f,4
+np.float64,0xbfeafef57875fdeb,0xbff1fb1315676f24,4
+np.float64,0x7fcda427d73b484f,0xbff9f70212694d17,4
+np.float64,0xffe2517b3ba4a2f6,0xc029ca6707305bf4,4
+np.float64,0x7fc5ee156b2bdc2a,0xbff8384b59e9056e,4
+np.float64,0xbfec22af3278455e,0xbff3530fe25816b4,4
+np.float64,0x6b5a8c2cd6b52,0x6b5a8c2cd6b52,4
+np.float64,0xffdaf6c4b935ed8a,0x4002f00ce58affcf,4
+np.float64,0x800a41813c748303,0x800a41813c748303,4
+np.float64,0xbfd09a1269213424,0xbfd0fc0a0c5de8eb,4
+np.float64,0x7fa2cb74d42596e9,0x3fc3d40e000fa69d,4
+np.float64,0x7ff8000000000000,0x7ff8000000000000,4
+np.float64,0x3fbfbf8ed63f7f1e,0x3fbfe97bcad9f53a,4
+np.float64,0x7fe0ebba65a1d774,0x401b0f17b28618df,4
+np.float64,0x3fd02c3a25a05874,0x3fd086aa55b19c9c,4
+np.float64,0xec628f95d8c52,0xec628f95d8c52,4
+np.float64,0x3fd319329fa63264,0x3fd3afb04e0dec63,4
+np.float64,0x180e0ade301c2,0x180e0ade301c2,4
+np.float64,0xbfe8d78324f1af06,0xbfef6c66153064ee,4
+np.float64,0xffb89fa200313f48,0xbfeb96ff2d9358dc,4
+np.float64,0x7fe6abcf86ed579e,0xc0269f4de86365ec,4
+np.float64,0x7fdff8cd65bff19a,0xbfd0f7c6b9052c9a,4
+np.float64,0xbfd2e3a53d25c74a,0xbfd37520cda5f6b2,4
+np.float64,0x7fe844b096708960,0x3ff696a6182e5a7a,4
+np.float64,0x7fdce0c7a3b9c18e,0x3fd42875d69ed379,4
+np.float64,0xffba5a91cc34b520,0x4001b571e8991951,4
+np.float64,0xffe78fe4a6ef1fc9,0x3ff4507b31f5b3bc,4
+np.float64,0xbfd7047493ae08ea,0xbfd810618a53fffb,4
+np.float64,0xc6559def8cab4,0xc6559def8cab4,4
+np.float64,0x3fe75d67a76ebacf,0x3feca56817de65e4,4
+np.float64,0xffd24adbd6a495b8,0xc012c491addf2df5,4
+np.float64,0x7fed35e28dba6bc4,0x403a0fa555ff7ec6,4
+np.float64,0x80078c4afa0f1897,0x80078c4afa0f1897,4
+np.float64,0xa6ec39114dd87,0xa6ec39114dd87,4
+np.float64,0x7fb1bd33ba237a66,0x4010092bb6810fd4,4
+np.float64,0x800ecf215edd9e43,0x800ecf215edd9e43,4
+np.float64,0x3fb7c169242f82d2,0x3fb7d2ed30c462e6,4
+np.float64,0xbf71b46d60236900,0xbf71b4749a10c112,4
+np.float64,0x800d7851787af0a3,0x800d7851787af0a3,4
+np.float64,0x3fcb4a45e7369488,0x3fcbb61701a1bcec,4
+np.float64,0x3fd4e3682429c6d0,0x3fd5a9bcb916eb94,4
+np.float64,0x800497564c292ead,0x800497564c292ead,4
+np.float64,0xbfca3737a1346e70,0xbfca96a86ae5d687,4
+np.float64,0x19aa87e03356,0x19aa87e03356,4
+np.float64,0xffb2593fe624b280,0xc05fedb99b467ced,4
+np.float64,0xbfdd8748fbbb0e92,0xbfdfd1a7df17252c,4
+np.float64,0x8004c7afc7098f60,0x8004c7afc7098f60,4
+np.float64,0x7fde48b2bf3c9164,0xbfe36ef1158ed420,4
+np.float64,0xbfec8e0eb0f91c1d,0xbff3d9319705a602,4
+np.float64,0xffea1be204f437c3,0xc0144f67298c3e6f,4
+np.float64,0x7fdb906b593720d6,0xbfce99233396eda7,4
+np.float64,0x3fef0f114ffe1e22,0x3ff76072a258a51b,4
+np.float64,0x3fe3e284c8e7c50a,0x3fe6e9b05e17c999,4
+np.float64,0xbfbda9eef23b53e0,0xbfbdcc1abb443597,4
+np.float64,0x3feb6454d4f6c8aa,0x3ff26f65a85baba4,4
+np.float64,0x3fea317439f462e8,0x3ff118e2187ef33f,4
+np.float64,0x376ad0646ed5b,0x376ad0646ed5b,4
+np.float64,0x7fdd461a1c3a8c33,0x3f7ba20fb79e785f,4
+np.float64,0xebc520a3d78a4,0xebc520a3d78a4,4
+np.float64,0x3fca90fe53352200,0x3fcaf45c7fae234d,4
+np.float64,0xbfe80dd1de701ba4,0xbfede97e12cde9de,4
+np.float64,0x3fd242b00ea48560,0x3fd2c5cf9bf69a31,4
+np.float64,0x7fe46c057828d80a,0xbfe2f76837488f94,4
+np.float64,0x3fc162bea322c580,0x3fc17e517c958867,4
+np.float64,0xffebf0452ff7e08a,0x3ffc3fd95c257b54,4
+np.float64,0xffd88043c6310088,0x4008b05598d0d95f,4
+np.float64,0x800d8c49da5b1894,0x800d8c49da5b1894,4
+np.float64,0xbfed33b487ba6769,0xbff4b0ea941f8a6a,4
+np.float64,0x16b881e22d711,0x16b881e22d711,4
+np.float64,0x288bae0051177,0x288bae0051177,4
+np.float64,0xffc83a0fe8307420,0x4006eff03da17f86,4
+np.float64,0x3fc7868b252f0d18,0x3fc7cb4954290324,4
+np.float64,0xbfe195514b232aa2,0xbfe398aae6c8ed76,4
+np.float64,0x800c001ae7f80036,0x800c001ae7f80036,4
+np.float64,0x7feb82abe7370557,0xbff1e13fe6fad23c,4
+np.float64,0xffecf609cdf9ec13,0xc0112aa1805ae59e,4
+np.float64,0xffddd654f63bacaa,0x3fe46cce899f710d,4
+np.float64,0x3fe2163138642c62,0x3fe44b9c760acd4c,4
+np.float64,0x4e570dc09cae2,0x4e570dc09cae2,4
+np.float64,0x7fe9e8d091f3d1a0,0xc000fe20f8e9a4b5,4
+np.float64,0x7fe60042952c0084,0x3fd0aa740f394c2a,4
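(Note on reading these files: each row follows the `dtype,input,output,ulperrortol` layout declared in the header of every umath-validation-set CSV, with `input` and `output` given as IEEE-754 bit patterns in hex and the last column an ULP error tolerance. The sketch below is not NumPy's actual test harness; the helper names `bits_to_float`, `ordered_key`, and `check_row`, and the rule that any NaN result matches a NaN reference, are illustrative assumptions about how such a row can be decoded and checked.)

```python
# Minimal sketch (assumptions noted above) of checking one umath-validation CSV.
import csv

import numpy as np

# Map the dtype column's tokens onto NumPy scalar types.
DTYPES = {"np.float32": np.float32, "np.float64": np.float64}


def bits_to_float(token, dtype):
    """Reinterpret a hex bit-pattern token (e.g. '0x3f800000') as a float."""
    width = np.dtype(dtype).itemsize  # 4 for float32, 8 for float64
    raw = int(token, 16).to_bytes(width, byteorder="little")
    return np.frombuffer(raw, dtype=dtype)[0]


def ordered_key(x):
    """Map a float's bit pattern to an integer monotonic in value, so that
    subtracting two keys counts the representable floats between them."""
    width = x.dtype.itemsize * 8
    u = int.from_bytes(x.tobytes(), byteorder="little")
    sign = 1 << (width - 1)
    # Negative floats store larger magnitudes as larger raw bits; flip them
    # so -0.0 and +0.0 both map to 0 and ordering matches numeric order.
    return (sign - u) if (u & sign) else u


def check_row(row, func):
    """Return True if func(input) is within ulperrortol ULPs of output."""
    dtype = DTYPES[row["dtype"]]
    x = bits_to_float(row["input"], dtype)
    expected = bits_to_float(row["output"], dtype)
    got = dtype(func(x))
    if np.isnan(expected) and np.isnan(got):
        return True  # assumption: any NaN result matches a NaN reference
    return abs(ordered_key(got) - ordered_key(expected)) <= int(row["ulperrortol"])


# Usage sketch against the tanh data set added below:
# with open("umath-validation-set-tanh.csv") as fh:
#     assert all(check_row(row, np.tanh) for row in csv.DictReader(fh))
```

Counting error in ULPs rather than as an absolute or relative delta is what makes a single integer tolerance (here 2 or 4) meaningful across the full range of inputs in these files, from subnormals to values near overflow.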
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv
new file mode 100644
index 00000000..9e3ddc60
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/data/umath-validation-set-tanh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbe26ebb0,0xbe25752f,2
+np.float32,0xbe22ecc0,0xbe219054,2
+np.float32,0x8010a6b3,0x8010a6b3,2
+np.float32,0x3135da,0x3135da,2
+np.float32,0xbe982afc,0xbe93d727,2
+np.float32,0x16a51f,0x16a51f,2
+np.float32,0x491e56,0x491e56,2
+np.float32,0x4bf7ca,0x4bf7ca,2
+np.float32,0x3eebc21c,0x3edc65b2,2
+np.float32,0x80155c94,0x80155c94,2
+np.float32,0x3e14f626,0x3e13eb6a,2
+np.float32,0x801a238f,0x801a238f,2
+np.float32,0xbde33a80,0xbde24cf9,2
+np.float32,0xbef8439c,0xbee67a51,2
+np.float32,0x7f60d0a5,0x3f800000,2
+np.float32,0x190ee3,0x190ee3,2
+np.float32,0x80759113,0x80759113,2
+np.float32,0x800afa9f,0x800afa9f,2
+np.float32,0x7110cf,0x7110cf,2
+np.float32,0x3cf709f0,0x3cf6f6c6,2
+np.float32,0x3ef58da4,0x3ee44fa7,2
+np.float32,0xbf220ff2,0xbf0f662c,2
+np.float32,0xfd888078,0xbf800000,2
+np.float32,0xbe324734,0xbe307f9b,2
+np.float32,0x3eb5cb4f,0x3eae8560,2
+np.float32,0xbf7e7d02,0xbf425493,2
+np.float32,0x3ddcdcf0,0x3ddc02c2,2
+np.float32,0x8026d27a,0x8026d27a,2
+np.float32,0x3d4c0fb1,0x3d4be484,2
+np.float32,0xbf27d2c9,0xbf134d7c,2
+np.float32,0x8029ff80,0x8029ff80,2
+np.float32,0x7f046d2c,0x3f800000,2
+np.float32,0x13f94b,0x13f94b,2
+np.float32,0x7f4ff922,0x3f800000,2
+np.float32,0x3f4ea2ed,0x3f2b03e4,2
+np.float32,0x3e7211f0,0x3e6da8cf,2
+np.float32,0x7f39d0cf,0x3f800000,2
+np.float32,0xfee57fc6,0xbf800000,2
+np.float32,0xff6fb326,0xbf800000,2
+np.float32,0xff800000,0xbf800000,2
+np.float32,0x3f0437a4,0x3ef32fcd,2
+np.float32,0xff546d1e,0xbf800000,2
+np.float32,0x3eb5645b,0x3eae2a5c,2
+np.float32,0x3f08a6e5,0x3ef9ff8f,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0x7f3413da,0x3f800000,2
+np.float32,0xfd760140,0xbf800000,2
+np.float32,0x7f3ad24a,0x3f800000,2
+np.float32,0xbf56e812,0xbf2f7f14,2
+np.float32,0xbece0338,0xbec3920a,2
+np.float32,0xbeede54a,0xbede22ae,2
+np.float32,0x7eaeb215,0x3f800000,2
+np.float32,0x3c213c00,0x3c213aab,2
+np.float32,0x7eaac217,0x3f800000,2
+np.float32,0xbf2f740e,0xbf1851a6,2
+np.float32,0x7f6ca5b8,0x3f800000,2
+np.float32,0xff42ce95,0xbf800000,2
+np.float32,0x802e4189,0x802e4189,2
+np.float32,0x80000001,0x80000001,2
+np.float32,0xbf31f298,0xbf19ebbe,2
+np.float32,0x3dcb0e6c,0x3dca64c1,2
+np.float32,0xbf29599c,0xbf145204,2
+np.float32,0x2e33f2,0x2e33f2,2
+np.float32,0x1c11e7,0x1c11e7,2
+np.float32,0x3f3b188d,0x3f1fa302,2
+np.float32,0x113300,0x113300,2
+np.float32,0x8054589e,0x8054589e,2
+np.float32,0x2a9e69,0x2a9e69,2
+np.float32,0xff513af7,0xbf800000,2
+np.float32,0x7f2e987a,0x3f800000,2
+np.float32,0x807cd426,0x807cd426,2
+np.float32,0x7f0dc4e4,0x3f800000,2
+np.float32,0x7e7c0d56,0x3f800000,2
+np.float32,0x5cb076,0x5cb076,2
+np.float32,0x80576426,0x80576426,2
+np.float32,0xff616222,0xbf800000,2
+np.float32,0xbf7accb5,0xbf40c005,2
+np.float32,0xfe4118c8,0xbf800000,2
+np.float32,0x804b9327,0x804b9327,2
+np.float32,0x3ed2b428,0x3ec79026,2
+np.float32,0x3f4a048f,0x3f286d41,2
+np.float32,0x800000,0x800000,2
+np.float32,0x7efceb9f,0x3f800000,2
+np.float32,0xbf5fe2d3,0xbf34246f,2
+np.float32,0x807e086a,0x807e086a,2
+np.float32,0x7ef5e856,0x3f800000,2
+np.float32,0xfc546f00,0xbf800000,2
+np.float32,0x3a65b890,0x3a65b88c,2
+np.float32,0x800cfa70,0x800cfa70,2
+np.float32,0x80672ea7,0x80672ea7,2
+np.float32,0x3f2bf3f2,0x3f160a12,2
+np.float32,0xbf0ab67e,0xbefd2004,2
+np.float32,0x3f2a0bb4,0x3f14c824,2
+np.float32,0xbeff5374,0xbeec12d7,2
+np.float32,0xbf221b58,0xbf0f6dff,2
+np.float32,0x7cc1f3,0x7cc1f3,2
+np.float32,0x7f234e3c,0x3f800000,2
+np.float32,0x3f60ff10,0x3f34b37d,2
+np.float32,0xbdd957f0,0xbdd887fe,2
+np.float32,0x801ce048,0x801ce048,2
+np.float32,0x7f3a8f76,0x3f800000,2
+np.float32,0xfdd13d08,0xbf800000,2
+np.float32,0x3e9af4a4,0x3e966445,2
+np.float32,0x1e55f3,0x1e55f3,2
+np.float32,0x327905,0x327905,2
+np.float32,0xbf03cf0b,0xbef28dad,2
+np.float32,0x3f0223d3,0x3eeff4f4,2
+np.float32,0xfdd96ff8,0xbf800000,2
+np.float32,0x428db8,0x428db8,2
+np.float32,0xbd74a200,0xbd7457a5,2
+np.float32,0x2a63a3,0x2a63a3,2
+np.float32,0x7e8aa9d7,0x3f800000,2
+np.float32,0x7f50b810,0x3f800000,2
+np.float32,0xbce5ec80,0xbce5dd0d,2
+np.float32,0x54711,0x54711,2
+np.float32,0x8074212a,0x8074212a,2
+np.float32,0xbf13d0ec,0xbf0551b5,2
+np.float32,0x80217f89,0x80217f89,2
+np.float32,0x3f300824,0x3f18b12f,2
+np.float32,0x7d252462,0x3f800000,2
+np.float32,0x807a154c,0x807a154c,2
+np.float32,0x8064d4b9,0x8064d4b9,2
+np.float32,0x804543b4,0x804543b4,2
+np.float32,0x4c269e,0x4c269e,2
+np.float32,0xff39823b,0xbf800000,2
+np.float32,0x3f5040b1,0x3f2be80b,2
+np.float32,0xbf7028c1,0xbf3bfee5,2
+np.float32,0x3e94eb78,0x3e90db93,2
+np.float32,0x3ccc1b40,0x3ccc1071,2
+np.float32,0xbe8796f0,0xbe8481a1,2
+np.float32,0xfc767bc0,0xbf800000,2
+np.float32,0xbdd81ed0,0xbdd75259,2
+np.float32,0xbed31bfc,0xbec7e82d,2
+np.float32,0xbf350a9e,0xbf1be1c6,2
+np.float32,0x33d41f,0x33d41f,2
+np.float32,0x3f73e076,0x3f3db0b5,2
+np.float32,0x3f800000,0x3f42f7d6,2
+np.float32,0xfee27c14,0xbf800000,2
+np.float32,0x7f6e4388,0x3f800000,2
+np.float32,0x4ea19b,0x4ea19b,2
+np.float32,0xff2d75f2,0xbf800000,2
+np.float32,0x7ee225ca,0x3f800000,2
+np.float32,0x3f31cb4b,0x3f19d2a4,2
+np.float32,0x80554a9d,0x80554a9d,2
+np.float32,0x3f4d57fa,0x3f2a4c03,2
+np.float32,0x3eac6a88,0x3ea62e72,2
+np.float32,0x773520,0x773520,2
+np.float32,0x8079c20a,0x8079c20a,2
+np.float32,0xfeb1eb94,0xbf800000,2
+np.float32,0xfe8d81c0,0xbf800000,2
+np.float32,0xfeed6902,0xbf800000,2
+np.float32,0x8066bb65,0x8066bb65,2
+np.float32,0x7f800000,0x3f800000,2
+np.float32,0x1,0x1,2
+np.float32,0x3f2c66a4,0x3f16554a,2
+np.float32,0x3cd231,0x3cd231,2
+np.float32,0x3e932a64,0x3e8f3e0c,2
+np.float32,0xbf3ab1c3,0xbf1f6420,2
+np.float32,0xbc902b20,0xbc902751,2
+np.float32,0x7dac0a5b,0x3f800000,2
+np.float32,0x3f2b7e06,0x3f15bc93,2
+np.float32,0x75de0,0x75de0,2
+np.float32,0x8020b7bc,0x8020b7bc,2
+np.float32,0x3f257cda,0x3f11bb6b,2
+np.float32,0x807480e5,0x807480e5,2
+np.float32,0xfe00d758,0xbf800000,2
+np.float32,0xbd9b54e0,0xbd9b08cd,2
+np.float32,0x4dfbe3,0x4dfbe3,2
+np.float32,0xff645788,0xbf800000,2
+np.float32,0xbe92c80a,0xbe8ee360,2
+np.float32,0x3eb9b400,0x3eb1f77c,2
+np.float32,0xff20b69c,0xbf800000,2
+np.float32,0x623c28,0x623c28,2
+np.float32,0xff235748,0xbf800000,2
+np.float32,0xbf3bbc56,0xbf2006f3,2
+np.float32,0x7e6f78b1,0x3f800000,2
+np.float32,0x7e1584e9,0x3f800000,2
+np.float32,0xff463423,0xbf800000,2
+np.float32,0x8002861e,0x8002861e,2
+np.float32,0xbf0491d8,0xbef3bb6a,2
+np.float32,0x7ea3bc17,0x3f800000,2
+np.float32,0xbedde7ea,0xbed0fb49,2
+np.float32,0xbf4bac48,0xbf295c8b,2
+np.float32,0xff28e276,0xbf800000,2
+np.float32,0x7e8f3bf5,0x3f800000,2
+np.float32,0xbf0a4a73,0xbefc7c9d,2
+np.float32,0x7ec5bd96,0x3f800000,2
+np.float32,0xbf4c22e8,0xbf299f2c,2
+np.float32,0x3e3970a0,0x3e377064,2
+np.float32,0x3ecb1118,0x3ec10c88,2
+np.float32,0xff548a7a,0xbf800000,2
+np.float32,0xfe8ec550,0xbf800000,2
+np.float32,0x3e158985,0x3e147bb2,2
+np.float32,0x7eb79ad7,0x3f800000,2
+np.float32,0xbe811384,0xbe7cd1ab,2
+np.float32,0xbdc4b9e8,0xbdc41f94,2
+np.float32,0xe0fd5,0xe0fd5,2
+np.float32,0x3f2485f2,0x3f11142b,2
+np.float32,0xfdd3c3d8,0xbf800000,2
+np.float32,0xfe8458e6,0xbf800000,2
+np.float32,0x3f06e398,0x3ef74dd8,2
+np.float32,0xff4752cf,0xbf800000,2
+np.float32,0x6998e3,0x6998e3,2
+np.float32,0x626751,0x626751,2
+np.float32,0x806631d6,0x806631d6,2
+np.float32,0xbf0c3cf4,0xbeff6c54,2
+np.float32,0x802860f8,0x802860f8,2
+np.float32,0xff2952cb,0xbf800000,2
+np.float32,0xff31d40b,0xbf800000,2
+np.float32,0x7c389473,0x3f800000,2
+np.float32,0x3dcd2f1b,0x3dcc8010,2
+np.float32,0x3d70c29f,0x3d707bbc,2
+np.float32,0x3f6bd386,0x3f39f979,2
+np.float32,0x1efec9,0x1efec9,2
+np.float32,0x3f675518,0x3f37d338,2
+np.float32,0x5fdbe3,0x5fdbe3,2
+np.float32,0x5d684e,0x5d684e,2
+np.float32,0xbedfe748,0xbed2a4c7,2
+np.float32,0x3f0cb07a,0x3f000cdc,2
+np.float32,0xbf77151e,0xbf3f1f5d,2
+np.float32,0x7f038ea0,0x3f800000,2
+np.float32,0x3ea91be9,0x3ea3376f,2
+np.float32,0xbdf20738,0xbdf0e861,2
+np.float32,0x807ea380,0x807ea380,2
+np.float32,0x2760ca,0x2760ca,2
+np.float32,0x7f20a544,0x3f800000,2
+np.float32,0x76ed83,0x76ed83,2
+np.float32,0x15a441,0x15a441,2
+np.float32,0x74c76d,0x74c76d,2
+np.float32,0xff3d5c2a,0xbf800000,2
+np.float32,0x7f6a76a6,0x3f800000,2
+np.float32,0x3eb87067,0x3eb0dabe,2
+np.float32,0xbf515cfa,0xbf2c83af,2
+np.float32,0xbdececc0,0xbdebdf9d,2
+np.float32,0x7f51b7c2,0x3f800000,2
+np.float32,0x3eb867ac,0x3eb0d30d,2
+np.float32,0xff50fd84,0xbf800000,2
+np.float32,0x806945e9,0x806945e9,2
+np.float32,0x298eed,0x298eed,2
+np.float32,0x441f53,0x441f53,2
+np.float32,0x8066d4b0,0x8066d4b0,2
+np.float32,0x3f6a479c,0x3f393dae,2
+np.float32,0xbf6ce2a7,0xbf3a7921,2
+np.float32,0x8064c3cf,0x8064c3cf,2
+np.float32,0xbf2d8146,0xbf170dfd,2
+np.float32,0x3b0e82,0x3b0e82,2
+np.float32,0xbea97574,0xbea387dc,2
+np.float32,0x67ad15,0x67ad15,2
+np.float32,0xbf68478f,0xbf38485a,2
+np.float32,0xff6f593b,0xbf800000,2
+np.float32,0xbeda26f2,0xbecdd806,2
+np.float32,0xbd216d50,0xbd2157ee,2
+np.float32,0x7a8544db,0x3f800000,2
+np.float32,0x801df20b,0x801df20b,2
+np.float32,0xbe14ba24,0xbe13b0a8,2
+np.float32,0xfdc6d8a8,0xbf800000,2
+np.float32,0x1d6b49,0x1d6b49,2
+np.float32,0x7f5ff1b8,0x3f800000,2
+np.float32,0x3f75e032,0x3f3e9625,2
+np.float32,0x7f2c5687,0x3f800000,2
+np.float32,0x3d95fb6c,0x3d95b6ee,2
+np.float32,0xbea515e4,0xbe9f97c8,2
+np.float32,0x7f2b2cd7,0x3f800000,2
+np.float32,0x3f076f7a,0x3ef8241e,2
+np.float32,0x5178ca,0x5178ca,2
+np.float32,0xbeb5976a,0xbeae5781,2
+np.float32,0x3e3c3563,0x3e3a1e13,2
+np.float32,0xbd208530,0xbd20702a,2
+np.float32,0x3eb03b04,0x3ea995ef,2
+np.float32,0x17fb9c,0x17fb9c,2
+np.float32,0xfca68e40,0xbf800000,2
+np.float32,0xbf5e7433,0xbf336a9f,2
+np.float32,0xff5b8d3d,0xbf800000,2
+np.float32,0x8003121d,0x8003121d,2
+np.float32,0xbe6dd344,0xbe69a3b0,2
+np.float32,0x67cc4,0x67cc4,2
+np.float32,0x9b01d,0x9b01d,2
+np.float32,0x127c13,0x127c13,2
+np.float32,0xfea5e3d6,0xbf800000,2
+np.float32,0xbdf5c610,0xbdf499c1,2
+np.float32,0x3aff4c00,0x3aff4beb,2
+np.float32,0x3b00afd0,0x3b00afc5,2
+np.float32,0x479618,0x479618,2
+np.float32,0x801cbd05,0x801cbd05,2
+np.float32,0x3ec9249f,0x3ebf6579,2
+np.float32,0x3535c4,0x3535c4,2
+np.float32,0xbeb4f662,0xbeadc915,2
+np.float32,0x8006fda6,0x8006fda6,2
+np.float32,0xbf4f3097,0xbf2b5239,2
+np.float32,0xbf3cb9a8,0xbf20a0e9,2
+np.float32,0x32ced0,0x32ced0,2
+np.float32,0x7ea34e76,0x3f800000,2
+np.float32,0x80063046,0x80063046,2
+np.float32,0x80727e8b,0x80727e8b,2
+np.float32,0xfd6b5780,0xbf800000,2
+np.float32,0x80109815,0x80109815,2
+np.float32,0xfdcc8a78,0xbf800000,2
+np.float32,0x81562,0x81562,2
+np.float32,0x803dfacc,0x803dfacc,2
+np.float32,0xbe204318,0xbe1ef75f,2
+np.float32,0xbf745d34,0xbf3de8e2,2
+np.float32,0xff13fdcc,0xbf800000,2
+np.float32,0x7f75ba8c,0x3f800000,2
+np.float32,0x806c04b4,0x806c04b4,2
+np.float32,0x3ec61ca6,0x3ebcc877,2
+np.float32,0xbeaea984,0xbea8301f,2
+np.float32,0xbf4dcd0e,0xbf2a8d34,2
+np.float32,0x802a01d3,0x802a01d3,2
+np.float32,0xbf747be5,0xbf3df6ad,2
+np.float32,0xbf75cbd2,0xbf3e8d0f,2
+np.float32,0x7db86576,0x3f800000,2
+np.float32,0xff49a2c3,0xbf800000,2
+np.float32,0xbedc5314,0xbecfa978,2
+np.float32,0x8078877b,0x8078877b,2
+np.float32,0xbead4824,0xbea6f499,2
+np.float32,0xbf3926e3,0xbf1e716c,2
+np.float32,0x807f4a1c,0x807f4a1c,2
+np.float32,0x7f2cd8fd,0x3f800000,2
+np.float32,0x806cfcca,0x806cfcca,2
+np.float32,0xff1aa048,0xbf800000,2
+np.float32,0x7eb9ea08,0x3f800000,2
+np.float32,0xbf1034bc,0xbf02ab3a,2
+np.float32,0xbd087830,0xbd086b44,2
+np.float32,0x7e071034,0x3f800000,2
+np.float32,0xbefcc9de,0xbeea122f,2
+np.float32,0x80796d7a,0x80796d7a,2
+np.float32,0x33ce46,0x33ce46,2
+np.float32,0x8074a783,0x8074a783,2
+np.float32,0xbe95a56a,0xbe918691,2
+np.float32,0xbf2ff3f4,0xbf18a42d,2
+np.float32,0x1633e9,0x1633e9,2
+np.float32,0x7f0f104b,0x3f800000,2
+np.float32,0xbf800000,0xbf42f7d6,2
+np.float32,0x3d2cd6,0x3d2cd6,2
+np.float32,0xfed43e16,0xbf800000,2
+np.float32,0x3ee6faec,0x3ed87d2c,2
+np.float32,0x3f2c32d0,0x3f163352,2
+np.float32,0xff4290c0,0xbf800000,2
+np.float32,0xbf66500e,0xbf37546a,2
+np.float32,0x7dfb8fe3,0x3f800000,2
+np.float32,0x3f20ba5d,0x3f0e7b16,2
+np.float32,0xff30c7ae,0xbf800000,2
+np.float32,0x1728a4,0x1728a4,2
+np.float32,0x340d82,0x340d82,2
+np.float32,0xff7870b7,0xbf800000,2
+np.float32,0xbeac6ac4,0xbea62ea7,2
+np.float32,0xbef936fc,0xbee73c36,2
+np.float32,0x3ec7e12c,0x3ebe4ef8,2
+np.float32,0x80673488,0x80673488,2
+np.float32,0xfdf14c90,0xbf800000,2
+np.float32,0x3f182568,0x3f08726e,2
+np.float32,0x7ed7dcd0,0x3f800000,2
+np.float32,0x3de4da34,0x3de3e790,2
+np.float32,0xff7fffff,0xbf800000,2
+np.float32,0x4ff90c,0x4ff90c,2
+np.float32,0x3efb0d1c,0x3ee8b1d6,2
+np.float32,0xbf66e952,0xbf379ef4,2
+np.float32,0xba9dc,0xba9dc,2
+np.float32,0xff67c766,0xbf800000,2
+np.float32,0x7f1ffc29,0x3f800000,2
+np.float32,0x3f51c906,0x3f2cbe99,2
+np.float32,0x3f2e5792,0x3f179968,2
+np.float32,0x3ecb9750,0x3ec17fa0,2
+np.float32,0x7f3fcefc,0x3f800000,2
+np.float32,0xbe4e30fc,0xbe4b72f9,2
+np.float32,0x7e9bc4ce,0x3f800000,2
+np.float32,0x7e70aa1f,0x3f800000,2
+np.float32,0x14c6e9,0x14c6e9,2
+np.float32,0xbcf327c0,0xbcf3157a,2
+np.float32,0xff1fd204,0xbf800000,2
+np.float32,0x7d934a03,0x3f800000,2
+np.float32,0x8028bf1e,0x8028bf1e,2
+np.float32,0x7f0800b7,0x3f800000,2
+np.float32,0xfe04825c,0xbf800000,2
+np.float32,0x807210ac,0x807210ac,2
+np.float32,0x3f7faf7c,0x3f42d5fd,2
+np.float32,0x3e04a543,0x3e03e899,2
+np.float32,0x3e98ea15,0x3e94863e,2
+np.float32,0x3d2a2e48,0x3d2a153b,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0x20a488,0x20a488,2
+np.float32,0x3f6ba86a,0x3f39e51a,2
+np.float32,0x0,0x0,2
+np.float32,0x3e892ddd,0x3e85fcfe,2
+np.float32,0x3e2da627,0x3e2c00e0,2
+np.float32,0xff000a50,0xbf800000,2
+np.float32,0x3eb749f4,0x3eafd739,2
+np.float32,0x8024c0ae,0x8024c0ae,2
+np.float32,0xfc8f3b40,0xbf800000,2
+np.float32,0xbf685fc7,0xbf385405,2
+np.float32,0x3f1510e6,0x3f063a4f,2
+np.float32,0x3f68e8ad,0x3f3895d8,2
+np.float32,0x3dba8608,0x3dba0271,2
+np.float32,0xbf16ea10,0xbf079017,2
+np.float32,0xb3928,0xb3928,2
+np.float32,0xfe447c00,0xbf800000,2
+np.float32,0x3db9cd57,0x3db94b45,2
+np.float32,0x803b66b0,0x803b66b0,2
+np.float32,0x805b5e02,0x805b5e02,2
+np.float32,0x7ec93f61,0x3f800000,2
+np.float32,0x8005a126,0x8005a126,2
+np.float32,0x6d8888,0x6d8888,2
+np.float32,0x3e21b7de,0x3e206314,2
+np.float32,0xbec9c31e,0xbebfedc2,2
+np.float32,0xbea88aa8,0xbea2b4e5,2
+np.float32,0x3d8fc310,0x3d8f86bb,2
+np.float32,0xbf3cc68a,0xbf20a8b8,2
+np.float32,0x432690,0x432690,2
+np.float32,0xbe51d514,0xbe4ef1a3,2
+np.float32,0xbcda6d20,0xbcda5fe1,2
+np.float32,0xfe24e458,0xbf800000,2
+np.float32,0xfedc8c14,0xbf800000,2
+np.float32,0x7f7e9bd4,0x3f800000,2
+np.float32,0x3ebcc880,0x3eb4ab44,2
+np.float32,0xbe0aa490,0xbe09cd44,2
+np.float32,0x3dc9158c,0x3dc870c3,2
+np.float32,0x3e5c319e,0x3e58dc90,2
+np.float32,0x1d4527,0x1d4527,2
+np.float32,0x2dbf5,0x2dbf5,2
+np.float32,0xbf1f121f,0xbf0d5534,2
+np.float32,0x7e3e9ab5,0x3f800000,2
+np.float32,0x7f74b5c1,0x3f800000,2
+np.float32,0xbf6321ba,0xbf35c42b,2
+np.float32,0xbe5c7488,0xbe591c79,2
+np.float32,0x7e7b02cd,0x3f800000,2
+np.float32,0xfe7cbfa4,0xbf800000,2
+np.float32,0xbeace360,0xbea69a86,2
+np.float32,0x7e149b00,0x3f800000,2
+np.float32,0xbf61a700,0xbf35079a,2
+np.float32,0x7eb592a7,0x3f800000,2
+np.float32,0x3f2105e6,0x3f0eaf30,2
+np.float32,0xfd997a88,0xbf800000,2
+np.float32,0xff5d093b,0xbf800000,2
+np.float32,0x63aede,0x63aede,2
+np.float32,0x6907ee,0x6907ee,2
+np.float32,0xbf7578ee,0xbf3e680f,2
+np.float32,0xfea971e8,0xbf800000,2
+np.float32,0x3f21d0f5,0x3f0f3aed,2
+np.float32,0x3a50e2,0x3a50e2,2
+np.float32,0x7f0f5b1e,0x3f800000,2
+np.float32,0x805b9765,0x805b9765,2
+np.float32,0xbe764ab8,0xbe71a664,2
+np.float32,0x3eafac7f,0x3ea91701,2
+np.float32,0x807f4130,0x807f4130,2
+np.float32,0x7c5f31,0x7c5f31,2
+np.float32,0xbdbe0e30,0xbdbd8300,2
+np.float32,0x7ecfe4e0,0x3f800000,2
+np.float32,0xff7cb628,0xbf800000,2
+np.float32,0xff1842bc,0xbf800000,2
+np.float32,0xfd4163c0,0xbf800000,2
+np.float32,0x800e11f7,0x800e11f7,2
+np.float32,0x7f3adec8,0x3f800000,2
+np.float32,0x7f597514,0x3f800000,2
+np.float32,0xbe986e14,0xbe9414a4,2
+np.float32,0x800fa9d7,0x800fa9d7,2
+np.float32,0xff5b79c4,0xbf800000,2
+np.float32,0x80070565,0x80070565,2
+np.float32,0xbee5628e,0xbed72d60,2
+np.float32,0x3f438ef2,0x3f24b3ca,2
+np.float32,0xcda91,0xcda91,2
+np.float32,0x7e64151a,0x3f800000,2
+np.float32,0xbe95d584,0xbe91b2c7,2
+np.float32,0x8022c2a1,0x8022c2a1,2
+np.float32,0x7e7097bf,0x3f800000,2
+np.float32,0x80139035,0x80139035,2
+np.float32,0x804de2cb,0x804de2cb,2
+np.float32,0xfde5d178,0xbf800000,2
+np.float32,0x6d238,0x6d238,2
+np.float32,0x807abedc,0x807abedc,2
+np.float32,0x3f450a12,0x3f259129,2
+np.float32,0x3ef1c120,0x3ee141f2,2
+np.float32,0xfeb64dae,0xbf800000,2
+np.float32,0x8001732c,0x8001732c,2
+np.float32,0x3f76062e,0x3f3ea711,2
+np.float32,0x3eddd550,0x3ed0ebc8,2
+np.float32,0xff5ca1d4,0xbf800000,2
+np.float32,0xbf49dc5e,0xbf285673,2
+np.float32,0x7e9e5438,0x3f800000,2
+np.float32,0x7e83625e,0x3f800000,2
+np.float32,0x3f5dc41c,0x3f3310da,2
+np.float32,0x3f583efa,0x3f30342f,2
+np.float32,0xbe26bf88,0xbe254a2d,2
+np.float32,0xff1e0beb,0xbf800000,2
+np.float32,0xbe2244c8,0xbe20ec86,2
+np.float32,0xff0b1630,0xbf800000,2
+np.float32,0xff338dd6,0xbf800000,2
+np.float32,0x3eafc22c,0x3ea92a51,2
+np.float32,0x800ea07f,0x800ea07f,2
+np.float32,0x3f46f006,0x3f26aa7e,2
+np.float32,0x3e5f57cd,0x3e5bde16,2
+np.float32,0xbf1b2d8e,0xbf0a9a93,2
+np.float32,0xfeacdbe0,0xbf800000,2
+np.float32,0x7e5ea4bc,0x3f800000,2
+np.float32,0xbf51cbe2,0xbf2cc027,2
+np.float32,0x8073644c,0x8073644c,2
+np.float32,0xff2d6bfe,0xbf800000,2
+np.float32,0x3f65f0f6,0x3f37260a,2
+np.float32,0xff4b37a6,0xbf800000,2
+np.float32,0x712df7,0x712df7,2
+np.float32,0x7f71ef17,0x3f800000,2
+np.float32,0x8042245c,0x8042245c,2
+np.float32,0x3e5dde7b,0x3e5a760d,2
+np.float32,0x8069317d,0x8069317d,2
+np.float32,0x807932dd,0x807932dd,2
+np.float32,0x802f847e,0x802f847e,2
+np.float32,0x7e9300,0x7e9300,2
+np.float32,0x8040b4ab,0x8040b4ab,2
+np.float32,0xff76ef8e,0xbf800000,2
+np.float32,0x4aae3a,0x4aae3a,2
+np.float32,0x8058de73,0x8058de73,2
+np.float32,0x7e4d58c0,0x3f800000,2
+np.float32,0x3d811b30,0x3d80ef79,2
+np.float32,0x7ec952cc,0x3f800000,2
+np.float32,0xfe162b1c,0xbf800000,2
+np.float32,0x3f0f1187,0x3f01d367,2
+np.float32,0xbf2f3458,0xbf182878,2
+np.float32,0x5ceb14,0x5ceb14,2
+np.float32,0xbec29476,0xbeb9b939,2
+np.float32,0x3e71f943,0x3e6d9176,2
+np.float32,0x3ededefc,0x3ed1c909,2
+np.float32,0x805df6ac,0x805df6ac,2
+np.float32,0x3e5ae2c8,0x3e579ca8,2
+np.float32,0x3f6ad2c3,0x3f397fdf,2
+np.float32,0x7d5f94d3,0x3f800000,2
+np.float32,0xbeec7fe4,0xbedd0037,2
+np.float32,0x3f645304,0x3f365b0d,2
+np.float32,0xbf69a087,0xbf38edef,2
+np.float32,0x8025102e,0x8025102e,2
+np.float32,0x800db486,0x800db486,2
+np.float32,0x4df6c7,0x4df6c7,2
+np.float32,0x806d8cdd,0x806d8cdd,2
+np.float32,0x7f0c78cc,0x3f800000,2
+np.float32,0x7e1cf70b,0x3f800000,2
+np.float32,0x3e0ae570,0x3e0a0cf7,2
+np.float32,0x80176ef8,0x80176ef8,2
+np.float32,0x3f38b60c,0x3f1e2bbb,2
+np.float32,0x3d3071e0,0x3d3055f5,2
+np.float32,0x3ebfcfdd,0x3eb750a9,2
+np.float32,0xfe2cdec0,0xbf800000,2
+np.float32,0x7eeb2eed,0x3f800000,2
+np.float32,0x8026c904,0x8026c904,2
+np.float32,0xbec79bde,0xbebe133a,2
+np.float32,0xbf7dfab6,0xbf421d47,2
+np.float32,0x805b3cfd,0x805b3cfd,2
+np.float32,0xfdfcfb68,0xbf800000,2
+np.float32,0xbd537ec0,0xbd534eaf,2
+np.float32,0x52ce73,0x52ce73,2
+np.float32,0xfeac6ea6,0xbf800000,2
+np.float32,0x3f2c2990,0x3f162d41,2
+np.float32,0x3e3354e0,0x3e318539,2
+np.float32,0x802db22b,0x802db22b,2
+np.float32,0x7f0faa83,0x3f800000,2
+np.float32,0x7f10e161,0x3f800000,2
+np.float32,0x7f165c60,0x3f800000,2
+np.float32,0xbf5a756f,0xbf315c82,2
+np.float32,0x7f5a4b68,0x3f800000,2
+np.float32,0xbd77fbf0,0xbd77ae7c,2
+np.float32,0x65d83c,0x65d83c,2
+np.float32,0x3e5f28,0x3e5f28,2
+np.float32,0x8040ec92,0x8040ec92,2
+np.float32,0xbf2b41a6,0xbf1594d5,2
+np.float32,0x7f2f88f1,0x3f800000,2
+np.float32,0xfdb64ab8,0xbf800000,2
+np.float32,0xbf7a3ff1,0xbf4082f5,2
+np.float32,0x1948fc,0x1948fc,2
+np.float32,0x802c1039,0x802c1039,2
+np.float32,0x80119274,0x80119274,2
+np.float32,0x7e885d7b,0x3f800000,2
+np.float32,0xfaf6a,0xfaf6a,2
+np.float32,0x3eba28c4,0x3eb25e1d,2
+np.float32,0x3e4df370,0x3e4b37da,2
+np.float32,0xbf19eff6,0xbf09b97d,2
+np.float32,0xbeddd3c6,0xbed0ea7f,2
+np.float32,0xff6fc971,0xbf800000,2
+np.float32,0x7e93de29,0x3f800000,2
+np.float32,0x3eb12332,0x3eaa6485,2
+np.float32,0x3eb7c6e4,0x3eb04563,2
+np.float32,0x4a67ee,0x4a67ee,2
+np.float32,0xff1cafde,0xbf800000,2
+np.float32,0x3f5e2812,0x3f3343da,2
+np.float32,0x3f060e04,0x3ef605d4,2
+np.float32,0x3e9027d8,0x3e8c76a6,2
+np.float32,0xe2d33,0xe2d33,2
+np.float32,0xff4c94fc,0xbf800000,2
+np.float32,0xbf574908,0xbf2fb26b,2
+np.float32,0xbf786c08,0xbf3fb68e,2
+np.float32,0x8011ecab,0x8011ecab,2
+np.float32,0xbf061c6a,0xbef61bfa,2
+np.float32,0x7eea5f9d,0x3f800000,2
+np.float32,0x3ea2e19c,0x3e9d99a5,2
+np.float32,0x8071550c,0x8071550c,2
+np.float32,0x41c70b,0x41c70b,2
+np.float32,0x80291fc8,0x80291fc8,2
+np.float32,0x43b1ec,0x43b1ec,2
+np.float32,0x32f5a,0x32f5a,2
+np.float32,0xbe9310ec,0xbe8f2692,2
+np.float32,0x7f75f6bf,0x3f800000,2
+np.float32,0x3e6642a6,0x3e6274d2,2
+np.float32,0x3ecb88e0,0x3ec1733f,2
+np.float32,0x804011b6,0x804011b6,2
+np.float32,0x80629cca,0x80629cca,2
+np.float32,0x8016b914,0x8016b914,2
+np.float32,0xbdd05fc0,0xbdcfa870,2
+np.float32,0x807b824d,0x807b824d,2
+np.float32,0xfeec2576,0xbf800000,2
+np.float32,0xbf54bf22,0xbf2e584c,2
+np.float32,0xbf185eb0,0xbf089b6b,2
+np.float32,0xfbc09480,0xbf800000,2
+np.float32,0x3f413054,0x3f234e25,2
+np.float32,0x7e9e32b8,0x3f800000,2
+np.float32,0x266296,0x266296,2
+np.float32,0x460284,0x460284,2
+np.float32,0x3eb0b056,0x3ea9fe5a,2
+np.float32,0x1a7be5,0x1a7be5,2
+np.float32,0x7f099895,0x3f800000,2
+np.float32,0x3f3614f0,0x3f1c88ef,2
+np.float32,0x7e757dc2,0x3f800000,2
+np.float32,0x801fc91e,0x801fc91e,2
+np.float32,0x3f5ce37d,0x3f329ddb,2
+np.float32,0x3e664d70,0x3e627f15,2
+np.float32,0xbf38ed78,0xbf1e4dfa,2
+np.float32,0xbf5c563d,0xbf325543,2
+np.float32,0xbe91cc54,0xbe8dfb24,2
+np.float32,0x3d767fbe,0x3d7633ac,2
+np.float32,0xbf6aeb40,0xbf398b7f,2
+np.float32,0x7f40508b,0x3f800000,2
+np.float32,0x2650df,0x2650df,2
+np.float32,0xbe8cea3c,0xbe897628,2
+np.float32,0x80515af8,0x80515af8,2
+np.float32,0x7f423986,0x3f800000,2
+np.float32,0xbdf250e8,0xbdf1310c,2
+np.float32,0xfe89288a,0xbf800000,2
+np.float32,0x397b3b,0x397b3b,2
+np.float32,0x7e5e91b0,0x3f800000,2
+np.float32,0x6866e2,0x6866e2,2
+np.float32,0x7f4d8877,0x3f800000,2
+np.float32,0x3e6c4a21,0x3e682ee3,2
+np.float32,0xfc3d5980,0xbf800000,2
+np.float32,0x7eae2cd0,0x3f800000,2
+np.float32,0xbf241222,0xbf10c579,2
+np.float32,0xfebc02de,0xbf800000,2
+np.float32,0xff6e0645,0xbf800000,2
+np.float32,0x802030b6,0x802030b6,2
+np.float32,0x7ef9a441,0x3f800000,2
+np.float32,0x3fcf9f,0x3fcf9f,2
+np.float32,0xbf0ccf13,0xbf0023cc,2
+np.float32,0xfefee688,0xbf800000,2
+np.float32,0xbf6c8e0c,0xbf3a5160,2
+np.float32,0xfe749c28,0xbf800000,2
+np.float32,0x7f7fffff,0x3f800000,2
+np.float32,0x58c1a0,0x58c1a0,2
+np.float32,0x3f2de0a1,0x3f174c17,2
+np.float32,0xbf5f7138,0xbf33eb03,2
+np.float32,0x3da15270,0x3da0fd3c,2
+np.float32,0x3da66560,0x3da607e4,2
+np.float32,0xbf306f9a,0xbf18f3c6,2
+np.float32,0x3e81a4de,0x3e7de293,2
+np.float32,0xbebb5fb8,0xbeb36f1a,2
+np.float32,0x14bf64,0x14bf64,2
+np.float32,0xbeac46c6,0xbea60e73,2
+np.float32,0xbdcdf210,0xbdcd4111,2
+np.float32,0x3f7e3cd9,0x3f42395e,2
+np.float32,0xbc4be640,0xbc4be38e,2
+np.float32,0xff5f53b4,0xbf800000,2
+np.float32,0xbf1315ae,0xbf04c90b,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0xbf6a4149,0xbf393aaa,2
+np.float32,0x3f66b8ee,0x3f378772,2
+np.float32,0xff29293e,0xbf800000,2
+np.float32,0xbcc989c0,0xbcc97f58,2
+np.float32,0xbd9a1b70,0xbd99d125,2
+np.float32,0xfef353cc,0xbf800000,2
+np.float32,0xbdc30cf0,0xbdc27683,2
+np.float32,0xfdfd6768,0xbf800000,2
+np.float32,0x7ebac44c,0x3f800000,2
+np.float32,0xff453cd6,0xbf800000,2
+np.float32,0x3ef07720,0x3ee03787,2
+np.float32,0x80219c14,0x80219c14,2
+np.float32,0x805553a8,0x805553a8,2
+np.float32,0x80703928,0x80703928,2
+np.float32,0xff16d3a7,0xbf800000,2
+np.float32,0x3f1472bc,0x3f05c77b,2
+np.float32,0x3eeea37a,0x3edebcf9,2
+np.float32,0x3db801e6,0x3db7838d,2
+np.float32,0x800870d2,0x800870d2,2
+np.float32,0xbea1172c,0xbe9bfa32,2
+np.float32,0x3f1f5e7c,0x3f0d8a42,2
+np.float32,0x123cdb,0x123cdb,2
+np.float32,0x7f6e6b06,0x3f800000,2
+np.float32,0x3ed80573,0x3ecc0def,2
+np.float32,0xfea31b82,0xbf800000,2
+np.float32,0x6744e0,0x6744e0,2
+np.float32,0x695e8b,0x695e8b,2
+np.float32,0xbee3888a,0xbed5a67d,2
+np.float32,0x7f64bc2a,0x3f800000,2
+np.float32,0x7f204244,0x3f800000,2
+np.float32,0x7f647102,0x3f800000,2
+np.float32,0x3dd8ebc0,0x3dd81d03,2
+np.float32,0x801e7ab1,0x801e7ab1,2
+np.float32,0x7d034b56,0x3f800000,2
+np.float32,0x7fc00000,0x7fc00000,2
+np.float32,0x80194193,0x80194193,2
+np.float32,0xfe31c8d4,0xbf800000,2
+np.float32,0x7fc0c4,0x7fc0c4,2
+np.float32,0xd95bf,0xd95bf,2
+np.float32,0x7e4f991d,0x3f800000,2
+np.float32,0x7fc563,0x7fc563,2
+np.float32,0xbe3fcccc,0xbe3d968a,2
+np.float32,0xfdaaa1c8,0xbf800000,2
+np.float32,0xbf48e449,0xbf27c949,2
+np.float32,0x3eb6c584,0x3eaf625e,2
+np.float32,0xbea35a74,0xbe9e0702,2
+np.float32,0x3eeab47a,0x3edb89d5,2
+np.float32,0xbed99556,0xbecd5de5,2
+np.float64,0xbfb94a81e0329500,0xbfb935867ba761fe,2
+np.float64,0xbfec132f1678265e,0xbfe6900eb097abc3,2
+np.float64,0x5685ea72ad0be,0x5685ea72ad0be,2
+np.float64,0xbfd74d3169ae9a62,0xbfd652e09b9daf32,2
+np.float64,0xbfe28df53d651bea,0xbfe0b8a7f50ab433,2
+np.float64,0x0,0x0,2
+np.float64,0xbfed912738bb224e,0xbfe749e3732831ae,2
+np.float64,0x7fcc6faed838df5d,0x3ff0000000000000,2
+np.float64,0xbfe95fe9a432bfd3,0xbfe51f6349919910,2
+np.float64,0xbfc4d5900b29ab20,0xbfc4a6f496179b8b,2
+np.float64,0xbfcd6025033ac04c,0xbfccded7b34b49b0,2
+np.float64,0xbfdfa655b43f4cac,0xbfdd4ca1e5bb9db8,2
+np.float64,0xe7ea5c7fcfd4c,0xe7ea5c7fcfd4c,2
+np.float64,0xffa5449ca42a8940,0xbff0000000000000,2
+np.float64,0xffe63294c1ac6529,0xbff0000000000000,2
+np.float64,0x7feb9cbae7f73975,0x3ff0000000000000,2
+np.float64,0x800eb07c3e3d60f9,0x800eb07c3e3d60f9,2
+np.float64,0x3fc95777e932aef0,0x3fc9040391e20c00,2
+np.float64,0x800736052dee6c0b,0x800736052dee6c0b,2
+np.float64,0x3fe9ae4afd335c96,0x3fe54b569bab45c7,2
+np.float64,0x7fee4c94217c9927,0x3ff0000000000000,2
+np.float64,0x80094b594bd296b3,0x80094b594bd296b3,2
+np.float64,0xffe5adbcee6b5b7a,0xbff0000000000000,2
+np.float64,0x3fecb8eab47971d5,0x3fe6e236be6f27e9,2
+np.float64,0x44956914892ae,0x44956914892ae,2
+np.float64,0xbfe3bd18ef677a32,0xbfe190bf1e07200c,2
+np.float64,0x800104e5b46209cc,0x800104e5b46209cc,2
+np.float64,0x8008fbcecf71f79e,0x8008fbcecf71f79e,2
+np.float64,0x800f0a46a0be148d,0x800f0a46a0be148d,2
+np.float64,0x7fe657a0702caf40,0x3ff0000000000000,2
+np.float64,0xffd3ff1a9027fe36,0xbff0000000000000,2
+np.float64,0x3fe78bc87bef1790,0x3fe40d2e63aaf029,2
+np.float64,0x7feeabdc4c7d57b8,0x3ff0000000000000,2
+np.float64,0xbfabd28d8437a520,0xbfabcb8ce03a0e56,2
+np.float64,0xbfddc3a133bb8742,0xbfdbc9fdb2594451,2
+np.float64,0x7fec911565b9222a,0x3ff0000000000000,2
+np.float64,0x71302604e2605,0x71302604e2605,2
+np.float64,0xee919d2bdd234,0xee919d2bdd234,2
+np.float64,0xbfc04fcff3209fa0,0xbfc0395a739a2ce4,2
+np.float64,0xffe4668a36e8cd14,0xbff0000000000000,2
+np.float64,0xbfeeafeebefd5fde,0xbfe7cd5f3d61a3ec,2
+np.float64,0x7fddb34219bb6683,0x3ff0000000000000,2
+np.float64,0xbfd2cac6cba5958e,0xbfd24520abb2ff36,2
+np.float64,0xbfb857e49630afc8,0xbfb8452d5064dec2,2
+np.float64,0x3fd2dbf90b25b7f2,0x3fd254eaf48484c2,2
+np.float64,0x800af65c94f5ecba,0x800af65c94f5ecba,2
+np.float64,0xa0eef4bf41ddf,0xa0eef4bf41ddf,2
+np.float64,0xffd8e0a4adb1c14a,0xbff0000000000000,2
+np.float64,0xffe858f6e870b1ed,0xbff0000000000000,2
+np.float64,0x3f94c2c308298580,0x3f94c208a4bb006d,2
+np.float64,0xffb45f0d7428be18,0xbff0000000000000,2
+np.float64,0x800ed4f43dbda9e9,0x800ed4f43dbda9e9,2
+np.float64,0x8002dd697e85bad4,0x8002dd697e85bad4,2
+np.float64,0x787ceab2f0f9e,0x787ceab2f0f9e,2
+np.float64,0xbfdff5fcc2bfebfa,0xbfdd8b736b128589,2
+np.float64,0x7fdb2b4294365684,0x3ff0000000000000,2
+np.float64,0xffe711e5e92e23cc,0xbff0000000000000,2
+np.float64,0x800b1c93f1163928,0x800b1c93f1163928,2
+np.float64,0x7fc524d2f22a49a5,0x3ff0000000000000,2
+np.float64,0x7fc88013b5310026,0x3ff0000000000000,2
+np.float64,0x3fe1a910c5e35222,0x3fe00fd779ebaa2a,2
+np.float64,0xbfb57ec9ca2afd90,0xbfb571e47ecb9335,2
+np.float64,0x7fd7594b20aeb295,0x3ff0000000000000,2
+np.float64,0x7fba4641ca348c83,0x3ff0000000000000,2
+np.float64,0xffe61393706c2726,0xbff0000000000000,2
+np.float64,0x7fd54f3c7baa9e78,0x3ff0000000000000,2
+np.float64,0xffe65ffb12ecbff6,0xbff0000000000000,2
+np.float64,0xbfba3b0376347608,0xbfba239cbbbd1b11,2
+np.float64,0x800200886d640112,0x800200886d640112,2
+np.float64,0xbfecf0ba4679e174,0xbfe6fd59de44a3ec,2
+np.float64,0xffe5c57e122b8afc,0xbff0000000000000,2
+np.float64,0x7fdaad0143355a02,0x3ff0000000000000,2
+np.float64,0x46ab32c08d567,0x46ab32c08d567,2
+np.float64,0x7ff8000000000000,0x7ff8000000000000,2
+np.float64,0xbfda7980fdb4f302,0xbfd90fa9c8066109,2
+np.float64,0x3fe237703c646ee0,0x3fe07969f8d8805a,2
+np.float64,0x8000e9fcfc21d3fb,0x8000e9fcfc21d3fb,2
+np.float64,0xbfdfe6e958bfcdd2,0xbfdd7f952fe87770,2
+np.float64,0xbd7baf217af8,0xbd7baf217af8,2
+np.float64,0xbfceba9e4b3d753c,0xbfce26e54359869a,2
+np.float64,0xb95a2caf72b46,0xb95a2caf72b46,2
+np.float64,0x3fb407e25a280fc5,0x3fb3fd71e457b628,2
+np.float64,0xa1da09d943b41,0xa1da09d943b41,2
+np.float64,0xbfe9c7271cf38e4e,0xbfe559296b471738,2
+np.float64,0x3fefae6170ff5cc3,0x3fe83c70ba82f0e1,2
+np.float64,0x7fe7375348ae6ea6,0x3ff0000000000000,2
+np.float64,0xffe18c9cc6e31939,0xbff0000000000000,2
+np.float64,0x800483d13a6907a3,0x800483d13a6907a3,2
+np.float64,0x7fe772a18caee542,0x3ff0000000000000,2
+np.float64,0xffefff64e7bffec9,0xbff0000000000000,2
+np.float64,0x7fcffc31113ff861,0x3ff0000000000000,2
+np.float64,0x3fd91e067e323c0d,0x3fd7e70bf365a7b3,2
+np.float64,0xb0a6673d614cd,0xb0a6673d614cd,2
+np.float64,0xffef9a297e3f3452,0xbff0000000000000,2
+np.float64,0xffe87cc15e70f982,0xbff0000000000000,2
+np.float64,0xffefd6ad8e7fad5a,0xbff0000000000000,2
+np.float64,0x7fe3aaa3a8a75546,0x3ff0000000000000,2
+np.float64,0xddab0341bb561,0xddab0341bb561,2
+np.float64,0x3fe996d6d7332dae,0x3fe53e3ed5be2922,2
+np.float64,0x3fdbe66a18b7ccd4,0x3fda41e6053c1512,2
+np.float64,0x8914775d1228f,0x8914775d1228f,2
+np.float64,0x3fe44621d4688c44,0x3fe1ef9c7225f8bd,2
+np.float64,0xffab29a2a4365340,0xbff0000000000000,2
+np.float64,0xffc8d4a0c431a940,0xbff0000000000000,2
+np.float64,0xbfd426e085284dc2,0xbfd382e2a9617b87,2
+np.float64,0xbfd3b0a525a7614a,0xbfd3176856faccf1,2
+np.float64,0x80036dedcb06dbdc,0x80036dedcb06dbdc,2
+np.float64,0x3feb13823b762704,0x3fe60ca3facdb696,2
+np.float64,0x3fd7246b7bae48d8,0x3fd62f08afded155,2
+np.float64,0x1,0x1,2
+np.float64,0x3fe8ade4b9715bc9,0x3fe4b97cc1387d27,2
+np.float64,0x3fdf2dbec53e5b7e,0x3fdcecfeee33de95,2
+np.float64,0x3fe4292bf9685258,0x3fe1dbb5a6704090,2
+np.float64,0xbfd21acbb8243598,0xbfd1a2ff42174cae,2
+np.float64,0xdd0d2d01ba1a6,0xdd0d2d01ba1a6,2
+np.float64,0x3fa3f3d2f427e7a0,0x3fa3f13d6f101555,2
+np.float64,0x7fdabf4aceb57e95,0x3ff0000000000000,2
+np.float64,0xd4d9e39ba9b3d,0xd4d9e39ba9b3d,2
+np.float64,0xffec773396f8ee66,0xbff0000000000000,2
+np.float64,0x3fa88cc79031198f,0x3fa887f7ade722ba,2
+np.float64,0xffe63a92066c7524,0xbff0000000000000,2
+np.float64,0xbfcf514e2e3ea29c,0xbfceb510e99aaa19,2
+np.float64,0x9d78c19d3af18,0x9d78c19d3af18,2
+np.float64,0x7fdd748bfbbae917,0x3ff0000000000000,2
+np.float64,0xffb3594c4626b298,0xbff0000000000000,2
+np.float64,0x80068ce5b32d19cc,0x80068ce5b32d19cc,2
+np.float64,0x3fec63d60e78c7ac,0x3fe6b85536e44217,2
+np.float64,0x80080bad4dd0175b,0x80080bad4dd0175b,2
+np.float64,0xbfec6807baf8d010,0xbfe6ba69740f9687,2
+np.float64,0x7fedbae0bbfb75c0,0x3ff0000000000000,2
+np.float64,0x8001cb7aa3c396f6,0x8001cb7aa3c396f6,2
+np.float64,0x7fe1f1f03563e3df,0x3ff0000000000000,2
+np.float64,0x7fd83d3978307a72,0x3ff0000000000000,2
+np.float64,0xbfc05ffe9d20bffc,0xbfc049464e3f0af2,2
+np.float64,0xfe6e053ffcdc1,0xfe6e053ffcdc1,2
+np.float64,0xbfd3bdf39d277be8,0xbfd32386edf12726,2
+np.float64,0x800f41b27bde8365,0x800f41b27bde8365,2
+np.float64,0xbfe2c98390e59307,0xbfe0e3c9260fe798,2
+np.float64,0xffdd6206bcbac40e,0xbff0000000000000,2
+np.float64,0x67f35ef4cfe6c,0x67f35ef4cfe6c,2
+np.float64,0x800337e02ae66fc1,0x800337e02ae66fc1,2
+np.float64,0x3fe0ff70afe1fee1,0x3fdf1f46434330df,2
+np.float64,0x3fd7e0a1df2fc144,0x3fd6d3f82c8031e4,2
+np.float64,0x8008da5cd1b1b4ba,0x8008da5cd1b1b4ba,2
+np.float64,0x80065ec9e4ccbd95,0x80065ec9e4ccbd95,2
+np.float64,0x3fe1d1e559a3a3cb,0x3fe02e4f146aa1ab,2
+np.float64,0x7feb7d2f0836fa5d,0x3ff0000000000000,2
+np.float64,0xbfcb33ce9736679c,0xbfcaccd431b205bb,2
+np.float64,0x800e6d0adf5cda16,0x800e6d0adf5cda16,2
+np.float64,0x7fe46f272ca8de4d,0x3ff0000000000000,2
+np.float64,0x4fdfc73e9fbfa,0x4fdfc73e9fbfa,2
+np.float64,0x800958a13112b143,0x800958a13112b143,2
+np.float64,0xbfea01f877f403f1,0xbfe579a541594247,2
+np.float64,0xeefaf599ddf5f,0xeefaf599ddf5f,2
+np.float64,0x80038766c5e70ece,0x80038766c5e70ece,2
+np.float64,0x7fd31bc28ba63784,0x3ff0000000000000,2
+np.float64,0xbfe4df77eee9bef0,0xbfe257abe7083b77,2
+np.float64,0x7fe6790c78acf218,0x3ff0000000000000,2
+np.float64,0xffe7c66884af8cd0,0xbff0000000000000,2
+np.float64,0x800115e36f422bc8,0x800115e36f422bc8,2
+np.float64,0x3fc601945d2c0329,0x3fc5cab917bb20bc,2
+np.float64,0x3fd6ac9546ad592b,0x3fd5c55437ec3508,2
+np.float64,0xa7bd59294f7ab,0xa7bd59294f7ab,2
+np.float64,0x8005c26c8b8b84da,0x8005c26c8b8b84da,2
+np.float64,0x8257501704aea,0x8257501704aea,2
+np.float64,0x5b12aae0b6256,0x5b12aae0b6256,2
+np.float64,0x800232fe02c465fd,0x800232fe02c465fd,2
+np.float64,0x800dae28f85b5c52,0x800dae28f85b5c52,2
+np.float64,0x3fdade1ac135bc36,0x3fd964a2000ace25,2
+np.float64,0x3fed72ca04fae594,0x3fe73b9170d809f9,2
+np.float64,0x7fc6397e2b2c72fb,0x3ff0000000000000,2
+np.float64,0x3fe1f5296d23ea53,0x3fe048802d17621e,2
+np.float64,0xffe05544b920aa89,0xbff0000000000000,2
+np.float64,0xbfdb2e1588365c2c,0xbfd9a7e4113c713e,2
+np.float64,0xbfed6a06fa3ad40e,0xbfe7376be60535f8,2
+np.float64,0xbfe31dcaf5e63b96,0xbfe120417c46cac1,2
+np.float64,0xbfb7ed67ae2fdad0,0xbfb7dba14af33b00,2
+np.float64,0xffd32bb7eb265770,0xbff0000000000000,2
+np.float64,0x80039877b04730f0,0x80039877b04730f0,2
+np.float64,0x3f832e5630265cac,0x3f832e316f47f218,2
+np.float64,0xffe7fa7f732ff4fe,0xbff0000000000000,2
+np.float64,0x9649b87f2c937,0x9649b87f2c937,2
+np.float64,0xffaee447183dc890,0xbff0000000000000,2
+np.float64,0x7fe4e02dd869c05b,0x3ff0000000000000,2
+np.float64,0x3fe1d35e7463a6bd,0x3fe02f67bd21e86e,2
+np.float64,0xffe57f40fe2afe82,0xbff0000000000000,2
+np.float64,0xbfea1362b93426c6,0xbfe5833421dba8fc,2
+np.float64,0xffe9c689fe338d13,0xbff0000000000000,2
+np.float64,0xffc592dd102b25bc,0xbff0000000000000,2
+np.float64,0x3fd283c7aba5078f,0x3fd203d61d1398c3,2
+np.float64,0x8001d6820243ad05,0x8001d6820243ad05,2
+np.float64,0x3fe0ad5991e15ab4,0x3fdea14ef0d47fbd,2
+np.float64,0x3fe3916f2ee722de,0x3fe1722684a9ffb1,2
+np.float64,0xffef9e54e03f3ca9,0xbff0000000000000,2
+np.float64,0x7fe864faebb0c9f5,0x3ff0000000000000,2
+np.float64,0xbfed3587c3fa6b10,0xbfe71e7112df8a68,2
+np.float64,0xbfdd9efc643b3df8,0xbfdbac3a16caf208,2
+np.float64,0xbfd5ac08feab5812,0xbfd4e14575a6e41b,2
+np.float64,0xffda90fae6b521f6,0xbff0000000000000,2
+np.float64,0x8001380ecf22701e,0x8001380ecf22701e,2
+np.float64,0x7fed266fa5fa4cde,0x3ff0000000000000,2
+np.float64,0xffec6c0ac3b8d815,0xbff0000000000000,2
+np.float64,0x3fe7de43c32fbc88,0x3fe43ef62821a5a6,2
+np.float64,0x800bf4ffc357ea00,0x800bf4ffc357ea00,2
+np.float64,0x3fe125c975624b93,0x3fdf59b2de3eff5d,2
+np.float64,0x8004714c1028e299,0x8004714c1028e299,2
+np.float64,0x3fef1bfbf5fe37f8,0x3fe7fd2ba1b63c8a,2
+np.float64,0x800cae15c3195c2c,0x800cae15c3195c2c,2
+np.float64,0x7fde708e083ce11b,0x3ff0000000000000,2
+np.float64,0x7fbcee5df639dcbb,0x3ff0000000000000,2
+np.float64,0x800b1467141628cf,0x800b1467141628cf,2
+np.float64,0x3fe525e0d36a4bc2,0x3fe286b6e59e30f5,2
+np.float64,0xffe987f8b8330ff1,0xbff0000000000000,2
+np.float64,0x7e0a8284fc151,0x7e0a8284fc151,2
+np.float64,0x8006f982442df305,0x8006f982442df305,2
+np.float64,0xbfd75a3cb62eb47a,0xbfd65e54cee981c9,2
+np.float64,0x258e91104b1d3,0x258e91104b1d3,2
+np.float64,0xbfecd0056779a00b,0xbfe6ed7ae97fff1b,2
+np.float64,0x7fc3a4f9122749f1,0x3ff0000000000000,2
+np.float64,0x6e2b1024dc563,0x6e2b1024dc563,2
+np.float64,0x800d575ad4daaeb6,0x800d575ad4daaeb6,2
+np.float64,0xbfceafb1073d5f64,0xbfce1c93023d8414,2
+np.float64,0xffe895cb5f312b96,0xbff0000000000000,2
+np.float64,0x7fe7811ed4ef023d,0x3ff0000000000000,2
+np.float64,0xbfd93f952f327f2a,0xbfd803e6b5576b99,2
+np.float64,0xffdd883a3fbb1074,0xbff0000000000000,2
+np.float64,0x7fee5624eefcac49,0x3ff0000000000000,2
+np.float64,0xbfe264bb2624c976,0xbfe09a9b7cc896e7,2
+np.float64,0xffef14b417be2967,0xbff0000000000000,2
+np.float64,0xbfecbd0d94397a1b,0xbfe6e43bef852d9f,2
+np.float64,0xbfe20d9e4ba41b3c,0xbfe05a98e05846d9,2
+np.float64,0x10000000000000,0x10000000000000,2
+np.float64,0x7fefde93f7bfbd27,0x3ff0000000000000,2
+np.float64,0x80076b9e232ed73d,0x80076b9e232ed73d,2
+np.float64,0xbfe80df52c701bea,0xbfe45b754b433792,2
+np.float64,0x7fe3b5a637676b4b,0x3ff0000000000000,2
+np.float64,0x2c81d14c5903b,0x2c81d14c5903b,2
+np.float64,0x80038945c767128c,0x80038945c767128c,2
+np.float64,0xffeebaf544bd75ea,0xbff0000000000000,2
+np.float64,0xffdb1867d2b630d0,0xbff0000000000000,2
+np.float64,0x3fe3376eaee66ede,0x3fe13285579763d8,2
+np.float64,0xffddf65ca43becba,0xbff0000000000000,2
+np.float64,0xffec8e3e04791c7b,0xbff0000000000000,2
+np.float64,0x80064f4bde2c9e98,0x80064f4bde2c9e98,2
+np.float64,0x7fe534a085ea6940,0x3ff0000000000000,2
+np.float64,0xbfcbabe31d3757c8,0xbfcb3f8e70adf7e7,2
+np.float64,0xbfe45ca11e28b942,0xbfe1ff04515ef809,2
+np.float64,0x65f4df02cbe9d,0x65f4df02cbe9d,2
+np.float64,0xb08b0cbb61162,0xb08b0cbb61162,2
+np.float64,0x3feae2e8b975c5d1,0x3fe5f302b5e8eda2,2
+np.float64,0x7fcf277ff93e4eff,0x3ff0000000000000,2
+np.float64,0x80010999c4821334,0x80010999c4821334,2
+np.float64,0xbfd7f65911afecb2,0xbfd6e6e9cd098f8b,2
+np.float64,0x800e0560ec3c0ac2,0x800e0560ec3c0ac2,2
+np.float64,0x7fec4152ba3882a4,0x3ff0000000000000,2
+np.float64,0xbfb5c77cd42b8ef8,0xbfb5ba1336084908,2
+np.float64,0x457ff1b68afff,0x457ff1b68afff,2
+np.float64,0x5323ec56a647e,0x5323ec56a647e,2
+np.float64,0xbfeed16cf8bda2da,0xbfe7dc49fc9ae549,2
+np.float64,0xffe8446106b088c1,0xbff0000000000000,2
+np.float64,0xffb93cd13c3279a0,0xbff0000000000000,2
+np.float64,0x7fe515c2aeea2b84,0x3ff0000000000000,2
+np.float64,0x80099df83f933bf1,0x80099df83f933bf1,2
+np.float64,0x7fb3a375562746ea,0x3ff0000000000000,2
+np.float64,0x7fcd7efa243afdf3,0x3ff0000000000000,2
+np.float64,0xffe40cddb12819bb,0xbff0000000000000,2
+np.float64,0x8008b68eecd16d1e,0x8008b68eecd16d1e,2
+np.float64,0x2aec688055d8e,0x2aec688055d8e,2
+np.float64,0xffe23750bc646ea1,0xbff0000000000000,2
+np.float64,0x5adacf60b5b7,0x5adacf60b5b7,2
+np.float64,0x7fefb29b1cbf6535,0x3ff0000000000000,2
+np.float64,0xbfeadbf90175b7f2,0xbfe5ef55e2194794,2
+np.float64,0xeaad2885d55a5,0xeaad2885d55a5,2
+np.float64,0xffd7939fba2f2740,0xbff0000000000000,2
+np.float64,0x3fd187ea3aa30fd4,0x3fd11af023472386,2
+np.float64,0xbf6eb579c03d6b00,0xbf6eb57052f47019,2
+np.float64,0x3fefb67b3bff6cf6,0x3fe83fe4499969ac,2
+np.float64,0xbfe5183aacea3076,0xbfe27da1aa0b61a0,2
+np.float64,0xbfb83e47a2307c90,0xbfb82bcb0e12db42,2
+np.float64,0x80088849b1b11094,0x80088849b1b11094,2
+np.float64,0x800ceeed7399dddb,0x800ceeed7399dddb,2
+np.float64,0x80097cd90892f9b2,0x80097cd90892f9b2,2
+np.float64,0x7ec73feefd8e9,0x7ec73feefd8e9,2
+np.float64,0x7fe3291de5a6523b,0x3ff0000000000000,2
+np.float64,0xbfd537086daa6e10,0xbfd4787af5f60653,2
+np.float64,0x800e8ed4455d1da9,0x800e8ed4455d1da9,2
+np.float64,0x800ef8d19cbdf1a3,0x800ef8d19cbdf1a3,2
+np.float64,0x800dc4fa3a5b89f5,0x800dc4fa3a5b89f5,2
+np.float64,0xaa8b85cd55171,0xaa8b85cd55171,2
+np.float64,0xffd67a5f40acf4be,0xbff0000000000000,2
+np.float64,0xbfb7496db22e92d8,0xbfb7390a48130861,2
+np.float64,0x3fd86a8e7ab0d51d,0x3fd74bfba0f72616,2
+np.float64,0xffb7f5b7fc2feb70,0xbff0000000000000,2
+np.float64,0xbfea0960a7f412c1,0xbfe57db6d0ff4191,2
+np.float64,0x375f4fc26ebeb,0x375f4fc26ebeb,2
+np.float64,0x800c537e70b8a6fd,0x800c537e70b8a6fd,2
+np.float64,0x800b3f4506d67e8a,0x800b3f4506d67e8a,2
+np.float64,0x7fe61f2d592c3e5a,0x3ff0000000000000,2
+np.float64,0xffefffffffffffff,0xbff0000000000000,2
+np.float64,0x8005d0bb84eba178,0x8005d0bb84eba178,2
+np.float64,0x800c78b0ec18f162,0x800c78b0ec18f162,2
+np.float64,0xbfc42cccfb285998,0xbfc4027392f66b0d,2
+np.float64,0x3fd8fdc73fb1fb8e,0x3fd7cb46f928153f,2
+np.float64,0x800c71754298e2eb,0x800c71754298e2eb,2
+np.float64,0x3fe4aa7a96a954f5,0x3fe233f5d3bc1352,2
+np.float64,0x7fd53841f6aa7083,0x3ff0000000000000,2
+np.float64,0x3fd0a887b8a15110,0x3fd04ac3b9c0d1ca,2
+np.float64,0x8007b8e164cf71c4,0x8007b8e164cf71c4,2
+np.float64,0xbfddc35c66bb86b8,0xbfdbc9c5dddfb014,2
+np.float64,0x6a3756fed46eb,0x6a3756fed46eb,2
+np.float64,0xffd3dcd05527b9a0,0xbff0000000000000,2
+np.float64,0xbfd7dc75632fb8ea,0xbfd6d0538b340a98,2
+np.float64,0x17501f822ea05,0x17501f822ea05,2
+np.float64,0xbfe1f98b99a3f317,0xbfe04bbf8f8b6cb3,2
+np.float64,0x66ea65d2cdd4d,0x66ea65d2cdd4d,2
+np.float64,0xbfd12241e2224484,0xbfd0bc62f46ea5e1,2
+np.float64,0x3fed6e6fb3fadcdf,0x3fe7398249097285,2
+np.float64,0x3fe0b5ebeba16bd8,0x3fdeae84b3000a47,2
+np.float64,0x66d1bce8cda38,0x66d1bce8cda38,2
+np.float64,0x3fdd728db3bae51b,0x3fdb880f28c52713,2
+np.float64,0xffb45dbe5228bb80,0xbff0000000000000,2
+np.float64,0x1ff8990c3ff14,0x1ff8990c3ff14,2
+np.float64,0x800a68e8f294d1d2,0x800a68e8f294d1d2,2
+np.float64,0xbfe4d08b84a9a117,0xbfe24da40bff6be7,2
+np.float64,0x3fe0177f0ee02efe,0x3fddb83c5971df51,2
+np.float64,0xffc56893692ad128,0xbff0000000000000,2
+np.float64,0x51b44f6aa368b,0x51b44f6aa368b,2
+np.float64,0x2258ff4e44b21,0x2258ff4e44b21,2
+np.float64,0x3fe913649e7226c9,0x3fe4f3f119530f53,2
+np.float64,0xffe3767df766ecfc,0xbff0000000000000,2
+np.float64,0xbfe62ae12fec55c2,0xbfe33108f1f22a94,2
+np.float64,0x7fb6a6308e2d4c60,0x3ff0000000000000,2
+np.float64,0xbfe00f2085e01e41,0xbfddab19b6fc77d1,2
+np.float64,0x3fb66447dc2cc890,0x3fb655b4f46844f0,2
+np.float64,0x3fd80238f6b00470,0x3fd6f143be1617d6,2
+np.float64,0xbfd05bfeb3a0b7fe,0xbfd0031ab3455e15,2
+np.float64,0xffc3a50351274a08,0xbff0000000000000,2
+np.float64,0xffd8f4241cb1e848,0xbff0000000000000,2
+np.float64,0xbfca72a88c34e550,0xbfca13ebe85f2aca,2
+np.float64,0x3fd47d683ba8fad0,0x3fd3d13f1176ed8c,2
+np.float64,0x3fb6418e642c831d,0x3fb6333ebe479ff2,2
+np.float64,0x800fde8e023fbd1c,0x800fde8e023fbd1c,2
+np.float64,0x8001fb01e323f605,0x8001fb01e323f605,2
+np.float64,0x3febb21ff9f76440,0x3fe65ed788d52fee,2
+np.float64,0x3fe47553ffe8eaa8,0x3fe20fe01f853603,2
+np.float64,0x7fca20b3f9344167,0x3ff0000000000000,2
+np.float64,0x3fe704f4ec6e09ea,0x3fe3ba7277201805,2
+np.float64,0xf864359df0c87,0xf864359df0c87,2
+np.float64,0x4d96b01c9b2d7,0x4d96b01c9b2d7,2
+np.float64,0x3fe8a09fe9f14140,0x3fe4b1c6a2d2e095,2
+np.float64,0xffc46c61b228d8c4,0xbff0000000000000,2
+np.float64,0x3fe680a837ed0150,0x3fe3679d6eeb6485,2
+np.float64,0xbfecedc20f39db84,0xbfe6fbe9ee978bf6,2
+np.float64,0x3fb2314eae24629d,0x3fb2297ba6d55d2d,2
+np.float64,0x3fe9f0b8e7b3e172,0x3fe57026eae36db3,2
+np.float64,0x80097a132ed2f427,0x80097a132ed2f427,2
+np.float64,0x800ae5a41955cb49,0x800ae5a41955cb49,2
+np.float64,0xbfd7527279aea4e4,0xbfd6577de356e1bd,2
+np.float64,0x3fe27d3e01e4fa7c,0x3fe0ac7dd96f9179,2
+np.float64,0x7fedd8cb01bbb195,0x3ff0000000000000,2
+np.float64,0x78f8695af1f0e,0x78f8695af1f0e,2
+np.float64,0x800d2d0e927a5a1d,0x800d2d0e927a5a1d,2
+np.float64,0xffe74b46fb2e968e,0xbff0000000000000,2
+np.float64,0xbfdd12d4c8ba25aa,0xbfdb39dae49e1c10,2
+np.float64,0xbfd6c14710ad828e,0xbfd5d79ef5a8d921,2
+np.float64,0x921f4e55243ea,0x921f4e55243ea,2
+np.float64,0x800b4e4c80969c99,0x800b4e4c80969c99,2
+np.float64,0x7fe08c6ab7e118d4,0x3ff0000000000000,2
+np.float64,0xbfed290014fa5200,0xbfe71871f7e859ed,2
+np.float64,0x8008c1d5c59183ac,0x8008c1d5c59183ac,2
+np.float64,0x3fd339e68c2673cd,0x3fd2aaff3f165a9d,2
+np.float64,0xbfdd20d8113a41b0,0xbfdb4553ea2cb2fb,2
+np.float64,0x3fe52a25deea544c,0x3fe2898d5bf4442c,2
+np.float64,0x498602d4930c1,0x498602d4930c1,2
+np.float64,0x3fd8c450113188a0,0x3fd799b0b2a6c43c,2
+np.float64,0xbfd72bc2f2ae5786,0xbfd6357e15ba7f70,2
+np.float64,0xbfd076188ea0ec32,0xbfd01b8fce44d1af,2
+np.float64,0x9aace1713559c,0x9aace1713559c,2
+np.float64,0x8008a730e8914e62,0x8008a730e8914e62,2
+np.float64,0x7fe9e9a3d833d347,0x3ff0000000000000,2
+np.float64,0x800d3a0d69da741b,0x800d3a0d69da741b,2
+np.float64,0xbfe3e28a29e7c514,0xbfe1aad7643a2d19,2
+np.float64,0x7fe9894c71331298,0x3ff0000000000000,2
+np.float64,0xbfe7c6acb5ef8d5a,0xbfe430c9e258ce62,2
+np.float64,0xffb5a520a62b4a40,0xbff0000000000000,2
+np.float64,0x7fc02109ae204212,0x3ff0000000000000,2
+np.float64,0xb5c58f196b8b2,0xb5c58f196b8b2,2
+np.float64,0x3feb4ee82e769dd0,0x3fe62bae9a39d8b1,2
+np.float64,0x3fec5c3cf278b87a,0x3fe6b49000f12441,2
+np.float64,0x81f64b8103eca,0x81f64b8103eca,2
+np.float64,0xbfeab00d73f5601b,0xbfe5d7f755ab73d9,2
+np.float64,0x3fd016bf28a02d7e,0x3fcf843ea23bcd3c,2
+np.float64,0xbfa1db617423b6c0,0xbfa1d9872ddeb5a8,2
+np.float64,0x3fe83c879d70790f,0x3fe4771502d8f012,2
+np.float64,0x6b267586d64cf,0x6b267586d64cf,2
+np.float64,0x3fc91b6d3f3236d8,0x3fc8ca3eb4da25a9,2
+np.float64,0x7fd4e3f8f3a9c7f1,0x3ff0000000000000,2
+np.float64,0x800a75899214eb14,0x800a75899214eb14,2
+np.float64,0x7fdb1f2e07b63e5b,0x3ff0000000000000,2
+np.float64,0xffe7805a11ef00b4,0xbff0000000000000,2
+np.float64,0x3fc8e1b88a31c371,0x3fc892af45330818,2
+np.float64,0xbfe809fe447013fc,0xbfe45918f07da4d9,2
+np.float64,0xbfeb9d7f2ab73afe,0xbfe65446bfddc792,2
+np.float64,0x3fb47f0a5c28fe15,0x3fb473db9113e880,2
+np.float64,0x800a17ae3cb42f5d,0x800a17ae3cb42f5d,2
+np.float64,0xf5540945eaa81,0xf5540945eaa81,2
+np.float64,0xbfe577fc26aaeff8,0xbfe2bcfbf2cf69ff,2
+np.float64,0xbfb99b3e06333680,0xbfb98577b88e0515,2
+np.float64,0x7fd9290391b25206,0x3ff0000000000000,2
+np.float64,0x7fe1aa62ffa354c5,0x3ff0000000000000,2
+np.float64,0x7b0189a0f604,0x7b0189a0f604,2
+np.float64,0x3f9000ed602001db,0x3f900097fe168105,2
+np.float64,0x3fd576128d2aec25,0x3fd4b1002c92286f,2
+np.float64,0xffecc98ece79931d,0xbff0000000000000,2
+np.float64,0x800a1736c7f42e6e,0x800a1736c7f42e6e,2
+np.float64,0xbfed947548bb28eb,0xbfe74b71479ae739,2
+np.float64,0xa45c032148b9,0xa45c032148b9,2
+np.float64,0xbfc13d011c227a04,0xbfc1228447de5e9f,2
+np.float64,0xffed8baa6ebb1754,0xbff0000000000000,2
+np.float64,0x800ea2de243d45bc,0x800ea2de243d45bc,2
+np.float64,0x8001396be52272d9,0x8001396be52272d9,2
+np.float64,0xd018d1cda031a,0xd018d1cda031a,2
+np.float64,0x7fe1fece1fe3fd9b,0x3ff0000000000000,2
+np.float64,0x8009ac484c135891,0x8009ac484c135891,2
+np.float64,0x3fc560ad132ac15a,0x3fc52e5a9479f08e,2
+np.float64,0x3fd6f80ebe2df01d,0x3fd607f70ce8e3f4,2
+np.float64,0xbfd3e69e82a7cd3e,0xbfd34887c2a40699,2
+np.float64,0x3fe232d9baa465b3,0x3fe0760a822ada0c,2
+np.float64,0x3fe769bbc6eed378,0x3fe3f872680f6631,2
+np.float64,0xffe63dbd952c7b7a,0xbff0000000000000,2
+np.float64,0x4e0c00da9c181,0x4e0c00da9c181,2
+np.float64,0xffeae4d89735c9b0,0xbff0000000000000,2
+np.float64,0x3fe030bcbb606179,0x3fdddfc66660bfce,2
+np.float64,0x7fe35ca40d66b947,0x3ff0000000000000,2
+np.float64,0xbfd45bd66628b7ac,0xbfd3b2e04bfe7866,2
+np.float64,0x3fd1f0be2323e17c,0x3fd17c1c340d7a48,2
+np.float64,0x3fd7123b6cae2478,0x3fd61f0675aa9ae1,2
+np.float64,0xbfe918a377723147,0xbfe4f6efe66f5714,2
+np.float64,0x7fc400356f28006a,0x3ff0000000000000,2
+np.float64,0x7fd2dead70a5bd5a,0x3ff0000000000000,2
+np.float64,0xffe9c28f81f3851e,0xbff0000000000000,2
+np.float64,0x3fd09b1ec7a1363e,0x3fd03e3894320140,2
+np.float64,0x7fe6e80c646dd018,0x3ff0000000000000,2
+np.float64,0x7fec3760a4786ec0,0x3ff0000000000000,2
+np.float64,0x309eb6ee613d8,0x309eb6ee613d8,2
+np.float64,0x800731cb0ece6397,0x800731cb0ece6397,2
+np.float64,0xbfdb0c553db618aa,0xbfd98b8a4680ee60,2
+np.float64,0x3fd603a52eac074c,0x3fd52f6b53de7455,2
+np.float64,0x9ecb821b3d971,0x9ecb821b3d971,2
+np.float64,0x3feb7d64dc36faca,0x3fe643c2754bb7f4,2
+np.float64,0xffeb94825ef72904,0xbff0000000000000,2
+np.float64,0x24267418484cf,0x24267418484cf,2
+np.float64,0xbfa6b2fbac2d65f0,0xbfa6af2dca5bfa6f,2
+np.float64,0x8010000000000000,0x8010000000000000,2
+np.float64,0xffe6873978ed0e72,0xbff0000000000000,2
+np.float64,0x800447934ba88f27,0x800447934ba88f27,2
+np.float64,0x3fef305f09fe60be,0x3fe806156b8ca47c,2
+np.float64,0xffd441c697a8838e,0xbff0000000000000,2
+np.float64,0xbfa7684f6c2ed0a0,0xbfa764238d34830c,2
+np.float64,0xffb2c976142592f0,0xbff0000000000000,2
+np.float64,0xbfcc9d1585393a2c,0xbfcc25756bcbca1f,2
+np.float64,0xbfd477bb1ba8ef76,0xbfd3cc1d2114e77e,2
+np.float64,0xbfed1559983a2ab3,0xbfe70f03afd994ee,2
+np.float64,0xbfeb51139036a227,0xbfe62ccf56bc7fff,2
+np.float64,0x7d802890fb006,0x7d802890fb006,2
+np.float64,0x800e00af777c015f,0x800e00af777c015f,2
+np.float64,0x800647ce128c8f9d,0x800647ce128c8f9d,2
+np.float64,0x800a26da91d44db6,0x800a26da91d44db6,2
+np.float64,0x3fdc727eddb8e4fe,0x3fdab5fd9db630b3,2
+np.float64,0x7fd06def2ba0dbdd,0x3ff0000000000000,2
+np.float64,0xffe23678c4a46cf1,0xbff0000000000000,2
+np.float64,0xbfe7198e42ee331c,0xbfe3c7326c9c7553,2
+np.float64,0xffae465f3c3c8cc0,0xbff0000000000000,2
+np.float64,0xff9aea7c5035d500,0xbff0000000000000,2
+np.float64,0xbfeae49c0f35c938,0xbfe5f3e9326cb08b,2
+np.float64,0x3f9a16f300342de6,0x3f9a1581212be50f,2
+np.float64,0x8d99e2c31b33d,0x8d99e2c31b33d,2
+np.float64,0xffd58af253ab15e4,0xbff0000000000000,2
+np.float64,0xbfd205cd25a40b9a,0xbfd18f97155f8b25,2
+np.float64,0xbfebe839bbf7d074,0xbfe67a6024e8fefe,2
+np.float64,0xbfe4fb3595a9f66b,0xbfe26a42f99819ea,2
+np.float64,0x800e867c739d0cf9,0x800e867c739d0cf9,2
+np.float64,0x8bc4274f17885,0x8bc4274f17885,2
+np.float64,0xaec8914b5d912,0xaec8914b5d912,2
+np.float64,0x7fd1d64473a3ac88,0x3ff0000000000000,2
+np.float64,0xbfe6d6f69cedaded,0xbfe39dd61bc7e23e,2
+np.float64,0x7fed05039d7a0a06,0x3ff0000000000000,2
+np.float64,0xbfc40eab0f281d58,0xbfc3e50d14b79265,2
+np.float64,0x45179aec8a2f4,0x45179aec8a2f4,2
+np.float64,0xbfe717e362ee2fc7,0xbfe3c62a95b07d13,2
+np.float64,0xbfe5b8df0d6b71be,0xbfe2e76c7ec5013d,2
+np.float64,0x5c67ba6eb8cf8,0x5c67ba6eb8cf8,2
+np.float64,0xbfda72ce4cb4e59c,0xbfd909fdc7ecfe20,2
+np.float64,0x7fdf59a1e2beb343,0x3ff0000000000000,2
+np.float64,0xc4f7897f89ef1,0xc4f7897f89ef1,2
+np.float64,0x8fcd0a351f9a2,0x8fcd0a351f9a2,2
+np.float64,0x3fb161761022c2ec,0x3fb15aa31c464de2,2
+np.float64,0x8008a985be71530c,0x8008a985be71530c,2
+np.float64,0x3fca4ddb5e349bb7,0x3fc9f0a3b60e49c6,2
+np.float64,0x7fcc10a2d9382145,0x3ff0000000000000,2
+np.float64,0x78902b3af1206,0x78902b3af1206,2
+np.float64,0x7fe1e2765f23c4ec,0x3ff0000000000000,2
+np.float64,0xc1d288cf83a51,0xc1d288cf83a51,2
+np.float64,0x7fe8af692bb15ed1,0x3ff0000000000000,2
+np.float64,0x80057d90fb8afb23,0x80057d90fb8afb23,2
+np.float64,0x3fdc136b8fb826d8,0x3fda6749582b2115,2
+np.float64,0x800ec8ea477d91d5,0x800ec8ea477d91d5,2
+np.float64,0x4c0f4796981ea,0x4c0f4796981ea,2
+np.float64,0xec34c4a5d8699,0xec34c4a5d8699,2
+np.float64,0x7fce343dfb3c687b,0x3ff0000000000000,2
+np.float64,0xbfc95a98a332b530,0xbfc90705b2cc2fec,2
+np.float64,0x800d118e1dba231c,0x800d118e1dba231c,2
+np.float64,0x3fd354f310a6a9e8,0x3fd2c3bb90054154,2
+np.float64,0xbfdac0d4fab581aa,0xbfd94bf37424928e,2
+np.float64,0x3fe7f5391fefea72,0x3fe44cb49d51985b,2
+np.float64,0xd4c3c329a9879,0xd4c3c329a9879,2
+np.float64,0x3fc53977692a72f0,0x3fc50835d85c9ed1,2
+np.float64,0xbfd6989538ad312a,0xbfd5b3a2c08511fe,2
+np.float64,0xbfe329f2906653e5,0xbfe128ec1525a1c0,2
+np.float64,0x7ff0000000000000,0x3ff0000000000000,2
+np.float64,0xbfea57c90974af92,0xbfe5a87b04aa3116,2
+np.float64,0x7fdfba94043f7527,0x3ff0000000000000,2
+np.float64,0x3feedabddafdb57c,0x3fe7e06c0661978d,2
+np.float64,0x4bd9f3b697b3f,0x4bd9f3b697b3f,2
+np.float64,0x3fdd15bbfc3a2b78,0x3fdb3c3b8d070f7e,2
+np.float64,0x3fbd89ccd23b13a0,0x3fbd686b825cff80,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x3f9baa8928375512,0x3f9ba8d01ddd5300,2
+np.float64,0x4a3ebdf2947d8,0x4a3ebdf2947d8,2
+np.float64,0x3fe698d5c06d31ac,0x3fe376dff48312c8,2
+np.float64,0xffd5323df12a647c,0xbff0000000000000,2
+np.float64,0xffea7f111174fe22,0xbff0000000000000,2
+np.float64,0x3feb4656a9b68cad,0x3fe627392eb2156f,2
+np.float64,0x7fc1260e9c224c1c,0x3ff0000000000000,2
+np.float64,0x80056e45e5eadc8d,0x80056e45e5eadc8d,2
+np.float64,0x7fd0958ef6a12b1d,0x3ff0000000000000,2
+np.float64,0x8001f85664e3f0ae,0x8001f85664e3f0ae,2
+np.float64,0x3fe553853beaa70a,0x3fe2a4f5e7c83558,2
+np.float64,0xbfeb33ce6276679d,0xbfe61d8ec9e5ff8c,2
+np.float64,0xbfd1b24e21a3649c,0xbfd14245df6065e9,2
+np.float64,0x3fe286fc40650df9,0x3fe0b395c8059429,2
+np.float64,0xffed378058fa6f00,0xbff0000000000000,2
+np.float64,0xbfd0c4a2d7a18946,0xbfd06509a434d6a0,2
+np.float64,0xbfea31d581f463ab,0xbfe593d976139f94,2
+np.float64,0xbfe0705c85e0e0b9,0xbfde42efa978eb0c,2
+np.float64,0xe4c4c339c9899,0xe4c4c339c9899,2
+np.float64,0x3fd68befa9ad17df,0x3fd5a870b3f1f83e,2
+np.float64,0x8000000000000001,0x8000000000000001,2
+np.float64,0x3fe294256965284b,0x3fe0bd271e22d86b,2
+np.float64,0x8005327a862a64f6,0x8005327a862a64f6,2
+np.float64,0xbfdb8155ce3702ac,0xbfd9ed9ef97920f8,2
+np.float64,0xbff0000000000000,0xbfe85efab514f394,2
+np.float64,0xffe66988f1ecd312,0xbff0000000000000,2
+np.float64,0x3fb178a85e22f150,0x3fb171b9fbf95f1d,2
+np.float64,0x7f829b900025371f,0x3ff0000000000000,2
+np.float64,0x8000000000000000,0x8000000000000000,2
+np.float64,0x8006cb77f60d96f1,0x8006cb77f60d96f1,2
+np.float64,0x3fe0c5d53aa18baa,0x3fdec7012ab92b42,2
+np.float64,0x77266426ee4cd,0x77266426ee4cd,2
+np.float64,0xbfec95f468392be9,0xbfe6d11428f60136,2
+np.float64,0x3fedbf532dfb7ea6,0x3fe75f8436dd1d58,2
+np.float64,0x8002fadd3f85f5bb,0x8002fadd3f85f5bb,2
+np.float64,0xbfefebaa8d3fd755,0xbfe8566c6aa90fba,2
+np.float64,0xffc7dd2b712fba58,0xbff0000000000000,2
+np.float64,0x7fe5d3a6e8aba74d,0x3ff0000000000000,2
+np.float64,0x2da061525b40d,0x2da061525b40d,2
+np.float64,0x7fcb9b9953373732,0x3ff0000000000000,2
+np.float64,0x2ca2f6fc59460,0x2ca2f6fc59460,2
+np.float64,0xffeb84b05af70960,0xbff0000000000000,2
+np.float64,0xffe551e86c6aa3d0,0xbff0000000000000,2
+np.float64,0xbfdb311311366226,0xbfd9aa6688faafb9,2
+np.float64,0xbfd4f3875629e70e,0xbfd43bcd73534c66,2
+np.float64,0x7fe95666f932accd,0x3ff0000000000000,2
+np.float64,0x3fc73dfb482e7bf7,0x3fc6fd70c20ebf60,2
+np.float64,0x800cd9e40939b3c8,0x800cd9e40939b3c8,2
+np.float64,0x3fb0c9fa422193f0,0x3fb0c3d38879a2ac,2
+np.float64,0xffd59a38372b3470,0xbff0000000000000,2
+np.float64,0x3fa8320ef4306420,0x3fa82d739e937d35,2
+np.float64,0x3fd517f16caa2fe4,0x3fd45c8de1e93b37,2
+np.float64,0xaed921655db24,0xaed921655db24,2
+np.float64,0x93478fb9268f2,0x93478fb9268f2,2
+np.float64,0x1615e28a2c2bd,0x1615e28a2c2bd,2
+np.float64,0xbfead23010f5a460,0xbfe5ea24d5d8f820,2
+np.float64,0x774a6070ee94d,0x774a6070ee94d,2
+np.float64,0x3fdf5874bd3eb0e9,0x3fdd0ef121dd915c,2
+np.float64,0x8004b25f53a964bf,0x8004b25f53a964bf,2
+np.float64,0xbfddacdd2ebb59ba,0xbfdbb78198fab36b,2
+np.float64,0x8008a3acf271475a,0x8008a3acf271475a,2
+np.float64,0xbfdb537c8736a6fa,0xbfd9c741038bb8f0,2
+np.float64,0xbfe56a133f6ad426,0xbfe2b3d5b8d259a1,2
+np.float64,0xffda1db531343b6a,0xbff0000000000000,2
+np.float64,0x3fcbe05f3a37c0be,0x3fcb71a54a64ddfb,2
+np.float64,0x7fe1ccaa7da39954,0x3ff0000000000000,2
+np.float64,0x3faeadd8343d5bb0,0x3faea475608860e6,2
+np.float64,0x3fe662ba1c2cc574,0x3fe354a6176e90df,2
+np.float64,0xffe4d49f4e69a93e,0xbff0000000000000,2
+np.float64,0xbfeadbc424f5b788,0xbfe5ef39dbe66343,2
+np.float64,0x99cf66f1339ed,0x99cf66f1339ed,2
+np.float64,0x33af77a2675f0,0x33af77a2675f0,2
+np.float64,0x7fec7b32ecf8f665,0x3ff0000000000000,2
+np.float64,0xffef3e44993e7c88,0xbff0000000000000,2
+np.float64,0xffe8f8ceac31f19c,0xbff0000000000000,2
+np.float64,0x7fe0d15b6da1a2b6,0x3ff0000000000000,2
+np.float64,0x4ba795c2974f3,0x4ba795c2974f3,2
+np.float64,0x3fe361aa37a6c354,0x3fe15079021d6b15,2
+np.float64,0xffe709714f6e12e2,0xbff0000000000000,2
+np.float64,0xffe7ea6a872fd4d4,0xbff0000000000000,2
+np.float64,0xffdb9441c8b72884,0xbff0000000000000,2
+np.float64,0xffd5e11ae9abc236,0xbff0000000000000,2
+np.float64,0xffe092a08b612540,0xbff0000000000000,2
+np.float64,0x3fe1f27e1ca3e4fc,0x3fe04685b5131207,2
+np.float64,0xbfe71ce1bdee39c4,0xbfe3c940809a7081,2
+np.float64,0xffe8c3aa68318754,0xbff0000000000000,2
+np.float64,0x800d4e2919da9c52,0x800d4e2919da9c52,2
+np.float64,0x7fe6c8bca76d9178,0x3ff0000000000000,2
+np.float64,0x7fced8751e3db0e9,0x3ff0000000000000,2
+np.float64,0xd61d0c8bac3a2,0xd61d0c8bac3a2,2
+np.float64,0x3fec57732938aee6,0x3fe6b22f15f38352,2
+np.float64,0xff9251cc7024a3a0,0xbff0000000000000,2
+np.float64,0xf4a68cb9e94d2,0xf4a68cb9e94d2,2
+np.float64,0x3feed76703bdaece,0x3fe7def0fc9a080c,2
+np.float64,0xbfe8971ff7712e40,0xbfe4ac3eb8ebff07,2
+np.float64,0x3fe4825f682904bf,0x3fe218c1952fe67d,2
+np.float64,0xbfd60f7698ac1eee,0xbfd539f0979b4b0c,2
+np.float64,0x3fcf0845993e1088,0x3fce7032f7180144,2
+np.float64,0x7fc83443f3306887,0x3ff0000000000000,2
+np.float64,0x3fe93123ae726247,0x3fe504e4fc437e89,2
+np.float64,0x3fbf9eb8363f3d70,0x3fbf75cdfa6828d5,2
+np.float64,0xbf8b45e5d0368bc0,0xbf8b457c29dfe1a9,2
+np.float64,0x8006c2853d0d850b,0x8006c2853d0d850b,2
+np.float64,0xffef26e25ffe4dc4,0xbff0000000000000,2
+np.float64,0x7fefffffffffffff,0x3ff0000000000000,2
+np.float64,0xbfde98f2c2bd31e6,0xbfdc761bfab1c4cb,2
+np.float64,0xffb725e6222e4bd0,0xbff0000000000000,2
+np.float64,0x800c63ead5d8c7d6,0x800c63ead5d8c7d6,2
+np.float64,0x3fea087e95f410fd,0x3fe57d3ab440706c,2
+np.float64,0xbfdf9f8a603f3f14,0xbfdd4742d77dfa57,2
+np.float64,0xfff0000000000000,0xbff0000000000000,2
+np.float64,0xbfcdc0841d3b8108,0xbfcd3a401debba9a,2
+np.float64,0x800f0c8f4f7e191f,0x800f0c8f4f7e191f,2
+np.float64,0x800ba6e75fd74dcf,0x800ba6e75fd74dcf,2
+np.float64,0x7fee4927e8bc924f,0x3ff0000000000000,2
+np.float64,0x3fadf141903be283,0x3fade8878d9d3551,2
+np.float64,0x3efb1a267df64,0x3efb1a267df64,2
+np.float64,0xffebf55f22b7eabe,0xbff0000000000000,2
+np.float64,0x7fbe8045663d008a,0x3ff0000000000000,2
+np.float64,0x3fefc0129f7f8026,0x3fe843f8b7d6cf38,2
+np.float64,0xbfe846b420f08d68,0xbfe47d1709e43937,2
+np.float64,0x7fe8e87043f1d0e0,0x3ff0000000000000,2
+np.float64,0x3fcfb718453f6e31,0x3fcf14ecee7b32b4,2
+np.float64,0x7fe4306b71a860d6,0x3ff0000000000000,2
+np.float64,0x7fee08459f7c108a,0x3ff0000000000000,2
+np.float64,0x3fed705165fae0a3,0x3fe73a66369c5700,2
+np.float64,0x7fd0e63f4da1cc7e,0x3ff0000000000000,2
+np.float64,0xffd1a40c2ea34818,0xbff0000000000000,2
+np.float64,0xbfa369795c26d2f0,0xbfa36718218d46b3,2
+np.float64,0xef70b9f5dee17,0xef70b9f5dee17,2
+np.float64,0x3fb50a0a6e2a1410,0x3fb4fdf27724560a,2
+np.float64,0x7fe30a0f6166141e,0x3ff0000000000000,2
+np.float64,0xbfd7b3ca7daf6794,0xbfd6accb81032b2d,2
+np.float64,0x3fc21dceb3243b9d,0x3fc1ff15d5d277a3,2
+np.float64,0x3fe483e445a907c9,0x3fe219ca0e269552,2
+np.float64,0x3fb2b1e2a22563c0,0x3fb2a96554900eaf,2
+np.float64,0x4b1ff6409641,0x4b1ff6409641,2
+np.float64,0xbfd92eabc9b25d58,0xbfd7f55d7776d64e,2
+np.float64,0x8003b8604c8770c1,0x8003b8604c8770c1,2
+np.float64,0x800d20a9df1a4154,0x800d20a9df1a4154,2
+np.float64,0xecf8a535d9f15,0xecf8a535d9f15,2
+np.float64,0x3fe92d15bab25a2b,0x3fe50296aa15ae85,2
+np.float64,0x800239c205a47385,0x800239c205a47385,2
+np.float64,0x3fc48664a9290cc8,0x3fc459d126320ef6,2
+np.float64,0x3fe7620625eec40c,0x3fe3f3bcbee3e8c6,2
+np.float64,0x3fd242ff4ca48600,0x3fd1c81ed7a971c8,2
+np.float64,0xbfe39bafcfa73760,0xbfe17959c7a279db,2
+np.float64,0x7fdcd2567239a4ac,0x3ff0000000000000,2
+np.float64,0x3fe5f2f292ebe5e6,0x3fe30d12f05e2752,2
+np.float64,0x7fda3819d1347033,0x3ff0000000000000,2
+np.float64,0xffca5b4d4334b69c,0xbff0000000000000,2
+np.float64,0xb8a2b7cd71457,0xb8a2b7cd71457,2
+np.float64,0x3fee689603fcd12c,0x3fe7ad4ace26d6dd,2
+np.float64,0x7fe26541a564ca82,0x3ff0000000000000,2
+np.float64,0x3fe6912ee66d225e,0x3fe3720d242c4d82,2
+np.float64,0xffe6580c75ecb018,0xbff0000000000000,2
+np.float64,0x7fe01a3370603466,0x3ff0000000000000,2
+np.float64,0xffe84e3f84b09c7e,0xbff0000000000000,2
+np.float64,0x3ff0000000000000,0x3fe85efab514f394,2
+np.float64,0x3fe214d4266429a8,0x3fe05fec03a3c247,2
+np.float64,0x3fd00aec5da015d8,0x3fcf6e070ad4ad62,2
+np.float64,0x800aac8631f5590d,0x800aac8631f5590d,2
+np.float64,0xbfe7c4f5f76f89ec,0xbfe42fc1c57b4a13,2
+np.float64,0xaf146c7d5e28e,0xaf146c7d5e28e,2
+np.float64,0xbfe57188b66ae312,0xbfe2b8be4615ef75,2
+np.float64,0xffef8cb8e1ff1971,0xbff0000000000000,2
+np.float64,0x8001daf8aa63b5f2,0x8001daf8aa63b5f2,2
+np.float64,0x3fdddcc339bbb986,0x3fdbde5f3783538b,2
+np.float64,0xdd8c92c3bb193,0xdd8c92c3bb193,2
+np.float64,0xbfe861a148f0c342,0xbfe48cf1d228a336,2
+np.float64,0xffe260a32e24c146,0xbff0000000000000,2
+np.float64,0x1f7474b43ee8f,0x1f7474b43ee8f,2
+np.float64,0x3fe81dbd89703b7c,0x3fe464d78df92b7b,2
+np.float64,0x7fed0101177a0201,0x3ff0000000000000,2
+np.float64,0x7fd8b419a8316832,0x3ff0000000000000,2
+np.float64,0x3fe93debccf27bd8,0x3fe50c27727917f0,2
+np.float64,0xe5ead05bcbd5a,0xe5ead05bcbd5a,2
+np.float64,0xbfebbbc4cff7778a,0xbfe663c4ca003bbf,2
+np.float64,0xbfea343eb474687e,0xbfe59529f73ea151,2
+np.float64,0x3fbe74a5963ce94b,0x3fbe50123ed05d8d,2
+np.float64,0x3fd31d3a5d263a75,0x3fd290c026cb38a5,2
+np.float64,0xbfd79908acaf3212,0xbfd695620e31c3c6,2
+np.float64,0xbfc26a350324d46c,0xbfc249f335f3e465,2
+np.float64,0xbfac38d5583871b0,0xbfac31866d12a45e,2
+np.float64,0x3fe40cea672819d5,0x3fe1c83754e72c92,2
+np.float64,0xbfa74770642e8ee0,0xbfa74355fcf67332,2
+np.float64,0x7fc60942d32c1285,0x3ff0000000000000,2
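Each data row above encodes four fields: dtype, input bit pattern, expected output bit pattern, and the tolerated error in ULPs. The special-value rows (large magnitudes saturating to ±1.0, signed zeros and NaN passing through) are consistent with a tanh validation table. A minimal sketch of decoding one row by hand — decode_row is an illustrative helper, not part of the test suite:

    import struct

    def decode_row(row):
        # Fields: dtype, input bits, expected output bits, max ULP error.
        dtype, x_hex, y_hex, ulp = row.strip().split(",")
        fmt, width = ("!f", 4) if dtype == "np.float32" else ("!d", 8)
        def as_float(h):
            return struct.unpack(fmt, int(h, 16).to_bytes(width, "big"))[0]
        return as_float(x_hex), as_float(y_hex), int(ulp)

    x, y, ulp = decode_row("np.float64,0x3ff0000000000000,0x3fe85efab514f394,2")
    # x == 1.0 and y == 0.7615941559557649 == tanh(1.0), allowed error 2 ULP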
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx
new file mode 100644
index 00000000..e41c6d65
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/checks.pyx
@@ -0,0 +1,32 @@
+#cython: language_level=3
+
+"""
+Functions in this module give Python-space wrappers for the Cython functions
+exposed in numpy/__init__.pxd, so they can be tested in test_cython.py
+"""
+cimport numpy as cnp
+cnp.import_array()
+
+
+def is_td64(obj):
+ return cnp.is_timedelta64_object(obj)
+
+
+def is_dt64(obj):
+ return cnp.is_datetime64_object(obj)
+
+
+def get_dt64_value(obj):
+ return cnp.get_datetime64_value(obj)
+
+
+def get_td64_value(obj):
+ return cnp.get_timedelta64_value(obj)
+
+
+def get_dt64_unit(obj):
+ return cnp.get_datetime64_unit(obj)
+
+
+def is_integer(obj):
+ return isinstance(obj, (cnp.integer, int))
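Once built, these wrappers are callable from plain Python; a minimal sketch, assuming the extension has already been compiled in place (the build recipe is the setup.py that follows):

    import numpy as np
    import checks  # the extension module produced from checks.pyx

    assert checks.is_dt64(np.datetime64("2021-01-01"))
    assert checks.is_td64(np.timedelta64(5, "s"))
    assert checks.get_td64_value(np.timedelta64(5, "s")) == 5
    assert checks.is_integer(3) and checks.is_integer(np.int64(3))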
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py
new file mode 100644
index 00000000..6e34aa77
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/cython/setup.py
@@ -0,0 +1,25 @@
+"""
+Provide Python-space access to the functions exposed in numpy/__init__.pxd
+for testing.
+"""
+
+import numpy as np
+from distutils.core import setup
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+import os
+
+macros = [("NPY_NO_DEPRECATED_API", 0)]
+
+checks = Extension(
+ "checks",
+ sources=[os.path.join('.', "checks.pyx")],
+ include_dirs=[np.get_include()],
+ define_macros=macros,
+)
+
+extensions = [checks]
+
+setup(
+ ext_modules=cythonize(extensions)
+)
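The test suite normally compiles this example into a temporary directory at test time (via test_cython.py); a sketch of the equivalent manual build — the cwd path is an assumption about where the tree is checked out:

    import subprocess
    import sys

    # Build the extension in place so `import checks` works from this directory.
    subprocess.check_call(
        [sys.executable, "setup.py", "build_ext", "--inplace"],
        cwd="numpy/core/tests/examples/cython",
    )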
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c
new file mode 100644
index 00000000..698c54c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/limited_api.c
@@ -0,0 +1,17 @@
+#define Py_LIMITED_API 0x03060000
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+#include <numpy/ufuncobject.h>
+
+static PyModuleDef moduledef = {
+ .m_base = PyModuleDef_HEAD_INIT,
+ .m_name = "limited_api"
+};
+
+PyMODINIT_FUNC PyInit_limited_api(void)
+{
+ import_array();
+ import_umath();
+ return PyModule_Create(&moduledef);
+}
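The module deliberately exposes no functions: compiling against the limited API and surviving import_array()/import_umath() is the whole test. At runtime the check reduces to a bare import (assuming the built artifact is on sys.path):

    # A failed abi3 build, or a failure inside import_array()/import_umath(),
    # surfaces here as an ImportError.
    import limited_api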
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py
new file mode 100644
index 00000000..18747dc8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/examples/limited_api/setup.py
@@ -0,0 +1,22 @@
+"""
+Build an example package using the limited Python C API.
+"""
+
+import numpy as np
+from setuptools import setup, Extension
+import os
+
+macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
+
+limited_api = Extension(
+ "limited_api",
+ sources=[os.path.join('.', "limited_api.c")],
+ include_dirs=[np.get_include()],
+ define_macros=macros,
+)
+
+extensions = [limited_api]
+
+setup(
+ ext_modules=extensions
+)
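This build pins Py_LIMITED_API to 0x03060000 (the Python 3.6 stable ABI) via a macro only. As a side note — an optional variation, not what this file does — setuptools can also tag the artifact itself as abi3 through an Extension keyword; reusing the imports above:

    limited_api = Extension(
        "limited_api",
        sources=[os.path.join('.', "limited_api.c")],
        include_dirs=[np.get_include()],
        define_macros=macros,
        py_limited_api=True,  # additionally marks the built extension as abi3
    )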
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py
new file mode 100644
index 00000000..10b87e05
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test__exceptions.py
@@ -0,0 +1,88 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+
+import pickle
+
+import pytest
+import numpy as np
+
+_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
+_UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError
+
+class TestArrayMemoryError:
+ def test_pickling(self):
+ """ Test that _ArrayMemoryError can be pickled """
+ error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+ res = pickle.loads(pickle.dumps(error))
+ assert res._total_size == error._total_size
+
+ def test_str(self):
+ e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+ str(e) # not crashing is enough
+
+ # testing these properties is easier than testing the full string repr
+ def test__size_to_string(self):
+ """ Test e._size_to_string """
+ f = _ArrayMemoryError._size_to_string
+ Ki = 1024
+ assert f(0) == '0 bytes'
+ assert f(1) == '1 bytes'
+ assert f(1023) == '1023 bytes'
+ assert f(Ki) == '1.00 KiB'
+ assert f(Ki+1) == '1.00 KiB'
+ assert f(10*Ki) == '10.0 KiB'
+ assert f(int(999.4*Ki)) == '999. KiB'
+ assert f(int(1023.4*Ki)) == '1023. KiB'
+ assert f(int(1023.5*Ki)) == '1.00 MiB'
+ assert f(Ki*Ki) == '1.00 MiB'
+
+ # 1023.9999 MiB should round up to 1.00 GiB
+ assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
+ # larger than sys.maxsize, adding larger prefixes isn't going to help
+ # anyway.
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
+
+ def test__total_size(self):
+ """ Test e._total_size """
+ e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+ assert e._total_size == 1
+
+ e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+ assert e._total_size == 1024
+
+
+class TestUFuncNoLoopError:
+ def test_pickling(self):
+ """ Test that _UFuncNoLoopError can be pickled """
+ assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
+
+
+@pytest.mark.parametrize("args", [
+ (2, 1, None),
+ (2, 1, "test_prefix"),
+ ("test message",),
+])
+class TestAxisError:
+ def test_attr(self, args):
+ """Validate attribute types."""
+ exc = np.AxisError(*args)
+ if len(args) == 1:
+ assert exc.axis is None
+ assert exc.ndim is None
+ else:
+ axis, ndim, *_ = args
+ assert exc.axis == axis
+ assert exc.ndim == ndim
+
+ def test_pickling(self, args):
+ """Test that `AxisError` can be pickled."""
+ exc = np.AxisError(*args)
+ exc2 = pickle.loads(pickle.dumps(exc))
+
+ assert type(exc) is type(exc2)
+ for name in ("axis", "ndim", "args"):
+ attr1 = getattr(exc, name)
+ attr2 = getattr(exc2, name)
+ assert attr1 == attr2, name
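For context, the two error paths exercised above look like this at the user level (a sketch; the MemoryError message text is approximate):

    import numpy as np

    try:
        np.zeros(10**18)  # ~6.94 EiB of float64: cannot be allocated
    except MemoryError as e:
        # reads roughly: "Unable to allocate 6.94 EiB for an array with
        # shape (1000000000000000000,) and data type float64"
        print(e)

    try:
        np.sum(np.zeros((2, 2)), axis=5)
    except np.AxisError as e:
        print(e.axis, e.ndim)  # -> 5 2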
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_abc.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_abc.py
new file mode 100644
index 00000000..8b12d07a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_abc.py
@@ -0,0 +1,54 @@
+from numpy.testing import assert_
+
+import numbers
+
+import numpy as np
+from numpy.core.numerictypes import sctypes
+
+class TestABC:
+ def test_abstract(self):
+ assert_(issubclass(np.number, numbers.Number))
+
+ assert_(issubclass(np.inexact, numbers.Complex))
+ assert_(issubclass(np.complexfloating, numbers.Complex))
+ assert_(issubclass(np.floating, numbers.Real))
+
+ assert_(issubclass(np.integer, numbers.Integral))
+ assert_(issubclass(np.signedinteger, numbers.Integral))
+ assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
+ def test_floats(self):
+ for t in sctypes['float']:
+ assert_(isinstance(t(), numbers.Real),
+ f"{t.__name__} is not instance of Real")
+ assert_(issubclass(t, numbers.Real),
+ f"{t.__name__} is not subclass of Real")
+ assert_(not isinstance(t(), numbers.Rational),
+ f"{t.__name__} is instance of Rational")
+ assert_(not issubclass(t, numbers.Rational),
+ f"{t.__name__} is subclass of Rational")
+
+ def test_complex(self):
+ for t in sctypes['complex']:
+ assert_(isinstance(t(), numbers.Complex),
+ f"{t.__name__} is not instance of Complex")
+ assert_(issubclass(t, numbers.Complex),
+ f"{t.__name__} is not subclass of Complex")
+ assert_(not isinstance(t(), numbers.Real),
+ f"{t.__name__} is instance of Real")
+ assert_(not issubclass(t, numbers.Real),
+ f"{t.__name__} is subclass of Real")
+
+ def test_int(self):
+ for t in sctypes['int']:
+ assert_(isinstance(t(), numbers.Integral),
+ f"{t.__name__} is not instance of Integral")
+ assert_(issubclass(t, numbers.Integral),
+ f"{t.__name__} is not subclass of Integral")
+
+ def test_uint(self):
+ for t in sctypes['uint']:
+ assert_(isinstance(t(), numbers.Integral),
+ f"{t.__name__} is not instance of Integral")
+ assert_(issubclass(t, numbers.Integral),
+ f"{t.__name__} is not subclass of Integral")
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_api.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_api.py
new file mode 100644
index 00000000..5006e77f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_api.py
@@ -0,0 +1,602 @@
+import sys
+
+import numpy as np
+from numpy.core._rational_tests import rational
+import pytest
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
+ HAS_REFCOUNT
+ )
+
+
+def test_array_array():
+ tobj = type(object)
+ ones11 = np.ones((1, 1), np.float64)
+ tndarray = type(ones11)
+ # Test is_ndarray
+ assert_equal(np.array(ones11, dtype=np.float64), ones11)
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(tndarray)
+ np.array(ones11)
+ assert_equal(old_refcount, sys.getrefcount(tndarray))
+
+ # test None
+ assert_equal(np.array(None, dtype=np.float64),
+ np.array(np.nan, dtype=np.float64))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(tobj)
+ np.array(None, dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(tobj))
+
+ # test scalar
+ assert_equal(np.array(1.0, dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ if HAS_REFCOUNT:
+ old_refcount = sys.getrefcount(np.float64)
+ np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
+ assert_equal(old_refcount, sys.getrefcount(np.float64))
+
+ # test byte string
+ S2 = np.dtype((bytes, 2))
+ S3 = np.dtype((bytes, 3))
+ S5 = np.dtype((bytes, 5))
+ assert_equal(np.array(b"1.0", dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array(b"1.0").dtype, S3)
+ assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
+ assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
+ assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
+
+ # test unicode string
+ U2 = np.dtype((str, 2))
+ U3 = np.dtype((str, 3))
+ U5 = np.dtype((str, 5))
+ assert_equal(np.array("1.0", dtype=np.float64),
+ np.ones((), dtype=np.float64))
+ assert_equal(np.array("1.0").dtype, U3)
+ assert_equal(np.array("1.0", dtype=str).dtype, U3)
+ assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
+ assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
+
+ builtins = getattr(__builtins__, '__dict__', __builtins__)
+ assert_(hasattr(builtins, 'get'))
+
+ # test memoryview
+ dat = np.array(memoryview(b'1.0'), dtype=np.float64)
+ assert_equal(dat, [49.0, 46.0, 48.0])
+ assert_(dat.dtype.type is np.float64)
+
+ dat = np.array(memoryview(b'1.0'))
+ assert_equal(dat, [49, 46, 48])
+ assert_(dat.dtype.type is np.uint8)
+
+ # test array interface
+ a = np.array(100.0, dtype=np.float64)
+ o = type("o", (object,),
+ dict(__array_interface__=a.__array_interface__))
+ assert_equal(np.array(o, dtype=np.float64), a)
+
+ # test array_struct interface
+ a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
+ dtype=[('f0', int), ('f1', float), ('f2', str)])
+ o = type("o", (object,),
+ dict(__array_struct__=a.__array_struct__))
+    # Note: np.array(o) is not equal to `a` here; it comes back as an
+    # array([...], dtype=">V18"), so only the raw bytes are compared.
+ assert_equal(bytes(np.array(o).data), bytes(a.data))
+
+ # test array
+ o = type("o", (object,),
+ dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
+ assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
+
+ # test recursion
+ nested = 1.5
+ for i in range(np.MAXDIMS):
+ nested = [nested]
+
+ # no error
+ np.array(nested)
+
+ # Exceeds recursion limit
+ assert_raises(ValueError, np.array, [nested], dtype=np.float64)
+
+ # Try with lists...
+ assert_equal(np.array([None] * 10, dtype=np.float64),
+ np.full((10,), np.nan, dtype=np.float64))
+ assert_equal(np.array([[None]] * 10, dtype=np.float64),
+ np.full((10, 1), np.nan, dtype=np.float64))
+ assert_equal(np.array([[None] * 10], dtype=np.float64),
+ np.full((1, 10), np.nan, dtype=np.float64))
+ assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
+ np.full((10, 10), np.nan, dtype=np.float64))
+
+ assert_equal(np.array([1.0] * 10, dtype=np.float64),
+ np.ones((10,), dtype=np.float64))
+ assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
+ np.ones((10, 1), dtype=np.float64))
+ assert_equal(np.array([[1.0] * 10], dtype=np.float64),
+ np.ones((1, 10), dtype=np.float64))
+ assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
+ np.ones((10, 10), dtype=np.float64))
+
+ # Try with tuples
+ assert_equal(np.array((None,) * 10, dtype=np.float64),
+ np.full((10,), np.nan, dtype=np.float64))
+ assert_equal(np.array([(None,)] * 10, dtype=np.float64),
+ np.full((10, 1), np.nan, dtype=np.float64))
+ assert_equal(np.array([(None,) * 10], dtype=np.float64),
+ np.full((1, 10), np.nan, dtype=np.float64))
+ assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
+ np.full((10, 10), np.nan, dtype=np.float64))
+
+ assert_equal(np.array((1.0,) * 10, dtype=np.float64),
+ np.ones((10,), dtype=np.float64))
+ assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
+ np.ones((10, 1), dtype=np.float64))
+ assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
+ np.ones((1, 10), dtype=np.float64))
+ assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
+ np.ones((10, 10), dtype=np.float64))
+
+@pytest.mark.parametrize("array", [True, False])
+def test_array_impossible_casts(array):
+ # All builtin types can be forcibly cast, at least theoretically,
+ # but user dtypes cannot necessarily.
+ rt = rational(1, 2)
+ if array:
+ rt = np.array(rt)
+ with assert_raises(TypeError):
+ np.array(rt, dtype="M8")
+
+
+# TODO: remove when fastCopyAndTranspose deprecation expires
+@pytest.mark.parametrize("a",
+ (
+ np.array(2), # 0D array
+ np.array([3, 2, 7, 0]), # 1D array
+ np.arange(6).reshape(2, 3) # 2D array
+ ),
+)
+def test_fastCopyAndTranspose(a):
+ with pytest.deprecated_call():
+ b = np.fastCopyAndTranspose(a)
+ assert_equal(b, a.T)
+ assert b.flags.owndata
+
+
+def test_array_astype():
+ a = np.arange(6, dtype='f4').reshape(2, 3)
+ # Default behavior: allows unsafe casts, keeps memory layout,
+ # always copies.
+ b = a.astype('i4')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(a.strides, b.strides)
+ b = a.T.astype('i4')
+ assert_equal(a.T, b)
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(a.T.strides, b.strides)
+ b = a.astype('f4')
+ assert_equal(a, b)
+ assert_(not (a is b))
+
+ # copy=False parameter can sometimes skip a copy
+ b = a.astype('f4', copy=False)
+ assert_(a is b)
+
+ # order parameter allows overriding of the memory layout,
+ # forcing a copy if the layout is wrong
+ b = a.astype('f4', order='F', copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(b.flags.f_contiguous)
+
+ b = a.astype('f4', order='C', copy=False)
+ assert_equal(a, b)
+ assert_(a is b)
+ assert_(b.flags.c_contiguous)
+
+ # casting parameter allows catching bad casts
+ b = a.astype('c8', casting='safe')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('c8'))
+
+ assert_raises(TypeError, a.astype, 'i4', casting='safe')
+
+ # subok=False passes through a non-subclassed array
+ b = a.astype('f4', subok=0, copy=False)
+ assert_(a is b)
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
+
+ # subok=True passes through a subclass
+ b = a.astype('f4', subok=True, copy=False)
+ assert_(a is b)
+
+ # subok=True is default, and creates a subtype on a cast
+ b = a.astype('i4', copy=False)
+ assert_equal(a, b)
+ assert_equal(type(b), MyNDArray)
+
+ # subok=False never returns a subclass
+ b = a.astype('f4', subok=False, copy=False)
+ assert_equal(a, b)
+ assert_(not (a is b))
+ assert_(type(b) is not MyNDArray)
+
+ # Make sure converting from string object to fixed length string
+ # does not truncate.
+ a = np.array([b'a'*100], dtype='O')
+ b = a.astype('S')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('S100'))
+ a = np.array(['a'*100], dtype='O')
+ b = a.astype('U')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('U100'))
+
+ # Same test as above but for strings shorter than 64 characters
+ a = np.array([b'a'*10], dtype='O')
+ b = a.astype('S')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('S10'))
+ a = np.array(['a'*10], dtype='O')
+ b = a.astype('U')
+ assert_equal(a, b)
+ assert_equal(b.dtype, np.dtype('U10'))
+
+ a = np.array(123456789012345678901234567890, dtype='O').astype('S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array(123456789012345678901234567890, dtype='O').astype('U')
+ assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
+
+ a = np.array([123456789012345678901234567890], dtype='O').astype('S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array([123456789012345678901234567890], dtype='O').astype('U')
+ assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
+
+ a = np.array(123456789012345678901234567890, dtype='S')
+ assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
+ a = np.array(123456789012345678901234567890, dtype='U')
+ assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
+
+ a = np.array('a\u0140', dtype='U')
+ b = np.ndarray(buffer=a, dtype='uint32', shape=2)
+ assert_(b.size == 2)
+
+ a = np.array([1000], dtype='i4')
+ assert_raises(TypeError, a.astype, 'S1', casting='safe')
+
+ a = np.array(1000, dtype='i4')
+ assert_raises(TypeError, a.astype, 'U1', casting='safe')
+
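+# A small companion sketch (an illustrative addition, not from the original
+# suite) that makes the copy/no-copy contract above explicit via
+# `np.shares_memory`; it assumes only the documented `astype` semantics.
+def test_astype_copy_sketch():
+    a = np.arange(6, dtype='f4').reshape(2, 3)
+    # Matching dtype and layout with copy=False returns the array itself.
+    assert a.astype('f4', copy=False) is a
+    assert np.shares_memory(a, a.astype('f4', copy=False))
+    # A dtype change always forces a fresh buffer, even with copy=False.
+    assert not np.shares_memory(a, a.astype('f8', copy=False))
+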
+@pytest.mark.parametrize("dt", ["S", "U"])
+def test_array_astype_to_string_discovery_empty(dt):
+ # See also gh-19085
+ arr = np.array([""], dtype=object)
+    # Note: the itemsize is the `0 -> 1` logic, which should change.
+    # The important part of the test is that it does not error.
+ assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
+
+ # check the same thing for `np.can_cast` (since it accepts arrays)
+ assert np.can_cast(arr, dt, casting="unsafe")
+ assert not np.can_cast(arr, dt, casting="same_kind")
+ # as well as for the object as a descriptor:
+ assert np.can_cast("O", dt, casting="unsafe")
+
+@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
+def test_array_astype_to_void(dt):
+ dt = np.dtype(dt)
+ arr = np.array([], dtype=dt)
+ assert arr.astype("V").dtype.itemsize == dt.itemsize
+
+def test_object_array_astype_to_void():
+    # This is different from `test_array_astype_to_void` as object arrays
+    # are inspected. The default void is "V8" (8 being the itemsize of
+    # a double).
+ arr = np.array([], dtype="O").astype("V")
+ assert arr.dtype == "V8"
+
+@pytest.mark.parametrize("t",
+ np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
+)
+def test_array_astype_warning(t):
+ # test ComplexWarning when casting from complex to float or int
+ a = np.array(10, dtype=np.complex_)
+ assert_warns(np.ComplexWarning, a.astype, t)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+ [(np.bytes_, np.bool_),
+ (np.unicode_, np.bool_),
+ (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast(dtype, out_dtype):
+ """
+ Currently, for `astype` strings are cast to booleans effectively by
+    calling `bool(int(string))`. This is not consistent (see gh-9875) and
+ will eventually be deprecated.
+ """
+ arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
+ expected = np.array([True, True, False, False], dtype=out_dtype)
+ assert_array_equal(arr.astype(out_dtype), expected)
+
+@pytest.mark.parametrize(["dtype", "out_dtype"],
+ [(np.bytes_, np.bool_),
+ (np.unicode_, np.bool_),
+ (np.dtype("S10,S9"), np.dtype("?,?"))])
+def test_string_to_boolean_cast_errors(dtype, out_dtype):
+ """
+    These currently error out, since the cast to integers fails, but should not
+ error out in the future.
+ """
+ for invalid in ["False", "True", "", "\0", "non-empty"]:
+ arr = np.array([invalid], dtype=dtype)
+ with assert_raises(ValueError):
+ arr.astype(out_dtype)
+
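+# Illustrative sketch (an addition, not part of the original tests): the
+# `bool(int(string))` rule described above differs from Python's own string
+# truthiness, where any non-empty string is True.
+def test_string_to_boolean_cast_sketch():
+    assert bool("0") is True  # plain Python: non-empty string is truthy
+    # NumPy: "0" -> int("0") == 0 -> False
+    assert not np.array(["0"]).astype(np.bool_)[0]
+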
+@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
+@pytest.mark.parametrize("scalar_type",
+ [np.complex64, np.complex128, np.clongdouble])
+def test_string_to_complex_cast(str_type, scalar_type):
+ value = scalar_type(b"1+3j")
+ assert scalar_type(value) == 1+3j
+ assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
+ assert np.array(value).astype(scalar_type)[()] == 1+3j
+ arr = np.zeros(1, dtype=scalar_type)
+ arr[0] = value
+ assert arr[0] == 1+3j
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_none_to_nan_cast(dtype):
+ # Note that at the time of writing this test, the scalar constructors
+ # reject None
+ arr = np.zeros(1, dtype=dtype)
+ arr[0] = None
+ assert np.isnan(arr)[0]
+ assert np.isnan(np.array(None, dtype=dtype))[()]
+ assert np.isnan(np.array([None], dtype=dtype))[0]
+ assert np.isnan(np.array(None).astype(dtype))[()]
+
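+# Companion sketch to the note above (an illustrative addition): the scalar
+# constructors themselves reject None, even though array assignment maps it
+# to NaN.
+def test_scalar_constructor_rejects_none_sketch():
+    with pytest.raises(TypeError):
+        np.float64(None)
+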
+def test_copyto_fromscalar():
+ a = np.arange(6, dtype='f4').reshape(2, 3)
+
+ # Simple copy
+ np.copyto(a, 1.5)
+ assert_equal(a, 1.5)
+ np.copyto(a.T, 2.5)
+ assert_equal(a, 2.5)
+
+ # Where-masked copy
+ mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
+ np.copyto(a, 3.5, where=mask)
+ assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
+ mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
+ np.copyto(a.T, 4.5, where=mask)
+ assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
+
+def test_copyto():
+ a = np.arange(6, dtype='i4').reshape(2, 3)
+
+ # Simple copy
+ np.copyto(a, [[3, 1, 5], [6, 2, 1]])
+ assert_equal(a, [[3, 1, 5], [6, 2, 1]])
+
+ # Overlapping copy should work
+ np.copyto(a[:, :2], a[::-1, 1::-1])
+ assert_equal(a, [[2, 6, 5], [1, 3, 1]])
+
+ # Defaults to 'same_kind' casting
+ assert_raises(TypeError, np.copyto, a, 1.5)
+
+ # Force a copy with 'unsafe' casting, truncating 1.5 to 1
+ np.copyto(a, 1.5, casting='unsafe')
+ assert_equal(a, 1)
+
+ # Copying with a mask
+ np.copyto(a, 3, where=[True, False, True])
+ assert_equal(a, [[3, 1, 3], [3, 1, 3]])
+
+ # Casting rule still applies with a mask
+ assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
+
+    # Lists of integer 0s and 1s are ok too
+ np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
+ assert_equal(a, [[3, 4, 4], [4, 1, 3]])
+
+ # Overlapping copy with mask should work
+ np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
+ assert_equal(a, [[3, 4, 4], [4, 3, 3]])
+
+ # 'dst' must be an array
+ assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
+
+def test_copyto_permut():
+ # test explicit overflow case
+ pad = 500
+ l = [True] * pad + [True, True, True, True]
+ r = np.zeros(len(l)-pad)
+ d = np.ones(len(l)-pad)
+ mask = np.array(l)[pad:]
+ np.copyto(r, d, where=mask[::-1])
+
+    # test all permutations of possible masks; 9 should be sufficient for
+    # the current 4-byte unrolled code
+ power = 9
+ d = np.ones(power)
+ for i in range(2**power):
+ r = np.zeros(power)
+ l = [(i & x) != 0 for x in range(power)]
+ mask = np.array(l)
+ np.copyto(r, d, where=mask)
+ assert_array_equal(r == 1, l)
+ assert_equal(r.sum(), sum(l))
+
+ r = np.zeros(power)
+ np.copyto(r, d, where=mask[::-1])
+ assert_array_equal(r == 1, l[::-1])
+ assert_equal(r.sum(), sum(l))
+
+ r = np.zeros(power)
+ np.copyto(r[::2], d[::2], where=mask[::2])
+ assert_array_equal(r[::2] == 1, l[::2])
+ assert_equal(r[::2].sum(), sum(l[::2]))
+
+ r = np.zeros(power)
+ np.copyto(r[::2], d[::2], where=mask[::-2])
+ assert_array_equal(r[::2] == 1, l[::-2])
+ assert_equal(r[::2].sum(), sum(l[::-2]))
+
+ for c in [0xFF, 0x7F, 0x02, 0x10]:
+ r = np.zeros(power)
+ mask = np.array(l)
+ imask = np.array(l).view(np.uint8)
+ imask[mask != 0] = c
+ np.copyto(r, d, where=mask)
+ assert_array_equal(r == 1, l)
+ assert_equal(r.sum(), sum(l))
+
+ r = np.zeros(power)
+ np.copyto(r, d, where=True)
+ assert_equal(r.sum(), r.size)
+ r = np.ones(power)
+ d = np.zeros(power)
+ np.copyto(r, d, where=False)
+ assert_equal(r.sum(), r.size)
+
+def test_copy_order():
+ a = np.arange(24).reshape(2, 1, 3, 4)
+ b = a.copy(order='F')
+ c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
+
+ def check_copy_result(x, y, ccontig, fcontig, strides=False):
+ assert_(not (x is y))
+ assert_equal(x, y)
+        assert_equal(x.flags.c_contiguous, ccontig)
+        assert_equal(x.flags.f_contiguous, fcontig)
+
+ # Validate the initial state of a, b, and c
+ assert_(a.flags.c_contiguous)
+ assert_(not a.flags.f_contiguous)
+ assert_(not b.flags.c_contiguous)
+ assert_(b.flags.f_contiguous)
+ assert_(not c.flags.c_contiguous)
+ assert_(not c.flags.f_contiguous)
+
+ # Copy with order='C'
+ res = a.copy(order='C')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = b.copy(order='C')
+ check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+ res = c.copy(order='C')
+ check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+ res = np.copy(a, order='C')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = np.copy(b, order='C')
+ check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+ res = np.copy(c, order='C')
+ check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+
+ # Copy with order='F'
+ res = a.copy(order='F')
+ check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+ res = b.copy(order='F')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = c.copy(order='F')
+ check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+ res = np.copy(a, order='F')
+ check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+ res = np.copy(b, order='F')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = np.copy(c, order='F')
+ check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+
+ # Copy with order='K'
+ res = a.copy(order='K')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = b.copy(order='K')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = c.copy(order='K')
+ check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+ res = np.copy(a, order='K')
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+ res = np.copy(b, order='K')
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+ res = np.copy(c, order='K')
+ check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+
+def test_contiguous_flags():
+ a = np.ones((4, 4, 1))[::2,:,:]
+ a.strides = a.strides[:2] + (-123,)
+ b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
+
+ def check_contig(a, ccontig, fcontig):
+ assert_(a.flags.c_contiguous == ccontig)
+ assert_(a.flags.f_contiguous == fcontig)
+
+ # Check if new arrays are correct:
+ check_contig(a, False, False)
+ check_contig(b, False, False)
+ check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
+ check_contig(np.array([[[1], [2]]], order='F'), True, True)
+ check_contig(np.empty((2, 2)), True, False)
+ check_contig(np.empty((2, 2), order='F'), False, True)
+
+ # Check that np.array creates correct contiguous flags:
+ check_contig(np.array(a, copy=False), False, False)
+ check_contig(np.array(a, copy=False, order='C'), True, False)
+ check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
+
+    # Check that slicing updates the flags correctly:
+ check_contig(a[0], True, True)
+ check_contig(a[None, ::4, ..., None], True, True)
+ check_contig(b[0, 0, ...], False, True)
+ check_contig(b[:, :, 0:0, :, :], True, True)
+
+ # Test ravel and squeeze.
+ check_contig(a.ravel(), True, True)
+ check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
+
+def test_broadcast_arrays():
+ # Test user defined dtypes
+ a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
+ b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
+ result = np.broadcast_arrays(a, b)
+ assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
+ assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
+
+@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
+ [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
+ ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
+def test_full_from_list(shape, fill_value, expected_output):
+ output = np.full(shape, fill_value)
+ assert_equal(output, expected_output)
+
+def test_astype_copyflag():
+ # test the various copyflag options
+ arr = np.arange(10, dtype=np.intp)
+
+ res_true = arr.astype(np.intp, copy=True)
+ assert not np.may_share_memory(arr, res_true)
+ res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
+ assert not np.may_share_memory(arr, res_always)
+
+ res_false = arr.astype(np.intp, copy=False)
+ # `res_false is arr` currently, but check `may_share_memory`.
+ assert np.may_share_memory(arr, res_false)
+ res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
+ # `res_if_needed is arr` currently, but check `may_share_memory`.
+ assert np.may_share_memory(arr, res_if_needed)
+
+ res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
+ assert np.may_share_memory(arr, res_never)
+
+ # Simple tests for when a copy is necessary:
+ res_false = arr.astype(np.float64, copy=False)
+ assert_array_equal(res_false, arr)
+ res_if_needed = arr.astype(np.float64,
+ copy=np._CopyMode.IF_NEEDED)
+ assert_array_equal(res_if_needed, arr)
+ assert_raises(ValueError, arr.astype, np.float64,
+ copy=np._CopyMode.NEVER)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py
new file mode 100644
index 00000000..63a01dee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_argparse.py
@@ -0,0 +1,62 @@
+"""
+Tests for the private NumPy argument parsing functionality.
+They mainly exist to ensure good test coverage without having to try the
+weirder cases on actual numpy functions; instead they are tested in one place.
+
+The test function is defined in C to be equivalent to (errors may not always
+match exactly, and could be adjusted):
+
+ def func(arg1, /, arg2, *, arg3):
+ i = integer(arg1) # reproducing the 'i' parsing in Python.
+ return None
+"""
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import argparse_example_function as func
+
+
+def test_invalid_integers():
+ with pytest.raises(TypeError,
+ match="integer argument expected, got float"):
+ func(1.)
+ with pytest.raises(OverflowError):
+ func(2**100)
+
+
+def test_missing_arguments():
+ with pytest.raises(TypeError,
+ match="missing required positional argument 0"):
+ func()
+ with pytest.raises(TypeError,
+ match="missing required positional argument 0"):
+ func(arg2=1, arg3=4)
+ with pytest.raises(TypeError,
+ match=r"missing required argument \'arg2\' \(pos 1\)"):
+ func(1, arg3=5)
+
+
+def test_too_many_positional():
+ # the second argument is positional but can be passed as keyword.
+ with pytest.raises(TypeError,
+ match="takes from 2 to 3 positional arguments but 4 were given"):
+ func(1, 2, 3, 4)
+
+
+def test_multiple_values():
+ with pytest.raises(TypeError,
+ match=r"given by name \('arg2'\) and position \(position 1\)"):
+ func(1, 2, arg2=3)
+
+
+def test_string_fallbacks():
+ # We can (currently?) use numpy strings to test the "slow" fallbacks
+ # that should normally not be taken due to string interning.
+ arg2 = np.unicode_("arg2")
+ missing_arg = np.unicode_("missing_arg")
+ func(1, **{arg2: 3})
+ with pytest.raises(TypeError,
+ match="got an unexpected keyword argument 'missing_arg'"):
+ func(2, **{missing_arg: 3})
+
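+
+def test_positional_only_sketch():
+    # Hedged sketch (an addition to the file above): `arg1` is
+    # positional-only per the `/` in the documented signature, so passing
+    # it by keyword is assumed to be rejected with a TypeError.
+    with pytest.raises(TypeError):
+        func(arg1=1, arg2=2, arg3=3)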
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py
new file mode 100644
index 00000000..fade5729
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_array_coercion.py
@@ -0,0 +1,835 @@
+"""
+Tests for array coercion, mainly through testing `np.array` results directly.
+Note that other such tests exist e.g. in `test_api.py` and many corner-cases
+are tested (sometimes indirectly) elsewhere.
+"""
+
+from itertools import permutations, product
+
+import pytest
+from pytest import param
+
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.core._multiarray_umath import _discover_array_parameters
+
+from numpy.testing import (
+ assert_array_equal, assert_warns, IS_PYPY)
+
+
+def arraylikes():
+ """
+ Generator for functions converting an array into various array-likes.
+    Some of the yielded array-likes are not capable of handling all dtypes.
+ """
+ # base array:
+ def ndarray(a):
+ return a
+
+ yield param(ndarray, id="ndarray")
+
+ # subclass:
+ class MyArr(np.ndarray):
+ pass
+
+ def subclass(a):
+ return a.view(MyArr)
+
+ yield subclass
+
+ class _SequenceLike():
+        # A warning is given because array-likes were also expected to be
+        # sequence-like in `np.array([array_like])`; this can be removed
+        # once the deprecation expires (it started in NumPy 1.20).
+ def __len__(self):
+ raise TypeError
+
+ def __getitem__(self):
+ raise TypeError
+
+    # Array dunder (__array__):
+ class ArrayDunder(_SequenceLike):
+ def __init__(self, a):
+ self.a = a
+
+ def __array__(self, dtype=None):
+ return self.a
+
+ yield param(ArrayDunder, id="__array__")
+
+ # memory-view
+ yield param(memoryview, id="memoryview")
+
+ # Array-interface
+ class ArrayInterface(_SequenceLike):
+ def __init__(self, a):
+ self.a = a # need to hold on to keep interface valid
+ self.__array_interface__ = a.__array_interface__
+
+ yield param(ArrayInterface, id="__array_interface__")
+
+ # Array-Struct
+ class ArrayStruct(_SequenceLike):
+ def __init__(self, a):
+ self.a = a # need to hold on to keep struct valid
+ self.__array_struct__ = a.__array_struct__
+
+ yield param(ArrayStruct, id="__array_struct__")
+
+
+def scalar_instances(times=True, extended_precision=True, user_dtype=True):
+ # Hard-coded list of scalar instances.
+ # Floats:
+ yield param(np.sqrt(np.float16(5)), id="float16")
+ yield param(np.sqrt(np.float32(5)), id="float32")
+ yield param(np.sqrt(np.float64(5)), id="float64")
+ if extended_precision:
+ yield param(np.sqrt(np.longdouble(5)), id="longdouble")
+
+ # Complex:
+ yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
+ yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
+ if extended_precision:
+ yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
+
+ # Bool:
+ # XFAIL: Bool should be added, but has some bad properties when it
+ # comes to strings, see also gh-9875
+ # yield param(np.bool_(0), id="bool")
+
+ # Integers:
+ yield param(np.int8(2), id="int8")
+ yield param(np.int16(2), id="int16")
+ yield param(np.int32(2), id="int32")
+ yield param(np.int64(2), id="int64")
+
+ yield param(np.uint8(2), id="uint8")
+ yield param(np.uint16(2), id="uint16")
+ yield param(np.uint32(2), id="uint32")
+ yield param(np.uint64(2), id="uint64")
+
+ # Rational:
+ if user_dtype:
+ yield param(rational(1, 2), id="rational")
+
+ # Cannot create a structured void scalar directly:
+ structured = np.array([(1, 3)], "i,i")[0]
+ assert isinstance(structured, np.void)
+ assert structured.dtype == np.dtype("i,i")
+ yield param(structured, id="structured")
+
+ if times:
+ # Datetimes and timedelta
+ yield param(np.timedelta64(2), id="timedelta64[generic]")
+ yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
+ yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
+
+ yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
+ yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
+
+ # Strings and unstructured void:
+ yield param(np.bytes_(b"1234"), id="bytes")
+ yield param(np.unicode_("2345"), id="unicode")
+ yield param(np.void(b"4321"), id="unstructured_void")
+
+
+def is_parametric_dtype(dtype):
+ """Returns True if the dtype is a parametric legacy dtype (itemsize
+ is 0, or a datetime without units)
+ """
+ if dtype.itemsize == 0:
+ return True
+ if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
+ if dtype.name.endswith("64"):
+ # Generic time units
+ return True
+ return False
+
+
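+# Quick self-check of the helper above (an illustrative addition; the
+# expected values follow directly from the docstring):
+def test_is_parametric_dtype_examples():
+    assert is_parametric_dtype(np.dtype("S"))  # flexible, itemsize 0
+    assert not is_parametric_dtype(np.dtype("S5"))
+    assert is_parametric_dtype(np.dtype("M8"))  # generic datetime unit
+    assert not is_parametric_dtype(np.dtype("M8[s]"))
+
+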
+class TestStringDiscovery:
+ @pytest.mark.parametrize("obj",
+ [object(), 1.2, 10**43, None, "string"],
+ ids=["object", "1.2", "10**43", "None", "string"])
+ def test_basic_stringlength(self, obj):
+ length = len(str(obj))
+ expected = np.dtype(f"S{length}")
+
+ assert np.array(obj, dtype="S").dtype == expected
+ assert np.array([obj], dtype="S").dtype == expected
+
+ # A nested array is also discovered correctly
+ arr = np.array(obj, dtype="O")
+ assert np.array(arr, dtype="S").dtype == expected
+ # Check that .astype() behaves identical
+ assert arr.astype("S").dtype == expected
+
+ @pytest.mark.parametrize("obj",
+ [object(), 1.2, 10**43, None, "string"],
+ ids=["object", "1.2", "10**43", "None", "string"])
+ def test_nested_arrays_stringlength(self, obj):
+ length = len(str(obj))
+ expected = np.dtype(f"S{length}")
+ arr = np.array(obj, dtype="O")
+ assert np.array([arr, arr], dtype="S").dtype == expected
+
+ @pytest.mark.parametrize("arraylike", arraylikes())
+ def test_unpack_first_level(self, arraylike):
+        # We unpack exactly one level of array-likes
+ obj = np.array([None])
+ obj[0] = np.array(1.2)
+ # the length of the included item, not of the float dtype
+ length = len(str(obj[0]))
+ expected = np.dtype(f"S{length}")
+
+ obj = arraylike(obj)
+ # casting to string usually calls str(obj)
+ arr = np.array([obj], dtype="S")
+ assert arr.shape == (1, 1)
+ assert arr.dtype == expected
+
+
+class TestScalarDiscovery:
+ def test_void_special_case(self):
+ # Void dtypes with structures discover tuples as elements
+ arr = np.array((1, 2, 3), dtype="i,i,i")
+ assert arr.shape == ()
+ arr = np.array([(1, 2, 3)], dtype="i,i,i")
+ assert arr.shape == (1,)
+
+ def test_char_special_case(self):
+ arr = np.array("string", dtype="c")
+ assert arr.shape == (6,)
+ assert arr.dtype.char == "c"
+ arr = np.array(["string"], dtype="c")
+ assert arr.shape == (1, 6)
+ assert arr.dtype.char == "c"
+
+ def test_char_special_case_deep(self):
+ # Check that the character special case errors correctly if the
+ # array is too deep:
+ nested = ["string"] # 2 dimensions (due to string being sequence)
+ for i in range(np.MAXDIMS - 2):
+ nested = [nested]
+
+ arr = np.array(nested, dtype='c')
+ assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
+ with pytest.raises(ValueError):
+ np.array([nested], dtype="c")
+
+ def test_unknown_object(self):
+ arr = np.array(object())
+ assert arr.shape == ()
+ assert arr.dtype == np.dtype("O")
+
+ @pytest.mark.parametrize("scalar", scalar_instances())
+ def test_scalar(self, scalar):
+ arr = np.array(scalar)
+ assert arr.shape == ()
+ assert arr.dtype == scalar.dtype
+
+ arr = np.array([[scalar, scalar]])
+ assert arr.shape == (1, 2)
+ assert arr.dtype == scalar.dtype
+
+    # In addition to strings, this test also runs into a corner case
+    # with datetime promotion (the difference being the promotion order).
+ @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
+ def test_scalar_promotion(self):
+ for sc1, sc2 in product(scalar_instances(), scalar_instances()):
+ sc1, sc2 = sc1.values[0], sc2.values[0]
+ # test all combinations:
+ try:
+ arr = np.array([sc1, sc2])
+ except (TypeError, ValueError):
+ # The promotion between two times can fail
+ # XFAIL (ValueError): Some object casts are currently undefined
+ continue
+ assert arr.shape == (2,)
+ try:
+ dt1, dt2 = sc1.dtype, sc2.dtype
+ expected_dtype = np.promote_types(dt1, dt2)
+ assert arr.dtype == expected_dtype
+            except TypeError:
+ # Will currently always go to object dtype
+ assert arr.dtype == np.dtype("O")
+
+ @pytest.mark.parametrize("scalar", scalar_instances())
+ def test_scalar_coercion(self, scalar):
+ # This tests various scalar coercion paths, mainly for the numerical
+ # types. It includes some paths not directly related to `np.array`
+ if isinstance(scalar, np.inexact):
+ # Ensure we have a full-precision number if available
+ scalar = type(scalar)((scalar * 2)**0.5)
+
+ if type(scalar) is rational:
+ # Rational generally fails due to a missing cast. In the future
+ # object casts should automatically be defined based on `setitem`.
+ pytest.xfail("Rational to object cast is undefined currently.")
+
+ # Use casting from object:
+ arr = np.array(scalar, dtype=object).astype(scalar.dtype)
+
+ # Test various ways to create an array containing this scalar:
+ arr1 = np.array(scalar).reshape(1)
+ arr2 = np.array([scalar])
+ arr3 = np.empty(1, dtype=scalar.dtype)
+ arr3[0] = scalar
+ arr4 = np.empty(1, dtype=scalar.dtype)
+ arr4[:] = [scalar]
+ # All of these methods should yield the same results
+ assert_array_equal(arr, arr1)
+ assert_array_equal(arr, arr2)
+ assert_array_equal(arr, arr3)
+ assert_array_equal(arr, arr4)
+
+ @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
+ @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+ @pytest.mark.parametrize("cast_to", scalar_instances())
+ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
+ """
+ Test that in most cases:
+ * `np.array(scalar, dtype=dtype)`
+ * `np.empty((), dtype=dtype)[()] = scalar`
+ * `np.array(scalar).astype(dtype)`
+    should behave the same. The only exceptions are parametric dtypes
+    (mainly datetime/timedelta without unit) and void without fields.
+ """
+ dtype = cast_to.dtype # use to parametrize only the target dtype
+
+ for scalar in scalar_instances(times=False):
+ scalar = scalar.values[0]
+
+ if dtype.type == np.void:
+ if scalar.dtype.fields is not None and dtype.fields is None:
+ # Here, coercion to "V6" works, but the cast fails.
+ # Since the types are identical, SETITEM takes care of
+ # this, but has different rules than the cast.
+ with pytest.raises(TypeError):
+ np.array(scalar).astype(dtype)
+ np.array(scalar, dtype=dtype)
+ np.array([scalar], dtype=dtype)
+ continue
+
+ # The main test, we first try to use casting and if it succeeds
+ # continue below testing that things are the same, otherwise
+ # test that the alternative paths at least also fail.
+ try:
+ cast = np.array(scalar).astype(dtype)
+ except (TypeError, ValueError, RuntimeError):
+ # coercion should also raise (error type may change)
+ with pytest.raises(Exception):
+ np.array(scalar, dtype=dtype)
+
+ if (isinstance(scalar, rational) and
+ np.issubdtype(dtype, np.signedinteger)):
+ return
+
+ with pytest.raises(Exception):
+ np.array([scalar], dtype=dtype)
+ # assignment should also raise
+ res = np.zeros((), dtype=dtype)
+ with pytest.raises(Exception):
+ res[()] = scalar
+
+ return
+
+ # Non error path:
+ arr = np.array(scalar, dtype=dtype)
+ assert_array_equal(arr, cast)
+ # assignment behaves the same
+ ass = np.zeros((), dtype=dtype)
+ ass[()] = scalar
+ assert_array_equal(ass, cast)
+
+ @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
+ def test_pyscalar_subclasses(self, pyscalar):
+ """NumPy arrays are read/write which means that anything but invariant
+ behaviour is on thin ice. However, we currently are happy to discover
+ subclasses of Python float, int, complex the same as the base classes.
+ This should potentially be deprecated.
+ """
+ class MyScalar(type(pyscalar)):
+ pass
+
+ res = np.array(MyScalar(pyscalar))
+ expected = np.array(pyscalar)
+ assert_array_equal(res, expected)
+
+ @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
+ def test_default_dtype_instance(self, dtype_char):
+ if dtype_char in "SU":
+ dtype = np.dtype(dtype_char + "1")
+ elif dtype_char == "V":
+ # Legacy behaviour was to use V8. The reason was float64 being the
+ # default dtype and that having 8 bytes.
+ dtype = np.dtype("V8")
+ else:
+ dtype = np.dtype(dtype_char)
+
+ discovered_dtype, _ = _discover_array_parameters([], type(dtype))
+
+ assert discovered_dtype == dtype
+ assert discovered_dtype.itemsize == dtype.itemsize
+
+ @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+ @pytest.mark.parametrize(["scalar", "error"],
+ [(np.float64(np.nan), ValueError),
+ (np.array(-1).astype(np.ulonglong)[()], OverflowError)])
+ def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
+ """
+        Signed integers are currently different in that they do not cast
+        other NumPy scalars, but instead use scalar.__int__(). The hardcoded
+ exception to this rule is `np.array(scalar, dtype=integer)`.
+ """
+ dtype = np.dtype(dtype)
+
+ # This is a special case using casting logic. It warns for the NaN
+ # but allows the cast (giving undefined behaviour).
+ with np.errstate(invalid="ignore"):
+ coerced = np.array(scalar, dtype=dtype)
+ cast = np.array(scalar).astype(dtype)
+ assert_array_equal(coerced, cast)
+
+ # However these fail:
+ with pytest.raises(error):
+ np.array([scalar], dtype=dtype)
+ with pytest.raises(error):
+ cast[()] = scalar
+
+
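+# Module-level sketch (an illustrative addition) of the asymmetry described
+# in `test_scalar_to_int_coerce_does_not_cast` above: the 0-D coercion goes
+# through the cast (and only warns for NaN), while the 1-D path uses
+# `scalar.__int__()` and raises.
+def test_nan_to_int_paths_sketch():
+    with np.errstate(invalid="ignore"):
+        np.array(np.float64(np.nan), dtype=np.int64)  # cast path, no error
+    with pytest.raises(ValueError):
+        np.array([np.float64(np.nan)], dtype=np.int64)
+
+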
+class TestTimeScalars:
+ @pytest.mark.parametrize("dtype", [np.int64, np.float32])
+ @pytest.mark.parametrize("scalar",
+ [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
+ param(np.timedelta64(123, "s"), id="timedelta64[s]"),
+ param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
+ param(np.datetime64(1, "D"), id="datetime64[D]")],)
+ def test_coercion_basic(self, dtype, scalar):
+ # Note the `[scalar]` is there because np.array(scalar) uses stricter
+ # `scalar.__int__()` rules for backward compatibility right now.
+ arr = np.array(scalar, dtype=dtype)
+ cast = np.array(scalar).astype(dtype)
+ assert_array_equal(arr, cast)
+
+ ass = np.ones((), dtype=dtype)
+ if issubclass(dtype, np.integer):
+ with pytest.raises(TypeError):
+                # Raises, as would np.array([scalar], dtype=dtype): this is
+                # a conversion from times, but with integer behaviour.
+ ass[()] = scalar
+ else:
+ ass[()] = scalar
+ assert_array_equal(ass, cast)
+
+ @pytest.mark.parametrize("dtype", [np.int64, np.float32])
+ @pytest.mark.parametrize("scalar",
+ [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
+ param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
+ def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
+ # Only "ns" and "generic" timedeltas can be converted to numbers
+ # so these are slightly special.
+ arr = np.array(scalar, dtype=dtype)
+ cast = np.array(scalar).astype(dtype)
+ ass = np.ones((), dtype=dtype)
+        ass[()] = scalar  # succeeds here, since these units can be converted
+
+ assert_array_equal(arr, cast)
+        assert_array_equal(ass, cast)
+
+ @pytest.mark.parametrize("dtype", ["S6", "U6"])
+ @pytest.mark.parametrize(["val", "unit"],
+ [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+ def test_coercion_assignment_datetime(self, val, unit, dtype):
+ # String from datetime64 assignment is currently special cased to
+        # never use casting. This is because the cast errors in this case,
+        # and traditionally the behaviour has mostly been kept like this.
+        # (`np.array(scalar, dtype="U6")` would previously have failed.)
+ # TODO: This discrepancy _should_ be resolved, either by relaxing the
+ # cast, or by deprecating the first part.
+ scalar = np.datetime64(val, unit)
+ dtype = np.dtype(dtype)
+ cut_string = dtype.type(str(scalar)[:6])
+
+ arr = np.array(scalar, dtype=dtype)
+ assert arr[()] == cut_string
+ ass = np.ones((), dtype=dtype)
+ ass[()] = scalar
+ assert ass[()] == cut_string
+
+ with pytest.raises(RuntimeError):
+            # However, unlike the assignment above, which uses
+            # `str(scalar)[:6]` because it is handled by the string DType
+            # and not by casting, the explicit cast fails:
+ np.array(scalar).astype(dtype)
+
+
+ @pytest.mark.parametrize(["val", "unit"],
+ [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+ def test_coercion_assignment_timedelta(self, val, unit):
+ scalar = np.timedelta64(val, unit)
+
+ # Unlike datetime64, timedelta allows the unsafe cast:
+ np.array(scalar, dtype="S6")
+ cast = np.array(scalar).astype("S6")
+ ass = np.ones((), dtype="S6")
+ ass[()] = scalar
+ expected = scalar.astype("S")[:6]
+ assert cast[()] == expected
+ assert ass[()] == expected
+
+class TestNested:
+ def test_nested_simple(self):
+ initial = [1.2]
+ nested = initial
+ for i in range(np.MAXDIMS - 1):
+ nested = [nested]
+
+ arr = np.array(nested, dtype="float64")
+ assert arr.shape == (1,) * np.MAXDIMS
+ with pytest.raises(ValueError):
+ np.array([nested], dtype="float64")
+
+ with pytest.raises(ValueError, match=".*would exceed the maximum"):
+ np.array([nested]) # user must ask for `object` explicitly
+
+ arr = np.array([nested], dtype=object)
+ assert arr.dtype == np.dtype("O")
+ assert arr.shape == (1,) * np.MAXDIMS
+ assert arr.item() is initial
+
+ def test_pathological_self_containing(self):
+ # Test that this also works for two nested sequences
+ l = []
+ l.append(l)
+ arr = np.array([l, l, l], dtype=object)
+ assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
+
+ # Also check a ragged case:
+ arr = np.array([l, [None], l], dtype=object)
+ assert arr.shape == (3, 1)
+
+ @pytest.mark.parametrize("arraylike", arraylikes())
+ def test_nested_arraylikes(self, arraylike):
+        # We try storing an array-like into an array, but the array-like
+ # will have too many dimensions. This means the shape discovery
+ # decides that the array-like must be treated as an object (a special
+ # case of ragged discovery). The result will be an array with one
+ # dimension less than the maximum dimensions, and the array being
+ # assigned to it (which does work for object or if `float(arraylike)`
+ # works).
+ initial = arraylike(np.ones((1, 1)))
+
+ nested = initial
+ for i in range(np.MAXDIMS - 1):
+ nested = [nested]
+
+ with pytest.raises(ValueError, match=".*would exceed the maximum"):
+            # It refuses to assign the array-like into the float64 array:
+ np.array(nested, dtype="float64")
+
+ # If this is object, we end up assigning a (1, 1) array into (1,)
+ # (due to running out of dimensions), this is currently supported but
+ # a special case which is not ideal.
+ arr = np.array(nested, dtype=object)
+ assert arr.shape == (1,) * np.MAXDIMS
+ assert arr.item() == np.array(initial).item()
+
+ @pytest.mark.parametrize("arraylike", arraylikes())
+ def test_uneven_depth_ragged(self, arraylike):
+ arr = np.arange(4).reshape((2, 2))
+ arr = arraylike(arr)
+
+ # Array is ragged in the second dimension already:
+ out = np.array([arr, [arr]], dtype=object)
+ assert out.shape == (2,)
+ assert out[0] is arr
+ assert type(out[1]) is list
+
+ # Array is ragged in the third dimension:
+ with pytest.raises(ValueError):
+ # This is a broadcast error during assignment, because
+ # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
+ np.array([arr, [arr, arr]], dtype=object)
+
+ def test_empty_sequence(self):
+ arr = np.array([[], [1], [[1]]], dtype=object)
+ assert arr.shape == (3,)
+
+ # The empty sequence stops further dimension discovery, so the
+ # result shape will be (0,) which leads to an error during:
+ with pytest.raises(ValueError):
+ np.array([[], np.empty((0, 1))], dtype=object)
+
+ def test_array_of_different_depths(self):
+        # When multiple arrays (or array-likes) are included in a
+        # sequence and have different depths, we currently discover
+        # as many dimensions as they share. (see also gh-17224)
+ arr = np.zeros((3, 2))
+ mismatch_first_dim = np.zeros((1, 2))
+ mismatch_second_dim = np.zeros((3, 3))
+
+ dtype, shape = _discover_array_parameters(
+ [arr, mismatch_second_dim], dtype=np.dtype("O"))
+ assert shape == (2, 3)
+
+ dtype, shape = _discover_array_parameters(
+ [arr, mismatch_first_dim], dtype=np.dtype("O"))
+ assert shape == (2,)
+ # The second case is currently supported because the arrays
+ # can be stored as objects:
+ res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
+ assert res[0] is arr
+ assert res[1] is mismatch_first_dim
+
+
+class TestBadSequences:
+ # These are tests for bad objects passed into `np.array`, in general
+    # these have undefined behaviour. In the old code they partially worked,
+    # whereas now they will fail. We could (and maybe should) create a copy
+    # of all sequences to be safe against bad actors.
+
+ def test_growing_list(self):
+ # List to coerce, `mylist` will append to it during coercion
+ obj = []
+ class mylist(list):
+ def __len__(self):
+ obj.append([1, 2])
+ return super().__len__()
+
+ obj.append(mylist([1, 2]))
+
+ with pytest.raises(RuntimeError):
+ np.array(obj)
+
+ # Note: We do not test a shrinking list. These do very evil things
+ # and the only way to fix them would be to copy all sequences.
+ # (which may be a real option in the future).
+
+ def test_mutated_list(self):
+ # List to coerce, `mylist` will mutate the first element
+ obj = []
+ class mylist(list):
+ def __len__(self):
+ obj[0] = [2, 3] # replace with a different list.
+ return super().__len__()
+
+ obj.append([2, 3])
+ obj.append(mylist([1, 2]))
+ # Does not crash:
+ np.array(obj)
+
+ def test_replace_0d_array(self):
+        # List to coerce; `baditem` will mutate the cached 0-d array
+ obj = []
+ class baditem:
+ def __len__(self):
+                obj[0][0] = 2  # replace the cached array with a plain int
+ raise ValueError("not actually a sequence!")
+
+ def __getitem__(self):
+ pass
+
+        # Runs into a corner case in the new code: the `array(2)` is cached,
+        # so replacing it invalidates the cache.
+ obj.append([np.array(2), baditem()])
+ with pytest.raises(RuntimeError):
+ np.array(obj)
+
+
+class TestArrayLikes:
+ @pytest.mark.parametrize("arraylike", arraylikes())
+ def test_0d_object_special_case(self, arraylike):
+ arr = np.array(0.)
+ obj = arraylike(arr)
+ # A single array-like is always converted:
+ res = np.array(obj, dtype=object)
+ assert_array_equal(arr, res)
+
+ # But a single 0-D nested array-like never:
+ res = np.array([obj], dtype=object)
+ assert res[0] is obj
+
+ def test_0d_generic_special_case(self):
+ class ArraySubclass(np.ndarray):
+ def __float__(self):
+ raise TypeError("e.g. quantities raise on this")
+
+ arr = np.array(0.)
+ obj = arr.view(ArraySubclass)
+ res = np.array(obj)
+ # The subclass is simply cast:
+ assert_array_equal(arr, res)
+
+ # If the 0-D array-like is included, __float__ is currently
+ # guaranteed to be used. We may want to change that, quantities
+ # and masked arrays half make use of this.
+ with pytest.raises(TypeError):
+ np.array([obj])
+
+ # The same holds for memoryview:
+ obj = memoryview(arr)
+ res = np.array(obj)
+ assert_array_equal(arr, res)
+ with pytest.raises(ValueError):
+ # The error type does not matter much here.
+ np.array([obj])
+
+ def test_arraylike_classes(self):
+ # The classes of array-likes should generally be acceptable to be
+ # stored inside a numpy (object) array. This tests all of the
+ # special attributes (since all are checked during coercion).
+ arr = np.array(np.int64)
+ assert arr[()] is np.int64
+ arr = np.array([np.int64])
+ assert arr[0] is np.int64
+
+ # This also works for properties/unbound methods:
+ class ArrayLike:
+ @property
+ def __array_interface__(self):
+ pass
+
+ @property
+ def __array_struct__(self):
+ pass
+
+ def __array__(self):
+ pass
+
+ arr = np.array(ArrayLike)
+ assert arr[()] is ArrayLike
+ arr = np.array([ArrayLike])
+ assert arr[0] is ArrayLike
+
+ @pytest.mark.skipif(
+ np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
+ def test_too_large_array_error_paths(self):
+ """Test the error paths, including for memory leaks"""
+ arr = np.array(0, dtype="uint8")
+ # Guarantees that a contiguous copy won't work:
+ arr = np.broadcast_to(arr, 2**62)
+
+ for i in range(5):
+ # repeat, to ensure caching cannot have an effect:
+ with pytest.raises(MemoryError):
+ np.array(arr)
+ with pytest.raises(MemoryError):
+ np.array([arr])
+
+ @pytest.mark.parametrize("attribute",
+ ["__array_interface__", "__array__", "__array_struct__"])
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
+ def test_bad_array_like_attributes(self, attribute, error):
+ # RecursionError and MemoryError are considered fatal. All errors
+ # (except AttributeError) should probably be raised in the future,
+ # but shapely made use of it, so it will require a deprecation.
+
+ class BadInterface:
+ def __getattr__(self, attr):
+ if attr == attribute:
+ raise error
+ super().__getattr__(attr)
+
+ with pytest.raises(error):
+ np.array(BadInterface())
+
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
+ def test_bad_array_like_bad_length(self, error):
+ # RecursionError and MemoryError are considered "critical" in
+ # sequences. We could expand this more generally though. (NumPy 1.20)
+ class BadSequence:
+ def __len__(self):
+ raise error
+ def __getitem__(self):
+ # must have getitem to be a Sequence
+ return 1
+
+ with pytest.raises(error):
+ np.array(BadSequence())
+
+
+class TestAsArray:
+ """Test expected behaviors of ``asarray``."""
+
+ def test_dtype_identity(self):
+ """Confirm the intended behavior for *dtype* kwarg.
+
+ The result of ``asarray()`` should have the dtype provided through the
+ keyword argument, when used. This forces unique array handles to be
+ produced for unique np.dtype objects, but (for equivalent dtypes), the
+ underlying data (the base object) is shared with the original array
+ object.
+
+ Ref https://github.com/numpy/numpy/issues/1468
+ """
+ int_array = np.array([1, 2, 3], dtype='i')
+ assert np.asarray(int_array) is int_array
+
+ # The character code resolves to the singleton dtype object provided
+ # by the numpy package.
+ assert np.asarray(int_array, dtype='i') is int_array
+
+        # Derive a dtype from np.dtype('i'), but add a metadata object to force
+ # the dtype to be distinct.
+ unequal_type = np.dtype('i', metadata={'spam': True})
+ annotated_int_array = np.asarray(int_array, dtype=unequal_type)
+ assert annotated_int_array is not int_array
+ assert annotated_int_array.base is int_array
+ # Create an equivalent descriptor with a new and distinct dtype
+ # instance.
+ equivalent_requirement = np.dtype('i', metadata={'spam': True})
+ annotated_int_array_alt = np.asarray(annotated_int_array,
+ dtype=equivalent_requirement)
+ assert unequal_type == equivalent_requirement
+ assert unequal_type is not equivalent_requirement
+ assert annotated_int_array_alt is not annotated_int_array
+ assert annotated_int_array_alt.dtype is equivalent_requirement
+
+ # Check the same logic for a pair of C types whose equivalence may vary
+ # between computing environments.
+ # Find an equivalent pair.
+ integer_type_codes = ('i', 'l', 'q')
+ integer_dtypes = [np.dtype(code) for code in integer_type_codes]
+ typeA = None
+ typeB = None
+ for typeA, typeB in permutations(integer_dtypes, r=2):
+ if typeA == typeB:
+ assert typeA is not typeB
+ break
+ assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)
+
+ # These ``asarray()`` calls may produce a new view or a copy,
+ # but never the same object.
+ long_int_array = np.asarray(int_array, dtype='l')
+ long_long_int_array = np.asarray(int_array, dtype='q')
+ assert long_int_array is not int_array
+ assert long_long_int_array is not int_array
+ assert np.asarray(long_int_array, dtype='q') is not long_int_array
+ array_a = np.asarray(int_array, dtype=typeA)
+ assert typeA == typeB
+ assert typeA is not typeB
+ assert array_a.dtype is typeA
+ assert array_a is not np.asarray(array_a, dtype=typeB)
+ assert np.asarray(array_a, dtype=typeB).dtype is typeB
+ assert array_a is np.asarray(array_a, dtype=typeB).base
+
+
+class TestSpecialAttributeLookupFailure:
+ # An exception was raised while fetching the attribute
+
+ class WeirdArrayLike:
+ @property
+ def __array__(self):
+ raise RuntimeError("oops!")
+
+ class WeirdArrayInterface:
+ @property
+ def __array_interface__(self):
+ raise RuntimeError("oops!")
+
+ def test_deprecated(self):
+ with pytest.raises(RuntimeError):
+ np.array(self.WeirdArrayLike())
+ with pytest.raises(RuntimeError):
+ np.array(self.WeirdArrayInterface())
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py
new file mode 100644
index 00000000..8b1ab27c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_array_interface.py
@@ -0,0 +1,216 @@
+import sys
+import pytest
+import numpy as np
+from numpy.testing import extbuild
+
+
+@pytest.fixture
+def get_module(tmp_path):
+ """ Some codes to generate data and manage temporary buffers use when
+ sharing with numpy via the array interface protocol.
+ """
+
+ if not sys.platform.startswith('linux'):
+        pytest.skip('requires linux; the extension link step fails elsewhere (e.g. cygwin)')
+
+ prologue = '''
+ #include <Python.h>
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ #include <numpy/arrayobject.h>
+ #include <stdio.h>
+ #include <math.h>
+
+ NPY_NO_EXPORT
+ void delete_array_struct(PyObject *cap) {
+
+ /* get the array interface structure */
+ PyArrayInterface *inter = (PyArrayInterface*)
+ PyCapsule_GetPointer(cap, NULL);
+
+ /* get the buffer by which data was shared */
+ double *ptr = (double*)PyCapsule_GetContext(cap);
+
+ /* for the purposes of the regression test set the elements
+ to nan */
+ for (npy_intp i = 0; i < inter->shape[0]; ++i)
+ ptr[i] = nan("");
+
+ /* free the shared buffer */
+ free(ptr);
+
+ /* free the array interface structure */
+ free(inter->shape);
+ free(inter);
+
+ fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
+ " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
+ }
+ '''
+
+ functions = [
+ ("new_array_struct", "METH_VARARGS", """
+
+ long long n_elem = 0;
+ double value = 0.0;
+
+ if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
+ Py_RETURN_NONE;
+ }
+
+ /* allocate and initialize the data to share with numpy */
+ long long n_bytes = n_elem*sizeof(double);
+ double *data = (double*)malloc(n_bytes);
+
+ if (!data) {
+ PyErr_Format(PyExc_MemoryError,
+ "Failed to malloc %lld bytes", n_bytes);
+
+ Py_RETURN_NONE;
+ }
+
+ for (long long i = 0; i < n_elem; ++i) {
+ data[i] = value;
+ }
+
+ /* calculate the shape and stride */
+ int nd = 1;
+
+ npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
+ npy_intp *shape = ss;
+ npy_intp *stride = ss + nd;
+
+ shape[0] = n_elem;
+ stride[0] = sizeof(double);
+
+ /* construct the array interface */
+ PyArrayInterface *inter = (PyArrayInterface*)
+ malloc(sizeof(PyArrayInterface));
+
+ memset(inter, 0, sizeof(PyArrayInterface));
+
+ inter->two = 2;
+ inter->nd = nd;
+ inter->typekind = 'f';
+ inter->itemsize = sizeof(double);
+ inter->shape = shape;
+ inter->strides = stride;
+ inter->data = data;
+ inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
+ NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
+
+ /* package into a capsule */
+ PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
+
+ /* save the pointer to the data */
+ PyCapsule_SetContext(cap, data);
+
+ fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
+ " ptr = %ld\\n", (long)cap, (long)inter, (long)data);
+
+ return cap;
+ """)
+ ]
+
+ more_init = "import_array();"
+
+ try:
+ import array_interface_testing
+ return array_interface_testing
+ except ImportError:
+ pass
+
+ # if it does not exist, build and load it
+ return extbuild.build_and_import_extension('array_interface_testing',
+ functions,
+ prologue=prologue,
+ include_dirs=[np.get_include()],
+ build_dir=tmp_path,
+ more_init=more_init)
+
+
+@pytest.mark.slow
+def test_cstruct(get_module):
+
+ class data_source:
+ """
+ This class is for testing the timing of the PyCapsule destructor
+        invoked when numpy releases its reference to the shared data as part of
+ the numpy array interface protocol. If the PyCapsule destructor is
+ called early the shared data is freed and invalid memory accesses will
+ occur.
+ """
+
+ def __init__(self, size, value):
+ self.size = size
+ self.value = value
+
+ @property
+ def __array_struct__(self):
+ return get_module.new_array_struct(self.size, self.value)
+
+ # write to the same stream as the C code
+ stderr = sys.__stderr__
+
+ # used to validate the shared data.
+ expected_value = -3.1415
+ multiplier = -10000.0
+
+ # create some data to share with numpy via the array interface
+ # assign the data an expected value.
+ stderr.write(' ---- create an object to share data ---- \n')
+ buf = data_source(256, expected_value)
+ stderr.write(' ---- OK!\n\n')
+
+ # share the data
+ stderr.write(' ---- share data via the array interface protocol ---- \n')
+ arr = np.array(buf, copy=False)
+ stderr.write('arr.__array_interface___ = %s\n' % (
+ str(arr.__array_interface__)))
+ stderr.write('arr.base = %s\n' % (str(arr.base)))
+ stderr.write(' ---- OK!\n\n')
+
+ # release the source of the shared data. this will not release the data
+ # that was shared with numpy, that is done in the PyCapsule destructor.
+ stderr.write(' ---- destroy the object that shared data ---- \n')
+ buf = None
+ stderr.write(' ---- OK!\n\n')
+
+ # check that we got the expected data. If the PyCapsule destructor we
+ # defined was prematurely called then this test will fail because our
+    # destructor sets the elements of the array to NaN before freeing the
+ # buffer. Reading the values here may also cause a SEGV
+ assert np.allclose(arr, expected_value)
+
+ # read the data. If the PyCapsule destructor we defined was prematurely
+ # called then reading the values here may cause a SEGV and will be reported
+ # as invalid reads by valgrind
+ stderr.write(' ---- read shared data ---- \n')
+ stderr.write('arr = %s\n' % (str(arr)))
+ stderr.write(' ---- OK!\n\n')
+
+    # write to the shared buffer. If the shared data was prematurely deleted
+    # this may cause a SEGV and valgrind will report invalid writes
+ stderr.write(' ---- modify shared data ---- \n')
+ arr *= multiplier
+ expected_value *= multiplier
+ stderr.write('arr.__array_interface___ = %s\n' % (
+ str(arr.__array_interface__)))
+ stderr.write('arr.base = %s\n' % (str(arr.base)))
+ stderr.write(' ---- OK!\n\n')
+
+    # read the data. If the shared data was prematurely deleted this
+    # may cause a SEGV and valgrind will report invalid reads
+ stderr.write(' ---- read modified shared data ---- \n')
+ stderr.write('arr = %s\n' % (str(arr)))
+ stderr.write(' ---- OK!\n\n')
+
+ # check that we got the expected data. If the PyCapsule destructor we
+ # defined was prematurely called then this test will fail because our
+    # destructor sets the elements of the array to NaN before freeing the
+ # buffer. Reading the values here may also cause a SEGV
+ assert np.allclose(arr, expected_value)
+
+ # free the shared data, the PyCapsule destructor should run here
+ stderr.write(' ---- free shared data ---- \n')
+ arr = None
+ stderr.write(' ---- OK!\n\n')
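+
+
+def test_array_interface_dict_sketch():
+    # A pure-Python companion sketch (an illustrative addition): the dict
+    # form of `__array_interface__` shares a buffer without any extension
+    # module; `copy=False` is assumed to produce a writable view here.
+    base = np.arange(4.0)
+
+    class Holder:
+        __array_interface__ = base.__array_interface__
+
+    shared = np.array(Holder(), copy=False)
+    assert np.shares_memory(shared, base)
+    shared[0] = -1.0  # writes through to the original buffer
+    assert base[0] == -1.0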
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py
new file mode 100644
index 00000000..6b75d192
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_arraymethod.py
@@ -0,0 +1,93 @@
+"""
+This file tests the generic aspects of ArrayMethod. At the time of writing
+this is private API; once public API is added, it may be tested here as well.
+"""
+
+from __future__ import annotations
+
+import sys
+import types
+from typing import Any
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
+
+
+class TestResolveDescriptors:
+    # Test mainly the error paths of the resolve_descriptors function;
+    # note that the `casting_unittests` tests exercise the non-error paths.
+
+ # Casting implementations are the main/only current user:
+ method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+
+ @pytest.mark.parametrize("args", [
+ (True,), # Not a tuple.
+ ((None,),), # Too few elements
+ ((None, None, None),), # Too many
+ ((None, None),), # Input dtype is None, which is invalid.
+ ((np.dtype("d"), True),), # Output dtype is not a dtype
+ ((np.dtype("f"), None),), # Input dtype does not match method
+ ])
+ def test_invalid_arguments(self, args):
+ with pytest.raises(TypeError):
+ self.method._resolve_descriptors(*args)
+
+
+class TestSimpleStridedCall:
+ # Test mainly the error paths of the simple_strided_call function;
+ # note that the `casting_unittests` tests exercise the non-error paths.
+
+ # Casting implementations are the main/only current user:
+ method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
+
+ @pytest.mark.parametrize(["args", "error"], [
+ ((True,), TypeError), # Not a tuple
+ (((None,),), TypeError), # Too few elements
+ ((None, None), TypeError), # Inputs are not arrays.
+ (((None, None, None),), TypeError), # Too many
+ (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
+ (((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
+ TypeError), # Does not support byte-swapping
+ (((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
+ ValueError), # not 1-D
+ (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
+ ValueError), # different length
+ (((np.frombuffer(b"\0x00"*3*2, dtype="d"),
+ np.frombuffer(b"\0x00"*3, dtype="f")),),
+ ValueError), # output not writeable
+ ])
+ def test_invalid_arguments(self, args, error):
+ # This is private API, which may be modified freely
+ with pytest.raises(error):
+ self.method._simple_strided_call(*args)
+
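+# For contrast with the error cases above, a sketch (illustrative addition,
+# relying on the same private API) of a valid invocation: both operands are
+# 1-D, contiguous, native-byte-order arrays matching the cast's dtypes.
+def _example_valid_strided_call():
+    src = np.arange(3, dtype="d")
+    dst = np.empty(3, dtype="f")
+    TestSimpleStridedCall.method._simple_strided_call((src, dst))
+    assert np.allclose(dst, src)
+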
+
+@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
+@pytest.mark.parametrize(
+ "cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
+)
+class TestClassGetItem:
+ def test_class_getitem(self, cls: type[np.ndarray]) -> None:
+ """Test `ndarray.__class_getitem__`."""
+ alias = cls[Any, Any]
+ assert isinstance(alias, types.GenericAlias)
+ assert alias.__origin__ is cls
+
+ @pytest.mark.parametrize("arg_len", range(4))
+ def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
+ arg_tup = (Any,) * arg_len
+ if arg_len in (1, 2):
+ assert cls[arg_tup]
+ else:
+ match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
+ with pytest.raises(TypeError, match=match):
+ cls[arg_tup]
+
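+# A sketch (illustrative addition) of what the runtime alias created by
+# `__class_getitem__` looks like on Python >= 3.9; the parameters are
+# stored unchecked in `__args__`:
+def _example_ndarray_alias():
+    alias = np.ndarray[Any, np.dtype[np.float64]]
+    assert alias.__origin__ is np.ndarray
+    assert alias.__args__ == (Any, np.dtype[np.float64])
+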
+
+@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
+def test_class_getitem_38() -> None:
+ match = "Type subscription requires python >= 3.9"
+ with pytest.raises(TypeError, match=match):
+ np.ndarray[Any, Any]
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py
new file mode 100644
index 00000000..f1883f70
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_arrayprint.py
@@ -0,0 +1,967 @@
+import sys
+import gc
+from hypothesis import given
+from hypothesis.extra import numpy as hynp
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+ assert_raises_regex,
+ )
+import textwrap
+
+class TestArrayRepr:
+ def test_nan_inf(self):
+ x = np.array([np.nan, np.inf])
+ assert_equal(repr(x), 'array([nan, inf])')
+
+ def test_subclass(self):
+ class sub(np.ndarray): pass
+
+ # one dimensional
+ x1d = np.array([1, 2]).view(sub)
+ assert_equal(repr(x1d), 'sub([1, 2])')
+
+ # two dimensional
+ x2d = np.array([[1, 2], [3, 4]]).view(sub)
+ assert_equal(repr(x2d),
+ 'sub([[1, 2],\n'
+ ' [3, 4]])')
+
+ # two dimensional with flexible dtype
+ xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
+ assert_equal(repr(xstruct),
+ "sub([[(1,), (1,)],\n"
+ " [(1,), (1,)]], dtype=[('a', '<i4')])"
+ )
+
+ @pytest.mark.xfail(reason="See gh-10544")
+ def test_object_subclass(self):
+ class sub(np.ndarray):
+ def __new__(cls, inp):
+ obj = np.asarray(inp).view(cls)
+ return obj
+
+ def __getitem__(self, ind):
+ ret = super().__getitem__(ind)
+ return sub(ret)
+
+ # test that object + subclass is OK:
+ x = sub([None, None])
+ assert_equal(repr(x), 'sub([None, None], dtype=object)')
+ assert_equal(str(x), '[None None]')
+
+ x = sub([None, sub([None, None])])
+ assert_equal(repr(x),
+ 'sub([None, sub([None, None], dtype=object)], dtype=object)')
+ assert_equal(str(x), '[None sub([None, None], dtype=object)]')
+
+ def test_0d_object_subclass(self):
+ # make sure that subclasses which return 0-d arrays instead
+ # of scalars don't cause infinite recursion in str
+ class sub(np.ndarray):
+ def __new__(cls, inp):
+ obj = np.asarray(inp).view(cls)
+ return obj
+
+ def __getitem__(self, ind):
+ ret = super().__getitem__(ind)
+ return sub(ret)
+
+ x = sub(1)
+ assert_equal(repr(x), 'sub(1)')
+ assert_equal(str(x), '1')
+
+ x = sub([1, 1])
+ assert_equal(repr(x), 'sub([1, 1])')
+ assert_equal(str(x), '[1 1]')
+
+ # check it works properly with object arrays too
+ x = sub(None)
+ assert_equal(repr(x), 'sub(None, dtype=object)')
+ assert_equal(str(x), 'None')
+
+ # plus recursive object arrays (even depth > 1)
+ y = sub(None)
+ x[()] = y
+ y[()] = x
+ assert_equal(repr(x),
+ 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+ assert_equal(str(x), '...')
+ x[()] = 0 # resolve circular references for garbage collector
+
+ # nested 0d-subclass-object
+ x = sub(None)
+ x[()] = sub(None)
+ assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+ assert_equal(str(x), 'None')
+
+ # gh-10663
+ class DuckCounter(np.ndarray):
+ def __getitem__(self, item):
+ result = super().__getitem__(item)
+ if not isinstance(result, DuckCounter):
+ result = result[...].view(DuckCounter)
+ return result
+
+ def to_string(self):
+ return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+ def __str__(self):
+ if self.shape == ():
+ return self.to_string()
+ else:
+ fmt = {'all': lambda x: x.to_string()}
+ return np.array2string(self, formatter=fmt)
+
+ dc = np.arange(5).view(DuckCounter)
+ assert_equal(str(dc), "[zero one two many many]")
+ assert_equal(str(dc[0]), "zero")
+
+ def test_self_containing(self):
+ arr0d = np.array(None)
+ arr0d[()] = arr0d
+ assert_equal(repr(arr0d),
+ 'array(array(..., dtype=object), dtype=object)')
+ arr0d[()] = 0 # resolve recursion for garbage collector
+
+ arr1d = np.array([None, None])
+ arr1d[1] = arr1d
+ assert_equal(repr(arr1d),
+ 'array([None, array(..., dtype=object)], dtype=object)')
+ arr1d[1] = 0 # resolve recursion for garbage collector
+
+ first = np.array(None)
+ second = np.array(None)
+ first[()] = second
+ second[()] = first
+ assert_equal(repr(first),
+ 'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+ first[()] = 0 # resolve circular references for garbage collector
+
+ def test_containing_list(self):
+ # printing square brackets directly would be ambiguous
+ arr1d = np.array([None, None])
+ arr1d[0] = [1, 2]
+ arr1d[1] = [3]
+ assert_equal(repr(arr1d),
+ 'array([list([1, 2]), list([3])], dtype=object)')
+
+ def test_void_scalar_recursion(self):
+ # gh-9345
+ repr(np.void(b'test')) # RecursionError ?
+
+ def test_fieldless_structured(self):
+ # gh-10366
+ no_fields = np.dtype([])
+ arr_no_fields = np.empty(4, dtype=no_fields)
+ assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
+
+
+class TestComplexArray:
+ def test_str(self):
+ rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
+ cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
+ dtypes = [np.complex64, np.cdouble, np.clongdouble]
+ actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
+ wanted = [
+ '[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
+ '[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
+ '[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
+ '[0.+infj]', '[0.+infj]', '[0.+infj]',
+ '[0.-infj]', '[0.-infj]', '[0.-infj]',
+ '[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
+ '[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
+ '[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
+ '[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
+ '[1.+infj]', '[1.+infj]', '[1.+infj]',
+ '[1.-infj]', '[1.-infj]', '[1.-infj]',
+ '[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
+ '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
+ '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
+ '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
+ '[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
+ '[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
+ '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
+ '[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
+ '[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
+ '[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
+ '[inf+infj]', '[inf+infj]', '[inf+infj]',
+ '[inf-infj]', '[inf-infj]', '[inf-infj]',
+ '[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
+ '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
+ '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
+ '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
+ '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
+ '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
+ '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
+ '[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
+ '[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
+ '[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
+ '[nan+infj]', '[nan+infj]', '[nan+infj]',
+ '[nan-infj]', '[nan-infj]', '[nan-infj]',
+ '[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
+
+ for res, val in zip(actual, wanted):
+ assert_equal(res, val)
+
+class TestArray2String:
+ def test_basic(self):
+ """Basic test of array2string."""
+ a = np.arange(3)
+ assert_(np.array2string(a) == '[0 1 2]')
+ assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
+ assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
+
+ def test_unexpected_kwarg(self):
+ # ensure that an appropriate TypeError
+ # is raised when array2string receives
+ # an unexpected kwarg
+
+ with assert_raises_regex(TypeError, 'nonsense'):
+ np.array2string(np.array([1, 2, 3]),
+ nonsense=None)
+
+ def test_format_function(self):
+ """Test custom format function for each element in array."""
+ def _format_function(x):
+ if np.abs(x) < 1:
+ return '.'
+ elif np.abs(x) < 2:
+ return 'o'
+ else:
+ return 'O'
+
+ x = np.arange(3)
+ x_hex = "[0x0 0x1 0x2]"
+ x_oct = "[0o0 0o1 0o2]"
+ assert_(np.array2string(x, formatter={'all':_format_function}) ==
+ "[. o O]")
+ assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
+ "[. o O]")
+ assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
+ "[0.0000 1.0000 2.0000]")
+ assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
+ x_hex)
+ assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
+ x_oct)
+
+ x = np.arange(3.)
+ assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
+ "[0.00 1.00 2.00]")
+ assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
+ "[0.00 1.00 2.00]")
+
+ s = np.array(['abc', 'def'])
+ assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
+ '[abcabc defdef]')
+
+
+ def test_structure_format(self):
+ dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+ assert_equal(np.array2string(x),
+ "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
+
+ np.set_printoptions(legacy='1.13')
+ try:
+ # for issue #5692
+ A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
+ A[5:].fill(np.datetime64('NaT'))
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
+ ('NaT',) ('NaT',) ('NaT',)]""")
+ )
+ finally:
+ np.set_printoptions(legacy=False)
+
+ # same again, but with non-legacy behavior
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
+ ('1970-01-01T00:00:00',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',)]""")
+ )
+
+ # and again, with timedeltas
+ A = np.full(10, 123456, dtype=[("A", "m8[s]")])
+ A[5:].fill(np.datetime64('NaT'))
+ assert_equal(
+ np.array2string(A),
+ textwrap.dedent("""\
+ [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
+ ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
+ )
+
+ # See #8160
+ struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
+ assert_equal(np.array2string(struct_int),
+ "[([ 1, -1],) ([123, 1],)]")
+ struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
+ dtype=[('B', 'i4', (2, 2))])
+ assert_equal(np.array2string(struct_2dint),
+ "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
+
+ # See #8172
+ array_scalar = np.array(
+ (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
+ assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
+
+ def test_unstructured_void_repr(self):
+ a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
+ 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
+ assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
+ assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
+ assert_equal(repr(a),
+ r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
+ r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
+
+ assert_equal(eval(repr(a), vars(np)), a)
+ assert_equal(eval(repr(a[0]), vars(np)), a[0])
+
+ def test_edgeitems_kwarg(self):
+ # previously the global print options would take precedence over the kwarg
+ arr = np.zeros(3, int)
+ assert_equal(
+ np.array2string(arr, edgeitems=1, threshold=0),
+ "[0 ... 0]"
+ )
+
+ def test_summarize_1d(self):
+ A = np.arange(1001)
+ strA = '[ 0 1 2 ... 998 999 1000]'
+ assert_equal(str(A), strA)
+
+ reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
+ assert_equal(repr(A), reprA)
+
+ def test_summarize_2d(self):
+ A = np.arange(1002).reshape(2, 501)
+ strA = '[[ 0 1 2 ... 498 499 500]\n' \
+ ' [ 501 502 503 ... 999 1000 1001]]'
+ assert_equal(str(A), strA)
+
+ reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
+ ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
+ assert_equal(repr(A), reprA)
+
+ def test_linewidth(self):
+ a = np.full(6, 1)
+
+ def make_str(a, width, **kw):
+ return np.array2string(a, separator="", max_line_width=width, **kw)
+
+ assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
+ assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
+ assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
+ ' 11]')
+
+ assert_equal(make_str(a, 8), '[111111]')
+ assert_equal(make_str(a, 7), '[11111\n'
+ ' 1]')
+ assert_equal(make_str(a, 5), '[111\n'
+ ' 111]')
+
+ b = a[None,None,:]
+
+ assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
+ assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
+ assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
+ ' 1]]]')
+
+ assert_equal(make_str(b, 12), '[[[111111]]]')
+ assert_equal(make_str(b, 9), '[[[111\n'
+ ' 111]]]')
+ assert_equal(make_str(b, 8), '[[[11\n'
+ ' 11\n'
+ ' 11]]]')
+
+ def test_wide_element(self):
+ a = np.array(['xxxxx'])
+ assert_equal(
+ np.array2string(a, max_line_width=5),
+ "['xxxxx']"
+ )
+ assert_equal(
+ np.array2string(a, max_line_width=5, legacy='1.13'),
+ "[ 'xxxxx']"
+ )
+
+ def test_multiline_repr(self):
+ class MultiLine:
+ def __repr__(self):
+ return "Line 1\nLine 2"
+
+ a = np.array([[None, MultiLine()], [MultiLine(), None]])
+
+ assert_equal(
+ np.array2string(a),
+ '[[None Line 1\n'
+ ' Line 2]\n'
+ ' [Line 1\n'
+ ' Line 2 None]]'
+ )
+ assert_equal(
+ np.array2string(a, max_line_width=5),
+ '[[None\n'
+ ' Line 1\n'
+ ' Line 2]\n'
+ ' [Line 1\n'
+ ' Line 2\n'
+ ' None]]'
+ )
+ assert_equal(
+ repr(a),
+ 'array([[None, Line 1\n'
+ ' Line 2],\n'
+ ' [Line 1\n'
+ ' Line 2, None]], dtype=object)'
+ )
+
+ class MultiLineLong:
+ def __repr__(self):
+ return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
+
+ a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
+ assert_equal(
+ repr(a),
+ 'array([[None, Line 1\n'
+ ' LooooooooooongestLine2\n'
+ ' LongerLine 3 ],\n'
+ ' [Line 1\n'
+ ' LooooooooooongestLine2\n'
+ ' LongerLine 3 , None]], dtype=object)'
+ )
+ assert_equal(
+ np.array_repr(a, 20),
+ 'array([[None,\n'
+ ' Line 1\n'
+ ' LooooooooooongestLine2\n'
+ ' LongerLine 3 ],\n'
+ ' [Line 1\n'
+ ' LooooooooooongestLine2\n'
+ ' LongerLine 3 ,\n'
+ ' None]],\n'
+ ' dtype=object)'
+ )
+
+ def test_nested_array_repr(self):
+ a = np.empty((2, 2), dtype=object)
+ a[0, 0] = np.eye(2)
+ a[0, 1] = np.eye(3)
+ a[1, 0] = None
+ a[1, 1] = np.ones((3, 1))
+ assert_equal(
+ repr(a),
+ 'array([[array([[1., 0.],\n'
+ ' [0., 1.]]), array([[1., 0., 0.],\n'
+ ' [0., 1., 0.],\n'
+ ' [0., 0., 1.]])],\n'
+ ' [None, array([[1.],\n'
+ ' [1.],\n'
+ ' [1.]])]], dtype=object)'
+ )
+
+ @given(hynp.from_dtype(np.dtype("U")))
+ def test_any_text(self, text):
+ # This test checks that, given any value that can be represented in an
+ # array of dtype("U") (i.e. unicode string), ...
+ a = np.array([text, text, text])
+ # casting a list of them to an array does not e.g. truncate the value
+ assert_equal(a[0], text)
+ # and that np.array2string puts a newline in the expected location
+ expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
+ result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
+ assert_equal(result, expected_repr)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount(self):
+ # make sure we do not hold references to the array due to a recursive
+ # closure (gh-10620)
+ gc.disable()
+ a = np.arange(2)
+ r1 = sys.getrefcount(a)
+ np.array2string(a)
+ np.array2string(a)
+ r2 = sys.getrefcount(a)
+ gc.collect()
+ gc.enable()
+ assert_(r1 == r2)
+
+class TestPrintOptions:
+ """Test getting and setting global print options."""
+
+ def setup_method(self):
+ self.oldopts = np.get_printoptions()
+
+ def teardown_method(self):
+ np.set_printoptions(**self.oldopts)
+
+ def test_basic(self):
+ x = np.array([1.5, 0, 1.234567890])
+ assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
+ np.set_printoptions(precision=4)
+ assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
+
+ def test_precision_zero(self):
+ np.set_printoptions(precision=0)
+ for values, string in (
+ ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
+ ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
+ ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
+ ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
+ x = np.array(values)
+ assert_equal(repr(x), "array([%s])" % string)
+
+ def test_formatter(self):
+ x = np.arange(3)
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+
+ def test_formatter_reset(self):
+ x = np.arange(3)
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+ np.set_printoptions(formatter={'int':None})
+ assert_equal(repr(x), "array([0, 1, 2])")
+
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+ np.set_printoptions(formatter={'all':None})
+ assert_equal(repr(x), "array([0, 1, 2])")
+
+ np.set_printoptions(formatter={'int':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1, 0, 1])")
+ np.set_printoptions(formatter={'int_kind':None})
+ assert_equal(repr(x), "array([0, 1, 2])")
+
+ x = np.arange(3.)
+ np.set_printoptions(formatter={'float':lambda x: str(x-1)})
+ assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
+ np.set_printoptions(formatter={'float_kind':None})
+ assert_equal(repr(x), "array([0., 1., 2.])")
+
+ def test_0d_arrays(self):
+ assert_equal(str(np.array('café', '<U4')), 'café')
+
+ assert_equal(repr(np.array('café', '<U4')),
+ "array('café', dtype='<U4')")
+ assert_equal(str(np.array('test', np.str_)), 'test')
+
+ a = np.zeros(1, dtype=[('a', '<i4', (3,))])
+ assert_equal(str(a[0]), '([0, 0, 0],)')
+
+ assert_equal(repr(np.datetime64('2005-02-25')[...]),
+ "array('2005-02-25', dtype='datetime64[D]')")
+
+ assert_equal(repr(np.timedelta64('10', 'Y')[...]),
+ "array(10, dtype='timedelta64[Y]')")
+
+ # repr of 0d arrays is affected by printoptions
+ x = np.array(1)
+ np.set_printoptions(formatter={'all':lambda x: "test"})
+ assert_equal(repr(x), "array(test)")
+ # str is unaffected
+ assert_equal(str(x), "1")
+
+ # check `style` arg raises
+ assert_warns(DeprecationWarning, np.array2string,
+ np.array(1.), style=repr)
+ # but not in legacy mode
+ np.array2string(np.array(1.), style=repr, legacy='1.13')
+ # gh-10934 style was broken in legacy mode, check it works
+ np.array2string(np.array(1.), legacy='1.13')
+
+ def test_float_spacing(self):
+ x = np.array([1., 2., 3.])
+ y = np.array([1., 2., -10.])
+ z = np.array([100., 2., -1.])
+ w = np.array([-100., 2., 1.])
+
+ assert_equal(repr(x), 'array([1., 2., 3.])')
+ assert_equal(repr(y), 'array([ 1., 2., -10.])')
+ assert_equal(repr(np.array(y[0])), 'array(1.)')
+ assert_equal(repr(np.array(y[-1])), 'array(-10.)')
+ assert_equal(repr(z), 'array([100., 2., -1.])')
+ assert_equal(repr(w), 'array([-100., 2., 1.])')
+
+ assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
+ assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
+
+ x = np.array([np.inf, 100000, 1.1234])
+ y = np.array([np.inf, 100000, -1.1234])
+ z = np.array([np.inf, 1.1234, -1e120])
+ np.set_printoptions(precision=2)
+ assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
+ assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
+ assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
+
+ def test_bool_spacing(self):
+ assert_equal(repr(np.array([True, True])),
+ 'array([ True, True])')
+ assert_equal(repr(np.array([True, False])),
+ 'array([ True, False])')
+ assert_equal(repr(np.array([True])),
+ 'array([ True])')
+ assert_equal(repr(np.array(True)),
+ 'array(True)')
+ assert_equal(repr(np.array(False)),
+ 'array(False)')
+
+ def test_sign_spacing(self):
+ a = np.arange(4.)
+ b = np.array([1.234e9])
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
+
+ assert_equal(repr(a), 'array([0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array(1.)')
+ assert_equal(repr(b), 'array([1.234e+09])')
+ assert_equal(repr(np.array([0.])), 'array([0.])')
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.12345679+1.12345679j])")
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
+
+ np.set_printoptions(sign=' ')
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
+ assert_equal(repr(np.array(1.)), 'array( 1.)')
+ assert_equal(repr(b), 'array([ 1.234e+09])')
+ assert_equal(repr(c),
+ "array([ 1. +1.j , 1.12345679+1.12345679j])")
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
+
+ np.set_printoptions(sign='+')
+ assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
+ assert_equal(repr(np.array(1.)), 'array(+1.)')
+ assert_equal(repr(b), 'array([+1.234e+09])')
+ assert_equal(repr(c),
+ "array([+1. +1.j , +1.12345679+1.12345679j])")
+
+ np.set_printoptions(legacy='1.13')
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
+ assert_equal(repr(b), 'array([ 1.23400000e+09])')
+ assert_equal(repr(-b), 'array([ -1.23400000e+09])')
+ assert_equal(repr(np.array(1.)), 'array(1.0)')
+ assert_equal(repr(np.array([0.])), 'array([ 0.])')
+ assert_equal(repr(c),
+ "array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
+ # gh-10383
+ assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
+
+ assert_raises(TypeError, np.set_printoptions, wrongarg=True)
+
+ def test_float_overflow_nowarn(self):
+ # make sure internal computations in FloatingFormat don't
+ # warn about overflow
+ repr(np.array([1e4, 0.1], dtype='f2'))
+
+ def test_sign_spacing_structured(self):
+ a = np.ones(2, dtype='<f,<f')
+ assert_equal(repr(a),
+ "array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
+ assert_equal(repr(a[0]), "(1., 1.)")
+
+ def test_floatmode(self):
+ x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
+ 0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
+ y = np.array([0.2918820979355541, 0.5064172631089138,
+ 0.2848750619642916, 0.4342965294660567,
+ 0.7326538397312751, 0.3459503329096204,
+ 0.0862072768214508, 0.39112753029631175],
+ dtype=np.float64)
+ z = np.arange(6, dtype=np.float16)/10
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
+
+ # also make sure 1e23 is right (it falls between two representable floats)
+ w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
+ # note: we construct w from the strings `1eXX` instead of doing
+ # `10.**arange(24)` because it turns out the two are not equivalent in
+ # python. On some architectures `1e23 != 10.**23`.
+ wp = np.array([1.234e1, 1e2, 1e123])
+
+ # unique mode
+ np.set_printoptions(floatmode='unique')
+ assert_equal(repr(x),
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
+ " 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
+ " 0.0862072768214508 , 0.39112753029631175])")
+ assert_equal(repr(z),
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
+ assert_equal(repr(w),
+ "array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
+ " 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
+ " 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
+ " 1.e+24])")
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.123456789+1.123456789j])")
+
+ # maxprec mode, precision=8
+ np.set_printoptions(floatmode='maxprec', precision=8)
+ assert_equal(repr(x),
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
+ " 0.34595033, 0.08620728, 0.39112753])")
+ assert_equal(repr(z),
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
+ assert_equal(repr(w[::5]),
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.12345679+1.12345679j])")
+
+ # fixed mode, precision=4
+ np.set_printoptions(floatmode='fixed', precision=4)
+ assert_equal(repr(x),
+ "array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
+ " 0.2383, 0.4226], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
+ assert_equal(repr(z),
+ "array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
+ assert_equal(repr(w[::5]),
+ "array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
+ assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
+ assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
+ assert_equal(repr(c),
+ "array([1.0000+1.0000j, 1.1235+1.1235j])")
+ # for larger precision, representation error becomes more apparent:
+ np.set_printoptions(floatmode='fixed', precision=8)
+ assert_equal(repr(z),
+ "array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
+ " 0.50000000], dtype=float16)")
+
+ # maxprec_equal mode, precision=8
+ np.set_printoptions(floatmode='maxprec_equal', precision=8)
+ assert_equal(repr(x),
+ "array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
+ " 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
+ assert_equal(repr(y),
+ "array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
+ " 0.34595033, 0.08620728, 0.39112753])")
+ assert_equal(repr(z),
+ "array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
+ assert_equal(repr(w[::5]),
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
+
+ # test unique special case (gh-18609)
+ a = np.float64.fromhex('-1p-97')
+ assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
+
+ def test_legacy_mode_scalars(self):
+ # in legacy mode, str of floats get truncated, and complex scalars
+ # use * for non-finite imaginary part
+ np.set_printoptions(legacy='1.13')
+ assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
+
+ np.set_printoptions(legacy=False)
+ assert_equal(str(np.float64(1.123456789123456789)),
+ '1.1234567891234568')
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
+
+ def test_legacy_stray_comma(self):
+ np.set_printoptions(legacy='1.13')
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
+
+ np.set_printoptions(legacy=False)
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
+
+ def test_dtype_linewidth_wrapping(self):
+ np.set_printoptions(linewidth=75)
+ assert_equal(repr(np.arange(10,20., dtype='f4')),
+ "array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
+ assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
+ array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
+ dtype=float32)"""))
+
+ styp = '<U4'
+ assert_equal(repr(np.ones(3, dtype=styp)),
+ "array(['1', '1', '1'], dtype='{}')".format(styp))
+ assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
+ array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
+ dtype='{}')""".format(styp)))
+
+ def test_linewidth_repr(self):
+ a = np.full(7, fill_value=2)
+ np.set_printoptions(linewidth=17)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2,
+ 2])""")
+ )
+ np.set_printoptions(linewidth=17, legacy='1.13')
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2, 2])""")
+ )
+
+ a = np.full(8, fill_value=2)
+
+ np.set_printoptions(linewidth=18, legacy=False)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2,
+ 2, 2, 2,
+ 2, 2])""")
+ )
+
+ np.set_printoptions(linewidth=18, legacy='1.13')
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([2, 2, 2, 2,
+ 2, 2, 2, 2])""")
+ )
+
+ def test_linewidth_str(self):
+ a = np.full(18, fill_value=2)
+ np.set_printoptions(linewidth=18)
+ assert_equal(
+ str(a),
+ textwrap.dedent("""\
+ [2 2 2 2 2 2 2 2
+ 2 2 2 2 2 2 2 2
+ 2 2]""")
+ )
+ np.set_printoptions(linewidth=18, legacy='1.13')
+ assert_equal(
+ str(a),
+ textwrap.dedent("""\
+ [2 2 2 2 2 2 2 2 2
+ 2 2 2 2 2 2 2 2 2]""")
+ )
+
+ def test_edgeitems(self):
+ np.set_printoptions(edgeitems=1, threshold=1)
+ a = np.arange(27).reshape((3, 3, 3))
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([[[ 0, ..., 2],
+ ...,
+ [ 6, ..., 8]],
+
+ ...,
+
+ [[18, ..., 20],
+ ...,
+ [24, ..., 26]]])""")
+ )
+
+ b = np.zeros((3, 3, 1, 1))
+ assert_equal(
+ repr(b),
+ textwrap.dedent("""\
+ array([[[[0.]],
+
+ ...,
+
+ [[0.]]],
+
+
+ ...,
+
+
+ [[[0.]],
+
+ ...,
+
+ [[0.]]]])""")
+ )
+
+ # 1.13 had extra trailing spaces, and was missing newlines
+ np.set_printoptions(legacy='1.13')
+
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ array([[[ 0, ..., 2],
+ ...,
+ [ 6, ..., 8]],
+
+ ...,
+ [[18, ..., 20],
+ ...,
+ [24, ..., 26]]])""")
+ )
+
+ assert_equal(
+ repr(b),
+ textwrap.dedent("""\
+ array([[[[ 0.]],
+
+ ...,
+ [[ 0.]]],
+
+
+ ...,
+ [[[ 0.]],
+
+ ...,
+ [[ 0.]]]])""")
+ )
+
+ def test_bad_args(self):
+ assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
+ assert_raises(TypeError, np.set_printoptions, threshold='1')
+ assert_raises(TypeError, np.set_printoptions, threshold=b'1')
+
+ assert_raises(TypeError, np.set_printoptions, precision='1')
+ assert_raises(TypeError, np.set_printoptions, precision=1.5)
+
+def test_unicode_object_array():
+ expected = "array(['é'], dtype=object)"
+ x = np.array(['\xe9'], dtype=object)
+ assert_equal(repr(x), expected)
+
+
+class TestContextManager:
+ def test_ctx_mgr(self):
+ # test that context manager actually works
+ with np.printoptions(precision=2):
+ s = str(np.array([2.0]) / 3)
+ assert_equal(s, '[0.67]')
+
+ def test_ctx_mgr_restores(self):
+ # test that print options are actually restored
+ opts = np.get_printoptions()
+ with np.printoptions(precision=opts['precision'] - 1,
+ linewidth=opts['linewidth'] - 4):
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_exceptions(self):
+ # test that print options are restored even if an exception is raised
+ opts = np.get_printoptions()
+ try:
+ with np.printoptions(precision=2, linewidth=11):
+ raise ValueError
+ except ValueError:
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_as_smth(self):
+ opts = {"precision": 2}
+ with np.printoptions(**opts) as ctx:
+ saved_opts = ctx.copy()
+ assert_equal({k: saved_opts[k] for k in opts}, opts)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py
new file mode 100644
index 00000000..d8318017
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py
@@ -0,0 +1,154 @@
+import pytest
+from pytest import param
+from numpy.testing import IS_WASM
+import numpy as np
+
+
+def values_and_dtypes():
+ """
+ Generate value+dtype pairs that produce floating point errors during
+ casts. The invalid casts to integers give "invalid" value warnings,
+ while the float casts all give "overflow".
+
+ (The Python int/float paths don't need to be tested in all the same
+ situations, but it does not hurt.)
+ """
+ # Casting to float16:
+ yield param(70000, "float16", id="int-to-f2")
+ yield param("70000", "float16", id="str-to-f2")
+ yield param(70000.0, "float16", id="float-to-f2")
+ yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
+ yield param(np.float64(70000.), "float16", id="double-to-f2")
+ yield param(np.float32(70000.), "float16", id="float32-to-f2")
+ # Casting to float32:
+ yield param(10**100, "float32", id="int-to-f4")
+ yield param(1e100, "float32", id="float-to-f4")
+ yield param(np.longdouble(1e300), "float32", id="longdouble-to-f4")
+ yield param(np.float64(1e300), "float32", id="double-to-f4")
+ # Casting to float64:
+ # If longdouble is double-double, its max can be rounded down to the double
+ # max. So we correct the double spacing (a bit weird, admittedly):
+ max_ld = np.finfo(np.longdouble).max
+ spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
+ if max_ld - spacing > np.finfo("f8").max:
+ yield param(np.finfo(np.longdouble).max, "float64",
+ id="longdouble-to-f8")
+
+ # Casting to complex64:
+ yield param(2e300, "complex64", id="float-to-c8")
+ yield param(2e300+0j, "complex64", id="complex-to-c8")
+ yield param(2e300j, "complex64", id="complex-imag-to-c8")
+ yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")
+
+ # Invalid float to integer casts:
+ with np.errstate(over="ignore"):
+ for to_dt in np.typecodes["AllInteger"]:
+ for value in [np.inf, np.nan]:
+ for from_dt in np.typecodes["AllFloat"]:
+ from_dt = np.dtype(from_dt)
+ from_val = from_dt.type(value)
+
+ yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
+
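+# For instance (illustrative, matching the rules in the docstring above):
+# np.array([np.inf]).astype(np.int64) warns "invalid value encountered",
+# while np.array([1e300]).astype(np.float32) warns "overflow encountered".
+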
+
+def check_operations(dtype, value):
+ """
+ There are many dedicated paths in NumPy which cast and should check for
+ floating point errors that occur during those casts.
+ """
+ if dtype.kind != 'i':
+ # These assignments use the stricter setitem logic:
+ def assignment():
+ arr = np.empty(3, dtype=dtype)
+ arr[0] = value
+
+ yield assignment
+
+ def fill():
+ arr = np.empty(3, dtype=dtype)
+ arr.fill(value)
+
+ yield fill
+
+ def copyto_scalar():
+ arr = np.empty(3, dtype=dtype)
+ np.copyto(arr, value, casting="unsafe")
+
+ yield copyto_scalar
+
+ def copyto():
+ arr = np.empty(3, dtype=dtype)
+ np.copyto(arr, np.array([value, value, value]), casting="unsafe")
+
+ yield copyto
+
+ def copyto_scalar_masked():
+ arr = np.empty(3, dtype=dtype)
+ np.copyto(arr, value, casting="unsafe",
+ where=[True, False, True])
+
+ yield copyto_scalar_masked
+
+ def copyto_masked():
+ arr = np.empty(3, dtype=dtype)
+ np.copyto(arr, np.array([value, value, value]), casting="unsafe",
+ where=[True, False, True])
+
+ yield copyto_masked
+
+ def direct_cast():
+ np.array([value, value, value]).astype(dtype)
+
+ yield direct_cast
+
+ def direct_cast_nd_strided():
+ arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
+ arr.astype(dtype)
+
+ yield direct_cast_nd_strided
+
+ def boolean_array_assignment():
+ arr = np.empty(3, dtype=dtype)
+ arr[[True, False, True]] = np.array([value, value])
+
+ yield boolean_array_assignment
+
+ def integer_array_assignment():
+ arr = np.empty(3, dtype=dtype)
+ values = np.array([value, value])
+
+ arr[[0, 1]] = values
+
+ yield integer_array_assignment
+
+ def integer_array_assignment_with_subspace():
+ arr = np.empty((5, 3), dtype=dtype)
+ values = np.array([value, value, value])
+
+ arr[[0, 2]] = values
+
+ yield integer_array_assignment_with_subspace
+
+ def flat_assignment():
+ arr = np.empty((3,), dtype=dtype)
+ values = np.array([value, value, value])
+ arr.flat[:] = values
+
+ yield flat_assignment
+
+@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
+@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+def test_floatingpoint_errors_casting(dtype, value):
+ dtype = np.dtype(dtype)
+ for operation in check_operations(dtype, value):
+ match = "invalid" if dtype.kind in 'iu' else "overflow"
+ with pytest.warns(RuntimeWarning, match=match):
+ operation()
+
+ with np.errstate(all="raise"):
+ with pytest.raises(FloatingPointError, match=match):
+ operation()
+
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py
new file mode 100644
index 00000000..a49d876d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_casting_unittests.py
@@ -0,0 +1,819 @@
+"""
+The tests exercise the casting machinery in a more low-level manner.
+The reason is mostly to test a new implementation of the casting machinery.
+
+Unlike most tests in NumPy, these are closer to unit-tests rather
+than integration tests.
+"""
+
+import pytest
+import textwrap
+import enum
+import random
+import ctypes
+
+import numpy as np
+from numpy.lib.stride_tricks import as_strided
+
+from numpy.testing import assert_array_equal
+from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
+
+
+# Simple dtypes; skips object, parametric and long double (unsupported by the
+# struct module)
+simple_dtypes = "?bhilqBHILQefdFD"
+if np.dtype("l").itemsize != np.dtype("q").itemsize:
+ # Remove l and L, the table was generated with 64bit linux in mind.
+ simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
+simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
+
+
+def simple_dtype_instances():
+ for dtype_class in simple_dtypes:
+ dt = dtype_class()
+ yield pytest.param(dt, id=str(dt))
+ if dt.byteorder != "|":
+ dt = dt.newbyteorder()
+ yield pytest.param(dt, id=str(dt))
+
+
+def get_expected_stringlength(dtype):
+ """Returns the string length when casting the basic dtypes to strings.
+ """
+ if dtype == np.bool_:
+ return 5
+ if dtype.kind in "iu":
+ if dtype.itemsize == 1:
+ length = 3
+ elif dtype.itemsize == 2:
+ length = 5
+ elif dtype.itemsize == 4:
+ length = 10
+ elif dtype.itemsize == 8:
+ length = 20
+ else:
+ raise AssertionError(f"did not find expected length for {dtype}")
+
+ if dtype.kind == "i":
+ length += 1 # adds one character for the sign
+
+ return length
+
+ # Note: Can't do dtype comparison for longdouble on windows
+ if dtype.char == "g":
+ return 48
+ elif dtype.char == "G":
+ return 48 * 2
+ elif dtype.kind == "f":
+ return 32 # also for half apparently.
+ elif dtype.kind == "c":
+ return 32 * 2
+
+ raise AssertionError(f"did not find expected length for {dtype}")
+
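+# For example (illustrative): np.int64 has itemsize 8, so up to 20 digits
+# plus one sign character, and get_expected_stringlength(np.dtype(np.int64))
+# returns 21.
+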
+
+class Casting(enum.IntEnum):
+ no = 0
+ equiv = 1
+ safe = 2
+ same_kind = 3
+ unsafe = 4
+
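+# These values mirror NumPy's internal NPY_CASTING levels; e.g. Casting.safe
+# corresponds to `casting="safe"` in np.can_cast (illustrative note).
+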
+
+def _get_cancast_table():
+ table = textwrap.dedent("""
+ X ? b h i l q B H I L Q e f d g F D G S U V O M m
+ ? # = = = = = = = = = = = = = = = = = = = = = . =
+ b . # = = = = . . . . . = = = = = = = = = = = . =
+ h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
+ i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
+ l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
+ q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
+ B . ~ = = = = # = = = = = = = = = = = = = = = . =
+ H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
+ I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
+ L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
+ Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
+ e . . . . . . . . . . . # = = = = = = = = = = . .
+ f . . . . . . . . . . . ~ # = = = = = = = = = . .
+ d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
+ g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
+ F . . . . . . . . . . . . . . . # = = = = = = . .
+ D . . . . . . . . . . . . . . . ~ # = = = = = . .
+ G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
+ S . . . . . . . . . . . . . . . . . . # = = = . .
+ U . . . . . . . . . . . . . . . . . . . # = = . .
+ V . . . . . . . . . . . . . . . . . . . . # = . .
+ O . . . . . . . . . . . . . . . . . . . . = # . .
+ M . . . . . . . . . . . . . . . . . . . . = = # .
+ m . . . . . . . . . . . . . . . . . . . . = = . #
+ """).strip().split("\n")
+ dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
+
+ convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
+ "=": Casting.safe, "#": Casting.equiv,
+ " ": -1}
+
+ cancast = {}
+ for from_dt, row in zip(dtypes, table[1:]):
+ cancast[from_dt] = {}
+ for to_dt, c in zip(dtypes, row[2::2]):
+ cancast[from_dt][to_dt] = convert_cast[c]
+
+ return cancast
+
+CAST_TABLE = _get_cancast_table()
+
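+# Example lookup (illustrative addition): CAST_TABLE maps DType classes to
+# the expected cast safety, e.g. int64 -> float64 is listed as "=" (safe):
+assert CAST_TABLE[type(np.dtype("q"))][type(np.dtype("d"))] == Casting.safe
+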
+
+class TestChanges:
+ """
+ These test cases exercise some behaviour changes
+ """
+ @pytest.mark.parametrize("string", ["S", "U"])
+ @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
+ def test_float_to_string(self, floating, string):
+ assert np.can_cast(floating, string)
+ # 100 is long enough to hold any formatted floating
+ assert np.can_cast(floating, f"{string}100")
+
+ def test_to_void(self):
+ # In general, we consider casts to unsized void safe:
+ assert np.can_cast("d", "V")
+ assert np.can_cast("S20", "V")
+
+ # Do not consider it a safe cast if the void is too small:
+ assert not np.can_cast("d", "V1")
+ assert not np.can_cast("S20", "V1")
+ assert not np.can_cast("U1", "V1")
+ # Structured to unstructured is just like any other:
+ assert np.can_cast("d,i", "V", casting="same_kind")
+ # Unstructured void to unstructured is actually no cast at all:
+ assert np.can_cast("V3", "V", casting="no")
+ assert np.can_cast("V0", "V", casting="no")
+
+
+class TestCasting:
+ size = 1500 # Best if larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
+
+ def get_data(self, dtype1, dtype2):
+ if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
+ length = self.size // dtype1.itemsize
+ else:
+ length = self.size // dtype2.itemsize
+
+ # Assume that the base array is well enough aligned for all inputs.
+ arr1 = np.empty(length, dtype=dtype1)
+ assert arr1.flags.c_contiguous
+ assert arr1.flags.aligned
+
+ values = [random.randrange(-128, 128) for _ in range(length)]
+
+ for i, value in enumerate(values):
+ # Use item assignment to ensure this is not using casting:
+ if value < 0 and dtype1.kind == "u":
+ # Manually rollover unsigned integers (-1 -> int.max)
+ value = value + np.iinfo(dtype1).max + 1
+ arr1[i] = value
+
+ if dtype2 is None:
+ if dtype1.char == "?":
+ values = [bool(v) for v in values]
+ return arr1, values
+
+ if dtype2.char == "?":
+ values = [bool(v) for v in values]
+
+ arr2 = np.empty(length, dtype=dtype2)
+ assert arr2.flags.c_contiguous
+ assert arr2.flags.aligned
+
+ for i, value in enumerate(values):
+ # Use item assignment to ensure this is not using casting:
+ if value < 0 and dtype2.kind == "u":
+ # Manually rollover unsigned integers (-1 -> int.max)
+ value = value + np.iinfo(dtype2).max + 1
+ arr2[i] = value
+
+ return arr1, arr2, values
+
+ def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
+ """
+ Returns a copy of arr1 that may be non-contiguous or unaligned, and a
+ matching array for arr2 (although not a copy).
+ """
+ if contig:
+ stride1 = arr1.dtype.itemsize
+ stride2 = arr2.dtype.itemsize
+ elif aligned:
+ stride1 = 2 * arr1.dtype.itemsize
+ stride2 = 2 * arr2.dtype.itemsize
+ else:
+ stride1 = arr1.dtype.itemsize + 1
+ stride2 = arr2.dtype.itemsize + 1
+
+ max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
+ max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
+ from_bytes = np.zeros(max_size1, dtype=np.uint8)
+ to_bytes = np.zeros(max_size2, dtype=np.uint8)
+
+ # Sanity check that the above is large enough:
+ assert stride1 * len(arr1) <= from_bytes.nbytes
+ assert stride2 * len(arr2) <= to_bytes.nbytes
+
+ if aligned:
+ new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
+ arr1.shape, (stride1,))
+ new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
+ arr2.shape, (stride2,))
+ else:
+ new1 = as_strided(from_bytes[1:].view(arr1.dtype),
+ arr1.shape, (stride1,))
+ new2 = as_strided(to_bytes[1:].view(arr2.dtype),
+ arr2.shape, (stride2,))
+
+ new1[...] = arr1
+
+ if not contig:
+ # Ensure we did not overwrite bytes that should not be written:
+ offset = arr1.dtype.itemsize if aligned else 0
+ buf = from_bytes[offset::stride1].tobytes()
+ assert buf.count(b"\0") == len(buf)
+
+ if contig:
+ assert new1.flags.c_contiguous
+ assert new2.flags.c_contiguous
+ else:
+ assert not new1.flags.c_contiguous
+ assert not new2.flags.c_contiguous
+
+ if aligned:
+ assert new1.flags.aligned
+ assert new2.flags.aligned
+ else:
+ assert not new1.flags.aligned or new1.dtype.alignment == 1
+ assert not new2.flags.aligned or new2.dtype.alignment == 1
+
+ return new1, new2
+
+ @pytest.mark.parametrize("from_Dt", simple_dtypes)
+ def test_simple_cancast(self, from_Dt):
+ for to_Dt in simple_dtypes:
+ cast = get_castingimpl(from_Dt, to_Dt)
+
+ for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
+ default = cast._resolve_descriptors((from_dt, None))[1][1]
+ assert default == to_Dt()
+ del default
+
+ for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
+ casting, (from_res, to_res), view_off = (
+ cast._resolve_descriptors((from_dt, to_dt)))
+ assert type(from_res) == from_Dt
+ assert type(to_res) == to_Dt
+ if view_off is not None:
+ # If a view is acceptable, this is "no" casting
+ # and byte order must be matching.
+ assert casting == Casting.no
+ # The above table lists this as "equivalent"
+ assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
+ # Note that to_res may not be the same as from_dt
+ assert from_res.isnative == to_res.isnative
+ else:
+ if from_Dt == to_Dt:
+ # Note that to_res may not be the same as from_dt
+ assert from_res.isnative != to_res.isnative
+ assert casting == CAST_TABLE[from_Dt][to_Dt]
+
+ if from_Dt is to_Dt:
+ assert from_dt is from_res
+ assert to_dt is to_res
+
+
+ @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+ @pytest.mark.parametrize("from_dt", simple_dtype_instances())
+ def test_simple_direct_casts(self, from_dt):
+ """
+ This test checks numeric direct casts for dtypes supported also by the
+ struct module (plus complex). It tries to test a wide range of
+ inputs, but skips over possibly undefined behaviour (e.g. int rollover).
+ Longdouble and CLongdouble are tested, but only using double precision.
+
+ If this test creates issues, it should possibly just be simplified
+ or even removed (checking whether unaligned/non-contiguous casts give
+ the same results is useful, though).
+ """
+ for to_dt in simple_dtype_instances():
+ to_dt = to_dt.values[0]
+ cast = get_castingimpl(type(from_dt), type(to_dt))
+
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+ (from_dt, to_dt))
+
+ if from_res is not from_dt or to_res is not to_dt:
+ # Do not test this case, it is handled in multiple steps,
+ # each of which is tested individually.
+ return
+
+ safe = casting <= Casting.safe
+ del from_res, to_res, casting
+
+ arr1, arr2, values = self.get_data(from_dt, to_dt)
+
+ cast._simple_strided_call((arr1, arr2))
+
+ # Check via python list
+ assert arr2.tolist() == values
+
+ # Check that the same results are achieved for strided loops
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+ cast._simple_strided_call((arr1_o, arr2_o))
+
+ assert_array_equal(arr2_o, arr2)
+ assert arr2_o.tobytes() == arr2.tobytes()
+
+ # Check if alignment makes a difference, but only if supported
+ # and only if the alignment can be wrong
+ if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
+ not cast._supports_unaligned):
+ return
+
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
+ cast._simple_strided_call((arr1_o, arr2_o))
+
+ assert_array_equal(arr2_o, arr2)
+ assert arr2_o.tobytes() == arr2.tobytes()
+
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
+ cast._simple_strided_call((arr1_o, arr2_o))
+
+ assert_array_equal(arr2_o, arr2)
+ assert arr2_o.tobytes() == arr2.tobytes()
+
+ del arr1_o, arr2_o, cast
+
+ @pytest.mark.parametrize("from_Dt", simple_dtypes)
+ def test_numeric_to_times(self, from_Dt):
+ # We currently only implement contiguous loops, so only need to
+ # test those.
+ from_dt = from_Dt()
+
+ time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
+ np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
+ for time_dt in time_dtypes:
+ cast = get_castingimpl(type(from_dt), type(time_dt))
+
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+ (from_dt, time_dt))
+
+ assert from_res is from_dt
+ assert to_res is time_dt
+ del from_res, to_res
+
+ assert casting & CAST_TABLE[from_Dt][type(time_dt)]
+ assert view_off is None
+
+ int64_dt = np.dtype(np.int64)
+ arr1, arr2, values = self.get_data(from_dt, int64_dt)
+ arr2 = arr2.view(time_dt)
+ arr2[...] = np.datetime64("NaT")
+
+ if time_dt == np.dtype("M8"):
+ # This is a bit of a strange path, and could probably be removed
+ arr1[-1] = 0 # ensure at least one value is not NaT
+
+ # The cast currently succeeds, but the values are invalid:
+ cast._simple_strided_call((arr1, arr2))
+ with pytest.raises(ValueError):
+ str(arr2[-1]) # e.g. conversion to string fails
+ return
+
+ cast._simple_strided_call((arr1, arr2))
+
+ assert [int(v) for v in arr2.tolist()] == values
+
+ # Check that the same results are achieved for strided loops
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+ cast._simple_strided_call((arr1_o, arr2_o))
+
+ assert_array_equal(arr2_o, arr2)
+ assert arr2_o.tobytes() == arr2.tobytes()
+
+ @pytest.mark.parametrize(
+ ["from_dt", "to_dt", "expected_casting", "expected_view_off",
+ "nom", "denom"],
+ [("M8[ns]", None, Casting.no, 0, 1, 1),
+ (str(np.dtype("M8[ns]").newbyteorder()), None,
+ Casting.equiv, None, 1, 1),
+ ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
+ # should be invalid cast:
+ ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
+ ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
+ ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
+ ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
+ ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
+ ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
+ # give full values based on NumPy 1.19.x
+ [-2**63, 0, -1, 1314, -1315, 564442610]),
+ ("m8[ns]", None, Casting.no, 0, 1, 1),
+ (str(np.dtype("m8[ns]").newbyteorder()), None,
+ Casting.equiv, None, 1, 1),
+ ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
+ # should be invalid cast:
+ ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
+ ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
+ ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
+ ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
+ ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
+ ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
+ # give full values based on NumPy 1.19.x
+ [-2**63, 0, 0, 1314, -1315, 564442610])])
+ def test_time_to_time(self, from_dt, to_dt,
+ expected_casting, expected_view_off,
+ nom, denom):
+ from_dt = np.dtype(from_dt)
+ if to_dt is not None:
+ to_dt = np.dtype(to_dt)
+
+ # Test a few values for casting (results generated with NumPy 1.19)
+ values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
+ values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
+ assert values.dtype.byteorder == from_dt.byteorder
+ assert np.isnat(values.view(from_dt)[0])
+
+ DType = type(from_dt)
+ cast = get_castingimpl(DType, DType)
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+ (from_dt, to_dt))
+ assert from_res is from_dt
+ assert to_res is to_dt or to_dt is None
+ assert casting == expected_casting
+ assert view_off == expected_view_off
+
+ if nom is not None:
+ expected_out = (values * nom // denom).view(to_res)
+ expected_out[0] = "NaT"
+ else:
+ expected_out = np.empty_like(values)
+ expected_out[...] = denom
+ expected_out = expected_out.view(to_dt)
+
+ orig_arr = values.view(from_dt)
+ orig_out = np.empty_like(expected_out)
+
+ if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
+ # Casting from non-generic to generic units is an error and should
+ # probably be reported as an invalid cast earlier.
+ with pytest.raises(ValueError):
+ cast._simple_strided_call((orig_arr, orig_out))
+ return
+
+ for aligned in [True, False]:
+ for contig in [True, False]:
+ arr, out = self.get_data_variation(
+ orig_arr, orig_out, aligned, contig)
+ out[...] = 0
+ cast._simple_strided_call((arr, out))
+ assert_array_equal(out.view("int64"), expected_out.view("int64"))
+
+ def string_with_modified_length(self, dtype, change_length):
+ fact = 1 if dtype.char == "S" else 4
+ length = dtype.itemsize // fact + change_length
+ return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
+
+ @pytest.mark.parametrize("other_DT", simple_dtypes)
+ @pytest.mark.parametrize("string_char", ["S", "U"])
+ def test_string_cancast(self, other_DT, string_char):
+ fact = 1 if string_char == "S" else 4
+
+ string_DT = type(np.dtype(string_char))
+ cast = get_castingimpl(other_DT, string_DT)
+
+ other_dt = other_DT()
+ expected_length = get_expected_stringlength(other_dt)
+ string_dt = np.dtype(f"{string_char}{expected_length}")
+
+ safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, None))
+ assert res_dt.itemsize == expected_length * fact
+ assert safety == Casting.safe # we consider casts to string "safe"
+ assert view_off is None
+ assert isinstance(res_dt, string_DT)
+
+ # These casts currently implement changing the string length, so
+ # check the cast-safety for too long/fixed string lengths:
+ for change_length in [-1, 0, 1]:
+ if change_length >= 0:
+ expected_safety = Casting.safe
+ else:
+ expected_safety = Casting.same_kind
+
+ to_dt = self.string_with_modified_length(string_dt, change_length)
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, to_dt))
+ assert res_dt is to_dt
+ assert safety == expected_safety
+ assert view_off is None
+
+ # The opposite direction is always considered unsafe:
+ cast = get_castingimpl(string_DT, other_DT)
+
+ safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
+ assert safety == Casting.unsafe
+ assert view_off is None
+
+ cast = get_castingimpl(string_DT, other_DT)
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (string_dt, None))
+ assert safety == Casting.unsafe
+ assert view_off is None
+ assert other_dt is res_dt # returns the singleton for simple dtypes
+
+ @pytest.mark.parametrize("string_char", ["S", "U"])
+ @pytest.mark.parametrize("other_dt", simple_dtype_instances())
+ def test_simple_string_casts_roundtrip(self, other_dt, string_char):
+ """
+ Tests casts from and to string by checking the roundtripping property.
+
+ The test also covers some string to string casts (but not all).
+
+ If this test creates issues, it should possibly just be simplified
+ or even removed (checking whether unaligned/non-contiguous casts give
+ the same results is useful, though).
+ """
+ string_DT = type(np.dtype(string_char))
+
+ cast = get_castingimpl(type(other_dt), string_DT)
+ cast_back = get_castingimpl(string_DT, type(other_dt))
+ _, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
+ (other_dt, None))
+
+ if res_other_dt is not other_dt:
+ # do not support non-native byteorder, skip test in that case
+ assert other_dt.byteorder != res_other_dt.byteorder
+ return
+
+ orig_arr, values = self.get_data(other_dt, None)
+ str_arr = np.zeros(len(orig_arr), dtype=string_dt)
+ string_dt_short = self.string_with_modified_length(string_dt, -1)
+ str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
+ string_dt_long = self.string_with_modified_length(string_dt, 1)
+ str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
+
+ assert not cast._supports_unaligned # if support is added, should test
+ assert not cast_back._supports_unaligned
+
+ for contig in [True, False]:
+ other_arr, str_arr = self.get_data_variation(
+ orig_arr, str_arr, True, contig)
+ _, str_arr_short = self.get_data_variation(
+ orig_arr, str_arr_short.copy(), True, contig)
+ _, str_arr_long = self.get_data_variation(
+ orig_arr, str_arr_long, True, contig)
+
+ cast._simple_strided_call((other_arr, str_arr))
+
+ cast._simple_strided_call((other_arr, str_arr_short))
+ assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
+
+ cast._simple_strided_call((other_arr, str_arr_long))
+ assert_array_equal(str_arr, str_arr_long)
+
+ if other_dt.kind == "b":
+ # Booleans do not roundtrip
+ continue
+
+ other_arr[...] = 0
+ cast_back._simple_strided_call((str_arr, other_arr))
+ assert_array_equal(orig_arr, other_arr)
+
+ other_arr[...] = 0
+ cast_back._simple_strided_call((str_arr_long, other_arr))
+ assert_array_equal(orig_arr, other_arr)
+
+ @pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
+ @pytest.mark.parametrize("string_char", ["S", "U"])
+ def test_string_to_string_cancast(self, other_dt, string_char):
+ other_dt = np.dtype(other_dt)
+
+ fact = 1 if string_char == "S" else 4
+ div = 1 if other_dt.char == "S" else 4
+
+ string_DT = type(np.dtype(string_char))
+ cast = get_castingimpl(type(other_dt), string_DT)
+
+ expected_length = other_dt.itemsize // div
+ string_dt = np.dtype(f"{string_char}{expected_length}")
+
+ safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, None))
+ assert res_dt.itemsize == expected_length * fact
+ assert isinstance(res_dt, string_DT)
+
+ expected_view_off = None
+ if other_dt.char == string_char:
+ if other_dt.isnative:
+ expected_safety = Casting.no
+ expected_view_off = 0
+ else:
+ expected_safety = Casting.equiv
+ elif string_char == "U":
+ expected_safety = Casting.safe
+ else:
+ expected_safety = Casting.unsafe
+
+ assert view_off == expected_view_off
+ assert expected_safety == safety
+
+ for change_length in [-1, 0, 1]:
+ to_dt = self.string_with_modified_length(string_dt, change_length)
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (other_dt, to_dt))
+
+ assert res_dt is to_dt
+ if change_length <= 0:
+ assert view_off == expected_view_off
+ else:
+ assert view_off is None
+ if expected_safety == Casting.unsafe:
+ assert safety == expected_safety
+ elif change_length < 0:
+ assert safety == Casting.same_kind
+ elif change_length == 0:
+ assert safety == expected_safety
+ elif change_length > 0:
+ assert safety == Casting.safe
+
+ @pytest.mark.parametrize("order1", [">", "<"])
+ @pytest.mark.parametrize("order2", [">", "<"])
+ def test_unicode_byteswapped_cast(self, order1, order2):
+ # Very specific test (not using the castingimpl directly) that
+ # exercises unicode byteswaps, including for unaligned array data.
+ dtype1 = np.dtype(f"{order1}U30")
+ dtype2 = np.dtype(f"{order2}U30")
+ data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
+ data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
+ if dtype1.alignment != 1:
+ # alignment should always be >1, but skip the check if not
+ assert not data1.flags.aligned
+ assert not data2.flags.aligned
+
+ element = "this is a ünicode string‽"
+ data1[()] = element
+ # Test both `data1` and `data1.copy()` (which should be aligned)
+ for data in [data1, data1.copy()]:
+ data2[...] = data1
+ assert data2[()] == element
+ assert data2.copy()[()] == element
+
+ def test_void_to_string_special_case(self):
+ # Cover a small special case in void to string casting that could
+ # probably just as well be turned into an error (compare
+ # `test_object_to_parametric_internal_error` below).
+ assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
+ assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
+
+ def test_object_to_parametric_internal_error(self):
+ # We reject casting from object to a parametric type, without
+ # figuring out the correct instance first.
+ object_dtype = type(np.dtype(object))
+ other_dtype = type(np.dtype(str))
+ cast = get_castingimpl(object_dtype, other_dtype)
+ with pytest.raises(TypeError,
+ match="casting from object to the parametric DType"):
+ cast._resolve_descriptors((np.dtype("O"), None))
+
+ @pytest.mark.parametrize("dtype", simple_dtype_instances())
+ def test_object_and_simple_resolution(self, dtype):
+ # Simple test to exercise the cast when no instance is specified
+ object_dtype = type(np.dtype(object))
+ cast = get_castingimpl(object_dtype, type(dtype))
+
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (np.dtype("O"), dtype))
+ assert safety == Casting.unsafe
+ assert view_off is None
+ assert res_dt is dtype
+
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (np.dtype("O"), None))
+ assert safety == Casting.unsafe
+ assert view_off is None
+ assert res_dt == dtype.newbyteorder("=")
+
+ @pytest.mark.parametrize("dtype", simple_dtype_instances())
+ def test_simple_to_object_resolution(self, dtype):
+ # Simple test to exercise the cast when no instance is specified
+ object_dtype = type(np.dtype(object))
+ cast = get_castingimpl(type(dtype), object_dtype)
+
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
+ (dtype, None))
+ assert safety == Casting.safe
+ assert view_off is None
+ assert res_dt is np.dtype("O")
+
+ @pytest.mark.parametrize("casting", ["no", "unsafe"])
+ def test_void_and_structured_with_subarray(self, casting):
+ # test case corresponding to gh-19325
+ dtype = np.dtype([("foo", "<f4", (3, 2))])
+ expected = casting == "unsafe"
+ assert np.can_cast("V4", dtype, casting=casting) == expected
+ assert np.can_cast(dtype, "V4", casting=casting) == expected
+
+ @pytest.mark.parametrize(["to_dt", "expected_off"],
+ [ # Same as `from_dt` but with both fields shifted:
+ (np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
+ "offsets": [0, 4]}), 2),
+ # Additional change of the names
+ (np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
+ "offsets": [0, 4]}), 2),
+ # Incompatible field offset change
+ (np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
+ "offsets": [0, 6]}), None)])
+ def test_structured_field_offsets(self, to_dt, expected_off):
+ # This checks the cast-safety and view offset for swapped and "shifted"
+ # fields which are viewable
+ from_dt = np.dtype({"names": ["a", "b"],
+ "formats": ["i4", "f4"],
+ "offsets": [2, 6]})
+ cast = get_castingimpl(type(from_dt), type(to_dt))
+ safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
+ if from_dt.names == to_dt.names:
+ assert safety == Casting.equiv
+ else:
+ assert safety == Casting.safe
+ # Shifting the original data pointer by -2 will align both by
+ # effectively adding 2 bytes of spacing before `from_dt`.
+ assert view_off == expected_off
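+ # (here field "a" is at offset 2 in `from_dt` but at offset 0 in the
+ # viewable `to_dt`s, hence the expected 2-byte view offset)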
+
+ @pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
+ # Subarray cases:
+ ("i", "(1,1)i", 0),
+ ("(1,1)i", "i", 0),
+ ("(2,1)i", "(2,1)i", 0),
+ # field cases (field to field is tested explicitly also):
+ # Not considered viewable, because a negative offset would allow
+ # a structured dtype to indirectly access invalid memory.
+ ("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
+ (dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
+ # Currently considered not viewable, due to multiple fields
+ # even though they overlap (maybe we should not allow that?)
+ ("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
+ None),
+ # different number of fields can't work, should probably just fail
+ # so it never reports "viewable":
+ ("i,i", "i,i,i", None),
+ # Unstructured void cases:
+ ("i4", "V3", 0), # void smaller or equal
+ ("i4", "V4", 0), # void smaller or equal
+ ("i4", "V10", None), # void is larger (no view)
+ ("O", "V4", None), # currently reject objects for view here.
+ ("O", "V8", None), # currently reject objects for view here.
+ ("V4", "V3", 0),
+ ("V4", "V4", 0),
+ ("V3", "V4", None),
+ # Note that currently void-to-other cast goes via byte-strings
+ # and is not a "view" based cast like the opposite direction:
+ ("V4", "i4", None),
+ # completely invalid/impossible cast:
+ ("i,i", "i,i,i", None),
+ ])
+ def test_structured_view_offsets_parametric(
+ self, from_dt, to_dt, expected_off):
+ # TODO: While this test is fairly thorough, right now, it does not
+ # really test some paths that may have nonzero offsets (they don't
+ # really exist).
+ from_dt = np.dtype(from_dt)
+ to_dt = np.dtype(to_dt)
+ cast = get_castingimpl(type(from_dt), type(to_dt))
+ _, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
+ assert view_off == expected_off
+
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_object_casts_NULL_None_equivalence(self, dtype):
+ # None to <other> casts may succeed or fail, but a NULL'ed array must
+ # behave the same as one filled with None's.
+ arr_normal = np.array([None] * 5)
+ arr_NULLs = np.empty_like(arr_normal)
+ ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes)
+ # If the check fails (maybe it should) the test would lose its purpose:
+ assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
+
+ try:
+ expected = arr_normal.astype(dtype)
+ except TypeError:
+ with pytest.raises(TypeError):
+ arr_NULLs.astype(dtype)
+ else:
+ assert_array_equal(expected, arr_NULLs.astype(dtype))
+
+ @pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
+ def test_nonstandard_bool_to_other(self, dtype):
+ # simple test for casting bool_ to numeric types, which should not
+ # expose the detail that NumPy bools can sometimes take values other
+ # than 0 and 1. See also gh-19514.
+ nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
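+ # (viewing the int8 bytes 0, 3 and -7 as bools yields values that are
+ # truthy without having the canonical byte value 1)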
+ res = nonstandard_bools.astype(dtype)
+ expected = [0, 1, 1]
+ assert_array_equal(res, expected)
+
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py
new file mode 100644
index 00000000..c602eba4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_conversion_utils.py
@@ -0,0 +1,208 @@
+"""
+Tests for numpy/core/src/multiarray/conversion_utils.c
+"""
+import re
+import sys
+
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+from numpy.testing import assert_warns, IS_PYPY
+
+
+class StringConverterTestCase:
+ allow_bytes = True
+ case_insensitive = True
+ exact_match = False
+ warn = True
+
+ def _check_value_error(self, val):
+ pattern = r'\(got {}\)'.format(re.escape(repr(val)))
+ with pytest.raises(ValueError, match=pattern):
+ self.conv(val)
+
+ def _check_conv_assert_warn(self, val, expected):
+ if self.warn:
+ with assert_warns(DeprecationWarning):
+ assert self.conv(val) == expected
+ else:
+ assert self.conv(val) == expected
+
+ def _check(self, val, expected):
+ """Takes valid non-deprecated inputs for converters,
+ runs converters on inputs, checks correctness of outputs,
+ warnings and errors"""
+ assert self.conv(val) == expected
+
+ if self.allow_bytes:
+ assert self.conv(val.encode('ascii')) == expected
+ else:
+ with pytest.raises(TypeError):
+ self.conv(val.encode('ascii'))
+
+ if len(val) != 1:
+ if self.exact_match:
+ self._check_value_error(val[:1])
+ self._check_value_error(val + '\0')
+ else:
+ self._check_conv_assert_warn(val[:1], expected)
+
+ if self.case_insensitive:
+ if val != val.lower():
+ self._check_conv_assert_warn(val.lower(), expected)
+ if val != val.upper():
+ self._check_conv_assert_warn(val.upper(), expected)
+ else:
+ if val != val.lower():
+ self._check_value_error(val.lower())
+ if val != val.upper():
+ self._check_value_error(val.upper())
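+ # Subclasses only provide `conv` and the class flags above; e.g. for
+ # TestByteorderConverter below, _check('big', 'NPY_BIG') also covers
+ # b'big', 'BIG' and the abbreviation 'b'.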
+
+ def test_wrong_type(self):
+ # common cases which apply to all the below
+ with pytest.raises(TypeError):
+ self.conv({})
+ with pytest.raises(TypeError):
+ self.conv([])
+
+ def test_wrong_value(self):
+ # nonsense strings
+ self._check_value_error('')
+ self._check_value_error('\N{greek small letter pi}')
+
+ if self.allow_bytes:
+ self._check_value_error(b'')
+ # bytes which can't be converted to strings via utf8
+ self._check_value_error(b"\xFF")
+ if self.exact_match:
+ self._check_value_error("there's no way this is supported")
+
+
+class TestByteorderConverter(StringConverterTestCase):
+ """ Tests of PyArray_ByteorderConverter """
+ conv = mt.run_byteorder_converter
+ warn = False
+
+ def test_valid(self):
+ for s in ['big', '>']:
+ self._check(s, 'NPY_BIG')
+ for s in ['little', '<']:
+ self._check(s, 'NPY_LITTLE')
+ for s in ['native', '=']:
+ self._check(s, 'NPY_NATIVE')
+ for s in ['ignore', '|']:
+ self._check(s, 'NPY_IGNORE')
+ for s in ['swap']:
+ self._check(s, 'NPY_SWAP')
+
+
+class TestSortkindConverter(StringConverterTestCase):
+ """ Tests of PyArray_SortkindConverter """
+ conv = mt.run_sortkind_converter
+ warn = False
+
+ def test_valid(self):
+ self._check('quicksort', 'NPY_QUICKSORT')
+ self._check('heapsort', 'NPY_HEAPSORT')
+ self._check('mergesort', 'NPY_STABLESORT') # alias
+ self._check('stable', 'NPY_STABLESORT')
+
+
+class TestSelectkindConverter(StringConverterTestCase):
+ """ Tests of PyArray_SelectkindConverter """
+ conv = mt.run_selectkind_converter
+ case_insensitive = False
+ exact_match = True
+
+ def test_valid(self):
+ self._check('introselect', 'NPY_INTROSELECT')
+
+
+class TestSearchsideConverter(StringConverterTestCase):
+ """ Tests of PyArray_SearchsideConverter """
+ conv = mt.run_searchside_converter
+
+ def test_valid(self):
+ self._check('left', 'NPY_SEARCHLEFT')
+ self._check('right', 'NPY_SEARCHRIGHT')
+
+
+class TestOrderConverter(StringConverterTestCase):
+ """ Tests of PyArray_OrderConverter """
+ conv = mt.run_order_converter
+ warn = False
+
+ def test_valid(self):
+ self._check('c', 'NPY_CORDER')
+ self._check('f', 'NPY_FORTRANORDER')
+ self._check('a', 'NPY_ANYORDER')
+ self._check('k', 'NPY_KEEPORDER')
+
+ def test_flatten_invalid_order(self):
+ # invalid after gh-14596
+ with pytest.raises(ValueError):
+ self.conv('Z')
+ for order in [False, True, 0, 8]:
+ with pytest.raises(TypeError):
+ self.conv(order)
+
+
+class TestClipmodeConverter(StringConverterTestCase):
+ """ Tests of PyArray_ClipmodeConverter """
+ conv = mt.run_clipmode_converter
+
+ def test_valid(self):
+ self._check('clip', 'NPY_CLIP')
+ self._check('wrap', 'NPY_WRAP')
+ self._check('raise', 'NPY_RAISE')
+
+ # integer values allowed here
+ assert self.conv(np.CLIP) == 'NPY_CLIP'
+ assert self.conv(np.WRAP) == 'NPY_WRAP'
+ assert self.conv(np.RAISE) == 'NPY_RAISE'
+
+
+class TestCastingConverter(StringConverterTestCase):
+ """ Tests of PyArray_CastingConverter """
+ conv = mt.run_casting_converter
+ case_insensitive = False
+ exact_match = True
+
+ def test_valid(self):
+ self._check("no", "NPY_NO_CASTING")
+ self._check("equiv", "NPY_EQUIV_CASTING")
+ self._check("safe", "NPY_SAFE_CASTING")
+ self._check("same_kind", "NPY_SAME_KIND_CASTING")
+ self._check("unsafe", "NPY_UNSAFE_CASTING")
+
+
+class TestIntpConverter:
+ """ Tests of PyArray_IntpConverter """
+ conv = mt.run_intp_converter
+
+ def test_basic(self):
+ assert self.conv(1) == (1,)
+ assert self.conv((1, 2)) == (1, 2)
+ assert self.conv([1, 2]) == (1, 2)
+ assert self.conv(()) == ()
+
+ def test_none(self):
+ # once the warning expires, this will raise TypeError
+ with pytest.warns(DeprecationWarning):
+ assert self.conv(None) == ()
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_float(self):
+ with pytest.raises(TypeError):
+ self.conv(1.0)
+ with pytest.raises(TypeError):
+ self.conv([1, 1.0])
+
+ def test_too_large(self):
+ with pytest.raises(ValueError):
+ self.conv(2**64)
+
+ def test_too_many_dims(self):
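+ # NumPy's compile-time dimension limit (NPY_MAXDIMS) is 32, so 32
+ # entries pass and 33 must raise.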
+ assert self.conv([1]*32) == (1,)*32
+ with pytest.raises(ValueError):
+ self.conv([1]*33)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py
new file mode 100644
index 00000000..2f7eac7e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_dispatcher.py
@@ -0,0 +1,42 @@
+from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+from numpy.core import _umath_tests
+from numpy.testing import assert_equal
+
+def test_dispatcher():
+ """
+ Testing the utilities of the CPU dispatcher
+ """
+ targets = (
+ "SSE2", "SSE41", "AVX2",
+ "VSX", "VSX2", "VSX3",
+ "NEON", "ASIMD", "ASIMDHP"
+ )
+ highest_sfx = "" # no suffix for the baseline
+ all_sfx = []
+ for feature in reversed(targets):
+ # skip baseline features; by default `CCompilerOpt` does not generate
+ # separate objects for the baseline, just one object combining all of
+ # them via the 'baseline' option within the configuration statements.
+ if feature in __cpu_baseline__:
+ continue
+ # check compiler and running machine support
+ if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
+ continue
+
+ if not highest_sfx:
+ highest_sfx = "_" + feature
+ all_sfx.append("func" + "_" + feature)
+
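+ # Iterating `reversed(targets)` means the first dispatched feature
+ # found above is the highest-priority one, which the generated
+ # "func"/"var" symbols are expected to resolve to.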
+ test = _umath_tests.test_dispatch()
+ assert_equal(test["func"], "func" + highest_sfx)
+ assert_equal(test["var"], "var" + highest_sfx)
+
+ if highest_sfx:
+ assert_equal(test["func_xb"], "func" + highest_sfx)
+ assert_equal(test["var_xb"], "var" + highest_sfx)
+ else:
+ assert_equal(test["func_xb"], "nobase")
+ assert_equal(test["var_xb"], "nobase")
+
+ all_sfx.append("func") # add the baseline
+ assert_equal(test["all"], all_sfx)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py
new file mode 100644
index 00000000..1a76897e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_cpu_features.py
@@ -0,0 +1,185 @@
+import sys
+import platform
+import re
+
+import pytest
+from numpy.core._multiarray_umath import __cpu_features__
+
+def assert_features_equal(actual, desired, fname):
+ __tracebackhide__ = True # Hide traceback for py.test
+ actual, desired = str(actual), str(desired)
+ if actual == desired:
+ return
+ detected = str(__cpu_features__).replace("'", "")
+ try:
+ with open("/proc/cpuinfo", "r") as fd:
+ cpuinfo = fd.read(2048)
+ except Exception as err:
+ cpuinfo = str(err)
+
+ try:
+ import subprocess
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
+ auxv = auxv.decode()
+ except Exception as err:
+ auxv = str(err)
+
+ import textwrap
+ error_report = textwrap.indent(
+"""
+###########################################
+### Extra debugging information
+###########################################
+-------------------------------------------
+--- NumPy Detections
+-------------------------------------------
+%s
+-------------------------------------------
+--- SYS / CPUINFO
+-------------------------------------------
+%s....
+-------------------------------------------
+--- SYS / AUXV
+-------------------------------------------
+%s
+""" % (detected, cpuinfo, auxv), prefix='\r')
+
+ raise AssertionError((
+ "Failure Detection\n"
+ " NAME: '%s'\n"
+ " ACTUAL: %s\n"
+ " DESIRED: %s\n"
+ "%s"
+ ) % (fname, actual, desired, error_report))
+
+class AbstractTest:
+ features = []
+ features_groups = {}
+ features_map = {}
+ features_flags = set()
+
+ def load_flags(self):
+ # a hook
+ pass
+
+ def test_features(self):
+ self.load_flags()
+ for gname, features in self.features_groups.items():
+ test_features = [self.cpu_have(f) for f in features]
+ assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
+
+ for feature_name in self.features:
+ cpu_have = self.cpu_have(feature_name)
+ npy_have = __cpu_features__.get(feature_name)
+ assert_features_equal(npy_have, cpu_have, feature_name)
+
+ def cpu_have(self, feature_name):
+ map_names = self.features_map.get(feature_name, feature_name)
+ if isinstance(map_names, str):
+ return map_names in self.features_flags
+ for f in map_names:
+ if f in self.features_flags:
+ return True
+ return False
+
+ def load_flags_cpuinfo(self, magic_key):
+ self.features_flags = self.get_cpuinfo_item(magic_key)
+
+ def get_cpuinfo_item(self, magic_key):
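+ # A matching /proc/cpuinfo line looks roughly like
+ # "flags : fpu vme de pse ... sse sse2", i.e. a key, a colon and a
+ # space-separated flag list, which the split below relies on.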
+ values = set()
+ with open('/proc/cpuinfo') as fd:
+ for line in fd:
+ if not line.startswith(magic_key):
+ continue
+ flags_value = [s.strip() for s in line.split(':', 1)]
+ if len(flags_value) == 2:
+ values = values.union(flags_value[1].upper().split())
+ return values
+
+ def load_flags_auxv(self):
+ import subprocess
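+ # LD_SHOW_AUXV=1 makes glibc's loader print the ELF auxiliary vector
+ # while running /bin/true, e.g. "AT_HWCAP: vsx arch_2_07 ..." on
+ # POWER; only the AT_HWCAP* entries are parsed below.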
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
+ for at in auxv.split(b'\n'):
+ if not at.startswith(b"AT_HWCAP"):
+ continue
+ hwcap_value = [s.strip() for s in at.split(b':', 1)]
+ if len(hwcap_value) == 2:
+ self.features_flags = self.features_flags.union(
+ hwcap_value[1].upper().decode().split()
+ )
+
+is_linux = sys.platform.startswith('linux')
+is_cygwin = sys.platform.startswith('cygwin')
+machine = platform.machine()
+is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
+@pytest.mark.skipif(
+ not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
+)
+class Test_X86_Features(AbstractTest):
+ features = [
+ "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
+ "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
+ "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
+ "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
+ "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
+ ]
+ features_groups = dict(
+ AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
+ AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
+ "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
+ AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
+ AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
+ AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
+ "AVX512VBMI"],
+ AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
+ "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
+ )
+ features_map = dict(
+ SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
+ AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
+ AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
+ )
+
+ def load_flags(self):
+ self.load_flags_cpuinfo("flags")
+
+is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
+class Test_POWER_Features(AbstractTest):
+ features = ["VSX", "VSX2", "VSX3", "VSX4"]
+ features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
+
+ def load_flags(self):
+ self.load_flags_auxv()
+
+
+is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_zarch,
+ reason="Only for Linux and IBM Z")
+class Test_ZARCH_Features(AbstractTest):
+ features = ["VX", "VXE", "VXE2"]
+
+ def load_flags(self):
+ self.load_flags_auxv()
+
+
+is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
+@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
+class Test_ARM_Features(AbstractTest):
+ features = [
+ "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
+ ]
+ features_groups = dict(
+ NEON_FP16 = ["NEON", "HALF"],
+ NEON_VFPV4 = ["NEON", "VFPV4"],
+ )
+
+ def load_flags(self):
+ self.load_flags_cpuinfo("Features")
+ arch = self.get_cpuinfo_item("CPU architecture")
+ # in case the virtual filesystem of an aarch64 kernel is mounted
+ # (32-bit userspace running on a 64-bit kernel)
+ is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
+ if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
+ self.features_map = dict(
+ NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
+ )
+ else:
+ self.features_map = dict(
+ # The ELF auxiliary vector and /proc/cpuinfo on Linux (armv8 running
+ # aarch32) don't provide information about ASIMD, so we assume ASIMD
+ # is supported if the kernel reports any one of the following ARMv8
+ # features.
+ ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py
new file mode 100644
index 00000000..6bcc45d6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_custom_dtypes.py
@@ -0,0 +1,201 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal
+from numpy.core._multiarray_umath import (
+ _discover_array_parameters as discover_array_params, _get_sfloat_dtype)
+
+
+SF = _get_sfloat_dtype()
+
+
+class TestSFloat:
+ def _get_array(self, scaling, aligned=True):
+ if not aligned:
+ a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
+ a = a.view(np.float64)
+ a[:] = [1., 2., 3.]
+ else:
+ a = np.array([1., 2., 3.])
+
+ a *= 1./scaling # the casting code also uses the reciprocal.
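+ # I.e. SF(scaling) is backed by value/scaling in memory and the
+ # logical value is stored * scaling, so for scaling=2. the logical
+ # [1., 2., 3.] is stored as [0.5, 1., 1.5].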
+ return a.view(SF(scaling))
+
+ def test_sfloat_rescaled(self):
+ sf = SF(1.)
+ sf2 = sf.scaled_by(2.)
+ assert sf2.get_scaling() == 2.
+ sf6 = sf2.scaled_by(3.)
+ assert sf6.get_scaling() == 6.
+
+ def test_class_discovery(self):
+ # This does not test much, since we always discover the scaling as 1.
+ # But most of NumPy (at the time of writing) does not understand
+ # DType classes.
+ dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
+ assert dt == SF(1.)
+
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
+ def test_scaled_float_from_floats(self, scaling):
+ a = np.array([1., 2., 3.], dtype=SF(scaling))
+
+ assert a.dtype.get_scaling() == scaling
+ assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
+
+ def test_repr(self):
+ # Check the repr, mainly to cover the code paths:
+ assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
+
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
+ def test_sfloat_from_float(self, scaling):
+ a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
+
+ assert a.dtype.get_scaling() == scaling
+ assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
+
+ @pytest.mark.parametrize("aligned", [True, False])
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
+ def test_sfloat_getitem(self, aligned, scaling):
+ a = self._get_array(1., aligned)
+ assert a.tolist() == [1., 2., 3.]
+
+ @pytest.mark.parametrize("aligned", [True, False])
+ def test_sfloat_casts(self, aligned):
+ a = self._get_array(1., aligned)
+
+ assert np.can_cast(a, SF(-1.), casting="equiv")
+ assert not np.can_cast(a, SF(-1.), casting="no")
+ na = a.astype(SF(-1.))
+ assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
+
+ assert np.can_cast(a, SF(2.), casting="same_kind")
+ assert not np.can_cast(a, SF(2.), casting="safe")
+ a2 = a.astype(SF(2.))
+ assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
+
+ @pytest.mark.parametrize("aligned", [True, False])
+ def test_sfloat_cast_internal_errors(self, aligned):
+ a = self._get_array(2e300, aligned)
+
+ with pytest.raises(TypeError,
+ match="error raised inside the core-loop: non-finite factor!"):
+ a.astype(SF(2e-300))
+
+ def test_sfloat_promotion(self):
+ assert np.result_type(SF(2.), SF(3.)) == SF(3.)
+ assert np.result_type(SF(3.), SF(2.)) == SF(3.)
+ # Float64 -> SF(1.) and then promotes normally, so both of these work:
+ assert np.result_type(SF(3.), np.float64) == SF(3.)
+ assert np.result_type(np.float64, SF(0.5)) == SF(1.)
+
+ # Test an undefined promotion:
+ with pytest.raises(TypeError):
+ np.result_type(SF(1.), np.int64)
+
+ def test_basic_multiply(self):
+ a = self._get_array(2.)
+ b = self._get_array(4.)
+
+ res = a * b
+ # multiplies dtype scaling and content separately:
+ assert res.dtype.get_scaling() == 8.
+ expected_view = a.view(np.float64) * b.view(np.float64)
+ assert_array_equal(res.view(np.float64), expected_view)
+
+ def test_possible_and_impossible_reduce(self):
+ # For reductions to work, the first and last operand must have the
+ # same dtype. For this parametric DType that is not necessarily true.
+ a = self._get_array(2.)
+ # Addition reduction works (as of writing it requires passing `initial`
+ # because setting a scaled-float from the default `0` fails).
+ res = np.add.reduce(a, initial=0.)
+ assert res == a.astype(np.float64).sum()
+
+ # But each multiplication changes the factor, so a reduction is not
+ # possible (the relaxed version of the old refusal to handle any
+ # flexible dtype).
+ with pytest.raises(TypeError,
+ match="the resolved dtypes are not compatible"):
+ np.multiply.reduce(a)
+
+ def test_basic_ufunc_at(self):
+ float_a = np.array([1., 2., 3.])
+ b = self._get_array(2.)
+
+ float_b = b.view(np.float64).copy()
+ np.multiply.at(float_b, [1, 1, 1], float_a)
+ np.multiply.at(b, [1, 1, 1], float_a)
+
+ assert_array_equal(b.view(np.float64), float_b)
+
+ def test_basic_multiply_promotion(self):
+ float_a = np.array([1., 2., 3.])
+ b = self._get_array(2.)
+
+ res1 = float_a * b
+ res2 = b * float_a
+
+ # one factor is one, so we get the factor of b:
+ assert res1.dtype == res2.dtype == b.dtype
+ expected_view = float_a * b.view(np.float64)
+ assert_array_equal(res1.view(np.float64), expected_view)
+ assert_array_equal(res2.view(np.float64), expected_view)
+
+ # Check that promotion works when `out` is used:
+ np.multiply(b, float_a, out=res2)
+ with pytest.raises(TypeError):
+ # The promoter accepts this (maybe it should not), but the SFloat
+ # result cannot be cast to integer:
+ np.multiply(b, float_a, out=np.arange(3))
+
+ def test_basic_addition(self):
+ a = self._get_array(2.)
+ b = self._get_array(4.)
+
+ res = a + b
+ # addition uses the type promotion rules for the result:
+ assert res.dtype == np.result_type(a.dtype, b.dtype)
+ expected_view = (a.astype(res.dtype).view(np.float64) +
+ b.astype(res.dtype).view(np.float64))
+ assert_array_equal(res.view(np.float64), expected_view)
+
+ def test_addition_cast_safety(self):
+ """The addition method is special for the scaled float, because it
+ includes the "cast" between different factors, thus cast-safety
+ is influenced by the implementation.
+ """
+ a = self._get_array(2.)
+ b = self._get_array(-2.)
+ c = self._get_array(3.)
+
+ # sign change is "equiv":
+ np.add(a, b, casting="equiv")
+ with pytest.raises(TypeError):
+ np.add(a, b, casting="no")
+
+ # Different factor is "same_kind" (default) so check that "safe" fails
+ with pytest.raises(TypeError):
+ np.add(a, c, casting="safe")
+
+ # Check that casting the output fails also (done by the ufunc here)
+ with pytest.raises(TypeError):
+ np.add(a, a, out=c, casting="safe")
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_casts_to_bool(self, ufunc):
+ a = self._get_array(2.)
+ a[0] = 0. # make sure first element is considered False.
+
+ float_equiv = a.astype(float)
+ expected = ufunc(float_equiv, float_equiv)
+ res = ufunc(a, a)
+ assert_array_equal(res, expected)
+
+ # also check that the same works for reductions:
+ expected = ufunc.reduce(float_equiv)
+ res = ufunc.reduce(a)
+ assert_array_equal(res, expected)
+
+ # The output casting does not match the bool, bool -> bool loop:
+ with pytest.raises(TypeError):
+ ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_cython.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_cython.py
new file mode 100644
index 00000000..f4aac4a3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_cython.py
@@ -0,0 +1,137 @@
+import os
+import shutil
+import subprocess
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+# This import is copied from random.tests.test_extending
+try:
+ import cython
+ from Cython.Compiler.Version import version as cython_version
+except ImportError:
+ cython = None
+else:
+ from numpy.compat import _pep440
+
+ # Cython 0.29.30 is required for Python 3.11 and there are
+ # other fixes in the 0.29 series that are needed even for earlier
+ # Python versions.
+ # Note: keep in sync with the one in pyproject.toml
+ required_version = "0.29.30"
+ if _pep440.parse(cython_version) < _pep440.Version(required_version):
+ # too old or wrong cython, skip the test
+ cython = None
+
+pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")
+
+
+@pytest.fixture
+def install_temp(request, tmp_path):
+ # Based in part on test_cython from random.tests.test_extending
+ if IS_WASM:
+ pytest.skip("No subprocess")
+
+ here = os.path.dirname(__file__)
+ ext_dir = os.path.join(here, "examples", "cython")
+
+ cytest = str(tmp_path / "cytest")
+
+ shutil.copytree(ext_dir, cytest)
+ # build the examples and "install" them into a temporary directory
+
+ install_log = str(tmp_path / "tmp_install_log.txt")
+ subprocess.check_output(
+ [
+ sys.executable,
+ "setup.py",
+ "build",
+ "install",
+ "--prefix", str(tmp_path / "installdir"),
+ "--single-version-externally-managed",
+ "--record",
+ install_log,
+ ],
+ cwd=cytest,
+ )
+
+ # In order to import the built module, we need to add its path to
+ # sys.path, so parse that out of the record
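+ # (the --record file lists one installed path per line; the line
+ # containing the built "checks" module reveals the directory we need)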
+ with open(install_log) as fid:
+ for line in fid:
+ if "checks" in line:
+ sys.path.append(os.path.dirname(line))
+ break
+ else:
+ raise RuntimeError(f'could not parse "{install_log}"')
+
+
+def test_is_timedelta64_object(install_temp):
+ import checks
+
+ assert checks.is_td64(np.timedelta64(1234))
+ assert checks.is_td64(np.timedelta64(1234, "ns"))
+ assert checks.is_td64(np.timedelta64("NaT", "ns"))
+
+ assert not checks.is_td64(1)
+ assert not checks.is_td64(None)
+ assert not checks.is_td64("foo")
+ assert not checks.is_td64(np.datetime64("now", "s"))
+
+
+def test_is_datetime64_object(install_temp):
+ import checks
+
+ assert checks.is_dt64(np.datetime64(1234, "ns"))
+ assert checks.is_dt64(np.datetime64("NaT", "ns"))
+
+ assert not checks.is_dt64(1)
+ assert not checks.is_dt64(None)
+ assert not checks.is_dt64("foo")
+ assert not checks.is_dt64(np.timedelta64(1234))
+
+
+def test_get_datetime64_value(install_temp):
+ import checks
+
+ dt64 = np.datetime64("2016-01-01", "ns")
+
+ result = checks.get_dt64_value(dt64)
+ expected = dt64.view("i8")
+
+ assert result == expected
+
+
+def test_get_timedelta64_value(install_temp):
+ import checks
+
+ td64 = np.timedelta64(12345, "h")
+
+ result = checks.get_td64_value(td64)
+ expected = td64.view("i8")
+
+ assert result == expected
+
+
+def test_get_datetime64_unit(install_temp):
+ import checks
+
+ dt64 = np.datetime64("2016-01-01", "ns")
+ result = checks.get_dt64_unit(dt64)
+ expected = 10
+ assert result == expected
+
+ td64 = np.timedelta64(12345, "h")
+ result = checks.get_dt64_unit(td64)
+ expected = 5
+ assert result == expected
+
+
+def test_abstract_scalars(install_temp):
+ import checks
+
+ assert checks.is_integer(1)
+ assert checks.is_integer(np.int8(1))
+ assert checks.is_integer(np.uint64(1))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py
new file mode 100644
index 00000000..da4a9b0f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_datetime.py
@@ -0,0 +1,2552 @@
+
+import numpy
+import numpy as np
+import datetime
+import pytest
+from numpy.testing import (
+ IS_WASM,
+ assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
+ assert_raises_regex, assert_array_equal,
+ )
+from numpy.compat import pickle
+
+# Use pytz to test out various time zones if available
+try:
+ from pytz import timezone as tz
+ _has_pytz = True
+except ImportError:
+ _has_pytz = False
+
+try:
+ RecursionError
+except NameError:
+ RecursionError = RuntimeError # python < 3.5
+
+
+class TestDateTime:
+ def test_datetime_dtype_creation(self):
+ for unit in ['Y', 'M', 'W', 'D',
+ 'h', 'm', 's', 'ms', 'us',
+ 'μs', # alias for us
+ 'ns', 'ps', 'fs', 'as']:
+ dt1 = np.dtype('M8[750%s]' % unit)
+ assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
+ dt2 = np.dtype('m8[%s]' % unit)
+ assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
+
+ # Generic units shouldn't add [] to the end
+ assert_equal(str(np.dtype("M8")), "datetime64")
+
+ # Should be possible to specify the endianness
+ assert_equal(np.dtype("=M8"), np.dtype("M8"))
+ assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
+ assert_(np.dtype(">M8") == np.dtype("M8") or
+ np.dtype("<M8") == np.dtype("M8"))
+ assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
+ np.dtype("<M8[D]") == np.dtype("M8[D]"))
+ assert_(np.dtype(">M8") != np.dtype("<M8"))
+
+ assert_equal(np.dtype("=m8"), np.dtype("m8"))
+ assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
+ assert_(np.dtype(">m8") == np.dtype("m8") or
+ np.dtype("<m8") == np.dtype("m8"))
+ assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
+ np.dtype("<m8[D]") == np.dtype("m8[D]"))
+ assert_(np.dtype(">m8") != np.dtype("<m8"))
+
+ # Check that the parser rejects bad datetime types
+ assert_raises(TypeError, np.dtype, 'M8[badunit]')
+ assert_raises(TypeError, np.dtype, 'm8[badunit]')
+ assert_raises(TypeError, np.dtype, 'M8[YY]')
+ assert_raises(TypeError, np.dtype, 'm8[YY]')
+ assert_raises(TypeError, np.dtype, 'm4')
+ assert_raises(TypeError, np.dtype, 'M7')
+ assert_raises(TypeError, np.dtype, 'm7')
+ assert_raises(TypeError, np.dtype, 'M16')
+ assert_raises(TypeError, np.dtype, 'm16')
+ assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
+
+ def test_datetime_casting_rules(self):
+ # Cannot cast safely/same_kind between timedelta and datetime
+ assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
+ assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('m8', 'M8', casting='safe'))
+ assert_(not np.can_cast('M8', 'm8', casting='safe'))
+
+ # Can cast safely/same_kind from integer to timedelta
+ assert_(np.can_cast('i8', 'm8', casting='same_kind'))
+ assert_(np.can_cast('i8', 'm8', casting='safe'))
+ assert_(np.can_cast('i4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('i4', 'm8', casting='safe'))
+ assert_(np.can_cast('u4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('u4', 'm8', casting='safe'))
+
+ # Cannot cast safely from unsigned integer of the same size, which
+ # could overflow
+ assert_(np.can_cast('u8', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('u8', 'm8', casting='safe'))
+
+ # Cannot cast safely/same_kind from float to timedelta
+ assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('f4', 'm8', casting='safe'))
+
+ # Cannot cast safely/same_kind from integer to datetime
+ assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
+ assert_(not np.can_cast('i8', 'M8', casting='safe'))
+
+ # Cannot cast safely/same_kind from bool to datetime
+ assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
+ assert_(not np.can_cast('b1', 'M8', casting='safe'))
+ # Can cast safely/same_kind from bool to timedelta
+ assert_(np.can_cast('b1', 'm8', casting='same_kind'))
+ assert_(np.can_cast('b1', 'm8', casting='safe'))
+
+ # Can cast datetime safely from months/years to days
+ assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
+ assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
+ # Cannot cast timedelta safely from months/years to days
+ assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
+ assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
+ # Can cast datetime same_kind from months/years to days
+ assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
+ assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
+ # Can't cast timedelta same_kind from months/years to days
+ assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
+ assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
+ # Can cast datetime same_kind across the date/time boundary
+ assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
+ # Can cast timedelta same_kind across the date/time boundary
+ assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
+ assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
+
+ # Cannot cast safely if the integer multiplier doesn't divide
+ assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
+ assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
+ # But can cast same_kind
+ assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
+ # Can cast safely if the integer multiplier does divide
+ assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
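+ # E.g. every 6h tick lands exactly on the 3h grid (6 == 2 * 3), while
+ # 7h ticks generally fall between 3h ticks, so only the former can be
+ # converted without loss.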
+
+ # We can always cast types with generic units (corresponding to NaT) to
+ # more specific types
+ assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
+ assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
+ assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
+ assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
+ # but not the other way around
+ assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
+ assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
+ assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
+
+ def test_datetime_prefix_conversions(self):
+ # regression tests related to gh-19631;
+ # test metric prefixes from seconds down to
+ # attoseconds for bidirectional conversions
+ smaller_units = ['M8[7000ms]',
+ 'M8[2000us]',
+ 'M8[1000ns]',
+ 'M8[5000ns]',
+ 'M8[2000ps]',
+ 'M8[9000fs]',
+ 'M8[1000as]',
+ 'M8[2000000ps]',
+ 'M8[1000000as]',
+ 'M8[2000000000ps]',
+ 'M8[1000000000as]']
+ larger_units = ['M8[7s]',
+ 'M8[2ms]',
+ 'M8[us]',
+ 'M8[5us]',
+ 'M8[2ns]',
+ 'M8[9ps]',
+ 'M8[1fs]',
+ 'M8[2us]',
+ 'M8[1ps]',
+ 'M8[2ms]',
+ 'M8[1ns]']
+ for larger_unit, smaller_unit in zip(larger_units, smaller_units):
+ assert np.can_cast(larger_unit, smaller_unit, casting='safe')
+ assert np.can_cast(smaller_unit, larger_unit, casting='safe')
+
+ @pytest.mark.parametrize("unit", [
+ "s", "ms", "us", "ns", "ps", "fs", "as"])
+ def test_prohibit_negative_datetime(self, unit):
+ with assert_raises(TypeError):
+ np.array([1], dtype=f"M8[-1{unit}]")
+
+ def test_compare_generic_nat(self):
+ # regression tests for gh-6452
+ assert_(np.datetime64('NaT') !=
+ np.datetime64('2000') + np.timedelta64('NaT'))
+ assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
+ assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
+
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_datetime_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.datetime64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_timedelta_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.timedelta64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("arr, expected", [
+ # the example provided in gh-12629
+ (['NaT', 1, 2, 3],
+ [1, 2, 3, 'NaT']),
+ # multiple NaTs
+ (['NaT', 9, 'NaT', -707],
+ [-707, 9, 'NaT', 'NaT']),
+ # this sort explores another code path for NaT
+ ([1, -2, 3, 'NaT'],
+ [-2, 1, 3, 'NaT']),
+ # 2-D array
+ ([[51, -220, 'NaT'],
+ [-17, 'NaT', -90]],
+ [[-220, 51, 'NaT'],
+ [-90, -17, 'NaT']]),
+ ])
+ @pytest.mark.parametrize("dtype", [
+ 'M8[ns]', 'M8[us]',
+ 'm8[ns]', 'm8[us]'])
+ def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
+ # fix for gh-12629 and gh-15063; NaT sorting to end of array
+ arr = np.array(arr, dtype=dtype)
+ expected = np.array(expected, dtype=dtype)
+ arr.sort()
+ assert_equal(arr, expected)
+
+ def test_datetime_scalar_construction(self):
+ # Construct with different units
+ assert_equal(np.datetime64('1950-03-12', 'D'),
+ np.datetime64('1950-03-12'))
+ assert_equal(np.datetime64('1950-03-12T13', 's'),
+ np.datetime64('1950-03-12T13', 'm'))
+
+ # Default construction means NaT
+ assert_equal(np.datetime64(), np.datetime64('NaT'))
+
+ # Some basic strings and repr
+ assert_equal(str(np.datetime64('NaT')), 'NaT')
+ assert_equal(repr(np.datetime64('NaT')),
+ "numpy.datetime64('NaT')")
+ assert_equal(str(np.datetime64('2011-02')), '2011-02')
+ assert_equal(repr(np.datetime64('2011-02')),
+ "numpy.datetime64('2011-02')")
+
+ # None gets constructed as NaT
+ assert_equal(np.datetime64(None), np.datetime64('NaT'))
+
+ # Default construction of NaT is in generic units
+ assert_equal(np.datetime64().dtype, np.dtype('M8'))
+ assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
+
+ # Construction from integers requires a specified unit
+ assert_raises(ValueError, np.datetime64, 17)
+
+ # When constructing from a scalar or zero-dimensional array,
+ # it either keeps the units or you can override them.
+ a = np.datetime64('2000-03-18T16', 'h')
+ b = np.array('2000-03-18T16', dtype='M8[h]')
+
+ assert_equal(a.dtype, np.dtype('M8[h]'))
+ assert_equal(b.dtype, np.dtype('M8[h]'))
+
+ assert_equal(np.datetime64(a), a)
+ assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
+
+ assert_equal(np.datetime64(b), a)
+ assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
+
+ assert_equal(np.datetime64(a, 's'), a)
+ assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
+
+ assert_equal(np.datetime64(b, 's'), a)
+ assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
+
+ # Construction from datetime.date
+ assert_equal(np.datetime64('1945-03-25'),
+ np.datetime64(datetime.date(1945, 3, 25)))
+ assert_equal(np.datetime64('2045-03-25', 'D'),
+ np.datetime64(datetime.date(2045, 3, 25), 'D'))
+ # Construction from datetime.datetime
+ assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
+ np.datetime64(datetime.datetime(1980, 1, 25,
+ 14, 36, 22, 500000)))
+
+ # Construction with time units from a date is okay
+ assert_equal(np.datetime64('1920-03-13', 'h'),
+ np.datetime64('1920-03-13T00'))
+ assert_equal(np.datetime64('1920-03', 'm'),
+ np.datetime64('1920-03-01T00:00'))
+ assert_equal(np.datetime64('1920', 's'),
+ np.datetime64('1920-01-01T00:00:00'))
+ assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
+ np.datetime64('2045-03-25T00:00:00.000'))
+
+ # Construction with date units from a datetime is also okay
+ assert_equal(np.datetime64('1920-03-13T18', 'D'),
+ np.datetime64('1920-03-13'))
+ assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
+ np.datetime64('1920-03'))
+ assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
+ np.datetime64('1920'))
+
+ def test_datetime_scalar_construction_timezone(self):
+ # verify that supplying an explicit timezone works, but is deprecated
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.datetime64('2000-01-01T00Z'),
+ np.datetime64('2000-01-01T00'))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.datetime64('2000-01-01T00-08'),
+ np.datetime64('2000-01-01T08'))
+
+ def test_datetime_array_find_type(self):
+ dt = np.datetime64('1970-01-01', 'M')
+ arr = np.array([dt])
+ assert_equal(arr.dtype, np.dtype('M8[M]'))
+
+ # at the moment, we don't automatically convert these to datetime64
+
+ dt = datetime.date(1970, 1, 1)
+ arr = np.array([dt])
+ assert_equal(arr.dtype, np.dtype('O'))
+
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+ arr = np.array([dt])
+ assert_equal(arr.dtype, np.dtype('O'))
+
+ # find "supertype" for non-dates and dates
+
+ b = np.bool_(True)
+ dm = np.datetime64('1970-01-01', 'M')
+ d = datetime.date(1970, 1, 1)
+ dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
+
+ arr = np.array([b, dm])
+ assert_equal(arr.dtype, np.dtype('O'))
+
+ arr = np.array([b, d])
+ assert_equal(arr.dtype, np.dtype('O'))
+
+ arr = np.array([b, dt])
+ assert_equal(arr.dtype, np.dtype('O'))
+
+ arr = np.array([d, d]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[D]'))
+
+ arr = np.array([dt, dt]).astype('datetime64')
+ assert_equal(arr.dtype, np.dtype('M8[us]'))
+
+ @pytest.mark.parametrize("unit", [
+ # test all date / time units and use
+ # "generic" to select generic unit
+ ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
+ ("s"), ("ms"), ("us"), ("ns"), ("ps"),
+ ("fs"), ("as"), ("generic") ])
+ def test_timedelta_np_int_construction(self, unit):
+ # regression test for gh-7617
+ if unit != "generic":
+ assert_equal(np.timedelta64(np.int64(123), unit),
+ np.timedelta64(123, unit))
+ else:
+ assert_equal(np.timedelta64(np.int64(123)),
+ np.timedelta64(123))
+
+ def test_timedelta_scalar_construction(self):
+ # Construct with different units
+ assert_equal(np.timedelta64(7, 'D'),
+ np.timedelta64(1, 'W'))
+ assert_equal(np.timedelta64(120, 's'),
+ np.timedelta64(2, 'm'))
+
+ # Default construction means 0
+ assert_equal(np.timedelta64(), np.timedelta64(0))
+
+ # None gets constructed as NaT
+ assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
+
+ # Some basic strings and repr
+ assert_equal(str(np.timedelta64('NaT')), 'NaT')
+ assert_equal(repr(np.timedelta64('NaT')),
+ "numpy.timedelta64('NaT')")
+ assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
+ assert_equal(repr(np.timedelta64(-3, 's')),
+ "numpy.timedelta64(-3,'s')")
+ assert_equal(repr(np.timedelta64(12)),
+ "numpy.timedelta64(12)")
+
+ # Construction from an integer produces generic units
+ assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
+
+ # When constructing from a scalar or zero-dimensional array,
+ # it either keeps the units or you can override them.
+ a = np.timedelta64(2, 'h')
+ b = np.array(2, dtype='m8[h]')
+
+ assert_equal(a.dtype, np.dtype('m8[h]'))
+ assert_equal(b.dtype, np.dtype('m8[h]'))
+
+ assert_equal(np.timedelta64(a), a)
+ assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
+
+ assert_equal(np.timedelta64(b), a)
+ assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
+
+ assert_equal(np.timedelta64(a, 's'), a)
+ assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
+
+ assert_equal(np.timedelta64(b, 's'), a)
+ assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
+
+ # Construction from datetime.timedelta
+ assert_equal(np.timedelta64(5, 'D'),
+ np.timedelta64(datetime.timedelta(days=5)))
+ assert_equal(np.timedelta64(102347621, 's'),
+ np.timedelta64(datetime.timedelta(seconds=102347621)))
+ assert_equal(np.timedelta64(-10234760000, 'us'),
+ np.timedelta64(datetime.timedelta(
+ microseconds=-10234760000)))
+ assert_equal(np.timedelta64(10234760000, 'us'),
+ np.timedelta64(datetime.timedelta(
+ microseconds=10234760000)))
+ assert_equal(np.timedelta64(1023476, 'ms'),
+ np.timedelta64(datetime.timedelta(milliseconds=1023476)))
+ assert_equal(np.timedelta64(10, 'm'),
+ np.timedelta64(datetime.timedelta(minutes=10)))
+ assert_equal(np.timedelta64(281, 'h'),
+ np.timedelta64(datetime.timedelta(hours=281)))
+ assert_equal(np.timedelta64(28, 'W'),
+ np.timedelta64(datetime.timedelta(weeks=28)))
+
+ # Cannot construct across nonlinear time unit boundaries
+ a = np.timedelta64(3, 's')
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = np.timedelta64(6, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'D')
+ assert_raises(TypeError, np.timedelta64, a, 'h')
+ a = np.timedelta64(1, 'Y')
+ assert_raises(TypeError, np.timedelta64, a, 'D')
+ assert_raises(TypeError, np.timedelta64, a, 'm')
+ a = datetime.timedelta(seconds=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta(weeks=3)
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+ a = datetime.timedelta()
+ assert_raises(TypeError, np.timedelta64, a, 'M')
+ assert_raises(TypeError, np.timedelta64, a, 'Y')
+
+ def test_timedelta_object_array_conversion(self):
+ # Regression test for gh-11096
+ inputs = [datetime.timedelta(28),
+ datetime.timedelta(30),
+ datetime.timedelta(31)]
+ expected = np.array([28, 30, 31], dtype='timedelta64[D]')
+ actual = np.array(inputs, dtype='timedelta64[D]')
+ assert_equal(expected, actual)
+
+ def test_timedelta_0_dim_object_array_conversion(self):
+ # Regression test for gh-11151
+ test = np.array(datetime.timedelta(seconds=20))
+ actual = test.astype(np.timedelta64)
+ # expected value from the array constructor workaround
+ # described in above issue
+ expected = np.array(datetime.timedelta(seconds=20),
+ np.timedelta64)
+ assert_equal(actual, expected)
+
+ def test_timedelta_nat_format(self):
+ # gh-17552
+ assert_equal('NaT', '{0}'.format(np.timedelta64('nat')))
+
+ def test_timedelta_scalar_construction_units(self):
+ # String construction detecting units
+ assert_equal(np.datetime64('2010').dtype,
+ np.dtype('M8[Y]'))
+ assert_equal(np.datetime64('2010-03').dtype,
+ np.dtype('M8[M]'))
+ assert_equal(np.datetime64('2010-03-12').dtype,
+ np.dtype('M8[D]'))
+ assert_equal(np.datetime64('2010-03-12T17').dtype,
+ np.dtype('M8[h]'))
+ assert_equal(np.datetime64('2010-03-12T17:15').dtype,
+ np.dtype('M8[m]'))
+ assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
+ np.dtype('M8[s]'))
+
+ assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
+ np.dtype('M8[ms]'))
+ assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
+ np.dtype('M8[ms]'))
+ assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
+ np.dtype('M8[ms]'))
+
+ assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
+ np.dtype('M8[us]'))
+ assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
+ np.dtype('M8[us]'))
+ assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
+ np.dtype('M8[us]'))
+
+ assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
+ np.dtype('M8[ns]'))
+ assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
+ np.dtype('M8[ns]'))
+ assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
+ np.dtype('M8[ns]'))
+
+ assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
+ np.dtype('M8[ps]'))
+ assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
+ np.dtype('M8[ps]'))
+ assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
+ np.dtype('M8[ps]'))
+
+ assert_equal(np.datetime64(
+ '1970-01-01T00:00:02.1234567890123').dtype,
+ np.dtype('M8[fs]'))
+ assert_equal(np.datetime64(
+ '1970-01-01T00:00:02.12345678901234').dtype,
+ np.dtype('M8[fs]'))
+ assert_equal(np.datetime64(
+ '1970-01-01T00:00:02.123456789012345').dtype,
+ np.dtype('M8[fs]'))
+
+ assert_equal(np.datetime64(
+ '1970-01-01T00:00:02.1234567890123456').dtype,
+ np.dtype('M8[as]'))
+ assert_equal(np.datetime64(
+ '1970-01-01T00:00:02.12345678901234567').dtype,
+ np.dtype('M8[as]'))
+ assert_equal(np.datetime64(
+ '1970-01-01T00:00:02.123456789012345678').dtype,
+ np.dtype('M8[as]'))
+
+ # Python date object
+ assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
+ np.dtype('M8[D]'))
+
+ # Python datetime object
+ assert_equal(np.datetime64(
+ datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
+ np.dtype('M8[us]'))
+
+ # 'today' special value
+ assert_equal(np.datetime64('today').dtype,
+ np.dtype('M8[D]'))
+
+ # 'now' special value
+ assert_equal(np.datetime64('now').dtype,
+ np.dtype('M8[s]'))
+
+ def test_datetime_nat_casting(self):
+ a = np.array('NaT', dtype='M8[D]')
+ b = np.datetime64('NaT', '[D]')
+
+ # Arrays
+ assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
+ assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
+ assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
+ assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
+ assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))
+
+ # Scalars -> Scalars
+ assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
+ assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
+ assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
+ assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
+ assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
+
+ # Arrays -> Scalars
+ assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
+ assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
+ assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
+ assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
+ assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+
+ # NaN -> NaT
+ nan = np.array([np.nan] * 8)
+ fnan = nan.astype('f')
+ lnan = nan.astype('g')
+ cnan = nan.astype('D')
+ cfnan = nan.astype('F')
+ clnan = nan.astype('G')
+
+ nat = np.array([np.datetime64('NaT')] * 8)
+ assert_equal(nan.astype('M8[ns]'), nat)
+ assert_equal(fnan.astype('M8[ns]'), nat)
+ assert_equal(lnan.astype('M8[ns]'), nat)
+ assert_equal(cnan.astype('M8[ns]'), nat)
+ assert_equal(cfnan.astype('M8[ns]'), nat)
+ assert_equal(clnan.astype('M8[ns]'), nat)
+
+ nat = np.array([np.timedelta64('NaT')] * 8)
+ assert_equal(nan.astype('timedelta64[ns]'), nat)
+ assert_equal(fnan.astype('timedelta64[ns]'), nat)
+ assert_equal(lnan.astype('timedelta64[ns]'), nat)
+ assert_equal(cnan.astype('timedelta64[ns]'), nat)
+ assert_equal(cfnan.astype('timedelta64[ns]'), nat)
+ assert_equal(clnan.astype('timedelta64[ns]'), nat)
+
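+    def test_nan_to_nat_sketch(self):
+        # Illustrative sketch of the NaN -> NaT rule above: a float NaN
+        # casts to the corresponding "not a time" value in both the
+        # datetime and timedelta families.
+        assert_equal(np.array(np.nan).astype('M8[ns]'),
+                     np.datetime64('NaT', 'ns'))
+        assert_equal(np.array(np.nan).astype('m8[ns]'),
+                     np.timedelta64('NaT', 'ns'))
+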
+ def test_days_creation(self):
+        assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3 - 365)
+        assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3)
+        assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
+                     (1600-1970)*365 - (1972-1600)//4 + 3 + 366)
+ assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
+ (1900-1970)*365 - (1970-1900)//4)
+ assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
+ (1900-1970)*365 - (1970-1900)//4 + 365)
+ assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
+ assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
+ assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
+ assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
+ assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
+ assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
+ assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
+ assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
+ assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4)
+ assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 366)
+ assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
+ (2400 - 1970)*365 + (2400 - 1972)//4 - 3)
+ assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
+ (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
+
+ assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
+ (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
+ assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
+ (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
+ assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
+ assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
+ assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
+ (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
+
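+    def test_days_creation_crosscheck_sketch(self):
+        # Illustrative cross-check of the hand-computed day counts above
+        # against the standard library's proleptic Gregorian calendar.
+        for ymd in [(1600, 2, 29), (2000, 1, 1), (2001, 3, 22)]:
+            expected = (datetime.date(*ymd) - datetime.date(1970, 1, 1)).days
+            actual = np.array('%04d-%02d-%02d' % ymd,
+                              dtype='M8[D]').astype('i8')
+            assert_equal(actual, expected)
+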
+ def test_days_to_pydate(self):
+ assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
+ datetime.date(1599, 1, 1))
+ assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
+ datetime.date(1600, 1, 1))
+ assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
+ datetime.date(1601, 1, 1))
+ assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
+ datetime.date(1900, 1, 1))
+ assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
+ datetime.date(1901, 1, 1))
+ assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
+ datetime.date(2000, 1, 1))
+ assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
+ datetime.date(2001, 1, 1))
+ assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
+ datetime.date(1600, 2, 29))
+ assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
+ datetime.date(1600, 3, 1))
+ assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
+ datetime.date(2001, 3, 22))
+
+ def test_dtype_comparison(self):
+ assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
+ assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
+ assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
+ assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
+
+ def test_pydatetime_creation(self):
+ a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+ a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+ a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+        # Will fail if the date changes at exactly the wrong moment
+ a = np.array(['today', datetime.date.today()], dtype='M8[D]')
+ assert_equal(a[0], a[1])
+ # datetime.datetime.now() returns local time, not UTC
+ #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
+ #assert_equal(a[0], a[1])
+
+ # we can give a datetime.date time units
+ assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
+ np.array(np.datetime64('1960-03-12T00:00:00')))
+
+ def test_datetime_string_conversion(self):
+ a = ['2011-03-16', '1920-01-01', '2013-05-19']
+ str_a = np.array(a, dtype='S')
+ uni_a = np.array(a, dtype='U')
+ dt_a = np.array(a, dtype='M')
+
+ # String to datetime
+ assert_equal(dt_a, str_a.astype('M'))
+ assert_equal(dt_a.dtype, str_a.astype('M').dtype)
+ dt_b = np.empty_like(dt_a)
+ dt_b[...] = str_a
+ assert_equal(dt_a, dt_b)
+
+ # Datetime to string
+ assert_equal(str_a, dt_a.astype('S0'))
+ str_b = np.empty_like(str_a)
+ str_b[...] = dt_a
+ assert_equal(str_a, str_b)
+
+ # Unicode to datetime
+ assert_equal(dt_a, uni_a.astype('M'))
+ assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
+ dt_b = np.empty_like(dt_a)
+ dt_b[...] = uni_a
+ assert_equal(dt_a, dt_b)
+
+ # Datetime to unicode
+ assert_equal(uni_a, dt_a.astype('U'))
+ uni_b = np.empty_like(uni_a)
+ uni_b[...] = dt_a
+ assert_equal(uni_a, uni_b)
+
+ # Datetime to long string - gh-9712
+ assert_equal(str_a, dt_a.astype((np.string_, 128)))
+ str_b = np.empty(str_a.shape, dtype=(np.string_, 128))
+ str_b[...] = dt_a
+ assert_equal(str_a, str_b)
+
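+    def test_generic_unit_string_sketch(self):
+        # Illustrative sketch of the generic 'M' dtype used above: the
+        # unit is inferred from the string contents.
+        assert_equal(np.array(['2011-03-16'], dtype='M').dtype,
+                     np.dtype('M8[D]'))
+        assert_equal(np.array(['2011-03-16T13:55'], dtype='M').dtype,
+                     np.dtype('M8[m]'))
+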
+ @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
+ def test_time_byteswapping(self, time_dtype):
+ times = np.array(["2017", "NaT"], dtype=time_dtype)
+ times_swapped = times.astype(times.dtype.newbyteorder())
+ assert_array_equal(times, times_swapped)
+
+ unswapped = times_swapped.view(np.int64).newbyteorder()
+ assert_array_equal(unswapped, times.view(np.int64))
+
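+    def test_newbyteorder_roundtrip_sketch(self):
+        # Illustrative sketch: newbyteorder() flips only the byte order
+        # flag, so swapping twice restores the original dtype while the
+        # unit metadata is preserved throughout.
+        dt = np.dtype('M8[D]')
+        swapped = dt.newbyteorder()
+        assert_(dt != swapped)
+        assert_equal(swapped.newbyteorder(), dt)
+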
+ @pytest.mark.parametrize(["time1", "time2"],
+ [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")])
+ def test_time_byteswapped_cast(self, time1, time2):
+ dtype1 = np.dtype(time1)
+ dtype2 = np.dtype(time2)
+ times = np.array(["2017", "NaT"], dtype=dtype1)
+ expected = times.astype(dtype2)
+
+ # Test that every byte-swapping combination also returns the same
+ # results (previous tests check that this comparison works fine).
+ res = times.astype(dtype1.newbyteorder()).astype(dtype2)
+ assert_array_equal(res, expected)
+ res = times.astype(dtype2.newbyteorder())
+ assert_array_equal(res, expected)
+ res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
+ assert_array_equal(res, expected)
+
+ @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
+ @pytest.mark.parametrize("str_dtype", ["U", "S"])
+ def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
+ times = np.array(["2017", "NaT"], dtype=time_dtype)
+        # Unfortunately, timedelta values do not round-trip through
+        # strings, so build the string operands directly:
+ from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
+ to_strings = times.astype(str_dtype) # assume this is correct
+
+ # Check that conversion from times to string works if src is swapped:
+ times_swapped = times.astype(times.dtype.newbyteorder())
+ res = times_swapped.astype(str_dtype)
+ assert_array_equal(res, to_strings)
+ # And also if both are swapped:
+ res = times_swapped.astype(to_strings.dtype.newbyteorder())
+ assert_array_equal(res, to_strings)
+ # only destination is swapped:
+ res = times.astype(to_strings.dtype.newbyteorder())
+ assert_array_equal(res, to_strings)
+
+ # Check that conversion from string to times works if src is swapped:
+ from_strings_swapped = from_strings.astype(
+ from_strings.dtype.newbyteorder())
+ res = from_strings_swapped.astype(time_dtype)
+ assert_array_equal(res, times)
+ # And if both are swapped:
+ res = from_strings_swapped.astype(times.dtype.newbyteorder())
+ assert_array_equal(res, times)
+ # Only destination is swapped:
+ res = from_strings.astype(times.dtype.newbyteorder())
+ assert_array_equal(res, times)
+
+ def test_datetime_array_str(self):
+ a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
+ assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
+
+ a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
+ assert_equal(np.array2string(a, separator=', ',
+ formatter={'datetime': lambda x:
+ "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
+ "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
+
+ # Check that one NaT doesn't corrupt subsequent entries
+ a = np.array(['2010', 'NaT', '2030']).astype('M')
+ assert_equal(str(a), "['2010' 'NaT' '2030']")
+
+ def test_timedelta_array_str(self):
+ a = np.array([-1, 0, 100], dtype='m')
+ assert_equal(str(a), "[ -1 0 100]")
+ a = np.array(['NaT', 'NaT'], dtype='m')
+ assert_equal(str(a), "['NaT' 'NaT']")
+ # Check right-alignment with NaTs
+ a = np.array([-1, 'NaT', 0], dtype='m')
+ assert_equal(str(a), "[ -1 'NaT' 0]")
+ a = np.array([-1, 'NaT', 1234567], dtype='m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
+ # Test with other byteorder:
+ a = np.array([-1, 'NaT', 1234567], dtype='>m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+ a = np.array([-1, 'NaT', 1234567], dtype='<m')
+ assert_equal(str(a), "[ -1 'NaT' 1234567]")
+
+ def test_pickle(self):
+ # Check that pickle roundtripping works
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dt = np.dtype('M8[7D]')
+ assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
+ dt = np.dtype('M8[W]')
+ assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
+ scalar = np.datetime64('2016-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
+ scalar)
+ delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
+ delta)
+
+ # Check that loading pickles from 1.6 works
+ pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+ b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
+ b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
+ assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
+ pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+ b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
+ b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+ assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
+ pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+ b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
+ b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
+ assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
+
+ def test_setstate(self):
+ "Verify that datetime dtype __setstate__ can handle bad arguments"
+ dt = np.dtype('>M8[us]')
+ assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
+ def test_dtype_promotion(self):
+ # datetime <op> datetime computes the metadata gcd
+ # timedelta <op> timedelta computes the metadata gcd
+ for mM in ['m', 'M']:
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
+ np.dtype(mM+'8[2Y]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
+ np.dtype(mM+'8[3Y]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
+ np.dtype(mM+'8[2M]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
+ np.dtype(mM+'8[1D]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
+ np.dtype(mM+'8[s]'))
+ assert_equal(
+ np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
+ np.dtype(mM+'8[7s]'))
+ # timedelta <op> timedelta raises when there is no reasonable gcd
+ assert_raises(TypeError, np.promote_types,
+ np.dtype('m8[Y]'), np.dtype('m8[D]'))
+ assert_raises(TypeError, np.promote_types,
+ np.dtype('m8[M]'), np.dtype('m8[W]'))
+        # timedelta cannot be safely cast to or from float or uint64
+ assert_raises(TypeError, np.promote_types, "float32", "m8")
+ assert_raises(TypeError, np.promote_types, "m8", "float32")
+ assert_raises(TypeError, np.promote_types, "uint64", "m8")
+ assert_raises(TypeError, np.promote_types, "m8", "uint64")
+
+ # timedelta <op> timedelta may overflow with big unit ranges
+ assert_raises(OverflowError, np.promote_types,
+ np.dtype('m8[W]'), np.dtype('m8[fs]'))
+ assert_raises(OverflowError, np.promote_types,
+ np.dtype('m8[s]'), np.dtype('m8[as]'))
+
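+    def test_promotion_gcd_sketch(self):
+        # Illustrative sketch of the gcd rule above, worked in one unit:
+        # gcd(4h, 6h) == 2h, and 1D vs 10h is gcd(24h, 10h) == 2h.
+        assert_equal(np.promote_types('m8[4h]', 'm8[6h]'), np.dtype('m8[2h]'))
+        assert_equal(np.promote_types('m8[D]', 'm8[10h]'), np.dtype('m8[2h]'))
+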
+ def test_cast_overflow(self):
+ # gh-4486
+ def cast():
+ numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
+ assert_raises(OverflowError, cast)
+
+ def cast2():
+ numpy.datetime64("2014").astype("<M8[fs]")
+ assert_raises(OverflowError, cast2)
+
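+    def test_cast_overflow_arithmetic_sketch(self):
+        # Illustrative arithmetic behind the overflow above: int64 spans
+        # about +/-9.2e18, so at femtosecond resolution only roughly
+        # +/-9223 seconds (about 2.6 hours) around the epoch fit, far
+        # short of the year 2014.
+        limit_in_seconds = np.iinfo(np.int64).max / 1e15
+        assert_(limit_in_seconds < 4 * 3600)
+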
+ def test_pyobject_roundtrip(self):
+ # All datetime types should be able to roundtrip through object
+ a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
+ -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
+ dtype=np.int64)
+ # With date units
+ for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
+ b = a.copy().view(dtype=unit)
+ b[0] = '-0001-01-01'
+ b[1] = '-0001-12-31'
+ b[2] = '0000-01-01'
+ b[3] = '0001-01-01'
+ b[4] = '1969-12-31'
+ b[5] = '1970-01-01'
+ b[6] = '9999-12-31'
+ b[7] = '10000-01-01'
+ b[8] = 'NaT'
+
+ assert_equal(b.astype(object).astype(unit), b,
+ "Error roundtripping unit %s" % unit)
+ # With time units
+ for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
+ 'M8[300as]', 'M8[20us]']:
+ b = a.copy().view(dtype=unit)
+ b[0] = '-0001-01-01T00'
+ b[1] = '-0001-12-31T00'
+ b[2] = '0000-01-01T00'
+ b[3] = '0001-01-01T00'
+ b[4] = '1969-12-31T23:59:59.999999'
+ b[5] = '1970-01-01T00'
+ b[6] = '9999-12-31T23:59:59.999999'
+ b[7] = '10000-01-01T00'
+ b[8] = 'NaT'
+
+ assert_equal(b.astype(object).astype(unit), b,
+ "Error roundtripping unit %s" % unit)
+
+ def test_month_truncation(self):
+        # Make sure that months truncate correctly
+ assert_equal(np.array('1945-03-01', dtype='M8[M]'),
+ np.array('1945-03-31', dtype='M8[M]'))
+ assert_equal(np.array('1969-11-01', dtype='M8[M]'),
+ np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
+ assert_equal(np.array('1969-12-01', dtype='M8[M]'),
+ np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
+ assert_equal(np.array('1970-01-01', dtype='M8[M]'),
+ np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
+ assert_equal(np.array('1980-02-01', dtype='M8[M]'),
+ np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
+
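+    def test_truncation_floor_sketch(self):
+        # Illustrative sketch: casting to a coarser unit floors toward
+        # the start of the period, so any instant in February 1980 maps
+        # to '1980-02'.
+        x = np.datetime64('1980-02-29T23:59:59')
+        assert_equal(x.astype('M8[M]'), np.datetime64('1980-02'))
+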
+ def test_different_unit_comparison(self):
+ # Check some years with date units
+ for unit1 in ['Y', 'M', 'D']:
+ dt1 = np.dtype('M8[%s]' % unit1)
+ for unit2 in ['Y', 'M', 'D']:
+ dt2 = np.dtype('M8[%s]' % unit2)
+ assert_equal(np.array('1945', dtype=dt1),
+ np.array('1945', dtype=dt2))
+ assert_equal(np.array('1970', dtype=dt1),
+ np.array('1970', dtype=dt2))
+ assert_equal(np.array('9999', dtype=dt1),
+ np.array('9999', dtype=dt2))
+ assert_equal(np.array('10000', dtype=dt1),
+ np.array('10000-01-01', dtype=dt2))
+ assert_equal(np.datetime64('1945', unit1),
+ np.datetime64('1945', unit2))
+ assert_equal(np.datetime64('1970', unit1),
+ np.datetime64('1970', unit2))
+ assert_equal(np.datetime64('9999', unit1),
+ np.datetime64('9999', unit2))
+ assert_equal(np.datetime64('10000', unit1),
+ np.datetime64('10000-01-01', unit2))
+ # Check some datetimes with time units
+ for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
+ dt1 = np.dtype('M8[%s]' % unit1)
+ for unit2 in ['h', 'm', 's', 'ms', 'us']:
+ dt2 = np.dtype('M8[%s]' % unit2)
+ assert_equal(np.array('1945-03-12T18', dtype=dt1),
+ np.array('1945-03-12T18', dtype=dt2))
+ assert_equal(np.array('1970-03-12T18', dtype=dt1),
+ np.array('1970-03-12T18', dtype=dt2))
+ assert_equal(np.array('9999-03-12T18', dtype=dt1),
+ np.array('9999-03-12T18', dtype=dt2))
+ assert_equal(np.array('10000-01-01T00', dtype=dt1),
+ np.array('10000-01-01T00', dtype=dt2))
+ assert_equal(np.datetime64('1945-03-12T18', unit1),
+ np.datetime64('1945-03-12T18', unit2))
+ assert_equal(np.datetime64('1970-03-12T18', unit1),
+ np.datetime64('1970-03-12T18', unit2))
+ assert_equal(np.datetime64('9999-03-12T18', unit1),
+ np.datetime64('9999-03-12T18', unit2))
+ assert_equal(np.datetime64('10000-01-01T00', unit1),
+ np.datetime64('10000-01-01T00', unit2))
+ # Check some days with units that won't overflow
+ for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
+ dt1 = np.dtype('M8[%s]' % unit1)
+ for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
+ dt2 = np.dtype('M8[%s]' % unit2)
+ assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
+ np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
+ casting='unsafe'))
+ assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
+ np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
+ casting='unsafe'))
+
+ # Shouldn't be able to compare datetime and timedelta
+ # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
+ # default is needed to properly catch this kind of thing...
+ a = np.array('2012-12-21', dtype='M8[D]')
+ b = np.array(3, dtype='m8[D]')
+ #assert_raises(TypeError, np.less, a, b)
+ assert_raises(TypeError, np.less, a, b, casting='same_kind')
+
+ def test_datetime_like(self):
+ a = np.array([3], dtype='m8[4D]')
+ b = np.array(['2012-12-21'], dtype='M8[D]')
+
+ assert_equal(np.ones_like(a).dtype, a.dtype)
+ assert_equal(np.zeros_like(a).dtype, a.dtype)
+ assert_equal(np.empty_like(a).dtype, a.dtype)
+ assert_equal(np.ones_like(b).dtype, b.dtype)
+ assert_equal(np.zeros_like(b).dtype, b.dtype)
+ assert_equal(np.empty_like(b).dtype, b.dtype)
+
+ def test_datetime_unary(self):
+ for tda, tdb, tdzero, tdone, tdmone in \
+ [
+ # One-dimensional arrays
+ (np.array([3], dtype='m8[D]'),
+ np.array([-3], dtype='m8[D]'),
+ np.array([0], dtype='m8[D]'),
+ np.array([1], dtype='m8[D]'),
+ np.array([-1], dtype='m8[D]')),
+ # NumPy scalars
+ (np.timedelta64(3, '[D]'),
+ np.timedelta64(-3, '[D]'),
+ np.timedelta64(0, '[D]'),
+ np.timedelta64(1, '[D]'),
+ np.timedelta64(-1, '[D]'))]:
+ # negative ufunc
+ assert_equal(-tdb, tda)
+ assert_equal((-tdb).dtype, tda.dtype)
+ assert_equal(np.negative(tdb), tda)
+ assert_equal(np.negative(tdb).dtype, tda.dtype)
+
+ # positive ufunc
+ assert_equal(np.positive(tda), tda)
+ assert_equal(np.positive(tda).dtype, tda.dtype)
+ assert_equal(np.positive(tdb), tdb)
+ assert_equal(np.positive(tdb).dtype, tdb.dtype)
+
+ # absolute ufunc
+ assert_equal(np.absolute(tdb), tda)
+ assert_equal(np.absolute(tdb).dtype, tda.dtype)
+
+ # sign ufunc
+ assert_equal(np.sign(tda), tdone)
+ assert_equal(np.sign(tdb), tdmone)
+ assert_equal(np.sign(tdzero), tdzero)
+ assert_equal(np.sign(tda).dtype, tda.dtype)
+
+            # The ufuncs always produce native-endian results
+            assert_((-tdb).dtype.isnative)
+            assert_(np.sign(tda).dtype.isnative)
+
+ def test_datetime_add(self):
+ for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
+ [
+ # One-dimensional arrays
+ (np.array(['2012-12-21'], dtype='M8[D]'),
+ np.array(['2012-12-24'], dtype='M8[D]'),
+ np.array(['2012-12-21T11'], dtype='M8[h]'),
+ np.array(['NaT'], dtype='M8[D]'),
+ np.array([3], dtype='m8[D]'),
+ np.array([11], dtype='m8[h]'),
+ np.array([3*24 + 11], dtype='m8[h]')),
+ # NumPy scalars
+ (np.datetime64('2012-12-21', '[D]'),
+ np.datetime64('2012-12-24', '[D]'),
+ np.datetime64('2012-12-21T11', '[h]'),
+ np.datetime64('NaT', '[D]'),
+ np.timedelta64(3, '[D]'),
+ np.timedelta64(11, '[h]'),
+ np.timedelta64(3*24 + 11, '[h]'))]:
+ # m8 + m8
+ assert_equal(tda + tdb, tdc)
+ assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
+ # m8 + bool
+ assert_equal(tdb + True, tdb + 1)
+ assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
+ # m8 + int
+ assert_equal(tdb + 3*24, tdc)
+ assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
+ # bool + m8
+ assert_equal(False + tdb, tdb)
+ assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
+ # int + m8
+ assert_equal(3*24 + tdb, tdc)
+ assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
+ # M8 + bool
+ assert_equal(dta + True, dta + 1)
+ assert_equal(dtnat + True, dtnat)
+ assert_equal((dta + True).dtype, np.dtype('M8[D]'))
+ # M8 + int
+ assert_equal(dta + 3, dtb)
+ assert_equal(dtnat + 3, dtnat)
+ assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
+ # bool + M8
+ assert_equal(False + dta, dta)
+ assert_equal(False + dtnat, dtnat)
+ assert_equal((False + dta).dtype, np.dtype('M8[D]'))
+ # int + M8
+ assert_equal(3 + dta, dtb)
+ assert_equal(3 + dtnat, dtnat)
+ assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
+ # M8 + m8
+ assert_equal(dta + tda, dtb)
+ assert_equal(dtnat + tda, dtnat)
+ assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
+ # m8 + M8
+ assert_equal(tda + dta, dtb)
+ assert_equal(tda + dtnat, dtnat)
+ assert_equal((tda + dta).dtype, np.dtype('M8[D]'))
+
+ # In M8 + m8, the result goes to higher precision
+ assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
+ assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
+ np.dtype('M8[h]'))
+ assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
+ assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
+ np.dtype('M8[h]'))
+
+ # M8 + M8
+ assert_raises(TypeError, np.add, dta, dtb)
+
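+    def test_add_unit_promotion_sketch(self):
+        # Illustrative sketch of the promotion above: adding an [h]
+        # timedelta to a [D] datetime yields the finer unit, [h].
+        res = np.datetime64('2012-12-21') + np.timedelta64(11, 'h')
+        assert_equal(res, np.datetime64('2012-12-21T11'))
+        assert_equal(res.dtype, np.dtype('M8[h]'))
+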
+ def test_datetime_subtract(self):
+ for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
+ [
+ # One-dimensional arrays
+ (np.array(['2012-12-21'], dtype='M8[D]'),
+ np.array(['2012-12-24'], dtype='M8[D]'),
+ np.array(['1940-12-24'], dtype='M8[D]'),
+ np.array(['1940-12-24T00'], dtype='M8[h]'),
+ np.array(['1940-12-23T13'], dtype='M8[h]'),
+ np.array(['NaT'], dtype='M8[D]'),
+ np.array([3], dtype='m8[D]'),
+ np.array([11], dtype='m8[h]'),
+ np.array([3*24 - 11], dtype='m8[h]')),
+ # NumPy scalars
+ (np.datetime64('2012-12-21', '[D]'),
+ np.datetime64('2012-12-24', '[D]'),
+ np.datetime64('1940-12-24', '[D]'),
+ np.datetime64('1940-12-24T00', '[h]'),
+ np.datetime64('1940-12-23T13', '[h]'),
+ np.datetime64('NaT', '[D]'),
+ np.timedelta64(3, '[D]'),
+ np.timedelta64(11, '[h]'),
+ np.timedelta64(3*24 - 11, '[h]'))]:
+ # m8 - m8
+ assert_equal(tda - tdb, tdc)
+ assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
+ assert_equal(tdb - tda, -tdc)
+ assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
+ # m8 - bool
+ assert_equal(tdc - True, tdc - 1)
+ assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
+ # m8 - int
+ assert_equal(tdc - 3*24, -tdb)
+ assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
+            # bool - m8
+ assert_equal(False - tdb, -tdb)
+ assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
+ # int - m8
+ assert_equal(3*24 - tdb, tdc)
+ assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
+ # M8 - bool
+ assert_equal(dtb - True, dtb - 1)
+ assert_equal(dtnat - True, dtnat)
+ assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
+ # M8 - int
+ assert_equal(dtb - 3, dta)
+ assert_equal(dtnat - 3, dtnat)
+ assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
+ # M8 - m8
+ assert_equal(dtb - tda, dta)
+ assert_equal(dtnat - tda, dtnat)
+ assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))
+
+ # In M8 - m8, the result goes to higher precision
+ assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
+ assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
+ np.dtype('M8[h]'))
+
+            # M8 - M8 with different units goes to the more precise unit
+ assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
+ np.timedelta64(0, 'h'))
+ assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
+ np.dtype('m8[h]'))
+ assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
+ np.timedelta64(0, 'h'))
+ assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
+ np.dtype('m8[h]'))
+
+ # m8 - M8
+ assert_raises(TypeError, np.subtract, tda, dta)
+ # bool - M8
+ assert_raises(TypeError, np.subtract, False, dta)
+ # int - M8
+ assert_raises(TypeError, np.subtract, 3, dta)
+
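+    def test_subtract_datetimes_sketch(self):
+        # Illustrative sketch: M8 - M8 is allowed (unlike M8 + M8) and
+        # yields a timedelta in the common unit.
+        delta = np.datetime64('2012-12-24') - np.datetime64('2012-12-21')
+        assert_equal(delta, np.timedelta64(3, 'D'))
+        assert_equal(delta.dtype, np.dtype('m8[D]'))
+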
+ def test_datetime_multiply(self):
+ for dta, tda, tdb, tdc in \
+ [
+ # One-dimensional arrays
+ (np.array(['2012-12-21'], dtype='M8[D]'),
+ np.array([6], dtype='m8[h]'),
+ np.array([9], dtype='m8[h]'),
+ np.array([12], dtype='m8[h]')),
+ # NumPy scalars
+ (np.datetime64('2012-12-21', '[D]'),
+ np.timedelta64(6, '[h]'),
+ np.timedelta64(9, '[h]'),
+ np.timedelta64(12, '[h]'))]:
+ # m8 * int
+ assert_equal(tda * 2, tdc)
+ assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
+ # int * m8
+ assert_equal(2 * tda, tdc)
+ assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
+ # m8 * float
+ assert_equal(tda * 1.5, tdb)
+ assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
+ # float * m8
+ assert_equal(1.5 * tda, tdb)
+ assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
+
+ # m8 * m8
+ assert_raises(TypeError, np.multiply, tda, tdb)
+            # M8 * m8
+            assert_raises(TypeError, np.multiply, dta, tda)
+            # m8 * M8
+            assert_raises(TypeError, np.multiply, tda, dta)
+ # M8 * int
+ assert_raises(TypeError, np.multiply, dta, 2)
+ # int * M8
+ assert_raises(TypeError, np.multiply, 2, dta)
+ # M8 * float
+ assert_raises(TypeError, np.multiply, dta, 1.5)
+ # float * M8
+ assert_raises(TypeError, np.multiply, 1.5, dta)
+
+ # NaTs
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in multiply")
+ nat = np.timedelta64('NaT')
+ def check(a, b, res):
+ assert_equal(a * b, res)
+ assert_equal(b * a, res)
+ for tp in (int, float):
+ check(nat, tp(2), nat)
+ check(nat, tp(0), nat)
+ for f in (float('inf'), float('nan')):
+ check(np.timedelta64(1), f, nat)
+ check(np.timedelta64(0), f, nat)
+ check(nat, f, nat)
+
+ @pytest.mark.parametrize("op1, op2, exp", [
+ # m8 same units round down
+ (np.timedelta64(7, 's'),
+ np.timedelta64(4, 's'),
+ 1),
+        # m8 same units rounds down (floor) with a negative operand
+        (np.timedelta64(7, 's'),
+         np.timedelta64(-4, 's'),
+         -2),
+        # m8 same units, negative, divides exactly (no rounding)
+        (np.timedelta64(8, 's'),
+         np.timedelta64(-4, 's'),
+         -2),
+ # m8 different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(31, 's'),
+ 1),
+ # m8 generic units
+ (np.timedelta64(1890),
+ np.timedelta64(31),
+ 60),
+ # Y // M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64('13', 'M'),
+ 1),
+ # handle 1D arrays
+ (np.array([1, 2, 3], dtype='m8'),
+ np.array([2], dtype='m8'),
+ np.array([0, 1, 1], dtype=np.int64)),
+ ])
+ def test_timedelta_floor_divide(self, op1, op2, exp):
+ assert_equal(op1 // op2, exp)
+
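+    def test_floor_divide_python_analogy_sketch(self):
+        # Illustrative sketch: timedelta floor division follows Python's
+        # floor rule, e.g. 7 // -4 == -2.
+        assert_equal(np.timedelta64(7, 's') // np.timedelta64(-4, 's'),
+                     7 // -4)
+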
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("op1, op2", [
+ # div by 0
+ (np.timedelta64(10, 'us'),
+ np.timedelta64(0, 'us')),
+ # div with NaT
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'us')),
+ # special case for int64 min
+ # in integer floor division
+ (np.timedelta64(np.iinfo(np.int64).min),
+ np.timedelta64(-1)),
+ ])
+ def test_timedelta_floor_div_warnings(self, op1, op2):
+ with assert_warns(RuntimeWarning):
+ actual = op1 // op2
+ assert_equal(actual, 0)
+ assert_equal(actual.dtype, np.int64)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # the smallest integer that can't be represented
+ # exactly in a double should be preserved if we avoid
+ # casting to double in floordiv operation
+ (9007199254740993, 1),
+ # stress the alternate floordiv code path where
+ # operand signs don't match and remainder isn't 0
+ (9007199254740999, -2),
+ ])
+ def test_timedelta_floor_div_precision(self, val1, val2):
+ op1 = np.timedelta64(val1)
+ op2 = np.timedelta64(val2)
+ actual = op1 // op2
+ # Python reference integer floor
+ expected = val1 // val2
+ assert_equal(actual, expected)
+
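+    def test_floor_div_double_precision_sketch(self):
+        # Illustrative sketch of why the values above matter: 2**53 + 1
+        # is the first integer a float64 cannot represent exactly, so a
+        # floordiv routed through double would lose it.
+        assert_(float(2**53) == 2**53)
+        assert_(float(2**53 + 1) != 2**53 + 1)
+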
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for floor division operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_floor_div_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 // val2
+
+ @pytest.mark.parametrize("op1, op2", [
+ # reuse the test cases from floordiv
+ (np.timedelta64(7, 's'),
+ np.timedelta64(4, 's')),
+        # m8 same units rounds down (floor) with a negative operand
+        (np.timedelta64(7, 's'),
+         np.timedelta64(-4, 's')),
+        # m8 same units, negative, divides exactly (no rounding)
+        (np.timedelta64(8, 's'),
+         np.timedelta64(-4, 's')),
+ # m8 different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(31, 's')),
+ # m8 generic units
+ (np.timedelta64(1890),
+ np.timedelta64(31)),
+ # Y // M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64('13', 'M')),
+ # handle 1D arrays
+ (np.array([1, 2, 3], dtype='m8'),
+ np.array([2], dtype='m8')),
+ ])
+ def test_timedelta_divmod(self, op1, op2):
+ expected = (op1 // op2, op1 % op2)
+ assert_equal(divmod(op1, op2), expected)
+
+ @pytest.mark.skipif(IS_WASM, reason="does not work in wasm")
+ @pytest.mark.parametrize("op1, op2", [
+ # reuse cases from floordiv
+ # div by 0
+ (np.timedelta64(10, 'us'),
+ np.timedelta64(0, 'us')),
+ # div with NaT
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'us')),
+ # special case for int64 min
+ # in integer floor division
+ (np.timedelta64(np.iinfo(np.int64).min),
+ np.timedelta64(-1)),
+ ])
+ def test_timedelta_divmod_warnings(self, op1, op2):
+ with assert_warns(RuntimeWarning):
+ expected = (op1 // op2, op1 % op2)
+ with assert_warns(RuntimeWarning):
+ actual = divmod(op1, op2)
+ assert_equal(actual, expected)
+
+ def test_datetime_divide(self):
+ for dta, tda, tdb, tdc, tdd in \
+ [
+ # One-dimensional arrays
+ (np.array(['2012-12-21'], dtype='M8[D]'),
+ np.array([6], dtype='m8[h]'),
+ np.array([9], dtype='m8[h]'),
+ np.array([12], dtype='m8[h]'),
+ np.array([6], dtype='m8[m]')),
+ # NumPy scalars
+ (np.datetime64('2012-12-21', '[D]'),
+ np.timedelta64(6, '[h]'),
+ np.timedelta64(9, '[h]'),
+ np.timedelta64(12, '[h]'),
+ np.timedelta64(6, '[m]'))]:
+ # m8 / int
+ assert_equal(tdc / 2, tda)
+ assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
+ # m8 / float
+ assert_equal(tda / 0.5, tdc)
+ assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
+ # m8 / m8
+ assert_equal(tda / tdb, 6 / 9)
+ assert_equal(np.divide(tda, tdb), 6 / 9)
+ assert_equal(np.true_divide(tda, tdb), 6 / 9)
+ assert_equal(tdb / tda, 9 / 6)
+ assert_equal((tda / tdb).dtype, np.dtype('f8'))
+ assert_equal(tda / tdd, 60)
+ assert_equal(tdd / tda, 1 / 60)
+
+ # int / m8
+ assert_raises(TypeError, np.divide, 2, tdb)
+ # float / m8
+ assert_raises(TypeError, np.divide, 0.5, tdb)
+            # M8 / m8
+            assert_raises(TypeError, np.divide, dta, tda)
+            # m8 / M8
+            assert_raises(TypeError, np.divide, tda, dta)
+ # M8 / int
+ assert_raises(TypeError, np.divide, dta, 2)
+ # int / M8
+ assert_raises(TypeError, np.divide, 2, dta)
+ # M8 / float
+ assert_raises(TypeError, np.divide, dta, 1.5)
+ # float / M8
+ assert_raises(TypeError, np.divide, 1.5, dta)
+
+ # NaTs
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, r".*encountered in divide")
+ nat = np.timedelta64('NaT')
+ for tp in (int, float):
+ assert_equal(np.timedelta64(1) / tp(0), nat)
+ assert_equal(np.timedelta64(0) / tp(0), nat)
+ assert_equal(nat / tp(0), nat)
+ assert_equal(nat / tp(2), nat)
+ # Division by inf
+ assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
+ assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
+ assert_equal(nat / float('inf'), nat)
+ # Division by nan
+ assert_equal(np.timedelta64(1) / float('nan'), nat)
+ assert_equal(np.timedelta64(0) / float('nan'), nat)
+ assert_equal(nat / float('nan'), nat)
+
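+    def test_timedelta_ratio_sketch(self):
+        # Illustrative sketch: m8 / m8 converts both operands to a common
+        # unit and returns a float64 ratio.
+        ratio = np.timedelta64(90, 'm') / np.timedelta64(1, 'h')
+        assert_equal(ratio, 1.5)
+        assert_equal(ratio.dtype, np.dtype('f8'))
+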
+ def test_datetime_compare(self):
+ # Test all the comparison operators
+ a = np.datetime64('2000-03-12T18:00:00.000000')
+ b = np.array(['2000-03-12T18:00:00.000000',
+ '2000-03-12T17:59:59.999999',
+ '2000-03-12T18:00:00.000001',
+ '1970-01-11T12:00:00.909090',
+ '2016-01-11T12:00:00.909090'],
+ dtype='datetime64[us]')
+ assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
+ assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
+ assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
+ assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
+ assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
+ assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
+
+ def test_datetime_compare_nat(self):
+ dt_nat = np.datetime64('NaT', 'D')
+ dt_other = np.datetime64('2000-01-01')
+ td_nat = np.timedelta64('NaT', 'h')
+ td_other = np.timedelta64(1, 'h')
+
+ for op in [np.equal, np.less, np.less_equal,
+ np.greater, np.greater_equal]:
+ assert_(not op(dt_nat, dt_nat))
+ assert_(not op(dt_nat, dt_other))
+ assert_(not op(dt_other, dt_nat))
+
+ assert_(not op(td_nat, td_nat))
+ assert_(not op(td_nat, td_other))
+ assert_(not op(td_other, td_nat))
+
+ assert_(np.not_equal(dt_nat, dt_nat))
+ assert_(np.not_equal(dt_nat, dt_other))
+ assert_(np.not_equal(dt_other, dt_nat))
+
+ assert_(np.not_equal(td_nat, td_nat))
+ assert_(np.not_equal(td_nat, td_other))
+ assert_(np.not_equal(td_other, td_nat))
+
+ def test_datetime_minmax(self):
+ # The metadata of the result should become the GCD
+ # of the operand metadata
+ a = np.array('1999-03-12T13', dtype='M8[2m]')
+ b = np.array('1999-03-12T12', dtype='M8[s]')
+ assert_equal(np.minimum(a, b), b)
+ assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
+ assert_equal(np.fmin(a, b), b)
+ assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
+ assert_equal(np.maximum(a, b), a)
+ assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
+ assert_equal(np.fmax(a, b), a)
+ assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
+ # Viewed as integers, the comparison is opposite because
+ # of the units chosen
+ assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
+
+ # Interaction with NaT
+ a = np.array('1999-03-12T13', dtype='M8[2m]')
+ dtnat = np.array('NaT', dtype='M8[h]')
+ assert_equal(np.minimum(a, dtnat), dtnat)
+ assert_equal(np.minimum(dtnat, a), dtnat)
+ assert_equal(np.maximum(a, dtnat), dtnat)
+ assert_equal(np.maximum(dtnat, a), dtnat)
+ assert_equal(np.fmin(dtnat, a), a)
+ assert_equal(np.fmin(a, dtnat), a)
+ assert_equal(np.fmax(dtnat, a), a)
+ assert_equal(np.fmax(a, dtnat), a)
+
+ # Also do timedelta
+ a = np.array(3, dtype='m8[h]')
+ b = np.array(3*3600 - 3, dtype='m8[s]')
+ assert_equal(np.minimum(a, b), b)
+ assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
+ assert_equal(np.fmin(a, b), b)
+ assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
+ assert_equal(np.maximum(a, b), a)
+ assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
+ assert_equal(np.fmax(a, b), a)
+ assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
+ # Viewed as integers, the comparison is opposite because
+ # of the units chosen
+ assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
+
+ # should raise between datetime and timedelta
+ #
+ # TODO: Allowing unsafe casting by
+ # default in ufuncs strikes again... :(
+ a = np.array(3, dtype='m8[h]')
+ b = np.array('1999-03-12T12', dtype='M8[s]')
+ #assert_raises(TypeError, np.minimum, a, b)
+ #assert_raises(TypeError, np.maximum, a, b)
+ #assert_raises(TypeError, np.fmin, a, b)
+ #assert_raises(TypeError, np.fmax, a, b)
+ assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
+ assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
+ assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
+ assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
+
+ def test_hours(self):
+ t = np.ones(3, dtype='M8[s]')
+ t[0] = 60*60*24 + 60*60*10
+ assert_(t[0].item().hour == 10)
+
+ def test_divisor_conversion_year(self):
+ assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
+ assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
+ assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
+
+ def test_divisor_conversion_month(self):
+ assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
+ assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
+ assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
+
+ def test_divisor_conversion_week(self):
+ assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
+ assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
+ assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
+
+ def test_divisor_conversion_day(self):
+ assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
+ assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
+ assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
+
+ def test_divisor_conversion_hour(self):
+ assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
+ assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
+
+ def test_divisor_conversion_minute(self):
+ assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
+ assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
+
+ def test_divisor_conversion_second(self):
+ assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
+ assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
+
+ def test_divisor_conversion_fs(self):
+ assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
+
+ def test_divisor_conversion_as(self):
+ assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
+
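+    def test_divisor_conversion_arithmetic_sketch(self):
+        # Illustrative arithmetic for the divisor syntax above: the unit
+        # is divided and re-expressed exactly, e.g. D/12 is 24h / 12 == 2h,
+        # and h/30 is 3600s / 30 == 120s == 2m.
+        assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
+        assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
+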
+ def test_string_parser_variants(self):
+ # Allow space instead of 'T' between date and time
+ assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
+ # Allow positive years
+ assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
+ # Allow negative years
+ assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
+ # UTC specifier
+ with assert_warns(DeprecationWarning):
+ assert_equal(
+ np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
+ with assert_warns(DeprecationWarning):
+ assert_equal(
+ np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
+ np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
+ # Time zone offset
+ with assert_warns(DeprecationWarning):
+ assert_equal(
+ np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
+ np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
+ with assert_warns(DeprecationWarning):
+ assert_equal(
+ np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
+ np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
+ with assert_warns(DeprecationWarning):
+ assert_equal(
+ np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
+ np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.datetime64('1977-03-02T12:30-0230'),
+ np.datetime64('1977-03-02T15:00'))
+
+ def test_string_parser_error_check(self):
+ # Arbitrary bad string
+ assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
+ # Character after year must be '-'
+ assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
+ # Cannot have trailing '-'
+ assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
+ # Month must be in range [1,12]
+ assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
+ # Month must have two digits
+ assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
+ # 'Mor' is not a valid month
+ assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
+ # Cannot have trailing '-'
+ assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
+ # Day must be in range [1,len(month)]
+ assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
+ # Cannot have trailing characters
+ assert_raises(ValueError, np.array, ['1980-02-03%'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03 q'],
+ np.dtype('M8[us]'))
+
+ # Hours must be in range [0, 23]
+ assert_raises(ValueError, np.array, ['1980-02-03 25'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03T25'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03 -1'],
+ np.dtype('M8[us]'))
+ # No trailing ':'
+ assert_raises(ValueError, np.array, ['1980-02-03 01:'],
+ np.dtype('M8[us]'))
+ # Minutes must be in range [0, 59]
+ assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
+ np.dtype('M8[us]'))
+ # No trailing ':'
+ assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
+ np.dtype('M8[us]'))
+ # Seconds must be in range [0, 59]
+ assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
+ np.dtype('M8[us]'))
+ assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
+ np.dtype('M8[us]'))
+        # Timezone offset must be within a reasonable range
+ with assert_warns(DeprecationWarning):
+ assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
+ np.dtype('M8[us]'))
+ with assert_warns(DeprecationWarning):
+ assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
+ np.dtype('M8[us]'))
+ with assert_warns(DeprecationWarning):
+ assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
+ np.dtype('M8[us]'))
+ with assert_warns(DeprecationWarning):
+ assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
+ np.dtype('M8[us]'))
+ with assert_warns(DeprecationWarning):
+ assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
+ np.dtype('M8[us]'))
+
+ def test_creation_overflow(self):
+ date = '1980-03-23 20:00:00'
+ timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
+ for unit in ['ms', 'us', 'ns']:
+ timesteps *= 1000
+ x = np.array([date], dtype='datetime64[%s]' % unit)
+
+ assert_equal(timesteps, x[0].astype(np.int64),
+ err_msg='Datetime conversion error for unit %s' % unit)
+
+ assert_equal(x[0].astype(np.int64), 322689600000000000)
+
+ # gh-13062
+ with pytest.raises(OverflowError):
+ np.datetime64(2**64, 'D')
+ with pytest.raises(OverflowError):
+ np.timedelta64(2**64, 'D')
+
+ def test_datetime_as_string(self):
+ # Check all the units with default string conversion
+ date = '1959-10-13'
+ datetime = '1959-10-13T12:34:56.789012345678901234'
+
+ assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
+ '1959')
+ assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
+ '1959-10')
+ assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
+ '1959-10-13')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
+ '1959-10-13T12')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
+ '1959-10-13T12:34')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
+ '1959-10-13T12:34:56')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
+ '1959-10-13T12:34:56.789')
+ for us in ['us', 'μs', b'us']: # check non-ascii and bytes too
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, us)),
+ '1959-10-13T12:34:56.789012')
+
+ datetime = '1969-12-31T23:34:56.789012345678901234'
+
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
+ '1969-12-31T23:34:56.789012345')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
+ '1969-12-31T23:34:56.789012345678')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
+ '1969-12-31T23:34:56.789012345678901')
+
+ datetime = '1969-12-31T23:59:57.789012345678901234'
+
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
+ datetime)
+ datetime = '1970-01-01T00:34:56.789012345678901234'
+
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
+ '1970-01-01T00:34:56.789012345')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
+ '1970-01-01T00:34:56.789012345678')
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
+ '1970-01-01T00:34:56.789012345678901')
+
+ datetime = '1970-01-01T00:00:05.789012345678901234'
+
+ assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
+ datetime)
+
+ # String conversion with the unit= parameter
+ a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
+ assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
+ '2032')
+ assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
+ '2032-07')
+ assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
+ '2032-07-18')
+ assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
+ '2032-07-18')
+ assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
+ assert_equal(np.datetime_as_string(a, unit='m'),
+ '2032-07-18T12:23')
+ assert_equal(np.datetime_as_string(a, unit='s'),
+ '2032-07-18T12:23:34')
+ assert_equal(np.datetime_as_string(a, unit='ms'),
+ '2032-07-18T12:23:34.123')
+ assert_equal(np.datetime_as_string(a, unit='us'),
+ '2032-07-18T12:23:34.123456')
+ assert_equal(np.datetime_as_string(a, unit='ns'),
+ '2032-07-18T12:23:34.123456000')
+ assert_equal(np.datetime_as_string(a, unit='ps'),
+ '2032-07-18T12:23:34.123456000000')
+ assert_equal(np.datetime_as_string(a, unit='fs'),
+ '2032-07-18T12:23:34.123456000000000')
+ assert_equal(np.datetime_as_string(a, unit='as'),
+ '2032-07-18T12:23:34.123456000000000000')
+
+ # unit='auto' parameter
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
+ '2032-07-18T12:23:34.123456')
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
+ '2032-07-18T12:23:34.120')
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
+ '2032-07-18T12:23:34')
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
+ '2032-07-18T12:23')
+ # 'auto' doesn't split up hour and minute
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
+ '2032-07-18T12:00')
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
+ '2032-07-18')
+ # 'auto' doesn't split up the date
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
+ '2032-07-01')
+ assert_equal(np.datetime_as_string(
+ np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
+ '2032-01-01')
+
+ @pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
+ def test_datetime_as_string_timezone(self):
+ # timezone='local' vs 'UTC'
+ a = np.datetime64('2010-03-15T06:30', 'm')
+ assert_equal(np.datetime_as_string(a),
+ '2010-03-15T06:30')
+ assert_equal(np.datetime_as_string(a, timezone='naive'),
+ '2010-03-15T06:30')
+ assert_equal(np.datetime_as_string(a, timezone='UTC'),
+ '2010-03-15T06:30Z')
+ assert_(np.datetime_as_string(a, timezone='local') !=
+ '2010-03-15T06:30')
+
+ b = np.datetime64('2010-02-15T06:30', 'm')
+
+ assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
+ '2010-03-15T01:30-0500')
+ assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
+ '2010-03-15T02:30-0400')
+ assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
+ '2010-03-14T23:30-0700')
+
+ assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
+ '2010-02-15T00:30-0600')
+ assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
+ '2010-02-15T01:30-0500')
+ assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
+ '2010-02-14T22:30-0800')
+
+        # Converting dates to strings with a timezone attached is disabled by default
+ assert_raises(TypeError, np.datetime_as_string, a, unit='D',
+ timezone=tz('US/Pacific'))
+ # Check that we can print out the date in the specified time zone
+ assert_equal(np.datetime_as_string(a, unit='D',
+ timezone=tz('US/Pacific'), casting='unsafe'),
+ '2010-03-14')
+ assert_equal(np.datetime_as_string(b, unit='D',
+ timezone=tz('US/Central'), casting='unsafe'),
+ '2010-02-15')
+
+ def test_datetime_arange(self):
+ # With two datetimes provided as strings
+ a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
+ assert_equal(a.dtype, np.dtype('M8[D]'))
+ assert_equal(a,
+ np.array(['2010-01-05', '2010-01-06', '2010-01-07',
+ '2010-01-08', '2010-01-09'], dtype='M8[D]'))
+
+ a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
+ assert_equal(a.dtype, np.dtype('M8[D]'))
+ assert_equal(a,
+ np.array(['1950-02-10', '1950-02-09', '1950-02-08',
+ '1950-02-07'], dtype='M8[D]'))
+
+ # Unit should be detected as months here
+ a = np.arange('1969-05', '1970-05', 2, dtype='M8')
+ assert_equal(a.dtype, np.dtype('M8[M]'))
+ assert_equal(a,
+ np.datetime64('1969-05') + np.arange(12, step=2))
+
+        # A (datetime, integer) or (datetime, timedelta) pair works as well,
+        # producing arange(start, start + stop) in this case
+ a = np.arange('1969', 18, 3, dtype='M8')
+ assert_equal(a.dtype, np.dtype('M8[Y]'))
+ assert_equal(a,
+ np.datetime64('1969') + np.arange(18, step=3))
+ a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
+ assert_equal(a.dtype, np.dtype('M8[D]'))
+ assert_equal(a,
+ np.datetime64('1969-12-19') + np.arange(22, step=2))
+
+ # Step of 0 is disallowed
+ assert_raises(ValueError, np.arange, np.datetime64('today'),
+ np.datetime64('today') + 3, 0)
+ # Promotion across nonlinear unit boundaries is disallowed
+ assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
+ np.timedelta64(5, 'M'))
+ assert_raises(TypeError, np.arange,
+ np.datetime64('2012-02-03T14', 's'),
+ np.timedelta64(5, 'Y'))
+
+ def test_datetime_arange_no_dtype(self):
+ d = np.array('2010-01-04', dtype="M8[D]")
+ assert_equal(np.arange(d, d + 1), d)
+ assert_raises(ValueError, np.arange, d)
+
+ def test_timedelta_arange(self):
+ a = np.arange(3, 10, dtype='m8')
+ assert_equal(a.dtype, np.dtype('m8'))
+ assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
+
+ a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
+ assert_equal(a.dtype, np.dtype('m8[s]'))
+ assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
+
+ # Step of 0 is disallowed
+ assert_raises(ValueError, np.arange, np.timedelta64(0),
+ np.timedelta64(5), 0)
+ # Promotion across nonlinear unit boundaries is disallowed
+ assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
+ np.timedelta64(5, 'M'))
+ assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
+ np.timedelta64(5, 'D'))
+
+ @pytest.mark.parametrize("val1, val2, expected", [
+ # case from gh-12092
+ (np.timedelta64(7, 's'),
+ np.timedelta64(3, 's'),
+ np.timedelta64(1, 's')),
+ # negative value cases
+ (np.timedelta64(3, 's'),
+ np.timedelta64(-2, 's'),
+ np.timedelta64(-1, 's')),
+ (np.timedelta64(-3, 's'),
+ np.timedelta64(2, 's'),
+ np.timedelta64(1, 's')),
+ # larger value cases
+ (np.timedelta64(17, 's'),
+ np.timedelta64(22, 's'),
+ np.timedelta64(17, 's')),
+ (np.timedelta64(22, 's'),
+ np.timedelta64(17, 's'),
+ np.timedelta64(5, 's')),
+ # different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(57, 's'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(1, 'us'),
+ np.timedelta64(727, 'ns'),
+ np.timedelta64(273, 'ns')),
+ # NaT is propagated
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'ns'),
+ np.timedelta64('NaT')),
+ # Y % M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64(22, 'M'),
+ np.timedelta64(2, 'M')),
+ ])
+ def test_timedelta_modulus(self, val1, val2, expected):
+ assert_equal(val1 % val2, expected)
+
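+    def test_modulus_identity_sketch(self):
+        # Illustrative sketch: the results above follow from the identity
+        # a % b == a - (a // b) * b with floor division. For 3s % -2s,
+        # 3 // -2 == -2, so the remainder is 3 - (-2)*(-2) == -1 seconds.
+        a = np.timedelta64(3, 's')
+        b = np.timedelta64(-2, 's')
+        assert_equal(a % b, a - (a // b) * b)
+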
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for modulus operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 % val2
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_timedelta_modulus_div_by_zero(self):
+ with assert_warns(RuntimeWarning):
+ actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
+ assert_equal(actual, np.timedelta64('NaT'))
+
+ @pytest.mark.parametrize("val1, val2", [
+ # cases where one operand is not
+ # timedelta64
+ (np.timedelta64(7, 'Y'),
+ 15,),
+ (7.5,
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_type_resolution(self, val1, val2):
+ # NOTE: some of the operations may be supported
+ # in the future
+ with assert_raises_regex(TypeError,
+ "'remainder' cannot use operands with types"):
+ val1 % val2
+
+ def test_timedelta_arange_no_dtype(self):
+ d = np.array(5, dtype="m8[D]")
+ assert_equal(np.arange(d, d + 1), d)
+ assert_equal(np.arange(d), np.arange(0, d))
+
+ def test_datetime_maximum_reduce(self):
+ a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
+ assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
+ assert_equal(np.maximum.reduce(a),
+ np.datetime64('2010-01-02'))
+
+ a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
+ assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
+ assert_equal(np.maximum.reduce(a),
+ np.timedelta64(7, 's'))
+
+ def test_timedelta_correct_mean(self):
+        # test mainly because it used to work only via a bug that allowed
+        # `timedelta.sum(dtype="f8")` to ignore the dtype request.
+ a = np.arange(1000, dtype="m8[s]")
+ assert_array_equal(a.mean(), a.sum() / len(a))
+
+ def test_datetime_no_subtract_reducelike(self):
+ # subtracting two datetime64 works, but we cannot reduce it, since
+ # the result of that subtraction will have a different dtype.
+ arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
+ msg = r"the resolved dtypes are not compatible"
+
+ with pytest.raises(TypeError, match=msg):
+ np.subtract.reduce(arr)
+
+ with pytest.raises(TypeError, match=msg):
+ np.subtract.accumulate(arr)
+
+ with pytest.raises(TypeError, match=msg):
+ np.subtract.reduceat(arr, [0])
+
+ def test_datetime_busday_offset(self):
+ # First Monday in June
+ assert_equal(
+ np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
+ np.datetime64('2011-06-06'))
+ # Last Monday in June
+ assert_equal(
+ np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
+ np.datetime64('2011-06-27'))
+
+ # Default M-F business days, different roll modes
+ assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
+ np.datetime64('2010-07-30'))
+ assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
+ np.datetime64('2010-07-30'))
+ assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
+ np.datetime64('2010-08-02'))
+ assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
+ np.datetime64('2010-08-02'))
+ assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
+ np.datetime64('2010-08-02'))
+ assert_equal(np.busday_offset('2010-08', 0, roll='following'),
+ np.datetime64('2010-08-02'))
+ assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
+ np.datetime64('2010-11-01'))
+ assert_equal(
+ np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
+ np.datetime64('2010-10-29'))
+ assert_equal(
+ np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
+ np.datetime64('2010-10-29'))
+ assert_equal(
+ np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
+ np.datetime64('2010-10-18'))
+ assert_equal(
+ np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
+ np.datetime64('2010-10-15'))
+ # roll='raise' by default
+ assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)
+
+ # Bigger offset values
+ assert_equal(np.busday_offset('2006-02-01', 25),
+ np.datetime64('2006-03-08'))
+ assert_equal(np.busday_offset('2006-03-08', -25),
+ np.datetime64('2006-02-01'))
+ assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
+ np.datetime64('2007-04-07'))
+ assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
+ np.datetime64('2007-02-25'))
+
+ # NaT values when roll is not raise
+ assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
+ np.datetime64('NaT'))
+ assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
+ np.datetime64('NaT'))
+ assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
+ np.datetime64('NaT'))
+
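+    def test_busday_offset_step_sketch(self):
+        # Illustrative sketch: with the default Mon-Fri weekmask, offsets
+        # count business days. 2011-06-06 is a Monday, so +4 lands on
+        # Friday 2011-06-10 and +5 skips the weekend to Monday 2011-06-13.
+        assert_equal(np.busday_offset('2011-06-06', 4),
+                     np.datetime64('2011-06-10'))
+        assert_equal(np.busday_offset('2011-06-06', 5),
+                     np.datetime64('2011-06-13'))
+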
+ def test_datetime_busdaycalendar(self):
+ # Check that it removes NaT, duplicates, and weekends
+ # and sorts the result.
+ bdd = np.busdaycalendar(
+ holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
+ '2011-12-26', '2011-05-30', '2011-01-17'])
+ assert_equal(bdd.holidays,
+ np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
+ # Default M-F weekmask
+ assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
+
+ # Check string weekmask with varying whitespace.
+ bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
+ assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
+
+        # Check a length-7 '0'/'1' string weekmask
+ bdd = np.busdaycalendar(weekmask="0011001")
+ assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
+
+        # Check an abbreviated weekday-name weekmask.
+ bdd = np.busdaycalendar(weekmask="Mon Tue")
+ assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
+
+ # All-zeros weekmask should raise
+ assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
+ # weekday names must be correct case
+ assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
+        # An empty weekmask string should also raise
+ assert_raises(ValueError, np.busdaycalendar, weekmask="")
+ # Invalid weekday name codes should raise
+ assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
+ assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
+ assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
+
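+    def _editor_note_weekmask_spellings(self):
+        # Editor's sketch, not part of the upstream test: the three weekmask
+        # spellings accepted above (abbreviated names, a '0'/'1' string, and
+        # a boolean-like sequence) are interchangeable.
+        a = np.busdaycalendar(weekmask="Mon Tue")
+        b = np.busdaycalendar(weekmask="1100000")
+        c = np.busdaycalendar(weekmask=[1, 1, 0, 0, 0, 0, 0])
+        assert_equal(a.weekmask, b.weekmask)
+        assert_equal(b.weekmask, c.weekmask)
+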
+ def test_datetime_busday_holidays_offset(self):
+ # With exactly one holiday
+ assert_equal(
+ np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
+ np.datetime64('2011-11-18'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
+ np.datetime64('2011-11-10'))
+ assert_equal(
+ np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
+ np.datetime64('2011-11-10'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
+ np.datetime64('2011-11-04'))
+ # With the holiday appearing twice
+ assert_equal(
+ np.busday_offset('2011-11-10', 1,
+ holidays=['2011-11-11', '2011-11-11']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -1,
+ holidays=['2011-11-11', '2011-11-11']),
+ np.datetime64('2011-11-10'))
+ # With a NaT holiday
+ assert_equal(
+ np.busday_offset('2011-11-10', 1,
+ holidays=['2011-11-11', 'NaT']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -1,
+ holidays=['NaT', '2011-11-11']),
+ np.datetime64('2011-11-10'))
+ # With another holiday after
+ assert_equal(
+ np.busday_offset('2011-11-10', 1,
+ holidays=['2011-11-11', '2011-11-24']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -1,
+ holidays=['2011-11-11', '2011-11-24']),
+ np.datetime64('2011-11-10'))
+ # With another holiday before
+ assert_equal(
+ np.busday_offset('2011-11-10', 1,
+ holidays=['2011-10-10', '2011-11-11']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -1,
+ holidays=['2011-10-10', '2011-11-11']),
+ np.datetime64('2011-11-10'))
+ # With another holiday before and after
+ assert_equal(
+ np.busday_offset('2011-11-10', 1,
+ holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
+ np.datetime64('2011-11-14'))
+ assert_equal(
+ np.busday_offset('2011-11-14', -1,
+ holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
+ np.datetime64('2011-11-10'))
+
+ # A bigger forward jump across more than one week/holiday
+ holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
+ '2011-12-25', '2011-05-30', '2011-02-21',
+ '2011-12-26', '2012-01-02']
+ bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
+ assert_equal(
+ np.busday_offset('2011-10-03', 4, holidays=holidays),
+ np.busday_offset('2011-10-03', 4))
+ assert_equal(
+ np.busday_offset('2011-10-03', 5, holidays=holidays),
+ np.busday_offset('2011-10-03', 5 + 1))
+ assert_equal(
+ np.busday_offset('2011-10-03', 27, holidays=holidays),
+ np.busday_offset('2011-10-03', 27 + 1))
+ assert_equal(
+ np.busday_offset('2011-10-03', 28, holidays=holidays),
+ np.busday_offset('2011-10-03', 28 + 2))
+ assert_equal(
+ np.busday_offset('2011-10-03', 35, holidays=holidays),
+ np.busday_offset('2011-10-03', 35 + 2))
+ assert_equal(
+ np.busday_offset('2011-10-03', 36, holidays=holidays),
+ np.busday_offset('2011-10-03', 36 + 3))
+ assert_equal(
+ np.busday_offset('2011-10-03', 56, holidays=holidays),
+ np.busday_offset('2011-10-03', 56 + 3))
+ assert_equal(
+ np.busday_offset('2011-10-03', 57, holidays=holidays),
+ np.busday_offset('2011-10-03', 57 + 4))
+ assert_equal(
+ np.busday_offset('2011-10-03', 60, holidays=holidays),
+ np.busday_offset('2011-10-03', 60 + 4))
+ assert_equal(
+ np.busday_offset('2011-10-03', 61, holidays=holidays),
+ np.busday_offset('2011-10-03', 61 + 5))
+ assert_equal(
+ np.busday_offset('2011-10-03', 61, busdaycal=bdd),
+ np.busday_offset('2011-10-03', 61 + 5))
+ # A bigger backward jump across more than one week/holiday
+ assert_equal(
+ np.busday_offset('2012-01-03', -1, holidays=holidays),
+ np.busday_offset('2012-01-03', -1 - 1))
+ assert_equal(
+ np.busday_offset('2012-01-03', -4, holidays=holidays),
+ np.busday_offset('2012-01-03', -4 - 1))
+ assert_equal(
+ np.busday_offset('2012-01-03', -5, holidays=holidays),
+ np.busday_offset('2012-01-03', -5 - 2))
+ assert_equal(
+ np.busday_offset('2012-01-03', -25, holidays=holidays),
+ np.busday_offset('2012-01-03', -25 - 2))
+ assert_equal(
+ np.busday_offset('2012-01-03', -26, holidays=holidays),
+ np.busday_offset('2012-01-03', -26 - 3))
+ assert_equal(
+ np.busday_offset('2012-01-03', -33, holidays=holidays),
+ np.busday_offset('2012-01-03', -33 - 3))
+ assert_equal(
+ np.busday_offset('2012-01-03', -34, holidays=holidays),
+ np.busday_offset('2012-01-03', -34 - 4))
+ assert_equal(
+ np.busday_offset('2012-01-03', -56, holidays=holidays),
+ np.busday_offset('2012-01-03', -56 - 4))
+ assert_equal(
+ np.busday_offset('2012-01-03', -57, holidays=holidays),
+ np.busday_offset('2012-01-03', -57 - 5))
+ assert_equal(
+ np.busday_offset('2012-01-03', -57, busdaycal=bdd),
+ np.busday_offset('2012-01-03', -57 - 5))
+
+ # Can't supply both a weekmask/holidays and busdaycal
+ assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
+ weekmask='1111100', busdaycal=bdd)
+ assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
+ holidays=holidays, busdaycal=bdd)
+
+ # Roll with the holidays
+ assert_equal(
+ np.busday_offset('2011-12-25', 0,
+ roll='forward', holidays=holidays),
+ np.datetime64('2011-12-27'))
+ assert_equal(
+ np.busday_offset('2011-12-26', 0,
+ roll='forward', holidays=holidays),
+ np.datetime64('2011-12-27'))
+ assert_equal(
+ np.busday_offset('2011-12-26', 0,
+ roll='backward', holidays=holidays),
+ np.datetime64('2011-12-23'))
+ assert_equal(
+ np.busday_offset('2012-02-27', 0,
+ roll='modifiedfollowing',
+ holidays=['2012-02-27', '2012-02-26', '2012-02-28',
+ '2012-03-01', '2012-02-29']),
+ np.datetime64('2012-02-24'))
+ assert_equal(
+ np.busday_offset('2012-03-06', 0,
+ roll='modifiedpreceding',
+ holidays=['2012-03-02', '2012-03-03', '2012-03-01',
+ '2012-03-05', '2012-03-07', '2012-03-06']),
+ np.datetime64('2012-03-08'))
+
+ def test_datetime_busday_holidays_count(self):
+ holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
+ '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
+ '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
+ '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
+ bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
+
+ # Validate against busday_offset broadcast against
+ # a range of offsets
+ dates = np.busday_offset('2011-01-01', np.arange(366),
+ roll='forward', busdaycal=bdd)
+ assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
+ np.arange(366))
+ # Returns negative value when reversed
+ assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
+ -np.arange(366))
+
+ dates = np.busday_offset('2011-12-31', -np.arange(366),
+ roll='forward', busdaycal=bdd)
+ assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
+ np.arange(366))
+ # Returns negative value when reversed
+ assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
+ -np.arange(366))
+
+ # Can't supply both a weekmask/holidays and busdaycal
+        assert_raises(ValueError, np.busday_count, '2012-01-03', '2012-02-03',
+                      weekmask='1111100', busdaycal=bdd)
+        assert_raises(ValueError, np.busday_count, '2012-01-03', '2012-02-03',
+                      holidays=holidays, busdaycal=bdd)
+
+ # Number of Mondays in March 2011
+ assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
+ # Returns negative value when reversed
+ assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
+
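+    def _editor_note_busday_count_halfopen(self):
+        # Editor's sketch, not part of the upstream test: busday_count counts
+        # business days in the half-open interval [begindates, enddates),
+        # which is why the broadcast checks above line up with np.arange.
+        # Mon 2011-01-03 through Mon 2011-01-10 spans exactly five default
+        # Mon-Fri business days.
+        assert_equal(np.busday_count('2011-01-03', '2011-01-10'), 5)
+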
+ def test_datetime_is_busday(self):
+ holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
+ '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
+ '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
+ '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
+ 'NaT']
+ bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
+
+ # Weekend/weekday tests
+ assert_equal(np.is_busday('2011-01-01'), False)
+ assert_equal(np.is_busday('2011-01-02'), False)
+ assert_equal(np.is_busday('2011-01-03'), True)
+
+        # None of the holidays should be a business day
+ assert_equal(np.is_busday(holidays, busdaycal=bdd),
+ np.zeros(len(holidays), dtype='?'))
+
+ def test_datetime_y2038(self):
+ # Test parsing on either side of the Y2038 boundary
+ a = np.datetime64('2038-01-19T03:14:07')
+ assert_equal(a.view(np.int64), 2**31 - 1)
+ a = np.datetime64('2038-01-19T03:14:08')
+ assert_equal(a.view(np.int64), 2**31)
+
+ # Test parsing on either side of the Y2038 boundary with
+ # a manually specified timezone offset
+ with assert_warns(DeprecationWarning):
+ a = np.datetime64('2038-01-19T04:14:07+0100')
+ assert_equal(a.view(np.int64), 2**31 - 1)
+ with assert_warns(DeprecationWarning):
+ a = np.datetime64('2038-01-19T04:14:08+0100')
+ assert_equal(a.view(np.int64), 2**31)
+
+ # Test parsing a date after Y2038
+ a = np.datetime64('2038-01-20T13:21:14')
+ assert_equal(str(a), '2038-01-20T13:21:14')
+
+ def test_isnat(self):
+ assert_(np.isnat(np.datetime64('NaT', 'ms')))
+ assert_(np.isnat(np.datetime64('NaT', 'ns')))
+ assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
+
+ assert_(np.isnat(np.timedelta64('NaT', "ms")))
+ assert_(not np.isnat(np.timedelta64(34, "ms")))
+
+ res = np.array([False, False, True])
+ for unit in ['Y', 'M', 'W', 'D',
+ 'h', 'm', 's', 'ms', 'us',
+ 'ns', 'ps', 'fs', 'as']:
+ arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
+ assert_equal(np.isnat(arr), res)
+
+ def test_isnat_error(self):
+ # Test that only datetime dtype arrays are accepted
+ for t in np.typecodes["All"]:
+ if t in np.typecodes["Datetime"]:
+ continue
+ assert_raises(TypeError, np.isnat, np.zeros(10, t))
+
+ def test_isfinite_scalar(self):
+ assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
+ assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
+ assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
+
+ assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
+ assert_(np.isfinite(np.timedelta64(34, "ms")))
+
+ @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+ 'us', 'ns', 'ps', 'fs', 'as'])
+ @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
+ '<timedelta64[%s]', '>timedelta64[%s]'])
+ def test_isfinite_isinf_isnan_units(self, unit, dstr):
+ '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
+ '''
+ arr_val = [123, -321, "NaT"]
+        arr = np.array(arr_val, dtype=dstr % unit)
+ pos = np.array([True, True, False])
+ neg = np.array([False, False, True])
+ false = np.array([False, False, False])
+ assert_equal(np.isfinite(arr), pos)
+ assert_equal(np.isinf(arr), false)
+ assert_equal(np.isnan(arr), neg)
+
+ def test_assert_equal(self):
+ assert_raises(AssertionError, assert_equal,
+ np.datetime64('nat'), np.timedelta64('nat'))
+
+ def test_corecursive_input(self):
+ # construct a co-recursive list
+ a, b = [], []
+ a.append(b)
+ b.append(a)
+ obj_arr = np.array([None])
+ obj_arr[0] = a
+
+ # At some point this caused a stack overflow (gh-11154). Now raises
+ # ValueError since the nested list cannot be converted to a datetime.
+ assert_raises(ValueError, obj_arr.astype, 'M8')
+ assert_raises(ValueError, obj_arr.astype, 'm8')
+
+ @pytest.mark.parametrize("shape", [(), (1,)])
+ def test_discovery_from_object_array(self, shape):
+ arr = np.array("2020-10-10", dtype=object).reshape(shape)
+ res = np.array("2020-10-10", dtype="M8").reshape(shape)
+ assert res.dtype == np.dtype("M8[D]")
+ assert_equal(arr.astype("M8"), res)
+ arr[...] = np.bytes_("2020-10-10") # try a numpy string type
+ assert_equal(arr.astype("M8"), res)
+ arr = arr.astype("S")
+ assert_equal(arr.astype("S").astype("M8"), res)
+
+ @pytest.mark.parametrize("time_unit", [
+ "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
+ # compound units
+ "10D", "2M",
+ ])
+ def test_limit_symmetry(self, time_unit):
+ """
+        Dates should have symmetric limits around the unix epoch
+        at +/- np.iinfo(np.int64).max.
+ """
+ epoch = np.datetime64(0, time_unit)
+ latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
+ earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
+
+ # above should not have overflowed
+ assert earliest < epoch < latest
+
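+    def _editor_note_nat_sentinel(self):
+        # Editor's sketch, not part of the upstream test: the limits above
+        # use -iinfo.max rather than iinfo.min because, as far as I can
+        # tell, np.iinfo(np.int64).min is reserved as the internal NaT
+        # sentinel value.
+        assert_(np.isnat(np.datetime64(np.iinfo(np.int64).min, "s")))
+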
+ @pytest.mark.parametrize("time_unit", [
+ "Y", "M",
+ pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
+ "D", "h", "m",
+ "s", "ms", "us", "ns", "ps", "fs", "as",
+ pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
+ ])
+ @pytest.mark.parametrize("sign", [-1, 1])
+ def test_limit_str_roundtrip(self, time_unit, sign):
+ """
+ Limits should roundtrip when converted to strings.
+
+ This tests the conversion to and from npy_datetimestruct.
+ """
+ # TODO: add absolute (gold standard) time span limit strings
+ limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
+
+ # Convert to string and back. Explicit unit needed since the day and
+ # week reprs are not distinguishable.
+ limit_via_str = np.datetime64(str(limit), time_unit)
+ assert limit_via_str == limit
+
+
+class TestDateTimeData:
+
+ def test_basic(self):
+ a = np.array(['1980-03-23'], dtype=np.datetime64)
+ assert_equal(np.datetime_data(a.dtype), ('D', 1))
+
+ def test_bytes(self):
+ # byte units are converted to unicode
+ dt = np.datetime64('2000', (b'ms', 5))
+ assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+ dt = np.datetime64('2000', b'5ms')
+ assert np.datetime_data(dt.dtype) == ('ms', 5)
+
+ def test_non_ascii(self):
+ # μs is normalized to μ
+ dt = np.datetime64('2000', ('μs', 5))
+ assert np.datetime_data(dt.dtype) == ('us', 5)
+
+ dt = np.datetime64('2000', '5μs')
+ assert np.datetime_data(dt.dtype) == ('us', 5)
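+
+    def _editor_note_datetime_data_roundtrip(self):
+        # Editor's sketch, not part of the upstream test: np.datetime_data
+        # round-trips with the (unit, count) constructor form used above.
+        dt = np.datetime64('2000', ('s', 10))
+        assert np.datetime_data(dt.dtype) == ('s', 10)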
+
+
+def test_comparisons_return_not_implemented():
+ # GH#17017
+
+ class custom:
+ __array_priority__ = 10000
+
+ obj = custom()
+
+ dt = np.datetime64('2000', 'ns')
+ td = dt - dt
+
+ for item in [dt, td]:
+ assert item.__eq__(obj) is NotImplemented
+ assert item.__ne__(obj) is NotImplemented
+ assert item.__le__(obj) is NotImplemented
+ assert item.__lt__(obj) is NotImplemented
+ assert item.__ge__(obj) is NotImplemented
+ assert item.__gt__(obj) is NotImplemented
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py
new file mode 100644
index 00000000..22296604
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_defchararray.py
@@ -0,0 +1,672 @@
+import numpy as np
+from numpy.core.multiarray import _vec_string
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises,
+ assert_raises_regex
+ )
+
+kw_unicode_true = {'unicode': True} # make 2to3 work properly
+kw_unicode_false = {'unicode': False}
+
+class TestBasic:
+ def test_from_object_array(self):
+ A = np.array([['abc', 2],
+ ['long ', '0123456789']], dtype='O')
+ B = np.char.array(A)
+ assert_equal(B.dtype.itemsize, 10)
+ assert_array_equal(B, [[b'abc', b'2'],
+ [b'long', b'0123456789']])
+
+ def test_from_object_array_unicode(self):
+ A = np.array([['abc', 'Sigma \u03a3'],
+ ['long ', '0123456789']], dtype='O')
+ assert_raises(ValueError, np.char.array, (A,))
+ B = np.char.array(A, **kw_unicode_true)
+ assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
+ assert_array_equal(B, [['abc', 'Sigma \u03a3'],
+ ['long', '0123456789']])
+
+ def test_from_string_array(self):
+ A = np.array([[b'abc', b'foo'],
+ [b'long ', b'0123456789']])
+ assert_equal(A.dtype.type, np.string_)
+ B = np.char.array(A)
+ assert_array_equal(B, A)
+ assert_equal(B.dtype, A.dtype)
+ assert_equal(B.shape, A.shape)
+ B[0, 0] = 'changed'
+ assert_(B[0, 0] != A[0, 0])
+ C = np.char.asarray(A)
+ assert_array_equal(C, A)
+ assert_equal(C.dtype, A.dtype)
+ C[0, 0] = 'changed again'
+ assert_(C[0, 0] != B[0, 0])
+ assert_(C[0, 0] == A[0, 0])
+
+ def test_from_unicode_array(self):
+ A = np.array([['abc', 'Sigma \u03a3'],
+ ['long ', '0123456789']])
+ assert_equal(A.dtype.type, np.unicode_)
+ B = np.char.array(A)
+ assert_array_equal(B, A)
+ assert_equal(B.dtype, A.dtype)
+ assert_equal(B.shape, A.shape)
+ B = np.char.array(A, **kw_unicode_true)
+ assert_array_equal(B, A)
+ assert_equal(B.dtype, A.dtype)
+ assert_equal(B.shape, A.shape)
+
+ def fail():
+ np.char.array(A, **kw_unicode_false)
+
+ assert_raises(UnicodeEncodeError, fail)
+
+ def test_unicode_upconvert(self):
+ A = np.char.array(['abc'])
+ B = np.char.array(['\u03a3'])
+ assert_(issubclass((A + B).dtype.type, np.unicode_))
+
+ def test_from_string(self):
+ A = np.char.array(b'abc')
+ assert_equal(len(A), 1)
+ assert_equal(len(A[0]), 3)
+ assert_(issubclass(A.dtype.type, np.string_))
+
+ def test_from_unicode(self):
+ A = np.char.array('\u03a3')
+ assert_equal(len(A), 1)
+ assert_equal(len(A[0]), 1)
+ assert_equal(A.itemsize, 4)
+ assert_(issubclass(A.dtype.type, np.unicode_))
+
+class TestVecString:
+ def test_non_existent_method(self):
+
+ def fail():
+ _vec_string('a', np.string_, 'bogus')
+
+ assert_raises(AttributeError, fail)
+
+ def test_non_string_array(self):
+
+ def fail():
+ _vec_string(1, np.string_, 'strip')
+
+ assert_raises(TypeError, fail)
+
+ def test_invalid_args_tuple(self):
+
+ def fail():
+ _vec_string(['a'], np.string_, 'strip', 1)
+
+ assert_raises(TypeError, fail)
+
+ def test_invalid_type_descr(self):
+
+ def fail():
+ _vec_string(['a'], 'BOGUS', 'strip')
+
+ assert_raises(TypeError, fail)
+
+ def test_invalid_function_args(self):
+
+ def fail():
+ _vec_string(['a'], np.string_, 'strip', (1,))
+
+ assert_raises(TypeError, fail)
+
+ def test_invalid_result_type(self):
+
+ def fail():
+ _vec_string(['a'], np.int_, 'strip')
+
+ assert_raises(TypeError, fail)
+
+ def test_broadcast_error(self):
+
+ def fail():
+ _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
+
+ assert_raises(ValueError, fail)
+
+
+class TestWhitespace:
+ def setup_method(self):
+ self.A = np.array([['abc ', '123 '],
+ ['789 ', 'xyz ']]).view(np.chararray)
+ self.B = np.array([['abc', '123'],
+ ['789', 'xyz']]).view(np.chararray)
+
+ def test1(self):
+ assert_(np.all(self.A == self.B))
+ assert_(np.all(self.A >= self.B))
+ assert_(np.all(self.A <= self.B))
+ assert_(not np.any(self.A > self.B))
+ assert_(not np.any(self.A < self.B))
+ assert_(not np.any(self.A != self.B))
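+
+    def _editor_note_trailing_whitespace(self):
+        # Editor's sketch, not part of the upstream test: chararray
+        # comparisons ignore trailing whitespace, which is what test1
+        # verifies for every comparison operator.
+        assert_(np.all(np.char.array(['abc   ']) == np.char.array(['abc'])))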
+
+class TestChar:
+ def setup_method(self):
+ self.A = np.array('abc1', dtype='c').view(np.chararray)
+
+ def test_it(self):
+ assert_equal(self.A.shape, (4,))
+ assert_equal(self.A.upper()[:2].tobytes(), b'AB')
+
+class TestComparisons:
+ def setup_method(self):
+ self.A = np.array([['abc', '123'],
+ ['789', 'xyz']]).view(np.chararray)
+ self.B = np.array([['efg', '123 '],
+ ['051', 'tuv']]).view(np.chararray)
+
+ def test_not_equal(self):
+ assert_array_equal((self.A != self.B), [[True, False], [True, True]])
+
+ def test_equal(self):
+ assert_array_equal((self.A == self.B), [[False, True], [False, False]])
+
+ def test_greater_equal(self):
+ assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
+
+ def test_less_equal(self):
+ assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
+
+ def test_greater(self):
+ assert_array_equal((self.A > self.B), [[False, False], [True, True]])
+
+ def test_less(self):
+ assert_array_equal((self.A < self.B), [[True, False], [False, False]])
+
+ def test_type(self):
+ out1 = np.char.equal(self.A, self.B)
+ out2 = np.char.equal('a', 'a')
+ assert_(isinstance(out1, np.ndarray))
+ assert_(isinstance(out2, np.ndarray))
+
+class TestComparisonsMixed1(TestComparisons):
+ """Ticket #1276"""
+
+ def setup_method(self):
+ TestComparisons.setup_method(self)
+ self.B = np.array([['efg', '123 '],
+ ['051', 'tuv']], np.unicode_).view(np.chararray)
+
+class TestComparisonsMixed2(TestComparisons):
+ """Ticket #1276"""
+
+ def setup_method(self):
+ TestComparisons.setup_method(self)
+ self.A = np.array([['abc', '123'],
+ ['789', 'xyz']], np.unicode_).view(np.chararray)
+
+class TestInformation:
+ def setup_method(self):
+ self.A = np.array([[' abc ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+ self.B = np.array([[' \u03a3 ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+
+ def test_len(self):
+ assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
+ assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
+ assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
+
+ def test_count(self):
+ assert_(issubclass(self.A.count('').dtype.type, np.integer))
+ assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
+ assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
+ # Python doesn't seem to like counting NULL characters
+ # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
+ assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
+ assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
+ assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
+ # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
+
+ def test_endswith(self):
+ assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
+ assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
+ assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
+
+ def fail():
+ self.A.endswith('3', 'fdjk')
+
+ assert_raises(TypeError, fail)
+
+ def test_find(self):
+ assert_(issubclass(self.A.find('a').dtype.type, np.integer))
+ assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
+ assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
+ assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
+ assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
+
+ def test_index(self):
+
+ def fail():
+ self.A.index('a')
+
+ assert_raises(ValueError, fail)
+ assert_(np.char.index('abcba', 'b') == 1)
+ assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
+
+ def test_isalnum(self):
+ assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
+ assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
+
+ def test_isalpha(self):
+ assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
+ assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
+
+ def test_isdigit(self):
+ assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
+ assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
+
+ def test_islower(self):
+ assert_(issubclass(self.A.islower().dtype.type, np.bool_))
+ assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
+
+ def test_isspace(self):
+ assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
+ assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
+
+ def test_istitle(self):
+ assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
+ assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
+
+ def test_isupper(self):
+ assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
+ assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
+
+ def test_rfind(self):
+ assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
+ assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
+ assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
+ assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
+ assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
+
+ def test_rindex(self):
+
+ def fail():
+ self.A.rindex('a')
+
+ assert_raises(ValueError, fail)
+ assert_(np.char.rindex('abcba', 'b') == 3)
+ assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
+
+ def test_startswith(self):
+ assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
+ assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
+ assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
+
+ def fail():
+ self.A.startswith('3', 'fdjk')
+
+ assert_raises(TypeError, fail)
+
+
+class TestMethods:
+ def setup_method(self):
+ self.A = np.array([[' abc ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']],
+ dtype='S').view(np.chararray)
+ self.B = np.array([[' \u03a3 ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
+
+ def test_capitalize(self):
+ tgt = [[b' abc ', b''],
+ [b'12345', b'Mixedcase'],
+ [b'123 \t 345 \0 ', b'Upper']]
+ assert_(issubclass(self.A.capitalize().dtype.type, np.string_))
+ assert_array_equal(self.A.capitalize(), tgt)
+
+ tgt = [[' \u03c3 ', ''],
+ ['12345', 'Mixedcase'],
+ ['123 \t 345 \0 ', 'Upper']]
+ assert_(issubclass(self.B.capitalize().dtype.type, np.unicode_))
+ assert_array_equal(self.B.capitalize(), tgt)
+
+ def test_center(self):
+ assert_(issubclass(self.A.center(10).dtype.type, np.string_))
+ C = self.A.center([10, 20])
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+ C = self.A.center(20, b'#')
+ assert_(np.all(C.startswith(b'#')))
+ assert_(np.all(C.endswith(b'#')))
+
+ C = np.char.center(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b' FOO ', b' FOO '],
+ [b' FOO ', b' FOO ']]
+ assert_(issubclass(C.dtype.type, np.string_))
+ assert_array_equal(C, tgt)
+
+ def test_decode(self):
+ A = np.char.array([b'\\u03a3'])
+ assert_(A.decode('unicode-escape')[0] == '\u03a3')
+
+ def test_encode(self):
+ B = self.B.encode('unicode_escape')
+ assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
+
+ def test_expandtabs(self):
+ T = self.A.expandtabs()
+ assert_(T[2, 0] == b'123 345 \0')
+
+ def test_join(self):
+        # NOTE: list(b'123') == [49, 50, 51],
+        # so b','.join(b'123') raises a TypeError on Python 3;
+        # decode to str first so join operates on characters.
+ A0 = self.A.decode('ascii')
+
+ A = np.char.join([',', '#'], A0)
+ assert_(issubclass(A.dtype.type, np.unicode_))
+ tgt = np.array([[' ,a,b,c, ', ''],
+ ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
+ ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
+ assert_array_equal(np.char.join([',', '#'], A0), tgt)
+
+ def test_ljust(self):
+ assert_(issubclass(self.A.ljust(10).dtype.type, np.string_))
+
+ C = self.A.ljust([10, 20])
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+ C = self.A.ljust(20, b'#')
+ assert_array_equal(C.startswith(b'#'), [
+ [False, True], [False, False], [False, False]])
+ assert_(np.all(C.endswith(b'#')))
+
+ C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b'FOO ', b'FOO '],
+ [b'FOO ', b'FOO ']]
+ assert_(issubclass(C.dtype.type, np.string_))
+ assert_array_equal(C, tgt)
+
+ def test_lower(self):
+ tgt = [[b' abc ', b''],
+ [b'12345', b'mixedcase'],
+ [b'123 \t 345 \0 ', b'upper']]
+ assert_(issubclass(self.A.lower().dtype.type, np.string_))
+ assert_array_equal(self.A.lower(), tgt)
+
+ tgt = [[' \u03c3 ', ''],
+ ['12345', 'mixedcase'],
+ ['123 \t 345 \0 ', 'upper']]
+ assert_(issubclass(self.B.lower().dtype.type, np.unicode_))
+ assert_array_equal(self.B.lower(), tgt)
+
+ def test_lstrip(self):
+ tgt = [[b'abc ', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345 \0 ', b'UPPER']]
+ assert_(issubclass(self.A.lstrip().dtype.type, np.string_))
+ assert_array_equal(self.A.lstrip(), tgt)
+
+ tgt = [[b' abc', b''],
+ [b'2345', b'ixedCase'],
+ [b'23 \t 345 \x00', b'UPPER']]
+ assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
+
+ tgt = [['\u03a3 ', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345 \0 ', 'UPPER']]
+ assert_(issubclass(self.B.lstrip().dtype.type, np.unicode_))
+ assert_array_equal(self.B.lstrip(), tgt)
+
+ def test_partition(self):
+ P = self.A.partition([b'3', b'M'])
+ tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+ [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
+ assert_(issubclass(P.dtype.type, np.string_))
+ assert_array_equal(P, tgt)
+
+ def test_replace(self):
+ R = self.A.replace([b'3', b'a'],
+ [b'##########', b'@'])
+ tgt = [[b' abc ', b''],
+ [b'12##########45', b'MixedC@se'],
+ [b'12########## \t ##########45 \x00', b'UPPER']]
+ assert_(issubclass(R.dtype.type, np.string_))
+ assert_array_equal(R, tgt)
+
+ def test_rjust(self):
+ assert_(issubclass(self.A.rjust(10).dtype.type, np.string_))
+
+ C = self.A.rjust([10, 20])
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
+
+ C = self.A.rjust(20, b'#')
+ assert_(np.all(C.startswith(b'#')))
+ assert_array_equal(C.endswith(b'#'),
+ [[False, True], [False, False], [False, False]])
+
+ C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
+ tgt = [[b' FOO', b' FOO'],
+ [b' FOO', b' FOO']]
+ assert_(issubclass(C.dtype.type, np.string_))
+ assert_array_equal(C, tgt)
+
+ def test_rpartition(self):
+ P = self.A.rpartition([b'3', b'M'])
+ tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
+ [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
+ assert_(issubclass(P.dtype.type, np.string_))
+ assert_array_equal(P, tgt)
+
+ def test_rsplit(self):
+ A = self.A.rsplit(b'3')
+ tgt = [[[b' abc '], [b'']],
+ [[b'12', b'45'], [b'MixedCase']],
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
+ assert_(issubclass(A.dtype.type, np.object_))
+ assert_equal(A.tolist(), tgt)
+
+ def test_rstrip(self):
+ assert_(issubclass(self.A.rstrip().dtype.type, np.string_))
+
+ tgt = [[b' abc', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345', b'UPPER']]
+ assert_array_equal(self.A.rstrip(), tgt)
+
+ tgt = [[b' abc ', b''],
+ [b'1234', b'MixedCase'],
+ [b'123 \t 345 \x00', b'UPP']
+ ]
+ assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
+
+ tgt = [[' \u03a3', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345', 'UPPER']]
+ assert_(issubclass(self.B.rstrip().dtype.type, np.unicode_))
+ assert_array_equal(self.B.rstrip(), tgt)
+
+ def test_strip(self):
+ tgt = [[b'abc', b''],
+ [b'12345', b'MixedCase'],
+ [b'123 \t 345', b'UPPER']]
+ assert_(issubclass(self.A.strip().dtype.type, np.string_))
+ assert_array_equal(self.A.strip(), tgt)
+
+ tgt = [[b' abc ', b''],
+ [b'234', b'ixedCas'],
+ [b'23 \t 345 \x00', b'UPP']]
+ assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
+
+ tgt = [['\u03a3', ''],
+ ['12345', 'MixedCase'],
+ ['123 \t 345', 'UPPER']]
+ assert_(issubclass(self.B.strip().dtype.type, np.unicode_))
+ assert_array_equal(self.B.strip(), tgt)
+
+ def test_split(self):
+ A = self.A.split(b'3')
+ tgt = [
+ [[b' abc '], [b'']],
+ [[b'12', b'45'], [b'MixedCase']],
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
+ assert_(issubclass(A.dtype.type, np.object_))
+ assert_equal(A.tolist(), tgt)
+
+ def test_splitlines(self):
+ A = np.char.array(['abc\nfds\nwer']).splitlines()
+ assert_(issubclass(A.dtype.type, np.object_))
+ assert_(A.shape == (1,))
+ assert_(len(A[0]) == 3)
+
+ def test_swapcase(self):
+ tgt = [[b' ABC ', b''],
+ [b'12345', b'mIXEDcASE'],
+ [b'123 \t 345 \0 ', b'upper']]
+ assert_(issubclass(self.A.swapcase().dtype.type, np.string_))
+ assert_array_equal(self.A.swapcase(), tgt)
+
+ tgt = [[' \u03c3 ', ''],
+ ['12345', 'mIXEDcASE'],
+ ['123 \t 345 \0 ', 'upper']]
+ assert_(issubclass(self.B.swapcase().dtype.type, np.unicode_))
+ assert_array_equal(self.B.swapcase(), tgt)
+
+ def test_title(self):
+ tgt = [[b' Abc ', b''],
+ [b'12345', b'Mixedcase'],
+ [b'123 \t 345 \0 ', b'Upper']]
+ assert_(issubclass(self.A.title().dtype.type, np.string_))
+ assert_array_equal(self.A.title(), tgt)
+
+ tgt = [[' \u03a3 ', ''],
+ ['12345', 'Mixedcase'],
+ ['123 \t 345 \0 ', 'Upper']]
+ assert_(issubclass(self.B.title().dtype.type, np.unicode_))
+ assert_array_equal(self.B.title(), tgt)
+
+ def test_upper(self):
+ tgt = [[b' ABC ', b''],
+ [b'12345', b'MIXEDCASE'],
+ [b'123 \t 345 \0 ', b'UPPER']]
+ assert_(issubclass(self.A.upper().dtype.type, np.string_))
+ assert_array_equal(self.A.upper(), tgt)
+
+ tgt = [[' \u03a3 ', ''],
+ ['12345', 'MIXEDCASE'],
+ ['123 \t 345 \0 ', 'UPPER']]
+ assert_(issubclass(self.B.upper().dtype.type, np.unicode_))
+ assert_array_equal(self.B.upper(), tgt)
+
+ def test_isnumeric(self):
+
+ def fail():
+ self.A.isnumeric()
+
+ assert_raises(TypeError, fail)
+ assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
+ assert_array_equal(self.B.isnumeric(), [
+ [False, False], [True, False], [False, False]])
+
+ def test_isdecimal(self):
+
+ def fail():
+ self.A.isdecimal()
+
+ assert_raises(TypeError, fail)
+ assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
+ assert_array_equal(self.B.isdecimal(), [
+ [False, False], [True, False], [False, False]])
+
+
+class TestOperations:
+ def setup_method(self):
+ self.A = np.array([['abc', '123'],
+ ['789', 'xyz']]).view(np.chararray)
+ self.B = np.array([['efg', '456'],
+ ['051', 'tuv']]).view(np.chararray)
+
+ def test_add(self):
+ AB = np.array([['abcefg', '123456'],
+ ['789051', 'xyztuv']]).view(np.chararray)
+ assert_array_equal(AB, (self.A + self.B))
+ assert_(len((self.A + self.B)[0][0]) == 6)
+
+ def test_radd(self):
+ QA = np.array([['qabc', 'q123'],
+ ['q789', 'qxyz']]).view(np.chararray)
+ assert_array_equal(QA, ('q' + self.A))
+
+ def test_mul(self):
+ A = self.A
+ for r in (2, 3, 5, 7, 197):
+ Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
+ [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
+
+ assert_array_equal(Ar, (self.A * r))
+
+ for ob in [object(), 'qrs']:
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
+ A*ob
+
+ def test_rmul(self):
+ A = self.A
+ for r in (2, 3, 5, 7, 197):
+ Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
+ [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
+ assert_array_equal(Ar, (r * self.A))
+
+ for ob in [object(), 'qrs']:
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
+ ob * A
+
+ def test_mod(self):
+ """Ticket #856"""
+ F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
+ C = np.array([[3, 7], [19, 1]])
+ FC = np.array([['3', '7.000000'],
+ ['19', '1']]).view(np.chararray)
+ assert_array_equal(FC, F % C)
+
+ A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
+ A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
+ assert_array_equal(A1, (A % 1))
+
+ A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
+ assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
+
+ def test_rmod(self):
+ assert_(("%s" % self.A) == str(self.A))
+ assert_(("%r" % self.A) == repr(self.A))
+
+ for ob in [42, object()]:
+ with assert_raises_regex(
+ TypeError, "unsupported operand type.* and 'chararray'"):
+ ob % self.A
+
+ def test_slice(self):
+ """Regression test for https://github.com/numpy/numpy/issues/5982"""
+
+ arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
+ dtype='S4').view(np.chararray)
+ sl1 = arr[:]
+ assert_array_equal(sl1, arr)
+ assert_(sl1.base is arr)
+ assert_(sl1.base.base is arr.base)
+
+ sl2 = arr[:, :]
+ assert_array_equal(sl2, arr)
+ assert_(sl2.base is arr)
+ assert_(sl2.base.base is arr.base)
+
+ assert_(arr[0, 0] == b'abc')
+
+
+def test_empty_indexing():
+ """Regression test for ticket 1948."""
+ # Check that indexing a chararray with an empty list/array returns an
+ # empty chararray instead of a chararray with a single empty string in it.
+ s = np.chararray((4,))
+ assert_(s[[]].size == 0)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py
new file mode 100644
index 00000000..03381515
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_deprecations.py
@@ -0,0 +1,1193 @@
+"""
+Tests related to deprecation warnings. Also a convenient place
+to document how deprecations should eventually be turned into errors.
+
+"""
+import datetime
+import operator
+import warnings
+import pytest
+import tempfile
+import re
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
+ KnownFailureException, break_cycles,
+ )
+
+from numpy.core._multiarray_tests import fromstring_null_term_c_api
+
+try:
+ import pytz
+ _has_pytz = True
+except ImportError:
+ _has_pytz = False
+
+
+class _DeprecationTestCase:
+    # Just as a warning: the warnings module uses re.match, so the start of
+    # this message must match.
+ message = ''
+ warning_cls = DeprecationWarning
+
+ def setup_method(self):
+ self.warn_ctx = warnings.catch_warnings(record=True)
+ self.log = self.warn_ctx.__enter__()
+
+ # Do *not* ignore other DeprecationWarnings. Ignoring warnings
+ # can give very confusing results because of
+ # https://bugs.python.org/issue4180 and it is probably simplest to
+ # try to keep the tests cleanly giving only the right warning type.
+        # (While checking with the filter set to "error", other warnings are
+        # ignored anyway.) We still have them show up in the log, because
+        # otherwise they would be raised.
+ warnings.filterwarnings("always", category=self.warning_cls)
+ warnings.filterwarnings("always", message=self.message,
+ category=self.warning_cls)
+
+ def teardown_method(self):
+ self.warn_ctx.__exit__()
+
+ def assert_deprecated(self, function, num=1, ignore_others=False,
+ function_fails=False,
+ exceptions=np._NoValue,
+ args=(), kwargs={}):
+ """Test if DeprecationWarnings are given and raised.
+
+ This first checks if the function when called gives `num`
+ DeprecationWarnings, after that it tries to raise these
+ DeprecationWarnings and compares them with `exceptions`.
+ The exceptions can be different for cases where this code path
+ is simply not anticipated and the exception is replaced.
+
+ Parameters
+ ----------
+ function : callable
+ The function to test
+ num : int
+ Number of DeprecationWarnings to expect. This should normally be 1.
+ ignore_others : bool
+ Whether warnings of the wrong type should be ignored (note that
+ the message is not checked)
+ function_fails : bool
+ If the function would normally fail, setting this will check for
+ warnings inside a try/except block.
+ exceptions : Exception or tuple of Exceptions
+ Exception to expect when turning the warnings into an error.
+ The default checks for DeprecationWarnings. If exceptions is
+ empty the function is expected to run successfully.
+ args : tuple
+ Arguments for `function`
+ kwargs : dict
+ Keyword arguments for `function`
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ # reset the log
+ self.log[:] = []
+
+ if exceptions is np._NoValue:
+ exceptions = (self.warning_cls,)
+
+ try:
+ function(*args, **kwargs)
+ except (Exception if function_fails else tuple()):
+ pass
+
+ # just in case, clear the registry
+ num_found = 0
+ for warning in self.log:
+ if warning.category is self.warning_cls:
+ num_found += 1
+ elif not ignore_others:
+ raise AssertionError(
+ "expected %s but got: %s" %
+ (self.warning_cls.__name__, warning.category))
+ if num is not None and num_found != num:
+ msg = "%i warnings found but %i expected." % (len(self.log), num)
+ lst = [str(w) for w in self.log]
+ raise AssertionError("\n".join([msg] + lst))
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error", message=self.message,
+ category=self.warning_cls)
+ try:
+ function(*args, **kwargs)
+ if exceptions != tuple():
+ raise AssertionError(
+ "No error raised during function call")
+ except exceptions:
+ if exceptions == tuple():
+ raise AssertionError(
+ "Error raised during function call")
+
+ def assert_not_deprecated(self, function, args=(), kwargs={}):
+ """Test that warnings are not raised.
+
+ This is just a shorthand for:
+
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+ """
+ self.assert_deprecated(function, num=0, ignore_others=True,
+ exceptions=tuple(), args=args, kwargs=kwargs)
+
+
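+class _EditorExampleDeprecationCase(_DeprecationTestCase):
+    # Editor's sketch, not part of the upstream suite: a minimal concrete
+    # subclass showing how assert_deprecated is meant to be used. A real
+    # subclass would use a test_* name; the underscore keeps pytest from
+    # collecting this illustration (setup_method must run before it).
+    message = "editor example deprecation"
+
+    def _example_usage(self):
+        def deprecated_call():
+            warnings.warn("editor example deprecation",
+                          DeprecationWarning, stacklevel=2)
+
+        self.assert_deprecated(deprecated_call)
+
+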
+class _VisibleDeprecationTestCase(_DeprecationTestCase):
+ warning_cls = np.VisibleDeprecationWarning
+
+
+class TestComparisonDeprecations(_DeprecationTestCase):
+ """This tests the deprecation, for non-element-wise comparison logic.
+ This used to mean that when an error occurred during element-wise comparison
+ (i.e. broadcasting) NotImplemented was returned, but also in the comparison
+ itself, False was given instead of the error.
+
+ Also test FutureWarning for the None comparison.
+ """
+
+ message = "elementwise.* comparison failed; .*"
+
+ def test_normal_types(self):
+ for op in (operator.eq, operator.ne):
+ # Broadcasting errors:
+ self.assert_deprecated(op, args=(np.zeros(3), []))
+ a = np.zeros(3, dtype='i,i')
+ # (warning is issued a couple of times here)
+ self.assert_deprecated(op, args=(a, a[:-1]), num=None)
+
+ # ragged array comparison returns True/False
+ a = np.array([1, np.array([1,2,3])], dtype=object)
+ b = np.array([1, np.array([1,2,3])], dtype=object)
+ self.assert_deprecated(op, args=(a, b), num=None)
+
+ def test_string(self):
+ # For two string arrays, strings always raised the broadcasting error:
+ a = np.array(['a', 'b'])
+ b = np.array(['a', 'b', 'c'])
+ assert_warns(FutureWarning, lambda x, y: x == y, a, b)
+
+ # The empty list is not cast to string, and this used to pass due
+ # to dtype mismatch; now (2018-06-21) it correctly leads to a
+ # FutureWarning.
+ assert_warns(FutureWarning, lambda: a == [])
+
+ def test_void_dtype_equality_failures(self):
+ class NotArray:
+ def __array__(self):
+ raise TypeError
+
+ # Needed so Python 3 does not raise DeprecationWarning twice.
+ def __ne__(self, other):
+ return NotImplemented
+
+ self.assert_deprecated(lambda: np.arange(2) == NotArray())
+ self.assert_deprecated(lambda: np.arange(2) != NotArray())
+
+ def test_array_richcompare_legacy_weirdness(self):
+ # It doesn't really work to use assert_deprecated here, b/c part of
+ # the point of assert_deprecated is to check that when warnings are
+ # set to "error" mode then the error is propagated -- which is good!
+ # But here we are testing a bunch of code that is deprecated *because*
+ # it has the habit of swallowing up errors and converting them into
+ # different warnings. So assert_warns will have to be sufficient.
+ assert_warns(FutureWarning, lambda: np.arange(2) == "a")
+ assert_warns(FutureWarning, lambda: np.arange(2) != "a")
+ # No warning for scalar comparisons
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ assert_(not (np.array(0) == "a"))
+ assert_(np.array(0) != "a")
+ assert_(not (np.int16(0) == "a"))
+ assert_(np.int16(0) != "a")
+
+ for arg1 in [np.asarray(0), np.int16(0)]:
+ struct = np.zeros(2, dtype="i4,i4")
+ for arg2 in [struct, "a"]:
+ for f in [operator.lt, operator.le, operator.gt, operator.ge]:
+                with warnings.catch_warnings(record=True) as l:
+ warnings.filterwarnings("always")
+ assert_raises(TypeError, f, arg1, arg2)
+ assert_(not l)
+
+
+class TestDatetime64Timezone(_DeprecationTestCase):
+ """Parsing of datetime64 with timezones deprecated in 1.11.0, because
+ datetime64 is now timezone naive rather than UTC only.
+
+ It will be quite a while before we can remove this, because, at the very
+ least, a lot of existing code uses the 'Z' modifier to avoid conversion
+ from local time to UTC, even if otherwise it handles time in a timezone
+ naive fashion.
+ """
+ def test_string(self):
+ self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
+ self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
+
+ @pytest.mark.skipif(not _has_pytz,
+ reason="The pytz module is not available.")
+ def test_datetime(self):
+ tz = pytz.timezone('US/Eastern')
+ dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
+ self.assert_deprecated(np.datetime64, args=(dt,))
+
+
+class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
+ """Assigning the 'data' attribute of an ndarray is unsafe as pointed
+ out in gh-7093. Eventually, such assignment should NOT be allowed, but
+    in the interest of maintaining backwards compatibility, only a
+    DeprecationWarning is raised for the time being to give developers
+    time to refactor relevant code.
+ """
+
+ def test_data_attr_assignment(self):
+ a = np.arange(10)
+ b = np.linspace(0, 1, 10)
+
+ self.message = ("Assigning the 'data' attribute is an "
+ "inherently unsafe operation and will "
+ "be removed in the future.")
+ self.assert_deprecated(a.__setattr__, args=('data', b.data))
+
+
+class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
+ """
+ If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
+ represent the number in base 2 (positive) or 2's complement (negative) form,
+ the function used to silently ignore the parameter and return a representation
+ using the minimal number of bits needed for the form in question. Such behavior
+ is now considered unsafe from a user perspective and will raise an error in the future.
+ """
+
+ def test_insufficient_width_positive(self):
+ args = (10,)
+ kwargs = {'width': 2}
+
+ self.message = ("Insufficient bit width provided. This behavior "
+ "will raise an error in the future.")
+ self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
+
+ def test_insufficient_width_negative(self):
+ args = (-5,)
+ kwargs = {'width': 2}
+
+ self.message = ("Insufficient bit width provided. This behavior "
+ "will raise an error in the future.")
+ self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
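+
+    def _editor_note_sufficient_width(self):
+        # Editor's sketch, not part of the upstream test: with enough bits,
+        # width is honoured without a warning -- 4 bits can represent 10 in
+        # binary and -5 in two's complement.
+        assert_(np.binary_repr(10, width=4) == '1010')
+        assert_(np.binary_repr(-5, width=4) == '1011')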
+
+
+class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
+ # Deprecated 2021-01-05, NumPy 1.21
+ message = r".*`.dtype` attribute"
+
+ def test_deprecation_dtype_attribute_is_dtype(self):
+ class dt:
+ dtype = "f8"
+
+ class vdt(np.void):
+ dtype = "f,f"
+
+ self.assert_deprecated(lambda: np.dtype(dt))
+ self.assert_deprecated(lambda: np.dtype(dt()))
+ self.assert_deprecated(lambda: np.dtype(vdt))
+ self.assert_deprecated(lambda: np.dtype(vdt(1)))
+
+
+class TestTestDeprecated:
+ def test_assert_deprecated(self):
+ test_case_instance = _DeprecationTestCase()
+ test_case_instance.setup_method()
+ assert_raises(AssertionError,
+ test_case_instance.assert_deprecated,
+ lambda: None)
+
+ def foo():
+ warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
+
+ test_case_instance.assert_deprecated(foo)
+ test_case_instance.teardown_method()
+
+
+class TestNonNumericConjugate(_DeprecationTestCase):
+ """
+ Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
+ which conflicts with the error behavior of np.conjugate.
+ """
+ def test_conjugate(self):
+ for a in np.array(5), np.array(5j):
+ self.assert_not_deprecated(a.conjugate)
+ for a in (np.array('s'), np.array('2016', 'M'),
+ np.array((1, 2), [('a', int), ('b', int)])):
+ self.assert_deprecated(a.conjugate)
+
+
+class TestNPY_CHAR(_DeprecationTestCase):
+ # 2017-05-03, 1.13.0
+ def test_npy_char_deprecation(self):
+ from numpy.core._multiarray_tests import npy_char_deprecation
+ self.assert_deprecated(npy_char_deprecation)
+ assert_(npy_char_deprecation() == 'S1')
+
+
+class TestPyArray_AS1D(_DeprecationTestCase):
+ def test_npy_pyarrayas1d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
+
+
+class TestPyArray_AS2D(_DeprecationTestCase):
+ def test_npy_pyarrayas2d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
+
+
+class TestDatetimeEvent(_DeprecationTestCase):
+ # 2017-08-11, 1.14.0
+ def test_3_tuple(self):
+ for cls in (np.datetime64, np.timedelta64):
+ # two valid uses - (unit, num) and (unit, num, den, None)
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+ # trying to use the event argument, removed in 1.7.0, is deprecated
+ # it used to be a uint8
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
+class TestTruthTestingEmptyArrays(_DeprecationTestCase):
+ # 2017-09-25, 1.14.0
+ message = '.*truth value of an empty array is ambiguous.*'
+
+ def test_1d(self):
+ self.assert_deprecated(bool, args=(np.array([]),))
+
+ def test_2d(self):
+ self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
+ self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
+ self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
+
+
+class TestBincount(_DeprecationTestCase):
+ # 2017-06-01, 1.14.0
+ def test_bincount_minlength(self):
+ self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+
+
+class TestGeneratorSum(_DeprecationTestCase):
+ # 2018-02-25, 1.15.0
+ def test_generator_sum(self):
+ self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestPositiveOnNonNumerical(_DeprecationTestCase):
+ # 2018-06-28, 1.16.0
+ def test_positive_on_non_number(self):
+ self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
+
+class TestFromstring(_DeprecationTestCase):
+ # 2017-10-19, 1.14
+ def test_fromstring(self):
+ self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+
+class TestFromStringAndFileInvalidData(_DeprecationTestCase):
+ # 2019-06-08, 1.17.0
+ # Tests should be moved to real tests when deprecation is done.
+ message = "string or file could not be read to its end"
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_data_file(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+
+ with tempfile.TemporaryFile(mode="w") as f:
+ x.tofile(f, sep=',', format='%.2f')
+ f.write(invalid_str)
+
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=","))
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
+ # Should not raise:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ f.seek(0)
+ res = np.fromfile(f, sep=",", count=4)
+ assert_array_equal(res, x)
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_string(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ x_str = "1.51,2,3.51,4{}".format(invalid_str)
+
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
+
+        # The C-level API can also use zero-terminated strings rather than
+        # fixed-size ones, so test that as well:
+ bytestr = x_str.encode("ascii")
+ self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
+
+ with assert_warns(DeprecationWarning):
+            # This is slightly strange, in that fromstring leaves data
+            # potentially uninitialized (it would be good to error when all
+            # input has been read but count is larger than the actual data).
+ res = np.fromstring(x_str, sep=",", count=5)
+ assert_array_equal(res[:-1], x)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # Should not raise:
+ res = np.fromstring(x_str, sep=",", count=4)
+ assert_array_equal(res, x)
+
+
+class Test_GetSet_NumericOps(_DeprecationTestCase):
+ # 2018-09-20, 1.16.0
+ def test_get_numeric_ops(self):
+ from numpy.core._multiarray_tests import getset_numericops
+ self.assert_deprecated(getset_numericops, num=2)
+
+ # empty kwargs prevents any state actually changing which would break
+ # other tests.
+ self.assert_deprecated(np.set_numeric_ops, kwargs={})
+ assert_raises(ValueError, np.set_numeric_ops, add='abc')
+
+
+class TestShape1Fields(_DeprecationTestCase):
+ warning_cls = FutureWarning
+
+ # 2019-05-20, 1.17.0
+ def test_shape_1_fields(self):
+ self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
+
+
+class TestNonZero(_DeprecationTestCase):
+ # 2019-05-26, 1.17.0
+ def test_zerod(self):
+ self.assert_deprecated(lambda: np.nonzero(np.array(0)))
+ self.assert_deprecated(lambda: np.nonzero(np.array(1)))
+
+
+class TestToString(_DeprecationTestCase):
+ # 2020-03-06 1.19.0
+ message = re.escape("tostring() is deprecated. Use tobytes() instead.")
+
+ def test_tostring(self):
+ arr = np.array(list(b"test\xFF"), dtype=np.uint8)
+ self.assert_deprecated(arr.tostring)
+
+ def test_tostring_matches_tobytes(self):
+ arr = np.array(list(b"test\xFF"), dtype=np.uint8)
+ b = arr.tobytes()
+ with assert_warns(DeprecationWarning):
+ s = arr.tostring()
+ assert s == b
+
+
+class TestDTypeCoercion(_DeprecationTestCase):
+ # 2020-02-06 1.19.0
+ message = "Converting .* to a dtype .*is deprecated"
+ deprecated_types = [
+ # The builtin scalar super types:
+ np.generic, np.flexible, np.number,
+ np.inexact, np.floating, np.complexfloating,
+ np.integer, np.unsignedinteger, np.signedinteger,
+ # character is a deprecated S1 special case:
+ np.character,
+ ]
+
+ def test_dtype_coercion(self):
+ for scalar_type in self.deprecated_types:
+ self.assert_deprecated(np.dtype, args=(scalar_type,))
+
+ def test_array_construction(self):
+ for scalar_type in self.deprecated_types:
+ self.assert_deprecated(np.array, args=([], scalar_type,))
+
+ def test_not_deprecated(self):
+ # All specific types are not deprecated:
+ for group in np.sctypes.values():
+ for scalar_type in group:
+ self.assert_not_deprecated(np.dtype, args=(scalar_type,))
+
+ for scalar_type in [type, dict, list, tuple]:
+ # Typical python types are coerced to object currently:
+ self.assert_not_deprecated(np.dtype, args=(scalar_type,))
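+
+    def _editor_note_concrete_dtype(self):
+        # Editor's sketch, not part of the upstream test: the concrete
+        # scalar types remain valid dtype arguments; only the abstract
+        # supertypes listed in deprecated_types are deprecated.
+        assert_(np.dtype(np.float64) == np.dtype("f8"))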
+
+
+class BuiltInRoundComplexDType(_DeprecationTestCase):
+ # 2020-03-31 1.19.0
+ deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
+ not_deprecated_types = [
+ np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ ]
+
+ def test_deprecated(self):
+ for scalar_type in self.deprecated_types:
+ scalar = scalar_type(0)
+ self.assert_deprecated(round, args=(scalar,))
+ self.assert_deprecated(round, args=(scalar, 0))
+ self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
+
+ def test_not_deprecated(self):
+ for scalar_type in self.not_deprecated_types:
+ scalar = scalar_type(0)
+ self.assert_not_deprecated(round, args=(scalar,))
+ self.assert_not_deprecated(round, args=(scalar, 0))
+ self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
+
+
+class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase):
+ # 2020-05-27, NumPy 1.20.0
+ message = "Out of bound index found. This was previously ignored.*"
+
+ @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])])
+ def test_empty_subspace(self, index):
+        # Test for both a single and two/multiple advanced indices. These
+        # will raise an IndexError in the future.
+ arr = np.ones((2, 2, 0))
+ self.assert_deprecated(arr.__getitem__, args=(index,))
+ self.assert_deprecated(arr.__setitem__, args=(index, 0.))
+
+ # for this array, the subspace is only empty after applying the slice
+ arr2 = np.ones((2, 2, 1))
+ index2 = (slice(0, 0),) + index
+ self.assert_deprecated(arr2.__getitem__, args=(index2,))
+ self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))
+
+ def test_empty_index_broadcast_not_deprecated(self):
+ arr = np.ones((2, 2, 2))
+
+ index = ([[3], [2]], []) # broadcast to an empty result.
+ self.assert_not_deprecated(arr.__getitem__, args=(index,))
+ self.assert_not_deprecated(arr.__setitem__,
+ args=(index, np.empty((2, 0, 2))))
+
+
+class TestNonExactMatchDeprecation(_DeprecationTestCase):
+ # 2020-04-22
+ def test_non_exact_match(self):
+ arr = np.array([[3, 6, 6], [4, 5, 1]])
+ # misspelt mode check
+ self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
+        # a completely different word whose first character matches ('R')
+ self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
+
+
+class TestMatrixInOuter(_DeprecationTestCase):
+ # 2020-05-13 NumPy 1.20.0
+ message = (r"add.outer\(\) was passed a numpy matrix as "
+ r"(first|second) argument.")
+
+ def test_deprecated(self):
+ arr = np.array([1, 2, 3])
+ m = np.array([1, 2, 3]).view(np.matrix)
+ self.assert_deprecated(np.add.outer, args=(m, m), num=2)
+ self.assert_deprecated(np.add.outer, args=(arr, m))
+ self.assert_deprecated(np.add.outer, args=(m, arr))
+ self.assert_not_deprecated(np.add.outer, args=(arr, arr))
+
+
+class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
+ # NumPy 1.20, 2020-09-03
+ message = "concatenate with `axis=None` will use same-kind casting"
+
+ def test_deprecated(self):
+ self.assert_deprecated(np.concatenate,
+ args=(([0.], [1.]),),
+ kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
+
+ def test_not_deprecated(self):
+ self.assert_not_deprecated(np.concatenate,
+ args=(([0.], [1.]),),
+ kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
+ 'casting': "unsafe"})
+
+ with assert_raises(TypeError):
+ # Tests should notice if the deprecation warning is given first...
+ np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
+ casting="same_kind")
+
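+
+# Editor's note: a minimal sketch, not from the original suite, of the
+# explicit spelling this deprecation asks for: flattening concatenation into
+# an integer out array needs casting="unsafe".
+def _example_flatten_concatenate():
+    out = np.empty(2, dtype=np.int64)
+    np.concatenate(([0.], [1.]), axis=None, out=out, casting="unsafe")
+    assert out.tolist() == [0, 1]
+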
+
+class TestDeprecateSubarrayDTypeDuringArrayCoercion(_DeprecationTestCase):
+ warning_cls = FutureWarning
+ message = "(creating|casting) an array (with|to) a subarray dtype"
+
+ def test_deprecated_array(self):
+ # Arrays are more complex, since they "broadcast" on success:
+ arr = np.array([1, 2])
+
+ self.assert_deprecated(lambda: arr.astype("(2)i,"))
+ with pytest.warns(FutureWarning):
+ res = arr.astype("(2)i,")
+
+ assert_array_equal(res, [[1, 2], [1, 2]])
+
+ self.assert_deprecated(lambda: np.array(arr, dtype="(2)i,"))
+ with pytest.warns(FutureWarning):
+ res = np.array(arr, dtype="(2)i,")
+
+ assert_array_equal(res, [[1, 2], [1, 2]])
+
+ with pytest.warns(FutureWarning):
+ res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
+
+ assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 2], [1, 2]]])
+
+ def test_deprecated_and_error(self):
+ # These error paths do not give a warning, but will succeed in the
+ # future.
+ arr = np.arange(5 * 2).reshape(5, 2)
+ def check():
+ with pytest.raises(ValueError):
+ arr.astype("(2,2)f")
+
+ self.assert_deprecated(check)
+
+ def check():
+ with pytest.raises(ValueError):
+ np.array(arr, dtype="(2,2)f")
+
+ self.assert_deprecated(check)
+
+
+class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase):
+ # Deprecated 2020-12-09, NumPy 1.20
+ warning_cls = FutureWarning
+ message = "The input object of type.*but not a sequence"
+
+ @pytest.mark.parametrize("protocol",
+ ["__array__", "__array_interface__", "__array_struct__"])
+ def test_deprecated(self, protocol):
+        """Test that these objects give a warning: they are not 0-D, they
+        are not coerced at the top level of `np.array(obj)` but nested, and
+        they do *not* define the sequence protocol.
+
+ NOTE: Tests for the versions including __len__ and __getitem__ exist
+ in `test_array_coercion.py` and they can be modified or amended
+        when this deprecation expires.
+ """
+ blueprint = np.arange(10)
+ MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
+ self.assert_deprecated(lambda: np.array([MyArr()], dtype=object))
+
+ @pytest.mark.parametrize("protocol",
+ ["__array__", "__array_interface__", "__array_struct__"])
+ def test_0d_not_deprecated(self, protocol):
+        # 0-D always worked (although it would use __float__ or similar
+        # for the conversion, which may not happen anymore).
+ blueprint = np.array(1.)
+ MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
+ myarr = MyArr()
+
+ self.assert_not_deprecated(lambda: np.array([myarr], dtype=object))
+ res = np.array([myarr], dtype=object)
+ expected = np.empty(1, dtype=object)
+ expected[0] = myarr
+ assert_array_equal(res, expected)
+
+ @pytest.mark.parametrize("protocol",
+ ["__array__", "__array_interface__", "__array_struct__"])
+ def test_unnested_not_deprecated(self, protocol):
+ blueprint = np.arange(10)
+ MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol)})
+ myarr = MyArr()
+
+ self.assert_not_deprecated(lambda: np.array(myarr))
+ res = np.array(myarr)
+ assert_array_equal(res, blueprint)
+
+ @pytest.mark.parametrize("protocol",
+ ["__array__", "__array_interface__", "__array_struct__"])
+ def test_strange_dtype_handling(self, protocol):
+ """The old code would actually use the dtype from the array, but
+ then end up not using the array (for dimension discovery)
+ """
+ blueprint = np.arange(10).astype("f4")
+ MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol),
+ "__float__": lambda _: 0.5})
+ myarr = MyArr()
+
+ # Make sure we warn (and capture the FutureWarning)
+ with pytest.warns(FutureWarning, match=self.message):
+ res = np.array([[myarr]])
+
+ assert res.shape == (1, 1)
+ assert res.dtype == "f4"
+ assert res[0, 0] == 0.5
+
+ @pytest.mark.parametrize("protocol",
+ ["__array__", "__array_interface__", "__array_struct__"])
+ def test_assignment_not_deprecated(self, protocol):
+ # If the result is dtype=object we do not unpack a nested array or
+ # array-like, if it is nested at exactly the right depth.
+ # NOTE: We actually do still call __array__, etc. but ignore the result
+ # in the end. For `dtype=object` we could optimize that away.
+ blueprint = np.arange(10).astype("f4")
+ MyArr = type("MyArr", (), {protocol: getattr(blueprint, protocol),
+ "__float__": lambda _: 0.5})
+ myarr = MyArr()
+
+ res = np.empty(3, dtype=object)
+ def set():
+ res[:] = [myarr, myarr, myarr]
+ self.assert_not_deprecated(set)
+ assert res[0] is myarr
+ assert res[1] is myarr
+ assert res[2] is myarr
+
+
+class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
+ # Deprecated 2020-11-24, NumPy 1.20
+ """
+ Technically, it should be impossible to create numpy object scalars,
+ but there was an unpickle path that would in theory allow it. That
+ path is invalid and must lead to the warning.
+ """
+ message = "Unpickling a scalar with object dtype is deprecated."
+
+ def test_deprecated(self):
+ ctor = np.core.multiarray.scalar
+ self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))
+
+try:
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ import nose # noqa: F401
+except ImportError:
+ HAVE_NOSE = False
+else:
+ HAVE_NOSE = True
+
+
+@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
+class TestNoseDecoratorsDeprecated(_DeprecationTestCase):
+ class DidntSkipException(Exception):
+ pass
+
+ def test_slow(self):
+ def _test_slow():
+ @np.testing.dec.slow
+ def slow_func(x, y, z):
+ pass
+
+ assert_(slow_func.slow)
+ self.assert_deprecated(_test_slow)
+
+ def test_setastest(self):
+ def _test_setastest():
+ @np.testing.dec.setastest()
+ def f_default(a):
+ pass
+
+ @np.testing.dec.setastest(True)
+ def f_istest(a):
+ pass
+
+ @np.testing.dec.setastest(False)
+ def f_isnottest(a):
+ pass
+
+ assert_(f_default.__test__)
+ assert_(f_istest.__test__)
+ assert_(not f_isnottest.__test__)
+ self.assert_deprecated(_test_setastest, num=3)
+
+ def test_skip_functions_hardcoded(self):
+ def _test_skip_functions_hardcoded():
+ @np.testing.dec.skipif(True)
+ def f1(x):
+ raise self.DidntSkipException
+
+ try:
+ f1('a')
+ except self.DidntSkipException:
+ raise Exception('Failed to skip')
+ except SkipTest().__class__:
+ pass
+
+ @np.testing.dec.skipif(False)
+ def f2(x):
+ raise self.DidntSkipException
+
+ try:
+ f2('a')
+ except self.DidntSkipException:
+ pass
+ except SkipTest().__class__:
+ raise Exception('Skipped when not expected to')
+ self.assert_deprecated(_test_skip_functions_hardcoded, num=2)
+
+ def test_skip_functions_callable(self):
+ def _test_skip_functions_callable():
+ def skip_tester():
+ return skip_flag == 'skip me!'
+
+ @np.testing.dec.skipif(skip_tester)
+ def f1(x):
+ raise self.DidntSkipException
+
+ try:
+ skip_flag = 'skip me!'
+ f1('a')
+ except self.DidntSkipException:
+ raise Exception('Failed to skip')
+ except SkipTest().__class__:
+ pass
+
+ @np.testing.dec.skipif(skip_tester)
+ def f2(x):
+ raise self.DidntSkipException
+
+ try:
+ skip_flag = 'five is right out!'
+ f2('a')
+ except self.DidntSkipException:
+ pass
+ except SkipTest().__class__:
+ raise Exception('Skipped when not expected to')
+ self.assert_deprecated(_test_skip_functions_callable, num=2)
+
+ def test_skip_generators_hardcoded(self):
+ def _test_skip_generators_hardcoded():
+ @np.testing.dec.knownfailureif(True, "This test is known to fail")
+ def g1(x):
+ yield from range(x)
+
+ try:
+ for j in g1(10):
+ pass
+ except KnownFailureException().__class__:
+ pass
+ else:
+ raise Exception('Failed to mark as known failure')
+
+ @np.testing.dec.knownfailureif(False, "This test is NOT known to fail")
+ def g2(x):
+ yield from range(x)
+ raise self.DidntSkipException('FAIL')
+
+ try:
+ for j in g2(10):
+ pass
+ except KnownFailureException().__class__:
+ raise Exception('Marked incorrectly as known failure')
+ except self.DidntSkipException:
+ pass
+ self.assert_deprecated(_test_skip_generators_hardcoded, num=2)
+
+ def test_skip_generators_callable(self):
+ def _test_skip_generators_callable():
+ def skip_tester():
+ return skip_flag == 'skip me!'
+
+ @np.testing.dec.knownfailureif(skip_tester, "This test is known to fail")
+ def g1(x):
+ yield from range(x)
+
+ try:
+ skip_flag = 'skip me!'
+ for j in g1(10):
+ pass
+ except KnownFailureException().__class__:
+ pass
+ else:
+ raise Exception('Failed to mark as known failure')
+
+ @np.testing.dec.knownfailureif(skip_tester, "This test is NOT known to fail")
+ def g2(x):
+ yield from range(x)
+ raise self.DidntSkipException('FAIL')
+
+ try:
+ skip_flag = 'do not skip'
+ for j in g2(10):
+ pass
+ except KnownFailureException().__class__:
+ raise Exception('Marked incorrectly as known failure')
+ except self.DidntSkipException:
+ pass
+ self.assert_deprecated(_test_skip_generators_callable, num=2)
+
+ def test_deprecated(self):
+ def _test_deprecated():
+ @np.testing.dec.deprecated(True)
+ def non_deprecated_func():
+ pass
+
+ @np.testing.dec.deprecated()
+ def deprecated_func():
+ import warnings
+ warnings.warn("TEST: deprecated func", DeprecationWarning, stacklevel=1)
+
+ @np.testing.dec.deprecated()
+ def deprecated_func2():
+ import warnings
+ warnings.warn("AHHHH", stacklevel=1)
+ raise ValueError
+
+ @np.testing.dec.deprecated()
+ def deprecated_func3():
+ import warnings
+ warnings.warn("AHHHH", stacklevel=1)
+
+ # marked as deprecated, but does not raise DeprecationWarning
+ assert_raises(AssertionError, non_deprecated_func)
+ # should be silent
+ deprecated_func()
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("always") # do not propagate unrelated warnings
+ # fails if deprecated decorator just disables test. See #1453.
+ assert_raises(ValueError, deprecated_func2)
+ # warning is not a DeprecationWarning
+ assert_raises(AssertionError, deprecated_func3)
+ self.assert_deprecated(_test_deprecated, num=4)
+
+ def test_parametrize(self):
+ def _test_parametrize():
+ # dec.parametrize assumes that it is being run by nose. Because
+ # we are running under pytest, we need to explicitly check the
+ # results.
+ @np.testing.dec.parametrize('base, power, expected',
+ [(1, 1, 1),
+ (2, 1, 2),
+ (2, 2, 4)])
+ def check_parametrize(base, power, expected):
+ assert_(base**power == expected)
+
+ count = 0
+ for test in check_parametrize():
+ test[0](*test[1:])
+ count += 1
+ assert_(count == 3)
+ self.assert_deprecated(_test_parametrize)
+
+
+class TestSingleElementSignature(_DeprecationTestCase):
+ # Deprecated 2021-04-01, NumPy 1.21
+ message = r"The use of a length 1"
+
+ def test_deprecated(self):
+ self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
+ self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))
+
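+
+# Editor's note: a minimal sketch, not from the original tests: a ufunc
+# signature has to spell out every input and output type instead of a
+# single character.
+def _example_full_signature():
+    assert np.add(1, 2, signature="dd->d") == 3.0
+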
+
+class TestCtypesGetter(_DeprecationTestCase):
+ # Deprecated 2021-05-18, Numpy 1.21.0
+ warning_cls = DeprecationWarning
+ ctypes = np.array([1]).ctypes
+
+ @pytest.mark.parametrize(
+ "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
+ )
+ def test_deprecated(self, name: str) -> None:
+ func = getattr(self.ctypes, name)
+ self.assert_deprecated(lambda: func())
+
+ @pytest.mark.parametrize(
+ "name", ["data", "shape", "strides", "_as_parameter_"]
+ )
+ def test_not_deprecated(self, name: str) -> None:
+ self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
+
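+
+# Editor's note: a small sketch, not part of the original suite, showing the
+# attribute spellings that replace the deprecated get_* methods.
+def _example_ctypes_attributes():
+    info = np.array([[1, 2], [3, 4]], dtype=np.int32).ctypes
+    assert isinstance(info.data, int)  # replaces info.get_data()
+    assert info.shape[:] == [2, 2]     # replaces info.get_shape()
+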
+
+PARTITION_DICT = {
+ "partition method": np.arange(10).partition,
+ "argpartition method": np.arange(10).argpartition,
+ "partition function": lambda kth: np.partition(np.arange(10), kth),
+ "argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
+}
+
+
+@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
+class TestPartitionBoolIndex(_DeprecationTestCase):
+ # Deprecated 2021-09-29, NumPy 1.22
+ warning_cls = DeprecationWarning
+ message = "Passing booleans as partition index is deprecated"
+
+ def test_deprecated(self, func):
+ self.assert_deprecated(lambda: func(True))
+ self.assert_deprecated(lambda: func([False, True]))
+
+ def test_not_deprecated(self, func):
+ self.assert_not_deprecated(lambda: func(1))
+ self.assert_not_deprecated(lambda: func([0, 1]))
+
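+
+# Editor's note: a minimal sketch, not from the original suite: partition
+# indices must be integers, not booleans.
+def _example_partition_kth():
+    part = np.partition(np.array([5, 1, 4, 2, 3]), 1)  # kth=1, not kth=True
+    assert part[1] == 2  # element 1 is in its final sorted position
+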
+
+class TestMachAr(_DeprecationTestCase):
+ # Deprecated 2021-10-19, NumPy 1.22
+ warning_cls = DeprecationWarning
+
+ def test_deprecated_module(self):
+ self.assert_deprecated(lambda: getattr(np.core, "machar"))
+
+ def test_deprecated_attr(self):
+ finfo = np.finfo(float)
+ self.assert_deprecated(lambda: getattr(finfo, "machar"))
+
+
+class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
+ # Deprecated 2021-11-08, NumPy 1.22
+ @pytest.mark.parametrize("func",
+ [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
+ def test_deprecated(self, func):
+ self.assert_deprecated(
+ lambda: func([0., 1.], 0., interpolation="linear"))
+ self.assert_deprecated(
+ lambda: func([0., 1.], 0., interpolation="nearest"))
+
+ @pytest.mark.parametrize("func",
+ [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
+ def test_both_passed(self, func):
+ with warnings.catch_warnings():
+ # catch the DeprecationWarning so that it does not raise:
+ warnings.simplefilter("always", DeprecationWarning)
+ with pytest.raises(TypeError):
+ func([0., 1.], 0., interpolation="nearest", method="nearest")
+
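+
+# Editor's note: a short sketch, not part of the original suite, of the
+# renamed keyword: method= replaces the deprecated interpolation=.
+def _example_percentile_method():
+    assert np.percentile([0., 1., 2., 3.], 50, method="linear") == 1.5
+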
+
+class TestMemEventHook(_DeprecationTestCase):
+ # Deprecated 2021-11-18, NumPy 1.23
+ def test_mem_seteventhook(self):
+ # The actual tests are within the C code in
+ # multiarray/_multiarray_tests.c.src
+ import numpy.core._multiarray_tests as ma_tests
+ with pytest.warns(DeprecationWarning,
+ match='PyDataMem_SetEventHook is deprecated'):
+ ma_tests.test_pydatamem_seteventhook_start()
+ # force an allocation and free of a numpy array
+        # needs to be larger than the limit of the small-memory cache in ctors.c
+ a = np.zeros(1000)
+ del a
+ break_cycles()
+ with pytest.warns(DeprecationWarning,
+ match='PyDataMem_SetEventHook is deprecated'):
+ ma_tests.test_pydatamem_seteventhook_end()
+
+
+class TestArrayFinalizeNone(_DeprecationTestCase):
+ message = "Setting __array_finalize__ = None"
+
+ def test_use_none_is_deprecated(self):
+ # Deprecated way that ndarray itself showed nothing needs finalizing.
+ class NoFinalize(np.ndarray):
+ __array_finalize__ = None
+
+ self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
+
+class TestAxisNotMAXDIMS(_DeprecationTestCase):
+ # Deprecated 2022-01-08, NumPy 1.23
+ message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
+
+ def test_deprecated(self):
+ a = np.zeros((1,)*32)
+ self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
+
+
+class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
+ # Deprecated 2022-07-03, NumPy 1.23
+ # This test can be removed without replacement after the deprecation.
+ # The tests:
+ # * numpy/lib/tests/test_loadtxt.py::test_integer_signs
+ # * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails
+ # Have a warning filter that needs to be removed.
+ message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*"
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_deprecated_warning(self, dtype):
+ with pytest.warns(DeprecationWarning, match=self.message):
+ np.loadtxt(["10.5"], dtype=dtype)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_deprecated_raised(self, dtype):
+ # The DeprecationWarning is chained when raised, so test manually:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ try:
+ np.loadtxt(["10.5"], dtype=dtype)
+ except ValueError as e:
+ assert isinstance(e.__cause__, DeprecationWarning)
+
+
+class TestPyIntConversion(_DeprecationTestCase):
+ message = r".*stop allowing conversion of out-of-bound.*"
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_deprecated_scalar(self, dtype):
+ dtype = np.dtype(dtype)
+ info = np.iinfo(dtype)
+
+ # Cover the most common creation paths (all end up in the
+ # same place):
+ def scalar(value, dtype):
+ dtype.type(value)
+
+ def assign(value, dtype):
+ arr = np.array([0, 0, 0], dtype=dtype)
+ arr[2] = value
+
+ def create(value, dtype):
+ np.array([value], dtype=dtype)
+
+ for creation_func in [scalar, assign, create]:
+ try:
+ self.assert_deprecated(
+ lambda: creation_func(info.min - 1, dtype))
+ except OverflowError:
+ pass # OverflowErrors always happened also before and are OK.
+
+ try:
+ self.assert_deprecated(
+ lambda: creation_func(info.max + 1, dtype))
+ except OverflowError:
+ pass # OverflowErrors always happened also before and are OK.
+
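+
+# Editor's note: a small sketch, not from the original tests, of an explicit
+# alternative to the deprecated out-of-bound conversion: wrap intentionally
+# with a modulo before converting.
+def _example_explicit_wrap():
+    info = np.iinfo(np.uint8)
+    value = 300                            # out of range for uint8
+    wrapped = value % (int(info.max) + 1)  # explicit modular reduction
+    assert np.uint8(wrapped) == 44
+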
+
+class TestDeprecatedGlobals(_DeprecationTestCase):
+ # Deprecated 2022-11-17, NumPy 1.24
+ def test_type_aliases(self):
+ # from builtins
+ self.assert_deprecated(lambda: np.bool8)
+ self.assert_deprecated(lambda: np.int0)
+ self.assert_deprecated(lambda: np.uint0)
+ self.assert_deprecated(lambda: np.bytes0)
+ self.assert_deprecated(lambda: np.str0)
+ self.assert_deprecated(lambda: np.object0)
+
+
+@pytest.mark.parametrize("name",
+ ["bool", "long", "ulong", "str", "bytes", "object"])
+def test_future_scalar_attributes(name):
+    # FutureWarning added 2022-11-17, NumPy 1.24.
+ assert name not in dir(np) # we may want to not add them
+ with pytest.warns(FutureWarning,
+ match=f"In the future .*{name}"):
+ assert not hasattr(np, name)
+
+    # Unfortunately, they are currently still valid via `np.dtype()`:
+    np.dtype(name)
+    assert name in np.sctypeDict
+
+
+# Ignore the above future attribute warning for this test.
+@pytest.mark.filterwarnings("ignore:In the future:FutureWarning")
+class TestRemovedGlobals:
+ # Removed 2023-01-12, NumPy 1.24.0
+    # Not a deprecation, but the large error was added to aid those who
+    # missed the previous deprecation, and it should be removed on a
+    # similar timeline to a deprecation (or faster).
+ @pytest.mark.parametrize("name",
+ ["object", "bool", "float", "complex", "str", "int"])
+ def test_attributeerror_includes_info(self, name):
+ msg = f".*\n`np.{name}` was a deprecated alias for the builtin"
+ with pytest.raises(AttributeError, match=msg):
+ getattr(np, name)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py
new file mode 100644
index 00000000..278bdd12
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_dlpack.py
@@ -0,0 +1,123 @@
+import sys
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal, IS_PYPY
+
+
+class TestDLPack:
+ @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
+ def test_dunder_dlpack_refcount(self):
+ x = np.arange(5)
+ y = x.__dlpack__()
+ assert sys.getrefcount(x) == 3
+ del y
+ assert sys.getrefcount(x) == 2
+
+ def test_dunder_dlpack_stream(self):
+ x = np.arange(5)
+ x.__dlpack__(stream=None)
+
+ with pytest.raises(RuntimeError):
+ x.__dlpack__(stream=1)
+
+ def test_strides_not_multiple_of_itemsize(self):
+ dt = np.dtype([('int', np.int32), ('char', np.int8)])
+ y = np.zeros((5,), dtype=dt)
+ z = y['int']
+
+ with pytest.raises(BufferError):
+ np.from_dlpack(z)
+
+ @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
+ def test_from_dlpack_refcount(self):
+ x = np.arange(5)
+ y = np.from_dlpack(x)
+ assert sys.getrefcount(x) == 3
+ del y
+ assert sys.getrefcount(x) == 2
+
+ @pytest.mark.parametrize("dtype", [
+ np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ np.complex64, np.complex128
+ ])
+ def test_dtype_passthrough(self, dtype):
+ x = np.arange(5, dtype=dtype)
+ y = np.from_dlpack(x)
+
+ assert y.dtype == x.dtype
+ assert_array_equal(x, y)
+
+ def test_invalid_dtype(self):
+ x = np.asarray(np.datetime64('2021-05-27'))
+
+ with pytest.raises(BufferError):
+ np.from_dlpack(x)
+
+ def test_invalid_byte_swapping(self):
+ dt = np.dtype('=i8').newbyteorder()
+ x = np.arange(5, dtype=dt)
+
+ with pytest.raises(BufferError):
+ np.from_dlpack(x)
+
+ def test_non_contiguous(self):
+ x = np.arange(25).reshape((5, 5))
+
+ y1 = x[0]
+ assert_array_equal(y1, np.from_dlpack(y1))
+
+ y2 = x[:, 0]
+ assert_array_equal(y2, np.from_dlpack(y2))
+
+ y3 = x[1, :]
+ assert_array_equal(y3, np.from_dlpack(y3))
+
+ y4 = x[1]
+ assert_array_equal(y4, np.from_dlpack(y4))
+
+ y5 = np.diagonal(x).copy()
+ assert_array_equal(y5, np.from_dlpack(y5))
+
+ @pytest.mark.parametrize("ndim", range(33))
+ def test_higher_dims(self, ndim):
+ shape = (1,) * ndim
+ x = np.zeros(shape, dtype=np.float64)
+
+ assert shape == np.from_dlpack(x).shape
+
+ def test_dlpack_device(self):
+ x = np.arange(5)
+ assert x.__dlpack_device__() == (1, 0)
+ y = np.from_dlpack(x)
+ assert y.__dlpack_device__() == (1, 0)
+ z = y[::2]
+ assert z.__dlpack_device__() == (1, 0)
+
+    def dlpack_deleter_exception(self):
+        # Helper (not itself a test): create a capsule, then raise so the
+        # capsule's deleter runs while the exception propagates.
+        x = np.arange(5)
+        _ = x.__dlpack__()
+        raise RuntimeError
+
+ def test_dlpack_destructor_exception(self):
+ with pytest.raises(RuntimeError):
+ self.dlpack_deleter_exception()
+
+ def test_readonly(self):
+ x = np.arange(5)
+ x.flags.writeable = False
+ with pytest.raises(BufferError):
+ x.__dlpack__()
+
+ def test_ndim0(self):
+ x = np.array(1.0)
+ y = np.from_dlpack(x)
+ assert_array_equal(x, y)
+
+ def test_size1dims_arrays(self):
+ x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
+ buffer=np.ones(1000, dtype=np.uint8), order='F')
+ y = np.from_dlpack(x)
+ assert_array_equal(x, y)
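+
+
+# Editor's note: a minimal end-to-end sketch, not part of the original
+# suite: np.from_dlpack consumes any object exposing __dlpack__ and
+# __dlpack_device__; a NumPy-to-NumPy roundtrip shares the memory.
+def _example_dlpack_roundtrip():
+    x = np.arange(5.0)
+    y = np.from_dlpack(x)
+    y[0] = 42.0  # writes through to x: the exchange is zero-copy
+    assert x[0] == 42.0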
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py
new file mode 100644
index 00000000..0a56483d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_dtype.py
@@ -0,0 +1,1827 @@
+import sys
+import operator
+import pytest
+import ctypes
+import gc
+import types
+from typing import Any
+
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.core._multiarray_tests import create_custom_field_dtype
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
+ IS_PYSTON, _OLD_PROMOTION)
+from numpy.compat import pickle
+from itertools import permutations
+import random
+
+import hypothesis
+from hypothesis.extra import numpy as hynp
+
+
+def assert_dtype_equal(a, b):
+ assert_equal(a, b)
+ assert_equal(hash(a), hash(b),
+                 "two equivalent types do not hash to the same value!")
+
+def assert_dtype_not_equal(a, b):
+ assert_(a != b)
+ assert_(hash(a) != hash(b),
+            "two different types hash to the same value!")
+
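+
+# Editor's note: a quick illustration, not from the original file, of the
+# two helpers above: equal dtypes must also hash equal, because dtypes are
+# used as dictionary keys throughout NumPy.
+def _example_helpers():
+    assert_dtype_equal(np.dtype('i4'), np.dtype(np.int32))
+    assert_dtype_not_equal(np.dtype('i4'), np.dtype('i2'))
+
+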
+class TestBuiltin:
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.compat.unicode])
+ def test_run(self, t):
+ """Only test hash runs at all."""
+ dt = np.dtype(t)
+ hash(dt)
+
+ @pytest.mark.parametrize('t', [int, float])
+ def test_dtype(self, t):
+        # Make sure equivalent byte order chars hash the same (e.g. < and =
+        # on little endian)
+ dt = np.dtype(t)
+ dt2 = dt.newbyteorder("<")
+ dt3 = dt.newbyteorder(">")
+ if dt == dt2:
+ assert_(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt2)
+ else:
+ assert_(dt.byteorder != dt3.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt3)
+
+ def test_equivalent_dtype_hashing(self):
+ # Make sure equivalent dtypes with different type num hash equal
+ uintp = np.dtype(np.uintp)
+ if uintp.itemsize == 4:
+ left = uintp
+ right = np.dtype(np.uint32)
+ else:
+ left = uintp
+ right = np.dtype(np.ulonglong)
+ assert_(left == right)
+ assert_(hash(left) == hash(right))
+
+ def test_invalid_types(self):
+ # Make sure invalid type strings raise an error
+
+ assert_raises(TypeError, np.dtype, 'O3')
+ assert_raises(TypeError, np.dtype, 'O5')
+ assert_raises(TypeError, np.dtype, 'O7')
+ assert_raises(TypeError, np.dtype, 'b3')
+ assert_raises(TypeError, np.dtype, 'h4')
+ assert_raises(TypeError, np.dtype, 'I5')
+ assert_raises(TypeError, np.dtype, 'e3')
+ assert_raises(TypeError, np.dtype, 'f5')
+
+ if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
+ assert_raises(TypeError, np.dtype, 'g12')
+ elif np.dtype('g').itemsize == 12:
+ assert_raises(TypeError, np.dtype, 'g16')
+
+ if np.dtype('l').itemsize == 8:
+ assert_raises(TypeError, np.dtype, 'l4')
+ assert_raises(TypeError, np.dtype, 'L4')
+ else:
+ assert_raises(TypeError, np.dtype, 'l8')
+ assert_raises(TypeError, np.dtype, 'L8')
+
+ if np.dtype('q').itemsize == 8:
+ assert_raises(TypeError, np.dtype, 'q4')
+ assert_raises(TypeError, np.dtype, 'Q4')
+ else:
+ assert_raises(TypeError, np.dtype, 'q8')
+ assert_raises(TypeError, np.dtype, 'Q8')
+
+ def test_richcompare_invalid_dtype_equality(self):
+        # Make sure objects that cannot be converted to valid dtypes
+        # result in False/True when compared to valid dtypes.
+        # Here 7 cannot be converted to a dtype. No exceptions should be raised.
+
+ assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
+ assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
+
+ @pytest.mark.parametrize(
+ 'operation',
+ [operator.le, operator.lt, operator.ge, operator.gt])
+ def test_richcompare_invalid_dtype_comparison(self, operation):
+ # Make sure TypeError is raised for comparison operators
+ # for invalid dtypes. Here 7 is an invalid dtype.
+
+ with pytest.raises(TypeError):
+ operation(np.dtype(np.int32), 7)
+
+ @pytest.mark.parametrize("dtype",
+ ['Bool', 'Bytes0', 'Complex32', 'Complex64',
+ 'Datetime64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64',
+ 'Object0', 'Str0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'Uint32', 'UInt32',
+ 'Uint64', 'UInt64', 'Void0',
+ "Float128", "Complex128"])
+ def test_numeric_style_types_are_invalid(self, dtype):
+ with assert_raises(TypeError):
+ np.dtype(dtype)
+
+ def test_remaining_dtypes_with_bad_bytesize(self):
+ # The np.<name> aliases were deprecated, these probably should be too
+ assert np.dtype("int0") is np.dtype("intp")
+ assert np.dtype("uint0") is np.dtype("uintp")
+ assert np.dtype("bool8") is np.dtype("bool")
+ assert np.dtype("bytes0") is np.dtype("bytes")
+ assert np.dtype("str0") is np.dtype("str")
+ assert np.dtype("object0") is np.dtype("object")
+
+ @pytest.mark.parametrize(
+ 'value',
+ ['m8', 'M8', 'datetime64', 'timedelta64',
+ 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
+ '>f', '<f', '=f', '|f',
+ ])
+ def test_dtype_bytes_str_equivalence(self, value):
+ bytes_value = value.encode('ascii')
+ from_bytes = np.dtype(bytes_value)
+ from_str = np.dtype(value)
+ assert_dtype_equal(from_bytes, from_str)
+
+ def test_dtype_from_bytes(self):
+ # Empty bytes object
+ assert_raises(TypeError, np.dtype, b'')
+ # Byte order indicator, but no type
+ assert_raises(TypeError, np.dtype, b'|')
+
+ # Single character with ordinal < NPY_NTYPES returns
+ # type by index into _builtin_descrs
+ assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
+ assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
+
+ # Single character where value is a valid type code
+ assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
+
+ # Bytes with non-ascii values raise errors
+ assert_raises(TypeError, np.dtype, b'\xff')
+ assert_raises(TypeError, np.dtype, b's\xff')
+
+ def test_bad_param(self):
+ # Can't give a size that's too small
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'i1'],
+ 'offsets':[0, 4],
+ 'itemsize':4})
+ # If alignment is enabled, the alignment (4) must divide the itemsize
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'i1'],
+ 'offsets':[0, 4],
+ 'itemsize':9}, align=True)
+ # If alignment is enabled, the individual fields must be aligned
+ assert_raises(ValueError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i1', 'f4'],
+ 'offsets':[0, 2]}, align=True)
+
+ def test_field_order_equality(self):
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [0, 4]})
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [4, 0]})
+ assert_equal(x == y, False)
+        # This is a safe cast (not equiv) due to the different names:
+ assert np.can_cast(x, y, casting="safe")
+
+
+class TestRecord:
+ def test_equivalent_record(self):
+ """Test whether equivalent record dtypes hash the same."""
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ assert_dtype_equal(a, b)
+
+ def test_different_names(self):
+        # In theory, they may hash the same (collision)?
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('ye', int)])
+ assert_dtype_not_equal(a, b)
+
+ def test_different_titles(self):
+        # In theory, they may hash the same (collision)?
+ a = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ b = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['RRed pixel', 'Blue pixel']})
+ assert_dtype_not_equal(a, b)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount_dictionary_setting(self):
+ names = ["name1"]
+ formats = ["f8"]
+ titles = ["t1"]
+ offsets = [0]
+ d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
+ refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
+ np.dtype(d)
+ refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
+ assert refcounts == refcounts_new
+
+ def test_mutate(self):
+ # Mutating a dtype should reset the cached hash value.
+ # NOTE: Mutating should be deprecated, but new API added to replace it.
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ c = np.dtype([('ye', int)])
+ assert_dtype_equal(a, b)
+ assert_dtype_not_equal(a, c)
+ a.names = ['ye']
+ assert_dtype_equal(a, c)
+ assert_dtype_not_equal(a, b)
+ state = b.__reduce__()[2]
+ a.__setstate__(state)
+ assert_dtype_equal(a, b)
+ assert_dtype_not_equal(a, c)
+
+ def test_mutate_error(self):
+ # NOTE: Mutating should be deprecated, but new API added to replace it.
+ a = np.dtype("i,i")
+
+ with pytest.raises(ValueError, match="must replace all names at once"):
+ a.names = ["f0"]
+
+ with pytest.raises(ValueError, match=".*and not string"):
+ a.names = ["f0", b"not a unicode name"]
+
+ def test_not_lists(self):
+ """Test if an appropriate exception is raised when passing bad values to
+ the dtype constructor.
+ """
+ assert_raises(TypeError, np.dtype,
+ dict(names={'A', 'B'}, formats=['f8', 'i4']))
+ assert_raises(TypeError, np.dtype,
+ dict(names=['A', 'B'], formats={'f8', 'i4'}))
+
+ def test_aligned_size(self):
+ # Check that structured dtypes get padded to an aligned size
+ dt = np.dtype('i4, i1', align=True)
+ assert_equal(dt.itemsize, 8)
+ dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
+ assert_equal(dt.itemsize, 8)
+ dt = np.dtype({'names':['f0', 'f1'],
+ 'formats':['i4', 'u1'],
+ 'offsets':[0, 4]}, align=True)
+ assert_equal(dt.itemsize, 8)
+ dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
+ assert_equal(dt.itemsize, 8)
+ # Nesting should preserve that alignment
+ dt1 = np.dtype([('f0', 'i4'),
+ ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
+ ('f2', 'i1')], align=True)
+ assert_equal(dt1.itemsize, 20)
+ dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
+ 'formats':['i4',
+ [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
+ 'i1'],
+ 'offsets':[0, 4, 16]}, align=True)
+ assert_equal(dt2.itemsize, 20)
+ dt3 = np.dtype({'f0': ('i4', 0),
+ 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
+ 'f2': ('i1', 16)}, align=True)
+ assert_equal(dt3.itemsize, 20)
+ assert_equal(dt1, dt2)
+ assert_equal(dt2, dt3)
+ # Nesting should preserve packing
+ dt1 = np.dtype([('f0', 'i4'),
+ ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
+ ('f2', 'i1')], align=False)
+ assert_equal(dt1.itemsize, 11)
+ dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
+ 'formats':['i4',
+ [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
+ 'i1'],
+ 'offsets':[0, 4, 10]}, align=False)
+ assert_equal(dt2.itemsize, 11)
+ dt3 = np.dtype({'f0': ('i4', 0),
+ 'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
+ 'f2': ('i1', 10)}, align=False)
+ assert_equal(dt3.itemsize, 11)
+ assert_equal(dt1, dt2)
+ assert_equal(dt2, dt3)
+ # Array of subtype should preserve alignment
+ dt1 = np.dtype([('a', '|i1'),
+ ('b', [('f0', '<i2'),
+ ('f1', '<f4')], 2)], align=True)
+ assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
+ ('b', [('f0', '<i2'), ('', '|V2'),
+ ('f1', '<f4')], (2,))])
+
+ def test_union_struct(self):
+ # Should be able to create union dtypes
+ dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[0, 0, 2]}, align=True)
+ assert_equal(dt.itemsize, 4)
+ a = np.array([3], dtype='<u4').view(dt)
+ a['f1'] = 10
+ a['f2'] = 36
+ assert_equal(a['f0'], 10 + 36*256*256)
+ # Should be able to specify fields out of order
+ dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[4, 0, 2]}, align=True)
+ assert_equal(dt.itemsize, 8)
+ # field name should not matter: assignment is by position
+ dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
+ 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[4, 0, 2]}, align=True)
+ vals = [(0, 1, 2), (3, 2**15-1, 4)]
+ vals2 = [(0, 1, 2), (3, 2**15-1, 4)]
+ a = np.array(vals, dt)
+ b = np.array(vals2, dt2)
+ assert_equal(a.astype(dt2), b)
+ assert_equal(b.astype(dt), a)
+ assert_equal(a.view(dt2), b)
+ assert_equal(b.view(dt), a)
+ # Should not be able to overlap objects with other types
+ assert_raises(TypeError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['O', 'i1'],
+ 'offsets':[0, 2]})
+ assert_raises(TypeError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', 'O'],
+ 'offsets':[0, 3]})
+ assert_raises(TypeError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':[[('a', 'O')], 'i1'],
+ 'offsets':[0, 2]})
+ assert_raises(TypeError, np.dtype,
+ {'names':['f0', 'f1'],
+ 'formats':['i4', [('a', 'O')]],
+ 'offsets':[0, 3]})
+ # Out of order should still be ok, however
+ dt = np.dtype({'names':['f0', 'f1'],
+ 'formats':['i1', 'O'],
+ 'offsets':[np.dtype('intp').itemsize, 0]})
+
+ @pytest.mark.parametrize(["obj", "dtype", "expected"],
+ [([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
+ (3, "(3)f4,", [3, 3, 3]),
+ (np.float64(2), "(2)f4,", [2, 2]),
+ ([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
+ (["1", "2"], "(2)i,", None)])
+ def test_subarray_list(self, obj, dtype, expected):
+ dtype = np.dtype(dtype)
+ res = np.array(obj, dtype=dtype)
+
+ if expected is None:
+ # iterate the 1-d list to fill the array
+ expected = np.empty(len(obj), dtype=dtype)
+ for i in range(len(expected)):
+ expected[i] = obj[i]
+
+ assert_array_equal(res, expected)
+
+ def test_comma_datetime(self):
+ dt = np.dtype('M8[D],datetime64[Y],i8')
+ assert_equal(dt, np.dtype([('f0', 'M8[D]'),
+ ('f1', 'datetime64[Y]'),
+ ('f2', 'i8')]))
+
+ def test_from_dictproxy(self):
+ # Tests for PR #5920
+ dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
+ assert_dtype_equal(dt, np.dtype(dt.fields))
+ dt2 = np.dtype((np.void, dt.fields))
+ assert_equal(dt2.fields, dt.fields)
+
+ def test_from_dict_with_zero_width_field(self):
+ # Regression test for #6430 / #2196
+ dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
+ dt2 = np.dtype({'names': ['val1', 'val2'],
+ 'formats': [(np.float32, (0,)), int]})
+
+ assert_dtype_equal(dt, dt2)
+ assert_equal(dt.fields['val1'][0].itemsize, 0)
+ assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
+
+ def test_bool_commastring(self):
+        d = np.dtype('?,?,?')  # should parse without raising
+ assert_equal(len(d.names), 3)
+ for n in d.names:
+ assert_equal(d.fields[n][0], np.dtype('?'))
+
+ def test_nonint_offsets(self):
+ # gh-8059
+ def make_dtype(off):
+ return np.dtype({'names': ['A'], 'formats': ['i4'],
+ 'offsets': [off]})
+
+ assert_raises(TypeError, make_dtype, 'ASD')
+ assert_raises(OverflowError, make_dtype, 2**70)
+ assert_raises(TypeError, make_dtype, 2.3)
+ assert_raises(ValueError, make_dtype, -10)
+
+ # no errors here:
+ dt = make_dtype(np.uint32(0))
+ np.zeros(1, dtype=dt)[0].item()
+
+ def test_fields_by_index(self):
+ dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
+ assert_dtype_equal(dt[0], np.dtype(np.int8))
+ assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
+ assert_dtype_equal(dt[-1], dt[1])
+ assert_dtype_equal(dt[-2], dt[0])
+ assert_raises(IndexError, lambda: dt[-3])
+
+ assert_raises(TypeError, operator.getitem, dt, 3.0)
+
+ assert_equal(dt[1], dt[np.int8(1)])
+
+ @pytest.mark.parametrize('align_flag',[False, True])
+ def test_multifield_index(self, align_flag):
+ # indexing with a list produces subfields
+ # the align flag should be preserved
+ dt = np.dtype([
+ (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
+ ], align=align_flag)
+
+ dt_sub = dt[['B', 'col1']]
+ assert_equal(
+ dt_sub,
+ np.dtype({
+ 'names': ['B', 'col1'],
+ 'formats': ['<f8', '<U20'],
+ 'offsets': [88, 0],
+ 'titles': [None, 'title'],
+ 'itemsize': 96
+ })
+ )
+ assert_equal(dt_sub.isalignedstruct, align_flag)
+
+ dt_sub = dt[['B']]
+ assert_equal(
+ dt_sub,
+ np.dtype({
+ 'names': ['B'],
+ 'formats': ['<f8'],
+ 'offsets': [88],
+ 'itemsize': 96
+ })
+ )
+ assert_equal(dt_sub.isalignedstruct, align_flag)
+
+ dt_sub = dt[[]]
+ assert_equal(
+ dt_sub,
+ np.dtype({
+ 'names': [],
+ 'formats': [],
+ 'offsets': [],
+ 'itemsize': 96
+ })
+ )
+ assert_equal(dt_sub.isalignedstruct, align_flag)
+
+ assert_raises(TypeError, operator.getitem, dt, ())
+ assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
+ assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
+ assert_raises(KeyError, operator.getitem, dt, ['fake'])
+ assert_raises(KeyError, operator.getitem, dt, ['title'])
+ assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
+
+ def test_partial_dict(self):
+ # 'names' is missing
+ assert_raises(ValueError, np.dtype,
+ {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
+
+ def test_fieldless_views(self):
+ a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
+ 'itemsize':8})
+ assert_raises(ValueError, a.view, np.dtype([]))
+
+ d = np.dtype((np.dtype([]), 10))
+ assert_equal(d.shape, (10,))
+ assert_equal(d.itemsize, 0)
+ assert_equal(d.base, np.dtype([]))
+
+ arr = np.fromiter((() for i in range(10)), [])
+ assert_equal(arr.dtype, np.dtype([]))
+ assert_raises(ValueError, np.frombuffer, b'', dtype=[])
+ assert_equal(np.frombuffer(b'', dtype=[], count=2),
+ np.empty(2, dtype=[]))
+
+ assert_raises(ValueError, np.dtype, ([], 'f8'))
+ assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
+
+ assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
+ np.ones(2, dtype=bool))
+
+ assert_equal(np.zeros((1, 2), dtype=[]) == a,
+ np.ones((1, 2), dtype=bool))
+
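+
+# Editor's note: a compact sketch, not part of the original file, of the
+# dict form of structured dtypes exercised above: explicit offsets allow
+# padding (and, for non-object fields, even overlap).
+def _example_offset_struct():
+    dt = np.dtype({'names': ['lo', 'hi'],
+                   'formats': ['<u2', '<u2'],
+                   'offsets': [0, 2],
+                   'itemsize': 8})     # 4 bytes of trailing padding
+    assert dt.itemsize == 8
+    assert dt.fields['hi'][1] == 2     # fields map names to (dtype, offset)
+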
+
+class TestSubarray:
+ def test_single_subarray(self):
+ a = np.dtype((int, (2)))
+ b = np.dtype((int, (2,)))
+ assert_dtype_equal(a, b)
+
+ assert_equal(type(a.subdtype[1]), tuple)
+ assert_equal(type(b.subdtype[1]), tuple)
+
+ def test_equivalent_record(self):
+ """Test whether equivalent subarray dtypes hash the same."""
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 3)))
+ assert_dtype_equal(a, b)
+
+ def test_nonequivalent_record(self):
+ """Test whether different subarray dtypes hash differently."""
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (3, 2)))
+ assert_dtype_not_equal(a, b)
+
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 2)))
+ assert_dtype_not_equal(a, b)
+
+ a = np.dtype((int, (1, 2, 3)))
+ b = np.dtype((int, (1, 2)))
+ assert_dtype_not_equal(a, b)
+
+ def test_shape_equal(self):
+ """Test some data types that are equal"""
+ assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
+ # FutureWarning during deprecation period; after it is passed this
+ # should instead check that "(1)f8" == "1f8" == ("f8", 1).
+ with pytest.warns(FutureWarning):
+ assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
+ assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
+ assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
+ d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
+ assert_dtype_equal(np.dtype(d), np.dtype(d))
+
+ def test_shape_simple(self):
+ """Test some simple cases that shouldn't be equal"""
+ assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
+ assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
+ assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
+
+ def test_shape_monster(self):
+ """Test some more complicated cases that shouldn't be equal"""
+ assert_dtype_not_equal(
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
+ np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
+ assert_dtype_not_equal(
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
+ assert_dtype_not_equal(
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
+ np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
+ assert_dtype_not_equal(
+ np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
+ np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
+
+ def test_shape_sequence(self):
+ # Any sequence of integers should work as shape, but the result
+ # should be a tuple (immutable) of base type integers.
+ a = np.array([1, 2, 3], dtype=np.int16)
+ l = [1, 2, 3]
+ # Array gets converted
+ dt = np.dtype([('a', 'f4', a)])
+ assert_(isinstance(dt['a'].shape, tuple))
+ assert_(isinstance(dt['a'].shape[0], int))
+ # List gets converted
+ dt = np.dtype([('a', 'f4', l)])
+ assert_(isinstance(dt['a'].shape, tuple))
+        # Objects implementing __index__ are also accepted as a shape:
+
+ class IntLike:
+ def __index__(self):
+ return 3
+
+ def __int__(self):
+ # (a PyNumber_Check fails without __int__)
+ return 3
+
+ dt = np.dtype([('a', 'f4', IntLike())])
+ assert_(isinstance(dt['a'].shape, tuple))
+ assert_(isinstance(dt['a'].shape[0], int))
+ dt = np.dtype([('a', 'f4', (IntLike(),))])
+ assert_(isinstance(dt['a'].shape, tuple))
+ assert_(isinstance(dt['a'].shape[0], int))
+
+ def test_shape_matches_ndim(self):
+ dt = np.dtype([('a', 'f4', ())])
+ assert_equal(dt['a'].shape, ())
+ assert_equal(dt['a'].ndim, 0)
+
+ dt = np.dtype([('a', 'f4')])
+ assert_equal(dt['a'].shape, ())
+ assert_equal(dt['a'].ndim, 0)
+
+ dt = np.dtype([('a', 'f4', 4)])
+ assert_equal(dt['a'].shape, (4,))
+ assert_equal(dt['a'].ndim, 1)
+
+ dt = np.dtype([('a', 'f4', (1, 2, 3))])
+ assert_equal(dt['a'].shape, (1, 2, 3))
+ assert_equal(dt['a'].ndim, 3)
+
+ def test_shape_invalid(self):
+ # Check that the shape is valid.
+ max_int = np.iinfo(np.intc).max
+ max_intp = np.iinfo(np.intp).max
+ # Too large values (the datatype is part of this)
+ assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
+ assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
+ assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
+        # Takes a different code path (fails earlier):
+ assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
+ # Negative values
+ assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
+ assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
+
+ def test_alignment(self):
+        # Check that subarrays are aligned
+ t1 = np.dtype('(1,)i4', align=True)
+ t2 = np.dtype('2i4', align=True)
+ assert_equal(t1.alignment, t2.alignment)
+
+ def test_aligned_empty(self):
+ # Mainly regression test for gh-19696: construction failed completely
+ dt = np.dtype([], align=True)
+ assert dt == np.dtype([])
+ dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
+ assert dt == np.dtype([])
+
+ def test_subarray_base_item(self):
+ arr = np.ones(3, dtype=[("f", "i", 3)])
+ # Extracting the field "absorbs" the subarray into a view:
+ assert arr["f"].base is arr
+ # Extract the structured item, and then check the tuple component:
+ item = arr.item(0)
+ assert type(item) is tuple and len(item) == 1
+ assert item[0].base is arr
+
+ def test_subarray_cast_copies(self):
+        # Older versions of NumPy did NOT copy, but they got the ownership
+        # wrong (not actually knowing the correct base!). Versions since
+        # 1.21 (I think) crashed fairly reliably. This defines the correct
+        # behavior as a copy; keeping the ownership would be possible, but
+        # harder.
+ arr = np.ones(3, dtype=[("f", "i", 3)])
+ cast = arr.astype(object)
+ for fields in cast:
+ assert type(fields) == tuple and len(fields) == 1
+ subarr = fields[0]
+ assert subarr.base is None
+ assert subarr.flags.owndata
+
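+
+# Editor's note: a brief sketch, not from the original file, of the subarray
+# dtypes tested above: (base, shape) describes shape-sized blocks of base,
+# and a field with such a dtype gets the extra dimensions appended.
+def _example_subarray_dtype():
+    dt = np.dtype(('f4', (2, 3)))
+    assert dt.subdtype == (np.dtype('f4'), (2, 3))
+    arr = np.zeros(5, dtype=[('block', 'f4', (2, 3))])
+    assert arr['block'].shape == (5, 2, 3)
+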
+
+def iter_struct_object_dtypes():
+ """
+    Iterates over a few complex dtypes and object patterns which
+    fill the array with a given object (defaults to a singleton).
+
+ Yields
+ ------
+ dtype : dtype
+ pattern : tuple
+ Structured tuple for use with `np.array`.
+ count : int
+ Number of objects stored in the dtype.
+ singleton : object
+ A singleton object. The returned pattern is constructed so that
+ all objects inside the datatype are set to the singleton.
+ """
+ obj = object()
+
+ dt = np.dtype([('b', 'O', (2, 3))])
+ p = ([[obj] * 3] * 2,)
+ yield pytest.param(dt, p, 6, obj, id="<subarray>")
+
+ dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
+ p = (0, [[obj] * 3] * 2)
+ yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
+
+ dt = np.dtype([('a', 'i4'),
+ ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
+ p = (0, [[(obj, 0)] * 3] * 2)
+ yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
+
+ dt = np.dtype([('a', 'i4'),
+ ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
+ p = (0, [[(obj, obj)] * 3] * 2)
+ yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestStructuredObjectRefcounting:
+ """These tests cover various uses of complicated structured types which
+ include objects and thus require reference counting.
+ """
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ @pytest.mark.parametrize(["creation_func", "creation_obj"], [
+ pytest.param(np.empty, None,
+ # None is probably used for too many things
+ marks=pytest.mark.skip("unreliable due to python's behaviour")),
+ (np.ones, 1),
+ (np.zeros, 0)])
+ def test_structured_object_create_delete(self, dt, pat, count, singleton,
+ creation_func, creation_obj):
+ """Structured object reference counting in creation and deletion"""
+ # The test assumes that 0, 1, and None are singletons.
+ gc.collect()
+ before = sys.getrefcount(creation_obj)
+ arr = creation_func(3, dt)
+
+ now = sys.getrefcount(creation_obj)
+ assert now - before == count * 3
+ del arr
+ now = sys.getrefcount(creation_obj)
+ assert now == before
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ def test_structured_object_item_setting(self, dt, pat, count, singleton):
+ """Structured object reference counting for simple item setting"""
+ one = 1
+
+ gc.collect()
+ before = sys.getrefcount(singleton)
+ arr = np.array([pat] * 3, dt)
+ assert sys.getrefcount(singleton) - before == count * 3
+ # Fill with `1` and check that it was replaced correctly:
+ before2 = sys.getrefcount(one)
+ arr[...] = one
+ after2 = sys.getrefcount(one)
+ assert after2 - before2 == count * 3
+ del arr
+ gc.collect()
+ assert sys.getrefcount(one) == before2
+ assert sys.getrefcount(singleton) == before
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ @pytest.mark.parametrize(
+ ['shape', 'index', 'items_changed'],
+ [((3,), ([0, 2],), 2),
+ ((3, 2), ([0, 2], slice(None)), 4),
+ ((3, 2), ([0, 2], [1]), 2),
+ ((3,), ([True, False, True]), 2)])
+ def test_structured_object_indexing(self, shape, index, items_changed,
+ dt, pat, count, singleton):
+ """Structured object reference counting for advanced indexing."""
+        # Use two small negative values (should be singletons, but less
+        # likely to run into race conditions). This failed in some threaded
+        # environments when using 0 and 1. If it fails again, remove all
+        # explicit checks and rely on the `pytest-leaks` reference count
+        # checker only.
+ val0 = -4
+ val1 = -5
+
+ arr = np.full(shape, val0, dt)
+
+ gc.collect()
+ before_val0 = sys.getrefcount(val0)
+ before_val1 = sys.getrefcount(val1)
+ # Test item getting:
+ part = arr[index]
+ after_val0 = sys.getrefcount(val0)
+ assert after_val0 - before_val0 == count * items_changed
+ del part
+ # Test item setting:
+ arr[index] = val1
+ gc.collect()
+ after_val0 = sys.getrefcount(val0)
+ after_val1 = sys.getrefcount(val1)
+ assert before_val0 - after_val0 == count * items_changed
+ assert after_val1 - before_val1 == count * items_changed
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
+ """Structured object reference counting for specialized functions.
+        The older functions such as take and repeat use different code
+        paths than item setting (at the time of writing).
+ """
+ indices = [0, 1]
+
+ arr = np.array([pat] * 3, dt)
+ gc.collect()
+ before = sys.getrefcount(singleton)
+ res = arr.take(indices)
+ after = sys.getrefcount(singleton)
+ assert after - before == count * 2
+ new = res.repeat(10)
+ gc.collect()
+ after_repeat = sys.getrefcount(singleton)
+ assert after_repeat - after == count * 2 * 10
+
+
+class TestStructuredDtypeSparseFields:
+    """Tests subarray fields which contain sparse dtypes, so that not all
+    of the memory backing the dtype is used. Such dtypes should leave the
+    underlying memory unchanged.
+ """
+ dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
+ 'offsets':[0, 4]}, (2, 3))])
+ sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
+ 'offsets':[4]}, (2, 3))])
+
+ def test_sparse_field_assignment(self):
+ arr = np.zeros(3, self.dtype)
+ sparse_arr = arr.view(self.sparse_dtype)
+
+ sparse_arr[...] = np.finfo(np.float32).max
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+ def test_sparse_field_assignment_fancy(self):
+ # Fancy assignment goes to the copyswap function for complex types:
+ arr = np.zeros(3, self.dtype)
+ sparse_arr = arr.view(self.sparse_dtype)
+
+ sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+
+class TestMonsterType:
+ """Test deeply nested subtypes."""
+
+ def test1(self):
+ simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ a = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
+ b = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
+ assert_dtype_equal(a, b)
+
+ c = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((a, (3, 2))))])
+ d = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((a, (3, 2))))])
+ assert_dtype_equal(c, d)
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_list_recursion(self):
+ l = list()
+ l.append(('f', l))
+ with pytest.raises(RecursionError):
+ np.dtype(l)
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_tuple_recursion(self):
+ d = np.int32
+ for i in range(100000):
+ d = (d, (1,))
+ with pytest.raises(RecursionError):
+ np.dtype(d)
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_dict_recursion(self):
+ d = dict(names=['self'], formats=[None], offsets=[0])
+ d['formats'][0] = d
+ with pytest.raises(RecursionError):
+ np.dtype(d)
+
+
+class TestMetadata:
+ def test_no_metadata(self):
+ d = np.dtype(int)
+ assert_(d.metadata is None)
+
+ def test_metadata_takes_dict(self):
+ d = np.dtype(int, metadata={'datum': 1})
+ assert_(d.metadata == {'datum': 1})
+
+ def test_metadata_rejects_nondict(self):
+ assert_raises(TypeError, np.dtype, int, metadata='datum')
+ assert_raises(TypeError, np.dtype, int, metadata=1)
+ assert_raises(TypeError, np.dtype, int, metadata=None)
+
+ def test_nested_metadata(self):
+ d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
+ assert_(d['a'].metadata == {'datum': 1})
+
+ def test_base_metadata_copied(self):
+ d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
+ assert_(d.metadata == {'datum': 1})
+
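+
+# Editor's note: a short sketch, not part of the original file: metadata
+# rides along on the dtype without affecting equality, which is why the
+# tests above only inspect the attribute itself.
+def _example_metadata_equality():
+    plain = np.dtype(np.int32)
+    tagged = np.dtype(np.int32, metadata={'unit': 'm'})
+    assert plain == tagged  # metadata is ignored by ==
+    assert tagged.metadata == {'unit': 'm'}
+
+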
+class TestString:
+ def test_complex_dtype_str(self):
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+ ('rtile', '>f4', (64, 36))], (3,)),
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+ ('bright', '>f4', (8, 36))])])
+ assert_equal(str(dt),
+ "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)), "
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))])]")
+
+ # If the sticky aligned flag is set to True, it makes the
+ # str() function use a dict representation with an 'aligned' flag
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+ ('rtile', '>f4', (64, 36))],
+ (3,)),
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+ ('bright', '>f4', (8, 36))])],
+ align=True)
+ assert_equal(str(dt),
+ "{'names': ['top', 'bottom'],"
+ " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)), "
+ "[('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))]],"
+ " 'offsets': [0, 76800],"
+ " 'itemsize': 80000,"
+ " 'aligned': True}")
+ with np.printoptions(legacy='1.21'):
+ assert_equal(str(dt),
+ "{'names':['top','bottom'], "
+ "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)),"
+ "[('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))]], "
+ "'offsets':[0,76800], "
+ "'itemsize':80000, "
+ "'aligned':True}")
+ assert_equal(np.dtype(eval(str(dt))), dt)
+
+ dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
+ 'offsets': [0, 1, 2],
+ 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
+ assert_equal(str(dt),
+ "[(('Red pixel', 'r'), 'u1'), "
+ "(('Green pixel', 'g'), 'u1'), "
+ "(('Blue pixel', 'b'), 'u1')]")
+
+ dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
+ 'formats': ['<u4', 'u1', 'u1', 'u1'],
+ 'offsets': [0, 0, 1, 2],
+ 'titles': ['Color', 'Red pixel',
+ 'Green pixel', 'Blue pixel']})
+ assert_equal(str(dt),
+ "{'names': ['rgba', 'r', 'g', 'b'],"
+ " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
+ " 'offsets': [0, 0, 1, 2],"
+ " 'titles': ['Color', 'Red pixel', "
+ "'Green pixel', 'Blue pixel'],"
+ " 'itemsize': 4}")
+
+ dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
+ 'offsets': [0, 2],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ assert_equal(str(dt),
+ "{'names': ['r', 'b'],"
+ " 'formats': ['u1', 'u1'],"
+ " 'offsets': [0, 2],"
+ " 'titles': ['Red pixel', 'Blue pixel'],"
+ " 'itemsize': 3}")
+
+ dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
+ assert_equal(str(dt),
+ "[('a', '<m8[D]'), ('b', '<M8[us]')]")
+
+ def test_repr_structured(self):
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
+ ('rtile', '>f4', (64, 36))], (3,)),
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
+ ('bright', '>f4', (8, 36))])])
+ assert_equal(repr(dt),
+ "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
+ "('rtile', '>f4', (64, 36))], (3,)), "
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
+ "('bright', '>f4', (8, 36))])])")
+
+ dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
+ 'offsets': [0, 1, 2],
+ 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
+ align=True)
+ assert_equal(repr(dt),
+ "dtype([(('Red pixel', 'r'), 'u1'), "
+ "(('Green pixel', 'g'), 'u1'), "
+ "(('Blue pixel', 'b'), 'u1')], align=True)")
+
+ def test_repr_structured_not_packed(self):
+ dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
+ 'formats': ['<u4', 'u1', 'u1', 'u1'],
+ 'offsets': [0, 0, 1, 2],
+ 'titles': ['Color', 'Red pixel',
+ 'Green pixel', 'Blue pixel']}, align=True)
+ assert_equal(repr(dt),
+ "dtype({'names': ['rgba', 'r', 'g', 'b'],"
+ " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
+ " 'offsets': [0, 0, 1, 2],"
+ " 'titles': ['Color', 'Red pixel', "
+ "'Green pixel', 'Blue pixel'],"
+ " 'itemsize': 4}, align=True)")
+
+ dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
+ 'offsets': [0, 2],
+ 'titles': ['Red pixel', 'Blue pixel'],
+ 'itemsize': 4})
+ assert_equal(repr(dt),
+ "dtype({'names': ['r', 'b'], "
+ "'formats': ['u1', 'u1'], "
+ "'offsets': [0, 2], "
+ "'titles': ['Red pixel', 'Blue pixel'], "
+ "'itemsize': 4})")
+
+ def test_repr_structured_datetime(self):
+ dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
+ assert_equal(repr(dt),
+ "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
+
+ def test_repr_str_subarray(self):
+ dt = np.dtype(('<i2', (1,)))
+ assert_equal(repr(dt), "dtype(('<i2', (1,)))")
+ assert_equal(str(dt), "('<i2', (1,))")
+
+ def test_base_dtype_with_object_type(self):
+ # Issue gh-2798, should not error.
+ np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
+
+ def test_empty_string_to_object(self):
+ # Pull request #4722
+ np.array(["", ""]).astype(object)
+
+ def test_void_subclass_unsized(self):
+ dt = np.dtype(np.record)
+ assert_equal(repr(dt), "dtype('V')")
+ assert_equal(str(dt), '|V0')
+ assert_equal(dt.name, 'record')
+
+ def test_void_subclass_sized(self):
+ dt = np.dtype((np.record, 2))
+ assert_equal(repr(dt), "dtype('V2')")
+ assert_equal(str(dt), '|V2')
+ assert_equal(dt.name, 'record16')
+
+ def test_void_subclass_fields(self):
+ dt = np.dtype((np.record, [('a', '<u2')]))
+ assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
+ assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
+ assert_equal(dt.name, 'record16')
+
+
+class TestDtypeAttributeDeletion:
+
+ def test_dtype_non_writable_attributes_deletion(self):
+ dt = np.dtype(np.double)
+ attr = ["subdtype", "descr", "str", "name", "base", "shape",
+ "isbuiltin", "isnative", "isalignedstruct", "fields",
+ "metadata", "hasobject"]
+
+ for s in attr:
+ assert_raises(AttributeError, delattr, dt, s)
+
+ def test_dtype_writable_attributes_deletion(self):
+ dt = np.dtype(np.double)
+ attr = ["names"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, dt, s)
+
+
+class TestDtypeAttributes:
+ def test_descr_has_trailing_void(self):
+ # see gh-6359
+ dtype = np.dtype({
+ 'names': ['A', 'B'],
+ 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8],
+ 'itemsize': 16})
+ new_dtype = np.dtype(dtype.descr)
+ assert_equal(new_dtype.itemsize, 16)
+
+ def test_name_dtype_subclass(self):
+ # Ticket #4357
+ class user_def_subcls(np.void):
+ pass
+ assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+
+ def test_zero_stride(self):
+ arr = np.ones(1, dtype="i8")
+ arr = np.broadcast_to(arr, 10)
+ assert arr.strides == (0,)
+ with pytest.raises(ValueError):
+ arr.dtype = "i1"
+
+class TestDTypeMakeCanonical:
+ def check_canonical(self, dtype, canonical):
+ """
+        Check most properties relevant to "canonical" versions of a dtype,
+        which mainly means native byte order for datatypes supporting it.
+
+        The main work is checking structured dtypes with fields, where we
+        reproduce most of the actual logic used in the C code.
+ """
+ assert type(dtype) is type(canonical)
+
+ # a canonical DType should always have equivalent casting (both ways)
+ assert np.can_cast(dtype, canonical, casting="equiv")
+ assert np.can_cast(canonical, dtype, casting="equiv")
+ # a canonical dtype (and its fields) is always native (checks fields):
+ assert canonical.isnative
+
+ # Check that canonical of canonical is the same (no casting):
+ assert np.result_type(canonical) == canonical
+
+ if not dtype.names:
+ # The flags currently never change for unstructured dtypes
+ assert dtype.flags == canonical.flags
+ return
+
+        # Must have the "needs API" flag set:
+ assert dtype.flags & 0b10000
+
+ # Check that the fields are identical (including titles):
+ assert dtype.fields.keys() == canonical.fields.keys()
+
+ def aligned_offset(offset, alignment):
+ # round up offset:
+ return - (-offset // alignment) * alignment
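+        # For example, aligned_offset(5, 4) == -(-5 // 4) * 4 == 8, while
+        # aligned_offset(8, 4) stays at 8: the standard ceil-division round-up.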
+
+ totalsize = 0
+ max_alignment = 1
+ for name in dtype.names:
+ # each field is also canonical:
+ new_field_descr = canonical.fields[name][0]
+ self.check_canonical(dtype.fields[name][0], new_field_descr)
+
+ # Must have the "inherited" object related flags:
+ expected = 0b11011 & new_field_descr.flags
+ assert (canonical.flags & expected) == expected
+
+ if canonical.isalignedstruct:
+ totalsize = aligned_offset(totalsize, new_field_descr.alignment)
+ max_alignment = max(new_field_descr.alignment, max_alignment)
+
+ assert canonical.fields[name][1] == totalsize
+ # if a title exists, they must match (otherwise empty tuple):
+ assert dtype.fields[name][2:] == canonical.fields[name][2:]
+
+ totalsize += new_field_descr.itemsize
+
+ if canonical.isalignedstruct:
+ totalsize = aligned_offset(totalsize, max_alignment)
+ assert canonical.itemsize == totalsize
+ assert canonical.alignment == max_alignment
+
+ def test_simple(self):
+ dt = np.dtype(">i4")
+ assert np.result_type(dt).isnative
+ assert np.result_type(dt).num == dt.num
+
+ # dtype with empty space:
+ struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
+ canonical = np.result_type(struct_dt)
+ assert canonical.itemsize == 4+8
+ assert canonical.isnative
+
+ # aligned struct dtype with empty space:
+ struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
+ canonical = np.result_type(struct_dt)
+ assert canonical.isalignedstruct
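+        # (Layout sketch: "f0" occupies one byte and is padded up to the
+        # alignment of "i8", after which "f2" adds 8 bytes, hence the
+        # alignment + 8 itemsize asserted below.)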
+ assert canonical.itemsize == np.dtype("i8").alignment + 8
+ assert canonical.isnative
+
+ def test_object_flag_not_inherited(self):
+        # The following dtype still indicates "object", because it's included
+        # in the inaccessible space (maybe this could change at some point):
+ arr = np.ones(3, "i,O,i")[["f0", "f2"]]
+ assert arr.dtype.hasobject
+ canonical_dt = np.result_type(arr.dtype)
+ assert not canonical_dt.hasobject
+
+ @pytest.mark.slow
+ @hypothesis.given(dtype=hynp.nested_dtypes())
+ def test_make_canonical_hypothesis(self, dtype):
+ canonical = np.result_type(dtype)
+ self.check_canonical(dtype, canonical)
+ # result_type with two arguments should always give identical results:
+ two_arg_result = np.result_type(dtype, dtype)
+ assert np.can_cast(two_arg_result, canonical, casting="no")
+
+ @pytest.mark.slow
+ @hypothesis.given(
+ dtype=hypothesis.extra.numpy.array_dtypes(
+ subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
+ min_size=5, max_size=10, allow_subarrays=True))
+ def test_structured(self, dtype):
+ # Pick 4 of the fields at random. This will leave empty space in the
+ # dtype (since we do not canonicalize it here).
+ field_subset = random.sample(dtype.names, k=4)
+ dtype_with_empty_space = dtype[field_subset]
+ assert dtype_with_empty_space.itemsize == dtype.itemsize
+ canonicalized = np.result_type(dtype_with_empty_space)
+ self.check_canonical(dtype_with_empty_space, canonicalized)
+ # promotion with two arguments should always give identical results:
+ two_arg_result = np.promote_types(
+ dtype_with_empty_space, dtype_with_empty_space)
+ assert np.can_cast(two_arg_result, canonicalized, casting="no")
+
+        # Ensure that we also check aligned structs (we check the opposite, in
+        # case hypothesis grows support for `align`). Then repeat the test:
+ dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
+ dtype_with_empty_space = dtype_aligned[field_subset]
+ assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
+ canonicalized = np.result_type(dtype_with_empty_space)
+ self.check_canonical(dtype_with_empty_space, canonicalized)
+ # promotion with two arguments should always give identical results:
+ two_arg_result = np.promote_types(
+ dtype_with_empty_space, dtype_with_empty_space)
+ assert np.can_cast(two_arg_result, canonicalized, casting="no")
+
+
+class TestPickling:
+
+ def check_pickling(self, dtype):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ buf = pickle.dumps(dtype, proto)
+            # A dtype pickled as a singleton goes through `np.dtype` itself,
+            # so `dtype` (and not `_DType_reconstruct`) should appear in the
+            # buffer:
+ assert b"_DType_reconstruct" not in buf
+ assert b"dtype" in buf
+ pickled = pickle.loads(buf)
+ assert_equal(pickled, dtype)
+ assert_equal(pickled.descr, dtype.descr)
+ if dtype.metadata is not None:
+ assert_equal(pickled.metadata, dtype.metadata)
+ # Check the reconstructed dtype is functional
+ x = np.zeros(3, dtype=dtype)
+ y = np.zeros(3, dtype=pickled)
+ assert_equal(x, y)
+ assert_equal(x[0], y[0])
+
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.compat.unicode, bool])
+ def test_builtin(self, t):
+ self.check_pickling(np.dtype(t))
+
+ def test_structured(self):
+ dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
+ self.check_pickling(dt)
+
+ def test_structured_aligned(self):
+ dt = np.dtype('i4, i1', align=True)
+ self.check_pickling(dt)
+
+ def test_structured_unaligned(self):
+ dt = np.dtype('i4, i1', align=False)
+ self.check_pickling(dt)
+
+ def test_structured_padded(self):
+ dt = np.dtype({
+ 'names': ['A', 'B'],
+ 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8],
+ 'itemsize': 16})
+ self.check_pickling(dt)
+
+ def test_structured_titles(self):
+ dt = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ self.check_pickling(dt)
+
+ @pytest.mark.parametrize('base', ['m8', 'M8'])
+ @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
+ 'ms', 'us', 'ns', 'ps', 'fs', 'as'])
+ def test_datetime(self, base, unit):
+ dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
+ self.check_pickling(dt)
+ if unit:
+ dt = np.dtype('%s[7%s]' % (base, unit))
+ self.check_pickling(dt)
+
+ def test_metadata(self):
+ dt = np.dtype(int, metadata={'datum': 1})
+ self.check_pickling(dt)
+
+ @pytest.mark.parametrize("DType",
+ [type(np.dtype(t)) for t in np.typecodes['All']] +
+ [np.dtype(rational), np.dtype])
+ def test_pickle_types(self, DType):
+ # Check that DTypes (the classes/types) roundtrip when pickling
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
+ assert roundtrip_DType is DType
+
+
+class TestPromotion:
+ """Test cases related to more complex DType promotions. Further promotion
+    tests are defined in `test_numeric.py`.
+ """
+ @np._no_nep50_warning()
+ @pytest.mark.parametrize(["other", "expected", "expected_weak"],
+ [(2**16-1, np.complex64, None),
+ (2**32-1, np.complex128, np.complex64),
+ (np.float16(2), np.complex64, None),
+ (np.float32(2), np.complex64, None),
+ (np.longdouble(2), np.complex64, np.clongdouble),
+ # Base of the double value to sidestep any rounding issues:
+ (np.longdouble(np.nextafter(1.7e308, 0.)),
+ np.complex128, np.clongdouble),
+ # Additionally use "nextafter" so the cast can't round down:
+ (np.longdouble(np.nextafter(1.7e308, np.inf)),
+ np.clongdouble, None),
+ # repeat for complex scalars:
+ (np.complex64(2), np.complex64, None),
+ (np.clongdouble(2), np.complex64, np.clongdouble),
+ # Base of the double value to sidestep any rounding issues:
+ (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j),
+ np.complex128, np.clongdouble),
+ # Additionally use "nextafter" so the cast can't round down:
+ (np.clongdouble(np.nextafter(1.7e308, np.inf)),
+ np.clongdouble, None),
+ ])
+ def test_complex_other_value_based(self,
+ weak_promotion, other, expected, expected_weak):
+ if weak_promotion and expected_weak is not None:
+ expected = expected_weak
+
+        # This would change if we modify the value-based promotion
+ min_complex = np.dtype(np.complex64)
+
+ res = np.result_type(other, min_complex)
+ assert res == expected
+ # Check the same for a simple ufunc call that uses the same logic:
+ res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
+ assert res == expected
+
+ @pytest.mark.parametrize(["other", "expected"],
+ [(np.bool_, np.complex128),
+ (np.int64, np.complex128),
+ (np.float16, np.complex64),
+ (np.float32, np.complex64),
+ (np.float64, np.complex128),
+ (np.longdouble, np.clongdouble),
+ (np.complex64, np.complex64),
+ (np.complex128, np.complex128),
+ (np.clongdouble, np.clongdouble),
+ ])
+ def test_complex_scalar_value_based(self, other, expected):
+        # This would change if we modify the value-based promotion
+ complex_scalar = 1j
+
+ res = np.result_type(other, complex_scalar)
+ assert res == expected
+ # Check the same for a simple ufunc call that uses the same logic:
+ res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
+ assert res == expected
+
+ def test_complex_pyscalar_promote_rational(self):
+ with pytest.raises(TypeError,
+ match=r".* no common DType exists for the given inputs"):
+ np.result_type(1j, rational)
+
+ with pytest.raises(TypeError,
+ match=r".* no common DType exists for the given inputs"):
+ np.result_type(1j, rational(1, 2))
+
+ @pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100])
+ def test_python_integer_promotion(self, val):
+        # If we only pass scalars (mainly python ones!), the result must take
+ # into account that the integer may be considered int32, int64, uint64,
+ # or object depending on the input value. So test those paths!
+ expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype)
+ assert np.result_type(val, 0) == expected_dtype
+ # For completeness sake, also check with a NumPy scalar as second arg:
+ assert np.result_type(val, np.int8(0)) == expected_dtype
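+        # Illustration (assuming 64-bit default integers): np.array(2).dtype
+        # is int64, np.array(2**63).dtype is uint64, and np.array(2**64).dtype
+        # falls back to object, so the values above cover all those branches.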
+
+ @pytest.mark.parametrize(["other", "expected"],
+ [(1, rational), (1., np.float64)])
+ @np._no_nep50_warning()
+ def test_float_int_pyscalar_promote_rational(
+ self, weak_promotion, other, expected):
+        # Note that rationals are a bit awkward as they promote with float64
+ # or default ints, but not float16 or uint8/int8 (which looks
+ # inconsistent here). The new promotion fixes this (partially?)
+ if not weak_promotion and type(other) == float:
+            # The float version checks float16 in the legacy path, which
+            # fails; the integer version seems to check int8 (also), so it
+            # can pass.
+ with pytest.raises(TypeError,
+ match=r".* do not have a common DType"):
+ np.result_type(other, rational)
+ else:
+ assert np.result_type(other, rational) == expected
+
+ assert np.result_type(other, rational(1, 2)) == expected
+
+ @pytest.mark.parametrize(["dtypes", "expected"], [
+ # These promotions are not associative/commutative:
+ ([np.uint16, np.int16, np.float16], np.float32),
+ ([np.uint16, np.int8, np.float16], np.float32),
+ ([np.uint8, np.int16, np.float16], np.float32),
+ # The following promotions are not ambiguous, but cover code
+ # paths of abstract promotion (no particular logic being tested)
+ ([1, 1, np.float64], np.float64),
+ ([1, 1., np.complex128], np.complex128),
+ ([1, 1j, np.float64], np.complex128),
+ ([1., 1., np.int64], np.float64),
+ ([1., 1j, np.float64], np.complex128),
+ ([1j, 1j, np.float64], np.complex128),
+ ([1, True, np.bool_], np.int_),
+ ])
+ def test_permutations_do_not_influence_result(self, dtypes, expected):
+ # Tests that most permutations do not influence the result. In the
+        # above some uint and int combinations promote to a larger integer
+ # type, which would then promote to a larger than necessary float.
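+        # Worked example: promoting pairwise, uint16 with int16 gives int32,
+        # and int32 with float16 gives float64, whereas the three-way result
+        # above is only float32.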
+ for perm in permutations(dtypes):
+ assert np.result_type(*perm) == expected
+
+
+def test_rational_dtype():
+ # test for bug gh-5719
+ a = np.array([1111], dtype=rational).astype
+ assert_raises(OverflowError, a, 'int8')
+
+ # test that dtype detection finds user-defined types
+ x = rational(1)
+ assert_equal(np.array([x,x]).dtype, np.dtype(rational))
+
+
+def test_dtypes_are_true():
+ # test for gh-6294
+ assert bool(np.dtype('f8'))
+ assert bool(np.dtype('i8'))
+ assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
+
+
+def test_invalid_dtype_string():
+ # test for gh-10440
+ assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
+ assert_raises(TypeError, np.dtype, 'Fl\xfcgel')
+
+
+def test_keyword_argument():
+ # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
+ assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
+
+
+def test_ulong_dtype():
+ # test for gh-21063
+ assert np.dtype("ulong") == np.dtype(np.uint)
+
+
+class TestFromDTypeAttribute:
+ def test_simple(self):
+ class dt:
+ dtype = np.dtype("f8")
+
+ assert np.dtype(dt) == np.float64
+ assert np.dtype(dt()) == np.float64
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_recursion(self):
+ class dt:
+ pass
+
+ dt.dtype = dt
+ with pytest.raises(RecursionError):
+ np.dtype(dt)
+
+ dt_instance = dt()
+ dt_instance.dtype = dt
+ with pytest.raises(RecursionError):
+ np.dtype(dt_instance)
+
+ def test_void_subtype(self):
+ class dt(np.void):
+            # This code path was previously entirely untested, so it is
+            # unclear what it should be useful for. Note that if np.void is
+            # used numpy will think we are deallocating a base type
+            # [1.17, 2019-02].
+ dtype = np.dtype("f,f")
+
+ np.dtype(dt)
+ np.dtype(dt(1))
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_void_subtype_recursion(self):
+ class vdt(np.void):
+ pass
+
+ vdt.dtype = vdt
+
+ with pytest.raises(RecursionError):
+ np.dtype(vdt)
+
+ with pytest.raises(RecursionError):
+ np.dtype(vdt(1))
+
+
+class TestDTypeClasses:
+ @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
+ def test_basic_dtypes_subclass_properties(self, dtype):
+ # Note: Except for the isinstance and type checks, these attributes
+ # are considered currently private and may change.
+ dtype = np.dtype(dtype)
+ assert isinstance(dtype, np.dtype)
+ assert type(dtype) is not np.dtype
+ assert type(dtype).__name__ == f"dtype[{dtype.type.__name__}]"
+ assert type(dtype).__module__ == "numpy"
+ assert not type(dtype)._abstract
+
+        # The flexible dtypes and datetime/timedelta have additional
+        # parameters which are more than just storage information; these
+        # would need to be given when creating a dtype:
+ parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
+ if dtype.type not in parametric:
+ assert not type(dtype)._parametric
+ assert type(dtype)() is dtype
+ else:
+ assert type(dtype)._parametric
+ with assert_raises(TypeError):
+ type(dtype)()
+
+ def test_dtype_superclass(self):
+ assert type(np.dtype) is not type
+ assert isinstance(np.dtype, type)
+
+ assert type(np.dtype).__name__ == "_DTypeMeta"
+ assert type(np.dtype).__module__ == "numpy"
+ assert np.dtype._abstract
+
+
+class TestFromCTypes:
+
+ @staticmethod
+ def check(ctype, dtype):
+ dtype = np.dtype(dtype)
+ assert_equal(np.dtype(ctype), dtype)
+ assert_equal(np.dtype(ctype()), dtype)
+
+ def test_array(self):
+ c8 = ctypes.c_uint8
+ self.check( 3 * c8, (np.uint8, (3,)))
+ self.check( 1 * c8, (np.uint8, (1,)))
+ self.check( 0 * c8, (np.uint8, (0,)))
+ self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
+ self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
+
+ def test_padded_structure(self):
+ class PaddedStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', np.uint8),
+ ('b', np.uint16)
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_bit_fields(self):
+ class BitfieldStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8, 7),
+ ('b', ctypes.c_uint8, 1)
+ ]
+ assert_raises(TypeError, np.dtype, BitfieldStruct)
+ assert_raises(TypeError, np.dtype, BitfieldStruct())
+
+ def test_pointer(self):
+ p_uint8 = ctypes.POINTER(ctypes.c_uint8)
+ assert_raises(TypeError, np.dtype, p_uint8)
+
+ def test_void_pointer(self):
+ self.check(ctypes.c_void_p, np.uintp)
+
+ def test_union(self):
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b'],
+ formats=[np.uint8, np.uint16],
+ offsets=[0, 0],
+ itemsize=2
+ ))
+ self.check(Union, expected)
+
+ def test_union_with_struct_packed(self):
+ class Struct(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
+ def test_union_packed(self):
+ class Struct(ctypes.Structure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ class Union(ctypes.Union):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
+ def test_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', np.uint8),
+ ('b', np.uint16)
+ ])
+ self.check(PackedStructure, expected)
+
+ def test_large_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 2
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint8),
+ ('d', ctypes.c_uint16),
+ ('e', ctypes.c_uint32),
+ ('f', ctypes.c_uint32),
+ ('g', ctypes.c_uint8)
+ ]
+ expected = np.dtype(dict(
+            formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8],
+ offsets=[0, 2, 4, 6, 8, 12, 16],
+ names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+ itemsize=18))
+ self.check(PackedStructure, expected)
+
+ def test_big_endian_structure_packed(self):
+ class BigEndStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '>u4')])
+ self.check(BigEndStruct, expected)
+
+ def test_little_endian_structure_packed(self):
+ class LittleEndStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '<u4')])
+ self.check(LittleEndStruct, expected)
+
+ def test_little_endian_structure(self):
+ class PaddedStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '<B'),
+ ('b', '<H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_big_endian_structure(self):
+ class PaddedStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16)
+ ]
+ expected = np.dtype([
+ ('a', '>B'),
+ ('b', '>H')
+ ], align=True)
+ self.check(PaddedStruct, expected)
+
+ def test_simple_endian_types(self):
+ self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
+ self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
+ self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
+ self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
+
+ all_types = set(np.typecodes['All'])
+ all_pairs = permutations(all_types, 2)
+
+ @pytest.mark.parametrize("pair", all_pairs)
+ def test_pairs(self, pair):
+ """
+ Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
+ Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
+ """
+ # gh-5645: check that np.dtype('i,L') can be used
+ pair_type = np.dtype('{},{}'.format(*pair))
+ expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
+ assert_equal(pair_type, expected)
+
+
+class TestUserDType:
+ @pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
+ def test_custom_structured_dtype(self):
+ class mytype:
+ pass
+
+ blueprint = np.dtype([("field", object)])
+ dt = create_custom_field_dtype(blueprint, mytype, 0)
+ assert dt.type == mytype
+ # We cannot (currently) *create* this dtype with `np.dtype` because
+ # mytype does not inherit from `np.generic`. This seems like an
+ # unnecessary restriction, but one that has been around forever:
+ assert np.dtype(mytype) == np.dtype("O")
+
+ def test_custom_structured_dtype_errors(self):
+ class mytype:
+ pass
+
+ blueprint = np.dtype([("field", object)])
+
+ with pytest.raises(ValueError):
+            # Tests what happens if fields are unset during creation,
+            # which is currently rejected due to the containing object
+            # (see PyArray_RegisterDataType).
+ create_custom_field_dtype(blueprint, mytype, 1)
+
+ with pytest.raises(RuntimeError):
+ # Tests that a dtype must have its type field set up to np.dtype
+ # or in this case a builtin instance.
+ create_custom_field_dtype(blueprint, mytype, 2)
+
+
+@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
+class TestClassGetItem:
+ def test_dtype(self) -> None:
+ alias = np.dtype[Any]
+ assert isinstance(alias, types.GenericAlias)
+ assert alias.__origin__ is np.dtype
+
+ @pytest.mark.parametrize("code", np.typecodes["All"])
+ def test_dtype_subclass(self, code: str) -> None:
+ cls = type(np.dtype(code))
+ alias = cls[Any]
+ assert isinstance(alias, types.GenericAlias)
+ assert alias.__origin__ is cls
+
+ @pytest.mark.parametrize("arg_len", range(4))
+ def test_subscript_tuple(self, arg_len: int) -> None:
+ arg_tup = (Any,) * arg_len
+ if arg_len == 1:
+ assert np.dtype[arg_tup]
+ else:
+ with pytest.raises(TypeError):
+ np.dtype[arg_tup]
+
+ def test_subscript_scalar(self) -> None:
+ assert np.dtype[Any]
+
+
+def test_result_type_integers_and_unitless_timedelta64():
+ # Regression test for gh-20077. The following call of `result_type`
+ # would cause a seg. fault.
+ td = np.timedelta64(4)
+ result = np.result_type(0, td)
+ assert_dtype_equal(result, td.dtype)
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
+def test_class_getitem_38() -> None:
+ match = "Type subscription requires python >= 3.9"
+ with pytest.raises(TypeError, match=match):
+ np.dtype[Any]
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py
new file mode 100644
index 00000000..7c0e8d97
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_einsum.py
@@ -0,0 +1,1137 @@
+import itertools
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
+ )
+
+# Setup for optimize einsum
+chars = 'abcdefghij'
+sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
+global_size_dict = dict(zip(chars, sizes))
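+# For instance, the subscript term 'ab' maps to a random operand of shape
+# (2, 3) under this size mapping (see optimize_compare below).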
+
+
+class TestEinsum:
+ def test_einsum_errors(self):
+ for do_opt in [True, False]:
+ # Need enough arguments
+ assert_raises(ValueError, np.einsum, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "", optimize=do_opt)
+
+ # subscripts must be a string
+ assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
+
+ # out parameter must be an array
+ assert_raises(TypeError, np.einsum, "", 0, out='test',
+ optimize=do_opt)
+
+ # order parameter must be a valid order
+ assert_raises(ValueError, np.einsum, "", 0, order='W',
+ optimize=do_opt)
+
+ # casting parameter must be a valid casting
+ assert_raises(ValueError, np.einsum, "", 0, casting='blah',
+ optimize=do_opt)
+
+ # dtype parameter must be a valid dtype
+ assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
+ optimize=do_opt)
+
+ # other keyword arguments are rejected
+ assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
+ optimize=do_opt)
+
+ # issue 4528 revealed a segfault with this call
+ assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
+
+ # number of operands must match count in subscripts string
+ assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
+
+ # can't have more subscripts than dimensions in the operand
+ assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
+
+ # invalid ellipsis
+ assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
+
+ # invalid subscript character
+ assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
+
+ # output subscripts must appear in input
+ assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
+
+ # output subscripts may only be specified once
+ assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
+ optimize=do_opt)
+
+            # dimensions must match when being collapsed
+ assert_raises(ValueError, np.einsum, "ii",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "ii->i",
+ np.arange(6).reshape(2, 3), optimize=do_opt)
+
+ # broadcasting to new dimensions must be enabled explicitly
+ assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
+ optimize=do_opt)
+ assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
+ out=np.arange(4).reshape(2, 2), optimize=do_opt)
+ with assert_raises_regex(ValueError, "'b'"):
+ # gh-11221 - 'c' erroneously appeared in the error message
+ a = np.ones((3, 3, 4, 5, 6))
+ b = np.ones((3, 4, 5))
+ np.einsum('aabcb,abc', a, b)
+
+ # Check order kwarg, asanyarray allows 1d to pass through
+ assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
+ optimize=do_opt, order='d')
+
+ def test_einsum_views(self):
+ # pass-through
+ for do_opt in [True, False]:
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum(a, [Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+
+ b = np.einsum("ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ b = np.einsum(a, [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a)
+
+ # output is writeable whenever input is writeable
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(b.flags['WRITEABLE'])
+ a.flags['WRITEABLE'] = False
+ b = np.einsum("...", a, optimize=do_opt)
+ assert_(not b.flags['WRITEABLE'])
+
+ # transpose
+ a = np.arange(6)
+ a.shape = (2, 3)
+
+ b = np.einsum("ji", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ b = np.einsum(a, [1, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.T)
+
+ # diagonal
+ a = np.arange(9)
+ a.shape = (3, 3)
+
+ b = np.einsum("ii->i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0], [0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i] for i in range(3)])
+
+ # diagonal with various ways of broadcasting an additional dimension
+ a = np.arange(27)
+ a.shape = (3, 3, 3)
+
+ b = np.einsum("...ii->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+ b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
+
+ b = np.einsum("ii...->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(2, 0, 1)])
+
+ b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(2, 0, 1)])
+
+ b = np.einsum("...ii->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum("jii->ij", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[:, i, i] for i in range(3)])
+
+ b = np.einsum("ii...->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
+
+ b = np.einsum("i...i->i...", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
+
+ b = np.einsum("i...i->...i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(1, 0, 2)])
+
+ b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [[x[i, i] for i in range(3)]
+ for x in a.transpose(1, 0, 2)])
+
+ # triple diagonal
+ a = np.arange(27)
+ a.shape = (3, 3, 3)
+
+ b = np.einsum("iii->i", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i, i] for i in range(3)])
+
+ b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, [a[i, i, i] for i in range(3)])
+
+ # swap axes
+ a = np.arange(24)
+ a.shape = (2, 3, 4)
+
+ b = np.einsum("ijk->jik", a, optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.swapaxes(0, 1))
+
+ b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
+ assert_(b.base is a)
+ assert_equal(b, a.swapaxes(0, 1))
+
+ @np._no_nep50_warning()
+ def check_einsum_sums(self, dtype, do_opt=False):
+ dtype = np.dtype(dtype)
+        # Check various sums. Uses many sizes to exercise unrolled loops.
+
+ # sum(a, axis=-1)
+ for n in range(1, 17):
+ a = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("i->", a, optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+ assert_equal(np.einsum(a, [0], [], optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+
+ for n in range(1, 17):
+ a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("...i->...", a, optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+ assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt),
+ np.sum(a, axis=-1).astype(dtype))
+
+ # sum(a, axis=0)
+ for n in range(1, 17):
+ a = np.arange(2*n, dtype=dtype).reshape(2, n)
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+
+ for n in range(1, 17):
+ a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt),
+ np.sum(a, axis=0).astype(dtype))
+
+ # trace(a)
+ for n in range(1, 17):
+ a = np.arange(n*n, dtype=dtype).reshape(n, n)
+ assert_equal(np.einsum("ii", a, optimize=do_opt),
+ np.trace(a).astype(dtype))
+ assert_equal(np.einsum(a, [0, 0], optimize=do_opt),
+ np.trace(a).astype(dtype))
+
+ # gh-15961: should accept numpy int64 type in subscript list
+ np_array = np.asarray([0, 0])
+ assert_equal(np.einsum(a, np_array, optimize=do_opt),
+ np.trace(a).astype(dtype))
+ assert_equal(np.einsum(a, list(np_array), optimize=do_opt),
+ np.trace(a).astype(dtype))
+
+ # multiply(a, b)
+ assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
+ for n in range(1, 17):
+ a = np.arange(3 * n, dtype=dtype).reshape(3, n)
+ b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+ assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
+ np.multiply(a, b))
+ assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
+ np.multiply(a, b))
+
+ # inner(a,b)
+ for n in range(1, 17):
+ a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
+ assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
+ np.inner(a, b))
+
+ for n in range(1, 11):
+ a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
+ np.inner(a.T, b.T).T)
+ assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
+ np.inner(a.T, b.T).T)
+
+ # outer(a,b)
+ for n in range(1, 17):
+ a = np.arange(3, dtype=dtype)+1
+ b = np.arange(n, dtype=dtype)+1
+ assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
+ np.outer(a, b))
+ assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
+ np.outer(a, b))
+
+ # Suppress the complex warnings for the 'as f8' tests
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
+
+ # matvec(a,b) / a.dot(b) where a is matrix, b is vector
+ for n in range(1, 17):
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
+ np.dot(a, b))
+ assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
+ np.dot(a, b))
+
+ c = np.arange(4, dtype=dtype)
+ np.einsum("ij,j", a, b, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+ c[...] = 0
+ np.einsum(a, [0, 1], b, [1], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+
+ for n in range(1, 17):
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n, dtype=dtype)
+ assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
+ np.dot(b.T, a.T))
+ assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
+ np.dot(b.T, a.T))
+
+ c = np.arange(4, dtype=dtype)
+ np.einsum("ji,j", a.T, b.T, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(b.T.astype('f8'),
+ a.T.astype('f8')).astype(dtype))
+ c[...] = 0
+ np.einsum(a.T, [1, 0], b.T, [1], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(b.T.astype('f8'),
+ a.T.astype('f8')).astype(dtype))
+
+ # matmat(a,b) / a.dot(b) where a is matrix, b is matrix
+ for n in range(1, 17):
+ if n < 8 or dtype != 'f2':
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n*6, dtype=dtype).reshape(n, 6)
+ assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
+ np.dot(a, b))
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
+ np.dot(a, b))
+
+ for n in range(1, 17):
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
+ b = np.arange(n*6, dtype=dtype).reshape(n, 6)
+ c = np.arange(24, dtype=dtype).reshape(4, 6)
+ np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
+ optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+ c[...] = 0
+ np.einsum(a, [0, 1], b, [1, 2], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c,
+ np.dot(a.astype('f8'),
+ b.astype('f8')).astype(dtype))
+
+ # matrix triple product (note this is not currently an efficient
+ # way to multiply 3 matrices)
+ a = np.arange(12, dtype=dtype).reshape(3, 4)
+ b = np.arange(20, dtype=dtype).reshape(4, 5)
+ c = np.arange(30, dtype=dtype).reshape(5, 6)
+ if dtype != 'f2':
+ assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
+ a.dot(b).dot(c))
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
+ optimize=do_opt), a.dot(b).dot(c))
+
+ d = np.arange(18, dtype=dtype).reshape(3, 6)
+ np.einsum("ij,jk,kl", a, b, c, out=d,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ tgt = a.astype('f8').dot(b.astype('f8'))
+ tgt = tgt.dot(c.astype('f8')).astype(dtype)
+ assert_equal(d, tgt)
+
+ d[...] = 0
+ np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ tgt = a.astype('f8').dot(b.astype('f8'))
+ tgt = tgt.dot(c.astype('f8')).astype(dtype)
+ assert_equal(d, tgt)
+
+ # tensordot(a, b)
+ if np.dtype(dtype) != np.dtype('f2'):
+ a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
+ b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
+ assert_equal(np.einsum("ijk, jil -> kl", a, b),
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
+ assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
+
+ c = np.arange(10, dtype=dtype).reshape(5, 2)
+ np.einsum("ijk,jil->kl", a, b, out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
+ axes=([1, 0], [0, 1])).astype(dtype))
+ c[...] = 0
+ np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
+ dtype='f8', casting='unsafe', optimize=do_opt)
+ assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
+ axes=([1, 0], [0, 1])).astype(dtype))
+
+ # logical_and(logical_and(a!=0, b!=0), c!=0)
+ neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1
+ a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype)
+ b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype)
+ c = np.array([True, True, False, True, True, False, True, True])
+
+ assert_equal(np.einsum("i,i,i->i", a, b, c,
+ dtype='?', casting='unsafe', optimize=do_opt),
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+ assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
+ dtype='?', casting='unsafe'),
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
+
+ a = np.arange(9, dtype=dtype)
+ assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
+ assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
+ assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
+ assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
+
+ # Various stride0, contiguous, and SSE aligned variants
+ for n in range(1, 25):
+ a = np.arange(n, dtype=dtype)
+ if np.dtype(dtype).itemsize > 1:
+ assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
+ np.multiply(a, a))
+ assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
+ assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
+ assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
+ assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
+ assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
+
+ assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
+ np.multiply(a[1:], a[:-1]))
+ assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
+ np.dot(a[1:], a[:-1]))
+ assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
+ assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
+ assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
+ 2*np.sum(a[1:]))
+ assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
+ 2*np.sum(a[1:]))
+
+ # An object array, summed as the data type
+ a = np.arange(9, dtype=object)
+
+ b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
+ assert_equal(b, np.sum(a))
+ assert_equal(b.dtype, np.dtype(dtype))
+
+ b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
+ assert_equal(b, np.sum(a))
+ assert_equal(b.dtype, np.dtype(dtype))
+
+ # A case which was failing (ticket #1885)
+ p = np.arange(2) + 1
+ q = np.arange(4).reshape(2, 2) + 3
+ r = np.arange(4).reshape(2, 2) + 7
+ assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
+
+ # singleton dimensions broadcast (gh-10343)
+        p = np.ones((10, 2))
+        q = np.ones((1, 2))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ np.einsum('ij,ij->j', p, q, optimize=False))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ [10.] * 2)
+
+ # a blas-compatible contraction broadcasting case which was failing
+ # for optimize=True (ticket #10930)
+ x = np.array([2., 3.])
+ y = np.array([4.])
+ assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
+ assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
+
+ # all-ones array was bypassing bug (ticket #10930)
+ p = np.ones((1, 5)) / 2
+ q = np.ones((5, 5)) / 2
+ for optimize in (True, False):
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
+ optimize=optimize),
+ np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize))
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize),
+ np.full((1, 5), 1.25))
+
+ # Cases which were failing (gh-10899)
+ x = np.eye(2, dtype=dtype)
+ y = np.ones(2, dtype=dtype)
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
+ [2.]) # contig_contig_outstride0_two
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
+ [2.]) # stride0_contig_outstride0_two
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
+ [2.]) # contig_stride0_outstride0_two
+
+ def test_einsum_sums_int8(self):
+ self.check_einsum_sums('i1')
+
+ def test_einsum_sums_uint8(self):
+ self.check_einsum_sums('u1')
+
+ def test_einsum_sums_int16(self):
+ self.check_einsum_sums('i2')
+
+ def test_einsum_sums_uint16(self):
+ self.check_einsum_sums('u2')
+
+ def test_einsum_sums_int32(self):
+ self.check_einsum_sums('i4')
+ self.check_einsum_sums('i4', True)
+
+ def test_einsum_sums_uint32(self):
+ self.check_einsum_sums('u4')
+ self.check_einsum_sums('u4', True)
+
+ def test_einsum_sums_int64(self):
+ self.check_einsum_sums('i8')
+
+ def test_einsum_sums_uint64(self):
+ self.check_einsum_sums('u8')
+
+ def test_einsum_sums_float16(self):
+ self.check_einsum_sums('f2')
+
+ def test_einsum_sums_float32(self):
+ self.check_einsum_sums('f4')
+
+ def test_einsum_sums_float64(self):
+ self.check_einsum_sums('f8')
+ self.check_einsum_sums('f8', True)
+
+ def test_einsum_sums_longdouble(self):
+ self.check_einsum_sums(np.longdouble)
+
+ def test_einsum_sums_cfloat64(self):
+ self.check_einsum_sums('c8')
+ self.check_einsum_sums('c8', True)
+
+ def test_einsum_sums_cfloat128(self):
+ self.check_einsum_sums('c16')
+
+ def test_einsum_sums_clongdouble(self):
+ self.check_einsum_sums(np.clongdouble)
+
+ def test_einsum_misc(self):
+ # This call used to crash because of a bug in
+ # PyArray_AssignZero
+ a = np.ones((1, 2))
+ b = np.ones((2, 2, 1))
+ assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
+
+ # Regression test for issue #10369 (test unicode inputs with Python 2)
+ assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
+ optimize='greedy'), 20)
+
+ # The iterator had an issue with buffering this reduction
+ a = np.ones((5, 12, 4, 2, 3), np.int64)
+ b = np.ones((5, 12, 11), np.int64)
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
+ np.einsum('ijklm,ijn->', a, b))
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
+ np.einsum('ijklm,ijn->', a, b, optimize=True))
+
+        # Issue #2027: there was a problem in the contiguous 3-argument
+        # inner loop implementation.
+ a = np.arange(1, 3)
+ b = np.arange(1, 5).reshape(2, 2)
+ c = np.arange(1, 9).reshape(4, 2)
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
+
+ # Ensure explicitly setting out=None does not cause an error
+ # see issue gh-15776 and issue gh-15256
+ assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
+
+ def test_subscript_range(self):
+        # Issue #7741: make sure that all letters of the Latin alphabet
+        # (both uppercase & lowercase) can be used
+ # when creating a subscript from arrays
+ a = np.ones((2, 3))
+ b = np.ones((3, 4))
+ np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
+ np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
+ assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
+ assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
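+        # (The valid indices 0..51 presumably map onto the 26 lowercase plus
+        # 26 uppercase letters; 52 and negative values are rejected above.)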
+
+ def test_einsum_broadcast(self):
+        # Issue #2455: changes in handling ellipsis:
+        # - remove the 'middle broadcast' error
+        # - only use the 'RIGHT' iteration in prepare_op_axes
+        # - add auto broadcast on the left where it belongs
+        # - broadcast on the right has to be explicit
+ # We need to test the optimized parsing as well
+
+ A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
+ B = np.arange(3)
+ ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
+
+ A = np.arange(12).reshape((4, 3))
+ B = np.arange(6).reshape((3, 2))
+ ref = np.einsum('ik,kj->ij', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
+
+ dims = [2, 3, 4, 5]
+ a = np.arange(np.prod(dims)).reshape(dims)
+ v = np.arange(dims[2])
+ ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+ assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
+
+ J, K, M = 160, 160, 120
+ A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
+ B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
+ ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+ optimize=opt), ref) # used to raise error
+
+ def test_einsum_fixedstridebug(self):
+ # Issue #4485 obscure einsum bug
+ # This case revealed a bug in nditer where it reported a stride
+ # as 'fixed' (0) when it was in fact not fixed during processing
+ # (0 or 4). The reason for the bug was that the check for a fixed
+ # stride was using the information from the 2D inner loop reuse
+ # to restrict the iteration dimensions it had to validate to be
+ # the same, but that 2D inner loop reuse logic is only triggered
+ # during the buffer copying step, and hence it was invalid to
+ # rely on those values. The fix is to check all the dimensions
+ # of the stride in question, which in the test case reveals that
+ # the stride is not fixed.
+ #
+        # NOTE: This test is triggered by the fact that the default buffer
+        # size used by einsum is 8192, and 3*2731 = 8193 is larger than
+        # that, which results in a mismatch between the buffering and the
+        # striding for operand A.
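+        # (That is, the 8193-element run presumably requires one full buffer
+        # pass plus a one-element remainder, which is what exposed the bogus
+        # fixed-stride assumption.)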
+ A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+ B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+ es = np.einsum('cl, cpx->lpx', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
+ # The following is the original test case from the bug report,
+ # made repeatable by changing random arrays to aranges.
+ A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+ B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+ es = np.einsum('cl, cpxy->lpxy', A, B)
+ tp = np.tensordot(A, B, axes=(0, 0))
+ assert_equal(es, tp)
+
+ def test_einsum_fixed_collapsingbug(self):
+ # Issue #5147.
+        # The bug only occurred when the output argument of einsum was used.
+ x = np.random.normal(0, 1, (5, 5, 5, 5))
+ y1 = np.zeros((5, 5))
+ np.einsum('aabb->ab', x, out=y1)
+ idx = np.arange(5)
+ y2 = x[idx[:, None], idx[:, None], idx, idx]
+ assert_equal(y1, y2)
+
+ def test_einsum_failed_on_p9_and_s390x(self):
+ # Issues gh-14692 and gh-12689
+        # A bug with signed vs unsigned char caused errors on power9 and s390x Linux
+ tensor = np.random.random_sample((10, 10, 10, 10))
+ x = np.einsum('ijij->', tensor)
+ y = tensor.trace(axis1=0, axis2=2).trace()
+ assert_allclose(x, y)
+
+ def test_einsum_all_contig_non_contig_output(self):
+ # Issue gh-5907, tests that the all contiguous special case
+ # actually checks the contiguity of the output
+ x = np.ones((5, 5))
+ out = np.ones(10)[::2]
+ correct_base = np.ones(10)
+ correct_base[::2] = 5
+ # Always worked (inner iteration is done with 0-stride):
+ np.einsum('mi,mi,mi->m', x, x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+ # Example 1:
+ out = np.ones(10)[::2]
+ np.einsum('im,im,im->m', x, x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+ # Example 2, buffering causes x to be contiguous but
+ # special cases do not catch the operation before:
+ out = np.ones((2, 2, 2))[..., 0]
+ correct_base = np.ones((2, 2, 2))
+ correct_base[..., 0] = 2
+ x = np.ones((2, 2), np.float32)
+ np.einsum('ij,jk->ik', x, x, out=out)
+ assert_array_equal(out.base, correct_base)
+
+ @pytest.mark.parametrize("dtype",
+ np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
+ def test_different_paths(self, dtype):
+ # Test originally added to cover broken float16 path: gh-20305
+ # Likely most are covered elsewhere, at least partially.
+ dtype = np.dtype(dtype)
+        # Simple test, designed to exercise most specialized code paths,
+ # note the +0.5 for floats. This makes sure we use a float value
+ # where the results must be exact.
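+        # (0.5 and its small multiples are exactly representable in binary
+        # floating point, so the sums below do not suffer rounding error.)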
+ arr = (np.arange(7) + 0.5).astype(dtype)
+ scalar = np.array(2, dtype=dtype)
+
+ # contig -> scalar:
+ res = np.einsum('i->', arr)
+ assert res == arr.sum()
+ # contig, contig -> contig:
+ res = np.einsum('i,i->i', arr, arr)
+ assert_array_equal(res, arr * arr)
+ # noncontig, noncontig -> contig:
+ res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
+ assert_array_equal(res, arr * arr)
+ # contig + contig -> scalar
+ assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
+ # contig + scalar -> contig (with out)
+ out = np.ones(7, dtype=dtype)
+ res = np.einsum('i,->i', arr, dtype.type(2), out=out)
+ assert_array_equal(res, arr * dtype.type(2))
+ # scalar + contig -> contig (with out)
+ res = np.einsum(',i->i', scalar, arr)
+ assert_array_equal(res, arr * dtype.type(2))
+ # scalar + contig -> scalar
+ res = np.einsum(',i->', scalar, arr)
+ # Use einsum to compare to not have difference due to sum round-offs:
+ assert res == np.einsum('i->', scalar * arr)
+ # contig + scalar -> scalar
+ res = np.einsum('i,->', arr, scalar)
+ # Use einsum to compare to not have difference due to sum round-offs:
+ assert res == np.einsum('i->', scalar * arr)
+ # contig + contig + contig -> scalar
+ arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
+ res = np.einsum('i,i,i->', arr, arr, arr)
+ assert_array_equal(res, (arr * arr * arr).sum())
+ # four arrays:
+ res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
+ assert_array_equal(res, (arr * arr * arr * arr).sum())
+
+ def test_small_boolean_arrays(self):
+ # See gh-5946.
+ # Use array of True embedded in False.
+ a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+ a[...] = True
+ out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
+ tgt = np.ones((2, 1, 1), dtype=np.bool_)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=out)
+ assert_equal(res, tgt)
+
+ def test_out_is_res(self):
+ a = np.arange(9).reshape(3, 3)
+ res = np.einsum('...ij,...jk->...ik', a, a, out=a)
+ assert res is a
+
+ def optimize_compare(self, subscripts, operands=None):
+ # Tests all paths of the optimization function against
+ # conventional einsum
+ if operands is None:
+ args = [subscripts]
+ terms = subscripts.split('->')[0].split(',')
+ for term in terms:
+ dims = [global_size_dict[x] for x in term]
+ args.append(np.random.rand(*dims))
+ else:
+ args = [subscripts] + operands
+
+ noopt = np.einsum(*args, optimize=False)
+ opt = np.einsum(*args, optimize='greedy')
+ assert_almost_equal(opt, noopt)
+ opt = np.einsum(*args, optimize='optimal')
+ assert_almost_equal(opt, noopt)
+
+ def test_hadamard_like_products(self):
+ # Hadamard outer products
+ self.optimize_compare('a,ab,abc->abc')
+ self.optimize_compare('a,b,ab->ab')
+
+ def test_index_transformations(self):
+ # Simple index transformation cases
+ self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
+ self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
+ self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
+
+ def test_complex(self):
+ # Long test cases
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
+ self.optimize_compare('abhe,hidj,jgba,hiab,gab')
+ self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
+ self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
+ self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
+ self.optimize_compare('bdhe,acad,hiab,agac,hibd')
+
+ def test_collapse(self):
+ # Inner products
+ self.optimize_compare('ab,ab,c->')
+ self.optimize_compare('ab,ab,c->c')
+ self.optimize_compare('ab,ab,cd,cd->')
+ self.optimize_compare('ab,ab,cd,cd->ac')
+ self.optimize_compare('ab,ab,cd,cd->cd')
+ self.optimize_compare('ab,ab,cd,cd,ef,ef->')
+
+ def test_expand(self):
+ # Outer products
+ self.optimize_compare('ab,cd,ef->abcdef')
+ self.optimize_compare('ab,cd,ef->acdf')
+ self.optimize_compare('ab,cd,de->abcde')
+ self.optimize_compare('ab,cd,de->be')
+ self.optimize_compare('ab,bcd,cd->abcd')
+ self.optimize_compare('ab,bcd,cd->abd')
+
+ def test_edge_cases(self):
+ # Difficult edge cases for optimization
+ self.optimize_compare('eb,cb,fb->cef')
+ self.optimize_compare('dd,fb,be,cdb->cef')
+ self.optimize_compare('bca,cdb,dbf,afc->')
+ self.optimize_compare('dcc,fce,ea,dbf->ab')
+ self.optimize_compare('fdf,cdd,ccd,afe->ae')
+ self.optimize_compare('abcd,ad')
+ self.optimize_compare('ed,fcd,ff,bcf->be')
+ self.optimize_compare('baa,dcf,af,cde->be')
+ self.optimize_compare('bd,db,eac->ace')
+ self.optimize_compare('fff,fae,bef,def->abd')
+ self.optimize_compare('efc,dbc,acf,fd->abe')
+ self.optimize_compare('ba,ac,da->bcd')
+
+ def test_inner_product(self):
+ # Inner products
+ self.optimize_compare('ab,ab')
+ self.optimize_compare('ab,ba')
+ self.optimize_compare('abc,abc')
+ self.optimize_compare('abc,bac')
+ self.optimize_compare('abc,cba')
+
+ def test_random_cases(self):
+ # Randomly built test cases
+ self.optimize_compare('aab,fa,df,ecc->bde')
+ self.optimize_compare('ecb,fef,bad,ed->ac')
+ self.optimize_compare('bcf,bbb,fbf,fc->')
+ self.optimize_compare('bb,ff,be->e')
+ self.optimize_compare('bcb,bb,fc,fff->')
+ self.optimize_compare('fbb,dfd,fc,fc->')
+ self.optimize_compare('afd,ba,cc,dc->bf')
+ self.optimize_compare('adb,bc,fa,cfc->d')
+ self.optimize_compare('bbd,bda,fc,db->acf')
+ self.optimize_compare('dba,ead,cad->bce')
+ self.optimize_compare('aef,fbc,dca->bde')
+
+ def test_combined_views_mapping(self):
+ # gh-10792
+ a = np.arange(9).reshape(1, 1, 3, 1, 3)
+ b = np.einsum('bbcdc->d', a)
+ assert_equal(b, [12])
+
+ def test_broadcasting_dot_cases(self):
+ # Ensures broadcasting cases are not mistaken for GEMM
+
+ a = np.random.rand(1, 5, 4)
+ b = np.random.rand(4, 6)
+ c = np.random.rand(5, 6)
+ d = np.random.rand(10)
+
+ self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+ self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+ e = np.random.rand(1, 1, 5, 4)
+ f = np.random.rand(7, 7)
+ self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+ self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+ # Edge case found in gh-11308
+ g = np.arange(64).reshape(2, 4, 8)
+ self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+ def test_output_order(self):
+        # Ensure output order is respected for optimize cases; the below
+        # contraction should yield a reshaped tensor view
+ # gh-16415
+
+ a = np.ones((2, 3, 5), order='F')
+ b = np.ones((4, 3), order='F')
+
+ for opt in [True, False]:
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
+ assert_(tmp.flags.f_contiguous)
+
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
+ assert_(tmp.flags.f_contiguous)
+
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
+ assert_(tmp.flags.c_contiguous)
+
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
+ assert_(tmp.flags.c_contiguous is False)
+ assert_(tmp.flags.f_contiguous is False)
+
+ tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
+ assert_(tmp.flags.c_contiguous is False)
+ assert_(tmp.flags.f_contiguous is False)
+
+ c = np.ones((4, 3), order='C')
+ for opt in [True, False]:
+ tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
+ assert_(tmp.flags.c_contiguous)
+
+ d = np.ones((2, 3, 5), order='C')
+ for opt in [True, False]:
+ tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
+ assert_(tmp.flags.c_contiguous)
+
+class TestEinsumPath:
+ def build_operands(self, string, size_dict=global_size_dict):
+
+        # Builds random operands from the subscript string
+ operands = [string]
+ terms = string.split('->')[0].split(',')
+ for term in terms:
+ dims = [size_dict[x] for x in term]
+ operands.append(np.random.rand(*dims))
+
+ return operands
+
+ def assert_path_equal(self, comp, benchmark):
+        # Checks that the lists of tuples are equivalent
+ ret = (len(comp) == len(benchmark))
+ assert_(ret)
+ for pos in range(len(comp) - 1):
+ ret &= isinstance(comp[pos + 1], tuple)
+ ret &= (comp[pos + 1] == benchmark[pos + 1])
+ assert_(ret)
+
+    def test_memory_constraints(self):
+ # Ensure memory constraints are satisfied
+
+ outer_test = self.build_operands('a,b,c->abc')
+
+ path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+ path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+ long_test = self.build_operands('acdf,jbje,gihb,hfac')
+ path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ def test_long_paths(self):
+ # Long complex cases
+
+ # Long test 1
+ long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+ path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+ # Long test 2
+ long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+ path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path',
+ (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path',
+ (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+ def test_edge_paths(self):
+ # Difficult edge cases
+
+ # Edge test1
+ edge_test1 = self.build_operands('eb,cb,fb->cef')
+ path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+ # Edge test2
+ edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+ path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+ # Edge test3
+ edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+ path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ # Edge test4
+ edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+ # Edge test5
+        edge_test5 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+        path, path_str = np.einsum_path(*edge_test5, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+        path, path_str = np.einsum_path(*edge_test5, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+ def test_path_type_input(self):
+ # Test explicit path handling
+ path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+ path, path_str = np.einsum_path(*path_test, optimize=False)
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+ path, path_str = np.einsum_path(*path_test, optimize=True)
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+ exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+ path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+ self.assert_path_equal(path, exp_path)
+
+ # Double check einsum works on the input path
+ noopt = np.einsum(*path_test, optimize=False)
+ opt = np.einsum(*path_test, optimize=exp_path)
+ assert_almost_equal(noopt, opt)
+
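+    # For context (informative, per the einsum_path documentation): an
+    # explicit path is a list whose first element is the literal string
+    # 'einsum_path'; each following tuple names the positions of the
+    # operands contracted at that step, and the intermediate result is
+    # appended to the end of the remaining operand list.
+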
+ def test_path_type_input_internal_trace(self):
+        # gh-20962
+ path_test = self.build_operands('cab,cdd->ab')
+ exp_path = ['einsum_path', (1,), (0, 1)]
+
+ path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+ self.assert_path_equal(path, exp_path)
+
+ # Double check einsum works on the input path
+ noopt = np.einsum(*path_test, optimize=False)
+ opt = np.einsum(*path_test, optimize=exp_path)
+ assert_almost_equal(noopt, opt)
+
+ def test_path_type_input_invalid(self):
+ path_test = self.build_operands('ab,bc,cd,de->ae')
+ exp_path = ['einsum_path', (2, 3), (0, 1)]
+ assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
+ assert_raises(
+ RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
+
+ path_test = self.build_operands('a,a,a->a')
+ exp_path = ['einsum_path', (1,), (0, 1)]
+ assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
+ assert_raises(
+ RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
+
+ def test_spaces(self):
+        # gh-10794
+ arr = np.array([[1]])
+ for sp in itertools.product(['', ' '], repeat=4):
+ # no error for any spacing
+ np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
+
+def test_overlap():
+ a = np.arange(9, dtype=int).reshape(3, 3)
+ b = np.arange(9, dtype=int).reshape(3, 3)
+ d = np.dot(a, b)
+ # sanity check
+ c = np.einsum('ij,jk->ik', a, b)
+ assert_equal(c, d)
+    # gh-10080: out overlaps one of the operands
+ c = np.einsum('ij,jk->ik', a, b, out=b)
+ assert_equal(c, d)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py
new file mode 100644
index 00000000..3a5647f6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_errstate.py
@@ -0,0 +1,61 @@
+import pytest
+import sysconfig
+
+import numpy as np
+from numpy.testing import assert_, assert_raises, IS_WASM
+
+# The floating point emulation on ARM EABI systems lacking a hardware FPU is
+# known to be buggy. This is an attempt to identify these hosts. It may not
+# catch all possible cases, but it catches the known cases of gh-413 and
+# gh-15562.
+hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
+arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
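+# For example, the soft-float ABI reports a host type such as
+# 'arm-linux-gnueabi' (which matches), while the hard-float
+# 'arm-linux-gnueabihf' does not end with 'gnueabi'.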
+
+class TestErrstate:
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.skipif(arm_softfloat,
+ reason='platform/cpu issue with FPU (gh-413,-15562)')
+ def test_invalid(self):
+ with np.errstate(all='raise', under='ignore'):
+ a = -np.arange(3)
+ # This should work
+ with np.errstate(invalid='ignore'):
+ np.sqrt(a)
+ # While this should fail!
+ with assert_raises(FloatingPointError):
+ np.sqrt(a)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.skipif(arm_softfloat,
+ reason='platform/cpu issue with FPU (gh-15562)')
+ def test_divide(self):
+ with np.errstate(all='raise', under='ignore'):
+ a = -np.arange(3)
+ # This should work
+ with np.errstate(divide='ignore'):
+ a // 0
+ # While this should fail!
+ with assert_raises(FloatingPointError):
+ a // 0
+ # As should this, see gh-15562
+ with assert_raises(FloatingPointError):
+ a // a
+
+ def test_errcall(self):
+ def foo(*args):
+ print(args)
+
+ olderrcall = np.geterrcall()
+ with np.errstate(call=foo):
+ assert_(np.geterrcall() is foo, 'call is not foo')
+ with np.errstate(call=None):
+ assert_(np.geterrcall() is None, 'call is not None')
+ assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
+
+ def test_errstate_decorator(self):
+ @np.errstate(all='ignore')
+ def foo():
+ a = -np.arange(3)
+ a // 0
+
+ foo()
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py
new file mode 100644
index 00000000..3b64915f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_extint128.py
@@ -0,0 +1,219 @@
+import itertools
+import contextlib
+import operator
+import pytest
+
+import numpy as np
+import numpy.core._multiarray_tests as mt
+
+from numpy.testing import assert_raises, assert_equal
+
+
+INT64_MAX = np.iinfo(np.int64).max
+INT64_MIN = np.iinfo(np.int64).min
+INT64_MID = 2**32
+
+# int128 is not two's complement; the sign bit is stored separately
+INT128_MAX = 2**128 - 1
+INT128_MIN = -INT128_MAX
+INT128_MID = 2**64
+
+INT64_VALUES = (
+ [INT64_MIN + j for j in range(20)] +
+ [INT64_MAX - j for j in range(20)] +
+ [INT64_MID + j for j in range(-20, 20)] +
+ [2*INT64_MID + j for j in range(-20, 20)] +
+ [INT64_MID//2 + j for j in range(-20, 20)] +
+ list(range(-70, 70))
+)
+
+INT128_VALUES = (
+ [INT128_MIN + j for j in range(20)] +
+ [INT128_MAX - j for j in range(20)] +
+ [INT128_MID + j for j in range(-20, 20)] +
+ [2*INT128_MID + j for j in range(-20, 20)] +
+ [INT128_MID//2 + j for j in range(-20, 20)] +
+ list(range(-70, 70)) +
+ [False] # negative zero
+)
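+
+# Illustration of the sign-magnitude convention above (informative): with
+# a separate sign bit the range is symmetric, unlike two's complement:
+#
+#   INT128_MAX ==  2**128 - 1
+#   INT128_MIN == -(2**128 - 1)    # not -2**128
+#
+# Per the inline comment above, False (== 0) stands in for the
+# negative-zero bit pattern that this representation admits.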
+
+INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
+
+
+@contextlib.contextmanager
+def exc_iter(*args):
+ """
+    Iterate over the Cartesian product of *args; if an exception is raised,
+    add information about the current iterate.
+ """
+
+ value = [None]
+
+ def iterate():
+ for v in itertools.product(*args):
+ value[0] = v
+ yield v
+
+ try:
+ yield iterate()
+ except Exception:
+ import traceback
+        msg = "At: %r\n%s" % (value[0], traceback.format_exc())
+ raise AssertionError(msg)
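+
+# Usage sketch (illustrative only, not part of the test suite): any
+# exception raised inside the loop is re-raised as an AssertionError
+# naming the failing iterate, e.g.:
+#
+#   with exc_iter([1, 0], [2, 0]) as it:
+#       for a, b in it:
+#           a // b   # ZeroDivisionError at (1, 0) -> "At: (1, 0)\n..."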
+
+
+def test_safe_binop():
+ # Test checked arithmetic routines
+
+ ops = [
+ (operator.add, 1),
+ (operator.sub, 2),
+ (operator.mul, 3)
+ ]
+
+ with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
+ for xop, a, b in it:
+ pyop, op = xop
+ c = pyop(a, b)
+
+ if not (INT64_MIN <= c <= INT64_MAX):
+ assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
+ else:
+ d = mt.extint_safe_binop(a, b, op)
+ if c != d:
+ # assert_equal is slow
+ assert_equal(d, c)
+
+
+def test_to_128():
+ with exc_iter(INT64_VALUES) as it:
+ for a, in it:
+ b = mt.extint_to_128(a)
+ if a != b:
+ assert_equal(b, a)
+
+
+def test_to_64():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ if not (INT64_MIN <= a <= INT64_MAX):
+ assert_raises(OverflowError, mt.extint_to_64, a)
+ else:
+ b = mt.extint_to_64(a)
+ if a != b:
+ assert_equal(b, a)
+
+
+def test_mul_64_64():
+ with exc_iter(INT64_VALUES, INT64_VALUES) as it:
+ for a, b in it:
+ c = a * b
+ d = mt.extint_mul_64_64(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_add_128():
+ with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+ for a, b in it:
+ c = a + b
+ if not (INT128_MIN <= c <= INT128_MAX):
+ assert_raises(OverflowError, mt.extint_add_128, a, b)
+ else:
+ d = mt.extint_add_128(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_sub_128():
+ with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+ for a, b in it:
+ c = a - b
+ if not (INT128_MIN <= c <= INT128_MAX):
+ assert_raises(OverflowError, mt.extint_sub_128, a, b)
+ else:
+ d = mt.extint_sub_128(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_neg_128():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ b = -a
+ c = mt.extint_neg_128(a)
+ if b != c:
+ assert_equal(c, b)
+
+
+def test_shl_128():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ if a < 0:
+ b = -(((-a) << 1) & (2**128-1))
+ else:
+ b = (a << 1) & (2**128-1)
+ c = mt.extint_shl_128(a)
+ if b != c:
+ assert_equal(c, b)
+
+
+def test_shr_128():
+ with exc_iter(INT128_VALUES) as it:
+ for a, in it:
+ if a < 0:
+ b = -((-a) >> 1)
+ else:
+ b = a >> 1
+ c = mt.extint_shr_128(a)
+ if b != c:
+ assert_equal(c, b)
+
+
+def test_gt_128():
+ with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+ for a, b in it:
+ c = a > b
+ d = mt.extint_gt_128(a, b)
+ if c != d:
+ assert_equal(d, c)
+
+
+@pytest.mark.slow
+def test_divmod_128_64():
+ with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+ for a, b in it:
+ if a >= 0:
+ c, cr = divmod(a, b)
+ else:
+ c, cr = divmod(-a, b)
+ c = -c
+ cr = -cr
+
+ d, dr = mt.extint_divmod_128_64(a, b)
+
+ if c != d or d != dr or b*d + dr != a:
+ assert_equal(d, c)
+ assert_equal(dr, cr)
+ assert_equal(b*d + dr, a)
+
+
+def test_floordiv_128_64():
+ with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+ for a, b in it:
+ c = a // b
+ d = mt.extint_floordiv_128_64(a, b)
+
+ if c != d:
+ assert_equal(d, c)
+
+
+def test_ceildiv_128_64():
+ with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+ for a, b in it:
+ c = (a + b - 1) // b
+ d = mt.extint_ceildiv_128_64(a, b)
+
+ if c != d:
+ assert_equal(d, c)
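+
+# Note on the reference computation above (informative): for b > 0,
+# ceil(a / b) == (a + b - 1) // b under Python's floor division,
+# e.g. (7 + 3 - 1) // 3 == 3 == ceil(7 / 3).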
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py
new file mode 100644
index 00000000..dad7a588
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_function_base.py
@@ -0,0 +1,409 @@
+from numpy import (
+ logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
+ ndarray, sqrt, nextafter, stack, errstate
+ )
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
+ )
+
+
+class PhysicalQuantity(float):
+ def __new__(cls, value):
+ return float.__new__(cls, value)
+
+ def __add__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) + float(self))
+ __radd__ = __add__
+
+ def __sub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(self) - float(x))
+
+ def __rsub__(self, x):
+ assert_(isinstance(x, PhysicalQuantity))
+ return PhysicalQuantity(float(x) - float(self))
+
+ def __mul__(self, x):
+ return PhysicalQuantity(float(x) * float(self))
+ __rmul__ = __mul__
+
+    # Python 3 only calls __truediv__/__rtruediv__; the legacy Python 2
+    # names __div__/__rdiv__ would never be invoked.
+    def __truediv__(self, x):
+        return PhysicalQuantity(float(self) / float(x))
+
+    def __rtruediv__(self, x):
+        return PhysicalQuantity(float(x) / float(self))
+
+
+class PhysicalQuantity2(ndarray):
+ __array_priority__ = 10
+
+
+class TestLogspace:
+
+ def test_basic(self):
+ y = logspace(0, 6)
+ assert_(len(y) == 50)
+ y = logspace(0, 6, num=100)
+ assert_(y[-1] == 10 ** 6)
+ y = logspace(0, 6, endpoint=False)
+ assert_(y[-1] < 10 ** 6)
+ y = logspace(0, 6, num=7)
+ assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+ def test_start_stop_array(self):
+ start = array([0., 1.])
+ stop = array([6., 7.])
+ t1 = logspace(start, stop, 6)
+ t2 = stack([logspace(_start, _stop, 6)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = logspace(start, stop[0], 6)
+ t4 = stack([logspace(_start, stop[0], 6)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = logspace(start, stop, 6, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_dtype(self):
+ y = logspace(0, 6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = logspace(0, 6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = logspace(0, 6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(logspace(a, b), logspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ ls = logspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0))
+ ls = logspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, logspace(1.0, 7.0, 1))
+
+
+class TestGeomspace:
+
+ def test_basic(self):
+ y = geomspace(1, 1e6)
+ assert_(len(y) == 50)
+ y = geomspace(1, 1e6, num=100)
+ assert_(y[-1] == 10 ** 6)
+ y = geomspace(1, 1e6, endpoint=False)
+ assert_(y[-1] < 10 ** 6)
+ y = geomspace(1, 1e6, num=7)
+ assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+
+ y = geomspace(8, 2, num=3)
+ assert_allclose(y, [8, 4, 2])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-1, -100, num=3)
+ assert_array_equal(y, [-1, -10, -100])
+ assert_array_equal(y.imag, 0)
+
+ y = geomspace(-100, -1, num=3)
+ assert_array_equal(y, [-100, -10, -1])
+ assert_array_equal(y.imag, 0)
+
+ def test_boundaries_match_start_and_stop_exactly(self):
+ # make sure that the boundaries of the returned array exactly
+ # equal 'start' and 'stop' - this isn't obvious because
+ # np.exp(np.log(x)) isn't necessarily exactly equal to x
+ start = 0.3
+ stop = 20.3
+
+ y = geomspace(start, stop, num=1)
+ assert_equal(y[0], start)
+
+ y = geomspace(start, stop, num=1, endpoint=False)
+ assert_equal(y[0], start)
+
+ y = geomspace(start, stop, num=3)
+ assert_equal(y[0], start)
+ assert_equal(y[-1], stop)
+
+ y = geomspace(start, stop, num=3, endpoint=False)
+ assert_equal(y[0], start)
+
+ def test_nan_interior(self):
+ with errstate(invalid='ignore'):
+ y = geomspace(-3, 3, num=4)
+
+ assert_equal(y[0], -3.0)
+ assert_(isnan(y[1:-1]).all())
+ assert_equal(y[3], 3.0)
+
+ with errstate(invalid='ignore'):
+ y = geomspace(-3, 3, num=4, endpoint=False)
+
+ assert_equal(y[0], -3.0)
+ assert_(isnan(y[1:]).all())
+
+ def test_complex(self):
+ # Purely imaginary
+ y = geomspace(1j, 16j, num=5)
+ assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(-4j, -324j, num=5)
+ assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
+ assert_array_equal(y.real, 0)
+
+ y = geomspace(1+1j, 1000+1000j, num=4)
+ assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])
+
+ y = geomspace(-1+1j, -1000+1000j, num=4)
+ assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])
+
+ # Logarithmic spirals
+ y = geomspace(-1, 1, num=3, dtype=complex)
+ assert_allclose(y, [-1, 1j, +1])
+
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(0+3j, 3+0j, 3)
+ assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
+ y = geomspace(-3+0j, 0-3j, 3)
+ assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
+ y = geomspace(0+3j, -3+0j, 3)
+ assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
+ y = geomspace(-2-3j, 5+7j, 7)
+ assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
+ 2.08885354-4.34146838j, 4.58345529-3.16355218j,
+ 6.41401745-0.55233457j, 6.75707386+3.11795092j,
+ 5+7j])
+
+ # Type promotion should prevent the -5 from becoming a NaN
+ y = geomspace(3j, -5, 2)
+ assert_allclose(y, [3j, -5])
+ y = geomspace(-5, 3j, 2)
+ assert_allclose(y, [-5, 3j])
+
+ def test_dtype(self):
+ y = geomspace(1, 1e6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = geomspace(1, 1e6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = geomspace(1, 1e6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ # Native types
+ y = geomspace(1, 1e6, dtype=float)
+ assert_equal(y.dtype, dtype('float_'))
+ y = geomspace(1, 1e6, dtype=complex)
+ assert_equal(y.dtype, dtype('complex'))
+
+ def test_start_stop_array_scalar(self):
+ lim1 = array([120, 100], dtype="int8")
+ lim2 = array([-120, -100], dtype="int8")
+ lim3 = array([1200, 1000], dtype="uint16")
+ t1 = geomspace(lim1[0], lim1[1], 5)
+ t2 = geomspace(lim2[0], lim2[1], 5)
+ t3 = geomspace(lim3[0], lim3[1], 5)
+ t4 = geomspace(120.0, 100.0, 5)
+ t5 = geomspace(-120.0, -100.0, 5)
+ t6 = geomspace(1200.0, 1000.0, 5)
+
+ # t3 uses float32, t6 uses float64
+ assert_allclose(t1, t4, rtol=1e-2)
+ assert_allclose(t2, t5, rtol=1e-2)
+ assert_allclose(t3, t6, rtol=1e-5)
+
+ def test_start_stop_array(self):
+ # Try to use all special cases.
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
+ stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
+ t1 = geomspace(start, stop, 5)
+ t2 = stack([geomspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = geomspace(start, stop[0], 5)
+ t4 = stack([geomspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = geomspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(1.0)
+ b = PhysicalQuantity(5.0)
+ assert_equal(geomspace(a, b), geomspace(1.0, 5.0))
+
+ def test_subclass(self):
+ a = array(1).view(PhysicalQuantity2)
+ b = array(7).view(PhysicalQuantity2)
+ gs = geomspace(a, b)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0))
+ gs = geomspace(a, b, 1)
+ assert type(gs) is PhysicalQuantity2
+ assert_equal(gs, geomspace(1.0, 7.0, 1))
+
+ def test_bounds(self):
+ assert_raises(ValueError, geomspace, 0, 10)
+ assert_raises(ValueError, geomspace, 10, 0)
+ assert_raises(ValueError, geomspace, 0, 0)
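+
+    # Rationale (informative): a geometric progression that starts or ends
+    # at zero has no finite common ratio, so zero endpoints are rejected.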
+
+
+class TestLinspace:
+
+ def test_basic(self):
+ y = linspace(0, 10)
+ assert_(len(y) == 50)
+ y = linspace(2, 10, num=100)
+ assert_(y[-1] == 10)
+ y = linspace(2, 10, endpoint=False)
+ assert_(y[-1] < 10)
+ assert_raises(ValueError, linspace, 0, 10, num=-1)
+
+ def test_corner(self):
+ y = list(linspace(0, 1, 1))
+ assert_(y == [0.0], y)
+ assert_raises(TypeError, linspace, 0, 1, num=2.5)
+
+ def test_type(self):
+ t1 = linspace(0, 1, 0).dtype
+ t2 = linspace(0, 1, 1).dtype
+ t3 = linspace(0, 1, 2).dtype
+ assert_equal(t1, t2)
+ assert_equal(t2, t3)
+
+ def test_dtype(self):
+ y = linspace(0, 6, dtype='float32')
+ assert_equal(y.dtype, dtype('float32'))
+ y = linspace(0, 6, dtype='float64')
+ assert_equal(y.dtype, dtype('float64'))
+ y = linspace(0, 6, dtype='int32')
+ assert_equal(y.dtype, dtype('int32'))
+
+ def test_start_stop_array_scalar(self):
+ lim1 = array([-120, 100], dtype="int8")
+ lim2 = array([120, -100], dtype="int8")
+ lim3 = array([1200, 1000], dtype="uint16")
+ t1 = linspace(lim1[0], lim1[1], 5)
+ t2 = linspace(lim2[0], lim2[1], 5)
+ t3 = linspace(lim3[0], lim3[1], 5)
+ t4 = linspace(-120.0, 100.0, 5)
+ t5 = linspace(120.0, -100.0, 5)
+ t6 = linspace(1200.0, 1000.0, 5)
+ assert_equal(t1, t4)
+ assert_equal(t2, t5)
+ assert_equal(t3, t6)
+
+ def test_start_stop_array(self):
+ start = array([-120, 120], dtype="int8")
+ stop = array([100, -100], dtype="int8")
+ t1 = linspace(start, stop, 5)
+ t2 = stack([linspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = linspace(start, stop[0], 5)
+ t4 = stack([linspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = linspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
+ def test_complex(self):
+ lim1 = linspace(1 + 2j, 3 + 4j, 5)
+ t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
+ lim2 = linspace(1j, 10, 5)
+ t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
+ assert_equal(lim1, t1)
+ assert_equal(lim2, t2)
+
+ def test_physical_quantities(self):
+ a = PhysicalQuantity(0.0)
+ b = PhysicalQuantity(1.0)
+ assert_equal(linspace(a, b), linspace(0.0, 1.0))
+
+ def test_subclass(self):
+ a = array(0).view(PhysicalQuantity2)
+ b = array(1).view(PhysicalQuantity2)
+ ls = linspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, linspace(0.0, 1.0))
+ ls = linspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, linspace(0.0, 1.0, 1))
+
+ def test_array_interface(self):
+ # Regression test for https://github.com/numpy/numpy/pull/6659
+ # Ensure that start/stop can be objects that implement
+ # __array_interface__ and are convertible to numeric scalars
+
+ class Arrayish:
+ """
+            A generic object that supports the __array_interface__ and
+            hence can in principle be converted to a numeric scalar, but
+            is not otherwise recognized as numeric. It also happens to
+            support multiplication by floats.
+
+            Data should be an object that implements the buffer interface
+            and contains at least 4 bytes.
+ """
+
+ def __init__(self, data):
+ self._data = data
+
+ @property
+ def __array_interface__(self):
+ return {'shape': (), 'typestr': '<i4', 'data': self._data,
+ 'version': 3}
+
+ def __mul__(self, other):
+ # For the purposes of this test any multiplication is an
+ # identity operation :)
+ return self
+
+ one = Arrayish(array(1, dtype='<i4'))
+ five = Arrayish(array(5, dtype='<i4'))
+
+ assert_equal(linspace(one, five), linspace(1, 5))
+
+ def test_denormal_numbers(self):
+ # Regression test for gh-5437. Will probably fail when compiled
+ # with ICC, which flushes denormals to zero
+ for ftype in sctypes['float']:
+ stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number
+ assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))
+
+ def test_equivalent_to_arange(self):
+ for j in range(1000):
+ assert_equal(linspace(0, j, j+1, dtype=int),
+ arange(j+1, dtype=int))
+
+ def test_retstep(self):
+ for num in [0, 1, 2]:
+ for ept in [False, True]:
+ y = linspace(0, 1, num, endpoint=ept, retstep=True)
+ assert isinstance(y, tuple) and len(y) == 2
+ if num == 2:
+ y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
+ assert_array_equal(y[0], y0_expect)
+ assert_equal(y[1], y0_expect[1])
+ elif num == 1 and not ept:
+ assert_array_equal(y[0], [0.0])
+ assert_equal(y[1], 1.0)
+ else:
+ assert_array_equal(y[0], [0.0][:num])
+ assert isnan(y[1])
+
+ def test_object(self):
+ start = array(1, dtype='O')
+ stop = array(2, dtype='O')
+ y = linspace(start, stop, 3)
+ assert_array_equal(y, array([1., 1.5, 2.]))
+
+ def test_round_negative(self):
+ y = linspace(-1, 3, num=8, dtype=int)
+ t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int)
+ assert_array_equal(y, t)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_getlimits.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_getlimits.py
new file mode 100644
index 00000000..b8aaba38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_getlimits.py
@@ -0,0 +1,147 @@
+""" Test functions for limits module.
+
+"""
+import warnings
+import numpy as np
+from numpy.core import finfo, iinfo
+from numpy import half, single, double, longdouble
+from numpy.testing import assert_equal, assert_, assert_raises
+from numpy.core.getlimits import _discovered_machar, _float_ma
+
+##################################################
+
+class TestPythonFloat:
+ def test_singleton(self):
+ ftype = finfo(float)
+ ftype2 = finfo(float)
+ assert_equal(id(ftype), id(ftype2))
+
+class TestHalf:
+ def test_singleton(self):
+ ftype = finfo(half)
+ ftype2 = finfo(half)
+ assert_equal(id(ftype), id(ftype2))
+
+class TestSingle:
+ def test_singleton(self):
+ ftype = finfo(single)
+ ftype2 = finfo(single)
+ assert_equal(id(ftype), id(ftype2))
+
+class TestDouble:
+ def test_singleton(self):
+ ftype = finfo(double)
+ ftype2 = finfo(double)
+ assert_equal(id(ftype), id(ftype2))
+
+class TestLongdouble:
+ def test_singleton(self):
+ ftype = finfo(longdouble)
+ ftype2 = finfo(longdouble)
+ assert_equal(id(ftype), id(ftype2))
+
+class TestFinfo:
+ def test_basic(self):
+ dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
+ [np.float16, np.float32, np.float64, np.complex64,
+ np.complex128]))
+ for dt1, dt2 in dts:
+ for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep',
+ 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
+ 'nmant', 'precision', 'resolution', 'tiny',
+ 'smallest_normal', 'smallest_subnormal'):
+ assert_equal(getattr(finfo(dt1), attr),
+ getattr(finfo(dt2), attr), attr)
+ assert_raises(ValueError, finfo, 'i4')
+
+class TestIinfo:
+ def test_basic(self):
+ dts = list(zip(['i1', 'i2', 'i4', 'i8',
+ 'u1', 'u2', 'u4', 'u8'],
+ [np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64]))
+ for dt1, dt2 in dts:
+ for attr in ('bits', 'min', 'max'):
+ assert_equal(getattr(iinfo(dt1), attr),
+ getattr(iinfo(dt2), attr), attr)
+ assert_raises(ValueError, iinfo, 'f4')
+
+ def test_unsigned_max(self):
+ types = np.sctypes['uint']
+ for T in types:
+ with np.errstate(over="ignore"):
+ max_calculated = T(0) - T(1)
+ assert_equal(iinfo(T).max, max_calculated)
+
+class TestRepr:
+ def test_iinfo_repr(self):
+ expected = "iinfo(min=-32768, max=32767, dtype=int16)"
+ assert_equal(repr(np.iinfo(np.int16)), expected)
+
+ def test_finfo_repr(self):
+ expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \
+ " max=3.4028235e+38, dtype=float32)"
+ assert_equal(repr(np.finfo(np.float32)), expected)
+
+
+def test_instances():
+ iinfo(10)
+ finfo(3.0)
+
+
+def assert_ma_equal(discovered, ma_like):
+    # Check that MachAr-like objects match the calculated MachAr instances
+ for key, value in discovered.__dict__.items():
+ assert_equal(value, getattr(ma_like, key))
+ if hasattr(value, 'shape'):
+ assert_equal(value.shape, getattr(ma_like, key).shape)
+ assert_equal(value.dtype, getattr(ma_like, key).dtype)
+
+
+def test_known_types():
+ # Test we are correctly compiling parameters for known types
+ for ftype, ma_like in ((np.float16, _float_ma[16]),
+ (np.float32, _float_ma[32]),
+ (np.float64, _float_ma[64])):
+ assert_ma_equal(_discovered_machar(ftype), ma_like)
+ # Suppress warning for broken discovery of double double on PPC
+ with np.errstate(all='ignore'):
+ ld_ma = _discovered_machar(np.longdouble)
+ bytes = np.dtype(np.longdouble).itemsize
+ if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
+ # 80-bit extended precision
+ assert_ma_equal(ld_ma, _float_ma[80])
+ elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
+        # IEEE 754 128-bit
+ assert_ma_equal(ld_ma, _float_ma[128])
+
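+# Background for the discovery constants above (informative): the x86
+# 80-bit extended format has 63 fraction bits in its significand (plus an
+# explicit integer bit), so discovery reports it == 63, while IEEE 754
+# binary128 stores 112 fraction bits; together with maxexp == 16384 the
+# (it, maxexp) pair identifies the longdouble layout.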
+
+def test_subnormal_warning():
+    """Test that the 'subnormal is zero' warning is not raised."""
+ with np.errstate(all='ignore'):
+ ld_ma = _discovered_machar(np.longdouble)
+ bytes = np.dtype(np.longdouble).itemsize
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
+ # 80-bit extended precision
+ ld_ma.smallest_subnormal
+ assert len(w) == 0
+ elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
+            # IEEE 754 128-bit
+ ld_ma.smallest_subnormal
+ assert len(w) == 0
+ else:
+ # Double double
+ ld_ma.smallest_subnormal
+ # This test may fail on some platforms
+ assert len(w) == 0
+
+
+def test_plausible_finfo():
+ # Assert that finfo returns reasonable results for all types
+ for ftype in np.sctypes['float'] + np.sctypes['complex']:
+ info = np.finfo(ftype)
+ assert_(info.nmant > 1)
+ assert_(info.minexp < -1)
+ assert_(info.maxexp > 1)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_half.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_half.py
new file mode 100644
index 00000000..ca849ad5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_half.py
@@ -0,0 +1,563 @@
+import platform
+import pytest
+
+import numpy as np
+from numpy import uint16, float16, float32, float64
+from numpy.testing import assert_, assert_equal, _OLD_PROMOTION, IS_WASM
+
+
+def assert_raises_fpe(strmatch, callable, *args, **kwargs):
+ try:
+ callable(*args, **kwargs)
+ except FloatingPointError as exc:
+ assert_(str(exc).find(strmatch) >= 0,
+ "Did not raise floating point %s error" % strmatch)
+ else:
+ assert_(False,
+ "Did not raise floating point %s error" % strmatch)
+
+class TestHalf:
+ def setup_method(self):
+ # An array of all possible float16 values
+ self.all_f16 = np.arange(0x10000, dtype=uint16)
+ self.all_f16.dtype = float16
+ self.all_f32 = np.array(self.all_f16, dtype=float32)
+ self.all_f64 = np.array(self.all_f16, dtype=float64)
+
+ # An array of all non-NaN float16 values, in sorted order
+ self.nonan_f16 = np.concatenate(
+ (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
+ np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
+ self.nonan_f16.dtype = float16
+ self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
+ self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
+
+ # An array of all finite float16 values, in sorted order
+ self.finite_f16 = self.nonan_f16[1:-1]
+ self.finite_f32 = self.nonan_f32[1:-1]
+ self.finite_f64 = self.nonan_f64[1:-1]
+
+ def test_half_conversions(self):
+ """Checks that all 16-bit values survive conversion
+ to/from 32-bit and 64-bit float"""
+ # Because the underlying routines preserve the NaN bits, every
+ # value is preserved when converting to/from other floats.
+
+ # Convert from float32 back to float16
+ b = np.array(self.all_f32, dtype=float16)
+ assert_equal(self.all_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Convert from float64 back to float16
+ b = np.array(self.all_f64, dtype=float16)
+ assert_equal(self.all_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Convert float16 to longdouble and back
+ # This doesn't necessarily preserve the extra NaN bits,
+ # so exclude NaNs.
+ a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
+ b = np.array(a_ld, dtype=float16)
+ assert_equal(self.nonan_f16.view(dtype=uint16),
+ b.view(dtype=uint16))
+
+ # Check the range for which all integers can be represented
+ i_int = np.arange(-2048, 2049)
+ i_f16 = np.array(i_int, dtype=float16)
+ j = np.array(i_f16, dtype=int)
+ assert_equal(i_int, j)
+
+ @pytest.mark.parametrize("string_dt", ["S", "U"])
+ def test_half_conversion_to_string(self, string_dt):
+ # Currently uses S/U32 (which is sufficient for float32)
+ expected_dt = np.dtype(f"{string_dt}32")
+ assert np.promote_types(np.float16, string_dt) == expected_dt
+ assert np.promote_types(string_dt, np.float16) == expected_dt
+
+ arr = np.ones(3, dtype=np.float16).astype(string_dt)
+ assert arr.dtype == expected_dt
+
+ @pytest.mark.parametrize("string_dt", ["S", "U"])
+ def test_half_conversion_from_string(self, string_dt):
+ string = np.array("3.1416", dtype=string_dt)
+ assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
+
+ @pytest.mark.parametrize("offset", [None, "up", "down"])
+ @pytest.mark.parametrize("shift", [None, "up", "down"])
+ @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+ @np._no_nep50_warning()
+ def test_half_conversion_rounding(self, float_t, shift, offset):
+ # Assumes that round to even is used during casting.
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+ # Test all (positive) finite numbers, denormals are most interesting
+ # however:
+ f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+ f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+        # Shift the values by half a bit up or down (or do not shift).
+ if shift == "up":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+ elif shift == "down":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+ else:
+ f16s_float = f16s_float[1:-1]
+
+ # Increase the float by a minimal value:
+ if offset == "up":
+ f16s_float = np.nextafter(f16s_float, float_t(np.inf))
+ elif offset == "down":
+ f16s_float = np.nextafter(f16s_float, float_t(-np.inf))
+
+ # Convert back to float16 and its bit pattern:
+ res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+        # The above calculation tries the original values, or the exact
+        # midpoints between the float16 values. It then offsets them
+        # further by as little as possible. If no offset occurs, "round
+        # to even" logic is necessary; an arbitrarily small offset should
+        # always cause normal up/down rounding.
+
+ # Calculate the expected pattern:
+ cmp_patterns = f16s_patterns[1:-1].copy()
+
+ if shift == "down" and offset != "up":
+ shift_pattern = -1
+ elif shift == "up" and offset != "down":
+ shift_pattern = 1
+ else:
+            # There cannot be a shift: either shift is None, so all rounding
+            # goes back to the original, or the shift is undone by the offset.
+ shift_pattern = 0
+
+ # If rounding occurs, is it normal rounding or round to even?
+ if offset is None:
+ # Round to even occurs, modify only non-even, cast to allow + (-1)
+ cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+ else:
+ cmp_patterns.view(np.int16)[...] += shift_pattern
+
+ assert_equal(res_patterns, cmp_patterns)
+
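+    # A worked instance of the round-to-even rule exercised above
+    # (standalone sketch): 2.0**-25 lies exactly halfway between the two
+    # nearest float16 values, 0 and the smallest subnormal 2**-24, so the
+    # cast picks the candidate with the even bit pattern:
+    #
+    #   np.float64(2.0**-25).astype(np.float16)             # -> 0.0
+    #   np.float64(2.0**-25 + 2.0**-35).astype(np.float16)  # -> 2**-24
+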
+ @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+ [(np.float32, np.uint32, 23),
+ (np.float64, np.uint64, 52)])
+ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+        # Test specifically that all bits are considered when deciding
+        # whether round to even should occur (i.e. no bits are lost at
+        # the end). Compare also gh-12721. The most bits can get lost
+        # for the smallest denormal:
+ smallest_value = np.uint16(1).view(np.float16).astype(float_t)
+ assert smallest_value == 2**-24
+
+ # Will be rounded to zero based on round to even rule:
+ rounded_to_zero = smallest_value / float_t(2)
+ assert rounded_to_zero.astype(np.float16) == 0
+
+        # The significand will be all zeros for the float_t; test that we
+        # do not lose its lower bits:
+ for i in range(bits):
+ # slightly increasing the value should make it round up:
+ larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
+ larger_value = larger_pattern.view(float_t)
+ assert larger_value.astype(np.float16) == smallest_value
+
+ def test_nans_infs(self):
+ with np.errstate(all='ignore'):
+ # Check some of the ufuncs
+ assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
+ assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
+ assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
+ assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
+ assert_equal(np.spacing(float16(65504)), np.inf)
+
+ # Check comparisons of all values with NaN
+ nan = float16(np.nan)
+
+ assert_(not (self.all_f16 == nan).any())
+ assert_(not (nan == self.all_f16).any())
+
+ assert_((self.all_f16 != nan).all())
+ assert_((nan != self.all_f16).all())
+
+ assert_(not (self.all_f16 < nan).any())
+ assert_(not (nan < self.all_f16).any())
+
+ assert_(not (self.all_f16 <= nan).any())
+ assert_(not (nan <= self.all_f16).any())
+
+ assert_(not (self.all_f16 > nan).any())
+ assert_(not (nan > self.all_f16).any())
+
+ assert_(not (self.all_f16 >= nan).any())
+ assert_(not (nan >= self.all_f16).any())
+
+ def test_half_values(self):
+ """Confirms a small number of known half values"""
+ a = np.array([1.0, -1.0,
+ 2.0, -2.0,
+ 0.0999755859375, 0.333251953125, # 1/10, 1/3
+ 65504, -65504, # Maximum magnitude
+ 2.0**(-14), -2.0**(-14), # Minimum normal
+ 2.0**(-24), -2.0**(-24), # Minimum subnormal
+ 0, -1/1e1000, # Signed zeros
+ np.inf, -np.inf])
+ b = np.array([0x3c00, 0xbc00,
+ 0x4000, 0xc000,
+ 0x2e66, 0x3555,
+ 0x7bff, 0xfbff,
+ 0x0400, 0x8400,
+ 0x0001, 0x8001,
+ 0x0000, 0x8000,
+ 0x7c00, 0xfc00], dtype=uint16)
+ b.dtype = float16
+ assert_equal(a, b)
+
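+    # Decoding one pattern above as a check (informative): float16 packs
+    # 1 sign bit, 5 exponent bits (bias 15) and 10 mantissa bits, so
+    # 0x3c00 is sign=0, exponent=15-15=0, mantissa=0 -> 1.0, and 0x0400
+    # is the smallest normal, 2**(1-15) == 2**-14.
+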
+ def test_half_rounding(self):
+ """Checks that rounding when converting to half is correct"""
+ a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
+ 2.0**-25, # Underflows to zero (nearest even mode)
+ 2.0**-26, # Underflows to zero
+ 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
+ 1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
+ 1.0+2.0**-12, # rounds to 1.0
+ 65519, # rounds to 65504
+ 65520], # rounds to inf
+ dtype=float64)
+ rounded = [2.0**-24,
+ 0.0,
+ 0.0,
+ 1.0+2.0**(-10),
+ 1.0,
+ 1.0,
+ 65504,
+ np.inf]
+
+ # Check float64->float16 rounding
+ with np.errstate(over="ignore"):
+ b = np.array(a, dtype=float16)
+ assert_equal(b, rounded)
+
+ # Check float32->float16 rounding
+ a = np.array(a, dtype=float32)
+ with np.errstate(over="ignore"):
+ b = np.array(a, dtype=float16)
+ assert_equal(b, rounded)
+
+ def test_half_correctness(self):
+ """Take every finite float16, and check the casting functions with
+ a manual conversion."""
+
+ # Create an array of all finite float16s
+ a_bits = self.finite_f16.view(dtype=uint16)
+
+ # Convert to 64-bit float manually
+ a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
+ a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
+ a_man = (a_bits & 0x03ff) * 2.0**(-10)
+ # Implicit bit of normalized floats
+ a_man[a_exp != -15] += 1
+ # Denormalized exponent is -14
+ a_exp[a_exp == -15] = -14
+
+ a_manual = a_sgn * a_man * 2.0**a_exp
+
+ a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
+ if len(a32_fail) != 0:
+ bad_index = a32_fail[0]
+ assert_equal(self.finite_f32, a_manual,
+ "First non-equal is half value %x -> %g != %g" %
+ (self.finite_f16[bad_index],
+ self.finite_f32[bad_index],
+ a_manual[bad_index]))
+
+ a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
+ if len(a64_fail) != 0:
+ bad_index = a64_fail[0]
+ assert_equal(self.finite_f64, a_manual,
+ "First non-equal is half value %x -> %g != %g" %
+ (self.finite_f16[bad_index],
+ self.finite_f64[bad_index],
+ a_manual[bad_index]))
+
+ def test_half_ordering(self):
+ """Make sure comparisons are working right"""
+
+ # All non-NaN float16 values in reverse order
+ a = self.nonan_f16[::-1].copy()
+
+ # 32-bit float copy
+ b = np.array(a, dtype=float32)
+
+ # Should sort the same
+ a.sort()
+ b.sort()
+ assert_equal(a, b)
+
+ # Comparisons should work
+ assert_((a[:-1] <= a[1:]).all())
+ assert_(not (a[:-1] > a[1:]).any())
+ assert_((a[1:] >= a[:-1]).all())
+ assert_(not (a[1:] < a[:-1]).any())
+ # All != except for +/-0
+ assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
+ assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
+
+ def test_half_funcs(self):
+ """Test the various ArrFuncs"""
+
+ # fill
+ assert_equal(np.arange(10, dtype=float16),
+ np.arange(10, dtype=float32))
+
+ # fillwithscalar
+ a = np.zeros((5,), dtype=float16)
+ a.fill(1)
+ assert_equal(a, np.ones((5,), dtype=float16))
+
+ # nonzero and copyswap
+ a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
+ assert_equal(a.nonzero()[0],
+ [2, 5, 6])
+ a = a.byteswap().newbyteorder()
+ assert_equal(a.nonzero()[0],
+ [2, 5, 6])
+
+ # dot
+ a = np.arange(0, 10, 0.5, dtype=float16)
+ b = np.ones((20,), dtype=float16)
+ assert_equal(np.dot(a, b),
+ 95)
+
+ # argmax
+ a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
+ assert_equal(a.argmax(),
+ 4)
+ a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
+ assert_equal(a.argmax(),
+ 5)
+
+ # getitem
+ a = np.arange(10, dtype=float16)
+ for i in range(10):
+ assert_equal(a.item(i), i)
+
+ def test_spacing_nextafter(self):
+ """Test np.spacing and np.nextafter"""
+ # All non-negative finite #'s
+ a = np.arange(0x7c00, dtype=uint16)
+ hinf = np.array((np.inf,), dtype=float16)
+ hnan = np.array((np.nan,), dtype=float16)
+ a_f16 = a.view(dtype=float16)
+
+ assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
+
+ assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
+ assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
+ assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
+
+ assert_equal(np.nextafter(hinf, a_f16), a_f16[-1])
+ assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1])
+
+ assert_equal(np.nextafter(hinf, hinf), hinf)
+ assert_equal(np.nextafter(hinf, -hinf), a_f16[-1])
+ assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1])
+ assert_equal(np.nextafter(-hinf, -hinf), -hinf)
+
+ assert_equal(np.nextafter(a_f16, hnan), hnan[0])
+ assert_equal(np.nextafter(hnan, a_f16), hnan[0])
+
+ assert_equal(np.nextafter(hnan, hnan), hnan)
+ assert_equal(np.nextafter(hinf, hnan), hnan)
+ assert_equal(np.nextafter(hnan, hinf), hnan)
+
+ # switch to negatives
+ a |= 0x8000
+
+ assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
+ assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
+
+ assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
+ assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
+ assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
+
+ assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1])
+ assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1])
+
+ assert_equal(np.nextafter(a_f16, hnan), hnan[0])
+ assert_equal(np.nextafter(hnan, a_f16), hnan[0])
+
+ def test_half_ufuncs(self):
+ """Test the various ufuncs"""
+
+ a = np.array([0, 1, 2, 4, 2], dtype=float16)
+ b = np.array([-2, 5, 1, 4, 3], dtype=float16)
+ c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
+
+ assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
+ assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
+ assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
+ assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
+
+ assert_equal(np.equal(a, b), [False, False, False, True, False])
+ assert_equal(np.not_equal(a, b), [True, True, True, False, True])
+ assert_equal(np.less(a, b), [False, True, False, False, True])
+ assert_equal(np.less_equal(a, b), [False, True, False, True, True])
+ assert_equal(np.greater(a, b), [True, False, True, False, False])
+ assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
+ assert_equal(np.logical_and(a, b), [False, True, True, True, True])
+ assert_equal(np.logical_or(a, b), [True, True, True, True, True])
+ assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
+ assert_equal(np.logical_not(a), [True, False, False, False, False])
+
+ assert_equal(np.isnan(c), [False, False, False, True, False])
+ assert_equal(np.isinf(c), [False, False, True, False, False])
+ assert_equal(np.isfinite(c), [True, True, False, False, True])
+ assert_equal(np.signbit(b), [True, False, False, False, False])
+
+ assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
+
+ assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
+
+ x = np.maximum(b, c)
+ assert_(np.isnan(x[3]))
+ x[3] = 0
+ assert_equal(x, [0, 5, 1, 0, 6])
+
+ assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
+
+ x = np.minimum(b, c)
+ assert_(np.isnan(x[3]))
+ x[3] = 0
+ assert_equal(x, [-2, -1, -np.inf, 0, 3])
+
+ assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
+ assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
+ assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
+ assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
+
+ assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
+ assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
+ assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
+ assert_equal(np.square(b), [4, 25, 1, 16, 9])
+ assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
+ assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
+ assert_equal(np.conjugate(b), b)
+ assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
+ assert_equal(np.negative(b), [2, -5, -1, -4, -3])
+ assert_equal(np.positive(b), b)
+ assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
+ assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
+ assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
+ assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
+
+ @np._no_nep50_warning()
+ def test_half_coercion(self, weak_promotion):
+ """Test that half gets coerced properly with the other types"""
+ a16 = np.array((1,), dtype=float16)
+ a32 = np.array((1,), dtype=float32)
+ b16 = float16(1)
+ b32 = float32(1)
+
+ assert np.power(a16, 2).dtype == float16
+ assert np.power(a16, 2.0).dtype == float16
+ assert np.power(a16, b16).dtype == float16
+ expected_dt = float32 if weak_promotion else float16
+ assert np.power(a16, b32).dtype == expected_dt
+ assert np.power(a16, a16).dtype == float16
+ assert np.power(a16, a32).dtype == float32
+
+ expected_dt = float16 if weak_promotion else float64
+ assert np.power(b16, 2).dtype == expected_dt
+ assert np.power(b16, 2.0).dtype == expected_dt
+        assert np.power(b16, b16).dtype == float16
+        assert np.power(b16, b32).dtype == float32
+        assert np.power(b16, a16).dtype == float16
+        assert np.power(b16, a32).dtype == float32
+
+ assert np.power(a32, a16).dtype == float32
+ assert np.power(a32, b16).dtype == float32
+ expected_dt = float32 if weak_promotion else float16
+ assert np.power(b32, a16).dtype == expected_dt
+ assert np.power(b32, b16).dtype == float32
+
+ @pytest.mark.skipif(platform.machine() == "armv5tel",
+ reason="See gh-413.")
+ @pytest.mark.skipif(IS_WASM,
+ reason="fp exceptions don't work in wasm.")
+ def test_half_fpe(self):
+ with np.errstate(all='raise'):
+ sx16 = np.array((1e-4,), dtype=float16)
+ bx16 = np.array((1e4,), dtype=float16)
+ sy16 = float16(1e-4)
+ by16 = float16(1e4)
+
+ # Underflow errors
+ assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
+ assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
+ assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
+ assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
+ assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(2.**-14), float16(2**11))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(-2.**-14), float16(2**11))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(2.**-14+2**-24), float16(2))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(-2.**-14-2**-24), float16(2))
+ assert_raises_fpe('underflow', lambda a, b:a/b,
+ float16(2.**-14+2**-23), float16(4))
+
+ # Overflow errors
+ assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
+ assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
+ assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
+ assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
+ assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
+ assert_raises_fpe('overflow', lambda a, b:a+b,
+ float16(65504), float16(17))
+ assert_raises_fpe('overflow', lambda a, b:a-b,
+ float16(-65504), float16(17))
+ assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
+ assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
+ assert_raises_fpe('overflow', np.spacing, float16(65504))
+
+ # Invalid value errors
+ assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
+ assert_raises_fpe('invalid', np.spacing, float16(np.inf))
+ assert_raises_fpe('invalid', np.spacing, float16(np.nan))
+
+ # These should not raise
+ float16(65472)+float16(32)
+ float16(2**-13)/float16(2)
+ float16(2**-14)/float16(2**10)
+ np.spacing(float16(-65504))
+ np.nextafter(float16(65504), float16(-np.inf))
+ np.nextafter(float16(-65504), float16(np.inf))
+ np.nextafter(float16(np.inf), float16(0))
+ np.nextafter(float16(-np.inf), float16(0))
+ np.nextafter(float16(0), float16(np.nan))
+ np.nextafter(float16(np.nan), float16(0))
+ float16(2**-14)/float16(2**10)
+ float16(-2**-14)/float16(2**10)
+ float16(2**-14+2**-23)/float16(2)
+ float16(-2**-14-2**-23)/float16(2)
+
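+    # Rationale for the non-raising cases above (informative): underflow
+    # is only signalled when a tiny result is also inexact. For example,
+    # float16(2**-14)/float16(2**10) is exactly the smallest subnormal
+    # 2**-24 and passes, whereas (2**-14 + 2**-24)/2 falls between
+    # representable subnormals and therefore raises further up.
+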
+ def test_half_array_interface(self):
+ """Test that half is compatible with __array_interface__"""
+ class Dummy:
+ pass
+
+ a = np.ones((1,), dtype=float16)
+ b = Dummy()
+ b.__array_interface__ = a.__array_interface__
+ c = np.array(b)
+ assert_(c.dtype == float16)
+ assert_equal(a, c)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py
new file mode 100644
index 00000000..bace4c05
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_hashtable.py
@@ -0,0 +1,30 @@
+import pytest
+
+import random
+from numpy.core._multiarray_tests import identityhash_tester
+
+
+@pytest.mark.parametrize("key_length", [1, 3, 6])
+@pytest.mark.parametrize("length", [1, 16, 2000])
+def test_identity_hashtable(key_length, length):
+    # use a 20 object pool for everything (duplicates will happen)
+ pool = [object() for i in range(20)]
+ keys_vals = []
+ for i in range(length):
+ keys = tuple(random.choices(pool, k=key_length))
+ keys_vals.append((keys, random.choice(pool)))
+
+ dictionary = dict(keys_vals)
+
+ # add a random item at the end:
+ keys_vals.append(random.choice(keys_vals))
+ # the expected one could be different with duplicates:
+ expected = dictionary[keys_vals[-1][0]]
+
+ res = identityhash_tester(key_length, keys_vals, replace=True)
+ assert res is expected
+
+ # check that a guaranteed duplicate raises (when replace is not allowed):
+ keys_vals.insert(0, keys_vals[-2])
+ with pytest.raises(RuntimeError):
+ identityhash_tester(key_length, keys_vals)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py
new file mode 100644
index 00000000..a0e9a8c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_indexerrors.py
@@ -0,0 +1,133 @@
+import numpy as np
+from numpy.testing import (
+ assert_raises, assert_raises_regex,
+ )
+
+
+class TestIndexErrors:
+ '''Tests to exercise indexerrors not covered by other tests.'''
+
+ def test_arraytypes_fasttake(self):
+ 'take from a 0-length dimension'
+ x = np.empty((2, 3, 0, 4))
+ assert_raises(IndexError, x.take, [0], axis=2)
+ assert_raises(IndexError, x.take, [1], axis=2)
+ assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
+ assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
+
+ def test_take_from_object(self):
+ # Check exception taking from object array
+ d = np.zeros(5, dtype=object)
+ assert_raises(IndexError, d.take, [6])
+
+ # Check exception when taking along a 0-sized dimension
+ d = np.zeros((5, 0), dtype=object)
+ assert_raises(IndexError, d.take, [1], axis=1)
+ assert_raises(IndexError, d.take, [0], axis=1)
+ assert_raises(IndexError, d.take, [0])
+ assert_raises(IndexError, d.take, [0], mode='wrap')
+ assert_raises(IndexError, d.take, [0], mode='clip')
+
+ def test_multiindex_exceptions(self):
+ a = np.empty(5, dtype=object)
+ assert_raises(IndexError, a.item, 20)
+ a = np.empty((5, 0), dtype=object)
+ assert_raises(IndexError, a.item, (0, 0))
+
+ a = np.empty(5, dtype=object)
+ assert_raises(IndexError, a.itemset, 20, 0)
+ a = np.empty((5, 0), dtype=object)
+ assert_raises(IndexError, a.itemset, (0, 0), 0)
+
+ def test_put_exceptions(self):
+ a = np.zeros((5, 5))
+ assert_raises(IndexError, a.put, 100, 0)
+ a = np.zeros((5, 5), dtype=object)
+ assert_raises(IndexError, a.put, 100, 0)
+ a = np.zeros((5, 5, 0))
+ assert_raises(IndexError, a.put, 100, 0)
+ a = np.zeros((5, 5, 0), dtype=object)
+ assert_raises(IndexError, a.put, 100, 0)
+
+ def test_iterators_exceptions(self):
+ "cases in iterators.c"
+ def assign(obj, ind, val):
+ obj[ind] = val
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a[0, 5, None, 2])
+ assert_raises(IndexError, lambda: a[0, 5, 0, 2])
+ assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
+ assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
+
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a[0, 0, None, 2])
+ assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a.flat[10])
+ assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a.flat[10])
+ assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array(10)])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array(10)])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
+
+ a = np.zeros([1, 2, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array([10])])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
+ a = np.zeros([1, 0, 3])
+ assert_raises(IndexError, lambda: a.flat[np.array([10])])
+ assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
+
+ def test_mapping(self):
+ "cases from mapping.c"
+
+ def assign(obj, ind, val):
+ obj[ind] = val
+
+ a = np.zeros((0, 10))
+ assert_raises(IndexError, lambda: a[12])
+
+ a = np.zeros((3, 5))
+ assert_raises(IndexError, lambda: a[(10, 20)])
+ assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
+ a = np.zeros((3, 0))
+ assert_raises(IndexError, lambda: a[(1, 0)])
+ assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
+
+ a = np.zeros((10,))
+ assert_raises(IndexError, lambda: assign(a, 10, 1))
+ a = np.zeros((0,))
+ assert_raises(IndexError, lambda: assign(a, 10, 1))
+
+ a = np.zeros((3, 5))
+ assert_raises(IndexError, lambda: a[(1, [1, 20])])
+ assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
+ a = np.zeros((3, 0))
+ assert_raises(IndexError, lambda: a[(1, [0, 1])])
+ assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
+
+ def test_mapping_error_message(self):
+ a = np.zeros((3, 5))
+ index = (1, 2, 3, 4, 5)
+ assert_raises_regex(
+ IndexError,
+ "too many indices for array: "
+ "array is 2-dimensional, but 5 were indexed",
+ lambda: a[index])
+
+ def test_methods(self):
+ "cases from methods.c"
+
+ a = np.zeros((3, 3))
+ assert_raises(IndexError, lambda: a.item(100))
+ assert_raises(IndexError, lambda: a.itemset(100, 1))
+ a = np.zeros((0, 3))
+ assert_raises(IndexError, lambda: a.item(100))
+ assert_raises(IndexError, lambda: a.itemset(100, 1))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py
new file mode 100644
index 00000000..74075639
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_indexing.py
@@ -0,0 +1,1417 @@
+import sys
+import warnings
+import functools
+import operator
+
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import array_indexing
+from itertools import product
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM
+ )
+
+
+class TestIndexing:
+ def test_index_no_floats(self):
+ a = np.array([[[5]]])
+
+ assert_raises(IndexError, lambda: a[0.0])
+ assert_raises(IndexError, lambda: a[0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0])
+ assert_raises(IndexError, lambda: a[0.0,:])
+ assert_raises(IndexError, lambda: a[:, 0.0])
+ assert_raises(IndexError, lambda: a[:, 0.0,:])
+ assert_raises(IndexError, lambda: a[0.0,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0, 0])
+ assert_raises(IndexError, lambda: a[0, 0.0, 0])
+ assert_raises(IndexError, lambda: a[-1.4])
+ assert_raises(IndexError, lambda: a[0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0])
+ assert_raises(IndexError, lambda: a[-1.4,:])
+ assert_raises(IndexError, lambda: a[:, -1.4])
+ assert_raises(IndexError, lambda: a[:, -1.4,:])
+ assert_raises(IndexError, lambda: a[-1.4,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0, 0])
+ assert_raises(IndexError, lambda: a[0, -1.4, 0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
+
+ def test_slicing_no_floats(self):
+ a = np.array([[5]])
+
+ # start as float.
+ assert_raises(TypeError, lambda: a[0.0:])
+ assert_raises(TypeError, lambda: a[0:, 0.0:2])
+ assert_raises(TypeError, lambda: a[0.0::2, :0])
+ assert_raises(TypeError, lambda: a[0.0:1:2,:])
+ assert_raises(TypeError, lambda: a[:, 0.0:])
+ # stop as float.
+ assert_raises(TypeError, lambda: a[:0.0])
+ assert_raises(TypeError, lambda: a[:0, 1:2.0])
+ assert_raises(TypeError, lambda: a[:0.0:2, :0])
+ assert_raises(TypeError, lambda: a[:0.0,:])
+ assert_raises(TypeError, lambda: a[:, 0:4.0:2])
+ # step as float.
+ assert_raises(TypeError, lambda: a[::1.0])
+ assert_raises(TypeError, lambda: a[0:, :2:2.0])
+ assert_raises(TypeError, lambda: a[1::4.0, :0])
+ assert_raises(TypeError, lambda: a[::5.0,:])
+ assert_raises(TypeError, lambda: a[:, 0:4:2.0])
+ # mixed.
+ assert_raises(TypeError, lambda: a[1.0:2:2.0])
+ assert_raises(TypeError, lambda: a[1.0::2.0])
+ assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
+ assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
+ assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
+ assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
+ # a float step of 0 should still raise TypeError (rather than the zero-step ValueError).
+ assert_raises(TypeError, lambda: a[::0.0])
+
+ def test_index_no_array_to_index(self):
+ # No non-scalar arrays.
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, lambda: a[a:a:a])
+
+ def test_none_index(self):
+ # `None` index adds newaxis
+ a = np.array([1, 2, 3])
+ assert_equal(a[None], a[np.newaxis])
+ assert_equal(a[None].ndim, a.ndim + 1)
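+ # illustrative sketch (not part of the upstream test):
+ # >>> np.array([1, 2, 3])[None].shape
+ # (1, 3)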
+
+ def test_empty_tuple_index(self):
+ # Empty tuple index creates a view
+ a = np.array([1, 2, 3])
+ assert_equal(a[()], a)
+ assert_(a[()].base is a)
+ a = np.array(0)
+ assert_(isinstance(a[()], np.int_))
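+ # i.e. on a 0-d array the empty tuple extracts the scalar:
+ # >>> np.array(0)[()]
+ # 0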
+
+ def test_void_scalar_empty_tuple(self):
+ s = np.zeros((), dtype='V4')
+ assert_equal(s[()].dtype, s.dtype)
+ assert_equal(s[()], s)
+ assert_equal(type(s[...]), np.ndarray)
+
+ def test_same_kind_index_casting(self):
+ # Indexes should be cast using same-kind (not safe) casting, even if
+ # that is somewhat unsafe. So test various different code paths.
+ index = np.arange(5)
+ u_index = index.astype(np.uintp)
+ arr = np.arange(10)
+
+ assert_array_equal(arr[index], arr[u_index])
+ arr[u_index] = np.arange(5)
+ assert_array_equal(arr, np.arange(10))
+
+ arr = np.arange(10).reshape(5, 2)
+ assert_array_equal(arr[index], arr[u_index])
+
+ arr[u_index] = np.arange(5)[:,None]
+ assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
+
+ arr = np.arange(25).reshape(5, 5)
+ assert_array_equal(arr[u_index, u_index], arr[index, index])
+
+ def test_empty_fancy_index(self):
+ # Empty list index creates an empty array
+ # with the same dtype (but with weird shape)
+ a = np.array([1, 2, 3])
+ assert_equal(a[[]], [])
+ assert_equal(a[[]].dtype, a.dtype)
+
+ b = np.array([], dtype=np.intp)
+ assert_equal(a[b], [])
+ assert_equal(a[b].dtype, a.dtype)
+
+ b = np.array([])
+ assert_raises(IndexError, a.__getitem__, b)
+
+ def test_ellipsis_index(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ assert_(a[...] is not a)
+ assert_equal(a[...], a)
+ # `a[...]` was `a` in numpy <1.9.
+ assert_(a[...].base is a)
+
+ # Slicing with ellipsis can skip an
+ # arbitrary number of dimensions
+ assert_equal(a[0, ...], a[0])
+ assert_equal(a[0, ...], a[0,:])
+ assert_equal(a[..., 0], a[:, 0])
+
+ # Slicing with ellipsis always results
+ # in an array, not a scalar
+ assert_equal(a[0, ..., 1], np.array(2))
+
+ # Assignment with `(Ellipsis,)` on 0-d arrays
+ b = np.array(1)
+ b[(Ellipsis,)] = 2
+ assert_equal(b, 2)
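+ # (sketch: the ellipsis expands to as many full slices as needed,
+ # so for this 2-d array a[0, ...] is a[0, :] and a[..., 0] is
+ # a[:, 0]; only one ellipsis may appear per index)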
+
+ def test_single_int_index(self):
+ # Single integer index selects one row
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ assert_equal(a[0], [1, 2, 3])
+ assert_equal(a[-1], [7, 8, 9])
+
+ # Index out of bounds produces IndexError
+ assert_raises(IndexError, a.__getitem__, 1 << 30)
+ # Index overflow produces IndexError
+ assert_raises(IndexError, a.__getitem__, 1 << 64)
+
+ def test_single_bool_index(self):
+ # Single boolean index
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+
+ assert_equal(a[np.array(True)], a[None])
+ assert_equal(a[np.array(False)], a[None][0:0])
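+ # (a 0-d True mask prepends a length-1 axis, a 0-d False mask a
+ # length-0 axis, hence the comparisons against a[None] above)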
+
+ def test_boolean_shape_mismatch(self):
+ arr = np.ones((5, 4, 3))
+
+ index = np.array([True])
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ index = np.array([False] * 6)
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ index = np.zeros((4, 4), dtype=bool)
+ assert_raises(IndexError, arr.__getitem__, index)
+
+ assert_raises(IndexError, arr.__getitem__, (slice(None), index))
+
+ def test_boolean_indexing_onedim(self):
+ # Indexing a 2-dimensional array with
+ # boolean array of length one
+ a = np.array([[ 0., 0., 0.]])
+ b = np.array([ True], dtype=bool)
+ assert_equal(a[b], a)
+ # boolean assignment
+ a[b] = 1.
+ assert_equal(a, [[1., 1., 1.]])
+
+ def test_boolean_assignment_value_mismatch(self):
+ # A boolean assignment should fail when the shape of the values
+ # cannot be broadcast to the subscription. (see also gh-3458)
+ a = np.arange(4)
+
+ def f(a, v):
+ a[a > -1] = v
+
+ assert_raises(ValueError, f, a, [])
+ assert_raises(ValueError, f, a, [1, 2, 3])
+ assert_raises(ValueError, f, a[:1], [1, 2, 3])
+
+ def test_boolean_assignment_needs_api(self):
+ # See also gh-7666
+ # This caused a segfault on Python 2 due to the GIL not being
+ # held when the iterator does not need it, but the transfer function
+ # does
+ arr = np.zeros(1000)
+ indx = np.zeros(1000, dtype=bool)
+ indx[:100] = True
+ arr[indx] = np.ones(100, dtype=object)
+
+ expected = np.zeros(1000)
+ expected[:100] = 1
+ assert_array_equal(arr, expected)
+
+ def test_boolean_indexing_twodim(self):
+ # Indexing a 2-dimensional array with
+ # 2-dimensional boolean array
+ a = np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ b = np.array([[ True, False, True],
+ [False, True, False],
+ [ True, False, True]])
+ assert_equal(a[b], [1, 3, 5, 7, 9])
+ assert_equal(a[b[1]], [[4, 5, 6]])
+ assert_equal(a[b[0]], a[b[2]])
+
+ # boolean assignment
+ a[b] = 0
+ assert_equal(a, [[0, 2, 0],
+ [4, 0, 6],
+ [0, 8, 0]])
+
+ def test_boolean_indexing_list(self):
+ # Regression test for #13715. It's a use-after-free bug which the
+ # test won't directly catch, but it will show up in valgrind.
+ a = np.array([1, 2, 3])
+ b = [True, False, True]
+ # Two variants of the test because the first takes a fast path
+ assert_equal(a[b], [1, 3])
+ assert_equal(a[None, b], [[1, 3]])
+
+ def test_reverse_strides_and_subspace_bufferinit(self):
+ # This tests that the strides are not reversed for simple and
+ # subspace fancy indexing.
+ a = np.ones(5)
+ b = np.zeros(5, dtype=np.intp)[::-1]
+ c = np.arange(5)[::-1]
+
+ a[b] = c
+ # If the strides are not reversed, the 0 in the arange comes last.
+ assert_equal(a[0], 0)
+
+ # This also tests that the subspace buffer is initialized:
+ a = np.ones((5, 2))
+ c = np.arange(10).reshape(5, 2)[::-1]
+ a[b, :] = c
+ assert_equal(a[0], [0, 1])
+
+ def test_reversed_strides_result_allocation(self):
+ # Test a bug when calculating the output strides for a result array
+ # when the subspace size was 1 (and test other cases as well)
+ a = np.arange(10)[:, None]
+ i = np.arange(10)[::-1]
+ assert_array_equal(a[i], a[i.copy('C')])
+
+ a = np.arange(20).reshape(-1, 2)
+ assert_array_equal(a[i], a[i.copy('C')])
+
+ def test_uncontiguous_subspace_assignment(self):
+ # During development there was a bug activating a skip logic
+ # based on ndim instead of size.
+ a = np.full((3, 4, 2), -1)
+ b = np.full((3, 4, 2), -1)
+
+ a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
+ b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
+
+ assert_equal(a, b)
+
+ def test_too_many_fancy_indices_special_case(self):
+ # Just documents behaviour, this is a small limitation.
+ a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
+ assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
+
+ def test_scalar_array_bool(self):
+ # NumPy bools can be used as a boolean index (Python ones are not, as of yet)
+ a = np.array(1)
+ assert_equal(a[np.bool_(True)], a[np.array(True)])
+ assert_equal(a[np.bool_(False)], a[np.array(False)])
+
+ # After deprecating bools as integers:
+ #a = np.array([0,1,2])
+ #assert_equal(a[True, :], a[None, :])
+ #assert_equal(a[:, True], a[:, None])
+ #
+ #assert_(not np.may_share_memory(a, a[True, :]))
+
+ def test_everything_returns_views(self):
+ # Before `...` would return a itself.
+ a = np.arange(5)
+
+ assert_(a is not a[()])
+ assert_(a is not a[...])
+ assert_(a is not a[:])
+
+ def test_broaderrors_indexing(self):
+ a = np.zeros((5, 5))
+ assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
+ assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
+
+ def test_trivial_fancy_out_of_bounds(self):
+ a = np.zeros(5)
+ ind = np.ones(20, dtype=np.intp)
+ ind[-1] = 10
+ assert_raises(IndexError, a.__getitem__, ind)
+ assert_raises(IndexError, a.__setitem__, ind, 0)
+ ind = np.ones(20, dtype=np.intp)
+ ind[0] = 11
+ assert_raises(IndexError, a.__getitem__, ind)
+ assert_raises(IndexError, a.__setitem__, ind, 0)
+
+ def test_trivial_fancy_not_possible(self):
+ # Test that the fast path for trivial assignment is not incorrectly
+ # used when the index is not contiguous or 1D, see also gh-11467.
+ a = np.arange(6)
+ idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
+ assert_array_equal(a[idx], idx)
+
+ # this case must not go into the fast path, note that idx is
+ # a non-contiguous, non-1D array here.
+ a[idx] = -1
+ res = np.arange(6)
+ res[0] = -1
+ res[3] = -1
+ assert_array_equal(a, res)
+
+ def test_nonbaseclass_values(self):
+ class SubClass(np.ndarray):
+ def __array_finalize__(self, old):
+ # Have array finalize do funny things
+ self.fill(99)
+
+ a = np.zeros((5, 5))
+ s = a.copy().view(type=SubClass)
+ s.fill(1)
+
+ a[[0, 1, 2, 3, 4], :] = s
+ assert_((a == 1).all())
+
+ # Subspace is last, so transposing might want to finalize
+ a[:, [0, 1, 2, 3, 4]] = s
+ assert_((a == 1).all())
+
+ a.fill(0)
+ a[...] = s
+ assert_((a == 1).all())
+
+ def test_array_like_values(self):
+ # Similar to the above test, but use a memoryview instead
+ a = np.zeros((5, 5))
+ s = np.arange(25, dtype=np.float64).reshape(5, 5)
+
+ a[[0, 1, 2, 3, 4], :] = memoryview(s)
+ assert_array_equal(a, s)
+
+ a[:, [0, 1, 2, 3, 4]] = memoryview(s)
+ assert_array_equal(a, s)
+
+ a[...] = memoryview(s)
+ assert_array_equal(a, s)
+
+ def test_subclass_writeable(self):
+ d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
+ dtype=[('target', 'S20'), ('V_mag', '>f4')])
+ ind = np.array([False, True, True], dtype=bool)
+ assert_(d[ind].flags.writeable)
+ ind = np.array([0, 1])
+ assert_(d[ind].flags.writeable)
+ assert_(d[...].flags.writeable)
+ assert_(d[0].flags.writeable)
+
+ def test_memory_order(self):
+ # This is not necessary to preserve. Memory layouts for
+ # more complex indices are not as simple.
+ a = np.arange(10)
+ b = np.arange(10).reshape(5,2).T
+ assert_(a[b].flags.f_contiguous)
+
+ # Takes a different implementation branch:
+ a = a.reshape(-1, 1)
+ assert_(a[b, 0].flags.f_contiguous)
+
+ def test_scalar_return_type(self):
+ # Full scalar indices should return scalars and object
+ # arrays should not call PyArray_Return on their items
+ class Zero:
+ # The most basic valid indexing
+ def __index__(self):
+ return 0
+
+ z = Zero()
+
+ class ArrayLike:
+ # Simple array, should behave like the array
+ def __array__(self):
+ return np.array(0)
+
+ a = np.zeros(())
+ assert_(isinstance(a[()], np.float_))
+ a = np.zeros(1)
+ assert_(isinstance(a[z], np.float_))
+ a = np.zeros((1, 1))
+ assert_(isinstance(a[z, np.array(0)], np.float_))
+ assert_(isinstance(a[z, ArrayLike()], np.float_))
+
+ # And object arrays do not call it too often:
+ b = np.array(0)
+ a = np.array(0, dtype=object)
+ a[()] = b
+ assert_(isinstance(a[()], np.ndarray))
+ a = np.array([b, None])
+ assert_(isinstance(a[z], np.ndarray))
+ a = np.array([[b, None]])
+ assert_(isinstance(a[z, np.array(0)], np.ndarray))
+ assert_(isinstance(a[z, ArrayLike()], np.ndarray))
+
+ def test_small_regressions(self):
+ # Reference count of intp for index checks
+ a = np.array([0])
+ if HAS_REFCOUNT:
+ refcount = sys.getrefcount(np.dtype(np.intp))
+ # item setting always checks indices in separate function:
+ a[np.array([0], dtype=np.intp)] = 1
+ a[np.array([0], dtype=np.uint8)] = 1
+ assert_raises(IndexError, a.__setitem__,
+ np.array([1], dtype=np.intp), 1)
+ assert_raises(IndexError, a.__setitem__,
+ np.array([1], dtype=np.uint8), 1)
+
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+
+ def test_unaligned(self):
+ v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
+ d = v.view(np.dtype("S8"))
+ # unaligned source
+ x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
+ x = x.view(np.dtype("S8"))
+ x[...] = np.array("b" * 8, dtype="S")
+ b = np.arange(d.size)
+ # trivial
+ assert_equal(d[b], d)
+ d[b] = x
+ # nontrivial
+ # unaligned index array
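+ # (built by offsetting a byte view by one byte and trimming the
+ # tail so the length still divides evenly into intp-sized items)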
+ b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
+ b = b.view(np.intp)[:d.size]
+ b[...] = np.arange(d.size)
+ assert_equal(d[b.astype(np.int16)], d)
+ d[b.astype(np.int16)] = x
+ # boolean
+ d[b % 2 == 0]
+ d[b % 2 == 0] = x[::2]
+
+ def test_tuple_subclass(self):
+ arr = np.ones((5, 5))
+
+ # A tuple subclass should also be an nd-index
+ class TupleSubclass(tuple):
+ pass
+ index = ([1], [1])
+ index = TupleSubclass(index)
+ assert_(arr[index].shape == (1,))
+ # Unlike the non nd-index:
+ assert_(arr[index,].shape != (1,))
+
+ def test_broken_sequence_not_nd_index(self):
+ # See gh-5063:
+ # If we have an object which claims to be a sequence, but fails
+ # on item getting, this should not be converted to an nd-index (tuple)
+ # If this object happens to be a valid index otherwise, it should work
+ # This object here is very dubious and probably bad though:
+ class SequenceLike:
+ def __index__(self):
+ return 0
+
+ def __len__(self):
+ return 1
+
+ def __getitem__(self, item):
+ raise IndexError('Not possible')
+
+ arr = np.arange(10)
+ assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+
+ # also test that field indexing does not segfault
+ # for a similar reason, by indexing a structured array
+ arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
+ assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+
+ def test_indexing_array_weird_strides(self):
+ # See also gh-6221
+ # the shapes used here come from the issue and create the correct
+ # size for the iterator buffering size.
+ x = np.ones(10)
+ x2 = np.ones((10, 2))
+ ind = np.arange(10)[:, None, None, None]
+ ind = np.broadcast_to(ind, (10, 55, 4, 4))
+
+ # single advanced index case
+ assert_array_equal(x[ind], x[ind.copy()])
+ # higher dimensional advanced index
+ zind = np.zeros(4, dtype=np.intp)
+ assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
+
+ def test_indexing_array_negative_strides(self):
+ # From gh-8264,
+ # core dumps if negative strides are used in iteration
+ arro = np.zeros((4, 4))
+ arr = arro[::-1, ::-1]
+
+ slices = (slice(None), [0, 1, 2, 3])
+ arr[slices] = 10
+ assert_array_equal(arr, 10.)
+
+ def test_character_assignment(self):
+ # This is an example of an assignment going through CopyObject, which
+ # used to have an untested special path for scalars
+ # (the character special dtype case, which should probably be deprecated)
+ arr = np.zeros((1, 5), dtype="c")
+ arr[0] = np.str_("asdfg") # must assign as a sequence
+ assert_array_equal(arr[0], np.array("asdfg", dtype="c"))
+ assert arr[0, 1] == b"s" # make sure not all were set to "a" for both
+
+ @pytest.mark.parametrize("index",
+ [True, False, np.array([0])])
+ @pytest.mark.parametrize("num", [32, 40])
+ @pytest.mark.parametrize("original_ndim", [1, 32])
+ def test_too_many_advanced_indices(self, index, num, original_ndim):
+ # These are limitations based on the number of arguments we can process.
+ # For `num=32` (and all boolean cases), the result is actually well
+ # defined; but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
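+ # (NPY_MAXARGS is 32 in this NumPy version, so together with the
+ # result operand the iterator cannot be set up for that many index
+ # arrays)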
+ arr = np.ones((1,) * original_ndim)
+ with pytest.raises(IndexError):
+ arr[(index,) * num]
+ with pytest.raises(IndexError):
+ arr[(index,) * num] = 1.
+
+ @pytest.mark.skipif(IS_WASM, reason="no threading")
+ def test_structured_advanced_indexing(self):
+ # Test that copyswap(n) used by integer array indexing is threadsafe
+ # for structured datatypes, see gh-15387. This test can behave randomly.
+ from concurrent.futures import ThreadPoolExecutor
+
+ # Create a deeply nested dtype to make a failure more likely:
+ dt = np.dtype([("", "f8")])
+ dt = np.dtype([("", dt)] * 2)
+ dt = np.dtype([("", dt)] * 2)
+ # The array should be large enough to likely run into threading issues
+ arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0]
+
+ rng = np.random.default_rng()
+ def func(arr):
+ indx = rng.integers(0, len(arr), size=6000, dtype=np.intp)
+ arr[indx]
+
+ tpe = ThreadPoolExecutor(max_workers=8)
+ futures = [tpe.submit(func, arr) for _ in range(10)]
+ for f in futures:
+ f.result()
+
+ assert arr.dtype is dt
+
+ def test_nontuple_ndindex(self):
+ a = np.arange(25).reshape((5, 5))
+ assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
+ assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
+ assert_raises(IndexError, a.__getitem__, [slice(None)])
+
+
+class TestFieldIndexing:
+ def test_scalar_return_type(self):
+ # Field access on an array should return an array, even if it
+ # is 0-d.
+ a = np.zeros((), [('a','f8')])
+ assert_(isinstance(a['a'], np.ndarray))
+ assert_(isinstance(a[['a']], np.ndarray))
+
+
+class TestBroadcastedAssignments:
+ def assign(self, a, ind, val):
+ a[ind] = val
+ return a
+
+ def test_prepending_ones(self):
+ a = np.zeros((3, 2))
+
+ a[...] = np.ones((1, 3, 2))
+ # Fancy with subspace with and without transpose
+ a[[0, 1, 2], :] = np.ones((1, 3, 2))
+ a[:, [0, 1]] = np.ones((1, 3, 2))
+ # Fancy without subspace (with broadcasting)
+ a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
+
+ def test_prepend_not_one(self):
+ assign = self.assign
+ s_ = np.s_
+ a = np.zeros(5)
+
+ # Too large and not only ones.
+ assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
+ assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
+ assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
+
+ def test_simple_broadcasting_errors(self):
+ assign = self.assign
+ s_ = np.s_
+ a = np.zeros((5, 1))
+
+ assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
+ assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
+ assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
+ assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
+ assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
+
+ @pytest.mark.parametrize("index", [
+ (..., [1, 2], slice(None)),
+ ([0, 1], ..., 0),
+ (..., [1, 2], [1, 2])])
+ def test_broadcast_error_reports_correct_shape(self, index):
+ values = np.zeros((100, 100)) # will never broadcast below
+
+ arr = np.zeros((3, 4, 5, 6, 7))
+ # We currently report without any spaces (could be changed)
+ shape_str = str(arr[index].shape).replace(" ", "")
+
+ with pytest.raises(ValueError) as e:
+ arr[index] = values
+
+ assert str(e.value).endswith(shape_str)
+
+ def test_index_is_larger(self):
+ # Simple case of fancy index broadcasting of the index.
+ a = np.zeros((5, 5))
+ a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
+
+ assert_((a[:3, :3] == [2, 3, 4]).all())
+
+ def test_broadcast_subspace(self):
+ a = np.zeros((100, 100))
+ v = np.arange(100)[:,None]
+ b = np.arange(100)[::-1]
+ a[b] = v
+ assert_((a[::-1] == v).all())
+
+
+class TestSubclasses:
+ def test_basic(self):
+ # Test that indexing in various ways produces SubClass instances,
+ # and that the base is set up correctly: the original subclass
+ # instance for views, and a new ndarray for advanced/boolean indexing
+ # where a copy was made (latter a regression test for gh-11983).
+ class SubClass(np.ndarray):
+ pass
+
+ a = np.arange(5)
+ s = a.view(SubClass)
+ s_slice = s[:3]
+ assert_(type(s_slice) is SubClass)
+ assert_(s_slice.base is s)
+ assert_array_equal(s_slice, a[:3])
+
+ s_fancy = s[[0, 1, 2]]
+ assert_(type(s_fancy) is SubClass)
+ assert_(s_fancy.base is not s)
+ assert_(type(s_fancy.base) is np.ndarray)
+ assert_array_equal(s_fancy, a[[0, 1, 2]])
+ assert_array_equal(s_fancy.base, a[[0, 1, 2]])
+
+ s_bool = s[s > 0]
+ assert_(type(s_bool) is SubClass)
+ assert_(s_bool.base is not s)
+ assert_(type(s_bool.base) is np.ndarray)
+ assert_array_equal(s_bool, a[a > 0])
+ assert_array_equal(s_bool.base, a[a > 0])
+
+ def test_fancy_on_read_only(self):
+ # Test that fancy indexing on read-only SubClass does not make a
+ # read-only copy (gh-14132)
+ class SubClass(np.ndarray):
+ pass
+
+ a = np.arange(5)
+ s = a.view(SubClass)
+ s.flags.writeable = False
+ s_fancy = s[[0, 1, 2]]
+ assert_(s_fancy.flags.writeable)
+
+
+ def test_finalize_gets_full_info(self):
+ # Array finalize should be called on the filled array.
+ class SubClass(np.ndarray):
+ def __array_finalize__(self, old):
+ self.finalize_status = np.array(self)
+ self.old = old
+
+ s = np.arange(10).view(SubClass)
+ new_s = s[:3]
+ assert_array_equal(new_s.finalize_status, new_s)
+ assert_array_equal(new_s.old, s)
+
+ new_s = s[[0,1,2,3]]
+ assert_array_equal(new_s.finalize_status, new_s)
+ assert_array_equal(new_s.old, s)
+
+ new_s = s[s > 0]
+ assert_array_equal(new_s.finalize_status, new_s)
+ assert_array_equal(new_s.old, s)
+
+
+class TestFancyIndexingCast:
+ def test_boolean_index_cast_assign(self):
+ # Setup the boolean index and float arrays.
+ shape = (8, 63)
+ bool_index = np.zeros(shape).astype(bool)
+ bool_index[0, 1] = True
+ zero_array = np.zeros(shape)
+
+ # Assigning float is fine.
+ zero_array[bool_index] = np.array([1])
+ assert_equal(zero_array[0, 1], 1)
+
+ # Fancy indexing works, although we get a cast warning.
+ assert_warns(np.ComplexWarning,
+ zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
+ assert_equal(zero_array[0, 1], 2) # No complex part
+
+ # Cast complex to float, throwing away the imaginary portion.
+ assert_warns(np.ComplexWarning,
+ zero_array.__setitem__, bool_index, np.array([1j]))
+ assert_equal(zero_array[0, 1], 0)
+
+class TestFancyIndexingEquivalence:
+ def test_object_assign(self):
+ # Check that the field and object special case using copyto is active.
+ # The right hand side cannot be converted to an array here.
+ a = np.arange(5, dtype=object)
+ b = a.copy()
+ a[:3] = [1, (1,2), 3]
+ b[[0, 1, 2]] = [1, (1,2), 3]
+ assert_array_equal(a, b)
+
+ # test same for subspace fancy indexing
+ b = np.arange(5, dtype=object)[None, :]
+ b[[0], :3] = [[1, (1,2), 3]]
+ assert_array_equal(a, b[0])
+
+ # Check that swapping of axes works.
+ # There was a bug that made the later assignment throw a ValueError
+ # due to an incorrectly transposed temporary right hand side (gh-5714)
+ b = b.T
+ b[:3, [0]] = [[1], [(1,2)], [3]]
+ assert_array_equal(a, b[:, 0])
+
+ # Another test for the memory order of the subspace
+ arr = np.ones((3, 4, 5), dtype=object)
+ # Equivalent slicing assignment for comparison
+ cmp_arr = arr.copy()
+ cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
+ arr[[0], ...] = [[[1], [2], [3], [4]]]
+ assert_array_equal(arr, cmp_arr)
+ arr = arr.copy('F')
+ arr[[0], ...] = [[[1], [2], [3], [4]]]
+ assert_array_equal(arr, cmp_arr)
+
+ def test_cast_equivalence(self):
+ # Yes, normal slicing uses unsafe casting.
+ a = np.arange(5)
+ b = a.copy()
+
+ a[:3] = np.array(['2', '-3', '-1'])
+ b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
+ assert_array_equal(a, b)
+
+ # test the same for subspace fancy indexing
+ b = np.arange(5)[None, :]
+ b[[0], :3] = np.array([['2', '-3', '-1']])
+ assert_array_equal(a, b[0])
+
+
+class TestMultiIndexingAutomated:
+ """
+ These tests use code to mimic the C-Code indexing for selection.
+
+ NOTE:
+
+ * This still lacks tests for complex item setting.
+ * If you change behavior of indexing, you might want to modify
+ these tests to try more combinations.
+ * Behavior was written to match numpy version 1.8. (though a
+ first version matched 1.7.)
+ * Only tuple indices are supported by the mimicking code.
+ (and tested as of writing this)
+ * Error types should match most of the time as long as there
+ is only one error. For multiple errors, what gets raised
+ will usually not be the same one. They are *not* tested.
+
+ Update 2016-11-30: It is probably not worth maintaining this test
+ indefinitely and it can be dropped if maintenance becomes a burden.
+
+ """
+
+ def setup_method(self):
+ self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
+ self.b = np.empty((3, 0, 5, 6))
+ self.complex_indices = ['skip', Ellipsis,
+ 0,
+ # Boolean indices, up to 3-d for some special cases of eating up
+ # dimensions, also need to test all False
+ np.array([True, False, False]),
+ np.array([[True, False], [False, True]]),
+ np.array([[[False, False], [False, False]]]),
+ # Some slices:
+ slice(-5, 5, 2),
+ slice(1, 1, 100),
+ slice(4, -1, -2),
+ slice(None, None, -3),
+ # Some Fancy indexes:
+ np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
+ np.array([0, 1, -2]),
+ np.array([[2], [0], [1]]),
+ np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
+ np.array([2, -1], dtype=np.int8),
+ np.zeros([1]*31, dtype=int), # trigger too large array.
+ np.array([0., 1.])] # invalid datatype
+ # Some simpler indices that still cover a bit more
+ self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
+ 'skip']
+ # Very simple ones to fill the rest:
+ self.fill_indices = [slice(None, None), 0]
+
+ def _get_multi_index(self, arr, indices):
+ """Mimic multi dimensional indexing.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed.
+ indices : tuple of index objects
+
+ Returns
+ -------
+ out : ndarray
+ An array equivalent to the indexing operation (but always a copy).
+ `arr[indices]` should be identical.
+ no_copy : bool
+ Whether the indexing operation can be done without a copy. If this
+ is `True`, `np.may_share_memory(arr, arr[indices])` should be `True`
+ (with some exceptions for scalars and possibly 0-d arrays).
+
+ Notes
+ -----
+ While the function mostly mimics the errors of normal indexing, the
+ exact error raised is not guaranteed to match.
+ """
+ in_indices = list(indices)
+ indices = []
+ # if False, this is a fancy or boolean index
+ no_copy = True
+ # number of fancy/scalar indexes that are not consecutive
+ num_fancy = 0
+ # number of dimensions indexed by a "fancy" index
+ fancy_dim = 0
+ # NOTE: This is a funny twist (and probably OK to change).
+ # The boolean array has illegal indexes, but this is
+ # allowed if the broadcast fancy-indices are 0-sized.
+ # This variable is to catch that case.
+ error_unless_broadcast_to_empty = False
+
+ # We need to handle Ellipsis and make arrays from indices, also
+ # check if this is fancy indexing (set no_copy).
+ ndim = 0
+ ellipsis_pos = None # define here mostly to replace all but first.
+ for i, indx in enumerate(in_indices):
+ if indx is None:
+ continue
+ if isinstance(indx, np.ndarray) and indx.dtype == bool:
+ no_copy = False
+ if indx.ndim == 0:
+ raise IndexError
+ # boolean indices can have higher dimensions
+ ndim += indx.ndim
+ fancy_dim += indx.ndim
+ continue
+ if indx is Ellipsis:
+ if ellipsis_pos is None:
+ ellipsis_pos = i
+ continue # do not increment ndim counter
+ raise IndexError
+ if isinstance(indx, slice):
+ ndim += 1
+ continue
+ if not isinstance(indx, np.ndarray):
+ # This could be open for changes in numpy.
+ # numpy should maybe raise an error if casting to intp
+ # is not safe. It rejects np.array([1., 2.]) but not
+ # [1., 2.] as an index (same for e.g. np.take).
+ # (Note the importance of empty lists if changing this here)
+ try:
+ indx = np.array(indx, dtype=np.intp)
+ except ValueError:
+ raise IndexError
+ in_indices[i] = indx
+ elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
+ raise IndexError('arrays used as indices must be of '
+ 'integer (or boolean) type')
+ if indx.ndim != 0:
+ no_copy = False
+ ndim += 1
+ fancy_dim += 1
+
+ if arr.ndim - ndim < 0:
+ # we can't take more dimensions than we have, not even for 0-d
+ # arrays, since a[()] makes sense, but not a[(),]. We will
+ # raise an error later on, unless a broadcasting error occurs
+ # first.
+ raise IndexError
+
+ if ndim == 0 and None not in in_indices:
+ # Well we have no indexes or one Ellipsis. This is legal.
+ return arr.copy(), no_copy
+
+ if ellipsis_pos is not None:
+ in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
+ (arr.ndim - ndim))
+
+ for ax, indx in enumerate(in_indices):
+ if isinstance(indx, slice):
+ # convert to an index array
+ indx = np.arange(*indx.indices(arr.shape[ax]))
+ indices.append(['s', indx])
+ continue
+ elif indx is None:
+ # this is like taking a slice with one element from a new axis:
+ indices.append(['n', np.array([0], dtype=np.intp)])
+ arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
+ continue
+ if isinstance(indx, np.ndarray) and indx.dtype == bool:
+ if indx.shape != arr.shape[ax:ax+indx.ndim]:
+ raise IndexError
+
+ try:
+ flat_indx = np.ravel_multi_index(np.nonzero(indx),
+ arr.shape[ax:ax+indx.ndim], mode='raise')
+ except Exception:
+ error_unless_broadcast_to_empty = True
+ # fill with 0s instead, and raise error later
+ flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
+ # concatenate axis into a single one:
+ if indx.ndim != 0:
+ arr = arr.reshape((arr.shape[:ax]
+ + (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ + arr.shape[ax+indx.ndim:]))
+ indx = flat_indx
+ else:
+ # This could be changed, a 0-d boolean index can
+ # make sense (even outside the 0-d indexed array case)
+ # Note that originally this could be interpreted as
+ # an integer in the full integer special case.
+ raise IndexError
+ else:
+ # If the index is a singleton, the bounds check is done
+ # before the broadcasting. This used to be different in <1.9
+ if indx.ndim == 0:
+ if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
+ raise IndexError
+ if indx.ndim == 0:
+ # The index is a scalar. This used to be two fold, but if
+ # fancy indexing was active, the check was done later,
+ # possibly after broadcasting it away (1.7. or earlier).
+ # Now it is always done.
+ if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
+ raise IndexError
+ if (len(indices) > 0 and
+ indices[-1][0] == 'f' and
+ ax != ellipsis_pos):
+ # NOTE: There could still have been a 0-sized Ellipsis
+ # between them. Checked that with ellipsis_pos.
+ indices[-1].append(indx)
+ else:
+ # We have a fancy index that is not after an existing one.
+ # NOTE: A 0-d array triggers this as well, while one may
+ # expect it to not trigger it, since a scalar would not be
+ # considered fancy indexing.
+ num_fancy += 1
+ indices.append(['f', indx])
+
+ if num_fancy > 1 and not no_copy:
+ # We have to collect all fancy indexes at the front (left)
+ new_indices = indices[:]
+ axes = list(range(arr.ndim))
+ fancy_axes = []
+ new_indices.insert(0, ['f'])
+ ni = 0
+ ai = 0
+ for indx in indices:
+ ni += 1
+ if indx[0] == 'f':
+ new_indices[0].extend(indx[1:])
+ del new_indices[ni]
+ ni -= 1
+ for ax in range(ai, ai + len(indx[1:])):
+ fancy_axes.append(ax)
+ axes.remove(ax)
+ ai += len(indx) - 1 # axis we are at
+ indices = new_indices
+ # and now we need to transpose arr:
+ arr = arr.transpose(*(fancy_axes + axes))
+
+ # We only have one 'f' index now and arr is transposed accordingly.
+ # Now handle newaxis by reshaping...
+ ax = 0
+ for indx in indices:
+ if indx[0] == 'f':
+ if len(indx) == 1:
+ continue
+ # First of all, reshape arr to combine fancy axes into one:
+ orig_shape = arr.shape
+ orig_slice = orig_shape[ax:ax + len(indx[1:])]
+ arr = arr.reshape((arr.shape[:ax]
+ + (np.prod(orig_slice).astype(int),)
+ + arr.shape[ax + len(indx[1:]):]))
+
+ # Check if broadcasting works
+ res = np.broadcast(*indx[1:])
+ # unfortunately the indices might be out of bounds. So check
+ # that first, and use mode='wrap' then. However only if
+ # there are any indices...
+ if res.size != 0:
+ if error_unless_broadcast_to_empty:
+ raise IndexError
+ for _indx, _size in zip(indx[1:], orig_slice):
+ if _indx.size == 0:
+ continue
+ if np.any(_indx >= _size) or np.any(_indx < -_size):
+ raise IndexError
+ if len(indx[1:]) == len(orig_slice):
+ if np.prod(orig_slice) == 0:
+ # Work around for a crash or IndexError with 'wrap'
+ # in some 0-sized cases.
+ try:
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='raise')
+ except Exception:
+ # This can happen with a 0-sized orig_slice; here it
+ # is a ValueError, but real indexing raises:
+ raise IndexError('invalid index into 0-sized')
+ else:
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='wrap')
+ else:
+ # Maybe never happens...
+ raise ValueError
+ arr = arr.take(mi.ravel(), axis=ax)
+ try:
+ arr = arr.reshape((arr.shape[:ax]
+ + mi.shape
+ + arr.shape[ax+1:]))
+ except ValueError:
+ # too many dimensions, probably
+ raise IndexError
+ ax += mi.ndim
+ continue
+
+ # If we are here, we have a 1D array for take:
+ arr = arr.take(indx[1], axis=ax)
+ ax += 1
+
+ return arr, no_copy
+
+ def _check_multi_index(self, arr, index):
+ """Check a multi index item getting and simple setting.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed, must be a reshaped arange.
+ index : tuple of indexing objects
+ Index being tested.
+ """
+ # Test item getting
+ try:
+ mimic_get, no_copy = self._get_multi_index(arr, index)
+ except Exception as e:
+ if HAS_REFCOUNT:
+ prev_refcount = sys.getrefcount(arr)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
+ if HAS_REFCOUNT:
+ assert_equal(prev_refcount, sys.getrefcount(arr))
+ return
+
+ self._compare_index_result(arr, index, mimic_get, no_copy)
+
+ def _check_single_index(self, arr, index):
+ """Check a single index item getting and simple setting.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to be indexed, must be an arange.
+ index : indexing object
+ Index being tested. Must be a single index and not a tuple
+ of indexing objects (see also `_check_multi_index`).
+ """
+ try:
+ mimic_get, no_copy = self._get_multi_index(arr, (index,))
+ except Exception as e:
+ if HAS_REFCOUNT:
+ prev_refcount = sys.getrefcount(arr)
+ assert_raises(type(e), arr.__getitem__, index)
+ assert_raises(type(e), arr.__setitem__, index, 0)
+ if HAS_REFCOUNT:
+ assert_equal(prev_refcount, sys.getrefcount(arr))
+ return
+
+ self._compare_index_result(arr, index, mimic_get, no_copy)
+
+ def _compare_index_result(self, arr, index, mimic_get, no_copy):
+ """Compare mimicked result to indexing result.
+ """
+ arr = arr.copy()
+ indexed_arr = arr[index]
+ assert_array_equal(indexed_arr, mimic_get)
+ # Check if we got a view, unless it's a 0-sized or 0-d array.
+ # (then it's not a view, and that does not matter)
+ if indexed_arr.size != 0 and indexed_arr.ndim != 0:
+ assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
+ # Check reference count of the original array
+ if HAS_REFCOUNT:
+ if no_copy:
+ # refcount increases by one:
+ assert_equal(sys.getrefcount(arr), 3)
+ else:
+ assert_equal(sys.getrefcount(arr), 2)
+
+ # Test non-broadcast setitem:
+ b = arr.copy()
+ b[index] = mimic_get + 1000
+ if b.size == 0:
+ return # nothing to compare here...
+ if no_copy and indexed_arr.ndim != 0:
+ # change indexed_arr in-place to manipulate original:
+ indexed_arr += 1000
+ assert_array_equal(arr, b)
+ return
+ # Use the fact that the array is originally an arange:
+ arr.flat[indexed_arr.ravel()] += 1000
+ assert_array_equal(arr, b)
+
+ def test_boolean(self):
+ a = np.array(5)
+ assert_equal(a[np.array(True)], 5)
+ a[np.array(True)] = 1
+ assert_equal(a, 1)
+ # NOTE: This is different from normal broadcasting, as
+ # arr[boolean_array] works like a multi index, i.e. it is
+ # aligned to the left. This is probably correct for
+ # consistency with arr[boolean_array,]; also, no broadcasting
+ # is done at all.
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool),))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
+
+ def test_multidim(self):
+ # Automatically test combinations with complex indexes on 2nd (or 1st)
+ # spot and the simple ones in one other spot.
+ with warnings.catch_warnings():
+ # This is so that np.array(True) is not accepted in a full integer
+ # index, when running the file separately.
+ warnings.filterwarnings('error', '', DeprecationWarning)
+ warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
+
+ def isskip(idx):
+ return isinstance(idx, str) and idx == "skip"
+
+ for simple_pos in [0, 2, 3]:
+ tocheck = [self.fill_indices, self.complex_indices,
+ self.fill_indices, self.fill_indices]
+ tocheck[simple_pos] = self.simple_indices
+ for index in product(*tocheck):
+ index = tuple(i for i in index if not isskip(i))
+ self._check_multi_index(self.a, index)
+ self._check_multi_index(self.b, index)
+
+ # Check very simple item getting:
+ self._check_multi_index(self.a, (0, 0, 0, 0))
+ self._check_multi_index(self.b, (0, 0, 0, 0))
+ # Also check (simple cases of) too many indices:
+ assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
+ assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
+ assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
+ assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
+
+ def test_1d(self):
+ a = np.arange(10)
+ for index in self.complex_indices:
+ self._check_single_index(a, index)
+
+class TestFloatNonIntegerArgument:
+ """
+ These test that ``TypeError`` is raised when you try to use
+ non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
+ and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
+
+ """
+ def test_valid_indexing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[np.array([0])]
+ a[[0, 0]]
+ a[:, [0, 0]]
+ a[:, 0,:]
+ a[:,:,:]
+
+ def test_valid_slicing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[::]
+ a[0:]
+ a[:2]
+ a[0:2]
+ a[::2]
+ a[1::2]
+ a[:2:2]
+ a[1:2:2]
+
+ def test_non_integer_argument_errors(self):
+ a = np.array([[5]])
+
+ assert_raises(TypeError, np.reshape, a, (1., 1., -1))
+ assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+ assert_raises(TypeError, np.take, a, [0], 1.)
+ assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+
+ def test_non_integer_sequence_multiplication(self):
+ # NumPy scalar sequence multiply should not work with non-integers
+ def mult(a, b):
+ return a * b
+
+ assert_raises(TypeError, mult, [1], np.float_(3))
+ # following should be OK
+ mult([1], np.int_(3))
+
+ def test_reduce_axis_float_index(self):
+ d = np.zeros((3,3,3))
+ assert_raises(TypeError, np.min, d, 0.5)
+ assert_raises(TypeError, np.min, d, (0.5, 1))
+ assert_raises(TypeError, np.min, d, (1, 2.2))
+ assert_raises(TypeError, np.min, d, (.2, 1.2))
+
+
+class TestBooleanIndexing:
+ # Using a boolean as integer argument/indexing is an error.
+ def test_bool_as_int_argument_errors(self):
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, np.reshape, a, (True, -1))
+ assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
+ # Note that operator.index(np.array(True)) does not work, a boolean
+ # array is thus also deprecated, but not with the same message:
+ assert_raises(TypeError, operator.index, np.array(True))
+ assert_warns(DeprecationWarning, operator.index, np.True_)
+ assert_raises(TypeError, np.take, args=(a, [0], False))
+
+ def test_boolean_indexing_weirdness(self):
+ # Weird boolean indexing things
+ a = np.ones((2, 3, 4))
+ assert a[False, True, ...].shape == (0, 2, 3, 4)
+ assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
+ assert_raises(IndexError, lambda: a[False, [0, 1], ...])
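+ # (scalar booleans act as advanced indices of shape (0,) for False
+ # and (1,) for True; they broadcast with the other advanced indices,
+ # which is where the leading 0 and the (1, 2) shapes above come from)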
+
+ def test_boolean_indexing_fast_path(self):
+ # These used to either give the wrong error, or incorrectly give no
+ # error.
+ a = np.ones((3, 3))
+
+ # This used to incorrectly work (and give an array of shape (0,))
+ idx1 = np.array([[False]*9])
+ assert_raises_regex(IndexError,
+ "boolean index did not match indexed array along dimension 0; "
+ "dimension is 3 but corresponding boolean dimension is 1",
+ lambda: a[idx1])
+
+ # This used to incorrectly give a ValueError: operands could not be broadcast together
+ idx2 = np.array([[False]*8 + [True]])
+ assert_raises_regex(IndexError,
+ "boolean index did not match indexed array along dimension 0; "
+ "dimension is 3 but corresponding boolean dimension is 1",
+ lambda: a[idx2])
+
+ # This is the same as it used to be. The above two should work like this.
+ idx3 = np.array([[False]*10])
+ assert_raises_regex(IndexError,
+ "boolean index did not match indexed array along dimension 0; "
+ "dimension is 3 but corresponding boolean dimension is 1",
+ lambda: a[idx3])
+
+ # This used to give ValueError: non-broadcastable operand
+ a = np.ones((1, 1, 2))
+ idx = np.array([[[True], [False]]])
+ assert_raises_regex(IndexError,
+ "boolean index did not match indexed array along dimension 1; "
+ "dimension is 1 but corresponding boolean dimension is 2",
+ lambda: a[idx])
+
+
+class TestArrayToIndexDeprecation:
+ """Creating an index from array not 0-D is an error.
+
+ """
+ def test_array_to_index_error(self):
+ # Creating an index from a non-0-d array now raises TypeError outright:
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, operator.index, np.array([1]))
+ assert_raises(TypeError, np.reshape, a, (a, -1))
+ assert_raises(TypeError, np.take, a, [0], a)
+
+
+class TestNonIntegerArrayLike:
+ """Tests that array_likes only valid if can safely cast to integer.
+
+ For instance, lists give IndexError when they cannot be safely cast to
+ an integer.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+
+ assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
+ assert_raises(IndexError, a.__getitem__, (['1', '2'],))
+
+ # The following is valid
+ a.__getitem__([])
+
+
+class TestMultipleEllipsisError:
+ """An index can only have a single ellipsis.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+ assert_raises(IndexError, lambda: a[..., ...])
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
+
+
+class TestCApiAccess:
+ def test_getitem(self):
+ subscript = functools.partial(array_indexing, 0)
+
+ # 0-d arrays don't work:
+ assert_raises(IndexError, subscript, np.ones(()), 0)
+ # Out of bound values:
+ assert_raises(IndexError, subscript, np.ones(10), 11)
+ assert_raises(IndexError, subscript, np.ones(10), -11)
+ assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
+ assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
+
+ a = np.arange(10)
+ assert_array_equal(a[4], subscript(a, 4))
+ a = a.reshape(5, 2)
+ assert_array_equal(a[-4], subscript(a, -4))
+
+ def test_setitem(self):
+ assign = functools.partial(array_indexing, 1)
+
+ # Deletion is impossible:
+ assert_raises(ValueError, assign, np.ones(10), 0)
+ # 0-d arrays don't work:
+ assert_raises(IndexError, assign, np.ones(()), 0, 0)
+ # Out of bound values:
+ assert_raises(IndexError, assign, np.ones(10), 11, 0)
+ assert_raises(IndexError, assign, np.ones(10), -11, 0)
+ assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
+ assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
+
+ a = np.arange(10)
+ assign(a, 4, 10)
+ assert_(a[4] == 10)
+
+ a = a.reshape(5, 2)
+ assign(a, 4, 10)
+ assert_array_equal(a[-1], [10, 10])
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py
new file mode 100644
index 00000000..3c35245a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_item_selection.py
@@ -0,0 +1,86 @@
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, HAS_REFCOUNT
+ )
+
+
+class TestTake:
+ def test_simple(self):
+ a = [[1, 2], [3, 4]]
+ a_str = [[b'1', b'2'], [b'3', b'4']]
+ modes = ['raise', 'wrap', 'clip']
+ indices = [-1, 4]
+ index_arrays = [np.empty(0, dtype=np.intp),
+ np.empty(tuple(), dtype=np.intp),
+ np.empty((1, 1), dtype=np.intp)]
+ real_indices = {'raise': {-1: 1, 4: IndexError},
+ 'wrap': {-1: 1, 4: 0},
+ 'clip': {-1: 0, 4: 1}}
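+ # Illustrative sketch (not part of the upstream test) of what the
+ # modes mean for index 4 on a length-2 axis:
+ # >>> np.take(np.array([10, 20]), [4], mode='wrap') # 4 % 2 -> 0
+ # array([10])
+ # >>> np.take(np.array([10, 20]), [4], mode='clip') # clipped -> 1
+ # array([20])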
+ # Currently all types but object use the same generated function, so
+ # it should not be necessary to test all of them. However, also test
+ # a non-refcounted struct on top of object, which has a size that
+ # hits the default (non-specialized) path.
+ types = int, object, np.dtype([('', 'i2', 3)])
+ for t in types:
+ # ta works, even if the array may be odd if buffer interface is used
+ ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
+ tresult = list(ta.T.copy())
+ for index_array in index_arrays:
+ if index_array.size != 0:
+ tresult[0].shape = (2,) + index_array.shape
+ tresult[1].shape = (2,) + index_array.shape
+ for mode in modes:
+ for index in indices:
+ real_index = real_indices[mode][index]
+ if real_index is IndexError and index_array.size != 0:
+ index_array.put(0, index)
+ assert_raises(IndexError, ta.take, index_array,
+ mode=mode, axis=1)
+ elif index_array.size != 0:
+ index_array.put(0, index)
+ res = ta.take(index_array, mode=mode, axis=1)
+ assert_array_equal(res, tresult[real_index])
+ else:
+ res = ta.take(index_array, mode=mode, axis=1)
+ assert_(res.shape == (2,) + index_array.shape)
+
+ def test_refcounting(self):
+ objects = [object() for i in range(10)]
+ for mode in ('raise', 'clip', 'wrap'):
+ a = np.array(objects)
+ b = np.array([2, 2, 4, 5, 3, 5])
+ a.take(b, out=a[:6], mode=mode)
+ del a
+ if HAS_REFCOUNT:
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
+ # not contiguous, example:
+ a = np.array(objects * 2)[::2]
+ a.take(b, out=a[:6], mode=mode)
+ del a
+ if HAS_REFCOUNT:
+ assert_(all(sys.getrefcount(o) == 3 for o in objects))
+
+ def test_unicode_mode(self):
+ d = np.arange(10)
+ k = b'\xc3\xa4'.decode("UTF8")
+ assert_raises(ValueError, d.take, 5, mode=k)
+
+ def test_empty_partition(self):
+ # In reference to github issue #6530
+ a_original = np.array([0, 2, 4, 6, 8, 10])
+ a = a_original.copy()
+
+ # An empty partition should be a successful no-op
+ a.partition(np.array([], dtype=np.int16))
+
+ assert_array_equal(a, a_original)
+
+ def test_empty_argpartition(self):
+ # In reference to github issue #6530
+ a = np.array([0, 2, 4, 6, 8, 10])
+ a = a.argpartition(np.array([], dtype=np.int16))
+
+ b = np.array([0, 1, 2, 3, 4, 5])
+ assert_array_equal(a, b)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py
new file mode 100644
index 00000000..725de19b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_limited_api.py
@@ -0,0 +1,44 @@
+import os
+import shutil
+import subprocess
+import sys
+import sysconfig
+import pytest
+
+from numpy.testing import IS_WASM
+
+
+@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
+@pytest.mark.xfail(
+ sysconfig.get_config_var("Py_DEBUG"),
+ reason=(
+ "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, "
+ "and Py_REF_DEBUG"
+ ),
+)
+def test_limited_api(tmp_path):
+ """Test building a third-party C extension with the limited API."""
+ # Based in part on test_cython from random.tests.test_extending
+
+ here = os.path.dirname(__file__)
+ ext_dir = os.path.join(here, "examples", "limited_api")
+
+ cytest = str(tmp_path / "limited_api")
+
+ shutil.copytree(ext_dir, cytest)
+ # build the examples and "install" them into a temporary directory
+
+ install_log = str(tmp_path / "tmp_install_log.txt")
+ subprocess.check_output(
+ [
+ sys.executable,
+ "setup.py",
+ "build",
+ "install",
+ "--prefix", str(tmp_path / "installdir"),
+ "--single-version-externally-managed",
+ "--record",
+ install_log,
+ ],
+ cwd=cytest,
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py
new file mode 100644
index 00000000..1a54e62d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_longdouble.py
@@ -0,0 +1,370 @@
+import warnings
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
+ temppath,
+ )
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+
+LD_INFO = np.finfo(np.longdouble)
+longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
+
+
+_o = 1 + LD_INFO.eps
+string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
+del _o
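+# (If this conversion is inaccurate, the platform's string-to-long-double
+# parsing presumably falls back to double precision, e.g. for lack of
+# strtold_l; several tests below skip in that case.)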
+
+
+def test_scalar_extraction():
+ """Confirm that extracting a value doesn't convert to python float"""
+ o = 1 + LD_INFO.eps
+ a = np.array([o, o, o])
+ assert_equal(a[1], o)
+
+
+# Conversions string -> long double
+
+# 0.1 not exactly representable in base 2 floating point.
+repr_precision = len(repr(np.longdouble(0.1)))
+# +2 from macro block starting around line 842 in scalartypes.c.src.
+@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
+ reason="repr precision not enough to show eps")
+def test_repr_roundtrip():
+ # We will only see eps in repr if within printing precision.
+ o = 1 + LD_INFO.eps
+ assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_repr_roundtrip_bytes():
+ o = 1 + LD_INFO.eps
+ assert_equal(np.longdouble(repr(o).encode("ascii")), o)
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
+def test_array_and_stringlike_roundtrip(strtype):
+ """
+ Test that string representations of long-double roundtrip both
+ for array casting and scalar coercion, see also gh-15608.
+ """
+ o = 1 + LD_INFO.eps
+
+ if strtype in (np.bytes_, bytes):
+ o_str = strtype(repr(o).encode("ascii"))
+ else:
+ o_str = strtype(repr(o))
+
+ # Test that `o` is correctly coerced from the string-like
+ assert o == np.longdouble(o_str)
+
+ # Test that arrays also roundtrip correctly:
+ o_strarr = np.asarray([o] * 3, dtype=strtype)
+ assert (o == o_strarr.astype(np.longdouble)).all()
+
+ # And array coercion and casting to string give the same as scalar repr:
+ assert (o_strarr == o_str).all()
+ assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
+
+
+def test_bogus_string():
+ assert_raises(ValueError, np.longdouble, "spam")
+ assert_raises(ValueError, np.longdouble, "1.0 flub")
+
+
+@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
+def test_fromstring():
+ o = 1 + LD_INFO.eps
+ s = (" " + repr(o))*5
+ a = np.array([o]*5)
+ assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
+ err_msg="reading '%s'" % s)
+
+
+def test_fromstring_complex():
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing between separator
+ assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
+ np.array([1., 2., 3., 4.]))
+ # Real component not specified
+ assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
+ np.array([1.j, -2.j, 3.j, 40.j]))
+ # Both components specified
+ assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
+ np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+ # Spaces at wrong places
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
+ np.array([1j]))
+
+
+def test_fromstring_bogus():
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+ np.array([1., 2., 3.]))
+
+
+def test_fromstring_empty():
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("xxxxx", sep="x"),
+ np.array([]))
+
+
+def test_fromstring_missing():
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+ np.array([1]))
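+
+
+def _frombuffer_alternative_demo():
+    # Illustrative sketch (not part of the original suite): the fromstring
+    # tests above all use the deprecated text-parsing mode (sep given); for
+    # binary data, np.frombuffer is the supported replacement.
+    buf = np.arange(4, dtype=np.float64).tobytes()
+    return np.frombuffer(buf, dtype=np.float64)   # array([0., 1., 2., 3.])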
+
+
+class TestFileBased:
+
+ ldbl = 1 + LD_INFO.eps
+ tgt = np.array([ldbl]*5)
+ out = ''.join([repr(t) + '\n' for t in tgt])
+
+ def test_fromfile_bogus(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1. 2. 3. flop 4.\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=float, sep=" ")
+ assert_equal(res, np.array([1., 2., 3.]))
+
+ def test_fromfile_complex(self):
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing between separator and only real component specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1, 2 , 3 ,4\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1., 2., 3., 4.]))
+
+ # Real component not specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j, -2j, 3j, 4e1j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
+
+ # Both components specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+2 j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+ 2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1 +2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+j\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j+1\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j]))
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_fromfile(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.fromfile(path, dtype=np.longdouble, sep="\n")
+ assert_equal(res, self.tgt)
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_genfromtxt(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.genfromtxt(path, dtype=np.longdouble)
+ assert_equal(res, self.tgt)
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_loadtxt(self):
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write(self.out)
+ res = np.loadtxt(path, dtype=np.longdouble)
+ assert_equal(res, self.tgt)
+
+ @pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+ def test_tofile_roundtrip(self):
+ with temppath() as path:
+ self.tgt.tofile(path, sep=" ")
+ res = np.fromfile(path, dtype=np.longdouble, sep=" ")
+ assert_equal(res, self.tgt)
+
+
+# Conversions long double -> string
+
+
+def test_repr_exact():
+ o = 1 + LD_INFO.eps
+ assert_(repr(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+def test_format():
+ o = 1 + LD_INFO.eps
+ assert_("{0:.40g}".format(o) != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+def test_percent():
+ o = 1 + LD_INFO.eps
+ assert_("%.40g" % o != '1')
+
+
+@pytest.mark.skipif(longdouble_longer_than_double,
+ reason="array repr problem")
+@pytest.mark.skipif(string_to_longdouble_inaccurate,
+ reason="Need strtold_l")
+def test_array_repr():
+ o = 1 + LD_INFO.eps
+ a = np.array([o])
+ b = np.array([1], dtype=np.longdouble)
+ if not np.all(a != b):
+ raise ValueError("precision loss creating arrays")
+ assert_(repr(a) != repr(b))
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+ def test_repr_roundtrip_foreign(self):
+ o = 1.5
+ assert_equal(o, np.longdouble(repr(o)))
+
+ def test_fromstring_foreign_repr(self):
+ f = 1.234
+ a = np.fromstring(repr(f), dtype=float, sep=" ")
+ assert_equal(a[0], f)
+
+ def test_fromstring_best_effort_float(self):
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+ np.array([1.]))
+
+ def test_fromstring_best_effort(self):
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+ np.array([1.]))
+
+ def test_fromstring_foreign(self):
+ s = "1.234"
+ a = np.fromstring(s, dtype=np.longdouble, sep=" ")
+ assert_equal(a[0], np.longdouble(s))
+
+ def test_fromstring_foreign_sep(self):
+ a = np.array([1, 2, 3, 4])
+ b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
+ assert_array_equal(a, b)
+
+ def test_fromstring_foreign_value(self):
+ with assert_warns(DeprecationWarning):
+ b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+ assert_array_equal(b[0], 1)
+
+
+@pytest.mark.parametrize("int_val", [
+ # cases discussed in gh-10723
+ # and gh-9968
+ 2 ** 1024, 0])
+def test_longdouble_from_int(int_val):
+ # for issue gh-9968
+ str_val = str(int_val)
+ # we'll expect a RuntimeWarning on platforms
+ # with np.longdouble equivalent to np.double
+ # for large integer input
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ # can be inf==inf on some platforms
+ assert np.longdouble(int_val) == np.longdouble(str_val)
+ # we can't directly compare the int and
+ # max longdouble value on all platforms
+ if np.allclose(np.finfo(np.longdouble).max,
+ np.finfo(np.double).max) and w:
+ assert w[0].category is RuntimeWarning
+
+@pytest.mark.parametrize("bool_val", [
+ True, False])
+def test_longdouble_from_bool(bool_val):
+ assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
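To see the platform dependence tested above: where longdouble is merely an
alias for double (e.g. MSVC builds), converting 2**1024 overflows to inf with
a RuntimeWarning; on 80-bit extended-precision platforms the conversion
succeeds. A sketch of the probe:

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", RuntimeWarning)
        x = np.longdouble(2 ** 1024)
    # x is finite on extended-precision platforms; inf plus a captured
    # RuntimeWarning where longdouble == double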
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_machar.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_machar.py
new file mode 100644
index 00000000..3a66ec51
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_machar.py
@@ -0,0 +1,30 @@
+"""
+Test machar. Given the recent move to hardcoded type data, we may want to get
+rid of both MachAr and this test at some point.
+
+"""
+import pytest
+
+from numpy.core._machar import MachAr
+import numpy.core.numerictypes as ntypes
+from numpy import errstate, array
+
+
+class TestMachAr:
+ def _run_machar_highprec(self):
+ # Instantiate MachAr instance with high enough precision to cause
+ # underflow
+ try:
+ hiprec = ntypes.float96
+ MachAr(lambda v: array(v, hiprec))
+        except AttributeError:
+            pytest.skip("no ntypes.float96 available on this platform")
+
+    def test_underflow(self):
+ # Regression test for #759:
+ # instantiating MachAr for dtype = np.float96 raises spurious warning.
+ with errstate(all='raise'):
+ try:
+ self._run_machar_highprec()
+ except FloatingPointError as e:
+ msg = "Caught %s exception, should not have been raised." % e
+ raise AssertionError(msg)
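Since the type parameters are now hardcoded, everything MachAr used to derive
empirically is available directly from np.finfo; a sketch of the modern
equivalent:

    import numpy as np

    fi = np.finfo(np.float64)
    print(fi.eps, fi.tiny, fi.max)   # machine epsilon, smallest normal, max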
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py
new file mode 100644
index 00000000..d66decfd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_overlap.py
@@ -0,0 +1,931 @@
+import itertools
+import pytest
+
+import numpy as np
+from numpy.core._multiarray_tests import solve_diophantine, internal_overlap
+from numpy.core import _umath_tests
+from numpy.lib.stride_tricks import as_strided
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_array_equal
+ )
+
+
+ndims = 2
+size = 10
+shape = tuple([size] * ndims)
+
+MAY_SHARE_BOUNDS = 0
+MAY_SHARE_EXACT = -1
+
+
+def _indices_for_nelems(nelems):
+ """Returns slices of length nelems, from start onwards, in direction sign."""
+
+ if nelems == 0:
+ return [size // 2] # int index
+
+ res = []
+ for step in (1, 2):
+ for sign in (-1, 1):
+ start = size // 2 - nelems * step * sign // 2
+ stop = start + nelems * step * sign
+ res.append(slice(start, stop, step * sign))
+
+ return res
+
+
+def _indices_for_axis():
+ """Returns (src, dst) pairs of indices."""
+
+ res = []
+ for nelems in (0, 2, 3):
+ ind = _indices_for_nelems(nelems)
+ res.extend(itertools.product(ind, ind)) # all assignments of size "nelems"
+
+ return res
+
+
+def _indices(ndims):
+ """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
+
+ ind = _indices_for_axis()
+ return itertools.product(ind, repeat=ndims)
+
+
+def _check_assignment(srcidx, dstidx):
+ """Check assignment arr[dstidx] = arr[srcidx] works."""
+
+    arr = np.arange(np.prod(shape)).reshape(shape)
+
+ cpy = arr.copy()
+
+ cpy[dstidx] = arr[srcidx]
+ arr[dstidx] = arr[srcidx]
+
+ assert_(np.all(arr == cpy),
+ 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
+
+
+def test_overlapping_assignments():
+ # Test automatically generated assignments which overlap in memory.
+
+ inds = _indices(ndims)
+
+ for ind in inds:
+ srcidx = tuple([a[0] for a in ind])
+ dstidx = tuple([a[1] for a in ind])
+
+ _check_assignment(srcidx, dstidx)
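+
+
+def _overlap_copy_semantics_demo():
+    # Illustrative sketch (not part of the original suite): one concrete
+    # instance of the copy semantics _check_assignment verifies for every
+    # generated index pair.
+    a = np.arange(6)
+    a[1:] = a[:-1]      # source and destination alias; numpy buffers first
+    assert_array_equal(a, [0, 0, 1, 2, 3, 4])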
+
+
+@pytest.mark.slow
+def test_diophantine_fuzz():
+ # Fuzz test the diophantine solver
+ rng = np.random.RandomState(1234)
+
+ max_int = np.iinfo(np.intp).max
+
+ for ndim in range(10):
+ feasible_count = 0
+ infeasible_count = 0
+
+ min_count = 500//(ndim + 1)
+
+ while min(feasible_count, infeasible_count) < min_count:
+ # Ensure big and small integer problems
+ A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
+ U_max = rng.randint(0, 11, dtype=np.intp)**6
+
+ A_max = min(max_int, A_max)
+ U_max = min(max_int-1, U_max)
+
+ A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
+ for j in range(ndim))
+ U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
+ for j in range(ndim))
+
+ b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
+ b = int(rng.randint(-1, b_ub+2, dtype=np.intp))
+
+ if ndim == 0 and feasible_count < min_count:
+ b = 0
+
+ X = solve_diophantine(A, U, b)
+
+ if X is None:
+ # Check the simplified decision problem agrees
+ X_simplified = solve_diophantine(A, U, b, simplify=1)
+ assert_(X_simplified is None, (A, U, b, X_simplified))
+
+ # Check no solution exists (provided the problem is
+ # small enough so that brute force checking doesn't
+ # take too long)
+ ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))
+
+ size = 1
+ for r in ranges:
+ size *= len(r)
+ if size < 100000:
+ assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
+ infeasible_count += 1
+ else:
+ # Check the simplified decision problem agrees
+ X_simplified = solve_diophantine(A, U, b, simplify=1)
+ assert_(X_simplified is not None, (A, U, b, X_simplified))
+
+ # Check validity
+ assert_(sum(a*x for a, x in zip(A, X)) == b)
+ assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
+ feasible_count += 1
+
+
+def test_diophantine_overflow():
+ # Smoke test integer overflow detection
+ max_intp = np.iinfo(np.intp).max
+ max_int64 = np.iinfo(np.int64).max
+
+ if max_int64 <= max_intp:
+ # Check that the algorithm works internally in 128-bit;
+ # solving this problem requires large intermediate numbers
+ A = (max_int64//2, max_int64//2 - 10)
+ U = (max_int64//2, max_int64//2 - 10)
+ b = 2*(max_int64//2) - 10
+
+ assert_equal(solve_diophantine(A, U, b), (1, 1))
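+
+
+def _solve_diophantine_demo():
+    # Illustrative sketch (not part of the original suite): exact overlap
+    # checks reduce to a bounded diophantine problem, i.e. solving
+    #     sum(A[i] * x[i]) == b   with   0 <= x[i] <= U[i].
+    assert solve_diophantine((3, 2), (4, 6), 7) is not None  # e.g. 3*1 + 2*2
+    assert solve_diophantine((3, 2), (4, 6), 1) is None      # no solution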
+
+
+def check_may_share_memory_exact(a, b):
+ got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
+
+ assert_equal(np.may_share_memory(a, b),
+ np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))
+
+ a.fill(0)
+ b.fill(0)
+ a.fill(1)
+ exact = b.any()
+
+ err_msg = ""
+ if got != exact:
+ err_msg = " " + "\n ".join([
+ "base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
+ "shape_a = %r" % (a.shape,),
+ "shape_b = %r" % (b.shape,),
+ "strides_a = %r" % (a.strides,),
+ "strides_b = %r" % (b.strides,),
+ "size_a = %r" % (a.size,),
+ "size_b = %r" % (b.size,)
+ ])
+
+ assert_equal(got, exact, err_msg=err_msg)
+
+
+def test_may_share_memory_manual():
+ # Manual test cases for may_share_memory
+
+ # Base arrays
+ xs0 = [
+ np.zeros([13, 21, 23, 22], dtype=np.int8),
+ np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
+ ]
+
+ # Generate all negative stride combinations
+ xs = []
+ for x in xs0:
+ for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
+ xp = x[ss]
+ xs.append(xp)
+
+ for x in xs:
+ # The default is a simple extent check
+ assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
+ assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))
+
+ # Exact checks
+ check_may_share_memory_exact(x[:,0,:], x[:,1,:])
+ check_may_share_memory_exact(x[:,::7], x[:,3::3])
+
+ try:
+ xp = x.ravel()
+ if xp.flags.owndata:
+ continue
+ xp = xp.view(np.int16)
+ except ValueError:
+ continue
+
+ # 0-size arrays cannot overlap
+ check_may_share_memory_exact(x.ravel()[6:6],
+ xp.reshape(13, 21, 23, 11)[:,::7])
+
+ # Test itemsize is dealt with
+ check_may_share_memory_exact(x[:,::7],
+ xp.reshape(13, 21, 23, 11))
+ check_may_share_memory_exact(x[:,::7],
+ xp.reshape(13, 21, 23, 11)[:,3::3])
+ check_may_share_memory_exact(x.ravel()[6:7],
+ xp.reshape(13, 21, 23, 11)[:,::7])
+
+ # Check unit size
+ x = np.zeros([1], dtype=np.int8)
+ check_may_share_memory_exact(x, x)
+ check_may_share_memory_exact(x, x.copy())
+
+
+def iter_random_view_pairs(x, same_steps=True, equal_size=False):
+ rng = np.random.RandomState(1234)
+
+ if equal_size and same_steps:
+ raise ValueError()
+
+ def random_slice(n, step):
+ start = rng.randint(0, n+1, dtype=np.intp)
+ stop = rng.randint(start, n+1, dtype=np.intp)
+ if rng.randint(0, 2, dtype=np.intp) == 0:
+ stop, start = start, stop
+ step *= -1
+ return slice(start, stop, step)
+
+ def random_slice_fixed_size(n, step, size):
+ start = rng.randint(0, n+1 - size*step)
+ stop = start + (size-1)*step + 1
+ if rng.randint(0, 2) == 0:
+ stop, start = start-1, stop-1
+ if stop < 0:
+ stop = None
+ step *= -1
+ return slice(start, stop, step)
+
+ # First a few regular views
+ yield x, x
+ for j in range(1, 7, 3):
+ yield x[j:], x[:-j]
+ yield x[...,j:], x[...,:-j]
+
+ # An array with zero stride internal overlap
+ strides = list(x.strides)
+ strides[0] = 0
+ xp = as_strided(x, shape=x.shape, strides=strides)
+ yield x, xp
+ yield xp, xp
+
+ # An array with non-zero stride internal overlap
+ strides = list(x.strides)
+ if strides[0] > 1:
+ strides[0] = 1
+ xp = as_strided(x, shape=x.shape, strides=strides)
+ yield x, xp
+ yield xp, xp
+
+ # Then discontiguous views
+ while True:
+ steps = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+ for j in range(x.ndim))
+ s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+
+ t1 = np.arange(x.ndim)
+ rng.shuffle(t1)
+
+ if equal_size:
+ t2 = t1
+ else:
+ t2 = np.arange(x.ndim)
+ rng.shuffle(t2)
+
+ a = x[s1]
+
+ if equal_size:
+ if a.size == 0:
+ continue
+
+ steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
+ if rng.randint(0, 5) == 0 else 1
+ for p, s, pa in zip(x.shape, s1, a.shape))
+ s2 = tuple(random_slice_fixed_size(p, s, pa)
+ for p, s, pa in zip(x.shape, steps2, a.shape))
+ elif same_steps:
+ steps2 = steps
+ else:
+ steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+ for j in range(x.ndim))
+
+ if not equal_size:
+ s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
+
+ a = a.transpose(t1)
+ b = x[s2].transpose(t2)
+
+ yield a, b
+
+
+def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
+ # Check that overlap problems with common strides are solved with
+ # little work.
+ x = np.zeros([17,34,71,97], dtype=np.int16)
+
+ feasible = 0
+ infeasible = 0
+
+ pair_iter = iter_random_view_pairs(x, same_steps)
+
+ while min(feasible, infeasible) < min_count:
+ a, b = next(pair_iter)
+
+ bounds_overlap = np.may_share_memory(a, b)
+ may_share_answer = np.may_share_memory(a, b)
+ easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
+ exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
+
+ if easy_answer != exact_answer:
+ # assert_equal is slow...
+ assert_equal(easy_answer, exact_answer)
+
+ if may_share_answer != bounds_overlap:
+ assert_equal(may_share_answer, bounds_overlap)
+
+ if bounds_overlap:
+ if exact_answer:
+ feasible += 1
+ else:
+ infeasible += 1
+
+
+@pytest.mark.slow
+def test_may_share_memory_easy_fuzz():
+ # Check that overlap problems with common strides are always
+ # solved with little work.
+
+ check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
+ same_steps=True,
+ min_count=2000)
+
+
+@pytest.mark.slow
+def test_may_share_memory_harder_fuzz():
+ # Overlap problems with not necessarily common strides take more
+ # work.
+ #
+ # The work bound below can't be reduced much. Harder problems can
+ # also exist but not be detected here, as the set of problems
+ # comes from RNG.
+
+ check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
+ same_steps=False,
+ min_count=2000)
+
+
+def test_shares_memory_api():
+ x = np.zeros([4, 5, 6], dtype=np.int8)
+
+ assert_equal(np.shares_memory(x, x), True)
+ assert_equal(np.shares_memory(x, x.copy()), False)
+
+ a = x[:,::2,::3]
+ b = x[:,::3,::2]
+ assert_equal(np.shares_memory(a, b), True)
+ assert_equal(np.shares_memory(a, b, max_work=None), True)
+ assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
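+
+
+def _bounds_vs_exact_demo():
+    # Illustrative sketch (not part of the original suite): may_share_memory
+    # defaults to the cheap bounds check, while shares_memory runs the exact
+    # solver (which is why it can raise TooHardError under a max_work cap).
+    x = np.zeros(10)
+    a, b = x[::2], x[1::2]             # interleaved views of one buffer
+    assert np.may_share_memory(a, b)   # True: the extents overlap
+    assert not np.shares_memory(a, b)  # False: no element is shared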
+
+
+def test_may_share_memory_bad_max_work():
+ x = np.zeros([1])
+ assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
+ assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
+
+
+def test_internal_overlap_diophantine():
+ def check(A, U, exists=None):
+ X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
+
+ if exists is None:
+ exists = (X is not None)
+
+ if X is not None:
+ assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
+ assert_(all(0 <= x <= u for x, u in zip(X, U)))
+ assert_(any(x != u//2 for x, u in zip(X, U)))
+
+ if exists:
+ assert_(X is not None, repr(X))
+ else:
+ assert_(X is None, repr(X))
+
+ # Smoke tests
+ check((3, 2), (2*2, 3*2), exists=True)
+ check((3*2, 2), (15*2, (3-1)*2), exists=False)
+
+
+def test_internal_overlap_slices():
+ # Slicing an array never generates internal overlap
+
+ x = np.zeros([17,34,71,97], dtype=np.int16)
+
+ rng = np.random.RandomState(1234)
+
+ def random_slice(n, step):
+ start = rng.randint(0, n+1, dtype=np.intp)
+ stop = rng.randint(start, n+1, dtype=np.intp)
+ if rng.randint(0, 2, dtype=np.intp) == 0:
+ stop, start = start, stop
+ step *= -1
+ return slice(start, stop, step)
+
+ cases = 0
+ min_count = 5000
+
+ while cases < min_count:
+ steps = tuple(rng.randint(1, 11, dtype=np.intp)
+ if rng.randint(0, 5, dtype=np.intp) == 0 else 1
+ for j in range(x.ndim))
+ t1 = np.arange(x.ndim)
+ rng.shuffle(t1)
+ s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
+ a = x[s1].transpose(t1)
+
+ assert_(not internal_overlap(a))
+ cases += 1
+
+
+def check_internal_overlap(a, manual_expected=None):
+ got = internal_overlap(a)
+
+ # Brute-force check
+ m = set()
+ ranges = tuple(range(n) for n in a.shape)
+ for v in itertools.product(*ranges):
+ offset = sum(s*w for s, w in zip(a.strides, v))
+ if offset in m:
+ expected = True
+ break
+ else:
+ m.add(offset)
+ else:
+ expected = False
+
+ # Compare
+ if got != expected:
+ assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
+ if manual_expected is not None and expected != manual_expected:
+ assert_equal(expected, manual_expected)
+ return got
+
+
+def test_internal_overlap_manual():
+ # Stride tricks can construct arrays with internal overlap
+
+    # We don't care about memory bounds; the array is never actually
+    # read from or written to.
+ x = np.arange(1).astype(np.int8)
+
+ # Check low-dimensional special cases
+
+ check_internal_overlap(x, False) # 1-dim
+ check_internal_overlap(x.reshape([]), False) # 0-dim
+
+ a = as_strided(x, strides=(3, 4), shape=(4, 4))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(3, 4), shape=(5, 4))
+ check_internal_overlap(a, True)
+
+ a = as_strided(x, strides=(0,), shape=(0,))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(0,), shape=(1,))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(0,), shape=(2,))
+ check_internal_overlap(a, True)
+
+ a = as_strided(x, strides=(0, -9993), shape=(87, 22))
+ check_internal_overlap(a, True)
+
+ a = as_strided(x, strides=(0, -9993), shape=(1, 22))
+ check_internal_overlap(a, False)
+
+ a = as_strided(x, strides=(0, -9993), shape=(0, 22))
+ check_internal_overlap(a, False)
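+
+
+def _internal_overlap_demo():
+    # Illustrative sketch (not part of the original suite): the smallest
+    # internally-overlapping array; such views must never be written through.
+    x = np.zeros(1, dtype=np.int8)
+    a = as_strided(x, strides=(0,), shape=(3,))  # all elements alias x[0]
+    x[0] = 7
+    assert_array_equal(a, [7, 7, 7])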
+
+
+def test_internal_overlap_fuzz():
+ # Fuzz check; the brute-force check is fairly slow
+
+ x = np.arange(1).astype(np.int8)
+
+ overlap = 0
+ no_overlap = 0
+ min_count = 100
+
+ rng = np.random.RandomState(1234)
+
+ while min(overlap, no_overlap) < min_count:
+ ndim = rng.randint(1, 4, dtype=np.intp)
+
+ strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
+ for j in range(ndim))
+ shape = tuple(rng.randint(1, 30, dtype=np.intp)
+ for j in range(ndim))
+
+ a = as_strided(x, strides=strides, shape=shape)
+ result = check_internal_overlap(a)
+
+ if result:
+ overlap += 1
+ else:
+ no_overlap += 1
+
+
+def test_non_ndarray_inputs():
+ # Regression check for gh-5604
+
+ class MyArray:
+ def __init__(self, data):
+ self.data = data
+
+ @property
+ def __array_interface__(self):
+ return self.data.__array_interface__
+
+ class MyArray2:
+ def __init__(self, data):
+ self.data = data
+
+ def __array__(self):
+ return self.data
+
+ for cls in [MyArray, MyArray2]:
+ x = np.arange(5)
+
+ assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
+ assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
+
+ assert_(np.shares_memory(cls(x[1::3]), x[::2]))
+ assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
+
+
+def view_element_first_byte(x):
+ """Construct an array viewing the first byte of each element of `x`"""
+ from numpy.lib.stride_tricks import DummyArray
+ interface = dict(x.__array_interface__)
+ interface['typestr'] = '|b1'
+ interface['descr'] = [('', '|b1')]
+ return np.asarray(DummyArray(interface, x))
+
+
+def assert_copy_equivalent(operation, args, out, **kwargs):
+ """
+ Check that operation(*args, out=out) produces results
+ equivalent to out[...] = operation(*args, out=out.copy())
+ """
+
+ kwargs['out'] = out
+ kwargs2 = dict(kwargs)
+ kwargs2['out'] = out.copy()
+
+ out_orig = out.copy()
+ out[...] = operation(*args, **kwargs2)
+ expected = out.copy()
+ out[...] = out_orig
+
+ got = operation(*args, **kwargs).copy()
+
+ if (got != expected).any():
+ assert_equal(got, expected)
+
+
+class TestUFunc:
+ """
+ Test ufunc call memory overlap handling
+ """
+
+ def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
+ count=5000):
+ shapes = [7, 13, 8, 21, 29, 32]
+
+ rng = np.random.RandomState(1234)
+
+ for ndim in range(1, 6):
+ x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
+
+ it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+ min_count = count // (ndim + 1)**2
+
+ overlapping = 0
+ while overlapping < min_count:
+ a, b = next(it)
+
+ a_orig = a.copy()
+ b_orig = b.copy()
+
+ if get_out_axis_size is None:
+ assert_copy_equivalent(operation, [a], out=b)
+
+ if np.shares_memory(a, b):
+ overlapping += 1
+ else:
+ for axis in itertools.chain(range(ndim), [None]):
+ a[...] = a_orig
+ b[...] = b_orig
+
+ # Determine size for reduction axis (None if scalar)
+ outsize, scalarize = get_out_axis_size(a, b, axis)
+ if outsize == 'skip':
+ continue
+
+ # Slice b to get an output array of the correct size
+ sl = [slice(None)] * ndim
+ if axis is None:
+ if outsize is None:
+ sl = [slice(0, 1)] + [0]*(ndim - 1)
+ else:
+ sl = [slice(0, outsize)] + [0]*(ndim - 1)
+ else:
+ if outsize is None:
+ k = b.shape[axis]//2
+ if ndim == 1:
+ sl[axis] = slice(k, k + 1)
+ else:
+ sl[axis] = k
+ else:
+ assert b.shape[axis] >= outsize
+ sl[axis] = slice(0, outsize)
+ b_out = b[tuple(sl)]
+
+ if scalarize:
+ b_out = b_out.reshape([])
+
+ if np.shares_memory(a, b_out):
+ overlapping += 1
+
+ # Check result
+ assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
+
+ @pytest.mark.slow
+ def test_unary_ufunc_call_fuzz(self):
+ self.check_unary_fuzz(np.invert, None, np.int16)
+
+ @pytest.mark.slow
+ def test_unary_ufunc_call_complex_fuzz(self):
+ # Complex typically has a smaller alignment than itemsize
+ self.check_unary_fuzz(np.negative, None, np.complex128, count=500)
+
+ def test_binary_ufunc_accumulate_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ if axis is None:
+ if a.ndim == 1:
+ return a.size, False
+ else:
+ return 'skip', False # accumulate doesn't support this
+ else:
+ return a.shape[axis], False
+
+ self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduce_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ return None, (axis is None or a.ndim == 1)
+
+ self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduceat_fuzz(self):
+ def get_out_axis_size(a, b, axis):
+ if axis is None:
+ if a.ndim == 1:
+ return a.size, False
+ else:
+ return 'skip', False # reduceat doesn't support this
+ else:
+ return a.shape[axis], False
+
+ def do_reduceat(a, out, axis):
+ if axis is None:
+ size = len(a)
+ step = size//len(out)
+ else:
+ size = a.shape[axis]
+ step = a.shape[axis] // out.shape[axis]
+ idx = np.arange(0, size, step)
+ return np.add.reduceat(a, idx, out=out, axis=axis)
+
+ self.check_unary_fuzz(do_reduceat, get_out_axis_size,
+ dtype=np.int16, count=500)
+
+ def test_binary_ufunc_reduceat_manual(self):
+ def check(ufunc, a, ind, out):
+ c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
+ c2 = ufunc.reduceat(a, ind, out=out)
+ assert_array_equal(c1, c2)
+
+ # Exactly same input/output arrays
+ a = np.arange(10000, dtype=np.int16)
+ check(np.add, a, a[::-1].copy(), a)
+
+ # Overlap with index
+ a = np.arange(10000, dtype=np.int16)
+ check(np.add, a, a[::-1], a)
+
+ @pytest.mark.slow
+ def test_unary_gufunc_fuzz(self):
+ shapes = [7, 13, 8, 21, 29, 32]
+ gufunc = _umath_tests.euclidean_pdist
+
+ rng = np.random.RandomState(1234)
+
+ for ndim in range(2, 6):
+ x = rng.rand(*shapes[:ndim])
+
+ it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
+
+ min_count = 500 // (ndim + 1)**2
+
+ overlapping = 0
+ while overlapping < min_count:
+ a, b = next(it)
+
+ if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
+ continue
+
+                # Ensure the shapes are such that euclidean_pdist is happy
+ if b.shape[-1] > b.shape[-2]:
+ b = b[...,0,:]
+ else:
+ b = b[...,:,0]
+
+ n = a.shape[-2]
+ p = n * (n - 1) // 2
+ if p <= b.shape[-1] and p > 0:
+ b = b[...,:p]
+ else:
+ n = max(2, int(np.sqrt(b.shape[-1]))//2)
+ p = n * (n - 1) // 2
+ a = a[...,:n,:]
+ b = b[...,:p]
+
+ # Call
+ if np.shares_memory(a, b):
+ overlapping += 1
+
+ with np.errstate(over='ignore', invalid='ignore'):
+ assert_copy_equivalent(gufunc, [a], out=b)
+
+ def test_ufunc_at_manual(self):
+ def check(ufunc, a, ind, b=None):
+ a0 = a.copy()
+ if b is None:
+ ufunc.at(a0, ind.copy())
+ c1 = a0.copy()
+ ufunc.at(a, ind)
+ c2 = a.copy()
+ else:
+ ufunc.at(a0, ind.copy(), b.copy())
+ c1 = a0.copy()
+ ufunc.at(a, ind, b)
+ c2 = a.copy()
+ assert_array_equal(c1, c2)
+
+ # Overlap with index
+ a = np.arange(10000, dtype=np.int16)
+ check(np.invert, a[::-1], a)
+
+ # Overlap with second data array
+ a = np.arange(100, dtype=np.int16)
+ ind = np.arange(0, 100, 2, dtype=np.int16)
+ check(np.add, a, ind, a[25:75])
+
+ def test_unary_ufunc_1d_manual(self):
+ # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)
+
+ def check(a, b):
+ a_orig = a.copy()
+ b_orig = b.copy()
+
+ b0 = b.copy()
+ c1 = ufunc(a, out=b0)
+ c2 = ufunc(a, out=b)
+ assert_array_equal(c1, c2)
+
+ # Trigger "fancy ufunc loop" code path
+ mask = view_element_first_byte(b).view(np.bool_)
+
+ a[...] = a_orig
+ b[...] = b_orig
+ c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
+
+ a[...] = a_orig
+ b[...] = b_orig
+ c2 = ufunc(a, out=b, where=mask.copy()).copy()
+
+ # Also, mask overlapping with output
+ a[...] = a_orig
+ b[...] = b_orig
+ c3 = ufunc(a, out=b, where=mask).copy()
+
+ assert_array_equal(c1, c2)
+ assert_array_equal(c1, c3)
+
+ dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
+ np.float64, np.complex64, np.complex128]
+ dtypes = [np.dtype(x) for x in dtypes]
+
+ for dtype in dtypes:
+ if np.issubdtype(dtype, np.integer):
+ ufunc = np.invert
+ else:
+ ufunc = np.reciprocal
+
+ n = 1000
+ k = 10
+ indices = [
+ np.index_exp[:n],
+ np.index_exp[k:k+n],
+ np.index_exp[n-1::-1],
+ np.index_exp[k+n-1:k-1:-1],
+ np.index_exp[:2*n:2],
+ np.index_exp[k:k+2*n:2],
+ np.index_exp[2*n-1::-2],
+ np.index_exp[k+2*n-1:k-1:-2],
+ ]
+
+ for xi, yi in itertools.product(indices, indices):
+ v = np.arange(1, 1 + n*2 + k, dtype=dtype)
+ x = v[xi]
+ y = v[yi]
+
+ with np.errstate(all='ignore'):
+ check(x, y)
+
+ # Scalar cases
+ check(x[:1], y)
+ check(x[-1:], y)
+ check(x[:1].reshape([]), y)
+ check(x[-1:].reshape([]), y)
+
+ def test_unary_ufunc_where_same(self):
+ # Check behavior at wheremask overlap
+ ufunc = np.invert
+
+ def check(a, out, mask):
+ c1 = ufunc(a, out=out.copy(), where=mask.copy())
+ c2 = ufunc(a, out=out, where=mask)
+ assert_array_equal(c1, c2)
+
+ # Check behavior with same input and output arrays
+ x = np.arange(100).astype(np.bool_)
+ check(x, x, x)
+ check(x, x.copy(), x)
+ check(x, x, x.copy())
+
+ @pytest.mark.slow
+ def test_binary_ufunc_1d_manual(self):
+ ufunc = np.add
+
+ def check(a, b, c):
+ c0 = c.copy()
+ c1 = ufunc(a, b, out=c0)
+ c2 = ufunc(a, b, out=c)
+ assert_array_equal(c1, c2)
+
+ for dtype in [np.int8, np.int16, np.int32, np.int64,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ # Check different data dependency orders
+
+ n = 1000
+ k = 10
+
+ indices = []
+ for p in [1, 2]:
+ indices.extend([
+ np.index_exp[:p*n:p],
+ np.index_exp[k:k+p*n:p],
+ np.index_exp[p*n-1::-p],
+ np.index_exp[k+p*n-1:k-1:-p],
+ ])
+
+ for x, y, z in itertools.product(indices, indices, indices):
+ v = np.arange(6*n).astype(dtype)
+ x = v[x]
+ y = v[y]
+ z = v[z]
+
+ check(x, y, z)
+
+ # Scalar cases
+ check(x[:1], y, z)
+ check(x[-1:], y, z)
+ check(x[:1].reshape([]), y, z)
+ check(x[-1:].reshape([]), y, z)
+ check(x, y[:1], z)
+ check(x, y[-1:], z)
+ check(x, y[:1].reshape([]), z)
+ check(x, y[-1:].reshape([]), z)
+
+ def test_inplace_op_simple_manual(self):
+ rng = np.random.RandomState(1234)
+ x = rng.rand(200, 200) # bigger than bufsize
+
+ x += x.T
+ assert_array_equal(x - x.T, 0)
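That last test distills the point of the module: with overlap detection, an
in-place operation on aliasing operands must match the out-of-place result.
As a standalone sketch:

    import numpy as np

    x = np.arange(9.0).reshape(3, 3)
    expected = x + x.T
    x += x.T                    # operands alias; numpy buffers as needed
    assert np.array_equal(x, expected) and np.allclose(x, x.T)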
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py
new file mode 100644
index 00000000..d5dfbc38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_mem_policy.py
@@ -0,0 +1,425 @@
+import asyncio
+import gc
+import os
+import pytest
+import numpy as np
+import threading
+import warnings
+from numpy.testing import extbuild, assert_warns, IS_WASM
+import sys
+
+
+@pytest.fixture
+def get_module(tmp_path):
+ """ Add a memory policy that returns a false pointer 64 bytes into the
+ actual allocation, and fill the prefix with some text. Then check at each
+ memory manipulation that the prefix exists, to make sure all alloc/realloc/
+ free/calloc go via the functions here.
+ """
+ if sys.platform.startswith('cygwin'):
+ pytest.skip('link fails on cygwin')
+ if IS_WASM:
+ pytest.skip("Can't build module inside Wasm")
+ functions = [
+ ("get_default_policy", "METH_NOARGS", """
+ Py_INCREF(PyDataMem_DefaultHandler);
+ return PyDataMem_DefaultHandler;
+ """),
+ ("set_secret_data_policy", "METH_NOARGS", """
+ PyObject *secret_data =
+ PyCapsule_New(&secret_data_handler, "mem_handler", NULL);
+ if (secret_data == NULL) {
+ return NULL;
+ }
+ PyObject *old = PyDataMem_SetHandler(secret_data);
+ Py_DECREF(secret_data);
+ return old;
+ """),
+ ("set_old_policy", "METH_O", """
+ PyObject *old;
+ if (args != NULL && PyCapsule_CheckExact(args)) {
+ old = PyDataMem_SetHandler(args);
+ }
+ else {
+ old = PyDataMem_SetHandler(NULL);
+ }
+ return old;
+ """),
+ ("get_array", "METH_NOARGS", """
+ char *buf = (char *)malloc(20);
+ npy_intp dims[1];
+ dims[0] = 20;
+ PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
+ return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL,
+ buf, NPY_ARRAY_WRITEABLE, NULL);
+ """),
+ ("set_own", "METH_O", """
+ if (!PyArray_Check(args)) {
+ PyErr_SetString(PyExc_ValueError,
+ "need an ndarray");
+ return NULL;
+ }
+ PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA);
+ // Maybe try this too?
+            // PyArray_BASE((PyArrayObject *)args) = NULL;
+ Py_RETURN_NONE;
+ """),
+ ("get_array_with_base", "METH_NOARGS", """
+ char *buf = (char *)malloc(20);
+ npy_intp dims[1];
+ dims[0] = 20;
+ PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
+ PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims,
+ NULL, buf,
+ NPY_ARRAY_WRITEABLE, NULL);
+ if (arr == NULL) return NULL;
+ PyObject *obj = PyCapsule_New(buf, "buf capsule",
+ (PyCapsule_Destructor)&warn_on_free);
+ if (obj == NULL) {
+ Py_DECREF(arr);
+ return NULL;
+ }
+ if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) {
+ Py_DECREF(arr);
+ Py_DECREF(obj);
+ return NULL;
+ }
+ return arr;
+
+ """),
+ ]
+ prologue = '''
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+ #include <numpy/arrayobject.h>
+ /*
+ * This struct allows the dynamic configuration of the allocator funcs
+ * of the `secret_data_allocator`. It is provided here for
+ * demonstration purposes, as a valid `ctx` use-case scenario.
+ */
+ typedef struct {
+ void *(*malloc)(size_t);
+ void *(*calloc)(size_t, size_t);
+ void *(*realloc)(void *, size_t);
+ void (*free)(void *);
+ } SecretDataAllocatorFuncs;
+
+ NPY_NO_EXPORT void *
+ shift_alloc(void *ctx, size_t sz) {
+ SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+ char *real = (char *)funcs->malloc(sz + 64);
+ if (real == NULL) {
+ return NULL;
+ }
+ snprintf(real, 64, "originally allocated %ld", (unsigned long)sz);
+ return (void *)(real + 64);
+ }
+ NPY_NO_EXPORT void *
+ shift_zero(void *ctx, size_t sz, size_t cnt) {
+ SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+ char *real = (char *)funcs->calloc(sz + 64, cnt);
+ if (real == NULL) {
+ return NULL;
+ }
+ snprintf(real, 64, "originally allocated %ld via zero",
+ (unsigned long)sz);
+ return (void *)(real + 64);
+ }
+ NPY_NO_EXPORT void
+ shift_free(void *ctx, void * p, npy_uintp sz) {
+ SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+ if (p == NULL) {
+ return ;
+ }
+ char *real = (char *)p - 64;
+ if (strncmp(real, "originally allocated", 20) != 0) {
+ fprintf(stdout, "uh-oh, unmatched shift_free, "
+ "no appropriate prefix\\n");
+ /* Make C runtime crash by calling free on the wrong address */
+ funcs->free((char *)p + 10);
+ /* funcs->free(real); */
+ }
+ else {
+                npy_uintp i = (npy_uintp)atoi(real + 20);
+                if (i != sz) {
+                    fprintf(stderr, "uh-oh, unmatched shift_free"
+                            "(ptr, %lu) but allocated %lu\\n",
+                            (unsigned long)sz, (unsigned long)i);
+ /* This happens in some places, only print */
+ funcs->free(real);
+ }
+ else {
+ funcs->free(real);
+ }
+ }
+ }
+ NPY_NO_EXPORT void *
+ shift_realloc(void *ctx, void * p, npy_uintp sz) {
+ SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
+ if (p != NULL) {
+ char *real = (char *)p - 64;
+ if (strncmp(real, "originally allocated", 20) != 0) {
+ fprintf(stdout, "uh-oh, unmatched shift_realloc\\n");
+ return realloc(p, sz);
+ }
+ return (void *)((char *)funcs->realloc(real, sz + 64) + 64);
+ }
+ else {
+ char *real = (char *)funcs->realloc(p, sz + 64);
+ if (real == NULL) {
+ return NULL;
+ }
+ snprintf(real, 64, "originally allocated "
+ "%ld via realloc", (unsigned long)sz);
+ return (void *)(real + 64);
+ }
+ }
+ /* As an example, we use the standard {m|c|re}alloc/free funcs. */
+ static SecretDataAllocatorFuncs secret_data_handler_ctx = {
+ malloc,
+ calloc,
+ realloc,
+ free
+ };
+ static PyDataMem_Handler secret_data_handler = {
+ "secret_data_allocator",
+ 1,
+ {
+ &secret_data_handler_ctx, /* ctx */
+ shift_alloc, /* malloc */
+ shift_zero, /* calloc */
+ shift_realloc, /* realloc */
+ shift_free /* free */
+ }
+ };
+ void warn_on_free(void *capsule) {
+ PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1);
+ void * obj = PyCapsule_GetPointer(capsule,
+ PyCapsule_GetName(capsule));
+ free(obj);
+ };
+ '''
+ more_init = "import_array();"
+ try:
+ import mem_policy
+ return mem_policy
+ except ImportError:
+ pass
+ # if it does not exist, build and load it
+ return extbuild.build_and_import_extension('mem_policy',
+ functions,
+ prologue=prologue,
+ include_dirs=[np.get_include()],
+ build_dir=tmp_path,
+ more_init=more_init)
+
+
+def test_set_policy(get_module):
+
+ get_handler_name = np.core.multiarray.get_handler_name
+ get_handler_version = np.core.multiarray.get_handler_version
+ orig_policy_name = get_handler_name()
+
+ a = np.arange(10).reshape((2, 5)) # a doesn't own its own data
+ assert get_handler_name(a) is None
+ assert get_handler_version(a) is None
+ assert get_handler_name(a.base) == orig_policy_name
+ assert get_handler_version(a.base) == 1
+
+ orig_policy = get_module.set_secret_data_policy()
+
+ b = np.arange(10).reshape((2, 5)) # b doesn't own its own data
+ assert get_handler_name(b) is None
+ assert get_handler_version(b) is None
+ assert get_handler_name(b.base) == 'secret_data_allocator'
+ assert get_handler_version(b.base) == 1
+
+ if orig_policy_name == 'default_allocator':
+ get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL)
+ assert get_handler_name() == 'default_allocator'
+ else:
+ get_module.set_old_policy(orig_policy)
+ assert get_handler_name() == orig_policy_name
+
+
+def test_default_policy_singleton(get_module):
+ get_handler_name = np.core.multiarray.get_handler_name
+
+ # set the policy to default
+ orig_policy = get_module.set_old_policy(None)
+
+ assert get_handler_name() == 'default_allocator'
+
+ # re-set the policy to default
+ def_policy_1 = get_module.set_old_policy(None)
+
+ assert get_handler_name() == 'default_allocator'
+
+ # set the policy to original
+ def_policy_2 = get_module.set_old_policy(orig_policy)
+
+ # since default policy is a singleton,
+ # these should be the same object
+ assert def_policy_1 is def_policy_2 is get_module.get_default_policy()
+
+
+def test_policy_propagation(get_module):
+ # The memory policy goes hand-in-hand with flags.owndata
+
+ class MyArr(np.ndarray):
+ pass
+
+ get_handler_name = np.core.multiarray.get_handler_name
+ orig_policy_name = get_handler_name()
+ a = np.arange(10).view(MyArr).reshape((2, 5))
+ assert get_handler_name(a) is None
+ assert a.flags.owndata is False
+
+ assert get_handler_name(a.base) is None
+ assert a.base.flags.owndata is False
+
+ assert get_handler_name(a.base.base) == orig_policy_name
+ assert a.base.base.flags.owndata is True
+
+
+async def concurrent_context1(get_module, orig_policy_name, event):
+ if orig_policy_name == 'default_allocator':
+ get_module.set_secret_data_policy()
+ assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
+ else:
+ get_module.set_old_policy(None)
+ assert np.core.multiarray.get_handler_name() == 'default_allocator'
+ event.set()
+
+
+async def concurrent_context2(get_module, orig_policy_name, event):
+ await event.wait()
+ # the policy is not affected by changes in parallel contexts
+ assert np.core.multiarray.get_handler_name() == orig_policy_name
+ # change policy in the child context
+ if orig_policy_name == 'default_allocator':
+ get_module.set_secret_data_policy()
+ assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
+ else:
+ get_module.set_old_policy(None)
+ assert np.core.multiarray.get_handler_name() == 'default_allocator'
+
+
+async def async_test_context_locality(get_module):
+ orig_policy_name = np.core.multiarray.get_handler_name()
+
+ event = asyncio.Event()
+ # the child contexts inherit the parent policy
+ concurrent_task1 = asyncio.create_task(
+ concurrent_context1(get_module, orig_policy_name, event))
+ concurrent_task2 = asyncio.create_task(
+ concurrent_context2(get_module, orig_policy_name, event))
+ await concurrent_task1
+ await concurrent_task2
+
+ # the parent context is not affected by child policy changes
+ assert np.core.multiarray.get_handler_name() == orig_policy_name
+
+
+def test_context_locality(get_module):
+ if (sys.implementation.name == 'pypy'
+ and sys.pypy_version_info[:3] < (7, 3, 6)):
+ pytest.skip('no context-locality support in PyPy < 7.3.6')
+ asyncio.run(async_test_context_locality(get_module))
+
+
+def concurrent_thread1(get_module, event):
+ get_module.set_secret_data_policy()
+ assert np.core.multiarray.get_handler_name() == 'secret_data_allocator'
+ event.set()
+
+
+def concurrent_thread2(get_module, event):
+ event.wait()
+ # the policy is not affected by changes in parallel threads
+ assert np.core.multiarray.get_handler_name() == 'default_allocator'
+ # change policy in the child thread
+ get_module.set_secret_data_policy()
+
+
+def test_thread_locality(get_module):
+ orig_policy_name = np.core.multiarray.get_handler_name()
+
+ event = threading.Event()
+ # the child threads do not inherit the parent policy
+ concurrent_task1 = threading.Thread(target=concurrent_thread1,
+ args=(get_module, event))
+ concurrent_task2 = threading.Thread(target=concurrent_thread2,
+ args=(get_module, event))
+ concurrent_task1.start()
+ concurrent_task2.start()
+ concurrent_task1.join()
+ concurrent_task2.join()
+
+ # the parent thread is not affected by child policy changes
+ assert np.core.multiarray.get_handler_name() == orig_policy_name
+
+
+@pytest.mark.slow
+def test_new_policy(get_module):
+ a = np.arange(10)
+ orig_policy_name = np.core.multiarray.get_handler_name(a)
+
+ orig_policy = get_module.set_secret_data_policy()
+
+ b = np.arange(10)
+ assert np.core.multiarray.get_handler_name(b) == 'secret_data_allocator'
+
+ # test array manipulation. This is slow
+ if orig_policy_name == 'default_allocator':
+ # when the np.core.test tests recurse into this test, the
+ # policy will be set so this "if" will be false, preventing
+ # infinite recursion
+ #
+ # if needed, debug this by
+    # - running tests with -- -s (to not capture stdout/stderr)
+ # - setting extra_argv=['-vv'] here
+ assert np.core.test('full', verbose=2, extra_argv=['-vv'])
+ # also try the ma tests, the pickling test is quite tricky
+ assert np.ma.test('full', verbose=2, extra_argv=['-vv'])
+
+ get_module.set_old_policy(orig_policy)
+
+ c = np.arange(10)
+ assert np.core.multiarray.get_handler_name(c) == orig_policy_name
+
+@pytest.mark.xfail(sys.implementation.name == "pypy",
+ reason=("bad interaction between getenv and "
+ "os.environ inside pytest"))
+@pytest.mark.parametrize("policy", ["0", "1", None])
+def test_switch_owner(get_module, policy):
+ a = get_module.get_array()
+ assert np.core.multiarray.get_handler_name(a) is None
+ get_module.set_own(a)
+ oldval = os.environ.get('NUMPY_WARN_IF_NO_MEM_POLICY', None)
+ if policy is None:
+ if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ:
+ os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY')
+ else:
+ os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = policy
+ try:
+        # The policy should be NULL, so we have to assume we can call
+        # "free". A RuntimeWarning is given if NUMPY_WARN_IF_NO_MEM_POLICY == "1"
+ if policy == "1":
+ with assert_warns(RuntimeWarning) as w:
+ del a
+ gc.collect()
+ else:
+ del a
+ gc.collect()
+
+ finally:
+ if oldval is None:
+ if 'NUMPY_WARN_IF_NO_MEM_POLICY' in os.environ:
+ os.environ.pop('NUMPY_WARN_IF_NO_MEM_POLICY')
+ else:
+ os.environ['NUMPY_WARN_IF_NO_MEM_POLICY'] = oldval
+
+def test_owner_is_base(get_module):
+ a = get_module.get_array_with_base()
+ with pytest.warns(UserWarning, match='warn_on_free'):
+ del a
+ gc.collect()
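Outside the test module, the handler attached to an array's allocation can be
inspected directly; a sketch using the same introspection helpers these tests
rely on:

    import numpy as np

    a = np.arange(10)                                  # owns its data
    print(np.core.multiarray.get_handler_name(a))      # 'default_allocator'
    print(np.core.multiarray.get_handler_version(a))   # 1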
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py
new file mode 100644
index 00000000..914f86f1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_memmap.py
@@ -0,0 +1,215 @@
+import sys
+import os
+import mmap
+import pytest
+from pathlib import Path
+from tempfile import NamedTemporaryFile, TemporaryFile
+
+from numpy import (
+ memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
+
+from numpy import arange, allclose, asarray
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
+ break_cycles
+ )
+
+class TestMemmap:
+ def setup_method(self):
+ self.tmpfp = NamedTemporaryFile(prefix='mmap')
+ self.shape = (3, 4)
+ self.dtype = 'float32'
+ self.data = arange(12, dtype=self.dtype)
+ self.data.resize(self.shape)
+
+ def teardown_method(self):
+ self.tmpfp.close()
+ self.data = None
+ if IS_PYPY:
+ break_cycles()
+ break_cycles()
+
+ def test_roundtrip(self):
+ # Write data to file
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp[:] = self.data[:]
+ del fp # Test __del__ machinery, which handles cleanup
+
+ # Read data back from file
+ newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
+ shape=self.shape)
+ assert_(allclose(self.data, newfp))
+ assert_array_equal(self.data, newfp)
+ assert_equal(newfp.flags.writeable, False)
+
+ def test_open_with_filename(self, tmp_path):
+ tmpname = tmp_path / 'mmap'
+ fp = memmap(tmpname, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp[:] = self.data[:]
+ del fp
+
+ def test_unnamed_file(self):
+ with TemporaryFile() as f:
+ fp = memmap(f, dtype=self.dtype, shape=self.shape)
+ del fp
+
+ def test_attributes(self):
+ offset = 1
+ mode = "w+"
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
+ shape=self.shape, offset=offset)
+ assert_equal(offset, fp.offset)
+ assert_equal(mode, fp.mode)
+ del fp
+
+ def test_filename(self, tmp_path):
+ tmpname = tmp_path / "mmap"
+ fp = memmap(tmpname, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ abspath = Path(os.path.abspath(tmpname))
+ fp[:] = self.data[:]
+ assert_equal(abspath, fp.filename)
+ b = fp[:1]
+ assert_equal(abspath, b.filename)
+ del b
+ del fp
+
+ def test_path(self, tmp_path):
+ tmpname = tmp_path / "mmap"
+ fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ # os.path.realpath does not resolve symlinks on Windows
+ # see: https://bugs.python.org/issue9949
+ # use Path.resolve, just as memmap class does internally
+ abspath = str(Path(tmpname).resolve())
+ fp[:] = self.data[:]
+ assert_equal(abspath, str(fp.filename.resolve()))
+ b = fp[:1]
+ assert_equal(abspath, str(b.filename.resolve()))
+ del b
+ del fp
+
+ def test_filename_fileobj(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
+ shape=self.shape)
+ assert_equal(fp.filename, self.tmpfp.name)
+
+ @pytest.mark.skipif(sys.platform == 'gnu0',
+ reason="Known to fail on hurd")
+ def test_flush(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp[:] = self.data[:]
+ assert_equal(fp[0], self.data[0])
+ fp.flush()
+
+ def test_del(self):
+ # Make sure a view does not delete the underlying mmap
+ fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ fp_base[0] = 5
+ fp_view = fp_base[0:1]
+ assert_equal(fp_view[0], 5)
+ del fp_view
+ # Should still be able to access and assign values after
+ # deleting the view
+ assert_equal(fp_base[0], 5)
+ fp_base[0] = 6
+ assert_equal(fp_base[0], 6)
+
+ def test_arithmetic_drops_references(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ tmp = (fp + 10)
+ if isinstance(tmp, memmap):
+ assert_(tmp._mmap is not fp._mmap)
+
+ def test_indexing_drops_references(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ tmp = fp[(1, 2), (2, 3)]
+ if isinstance(tmp, memmap):
+ assert_(tmp._mmap is not fp._mmap)
+
+ def test_slicing_keeps_references(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
+ shape=self.shape)
+ assert_(fp[:2, :2]._mmap is fp._mmap)
+
+ def test_view(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ new1 = fp.view()
+ new2 = new1.view()
+ assert_(new1.base is fp)
+ assert_(new2.base is fp)
+ new_array = asarray(fp)
+ assert_(new_array.base is fp)
+
+ def test_ufunc_return_ndarray(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "np.average currently does not preserve")
+ for unary_op in [sum, average, product]:
+ result = unary_op(fp)
+ assert_(isscalar(result))
+ assert_(result.__class__ is self.data[0, 0].__class__)
+
+ assert_(unary_op(fp, axis=0).__class__ is ndarray)
+ assert_(unary_op(fp, axis=1).__class__ is ndarray)
+
+ for binary_op in [add, subtract, multiply]:
+ assert_(binary_op(fp, self.data).__class__ is ndarray)
+ assert_(binary_op(self.data, fp).__class__ is ndarray)
+ assert_(binary_op(fp, fp).__class__ is ndarray)
+
+ fp += 1
+ assert(fp.__class__ is memmap)
+ add(fp, 1, out=fp)
+ assert(fp.__class__ is memmap)
+
+ def test_getitem(self):
+ fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ assert_(fp[1:, :-1].__class__ is memmap)
+ # Fancy indexing returns a copy that is not memmapped
+ assert_(fp[[0, 1]].__class__ is ndarray)
+
+ def test_memmap_subclass(self):
+ class MemmapSubClass(memmap):
+ pass
+
+ fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
+ fp[:] = self.data
+
+ # We keep previous behavior for subclasses of memmap, i.e. the
+ # ufunc and __getitem__ output is never turned into a ndarray
+ assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
+ assert_(sum(fp).__class__ is MemmapSubClass)
+ assert_(fp[1:, :-1].__class__ is MemmapSubClass)
+ assert(fp[[0, 1]].__class__ is MemmapSubClass)
+
+ def test_mmap_offset_greater_than_allocation_granularity(self):
+ size = 5 * mmap.ALLOCATIONGRANULARITY
+ offset = mmap.ALLOCATIONGRANULARITY + 1
+ fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
+ assert_(fp.offset == offset)
+
+ def test_no_shape(self):
+ self.tmpfp.write(b'a'*16)
+ mm = memmap(self.tmpfp, dtype='float64')
+ assert_equal(mm.shape, (2,))
+
+ def test_empty_array(self):
+ # gh-12653
+ with pytest.raises(ValueError, match='empty file'):
+ memmap(self.tmpfp, shape=(0,4), mode='w+')
+
+ self.tmpfp.write(b'\0')
+
+ # ok now the file is not empty
+ memmap(self.tmpfp, shape=(0,4), mode='w+')
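The class above walks the full memmap lifecycle; the core write/flush/read-back
pattern it exercises reduces to this sketch:

    import numpy as np
    from tempfile import NamedTemporaryFile

    with NamedTemporaryFile() as f:
        mm = np.memmap(f, dtype='float32', mode='w+', shape=(3, 4))
        mm[:] = 1.0
        mm.flush()                       # push the data to the backing file
        ro = np.memmap(f, dtype='float32', mode='r', shape=(3, 4))
        assert (ro == 1.0).all() and not ro.flags.writeable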
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py
new file mode 100644
index 00000000..9f113cb2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_multiarray.py
@@ -0,0 +1,9775 @@
+import collections.abc
+import tempfile
+import sys
+import warnings
+import operator
+import io
+import itertools
+import functools
+import ctypes
+import os
+import gc
+import re
+import weakref
+import pytest
+from contextlib import contextmanager
+
+from numpy.compat import pickle
+
+import pathlib
+import builtins
+from decimal import Decimal
+import mmap
+
+import numpy as np
+import numpy.core._multiarray_tests as _multiarray_tests
+from numpy.core._rational_tests import rational
+from numpy.testing import (
+ assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
+ assert_array_equal, assert_raises_regex, assert_array_almost_equal,
+ assert_allclose, IS_PYPY, IS_PYSTON, HAS_REFCOUNT, assert_array_less,
+ runstring, temppath, suppress_warnings, break_cycles,
+ )
+from numpy.testing._private.utils import requires_memory, _no_tracing
+from numpy.core.tests._locales import CommaDecimalPointLocale
+from numpy.lib.recfunctions import repack_fields
+
+# Need to test an object that does not fully implement math interface
+from datetime import timedelta, datetime
+
+
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+ """
+ Allocate a new ndarray with aligned memory.
+
+    The ndarray is guaranteed *not* to be aligned to twice the requested
+    alignment. E.g., if align=4, the result is not aligned to 8. If
+    align=None, dtype.alignment is used."""
+ dtype = np.dtype(dtype)
+ if dtype == np.dtype(object):
+ # Can't do this, fall back to standard allocation (which
+ # should always be sufficiently aligned)
+ if align is not None:
+ raise ValueError("object array alignment not supported")
+ return np.zeros(shape, dtype=dtype, order=order)
+ if align is None:
+ align = dtype.alignment
+ if not hasattr(shape, '__len__'):
+ shape = (shape,)
+ size = functools.reduce(operator.mul, shape) * dtype.itemsize
+ buf = np.empty(size + 2*align + 1, np.uint8)
+
+ ptr = buf.__array_interface__['data'][0]
+ offset = ptr % align
+ if offset != 0:
+ offset = align - offset
+ if (ptr % (2*align)) == 0:
+ offset += align
+
+ # Note: slices producing 0-size arrays do not necessarily change the
+ # data pointer --- so we allocate size+1 bytes and slice the extra one off
+ buf = buf[offset:offset+size+1][:-1]
+ buf.fill(0)
+ data = np.ndarray(shape, dtype, buf, order=order)
+ return data
+
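+# Editor's sketch, not part of the original file: intended use of the helper
+# above. The result starts at a multiple of `align` but deliberately *not*
+# at a multiple of 2*align, so code paths sensitive to alignment are
+# exercised exactly at the requested boundary.
+def _demo_aligned_zeros():
+ arr = _aligned_zeros(16, dtype=np.float64, align=32)
+ ptr = arr.__array_interface__['data'][0]
+ assert ptr % 32 == 0 and ptr % 64 != 0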
+
+class TestFlags:
+ def setup_method(self):
+ self.a = np.arange(10)
+
+ def test_writeable(self):
+ mydict = locals()
+ self.a.flags.writeable = False
+ assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
+ self.a.flags.writeable = True
+ self.a[0] = 5
+ self.a[0] = 0
+
+ def test_writeable_any_base(self):
+ # Ensure that any base being writeable is sufficient to change the flag;
+ # this is especially interesting for arrays from an array interface.
+ arr = np.arange(10)
+
+ class subclass(np.ndarray):
+ pass
+
+ # Create subclass so base will not be collapsed, this is OK to change
+ view1 = arr.view(subclass)
+ view2 = view1[...]
+ arr.flags.writeable = False
+ view2.flags.writeable = False
+ view2.flags.writeable = True # Can be set to True again.
+
+ arr = np.arange(10)
+
+ class frominterface:
+ def __init__(self, arr):
+ self.arr = arr
+ self.__array_interface__ = arr.__array_interface__
+
+ view1 = np.asarray(frominterface(arr)) # wrap an instance, not the class
+ view2 = view1[...]
+ view2.flags.writeable = False
+ view2.flags.writeable = True
+
+ view1.flags.writeable = False
+ view2.flags.writeable = False
+ with assert_raises(ValueError):
+ # Must assume not writeable, since only base is not:
+ view2.flags.writeable = True
+
+ def test_writeable_from_readonly(self):
+ # gh-9440 - make sure fromstring, from buffer on readonly buffers
+ # set writeable False
+ data = b'\x00' * 100
+ vals = np.frombuffer(data, 'B')
+ assert_raises(ValueError, vals.setflags, write=True)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_raises(ValueError, vals.setflags, write=True)
+
+ def test_writeable_from_buffer(self):
+ data = bytearray(b'\x00' * 100)
+ vals = np.frombuffer(data, 'B')
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+
+ @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
+ def test_writeable_pickle(self):
+ import pickle
+ # Small arrays will be copied without setting base.
+ # See condition for using PyArray_SetBaseObject in
+ # array_setstate.
+ a = np.arange(1000)
+ for v in range(pickle.HIGHEST_PROTOCOL):
+ vals = pickle.loads(pickle.dumps(a, v))
+ assert_(vals.flags.writeable)
+ assert_(isinstance(vals.base, bytes))
+
+ def test_writeable_from_c_data(self):
+ # Test that the writeable flag can be changed for an array wrapping
+ # low level C-data, but not owning its data.
+ # Also check that changing it from Python is deprecated.
+ from numpy.core._multiarray_tests import get_c_wrapping_array
+
+ arr_writeable = get_c_wrapping_array(True)
+ assert not arr_writeable.flags.owndata
+ assert arr_writeable.flags.writeable
+ view = arr_writeable[...]
+
+ # Toggling the writeable flag works on the view:
+ view.flags.writeable = False
+ assert not view.flags.writeable
+ view.flags.writeable = True
+ assert view.flags.writeable
+ # Flag can be unset on the arr_writeable:
+ arr_writeable.flags.writeable = False
+
+ arr_readonly = get_c_wrapping_array(False)
+ assert not arr_readonly.flags.owndata
+ assert not arr_readonly.flags.writeable
+
+ for arr in [arr_writeable, arr_readonly]:
+ view = arr[...]
+ view.flags.writeable = False # make sure it is readonly
+ arr.flags.writeable = False
+ assert not arr.flags.writeable
+
+ with assert_raises(ValueError):
+ view.flags.writeable = True
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ with assert_raises(DeprecationWarning):
+ arr.flags.writeable = True
+
+ with assert_warns(DeprecationWarning):
+ arr.flags.writeable = True
+
+ def test_warnonwrite(self):
+ a = np.arange(10)
+ a.flags._warn_on_write = True
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always')
+ a[1] = 10
+ a[2] = 10
+ # only warn once
+ assert_(len(w) == 1)
+
+ @pytest.mark.parametrize(["flag", "flag_value", "writeable"],
+ [("writeable", True, True),
+ # Delete _warn_on_write after deprecation and simplify
+ # the parameterization:
+ ("_warn_on_write", True, False),
+ ("writeable", False, False)])
+ def test_readonly_flag_protocols(self, flag, flag_value, writeable):
+ a = np.arange(10)
+ setattr(a.flags, flag, flag_value)
+
+ class MyArr():
+ __array_struct__ = a.__array_struct__
+
+ assert memoryview(a).readonly is not writeable
+ assert a.__array_interface__['data'][1] is not writeable
+ assert np.asarray(MyArr()).flags.writeable is writeable
+
+ def test_otherflags(self):
+ assert_equal(self.a.flags.carray, True)
+ assert_equal(self.a.flags['C'], True)
+ assert_equal(self.a.flags.farray, False)
+ assert_equal(self.a.flags.behaved, True)
+ assert_equal(self.a.flags.fnc, False)
+ assert_equal(self.a.flags.forc, True)
+ assert_equal(self.a.flags.owndata, True)
+ assert_equal(self.a.flags.writeable, True)
+ assert_equal(self.a.flags.aligned, True)
+ assert_equal(self.a.flags.writebackifcopy, False)
+ assert_equal(self.a.flags['X'], False)
+ assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
+
+ def test_string_align(self):
+ a = np.zeros(4, dtype=np.dtype('|S4'))
+ assert_(a.flags.aligned)
+ # itemsizes that are not a power of two are accessed byte-wise and thus
+ # considered aligned
+ a = np.zeros(5, dtype=np.dtype('|S5'))
+ assert_(a.flags.aligned)
+
+ def test_void_align(self):
+ a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
+ assert_(a.flags.aligned)
+
+
+class TestHash:
+ # see #3793
+ def test_int(self):
+ for st, ut, s in [(np.int8, np.uint8, 8),
+ (np.int16, np.uint16, 16),
+ (np.int32, np.uint32, 32),
+ (np.int64, np.uint64, 64)]:
+ for i in range(1, s):
+ assert_equal(hash(st(-2**i)), hash(-2**i),
+ err_msg="%r: -2**%d" % (st, i))
+ assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
+ err_msg="%r: 2**%d" % (st, i - 1))
+ assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
+ err_msg="%r: 2**%d - 1" % (st, i))
+
+ i = max(i - 1, 1)
+ assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
+ err_msg="%r: 2**%d" % (ut, i - 1))
+ assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
+ err_msg="%r: 2**%d - 1" % (ut, i))
+
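+# Editor's sketch, not part of the original file: the invariant checked in
+# TestHash is what lets NumPy scalars and Python ints interchange as dict
+# keys.
+def _demo_hash_interchange():
+ d = {np.int64(7): "seven"}
+ assert d[7] == "seven" # equal values hash equally, so the lookup hits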
+
+class TestAttributes:
+ def setup_method(self):
+ self.one = np.arange(10)
+ self.two = np.arange(20).reshape(4, 5)
+ self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
+
+ def test_attributes(self):
+ assert_equal(self.one.shape, (10,))
+ assert_equal(self.two.shape, (4, 5))
+ assert_equal(self.three.shape, (2, 5, 6))
+ self.three.shape = (10, 3, 2)
+ assert_equal(self.three.shape, (10, 3, 2))
+ self.three.shape = (2, 5, 6)
+ assert_equal(self.one.strides, (self.one.itemsize,))
+ num = self.two.itemsize
+ assert_equal(self.two.strides, (5*num, num))
+ num = self.three.itemsize
+ assert_equal(self.three.strides, (30*num, 6*num, num))
+ assert_equal(self.one.ndim, 1)
+ assert_equal(self.two.ndim, 2)
+ assert_equal(self.three.ndim, 3)
+ num = self.two.itemsize
+ assert_equal(self.two.size, 20)
+ assert_equal(self.two.nbytes, 20*num)
+ assert_equal(self.two.itemsize, self.two.dtype.itemsize)
+ assert_equal(self.two.base, np.arange(20))
+
+ def test_dtypeattr(self):
+ assert_equal(self.one.dtype, np.dtype(np.int_))
+ assert_equal(self.three.dtype, np.dtype(np.float_))
+ assert_equal(self.one.dtype.char, 'l')
+ assert_equal(self.three.dtype.char, 'd')
+ assert_(self.three.dtype.str[0] in '<>')
+ assert_equal(self.one.dtype.str[1], 'i')
+ assert_equal(self.three.dtype.str[1], 'f')
+
+ def test_int_subclassing(self):
+ # Regression test for https://github.com/numpy/numpy/pull/3526
+
+ numpy_int = np.int_(0)
+
+ # int_ doesn't inherit from Python int, because it's not fixed-width
+ assert_(not isinstance(numpy_int, int))
+
+ def test_stridesattr(self):
+ x = self.one
+
+ def make_array(size, offset, strides):
+ return np.ndarray(size, buffer=x, dtype=int,
+ offset=offset*x.itemsize,
+ strides=strides*x.itemsize)
+
+ assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(ValueError, make_array, 8, 3, 1)
+ assert_equal(make_array(8, 3, 0), np.array([3]*8))
+ # Check behavior reported in gh-2503:
+ assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
+ make_array(0, 0, 10)
+
+ def test_set_stridesattr(self):
+ x = self.one
+
+ def make_array(size, offset, strides):
+ try:
+ r = np.ndarray([size], dtype=int, buffer=x,
+ offset=offset*x.itemsize)
+ except Exception as e:
+ raise RuntimeError(e)
+ r.strides = strides * x.itemsize
+ return r
+
+ assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
+ assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(RuntimeError, make_array, 8, 3, 1)
+ # Check that the true extent of the array is used.
+ # Test relies on as_strided base not exposing a buffer.
+ x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
+
+ def set_strides(arr, strides):
+ arr.strides = strides
+
+ assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+
+ # Test for offset calculations:
+ x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
+ shape=(10,), strides=(-1,))
+ assert_raises(ValueError, set_strides, x[::-1], -1)
+ a = x[::-1]
+ a.strides = 1
+ a[::2].strides = 2
+
+ # test 0d
+ arr_0d = np.array(0)
+ arr_0d.strides = ()
+ assert_raises(TypeError, set_strides, arr_0d, None)
+
+ def test_fill(self):
+ for t in "?bhilqpBHILQPfdgFDGO":
+ x = np.empty((3, 2, 1), t)
+ y = np.empty((3, 2, 1), t)
+ x.fill(1)
+ y[...] = 1
+ assert_equal(x, y)
+
+ def test_fill_max_uint64(self):
+ x = np.empty((3, 2, 1), dtype=np.uint64)
+ y = np.empty((3, 2, 1), dtype=np.uint64)
+ value = 2**64 - 1
+ y[...] = value
+ x.fill(value)
+ assert_array_equal(x, y)
+
+ def test_fill_struct_array(self):
+ # Filling from a scalar
+ x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
+ x.fill(x[0])
+ assert_equal(x['f1'][1], x['f1'][0])
+ # Filling from a tuple that can be converted
+ # to a scalar
+ x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
+ x.fill((3.5, -2))
+ assert_array_equal(x['a'], [3.5, 3.5])
+ assert_array_equal(x['b'], [-2, -2])
+
+ def test_fill_readonly(self):
+ # gh-22922
+ a = np.zeros(11)
+ a.setflags(write=False)
+ with pytest.raises(ValueError, match=".*read-only"):
+ a.fill(0)
+
+
+class TestArrayConstruction:
+ def test_array(self):
+ d = np.ones(6)
+ r = np.array([d, d])
+ assert_equal(r, np.ones((2, 6)))
+
+ d = np.ones(6)
+ tgt = np.ones((2, 6))
+ r = np.array([d, d])
+ assert_equal(r, tgt)
+ tgt[1] = 2
+ r = np.array([d, d + 1])
+ assert_equal(r, tgt)
+
+ d = np.ones(6)
+ r = np.array([[d, d]])
+ assert_equal(r, np.ones((1, 2, 6)))
+
+ d = np.ones(6)
+ r = np.array([[d, d], [d, d]])
+ assert_equal(r, np.ones((2, 2, 6)))
+
+ d = np.ones((6, 6))
+ r = np.array([d, d])
+ assert_equal(r, np.ones((2, 6, 6)))
+
+ d = np.ones((6, ))
+ r = np.array([[d, d + 1], d + 2], dtype=object)
+ assert_equal(len(r), 2)
+ assert_equal(r[0], [d, d + 1])
+ assert_equal(r[1], d + 2)
+
+ tgt = np.ones((2, 3), dtype=bool)
+ tgt[0, 2] = False
+ tgt[1, 0:2] = False
+ r = np.array([[True, True, False], [False, False, True]])
+ assert_equal(r, tgt)
+ r = np.array([[True, False], [True, False], [False, True]])
+ assert_equal(r, tgt.T)
+
+ def test_array_empty(self):
+ assert_raises(TypeError, np.array)
+
+ def test_0d_array_shape(self):
+ assert np.ones(np.array(3)).shape == (3,)
+
+ def test_array_copy_false(self):
+ d = np.array([1, 2, 3])
+ e = np.array(d, copy=False)
+ d[1] = 3
+ assert_array_equal(e, [1, 3, 3])
+ e = np.array(d, copy=False, order='F')
+ d[1] = 4
+ assert_array_equal(e, [1, 4, 3])
+ e[2] = 7
+ assert_array_equal(d, [1, 4, 7])
+
+ def test_array_copy_true(self):
+ d = np.array([[1,2,3], [1, 2, 3]])
+ e = np.array(d, copy=True)
+ d[0, 1] = 3
+ e[0, 2] = -7
+ assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
+ assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
+ e = np.array(d, copy=True, order='F')
+ d[0, 1] = 5
+ e[0, 2] = 7
+ assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
+ assert_array_equal(d, [[1, 5, 3], [1,2,3]])
+
+ def test_array_cont(self):
+ d = np.ones(10)[::2]
+ assert_(np.ascontiguousarray(d).flags.c_contiguous)
+ assert_(np.ascontiguousarray(d).flags.f_contiguous)
+ assert_(np.asfortranarray(d).flags.c_contiguous)
+ assert_(np.asfortranarray(d).flags.f_contiguous)
+ d = np.ones((10, 10))[::2,::2]
+ assert_(np.ascontiguousarray(d).flags.c_contiguous)
+ assert_(np.asfortranarray(d).flags.f_contiguous)
+
+ @pytest.mark.parametrize("func",
+ [np.array,
+ np.asarray,
+ np.asanyarray,
+ np.ascontiguousarray,
+ np.asfortranarray])
+ def test_bad_arguments_error(self, func):
+ with pytest.raises(TypeError):
+ func(3, dtype="bad dtype")
+ with pytest.raises(TypeError):
+ func() # missing arguments
+ with pytest.raises(TypeError):
+ func(1, 2, 3, 4, 5, 6, 7, 8) # too many arguments
+
+ @pytest.mark.parametrize("func",
+ [np.array,
+ np.asarray,
+ np.asanyarray,
+ np.ascontiguousarray,
+ np.asfortranarray])
+ def test_array_as_keyword(self, func):
+ # This should likely be made positional only, but do not change
+ # the name accidentally.
+ if func is np.array:
+ func(object=3)
+ else:
+ func(a=3)
+
+
+class TestAssignment:
+ def test_assignment_broadcasting(self):
+ a = np.arange(6).reshape(2, 3)
+
+ # Broadcasting the input to the output
+ a[...] = np.arange(3)
+ assert_equal(a, [[0, 1, 2], [0, 1, 2]])
+ a[...] = np.arange(2).reshape(2, 1)
+ assert_equal(a, [[0, 0, 0], [1, 1, 1]])
+
+ # For compatibility with <= 1.5, a limited version of broadcasting
+ # the output to the input.
+ #
+ # This behavior is inconsistent with NumPy broadcasting
+ # in general, because it only uses one of the two broadcasting
+ # rules (adding a new "1" dimension to the left of the shape),
+ # applied to the output instead of an input. In NumPy 2.0, this kind
+ # of broadcasting assignment will likely be disallowed.
+ a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
+ assert_equal(a, [[5, 4, 3], [2, 1, 0]])
+ # The other type of broadcasting would require a reduction operation.
+
+ def assign(a, b):
+ a[...] = b
+
+ assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
+
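+ # Editor's sketch, not part of the original suite: the legacy rule only
+ # strips leading 1-sized dimensions from the assigned value; any axis
+ # that would need an actual reduction still raises.
+ def test_assignment_broadcasting_sketch(self):
+ a = np.zeros((2, 3))
+ a[...] = np.ones((1, 2, 3)) # leading 1 is dropped: allowed
+ assert_equal(a, np.ones((2, 3)))
+ with pytest.raises(ValueError):
+ a[...] = np.ones((2, 2, 3)) # needs a reduction: rejected
+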
+ def test_assignment_errors(self):
+ # Address issue #2276
+ class C:
+ pass
+ a = np.zeros(1)
+
+ def assign(v):
+ a[0] = v
+
+ assert_raises((AttributeError, TypeError), assign, C())
+ assert_raises(ValueError, assign, [1])
+
+ def test_unicode_assignment(self):
+ # gh-5049
+ from numpy.core.numeric import set_string_function
+
+ @contextmanager
+ def inject_str(s):
+ """ replace ndarray.__str__ temporarily """
+ set_string_function(lambda x: s, repr=False)
+ try:
+ yield
+ finally:
+ set_string_function(None, repr=False)
+
+ a1d = np.array(['test'])
+ a0d = np.array('done')
+ with inject_str('bad'):
+ a1d[0] = a0d # previously this would invoke __str__
+ assert_equal(a1d[0], 'done')
+
+ # this would crash for the same reason
+ np.array([np.array('\xe5\xe4\xf6')])
+
+ def test_stringlike_empty_list(self):
+ # gh-8902
+ u = np.array(['done'])
+ b = np.array([b'done'])
+
+ class bad_sequence:
+ def __getitem__(self): pass
+ def __len__(self): raise RuntimeError
+
+ assert_raises(ValueError, operator.setitem, u, 0, [])
+ assert_raises(ValueError, operator.setitem, b, 0, [])
+
+ assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
+ assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
+
+ def test_longdouble_assignment(self):
+ # only relevant if longdouble is larger than float
+ # we're looking for loss of precision
+
+ for dtype in (np.longdouble, np.longcomplex):
+ # gh-8902
+ tinyb = np.nextafter(np.longdouble(0), 1).astype(dtype)
+ tinya = np.nextafter(np.longdouble(0), -1).astype(dtype)
+
+ # construction
+ tiny1d = np.array([tinya])
+ assert_equal(tiny1d[0], tinya)
+
+ # scalar = scalar
+ tiny1d[0] = tinyb
+ assert_equal(tiny1d[0], tinyb)
+
+ # 0d = scalar
+ tiny1d[0, ...] = tinya
+ assert_equal(tiny1d[0], tinya)
+
+ # 0d = 0d
+ tiny1d[0, ...] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
+
+ # scalar = 0d
+ tiny1d[0] = tinyb[...]
+ assert_equal(tiny1d[0], tinyb)
+
+ arr = np.array([np.array(tinya)])
+ assert_equal(arr[0], tinya)
+
+ def test_cast_to_string(self):
+ # cast to str should do "str(scalar)", not "str(scalar.item())"
+ # Example: In python2, str(float) is truncated, so we want to avoid
+ # str(np.float64(...).item()) as this would incorrectly truncate.
+ a = np.zeros(1, dtype='S20')
+ a[:] = np.array(['1.12345678901234567890'], dtype='f8')
+ assert_equal(a[0], b"1.1234567890123457")
+
+
+class TestDtypedescr:
+ def test_construction(self):
+ d1 = np.dtype('i4')
+ assert_equal(d1, np.dtype(np.int32))
+ d2 = np.dtype('f8')
+ assert_equal(d2, np.dtype(np.float64))
+
+ def test_byteorders(self):
+ assert_(np.dtype('<i4') != np.dtype('>i4'))
+ assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
+
+ def test_structured_non_void(self):
+ fields = [('a', '<i2'), ('b', '<i2')]
+ dt_int = np.dtype(('i4', fields))
+ assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
+
+ # gh-9821
+ arr_int = np.zeros(4, dt_int)
+ assert_equal(repr(arr_int),
+ "array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
+
+
+class TestZeroRank:
+ def setup_method(self):
+ self.d = np.array(0), np.array('x', object)
+
+ def test_ellipsis_subscript(self):
+ a, b = self.d
+ assert_equal(a[...], 0)
+ assert_equal(b[...], 'x')
+ assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
+ assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
+
+ def test_empty_subscript(self):
+ a, b = self.d
+ assert_equal(a[()], 0)
+ assert_equal(b[()], 'x')
+ assert_(type(a[()]) is a.dtype.type)
+ assert_(type(b[()]) is str)
+
+ def test_invalid_subscript(self):
+ a, b = self.d
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[0], b)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], b)
+
+ def test_ellipsis_subscript_assignment(self):
+ a, b = self.d
+ a[...] = 42
+ assert_equal(a, 42)
+ b[...] = ''
+ assert_equal(b.item(), '')
+
+ def test_empty_subscript_assignment(self):
+ a, b = self.d
+ a[()] = 42
+ assert_equal(a, 42)
+ b[()] = ''
+ assert_equal(b.item(), '')
+
+ def test_invalid_subscript_assignment(self):
+ a, b = self.d
+
+ def assign(x, i, v):
+ x[i] = v
+
+ assert_raises(IndexError, assign, a, 0, 42)
+ assert_raises(IndexError, assign, b, 0, '')
+ assert_raises(ValueError, assign, a, (), '')
+
+ def test_newaxis(self):
+ a, b = self.d
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
+
+ def test_invalid_newaxis(self):
+ a, b = self.d
+
+ def subscript(x, i):
+ x[i]
+
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
+
+ def test_constructor(self):
+ x = np.ndarray(())
+ x[()] = 5
+ assert_equal(x[()], 5)
+ y = np.ndarray((), buffer=x)
+ y[()] = 6
+ assert_equal(x[()], 6)
+
+ # strides and shape must be the same length
+ with pytest.raises(ValueError):
+ np.ndarray((2,), strides=())
+ with pytest.raises(ValueError):
+ np.ndarray((), strides=(2,))
+
+ def test_output(self):
+ x = np.array(2)
+ assert_raises(ValueError, np.add, x, [1], x)
+
+ def test_real_imag(self):
+ # contiguity checks are for gh-11245
+ x = np.array(1j)
+ xr = x.real
+ xi = x.imag
+
+ assert_equal(xr, np.array(0))
+ assert_(type(xr) is np.ndarray)
+ assert_equal(xr.flags.contiguous, True)
+ assert_equal(xr.flags.f_contiguous, True)
+
+ assert_equal(xi, np.array(1))
+ assert_(type(xi) is np.ndarray)
+ assert_equal(xi.flags.contiguous, True)
+ assert_equal(xi.flags.f_contiguous, True)
+
+
+class TestScalarIndexing:
+ def setup_method(self):
+ self.d = np.array([0, 1])[0]
+
+ def test_ellipsis_subscript(self):
+ a = self.d
+ assert_equal(a[...], 0)
+ assert_equal(a[...].shape, ())
+
+ def test_empty_subscript(self):
+ a = self.d
+ assert_equal(a[()], 0)
+ assert_equal(a[()].shape, ())
+
+ def test_invalid_subscript(self):
+ a = self.d
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
+
+ def test_invalid_subscript_assignment(self):
+ a = self.d
+
+ def assign(x, i, v):
+ x[i] = v
+
+ assert_raises(TypeError, assign, a, 0, 42)
+
+ def test_newaxis(self):
+ a = self.d
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
+
+ def test_invalid_newaxis(self):
+ a = self.d
+
+ def subscript(x, i):
+ x[i]
+
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
+
+ def test_overlapping_assignment(self):
+ # With positive strides
+ a = np.arange(4)
+ a[:-1] = a[1:]
+ assert_equal(a, [1, 2, 3, 3])
+
+ a = np.arange(4)
+ a[1:] = a[:-1]
+ assert_equal(a, [0, 0, 1, 2])
+
+ # With positive and negative strides
+ a = np.arange(4)
+ a[:] = a[::-1]
+ assert_equal(a, [3, 2, 1, 0])
+
+ a = np.arange(6).reshape(2, 3)
+ a[::-1,:] = a[:, ::-1]
+ assert_equal(a, [[5, 4, 3], [2, 1, 0]])
+
+ a = np.arange(6).reshape(2, 3)
+ a[::-1, ::-1] = a[:, ::-1]
+ assert_equal(a, [[3, 4, 5], [0, 1, 2]])
+
+ # With just one element overlapping
+ a = np.arange(5)
+ a[:3] = a[2:]
+ assert_equal(a, [2, 3, 4, 3, 4])
+
+ a = np.arange(5)
+ a[2:] = a[:3]
+ assert_equal(a, [0, 1, 0, 1, 2])
+
+ a = np.arange(5)
+ a[2::-1] = a[2:]
+ assert_equal(a, [4, 3, 2, 3, 4])
+
+ a = np.arange(5)
+ a[2:] = a[2::-1]
+ assert_equal(a, [0, 1, 2, 1, 0])
+
+ a = np.arange(5)
+ a[2::-1] = a[:1:-1]
+ assert_equal(a, [2, 3, 4, 3, 4])
+
+ a = np.arange(5)
+ a[:1:-1] = a[2::-1]
+ assert_equal(a, [0, 1, 0, 1, 2])
+
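+# Editor's sketch, not part of the original file: the overlapping cases in
+# TestScalarIndexing work because NumPy detects overlapping source and
+# destination memory and buffers the copy; np.shares_memory makes the
+# overlap explicit.
+def _demo_overlap_is_buffered():
+ a = np.arange(4)
+ assert np.shares_memory(a[:-1], a[1:]) # views overlap in memory
+ a[:-1] = a[1:] # still well-defined
+ assert a.tolist() == [1, 2, 3, 3]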
+
+class TestCreation:
+ """
+ Test the np.array constructor
+ """
+ def test_from_attribute(self):
+ class x:
+ def __array__(self, dtype=None):
+ pass
+
+ assert_raises(ValueError, np.array, x())
+
+ def test_from_string(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['Float']
+ nstr = ['123', '123']
+ result = np.array([123, 123], dtype=int)
+ for type in types:
+ msg = 'String conversion for %s' % type
+ assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
+
+ def test_void(self):
+ arr = np.array([], dtype='V')
+ assert arr.dtype == 'V8' # current default
+ # Same length scalars (those that go to the same void) work:
+ arr = np.array([b"1234", b"1234"], dtype="V")
+ assert arr.dtype == "V4"
+
+ # Promoting different lengths will fail (pre 1.20 this worked)
+ # by going via S5 and casting to V5.
+ with pytest.raises(TypeError):
+ np.array([b"1234", b"12345"], dtype="V")
+ with pytest.raises(TypeError):
+ np.array([b"12345", b"1234"], dtype="V")
+
+ # Check the same for the casting path:
+ arr = np.array([b"1234", b"1234"], dtype="O").astype("V")
+ assert arr.dtype == "V4"
+ with pytest.raises(TypeError):
+ np.array([b"1234", b"12345"], dtype="O").astype("V")
+
+ @pytest.mark.parametrize("idx",
+ [pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")])
+ def test_structured_void_promotion(self, idx):
+ arr = np.array(
+ [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i')[idx]],
+ dtype="V")
+ assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i"))
+ # The following fails to promote the two dtypes, resulting in an error
+ with pytest.raises(TypeError):
+ np.array(
+ [np.array(1, dtype="i,i")[idx], np.array(2, dtype='i,i,i')[idx]],
+ dtype="V")
+
+
+ def test_too_big_error(self):
+ # 45341 is the smallest integer greater than sqrt(2**31 - 1).
+ # 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
+ # We want to make sure that the square byte array with those dimensions
+ # is too big on 32 or 64 bit systems respectively.
+ if np.iinfo('intp').max == 2**31 - 1:
+ shape = (46341, 46341)
+ elif np.iinfo('intp').max == 2**63 - 1:
+ shape = (3037000500, 3037000500)
+ else:
+ return
+ assert_raises(ValueError, np.empty, shape, dtype=np.int8)
+ assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
+ assert_raises(ValueError, np.ones, shape, dtype=np.int8)
+
+ @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8,
+ reason="malloc may not fail on 32 bit systems")
+ def test_malloc_fails(self):
+ # This test is guaranteed to fail due to a too large allocation
+ with assert_raises(np.core._exceptions._ArrayMemoryError):
+ np.empty(np.iinfo(np.intp).max, dtype=np.uint8)
+
+ def test_zeros(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ for dt in types:
+ d = np.zeros((13,), dtype=dt)
+ assert_equal(np.count_nonzero(d), 0)
+ # true for ieee floats
+ assert_equal(d.sum(), 0)
+ assert_(not d.any())
+
+ d = np.zeros(2, dtype='(2,4)i4')
+ assert_equal(np.count_nonzero(d), 0)
+ assert_equal(d.sum(), 0)
+ assert_(not d.any())
+
+ d = np.zeros(2, dtype='4i4')
+ assert_equal(np.count_nonzero(d), 0)
+ assert_equal(d.sum(), 0)
+ assert_(not d.any())
+
+ d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
+ assert_equal(np.count_nonzero(d), 0)
+
+ @pytest.mark.slow
+ def test_zeros_big(self):
+ # test big arrays as they might be allocated differently by the system
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ for dt in types:
+ d = np.zeros((30 * 1024**2,), dtype=dt)
+ assert_(not d.any())
+ # This test can fail on 32-bit systems due to insufficient
+ # contiguous memory. Deallocating the previous array increases the
+ # chance of success.
+ del d
+
+ def test_zeros_obj(self):
+ # test initialization from PyLong(0)
+ d = np.zeros((13,), dtype=object)
+ assert_array_equal(d, [0] * 13)
+ assert_equal(np.count_nonzero(d), 0)
+
+ def test_zeros_obj_obj(self):
+ d = np.zeros(10, dtype=[('k', object, 2)])
+ assert_array_equal(d['k'], 0)
+
+ def test_zeros_like_like_zeros(self):
+ # test zeros_like returns the same as zeros
+ for c in np.typecodes['All']:
+ if c == 'V':
+ continue
+ d = np.zeros((3,3), dtype=c)
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+ # explicitly check some special cases
+ d = np.zeros((3,3), dtype='S5')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+ d = np.zeros((3,3), dtype='U5')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+
+ d = np.zeros((3,3), dtype='<i4')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+ d = np.zeros((3,3), dtype='>i4')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+
+ d = np.zeros((3,3), dtype='<M8[s]')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+ d = np.zeros((3,3), dtype='>M8[s]')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+
+ d = np.zeros((3,3), dtype='f4,f4')
+ assert_array_equal(np.zeros_like(d), d)
+ assert_equal(np.zeros_like(d).dtype, d.dtype)
+
+ def test_empty_unicode(self):
+ # don't throw decode errors on garbage memory
+ for i in range(5, 100, 5):
+ d = np.empty(i, dtype='U')
+ str(d)
+
+ def test_sequence_non_homogeneous(self):
+ assert_equal(np.array([4, 2**80]).dtype, object)
+ assert_equal(np.array([4, 2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80] * 3).dtype, object)
+ assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
+ assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
+ assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
+
+ def test_non_sequence_sequence(self):
+ """Should not segfault.
+
+ Class Fail breaks the sequence protocol for new style classes, i.e.,
+ those derived from object. Class Map is a mapping type, indicated by
+ raising a KeyError. At some point we may raise a warning instead
+ of an error in the Fail case.
+
+ """
+ class Fail:
+ def __len__(self):
+ return 1
+
+ def __getitem__(self, index):
+ raise ValueError()
+
+ class Map:
+ def __len__(self):
+ return 1
+
+ def __getitem__(self, index):
+ raise KeyError()
+
+ a = np.array([Map()])
+ assert_(a.shape == (1,))
+ assert_(a.dtype == np.dtype(object))
+ assert_raises(ValueError, np.array, [Fail()])
+
+ def test_no_len_object_type(self):
+ # gh-5100, want object array from iterable object without len()
+ class Point2:
+ def __init__(self):
+ pass
+
+ def __getitem__(self, ind):
+ if ind in [0, 1]:
+ return ind
+ else:
+ raise IndexError()
+ d = np.array([Point2(), Point2(), Point2()])
+ assert_equal(d.dtype, np.dtype(object))
+
+ def test_false_len_sequence(self):
+ # gh-7264, segfault for this example
+ class C:
+ def __getitem__(self, i):
+ raise IndexError
+ def __len__(self):
+ return 42
+
+ a = np.array(C()) # segfault?
+ assert_equal(len(a), 0)
+
+ def test_false_len_iterable(self):
+ # Special case where a bad __getitem__ makes us fall back on __iter__:
+ class C:
+ def __getitem__(self, x):
+ raise Exception
+ def __iter__(self):
+ return iter(())
+ def __len__(self):
+ return 2
+
+ a = np.empty(2)
+ with assert_raises(ValueError):
+ a[:] = C() # Segfault!
+
+ np.array(C()) == list(C()) # exercised for the side effect: must not crash
+
+ def test_failed_len_sequence(self):
+ # gh-7393
+ class A:
+ def __init__(self, data):
+ self._data = data
+ def __getitem__(self, item):
+ return type(self)(self._data[item])
+ def __len__(self):
+ return len(self._data)
+
+ # len(d) should give 3, but len(d[0]) will fail
+ d = A([1,2,3])
+ assert_equal(len(np.array(d)), 3)
+
+ def test_array_too_big(self):
+ # Test that array creation succeeds for arrays addressable by intp
+ # on the byte level and fails for too large arrays.
+ buf = np.zeros(100)
+
+ max_bytes = np.iinfo(np.intp).max
+ for dtype in ["intp", "S20", "b"]:
+ dtype = np.dtype(dtype)
+ itemsize = dtype.itemsize
+
+ np.ndarray(buffer=buf, strides=(0,),
+ shape=(max_bytes//itemsize,), dtype=dtype)
+ assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
+ shape=(max_bytes//itemsize + 1,), dtype=dtype)
+
+ def _ragged_creation(self, seq):
+ # without dtype=object, the ragged object raises
+ with pytest.raises(ValueError, match=".*detected shape was"):
+ a = np.array(seq)
+
+ return np.array(seq, dtype=object)
+
+ def test_ragged_ndim_object(self):
+ # Lists of mismatching depths are treated as object arrays
+ a = self._ragged_creation([[1], 2, 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = self._ragged_creation([1, [2], 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = self._ragged_creation([1, 2, [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ def test_ragged_shape_object(self):
+ # The ragged dimension of a list is turned into an object array
+ a = self._ragged_creation([[1, 1], [2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = self._ragged_creation([[1], [2, 2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = self._ragged_creation([[1], [2], [3, 3]])
+ assert a.shape == (3,)
+ assert a.dtype == object
+
+ def test_array_of_ragged_array(self):
+ outer = np.array([None, None])
+ outer[0] = outer[1] = np.array([1, 2, 3])
+ assert np.array(outer).shape == (2,)
+ assert np.array([outer]).shape == (1, 2)
+
+ outer_ragged = np.array([None, None])
+ outer_ragged[0] = np.array([1, 2, 3])
+ outer_ragged[1] = np.array([1, 2, 3, 4])
+ # should both of these emit deprecation warnings?
+ assert np.array(outer_ragged).shape == (2,)
+ assert np.array([outer_ragged]).shape == (1, 2,)
+
+ def test_deep_nonragged_object(self):
+ # None of these should raise, even though they are missing dtype=object
+ a = np.array([[[Decimal(1)]]])
+ a = np.array([1, Decimal(1)])
+ a = np.array([[1], [Decimal(1)]])
+
+ @pytest.mark.parametrize("dtype", [object, "O,O", "O,(3)O", "(2,3)O"])
+ @pytest.mark.parametrize("function", [
+ np.ndarray, np.empty,
+ lambda shape, dtype: np.empty_like(np.empty(shape, dtype=dtype))])
+ def test_object_initialized_to_None(self, function, dtype):
+ # NumPy has support for object fields to be NULL (meaning None)
+ # but generally, we should always fill with the proper None, and
+ # downstream may rely on that. (For fully initialized arrays!)
+ arr = function(3, dtype=dtype)
+ # We expect a fill value of None, which is not NULL:
+ expected = np.array(None).tobytes()
+ expected = expected * (arr.nbytes // len(expected))
+ assert arr.tobytes() == expected
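+
+# Editor's sketch, not part of the original file: because object arrays are
+# filled with real None (not NULL), a freshly created array behaves like
+# ordinary Python data with no special cases.
+def _demo_object_fill_is_none():
+ arr = np.empty(3, dtype=object)
+ assert all(item is None for item in arr)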
+
+class TestStructured:
+ def test_subarray_field_access(self):
+ a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
+ a['a'] = np.arange(60).reshape(3, 5, 2, 2)
+
+ # Since the subarray is always in C-order, a transpose
+ # does not swap the subarray:
+ assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
+
+ # In Fortran order, the subarray gets appended
+ # like in all other cases, not prepended as a special case
+ b = a.copy(order='F')
+ assert_equal(a['a'].shape, b['a'].shape)
+ assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
+
+ def test_subarray_comparison(self):
+ # Check that comparisons between record arrays with
+ # multi-dimensional field types work properly
+ a = np.rec.fromrecords(
+ [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
+ dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
+ b = a.copy()
+ assert_equal(a == b, [True, True])
+ assert_equal(a != b, [False, False])
+ b[1].b = 'c'
+ assert_equal(a == b, [True, False])
+ assert_equal(a != b, [False, True])
+ for i in range(3):
+ b[0].a = a[0].a
+ b[0].a[i] = 5
+ assert_equal(a == b, [False, False])
+ assert_equal(a != b, [True, True])
+ for i in range(2):
+ for j in range(2):
+ b = a.copy()
+ b[0].c[i, j] = 10
+ assert_equal(a == b, [False, True])
+ assert_equal(a != b, [True, False])
+
+ # Check that broadcasting with a subarray works, including cases that
+ # require promotion to work:
+ a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
+ b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
+ assert_equal(a == b, [[True, True, False], [False, False, True]])
+ assert_equal(b == a, [[True, True, False], [False, False, True]])
+ a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
+ b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
+ assert_equal(a == b, [[True, True, False], [False, False, True]])
+ assert_equal(b == a, [[True, True, False], [False, False, True]])
+ a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
+ b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
+ assert_equal(a == b, [[True, False, False], [False, False, True]])
+ assert_equal(b == a, [[True, False, False], [False, False, True]])
+
+ # Check that broadcasting Fortran-style arrays with a subarray works
+ a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
+ b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
+ assert_equal(a == b, [[True, False, False], [False, False, True]])
+ assert_equal(b == a, [[True, False, False], [False, False, True]])
+
+ # Check that incompatible sub-array shapes don't result in broadcasting
+ x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
+ y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
+ # The main importance is that it does not return True:
+ with pytest.raises(TypeError):
+ x == y
+
+ x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
+ y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
+ # The main importance is that it does not return True:
+ with pytest.raises(TypeError):
+ x == y
+
+ def test_empty_structured_array_comparison(self):
+ # Check that comparison works on empty arrays with nontrivially
+ # shaped fields
+ a = np.zeros(0, [('a', '<f8', (1, 1))])
+ assert_equal(a, a)
+ a = np.zeros(0, [('a', '<f8', (1,))])
+ assert_equal(a, a)
+ a = np.zeros((0, 0), [('a', '<f8', (1, 1))])
+ assert_equal(a, a)
+ a = np.zeros((1, 0, 1), [('a', '<f8', (1, 1))])
+ assert_equal(a, a)
+
+ def test_structured_comparisons_with_promotion(self):
+ # Check that structured arrays can be compared so long as their
+ # dtypes promote fine:
+ a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
+ b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
+ assert_equal(a == b, [False, True])
+ assert_equal(a != b, [True, False])
+
+ a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', '<f8')])
+ b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>i8')])
+ assert_equal(a == b, [False, True])
+ assert_equal(a != b, [True, False])
+
+ # Including with embedded subarray dtype (although subarray comparison
+ # itself may still be a bit weird and compare the raw data)
+ a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5<f8')])
+ b = np.array([(5, 43), (10, 1)], dtype=[('a', '10<i8'), ('b', '5>i8')])
+ assert_equal(a == b, [False, True])
+ assert_equal(a != b, [True, False])
+
+ def test_void_comparison_failures(self):
+ # In principle, one could decide to return an array of False for some
+ # impossible comparisons. But right now we raise a TypeError when
+ # "void" dtypes are involved.
+ x = np.zeros(3, dtype=[('a', 'i1')])
+ y = np.zeros(3)
+ # Cannot compare non-structured to structured:
+ with pytest.raises(TypeError):
+ x == y
+
+ # Added title prevents promotion, but casts are OK:
+ y = np.zeros(3, dtype=[(('title', 'a'), 'i1')])
+ assert np.can_cast(y.dtype, x.dtype)
+ with pytest.raises(TypeError):
+ x == y
+
+ x = np.zeros(3, dtype="V7")
+ y = np.zeros(3, dtype="V8")
+ with pytest.raises(TypeError):
+ x == y
+
+ def test_casting(self):
+ # Check that casting a structured array to change its byte order
+ # works
+ a = np.array([(1,)], dtype=[('a', '<i4')])
+ assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
+ b = a.astype([('a', '>i4')])
+ assert_equal(b, a.byteswap().newbyteorder())
+ assert_equal(a['a'][0], b['a'][0])
+
+ # Check that equality comparison works on structured arrays if
+ # they are 'equiv'-castable
+ a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
+ b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
+ assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+ assert_equal(a == b, [True, True])
+
+ # Check that 'equiv' casting can change byte order
+ assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+ c = a.astype(b.dtype, casting='equiv')
+ assert_equal(a == c, [True, True])
+
+ # Check that 'safe' casting can change byte order and up-cast
+ # fields
+ t = [('a', '<i8'), ('b', '>f8')]
+ assert_(np.can_cast(a.dtype, t, casting='safe'))
+ c = a.astype(t, casting='safe')
+ assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+ [True, True])
+
+ # Check that 'same_kind' casting can change byte order and
+ # change field widths within a "kind"
+ t = [('a', '<i4'), ('b', '>f4')]
+ assert_(np.can_cast(a.dtype, t, casting='same_kind'))
+ c = a.astype(t, casting='same_kind')
+ assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+ [True, True])
+
+ # Check that casting fails if the casting rule should fail on
+ # any of the fields
+ t = [('a', '>i8'), ('b', '<f4')]
+ assert_(not np.can_cast(a.dtype, t, casting='safe'))
+ assert_raises(TypeError, a.astype, t, casting='safe')
+ t = [('a', '>i2'), ('b', '<f8')]
+ assert_(not np.can_cast(a.dtype, t, casting='equiv'))
+ assert_raises(TypeError, a.astype, t, casting='equiv')
+ t = [('a', '>i8'), ('b', '<i2')]
+ assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
+ assert_raises(TypeError, a.astype, t, casting='same_kind')
+ assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
+ assert_raises(TypeError, a.astype, b.dtype, casting='no')
+
+ # Check that non-'unsafe' casting can't change the set of field names
+ for casting in ['no', 'safe', 'equiv', 'same_kind']:
+ t = [('a', '>i4')]
+ assert_(not np.can_cast(a.dtype, t, casting=casting))
+ t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
+ assert_(not np.can_cast(a.dtype, t, casting=casting))
+
+ def test_objview(self):
+ # https://github.com/numpy/numpy/issues/3286
+ a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
+ a[['a', 'b']] # TypeError?
+
+ # https://github.com/numpy/numpy/issues/3253
+ dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
+ dat2[['B', 'A']] # TypeError?
+
+ def test_setfield(self):
+ # https://github.com/numpy/numpy/issues/3126
+ struct_dt = np.dtype([('elem', 'i4', 5),])
+ dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
+ x = np.zeros(1, dt)
+ x[0]['field'] = np.ones(10, dtype='i4')
+ x[0]['struct'] = np.ones(1, dtype=struct_dt)
+ assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
+
+ def test_setfield_object(self):
+ # make sure object field assignment with ndarray value
+ # on void scalar mimics setitem behavior
+ b = np.zeros(1, dtype=[('x', 'O')])
+ # next line should work identically to b['x'][0] = np.arange(3)
+ b[0]['x'] = np.arange(3)
+ assert_equal(b[0]['x'], np.arange(3))
+
+ # check that broadcasting check still works
+ c = np.zeros(1, dtype=[('x', 'O', 5)])
+
+ def testassign():
+ c[0]['x'] = np.arange(3)
+
+ assert_raises(ValueError, testassign)
+
+ def test_zero_width_string(self):
+ # Test for PR #6430 / issues #473, #4955, #2585
+
+ dt = np.dtype([('I', int), ('S', 'S0')])
+
+ x = np.zeros(4, dtype=dt)
+
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['S'].itemsize, 0)
+
+ x['S'] = ['a', 'b', 'c', 'd']
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Variation on test case from #4955
+ x['S'][x['I'] == 0] = 'hello'
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Variation on test case from #2585
+ x['S'] = 'A'
+ assert_equal(x['S'], [b'', b'', b'', b''])
+ assert_equal(x['I'], [0, 0, 0, 0])
+
+ # Allow zero-width dtypes in ndarray constructor
+ y = np.ndarray(4, dtype=x['S'].dtype)
+ assert_equal(y.itemsize, 0)
+ assert_equal(x['S'], y)
+
+ # More tests for indexing an array with zero-width fields
+ assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
+ ('b', 'u1')])['a'].itemsize, 0)
+ assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
+ assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
+
+ xx = x['S'].reshape((2, 2))
+ assert_equal(xx.itemsize, 0)
+ assert_equal(xx, [[b'', b''], [b'', b'']])
+ # check for no uninitialized memory due to viewing S0 array
+ assert_equal(xx[:].dtype, xx.dtype)
+ assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
+
+ b = io.BytesIO()
+ np.save(b, xx)
+
+ b.seek(0)
+ yy = np.load(b)
+ assert_equal(yy.itemsize, 0)
+ assert_equal(xx, yy)
+
+ with temppath(suffix='.npy') as tmp:
+ np.save(tmp, xx)
+ yy = np.load(tmp)
+ assert_equal(yy.itemsize, 0)
+ assert_equal(xx, yy)
+
+ def test_base_attr(self):
+ a = np.zeros(3, dtype='i4,f4')
+ b = a[0]
+ assert_(b.base is a)
+
+ def test_assignment(self):
+ def testassign(arr, v):
+ c = arr.copy()
+ c[0] = v # assign using setitem
+ c[1:] = v # assign using "dtype_transfer" code paths
+ return c
+
+ dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
+ arr = np.ones(2, dt)
+ v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
+ v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
+ v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
+ v4 = np.array([(2,)], dtype=[('bar', 'i8')])
+ v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
+ w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
+
+ ans = np.array([(2,3),(2,3)], dtype=dt)
+ assert_equal(testassign(arr, v1), ans)
+ assert_equal(testassign(arr, v2), ans)
+ assert_equal(testassign(arr, v3), ans)
+ assert_raises(TypeError, lambda: testassign(arr, v4))
+ assert_equal(testassign(arr, v5), ans)
+ w[:] = 4
+ assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
+
+ # test field-reordering, assignment by position, and self-assignment
+ a = np.array([(1,2,3)],
+ dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
+ a[['foo', 'bar']] = a[['bar', 'foo']]
+ assert_equal(a[0].item(), (2,1,3))
+
+ # test that this works even for 'simple_unaligned' structs
+ # (ie, that PyArray_EquivTypes cares about field order too)
+ a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
+ a[['a', 'b']] = a[['b', 'a']]
+ assert_equal(a[0].item(), (2,1))
+
+ def test_scalar_assignment(self):
+ with assert_raises(ValueError):
+ arr = np.arange(25).reshape(5, 5)
+ arr.itemset(3)
+
+ def test_structuredscalar_indexing(self):
+ # test gh-7262
+ x = np.empty(shape=1, dtype="(2)3S,(2)3U")
+ assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
+ assert_equal(x[0], x[0][()])
+
+ def test_multiindex_titles(self):
+ a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
+ assert_raises(KeyError, lambda : a[['a','c']])
+ assert_raises(KeyError, lambda : a[['a','a']])
+ assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
+ a[['b','c']] # no exception
+
+ def test_structured_cast_promotion_fieldorder(self):
+ # gh-15494
+ # dtypes with different field names are not promotable
+ A = ("a", "<i8")
+ B = ("b", ">i8")
+ ab = np.array([(1, 2)], dtype=[A, B])
+ ba = np.array([(1, 2)], dtype=[B, A])
+ assert_raises(TypeError, np.concatenate, (ab, ba))
+ assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype)
+ assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype)
+
+ # dtypes with same field names/order but different memory offsets
+ # and byte-order are promotable to packed nbo.
+ assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype),
+ repack_fields(ab.dtype.newbyteorder('N')))
+
+ # gh-13667
+ # dtypes with different fieldnames but castable field types are castable
+ assert_equal(np.can_cast(ab.dtype, ba.dtype), True)
+ assert_equal(ab.astype(ba.dtype).dtype, ba.dtype)
+ assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True)
+ assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True)
+ assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False)
+ assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')],
+ casting='unsafe'), True)
+
+ ab[:] = ba # make sure assignment still works
+
+ # tests of type-promotion of corresponding fields
+ dt1 = np.dtype([("", "i4")])
+ dt2 = np.dtype([("", "i8")])
+ assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')]))
+ assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')]))
+ assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")]))
+ assert_equal(np.promote_types('i4,f8', 'i8,f4'),
+ np.dtype([('f0', 'i8'), ('f1', 'f8')]))
+ # test nested case
+ dt1nest = np.dtype([("", dt1)])
+ dt2nest = np.dtype([("", dt2)])
+ assert_equal(np.promote_types(dt1nest, dt2nest),
+ np.dtype([('f0', np.dtype([('f0', 'i8')]))]))
+
+ # note that offsets are lost when promoting:
+ dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]})
+ a = np.ones(3, dtype=dt)
+ assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')]))
+
+ @pytest.mark.parametrize("dtype_dict", [
+ dict(names=["a", "b"], formats=["i4", "f"], itemsize=100),
+ dict(names=["a", "b"], formats=["i4", "f"],
+ offsets=[0, 12])])
+ @pytest.mark.parametrize("align", [True, False])
+ def test_structured_promotion_packs(self, dtype_dict, align):
+ # Structured dtypes are packed when promoted (we consider the packed
+ # form to be "canonical"), so there is no extra padding.
+ dtype = np.dtype(dtype_dict, align=align)
+ # Remove non "canonical" dtype options:
+ dtype_dict.pop("itemsize", None)
+ dtype_dict.pop("offsets", None)
+ expected = np.dtype(dtype_dict, align=align)
+
+ res = np.promote_types(dtype, dtype)
+ assert res.itemsize == expected.itemsize
+ assert res.fields == expected.fields
+
+ # But the "expected" one, should just be returned unchanged:
+ res = np.promote_types(expected, expected)
+ assert res is expected
+
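+ # Editor's sketch, not part of the original suite: a concrete instance of
+ # the packing rule above -- padded offsets disappear on promotion.
+ def test_promotion_drops_padding_sketch(self):
+ padded = np.dtype({'names': ['a'], 'formats': ['i4'], 'offsets': [16]})
+ assert padded.itemsize == 20
+ packed = np.promote_types(padded, padded)
+ assert packed.itemsize == 4 # the canonical form is packed
+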
+ def test_structured_asarray_is_view(self):
+ # A scalar viewing an array preserves its view even when creating a
+ # new array. This test documents the behaviour; it may not be the
+ # desired behaviour.
+ arr = np.array([1], dtype="i,i")
+ scalar = arr[0]
+ assert not scalar.flags.owndata # view into the array
+ assert np.asarray(scalar).base is scalar
+ # But never when a dtype is passed in:
+ assert np.asarray(scalar, dtype=scalar.dtype).base is None
+ # A scalar which owns its data does not have this property.
+ # It is not easy to create one, one method is to use pickle:
+ scalar = pickle.loads(pickle.dumps(scalar))
+ assert scalar.flags.owndata
+ assert np.asarray(scalar).base is None
+
+class TestBool:
+ def test_test_interning(self):
+ a0 = np.bool_(0)
+ b0 = np.bool_(False)
+ assert_(a0 is b0)
+ a1 = np.bool_(1)
+ b1 = np.bool_(True)
+ assert_(a1 is b1)
+ assert_(np.array([True])[0] is a1)
+ assert_(np.array(True)[()] is a1)
+
+ def test_sum(self):
+ d = np.ones(101, dtype=bool)
+ assert_equal(d.sum(), d.size)
+ assert_equal(d[::2].sum(), d[::2].size)
+ assert_equal(d[::-2].sum(), d[::-2].size)
+
+ d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
+ assert_equal(d.sum(), d.size)
+ assert_equal(d[::2].sum(), d[::2].size)
+ assert_equal(d[::-2].sum(), d[::-2].size)
+
+ def check_count_nonzero(self, power, length):
+ powers = [2 ** i for i in range(length)]
+ for i in range(2**power):
+ l = [(i & x) != 0 for x in powers]
+ a = np.array(l, dtype=bool)
+ c = builtins.sum(l)
+ assert_equal(np.count_nonzero(a), c)
+ av = a.view(np.uint8)
+ av *= 3
+ assert_equal(np.count_nonzero(a), c)
+ av *= 4
+ assert_equal(np.count_nonzero(a), c)
+ av[av != 0] = 0xFF
+ assert_equal(np.count_nonzero(a), c)
+
+ def test_count_nonzero(self):
+ # check all 12 bit combinations in a length 17 array
+ # covers most cases of the 16 byte unrolled code
+ self.check_count_nonzero(12, 17)
+
+ @pytest.mark.slow
+ def test_count_nonzero_all(self):
+ # check all combinations in a length 17 array
+ # covers all cases of the 16 byte unrolled code
+ self.check_count_nonzero(17, 17)
+
+ def test_count_nonzero_unaligned(self):
+ # prevent mistakes as e.g. gh-4060
+ for o in range(7):
+ a = np.zeros((18,), dtype=bool)[o+1:]
+ a[:o] = True
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+ a = np.ones((18,), dtype=bool)[o+1:]
+ a[:o] = False
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+
+ def _test_cast_from_flexible(self, dtype):
+ # empty string -> false
+ for n in range(3):
+ v = np.array(b'', (dtype, n))
+ assert_equal(bool(v), False)
+ assert_equal(bool(v[()]), False)
+ assert_equal(v.astype(bool), False)
+ assert_(isinstance(v.astype(bool), np.ndarray))
+ assert_(v[()].astype(bool) is np.False_)
+
+ # anything else -> true
+ for n in range(1, 4):
+ for val in [b'a', b'0', b' ']:
+ v = np.array(val, (dtype, n))
+ assert_equal(bool(v), True)
+ assert_equal(bool(v[()]), True)
+ assert_equal(v.astype(bool), True)
+ assert_(isinstance(v.astype(bool), np.ndarray))
+ assert_(v[()].astype(bool) is np.True_)
+
+ def test_cast_from_void(self):
+ self._test_cast_from_flexible(np.void)
+
+ @pytest.mark.xfail(reason="See gh-9847")
+ def test_cast_from_unicode(self):
+ self._test_cast_from_flexible(np.unicode_)
+
+ @pytest.mark.xfail(reason="See gh-9847")
+ def test_cast_from_bytes(self):
+ self._test_cast_from_flexible(np.bytes_)
+
+
+class TestZeroSizeFlexible:
+ @staticmethod
+ def _zeros(shape, dtype=str):
+ dtype = np.dtype(dtype)
+ if dtype == np.void:
+ return np.zeros(shape, dtype=(dtype, 0))
+
+ # not constructable directly
+ dtype = np.dtype([('x', dtype, 0)])
+ return np.zeros(shape, dtype=dtype)['x']
+
+ def test_create(self):
+ zs = self._zeros(10, bytes)
+ assert_equal(zs.itemsize, 0)
+ zs = self._zeros(10, np.void)
+ assert_equal(zs.itemsize, 0)
+ zs = self._zeros(10, str)
+ assert_equal(zs.itemsize, 0)
+
+ def _test_sort_partition(self, name, kinds, **kwargs):
+ # Previously, these would all hang
+ for dt in [bytes, np.void, str]:
+ zs = self._zeros(10, dt)
+ sort_method = getattr(zs, name)
+ sort_func = getattr(np, name)
+ for kind in kinds:
+ sort_method(kind=kind, **kwargs)
+ sort_func(zs, kind=kind, **kwargs)
+
+ def test_sort(self):
+ self._test_sort_partition('sort', kinds='qhs')
+
+ def test_argsort(self):
+ self._test_sort_partition('argsort', kinds='qhs')
+
+ def test_partition(self):
+ self._test_sort_partition('partition', kinds=['introselect'], kth=2)
+
+ def test_argpartition(self):
+ self._test_sort_partition('argpartition', kinds=['introselect'], kth=2)
+
+ def test_resize(self):
+ # previously an error
+ for dt in [bytes, np.void, str]:
+ zs = self._zeros(10, dt)
+ zs.resize(25)
+ zs.resize((10, 10))
+
+ def test_view(self):
+ for dt in [bytes, np.void, str]:
+ zs = self._zeros(10, dt)
+
+ # viewing as itself should be allowed
+ assert_equal(zs.view(dt).dtype, np.dtype(dt))
+
+ # viewing as any non-empty type gives an empty result
+ assert_equal(zs.view((dt, 1)).shape, (0,))
+
+ def test_dumps(self):
+ zs = self._zeros(10, int)
+ assert_equal(zs, pickle.loads(zs.dumps()))
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for dt in [bytes, np.void, str]:
+ zs = self._zeros(10, dt)
+ p = pickle.dumps(zs, protocol=proto)
+ zs2 = pickle.loads(p)
+
+ assert_equal(zs.dtype, zs2.dtype)
+
+ def test_pickle_empty(self):
+ """Checking if an empty array pickled and un-pickled will not cause a
+ segmentation fault"""
+ arr = np.array([]).reshape(999999, 0)
+ pk_dmp = pickle.dumps(arr)
+ pk_load = pickle.loads(pk_dmp)
+
+ assert pk_load.size == 0
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_pickle_with_buffercallback(self):
+ array = np.arange(10)
+ buffers = []
+ bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
+ protocol=5)
+ array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
+ # when using pickle protocol 5 with buffer callbacks,
+ # array_from_buffer is reconstructed from a buffer holding a view
+ # to the initial array's data, so modifying an element in array
+ # should modify it in array_from_buffer too.
+ array[0] = -1
+ assert array_from_buffer[0] == -1, array_from_buffer[0]
+
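+# Editor's sketch, not part of the original file: the same protocol-5
+# machinery in ordinary use (requires Python >= 3.8). Out-of-band buffers
+# let the consumer decide whether to copy; pickle.loads reconstructs a view
+# over the original memory here.
+def _demo_pickle5_out_of_band():
+ arr = np.arange(10)
+ bufs = []
+ payload = pickle.dumps(arr, protocol=5, buffer_callback=bufs.append)
+ restored = pickle.loads(payload, buffers=bufs)
+ assert np.shares_memory(arr, restored)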
+
+class TestMethods:
+
+ sort_kinds = ['quicksort', 'heapsort', 'stable']
+
+ def test_all_where(self):
+ a = np.array([[True, False, True],
+ [False, False, False],
+ [True, True, True]])
+ wh_full = np.array([[True, False, True],
+ [False, False, False],
+ [True, False, True]])
+ wh_lower = np.array([[False],
+ [False],
+ [True]])
+ for _ax in [0, None]:
+ assert_equal(a.all(axis=_ax, where=wh_lower),
+ np.all(a[wh_lower[:,0],:], axis=_ax))
+ assert_equal(np.all(a, axis=_ax, where=wh_lower),
+ a[wh_lower[:,0],:].all(axis=_ax))
+
+ assert_equal(a.all(where=wh_full), True)
+ assert_equal(np.all(a, where=wh_full), True)
+ assert_equal(a.all(where=False), True)
+ assert_equal(np.all(a, where=False), True)
+
+ def test_any_where(self):
+ a = np.array([[True, False, True],
+ [False, False, False],
+ [True, True, True]])
+ wh_full = np.array([[False, True, False],
+ [True, True, True],
+ [False, False, False]])
+ wh_middle = np.array([[False],
+ [True],
+ [False]])
+ for _ax in [0, None]:
+ assert_equal(a.any(axis=_ax, where=wh_middle),
+ np.any(a[wh_middle[:,0],:], axis=_ax))
+ assert_equal(np.any(a, axis=_ax, where=wh_middle),
+ a[wh_middle[:,0],:].any(axis=_ax))
+ assert_equal(a.any(where=wh_full), False)
+ assert_equal(np.any(a, where=wh_full), False)
+ assert_equal(a.any(where=False), False)
+ assert_equal(np.any(a, where=False), False)
+
+ def test_compress(self):
+ tgt = [[5, 6, 7, 8, 9]]
+ arr = np.arange(10).reshape(2, 5)
+ out = arr.compress([0, 1], axis=0)
+ assert_equal(out, tgt)
+
+ tgt = [[1, 3], [6, 8]]
+ out = arr.compress([0, 1, 0, 1, 0], axis=1)
+ assert_equal(out, tgt)
+
+ tgt = [[1], [6]]
+ arr = np.arange(10).reshape(2, 5)
+ out = arr.compress([0, 1], axis=1)
+ assert_equal(out, tgt)
+
+ arr = np.arange(10).reshape(2, 5)
+ out = arr.compress([0, 1])
+ assert_equal(out, 1)
+
+ def test_choose(self):
+ x = 2*np.ones((3,), dtype=int)
+ y = 3*np.ones((3,), dtype=int)
+ x2 = 2*np.ones((2, 3), dtype=int)
+ y2 = 3*np.ones((2, 3), dtype=int)
+ ind = np.array([0, 0, 1])
+
+ A = ind.choose((x, y))
+ assert_equal(A, [2, 2, 3])
+
+ A = ind.choose((x2, y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ A = ind.choose((x, y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ oned = np.ones(1)
+ # gh-12031, caused SEGFAULT
+ assert_raises(TypeError, oned.choose, np.void(0), [oned])
+
+ out = np.array(0)
+ ret = np.choose(np.array(1), [10, 20, 30], out=out)
+ assert out is ret
+ assert_equal(out[()], 20)
+
+ # gh-6272 check overlap on out
+ x = np.arange(5)
+ y = np.choose([0, 0, 0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
+ assert_equal(y, np.array([0, 1, 2]))
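+ # Illustrative sketch, not part of the upstream suite: with
+ # mode='wrap', out-of-range indices are reduced modulo the number
+ # of choices, so index 3 wraps around to choice 0 here.
+ assert_equal(np.choose(np.array([0, 3]),
+ [[10, 11], [20, 21], [30, 31]], mode='wrap'),
+ [10, 11])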
+
+ def test_prod(self):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+
+ for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+ # none of the dtypes above overflow on this data, so the
+ # products are always exact
+ assert_equal(a.prod(axis=0), 26400)
+ assert_array_equal(a2.prod(axis=0),
+ np.array([50, 36, 84, 180], ctype))
+ assert_array_equal(a2.prod(axis=-1),
+ np.array([24, 1890, 600], ctype))
+
+ def test_repeat(self):
+ m = np.array([1, 2, 3, 4, 5, 6])
+ m_rect = m.reshape((2, 3))
+
+ A = m.repeat([1, 3, 2, 1, 1, 2])
+ assert_equal(A, [1, 2, 2, 2, 3,
+ 3, 4, 5, 6, 6])
+
+ A = m.repeat(2)
+ assert_equal(A, [1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6])
+
+ A = m_rect.repeat([2, 1], axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6]])
+
+ A = m_rect.repeat([1, 3, 2], axis=1)
+ assert_equal(A, [[1, 2, 2, 2, 3, 3],
+ [4, 5, 5, 5, 6, 6]])
+
+ A = m_rect.repeat(2, axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6],
+ [4, 5, 6]])
+
+ A = m_rect.repeat(2, axis=1)
+ assert_equal(A, [[1, 1, 2, 2, 3, 3],
+ [4, 4, 5, 5, 6, 6]])
+
+ def test_reshape(self):
+ arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
+
+ tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
+ assert_equal(arr.reshape(2, 6), tgt)
+
+ tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
+ assert_equal(arr.reshape(3, 4), tgt)
+
+ tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
+ assert_equal(arr.reshape((3, 4), order='F'), tgt)
+
+ tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
+ assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
+
+ def test_round(self):
+ def check_round(arr, expected, *round_args):
+ assert_equal(arr.round(*round_args), expected)
+ # With output array
+ out = np.zeros_like(arr)
+ res = arr.round(*round_args, out=out)
+ assert_equal(out, expected)
+ assert out is res
+
+ check_round(np.array([1.2, 1.5]), [1, 2])
+ check_round(np.array(1.5), 2)
+ check_round(np.array([12.2, 15.5]), [10, 20], -1)
+ check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
+ # Complex rounding
+ check_round(np.array([4.5 + 1.5j]), [4 + 2j])
+ check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
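+ # Illustrative sketch, not part of the upstream suite: halfway cases
+ # round to the nearest even value ("banker's rounding"), which is why
+ # 4.5 rounds down to 4 above while 1.5 rounds up to 2.
+ check_round(np.array([0.5, 1.5, 2.5]), [0, 2, 2])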
+
+ def test_squeeze(self):
+ a = np.array([[[1], [2], [3]]])
+ assert_equal(a.squeeze(), [1, 2, 3])
+ assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
+ assert_raises(ValueError, a.squeeze, axis=(1,))
+ assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
+
+ def test_transpose(self):
+ a = np.array([[1, 2], [3, 4]])
+ assert_equal(a.transpose(), [[1, 3], [2, 4]])
+ assert_raises(ValueError, lambda: a.transpose(0))
+ assert_raises(ValueError, lambda: a.transpose(0, 0))
+ assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
+
+ def test_sort(self):
+ # test ordering for floats and complex containing nans. It is only
+ # necessary to check the less-than comparison, so sorts that
+ # only follow the insertion sort path are sufficient. We only
+ # test doubles and complex doubles as the logic is the same.
+
+ # check doubles
+ msg = "Test real sort order with nans"
+ a = np.array([np.nan, 1, 0])
+ b = np.sort(a)
+ assert_equal(b, a[::-1], msg)
+ # check complex
+ msg = "Test complex sort order with nans"
+ a = np.zeros(9, dtype=np.complex128)
+ a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
+ a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
+ b = np.sort(a)
+ assert_equal(b, a[::-1], msg)
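+ # Illustrative sketch, not part of the upstream suite: NaNs compare
+ # as larger than any other value, so a sort always moves them to
+ # the end of the array.
+ assert_(np.isnan(np.sort(np.array([np.nan, np.inf, 0.0]))[-1]))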
+
+ # all c scalar sorts use the same code with different types
+ # so it suffices to run a quick check with one type. The number
+ # of sorted items must be greater than ~50 to check the actual
+ # algorithm because quick and merge sort fall back to insertion
+ # sort for small arrays.
+
+ @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ np.longdouble])
+ def test_sort_unsigned(self, dtype):
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ @pytest.mark.parametrize('dtype',
+ [np.int8, np.int16, np.int32, np.int64, np.float16,
+ np.float32, np.float64, np.longdouble])
+ def test_sort_signed(self, dtype):
+ a = np.arange(-50, 51, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s" % (kind)
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
+ @pytest.mark.parametrize('part', ['real', 'imag'])
+ def test_sort_complex(self, part, dtype):
+ # test complex sorts. These use the same code as the scalars
+ # but the compare function differs.
+ cdtype = {
+ np.single: np.csingle,
+ np.double: np.cdouble,
+ np.longdouble: np.clongdouble,
+ }[dtype]
+ a = np.arange(-50, 51, dtype=dtype)
+ b = a[::-1].copy()
+ ai = (a * (1+1j)).astype(cdtype)
+ bi = (b * (1+1j)).astype(cdtype)
+ setattr(ai, part, 1)
+ setattr(bi, part, 1)
+ for kind in self.sort_kinds:
+ msg = "complex sort, %s part == 1, kind=%s" % (part, kind)
+ c = ai.copy()
+ c.sort(kind=kind)
+ assert_equal(c, ai, msg)
+ c = bi.copy()
+ c.sort(kind=kind)
+ assert_equal(c, ai, msg)
+
+ def test_sort_complex_byte_swapping(self):
+ # test sorting of complex arrays requiring byte-swapping, gh-5441
+ for endianness in '<>':
+ for dt in np.typecodes['Complex']:
+ arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+ c = arr.copy()
+ c.sort()
+ msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
+ assert_equal(c, arr, msg)
+
+ @pytest.mark.parametrize('dtype', [np.bytes_, np.unicode_])
+ def test_sort_string(self, dtype):
+ # np.array will perform the encoding to bytes for us in the bytes test
+ a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ def test_sort_object(self):
+ # test object array sorts.
+ a = np.empty((101,), dtype=object)
+ a[:] = list(range(101))
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ def test_sort_structured(self):
+ # test record array sorts.
+ dt = np.dtype([('f', float), ('i', int)])
+ a = np.array([(i, i) for i in range(101)], dtype=dt)
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]'])
+ def test_sort_time(self, dtype):
+ # test datetime64 and timedelta64 sorts.
+ a = np.arange(0, 101, dtype=dtype)
+ b = a[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ def test_sort_axis(self):
+ # check axis handling. This should be the same for all type
+ # specific sorts, so we only check it for one type and one kind
+ a = np.array([[3, 2], [1, 0]])
+ b = np.array([[1, 0], [3, 2]])
+ c = np.array([[2, 3], [0, 1]])
+ d = a.copy()
+ d.sort(axis=0)
+ assert_equal(d, b, "test sort with axis=0")
+ d = a.copy()
+ d.sort(axis=1)
+ assert_equal(d, c, "test sort with axis=1")
+ d = a.copy()
+ d.sort()
+ assert_equal(d, c, "test sort with default axis")
+
+ def test_sort_size_0(self):
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array sort with axis={0}'.format(axis)
+ assert_equal(np.sort(a, axis=axis), a, msg)
+ msg = 'test empty array sort with axis=None'
+ assert_equal(np.sort(a, axis=None), a.ravel(), msg)
+
+ def test_sort_bad_ordering(self):
+ # test generic class with bogus ordering,
+ # should not segfault.
+ class Boom:
+ def __lt__(self, other):
+ return True
+
+ a = np.array([Boom()] * 100, dtype=object)
+ for kind in self.sort_kinds:
+ msg = "kind=%s" % kind
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ def test_void_sort(self):
+ # gh-8210 - previously segfaulted
+ for i in range(4):
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view('V4')
+ arr[::-1].sort()
+
+ dt = np.dtype([('val', 'i4', (1,))])
+ for i in range(4):
+ rand = np.random.randint(256, size=4000, dtype=np.uint8)
+ arr = rand.view(dt)
+ arr[::-1].sort()
+
+ def test_sort_raises(self):
+ # gh-9404
+ arr = np.array([0, datetime.now(), 1], dtype=object)
+ for kind in self.sort_kinds:
+ assert_raises(TypeError, arr.sort, kind=kind)
+ # gh-3879
+ class Raiser:
+ def raises_anything(*args, **kwargs):
+ raise TypeError("SOMETHING ERRORED")
+ __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+ arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+ np.random.shuffle(arr)
+ for kind in self.sort_kinds:
+ assert_raises(TypeError, arr.sort, kind=kind)
+
+ def test_sort_degraded(self):
+ # a degraded (adversarial) dataset that would take minutes to sort
+ # with a plain quicksort
+ d = np.arange(1000000)
+ do = d.copy()
+ x = d
+ # create a median of 3 killer where each median is the sorted second
+ # last element of the quicksort partition
+ while x.size > 3:
+ mid = x.size // 2
+ x[mid], x[-2] = x[-2], x[mid]
+ x = x[:-2]
+
+ assert_equal(np.sort(d), do)
+ assert_equal(d[np.argsort(d)], do)
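+ # Illustrative sketch, not part of the upstream suite: the same
+ # adversarial construction on a small input is still a permutation,
+ # so sorting it must recover the original range.
+ small = np.arange(32)
+ y = small
+ while y.size > 3:
+ mid = y.size // 2
+ y[mid], y[-2] = y[-2], y[mid]
+ y = y[:-2]
+ assert_equal(np.sort(small), np.arange(32))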
+
+ def test_copy(self):
+ def assert_fortran(arr):
+ assert_(arr.flags.fortran)
+ assert_(arr.flags.f_contiguous)
+ assert_(not arr.flags.c_contiguous)
+
+ def assert_c(arr):
+ assert_(not arr.flags.fortran)
+ assert_(not arr.flags.f_contiguous)
+ assert_(arr.flags.c_contiguous)
+
+ a = np.empty((2, 2), order='F')
+ # Test copying a Fortran array
+ assert_c(a.copy())
+ assert_c(a.copy('C'))
+ assert_fortran(a.copy('F'))
+ assert_fortran(a.copy('A'))
+
+ # Now test starting with a C array.
+ a = np.empty((2, 2), order='C')
+ assert_c(a.copy())
+ assert_c(a.copy('C'))
+ assert_fortran(a.copy('F'))
+ assert_c(a.copy('A'))
+
+ @pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O'])
+ def test__deepcopy__(self, dtype):
+ # Force the entry of NULLs into array
+ a = np.empty(4, dtype=dtype)
+ ctypes.memset(a.ctypes.data, 0, a.nbytes)
+
+ # Ensure no error is raised, see gh-21833
+ b = a.__deepcopy__({})
+
+ a[0] = 42
+ with pytest.raises(AssertionError):
+ assert_array_equal(a, b)
+
+ def test__deepcopy__catches_failure(self):
+ class MyObj:
+ def __deepcopy__(self, *args, **kwargs):
+ raise RuntimeError
+
+ arr = np.array([1, MyObj(), 3], dtype='O')
+ with pytest.raises(RuntimeError):
+ arr.__deepcopy__({})
+
+ def test_sort_order(self):
+ # Test sorting an array with fields
+ x1 = np.array([21, 32, 14])
+ x2 = np.array(['my', 'first', 'name'])
+ x3 = np.array([3.1, 4.5, 6.2])
+ r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
+
+ r.sort(order=['id'])
+ assert_equal(r.id, np.array([14, 21, 32]))
+ assert_equal(r.word, np.array(['name', 'my', 'first']))
+ assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
+
+ r.sort(order=['word'])
+ assert_equal(r.id, np.array([32, 21, 14]))
+ assert_equal(r.word, np.array(['first', 'my', 'name']))
+ assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
+
+ r.sort(order=['number'])
+ assert_equal(r.id, np.array([21, 32, 14]))
+ assert_equal(r.word, np.array(['my', 'first', 'name']))
+ assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+
+ assert_raises_regex(ValueError, 'duplicate',
+ lambda: r.sort(order=['id', 'id']))
+
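+ # pick the non-native byte order so the sort has to byte-swap values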
+ if sys.byteorder == 'little':
+ strtype = '>i2'
+ else:
+ strtype = '<i2'
+ mydtype = [('name', 'U5'), ('col2', strtype)]
+ r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
+ dtype=mydtype)
+ r.sort(order='col2')
+ assert_equal(r['col2'], [1, 3, 255, 258])
+ assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
+ dtype=mydtype))
+
+ def test_argsort(self):
+ # all c scalar argsorts use the same code with different types
+ # so it suffices to run a quick check with one type. The number
+ # of sorted items must be greater than ~50 to check the actual
+ # algorithm because quick and merge sort fall back to insertion
+ # sort for small arrays.
+
+ for dtype in [np.int32, np.uint32, np.float32]:
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype)
+ assert_equal(a.copy().argsort(kind=kind), a, msg)
+ assert_equal(b.copy().argsort(kind=kind), b, msg)
+
+ # test complex argsorts. These use the same code as the scalars
+ # but the compare function differs.
+ ai = a*1j + 1
+ bi = b*1j + 1
+ for kind in self.sort_kinds:
+ msg = "complex argsort, kind=%s" % kind
+ assert_equal(ai.copy().argsort(kind=kind), a, msg)
+ assert_equal(bi.copy().argsort(kind=kind), b, msg)
+ ai = a + 1j
+ bi = b + 1j
+ for kind in self.sort_kinds:
+ msg = "complex argsort, kind=%s" % kind
+ assert_equal(ai.copy().argsort(kind=kind), a, msg)
+ assert_equal(bi.copy().argsort(kind=kind), b, msg)
+
+ # test argsort of complex arrays requiring byte-swapping, gh-5441
+ for endianness in '<>':
+ for dt in np.typecodes['Complex']:
+ arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
+ msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
+ assert_equal(arr.argsort(),
+ np.arange(len(arr), dtype=np.intp), msg)
+
+ # test string argsorts.
+ s = 'aaaaaaaa'
+ a = np.array([s + chr(i) for i in range(101)])
+ b = a[::-1].copy()
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in self.sort_kinds:
+ msg = "string argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test unicode argsorts.
+ s = 'aaaaaaaa'
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in self.sort_kinds:
+ msg = "unicode argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test object array argsorts.
+ a = np.empty((101,), dtype=object)
+ a[:] = list(range(101))
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in self.sort_kinds:
+ msg = "object argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test structured array argsorts.
+ dt = np.dtype([('f', float), ('i', int)])
+ a = np.array([(i, i) for i in range(101)], dtype=dt)
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in self.sort_kinds:
+ msg = "structured array argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test datetime64 argsorts.
+ a = np.arange(0, 101, dtype='datetime64[D]')
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "datetime64 argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # test timedelta64 argsorts.
+ a = np.arange(0, 101, dtype='timedelta64[D]')
+ b = a[::-1]
+ r = np.arange(101)
+ rr = r[::-1]
+ for kind in ['q', 'h', 'm']:
+ msg = "timedelta64 argsort, kind=%s" % kind
+ assert_equal(a.copy().argsort(kind=kind), r, msg)
+ assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+ # check axis handling. This should be the same for all type
+ # specific argsorts, so we only check it for one type and one kind
+ a = np.array([[3, 2], [1, 0]])
+ b = np.array([[1, 1], [0, 0]])
+ c = np.array([[1, 0], [1, 0]])
+ assert_equal(a.copy().argsort(axis=0), b)
+ assert_equal(a.copy().argsort(axis=1), c)
+ assert_equal(a.copy().argsort(), c)
+
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array argsort with axis={0}'.format(axis)
+ assert_equal(np.argsort(a, axis=axis),
+ np.zeros_like(a, dtype=np.intp), msg)
+ msg = 'test empty array argsort with axis=None'
+ assert_equal(np.argsort(a, axis=None),
+ np.zeros_like(a.ravel(), dtype=np.intp), msg)
+
+ # check that stable argsorts are stable
+ r = np.arange(100)
+ # scalars
+ a = np.zeros(100)
+ assert_equal(a.argsort(kind='m'), r)
+ # complex
+ a = np.zeros(100, dtype=complex)
+ assert_equal(a.argsort(kind='m'), r)
+ # string
+ a = np.array(['aaaaaaaaa' for i in range(100)])
+ assert_equal(a.argsort(kind='m'), r)
+ # unicode
+ a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
+ assert_equal(a.argsort(kind='m'), r)
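+ # Illustrative sketch, not part of the upstream suite: with duplicate
+ # keys, a stable argsort preserves the original order of equal
+ # elements.
+ a = np.array([1, 0, 1, 0])
+ assert_equal(a.argsort(kind='stable'), [1, 3, 0, 2])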
+
+ def test_sort_unicode_kind(self):
+ d = np.arange(10)
+ k = b'\xc3\xa4'.decode("UTF8")
+ assert_raises(ValueError, d.sort, kind=k)
+ assert_raises(ValueError, d.argsort, kind=k)
+
+ @pytest.mark.parametrize('a', [
+ np.array([0, 1, np.nan], dtype=np.float16),
+ np.array([0, 1, np.nan], dtype=np.float32),
+ np.array([0, 1, np.nan]),
+ ])
+ def test_searchsorted_floats(self, a):
+ # test for float arrays containing NaNs. Explicitly test
+ # half, single, and double precision floats to verify that
+ # the NaN-handling is correct.
+ msg = "Test real (%s) searchsorted with nans, side='l'" % a.dtype
+ b = a.searchsorted(a, side='left')
+ assert_equal(b, np.arange(3), msg)
+ msg = "Test real (%s) searchsorted with nans, side='r'" % a.dtype
+ b = a.searchsorted(a, side='right')
+ assert_equal(b, np.arange(1, 4), msg)
+ # check keyword arguments
+ a.searchsorted(v=1)
+ x = np.array([0, 1, np.nan], dtype='float32')
+ y = np.searchsorted(x, x[-1])
+ assert_equal(y, 2)
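+ # Illustrative sketch, not part of the upstream suite: side='left'
+ # returns the first valid insertion point, side='right' the last.
+ assert_equal(np.searchsorted([1, 2, 2, 3], 2, side='left'), 1)
+ assert_equal(np.searchsorted([1, 2, 2, 3], 2, side='right'), 3)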
+
+ def test_searchsorted_complex(self):
+ # test for complex arrays containing nans.
+ # The search sorted routines use the compare functions for the
+ # array type, so this checks if that is consistent with the sort
+ # order.
+ # check double complex
+ a = np.zeros(9, dtype=np.complex128)
+ a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
+ a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
+ msg = "Test complex searchsorted with nans, side='l'"
+ b = a.searchsorted(a, side='left')
+ assert_equal(b, np.arange(9), msg)
+ msg = "Test complex searchsorted with nans, side='r'"
+ b = a.searchsorted(a, side='right')
+ assert_equal(b, np.arange(1, 10), msg)
+ msg = "Test searchsorted with little endian, side='l'"
+ a = np.array([0, 128], dtype='<i4')
+ b = a.searchsorted(np.array(128, dtype='<i4'))
+ assert_equal(b, 1, msg)
+ msg = "Test searchsorted with big endian, side='l'"
+ a = np.array([0, 128], dtype='>i4')
+ b = a.searchsorted(np.array(128, dtype='>i4'))
+ assert_equal(b, 1, msg)
+
+ def test_searchsorted_n_elements(self):
+ # Check 0 elements
+ a = np.ones(0)
+ b = a.searchsorted([0, 1, 2], 'left')
+ assert_equal(b, [0, 0, 0])
+ b = a.searchsorted([0, 1, 2], 'right')
+ assert_equal(b, [0, 0, 0])
+ a = np.ones(1)
+ # Check 1 element
+ b = a.searchsorted([0, 1, 2], 'left')
+ assert_equal(b, [0, 0, 1])
+ b = a.searchsorted([0, 1, 2], 'right')
+ assert_equal(b, [0, 1, 1])
+ # Check all elements equal
+ a = np.ones(2)
+ b = a.searchsorted([0, 1, 2], 'left')
+ assert_equal(b, [0, 0, 2])
+ b = a.searchsorted([0, 1, 2], 'right')
+ assert_equal(b, [0, 2, 2])
+
+ def test_searchsorted_unaligned_array(self):
+ # Test searching unaligned array
+ a = np.arange(10)
+ aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
+ unaligned = aligned[1:].view(a.dtype)
+ unaligned[:] = a
+ # Test searching unaligned array
+ b = unaligned.searchsorted(a, 'left')
+ assert_equal(b, a)
+ b = unaligned.searchsorted(a, 'right')
+ assert_equal(b, a + 1)
+ # Test searching for unaligned keys
+ b = a.searchsorted(unaligned, 'left')
+ assert_equal(b, a)
+ b = a.searchsorted(unaligned, 'right')
+ assert_equal(b, a + 1)
+
+ def test_searchsorted_resetting(self):
+ # Test smart resetting of binsearch indices
+ a = np.arange(5)
+ b = a.searchsorted([6, 5, 4], 'left')
+ assert_equal(b, [5, 5, 4])
+ b = a.searchsorted([6, 5, 4], 'right')
+ assert_equal(b, [5, 5, 5])
+
+ def test_searchsorted_type_specific(self):
+ # Test all type specific binary search functions
+ types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
+ np.typecodes['Datetime'], '?O'))
+ for dt in types:
+ if dt == 'M':
+ dt = 'M8[D]'
+ if dt == '?':
+ a = np.arange(2, dtype=dt)
+ out = np.arange(2)
+ else:
+ a = np.arange(0, 5, dtype=dt)
+ out = np.arange(5)
+ b = a.searchsorted(a, 'left')
+ assert_equal(b, out)
+ b = a.searchsorted(a, 'right')
+ assert_equal(b, out + 1)
+ # Test empty array, use a fresh array to get warnings in
+ # valgrind if access happens.
+ e = np.ndarray(shape=0, buffer=b'', dtype=dt)
+ b = e.searchsorted(a, 'left')
+ assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
+ b = a.searchsorted(e, 'left')
+ assert_array_equal(b, np.zeros(0, dtype=np.intp))
+
+ def test_searchsorted_unicode(self):
+ # Test searchsorted on unicode strings.
+
+ # 1.6.1 contained a string length miscalculation in
+ # arraytypes.c.src:UNICODE_compare() which manifested as
+ # incorrect/inconsistent results from searchsorted.
+ a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
+ 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
+ dtype=np.unicode_)
+ ind = np.arange(len(a))
+ assert_equal([a.searchsorted(v, 'left') for v in a], ind)
+ assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
+ assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
+ assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
+
+ def test_searchsorted_with_invalid_sorter(self):
+ a = np.array([5, 2, 1, 3, 4])
+ assert_raises(TypeError, np.searchsorted, a, 0,
+ sorter=np.array((1, (2, 3)), dtype=object))
+ assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
+ assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
+ assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
+
+ # bounds check
+ assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
+ assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
+ assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
+
+ def test_searchsorted_with_sorter(self):
+ a = np.random.rand(300)
+ s = a.argsort()
+ b = np.sort(a)
+ k = np.linspace(0, 1, 20)
+ assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
+
+ a = np.array([0, 1, 2, 3, 5]*20)
+ s = a.argsort()
+ k = [0, 1, 2, 3, 5]
+ expected = [0, 20, 40, 60, 80]
+ assert_equal(a.searchsorted(k, side='left', sorter=s), expected)
+ expected = [20, 40, 60, 80, 100]
+ assert_equal(a.searchsorted(k, side='right', sorter=s), expected)
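+ # Illustrative sketch, not part of the upstream suite: `sorter` is an
+ # argsort of the array, letting searchsorted operate on unsorted data.
+ arr = np.array([30, 10, 20])
+ order = arr.argsort() # [1, 2, 0]
+ assert_equal(np.searchsorted(arr, 20, sorter=order), 1)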
+
+ # Test searching unaligned array
+ keys = np.arange(10)
+ a = keys.copy()
+ np.random.shuffle(a)
+ s = a.argsort()
+ aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
+ unaligned = aligned[1:].view(a.dtype)
+ # Test searching unaligned array
+ unaligned[:] = a
+ b = unaligned.searchsorted(keys, 'left', s)
+ assert_equal(b, keys)
+ b = unaligned.searchsorted(keys, 'right', s)
+ assert_equal(b, keys + 1)
+ # Test searching for unaligned keys
+ unaligned[:] = keys
+ b = a.searchsorted(unaligned, 'left', s)
+ assert_equal(b, keys)
+ b = a.searchsorted(unaligned, 'right', s)
+ assert_equal(b, keys + 1)
+
+ # Test all type specific indirect binary search functions
+ types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
+ np.typecodes['Datetime'], '?O'))
+ for dt in types:
+ if dt == 'M':
+ dt = 'M8[D]'
+ if dt == '?':
+ a = np.array([1, 0], dtype=dt)
+ # We want the sorter array to be of a type that is different
+ # from np.intp in all platforms, to check for #4698
+ s = np.array([1, 0], dtype=np.int16)
+ out = np.array([1, 0])
+ else:
+ a = np.array([3, 4, 1, 2, 0], dtype=dt)
+ # We want the sorter array to be of a type that is different
+ # from np.intp in all platforms, to check for #4698
+ s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
+ out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
+ b = a.searchsorted(a, 'left', s)
+ assert_equal(b, out)
+ b = a.searchsorted(a, 'right', s)
+ assert_equal(b, out + 1)
+ # Test empty array, use a fresh array to get warnings in
+ # valgrind if access happens.
+ e = np.ndarray(shape=0, buffer=b'', dtype=dt)
+ b = e.searchsorted(a, 'left', s[:0])
+ assert_array_equal(b, np.zeros(len(a), dtype=np.intp))
+ b = a.searchsorted(e, 'left', s)
+ assert_array_equal(b, np.zeros(0, dtype=np.intp))
+
+ # Test non-contiguous sorter array
+ a = np.array([3, 4, 1, 2, 0])
+ srt = np.empty((10,), dtype=np.intp)
+ srt[1::2] = -1
+ srt[::2] = [4, 2, 3, 0, 1]
+ s = srt[::2]
+ out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
+ b = a.searchsorted(a, 'left', s)
+ assert_equal(b, out)
+ b = a.searchsorted(a, 'right', s)
+ assert_equal(b, out + 1)
+
+ def test_searchsorted_return_type(self):
+ # Functions returning indices should always return base ndarrays
+ class A(np.ndarray):
+ pass
+ a = np.arange(5).view(A)
+ b = np.arange(1, 3).view(A)
+ s = np.arange(5).view(A)
+ assert_(not isinstance(a.searchsorted(b, 'left'), A))
+ assert_(not isinstance(a.searchsorted(b, 'right'), A))
+ assert_(not isinstance(a.searchsorted(b, 'left', s), A))
+ assert_(not isinstance(a.searchsorted(b, 'right', s), A))
+
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_argpartition_out_of_range(self, dtype):
+ # Test out of range values in kth raise an error, gh-5469
+ d = np.arange(10).astype(dtype=dtype)
+ assert_raises(ValueError, d.argpartition, 10)
+ assert_raises(ValueError, d.argpartition, -11)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_partition_out_of_range(self, dtype):
+ # Test out of range values in kth raise an error, gh-5469
+ d = np.arange(10).astype(dtype=dtype)
+ assert_raises(ValueError, d.partition, 10)
+ assert_raises(ValueError, d.partition, -11)
+
+ def test_argpartition_integer(self):
+ # Test that non-integer values in kth raise an error
+ d = np.arange(10)
+ assert_raises(TypeError, d.argpartition, 9.)
+ # Test also for generic type argpartition, which uses sorting
+ # and used to not bound check kth
+ d_obj = np.arange(10, dtype=object)
+ assert_raises(TypeError, d_obj.argpartition, 9.)
+
+ def test_partition_integer(self):
+ # Test that non-integer values in kth raise an error
+ d = np.arange(10)
+ assert_raises(TypeError, d.partition, 9.)
+ # Test also for generic type partition, which uses sorting
+ # and used to not bound check kth
+ d_obj = np.arange(10, dtype=object)
+ assert_raises(TypeError, d_obj.partition, 9.)
+
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_partition_empty_array(self, kth_dtype):
+ # check axis handling for multidimensional empty arrays
+ kth = np.array(0, dtype=kth_dtype)[()]
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array partition with axis={0}'.format(axis)
+ assert_equal(np.partition(a, kth, axis=axis), a, msg)
+ msg = 'test empty array partition with axis=None'
+ assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
+
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_argpartition_empty_array(self, kth_dtype):
+ # check axis handling for multidimensional empty arrays
+ kth = np.array(0, dtype=kth_dtype)[()]
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array argpartition with axis={0}'.format(axis)
+ assert_equal(np.argpartition(a, kth, axis=axis),
+ np.zeros_like(a, dtype=np.intp), msg)
+ msg = 'test empty array argpartition with axis=None'
+ assert_equal(np.argpartition(a, kth, axis=None),
+ np.zeros_like(a.ravel(), dtype=np.intp), msg)
+
+ def test_partition(self):
+ d = np.arange(10)
+ assert_raises(TypeError, np.partition, d, 2, kind=1)
+ assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
+ assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
+ assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
+ assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
+ for k in ("introselect",):
+ d = np.array([])
+ assert_array_equal(np.partition(d, 0, kind=k), d)
+ assert_array_equal(np.argpartition(d, 0, kind=k), d)
+ d = np.ones(1)
+ assert_array_equal(np.partition(d, 0, kind=k)[0], d)
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+
+ # kth not modified
+ kth = np.array([30, 15, 5])
+ okth = kth.copy()
+ np.partition(np.arange(40), kth)
+ assert_array_equal(kth, okth)
+
+ for r in ([2, 1], [1, 2], [1, 1]):
+ d = np.array(r)
+ tgt = np.sort(d)
+ assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
+ assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+ assert_array_equal(d[np.argpartition(d, 1, kind=k)],
+ np.partition(d, 1, kind=k))
+ for i in range(d.size):
+ d[i:].partition(0, kind=k)
+ assert_array_equal(d, tgt)
+
+ for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
+ [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
+ d = np.array(r)
+ tgt = np.sort(d)
+ assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
+ assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
+ assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+ assert_array_equal(d[np.argpartition(d, 1, kind=k)],
+ np.partition(d, 1, kind=k))
+ assert_array_equal(d[np.argpartition(d, 2, kind=k)],
+ np.partition(d, 2, kind=k))
+ for i in range(d.size):
+ d[i:].partition(0, kind=k)
+ assert_array_equal(d, tgt)
+
+ d = np.ones(50)
+ assert_array_equal(np.partition(d, 0, kind=k), d)
+ assert_array_equal(d[np.argpartition(d, 0, kind=k)],
+ np.partition(d, 0, kind=k))
+
+ # sorted
+ d = np.arange(49)
+ assert_equal(np.partition(d, 5, kind=k)[5], 5)
+ assert_equal(np.partition(d, 15, kind=k)[15], 15)
+ assert_array_equal(d[np.argpartition(d, 5, kind=k)],
+ np.partition(d, 5, kind=k))
+ assert_array_equal(d[np.argpartition(d, 15, kind=k)],
+ np.partition(d, 15, kind=k))
+
+ # rsorted
+ d = np.arange(47)[::-1]
+ assert_equal(np.partition(d, 6, kind=k)[6], 6)
+ assert_equal(np.partition(d, 16, kind=k)[16], 16)
+ assert_array_equal(d[np.argpartition(d, 6, kind=k)],
+ np.partition(d, 6, kind=k))
+ assert_array_equal(d[np.argpartition(d, 16, kind=k)],
+ np.partition(d, 16, kind=k))
+
+ assert_array_equal(np.partition(d, -6, kind=k),
+ np.partition(d, 41, kind=k))
+ assert_array_equal(np.partition(d, -16, kind=k),
+ np.partition(d, 31, kind=k))
+ assert_array_equal(d[np.argpartition(d, -6, kind=k)],
+ np.partition(d, 41, kind=k))
+
+ # median of 3 killer, O(n^2) on pure median 3 pivot quickselect
+ # exercises the median of median of 5 code used to keep O(n)
+ d = np.arange(1000000)
+ x = np.roll(d, d.size // 2)
+ mid = x.size // 2 + 1
+ assert_equal(np.partition(x, mid)[mid], mid)
+ d = np.arange(1000001)
+ x = np.roll(d, d.size // 2 + 1)
+ mid = x.size // 2 + 1
+ assert_equal(np.partition(x, mid)[mid], mid)
+
+ # max
+ d = np.ones(10)
+ d[1] = 4
+ assert_equal(np.partition(d, (2, -1))[-1], 4)
+ assert_equal(np.partition(d, (2, -1))[2], 1)
+ assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
+ assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
+ d[1] = np.nan
+ assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
+ assert_(np.isnan(np.partition(d, (2, -1))[-1]))
+
+ # equal elements
+ d = np.arange(47) % 7
+ tgt = np.sort(np.arange(47) % 7)
+ np.random.shuffle(d)
+ for i in range(d.size):
+ assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
+ assert_array_equal(d[np.argpartition(d, 6, kind=k)],
+ np.partition(d, 6, kind=k))
+ assert_array_equal(d[np.argpartition(d, 16, kind=k)],
+ np.partition(d, 16, kind=k))
+ for i in range(d.size):
+ d[i:].partition(0, kind=k)
+ assert_array_equal(d, tgt)
+
+ d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 9])
+ kth = [0, 3, 19, 20]
+ assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
+ assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
+
+ d = np.array([2, 1])
+ d.partition(0, kind=k)
+ assert_raises(ValueError, d.partition, 2)
+ assert_raises(np.AxisError, d.partition, 3, axis=1)
+ assert_raises(ValueError, np.partition, d, 2)
+ assert_raises(np.AxisError, np.partition, d, 2, axis=1)
+ assert_raises(ValueError, d.argpartition, 2)
+ assert_raises(np.AxisError, d.argpartition, 3, axis=1)
+ assert_raises(ValueError, np.argpartition, d, 2)
+ assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
+ d = np.arange(10).reshape((2, 5))
+ d.partition(1, axis=0, kind=k)
+ d.partition(4, axis=1, kind=k)
+ np.partition(d, 1, axis=0, kind=k)
+ np.partition(d, 4, axis=1, kind=k)
+ np.partition(d, 1, axis=None, kind=k)
+ np.partition(d, 9, axis=None, kind=k)
+ d.argpartition(1, axis=0, kind=k)
+ d.argpartition(4, axis=1, kind=k)
+ np.argpartition(d, 1, axis=0, kind=k)
+ np.argpartition(d, 4, axis=1, kind=k)
+ np.argpartition(d, 1, axis=None, kind=k)
+ np.argpartition(d, 9, axis=None, kind=k)
+ assert_raises(ValueError, d.partition, 2, axis=0)
+ assert_raises(ValueError, d.partition, 11, axis=1)
+ assert_raises(TypeError, d.partition, 2, axis=None)
+ assert_raises(ValueError, np.partition, d, 9, axis=1)
+ assert_raises(ValueError, np.partition, d, 11, axis=None)
+ assert_raises(ValueError, d.argpartition, 2, axis=0)
+ assert_raises(ValueError, d.argpartition, 11, axis=1)
+ assert_raises(ValueError, np.argpartition, d, 9, axis=1)
+ assert_raises(ValueError, np.argpartition, d, 11, axis=None)
+
+ td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
+ for s in (9, 16)]
+ for dt, s in td:
+ aae = assert_array_equal
+ at = assert_
+
+ d = np.arange(s, dtype=dt)
+ np.random.shuffle(d)
+ d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
+ # map() is lazy in Python 3, so iterate explicitly to actually shuffle
+ for row in d1:
+ np.random.shuffle(row)
+ d0 = np.transpose(d1)
+ for i in range(d.size):
+ p = np.partition(d, i, kind=k)
+ assert_equal(p[i], i)
+ # all before are smaller
+ assert_array_less(p[:i], p[i])
+ # all after are larger
+ assert_array_less(p[i], p[i + 1:])
+ aae(p, d[np.argpartition(d, i, kind=k)])
+
+ p = np.partition(d1, i, axis=1, kind=k)
+ aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
+ # assert_array_less does not handle this broadcast reliably, check manually
+ at((p[:, :i].T <= p[:, i]).all(),
+ msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
+ at((p[:, i + 1:].T > p[:, i]).all(),
+ msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
+ aae(p, d1[np.arange(d1.shape[0])[:, None],
+ np.argpartition(d1, i, axis=1, kind=k)])
+
+ p = np.partition(d0, i, axis=0, kind=k)
+ aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
+ # assert_array_less does not handle this broadcast reliably, check manually
+ at((p[:i, :] <= p[i, :]).all(),
+ msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
+ at((p[i + 1:, :] > p[i, :]).all(),
+ msg="%d: %r < %r" % (i, p[i, :], p[i + 1:, :]))
+ aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
+ np.arange(d0.shape[1])[None, :]])
+
+ # check inplace
+ dc = d.copy()
+ dc.partition(i, kind=k)
+ assert_equal(dc, np.partition(d, i, kind=k))
+ dc = d0.copy()
+ dc.partition(i, axis=0, kind=k)
+ assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
+ dc = d1.copy()
+ dc.partition(i, axis=1, kind=k)
+ assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
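+ # Illustrative sketch, not part of the upstream suite: the partition
+ # contract is that d[kth] lands in its sorted position, with smaller
+ # or equal values before it and larger or equal values after it.
+ d = np.array([7, 1, 5, 3, 9])
+ p = np.partition(d, 2)
+ assert_equal(p[2], 5)
+ assert_((p[:2] <= p[2]).all() and (p[3:] >= p[2]).all())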
+
+ def assert_partitioned(self, d, kth):
+ prev = 0
+ for k in np.sort(kth):
+ assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
+ assert_((d[k:] >= d[k]).all(),
+ msg="kth %d, %r not greater than or equal to %d" % (k, d[k:], d[k]))
+ prev = k + 1
+
+ def test_partition_iterative(self):
+ d = np.arange(17)
+ kth = (0, 1, 2, 429, 231)
+ assert_raises(ValueError, d.partition, kth)
+ assert_raises(ValueError, d.argpartition, kth)
+ d = np.arange(10).reshape((2, 5))
+ assert_raises(ValueError, d.partition, kth, axis=0)
+ assert_raises(ValueError, d.partition, kth, axis=1)
+ assert_raises(ValueError, np.partition, d, kth, axis=1)
+ assert_raises(ValueError, np.partition, d, kth, axis=None)
+
+ d = np.array([3, 4, 2, 1])
+ p = np.partition(d, (0, 3))
+ self.assert_partitioned(p, (0, 3))
+ self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
+
+ assert_array_equal(p, np.partition(d, (-3, -1)))
+ assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
+
+ d = np.arange(17)
+ np.random.shuffle(d)
+ d.partition(range(d.size))
+ assert_array_equal(np.arange(17), d)
+ np.random.shuffle(d)
+ assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
+
+ # test unsorted kth
+ d = np.arange(17)
+ np.random.shuffle(d)
+ keys = np.array([1, 3, 8, -2])
+ np.random.shuffle(d)
+ p = np.partition(d, keys)
+ self.assert_partitioned(p, keys)
+ p = d[np.argpartition(d, keys)]
+ self.assert_partitioned(p, keys)
+ np.random.shuffle(keys)
+ assert_array_equal(np.partition(d, keys), p)
+ assert_array_equal(d[np.argpartition(d, keys)], p)
+
+ # equal kth
+ d = np.arange(20)[::-1]
+ self.assert_partitioned(np.partition(d, [5]*4), [5])
+ self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
+ [5]*4 + [6, 13])
+ self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
+ self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
+ [5]*4 + [6, 13])
+
+ d = np.arange(12)
+ np.random.shuffle(d)
+ d1 = np.tile(np.arange(12), (4, 1))
+ # map() is lazy in Python 3, so iterate explicitly to actually shuffle
+ for row in d1:
+ np.random.shuffle(row)
+ d0 = np.transpose(d1)
+
+ kth = (1, 6, 7, -1)
+ p = np.partition(d1, kth, axis=1)
+ pa = d1[np.arange(d1.shape[0])[:, None],
+ d1.argpartition(kth, axis=1)]
+ assert_array_equal(p, pa)
+ for i in range(d1.shape[0]):
+ self.assert_partitioned(p[i,:], kth)
+ p = np.partition(d0, kth, axis=0)
+ pa = d0[np.argpartition(d0, kth, axis=0),
+ np.arange(d0.shape[1])[None,:]]
+ assert_array_equal(p, pa)
+ for i in range(d0.shape[1]):
+ self.assert_partitioned(p[:, i], kth)
+
+ def test_partition_cdtype(self):
+ d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+ ('Lancelot', 1.9, 38)],
+ dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+ tgt = np.sort(d, order=['age', 'height'])
+ assert_array_equal(np.partition(d, range(d.size),
+ order=['age', 'height']),
+ tgt)
+ assert_array_equal(d[np.argpartition(d, range(d.size),
+ order=['age', 'height'])],
+ tgt)
+ for k in range(d.size):
+ assert_equal(np.partition(d, k, order=['age', 'height'])[k],
+ tgt[k])
+ assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
+ tgt[k])
+
+ d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
+ tgt = np.sort(d)
+ assert_array_equal(np.partition(d, range(d.size)), tgt)
+ for k in range(d.size):
+ assert_equal(np.partition(d, k)[k], tgt[k])
+ assert_equal(d[np.argpartition(d, k)][k], tgt[k])
+
+ def test_partition_unicode_kind(self):
+ d = np.arange(10)
+ k = b'\xc3\xa4'.decode("UTF8")
+ assert_raises(ValueError, d.partition, 2, kind=k)
+ assert_raises(ValueError, d.argpartition, 2, kind=k)
+
+ def test_partition_fuzz(self):
+ # a few rounds of random data testing
+ for j in range(10, 30):
+ for i in range(1, j - 2):
+ d = np.arange(j)
+ np.random.shuffle(d)
+ d = d % np.random.randint(2, 30)
+ idx = np.random.randint(d.size)
+ kth = [0, idx, i, i + 1]
+ tgt = np.sort(d)[kth]
+ assert_array_equal(np.partition(d, kth)[kth], tgt,
+ err_msg="data: %r\n kth: %r" % (d, kth))
+
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_argpartition_gh5524(self, kth_dtype):
+ # A test for functionality of argpartition on lists.
+ kth = np.array(1, dtype=kth_dtype)[()]
+ d = [6, 7, 3, 2, 9, 0]
+ p = np.argpartition(d, kth)
+ self.assert_partitioned(np.array(d)[p], [1])
+
+ def test_flatten(self):
+ x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
+ x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
+ y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
+ y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
+ y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
+ y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
+ assert_equal(x0.flatten(), y0)
+ assert_equal(x0.flatten('F'), y0f)
+ assert_equal(x0.flatten('F'), x0.T.flatten())
+ assert_equal(x1.flatten(), y1)
+ assert_equal(x1.flatten('F'), y1f)
+ assert_equal(x1.flatten('F'), x1.T.flatten())
+
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ def test_arr_mult(self, func):
+ a = np.array([[1, 0], [0, 1]])
+ b = np.array([[0, 1], [1, 0]])
+ c = np.array([[9, 1], [1, -9]])
+ d = np.arange(24).reshape(4, 6)
+ ddt = np.array(
+ [[ 55, 145, 235, 325],
+ [ 145, 451, 757, 1063],
+ [ 235, 757, 1279, 1801],
+ [ 325, 1063, 1801, 2539]]
+ )
+ dtd = np.array(
+ [[504, 540, 576, 612, 648, 684],
+ [540, 580, 620, 660, 700, 740],
+ [576, 620, 664, 708, 752, 796],
+ [612, 660, 708, 756, 804, 852],
+ [648, 700, 752, 804, 856, 908],
+ [684, 740, 796, 852, 908, 964]]
+ )
+
+ # gemm vs syrk optimizations
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ eaf = a.astype(et)
+ assert_equal(func(eaf, eaf), eaf)
+ assert_equal(func(eaf.T, eaf), eaf)
+ assert_equal(func(eaf, eaf.T), eaf)
+ assert_equal(func(eaf.T, eaf.T), eaf)
+ assert_equal(func(eaf.T.copy(), eaf), eaf)
+ assert_equal(func(eaf, eaf.T.copy()), eaf)
+ assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
+
+ # syrk validations
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ eaf = a.astype(et)
+ ebf = b.astype(et)
+ assert_equal(func(ebf, ebf), eaf)
+ assert_equal(func(ebf.T, ebf), eaf)
+ assert_equal(func(ebf, ebf.T), eaf)
+ assert_equal(func(ebf.T, ebf.T), eaf)
+
+ # syrk - different shape, stride, and view validations
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ edf = d.astype(et)
+ assert_equal(
+ func(edf[::-1, :], edf.T),
+ func(edf[::-1, :].copy(), edf.T.copy())
+ )
+ assert_equal(
+ func(edf[:, ::-1], edf.T),
+ func(edf[:, ::-1].copy(), edf.T.copy())
+ )
+ assert_equal(
+ func(edf, edf[::-1, :].T),
+ func(edf, edf[::-1, :].T.copy())
+ )
+ assert_equal(
+ func(edf, edf[:, ::-1].T),
+ func(edf, edf[:, ::-1].T.copy())
+ )
+ assert_equal(
+ func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
+ func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
+ )
+ assert_equal(
+ func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
+ func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
+ )
+
+ # syrk - different shape
+ for et in [np.float32, np.float64, np.complex64, np.complex128]:
+ edf = d.astype(et)
+ eddtf = ddt.astype(et)
+ edtdf = dtd.astype(et)
+ assert_equal(func(edf, edf.T), eddtf)
+ assert_equal(func(edf.T, edf), edtdf)
+
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ @pytest.mark.parametrize('dtype', 'ifdFD')
+ def test_no_dgemv(self, func, dtype):
+ # check that vector arguments are made contiguous before calling gemv
+ # gh-12156
+ a = np.arange(8.0, dtype=dtype).reshape(2, 4)
+ b = np.broadcast_to(1., (4, 1))
+ ret1 = func(a, b)
+ ret2 = func(a, b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T)
+ assert_equal(ret1, ret2)
+
+ # check for unaligned data
+ dt = np.dtype(dtype)
+ a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
+ a = a.reshape(2, 4)
+ b = a[0]
+ # make sure it is not aligned
+ assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
+ ret1 = func(a, b)
+ ret2 = func(a.copy(), b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T.copy())
+ assert_equal(ret1, ret2)
+
+ def test_dot(self):
+ a = np.array([[1, 0], [0, 1]])
+ b = np.array([[0, 1], [1, 0]])
+ c = np.array([[9, 1], [1, -9]])
+ # function versus methods
+ assert_equal(np.dot(a, b), a.dot(b))
+ assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
+
+ # test passing in an output array
+ c = np.zeros_like(a)
+ a.dot(b, c)
+ assert_equal(c, np.dot(a, b))
+
+ # test keyword args
+ c = np.zeros_like(a)
+ a.dot(b=b, out=c)
+ assert_equal(c, np.dot(a, b))
+
+ def test_dot_type_mismatch(self):
+ c = 1.
+ A = np.array((1,1), dtype='i,i')
+
+ assert_raises(TypeError, np.dot, c, A)
+ assert_raises(TypeError, np.dot, A, c)
+
+ def test_dot_out_mem_overlap(self):
+ np.random.seed(1)
+
+ # Test BLAS and non-BLAS code paths, including all dtypes
+ # that dot() supports
+ dtypes = [np.dtype(code) for code in np.typecodes['All']
+ if code not in 'USVM']
+ for dtype in dtypes:
+ a = np.random.rand(3, 3).astype(dtype)
+
+ # Valid dot() output arrays must be aligned
+ b = _aligned_zeros((3, 3), dtype=dtype)
+ b[...] = np.random.rand(3, 3)
+
+ y = np.dot(a, b)
+ x = np.dot(a, b, out=b)
+ assert_equal(x, y, err_msg=repr(dtype))
+
+ # Check invalid output array
+ assert_raises(ValueError, np.dot, a, b, out=b[::2])
+ assert_raises(ValueError, np.dot, a, b, out=b.T)
+
+ def test_dot_matmul_out(self):
+ # gh-9641
+ class Sub(np.ndarray):
+ pass
+ a = np.ones((2, 2)).view(Sub)
+ b = np.ones((2, 2)).view(Sub)
+ out = np.ones((2, 2))
+
+ # make sure out can be any ndarray (not only subclass of inputs)
+ np.dot(a, b, out=out)
+ np.matmul(a, b, out=out)
+
+ def test_dot_matmul_inner_array_casting_fails(self):
+
+ class A:
+ def __array__(self, *args, **kwargs):
+ raise NotImplementedError
+
+ # Don't override the error from calling __array__()
+ assert_raises(NotImplementedError, np.dot, A(), A())
+ assert_raises(NotImplementedError, np.matmul, A(), A())
+ assert_raises(NotImplementedError, np.inner, A(), A())
+
+ def test_matmul_out(self):
+ # overlapping memory
+ a = np.arange(18).reshape(2, 3, 3)
+ b = np.matmul(a, a)
+ c = np.matmul(a, a, out=a)
+ assert_(c is a)
+ assert_equal(c, b)
+ a = np.arange(18).reshape(2, 3, 3)
+ c = np.matmul(a, a, out=a[::-1, ...])
+ assert_(c.base is a.base)
+ assert_equal(c, b)
+
+ def test_diagonal(self):
+ a = np.arange(12).reshape((3, 4))
+ assert_equal(a.diagonal(), [0, 5, 10])
+ assert_equal(a.diagonal(0), [0, 5, 10])
+ assert_equal(a.diagonal(1), [1, 6, 11])
+ assert_equal(a.diagonal(-1), [4, 9])
+ assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
+ assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
+ assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
+ assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
+
+ b = np.arange(8).reshape((2, 2, 2))
+ assert_equal(b.diagonal(), [[0, 6], [1, 7]])
+ assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
+ assert_equal(b.diagonal(1), [[2], [3]])
+ assert_equal(b.diagonal(-1), [[4], [5]])
+ assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
+ assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
+ assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
+ assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
+ # Order of axis argument doesn't matter:
+ assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
+
+ def test_diagonal_view_notwriteable(self):
+ a = np.eye(3).diagonal()
+ assert_(not a.flags.writeable)
+ assert_(not a.flags.owndata)
+
+ a = np.diagonal(np.eye(3))
+ assert_(not a.flags.writeable)
+ assert_(not a.flags.owndata)
+
+ a = np.diag(np.eye(3))
+ assert_(not a.flags.writeable)
+ assert_(not a.flags.owndata)
+
+ def test_diagonal_memleak(self):
+ # Regression test for a bug that crept in at one point
+ a = np.zeros((100, 100))
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a) < 50)
+ for i in range(100):
+ a.diagonal()
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a) < 50)
+
+ def test_size_zero_memleak(self):
+ # Regression test for issue 9615
+ # Exercises a special-case code path for dot products of length
+ # zero in cblasfuncs (making it specific to floating dtypes).
+ a = np.array([], dtype=np.float64)
+ x = np.array(2.0)
+ for _ in range(100):
+ np.dot(a, a, out=x)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(x) < 50)
+
+ def test_trace(self):
+ a = np.arange(12).reshape((3, 4))
+ assert_equal(a.trace(), 15)
+ assert_equal(a.trace(0), 15)
+ assert_equal(a.trace(1), 18)
+ assert_equal(a.trace(-1), 13)
+
+ b = np.arange(8).reshape((2, 2, 2))
+ assert_equal(b.trace(), [6, 8])
+ assert_equal(b.trace(0), [6, 8])
+ assert_equal(b.trace(1), [2, 3])
+ assert_equal(b.trace(-1), [4, 5])
+ assert_equal(b.trace(0, 0, 1), [6, 8])
+ assert_equal(b.trace(0, 0, 2), [5, 9])
+ assert_equal(b.trace(0, 1, 2), [3, 11])
+ assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
+
+ out = np.array(1)
+ ret = a.trace(out=out)
+ assert ret is out
+
+ def test_trace_subclass(self):
+ # The class would need to override trace to ensure that a
+ # single-element output also has the right subclass.
+ class MyArray(np.ndarray):
+ pass
+
+ b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
+ t = b.trace()
+ assert_(isinstance(t, MyArray))
+
+ def test_put(self):
+ icodes = np.typecodes['AllInteger']
+ fcodes = np.typecodes['AllFloat']
+ for dt in icodes + fcodes + 'O':
+ tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
+
+ # test 1-d
+ a = np.zeros(6, dtype=dt)
+ a.put([1, 3, 5], [1, 3, 5])
+ assert_equal(a, tgt)
+
+ # test 2-d
+ a = np.zeros((2, 3), dtype=dt)
+ a.put([1, 3, 5], [1, 3, 5])
+ assert_equal(a, tgt.reshape(2, 3))
+
+ for dt in '?':
+ tgt = np.array([False, True, False, True, False, True], dtype=dt)
+
+ # test 1-d
+ a = np.zeros(6, dtype=dt)
+ a.put([1, 3, 5], [True]*3)
+ assert_equal(a, tgt)
+
+ # test 2-d
+ a = np.zeros((2, 3), dtype=dt)
+ a.put([1, 3, 5], [True]*3)
+ assert_equal(a, tgt.reshape(2, 3))
+
+ # check must be writeable
+ a = np.zeros(6)
+ a.flags.writeable = False
+ assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
+
+ # when calling np.put, make sure a
+ # TypeError is raised if the object
+ # isn't an ndarray
+ bad_array = [1, 2, 3]
+ assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
+
+ def test_ravel(self):
+ a = np.array([[0, 1], [2, 3]])
+ assert_equal(a.ravel(), [0, 1, 2, 3])
+ assert_(not a.ravel().flags.owndata)
+ assert_equal(a.ravel('F'), [0, 2, 1, 3])
+ assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
+ assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
+ assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
+ assert_(not a.ravel(order='A').flags.owndata)
+ assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
+ assert_(not a.ravel(order='K').flags.owndata)
+ assert_equal(a.ravel(), a.reshape(-1))
+
+ a = np.array([[0, 1], [2, 3]], order='F')
+ assert_equal(a.ravel(), [0, 1, 2, 3])
+ assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
+ assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
+ assert_(not a.ravel(order='A').flags.owndata)
+ assert_(not a.ravel(order='K').flags.owndata)
+ assert_equal(a.ravel(), a.reshape(-1))
+ assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
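+ # Illustrative sketch, not part of the upstream suite: order='A' only
+ # reads in Fortran order when the array is Fortran contiguous.
+ assert_equal(np.asfortranarray([[0, 1], [2, 3]]).ravel('A'),
+ [0, 2, 1, 3])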
+
+ a = np.array([[0, 1], [2, 3]])[::-1, :]
+ assert_equal(a.ravel(), [2, 3, 0, 1])
+ assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
+ assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
+ assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
+ # 'K' doesn't reverse the axes of negative strides
+ assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
+ assert_(a.ravel(order='K').flags.owndata)
+
+ # Test simple 1-d copy behaviour:
+ a = np.arange(10)[::2]
+ assert_(a.ravel('K').flags.owndata)
+ assert_(a.ravel('C').flags.owndata)
+ assert_(a.ravel('F').flags.owndata)
+
+ # Non-contiguous and 1-sized axis with non-matching stride
+ a = np.arange(2**3 * 2)[::2]
+ a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
+ strides = list(a.strides)
+ strides[1] = 123
+ a.strides = strides
+ assert_(a.ravel(order='K').flags.owndata)
+ assert_equal(a.ravel('K'), np.arange(0, 15, 2))
+
+ # contiguous and 1-sized axis with non-matching stride works:
+ a = np.arange(2**3)
+ a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
+ strides = list(a.strides)
+ strides[1] = 123
+ a.strides = strides
+ assert_(np.may_share_memory(a.ravel(order='K'), a))
+ assert_equal(a.ravel(order='K'), np.arange(2**3))
+
+ # Test negative strides (not very interesting since non-contiguous):
+ a = np.arange(4)[::-1].reshape(2, 2)
+ assert_(a.ravel(order='C').flags.owndata)
+ assert_(a.ravel(order='K').flags.owndata)
+ assert_equal(a.ravel('C'), [3, 2, 1, 0])
+ assert_equal(a.ravel('K'), [3, 2, 1, 0])
+
+ # 1-element tidy strides test:
+ a = np.array([[1]])
+ a.strides = (123, 432)
+ # If the following stride is not 8, NPY_RELAXED_STRIDES_DEBUG is
+ # messing them up on purpose:
+ if np.ones(1).strides == (8,):
+ assert_(np.may_share_memory(a.ravel('K'), a))
+ assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
+
+ for order in ('C', 'F', 'A', 'K'):
+ # 0-d corner case:
+ a = np.array(0)
+ assert_equal(a.ravel(order), [0])
+ assert_(np.may_share_memory(a.ravel(order), a))
+
+ # Test that certain non-inplace ravels work right (mostly) for 'K':
+ b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
+ a = b[..., ::2]
+ assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
+ assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
+ assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
+ assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
+
+ a = b[::2, ...]
+ assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
+ assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
+ assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
+ assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
+
+ def test_ravel_subclass(self):
+ class ArraySubclass(np.ndarray):
+ pass
+
+ a = np.arange(10).view(ArraySubclass)
+ assert_(isinstance(a.ravel('C'), ArraySubclass))
+ assert_(isinstance(a.ravel('F'), ArraySubclass))
+ assert_(isinstance(a.ravel('A'), ArraySubclass))
+ assert_(isinstance(a.ravel('K'), ArraySubclass))
+
+ a = np.arange(10)[::2].view(ArraySubclass)
+ assert_(isinstance(a.ravel('C'), ArraySubclass))
+ assert_(isinstance(a.ravel('F'), ArraySubclass))
+ assert_(isinstance(a.ravel('A'), ArraySubclass))
+ assert_(isinstance(a.ravel('K'), ArraySubclass))
+
+ def test_swapaxes(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
+ idx = np.indices(a.shape)
+ assert_(a.flags['OWNDATA'])
+ b = a.copy()
+ # check exceptions
+ assert_raises(np.AxisError, a.swapaxes, -5, 0)
+ assert_raises(np.AxisError, a.swapaxes, 4, 0)
+ assert_raises(np.AxisError, a.swapaxes, 0, -5)
+ assert_raises(np.AxisError, a.swapaxes, 0, 4)
+
+ for i in range(-4, 4):
+ for j in range(-4, 4):
+ for k, src in enumerate((a, b)):
+ c = src.swapaxes(i, j)
+ # check shape
+ shape = list(src.shape)
+ shape[i] = src.shape[j]
+ shape[j] = src.shape[i]
+ assert_equal(c.shape, shape, str((i, j, k)))
+ # check array contents
+ i0, i1, i2, i3 = [dim-1 for dim in c.shape]
+ j0, j1, j2, j3 = [dim-1 for dim in src.shape]
+ assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
+ c[idx[i0], idx[i1], idx[i2], idx[i3]],
+ str((i, j, k)))
+ # check a view is always returned, gh-5260
+ assert_(not c.flags['OWNDATA'], str((i, j, k)))
+ # check on non-contiguous input array
+ if k == 1:
+ b = c
+
+ def test_conjugate(self):
+ a = np.array([1-1j, 1+1j, 23+23.0j])
+ ac = a.conj()
+ assert_equal(a.real, ac.real)
+ assert_equal(a.imag, -ac.imag)
+ assert_equal(ac, a.conjugate())
+ assert_equal(ac, np.conjugate(a))
+
+ a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
+ ac = a.conj()
+ assert_equal(a.real, ac.real)
+ assert_equal(a.imag, -ac.imag)
+ assert_equal(ac, a.conjugate())
+ assert_equal(ac, np.conjugate(a))
+
+ a = np.array([1, 2, 3])
+ ac = a.conj()
+ assert_equal(a, ac)
+ assert_equal(ac, a.conjugate())
+ assert_equal(ac, np.conjugate(a))
+
+ a = np.array([1.0, 2.0, 3.0])
+ ac = a.conj()
+ assert_equal(a, ac)
+ assert_equal(ac, a.conjugate())
+ assert_equal(ac, np.conjugate(a))
+
+ a = np.array([1-1j, 1+1j, 1, 2.0], object)
+ ac = a.conj()
+ assert_equal(ac, [k.conjugate() for k in a])
+ assert_equal(ac, a.conjugate())
+ assert_equal(ac, np.conjugate(a))
+
+ a = np.array([1-1j, 1, 2.0, 'f'], object)
+ assert_raises(TypeError, lambda: a.conj())
+ assert_raises(TypeError, lambda: a.conjugate())
+
+ def test_conjugate_out(self):
+ # Minimal test for the out argument being passed on correctly
+ # NOTE: The ability to pass `out` is currently undocumented!
+ a = np.array([1-1j, 1+1j, 23+23.0j])
+ out = np.empty_like(a)
+ res = a.conjugate(out)
+ assert res is out
+ assert_array_equal(out, a.conjugate())
+
+ def test__complex__(self):
+ dtypes = ['i1', 'i2', 'i4', 'i8',
+ 'u1', 'u2', 'u4', 'u8',
+ 'f', 'd', 'g', 'F', 'D', 'G',
+ '?', 'O']
+ for dt in dtypes:
+ a = np.array(7, dtype=dt)
+ b = np.array([7], dtype=dt)
+ c = np.array([[[[[7]]]]], dtype=dt)
+
+ msg = 'dtype: {0}'.format(dt)
+ ap = complex(a)
+ assert_equal(ap, a, msg)
+ bp = complex(b)
+ assert_equal(bp, b, msg)
+ cp = complex(c)
+ assert_equal(cp, c, msg)
+
+ def test__complex__should_not_work(self):
+ dtypes = ['i1', 'i2', 'i4', 'i8',
+ 'u1', 'u2', 'u4', 'u8',
+ 'f', 'd', 'g', 'F', 'D', 'G',
+ '?', 'O']
+ for dt in dtypes:
+ a = np.array([1, 2, 3], dtype=dt)
+ assert_raises(TypeError, complex, a)
+
+ dt = np.dtype([('a', 'f8'), ('b', 'i1')])
+ b = np.array((1.0, 3), dtype=dt)
+ assert_raises(TypeError, complex, b)
+
+ c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
+ assert_raises(TypeError, complex, c)
+
+ d = np.array('1+1j')
+ assert_raises(TypeError, complex, d)
+
+ e = np.array(['1+1j'], 'U')
+ assert_raises(TypeError, complex, e)
+
+class TestSequenceMethods:
+ def test_array_contains(self):
+ assert_(4.0 in np.arange(16.).reshape(4, 4))
+ assert_(20.0 not in np.arange(16.).reshape(4, 4))
+
+class TestBinop:
+ def test_inplace(self):
+ # test refcount 1 inplace conversion
+ assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
+ [0.5, 1.0])
+
+ d = np.array([0.5, 0.5])[::2]
+ assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
+ [0.25, 0.5])
+
+ a = np.array([0.5])
+ b = np.array([0.5])
+ c = a + b
+ c = a - b
+ c = a * b
+ c = a / b
+ assert_equal(a, b)
+ assert_almost_equal(c, 1.)
+
+ c = a + b * 2. / b * a - a / b
+ assert_equal(a, b)
+ assert_equal(c, 0.5)
+
+ # true divide
+ a = np.array([5])
+ b = np.array([3])
+ c = (a * a) / b
+
+ assert_almost_equal(c, 25 / 3)
+ assert_equal(a, 5)
+ assert_equal(b, 3)
+
+ # ndarray.__rop__ always calls ufunc
+ # ndarray.__iop__ always calls ufunc
+ # ndarray.__op__, __rop__:
+ # - defer if other has __array_ufunc__ and it is None
+ # or other is not a subclass and has higher array priority
+ # - else, call ufunc
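+ # A minimal sketch of the deferral rule (Deferring is a hypothetical
+ # class, not part of the tests below):
+ # class Deferring:
+ #     __array_ufunc__ = None
+ #     def __radd__(self, other): return "deferred"
+ # np.arange(3) + Deferring() evaluates to "deferred": ndarray.__add__
+ # sees __array_ufunc__ is None, returns NotImplemented, and Python
+ # falls back to Deferring.__radd__.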
+ def test_ufunc_binop_interaction(self):
+ # Python method name (without underscores)
+ # -> (numpy ufunc, has_in_place_version, preferred_dtype)
+ ops = {
+ 'add': (np.add, True, float),
+ 'sub': (np.subtract, True, float),
+ 'mul': (np.multiply, True, float),
+ 'truediv': (np.true_divide, True, float),
+ 'floordiv': (np.floor_divide, True, float),
+ 'mod': (np.remainder, True, float),
+ 'divmod': (np.divmod, False, float),
+ 'pow': (np.power, True, int),
+ 'lshift': (np.left_shift, True, int),
+ 'rshift': (np.right_shift, True, int),
+ 'and': (np.bitwise_and, True, int),
+ 'xor': (np.bitwise_xor, True, int),
+ 'or': (np.bitwise_or, True, int),
+ 'matmul': (np.matmul, False, float),
+ # 'ge': (np.less_equal, False),
+ # 'gt': (np.less, False),
+ # 'le': (np.greater_equal, False),
+ # 'lt': (np.greater, False),
+ # 'eq': (np.equal, False),
+ # 'ne': (np.not_equal, False),
+ }
+
+ class Coerced(Exception):
+ pass
+
+ def array_impl(self):
+ raise Coerced
+
+ def op_impl(self, other):
+ return "forward"
+
+ def rop_impl(self, other):
+ return "reverse"
+
+ def iop_impl(self, other):
+ return "in-place"
+
+ def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
+ return ("__array_ufunc__", ufunc, method, args, kwargs)
+
+ # Create an object with the given base, in the given module, with a
+ # bunch of placeholder __op__ methods, and optionally a
+ # __array_ufunc__ and __array_priority__.
+ def make_obj(base, array_priority=False, array_ufunc=False,
+ alleged_module="__main__"):
+ class_namespace = {"__array__": array_impl}
+ if array_priority is not False:
+ class_namespace["__array_priority__"] = array_priority
+ for op in ops:
+ class_namespace["__{0}__".format(op)] = op_impl
+ class_namespace["__r{0}__".format(op)] = rop_impl
+ class_namespace["__i{0}__".format(op)] = iop_impl
+ if array_ufunc is not False:
+ class_namespace["__array_ufunc__"] = array_ufunc
+ eval_namespace = {"base": base,
+ "class_namespace": class_namespace,
+ "__name__": alleged_module,
+ }
+ MyType = eval("type('MyType', (base,), class_namespace)",
+ eval_namespace)
+ if issubclass(MyType, np.ndarray):
+ # Use this range to avoid special case weirdnesses around
+ # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
+ return np.arange(3, 7).reshape(2, 2).view(MyType)
+ else:
+ return MyType()
+
+ def check(obj, binop_override_expected, ufunc_override_expected,
+ inplace_override_expected, check_scalar=True):
+ for op, (ufunc, has_inplace, dtype) in ops.items():
+ err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
+ % (op, ufunc, has_inplace, dtype))
+ check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
+ if check_scalar:
+ check_objs.append(check_objs[0][0])
+ for arr in check_objs:
+ arr_method = getattr(arr, "__{0}__".format(op))
+
+ def first_out_arg(result):
+ if op == "divmod":
+ assert_(isinstance(result, tuple))
+ return result[0]
+ else:
+ return result
+
+ # arr __op__ obj
+ if binop_override_expected:
+ assert_equal(arr_method(obj), NotImplemented, err_msg)
+ elif ufunc_override_expected:
+ assert_equal(arr_method(obj)[0], "__array_ufunc__",
+ err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ res = first_out_arg(arr_method(obj))
+ assert_(res.__class__ is obj.__class__, err_msg)
+ else:
+ assert_raises((TypeError, Coerced),
+ arr_method, obj, err_msg=err_msg)
+ # obj __op__ arr
+ arr_rmethod = getattr(arr, "__r{0}__".format(op))
+ if ufunc_override_expected:
+ res = arr_rmethod(obj)
+ assert_equal(res[0], "__array_ufunc__",
+ err_msg=err_msg)
+ assert_equal(res[1], ufunc, err_msg=err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ res = first_out_arg(arr_rmethod(obj))
+ assert_(res.__class__ is obj.__class__, err_msg)
+ else:
+ # __array_ufunc__ = "asdf" creates a TypeError
+ assert_raises((TypeError, Coerced),
+ arr_rmethod, obj, err_msg=err_msg)
+
+ # arr __iop__ obj
+ # array scalars don't have in-place operators
+ if has_inplace and isinstance(arr, np.ndarray):
+ arr_imethod = getattr(arr, "__i{0}__".format(op))
+ if inplace_override_expected:
+ assert_equal(arr_imethod(obj), NotImplemented,
+ err_msg=err_msg)
+ elif ufunc_override_expected:
+ res = arr_imethod(obj)
+ assert_equal(res[0], "__array_ufunc__", err_msg)
+ assert_equal(res[1], ufunc, err_msg)
+ assert_(type(res[-1]["out"]) is tuple, err_msg)
+ assert_(res[-1]["out"][0] is arr, err_msg)
+ else:
+ if (isinstance(obj, np.ndarray) and
+ (type(obj).__array_ufunc__ is
+ np.ndarray.__array_ufunc__)):
+ # __array__ gets ignored
+ assert_(arr_imethod(obj) is arr, err_msg)
+ else:
+ assert_raises((TypeError, Coerced),
+ arr_imethod, obj,
+ err_msg=err_msg)
+
+ op_fn = getattr(operator, op, None)
+ if op_fn is None:
+ op_fn = getattr(operator, op + "_", None)
+ if op_fn is None:
+ op_fn = getattr(builtins, op)
+ assert_equal(op_fn(obj, arr), "forward", err_msg)
+ if not isinstance(obj, np.ndarray):
+ if binop_override_expected:
+ assert_equal(op_fn(arr, obj), "reverse", err_msg)
+ elif ufunc_override_expected:
+ assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
+ err_msg)
+ if ufunc_override_expected:
+ assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
+ err_msg)
+
+ # No array priority, no array_ufunc -> nothing called
+ check(make_obj(object), False, False, False)
+ # Negative array priority, no array_ufunc -> nothing called
+ # (has to be very negative, because scalar priority is -1000000.0)
+ check(make_obj(object, array_priority=-2**30), False, False, False)
+ # Positive array priority, no array_ufunc -> binops and iops only
+ check(make_obj(object, array_priority=1), True, False, True)
+ # ndarray ignores array_priority for ndarray subclasses
+ check(make_obj(np.ndarray, array_priority=1), False, False, False,
+ check_scalar=False)
+ # Positive array_priority and array_ufunc -> array_ufunc only
+ check(make_obj(object, array_priority=1,
+ array_ufunc=array_ufunc_impl), False, True, False)
+ check(make_obj(np.ndarray, array_priority=1,
+ array_ufunc=array_ufunc_impl), False, True, False)
+ # array_ufunc set to None -> defer binops only
+ check(make_obj(object, array_ufunc=None), True, False, False)
+ check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
+ check_scalar=False)
+
+ @pytest.mark.parametrize("priority", [None, "runtime error"])
+ def test_ufunc_binop_bad_array_priority(self, priority):
+ # Mainly checks that this does not crash. The second array has a lower
+ # priority than -1 ("error value"). If the __radd__ actually exists,
+ # bad things can happen (I think via the scalar paths).
+ # In principle both of these can probably just be errors in the future.
+ class BadPriority:
+ @property
+ def __array_priority__(self):
+ if priority == "runtime error":
+ raise RuntimeError("RuntimeError in __array_priority__!")
+ return priority
+
+ def __radd__(self, other):
+ return "result"
+
+ class LowPriority(np.ndarray):
+ __array_priority__ = -1000
+
+ # A failing __array_priority__ lookup falls back to the scalar
+ # priority (-1000000.0, i.e. below LowPriority's -1000), so the
+ # LowPriority array wins and the add proceeds elementwise, where
+ # BadPriority.__radd__ yields 'result' for each element.
+ res = np.arange(3).view(LowPriority) + BadPriority()
+ assert res.shape == (3,)
+ assert res[0] == 'result'
+
+ def test_ufunc_override_normalize_signature(self):
+ # gh-5674
+ class SomeClass:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ return kw
+
+ a = SomeClass()
+ kw = np.add(a, [1])
+ assert_('sig' not in kw and 'signature' not in kw)
+ kw = np.add(a, [1], sig='ii->i')
+ assert_('sig' not in kw and 'signature' in kw)
+ assert_equal(kw['signature'], 'ii->i')
+ kw = np.add(a, [1], signature='ii->i')
+ assert_('sig' not in kw and 'signature' in kw)
+ assert_equal(kw['signature'], 'ii->i')
+
+ def test_array_ufunc_index(self):
+ # Check that the index is set appropriately, also if only an output
+ # is passed on (the latter is another regression test for github bug 4753).
+ # This also checks implicitly that 'out' is always a tuple.
+ class CheckIndex:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ for i, a in enumerate(inputs):
+ if a is self:
+ return i
+ # not an input, so we must be among the outputs.
+ for j, a in enumerate(kw['out']):
+ if a is self:
+ return (j,)
+
+ a = CheckIndex()
+ dummy = np.arange(2.)
+ # 1 input, 1 output
+ assert_equal(np.sin(a), 0)
+ assert_equal(np.sin(dummy, a), (0,))
+ assert_equal(np.sin(dummy, out=a), (0,))
+ assert_equal(np.sin(dummy, out=(a,)), (0,))
+ assert_equal(np.sin(a, a), 0)
+ assert_equal(np.sin(a, out=a), 0)
+ assert_equal(np.sin(a, out=(a,)), 0)
+ # 1 input, 2 outputs
+ assert_equal(np.modf(dummy, a), (0,))
+ assert_equal(np.modf(dummy, None, a), (1,))
+ assert_equal(np.modf(dummy, dummy, a), (1,))
+ assert_equal(np.modf(dummy, out=(a, None)), (0,))
+ assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
+ assert_equal(np.modf(dummy, out=(None, a)), (1,))
+ assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
+ assert_equal(np.modf(a, out=(dummy, a)), 0)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs
+ np.modf(dummy, out=a)
+
+ assert_raises(ValueError, np.modf, dummy, out=(a,))
+
+ # 2 inputs, 1 output
+ assert_equal(np.add(a, dummy), 0)
+ assert_equal(np.add(dummy, a), 1)
+ assert_equal(np.add(dummy, dummy, a), (0,))
+ assert_equal(np.add(dummy, a, a), 1)
+ assert_equal(np.add(dummy, dummy, out=a), (0,))
+ assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
+ assert_equal(np.add(a, dummy, out=a), 0)
+
+ def test_out_override(self):
+ # regression test for github bug 4753
+ class OutClass(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if 'out' in kw:
+ tmp_kw = kw.copy()
+ tmp_kw.pop('out')
+ func = getattr(ufunc, method)
+ kw['out'][0][...] = func(*inputs, **tmp_kw)
+
+ A = np.array([0]).view(OutClass)
+ B = np.array([5])
+ C = np.array([6])
+ np.multiply(C, B, A)
+ assert_equal(A[0], 30)
+ assert_(isinstance(A, OutClass))
+ A[0] = 0
+ np.multiply(C, B, out=A)
+ assert_equal(A[0], 30)
+ assert_(isinstance(A, OutClass))
+
+ def test_pow_override_with_errors(self):
+ # regression test for gh-9112
+ class PowerOnly(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if ufunc is not np.power:
+ raise NotImplementedError
+ return "POWER!"
+ # explicit cast to float, to ensure the fast power path is taken.
+ a = np.array(5., dtype=np.float64).view(PowerOnly)
+ assert_equal(a ** 2.5, "POWER!")
+ with assert_raises(NotImplementedError):
+ a ** 0.5
+ with assert_raises(NotImplementedError):
+ a ** 0
+ with assert_raises(NotImplementedError):
+ a ** 1
+ with assert_raises(NotImplementedError):
+ a ** -1
+ with assert_raises(NotImplementedError):
+ a ** 2
+
+ def test_pow_array_object_dtype(self):
+ # test pow on arrays of object dtype
+ class SomeClass:
+ def __init__(self, num=None):
+ self.num = num
+
+ # want to ensure a fast pow path is not taken
+ def __mul__(self, other):
+ raise AssertionError('__mul__ should not be called')
+
+ def __div__(self, other):
+ raise AssertionError('__div__ should not be called')
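+ # (__div__ is the Python 2 division hook; Python 3 only calls
+ # __truediv__, so this guard is vestigial but harmless.)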
+
+ def __pow__(self, exp):
+ return SomeClass(num=self.num ** exp)
+
+ def __eq__(self, other):
+ if isinstance(other, SomeClass):
+ return self.num == other.num
+
+ __rpow__ = __pow__
+
+ def pow_for(exp, arr):
+ return np.array([x ** exp for x in arr])
+
+ obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)])
+
+ assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr))
+ assert_equal(obj_arr ** 0, pow_for(0, obj_arr))
+ assert_equal(obj_arr ** 1, pow_for(1, obj_arr))
+ assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
+ assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+
+ def test_pos_array_ufunc_override(self):
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*[i.view(np.ndarray) for
+ i in inputs], **kwargs)
+ tst = np.array('foo').view(A)
+ with assert_raises(TypeError):
+ +tst
+
+
+class TestTemporaryElide:
+ # elision is only triggered on relatively large arrays
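+ # "Elision" here means reusing the buffer of a temporary (refcount-1)
+ # operand for the result, e.g. evaluating (d + d) + d with one fewer
+ # allocation. Roughly (a simplified sketch; the real check also scans
+ # the C stack, see below):
+ # if refcount(operand) == 1 and operand is large enough:
+ #     reuse operand's buffer for the output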
+
+ def test_extension_incref_elide(self):
+ # test extension (e.g. cython) calling PyNumber_* slots without
+ # increasing the reference counts
+ #
+ # def incref_elide(a):
+ # d = a.copy() # refcount 1
+ # return d, d + d # PyNumber_Add without increasing refcount
+ from numpy.core._multiarray_tests import incref_elide
+ d = np.ones(100000)
+ orig, res = incref_elide(d)
+ d + d
+ # the returned original must not be changed by an elided inplace operation
+ assert_array_equal(orig, d)
+ assert_array_equal(res, d + d)
+
+ def test_extension_incref_elide_stack(self):
+ # Scanning whether the refcount == 1 object is on the python stack, to
+ # check that we are called directly from python, is flawed: the object
+ # may still be above the stack pointer even though we have no access
+ # to the top of it.
+ #
+ # def incref_elide_l(l):
+ # return l[4] + l[4] # PyNumber_Add without increasing refcount
+ from numpy.core._multiarray_tests import incref_elide_l
+ # padding with 1 makes sure the object on the stack is not overwritten
+ l = [1, 1, 1, 1, np.ones(100000)]
+ res = incref_elide_l(l)
+ # the returned original must not be changed by an elided inplace operation
+ assert_array_equal(l[4], np.ones(100000))
+ assert_array_equal(res, l[4] + l[4])
+
+ def test_temporary_with_cast(self):
+ # check that we don't elide into a temporary which would need casting
+ d = np.ones(200000, dtype=np.int64)
+ assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
+
+ r = ((d + d) / 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = np.true_divide((d + d), 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) / 2.)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) // 2)
+ assert_equal(r.dtype, np.dtype(np.int64))
+
+ # commutative elision into the astype result
+ f = np.ones(100000, dtype=np.float32)
+ assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+ # no elision into lower type
+ d = f.astype(np.float64)
+ assert_equal(((f + f) + d).dtype, d.dtype)
+ l = np.ones(100000, dtype=np.longdouble)
+ assert_equal(((d + d) + l).dtype, l.dtype)
+
+ # test unary abs with different output dtype
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ c = np.ones(100000, dtype=dt)
+ r = abs(c * 2.0)
+ assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+ def test_elide_broadcast(self):
+ # test no elision on broadcast to higher dimension
+ # this only triggers the elision code path in debug mode; triggering it
+ # in normal mode would need a matching dimension of 256kb or more, i.e.
+ # a lot of memory
+ d = np.ones((2000, 1), dtype=int)
+ b = np.ones((2000), dtype=bool)
+ r = (1 - d) + b
+ assert_equal(r, 1)
+ assert_equal(r.shape, (2000, 2000))
+
+ def test_elide_scalar(self):
+ # check inplace op does not create ndarray from scalars
+ a = np.bool_()
+ assert_(type(~(a & a)) is np.bool_)
+
+ def test_elide_scalar_readonly(self):
+ # The imaginary part of a real array is readonly. This needs to go
+ # through fast_scalar_power, which is only called for powers of
+ # +1, -1, 0, 0.5, and 2, so use 2. Elision also needs a valid
+ # refcount, which the imaginary part of a real array provides.
+ # This should not error.
+ a = np.empty(100000, dtype=np.float64)
+ a.imag ** 2
+
+ def test_elide_readonly(self):
+ # don't try to elide readonly temporaries
+ r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+ assert_equal(r, 0)
+
+ def test_elide_updateifcopy(self):
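+ # a.flat.__array__() on a non-contiguous array returns a contiguous
+ # copy with writeback-if-copy semantics: deleting it writes the data
+ # back into a. The + 1 must not be elided into that temporary, or
+ # the writeback would corrupt a, so a must still be all ones.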
+ a = np.ones(2**20)[::2]
+ b = a.flat.__array__() + 1
+ del b
+ assert_equal(a, 1)
+
+
+class TestCAPI:
+ def test_IsPythonScalar(self):
+ from numpy.core._multiarray_tests import IsPythonScalar
+ assert_(IsPythonScalar(b'foobar'))
+ assert_(IsPythonScalar(1))
+ assert_(IsPythonScalar(2**80))
+ assert_(IsPythonScalar(2.))
+ assert_(IsPythonScalar("a"))
+
+ @pytest.mark.parametrize("converter",
+ [_multiarray_tests.run_scalar_intp_converter,
+ _multiarray_tests.run_scalar_intp_from_sequence])
+ def test_intp_sequence_converters(self, converter):
+ # Test simple values (-1 is special for error return paths)
+ assert converter(10) == (10,)
+ assert converter(-1) == (-1,)
+ # A 0-D array looks a bit like a sequence but must take the integer
+ # path:
+ assert converter(np.array(123)) == (123,)
+ # Test simple sequences (intp_from_sequence only supports length 1):
+ assert converter((10,)) == (10,)
+ assert converter(np.array([11])) == (11,)
+
+ @pytest.mark.parametrize("converter",
+ [_multiarray_tests.run_scalar_intp_converter,
+ _multiarray_tests.run_scalar_intp_from_sequence])
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_intp_sequence_converters_errors(self, converter):
+ with pytest.raises(TypeError,
+ match="expected a sequence of integers or a single integer, "):
+ converter(object())
+ with pytest.raises(TypeError,
+ match="expected a sequence of integers or a single integer, "
+ "got '32.0'"):
+ converter(32.)
+ with pytest.raises(TypeError,
+ match="'float' object cannot be interpreted as an integer"):
+ converter([32.])
+ with pytest.raises(ValueError,
+ match="Maximum allowed dimension"):
+ # These converters currently convert overflows to a ValueError
+ converter(2**64)
+
+
+class TestSubscripting:
+ def test_zero_rank(self):
+ x = np.array([1, 2, 3])
+ assert_(isinstance(x[0], np.int_))
+ assert_(type(x[0, ...]) is np.ndarray)
+
+
+class TestPickling:
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+ reason=('this tests the error messages when trying to '
+ 'use protocol 5 although it is not available'))
+ def test_correct_protocol5_error_message(self):
+ array = np.arange(10)
+
+ def test_record_array_with_object_dtype(self):
+ my_object = object()
+
+ arr_with_object = np.array(
+ [(my_object, 1, 2.0)],
+ dtype=[('a', object), ('b', int), ('c', float)])
+ arr_without_object = np.array(
+ [('xxx', 1, 2.0)],
+ dtype=[('a', str), ('b', int), ('c', float)])
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_arr_with_object = pickle.loads(
+ pickle.dumps(arr_with_object, protocol=proto))
+ depickled_arr_without_object = pickle.loads(
+ pickle.dumps(arr_without_object, protocol=proto))
+
+ assert_equal(arr_with_object.dtype,
+ depickled_arr_with_object.dtype)
+ assert_equal(arr_without_object.dtype,
+ depickled_arr_without_object.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_f_contiguous_array(self):
+ f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ buffers = []
+
+ # When using pickle protocol 5, Fortran-contiguous arrays can be
+ # serialized using out-of-band buffers
+ bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
+ buffer_callback=buffers.append)
+
+ assert len(buffers) > 0
+
+ depickled_f_contiguous_array = pickle.loads(bytes_string,
+ buffers=buffers)
+
+ assert_equal(f_contiguous_array, depickled_f_contiguous_array)
+
+ def test_non_contiguous_array(self):
+ non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
+ assert not non_contiguous_array.flags.c_contiguous
+ assert not non_contiguous_array.flags.f_contiguous
+
+ # make sure non-contiguous arrays can be pickled-depickled
+ # using any protocol
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_non_contiguous_array = pickle.loads(
+ pickle.dumps(non_contiguous_array, protocol=proto))
+
+ assert_equal(non_contiguous_array, depickled_non_contiguous_array)
+
+ def test_roundtrip(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ carray = np.array([[2, 9], [7, 0], [3, 8]])
+ DATA = [
+ carray,
+ np.transpose(carray),
+ np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
+ ('c', float)])
+ ]
+
+ refs = [weakref.ref(a) for a in DATA]
+ for a in DATA:
+ assert_equal(
+ a, pickle.loads(pickle.dumps(a, protocol=proto)),
+ err_msg="%r" % a)
+ del a, DATA, carray
+ break_cycles()
+ # check for reference leaks (gh-12793)
+ for ref in refs:
+ assert ref() is None
+
+ def _loads(self, obj):
+ return pickle.loads(obj, encoding='latin1')
+
+ # version 0 pickles, using protocol=2 to pickle
+ # version 0 doesn't have a version field
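+ # The byte strings below are frozen fixtures, presumably produced by
+ # pickling the corresponding arrays with older NumPy releases; keep
+ # them verbatim so the backward-compatibility path stays exercised.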
+ def test_version0_int8(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
+ a = np.array([1, 2, 3, 4], dtype=np.int8)
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_version0_float32(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
+ a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_version0_object(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
+ a = np.array([{'a': 1}, {'b': 2}])
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ # version 1 pickles, using protocol=2 to pickle
+ def test_version1_int8(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
+ a = np.array([1, 2, 3, 4], dtype=np.int8)
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_version1_float32(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
+ a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_version1_object(self):
+ s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
+ a = np.array([{'a': 1}, {'b': 2}])
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_subarray_int_shape(self):
+ s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
+ a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
+ p = self._loads(s)
+ assert_equal(a, p)
+
+ def test_datetime64_byteorder(self):
+ original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
+
+ original_byte_reversed = original.copy(order='K')
+ original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
+ original_byte_reversed.byteswap(inplace=True)
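+ # newbyteorder('S') flips the dtype's byte-order flag and byteswap()
+ # swaps the actual bytes, so this represents the same datetimes in
+ # the opposite byte order.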
+
+ new = pickle.loads(pickle.dumps(original_byte_reversed))
+
+ assert_equal(original.dtype, new.dtype)
+
+
+class TestFancyIndexing:
+ def test_list(self):
+ x = np.ones((1, 1))
+ x[:, [0]] = 2.0
+ assert_array_equal(x, np.array([[2.0]]))
+
+ x = np.ones((1, 1, 1))
+ x[:, :, [0]] = 2.0
+ assert_array_equal(x, np.array([[[2.0]]]))
+
+ def test_tuple(self):
+ x = np.ones((1, 1))
+ x[:, (0,)] = 2.0
+ assert_array_equal(x, np.array([[2.0]]))
+ x = np.ones((1, 1, 1))
+ x[:, :, (0,)] = 2.0
+ assert_array_equal(x, np.array([[[2.0]]]))
+
+ def test_mask(self):
+ x = np.array([1, 2, 3, 4])
+ m = np.array([0, 1, 0, 0], bool)
+ assert_array_equal(x[m], np.array([2]))
+
+ def test_mask2(self):
+ x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
+ m = np.array([0, 1], bool)
+ m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
+ m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
+ assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
+ assert_array_equal(x[m2], np.array([2, 5]))
+ assert_array_equal(x[m3], np.array([2]))
+
+ def test_assign_mask(self):
+ x = np.array([1, 2, 3, 4])
+ m = np.array([0, 1, 0, 0], bool)
+ x[m] = 5
+ assert_array_equal(x, np.array([1, 5, 3, 4]))
+
+ def test_assign_mask2(self):
+ xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
+ m = np.array([0, 1], bool)
+ m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
+ m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
+ x = xorig.copy()
+ x[m] = 10
+ assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
+ x = xorig.copy()
+ x[m2] = 10
+ assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
+ x = xorig.copy()
+ x[m3] = 10
+ assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
+
+
+class TestStringCompare:
+ def test_string(self):
+ g1 = np.array(["This", "is", "example"])
+ g2 = np.array(["This", "was", "example"])
+ assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
+
+ def test_mixed(self):
+ g1 = np.array(["spam", "spa", "spammer", "and eggs"])
+ g2 = "spam"
+ assert_array_equal(g1 == g2, [x == g2 for x in g1])
+ assert_array_equal(g1 != g2, [x != g2 for x in g1])
+ assert_array_equal(g1 < g2, [x < g2 for x in g1])
+ assert_array_equal(g1 > g2, [x > g2 for x in g1])
+ assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
+ assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
+
+ def test_unicode(self):
+ g1 = np.array(["This", "is", "example"])
+ g2 = np.array(["This", "was", "example"])
+ assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
+ assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
+
+class TestArgmaxArgminCommon:
+
+ sizes = [(), (3,), (3, 2), (2, 3),
+ (3, 3), (2, 3, 4), (4, 3, 2),
+ (1, 2, 3, 4), (2, 3, 4, 1),
+ (3, 4, 1, 2), (4, 1, 2, 3),
+ (64,), (128,), (256,)]
+
+ @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
+ for axis in list(range(-len(size), len(size))) + [None]]
+ for size in sizes]))
+ @pytest.mark.parametrize('method', [np.argmax, np.argmin])
+ def test_np_argmin_argmax_keepdims(self, size, axis, method):
+
+ arr = np.random.normal(size=size)
+
+ # contiguous arrays
+ if axis is None:
+ new_shape = [1 for _ in range(len(size))]
+ else:
+ new_shape = list(size)
+ new_shape[axis] = 1
+ new_shape = tuple(new_shape)
+
+ _res_orig = method(arr, axis=axis)
+ res_orig = _res_orig.reshape(new_shape)
+ res = method(arr, axis=axis, keepdims=True)
+ assert_equal(res, res_orig)
+ assert_(res.shape == new_shape)
+ outarray = np.empty(res.shape, dtype=res.dtype)
+ res1 = method(arr, axis=axis, out=outarray,
+ keepdims=True)
+ assert_(res1 is outarray)
+ assert_equal(res, outarray)
+
+ if len(size) > 0:
+ wrong_shape = list(new_shape)
+ if axis is not None:
+ wrong_shape[axis] = 2
+ else:
+ wrong_shape[0] = 2
+ wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
+ with pytest.raises(ValueError):
+ method(arr.T, axis=axis,
+ out=wrong_outarray, keepdims=True)
+
+ # non-contiguous arrays
+ if axis is None:
+ new_shape = [1 for _ in range(len(size))]
+ else:
+ new_shape = list(size)[::-1]
+ new_shape[axis] = 1
+ new_shape = tuple(new_shape)
+
+ _res_orig = method(arr.T, axis=axis)
+ res_orig = _res_orig.reshape(new_shape)
+ res = method(arr.T, axis=axis, keepdims=True)
+ assert_equal(res, res_orig)
+ assert_(res.shape == new_shape)
+ outarray = np.empty(new_shape[::-1], dtype=res.dtype)
+ outarray = outarray.T
+ res1 = method(arr.T, axis=axis, out=outarray,
+ keepdims=True)
+ assert_(res1 is outarray)
+ assert_equal(res, outarray)
+
+ if len(size) > 0:
+ # an out array with one dimension fewer should raise an
+ # error for a non-zero sized input
+ with pytest.raises(ValueError):
+ method(arr[0], axis=axis,
+ out=outarray, keepdims=True)
+
+ if len(size) > 0:
+ wrong_shape = list(new_shape)
+ if axis is not None:
+ wrong_shape[axis] = 2
+ else:
+ wrong_shape[0] = 2
+ wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
+ with pytest.raises(ValueError):
+ method(arr.T, axis=axis,
+ out=wrong_outarray, keepdims=True)
+
+ @pytest.mark.parametrize('method', ['max', 'min'])
+ def test_all(self, method):
+ a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
+ arg_method = getattr(a, 'arg' + method)
+ val_method = getattr(a, method)
+ for i in range(a.ndim):
+ a_maxmin = val_method(i)
+ aarg_maxmin = arg_method(i)
+ axes = list(range(a.ndim))
+ axes.remove(i)
+ assert_(np.all(a_maxmin == aarg_maxmin.choose(
+ *a.transpose(i, *axes))))
+
+ @pytest.mark.parametrize('method', ['argmax', 'argmin'])
+ def test_output_shape(self, method):
+ # see also gh-616
+ a = np.ones((10, 5))
+ arg_method = getattr(a, method)
+ # Check some simple shape mismatches
+ out = np.ones(11, dtype=np.int_)
+ assert_raises(ValueError, arg_method, -1, out)
+
+ out = np.ones((2, 5), dtype=np.int_)
+ assert_raises(ValueError, arg_method, -1, out)
+
+ # these could possibly be relaxed (older versions allowed even the cases above)
+ out = np.ones((1, 10), dtype=np.int_)
+ assert_raises(ValueError, arg_method, -1, out)
+
+ out = np.ones(10, dtype=np.int_)
+ arg_method(-1, out=out)
+ assert_equal(out, arg_method(-1))
+
+ @pytest.mark.parametrize('ndim', [0, 1])
+ @pytest.mark.parametrize('method', ['argmax', 'argmin'])
+ def test_ret_is_out(self, ndim, method):
+ a = np.ones((4,) + (256,)*ndim)
+ arg_method = getattr(a, method)
+ out = np.empty((256,)*ndim, dtype=np.intp)
+ ret = arg_method(axis=0, out=out)
+ assert ret is out
+
+ @pytest.mark.parametrize('np_array, method, idx, val',
+ [(np.zeros, 'argmax', 5942, "as"),
+ (np.ones, 'argmin', 6001, "0")])
+ def test_unicode(self, np_array, method, idx, val):
+ d = np_array(6031, dtype='<U9')
+ arg_method = getattr(d, method)
+ d[idx] = val
+ assert_equal(arg_method(), idx)
+
+ @pytest.mark.parametrize('arr_method, np_method',
+ [('argmax', np.argmax),
+ ('argmin', np.argmin)])
+ def test_np_vs_ndarray(self, arr_method, np_method):
+ # make sure both ndarray.argmax/argmin and
+ # numpy.argmax/argmin support out/axis args
+ a = np.random.normal(size=(2, 3))
+ arg_method = getattr(a, arr_method)
+
+ # check positional args
+ out1 = np.zeros(2, dtype=int)
+ out2 = np.zeros(2, dtype=int)
+ assert_equal(arg_method(1, out1), np_method(a, 1, out2))
+ assert_equal(out1, out2)
+
+ # check keyword args
+ out1 = np.zeros(3, dtype=int)
+ out2 = np.zeros(3, dtype=int)
+ assert_equal(arg_method(out=out1, axis=0),
+ np_method(a, out=out2, axis=0))
+ assert_equal(out1, out2)
+
+ @pytest.mark.leaks_references(reason="replaces None with NULL.")
+ @pytest.mark.parametrize('method, vals',
+ [('argmax', (10, 30)),
+ ('argmin', (30, 10))])
+ def test_object_with_NULLs(self, method, vals):
+ # See gh-6032
+ a = np.empty(4, dtype='O')
+ arg_method = getattr(a, method)
+ ctypes.memset(a.ctypes.data, 0, a.nbytes)
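+ # memset zeroes the PyObject* slots, filling the object array with
+ # NULLs; argmax/argmin must handle these without dereferencing them.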
+ assert_equal(arg_method(), 0)
+ a[3] = vals[0]
+ assert_equal(arg_method(), 3)
+ a[1] = vals[1]
+ assert_equal(arg_method(), 1)
+
+class TestArgmax:
+ usg_data = [
+ ([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 0),
+ ([3, 3, 3, 3, 2, 2, 2, 2], 0),
+ ([0, 1, 2, 3, 4, 5, 6, 7], 7),
+ ([7, 6, 5, 4, 3, 2, 1, 0], 0)
+ ]
+ sg_data = usg_data + [
+ ([1, 2, 3, 4, -4, -3, -2, -1], 3),
+ ([1, 2, 3, 4, -1, -2, -3, -4], 3)
+ ]
+ darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(usg_data, (
+ np.uint8, np.uint16, np.uint32, np.uint64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(sg_data, (
+ np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product((
+ ([0, 1, 2, 3, np.nan], 4),
+ ([0, 1, 2, np.nan, 3], 3),
+ ([np.nan, 0, 1, 2, 3], 0),
+ ([np.nan, 0, np.nan, 2, 3], 0),
+ # To hit the tail of multi-level (x4, x1) SIMD inner loops
+ # across various SIMD widths
+ ([1] * (2*5-1) + [np.nan], 2*5-1),
+ ([1] * (4*5-1) + [np.nan], 4*5-1),
+ ([1] * (8*5-1) + [np.nan], 8*5-1),
+ ([1] * (16*5-1) + [np.nan], 16*5-1),
+ ([1] * (32*5-1) + [np.nan], 32*5-1)
+ ), (
+ np.float32, np.float64
+ ))
+ )]
+ nan_arr = darr + [
+ ([0, 1, 2, 3, complex(0, np.nan)], 4),
+ ([0, 1, 2, 3, complex(np.nan, 0)], 4),
+ ([0, 1, 2, complex(np.nan, 0), 3], 3),
+ ([0, 1, 2, complex(0, np.nan), 3], 3),
+ ([complex(0, np.nan), 0, 1, 2, 3], 0),
+ ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
+ ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
+ ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
+ ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
+
+ ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
+ ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
+ ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
+
+ ([np.datetime64('1923-04-14T12:43:12'),
+ np.datetime64('1994-06-21T14:43:15'),
+ np.datetime64('2001-10-15T04:10:32'),
+ np.datetime64('1995-11-25T16:02:16'),
+ np.datetime64('2005-01-04T03:14:12'),
+ np.datetime64('2041-12-03T14:05:03')], 5),
+ ([np.datetime64('1935-09-14T04:40:11'),
+ np.datetime64('1949-10-12T12:32:11'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('2015-11-20T12:20:59'),
+ np.datetime64('1932-09-23T10:10:13'),
+ np.datetime64('2014-10-10T03:50:30')], 3),
+ # Assorted tests with NaTs
+ ([np.datetime64('NaT'),
+ np.datetime64('NaT'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('NaT'),
+ np.datetime64('2015-09-23T10:10:13'),
+ np.datetime64('1932-10-10T03:50:30')], 0),
+ ([np.datetime64('2059-03-14T12:43:12'),
+ np.datetime64('1996-09-21T14:43:15'),
+ np.datetime64('NaT'),
+ np.datetime64('2022-12-25T16:02:16'),
+ np.datetime64('1963-10-04T03:14:12'),
+ np.datetime64('2013-05-08T18:15:23')], 2),
+ ([np.timedelta64(2, 's'),
+ np.timedelta64(1, 's'),
+ np.timedelta64('NaT', 's'),
+ np.timedelta64(3, 's')], 2),
+ ([np.timedelta64('NaT', 's')] * 3, 0),
+
+ ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
+ timedelta(days=-1, seconds=23)], 0),
+ ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
+ timedelta(days=5, seconds=14)], 1),
+ ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
+ timedelta(days=10, seconds=43)], 2),
+
+ ([False, False, False, False, True], 4),
+ ([False, False, False, True, False], 3),
+ ([True, False, False, False, False], 0),
+ ([True, False, True, False, False], 0),
+ ]
+
+ @pytest.mark.parametrize('data', nan_arr)
+ def test_combinations(self, data):
+ arr, pos = data
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ val = np.max(arr)
+
+ assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
+ assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
+
+ # add padding to test SIMD loops
+ rarr = np.repeat(arr, 129)
+ rpos = pos * 129
+ assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
+
+ padd = np.repeat(np.min(arr), 513)
+ rarr = np.concatenate((arr, padd))
+ rpos = pos
+ assert_equal(np.argmax(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmax(rarr)], val, err_msg="%r" % rarr)
+
+ def test_maximum_signed_integers(self):
+
+ a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
+ assert_equal(np.argmax(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmax(a), 129)
+
+ a = np.array([1, 2**15 - 1, -2**15], dtype=np.int16)
+ assert_equal(np.argmax(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmax(a), 129)
+
+ a = np.array([1, 2**31 - 1, -2**31], dtype=np.int32)
+ assert_equal(np.argmax(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmax(a), 129)
+
+ a = np.array([1, 2**63 - 1, -2**63], dtype=np.int64)
+ assert_equal(np.argmax(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmax(a), 129)
+
+class TestArgmin:
+ usg_data = [
+ ([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 8),
+ ([3, 3, 3, 3, 2, 2, 2, 2], 4),
+ ([0, 1, 2, 3, 4, 5, 6, 7], 0),
+ ([7, 6, 5, 4, 3, 2, 1, 0], 7)
+ ]
+ sg_data = usg_data + [
+ ([1, 2, 3, 4, -4, -3, -2, -1], 4),
+ ([1, 2, 3, 4, -1, -2, -3, -4], 7)
+ ]
+ darr = [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(usg_data, (
+ np.uint8, np.uint16, np.uint32, np.uint64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product(sg_data, (
+ np.int8, np.int16, np.int32, np.int64, np.float32, np.float64
+ ))
+ )]
+ darr = darr + [(np.array(d[0], dtype=t), d[1]) for d, t in (
+ itertools.product((
+ ([0, 1, 2, 3, np.nan], 4),
+ ([0, 1, 2, np.nan, 3], 3),
+ ([np.nan, 0, 1, 2, 3], 0),
+ ([np.nan, 0, np.nan, 2, 3], 0),
+ # To hit the tail of multi-level (x4, x1) SIMD inner loops
+ # across various SIMD widths
+ ([1] * (2*5-1) + [np.nan], 2*5-1),
+ ([1] * (4*5-1) + [np.nan], 4*5-1),
+ ([1] * (8*5-1) + [np.nan], 8*5-1),
+ ([1] * (16*5-1) + [np.nan], 16*5-1),
+ ([1] * (32*5-1) + [np.nan], 32*5-1)
+ ), (
+ np.float32, np.float64
+ ))
+ )]
+ nan_arr = darr + [
+ ([0, 1, 2, 3, complex(0, np.nan)], 4),
+ ([0, 1, 2, 3, complex(np.nan, 0)], 4),
+ ([0, 1, 2, complex(np.nan, 0), 3], 3),
+ ([0, 1, 2, complex(0, np.nan), 3], 3),
+ ([complex(0, np.nan), 0, 1, 2, 3], 0),
+ ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
+ ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
+ ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
+ ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
+
+ ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
+ ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
+ ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
+
+ ([np.datetime64('1923-04-14T12:43:12'),
+ np.datetime64('1994-06-21T14:43:15'),
+ np.datetime64('2001-10-15T04:10:32'),
+ np.datetime64('1995-11-25T16:02:16'),
+ np.datetime64('2005-01-04T03:14:12'),
+ np.datetime64('2041-12-03T14:05:03')], 0),
+ ([np.datetime64('1935-09-14T04:40:11'),
+ np.datetime64('1949-10-12T12:32:11'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('2014-11-20T12:20:59'),
+ np.datetime64('2015-09-23T10:10:13'),
+ np.datetime64('1932-10-10T03:50:30')], 5),
+ # Assorted tests with NaTs
+ ([np.datetime64('NaT'),
+ np.datetime64('NaT'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('NaT'),
+ np.datetime64('2015-09-23T10:10:13'),
+ np.datetime64('1932-10-10T03:50:30')], 0),
+ ([np.datetime64('2059-03-14T12:43:12'),
+ np.datetime64('1996-09-21T14:43:15'),
+ np.datetime64('NaT'),
+ np.datetime64('2022-12-25T16:02:16'),
+ np.datetime64('1963-10-04T03:14:12'),
+ np.datetime64('2013-05-08T18:15:23')], 2),
+ ([np.timedelta64(2, 's'),
+ np.timedelta64(1, 's'),
+ np.timedelta64('NaT', 's'),
+ np.timedelta64(3, 's')], 2),
+ ([np.timedelta64('NaT', 's')] * 3, 0),
+
+ ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
+ timedelta(days=-1, seconds=23)], 2),
+ ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
+ timedelta(days=5, seconds=14)], 0),
+ ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
+ timedelta(days=10, seconds=43)], 1),
+
+ ([True, True, True, True, False], 4),
+ ([True, True, True, False, True], 3),
+ ([False, True, True, True, True], 0),
+ ([False, True, False, True, True], 0),
+ ]
+
+ @pytest.mark.parametrize('data', nan_arr)
+ def test_combinations(self, data):
+ arr, pos = data
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ min_val = np.min(arr)
+
+ assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
+ assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
+
+ # add padding to test SIMD loops
+ rarr = np.repeat(arr, 129)
+ rpos = pos * 129
+ assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
+
+ padd = np.repeat(np.max(arr), 513)
+ rarr = np.concatenate((arr, padd))
+ rpos = pos
+ assert_equal(np.argmin(rarr), rpos, err_msg="%r" % rarr)
+ assert_equal(rarr[np.argmin(rarr)], min_val, err_msg="%r" % rarr)
+
+ def test_minimum_signed_integers(self):
+
+ a = np.array([1, -2**7, -2**7 + 1, 2**7 - 1], dtype=np.int8)
+ assert_equal(np.argmin(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmin(a), 129)
+
+ a = np.array([1, -2**15, -2**15 + 1, 2**15 - 1], dtype=np.int16)
+ assert_equal(np.argmin(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmin(a), 129)
+
+ a = np.array([1, -2**31, -2**31 + 1, 2**31 - 1], dtype=np.int32)
+ assert_equal(np.argmin(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmin(a), 129)
+
+ a = np.array([1, -2**63, -2**63 + 1, 2**63 - 1], dtype=np.int64)
+ assert_equal(np.argmin(a), 1)
+ a = a.repeat(129)
+ assert_equal(np.argmin(a), 129)
+
+class TestMinMax:
+
+ def test_scalar(self):
+ assert_raises(np.AxisError, np.amax, 1, 1)
+ assert_raises(np.AxisError, np.amin, 1, 1)
+
+ assert_equal(np.amax(1, axis=0), 1)
+ assert_equal(np.amin(1, axis=0), 1)
+ assert_equal(np.amax(1, axis=None), 1)
+ assert_equal(np.amin(1, axis=None), 1)
+
+ def test_axis(self):
+ assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
+ assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
+
+ def test_datetime(self):
+ # Do not ignore NaT
+ for dtype in ('m8[s]', 'm8[Y]'):
+ a = np.arange(10).astype(dtype)
+ assert_equal(np.amin(a), a[0])
+ assert_equal(np.amax(a), a[9])
+ a[3] = 'NaT'
+ assert_equal(np.amin(a), a[3])
+ assert_equal(np.amax(a), a[3])
+
+
+class TestNewaxis:
+ def test_basic(self):
+ sk = np.array([0, -0.1, 0.1])
+ res = 250*sk[:, np.newaxis]
+ assert_almost_equal(res.ravel(), 250*sk)
+
+
+class TestClip:
+ def _check_range(self, x, cmin, cmax):
+ assert_(np.all(x >= cmin))
+ assert_(np.all(x <= cmax))
+
+ def _clip_type(self, type_group, array_max,
+ clip_min, clip_max, inplace=False,
+ expected_min=None, expected_max=None):
+ if expected_min is None:
+ expected_min = clip_min
+ if expected_max is None:
+ expected_max = clip_max
+
+ for T in np.sctypes[type_group]:
+ if sys.byteorder == 'little':
+ byte_orders = ['=', '>']
+ else:
+ byte_orders = ['<', '=']
+
+ for byteorder in byte_orders:
+ dtype = np.dtype(T).newbyteorder(byteorder)
+
+ x = (np.random.random(1000) * array_max).astype(dtype)
+ if inplace:
+ # The tests that call us pass clip_min and clip_max that
+ # might not fit in the destination dtype. They were written
+ # assuming the previous unsafe casting, which now must be
+ # passed explicitly to avoid a warning.
+ x.clip(clip_min, clip_max, x, casting='unsafe')
+ else:
+ x = x.clip(clip_min, clip_max)
+ byteorder = '='
+
+ if x.dtype.byteorder == '|':
+ byteorder = '|'
+ assert_equal(x.dtype.byteorder, byteorder)
+ self._check_range(x, expected_min, expected_max)
+ return x
+
+ def test_basic(self):
+ for inplace in [False, True]:
+ self._clip_type(
+ 'float', 1024, -12.8, 100.2, inplace=inplace)
+ self._clip_type(
+ 'float', 1024, 0, 0, inplace=inplace)
+
+ self._clip_type(
+ 'int', 1024, -120, 100, inplace=inplace)
+ self._clip_type(
+ 'int', 1024, 0, 0, inplace=inplace)
+
+ self._clip_type(
+ 'uint', 1024, 0, 0, inplace=inplace)
+ self._clip_type(
+ 'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
+
+ def test_record_array(self):
+ rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+ dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
+ y = rec['x'].clip(-0.3, 0.5)
+ self._check_range(y, -0.3, 0.5)
+
+ def test_max_or_min(self):
+ val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
+ x = val.clip(3)
+ assert_(np.all(x >= 3))
+ x = val.clip(min=3)
+ assert_(np.all(x >= 3))
+ x = val.clip(max=4)
+ assert_(np.all(x <= 4))
+
+ def test_nan(self):
+ input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
+ result = input_arr.clip(-1, 1)
+ expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
+ assert_array_equal(result, expected)
+
+
+class TestCompress:
+ def test_axis(self):
+ tgt = [[5, 6, 7, 8, 9]]
+ arr = np.arange(10).reshape(2, 5)
+ out = np.compress([0, 1], arr, axis=0)
+ assert_equal(out, tgt)
+
+ tgt = [[1, 3], [6, 8]]
+ out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
+ assert_equal(out, tgt)
+
+ def test_truncate(self):
+ tgt = [[1], [6]]
+ arr = np.arange(10).reshape(2, 5)
+ out = np.compress([0, 1], arr, axis=1)
+ assert_equal(out, tgt)
+
+ def test_flatten(self):
+ arr = np.arange(10).reshape(2, 5)
+ out = np.compress([0, 1], arr)
+ assert_equal(out, 1)
+
+
+class TestPutmask:
+ def tst_basic(self, x, T, mask, val):
+ np.putmask(x, mask, val)
+ assert_equal(x[mask], np.array(val, T))
+
+ def test_ip_types(self):
+ unchecked_types = [bytes, str, np.void]
+
+ x = np.random.random(1000)*100
+ mask = x < 40
+
+ for val in [-100, 0, 15]:
+ for types in np.sctypes.values():
+ for T in types:
+ if T not in unchecked_types:
+ if val < 0 and np.dtype(T).kind == "u":
+ val = np.iinfo(T).max - 99
+ self.tst_basic(x.copy().astype(T), T, mask, val)
+
+ # Also test a string dtype with an atypical length
+ dt = np.dtype("S3")
+ self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
+
+ def test_mask_size(self):
+ assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
+
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
+ x = np.array([1, 2, 3], dtype)
+ np.putmask(x, [True, False, True], -1)
+ assert_array_equal(x, [-1, 2, -1])
+
+ def test_record_array(self):
+ # Note mixed byteorder.
+ rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+ dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+ np.putmask(rec['x'], [True, False], 10)
+ assert_array_equal(rec['x'], [10, 5])
+ assert_array_equal(rec['y'], [2, 4])
+ assert_array_equal(rec['z'], [3, 3])
+ np.putmask(rec['y'], [True, False], 11)
+ assert_array_equal(rec['x'], [10, 5])
+ assert_array_equal(rec['y'], [11, 4])
+ assert_array_equal(rec['z'], [3, 3])
+
+ def test_overlaps(self):
+ # gh-6272 check overlap
+ x = np.array([True, False, True, False])
+ np.putmask(x[1:4], [True, True, True], x[:3])
+ assert_equal(x, np.array([True, True, False, True]))
+
+ x = np.array([True, False, True, False])
+ np.putmask(x[1:4], x[:3], [True, False, True])
+ assert_equal(x, np.array([True, True, True, True]))
+
+ def test_writeable(self):
+ a = np.arange(5)
+ a.flags.writeable = False
+
+ with pytest.raises(ValueError):
+ np.putmask(a, a >= 2, 3)
+
+
+class TestTake:
+ def tst_basic(self, x):
+ ind = list(range(x.shape[0]))
+ assert_array_equal(x.take(ind, axis=0), x)
+
+ def test_ip_types(self):
+ unchecked_types = [bytes, str, np.void]
+
+ x = np.random.random(24)*100
+ x.shape = 2, 3, 4
+ for types in np.sctypes.values():
+ for T in types:
+ if T not in unchecked_types:
+ self.tst_basic(x.copy().astype(T))
+
+ # Also test a string dtype with an atypical length
+ self.tst_basic(x.astype("S3"))
+
+ def test_raise(self):
+ x = np.random.random(24)*100
+ x.shape = 2, 3, 4
+ assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
+ assert_raises(IndexError, x.take, [-3], axis=0)
+ assert_array_equal(x.take([-1], axis=0)[0], x[1])
+
+ def test_clip(self):
+ x = np.random.random(24)*100
+ x.shape = 2, 3, 4
+ assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
+ assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
+
+ def test_wrap(self):
+ x = np.random.random(24)*100
+ x.shape = 2, 3, 4
+ assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
+ assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
+ assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
+
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
+ x = np.array([1, 2, 3], dtype)
+ assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
+
+ def test_record_array(self):
+ # Note mixed byteorder.
+ rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+ dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+ rec1 = rec.take([1])
+ assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
+
+ def test_out_overlap(self):
+ # gh-6272 check overlap on out
+ x = np.arange(5)
+ y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
+ assert_equal(y, np.array([1, 2, 3]))
+
+ @pytest.mark.parametrize('shape', [(1, 2), (1,), ()])
+ def test_ret_is_out(self, shape):
+ # 0d arrays should not be an exception to this rule
+ x = np.arange(5)
+ inds = np.zeros(shape, dtype=np.intp)
+ out = np.zeros(shape, dtype=x.dtype)
+ ret = np.take(x, inds, out=out)
+ assert ret is out
+
+
+class TestLexsort:
+ @pytest.mark.parametrize('dtype', [
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.int8, np.int16, np.int32, np.int64,
+ np.float16, np.float32, np.float64
+ ])
+ def test_basic(self, dtype):
+ a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
+ b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
+ idx = np.lexsort((b, a))
+ expected_idx = np.array([0, 4, 2, 1, 3, 5])
+ assert_array_equal(idx, expected_idx)
+ assert_array_equal(a[idx], np.sort(a))
+
+ def test_mixed(self):
+ a = np.array([1, 2, 1, 3, 1, 5])
+ b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
+
+ idx = np.lexsort((b, a))
+ expected_idx = np.array([0, 4, 2, 1, 3, 5])
+ assert_array_equal(idx, expected_idx)
+
+ def test_datetime(self):
+ a = np.array([0, 0, 0], dtype='datetime64[D]')
+ b = np.array([2, 1, 0], dtype='datetime64[D]')
+ idx = np.lexsort((b, a))
+ expected_idx = np.array([2, 1, 0])
+ assert_array_equal(idx, expected_idx)
+
+ a = np.array([0, 0, 0], dtype='timedelta64[D]')
+ b = np.array([2, 1, 0], dtype='timedelta64[D]')
+ idx = np.lexsort((b, a))
+ expected_idx = np.array([2, 1, 0])
+ assert_array_equal(idx, expected_idx)
+
+ def test_object(self): # gh-6312
+ a = np.random.choice(10, 1000)
+ b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
+
+ for u in a, b:
+ left = np.lexsort((u.astype('O'),))
+ right = np.argsort(u, kind='mergesort')
+ assert_array_equal(left, right)
+
+ for u, v in (a, b), (b, a):
+ idx = np.lexsort((u, v))
+ assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
+ assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
+ u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
+ assert_array_equal(idx, np.lexsort((u, v)))
+
+ def test_invalid_axis(self): # gh-7528
+ x = np.linspace(0., 1., 42*3).reshape(42, 3)
+ assert_raises(np.AxisError, np.lexsort, x, axis=2)
+
+class TestIO:
+ """Test tofile, fromfile, tobytes, and fromstring"""
+
+ @pytest.fixture()
+ def x(self):
+ shape = (2, 4, 3)
+ rand = np.random.random
+ x = rand(shape) + rand(shape).astype(complex) * 1j
+ x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
+ return x
+
+ @pytest.fixture(params=["string", "path_obj"])
+ def tmp_filename(self, tmp_path, request):
+ # This fixture covers two cases:
+ # one where the filename is a string and
+ # another where it is a pathlib object
+ filename = tmp_path / "file"
+ if request.param == "string":
+ filename = str(filename)
+ yield filename
+
+ def test_nofile(self):
+ # this should probably be supported as a file
+ # but for now test for proper errors
+ b = io.BytesIO()
+ assert_raises(OSError, np.fromfile, b, np.uint8, 80)
+ d = np.ones(7)
+ assert_raises(OSError, lambda x: x.tofile(b), d)
+
+ def test_bool_fromstring(self):
+ v = np.array([True, False, True, False], dtype=np.bool_)
+ y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
+ assert_array_equal(v, y)
+
+ def test_uint64_fromstring(self):
+ d = np.fromstring("9923372036854775807 104783749223640",
+ dtype=np.uint64, sep=' ')
+ e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
+ assert_array_equal(d, e)
+
+ def test_int64_fromstring(self):
+ d = np.fromstring("-25041670086757 104783749223640",
+ dtype=np.int64, sep=' ')
+ e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
+ assert_array_equal(d, e)
+
+ def test_fromstring_count0(self):
+ d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
+ assert d.shape == (0,)
+
+ def test_empty_files_text(self, tmp_filename):
+ with open(tmp_filename, 'w') as f:
+ pass
+ y = np.fromfile(tmp_filename)
+ assert_(y.size == 0, "Array not empty")
+
+ def test_empty_files_binary(self, tmp_filename):
+ with open(tmp_filename, 'wb') as f:
+ pass
+ y = np.fromfile(tmp_filename, sep=" ")
+ assert_(y.size == 0, "Array not empty")
+
+ def test_roundtrip_file(self, x, tmp_filename):
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f)
+ # NB. doesn't work with flush+seek, due to use of C stdio
+ with open(tmp_filename, 'rb') as f:
+ y = np.fromfile(f, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ def test_roundtrip(self, x, tmp_filename):
+ x.tofile(tmp_filename)
+ y = np.fromfile(tmp_filename, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ def test_roundtrip_dump_pathlib(self, x, tmp_filename):
+ p = pathlib.Path(tmp_filename)
+ x.dump(p)
+ y = np.load(p, allow_pickle=True)
+ assert_array_equal(y, x)
+
+ def test_roundtrip_binary_str(self, x):
+ s = x.tobytes()
+ y = np.frombuffer(s, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ s = x.tobytes('F')
+ y = np.frombuffer(s, dtype=x.dtype)
+ assert_array_equal(y, x.flatten('F'))
+
+ def test_roundtrip_str(self, x):
+ x = x.real.ravel()
+ s = "@".join(map(str, x))
+ y = np.fromstring(s, sep="@")
+ # NB. str may carry less precision than repr, hence decimal=5 below
+ nan_mask = ~np.isfinite(x)
+ assert_array_equal(x[nan_mask], y[nan_mask])
+ assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
+
+ def test_roundtrip_repr(self, x):
+ x = x.real.ravel()
+ s = "@".join(map(repr, x))
+ y = np.fromstring(s, sep="@")
+ assert_array_equal(x, y)
+
+ def test_unseekable_fromfile(self, x, tmp_filename):
+ # gh-6246
+ x.tofile(tmp_filename)
+
+ def fail(*args, **kwargs):
+ raise OSError('Can not tell or seek')
+
+ with io.open(tmp_filename, 'rb', buffering=0) as f:
+ f.seek = fail
+ f.tell = fail
+ assert_raises(OSError, np.fromfile, f, dtype=x.dtype)
+
+ def test_io_open_unbuffered_fromfile(self, x, tmp_filename):
+ # gh-6632
+ x.tofile(tmp_filename)
+ with io.open(tmp_filename, 'rb', buffering=0) as f:
+ y = np.fromfile(f, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ def test_largish_file(self, tmp_filename):
+ # check the fallocate path on files > 16MB
+ d = np.zeros(4 * 1024 ** 2)
+ d.tofile(tmp_filename)
+ assert_equal(os.path.getsize(tmp_filename), d.nbytes)
+ assert_array_equal(d, np.fromfile(tmp_filename))
+ # check offset
+ with open(tmp_filename, "r+b") as f:
+ f.seek(d.nbytes)
+ d.tofile(f)
+ assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
+ # check append mode (gh-8329)
+ open(tmp_filename, "w").close() # delete file contents
+ with open(tmp_filename, "ab") as f:
+ d.tofile(f)
+ assert_array_equal(d, np.fromfile(tmp_filename))
+ with open(tmp_filename, "ab") as f:
+ d.tofile(f)
+ assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
+
+ def test_io_open_buffered_fromfile(self, x, tmp_filename):
+ # gh-6632
+ x.tofile(tmp_filename)
+ with io.open(tmp_filename, 'rb', buffering=-1) as f:
+ y = np.fromfile(f, dtype=x.dtype)
+ assert_array_equal(y, x.flat)
+
+ def test_file_position_after_fromfile(self, tmp_filename):
+ # gh-4118
+ sizes = [io.DEFAULT_BUFFER_SIZE//8,
+ io.DEFAULT_BUFFER_SIZE,
+ io.DEFAULT_BUFFER_SIZE*8]
+
+ for size in sizes:
+ with open(tmp_filename, 'wb') as f:
+ f.seek(size-1)
+ f.write(b'\0')
+
+ for mode in ['rb', 'r+b']:
+ err_msg = "%d %s" % (size, mode)
+
+ with open(tmp_filename, mode) as f:
+ f.read(2)
+ np.fromfile(f, dtype=np.float64, count=1)
+ pos = f.tell()
+ assert_equal(pos, 10, err_msg=err_msg)
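+ # Position arithmetic for the assertion above: f.read(2) consumes 2
+ # bytes and np.fromfile reads one float64 (8 bytes), so the handle
+ # must sit at byte 10 afterwards, regardless of Python's read-ahead
+ # buffering.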
+
+ def test_file_position_after_tofile(self, tmp_filename):
+ # gh-4118
+ sizes = [io.DEFAULT_BUFFER_SIZE//8,
+ io.DEFAULT_BUFFER_SIZE,
+ io.DEFAULT_BUFFER_SIZE*8]
+
+ for size in sizes:
+ err_msg = "%d" % (size,)
+
+ with open(tmp_filename, 'wb') as f:
+ f.seek(size-1)
+ f.write(b'\0')
+ f.seek(10)
+ f.write(b'12')
+ np.array([0], dtype=np.float64).tofile(f)
+ pos = f.tell()
+ assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
+
+ with open(tmp_filename, 'r+b') as f:
+ f.read(2)
+ f.seek(0, 1) # a seek between read & write is required by ANSI C
+ np.array([0], dtype=np.float64).tofile(f)
+ pos = f.tell()
+ assert_equal(pos, 10, err_msg=err_msg)
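+ # Position arithmetic above: in the 'wb' case, seek(10) + 2 bytes
+ # written + one 8-byte float64 leaves the handle at byte 20; in the
+ # 'r+b' case, read(2) + one 8-byte tofile leaves it at byte 10.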
+
+ def test_load_object_array_fromfile(self, tmp_filename):
+ # gh-12300
+ with open(tmp_filename, 'w') as f:
+ # Ensure we have a file with consistent contents
+ pass
+
+ with open(tmp_filename, 'rb') as f:
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, f, dtype=object)
+
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, tmp_filename, dtype=object)
+
+ def test_fromfile_offset(self, x, tmp_filename):
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f)
+
+ with open(tmp_filename, 'rb') as f:
+ y = np.fromfile(f, dtype=x.dtype, offset=0)
+ assert_array_equal(y, x.flat)
+
+ with open(tmp_filename, 'rb') as f:
+ count_items = len(x.flat) // 8
+ offset_items = len(x.flat) // 4
+ offset_bytes = x.dtype.itemsize * offset_items
+ y = np.fromfile(
+ f, dtype=x.dtype, count=count_items, offset=offset_bytes
+ )
+ assert_array_equal(
+ y, x.flat[offset_items:offset_items+count_items]
+ )
+
+ # the offset is applied relative to the current file position,
+ # so subsequent reads stack
+ offset_bytes = x.dtype.itemsize
+ z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
+ assert_array_equal(z, x.flat[offset_items+count_items+1:])
+
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f, sep=",")
+
+ with open(tmp_filename, 'rb') as f:
+ assert_raises_regex(
+ TypeError,
+ "'offset' argument only permitted for binary files",
+ np.fromfile, tmp_filename, dtype=x.dtype,
+ sep=",", offset=1)
+
+ @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
+ def test_fromfile_bad_dup(self, x, tmp_filename):
+ def dup_str(fd):
+ return 'abc'
+
+ def dup_bigint(fd):
+ return 2**68
+
+ old_dup = os.dup
+ try:
+ with open(tmp_filename, 'wb') as f:
+ x.tofile(f)
+ for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
+ os.dup = dup
+ assert_raises(exc, np.fromfile, f)
+ finally:
+ os.dup = old_dup
+
+ def _check_from(self, s, value, filename, **kw):
+ if 'sep' not in kw:
+ y = np.frombuffer(s, **kw)
+ else:
+ y = np.fromstring(s, **kw)
+ assert_array_equal(y, value)
+
+ with open(filename, 'wb') as f:
+ f.write(s)
+ y = np.fromfile(filename, **kw)
+ assert_array_equal(y, value)
+
+ @pytest.fixture(params=["period", "comma"])
+ def decimal_sep_localization(self, request):
+ """
+ Including this fixture in a test will automatically
+ execute it with both types of decimal separator.
+
+ So::
+
+ def test_decimal(decimal_sep_localization):
+ pass
+
+ is equivalent to the following two tests::
+
+ def test_decimal_period_separator():
+ pass
+
+ def test_decimal_comma_separator():
+ with CommaDecimalPointLocale():
+ pass
+ """
+ if request.param == "period":
+ yield
+ elif request.param == "comma":
+ with CommaDecimalPointLocale():
+ yield
+ else:
+ assert False, request.param
+
+ def test_nan(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+ tmp_filename,
+ sep=' ')
+
+ def test_inf(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b"inf +inf -inf infinity -Infinity iNfInItY -inF",
+ [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
+ tmp_filename,
+ sep=' ')
+
+ def test_numbers(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
+ [1.234, -1.234, .3, .3e55, -123133.1231e+133],
+ tmp_filename,
+ sep=' ')
+
+ def test_binary(self, tmp_filename):
+ self._check_from(
+ b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+ np.array([1, 2, 3, 4]),
+ tmp_filename,
+ dtype='<f4')
+
+ @pytest.mark.slow # takes > 1 minute on mechanical hard drive
+ def test_big_binary(self):
+ """Test workarounds for 32-bit limit for MSVC fwrite, fseek, and ftell
+
+ These would normally hang when doing something like the following.
+ See: https://github.com/numpy/numpy/issues/2256
+ """
+ if sys.platform != 'win32' or '[GCC ' in sys.version:
+ return
+ try:
+ # before workarounds, only up to 2**32-1 worked
+ fourgbplus = 2**32 + 2**16
+ testbytes = np.arange(8, dtype=np.int8)
+ n = len(testbytes)
+ flike = tempfile.NamedTemporaryFile()
+ f = flike.file
+ np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
+ flike.seek(0)
+ a = np.fromfile(f, dtype=np.int8)
+ flike.close()
+ assert_(len(a) == fourgbplus)
+ # check only start and end for speed:
+ assert_((a[:n] == testbytes).all())
+ assert_((a[-n:] == testbytes).all())
+ except (MemoryError, ValueError):
+ pass
+
+ def test_string(self, tmp_filename):
+ self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',')
+
+ def test_counted_string(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',')
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',')
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',')
+
+ def test_string_with_ws(self, tmp_filename):
+ self._check_from(
+ b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ')
+
+ def test_counted_string_with_ws(self, tmp_filename):
+ self._check_from(
+ b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int,
+ sep=' ')
+
+ def test_ascii(self, tmp_filename, decimal_sep_localization):
+ self._check_from(
+ b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',')
+ self._check_from(
+ b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',')
+
+ def test_malformed(self, tmp_filename, decimal_sep_localization):
+ with assert_warns(DeprecationWarning):
+ self._check_from(
+ b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ')
+
+ def test_long_sep(self, tmp_filename):
+ self._check_from(
+ b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_')
+
+ def test_dtype(self, tmp_filename):
+ v = np.array([1, 2, 3, 4], dtype=np.int_)
+ self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_)
+
+ def test_dtype_bool(self, tmp_filename):
+ # can't use _check_from because fromstring can't handle True/False
+ v = np.array([True, False, True, False], dtype=np.bool_)
+ s = b'1,0,-2.3,0'
+ with open(tmp_filename, 'wb') as f:
+ f.write(s)
+ y = np.fromfile(tmp_filename, sep=',', dtype=np.bool_)
+ assert_(y.dtype == '?')
+ assert_array_equal(y, v)
+
+ def test_tofile_sep(self, tmp_filename, decimal_sep_localization):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ with open(tmp_filename, 'w') as f:
+ x.tofile(f, sep=',')
+ with open(tmp_filename, 'r') as f:
+ s = f.read()
+ #assert_equal(s, '1.51,2.0,3.51,4.0')
+ y = np.array([float(p) for p in s.split(',')])
+ assert_array_equal(x,y)
+
+ def test_tofile_format(self, tmp_filename, decimal_sep_localization):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ with open(tmp_filename, 'w') as f:
+ x.tofile(f, sep=',', format='%.2f')
+ with open(tmp_filename, 'r') as f:
+ s = f.read()
+ assert_equal(s, '1.51,2.00,3.51,4.00')
+
+ def test_tofile_cleanup(self, tmp_filename):
+ x = np.zeros((10), dtype=object)
+ with open(tmp_filename, 'wb') as f:
+ assert_raises(OSError, lambda: x.tofile(f, sep=''))
+ # The dup'ed file handle must be closed, or the remove will fail on Windows
+ os.remove(tmp_filename)
+
+ # Also make sure that we close the Python handle
+ assert_raises(OSError, lambda: x.tofile(tmp_filename))
+ os.remove(tmp_filename)
+
+ def test_fromfile_subarray_binary(self, tmp_filename):
+ # Test subarray dtypes which are absorbed into the shape
+ x = np.arange(24, dtype="i4").reshape(2, 3, 4)
+ x.tofile(tmp_filename)
+ res = np.fromfile(tmp_filename, dtype="(3,4)i4")
+ assert_array_equal(x, res)
+
+ x_str = x.tobytes()
+ with assert_warns(DeprecationWarning):
+ # binary fromstring is deprecated
+ res = np.fromstring(x_str, dtype="(3,4)i4")
+ assert_array_equal(x, res)
+
+ def test_parsing_subarray_unsupported(self, tmp_filename):
+ # We currently do not support parsing subarray dtypes
+ data = "12,42,13," * 50
+ with pytest.raises(ValueError):
+ np.fromstring(data, dtype="(3,)i", sep=",")
+
+ with open(tmp_filename, "w") as f:
+ f.write(data)
+
+ with pytest.raises(ValueError):
+ np.fromfile(tmp_filename, dtype="(3,)i", sep=",")
+
+ def test_read_shorter_than_count_subarray(self, tmp_filename):
+ # Test that requesting more values does not cause any problems
+ # in conjunction with subarray dimensions being absorbed into the
+ # array dimension.
+ expected = np.arange(511 * 10, dtype="i").reshape(-1, 10)
+
+ binary = expected.tobytes()
+ with pytest.raises(ValueError):
+ with pytest.warns(DeprecationWarning):
+ np.fromstring(binary, dtype="(10,)i", count=10000)
+
+ expected.tofile(tmp_filename)
+ res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000)
+ assert_array_equal(res, expected)
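+ # How the subarray absorption above works: a "(10,)i" dtype makes each
+ # item a length-10 block of C ints, and the block shape is absorbed
+ # into the result, so reading 511 * 10 ints yields shape (511, 10)
+ # rather than a structured array.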
+
+
+class TestFromBuffer:
+ @pytest.mark.parametrize('byteorder', ['<', '>'])
+ @pytest.mark.parametrize('dtype', [float, int, complex])
+ def test_basic(self, byteorder, dtype):
+ dt = np.dtype(dtype).newbyteorder(byteorder)
+ x = (np.random.random((4, 7)) * 5).astype(dt)
+ buf = x.tobytes()
+ assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
+
+ @pytest.mark.parametrize("obj", [np.arange(10), b"12345678"])
+ def test_array_base(self, obj):
+ # Objects (including NumPy arrays) that do not use the
+ # `release_buffer` slot should be used directly as the base object.
+ # See also gh-21612
+ new = np.frombuffer(obj)
+ assert new.base is obj
+
+ def test_empty(self):
+ assert_array_equal(np.frombuffer(b''), np.array([]))
+
+ @pytest.mark.skipif(IS_PYPY,
+ reason="PyPy's memoryview currently does not track exports. See: "
+ "https://foss.heptapod.net/pypy/pypy/-/issues/3724")
+ def test_mmap_close(self):
+ # The old buffer protocol was not safe for some things that the new
+ # one is. `frombuffer` nevertheless used the old one for a long time.
+ # Check that it is safe with the new one (using memoryviews).
+ with tempfile.TemporaryFile(mode='wb') as tmp:
+ tmp.write(b"asdf")
+ tmp.flush()
+ mm = mmap.mmap(tmp.fileno(), 0)
+ arr = np.frombuffer(mm, dtype=np.uint8)
+ with pytest.raises(BufferError):
+ mm.close() # cannot close while array uses the buffer
+ del arr
+ mm.close()
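+ # The BufferError above follows from np.frombuffer returning a view
+ # that shares memory with its source: the mmap still has an exported
+ # buffer while `arr` is alive, so closing it must fail until the
+ # array is freed.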
+
+class TestFlat:
+ def setup_method(self):
+ a0 = np.arange(20.0)
+ a = a0.reshape(4, 5)
+ a0.shape = (4, 5)
+ a.flags.writeable = False
+ self.a = a
+ self.b = a[::2, ::2]
+ self.a0 = a0
+ self.b0 = a0[::2, ::2]
+
+ def test_contiguous(self):
+ testpassed = False
+ try:
+ self.a.flat[12] = 100.0
+ except ValueError:
+ testpassed = True
+ assert_(testpassed)
+ assert_(self.a.flat[12] == 12.0)
+
+ def test_discontiguous(self):
+ testpassed = False
+ try:
+ self.b.flat[4] = 100.0
+ except ValueError:
+ testpassed = True
+ assert_(testpassed)
+ assert_(self.b.flat[4] == 12.0)
+
+ def test___array__(self):
+ c = self.a.flat.__array__()
+ d = self.b.flat.__array__()
+ e = self.a0.flat.__array__()
+ f = self.b0.flat.__array__()
+
+ assert_(c.flags.writeable is False)
+ assert_(d.flags.writeable is False)
+ assert_(e.flags.writeable is True)
+ assert_(f.flags.writeable is False)
+ assert_(c.flags.writebackifcopy is False)
+ assert_(d.flags.writebackifcopy is False)
+ assert_(e.flags.writebackifcopy is False)
+ assert_(f.flags.writebackifcopy is False)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount(self):
+ # includes regression test for reference count error gh-13165
+ inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
+ indtype = np.dtype(np.intp)
+ rc_indtype = sys.getrefcount(indtype)
+ for ind in inds:
+ rc_ind = sys.getrefcount(ind)
+ for _ in range(100):
+ try:
+ self.a.flat[ind]
+ except IndexError:
+ pass
+ assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
+ assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
+
+ def test_index_getset(self):
+ it = np.arange(10).reshape(2, 1, 5).flat
+ with pytest.raises(AttributeError):
+ it.index = 10
+
+ for _ in it:
+ pass
+ # Check the value of `.index` is updated correctly (see also gh-19153)
+ # If the type was incorrect, this would show up on big-endian machines
+ assert it.index == it.base.size
+
+
+class TestResize:
+
+ @_no_tracing
+ def test_basic(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ if IS_PYPY:
+ x.resize((5, 5), refcheck=False)
+ else:
+ x.resize((5, 5))
+ assert_array_equal(x.flat[:9],
+ np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
+ assert_array_equal(x[9:].flat, 0)
+
+ def test_check_reference(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ y = x
+ assert_raises(ValueError, x.resize, (5, 1))
+ del y # avoid pyflakes unused variable warning.
+
+ @_no_tracing
+ def test_int_shape(self):
+ x = np.eye(3)
+ if IS_PYPY:
+ x.resize(3, refcheck=False)
+ else:
+ x.resize(3)
+ assert_array_equal(x, np.eye(3)[0,:])
+
+ def test_none_shape(self):
+ x = np.eye(3)
+ x.resize(None)
+ assert_array_equal(x, np.eye(3))
+ x.resize()
+ assert_array_equal(x, np.eye(3))
+
+ def test_0d_shape(self):
+ # do it multiple times to check it does not break the allocation
+ # cache (gh-9216)
+ for i in range(10):
+ x = np.empty((1,))
+ x.resize(())
+ assert_equal(x.shape, ())
+ assert_equal(x.size, 1)
+ x = np.empty(())
+ x.resize((1,))
+ assert_equal(x.shape, (1,))
+ assert_equal(x.size, 1)
+
+ def test_invalid_arguments(self):
+ assert_raises(TypeError, np.eye(3).resize, 'hi')
+ assert_raises(ValueError, np.eye(3).resize, -1)
+ assert_raises(TypeError, np.eye(3).resize, order=1)
+ assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
+
+ @_no_tracing
+ def test_freeform_shape(self):
+ x = np.eye(3)
+ if IS_PYPY:
+ x.resize(3, 2, 1, refcheck=False)
+ else:
+ x.resize(3, 2, 1)
+ assert_(x.shape == (3, 2, 1))
+
+ @_no_tracing
+ def test_zeros_appended(self):
+ x = np.eye(3)
+ if IS_PYPY:
+ x.resize(2, 3, 3, refcheck=False)
+ else:
+ x.resize(2, 3, 3)
+ assert_array_equal(x[0], np.eye(3))
+ assert_array_equal(x[1], np.zeros((3, 3)))
+
+ @_no_tracing
+ def test_obj_obj(self):
+ # check memory is initialized on resize, gh-4857
+ a = np.ones(10, dtype=[('k', object, 2)])
+ if IS_PYPY:
+ a.resize(15, refcheck=False)
+ else:
+ a.resize(15,)
+ assert_equal(a.shape, (15,))
+ assert_array_equal(a['k'][-5:], 0)
+ assert_array_equal(a['k'][:-5], 1)
+
+ def test_empty_view(self):
+ # check that sizes containing a zero don't trigger a reallocation for
+ # already empty arrays
+ x = np.zeros((10, 0), int)
+ x_view = x[...]
+ x_view.resize((0, 10))
+ x_view.resize((0, 100))
+
+ def test_check_weakref(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ xref = weakref.ref(x)
+ assert_raises(ValueError, x.resize, (5, 1))
+ del xref # avoid pyflakes unused variable warning.
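+ # Background for the refcheck handling in this class: ndarray.resize
+ # reallocates its buffer in place, so with refcheck=True (the default)
+ # it refuses to proceed while other references or weakrefs to the
+ # array exist; PyPy has no reliable reference counts, hence the
+ # refcheck=False branches above.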
+
+
+class TestRecord:
+ def test_field_rename(self):
+ dt = np.dtype([('f', float), ('i', int)])
+ dt.names = ['p', 'q']
+ assert_equal(dt.names, ['p', 'q'])
+
+ def test_multiple_field_name_occurrence(self):
+ def test_dtype_init():
+ np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
+
+ # Error raised when multiple fields have the same name
+ assert_raises(ValueError, test_dtype_init)
+
+ def test_bytes_fields(self):
+ # Bytes are not allowed in field names and are not recognized in titles
+ assert_raises(TypeError, np.dtype, [(b'a', int)])
+ assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
+
+ dt = np.dtype([((b'a', 'b'), int)])
+ assert_raises(TypeError, dt.__getitem__, b'a')
+
+ x = np.array([(1,), (2,), (3,)], dtype=dt)
+ assert_raises(IndexError, x.__getitem__, b'a')
+
+ y = x[0]
+ assert_raises(IndexError, y.__getitem__, b'a')
+
+ def test_multiple_field_name_unicode(self):
+ def test_dtype_unicode():
+ np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
+
+ # Error raised when multiple fields have the same name (unicode included)
+ assert_raises(ValueError, test_dtype_unicode)
+
+ def test_fromarrays_unicode(self):
+ # A single name string provided to fromarrays() is allowed to be unicode:
+ x = np.core.records.fromarrays(
+ [[0], [1]], names='a,b', formats='i4,i4')
+ assert_equal(x['a'][0], 0)
+ assert_equal(x['b'][0], 1)
+
+ def test_unicode_order(self):
+ # Test that we can sort with order given as a unicode field name:
+ name = 'b'
+ x = np.array([1, 3, 2], dtype=[(name, int)])
+ x.sort(order=name)
+ assert_equal(x['b'], np.array([1, 2, 3]))
+
+ def test_field_names(self):
+ # Test unicode and 8-bit / byte strings can be used
+ a = np.zeros((1,), dtype=[('f1', 'i4'),
+ ('f2', 'i4'),
+ ('f3', [('sf1', 'i4')])])
+ # byte string indexing fails gracefully
+ assert_raises(IndexError, a.__setitem__, b'f1', 1)
+ assert_raises(IndexError, a.__getitem__, b'f1')
+ assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
+ assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
+ b = a.copy()
+ fn1 = str('f1')
+ b[fn1] = 1
+ assert_equal(b[fn1], 1)
+ fnn = str('not at all')
+ assert_raises(ValueError, b.__setitem__, fnn, 1)
+ assert_raises(ValueError, b.__getitem__, fnn)
+ b[0][fn1] = 2
+ assert_equal(b[fn1], 2)
+ # Subfield
+ assert_raises(ValueError, b[0].__setitem__, fnn, 1)
+ assert_raises(ValueError, b[0].__getitem__, fnn)
+ # Subfield
+ fn3 = str('f3')
+ sfn1 = str('sf1')
+ b[fn3][sfn1] = 1
+ assert_equal(b[fn3][sfn1], 1)
+ assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
+ assert_raises(ValueError, b[fn3].__getitem__, fnn)
+ # multiple subfields
+ fn2 = str('f2')
+ b[fn2] = 3
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
+ # non-ascii unicode field indexing is well behaved
+ assert_raises(ValueError, a.__setitem__, '\u03e0', 1)
+ assert_raises(ValueError, a.__getitem__, '\u03e0')
+
+ def test_record_hash(self):
+ a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+ a.flags.writeable = False
+ b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
+ b.flags.writeable = False
+ c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
+ c.flags.writeable = False
+ assert_(hash(a[0]) == hash(a[1]))
+ assert_(hash(a[0]) == hash(b[0]))
+ assert_(hash(a[0]) != hash(b[1]))
+ assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+
+ def test_record_no_hash(self):
+ a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+ assert_raises(TypeError, hash, a[0])
+
+ def test_empty_structure_creation(self):
+ # make sure these do not raise errors (gh-5631)
+ np.array([()], dtype={'names': [], 'formats': [],
+ 'offsets': [], 'itemsize': 12})
+ np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
+ 'offsets': [], 'itemsize': 12})
+
+ def test_multifield_indexing_view(self):
+ a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+ v = a[['a', 'c']]
+ assert_(v.base is a)
+ assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+ 'formats': ['i4', 'u4'],
+ 'offsets': [0, 8]}))
+ v[:] = (4,5)
+ assert_equal(a[0].item(), (4, 1, 5))
+
+class TestView:
+ def test_basic(self):
+ x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
+ dtype=[('r', np.int8), ('g', np.int8),
+ ('b', np.int8), ('a', np.int8)])
+ # We must be specific about the endianness here:
+ y = x.view(dtype='<i4')
+ # ... and again without the keyword.
+ z = x.view('<i4')
+ assert_array_equal(y, z)
+ assert_array_equal(y, [67305985, 134678021])
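+ # Worked values for the assertion above: a little-endian i4 view packs
+ # the bytes (r, g, b, a) = (1, 2, 3, 4) as 0x04030201 = 67305985 and
+ # (5, 6, 7, 8) as 0x08070605 = 134678021.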
+
+
+def _mean(a, **args):
+ return a.mean(**args)
+
+
+def _var(a, **args):
+ return a.var(**args)
+
+
+def _std(a, **args):
+ return a.std(**args)
+
+
+class TestStats:
+
+ funcs = [_mean, _var, _std]
+
+ def setup_method(self):
+ np.random.seed(range(3))
+ self.rmat = np.random.random((4, 5))
+ self.cmat = self.rmat + 1j * self.rmat
+ self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
+ self.omat = self.omat.reshape(4, 5)
+
+ def test_python_type(self):
+ for x in (np.float16(1.), 1, 1., 1+0j):
+ assert_equal(np.mean([x]), 1.)
+ assert_equal(np.std([x]), 0.)
+ assert_equal(np.var([x]), 0.)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for f in self.funcs:
+ for axis in [0, 1]:
+ res = f(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == mat.ndim)
+ assert_(res.shape[axis] == 1)
+ for axis in [None]:
+ res = f(mat, axis=axis, keepdims=True)
+ assert_(res.shape == (1, 1))
+
+ def test_out(self):
+ mat = np.eye(3)
+ for f in self.funcs:
+ out = np.zeros(3)
+ tgt = f(mat, axis=1)
+ res = f(mat, axis=1, out=out)
+ assert_almost_equal(res, out)
+ assert_almost_equal(res, tgt)
+ out = np.empty(2)
+ assert_raises(ValueError, f, mat, axis=1, out=out)
+ out = np.empty((2, 2))
+ assert_raises(ValueError, f, mat, axis=1, out=out)
+
+ def test_dtype_from_input(self):
+
+ icodes = np.typecodes['AllInteger']
+ fcodes = np.typecodes['AllFloat']
+
+ # object type
+ for f in self.funcs:
+ mat = np.array([[Decimal(1)]*3]*3)
+ tgt = mat.dtype.type
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = type(f(mat, axis=None))
+ assert_(res is Decimal)
+
+ # integer types
+ for f in self.funcs:
+ for c in icodes:
+ mat = np.eye(3, dtype=c)
+ tgt = np.float64
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ # mean for float types
+ for f in [_mean]:
+ for c in fcodes:
+ mat = np.eye(3, dtype=c)
+ tgt = mat.dtype.type
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ # var, std for float types
+ for f in [_var, _std]:
+ for c in fcodes:
+ mat = np.eye(3, dtype=c)
+ # deal with complex types
+ tgt = mat.real.dtype.type
+ res = f(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_dtype(self):
+ mat = np.eye(3)
+
+ # stats for integer types
+ # FIXME:
+ # this needs a definition, as there are lots of places along the line
+ # where type casting may take place.
+
+ # for f in self.funcs:
+ # for c in np.typecodes['AllInteger']:
+ # tgt = np.dtype(c).type
+ # res = f(mat, axis=1, dtype=c).dtype.type
+ # assert_(res is tgt)
+ # # scalar case
+ # res = f(mat, axis=None, dtype=c).dtype.type
+ # assert_(res is tgt)
+
+ # stats for float types
+ for f in self.funcs:
+ for c in np.typecodes['AllFloat']:
+ tgt = np.dtype(c).type
+ res = f(mat, axis=1, dtype=c).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ res = f(mat, axis=None, dtype=c).dtype.type
+ assert_(res is tgt)
+
+ def test_ddof(self):
+ for f in [_var]:
+ for ddof in range(3):
+ dim = self.rmat.shape[1]
+ tgt = f(self.rmat, axis=1) * dim
+ res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
+ assert_almost_equal(res, tgt)
+ for f in [_std]:
+ for ddof in range(3):
+ dim = self.rmat.shape[1]
+ tgt = f(self.rmat, axis=1) * np.sqrt(dim)
+ res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
+ assert_almost_equal(res, tgt)
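+ # The identity exercised above: with n values along the axis,
+ # var(x, ddof=d) == var(x) * n / (n - d), because the summed squared
+ # deviations are divided by (n - d) instead of n; std scales by the
+ # square root of the same factor.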
+
+ def test_ddof_too_big(self):
+ dim = self.rmat.shape[1]
+ for f in [_var, _std]:
+ for ddof in range(dim, dim + 2):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(self.rmat, axis=1, ddof=ddof)
+ assert_(not (res < 0).any())
+ assert_(len(w) > 0)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ def test_empty(self):
+ A = np.zeros((0, 3))
+ for f in self.funcs:
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(A, axis=axis)).all())
+ assert_(len(w) > 0)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(f(A, axis=axis), np.zeros([]))
+
+ def test_mean_values(self):
+ for mat in [self.rmat, self.cmat, self.omat]:
+ for axis in [0, 1]:
+ tgt = mat.sum(axis=axis)
+ res = _mean(mat, axis=axis) * mat.shape[axis]
+ assert_almost_equal(res, tgt)
+ for axis in [None]:
+ tgt = mat.sum(axis=axis)
+ res = _mean(mat, axis=axis) * np.prod(mat.shape)
+ assert_almost_equal(res, tgt)
+
+ def test_mean_float16(self):
+ # This fails if the sum inside mean is done in float16 instead
+ # of float32.
+ assert_(_mean(np.ones(100000, dtype='float16')) == 1)
+
+ def test_mean_axis_error(self):
+ # Ensure that AxisError is raised instead of IndexError when axis is
+ # out of bounds, see gh-15817.
+ with assert_raises(np.core._exceptions.AxisError):
+ np.arange(10).mean(axis=2)
+
+ def test_mean_where(self):
+ a = np.arange(16).reshape((4, 4))
+ wh_full = np.array([[False, True, False, True],
+ [True, False, True, False],
+ [True, True, False, False],
+ [False, False, True, True]])
+ wh_partial = np.array([[False],
+ [True],
+ [True],
+ [False]])
+ _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]),
+ (0, wh_full, [6., 5., 10., 9.]),
+ (1, wh_full, [2., 5., 8.5, 14.5]),
+ (0, wh_partial, [6., 7., 8., 9.])]
+ for _ax, _wh, _res in _cases:
+ assert_allclose(a.mean(axis=_ax, where=_wh),
+ np.array(_res))
+ assert_allclose(np.mean(a, axis=_ax, where=_wh),
+ np.array(_res))
+
+ a3d = np.arange(16).reshape((2, 2, 4))
+ _wh_partial = np.array([False, True, True, False])
+ _res = [[1.5, 5.5], [9.5, 13.5]]
+ assert_allclose(a3d.mean(axis=2, where=_wh_partial),
+ np.array(_res))
+ assert_allclose(np.mean(a3d, axis=2, where=_wh_partial),
+ np.array(_res))
+
+ with pytest.warns(RuntimeWarning) as w:
+ assert_allclose(a.mean(axis=1, where=wh_partial),
+ np.array([np.nan, 5.5, 9.5, np.nan]))
+ with pytest.warns(RuntimeWarning) as w:
+ assert_equal(a.mean(where=False), np.nan)
+ with pytest.warns(RuntimeWarning) as w:
+ assert_equal(np.mean(a, where=False), np.nan)
+
+ def test_var_values(self):
+ for mat in [self.rmat, self.cmat, self.omat]:
+ for axis in [0, 1, None]:
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conjugate()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize(('complex_dtype', 'ndec'), (
+ ('complex64', 6),
+ ('complex128', 7),
+ ('clongdouble', 7),
+ ))
+ def test_var_complex_values(self, complex_dtype, ndec):
+ # Test fast-paths for every builtin complex type
+ for axis in [0, 1, None]:
+ mat = self.cmat.copy().astype(complex_dtype)
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conjugate()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt, decimal=ndec)
+
+ def test_var_dimensions(self):
+ # _var paths for complex numbers introduce additions on views that
+ # increase dimensions. Ensure this generalizes to higher dimensions.
+ mat = np.stack([self.cmat]*3)
+ for axis in [0, 1, 2, -1, None]:
+ msqr = _mean(mat * mat.conj(), axis=axis)
+ mean = _mean(mat, axis=axis)
+ tgt = msqr - mean * mean.conjugate()
+ res = _var(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_var_complex_byteorder(self):
+ # Test that var fast-path does not cause failures for complex arrays
+ # with non-native byteorder
+ cmat = self.cmat.copy().astype('complex128')
+ cmat_swapped = cmat.astype(cmat.dtype.newbyteorder())
+ assert_almost_equal(cmat.var(), cmat_swapped.var())
+
+ def test_var_axis_error(self):
+ # Ensure that AxisError is raised instead of IndexError when axis is
+ # out of bounds, see gh-15817.
+ with assert_raises(np.core._exceptions.AxisError):
+ np.arange(10).var(axis=2)
+
+ def test_var_where(self):
+ a = np.arange(25).reshape((5, 5))
+ wh_full = np.array([[False, True, False, True, True],
+ [True, False, True, True, False],
+ [True, True, False, False, True],
+ [False, True, True, False, True],
+ [True, False, True, True, False]])
+ wh_partial = np.array([[False],
+ [True],
+ [True],
+ [False],
+ [True]])
+ _cases = [(0, True, [50., 50., 50., 50., 50.]),
+ (1, True, [2., 2., 2., 2., 2.])]
+ for _ax, _wh, _res in _cases:
+ assert_allclose(a.var(axis=_ax, where=_wh),
+ np.array(_res))
+ assert_allclose(np.var(a, axis=_ax, where=_wh),
+ np.array(_res))
+
+ a3d = np.arange(16).reshape((2, 2, 4))
+ _wh_partial = np.array([False, True, True, False])
+ _res = [[0.25, 0.25], [0.25, 0.25]]
+ assert_allclose(a3d.var(axis=2, where=_wh_partial),
+ np.array(_res))
+ assert_allclose(np.var(a3d, axis=2, where=_wh_partial),
+ np.array(_res))
+
+ assert_allclose(np.var(a, axis=1, where=wh_full),
+ np.var(a[wh_full].reshape((5, 3)), axis=1))
+ assert_allclose(np.var(a, axis=0, where=wh_partial),
+ np.var(a[wh_partial[:,0]], axis=0))
+ with pytest.warns(RuntimeWarning) as w:
+ assert_equal(a.var(where=False), np.nan)
+ with pytest.warns(RuntimeWarning) as w:
+ assert_equal(np.var(a, where=False), np.nan)
+
+ def test_std_values(self):
+ for mat in [self.rmat, self.cmat, self.omat]:
+ for axis in [0, 1, None]:
+ tgt = np.sqrt(_var(mat, axis=axis))
+ res = _std(mat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_std_where(self):
+ a = np.arange(25).reshape((5,5))[::-1]
+ whf = np.array([[False, True, False, True, True],
+ [True, False, True, False, True],
+ [True, True, False, True, False],
+ [True, False, True, True, False],
+ [False, True, False, True, True]])
+ whp = np.array([[False],
+ [False],
+ [True],
+ [True],
+ [False]])
+ _cases = [
+ (0, True, 7.07106781*np.ones((5))),
+ (1, True, 1.41421356*np.ones((5))),
+ (0, whf,
+ np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])),
+ (0, whp, 2.5*np.ones((5)))
+ ]
+ for _ax, _wh, _res in _cases:
+ assert_allclose(a.std(axis=_ax, where=_wh), _res)
+ assert_allclose(np.std(a, axis=_ax, where=_wh), _res)
+
+ a3d = np.arange(16).reshape((2, 2, 4))
+ _wh_partial = np.array([False, True, True, False])
+ _res = [[0.5, 0.5], [0.5, 0.5]]
+ assert_allclose(a3d.std(axis=2, where=_wh_partial),
+ np.array(_res))
+ assert_allclose(np.std(a3d, axis=2, where=_wh_partial),
+ np.array(_res))
+
+ assert_allclose(a.std(axis=1, where=whf),
+ np.std(a[whf].reshape((5,3)), axis=1))
+ assert_allclose(np.std(a, axis=1, where=whf),
+ (a[whf].reshape((5,3))).std(axis=1))
+ assert_allclose(a.std(axis=0, where=whp),
+ np.std(a[whp[:,0]], axis=0))
+ assert_allclose(np.std(a, axis=0, where=whp),
+ (a[whp[:,0]]).std(axis=0))
+ with pytest.warns(RuntimeWarning) as w:
+ assert_equal(a.std(where=False), np.nan)
+ with pytest.warns(RuntimeWarning) as w:
+ assert_equal(np.std(a, where=False), np.nan)
+
+ def test_subclass(self):
+ class TestArray(np.ndarray):
+ def __new__(cls, data, info):
+ result = np.array(data)
+ result = result.view(cls)
+ result.info = info
+ return result
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, "info", '')
+
+ dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
+ res = dat.mean(1)
+ assert_(res.info == dat.info)
+ res = dat.std(1)
+ assert_(res.info == dat.info)
+ res = dat.var(1)
+ assert_(res.info == dat.info)
+
+
+class TestVdot:
+ def test_basic(self):
+ dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
+ dt_complex = np.typecodes['Complex']
+
+ # test real
+ a = np.eye(3)
+ for dt in dt_numeric + 'O':
+ b = a.astype(dt)
+ res = np.vdot(b, b)
+ assert_(np.isscalar(res))
+ assert_equal(np.vdot(b, b), 3)
+
+ # test complex
+ a = np.eye(3) * 1j
+ for dt in dt_complex + 'O':
+ b = a.astype(dt)
+ res = np.vdot(b, b)
+ assert_(np.isscalar(res))
+ assert_equal(np.vdot(b, b), 3)
+
+ # test boolean
+ b = np.eye(3, dtype=bool)
+ res = np.vdot(b, b)
+ assert_(np.isscalar(res))
+ assert_equal(np.vdot(b, b), True)
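+ # Note that np.vdot conjugates its first argument and flattens both
+ # inputs, so for the complex case above vdot(b, b) sums conj(1j) * 1j
+ # over the three diagonal entries, giving the real scalar 3.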
+
+ def test_vdot_array_order(self):
+ a = np.array([[1, 2], [3, 4]], order='C')
+ b = np.array([[1, 2], [3, 4]], order='F')
+ res = np.vdot(a, a)
+
+ # integer arrays are exact
+ assert_equal(np.vdot(a, b), res)
+ assert_equal(np.vdot(b, a), res)
+ assert_equal(np.vdot(b, b), res)
+
+ def test_vdot_uncontiguous(self):
+ for size in [2, 1000]:
+ # Different sizes match different branches in vdot.
+ a = np.zeros((size, 2, 2))
+ b = np.zeros((size, 2, 2))
+ a[:, 0, 0] = np.arange(size)
+ b[:, 0, 0] = np.arange(size) + 1
+ # Make a and b uncontiguous:
+ a = a[..., 0]
+ b = b[..., 0]
+
+ assert_equal(np.vdot(a, b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a, b.copy()),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a.copy(), b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a.copy('F'), b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a, b.copy('F')),
+ np.vdot(a.flatten(), b.flatten()))
+
+
+class TestDot:
+ def setup_method(self):
+ np.random.seed(128)
+ self.A = np.random.rand(4, 2)
+ self.b1 = np.random.rand(2, 1)
+ self.b2 = np.random.rand(2)
+ self.b3 = np.random.rand(1, 2)
+ self.b4 = np.random.rand(4)
+ self.N = 7
+
+ def test_dotmatmat(self):
+ A = self.A
+ res = np.dot(A.transpose(), A)
+ tgt = np.array([[1.45046013, 0.86323640],
+ [0.86323640, 0.84934569]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotmatvec(self):
+ A, b1 = self.A, self.b1
+ res = np.dot(A, b1)
+ tgt = np.array([[0.32114320], [0.04889721],
+ [0.15696029], [0.33612621]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotmatvec2(self):
+ A, b2 = self.A, self.b2
+ res = np.dot(A, b2)
+ tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecmat(self):
+ A, b4 = self.A, self.b4
+ res = np.dot(b4, A)
+ tgt = np.array([1.23495091, 1.12222648])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecmat2(self):
+ b3, A = self.b3, self.A
+ res = np.dot(b3, A.transpose())
+ tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecmat3(self):
+ A, b4 = self.A, self.b4
+ res = np.dot(A.transpose(), b4)
+ tgt = np.array([1.23495091, 1.12222648])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecvecouter(self):
+ b1, b3 = self.b1, self.b3
+ res = np.dot(b1, b3)
+ tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecvecinner(self):
+ b1, b3 = self.b1, self.b3
+ res = np.dot(b3, b1)
+ tgt = np.array([[0.23129668]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotcolumnvect1(self):
+ b1 = np.ones((3, 1))
+ b2 = [5.3]
+ res = np.dot(b1, b2)
+ tgt = np.array([5.3, 5.3, 5.3])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotcolumnvect2(self):
+ b1 = np.ones((3, 1)).transpose()
+ b2 = [6.2]
+ res = np.dot(b2, b1)
+ tgt = np.array([6.2, 6.2, 6.2])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecscalar(self):
+ np.random.seed(100)
+ b1 = np.random.rand(1, 1)
+ b2 = np.random.rand(1, 4)
+ res = np.dot(b1, b2)
+ tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_dotvecscalar2(self):
+ np.random.seed(100)
+ b1 = np.random.rand(4, 1)
+ b2 = np.random.rand(1, 1)
+ res = np.dot(b1, b2)
+ tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_all(self):
+ dims = [(), (1,), (1, 1)]
+ dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
+ for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
+ b1 = np.zeros(dim1)
+ b2 = np.zeros(dim2)
+ res = np.dot(b1, b2)
+ tgt = np.zeros(dim)
+ assert_(res.shape == tgt.shape)
+ assert_almost_equal(res, tgt, decimal=self.N)
+
+ def test_vecobject(self):
+ class Vec:
+ def __init__(self, sequence=None):
+ if sequence is None:
+ sequence = []
+ self.array = np.array(sequence)
+
+ def __add__(self, other):
+ out = Vec()
+ out.array = self.array + other.array
+ return out
+
+ def __sub__(self, other):
+ out = Vec()
+ out.array = self.array - other.array
+ return out
+
+ def __mul__(self, other): # with scalar
+ out = Vec(self.array.copy())
+ out.array *= other
+ return out
+
+ def __rmul__(self, other):
+ return self*other
+
+ U_non_cont = np.transpose([[1., 1.], [1., 2.]])
+ U_cont = np.ascontiguousarray(U_non_cont)
+ x = np.array([Vec([1., 0.]), Vec([0., 1.])])
+ zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
+ zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
+ assert_equal(zeros[0].array, zeros_test[0].array)
+ assert_equal(zeros[1].array, zeros_test[1].array)
+
+ def test_dot_2args(self):
+ from numpy.core.multiarray import dot
+
+ a = np.array([[1, 2], [3, 4]], dtype=float)
+ b = np.array([[1, 0], [1, 1]], dtype=float)
+ c = np.array([[3, 2], [7, 4]], dtype=float)
+
+ d = dot(a, b)
+ assert_allclose(c, d)
+
+ def test_dot_3args(self):
+ from numpy.core.multiarray import dot
+
+ np.random.seed(22)
+ f = np.random.random_sample((1024, 16))
+ v = np.random.random_sample((16, 32))
+
+ r = np.empty((1024, 32))
+ for i in range(12):
+ dot(f, v, r)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(r), 2)
+ r2 = dot(f, v, out=None)
+ assert_array_equal(r2, r)
+ assert_(r is dot(f, v, out=r))
+
+ v = v[:, 0].copy() # v.shape == (16,)
+ r = r[:, 0].copy() # r.shape == (1024,)
+ r2 = dot(f, v)
+ assert_(r is dot(f, v, r))
+ assert_array_equal(r2, r)
+
+ def test_dot_3args_errors(self):
+ from numpy.core.multiarray import dot
+
+ np.random.seed(22)
+ f = np.random.random_sample((1024, 16))
+ v = np.random.random_sample((16, 32))
+
+ r = np.empty((1024, 31))
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((1024,))
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((32,))
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((32, 1024))
+ assert_raises(ValueError, dot, f, v, r)
+ assert_raises(ValueError, dot, f, v, r.T)
+
+ r = np.empty((1024, 64))
+ assert_raises(ValueError, dot, f, v, r[:, ::2])
+ assert_raises(ValueError, dot, f, v, r[:, :32])
+
+ r = np.empty((1024, 32), dtype=np.float32)
+ assert_raises(ValueError, dot, f, v, r)
+
+ r = np.empty((1024, 32), dtype=int)
+ assert_raises(ValueError, dot, f, v, r)
+
+ def test_dot_array_order(self):
+ a = np.array([[1, 2], [3, 4]], order='C')
+ b = np.array([[1, 2], [3, 4]], order='F')
+ res = np.dot(a, a)
+
+ # integer arrays are exact
+ assert_equal(np.dot(a, b), res)
+ assert_equal(np.dot(b, a), res)
+ assert_equal(np.dot(b, b), res)
+
+ def test_accelerate_framework_sgemv_fix(self):
+
+ def aligned_array(shape, align, dtype, order='C'):
+ d = dtype(0)
+ N = np.prod(shape)
+ tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
+ address = tmp.__array_interface__["data"][0]
+ for offset in range(align):
+ if (address + offset) % align == 0:
+ break
+ tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
+ return tmp.reshape(shape, order=order)
+
+ def as_aligned(arr, align, dtype, order='C'):
+ aligned = aligned_array(arr.shape, align, dtype, order)
+ aligned[:] = arr[:]
+ return aligned
+
+ def assert_dot_close(A, X, desired):
+ assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
+
+ m = aligned_array(100, 15, np.float32)
+ s = aligned_array((100, 100), 15, np.float32)
+ np.dot(s, m) # this will always segfault if the bug is present
+
+ testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
+ for align, m, n, a_order in testdata:
+ # Calculation in double precision
+ A_d = np.random.rand(m, n)
+ X_d = np.random.rand(n)
+ desired = np.dot(A_d, X_d)
+ # Calculation with aligned single precision
+ A_f = as_aligned(A_d, align, np.float32, order=a_order)
+ X_f = as_aligned(X_d, align, np.float32)
+ assert_dot_close(A_f, X_f, desired)
+ # Strided A rows
+ A_d_2 = A_d[::2]
+ desired = np.dot(A_d_2, X_d)
+ A_f_2 = A_f[::2]
+ assert_dot_close(A_f_2, X_f, desired)
+ # Strided A columns, strided X vector
+ A_d_22 = A_d_2[:, ::2]
+ X_d_2 = X_d[::2]
+ desired = np.dot(A_d_22, X_d_2)
+ A_f_22 = A_f_2[:, ::2]
+ X_f_2 = X_f[::2]
+ assert_dot_close(A_f_22, X_f_2, desired)
+ # Check the strides are as expected
+ if a_order == 'F':
+ assert_equal(A_f_22.strides, (8, 8 * m))
+ else:
+ assert_equal(A_f_22.strides, (8 * n, 8))
+ assert_equal(X_f_2.strides, (8,))
+ # Strides in A rows + cols only
+ X_f_2c = as_aligned(X_f_2, align, np.float32)
+ assert_dot_close(A_f_22, X_f_2c, desired)
+ # Strides just in A cols
+ A_d_12 = A_d[:, ::2]
+ desired = np.dot(A_d_12, X_d_2)
+ A_f_12 = A_f[:, ::2]
+ assert_dot_close(A_f_12, X_f_2c, desired)
+ # Strides in A cols and X
+ assert_dot_close(A_f_12, X_f_2, desired)
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
+ @requires_memory(free_bytes=18e9) # complex case needs 18GiB+
+ def test_huge_vectordot(self, dtype):
+ # Large vector multiplications are chunked with 32bit BLAS
+ # Test that the chunking does the right thing, see also gh-22262
+ data = np.ones(2**30+100, dtype=dtype)
+ res = np.dot(data, data)
+ assert res == 2**30+100
+
+ def test_dtype_discovery_fails(self):
+ # See gh-14247, error checking was missing for failed dtype discovery
+ class BadObject(object):
+ def __array__(self):
+ raise TypeError("just this tiny mint leaf")
+
+ with pytest.raises(TypeError):
+ np.dot(BadObject(), BadObject())
+
+ with pytest.raises(TypeError):
+ np.dot(3.0, BadObject())
+
+
+class MatmulCommon:
+ """Common tests for '@' operator and numpy.matmul.
+
+ """
+ # Should work with these types; "O" (object) is included at the end.
+ types = "?bhilqBHILQefdgFDGO"
+
+ def test_exceptions(self):
+ dims = [
+ ((1,), (2,)), # mismatched vector vector
+ ((2, 1,), (2,)), # mismatched matrix vector
+ ((2,), (1, 2)), # mismatched vector matrix
+ ((1, 2), (3, 1)), # mismatched matrix matrix
+ ((1,), ()), # vector scalar
+ ((), (1)), # scalar vector
+ ((1, 1), ()), # matrix scalar
+ ((), (1, 1)), # scalar matrix
+ ((2, 2, 1), (3, 1, 2)), # cannot broadcast
+ ]
+
+ for dt, (dm1, dm2) in itertools.product(self.types, dims):
+ a = np.ones(dm1, dtype=dt)
+ b = np.ones(dm2, dtype=dt)
+ assert_raises(ValueError, self.matmul, a, b)
+
+ def test_shapes(self):
+ dims = [
+ ((1, 1), (2, 1, 1)), # broadcast first argument
+ ((2, 1, 1), (1, 1)), # broadcast second argument
+ ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
+ ]
+
+ for dt, (dm1, dm2) in itertools.product(self.types, dims):
+ a = np.ones(dm1, dtype=dt)
+ b = np.ones(dm2, dtype=dt)
+ res = self.matmul(a, b)
+ assert_(res.shape == (2, 1, 1))
+
+ # vector vector returns scalars.
+ for dt in self.types:
+ a = np.ones((2,), dtype=dt)
+ b = np.ones((2,), dtype=dt)
+ c = self.matmul(a, b)
+ assert_(np.array(c).shape == ())
+
+ def test_result_types(self):
+ mat = np.ones((1,1))
+ vec = np.ones((1,))
+ for dt in self.types:
+ m = mat.astype(dt)
+ v = vec.astype(dt)
+ for arg in [(m, v), (v, m), (m, m)]:
+ res = self.matmul(*arg)
+ assert_(res.dtype == dt)
+
+ # vector vector returns scalars
+ if dt != "O":
+ res = self.matmul(v, v)
+ assert_(type(res) is np.dtype(dt).type)
+
+ def test_scalar_output(self):
+ vec1 = np.array([2])
+ vec2 = np.array([3, 4]).reshape(1, -1)
+ tgt = np.array([6, 8])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt)
+ res = self.matmul(v2.T, v1)
+ assert_equal(res, tgt)
+
+ # boolean type
+ vec = np.array([True, True], dtype='?').reshape(1, -1)
+ res = self.matmul(vec[:, 0], vec)
+ assert_equal(res, True)
+
+ def test_vector_vector_values(self):
+ vec1 = np.array([1, 2])
+ vec2 = np.array([3, 4]).reshape(-1, 1)
+ tgt1 = np.array([11])
+ tgt2 = np.array([[3, 6], [4, 8]])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt1)
+ # no broadcast, we must make v1 into a 2d ndarray
+ res = self.matmul(v2, v1.reshape(1, -1))
+ assert_equal(res, tgt2)
+
+ # boolean type
+ vec = np.array([True, True], dtype='?')
+ res = self.matmul(vec, vec)
+ assert_equal(res, True)
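+ # Worked values for the targets above: [1, 2] @ [[3], [4]] contracts
+ # to [1*3 + 2*4] = [11], while [[3], [4]] @ [[1, 2]] is the outer
+ # product [[3, 6], [4, 8]].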
+
+ def test_vector_matrix_values(self):
+ vec = np.array([1, 2])
+ mat1 = np.array([[1, 2], [3, 4]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([7, 10])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+ for dt in self.types[1:]:
+ v = vec.astype(dt)
+ m1 = mat1.astype(dt)
+ m2 = mat2.astype(dt)
+ res = self.matmul(v, m1)
+ assert_equal(res, tgt1)
+ res = self.matmul(v, m2)
+ assert_equal(res, tgt2)
+
+ # boolean type
+ vec = np.array([True, False])
+ mat1 = np.array([[True, False], [False, True]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([True, False])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+
+ res = self.matmul(vec, mat1)
+ assert_equal(res, tgt1)
+ res = self.matmul(vec, mat2)
+ assert_equal(res, tgt2)
+
+ def test_matrix_vector_values(self):
+ vec = np.array([1, 2])
+ mat1 = np.array([[1, 2], [3, 4]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([5, 11])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+ for dt in self.types[1:]:
+ v = vec.astype(dt)
+ m1 = mat1.astype(dt)
+ m2 = mat2.astype(dt)
+ res = self.matmul(m1, v)
+ assert_equal(res, tgt1)
+ res = self.matmul(m2, v)
+ assert_equal(res, tgt2)
+
+ # boolean type
+ vec = np.array([True, False])
+ mat1 = np.array([[True, False], [False, True]])
+ mat2 = np.stack([mat1]*2, axis=0)
+ tgt1 = np.array([True, False])
+ tgt2 = np.stack([tgt1]*2, axis=0)
+
+ res = self.matmul(mat1, vec)
+ assert_equal(res, tgt1)
+ res = self.matmul(mat2, vec)
+ assert_equal(res, tgt2)
+
+ def test_matrix_matrix_values(self):
+ mat1 = np.array([[1, 2], [3, 4]])
+ mat2 = np.array([[1, 0], [1, 1]])
+ mat12 = np.stack([mat1, mat2], axis=0)
+ mat21 = np.stack([mat2, mat1], axis=0)
+ tgt11 = np.array([[7, 10], [15, 22]])
+ tgt12 = np.array([[3, 2], [7, 4]])
+ tgt21 = np.array([[1, 2], [4, 6]])
+ tgt12_21 = np.stack([tgt12, tgt21], axis=0)
+ tgt11_12 = np.stack((tgt11, tgt12), axis=0)
+ tgt11_21 = np.stack((tgt11, tgt21), axis=0)
+ for dt in self.types[1:]:
+ m1 = mat1.astype(dt)
+ m2 = mat2.astype(dt)
+ m12 = mat12.astype(dt)
+ m21 = mat21.astype(dt)
+
+ # matrix @ matrix
+ res = self.matmul(m1, m2)
+ assert_equal(res, tgt12)
+ res = self.matmul(m2, m1)
+ assert_equal(res, tgt21)
+
+ # stacked @ matrix
+ res = self.matmul(m12, m1)
+ assert_equal(res, tgt11_21)
+
+ # matrix @ stacked
+ res = self.matmul(m1, m12)
+ assert_equal(res, tgt11_12)
+
+ # stacked @ stacked
+ res = self.matmul(m12, m21)
+ assert_equal(res, tgt12_21)
+
+ # boolean type
+ m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
+ m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
+ m12 = np.stack([m1, m2], axis=0)
+ m21 = np.stack([m2, m1], axis=0)
+ tgt11 = m1
+ tgt12 = m1
+ tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
+ tgt12_21 = np.stack([tgt12, tgt21], axis=0)
+ tgt11_12 = np.stack((tgt11, tgt12), axis=0)
+ tgt11_21 = np.stack((tgt11, tgt21), axis=0)
+
+ # matrix @ matrix
+ res = self.matmul(m1, m2)
+ assert_equal(res, tgt12)
+ res = self.matmul(m2, m1)
+ assert_equal(res, tgt21)
+
+ # stacked @ matrix
+ res = self.matmul(m12, m1)
+ assert_equal(res, tgt11_21)
+
+ # matrix @ stacked
+ res = self.matmul(m1, m12)
+ assert_equal(res, tgt11_12)
+
+ # stacked @ stacked
+ res = self.matmul(m12, m21)
+ assert_equal(res, tgt12_21)
+
+
+class TestMatmul(MatmulCommon):
+ matmul = np.matmul
+
+ def test_out_arg(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ tgt = np.dot(a, b)
+
+ # test as positional argument
+ msg = "out positional argument"
+ out = np.zeros((5, 2), dtype=float)
+ self.matmul(a, b, out)
+ assert_array_equal(out, tgt, err_msg=msg)
+
+ # test as keyword argument
+ msg = "out keyword argument"
+ out = np.zeros((5, 2), dtype=float)
+ self.matmul(a, b, out=out)
+ assert_array_equal(out, tgt, err_msg=msg)
+
+ # test out with a type cast that is not allowed under safe casting
+ msg = "Cannot cast ufunc .* output"
+ out = np.zeros((5, 2), dtype=np.int32)
+ assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
+
+ # test out with type upcast to complex
+ out = np.zeros((5, 2), dtype=np.complex128)
+ c = self.matmul(a, b, out=out)
+ assert_(c is out)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, '')
+ c = c.astype(tgt.dtype)
+ assert_array_equal(c, tgt)
+
+ def test_empty_out(self):
+ # Check that the output cannot be broadcast, so that it cannot be
+ # size zero when the outer dimensions (iterator size) have size zero.
+ arr = np.ones((0, 1, 1))
+ out = np.ones((1, 1, 1))
+ assert self.matmul(arr, arr).shape == (0, 1, 1)
+
+ with pytest.raises(ValueError, match=r"non-broadcastable"):
+ self.matmul(arr, arr, out=out)
+
+ def test_out_contiguous(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ v = np.array([1, 3], dtype=float)
+ tgt = np.dot(a, b)
+ tgt_mv = np.dot(a, v)
+
+ # test out non-contiguous
+ out = np.ones((5, 2, 2), dtype=float)
+ c = self.matmul(a, b, out=out[..., 0])
+ assert c.base is out
+ assert_array_equal(c, tgt)
+ c = self.matmul(a, v, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+ c = self.matmul(v, a.T, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+
+ # test out contiguous in only last dim
+ out = np.ones((10, 2), dtype=float)
+ c = self.matmul(a, b, out=out[::2, :])
+ assert_array_equal(c, tgt)
+
+ # test transposes of out, args
+ out = np.ones((5, 2), dtype=float)
+ c = self.matmul(b.T, a.T, out=out.T)
+ assert_array_equal(out, tgt)
+
+ m1 = np.arange(15.).reshape(5, 3)
+ m2 = np.arange(21.).reshape(3, 7)
+ m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
+ vc = np.arange(10.)
+ vr = np.arange(6.)
+ m0 = np.zeros((3, 0))
+ @pytest.mark.parametrize('args', (
+ # matrix-matrix
+ (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
+ # matrix-matrix-transpose, contiguous and non
+ (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
+ (m3, m3.T), (m3.T, m3),
+ # matrix-matrix non-contiguous
+ (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
+ # vector-matrix, matrix-vector, contiguous
+ (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
+ # vector-matrix, matrix-vector, vector non-contiguous
+ (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
+ # vector-matrix, matrix-vector, matrix non-contiguous
+ (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
+ # vector-matrix, matrix-vector, both non-contiguous
+ (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
+ # size == 0
+ (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
+ ))
+ def test_dot_equivalent(self, args):
+ r1 = np.matmul(*args)
+ r2 = np.dot(*args)
+ assert_equal(r1, r2)
+
+ r3 = np.matmul(args[0].copy(), args[1].copy())
+ assert_equal(r1, r3)
+
+ def test_matmul_object(self):
+ import fractions
+
+ f = np.vectorize(fractions.Fraction)
+ def random_ints():
+ return np.random.randint(1, 1000, size=(10, 3, 3))
+ M1 = f(random_ints(), random_ints())
+ M2 = f(random_ints(), random_ints())
+
+ M3 = self.matmul(M1, M2)
+
+ [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
+
+ assert_allclose(N3, self.matmul(N1, N2))
+
+ def test_matmul_object_type_scalar(self):
+ from fractions import Fraction as F
+ v = np.array([F(2,3), F(5,7)])
+ res = self.matmul(v, v)
+ assert_(type(res) is F)
+
+ def test_matmul_empty(self):
+ a = np.empty((3, 0), dtype=object)
+ b = np.empty((0, 3), dtype=object)
+ c = np.zeros((3, 3))
+ assert_array_equal(np.matmul(a, b), c)
+
+ def test_matmul_exception_multiply(self):
+ # test that matmul fails if `__mul__` is missing
+ class add_not_multiply():
+ def __add__(self, other):
+ return self
+ a = np.full((3,3), add_not_multiply())
+ with assert_raises(TypeError):
+ b = np.matmul(a, a)
+
+ def test_matmul_exception_add(self):
+ # test that matmul fails if `__add__` is missing
+ class multiply_not_add():
+ def __mul__(self, other):
+ return self
+ a = np.full((3,3), multiply_not_add())
+ with assert_raises(TypeError):
+ b = np.matmul(a, a)
+
+ def test_matmul_bool(self):
+ # gh-14439
+ a = np.array([[1, 0],[1, 1]], dtype=bool)
+ assert np.max(a.view(np.uint8)) == 1
+ b = np.matmul(a, a)
+ # matmul with boolean output should always be 0, 1
+ assert np.max(b.view(np.uint8)) == 1
+
+ rg = np.random.default_rng(np.random.PCG64(43))
+ d = rg.integers(2, size=4*5, dtype=np.int8)
+ d = d.reshape(4, 5) > 0
+ out1 = np.matmul(d, d.reshape(5, 4))
+ out2 = np.dot(d, d.reshape(5, 4))
+ assert_equal(out1, out2)
+
+ c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
+ assert not np.any(c)
+
+
+class TestMatmulOperator(MatmulCommon):
+ import operator
+ matmul = operator.matmul
+
+ def test_array_priority_override(self):
+
+ class A:
+ __array_priority__ = 1000
+
+ def __matmul__(self, other):
+ return "A"
+
+ def __rmatmul__(self, other):
+ return "A"
+
+ a = A()
+ b = np.ones(2)
+ assert_equal(self.matmul(a, b), "A")
+ assert_equal(self.matmul(b, a), "A")
+
+ def test_matmul_raises(self):
+ assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+ assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+ assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc'))
+
+def test_matmul_inplace():
+ # It would be nice to support in-place matmul eventually, but for now
+ # we don't have a working implementation, so it is better to error out
+ # and nudge people toward writing "a = a @ b".
+ a = np.eye(3)
+ b = np.eye(3)
+ assert_raises(TypeError, a.__imatmul__, b)
+ import operator
+ assert_raises(TypeError, operator.imatmul, a, b)
+ assert_raises(TypeError, exec, "a @= b", globals(), locals())
+
+def test_matmul_axes():
+ a = np.arange(3*4*5).reshape(3, 4, 5)
+ c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+ assert c.shape == (3, 4, 4)
+ d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+ assert d.shape == (4, 4, 3)
+ e = np.swapaxes(d, 0, 2)
+ assert_array_equal(e, c)
+ f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+ assert f.shape == (4, 5)
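+ # Reading the axes arguments above: each (i, j) pair names the two
+ # axes of an operand that form its matrix. For c, the first a uses
+ # its (-2, -1) axes as a (4, 5) matrix, the second a is effectively
+ # transposed via (-1, -2) into a (5, 4) matrix, and the result's
+ # matrix axes are placed at (1, 2), giving shape (3, 4, 4).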
+
+
+class TestInner:
+
+ def test_inner_type_mismatch(self):
+ c = 1.
+ A = np.array((1,1), dtype='i,i')
+
+ assert_raises(TypeError, np.inner, c, A)
+ assert_raises(TypeError, np.inner, A, c)
+
+ def test_inner_scalar_and_vector(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ vec = np.array([1, 2], dtype=dt)
+ desired = np.array([3, 6], dtype=dt)
+ assert_equal(np.inner(vec, sca), desired)
+ assert_equal(np.inner(sca, vec), desired)
+
+ def test_vecself(self):
+ # Ticket 844.
+ # Inner product of a vector with itself used to segfault or give a
+ # meaningless result
+ a = np.zeros(shape=(1, 80), dtype=np.float64)
+ p = np.inner(a, a)
+ assert_almost_equal(p, 0, decimal=14)
+
+ def test_inner_product_with_various_contiguities(self):
+ # github issue 6532
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ # check an inner product involving a matrix transpose
+ A = np.array([[1, 2], [3, 4]], dtype=dt)
+ B = np.array([[1, 3], [2, 4]], dtype=dt)
+ C = np.array([1, 1], dtype=dt)
+ desired = np.array([4, 6], dtype=dt)
+ assert_equal(np.inner(A.T, C), desired)
+ assert_equal(np.inner(C, A.T), desired)
+ assert_equal(np.inner(B, C), desired)
+ assert_equal(np.inner(C, B), desired)
+ # check a matrix product
+ desired = np.array([[7, 10], [15, 22]], dtype=dt)
+ assert_equal(np.inner(A, B), desired)
+ # check the syrk vs. gemm paths
+ desired = np.array([[5, 11], [11, 25]], dtype=dt)
+ assert_equal(np.inner(A, A), desired)
+ assert_equal(np.inner(A, A.copy()), desired)
+ # check an inner product involving an aliased and reversed view
+ a = np.arange(5).astype(dt)
+ b = a[::-1]
+ desired = np.array(10, dtype=dt).item()
+ assert_equal(np.inner(b, a), desired)
+
+ def test_3d_tensor(self):
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ a = np.arange(24).reshape(2,3,4).astype(dt)
+ b = np.arange(24, 48).reshape(2,3,4).astype(dt)
+ desired = np.array(
+ [[[[ 158, 182, 206],
+ [ 230, 254, 278]],
+
+ [[ 566, 654, 742],
+ [ 830, 918, 1006]],
+
+ [[ 974, 1126, 1278],
+ [1430, 1582, 1734]]],
+
+ [[[1382, 1598, 1814],
+ [2030, 2246, 2462]],
+
+ [[1790, 2070, 2350],
+ [2630, 2910, 3190]],
+
+ [[2198, 2542, 2886],
+ [3230, 3574, 3918]]]]
+ ).astype(dt)
+ assert_equal(np.inner(a, b), desired)
+ assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
+
+
+class TestChoose:
+ def setup_method(self):
+ self.x = 2*np.ones((3,), dtype=int)
+ self.y = 3*np.ones((3,), dtype=int)
+ self.x2 = 2*np.ones((2, 3), dtype=int)
+ self.y2 = 3*np.ones((2, 3), dtype=int)
+ self.ind = [0, 0, 1]
+
+ def test_basic(self):
+ A = np.choose(self.ind, (self.x, self.y))
+ assert_equal(A, [2, 2, 3])
+
+ def test_broadcast1(self):
+ A = np.choose(self.ind, (self.x2, self.y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ def test_broadcast2(self):
+ A = np.choose(self.ind, (self.x, self.y2))
+ assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+
+ @pytest.mark.parametrize("ops",
+ [(1000, np.array([1], dtype=np.uint8)),
+ (-1, np.array([1], dtype=np.uint8)),
+ (1., np.float32(3)),
+ (1., np.array([3], dtype=np.float32))],)
+ def test_output_dtype(self, ops):
+ expected_dt = np.result_type(*ops)
+ assert np.choose([0], ops).dtype == expected_dt
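+
+ # For reference, a sketch of the promotion rule exercised above, assuming
+ # the legacy value-based casting of this NumPy era:
+ #
+ # >>> np.result_type(1000, np.array([1], dtype=np.uint8))
+ # dtype('uint16')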
+
+
+class TestRepeat:
+ def setup_method(self):
+ self.m = np.array([1, 2, 3, 4, 5, 6])
+ self.m_rect = self.m.reshape((2, 3))
+
+ def test_basic(self):
+ A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
+ assert_equal(A, [1, 2, 2, 2, 3,
+ 3, 4, 5, 6, 6])
+
+ def test_broadcast1(self):
+ A = np.repeat(self.m, 2)
+ assert_equal(A, [1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6])
+
+ def test_axis_spec(self):
+ A = np.repeat(self.m_rect, [2, 1], axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6]])
+
+ A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
+ assert_equal(A, [[1, 2, 2, 2, 3, 3],
+ [4, 5, 5, 5, 6, 6]])
+
+ def test_broadcast2(self):
+ A = np.repeat(self.m_rect, 2, axis=0)
+ assert_equal(A, [[1, 2, 3],
+ [1, 2, 3],
+ [4, 5, 6],
+ [4, 5, 6]])
+
+ A = np.repeat(self.m_rect, 2, axis=1)
+ assert_equal(A, [[1, 1, 2, 2, 3, 3],
+ [4, 4, 5, 5, 6, 6]])
+
+
+# TODO: test for multidimensional
+NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
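+# In the helpers below, bounds such as [-1, 1] give the neighborhood extent
+# relative to the current element (here: 3-element windows); out-of-bounds
+# positions are filled according to the mode above.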
+
+
+@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
+class TestNeighborhoodIter:
+ # Simple, 2d tests
+ def test_simple2d(self, dt):
+ # Test zero and one padding for simple data type
+ x = np.array([[0, 1], [2, 3]], dtype=dt)
+ r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
+ np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
+ np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
+ np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
+ np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
+ np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
+ np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
+ np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
+ np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+ np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
+ assert_array_equal(l, r)
+
+ # Test with start in the middle
+ r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+ np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2)
+ assert_array_equal(l, r)
+
+ def test_mirror2d(self, dt):
+ x = np.array([[0, 1], [2, 3]], dtype=dt)
+ r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
+ np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
+ np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
+ np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Simple, 1d tests
+ def test_simple(self, dt):
+ # Test padding with constant values
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[0], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-1, 1], x[4], NEIGH_MODE['constant'])
+ assert_array_equal(l, r)
+
+ # Test mirror modes
+ def test_mirror(self, dt):
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-2, 2], x[1], NEIGH_MODE['mirror'])
+ assert_([i.dtype == dt for i in l])
+ assert_array_equal(l, r)
+
+ # Circular mode
+ def test_circular(self, dt):
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
+ l = _multiarray_tests.test_neighborhood_iterator(
+ x, [-2, 2], x[0], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+
+# Test stacking neighborhood iterators
+class TestStackedNeighborhoodIter:
+ # Simple, 1d test: stacking 2 constant-padded neigh iterators
+ def test_simple_const(self):
+ dt = np.float64
+ # Test zero and one padding for simple data type
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0], dtype=dt),
+ np.array([0], dtype=dt),
+ np.array([1], dtype=dt),
+ np.array([2], dtype=dt),
+ np.array([3], dtype=dt),
+ np.array([0], dtype=dt),
+ np.array([0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [np.array([1, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+ # mirror padding
+ def test_simple_mirror(self):
+ dt = np.float64
+ # Stacking zero on top of mirror
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 1], dtype=dt),
+ np.array([1, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 3], dtype=dt),
+ np.array([3, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero: 2nd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 3], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero: 3rd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 0, 0, 1, 2], dtype=dt),
+ np.array([0, 0, 1, 2, 3], dtype=dt),
+ np.array([0, 1, 2, 3, 0], dtype=dt),
+ np.array([1, 2, 3, 0, 0], dtype=dt),
+ np.array([2, 3, 0, 0, 3], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+ # circular padding
+ def test_simple_circular(self):
+ dt = np.float64
+ # Stacking zero on top of circular
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 3, 1], dtype=dt),
+ np.array([3, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 1], dtype=dt),
+ np.array([3, 1, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking circular on top of zero
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+ # Stacking circular on top of zero: 2nd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+ # Stacking circular on top of zero: 3rd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([3, 0, 0, 1, 2], dtype=dt),
+ np.array([0, 0, 1, 2, 3], dtype=dt),
+ np.array([0, 1, 2, 3, 0], dtype=dt),
+ np.array([1, 2, 3, 0, 0], dtype=dt),
+ np.array([2, 3, 0, 0, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+ # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
+ # being strictly within the array
+ def test_simple_strict_within(self):
+ dt = np.float64
+ # Stacking zero on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 0], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 3], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 1], dtype=dt)]
+ l = _multiarray_tests.test_neighborhood_iterator_oob(
+ x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+class TestWarnings:
+
+ def test_complex_warning(self):
+ x = np.array([1, 2])
+ y = np.array([1-2j, 1+2j])
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", np.ComplexWarning)
+ assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
+ assert_equal(x, [1, 2])
+
+
+class TestMinScalarType:
+
+ def test_unsigned_shortshort(self):
+ dt = np.min_scalar_type(2**8-1)
+ wanted = np.dtype('uint8')
+ assert_equal(wanted, dt)
+
+ def test_unsigned_short(self):
+ dt = np.min_scalar_type(2**16-1)
+ wanted = np.dtype('uint16')
+ assert_equal(wanted, dt)
+
+ def test_unsigned_int(self):
+ dt = np.min_scalar_type(2**32-1)
+ wanted = np.dtype('uint32')
+ assert_equal(wanted, dt)
+
+ def test_unsigned_longlong(self):
+ dt = np.min_scalar_type(2**63-1)
+ wanted = np.dtype('uint64')
+ assert_equal(wanted, dt)
+
+ def test_object(self):
+ dt = np.min_scalar_type(2**64)
+ wanted = np.dtype('O')
+ assert_equal(wanted, dt)
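+
+ # A sketch of the rule exercised above: the smallest dtype that can hold
+ # the value wins, preferring unsigned types for non-negative values and
+ # falling back to object for values outside every integer dtype:
+ #
+ # >>> np.min_scalar_type(-1)
+ # dtype('int8')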
+
+
+from numpy.core._internal import _dtype_from_pep3118
+
+
+class TestPEP3118Dtype:
+ def _check(self, spec, wanted):
+ dt = np.dtype(wanted)
+ actual = _dtype_from_pep3118(spec)
+ assert_equal(actual, dt,
+ err_msg="spec %r != dtype %r" % (spec, wanted))
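+
+ # The specs below use PEP 3118 struct syntax (roughly): '@' native byte
+ # order with native alignment, '=' native order and standard size without
+ # alignment, '^' native order and size without alignment, 'T{...}' a
+ # nested struct, ':name:' a field name (a NumPy extension) and 'x' a
+ # padding byte.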
+
+ def test_native_padding(self):
+ align = np.dtype('i').alignment
+ for j in range(8):
+ if j == 0:
+ s = 'bi'
+ else:
+ s = 'b%dxi' % j
+ self._check('@'+s, {'f0': ('i1', 0),
+ 'f1': ('i', align*(1 + j//align))})
+ self._check('='+s, {'f0': ('i1', 0),
+ 'f1': ('i', 1+j)})
+
+ def test_native_padding_2(self):
+ # Native padding should work also for structs and sub-arrays
+ self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
+ self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
+
+ def test_trailing_padding(self):
+ # Trailing padding should be included, *and*, the item size
+ # should match the alignment if in aligned mode
+ align = np.dtype('i').alignment
+ size = np.dtype('i').itemsize
+
+ def aligned(n):
+ return align*(1 + (n-1)//align)
+
+ base = dict(formats=['i'], names=['f0'])
+
+ self._check('ix', dict(itemsize=aligned(size + 1), **base))
+ self._check('ixx', dict(itemsize=aligned(size + 2), **base))
+ self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
+ self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
+ self._check('i7x', dict(itemsize=aligned(size + 7), **base))
+
+ self._check('^ix', dict(itemsize=size + 1, **base))
+ self._check('^ixx', dict(itemsize=size + 2, **base))
+ self._check('^ixxx', dict(itemsize=size + 3, **base))
+ self._check('^ixxxx', dict(itemsize=size + 4, **base))
+ self._check('^i7x', dict(itemsize=size + 7, **base))
+
+ def test_native_padding_3(self):
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'),
+ ('sub', np.dtype('b,i')), ('c', 'i')],
+ align=True)
+ self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
+
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+ ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+ self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
+
+ def test_padding_with_array_inside_struct(self):
+ dt = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
+ ('d', 'i')],
+ align=True)
+ self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
+
+ def test_byteorder_inside_struct(self):
+ # The byte order after @T{=i} should be '=', not '@'.
+ # Check this by noting the absence of native alignment.
+ self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
+ 'f1': ('i', 5)})
+
+ def test_intra_padding(self):
+ # Natively aligned sub-arrays may require some internal padding
+ align = np.dtype('i').alignment
+ size = np.dtype('i').itemsize
+
+ def aligned(n):
+ return (align*(1 + (n-1)//align))
+
+ self._check('(3)T{ix}', (dict(
+ names=['f0'],
+ formats=['i'],
+ offsets=[0],
+ itemsize=aligned(size + 1)
+ ), (3,)))
+
+ def test_char_vs_string(self):
+ dt = np.dtype('c')
+ self._check('c', dt)
+
+ dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
+ self._check('4c4s', dt)
+
+ def test_field_order(self):
+ # gh-9053 - previously, we relied on dictionary key order
+ self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
+ self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
+
+ def test_unnamed_fields(self):
+ self._check('ii', [('f0', 'i'), ('f1', 'i')])
+ self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
+
+ self._check('i', 'i')
+ self._check('i:f0:', [('f0', 'i')])
+
+
+class TestNewBufferProtocol:
+ """ Test PEP3118 buffers """
+
+ def _check_roundtrip(self, obj):
+ obj = np.asarray(obj)
+ x = memoryview(obj)
+ y = np.asarray(x)
+ y2 = np.array(x)
+ assert_(not y.flags.owndata)
+ assert_(y2.flags.owndata)
+
+ assert_equal(y.dtype, obj.dtype)
+ assert_equal(y.shape, obj.shape)
+ assert_array_equal(obj, y)
+
+ assert_equal(y2.dtype, obj.dtype)
+ assert_equal(y2.shape, obj.shape)
+ assert_array_equal(obj, y2)
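+
+ # In essence the roundtrip is (a sketch):
+ #
+ # >>> obj = np.arange(3)
+ # >>> np.asarray(memoryview(obj)).flags.owndata # view, shares memory
+ # False
+ # >>> np.array(memoryview(obj)).flags.owndata # forced copy
+ # True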
+
+ def test_roundtrip(self):
+ x = np.array([1, 2, 3, 4, 5], dtype='i4')
+ self._check_roundtrip(x)
+
+ x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ self._check_roundtrip(x)
+
+ x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+ self._check_roundtrip(x)
+
+ dt = [('a', 'b'),
+ ('b', 'h'),
+ ('c', 'i'),
+ ('d', 'l'),
+ ('dx', 'q'),
+ ('e', 'B'),
+ ('f', 'H'),
+ ('g', 'I'),
+ ('h', 'L'),
+ ('hx', 'Q'),
+ ('i', np.single),
+ ('j', np.double),
+ ('k', np.longdouble),
+ ('ix', np.csingle),
+ ('jx', np.cdouble),
+ ('kx', np.clongdouble),
+ ('l', 'S4'),
+ ('m', 'U4'),
+ ('n', 'V3'),
+ ('o', '?'),
+ ('p', np.half),
+ ]
+ x = np.array(
+ [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ b'aaaa', 'bbbb', b'xxx', True, 1.0)],
+ dtype=dt)
+ self._check_roundtrip(x)
+
+ x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
+ self._check_roundtrip(x)
+
+ x = np.array([1, 2, 3], dtype='>i2')
+ self._check_roundtrip(x)
+
+ x = np.array([1, 2, 3], dtype='<i2')
+ self._check_roundtrip(x)
+
+ x = np.array([1, 2, 3], dtype='>i4')
+ self._check_roundtrip(x)
+
+ x = np.array([1, 2, 3], dtype='<i4')
+ self._check_roundtrip(x)
+
+ # check long long can be represented as non-native
+ x = np.array([1, 2, 3], dtype='>q')
+ self._check_roundtrip(x)
+
+ # Native-only data types can be passed through the buffer interface
+ # only in native byte order
+ if sys.byteorder == 'little':
+ x = np.array([1, 2, 3], dtype='>g')
+ assert_raises(ValueError, self._check_roundtrip, x)
+ x = np.array([1, 2, 3], dtype='<g')
+ self._check_roundtrip(x)
+ else:
+ x = np.array([1, 2, 3], dtype='>g')
+ self._check_roundtrip(x)
+ x = np.array([1, 2, 3], dtype='<g')
+ assert_raises(ValueError, self._check_roundtrip, x)
+
+ def test_roundtrip_half(self):
+ half_list = [
+ 1.0,
+ -2.0,
+ 6.5504 * 10**4, # (max half precision)
+ 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
+ 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
+ 0.0,
+ -0.0,
+ float('+inf'),
+ float('-inf'),
+ 0.333251953125, # ~= 1/3
+ ]
+
+ x = np.array(half_list, dtype='>e')
+ self._check_roundtrip(x)
+ x = np.array(half_list, dtype='<e')
+ self._check_roundtrip(x)
+
+ def test_roundtrip_single_types(self):
+ for typ in np.sctypeDict.values():
+ dtype = np.dtype(typ)
+
+ if dtype.char in 'Mm':
+ # datetimes cannot be used in buffers
+ continue
+ if dtype.char == 'V':
+ # skip void
+ continue
+
+ x = np.zeros(4, dtype=dtype)
+ self._check_roundtrip(x)
+
+ if dtype.char not in 'qQgG':
+ dt = dtype.newbyteorder('<')
+ x = np.zeros(4, dtype=dt)
+ self._check_roundtrip(x)
+
+ dt = dtype.newbyteorder('>')
+ x = np.zeros(4, dtype=dt)
+ self._check_roundtrip(x)
+
+ def test_roundtrip_scalar(self):
+ # Issue #4015.
+ self._check_roundtrip(0)
+
+ def test_invalid_buffer_format(self):
+ # datetime64 cannot be used fully in a buffer yet;
+ # this should be fixed in the next NumPy major release
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(3, dt)
+ assert_raises((ValueError, BufferError), memoryview, a)
+ assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
+ def test_export_simple_1d(self):
+ x = np.array([1, 2, 3, 4, 5], dtype='i')
+ y = memoryview(x)
+ assert_equal(y.format, 'i')
+ assert_equal(y.shape, (5,))
+ assert_equal(y.ndim, 1)
+ assert_equal(y.strides, (4,))
+ assert_equal(y.suboffsets, ())
+ assert_equal(y.itemsize, 4)
+
+ def test_export_simple_nd(self):
+ x = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ y = memoryview(x)
+ assert_equal(y.format, 'd')
+ assert_equal(y.shape, (2, 2))
+ assert_equal(y.ndim, 2)
+ assert_equal(y.strides, (16, 8))
+ assert_equal(y.suboffsets, ())
+ assert_equal(y.itemsize, 8)
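+
+ # The strides follow from C order: for a (2, 2) float64 array the row
+ # stride is 2 * itemsize = 16 bytes and the column stride is 8 bytes.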
+
+ def test_export_discontiguous(self):
+ x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
+ y = memoryview(x)
+ assert_equal(y.format, 'f')
+ assert_equal(y.shape, (3, 3))
+ assert_equal(y.ndim, 2)
+ assert_equal(y.strides, (36, 4))
+ assert_equal(y.suboffsets, ())
+ assert_equal(y.itemsize, 4)
+
+ def test_export_record(self):
+ dt = [('a', 'b'),
+ ('b', 'h'),
+ ('c', 'i'),
+ ('d', 'l'),
+ ('dx', 'q'),
+ ('e', 'B'),
+ ('f', 'H'),
+ ('g', 'I'),
+ ('h', 'L'),
+ ('hx', 'Q'),
+ ('i', np.single),
+ ('j', np.double),
+ ('k', np.longdouble),
+ ('ix', np.csingle),
+ ('jx', np.cdouble),
+ ('kx', np.clongdouble),
+ ('l', 'S4'),
+ ('m', 'U4'),
+ ('n', 'V3'),
+ ('o', '?'),
+ ('p', np.half),
+ ]
+ x = np.array(
+ [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ b'aaaa', 'bbbb', b' ', True, 1.0)],
+ dtype=dt)
+ y = memoryview(x)
+ assert_equal(y.shape, (1,))
+ assert_equal(y.ndim, 1)
+ assert_equal(y.suboffsets, ())
+
+ sz = sum([np.dtype(b).itemsize for a, b in dt])
+ if np.dtype('l').itemsize == 4:
+ assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+ else:
+ assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
+ # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides
+ if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
+ assert_equal(y.strides, (sz,))
+ assert_equal(y.itemsize, sz)
+
+ def test_export_subarray(self):
+ x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
+ y = memoryview(x)
+ assert_equal(y.format, 'T{(2,2)i:a:}')
+ assert_equal(y.shape, ())
+ assert_equal(y.ndim, 0)
+ assert_equal(y.strides, ())
+ assert_equal(y.suboffsets, ())
+ assert_equal(y.itemsize, 16)
+
+ def test_export_endian(self):
+ x = np.array([1, 2, 3], dtype='>i')
+ y = memoryview(x)
+ if sys.byteorder == 'little':
+ assert_equal(y.format, '>i')
+ else:
+ assert_equal(y.format, 'i')
+
+ x = np.array([1, 2, 3], dtype='<i')
+ y = memoryview(x)
+ if sys.byteorder == 'little':
+ assert_equal(y.format, 'i')
+ else:
+ assert_equal(y.format, '<i')
+
+ def test_export_flags(self):
+ # Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
+ assert_raises(ValueError,
+ _multiarray_tests.get_buffer_info,
+ np.arange(5)[::2], ('SIMPLE',))
+
+ @pytest.mark.parametrize(["obj", "error"], [
+ pytest.param(np.array([1, 2], dtype=rational), ValueError, id="array"),
+ pytest.param(rational(1, 2), TypeError, id="scalar")])
+ def test_export_and_pickle_user_dtype(self, obj, error):
+ # User dtypes should export successfully when FORMAT was not requested.
+ with pytest.raises(error):
+ _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO", "FORMAT"))
+
+ _multiarray_tests.get_buffer_info(obj, ("STRIDED_RO",))
+
+ # This is currently also necessary to implement pickling:
+ pickle_obj = pickle.dumps(obj)
+ res = pickle.loads(pickle_obj)
+ assert_array_equal(res, obj)
+
+ def test_padding(self):
+ for j in range(8):
+ x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
+ self._check_roundtrip(x)
+
+ def test_reference_leak(self):
+ if HAS_REFCOUNT:
+ count_1 = sys.getrefcount(np.core._internal)
+ a = np.zeros(4)
+ b = memoryview(a)
+ c = np.asarray(b)
+ if HAS_REFCOUNT:
+ count_2 = sys.getrefcount(np.core._internal)
+ assert_equal(count_1, count_2)
+ del c # avoid pyflakes unused variable warning.
+
+ def test_padded_struct_array(self):
+ dt1 = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
+ align=True)
+ x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
+ self._check_roundtrip(x1)
+
+ dt2 = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
+ align=True)
+ x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
+ self._check_roundtrip(x2)
+
+ dt3 = np.dtype(
+ [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
+ ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
+ x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
+ self._check_roundtrip(x3)
+
+ @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
+ def test_relaxed_strides(self, c=np.ones((1, 10, 10), dtype='i8')):
+ # Note: c defined as parameter so that it is persistent and leak
+ # checks will notice gh-16934 (buffer info cache leak).
+ c.strides = (-1, 80, 8) # strides need to be fixed at export
+
+ assert_(memoryview(c).strides == (800, 80, 8))
+
+ # Writing C-contiguous data to a BytesIO buffer should work
+ fd = io.BytesIO()
+ fd.write(c.data)
+
+ fortran = c.T
+ assert_(memoryview(fortran).strides == (8, 80, 800))
+
+ arr = np.ones((1, 10))
+ if arr.flags.f_contiguous:
+ shape, strides = _multiarray_tests.get_buffer_info(
+ arr, ['F_CONTIGUOUS'])
+ assert_(strides[0] == 8)
+ arr = np.ones((10, 1), order='F')
+ shape, strides = _multiarray_tests.get_buffer_info(
+ arr, ['C_CONTIGUOUS'])
+ assert_(strides[-1] == 8)
+
+ @pytest.mark.valgrind_error(reason="leaks buffer info cache temporarily.")
+ @pytest.mark.skipif(not np.ones((10, 1), order="C").flags.f_contiguous,
+ reason="Test is unnecessary (but fails) without relaxed strides.")
+ def test_relaxed_strides_buffer_info_leak(self, arr=np.ones((1, 10))):
+ """Test that alternating export of C- and F-order buffers from
+ an array which is both C- and F-order when relaxed strides is
+ active works.
+ This test defines array in the signature to ensure leaking more
+ references every time the test is run (catching the leak with
+ pytest-leaks).
+ """
+ for i in range(10):
+ _, s = _multiarray_tests.get_buffer_info(arr, ['F_CONTIGUOUS'])
+ assert s == (8, 8)
+ _, s = _multiarray_tests.get_buffer_info(arr, ['C_CONTIGUOUS'])
+ assert s == (80, 8)
+
+ def test_out_of_order_fields(self):
+ dt = np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['one', 'two'],
+ offsets=[4, 0],
+ itemsize=8
+ ))
+
+ # fields stored out of offset order cannot be represented by PEP3118
+ arr = np.empty(1, dt)
+ with assert_raises(ValueError):
+ memoryview(arr)
+
+ def test_max_dims(self):
+ a = np.ones((1,) * 32)
+ self._check_roundtrip(a)
+
+ @pytest.mark.slow
+ def test_error_too_many_dims(self):
+ def make_ctype(shape, scalar_type):
+ t = scalar_type
+ for dim in shape[::-1]:
+ t = dim * t
+ return t
+
+ # construct a memoryview with 33 dimensions
+ c_u8_33d = make_ctype((1,)*33, ctypes.c_uint8)
+ m = memoryview(c_u8_33d())
+ assert_equal(m.ndim, 33)
+
+ assert_raises_regex(
+ RuntimeError, "ndim",
+ np.array, m)
+
+ # The above seems to create some deep cycles, clean them up for
+ # easier reference count debugging:
+ del c_u8_33d, m
+ for i in range(33):
+ if gc.collect() == 0:
+ break
+
+ def test_error_pointer_type(self):
+ # gh-6741
+ m = memoryview(ctypes.pointer(ctypes.c_uint8()))
+ assert_('&' in m.format)
+
+ assert_raises_regex(
+ ValueError, "format string",
+ np.array, m)
+
+ def test_error_message_unsupported(self):
+ # wchar has no corresponding numpy type - if this changes in future, we
+ # need a better way to construct an invalid memoryview format.
+ t = ctypes.c_wchar * 4
+ with assert_raises(ValueError) as cm:
+ np.array(t())
+
+ exc = cm.exception
+ with assert_raises_regex(
+ NotImplementedError,
+ r"Unrepresentable .* 'u' \(UCS-2 strings\)"
+ ):
+ raise exc.__cause__
+
+ def test_ctypes_integer_via_memoryview(self):
+ # gh-11150, due to bpo-10746
+ for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
+ value = c_integer(42)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ np.asarray(value)
+
+ def test_ctypes_struct_via_memoryview(self):
+ # gh-10528
+ class foo(ctypes.Structure):
+ _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+ f = foo(a=1, b=2)
+
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
+ arr = np.asarray(f)
+
+ assert_equal(arr['a'], 1)
+ assert_equal(arr['b'], 2)
+ f.a = 3
+ assert_equal(arr['a'], 3)
+
+ @pytest.mark.parametrize("obj", [np.ones(3), np.ones(1, dtype="i,i")[()]])
+ def test_error_if_stored_buffer_info_is_corrupted(self, obj):
+ """
+ If a user extends a NumPy array before 1.20 and then runs it on
+ NumPy 1.20+, a C-subclassed array might in theory modify the new
+ buffer-info field. This checks that an error is raised if that
+ happens (for buffer export) and that an error is written on delete.
+ It is a sanity check to help users transition to safe code; it may
+ be deleted at any point.
+ """
+ # corrupt buffer info:
+ _multiarray_tests.corrupt_or_fix_bufferinfo(obj)
+ name = type(obj)
+ with pytest.raises(RuntimeError,
+ match=f".*{name} appears to be C subclassed"):
+ memoryview(obj)
+ # Fix buffer info again before we delete (or we lose the memory)
+ _multiarray_tests.corrupt_or_fix_bufferinfo(obj)
+
+ def test_no_suboffsets(self):
+ try:
+ import _testbuffer
+ except ImportError:
+ raise pytest.skip("_testbuffer is not available")
+
+ for shape in [(2, 3), (2, 3, 4)]:
+ data = list(range(np.prod(shape)))
+ buffer = _testbuffer.ndarray(data, shape, format='i',
+ flags=_testbuffer.ND_PIL)
+ msg = "NumPy currently does not support.*suboffsets"
+ with pytest.raises(BufferError, match=msg):
+ np.asarray(buffer)
+ with pytest.raises(BufferError, match=msg):
+ np.asarray([buffer])
+
+ # Also check (unrelated and more limited but similar) frombuffer:
+ with pytest.raises(BufferError):
+ np.frombuffer(buffer)
+
+
+class TestArrayCreationCopyArgument(object):
+
+ class RaiseOnBool:
+
+ def __bool__(self):
+ raise ValueError
+
+ true_vals = [True, np._CopyMode.ALWAYS, np.True_]
+ false_vals = [False, np._CopyMode.IF_NEEDED, np.False_]
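+
+ # For context: np._CopyMode is a private enum (added around NumPy 1.22)
+ # with ALWAYS (like copy=True), IF_NEEDED (like copy=False) and NEVER,
+ # which raises ValueError whenever avoiding a copy is impossible.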
+
+ def test_scalars(self):
+ # Test both numpy and python scalars
+ for dtype in np.typecodes["All"]:
+ arr = np.zeros((), dtype=dtype)
+ scalar = arr[()]
+ pyscalar = arr.item(0)
+
+ # Test never-copy raises error:
+ assert_raises(ValueError, np.array, scalar,
+ copy=np._CopyMode.NEVER)
+ assert_raises(ValueError, np.array, pyscalar,
+ copy=np._CopyMode.NEVER)
+ assert_raises(ValueError, np.array, pyscalar,
+ copy=self.RaiseOnBool())
+ assert_raises(ValueError, _multiarray_tests.npy_ensurenocopy,
+ [1])
+ # Casting with a dtype (to unsigned integers) can be special:
+ with pytest.raises(ValueError):
+ np.array(pyscalar, dtype=np.int64, copy=np._CopyMode.NEVER)
+
+ def test_compatible_cast(self):
+
+ # Some types are compatible even though they are different, no
+ # copy is necessary for them. This is mostly true for some integers
+ def int_types(byteswap=False):
+ int_types = (np.typecodes["Integer"] +
+ np.typecodes["UnsignedInteger"])
+ for int_type in int_types:
+ yield np.dtype(int_type)
+ if byteswap:
+ yield np.dtype(int_type).newbyteorder()
+
+ for int1 in int_types():
+ for int2 in int_types(True):
+ arr = np.arange(10, dtype=int1)
+
+ for copy in self.true_vals:
+ res = np.array(arr, copy=copy, dtype=int2)
+ assert res is not arr and res.flags.owndata
+ assert_array_equal(res, arr)
+
+ if int1 == int2:
+ # Casting is not necessary, base check is sufficient here
+ for copy in self.false_vals:
+ res = np.array(arr, copy=copy, dtype=int2)
+ assert res is arr or res.base is arr
+
+ res = np.array(arr,
+ copy=np._CopyMode.NEVER,
+ dtype=int2)
+ assert res is arr or res.base is arr
+
+ else:
+ # Casting is necessary, assert copy works:
+ for copy in self.false_vals:
+ res = np.array(arr, copy=copy, dtype=int2)
+ assert res is not arr and res.flags.owndata
+ assert_array_equal(res, arr)
+
+ assert_raises(ValueError, np.array,
+ arr, copy=np._CopyMode.NEVER,
+ dtype=int2)
+ assert_raises(ValueError, np.array,
+ arr, copy=None,
+ dtype=int2)
+
+ def test_buffer_interface(self):
+
+ # Buffer interface gives direct memory access (no copy)
+ arr = np.arange(10)
+ view = memoryview(arr)
+
+ # Checking bases is a bit tricky since numpy creates another
+ # memoryview, so use may_share_memory.
+ for copy in self.true_vals:
+ res = np.array(view, copy=copy)
+ assert not np.may_share_memory(arr, res)
+ for copy in self.false_vals:
+ res = np.array(view, copy=copy)
+ assert np.may_share_memory(arr, res)
+ res = np.array(view, copy=np._CopyMode.NEVER)
+ assert np.may_share_memory(arr, res)
+
+ def test_array_interfaces(self):
+ # Array interface gives direct memory access (much like a memoryview)
+ base_arr = np.arange(10)
+
+ class ArrayLike:
+ __array_interface__ = base_arr.__array_interface__
+
+ arr = ArrayLike()
+
+ for copy, val in [(True, None), (np._CopyMode.ALWAYS, None),
+ (False, arr), (np._CopyMode.IF_NEEDED, arr),
+ (np._CopyMode.NEVER, arr)]:
+ res = np.array(arr, copy=copy)
+ assert res.base is val
+
+ def test___array__(self):
+ base_arr = np.arange(10)
+
+ class ArrayLike:
+ def __array__(self):
+ # __array__ should return a copy; numpy cannot know this,
+ # however.
+ return base_arr
+
+ arr = ArrayLike()
+
+ for copy in self.true_vals:
+ res = np.array(arr, copy=copy)
+ assert_array_equal(res, base_arr)
+ # An additional copy is currently forced by numpy in this case;
+ # you could argue that numpy does not trust the ArrayLike. This
+ # may be open for change:
+ assert res is not base_arr
+
+ for copy in self.false_vals:
+ res = np.array(arr, copy=copy)
+ assert_array_equal(res, base_arr)
+ assert res is base_arr # numpy trusts the ArrayLike
+
+ with pytest.raises(ValueError):
+ np.array(arr, copy=np._CopyMode.NEVER)
+
+ @pytest.mark.parametrize(
+ "arr", [np.ones(()), np.arange(81).reshape((9, 9))])
+ @pytest.mark.parametrize("order1", ["C", "F", None])
+ @pytest.mark.parametrize("order2", ["C", "F", "A", "K"])
+ def test_order_mismatch(self, arr, order1, order2):
+ # The order is the main (python side) reason that can cause
+ # a never-copy to fail.
+ # Prepare C-order, F-order and non-contiguous arrays:
+ arr = arr.copy(order1)
+ if order1 == "C":
+ assert arr.flags.c_contiguous
+ elif order1 == "F":
+ assert arr.flags.f_contiguous
+ elif arr.ndim != 0:
+ # Make array non-contiguous
+ arr = arr[::2, ::2]
+ assert not arr.flags.forc
+
+ # Whether a copy is necessary depends on the order of arr:
+ if order2 == "C":
+ no_copy_necessary = arr.flags.c_contiguous
+ elif order2 == "F":
+ no_copy_necessary = arr.flags.f_contiguous
+ else:
+ # Keeporder and Anyorder are OK with non-contiguous output.
+ # This is not consistent with the `astype` behaviour which
+ # enforces contiguity for "A". It is probably historic from when
+ # "K" did not exist.
+ no_copy_necessary = True
+
+ # Test it for both the array and a memoryview
+ for view in [arr, memoryview(arr)]:
+ for copy in self.true_vals:
+ res = np.array(view, copy=copy, order=order2)
+ assert res is not arr and res.flags.owndata
+ assert_array_equal(arr, res)
+
+ if no_copy_necessary:
+ for copy in self.false_vals:
+ res = np.array(view, copy=copy, order=order2)
+ # res.base.obj refers to the memoryview
+ if not IS_PYPY:
+ assert res is arr or res.base.obj is arr
+
+ res = np.array(view, copy=np._CopyMode.NEVER,
+ order=order2)
+ if not IS_PYPY:
+ assert res is arr or res.base.obj is arr
+ else:
+ for copy in self.false_vals:
+ res = np.array(arr, copy=copy, order=order2)
+ assert_array_equal(arr, res)
+ assert_raises(ValueError, np.array,
+ view, copy=np._CopyMode.NEVER,
+ order=order2)
+ assert_raises(ValueError, np.array,
+ view, copy=None,
+ order=order2)
+
+ def test_striding_not_ok(self):
+ arr = np.array([[1, 2, 4], [3, 4, 5]])
+ assert_raises(ValueError, np.array,
+ arr.T, copy=np._CopyMode.NEVER,
+ order='C')
+ assert_raises(ValueError, np.array,
+ arr.T, copy=np._CopyMode.NEVER,
+ order='C', dtype=np.int64)
+ assert_raises(ValueError, np.array,
+ arr, copy=np._CopyMode.NEVER,
+ order='F')
+ assert_raises(ValueError, np.array,
+ arr, copy=np._CopyMode.NEVER,
+ order='F', dtype=np.int64)
+
+
+class TestArrayAttributeDeletion:
+
+ def test_multiarray_writable_attributes_deletion(self):
+ # ticket #2046: should not segfault, but raise AttributeError
+ a = np.ones(2)
+ attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+ def test_multiarray_not_writable_attributes_deletion(self):
+ a = np.ones(2)
+ attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
+ "ctypes", "T", "__array_interface__", "__array_struct__",
+ "__array_priority__", "__array_finalize__"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+ def test_multiarray_flags_writable_attribute_deletion(self):
+ a = np.ones(2).flags
+ attr = ['writebackifcopy', 'updateifcopy', 'aligned', 'writeable']
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+ def test_multiarray_flags_not_writable_attribute_deletion(self):
+ a = np.ones(2).flags
+ attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
+ "owndata", "fnc", "forc", "behaved", "carray", "farray",
+ "num"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, a, s)
+
+
+class TestArrayInterface():
+ class Foo:
+ def __init__(self, value):
+ self.value = value
+ self.iface = {'typestr': 'f8'}
+
+ def __float__(self):
+ return float(self.value)
+
+ @property
+ def __array_interface__(self):
+ return self.iface
+
+
+ f = Foo(0.5)
+
+ @pytest.mark.parametrize('val, iface, expected', [
+ (f, {}, 0.5),
+ ([f], {}, [0.5]),
+ ([f, f], {}, [0.5, 0.5]),
+ (f, {'shape': ()}, 0.5),
+ (f, {'shape': None}, TypeError),
+ (f, {'shape': (1, 1)}, [[0.5]]),
+ (f, {'shape': (2,)}, ValueError),
+ (f, {'strides': ()}, 0.5),
+ (f, {'strides': (2,)}, ValueError),
+ (f, {'strides': 16}, TypeError),
+ ])
+ def test_scalar_interface(self, val, iface, expected):
+ # Test scalar coercion within the array interface
+ self.f.iface = {'typestr': 'f8'}
+ self.f.iface.update(iface)
+ if HAS_REFCOUNT:
+ pre_cnt = sys.getrefcount(np.dtype('f8'))
+ if isinstance(expected, type):
+ assert_raises(expected, np.array, val)
+ else:
+ result = np.array(val)
+ assert_equal(np.array(val), expected)
+ assert result.dtype == 'f8'
+ del result
+ if HAS_REFCOUNT:
+ post_cnt = sys.getrefcount(np.dtype('f8'))
+ assert_equal(pre_cnt, post_cnt)
+
+def test_interface_no_shape():
+ class ArrayLike:
+ array = np.array(1)
+ __array_interface__ = array.__array_interface__
+ assert_equal(np.array(ArrayLike()), 1)
+
+
+def test_array_interface_itemsize():
+ # See gh-6361
+ my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8], 'itemsize': 16})
+ a = np.ones(10, dtype=my_dtype)
+ descr_t = np.dtype(a.__array_interface__['descr'])
+ typestr_t = np.dtype(a.__array_interface__['typestr'])
+ assert_equal(descr_t.itemsize, typestr_t.itemsize)
+
+
+def test_array_interface_empty_shape():
+ # See gh-7994
+ arr = np.array([1, 2, 3])
+ interface1 = dict(arr.__array_interface__)
+ interface1['shape'] = ()
+
+ class DummyArray1:
+ __array_interface__ = interface1
+
+ # NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
+ # the interface data to bytes would invoke the bug this tests for, that
+ # __array_interface__ with shape=() is not allowed if the data is an object
+ # exposing the buffer interface
+ interface2 = dict(interface1)
+ interface2['data'] = arr[0].tobytes()
+
+ class DummyArray2:
+ __array_interface__ = interface2
+
+ arr1 = np.asarray(DummyArray1())
+ arr2 = np.asarray(DummyArray2())
+ arr3 = arr[:1].reshape(())
+ assert_equal(arr1, arr2)
+ assert_equal(arr1, arr3)
+
+def test_array_interface_offset():
+ arr = np.array([1, 2, 3], dtype='int32')
+ interface = dict(arr.__array_interface__)
+ interface['data'] = memoryview(arr)
+ interface['shape'] = (2,)
+ interface['offset'] = 4
+
+ class DummyArray:
+ __array_interface__ = interface
+
+ arr1 = np.asarray(DummyArray())
+ assert_equal(arr1, arr[1:])
+
+def test_array_interface_unicode_typestr():
+ arr = np.array([1, 2, 3], dtype='int32')
+ interface = dict(arr.__array_interface__)
+ interface['typestr'] = '\N{check mark}'
+
+ class DummyArray:
+ __array_interface__ = interface
+
+ # should not be UnicodeEncodeError
+ with pytest.raises(TypeError):
+ np.asarray(DummyArray())
+
+def test_flat_element_deletion():
+ it = np.ones(3).flat
+ try:
+ del it[1]
+ del it[1:2]
+ except TypeError:
+ pass
+ except Exception:
+ raise AssertionError
+
+
+def test_scalar_element_deletion():
+ a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
+ assert_raises(ValueError, a[0].__delitem__, 'x')
+
+
+class TestMapIter:
+ def test_mapiter(self):
+ # The actual tests are within the C code in
+ # multiarray/_multiarray_tests.c.src
+
+ a = np.arange(12).reshape((3, 4)).astype(float)
+ index = ([1, 1, 2, 0],
+ [0, 0, 2, 3])
+ vals = [50, 50, 30, 16]
+
+ _multiarray_tests.test_inplace_increment(a, index, vals)
+ assert_equal(a, [[0.00, 1., 2.0, 19.],
+ [104., 5., 6.0, 7.0],
+ [8.00, 9., 40., 11.]])
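+
+ # Note: unlike fancy-index assignment, the C helper accumulates
+ # repeated indices: (1, 0) appears twice above, so 4 + 50 + 50 == 104.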
+
+ b = np.arange(6).astype(float)
+ index = (np.array([1, 2, 0]),)
+ vals = [50, 4, 100.1]
+ _multiarray_tests.test_inplace_increment(b, index, vals)
+ assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
+
+
+class TestAsCArray:
+ def test_1darray(self):
+ array = np.arange(24, dtype=np.double)
+ from_c = _multiarray_tests.test_as_c_array(array, 3)
+ assert_equal(array[3], from_c)
+
+ def test_2darray(self):
+ array = np.arange(24, dtype=np.double).reshape(3, 8)
+ from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
+ assert_equal(array[2, 4], from_c)
+
+ def test_3darray(self):
+ array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
+ from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
+ assert_equal(array[1, 2, 3], from_c)
+
+
+class TestConversion:
+ def test_array_scalar_relational_operation(self):
+ # All integer
+ for dt1 in np.typecodes['AllInteger']:
+ assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
+
+ for dt2 in np.typecodes['AllInteger']:
+ assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+
+ # Unsigned integers
+ for dt1 in 'BHILQP':
+ assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
+
+ # Unsigned vs signed
+ for dt2 in 'bhilqp':
+ assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+
+ # Signed integers and floats
+ for dt1 in 'bhlqp' + np.typecodes['Float']:
+ assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+ assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
+
+ for dt2 in 'bhlqp' + np.typecodes['Float']:
+ assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
+ "type %s and %s failed" % (dt1, dt2))
+
+ def test_to_bool_scalar(self):
+ assert_equal(bool(np.array([False])), False)
+ assert_equal(bool(np.array([True])), True)
+ assert_equal(bool(np.array([[42]])), True)
+ assert_raises(ValueError, bool, np.array([1, 2]))
+
+ class NotConvertible:
+ def __bool__(self):
+ raise NotImplementedError
+
+ assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
+ assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
+ if IS_PYSTON:
+ pytest.skip("Pyston disables recursion checking")
+
+ self_containing = np.array([None])
+ self_containing[0] = self_containing
+
+ Error = RecursionError
+
+ assert_raises(Error, bool, self_containing) # previously stack overflow
+ self_containing[0] = None # resolve circular reference
+
+ def test_to_int_scalar(self):
+ # gh-9972 means that these aren't always the same
+ int_funcs = (int, lambda x: x.__int__())
+ for int_func in int_funcs:
+ assert_equal(int_func(np.array(0)), 0)
+ assert_equal(int_func(np.array([1])), 1)
+ assert_equal(int_func(np.array([[42]])), 42)
+ assert_raises(TypeError, int_func, np.array([1, 2]))
+
+ # gh-9972
+ assert_equal(4, int_func(np.array('4')))
+ assert_equal(5, int_func(np.bytes_(b'5')))
+ assert_equal(6, int_func(np.unicode_('6')))
+
+ # The delegation of int() to __trunc__ was deprecated in
+ # Python 3.11.
+ if sys.version_info < (3, 11):
+ class HasTrunc:
+ def __trunc__(self):
+ return 3
+ assert_equal(3, int_func(np.array(HasTrunc())))
+ assert_equal(3, int_func(np.array([HasTrunc()])))
+
+ class NotConvertible:
+ def __int__(self):
+ raise NotImplementedError
+ assert_raises(NotImplementedError,
+ int_func, np.array(NotConvertible()))
+ assert_raises(NotImplementedError,
+ int_func, np.array([NotConvertible()]))
+
+
+class TestWhere:
+ def test_basic(self):
+ dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
+ np.longdouble, np.clongdouble]
+ for dt in dts:
+ c = np.ones(53, dtype=bool)
+ assert_equal(np.where( c, dt(0), dt(1)), dt(0))
+ assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
+ assert_equal(np.where(True, dt(0), dt(1)), dt(0))
+ assert_equal(np.where(False, dt(0), dt(1)), dt(1))
+ d = np.ones_like(c).astype(dt)
+ e = np.zeros_like(d)
+ r = d.astype(dt)
+ c[7] = False
+ r[7] = e[7]
+ assert_equal(np.where(c, e, e), e)
+ assert_equal(np.where(c, d, e), r)
+ assert_equal(np.where(c, d, e[0]), r)
+ assert_equal(np.where(c, d[0], e), r)
+ assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
+ assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
+ assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
+ assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
+ assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
+ assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
+ assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
+
+ def test_exotic(self):
+ # object
+ assert_array_equal(np.where(True, None, None), np.array(None))
+ # zero sized
+ m = np.array([], dtype=bool).reshape(0, 3)
+ b = np.array([], dtype=np.float64).reshape(0, 3)
+ assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
+
+ # object cast
+ d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
+ 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
+ 1.267, 0.229, -1.39, 0.487])
+ nan = float('NaN')
+ e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
+ 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
+ dtype=object)
+ m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
+ 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
+
+ r = e[:]
+ r[np.where(m)] = d[np.where(m)]
+ assert_array_equal(np.where(m, d, e), r)
+
+ r = e[:]
+ r[np.where(~m)] = d[np.where(~m)]
+ assert_array_equal(np.where(m, e, d), r)
+
+ assert_array_equal(np.where(m, e, e), e)
+
+ # minimal dtype result with NaN scalar (e.g. required by pandas)
+ d = np.array([1., 2.], dtype=np.float32)
+ e = float('NaN')
+ assert_equal(np.where(True, d, e).dtype, np.float32)
+ e = float('Infinity')
+ assert_equal(np.where(True, d, e).dtype, np.float32)
+ e = float('-Infinity')
+ assert_equal(np.where(True, d, e).dtype, np.float32)
+ # also check upcast
+ e = float(1e150)
+ assert_equal(np.where(True, d, e).dtype, np.float64)
+
+ def test_ndim(self):
+ c = [True, False]
+ a = np.zeros((2, 25))
+ b = np.ones((2, 25))
+ r = np.where(np.array(c)[:,np.newaxis], a, b)
+ assert_array_equal(r[0], a[0])
+ assert_array_equal(r[1], b[0])
+
+ a = a.T
+ b = b.T
+ r = np.where(c, a, b)
+ assert_array_equal(r[:,0], a[:,0])
+ assert_array_equal(r[:,1], b[:,0])
+
+ def test_dtype_mix(self):
+ c = np.array([False, True, False, False, False, False, True, False,
+ False, False, True, False])
+ a = np.uint32(1)
+ b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
+ dtype=np.float64)
+ r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
+ dtype=np.float64)
+ assert_equal(np.where(c, a, b), r)
+
+ a = a.astype(np.float32)
+ b = b.astype(np.int64)
+ assert_equal(np.where(c, a, b), r)
+
+ # non bool mask
+ c = c.astype(int)
+ c[c != 0] = 34242324
+ assert_equal(np.where(c, a, b), r)
+ # invert
+ tmpmask = c != 0
+ c[c == 0] = 41247212
+ c[tmpmask] = 0
+ assert_equal(np.where(c, b, a), r)
+
+ def test_foreign(self):
+ c = np.array([False, True, False, False, False, False, True, False,
+ False, False, True, False])
+ r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
+ dtype=np.float64)
+ a = np.ones(1, dtype='>i4')
+ b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
+ dtype=np.float64)
+ assert_equal(np.where(c, a, b), r)
+
+ b = b.astype('>f8')
+ assert_equal(np.where(c, a, b), r)
+
+ a = a.astype('<i4')
+ assert_equal(np.where(c, a, b), r)
+
+ c = c.astype('>i4')
+ assert_equal(np.where(c, a, b), r)
+
+ def test_error(self):
+ c = [True, True]
+ a = np.ones((4, 5))
+ b = np.ones((5, 5))
+ assert_raises(ValueError, np.where, c, a, a)
+ assert_raises(ValueError, np.where, c[0], a, b)
+
+ def test_string(self):
+ # gh-4778 check strings are properly filled with nulls
+ a = np.array("abc")
+ b = np.array("x" * 753)
+ assert_equal(np.where(True, a, b), "abc")
+ assert_equal(np.where(False, b, a), "abc")
+
+ # check native datatype sized strings
+ a = np.array("abcd")
+ b = np.array("x" * 8)
+ assert_equal(np.where(True, a, b), "abcd")
+ assert_equal(np.where(False, b, a), "abcd")
+
+ def test_empty_result(self):
+ # Pass an empty where() result through an assignment which reads the
+ # data of empty arrays; the error is detectable with valgrind (gh-8922).
+ x = np.zeros((1, 1))
+ ibad = np.vstack(np.where(x == 99.))
+ assert_array_equal(ibad,
+ np.atleast_2d(np.array([[],[]], dtype=np.intp)))
+
+ def test_largedim(self):
+ # invalid read regression gh-9304
+ shape = [10, 2, 3, 4, 5, 6]
+ np.random.seed(2)
+ array = np.random.rand(*shape)
+
+ for i in range(10):
+ benchmark = array.nonzero()
+ result = array.nonzero()
+ assert_array_equal(benchmark, result)
+
+
+if not IS_PYPY:
+ # sys.getsizeof() is not valid on PyPy
+ class TestSizeOf:
+
+ def test_empty_array(self):
+ x = np.array([])
+ assert_(sys.getsizeof(x) > 0)
+
+ def check_array(self, dtype):
+ elem_size = dtype(0).itemsize
+
+ for length in [10, 50, 100, 500]:
+ x = np.arange(length, dtype=dtype)
+ assert_(sys.getsizeof(x) > length * elem_size)
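+
+ # (sys.getsizeof includes the ndarray object header on top of the
+ # raw buffer, hence strictly greater than length * elem_size)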
+
+ def test_array_int32(self):
+ self.check_array(np.int32)
+
+ def test_array_int64(self):
+ self.check_array(np.int64)
+
+ def test_array_float32(self):
+ self.check_array(np.float32)
+
+ def test_array_float64(self):
+ self.check_array(np.float64)
+
+ def test_view(self):
+ d = np.ones(100)
+ assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
+
+ def test_reshape(self):
+ d = np.ones(100)
+ assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+
+ @_no_tracing
+ def test_resize(self):
+ d = np.ones(100)
+ old = sys.getsizeof(d)
+ d.resize(50)
+ assert_(old > sys.getsizeof(d))
+ d.resize(150)
+ assert_(old < sys.getsizeof(d))
+
+ def test_error(self):
+ d = np.ones(100)
+ assert_raises(TypeError, d.__sizeof__, "a")
+
+
+class TestHashing:
+
+ def test_arrays_not_hashable(self):
+ x = np.ones(3)
+ assert_raises(TypeError, hash, x)
+
+ def test_collections_hashable(self):
+ x = np.array([])
+ assert_(not isinstance(x, collections.abc.Hashable))
+
+
+class TestArrayPriority:
+ # This will go away when __array_priority__ is settled; meanwhile
+ # it serves to catch unintended changes.
+ op = operator
+ binary_ops = [
+ op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
+ op.and_, op.or_, op.xor, op.lshift, op.rshift, op.gt,
+ op.ge, op.lt, op.le, op.ne, op.eq
+ ]
+
+ class Foo(np.ndarray):
+ __array_priority__ = 100.
+
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ class Bar(np.ndarray):
+ __array_priority__ = 101.
+
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ class Other:
+ __array_priority__ = 1000.
+
+ def _all(self, other):
+ return self.__class__()
+
+ __add__ = __radd__ = _all
+ __sub__ = __rsub__ = _all
+ __mul__ = __rmul__ = _all
+ __pow__ = __rpow__ = _all
+ __div__ = __rdiv__ = _all
+ __mod__ = __rmod__ = _all
+ __truediv__ = __rtruediv__ = _all
+ __floordiv__ = __rfloordiv__ = _all
+ __and__ = __rand__ = _all
+ __xor__ = __rxor__ = _all
+ __or__ = __ror__ = _all
+ __lshift__ = __rlshift__ = _all
+ __rshift__ = __rrshift__ = _all
+ __eq__ = _all
+ __ne__ = _all
+ __gt__ = _all
+ __ge__ = _all
+ __lt__ = _all
+ __le__ = _all
+
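+ # The rule under test: when both operands implement an operation, NumPy
+ # defers to the one with the higher __array_priority__
+ # (here Other > Bar > Foo > ndarray).
+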
+ def test_ndarray_subclass(self):
+ a = np.array([1, 2])
+ b = self.Bar([1, 2])
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Bar), msg)
+ assert_(isinstance(f(b, a), self.Bar), msg)
+
+ def test_ndarray_other(self):
+ a = np.array([1, 2])
+ b = self.Other()
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Other), msg)
+ assert_(isinstance(f(b, a), self.Other), msg)
+
+ def test_subclass_subclass(self):
+ a = self.Foo([1, 2])
+ b = self.Bar([1, 2])
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Bar), msg)
+ assert_(isinstance(f(b, a), self.Bar), msg)
+
+ def test_subclass_other(self):
+ a = self.Foo([1, 2])
+ b = self.Other()
+ for f in self.binary_ops:
+ msg = repr(f)
+ assert_(isinstance(f(a, b), self.Other), msg)
+ assert_(isinstance(f(b, a), self.Other), msg)
+
+
+class TestBytestringArrayNonzero:
+
+ def test_empty_bstring_array_is_falsey(self):
+ assert_(not np.array([''], dtype=str))
+
+ def test_whitespace_bstring_array_is_falsey(self):
+ a = np.array(['spam'], dtype=str)
+ a[0] = ' \0\0'
+ assert_(not a)
+
+ def test_all_null_bstring_array_is_falsey(self):
+ a = np.array(['spam'], dtype=str)
+ a[0] = '\0\0\0\0'
+ assert_(not a)
+
+ def test_null_inside_bstring_array_is_truthy(self):
+ a = np.array(['spam'], dtype=str)
+ a[0] = ' \0 \0'
+ assert_(a)
+
+
+class TestUnicodeEncoding:
+ """
+ Tests for encoding-related bugs, such as UCS2 vs UCS4 and
+ round-tripping issues.
+ """
+ def test_round_trip(self):
+ """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """
+ # gh-15363
+ arr = np.zeros(shape=(), dtype="U1")
+ for i in range(1, sys.maxunicode + 1):
+ expected = chr(i)
+ arr[()] = expected
+ assert arr[()] == expected
+ assert arr.item() == expected
+
+ def test_assign_scalar(self):
+ # gh-3258
+ l = np.array(['aa', 'bb'])
+ l[:] = np.unicode_('cc')
+ assert_equal(l, ['cc', 'cc'])
+
+ def test_fill_scalar(self):
+ # gh-7227
+ l = np.array(['aa', 'bb'])
+ l.fill(np.unicode_('cc'))
+ assert_equal(l, ['cc', 'cc'])
+
+
+class TestUnicodeArrayNonzero:
+
+ def test_empty_ustring_array_is_falsey(self):
+ assert_(not np.array([''], dtype=np.unicode_))
+
+ def test_whitespace_ustring_array_is_falsey(self):
+ a = np.array(['eggs'], dtype=np.unicode_)
+ a[0] = ' \0\0'
+ assert_(not a)
+
+ def test_all_null_ustring_array_is_falsey(self):
+ a = np.array(['eggs'], dtype=np.unicode_)
+ a[0] = '\0\0\0\0'
+ assert_(not a)
+
+ def test_null_inside_ustring_array_is_truthy(self):
+ a = np.array(['eggs'], dtype=np.unicode_)
+ a[0] = ' \0 \0'
+ assert_(a)
+
+
+class TestFormat:
+
+ def test_0d(self):
+ a = np.array(np.pi)
+ assert_equal('{:0.3g}'.format(a), '3.14')
+ assert_equal('{:0.3g}'.format(a[()]), '3.14')
+
+ def test_1d_no_format(self):
+ a = np.array([np.pi])
+ assert_equal('{}'.format(a), str(a))
+
+ def test_1d_format(self):
+ # until gh-5543, ensure that the behaviour matches what it used to be
+ a = np.array([np.pi])
+ assert_raises(TypeError, '{:30}'.format, a)
+
+
+class TestCTypes:
+
+ def test_ctypes_is_available(self):
+ test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+ assert_equal(ctypes, test_arr.ctypes._ctypes)
+ assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+
+ def test_ctypes_is_not_available(self):
+ from numpy.core import _internal
+ _internal.ctypes = None
+ try:
+ test_arr = np.array([[1, 2, 3], [4, 5, 6]])
+
+ assert_(isinstance(test_arr.ctypes._ctypes,
+ _internal._missing_ctypes))
+ assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
+ finally:
+ _internal.ctypes = ctypes
+
+ def _make_readonly(x):
+ x.flags.writeable = False
+ return x
+
+ @pytest.mark.parametrize('arr', [
+ np.array([1, 2, 3]),
+ np.array([['one', 'two'], ['three', 'four']]),
+ np.array((1, 2), dtype='i4,i4'),
+ np.zeros((2,), dtype=
+ np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['a', 'b'],
+ offsets=[0, 2],
+ itemsize=6
+ ))
+ ),
+ np.array([None], dtype=object),
+ np.array([]),
+ np.empty((0, 0)),
+ _make_readonly(np.array([1, 2, 3])),
+ ], ids=[
+ '1d',
+ '2d',
+ 'structured',
+ 'overlapping',
+ 'object',
+ 'empty',
+ 'empty-2d',
+ 'readonly'
+ ])
+ def test_ctypes_data_as_holds_reference(self, arr):
+ # gh-9647
+ # create a copy to ensure that pytest does not mess with the refcounts
+ arr = arr.copy()
+
+ arr_ref = weakref.ref(arr)
+
+ ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
+
+ # `ctypes_ptr` should hold onto `arr`
+ del arr
+ break_cycles()
+ assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+ # but when the `ctypes_ptr` object dies, so should `arr`
+ del ctypes_ptr
+ if IS_PYPY:
+            # PyPy does not recycle arr objects immediately. Trigger gc to
+            # release arr. CPython uses refcounts, so an explicit call to gc
+            # should not be needed here.
+ break_cycles()
+ assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
+ def test_ctypes_as_parameter_holds_reference(self):
+ arr = np.array([None]).copy()
+
+ arr_ref = weakref.ref(arr)
+
+ ctypes_ptr = arr.ctypes._as_parameter_
+
+ # `ctypes_ptr` should hold onto `arr`
+ del arr
+ break_cycles()
+ assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+ # but when the `ctypes_ptr` object dies, so should `arr`
+ del ctypes_ptr
+ if IS_PYPY:
+ break_cycles()
+ assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
+
+class TestWritebackIfCopy:
+ # all these tests use the WRITEBACKIFCOPY mechanism
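+    #
+    # Roughly (a sketch of the mechanism, not its full contract): when an
+    # `out=` operand needs a different dtype or layout, the operation runs
+    # on a compatible temporary flagged WRITEBACKIFCOPY, and the temporary
+    # is copied back into the original array when the flag is resolved
+    # (e.g. at deallocation or via an explicit resolve call).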
+ def test_argmax_with_out(self):
+ mat = np.eye(5)
+ out = np.empty(5, dtype='i2')
+ res = np.argmax(mat, 0, out=out)
+ assert_equal(res, range(5))
+
+ def test_argmin_with_out(self):
+ mat = -np.eye(5)
+ out = np.empty(5, dtype='i2')
+ res = np.argmin(mat, 0, out=out)
+ assert_equal(res, range(5))
+
+ def test_insert_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ # uses arr_insert
+ np.place(a, a>2, [44, 55])
+ assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+ # hit one of the failing paths
+ assert_raises(ValueError, np.place, a, a>20, [])
+
+ def test_put_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ np.put(a, [0, 2], [44, 55])
+ assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
+
+ def test_putmask_noncontiguous(self):
+ a = np.arange(6).reshape(2,3).T # force non-c-contiguous
+ # uses arr_putmask
+ np.putmask(a, a>2, a**2)
+ assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
+
+ def test_take_mode_raise(self):
+ a = np.arange(6, dtype='int')
+ out = np.empty(2, dtype='int')
+ np.take(a, [0, 2], out=out, mode='raise')
+ assert_equal(out, np.array([0, 2]))
+
+ def test_choose_mod_raise(self):
+ a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
+ out = np.empty((3,3), dtype='int')
+ choices = [-10, 10]
+ np.choose(a, choices, out=out, mode='raise')
+ assert_equal(out, np.array([[ 10, -10, 10],
+ [-10, 10, -10],
+ [ 10, -10, 10]]))
+
+ def test_flatiter__array__(self):
+ a = np.arange(9).reshape(3,3)
+ b = a.T.flat
+ c = b.__array__()
+ # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
+ del c
+
+ def test_dot_out(self):
+ # if HAVE_CBLAS, will use WRITEBACKIFCOPY
+ a = np.arange(9, dtype=float).reshape(3,3)
+ b = np.dot(a, a, out=a)
+ assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
+
+ def test_view_assign(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_resolve
+
+ arr = np.arange(9).reshape(3, 3).T
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_resolve(arr_wb)
+ # arr changes after resolve, even though we assigned to arr_wb
+ assert_equal(arr, -100)
+ # after resolve, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, -100)
+
+ @pytest.mark.leaks_references(
+ reason="increments self in dealloc; ignore since deprecated path.")
+ def test_dealloc_warning(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ arr = np.arange(9).reshape(3, 3)
+ v = arr.T
+ _multiarray_tests.npy_abuse_writebackifcopy(v)
+ assert len(sup.log) == 1
+
+ def test_view_discard_refcount(self):
+ from numpy.core._multiarray_tests import npy_create_writebackifcopy, npy_discard
+
+ arr = np.arange(9).reshape(3, 3).T
+ orig = arr.copy()
+ if HAS_REFCOUNT:
+ arr_cnt = sys.getrefcount(arr)
+ arr_wb = npy_create_writebackifcopy(arr)
+ assert_(arr_wb.flags.writebackifcopy)
+ assert_(arr_wb.base is arr)
+ arr_wb[...] = -100
+ npy_discard(arr_wb)
+ # arr remains unchanged after discard
+ assert_equal(arr, orig)
+ # after discard, the two arrays no longer reference each other
+ assert_(arr_wb.ctypes.data != 0)
+ assert_equal(arr_wb.base, None)
+ if HAS_REFCOUNT:
+ assert_equal(arr_cnt, sys.getrefcount(arr))
+ # assigning to arr_wb does not get transferred to arr
+ arr_wb[...] = 100
+ assert_equal(arr, orig)
+
+
+class TestArange:
+ def test_infinite(self):
+ assert_raises_regex(
+ ValueError, "size exceeded",
+ np.arange, 0, np.inf
+ )
+
+ def test_nan_step(self):
+ assert_raises_regex(
+ ValueError, "cannot compute length",
+ np.arange, 0, 1, np.nan
+ )
+
+ def test_zero_step(self):
+ assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
+ assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
+
+ # empty range
+ assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
+ assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+
+ def test_require_range(self):
+ assert_raises(TypeError, np.arange)
+ assert_raises(TypeError, np.arange, step=3)
+ assert_raises(TypeError, np.arange, dtype='int64')
+ assert_raises(TypeError, np.arange, start=4)
+
+ def test_start_stop_kwarg(self):
+ keyword_stop = np.arange(stop=3)
+ keyword_zerotostop = np.arange(start=0, stop=3)
+ keyword_start_stop = np.arange(start=3, stop=9)
+
+ assert len(keyword_stop) == 3
+ assert len(keyword_zerotostop) == 3
+ assert len(keyword_start_stop) == 6
+ assert_array_equal(keyword_stop, keyword_zerotostop)
+
+ def test_arange_booleans(self):
+ # Arange makes some sense for booleans and works up to length 2.
+ # But it is weird since `arange(2, 4, dtype=bool)` works.
+ # Arguably, much or all of this could be deprecated/removed.
+ res = np.arange(False, dtype=bool)
+ assert_array_equal(res, np.array([], dtype="bool"))
+
+ res = np.arange(True, dtype="bool")
+ assert_array_equal(res, [False])
+
+ res = np.arange(2, dtype="bool")
+ assert_array_equal(res, [False, True])
+
+ # This case is especially weird, but drops out without special case:
+ res = np.arange(6, 8, dtype="bool")
+ assert_array_equal(res, [True, True])
+
+ with pytest.raises(TypeError):
+ np.arange(3, dtype="bool")
+
+ @pytest.mark.parametrize("dtype", ["S3", "U", "5i"])
+ def test_rejects_bad_dtypes(self, dtype):
+ dtype = np.dtype(dtype)
+ DType_name = re.escape(str(type(dtype)))
+ with pytest.raises(TypeError,
+ match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+ np.arange(2, dtype=dtype)
+
+ def test_rejects_strings(self):
+ # Explicitly test error for strings which may call "b" - "a":
+ DType_name = re.escape(str(type(np.array("a").dtype)))
+ with pytest.raises(TypeError,
+ match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+ np.arange("a", "b")
+
+ def test_byteswapped(self):
+ res_be = np.arange(1, 1000, dtype=">i4")
+ res_le = np.arange(1, 1000, dtype="<i4")
+ assert res_be.dtype == ">i4"
+ assert res_le.dtype == "<i4"
+ assert_array_equal(res_le, res_be)
+
+ @pytest.mark.parametrize("which", [0, 1, 2])
+ def test_error_paths_and_promotion(self, which):
+ args = [0, 1, 2] # start, stop, and step
+ args[which] = np.float64(2.) # should ensure float64 output
+
+ assert np.arange(*args).dtype == np.float64
+
+ # Cover stranger error path, test only to achieve code coverage!
+ args[which] = [None, []]
+ with pytest.raises(ValueError):
+ # Fails discovering start dtype
+ np.arange(*args)
+
+
+class TestArrayFinalize:
+ """ Tests __array_finalize__ """
+
+ def test_receives_base(self):
+ # gh-11237
+ class SavesBase(np.ndarray):
+ def __array_finalize__(self, obj):
+ self.saved_base = self.base
+
+ a = np.array(1).view(SavesBase)
+ assert_(a.saved_base is a.base)
+
+ def test_bad_finalize1(self):
+ class BadAttributeArray(np.ndarray):
+ @property
+ def __array_finalize__(self):
+ raise RuntimeError("boohoo!")
+
+ with pytest.raises(TypeError, match="not callable"):
+ np.arange(10).view(BadAttributeArray)
+
+ def test_bad_finalize2(self):
+ class BadAttributeArray(np.ndarray):
+ def __array_finalize__(self):
+ raise RuntimeError("boohoo!")
+
+ with pytest.raises(TypeError, match="takes 1 positional"):
+ np.arange(10).view(BadAttributeArray)
+
+ def test_bad_finalize3(self):
+ class BadAttributeArray(np.ndarray):
+ def __array_finalize__(self, obj):
+ raise RuntimeError("boohoo!")
+
+ with pytest.raises(RuntimeError, match="boohoo!"):
+ np.arange(10).view(BadAttributeArray)
+
+ def test_lifetime_on_error(self):
+ # gh-11237
+ class RaisesInFinalize(np.ndarray):
+ def __array_finalize__(self, obj):
+ # crash, but keep this object alive
+ raise Exception(self)
+
+ # a plain object can't be weakref'd
+ class Dummy: pass
+
+ # get a weak reference to an object within an array
+ obj_arr = np.array(Dummy())
+ obj_ref = weakref.ref(obj_arr[()])
+
+ # get an array that crashed in __array_finalize__
+ with assert_raises(Exception) as e:
+ obj_arr.view(RaisesInFinalize)
+
+ obj_subarray = e.exception.args[0]
+ del e
+ assert_(isinstance(obj_subarray, RaisesInFinalize))
+
+ # reference should still be held by obj_arr
+ break_cycles()
+ assert_(obj_ref() is not None, "object should not already be dead")
+
+ del obj_arr
+ break_cycles()
+ assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
+
+ del obj_subarray
+ break_cycles()
+ assert_(obj_ref() is None, "no references should remain")
+
+ def test_can_use_super(self):
+ class SuperFinalize(np.ndarray):
+ def __array_finalize__(self, obj):
+ self.saved_result = super().__array_finalize__(obj)
+
+ a = np.array(1).view(SuperFinalize)
+ assert_(a.saved_result is None)
+
+
+def test_orderconverter_with_nonASCII_unicode_ordering():
+ # gh-7475
+ a = np.arange(5)
+ assert_raises(ValueError, a.flatten, order='\xe2')
+
+
+def test_equal_override():
+ # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
+ # did not respect overrides with __array_priority__ or __array_ufunc__.
+ # The PR fixed this for __array_priority__ and __array_ufunc__ = None.
+ class MyAlwaysEqual:
+ def __eq__(self, other):
+ return "eq"
+
+ def __ne__(self, other):
+ return "ne"
+
+ class MyAlwaysEqualOld(MyAlwaysEqual):
+ __array_priority__ = 10000
+
+ class MyAlwaysEqualNew(MyAlwaysEqual):
+ __array_ufunc__ = None
+
+ array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
+ for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
+ my_always_equal = my_always_equal_cls()
+ assert_equal(my_always_equal == array, 'eq')
+ assert_equal(array == my_always_equal, 'eq')
+ assert_equal(my_always_equal != array, 'ne')
+ assert_equal(array != my_always_equal, 'ne')
+
+
+@pytest.mark.parametrize(
+ ["fun", "npfun"],
+ [
+ (_multiarray_tests.npy_cabs, np.absolute),
+ (_multiarray_tests.npy_carg, np.angle)
+ ]
+)
+@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
+@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
+@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
+def test_npymath_complex(fun, npfun, x, y, test_dtype):
+ # Smoketest npymath functions
+ z = test_dtype(complex(x, y))
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
+def test_npymath_real():
+ # Smoketest npymath functions
+ from numpy.core._multiarray_tests import (
+ npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
+
+ funcs = {npy_log10: np.log10,
+ npy_cosh: np.cosh,
+ npy_sinh: np.sinh,
+ npy_tan: np.tan,
+ npy_tanh: np.tanh}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.float32, np.float64, np.longdouble)
+
+ with np.errstate(all='ignore'):
+ for fun, npfun in funcs.items():
+ for x, t in itertools.product(vals, types):
+ z = t(x)
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+def test_uintalignment_and_alignment():
+    # alignment code needs to satisfy these requirements:
+    # 1. numpy structs match C struct layout
+    # 2. ufuncs/casting is safe w.r.t. aligned access
+    # 3. copy code is safe w.r.t. "uint aligned" access
+    #
+    # Complex types are the main problem, whose alignment may not be the same
+    # as their "uint alignment".
+    #
+    # This test might only fail on certain platforms, where uint64 alignment
+    # is not equal to complex64 alignment. The last two checks below will
+    # only fail when compiled with DEBUG=1.
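+    #
+    # For instance, on a typical x86-64 build (values are platform-
+    # dependent, not guaranteed):
+    #   np.dtype('c8').itemsize   -> 8
+    #   np.dtype('c8').alignment  -> 4   (the alignment of its float32 halves)
+    #   np.dtype('u8').alignment  -> 8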
+
+ d1 = np.dtype('u1,c8', align=True)
+ d2 = np.dtype('u4,c8', align=True)
+ d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
+
+ assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
+
+ # check that C struct matches numpy struct size
+ s = _multiarray_tests.get_struct_alignments()
+ for d, (alignment, size) in zip([d1,d2,d3], s):
+ assert_equal(d.alignment, alignment)
+ assert_equal(d.itemsize, size)
+
+ # check that ufuncs don't complain in debug mode
+ # (this is probably OK if the aligned flag is true above)
+ src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often
+ np.exp(src) # assert fails?
+
+ # check that copy code doesn't complain in debug mode
+ dst = np.zeros((2,2), dtype='c8')
+ dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
+
+class TestAlignment:
+ # adapted from scipy._lib.tests.test__util.test__aligned_zeros
+ # Checks that unusual memory alignments don't trip up numpy.
+    # In particular, check that RELAXED_STRIDES doesn't trip alignment
+    # assertions in NDEBUG mode for size-0 arrays (gh-12503).
+
+ def check(self, shape, dtype, order, align):
+ err_msg = repr((shape, dtype, order, align))
+ x = _aligned_zeros(shape, dtype, order, align=align)
+ if align is None:
+ align = np.dtype(dtype).alignment
+ assert_equal(x.__array_interface__['data'][0] % align, 0)
+ if hasattr(shape, '__len__'):
+ assert_equal(x.shape, shape, err_msg)
+ else:
+ assert_equal(x.shape, (shape,), err_msg)
+ assert_equal(x.dtype, dtype)
+ if order == "C":
+ assert_(x.flags.c_contiguous, err_msg)
+ elif order == "F":
+ if x.size > 0:
+ assert_(x.flags.f_contiguous, err_msg)
+ elif order is None:
+ assert_(x.flags.c_contiguous, err_msg)
+ else:
+ raise ValueError()
+
+ def test_various_alignments(self):
+ for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
+ for n in [0, 1, 3, 11]:
+ for order in ["C", "F", None]:
+ for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
+ if dtype == 'O':
+ # object dtype can't be misaligned
+ continue
+ for shape in [n, (1, 2, 3, n)]:
+ self.check(shape, np.dtype(dtype), order, align)
+
+ def test_strided_loop_alignments(self):
+ # particularly test that complex64 and float128 use right alignment
+ # code-paths, since these are particularly problematic. It is useful to
+ # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.
+ for align in [1, 2, 4, 8, 12, 16, None]:
+ xf64 = _aligned_zeros(3, np.float64)
+
+ xc64 = _aligned_zeros(3, np.complex64, align=align)
+ xf128 = _aligned_zeros(3, np.longdouble, align=align)
+
+ # test casting, both to and from misaligned
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, "Casting complex values")
+ xc64.astype('f8')
+ xf64.astype(np.complex64)
+ test = xc64 + xf64
+
+ xf128.astype('f8')
+ xf64.astype(np.longdouble)
+ test = xf128 + xf64
+
+ test = xf128 + xc64
+
+ # test copy, both to and from misaligned
+ # contig copy
+ xf64[:] = xf64.copy()
+ xc64[:] = xc64.copy()
+ xf128[:] = xf128.copy()
+ # strided copy
+ xf64[::2] = xf64[::2].copy()
+ xc64[::2] = xc64[::2].copy()
+ xf128[::2] = xf128[::2].copy()
+
+def test_getfield():
+ a = np.arange(32, dtype='uint16')
+ if sys.byteorder == 'little':
+ i = 0
+ j = 1
+ else:
+ i = 1
+ j = 0
+ b = a.getfield('int8', i)
+ assert_equal(b, a)
+ b = a.getfield('int8', j)
+ assert_equal(b, 0)
+ pytest.raises(ValueError, a.getfield, 'uint8', -1)
+ pytest.raises(ValueError, a.getfield, 'uint8', 16)
+ pytest.raises(ValueError, a.getfield, 'uint64', 0)
+
+
+class TestViewDtype:
+ """
+ Verify that making a view of a non-contiguous array works as expected.
+ """
+ def test_smaller_dtype_multiple(self):
+ # x is non-contiguous
+ x = np.arange(10, dtype='<i4')[::2]
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('<i2')
+ expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
+ assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
+
+ def test_smaller_dtype_not_multiple(self):
+ # x is non-contiguous
+ x = np.arange(5, dtype='<i4')[::2]
+
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('S3')
+ with pytest.raises(ValueError,
+ match='When changing to a smaller dtype'):
+ x[:, np.newaxis].view('S3')
+
+ # Make sure the problem is because of the dtype size
+ expected = [[b''], [b'\x02'], [b'\x04']]
+ assert_array_equal(x[:, np.newaxis].view('S4'), expected)
+
+ def test_larger_dtype_multiple(self):
+ # x is non-contiguous in the first dimension, contiguous in the last
+ x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
+ expected = np.array([[65536], [327684], [589832],
+ [851980], [1114128]], dtype='<i4')
+ assert_array_equal(x.view('<i4'), expected)
+
+ def test_larger_dtype_not_multiple(self):
+ # x is non-contiguous in the first dimension, contiguous in the last
+ x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
+ with pytest.raises(ValueError,
+ match='When changing to a larger dtype'):
+ x.view('S3')
+ # Make sure the problem is because of the dtype size
+ expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'],
+ [b'\x0c\x00\r'], [b'\x10\x00\x11']]
+ assert_array_equal(x.view('S4'), expected)
+
+ def test_f_contiguous(self):
+ # x is F-contiguous
+ x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('<i2')
+
+ def test_non_c_contiguous(self):
+ # x is contiguous in axis=-1, but not C-contiguous in other axes
+ x = np.arange(2 * 3 * 4, dtype='i1').\
+ reshape(2, 3, 4).transpose(1, 0, 2)
+ expected = [[[256, 770], [3340, 3854]],
+ [[1284, 1798], [4368, 4882]],
+ [[2312, 2826], [5396, 5910]]]
+ assert_array_equal(x.view('<i2'), expected)
+
+
+# Test various array sizes that hit different code paths in quicksort-avx512
+@pytest.mark.parametrize("N", [8, 16, 24, 32, 48, 64, 96, 128, 151, 191,
+ 256, 383, 512, 1023, 2047])
+def test_sort_float(N):
+ # Regular data with nan sprinkled
+ np.random.seed(42)
+ arr = -0.5 + np.random.sample(N).astype('f')
+ arr[np.random.choice(arr.shape[0], 3)] = np.nan
+ assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
+
+ # (2) with +INF
+ infarr = np.inf*np.ones(N, dtype='f')
+ infarr[np.random.choice(infarr.shape[0], 5)] = -1.0
+ assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
+
+ # (3) with -INF
+ neginfarr = -np.inf*np.ones(N, dtype='f')
+ neginfarr[np.random.choice(neginfarr.shape[0], 5)] = 1.0
+ assert_equal(np.sort(neginfarr, kind='quick'),
+ np.sort(neginfarr, kind='heap'))
+
+ # (4) with +/-INF
+ infarr = np.inf*np.ones(N, dtype='f')
+    infarr[np.random.choice(infarr.shape[0], N // 2)] = -np.inf
+ assert_equal(np.sort(infarr, kind='quick'), np.sort(infarr, kind='heap'))
+
+
+def test_sort_int():
+ # Random data with NPY_MAX_INT32 and NPY_MIN_INT32 sprinkled
+ rng = np.random.default_rng(42)
+ N = 2047
+ minv = np.iinfo(np.int32).min
+ maxv = np.iinfo(np.int32).max
+ arr = rng.integers(low=minv, high=maxv, size=N).astype('int32')
+ arr[np.random.choice(arr.shape[0], 10)] = minv
+ arr[np.random.choice(arr.shape[0], 10)] = maxv
+ assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
+
+
+def test_sort_uint():
+ # Random data with NPY_MAX_UINT32 sprinkled
+ rng = np.random.default_rng(42)
+ N = 2047
+ maxv = np.iinfo(np.uint32).max
+ arr = rng.integers(low=0, high=maxv, size=N).astype('uint32')
+ arr[np.random.choice(arr.shape[0], 10)] = maxv
+ assert_equal(np.sort(arr, kind='quick'), np.sort(arr, kind='heap'))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py
new file mode 100644
index 00000000..b88afdfa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_nditer.py
@@ -0,0 +1,3316 @@
+import sys
+import pytest
+
+import textwrap
+import subprocess
+
+import numpy as np
+import numpy.core._multiarray_tests as _multiarray_tests
+from numpy import array, arange, nditer, all
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises,
+ IS_WASM, HAS_REFCOUNT, suppress_warnings, break_cycles
+ )
+
+
+def iter_multi_index(i):
+ ret = []
+ while not i.finished:
+ ret.append(i.multi_index)
+ i.iternext()
+ return ret
+
+def iter_indices(i):
+ ret = []
+ while not i.finished:
+ ret.append(i.index)
+ i.iternext()
+ return ret
+
+def iter_iterindices(i):
+ ret = []
+ while not i.finished:
+ ret.append(i.iterindex)
+ i.iternext()
+ return ret
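+
+# For example (illustrative only):
+#     it = nditer(arange(4).reshape(2, 2), ['multi_index'])
+#     iter_multi_index(it)  ->  [(0, 0), (0, 1), (1, 0), (1, 1)]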
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_iter_refcount():
+ # Make sure the iterator doesn't leak
+
+ # Basic
+ a = arange(6)
+ dt = np.dtype('f4').newbyteorder()
+ rc_a = sys.getrefcount(a)
+ rc_dt = sys.getrefcount(dt)
+ with nditer(a, [],
+ [['readwrite', 'updateifcopy']],
+ casting='unsafe',
+ op_dtypes=[dt]) as it:
+ assert_(not it.iterationneedsapi)
+ assert_(sys.getrefcount(a) > rc_a)
+ assert_(sys.getrefcount(dt) > rc_dt)
+ # del 'it'
+ it = None
+ assert_equal(sys.getrefcount(a), rc_a)
+ assert_equal(sys.getrefcount(dt), rc_dt)
+
+ # With a copy
+ a = arange(6, dtype='f4')
+ dt = np.dtype('f4')
+ rc_a = sys.getrefcount(a)
+ rc_dt = sys.getrefcount(dt)
+ it = nditer(a, [],
+ [['readwrite']],
+ op_dtypes=[dt])
+ rc2_a = sys.getrefcount(a)
+ rc2_dt = sys.getrefcount(dt)
+ it2 = it.copy()
+ assert_(sys.getrefcount(a) > rc2_a)
+ assert_(sys.getrefcount(dt) > rc2_dt)
+ it = None
+ assert_equal(sys.getrefcount(a), rc2_a)
+ assert_equal(sys.getrefcount(dt), rc2_dt)
+ it2 = None
+ assert_equal(sys.getrefcount(a), rc_a)
+ assert_equal(sys.getrefcount(dt), rc_dt)
+
+ del it2 # avoid pyflakes unused variable warning
+
+def test_iter_best_order():
+ # The iterator should always find the iteration order
+ # with increasing memory addresses
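+    #
+    # e.g. a reversed view is still walked in memory order under the
+    # default order='K', yielding the base array's values in their
+    # original order:
+    #     list(nditer(arange(3)[::-1]))  ->  [array(0), array(1), array(2)]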
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, [], [['readonly']])
+ assert_equal([x for x in i], a)
+ # Fortran-order
+ i = nditer(aview.T, [], [['readonly']])
+ assert_equal([x for x in i], a)
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), [], [['readonly']])
+ assert_equal([x for x in i], a)
+
+def test_iter_c_order():
+ # Test forcing C order
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, order='C')
+ assert_equal([x for x in i], aview.ravel(order='C'))
+ # Fortran-order
+ i = nditer(aview.T, order='C')
+ assert_equal([x for x in i], aview.T.ravel(order='C'))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), order='C')
+ assert_equal([x for x in i],
+ aview.swapaxes(0, 1).ravel(order='C'))
+
+def test_iter_f_order():
+ # Test forcing F order
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, order='F')
+ assert_equal([x for x in i], aview.ravel(order='F'))
+ # Fortran-order
+ i = nditer(aview.T, order='F')
+ assert_equal([x for x in i], aview.T.ravel(order='F'))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), order='F')
+ assert_equal([x for x in i],
+ aview.swapaxes(0, 1).ravel(order='F'))
+
+def test_iter_c_or_f_order():
+ # Test forcing any contiguous (C or F) order
+
+ # Test the ordering for 1-D to 5-D shapes
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ a = arange(np.prod(shape))
+ # Test each combination of positive and negative strides
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, order='A')
+ assert_equal([x for x in i], aview.ravel(order='A'))
+ # Fortran-order
+ i = nditer(aview.T, order='A')
+ assert_equal([x for x in i], aview.T.ravel(order='A'))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1), order='A')
+ assert_equal([x for x in i],
+ aview.swapaxes(0, 1).ravel(order='A'))
+
+def test_nditer_multi_index_set():
+ # Test the multi_index set
+ a = np.arange(6).reshape(2, 3)
+ it = np.nditer(a, flags=['multi_index'])
+
+    # Skip the first two elements of a[0] by jumping the iterator to (0, 2)
+ it.multi_index = (0, 2,)
+
+ assert_equal([i for i in it], [2, 3, 4, 5])
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_nditer_multi_index_set_refcount():
+    # Test that assigning to multi_index does not leak a reference to the index variable
+
+ index = 0
+ i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index'])
+
+ start_count = sys.getrefcount(index)
+ i.multi_index = (index,)
+ end_count = sys.getrefcount(index)
+
+ assert_equal(start_count, end_count)
+
+def test_iter_best_order_multi_index_1d():
+ # The multi-indices should be correct with any reordering
+
+ a = arange(4)
+ # 1D order
+ i = nditer(a, ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)])
+ # 1D reversed order
+ i = nditer(a[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)])
+
+def test_iter_best_order_multi_index_2d():
+ # The multi-indices should be correct with any reordering
+
+ a = arange(6)
+ # 2D C-order
+ i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)])
+ # 2D Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)])
+ # 2D reversed C-order
+ i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)])
+ i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)])
+ i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)])
+ # 2D reversed Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)])
+ i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)])
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)])
+
+def test_iter_best_order_multi_index_3d():
+ # The multi-indices should be correct with any reordering
+
+ a = arange(12)
+ # 3D C-order
+ i = nditer(a.reshape(2, 3, 2), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1),
+ (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)])
+ # 3D Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0),
+ (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)])
+ # 3D reversed C-order
+ i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1),
+ (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)])
+ i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1),
+ (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)])
+ i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0),
+ (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)])
+ # 3D reversed Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0),
+ (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0),
+ (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+ ['multi_index'], [['readonly']])
+ assert_equal(iter_multi_index(i),
+ [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1),
+ (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)])
+
+def test_iter_best_order_c_index_1d():
+ # The C index should be correct with any reordering
+
+ a = arange(4)
+ # 1D order
+ i = nditer(a, ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3])
+ # 1D reversed order
+ i = nditer(a[::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 2, 1, 0])
+
+def test_iter_best_order_c_index_2d():
+ # The C index should be correct with any reordering
+
+ a = arange(6)
+ # 2D C-order
+ i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
+ # 2D Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F'),
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5])
+ # 2D reversed C-order
+ i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2])
+ i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3])
+ i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
+ # 2D reversed Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2])
+ i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3])
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0])
+
+def test_iter_best_order_c_index_3d():
+ # The C index should be correct with any reordering
+
+ a = arange(12)
+ # 3D C-order
+ i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ # 3D Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F'),
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
+ # 3D reversed C-order
+ i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
+ i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
+ i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
+ # 3D reversed Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+ ['c_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
+
+def test_iter_best_order_f_index_1d():
+ # The Fortran index should be correct with any reordering
+
+ a = arange(4)
+ # 1D order
+ i = nditer(a, ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3])
+ # 1D reversed order
+ i = nditer(a[::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [3, 2, 1, 0])
+
+def test_iter_best_order_f_index_2d():
+ # The Fortran index should be correct with any reordering
+
+ a = arange(6)
+ # 2D C-order
+ i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5])
+ # 2D Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F'),
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5])
+ # 2D reversed C-order
+ i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4])
+ i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1])
+ i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0])
+ # 2D reversed Fortran-order
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4])
+ i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1])
+ i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0])
+
+def test_iter_best_order_f_index_3d():
+ # The Fortran index should be correct with any reordering
+
+ a = arange(12)
+ # 3D C-order
+ i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11])
+ # 3D Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F'),
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ # 3D reversed C-order
+ i = nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10])
+ i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7])
+ i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5])
+ # 3D reversed Fortran-order
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7])
+ i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1],
+ ['f_index'], [['readonly']])
+ assert_equal(iter_indices(i),
+ [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5])
+
+def test_iter_no_inner_full_coalesce():
+    # Check 'external_loop' iterators which coalesce into a single inner loop
+
+ for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]:
+ size = np.prod(shape)
+ a = arange(size)
+ # Test each combination of forward and backwards indexing
+ for dirs in range(2**len(shape)):
+ dirs_index = [slice(None)]*len(shape)
+ for bit in range(len(shape)):
+ if ((2**bit) & dirs):
+ dirs_index[bit] = slice(None, None, -1)
+ dirs_index = tuple(dirs_index)
+
+ aview = a.reshape(shape)[dirs_index]
+ # C-order
+ i = nditer(aview, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (size,))
+ # Fortran-order
+ i = nditer(aview.T, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (size,))
+ # Other order
+ if len(shape) > 2:
+ i = nditer(aview.swapaxes(0, 1),
+ ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (size,))
+
+def test_iter_no_inner_dim_coalescing():
+    # Check 'external_loop' iterators whose dimensions may not coalesce completely
+
+ # Skipping the last element in a dimension prevents coalescing
+ # with the next-bigger dimension
+ a = arange(24).reshape(2, 3, 4)[:,:, :-1]
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 2)
+ assert_equal(i[0].shape, (3,))
+ a = arange(24).reshape(2, 3, 4)[:, :-1,:]
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 2)
+ assert_equal(i[0].shape, (8,))
+ a = arange(24).reshape(2, 3, 4)[:-1,:,:]
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (12,))
+
+ # Even with lots of 1-sized dimensions, should still coalesce
+ a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1)
+ i = nditer(a, ['external_loop'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ assert_equal(i[0].shape, (24,))
+
+def test_iter_dim_coalescing():
+ # Check that the correct number of dimensions are coalesced
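+    # ("Coalescing": adjacent dimensions whose strides line up are merged
+    # into one, so a fully C-contiguous array can be iterated as 1-D.)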
+
+ # Tracking a multi-index disables coalescing
+ a = arange(24).reshape(2, 3, 4)
+ i = nditer(a, ['multi_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+
+ # A tracked index can allow coalescing if it's compatible with the array
+ a3d = arange(24).reshape(2, 3, 4)
+ i = nditer(a3d, ['c_index'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d.T, ['c_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d.T, ['f_index'], [['readonly']])
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']])
+ assert_equal(i.ndim, 3)
+
+ # When C or F order is forced, coalescing may still occur
+ a3d = arange(24).reshape(2, 3, 4)
+ i = nditer(a3d, order='C')
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.T, order='C')
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d, order='F')
+ assert_equal(i.ndim, 3)
+ i = nditer(a3d.T, order='F')
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d, order='A')
+ assert_equal(i.ndim, 1)
+ i = nditer(a3d.T, order='A')
+ assert_equal(i.ndim, 1)
+
+def test_iter_broadcasting():
+ # Standard NumPy broadcasting rules
+
+ # 1D with scalar
+ i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (6,))
+
+ # 2D with scalar
+ i = nditer([arange(6).reshape(2, 3), np.int32(2)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+ # 2D with 1D
+ i = nditer([arange(6).reshape(2, 3), arange(3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+ i = nditer([arange(2).reshape(2, 1), arange(3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+ # 2D with 2D
+ i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 6)
+ assert_equal(i.shape, (2, 3))
+
+ # 3D with scalar
+ i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ # 3D with 1D
+ i = nditer([arange(3), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(3), arange(8).reshape(4, 2, 1)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ # 3D with 2D
+ i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ # 3D with 3D
+ i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3),
+ arange(4).reshape(4, 1, 1)],
+ ['multi_index'], [['readonly']]*3)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+ i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)],
+ ['multi_index'], [['readonly']]*2)
+ assert_equal(i.itersize, 24)
+ assert_equal(i.shape, (4, 2, 3))
+
+def test_iter_itershape():
+ # Check that allocated outputs work with a specified shape
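+    # (A -1 entry in itershape is presumably inferred by broadcasting the
+    # operands, while the explicit 4 forces the allocated output to gain a
+    # new axis of that length.)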
+ a = np.arange(6, dtype='i2').reshape(2, 3)
+ i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, -1, 4))
+ assert_equal(i.operands[1].shape, (2, 3, 4))
+ assert_equal(i.operands[1].strides, (24, 8, 2))
+
+ i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, -1, 4))
+ assert_equal(i.operands[1].shape, (3, 2, 4))
+ assert_equal(i.operands[1].strides, (8, 24, 2))
+
+ i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']],
+ order='F',
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, -1, 4))
+ assert_equal(i.operands[1].shape, (3, 2, 4))
+ assert_equal(i.operands[1].strides, (2, 6, 12))
+
+ # If we specify 1 in the itershape, it shouldn't allow broadcasting
+ # of that dimension to a bigger value
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['writeonly', 'allocate']],
+ op_axes=[[0, 1, None], None],
+ itershape=(-1, 1, 4))
+    # Regression test: with itershape given but no op_axes, the internal op_axes must be NULLed correctly
+ i = np.nditer([np.ones(2), None, None], itershape=(2,))
+
+def test_iter_broadcasting_errors():
+ # Check that errors are thrown for bad broadcasting shapes
+
+ # 1D with 1D
+ assert_raises(ValueError, nditer, [arange(2), arange(3)],
+ [], [['readonly']]*2)
+ # 2D with 1D
+ assert_raises(ValueError, nditer,
+ [arange(6).reshape(2, 3), arange(2)],
+ [], [['readonly']]*2)
+ # 2D with 2D
+ assert_raises(ValueError, nditer,
+ [arange(6).reshape(2, 3), arange(9).reshape(3, 3)],
+ [], [['readonly']]*2)
+ assert_raises(ValueError, nditer,
+ [arange(6).reshape(2, 3), arange(4).reshape(2, 2)],
+ [], [['readonly']]*2)
+ # 3D with 3D
+ assert_raises(ValueError, nditer,
+ [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)],
+ [], [['readonly']]*2)
+ assert_raises(ValueError, nditer,
+ [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)],
+ [], [['readonly']]*2)
+
+ # Verify that the error message mentions the right shapes
+ try:
+ nditer([arange(2).reshape(1, 2, 1),
+ arange(3).reshape(1, 3),
+ arange(6).reshape(2, 3)],
+ [],
+ [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']])
+ raise AssertionError('Should have raised a broadcast error')
+ except ValueError as e:
+ msg = str(e)
+ # The message should contain the shape of the 3rd operand
+ assert_(msg.find('(2,3)') >= 0,
+ 'Message "%s" doesn\'t contain operand shape (2,3)' % msg)
+ # The message should contain the broadcast shape
+ assert_(msg.find('(1,2,3)') >= 0,
+ 'Message "%s" doesn\'t contain broadcast shape (1,2,3)' % msg)
+
+ try:
+ nditer([arange(6).reshape(2, 3), arange(2)],
+ [],
+ [['readonly'], ['readonly']],
+ op_axes=[[0, 1], [0, np.newaxis]],
+ itershape=(4, 3))
+ raise AssertionError('Should have raised a broadcast error')
+ except ValueError as e:
+ msg = str(e)
+ # The message should contain "shape->remappedshape" for each operand
+ assert_(msg.find('(2,3)->(2,3)') >= 0,
+ 'Message "%s" doesn\'t contain operand shape (2,3)->(2,3)' % msg)
+        assert_(msg.find('(2,)->(2,newaxis)') >= 0,
+                ('Message "%s" doesn\'t contain remapped operand shape ' +
+                 '(2,)->(2,newaxis)') % msg)
+ # The message should contain the itershape parameter
+ assert_(msg.find('(4,3)') >= 0,
+ 'Message "%s" doesn\'t contain itershape parameter (4,3)' % msg)
+
+ try:
+ nditer([np.zeros((2, 1, 1)), np.zeros((2,))],
+ [],
+ [['writeonly', 'no_broadcast'], ['readonly']])
+ raise AssertionError('Should have raised a broadcast error')
+ except ValueError as e:
+ msg = str(e)
+ # The message should contain the shape of the bad operand
+ assert_(msg.find('(2,1,1)') >= 0,
+ 'Message "%s" doesn\'t contain operand shape (2,1,1)' % msg)
+ # The message should contain the broadcast shape
+ assert_(msg.find('(2,1,2)') >= 0,
+ 'Message "%s" doesn\'t contain the broadcast shape (2,1,2)' % msg)
+
+def test_iter_flags_errors():
+ # Check that bad combinations of flags produce errors
+
+ a = arange(6)
+
+ # Not enough operands
+ assert_raises(ValueError, nditer, [], [], [])
+ # Too many operands
+ assert_raises(ValueError, nditer, [a]*100, [], [['readonly']]*100)
+ # Bad global flag
+ assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']])
+ # Bad op flag
+ assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']])
+ # Bad order parameter
+ assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G')
+ # Bad casting parameter
+ assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon')
+ # op_flags must match ops
+ assert_raises(ValueError, nditer, [a]*3, [], [['readonly']]*2)
+ # Cannot track both a C and an F index
+ assert_raises(ValueError, nditer, a,
+ ['c_index', 'f_index'], [['readonly']])
+ # Inner iteration and multi-indices/indices are incompatible
+ assert_raises(ValueError, nditer, a,
+ ['external_loop', 'multi_index'], [['readonly']])
+ assert_raises(ValueError, nditer, a,
+ ['external_loop', 'c_index'], [['readonly']])
+ assert_raises(ValueError, nditer, a,
+ ['external_loop', 'f_index'], [['readonly']])
+ # Must specify exactly one of readwrite/readonly/writeonly per operand
+ assert_raises(ValueError, nditer, a, [], [[]])
+ assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']])
+ assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']])
+ assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']])
+ assert_raises(ValueError, nditer, a,
+ [], [['readonly', 'writeonly', 'readwrite']])
+ # Python scalars are always readonly
+ assert_raises(TypeError, nditer, 1.5, [], [['writeonly']])
+ assert_raises(TypeError, nditer, 1.5, [], [['readwrite']])
+ # Array scalars are always readonly
+ assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']])
+ assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']])
+ # Check readonly array
+ a.flags.writeable = False
+ assert_raises(ValueError, nditer, a, [], [['writeonly']])
+ assert_raises(ValueError, nditer, a, [], [['readwrite']])
+ a.flags.writeable = True
+ # Multi-indices available only with the multi_index flag
+ i = nditer(arange(6), [], [['readonly']])
+    assert_raises(ValueError, lambda i: i.multi_index, i)
+    # Index available only with an index flag
+    assert_raises(ValueError, lambda i: i.index, i)
+    # Assigning a multi-index or index is incompatible with buffering or external_loop
+
+ def assign_multi_index(i):
+ i.multi_index = (0,)
+
+ def assign_index(i):
+ i.index = 0
+
+ def assign_iterindex(i):
+ i.iterindex = 0
+
+ def assign_iterrange(i):
+ i.iterrange = (0, 1)
+ i = nditer(arange(6), ['external_loop'])
+ assert_raises(ValueError, assign_multi_index, i)
+ assert_raises(ValueError, assign_index, i)
+ assert_raises(ValueError, assign_iterindex, i)
+ assert_raises(ValueError, assign_iterrange, i)
+ i = nditer(arange(6), ['buffered'])
+ assert_raises(ValueError, assign_multi_index, i)
+ assert_raises(ValueError, assign_index, i)
+ assert_raises(ValueError, assign_iterrange, i)
+ # Can't iterate if size is zero
+ assert_raises(ValueError, nditer, np.array([]))
+
+def test_iter_slice():
+ a, b, c = np.arange(3), np.arange(3), np.arange(3.)
+ i = nditer([a, b, c], [], ['readwrite'])
+ with i:
+ i[0:2] = (3, 3)
+ assert_equal(a, [3, 1, 2])
+ assert_equal(b, [3, 1, 2])
+ assert_equal(c, [0, 1, 2])
+ i[1] = 12
+ assert_equal(i[0:2], [3, 12])
+
+def test_iter_assign_mapping():
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][...] = 3
+ it.operands[0][...] = 14
+ assert_equal(a, 14)
+ it = np.nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='same_kind', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0][-1:1]
+ x[...] = 14
+ it.operands[0][...] = -1234
+ assert_equal(a, -1234)
+ # check for no warnings on dealloc
+ x = None
+ it = None
+
+def test_iter_nbo_align_contig():
+ # Check that byte order, alignment, and contig changes work
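+    # The per-operand flags exercised below: 'nbo' requests native byte
+    # order, 'aligned' requests aligned data, and 'contig' requests
+    # contiguous data; when an operand doesn't already satisfy a request,
+    # the iterator must copy (here with 'updateifcopy' for writeback) or
+    # buffer it.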
+
+ # Byte order change by requesting a specific dtype
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ i = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv',
+ op_dtypes=[np.dtype('f4')])
+ with i:
+ # context manager triggers WRITEBACKIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 2
+ assert_equal(au, [2]*6)
+ del i # should not raise a warning
+ # Byte order change by requesting NBO
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']],
+ casting='equiv') as i:
+        # context manager triggers WRITEBACKIFCOPY on i at exit
+ assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder)
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 12345
+ i.operands[0][:] = 2
+ assert_equal(au, [2]*6)
+
+ # Unaligned input
+ a = np.zeros((6*4+1,), dtype='i1')[1:]
+ a.dtype = 'f4'
+ a[:] = np.arange(6, dtype='f4')
+ assert_(not a.flags.aligned)
+ # Without 'aligned', shouldn't copy
+ i = nditer(a, [], [['readonly']])
+ assert_(not i.operands[0].flags.aligned)
+ assert_equal(i.operands[0], a)
+ # With 'aligned', should make a copy
+ with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i:
+ assert_(i.operands[0].flags.aligned)
+        # context manager triggers WRITEBACKIFCOPY on i at exit
+ assert_equal(i.operands[0], a)
+ i.operands[0][:] = 3
+ assert_equal(a, [3]*6)
+
+ # Discontiguous input
+ a = arange(12)
+ # If it is contiguous, shouldn't copy
+ i = nditer(a[:6], [], [['readonly']])
+ assert_(i.operands[0].flags.contiguous)
+ assert_equal(i.operands[0], a[:6])
+ # If it isn't contiguous, should buffer
+ i = nditer(a[::2], ['buffered', 'external_loop'],
+ [['readonly', 'contig']],
+ buffersize=10)
+ assert_(i[0].flags.contiguous)
+ assert_equal(i[0], a[::2])
+
+def test_iter_array_cast():
+ # Check that arrays are cast as requested
+
+ # No cast 'f4' -> 'f4'
+ a = np.arange(6, dtype='f4').reshape(2, 3)
+ i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')])
+ with i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+
+ # Byte-order cast '<f4' -> '>f4'
+ a = np.arange(6, dtype='<f4').reshape(2, 3)
+ with nditer(a, [], [['readwrite', 'updateifcopy']],
+ casting='equiv',
+ op_dtypes=[np.dtype('>f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('>f4'))
+
+ # Safe case 'f4' -> 'f8'
+ a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2)
+ i = nditer(a, [], [['readonly', 'copy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f8'))
+ # The memory layout of the temporary should match a (a is (48,4,16))
+ # except negative strides get flipped to positive strides.
+ assert_equal(i.operands[0].strides, (96, 8, 32))
+ a = a[::-1,:, ::-1]
+ i = nditer(a, [], [['readonly', 'copy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f8'))
+ assert_equal(i.operands[0].strides, (96, 8, 32))
+
+ # Same-kind cast 'f8' -> 'f4' -> 'f8'
+ a = np.arange(24, dtype='f8').reshape(2, 3, 4).T
+ with nditer(a, [],
+ [['readwrite', 'updateifcopy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0], a)
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ assert_equal(i.operands[0].strides, (4, 16, 48))
+ # Check that WRITEBACKIFCOPY is activated at exit
+ i.operands[0][2, 1, 1] = -12.5
+ assert_(a[2, 1, 1] != -12.5)
+    # writeback has now happened, so the original array sees the change
+    assert_equal(a[2, 1, 1], -12.5)
+
+ a = np.arange(6, dtype='i4')[::-2]
+ with nditer(a, [],
+ [['writeonly', 'updateifcopy']],
+ casting='unsafe',
+ op_dtypes=[np.dtype('f4')]) as i:
+ assert_equal(i.operands[0].dtype, np.dtype('f4'))
+ # Even though the stride was negative in 'a', it
+ # becomes positive in the temporary
+ assert_equal(i.operands[0].strides, (4,))
+ i.operands[0][:] = [1, 2, 3]
+ assert_equal(a, [1, 2, 3])
+
+def test_iter_array_cast_errors():
+ # Check that invalid casts are caught
+
+ # Need to enable copying for casts to occur
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly']], op_dtypes=[np.dtype('f8')])
+ # Also need to allow casting for casts to occur
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly', 'copy']], casting='no',
+ op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly', 'copy']], casting='equiv',
+ op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+ [['writeonly', 'updateifcopy']],
+ casting='no',
+ op_dtypes=[np.dtype('f4')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+ [['writeonly', 'updateifcopy']],
+ casting='equiv',
+ op_dtypes=[np.dtype('f4')])
+ # '<f4' -> '>f4' should not work with casting='no'
+ assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [],
+ [['readonly', 'copy']], casting='no',
+ op_dtypes=[np.dtype('>f4')])
+ # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readwrite', 'updateifcopy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, arange(2, dtype='f8'), [],
+ [['readwrite', 'updateifcopy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f4')])
+ # 'f4' -> 'i4' is neither a safe nor a same-kind cast
+ assert_raises(TypeError, nditer, arange(2, dtype='f4'), [],
+ [['readonly', 'copy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('i4')])
+ assert_raises(TypeError, nditer, arange(2, dtype='i4'), [],
+ [['writeonly', 'updateifcopy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')])
+
+def test_iter_scalar_cast():
+ # Check that scalars are cast as requested
+
+ # No cast 'f4' -> 'f4'
+ i = nditer(np.float32(2.5), [], [['readonly']],
+ op_dtypes=[np.dtype('f4')])
+ assert_equal(i.dtypes[0], np.dtype('f4'))
+ assert_equal(i.value.dtype, np.dtype('f4'))
+ assert_equal(i.value, 2.5)
+ # Safe cast 'f4' -> 'f8'
+ i = nditer(np.float32(2.5), [],
+ [['readonly', 'copy']],
+ casting='safe',
+ op_dtypes=[np.dtype('f8')])
+ assert_equal(i.dtypes[0], np.dtype('f8'))
+ assert_equal(i.value.dtype, np.dtype('f8'))
+ assert_equal(i.value, 2.5)
+ # Same-kind cast 'f8' -> 'f4'
+ i = nditer(np.float64(2.5), [],
+ [['readonly', 'copy']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')])
+ assert_equal(i.dtypes[0], np.dtype('f4'))
+ assert_equal(i.value.dtype, np.dtype('f4'))
+ assert_equal(i.value, 2.5)
+ # Unsafe cast 'f8' -> 'i4'
+ i = nditer(np.float64(3.0), [],
+ [['readonly', 'copy']],
+ casting='unsafe',
+ op_dtypes=[np.dtype('i4')])
+ assert_equal(i.dtypes[0], np.dtype('i4'))
+ assert_equal(i.value.dtype, np.dtype('i4'))
+ assert_equal(i.value, 3)
+ # Readonly scalars may be cast even without setting COPY or BUFFERED
+ i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')])
+ assert_equal(i[0].dtype, np.dtype('f8'))
+ assert_equal(i[0], 3.)
+
+def test_iter_scalar_cast_errors():
+ # Check that invalid casts are caught
+
+ # Need to allow copying/buffering for write casts of scalars to occur
+ assert_raises(TypeError, nditer, np.float32(2), [],
+ [['readwrite']], op_dtypes=[np.dtype('f8')])
+ assert_raises(TypeError, nditer, 2.5, [],
+ [['readwrite']], op_dtypes=[np.dtype('f4')])
+ # 'f8' -> 'f4' isn't a safe cast if the value would overflow
+ assert_raises(TypeError, nditer, np.float64(1e60), [],
+ [['readonly']],
+ casting='safe',
+ op_dtypes=[np.dtype('f4')])
+ # 'f4' -> 'i4' is neither a safe nor a same-kind cast
+ assert_raises(TypeError, nditer, np.float32(2), [],
+ [['readonly']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('i4')])
+
+def test_iter_object_arrays_basic():
+ # Check that object arrays work
+
+    obj = {'a': 3, 'b': 'd'}
+ a = np.array([[1, 2, 3], None, obj, None], dtype='O')
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(obj)
+
+ # Need to allow references for object arrays
+ assert_raises(TypeError, nditer, a)
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
+
+ i = nditer(a, ['refs_ok'], ['readonly'])
+ vals = [x_[()] for x_ in i]
+ assert_equal(np.array(vals, dtype='O'), a)
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
+
+ i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
+ ['readonly'], order='C')
+ assert_(i.iterationneedsapi)
+ vals = [x_[()] for x_ in i]
+ assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F'))
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(obj), rc)
+
+ i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'],
+ ['readwrite'], order='C')
+ with i:
+ for x in i:
+ x[...] = None
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(obj) == rc-1)
+ assert_equal(a, np.array([None]*4, dtype='O'))
+
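+# Illustrative sketch (hypothetical helper, not collected by pytest) of the
+# 'refs_ok' requirement tested above: object arrays hold Python references,
+# so iterating them without the flag raises TypeError.
+def _sketch_refs_ok():
+    obj_arr = np.array([None, 'x'], dtype='O')
+    assert_raises(TypeError, nditer, obj_arr)
+    vals = [v[()] for v in nditer(obj_arr, ['refs_ok'])]
+    assert_equal(vals, [None, 'x'])
+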
+def test_iter_object_arrays_conversions():
+ # Conversions to/from objects
+ a = np.arange(6, dtype='O')
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='i4')
+ with i:
+ for x in i:
+ x[...] += 1
+ assert_equal(a, np.arange(6)+1)
+
+ a = np.arange(6, dtype='i4')
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='O')
+ with i:
+ for x in i:
+ x[...] += 1
+ assert_equal(a, np.arange(6)+1)
+
+ # Non-contiguous object array
+ a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')])
+ a = a['a']
+ a[:] = np.arange(6)
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='i4')
+ with i:
+ for x in i:
+ x[...] += 1
+ assert_equal(a, np.arange(6)+1)
+
+    # Non-contiguous value array
+ a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')])
+ a = a['a']
+ a[:] = np.arange(6) + 98172488
+ i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'],
+ casting='unsafe', op_dtypes='O')
+ with i:
+ ob = i[0][()]
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(ob)
+ for x in i:
+ x[...] += 1
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(ob) == rc-1)
+ assert_equal(a, np.arange(6)+98172489)
+
+def test_iter_common_dtype():
+ # Check that the iterator finds a common data type correctly
+
+ i = nditer([array([3], dtype='f4'), array([0], dtype='f8')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('f8'))
+ assert_equal(i.dtypes[1], np.dtype('f8'))
+ i = nditer([array([3], dtype='i4'), array([0], dtype='f4')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('f8'))
+ assert_equal(i.dtypes[1], np.dtype('f8'))
+ i = nditer([array([3], dtype='f4'), array(0, dtype='f8')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='same_kind')
+ assert_equal(i.dtypes[0], np.dtype('f4'))
+ assert_equal(i.dtypes[1], np.dtype('f4'))
+ i = nditer([array([3], dtype='u4'), array(0, dtype='i4')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('u4'))
+ assert_equal(i.dtypes[1], np.dtype('u4'))
+ i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*2,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('i8'))
+ assert_equal(i.dtypes[1], np.dtype('i8'))
+ i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'),
+ array([2j], dtype='c8'), array([9], dtype='f8')],
+ ['common_dtype'],
+ [['readonly', 'copy']]*4,
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('c16'))
+ assert_equal(i.dtypes[1], np.dtype('c16'))
+ assert_equal(i.dtypes[2], np.dtype('c16'))
+ assert_equal(i.dtypes[3], np.dtype('c16'))
+ assert_equal(i.value, (3, -12, 2j, 9))
+
+ # When allocating outputs, other outputs aren't factored in
+ i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [],
+ [['readonly', 'copy'],
+ ['writeonly', 'allocate'],
+ ['writeonly']],
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('i4'))
+ assert_equal(i.dtypes[1], np.dtype('i4'))
+ assert_equal(i.dtypes[2], np.dtype('c16'))
+ # But, if common data types are requested, they are
+ i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')],
+ ['common_dtype'],
+ [['readonly', 'copy'],
+ ['writeonly', 'allocate'],
+ ['writeonly']],
+ casting='safe')
+ assert_equal(i.dtypes[0], np.dtype('c16'))
+ assert_equal(i.dtypes[1], np.dtype('c16'))
+ assert_equal(i.dtypes[2], np.dtype('c16'))
+
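+# Illustrative sketch (hypothetical helper, not collected by pytest): the
+# common dtype chosen above follows np.result_type, including the
+# value-based handling of 0-d operands in this NumPy version.
+def _sketch_common_dtype():
+    assert_equal(np.result_type(np.dtype('f4'), np.dtype('f8')),
+                 np.dtype('f8'))
+    # u4 and i4 share no 32-bit common type, so promotion goes up to i8
+    assert_equal(np.result_type(np.dtype('u4'), np.dtype('i4')),
+                 np.dtype('i8'))
+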
+def test_iter_copy_if_overlap():
+ # Ensure the iterator makes copies on read/write overlap, if requested
+
+ # Copy not needed, 1 op
+ for flag in ['readonly', 'writeonly', 'readwrite']:
+ a = arange(10)
+ i = nditer([a], ['copy_if_overlap'], [[flag]])
+ with i:
+ assert_(i.operands[0] is a)
+
+ # Copy needed, 2 ops, read-write overlap
+ x = arange(10)
+ a = x[1:]
+ b = x[:-1]
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(not np.shares_memory(*i.operands))
+
+    # Copy not needed with 'overlap_assume_elementwise', 2 ops, exactly the
+    # same arrays
+ x = arange(10)
+ a = x
+ b = x
+ i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'],
+ ['readwrite', 'overlap_assume_elementwise']])
+ with i:
+ assert_(i.operands[0] is a and i.operands[1] is b)
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i:
+ assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b))
+
+ # Copy not needed, 2 ops, no overlap
+ x = arange(10)
+ a = x[::2]
+ b = x[1::2]
+ i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']])
+ assert_(i.operands[0] is a and i.operands[1] is b)
+
+ # Copy needed, 2 ops, read-write overlap
+ x = arange(4, dtype=np.int8)
+ a = x[3:]
+ b = x.view(np.int32)[:1]
+ with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i:
+ assert_(not np.shares_memory(*i.operands))
+
+ # Copy needed, 3 ops, read-write overlap
+ for flag in ['writeonly', 'readwrite']:
+ x = np.ones([10, 10])
+ a = x
+ b = x.T
+ c = x
+ with nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], [flag]]) as i:
+ a2, b2, c2 = i.operands
+ assert_(not np.shares_memory(a2, c2))
+ assert_(not np.shares_memory(b2, c2))
+
+ # Copy not needed, 3 ops, read-only overlap
+ x = np.ones([10, 10])
+ a = x
+ b = x.T
+ c = x
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['readonly'], ['readonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
+ # Copy not needed, 3 ops, read-only overlap
+ x = np.ones([10, 10])
+ a = x
+ b = np.ones([10, 10])
+ c = x.T
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['writeonly'], ['readonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
+ # Copy not needed, 3 ops, write-only overlap
+ x = np.arange(7)
+ a = x[:3]
+ b = x[3:6]
+ c = x[4:7]
+ i = nditer([a, b, c], ['copy_if_overlap'],
+ [['readonly'], ['writeonly'], ['writeonly']])
+ a2, b2, c2 = i.operands
+ assert_(a is a2)
+ assert_(b is b2)
+ assert_(c is c2)
+
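+# Illustrative sketch (hypothetical helper, not collected by pytest):
+# 'copy_if_overlap' rests on the kind of overlap analysis np.shares_memory
+# exposes, which the assertions above probe.
+def _sketch_overlap_detection():
+    x = arange(10)
+    assert_(np.shares_memory(x[1:], x[:-1]))        # shifted views overlap
+    assert_(not np.shares_memory(x[::2], x[1::2]))  # interleaved views do not
+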
+def test_iter_op_axes():
+ # Check that custom axes work
+
+ # Reverse the axes
+ a = arange(6).reshape(2, 3)
+ i = nditer([a, a.T], [], [['readonly']]*2, op_axes=[[0, 1], [1, 0]])
+ assert_(all([x == y for (x, y) in i]))
+ a = arange(24).reshape(2, 3, 4)
+ i = nditer([a.T, a], [], [['readonly']]*2, op_axes=[[2, 1, 0], None])
+ assert_(all([x == y for (x, y) in i]))
+
+ # Broadcast 1D to any dimension
+ a = arange(1, 31).reshape(2, 3, 5)
+ b = arange(1, 3)
+ i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [0, -1, -1]])
+ assert_equal([x*y for (x, y) in i], (a*b.reshape(2, 1, 1)).ravel())
+ b = arange(1, 4)
+ i = nditer([a, b], [], [['readonly']]*2, op_axes=[None, [-1, 0, -1]])
+ assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 3, 1)).ravel())
+ b = arange(1, 6)
+ i = nditer([a, b], [], [['readonly']]*2,
+ op_axes=[None, [np.newaxis, np.newaxis, 0]])
+ assert_equal([x*y for (x, y) in i], (a*b.reshape(1, 1, 5)).ravel())
+
+ # Inner product-style broadcasting
+ a = arange(24).reshape(2, 3, 4)
+ b = arange(40).reshape(5, 2, 4)
+ i = nditer([a, b], ['multi_index'], [['readonly']]*2,
+ op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]])
+ assert_equal(i.shape, (2, 3, 5, 2))
+
+ # Matrix product-style broadcasting
+ a = arange(12).reshape(3, 4)
+ b = arange(20).reshape(4, 5)
+ i = nditer([a, b], ['multi_index'], [['readonly']]*2,
+ op_axes=[[0, -1], [-1, 1]])
+ assert_equal(i.shape, (3, 5))
+
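+# Illustrative sketch (hypothetical helper, not collected by pytest) showing
+# the op_axes broadcasting idiom above in a complete computation: an outer
+# product where each input maps its only axis to a different iterator axis.
+def _sketch_op_axes_outer():
+    a = arange(2) + 1
+    b = arange(3) + 1
+    it = nditer([a, b, None], [],
+                [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+                op_axes=[[0, -1], [-1, 0], None])
+    with it:
+        for x, y, z in it:
+            z[...] = x * y
+        assert_equal(it.operands[2], np.outer(a, b))
+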
+def test_iter_op_axes_errors():
+ # Check that custom axes throws errors for bad inputs
+
+ # Wrong number of items in op_axes
+ a = arange(6).reshape(2, 3)
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0], [1], [0]])
+ # Out of bounds items in op_axes
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[2, 1], [0, 1]])
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [2, -1]])
+ # Duplicate items in op_axes
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 0], [0, 1]])
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [1, 1]])
+
+ # Different sized arrays in op_axes
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [0, 1, 0]])
+
+ # Non-broadcastable dimensions in the result
+ assert_raises(ValueError, nditer, [a, a], [], [['readonly']]*2,
+ op_axes=[[0, 1], [1, 0]])
+
+def test_iter_copy():
+ # Check that copying the iterator works correctly
+ a = arange(24).reshape(2, 3, 4)
+
+ # Simple iterator
+ i = nditer(a)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterindex = 3
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ # Buffered iterator
+ i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterindex = 3
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterrange = (3, 9)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ i.iterrange = (2, 18)
+ next(i)
+ next(i)
+ j = i.copy()
+ assert_equal([x[()] for x in i], [x[()] for x in j])
+
+ # Casting iterator
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='f8', buffersize=5) as i:
+ j = i.copy()
+ assert_equal([x[()] for x in j], a.ravel(order='F'))
+
+ a = arange(24, dtype='<i4').reshape(2, 3, 4)
+ with nditer(a, ['buffered'], order='F', casting='unsafe',
+ op_dtypes='>f8', buffersize=5) as i:
+ j = i.copy()
+ assert_equal([x[()] for x in j], a.ravel(order='F'))
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["All"])
+@pytest.mark.parametrize("loop_dtype", np.typecodes["All"])
+@pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
+def test_iter_copy_casts(dtype, loop_dtype):
+ # Ensure the dtype is never flexible:
+ if loop_dtype.lower() == "m":
+ loop_dtype = loop_dtype + "[ms]"
+ elif np.dtype(loop_dtype).itemsize == 0:
+ loop_dtype = loop_dtype + "50"
+
+ # Make things a bit more interesting by requiring a byte-swap as well:
+ arr = np.ones(1000, dtype=np.dtype(dtype).newbyteorder())
+ try:
+ expected = arr.astype(loop_dtype)
+ except Exception:
+ # Some casts are not possible, do not worry about them
+ return
+
+ it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
+ op_dtypes=[loop_dtype], casting="unsafe")
+
+ if np.issubdtype(np.dtype(loop_dtype), np.number):
+        # Casting to strings may be strange, but for simple numeric dtypes
+        # do not just trust the astype above -- sanity-check it directly:
+ assert_array_equal(expected, np.ones(1000, dtype=loop_dtype))
+
+ it_copy = it.copy()
+ res = next(it)
+ del it
+ res_copy = next(it_copy)
+ del it_copy
+
+ assert_array_equal(res, expected)
+ assert_array_equal(res_copy, expected)
+
+
+def test_iter_copy_casts_structured():
+ # Test a complicated structured dtype for casting, as it requires
+ # both multiple steps and a more complex casting setup.
+ # Includes a structured -> unstructured (any to object), and many other
+ # casts, which cause this to require all steps in the casting machinery
+ # one level down as well as the iterator copy (which uses NpyAuxData clone)
+ in_dtype = np.dtype([("a", np.dtype("i,")),
+ ("b", np.dtype(">i,<i,>d,S17,>d,(3)f,O,i1"))])
+ out_dtype = np.dtype([("a", np.dtype("O")),
+ ("b", np.dtype(">i,>i,S17,>d,>U3,(3)d,i1,O"))])
+ arr = np.ones(1000, dtype=in_dtype)
+
+ it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"],
+ op_dtypes=[out_dtype], casting="unsafe")
+ it_copy = it.copy()
+
+ res1 = next(it)
+ del it
+ res2 = next(it_copy)
+ del it_copy
+
+ expected = arr["a"].astype(out_dtype["a"])
+ assert_array_equal(res1["a"], expected)
+ assert_array_equal(res2["a"], expected)
+
+ for field in in_dtype["b"].names:
+ # Note that the .base avoids the subarray field
+ expected = arr["b"][field].astype(out_dtype["b"][field].base)
+ assert_array_equal(res1["b"][field], expected)
+ assert_array_equal(res2["b"][field], expected)
+
+
+def test_iter_allocate_output_simple():
+ # Check that the iterator will properly allocate outputs
+
+ # Simple case
+ a = arange(6)
+ i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')])
+ assert_equal(i.operands[1].shape, a.shape)
+ assert_equal(i.operands[1].dtype, np.dtype('f4'))
+
+def test_iter_allocate_output_buffered_readwrite():
+ # Allocated output with buffering + delay_bufalloc
+
+ a = arange(6)
+ i = nditer([a, None], ['buffered', 'delay_bufalloc'],
+ [['readonly'], ['allocate', 'readwrite']])
+ with i:
+ i.operands[1][:] = 1
+ i.reset()
+ for x in i:
+ x[1][...] += x[0][...]
+ assert_equal(i.operands[1], a+1)
+
+def test_iter_allocate_output_itorder():
+ # The allocated output should match the iteration order
+
+ # C-order input, best iteration order
+ a = arange(6, dtype='i4').reshape(2, 3)
+ i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')])
+ assert_equal(i.operands[1].shape, a.shape)
+ assert_equal(i.operands[1].strides, a.strides)
+ assert_equal(i.operands[1].dtype, np.dtype('f4'))
+ # F-order input, best iteration order
+ a = arange(24, dtype='i4').reshape(2, 3, 4).T
+ i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')])
+ assert_equal(i.operands[1].shape, a.shape)
+ assert_equal(i.operands[1].strides, a.strides)
+ assert_equal(i.operands[1].dtype, np.dtype('f4'))
+ # Non-contiguous input, C iteration order
+ a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1)
+ i = nditer([a, None], [],
+ [['readonly'], ['writeonly', 'allocate']],
+ order='C',
+ op_dtypes=[None, np.dtype('f4')])
+ assert_equal(i.operands[1].shape, a.shape)
+ assert_equal(i.operands[1].strides, (32, 16, 4))
+ assert_equal(i.operands[1].dtype, np.dtype('f4'))
+
+def test_iter_allocate_output_opaxes():
+ # Specifying op_axes should work
+
+ a = arange(24, dtype='i4').reshape(2, 3, 4)
+ i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']],
+ op_dtypes=[np.dtype('u4'), None],
+ op_axes=[[1, 2, 0], None])
+ assert_equal(i.operands[0].shape, (4, 2, 3))
+ assert_equal(i.operands[0].strides, (4, 48, 16))
+ assert_equal(i.operands[0].dtype, np.dtype('u4'))
+
+def test_iter_allocate_output_types_promotion():
+ # Check type promotion of automatic outputs
+
+ i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [],
+ [['readonly']]*2+[['writeonly', 'allocate']])
+ assert_equal(i.dtypes[2], np.dtype('f8'))
+ i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [],
+ [['readonly']]*2+[['writeonly', 'allocate']])
+ assert_equal(i.dtypes[2], np.dtype('f8'))
+ i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [],
+ [['readonly']]*2+[['writeonly', 'allocate']])
+ assert_equal(i.dtypes[2], np.dtype('f4'))
+ i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [],
+ [['readonly']]*2+[['writeonly', 'allocate']])
+ assert_equal(i.dtypes[2], np.dtype('u4'))
+ i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [],
+ [['readonly']]*2+[['writeonly', 'allocate']])
+ assert_equal(i.dtypes[2], np.dtype('i8'))
+
+def test_iter_allocate_output_types_byte_order():
+ # Verify the rules for byte order changes
+
+ # When there's just one input, the output type exactly matches
+ a = array([3], dtype='u4').newbyteorder()
+ i = nditer([a, None], [],
+ [['readonly'], ['writeonly', 'allocate']])
+ assert_equal(i.dtypes[0], i.dtypes[1])
+ # With two or more inputs, the output type is in native byte order
+ i = nditer([a, a, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ assert_(i.dtypes[0] != i.dtypes[2])
+ assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2])
+
+def test_iter_allocate_output_types_scalar():
+ # If the inputs are all scalars, the output should be a scalar
+
+ i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [],
+ [['writeonly', 'allocate']] + [['readonly']]*4)
+ assert_equal(i.operands[0].dtype, np.dtype('complex128'))
+ assert_equal(i.operands[0].ndim, 0)
+
+def test_iter_allocate_output_subtype():
+ # Make sure that the subtype with priority wins
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 15
+
+ # subclass vs ndarray
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
+ b = np.arange(4).reshape(2, 2).T
+ i = nditer([a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ assert_equal(type(a), type(i.operands[2]))
+ assert_(type(b) is not type(i.operands[2]))
+ assert_equal(i.operands[2].shape, (2, 2))
+
+ # If subtypes are disabled, we should get back an ndarray.
+ i = nditer([a, b, None], [],
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
+ assert_equal(type(b), type(i.operands[2]))
+ assert_(type(a) is not type(i.operands[2]))
+ assert_equal(i.operands[2].shape, (2, 2))
+
+def test_iter_allocate_output_errors():
+ # Check that the iterator will throw errors for bad output allocations
+
+ # Need an input if no output data type is specified
+ a = arange(6)
+ assert_raises(TypeError, nditer, [a, None], [],
+ [['writeonly'], ['writeonly', 'allocate']])
+ # Allocated output should be flagged for writing
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['allocate', 'readonly']])
+ # Allocated output can't have buffering without delayed bufalloc
+ assert_raises(ValueError, nditer, [a, None], ['buffered'],
+ ['allocate', 'readwrite'])
+    # Must specify dtype if there are no inputs (cannot promote existing ones;
+    # maybe this should use the 'f4' given below, but historically it does not.)
+ assert_raises(TypeError, nditer, [None, None], [],
+ [['writeonly', 'allocate'],
+ ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')])
+ # If using op_axes, must specify all the axes
+ a = arange(24, dtype='i4').reshape(2, 3, 4)
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')],
+ op_axes=[None, [0, np.newaxis, 1]])
+ # If using op_axes, the axes must be within bounds
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')],
+ op_axes=[None, [0, 3, 1]])
+ # If using op_axes, there can't be duplicates
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['writeonly', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')],
+ op_axes=[None, [0, 2, 1, 0]])
+    # Not all axes may be specified for a reduction; if there is a hole
+    # in op_axes, this is an error.
+ a = arange(24, dtype='i4').reshape(2, 3, 4)
+ assert_raises(ValueError, nditer, [a, None], ["reduce_ok"],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_dtypes=[None, np.dtype('f4')],
+ op_axes=[None, [0, np.newaxis, 2]])
+
+def test_all_allocated():
+ # When no output and no shape is given, `()` is used as shape.
+ i = np.nditer([None], op_dtypes=["int64"])
+ assert i.operands[0].shape == ()
+ assert i.dtypes == (np.dtype("int64"),)
+
+ i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4))
+ assert i.operands[0].shape == (2, 3, 4)
+
+def test_iter_remove_axis():
+ a = arange(24).reshape(2, 3, 4)
+
+ i = nditer(a, ['multi_index'])
+ i.remove_axis(1)
+ assert_equal([x for x in i], a[:, 0,:].ravel())
+
+ a = a[::-1,:,:]
+ i = nditer(a, ['multi_index'])
+ i.remove_axis(0)
+ assert_equal([x for x in i], a[0,:,:].ravel())
+
+def test_iter_remove_multi_index_inner_loop():
+ # Check that removing multi-index support works
+
+ a = arange(24).reshape(2, 3, 4)
+
+ i = nditer(a, ['multi_index'])
+ assert_equal(i.ndim, 3)
+ assert_equal(i.shape, (2, 3, 4))
+ assert_equal(i.itviews[0].shape, (2, 3, 4))
+
+ # Removing the multi-index tracking causes all dimensions to coalesce
+ before = [x for x in i]
+ i.remove_multi_index()
+ after = [x for x in i]
+
+ assert_equal(before, after)
+ assert_equal(i.ndim, 1)
+ assert_raises(ValueError, lambda i:i.shape, i)
+ assert_equal(i.itviews[0].shape, (24,))
+
+    # Enabling the external loop hands out one coalesced chunk, so there is
+    # just one iteration
+ i.reset()
+ assert_equal(i.itersize, 24)
+ assert_equal(i[0].shape, tuple())
+ i.enable_external_loop()
+ assert_equal(i.itersize, 24)
+ assert_equal(i[0].shape, (24,))
+ assert_equal(i.value, arange(24))
+
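+# Illustrative sketch (hypothetical helper, not collected by pytest):
+# 'external_loop', as enabled above, hands out whole 1-D chunks instead of
+# 0-d scalars; a contiguous array coalesces into a single chunk.
+def _sketch_external_loop():
+    a = arange(24).reshape(2, 3, 4)
+    chunks = [x.shape for x in nditer(a, ['external_loop'])]
+    assert_equal(chunks, [(24,)])
+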
+def test_iter_iterindex():
+ # Make sure iterindex works
+
+ buffersize = 5
+ a = arange(24).reshape(4, 3, 2)
+ for flags in ([], ['buffered']):
+ i = nditer(a, flags, buffersize=buffersize)
+ assert_equal(iter_iterindices(i), list(range(24)))
+ i.iterindex = 2
+ assert_equal(iter_iterindices(i), list(range(2, 24)))
+
+ i = nditer(a, flags, order='F', buffersize=buffersize)
+ assert_equal(iter_iterindices(i), list(range(24)))
+ i.iterindex = 5
+ assert_equal(iter_iterindices(i), list(range(5, 24)))
+
+ i = nditer(a[::-1], flags, order='F', buffersize=buffersize)
+ assert_equal(iter_iterindices(i), list(range(24)))
+ i.iterindex = 9
+ assert_equal(iter_iterindices(i), list(range(9, 24)))
+
+ i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize)
+ assert_equal(iter_iterindices(i), list(range(24)))
+ i.iterindex = 13
+ assert_equal(iter_iterindices(i), list(range(13, 24)))
+
+ i = nditer(a[::1, ::-1], flags, buffersize=buffersize)
+ assert_equal(iter_iterindices(i), list(range(24)))
+ i.iterindex = 23
+ assert_equal(iter_iterindices(i), list(range(23, 24)))
+ i.reset()
+ i.iterindex = 2
+ assert_equal(iter_iterindices(i), list(range(2, 24)))
+
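+# iter_iterindices is presumably defined earlier in this file; for reference,
+# an equivalent sketch (hypothetical name, not collected by pytest) of what
+# the assertions above rely on -- collecting iterindex as the iterator
+# advances:
+def _sketch_iter_iterindices(it):
+    ret = []
+    while not it.finished:
+        ret.append(it.iterindex)
+        it.iternext()
+    return ret
+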
+def test_iter_iterrange():
+ # Make sure getting and resetting the iterrange works
+
+ buffersize = 5
+ a = arange(24, dtype='i4').reshape(4, 3, 2)
+ a_fort = a.ravel(order='F')
+
+ i = nditer(a, ['ranged'], ['readonly'], order='F',
+ buffersize=buffersize)
+ assert_equal(i.iterrange, (0, 24))
+ assert_equal([x[()] for x in i], a_fort)
+ for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
+ i.iterrange = r
+ assert_equal(i.iterrange, r)
+ assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
+
+ i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F',
+ op_dtypes='f8', buffersize=buffersize)
+ assert_equal(i.iterrange, (0, 24))
+ assert_equal([x[()] for x in i], a_fort)
+ for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
+ i.iterrange = r
+ assert_equal(i.iterrange, r)
+ assert_equal([x[()] for x in i], a_fort[r[0]:r[1]])
+
+ def get_array(i):
+ val = np.array([], dtype='f8')
+ for x in i:
+ val = np.concatenate((val, x))
+ return val
+
+ i = nditer(a, ['ranged', 'buffered', 'external_loop'],
+ ['readonly'], order='F',
+ op_dtypes='f8', buffersize=buffersize)
+ assert_equal(i.iterrange, (0, 24))
+ assert_equal(get_array(i), a_fort)
+ for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]:
+ i.iterrange = r
+ assert_equal(i.iterrange, r)
+ assert_equal(get_array(i), a_fort[r[0]:r[1]])
+
+def test_iter_buffering():
+ # Test buffering with several buffer sizes and types
+ arrays = []
+ # F-order swapped array
+ arrays.append(np.arange(24,
+ dtype='c16').reshape(2, 3, 4).T.newbyteorder().byteswap())
+ # Contiguous 1-dimensional array
+ arrays.append(np.arange(10, dtype='f4'))
+ # Unaligned array
+ a = np.zeros((4*16+1,), dtype='i1')[1:]
+ a.dtype = 'i4'
+ a[:] = np.arange(16, dtype='i4')
+ arrays.append(a)
+ # 4-D F-order array
+ arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T)
+ for a in arrays:
+ for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024):
+ vals = []
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readonly', 'nbo', 'aligned']],
+ order='C',
+ casting='equiv',
+ buffersize=buffersize)
+ while not i.finished:
+ assert_(i[0].size <= buffersize)
+ vals.append(i[0].copy())
+ i.iternext()
+ assert_equal(np.concatenate(vals), a.ravel(order='C'))
+
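+# Illustrative sketch (hypothetical helper, not collected by pytest) of the
+# invariant asserted above: with 'buffered' + 'external_loop', every chunk
+# the iterator delivers is capped at buffersize.
+def _sketch_buffersize_cap():
+    a = np.arange(10, dtype='f4')
+    it = nditer(a, ['buffered', 'external_loop'], [['readonly']],
+                casting='safe', op_dtypes=[np.dtype('f8')], buffersize=4)
+    sizes = [x.size for x in it]
+    assert_(max(sizes) <= 4)
+    assert_equal(sum(sizes), a.size)
+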
+def test_iter_write_buffering():
+ # Test that buffering of writes is working
+
+ # F-order swapped array
+ a = np.arange(24).reshape(2, 3, 4).T.newbyteorder().byteswap()
+ i = nditer(a, ['buffered'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='equiv',
+ order='C',
+ buffersize=16)
+ x = 0
+ with i:
+ while not i.finished:
+ i[0] = x
+ x += 1
+ i.iternext()
+ assert_equal(a.ravel(order='C'), np.arange(24))
+
+def test_iter_buffering_delayed_alloc():
+ # Test that delaying buffer allocation works
+
+ a = np.arange(6)
+ b = np.arange(1, dtype='f4')
+ i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'],
+ ['readwrite'],
+ casting='unsafe',
+ op_dtypes='f4')
+ assert_(i.has_delayed_bufalloc)
+ assert_raises(ValueError, lambda i:i.multi_index, i)
+ assert_raises(ValueError, lambda i:i[0], i)
+ assert_raises(ValueError, lambda i:i[0:2], i)
+
+ def assign_iter(i):
+ i[0] = 0
+ assert_raises(ValueError, assign_iter, i)
+
+ i.reset()
+ assert_(not i.has_delayed_bufalloc)
+ assert_equal(i.multi_index, (0,))
+ with i:
+ assert_equal(i[0], 0)
+ i[1] = 1
+ assert_equal(i[0:2], [0, 1])
+ assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1]*6)))
+
+def test_iter_buffered_cast_simple():
+ # Test that buffering can handle a simple cast
+
+ a = np.arange(10, dtype='f4')
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f8')],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+
+ assert_equal(a, 2*np.arange(10, dtype='f4'))
+
+def test_iter_buffered_cast_byteswapped():
+ # Test that buffering can handle a cast which requires swap->cast->swap
+
+ a = np.arange(10, dtype='f4').newbyteorder().byteswap()
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f8').newbyteorder()],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+
+ assert_equal(a, 2*np.arange(10, dtype='f4'))
+
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
+
+ a = np.arange(10, dtype='f8').newbyteorder().byteswap()
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='unsafe',
+ op_dtypes=[np.dtype('c8').newbyteorder()],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+
+ assert_equal(a, 2*np.arange(10, dtype='f8'))
+
+def test_iter_buffered_cast_byteswapped_complex():
+ # Test that buffering can handle a cast which requires swap->cast->copy
+
+ a = np.arange(10, dtype='c8').newbyteorder().byteswap()
+ a += 2j
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('c16')],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
+
+ a = np.arange(10, dtype='c8')
+ a += 2j
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('c16').newbyteorder()],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype='c8') + 4j)
+
+ a = np.arange(10, dtype=np.clongdouble).newbyteorder().byteswap()
+ a += 2j
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('c16')],
+ buffersize=3)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype=np.clongdouble) + 4j)
+
+ a = np.arange(10, dtype=np.longdouble).newbyteorder().byteswap()
+ i = nditer(a, ['buffered', 'external_loop'],
+ [['readwrite', 'nbo', 'aligned']],
+ casting='same_kind',
+ op_dtypes=[np.dtype('f4')],
+ buffersize=7)
+ with i:
+ for v in i:
+ v[...] *= 2
+ assert_equal(a, 2*np.arange(10, dtype=np.longdouble))
+
+def test_iter_buffered_cast_structured_type():
+ # Tests buffering of structured types
+
+ # simple -> struct type (duplicates the value)
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+ a = np.arange(3, dtype='f4') + 0.5
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt)
+ vals = [np.array(x) for x in i]
+ assert_equal(vals[0]['a'], 0.5)
+ assert_equal(vals[0]['b'], 0)
+ assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
+ assert_equal(vals[0]['d'], 0.5)
+ assert_equal(vals[1]['a'], 1.5)
+ assert_equal(vals[1]['b'], 1)
+ assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
+ assert_equal(vals[1]['d'], 1.5)
+ assert_equal(vals[0].dtype, np.dtype(sdt))
+
+ # object -> struct type
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+ a = np.zeros((3,), dtype='O')
+ a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5)
+ a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5)
+ a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5)
+ if HAS_REFCOUNT:
+ rc = sys.getrefcount(a[0])
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt)
+ vals = [x.copy() for x in i]
+ assert_equal(vals[0]['a'], 0.5)
+ assert_equal(vals[0]['b'], 0)
+ assert_equal(vals[0]['c'], [[(0.5)]*3]*2)
+ assert_equal(vals[0]['d'], 0.5)
+ assert_equal(vals[1]['a'], 1.5)
+ assert_equal(vals[1]['b'], 1)
+ assert_equal(vals[1]['c'], [[(1.5)]*3]*2)
+ assert_equal(vals[1]['d'], 1.5)
+ assert_equal(vals[0].dtype, np.dtype(sdt))
+ vals, i, x = [None]*3
+ if HAS_REFCOUNT:
+ assert_equal(sys.getrefcount(a[0]), rc)
+
+ # single-field struct type -> simple
+ sdt = [('a', 'f4')]
+ a = np.array([(5.5,), (8,)], dtype=sdt)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')
+ assert_equal([x_[()] for x_ in i], [5, 8])
+
+ # make sure multi-field struct type -> simple doesn't work
+ sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ assert_raises(TypeError, lambda: (
+ nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')))
+
+ # struct type -> struct type (field-wise copy)
+ sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
+ a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ assert_equal([np.array(x_) for x_ in i],
+ [np.array((1, 2, 3), dtype=sdt2),
+ np.array((4, 5, 6), dtype=sdt2)])
+
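+# Illustrative sketch (hypothetical helper, not collected by pytest): the
+# field-wise struct -> struct copy above matches plain astype between
+# structured dtypes, which pairs fields by position, not by name.
+def _sketch_struct_cast_positional():
+    src = np.array([(1, 2.5)], dtype=[('a', 'i4'), ('b', 'f4')])
+    dst = src.astype([('x', 'f8'), ('y', 'f8')])
+    assert_equal(dst['x'][0], 1.0)   # first field -> first field
+    assert_equal(dst['y'][0], 2.5)   # second field -> second field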
+
+def test_iter_buffered_cast_structured_type_failure_with_cleanup():
+ # make sure struct type -> struct type with different
+ # number of fields fails
+ sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ sdt2 = [('b', 'O'), ('a', 'f8')]
+ a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
+
+ for intent in ["readwrite", "readonly", "writeonly"]:
+ # This test was initially designed to test an error at a different
+        # place, but will now raise earlier due to the cast not being possible:
+ # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails.
+ # Without a faulty DType, there is probably no reliable
+ # way to get the initial tested behaviour.
+ simple_arr = np.array([1, 2], dtype="i,i") # requires clean up
+ with pytest.raises(TypeError):
+ nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent],
+ casting='unsafe', op_dtypes=["f,f", sdt2])
+
+
+def test_buffered_cast_error_paths():
+ with pytest.raises(ValueError):
+        # Casting the 'S1' input into the 'i' buffer fails
+ np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"],
+ casting="unsafe", flags=["buffered"])
+
+    # The 'S1' buffer is cast back into the 'i' operand on writeback
+ it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
+ op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
+ with pytest.raises(ValueError):
+ with it:
+ buf = next(it)
+ buf[...] = "a" # cannot be converted to int.
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.")
+def test_buffered_cast_error_paths_unraisable():
+    # The following gives an unraisable error. Pytest sometimes captures that
+    # (depending on the Python and/or pytest version). With Python>=3.8 this
+    # can probably be cleaned up in the future to check for
+    # pytest.PytestUnraisableExceptionWarning:
+ code = textwrap.dedent("""
+ import numpy as np
+
+ it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"],
+ op_flags=["writeonly"], casting="unsafe", flags=["buffered"])
+ buf = next(it)
+ buf[...] = "a"
+ del buf, it # Flushing only happens during deallocate right now.
+ """)
+ res = subprocess.check_output([sys.executable, "-c", code],
+ stderr=subprocess.STDOUT, text=True)
+ assert "ValueError" in res
+
+
+def test_iter_buffered_cast_subarray():
+ # Tests buffering of subarrays
+
+ # one element -> many (copies it to all)
+ sdt1 = [('a', 'f4')]
+ sdt2 = [('a', 'f8', (3, 2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ for x, count in zip(i, list(range(6))):
+ assert_(np.all(x['a'] == count))
+
+ # one element -> many -> back (copies it to all)
+ sdt1 = [('a', 'O', (1, 1))]
+ sdt2 = [('a', 'O', (3, 2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_(np.all(x['a'] == count))
+ x['a'][0] += 2
+ count += 1
+ assert_equal(a['a'], np.arange(6).reshape(6, 1, 1)+2)
+
+ # many -> one element -> back (copies just element 0)
+ sdt1 = [('a', 'O', (3, 2, 2))]
+ sdt2 = [('a', 'O', (1,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ with i:
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ x['a'] += 2
+ count += 1
+ assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1)*np.ones((1, 3, 2, 2))+2)
+
+ # many -> one element -> back (copies just element 0)
+ sdt1 = [('a', 'f8', (3, 2, 2))]
+ sdt2 = [('a', 'O', (1,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ count += 1
+
+ # many -> one element (copies just element 0)
+ sdt1 = [('a', 'O', (3, 2, 2))]
+ sdt2 = [('a', 'f4', (1,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'][:, 0, 0, 0] = np.arange(6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], count)
+ count += 1
+
+ # many -> matching shape (straightforward copy)
+ sdt1 = [('a', 'O', (3, 2, 2))]
+ sdt2 = [('a', 'f4', (3, 2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*3*2*2).reshape(6, 3, 2, 2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], a[count]['a'])
+ count += 1
+
+ # vector -> smaller vector (truncates)
+ sdt1 = [('a', 'f8', (6,))]
+ sdt2 = [('a', 'f4', (2,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*6).reshape(6, 6)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'], a[count]['a'][:2])
+ count += 1
+
+ # vector -> bigger vector (pads with zeros)
+ sdt1 = [('a', 'f8', (2,))]
+ sdt2 = [('a', 'f4', (6,))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2).reshape(6, 2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][:2], a[count]['a'])
+ assert_equal(x['a'][2:], [0, 0, 0, 0])
+ count += 1
+
+ # vector -> matrix (broadcasts)
+ sdt1 = [('a', 'f8', (2,))]
+ sdt2 = [('a', 'f4', (2, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2).reshape(6, 2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][0], a[count]['a'])
+ assert_equal(x['a'][1], a[count]['a'])
+ count += 1
+
+ # vector -> matrix (broadcasts and zero-pads)
+ sdt1 = [('a', 'f8', (2, 1))]
+ sdt2 = [('a', 'f4', (3, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2).reshape(6, 2, 1)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
+ assert_equal(x['a'][:2, 1], a[count]['a'][:, 0])
+ assert_equal(x['a'][2,:], [0, 0])
+ count += 1
+
+ # matrix -> matrix (truncates and zero-pads)
+ sdt1 = [('a', 'f8', (2, 3))]
+ sdt2 = [('a', 'f4', (3, 2))]
+ a = np.zeros((6,), dtype=sdt1)
+ a['a'] = np.arange(6*2*3).reshape(6, 2, 3)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
+ assert_equal(i[0].dtype, np.dtype(sdt2))
+ count = 0
+ for x in i:
+ assert_equal(x['a'][:2, 0], a[count]['a'][:, 0])
+ assert_equal(x['a'][:2, 1], a[count]['a'][:, 1])
+ assert_equal(x['a'][2,:], [0, 0])
+ count += 1
+
+def test_iter_buffering_badwriteback():
+ # Writing back from a buffer cannot combine elements
+
+    # a needs write buffering, but has a broadcast dimension
+ a = np.arange(6).reshape(2, 3, 1)
+ b = np.arange(12).reshape(2, 3, 2)
+ assert_raises(ValueError, nditer, [a, b],
+ ['buffered', 'external_loop'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+
+ # But if a is readonly, it's fine
+ nditer([a, b], ['buffered', 'external_loop'],
+ [['readonly'], ['writeonly']],
+ order='C')
+
+ # If a has just one element, it's fine too (constant 0 stride, a reduction)
+ a = np.arange(1).reshape(1, 1, 1)
+ nditer([a, b], ['buffered', 'external_loop', 'reduce_ok'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+
+ # check that it fails on other dimensions too
+ a = np.arange(6).reshape(1, 3, 2)
+ assert_raises(ValueError, nditer, [a, b],
+ ['buffered', 'external_loop'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+ a = np.arange(4).reshape(2, 1, 2)
+ assert_raises(ValueError, nditer, [a, b],
+ ['buffered', 'external_loop'],
+ [['readwrite'], ['writeonly']],
+ order='C')
+
+def test_iter_buffering_string():
+ # Safe casting disallows shrinking strings
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_)
+ assert_equal(a.dtype, np.dtype('S4'))
+ assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
+ op_dtypes='S2')
+ i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6')
+ assert_equal(i[0], b'abc')
+ assert_equal(i[0].dtype, np.dtype('S6'))
+
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
+ assert_equal(a.dtype, np.dtype('U4'))
+ assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
+ op_dtypes='U2')
+ i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6')
+ assert_equal(i[0], 'abc')
+ assert_equal(i[0].dtype, np.dtype('U6'))
+
+def test_iter_buffering_growinner():
+ # Test that the inner loop grows when no buffering is needed
+ a = np.arange(30)
+ i = nditer(a, ['buffered', 'growinner', 'external_loop'],
+ buffersize=5)
+ # Should end up with just one inner loop here
+ assert_equal(i[0].size, a.size)
+
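+# Illustrative sketch (hypothetical helper, not collected by pytest): without
+# 'growinner' the same setup keeps every chunk within buffersize.
+def _sketch_no_growinner():
+    a = np.arange(30)
+    it = nditer(a, ['buffered', 'external_loop'], buffersize=5)
+    assert_(all(x.size <= 5 for x in it))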
+
+@pytest.mark.slow
+def test_iter_buffered_reduce_reuse():
+    # A 1-element view centered in a buffer large enough that the strided
+    # views built below stay in bounds, including with negative strides.
+ a = np.arange(2*3**5)[3**5:3**5+1]
+ flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
+ op_flags = [('readonly',), ('readwrite', 'allocate')]
+ op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
+ # wrong dtype to force buffering
+ op_dtypes = [float, a.dtype]
+
+ def get_params():
+ for xs in range(-3**2, 3**2 + 1):
+ for ys in range(xs, 3**2 + 1):
+ for op_axes in op_axes_list:
+                    # The last stride is the inner (reduced) one, so its
+                    # exact value is not important for this test.
+ strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
+ arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides)
+
+ for skip in [0, 1]:
+ yield arr, op_axes, skip
+
+ for arr, op_axes, skip in get_params():
+ nditer2 = np.nditer([arr.copy(), None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ op_dtypes=op_dtypes)
+ with nditer2:
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
+
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
+
+ comp_res = nditer2.operands[-1]
+
+ for bufsize in range(0, 3**3):
+ nditer1 = np.nditer([arr, None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ buffersize=bufsize, op_dtypes=op_dtypes)
+ with nditer1:
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
+
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
+
+ res = nditer1.operands[-1]
+ assert_array_equal(res, comp_res)
+
+
+def test_iter_no_broadcast():
+ # Test that the no_broadcast flag works
+ a = np.arange(24).reshape(2, 3, 4)
+ b = np.arange(6).reshape(2, 3, 1)
+ c = np.arange(12).reshape(3, 4)
+
+ nditer([a, b, c], [],
+ [['readonly', 'no_broadcast'],
+ ['readonly'], ['readonly']])
+ assert_raises(ValueError, nditer, [a, b, c], [],
+ [['readonly'], ['readonly', 'no_broadcast'], ['readonly']])
+ assert_raises(ValueError, nditer, [a, b, c], [],
+ [['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
+
+
+class TestIterNested:
+
+ def test_basic(self):
+ # Test nested iteration basic usage
+ a = arange(12).reshape(2, 3, 2)
+
+ i, j = np.nested_iters(a, [[0], [1, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ def test_reorder(self):
+ # Test nested iteration basic usage
+ a = arange(12).reshape(2, 3, 2)
+
+ # In 'K' order (default), it gets reordered
+ i, j = np.nested_iters(a, [[0], [2, 1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[2, 0], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ # In 'C' order, it doesn't
+ i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
+
+ i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
+
+ def test_flip_axes(self):
+ # Test nested iteration with negative axes
+ a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1]
+
+ # In 'K' order (default), the axes all get flipped
+ i, j = np.nested_iters(a, [[0], [1, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ # In 'C' order, flipping axes is disabled
+ i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
+
+ i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
+
+ i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
+
+ def test_broadcast(self):
+ # Test nested iteration with broadcasting
+ a = arange(2).reshape(2, 1)
+ b = arange(3).reshape(1, 3)
+
+ i, j = np.nested_iters([a, b], [[0], [1]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
+
+ i, j = np.nested_iters([a, b], [[1], [0]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
+
+ def test_dtype_copy(self):
+ # Test nested iteration with a copy to change dtype
+
+ # copy
+ a = arange(6, dtype='i4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readonly', 'copy'],
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
+ vals = None
+
+ # writebackifcopy - using context manager
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ # writebackifcopy - using close()
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ op_flags=['readwrite', 'updateifcopy'],
+ casting='same_kind',
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[0, 1, 2], [3, 4, 5]])
+ i.close()
+ j.close()
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ def test_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
+ def test_0d(self):
+ a = np.arange(12).reshape(2, 3, 2)
+ i, j = np.nested_iters(a, [[], [1, 0, 2]])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
+
+ i, j = np.nested_iters(a, [[1, 0, 2], []])
+ vals = [list(j) for _ in i]
+ assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
+
+ i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
+ vals = []
+ for x in i:
+ for y in j:
+ vals.append([z for z in k])
+ assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
+
+ def test_iter_nested_iters_dtype_buffered(self):
+ # Test nested iteration with buffering to change dtype
+
+ a = arange(6, dtype='f4').reshape(2, 3)
+ i, j = np.nested_iters(a, [[0], [1]],
+ flags=['buffered'],
+ op_flags=['readwrite'],
+ casting='same_kind',
+ op_dtypes='f8')
+ with i, j:
+ assert_equal(j[0].dtype, np.dtype('f8'))
+ for x in i:
+ for y in j:
+ y[...] += 1
+ assert_equal(a, [[1, 2, 3], [4, 5, 6]])
+
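+# Illustrative sketch (hypothetical helper, not collected by pytest):
+# nested_iters, as exercised by the class above, splits one iteration space
+# across several iterators -- advancing the outer one repositions the inner.
+def _sketch_nested_iters():
+    a = arange(6).reshape(2, 3)
+    i, j = np.nested_iters(a, [[0], [1]])
+    rows = [list(j) for _ in i]
+    assert_equal(rows, [[0, 1, 2], [3, 4, 5]])
+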
+def test_iter_reduction_error():
+
+ a = np.arange(6)
+ assert_raises(ValueError, nditer, [a, None], [],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0], [-1]])
+
+ a = np.arange(6).reshape(2, 3)
+ assert_raises(ValueError, nditer, [a, None], ['external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0, 1], [-1, -1]])
+
+def test_iter_reduction():
+ # Test doing reductions with the iterator
+
+ a = np.arange(6)
+ i = nditer([a, None], ['reduce_ok'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0], [-1]])
+ # Need to initialize the output operand to the addition unit
+ with i:
+ i.operands[1][...] = 0
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
+
+ a = np.arange(6).reshape(2, 3)
+ i = nditer([a, None], ['reduce_ok', 'external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[0, 1], [-1, -1]])
+ # Need to initialize the output operand to the addition unit
+ with i:
+ i.operands[1][...] = 0
+ # Reduction shape/strides for the output
+ assert_equal(i[1].shape, (6,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has zero strides we use for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(i.operands[1].ndim, 0)
+ assert_equal(i.operands[1], np.sum(a))
+
+ # This is a tricky reduction case for the buffering double loop
+ # to handle
+ a = np.ones((2, 3, 5))
+ it1 = nditer([a, None], ['reduce_ok', 'external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[None, [0, -1, 1]])
+ it2 = nditer([a, None], ['reduce_ok', 'external_loop',
+ 'buffered', 'delay_bufalloc'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[None, [0, -1, 1]], buffersize=10)
+ with it1, it2:
+ it1.operands[1].fill(0)
+ it2.operands[1].fill(0)
+ it2.reset()
+ for x in it1:
+ x[1][...] += x[0]
+ for x in it2:
+ x[1][...] += x[0]
+ assert_equal(it1.operands[1], it2.operands[1])
+ assert_equal(it2.operands[1].sum(), a.size)
+
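+# Illustrative sketch (hypothetical helper, not collected by pytest) of the
+# core reduce pattern above in its smallest form: op_axes maps the reduced
+# axis to -1, the allocated output gets a zero stride there, and element-wise
+# '+=' accumulates into it.
+def _sketch_reduce_axis1():
+    a = np.arange(6).reshape(2, 3)
+    it = nditer([a, None], ['reduce_ok'],
+                [['readonly'], ['readwrite', 'allocate']],
+                op_axes=[[0, 1], [0, -1]])
+    with it:
+        it.operands[1][...] = 0
+        for x, y in it:
+            y[...] += x
+        assert_equal(it.operands[1], a.sum(axis=1))
+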
+def test_iter_buffering_reduction():
+ # Test doing buffered reductions with the iterator
+
+ a = np.arange(6)
+ b = np.array(0., dtype='f8').byteswap().newbyteorder()
+ i = nditer([a, b], ['reduce_ok', 'buffered'],
+ [['readonly'], ['readwrite', 'nbo']],
+ op_axes=[[0], [-1]])
+ with i:
+ assert_equal(i[1].dtype, np.dtype('f8'))
+ assert_(i[1].dtype != b.dtype)
+ # Do the reduction
+ for x, y in i:
+ y[...] += x
+ # Since no axes were specified, should have allocated a scalar
+ assert_equal(b, np.sum(a))
+
+ a = np.arange(6).reshape(2, 3)
+ b = np.array([0, 0], dtype='f8').byteswap().newbyteorder()
+ i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'],
+ [['readonly'], ['readwrite', 'nbo']],
+ op_axes=[[0, 1], [0, -1]])
+ # Reduction shape/strides for the output
+ with i:
+ assert_equal(i[1].shape, (3,))
+ assert_equal(i[1].strides, (0,))
+ # Do the reduction
+ for x, y in i:
+ # Use a for loop instead of ``y[...] += x``
+ # (equivalent to ``y[...] = y[...].copy() + x``),
+ # because y has zero strides we use for the reduction
+ for j in range(len(y)):
+ y[j] += x[j]
+ assert_equal(b, np.sum(a, axis=1))
+
+ # Iterator inner double loop was wrong on this one
+ p = np.arange(2) + 1
+ it = np.nditer([p, None],
+ ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'],
+ [['readonly'], ['readwrite', 'allocate']],
+ op_axes=[[-1, 0], [-1, -1]],
+ itershape=(2, 2))
+ with it:
+ it.operands[1].fill(0)
+ it.reset()
+ assert_equal(it[0], [1, 2, 1, 2])
+
+ # Iterator inner loop should take argument contiguity into account
+ x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0)
+ x[...] = np.arange(x.size).reshape(x.shape)
+ y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4)
+ y_base_copy = y_base.copy()
+ y = y_base[::2,:,None]
+
+ it = np.nditer([y, x],
+ ['buffered', 'external_loop', 'reduce_ok'],
+ [['readwrite'], ['readonly']])
+ with it:
+ for a, b in it:
+ a.fill(2)
+
+ assert_equal(y_base[1::2], y_base_copy[1::2])
+ assert_equal(y_base[::2], 2)
+
+def test_iter_buffering_reduction_reuse_reduce_loops():
+ # There was a bug triggering reuse of the reduce loop inappropriately,
+ # which caused processing to happen in unnecessarily small chunks
+    # and to overrun the buffer.
+
+ a = np.zeros((2, 7))
+ b = np.zeros((1, 7))
+ it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'],
+ op_flags=[['readonly'], ['readwrite']],
+ buffersize=5)
+
+ with it:
+ bufsizes = [x.shape[0] for x, y in it]
+ assert_equal(bufsizes, [5, 2, 5, 2])
+ assert_equal(sum(bufsizes), a.size)
+
+def test_iter_writemasked_badinput():
+ a = np.zeros((2, 3))
+ b = np.zeros((3,))
+ m = np.array([[True, True, False], [False, True, False]])
+ m2 = np.array([True, True, False])
+ m3 = np.array([0, 1, 1], dtype='u1')
+ mbad1 = np.array([0, 1, 1], dtype='i1')
+ mbad2 = np.array([0, 1, 1], dtype='f4')
+
+ # Need an 'arraymask' if any operand is 'writemasked'
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readwrite', 'writemasked'], ['readonly']])
+
+ # A 'writemasked' operand must not be readonly
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readonly', 'writemasked'], ['readonly', 'arraymask']])
+
+ # 'writemasked' and 'arraymask' may not be used together
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readonly'], ['readwrite', 'arraymask', 'writemasked']])
+
+ # 'arraymask' may only be specified once
+ assert_raises(ValueError, nditer, [a, m, m2], [],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask'],
+ ['readonly', 'arraymask']])
+
+ # An 'arraymask' with nothing 'writemasked' also doesn't make sense
+ assert_raises(ValueError, nditer, [a, m], [],
+ [['readwrite'], ['readonly', 'arraymask']])
+
+ # A writemasked reduction requires a similarly smaller mask
+ assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'],
+ [['readonly'],
+ ['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ # But this should work with a smaller/equal mask to the reduction operand
+ np.nditer([a, b, m2], ['reduce_ok'],
+ [['readonly'],
+ ['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ # The arraymask itself cannot be a reduction
+ assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'],
+ [['readonly'],
+ ['readwrite', 'writemasked'],
+ ['readwrite', 'arraymask']])
+
+ # A uint8 mask is ok too
+ np.nditer([a, m3], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['f4', None],
+ casting='same_kind')
+ # An int8 mask isn't ok
+ assert_raises(TypeError, np.nditer, [a, mbad1], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['f4', None],
+ casting='same_kind')
+ # A float32 mask isn't ok
+ assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['f4', None],
+ casting='same_kind')
+
+
+def _is_buffered(iterator):
+ try:
+ iterator.itviews
+ except ValueError:
+ return True
+ return False
+
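+# A minimal usage sketch for the helper above (illustrative, not a test):
+# a forced cast makes the iterator buffer, and accessing ``itviews`` then
+# raises ValueError, so ``_is_buffered`` reports True:
+#
+# it = np.nditer(np.arange(3), ['buffered'], [['readonly']],
+# op_dtypes=['f8'], casting='unsafe')
+# assert _is_buffered(it)
+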
+@pytest.mark.parametrize("a",
+ [np.zeros((3,), dtype='f8'),
+ np.zeros((9876, 3*5), dtype='f8')[::2, :],
+ np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :],
+ # Also test with the last dimension strided (so it does not fit if
+ # there is repeated access)
+ np.zeros((9,), dtype='f8')[::3],
+ np.zeros((9876, 3*10), dtype='f8')[::2, ::5],
+ np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]])
+def test_iter_writemasked(a):
+ # Note, the slicing above is to ensure that nditer cannot combine multiple
+ # axes into one. The repetition is just to make things a bit more
+ # interesting.
+ shape = a.shape
+ reps = shape[-1] // 3
+ msk = np.empty(shape, dtype=bool)
+ msk[...] = [True, True, False] * reps
+
+ # When buffering is unused, 'writemasked' effectively does nothing.
+ # It's up to the user of the iterator to obey the requested semantics.
+ it = np.nditer([a, msk], [],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ with it:
+ for x, m in it:
+ x[...] = 1
+ # Because we violated the semantics, all the values became 1
+ assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape))
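+ # A sketch of obeying the semantics by hand (illustrative): write only
+ # through the mask, e.g. ``x[...] = np.where(m, 1, x)`` inside the loop,
+ # so that values where the mask is False stay untouched.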
+
+ # Even if buffering is enabled, we still may be accessing the array
+ # directly.
+ it = np.nditer([a, msk], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']])
+ # @seberg: I honestly don't currently understand why a "buffered" iterator
+ # would end up not using a buffer for the small array here, at least when
+ # "writemasked" is used; that seems confusing. Check by testing for
+ # actual memory overlap!
+ is_buffered = True
+ with it:
+ for x, m in it:
+ x[...] = 2.5
+ if np.may_share_memory(x, a):
+ is_buffered = False
+
+ if not is_buffered:
+ # Because we violated the semantics, all the values became 2.5
+ assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape))
+ else:
+ # For large sizes, the iterator may be buffered:
+ assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape))
+ a[...] = 2.5
+
+ # If buffering will definitely happen, for instance because of
+ # a cast, only the items selected by the mask will be copied back from
+ # the buffer.
+ it = np.nditer([a, msk], ['buffered'],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=['i8', None],
+ casting='unsafe')
+ with it:
+ for x, m in it:
+ x[...] = 3
+ # Even though we violated the semantics, only the selected values
+ # were copied back
+ assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape))
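+ # The net effect matches plain masked assignment (illustrative):
+ # ``a[msk] = 3`` would likewise leave the unselected 2.5 values in place.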
+
+
+@pytest.mark.parametrize(["mask", "mask_axes"], [
+ # Allocated operand (only broadcasts with -1)
+ (None, [-1, 0]),
+ # Reduction along the first dimension (with and without op_axes)
+ (np.zeros((1, 4), dtype="bool"), [0, 1]),
+ (np.zeros((1, 4), dtype="bool"), None),
+ # Test 0-D and -1 op_axes
+ (np.zeros(4, dtype="bool"), [-1, 0]),
+ (np.zeros((), dtype="bool"), [-1, -1]),
+ (np.zeros((), dtype="bool"), None)])
+def test_iter_writemasked_broadcast_error(mask, mask_axes):
+ # This assumes that a readwrite mask makes sense. This is likely not the
+ # case and should simply be deprecated.
+ arr = np.zeros((3, 4))
+ itflags = ["reduce_ok"]
+ mask_flags = ["arraymask", "readwrite", "allocate"]
+ a_flags = ["writeonly", "writemasked"]
+ if mask_axes is None:
+ op_axes = None
+ else:
+ op_axes = [mask_axes, [0, 1]]
+
+ with assert_raises(ValueError):
+ np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags],
+ op_axes=op_axes)
+
+
+def test_iter_writemasked_decref():
+ # force casting (to make it interesting) by using a structured dtype.
+ arr = np.arange(10000).astype(">i,O")
+ original = arr.copy()
+ mask = np.random.randint(0, 2, size=10000).astype(bool)
+
+ it = np.nditer([arr, mask], ['buffered', "refs_ok"],
+ [['readwrite', 'writemasked'],
+ ['readonly', 'arraymask']],
+ op_dtypes=["<i,O", "?"])
+ singleton = object()
+ if HAS_REFCOUNT:
+ count = sys.getrefcount(singleton)
+ for buf, mask_buf in it:
+ buf[...] = (3, singleton)
+
+ del buf, mask_buf, it # delete everything to ensure correct cleanup
+
+ if HAS_REFCOUNT:
+ # The buffer would have included additional items, they must be
+ # cleared correctly:
+ assert sys.getrefcount(singleton) - count == np.count_nonzero(mask)
+
+ assert_array_equal(arr[~mask], original[~mask])
+ assert (arr[mask] == np.array((3, singleton), arr.dtype)).all()
+ del arr
+
+ if HAS_REFCOUNT:
+ assert sys.getrefcount(singleton) == count
+
+
+def test_iter_non_writable_attribute_deletion():
+ it = np.nditer(np.ones(2))
+ attr = ["value", "shape", "operands", "itviews", "has_delayed_bufalloc",
+ "iterationneedsapi", "has_multi_index", "has_index", "dtypes",
+ "ndim", "nop", "itersize", "finished"]
+
+ for s in attr:
+ assert_raises(AttributeError, delattr, it, s)
+
+
+def test_iter_writable_attribute_deletion():
+ it = np.nditer(np.ones(2))
+ attr = [ "multi_index", "index", "iterrange", "iterindex"]
+ for s in attr:
+ assert_raises(AttributeError, delattr, it, s)
+
+
+def test_iter_element_deletion():
+ it = np.nditer(np.ones(3))
+ # deleting iterator elements or slices must raise TypeError
+ with assert_raises(TypeError):
+ del it[1]
+ with assert_raises(TypeError):
+ del it[1:2]
+
+def test_iter_allocated_array_dtypes():
+ # If the dtype of an allocated output has a shape, the shape gets
+ # appended to the shape of the result.
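+ # For example (illustrative): an input of shape (3,) with op_dtype
+ # ('i4', (2,)) allocates an output of shape (3, 2), as checked below.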
+ it = np.nditer(([1, 3, 20], None), op_dtypes=[None, ('i4', (2,))])
+ for a, b in it:
+ b[0] = a - 1
+ b[1] = a + 1
+ assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
+
+ # Check the same (less sensitive) thing when `op_axes` with -1 is given.
+ it = np.nditer(([[1, 3, 20]], None), op_dtypes=[None, ('i4', (2,))],
+ flags=["reduce_ok"], op_axes=[None, (-1, 0)])
+ for a, b in it:
+ b[0] = a - 1
+ b[1] = a + 1
+ assert_equal(it.operands[1], [[0, 2], [2, 4], [19, 21]])
+
+ # Make sure this works for scalars too
+ it = np.nditer((10, 2, None), op_dtypes=[None, None, ('i4', (2, 2))])
+ for a, b, c in it:
+ c[0, 0] = a - b
+ c[0, 1] = a + b
+ c[1, 0] = a * b
+ c[1, 1] = a / b
+ assert_equal(it.operands[2], [[8, 12], [20, 5]])
+
+
+def test_0d_iter():
+ # Basic test for iteration of 0-d arrays:
+ i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
+ assert_equal(i.ndim, 0)
+ assert_equal(next(i), (2, 3))
+ assert_equal(i.multi_index, ())
+ assert_equal(i.iterindex, 0)
+ assert_raises(StopIteration, next, i)
+ # test reset:
+ i.reset()
+ assert_equal(next(i), (2, 3))
+ assert_raises(StopIteration, next, i)
+
+ # test forcing to 0-d
+ i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
+ assert_equal(i.ndim, 0)
+ assert_equal(len(i), 1)
+
+ i = nditer(np.arange(5), ['multi_index'], [['readonly']],
+ op_axes=[()], itershape=())
+ assert_equal(i.ndim, 0)
+ assert_equal(len(i), 1)
+
+ # passing an itershape alone is not enough; the op_axes are also needed
+ with assert_raises(ValueError):
+ nditer(np.arange(5), ['multi_index'], [['readonly']], itershape=())
+
+ # Test a more complex buffered casting case (same as another test above)
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')]
+ a = np.array(0.5, dtype='f4')
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe', op_dtypes=sdt)
+ vals = next(i)
+ assert_equal(vals['a'], 0.5)
+ assert_equal(vals['b'], 0)
+ assert_equal(vals['c'], [[(0.5)]*3]*2)
+ assert_equal(vals['d'], 0.5)
+
+def test_object_iter_cleanup():
+ # see gh-18450
+ # object arrays can raise a python exception in ufunc inner loops using
+ # nditer, which should cause iteration to stop & cleanup. There were bugs
+ # in the nditer cleanup when decref'ing object arrays.
+ # This test would trigger valgrind "uninitialized read" before the bugfix.
+ assert_raises(TypeError, lambda: np.zeros((17000, 2), dtype='f4') * None)
+
+ # this more explicit code also triggers the invalid access
+ arr = np.arange(np.BUFSIZE * 10).reshape(10, -1).astype(str)
+ oarr = arr.astype(object)
+ oarr[:, -1] = None
+ assert_raises(TypeError, lambda: np.add(oarr[:, ::-1], arr[:, ::-1]))
+
+ # followup: this tests for a bug introduced in the first pass of gh-18450,
+ # caused by an incorrect fallthrough of the TypeError
+ class T:
+ def __bool__(self):
+ raise TypeError("Ambiguous")
+ assert_raises(TypeError, np.logical_or.reduce,
+ np.array([T(), T()], dtype='O'))
+
+def test_object_iter_cleanup_reduce():
+ # Similar as above, but a complex reduction case that was previously
+ # missed (see gh-18810).
+ # The following array is special in that it cannot be flattened:
+ arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2]
+ with pytest.raises(TypeError):
+ np.sum(arr)
+
+@pytest.mark.parametrize("arr", [
+ np.ones((8000, 4, 2), dtype=object)[:, ::2, :],
+ np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :],
+ np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")])
+def test_object_iter_cleanup_large_reduce(arr):
+ # More complicated calls are possible for large arrays:
+ out = np.ones(8000, dtype=np.intp)
+ # force casting with `dtype=object`
+ res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
+ assert_array_equal(res, np.full(8000, 4, dtype=object))
+
+def test_iter_too_large():
+ # The total size of the iterator must not exceed the maximum intp due
+ # to broadcasting. Dividing by 1024 will keep it small enough to
+ # give a legal array.
+ size = np.iinfo(np.intp).max // 1024
+ arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
+ assert_raises(ValueError, nditer, (arr, arr[:, None]))
+ # Test the same for multi_index. That may get more interesting when
+ # removing a 0-dimensional axis is allowed (since the iterator can then grow)
+ assert_raises(ValueError, nditer,
+ (arr, arr[:, None]), flags=['multi_index'])
+
+
+def test_iter_too_large_with_multiindex():
+ # When a multi index is being tracked, the error is delayed. This
+ # checks the delayed error messages, and checks getting below the limit
+ # by removing an axis.
+ base_size = 2**10
+ num = 1
+ while base_size**num < np.iinfo(np.intp).max:
+ num += 1
+
+ shape_template = [1, 1] * num
+ arrays = []
+ for i in range(num):
+ shape = shape_template[:]
+ shape[i * 2] = 2**10
+ arrays.append(np.empty(shape))
+ arrays = tuple(arrays)
+
+ # The arrays are now too large to be broadcast. The different modes test
+ # different nditer functionality with or without the GIL.
+ for mode in range(6):
+ with assert_raises(ValueError):
+ _multiarray_tests.test_nditer_too_large(arrays, -1, mode)
+ # but if we do nothing with the nditer, it can be constructed:
+ _multiarray_tests.test_nditer_too_large(arrays, -1, 7)
+
+ # When an axis is removed, things should work again (half the time):
+ for i in range(num):
+ for mode in range(6):
+ # an axis with size 1024 is removed:
+ _multiarray_tests.test_nditer_too_large(arrays, i*2, mode)
+ # an axis with size 1 is removed:
+ with assert_raises(ValueError):
+ _multiarray_tests.test_nditer_too_large(arrays, i*2 + 1, mode)
+
+def test_writebacks():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ assert_(a.dtype.byteorder != au.dtype.byteorder)
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ it.operands[0][:] = 100
+ assert_equal(au, 100)
+ # do it again, this time raise an error
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ try:
+ with it:
+ assert_equal(au.flags.writeable, False)
+ it.operands[0][:] = 0
+ raise ValueError('exit context manager on exception')
+ except ValueError:
+ pass
+ assert_equal(au, 0)
+ assert_equal(au.flags.writeable, True)
+ # cannot reuse the iterator outside the context manager
+ assert_raises(ValueError, getattr, it, 'operands')
+
+ it = nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ with it:
+ x = it.operands[0]
+ x[:] = 6
+ assert_(x.flags.writebackifcopy)
+ assert_equal(au, 6)
+ assert_(not x.flags.writebackifcopy)
+ x[:] = 123 # x.data still valid
+ assert_equal(au, 6) # but not connected to au
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # reentering works
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ # make sure exiting the inner context manager closes the iterator
+ with it:
+ with it:
+ for x in it:
+ x[...] = 123
+ assert_raises(ValueError, getattr, it, 'operands')
+ # do not crash if original data array is decrefed
+ it = nditer(au, [],
+ [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del au
+ with it:
+ for x in it:
+ x[...] = 123
+ # make sure we cannot reenter the closed iterator
+ enter = it.__enter__
+ assert_raises(RuntimeError, enter)
+
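+# Summary of the 'updateifcopy' behaviour tested above: it.operands[0] is a
+# temporary cast copy of the operand, and writes to it reach the original
+# array only when the iterator is closed (via close(), leaving the `with`
+# block, or deallocation, which warns -- see test_warn_noclose below).
+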
+def test_close_equivalent():
+ '''Using a context manager and using nditer.close() are equivalent.
+ '''
+ def add_close(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ ret = it.operands[2]
+ it.close()
+ return ret
+
+ def add_context(x, y, out=None):
+ addop = np.add
+ it = np.nditer([x, y, out], [],
+ [['readonly'], ['readonly'], ['writeonly','allocate']])
+ with it:
+ for (a, b, c) in it:
+ addop(a, b, out=c)
+ return it.operands[2]
+ z = add_close(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+ z = add_context(range(5), range(5))
+ assert_equal(z, range(0, 10, 2))
+
+def test_close_raises():
+ it = np.nditer(np.arange(3))
+ assert_equal(next(it), 0)
+ it.close()
+ assert_raises(StopIteration, next, it)
+ assert_raises(ValueError, getattr, it, 'operands')
+
+def test_close_parameters():
+ it = np.nditer(np.arange(3))
+ assert_raises(TypeError, it.close, 1)
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_warn_noclose():
+ a = np.arange(6, dtype='f4')
+ au = a.byteswap().newbyteorder()
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ it = np.nditer(au, [], [['readwrite', 'updateifcopy']],
+ casting='equiv', op_dtypes=[np.dtype('f4')])
+ del it
+ assert len(sup.log) == 1
+
+
+@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32",
+ reason="Errors with Python 3.9 on Windows")
+@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
+ [("i", "O"), ("O", "i"), # most simple cases
+ ("i,O", "O,O"), # structured partially only copying O
+ ("O,i", "i,O"), # structured casting to and from O
+ ])
+@pytest.mark.parametrize("steps", [1, 2, 3])
+def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps):
+ """
+ Checks for reference counting leaks during cleanup. Using explicit
+ reference counts leads to occasional false positives (at least in
+ parallel test setups), so this test should instead be run with a leak
+ checker such as pytest-valgrind or pytest-leaks.
+ """
+ value = 2**30 + 1 # just a random value that Python won't intern
+ arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ for step in range(steps):
+ # The iteration finishes in 3 steps, the first two are partial
+ next(it)
+
+ del it # not necessary, but we test the cleanup
+
+ # Repeat the test with `iternext`
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ for step in range(steps):
+ it.iternext()
+
+ del it # not necessary, but we test the cleanup
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize(["in_dtype", "buf_dtype"],
+ [("O", "i"), # most simple cases
+ ("O,i", "i,O"), # structured casting to and from O
+ ])
+def test_partial_iteration_error(in_dtype, buf_dtype):
+ value = 123 # relies on the Python small-int cache (a leak checker will still find it)
+ arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype)
+ if in_dtype == "O":
+ arr[int(np.BUFSIZE * 1.5)] = None
+ else:
+ arr[int(np.BUFSIZE * 1.5)]["f0"] = None
+
+ count = sys.getrefcount(value)
+
+ it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)],
+ flags=["buffered", "external_loop", "refs_ok"], casting="unsafe")
+ with pytest.raises(TypeError):
+ # pytest.raises seems to have issues with the error originating
+ # in the for loop, so manually unravel:
+ next(it)
+ next(it) # raises TypeError
+
+ # Repeat the test with `iternext` after resetting, the buffers should
+ # already be cleared from any references, so resetting is sufficient.
+ it.reset()
+ with pytest.raises(TypeError):
+ it.iternext()
+ it.iternext()
+
+ assert count == sys.getrefcount(value)
+
+
+def test_debug_print(capfd):
+ """
+ Matches the expected output of a debug print with the actual output.
+ Note that the iterator dump should not be considered stable API;
+ this test is mainly to ensure the print does not crash.
+
+ Uses capfd so that the C-level `printf` output is captured as well.
+ """
+ # the expected output with all addresses and sizes stripped (they vary
+ # and/or are platform dependent).
+ expected = """
+ ------ BEGIN ITERATOR DUMP ------
+ | Iterator Address:
+ | ItFlags: BUFFER REDUCE REUSE_REDUCE_LOOPS
+ | NDim: 2
+ | NOp: 2
+ | IterSize: 50
+ | IterStart: 0
+ | IterEnd: 50
+ | IterIndex: 0
+ | Iterator SizeOf:
+ | BufferData SizeOf:
+ | AxisData SizeOf:
+ |
+ | Perm: 0 1
+ | DTypes:
+ | DTypes: dtype('float64') dtype('int32')
+ | InitDataPtrs:
+ | BaseOffsets: 0 0
+ | Operands:
+ | Operand DTypes: dtype('int64') dtype('float64')
+ | OpItFlags:
+ | Flags[0]: READ CAST ALIGNED
+ | Flags[1]: READ WRITE CAST ALIGNED REDUCE
+ |
+ | BufferData:
+ | BufferSize: 50
+ | Size: 5
+ | BufIterEnd: 5
+ | REDUCE Pos: 0
+ | REDUCE OuterSize: 10
+ | REDUCE OuterDim: 1
+ | Strides: 8 4
+ | Ptrs:
+ | REDUCE Outer Strides: 40 0
+ | REDUCE Outer Ptrs:
+ | ReadTransferFn:
+ | ReadTransferData:
+ | WriteTransferFn:
+ | WriteTransferData:
+ | Buffers:
+ |
+ | AxisData[0]:
+ | Shape: 5
+ | Index: 0
+ | Strides: 16 8
+ | Ptrs:
+ | AxisData[1]:
+ | Shape: 10
+ | Index: 0
+ | Strides: 80 0
+ | Ptrs:
+ ------- END ITERATOR DUMP -------
+ """.strip().splitlines()
+
+ arr1 = np.arange(100, dtype=np.int64).reshape(10, 10)[:, ::2]
+ arr2 = np.arange(5.)
+ it = np.nditer((arr1, arr2), op_dtypes=["d", "i4"], casting="unsafe",
+ flags=["reduce_ok", "buffered"],
+ op_flags=[["readonly"], ["readwrite"]])
+ it.debug_print()
+ res = capfd.readouterr().out
+ res = res.strip().splitlines()
+
+ assert len(res) == len(expected)
+ for res_line, expected_line in zip(res, expected):
+ # The actual output may have additional pointers listed that are
+ # stripped from the example output:
+ assert res_line.startswith(expected_line.strip())
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_nep50_promotions.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_nep50_promotions.py
new file mode 100644
index 00000000..3c031696
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_nep50_promotions.py
@@ -0,0 +1,182 @@
+"""
+This file adds basic tests to test the NEP 50 style promotion compatibility
+mode. Most of these tests are likely to be simply deleted again once NEP 50
+is adopted in the main test suite. A few may be moved elsewhere.
+"""
+
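+# A quick illustrative sketch of the "weak" promotion semantics exercised
+# below (drawn from the tests themselves):
+#
+# np._set_promotion_state("weak")
+# (np.uint8(1) + 2).dtype # uint8: the Python int is weakly typed
+# np.float32(1) + 3e100 # overflows to float32 inf (RuntimeWarning)
+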
+import operator
+
+import numpy as np
+
+import pytest
+from numpy.testing import IS_WASM
+
+
+@pytest.fixture(scope="module", autouse=True)
+def _weak_promotion_enabled():
+ state = np._get_promotion_state()
+ np._set_promotion_state("weak_and_warn")
+ yield
+ np._set_promotion_state(state)
+
+
+@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors")
+def test_nep50_examples():
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.uint8(1) + 2
+ assert res.dtype == np.uint8
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([1], np.uint8) + np.int64(1)
+ assert res.dtype == np.int64
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([1], np.uint8) + np.array(1, dtype=np.int64)
+ assert res.dtype == np.int64
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ # Note: For the "weak_and_warn" promotion state the overflow warning is
+ # unfortunately not given (because we use the full array path).
+ with np.errstate(over="raise"):
+ res = np.uint8(100) + 200
+ assert res.dtype == np.uint8
+
+ with pytest.warns(Warning) as recwarn:
+ res = np.float32(1) + 3e100
+
+ # Check that both warnings were given in the one call:
+ warning = str(recwarn.pop(UserWarning).message)
+ assert warning.startswith("result dtype changed")
+ warning = str(recwarn.pop(RuntimeWarning).message)
+ assert warning.startswith("overflow")
+ assert len(recwarn) == 0 # no further warnings
+ assert np.isinf(res)
+ assert res.dtype == np.float32
+
+ # The result changes, but we don't warn for it (too noisy)
+ res = np.array([0.1], np.float32) == np.float64(0.1)
+ assert res[0] == False
+
+ # Additional test, since the above silences the warning:
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([0.1], np.float32) + np.float64(0.1)
+ assert res.dtype == np.float64
+
+ with pytest.warns(UserWarning, match="result dtype changed"):
+ res = np.array([1.], np.float32) + np.int64(3)
+ assert res.dtype == np.float64
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+def test_nep50_weak_integers(dtype):
+ # Avoids warning (different code path for scalars)
+ np._set_promotion_state("weak")
+ scalar_type = np.dtype(dtype).type
+
+ maxint = int(np.iinfo(dtype).max)
+
+ with np.errstate(over="warn"):
+ with pytest.warns(RuntimeWarning):
+ res = scalar_type(100) + maxint
+ assert res.dtype == dtype
+
+ # Array operations are not expected to warn, but should give the same
+ # result dtype.
+ res = np.array(100, dtype=dtype) + maxint
+ assert res.dtype == dtype
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_nep50_weak_integers_with_inexact(dtype):
+ # Avoids warning (different code path for scalars)
+ np._set_promotion_state("weak")
+ scalar_type = np.dtype(dtype).type
+
+ too_big_int = int(np.finfo(dtype).max) * 2
+
+ if dtype in "dDG":
+ # These dtypes currently convert to Python float internally, which
+ # raises an OverflowError, while the other dtypes overflow to inf.
+ # NOTE: It may make sense to normalize the behavior!
+ with pytest.raises(OverflowError):
+ scalar_type(1) + too_big_int
+
+ with pytest.raises(OverflowError):
+ np.array(1, dtype=dtype) + too_big_int
+ else:
+ # NumPy uses (or used) `int -> string -> longdouble` for the
+ # conversion. But Python may refuse `str(int)` for huge ints.
+ # In that case, a RuntimeWarning would be correct, but the conversion
+ # fails earlier (seems to happen on 32-bit Linux, possibly only on
+ # debug builds).
+ if dtype in "gG":
+ try:
+ str(too_big_int)
+ except ValueError:
+ pytest.skip("`huge_int -> string -> longdouble` failed")
+
+ # Otherwise, we overflow to infinity:
+ with pytest.warns(RuntimeWarning):
+ res = scalar_type(1) + too_big_int
+ assert res.dtype == dtype
+ assert res == np.inf
+
+ with pytest.warns(RuntimeWarning):
+ # We force the dtype here, since windows may otherwise pick the
+ # double instead of the longdouble loop. That leads to slightly
+ # different results (conversion of the int fails as above).
+ res = np.add(np.array(1, dtype=dtype), too_big_int, dtype=dtype)
+ assert res.dtype == dtype
+ assert res == np.inf
+
+
+@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.eq])
+def test_weak_promotion_scalar_path(op):
+ # Some additional paths exercising the weak scalars.
+ np._set_promotion_state("weak")
+
+ # Integer path:
+ res = op(np.uint8(3), 5)
+ assert res == op(3, 5)
+ assert res.dtype == np.uint8 or res.dtype == bool
+
+ with pytest.raises(OverflowError):
+ op(np.uint8(3), 1000)
+
+ # Float path:
+ res = op(np.float32(3), 5.)
+ assert res == op(3., 5.)
+ assert res.dtype == np.float32 or res.dtype == bool
+
+
+def test_nep50_complex_promotion():
+ np._set_promotion_state("weak")
+
+ with pytest.warns(RuntimeWarning, match=".*overflow"):
+ res = np.complex64(3) + complex(2**300)
+
+ assert type(res) == np.complex64
+
+
+def test_nep50_integer_conversion_errors():
+ # Do not worry about warnings here (auto-fixture will reset).
+ np._set_promotion_state("weak")
+ # Implementation for error paths is mostly missing (as of writing)
+ with pytest.raises(OverflowError, match=".*uint8"):
+ np.array([1], np.uint8) + 300
+
+ with pytest.raises(OverflowError, match=".*uint8"):
+ np.uint8(1) + 300
+
+ # Error message depends on platform (maybe unsigned int or unsigned long)
+ with pytest.raises(OverflowError,
+ match="Python integer -1 out of bounds for uint8"):
+ np.uint8(1) + -1
+
+
+def test_nep50_integer_regression():
+ # Test the old integer promotion rules. When the integer is too large,
+ # we need to keep using the old-style promotion.
+ np._set_promotion_state("legacy")
+ arr = np.array(1)
+ assert (arr + 2**63).dtype == np.float64
+ assert (arr[()] + 2**63).dtype == np.float64
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py
new file mode 100644
index 00000000..3cc168b3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_numeric.py
@@ -0,0 +1,3608 @@
+import sys
+import warnings
+import itertools
+import platform
+import pytest
+import math
+from decimal import Decimal
+
+import numpy as np
+from numpy.core import umath
+from numpy.random import rand, randint, randn
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ assert_warns, assert_array_max_ulp, HAS_REFCOUNT, IS_WASM
+ )
+from numpy.core._rational_tests import rational
+
+from hypothesis import given, strategies as st
+from hypothesis.extra import numpy as hynp
+
+
+class TestResize:
+ def test_copies(self):
+ A = np.array([[1, 2], [3, 4]])
+ Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
+ assert_equal(np.resize(A, (2, 4)), Ar1)
+
+ Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
+ assert_equal(np.resize(A, (4, 2)), Ar2)
+
+ Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
+ assert_equal(np.resize(A, (4, 3)), Ar3)
+
+ def test_repeats(self):
+ A = np.array([1, 2, 3])
+ Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]])
+ assert_equal(np.resize(A, (2, 4)), Ar1)
+
+ Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]])
+ assert_equal(np.resize(A, (4, 2)), Ar2)
+
+ Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
+ assert_equal(np.resize(A, (4, 3)), Ar3)
+
+ def test_zeroresize(self):
+ A = np.array([[1, 2], [3, 4]])
+ Ar = np.resize(A, (0,))
+ assert_array_equal(Ar, np.array([]))
+ assert_equal(A.dtype, Ar.dtype)
+
+ Ar = np.resize(A, (0, 2))
+ assert_equal(Ar.shape, (0, 2))
+
+ Ar = np.resize(A, (2, 0))
+ assert_equal(Ar.shape, (2, 0))
+
+ def test_reshape_from_zero(self):
+ # See also gh-6740
+ A = np.zeros(0, dtype=[('a', np.float32)])
+ Ar = np.resize(A, (2, 1))
+ assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
+ assert_equal(A.dtype, Ar.dtype)
+
+ def test_negative_resize(self):
+ A = np.arange(0, 10, dtype=np.float32)
+ new_shape = (-10, -1)
+ with pytest.raises(ValueError, match=r"negative"):
+ np.resize(A, new_shape=new_shape)
+
+ def test_subclass(self):
+ class MyArray(np.ndarray):
+ __array_priority__ = 1.
+
+ my_arr = np.array([1]).view(MyArray)
+ assert type(np.resize(my_arr, 5)) is MyArray
+ assert type(np.resize(my_arr, 0)) is MyArray
+
+ my_arr = np.array([]).view(MyArray)
+ assert type(np.resize(my_arr, 5)) is MyArray
+
+
+class TestNonarrayArgs:
+ # Check that functions wrap non-array arguments in arrays.
+ def test_choose(self):
+ choices = [[0, 1, 2],
+ [3, 4, 5],
+ [5, 6, 7]]
+ tgt = [5, 1, 5]
+ a = [2, 0, 1]
+
+ out = np.choose(a, choices)
+ assert_equal(out, tgt)
+
+ def test_clip(self):
+ arr = [-1, 5, 2, 3, 10, -4, -9]
+ out = np.clip(arr, 2, 7)
+ tgt = [2, 5, 2, 3, 7, 2, 2]
+ assert_equal(out, tgt)
+
+ def test_compress(self):
+ arr = [[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]]
+ tgt = [[5, 6, 7, 8, 9]]
+ out = np.compress([0, 1], arr, axis=0)
+ assert_equal(out, tgt)
+
+ def test_count_nonzero(self):
+ arr = [[0, 1, 7, 0, 0],
+ [3, 0, 0, 2, 19]]
+ tgt = np.array([2, 3])
+ out = np.count_nonzero(arr, axis=1)
+ assert_equal(out, tgt)
+
+ def test_cumproduct(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
+
+ def test_diagonal(self):
+ a = [[0, 1, 2, 3],
+ [4, 5, 6, 7],
+ [8, 9, 10, 11]]
+ out = np.diagonal(a)
+ tgt = [0, 5, 10]
+
+ assert_equal(out, tgt)
+
+ def test_mean(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_(np.mean(A) == 3.5)
+ assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
+ assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.mean([])))
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_ptp(self):
+ a = [3, 4, 5, 10, -3, -5, 6.0]
+ assert_equal(np.ptp(a, axis=0), 15.0)
+
+ def test_prod(self):
+ arr = [[1, 2, 3, 4],
+ [5, 6, 7, 9],
+ [10, 3, 4, 5]]
+ tgt = [24, 1890, 600]
+
+ assert_equal(np.prod(arr, axis=-1), tgt)
+
+ def test_ravel(self):
+ a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
+ tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
+ assert_equal(np.ravel(a), tgt)
+
+ def test_repeat(self):
+ a = [1, 2, 3]
+ tgt = [1, 1, 2, 2, 3, 3]
+
+ out = np.repeat(a, 2)
+ assert_equal(out, tgt)
+
+ def test_reshape(self):
+ arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
+ tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
+ assert_equal(np.reshape(arr, (2, 6)), tgt)
+
+ def test_round(self):
+ arr = [1.56, 72.54, 6.35, 3.25]
+ tgt = [1.6, 72.5, 6.4, 3.2]
+ assert_equal(np.around(arr, decimals=1), tgt)
+ s = np.float64(1.)
+ assert_(isinstance(s.round(), np.float64))
+ assert_equal(s.round(), 1.)
+
+ @pytest.mark.parametrize('dtype', [
+ np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ ])
+ def test_dunder_round(self, dtype):
+ s = dtype(1)
+ assert_(isinstance(round(s), int))
+ assert_(isinstance(round(s, None), int))
+ assert_(isinstance(round(s, ndigits=None), int))
+ assert_equal(round(s), 1)
+ assert_equal(round(s, None), 1)
+ assert_equal(round(s, ndigits=None), 1)
+
+ @pytest.mark.parametrize('val, ndigits', [
+ pytest.param(2**31 - 1, -1,
+ marks=pytest.mark.xfail(reason="Out of range of int32")
+ ),
+ (2**31 - 1, 1-math.ceil(math.log10(2**31 - 1))),
+ (2**31 - 1, -math.ceil(math.log10(2**31 - 1)))
+ ])
+ def test_dunder_round_edgecases(self, val, ndigits):
+ assert_equal(round(val, ndigits), round(np.int32(val), ndigits))
+
+ def test_dunder_round_accuracy(self):
+ f = np.float64(5.1 * 10**73)
+ assert_(isinstance(round(f, -73), np.float64))
+ assert_array_max_ulp(round(f, -73), 5.0 * 10**73)
+ assert_(isinstance(round(f, ndigits=-73), np.float64))
+ assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73)
+
+ i = np.int64(501)
+ assert_(isinstance(round(i, -2), np.int64))
+ assert_array_max_ulp(round(i, -2), 500)
+ assert_(isinstance(round(i, ndigits=-2), np.int64))
+ assert_array_max_ulp(round(i, ndigits=-2), 500)
+
+ @pytest.mark.xfail(raises=AssertionError, reason="gh-15896")
+ def test_round_py_consistency(self):
+ f = 5.1 * 10**73
+ assert_equal(round(np.float64(f), -73), round(f, -73))
+
+ def test_searchsorted(self):
+ arr = [-8, -5, -1, 3, 6, 10]
+ out = np.searchsorted(arr, 0)
+ assert_equal(out, 3)
+
+ def test_size(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_(np.size(A) == 6)
+ assert_(np.size(A, 0) == 2)
+ assert_(np.size(A, 1) == 3)
+
+ def test_squeeze(self):
+ A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
+ assert_equal(np.squeeze(A).shape, (3, 3))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
+ assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
+ assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
+ assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
+ assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
+
+ def test_std(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_almost_equal(np.std(A), 1.707825127659933)
+ assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
+ assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.std([])))
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_swapaxes(self):
+ tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
+ a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
+ out = np.swapaxes(a, 0, 2)
+ assert_equal(out, tgt)
+
+ def test_sum(self):
+ m = [[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]]
+ tgt = [[6], [15], [24]]
+ out = np.sum(m, axis=1, keepdims=True)
+
+ assert_equal(tgt, out)
+
+ def test_take(self):
+ tgt = [2, 3, 5]
+ indices = [1, 2, 4]
+ a = [1, 2, 3, 4, 5]
+
+ out = np.take(a, indices)
+ assert_equal(out, tgt)
+
+ def test_trace(self):
+ c = [[1, 2], [3, 4], [5, 6]]
+ assert_equal(np.trace(c), 5)
+
+ def test_transpose(self):
+ arr = [[1, 2], [3, 4], [5, 6]]
+ tgt = [[1, 3, 5], [2, 4, 6]]
+ assert_equal(np.transpose(arr, (1, 0)), tgt)
+
+ def test_var(self):
+ A = [[1, 2, 3], [4, 5, 6]]
+ assert_almost_equal(np.var(A), 2.9166666666666665)
+ assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
+ assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.var([])))
+ assert_(w[0].category is RuntimeWarning)
+
+ B = np.array([None, 0])
+ B[0] = 1j
+ assert_almost_equal(np.var(B), 0.25)
+
+
+class TestIsscalar:
+ def test_isscalar(self):
+ assert_(np.isscalar(3.1))
+ assert_(np.isscalar(np.int16(12345)))
+ assert_(np.isscalar(False))
+ assert_(np.isscalar('numpy'))
+ assert_(not np.isscalar([3.1]))
+ assert_(not np.isscalar(None))
+
+ # PEP 3141
+ from fractions import Fraction
+ assert_(np.isscalar(Fraction(5, 17)))
+ from numbers import Number
+ assert_(np.isscalar(Number()))
+
+
+class TestBoolScalar:
+ def test_logical(self):
+ f = np.False_
+ t = np.True_
+ s = "xyz"
+ assert_((t and s) is s)
+ assert_((f and s) is f)
+
+ def test_bitwise_or(self):
+ f = np.False_
+ t = np.True_
+ assert_((t | t) is t)
+ assert_((f | t) is t)
+ assert_((t | f) is t)
+ assert_((f | f) is f)
+
+ def test_bitwise_and(self):
+ f = np.False_
+ t = np.True_
+ assert_((t & t) is t)
+ assert_((f & t) is f)
+ assert_((t & f) is f)
+ assert_((f & f) is f)
+
+ def test_bitwise_xor(self):
+ f = np.False_
+ t = np.True_
+ assert_((t ^ t) is f)
+ assert_((f ^ t) is t)
+ assert_((t ^ f) is t)
+ assert_((f ^ f) is f)
+
+
+class TestBoolArray:
+ def setup_method(self):
+ # offset the data for SIMD tests (to also hit unaligned code paths)
+ self.t = np.array([True] * 41, dtype=bool)[1::]
+ self.f = np.array([False] * 41, dtype=bool)[1::]
+ self.o = np.array([False] * 42, dtype=bool)[2::]
+ self.nm = self.f.copy()
+ self.im = self.t.copy()
+ self.nm[3] = True
+ self.nm[-2] = True
+ self.im[3] = False
+ self.im[-2] = False
+
+ def test_all_any(self):
+ assert_(self.t.all())
+ assert_(self.t.any())
+ assert_(not self.f.all())
+ assert_(not self.f.any())
+ assert_(self.nm.any())
+ assert_(self.im.any())
+ assert_(not self.nm.all())
+ assert_(not self.im.all())
+ # check bad element in all positions
+ for i in range(256 - 7):
+ d = np.array([False] * 256, dtype=bool)[7::]
+ d[i] = True
+ assert_(np.any(d))
+ e = np.array([True] * 256, dtype=bool)[7::]
+ e[i] = False
+ assert_(not np.all(e))
+ assert_array_equal(e, ~d)
+ # big array test for blocked libc loops
+ for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
+ d = np.array([False] * 100043, dtype=bool)
+ d[i] = True
+ assert_(np.any(d), msg="%r" % i)
+ e = np.array([True] * 100043, dtype=bool)
+ e[i] = False
+ assert_(not np.all(e), msg="%r" % i)
+
+ def test_logical_not_abs(self):
+ assert_array_equal(~self.t, self.f)
+ assert_array_equal(np.abs(~self.t), self.f)
+ assert_array_equal(np.abs(~self.f), self.t)
+ assert_array_equal(np.abs(self.f), self.f)
+ assert_array_equal(~np.abs(self.f), self.t)
+ assert_array_equal(~np.abs(self.t), self.f)
+ assert_array_equal(np.abs(~self.nm), self.im)
+ np.logical_not(self.t, out=self.o)
+ assert_array_equal(self.o, self.f)
+ np.abs(self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+
+ def test_logical_and_or_xor(self):
+ assert_array_equal(self.t | self.t, self.t)
+ assert_array_equal(self.f | self.f, self.f)
+ assert_array_equal(self.t | self.f, self.t)
+ assert_array_equal(self.f | self.t, self.t)
+ np.logical_or(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+ assert_array_equal(self.t & self.t, self.t)
+ assert_array_equal(self.f & self.f, self.f)
+ assert_array_equal(self.t & self.f, self.f)
+ assert_array_equal(self.f & self.t, self.f)
+ np.logical_and(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.t)
+ assert_array_equal(self.t ^ self.t, self.f)
+ assert_array_equal(self.f ^ self.f, self.f)
+ assert_array_equal(self.t ^ self.f, self.t)
+ assert_array_equal(self.f ^ self.t, self.t)
+ np.logical_xor(self.t, self.t, out=self.o)
+ assert_array_equal(self.o, self.f)
+
+ assert_array_equal(self.nm & self.t, self.nm)
+ assert_array_equal(self.im & self.f, False)
+ assert_array_equal(self.nm & True, self.nm)
+ assert_array_equal(self.im & False, self.f)
+ assert_array_equal(self.nm | self.t, self.t)
+ assert_array_equal(self.im | self.f, self.im)
+ assert_array_equal(self.nm | True, self.t)
+ assert_array_equal(self.im | False, self.im)
+ assert_array_equal(self.nm ^ self.t, self.im)
+ assert_array_equal(self.im ^ self.f, self.im)
+ assert_array_equal(self.nm ^ True, self.im)
+ assert_array_equal(self.im ^ False, self.im)
+
+
+class TestBoolCmp:
+ def setup_method(self):
+ self.f = np.ones(256, dtype=np.float32)
+ self.ef = np.ones(self.f.size, dtype=bool)
+ self.d = np.ones(128, dtype=np.float64)
+ self.ed = np.ones(self.d.size, dtype=bool)
+ # generate values for all permutations of 256-bit SIMD vectors
+ s = 0
+ for i in range(32):
+ self.f[s:s+8] = [i & 2**x for x in range(8)]
+ self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
+ s += 8
+ s = 0
+ for i in range(16):
+ self.d[s:s+4] = [i & 2**x for x in range(4)]
+ self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
+ s += 4
+
+ self.nf = self.f.copy()
+ self.nd = self.d.copy()
+ self.nf[self.ef] = np.nan
+ self.nd[self.ed] = np.nan
+
+ self.inff = self.f.copy()
+ self.infd = self.d.copy()
+ self.inff[::3][self.ef[::3]] = np.inf
+ self.infd[::3][self.ed[::3]] = np.inf
+ self.inff[1::3][self.ef[1::3]] = -np.inf
+ self.infd[1::3][self.ed[1::3]] = -np.inf
+ self.inff[2::3][self.ef[2::3]] = np.nan
+ self.infd[2::3][self.ed[2::3]] = np.nan
+ self.efnonan = self.ef.copy()
+ self.efnonan[2::3] = False
+ self.ednonan = self.ed.copy()
+ self.ednonan[2::3] = False
+
+ self.signf = self.f.copy()
+ self.signd = self.d.copy()
+ self.signf[self.ef] *= -1.
+ self.signd[self.ed] *= -1.
+ self.signf[1::6][self.ef[1::6]] = -np.inf
+ self.signd[1::6][self.ed[1::6]] = -np.inf
+ self.signf[3::6][self.ef[3::6]] = -np.nan
+ self.signd[3::6][self.ed[3::6]] = -np.nan
+ self.signf[4::6][self.ef[4::6]] = -0.
+ self.signd[4::6][self.ed[4::6]] = -0.
+
+ def test_float(self):
+ # offset for alignment test
+ for i in range(4):
+ assert_array_equal(self.f[i:] > 0, self.ef[i:])
+ assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
+ assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
+ assert_array_equal(-self.f[i:] < 0, self.ef[i:])
+ assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
+ r = self.f[i:] != 0
+ assert_array_equal(r, self.ef[i:])
+ r2 = self.f[i:] != np.zeros_like(self.f[i:])
+ r3 = 0 != self.f[i:]
+ assert_array_equal(r, r2)
+ assert_array_equal(r, r3)
+ # check bool == 0x1
+ assert_array_equal(r.view(np.int8), r.astype(np.int8))
+ assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+ assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+ # isnan on amd64 takes the same code path
+ assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
+ assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
+ assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
+ assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
+ assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
+
+ def test_double(self):
+ # offset for alignment test
+ for i in range(2):
+ assert_array_equal(self.d[i:] > 0, self.ed[i:])
+ assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
+ assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
+ assert_array_equal(-self.d[i:] < 0, self.ed[i:])
+ assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
+ r = self.d[i:] != 0
+ assert_array_equal(r, self.ed[i:])
+ r2 = self.d[i:] != np.zeros_like(self.d[i:])
+ r3 = 0 != self.d[i:]
+ assert_array_equal(r, r2)
+ assert_array_equal(r, r3)
+ # check bool == 0x1
+ assert_array_equal(r.view(np.int8), r.astype(np.int8))
+ assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
+ assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
+
+ # isnan on amd64 takes the same code path
+ assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
+ assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
+ assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
+
+
+class TestSeterr:
+ def test_default(self):
+ err = np.geterr()
+ assert_equal(err,
+ dict(divide='warn',
+ invalid='warn',
+ over='warn',
+ under='ignore')
+ )
+
+ def test_set(self):
+ with np.errstate():
+ err = np.seterr()
+ old = np.seterr(divide='print')
+ assert_(err == old)
+ new = np.seterr()
+ assert_(new['divide'] == 'print')
+ np.seterr(over='raise')
+ assert_(np.geterr()['over'] == 'raise')
+ assert_(new['divide'] == 'print')
+ np.seterr(**old)
+ assert_(np.geterr() == old)
+
+ @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+ def test_divide_err(self):
+ with np.errstate(divide='raise'):
+ with assert_raises(FloatingPointError):
+ np.array([1.]) / np.array([0.])
+
+ np.seterr(divide='ignore')
+ np.array([1.]) / np.array([0.])
+
+ @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+ def test_errobj(self):
+ olderrobj = np.geterrobj()
+ self.called = 0
+ try:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ with np.errstate(divide='warn'):
+ np.seterrobj([20000, 1, None])
+ np.array([1.]) / np.array([0.])
+ assert_equal(len(w), 1)
+
+ def log_err(*args):
+ self.called += 1
+ extobj_err = args
+ assert_(len(extobj_err) == 2)
+ assert_("divide" in extobj_err[0])
+
+ with np.errstate(divide='ignore'):
+ np.seterrobj([20000, 3, log_err])
+ np.array([1.]) / np.array([0.])
+ assert_equal(self.called, 1)
+
+ np.seterrobj(olderrobj)
+ with np.errstate(divide='ignore'):
+ np.divide(1., 0., extobj=[20000, 3, log_err])
+ assert_equal(self.called, 2)
+ finally:
+ np.seterrobj(olderrobj)
+ del self.called
+
+ def test_errobj_noerrmask(self):
+ # errmask = 0 has a special code path for the default
+ olderrobj = np.geterrobj()
+ try:
+ # set errobj to something non default
+ np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
+ umath.ERR_DEFAULT + 1, None])
+ # call a ufunc
+ np.isnan(np.array([6]))
+ # same with the default, lots of times, to flush any possible
+ # pre-existing state in the code
+ for i in range(10000):
+ np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
+ None])
+ np.isnan(np.array([6]))
+ finally:
+ np.seterrobj(olderrobj)
+
+
+class TestFloatExceptions:
+ def assert_raises_fpe(self, fpeerr, flop, x, y):
+ ftype = type(x)
+ try:
+ flop(x, y)
+ assert_(False,
+ "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
+ except FloatingPointError as exc:
+ assert_(str(exc).find(fpeerr) >= 0,
+ "Type %s raised wrong fpe error '%s'." % (ftype, exc))
+
+ def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
+ # Check that fpe exception is raised.
+ #
+ # Given a floating operation `flop` and two scalar values, check that
+ # the operation raises the floating point exception specified by
+ # `fpeerr`. Tests all variants with 0-d array scalars as well.
+
+ self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
+ self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
+ self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
+ self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
+
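+ # Usage sketch (illustrative; assumes np.errstate(all='raise') is active,
+ # as in the test below):
+ # self.assert_op_raises_fpe('overflow', np.multiply,
+ # np.array(np.finfo('f4').max), np.array(2, 'f4'))
+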
+ # Test for all real and complex float types
+ @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+ @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"])
+ def test_floating_exceptions(self, typecode):
+ # Test basic arithmetic function errors
+ with np.errstate(all='raise'):
+ ftype = np.obj2sctype(typecode)
+ if np.dtype(ftype).kind == 'f':
+ # Get some extreme values for the type
+ fi = np.finfo(ftype)
+ ft_tiny = fi._machar.tiny
+ ft_max = fi.max
+ ft_eps = fi.eps
+ underflow = 'underflow'
+ divbyzero = 'divide by zero'
+ else:
+ # 'c', complex, corresponding real dtype
+ rtype = type(ftype(0).real)
+ fi = np.finfo(rtype)
+ ft_tiny = ftype(fi._machar.tiny)
+ ft_max = ftype(fi.max)
+ ft_eps = ftype(fi.eps)
+ # The complex types raise different exceptions
+ underflow = ''
+ divbyzero = ''
+ overflow = 'overflow'
+ invalid = 'invalid'
+
+ # The value of tiny for double-double is NaN, so we need to skip
+ # the underflow asserts in that case
+ if not np.isnan(ft_tiny):
+ self.assert_raises_fpe(underflow,
+ lambda a, b: a/b, ft_tiny, ft_max)
+ self.assert_raises_fpe(underflow,
+ lambda a, b: a*b, ft_tiny, ft_tiny)
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a*b, ft_max, ftype(2))
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a/b, ft_max, ftype(0.5))
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a+b, ft_max, ft_max*ft_eps)
+ self.assert_raises_fpe(overflow,
+ lambda a, b: a-b, -ft_max, ft_max*ft_eps)
+ self.assert_raises_fpe(overflow,
+ np.power, ftype(2), ftype(2**fi.nexp))
+ self.assert_raises_fpe(divbyzero,
+ lambda a, b: a/b, ftype(1), ftype(0))
+ self.assert_raises_fpe(
+ invalid, lambda a, b: a/b, ftype(np.inf), ftype(np.inf)
+ )
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a/b, ftype(0), ftype(0))
+ self.assert_raises_fpe(
+ invalid, lambda a, b: a-b, ftype(np.inf), ftype(np.inf)
+ )
+ self.assert_raises_fpe(
+ invalid, lambda a, b: a+b, ftype(np.inf), ftype(-np.inf)
+ )
+ self.assert_raises_fpe(invalid,
+ lambda a, b: a*b, ftype(0), ftype(np.inf))
+
+ @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
+ def test_warnings(self):
+ # test warning code path
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ with np.errstate(all="warn"):
+ np.divide(1, 0.)
+ assert_equal(len(w), 1)
+ assert_("divide by zero" in str(w[0].message))
+ np.array(1e300) * np.array(1e300)
+ assert_equal(len(w), 2)
+ assert_("overflow" in str(w[-1].message))
+ np.array(np.inf) - np.array(np.inf)
+ assert_equal(len(w), 3)
+ assert_("invalid value" in str(w[-1].message))
+ np.array(1e-300) * np.array(1e-300)
+ assert_equal(len(w), 4)
+ assert_("underflow" in str(w[-1].message))
+
+
+class TestTypes:
+ def check_promotion_cases(self, promote_func):
+ # tests that the scalars get coerced correctly.
+ b = np.bool_(0)
+ i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
+ u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
+ f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
+ c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
+
+ # coercion within the same kind
+ assert_equal(promote_func(i8, i16), np.dtype(np.int16))
+ assert_equal(promote_func(i32, i8), np.dtype(np.int32))
+ assert_equal(promote_func(i16, i64), np.dtype(np.int64))
+ assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
+ assert_equal(promote_func(f32, f64), np.dtype(np.float64))
+ assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
+ assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
+ assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
+ assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
+ assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
+
+ # coercion between kinds
+ assert_equal(promote_func(b, i32), np.dtype(np.int32))
+ assert_equal(promote_func(b, u8), np.dtype(np.uint8))
+ assert_equal(promote_func(i8, u8), np.dtype(np.int16))
+ assert_equal(promote_func(u8, i32), np.dtype(np.int32))
+ assert_equal(promote_func(i64, u32), np.dtype(np.int64))
+ assert_equal(promote_func(u64, i32), np.dtype(np.float64))
+ assert_equal(promote_func(i32, f32), np.dtype(np.float64))
+ assert_equal(promote_func(i64, f32), np.dtype(np.float64))
+ assert_equal(promote_func(f32, i16), np.dtype(np.float32))
+ assert_equal(promote_func(f32, u32), np.dtype(np.float64))
+ assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
+ assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
+ assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
+
+ # coercion between scalars and 1-D arrays
+ assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
+ assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
+ assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
+ assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
+ assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
+ assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
+ assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
+ assert_equal(promote_func(np.int32(-1), np.array([u64])),
+ np.dtype(np.float64))
+ assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
+ assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
+ assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
+ assert_equal(promote_func(fld, np.array([c64])),
+ np.dtype(np.complex64))
+ assert_equal(promote_func(c64, np.array([f64])),
+ np.dtype(np.complex128))
+ assert_equal(promote_func(np.complex64(3j), np.array([f64])),
+ np.dtype(np.complex128))
+
+ # coercion between scalars and 1-D arrays, where
+ # the scalar has greater kind than the array
+ assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
+ assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
+ assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
+ assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
+ assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
+
+ # uint and int are treated as the same "kind" for
+ # the purposes of array-scalar promotion.
+ assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
+
+ # float and complex are treated as the same "kind" for
+ # the purposes of array-scalar promotion, so that you can do
+ # (0j + float32array) to get a complex64 array instead of
+ # a complex128 array.
+ assert_equal(promote_func(np.array([f32]), c128),
+ np.dtype(np.complex64))
+
+ def test_coercion(self):
+ def res_type(a, b):
+ return np.add(a, b).dtype
+
+ self.check_promotion_cases(res_type)
+
+ # Use-case: float/complex scalar * bool/int8 array
+ # shouldn't narrow the float/complex type
+ for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
+ b = 1.234 * a
+ assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+ b = np.longdouble(1.234) * a
+ assert_equal(b.dtype, np.dtype(np.longdouble),
+ "array type %s" % a.dtype)
+ b = np.float64(1.234) * a
+ assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
+ b = np.float32(1.234) * a
+ assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
+ b = np.float16(1.234) * a
+ assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
+
+ b = 1.234j * a
+ assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+ b = np.clongdouble(1.234j) * a
+ assert_equal(b.dtype, np.dtype(np.clongdouble),
+ "array type %s" % a.dtype)
+ b = np.complex128(1.234j) * a
+ assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
+ b = np.complex64(1.234j) * a
+ assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
+
+ # The following use-case is problematic, and to resolve its
+ # tricky side-effects requires more changes.
+ #
+ # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
+ # a float32, shouldn't promote to float64
+ #
+ # a = np.array([1.0, 1.5], dtype=np.float32)
+ # t = np.array([True, False])
+ # b = t*a
+ # assert_equal(b, [1.0, 0.0])
+ # assert_equal(b.dtype, np.dtype('f4'))
+ # b = (1-t)*a
+ # assert_equal(b, [0.0, 1.5])
+ # assert_equal(b.dtype, np.dtype('f4'))
+ #
+ # Probably ~t (bitwise negation) is more proper to use here,
+ # but this is arguably less intuitive to understand at a glance, and
+ # would fail if 't' is actually an integer array instead of boolean:
+ #
+ # b = (~t)*a
+ # assert_equal(b, [0.0, 1.5])
+ # assert_equal(b.dtype, np.dtype('f4'))
+
+ def test_result_type(self):
+ self.check_promotion_cases(np.result_type)
+ assert_(np.result_type(None) == np.dtype(None))
+
+ def test_promote_types_endian(self):
+ # promote_types should always return native-endian types
+ assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
+ assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
+
+ assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
+ assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
+ assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
+
+ assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
+ assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
+ assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
+ assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
+ assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
+ assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
+
+ assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
+ assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
+ assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
+ assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
+
+ def test_can_cast_and_promote_usertypes(self):
+ # The rational type defines safe casting for signed integers and
+ # booleans. Rational itself *does* cast safely to double.
+ # (rational does not actually cast to all signed integers, e.g.
+ # int64 can be both long and longlong and it registers only the first)
+ valid_types = ["int8", "int16", "int32", "int64", "bool"]
+ invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V"
+
+ rational_dt = np.dtype(rational)
+ for numpy_dtype in valid_types:
+ numpy_dtype = np.dtype(numpy_dtype)
+ assert np.can_cast(numpy_dtype, rational_dt)
+ assert np.promote_types(numpy_dtype, rational_dt) is rational_dt
+
+ for numpy_dtype in invalid_types:
+ numpy_dtype = np.dtype(numpy_dtype)
+ assert not np.can_cast(numpy_dtype, rational_dt)
+ with pytest.raises(TypeError):
+ np.promote_types(numpy_dtype, rational_dt)
+
+ double_dt = np.dtype("double")
+ assert np.can_cast(rational_dt, double_dt)
+ assert np.promote_types(double_dt, rational_dt) is double_dt
+
+ @pytest.mark.parametrize("swap", ["", "swap"])
+ @pytest.mark.parametrize("string_dtype", ["U", "S"])
+ def test_promote_types_strings(self, swap, string_dtype):
+ if swap == "swap":
+ promote_types = lambda a, b: np.promote_types(b, a)
+ else:
+ promote_types = np.promote_types
+
+ S = string_dtype
+
+ # Promote numeric with unsized string:
+ assert_equal(promote_types('bool', S), np.dtype(S+'5'))
+ assert_equal(promote_types('b', S), np.dtype(S+'4'))
+ assert_equal(promote_types('u1', S), np.dtype(S+'3'))
+ assert_equal(promote_types('u2', S), np.dtype(S+'5'))
+ assert_equal(promote_types('u4', S), np.dtype(S+'10'))
+ assert_equal(promote_types('u8', S), np.dtype(S+'20'))
+ assert_equal(promote_types('i1', S), np.dtype(S+'4'))
+ assert_equal(promote_types('i2', S), np.dtype(S+'6'))
+ assert_equal(promote_types('i4', S), np.dtype(S+'11'))
+ assert_equal(promote_types('i8', S), np.dtype(S+'21'))
+ # Promote numeric with sized string:
+ assert_equal(promote_types('bool', S+'1'), np.dtype(S+'5'))
+ assert_equal(promote_types('bool', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('b', S+'1'), np.dtype(S+'4'))
+ assert_equal(promote_types('b', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u1', S+'1'), np.dtype(S+'3'))
+ assert_equal(promote_types('u1', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u2', S+'1'), np.dtype(S+'5'))
+ assert_equal(promote_types('u2', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u4', S+'1'), np.dtype(S+'10'))
+ assert_equal(promote_types('u4', S+'30'), np.dtype(S+'30'))
+ assert_equal(promote_types('u8', S+'1'), np.dtype(S+'20'))
+ assert_equal(promote_types('u8', S+'30'), np.dtype(S+'30'))
+ # Promote with object:
+ assert_equal(promote_types('O', S+'30'), np.dtype('O'))
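+ # (The string sizes above appear to be computed conservatively as the
+ # maximum number of decimal digits of the matching unsigned type, plus
+ # one character for the sign on signed types: 'u8' -> 20, 'i8' -> 21,
+ # 'u1' -> 3, 'i1' -> 4, and 'bool' -> 5 to fit "False".)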
+
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ [[np.dtype("V6"), np.dtype("V10")], # mismatch shape
+ # Mismatching names:
+ [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])],
+ ])
+ def test_invalid_void_promotion(self, dtype1, dtype2):
+ with pytest.raises(TypeError):
+ np.promote_types(dtype1, dtype2)
+
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ [[np.dtype("V10"), np.dtype("V10")],
+ [np.dtype([("name1", "i8")]),
+ np.dtype([("name1", np.dtype("i8").newbyteorder())])],
+ [np.dtype("i8,i8"), np.dtype("i8,>i8")],
+ [np.dtype("i8,i8"), np.dtype("i4,i4")],
+ ])
+ def test_valid_void_promotion(self, dtype1, dtype2):
+ assert np.promote_types(dtype1, dtype2) == dtype1
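+ # (Promotion here works field-by-field on matching field names and
+ # normalizes byte order to native, so dtype1 is the expected result
+ # in all of the cases above.)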
+
+ @pytest.mark.parametrize("dtype",
+ list(np.typecodes["All"]) +
+ ["i,i", "10i", "S3", "S100", "U3", "U100", rational])
+ def test_promote_identical_types_metadata(self, dtype):
+ # The same type passed in twice to np.promote_types always
+ # preserves metadata.
+ metadata = {1: 1}
+ dtype = np.dtype(dtype, metadata=metadata)
+
+ res = np.promote_types(dtype, dtype)
+ assert res.metadata == dtype.metadata
+
+ # Now check the byte-swapped version; promotion should return a native dtype:
+ dtype = dtype.newbyteorder()
+ if dtype.isnative:
+ # The type has no byte order, so there is nothing to swap
+ return
+
+ res = np.promote_types(dtype, dtype)
+
+ # Metadata is (currently) generally lost on byte-swapping (except for
+ # unicode).
+ if dtype.char != "U":
+ assert res.metadata is None
+ else:
+ assert res.metadata == metadata
+ assert res.isnative
+
+ @pytest.mark.slow
+ @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
+ @pytest.mark.parametrize(["dtype1", "dtype2"],
+ itertools.product(
+ list(np.typecodes["All"]) +
+ ["i,i", "S3", "S100", "U3", "U100", rational],
+ repeat=2))
+ def test_promote_types_metadata(self, dtype1, dtype2):
+ """Metadata handling in promotion does not appear formalized
+ right now in NumPy. This test should thus be considered to
+ document behaviour, rather than test the correct definition of it.
+
+ This test is very ugly, it was useful for rewriting part of the
+ promotion, but probably should eventually be replaced/deleted
+ (i.e. when metadata handling in promotion is better defined).
+ """
+ metadata1 = {1: 1}
+ metadata2 = {2: 2}
+ dtype1 = np.dtype(dtype1, metadata=metadata1)
+ dtype2 = np.dtype(dtype2, metadata=metadata2)
+
+ try:
+ res = np.promote_types(dtype1, dtype2)
+ except TypeError:
+ # Promotion failed, this test only checks metadata
+ return
+
+ if res.char not in "USV" or res.names is not None or res.shape != ():
+ # All except string dtypes (and unstructured void) lose metadata
+ # on promotion (unless both dtypes are identical).
+ # At some point structured ones did not, but were restrictive.
+ assert res.metadata is None
+ elif res == dtype1:
+ # If one of the inputs is the result, it is usually returned unchanged:
+ assert res is dtype1
+ elif res == dtype2:
+ # dtype1 may have been cast to the same type/kind as dtype2.
+ # If the resulting dtype is identical we currently pick the cast
+ # version of dtype1, which lost the metadata:
+ if np.promote_types(dtype1, dtype2.kind) == dtype2:
+ assert res.metadata is None
+ else:
+ assert res.metadata == metadata2
+ else:
+ assert res.metadata is None
+
+ # Try again for byteswapped version
+ dtype1 = dtype1.newbyteorder()
+ assert dtype1.metadata == metadata1
+ res_bs = np.promote_types(dtype1, dtype2)
+ assert res_bs == res
+ assert res_bs.metadata == res.metadata
+
+ def test_can_cast(self):
+ assert_(np.can_cast(np.int32, np.int64))
+ assert_(np.can_cast(np.float64, complex))
+ assert_(not np.can_cast(complex, float))
+
+ assert_(np.can_cast('i8', 'f8'))
+ assert_(not np.can_cast('i8', 'f4'))
+ assert_(np.can_cast('i4', 'S11'))
+
+ assert_(np.can_cast('i8', 'i8', 'no'))
+ assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+ assert_(np.can_cast('<i8', '>i8', 'equiv'))
+ assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+ assert_(np.can_cast('<i4', '>i8', 'safe'))
+ assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+ assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+ assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+ assert_(np.can_cast('<i8', '>u4', 'unsafe'))
+
+ assert_(np.can_cast('bool', 'S5'))
+ assert_(not np.can_cast('bool', 'S4'))
+
+ assert_(np.can_cast('b', 'S4'))
+ assert_(not np.can_cast('b', 'S3'))
+
+ assert_(np.can_cast('u1', 'S3'))
+ assert_(not np.can_cast('u1', 'S2'))
+ assert_(np.can_cast('u2', 'S5'))
+ assert_(not np.can_cast('u2', 'S4'))
+ assert_(np.can_cast('u4', 'S10'))
+ assert_(not np.can_cast('u4', 'S9'))
+ assert_(np.can_cast('u8', 'S20'))
+ assert_(not np.can_cast('u8', 'S19'))
+
+ assert_(np.can_cast('i1', 'S4'))
+ assert_(not np.can_cast('i1', 'S3'))
+ assert_(np.can_cast('i2', 'S6'))
+ assert_(not np.can_cast('i2', 'S5'))
+ assert_(np.can_cast('i4', 'S11'))
+ assert_(not np.can_cast('i4', 'S10'))
+ assert_(np.can_cast('i8', 'S21'))
+ assert_(not np.can_cast('i8', 'S20'))
+
+ assert_(np.can_cast('bool', 'S5'))
+ assert_(not np.can_cast('bool', 'S4'))
+
+ assert_(np.can_cast('b', 'U4'))
+ assert_(not np.can_cast('b', 'U3'))
+
+ assert_(np.can_cast('u1', 'U3'))
+ assert_(not np.can_cast('u1', 'U2'))
+ assert_(np.can_cast('u2', 'U5'))
+ assert_(not np.can_cast('u2', 'U4'))
+ assert_(np.can_cast('u4', 'U10'))
+ assert_(not np.can_cast('u4', 'U9'))
+ assert_(np.can_cast('u8', 'U20'))
+ assert_(not np.can_cast('u8', 'U19'))
+
+ assert_(np.can_cast('i1', 'U4'))
+ assert_(not np.can_cast('i1', 'U3'))
+ assert_(np.can_cast('i2', 'U6'))
+ assert_(not np.can_cast('i2', 'U5'))
+ assert_(np.can_cast('i4', 'U11'))
+ assert_(not np.can_cast('i4', 'U10'))
+ assert_(np.can_cast('i8', 'U21'))
+ assert_(not np.can_cast('i8', 'U20'))
+
+ assert_raises(TypeError, np.can_cast, 'i4', None)
+ assert_raises(TypeError, np.can_cast, None, 'i4')
+
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
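+ # (The casting kinds exercised above form a hierarchy: 'no' < 'equiv'
+ # < 'safe' < 'same_kind' < 'unsafe'; each level permits every cast
+ # that the previous one does.)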
+
+ def test_can_cast_simple_to_structured(self):
+ # Non-structured can only be cast to structured in 'unsafe' mode.
+ assert_(not np.can_cast('i4', 'i4,i4'))
+ assert_(not np.can_cast('i4', 'i4,i2'))
+ assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
+ assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
+ # Even if there is just a single field which is OK.
+ assert_(not np.can_cast('i2', [('f1', 'i4')]))
+ assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
+ assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
+ # It should be the same for recursive structured or subarrays.
+ assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
+ assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
+ assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
+ assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
+
+ def test_can_cast_structured_to_simple(self):
+ # Need unsafe casting for structured to simple.
+ assert_(not np.can_cast([('f1', 'i4')], 'i4'))
+ assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
+ assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
+ # Since it is unclear what is being cast, multiple fields to
+ # single should not work even for unsafe casting.
+ assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
+ # But a single field inside a single field is OK.
+ assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
+ assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
+ # And a subarray is fine too - it will just take the first element
+ # (arguably not very consistently; might also take the first field).
+ assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
+ assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
+ # But a structured subarray with multiple fields should fail.
+ assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
+ casting='unsafe'))
+
+ def test_can_cast_values(self):
+ # gh-5917
+ for dt in np.sctypes['int'] + np.sctypes['uint']:
+ ii = np.iinfo(dt)
+ assert_(np.can_cast(ii.min, dt))
+ assert_(np.can_cast(ii.max, dt))
+ assert_(not np.can_cast(ii.min - 1, dt))
+ assert_(not np.can_cast(ii.max + 1, dt))
+
+ for dt in np.sctypes['float']:
+ fi = np.finfo(dt)
+ assert_(np.can_cast(fi.min, dt))
+ assert_(np.can_cast(fi.max, dt))
+
+
+# Custom exception class to test exception propagation in fromiter
+class NIterError(Exception):
+ pass
+
+
+class TestFromiter:
+ def makegen(self):
+ return (x**2 for x in range(24))
+
+ def test_types(self):
+ ai32 = np.fromiter(self.makegen(), np.int32)
+ ai64 = np.fromiter(self.makegen(), np.int64)
+ af = np.fromiter(self.makegen(), float)
+ assert_(ai32.dtype == np.dtype(np.int32))
+ assert_(ai64.dtype == np.dtype(np.int64))
+ assert_(af.dtype == np.dtype(float))
+
+ def test_lengths(self):
+ expected = np.array(list(self.makegen()))
+ a = np.fromiter(self.makegen(), int)
+ a20 = np.fromiter(self.makegen(), int, 20)
+ assert_(len(a) == len(expected))
+ assert_(len(a20) == 20)
+ assert_raises(ValueError, np.fromiter,
+ self.makegen(), int, len(expected) + 10)
+
+ def test_values(self):
+ expected = np.array(list(self.makegen()))
+ a = np.fromiter(self.makegen(), int)
+ a20 = np.fromiter(self.makegen(), int, 20)
+ assert_(np.alltrue(a == expected, axis=0))
+ assert_(np.alltrue(a20 == expected[:20], axis=0))
+
+ def load_data(self, n, eindex):
+ # Utility method for the issue 2592 tests.
+ # Raise an exception at the desired index in the iterator.
+ for e in range(n):
+ if e == eindex:
+ raise NIterError('error at index %s' % eindex)
+ yield e
+
+ @pytest.mark.parametrize("dtype", [int, object])
+ @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
+ def test_2592(self, count, error_index, dtype):
+ # Test iteration exceptions are correctly raised. The data/generator
+ # has `count` elements but errors at `error_index`
+ iterable = self.load_data(count, error_index)
+ with pytest.raises(NIterError):
+ np.fromiter(iterable, dtype=dtype, count=count)
+
+ @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
+ def test_empty_not_structured(self, dtype):
+ # Note, "S0" could be allowed at some point, so long "S" (without
+ # any length) is rejected.
+ with pytest.raises(ValueError, match="Must specify length"):
+ np.fromiter([], dtype=dtype)
+
+ @pytest.mark.parametrize(["dtype", "data"],
+ [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+ # subarray dtypes (important because their dimensions end up
+ # in the result array's dimensions):
+ ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+ (np.dtype(("O", (2, 3))),
+ [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
+ @pytest.mark.parametrize("length_hint", [0, 1])
+ def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
+ dtype = np.dtype(dtype)
+
+ data = data * 100 # make sure we realloc a bit
+
+ class MyIter:
+ # Class/example from gh-15789
+ def __length_hint__(self):
+ # only required to be an estimate, this is legal
+ return length_hint # 0 or 1
+
+ def __iter__(self):
+ return iter(data)
+
+ res = np.fromiter(MyIter(), dtype=dtype)
+ expected = np.array(data, dtype=dtype)
+
+ assert_array_equal(res, expected)
+
+ def test_empty_result(self):
+ class MyIter:
+ def __length_hint__(self):
+ return 10
+
+ def __iter__(self):
+ return iter([]) # actual iterator is empty.
+
+ res = np.fromiter(MyIter(), dtype="d")
+ assert res.shape == (0,)
+ assert res.dtype == "d"
+
+ def test_too_few_items(self):
+ msg = "iterator too short: Expected 10 but iterator had only 3 items."
+ with pytest.raises(ValueError, match=msg):
+ np.fromiter([1, 2, 3], count=10, dtype=int)
+
+ def test_failed_itemsetting(self):
+ with pytest.raises(TypeError):
+ np.fromiter([1, None, 3], dtype=int)
+
+ # The following manages to hit somewhat trickier code paths:
+ iterable = ((2, 3, 4) for i in range(5))
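+ # each generated tuple has 3 items, but the subarray dtype expects
+ # exactly 2, so setting the element fails during iteration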
+ with pytest.raises(ValueError):
+ np.fromiter(iterable, dtype=np.dtype((int, 2)))
+
+
+class TestNonzero:
+ def test_nonzero_trivial(self):
+ assert_equal(np.count_nonzero(np.array([])), 0)
+ assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
+ assert_equal(np.nonzero(np.array([])), ([],))
+
+ assert_equal(np.count_nonzero(np.array([0])), 0)
+ assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
+ assert_equal(np.nonzero(np.array([0])), ([],))
+
+ assert_equal(np.count_nonzero(np.array([1])), 1)
+ assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
+ assert_equal(np.nonzero(np.array([1])), ([0],))
+
+ def test_nonzero_zerod(self):
+ assert_equal(np.count_nonzero(np.array(0)), 0)
+ assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.nonzero(np.array(0)), ([],))
+
+ assert_equal(np.count_nonzero(np.array(1)), 1)
+ assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.nonzero(np.array(1)), ([0],))
+
+ def test_nonzero_onedim(self):
+ x = np.array([1, 0, 2, -1, 0, 0, 8])
+ assert_equal(np.count_nonzero(x), 4)
+ assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
+
+ x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)],
+ dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')])
+ assert_equal(np.count_nonzero(x['a']), 3)
+ assert_equal(np.count_nonzero(x['b']), 4)
+ assert_equal(np.count_nonzero(x['c']), 3)
+ assert_equal(np.count_nonzero(x['d']), 4)
+ assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
+ assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
+
+ def test_nonzero_twodim(self):
+ x = np.array([[0, 1, 0], [2, 0, 3]])
+ assert_equal(np.count_nonzero(x.astype('i1')), 3)
+ assert_equal(np.count_nonzero(x.astype('i2')), 3)
+ assert_equal(np.count_nonzero(x.astype('i4')), 3)
+ assert_equal(np.count_nonzero(x.astype('i8')), 3)
+ assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
+
+ x = np.eye(3)
+ assert_equal(np.count_nonzero(x.astype('i1')), 3)
+ assert_equal(np.count_nonzero(x.astype('i2')), 3)
+ assert_equal(np.count_nonzero(x.astype('i4')), 3)
+ assert_equal(np.count_nonzero(x.astype('i8')), 3)
+ assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
+
+ x = np.array([[(0, 1), (0, 0), (1, 11)],
+ [(1, 1), (1, 0), (0, 0)],
+ [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
+ assert_equal(np.count_nonzero(x['a']), 4)
+ assert_equal(np.count_nonzero(x['b']), 5)
+ assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
+ assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
+
+ assert_(not x['a'].T.flags.aligned)
+ assert_equal(np.count_nonzero(x['a'].T), 4)
+ assert_equal(np.count_nonzero(x['b'].T), 5)
+ assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
+ assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
+
+ def test_sparse(self):
+ # Test the special code path for sparse boolean conditions.
+ for i in range(20):
+ c = np.zeros(200, dtype=bool)
+ c[i::20] = True
+ assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
+
+ c = np.zeros(400, dtype=bool)
+ c[10 + i:20 + i] = True
+ c[20 + i*2] = True
+ assert_equal(np.nonzero(c)[0],
+ np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
+
+ def test_return_type(self):
+ class C(np.ndarray):
+ pass
+
+ for view in (C, np.ndarray):
+ for nd in range(1, 4):
+ shape = tuple(range(2, 2+nd))
+ x = np.arange(np.prod(shape)).reshape(shape).view(view)
+ for nzx in (np.nonzero(x), x.nonzero()):
+ for nzx_i in nzx:
+ assert_(type(nzx_i) is np.ndarray)
+ assert_(nzx_i.flags.writeable)
+
+ def test_count_nonzero_axis(self):
+ # Basic check of functionality
+ m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
+
+ expected = np.array([1, 1, 1, 1, 1])
+ assert_equal(np.count_nonzero(m, axis=0), expected)
+
+ expected = np.array([2, 3])
+ assert_equal(np.count_nonzero(m, axis=1), expected)
+
+ assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
+ assert_raises(TypeError, np.count_nonzero, m, axis='foo')
+ assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
+ assert_raises(TypeError, np.count_nonzero,
+ m, axis=np.array([[1], [2]]))
+
+ def test_count_nonzero_axis_all_dtypes(self):
+ # More thorough test that the axis argument is respected
+ # for all dtypes and responds correctly when presented with
+ # either integer or tuple arguments for axis
+ msg = "Mismatch for dtype: %s"
+
+ def assert_equal_w_dt(a, b, err_msg):
+ assert_equal(a.dtype, b.dtype, err_msg=err_msg)
+ assert_equal(a, b, err_msg=err_msg)
+
+ for dt in np.typecodes['All']:
+ err_msg = msg % (np.dtype(dt).name,)
+
+ if dt != 'V':
+ if dt != 'M':
+ m = np.zeros((3, 3), dtype=dt)
+ n = np.ones(1, dtype=dt)
+
+ m[0, 0] = n[0]
+ m[1, 0] = n[0]
+
+ else: # np.zeros doesn't work for np.datetime64
+ m = np.array(['1970-01-01'] * 9)
+ m = m.reshape((3, 3))
+
+ m[0, 0] = '1970-01-12'
+ m[1, 0] = '1970-01-12'
+ m = m.astype(dt)
+
+ expected = np.array([2, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
+
+ expected = np.array([1, 1, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
+
+ expected = np.array(2)
+ assert_equal(np.count_nonzero(m, axis=(0, 1)),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m, axis=None),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m),
+ expected, err_msg=err_msg)
+
+ if dt == 'V':
+ # There are no 'nonzero' objects for np.void, so the testing
+ # setup is slightly different for this dtype
+ m = np.array([np.void(1)] * 6).reshape((2, 3))
+
+ expected = np.array([0, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
+
+ expected = np.array([0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
+
+ expected = np.array(0)
+ assert_equal(np.count_nonzero(m, axis=(0, 1)),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m, axis=None),
+ expected, err_msg=err_msg)
+ assert_equal(np.count_nonzero(m),
+ expected, err_msg=err_msg)
+
+ def test_count_nonzero_axis_consistent(self):
+ # Check that the axis behaviour for valid axes in
+ # non-special cases is consistent (and therefore
+ # correct) by checking it against an integer array
+ # that is then cast to the generic object dtype
+ from itertools import combinations, permutations
+
+ axis = (0, 1, 2, 3)
+ size = (5, 5, 5, 5)
+ msg = "Mismatch for axis: %s"
+
+ rng = np.random.RandomState(1234)
+ m = rng.randint(-100, 100, size=size)
+ n = m.astype(object)
+
+ for length in range(len(axis)):
+ for combo in combinations(axis, length):
+ for perm in permutations(combo):
+ assert_equal(
+ np.count_nonzero(m, axis=perm),
+ np.count_nonzero(n, axis=perm),
+ err_msg=msg % (perm,))
+
+ def test_countnonzero_axis_empty(self):
+ a = np.array([[0, 0, 1], [1, 0, 1]])
+ assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
+
+ def test_countnonzero_keepdims(self):
+ a = np.array([[0, 0, 1, 0],
+ [0, 3, 5, 0],
+ [7, 9, 2, 0]])
+ assert_equal(np.count_nonzero(a, axis=0, keepdims=True),
+ [[1, 2, 3, 0]])
+ assert_equal(np.count_nonzero(a, axis=1, keepdims=True),
+ [[1], [2], [3]])
+ assert_equal(np.count_nonzero(a, keepdims=True),
+ [[6]])
+
+ def test_array_method(self):
+ # Test that the ndarray.nonzero() method works.
+ m = np.array([[1, 0, 0], [4, 0, 6]])
+ tgt = [[0, 1, 1], [0, 0, 2]]
+
+ assert_equal(m.nonzero(), tgt)
+
+ def test_nonzero_invalid_object(self):
+ # gh-9295
+ a = np.array([np.array([1, 2]), 3], dtype=object)
+ assert_raises(ValueError, np.nonzero, a)
+
+ class BoolErrors:
+ def __bool__(self):
+ raise ValueError("Not allowed")
+
+ assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
+
+ def test_nonzero_sideeffect_safety(self):
+ # gh-13631
+ class FalseThenTrue:
+ _val = False
+ def __bool__(self):
+ try:
+ return self._val
+ finally:
+ self._val = True
+
+ class TrueThenFalse:
+ _val = True
+ def __bool__(self):
+ try:
+ return self._val
+ finally:
+ self._val = False
+
+ # result grows on the second pass
+ a = np.array([True, FalseThenTrue()])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ a = np.array([[True], [FalseThenTrue()]])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ # result shrinks on the second pass
+ a = np.array([False, TrueThenFalse()])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ a = np.array([[False], [TrueThenFalse()]])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ def test_nonzero_sideeffects_structured_void(self):
+ # Checks that structured void does not mutate alignment flag of
+ # original array.
+ arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit
+ assert arr.flags.aligned # structs are considered "aligned"
+ assert not arr["f2"].flags.aligned
+ # make sure that nonzero/count_nonzero do not flip the flag:
+ np.nonzero(arr)
+ assert arr.flags.aligned
+ np.count_nonzero(arr)
+ assert arr.flags.aligned
+
+ def test_nonzero_exception_safe(self):
+ # gh-13930
+
+ class ThrowsAfter:
+ def __init__(self, iters):
+ self.iters_left = iters
+
+ def __bool__(self):
+ if self.iters_left == 0:
+ raise ValueError("called `iters` times")
+
+ self.iters_left -= 1
+ return True
+
+ """
+ Test that a ValueError is raised instead of a SystemError
+
+ If the __bool__ function is called after the error state is set,
+ Python (cpython) will raise a SystemError.
+ """
+
+ # assert that an exception in first pass is handled correctly
+ a = np.array([ThrowsAfter(5)]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ # raise exception in second pass for 1-dimensional loop
+ a = np.array([ThrowsAfter(15)]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ # raise exception in second pass for n-dimensional loop
+ a = np.array([[ThrowsAfter(15)]]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have threads")
+ def test_structured_threadsafety(self):
+ # Nonzero (and some other functions) should be threadsafe for
+ # structured datatypes, see gh-15387. The underlying race is
+ # timing-dependent, so this test may pass even in the presence of a bug.
+ from concurrent.futures import ThreadPoolExecutor
+
+ # Create a deeply nested dtype to make a failure more likely:
+ dt = np.dtype([("", "f8")])
+ dt = np.dtype([("", dt)])
+ dt = np.dtype([("", dt)] * 2)
+ # The array should be large enough to likely run into threading issues
+ arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0]
+ def func(arr):
+ arr.nonzero()
+
+ tpe = ThreadPoolExecutor(max_workers=8)
+ futures = [tpe.submit(func, arr) for _ in range(10)]
+ for f in futures:
+ f.result()
+
+ assert arr.dtype is dt
+
+
+class TestIndex:
+ def test_boolean(self):
+ a = rand(3, 5, 8)
+ V = rand(5, 8)
+ g1 = randint(0, 5, size=15)
+ g2 = randint(0, 8, size=15)
+ V[g1, g2] = -V[g1, g2]
+ assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
+
+ def test_boolean_edgecase(self):
+ a = np.array([], dtype='int32')
+ b = np.array([], dtype='bool')
+ c = a[b]
+ assert_equal(c, [])
+ assert_equal(c.dtype, np.dtype('int32'))
+
+
+class TestBinaryRepr:
+ def test_zero(self):
+ assert_equal(np.binary_repr(0), '0')
+
+ def test_positive(self):
+ assert_equal(np.binary_repr(10), '1010')
+ assert_equal(np.binary_repr(12522),
+ '11000011101010')
+ assert_equal(np.binary_repr(10736848),
+ '101000111101010011010000')
+
+ def test_negative(self):
+ assert_equal(np.binary_repr(-1), '-1')
+ assert_equal(np.binary_repr(-10), '-1010')
+ assert_equal(np.binary_repr(-12522),
+ '-11000011101010')
+ assert_equal(np.binary_repr(-10736848),
+ '-101000111101010011010000')
+
+ def test_sufficient_width(self):
+ assert_equal(np.binary_repr(0, width=5), '00000')
+ assert_equal(np.binary_repr(10, width=7), '0001010')
+ assert_equal(np.binary_repr(-5, width=7), '1111011')
+
+ def test_neg_width_boundaries(self):
+ # see gh-8670
+
+ # Ensure that the example in the issue does not
+ # break before proceeding to a more thorough test.
+ assert_equal(np.binary_repr(-128, width=8), '10000000')
+
+ for width in range(1, 11):
+ num = -2**(width - 1)
+ exp = '1' + (width - 1) * '0'
+ assert_equal(np.binary_repr(num, width=width), exp)
+
+ def test_large_neg_int64(self):
+ # See gh-14289.
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64),
+ '11' + '0'*62)
+
+
+class TestBaseRepr:
+ def test_base3(self):
+ assert_equal(np.base_repr(3**5, 3), '100000')
+
+ def test_positive(self):
+ assert_equal(np.base_repr(12, 10), '12')
+ assert_equal(np.base_repr(12, 10, 4), '000012')
+ assert_equal(np.base_repr(12, 4), '30')
+ assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
+
+ def test_negative(self):
+ assert_equal(np.base_repr(-12, 10), '-12')
+ assert_equal(np.base_repr(-12, 10, 4), '-000012')
+ assert_equal(np.base_repr(-12, 4), '-30')
+
+ def test_base_range(self):
+ with assert_raises(ValueError):
+ np.base_repr(1, 1)
+ with assert_raises(ValueError):
+ np.base_repr(1, 37)
+
+
+class TestArrayComparisons:
+ def test_array_equal(self):
+ res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
+ np.array([('a', 1)], dtype='S1,u4'))
+ assert_(res)
+ assert_(type(res) is bool)
+
+ def test_array_equal_equal_nan(self):
+ # Test array_equal with equal_nan kwarg
+ a1 = np.array([1, 2, np.nan])
+ a2 = np.array([1, np.nan, 2])
+ a3 = np.array([1, 2, np.inf])
+
+ # equal_nan=False by default
+ assert_(not np.array_equal(a1, a1))
+ assert_(np.array_equal(a1, a1, equal_nan=True))
+ assert_(not np.array_equal(a1, a2, equal_nan=True))
+ # nan's not conflated with inf's
+ assert_(not np.array_equal(a1, a3, equal_nan=True))
+ # 0-D arrays
+ a = np.array(np.nan)
+ assert_(not np.array_equal(a, a))
+ assert_(np.array_equal(a, a, equal_nan=True))
+ # Non-float dtype - equal_nan should have no effect
+ a = np.array([1, 2, 3], dtype=int)
+ assert_(np.array_equal(a, a))
+ assert_(np.array_equal(a, a, equal_nan=True))
+ # Multi-dimensional array
+ a = np.array([[0, 1], [np.nan, 1]])
+ assert_(not np.array_equal(a, a))
+ assert_(np.array_equal(a, a, equal_nan=True))
+ # Complex values
+ a, b = [np.array([1 + 1j])]*2
+ a.real, b.imag = np.nan, np.nan
+ assert_(not np.array_equal(a, b, equal_nan=False))
+ assert_(np.array_equal(a, b, equal_nan=True))
+
+ def test_none_compares_elementwise(self):
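+ # note: `== None` (rather than `is None`) is deliberate here; it
+ # exercises elementwise comparison against None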
+ a = np.array([None, 1, None], dtype=object)
+ assert_equal(a == None, [True, False, True])
+ assert_equal(a != None, [False, True, False])
+
+ a = np.ones(3)
+ assert_equal(a == None, [False, False, False])
+ assert_equal(a != None, [True, True, True])
+
+ def test_array_equiv(self):
+ res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
+ assert_(not res)
+ assert_(type(res) is bool)
+
+ res = np.array_equiv(np.array([1, 1]), np.array([1]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
+ assert_(res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([2]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
+ assert_(not res)
+ assert_(type(res) is bool)
+ res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
+ assert_(not res)
+ assert_(type(res) is bool)
+
+ @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
+ def test_compare_unstructured_voids(self, dtype):
+ zeros = np.zeros(3, dtype=dtype)
+
+ assert_array_equal(zeros, zeros)
+ assert not (zeros != zeros).any()
+
+ if dtype == "V0":
+ # Can't test != of actually different data
+ return
+
+ nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
+
+ assert not (zeros == nonzeros).any()
+ assert (zeros != nonzeros).all()
+
+
+def assert_array_strict_equal(x, y):
+ assert_array_equal(x, y)
+ # Check flags, 32 bit arches typically don't provide 16 byte alignment
+ if ((x.dtype.alignment <= 8 or
+ np.intp().dtype.itemsize != 4) and
+ sys.platform != 'win32'):
+ assert_(x.flags == y.flags)
+ else:
+ assert_(x.flags.owndata == y.flags.owndata)
+ assert_(x.flags.writeable == y.flags.writeable)
+ assert_(x.flags.c_contiguous == y.flags.c_contiguous)
+ assert_(x.flags.f_contiguous == y.flags.f_contiguous)
+ assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
+ # check endianness
+ assert_(x.dtype.isnative == y.dtype.isnative)
+
+
+class TestClip:
+ def setup_method(self):
+ self.nr = 5
+ self.nc = 3
+
+ def fastclip(self, a, m, M, out=None, casting=None):
+ if out is None:
+ if casting is None:
+ return a.clip(m, M)
+ else:
+ return a.clip(m, M, casting=casting)
+ else:
+ if casting is None:
+ return a.clip(m, M, out)
+ else:
+ return a.clip(m, M, out, casting=casting)
+
+ def clip(self, a, m, M, out=None):
+ # slow but straightforward reference implementation of clip
+ selector = np.less(a, m) + 2*np.greater(a, M)
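+ # selector is 0 where a is within [m, M], 1 where a < m and 2 where
+ # a > M; choose() then picks elementwise from (a, m, M) accordingly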
+ return selector.choose((a, m, M), out=out)
+
+ # Handy functions
+ def _generate_data(self, n, m):
+ return randn(n, m)
+
+ def _generate_data_complex(self, n, m):
+ return randn(n, m) + 1.j * rand(n, m)
+
+ def _generate_flt_data(self, n, m):
+ return (randn(n, m)).astype(np.float32)
+
+ def _neg_byteorder(self, a):
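+ # Return a copy of `a` cast to the opposite, non-native byte order.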
+ a = np.asarray(a)
+ if sys.byteorder == 'little':
+ a = a.astype(a.dtype.newbyteorder('>'))
+ else:
+ a = a.astype(a.dtype.newbyteorder('<'))
+ return a
+
+ def _generate_non_native_data(self, n, m):
+ data = randn(n, m)
+ data = self._neg_byteorder(data)
+ assert_(not data.dtype.isnative)
+ return data
+
+ def _generate_int_data(self, n, m):
+ return (10 * rand(n, m)).astype(np.int64)
+
+ def _generate_int32_data(self, n, m):
+ return (10 * rand(n, m)).astype(np.int32)
+
+ # Now the real test cases
+
+ @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
+ def test_ones_pathological(self, dtype):
+ # for preservation of behavior described in
+ # gh-12519; amin > amax behavior may still change
+ # in the future
+ arr = np.ones(10, dtype=dtype)
+ expected = np.zeros(10, dtype=dtype)
+ actual = np.clip(arr, 1, 0)
+ if dtype == 'O':
+ assert actual.tolist() == expected.tolist()
+ else:
+ assert_equal(actual, expected)
+
+ def test_simple_double(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = 0.1
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int(self):
+ # Test native int input with scalar min/max.
+ a = self._generate_int_data(self.nr, self.nc)
+ a = a.astype(int)
+ m = -2
+ M = 4
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_array_double(self):
+ # Test native double input with array min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = np.zeros(a.shape)
+ M = m + 0.5
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_nonnative(self):
+ # Test non native double input with scalar min/max.
+ a = self._generate_non_native_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_equal(ac, act)
+
+ # Test native double input with non native double scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = self._neg_byteorder(0.6)
+ assert_(not M.dtype.isnative)
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_equal(ac, act)
+
+ def test_simple_complex(self):
+ # Test native complex input with native double scalar min/max.
+ a = 3 * self._generate_data_complex(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ # Test native input with complex double scalar min/max.
+ a = 3 * self._generate_data(self.nr, self.nc)
+ m = -0.5 + 1.j
+ M = 1. + 2.j
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_complex(self):
+ # Address issue gh-5354 for clipping complex arrays.
+ # Test native complex input without explicit min/max,
+ # i.e., either min=None or max=None.
+ a = np.ones(10, dtype=complex)
+ m = a.min()
+ M = a.max()
+ am = self.fastclip(a, m, None)
+ aM = self.fastclip(a, None, M)
+ assert_array_strict_equal(am, a)
+ assert_array_strict_equal(aM, a)
+
+ def test_clip_non_contig(self):
+ # Test clip for non-contiguous native input and native scalar min/max.
+ a = self._generate_data(self.nr * 2, self.nc * 3)
+ a = a[::2, ::3]
+ assert_(not a.flags['F_CONTIGUOUS'])
+ assert_(not a.flags['C_CONTIGUOUS'])
+ ac = self.fastclip(a, -1.6, 1.7)
+ act = self.clip(a, -1.6, 1.7)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_out(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = np.zeros(a.shape)
+ act = np.zeros(a.shape)
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ @pytest.mark.parametrize("casting", [None, "unsafe"])
+ def test_simple_int32_inout(self, casting):
+ # Test native int32 input with double min/max and int32 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float64(0)
+ M = np.float64(2)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ if casting is None:
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac, casting=casting)
+ else:
+ # explicitly passing "unsafe" will silence warning
+ self.fastclip(a, m, M, ac, casting=casting)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int64_out(self):
+ # Test native int32 input with int32 scalar min/max and int64 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.int32(-1)
+ M = np.int32(1)
+ ac = np.zeros(a.shape, dtype=np.int64)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int64_inout(self):
+ # Test native int32 input with double array min/max and int32 out.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.zeros(a.shape, np.float64)
+ M = np.float64(1)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_int32_out(self):
+ # Test native double input with scalar min/max and int out.
+ a = self._generate_data(self.nr, self.nc)
+ m = -1.0
+ M = 2.0
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_simple_inplace_01(self):
+ # Test native double input with array min/max in-place.
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = np.zeros(a.shape)
+ M = 1.0
+ self.fastclip(a, m, M, a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_simple_inplace_02(self):
+ # Test native double input with scalar min/max in-place.
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ self.fastclip(a, m, M, a)
+ self.clip(ac, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_noncontig_inplace(self):
+ # Test non-contiguous double input with double scalar min/max in-place.
+ a = self._generate_data(self.nr * 2, self.nc * 3)
+ a = a[::2, ::3]
+ assert_(not a.flags['F_CONTIGUOUS'])
+ assert_(not a.flags['C_CONTIGUOUS'])
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ self.fastclip(a, m, M, a)
+ self.clip(ac, m, M, ac)
+ assert_array_equal(a, ac)
+
+ def test_type_cast_01(self):
+ # Test native double input with scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_02(self):
+ # Test native int32 input with int32 scalar min/max.
+ a = self._generate_int_data(self.nr, self.nc)
+ a = a.astype(np.int32)
+ m = -2
+ M = 4
+ ac = self.fastclip(a, m, M)
+ act = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_03(self):
+ # Test native int32 input with float64 scalar min/max.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = -2
+ M = 4
+ ac = self.fastclip(a, np.float64(m), np.float64(M))
+ act = self.clip(a, np.float64(m), np.float64(M))
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_04(self):
+ # Test native int32 input with float32 scalar min/max.
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float32(-2)
+ M = np.float32(4)
+ act = self.fastclip(a, m, M)
+ ac = self.clip(a, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_05(self):
+ # Test native int64 input with double array min/max.
+ a = self._generate_int_data(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ ac = self.fastclip(a, m * np.zeros(a.shape), M)
+ act = self.clip(a, m * np.zeros(a.shape), M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_06(self):
+ # Test native with NON native scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = 0.5
+ m_s = self._neg_byteorder(m)
+ M = 1.
+ act = self.clip(a, m_s, M)
+ ac = self.fastclip(a, m_s, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_07(self):
+ # Test NON native with native array min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5 * np.ones(a.shape)
+ M = 1.
+ a_s = self._neg_byteorder(a)
+ assert_(not a_s.dtype.isnative)
+ act = a_s.clip(m, M)
+ ac = self.fastclip(a_s, m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_08(self):
+ # Test NON native with native scalar min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 1.
+ a_s = self._neg_byteorder(a)
+ assert_(not a_s.dtype.isnative)
+ ac = self.fastclip(a_s, m, M)
+ act = a_s.clip(m, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_09(self):
+ # Test native with NON native array min/max.
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5 * np.ones(a.shape)
+ M = 1.
+ m_s = self._neg_byteorder(m)
+ assert_(not m_s.dtype.isnative)
+ ac = self.fastclip(a, m_s, M)
+ act = self.clip(a, m_s, M)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_10(self):
+ # Test native int64 input with float32 scalar min/max and a float32 out array.
+ a = self._generate_int_data(self.nr, self.nc)
+ b = np.zeros(a.shape, dtype=np.float32)
+ m = np.float32(-0.5)
+ M = np.float32(1)
+ act = self.clip(a, m, M, out=b)
+ ac = self.fastclip(a, m, M, out=b)
+ assert_array_strict_equal(ac, act)
+
+ def test_type_cast_11(self):
+ # Test non-native input with native scalar min/max and non-native out.
+ a = self._generate_non_native_data(self.nr, self.nc)
+ b = a.copy()
+ b = b.astype(b.dtype.newbyteorder('>'))
+ bt = b.copy()
+ m = -0.5
+ M = 1.
+ self.fastclip(a, m, M, out=b)
+ self.clip(a, m, M, out=bt)
+ assert_array_strict_equal(b, bt)
+
+ def test_type_cast_12(self):
+ # Test native int64 input with int32 scalar min/max and float32 out.
+ a = self._generate_int_data(self.nr, self.nc)
+ b = np.zeros(a.shape, dtype=np.float32)
+ m = np.int32(0)
+ M = np.int32(1)
+ act = self.clip(a, m, M, out=b)
+ ac = self.fastclip(a, m, M, out=b)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple(self):
+ # Test native double input with scalar min/max
+ a = self._generate_data(self.nr, self.nc)
+ m = -0.5
+ M = 0.6
+ ac = np.zeros(a.shape)
+ act = np.zeros(a.shape)
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple2(self):
+ # Test native int32 input with double min/max and int32 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.float64(0)
+ M = np.float64(2)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_simple_int32(self):
+ # Test native int32 input with int32 scalar min/max and int64 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.int32(-1)
+ M = np.int32(1)
+ ac = np.zeros(a.shape, dtype=np.int64)
+ act = ac.copy()
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_array_int32(self):
+ # Test native int32 input with double array min/max and int32 out
+ a = self._generate_int32_data(self.nr, self.nc)
+ m = np.zeros(a.shape, np.float64)
+ M = np.float64(1)
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_array_outint32(self):
+ # Test native double input with scalar min/max and int out
+ a = self._generate_data(self.nr, self.nc)
+ m = -1.0
+ M = 2.0
+ ac = np.zeros(a.shape, dtype=np.int32)
+ act = ac.copy()
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
+ self.clip(a, m, M, act)
+ assert_array_strict_equal(ac, act)
+
+ def test_clip_with_out_transposed(self):
+ # Test that the out argument works when transposed
+ a = np.arange(16).reshape(4, 4)
+ out = np.empty_like(a).T
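+ # (.T makes `out` F-contiguous, i.e. a non-C-contiguous output buffer)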
+ a.clip(4, 10, out=out)
+ expected = self.clip(a, 4, 10)
+ assert_array_equal(out, expected)
+
+ def test_clip_with_out_memory_overlap(self):
+ # Test that the out argument works when it has memory overlap
+ a = np.arange(16).reshape(4, 4)
+ ac = a.copy()
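+ # a[:-1] and a[1:] share memory, so clip must handle the overlap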
+ a[:-1].clip(4, 10, out=a[1:])
+ expected = self.clip(ac[:-1], 4, 10)
+ assert_array_equal(a[1:], expected)
+
+ def test_clip_inplace_array(self):
+ # Test native double input with array min/max
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = np.zeros(a.shape)
+ M = 1.0
+ self.fastclip(a, m, M, a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_clip_inplace_simple(self):
+ # Test native double input with scalar min/max
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ self.fastclip(a, m, M, a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a, ac)
+
+ def test_clip_func_takes_out(self):
+ # Ensure that the np.clip() function accepts an out= argument.
+ a = self._generate_data(self.nr, self.nc)
+ ac = a.copy()
+ m = -0.5
+ M = 0.6
+ a2 = np.clip(a, m, M, out=a)
+ self.clip(a, m, M, ac)
+ assert_array_strict_equal(a2, ac)
+ assert_(a2 is a)
+
+ def test_clip_nan(self):
+ d = np.arange(7.)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan, max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=-2, max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan, max=10), d)
+
+ def test_object_clip(self):
+ a = np.arange(10, dtype=object)
+ actual = np.clip(a, 1, 5)
+ expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
+ assert actual.tolist() == expected.tolist()
+
+ def test_clip_all_none(self):
+ a = np.arange(10, dtype=object)
+ with assert_raises_regex(ValueError, 'max or min'):
+ np.clip(a, None, None)
+
+ def test_clip_invalid_casting(self):
+ a = np.arange(10, dtype=object)
+ with assert_raises_regex(ValueError,
+ 'casting must be one of'):
+ self.fastclip(a, 1, 8, casting="garbage")
+
+ @pytest.mark.parametrize("amin, amax", [
+ # two scalars
+ (1, 0),
+ # mix scalar and array
+ (1, np.zeros(10)),
+ # two arrays
+ (np.ones(10), np.zeros(10)),
+ ])
+ def test_clip_value_min_max_flip(self, amin, amax):
+ a = np.arange(10, dtype=np.int64)
+ # requirement from ufunc_docstrings.py
+ expected = np.minimum(np.maximum(a, amin), amax)
+ actual = np.clip(a, amin, amax)
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("arr, amin, amax, exp", [
+ # for a bug in npy_ObjectClip, based on a
+ # case produced by hypothesis
+ (np.zeros(10, dtype=np.int64),
+ 0,
+ -2**64+1,
+ np.full(10, -2**64+1, dtype=object)),
+ # for bugs in NPY_TIMEDELTA_MAX, based on a case
+ # produced by hypothesis
+ (np.zeros(10, dtype='m8') - 1,
+ 0,
+ 0,
+ np.zeros(10, dtype='m8')),
+ ])
+ def test_clip_problem_cases(self, arr, amin, amax, exp):
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, exp)
+
+ @pytest.mark.xfail(reason="no scalar nan propagation yet",
+ raises=AssertionError,
+ strict=True)
+ @pytest.mark.parametrize("arr, amin, amax", [
+ # problematic scalar nan case from hypothesis
+ (np.zeros(10, dtype=np.int64),
+ np.array(np.nan),
+ np.zeros(10, dtype=np.int32)),
+ ])
+ @pytest.mark.filterwarnings("ignore::DeprecationWarning")
+ def test_clip_scalar_nan_propagation(self, arr, amin, amax):
+ # enforcement of scalar nan propagation for comparisons
+ # called through clip()
+ expected = np.minimum(np.maximum(arr, amin), amax)
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
+
+ @pytest.mark.xfail(reason="propagation doesn't match spec")
+ @pytest.mark.parametrize("arr, amin, amax", [
+ (np.array([1] * 10, dtype='m8'),
+ np.timedelta64('NaT'),
+ np.zeros(10, dtype=np.int32)),
+ ])
+ @pytest.mark.filterwarnings("ignore::DeprecationWarning")
+ def test_NaT_propagation(self, arr, amin, amax):
+ # NOTE: the expected function spec doesn't
+ # propagate NaT, but clip() now does
+ expected = np.minimum(np.maximum(arr, amin), amax)
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
+
+ @given(
+ data=st.data(),
+ arr=hynp.arrays(
+ dtype=hynp.integer_dtypes() | hynp.floating_dtypes(),
+ shape=hynp.array_shapes()
+ )
+ )
+ def test_clip_property(self, data, arr):
+ """A property-based test using Hypothesis.
+
+ This aims for maximum generality: it could in principle generate *any*
+ valid inputs to np.clip, and in practice generates much more varied
+ inputs than human testers come up with.
+
+ Because many of the inputs have tricky dependencies - compatible dtypes
+ and mutually-broadcastable shapes - we use the `st.data()` strategy to
+ draw values *inside* the test function, from strategies we construct
+ based on previous values. An alternative would be to define a custom
+ strategy with `@st.composite`, but keeping the code inline is fine
+ until it would otherwise need to be duplicated.
+
+ That accounts for most of the function; the actual test is just three
+ lines to calculate and compare actual vs expected results!
+ """
+ numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
+ # Generate shapes for the bounds which can be broadcast with each other
+ # and with the base shape. Below, we might decide to use scalar bounds,
+ # but it's clearer to generate these shapes unconditionally in advance.
+ in_shapes, result_shape = data.draw(
+ hynp.mutually_broadcastable_shapes(
+ num_shapes=2, base_shape=arr.shape
+ )
+ )
+ # Scalar `nan` is deprecated due to the differing behaviour it shows.
+ s = numeric_dtypes.flatmap(
+ lambda x: hynp.from_dtype(x, allow_nan=False))
+ amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
+ shape=in_shapes[0], elements={"allow_nan": False}))
+ amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
+ shape=in_shapes[1], elements={"allow_nan": False}))
+
+ # Then calculate our result and expected result and check that they're
+ # equal! See gh-12519 and gh-19457 for discussion deciding on this
+ # property and the result_type argument.
+ result = np.clip(arr, amin, amax)
+ t = np.result_type(arr, amin, amax)
+ expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
+ assert result.dtype == t
+ assert_array_equal(result, expected)
+
+
+class TestAllclose:
+ rtol = 1e-5
+ atol = 1e-8
+
+ def setup_method(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.olderr)
+
+ def tst_allclose(self, x, y):
+ assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
+
+ def tst_not_allclose(self, x, y):
+ assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
+
+ def test_ip_allclose(self):
+ # Parametric test factory.
+ arr = np.array([100, 1000])
+ aran = np.arange(125).reshape((5, 5, 5))
+
+ atol = self.atol
+ rtol = self.rtol
+
+ data = [([1, 0], [1, 0]),
+ ([atol], [0]),
+ ([1], [1+rtol+atol]),
+ (arr, arr + arr*rtol),
+ (arr, arr + arr*rtol + atol*2),
+ (aran, aran + aran*rtol),
+ (np.inf, np.inf),
+ (np.inf, [np.inf])]
+
+ for (x, y) in data:
+ self.tst_allclose(x, y)
+
+ def test_ip_not_allclose(self):
+ # Parametric test factory.
+ aran = np.arange(125).reshape((5, 5, 5))
+
+ atol = self.atol
+ rtol = self.rtol
+
+ data = [([np.inf, 0], [1, np.inf]),
+ ([np.inf, 0], [1, 0]),
+ ([np.inf, np.inf], [1, np.inf]),
+ ([np.inf, np.inf], [1, 0]),
+ ([-np.inf, 0], [np.inf, 0]),
+ ([np.nan, 0], [np.nan, 0]),
+ ([atol*2], [0]),
+ ([1], [1+rtol+atol*2]),
+ (aran, aran + aran*atol + atol*2),
+ (np.array([np.inf, 1]), np.array([0, np.inf]))]
+
+ for (x, y) in data:
+ self.tst_not_allclose(x, y)
+
+ def test_no_parameter_modification(self):
+ x = np.array([np.inf, 1])
+ y = np.array([0, np.inf])
+ np.allclose(x, y)
+ assert_array_equal(x, np.array([np.inf, 1]))
+ assert_array_equal(y, np.array([0, np.inf]))
+
+ def test_min_int(self):
+ # Could cause problems because abs(min_int) == min_int.
+ min_int = np.iinfo(np.int_).min
+ a = np.array([min_int], dtype=np.int_)
+ assert_(np.allclose(a, a))
+
+ def test_equalnan(self):
+ x = np.array([1.0, np.nan])
+ assert_(np.allclose(x, x, equal_nan=True))
+
+ def test_return_class_is_ndarray(self):
+ # Issue gh-6475
+ # Check that allclose does not preserve subtypes
+ class Foo(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ a = Foo([1])
+ assert_(type(np.allclose(a, a)) is bool)
+
+
+class TestIsclose:
+ rtol = 1e-5
+ atol = 1e-8
+
+ def _setup(self):
+ atol = self.atol
+ rtol = self.rtol
+ arr = np.array([100, 1000])
+ aran = np.arange(125).reshape((5, 5, 5))
+
+ self.all_close_tests = [
+ ([1, 0], [1, 0]),
+ ([atol], [0]),
+ ([1], [1 + rtol + atol]),
+ (arr, arr + arr*rtol),
+ (arr, arr + arr*rtol + atol),
+ (aran, aran + aran*rtol),
+ (np.inf, np.inf),
+ (np.inf, [np.inf]),
+ ([np.inf, -np.inf], [np.inf, -np.inf]),
+ ]
+ self.none_close_tests = [
+ ([np.inf, 0], [1, np.inf]),
+ ([np.inf, -np.inf], [1, 0]),
+ ([np.inf, np.inf], [1, -np.inf]),
+ ([np.inf, np.inf], [1, 0]),
+ ([np.nan, 0], [np.nan, -np.inf]),
+ ([atol*2], [0]),
+ ([1], [1 + rtol + atol*2]),
+ (aran, aran + rtol*1.1*aran + atol*1.1),
+ (np.array([np.inf, 1]), np.array([0, np.inf])),
+ ]
+ self.some_close_tests = [
+ ([np.inf, 0], [np.inf, atol*2]),
+ ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
+ (np.arange(3), [0, 1, 2.1]),
+ (np.nan, [np.nan, np.nan, np.nan]),
+ ([0], [atol, np.inf, -np.inf, np.nan]),
+ (0, [atol, np.inf, -np.inf, np.nan]),
+ ]
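+ # expected elementwise results for some_close_tests, in order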
+ self.some_close_results = [
+ [True, False],
+ [True, False, False],
+ [True, True, False],
+ [False, False, False],
+ [True, False, False, False],
+ [True, False, False, False],
+ ]
+
+ def test_ip_isclose(self):
+ self._setup()
+ tests = self.some_close_tests
+ results = self.some_close_results
+ for (x, y), result in zip(tests, results):
+ assert_array_equal(np.isclose(x, y), result)
+
+ def tst_all_isclose(self, x, y):
+ assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
+
+ def tst_none_isclose(self, x, y):
+ msg = "%s and %s shouldn't be close"
+ assert_(not np.any(np.isclose(x, y)), msg % (x, y))
+
+ def tst_isclose_allclose(self, x, y):
+ msg = "isclose.all() and allclose aren't same for %s and %s"
+ msg2 = "isclose and allclose aren't same for %s and %s"
+ if np.isscalar(x) and np.isscalar(y):
+ assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
+ else:
+ assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
+
+ def test_ip_all_isclose(self):
+ self._setup()
+ for (x, y) in self.all_close_tests:
+ self.tst_all_isclose(x, y)
+
+ def test_ip_none_isclose(self):
+ self._setup()
+ for (x, y) in self.none_close_tests:
+ self.tst_none_isclose(x, y)
+
+ def test_ip_isclose_allclose(self):
+ self._setup()
+ tests = (self.all_close_tests + self.none_close_tests +
+ self.some_close_tests)
+ for (x, y) in tests:
+ self.tst_isclose_allclose(x, y)
+
+ def test_equal_nan(self):
+ assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
+ arr = np.array([1.0, np.nan])
+ assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
+
+ def test_masked_arrays(self):
+ # Make sure to test the output type when arguments are interchanged.
+
+ x = np.ma.masked_where([True, True, False], np.arange(3))
+ assert_(type(x) is type(np.isclose(2, x)))
+ assert_(type(x) is type(np.isclose(x, 2)))
+
+ x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
+ assert_(type(x) is type(np.isclose(np.inf, x)))
+ assert_(type(x) is type(np.isclose(x, np.inf)))
+
+ x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
+ y = np.isclose(np.nan, x, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
+ y = np.isclose(x, np.nan, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
+
+ x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
+ y = np.isclose(x, x, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
+
+ def test_scalar_return(self):
+ assert_(np.isscalar(np.isclose(1, 1)))
+
+ def test_no_parameter_modification(self):
+ x = np.array([np.inf, 1])
+ y = np.array([0, np.inf])
+ np.isclose(x, y)
+ assert_array_equal(x, np.array([np.inf, 1]))
+ assert_array_equal(y, np.array([0, np.inf]))
+
+ def test_non_finite_scalar(self):
+ # GH7014, when two scalars are compared the output should also be a
+ # scalar
+ assert_(np.isclose(np.inf, -np.inf) is np.False_)
+ assert_(np.isclose(0, np.inf) is np.False_)
+ assert_(type(np.isclose(0, np.inf)) is np.bool_)
+
+ def test_timedelta(self):
+ # Allclose currently works for timedelta64 as long as `atol` is
+ # an integer or a timedelta64.
+ a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
+ assert np.isclose(a, a, atol=0, equal_nan=True).all()
+ assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all()
+ assert np.allclose(a, a, atol=0, equal_nan=True)
+ assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True)
+
+
+class TestStdVar:
+ def setup_method(self):
+ self.A = np.array([1, -1, 1, -1])
+ self.real_var = 1
+
+ def test_basic(self):
+ assert_almost_equal(np.var(self.A), self.real_var)
+ assert_almost_equal(np.std(self.A)**2, self.real_var)
+
+ def test_scalars(self):
+ assert_equal(np.var(1), 0)
+ assert_equal(np.std(1), 0)
+
+ def test_ddof1(self):
+ assert_almost_equal(np.var(self.A, ddof=1),
+ self.real_var * len(self.A) / (len(self.A) - 1))
+ assert_almost_equal(np.std(self.A, ddof=1)**2,
+ self.real_var*len(self.A) / (len(self.A) - 1))
+
+ def test_ddof2(self):
+ assert_almost_equal(np.var(self.A, ddof=2),
+ self.real_var * len(self.A) / (len(self.A) - 2))
+ assert_almost_equal(np.std(self.A, ddof=2)**2,
+ self.real_var * len(self.A) / (len(self.A) - 2))
+
+ def test_out_scalar(self):
+ d = np.arange(10)
+ out = np.array(0.)
+ r = np.std(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+ r = np.var(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+ r = np.mean(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+
+
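+# np.var divides the summed squared deviations by (n - ddof), so the
+# ddof=1 results above are the ddof=0 results scaled by n / (n - 1).
+# A hand-rolled check of that relation (illustrative only):
+_x = np.array([1., -1., 1., -1.])
+_dev = _x - _x.mean()
+assert np.isclose((_dev ** 2).sum() / (_x.size - 1), np.var(_x, ddof=1))
+
+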
+class TestStdVarComplex:
+ def test_basic(self):
+ A = np.array([1, 1.j, -1, -1.j])
+ real_var = 1
+ assert_almost_equal(np.var(A), real_var)
+ assert_almost_equal(np.std(A)**2, real_var)
+
+ def test_scalars(self):
+ assert_equal(np.var(1j), 0)
+ assert_equal(np.std(1j), 0)
+
+
+class TestCreationFuncs:
+ # Test ones, zeros, empty and full.
+
+ def setup_method(self):
+ dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
+ # void, bytes, str
+ variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
+ self.dtypes = sorted(dtypes - variable_sized |
+ {np.dtype(tp.str.replace("0", str(i)))
+ for tp in variable_sized for i in range(1, 10)},
+ key=lambda dtype: dtype.str)
+ self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
+ self.ndims = 10
+
+ def check_function(self, func, fill_value=None):
+ par = ((0, 1, 2),
+ range(self.ndims),
+ self.orders,
+ self.dtypes)
+ fill_kwarg = {}
+ if fill_value is not None:
+ fill_kwarg = {'fill_value': fill_value}
+
+ for size, ndims, order, dtype in itertools.product(*par):
+ shape = ndims * [size]
+
+ # do not fill void type
+ if fill_kwarg and dtype.str.startswith('|V'):
+ continue
+
+ arr = func(shape, order=order, dtype=dtype,
+ **fill_kwarg)
+
+ assert_equal(arr.dtype, dtype)
+ assert_(getattr(arr.flags, self.orders[order]))
+
+ if fill_value is not None:
+ if dtype.str.startswith('|S'):
+ val = str(fill_value)
+ else:
+ val = fill_value
+ assert_equal(arr, dtype.type(val))
+
+ def test_zeros(self):
+ self.check_function(np.zeros)
+
+ def test_ones(self):
+ self.check_function(np.ones)
+
+ def test_empty(self):
+ self.check_function(np.empty)
+
+ def test_full(self):
+ self.check_function(np.full, 0)
+ self.check_function(np.full, 1)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_for_reference_leak(self):
+ # Make sure we have an object for reference
+ dim = 1
+ beg = sys.getrefcount(dim)
+ np.zeros([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.ones([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.empty([dim]*10)
+ assert_(sys.getrefcount(dim) == beg)
+ np.full([dim]*10, 0)
+ assert_(sys.getrefcount(dim) == beg)
+
+
+class TestLikeFuncs:
+ '''Test ones_like, zeros_like, empty_like and full_like'''
+
+ def setup_method(self):
+ self.data = [
+ # Array scalars
+ (np.array(3.), None),
+ (np.array(3), 'f8'),
+ # 1D arrays
+ (np.arange(6, dtype='f4'), None),
+ (np.arange(6), 'c16'),
+ # 2D C-layout arrays
+ (np.arange(6).reshape(2, 3), None),
+ (np.arange(6).reshape(3, 2), 'i1'),
+ # 2D F-layout arrays
+ (np.arange(6).reshape((2, 3), order='F'), None),
+ (np.arange(6).reshape((3, 2), order='F'), 'i1'),
+ # 3D C-layout arrays
+ (np.arange(24).reshape(2, 3, 4), None),
+ (np.arange(24).reshape(4, 3, 2), 'f4'),
+ # 3D F-layout arrays
+ (np.arange(24).reshape((2, 3, 4), order='F'), None),
+ (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
+ # 3D non-C/F-layout arrays
+ (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
+ (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
+ ]
+ self.shapes = [(), (5,), (5,6,), (5,6,7,)]
+
+ def compare_array_value(self, dz, value, fill_value):
+ if value is not None:
+ if fill_value:
+                # This conversion is close to what np.full_like uses, but
+                # we may want to convert directly in the future, which may
+                # raise errors where this conversion does not.
+ z = np.array(value).astype(dz.dtype)
+ assert_(np.all(dz == z))
+ else:
+ assert_(np.all(dz == value))
+
+ def check_like_function(self, like_function, value, fill_value=False):
+ if fill_value:
+ fill_kwarg = {'fill_value': value}
+ else:
+ fill_kwarg = {}
+ for d, dtype in self.data:
+ # default (K) order, dtype
+ dz = like_function(d, dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_equal(np.array(dz.strides)*d.dtype.itemsize,
+ np.array(d.strides)*dz.dtype.itemsize)
+ assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
+ assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # C order, default dtype
+ dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_(dz.flags.c_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # F order, default dtype
+ dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ assert_(dz.flags.f_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # A order
+ dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
+ assert_equal(dz.shape, d.shape)
+ if d.flags.f_contiguous:
+ assert_(dz.flags.f_contiguous)
+ else:
+ assert_(dz.flags.c_contiguous)
+ if dtype is None:
+ assert_equal(dz.dtype, d.dtype)
+ else:
+ assert_equal(dz.dtype, np.dtype(dtype))
+ self.compare_array_value(dz, value, fill_value)
+
+ # Test the 'shape' parameter
+ for s in self.shapes:
+ for o in 'CFA':
+ sz = like_function(d, dtype=dtype, shape=s, order=o,
+ **fill_kwarg)
+ assert_equal(sz.shape, s)
+ if dtype is None:
+ assert_equal(sz.dtype, d.dtype)
+ else:
+ assert_equal(sz.dtype, np.dtype(dtype))
+ if o == 'C' or (o == 'A' and d.flags.c_contiguous):
+ assert_(sz.flags.c_contiguous)
+ elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
+ assert_(sz.flags.f_contiguous)
+ self.compare_array_value(sz, value, fill_value)
+
+ if (d.ndim != len(s)):
+ assert_equal(np.argsort(like_function(d, dtype=dtype,
+ shape=s, order='K',
+ **fill_kwarg).strides),
+ np.argsort(np.empty(s, dtype=dtype,
+ order='C').strides))
+ else:
+ assert_equal(np.argsort(like_function(d, dtype=dtype,
+ shape=s, order='K',
+ **fill_kwarg).strides),
+ np.argsort(d.strides))
+
+ # Test the 'subok' parameter
+ class MyNDArray(np.ndarray):
+ pass
+
+ a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
+
+ b = like_function(a, **fill_kwarg)
+ assert_(type(b) is MyNDArray)
+
+ b = like_function(a, subok=False, **fill_kwarg)
+ assert_(type(b) is not MyNDArray)
+
+ def test_ones_like(self):
+ self.check_like_function(np.ones_like, 1)
+
+ def test_zeros_like(self):
+ self.check_like_function(np.zeros_like, 0)
+
+ def test_empty_like(self):
+ self.check_like_function(np.empty_like, None)
+
+ def test_filled_like(self):
+ self.check_like_function(np.full_like, 0, True)
+ self.check_like_function(np.full_like, 1, True)
+ self.check_like_function(np.full_like, 1000, True)
+ self.check_like_function(np.full_like, 123.456, True)
+ # Inf to integer casts cause invalid-value errors: ignore them.
+ with np.errstate(invalid="ignore"):
+ self.check_like_function(np.full_like, np.inf, True)
+
+ @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like,
+ np.zeros_like, np.ones_like])
+ @pytest.mark.parametrize('dtype', [str, bytes])
+ def test_dtype_str_bytes(self, likefunc, dtype):
+ # Regression test for gh-19860
+ a = np.arange(16).reshape(2, 8)
+ b = a[:, ::2] # Ensure b is not contiguous.
+ kwargs = {'fill_value': ''} if likefunc == np.full_like else {}
+ result = likefunc(b, dtype=dtype, **kwargs)
+ if dtype == str:
+ assert result.strides == (16, 4)
+ else:
+ # dtype is bytes
+ assert result.strides == (4, 1)
+
+
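+# The stride expectations above follow from the itemsizes involved:
+# dtype=str resolves to 'U1' (4 bytes per UCS-4 code point) and
+# dtype=bytes to 'S1' (1 byte), and the dtype change forces a fresh
+# contiguous result. A quick check of those itemsizes:
+assert np.dtype('U1').itemsize == 4
+assert np.dtype('S1').itemsize == 1
+
+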
+class TestCorrelate:
+ def _setup(self, dt):
+ self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
+ self.xs = np.arange(1, 20)[::3]
+ self.y = np.array([-1, -2, -3], dtype=dt)
+ self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
+ self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
+ self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
+ self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
+ self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
+ self.zs = np.array([-3., -14., -30., -48., -66., -84.,
+ -102., -54., -19.], dtype=dt)
+
+ def test_float(self):
+ self._setup(float)
+ z = np.correlate(self.x, self.y, 'full')
+ assert_array_almost_equal(z, self.z1)
+ z = np.correlate(self.x, self.y[:-1], 'full')
+ assert_array_almost_equal(z, self.z1_4)
+ z = np.correlate(self.y, self.x, 'full')
+ assert_array_almost_equal(z, self.z2)
+ z = np.correlate(self.x[::-1], self.y, 'full')
+ assert_array_almost_equal(z, self.z1r)
+ z = np.correlate(self.y, self.x[::-1], 'full')
+ assert_array_almost_equal(z, self.z2r)
+ z = np.correlate(self.xs, self.y, 'full')
+ assert_array_almost_equal(z, self.zs)
+
+ def test_object(self):
+ self._setup(Decimal)
+ z = np.correlate(self.x, self.y, 'full')
+ assert_array_almost_equal(z, self.z1)
+ z = np.correlate(self.y, self.x, 'full')
+ assert_array_almost_equal(z, self.z2)
+
+ def test_no_overwrite(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ np.correlate(d, k)
+ assert_array_equal(d, np.ones(100))
+ assert_array_equal(k, np.ones(3))
+
+ def test_complex(self):
+ x = np.array([1, 2, 3, 4+1j], dtype=complex)
+ y = np.array([-1, -2j, 3+1j], dtype=complex)
+ r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
+ r_z = r_z[::-1].conjugate()
+ z = np.correlate(y, x, mode='full')
+ assert_array_almost_equal(z, r_z)
+
+ def test_zero_size(self):
+ with pytest.raises(ValueError):
+ np.correlate(np.array([]), np.ones(1000), mode='full')
+ with pytest.raises(ValueError):
+ np.correlate(np.ones(1000), np.array([]), mode='full')
+
+ def test_mode(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ default_mode = np.correlate(d, k, mode='valid')
+ with assert_warns(DeprecationWarning):
+ valid_mode = np.correlate(d, k, mode='v')
+ assert_array_equal(valid_mode, default_mode)
+ # integer mode
+ with assert_raises(ValueError):
+ np.correlate(d, k, mode=-1)
+ assert_array_equal(np.correlate(d, k, mode=0), valid_mode)
+ # illegal arguments
+ with assert_raises(TypeError):
+ np.correlate(d, k, mode=None)
+
+
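+# In 'full' mode np.correlate slides the second sequence across the
+# first, taking a dot product at every overlap position. A naive
+# O(n*m) sketch for 1-D real inputs (_correlate_full is a hypothetical
+# helper, not the library's C implementation, which also conjugates
+# complex inputs):
+def _correlate_full(a, v):
+    a = np.asarray(a, dtype=float)
+    v = np.asarray(v, dtype=float)
+    n, m = len(a), len(v)
+    # zero-pad so that every overlap position is covered
+    padded = np.concatenate([np.zeros(m - 1), a, np.zeros(m - 1)])
+    return np.array([padded[k:k + m] @ v for k in range(n + m - 1)])
+
+assert np.allclose(_correlate_full([1, 2, 3, 4, 5], [-1, -2, -3]),
+                   np.correlate([1, 2, 3, 4, 5], [-1, -2, -3], 'full'))
+
+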
+class TestConvolve:
+ def test_object(self):
+ d = [1.] * 100
+ k = [1.] * 3
+ assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
+
+ def test_no_overwrite(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ np.convolve(d, k)
+ assert_array_equal(d, np.ones(100))
+ assert_array_equal(k, np.ones(3))
+
+ def test_mode(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ default_mode = np.convolve(d, k, mode='full')
+ with assert_warns(DeprecationWarning):
+ full_mode = np.convolve(d, k, mode='f')
+ assert_array_equal(full_mode, default_mode)
+ # integer mode
+ with assert_raises(ValueError):
+ np.convolve(d, k, mode=-1)
+ assert_array_equal(np.convolve(d, k, mode=2), full_mode)
+ # illegal arguments
+ with assert_raises(TypeError):
+ np.convolve(d, k, mode=None)
+
+
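+# For real inputs, convolution is correlation with the second argument
+# reversed; a quick identity check under that assumption:
+_d, _k = np.arange(1., 6.), np.array([-1., -2., -3.])
+assert np.allclose(np.convolve(_d, _k), np.correlate(_d, _k[::-1], 'full'))
+
+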
+class TestArgwhere:
+
+ @pytest.mark.parametrize('nd', [0, 1, 2])
+ def test_nd(self, nd):
+ # get an nd array with multiple elements in every dimension
+ x = np.empty((2,)*nd, bool)
+
+ # none
+ x[...] = False
+ assert_equal(np.argwhere(x).shape, (0, nd))
+
+ # only one
+ x[...] = False
+ x.flat[0] = True
+ assert_equal(np.argwhere(x).shape, (1, nd))
+
+ # all but one
+ x[...] = True
+ x.flat[0] = False
+ assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+ # all
+ x[...] = True
+ assert_equal(np.argwhere(x).shape, (x.size, nd))
+
+ def test_2D(self):
+ x = np.arange(6).reshape((2, 3))
+ assert_array_equal(np.argwhere(x > 1),
+ [[0, 2],
+ [1, 0],
+ [1, 1],
+ [1, 2]])
+
+ def test_list(self):
+ assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
+
+
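+# np.argwhere(x) is documented as equivalent to
+# np.transpose(np.nonzero(x)): one row of coordinates per nonzero
+# element. A quick identity check on the 2-D case above:
+_m = np.arange(6).reshape(2, 3)
+assert np.array_equal(np.argwhere(_m > 1), np.transpose(np.nonzero(_m > 1)))
+
+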
+class TestStringFunction:
+
+ def test_set_string_function(self):
+ a = np.array([1])
+ np.set_string_function(lambda x: "FOO", repr=True)
+ assert_equal(repr(a), "FOO")
+ np.set_string_function(None, repr=True)
+ assert_equal(repr(a), "array([1])")
+
+ np.set_string_function(lambda x: "FOO", repr=False)
+ assert_equal(str(a), "FOO")
+ np.set_string_function(None, repr=False)
+ assert_equal(str(a), "[1]")
+
+
+class TestRoll:
+ def test_roll1d(self):
+ x = np.arange(10)
+ xr = np.roll(x, 2)
+ assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
+
+ def test_roll2d(self):
+ x2 = np.reshape(np.arange(10), (2, 5))
+ x2r = np.roll(x2, 1)
+ assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, 1, axis=0)
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, 1, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ # Roll multiple axes at once.
+ x2r = np.roll(x2, 1, axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (-1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (0, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, (0, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
+
+ x2r = np.roll(x2, (1, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (-1, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
+
+ # Roll the same axis multiple times.
+ x2r = np.roll(x2, 1, axis=(0, 0))
+ assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
+
+ x2r = np.roll(x2, 1, axis=(1, 1))
+ assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
+
+ # Roll more than one turn in either direction.
+ x2r = np.roll(x2, 6, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, -4, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ def test_roll_empty(self):
+ x = np.array([])
+ assert_equal(np.roll(x, 1), np.array([]))
+
+
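+# np.roll (tested above) shifts elements circularly, so entries pushed
+# off one end reappear at the other. A minimal 1-D sketch using wrapped
+# indices (_roll1d is a hypothetical helper, not the library
+# implementation):
+def _roll1d(x, shift):
+    n = len(x)
+    return x[(np.arange(n) - shift) % n]
+
+assert np.array_equal(_roll1d(np.arange(10), 2), np.roll(np.arange(10), 2))
+
+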
+class TestRollaxis:
+
+ # expected shape indexed by (axis, start) for array of
+ # shape (1, 2, 3, 4)
+ tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
+ (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
+ (0, 4): (2, 3, 4, 1),
+ (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
+ (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
+ (1, 4): (1, 3, 4, 2),
+ (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
+ (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
+ (2, 4): (1, 2, 4, 3),
+ (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
+ (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
+ (3, 4): (1, 2, 3, 4)}
+
+ def test_exceptions(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
+ assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
+ assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
+ assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
+
+ def test_results(self):
+ a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
+ aind = np.indices(a.shape)
+ assert_(a.flags['OWNDATA'])
+ for (i, j) in self.tgtshape:
+ # positive axis, positive start
+ res = np.rollaxis(a, axis=i, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, positive start
+ ip = i + 1
+ res = np.rollaxis(a, axis=-ip, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, j)])
+ assert_(not res.flags['OWNDATA'])
+
+ # positive axis, negative start
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=i, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, negative start
+ ip = i + 1
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=-ip, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+
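+# np.moveaxis (tested below) supersedes rollaxis with simpler
+# semantics: drop the source axis from the transpose order, then
+# reinsert it at the destination position. A rough sketch for a single
+# already-normalized axis pair (_moveaxis_one is a hypothetical
+# helper):
+def _moveaxis_one(a, source, destination):
+    order = [ax for ax in range(a.ndim) if ax != source]
+    order.insert(destination, source)
+    return a.transpose(order)
+
+assert _moveaxis_one(np.zeros((1, 2, 3)), 0, 2).shape == (2, 3, 1)
+
+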
+class TestMoveaxis:
+ def test_move_to_end(self):
+ x = np.random.randn(5, 6, 7)
+ for source, expected in [(0, (6, 7, 5)),
+ (1, (5, 7, 6)),
+ (2, (5, 6, 7)),
+ (-1, (5, 6, 7))]:
+ actual = np.moveaxis(x, source, -1).shape
+ assert_(actual, expected)
+
+ def test_move_new_position(self):
+ x = np.random.randn(1, 2, 3, 4)
+ for source, destination, expected in [
+ (0, 1, (2, 1, 3, 4)),
+ (1, 2, (1, 3, 2, 4)),
+ (1, -1, (1, 3, 4, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_(actual, expected)
+
+ def test_preserve_order(self):
+ x = np.zeros((1, 2, 3, 4))
+ for source, destination in [
+ (0, 0),
+ (3, -1),
+ (-1, 3),
+ ([0, -1], [0, -1]),
+ ([2, 0], [2, 0]),
+ (range(4), range(4)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_(actual, (1, 2, 3, 4))
+
+ def test_move_multiples(self):
+ x = np.zeros((0, 1, 2, 3))
+ for source, destination, expected in [
+ ([0, 1], [2, 3], (2, 3, 0, 1)),
+ ([2, 3], [0, 1], (2, 3, 0, 1)),
+ ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+ ([3, 0], [1, 0], (0, 3, 1, 2)),
+ ([0, 3], [0, 1], (0, 3, 1, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_(actual, expected)
+
+ def test_errors(self):
+ x = np.random.randn(1, 2, 3)
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
+ np.moveaxis, x, 3, 0)
+ assert_raises_regex(np.AxisError, 'source.*out of bounds',
+ np.moveaxis, x, -4, 0)
+ assert_raises_regex(np.AxisError, 'destination.*out of bounds',
+ np.moveaxis, x, 0, 5)
+ assert_raises_regex(ValueError, 'repeated axis in `source`',
+ np.moveaxis, x, [0, 0], [0, 1])
+ assert_raises_regex(ValueError, 'repeated axis in `destination`',
+ np.moveaxis, x, [0, 1], [1, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, 0, [0, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, [0, 1], [0])
+
+ def test_array_likes(self):
+ x = np.ma.zeros((1, 2, 3))
+ result = np.moveaxis(x, 0, 0)
+ assert_(x.shape, result.shape)
+ assert_(isinstance(result, np.ma.MaskedArray))
+
+ x = [1, 2, 3]
+ result = np.moveaxis(x, 0, 0)
+ assert_(x, list(result))
+ assert_(isinstance(result, np.ndarray))
+
+
+class TestCross:
+ def test_2x2(self):
+ u = [1, 2]
+ v = [3, 4]
+ z = -2
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_2x3(self):
+ u = [1, 2]
+ v = [3, 4, 5]
+ z = np.array([10, -5, -2])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_3x3(self):
+ u = [1, 2, 3]
+ v = [4, 5, 6]
+ z = np.array([-3, 6, -3])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_broadcasting(self):
+ # Ticket #2624 (Trac #2032)
+ u = np.tile([1, 2], (11, 1))
+ v = np.tile([3, 4], (11, 1))
+ z = -2
+ assert_equal(np.cross(u, v), z)
+ assert_equal(np.cross(v, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ u = np.tile([1, 2], (11, 1)).T
+ v = np.tile([3, 4, 5], (11, 1))
+ z = np.tile([10, -5, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0), z)
+ assert_equal(np.cross(v, u.T), -z)
+ assert_equal(np.cross(v, v), 0)
+
+ u = np.tile([1, 2, 3], (11, 1)).T
+ v = np.tile([3, 4], (11, 1)).T
+ z = np.tile([-12, 9, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
+ assert_equal(np.cross(v.T, u.T), -z)
+ assert_equal(np.cross(u.T, u.T), 0)
+
+ u = np.tile([1, 2, 3], (5, 1))
+ v = np.tile([4, 5, 6], (5, 1)).T
+ z = np.tile([-3, 6, -3], (5, 1))
+ assert_equal(np.cross(u, v, axisb=0), z)
+ assert_equal(np.cross(v.T, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ def test_broadcasting_shapes(self):
+ u = np.ones((2, 1, 3))
+ v = np.ones((5, 3))
+ assert_equal(np.cross(u, v).shape, (2, 5, 3))
+ u = np.ones((10, 3, 5))
+ v = np.ones((2, 5))
+ assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
+ u = np.ones((10, 3, 5, 7))
+ v = np.ones((5, 7, 2))
+ assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
+ assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
+ assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
+ # gh-5885
+ u = np.ones((3, 4, 2))
+ for axisc in range(-2, 2):
+ assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
+
+ def test_uint8_int32_mixed_dtypes(self):
+ # regression test for gh-19138
+ u = np.array([[195, 8, 9]], np.uint8)
+ v = np.array([250, 166, 68], np.int32)
+ z = np.array([[950, 11010, -30370]], dtype=np.int32)
+ assert_equal(np.cross(v, u), z)
+ assert_equal(np.cross(u, v), -z)
+
+
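+# For 3-vectors np.cross follows the textbook expansion
+# (u x v)_i = u_j * v_k - u_k * v_j over cyclic (i, j, k); a direct
+# check against the TestCross 3x3 data:
+_u, _v = np.array([1, 2, 3]), np.array([4, 5, 6])
+assert np.array_equal(np.cross(_u, _v),
+                      [_u[1]*_v[2] - _u[2]*_v[1],
+                       _u[2]*_v[0] - _u[0]*_v[2],
+                       _u[0]*_v[1] - _u[1]*_v[0]])
+
+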
+def test_outer_out_param():
+ arr1 = np.ones((5,))
+ arr2 = np.ones((2,))
+ arr3 = np.linspace(-2, 2, 5)
+ out1 = np.ndarray(shape=(5,5))
+ out2 = np.ndarray(shape=(2, 5))
+ res1 = np.outer(arr1, arr3, out1)
+ assert_equal(res1, out1)
+ assert_equal(np.outer(arr2, arr3, out2), out2)
+
+
+class TestIndices:
+
+ def test_simple(self):
+ [x, y] = np.indices((4, 3))
+ assert_array_equal(x, np.array([[0, 0, 0],
+ [1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]]))
+ assert_array_equal(y, np.array([[0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2]]))
+
+ def test_single_input(self):
+ [x] = np.indices((4,))
+ assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+ [x] = np.indices((4,), sparse=True)
+ assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+ def test_scalar_input(self):
+ assert_array_equal([], np.indices(()))
+ assert_array_equal([], np.indices((), sparse=True))
+ assert_array_equal([[]], np.indices((0,)))
+ assert_array_equal([[]], np.indices((0,), sparse=True))
+
+ def test_sparse(self):
+ [x, y] = np.indices((4,3), sparse=True)
+ assert_array_equal(x, np.array([[0], [1], [2], [3]]))
+ assert_array_equal(y, np.array([[0, 1, 2]]))
+
+ @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
+ @pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
+ def test_return_type(self, dtype, dims):
+ inds = np.indices(dims, dtype=dtype)
+ assert_(inds.dtype == dtype)
+
+ for arr in np.indices(dims, dtype=dtype, sparse=True):
+ assert_(arr.dtype == dtype)
+
+
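+# np.indices(shape) stacks one coordinate grid per dimension and
+# matches matrix-indexed meshgrid output; a quick equivalence check:
+assert np.array_equal(np.indices((4, 3)),
+                      np.stack(np.meshgrid(np.arange(4), np.arange(3),
+                                           indexing='ij')))
+
+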
+class TestRequire:
+ flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
+ 'F', 'F_CONTIGUOUS', 'FORTRAN',
+ 'A', 'ALIGNED',
+ 'W', 'WRITEABLE',
+ 'O', 'OWNDATA']
+
+ def generate_all_false(self, dtype):
+ arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
+ arr.setflags(write=False)
+ a = arr['a']
+ assert_(not a.flags['C'])
+ assert_(not a.flags['F'])
+ assert_(not a.flags['O'])
+ assert_(not a.flags['W'])
+ assert_(not a.flags['A'])
+ return a
+
+ def set_and_check_flag(self, flag, dtype, arr):
+ if dtype is None:
+ dtype = arr.dtype
+ b = np.require(arr, dtype, [flag])
+ assert_(b.flags[flag])
+ assert_(b.dtype == dtype)
+
+ # a further call to np.require ought to return the same array
+ # unless OWNDATA is specified.
+ c = np.require(b, None, [flag])
+ if flag[0] != 'O':
+ assert_(c is b)
+ else:
+ assert_(c.flags[flag])
+
+ def test_require_each(self):
+
+ id = ['f8', 'i4']
+ fd = [None, 'f8', 'c16']
+ for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
+ a = self.generate_all_false(idtype)
+ self.set_and_check_flag(flag, fdtype, a)
+
+ def test_unknown_requirement(self):
+ a = self.generate_all_false('f8')
+ assert_raises(KeyError, np.require, a, None, 'Q')
+
+ def test_non_array_input(self):
+ a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
+ assert_(a.flags['O'])
+ assert_(a.flags['C'])
+ assert_(a.flags['A'])
+ assert_(a.dtype == 'i4')
+ assert_equal(a, [1, 2, 3, 4])
+
+ def test_C_and_F_simul(self):
+ a = self.generate_all_false('f8')
+ assert_raises(ValueError, np.require, a, None, ['C', 'F'])
+
+ def test_ensure_array(self):
+ class ArraySubclass(np.ndarray):
+ pass
+
+ a = ArraySubclass((2, 2))
+ b = np.require(a, None, ['E'])
+ assert_(type(b) is np.ndarray)
+
+ def test_preserve_subtype(self):
+ class ArraySubclass(np.ndarray):
+ pass
+
+ for flag in self.flag_names:
+ a = ArraySubclass((2, 2))
+ self.set_and_check_flag(flag, None, a)
+
+
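+# The 'E'/'ENSUREARRAY' requirement seen in test_ensure_array forces a
+# base-class ndarray, which is why subclasses are stripped there; the
+# same should hold for other subclasses such as masked arrays:
+assert type(np.require(np.ma.zeros(2), requirements=['E'])) is np.ndarray
+
+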
+class TestBroadcast:
+ def test_broadcast_in_args(self):
+ # gh-5881
+ arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
+ np.empty((5, 1, 7))]
+ mits = [np.broadcast(*arrs),
+ np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
+ np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
+ np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
+ np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
+ for mit in mits:
+ assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
+ assert_equal(mit.nd, 3)
+ assert_equal(mit.numiter, 4)
+ for a, ia in zip(arrs, mit.iters):
+ assert_(a is ia.base)
+
+ def test_broadcast_single_arg(self):
+ # gh-6899
+ arrs = [np.empty((5, 6, 7))]
+ mit = np.broadcast(*arrs)
+ assert_equal(mit.shape, (5, 6, 7))
+ assert_equal(mit.ndim, 3)
+ assert_equal(mit.nd, 3)
+ assert_equal(mit.numiter, 1)
+ assert_(arrs[0] is mit.iters[0].base)
+
+ def test_number_of_arguments(self):
+ arr = np.empty((5,))
+ for j in range(35):
+ arrs = [arr] * j
+ if j > 32:
+ assert_raises(ValueError, np.broadcast, *arrs)
+ else:
+ mit = np.broadcast(*arrs)
+ assert_equal(mit.numiter, j)
+
+ def test_broadcast_error_kwargs(self):
+        # gh-13455
+ arrs = [np.empty((5, 6, 7))]
+ mit = np.broadcast(*arrs)
+ mit2 = np.broadcast(*arrs, **{})
+ assert_equal(mit.shape, mit2.shape)
+ assert_equal(mit.ndim, mit2.ndim)
+ assert_equal(mit.nd, mit2.nd)
+ assert_equal(mit.numiter, mit2.numiter)
+ assert_(mit.iters[0].base is mit2.iters[0].base)
+
+ assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
+
+ def test_shape_mismatch_error_message(self):
+ with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and "
+ r"arg 2 with shape \(2,\)"):
+ np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
+
+
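+# np.broadcast only builds an iterator over already-broadcast operands;
+# the underlying shape arithmetic (align trailing axes, stretch size-1
+# dimensions) is exposed directly by np.broadcast_shapes:
+assert np.broadcast_shapes((5, 6, 1), (7,), (5, 1, 7)) == (5, 6, 7)
+
+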
+class TestKeepdims:
+
+ class sub_array(np.ndarray):
+ def sum(self, axis=None, dtype=None, out=None):
+ return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
+
+ def test_raise(self):
+ sub_class = self.sub_array
+ x = np.arange(30).view(sub_class)
+ assert_raises(TypeError, np.sum, x, keepdims=True)
+
+
+class TestTensordot:
+
+ def test_zero_dimension(self):
+ # Test resolution to issue #5663
+ a = np.ndarray((3,0))
+ b = np.ndarray((0,4))
+ td = np.tensordot(a, b, (1, 0))
+ assert_array_equal(td, np.dot(a, b))
+ assert_array_equal(td, np.einsum('ij,jk', a, b))
+
+ def test_zero_dimensional(self):
+ # gh-12130
+ arr_0d = np.array(1)
+ ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined
+ assert_array_equal(ret, arr_0d)
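+
+
+# With axes=([], []) tensordot contracts nothing and degenerates to an
+# outer product, which for the 0-d inputs above is plain multiplication:
+assert np.tensordot(np.array(2), np.array(3), ([], [])) == 6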
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py
new file mode 100644
index 00000000..072cd65f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_numerictypes.py
@@ -0,0 +1,564 @@
+import sys
+import itertools
+
+import pytest
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY
+
+# This is the structure of the table used for plain objects:
+#
+# +-+-+-+
+# |x|y|z|
+# +-+-+-+
+
+# Structure of a plain array description:
+Pdescr = [
+ ('x', 'i4', (2,)),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+# A plain list of tuples with values for testing:
+PbufferT = [
+ # x y z
+ ([3, 2], [[6., 4.], [6., 4.]], 8),
+ ([4, 3], [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+# This is the structure of the table used for nested objects (DON'T PANIC!):
+#
+# +-+---------------------------------+-----+----------+-+-+
+# |x|Info |color|info |y|z|
+# | +-----+--+----------------+----+--+ +----+-----+ | |
+# | |value|y2|Info2 |name|z2| |Name|Value| | |
+# | | | +----+-----+--+--+ | | | | | | |
+# | | | |name|value|y3|z3| | | | | | | |
+# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
+#
+
+# The corresponding nested array description:
+Ndescr = [
+ ('x', 'i4', (2,)),
+ ('Info', [
+ ('value', 'c16'),
+ ('y2', 'f8'),
+ ('Info2', [
+ ('name', 'S2'),
+ ('value', 'c16', (2,)),
+ ('y3', 'f8', (2,)),
+ ('z3', 'u4', (2,))]),
+ ('name', 'S2'),
+ ('z2', 'b1')]),
+ ('color', 'S2'),
+ ('info', [
+ ('Name', 'U8'),
+ ('Value', 'c16')]),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+NbufferT = [
+ # x Info color info y z
+ # value y2 Info2 name z2 Name Value
+ # name value y3 z3
+ ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True),
+ b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
+ ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False),
+ b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+byteorder = {'little':'<', 'big':'>'}[sys.byteorder]
+
+def normalize_descr(descr):
+ "Normalize a description adding the platform byteorder."
+
+ out = []
+ for item in descr:
+ dtype = item[1]
+ if isinstance(dtype, str):
+ if dtype[0] not in ['|', '<', '>']:
+ onebyte = dtype[1:] == "1"
+ if onebyte or dtype[0] in ['S', 'V', 'b']:
+ dtype = "|" + dtype
+ else:
+ dtype = byteorder + dtype
+ if len(item) > 2 and np.prod(item[2]) > 1:
+ nitem = (item[0], dtype, item[2])
+ else:
+ nitem = (item[0], dtype)
+ out.append(nitem)
+ elif isinstance(dtype, list):
+ l = normalize_descr(dtype)
+ out.append((item[0], l))
+ else:
+            raise ValueError("Expected a str or list and got %s" %
+                             (type(dtype)))
+ return out
+
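+# A quick self-check of the helper: one-byte fields get the '|'
+# (byteorder-irrelevant) marker, while wider fields pick up the
+# platform prefix computed above:
+assert normalize_descr([('z', 'u1')]) == [('z', '|u1')]
+assert normalize_descr([('x', 'i4')]) == [('x', byteorder + 'i4')]
+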
+
+############################################################
+# Creation tests
+############################################################
+
+class CreateZeros:
+ """Check the creation of heterogeneous arrays zero-valued"""
+
+ def test_zeros0D(self):
+ """Check creation of 0-dimensional objects"""
+ h = np.zeros((), dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype.fields['x'][0].name[:4] == 'void')
+ assert_(h.dtype.fields['x'][0].char == 'V')
+ assert_(h.dtype.fields['x'][0].type == np.void)
+ # A small check that data is ok
+ assert_equal(h['z'], np.zeros((), dtype='u1'))
+
+ def test_zerosSD(self):
+ """Check creation of single-dimensional objects"""
+ h = np.zeros((2,), dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['y'].name[:4] == 'void')
+ assert_(h.dtype['y'].char == 'V')
+ assert_(h.dtype['y'].type == np.void)
+ # A small check that data is ok
+ assert_equal(h['z'], np.zeros((2,), dtype='u1'))
+
+ def test_zerosMD(self):
+ """Check creation of multi-dimensional objects"""
+ h = np.zeros((2, 3), dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['z'].name == 'uint8')
+ assert_(h.dtype['z'].char == 'B')
+ assert_(h.dtype['z'].type == np.uint8)
+ # A small check that data is ok
+ assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
+
+
+class TestCreateZerosPlain(CreateZeros):
+ """Check the creation of heterogeneous arrays zero-valued (plain)"""
+ _descr = Pdescr
+
+class TestCreateZerosNested(CreateZeros):
+ """Check the creation of heterogeneous arrays zero-valued (nested)"""
+ _descr = Ndescr
+
+
+class CreateValues:
+ """Check the creation of heterogeneous arrays with values"""
+
+ def test_tuple(self):
+ """Check creation from tuples"""
+ h = np.array(self._buffer, dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ if self.multiple_rows:
+ assert_(h.shape == (2,))
+ else:
+ assert_(h.shape == ())
+
+ def test_list_of_tuple(self):
+ """Check creation from list of tuples"""
+ h = np.array([self._buffer], dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ if self.multiple_rows:
+ assert_(h.shape == (1, 2))
+ else:
+ assert_(h.shape == (1,))
+
+ def test_list_of_list_of_tuple(self):
+ """Check creation from list of list of tuples"""
+ h = np.array([[self._buffer]], dtype=self._descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ if self.multiple_rows:
+ assert_(h.shape == (1, 1, 2))
+ else:
+ assert_(h.shape == (1, 1))
+
+
+class TestCreateValuesPlainSingle(CreateValues):
+ """Check the creation of heterogeneous arrays (plain, single row)"""
+ _descr = Pdescr
+ multiple_rows = 0
+ _buffer = PbufferT[0]
+
+class TestCreateValuesPlainMultiple(CreateValues):
+ """Check the creation of heterogeneous arrays (plain, multiple rows)"""
+ _descr = Pdescr
+ multiple_rows = 1
+ _buffer = PbufferT
+
+class TestCreateValuesNestedSingle(CreateValues):
+ """Check the creation of heterogeneous arrays (nested, single row)"""
+ _descr = Ndescr
+ multiple_rows = 0
+ _buffer = NbufferT[0]
+
+class TestCreateValuesNestedMultiple(CreateValues):
+ """Check the creation of heterogeneous arrays (nested, multiple rows)"""
+ _descr = Ndescr
+ multiple_rows = 1
+ _buffer = NbufferT
+
+
+############################################################
+# Reading tests
+############################################################
+
+class ReadValuesPlain:
+ """Check the reading of values in heterogeneous arrays (plain)"""
+
+ def test_access_fields(self):
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_(h.shape == ())
+ assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+ assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
+ assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
+ else:
+ assert_(len(h) == 2)
+ assert_equal(h['x'], np.array([self._buffer[0][0],
+ self._buffer[1][0]], dtype='i4'))
+ assert_equal(h['y'], np.array([self._buffer[0][1],
+ self._buffer[1][1]], dtype='f8'))
+ assert_equal(h['z'], np.array([self._buffer[0][2],
+ self._buffer[1][2]], dtype='u1'))
+
+
+class TestReadValuesPlainSingle(ReadValuesPlain):
+ """Check the creation of heterogeneous arrays (plain, single row)"""
+ _descr = Pdescr
+ multiple_rows = 0
+ _buffer = PbufferT[0]
+
+class TestReadValuesPlainMultiple(ReadValuesPlain):
+ """Check the values of heterogeneous arrays (plain, multiple rows)"""
+ _descr = Pdescr
+ multiple_rows = 1
+ _buffer = PbufferT
+
+class ReadValuesNested:
+ """Check the reading of values in heterogeneous arrays (nested)"""
+
+ def test_access_top_fields(self):
+ """Check reading the top fields of a nested array"""
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_(h.shape == ())
+ assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
+ assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
+ assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
+ else:
+ assert_(len(h) == 2)
+ assert_equal(h['x'], np.array([self._buffer[0][0],
+ self._buffer[1][0]], dtype='i4'))
+ assert_equal(h['y'], np.array([self._buffer[0][4],
+ self._buffer[1][4]], dtype='f8'))
+ assert_equal(h['z'], np.array([self._buffer[0][5],
+ self._buffer[1][5]], dtype='u1'))
+
+    def test_nested1_accessors(self):
+ """Check reading the nested fields of a nested array (1st level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_equal(h['Info']['value'],
+ np.array(self._buffer[1][0], dtype='c16'))
+ assert_equal(h['Info']['y2'],
+ np.array(self._buffer[1][1], dtype='f8'))
+ assert_equal(h['info']['Name'],
+ np.array(self._buffer[3][0], dtype='U2'))
+ assert_equal(h['info']['Value'],
+ np.array(self._buffer[3][1], dtype='c16'))
+ else:
+ assert_equal(h['Info']['value'],
+ np.array([self._buffer[0][1][0],
+ self._buffer[1][1][0]],
+ dtype='c16'))
+ assert_equal(h['Info']['y2'],
+ np.array([self._buffer[0][1][1],
+ self._buffer[1][1][1]],
+ dtype='f8'))
+ assert_equal(h['info']['Name'],
+ np.array([self._buffer[0][3][0],
+ self._buffer[1][3][0]],
+ dtype='U2'))
+ assert_equal(h['info']['Value'],
+ np.array([self._buffer[0][3][1],
+ self._buffer[1][3][1]],
+ dtype='c16'))
+
+    def test_nested2_accessors(self):
+ """Check reading the nested fields of a nested array (2nd level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ if not self.multiple_rows:
+ assert_equal(h['Info']['Info2']['value'],
+ np.array(self._buffer[1][2][1], dtype='c16'))
+ assert_equal(h['Info']['Info2']['z3'],
+ np.array(self._buffer[1][2][3], dtype='u4'))
+ else:
+ assert_equal(h['Info']['Info2']['value'],
+ np.array([self._buffer[0][1][2][1],
+ self._buffer[1][1][2][1]],
+ dtype='c16'))
+ assert_equal(h['Info']['Info2']['z3'],
+ np.array([self._buffer[0][1][2][3],
+ self._buffer[1][1][2][3]],
+ dtype='u4'))
+
+ def test_nested1_descriptor(self):
+ """Check access nested descriptors of a nested array (1st level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ assert_(h.dtype['Info']['value'].name == 'complex128')
+ assert_(h.dtype['Info']['y2'].name == 'float64')
+ assert_(h.dtype['info']['Name'].name == 'str256')
+ assert_(h.dtype['info']['Value'].name == 'complex128')
+
+ def test_nested2_descriptor(self):
+ """Check access nested descriptors of a nested array (2nd level)"""
+ h = np.array(self._buffer, dtype=self._descr)
+ assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
+ assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
+
+
+class TestReadValuesNestedSingle(ReadValuesNested):
+ """Check the values of heterogeneous arrays (nested, single row)"""
+ _descr = Ndescr
+ multiple_rows = False
+ _buffer = NbufferT[0]
+
+class TestReadValuesNestedMultiple(ReadValuesNested):
+ """Check the values of heterogeneous arrays (nested, multiple rows)"""
+ _descr = Ndescr
+ multiple_rows = True
+ _buffer = NbufferT
+
+class TestEmptyField:
+ def test_assign(self):
+ a = np.arange(10, dtype=np.float32)
+ a.dtype = [("int", "<0i4"), ("float", "<2f4")]
+ assert_(a['int'].shape == (5, 0))
+ assert_(a['float'].shape == (5, 2))
+
+class TestCommonType:
+ def test_scalar_loses1(self):
+ res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
+ assert_(res == 'f4')
+
+ def test_scalar_loses2(self):
+ res = np.find_common_type(['f4', 'f4'], ['i8'])
+ assert_(res == 'f4')
+
+ def test_scalar_wins(self):
+ res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
+ assert_(res == 'c8')
+
+ def test_scalar_wins2(self):
+ res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
+ assert_(res == 'f8')
+
+ def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose
+ res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
+ assert_(res == 'f8')
+
+class TestMultipleFields:
+ def setup_method(self):
+ self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
+
+ def _bad_call(self):
+ return self.ary['f0', 'f1']
+
+ def test_no_tuple(self):
+ assert_raises(IndexError, self._bad_call)
+
+ def test_return(self):
+ res = self.ary[['f0', 'f2']].tolist()
+ assert_(res == [(1, 3), (5, 7)])
+
+
+class TestIsSubDType:
+ # scalar types can be promoted into dtypes
+ wrappers = [np.dtype, lambda x: x]
+
+ def test_both_abstract(self):
+ assert_(np.issubdtype(np.floating, np.inexact))
+ assert_(not np.issubdtype(np.inexact, np.floating))
+
+ def test_same(self):
+ for cls in (np.float32, np.int32):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(np.issubdtype(w1(cls), w2(cls)))
+
+ def test_subclass(self):
+ # note we cannot promote floating to a dtype, as it would turn into a
+ # concrete type
+ for w in self.wrappers:
+ assert_(np.issubdtype(w(np.float32), np.floating))
+ assert_(np.issubdtype(w(np.float64), np.floating))
+
+ def test_subclass_backwards(self):
+ for w in self.wrappers:
+ assert_(not np.issubdtype(np.floating, w(np.float32)))
+ assert_(not np.issubdtype(np.floating, w(np.float64)))
+
+ def test_sibling_class(self):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
+ assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
+ def test_nondtype_nonscalartype(self):
+ # See gh-14619 and gh-9505 which introduced the deprecation to fix
+ # this. These tests are directly taken from gh-9505
+ assert not np.issubdtype(np.float32, 'float64')
+ assert not np.issubdtype(np.float32, 'f8')
+ assert not np.issubdtype(np.int32, str)
+ assert not np.issubdtype(np.int32, 'int64')
+ assert not np.issubdtype(np.str_, 'void')
+ # for the following the correct spellings are
+ # np.integer, np.floating, or np.complexfloating respectively:
+ assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_
+ assert not np.issubdtype(np.float32, float)
+ assert not np.issubdtype(np.complex64, complex)
+ assert not np.issubdtype(np.float32, "float")
+ assert not np.issubdtype(np.float64, "f")
+
+ # Test the same for the correct first datatype and abstract one
+ # in the case of int, float, complex:
+ assert np.issubdtype(np.float64, 'float64')
+ assert np.issubdtype(np.float64, 'f8')
+ assert np.issubdtype(np.str_, str)
+ assert np.issubdtype(np.int64, 'int64')
+ assert np.issubdtype(np.void, 'void')
+ assert np.issubdtype(np.int8, np.integer)
+ assert np.issubdtype(np.float32, np.floating)
+ assert np.issubdtype(np.complex64, np.complexfloating)
+ assert np.issubdtype(np.float64, "float")
+ assert np.issubdtype(np.float32, "f")
+
+
+class TestSctypeDict:
+ def test_longdouble(self):
+ assert_(np.sctypeDict['f8'] is not np.longdouble)
+ assert_(np.sctypeDict['c16'] is not np.clongdouble)
+
+ def test_ulong(self):
+        # Test that 'ulong' behaves like 'long'. np.sctypeDict['long'] is an
+        # alias for np.int_, but np.ulong is not supported as an attribute
+        # for historical reasons (gh-21063)
+ assert_(np.sctypeDict['ulong'] is np.uint)
+ with pytest.warns(FutureWarning):
+ # We will probably allow this in the future:
+ assert not hasattr(np, 'ulong')
+
+class TestBitName:
+ def test_abstract(self):
+ assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
+
+
+class TestMaximumSctype:
+
+ # note that parametrizing with sctype['int'] and similar would skip types
+ # with the same size (gh-11923)
+
+ @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
+ def test_int(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
+
+ @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
+ def test_uint(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
+
+ @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
+ def test_float(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
+
+ @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
+ def test_complex(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
+
+ @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void])
+ def test_other(self, t):
+ assert_equal(np.maximum_sctype(t), t)
+
+
+class Test_sctype2char:
+ # This function is old enough that we're really just documenting the quirks
+ # at this point.
+
+ def test_scalar_type(self):
+ assert_equal(np.sctype2char(np.double), 'd')
+ assert_equal(np.sctype2char(np.int_), 'l')
+ assert_equal(np.sctype2char(np.unicode_), 'U')
+ assert_equal(np.sctype2char(np.bytes_), 'S')
+
+ def test_other_type(self):
+ assert_equal(np.sctype2char(float), 'd')
+ assert_equal(np.sctype2char(list), 'O')
+ assert_equal(np.sctype2char(np.ndarray), 'O')
+
+ def test_third_party_scalar_type(self):
+ from numpy.core._rational_tests import rational
+ assert_raises(KeyError, np.sctype2char, rational)
+ assert_raises(KeyError, np.sctype2char, rational(1))
+
+ def test_array_instance(self):
+ assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
+
+ def test_abstract_type(self):
+ assert_raises(KeyError, np.sctype2char, np.floating)
+
+ def test_non_type(self):
+ assert_raises(ValueError, np.sctype2char, 1)
+
+@pytest.mark.parametrize("rep, expected", [
+ (np.int32, True),
+ (list, False),
+ (1.1, False),
+ (str, True),
+ (np.dtype(np.float64), True),
+ (np.dtype((np.int16, (3, 4))), True),
+ (np.dtype([('a', np.int8)]), True),
+ ])
+def test_issctype(rep, expected):
+ # ensure proper identification of scalar
+ # data-types by issctype()
+ actual = np.issctype(rep)
+ assert_equal(actual, expected)
+
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+ reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+@pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+class TestDocStrings:
+ def test_platform_dependent_aliases(self):
+ if np.int64 is np.int_:
+ assert_('int64' in np.int_.__doc__)
+ elif np.int64 is np.longlong:
+ assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+ # gh-9799
+
+ numeric_types = [
+ np.byte, np.short, np.intc, np.int_, np.longlong,
+ np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+ np.half, np.single, np.double, np.longdouble,
+ np.csingle, np.cdouble, np.clongdouble,
+ ]
+
+ def test_names_are_unique(self):
+ # none of the above may be aliases for each other
+ assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+ # names must be unique
+ names = [t.__name__ for t in self.numeric_types]
+ assert len(set(names)) == len(names)
+
+ @pytest.mark.parametrize('t', numeric_types)
+ def test_names_reflect_attributes(self, t):
+ """ Test that names correspond to where the type is under ``np.`` """
+ assert getattr(np, t.__name__) is t
+
+ @pytest.mark.parametrize('t', numeric_types)
+    def test_names_are_understood_by_dtype(self, t):
+ """ Test the dtype constructor maps names back to the type """
+ assert np.dtype(t.__name__).type is t
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py
new file mode 100644
index 00000000..63432ffa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_overrides.py
@@ -0,0 +1,642 @@
+import inspect
+import sys
+import os
+import tempfile
+from io import StringIO
+from unittest import mock
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex)
+from numpy.core.overrides import (
+ _get_implementing_args, array_function_dispatch,
+ verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
+from numpy.compat import pickle
+import pytest
+
+
+requires_array_function = pytest.mark.skipif(
+ not ARRAY_FUNCTION_ENABLED,
+ reason="__array_function__ dispatch not enabled.")
+
+
+def _return_not_implemented(self, *args, **kwargs):
+ return NotImplemented
+
+
+# need to define this at the top level to test pickling
+@array_function_dispatch(lambda array: (array,))
+def dispatched_one_arg(array):
+ """Docstring."""
+ return 'original'
+
+
+@array_function_dispatch(lambda array1, array2: (array1, array2))
+def dispatched_two_arg(array1, array2):
+ """Docstring."""
+ return 'original'
+
+
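+# The decorator wires plain functions into the __array_function__
+# protocol: the lambda names the "relevant" arguments, and any of them
+# may intercept the call before the default implementation runs. A
+# minimal duck array that claims every function (illustrative only;
+# dispatch must be enabled for the override to fire):
+class _ClaimEverything:
+    def __array_function__(self, func, types, args, kwargs):
+        return 'claimed'
+
+# dispatched_one_arg(_ClaimEverything()) then returns 'claimed'
+# instead of 'original'.
+
+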
+class TestGetImplementingArgs:
+
+ def test_ndarray(self):
+ array = np.array(1)
+
+ args = _get_implementing_args([array])
+ assert_equal(list(args), [array])
+
+ args = _get_implementing_args([array, array])
+ assert_equal(list(args), [array])
+
+ args = _get_implementing_args([array, 1])
+ assert_equal(list(args), [array])
+
+ args = _get_implementing_args([1, array])
+ assert_equal(list(args), [array])
+
+ def test_ndarray_subclasses(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ array = np.array(1).view(np.ndarray)
+ override_sub = np.array(1).view(OverrideSub)
+ no_override_sub = np.array(1).view(NoOverrideSub)
+
+ args = _get_implementing_args([array, override_sub])
+ assert_equal(list(args), [override_sub, array])
+
+ args = _get_implementing_args([array, no_override_sub])
+ assert_equal(list(args), [no_override_sub, array])
+
+ args = _get_implementing_args(
+ [override_sub, no_override_sub])
+ assert_equal(list(args), [override_sub, no_override_sub])
+
+ def test_ndarray_and_duck_array(self):
+
+ class Other:
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ other = Other()
+
+ args = _get_implementing_args([other, array])
+ assert_equal(list(args), [other, array])
+
+ args = _get_implementing_args([array, other])
+ assert_equal(list(args), [array, other])
+
+ def test_ndarray_subclass_and_duck_array(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class Other:
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ subarray = np.array(1).view(OverrideSub)
+ other = Other()
+
+ assert_equal(_get_implementing_args([array, subarray, other]),
+ [subarray, array, other])
+ assert_equal(_get_implementing_args([array, other, subarray]),
+ [subarray, array, other])
+
+ def test_many_duck_arrays(self):
+
+ class A:
+ __array_function__ = _return_not_implemented
+
+ class B(A):
+ __array_function__ = _return_not_implemented
+
+ class C(A):
+ __array_function__ = _return_not_implemented
+
+ class D:
+ __array_function__ = _return_not_implemented
+
+ a = A()
+ b = B()
+ c = C()
+ d = D()
+
+ assert_equal(_get_implementing_args([1]), [])
+ assert_equal(_get_implementing_args([a]), [a])
+ assert_equal(_get_implementing_args([a, 1]), [a])
+ assert_equal(_get_implementing_args([a, a, a]), [a])
+ assert_equal(_get_implementing_args([a, d, a]), [a, d])
+ assert_equal(_get_implementing_args([a, b]), [b, a])
+ assert_equal(_get_implementing_args([b, a]), [b, a])
+ assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
+ assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
+
+ def test_too_many_duck_arrays(self):
+ namespace = dict(__array_function__=_return_not_implemented)
+ types = [type('A' + str(i), (object,), namespace) for i in range(33)]
+ relevant_args = [t() for t in types]
+
+ actual = _get_implementing_args(relevant_args[:32])
+ assert_equal(actual, relevant_args[:32])
+
+ with assert_raises_regex(TypeError, 'distinct argument types'):
+ _get_implementing_args(relevant_args)
+
+
+class TestNDArrayArrayFunction:
+
+ @requires_array_function
+ def test_method(self):
+
+ class Other:
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ array = np.array([1])
+ other = Other()
+ no_override_sub = array.view(NoOverrideSub)
+ override_sub = array.view(OverrideSub)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray,),
+ args=(array, 1.), kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, Other),
+ args=(array, other), kwargs={})
+ assert_(result is NotImplemented)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, NoOverrideSub),
+ args=(array, no_override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, OverrideSub),
+ args=(array, override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ np.concatenate((array, other))
+
+ expected = np.concatenate((array, array))
+ result = np.concatenate((array, no_override_sub))
+ assert_equal(result, expected.view(NoOverrideSub))
+ result = np.concatenate((array, override_sub))
+ assert_equal(result, expected.view(OverrideSub))
+
+ def test_no_wrapper(self):
+ # This shouldn't happen unless a user intentionally calls
+ # __array_function__ with invalid arguments, but check that we raise
+ # an appropriate error all the same.
+ array = np.array(1)
+ func = lambda x: x
+ with assert_raises_regex(AttributeError, '_implementation'):
+ array.__array_function__(func=func, types=(np.ndarray,),
+ args=(array,), kwargs={})
+
+
+@requires_array_function
+class TestArrayFunctionDispatch:
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ roundtripped = pickle.loads(
+ pickle.dumps(dispatched_one_arg, protocol=proto))
+ assert_(roundtripped is dispatched_one_arg)
+
+ def test_name_and_docstring(self):
+ assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
+ if sys.flags.optimize < 2:
+ assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
+
+ def test_interface(self):
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ return (self, func, types, args, kwargs)
+
+ original = MyArray()
+ (obj, func, types, args, kwargs) = dispatched_one_arg(original)
+ assert_(obj is original)
+ assert_(func is dispatched_one_arg)
+ assert_equal(set(types), {MyArray})
+ # assert_equal uses the overloaded np.iscomplexobj() internally
+ assert_(args == (original,))
+ assert_equal(kwargs, {})
+
+ def test_not_implemented(self):
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ return NotImplemented
+
+ array = MyArray()
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ dispatched_one_arg(array)
+
+
+@requires_array_function
+class TestVerifyMatchingSignatures:
+
+ def test_verify_matching_signatures(self):
+
+ verify_matching_signatures(lambda x: 0, lambda x: 0)
+ verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
+ verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
+
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda a: 0, lambda b: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x: 0, lambda x=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
+
+ def test_array_function_dispatch(self):
+
+ with assert_raises(RuntimeError):
+ @array_function_dispatch(lambda x: (x,))
+ def f(y):
+ pass
+
+ # should not raise
+ @array_function_dispatch(lambda x: (x,), verify=False)
+ def f(y):
+ pass
+
+
+def _new_duck_type_and_implements():
+ """Create a duck array type and implements functions."""
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementations."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ return (MyArray, implements)
+
+
+@requires_array_function
+class TestArrayFunctionImplementation:
+
+ def test_one_arg(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(dispatched_one_arg)
+ def _(array):
+ return 'myarray'
+
+ assert_equal(dispatched_one_arg(1), 'original')
+ assert_equal(dispatched_one_arg(MyArray()), 'myarray')
+
+ def test_optional_args(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array, option=None: (array,))
+ def func_with_option(array, option='default'):
+ return option
+
+ @implements(func_with_option)
+ def my_array_func_with_option(array, new_option='myarray'):
+ return new_option
+
+ # we don't need to implement every option on __array_function__
+ # implementations
+ assert_equal(func_with_option(1), 'default')
+ assert_equal(func_with_option(1, option='extra'), 'extra')
+ assert_equal(func_with_option(MyArray()), 'myarray')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), option='extra')
+
+ # but new options on implementations can't be used
+ result = my_array_func_with_option(MyArray(), new_option='yes')
+ assert_equal(result, 'yes')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), new_option='no')
+
+ def test_not_implemented(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array: (array,), module='my')
+ def func(array):
+ return array
+
+ array = np.array(1)
+ assert_(func(array) is array)
+ assert_equal(func.__module__, 'my')
+
+ with assert_raises_regex(
+ TypeError, "no implementation found for 'my.func'"):
+ func(MyArray())
+
+ def test_signature_error_message(self):
+ # The lambda function will be named "<lambda>", but the TypeError
+ # should show the name as "func"
+ def _dispatcher():
+ return ()
+
+ @array_function_dispatch(_dispatcher)
+ def func():
+ pass
+
+ try:
+ func(bad_arg=3)
+ except TypeError as e:
+ expected_exception = e
+
+ try:
+ func(bad_arg=3)
+ raise AssertionError("must fail")
+ except TypeError as exc:
+ assert exc.args == expected_exception.args
+
+ @pytest.mark.parametrize("value", [234, "this func is not replaced"])
+ def test_dispatcher_error(self, value):
+ # If the dispatcher raises an error, we must not attempt to mutate it
+ error = TypeError(value)
+
+ def dispatcher():
+ raise error
+
+ @array_function_dispatch(dispatcher)
+ def func():
+ return 3
+
+ try:
+ func()
+ raise AssertionError("must fail")
+ except TypeError as exc:
+ assert exc is error # unmodified exception
+
+
+class TestNDArrayMethods:
+
+ def test_repr(self):
+ # gh-12162: should still be defined even if __array_function__ doesn't
+ # implement np.array_repr()
+
+ class MyArray(np.ndarray):
+ def __array_function__(*args, **kwargs):
+ return NotImplemented
+
+ array = np.array(1).view(MyArray)
+ assert_equal(repr(array), 'MyArray(1)')
+ assert_equal(str(array), '1')
+
+
+class TestNumPyFunctions:
+
+ def test_set_module(self):
+ assert_equal(np.sum.__module__, 'numpy')
+ assert_equal(np.char.equal.__module__, 'numpy.char')
+ assert_equal(np.fft.fft.__module__, 'numpy.fft')
+ assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
+
+ def test_inspect_sum(self):
+ signature = inspect.signature(np.sum)
+ assert_('axis' in signature.parameters)
+
+ @requires_array_function
+ def test_override_sum(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(np.sum)
+ def _(array):
+ return 'yes'
+
+ assert_equal(np.sum(MyArray()), 'yes')
+
+ @requires_array_function
+ def test_sum_on_mock_array(self):
+
+ # We need a proxy for mocks because __array_function__ is only looked
+ # up in the class dict
+ class ArrayProxy:
+ def __init__(self, value):
+ self.value = value
+ def __array_function__(self, *args, **kwargs):
+ return self.value.__array_function__(*args, **kwargs)
+ def __array__(self, *args, **kwargs):
+ return self.value.__array__(*args, **kwargs)
+
+ proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
+ proxy.value.__array_function__.return_value = 1
+ result = np.sum(proxy)
+ assert_equal(result, 1)
+ proxy.value.__array_function__.assert_called_once_with(
+ np.sum, (ArrayProxy,), (proxy,), {})
+ proxy.value.__array__.assert_not_called()
+
+ @requires_array_function
+ def test_sum_forwarding_implementation(self):
+
+ class MyArray(np.ndarray):
+
+ def sum(self, axis, out):
+ return 'summed'
+
+ def __array_function__(self, func, types, args, kwargs):
+ return super().__array_function__(func, types, args, kwargs)
+
+ # note: the internal implementation of np.sum() calls the .sum() method
+ array = np.array(1).view(MyArray)
+ assert_equal(np.sum(array), 'summed')
+
+
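The tests above check the user-visible side of the override machinery: wrapped functions such as np.sum keep a meaningful __module__ and a real introspectable signature. Both are cheap to confirm interactively:

    import inspect
    import numpy as np

    print(np.sum.__module__)                               # 'numpy'
    print('axis' in inspect.signature(np.sum).parameters)  # True
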
+class TestArrayLike:
+ def setup_method(self):
+ class MyArray():
+ def __init__(self, function=None):
+ self.function = function
+
+ def __array_function__(self, func, types, args, kwargs):
+ assert func is getattr(np, func.__name__)
+ try:
+ my_func = getattr(self, func.__name__)
+ except AttributeError:
+ return NotImplemented
+ return my_func(*args, **kwargs)
+
+ self.MyArray = MyArray
+
+ class MyNoArrayFunctionArray():
+ def __init__(self, function=None):
+ self.function = function
+
+ self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
+
+ def add_method(self, name, arr_class, enable_value_error=False):
+ def _definition(*args, **kwargs):
+ # Check that `like=` isn't propagated downstream
+ assert 'like' not in kwargs
+
+ if enable_value_error and 'value_error' in kwargs:
+ raise ValueError
+
+ return arr_class(getattr(arr_class, name))
+ setattr(arr_class, name, _definition)
+
+ def func_args(*args, **kwargs):
+ return args, kwargs
+
+ @requires_array_function
+ def test_array_like_not_implemented(self):
+ self.add_method('array', self.MyArray)
+
+ ref = self.MyArray.array()
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ array_like = np.asarray(1, like=ref)
+
+ _array_tests = [
+ ('array', *func_args((1,))),
+ ('asarray', *func_args((1,))),
+ ('asanyarray', *func_args((1,))),
+ ('ascontiguousarray', *func_args((2, 3))),
+ ('asfortranarray', *func_args((2, 3))),
+ ('require', *func_args((np.arange(6).reshape(2, 3),),
+ requirements=['A', 'F'])),
+ ('empty', *func_args((1,))),
+ ('full', *func_args((1,), 2)),
+ ('ones', *func_args((1,))),
+ ('zeros', *func_args((1,))),
+ ('arange', *func_args(3)),
+ ('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
+ ('fromiter', *func_args(range(3), dtype=int)),
+ ('fromstring', *func_args('1,2', dtype=int, sep=',')),
+ ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
+ ('genfromtxt', *func_args(lambda: StringIO('1,2.1'),
+ dtype=[('int', 'i8'), ('float', 'f8')],
+ delimiter=',')),
+ ]
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ @requires_array_function
+ def test_array_like(self, function, args, kwargs, numpy_ref):
+ self.add_method('array', self.MyArray)
+ self.add_method(function, self.MyArray)
+ np_func = getattr(np, function)
+ my_func = getattr(self.MyArray, function)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = self.MyArray.array()
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+ array_like = np_func(*like_args, **kwargs, like=ref)
+
+ if numpy_ref is True:
+ assert type(array_like) is np.ndarray
+
+ np_args = tuple(a() if callable(a) else a for a in args)
+ np_arr = np_func(*np_args, **kwargs)
+
+ # Special-case np.empty to ensure values match
+ if function == "empty":
+ np_arr.fill(1)
+ array_like.fill(1)
+
+ assert_equal(array_like, np_arr)
+ else:
+ assert type(array_like) is self.MyArray
+ assert array_like.function is my_func
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
+ @requires_array_function
+ def test_no_array_function_like(self, function, args, kwargs, ref):
+ self.add_method('array', self.MyNoArrayFunctionArray)
+ self.add_method(function, self.MyNoArrayFunctionArray)
+ np_func = getattr(np, function)
+
+ # Instantiate ref if it's the MyNoArrayFunctionArray class
+ if ref == "MyNoArrayFunctionArray":
+ ref = self.MyNoArrayFunctionArray.array()
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+
+ with assert_raises_regex(TypeError,
+ 'The `like` argument must be an array-like that implements'):
+ np_func(*like_args, **kwargs, like=ref)
+
+ @pytest.mark.parametrize('numpy_ref', [True, False])
+ def test_array_like_fromfile(self, numpy_ref):
+ self.add_method('array', self.MyArray)
+ self.add_method("fromfile", self.MyArray)
+
+ if numpy_ref is True:
+ ref = np.array(1)
+ else:
+ ref = self.MyArray.array()
+
+ data = np.random.random(5)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ fname = os.path.join(tmpdir, "testfile")
+ data.tofile(fname)
+
+ array_like = np.fromfile(fname, like=ref)
+ if numpy_ref is True:
+ assert type(array_like) is np.ndarray
+ np_res = np.fromfile(fname, like=ref)
+ assert_equal(np_res, data)
+ assert_equal(array_like, np_res)
+ else:
+ assert type(array_like) is self.MyArray
+ assert array_like.function is self.MyArray.fromfile
+
+ @requires_array_function
+ def test_exception_handling(self):
+ self.add_method('array', self.MyArray, enable_value_error=True)
+
+ ref = self.MyArray.array()
+
+ with assert_raises(TypeError):
+ # Raises the error about `value_error` being invalid first
+ np.array(1, value_error=True, like=ref)
+
+ @pytest.mark.parametrize('function, args, kwargs', _array_tests)
+ def test_like_as_none(self, function, args, kwargs):
+ self.add_method('array', self.MyArray)
+ self.add_method(function, self.MyArray)
+ np_func = getattr(np, function)
+
+ like_args = tuple(a() if callable(a) else a for a in args)
+ # build the expected args separately: loadtxt and genfromtxt consume
+ # their StringIO input, so each call needs a fresh object.
+ like_args_exp = tuple(a() if callable(a) else a for a in args)
+
+ array_like = np_func(*like_args, **kwargs, like=None)
+ expected = np_func(*like_args_exp, **kwargs)
+ # Special-case np.empty to ensure values match
+ if function == "empty":
+ array_like.fill(1)
+ expected.fill(1)
+ assert_equal(array_like, expected)
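TestArrayLike exercises the `like=` protocol (NEP 35, NumPy >= 1.20): creation functions such as np.asarray and np.zeros accept a reference array and dispatch through its __array_function__, so a downstream library can produce its own container type. A hedged sketch, with a hypothetical MyDuck standing in for a real duck array:

    import numpy as np

    class MyDuck:
        # Hypothetical array-like; a real implementation would build and
        # return its own array type instead of a tag.
        def __array_function__(self, func, types, args, kwargs):
            if func is np.asarray:
                return ('duck-made', args)
            return NotImplemented

    ref = MyDuck()
    # Dispatches to MyDuck.__array_function__; note that `like` itself is
    # stripped before the call, as the tests above assert.
    print(np.asarray([1, 2, 3], like=ref))
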
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_print.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_print.py
new file mode 100644
index 00000000..89a8b48b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_print.py
@@ -0,0 +1,200 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_equal
+from numpy.core.tests._locales import CommaDecimalPointLocale
+
+
+from io import StringIO
+
+_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_types(tp):
+ """ Check formatting.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble isn't the same as
+ that of the Python float.
+
+ """
+ for x in [0, 1, -1, 1e20]:
+ assert_equal(str(tp(x)), str(float(x)),
+ err_msg='Failed str formatting for type %s' % tp)
+
+ if tp(1e16).itemsize > 4:
+ assert_equal(str(tp(1e16)), str(float('1e16')),
+ err_msg='Failed str formatting for type %s' % tp)
+ else:
+ ref = '1e+16'
+ assert_equal(str(tp(1e16)), ref,
+ err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_nan_inf_float(tp):
+ """ Check formatting of nan & inf.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble isn't the same as
+ that of the Python float.
+
+ """
+ for x in [np.inf, -np.inf, np.nan]:
+ assert_equal(str(tp(x)), _REF[x],
+ err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_types(tp):
+ """Check formatting of complex types.
+
+ This is only for the str function, and only for simple types.
+ The precision of the np.complex64 and np.clongdouble components
+ isn't the same as that of the Python float.
+
+ """
+ for x in [0, 1, -1, 1e20]:
+ assert_equal(str(tp(x)), str(complex(x)),
+ err_msg='Failed str formatting for type %s' % tp)
+ assert_equal(str(tp(x*1j)), str(complex(x*1j)),
+ err_msg='Failed str formatting for type %s' % tp)
+ assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
+ err_msg='Failed str formatting for type %s' % tp)
+
+ if tp(1e16).itemsize > 8:
+ assert_equal(str(tp(1e16)), str(complex(1e16)),
+ err_msg='Failed str formatting for type %s' % tp)
+ else:
+ ref = '(1e+16+0j)'
+ assert_equal(str(tp(1e16)), ref,
+ err_msg='Failed str formatting for type %s' % tp)
+
+
+@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_inf_nan(dtype):
+ """Check inf/nan formatting of complex types."""
+ TESTS = {
+ complex(np.inf, 0): "(inf+0j)",
+ complex(0, np.inf): "infj",
+ complex(-np.inf, 0): "(-inf+0j)",
+ complex(0, -np.inf): "-infj",
+ complex(np.inf, 1): "(inf+1j)",
+ complex(1, np.inf): "(1+infj)",
+ complex(-np.inf, 1): "(-inf+1j)",
+ complex(1, -np.inf): "(1-infj)",
+ complex(np.nan, 0): "(nan+0j)",
+ complex(0, np.nan): "nanj",
+ complex(-np.nan, 0): "(nan+0j)",
+ complex(0, -np.nan): "nanj",
+ complex(np.nan, 1): "(nan+1j)",
+ complex(1, np.nan): "(1+nanj)",
+ complex(-np.nan, 1): "(nan+1j)",
+ complex(1, -np.nan): "(1+nanj)",
+ }
+ for c, s in TESTS.items():
+ assert_equal(str(dtype(c)), s)
+
+
+# print tests
+def _test_redirected_print(x, tp, ref=None):
+ file = StringIO()
+ file_tp = StringIO()
+ stdout = sys.stdout
+ try:
+ sys.stdout = file_tp
+ print(tp(x))
+ sys.stdout = file
+ if ref:
+ print(ref)
+ else:
+ print(x)
+ finally:
+ sys.stdout = stdout
+
+ assert_equal(file.getvalue(), file_tp.getvalue(),
+ err_msg='print failed for type %s' % tp)
+
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_type_print(tp):
+ """Check formatting when using print """
+ for x in [0, 1, -1, 1e20]:
+ _test_redirected_print(float(x), tp)
+
+ for x in [np.inf, -np.inf, np.nan]:
+ _test_redirected_print(float(x), tp, _REF[x])
+
+ if tp(1e16).itemsize > 4:
+ _test_redirected_print(float(1e16), tp)
+ else:
+ ref = '1e+16'
+ _test_redirected_print(float(1e16), tp, ref)
+
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_type_print(tp):
+ """Check formatting when using print """
+ # Only finite values here; inf/nan component formatting is exercised
+ # explicitly below (the historical Python < 2.6 restriction on creating
+ # such complex values no longer applies).
+ for x in [0, 1, -1, 1e20]:
+ _test_redirected_print(complex(x), tp)
+
+ if tp(1e16).itemsize > 8:
+ _test_redirected_print(complex(1e16), tp)
+ else:
+ ref = '(1e+16+0j)'
+ _test_redirected_print(complex(1e16), tp, ref)
+
+ _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
+ _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
+ _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
+
+
+def test_scalar_format():
+ """Test the str.format method with NumPy scalar types"""
+ tests = [('{0}', True, np.bool_),
+ ('{0}', False, np.bool_),
+ ('{0:d}', 130, np.uint8),
+ ('{0:d}', 50000, np.uint16),
+ ('{0:d}', 3000000000, np.uint32),
+ ('{0:d}', 15000000000000000000, np.uint64),
+ ('{0:d}', -120, np.int8),
+ ('{0:d}', -30000, np.int16),
+ ('{0:d}', -2000000000, np.int32),
+ ('{0:d}', -7000000000000000000, np.int64),
+ ('{0:g}', 1.5, np.float16),
+ ('{0:g}', 1.5, np.float32),
+ ('{0:g}', 1.5, np.float64),
+ ('{0:g}', 1.5, np.longdouble),
+ ('{0:g}', 1.5+0.5j, np.complex64),
+ ('{0:g}', 1.5+0.5j, np.complex128),
+ ('{0:g}', 1.5+0.5j, np.clongdouble)]
+
+ for (fmat, val, valtype) in tests:
+ try:
+ assert_equal(fmat.format(val), fmat.format(valtype(val)),
+ "failed with val %s, type %s" % (val, valtype))
+ except ValueError as e:
+ assert_(False,
+ "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
+ (fmat, repr(val), repr(valtype), str(e)))
+
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+ def test_locale_single(self):
+ assert_equal(str(np.float32(1.2)), str(float(1.2)))
+
+ def test_locale_double(self):
+ assert_equal(str(np.double(1.2)), str(float(1.2)))
+
+ def test_locale_longdouble(self):
+ assert_equal(str(np.longdouble('1.2')), str(float(1.2)))
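The common thread in test_print.py: str() of a NumPy scalar should agree with str() of the equivalent Python scalar wherever precision permits, and stay locale-independent. For instance:

    import numpy as np

    assert str(np.float64(1.5)) == str(1.5)           # '1.5'
    assert str(np.complex128(1 + 2j)) == str(1 + 2j)  # '(1+2j)'
    assert str(np.float64(np.inf)) == 'inf'
    assert str(np.float64(np.nan)) == 'nan'
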
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py
new file mode 100644
index 00000000..55a2bcf7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_protocols.py
@@ -0,0 +1,44 @@
+import pytest
+import warnings
+import numpy as np
+
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+ # issue gh-14735: make sure we clear only getattr errors, and let warnings
+ # through
+ class Wrapper:
+ def __init__(self, array):
+ self.array = array
+
+ def __len__(self):
+ return len(self.array)
+
+ def __getitem__(self, item):
+ return type(self)(self.array[item])
+
+ def __getattr__(self, name):
+ if name.startswith("__array_"):
+ warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+ return getattr(self.array, name)
+
+ def __repr__(self):
+ return "<Wrapper({self.array})>".format(self=self)
+
+ array = Wrapper(np.arange(10))
+ with pytest.raises(UserWarning, match="object got converted"):
+ np.asarray(array)
+
+
+def test_array_called():
+ class Wrapper:
+ val = '0' * 100
+ def __array__(self, dtype=None):  # NumPy may pass the requested dtype; unused here
+ return np.array([self.val], dtype=object)
+
+ wrapped = Wrapper()
+ arr = np.array(wrapped, dtype=str)
+ assert arr.dtype == 'U100'
+ assert arr[0] == Wrapper.val
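test_protocols.py touches the older `__array__` hook: np.array()/np.asarray() call it to obtain an ndarray and then apply any requested dtype conversion (above, an object-string array is cast to '<U100'). A minimal sketch of a convertible wrapper (the class name is illustrative):

    import numpy as np

    class Samples:
        def __init__(self, values):
            self.values = values

        def __array__(self, dtype=None):
            # Honour the requested dtype if NumPy passes one.
            arr = np.array(self.values, dtype=float)
            return arr if dtype is None else arr.astype(dtype)

    s = Samples([0.0, 36.6, 100.0])
    print(np.asarray(s).dtype)               # float64
    print(np.asarray(s, dtype='f4').dtype)   # float32
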
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_records.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_records.py
new file mode 100644
index 00000000..a76ae2d9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_records.py
@@ -0,0 +1,520 @@
+import collections.abc
+import textwrap
+from io import BytesIO
+from os import path
+from pathlib import Path
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, temppath,
+ )
+from numpy.compat import pickle
+
+
+class TestFromrecords:
+ def test_fromrecords(self):
+ r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
+ names='col1,col2,col3')
+ assert_equal(r[0].item(), (456, 'dbe', 1.2))
+ assert_equal(r['col1'].dtype.kind, 'i')
+ assert_equal(r['col2'].dtype.kind, 'U')
+ assert_equal(r['col2'].dtype.itemsize, 12)
+ assert_equal(r['col3'].dtype.kind, 'f')
+
+ def test_fromrecords_0len(self):
+ """ Verify fromrecords works with a 0-length input """
+ dtype = [('a', float), ('b', float)]
+ r = np.rec.fromrecords([], dtype=dtype)
+ assert_equal(r.shape, (0,))
+
+ def test_fromrecords_2d(self):
+ data = [
+ [(1, 2), (3, 4), (5, 6)],
+ [(6, 5), (4, 3), (2, 1)]
+ ]
+ expected_a = [[1, 3, 5], [6, 4, 2]]
+ expected_b = [[2, 4, 6], [5, 3, 1]]
+
+ # try with dtype
+ r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+ assert_equal(r1['a'], expected_a)
+ assert_equal(r1['b'], expected_b)
+
+ # try with names
+ r2 = np.rec.fromrecords(data, names=['a', 'b'])
+ assert_equal(r2['a'], expected_a)
+ assert_equal(r2['b'], expected_b)
+
+ assert_equal(r1, r2)
+
+ def test_method_array(self):
+ r = np.rec.array(b'abcdefg' * 100, formats='i2,a3,i4', shape=3, byteorder='big')
+ assert_equal(r[1].item(), (25444, b'efg', 1633837924))
+
+ def test_method_array2(self):
+ r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+ (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+ assert_equal(r[1].item(), (2, 22.0, b'b'))
+
+ def test_recarray_slices(self):
+ r = np.rec.array([(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'), (5, 55, 'ex'),
+ (6, 66, 'f'), (7, 77, 'g')], formats='u1,f4,a1')
+ assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
+
+ def test_recarray_fromarrays(self):
+ x1 = np.array([1, 2, 3, 4])
+ x2 = np.array(['a', 'dd', 'xyz', '12'])
+ x3 = np.array([1.1, 2, 3, 4])
+ r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
+ assert_equal(r[1].item(), (2, 'dd', 2.0))
+ x1[1] = 34
+ assert_equal(r.a, np.array([1, 2, 3, 4]))
+
+ def test_recarray_fromfile(self):
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filename = path.join(data_dir, 'recarray_from_file.fits')
+ fd = open(filename, 'rb')
+ fd.seek(2880 * 2)
+ r1 = np.rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+ fd.seek(2880 * 2)
+ r2 = np.rec.array(fd, formats='f8,i4,a5', shape=3, byteorder='big')
+ fd.seek(2880 * 2)
+ bytes_array = BytesIO()
+ bytes_array.write(fd.read())
+ bytes_array.seek(0)
+ r3 = np.rec.fromfile(bytes_array, formats='f8,i4,a5', shape=3, byteorder='big')
+ fd.close()
+ assert_equal(r1, r2)
+ assert_equal(r2, r3)
+
+ def test_recarray_from_obj(self):
+ count = 10
+ a = np.zeros(count, dtype='O')
+ b = np.zeros(count, dtype='f8')
+ c = np.zeros(count, dtype='f8')
+ for i in range(len(a)):
+ a[i] = list(range(1, 10))
+
+ mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
+ for i in range(len(a)):
+ assert_((mine.date[i] == list(range(1, 10))))
+ assert_((mine.data1[i] == 0.0))
+ assert_((mine.data2[i] == 0.0))
+
+ def test_recarray_repr(self):
+ a = np.array([(1, 0.1), (2, 0.2)],
+ dtype=[('foo', '<i4'), ('bar', '<f8')])
+ a = np.rec.array(a)
+ assert_equal(
+ repr(a),
+ textwrap.dedent("""\
+ rec.array([(1, 0.1), (2, 0.2)],
+ dtype=[('foo', '<i4'), ('bar', '<f8')])""")
+ )
+
+ # make sure non-structured dtypes also show up as rec.array
+ a = np.array(np.ones(4, dtype='f8'))
+ assert_(repr(np.rec.array(a)).startswith('rec.array'))
+
+ # check that the 'np.record' part of the dtype isn't shown
+ a = np.rec.array(np.ones(3, dtype='i4,i4'))
+ assert_equal(repr(a).find('numpy.record'), -1)
+ a = np.rec.array(np.ones(3, dtype='i4'))
+ assert_(repr(a).find('dtype=int32') != -1)
+
+ def test_0d_recarray_repr(self):
+ arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
+ assert_equal(repr(arr_0d), textwrap.dedent("""\
+ rec.array((1, 2., '2003'),
+ dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"""))
+
+ record = arr_0d[()]
+ assert_equal(repr(record), "(1, 2., '2003')")
+ # NumPy 1.13 converted items to Python scalars before computing the repr
+ try:
+ np.set_printoptions(legacy='1.13')
+ assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
+ finally:
+ np.set_printoptions(legacy=False)
+
+ def test_recarray_from_repr(self):
+ a = np.array([(1,'ABC'), (2, "DEF")],
+ dtype=[('foo', int), ('bar', 'S4')])
+ recordarr = np.rec.array(a)
+ recarr = a.view(np.recarray)
+ recordview = a.view(np.dtype((np.record, a.dtype)))
+
+ recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
+ recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
+ recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
+
+ assert_equal(type(recordarr_r), np.recarray)
+ assert_equal(recordarr_r.dtype.type, np.record)
+ assert_equal(recordarr, recordarr_r)
+
+ assert_equal(type(recarr_r), np.recarray)
+ assert_equal(recarr_r.dtype.type, np.record)
+ assert_equal(recarr, recarr_r)
+
+ assert_equal(type(recordview_r), np.ndarray)
+ assert_equal(recordview.dtype.type, np.record)
+ assert_equal(recordview, recordview_r)
+
+ def test_recarray_views(self):
+ a = np.array([(1,'ABC'), (2, "DEF")],
+ dtype=[('foo', int), ('bar', 'S4')])
+ b = np.array([1,2,3,4,5], dtype=np.int64)
+
+ # check that np.rec.array gives the right dtypes
+ assert_equal(np.rec.array(a).dtype.type, np.record)
+ assert_equal(type(np.rec.array(a)), np.recarray)
+ assert_equal(np.rec.array(b).dtype.type, np.int64)
+ assert_equal(type(np.rec.array(b)), np.recarray)
+
+ # check that viewing as recarray does the same
+ assert_equal(a.view(np.recarray).dtype.type, np.record)
+ assert_equal(type(a.view(np.recarray)), np.recarray)
+ assert_equal(b.view(np.recarray).dtype.type, np.int64)
+ assert_equal(type(b.view(np.recarray)), np.recarray)
+
+ # check that view to non-structured dtype preserves type=np.recarray
+ r = np.rec.array(np.ones(4, dtype="f4,i4"))
+ rv = r.view('f8').view('f4,i4')
+ assert_equal(type(rv), np.recarray)
+ assert_equal(rv.dtype.type, np.record)
+
+ # check that getitem also preserves np.recarray and np.record
+ r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
+ ('c', 'i4,i4')]))
+ assert_equal(r['c'].dtype.type, np.record)
+ assert_equal(type(r['c']), np.recarray)
+
+ # and that it preserves subclasses (gh-6949)
+ class C(np.recarray):
+ pass
+
+ c = r.view(C)
+ assert_equal(type(c['c']), C)
+
+ # check that accessing nested structures keeps the record type, but
+ # not for subarrays, non-void structures, or non-structured voids
+ test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
+ ('d', ('i8', 'i4,i4'))]
+ r = np.rec.array([((1,1), b'11111111', [1,1], 1),
+ ((1,1), b'11111111', [1,1], 1)], dtype=test_dtype)
+ assert_equal(r.a.dtype.type, np.record)
+ assert_equal(r.b.dtype.type, np.void)
+ assert_equal(r.c.dtype.type, np.float32)
+ assert_equal(r.d.dtype.type, np.int64)
+ # check the same, but for views
+ r = np.rec.array(np.ones(4, dtype='i4,i4'))
+ assert_equal(r.view('f4,f4').dtype.type, np.record)
+ assert_equal(r.view(('i4',2)).dtype.type, np.int32)
+ assert_equal(r.view('V8').dtype.type, np.void)
+ assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)
+
+ # check that we can undo the view
+ arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
+ for arr in arrs:
+ rec = np.rec.array(arr)
+ # recommended way to view as an ndarray:
+ arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
+ assert_equal(arr2.dtype.type, arr.dtype.type)
+ assert_equal(type(arr2), type(arr))
+
+ def test_recarray_from_names(self):
+ ra = np.rec.array([
+ (1, 'abc', 3.7000002861022949, 0),
+ (2, 'xy', 6.6999998092651367, 1),
+ (0, ' ', 0.40000000596046448, 0)],
+ names='c1, c2, c3, c4')
+ pa = np.rec.fromrecords([
+ (1, 'abc', 3.7000002861022949, 0),
+ (2, 'xy', 6.6999998092651367, 1),
+ (0, ' ', 0.40000000596046448, 0)],
+ names='c1, c2, c3, c4')
+ assert_(ra.dtype == pa.dtype)
+ assert_(ra.shape == pa.shape)
+ for k in range(len(ra)):
+ assert_(ra[k].item() == pa[k].item())
+
+ def test_recarray_conflict_fields(self):
+ ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
+ (3, 'wrs', 1.3)],
+ names='field, shape, mean')
+ ra.mean = [1.1, 2.2, 3.3]
+ assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
+ assert_(type(ra.mean) is type(ra.var))
+ ra.shape = (1, 3)
+ assert_(ra.shape == (1, 3))
+ ra.shape = ['A', 'B', 'C']
+ assert_array_equal(ra['shape'], [['A', 'B', 'C']])
+ ra.field = 5
+ assert_array_equal(ra['field'], [[5, 5, 5]])
+ assert_(isinstance(ra.field, collections.abc.Callable))
+
+ def test_fromrecords_with_explicit_dtype(self):
+ a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
+ dtype=[('a', int), ('b', object)])
+ assert_equal(a.a, [1, 2])
+ assert_equal(a[0].a, 1)
+ assert_equal(a.b, ['a', 'bbb'])
+ assert_equal(a[-1].b, 'bbb')
+ #
+ ndtype = np.dtype([('a', int), ('b', object)])
+ a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
+ assert_equal(a.a, [1, 2])
+ assert_equal(a[0].a, 1)
+ assert_equal(a.b, ['a', 'bbb'])
+ assert_equal(a[-1].b, 'bbb')
+
+ def test_recarray_stringtypes(self):
+ # Issue #3993
+ a = np.array([('abc ', 1), ('abc', 2)],
+ dtype=[('foo', 'S4'), ('bar', int)])
+ a = a.view(np.recarray)
+ assert_equal(a.foo[0] == a.foo[1], False)
+
+ def test_recarray_returntypes(self):
+ qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
+ a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
+ ('abc', (2,3), 1, ('abcde', 'jklmn'))],
+ dtype=[('foo', 'S4'),
+ ('bar', [('A', int), ('B', int)]),
+ ('baz', int), ('qux', qux_fields)])
+ assert_equal(type(a.foo), np.ndarray)
+ assert_equal(type(a['foo']), np.ndarray)
+ assert_equal(type(a.bar), np.recarray)
+ assert_equal(type(a['bar']), np.recarray)
+ assert_equal(a.bar.dtype.type, np.record)
+ assert_equal(type(a['qux']), np.recarray)
+ assert_equal(a.qux.dtype.type, np.record)
+ assert_equal(dict(a.qux.dtype.fields), qux_fields)
+ assert_equal(type(a.baz), np.ndarray)
+ assert_equal(type(a['baz']), np.ndarray)
+ assert_equal(type(a[0].bar), np.record)
+ assert_equal(type(a[0]['bar']), np.record)
+ assert_equal(a[0].bar.A, 1)
+ assert_equal(a[0].bar['A'], 1)
+ assert_equal(a[0]['bar'].A, 1)
+ assert_equal(a[0]['bar']['A'], 1)
+ assert_equal(a[0].qux.D, b'fgehi')
+ assert_equal(a[0].qux['D'], b'fgehi')
+ assert_equal(a[0]['qux'].D, b'fgehi')
+ assert_equal(a[0]['qux']['D'], b'fgehi')
+
+ def test_zero_width_strings(self):
+ # Test for #6430, based on the test case from #1901
+
+ cols = [['test'] * 3, [''] * 3]
+ rec = np.rec.fromarrays(cols)
+ assert_equal(rec['f0'], ['test', 'test', 'test'])
+ assert_equal(rec['f1'], ['', '', ''])
+
+ dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
+ rec = np.rec.fromarrays(cols, dtype=dt)
+ assert_equal(rec.itemsize, 4)
+ assert_equal(rec['f0'], [b'test', b'test', b'test'])
+ assert_equal(rec['f1'], [b'', b'', b''])
+
+
+class TestPathUsage:
+ # Test that pathlib.Path can be used
+ def test_tofile_fromfile(self):
+ with temppath(suffix='.bin') as path:
+ path = Path(path)
+ np.random.seed(123)
+ a = np.random.rand(10).astype('f8,i4,a5')
+ a[5] = (0.5,10,'abcde')
+ with path.open("wb") as fd:
+ a.tofile(fd)
+ x = np.core.records.fromfile(path,
+ formats='f8,i4,a5',
+ shape=10)
+ assert_array_equal(x, a)
+
+
+class TestRecord:
+ def setup_method(self):
+ self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
+ dtype=[("col1", "<i4"),
+ ("col2", "<i4"),
+ ("col3", "<i4")])
+
+ def test_assignment1(self):
+ a = self.data
+ assert_equal(a.col1[0], 1)
+ a[0].col1 = 0
+ assert_equal(a.col1[0], 0)
+
+ def test_assignment2(self):
+ a = self.data
+ assert_equal(a.col1[0], 1)
+ a.col1[0] = 0
+ assert_equal(a.col1[0], 0)
+
+ def test_invalid_assignment(self):
+ a = self.data
+
+ def assign_invalid_column(x):
+ x[0].col5 = 1
+
+ assert_raises(AttributeError, assign_invalid_column, a)
+
+ def test_nonwriteable_setfield(self):
+ # gh-8171
+ r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
+ r.flags.writeable = False
+ with assert_raises(ValueError):
+ r.f = [2, 3]
+ with assert_raises(ValueError):
+ r.setfield([2,3], *r.dtype.fields['f'])
+
+ def test_out_of_order_fields(self):
+ # names in the same order, padding added to descr
+ x = self.data[['col1', 'col2']]
+ assert_equal(x.dtype.names, ('col1', 'col2'))
+ assert_equal(x.dtype.descr,
+ [('col1', '<i4'), ('col2', '<i4'), ('', '|V4')])
+
+ # names change order to match indexing, as of 1.14 - descr can't
+ # represent that
+ y = self.data[['col2', 'col1']]
+ assert_equal(y.dtype.names, ('col2', 'col1'))
+ assert_raises(ValueError, lambda: y.dtype.descr)
+
+ def test_pickle_1(self):
+ # Issue #1529
+ a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
+ protocol=proto)))
+
+ def test_pickle_2(self):
+ a = self.data
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
+ protocol=proto)))
+
+ def test_pickle_3(self):
+ # Issue #7140
+ a = self.data
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pa = pickle.loads(pickle.dumps(a[0], protocol=proto))
+ assert_(pa.flags.c_contiguous)
+ assert_(pa.flags.f_contiguous)
+ assert_(pa.flags.writeable)
+ assert_(pa.flags.aligned)
+
+ def test_pickle_void(self):
+ # issue gh-13593
+ dt = np.dtype([('obj', 'O'), ('int', 'i')])
+ a = np.empty(1, dtype=dt)
+ data = (bytearray(b'eman'),)
+ a['obj'] = data
+ a['int'] = 42
+ ctor, args = a[0].__reduce__()
+ # check the constructor is what we expect before interpreting the arguments
+ assert ctor is np.core.multiarray.scalar
+ dtype, obj = args
+ # make sure we did not pickle the address
+ assert not isinstance(obj, bytes)
+
+ assert_raises(RuntimeError, ctor, dtype, 13)
+
+ # Test roundtrip:
+ dump = pickle.dumps(a[0])
+ unpickled = pickle.loads(dump)
+ assert a[0] == unpickled
+
+ # Also check the similar (impossible) "object scalar" path:
+ with pytest.warns(DeprecationWarning):
+ assert ctor(np.dtype("O"), data) is data
+
+ def test_objview_record(self):
+ # https://github.com/numpy/numpy/issues/2599
+ dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
+ r = np.zeros((1,3), dtype=dt).view(np.recarray)
+ r.foo = np.array([1, 2, 3]) # TypeError?
+
+ # https://github.com/numpy/numpy/issues/3256
+ ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
+ ra[['x','y']] # TypeError?
+
+ def test_record_scalar_setitem(self):
+ # https://github.com/numpy/numpy/issues/3561
+ rec = np.recarray(1, dtype=[('x', float, 5)])
+ rec[0].x = 1
+ assert_equal(rec[0].x, np.ones(5))
+
+ def test_missing_field(self):
+ # https://github.com/numpy/numpy/issues/4806
+ arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
+ assert_raises(KeyError, lambda: arr[['nofield']])
+
+ def test_fromarrays_nested_structured_arrays(self):
+ arrays = [
+ np.arange(10),
+ np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]),
+ ]
+ arr = np.rec.fromarrays(arrays) # ValueError?
+
+ @pytest.mark.parametrize('nfields', [0, 1, 2])
+ def test_assign_dtype_attribute(self, nfields):
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+ data = np.zeros(3, dt).view(np.recarray)
+
+ # the original and resulting dtypes differ on whether they are records
+ assert data.dtype.type == np.record
+ assert dt.type != np.record
+
+ # ensure that the dtype remains a record even when assigned
+ data.dtype = dt
+ assert data.dtype.type == np.record
+
+ @pytest.mark.parametrize('nfields', [0, 1, 2])
+ def test_nested_fields_are_records(self, nfields):
+ """ Test that nested structured types are treated as records too """
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+ dt_outer = np.dtype([('inner', dt)])
+
+ data = np.zeros(3, dt_outer).view(np.recarray)
+ assert isinstance(data, np.recarray)
+ assert isinstance(data['inner'], np.recarray)
+
+ data0 = data[0]
+ assert isinstance(data0, np.record)
+ assert isinstance(data0['inner'], np.record)
+
+ def test_nested_dtype_padding(self):
+ """ test that trailing padding is preserved """
+ # construct a dtype with padding at the end
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
+ dt_padded_end = dt[['a', 'b']]
+ assert dt_padded_end.itemsize == dt.itemsize
+
+ dt_outer = np.dtype([('inner', dt_padded_end)])
+
+ data = np.zeros(3, dt_outer).view(np.recarray)
+ assert_equal(data['inner'].dtype, dt_padded_end)
+
+ data0 = data[0]
+ assert_equal(data0['inner'].dtype, dt_padded_end)
+
+
+def test_find_duplicate():
+ l1 = [1, 2, 3, 4, 5, 6]
+ assert_(np.rec.find_duplicate(l1) == [])
+
+ l2 = [1, 2, 1, 4, 5, 6]
+ assert_(np.rec.find_duplicate(l2) == [1])
+
+ l3 = [1, 2, 1, 4, 1, 6, 2, 3]
+ assert_(np.rec.find_duplicate(l3) == [1, 2])
+
+ l4 = [2, 2, 1, 4, 1, 6, 2, 3]
+ assert_(np.rec.find_duplicate(l4) == [2, 1])
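Everything in test_records.py builds on one convenience: np.rec arrays expose structured fields as attributes while remaining ordinary ndarrays underneath. In short:

    import numpy as np

    r = np.rec.fromrecords([(1, 'abc', 1.2), (2, 'de', 1.3)],
                           names='col1,col2,col3')
    print(r.col1)        # [1 2]  -- field as attribute
    print(r['col2'][0])  # 'abc'  -- plain structured indexing still works
    print(type(r[0]))    # <class 'numpy.record'>
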
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_regression.py
new file mode 100644
index 00000000..160e4a3a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_regression.py
@@ -0,0 +1,2558 @@
+import copy
+import sys
+import gc
+import tempfile
+import pytest
+from os import path
+from io import BytesIO
+from itertools import chain
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, IS_PYPY, assert_almost_equal,
+ assert_array_equal, assert_array_almost_equal, assert_raises,
+ assert_raises_regex, assert_warns, suppress_warnings,
+ _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM
+ )
+from numpy.testing._private.utils import _no_tracing, requires_memory
+from numpy.compat import asbytes, asunicode, pickle
+
+
+class TestRegression:
+ def test_invalid_round(self):
+ # Ticket #3
+ v = 4.7599999999999998
+ assert_array_equal(np.array([v]), np.array(v))
+
+ def test_mem_empty(self):
+ # Ticket #7
+ np.empty((1,), dtype=[('x', np.int64)])
+
+ def test_pickle_transposed(self):
+ # Ticket #16
+ a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ with BytesIO() as f:
+ pickle.dump(a, f, protocol=proto)
+ f.seek(0)
+ b = pickle.load(f)
+ assert_array_equal(a, b)
+
+ def test_dtype_names(self):
+ # Ticket #35
+ # Should succeed
+ np.dtype([(('name', 'label'), np.int32, 3)])
+
+ def test_reduce(self):
+ # Ticket #40
+ assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
+
+ def test_zeros_order(self):
+ # Ticket #43
+ np.zeros([3], int, 'C')
+ np.zeros([3], order='C')
+ np.zeros([3], int, order='C')
+
+ def test_asarray_with_order(self):
+ # Check that nothing is done when order='F' and array C/F-contiguous
+ a = np.ones(2)
+ assert_(a is np.asarray(a, order='F'))
+
+ def test_ravel_with_order(self):
+ # Check that ravel works when order='F' and array C/F-contiguous
+ a = np.ones(2)
+ assert_(not a.ravel('F').flags.owndata)
+
+ def test_sort_bigendian(self):
+ # Ticket #47
+ a = np.linspace(0, 10, 11)
+ c = a.astype(np.dtype('<f8'))
+ c.sort()
+ assert_array_almost_equal(c, a)
+
+ def test_negative_nd_indexing(self):
+ # Ticket #49
+ c = np.arange(125).reshape((5, 5, 5))
+ origidx = np.array([-1, 0, 1])
+ idx = np.array(origidx)
+ c[idx]
+ assert_array_equal(idx, origidx)
+
+ def test_char_dump(self):
+ # Ticket #50
+ ca = np.char.array(np.arange(1000, 1010), itemsize=4)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ with BytesIO() as f:
+ pickle.dump(ca, f, protocol=proto)
+ f.seek(0)
+ ca = np.load(f, allow_pickle=True)
+
+ def test_noncontiguous_fill(self):
+ # Ticket #58.
+ a = np.zeros((5, 3))
+ b = a[:, :2]
+
+ def rs():
+ b.shape = (10,)
+
+ assert_raises(AttributeError, rs)
+
+ def test_bool(self):
+ # Ticket #60
+ np.bool_(1) # Should succeed
+
+ def test_indexing1(self):
+ # Ticket #64
+ descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
+ buffer = ((([6j, 4j],),),)
+ h = np.array(buffer, dtype=descr)
+ h['x']['y']['z']
+
+ def test_indexing2(self):
+ # Ticket #65
+ descr = [('x', 'i4', (2,))]
+ buffer = ([3, 2],)
+ h = np.array(buffer, dtype=descr)
+ h['x']
+
+ def test_round(self):
+ # Ticket #67
+ x = np.array([1+2j])
+ assert_almost_equal(x**(-1), [1/(1+2j)])
+
+ def test_scalar_compare(self):
+ # Trac Ticket #72
+ # https://github.com/numpy/numpy/issues/565
+ a = np.array(['test', 'auto'])
+ assert_array_equal(a == 'auto', np.array([False, True]))
+ assert_(a[1] == 'auto')
+ assert_(a[0] != 'auto')
+ b = np.linspace(0, 10, 11)
+ # This should return true for now, but will eventually raise an error:
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning)
+ assert_(b != 'auto')
+ assert_(b[0] != 'auto')
+
+ def test_unicode_swapping(self):
+ # Ticket #79
+ ulen = 1
+ ucs_value = '\U0010FFFF'
+ ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
+ ua.newbyteorder() # Should succeed.
+
+ def test_object_array_fill(self):
+ # Ticket #86
+ x = np.zeros(1, 'O')
+ x.fill([])
+
+ def test_mem_dtype_align(self):
+ # Ticket #93
+ assert_raises(TypeError, np.dtype,
+ {'names':['a'], 'formats':['foo']}, align=1)
+
+ def test_endian_bool_indexing(self):
+ # Ticket #105
+ a = np.arange(10., dtype='>f8')
+ b = np.arange(10., dtype='<f8')
+ xa = np.where((a > 2) & (a < 6))
+ xb = np.where((b > 2) & (b < 6))
+ ya = ((a > 2) & (a < 6))
+ yb = ((b > 2) & (b < 6))
+ assert_array_almost_equal(xa, ya.nonzero())
+ assert_array_almost_equal(xb, yb.nonzero())
+ assert_(np.all(a[ya] > 0.5))
+ assert_(np.all(b[yb] > 0.5))
+
+ def test_endian_where(self):
+ # GitHub issue #369
+ net = np.zeros(3, dtype='>f4')
+ net[1] = 0.00458849
+ net[2] = 0.605202
+ max_net = net.max()
+ test = np.where(net <= 0., max_net, net)
+ correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
+ assert_array_almost_equal(test, correct)
+
+ def test_endian_recarray(self):
+ # Ticket #2185
+ dt = np.dtype([
+ ('head', '>u4'),
+ ('data', '>u4', 2),
+ ])
+ buf = np.recarray(1, dtype=dt)
+ buf[0]['head'] = 1
+ buf[0]['data'][:] = [1, 1]
+
+ h = buf[0]['head']
+ d = buf[0]['data'][0]
+ buf[0]['head'] = h
+ buf[0]['data'][0] = d
+ assert_(buf[0]['head'] == 1)
+
+ def test_mem_dot(self):
+ # Ticket #106
+ x = np.random.randn(0, 1)
+ y = np.random.randn(10, 1)
+ # Dummy array to detect bad memory access:
+ _z = np.ones(10)
+ _dummy = np.empty((0, 10))
+ z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
+ np.dot(x, np.transpose(y), out=z)
+ assert_equal(_z, np.ones(10))
+ # Do the same for the built-in dot:
+ np.core.multiarray.dot(x, np.transpose(y), out=z)
+ assert_equal(_z, np.ones(10))
+
+ def test_arange_endian(self):
+ # Ticket #111
+ ref = np.arange(10)
+ x = np.arange(10, dtype='<f8')
+ assert_array_equal(ref, x)
+ x = np.arange(10, dtype='>f8')
+ assert_array_equal(ref, x)
+
+ def test_arange_inf_step(self):
+ ref = np.arange(0, 1, 10)
+ x = np.arange(0, 1, np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, 1, -10)
+ x = np.arange(0, 1, -np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -1, -10)
+ x = np.arange(0, -1, -np.inf)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -1, 10)
+ x = np.arange(0, -1, np.inf)
+ assert_array_equal(ref, x)
+
+ def test_arange_underflow_stop_and_step(self):
+ finfo = np.finfo(np.float64)
+
+ ref = np.arange(0, finfo.eps, 2 * finfo.eps)
+ x = np.arange(0, finfo.eps, finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, finfo.eps, -2 * finfo.eps)
+ x = np.arange(0, finfo.eps, -finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
+ x = np.arange(0, -finfo.eps, -finfo.max)
+ assert_array_equal(ref, x)
+
+ ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
+ x = np.arange(0, -finfo.eps, finfo.max)
+ assert_array_equal(ref, x)
+
+ def test_argmax(self):
+ # Ticket #119
+ a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
+ for i in range(a.ndim):
+ a.argmax(i) # Should succeed
+
+ def test_mem_divmod(self):
+ # Ticket #126
+ for i in range(10):
+ divmod(np.array([i])[0], 10)
+
+ def test_hstack_invalid_dims(self):
+ # Ticket #128
+ x = np.arange(9).reshape((3, 3))
+ y = np.array([0, 0, 0])
+ assert_raises(ValueError, np.hstack, (x, y))
+
+ def test_squeeze_type(self):
+ # Ticket #133
+ a = np.array([3])
+ b = np.array(3)
+ assert_(type(a.squeeze()) is np.ndarray)
+ assert_(type(b.squeeze()) is np.ndarray)
+
+ def test_add_identity(self):
+ # Ticket #143
+ assert_equal(0, np.add.identity)
+
+ def test_numpy_float_python_long_addition(self):
+ # Check that numpy float and python longs can be added correctly.
+ a = np.float_(23.) + 2**135
+ assert_equal(a, 23. + 2**135)
+
+ def test_binary_repr_0(self):
+ # Ticket #151
+ assert_equal('0', np.binary_repr(0))
+
+ def test_rec_iterate(self):
+ # Ticket #160
+ descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
+ x = np.rec.array([(1, 1.1, '1.0'),
+ (2, 2.2, '2.0')], dtype=descr)
+ x[0].tolist()
+ [i for i in x[0]]
+
+ def test_unicode_string_comparison(self):
+ # Ticket #190
+ a = np.array('hello', np.unicode_)
+ b = np.array('world')
+ a == b
+
+ def test_tobytes_FORTRANORDER_discontiguous(self):
+ # Fix in r2836
+ # Create non-contiguous Fortran ordered array
+ x = np.array(np.random.rand(3, 3), order='F')[:, :2]
+ assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))
+
+ def test_flat_assignment(self):
+ # Correct behaviour of ticket #194
+ x = np.empty((3, 1))
+ x.flat = np.arange(3)
+ assert_array_almost_equal(x, [[0], [1], [2]])
+ x.flat = np.arange(3, dtype=float)
+ assert_array_almost_equal(x, [[0], [1], [2]])
+
+ def test_broadcast_flat_assignment(self):
+ # Ticket #194
+ x = np.empty((3, 1))
+
+ def bfa():
+ x[:] = np.arange(3)
+
+ def bfb():
+ x[:] = np.arange(3, dtype=float)
+
+ assert_raises(ValueError, bfa)
+ assert_raises(ValueError, bfb)
+
+ @pytest.mark.xfail(IS_WASM, reason="not sure why")
+ @pytest.mark.parametrize("index",
+ [np.ones(10, dtype=bool), np.arange(10)],
+ ids=["boolean-arr-index", "integer-arr-index"])
+ def test_nonarray_assignment(self, index):
+ # See also Issue gh-2870, test for non-array assignment
+ # and equivalent unsafe casted array assignment
+ a = np.arange(10)
+
+ with pytest.raises(ValueError):
+ a[index] = np.nan
+
+ with np.errstate(invalid="warn"):
+ with pytest.warns(RuntimeWarning, match="invalid value"):
+ a[index] = np.array(np.nan) # Only warns
+
+ def test_unpickle_dtype_with_object(self):
+ # Implemented in r2840
+ dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ with BytesIO() as f:
+ pickle.dump(dt, f, protocol=proto)
+ f.seek(0)
+ dt_ = pickle.load(f)
+ assert_equal(dt, dt_)
+
+ def test_mem_array_creation_invalid_specification(self):
+ # Ticket #196
+ dt = np.dtype([('x', int), ('y', np.object_)])
+ # Wrong way
+ assert_raises(ValueError, np.array, [1, 'object'], dt)
+ # Correct way
+ np.array([(1, 'object')], dt)
+
+ def test_recarray_single_element(self):
+ # Ticket #202
+ a = np.array([1, 2, 3], dtype=np.int32)
+ b = a.copy()
+ r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
+ assert_array_equal(a, b)
+ assert_equal(a, r[0][0])
+
+ def test_zero_sized_array_indexing(self):
+ # Ticket #205
+ tmp = np.array([])
+
+ def index_tmp():
+ tmp[np.array(10)]
+
+ assert_raises(IndexError, index_tmp)
+
+ def test_chararray_rstrip(self):
+ # Ticket #222
+ x = np.chararray((1,), 5)
+ x[0] = b'a '
+ x = x.rstrip()
+ assert_equal(x[0], b'a')
+
+ def test_object_array_shape(self):
+ # Ticket #239
+ assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
+ assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
+ assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
+ assert_equal(np.array([], dtype=object).shape, (0,))
+ assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
+ assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
+
+ def test_mem_around(self):
+ # Ticket #243
+ x = np.zeros((1,))
+ y = [0]
+ decimal = 6
+ np.around(abs(x-y), decimal) <= 10.0**(-decimal)
+
+ def test_character_array_strip(self):
+ # Ticket #246
+ x = np.char.array(("x", "x ", "x "))
+ for c in x:
+ assert_equal(c, "x")
+
+ def test_lexsort(self):
+ # Lexsort memory error
+ v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+ assert_equal(np.lexsort(v), 0)
+
+ def test_lexsort_invalid_sequence(self):
+ # Issue gh-4123
+ class BuggySequence:
+ def __len__(self):
+ return 4
+
+ def __getitem__(self, key):
+ raise KeyError
+
+ assert_raises(KeyError, np.lexsort, BuggySequence())
+
+ def test_lexsort_zerolen_custom_strides(self):
+ # Ticket #14228
+ xs = np.array([], dtype='i8')
+ assert np.lexsort((xs,)).shape[0] == 0 # Works
+
+ xs.strides = (16,)
+ assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError
+
+ def test_lexsort_zerolen_custom_strides_2d(self):
+ xs = np.array([], dtype='i8')
+
+ xs.shape = (0, 2)
+ xs.strides = (16, 16)
+ assert np.lexsort((xs,), axis=0).shape[0] == 0
+
+ xs.shape = (2, 0)
+ xs.strides = (16, 16)
+ assert np.lexsort((xs,), axis=0).shape[0] == 2
+
+ def test_lexsort_invalid_axis(self):
+ assert_raises(np.AxisError, np.lexsort, (np.arange(1),), axis=2)
+ assert_raises(np.AxisError, np.lexsort, (np.array([]),), axis=1)
+ assert_raises(np.AxisError, np.lexsort, (np.array(1),), axis=10)
+
+ def test_lexsort_zerolen_element(self):
+ dt = np.dtype([]) # a void dtype with no fields
+ xs = np.empty(4, dt)
+
+ assert np.lexsort((xs,)).shape[0] == xs.shape[0]
+
+ def test_pickle_py2_bytes_encoding(self):
+ # Check that arrays and scalars pickled on Py2 are
+ # unpickleable on Py3 using encoding='bytes'
+
+ test_data = [
+ # (original, py2_pickle)
+ (np.unicode_('\u6f2c'),
+ b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
+ b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
+ b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."),
+
+ (np.array([9e123], dtype=np.float64),
+ b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
+ b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
+ b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
+ b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."),
+
+ (np.array([(9e123,)], dtype=[('name', float)]),
+ b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
+ b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
+ b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
+ b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
+ b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
+ b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
+ ]
+
+ for original, data in test_data:
+ result = pickle.loads(data, encoding='bytes')
+ assert_equal(result, original)
+
+ if isinstance(result, np.ndarray) and result.dtype.names is not None:
+ for name in result.dtype.names:
+ assert_(isinstance(name, str))
+
+ def test_pickle_dtype(self):
+ # Ticket #251
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pickle.dumps(float, protocol=proto)
+
+ def test_swap_real(self):
+ # Ticket #265
+ assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
+ assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
+ assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
+ assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
+
+ def test_object_array_from_list(self):
+ # Ticket #270 (gh-868)
+ assert_(np.array([1, None, 'A']).shape == (3,))
+
+ def test_multiple_assign(self):
+ # Ticket #273
+ a = np.zeros((3, 1), int)
+ a[[1, 2]] = 1
+
+ def test_empty_array_type(self):
+ assert_equal(np.array([]).dtype, np.zeros(0).dtype)
+
+ def test_void_copyswap(self):
+ dt = np.dtype([('one', '<i4'), ('two', '<i4')])
+ x = np.array((1, 2), dtype=dt)
+ x = x.byteswap()
+ assert_(x['one'] > 1 and x['two'] > 2)
+
+ def test_method_args(self):
+ # Make sure methods and functions have same default axis
+ # keyword and arguments
+ funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
+ ('sometrue', 'any'),
+ ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
+ 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
+ 'round', 'min', 'max', 'argsort', 'sort']
+ funcs2 = ['compress', 'take', 'repeat']
+
+ for func in funcs1:
+ arr = np.random.rand(8, 7)
+ arr2 = arr.copy()
+ if isinstance(func, tuple):
+ func_meth = func[1]
+ func = func[0]
+ else:
+ func_meth = func
+ res1 = getattr(arr, func_meth)()
+ res2 = getattr(np, func)(arr2)
+ if res1 is None:
+ res1 = arr
+
+ if res1.dtype.kind in 'uib':
+ assert_((res1 == res2).all(), func)
+ else:
+ assert_(abs(res1-res2).max() < 1e-8, func)
+
+ for func in funcs2:
+ arr1 = np.random.rand(8, 7)
+ arr2 = np.random.rand(8, 7)
+ res1 = None
+ if func == 'compress':
+ arr1 = arr1.ravel()
+ res1 = getattr(arr2, func)(arr1)
+ else:
+ arr2 = (15*arr2).astype(int).ravel()
+ if res1 is None:
+ res1 = getattr(arr1, func)(arr2)
+ res2 = getattr(np, func)(arr1, arr2)
+ assert_(abs(res1-res2).max() < 1e-8, func)
+
+ def test_mem_lexsort_strings(self):
+ # Ticket #298
+ lst = ['abc', 'cde', 'fgh']
+ np.lexsort((lst,))
+
+ def test_fancy_index(self):
+ # Ticket #302
+ x = np.array([1, 2])[np.array([0])]
+ assert_equal(x.shape, (1,))
+
+ def test_recarray_copy(self):
+ # Ticket #312
+ dt = [('x', np.int16), ('y', np.float64)]
+ ra = np.array([(1, 2.3)], dtype=dt)
+ rb = np.rec.array(ra, dtype=dt)
+ rb['x'] = 2.
+ assert_(ra['x'] != rb['x'])
+
+ def test_rec_fromarray(self):
+ # Ticket #322
+ x1 = np.array([[1, 2], [3, 4], [5, 6]])
+ x2 = np.array(['a', 'dd', 'xyz'])
+ x3 = np.array([1.1, 2, 3])
+ np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
+
+ def test_object_array_assign(self):
+ x = np.empty((2, 2), object)
+ x.flat[2] = (1, 2, 3)
+ assert_equal(x.flat[2], (1, 2, 3))
+
+ def test_ndmin_float64(self):
+ # Ticket #324
+ x = np.array([1, 2, 3], dtype=np.float64)
+ assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
+ assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
+
+ def test_ndmin_order(self):
+ # Issue #465 and related checks
+ assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
+ assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
+ assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
+ assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
+
+ def test_mem_axis_minimization(self):
+ # Ticket #327
+ data = np.arange(5)
+ data = np.add.outer(data, data)
+
+ def test_mem_float_imag(self):
+ # Ticket #330
+ np.float64(1.0).imag
+
+ def test_dtype_tuple(self):
+ # Ticket #334
+ assert_(np.dtype('i4') == np.dtype(('i4', ())))
+
+ def test_dtype_posttuple(self):
+ # Ticket #335
+ np.dtype([('col1', '()i4')])
+
+ def test_numeric_carray_compare(self):
+ # Ticket #341
+ assert_equal(np.array(['X'], 'c'), b'X')
+
+ def test_string_array_size(self):
+ # Ticket #342
+ assert_raises(ValueError,
+ np.array, [['X'], ['X', 'X', 'X']], '|S1')
+
+ def test_dtype_repr(self):
+ # Ticket #344
+ dt1 = np.dtype(('uint32', 2))
+ dt2 = np.dtype(('uint32', (2,)))
+ assert_equal(dt1.__repr__(), dt2.__repr__())
+
+ def test_reshape_order(self):
+ # Make sure reshape order works.
+ a = np.arange(6).reshape(2, 3, order='F')
+ assert_equal(a, [[0, 2, 4], [1, 3, 5]])
+ a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
+ b = a[:, 1]
+ assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
+
+ def test_reshape_zero_strides(self):
+ # Issue #380, test reshaping of zero strided arrays
+ a = np.ones(1)
+ a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
+ assert_(a.reshape(5, 1).strides[0] == 0)
+
+ def test_reshape_zero_size(self):
+ # GitHub Issue #2700, setting shape failed for 0-sized arrays
+ a = np.ones((0, 2))
+ a.shape = (-1, 2)
+
+ # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
+ # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous.
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride debug")
+ def test_reshape_trailing_ones_strides(self):
+ # GitHub issue gh-2949, bad strides for trailing ones of new shape
+ a = np.zeros(12, dtype=np.int32)[::2] # not contiguous
+ strides_c = (16, 8, 8, 8)
+ strides_f = (8, 24, 48, 48)
+ assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
+ assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
+ assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
+
+ def test_repeat_discont(self):
+ # Ticket #352
+ a = np.arange(12).reshape(4, 3)[:, 2]
+ assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
+
+ def test_array_index(self):
+ # Make sure optimization is not called in this case.
+ a = np.array([1, 2, 3])
+ a2 = np.array([[1, 2, 3]])
+ assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
+
+ def test_object_argmax(self):
+ a = np.array([1, 2, 3], dtype=object)
+ assert_(a.argmax() == 2)
+
+ def test_recarray_fields(self):
+ # Ticket #372
+ dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
+ dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
+ for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
+ np.rec.array([(1, 2), (3, 4)], "i4,i4"),
+ np.rec.array([(1, 2), (3, 4)]),
+ np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
+ np.rec.fromarrays([(1, 2), (3, 4)])]:
+ assert_(a.dtype in [dt0, dt1])
+
+ def test_random_shuffle(self):
+ # Ticket #374
+ a = np.arange(5).reshape((5, 1))
+ b = a.copy()
+ np.random.shuffle(b)
+ assert_equal(np.sort(b, axis=0), a)
+
+ def test_refcount_vdot(self):
+ # Changeset #3443
+ _assert_valid_refcount(np.vdot)
+
+ def test_startswith(self):
+ ca = np.char.array(['Hi', 'There'])
+ assert_equal(ca.startswith('H'), [True, False])
+
+ def test_noncommutative_reduce_accumulate(self):
+ # Ticket #413
+ tosubtract = np.arange(5)
+ todivide = np.array([2.0, 0.5, 0.25])
+ assert_equal(np.subtract.reduce(tosubtract), -10)
+ assert_equal(np.divide.reduce(todivide), 16.0)
+ assert_array_equal(np.subtract.accumulate(tosubtract),
+ np.array([0, -1, -3, -6, -10]))
+ assert_array_equal(np.divide.accumulate(todivide),
+ np.array([2., 4., 16.]))
+
+ def test_convolve_empty(self):
+ # Convolve should raise an error for empty input array.
+ assert_raises(ValueError, np.convolve, [], [1])
+ assert_raises(ValueError, np.convolve, [1], [])
+
+ def test_multidim_byteswap(self):
+ # Ticket #449
+ r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
+ assert_array_equal(r.byteswap(),
+ np.array([(256, (0, 256, 512))], r.dtype))
+
+ def test_string_NULL(self):
+ # Changeset 3557
+ assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
+ 'a\x00\x0b\x0c')
+
+ def test_junk_in_string_fields_of_recarray(self):
+ # Ticket #483
+ r = np.array([[b'abc']], dtype=[('var1', '|S20')])
+ assert_(asbytes(r['var1'][0][0]) == b'abc')
+
+ def test_take_output(self):
+ # Ensure that 'take' honours output parameter.
+ x = np.arange(12).reshape((3, 4))
+ a = np.take(x, [0, 2], axis=1)
+ b = np.zeros_like(a)
+ np.take(x, [0, 2], axis=1, out=b)
+ assert_array_equal(a, b)
+
+ def test_take_object_fail(self):
+ # Issue gh-3001
+ d = 123.
+ a = np.array([d, 1], dtype=object)
+ if HAS_REFCOUNT:
+ ref_d = sys.getrefcount(d)
+ try:
+ a.take([0, 100])
+ except IndexError:
+ pass
+ if HAS_REFCOUNT:
+ assert_(ref_d == sys.getrefcount(d))
+
+ def test_array_str_64bit(self):
+ # Ticket #501
+ s = np.array([1, np.nan], dtype=np.float64)
+ with np.errstate(all='raise'):
+ np.array_str(s) # Should succeed
+
+ def test_frompyfunc_endian(self):
+ # Ticket #503
+ from math import radians
+ uradians = np.frompyfunc(radians, 1, 1)
+ big_endian = np.array([83.4, 83.5], dtype='>f8')
+ little_endian = np.array([83.4, 83.5], dtype='<f8')
+ assert_almost_equal(uradians(big_endian).astype(float),
+ uradians(little_endian).astype(float))
+
+ def test_mem_string_arr(self):
+ # Ticket #514
+ s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ t = []
+ np.hstack((t, s))
+
+ def test_arr_transpose(self):
+ # Ticket #516
+ x = np.random.rand(*(2,)*16)
+ x.transpose(list(range(16))) # Should succeed
+
+ def test_string_mergesort(self):
+ # Ticket #540
+ x = np.array(['a']*32)
+ assert_array_equal(x.argsort(kind='m'), np.arange(32))
+
+ def test_argmax_byteorder(self):
+ # Ticket #546
+ a = np.arange(3, dtype='>f')
+ assert_(a[a.argmax()] == a.max())
+
+ def test_rand_seed(self):
+ # Ticket #555
+ for l in np.arange(4):
+ np.random.seed(l)
+
+ def test_mem_deallocation_leak(self):
+ # Ticket #562
+ a = np.zeros(5, dtype=float)
+ b = np.array(a, dtype=float)
+ del a, b
+
+ def test_mem_on_invalid_dtype(self):
+ "Ticket #583"
+ assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
+
+ def test_dot_negative_stride(self):
+ # Ticket #588
+ x = np.array([[1, 5, 25, 125., 625]])
+ y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
+ z = y[::-1].copy()
+ y2 = y[::-1]
+ assert_equal(np.dot(x, z), np.dot(x, y2))
+
+ def test_object_casting(self):
+ # This used to trigger the object-type version of
+ # the bitwise_or operation, because float64 -> object
+ # casting succeeds
+ def rs():
+ x = np.ones([484, 286])
+ y = np.zeros([484, 286])
+ x |= y
+
+ assert_raises(TypeError, rs)
+
+ def test_unicode_scalar(self):
+ # Ticket #600
+ x = np.array(["DROND", "DROND1"], dtype="U6")
+ el = x[1]
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ new = pickle.loads(pickle.dumps(el, protocol=proto))
+ assert_equal(new, el)
+
+ def test_arange_non_native_dtype(self):
+ # Ticket #616
+ for T in ('>f4', '<f4'):
+ dt = np.dtype(T)
+ assert_equal(np.arange(0, dtype=dt).dtype, dt)
+ assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
+ assert_equal(np.arange(5, dtype=dt).dtype, dt)
+
+ def test_bool_flat_indexing_invalid_nr_elements(self):
+ s = np.ones(10, dtype=float)
+ x = np.array((15,), dtype=float)
+
+ def ia(x, s, v):
+ x[(s > 0)] = v
+
+ assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
+ # Old special case (different code path):
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+
+ def test_mem_scalar_indexing(self):
+ # Ticket #603
+ x = np.array([0], dtype=float)
+ index = np.array(0, dtype=np.int32)
+ x[index]
+
+ def test_binary_repr_0_width(self):
+ assert_equal(np.binary_repr(0, width=3), '000')
+
+ def test_fromstring(self):
+ assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
+ [12, 9, 9])
+
+ def test_searchsorted_variable_length(self):
+ x = np.array(['a', 'aa', 'b'])
+ y = np.array(['d', 'e'])
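+        # Both 'd' and 'e' sort after every element of x, so each is
+        # inserted at the end (index 3) despite the varying item lengths.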
+ assert_equal(x.searchsorted(y), [3, 3])
+
+ def test_string_argsort_with_zeros(self):
+ # Check argsort for strings containing zeros.
+ x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+ assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
+ assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
+
+ def test_string_sort_with_zeros(self):
+ # Check sort for strings containing zeros.
+ x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+ y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
+ assert_array_equal(np.sort(x, kind="q"), y)
+
+ def test_copy_detection_zero_dim(self):
+ # Ticket #658
+ np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+ def test_flat_byteorder(self):
+ # Ticket #657
+ x = np.arange(10)
+ assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
+ assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
+
+ def test_sign_bit(self):
+ x = np.array([0, -0.0, 0])
+ assert_equal(str(np.abs(x)), '[0. 0. 0.]')
+
+ def test_flat_index_byteswap(self):
+ for dt in (np.dtype('<i4'), np.dtype('>i4')):
+ x = np.array([-1, 0, 1], dtype=dt)
+ assert_equal(x.flat[0].dtype, x[0].dtype)
+
+ def test_copy_detection_corner_case(self):
+ # Ticket #658
+ np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+ # Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
+ # With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous,
+ # 0-sized reshape itself is tested elsewhere.
+ @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
+ reason="Using relaxed stride debug")
+ def test_copy_detection_corner_case2(self):
+ # Ticket #771: strides are not set correctly when reshaping 0-sized
+ # arrays
+ b = np.indices((0, 3, 4)).T.reshape(-1, 3)
+ assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
+
+ def test_object_array_refcounting(self):
+ # Ticket #633
+ if not hasattr(sys, 'getrefcount'):
+ return
+
+ # NB. this is probably CPython-specific
+
+ cnt = sys.getrefcount
+
+ a = object()
+ b = object()
+ c = object()
+
+ cnt0_a = cnt(a)
+ cnt0_b = cnt(b)
+ cnt0_c = cnt(c)
+
+ # -- 0d -> 1-d broadcast slice assignment
+
+ arr = np.zeros(5, dtype=np.object_)
+
+ arr[:] = a
+ assert_equal(cnt(a), cnt0_a + 5)
+
+ arr[:] = b
+ assert_equal(cnt(a), cnt0_a)
+ assert_equal(cnt(b), cnt0_b + 5)
+
+ arr[:2] = c
+ assert_equal(cnt(b), cnt0_b + 3)
+ assert_equal(cnt(c), cnt0_c + 2)
+
+ del arr
+
+ # -- 1-d -> 2-d broadcast slice assignment
+
+ arr = np.zeros((5, 2), dtype=np.object_)
+ arr0 = np.zeros(2, dtype=np.object_)
+
+ arr0[0] = a
+ assert_(cnt(a) == cnt0_a + 1)
+ arr0[1] = b
+ assert_(cnt(b) == cnt0_b + 1)
+
+ arr[:, :] = arr0
+ assert_(cnt(a) == cnt0_a + 6)
+ assert_(cnt(b) == cnt0_b + 6)
+
+ arr[:, 0] = None
+ assert_(cnt(a) == cnt0_a + 1)
+
+ del arr, arr0
+
+ # -- 2-d copying + flattening
+
+ arr = np.zeros((5, 2), dtype=np.object_)
+
+ arr[:, 0] = a
+ arr[:, 1] = b
+ assert_(cnt(a) == cnt0_a + 5)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ arr2 = arr.copy()
+ assert_(cnt(a) == cnt0_a + 10)
+ assert_(cnt(b) == cnt0_b + 10)
+
+ arr2 = arr[:, 0].copy()
+ assert_(cnt(a) == cnt0_a + 10)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ arr2 = arr.flatten()
+ assert_(cnt(a) == cnt0_a + 10)
+ assert_(cnt(b) == cnt0_b + 10)
+
+ del arr, arr2
+
+ # -- concatenate, repeat, take, choose
+
+ arr1 = np.zeros((5, 1), dtype=np.object_)
+ arr2 = np.zeros((5, 1), dtype=np.object_)
+
+ arr1[...] = a
+ arr2[...] = b
+ assert_(cnt(a) == cnt0_a + 5)
+ assert_(cnt(b) == cnt0_b + 5)
+
+ tmp = np.concatenate((arr1, arr2))
+ assert_(cnt(a) == cnt0_a + 5 + 5)
+ assert_(cnt(b) == cnt0_b + 5 + 5)
+
+ tmp = arr1.repeat(3, axis=0)
+ assert_(cnt(a) == cnt0_a + 5 + 3*5)
+
+ tmp = arr1.take([1, 2, 3], axis=0)
+ assert_(cnt(a) == cnt0_a + 5 + 3)
+
+ x = np.array([[0], [1], [0], [1], [1]], int)
+ tmp = x.choose(arr1, arr2)
+ assert_(cnt(a) == cnt0_a + 5 + 2)
+ assert_(cnt(b) == cnt0_b + 5 + 3)
+
+ del tmp # Avoid pyflakes unused variable warning
+
+ def test_mem_custom_float_to_array(self):
+ # Ticket 702
+ class MyFloat:
+ def __float__(self):
+ return 1.0
+
+ tmp = np.atleast_1d([MyFloat()])
+ tmp.astype(float) # Should succeed
+
+ def test_object_array_refcount_self_assign(self):
+ # Ticket #711
+ class VictimObject:
+ deleted = False
+
+ def __del__(self):
+ self.deleted = True
+
+ d = VictimObject()
+ arr = np.zeros(5, dtype=np.object_)
+ arr[:] = d
+ del d
+ arr[:] = arr # refcount of 'd' might hit zero here
+ assert_(not arr[0].deleted)
+ arr[:] = arr # trying to induce a segfault by doing it again...
+ assert_(not arr[0].deleted)
+
+ def test_mem_fromiter_invalid_dtype_string(self):
+ x = [1, 2, 3]
+ assert_raises(ValueError,
+ np.fromiter, [xi for xi in x], dtype='S')
+
+ def test_reduce_big_object_array(self):
+ # Ticket #713
+ oldsize = np.setbufsize(10*16)
+ a = np.array([None]*161, object)
+ assert_(not np.any(a))
+ np.setbufsize(oldsize)
+
+ def test_mem_0d_array_index(self):
+ # Ticket #714
+ np.zeros(10)[np.array(0)]
+
+ def test_nonnative_endian_fill(self):
+ # Non-native endian arrays were incorrectly filled with scalars
+ # before r5034.
+ if sys.byteorder == 'little':
+ dtype = np.dtype('>i4')
+ else:
+ dtype = np.dtype('<i4')
+ x = np.empty([1], dtype=dtype)
+ x.fill(1)
+ assert_equal(x, np.array([1], dtype=dtype))
+
+ def test_dot_alignment_sse2(self):
+ # Test for ticket #551, changeset r5140
+ x = np.zeros((30, 40))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ y = pickle.loads(pickle.dumps(x, protocol=proto))
+            # y is now typically not aligned on an 8-byte boundary
+ z = np.ones((1, y.shape[0]))
+ # This shouldn't cause a segmentation fault:
+ np.dot(z, y)
+
+ def test_astype_copy(self):
+ # Ticket #788, changeset r5155
+ # The test data file was generated by scipy.io.savemat.
+ # The dtype is float64, but the isbuiltin attribute is 0.
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filename = path.join(data_dir, "astype_copy.pkl")
+ with open(filename, 'rb') as f:
+ xp = pickle.load(f, encoding='latin1')
+ xpd = xp.astype(np.float64)
+ assert_((xp.__array_interface__['data'][0] !=
+ xpd.__array_interface__['data'][0]))
+
+ def test_compress_small_type(self):
+ # Ticket #789, changeset 5217.
+ # compress with out argument segfaulted if cannot cast safely
+ import numpy as np
+ a = np.array([[1, 2], [3, 4]])
+ b = np.zeros((2, 1), dtype=np.single)
+ try:
+ a.compress([True, False], axis=1, out=b)
+ raise AssertionError("compress with an out which cannot be "
+ "safely casted should not return "
+ "successfully")
+ except TypeError:
+ pass
+
+ def test_attributes(self):
+ # Ticket #791
+ class TestArray(np.ndarray):
+ def __new__(cls, data, info):
+ result = np.array(data)
+ result = result.view(cls)
+ result.info = info
+ return result
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, 'info', '')
+
+ dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
+ assert_(dat.info == 'jubba')
+ dat.resize((4, 2))
+ assert_(dat.info == 'jubba')
+ dat.sort()
+ assert_(dat.info == 'jubba')
+ dat.fill(2)
+ assert_(dat.info == 'jubba')
+ dat.put([2, 3, 4], [6, 3, 4])
+ assert_(dat.info == 'jubba')
+ dat.setfield(4, np.int32, 0)
+ assert_(dat.info == 'jubba')
+ dat.setflags()
+ assert_(dat.info == 'jubba')
+ assert_(dat.all(1).info == 'jubba')
+ assert_(dat.any(1).info == 'jubba')
+ assert_(dat.argmax(1).info == 'jubba')
+ assert_(dat.argmin(1).info == 'jubba')
+ assert_(dat.argsort(1).info == 'jubba')
+ assert_(dat.astype(TestArray).info == 'jubba')
+ assert_(dat.byteswap().info == 'jubba')
+ assert_(dat.clip(2, 7).info == 'jubba')
+ assert_(dat.compress([0, 1, 1]).info == 'jubba')
+ assert_(dat.conj().info == 'jubba')
+ assert_(dat.conjugate().info == 'jubba')
+ assert_(dat.copy().info == 'jubba')
+ dat2 = TestArray([2, 3, 1, 0], 'jubba')
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ assert_(dat2.choose(choices).info == 'jubba')
+ assert_(dat.cumprod(1).info == 'jubba')
+ assert_(dat.cumsum(1).info == 'jubba')
+ assert_(dat.diagonal().info == 'jubba')
+ assert_(dat.flatten().info == 'jubba')
+ assert_(dat.getfield(np.int32, 0).info == 'jubba')
+ assert_(dat.imag.info == 'jubba')
+ assert_(dat.max(1).info == 'jubba')
+ assert_(dat.mean(1).info == 'jubba')
+ assert_(dat.min(1).info == 'jubba')
+ assert_(dat.newbyteorder().info == 'jubba')
+ assert_(dat.prod(1).info == 'jubba')
+ assert_(dat.ptp(1).info == 'jubba')
+ assert_(dat.ravel().info == 'jubba')
+ assert_(dat.real.info == 'jubba')
+ assert_(dat.repeat(2).info == 'jubba')
+ assert_(dat.reshape((2, 4)).info == 'jubba')
+ assert_(dat.round().info == 'jubba')
+ assert_(dat.squeeze().info == 'jubba')
+ assert_(dat.std(1).info == 'jubba')
+ assert_(dat.sum(1).info == 'jubba')
+ assert_(dat.swapaxes(0, 1).info == 'jubba')
+ assert_(dat.take([2, 3, 5]).info == 'jubba')
+ assert_(dat.transpose().info == 'jubba')
+ assert_(dat.T.info == 'jubba')
+ assert_(dat.var(1).info == 'jubba')
+ assert_(dat.view(TestArray).info == 'jubba')
+ # These methods do not preserve subclasses
+ assert_(type(dat.nonzero()[0]) is np.ndarray)
+ assert_(type(dat.nonzero()[1]) is np.ndarray)
+
+ def test_recarray_tolist(self):
+ # Ticket #793, changeset r5215
+ # Comparisons fail for NaN, so we can't use random memory
+ # for the test.
+ buf = np.zeros(40, dtype=np.int8)
+ a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
+ b = a.tolist()
+        assert_(a[0].tolist() == b[0])
+        assert_(a[1].tolist() == b[1])
+
+ def test_nonscalar_item_method(self):
+        # Make sure that .item() fails gracefully when it should
+ a = np.arange(5)
+ assert_raises(ValueError, a.item)
+
+ def test_char_array_creation(self):
+ a = np.array('123', dtype='c')
+ b = np.array([b'1', b'2', b'3'])
+ assert_equal(a, b)
+
+ def test_unaligned_unicode_access(self):
+ # Ticket #825
+ for i in range(1, 9):
+ msg = 'unicode offset: %d chars' % i
+ t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
+ x = np.array([(b'a', 'b')], dtype=t)
+ assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
+
+ def test_sign_for_complex_nan(self):
+ # Ticket 794.
+ with np.errstate(invalid='ignore'):
+ C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
+ have = np.sign(C)
+ want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
+ assert_equal(have, want)
+
+ def test_for_equal_names(self):
+ # Ticket #674
+ dt = np.dtype([('foo', float), ('bar', float)])
+ a = np.zeros(10, dt)
+ b = list(a.dtype.names)
+ b[0] = "notfoo"
+ a.dtype.names = b
+ assert_(a.dtype.names[0] == "notfoo")
+ assert_(a.dtype.names[1] == "bar")
+
+ def test_for_object_scalar_creation(self):
+ # Ticket #816
+ a = np.object_()
+ b = np.object_(3)
+ b2 = np.object_(3.0)
+ c = np.object_([4, 5])
+ d = np.object_([None, {}, []])
+ assert_(a is None)
+ assert_(type(b) is int)
+ assert_(type(b2) is float)
+ assert_(type(c) is np.ndarray)
+ assert_(c.dtype == object)
+ assert_(d.dtype == object)
+
+ def test_array_resize_method_system_error(self):
+ # Ticket #840 - order should be an invalid keyword.
+ x = np.array([[0, 1], [2, 3]])
+ assert_raises(TypeError, x.resize, (2, 2), order='C')
+
+ def test_for_zero_length_in_choose(self):
+ "Ticket #882"
+ a = np.array(1)
+ assert_raises(ValueError, lambda x: x.choose([]), a)
+
+ def test_array_ndmin_overflow(self):
+ "Ticket #947."
+ assert_raises(ValueError, lambda: np.array([1], ndmin=33))
+
+ def test_void_scalar_with_titles(self):
+ # No ticket
+ data = [('john', 4), ('mary', 5)]
+ dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
+ arr = np.array(data, dtype=dtype1)
+ assert_(arr[0][0] == 'john')
+ assert_(arr[0][1] == 4)
+
+ def test_void_scalar_constructor(self):
+        # Issue #1550
+
+        # Create test string data, construct a void scalar from the data and
+        # assert that the void scalar contains the original data.
+ test_string = np.array("test")
+ test_string_void_scalar = np.core.multiarray.scalar(
+ np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())
+
+ assert_(test_string_void_scalar.view(test_string.dtype) == test_string)
+
+        # Create record scalar, construct from data and assert that
+        # reconstructed scalar is correct.
+ test_record = np.ones((), "i,i")
+ test_record_void_scalar = np.core.multiarray.scalar(
+ test_record.dtype, test_record.tobytes())
+
+ assert_(test_record_void_scalar == test_record)
+
+ # Test pickle and unpickle of void and record scalars
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)) == test_string)
+ assert_(pickle.loads(
+ pickle.dumps(test_record, protocol=proto)) == test_record)
+
+ @_no_tracing
+ def test_blasdot_uninitialized_memory(self):
+ # Ticket #950
+ for m in [0, 1, 2]:
+ for n in [0, 1, 2]:
+ for k in range(3):
+ # Try to ensure that x->data contains non-zero floats
+ x = np.array([123456789e199], dtype=np.float64)
+ if IS_PYPY:
+ x.resize((m, 0), refcheck=False)
+ else:
+ x.resize((m, 0))
+ y = np.array([123456789e199], dtype=np.float64)
+ if IS_PYPY:
+ y.resize((0, n), refcheck=False)
+ else:
+ y.resize((0, n))
+
+ # `dot` should just return zero (m, n) matrix
+ z = np.dot(x, y)
+ assert_(np.all(z == 0))
+ assert_(z.shape == (m, n))
+
+ def test_zeros(self):
+ # Regression test for #1061.
+        # Set a size which cannot fit into a 64-bit signed integer
+ sz = 2 ** 64
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed dimension exceeded'):
+ np.empty(sz)
+
+ def test_huge_arange(self):
+ # Regression test for #1062.
+        # Set a size which cannot fit into a 64-bit signed integer
+ sz = 2 ** 64
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed size exceeded'):
+ np.arange(sz)
+
+ def test_fromiter_bytes(self):
+ # Ticket #1058
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
+ assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+ def test_array_from_sequence_scalar_array(self):
+ # Ticket #1078: segfaults when creating an array with a sequence of
+ # 0d arrays.
+ a = np.array((np.ones(2), np.array(2)), dtype=object)
+ assert_equal(a.shape, (2,))
+ assert_equal(a.dtype, np.dtype(object))
+ assert_equal(a[0], np.ones(2))
+ assert_equal(a[1], np.array(2))
+
+ a = np.array(((1,), np.array(1)), dtype=object)
+ assert_equal(a.shape, (2,))
+ assert_equal(a.dtype, np.dtype(object))
+ assert_equal(a[0], (1,))
+ assert_equal(a[1], np.array(1))
+
+ def test_array_from_sequence_scalar_array2(self):
+ # Ticket #1081: weird array with strange input...
+ t = np.array([np.array([]), np.array(0, object)], dtype=object)
+ assert_equal(t.shape, (2,))
+ assert_equal(t.dtype, np.dtype(object))
+
+ def test_array_too_big(self):
+ # Ticket #1080.
+ assert_raises(ValueError, np.zeros, [975]*7, np.int8)
+ assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
+
+ def test_dtype_keyerrors_(self):
+ # Ticket #1106.
+ dt = np.dtype([('f1', np.uint)])
+ assert_raises(KeyError, dt.__getitem__, "f2")
+ assert_raises(IndexError, dt.__getitem__, 1)
+ assert_raises(TypeError, dt.__getitem__, 0.0)
+
+ def test_lexsort_buffer_length(self):
+ # Ticket #1217, don't segfault.
+ a = np.ones(100, dtype=np.int8)
+ b = np.ones(100, dtype=np.int32)
+ i = np.lexsort((a[::-1], b))
+ assert_equal(i, np.arange(100, dtype=int))
+
+ def test_object_array_to_fixed_string(self):
+ # Ticket #1235.
+ a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
+ b = np.array(a, dtype=(np.str_, 8))
+ assert_equal(a, b)
+ c = np.array(a, dtype=(np.str_, 5))
+ assert_equal(c, np.array(['abcde', 'ijklm']))
+ d = np.array(a, dtype=(np.str_, 12))
+ assert_equal(a, d)
+ e = np.empty((2, ), dtype=(np.str_, 8))
+ e[:] = a[:]
+ assert_equal(a, e)
+
+ def test_unicode_to_string_cast(self):
+ # Ticket #1240.
+ a = np.array([['abc', '\u03a3'],
+ ['asdf', 'erw']],
+ dtype='U')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S4')
+
+ def test_unicode_to_string_cast_error(self):
+ # gh-15790
+ a = np.array(['\x80'] * 129, dtype='U3')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S')
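+        # The same cast must also fail on a non-contiguous view.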
+ b = a.reshape(3, 43)[:-1, :-1]
+ assert_raises(UnicodeEncodeError, np.array, b, 'S')
+
+ def test_mixed_string_byte_array_creation(self):
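+        # Mixed bytes/str inputs promote to unicode; with 4 bytes per
+        # character, the longest item determines the itemsize (U4 -> 16).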
+ a = np.array(['1234', b'123'])
+ assert_(a.itemsize == 16)
+ a = np.array([b'123', '1234'])
+ assert_(a.itemsize == 16)
+ a = np.array(['1234', b'123', '12345'])
+ assert_(a.itemsize == 20)
+ a = np.array([b'123', '1234', b'12345'])
+ assert_(a.itemsize == 20)
+ a = np.array([b'123', '1234', b'1234'])
+ assert_(a.itemsize == 16)
+
+ def test_misaligned_objects_segfault(self):
+ # Ticket #1198 and #1267
+ a1 = np.zeros((10,), dtype='O,c')
+ a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
+ a1['f0'] = a2
+ repr(a1)
+ np.argmax(a1['f0'])
+ a1['f0'][1] = "FOO"
+ a1['f0'] = "FOO"
+ np.array(a1['f0'], dtype='S')
+ np.nonzero(a1['f0'])
+ a1.sort()
+ copy.deepcopy(a1)
+
+ def test_misaligned_scalars_segfault(self):
+ # Ticket #1267
+ s1 = np.array(('a', 'Foo'), dtype='c,O')
+ s2 = np.array(('b', 'Bar'), dtype='c,O')
+ s1['f1'] = s2['f1']
+ s1['f1'] = 'Baz'
+
+ def test_misaligned_dot_product_objects(self):
+ # Ticket #1267
+ # This didn't require a fix, but it's worth testing anyway, because
+ # it may fail if .dot stops enforcing the arrays to be BEHAVED
+ a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
+ b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
+ np.dot(a['f0'], b['f0'])
+
+ def test_byteswap_complex_scalar(self):
+ # Ticket #1259 and gh-441
+ for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
+ z = np.array([2.2-1.1j], dtype)
+ x = z[0] # always native-endian
+ y = x.byteswap()
+ if x.dtype.byteorder == z.dtype.byteorder:
+ # little-endian machine
+ assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
+ else:
+ # big-endian machine
+ assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
+ # double check real and imaginary parts:
+ assert_equal(x.real, y.real.byteswap())
+ assert_equal(x.imag, y.imag.byteswap())
+
+ def test_structured_arrays_with_objects1(self):
+ # Ticket #1299
+ stra = 'aaaa'
+ strb = 'bbbb'
+ x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+ x[x.nonzero()] = x.ravel()[:1]
+ assert_(x[0, 1] == x[0, 0])
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_structured_arrays_with_objects2(self):
+ # Ticket #1299 second test
+ stra = 'aaaa'
+ strb = 'bbbb'
+ numb = sys.getrefcount(strb)
+ numa = sys.getrefcount(stra)
+ x = np.array([[(0, stra), (1, strb)]], 'i8,O')
+ x[x.nonzero()] = x.ravel()[:1]
+ assert_(sys.getrefcount(strb) == numb)
+ assert_(sys.getrefcount(stra) == numa + 2)
+
+ def test_duplicate_title_and_name(self):
+ # Ticket #1254
+ dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
+ assert_raises(ValueError, np.dtype, dtspec)
+
+ def test_signed_integer_division_overflow(self):
+ # Ticket #1317.
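+        # For two's-complement types abs(min) == max + 1, so min // -1
+        # overflows the type (e.g. int8: -128 // -1 would be 128).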
+ def test_type(t):
+ min = np.array([np.iinfo(t).min])
+ min //= -1
+
+ with np.errstate(over="ignore"):
+ for t in (np.int8, np.int16, np.int32, np.int64, int):
+ test_type(t)
+
+ def test_buffer_hashlib(self):
+ from hashlib import sha256
+
+ x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
+ assert_equal(sha256(x).hexdigest(), '4636993d3e1da4e9d6b8f87b79e8f7c6d018580d52661950eabc3845c5897a4d')
+
+ def test_0d_string_scalar(self):
+ # Bug #1436; the following should succeed
+ np.asarray('x', '>c')
+
+ def test_log1p_compiler_shenanigans(self):
+ # Check if log1p is behaving on 32 bit intel systems.
+ assert_(np.isfinite(np.log1p(np.exp2(-53))))
+
+ def test_fromiter_comparison(self):
+ a = np.fromiter(list(range(10)), dtype='b')
+ b = np.fromiter(list(range(10)), dtype='B')
+ assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+ assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+ def test_fromstring_crash(self):
+ # Ticket #1345: the following should not cause a crash
+ with assert_warns(DeprecationWarning):
+ np.fromstring(b'aa, aa, 1.0', sep=',')
+
+ def test_ticket_1539(self):
+ dtypes = [x for x in np.sctypeDict.values()
+ if (issubclass(x, np.number)
+ and not issubclass(x, np.timedelta64))]
+ a = np.array([], np.bool_) # not x[0] because it is unordered
+ failures = []
+
+ for x in dtypes:
+ b = a.astype(x)
+ for y in dtypes:
+ c = a.astype(y)
+ try:
+ np.dot(b, c)
+ except TypeError:
+ failures.append((x, y))
+ if failures:
+ raise AssertionError("Failures: %r" % failures)
+
+ def test_ticket_1538(self):
+ x = np.finfo(np.float32)
+ for name in 'eps epsneg max min resolution tiny'.split():
+ assert_equal(type(getattr(x, name)), np.float32,
+ err_msg=name)
+
+ def test_ticket_1434(self):
+ # Check that the out= argument in var and std has an effect
+ data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
+ out = np.zeros((3,))
+
+ ret = data.var(axis=1, out=out)
+ assert_(ret is out)
+ assert_array_equal(ret, data.var(axis=1))
+
+ ret = data.std(axis=1, out=out)
+ assert_(ret is out)
+ assert_array_equal(ret, data.std(axis=1))
+
+ def test_complex_nan_maximum(self):
+ cnan = complex(0, np.nan)
+ assert_equal(np.maximum(1, cnan), cnan)
+
+ def test_subclass_int_tuple_assignment(self):
+ # ticket #1563
+ class Subclass(np.ndarray):
+ def __new__(cls, i):
+ return np.ones((i,)).view(cls)
+
+ x = Subclass(5)
+ x[(0,)] = 2 # shouldn't raise an exception
+ assert_equal(x[0], 2)
+
+ def test_ufunc_no_unnecessary_views(self):
+ # ticket #1548
+ class Subclass(np.ndarray):
+ pass
+ x = np.array([1, 2, 3]).view(Subclass)
+ y = np.add(x, x, x)
+ assert_equal(id(x), id(y))
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_take_refcount(self):
+ # ticket #939
+ a = np.arange(16, dtype=float)
+ a.shape = (4, 4)
+ lut = np.ones((5 + 3, 4), float)
+ rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
+ c1 = sys.getrefcount(rgba)
+ try:
+ lut.take(a, axis=0, mode='clip', out=rgba)
+ except TypeError:
+ pass
+ c2 = sys.getrefcount(rgba)
+ assert_equal(c1, c2)
+
+ def test_fromfile_tofile_seeks(self):
+ # On Python 3, tofile/fromfile used to get (#1610) the Python
+ # file handle out of sync
+ f0 = tempfile.NamedTemporaryFile()
+ f = f0.file
+ f.write(np.arange(255, dtype='u1').tobytes())
+
+ f.seek(20)
+ ret = np.fromfile(f, count=4, dtype='u1')
+ assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
+ assert_equal(f.tell(), 24)
+
+ f.seek(40)
+ np.array([1, 2, 3], dtype='u1').tofile(f)
+ assert_equal(f.tell(), 43)
+
+ f.seek(40)
+ data = f.read(3)
+ assert_equal(data, b"\x01\x02\x03")
+
+ f.seek(80)
+ f.read(4)
+ data = np.fromfile(f, dtype='u1', count=4)
+ assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
+
+ f.close()
+
+ def test_complex_scalar_warning(self):
+ for tp in [np.csingle, np.cdouble, np.clongdouble]:
+ x = tp(1+2j)
+ assert_warns(np.ComplexWarning, float, x)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning)
+ assert_equal(float(x), float(x.real))
+
+ def test_complex_scalar_complex_cast(self):
+ for tp in [np.csingle, np.cdouble, np.clongdouble]:
+ x = tp(1+2j)
+ assert_equal(complex(x), 1+2j)
+
+ def test_complex_boolean_cast(self):
+ # Ticket #2218
+ for tp in [np.csingle, np.cdouble, np.clongdouble]:
+ x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
+ assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
+ assert_(np.any(x))
+ assert_(np.all(x[1:]))
+
+ def test_uint_int_conversion(self):
+ x = 2**64 - 1
+ assert_equal(int(np.uint64(x)), x)
+
+ def test_duplicate_field_names_assign(self):
+ ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
+ ra.dtype.names = ('f1', 'f2')
+ repr(ra) # should not cause a segmentation fault
+ assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
+
+ def test_eq_string_and_object_array(self):
+ # From e-mail thread "__eq__ with str and object" (Keith Goodman)
+ a1 = np.array(['a', 'b'], dtype=object)
+ a2 = np.array(['a', 'c'])
+ assert_array_equal(a1 == a2, [True, False])
+ assert_array_equal(a2 == a1, [True, False])
+
+ def test_nonzero_byteswap(self):
+ a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
+ a.dtype = np.float32
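+        # Reinterpreting the bits: 0x80000000 is -0.0 (sign bit only) and
+        # 0x00000080 is a tiny nonzero denormal, so only index 1 is nonzero.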
+ assert_equal(a.nonzero()[0], [1])
+ a = a.byteswap().newbyteorder()
+ assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
+
+ def test_find_common_type_boolean(self):
+ # Ticket #1695
+ assert_(np.find_common_type([], ['?', '?']) == '?')
+
+ def test_empty_mul(self):
+ a = np.array([1.])
+ a[1:1] *= 2
+ assert_equal(a, [1.])
+
+ def test_array_side_effect(self):
+ # The second use of itemsize was throwing an exception because in
+ # ctors.c, discover_itemsize was calling PyObject_Length without
+ # checking the return code. This failed to get the length of the
+ # number 2, and the exception hung around until something checked
+ # PyErr_Occurred() and returned an error.
+ assert_equal(np.dtype('S10').itemsize, 10)
+ np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
+ assert_equal(np.dtype('S10').itemsize, 10)
+
+ def test_any_float(self):
+ # all and any for floats
+ a = np.array([0.1, 0.9])
+ assert_(np.any(a))
+ assert_(np.all(a))
+
+ def test_large_float_sum(self):
+ a = np.arange(10000, dtype='f')
+ assert_equal(a.sum(dtype='d'), a.astype('d').sum())
+
+ def test_ufunc_casting_out(self):
+ a = np.array(1.0, dtype=np.float32)
+ b = np.array(1.0, dtype=np.float64)
+ c = np.array(1.0, dtype=np.float32)
+ np.add(a, b, out=c)
+ assert_equal(c, 2.0)
+
+ def test_array_scalar_contiguous(self):
+ # Array scalars are both C and Fortran contiguous
+ assert_(np.array(1.0).flags.c_contiguous)
+ assert_(np.array(1.0).flags.f_contiguous)
+ assert_(np.array(np.float32(1.0)).flags.c_contiguous)
+ assert_(np.array(np.float32(1.0)).flags.f_contiguous)
+
+ def test_squeeze_contiguous(self):
+ # Similar to GitHub issue #387
+ a = np.zeros((1, 2)).squeeze()
+ b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
+ assert_(a.flags.c_contiguous)
+ assert_(a.flags.f_contiguous)
+ assert_(b.flags.f_contiguous)
+
+ def test_squeeze_axis_handling(self):
+ # Issue #10779
+ # Ensure proper handling of objects
+ # that don't support axis specification
+ # when squeezing
+
+ class OldSqueeze(np.ndarray):
+
+            def __new__(cls, input_array):
+ obj = np.asarray(input_array).view(cls)
+ return obj
+
+            # It is perfectly reasonable that, prior to NumPy 1.7.0,
+            # a subclass of ndarray was created that did not expect
+            # squeeze to accept an axis argument.
+            # NOTE: this example is somewhat artificial; it is designed
+            # to simulate an old API expectation to guard against
+            # regression.
+ def squeeze(self):
+ return super().squeeze()
+
+        oldsqueeze = OldSqueeze(np.array([[1], [2], [3]]))
+
+ # if no axis argument is specified the old API
+ # expectation should give the correct result
+        assert_equal(np.squeeze(oldsqueeze),
+                     np.array([1, 2, 3]))
+
+ # likewise, axis=None should work perfectly well
+ # with the old API expectation
+        assert_equal(np.squeeze(oldsqueeze, axis=None),
+                     np.array([1, 2, 3]))
+
+        # however, specifying any particular axis should raise a
+        # TypeError under the old API expectation, even for a valid
+        # axis specification like 1 for this array
+ with assert_raises(TypeError):
+ # this would silently succeed for array
+ # subclasses / objects that did not support
+ # squeeze axis argument handling before fixing
+ # Issue #10779
+ np.squeeze(oldsqueeze, axis=1)
+
+ # check for the same behavior when using an invalid
+ # axis specification -- in this case axis=0 does not
+ # have size 1, but the priority should be to raise
+ # a TypeError for the axis argument and NOT a
+        # ValueError for squeezing a dimension whose length is not 1
+ with assert_raises(TypeError):
+ np.squeeze(oldsqueeze, axis=0)
+
+        # the new API knows how to handle the axis
+        # argument and will raise a ValueError if
+        # attempting to squeeze an axis that is not
+        # of length 1
+ with assert_raises(ValueError):
+            np.squeeze(np.array([[1], [2], [3]]), axis=0)
+
+ def test_reduce_contiguous(self):
+ # GitHub issue #387
+ a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
+ b = np.add.reduce(np.zeros((2, 1, 2)), 1)
+ assert_(a.flags.c_contiguous)
+ assert_(a.flags.f_contiguous)
+ assert_(b.flags.c_contiguous)
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_object_array_self_reference(self):
+ # Object arrays with references to themselves can cause problems
+ a = np.array(0, dtype=object)
+ a[()] = a
+ assert_raises(RecursionError, int, a)
+ assert_raises(RecursionError, float, a)
+ a[()] = None
+
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
+ def test_object_array_circular_reference(self):
+ # Test the same for a circular reference.
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
+ a[()] = b
+ b[()] = a
+ assert_raises(RecursionError, int, a)
+ # NumPy has no tp_traverse currently, so circular references
+ # cannot be detected. So resolve it:
+ a[()] = None
+
+        # This used to cause 'a' to become self-referencing like the above
+ a = np.array(0, dtype=object)
+ a[...] += 1
+ assert_equal(a, 1)
+
+ def test_object_array_nested(self):
+ # but is fine with a reference to a different array
+ a = np.array(0, dtype=object)
+ b = np.array(0, dtype=object)
+ a[()] = b
+ assert_equal(int(a), int(0))
+ assert_equal(float(a), float(0))
+
+ def test_object_array_self_copy(self):
+        # Copying an object array into itself DECREF'ed items before
+        # INCREF'ing them, causing segmentation faults (gh-3787)
+ a = np.array(object(), dtype=object)
+ np.copyto(a, a)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(a[()]) == 2)
+ a[()].__class__ # will segfault if object was deleted
+
+ def test_zerosize_accumulate(self):
+ "Ticket #1733"
+ x = np.array([[42, 0]], dtype=np.uint32)
+ assert_equal(np.add.accumulate(x[:-1, 0]), [])
+
+ def test_objectarray_setfield(self):
+ # Setfield should not overwrite Object fields with non-Object data
+ x = np.array([1, 2, 3], dtype=object)
+ assert_raises(TypeError, x.setfield, 4, np.int32, 0)
+
+ def test_setting_rank0_string(self):
+ "Ticket #1736"
+ s1 = b"hello1"
+ s2 = b"hello2"
+ a = np.zeros((), dtype="S10")
+ a[()] = s1
+ assert_equal(a, np.array(s1))
+ a[()] = np.array(s2)
+ assert_equal(a, np.array(s2))
+
+ a = np.zeros((), dtype='f4')
+ a[()] = 3
+ assert_equal(a, np.array(3))
+ a[()] = np.array(4)
+ assert_equal(a, np.array(4))
+
+ def test_string_astype(self):
+ "Ticket #1748"
+ s1 = b'black'
+ s2 = b'white'
+ s3 = b'other'
+ a = np.array([[s1], [s2], [s3]])
+ assert_equal(a.dtype, np.dtype('S5'))
+ b = a.astype(np.dtype('S0'))
+ assert_equal(b.dtype, np.dtype('S5'))
+
+ def test_ticket_1756(self):
+ # Ticket #1756
+ s = b'0123456789abcdef'
+ a = np.array([s]*5)
+ for i in range(1, 17):
+ a1 = np.array(a, "|S%d" % i)
+ a2 = np.array([s[:i]]*5)
+ assert_equal(a1, a2)
+
+ def test_fields_strides(self):
+ "gh-2355"
+ r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
+ assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
+ assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
+ assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
+ assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
+
+ def test_alignment_update(self):
+ # Check that alignment flag is updated on stride setting
+ a = np.arange(10)
+ assert_(a.flags.aligned)
+ a.strides = 3
+ assert_(not a.flags.aligned)
+
+ def test_ticket_1770(self):
+ "Should not segfault on python 3k"
+ import numpy as np
+ try:
+ a = np.zeros((1,), dtype=[('f1', 'f')])
+ a['f1'] = 1
+ a['f2'] = 1
+ except ValueError:
+ pass
+ except Exception:
+ raise AssertionError
+
+ def test_ticket_1608(self):
+ "x.flat shouldn't modify data"
+ x = np.array([[1, 2], [3, 4]]).T
+ np.array(x.flat)
+ assert_equal(x, [[1, 3], [2, 4]])
+
+ def test_pickle_string_overwrite(self):
+ import re
+
+ data = np.array([1], dtype='b')
+ blob = pickle.dumps(data, protocol=1)
+ data = pickle.loads(blob)
+
+ # Check that loads does not clobber interned strings
+ s = re.sub("a(.)", "\x01\\1", "a_")
+ assert_equal(s[0], "\x01")
+ data[0] = 0x6a
+ s = re.sub("a(.)", "\x01\\1", "a_")
+ assert_equal(s[0], "\x01")
+
+ def test_pickle_bytes_overwrite(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ data = np.array([1], dtype='b')
+ data = pickle.loads(pickle.dumps(data, protocol=proto))
+ data[0] = 0x7d
+ bytestring = "\x01 ".encode('ascii')
+ assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
+
+ def test_pickle_py2_array_latin1_hack(self):
+ # Check that unpickling hacks in Py3 that support
+ # encoding='latin1' work correctly.
+
+ # Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
+ data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
+ b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
+ b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
+ b"p13\ntp14\nb.")
+ # This should work:
+ result = pickle.loads(data, encoding='latin1')
+ assert_array_equal(result, np.array([129]).astype('b'))
+ # Should not segfault:
+ assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
+
+ def test_pickle_py2_scalar_latin1_hack(self):
+        # Check that the scalar unpickling hack in Py3 that supports
+        # encoding='latin1' works correctly.
+
+ # Python2 output for pickle.dumps(...)
+ datas = [
+ # (original, python2_pickle, koi8r_validity)
+ (np.unicode_('\u6bd2'),
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
+ b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
+ b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
+ 'invalid'),
+
+ (np.float64(9e123),
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
+ b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
+ b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
+ 'invalid'),
+
+ (np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
+ (b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
+ b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
+ b"tp8\nRp9\n."),
+ 'different'),
+ ]
+ for original, data, koi8r_validity in datas:
+ result = pickle.loads(data, encoding='latin1')
+ assert_equal(result, original)
+
+            # Decoding under a non-latin1 encoding (e.g. KOI8-R) can
+            # produce bad results, but should not segfault.
+ if koi8r_validity == 'different':
+                # Unicode code points happen to lie within latin1,
+                # but differ in koi8-r, resulting in silently
+                # bogus results
+ result = pickle.loads(data, encoding='koi8-r')
+ assert_(result != original)
+ elif koi8r_validity == 'invalid':
+                # Unicode code points outside latin1, so decoding
+                # results in an encoding exception
+ assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
+ else:
+ raise ValueError(koi8r_validity)
+
+ def test_structured_type_to_object(self):
+ a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
+ a_obj = np.empty((2,), dtype=object)
+ a_obj[0] = (0, 1)
+ a_obj[1] = (3, 2)
+ # astype records -> object
+ assert_equal(a_rec.astype(object), a_obj)
+ # '=' records -> object
+ b = np.empty_like(a_obj)
+ b[...] = a_rec
+ assert_equal(b, a_obj)
+ # '=' object -> records
+ b = np.empty_like(a_rec)
+ b[...] = a_obj
+ assert_equal(b, a_rec)
+
+ def test_assign_obj_listoflists(self):
+ # Ticket # 1870
+ # The inner list should get assigned to the object elements
+ a = np.zeros(4, dtype=object)
+ b = a.copy()
+ a[0] = [1]
+ a[1] = [2]
+ a[2] = [3]
+ a[3] = [4]
+ b[...] = [[1], [2], [3], [4]]
+ assert_equal(a, b)
+ # The first dimension should get broadcast
+ a = np.zeros((2, 2), dtype=object)
+ a[...] = [[1, 2]]
+ assert_equal(a, [[1, 2], [1, 2]])
+
+ @pytest.mark.slow_pypy
+ def test_memoryleak(self):
+ # Ticket #1917 - ensure that array data doesn't leak
+ for i in range(1000):
+ # 100MB times 1000 would give 100GB of memory usage if it leaks
+ a = np.empty((100000000,), dtype='i1')
+ del a
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_ufunc_reduce_memoryleak(self):
+ a = np.arange(6)
+ acnt = sys.getrefcount(a)
+ np.add.reduce(a)
+ assert_equal(sys.getrefcount(a), acnt)
+
+ def test_search_sorted_invalid_arguments(self):
+ # Ticket #2021, should not segfault.
+ x = np.arange(0, 4, dtype='datetime64[D]')
+ assert_raises(TypeError, x.searchsorted, 1)
+
+ def test_string_truncation(self):
+ # Ticket #1990 - Data can be truncated in creation of an array from a
+ # mixed sequence of numeric values and strings (gh-2583)
+ for val in [True, 1234, 123.4, complex(1, 234)]:
+ for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]:
+ b = np.array([val, tostr('xx')], dtype=dtype)
+ assert_equal(tostr(b[0]), tostr(val))
+ b = np.array([tostr('xx'), val], dtype=dtype)
+ assert_equal(tostr(b[1]), tostr(val))
+
+ # test also with longer strings
+ b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype)
+ assert_equal(tostr(b[0]), tostr(val))
+ b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype)
+ assert_equal(tostr(b[1]), tostr(val))
+
+ def test_string_truncation_ucs2(self):
+        # Ticket #2081. Python compiled with two-byte unicode
+        # can lead to truncation if itemsize is not properly
+        # adjusted for NumPy's four-byte unicode.
+ a = np.array(['abcd'])
+ assert_equal(a.dtype.itemsize, 16)
+
+ def test_unique_stable(self):
+        # Ticket #2063: must always choose a stable sort for argsort to
+        # get consistent results
+ v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
+ res = np.unique(v, return_index=True)
+        tgt = (np.array([0, 1, 2]), np.array([0, 5, 11]))
+ assert_equal(res, tgt)
+
+ def test_unicode_alloc_dealloc_match(self):
+ # Ticket #1578, the mismatch only showed up when running
+ # python-debug for python versions >= 2.7, and then as
+ # a core dump and error message.
+ a = np.array(['abc'], dtype=np.unicode_)[0]
+ del a
+
+ def test_refcount_error_in_clip(self):
+ # Ticket #1588
+ a = np.zeros((2,), dtype='>i2').clip(min=0)
+ x = a + a
+ # This used to segfault:
+ y = str(x)
+ # Check the final string:
+ assert_(y == "[0 0]")
+
+ def test_searchsorted_wrong_dtype(self):
+ # Ticket #2189, it used to segfault, so we check that it raises the
+ # proper exception.
+ a = np.array([('a', 1)], dtype='S1, int')
+ assert_raises(TypeError, np.searchsorted, a, 1.2)
+ # Ticket #2066, similar problem:
+ dtype = np.format_parser(['i4', 'i4'], [], [])
+ a = np.recarray((2,), dtype)
+ a[...] = [(1, 2), (3, 4)]
+ assert_raises(TypeError, np.searchsorted, a, 1)
+
+ def test_complex64_alignment(self):
+ # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
+ dtt = np.complex64
+ arr = np.arange(10, dtype=dtt)
+ # 2D array
+ arr2 = np.reshape(arr, (2, 5))
+ # Fortran write followed by (C or F) read caused bus error
+ data_str = arr2.tobytes('F')
+ data_back = np.ndarray(arr2.shape,
+ arr2.dtype,
+ buffer=data_str,
+ order='F')
+ assert_array_equal(arr2, data_back)
+
+ def test_structured_count_nonzero(self):
+ arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
+ count = np.count_nonzero(arr)
+ assert_equal(count, 0)
+
+ def test_copymodule_preserves_f_contiguity(self):
+ a = np.empty((2, 2), order='F')
+ b = copy.copy(a)
+ c = copy.deepcopy(a)
+ assert_(b.flags.fortran)
+ assert_(b.flags.f_contiguous)
+ assert_(c.flags.fortran)
+ assert_(c.flags.f_contiguous)
+
+ def test_fortran_order_buffer(self):
+ import numpy as np
+ a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
+ arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
+ arr2 = np.array([[['H', 'e', 'l', 'l', 'o'],
+ ['F', 'o', 'o', 'b', '']]])
+ assert_array_equal(arr, arr2)
+
+ def test_assign_from_sequence_error(self):
+ # Ticket #4024.
+ arr = np.array([1, 2, 3])
+ assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
+ arr.__setitem__(slice(None), [9])
+ assert_equal(arr, [9, 9, 9])
+
+ def test_format_on_flex_array_element(self):
+ # Ticket #4369.
+ dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
+ arr = np.array([('2000-01-01', 1)], dt)
+ formatted = '{0}'.format(arr[0])
+ assert_equal(formatted, str(arr[0]))
+
+ def test_deepcopy_on_0d_array(self):
+ # Ticket #3311.
+ arr = np.array(3)
+ arr_cp = copy.deepcopy(arr)
+
+ assert_equal(arr, arr_cp)
+ assert_equal(arr.shape, arr_cp.shape)
+ assert_equal(int(arr), int(arr_cp))
+ assert_(arr is not arr_cp)
+ assert_(isinstance(arr_cp, type(arr)))
+
+ def test_deepcopy_F_order_object_array(self):
+ # Ticket #6456.
+ a = {'a': 1}
+ b = {'b': 2}
+ arr = np.array([[a, b], [a, b]], order='F')
+ arr_cp = copy.deepcopy(arr)
+
+ assert_equal(arr, arr_cp)
+ assert_(arr is not arr_cp)
+ # Ensure that we have actually copied the item.
+ assert_(arr[0, 1] is not arr_cp[1, 1])
+ # Ensure we are allowed to have references to the same object.
+ assert_(arr[0, 1] is arr[1, 1])
+ # Check the references hold for the copied objects.
+ assert_(arr_cp[0, 1] is arr_cp[1, 1])
+
+ def test_deepcopy_empty_object_array(self):
+ # Ticket #8536.
+ # Deepcopy should succeed
+ a = np.array([], dtype=object)
+ b = copy.deepcopy(a)
+ assert_(a.shape == b.shape)
+
+ def test_bool_subscript_crash(self):
+ # gh-4494
+ c = np.rec.array([(1, 2, 3), (4, 5, 6)])
+ masked = c[np.array([True, False])]
+ base = masked.base
+ del masked, c
+ base.dtype
+
+ def test_richcompare_crash(self):
+ # gh-4613
+ import operator as op
+
+ # dummy class where __array__ throws exception
+ class Foo:
+ __array_priority__ = 1002
+
+ def __array__(self, *args, **kwargs):
+ raise Exception()
+
+ rhs = Foo()
+ lhs = np.array(1)
+ for f in [op.lt, op.le, op.gt, op.ge]:
+ assert_raises(TypeError, f, lhs, rhs)
+ assert_(not op.eq(lhs, rhs))
+ assert_(op.ne(lhs, rhs))
+
+ def test_richcompare_scalar_and_subclass(self):
+ # gh-4709
+ class Foo(np.ndarray):
+ def __eq__(self, other):
+ return "OK"
+
+ x = np.array([1, 2, 3]).view(Foo)
+ assert_equal(10 == x, "OK")
+ assert_equal(np.int32(10) == x, "OK")
+ assert_equal(np.array([10]) == x, "OK")
+
+ def test_pickle_empty_string(self):
+ # gh-3926
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test_string = np.string_('')
+ assert_equal(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)), test_string)
+
+ def test_frompyfunc_many_args(self):
+ # gh-5672
+
+ def passer(*args):
+ pass
+
+ assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
+
+ def test_repeat_broadcasting(self):
+ # gh-5743
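+        # A length-1 repeats list must broadcast exactly like the scalar 2
+        # for every axis, including axis=None.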
+ a = np.arange(60).reshape(3, 4, 5)
+ for axis in chain(range(-a.ndim, a.ndim), [None]):
+ assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
+
+ def test_frompyfunc_nout_0(self):
+ # gh-2014
+
+ def f(x):
+ x[0], x[-1] = x[-1], x[0]
+
+ uf = np.frompyfunc(f, 1, 0)
+ a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object)
+ assert_equal(uf(a), ())
+ expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object)
+ assert_array_equal(a, expected)
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_leak_in_structured_dtype_comparison(self):
+ # gh-6250
+ recordtype = np.dtype([('a', np.float64),
+ ('b', np.int32),
+ ('d', (str, 5))])
+
+ # Simple case
+ a = np.zeros(2, dtype=recordtype)
+ for i in range(100):
+ a == a
+ assert_(sys.getrefcount(a) < 10)
+
+ # The case in the bug report.
+ before = sys.getrefcount(a)
+ u, v = a[0], a[1]
+ u == v
+ del u, v
+ gc.collect()
+ after = sys.getrefcount(a)
+ assert_equal(before, after)
+
+ def test_empty_percentile(self):
+ # gh-6530 / gh-6553
+ assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
+
+ def test_void_compare_segfault(self):
+ # gh-6922. The following should not segfault
+ a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
+ a.sort()
+
+ def test_reshape_size_overflow(self):
+ # gh-7455
+ a = np.ones(20)[::2]
+ if np.dtype(np.intp).itemsize == 8:
+ # 64 bit. The following are the prime factors of 2**63 + 5,
+ # plus a leading 2, so when multiplied together as int64,
+ # the result overflows to a total size of 10.
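+            # (2 * 13 * 419 * 691 * 823 * 2977518503 == 2**64 + 10, which
+            # wraps to 10 -- exactly a.size -- so a naive size check would
+            # accept the shape; reshape has to detect the overflow.)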
+ new_shape = (2, 13, 419, 691, 823, 2977518503)
+ else:
+ # 32 bit. The following are the prime factors of 2**31 + 5,
+ # plus a leading 2, so when multiplied together as int32,
+ # the result overflows to a total size of 10.
+ new_shape = (2, 7, 7, 43826197)
+ assert_raises(ValueError, a.reshape, new_shape)
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_invalid_structured_dtypes(self):
+ # gh-2865
+ # mapping python objects to other dtypes
+ assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))
+ assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))
+ assert_raises(ValueError, np.dtype,
+ ('i8', [('name', [('name', 'O')])]))
+ assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))
+ assert_raises(ValueError, np.dtype, ('i8', 'O'))
+ # wrong number/type of tuple elements in dict
+ assert_raises(ValueError, np.dtype,
+ ('i', {'name': ('i', 0, 'title', 'oops')}))
+ assert_raises(ValueError, np.dtype,
+ ('i', {'name': ('i', 'wrongtype', 'title')}))
+ # disallowed as of 1.13
+ assert_raises(ValueError, np.dtype,
+ ([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))
+ # allowed as a special case due to existing use, see gh-2798
+ a = np.ones(1, dtype=('O', [('name', 'O')]))
+ assert_equal(a[0], 1)
+ # In particular, the above union dtype (and union dtypes in general)
+ # should mainly behave like the main (object) dtype:
+ assert a[0] is a.item()
+ assert type(a[0]) is int
+
+ def test_correct_hash_dict(self):
+ # gh-8887 - __hash__ would be None despite tp_hash being set
+ all_types = set(np.sctypeDict.values()) - {np.void}
+ for t in all_types:
+ val = t()
+
+ try:
+ hash(val)
+            except TypeError:
+                assert_equal(t.__hash__, None)
+            else:
+                assert_(t.__hash__ is not None)
+
+ def test_scalar_copy(self):
+ scalar_types = set(np.sctypeDict.values())
+ values = {
+ np.void: b"a",
+ np.bytes_: b"a",
+ np.unicode_: "a",
+ np.datetime64: "2017-08-25",
+ }
+ for sctype in scalar_types:
+ item = sctype(values.get(sctype, 1))
+ item2 = copy.copy(item)
+ assert_equal(item, item2)
+
+ def test_void_item_memview(self):
+ va = np.zeros(10, 'V4')
+ x = va[:1].item()
+ va[0] = b'\xff\xff\xff\xff'
+ del va
+ assert_equal(x, b'\x00\x00\x00\x00')
+
+ def test_void_getitem(self):
+ # Test fix for gh-11668.
+ assert_(np.array([b'a'], 'V1').astype('O') == b'a')
+ assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
+ assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
+ assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
+
+ def test_structarray_title(self):
+ # The following used to segfault on pypy, due to NPY_TITLE_KEY
+ # not working properly and resulting to double-decref of the
+ # structured array field items:
+ # See: https://bitbucket.org/pypy/pypy/issues/2789
+ for j in range(5):
+ structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
+ structure[0]['x'] = np.array([2])
+ gc.collect()
+
+ def test_dtype_scalar_squeeze(self):
+ # gh-11384
+ values = {
+ 'S': b"a",
+ 'M': "2018-06-20",
+ }
+ for ch in np.typecodes['All']:
+ if ch in 'O':
+ continue
+ sctype = np.dtype(ch).type
+ scvalue = sctype(values.get(ch, 3))
+ for axis in [None, ()]:
+ squeezed = scvalue.squeeze(axis=axis)
+ assert_equal(squeezed, scvalue)
+ assert_equal(type(squeezed), type(scvalue))
+
+ def test_field_access_by_title(self):
+ # gh-11507
+ s = 'Some long field name'
+ if HAS_REFCOUNT:
+ base = sys.getrefcount(s)
+ t = np.dtype([((s, 'f1'), np.float64)])
+ data = np.zeros(10, t)
+ for i in range(10):
+ str(data[['f1']])
+ if HAS_REFCOUNT:
+ assert_(base <= sys.getrefcount(s))
+
+ @pytest.mark.parametrize('val', [
+ # arrays and scalars
+ np.ones((10, 10), dtype='int32'),
+ np.uint64(10),
+ ])
+ @pytest.mark.parametrize('protocol',
+ range(2, pickle.HIGHEST_PROTOCOL + 1)
+ )
+ def test_pickle_module(self, protocol, val):
+ # gh-12837
+ s = pickle.dumps(val, protocol)
+ assert b'_multiarray_umath' not in s
+ if protocol == 5 and len(val.shape) > 0:
+ # unpickling ndarray goes through _frombuffer for protocol 5
+ assert b'numpy.core.numeric' in s
+ else:
+ assert b'numpy.core.multiarray' in s
+
+ def test_object_casting_errors(self):
+ # gh-11993 update to ValueError (see gh-16909), since strings can in
+ # principle be converted to complex, but this string cannot.
+ arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+ assert_raises(ValueError, arr.astype, 'c8')
+
+    def test_ediff1d_casting(self):
+ # gh-12711
+ x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+ res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+
+ # The use of safe casting means, that 1<<20 is cast unsafely, an
+ # error may be better, but currently there is no mechanism for it.
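+        # (Truncating 1<<20 to int16 keeps only the low 16 bits, which are
+        # all zero, hence the zeros at both ends of the result.)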
+ res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))
+ assert_equal(res, [0, 1, 2, 3, -7, 0])
+
+ def test_pickle_datetime64_array(self):
+ # gh-12745 (would fail with pickle5 installed)
+ d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+ arr = np.array([d])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dumped = pickle.dumps(arr, protocol=proto)
+ assert_equal(pickle.loads(dumped), arr)
+
+ def test_bad_array_interface(self):
+ class T:
+ __array_interface__ = {}
+
+ with assert_raises(ValueError):
+ np.array([T()])
+
+ def test_2d__array__shape(self):
+ class T:
+ def __array__(self):
+                return np.ndarray(shape=(0, 0))
+
+ # Make sure __array__ is used instead of Sequence methods.
+ def __iter__(self):
+ return iter([])
+
+ def __getitem__(self, idx):
+ raise AssertionError("__getitem__ was called")
+
+ def __len__(self):
+ return 0
+
+
+ t = T()
+ # gh-13659, would raise in broadcasting [x=t for x in result]
+ arr = np.array([t])
+ assert arr.shape == (1, 0, 0)
+
+ @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+ def test_to_ctypes(self):
+        # gh-14214
+ arr = np.zeros((2 ** 31 + 1,), 'b')
+ assert arr.size * arr.itemsize > 2 ** 31
+ c_arr = np.ctypeslib.as_ctypes(arr)
+ assert_equal(c_arr._length_, arr.size)
+
+ def test_complex_conversion_error(self):
+ # gh-17068
+ with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
+ complex(np.array("now", np.datetime64))
+
+ def test__array_interface__descr(self):
+ # gh-17068
+ dt = np.dtype(dict(names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.int64, np.int64]))
+ descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
+ assert descr == [('', '|V8')] # instead of [(b'', '|V8')]
+
+ @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+ @requires_memory(free_bytes=9e9)
+ def test_dot_big_stride(self):
+ # gh-17111
+ # blas stride = stride//itemsize > int32 max
+ int32_max = np.iinfo(np.int32).max
+ n = int32_max + 3
+ a = np.empty([n], dtype=np.float32)
+ b = a[::n-1]
+ b[...] = 1
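+        # b contains only elements 0 and n-1 of a, so np.dot(b, b) == 2.0,
+        # while its element stride of n-1 overflows a 32-bit BLAS int.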
+ assert b.strides[0] > int32_max * b.dtype.itemsize
+ assert np.dot(b, b) == 2.0
+
+ def test_frompyfunc_name(self):
+        # Name conversion was failing for Python 3 strings,
+        # resulting in the default '?' name. Also test UTF-8
+        # encoding using a non-ASCII name.
+ def cassé(x):
+ return x
+
+ f = np.frompyfunc(cassé, 1, 1)
+ assert str(f) == "<ufunc 'cassé (vectorized)'>"
+
+ @pytest.mark.parametrize("operation", [
+ 'add', 'subtract', 'multiply', 'floor_divide',
+ 'conjugate', 'fmod', 'square', 'reciprocal',
+ 'power', 'absolute', 'negative', 'positive',
+ 'greater', 'greater_equal', 'less',
+ 'less_equal', 'equal', 'not_equal', 'logical_and',
+ 'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
+ 'bitwise_xor', 'invert', 'left_shift', 'right_shift',
+ 'gcd', 'lcm'
+ ]
+ )
+ @pytest.mark.parametrize("order", [
+ ('b->', 'B->'),
+ ('h->', 'H->'),
+ ('i->', 'I->'),
+ ('l->', 'L->'),
+ ('q->', 'Q->'),
+ ]
+ )
+ def test_ufunc_order(self, operation, order):
+ # gh-18075
+        # Ensure signed types come before unsigned types
+ def get_idx(string, str_lst):
+ for i, s in enumerate(str_lst):
+ if string in s:
+ return i
+ raise ValueError(f"{string} not in list")
+ types = getattr(np, operation).types
+ assert get_idx(order[0], types) < get_idx(order[1], types), (
+ f"Unexpected types order of ufunc in {operation}"
+ f"for {order}. Possible fix: Use signed before unsigned"
+ "in generate_umath.py")
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py
new file mode 100644
index 00000000..da976d64
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_ctors.py
@@ -0,0 +1,186 @@
+"""
+Test the scalar constructors, which also do type-coercion
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_equal, assert_almost_equal, assert_warns,
+ )
+
+class TestFromString:
+ def test_floating(self):
+ # Ticket #640, floats from string
+ fsingle = np.single('1.234')
+ fdouble = np.double('1.234')
+ flongdouble = np.longdouble('1.234')
+ assert_almost_equal(fsingle, 1.234)
+ assert_almost_equal(fdouble, 1.234)
+ assert_almost_equal(flongdouble, 1.234)
+
+ def test_floating_overflow(self):
+ """ Strings containing an unrepresentable float overflow """
+ fhalf = np.half('1e10000')
+ assert_equal(fhalf, np.inf)
+ fsingle = np.single('1e10000')
+ assert_equal(fsingle, np.inf)
+ fdouble = np.double('1e10000')
+ assert_equal(fdouble, np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '1e10000')
+ assert_equal(flongdouble, np.inf)
+
+ fhalf = np.half('-1e10000')
+ assert_equal(fhalf, -np.inf)
+ fsingle = np.single('-1e10000')
+ assert_equal(fsingle, -np.inf)
+ fdouble = np.double('-1e10000')
+ assert_equal(fdouble, -np.inf)
+ flongdouble = assert_warns(RuntimeWarning, np.longdouble, '-1e10000')
+ assert_equal(flongdouble, -np.inf)
+
+
+class TestExtraArgs:
+ def test_superclass(self):
+ # try both positional and keyword arguments
+ s = np.str_(b'\\x61', encoding='unicode-escape')
+ assert s == 'a'
+ s = np.str_(b'\\x61', 'unicode-escape')
+ assert s == 'a'
+
+ # previously this would return '\\xx'
+ with pytest.raises(UnicodeDecodeError):
+ np.str_(b'\\xx', encoding='unicode-escape')
+ with pytest.raises(UnicodeDecodeError):
+ np.str_(b'\\xx', 'unicode-escape')
+
+ # superclass fails, but numpy succeeds
+ assert np.bytes_(-2) == b'-2'
+
+ def test_datetime(self):
+ dt = np.datetime64('2000-01', ('M', 2))
+ assert np.datetime_data(dt) == ('M', 2)
+
+ with pytest.raises(TypeError):
+ np.datetime64('2000', garbage=True)
+
+ def test_bool(self):
+ with pytest.raises(TypeError):
+ np.bool_(False, garbage=True)
+
+ def test_void(self):
+ with pytest.raises(TypeError):
+ np.void(b'test', garbage=True)
+
+
+class TestFromInt:
+ def test_intp(self):
+ # Ticket #99
+ assert_equal(1024, np.intp(1024))
+
+ def test_uint64_from_negative(self):
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
+
+
+int_types = [np.byte, np.short, np.intc, np.int_, np.longlong]
+uint_types = [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong]
+float_types = [np.half, np.single, np.double, np.longdouble]
+cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
+
+
+class TestArrayFromScalar:
+ """ gh-15467 """
+
+ def _do_test(self, t1, t2):
+ x = t1(2)
+ arr = np.array(x, dtype=t2)
+ # type should be preserved exactly
+ if t2 is None:
+ assert arr.dtype.type is t1
+ else:
+ assert arr.dtype.type is t2
+
+ @pytest.mark.parametrize('t1', int_types + uint_types)
+ @pytest.mark.parametrize('t2', int_types + uint_types + [None])
+ def test_integers(self, t1, t2):
+ return self._do_test(t1, t2)
+
+ @pytest.mark.parametrize('t1', float_types)
+ @pytest.mark.parametrize('t2', float_types + [None])
+ def test_reals(self, t1, t2):
+ return self._do_test(t1, t2)
+
+ @pytest.mark.parametrize('t1', cfloat_types)
+ @pytest.mark.parametrize('t2', cfloat_types + [None])
+ def test_complex(self, t1, t2):
+ return self._do_test(t1, t2)
+
+
+@pytest.mark.parametrize("length",
+ [5, np.int8(5), np.array(5, dtype=np.uint16)])
+def test_void_via_length(length):
+ res = np.void(length)
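+    # An integer argument is interpreted as a length: np.void(5) allocates
+    # five zero bytes (all parametrized lengths above equal 5).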
+ assert type(res) is np.void
+ assert res.item() == b"\0" * 5
+ assert res.dtype == "V5"
+
+@pytest.mark.parametrize("bytes_",
+ [b"spam", np.array(567.)])
+def test_void_from_byteslike(bytes_):
+ res = np.void(bytes_)
+ expected = bytes(bytes_)
+ assert type(res) is np.void
+ assert res.item() == expected
+
+ # Passing dtype can extend it (this is how filling works)
+ res = np.void(bytes_, dtype="V100")
+ assert type(res) is np.void
+ assert res.item()[:len(expected)] == expected
+ assert res.item()[len(expected):] == b"\0" * (res.nbytes - len(expected))
+ # As well as shorten:
+ res = np.void(bytes_, dtype="V4")
+ assert type(res) is np.void
+ assert res.item() == expected[:4]
+
+def test_void_arraylike_trumps_byteslike():
+ # The memoryview is converted as an array-like of shape (18,)
+ # rather than a single bytes-like of that length.
+ m = memoryview(b"just one mintleaf?")
+ res = np.void(m)
+ assert type(res) is np.ndarray
+ assert res.dtype == "V1"
+ assert res.shape == (18,)
+
+def test_void_dtype_arg():
+ # Basic test for the dtype argument (positional and keyword)
+ res = np.void((1, 2), dtype="i,i")
+ assert res.item() == (1, 2)
+ res = np.void((2, 3), "i,i")
+ assert res.item() == (2, 3)
+
+@pytest.mark.parametrize("data",
+ [5, np.int8(5), np.array(5, dtype=np.uint16)])
+def test_void_from_integer_with_dtype(data):
+ # The "length" meaning is ignored, rather data is used:
+ res = np.void(data, dtype="i,i")
+ assert type(res) is np.void
+ assert res.dtype == "i,i"
+ assert res["f0"] == 5 and res["f1"] == 5
+
+def test_void_from_structure():
+ dtype = np.dtype([('s', [('f', 'f8'), ('u', 'U1')]), ('i', 'i2')])
+ data = np.array(((1., 'a'), 2), dtype=dtype)
+ res = np.void(data[()], dtype=dtype)
+ assert type(res) is np.void
+ assert res.dtype == dtype
+ assert res == data[()]
+
+def test_void_bad_dtype():
+ with pytest.raises(TypeError,
+ match="void: descr must be a `void.*int64"):
+ np.void(4, dtype="i8")
+
+    # Subarray dtype (with shape `(4,)`) is rejected:
+ with pytest.raises(TypeError,
+ match=r"void: descr must be a `void.*\(4,\)"):
+ np.void(4, dtype="4i")
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py
new file mode 100644
index 00000000..a53e47b1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalar_methods.py
@@ -0,0 +1,212 @@
+"""
+Test the scalar methods, e.g. as_integer_ratio, is_integer and bit_count
+"""
+import sys
+import fractions
+import platform
+import types
+from typing import Any, Type
+
+import pytest
+import numpy as np
+
+from numpy.testing import assert_equal, assert_raises
+
+
+class TestAsIntegerRatio:
+ # derived in part from the cpython test "test_floatasratio"
+
+ @pytest.mark.parametrize("ftype", [
+ np.half, np.single, np.double, np.longdouble])
+ @pytest.mark.parametrize("f, ratio", [
+ (0.875, (7, 8)),
+ (-0.875, (-7, 8)),
+ (0.0, (0, 1)),
+ (11.5, (23, 2)),
+ ])
+ def test_small(self, ftype, f, ratio):
+ assert_equal(ftype(f).as_integer_ratio(), ratio)
+
+ @pytest.mark.parametrize("ftype", [
+ np.half, np.single, np.double, np.longdouble])
+ def test_simple_fractions(self, ftype):
+ R = fractions.Fraction
+ assert_equal(R(0, 1),
+ R(*ftype(0.0).as_integer_ratio()))
+ assert_equal(R(5, 2),
+ R(*ftype(2.5).as_integer_ratio()))
+ assert_equal(R(1, 2),
+ R(*ftype(0.5).as_integer_ratio()))
+ assert_equal(R(-2100, 1),
+ R(*ftype(-2100.0).as_integer_ratio()))
+
+ @pytest.mark.parametrize("ftype", [
+ np.half, np.single, np.double, np.longdouble])
+ def test_errors(self, ftype):
+ assert_raises(OverflowError, ftype('inf').as_integer_ratio)
+ assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
+ assert_raises(ValueError, ftype('nan').as_integer_ratio)
+
+ def test_against_known_values(self):
+ R = fractions.Fraction
+ assert_equal(R(1075, 512),
+ R(*np.half(2.1).as_integer_ratio()))
+ assert_equal(R(-1075, 512),
+ R(*np.half(-2.1).as_integer_ratio()))
+ assert_equal(R(4404019, 2097152),
+ R(*np.single(2.1).as_integer_ratio()))
+ assert_equal(R(-4404019, 2097152),
+ R(*np.single(-2.1).as_integer_ratio()))
+ assert_equal(R(4728779608739021, 2251799813685248),
+ R(*np.double(2.1).as_integer_ratio()))
+ assert_equal(R(-4728779608739021, 2251799813685248),
+ R(*np.double(-2.1).as_integer_ratio()))
+ # longdouble is platform dependent
+
+ @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
+ # dtype test cases generated using hypothesis
+ # first five generated cases per dtype
+ (np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
+ 0.527350517124794, 0.8308562335072596],
+ [0, 1, 0, -8, 12]),
+ (np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
+ 0.17389442853722373, 0.7956044195067877],
+ [0, 12, 10, 17, -26]),
+ (np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
+ 0.45780736035689296, 0.5906586745934036],
+ [0, -801, 51, 194, -653]),
+ pytest.param(
+ np.longdouble,
+ [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
+ 0.9620175814461964],
+ [0, -7400, 14266, -7822, -8721],
+ marks=[
+ pytest.mark.skipif(
+ np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double"),
+ pytest.mark.skipif(
+ platform.machine().startswith("ppc"),
+ reason="IBM double double"),
+ ]
+ )
+ ])
+ def test_roundtrip(self, ftype, frac_vals, exp_vals):
+ for frac, exp in zip(frac_vals, exp_vals):
+ f = np.ldexp(ftype(frac), exp)
+ assert f.dtype == ftype
+ n, d = f.as_integer_ratio()
+
+ try:
+ nf = np.longdouble(n)
+ df = np.longdouble(d)
+ except (OverflowError, RuntimeWarning):
+ # the values may not fit in any float type
+ pytest.skip("longdouble too small on this platform")
+
+ assert_equal(nf / df, f, "{}/{}".format(n, d))
+
+
+class TestIsInteger:
+ @pytest.mark.parametrize("str_value", ["inf", "nan"])
+ @pytest.mark.parametrize("code", np.typecodes["Float"])
+ def test_special(self, code: str, str_value: str) -> None:
+ cls = np.dtype(code).type
+ value = cls(str_value)
+ assert not value.is_integer()
+
+ @pytest.mark.parametrize(
+ "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
+ )
+ def test_true(self, code: str) -> None:
+ float_array = np.arange(-5, 5).astype(code)
+ for value in float_array:
+ assert value.is_integer()
+
+ @pytest.mark.parametrize("code", np.typecodes["Float"])
+ def test_false(self, code: str) -> None:
+ float_array = np.arange(-5, 5).astype(code)
+ float_array *= 1.1
+ for value in float_array:
+ if value == 0:
+ continue
+ assert not value.is_integer()
+
+
+@pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
+class TestClassGetItem:
+ @pytest.mark.parametrize("cls", [
+ np.number,
+ np.integer,
+ np.inexact,
+ np.unsignedinteger,
+ np.signedinteger,
+ np.floating,
+ ])
+ def test_abc(self, cls: Type[np.number]) -> None:
+ alias = cls[Any]
+ assert isinstance(alias, types.GenericAlias)
+ assert alias.__origin__ is cls
+
+ def test_abc_complexfloating(self) -> None:
+ alias = np.complexfloating[Any, Any]
+ assert isinstance(alias, types.GenericAlias)
+ assert alias.__origin__ is np.complexfloating
+
+ @pytest.mark.parametrize("arg_len", range(4))
+ def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
+ arg_tup = (Any,) * arg_len
+ if arg_len in (1, 2):
+ assert np.complexfloating[arg_tup]
+ else:
+ match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
+ with pytest.raises(TypeError, match=match):
+ np.complexfloating[arg_tup]
+
+ @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
+ def test_abc_non_numeric(self, cls: Type[np.generic]) -> None:
+ with pytest.raises(TypeError):
+ cls[Any]
+
+ @pytest.mark.parametrize("code", np.typecodes["All"])
+ def test_concrete(self, code: str) -> None:
+ cls = np.dtype(code).type
+ with pytest.raises(TypeError):
+ cls[Any]
+
+ @pytest.mark.parametrize("arg_len", range(4))
+ def test_subscript_tuple(self, arg_len: int) -> None:
+ arg_tup = (Any,) * arg_len
+ if arg_len == 1:
+ assert np.number[arg_tup]
+ else:
+ with pytest.raises(TypeError):
+ np.number[arg_tup]
+
+ def test_subscript_scalar(self) -> None:
+ assert np.number[Any]
+
+
+@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
+@pytest.mark.parametrize("cls", [np.number, np.complexfloating, np.int64])
+def test_class_getitem_38(cls: Type[np.number]) -> None:
+ match = "Type subscription requires python >= 3.9"
+ with pytest.raises(TypeError, match=match):
+ cls[Any]
+
+
+class TestBitCount:
+ # derived in part from the cpython test "test_bit_count"
+
+ @pytest.mark.parametrize("itype", np.sctypes['int']+np.sctypes['uint'])
+ def test_small(self, itype):
+ for a in range(max(np.iinfo(itype).min, 0), 128):
+ msg = f"Smoke test for {itype}({a}).bit_count()"
+ assert itype(a).bit_count() == bin(a).count("1"), msg
+
+ def test_bit_count(self):
+ for exp in [10, 17, 63]:
+ a = 2**exp
+ assert np.uint64(a).bit_count() == 1
+ assert np.uint64(a - 1).bit_count() == exp
+ assert np.uint64(a ^ 63).bit_count() == 7
+ assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py
new file mode 100644
index 00000000..0e6ab101
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarbuffer.py
@@ -0,0 +1,153 @@
+"""
+Test scalar buffer interface adheres to PEP 3118
+"""
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.core._multiarray_tests import get_buffer_info
+import pytest
+
+from numpy.testing import assert_, assert_equal, assert_raises
+
+# PEP3118 format strings for native (standard alignment and byteorder) types
+scalars_and_codes = [
+ (np.bool_, '?'),
+ (np.byte, 'b'),
+ (np.short, 'h'),
+ (np.intc, 'i'),
+ (np.int_, 'l'),
+ (np.longlong, 'q'),
+ (np.ubyte, 'B'),
+ (np.ushort, 'H'),
+ (np.uintc, 'I'),
+ (np.uint, 'L'),
+ (np.ulonglong, 'Q'),
+ (np.half, 'e'),
+ (np.single, 'f'),
+ (np.double, 'd'),
+ (np.longdouble, 'g'),
+ (np.csingle, 'Zf'),
+ (np.cdouble, 'Zd'),
+ (np.clongdouble, 'Zg'),
+]
+scalars_only, codes_only = zip(*scalars_and_codes)
+
+
+class TestScalarPEP3118:
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_match_array(self, scalar):
+ x = scalar()
+ a = np.array([], dtype=np.dtype(scalar))
+ mv_x = memoryview(x)
+ mv_a = memoryview(a)
+ assert_equal(mv_x.format, mv_a.format)
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_dim(self, scalar):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
+
+ @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
+ def test_scalar_code_and_properties(self, scalar, code):
+ x = scalar()
+ expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
+ shape=(), format=code, readonly=True)
+
+ mv_x = memoryview(x)
+ assert self._as_dict(mv_x) == expected
+
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_buffers_readonly(self, scalar):
+ x = scalar()
+ with pytest.raises(BufferError, match="scalar buffer is readonly"):
+ get_buffer_info(x, ["WRITABLE"])
+
+ def test_void_scalar_structured_data(self):
+ dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
+ x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
+ assert_(isinstance(x, np.void))
+ mv_x = memoryview(x)
+ expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
+ expected_size += 2 * np.dtype(np.float64).itemsize
+ assert_equal(mv_x.itemsize, expected_size)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
+
+ # check scalar format string against ndarray format string
+ a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
+ assert_(isinstance(a, np.ndarray))
+ mv_a = memoryview(a)
+ assert_equal(mv_x.itemsize, mv_a.itemsize)
+ assert_equal(mv_x.format, mv_a.format)
+
+ # Check that we do not allow writeable buffer export (technically
+ # we could allow it sometimes here...)
+ with pytest.raises(BufferError, match="scalar buffer is readonly"):
+ get_buffer_info(x, ["WRITABLE"])
+
+ def _as_dict(self, m):
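+        """Gather a memoryview's PEP 3118 attributes into a comparable dict."""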
+ return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+ ndim=m.ndim, format=m.format, readonly=m.readonly)
+
+ def test_datetime_memoryview(self):
+ # gh-11656
+ # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+
+ dt1 = np.datetime64('2016-01-01')
+ dt2 = np.datetime64('2017-01-01')
+ expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
+ format='B', readonly=True)
+ v = memoryview(dt1)
+ assert self._as_dict(v) == expected
+
+ v = memoryview(dt2 - dt1)
+ assert self._as_dict(v) == expected
+
+ dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+ a = np.empty(1, dt)
+ # Fails to create a PEP 3118 valid buffer
+ assert_raises((ValueError, BufferError), memoryview, a[0])
+
+ # Check that we do not allow writeable buffer export
+ with pytest.raises(BufferError, match="scalar buffer is readonly"):
+ get_buffer_info(dt1, ["WRITABLE"])
+
+ @pytest.mark.parametrize('s', [
+ pytest.param("\x32\x32", id="ascii"),
+ pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
+ pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
+ ])
+ def test_str_ucs4(self, s):
+ s = np.str_(s) # only our subclass implements the buffer protocol
+
+ # all the same, characters always encode as ucs4
+ expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
+ readonly=True)
+
+ v = memoryview(s)
+ assert self._as_dict(v) == expected
+
+        # integers of the platform-appropriate endianness
+ code_points = np.frombuffer(v, dtype='i4')
+
+ assert_equal(code_points, [ord(c) for c in s])
+
+ # Check that we do not allow writeable buffer export
+ with pytest.raises(BufferError, match="scalar buffer is readonly"):
+ get_buffer_info(s, ["WRITABLE"])
+
+ def test_user_scalar_fails_buffer(self):
+ r = rational(1)
+ with assert_raises(TypeError):
+ memoryview(r)
+
+ # Check that we do not allow writeable buffer export
+ with pytest.raises(BufferError, match="scalar buffer is readonly"):
+ get_buffer_info(r, ["WRITABLE"])
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py
new file mode 100644
index 00000000..f697c3c8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarinherit.py
@@ -0,0 +1,98 @@
+""" Test printing of scalar types.
+
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_raises
+
+
+class A:
+ pass
+class B(A, np.float64):
+ pass
+
+class C(B):
+ pass
+class D(C, B):
+ pass
+
+class B0(np.float64, A):
+ pass
+class C0(B0):
+ pass
+
+class HasNew:
+ def __new__(cls, *args, **kwargs):
+ return cls, args, kwargs
+
+class B1(np.float64, HasNew):
+ pass
+
+
+class TestInherit:
+ def test_init(self):
+ x = B(1.0)
+ assert_(str(x) == '1.0')
+ y = C(2.0)
+ assert_(str(y) == '2.0')
+ z = D(3.0)
+ assert_(str(z) == '3.0')
+
+ def test_init2(self):
+ x = B0(1.0)
+ assert_(str(x) == '1.0')
+ y = C0(2.0)
+ assert_(str(y) == '2.0')
+
+ def test_gh_15395(self):
+ # HasNew is the second base, so `np.float64` should have priority
+ x = B1(1.0)
+ assert_(str(x) == '1.0')
+
+ # previously caused RecursionError!?
+ with pytest.raises(TypeError):
+ B1(1.0, 2.0)
+
+
+class TestCharacter:
+ def test_char_radd(self):
+ # GH issue 9620, reached gentype_add and raise TypeError
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ s = b'def'
+ u = 'def'
+ assert_(np_s.__radd__(np_s) is NotImplemented)
+ assert_(np_s.__radd__(np_u) is NotImplemented)
+ assert_(np_s.__radd__(s) is NotImplemented)
+ assert_(np_s.__radd__(u) is NotImplemented)
+ assert_(np_u.__radd__(np_s) is NotImplemented)
+ assert_(np_u.__radd__(np_u) is NotImplemented)
+ assert_(np_u.__radd__(s) is NotImplemented)
+ assert_(np_u.__radd__(u) is NotImplemented)
+ assert_(s + np_s == b'defabc')
+ assert_(u + np_u == 'defabc')
+
+ class MyStr(str, np.generic):
+ # would segfault
+ pass
+
+ with assert_raises(TypeError):
+ # Previously worked, but gave completely wrong result
+ ret = s + MyStr('abc')
+
+ class MyBytes(bytes, np.generic):
+ # would segfault
+ pass
+
+ ret = s + MyBytes(b'abc')
+        assert type(ret) is type(s)
+ assert ret == b"defabc"
+
+ def test_char_repeat(self):
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ res_s = b'abc' * 5
+ res_u = 'abc' * 5
+ assert_(np_s * 5 == res_s)
+ assert_(np_u * 5 == res_u)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py
new file mode 100644
index 00000000..65b8121a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarmath.py
@@ -0,0 +1,1087 @@
+import contextlib
+import sys
+import warnings
+import itertools
+import operator
+import platform
+from numpy.compat import _pep440
+import pytest
+from hypothesis import given, settings
+from hypothesis.strategies import sampled_from
+from hypothesis.extra import numpy as hynp
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_almost_equal,
+ assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
+ assert_warns,
+ )
+
+types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
+ np.int_, np.uint, np.longlong, np.ulonglong,
+ np.single, np.double, np.longdouble, np.csingle,
+ np.cdouble, np.clongdouble]
+
+floating_types = np.floating.__subclasses__()
+complex_floating_types = np.complexfloating.__subclasses__()
+
+objecty_things = [object(), None]
+
+reasonable_operators_for_scalars = [
+ operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
+ operator.gt, operator.add, operator.floordiv, operator.mod,
+ operator.mul, operator.pow, operator.sub, operator.truediv,
+]
+
+
+# This compares scalarmath against ufuncs.
+
+class TestTypes:
+ def test_types(self):
+ for atype in types:
+ a = atype(1)
+ assert_(a == 1, "error with %r: got %r" % (atype, a))
+
+ def test_type_add(self):
+ # list of types
+ for k, atype in enumerate(types):
+ a_scalar = atype(3)
+ a_array = np.array([3], dtype=atype)
+ for l, btype in enumerate(types):
+ b_scalar = btype(1)
+ b_array = np.array([1], dtype=btype)
+ c_scalar = a_scalar + b_scalar
+ c_array = a_array + b_array
+ # It was comparing the type numbers, but the new ufunc
+ # function-finding mechanism finds the lowest function
+ # to which both inputs can be cast - which produces 'l'
+ # when you do 'q' + 'b'. The old function finding mechanism
+ # skipped ahead based on the first argument, but that
+ # does not produce properly symmetric results...
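+                # For example (a sketch, assuming a typical LP64 platform
+                # where both 'q' and 'l' are 64 bits):
+                #   >>> (np.longlong(1) + np.byte(1)).dtype.char
+                #   'l'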
+ assert_equal(c_scalar.dtype, c_array.dtype,
+ "error with types (%d/'%c' + %d/'%c')" %
+ (k, np.dtype(atype).char, l, np.dtype(btype).char))
+
+ def test_type_create(self):
+ for k, atype in enumerate(types):
+ a = np.array([1, 2, 3], atype)
+ b = atype([1, 2, 3])
+ assert_equal(a, b)
+
+ def test_leak(self):
+ # test leak of scalar objects
+ # a leak would show up in valgrind as still-reachable of ~2.6MB
+ for i in range(200000):
+ np.add(1, 1)
+
+
+def check_ufunc_scalar_equivalence(op, arr1, arr2):
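+    """Apply `op` to the 0-d arrays and to the scalars extracted from them,
+    checking that the results (or the raised exception types) agree.
+    """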
+ scalar1 = arr1[()]
+ scalar2 = arr2[()]
+ assert isinstance(scalar1, np.generic)
+ assert isinstance(scalar2, np.generic)
+
+ if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
+ comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
+ if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
+ pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
+ if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]:
+ # array**scalar special case can have different result dtype
+ # (Other powers may have issues also, but are not hit here.)
+ # TODO: It would be nice to resolve this issue.
+ pytest.skip("array**2 can have incorrect/weird result dtype")
+
+ # ignore fpe's since they may just mismatch for integers anyway.
+ with warnings.catch_warnings(), np.errstate(all="ignore"):
+ # Comparisons DeprecationWarnings replacing errors (2022-03):
+ warnings.simplefilter("error", DeprecationWarning)
+ try:
+ res = op(arr1, arr2)
+ except Exception as e:
+ with pytest.raises(type(e)):
+ op(scalar1, scalar2)
+ else:
+ scalar_res = op(scalar1, scalar2)
+ assert_array_equal(scalar_res, res, strict=True)
+
+
+@pytest.mark.slow
+@settings(max_examples=10000, deadline=2000)
+@given(sampled_from(reasonable_operators_for_scalars),
+ hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
+ hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
+def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
+ """
+ This is a thorough test attempting to cover important promotion paths
+ and ensuring that arrays and scalars stay as aligned as possible.
+ However, if it creates troubles, it should maybe just be removed.
+ """
+ check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.slow
+@given(sampled_from(reasonable_operators_for_scalars),
+ hynp.scalar_dtypes(), hynp.scalar_dtypes())
+def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
+ # Same as above, but don't worry about sampling weird values so that we
+ # do not have to sample as much
+ arr1 = np.array(2, dtype=dt1)
+    arr2 = np.array(3, dtype=dt2)  # some powers do weird things.
+
+ check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
+def test_int_float_promotion_truediv(fscalar):
+ # Promotion for mixed int and float32/float16 must not go to float64
+ i = np.int8(1)
+ f = fscalar(1)
+ expected = np.result_type(i, f)
+ assert (i / f).dtype == expected
+ assert (f / i).dtype == expected
+ # But normal int / int true division goes to float64:
+ assert (i / i).dtype == np.dtype("float64")
+    # For int16, result has to be at least float32 (takes ufunc path):
+ assert (np.int16(1) / f).dtype == np.dtype("float32")
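+    # A sketch of the promotion being exercised (under NumPy's legacy
+    # promotion rules):
+    #   >>> np.result_type(np.int8, np.float16)
+    #   dtype('float16')
+    #   >>> np.result_type(np.int16, np.float16)
+    #   dtype('float32')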
+
+
+class TestBaseMath:
+ def test_blocked(self):
+        # test alignment offsets for SIMD instructions
+ # alignments for vz + 2 * (vs - 1) + 1
+ for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
+ for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
+ type='binary',
+ max_size=sz):
+ exp1 = np.ones_like(inp1)
+ inp1[...] = np.ones_like(inp1)
+ inp2[...] = np.zeros_like(inp2)
+ assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
+ assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
+ assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
+
+ np.add(inp1, inp2, out=out)
+ assert_almost_equal(out, exp1, err_msg=msg)
+
+ inp2[...] += np.arange(inp2.size, dtype=dt) + 1
+ assert_almost_equal(np.square(inp2),
+ np.multiply(inp2, inp2), err_msg=msg)
+ # skip true divide for ints
+ if dt != np.int32:
+ assert_almost_equal(np.reciprocal(inp2),
+ np.divide(1, inp2), err_msg=msg)
+
+ inp1[...] = np.ones_like(inp1)
+ np.add(inp1, 2, out=out)
+ assert_almost_equal(out, exp1 + 2, err_msg=msg)
+ inp2[...] = np.ones_like(inp2)
+ np.add(2, inp2, out=out)
+ assert_almost_equal(out, exp1 + 2, err_msg=msg)
+
+ def test_lower_align(self):
+ # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+ d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ assert_almost_equal(d + d, d * 2)
+ np.add(d, d, out=o)
+ np.add(np.ones_like(d), d, out=o)
+ np.add(d, np.ones_like(d), out=o)
+ np.add(np.ones_like(d), d)
+ np.add(d, np.ones_like(d))
+
+
+class TestPower:
+ def test_small_types(self):
+ for t in [np.int8, np.int16, np.float16]:
+ a = t(3)
+ b = a ** 4
+ assert_(b == 81, "error with %r: got %r" % (t, b))
+
+ def test_large_types(self):
+ for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
+ a = t(51)
+ b = a ** 4
+ msg = "error with %r: got %r" % (t, b)
+ if np.issubdtype(t, np.integer):
+ assert_(b == 6765201, msg)
+ else:
+ assert_almost_equal(b, 6765201, err_msg=msg)
+
+ def test_integers_to_negative_integer_power(self):
+ # Note that the combination of uint64 with a signed integer
+ # has common type np.float64. The other combinations should all
+ # raise a ValueError for integer ** negative integer.
+ exp = [np.array(-1, dt)[()] for dt in 'bhilq']
+
+ # 1 ** -1 possible special case
+ base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, 1.)
+
+ # -1 ** -1 possible special case
+ base = [np.array(-1, dt)[()] for dt in 'bhilq']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, -1.)
+
+ # 2 ** -1 perhaps generic
+ base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
+ for i1, i2 in itertools.product(base, exp):
+ if i1.dtype != np.uint64:
+ assert_raises(ValueError, operator.pow, i1, i2)
+ else:
+ res = operator.pow(i1, i2)
+ assert_(res.dtype.type is np.float64)
+ assert_almost_equal(res, .5)
+
+ def test_mixed_types(self):
+ typelist = [np.int8, np.int16, np.float16,
+ np.float32, np.float64, np.int8,
+ np.int16, np.int32, np.int64]
+ for t1 in typelist:
+ for t2 in typelist:
+ a = t1(3)
+ b = t2(2)
+ result = a**b
+ msg = ("error with %r and %r:"
+ "got %r, expected %r") % (t1, t2, result, 9)
+ if np.issubdtype(np.dtype(result), np.integer):
+ assert_(result == 9, msg)
+ else:
+ assert_almost_equal(result, 9, err_msg=msg)
+
+ def test_modular_power(self):
+ # modular power is not implemented, so ensure it errors
+ a = 5
+ b = 4
+ c = 10
+ expected = pow(a, b, c) # noqa: F841
+ for t in (np.int32, np.float32, np.complex64):
+ # note that 3-operand power only dispatches on the first argument
+ assert_raises(TypeError, operator.pow, t(a), b, c)
+ assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
+
+
+def floordiv_and_mod(x, y):
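+    """divmod() equivalent built from the separate // and % operators."""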
+ return (x // y, x % y)
+
+
+def _signs(dt):
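+    """Return the operand signs worth testing for dtype character `dt`."""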
+ if dt in np.typecodes['UnsignedInteger']:
+ return (+1,)
+ else:
+ return (+1, -1)
+
+
+class TestModulus:
+
+ def test_modulus_basic(self):
+ dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+ for op in [floordiv_and_mod, divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*71, dtype=dt1)[()]
+ b = np.array(sg2*19, dtype=dt2)[()]
+ div, rem = op(a, b)
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ def test_float_modulus_exact(self):
+ # test that float results are exact for small integers. This also
+ # holds for the same integers scaled by powers of two.
+ nlst = list(range(-127, 0))
+ plst = list(range(1, 128))
+ dividend = nlst + [0] + plst
+ divisor = nlst + plst
+ arg = list(itertools.product(dividend, divisor))
+ tgt = list(divmod(*t) for t in arg)
+
+ a, b = np.array(arg, dtype=int).T
+ # convert exact integer results from Python to float so that
+        # signed zero can be used; it is checked below.
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+ tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+ tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+ for op in [floordiv_and_mod, divmod]:
+ for dt in np.typecodes['Float']:
+ msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+ fa = a.astype(dt)
+ fb = b.astype(dt)
+ # use list comprehension so a_ and b_ are scalars
+ div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+ assert_equal(div, tgtdiv, err_msg=msg)
+ assert_equal(rem, tgtrem, err_msg=msg)
+
+ def test_float_modulus_roundoff(self):
+ # gh-6127
+ dt = np.typecodes['Float']
+ for op in [floordiv_and_mod, divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*78*6e-8, dtype=dt1)[()]
+ b = np.array(sg2*6e-8, dtype=dt2)[()]
+ div, rem = op(a, b)
+ # Equal assertion should hold when fmod is used
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ def test_float_modulus_corner_cases(self):
+ # Check remainder magnitude.
+ for dt in np.typecodes['Float']:
+ b = np.array(1.0, dtype=dt)
+ a = np.nextafter(np.array(0.0, dtype=dt), -b)
+ rem = operator.mod(a, b)
+ assert_(rem <= b, 'dt: %s' % dt)
+ rem = operator.mod(-a, -b)
+ assert_(rem >= -b, 'dt: %s' % dt)
+
+ # Check nans, inf
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+ sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
+ sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
+ sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+ sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ rem = operator.mod(fone, fzer)
+ assert_(np.isnan(rem), 'dt: %s' % dt)
+ # MSVC 2008 returns NaN here, so disable the check.
+ #rem = operator.mod(fone, finf)
+ #assert_(rem == fone, 'dt: %s' % dt)
+ rem = operator.mod(fone, fnan)
+ assert_(np.isnan(rem), 'dt: %s' % dt)
+ rem = operator.mod(finf, fone)
+ assert_(np.isnan(rem), 'dt: %s' % dt)
+ for op in [floordiv_and_mod, divmod]:
+ div, mod = op(fone, fzer)
+                assert_(np.isinf(div))
+                assert_(np.isnan(mod))
+
+ def test_inplace_floordiv_handling(self):
+ # issue gh-12927
+ # this only applies to in-place floordiv //=, because the output type
+ # promotes to float which does not fit
+ a = np.array([1, 2], np.int64)
+ b = np.array([1, 2], np.uint64)
+ with pytest.raises(TypeError,
+ match=r"Cannot cast ufunc 'floor_divide' output from"):
+ a //= b
+
+
+class TestComplexDivision:
+ def test_zero_division(self):
+ with np.errstate(all="ignore"):
+ for t in [np.complex64, np.complex128]:
+ a = t(0.0)
+ b = t(1.0)
+ assert_(np.isinf(b/a))
+ b = t(complex(np.inf, np.inf))
+ assert_(np.isinf(b/a))
+ b = t(complex(np.inf, np.nan))
+ assert_(np.isinf(b/a))
+ b = t(complex(np.nan, np.inf))
+ assert_(np.isinf(b/a))
+ b = t(complex(np.nan, np.nan))
+ assert_(np.isnan(b/a))
+ b = t(0.)
+ assert_(np.isnan(b/a))
+
+ def test_signed_zeros(self):
+ with np.errstate(all="ignore"):
+ for t in [np.complex64, np.complex128]:
+ # tupled (numerator, denominator, expected)
+ # for testing as expected == numerator/denominator
+ data = (
+ (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
+ (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+ (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
+ (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
+ (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
+ (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+ ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
+ ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
+ )
+ for cases in data:
+ n = cases[0]
+ d = cases[1]
+ ex = cases[2]
+ result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
+ # check real and imag parts separately to avoid comparison
+ # in array context, which does not account for signed zeros
+ assert_equal(result.real, ex[0])
+ assert_equal(result.imag, ex[1])
+
+ def test_branches(self):
+ with np.errstate(all="ignore"):
+ for t in [np.complex64, np.complex128]:
+ # tupled (numerator, denominator, expected)
+ # for testing as expected == numerator/denominator
+ data = list()
+
+ # trigger branch: real(fabs(denom)) > imag(fabs(denom))
+ # followed by else condition as neither are == 0
+ data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))
+
+ # trigger branch: real(fabs(denom)) > imag(fabs(denom))
+ # followed by if condition as both are == 0
+ # is performed in test_zero_division(), so this is skipped
+
+ # trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
+ data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))
+
+ for cases in data:
+ n = cases[0]
+ d = cases[1]
+ ex = cases[2]
+ result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
+ # check real and imag parts separately to avoid comparison
+ # in array context, which does not account for signed zeros
+ assert_equal(result.real, ex[0])
+ assert_equal(result.imag, ex[1])
+
+
+class TestConversion:
+ def test_int_from_long(self):
+ l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
+ li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
+ for T in [None, np.float64, np.int64]:
+ a = np.array(l, dtype=T)
+ assert_equal([int(_m) for _m in a], li)
+
+ a = np.array(l[:3], dtype=np.uint64)
+ assert_equal([int(_m) for _m in a], li[:3])
+
+ def test_iinfo_long_values(self):
+ for code in 'bBhH':
+ with pytest.warns(DeprecationWarning):
+ res = np.array(np.iinfo(code).max + 1, dtype=code)
+ tgt = np.iinfo(code).min
+ assert_(res == tgt)
+
+ for code in np.typecodes['AllInteger']:
+ res = np.array(np.iinfo(code).max, dtype=code)
+ tgt = np.iinfo(code).max
+ assert_(res == tgt)
+
+ for code in np.typecodes['AllInteger']:
+ res = np.dtype(code).type(np.iinfo(code).max)
+ tgt = np.iinfo(code).max
+ assert_(res == tgt)
+
+ def test_int_raise_behaviour(self):
+ def overflow_error_func(dtype):
+ dtype(np.iinfo(dtype).max + 1)
+
+ for code in [np.int_, np.uint, np.longlong, np.ulonglong]:
+ assert_raises(OverflowError, overflow_error_func, code)
+
+ def test_int_from_infinite_longdouble(self):
+ # gh-627
+ x = np.longdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ assert_equal(len(sup.log), 1)
+
+ @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
+ def test_int_from_infinite_longdouble___int__(self):
+ x = np.longdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ assert_equal(len(sup.log), 1)
+
+ @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+ @pytest.mark.skipif(platform.machine().startswith("ppc"),
+ reason="IBM double double")
+ def test_int_from_huge_longdouble(self):
+ # Produce a longdouble that would overflow a double,
+ # use exponent that avoids bug in Darwin pow function.
+ exp = np.finfo(np.double).maxexp - 1
+ huge_ld = 2 * 1234 * np.longdouble(2) ** exp
+ huge_i = 2 * 1234 * 2 ** exp
+ assert_(huge_ld != np.inf)
+ assert_equal(int(huge_ld), huge_i)
+
+ def test_int_from_longdouble(self):
+ x = np.longdouble(1.5)
+ assert_equal(int(x), 1)
+ x = np.longdouble(-10.5)
+ assert_equal(int(x), -10)
+
+ def test_numpy_scalar_relational_operators(self):
+ # All integer
+ for dt1 in np.typecodes['AllInteger']:
+ assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+ for dt2 in np.typecodes['AllInteger']:
+ assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+
+        # Unsigned integers
+ for dt1 in 'BHILQP':
+ assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+            # unsigned vs signed
+ for dt2 in 'bhilqp':
+ assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+
+        # Signed integers and floats
+ for dt1 in 'bhlqp' + np.typecodes['Float']:
+ assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+ assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
+
+ for dt2 in 'bhlqp' + np.typecodes['Float']:
+ assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+ assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
+ "type %s and %s failed" % (dt1, dt2))
+
+ def test_scalar_comparison_to_none(self):
+        # Scalars should just return False and not give a warning.
+ # The comparisons are flagged by pep8, ignore that.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_(not np.float32(1) == None)
+ assert_(not np.str_('test') == None)
+ # This is dubious (see below):
+ assert_(not np.datetime64('NaT') == None)
+
+ assert_(np.float32(1) != None)
+ assert_(np.str_('test') != None)
+ # This is dubious (see below):
+ assert_(np.datetime64('NaT') != None)
+ assert_(len(w) == 0)
+
+ # For documentation purposes, this is why the datetime is dubious.
+ # At the time of deprecation this was no behaviour change, but
+ # it has to be considered when the deprecations are done.
+ assert_(np.equal(np.datetime64('NaT'), None))
+
+
+#class TestRepr:
+# def test_repr(self):
+# for t in types:
+# val = t(1197346475.0137341)
+# val_repr = repr(val)
+# val2 = eval(val_repr)
+# assert_equal( val, val2 )
+
+
+class TestRepr:
+ def _test_type_repr(self, t):
+ finfo = np.finfo(t)
+ last_fraction_bit_idx = finfo.nexp + finfo.nmant
+ last_exponent_bit_idx = finfo.nexp
+ storage_bytes = np.dtype(t).itemsize*8
+ # could add some more types to the list below
+ for which in ['small denorm', 'small norm']:
+ # Values from https://en.wikipedia.org/wiki/IEEE_754
+ constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
+ if which == 'small denorm':
+ byte = last_fraction_bit_idx // 8
+ bytebit = 7-(last_fraction_bit_idx % 8)
+ constr[byte] = 1 << bytebit
+ elif which == 'small norm':
+ byte = last_exponent_bit_idx // 8
+ bytebit = 7-(last_exponent_bit_idx % 8)
+ constr[byte] = 1 << bytebit
+ else:
+ raise ValueError('hmm')
+ val = constr.view(t)[0]
+ val_repr = repr(val)
+ val2 = t(eval(val_repr))
+ if not (val2 == 0 and val < 1e-100):
+ assert_equal(val, val2)
+
+ def test_float_repr(self):
+ # long double test cannot work, because eval goes through a python
+ # float
+ for t in [np.float32, np.float64]:
+ self._test_type_repr(t)
+
+
+if not IS_PYPY:
+ # sys.getsizeof() is not valid on PyPy
+ class TestSizeOf:
+
+ def test_equal_nbytes(self):
+ for type in types:
+ x = type(0)
+ assert_(sys.getsizeof(x) > x.nbytes)
+
+ def test_error(self):
+ d = np.float32()
+ assert_raises(TypeError, d.__sizeof__, "a")
+
+
+class TestMultiply:
+ def test_seq_repeat(self):
+ # Test that basic sequences get repeated when multiplied with
+ # numpy integers. And errors are raised when multiplied with others.
+ # Some of this behaviour may be controversial and could be open for
+ # change.
+ accepted_types = set(np.typecodes["AllInteger"])
+ deprecated_types = {'?'}
+ forbidden_types = (
+ set(np.typecodes["All"]) - accepted_types - deprecated_types)
+ forbidden_types -= {'V'} # can't default-construct void scalars
+
+ for seq_type in (list, tuple):
+ seq = seq_type([1, 2, 3])
+ for numpy_type in accepted_types:
+ i = np.dtype(numpy_type).type(2)
+ assert_equal(seq * i, seq * int(i))
+ assert_equal(i * seq, int(i) * seq)
+
+ for numpy_type in deprecated_types:
+ i = np.dtype(numpy_type).type()
+ assert_equal(
+ assert_warns(DeprecationWarning, operator.mul, seq, i),
+ seq * int(i))
+ assert_equal(
+ assert_warns(DeprecationWarning, operator.mul, i, seq),
+ int(i) * seq)
+
+ for numpy_type in forbidden_types:
+ i = np.dtype(numpy_type).type()
+ assert_raises(TypeError, operator.mul, seq, i)
+ assert_raises(TypeError, operator.mul, i, seq)
+
+ def test_no_seq_repeat_basic_array_like(self):
+ # Test that an array-like which does not know how to be multiplied
+ # does not attempt sequence repeat (raise TypeError).
+ # See also gh-7428.
+ class ArrayLike:
+ def __init__(self, arr):
+ self.arr = arr
+ def __array__(self):
+ return self.arr
+
+ # Test for simple ArrayLike above and memoryviews (original report)
+ for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
+ assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
+ assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
+ assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
+ assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
+
+
+class TestNegative:
+ def test_exceptions(self):
+ a = np.ones((), dtype=np.bool_)[()]
+ assert_raises(TypeError, operator.neg, a)
+
+ def test_result(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for dt in types:
+ a = np.ones((), dtype=dt)[()]
+ if dt in np.typecodes['UnsignedInteger']:
+ st = np.dtype(dt).type
+ max = st(np.iinfo(dt).max)
+ assert_equal(operator.neg(a), max)
+ else:
+ assert_equal(operator.neg(a) + a, 0)
+
+class TestSubtract:
+ def test_exceptions(self):
+ a = np.ones((), dtype=np.bool_)[()]
+ assert_raises(TypeError, operator.sub, a, a)
+
+ def test_result(self):
+ types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for dt in types:
+ a = np.ones((), dtype=dt)[()]
+ assert_equal(operator.sub(a, a), 0)
+
+
+class TestAbs:
+ def _test_abs_func(self, absfunc, test_dtype):
+ x = test_dtype(-1.5)
+ assert_equal(absfunc(x), 1.5)
+ x = test_dtype(0.0)
+ res = absfunc(x)
+ # assert_equal() checks zero signedness
+ assert_equal(res, 0.0)
+ x = test_dtype(-0.0)
+ res = absfunc(x)
+ assert_equal(res, 0.0)
+
+ x = test_dtype(np.finfo(test_dtype).max)
+ assert_equal(absfunc(x), x.real)
+
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning)
+ x = test_dtype(np.finfo(test_dtype).tiny)
+ assert_equal(absfunc(x), x.real)
+
+ x = test_dtype(np.finfo(test_dtype).min)
+ assert_equal(absfunc(x), -x.real)
+
+ @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
+ def test_builtin_abs(self, dtype):
+ if (
+ sys.platform == "cygwin" and dtype == np.clongdouble and
+ (
+ _pep440.parse(platform.release().split("-")[0])
+ < _pep440.Version("3.3.0")
+ )
+ ):
+ pytest.xfail(
+ reason="absl is computed in double precision on cygwin < 3.3"
+ )
+ self._test_abs_func(abs, dtype)
+
+ @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
+ def test_numpy_abs(self, dtype):
+ if (
+ sys.platform == "cygwin" and dtype == np.clongdouble and
+ (
+ _pep440.parse(platform.release().split("-")[0])
+ < _pep440.Version("3.3.0")
+ )
+ ):
+ pytest.xfail(
+ reason="absl is computed in double precision on cygwin < 3.3"
+ )
+ self._test_abs_func(np.abs, dtype)
+
+class TestBitShifts:
+
+ @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
+ @pytest.mark.parametrize('op',
+ [operator.rshift, operator.lshift], ids=['>>', '<<'])
+ def test_shift_all_bits(self, type_code, op):
+ """ Shifts where the shift amount is the width of the type or wider """
+ # gh-2449
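+        # (In C, shifting by the type width or more is undefined behaviour,
+        # so NumPy has to pin down a well-defined result itself.)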
+ dt = np.dtype(type_code)
+ nbits = dt.itemsize * 8
+ for val in [5, -5]:
+ for shift in [nbits, nbits + 4]:
+ val_scl = np.array(val).astype(dt)[()]
+ shift_scl = dt.type(shift)
+ res_scl = op(val_scl, shift_scl)
+ if val_scl < 0 and op is operator.rshift:
+ # sign bit is preserved
+ assert_equal(res_scl, -1)
+ else:
+ assert_equal(res_scl, 0)
+
+ # Result on scalars should be the same as on arrays
+ val_arr = np.array([val_scl]*32, dtype=dt)
+ shift_arr = np.array([shift]*32, dtype=dt)
+ res_arr = op(val_arr, shift_arr)
+ assert_equal(res_arr, res_scl)
+
+
+class TestHash:
+ @pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
+ def test_integer_hashes(self, type_code):
+ scalar = np.dtype(type_code).type
+ for i in range(128):
+ assert hash(i) == hash(scalar(i))
+
+ @pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
+ def test_float_and_complex_hashes(self, type_code):
+ scalar = np.dtype(type_code).type
+ for val in [np.pi, np.inf, 3, 6.]:
+ numpy_val = scalar(val)
+ # Cast back to Python, in case the NumPy scalar has less precision
+ if numpy_val.dtype.kind == 'c':
+ val = complex(numpy_val)
+ else:
+ val = float(numpy_val)
+ assert val == numpy_val
+ assert hash(val) == hash(numpy_val)
+
+ if hash(float(np.nan)) != hash(float(np.nan)):
+ # If Python distinguishes different NaNs we do so too (gh-18833)
+ assert hash(scalar(np.nan)) != hash(scalar(np.nan))
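+            # (Since CPython 3.10, hash(float("nan")) is based on object
+            # identity, so two distinct NaN objects hash differently there.)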
+
+ @pytest.mark.parametrize("type_code", np.typecodes['Complex'])
+ def test_complex_hashes(self, type_code):
+ # Test some complex valued hashes specifically:
+ scalar = np.dtype(type_code).type
+ for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]:
+ numpy_val = scalar(val)
+ assert hash(complex(numpy_val)) == hash(numpy_val)
+
+
+@contextlib.contextmanager
+def recursionlimit(n):
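+    """Context manager that temporarily sets the recursion limit to ``n``."""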
+ o = sys.getrecursionlimit()
+ try:
+ sys.setrecursionlimit(n)
+ yield
+ finally:
+ sys.setrecursionlimit(o)
+
+
+@given(sampled_from(objecty_things),
+ sampled_from(reasonable_operators_for_scalars),
+ sampled_from(types))
+def test_operator_object_left(o, op, type_):
+ try:
+ with recursionlimit(200):
+ op(o, type_(1))
+ except TypeError:
+ pass
+
+
+@given(sampled_from(objecty_things),
+ sampled_from(reasonable_operators_for_scalars),
+ sampled_from(types))
+def test_operator_object_right(o, op, type_):
+ try:
+ with recursionlimit(200):
+ op(type_(1), o)
+ except TypeError:
+ pass
+
+
+@given(sampled_from(reasonable_operators_for_scalars),
+ sampled_from(types),
+ sampled_from(types))
+def test_operator_scalars(op, type1, type2):
+ try:
+ op(type1(1), type2(1))
+ except TypeError:
+ pass
+
+
+@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
+@pytest.mark.parametrize("val", [None, 2**64])
+def test_longdouble_inf_loop(op, val):
+ # Note: The 2**64 value will pass once NEP 50 is adopted.
+ try:
+ op(np.longdouble(3), val)
+ except TypeError:
+ pass
+ try:
+ op(val, np.longdouble(3))
+ except TypeError:
+ pass
+
+
+@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
+@pytest.mark.parametrize("val", [None, 2**64])
+def test_clongdouble_inf_loop(op, val):
+ # Note: The 2**64 value will pass once NEP 50 is adopted.
+ try:
+ op(np.clongdouble(3), val)
+ except TypeError:
+ pass
+ try:
+        op(val, np.clongdouble(3))
+ except TypeError:
+ pass
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+ lambda min, max: max + max,
+ lambda min, max: min - max,
+ lambda min, max: max * max], ids=["+", "-", "*"])
+def test_scalar_integer_operation_overflow(dtype, operation):
+ st = np.dtype(dtype).type
+ min = st(np.iinfo(dtype).min)
+ max = st(np.iinfo(dtype).max)
+
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ operation(min, max)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+@pytest.mark.parametrize("operation", [
+ lambda min, neg_1: -min,
+ lambda min, neg_1: abs(min),
+ lambda min, neg_1: min * neg_1,
+ pytest.param(lambda min, neg_1: min // neg_1,
+ marks=pytest.mark.skip(reason="broken on some platforms"))],
+ ids=["neg", "abs", "*", "//"])
+def test_scalar_signed_integer_overflow(dtype, operation):
+ # The minimum signed integer can "overflow" for some additional operations
+ st = np.dtype(dtype).type
+ min = st(np.iinfo(dtype).min)
+ neg_1 = st(-1)
+
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ operation(min, neg_1)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
+def test_scalar_unsigned_integer_overflow(dtype):
+ val = np.dtype(dtype).type(8)
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ -val
+
+ zero = np.dtype(dtype).type(0)
+ -zero # does not warn
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+ lambda val, zero: val // zero,
+ lambda val, zero: val % zero, ], ids=["//", "%"])
+def test_scalar_integer_operation_divbyzero(dtype, operation):
+ st = np.dtype(dtype).type
+ val = st(100)
+ zero = st(0)
+
+ with pytest.warns(RuntimeWarning, match="divide by zero"):
+ operation(val, zero)
+
+
+ops_with_names = [
+ ("__lt__", "__gt__", operator.lt, True),
+ ("__le__", "__ge__", operator.le, True),
+ ("__eq__", "__eq__", operator.eq, True),
+ # Note __op__ and __rop__ may be identical here:
+ ("__ne__", "__ne__", operator.ne, True),
+ ("__gt__", "__lt__", operator.gt, True),
+ ("__ge__", "__le__", operator.ge, True),
+ ("__floordiv__", "__rfloordiv__", operator.floordiv, False),
+ ("__truediv__", "__rtruediv__", operator.truediv, False),
+ ("__add__", "__radd__", operator.add, False),
+ ("__mod__", "__rmod__", operator.mod, False),
+ ("__mul__", "__rmul__", operator.mul, False),
+ ("__pow__", "__rpow__", operator.pow, False),
+ ("__sub__", "__rsub__", operator.sub, False),
+]
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
+def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
+ """
+ This test covers scalar subclass deferral. Note that this is exceedingly
+ complicated, especially since it tends to fall back to the array paths and
+ these additionally add the "array priority" mechanism.
+
+ The behaviour was modified subtly in 1.22 (to make it closer to how Python
+    scalars work). Given its complexity, and the fact that subclassing NumPy
+    scalars is probably a bad idea to begin with, there is probably room
+    for adjustments here.
+ """
+ class myf_simple1(sctype):
+ pass
+
+ class myf_simple2(sctype):
+ pass
+
+ def op_func(self, other):
+ return __op__
+
+ def rop_func(self, other):
+ return __rop__
+
+ myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
+
+ # inheritance has to override, or this is correctly lost:
+ res = op(myf_simple1(1), myf_simple2(2))
+ assert type(res) == sctype or type(res) == np.bool_
+ assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2) # inherited
+
+ # Two independent subclasses do not really define an order. This could
+    # be attempted, but we do not since Python's `int` doesn't either:
+ assert op(myf_op(1), myf_simple1(2)) == __op__
+ assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited
+
+
+def test_longdouble_complex():
+ # Simple test to check longdouble and complex combinations, since these
+ # need to go through promotion, which longdouble needs to be careful about.
+ x = np.longdouble(1)
+ assert x + 1j == 1+1j
+ assert 1j + x == 1+1j
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
+@np._no_nep50_warning()
+def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
+ def op_func(self, other):
+ return __op__
+
+ def rop_func(self, other):
+ return __rop__
+
+ # Check that deferring is indicated using `__array_ufunc__`:
+ myt = type("myt", (subtype,),
+ {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
+
+    # Just as in the normal case, we should never presume we can modify the float.
+ assert op(myt(1), np.float64(2)) == __op__
+ assert op(np.float64(1), myt(2)) == __rop__
+
+ if op in {operator.mod, operator.floordiv} and subtype == complex:
+        return  # modulo is not supported for complex. Do not test.
+
+ if __rop__ == __op__:
+ return
+
+ # When no deferring is indicated, subclasses are handled normally.
+ myt = type("myt", (subtype,), {__rop__: rop_func})
+
+    # Check with float16/float32, since for a float subclass, float64 may
+    # behave differently:
+ res = op(myt(1), np.float16(2))
+ expected = op(subtype(1), np.float16(2))
+ assert res == expected
+ assert type(res) == type(expected)
+ res = op(np.float32(2), myt(1))
+ expected = op(np.float32(2), subtype(1))
+ assert res == expected
+ assert type(res) == type(expected)
+
+ # Same check for longdouble:
+ res = op(myt(1), np.longdouble(2))
+ expected = op(subtype(1), np.longdouble(2))
+ assert res == expected
+ assert type(res) == type(expected)
+    res = op(np.longdouble(2), myt(1))
+ expected = op(np.longdouble(2), subtype(1))
+ assert res == expected
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py
new file mode 100644
index 00000000..4deb5a0a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_scalarprint.py
@@ -0,0 +1,382 @@
+""" Test printing of scalar types.
+
+"""
+import code
+import platform
+import pytest
+import sys
+
+from tempfile import TemporaryFile
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+class TestRealScalars:
+ def test_str(self):
+ svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
+ styps = [np.float16, np.float32, np.float64, np.longdouble]
+ wanted = [
+ ['0.0', '0.0', '0.0', '0.0' ],
+ ['-0.0', '-0.0', '-0.0', '-0.0'],
+ ['1.0', '1.0', '1.0', '1.0' ],
+ ['-1.0', '-1.0', '-1.0', '-1.0'],
+ ['inf', 'inf', 'inf', 'inf' ],
+ ['-inf', '-inf', '-inf', '-inf'],
+ ['nan', 'nan', 'nan', 'nan']]
+
+ for wants, val in zip(wanted, svals):
+ for want, styp in zip(wants, styps):
+ msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
+ assert_equal(str(styp(val)), want, err_msg=msg)
+
+ def test_scalar_cutoffs(self):
+ # test that both the str and repr of np.float64 behaves
+ # like python floats in python3.
+ def check(v):
+ assert_equal(str(np.float64(v)), str(v))
+ assert_equal(str(np.float64(v)), repr(v))
+ assert_equal(repr(np.float64(v)), repr(v))
+ assert_equal(repr(np.float64(v)), str(v))
+
+ # check we use the same number of significant digits
+ check(1.12345678901234567890)
+ check(0.0112345678901234567890)
+
+ # check switch from scientific output to positional and back
+ check(1e-5)
+ check(1e-4)
+ check(1e15)
+ check(1e16)
+
+ def test_py2_float_print(self):
+ # gh-10753
+ # In python2, the python float type implements an obsolete method
+ # tp_print, which overrides tp_repr and tp_str when using "print" to
+ # output to a "real file" (ie, not a StringIO). Make sure we don't
+ # inherit it.
+ x = np.double(0.1999999999999)
+ with TemporaryFile('r+t') as f:
+ print(x, file=f)
+ f.seek(0)
+ output = f.read()
+ assert_equal(output, str(x) + '\n')
+ # In python2 the value float('0.1999999999999') prints with reduced
+ # precision as '0.2', but we want numpy's np.double('0.1999999999999')
+ # to print the unique value, '0.1999999999999'.
+
+ # gh-11031
+ # Only in the python2 interactive shell and when stdout is a "real"
+ # file, the output of the last command is printed to stdout without
+ # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
+ # x` are potentially different. Make sure they are the same. The only
+ # way I found to get prompt-like output is using an actual prompt from
+ # the 'code' module. Again, must use tempfile to get a "real" file.
+
+ # dummy user-input which enters one line and then ctrl-Ds.
+ def userinput():
+ yield 'np.sqrt(2)'
+ raise EOFError
+ gen = userinput()
+ input_func = lambda prompt="": next(gen)
+
+ with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
+ orig_stdout, orig_stderr = sys.stdout, sys.stderr
+ sys.stdout, sys.stderr = fo, fe
+
+ code.interact(local={'np': np}, readfunc=input_func, banner='')
+
+ sys.stdout, sys.stderr = orig_stdout, orig_stderr
+
+ fo.seek(0)
+ capture = fo.read().strip()
+
+ assert_equal(capture, repr(np.sqrt(2)))
+
+ def test_dragon4(self):
+ # these tests are adapted from Ryan Juckett's dragon4 implementation,
+ # see dragon4.c for details.
+
+ fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
+ fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
+ fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
+ fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
+
+ preckwd = lambda prec: {'unique': False, 'precision': prec}
+
+ assert_equal(fpos32('1.0'), "1.")
+ assert_equal(fsci32('1.0'), "1.e+00")
+ assert_equal(fpos32('10.234'), "10.234")
+ assert_equal(fpos32('-10.234'), "-10.234")
+ assert_equal(fsci32('10.234'), "1.0234e+01")
+ assert_equal(fsci32('-10.234'), "-1.0234e+01")
+ assert_equal(fpos32('1000.0'), "1000.")
+ assert_equal(fpos32('1.0', precision=0), "1.")
+ assert_equal(fsci32('1.0', precision=0), "1.e+00")
+ assert_equal(fpos32('10.234', precision=0), "10.")
+ assert_equal(fpos32('-10.234', precision=0), "-10.")
+ assert_equal(fsci32('10.234', precision=0), "1.e+01")
+ assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
+ assert_equal(fpos32('10.234', precision=2), "10.23")
+ assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
+ assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
+ '9.9999999999999995e-08')
+ assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
+ '9.8813129168249309e-324')
+ assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
+ '9.9999999999999694e-311')
+
+
+ # test rounding
+ # 3.1415927410 is closest float32 to np.pi
+ assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
+ "3.1415927410")
+ assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
+ "3.1415927410e+00")
+ assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
+ "3.1415926536")
+ assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
+ "3.1415926536e+00")
+ # 299792448 is closest float32 to 299792458
+ assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
+ assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
+ assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
+ assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
+
+ assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
+ "3.1415927410125732421875000")
+ assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
+ "3.14159265358979311599796346854418516159057617187500")
+ assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
+
+
+ # smallest numbers
+ assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
+ "0.00000000000000000000000000000000000000000000140129846432"
+ "4817070923729583289916131280261941876515771757068283889791"
+ "08268586060148663818836212158203125")
+
+ assert_equal(fpos64(5e-324, unique=False, precision=1074),
+ "0.00000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000049406564584124654417656"
+ "8792868221372365059802614324764425585682500675507270208751"
+ "8652998363616359923797965646954457177309266567103559397963"
+ "9877479601078187812630071319031140452784581716784898210368"
+ "8718636056998730723050006387409153564984387312473397273169"
+ "6151400317153853980741262385655911710266585566867681870395"
+ "6031062493194527159149245532930545654440112748012970999954"
+ "1931989409080416563324524757147869014726780159355238611550"
+ "1348035264934720193790268107107491703332226844753335720832"
+ "4319360923828934583680601060115061698097530783422773183292"
+ "4790498252473077637592724787465608477820373446969953364701"
+ "7972677717585125660551199131504891101451037862738167250955"
+ "8373897335989936648099411642057026370902792427675445652290"
+ "87538682506419718265533447265625")
+
+ # largest numbers
+ f32x = np.finfo(np.float32).max
+ assert_equal(fpos32(f32x, **preckwd(0)),
+ "340282346638528859811704183484516925440.")
+ assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
+ "1797693134862315708145274237317043567980705675258449965989"
+ "1747680315726078002853876058955863276687817154045895351438"
+ "2464234321326889464182768467546703537516986049910576551282"
+ "0762454900903893289440758685084551339423045832369032229481"
+ "6580855933212334827479782620414472316873817718091929988125"
+ "0404026184124858368.")
+ # Warning: In unique mode only the integer digits necessary for
+ # uniqueness are computed, the rest are 0.
+ assert_equal(fpos32(f32x),
+ "340282350000000000000000000000000000000.")
+
+ # Further tests of zero-padding vs rounding in different combinations
+ # of unique, fractional, precision, min_digits
+ # precision can only reduce digits, not add them.
+ # min_digits can only extend digits, not reduce them.
+ assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
+ "340282350000000000000000000000000000000.")
+ assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
+ "340282350000000000000000000000000000000.")
+ assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
+ "340282346638528859811704183484516925440.")
+ assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
+ "340282346638528859811704183484516925440.0000")
+ assert_equal(fpos32(f32x, unique=True, fractional=True,
+ min_digits=4, precision=4),
+ "340282346638528859811704183484516925440.0000")
+ assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
+ precision=0)
+ assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
+ "340300000000000000000000000000000000000.")
+ assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
+ "340282350000000000000000000000000000000.")
+ assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
+ "340282350000000000000000000000000000000.")
+ assert_equal(fpos32(f32x, unique=True, fractional=False,
+ min_digits=20),
+ "340282346638528859810000000000000000000.")
+ assert_equal(fpos32(f32x, unique=True, fractional=False,
+ min_digits=15),
+ "340282346638529000000000000000000000000.")
+ assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
+ "340300000000000000000000000000000000000.")
+ # test that unique rounding is preserved when precision is supplied
+ # but no extra digits need to be printed (gh-18609)
+ a = np.float64.fromhex('-1p-97')
+ assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
+ assert_equal(fsci64(a, unique=False, precision=15),
+ '-6.310887241768094e-30')
+ assert_equal(fsci64(a, unique=True, precision=15),
+ '-6.310887241768095e-30')
+ assert_equal(fsci64(a, unique=True, min_digits=15),
+ '-6.310887241768095e-30')
+ assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
+ '-6.310887241768095e-30')
+        # add/remove digits in unique mode with unbiased rounding
+ assert_equal(fsci64(a, unique=True, precision=14),
+ '-6.31088724176809e-30')
+ assert_equal(fsci64(a, unique=True, min_digits=16),
+ '-6.3108872417680944e-30')
+ assert_equal(fsci64(a, unique=True, precision=16),
+ '-6.310887241768095e-30')
+ assert_equal(fsci64(a, unique=True, min_digits=14),
+ '-6.310887241768095e-30')
+ # test min_digits in unique mode with different rounding cases
+ assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
+ assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')
+
+ # test trailing zeros
+ assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
+ assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
+ assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
+ assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
+ assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
+ assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
+ assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
+ assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
+ # gh-10713
+ assert_equal(fpos64('324', unique=False, precision=5,
+ fractional=False), "324.00")
+
+
+ def test_dragon4_interface(self):
+ tps = [np.float16, np.float32, np.float64]
+ if hasattr(np, 'float128'):
+ tps.append(np.float128)
+
+ fpos = np.format_float_positional
+ fsci = np.format_float_scientific
+
+ for tp in tps:
+ # test padding
+ assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ")
+ assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ")
+ assert_equal(fpos(tp('-10.2'),
+ pad_left=4, pad_right=4), " -10.2 ")
+
+ # test exp_digits
+ assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
+
+ # test fixed (non-unique) mode
+ assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
+ assert_equal(fsci(tp('1.0'), unique=False, precision=4),
+ "1.0000e+00")
+
+ # test trimming
+ # trim of 'k' or '.' only affects non-unique mode, since unique
+ # mode will not output trailing 0s.
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
+ "1.0000")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
+ "1.")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
+ "1.2" if tp != np.float16 else "1.2002")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
+ "1.0")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
+ "1.2" if tp != np.float16 else "1.2002")
+ assert_equal(fpos(tp('1.'), trim='0'), "1.0")
+
+ assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
+ "1")
+ assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
+ "1.2" if tp != np.float16 else "1.2002")
+ assert_equal(fpos(tp('1.'), trim='-'), "1")
+ assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
+
+ @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
+ reason="only applies to ppc float128 values")
+ def test_ppc64_ibm_double_double128(self):
+ # check that the precision decreases once we get into the subnormal
+ # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
+ # which happens when the first double is normal and the second is
+ # subnormal.
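+        # (IBM double-double represents a float128 as a pair of float64s; the
+        # low double is roughly 2**-53 times the high one, so it drops below
+        # the float64 subnormal limit near 1e-292 * 2**-53 ~= 1e-308)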
+ x = np.float128('2.123123123123123123123123123123123e-286')
+ got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
+ expected = [
+ "1.06156156156156156156156156156157e-286",
+ "1.06156156156156156156156156156158e-287",
+ "1.06156156156156156156156156156159e-288",
+ "1.0615615615615615615615615615616e-289",
+ "1.06156156156156156156156156156157e-290",
+ "1.06156156156156156156156156156156e-291",
+ "1.0615615615615615615615615615616e-292",
+ "1.0615615615615615615615615615615e-293",
+ "1.061561561561561561561561561562e-294",
+ "1.06156156156156156156156156155e-295",
+ "1.0615615615615615615615615616e-296",
+ "1.06156156156156156156156156e-297",
+ "1.06156156156156156156156157e-298",
+ "1.0615615615615615615615616e-299",
+ "1.06156156156156156156156e-300",
+ "1.06156156156156156156155e-301",
+ "1.0615615615615615615616e-302",
+ "1.061561561561561561562e-303",
+ "1.06156156156156156156e-304",
+ "1.0615615615615615618e-305",
+ "1.06156156156156156e-306",
+ "1.06156156156156157e-307",
+ "1.0615615615615616e-308",
+ "1.06156156156156e-309",
+ "1.06156156156157e-310",
+ "1.0615615615616e-311",
+ "1.06156156156e-312",
+ "1.06156156154e-313",
+ "1.0615615616e-314",
+ "1.06156156e-315",
+ "1.06156155e-316",
+ "1.061562e-317",
+ "1.06156e-318",
+ "1.06155e-319",
+ "1.0617e-320",
+ "1.06e-321",
+ "1.04e-322",
+ "1e-323",
+ "0.0",
+ "0.0"]
+ assert_equal(got, expected)
+
+ # Note: we follow glibc behavior, but it (or gcc) might not be right.
+ # In particular we can get two values that print the same but are not
+ # equal:
+ a = np.float128('2')/np.float128('3')
+ b = np.float128(str(a))
+ assert_equal(str(a), str(b))
+ assert_(a != b)
+
+    def test_float32_roundtrip(self):
+ # gh-9360
+ x = np.float32(1024 - 2**-14)
+ y = np.float32(1024 - 2**-13)
+ assert_(repr(x) != repr(y))
+ assert_equal(np.float32(repr(x)), x)
+ assert_equal(np.float32(repr(y)), y)
+
+    def test_float64_vs_python(self):
+ # gh-2643, gh-6136, gh-6908
+ assert_equal(repr(np.float64(0.1)), repr(0.1))
+ assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py
new file mode 100644
index 00000000..570d006f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_shape_base.py
@@ -0,0 +1,824 @@
+import pytest
+import numpy as np
+from numpy.core import (
+ array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
+ newaxis, concatenate, stack
+ )
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+ _block_concatenate, _block_slicing)
+from numpy.testing import (
+ assert_, assert_raises, assert_array_equal, assert_equal,
+ assert_raises_regex, assert_warns, IS_PYPY
+ )
+
+
+class TestAtleast1d:
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1]), array([2])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [array([1, 2]), array([2, 3])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_1d(a), atleast_1d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_r1array(self):
+ """ Test to make sure equivalent Travis O's r1array function
+ """
+ assert_(atleast_1d(3).shape == (1,))
+ assert_(atleast_1d(3j).shape == (1,))
+ assert_(atleast_1d(3.0).shape == (1,))
+ assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
+
+
+class TestAtleast2d:
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1]]), array([[2]])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [array([[1, 2]]), array([[2, 3]])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_2d(a), atleast_2d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+ def test_r2array(self):
+ """ Test to make sure equivalent Travis O's r2array function
+ """
+ assert_(atleast_2d(3).shape == (1, 1))
+ assert_(atleast_2d([3j, 1]).shape == (1, 2))
+ assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
+
+
+class TestAtleast3d:
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [array([[[1]]]), array([[[2]]])]
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1, 2])
+ b = array([2, 3])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [array([[[1], [2]]]), array([[[2], [3]]])]
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [a[:,:, newaxis], b[:,:, newaxis]]
+ assert_array_equal(res, desired)
+
+ def test_3D_array(self):
+ a = array([[1, 2], [1, 2]])
+ b = array([[2, 3], [2, 3]])
+ a = array([a, a])
+ b = array([b, b])
+ res = [atleast_3d(a), atleast_3d(b)]
+ desired = [a, b]
+ assert_array_equal(res, desired)
+
+
+class TestHstack:
+ def test_non_iterable(self):
+ assert_raises(TypeError, hstack, 1)
+
+ def test_empty_input(self):
+ assert_raises(ValueError, hstack, ())
+
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = hstack([a, b])
+ desired = array([1, 2])
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1])
+ b = array([2])
+ res = hstack([a, b])
+ desired = array([1, 2])
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1], [2]])
+ b = array([[1], [2]])
+ res = hstack([a, b])
+ desired = array([[1, 1], [2, 2]])
+ assert_array_equal(res, desired)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ hstack((np.arange(3) for _ in range(2)))
+ with assert_warns(FutureWarning):
+ hstack(map(lambda x: x, np.ones((3, 2))))
+
+ def test_casting_and_dtype(self):
+ a = np.array([1, 2, 3])
+ b = np.array([2.5, 3.5, 4.5])
+ res = np.hstack((a, b), casting="unsafe", dtype=np.int64)
+ expected_res = np.array([1, 2, 3, 2, 3, 4])
+ assert_array_equal(res, expected_res)
+
+ def test_casting_and_dtype_type_error(self):
+ a = np.array([1, 2, 3])
+ b = np.array([2.5, 3.5, 4.5])
+ with pytest.raises(TypeError):
+ hstack((a, b), casting="safe", dtype=np.int64)
+
+
+class TestVstack:
+ def test_non_iterable(self):
+ assert_raises(TypeError, vstack, 1)
+
+ def test_empty_input(self):
+ assert_raises(ValueError, vstack, ())
+
+ def test_0D_array(self):
+ a = array(1)
+ b = array(2)
+ res = vstack([a, b])
+ desired = array([[1], [2]])
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = array([1])
+ b = array([2])
+ res = vstack([a, b])
+ desired = array([[1], [2]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = array([[1], [2]])
+ b = array([[1], [2]])
+ res = vstack([a, b])
+ desired = array([[1], [2], [1], [2]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array2(self):
+ a = array([1, 2])
+ b = array([1, 2])
+ res = vstack([a, b])
+ desired = array([[1, 2], [1, 2]])
+ assert_array_equal(res, desired)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ vstack((np.arange(3) for _ in range(2)))
+
+ def test_casting_and_dtype(self):
+ a = np.array([1, 2, 3])
+ b = np.array([2.5, 3.5, 4.5])
+ res = np.vstack((a, b), casting="unsafe", dtype=np.int64)
+ expected_res = np.array([[1, 2, 3], [2, 3, 4]])
+ assert_array_equal(res, expected_res)
+
+ def test_casting_and_dtype_type_error(self):
+ a = np.array([1, 2, 3])
+ b = np.array([2.5, 3.5, 4.5])
+ with pytest.raises(TypeError):
+ vstack((a, b), casting="safe", dtype=np.int64)
+
+
+class TestConcatenate:
+ def test_returns_copy(self):
+ a = np.eye(3)
+ b = np.concatenate([a])
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
+ def test_exceptions(self):
+ # test axis must be in bounds
+ for ndim in [1, 2, 3]:
+ a = np.ones((1,)*ndim)
+ np.concatenate((a, a), axis=0) # OK
+ assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
+ assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))
+
+ # Scalars cannot be concatenated
+ assert_raises(ValueError, concatenate, (0,))
+ assert_raises(ValueError, concatenate, (np.array(0),))
+
+ # dimensionality must match
+ assert_raises_regex(
+ ValueError,
+ r"all the input arrays must have same number of dimensions, but "
+ r"the array at index 0 has 1 dimension\(s\) and the array at "
+ r"index 1 has 2 dimension\(s\)",
+ np.concatenate, (np.zeros(1), np.zeros((1, 1))))
+
+ # test shapes must match except for concatenation axis
+ a = np.ones((1, 2, 3))
+ b = np.ones((2, 2, 3))
+ axis = list(range(3))
+ for i in range(3):
+ np.concatenate((a, b), axis=axis[0]) # OK
+ assert_raises_regex(
+ ValueError,
+ "all the input array dimensions except for the concatenation axis "
+ "must match exactly, but along dimension {}, the array at "
+ "index 0 has size 1 and the array at index 1 has size 2"
+ .format(i),
+ np.concatenate, (a, b), axis=axis[1])
+ assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
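+            # rotate the arrays and the axis list so each dimension takes a
+            # turn as the mismatched one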
+ a = np.moveaxis(a, -1, 0)
+ b = np.moveaxis(b, -1, 0)
+ axis.append(axis.pop(0))
+
+ # No arrays to concatenate raises ValueError
+ assert_raises(ValueError, concatenate, ())
+
+ def test_concatenate_axis_None(self):
+ a = np.arange(4, dtype=np.float64).reshape((2, 2))
+ b = list(range(3))
+ c = ['x']
+ r = np.concatenate((a, a), axis=None)
+ assert_equal(r.dtype, a.dtype)
+ assert_equal(r.ndim, 1)
+ r = np.concatenate((a, b), axis=None)
+ assert_equal(r.size, a.size + len(b))
+ assert_equal(r.dtype, a.dtype)
+ r = np.concatenate((a, b, c), axis=None, dtype="U")
+ d = array(['0.0', '1.0', '2.0', '3.0',
+ '0', '1', '2', 'x'])
+ assert_array_equal(r, d)
+
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
+ def test_large_concatenate_axis_None(self):
+ # When no axis is given, concatenate uses flattened versions.
+ # This also had a bug with many arrays (see gh-5979).
+ x = np.arange(1, 100)
+ r = np.concatenate(x, None)
+ assert_array_equal(x, r)
+
+ # This should probably be deprecated:
+ r = np.concatenate(x, 100) # axis is >= MAXDIMS
+ assert_array_equal(x, r)
+
+ def test_concatenate(self):
+ # Test concatenate function
+ # One sequence returns unmodified (but as array)
+ r4 = list(range(4))
+ assert_array_equal(concatenate((r4,)), r4)
+ # Any sequence
+ assert_array_equal(concatenate((tuple(r4),)), r4)
+ assert_array_equal(concatenate((array(r4),)), r4)
+ # 1D default concatenation
+ r3 = list(range(3))
+ assert_array_equal(concatenate((r4, r3)), r4 + r3)
+ # Mixed sequence types
+ assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
+ assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
+ # Explicit axis specification
+ assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
+ # Including negative
+ assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
+ # 2D
+ a23 = array([[10, 11, 12], [13, 14, 15]])
+ a13 = array([[0, 1, 2]])
+ res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
+ assert_array_equal(concatenate((a23, a13)), res)
+ assert_array_equal(concatenate((a23, a13), 0), res)
+ assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
+ assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
+        # Arrays must match shape
+ assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
+ # 3D
+ res = arange(2 * 3 * 7).reshape((2, 3, 7))
+ a0 = res[..., :4]
+ a1 = res[..., 4:6]
+ a2 = res[..., 6:]
+ assert_array_equal(concatenate((a0, a1, a2), 2), res)
+ assert_array_equal(concatenate((a0, a1, a2), -1), res)
+ assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
+ def test_operator_concat(self):
+ import operator
+ a = array([1, 2])
+ b = array([3, 4])
+ n = [1,2]
+ res = array([1, 2, 3, 4])
+ assert_raises(TypeError, operator.concat, a, b)
+ assert_raises(TypeError, operator.concat, a, n)
+ assert_raises(TypeError, operator.concat, n, a)
+ assert_raises(TypeError, operator.concat, a, 1)
+ assert_raises(TypeError, operator.concat, 1, a)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ @pytest.mark.parametrize("axis", [None, 0])
+ @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
+ @pytest.mark.parametrize("casting",
+ ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
+ def test_out_and_dtype(self, axis, out_dtype, casting):
+ # Compare usage of `out=out` with `dtype=out.dtype`
+ out = np.empty(4, dtype=out_dtype)
+ to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))
+
+ if not np.can_cast(to_concat[0], out_dtype, casting=casting):
+ with assert_raises(TypeError):
+ concatenate(to_concat, out=out, axis=axis, casting=casting)
+ with assert_raises(TypeError):
+ concatenate(to_concat, dtype=out.dtype,
+ axis=axis, casting=casting)
+ else:
+ res_out = concatenate(to_concat, out=out,
+ axis=axis, casting=casting)
+ res_dtype = concatenate(to_concat, dtype=out.dtype,
+ axis=axis, casting=casting)
+ assert res_out is out
+ assert_array_equal(out, res_dtype)
+ assert res_dtype.dtype == out_dtype
+
+ with assert_raises(TypeError):
+ concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)
+
+ @pytest.mark.parametrize("axis", [None, 0])
+ @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
+ @pytest.mark.parametrize("arrs",
+ [([0.],), ([0.], [1]), ([0], ["string"], [1.])])
+ def test_dtype_with_promotion(self, arrs, string_dt, axis):
+ # Note that U0 and S0 should be deprecated eventually and changed to
+ # actually give the empty string result (together with `np.array`)
+ res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
+ # The actual dtype should be identical to a cast (of a double array):
+ assert res.dtype == np.array(1.).astype(string_dt).dtype
+
+ @pytest.mark.parametrize("axis", [None, 0])
+ def test_string_dtype_does_not_inspect(self, axis):
+ with pytest.raises(TypeError):
+ np.concatenate(([None], [1]), dtype="S", axis=axis)
+ with pytest.raises(TypeError):
+ np.concatenate(([None], [1]), dtype="U", axis=axis)
+
+ @pytest.mark.parametrize("axis", [None, 0])
+ def test_subarray_error(self, axis):
+ with pytest.raises(TypeError, match=".*subarray dtype"):
+ np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
+
+
+def test_stack():
+ # non-iterable input
+ assert_raises(TypeError, stack, 1)
+
+ # 0d input
+ for input_ in [(1, 2, 3),
+ [np.int32(1), np.int32(2), np.int32(3)],
+ [np.array(1), np.array(2), np.array(3)]]:
+ assert_array_equal(stack(input_), [1, 2, 3])
+ # 1d input examples
+ a = np.array([1, 2, 3])
+ b = np.array([4, 5, 6])
+ r1 = array([[1, 2, 3], [4, 5, 6]])
+ assert_array_equal(np.stack((a, b)), r1)
+ assert_array_equal(np.stack((a, b), axis=1), r1.T)
+ # all input types
+ assert_array_equal(np.stack(list([a, b])), r1)
+ assert_array_equal(np.stack(array([a, b])), r1)
+ # all shapes for 1d input
+ arrays = [np.random.randn(3) for _ in range(10)]
+ axes = [0, 1, -1, -2]
+ expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
+ for axis, expected_shape in zip(axes, expected_shapes):
+ assert_equal(np.stack(arrays, axis).shape, expected_shape)
+ assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=2)
+ assert_raises_regex(np.AxisError, 'out of bounds', stack, arrays, axis=-3)
+ # all shapes for 2d input
+ arrays = [np.random.randn(3, 4) for _ in range(10)]
+ axes = [0, 1, 2, -1, -2, -3]
+ expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
+ (3, 4, 10), (3, 10, 4), (10, 3, 4)]
+ for axis, expected_shape in zip(axes, expected_shapes):
+ assert_equal(np.stack(arrays, axis).shape, expected_shape)
+ # empty arrays
+ assert_(stack([[], [], []]).shape == (3, 0))
+ assert_(stack([[], [], []], axis=1).shape == (0, 3))
+ # out
+ out = np.zeros_like(r1)
+ np.stack((a, b), out=out)
+ assert_array_equal(out, r1)
+ # edge cases
+ assert_raises_regex(ValueError, 'need at least one array', stack, [])
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [1, np.arange(3)])
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.arange(3), 1])
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.arange(3), 1], axis=1)
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
+ assert_raises_regex(ValueError, 'must have the same shape',
+ stack, [np.arange(2), np.arange(3)])
+ # generator is deprecated
+ with assert_warns(FutureWarning):
+ result = stack((x for x in range(3)))
+ assert_array_equal(result, np.array([0, 1, 2]))
+    # casting and dtype test
+ a = np.array([1, 2, 3])
+ b = np.array([2.5, 3.5, 4.5])
+ res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64)
+ expected_res = np.array([[1, 2], [2, 3], [3, 4]])
+ assert_array_equal(res, expected_res)
+    # casting and dtype with TypeError
+ with assert_raises(TypeError):
+ stack((a, b), dtype=np.int64, axis=1, casting="safe")
+
+
+@pytest.mark.parametrize("axis", [0])
+@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"])
+@pytest.mark.parametrize("casting",
+ ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
+def test_stack_out_and_dtype(axis, out_dtype, casting):
+ to_concat = (array([1, 2]), array([3, 4]))
+ res = array([[1, 2], [3, 4]])
+ out = np.zeros_like(res)
+
+ if not np.can_cast(to_concat[0], out_dtype, casting=casting):
+ with assert_raises(TypeError):
+ stack(to_concat, dtype=out_dtype,
+ axis=axis, casting=casting)
+ else:
+ res_out = stack(to_concat, out=out,
+ axis=axis, casting=casting)
+ res_dtype = stack(to_concat, dtype=out_dtype,
+ axis=axis, casting=casting)
+ assert res_out is out
+ assert_array_equal(out, res_dtype)
+ assert res_dtype.dtype == out_dtype
+
+ with assert_raises(TypeError):
+ stack(to_concat, out=out, dtype=out_dtype, axis=axis)
+
+
+class TestBlock:
+ @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
+ def block(self, request):
+        # Blocking small arrays and large arrays goes through different
+        # paths; which algorithm is used depends on the number of element
+        # copies required.
+        # We define a test fixture that forces most tests to go through
+        # both code paths.
+        # Ultimately, this should be removed if a single algorithm is found
+        # to be faster for both small and large arrays.
+ def _block_force_concatenate(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+ def _block_force_slicing(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_slicing(arrays, list_ndim, result_ndim)
+
+ if request.param == 'force_concatenate':
+ return _block_force_concatenate
+ elif request.param == 'force_slicing':
+ return _block_force_slicing
+ elif request.param == 'block':
+ return block
+ else:
+ raise ValueError('Unknown blocking request. There is a typo in the tests.')
+
+ def test_returns_copy(self, block):
+ a = np.eye(3)
+ b = block(a)
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
+ def test_block_total_size_estimate(self, block):
+ _, _, _, total_size = _block_setup([1])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1]])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1, 1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1], [1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
+ assert total_size == 4
+
+ def test_block_simple_row_wise(self, block):
+ a_2d = np.ones((2, 2))
+ b_2d = 2 * a_2d
+ desired = np.array([[1, 1, 2, 2],
+ [1, 1, 2, 2]])
+ result = block([a_2d, b_2d])
+ assert_equal(desired, result)
+
+ def test_block_simple_column_wise(self, block):
+ a_2d = np.ones((2, 2))
+ b_2d = 2 * a_2d
+ expected = np.array([[1, 1],
+ [1, 1],
+ [2, 2],
+ [2, 2]])
+ result = block([[a_2d], [b_2d]])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_row_wise(self, block):
+        # 1-D vectors are treated as row arrays
+ a = np.array([1, 2, 3])
+ b = np.array([2, 3, 4])
+ expected = np.array([1, 2, 3, 2, 3, 4])
+ result = block([a, b])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_multiple_rows(self, block):
+ a = np.array([1, 2, 3])
+ b = np.array([2, 3, 4])
+ expected = np.array([[1, 2, 3, 2, 3, 4],
+ [1, 2, 3, 2, 3, 4]])
+ result = block([[a, b], [a, b]])
+ assert_equal(expected, result)
+
+ def test_block_with_1d_arrays_column_wise(self, block):
+        # 1-D vectors are treated as row arrays
+ a_1d = np.array([1, 2, 3])
+ b_1d = np.array([2, 3, 4])
+ expected = np.array([[1, 2, 3],
+ [2, 3, 4]])
+ result = block([[a_1d], [b_1d]])
+ assert_equal(expected, result)
+
+ def test_block_mixed_1d_and_2d(self, block):
+ a_2d = np.ones((2, 2))
+ b_1d = np.array([2, 2])
+ result = block([[a_2d], [b_1d]])
+ expected = np.array([[1, 1],
+ [1, 1],
+ [2, 2]])
+ assert_equal(expected, result)
+
+ def test_block_complicated(self, block):
+ # a bit more complicated
+ one_2d = np.array([[1, 1, 1]])
+ two_2d = np.array([[2, 2, 2]])
+ three_2d = np.array([[3, 3, 3, 3, 3, 3]])
+ four_1d = np.array([4, 4, 4, 4, 4, 4])
+ five_0d = np.array(5)
+ six_1d = np.array([6, 6, 6, 6, 6])
+ zero_2d = np.zeros((2, 6))
+
+ expected = np.array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4, 4],
+ [5, 6, 6, 6, 6, 6],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+
+ result = block([[one_2d, two_2d],
+ [three_2d],
+ [four_1d],
+ [five_0d, six_1d],
+ [zero_2d]])
+ assert_equal(result, expected)
+
+ def test_nested(self, block):
+ one = np.array([1, 1, 1])
+ two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
+ three = np.array([3, 3, 3])
+ four = np.array([4, 4, 4])
+ five = np.array(5)
+ six = np.array([6, 6, 6, 6, 6])
+ zero = np.zeros((2, 6))
+
+ result = block([
+ [
+ block([
+ [one],
+ [three],
+ [four]
+ ]),
+ two
+ ],
+ [five, six],
+ [zero]
+ ])
+ expected = np.array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 2, 2, 2],
+ [4, 4, 4, 2, 2, 2],
+ [5, 6, 6, 6, 6, 6],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0]])
+
+ assert_equal(result, expected)
+
+ def test_3d(self, block):
+ a000 = np.ones((2, 2, 2), int) * 1
+
+ a100 = np.ones((3, 2, 2), int) * 2
+ a010 = np.ones((2, 3, 2), int) * 3
+ a001 = np.ones((2, 2, 3), int) * 4
+
+ a011 = np.ones((2, 3, 3), int) * 5
+ a101 = np.ones((3, 2, 3), int) * 6
+ a110 = np.ones((3, 3, 2), int) * 7
+
+ a111 = np.ones((3, 3, 3), int) * 8
+
+ result = block([
+ [
+ [a000, a001],
+ [a010, a011],
+ ],
+ [
+ [a100, a101],
+ [a110, a111],
+ ]
+ ])
+ expected = array([[[1, 1, 4, 4, 4],
+ [1, 1, 4, 4, 4],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5]],
+
+ [[1, 1, 4, 4, 4],
+ [1, 1, 4, 4, 4],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5],
+ [3, 3, 5, 5, 5]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]],
+
+ [[2, 2, 6, 6, 6],
+ [2, 2, 6, 6, 6],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8],
+ [7, 7, 8, 8, 8]]])
+
+ assert_array_equal(result, expected)
+
+ def test_block_with_mismatched_shape(self, block):
+ a = np.array([0, 0])
+ b = np.eye(2)
+ assert_raises(ValueError, block, [a, b])
+ assert_raises(ValueError, block, [b, a])
+
+ to_block = [[np.ones((2,3)), np.ones((2,2))],
+ [np.ones((2,2)), np.ones((2,2))]]
+        assert_raises(ValueError, block, to_block)
+
+ def test_no_lists(self, block):
+ assert_equal(block(1), np.array(1))
+ assert_equal(block(np.eye(3)), np.eye(3))
+
+ def test_invalid_nesting(self, block):
+ msg = 'depths are mismatched'
+ assert_raises_regex(ValueError, msg, block, [1, [2]])
+ assert_raises_regex(ValueError, msg, block, [1, []])
+ assert_raises_regex(ValueError, msg, block, [[1], 2])
+ assert_raises_regex(ValueError, msg, block, [[], 2])
+ assert_raises_regex(ValueError, msg, block, [
+ [[1], [2]],
+ [[3, 4]],
+ [5] # missing brackets
+ ])
+
+ def test_empty_lists(self, block):
+ assert_raises_regex(ValueError, 'empty', block, [])
+ assert_raises_regex(ValueError, 'empty', block, [[]])
+ assert_raises_regex(ValueError, 'empty', block, [[1], []])
+
+ def test_tuple(self, block):
+ assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
+ assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
+
+ def test_different_ndims(self, block):
+ a = 1.
+ b = 2 * np.ones((1, 2))
+ c = 3 * np.ones((1, 1, 3))
+
+ result = block([a, b, c])
+ expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
+
+ assert_equal(result, expected)
+
+ def test_different_ndims_depths(self, block):
+ a = 1.
+ b = 2 * np.ones((1, 2))
+ c = 3 * np.ones((1, 2, 3))
+
+ result = block([[a, b], [c]])
+ expected = np.array([[[1., 2., 2.],
+ [3., 3., 3.],
+ [3., 3., 3.]]])
+
+ assert_equal(result, expected)
+
+ def test_block_memory_order(self, block):
+ # 3D
+ arr_c = np.zeros((3,)*3, order='C')
+ arr_f = np.zeros((3,)*3, order='F')
+
+ b_c = [[[arr_c, arr_c],
+ [arr_c, arr_c]],
+ [[arr_c, arr_c],
+ [arr_c, arr_c]]]
+
+ b_f = [[[arr_f, arr_f],
+ [arr_f, arr_f]],
+ [[arr_f, arr_f],
+ [arr_f, arr_f]]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+ arr_c = np.zeros((3, 3), order='C')
+ arr_f = np.zeros((3, 3), order='F')
+ # 2D
+ b_c = [[arr_c, arr_c],
+ [arr_c, arr_c]]
+
+ b_f = [[arr_f, arr_f],
+ [arr_f, arr_f]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+ class ArrayLike:
+ pass
+ a = ArrayLike()
+ b = ArrayLike()
+ c = ArrayLike()
+ assert_equal(list(_block_dispatcher(a)), [a])
+ assert_equal(list(_block_dispatcher([a])), [a])
+ assert_equal(list(_block_dispatcher([a, b])), [a, b])
+ assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+ # don't recurse into non-lists
+ assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_simd.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_simd.py
new file mode 100644
index 00000000..2c16243d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_simd.py
@@ -0,0 +1,1214 @@
+# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics
+# may be involved in their functionality.
+import pytest, math, re
+import itertools
+import operator
+from numpy.core._simd import targets, clear_floatstatus, get_floatstatus
+from numpy.core._multiarray_umath import __cpu_baseline__
+
+def check_floatstatus(divbyzero=False, overflow=False,
+ underflow=False, invalid=False,
+ all=False):
+ #define NPY_FPE_DIVIDEBYZERO 1
+ #define NPY_FPE_OVERFLOW 2
+ #define NPY_FPE_UNDERFLOW 4
+ #define NPY_FPE_INVALID 8
+ err = get_floatstatus()
+ ret = (all or divbyzero) and (err & 1) != 0
+ ret |= (all or overflow) and (err & 2) != 0
+ ret |= (all or underflow) and (err & 4) != 0
+ ret |= (all or invalid) and (err & 8) != 0
+ return ret
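+# usage sketch: call clear_floatstatus() before running an intrinsic, then
+# e.g. check_floatstatus(invalid=True) to see whether FP-invalid was raised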
+
+class _Test_Utility:
+ # submodule of the desired SIMD extension, e.g. targets["AVX512F"]
+ npyv = None
+ # the current data type suffix e.g. 's8'
+ sfx = None
+ # target name can be 'baseline' or one or more of CPU features
+ target_name = None
+
+ def __getattr__(self, attr):
+ """
+        Call NPYV intrinsics without the 'npyv' attribute prefix and
+        auto-suffix intrinsics according to the class attribute 'sfx'
+ """
+ return getattr(self.npyv, attr + "_" + self.sfx)
+
+ def _data(self, start=None, count=None, reverse=False):
+ """
+        Create a list of consecutive numbers matching the number of vector lanes.
+ """
+ if start is None:
+ start = 1
+ if count is None:
+ count = self.nlanes
+ rng = range(start, start + count)
+ if reverse:
+ rng = reversed(rng)
+ if self._is_fp():
+ return [x / 1.0 for x in rng]
+ return list(rng)
+
+ def _is_unsigned(self):
+ return self.sfx[0] == 'u'
+
+ def _is_signed(self):
+ return self.sfx[0] == 's'
+
+ def _is_fp(self):
+ return self.sfx[0] == 'f'
+
+ def _scalar_size(self):
+ return int(self.sfx[1:])
+
+ def _int_clip(self, seq):
+ if self._is_fp():
+ return seq
+ max_int = self._int_max()
+ min_int = self._int_min()
+ return [min(max(v, min_int), max_int) for v in seq]
+
+ def _int_max(self):
+ if self._is_fp():
+ return None
+ max_u = self._to_unsigned(self.setall(-1))[0]
+ if self._is_signed():
+ return max_u // 2
+ return max_u
+
+ def _int_min(self):
+ if self._is_fp():
+ return None
+ if self._is_unsigned():
+ return 0
+ return -(self._int_max() + 1)
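+    # e.g. for 's8': setall(-1) viewed as unsigned is 255, so _int_max()
+    # returns 255 // 2 == 127 and _int_min() returns -(127 + 1) == -128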
+
+ def _true_mask(self):
+ max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
+ return max_unsig[0]
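+    # e.g. for any 8-bit suffix the true mask is setall_u8(-1)[0] == 0xFF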
+
+ def _to_unsigned(self, vector):
+ if isinstance(vector, (list, tuple)):
+ return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
+ else:
+ sfx = vector.__name__.replace("npyv_", "")
+ if sfx[0] == "b":
+ cvt_intrin = "cvt_u{0}_b{0}"
+ else:
+ cvt_intrin = "reinterpret_u{0}_{1}"
+ return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)
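+    # e.g. a 'b32' vector goes through cvt_u32_b32, while an 's16' vector
+    # goes through reinterpret_u16_s16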
+
+ def _pinfinity(self):
+ return float("inf")
+
+ def _ninfinity(self):
+ return -float("inf")
+
+ def _nan(self):
+ return float("nan")
+
+ def _cpu_features(self):
+ target = self.target_name
+ if target == "baseline":
+ target = __cpu_baseline__
+ else:
+ target = target.split('__') # multi-target separator
+ return ' '.join(target)
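+    # e.g. a multi-target name 'AVX512F__AVX512CD' yields 'AVX512F AVX512CD'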
+
+class _SIMD_BOOL(_Test_Utility):
+ """
+ To test all boolean vector types at once
+ """
+ def _nlanes(self):
+ return getattr(self.npyv, "nlanes_u" + self.sfx[1:])
+
+ def _data(self, start=None, count=None, reverse=False):
+ true_mask = self._true_mask()
+ rng = range(self._nlanes())
+ if reverse:
+ rng = reversed(rng)
+ return [true_mask if x % 2 else 0 for x in rng]
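+    # e.g. with 4 lanes and an 8-bit true mask: [0, 0xFF, 0, 0xFF]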
+
+ def _load_b(self, data):
+ len_str = self.sfx[1:]
+ load = getattr(self.npyv, "load_u" + len_str)
+ cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}")
+ return cvt(load(data))
+
+ def test_operators_logical(self):
+ """
+ Logical operations for boolean types.
+ Test intrinsics:
+ npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX,
+          npyv_andc_b8, npyv_orc_b8, npyv_xnor_b8
+ """
+ data_a = self._data()
+ data_b = self._data(reverse=True)
+ vdata_a = self._load_b(data_a)
+ vdata_b = self._load_b(data_b)
+
+ data_and = [a & b for a, b in zip(data_a, data_b)]
+ vand = getattr(self, "and")(vdata_a, vdata_b)
+ assert vand == data_and
+
+ data_or = [a | b for a, b in zip(data_a, data_b)]
+ vor = getattr(self, "or")(vdata_a, vdata_b)
+ assert vor == data_or
+
+ data_xor = [a ^ b for a, b in zip(data_a, data_b)]
+ vxor = getattr(self, "xor")(vdata_a, vdata_b)
+ assert vxor == data_xor
+
+ vnot = getattr(self, "not")(vdata_a)
+ assert vnot == data_b
+
+ # among the boolean types, andc, orc and xnor only support b8
+ if self.sfx not in ("b8"):
+ return
+
+ data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)]
+ vandc = getattr(self, "andc")(vdata_a, vdata_b)
+ assert data_andc == vandc
+
+ data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)]
+ vorc = getattr(self, "orc")(vdata_a, vdata_b)
+ assert data_orc == vorc
+
+ data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)]
+ vxnor = getattr(self, "xnor")(vdata_a, vdata_b)
+ assert data_xnor == vxnor
+
+ def test_tobits(self):
+ data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)])
+ for data in (self._data(), self._data(reverse=True)):
+ vdata = self._load_b(data)
+ data_bits = data2bits(data)
+ tobits = self.tobits(vdata)
+ bin_tobits = bin(tobits)
+ assert bin_tobits == bin(data_bits)
+
+ def test_pack(self):
+ """
+ Pack multiple vectors into one
+ Test intrinsics:
+ npyv_pack_b8_b16
+ npyv_pack_b8_b32
+ npyv_pack_b8_b64
+ """
+ if self.sfx not in ("b16", "b32", "b64"):
+ return
+ # create the vectors
+ data = self._data()
+ rdata = self._data(reverse=True)
+ vdata = self._load_b(data)
+ vrdata = self._load_b(rdata)
+ pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}")
+        # for the scalar reference, concatenate the input lists into a
+        # single list (spack), then mask each element to keep only its
+        # first byte
+ if self.sfx == "b16":
+ spack = [(i & 0xFF) for i in (list(rdata) + list(data))]
+ vpack = pack_simd(vrdata, vdata)
+ elif self.sfx == "b32":
+ spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))]
+ vpack = pack_simd(vrdata, vrdata, vdata, vdata)
+ elif self.sfx == "b64":
+ spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))]
+ vpack = pack_simd(vrdata, vrdata, vrdata, vrdata,
+ vdata, vdata, vdata, vdata)
+ assert vpack == spack
+
+ @pytest.mark.parametrize("intrin", ["any", "all"])
+ @pytest.mark.parametrize("data", (
+ [-1, 0],
+ [0, -1],
+ [-1],
+ [0]
+ ))
+ def test_operators_crosstest(self, intrin, data):
+ """
+ Test intrinsics:
+ npyv_any_##SFX
+ npyv_all_##SFX
+ """
+ data_a = self._load_b(data * self._nlanes())
+ func = eval(intrin)
+ intrin = getattr(self, intrin)
+ desired = func(data_a)
+ simd = intrin(data_a)
+        assert bool(simd) == desired
+
+class _SIMD_INT(_Test_Utility):
+ """
+ To test all integer vector types at once
+ """
+ def test_operators_shift(self):
+ if self.sfx in ("u8", "s8"):
+ return
+
+ data_a = self._data(self._int_max() - self.nlanes)
+ data_b = self._data(self._int_min(), reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ for count in range(self._scalar_size()):
+ # load to cast
+ data_shl_a = self.load([a << count for a in data_a])
+ # left shift
+ shl = self.shl(vdata_a, count)
+ assert shl == data_shl_a
+ # load to cast
+ data_shr_a = self.load([a >> count for a in data_a])
+ # right shift
+ shr = self.shr(vdata_a, count)
+ assert shr == data_shr_a
+
+        # shifting by zero, the maximum, or an out-of-range immediate
+        # constant is not applicable and illogical
+ for count in range(1, self._scalar_size()):
+ # load to cast
+ data_shl_a = self.load([a << count for a in data_a])
+ # left shift by an immediate constant
+ shli = self.shli(vdata_a, count)
+ assert shli == data_shl_a
+ # load to cast
+ data_shr_a = self.load([a >> count for a in data_a])
+ # right shift by an immediate constant
+ shri = self.shri(vdata_a, count)
+ assert shri == data_shr_a
+
+ def test_arithmetic_subadd_saturated(self):
+ if self.sfx in ("u32", "s32", "u64", "s64"):
+ return
+
+ data_a = self._data(self._int_max() - self.nlanes)
+ data_b = self._data(self._int_min(), reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)])
+ adds = self.adds(vdata_a, vdata_b)
+ assert adds == data_adds
+
+ data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)])
+ subs = self.subs(vdata_a, vdata_b)
+ assert subs == data_subs
+
+ def test_math_max_min(self):
+ data_a = self._data()
+ data_b = self._data(self.nlanes)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ data_max = [max(a, b) for a, b in zip(data_a, data_b)]
+ simd_max = self.max(vdata_a, vdata_b)
+ assert simd_max == data_max
+
+ data_min = [min(a, b) for a, b in zip(data_a, data_b)]
+ simd_min = self.min(vdata_a, vdata_b)
+ assert simd_min == data_min
+
+ @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000])
+ def test_reduce_max_min(self, start):
+ """
+ Test intrinsics:
+ npyv_reduce_max_##sfx
+ npyv_reduce_min_##sfx
+ """
+ vdata_a = self.load(self._data(start))
+ assert self.reduce_max(vdata_a) == max(vdata_a)
+ assert self.reduce_min(vdata_a) == min(vdata_a)
+
+
+class _SIMD_FP32(_Test_Utility):
+ """
+ To only test single precision
+ """
+ def test_conversions(self):
+ """
+        Round to nearest even integer, assuming the CPU control register
+        is set to round-to-nearest.
+ Test intrinsics:
+ npyv_round_s32_##SFX
+ """
+ features = self._cpu_features()
+ if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features):
+ # very costly to emulate nearest even on Armv7
+ # instead we round halves to up. e.g. 0.5 -> 1, -0.5 -> -1
+ _round = lambda v: int(v + (0.5 if v >= 0 else -0.5))
+ else:
+ _round = round
+ vdata_a = self.load(self._data())
+ vdata_a = self.sub(vdata_a, self.setall(0.5))
+ data_round = [_round(x) for x in vdata_a]
+ vround = self.round_s32(vdata_a)
+ assert vround == data_round
+
+class _SIMD_FP64(_Test_Utility):
+ """
+ To only test double precision
+ """
+ def test_conversions(self):
+ """
+        Round to nearest even integer, assuming the CPU control register
+        is set to round-to-nearest.
+ Test intrinsics:
+ npyv_round_s32_##SFX
+ """
+ vdata_a = self.load(self._data())
+ vdata_a = self.sub(vdata_a, self.setall(0.5))
+ vdata_b = self.mul(vdata_a, self.setall(-1.5))
+ data_round = [round(x) for x in list(vdata_a) + list(vdata_b)]
+ vround = self.round_s32(vdata_a, vdata_b)
+ assert vround == data_round
+
+class _SIMD_FP(_Test_Utility):
+ """
+ To test all float vector types at once
+ """
+ def test_arithmetic_fused(self):
+ vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3
+ vdata_cx2 = self.add(vdata_c, vdata_c)
+ # multiply and add, a*b + c
+ data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
+ fma = self.muladd(vdata_a, vdata_b, vdata_c)
+ assert fma == data_fma
+ # multiply and subtract, a*b - c
+ fms = self.mulsub(vdata_a, vdata_b, vdata_c)
+ data_fms = self.sub(data_fma, vdata_cx2)
+ assert fms == data_fms
+ # negate multiply and add, -(a*b) + c
+ nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
+ data_nfma = self.sub(vdata_cx2, data_fma)
+ assert nfma == data_nfma
+ # negate multiply and subtract, -(a*b) - c
+ nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
+ data_nfms = self.mul(data_fma, self.setall(-1))
+ assert nfms == data_nfms
+
+ def test_abs(self):
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ data = self._data()
+ vdata = self.load(self._data())
+
+ abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
+ for case, desired in abs_cases:
+ data_abs = [desired]*self.nlanes
+ vabs = self.abs(self.setall(case))
+ assert vabs == pytest.approx(data_abs, nan_ok=True)
+
+ vabs = self.abs(self.mul(vdata, self.setall(-1)))
+ assert vabs == data
+
+ def test_sqrt(self):
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ data = self._data()
+ vdata = self.load(self._data())
+
+ sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
+ for case, desired in sqrt_cases:
+ data_sqrt = [desired]*self.nlanes
+ sqrt = self.sqrt(self.setall(case))
+ assert sqrt == pytest.approx(data_sqrt, nan_ok=True)
+
+ data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision
+ sqrt = self.sqrt(vdata)
+ assert sqrt == data_sqrt
+
+ def test_square(self):
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ data = self._data()
+ vdata = self.load(self._data())
+ # square
+ square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
+ for case, desired in square_cases:
+ data_square = [desired]*self.nlanes
+ square = self.square(self.setall(case))
+ assert square == pytest.approx(data_square, nan_ok=True)
+
+ data_square = [x*x for x in data]
+ square = self.square(vdata)
+ assert square == data_square
+
+ @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil),
+ ("trunc", math.trunc), ("floor", math.floor), ("rint", round)])
+ def test_rounding(self, intrin, func):
+ """
+ Test intrinsics:
+ npyv_rint_##SFX
+ npyv_ceil_##SFX
+ npyv_trunc_##SFX
+          npyv_floor_##SFX
+ """
+ intrin_name = intrin
+ intrin = getattr(self, intrin)
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ # special cases
+ round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf))
+ for case, desired in round_cases:
+ data_round = [desired]*self.nlanes
+ _round = intrin(self.setall(case))
+ assert _round == pytest.approx(data_round, nan_ok=True)
+
+ for x in range(0, 2**20, 256**2):
+ for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15):
+ data = self.load([(x+a)*w for a in range(self.nlanes)])
+ data_round = [func(x) for x in data]
+ _round = intrin(data)
+ assert _round == data_round
+
+ # test large numbers
+ for i in (
+ 1.1529215045988576e+18, 4.6116860183954304e+18,
+ 5.902958103546122e+20, 2.3611832414184488e+21
+ ):
+ x = self.setall(i)
+ y = intrin(x)
+ data_round = [func(n) for n in x]
+ assert y == data_round
+
+ # signed zero
+ if intrin_name == "floor":
+ data_szero = (-0.0,)
+ else:
+ data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5)
+
+ for w in data_szero:
+ _round = self._to_unsigned(intrin(self.setall(w)))
+ data_round = self._to_unsigned(self.setall(-0.0))
+ assert _round == data_round
+
+ @pytest.mark.parametrize("intrin", [
+ "max", "maxp", "maxn", "min", "minp", "minn"
+ ])
+ def test_max_min(self, intrin):
+ """
+ Test intrinsics:
+ npyv_max_##sfx
+ npyv_maxp_##sfx
+ npyv_maxn_##sfx
+ npyv_min_##sfx
+ npyv_minp_##sfx
+ npyv_minn_##sfx
+ npyv_reduce_max_##sfx
+ npyv_reduce_maxp_##sfx
+ npyv_reduce_maxn_##sfx
+ npyv_reduce_min_##sfx
+ npyv_reduce_minp_##sfx
+ npyv_reduce_minn_##sfx
+ """
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0)
+ func = eval(intrin[:3])
+ reduce_intrin = getattr(self, "reduce_" + intrin)
+ intrin = getattr(self, intrin)
+ hf_nlanes = self.nlanes//2
+
+ cases = (
+ ([0.0, -0.0], [-0.0, 0.0]),
+ ([10, -10], [10, -10]),
+ ([pinf, 10], [10, ninf]),
+ ([10, pinf], [ninf, 10]),
+ ([10, -10], [10, -10]),
+ ([-10, 10], [-10, 10])
+ )
+ for op1, op2 in cases:
+ vdata_a = self.load(op1*hf_nlanes)
+ vdata_b = self.load(op2*hf_nlanes)
+ data = func(vdata_a, vdata_b)
+ simd = intrin(vdata_a, vdata_b)
+ assert simd == data
+ data = func(vdata_a)
+ simd = reduce_intrin(vdata_a)
+ assert simd == data
+
+ if not chk_nan:
+ return
+ if chk_nan == 1:
+ test_nan = lambda a, b: (
+ b if math.isnan(a) else a if math.isnan(b) else b
+ )
+ else:
+ test_nan = lambda a, b: (
+ nan if math.isnan(a) or math.isnan(b) else b
+ )
+ cases = (
+ (nan, 10),
+ (10, nan),
+ (nan, pinf),
+ (pinf, nan),
+ (nan, nan)
+ )
+ for op1, op2 in cases:
+ vdata_ab = self.load([op1, op2]*hf_nlanes)
+ data = test_nan(op1, op2)
+ simd = reduce_intrin(vdata_ab)
+ assert simd == pytest.approx(data, nan_ok=True)
+ vdata_a = self.setall(op1)
+ vdata_b = self.setall(op2)
+ data = [data] * self.nlanes
+ simd = intrin(vdata_a, vdata_b)
+ assert simd == pytest.approx(data, nan_ok=True)
+
+ def test_reciprocal(self):
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ data = self._data()
+ vdata = self.load(self._data())
+
+ recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
+ for case, desired in recip_cases:
+ data_recip = [desired]*self.nlanes
+ recip = self.recip(self.setall(case))
+ assert recip == pytest.approx(data_recip, nan_ok=True)
+
+ data_recip = self.load([1/x for x in data]) # load to truncate precision
+ recip = self.recip(vdata)
+ assert recip == data_recip
+
+ def test_special_cases(self):
+ """
+ Compare Not NaN. Test intrinsics:
+ npyv_notnan_##SFX
+ """
+ nnan = self.notnan(self.setall(self._nan()))
+ assert nnan == [0]*self.nlanes
+
+ @pytest.mark.parametrize("intrin_name", [
+ "rint", "trunc", "ceil", "floor"
+ ])
+ def test_unary_invalid_fpexception(self, intrin_name):
+ intrin = getattr(self, intrin_name)
+ for d in [float("nan"), float("inf"), -float("inf")]:
+ v = self.setall(d)
+ clear_floatstatus()
+ intrin(v)
+ assert check_floatstatus(invalid=True) == False
+
+ @pytest.mark.parametrize('py_comp,np_comp', [
+ (operator.lt, "cmplt"),
+ (operator.le, "cmple"),
+ (operator.gt, "cmpgt"),
+ (operator.ge, "cmpge"),
+ (operator.eq, "cmpeq"),
+ (operator.ne, "cmpneq")
+ ])
+ def test_comparison_with_nan(self, py_comp, np_comp):
+ pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
+ mask_true = self._true_mask()
+
+ def to_bool(vector):
+ return [lane == mask_true for lane in vector]
+
+ intrin = getattr(self, np_comp)
+ cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan),
+ (ninf, nan), (-0.0, +0.0))
+ for case_operand1, case_operand2 in cmp_cases:
+ data_a = [case_operand1]*self.nlanes
+ data_b = [case_operand2]*self.nlanes
+ vdata_a = self.setall(case_operand1)
+ vdata_b = self.setall(case_operand2)
+ vcmp = to_bool(intrin(vdata_a, vdata_b))
+ data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)]
+ assert vcmp == data_cmp
+
+ @pytest.mark.parametrize("intrin", ["any", "all"])
+ @pytest.mark.parametrize("data", (
+ [float("nan"), 0],
+ [0, float("nan")],
+ [float("nan"), 1],
+ [1, float("nan")],
+ [float("nan"), float("nan")],
+ [0.0, -0.0],
+ [-0.0, 0.0],
+ [1.0, -0.0]
+ ))
+ def test_operators_crosstest(self, intrin, data):
+ """
+ Test intrinsics:
+ npyv_any_##SFX
+ npyv_all_##SFX
+ """
+ data_a = self.load(data * self.nlanes)
+ func = eval(intrin)
+ intrin = getattr(self, intrin)
+ desired = func(data_a)
+ simd = intrin(data_a)
+        assert bool(simd) == desired
+
+class _SIMD_ALL(_Test_Utility):
+ """
+ To test all vector types at once
+ """
+ def test_memory_load(self):
+ data = self._data()
+ # unaligned load
+ load_data = self.load(data)
+ assert load_data == data
+ # aligned load
+ loada_data = self.loada(data)
+ assert loada_data == data
+ # stream load
+ loads_data = self.loads(data)
+ assert loads_data == data
+ # load lower part
+ loadl = self.loadl(data)
+ loadl_half = list(loadl)[:self.nlanes//2]
+ data_half = data[:self.nlanes//2]
+ assert loadl_half == data_half
+ assert loadl != data # detect overflow
+
+ def test_memory_store(self):
+ data = self._data()
+ vdata = self.load(data)
+ # unaligned store
+ store = [0] * self.nlanes
+ self.store(store, vdata)
+ assert store == data
+ # aligned store
+ store_a = [0] * self.nlanes
+ self.storea(store_a, vdata)
+ assert store_a == data
+ # stream store
+ store_s = [0] * self.nlanes
+ self.stores(store_s, vdata)
+ assert store_s == data
+ # store lower part
+ store_l = [0] * self.nlanes
+ self.storel(store_l, vdata)
+ assert store_l[:self.nlanes//2] == data[:self.nlanes//2]
+ assert store_l != vdata # detect overflow
+ # store higher part
+ store_h = [0] * self.nlanes
+ self.storeh(store_h, vdata)
+ assert store_h[:self.nlanes//2] == data[self.nlanes//2:]
+ assert store_h != vdata # detect overflow
+
+ def test_memory_partial_load(self):
+ if self.sfx in ("u8", "s8", "u16", "s16"):
+ return
+
+ data = self._data()
+ lanes = list(range(1, self.nlanes + 1))
+ lanes += [self.nlanes**2, self.nlanes**4] # test out of range
+ for n in lanes:
+ load_till = self.load_till(data, n, 15)
+ data_till = data[:n] + [15] * (self.nlanes-n)
+ assert load_till == data_till
+ load_tillz = self.load_tillz(data, n)
+ data_tillz = data[:n] + [0] * (self.nlanes-n)
+ assert load_tillz == data_tillz
+
+ def test_memory_partial_store(self):
+ if self.sfx in ("u8", "s8", "u16", "s16"):
+ return
+
+ data = self._data()
+ data_rev = self._data(reverse=True)
+ vdata = self.load(data)
+ lanes = list(range(1, self.nlanes + 1))
+ lanes += [self.nlanes**2, self.nlanes**4]
+ for n in lanes:
+ data_till = data_rev.copy()
+ data_till[:n] = data[:n]
+ store_till = self._data(reverse=True)
+ self.store_till(store_till, n, vdata)
+ assert store_till == data_till
+
+ def test_memory_noncont_load(self):
+ if self.sfx in ("u8", "s8", "u16", "s16"):
+ return
+
+ for stride in range(1, 64):
+ data = self._data(count=stride*self.nlanes)
+ data_stride = data[::stride]
+ loadn = self.loadn(data, stride)
+ assert loadn == data_stride
+
+ for stride in range(-64, 0):
+ data = self._data(stride, -stride*self.nlanes)
+ data_stride = self.load(data[::stride]) # cast unsigned
+ loadn = self.loadn(data, stride)
+ assert loadn == data_stride
+
+ def test_memory_noncont_partial_load(self):
+ if self.sfx in ("u8", "s8", "u16", "s16"):
+ return
+
+ lanes = list(range(1, self.nlanes + 1))
+ lanes += [self.nlanes**2, self.nlanes**4]
+ for stride in range(1, 64):
+ data = self._data(count=stride*self.nlanes)
+ data_stride = data[::stride]
+ for n in lanes:
+ data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
+ loadn_till = self.loadn_till(data, stride, n, 15)
+ assert loadn_till == data_stride_till
+ data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
+ loadn_tillz = self.loadn_tillz(data, stride, n)
+ assert loadn_tillz == data_stride_tillz
+
+ for stride in range(-64, 0):
+ data = self._data(stride, -stride*self.nlanes)
+ data_stride = list(self.load(data[::stride])) # cast unsigned
+ for n in lanes:
+ data_stride_till = data_stride[:n] + [15] * (self.nlanes-n)
+ loadn_till = self.loadn_till(data, stride, n, 15)
+ assert loadn_till == data_stride_till
+ data_stride_tillz = data_stride[:n] + [0] * (self.nlanes-n)
+ loadn_tillz = self.loadn_tillz(data, stride, n)
+ assert loadn_tillz == data_stride_tillz
+
+ def test_memory_noncont_store(self):
+ if self.sfx in ("u8", "s8", "u16", "s16"):
+ return
+
+ vdata = self.load(self._data())
+ for stride in range(1, 64):
+ data = [15] * stride * self.nlanes
+ data[::stride] = vdata
+ storen = [15] * stride * self.nlanes
+ storen += [127]*64
+ self.storen(storen, stride, vdata)
+ assert storen[:-64] == data
+ assert storen[-64:] == [127]*64 # detect overflow
+
+ for stride in range(-64, 0):
+ data = [15] * -stride * self.nlanes
+ data[::stride] = vdata
+ storen = [127]*64
+ storen += [15] * -stride * self.nlanes
+ self.storen(storen, stride, vdata)
+ assert storen[64:] == data
+ assert storen[:64] == [127]*64 # detect overflow
+
+ def test_memory_noncont_partial_store(self):
+ if self.sfx in ("u8", "s8", "u16", "s16"):
+ return
+
+ data = self._data()
+ vdata = self.load(data)
+ lanes = list(range(1, self.nlanes + 1))
+ lanes += [self.nlanes**2, self.nlanes**4]
+ for stride in range(1, 64):
+ for n in lanes:
+ data_till = [15] * stride * self.nlanes
+ data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
+ storen_till = [15] * stride * self.nlanes
+ storen_till += [127]*64
+ self.storen_till(storen_till, stride, n, vdata)
+ assert storen_till[:-64] == data_till
+ assert storen_till[-64:] == [127]*64 # detect overflow
+
+ for stride in range(-64, 0):
+ for n in lanes:
+ data_till = [15] * -stride * self.nlanes
+ data_till[::stride] = data[:n] + [15] * (self.nlanes-n)
+ storen_till = [127]*64
+ storen_till += [15] * -stride * self.nlanes
+ self.storen_till(storen_till, stride, n, vdata)
+ assert storen_till[64:] == data_till
+ assert storen_till[:64] == [127]*64 # detect overflow
+
+ @pytest.mark.parametrize("intrin, table_size, elsize", [
+ ("self.lut32", 32, 32),
+ ("self.lut16", 16, 64)
+ ])
+ def test_lut(self, intrin, table_size, elsize):
+ """
+ Test lookup table intrinsics:
+ npyv_lut32_##sfx
+ npyv_lut16_##sfx
+ """
+ if elsize != self._scalar_size():
+ return
+ intrin = eval(intrin)
+ idx_intrin = getattr(self.npyv, f"setall_u{elsize}")
+ table = range(0, table_size)
+ for i in table:
+ broadi = self.setall(i)
+ idx = idx_intrin(i)
+ lut = intrin(table, idx)
+ assert lut == broadi
+
+ def test_misc(self):
+ broadcast_zero = self.zero()
+ assert broadcast_zero == [0] * self.nlanes
+ for i in range(1, 10):
+ broadcasti = self.setall(i)
+ assert broadcasti == [i] * self.nlanes
+
+ data_a, data_b = self._data(), self._data(reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ # The py level of npyv_set_* doesn't support ignoring extra specified
+ # lanes or filling non-specified lanes with zero.
+ vset = self.set(*data_a)
+ assert vset == data_a
+ # The py level of npyv_setf_* doesn't support ignoring extra specified
+ # lanes or filling non-specified lanes with the specified scalar.
+ vsetf = self.setf(10, *data_a)
+ assert vsetf == data_a
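+ # Illustration of the comments above (C-level behavior, not exercised by
+ # these Python wrappers): npyv_setf_u8(10, 1, 2) would presumably yield
+ # [1, 2, 10, 10, ...], filling the remaining lanes with the fill value 10.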
+
+ # We're testing the sanity of _simd's type-vector,
+ # reinterpret* intrinsics itself are tested via compiler
+ # during the build of _simd module
+ sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"]
+ if self.npyv.simd_f64:
+ sfxes.append("f64")
+ if self.npyv.simd_f32:
+ sfxes.append("f32")
+ for sfx in sfxes:
+ vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__
+ assert vec_name == "npyv_" + sfx
+
+ # select & mask operations
+ select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b)
+ assert select_a == data_a
+ select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b)
+ assert select_b == data_b
+
+ # test extract elements
+ assert self.extract0(vdata_b) == vdata_b[0]
+
+ # cleanup intrinsic is only used with AVX for
+ # zeroing registers to avoid the AVX-SSE transition penalty,
+ # so nothing to test here
+ self.npyv.cleanup()
+
+ def test_reorder(self):
+ data_a, data_b = self._data(), self._data(reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+ # lower half part
+ data_a_lo = data_a[:self.nlanes//2]
+ data_b_lo = data_b[:self.nlanes//2]
+ # higher half part
+ data_a_hi = data_a[self.nlanes//2:]
+ data_b_hi = data_b[self.nlanes//2:]
+ # combine two lower parts
+ combinel = self.combinel(vdata_a, vdata_b)
+ assert combinel == data_a_lo + data_b_lo
+ # combine two higher parts
+ combineh = self.combineh(vdata_a, vdata_b)
+ assert combineh == data_a_hi + data_b_hi
+ # combine x2
+ combine = self.combine(vdata_a, vdata_b)
+ assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi)
+ # zip(interleave)
+ data_zipl = [v for p in zip(data_a_lo, data_b_lo) for v in p]
+ data_ziph = [v for p in zip(data_a_hi, data_b_hi) for v in p]
+ vzip = self.zip(vdata_a, vdata_b)
+ assert vzip == (data_zipl, data_ziph)
+
+ def test_reorder_rev64(self):
+ # Reverse elements of each 64-bit lane
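+ # e.g. with 32-bit lanes each 64-bit chunk holds two elements,
+ # so [0, 1, 2, 3] becomes [1, 0, 3, 2]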
+ ssize = self._scalar_size()
+ if ssize == 64:
+ return
+ data_rev64 = [
+ y for x in range(0, self.nlanes, 64//ssize)
+ for y in reversed(range(x, x + 64//ssize))
+ ]
+ rev64 = self.rev64(self.load(range(self.nlanes)))
+ assert rev64 == data_rev64
+
+ @pytest.mark.parametrize('func, intrin', [
+ (operator.lt, "cmplt"),
+ (operator.le, "cmple"),
+ (operator.gt, "cmpgt"),
+ (operator.ge, "cmpge"),
+ (operator.eq, "cmpeq")
+ ])
+ def test_operators_comparison(self, func, intrin):
+ if self._is_fp():
+ data_a = self._data()
+ else:
+ data_a = self._data(self._int_max() - self.nlanes)
+ data_b = self._data(self._int_min(), reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+ intrin = getattr(self, intrin)
+
+ mask_true = self._true_mask()
+ def to_bool(vector):
+ return [lane == mask_true for lane in vector]
+
+ data_cmp = [func(a, b) for a, b in zip(data_a, data_b)]
+ cmp = to_bool(intrin(vdata_a, vdata_b))
+ assert cmp == data_cmp
+
+ def test_operators_logical(self):
+ if self._is_fp():
+ data_a = self._data()
+ else:
+ data_a = self._data(self._int_max() - self.nlanes)
+ data_b = self._data(self._int_min(), reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ if self._is_fp():
+ data_cast_a = self._to_unsigned(vdata_a)
+ data_cast_b = self._to_unsigned(vdata_b)
+ cast, cast_data = self._to_unsigned, self._to_unsigned
+ else:
+ data_cast_a, data_cast_b = data_a, data_b
+ cast, cast_data = lambda a: a, self.load
+
+ data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
+ vxor = cast(self.xor(vdata_a, vdata_b))
+ assert vxor == data_xor
+
+ data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
+ vor = cast(getattr(self, "or")(vdata_a, vdata_b))
+ assert vor == data_or
+
+ data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
+ vand = cast(getattr(self, "and")(vdata_a, vdata_b))
+ assert vand == data_and
+
+ data_not = cast_data([~a for a in data_cast_a])
+ vnot = cast(getattr(self, "not")(vdata_a))
+ assert vnot == data_not
+
+ if self.sfx not in ("u8"):
+ return
+ data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)]
+ vandc = cast(getattr(self, "andc")(vdata_a, vdata_b))
+ assert vandc == data_andc
+
+ @pytest.mark.parametrize("intrin", ["any", "all"])
+ @pytest.mark.parametrize("data", (
+ [1, 2, 3, 4],
+ [-1, -2, -3, -4],
+ [0, 1, 2, 3, 4],
+ [0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff],
+ [0, -1, -2, -3, 4],
+ [0],
+ [1],
+ [-1]
+ ))
+ def test_operators_crosstest(self, intrin, data):
+ """
+ Test intrinsics:
+ npyv_any_##SFX
+ npyv_all_##SFX
+ """
+ data_a = self.load(data * self.nlanes)
+ func = eval(intrin)
+ intrin = getattr(self, intrin)
+ desired = func(data_a)
+ simd = intrin(data_a)
+ assert bool(simd) == desired
+
+ def test_conversion_boolean(self):
+ bsfx = "b" + self.sfx[1:]
+ to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
+ from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))
+
+ false_vb = to_boolean(self.setall(0))
+ true_vb = self.cmpeq(self.setall(0), self.setall(0))
+ assert false_vb != true_vb
+
+ false_vsfx = from_boolean(false_vb)
+ true_vsfx = from_boolean(true_vb)
+ assert false_vsfx != true_vsfx
+
+ def test_conversion_expand(self):
+ """
+ Test expand intrinsics:
+ npyv_expand_u16_u8
+ npyv_expand_u32_u16
+ """
+ if self.sfx not in ("u8", "u16"):
+ return
+ totype = self.sfx[0]+str(int(self.sfx[1:])*2)
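+ # e.g. "u8" expands to "u16", and "u16" expands to "u32"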
+ expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}")
+ # values close to the integer limit, so any deviation is detectable
+ data = self._data(self._int_max() - self.nlanes)
+ vdata = self.load(data)
+ edata = expand(vdata)
+ # lower half part
+ data_lo = data[:self.nlanes//2]
+ # higher half part
+ data_hi = data[self.nlanes//2:]
+ assert edata == (data_lo, data_hi)
+
+ def test_arithmetic_subadd(self):
+ if self._is_fp():
+ data_a = self._data()
+ else:
+ data_a = self._data(self._int_max() - self.nlanes)
+ data_b = self._data(self._int_min(), reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ # non-saturated
+ data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast
+ add = self.add(vdata_a, vdata_b)
+ assert add == data_add
+ data_sub = self.load([a - b for a, b in zip(data_a, data_b)])
+ sub = self.sub(vdata_a, vdata_b)
+ assert sub == data_sub
+
+ def test_arithmetic_mul(self):
+ if self.sfx in ("u64", "s64"):
+ return
+
+ if self._is_fp():
+ data_a = self._data()
+ else:
+ data_a = self._data(self._int_max() - self.nlanes)
+ data_b = self._data(self._int_min(), reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ data_mul = self.load([a * b for a, b in zip(data_a, data_b)])
+ mul = self.mul(vdata_a, vdata_b)
+ assert mul == data_mul
+
+ def test_arithmetic_div(self):
+ if not self._is_fp():
+ return
+
+ data_a, data_b = self._data(), self._data(reverse=True)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ # load to truncate f64 to precision of f32
+ data_div = self.load([a / b for a, b in zip(data_a, data_b)])
+ div = self.div(vdata_a, vdata_b)
+ assert div == data_div
+
+ def test_arithmetic_intdiv(self):
+ """
+ Test integer division intrinsics:
+ npyv_divisor_##sfx
+ npyv_divc_##sfx
+ """
+ if self._is_fp():
+ return
+
+ int_min = self._int_min()
+ def trunc_div(a, d):
+ """
+ Divide towards zero; works with large integers (> 2**53) and
+ wraps around on overflow, as C integer division does.
+ """
+ if d == -1 and a == int_min:
+ return a
+ sign_a, sign_d = a < 0, d < 0
+ if a == 0 or sign_a == sign_d:
+ return a // d
+ return (a + sign_d - sign_a) // d + 1
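+ # worked example of the semantics above: trunc_div(-7, 2) == -3,
+ # whereas Python's floor division gives -7 // 2 == -4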
+
+ data = [1, -int_min] # to test overflow
+ data += range(0, 2**8, 2**5)
+ data += range(0, 2**8, 2**5-1)
+ bsize = self._scalar_size()
+ if bsize > 8:
+ data += range(2**8, 2**16, 2**13)
+ data += range(2**8, 2**16, 2**13-1)
+ if bsize > 16:
+ data += range(2**16, 2**32, 2**29)
+ data += range(2**16, 2**32, 2**29-1)
+ if bsize > 32:
+ data += range(2**32, 2**64, 2**61)
+ data += range(2**32, 2**64, 2**61-1)
+ # negate
+ data += [-x for x in data]
+ for dividend, divisor in itertools.product(data, data):
+ divisor = self.setall(divisor)[0] # cast
+ if divisor == 0:
+ continue
+ dividend = self.load(self._data(dividend))
+ data_divc = [trunc_div(a, divisor) for a in dividend]
+ divisor_parms = self.divisor(divisor)
+ divc = self.divc(dividend, divisor_parms)
+ assert divc == data_divc
+
+ def test_arithmetic_reduce_sum(self):
+ """
+ Test reduce sum intrinsics:
+ npyv_sum_##sfx
+ """
+ if self.sfx not in ("u32", "u64", "f32", "f64"):
+ return
+ # reduce sum
+ data = self._data()
+ vdata = self.load(data)
+
+ data_sum = sum(data)
+ vsum = self.sum(vdata)
+ assert vsum == data_sum
+
+ def test_arithmetic_reduce_sumup(self):
+ """
+ Test extend reduce sum intrinsics:
+ npyv_sumup_##sfx
+ """
+ if self.sfx not in ("u8", "u16"):
+ return
+ rdata = (0, self.nlanes, self._int_min(), self._int_max()-self.nlanes)
+ for r in rdata:
+ data = self._data(r)
+ vdata = self.load(data)
+ data_sum = sum(data)
+ vsum = self.sumup(vdata)
+ assert vsum == data_sum
+
+ def test_mask_conditional(self):
+ """
+ Conditional addition and subtraction for all supported data types.
+ Test intrinsics:
+ npyv_ifadd_##SFX, npyv_ifsub_##SFX
+ """
+ vdata_a = self.load(self._data())
+ vdata_b = self.load(self._data(reverse=True))
+ true_mask = self.cmpeq(self.zero(), self.zero())
+ false_mask = self.cmpneq(self.zero(), self.zero())
+
+ data_sub = self.sub(vdata_b, vdata_a)
+ ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b)
+ assert ifsub == data_sub
+ ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b)
+ assert ifsub == vdata_b
+
+ data_add = self.add(vdata_b, vdata_a)
+ ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b)
+ assert ifadd == data_add
+ ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b)
+ assert ifadd == vdata_b
+
+bool_sfx = ("b8", "b16", "b32", "b64")
+int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
+fp_sfx = ("f32", "f64")
+all_sfx = int_sfx + fp_sfx
+tests_registry = {
+ bool_sfx: _SIMD_BOOL,
+ int_sfx : _SIMD_INT,
+ fp_sfx : _SIMD_FP,
+ ("f32",): _SIMD_FP32,
+ ("f64",): _SIMD_FP64,
+ all_sfx : _SIMD_ALL
+}
+for target_name, npyv in targets.items():
+ simd_width = npyv.simd if npyv else ''
+ pretty_name = target_name.split('__') # multi-target separator
+ if len(pretty_name) > 1:
+ # multi-target
+ pretty_name = f"({' '.join(pretty_name)})"
+ else:
+ pretty_name = pretty_name[0]
+
+ skip = ""
+ skip_sfx = dict()
+ if not npyv:
+ skip = f"target '{pretty_name}' isn't supported by current machine"
+ elif not npyv.simd:
+ skip = f"target '{pretty_name}' isn't supported by NPYV"
+ else:
+ if not npyv.simd_f32:
+ skip_sfx["f32"] = f"target '{pretty_name}' "\
+ "doesn't support single-precision"
+ if not npyv.simd_f64:
+ skip_sfx["f64"] = f"target '{pretty_name}' "\
+ "doesn't support double-precision"
+
+ for sfxes, cls in tests_registry.items():
+ for sfx in sfxes:
+ skip_m = skip_sfx.get(sfx, skip)
+ inhr = (cls,)
+ attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name)
+ tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
+ if skip_m:
+ pytest.mark.skip(reason=skip_m)(tcls)
+ globals()[tcls.__name__] = tcls
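+# Naming illustration only (the actual targets depend on the build): a
+# hypothetical 256-bit "AVX2" target would generate classes such as
+# "Test_SIMD_ALL_256_AVX2_f32" via the pattern above.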
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py
new file mode 100644
index 00000000..44dc58da
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_simd_module.py
@@ -0,0 +1,99 @@
+import pytest
+from numpy.core._simd import targets
+"""
+This testing unit only checks the sanity of common functionality, so it is
+enough to take one submodule that represents any of the enabled SIMD
+extensions and run the tests on it; a second submodule is required only
+for the single check about mixing data types between submodules.
+"""
+npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
+npyv, npyv2 = (npyvs + [None, None])[:2]
+
+unsigned_sfx = ["u8", "u16", "u32", "u64"]
+signed_sfx = ["s8", "s16", "s32", "s64"]
+fp_sfx = []
+if npyv and npyv.simd_f32:
+ fp_sfx.append("f32")
+if npyv and npyv.simd_f64:
+ fp_sfx.append("f64")
+
+int_sfx = unsigned_sfx + signed_sfx
+all_sfx = int_sfx + fp_sfx  # include fp suffixes only when supported
+
+@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
+class Test_SIMD_MODULE:
+
+ @pytest.mark.parametrize('sfx', all_sfx)
+ def test_num_lanes(self, sfx):
+ nlanes = getattr(npyv, "nlanes_" + sfx)
+ vector = getattr(npyv, "setall_" + sfx)(1)
+ assert len(vector) == nlanes
+
+ @pytest.mark.parametrize('sfx', all_sfx)
+ def test_type_name(self, sfx):
+ vector = getattr(npyv, "setall_" + sfx)(1)
+ assert vector.__name__ == "npyv_" + sfx
+
+ def test_raises(self):
+ a, b = [npyv.setall_u32(1)]*2
+ for sfx in all_sfx:
+ vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
+ pytest.raises(TypeError, vcb("add"), a)
+ pytest.raises(TypeError, vcb("add"), a, b, a)
+ pytest.raises(TypeError, vcb("setall"))
+ pytest.raises(TypeError, vcb("setall"), [1])
+ pytest.raises(TypeError, vcb("load"), 1)
+ pytest.raises(ValueError, vcb("load"), [1])
+ pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
+
+ @pytest.mark.skipif(not npyv2, reason=(
+ "could not find a second SIMD extension with NPYV support"
+ ))
+ def test_nomix(self):
+ # mixing vectors across submodules isn't allowed
+ a = npyv.setall_u32(1)
+ a2 = npyv2.setall_u32(1)
+ pytest.raises(TypeError, npyv.add_u32, a2, a2)
+ pytest.raises(TypeError, npyv2.add_u32, a, a)
+
+ @pytest.mark.parametrize('sfx', unsigned_sfx)
+ def test_unsigned_overflow(self, sfx):
+ nlanes = getattr(npyv, "nlanes_" + sfx)
+ maxu = (1 << int(sfx[1:])) - 1
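+ # e.g. for "u8": maxu == 255; wider values are truncated to the low bits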
+ maxu_72 = (1 << 72) - 1
+ lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
+ assert lane == maxu
+ lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
+ assert lanes == [maxu] * nlanes
+ lane = getattr(npyv, "setall_" + sfx)(-1)[0]
+ assert lane == maxu
+ lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
+ assert lanes == [maxu] * nlanes
+
+ @pytest.mark.parametrize('sfx', signed_sfx)
+ def test_signed_overflow(self, sfx):
+ nlanes = getattr(npyv, "nlanes_" + sfx)
+ maxs_72 = (1 << 71) - 1
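+ # the low 64 bits of (1 << 71) - 1 are all ones, so truncation to any
+ # lane width reads back as -1 in two's complement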
+ lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0]
+ assert lane == -1
+ lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes)
+ assert lanes == [-1] * nlanes
+ mins_72 = -1 << 71
+ lane = getattr(npyv, "setall_" + sfx)(mins_72)[0]
+ assert lane == 0
+ lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes)
+ assert lanes == [0] * nlanes
+
+ def test_truncate_f32(self):
+ f32 = npyv.setall_f32(0.1)[0]
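+ # float32(0.1) is approximately 0.10000000149, not the f64 literal 0.1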
+ assert f32 != 0.1
+ assert round(f32, 1) == 0.1
+
+ def test_compare(self):
+ data_range = range(0, npyv.nlanes_u32)
+ vdata = npyv.load_u32(data_range)
+ assert vdata == list(data_range)
+ assert vdata == tuple(data_range)
+ for i in data_range:
+ assert vdata[i] == data_range[i]
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_strings.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_strings.py
new file mode 100644
index 00000000..42f775e8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_strings.py
@@ -0,0 +1,99 @@
+import pytest
+
+import operator
+import numpy as np
+
+from numpy.testing import assert_array_equal
+
+
+COMPARISONS = [
+ (operator.eq, np.equal, "=="),
+ (operator.ne, np.not_equal, "!="),
+ (operator.lt, np.less, "<"),
+ (operator.le, np.less_equal, "<="),
+ (operator.gt, np.greater, ">"),
+ (operator.ge, np.greater_equal, ">="),
+]
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym):
+ arr_string = np.array(["a", "b"], dtype="S")
+ arr_unicode = np.array(["a", "c"], dtype="U")
+
+ with pytest.raises(TypeError, match="did not contain a loop"):
+ ufunc(arr_string, arr_unicode)
+
+ with pytest.raises(TypeError, match="did not contain a loop"):
+ ufunc(arr_unicode, arr_string)
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym):
+ arr_string = np.array(["a", "b"], dtype="S")
+ arr_unicode = np.array(["a", "c"], dtype="U")
+
+ # While there is no loop, manual casting is acceptable:
+ res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe")
+ res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe")
+
+ expected = op(arr_string.astype('U'), arr_unicode)
+ assert_array_equal(res1, expected)
+ assert_array_equal(res2, expected)
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+@pytest.mark.parametrize("dtypes", [
+ ("S2", "S2"), ("S2", "S10"),
+ ("<U1", "<U1"), ("<U1", ">U1"), (">U1", ">U1"),
+ ("<U1", "<U10"), ("<U1", ">U10")])
+@pytest.mark.parametrize("aligned", [True, False])
+def test_string_comparisons(op, ufunc, sym, dtypes, aligned):
+ # ensure native byte-order for the first view to stay within unicode range
+ native_dt = np.dtype(dtypes[0]).newbyteorder("=")
+ arr = np.arange(2**15).view(native_dt).astype(dtypes[0])
+ if not aligned:
+ # Make `arr` unaligned:
+ new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0])
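+ # dropping the first byte of the uint8 buffer shifts the view off the
+ # natural alignment of the target dtype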
+ new[...] = arr
+ arr = new
+
+ arr2 = arr.astype(dtypes[1], copy=True)
+ np.random.shuffle(arr2)
+ arr[0] = arr2[0] # make sure one matches
+
+ expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
+ assert_array_equal(op(arr, arr2), expected)
+ assert_array_equal(ufunc(arr, arr2), expected)
+ assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected)
+
+ expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
+ assert_array_equal(op(arr2, arr), expected)
+ assert_array_equal(ufunc(arr2, arr), expected)
+ assert_array_equal(np.compare_chararrays(arr2, arr, sym, False), expected)
+
+
+@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
+@pytest.mark.parametrize("dtypes", [
+ ("S2", "S2"), ("S2", "S10"), ("<U1", "<U1"), ("<U1", ">U10")])
+def test_string_comparisons_empty(op, ufunc, sym, dtypes):
+ arr = np.empty((1, 0, 1, 5), dtype=dtypes[0])
+ arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1])
+
+ expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool)
+ assert_array_equal(op(arr, arr2), expected)
+ assert_array_equal(ufunc(arr, arr2), expected)
+ assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected)
+
+
+@pytest.mark.parametrize("str_dt", ["S", "U"])
+@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"])
+def test_float_to_string_cast(str_dt, float_dt):
+ float_dt = np.dtype(float_dt)
+ fi = np.finfo(float_dt)
+ arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt)
+ expected = ["nan", "inf", "-inf", repr(fi.max), repr(fi.min)]
+ if float_dt.kind == 'c':
+ expected = [f"({r}+0j)" for r in expected]
+
+ res = arr.astype(str_dt)
+ assert_array_equal(res, np.array(expected, dtype=str_dt))
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py
new file mode 100644
index 00000000..cae84669
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_ufunc.py
@@ -0,0 +1,2798 @@
+import warnings
+import itertools
+import sys
+import ctypes as ct
+
+import pytest
+from pytest import param
+
+import numpy as np
+import numpy.core._umath_tests as umt
+import numpy.linalg._umath_linalg as uml
+import numpy.core._operand_flag_tests as opflag_tests
+import numpy.core._rational_tests as _rational_tests
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
+ assert_allclose, HAS_REFCOUNT, suppress_warnings, IS_WASM
+ )
+from numpy.testing._private.utils import requires_memory
+from numpy.compat import pickle
+
+
+UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
+ if isinstance(obj, np.ufunc)]
+UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
+
+
+class TestUfuncKwargs:
+ def test_kwarg_exact(self):
+ assert_raises(TypeError, np.add, 1, 2, castingx='safe')
+ assert_raises(TypeError, np.add, 1, 2, dtypex=int)
+ assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
+ assert_raises(TypeError, np.add, 1, 2, outx=None)
+ assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
+ assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
+ assert_raises(TypeError, np.add, 1, 2, subokx=False)
+ assert_raises(TypeError, np.add, 1, 2, wherex=[True])
+
+ def test_sig_signature(self):
+ assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
+ signature='ii->i')
+
+ def test_sig_dtype(self):
+ assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
+ dtype=int)
+ assert_raises(TypeError, np.add, 1, 2, signature='ii->i',
+ dtype=int)
+
+ def test_extobj_refcount(self):
+ # Should not segfault with USE_DEBUG.
+ assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
+
+
+class TestUfuncGenericLoops:
+ """Test generic loops.
+
+ The loops to be tested are:
+
+ PyUFunc_ff_f_As_dd_d
+ PyUFunc_ff_f
+ PyUFunc_dd_d
+ PyUFunc_gg_g
+ PyUFunc_FF_F_As_DD_D
+ PyUFunc_DD_D
+ PyUFunc_FF_F
+ PyUFunc_GG_G
+ PyUFunc_OO_O
+ PyUFunc_OO_O_method
+ PyUFunc_f_f_As_d_d
+ PyUFunc_d_d
+ PyUFunc_f_f
+ PyUFunc_g_g
+ PyUFunc_F_F_As_D_D
+ PyUFunc_F_F
+ PyUFunc_D_D
+ PyUFunc_G_G
+ PyUFunc_O_O
+ PyUFunc_O_O_method
+ PyUFunc_On_Om
+
+ Where:
+
+ f -- float
+ d -- double
+ g -- long double
+ F -- complex float
+ D -- complex double
+ G -- complex long double
+ O -- python object
+
+ It is difficult to assure that each of these loops is entered from the
+ Python level as the special cased loops are a moving target and the
+ corresponding types are architecture dependent. We probably need to
+ define C level testing ufuncs to get at them. For the time being, I've
+ just looked at the signatures registered in the build directory to find
+ relevant functions.
+
+ """
+ np_dtypes = [
+ (np.single, np.single), (np.single, np.double),
+ (np.csingle, np.csingle), (np.csingle, np.cdouble),
+ (np.double, np.double), (np.longdouble, np.longdouble),
+ (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
+
+ @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
+ def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
+ xs = np.full(10, input_dtype(x), dtype=output_dtype)
+ ys = f(xs)[::2]
+ assert_allclose(ys, y)
+ assert_equal(ys.dtype, output_dtype)
+
+ def f2(x, y):
+ return x**y
+
+ @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
+ def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
+ xs = np.full(10, input_dtype(x), dtype=output_dtype)
+ ys = f(xs, xs)[::2]
+ assert_allclose(ys, y)
+ assert_equal(ys.dtype, output_dtype)
+
+ # class to use in testing object method loops
+ class foo:
+ def conjugate(self):
+ return np.bool_(1)
+
+ def logical_xor(self, obj):
+ return np.bool_(1)
+
+ def test_unary_PyUFunc_O_O(self):
+ x = np.ones(10, dtype=object)
+ assert_(np.all(np.abs(x) == 1))
+
+ def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
+ x = np.full(10, foo(), dtype=object)
+ assert_(np.all(np.conjugate(x) == True))
+
+ def test_binary_PyUFunc_OO_O(self):
+ x = np.ones(10, dtype=object)
+ assert_(np.all(np.add(x, x) == 2))
+
+ def test_binary_PyUFunc_OO_O_method(self, foo=foo):
+ x = np.full(10, foo(), dtype=object)
+ assert_(np.all(np.logical_xor(x, x)))
+
+ def test_binary_PyUFunc_On_Om_method(self, foo=foo):
+ x = np.full((10, 2, 3), foo(), dtype=object)
+ assert_(np.all(np.logical_xor(x, x)))
+
+ def test_python_complex_conjugate(self):
+ # The conjugate ufunc should fall back to calling the method:
+ arr = np.array([1+2j, 3-4j], dtype="O")
+ assert isinstance(arr[0], complex)
+ res = np.conjugate(arr)
+ assert res.dtype == np.dtype("O")
+ assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
+
+ @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
+ def test_unary_PyUFunc_O_O_method_full(self, ufunc):
+ """Compare the result of the object loop with non-object one"""
+ val = np.float64(np.pi/4)
+
+ class MyFloat(np.float64):
+ def __getattr__(self, attr):
+ try:
+ return super().__getattr__(attr)
+ except AttributeError:
+ return lambda: getattr(np.core.umath, attr)(val)
+
+ # Use 0-D arrays, to ensure the same element call
+ num_arr = np.array(val, dtype=np.float64)
+ obj_arr = np.array(MyFloat(val), dtype="O")
+
+ with np.errstate(all="raise"):
+ try:
+ res_num = ufunc(num_arr)
+ except Exception as exc:
+ with assert_raises(type(exc)):
+ ufunc(obj_arr)
+ else:
+ res_obj = ufunc(obj_arr)
+ assert_array_almost_equal(res_num.astype("O"), res_obj)
+
+
+def _pickleable_module_global():
+ pass
+
+
+class TestUfunc:
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(pickle.dumps(np.sin,
+ protocol=proto)) is np.sin)
+
+ # Check that ufunc not defined in the top level numpy namespace
+ # such as numpy.core._rational_tests.test_add can also be pickled
+ res = pickle.loads(pickle.dumps(_rational_tests.test_add,
+ protocol=proto))
+ assert_(res is _rational_tests.test_add)
+
+ def test_pickle_withstring(self):
+ astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
+ b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
+ assert_(pickle.loads(astring) is np.cos)
+
+ def test_pickle_name_is_qualname(self):
+ # This tests that a simplification of our ufunc pickle code will
+ # lead to allowing qualnames as names. Future ufuncs should
+ # possibly add a specific qualname, or a hook into pickling instead
+ # (dask+numba may benefit).
+ _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
+ obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
+ assert obj is umt._pickleable_module_global_ufunc
+
+ def test_reduceat_shifting_sum(self):
+ L = 6
+ x = np.arange(L)
+ idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
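+ # here idx == [0, 2, 1, 3, 2, 4, 3, 5]; reduceat reduces x[i:j] for each
+ # consecutive pair, so [::2] keeps the sums over [0:2], [1:3], [2:4], [3:5]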
+ assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
+
+ def test_all_ufunc(self):
+ """Try to check presence and results of all ufuncs.
+
+ The list of ufuncs comes from generate_umath.py and is as follows:
+
+ ===== ==== ============= =============== ========================
+ done args function types notes
+ ===== ==== ============= =============== ========================
+ n 1 conjugate nums + O
+ n 1 absolute nums + O complex -> real
+ n 1 negative nums + O
+ n 1 sign nums + O -> int
+ n 1 invert bool + ints + O flts raise an error
+ n 1 degrees real + M cmplx raise an error
+ n 1 radians real + M cmplx raise an error
+ n 1 arccos flts + M
+ n 1 arccosh flts + M
+ n 1 arcsin flts + M
+ n 1 arcsinh flts + M
+ n 1 arctan flts + M
+ n 1 arctanh flts + M
+ n 1 cos flts + M
+ n 1 sin flts + M
+ n 1 tan flts + M
+ n 1 cosh flts + M
+ n 1 sinh flts + M
+ n 1 tanh flts + M
+ n 1 exp flts + M
+ n 1 expm1 flts + M
+ n 1 log flts + M
+ n 1 log10 flts + M
+ n 1 log1p flts + M
+ n 1 sqrt flts + M real x < 0 raises error
+ n 1 ceil real + M
+ n 1 trunc real + M
+ n 1 floor real + M
+ n 1 fabs real + M
+ n 1 rint flts + M
+ n 1 isnan flts -> bool
+ n 1 isinf flts -> bool
+ n 1 isfinite flts -> bool
+ n 1 signbit real -> bool
+ n 1 modf real -> (frac, int)
+ n 1 logical_not bool + nums + M -> bool
+ n 2 left_shift ints + O flts raise an error
+ n 2 right_shift ints + O flts raise an error
+ n 2 add bool + nums + O boolean + is ||
+ n 2 subtract bool + nums + O boolean - is ^
+ n 2 multiply bool + nums + O boolean * is &
+ n 2 divide nums + O
+ n 2 floor_divide nums + O
+ n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
+ n 2 fmod nums + M
+ n 2 power nums + O
+ n 2 greater bool + nums + O -> bool
+ n 2 greater_equal bool + nums + O -> bool
+ n 2 less bool + nums + O -> bool
+ n 2 less_equal bool + nums + O -> bool
+ n 2 equal bool + nums + O -> bool
+ n 2 not_equal bool + nums + O -> bool
+ n 2 logical_and bool + nums + M -> bool
+ n 2 logical_or bool + nums + M -> bool
+ n 2 logical_xor bool + nums + M -> bool
+ n 2 maximum bool + nums + O
+ n 2 minimum bool + nums + O
+ n 2 bitwise_and bool + ints + O flts raise an error
+ n 2 bitwise_or bool + ints + O flts raise an error
+ n 2 bitwise_xor bool + ints + O flts raise an error
+ n 2 arctan2 real + M
+ n 2 remainder ints + real + O
+ n 2 hypot real + M
+ ===== ==== ============= =============== ========================
+
+ Types other than those listed will be accepted, but they are cast to
+ the smallest compatible type for which the function is defined. The
+ casting rules are:
+
+ bool -> int8 -> float32
+ ints -> double
+
+ """
+ pass
+
+ # from include/numpy/ufuncobject.h
+ size_inferred = 2
+ can_ignore = 4
+ def test_signature0(self):
+ # the arguments to test_signature are: nin, nout, core_signature
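+ # A reading of the outputs, as reflected by the asserts below: num_dims
+ # counts the core dimensions per operand, ixs maps each core dimension
+ # to a shared index label, and flags/sizes record how each label's size
+ # is resolved (-1 meaning inferred from the operands at call time).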
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i),(i)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 1, 0))
+ assert_equal(ixs, (0, 0))
+ assert_equal(flags, (self.size_inferred,))
+ assert_equal(sizes, (-1,))
+
+ def test_signature1(self):
+ # empty core signature; treat as plain ufunc (with trivial core)
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(),()->()")
+ assert_equal(enabled, 0)
+ assert_equal(num_dims, (0, 0, 0))
+ assert_equal(ixs, ())
+ assert_equal(flags, ())
+ assert_equal(sizes, ())
+
+ def test_signature2(self):
+ # more complicated names for variables
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i1,i2),(J_1)->(_kAB)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 1))
+ assert_equal(ixs, (0, 1, 2, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature3(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i1, i12), (J_1)->(i12, i2)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 2))
+ assert_equal(ixs, (0, 1, 2, 1, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature4(self):
+ # matrix_multiply signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n,k),(k,m)->(n,m)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred,)*3)
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature5(self):
+ # matmul signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n?,k),(k,m?)->(n?,m?)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred | self.can_ignore,
+ self.size_inferred,
+ self.size_inferred | self.can_ignore))
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature6(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "(3)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature7(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3),(03,3),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (0, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature8(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3?),(3?,3?),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature9(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "( 3) -> ( )")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature10(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "( 3? ) , (3? , 3?) ,(n )-> ( 9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature_failure_extra_parenthesis(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "((i)),(i)->()")
+
+ def test_signature_failure_mismatching_parenthesis(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "(i),)i(->()")
+
+ def test_signature_failure_signature_missing_input_arg(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "(i),->()")
+
+ def test_signature_failure_signature_missing_output_arg(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 2, "(i),(i)->()")
+
+ def test_get_signature(self):
+ assert_equal(umt.inner1d.signature, "(i),(i)->()")
+
+ def test_forced_sig(self):
+ a = 0.5*np.arange(3, dtype='f8')
+ assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'),
+ [0, 0, 1])
+ assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
+ casting='unsafe'), [0, 0, 1])
+
+ b = np.zeros((3,), dtype='f8')
+ np.add(a, 0.5, out=b)
+ assert_equal(b, [0.5, 1, 1.5])
+ b[:] = 0
+ with pytest.warns(DeprecationWarning):
+ np.add(a, 0.5, sig='i', out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ with pytest.warns(DeprecationWarning):
+ np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+ b[:] = 0
+ np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
+ assert_equal(b, [0, 0, 1])
+
+ def test_signature_all_None(self):
+ # A signature of all None is an acceptable alternative (since 1.21)
+ # to not providing a signature at all.
+ res1 = np.add([3], [4], sig=(None, None, None))
+ res2 = np.add([3], [4])
+ assert_array_equal(res1, res2)
+ res1 = np.maximum([3], [4], sig=(None, None, None))
+ res2 = np.maximum([3], [4])
+ assert_array_equal(res1, res2)
+
+ with pytest.raises(TypeError):
+ # special case, that would be deprecated anyway, so errors:
+ np.add(3, 4, signature=(None,))
+
+ def test_signature_dtype_type(self):
+ # Since that will be the normal behaviour (past NumPy 1.21)
+ # we do support the types already:
+ float_dtype = type(np.dtype(np.float64))
+ np.add(3, 4, signature=(float_dtype, float_dtype, None))
+
+ @pytest.mark.parametrize("get_kwarg", [
+ lambda dt: dict(dtype=dt),
+ lambda dt: dict(signature=(dt, None, None))])
+ def test_signature_dtype_instances_allowed(self, get_kwarg):
+ # We allow certain dtype instances when there is a clear singleton
+ # and the given one is equivalent; mainly for backcompat.
+ int64 = np.dtype("int64")
+ int64_2 = pickle.loads(pickle.dumps(int64))
+ # Relies on pickling behavior; if the assert fails, just remove the test...
+ assert int64 is not int64_2
+
+ assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64
+ td = np.timedelta64(2, "s")
+ assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]"
+
+ @pytest.mark.parametrize("get_kwarg", [
+ param(lambda x: dict(dtype=x), id="dtype"),
+ param(lambda x: dict(signature=(x, None, None)), id="signature")])
+ def test_signature_dtype_instances_rejected(self, get_kwarg):
+ msg = "The `dtype` and `signature` arguments to ufuncs"
+
+ with pytest.raises(TypeError, match=msg):
+ np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder()))
+ with pytest.raises(TypeError, match=msg):
+ np.add(3, 5, **get_kwarg(np.dtype("m8[ns]")))
+ with pytest.raises(TypeError, match=msg):
+ np.add(3, 5, **get_kwarg("m8[ns]"))
+
+ @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"])
+ def test_partial_signature_mismatch(self, casting):
+ # If the second argument matches already, no need to specify it:
+ res = np.ldexp(np.float32(1.), np.int_(2), dtype="d")
+ assert res.dtype == "d"
+ res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d"))
+ assert res.dtype == "d"
+
+ # ldexp only has a loop for a long second argument; overriding
+ # the output cannot help with that (no matter the casting)
+ with pytest.raises(TypeError):
+ np.ldexp(1., np.uint64(3), dtype="d")
+ with pytest.raises(TypeError):
+ np.ldexp(1., np.uint64(3), signature=(None, None, "d"))
+
+ def test_partial_signature_mismatch_with_cache(self):
+ with pytest.raises(TypeError):
+ np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
+ # Ensure e,d->None is in the dispatching cache (double loop)
+ np.add(np.float16(1), np.float64(2))
+ # The error must still be raised:
+ with pytest.raises(TypeError):
+ np.add(np.float16(1), np.uint64(2), sig=("e", "d", None))
+
+ def test_use_output_signature_for_all_arguments(self):
+ # Test that providing only `dtype=` or `signature=(None, None, dtype)`
+ # is sufficient if falling back to a homogeneous signature works.
+ # In this case, the `intp, intp -> intp` loop is chosen.
+ res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe")
+ assert res == 1 # the cast happens first.
+ res = np.power(1.5, 2.8, signature=(None, None, np.intp),
+ casting="unsafe")
+ assert res == 1
+ with pytest.raises(TypeError):
+ # the unsafe casting would normally cause errors though:
+ np.power(1.5, 2.8, dtype=np.intp)
+
+ def test_signature_errors(self):
+ with pytest.raises(TypeError,
+ match="the signature object to ufunc must be a string or"):
+ np.add(3, 4, signature=123.) # neither a string nor a tuple
+
+ with pytest.raises(ValueError):
+ # bad symbols that do not translate to dtypes
+ np.add(3, 4, signature="%^->#")
+
+ with pytest.raises(ValueError):
+ np.add(3, 4, signature=b"ii-i") # incomplete and byte string
+
+ with pytest.raises(ValueError):
+ np.add(3, 4, signature="ii>i") # incomplete string
+
+ with pytest.raises(ValueError):
+ np.add(3, 4, signature=(None, "f8")) # bad length
+
+ with pytest.raises(UnicodeDecodeError):
+ np.add(3, 4, signature=b"\xff\xff->i")
+
+ def test_forced_dtype_times(self):
+ # Signatures only set the type numbers (not the actual loop dtypes)
+ # so using `M` in a signature/dtype should generally work:
+ a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]')
+ np.maximum(a, a, dtype="M")
+ np.maximum.reduce(a, dtype="M")
+
+ arr = np.arange(10, dtype="m8[s]")
+ np.add(arr, arr, dtype="m")
+ np.maximum(arr, arr, dtype="m")
+
+ @pytest.mark.parametrize("ufunc", [np.add, np.sqrt])
+ def test_cast_safety(self, ufunc):
+ """Basic test for the safest casts, because ufuncs inner loops can
+ indicate a cast-safety as well (which is normally always "no").
+ """
+ def call_ufunc(arr, **kwargs):
+ return ufunc(*(arr,) * ufunc.nin, **kwargs)
+
+ arr = np.array([1., 2., 3.], dtype=np.float32)
+ arr_bs = arr.astype(arr.dtype.newbyteorder())
+ expected = call_ufunc(arr)
+ # Normally, a "no" cast:
+ res = call_ufunc(arr, casting="no")
+ assert_array_equal(expected, res)
+ # Byte-swapping is not allowed with "no" though:
+ with pytest.raises(TypeError):
+ call_ufunc(arr_bs, casting="no")
+
+ # But is allowed with "equiv":
+ res = call_ufunc(arr_bs, casting="equiv")
+ assert_array_equal(expected, res)
+
+ # Casting to float64 is safe, but not equiv:
+ with pytest.raises(TypeError):
+ call_ufunc(arr_bs, dtype=np.float64, casting="equiv")
+
+ # but it is safe cast:
+ res = call_ufunc(arr_bs, dtype=np.float64, casting="safe")
+ expected = call_ufunc(arr.astype(np.float64)) # upcast
+ assert_array_equal(expected, res)
+
+ def test_true_divide(self):
+ a = np.array(10)
+ b = np.array(20)
+ tgt = np.array(0.5)
+
+ for tc in 'bhilqBHILQefdgFDG':
+ dt = np.dtype(tc)
+ aa = a.astype(dt)
+ bb = b.astype(dt)
+
+ # Check result value and dtype.
+ for x, y in itertools.product([aa, -aa], [bb, -bb]):
+
+ # Check with no output type specified
+ if tc in 'FDG':
+ tgt = complex(x)/complex(y)
+ else:
+ tgt = float(x)/float(y)
+
+ res = np.true_divide(x, y)
+ rtol = max(np.finfo(res).resolution, 1e-15)
+ assert_allclose(res, tgt, rtol=rtol)
+
+ if tc in 'bhilqBHILQ':
+ assert_(res.dtype.name == 'float64')
+ else:
+ assert_(res.dtype.name == dt.name )
+
+ # Check with output type specified. This also checks for the
+ # incorrect casts in issue gh-3484 because the unary '-' does
+ # not change types, even for unsigned types. Hence casts in the
+ # ufunc from signed to unsigned and vice versa will lead to
+ # errors in the values.
+ for tcout in 'bhilqBHILQ':
+ dtout = np.dtype(tcout)
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+
+ for tcout in 'efdg':
+ dtout = np.dtype(tcout)
+ if tc in 'FDG':
+ # Casting complex to float is not allowed
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+ else:
+ tgt = float(x)/float(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ # The value of tiny for double double is NaN
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning)
+ if not np.isnan(np.finfo(dtout).tiny):
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ else:
+ atol = 3e-308
+ # Some test values result in invalid for float16
+ # and the cast to it may overflow to inf.
+ with np.errstate(invalid='ignore', over='ignore'):
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res) and tcout == 'e':
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ for tcout in 'FDG':
+ dtout = np.dtype(tcout)
+ tgt = complex(x)/complex(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ # The value of tiny for double double is NaN
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning)
+ if not np.isnan(np.finfo(dtout).tiny):
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ else:
+ atol = 3e-308
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res):
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ # Check booleans
+ a = np.ones((), dtype=np.bool_)
+ res = np.true_divide(a, a)
+ assert_(res == 1.0)
+ assert_(res.dtype.name == 'float64')
+ res = np.true_divide(~a, a)
+ assert_(res == 0.0)
+ assert_(res.dtype.name == 'float64')
+
+ def test_sum_stability(self):
+ a = np.ones(500, dtype=np.float32)
+ assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
+
+ a = np.ones(500, dtype=np.float64)
+ assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_sum(self):
+ for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
+ for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
+ 128, 1024, 1235):
+ # warning if sum overflows, which it does in float16
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always", RuntimeWarning)
+
+ tgt = dt(v * (v + 1) / 2)
+ overflow = not np.isfinite(tgt)
+ assert_equal(len(w), 1 * overflow)
+
+ d = np.arange(1, v + 1, dtype=dt)
+
+ assert_almost_equal(np.sum(d), tgt)
+ assert_equal(len(w), 2 * overflow)
+
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+ assert_equal(len(w), 3 * overflow)
+
+ d = np.ones(500, dtype=dt)
+ assert_almost_equal(np.sum(d[::2]), 250.)
+ assert_almost_equal(np.sum(d[1::2]), 250.)
+ assert_almost_equal(np.sum(d[::3]), 167.)
+ assert_almost_equal(np.sum(d[1::3]), 167.)
+ assert_almost_equal(np.sum(d[::-2]), 250.)
+ assert_almost_equal(np.sum(d[-1::-2]), 250.)
+ assert_almost_equal(np.sum(d[::-3]), 167.)
+ assert_almost_equal(np.sum(d[-1::-3]), 167.)
+ # sum with first reduction entry != 0
+ d = np.ones((1,), dtype=dt)
+ d += d
+ assert_almost_equal(d, 2.)
+
+ def test_sum_complex(self):
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
+ 128, 1024, 1235):
+ tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
+ d = np.empty(v, dtype=dt)
+ d.real = np.arange(1, v + 1)
+ d.imag = -np.arange(1, v + 1)
+ assert_almost_equal(np.sum(d), tgt)
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+
+ d = np.ones(500, dtype=dt) + 1j
+ assert_almost_equal(np.sum(d[::2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[::3]), 167. + 167j)
+ assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
+ assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
+ assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
+ assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
+ # sum with first reduction entry != 0
+ d = np.ones((1,), dtype=dt) + 1j
+ d += d
+ assert_almost_equal(d, 2. + 2j)
+
+ def test_sum_initial(self):
+ # Integer, single axis
+ assert_equal(np.sum([3], initial=2), 5)
+
+ # Floating point
+ assert_almost_equal(np.sum([0.2], initial=0.1), 0.3)
+
+ # Multiple non-adjacent axes
+ assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
+ [12, 12, 12])
+
+ def test_sum_where(self):
+ # More extensive tests done in test_reduction_with_where.
+ assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
+ assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
+ where=[True, False]), [9., 5.])
+
+ def test_inner1d(self):
+ a = np.arange(6).reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
+ a = np.arange(6)
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
+
+ def test_broadcast(self):
+ msg = "broadcast"
+ a = np.arange(4).reshape((2, 1, 2))
+ b = np.arange(4).reshape((1, 2, 2))
+ assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+ msg = "extend & broadcast loop dimensions"
+ b = np.arange(4).reshape((2, 2))
+ assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+ # Broadcast in core dimensions should fail
+ a = np.arange(8).reshape((4, 2))
+ b = np.arange(4).reshape((4, 1))
+ assert_raises(ValueError, umt.inner1d, a, b)
+ # Extend core dimensions should fail
+ a = np.arange(8).reshape((4, 2))
+ b = np.array(7)
+ assert_raises(ValueError, umt.inner1d, a, b)
+ # Broadcast should fail
+ a = np.arange(2).reshape((2, 1, 1))
+ b = np.arange(3).reshape((3, 1, 1))
+ assert_raises(ValueError, umt.inner1d, a, b)
+
+ # Writing to a broadcasted array with overlap should warn, gh-2705
+ a = np.arange(2)
+ b = np.arange(4).reshape((2, 2))
+ u, v = np.broadcast_arrays(a, b)
+ assert_equal(u.strides[0], 0)
+ x = u + v
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ u += v
+ assert_equal(len(w), 1)
+ assert_(x[0, 0] != u[0, 0])
+
+ # Output reduction should not be allowed.
+ # See gh-15139
+ a = np.arange(6).reshape(3, 2)
+ b = np.ones(2)
+ out = np.empty(())
+ assert_raises(ValueError, umt.inner1d, a, b, out)
+ out2 = np.empty(3)
+ c = umt.inner1d(a, b, out2)
+ assert_(c is out2)
+
+ def test_out_broadcasts(self):
+ # For ufuncs and gufuncs (not for reductions), we currently allow
+ # the output to cause broadcasting of the input arrays, both along
+ # dimensions with shape 1 and along dimensions which do not exist
+ # at all in the inputs.
+ arr = np.arange(3).reshape(1, 3)
+ out = np.empty((5, 4, 3))
+ np.add(arr, arr, out=out)
+ assert (out == np.arange(3) * 2).all()
+
+ # The same holds for gufuncs (gh-16484)
+ umt.inner1d(arr, arr, out=out)
+ # the result would be just a scalar `5`, but is broadcast fully:
+ assert (out == 5).all()
+
+ @pytest.mark.parametrize(["arr", "out"], [
+ ([2], np.empty(())),
+ ([1, 2], np.empty(1)),
+ (np.ones((4, 3)), np.empty((4, 1)))],
+ ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"])
+ def test_out_broadcast_errors(self, arr, out):
+ # Output is (currently) allowed to broadcast inputs, but it cannot be
+ # smaller than the actual result.
+ with pytest.raises(ValueError, match="non-broadcastable"):
+ np.positive(arr, out=out)
+
+ with pytest.raises(ValueError, match="non-broadcastable"):
+ np.add(np.ones(()), arr, out=out)
+
+ def test_type_cast(self):
+ msg = "type cast"
+ a = np.arange(6, dtype='short').reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+ err_msg=msg)
+ msg = "type cast on one argument"
+ a = np.arange(6).reshape((2, 3))
+ b = a + 0.1
+ assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
+ err_msg=msg)
+
+ def test_endian(self):
+ msg = "big endian"
+ a = np.arange(6, dtype='>i4').reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+ err_msg=msg)
+ msg = "little endian"
+ a = np.arange(6, dtype='<i4').reshape((2, 3))
+ assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
+ err_msg=msg)
+
+ # Output should always be native-endian
+ Ba = np.arange(1, dtype='>f8')
+ La = np.arange(1, dtype='<f8')
+ assert_equal((Ba+Ba).dtype, np.dtype('f8'))
+ assert_equal((Ba+La).dtype, np.dtype('f8'))
+ assert_equal((La+Ba).dtype, np.dtype('f8'))
+ assert_equal((La+La).dtype, np.dtype('f8'))
+
+ assert_equal(np.absolute(La).dtype, np.dtype('f8'))
+ assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
+ assert_equal(np.negative(La).dtype, np.dtype('f8'))
+ assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
+
+ def test_incontiguous_array(self):
+ msg = "incontiguous memory layout of array"
+ x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
+ a = x[:, 0,:, 0,:, 0]
+ b = x[:, 1,:, 1,:, 1]
+ a[0, 0, 0] = -1
+ msg2 = "make sure it references to the original array"
+ assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
+ assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+ x = np.arange(24).reshape(2, 3, 4)
+ a = x.T
+ b = x.T
+ a[0, 0, 0] = -1
+ assert_equal(x[0, 0, 0], -1, err_msg=msg2)
+ assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
+
+ def test_output_argument(self):
+ msg = "output argument"
+ a = np.arange(12).reshape((2, 3, 2))
+ b = np.arange(4).reshape((2, 1, 2)) + 1
+ c = np.zeros((2, 3), dtype='int')
+ umt.inner1d(a, b, c)
+ assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
+ c[:] = -1
+ umt.inner1d(a, b, out=c)
+ assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
+
+ msg = "output argument with type cast"
+ c = np.zeros((2, 3), dtype='int16')
+ umt.inner1d(a, b, c)
+ assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
+ c[:] = -1
+ umt.inner1d(a, b, out=c)
+ assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
+
+ msg = "output argument with incontiguous layout"
+ c = np.zeros((2, 3, 4), dtype='int16')
+ umt.inner1d(a, b, c[..., 0])
+ assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
+ c[:] = -1
+ umt.inner1d(a, b, out=c[..., 0])
+ assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
+
+ def test_axes_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ # basic tests on inputs (outputs tested below with matrix_multiply).
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ # default
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # integers ok for single axis.
+ c = inner1d(a, b, axes=[-1, -1, ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # mix fine
+ c = inner1d(a, b, axes=[(-1,), -1, ()])
+ assert_array_equal(c, (a * b).sum(-1))
+ # can omit last axis.
+ c = inner1d(a, b, axes=[-1, -1])
+ assert_array_equal(c, (a * b).sum(-1))
+ # can pass in other types of integer (with __index__ protocol)
+ c = inner1d(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)])
+ assert_array_equal(c, (a * b).sum(-1))
+ # swap some axes
+ c = inner1d(a, b, axes=[0, 0])
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 2])
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ # Check errors for improperly constructed axes arguments.
+ # should have list.
+ assert_raises(TypeError, inner1d, a, b, axes=-1)
+ # needs enough elements
+ assert_raises(ValueError, inner1d, a, b, axes=[-1])
+ # should pass in indices.
+ assert_raises(TypeError, inner1d, a, b, axes=[-1.0, -1.0])
+ assert_raises(TypeError, inner1d, a, b, axes=[(-1.0,), -1])
+ assert_raises(TypeError, inner1d, a, b, axes=[None, 1])
+ # cannot pass an index unless there is only one dimension
+ # (output is wrong in this case)
+ assert_raises(TypeError, inner1d, a, b, axes=[-1, -1, -1])
+ # or pass in generally the wrong number of axes
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, -1, (-1,)])
+ assert_raises(ValueError, inner1d, a, b, axes=[-1, (-2, -1), ()])
+ # axes need to have same length.
+ assert_raises(ValueError, inner1d, a, b, axes=[0, 1])
+
+ # matrix_multiply signature: '(m,n),(n,p)->(m,p)'
+ mm = umt.matrix_multiply
+ a = np.arange(12).reshape((2, 3, 2))
+ b = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # Sanity check.
+ c = mm(a, b)
+ assert_array_equal(c, np.matmul(a, b))
+ # Default axes.
+ c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)])
+ assert_array_equal(c, np.matmul(a, b))
+ # Default with explicit axes.
+ c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)])
+ assert_array_equal(c, np.matmul(a, b))
+ # swap some axes.
+ c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)])
+ assert_array_equal(c, np.matmul(a.transpose(1, 0, 2),
+ b.transpose(0, 3, 1, 2)))
+ # Default with output array.
+ c = np.empty((2, 2, 3, 1))
+ d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)])
+ assert_(c is d)
+ assert_array_equal(c, np.matmul(a, b))
+ # Transposed output array
+ c = np.empty((1, 2, 2, 3))
+ d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)])
+ assert_(c is d)
+ assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2))
+ # Check errors for improperly constructed axes arguments.
+ # wrong argument
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # axes should be list
+ assert_raises(TypeError, mm, a, b, axes=1)
+ assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1)))
+ # list needs to have right length
+ assert_raises(ValueError, mm, a, b, axes=[])
+ assert_raises(ValueError, mm, a, b, axes=[(-2, -1)])
+ # list should contain tuples for multiple axes
+ assert_raises(TypeError, mm, a, b, axes=[-1, -1, -1])
+ assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), -1])
+ assert_raises(TypeError,
+ mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]])
+ assert_raises(TypeError,
+ mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]])
+ assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None])
+ # tuples should not have duplicated values
+ assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)])
+ # arrays should have enough axes.
+ z = np.zeros((2, 2))
+ assert_raises(ValueError, mm, z, z[0])
+ assert_raises(ValueError, mm, z, z, out=z[:, 0])
+ assert_raises(ValueError, mm, z[1], z, axes=[0, 1])
+ assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1])
+ # Regular ufuncs should not accept axes.
+ assert_raises(TypeError, np.add, 1., 1., axes=[0])
+ # should be able to deal with bad unrelated kwargs.
+ assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True)
+
+ def test_axis_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axis=-1)
+ assert_array_equal(c, (a * b).sum(-1))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, axis=-1, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ c = inner1d(a, b, axis=0)
+ assert_array_equal(c, (a * b).sum(0))
+ # Sanity checks on innerwt and cumsum.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0),
+ np.sum(a * b * w, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0))
+ assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1))
+ out = np.empty_like(a)
+ b = umt.cumsum(a, out=out, axis=0)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=0))
+ b = umt.cumsum(a, out=out, axis=1)
+ assert_(out is b)
+ assert_array_equal(b, np.cumsum(a, axis=-1))
+ # Check errors.
+ # Cannot pass in both axis and axes.
+ assert_raises(TypeError, inner1d, a, b, axis=0, axes=[0, 0])
+ # Not an integer.
+ assert_raises(TypeError, inner1d, a, b, axis=[0])
+ # more than 1 core dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, axis=1)
+ # Output wrong size in axis.
+ out = np.empty((1, 2, 3), dtype=a.dtype)
+ assert_raises(ValueError, umt.cumsum, a, out=out, axis=0)
+ # Regular ufuncs should not accept axis.
+ assert_raises(TypeError, np.add, 1., 1., axis=0)
+
+ def test_keepdims_argument(self):
+ # inner1d signature: '(i),(i)->()'
+ inner1d = umt.inner1d
+ a = np.arange(27.).reshape((3, 3, 3))
+ b = np.arange(10., 19.).reshape((3, 1, 3))
+ c = inner1d(a, b)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ out = np.zeros_like(c)
+ d = inner1d(a, b, keepdims=True, out=out)
+ assert_(d is out)
+ assert_array_equal(d, c)
+ # Now combined with axis and axes.
+ c = inner1d(a, b, axis=-1, keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=False))
+ c = inner1d(a, b, axis=-1, keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axis=0, keepdims=False)
+ assert_array_equal(c, (a * b).sum(0, keepdims=False))
+ c = inner1d(a, b, axis=0, keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[(-1,), (-1,), ()], keepdims=False)
+ assert_array_equal(c, (a * b).sum(-1))
+ c = inner1d(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True)
+ assert_array_equal(c, (a * b).sum(-1, keepdims=True))
+ c = inner1d(a, b, axes=[0, 0], keepdims=False)
+ assert_array_equal(c, (a * b).sum(0))
+ c = inner1d(a, b, axes=[0, 0, 0], keepdims=True)
+ assert_array_equal(c, (a * b).sum(0, keepdims=True))
+ c = inner1d(a, b, axes=[0, 2], keepdims=False)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1))
+ c = inner1d(a, b, axes=[0, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 2], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1,
+ keepdims=True))
+ c = inner1d(a, b, axes=[0, 2, 0], keepdims=True)
+ assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True))
+ # Hardly useful, but should work.
+ c = inner1d(a, b, axes=[0, 2, 1], keepdims=True)
+ assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1))
+ .sum(1, keepdims=True))
+ # Check with two core dimensions.
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected = uml.det(a)
+ c = uml.det(a, keepdims=False)
+ assert_array_equal(c, expected)
+ c = uml.det(a, keepdims=True)
+ assert_array_equal(c, expected[:, np.newaxis, np.newaxis])
+ a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis]
+ expected_s, expected_l = uml.slogdet(a)
+ cs, cl = uml.slogdet(a, keepdims=False)
+ assert_array_equal(cs, expected_s)
+ assert_array_equal(cl, expected_l)
+ cs, cl = uml.slogdet(a, keepdims=True)
+ assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis])
+ assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis])
+ # Sanity check on innerwt.
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w, keepdims=True),
+ np.sum(a * b * w, axis=-1, keepdims=True))
+ assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True),
+ np.sum(a * b * w, axis=0, keepdims=True))
+ # Check errors.
+ # Not a boolean
+ assert_raises(TypeError, inner1d, a, b, keepdims='true')
+ # More than 1 core dimension, and core output dimensions.
+ mm = umt.matrix_multiply
+ assert_raises(TypeError, mm, a, b, keepdims=True)
+ assert_raises(TypeError, mm, a, b, keepdims=False)
+ # Regular ufuncs should not accept keepdims.
+ assert_raises(TypeError, np.add, 1., 1., keepdims=False)
+
+ def test_innerwt(self):
+ a = np.arange(6).reshape((2, 3))
+ b = np.arange(10, 16).reshape((2, 3))
+ w = np.arange(20, 26).reshape((2, 3))
+ assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+ a = np.arange(100, 124).reshape((2, 3, 4))
+ b = np.arange(200, 224).reshape((2, 3, 4))
+ w = np.arange(300, 324).reshape((2, 3, 4))
+ assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+
+ def test_innerwt_empty(self):
+ """Test generalized ufunc with zero-sized operands"""
+ a = np.array([], dtype='f8')
+ b = np.array([], dtype='f8')
+ w = np.array([], dtype='f8')
+ assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+
+ def test_cross1d(self):
+ """Test with fixed-sized signature."""
+ a = np.eye(3)
+ assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
+ out = np.zeros((3, 3))
+ result = umt.cross1d(a[0], a, out)
+ assert_(result is out)
+ assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
+ assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
+ # Wrong output core dimension.
+ assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
+ # Wrong output broadcast dimension (see gh-15139).
+ assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3))
+
+ def test_can_ignore_signature(self):
+ # Comparing the effects of ? in signature:
+ # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
+ # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
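+ # A dimension marked with ? may be absent altogether: a 1-d vector
+ # passed for (m?,n) is treated as (n,) and the missing m? is dropped
+ # from the output shape.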
+ mat = np.arange(12).reshape((2, 3, 2))
+ single_vec = np.arange(2)
+ col_vec = single_vec[:, np.newaxis]
+ col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # matrix @ single column vector with proper dimension
+ mm_col_vec = umt.matrix_multiply(mat, col_vec)
+ # matmul does the same thing
+ matmul_col_vec = umt.matmul(mat, col_vec)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # matrix @ vector without dimension making it a column vector.
+ # matrix multiply fails -> missing core dim.
+ assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
+ # matmul mimicker passes, and returns a vector.
+ matmul_col = umt.matmul(mat, single_vec)
+ assert_array_equal(matmul_col, mm_col_vec.squeeze())
+ # Now with a column array: same as for column vector,
+ # broadcasting sensibly.
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
+ matmul_col_vec = umt.matmul(mat, col_vec_array)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # As above, but for row vector
+ single_vec = np.arange(3)
+ row_vec = single_vec[np.newaxis, :]
+ row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
+ # row vector @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec, mat)
+ matmul_row_vec = umt.matmul(row_vec, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # single row vector @ matrix
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
+ matmul_row = umt.matmul(single_vec, mat)
+ assert_array_equal(matmul_row, mm_row_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
+ matmul_row_vec = umt.matmul(row_vec_array, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # Now for vector combinations
+ # row vector @ column vector
+ col_vec = row_vec.T
+ col_vec_array = row_vec_array.swapaxes(-2, -1)
+ mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
+ matmul_row_col_vec = umt.matmul(row_vec, col_vec)
+ assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
+ # single row vector @ single col vector
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
+ matmul_row_col = umt.matmul(single_vec, single_vec)
+ assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
+ # row vector array @ column vector array
+ mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
+ matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
+ assert_array_equal(matmul_row_col_array, mm_row_col_array)
+ # Finally, check that things are *not* squeezed if one gives an
+ # output.
+ out = np.zeros_like(mm_row_col_array)
+ out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ out[:] = 0
+ out = umt.matmul(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ # And check one cannot put missing dimensions back.
+ out = np.zeros_like(mm_row_col_vec)
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
+ out)
+ # But fine for matmul, since it is just a broadcast.
+ out = umt.matmul(single_vec, single_vec, out)
+ assert_array_equal(out, mm_row_col_vec.squeeze())
+
+ def test_matrix_multiply(self):
+ self.compare_matrix_multiply_results(np.int64)
+ self.compare_matrix_multiply_results(np.double)
+
+ def test_matrix_multiply_umath_empty(self):
+ res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
+ assert_array_equal(res, np.zeros((0, 0)))
+ res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
+ assert_array_equal(res, np.zeros((10, 10)))
+
+ def compare_matrix_multiply_results(self, tp):
+ d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
+ d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
+ msg = "matrix multiply on type %s" % d1.dtype.name
+
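+ # Helpers: permute_n(n) yields all n! permutations of range(n) and
+ # slice_n(n) yields every combination of full and length-1 slices per
+ # axis, so many different memory layouts get exercised below.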
+ def permute_n(n):
+ if n == 1:
+ return ([0],)
+ ret = ()
+ base = permute_n(n-1)
+ for perm in base:
+ for i in range(n):
+ new = perm + [n-1]
+ new[n-1] = new[i]
+ new[i] = n-1
+ ret += (new,)
+ return ret
+
+ def slice_n(n):
+ if n == 0:
+ return ((),)
+ ret = ()
+ base = slice_n(n-1)
+ for sl in base:
+ ret += (sl+(slice(None),),)
+ ret += (sl+(slice(0, 1),),)
+ return ret
+
+ def broadcastable(s1, s2):
+ return s1 == s2 or s1 == 1 or s2 == 1
+
+ permute_3 = permute_n(3)
+ slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
+
+ ref = True
+ for p1 in permute_3:
+ for p2 in permute_3:
+ for s1 in slice_3:
+ for s2 in slice_3:
+ a1 = d1.transpose(p1)[s1]
+ a2 = d2.transpose(p2)[s2]
+ ref = ref and a1.base is not None
+ ref = ref and a2.base is not None
+ if (a1.shape[-1] == a2.shape[-2] and
+ broadcastable(a1.shape[0], a2.shape[0])):
+ assert_array_almost_equal(
+ umt.matrix_multiply(a1, a2),
+ np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
+ a1[..., np.newaxis,:], axis=-1),
+ err_msg=msg + ' %s %s' % (str(a1.shape),
+ str(a2.shape)))
+
+ assert_equal(ref, True, err_msg="reference check")
+
+ def test_euclidean_pdist(self):
+ a = np.arange(12, dtype=float).reshape(4, 3)
+ out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
+ umt.euclidean_pdist(a, out)
+ b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
+ b = b[~np.tri(a.shape[0], dtype=bool)]
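+ # ~np.tri keeps the strict upper triangle, i.e. each unordered pair
+ # exactly once, in the row-major order euclidean_pdist produces.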
+ assert_almost_equal(out, b)
+ # An output array is required to determine p with signature (n,d)->(p)
+ assert_raises(ValueError, umt.euclidean_pdist, a)
+
+ def test_cumsum(self):
+ a = np.arange(10)
+ result = umt.cumsum(a)
+ assert_array_equal(result, a.cumsum())
+
+ def test_object_logical(self):
+ a = np.array([3, None, True, False, "test", ""], dtype=object)
+ assert_equal(np.logical_or(a, None),
+ np.array([x or None for x in a], dtype=object))
+ assert_equal(np.logical_or(a, True),
+ np.array([x or True for x in a], dtype=object))
+ assert_equal(np.logical_or(a, 12),
+ np.array([x or 12 for x in a], dtype=object))
+ assert_equal(np.logical_or(a, "blah"),
+ np.array([x or "blah" for x in a], dtype=object))
+
+ assert_equal(np.logical_and(a, None),
+ np.array([x and None for x in a], dtype=object))
+ assert_equal(np.logical_and(a, True),
+ np.array([x and True for x in a], dtype=object))
+ assert_equal(np.logical_and(a, 12),
+ np.array([x and 12 for x in a], dtype=object))
+ assert_equal(np.logical_and(a, "blah"),
+ np.array([x and "blah" for x in a], dtype=object))
+
+ assert_equal(np.logical_not(a),
+ np.array([not x for x in a], dtype=object))
+
+ assert_equal(np.logical_or.reduce(a), 3)
+ assert_equal(np.logical_and.reduce(a), None)
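+ # (object loops use Python's `or`/`and`, which return an operand:
+ # 3 or None -> 3, while 3 and None -> None)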
+
+ def test_object_comparison(self):
+ class HasComparisons:
+ def __eq__(self, other):
+ return '=='
+
+ arr0d = np.array(HasComparisons())
+ assert_equal(arr0d == arr0d, True)
+ assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
+
+ arr1d = np.array([HasComparisons()])
+ assert_equal(arr1d == arr1d, np.array([True]))
+ assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast
+ assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['==']))
+
+ def test_object_array_reduction(self):
+ # Reductions on object arrays
+ a = np.array(['a', 'b', 'c'], dtype=object)
+ assert_equal(np.sum(a), 'abc')
+ assert_equal(np.max(a), 'c')
+ assert_equal(np.min(a), 'a')
+ a = np.array([True, False, True], dtype=object)
+ assert_equal(np.sum(a), 2)
+ assert_equal(np.prod(a), 0)
+ assert_equal(np.any(a), True)
+ assert_equal(np.all(a), False)
+ assert_equal(np.max(a), True)
+ assert_equal(np.min(a), False)
+ assert_equal(np.array([[1]], dtype=object).sum(), 1)
+ assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
+ assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
+ assert_equal(np.array([[1], [2, 3]], dtype=object)
+ .sum(initial=[0], where=[False, True]), [0, 2, 3])
+
+ def test_object_array_accumulate_inplace(self):
+ # Checks that in-place accumulates work, see also gh-7402
+ arr = np.ones(4, dtype=object)
+ arr[:] = [[1] for i in range(4)]
+ # Accumulate twice; the bug was also reproduced for tuples:
+ np.add.accumulate(arr, out=arr)
+ np.add.accumulate(arr, out=arr)
+ assert_array_equal(arr,
+ np.array([[1]*i for i in [1, 3, 6, 10]], dtype=object),
+ )
+
+ # And the same if the axis argument is used
+ arr = np.ones((2, 4), dtype=object)
+ arr[0, :] = [[2] for i in range(4)]
+ np.add.accumulate(arr, out=arr, axis=-1)
+ np.add.accumulate(arr, out=arr, axis=-1)
+ assert_array_equal(arr[0, :],
+ np.array([[2]*i for i in [1, 3, 6, 10]], dtype=object),
+ )
+
+ def test_object_array_accumulate_failure(self):
+ # Typical accumulation on object works as expected:
+ res = np.add.accumulate(np.array([1, 0, 2], dtype=object))
+ assert_array_equal(res, np.array([1, 1, 3], dtype=object))
+ # But errors are propagated from the inner-loop if they occur:
+ with pytest.raises(TypeError):
+ np.add.accumulate([1, None, 2])
+
+ def test_object_array_reduceat_inplace(self):
+ # Checks that in-place reduceats work, see also gh-7465
+ arr = np.empty(4, dtype=object)
+ arr[:] = [[1] for i in range(4)]
+ out = np.empty(4, dtype=object)
+ out[:] = [[1] for i in range(4)]
+ np.add.reduceat(arr, np.arange(4), out=arr)
+ np.add.reduceat(arr, np.arange(4), out=arr)
+ assert_array_equal(arr, out)
+
+ # And the same if the axis argument is used
+ arr = np.ones((2, 4), dtype=object)
+ arr[0, :] = [[2] for i in range(4)]
+ out = np.ones((2, 4), dtype=object)
+ out[0, :] = [[2] for i in range(4)]
+ np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
+ np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
+ assert_array_equal(arr, out)
+
+ def test_object_array_reduceat_failure(self):
+ # Reduceat works as expected when no invalid operation occurs (None is
+ # not involved in an operation here)
+ res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2])
+ assert_array_equal(res, np.array([None, 2], dtype=object))
+ # But errors when None would be involved in an operation:
+ with pytest.raises(TypeError):
+ np.add.reduceat([1, None, 2], [0, 2])
+
+ def test_zerosize_reduction(self):
+ # Test with default dtype and object dtype
+ for a in [[], np.array([], dtype=object)]:
+ assert_equal(np.sum(a), 0)
+ assert_equal(np.prod(a), 1)
+ assert_equal(np.any(a), False)
+ assert_equal(np.all(a), True)
+ assert_raises(ValueError, np.max, a)
+ assert_raises(ValueError, np.min, a)
+
+ def test_axis_out_of_bounds(self):
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.all, axis=1)
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.all, axis=-2)
+
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.any, axis=1)
+ a = np.array([False, False])
+ assert_raises(np.AxisError, a.any, axis=-2)
+
+ def test_scalar_reduction(self):
+ # The functions 'sum', 'prod', etc allow specifying axis=0
+ # even for scalars
+ assert_equal(np.sum(3, axis=0), 3)
+ assert_equal(np.prod(3.5, axis=0), 3.5)
+ assert_equal(np.any(True, axis=0), True)
+ assert_equal(np.all(False, axis=0), False)
+ assert_equal(np.max(3, axis=0), 3)
+ assert_equal(np.min(2.5, axis=0), 2.5)
+
+ # Check scalar behaviour for ufuncs without an identity
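+ # (a length-1 reduction simply returns the element, so no identity
+ # is required)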
+ assert_equal(np.power.reduce(3), 3)
+
+ # Make sure that scalars are coming out from this operation
+ assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
+ assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
+ assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
+ assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
+
+ # check if scalars/0-d arrays get cast
+ assert_(type(np.any(0, axis=0)) is np.bool_)
+
+ # assert that 0-d arrays get wrapped
+ class MyArray(np.ndarray):
+ pass
+ a = np.array(1).view(MyArray)
+ assert_(type(np.any(a)) is MyArray)
+
+ def test_casting_out_param(self):
+ # Test that it's possible to do casts on output
+ a = np.ones((200, 100), np.int64)
+ b = np.ones((200, 100), np.int64)
+ c = np.ones((200, 100), np.float64)
+ np.add(a, b, out=c)
+ assert_equal(c, 2)
+
+ a = np.zeros(65536)
+ b = np.zeros(65536, dtype=np.float32)
+ np.subtract(a, 0, out=b)
+ assert_equal(b, 0)
+
+ def test_where_param(self):
+ # Test that the where= ufunc parameter works with regular arrays
+ a = np.arange(7)
+ b = np.ones(7)
+ c = np.zeros(7)
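+ # Entries of `out` where the condition is False are left untouched,
+ # so they keep their initial zeros: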
+ np.add(a, b, out=c, where=(a % 2 == 1))
+ assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
+
+ a = np.arange(4).reshape(2, 2) + 2
+ np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
+ assert_equal(a, [[2, 27], [16, 5]])
+ # Broadcasting the where= parameter
+ np.subtract(a, 2, out=a, where=[True, False])
+ assert_equal(a, [[0, 27], [14, 5]])
+
+ def test_where_param_buffer_output(self):
+ # This test is temporarily skipped because it requires
+ # adding masking features to the nditer to work properly
+
+ # With casting on output
+ a = np.ones(10, np.int64)
+ b = np.ones(10, np.int64)
+ c = 1.5 * np.ones(10, np.float64)
+ np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
+ assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
+
+ def test_where_param_alloc(self):
+ # With casting and allocated output
+ a = np.array([1], dtype=np.int64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ # No casting and allocated output
+ a = np.array([1], dtype=np.float64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ def test_where_with_broadcasting(self):
+ # See gh-17198
+ a = np.random.random((5000, 4))
+ b = np.random.random((5000, 1))
+
+ where = a > 0.3
+ out = np.full_like(a, 0)
+ np.less(a, b, where=where, out=out)
+ b_where = np.broadcast_to(b, a.shape)[where]
+ assert_array_equal((a[where] < b_where), out[where].astype(bool))
+ assert not out[~where].any() # outside mask, out remains all 0
+
+ def check_identityless_reduction(self, a):
+ # np.minimum.reduce is an identityless reduction
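+ # (minimum has no identity element, so the implementation must seed
+ # the reduction from actual array values)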
+
+ # Verify that it sees the zero at various positions
+ a[...] = 1
+ a[1, 0, 0] = 0
+ assert_equal(np.minimum.reduce(a, axis=None), 0)
+ assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
+ assert_equal(np.minimum.reduce(a, axis=0),
+ [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=1),
+ [[1, 1, 1, 1], [0, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=2),
+ [[1, 1, 1], [0, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=()), a)
+
+ a[...] = 1
+ a[0, 1, 0] = 0
+ assert_equal(np.minimum.reduce(a, axis=None), 0)
+ assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
+ assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
+ assert_equal(np.minimum.reduce(a, axis=0),
+ [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=1),
+ [[0, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=2),
+ [[1, 0, 1], [1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=()), a)
+
+ a[...] = 1
+ a[0, 0, 1] = 0
+ assert_equal(np.minimum.reduce(a, axis=None), 0)
+ assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
+ assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
+ assert_equal(np.minimum.reduce(a, axis=0),
+ [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=1),
+ [[1, 0, 1, 1], [1, 1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=2),
+ [[0, 1, 1], [1, 1, 1]])
+ assert_equal(np.minimum.reduce(a, axis=()), a)
+
+ @requires_memory(6 * 1024**3)
+ def test_identityless_reduction_huge_array(self):
+ # Regression test for gh-20921 (copying identity incorrectly failed)
+ arr = np.zeros((2, 2**31), 'uint8')
+ arr[:, 0] = [1, 3]
+ arr[:, -1] = [4, 1]
+ res = np.maximum.reduce(arr, axis=0)
+ del arr
+ assert res[0] == 3
+ assert res[-1] == 4
+
+ def test_identityless_reduction_corder(self):
+ a = np.empty((2, 3, 4), order='C')
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_forder(self):
+ a = np.empty((2, 3, 4), order='F')
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_otherorder(self):
+ a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_noncontig(self):
+ a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
+ a = a[1:, 1:, 1:]
+ self.check_identityless_reduction(a)
+
+ def test_identityless_reduction_noncontig_unaligned(self):
+ a = np.empty((3*4*5*8 + 1,), dtype='i1')
+ a = a[1:].view(dtype='f8')
+ a.shape = (3, 4, 5)
+ a = a[1:, 1:, 1:]
+ self.check_identityless_reduction(a)
+
+ def test_initial_reduction(self):
+ # np.minimum.reduce is an identityless reduction
+
+ # For cases like np.maximum.reduce(np.abs(...), initial=0)
+ # More generally, a supremum over non-negative numbers.
+ assert_equal(np.maximum.reduce([], initial=0), 0)
+
+ # For cases like reduction of an empty array over the reals.
+ assert_equal(np.minimum.reduce([], initial=np.inf), np.inf)
+ assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf)
+
+ # Random tests
+ assert_equal(np.minimum.reduce([5], initial=4), 4)
+ assert_equal(np.maximum.reduce([4], initial=5), 5)
+ assert_equal(np.maximum.reduce([5], initial=4), 5)
+ assert_equal(np.minimum.reduce([4], initial=5), 4)
+
+ # Check initial=None raises ValueError for both types of ufunc reductions
+ assert_raises(ValueError, np.minimum.reduce, [], initial=None)
+ assert_raises(ValueError, np.add.reduce, [], initial=None)
+
+ # Check that np._NoValue gives default behavior.
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0)
+
+ # Check that initial kwarg behaves as intended for dtype=object
+ a = np.array([10], dtype=object)
+ res = np.add.reduce(a, initial=5)
+ assert_equal(res, 15)
+
+ @pytest.mark.parametrize('axis', (0, 1, None))
+ @pytest.mark.parametrize('where', (np.array([False, True, True]),
+ np.array([[True], [False], [True]]),
+ np.array([[True, False, False],
+ [False, True, False],
+ [False, True, True]])))
+ def test_reduction_with_where(self, axis, where):
+ a = np.arange(9.).reshape(3, 3)
+ a_copy = a.copy()
+ a_check = np.zeros_like(a)
+ np.positive(a, out=a_check, where=where)
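+ # a_check now equals a where `where` is True and 0 elsewhere, so its
+ # plain sum serves as the reference for the masked reduction.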
+
+ res = np.add.reduce(a, axis=axis, where=where)
+ check = a_check.sum(axis)
+ assert_equal(res, check)
+ # Check we do not overwrite elements of a internally.
+ assert_array_equal(a, a_copy)
+
+ @pytest.mark.parametrize(('axis', 'where'),
+ ((0, np.array([True, False, True])),
+ (1, [True, True, False]),
+ (None, True)))
+ @pytest.mark.parametrize('initial', (-np.inf, 5.))
+ def test_reduction_with_where_and_initial(self, axis, where, initial):
+ a = np.arange(9.).reshape(3, 3)
+ a_copy = a.copy()
+ a_check = np.full(a.shape, -np.inf)
+ np.positive(a, out=a_check, where=where)
+
+ res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
+ check = a_check.max(axis, initial=initial)
+ assert_equal(res, check)
+
+ def test_reduction_where_initial_needed(self):
+ a = np.arange(9.).reshape(3, 3)
+ m = [False, True, False]
+ assert_raises(ValueError, np.maximum.reduce, a, where=m)
+
+ def test_identityless_reduction_nonreorderable(self):
+ a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
+
+ res = np.divide.reduce(a, axis=0)
+ assert_equal(res, [8.0, 4.0, 8.0])
+
+ res = np.divide.reduce(a, axis=1)
+ assert_equal(res, [2.0, 8.0])
+
+ res = np.divide.reduce(a, axis=())
+ assert_equal(res, a)
+
+ assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
+
+ def test_reduce_zero_axis(self):
+ # If we have a n x m array and do a reduction with axis=1, then we are
+ # doing n reductions, and each reduction takes an m-element array. For
+ # a reduction operation without an identity, then:
+ # n > 0, m > 0: fine
+ # n = 0, m > 0: fine, doing 0 reductions of m-element arrays
+ # n > 0, m = 0: can't reduce a 0-element array, ValueError
+ # n = 0, m = 0: can't reduce a 0-element array, ValueError (for
+ # consistency with the above case)
+ # This test doesn't actually look at return values, it just checks to
+ # make sure that we get an error in exactly those cases where we
+ # expect one, and assumes the calculations themselves are done
+ # correctly.
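+ # E.g. np.maximum.reduce(np.zeros((0, 3)), axis=1) gives an empty
+ # result, while np.maximum.reduce(np.zeros((3, 0)), axis=1) raises.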
+
+ def ok(f, *args, **kwargs):
+ f(*args, **kwargs)
+
+ def err(f, *args, **kwargs):
+ assert_raises(ValueError, f, *args, **kwargs)
+
+ def t(expect, func, n, m):
+ expect(func, np.zeros((n, m)), axis=1)
+ expect(func, np.zeros((m, n)), axis=0)
+ expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
+ expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
+ expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
+ expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
+ expect(func, np.zeros((m // 3, m // 3, m // 3,
+ n // 2, n // 2)),
+ axis=(0, 1, 2))
+ # Check what happens if the inner (resp. outer) dimensions are a
+ # mix of zero and non-zero:
+ expect(func, np.zeros((10, m, n)), axis=(0, 1))
+ expect(func, np.zeros((10, n, m)), axis=(0, 2))
+ expect(func, np.zeros((m, 10, n)), axis=0)
+ expect(func, np.zeros((10, m, n)), axis=1)
+ expect(func, np.zeros((10, n, m)), axis=2)
+
+ # np.maximum is just an arbitrary ufunc with no reduction identity
+ assert_equal(np.maximum.identity, None)
+ t(ok, np.maximum.reduce, 30, 30)
+ t(ok, np.maximum.reduce, 0, 30)
+ t(err, np.maximum.reduce, 30, 0)
+ t(err, np.maximum.reduce, 0, 0)
+ err(np.maximum.reduce, [])
+ np.maximum.reduce(np.zeros((0, 0)), axis=())
+
+ # all of the combinations are fine for a reduction that has an
+ # identity
+ t(ok, np.add.reduce, 30, 30)
+ t(ok, np.add.reduce, 0, 30)
+ t(ok, np.add.reduce, 30, 0)
+ t(ok, np.add.reduce, 0, 0)
+ np.add.reduce([])
+ np.add.reduce(np.zeros((0, 0)), axis=())
+
+ # OTOH, accumulate always makes sense for any combination of n and m,
+ # because it maps an m-element array to an m-element array. These
+ # tests are simpler because accumulate doesn't accept multiple axes.
+ for uf in (np.maximum, np.add):
+ uf.accumulate(np.zeros((30, 0)), axis=0)
+ uf.accumulate(np.zeros((0, 30)), axis=0)
+ uf.accumulate(np.zeros((30, 30)), axis=0)
+ uf.accumulate(np.zeros((0, 0)), axis=0)
+
+ def test_safe_casting(self):
+ # In old versions of numpy, in-place operations used the 'unsafe'
+ # casting rules. In versions >= 1.10, 'same_kind' is the
+ # default and an exception is raised instead of a warning
+ # when 'same_kind' is not satisfied.
+ a = np.array([1, 2, 3], dtype=int)
+ # Non-in-place addition is fine
+ assert_array_equal(assert_no_warnings(np.add, a, 1.1),
+ [2.1, 3.1, 4.1])
+ assert_raises(TypeError, np.add, a, 1.1, out=a)
+
+ def add_inplace(a, b):
+ a += b
+
+ assert_raises(TypeError, add_inplace, a, 1.1)
+ # Make sure that explicitly overriding the exception is allowed:
+ assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
+ assert_array_equal(a, [2, 3, 4])
+
+ def test_ufunc_custom_out(self):
+ # Test ufunc with built in input types and custom output type
+
+ a = np.array([0, 1, 2], dtype='i8')
+ b = np.array([0, 1, 2], dtype='i8')
+ c = np.empty(3, dtype=_rational_tests.rational)
+
+ # Output must be specified so numpy knows what
+ # ufunc signature to look for
+ result = _rational_tests.test_add(a, b, c)
+ target = np.array([0, 2, 4], dtype=_rational_tests.rational)
+ assert_equal(result, target)
+
+ # The new resolution means that we can (usually) find custom loops
+ # as long as they match exactly:
+ result = _rational_tests.test_add(a, b)
+ assert_equal(result, target)
+
+ # This works even more generally, as long as the default common-dtype
+ # promoter works out:
+ result = _rational_tests.test_add(a, b.astype(np.uint16), out=c)
+ assert_equal(result, target)
+
+ # But, it can be fooled, e.g. (use scalars, which forces legacy
+ # type resolution to kick in, which then fails):
+ with assert_raises(TypeError):
+ _rational_tests.test_add(a, np.uint16(2))
+
+ def test_operand_flags(self):
+ a = np.arange(16, dtype='l').reshape(4, 4)
+ b = np.arange(9, dtype='l').reshape(3, 3)
+ opflag_tests.inplace_add(a[:-1, :-1], b)
+ assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
+ [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
+
+ a = np.array(0)
+ opflag_tests.inplace_add(a, 3)
+ assert_equal(a, 3)
+ opflag_tests.inplace_add(a, [3, 4])
+ assert_equal(a, 10)
+
+ def test_struct_ufunc(self):
+ import numpy.core._struct_ufunc_tests as struct_ufunc
+
+ a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+ b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
+
+ result = struct_ufunc.add_triplet(a, b)
+ assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
+ assert_raises(RuntimeError, struct_ufunc.register_fail)
+
+ def test_custom_ufunc(self):
+ a = np.array(
+ [_rational_tests.rational(1, 2),
+ _rational_tests.rational(1, 3),
+ _rational_tests.rational(1, 4)],
+ dtype=_rational_tests.rational)
+ b = np.array(
+ [_rational_tests.rational(1, 2),
+ _rational_tests.rational(1, 3),
+ _rational_tests.rational(1, 4)],
+ dtype=_rational_tests.rational)
+
+ result = _rational_tests.test_add_rationals(a, b)
+ expected = np.array(
+ [_rational_tests.rational(1),
+ _rational_tests.rational(2, 3),
+ _rational_tests.rational(1, 2)],
+ dtype=_rational_tests.rational)
+ assert_equal(result, expected)
+
+ def test_custom_ufunc_forced_sig(self):
+ # gh-9351 - looking for a non-first userloop would previously hang
+ with assert_raises(TypeError):
+ np.multiply(_rational_tests.rational(1), 1,
+ signature=(_rational_tests.rational, int, None))
+
+ def test_custom_array_like(self):
+
+ class MyThing:
+ __array_priority__ = 1000
+
+ rmul_count = 0
+ getitem_count = 0
+
+ def __init__(self, shape):
+ self.shape = shape
+
+ def __len__(self):
+ return self.shape[0]
+
+ def __getitem__(self, i):
+ MyThing.getitem_count += 1
+ if not isinstance(i, tuple):
+ i = (i,)
+ if len(i) > self.ndim:
+ raise IndexError("boo")
+
+ return MyThing(self.shape[len(i):])
+
+ def __rmul__(self, other):
+ MyThing.rmul_count += 1
+ return self
+
+ np.float64(5)*MyThing((3, 3))
+ assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
+ assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
+
+ def test_inplace_fancy_indexing(self):
+
+ a = np.arange(10)
+ np.add.at(a, [2, 5, 2], 1)
+ assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
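+ # Index 2 appears twice and is incremented twice; plain fancy-index
+ # assignment (a[[2, 5, 2]] += 1) would buffer and bump it only once.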
+
+ a = np.arange(10)
+ b = np.array([100, 100, 100])
+ np.add.at(a, [2, 5, 2], b)
+ assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
+
+ a = np.arange(9).reshape(3, 3)
+ b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
+ np.add.at(a, (slice(None), [1, 2, 1]), b)
+ assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
+ assert_equal(a,
+ [[[0, 401, 202],
+ [3, 404, 205],
+ [6, 407, 208]],
+
+ [[9, 410, 211],
+ [12, 413, 214],
+ [15, 416, 217]],
+
+ [[18, 419, 220],
+ [21, 422, 223],
+ [24, 425, 226]]])
+
+ a = np.arange(9).reshape(3, 3)
+ b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
+ np.add.at(a, ([1, 2, 1], slice(None)), b)
+ assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
+ assert_equal(a,
+ [[[0, 1, 2],
+ [203, 404, 605],
+ [106, 207, 308]],
+
+ [[9, 10, 11],
+ [212, 413, 614],
+ [115, 216, 317]],
+
+ [[18, 19, 20],
+ [221, 422, 623],
+ [124, 225, 326]]])
+
+ a = np.arange(9).reshape(3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (0, [1, 2, 1]), b)
+ assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
+ assert_equal(a,
+ [[[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]],
+
+ [[209, 410, 611],
+ [12, 13, 14],
+ [15, 16, 17]],
+
+ [[118, 219, 320],
+ [21, 22, 23],
+ [24, 25, 26]]])
+
+ a = np.arange(27).reshape(3, 3, 3)
+ b = np.array([100, 200, 300])
+ np.add.at(a, (slice(None), slice(None), slice(None)), b)
+ assert_equal(a,
+ [[[100, 201, 302],
+ [103, 204, 305],
+ [106, 207, 308]],
+
+ [[109, 210, 311],
+ [112, 213, 314],
+ [115, 216, 317]],
+
+ [[118, 219, 320],
+ [121, 222, 323],
+ [124, 225, 326]]])
+
+ a = np.arange(10)
+ np.negative.at(a, [2, 5, 2])
+ assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
+
+ # Test 0-dim array
+ a = np.array(0)
+ np.add.at(a, (), 1)
+ assert_equal(a, 1)
+
+ assert_raises(IndexError, np.add.at, a, 0, 1)
+ assert_raises(IndexError, np.add.at, a, [], 1)
+
+ # Test mixed dtypes
+ a = np.arange(10)
+ np.power.at(a, [1, 2, 3, 2], 3.5)
+ assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
+
+ # Test boolean indexing and boolean ufuncs
+ a = np.arange(10)
+ index = a % 2 == 0
+ np.equal.at(a, index, [0, 2, 4, 6, 8])
+ assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
+
+ # Test unary operator
+ a = np.arange(10, dtype='u4')
+ np.invert.at(a, [2, 5, 2])
+ assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
+
+ # Test empty subspace
+ orig = np.arange(4)
+ a = orig[:, None][:, 0:0]
+ np.add.at(a, [0, 1], 3)
+ assert_array_equal(orig, np.arange(4))
+
+ # Test with swapped byte order
+ index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
+ values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
+ np.add.at(values, index, 3)
+ assert_array_equal(values, [1, 8, 6, 4])
+
+ # Test exception thrown
+ values = np.array(['a', 1], dtype=object)
+ assert_raises(TypeError, np.add.at, values, [0, 1], 1)
+ assert_array_equal(values, np.array(['a', 1], dtype=object))
+
+ # Test multiple output ufuncs raise error, gh-5665
+ assert_raises(ValueError, np.modf.at, np.arange(10), [1])
+
+ # Test maximum
+ a = np.array([1, 2, 3])
+ np.maximum.at(a, [0], 0)
+ assert_equal(np.array([1, 2, 3]), a)
+
+ def test_at_not_none_signature(self):
+ # Test that ufuncs with a non-trivial signature raise a TypeError
+ a = np.ones((2, 2, 2))
+ b = np.ones((1, 2, 2))
+ assert_raises(TypeError, np.matmul.at, a, [0], b)
+
+ a = np.array([[[1, 2], [3, 4]]])
+ assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0])
+
+ def test_reduce_arguments(self):
+ f = np.add.reduce
+ d = np.ones((5, 2), dtype=int)
+ o = np.ones((2,), dtype=d.dtype)
+ r = o * 5
+ assert_equal(f(d), r)
+ # a, axis=0, dtype=None, out=None, keepdims=False
+ assert_equal(f(d, axis=0), r)
+ assert_equal(f(d, 0), r)
+ assert_equal(f(d, 0, dtype=None), r)
+ assert_equal(f(d, 0, dtype='i'), r)
+ assert_equal(f(d, 0, 'i'), r)
+ assert_equal(f(d, 0, None), r)
+ assert_equal(f(d, 0, None, out=None), r)
+ assert_equal(f(d, 0, None, out=o), r)
+ assert_equal(f(d, 0, None, o), r)
+ assert_equal(f(d, 0, None, None), r)
+ assert_equal(f(d, 0, None, None, keepdims=False), r)
+ assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
+ assert_equal(f(d, 0, None, None, False, 0), r)
+ assert_equal(f(d, 0, None, None, False, initial=0), r)
+ assert_equal(f(d, 0, None, None, False, 0, True), r)
+ assert_equal(f(d, 0, None, None, False, 0, where=True), r)
+ # multiple keywords
+ assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
+ where=True), r)
+
+ # too little
+ assert_raises(TypeError, f)
+ # too much
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
+ # invalid axis
+ assert_raises(TypeError, f, d, "invalid")
+ assert_raises(TypeError, f, d, axis="invalid")
+ assert_raises(TypeError, f, d, axis="invalid", dtype=None,
+ keepdims=True)
+ # invalid dtype
+ assert_raises(TypeError, f, d, 0, "invalid")
+ assert_raises(TypeError, f, d, dtype="invalid")
+ assert_raises(TypeError, f, d, dtype="invalid", out=None)
+ # invalid out
+ assert_raises(TypeError, f, d, 0, None, "invalid")
+ assert_raises(TypeError, f, d, out="invalid")
+ assert_raises(TypeError, f, d, out="invalid", dtype=None)
+ # keepdims boolean, no invalid value
+ # assert_raises(TypeError, f, d, 0, None, None, "invalid")
+ # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
+ # invalid mix
+ assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
+ out=None)
+
+ # invalid keyword
+ assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
+ assert_raises(TypeError, f, d, invalid=0)
+ assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
+ out=None)
+ assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
+ out=None, invalid=0)
+ assert_raises(TypeError, f, d, axis=0, dtype=None,
+ out=None, invalid=0)
+
+ def test_structured_equal(self):
+ # https://github.com/numpy/numpy/issues/4855
+
+ class MyA(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*(input.view(np.ndarray)
+ for input in inputs), **kwargs)
+ a = np.arange(12.).reshape(4, 3)
+ ra = a.view(dtype=('f8,f8,f8')).squeeze()
+ mra = ra.view(MyA)
+
+ target = np.array([ True, False, False, False], dtype=bool)
+ assert_equal(np.all(target == (mra == ra[0])), True)
+
+ def test_scalar_equal(self):
+ # Scalar comparisons should always work, without deprecation warnings,
+ # even when the ufunc itself fails.
+ a = np.array(0.)
+ b = np.array('a')
+ assert_(a != b)
+ assert_(b != a)
+ assert_(not (a == b))
+ assert_(not (b == a))
+
+ def test_NotImplemented_not_returned(self):
+ # See gh-5964 and gh-2091. Some of these functions are not operator
+ # related and were fixed for other reasons in the past.
+ binary_funcs = [
+ np.power, np.add, np.subtract, np.multiply, np.divide,
+ np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
+ np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
+ np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
+ np.maximum, np.minimum, np.mod,
+ np.greater, np.greater_equal, np.less, np.less_equal,
+ np.equal, np.not_equal]
+
+ a = np.array('1')
+ b = 1
+ c = np.array([1., 2.])
+ for f in binary_funcs:
+ assert_raises(TypeError, f, a, b)
+ assert_raises(TypeError, f, c, a)
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or]) # logical_xor object loop is bad
+ @pytest.mark.parametrize("signature",
+ [(None, None, object), (object, None, None),
+ (None, object, None)])
+ def test_logical_ufuncs_object_signatures(self, ufunc, signature):
+ a = np.array([True, None, False], dtype=object)
+ res = ufunc(a, a, signature=signature)
+ assert res.dtype == object
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ @pytest.mark.parametrize("signature",
+ [(bool, None, object), (object, None, bool),
+ (None, object, bool)])
+ def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
+ # Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
+ a = np.array([True, None, False])
+ with pytest.raises(TypeError):
+ ufunc(a, a, signature=signature)
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_support_anything(self, ufunc):
+ # The logical ufuncs support even input that can't be promoted:
+ a = np.array(b'1', dtype="V3")
+ c = np.array([1., 2.])
+ assert_array_equal(ufunc(a, c), ufunc([True, True], True))
+ assert ufunc.reduce(a) == True
+ # check that the output has no effect:
+ out = np.zeros(2, dtype=np.int32)
+ expected = ufunc([True, True], True).astype(out.dtype)
+ assert_array_equal(ufunc(a, c, out=out), expected)
+ out = np.zeros((), dtype=np.int32)
+ assert ufunc.reduce(a, out=out) == True
+ # Last check, test reduction when out and a match (the complexity here
+ # is that the "i,i->?" may seem right, but should not match.
+ a = np.array([3], dtype="i")
+ out = np.zeros((), dtype=a.dtype)
+ assert ufunc.reduce(a, out=out) == 1
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_reject_string(self, ufunc):
+ """
+ Logical ufuncs are normally well defined by working with the boolean
+ equivalent, i.e. casting all inputs to bools should work.
+
+ However, casting strings to bools is *currently* weird, because it
+ actually uses `bool(int(str))`. Thus we explicitly reject strings.
+ This test should succeed (and can probably just be removed) as soon as
+ string to bool casts are well defined in NumPy.
+ """
+ with pytest.raises(TypeError, match="contain a loop with signature"):
+ ufunc(["1"], ["3"])
+ with pytest.raises(TypeError, match="contain a loop with signature"):
+ ufunc.reduce(["1", "2", "0"])
+
+ @pytest.mark.parametrize("ufunc",
+ [np.logical_and, np.logical_or, np.logical_xor])
+ def test_logical_ufuncs_out_cast_check(self, ufunc):
+ a = np.array('1')
+ c = np.array([1., 2.])
+ out = a.copy()
+ with pytest.raises(TypeError):
+ # It would be safe, but not equiv casting:
+ ufunc(a, c, out=out, casting="equiv")
+
+ def test_reducelike_byteorder_resolution(self):
+ # See gh-20699: byte-order changes need some extra care in the type
+ # resolution to make the following succeed:
+ arr_be = np.arange(10, dtype=">i8")
+ arr_le = np.arange(10, dtype="<i8")
+
+ assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+ assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
+ assert_array_equal(
+ np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
+
+ def test_reducelike_out_promotes(self):
+ # Check that the out argument to reductions is considered for
+ # promotion. See also gh-20455.
+ # Note that these paths could prefer `initial=` in the future and
+ # do not up-cast to the default integer for add and prod
+ arr = np.ones(1000, dtype=np.uint8)
+ out = np.zeros((), dtype=np.uint16)
+ assert np.add.reduce(arr, out=out) == 1000
+ arr[:10] = 2
+ assert np.multiply.reduce(arr, out=out) == 2**10
+
+ # For legacy dtypes, the signature currently has to be forced if `out=`
+ # is passed. The two paths below should differ; without `dtype=`, the
+ # expected result should be: `np.prod(arr.astype("f8")).astype("f4")`!
+ arr = np.full(5, 2**25-1, dtype=np.int64)
+
+ # float32 and int64 promote to float64:
+ res = np.zeros((), dtype=np.float32)
+ # Without `dtype=`, the computation runs in the promoted float64 and
+ # is then cast to the float32 `out`:
+ np.multiply.reduce(arr, out=res)
+ # If `dtype=` is passed, the calculation is forced to float32:
+ single_res = np.zeros((), dtype=np.float32)
+ np.multiply.reduce(arr, out=single_res, dtype=np.float32)
+ assert single_res != res
+
+ def test_reducelike_output_needs_identical_cast(self):
+ # Checks that the case where we only need a simple byte-swap works; mainly
+ # tests that this is not rejected directly.
+ # (interesting because we require descriptor identity in reducelikes).
+ arr = np.ones(20, dtype="f8")
+ out = np.empty((), dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduce(arr)
+ np.add.reduce(arr, out=out)
+ assert_array_equal(expected, out)
+ # Check reduceat:
+ out = np.empty(2, dtype=arr.dtype.newbyteorder())
+ expected = np.add.reduceat(arr, [0, 1])
+ np.add.reduceat(arr, [0, 1], out=out)
+ assert_array_equal(expected, out)
+ # And accumulate:
+ out = np.empty(arr.shape, dtype=arr.dtype.newbyteorder())
+ expected = np.add.accumulate(arr)
+ np.add.accumulate(arr, out=out)
+ assert_array_equal(expected, out)
+
+ def test_reduce_noncontig_output(self):
+ # Check that reduction deals with non-contiguous output arrays
+ # appropriately.
+ #
+ # gh-8036
+
+ x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
+ x = x[4:6, 1:11:6, 1:5].transpose(1, 2, 0)
+ y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
+ y = y_base[::2, :]
+
+ y_base_copy = y_base.copy()
+
+ r0 = np.add.reduce(x, out=y.copy(), axis=2)
+ r1 = np.add.reduce(x, out=y, axis=2)
+
+ # The results should match, and y_base shouldn't get clobbered
+ assert_equal(r0, r1)
+ assert_equal(y_base[1,:], y_base_copy[1,:])
+ assert_equal(y_base[3,:], y_base_copy[3,:])
+
+ @pytest.mark.parametrize("with_cast", [True, False])
+ def test_reduceat_and_accumulate_out_shape_mismatch(self, with_cast):
+ # Should raise an error mentioning "shape" or "size"
+ arr = np.arange(5)
+ out = np.arange(3) # definitely wrong shape
+ if with_cast:
+ # If a cast is necessary on the output, we can be sure to use
+ # the generic NpyIter (non-fast) path.
+ out = out.astype(np.float64)
+
+ with pytest.raises(ValueError, match="(shape|size)"):
+ np.add.reduceat(arr, [0, 3], out=out)
+
+ with pytest.raises(ValueError, match="(shape|size)"):
+ np.add.accumulate(arr, out=out)
+
+ @pytest.mark.parametrize('out_shape',
+ [(), (1,), (3,), (1, 1), (1, 3), (4, 3)])
+ @pytest.mark.parametrize('keepdims', [True, False])
+ @pytest.mark.parametrize('f_reduce', [np.add.reduce, np.minimum.reduce])
+ def test_reduce_wrong_dimension_output(self, f_reduce, keepdims, out_shape):
+ # Test that we're not incorrectly broadcasting dimensions.
+ # See gh-15144 (failed for np.add.reduce previously).
+ a = np.arange(12.).reshape(4, 3)
+ out = np.empty(out_shape, a.dtype)
+
+ correct_out = f_reduce(a, axis=0, keepdims=keepdims)
+ if out_shape != correct_out.shape:
+ with assert_raises(ValueError):
+ f_reduce(a, axis=0, out=out, keepdims=keepdims)
+ else:
+ check = f_reduce(a, axis=0, out=out, keepdims=keepdims)
+ assert_(check is out)
+ assert_array_equal(check, correct_out)
+
+ def test_reduce_output_does_not_broadcast_input(self):
+ # Test that the output shape cannot broadcast an input dimension
+ # (it never can add dimensions, but it might expand an existing one)
+ a = np.ones((1, 10))
+ out_correct = np.empty((1, 1))
+ out_incorrect = np.empty((3, 1))
+ np.add.reduce(a, axis=-1, out=out_correct, keepdims=True)
+ np.add.reduce(a, axis=-1, out=out_correct[:, 0], keepdims=False)
+ with assert_raises(ValueError):
+ np.add.reduce(a, axis=-1, out=out_incorrect, keepdims=True)
+ with assert_raises(ValueError):
+ np.add.reduce(a, axis=-1, out=out_incorrect[:, 0], keepdims=False)
+
+ def test_reduce_output_subclass_ok(self):
+ class MyArr(np.ndarray):
+ pass
+
+ out = np.empty(())
+ np.add.reduce(np.ones(5), out=out) # no subclass, all fine
+ out = out.view(MyArr)
+ assert np.add.reduce(np.ones(5), out=out) is out
+ assert type(np.add.reduce(out)) is MyArr
+
+ def test_no_doc_string(self):
+ # gh-9337
+ assert_('\n' not in umt.inner1d_no_doc.__doc__)
+
+ def test_invalid_args(self):
+ # gh-7961
+ exc = pytest.raises(TypeError, np.sqrt, None)
+ # minimally check the exception text
+ assert exc.match('loop of ufunc does not support')
+
+ @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+ def test_nat_is_not_finite(self, nat):
+ try:
+ assert not np.isfinite(nat)
+ except TypeError:
+ pass # ok, just not implemented
+
+ @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+ def test_nat_is_nan(self, nat):
+ try:
+ assert np.isnan(nat)
+ except TypeError:
+ pass # ok, just not implemented
+
+ @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+ def test_nat_is_not_inf(self, nat):
+ try:
+ assert not np.isinf(nat)
+ except TypeError:
+ pass # ok, just not implemented
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+ if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+ '''
+ Check that all ufuncs return the correct type. Avoid object and
+ boolean types since many operations are not defined for them.
+
+ Choose the shape so that even dot and matmul will succeed.
+ '''
+ for typ in ufunc.types:
+ # types is a list of strings like ii->i
+ if 'O' in typ or '?' in typ:
+ continue
+ inp, out = typ.split('->')
+ args = [np.ones((3, 3), t) for t in inp]
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings("always")
+ res = ufunc(*args)
+ if isinstance(res, tuple):
+ outs = tuple(out)
+ assert len(res) == len(outs)
+ for r, t in zip(res, outs):
+ assert r.dtype == np.dtype(t)
+ else:
+ assert res.dtype == np.dtype(out)
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+ if isinstance(getattr(np, x), np.ufunc)])
+@np._no_nep50_warning()
+def test_ufunc_noncontiguous(ufunc):
+ '''
+ Check that contiguous and non-contiguous calls to ufuncs
+ have the same results for values in range(1, 7)
+ '''
+ for typ in ufunc.types:
+ # types is a list of strings like ii->i
+ if any(set('O?mM') & set(typ)):
+ # bool, object, datetime are too irregular for this simple test
+ continue
+ inp, out = typ.split('->')
+ args_c = [np.empty(6, t) for t in inp]
+ args_n = [np.empty(18, t)[::3] for t in inp]
+ for a in args_c:
+ a.flat = range(1,7)
+ for a in args_n:
+ a.flat = range(1,7)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings("always")
+ res_c = ufunc(*args_c)
+ res_n = ufunc(*args_n)
+ if len(out) == 1:
+ res_c = (res_c,)
+ res_n = (res_n,)
+ for c_ar, n_ar in zip(res_c, res_n):
+ dt = c_ar.dtype
+ if np.issubdtype(dt, np.floating):
+ # for floating point results allow a small tolerance in comparisons
+ # since different algorithms (libm vs. intrinsics) can be used
+ # for different input strides
+ res_eps = np.finfo(dt).eps
+ tol = 2*res_eps
+ assert_allclose(res_c, res_n, atol=tol, rtol=tol)
+ else:
+ assert_equal(c_ar, n_ar)
+
+
+@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
+def test_ufunc_warn_with_nan(ufunc):
+ # issue gh-15127
+ # test that calling certain ufuncs with a non-standard `nan` value does not
+ # emit a warning
+ # `b` holds a 64 bit signaling nan: the most significant bit of the
+ # significand is zero.
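+ # (quiet NaNs have that bit set; leaving it clear, as here, yields a
+ # signaling NaN)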
+ b = np.array([0x7ff0000000000001], 'i8').view('f8')
+ assert np.isnan(b)
+ if ufunc.nin == 1:
+ ufunc(b)
+ elif ufunc.nin == 2:
+ ufunc(b, b.copy())
+ else:
+ raise ValueError('ufunc with more than 2 inputs')
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_ufunc_out_casterrors():
+ # Tests that casting errors are correctly reported and buffers are
+ # cleared.
+ # The following array can be added to itself as an object array, but
+ # the result cannot be cast to an integer output:
+ value = 123 # relies on python cache (leak-check will still find it)
+ arr = np.array([value] * int(np.BUFSIZE * 1.5) +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ out = np.ones(len(arr), dtype=np.intp)
+
+ count = sys.getrefcount(value)
+ with pytest.raises(ValueError):
+ # Output casting failure:
+ np.add(arr, arr, out=out, casting="unsafe")
+
+ assert count == sys.getrefcount(value)
+ # output is unchanged after the error; this shows that the iteration
+ # was aborted (this is not necessarily defined behaviour)
+ assert out[-1] == 1
+
+ with pytest.raises(ValueError):
+ # Input casting failure:
+ np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe")
+
+ assert count == sys.getrefcount(value)
+ # output is unchanged after the error; this shows that the iteration
+ # was aborted (this is not necessarily defined behaviour)
+ assert out[-1] == 1
+
+
+@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)])
+def test_ufunc_input_casterrors(bad_offset):
+ value = 123
+ arr = np.array([value] * bad_offset +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ with pytest.raises(ValueError):
+ # Force cast inputs, but the buffered cast of `arr` to intp fails:
+ np.add(arr, arr, dtype=np.intp, casting="unsafe")
+
+
+@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+@pytest.mark.parametrize("bad_offset", [0, int(np.BUFSIZE * 1.5)])
+def test_ufunc_input_floatingpoint_error(bad_offset):
+ value = 123
+ arr = np.array([value] * bad_offset +
+ [np.nan] +
+ [value] * int(1.5 * np.BUFSIZE))
+ with np.errstate(invalid="raise"), pytest.raises(FloatingPointError):
+ # Force cast inputs, but the buffered cast of `arr` to intp fails:
+ np.add(arr, arr, dtype=np.intp, casting="unsafe")
+
+
+def test_trivial_loop_invalid_cast():
+ # This tests the fast-path "invalid cast", see gh-19904.
+ with pytest.raises(TypeError,
+ match="cast ufunc 'add' input 0"):
+ # the void dtype definitely cannot cast to double:
+ np.add(np.array(1, "i,i"), 3, signature="dd->d")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+@pytest.mark.parametrize("offset",
+ [0, np.BUFSIZE//2, int(1.5*np.BUFSIZE)])
+def test_reduce_casterrors(offset):
+ # Test reporting of casting errors in reductions, we test various
+ # offsets to where the casting error will occur, since these may occur
+ # at different places during the reduction procedure. For example
+ # the first item may be special.
+ value = 123  # relies on the Python small-int cache (leak checking will still find it)
+ arr = np.array([value] * offset +
+ ["string"] +
+ [value] * int(1.5 * np.BUFSIZE), dtype=object)
+ out = np.array(-1, dtype=np.intp)
+
+ count = sys.getrefcount(value)
+ with pytest.raises(ValueError, match="invalid literal"):
+ # This is an unsafe cast, but we currently always allow that.
+ # Note that the double loop is picked, but the cast fails.
+ np.add.reduce(arr, dtype=np.intp, out=out)
+ assert count == sys.getrefcount(value)
+ # If an error occurs during casting, the reduction proceeds at most up
+ # to the point of the error (which would give `value * offset`), and the
+ # output stays -1 if the error happened immediately.
+ # This does not define behaviour; the output is invalid and thus undefined.
+ assert out[()] < value * offset
+
+
+@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+@pytest.mark.parametrize("method",
+ [np.add.accumulate, np.add.reduce,
+ pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"),
+ pytest.param(lambda x: np.log.at(x, [2]), id="at")])
+def test_ufunc_methods_floaterrors(method):
+ # adding inf and -inf (or log(-inf)) creates an invalid float and warns
+ arr = np.array([np.inf, 0, -np.inf])
+ with np.errstate(all="warn"):
+ with pytest.warns(RuntimeWarning, match="invalid value"):
+ method(arr)
+
+ arr = np.array([np.inf, 0, -np.inf])
+ with np.errstate(all="raise"):
+ with pytest.raises(FloatingPointError):
+ method(arr)
+
+
+def _check_neg_zero(value):
+ if value != 0.0:
+ return False
+ if not np.signbit(value.real):
+ return False
+ if value.dtype.kind == "c":
+ return np.signbit(value.imag)
+ return True
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_addition_negative_zero(dtype):
+ dtype = np.dtype(dtype)
+ if dtype.kind == "c":
+ neg_zero = dtype.type(complex(-0.0, -0.0))
+ else:
+ neg_zero = dtype.type(-0.0)
+
+ arr = np.array(neg_zero)
+ arr2 = np.array(neg_zero)
+
+ assert _check_neg_zero(arr + arr2)
+ # In-place ops may end up on a different path (reduce path) see gh-21211
+ arr += arr2
+ assert _check_neg_zero(arr)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+@pytest.mark.parametrize("use_initial", [True, False])
+def test_addition_reduce_negative_zero(dtype, use_initial):
+ dtype = np.dtype(dtype)
+ if dtype.kind == "c":
+ neg_zero = dtype.type(complex(-0.0, -0.0))
+ else:
+ neg_zero = dtype.type(-0.0)
+
+ kwargs = {}
+ if use_initial:
+ kwargs["initial"] = neg_zero
+ else:
+ pytest.xfail("-0. propagation in sum currently requires initial")
+
+ # Test various lengths, in case SIMD paths or chunking play a role.
+ # 150 extends beyond the pairwise blocksize; probably not important.
+ for i in range(0, 150):
+ arr = np.array([neg_zero] * i, dtype=dtype)
+ res = np.sum(arr, **kwargs)
+ if i > 0 or use_initial:
+ assert _check_neg_zero(res)
+ else:
+ # `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])`
+ assert not np.signbit(res.real)
+ assert not np.signbit(res.imag)
+
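+# Minimal illustrative sketch (not itself a test) of the IEEE-754 rule the
+# negative-zero tests above rely on: addition keeps the negative sign only
+# when *both* summands are -0.0, which is why an empty sum cannot yield
+# -0.0 without an explicit `initial`.
+_nzero = np.float64(-0.0)
+assert np.signbit(_nzero + _nzero)  # -0.0 + -0.0 -> -0.0
+assert not np.signbit(_nzero + np.float64(0.0))  # -0.0 + +0.0 -> +0.0
+assert not np.signbit(np.sum(np.array([], dtype=np.float64)))  # empty sum -> +0.0
+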
+class TestLowlevelAPIAccess:
+ def test_resolve_dtypes_basic(self):
+ # Basic test for dtype resolution:
+ i4 = np.dtype("i4")
+ f4 = np.dtype("f4")
+ f8 = np.dtype("f8")
+
+ r = np.add.resolve_dtypes((i4, f4, None))
+ assert r == (f8, f8, f8)
+
+ # The signature is parsed with the same (less strict) logic as a ufunc
+ # call; the following is "same-kind" casting, so it works:
+ r = np.add.resolve_dtypes((
+ i4, i4, None), signature=(None, None, "f4"))
+ assert r == (f4, f4, f4)
+
+ # Check NEP 50 "weak" promotion also:
+ r = np.add.resolve_dtypes((f4, int, None))
+ assert r == (f4, f4, f4)
+
+ with pytest.raises(TypeError):
+ np.add.resolve_dtypes((i4, f4, None), casting="no")
+
+ def test_weird_dtypes(self):
+ S0 = np.dtype("S0")
+ # S0 is often converted by NumPy to S1, but not here:
+ r = np.equal.resolve_dtypes((S0, S0, None))
+ assert r == (S0, S0, np.dtype(bool))
+
+ # Subarray dtypes are weird and only really exist nested; they need the
+ # shift to full NEP 50 to be handled nicely:
+ dts = np.dtype("10i")
+ with pytest.raises(NotImplementedError):
+ np.equal.resolve_dtypes((dts, dts, None))
+
+ def test_resolve_dtypes_reduction_not_implemented(self):
+ i4 = np.dtype("i4")
+ with pytest.raises(NotImplementedError):
+ np.add.resolve_dtypes((i4, i4, i4), reduction=True)
+
+ @pytest.mark.parametrize("dtypes", [
+ (np.dtype("i"), np.dtype("i")),
+ (None, np.dtype("i"), np.dtype("f")),
+ (np.dtype("i"), None, np.dtype("f")),
+ ("i4", "i4", None)])
+ def test_resolve_dtypes_errors(self, dtypes):
+ with pytest.raises(TypeError):
+ np.add.resolve_dtypes(dtypes)
+
+ def test_resolve_dtypes_reduction(self):
+ i2 = np.dtype("i2")
+ long_ = np.dtype("long")
+ # Check special addition resolution:
+ res = np.add.resolve_dtypes((None, i2, None), reduction=True)
+ assert res == (long_, long_, long_)
+
+ def test_resolve_dtypes_reduction_errors(self):
+ i2 = np.dtype("i2")
+
+ with pytest.raises(TypeError):
+ np.add.resolve_dtypes((None, i2, i2))
+
+ with pytest.raises(TypeError):
+ np.add.signature((None, None, "i4"))
+
+ @pytest.mark.skipif(not hasattr(ct, "pythonapi"),
+ reason="`ctypes.pythonapi` required for capsule unpacking.")
+ def test_loop_access(self):
+ # This is a basic test for the full strided loop access
+ data_t = ct.ARRAY(ct.c_char_p, 2)
+ dim_t = ct.ARRAY(ct.c_ssize_t, 1)
+ strides_t = ct.ARRAY(ct.c_ssize_t, 2)
+ strided_loop_t = ct.CFUNCTYPE(
+ ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p)
+
+ class call_info_t(ct.Structure):
+ _fields_ = [
+ ("strided_loop", strided_loop_t),
+ ("context", ct.c_void_p),
+ ("auxdata", ct.c_void_p),
+ ("requires_pyapi", ct.c_byte),
+ ("no_floatingpoint_errors", ct.c_byte),
+ ]
+
+ i4 = np.dtype("i4")
+ dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4))
+ assert dt == (i4, i4) # can be used without casting
+
+ # Fill in the rest of the information:
+ np.negative._get_strided_loop(call_info_obj)
+
+ ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p
+ call_info = ct.pythonapi.PyCapsule_GetPointer(
+ ct.py_object(call_info_obj),
+ ct.c_char_p(b"numpy_1.24_ufunc_call_info"))
+
+ call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents
+
+ arr = np.arange(10, dtype=i4)
+ call_info.strided_loop(
+ call_info.context,
+ data_t(arr.ctypes.data, arr.ctypes.data),
+ arr.ctypes.shape, # a C array containing the shape, (10,) here
+ strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]),
+ call_info.auxdata)
+
+ # We just directly called the negative inner-loop in-place:
+ assert_array_equal(arr, -np.arange(10, dtype=i4))
+
+ @pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")])
+ def test__get_strided_loop_errors_bad_strides(self, strides):
+ i4 = np.dtype("i4")
+ dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4))
+
+ with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"):
+ np.negative._get_strided_loop(call_info, fixed_strides=strides)
+
+ def test__get_strided_loop_errors_bad_call_info(self):
+ i4 = np.dtype("i4")
+ dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4))
+
+ with pytest.raises(ValueError, match="PyCapsule"):
+ np.negative._get_strided_loop("not the capsule!")
+
+ with pytest.raises(TypeError, match=".*incompatible context"):
+ np.add._get_strided_loop(call_info)
+
+ np.negative._get_strided_loop(call_info)
+ with pytest.raises(TypeError):
+ # cannot call it a second time:
+ np.negative._get_strided_loop(call_info)
+
+ def test_long_arrays(self):
+ t = np.zeros((1029, 917), dtype=np.single)
+ t[0][0] = 1
+ t[28][414] = 1
+ tc = np.cos(t)
+ assert_equal(tc[0][0], tc[28][414])
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath.py
new file mode 100644
index 00000000..03bed0ab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath.py
@@ -0,0 +1,4419 @@
+import platform
+import warnings
+import fnmatch
+import itertools
+import pytest
+import sys
+import os
+import operator
+from fractions import Fraction
+from functools import reduce
+from collections import namedtuple
+
+import numpy.core.umath as ncu
+from numpy.core import _umath_tests as ncu_tests
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex,
+ assert_array_equal, assert_almost_equal, assert_array_almost_equal,
+ assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
+ _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM
+ )
+from numpy.testing._private.utils import _glibc_older_than
+
+UFUNCS = [obj for obj in np.core.umath.__dict__.values()
+ if isinstance(obj, np.ufunc)]
+
+UFUNCS_UNARY = [
+ uf for uf in UFUNCS if uf.nin == 1
+]
+UFUNCS_UNARY_FP = [
+ uf for uf in UFUNCS_UNARY if 'f->f' in uf.types
+]
+
+UFUNCS_BINARY = [
+ uf for uf in UFUNCS if uf.nin == 2
+]
+UFUNCS_BINARY_ACC = [
+ uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1
+]
+
+def interesting_binop_operands(val1, val2, dtype):
+ """
+ Helper to create "interesting" operands to cover common code paths:
+ * scalar inputs
+ * only the first operand is an array (e.g. scalar division fast-paths)
+ * longer arrays (SIMD), placing the value of interest at different positions
+ * oddly strided arrays which may not be SIMD compatible
+
+ It does not attempt to cover unaligned access or mixed dtypes.
+ These are normally handled by the casting/buffering machinery.
+
+ This is not a fixture (currently), since a fixture normally only
+ yields once.
+ """
+ fill_value = 1 # could be a parameter, but maybe not an optional one?
+
+ arr1 = np.full(10003, dtype=dtype, fill_value=fill_value)
+ arr2 = np.full(10003, dtype=dtype, fill_value=fill_value)
+
+ arr1[0] = val1
+ arr2[0] = val2
+
+ extractor = lambda res: res
+ yield arr1[0], arr2[0], extractor, "scalars"
+
+ extractor = lambda res: res
+ yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays"
+
+ # reset array values to fill_value:
+ arr1[0] = fill_value
+ arr2[0] = fill_value
+
+ for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]:
+ arr1[pos] = val1
+ arr2[pos] = val2
+
+ extractor = lambda res: res[pos]
+ yield arr1, arr2, extractor, f"off-{pos}"
+ yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar"
+
+ arr1[pos] = fill_value
+ arr2[pos] = fill_value
+
+ for stride in [-1, 113]:
+ op1 = arr1[::stride]
+ op2 = arr2[::stride]
+ op1[10] = val1
+ op2[10] = val2
+
+ extractor = lambda res: res[10]
+ yield op1, op2, extractor, f"stride-{stride}"
+
+ op1[10] = fill_value
+ op2[10] = fill_value
+
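+# Hedged usage sketch (illustrative only, not one of the tests): each yielded
+# 4-tuple is (operand1, operand2, extractor, identifier), where `extractor`
+# pulls the element of interest back out of a full-sized result array.
+_sketch_ok = all(
+    extract(np.add(op1, op2)) == 1
+    for op1, op2, extract, _ident in interesting_binop_operands(1, 0, np.int32)
+)
+assert _sketch_ok, "interesting_binop_operands usage sketch failed"
+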
+
+def on_powerpc():
+ """ True if we are running on a Power PC platform."""
+ return platform.processor() == 'powerpc' or \
+ platform.machine().startswith('ppc')
+
+
+def bad_arcsinh():
+ """The blocklisted trig functions are not accurate on aarch64/PPC for
+ complex256. Rather than dig through the actual problem skip the
+ test. This should be fixed when we can move past glibc2.17
+ which is the version in manylinux2014
+ """
+ if platform.machine() == 'aarch64':
+ x = 1.78e-10
+ elif on_powerpc():
+ x = 2.16e-10
+ else:
+ return False
+ v1 = np.arcsinh(np.float128(x))
+ v2 = np.arcsinh(np.complex256(x)).real
+ # The eps for float128 is 1e-33, so this is way bigger
+ return abs((v1 / v2) - 1.0) > 1e-23
+
+
+class _FilterInvalids:
+ def setup_method(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.olderr)
+
+
+class TestConstants:
+ def test_pi(self):
+ assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
+
+ def test_e(self):
+ assert_allclose(ncu.e, 2.718281828459045, 1e-15)
+
+ def test_euler_gamma(self):
+ assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+
+
+class TestOut:
+ def test_out_subok(self):
+ for subok in (True, False):
+ a = np.array(0.5)
+ o = np.empty(())
+
+ r = np.add(a, 2, o, subok=subok)
+ assert_(r is o)
+ r = np.add(a, 2, out=o, subok=subok)
+ assert_(r is o)
+ r = np.add(a, 2, out=(o,), subok=subok)
+ assert_(r is o)
+
+ d = np.array(5.7)
+ o1 = np.empty(())
+ o2 = np.empty((), dtype=np.int32)
+
+ r1, r2 = np.frexp(d, o1, None, subok=subok)
+ assert_(r1 is o1)
+ r1, r2 = np.frexp(d, None, o2, subok=subok)
+ assert_(r2 is o2)
+ r1, r2 = np.frexp(d, o1, o2, subok=subok)
+ assert_(r1 is o1)
+ assert_(r2 is o2)
+
+ r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+ assert_(r1 is o1)
+ r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+ assert_(r2 is o2)
+ r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
+ assert_(r1 is o1)
+ assert_(r2 is o2)
+
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
+ r1, r2 = np.frexp(d, out=o1, subok=subok)
+
+ assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
+ assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
+ assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
+ assert_raises(TypeError, np.add, a, 2, [], subok=subok)
+ assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
+ assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
+ o.flags.writeable = False
+ assert_raises(ValueError, np.add, a, 2, o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
+
+ def test_out_wrap_subok(self):
+ class ArrayWrap(np.ndarray):
+ __array_priority__ = 10
+
+ def __new__(cls, arr):
+ return np.asarray(arr).view(cls).copy()
+
+ def __array_wrap__(self, arr, context):
+ return arr.view(type(self))
+
+ for subok in (True, False):
+ a = ArrayWrap([0.5])
+
+ r = np.add(a, 2, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, None, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, out=None, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, out=(None,), subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ d = ArrayWrap([5.7])
+ o1 = np.empty((1,))
+ o2 = np.empty((1,), dtype=np.int32)
+
+ r1, r2 = np.frexp(d, o1, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, o1, None, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, None, o2, subok=subok)
+ if subok:
+ assert_(isinstance(r1, ArrayWrap))
+ else:
+ assert_(type(r1) == np.ndarray)
+
+ r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+ if subok:
+ assert_(isinstance(r1, ArrayWrap))
+ else:
+ assert_(type(r1) == np.ndarray)
+
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
+ r1, r2 = np.frexp(d, out=o1, subok=subok)
+
+
+class TestComparisons:
+
+ @pytest.mark.parametrize('dtype', np.sctypes['uint'] + np.sctypes['int'] +
+ np.sctypes['float'] + [np.bool_])
+ @pytest.mark.parametrize('py_comp,np_comp', [
+ (operator.lt, np.less),
+ (operator.le, np.less_equal),
+ (operator.gt, np.greater),
+ (operator.ge, np.greater_equal),
+ (operator.eq, np.equal),
+ (operator.ne, np.not_equal)
+ ])
+ def test_comparison_functions(self, dtype, py_comp, np_comp):
+ # Initialize input arrays
+ if dtype == np.bool_:
+ a = np.random.choice(a=[False, True], size=1000)
+ b = np.random.choice(a=[False, True], size=1000)
+ scalar = True
+ else:
+ a = np.random.randint(low=1, high=10, size=1000).astype(dtype)
+ b = np.random.randint(low=1, high=10, size=1000).astype(dtype)
+ scalar = 5
+ np_scalar = np.dtype(dtype).type(scalar)
+ a_lst = a.tolist()
+ b_lst = b.tolist()
+
+ # (Binary) Comparison (x1=array, x2=array)
+ comp_b = np_comp(a, b).view(np.uint8)
+ comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)]
+
+ # (Scalar1) Comparison (x1=scalar, x2=array)
+ comp_s1 = np_comp(np_scalar, b).view(np.uint8)
+ comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst]
+
+ # (Scalar2) Comparison (x1=array, x2=scalar)
+ comp_s2 = np_comp(a, np_scalar).view(np.uint8)
+ comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst]
+
+ # Sequence: Binary, Scalar1 and Scalar2
+ assert_(comp_b.tolist() == comp_b_list,
+ f"Failed comparison ({py_comp.__name__})")
+ assert_(comp_s1.tolist() == comp_s1_list,
+ f"Failed comparison ({py_comp.__name__})")
+ assert_(comp_s2.tolist() == comp_s2_list,
+ f"Failed comparison ({py_comp.__name__})")
+
+ def test_ignore_object_identity_in_equal(self):
+ # Check comparing identical objects whose comparison
+ # is not a simple boolean, e.g., arrays that are compared elementwise.
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ assert_raises(ValueError, np.equal, a, a)
+
+ # Check error raised when comparing identical non-comparable objects.
+ class FunkyType:
+ def __eq__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ assert_raises(TypeError, np.equal, a, a)
+
+ # Check identity doesn't override comparison mismatch.
+ a = np.array([np.nan], dtype=object)
+ assert_equal(np.equal(a, a), [False])
+
+ def test_ignore_object_identity_in_not_equal(self):
+ # Check comparing identical objects whose comparison
+ # is not a simple boolean, e.g., arrays that are compared elementwise.
+ a = np.array([np.array([1, 2, 3]), None], dtype=object)
+ assert_raises(ValueError, np.not_equal, a, a)
+
+ # Check error raised when comparing identical non-comparable objects.
+ class FunkyType:
+ def __ne__(self, other):
+ raise TypeError("I won't compare")
+
+ a = np.array([FunkyType()])
+ assert_raises(TypeError, np.not_equal, a, a)
+
+ # Check identity doesn't override comparison mismatch.
+ a = np.array([np.nan], dtype=object)
+ assert_equal(np.not_equal(a, a), [True])
+
+ def test_error_in_equal_reduce(self):
+ # gh-20929
+ # make sure np.equal.reduce raises a TypeError if an array is passed
+ # without specifying the dtype
+ a = np.array([0, 0])
+ assert_equal(np.equal.reduce(a, dtype=bool), True)
+ assert_raises(TypeError, np.equal.reduce, a)
+
+ def test_object_dtype(self):
+ assert np.equal(1, [1], dtype=object).dtype == object
+ assert np.equal(1, [1], signature=(None, None, "O")).dtype == object
+
+ def test_object_nonbool_dtype_error(self):
+ # bool output dtype is fine of course:
+ assert np.equal(1, [1], dtype=bool).dtype == bool
+
+ # but the following examples do not have a loop:
+ with pytest.raises(TypeError, match="No loop matching"):
+ np.equal(1, 1, dtype=np.int64)
+
+ with pytest.raises(TypeError, match="No loop matching"):
+ np.equal(1, 1, sig=(None, None, "l"))
+
+
+class TestAdd:
+ def test_reduce_alignment(self):
+ # gh-9876
+ # make sure arrays with weird strides work with the optimizations in
+ # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a
+ # 4 byte offset, even though its itemsize is 8.
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)])
+ a['a'] = -1
+ assert_equal(a['b'].sum(), 0)
+
+
+class TestDivision:
+ def test_division_int(self):
+ # int division should follow Python
+ x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
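+ # On Python 3, 5 / 10 == 0.5 is always true, so only the first branch
+ # runs; the integer branch below is a Python 2 relic.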
+ if 5 / 10 == 0.5:
+ assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
+ -0.05, -0.1, -0.9, -1, -1.2])
+ else:
+ assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+ assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
+ assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dtype,ex_val", itertools.product(
+ np.sctypes['int'] + np.sctypes['uint'], (
+ (
+ # dividend
+ "np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
+ # divisors
+ "np.arange(lsize).astype(dtype),"
+ # scalar divisors
+ "range(15)"
+ ),
+ (
+ # dividend
+ "np.arange(fo.min, fo.min+lsize).astype(dtype),"
+ # divisors
+ "np.arange(lsize//-2, lsize//2).astype(dtype),"
+ # scalar divisors
+ "range(fo.min, fo.min + 15)"
+ ), (
+ # dividend
+ "np.array(range(fo.max-lsize, fo.max)).astype(dtype),"
+ # divisors
+ "np.arange(lsize).astype(dtype),"
+ # scalar divisors
+ "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]"
+ )
+ )
+ ))
+ def test_division_int_boundary(self, dtype, ex_val):
+ fo = np.iinfo(dtype)
+ neg = -1 if fo.min < 0 else 1
+ # Large enough to test SIMD loops and the remainder elements
+ lsize = 512 + 7
+ a, b, divisors = eval(ex_val)
+ a_lst, b_lst = a.tolist(), b.tolist()
+
+ c_div = lambda n, d: (
+ 0 if d == 0 else (
+ fo.min if (n and n == fo.min and d == -1) else n//d
+ )
+ )
+ with np.errstate(divide='ignore'):
+ ac = a.copy()
+ ac //= b
+ div_ab = a // b
+ div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)]
+
+ msg = "Integer arrays floor division check (//)"
+ assert all(div_ab == div_lst), msg
+ msg_eq = "Integer arrays floor division check (//=)"
+ assert all(ac == div_lst), msg_eq
+
+ for divisor in divisors:
+ ac = a.copy()
+ with np.errstate(divide='ignore', over='ignore'):
+ div_a = a // divisor
+ ac //= divisor
+ div_lst = [c_div(i, divisor) for i in a_lst]
+
+ assert all(div_a == div_lst), msg
+ assert all(ac == div_lst), msg_eq
+
+ with np.errstate(divide='raise', over='raise'):
+ if 0 in b:
+ # Verify overflow case
+ with pytest.raises(FloatingPointError,
+ match="divide by zero encountered in floor_divide"):
+ a // b
+ else:
+ a // b
+ if fo.min and fo.min in a:
+ with pytest.raises(FloatingPointError,
+ match='overflow encountered in floor_divide'):
+ a // -1
+ elif fo.min:
+ a // -1
+ with pytest.raises(FloatingPointError,
+ match="divide by zero encountered in floor_divide"):
+ a // 0
+ with pytest.raises(FloatingPointError,
+ match="divide by zero encountered in floor_divide"):
+ ac = a.copy()
+ ac //= 0
+
+ np.array([], dtype=dtype) // 0
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dtype,ex_val", itertools.product(
+ np.sctypes['int'] + np.sctypes['uint'], (
+ "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)",
+ "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)",
+ "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)",
+ "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)",
+ )
+ ))
+ def test_division_int_reduce(self, dtype, ex_val):
+ fo = np.iinfo(dtype)
+ a = eval(ex_val)
+ lst = a.tolist()
+ c_div = lambda n, d: (
+ 0 if d == 0 or (n and n == fo.min and d == -1) else n//d
+ )
+
+ with np.errstate(divide='ignore'):
+ div_a = np.floor_divide.reduce(a)
+ div_lst = reduce(c_div, lst)
+ msg = "Reduce floor integer division check"
+ assert div_a == div_lst, msg
+
+ with np.errstate(divide='raise', over='raise'):
+ with pytest.raises(FloatingPointError,
+ match="divide by zero encountered in reduce"):
+ np.floor_divide.reduce(np.arange(-100, 100).astype(dtype))
+ if fo.min:
+ with pytest.raises(FloatingPointError,
+ match='overflow encountered in reduce'):
+ np.floor_divide.reduce(
+ np.array([fo.min, 1, -1], dtype=dtype)
+ )
+
+ @pytest.mark.parametrize(
+ "dividend,divisor,quotient",
+ [(np.timedelta64(2,'Y'), np.timedelta64(2,'M'), 12),
+ (np.timedelta64(2,'Y'), np.timedelta64(-2,'M'), -12),
+ (np.timedelta64(-2,'Y'), np.timedelta64(2,'M'), -12),
+ (np.timedelta64(-2,'Y'), np.timedelta64(-2,'M'), 12),
+ (np.timedelta64(2,'M'), np.timedelta64(-2,'Y'), -1),
+ (np.timedelta64(2,'Y'), np.timedelta64(0,'M'), 0),
+ (np.timedelta64(2,'Y'), 2, np.timedelta64(1,'Y')),
+ (np.timedelta64(2,'Y'), -2, np.timedelta64(-1,'Y')),
+ (np.timedelta64(-2,'Y'), 2, np.timedelta64(-1,'Y')),
+ (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
+ (np.timedelta64(-2,'Y'), -2, np.timedelta64(1,'Y')),
+ (np.timedelta64(-2,'Y'), -3, np.timedelta64(0,'Y')),
+ (np.timedelta64(-2,'Y'), 0, np.timedelta64('Nat','Y')),
+ ])
+ def test_division_int_timedelta(self, dividend, divisor, quotient):
+ # Unless the divisor is 0 or the quotient is NaT, check the result;
+ # otherwise a division-by-zero error is expected.
+ if divisor and (isinstance(quotient, int) or not np.isnat(quotient)):
+ msg = "Timedelta floor division check"
+ assert dividend // divisor == quotient, msg
+
+ # Test for arrays as well
+ msg = "Timedelta arrays floor division check"
+ dividend_array = np.array([dividend]*5)
+ quotient_array = np.array([quotient]*5)
+ assert all(dividend_array // divisor == quotient_array), msg
+ else:
+ if IS_WASM:
+ pytest.skip("fp errors don't work in wasm")
+ with np.errstate(divide='raise', invalid='raise'):
+ with pytest.raises(FloatingPointError):
+ dividend // divisor
+
+ def test_division_complex(self):
+ # check that implementation is correct
+ msg = "Complex division implementation check"
+ x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
+ assert_almost_equal(x**2/x, x, err_msg=msg)
+ # check overflow, underflow
+ msg = "Complex division overflow/underflow check"
+ x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+ y = x**2/x
+ assert_almost_equal(y/x, [1, 1], err_msg=msg)
+
+ def test_zero_division_complex(self):
+ with np.errstate(invalid="ignore", divide="ignore"):
+ x = np.array([0.0], dtype=np.complex128)
+ y = 1.0/x
+ assert_(np.isinf(y)[0])
+ y = complex(np.inf, np.nan)/x
+ assert_(np.isinf(y)[0])
+ y = complex(np.nan, np.inf)/x
+ assert_(np.isinf(y)[0])
+ y = complex(np.inf, np.inf)/x
+ assert_(np.isinf(y)[0])
+ y = 0.0/x
+ assert_(np.isnan(y)[0])
+
+ def test_floor_division_complex(self):
+ # check that floor division, divmod and remainder raise TypeError
+ x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
+ with pytest.raises(TypeError):
+ x // 7
+ with pytest.raises(TypeError):
+ np.divmod(x, 7)
+ with pytest.raises(TypeError):
+ np.remainder(x, 7)
+
+ def test_floor_division_signed_zero(self):
+ # Check that the sign bit is correctly set when dividing positive and
+ # negative zero by one.
+ x = np.zeros(10)
+ assert_equal(np.signbit(x//1), 0)
+ assert_equal(np.signbit((-x)//1), 1)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+ def test_floor_division_errors(self, dtype):
+ fnan = np.array(np.nan, dtype=dtype)
+ fone = np.array(1.0, dtype=dtype)
+ fzer = np.array(0.0, dtype=dtype)
+ finf = np.array(np.inf, dtype=dtype)
+ # divide by zero error check
+ with np.errstate(divide='raise', invalid='ignore'):
+ assert_raises(FloatingPointError, np.floor_divide, fone, fzer)
+ with np.errstate(divide='ignore', invalid='raise'):
+ np.floor_divide(fone, fzer)
+
+ # The following already contain a NaN and should not warn
+ with np.errstate(all='raise'):
+ np.floor_divide(fnan, fone)
+ np.floor_divide(fone, fnan)
+ np.floor_divide(fnan, fzer)
+ np.floor_divide(fzer, fnan)
+
+ @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+ def test_floor_division_corner_cases(self, dtype):
+ # test corner cases like 1.0//0.0 for errors and return vals
+ x = np.zeros(10, dtype=dtype)
+ y = np.ones(10, dtype=dtype)
+ fnan = np.array(np.nan, dtype=dtype)
+ fone = np.array(1.0, dtype=dtype)
+ fzer = np.array(0.0, dtype=dtype)
+ finf = np.array(np.inf, dtype=dtype)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in floor_divide")
+ div = np.floor_divide(fnan, fone)
+ assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+ div = np.floor_divide(fone, fnan)
+ assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+ div = np.floor_divide(fnan, fzer)
+ assert np.isnan(div), "dtype: %s, div: %s" % (dtype, div)
+ # verify 1.0//0.0 computations return inf
+ with np.errstate(divide='ignore'):
+ z = np.floor_divide(y, x)
+ assert_(np.isinf(z).all())
+
+def floor_divide_and_remainder(x, y):
+ return (np.floor_divide(x, y), np.remainder(x, y))
+
+
+def _signs(dt):
+ if dt in np.typecodes['UnsignedInteger']:
+ return (+1,)
+ else:
+ return (+1, -1)
+
+
+class TestRemainder:
+
+ def test_remainder_basic(self):
+ dt = np.typecodes['AllInteger'] + np.typecodes['Float']
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*71, dtype=dt1)
+ b = np.array(sg2*19, dtype=dt2)
+ div, rem = op(a, b)
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ def test_float_remainder_exact(self):
+ # test that float results are exact for small integers. This also
+ # holds for the same integers scaled by powers of two.
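+ # For example divmod(-3.0, 7.0) == (-1.0, 4.0) exactly, since both the
+ # quotient and the remainder are small integers representable without
+ # rounding.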
+ nlst = list(range(-127, 0))
+ plst = list(range(1, 128))
+ dividend = nlst + [0] + plst
+ divisor = nlst + plst
+ arg = list(itertools.product(dividend, divisor))
+ tgt = list(divmod(*t) for t in arg)
+
+ a, b = np.array(arg, dtype=int).T
+ # Convert the exact integer results from Python to float so that
+ # signed zero can be used; it is checked below.
+ tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+ tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+ tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt in np.typecodes['Float']:
+ msg = 'op: %s, dtype: %s' % (op.__name__, dt)
+ fa = a.astype(dt)
+ fb = b.astype(dt)
+ div, rem = op(fa, fb)
+ assert_equal(div, tgtdiv, err_msg=msg)
+ assert_equal(rem, tgtrem, err_msg=msg)
+
+ def test_float_remainder_roundoff(self):
+ # gh-6127
+ dt = np.typecodes['Float']
+ for op in [floor_divide_and_remainder, np.divmod]:
+ for dt1, dt2 in itertools.product(dt, dt):
+ for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+ fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+ msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+ a = np.array(sg1*78*6e-8, dtype=dt1)
+ b = np.array(sg2*6e-8, dtype=dt2)
+ div, rem = op(a, b)
+ # Equal assertion should hold when fmod is used
+ assert_equal(div*b + rem, a, err_msg=msg)
+ if sg2 == -1:
+ assert_(b < rem <= 0, msg)
+ else:
+ assert_(b > rem >= 0, msg)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.xfail(sys.platform.startswith("darwin"),
+ reason="MacOS seems to not give the correct 'invalid' warning for "
+ "`fmod`. Hopefully, others always do.")
+ @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+ def test_float_divmod_errors(self, dtype):
+ # Check valid errors raised for divmod and remainder
+ fzero = np.array(0.0, dtype=dtype)
+ fone = np.array(1.0, dtype=dtype)
+ finf = np.array(np.inf, dtype=dtype)
+ fnan = np.array(np.nan, dtype=dtype)
+ # since divmod is a combination of both the remainder and divide
+ # ops, it will set both the divide-by-zero and invalid flags
+ with np.errstate(divide='raise', invalid='ignore'):
+ assert_raises(FloatingPointError, np.divmod, fone, fzero)
+ with np.errstate(divide='ignore', invalid='raise'):
+ assert_raises(FloatingPointError, np.divmod, fone, fzero)
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, np.divmod, fzero, fzero)
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, np.divmod, finf, finf)
+ with np.errstate(divide='ignore', invalid='raise'):
+ assert_raises(FloatingPointError, np.divmod, finf, fzero)
+ with np.errstate(divide='raise', invalid='ignore'):
+ # inf / 0 does not set any flags, only the modulo creates a NaN
+ np.divmod(finf, fzero)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.xfail(sys.platform.startswith("darwin"),
+ reason="MacOS seems to not give the correct 'invalid' warning for "
+ "`fmod`. Hopefully, others always do.")
+ @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+ @pytest.mark.parametrize('fn', [np.fmod, np.remainder])
+ def test_float_remainder_errors(self, dtype, fn):
+ fzero = np.array(0.0, dtype=dtype)
+ fone = np.array(1.0, dtype=dtype)
+ finf = np.array(np.inf, dtype=dtype)
+ fnan = np.array(np.nan, dtype=dtype)
+
+ # The following already contain a NaN and should not warn.
+ with np.errstate(all='raise'):
+ with pytest.raises(FloatingPointError,
+ match="invalid value"):
+ fn(fone, fzero)
+ fn(fnan, fzero)
+ fn(fzero, fnan)
+ fn(fone, fnan)
+ fn(fnan, fone)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_float_remainder_overflow(self):
+ a = np.finfo(np.float64).tiny
+ with np.errstate(over='ignore', invalid='ignore'):
+ div, mod = np.divmod(4, a)
+ assert_(np.isinf(div))
+ assert_(mod == 0)
+ with np.errstate(over='raise', invalid='ignore'):
+ assert_raises(FloatingPointError, np.divmod, 4, a)
+ with np.errstate(invalid='raise', over='ignore'):
+ assert_raises(FloatingPointError, np.divmod, 4, a)
+
+ def test_float_divmod_corner_cases(self):
+ # check nan cases
+ for dt in np.typecodes['Float']:
+ fnan = np.array(np.nan, dtype=dt)
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+ sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+ div, rem = np.divmod(fone, fzer)
+ assert np.isinf(div), 'dt: %s, div: %s' % (dt, div)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ div, rem = np.divmod(fzer, fzer)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
+ div, rem = np.divmod(finf, finf)
+ assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ div, rem = np.divmod(finf, fzer)
+ assert np.isinf(div), 'dt: %s, div: %s' % (dt, div)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ div, rem = np.divmod(fnan, fone)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
+ div, rem = np.divmod(fone, fnan)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
+ div, rem = np.divmod(fnan, fzer)
+ assert np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem)
+ assert np.isnan(div), 'dt: %s, div: %s' % (dt, div)
+
+ def test_float_remainder_corner_cases(self):
+ # Check remainder magnitude.
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ b = np.array(1.0, dtype=dt)
+ a = np.nextafter(np.array(0.0, dtype=dt), -b)
+ rem = np.remainder(a, b)
+ assert_(rem <= b, 'dt: %s' % dt)
+ rem = np.remainder(-a, -b)
+ assert_(rem >= -b, 'dt: %s' % dt)
+
+ # Check nans, inf
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+ sup.filter(RuntimeWarning, "invalid value encountered in fmod")
+ for dt in np.typecodes['Float']:
+ fone = np.array(1.0, dtype=dt)
+ fzer = np.array(0.0, dtype=dt)
+ finf = np.array(np.inf, dtype=dt)
+ fnan = np.array(np.nan, dtype=dt)
+ rem = np.remainder(fone, fzer)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ # MSVC 2008 returns NaN here, so disable the check.
+ #rem = np.remainder(fone, finf)
+ #assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
+ rem = np.remainder(finf, fone)
+ fmod = np.fmod(finf, fone)
+ assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ rem = np.remainder(finf, finf)
+ fmod = np.fmod(finf, finf)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+ rem = np.remainder(finf, fzer)
+ fmod = np.fmod(finf, fzer)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+ rem = np.remainder(fone, fnan)
+ fmod = np.fmod(fone, fnan)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+ rem = np.remainder(fnan, fzer)
+ fmod = np.fmod(fnan, fzer)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+ rem = np.remainder(fnan, fone)
+ fmod = np.fmod(fnan, fone)
+ assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
+ assert_(np.isnan(fmod), 'dt: %s, fmod: %s' % (dt, fmod))
+
+
+class TestDivisionIntegerOverflowsAndDivideByZero:
+ result_type = namedtuple('result_type',
+ ['nocast', 'casted'])
+ helper_lambdas = {
+ 'zero': lambda dtype: 0,
+ 'min': lambda dtype: np.iinfo(dtype).min,
+ 'neg_min': lambda dtype: -np.iinfo(dtype).min,
+ 'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
+ 'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
+ }
+ overflow_results = {
+ np.remainder: result_type(
+ helper_lambdas['zero'], helper_lambdas['zero']),
+ np.fmod: result_type(
+ helper_lambdas['zero'], helper_lambdas['zero']),
+ operator.mod: result_type(
+ helper_lambdas['zero'], helper_lambdas['zero']),
+ operator.floordiv: result_type(
+ helper_lambdas['min'], helper_lambdas['neg_min']),
+ np.floor_divide: result_type(
+ helper_lambdas['min'], helper_lambdas['neg_min']),
+ np.divmod: result_type(
+ helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
+ }
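+ # For example, with matching dtypes np.int8(-128) // np.int8(-1) cannot
+ # represent 128 and wraps to -128 (the "nocast" result, with a warning),
+ # while np.int8(-128) // np.int16(-1) promotes to int16 first and yields
+ # 128 (the "casted" result).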
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+ def test_signed_division_overflow(self, dtype):
+ to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
+ for op1, op2, extractor, operand_identifier in to_check:
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ res = op1 // op2
+
+ assert res.dtype == op1.dtype
+ assert extractor(res) == np.iinfo(op1.dtype).min
+
+ # Remainder is well defined though, and does not warn:
+ res = op1 % op2
+ assert res.dtype == op1.dtype
+ assert extractor(res) == 0
+ # Check fmod as well:
+ res = np.fmod(op1, op2)
+ assert extractor(res) == 0
+
+ # Divmod warns for the division part:
+ with pytest.warns(RuntimeWarning, match="overflow encountered"):
+ res1, res2 = np.divmod(op1, op2)
+
+ assert res1.dtype == res2.dtype == op1.dtype
+ assert extractor(res1) == np.iinfo(op1.dtype).min
+ assert extractor(res2) == 0
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_divide_by_zero(self, dtype):
+ # Note that the return value cannot be well defined here, but NumPy
+ # currently uses 0 consistently. This could be changed.
+ to_check = interesting_binop_operands(1, 0, dtype)
+ for op1, op2, extractor, operand_identifier in to_check:
+ with pytest.warns(RuntimeWarning, match="divide by zero"):
+ res = op1 // op2
+
+ assert res.dtype == op1.dtype
+ assert extractor(res) == 0
+
+ with pytest.warns(RuntimeWarning, match="divide by zero"):
+ res1, res2 = np.divmod(op1, op2)
+
+ assert res1.dtype == res2.dtype == op1.dtype
+ assert extractor(res1) == 0
+ assert extractor(res2) == 0
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("dividend_dtype",
+ np.sctypes['int'])
+ @pytest.mark.parametrize("divisor_dtype",
+ np.sctypes['int'])
+ @pytest.mark.parametrize("operation",
+ [np.remainder, np.fmod, np.divmod, np.floor_divide,
+ operator.mod, operator.floordiv])
+ @np.errstate(divide='warn', over='warn')
+ def test_overflows(self, dividend_dtype, divisor_dtype, operation):
+ # SIMD tries to perform the operation on as many elements as possible
+ # that is a multiple of the register's size. We resort to the
+ # default implementation for the leftover elements.
+ # We try to cover all paths here.
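+ # With 128-bit vectors and int8 operands, for instance, one SIMD step
+ # handles 16 elements, so lengths 1..128 cover whole vectors as well as
+ # every possible leftover size.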
+ arrays = [np.array([np.iinfo(dividend_dtype).min]*i,
+ dtype=dividend_dtype) for i in range(1, 129)]
+ divisor = np.array([-1], dtype=divisor_dtype)
+ # If the divisor is a larger type than the dividend (`else` case),
+ # the result is promoted to a type larger than the dividend and so
+ # cannot overflow for `divmod` and `floor_divide`.
+ if np.dtype(dividend_dtype).itemsize >= np.dtype(
+ divisor_dtype).itemsize and operation in (
+ np.divmod, np.floor_divide, operator.floordiv):
+ with pytest.warns(
+ RuntimeWarning,
+ match="overflow encountered in"):
+ result = operation(
+ dividend_dtype(np.iinfo(dividend_dtype).min),
+ divisor_dtype(-1)
+ )
+ assert result == self.overflow_results[operation].nocast(
+ dividend_dtype)
+
+ # Arrays
+ for a in arrays:
+ # In the divmod case the result is a (quotient, remainder) pair of
+ # columns, so flatten it column-first ('f') to line it up with the
+ # flattened expected result.
+ with pytest.warns(
+ RuntimeWarning,
+ match="overflow encountered in"):
+ result = np.array(operation(a, divisor)).flatten('f')
+ expected_array = np.array(
+ [self.overflow_results[operation].nocast(
+ dividend_dtype)]*len(a)).flatten()
+ assert_array_equal(result, expected_array)
+ else:
+ # Scalars
+ result = operation(
+ dividend_dtype(np.iinfo(dividend_dtype).min),
+ divisor_dtype(-1)
+ )
+ assert result == self.overflow_results[operation].casted(
+ dividend_dtype)
+
+ # Arrays
+ for a in arrays:
+ # See above comment on flatten
+ result = np.array(operation(a, divisor)).flatten('f')
+ expected_array = np.array(
+ [self.overflow_results[operation].casted(
+ dividend_dtype)]*len(a)).flatten()
+ assert_array_equal(result, expected_array)
+
+
+class TestCbrt:
+ def test_cbrt_scalar(self):
+ assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
+
+ def test_cbrt(self):
+ x = np.array([1., 2., -3., np.inf, -np.inf])
+ assert_almost_equal(np.cbrt(x**3), x)
+
+ assert_(np.isnan(np.cbrt(np.nan)))
+ assert_equal(np.cbrt(np.inf), np.inf)
+ assert_equal(np.cbrt(-np.inf), -np.inf)
+
+
+class TestPower:
+ def test_power_float(self):
+ x = np.array([1., 2., 3.])
+ assert_equal(x**0, [1., 1., 1.])
+ assert_equal(x**1, x)
+ assert_equal(x**2, [1., 4., 9.])
+ y = x.copy()
+ y **= 2
+ assert_equal(y, [1., 4., 9.])
+ assert_almost_equal(x**(-1), [1., 0.5, 1./3])
+ assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
+
+ for out, inp, msg in _gen_alignment_data(dtype=np.float32,
+ type='unary',
+ max_size=11):
+ exp = [ncu.sqrt(i) for i in inp]
+ assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+ np.sqrt(inp, out=out)
+ assert_equal(out, exp, err_msg=msg)
+
+ for out, inp, msg in _gen_alignment_data(dtype=np.float64,
+ type='unary',
+ max_size=7):
+ exp = [ncu.sqrt(i) for i in inp]
+ assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+ np.sqrt(inp, out=out)
+ assert_equal(out, exp, err_msg=msg)
+
+ def test_power_complex(self):
+ x = np.array([1+2j, 2+3j, 3+4j])
+ assert_equal(x**0, [1., 1., 1.])
+ assert_equal(x**1, x)
+ assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
+ assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
+ assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
+ assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
+ assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
+ assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
+ (-117-44j)/15625])
+ assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
+ ncu.sqrt(3+4j)])
+ norm = 1./((x**14)[0])
+ assert_almost_equal(x**14 * norm,
+ [i * norm for i in [-76443+16124j, 23161315+58317492j,
+ 5583548873 + 2465133864j]])
+
+ # Ticket #836
+ def assert_complex_equal(x, y):
+ assert_array_equal(x.real, y.real)
+ assert_array_equal(x.imag, y.imag)
+
+ for z in [complex(0, np.inf), complex(1, np.inf)]:
+ z = np.array([z], dtype=np.complex_)
+ with np.errstate(invalid="ignore"):
+ assert_complex_equal(z**1, z)
+ assert_complex_equal(z**2, z*z)
+ assert_complex_equal(z**3, z*z*z)
+
+ def test_power_zero(self):
+ # ticket #1271
+ zero = np.array([0j])
+ one = np.array([1+0j])
+ cnan = np.array([complex(np.nan, np.nan)])
+ # FIXME cinf not tested.
+ #cinf = np.array([complex(np.inf, 0)])
+
+ def assert_complex_equal(x, y):
+ x, y = np.asarray(x), np.asarray(y)
+ assert_array_equal(x.real, y.real)
+ assert_array_equal(x.imag, y.imag)
+
+ # positive powers
+ for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+ assert_complex_equal(np.power(zero, p), zero)
+
+ # zero power
+ assert_complex_equal(np.power(zero, 0), one)
+ with np.errstate(invalid="ignore"):
+ assert_complex_equal(np.power(zero, 0+1j), cnan)
+
+ # negative power
+ for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+ assert_complex_equal(np.power(zero, -p), cnan)
+ assert_complex_equal(np.power(zero, -1+0.2j), cnan)
+
+ def test_fast_power(self):
+ x = np.array([1, 2, 3], np.int16)
+ res = x**2.0
+ assert_((x**2.00001).dtype is res.dtype)
+ assert_array_equal(res, [1, 4, 9])
+ # check the inplace operation on the casted copy doesn't mess with x
+ assert_(not np.may_share_memory(res, x))
+ assert_array_equal(x, [1, 2, 3])
+
+ # Check that the fast path ignores 1-element arrays that are not 0-d
+ res = x ** np.array([[[2]]])
+ assert_equal(res.shape, (1, 1, 3))
+
+ def test_integer_power(self):
+ a = np.array([15, 15], 'i8')
+ b = np.power(a, a)
+ assert_equal(b, [437893890380859375, 437893890380859375])
+
+ def test_integer_power_with_integer_zero_exponent(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ arr = np.arange(-10, 10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ dtypes = np.typecodes['UnsignedInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+ def test_integer_power_of_1(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(10, dtype=dt)
+ assert_equal(np.power(1, arr), np.ones_like(arr))
+
+ def test_integer_power_of_zero(self):
+ dtypes = np.typecodes['AllInteger']
+ for dt in dtypes:
+ arr = np.arange(1, 10, dtype=dt)
+ assert_equal(np.power(0, arr), np.zeros_like(arr))
+
+ def test_integer_to_negative_power(self):
+ dtypes = np.typecodes['Integer']
+ for dt in dtypes:
+ a = np.array([0, 1, 2, 3], dtype=dt)
+ b = np.array([0, 1, 2, -3], dtype=dt)
+ one = np.array(1, dtype=dt)
+ minusone = np.array(-1, dtype=dt)
+ assert_raises(ValueError, np.power, a, b)
+ assert_raises(ValueError, np.power, a, minusone)
+ assert_raises(ValueError, np.power, one, b)
+ assert_raises(ValueError, np.power, one, minusone)
+
+ def test_float_to_inf_power(self):
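+ # IEEE-754 pow: x**+inf is inf for |x| > 1, 0 for |x| < 1, and 1 for
+ # |x| == 1 regardless of sign; x**-inf swaps the inf and 0 cases.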
+ for dt in [np.float32, np.float64]:
+ a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt)
+ b = np.array([np.inf, -np.inf, np.inf, -np.inf,
+ np.inf, -np.inf, np.inf, -np.inf], dt)
+ r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt)
+ assert_equal(np.power(a, b), r)
+
+
+class TestFloat_power:
+ def test_type_conversion(self):
+ arg_type = '?bhilBHILefdgFDG'
+ res_type = 'ddddddddddddgDDG'
+ for dtin, dtout in zip(arg_type, res_type):
+ msg = "dtin: %s, dtout: %s" % (dtin, dtout)
+ arg = np.ones(1, dtype=dtin)
+ res = np.float_power(arg, arg)
+ assert_(res.dtype.name == np.dtype(dtout).name, msg)
+
+
+class TestLog2:
+ @pytest.mark.parametrize('dt', ['f', 'd', 'g'])
+ def test_log2_values(self, dt):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_almost_equal(np.log2(xf), yf)
+
+ @pytest.mark.parametrize("i", range(1, 65))
+ def test_log2_ints(self, i):
+ # A good log2 implementation should provide this; it might fail on an
+ # OS with a bad libm.
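+ # (2.**i is exact in binary64 for this whole range, so np.log2 must
+ # return float(i) exactly.)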
+ v = np.log2(2.**i)
+ assert_equal(v, float(i), err_msg='at exponent %d' % i)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_log2_special(self):
+ assert_equal(np.log2(1.), 0.)
+ assert_equal(np.log2(np.inf), np.inf)
+ assert_(np.isnan(np.log2(np.nan)))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_(np.isnan(np.log2(-1.)))
+ assert_(np.isnan(np.log2(-np.inf)))
+ assert_equal(np.log2(0.), -np.inf)
+ assert_(w[0].category is RuntimeWarning)
+ assert_(w[1].category is RuntimeWarning)
+ assert_(w[2].category is RuntimeWarning)
+
+
+class TestExp2:
+ def test_exp2_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_almost_equal(np.exp2(yf), xf)
+
+
+class TestLogAddExp2(_FilterInvalids):
+ # Need test for intermediate precisions
+ def test_logaddexp2_values(self):
+ x = [1, 2, 3, 4, 5]
+ y = [5, 4, 3, 2, 1]
+ z = [6, 6, 6, 6, 6]
+ for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
+ xf = np.log2(np.array(x, dtype=dt))
+ yf = np.log2(np.array(y, dtype=dt))
+ zf = np.log2(np.array(z, dtype=dt))
+ assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
+
+ def test_logaddexp2_range(self):
+ x = [1000000, -1000000, 1000200, -1000200]
+ y = [1000200, -1000200, 1000000, -1000000]
+ z = [1000200, -1000000, 1000200, -1000000]
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
+
+ def test_inf(self):
+ inf = np.inf
+ x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
+ y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
+ z = [inf, inf, inf, -inf, inf, inf, 1, 1]
+ with np.errstate(invalid='raise'):
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_equal(np.logaddexp2(logxf, logyf), logzf)
+
+ def test_nan(self):
+ assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
+ assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
+ assert_(np.isnan(np.logaddexp2(np.nan, 0)))
+ assert_(np.isnan(np.logaddexp2(0, np.nan)))
+ assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
+
+ def test_reduce(self):
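+ # -inf is the identity because 2.0**-inf == 0.0, the neutral element
+ # of the sum that logaddexp2 represents in log space.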
+ assert_equal(np.logaddexp2.identity, -np.inf)
+ assert_equal(np.logaddexp2.reduce([]), -np.inf)
+ assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
+ assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
+
+
+class TestLog:
+ def test_log_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ log2_ = 0.69314718055994530943
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)*log2_
+ assert_almost_equal(np.log(xf), yf)
+
+ # test aliasing (issue #17761)
+ x = np.array([2, 0.937500, 3, 0.947500, 1.054697])
+ xf = np.log(x)
+ assert_almost_equal(np.log(x, out=x), xf)
+
+ # test that log() of the max for each dtype does not raise
+ for dt in ['f', 'd', 'g']:
+ with np.errstate(all='raise'):
+ x = np.finfo(dt).max
+ np.log(x)
+
+ def test_log_strides(self):
+ np.random.seed(42)
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ sizes = np.arange(2,100)
+ for ii in sizes:
+ x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0,size=ii))
+ x_special = x_f64.copy()
+ x_special[3:-1:4] = 1.0
+ y_true = np.log(x_f64)
+ y_special = np.log(x_special)
+ for jj in strides:
+ assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2)
+
+class TestExp:
+ def test_exp_values(self):
+ x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ for dt in ['f', 'd', 'g']:
+ log2_ = 0.69314718055994530943
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)*log2_
+ assert_almost_equal(np.exp(yf), xf)
+
+ def test_exp_strides(self):
+ np.random.seed(42)
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ sizes = np.arange(2,100)
+ for ii in sizes:
+ x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1,size=ii))
+ y_true = np.exp(x_f64)
+ for jj in strides:
+ assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2)
+
+class TestSpecialFloats:
+ def test_exp_values(self):
+ with np.errstate(under='raise', over='raise'):
+ x = [np.nan, np.nan, np.inf, 0.]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ for dt in ['e', 'f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.exp(yf), xf)
+
+ # See: https://github.com/numpy/numpy/issues/19192
+ @pytest.mark.xfail(
+ _glibc_older_than("2.17"),
+ reason="Older glibc versions may not raise appropriate FP exceptions"
+ )
+ def test_exp_exceptions(self):
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.exp, np.float16(11.0899))
+ assert_raises(FloatingPointError, np.exp, np.float32(100.))
+ assert_raises(FloatingPointError, np.exp, np.float32(1E19))
+ assert_raises(FloatingPointError, np.exp, np.float64(800.))
+ assert_raises(FloatingPointError, np.exp, np.float64(1E19))
+
+ with np.errstate(under='raise'):
+ assert_raises(FloatingPointError, np.exp, np.float16(-17.5))
+ assert_raises(FloatingPointError, np.exp, np.float32(-1000.))
+ assert_raises(FloatingPointError, np.exp, np.float32(-1E19))
+ assert_raises(FloatingPointError, np.exp, np.float64(-1000.))
+ assert_raises(FloatingPointError, np.exp, np.float64(-1E19))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_log_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0]
+ y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0]
+ for dt in ['e', 'f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ yf1p = np.array(y1p, dtype=dt)
+ assert_equal(np.log(yf), xf)
+ assert_equal(np.log2(yf), xf)
+ assert_equal(np.log10(yf), xf)
+ assert_equal(np.log1p(yf1p), xf)
+
+ with np.errstate(divide='raise'):
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, np.log,
+ np.array(0.0, dtype=dt))
+ assert_raises(FloatingPointError, np.log2,
+ np.array(0.0, dtype=dt))
+ assert_raises(FloatingPointError, np.log10,
+ np.array(0.0, dtype=dt))
+ assert_raises(FloatingPointError, np.log1p,
+ np.array(-1.0, dtype=dt))
+
+ with np.errstate(invalid='raise'):
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, np.log,
+ np.array(-np.inf, dtype=dt))
+ assert_raises(FloatingPointError, np.log,
+ np.array(-1.0, dtype=dt))
+ assert_raises(FloatingPointError, np.log2,
+ np.array(-np.inf, dtype=dt))
+ assert_raises(FloatingPointError, np.log2,
+ np.array(-1.0, dtype=dt))
+ assert_raises(FloatingPointError, np.log10,
+ np.array(-np.inf, dtype=dt))
+ assert_raises(FloatingPointError, np.log10,
+ np.array(-1.0, dtype=dt))
+ assert_raises(FloatingPointError, np.log1p,
+ np.array(-np.inf, dtype=dt))
+ assert_raises(FloatingPointError, np.log1p,
+ np.array(-2.0, dtype=dt))
+
+ # See https://github.com/numpy/numpy/issues/18005
+ with assert_no_warnings():
+ a = np.array(1e9, dtype='float32')
+ np.log(a)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_sincos_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.nan, np.nan]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ for dt in ['e', 'f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sin(yf), xf)
+ assert_equal(np.cos(yf), xf)
+
+
+ with np.errstate(invalid='raise'):
+ for callable in [np.sin, np.cos]:
+ for value in [np.inf, -np.inf]:
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, callable,
+ np.array([value], dtype=dt))
+
+ @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g'])
+ def test_sqrt_values(self, dt):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.inf, np.nan, 0.]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sqrt(yf), xf)
+
+ # with np.errstate(invalid='raise'):
+ # assert_raises(
+ # FloatingPointError, np.sqrt, np.array(-100., dtype=dt)
+ # )
+
+ def test_abs_values(self):
+ x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
+ for dt in ['e', 'f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.abs(yf), xf)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_square_values(self):
+ x = [np.nan, np.nan, np.inf, np.inf]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ with np.errstate(all='ignore'):
+ for dt in ['e', 'f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.square(yf), xf)
+
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.square,
+ np.array(1E3, dtype='e'))
+ assert_raises(FloatingPointError, np.square,
+ np.array(1E32, dtype='f'))
+ assert_raises(FloatingPointError, np.square,
+ np.array(1E200, dtype='d'))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_reciprocal_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
+ for dt in ['e', 'f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.reciprocal(yf), xf)
+
+ with np.errstate(divide='raise'):
+ for dt in ['e', 'f', 'd', 'g']:
+ assert_raises(FloatingPointError, np.reciprocal,
+ np.array(-0.0, dtype=dt))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_tan(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf]
+ out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.tan(in_arr), out_arr)
+
+ with np.errstate(invalid='raise'):
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, np.tan,
+ np.array(np.inf, dtype=dt))
+ assert_raises(FloatingPointError, np.tan,
+ np.array(-np.inf, dtype=dt))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_arcsincos(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, np.nan, np.nan]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.arcsin(in_arr), out_arr)
+ assert_equal(np.arccos(in_arr), out_arr)
+
+ for callable in [np.arcsin, np.arccos]:
+ for value in [np.inf, -np.inf, 2.0, -2.0]:
+ for dt in ['e', 'f', 'd']:
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, callable,
+ np.array(value, dtype=dt))
+
+ def test_arctan(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan]
+ out = [np.nan, np.nan]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.arctan(in_arr), out_arr)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_sinh(self):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, np.inf, -np.inf]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.sinh(in_arr), out_arr)
+
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.sinh,
+ np.array(12.0, dtype='e'))
+ assert_raises(FloatingPointError, np.sinh,
+ np.array(120.0, dtype='f'))
+ assert_raises(FloatingPointError, np.sinh,
+ np.array(1200.0, dtype='d'))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_cosh(self):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, np.inf, np.inf]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.cosh(in_arr), out_arr)
+
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.cosh,
+ np.array(12.0, dtype='e'))
+ assert_raises(FloatingPointError, np.cosh,
+ np.array(120.0, dtype='f'))
+ assert_raises(FloatingPointError, np.cosh,
+ np.array(1200.0, dtype='d'))
+
+ def test_tanh(self):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, 1.0, -1.0]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.tanh(in_arr), out_arr)
+
+ def test_arcsinh(self):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, np.inf, -np.inf]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.arcsinh(in_arr), out_arr)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_arccosh(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0]
+ out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.arccosh(in_arr), out_arr)
+
+ for value in [0.0, -np.inf]:
+ with np.errstate(invalid='raise'):
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, np.arccosh,
+ np.array(value, dtype=dt))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_arctanh(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0]
+ out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.arctanh(in_arr), out_arr)
+
+ for value in [1.01, np.inf, -np.inf, 1.0, -1.0]:
+ with np.errstate(invalid='raise', divide='raise'):
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, np.arctanh,
+ np.array(value, dtype=dt))
+
+ # See: https://github.com/numpy/numpy/issues/20448
+ @pytest.mark.xfail(
+ _glibc_older_than("2.17"),
+ reason="Older glibc versions may not raise appropriate FP exceptions"
+ )
+ def test_exp2(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, np.inf, 0.0]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.exp2(in_arr), out_arr)
+
+ for value in [2000.0, -2000.0]:
+ with np.errstate(over='raise', under='raise'):
+ for dt in ['e', 'f', 'd']:
+ assert_raises(FloatingPointError, np.exp2,
+ np.array(value, dtype=dt))
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_expm1(self):
+ with np.errstate(all='ignore'):
+ in_ = [np.nan, -np.nan, np.inf, -np.inf]
+ out = [np.nan, np.nan, np.inf, -1.0]
+ for dt in ['e', 'f', 'd']:
+ in_arr = np.array(in_, dtype=dt)
+ out_arr = np.array(out, dtype=dt)
+ assert_equal(np.expm1(in_arr), out_arr)
+
+ for value in [200.0, 2000.0]:
+ with np.errstate(over='raise'):
+ for dt in ['e', 'f']:
+ assert_raises(FloatingPointError, np.expm1,
+ np.array(value, dtype=dt))
+
+ # test to ensure no spurious FP exceptions are raised due to SIMD
+ INF_INVALID_ERR = [
+ np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh
+ ]
+ NEG_INVALID_ERR = [
+ np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh,
+ np.arctanh
+ ]
+ ONE_INVALID_ERR = [
+ np.arctanh,
+ ]
+ LTONE_INVALID_ERR = [
+ np.arccosh,
+ ]
+ BYZERO_ERR = [
+ np.log, np.log2, np.log10, np.reciprocal, np.arccosh
+ ]
+
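+    # Each list above names the ufuncs that are expected to raise for the
+    # given input class; test_unary_spurious_fpexception skips those
+    # pairings and checks that every other unary FP ufunc stays silent.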
+ @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP)
+ @pytest.mark.parametrize("dtype", ('e', 'f', 'd'))
+ @pytest.mark.parametrize("data, escape", (
+ ([0.03], LTONE_INVALID_ERR),
+ ([0.03]*32, LTONE_INVALID_ERR),
+ # neg
+ ([-1.0], NEG_INVALID_ERR),
+ ([-1.0]*32, NEG_INVALID_ERR),
+ # flat
+ ([1.0], ONE_INVALID_ERR),
+ ([1.0]*32, ONE_INVALID_ERR),
+ # zero
+ ([0.0], BYZERO_ERR),
+ ([0.0]*32, BYZERO_ERR),
+ ([-0.0], BYZERO_ERR),
+ ([-0.0]*32, BYZERO_ERR),
+ # nan
+ ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR),
+ ([0.5, 0.5, 0.5, np.nan]*32, LTONE_INVALID_ERR),
+ ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR),
+ ([np.nan, 1.0, 1.0, 1.0]*32, ONE_INVALID_ERR),
+ ([np.nan], []),
+ ([np.nan]*32, []),
+ # inf
+ ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR),
+ ([0.5, 0.5, 0.5, np.inf]*32, INF_INVALID_ERR + LTONE_INVALID_ERR),
+ ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR),
+ ([np.inf, 1.0, 1.0, 1.0]*32, INF_INVALID_ERR),
+ ([np.inf], INF_INVALID_ERR),
+ ([np.inf]*32, INF_INVALID_ERR),
+ # ninf
+ ([0.5, 0.5, 0.5, -np.inf],
+ NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
+ ([0.5, 0.5, 0.5, -np.inf]*32,
+ NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
+ ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR),
+ ([-np.inf, 1.0, 1.0, 1.0]*32, NEG_INVALID_ERR + INF_INVALID_ERR),
+ ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR),
+ ([-np.inf]*32, NEG_INVALID_ERR + INF_INVALID_ERR),
+ ))
+ def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape):
+ if escape and ufunc in escape:
+ return
+ # FIXME: NAN raises FP invalid exception:
+ # - ceil/float16 on MSVC:32-bit
+ # - spacing/float16 on almost all platforms
+ if ufunc in (np.spacing, np.ceil) and dtype == 'e':
+ return
+ array = np.array(data, dtype=dtype)
+ with assert_no_warnings():
+ ufunc(array)
+
+class TestFPClass:
+ @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+ def test_fpclass(self, stride):
+ arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')
+ arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')
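+        # The tiny trailing values sit near the float64 normal limit and the
+        # float32 subnormal limit, so the classification ufuncs are also
+        # exercised close to the underflow boundary.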
+ nan = np.array([True, True, False, False, False, False, False, False, False, False])
+ inf = np.array([False, False, True, True, False, False, False, False, False, False])
+ sign = np.array([False, True, False, True, True, False, True, False, False, True])
+ finite = np.array([False, False, False, False, True, True, True, True, True, True])
+ assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])
+ assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])
+ assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])
+ assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])
+ assert_equal(np.signbit(arr_f32[::stride]), sign[::stride])
+ assert_equal(np.signbit(arr_f64[::stride]), sign[::stride])
+ assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride])
+ assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride])
+
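+# ldexp(mant, exp) computes mant * 2**exp; every (mant, exp) pair below is
+# chosen so that the product is exactly 1.0.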
+class TestLDExp:
+ @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+ @pytest.mark.parametrize("dtype", ['f', 'd'])
+ def test_ldexp(self, dtype, stride):
+ mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype)
+ exp = np.array([3, 2, 1, 0, 0, -1, -2, -3], dtype='i')
+ out = np.zeros(8, dtype=dtype)
+ assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride])
+ assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride])
+
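+# frexp is the inverse of ldexp: it splits x into (mant, exp) with
+# x == mant * 2**exp and 0.5 <= |mant| < 1 for finite nonzero x,
+# e.g. 1.0 -> (0.5, 1).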
+class TestFRExp:
+ @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+ @pytest.mark.parametrize("dtype", ['f', 'd'])
+ @pytest.mark.skipif(not sys.platform.startswith('linux'),
+ reason="np.frexp gives different answers for NAN/INF on windows and linux")
+ def test_frexp(self, dtype, stride):
+ arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype)
+ mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype)
+ exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i')
+ out_mant = np.ones(8, dtype=dtype)
+ out_exp = 2*np.ones(8, dtype='i')
+ mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride]))
+ assert_equal(mant_true[::stride], mant)
+ assert_equal(exp_true[::stride], exp)
+ assert_equal(out_mant[::stride], mant_true[::stride])
+ assert_equal(out_exp[::stride], exp_true[::stride])
+
+# func : [maxulperror, low, high]
+avx_ufuncs = {'sqrt' :[1, 0., 100.],
+ 'absolute' :[0, -100., 100.],
+ 'reciprocal' :[1, 1., 100.],
+ 'square' :[1, -100., 100.],
+ 'rint' :[0, -100., 100.],
+ 'floor' :[0, -100., 100.],
+ 'ceil' :[0, -100., 100.],
+ 'trunc' :[0, -100., 100.]}
+
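+# maxulperror is the allowed error in units in the last place (ULP),
+# measured against a longdouble reference; an entry of 0 requires exact
+# equality with the longdouble result rounded to the target precision.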
+class TestAVXUfuncs:
+ def test_avx_based_ufunc(self):
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ np.random.seed(42)
+ for func, prop in avx_ufuncs.items():
+ maxulperr = prop[0]
+ minval = prop[1]
+ maxval = prop[2]
+ # various array sizes to ensure masking in AVX is tested
+ for size in range(1,32):
+ myfunc = getattr(np, func)
+ x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,
+ size=size))
+ x_f64 = np.float64(x_f32)
+ x_f128 = np.longdouble(x_f32)
+ y_true128 = myfunc(x_f128)
+ if maxulperr == 0:
+ assert_equal(myfunc(x_f32), np.float32(y_true128))
+ assert_equal(myfunc(x_f64), np.float64(y_true128))
+ else:
+ assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),
+ maxulp=maxulperr)
+ assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),
+ maxulp=maxulperr)
+ # various strides to test gather instruction
+ if size > 1:
+ y_true32 = myfunc(x_f32)
+ y_true64 = myfunc(x_f64)
+ for jj in strides:
+ assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
+ assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
+
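+# The float32 transcendental tests below compare SIMD float32 results with
+# the float64 result rounded back to float32, bounding the error in ULPs.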
+class TestAVXFloat32Transcendental:
+ def test_exp_float32(self):
+ np.random.seed(42)
+ x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
+
+ def test_log_float32(self):
+ np.random.seed(42)
+ x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
+
+ def test_sincos_float32(self):
+ np.random.seed(42)
+ N = 1000000
+ M = np.int_(N/20)
+ index = np.random.randint(low=0, high=N, size=M)
+ x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
+ if not _glibc_older_than("2.17"):
+ # test coverage for elements > 117435.992f for which glibc is used
+ # this is known to be problematic on old glibc, so skip it there
+ x_f32[index] = np.float32(10E+10*np.random.rand(M))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+ assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
+        # test aliasing (issue #17761)
+ tx_f32 = x_f32.copy()
+ assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+ assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)
+
+ def test_strided_float32(self):
+ np.random.seed(42)
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ sizes = np.arange(2,100)
+ for ii in sizes:
+ x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
+ x_f32_large = x_f32.copy()
+ x_f32_large[3:-1:4] = 120000.0
+ exp_true = np.exp(x_f32)
+ log_true = np.log(x_f32)
+ sin_true = np.sin(x_f32_large)
+ cos_true = np.cos(x_f32_large)
+ for jj in strides:
+ assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
+
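+# logaddexp(x, y) computes log(exp(x) + exp(y)) without overflowing for
+# large inputs; its identity is -inf because exp(-inf) == 0.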
+class TestLogAddExp(_FilterInvalids):
+ def test_logaddexp_values(self):
+ x = [1, 2, 3, 4, 5]
+ y = [5, 4, 3, 2, 1]
+ z = [6, 6, 6, 6, 6]
+ for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
+ xf = np.log(np.array(x, dtype=dt))
+ yf = np.log(np.array(y, dtype=dt))
+ zf = np.log(np.array(z, dtype=dt))
+ assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
+
+ def test_logaddexp_range(self):
+ x = [1000000, -1000000, 1000200, -1000200]
+ y = [1000200, -1000200, 1000000, -1000000]
+ z = [1000200, -1000000, 1000200, -1000000]
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
+
+ def test_inf(self):
+ inf = np.inf
+ x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
+ y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
+ z = [inf, inf, inf, -inf, inf, inf, 1, 1]
+ with np.errstate(invalid='raise'):
+ for dt in ['f', 'd', 'g']:
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_equal(np.logaddexp(logxf, logyf), logzf)
+
+ def test_nan(self):
+ assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
+ assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
+ assert_(np.isnan(np.logaddexp(np.nan, 0)))
+ assert_(np.isnan(np.logaddexp(0, np.nan)))
+ assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
+
+ def test_reduce(self):
+ assert_equal(np.logaddexp.identity, -np.inf)
+ assert_equal(np.logaddexp.reduce([]), -np.inf)
+
+
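+# log1p(x) evaluates log(1 + x) without explicitly forming 1 + x, which
+# preserves precision when x is tiny (e.g. x = 1e-6 below).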
+class TestLog1p:
+ def test_log1p(self):
+ assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
+ assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
+
+ def test_special(self):
+ with np.errstate(invalid="ignore", divide="ignore"):
+ assert_equal(ncu.log1p(np.nan), np.nan)
+ assert_equal(ncu.log1p(np.inf), np.inf)
+ assert_equal(ncu.log1p(-1.), -np.inf)
+ assert_equal(ncu.log1p(-2.), np.nan)
+ assert_equal(ncu.log1p(-np.inf), np.nan)
+
+
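+# expm1(x) evaluates exp(x) - 1 accurately for small x, avoiding the
+# cancellation that computing exp(x) - 1 directly suffers when exp(x)
+# is close to 1.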
+class TestExpm1:
+ def test_expm1(self):
+ assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
+ assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
+
+ def test_special(self):
+ assert_equal(ncu.expm1(np.inf), np.inf)
+ assert_equal(ncu.expm1(0.), 0.)
+ assert_equal(ncu.expm1(-0.), -0.)
+ assert_equal(ncu.expm1(np.inf), np.inf)
+ assert_equal(ncu.expm1(-np.inf), -1.)
+
+ def test_complex(self):
+ x = np.asarray(1e-12)
+ assert_allclose(x, ncu.expm1(x))
+ x = x.astype(np.complex128)
+ assert_allclose(x, ncu.expm1(x))
+
+
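+# hypot(x, y) computes sqrt(x**2 + y**2) without the intermediate overflow
+# or underflow that squaring x and y directly could produce.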
+class TestHypot:
+ def test_simple(self):
+ assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
+ assert_almost_equal(ncu.hypot(0, 0), 0)
+
+ def test_reduce(self):
+ assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
+ assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
+ assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
+ assert_equal(ncu.hypot.reduce([]), 0.0)
+
+
+def assert_hypot_isnan(x, y):
+ with np.errstate(invalid='ignore'):
+ assert_(np.isnan(ncu.hypot(x, y)),
+ "hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
+
+
+def assert_hypot_isinf(x, y):
+ with np.errstate(invalid='ignore'):
+ assert_(np.isinf(ncu.hypot(x, y)),
+ "hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
+
+
+class TestHypotSpecialValues:
+ def test_nan_outputs(self):
+ assert_hypot_isnan(np.nan, np.nan)
+ assert_hypot_isnan(np.nan, 1)
+
+ def test_nan_outputs2(self):
+ assert_hypot_isinf(np.nan, np.inf)
+ assert_hypot_isinf(np.inf, np.nan)
+ assert_hypot_isinf(np.inf, 0)
+ assert_hypot_isinf(0, np.inf)
+ assert_hypot_isinf(np.inf, np.inf)
+ assert_hypot_isinf(np.inf, 23.0)
+
+ def test_no_fpe(self):
+ assert_no_warnings(ncu.hypot, np.inf, 0)
+
+
+def assert_arctan2_isnan(x, y):
+    assert_(np.isnan(ncu.arctan2(x, y)), "arctan2(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_ispinf(x, y):
+    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan2(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_isninf(x, y):
+    assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan2(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_ispzero(x, y):
+    assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
+
+
+def assert_arctan2_isnzero(x, y):
+    assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan2(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
+
+
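+# The cases below mirror the special-value rules that C99 (Annex F)
+# specifies for atan2, which numpy's arctan2 follows.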
+class TestArctan2SpecialValues:
+ def test_one_one(self):
+ # atan2(1, 1) returns pi/4.
+ assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
+ assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
+ assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
+
+ def test_zero_nzero(self):
+ # atan2(+-0, -0) returns +-pi.
+ assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
+ assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
+
+ def test_zero_pzero(self):
+ # atan2(+-0, +0) returns +-0.
+ assert_arctan2_ispzero(np.PZERO, np.PZERO)
+ assert_arctan2_isnzero(np.NZERO, np.PZERO)
+
+ def test_zero_negative(self):
+ # atan2(+-0, x) returns +-pi for x < 0.
+ assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
+ assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
+
+ def test_zero_positive(self):
+ # atan2(+-0, x) returns +-0 for x > 0.
+ assert_arctan2_ispzero(np.PZERO, 1)
+ assert_arctan2_isnzero(np.NZERO, 1)
+
+ def test_positive_zero(self):
+ # atan2(y, +-0) returns +pi/2 for y > 0.
+ assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
+ assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
+
+ def test_negative_zero(self):
+ # atan2(y, +-0) returns -pi/2 for y < 0.
+ assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
+ assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
+
+ def test_any_ninf(self):
+ # atan2(+-y, -infinity) returns +-pi for finite y > 0.
+ assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
+ assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
+
+ def test_any_pinf(self):
+ # atan2(+-y, +infinity) returns +-0 for finite y > 0.
+ assert_arctan2_ispzero(1, np.inf)
+ assert_arctan2_isnzero(-1, np.inf)
+
+ def test_inf_any(self):
+ # atan2(+-infinity, x) returns +-pi/2 for finite x.
+ assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
+ assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
+
+ def test_inf_ninf(self):
+ # atan2(+-infinity, -infinity) returns +-3*pi/4.
+ assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
+ assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
+
+ def test_inf_pinf(self):
+ # atan2(+-infinity, +infinity) returns +-pi/4.
+ assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
+ assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
+
+ def test_nan_any(self):
+ # atan2(nan, x) returns nan for any x, including inf
+ assert_arctan2_isnan(np.nan, np.inf)
+ assert_arctan2_isnan(np.inf, np.nan)
+ assert_arctan2_isnan(np.nan, np.nan)
+
+
+class TestLdexp:
+ def _check_ldexp(self, tp):
+ assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
+ np.array(3, tp)), 16.)
+ assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
+ np.array(3, tp)), 16.)
+ assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
+ np.array(3, tp)), 16.)
+
+ def test_ldexp(self):
+ # The default Python int type should work
+ assert_almost_equal(ncu.ldexp(2., 3), 16.)
+ # The following int types should all be accepted
+ self._check_ldexp(np.int8)
+ self._check_ldexp(np.int16)
+ self._check_ldexp(np.int32)
+ self._check_ldexp('i')
+ self._check_ldexp('l')
+
+ def test_ldexp_overflow(self):
+ # silence warning emitted on overflow
+ with np.errstate(over="ignore"):
+ imax = np.iinfo(np.dtype('l')).max
+ imin = np.iinfo(np.dtype('l')).min
+ assert_equal(ncu.ldexp(2., imax), np.inf)
+ assert_equal(ncu.ldexp(2., imin), 0)
+
+
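+# NaN handling differs between the two pairs of extrema ufuncs: np.maximum
+# and np.minimum propagate NaNs, while np.fmax and np.fmin (tested further
+# below) return the non-NaN operand, mirroring the C99 fmax/fmin functions.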
+class TestMaximum(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.maximum.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), np.nan)
+ assert_equal(func(tmp2), np.nan)
+
+ def test_reduce_complex(self):
+ assert_equal(np.maximum.reduce([1, 2j]), 1)
+ assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([nan, nan, nan])
+ assert_equal(np.maximum(arg1, arg2), out)
+
+ def test_object_nans(self):
+ # Multiple checks to give this a chance to
+ # fail if cmp is used instead of rich compare.
+ # Failure cannot be guaranteed.
+ for i in range(1):
+ x = np.array(float('nan'), object)
+ y = 1.0
+ z = np.array(float('nan'), object)
+ assert_(np.maximum(x, y) == 1.0)
+ assert_(np.maximum(z, y) == 1.0)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
+ assert_equal(np.maximum(arg1, arg2), out)
+
+ def test_object_array(self):
+ arg1 = np.arange(5, dtype=object)
+ arg2 = arg1 + 1
+ assert_equal(np.maximum(arg1, arg2), arg2)
+
+ def test_strided_array(self):
+ arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
+ arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
+ maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0])
+ out = np.ones(8)
+ out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0])
+ assert_equal(np.maximum(arr1,arr2), maxtrue)
+ assert_equal(np.maximum(arr1[::2],arr2[::2]), maxtrue[::2])
+ assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0]))
+ assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan]))
+ assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan]))
+ assert_equal(out, out_maxtrue)
+
+ def test_precision(self):
+ dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+ for dt in dtypes:
+ dtmin = np.finfo(dt).min
+ dtmax = np.finfo(dt).max
+ d1 = dt(0.1)
+ d1_next = np.nextafter(d1, np.inf)
+
+ test_cases = [
+ # v1 v2 expected
+ (dtmin, -np.inf, dtmin),
+ (dtmax, -np.inf, dtmax),
+ (d1, d1_next, d1_next),
+ (dtmax, np.nan, np.nan),
+ ]
+
+ for v1, v2, expected in test_cases:
+ assert_equal(np.maximum([v1], [v2]), [expected])
+ assert_equal(np.maximum.reduce([v1, v2]), expected)
+
+
+class TestMinimum(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.minimum.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), np.nan)
+ assert_equal(func(tmp2), np.nan)
+
+ def test_reduce_complex(self):
+ assert_equal(np.minimum.reduce([1, 2j]), 2j)
+ assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([nan, nan, nan])
+ assert_equal(np.minimum(arg1, arg2), out)
+
+ def test_object_nans(self):
+ # Multiple checks to give this a chance to
+ # fail if cmp is used instead of rich compare.
+ # Failure cannot be guaranteed.
+ for i in range(1):
+ x = np.array(float('nan'), object)
+ y = 1.0
+ z = np.array(float('nan'), object)
+ assert_(np.minimum(x, y) == 1.0)
+ assert_(np.minimum(z, y) == 1.0)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
+ assert_equal(np.minimum(arg1, arg2), out)
+
+ def test_object_array(self):
+ arg1 = np.arange(5, dtype=object)
+ arg2 = arg1 + 1
+ assert_equal(np.minimum(arg1, arg2), arg1)
+
+ def test_strided_array(self):
+ arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf])
+ arr2 = np.array([-2.0,-1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0])
+ mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf])
+ out = np.ones(8)
+ out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0])
+ assert_equal(np.minimum(arr1,arr2), mintrue)
+ assert_equal(np.minimum(arr1[::2],arr2[::2]), mintrue[::2])
+ assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0]))
+ assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan]))
+ assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan]))
+ assert_equal(out, out_mintrue)
+
+ def test_precision(self):
+ dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+ for dt in dtypes:
+ dtmin = np.finfo(dt).min
+ dtmax = np.finfo(dt).max
+ d1 = dt(0.1)
+ d1_next = np.nextafter(d1, np.inf)
+
+ test_cases = [
+ # v1 v2 expected
+ (dtmin, np.inf, dtmin),
+ (dtmax, np.inf, dtmax),
+ (d1, d1_next, d1),
+ (dtmin, np.nan, np.nan),
+ ]
+
+ for v1, v2, expected in test_cases:
+ assert_equal(np.minimum([v1], [v2]), [expected])
+ assert_equal(np.minimum.reduce([v1, v2]), expected)
+
+
+class TestFmax(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.fmax.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 10)
+ assert_equal(func(tmp2), 10)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), 9)
+ assert_equal(func(tmp2), 9)
+
+ def test_reduce_complex(self):
+ assert_equal(np.fmax.reduce([1, 2j]), 1)
+ assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([0, 0, nan])
+ assert_equal(np.fmax(arg1, arg2), out)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
+ assert_equal(np.fmax(arg1, arg2), out)
+
+ def test_precision(self):
+ dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+ for dt in dtypes:
+ dtmin = np.finfo(dt).min
+ dtmax = np.finfo(dt).max
+ d1 = dt(0.1)
+ d1_next = np.nextafter(d1, np.inf)
+
+ test_cases = [
+ # v1 v2 expected
+ (dtmin, -np.inf, dtmin),
+ (dtmax, -np.inf, dtmax),
+ (d1, d1_next, d1_next),
+ (dtmax, np.nan, dtmax),
+ ]
+
+ for v1, v2, expected in test_cases:
+ assert_equal(np.fmax([v1], [v2]), [expected])
+ assert_equal(np.fmax.reduce([v1, v2]), expected)
+
+
+class TestFmin(_FilterInvalids):
+ def test_reduce(self):
+ dflt = np.typecodes['AllFloat']
+ dint = np.typecodes['AllInteger']
+ seq1 = np.arange(11)
+ seq2 = seq1[::-1]
+ func = np.fmin.reduce
+ for dt in dint:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ for dt in dflt:
+ tmp1 = seq1.astype(dt)
+ tmp2 = seq2.astype(dt)
+ assert_equal(func(tmp1), 0)
+ assert_equal(func(tmp2), 0)
+ tmp1[::2] = np.nan
+ tmp2[::2] = np.nan
+ assert_equal(func(tmp1), 1)
+ assert_equal(func(tmp2), 1)
+
+ def test_reduce_complex(self):
+ assert_equal(np.fmin.reduce([1, 2j]), 2j)
+ assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
+
+ def test_float_nans(self):
+ nan = np.nan
+ arg1 = np.array([0, nan, nan])
+ arg2 = np.array([nan, 0, nan])
+ out = np.array([0, 0, nan])
+ assert_equal(np.fmin(arg1, arg2), out)
+
+ def test_complex_nans(self):
+ nan = np.nan
+ for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
+ assert_equal(np.fmin(arg1, arg2), out)
+
+ def test_precision(self):
+ dtypes = [np.float16, np.float32, np.float64, np.longdouble]
+
+ for dt in dtypes:
+ dtmin = np.finfo(dt).min
+ dtmax = np.finfo(dt).max
+ d1 = dt(0.1)
+ d1_next = np.nextafter(d1, np.inf)
+
+ test_cases = [
+ # v1 v2 expected
+ (dtmin, np.inf, dtmin),
+ (dtmax, np.inf, dtmax),
+ (d1, d1_next, d1),
+ (dtmin, np.nan, dtmin),
+ ]
+
+ for v1, v2, expected in test_cases:
+ assert_equal(np.fmin([v1], [v2]), [expected])
+ assert_equal(np.fmin.reduce([v1, v2]), expected)
+
+
+class TestBool:
+ def test_exceptions(self):
+ a = np.ones(1, dtype=np.bool_)
+ assert_raises(TypeError, np.negative, a)
+ assert_raises(TypeError, np.positive, a)
+ assert_raises(TypeError, np.subtract, a, a)
+
+ def test_truth_table_logical(self):
+        # 2, 3 and 4 serve as true values
+ input1 = [0, 0, 3, 2]
+ input2 = [0, 4, 0, 2]
+
+ typecodes = (np.typecodes['AllFloat']
+ + np.typecodes['AllInteger']
+ + '?') # boolean
+ for dtype in map(np.dtype, typecodes):
+ arg1 = np.asarray(input1, dtype=dtype)
+ arg2 = np.asarray(input2, dtype=dtype)
+
+ # OR
+ out = [False, True, True, True]
+ for func in (np.logical_or, np.maximum):
+ assert_equal(func(arg1, arg2).astype(bool), out)
+ # AND
+ out = [False, False, False, True]
+ for func in (np.logical_and, np.minimum):
+ assert_equal(func(arg1, arg2).astype(bool), out)
+ # XOR
+ out = [False, True, True, False]
+ for func in (np.logical_xor, np.not_equal):
+ assert_equal(func(arg1, arg2).astype(bool), out)
+
+ def test_truth_table_bitwise(self):
+ arg1 = [False, False, True, True]
+ arg2 = [False, True, False, True]
+
+ out = [False, True, True, True]
+ assert_equal(np.bitwise_or(arg1, arg2), out)
+
+ out = [False, False, False, True]
+ assert_equal(np.bitwise_and(arg1, arg2), out)
+
+ out = [False, True, True, False]
+ assert_equal(np.bitwise_xor(arg1, arg2), out)
+
+ def test_reduce(self):
+ none = np.array([0, 0, 0, 0], bool)
+ some = np.array([1, 0, 1, 1], bool)
+ every = np.array([1, 1, 1, 1], bool)
+ empty = np.array([], bool)
+
+ arrs = [none, some, every, empty]
+
+ for arr in arrs:
+ assert_equal(np.logical_and.reduce(arr), all(arr))
+
+ for arr in arrs:
+ assert_equal(np.logical_or.reduce(arr), any(arr))
+
+ for arr in arrs:
+ assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
+
+
+class TestBitwiseUFuncs:
+
+ bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
+
+ def test_values(self):
+ for dt in self.bitwise_types:
+ zeros = np.array([0], dtype=dt)
+ ones = np.array([-1]).astype(dt)
+ msg = "dt = '%s'" % dt.char
+
+ assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
+ assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
+
+ assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)
+ assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)
+ assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)
+
+ assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)
+ assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)
+ assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)
+
+ assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)
+ assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)
+ assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)
+
+ def test_types(self):
+ for dt in self.bitwise_types:
+ zeros = np.array([0], dtype=dt)
+ ones = np.array([-1]).astype(dt)
+ msg = "dt = '%s'" % dt.char
+
+ assert_(np.bitwise_not(zeros).dtype == dt, msg)
+ assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)
+ assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
+ assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
+
+ def test_identity(self):
+ assert_(np.bitwise_or.identity == 0, 'bitwise_or')
+ assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
+ assert_(np.bitwise_and.identity == -1, 'bitwise_and')
+
+ def test_reduction(self):
+ binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
+
+ for dt in self.bitwise_types:
+ zeros = np.array([0], dtype=dt)
+ ones = np.array([-1]).astype(dt)
+ for f in binary_funcs:
+ msg = "dt: '%s', f: '%s'" % (dt, f)
+ assert_equal(f.reduce(zeros), zeros, err_msg=msg)
+ assert_equal(f.reduce(ones), ones, err_msg=msg)
+
+ # Test empty reduction, no object dtype
+ for dt in self.bitwise_types[:-1]:
+ # No object array types
+ empty = np.array([], dtype=dt)
+ for f in binary_funcs:
+ msg = "dt: '%s', f: '%s'" % (dt, f)
+ tgt = np.array(f.identity).astype(dt)
+ res = f.reduce(empty)
+ assert_equal(res, tgt, err_msg=msg)
+ assert_(res.dtype == tgt.dtype, msg)
+
+        # Empty object arrays use the identity.  Note that the types may
+        # differ: the actual type used is determined by the assign_identity
+        # function and is not the same as the type returned by the identity
+        # method.
+ for f in binary_funcs:
+ msg = "dt: '%s'" % (f,)
+ empty = np.array([], dtype=object)
+ tgt = f.identity
+ res = f.reduce(empty)
+ assert_equal(res, tgt, err_msg=msg)
+
+ # Non-empty object arrays do not use the identity
+ for f in binary_funcs:
+ msg = "dt: '%s'" % (f,)
+ btype = np.array([True], dtype=object)
+ assert_(type(f.reduce(btype)) is bool, msg)
+
+
+class TestInt:
+ def test_logical_not(self):
+ x = np.ones(10, dtype=np.int16)
+ o = np.ones(10 * 2, dtype=bool)
+ tgt = o.copy()
+ tgt[::2] = False
+ os = o[::2]
+ assert_array_equal(np.logical_not(x, out=os), False)
+ assert_array_equal(o, tgt)
+
+
+class TestFloatingPoint:
+ def test_floating_point(self):
+ assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
+
+
+class TestDegrees:
+ def test_degrees(self):
+ assert_almost_equal(ncu.degrees(np.pi), 180.0)
+ assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
+
+
+class TestRadians:
+ def test_radians(self):
+ assert_almost_equal(ncu.radians(180.0), np.pi)
+ assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
+
+
+class TestHeaviside:
+ def test_heaviside(self):
+ x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
+ expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
+ expected1 = expectedhalf.copy()
+ expected1[0, 2] = 1
+
+ h = ncu.heaviside(x, 0.5)
+ assert_equal(h, expectedhalf)
+
+ h = ncu.heaviside(x, 1.0)
+ assert_equal(h, expected1)
+
+ x = x.astype(np.float32)
+
+ h = ncu.heaviside(x, np.float32(0.5))
+ assert_equal(h, expectedhalf.astype(np.float32))
+
+ h = ncu.heaviside(x, np.float32(1.0))
+ assert_equal(h, expected1.astype(np.float32))
+
+
+class TestSign:
+ def test_sign(self):
+ a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
+ out = np.zeros(a.shape)
+ tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
+
+ with np.errstate(invalid='ignore'):
+ res = ncu.sign(a)
+ assert_equal(res, tgt)
+ res = ncu.sign(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+
+ def test_sign_dtype_object(self):
+ # In reference to github issue #6229
+
+ foo = np.array([-.1, 0, .1])
+ a = np.sign(foo.astype(object))
+ b = np.sign(foo)
+
+ assert_array_equal(a, b)
+
+ def test_sign_dtype_nan_object(self):
+ # In reference to github issue #6229
+ def test_nan():
+ foo = np.array([np.nan])
+ # FIXME: a not used
+ a = np.sign(foo.astype(object))
+
+ assert_raises(TypeError, test_nan)
+
+class TestMinMax:
+ def test_minmax_blocked(self):
+ # simd tests on max/min, test all alignments, slow but important
+ # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
+ for dt, sz in [(np.float32, 15), (np.float64, 7)]:
+ for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+ max_size=sz):
+ for i in range(inp.size):
+ inp[:] = np.arange(inp.size, dtype=dt)
+ inp[i] = np.nan
+ emsg = lambda: '%r\n%s' % (inp, msg)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ assert_(np.isnan(inp.max()), msg=emsg)
+ assert_(np.isnan(inp.min()), msg=emsg)
+
+ inp[i] = 1e10
+ assert_equal(inp.max(), 1e10, err_msg=msg)
+ inp[i] = -1e10
+ assert_equal(inp.min(), -1e10, err_msg=msg)
+
+ def test_lower_align(self):
+ # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+ d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ assert_equal(d.max(), d[0])
+ assert_equal(d.min(), d[0])
+
+ def test_reduce_reorder(self):
+ # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
+        # and put it before the call to an intrinsic function that causes
+ # invalid status to be set. Also make sure warnings are not emitted
+ for n in (2, 4, 8, 16, 32):
+ for dt in (np.float32, np.float16, np.complex64):
+ for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
+ assert_equal(np.min(r), np.nan)
+
+ def test_minimize_no_warns(self):
+ a = np.minimum(np.nan, 1)
+ assert_equal(a, np.nan)
+
+
+class TestAbsoluteNegative:
+ def test_abs_neg_blocked(self):
+ # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
+ for dt, sz in [(np.float32, 11), (np.float64, 5)]:
+ for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
+ max_size=sz):
+ tgt = [ncu.absolute(i) for i in inp]
+ np.absolute(inp, out=out)
+ assert_equal(out, tgt, err_msg=msg)
+ assert_((out >= 0).all())
+
+ tgt = [-1*(i) for i in inp]
+ np.negative(inp, out=out)
+ assert_equal(out, tgt, err_msg=msg)
+
+ for v in [np.nan, -np.inf, np.inf]:
+ for i in range(inp.size):
+ d = np.arange(inp.size, dtype=dt)
+ inp[:] = -d
+ inp[i] = v
+ d[i] = -v if v == -np.inf else v
+ assert_array_equal(np.abs(inp), d, err_msg=msg)
+ np.abs(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
+
+ assert_array_equal(-inp, -1*inp, err_msg=msg)
+ d = -1 * inp
+ np.negative(inp, out=out)
+ assert_array_equal(out, d, err_msg=msg)
+
+ def test_lower_align(self):
+ # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+ d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+ assert_equal(np.abs(d), d)
+ assert_equal(np.negative(d), -d)
+ np.negative(d, out=d)
+ np.negative(np.ones_like(d), out=d)
+ np.abs(d, out=d)
+ np.abs(np.ones_like(d), out=d)
+
+
+class TestPositive:
+ def test_valid(self):
+ valid_dtypes = [int, float, complex, object]
+ for dtype in valid_dtypes:
+ x = np.arange(5, dtype=dtype)
+ result = np.positive(x)
+ assert_equal(x, result, err_msg=str(dtype))
+
+ def test_invalid(self):
+ with assert_raises(TypeError):
+ np.positive(True)
+ with assert_raises(TypeError):
+ np.positive(np.datetime64('2000-01-01'))
+ with assert_raises(TypeError):
+ np.positive(np.array(['foo'], dtype=str))
+ with assert_raises(TypeError):
+ np.positive(np.array(['bar'], dtype=object))
+
+
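+# For the __array_wrap__ tests below, context is the 3-tuple
+# (ufunc, call arguments, output index), as unpacked in test_wrap.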
+class TestSpecialMethods:
+ def test_wrap(self):
+
+ class with_wrap:
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context):
+ r = with_wrap()
+ r.arr = arr
+ r.context = context
+ return r
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x.arr, np.zeros(1))
+ func, args, i = x.context
+ assert_(func is ncu.minimum)
+ assert_equal(len(args), 2)
+ assert_equal(args[0], a)
+ assert_equal(args[1], a)
+ assert_equal(i, 0)
+
+ def test_wrap_and_prepare_out(self):
+ # Calling convention for out should not affect how special methods are
+ # called
+
+ class StoreArrayPrepareWrap(np.ndarray):
+ _wrap_args = None
+ _prepare_args = None
+ def __new__(cls):
+ return np.zeros(()).view(cls)
+ def __array_wrap__(self, obj, context):
+ self._wrap_args = context[1]
+ return obj
+ def __array_prepare__(self, obj, context):
+ self._prepare_args = context[1]
+ return obj
+ @property
+ def args(self):
+ # We need to ensure these are fetched at the same time, before
+ # any other ufuncs are called by the assertions
+ return (self._prepare_args, self._wrap_args)
+ def __repr__(self):
+ return "a" # for short test output
+
+ def do_test(f_call, f_expected):
+ a = StoreArrayPrepareWrap()
+ f_call(a)
+ p, w = a.args
+ expected = f_expected(a)
+ try:
+ assert_equal(p, expected)
+ assert_equal(w, expected)
+ except AssertionError as e:
+ # assert_equal produces truly useless error messages
+ raise AssertionError("\n".join([
+ "Bad arguments passed in ufunc call",
+ " expected: {}".format(expected),
+ " __array_prepare__ got: {}".format(p),
+ " __array_wrap__ got: {}".format(w)
+ ]))
+
+ # method not on the out argument
+ do_test(lambda a: np.add(a, 0), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0))
+ do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0))
+
+ # method on the out argument
+ do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a))
+ do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a))
+
+ # Also check the where mask handling:
+ do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0))
+ do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a))
+
+ def test_wrap_with_iterable(self):
+ # test fix for bug #1026:
+
+ class with_wrap(np.ndarray):
+ __array_priority__ = 10
+
+ def __new__(cls):
+ return np.asarray(1).view(cls).copy()
+
+ def __array_wrap__(self, arr, context):
+ return arr.view(type(self))
+
+ a = with_wrap()
+ x = ncu.multiply(a, (1, 2, 3))
+ assert_(isinstance(x, with_wrap))
+ assert_array_equal(x, np.array((1, 2, 3)))
+
+ def test_priority_with_scalar(self):
+ # test fix for bug #826:
+
+ class A(np.ndarray):
+ __array_priority__ = 10
+
+ def __new__(cls):
+ return np.asarray(1.0, 'float64').view(cls).copy()
+
+ a = A()
+ x = np.float64(1)*a
+ assert_(isinstance(x, A))
+ assert_array_equal(x, np.array(1))
+
+ def test_old_wrap(self):
+
+ class with_wrap:
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr):
+ r = with_wrap()
+ r.arr = arr
+ return r
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x.arr, np.zeros(1))
+
+ def test_priority(self):
+
+ class A:
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context):
+ r = type(self)()
+ r.arr = arr
+ r.context = context
+ return r
+
+ class B(A):
+ __array_priority__ = 20.
+
+ class C(A):
+ __array_priority__ = 40.
+
+ x = np.zeros(1)
+ a = A()
+ b = B()
+ c = C()
+ f = ncu.minimum
+ assert_(type(f(x, x)) is np.ndarray)
+ assert_(type(f(x, a)) is A)
+ assert_(type(f(x, b)) is B)
+ assert_(type(f(x, c)) is C)
+ assert_(type(f(a, x)) is A)
+ assert_(type(f(b, x)) is B)
+ assert_(type(f(c, x)) is C)
+
+ assert_(type(f(a, a)) is A)
+ assert_(type(f(a, b)) is B)
+ assert_(type(f(b, a)) is B)
+ assert_(type(f(b, b)) is B)
+ assert_(type(f(b, c)) is C)
+ assert_(type(f(c, b)) is C)
+ assert_(type(f(c, c)) is C)
+
+        assert_(type(ncu.exp(a)) is A)
+        assert_(type(ncu.exp(b)) is B)
+        assert_(type(ncu.exp(c)) is C)
+
+ def test_failing_wrap(self):
+
+ class A:
+ def __array__(self):
+ return np.zeros(2)
+
+ def __array_wrap__(self, arr, context):
+ raise RuntimeError
+
+ a = A()
+ assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum.reduce, a)
+
+ def test_failing_out_wrap(self):
+
+ singleton = np.array([1.0])
+
+ class Ok(np.ndarray):
+ def __array_wrap__(self, obj):
+ return singleton
+
+ class Bad(np.ndarray):
+ def __array_wrap__(self, obj):
+ raise RuntimeError
+
+ ok = np.empty(1).view(Ok)
+ bad = np.empty(1).view(Bad)
+ # double-free (segfault) of "ok" if "bad" raises an exception
+ for i in range(10):
+ assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
+ def test_none_wrap(self):
+ # Tests that issue #8507 is resolved. Previously, this would segfault
+
+ class A:
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context=None):
+ return None
+
+ a = A()
+ assert_equal(ncu.maximum(a, a), None)
+
+ def test_default_prepare(self):
+
+ class with_wrap:
+ __array_priority__ = 10
+
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context):
+ return arr
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x, np.zeros(1))
+ assert_equal(type(x), np.ndarray)
+
+ @pytest.mark.parametrize("use_where", [True, False])
+ def test_prepare(self, use_where):
+
+ class with_prepare(np.ndarray):
+ __array_priority__ = 10
+
+ def __array_prepare__(self, arr, context):
+                # make sure we can return a new array
+ return np.array(arr).view(type=with_prepare)
+
+ a = np.array(1).view(type=with_prepare)
+ if use_where:
+ x = np.add(a, a, where=np.array(True))
+ else:
+ x = np.add(a, a)
+ assert_equal(x, np.array(2))
+ assert_equal(type(x), with_prepare)
+
+ @pytest.mark.parametrize("use_where", [True, False])
+ def test_prepare_out(self, use_where):
+
+ class with_prepare(np.ndarray):
+ __array_priority__ = 10
+
+ def __array_prepare__(self, arr, context):
+ return np.array(arr).view(type=with_prepare)
+
+ a = np.array([1]).view(type=with_prepare)
+ if use_where:
+ x = np.add(a, a, a, where=[True])
+ else:
+ x = np.add(a, a, a)
+ # Returned array is new, because of the strange
+ # __array_prepare__ above
+ assert_(not np.shares_memory(x, a))
+ assert_equal(x, np.array([2]))
+ assert_equal(type(x), with_prepare)
+
+ def test_failing_prepare(self):
+
+ class A:
+ def __array__(self):
+ return np.zeros(1)
+
+ def __array_prepare__(self, arr, context=None):
+ raise RuntimeError
+
+ a = A()
+ assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a, where=False)
+
+ def test_array_too_many_args(self):
+
+ class A:
+ def __array__(self, dtype, context):
+ return np.zeros(1)
+
+ a = A()
+ assert_raises_regex(TypeError, '2 required positional', np.sum, a)
+
+ def test_ufunc_override(self):
+ # check override works even with instance with high priority.
+ class A:
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return self, func, method, inputs, kwargs
+
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
+
+ a = A()
+ b = np.array([1]).view(MyNDArray)
+ res0 = np.multiply(a, b)
+ res1 = np.multiply(b, b, out=a)
+
+ # self
+ assert_equal(res0[0], a)
+ assert_equal(res1[0], a)
+ assert_equal(res0[1], np.multiply)
+ assert_equal(res1[1], np.multiply)
+ assert_equal(res0[2], '__call__')
+ assert_equal(res1[2], '__call__')
+ assert_equal(res0[3], (a, b))
+ assert_equal(res1[3], (b, b))
+ assert_equal(res0[4], {})
+ assert_equal(res1[4], {'out': (a,)})
+
+ def test_ufunc_override_mro(self):
+
+ # Some multi arg functions for testing.
+ def tres_mul(a, b, c):
+ return a * b * c
+
+ def quatro_mul(a, b, c, d):
+ return a * b * c * d
+
+ # Make these into ufuncs.
+ three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
+ four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
+
+ class A:
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return "A"
+
+ class ASub(A):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return "ASub"
+
+ class B:
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return "B"
+
+ class C:
+ def __init__(self):
+ self.count = 0
+
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
+ return NotImplemented
+
+ class CSub(C):
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
+ return NotImplemented
+
+ a = A()
+ a_sub = ASub()
+ b = B()
+ c = C()
+
+ # Standard
+ res = np.multiply(a, a_sub)
+ assert_equal(res, "ASub")
+ res = np.multiply(a_sub, b)
+ assert_equal(res, "ASub")
+
+ # With 1 NotImplemented
+ res = np.multiply(c, a)
+ assert_equal(res, "A")
+ assert_equal(c.count, 1)
+ # Check our counter works, so we can trust tests below.
+ res = np.multiply(c, a)
+ assert_equal(c.count, 2)
+
+ # Both NotImplemented.
+ c = C()
+ c_sub = CSub()
+ assert_raises(TypeError, np.multiply, c, c_sub)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = c_sub.count = 0
+ assert_raises(TypeError, np.multiply, c_sub, c)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = 0
+ assert_raises(TypeError, np.multiply, c, c)
+ assert_equal(c.count, 1)
+ c.count = 0
+ assert_raises(TypeError, np.multiply, 2, c)
+ assert_equal(c.count, 1)
+
+ # Ternary testing.
+ assert_equal(three_mul_ufunc(a, 1, 2), "A")
+ assert_equal(three_mul_ufunc(1, a, 2), "A")
+ assert_equal(three_mul_ufunc(1, 2, a), "A")
+
+ assert_equal(three_mul_ufunc(a, a, 6), "A")
+ assert_equal(three_mul_ufunc(a, 2, a), "A")
+ assert_equal(three_mul_ufunc(a, 2, b), "A")
+ assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
+ assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
+ c.count = 0
+ assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
+ assert_equal(c.count, 1)
+ c.count = 0
+ assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
+ assert_equal(c.count, 0)
+
+ c.count = 0
+ assert_equal(three_mul_ufunc(a, b, c), "A")
+ assert_equal(c.count, 0)
+ c_sub.count = 0
+ assert_equal(three_mul_ufunc(a, b, c_sub), "A")
+ assert_equal(c_sub.count, 0)
+ assert_equal(three_mul_ufunc(1, 2, b), "B")
+
+ assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
+ assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
+ assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
+
+ # Quaternary testing.
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
+ assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
+ assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
+ assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
+
+ assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
+ assert_equal(four_mul_ufunc(1, a, 2, b), "A")
+ assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
+ assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
+ assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
+
+ c = C()
+ c_sub = CSub()
+ assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
+ assert_equal(c.count, 1)
+ c.count = 0
+ assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ c2 = C()
+ c.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ assert_equal(c2.count, 0)
+ c.count = c2.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 0)
+ assert_equal(c2.count, 1)
+
+ def test_ufunc_override_methods(self):
+
+ class A:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return self, ufunc, method, inputs, kwargs
+
+ # __call__
+ a = A()
+ with assert_raises(TypeError):
+ np.multiply.__call__(1, a, foo='bar', answer=42)
+ res = np.multiply.__call__(1, a, subok='bar', where=42)
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], '__call__')
+ assert_equal(res[3], (1, a))
+ assert_equal(res[4], {'subok': 'bar', 'where': 42})
+
+ # __call__, wrong args
+ assert_raises(TypeError, np.multiply, a)
+ assert_raises(TypeError, np.multiply, a, a, a, a)
+ assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a')
+ assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0])
+
+ # reduce, positional args
+ res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduce')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'keepdims': 'keep0',
+ 'axis': 'axis0'})
+
+ # reduce, kwargs
+ res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
+ keepdims='keep0', initial='init0',
+ where='where0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduce')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'keepdims': 'keep0',
+ 'axis': 'axis0',
+ 'initial': 'init0',
+ 'where': 'where0'})
+
+ # reduce, output equal to None removed, but not other explicit ones,
+ # even if they are at their default value.
+ res = np.multiply.reduce(a, 0, None, None, False)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
+ assert_equal(res[4], {'axis': 0, 'keepdims': True})
+ res = np.multiply.reduce(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2, True)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+ 'initial': 2, 'where': True})
+ # np._NoValue ignored for initial
+ res = np.multiply.reduce(a, 0, None, None, False,
+ np._NoValue, True)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+ 'where': True})
+ # None kept for initial, True for where.
+ res = np.multiply.reduce(a, 0, None, None, False, None, True)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+ 'initial': None, 'where': True})
+
+ # reduce, wrong args
+ assert_raises(ValueError, np.multiply.reduce, a, out=())
+ assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
+
+ # accumulate, pos args
+ res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'accumulate')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # accumulate, kwargs
+ res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
+ out='out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'accumulate')
+ assert_equal(res[3], (a,))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # accumulate, output equal to None removed.
+ res = np.multiply.accumulate(a, 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
+ assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
+ res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # accumulate, wrong args
+ assert_raises(ValueError, np.multiply.accumulate, a, out=())
+ assert_raises(ValueError, np.multiply.accumulate, a,
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.accumulate, a,
+ 'axis0', axis='axis0')
+
+ # reduceat, pos args
+ res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduceat')
+ assert_equal(res[3], (a, [4, 2]))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # reduceat, kwargs
+ res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
+ out='out0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'reduceat')
+ assert_equal(res[3], (a, [4, 2]))
+ assert_equal(res[4], {'dtype':'dtype0',
+ 'out': ('out0',),
+ 'axis': 'axis0'})
+
+ # reduceat, output equal to None removed.
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+ assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+ res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # reduceat, wrong args
+ assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())
+ assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],
+ 'axis0', axis='axis0')
+
+ # outer
+ res = np.multiply.outer(a, 42)
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'outer')
+ assert_equal(res[3], (a, 42))
+ assert_equal(res[4], {})
+
+ # outer, wrong args
+ assert_raises(TypeError, np.multiply.outer, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
+
+ # at
+ res = np.multiply.at(a, [4, 2], 'b0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'at')
+ assert_equal(res[3], (a, [4, 2], 'b0'))
+
+ # at, wrong args
+ assert_raises(TypeError, np.multiply.at, a)
+ assert_raises(TypeError, np.multiply.at, a, a, a, a)
+
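+ # Editor's sketch (illustration, not part of the original suite): the
+ # assertions above hinge on positional reduce/accumulate/reduceat
+ # arguments being normalized into kwargs before __array_ufunc__ runs.
+ # A minimal recorder makes that visible:
+ #
+ # class Recorder:
+ #     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ #         return (ufunc, method, inputs, kwargs)
+ #
+ # np.add.reduce(Recorder(), 0) would then return roughly
+ # (np.add, 'reduce', (<Recorder>,), {'axis': 0}): the positional 0
+ # arrives as the 'axis' keyword, exactly as asserted above.
+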
+ def test_ufunc_override_out(self):
+
+ class A:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return kwargs
+
+ class B:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return kwargs
+
+ a = A()
+ b = B()
+ res0 = np.multiply(a, b, 'out_arg')
+ res1 = np.multiply(a, b, out='out_arg')
+ res2 = np.multiply(2, b, 'out_arg')
+ res3 = np.multiply(3, b, out='out_arg')
+ res4 = np.multiply(a, 4, 'out_arg')
+ res5 = np.multiply(a, 5, out='out_arg')
+
+ assert_equal(res0['out'][0], 'out_arg')
+ assert_equal(res1['out'][0], 'out_arg')
+ assert_equal(res2['out'][0], 'out_arg')
+ assert_equal(res3['out'][0], 'out_arg')
+ assert_equal(res4['out'][0], 'out_arg')
+ assert_equal(res5['out'][0], 'out_arg')
+
+ # ufuncs with multiple output modf and frexp.
+ res6 = np.modf(a, 'out0', 'out1')
+ res7 = np.frexp(a, 'out0', 'out1')
+ assert_equal(res6['out'][0], 'out0')
+ assert_equal(res6['out'][1], 'out1')
+ assert_equal(res7['out'][0], 'out0')
+ assert_equal(res7['out'][1], 'out1')
+
+ # While we're at it, check that default output is never passed on.
+ assert_(np.sin(a, None) == {})
+ assert_(np.sin(a, out=None) == {})
+ assert_(np.sin(a, out=(None,)) == {})
+ assert_(np.modf(a, None) == {})
+ assert_(np.modf(a, None, None) == {})
+ assert_(np.modf(a, out=(None, None)) == {})
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
+ np.modf(a, out=None)
+
+ # It is an error to give the output both positionally and by keyword,
+ # to pass too many arguments, or an out tuple of the wrong length.
+ assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+ assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+ assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+ assert_raises(TypeError, np.multiply, a, out=())
+ assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+ assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+ assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+ assert_raises(ValueError, np.modf, a, out=('one',))
+
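+ # Editor's note (illustration): with multiple outputs, `out` must be a
+ # tuple of matching length, so e.g. np.modf(np.arange(3.), out=np.empty(3))
+ # raises TypeError, while np.modf(np.arange(3.), out=(np.empty(3),
+ # np.empty(3))) is accepted.
+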
+ def test_ufunc_override_exception(self):
+
+ class A:
+ def __array_ufunc__(self, *a, **kwargs):
+ raise ValueError("oops")
+
+ a = A()
+ assert_raises(ValueError, np.negative, 1, out=a)
+ assert_raises(ValueError, np.negative, a)
+ assert_raises(ValueError, np.divide, 1., a)
+
+ def test_ufunc_override_not_implemented(self):
+
+ class A:
+ def __array_ufunc__(self, *args, **kwargs):
+ return NotImplemented
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.negative(A())
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+ "out=(1,)): 'A', 'object', 'int'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.add(A(), object(), out=1)
+
+ def test_ufunc_override_disabled(self):
+
+ class OptOut:
+ __array_ufunc__ = None
+
+ opt_out = OptOut()
+
+ # ufuncs always raise
+ msg = "operand 'OptOut' does not support ufuncs"
+ with assert_raises_regex(TypeError, msg):
+ np.add(opt_out, 1)
+ with assert_raises_regex(TypeError, msg):
+ np.add(1, opt_out)
+ with assert_raises_regex(TypeError, msg):
+ np.negative(opt_out)
+
+ # opt-outs still hold even when other arguments have pathological
+ # __array_ufunc__ implementations
+
+ class GreedyArray:
+ def __array_ufunc__(self, *args, **kwargs):
+ return self
+
+ greedy = GreedyArray()
+ assert_(np.negative(greedy) is greedy)
+ with assert_raises_regex(TypeError, msg):
+ np.add(greedy, opt_out)
+ with assert_raises_regex(TypeError, msg):
+ np.add(greedy, 1, out=opt_out)
+
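+ # Editor's sketch (hypothetical class, for illustration): setting
+ # __array_ufunc__ = None also re-routes the binary operators to the
+ # reflected Python protocol instead of ufunc dispatch:
+ #
+ # class Quantity:
+ #     __array_ufunc__ = None
+ #     def __radd__(self, other):
+ #         return "Quantity.__radd__"
+ #
+ # np.arange(3) + Quantity() would return "Quantity.__radd__", while
+ # np.add(np.arange(3), Quantity()) raises TypeError as tested above.
+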
+ def test_gufunc_override(self):
+ # gufuncs are just ufunc instances but follow a different code path,
+ # so check that __array_ufunc__ overrides them properly.
+ class A:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return self, ufunc, method, inputs, kwargs
+
+ inner1d = ncu_tests.inner1d
+ a = A()
+ res = inner1d(a, a)
+ assert_equal(res[0], a)
+ assert_equal(res[1], inner1d)
+ assert_equal(res[2], '__call__')
+ assert_equal(res[3], (a, a))
+ assert_equal(res[4], {})
+
+ res = inner1d(1, 1, out=a)
+ assert_equal(res[0], a)
+ assert_equal(res[1], inner1d)
+ assert_equal(res[2], '__call__')
+ assert_equal(res[3], (1, 1))
+ assert_equal(res[4], {'out': (a,)})
+
+ # wrong number of arguments in the tuple is an error too.
+ assert_raises(TypeError, inner1d, a, out='two')
+ assert_raises(TypeError, inner1d, a, a, 'one', out='two')
+ assert_raises(TypeError, inner1d, a, a, 'one', 'two')
+ assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+ assert_raises(ValueError, inner1d, a, a, out=())
+
+ def test_ufunc_override_with_super(self):
+ # NOTE: this class is used in doc/source/user/basics.subclassing.rst
+ # if you make any changes here, do update it there too.
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
+ args = []
+ in_no = []
+ for i, input_ in enumerate(inputs):
+ if isinstance(input_, A):
+ in_no.append(i)
+ args.append(input_.view(np.ndarray))
+ else:
+ args.append(input_)
+
+ outputs = out
+ out_no = []
+ if outputs:
+ out_args = []
+ for j, output in enumerate(outputs):
+ if isinstance(output, A):
+ out_no.append(j)
+ out_args.append(output.view(np.ndarray))
+ else:
+ out_args.append(output)
+ kwargs['out'] = tuple(out_args)
+ else:
+ outputs = (None,) * ufunc.nout
+
+ info = {}
+ if in_no:
+ info['inputs'] = in_no
+ if out_no:
+ info['outputs'] = out_no
+
+ results = super().__array_ufunc__(ufunc, method,
+ *args, **kwargs)
+ if results is NotImplemented:
+ return NotImplemented
+
+ if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
+ return
+
+ if ufunc.nout == 1:
+ results = (results,)
+
+ results = tuple((np.asarray(result).view(A)
+ if output is None else output)
+ for result, output in zip(results, outputs))
+ if results and isinstance(results[0], A):
+ results[0].info = info
+
+ return results[0] if len(results) == 1 else results
+
+ class B:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ if any(isinstance(input_, A) for input_ in inputs):
+ return "A!"
+ else:
+ return NotImplemented
+
+ d = np.arange(5.)
+ # 1 input, 1 output
+ a = np.arange(5.).view(A)
+ b = np.sin(a)
+ check = np.sin(d)
+ assert_(np.all(check == b))
+ assert_equal(b.info, {'inputs': [0]})
+ b = np.sin(d, out=(a,))
+ assert_(np.all(check == b))
+ assert_equal(b.info, {'outputs': [0]})
+ assert_(b is a)
+ a = np.arange(5.).view(A)
+ b = np.sin(a, out=a)
+ assert_(np.all(check == b))
+ assert_equal(b.info, {'inputs': [0], 'outputs': [0]})
+
+ # 1 input, 2 outputs
+ a = np.arange(5.).view(A)
+ b1, b2 = np.modf(a)
+ assert_equal(b1.info, {'inputs': [0]})
+ b1, b2 = np.modf(d, out=(None, a))
+ assert_(b2 is a)
+ assert_equal(b1.info, {'outputs': [1]})
+ a = np.arange(5.).view(A)
+ b = np.arange(5.).view(A)
+ c1, c2 = np.modf(a, out=(a, b))
+ assert_(c1 is a)
+ assert_(c2 is b)
+ assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]})
+
+ # 2 input, 1 output
+ a = np.arange(5.).view(A)
+ b = np.arange(5.).view(A)
+ c = np.add(a, b, out=a)
+ assert_(c is a)
+ assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]})
+ # some tests with a non-ndarray subclass
+ a = np.arange(5.)
+ b = B()
+ assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+ assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+ assert_raises(TypeError, np.add, a, b)
+ a = a.view(A)
+ assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
+ assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
+ assert_(np.add(a, b) == "A!")
+ # regression check for gh-9102 -- tests ufunc.reduce implicitly.
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ a = d.view(A)
+ c = a.any()
+ check = d.any()
+ assert_equal(c, check)
+ assert_equal(c.info, {'inputs': [0]})
+ c = a.max()
+ check = d.max()
+ assert_equal(c, check)
+ assert_equal(c.info, {'inputs': [0]})
+ b = np.array(0).view(A)
+ c = a.max(out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+ check = a.max(axis=0)
+ b = np.zeros_like(check).view(A)
+ c = a.max(axis=0, out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+ # simple explicit tests of reduce, accumulate, reduceat
+ check = np.add.reduce(d, axis=1)
+ c = np.add.reduce(a, axis=1)
+ assert_equal(c, check)
+ assert_equal(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduce(a, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+ check = np.add.accumulate(d, axis=0)
+ c = np.add.accumulate(a, axis=0)
+ assert_equal(c, check)
+ assert_equal(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.accumulate(a, 0, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+ indices = [0, 2, 1]
+ check = np.add.reduceat(d, indices, axis=1)
+ c = np.add.reduceat(a, indices, axis=1)
+ assert_equal(c, check)
+ assert_equal(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduceat(a, indices, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_equal(c.info, {'inputs': [0], 'outputs': [0]})
+ # and a few tests for at
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ check = d.copy()
+ a = d.copy().view(A)
+ np.add.at(check, ([0, 1], [0, 2]), 1.)
+ np.add.at(a, ([0, 1], [0, 2]), 1.)
+ assert_equal(a, check)
+ assert_equal(a.info, {'inputs': [0]})
+ b = np.array(1.).view(A)
+ a = d.copy().view(A)
+ np.add.at(a, ([0, 1], [0, 2]), b)
+ assert_equal(a, check)
+ assert_equal(a.info, {'inputs': [0, 2]})
+
+
+class TestChoose:
+ def test_mixed(self):
+ c = np.array([True, True])
+ a = np.array([True, True])
+ assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
+
+
+class TestRationalFunctions:
+ def test_lcm(self):
+ self._test_lcm_inner(np.int16)
+ self._test_lcm_inner(np.uint16)
+
+ def test_lcm_object(self):
+ self._test_lcm_inner(np.object_)
+
+ def test_gcd(self):
+ self._test_gcd_inner(np.int16)
+ self._test_gcd_inner(np.uint16)
+
+ def test_gcd_object(self):
+ self._test_gcd_inner(np.object_)
+
+ def _test_lcm_inner(self, dtype):
+ # basic use
+ a = np.array([12, 120], dtype=dtype)
+ b = np.array([20, 200], dtype=dtype)
+ assert_equal(np.lcm(a, b), [60, 600])
+
+ if not issubclass(dtype, np.unsignedinteger):
+ # negatives are ignored
+ a = np.array([12, -12, 12, -12], dtype=dtype)
+ b = np.array([20, 20, -20, -20], dtype=dtype)
+ assert_equal(np.lcm(a, b), [60]*4)
+
+ # reduce
+ a = np.array([3, 12, 20], dtype=dtype)
+ assert_equal(np.lcm.reduce(a), 60)
+
+ # broadcasting, and a test including 0
+ a = np.arange(6).astype(dtype)
+ b = 20
+ assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
+
+ def _test_gcd_inner(self, dtype):
+ # basic use
+ a = np.array([12, 120], dtype=dtype)
+ b = np.array([20, 200], dtype=dtype)
+ assert_equal(np.gcd(a, b), [4, 40])
+
+ if not issubclass(dtype, np.unsignedinteger):
+ # negatives are ignored
+ a = np.array([12, -12, 12, -12], dtype=dtype)
+ b = np.array([20, 20, -20, -20], dtype=dtype)
+ assert_equal(np.gcd(a, b), [4]*4)
+
+ # reduce
+ a = np.array([15, 25, 35], dtype=dtype)
+ assert_equal(np.gcd.reduce(a), 5)
+
+ # broadcasting, and a test including 0
+ a = np.arange(6).astype(dtype)
+ b = 20
+ assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
+
+ def test_lcm_overflow(self):
+ # verify that we don't overflow when a*b does overflow
+ big = np.int32(np.iinfo(np.int32).max // 11)
+ a = 2*big
+ b = 5*big
+ assert_equal(np.lcm(a, b), 10*big)
+
+ def test_gcd_overflow(self):
+ for dtype in (np.int32, np.int64):
+ # verify that we don't overflow when taking abs(x)
+ # not relevant for lcm, where the result is unrepresentable anyway
+ a = dtype(np.iinfo(dtype).min) # negative power of two
+ q = -(a // 4)
+ assert_equal(np.gcd(a, q*3), q)
+ assert_equal(np.gcd(a, -q*3), q)
+
+ def test_decimal(self):
+ from decimal import Decimal
+ a = np.array([1, 1, -1, -1]) * Decimal('0.20')
+ b = np.array([1, -1, 1, -1]) * Decimal('0.12')
+
+ assert_equal(np.gcd(a, b), 4*[Decimal('0.04')])
+ assert_equal(np.lcm(a, b), 4*[Decimal('0.60')])
+
+ def test_float(self):
+ # not well-defined on float due to rounding errors
+ assert_raises(TypeError, np.gcd, 0.3, 0.4)
+ assert_raises(TypeError, np.lcm, 0.3, 0.4)
+
+ def test_builtin_long(self):
+ # sanity check that array coercion is alright for builtin longs
+ assert_equal(np.array(2**200).item(), 2**200)
+
+ # expressed as prime factors
+ a = np.array(2**100 * 3**5)
+ b = np.array([2**100 * 5**7, 2**50 * 3**10])
+ assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5])
+ assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10])
+
+ assert_equal(np.gcd(2**100, 3**100), 1)
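+ # Editor's note: a quick identity tying the two ufuncs together for
+ # nonnegative integers: lcm(a, b) * gcd(a, b) == a * b, e.g.
+ # np.lcm(12, 20) * np.gcd(12, 20) == 60 * 4 == 240 == 12 * 20.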
+
+
+class TestRoundingFunctions:
+
+ def test_object_direct(self):
+ """ test direct implementation of these magic methods """
+ class C:
+ def __floor__(self):
+ return 1
+ def __ceil__(self):
+ return 2
+ def __trunc__(self):
+ return 3
+
+ arr = np.array([C(), C()])
+ assert_equal(np.floor(arr), [1, 1])
+ assert_equal(np.ceil(arr), [2, 2])
+ assert_equal(np.trunc(arr), [3, 3])
+
+ def test_object_indirect(self):
+ """ test implementations via __float__ """
+ class C:
+ def __float__(self):
+ return -2.5
+
+ arr = np.array([C(), C()])
+ assert_equal(np.floor(arr), [-3, -3])
+ assert_equal(np.ceil(arr), [-2, -2])
+ with pytest.raises(TypeError):
+ np.trunc(arr) # consistent with math.trunc
+
+ def test_fraction(self):
+ f = Fraction(-4, 3)
+ assert_equal(np.floor(f), -2)
+ assert_equal(np.ceil(f), -1)
+ assert_equal(np.trunc(f), -1)
+
+
+class TestComplexFunctions:
+ funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
+ np.arctanh, np.sin, np.cos, np.tan, np.exp,
+ np.exp2, np.log, np.sqrt, np.log10, np.log2,
+ np.log1p]
+
+ def test_it(self):
+ for f in self.funcs:
+ if f is np.arccosh:
+ x = 1.5
+ else:
+ x = .5
+ fr = f(x)
+ fz = f(complex(x))
+ assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
+ assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
+
+ @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+ def test_precisions_consistent(self):
+ z = 1 + 1j
+ for f in self.funcs:
+ fcf = f(np.csingle(z))
+ fcd = f(np.cdouble(z))
+ fcl = f(np.clongdouble(z))
+ assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s' % f)
+ assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s' % f)
+
+ @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+ def test_branch_cuts(self):
+ # check branch cuts and continuity on them
+ _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
+ _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)
+
+ _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
+
+ _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
+ _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)
+
+ # check against bogus branch cuts: assert continuity between quadrants
+ _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
+ _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
+ _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
+
+ _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
+ _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
+ _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)
+
+ @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+ def test_branch_cuts_complex64(self):
+ # check branch cuts and continuity on them
+ _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)
+
+ _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
+
+ _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
+ _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+
+ # check against bogus branch cuts: assert continuity between quadrants
+ _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
+
+ _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
+
+ def test_against_cmath(self):
+ import cmath
+
+ points = [-1-1j, -1+1j, +1-1j, +1+1j]
+ name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
+ 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
+ atol = 4*np.finfo(complex).eps
+ for func in self.funcs:
+ fname = func.__name__.split('.')[-1]
+ cname = name_map.get(fname, fname)
+ try:
+ cfunc = getattr(cmath, cname)
+ except AttributeError:
+ continue
+ for p in points:
+ a = complex(func(np.complex_(p)))
+ b = cfunc(p)
+ assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
+
+ @pytest.mark.xfail(IS_WASM, reason="doesn't work")
+ @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
+ def test_loss_of_precision(self, dtype):
+ """Check loss of precision in complex arc* functions"""
+
+ # Check against known-good functions
+
+ info = np.finfo(dtype)
+ real_dtype = dtype(0.).real.dtype
+ eps = info.eps
+
+ def check(x, rtol):
+ x = x.astype(real_dtype)
+
+ z = x.astype(dtype)
+ d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
+ assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+ 'arcsinh'))
+
+ z = (1j*x).astype(dtype)
+ d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
+ assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+ 'arcsin'))
+
+ z = x.astype(dtype)
+ d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
+ assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+ 'arctanh'))
+
+ z = (1j*x).astype(dtype)
+ d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
+ assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
+ 'arctan'))
+
+ # The switchover was chosen as 1e-3; hence there can be up to
+ # ~eps/1e-3 of relative cancellation error before it
+
+ x_series = np.logspace(-20, -3.001, 200)
+ x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
+
+ if dtype is np.longcomplex:
+ if bad_arcsinh():
+ pytest.skip("Trig functions of np.longcomplex values known "
+ "to be inaccurate on aarch64 and PPC for some "
+ "compilation configurations.")
+ # It's not guaranteed that the system-provided arc functions
+ # are accurate down to a few epsilons. (Eg. on Linux 64-bit)
+ # So, give more leeway for long complex tests here:
+ check(x_series, 50.0*eps)
+ else:
+ check(x_series, 2.1*eps)
+ check(x_basic, 2.0*eps/1e-3)
+
+ # Check a few points
+
+ z = np.array([1e-5*(1+1j)], dtype=dtype)
+ p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
+ d = np.absolute(1-np.arctanh(z)/p)
+ assert_(np.all(d < 1e-15))
+
+ p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
+ d = np.absolute(1-np.arcsinh(z)/p)
+ assert_(np.all(d < 1e-15))
+
+ p = 9.999999999333333333e-6j + 1.000000000066666666e-5
+ d = np.absolute(1-np.arctan(z)/p)
+ assert_(np.all(d < 1e-15))
+
+ p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
+ d = np.absolute(1-np.arcsin(z)/p)
+ assert_(np.all(d < 1e-15))
+
+ # Check continuity across switchover points
+
+ def check(func, z0, d=1):
+ z0 = np.asarray(z0, dtype=dtype)
+ zp = z0 + abs(z0) * d * eps * 2
+ zm = z0 - abs(z0) * d * eps * 2
+ assert_(np.all(zp != zm), (zp, zm))
+
+ # NB: the cancellation error at the switchover is at least eps
+ good = (abs(func(zp) - func(zm)) < 2*eps)
+ assert_(np.all(good), (func, z0[~good]))
+
+ for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
+ pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3)
+ if rp != 0 or ip != 0]
+ check(func, pts, 1)
+ check(func, pts, 1j)
+ check(func, pts, 1+1j)
+
+ @np.errstate(all="ignore")
+ def test_promotion_corner_cases(self):
+ for func in self.funcs:
+ assert func(np.float16(1)).dtype == np.float16
+ # Integer to low precision float promotion is a dubious choice:
+ assert func(np.uint8(1)).dtype == np.float16
+ assert func(np.int16(1)).dtype == np.float32
+
+
+class TestAttributes:
+ def test_attributes(self):
+ add = ncu.add
+ assert_equal(add.__name__, 'add')
+ assert_(add.ntypes >= 18) # don't fail if types added
+ assert_('ii->i' in add.types)
+ assert_equal(add.nin, 2)
+ assert_equal(add.nout, 1)
+ assert_equal(add.identity, 0)
+
+ def test_doc(self):
+ # don't bother checking the long list of kwargs, which are likely to
+ # change
+ assert_(ncu.add.__doc__.startswith(
+ "add(x1, x2, /, out=None, *, where=True"))
+ assert_(ncu.frexp.__doc__.startswith(
+ "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
+
+
+class TestSubclass:
+
+ def test_subclass_op(self):
+
+ class simple(np.ndarray):
+ def __new__(subtype, shape):
+ self = np.ndarray.__new__(subtype, shape, dtype=object)
+ self.fill(0)
+ return self
+
+ a = simple((3, 4))
+ assert_equal(a+a, a)
+
+
+class TestFrompyfunc:
+
+ def test_identity(self):
+ def mul(a, b):
+ return a * b
+
+ # with identity=value
+ mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1)
+ assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+ assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+ assert_equal(mul_ufunc.reduce([]), 1)
+
+ # with identity=None (reorderable)
+ mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None)
+ assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+ assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1)
+ assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
+ # with no identity (not reorderable)
+ mul_ufunc = np.frompyfunc(mul, nin=2, nout=1)
+ assert_equal(mul_ufunc.reduce([2, 3, 4]), 24)
+ assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)))
+ assert_raises(ValueError, lambda: mul_ufunc.reduce([]))
+
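+# Editor's note on TestFrompyfunc above: identity=1 seeds empty and
+# multi-axis reductions; identity=None marks the operation reorderable
+# (multi-axis reduce still works) but gives an empty reduce no seed; and
+# omitting identity entirely forbids both, hence the ValueErrors.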
+
+def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
+ dtype=complex):
+ """
+ Check for a branch cut in a function.
+
+ Assert that `x0` lies on a branch cut of function `f` and `f` is
+ continuous from the direction `dx`.
+
+ Parameters
+ ----------
+ f : func
+ Function to check
+ x0 : array-like
+ Point on branch cut
+ dx : array-like
+ Direction to check continuity in
+ re_sign, im_sign : {1, -1}
+ Change of sign of the real or imaginary part expected
+ sig_zero_ok : bool
+ Whether to check if the branch cut respects signed zero (if applicable)
+ dtype : dtype
+ Dtype to check (should be complex)
+
+ """
+ x0 = np.atleast_1d(x0).astype(dtype)
+ dx = np.atleast_1d(dx).astype(dtype)
+
+ if np.dtype(dtype).char == 'F':
+ scale = np.finfo(dtype).eps * 1e2
+ atol = np.float32(1e-2)
+ else:
+ scale = np.finfo(dtype).eps * 1e3
+ atol = 1e-4
+
+ y0 = f(x0)
+ yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
+ ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
+
+ assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
+ assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
+ assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
+ assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
+
+ if sig_zero_ok:
+ # check that signed zeros also work as a displacement
+ jr = (x0.real == 0) & (dx.real != 0)
+ ji = (x0.imag == 0) & (dx.imag != 0)
+ if np.any(jr):
+ x = x0[jr]
+ x.real = np.NZERO
+ ym = f(x)
+ assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
+ assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
+
+ if np.any(ji):
+ x = x0[ji]
+ x.imag = np.NZERO
+ ym = f(x)
+ assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
+ assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
+
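+# Editor's usage note for _check_branch_cut: to assert that np.log has a
+# branch cut along the negative real axis, approached from the +i side,
+# with the real part continuous and the imaginary part flipping sign:
+#
+#     _check_branch_cut(np.log, -0.5, 1j, re_sign=1, im_sign=-1,
+#                       sig_zero_ok=True)
+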
+def test_copysign():
+ assert_(np.copysign(1, -1) == -1)
+ with np.errstate(divide="ignore"):
+ assert_(1 / np.copysign(0, -1) < 0)
+ assert_(1 / np.copysign(0, 1) > 0)
+ assert_(np.signbit(np.copysign(np.nan, -1)))
+ assert_(not np.signbit(np.copysign(np.nan, 1)))
+
+def _test_nextafter(t):
+ one = t(1)
+ two = t(2)
+ zero = t(0)
+ eps = np.finfo(t).eps
+ assert_(np.nextafter(one, two) - one == eps)
+ assert_(np.nextafter(one, zero) - one < 0)
+ assert_(np.isnan(np.nextafter(np.nan, one)))
+ assert_(np.isnan(np.nextafter(one, np.nan)))
+ assert_(np.nextafter(one, one) == one)
+
+def test_nextafter():
+ return _test_nextafter(np.float64)
+
+
+def test_nextafterf():
+ return _test_nextafter(np.float32)
+
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+ reason="IBM double double")
+def test_nextafterl():
+ return _test_nextafter(np.longdouble)
+
+
+def test_nextafter_0():
+ for t, direction in itertools.product(np.sctypes['float'], (1, -1)):
+ # The value of tiny for (IBM) double-double is NaN, so skip the
+ # assertion in that case
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning)
+ if not np.isnan(np.finfo(t).tiny):
+ tiny = np.finfo(t).tiny
+ assert_(
+ 0. < direction * np.nextafter(t(0), t(direction)) < tiny)
+ assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0)
+
+def _test_spacing(t):
+ one = t(1)
+ eps = np.finfo(t).eps
+ nan = t(np.nan)
+ inf = t(np.inf)
+ with np.errstate(invalid='ignore'):
+ assert_(np.spacing(one) == eps)
+ assert_(np.isnan(np.spacing(nan)))
+ assert_(np.isnan(np.spacing(inf)))
+ assert_(np.isnan(np.spacing(-inf)))
+ assert_(np.spacing(t(1e30)) != 0)
+
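+# Editor's note: for float64, np.spacing(1.0) == 2**-52 ~= 2.22e-16; the
+# same value appears in the gfortran reference table further below as
+# spacing(1.0_DBL).
+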
+def test_spacing():
+ return _test_spacing(np.float64)
+
+def test_spacingf():
+ return _test_spacing(np.float32)
+
+
+@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double")
+@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"),
+ reason="IBM double double")
+def test_spacingl():
+ return _test_spacing(np.longdouble)
+
+def test_spacing_gfortran():
+ # Reference from this fortran file, built with gfortran 4.3.3 on linux
+ # 32bits:
+ # PROGRAM test_spacing
+ # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
+ # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
+ #
+ # WRITE(*,*) spacing(0.00001_DBL)
+ # WRITE(*,*) spacing(1.0_DBL)
+ # WRITE(*,*) spacing(1000._DBL)
+ # WRITE(*,*) spacing(10500._DBL)
+ #
+ # WRITE(*,*) spacing(0.00001_SGL)
+ # WRITE(*,*) spacing(1.0_SGL)
+ # WRITE(*,*) spacing(1000._SGL)
+ # WRITE(*,*) spacing(10500._SGL)
+ # END PROGRAM
+ ref = {np.float64: [1.69406589450860068E-021,
+ 2.22044604925031308E-016,
+ 1.13686837721616030E-013,
+ 1.81898940354585648E-012],
+ np.float32: [9.09494702E-13,
+ 1.19209290E-07,
+ 6.10351563E-05,
+ 9.76562500E-04]}
+
+ for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
+ x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
+ assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
+
+def test_nextafter_vs_spacing():
+ # XXX: spacing does not handle long double yet
+ for t in [np.float32, np.float64]:
+ for _f in [1, 1e-5, 1000]:
+ f = t(_f)
+ f1 = t(_f + 1)
+ assert_(np.nextafter(f, f1) - f == np.spacing(f))
+
+def test_pos_nan():
+ """Check np.nan is a positive nan."""
+ assert_(np.signbit(np.nan) == 0)
+
+def test_reduceat():
+ """Test bug in reduceat when structured arrays are not copied."""
+ db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
+ a = np.empty([100], dtype=db)
+ a['name'] = 'Simple'
+ a['time'] = 10
+ a['value'] = 100
+ indx = [0, 7, 15, 25]
+
+ h2 = []
+ val1 = indx[0]
+ for val2 in indx[1:]:
+ h2.append(np.add.reduce(a['value'][val1:val2]))
+ val1 = val2
+ h2.append(np.add.reduce(a['value'][val1:]))
+ h2 = np.array(h2)
+
+ # test buffered -- this should work
+ h1 = np.add.reduceat(a['value'], indx)
+ assert_array_almost_equal(h1, h2)
+
+ # This is when the error occurs.
+ # test no buffer
+ np.setbufsize(32)
+ h1 = np.add.reduceat(a['value'], indx)
+ np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
+ assert_array_almost_equal(h1, h2)
+
+def test_reduceat_empty():
+ """Reduceat should work with empty arrays"""
+ indices = np.array([], 'i4')
+ x = np.array([], 'f8')
+ result = np.add.reduceat(x, indices)
+ assert_equal(result.dtype, x.dtype)
+ assert_equal(result.shape, (0,))
+ # Another case with a slightly different zero-sized shape
+ x = np.ones((5, 2))
+ result = np.add.reduceat(x, [], axis=0)
+ assert_equal(result.dtype, x.dtype)
+ assert_equal(result.shape, (0, 2))
+ result = np.add.reduceat(x, [], axis=1)
+ assert_equal(result.dtype, x.dtype)
+ assert_equal(result.shape, (5, 0))
+
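+# Editor's note: np.ufunc.reduceat produces one slice-reduction per index,
+# so an empty index list yields a result of length 0 along the reduced
+# axis, as the shape checks above verify.
+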
+def test_complex_nan_comparisons():
+ nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
+ fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
+ complex(1, 1), complex(-1, -1), complex(0, 0)]
+
+ with np.errstate(invalid='ignore'):
+ for x in nans + fins:
+ x = np.array([x])
+ for y in nans + fins:
+ y = np.array([y])
+
+ if np.isfinite(x) and np.isfinite(y):
+ continue
+
+ assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
+ assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
+ assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
+ assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
+ assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
+
+
+def test_rint_big_int():
+ # np.rint bug for large integer values on Windows 32-bit and MKL
+ # https://github.com/numpy/numpy/issues/6685
+ val = 4607998452777363968
+ # This is exactly representable in floating point
+ assert_equal(val, int(float(val)))
+ # Rint should not change the value
+ assert_equal(val, np.rint(val))
+
+
+@pytest.mark.parametrize('ftype', [np.float32, np.float64])
+def test_memoverlap_accumulate(ftype):
+ # Reproduces bug https://github.com/numpy/numpy/issues/15597
+ arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype)
+ out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype)
+ out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype)
+ assert_equal(np.maximum.accumulate(arr), out_max)
+ assert_equal(np.minimum.accumulate(arr), out_min)
+
+@pytest.mark.parametrize("ufunc, dtype", [
+ (ufunc, t[0])
+ for ufunc in UFUNCS_BINARY_ACC
+ for t in ufunc.types
+ if t[-1] == '?' and t[0] not in 'DFGMmO'
+])
+def test_memoverlap_accumulate_cmp(ufunc, dtype):
+ if ufunc.signature:
+ pytest.skip('For generic signatures only')
+ for size in (2, 8, 32, 64, 128, 256):
+ arr = np.array([0, 1, 1]*size, dtype=dtype)
+ acc = ufunc.accumulate(arr, dtype='?')
+ acc_u8 = acc.view(np.uint8)
+ exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8)
+ assert_equal(exp, acc_u8)
+
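+# Editor's note: np.ufunc objects are plain callables on two scalars, so
+# itertools.accumulate(arr, ufunc) in these tests provides a slow,
+# pure-Python reference against which ufunc.accumulate is checked.
+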
+@pytest.mark.parametrize("ufunc, dtype", [
+ (ufunc, t[0])
+ for ufunc in UFUNCS_BINARY_ACC
+ for t in ufunc.types
+ if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?'
+])
+def test_memoverlap_accumulate_symmetric(ufunc, dtype):
+ if ufunc.signature:
+ pytest.skip('For generic signatures only')
+ with np.errstate(all='ignore'):
+ for size in (2, 8, 32, 64, 128, 256):
+ arr = np.array([0, 1, 2]*size).astype(dtype)
+ acc = ufunc.accumulate(arr, dtype=dtype)
+ exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype)
+ assert_equal(exp, acc)
+
+def test_signaling_nan_exceptions():
+ with assert_no_warnings():
+ a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
+ np.isnan(a)
+
+@pytest.mark.parametrize("arr", [
+ np.arange(2),
+ np.matrix([0, 1]),
+ np.matrix([[0, 1], [2, 5]]),
+ ])
+def test_outer_subclass_preserve(arr):
+ # for gh-8661
+ class foo(np.ndarray): pass
+ actual = np.multiply.outer(arr.view(foo), arr.view(foo))
+ assert actual.__class__.__name__ == 'foo'
+
+def test_outer_bad_subclass():
+ class BadArr1(np.ndarray):
+ def __array_finalize__(self, obj):
+ # The outer call reshapes to 3 dims, try to do a bad reshape.
+ if self.ndim == 3:
+ self.shape = self.shape + (1,)
+
+ def __array_prepare__(self, obj, context=None):
+ return obj
+
+ class BadArr2(np.ndarray):
+ def __array_finalize__(self, obj):
+ if isinstance(obj, BadArr2):
+ # outer inserts 1-sized dims. In that case disturb them.
+ if self.shape[-1] == 1:
+ self.shape = self.shape[::-1]
+
+ def __array_prepare__(self, obj, context=None):
+ return obj
+
+ for cls in [BadArr1, BadArr2]:
+ arr = np.ones((2, 3)).view(cls)
+ with assert_raises(TypeError) as a:
+ # The first array gets reshaped (not the second one)
+ np.add.outer(arr, [1, 2])
+
+ # This actually works, since we only see the reshaping error:
+ arr = np.ones((2, 3)).view(cls)
+ assert type(np.add.outer([1, 2], arr)) is cls
+
+def test_outer_exceeds_maxdims():
+ deep = np.ones((1,) * 17)
+ with assert_raises(ValueError):
+ np.add.outer(deep, deep)
+
+def test_bad_legacy_ufunc_silent_errors():
+ # legacy ufuncs can't report errors and NumPy can't check if the GIL
+ # is released. So NumPy has to check after the GIL is released just to
+ # cover all bases. `np.power` uses/used to use this.
+ arr = np.arange(3).astype(np.float64)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error(arr, arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ # not contiguous means the fast-path cannot be taken
+ non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2]
+ ncu_tests.always_error(non_contig, arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.outer(arr, arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.reduce(arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.reduceat(arr, [0, 1])
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.accumulate(arr)
+
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error.at(arr, [0, 1, 2], arr)
+
+
+@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]])
+def test_bad_legacy_gufunc_silent_errors(x1):
+ # Verify that an exception raised in a gufunc loop propagates correctly.
+ # The signature of always_error_gufunc is '(i),()->()'.
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"):
+ ncu_tests.always_error_gufunc(x1, 0.0)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py
new file mode 100644
index 00000000..6ee4d2fe
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_accuracy.py
@@ -0,0 +1,75 @@
+import numpy as np
+import os
+from os import path
+import sys
+import pytest
+from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER
+from numpy.testing import assert_array_max_ulp
+from numpy.testing._private.utils import _glibc_older_than
+from numpy.core._multiarray_umath import __cpu_features__
+
+UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values() if
+ isinstance(obj, np.ufunc)]
+UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
+UNARY_OBJECT_UFUNCS.remove(getattr(np, 'invert'))
+
+IS_AVX = __cpu_features__.get('AVX512F', False) or \
+ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
+# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448).
+runtest = (sys.platform.startswith('linux')
+ and IS_AVX and not _glibc_older_than("2.17"))
+platform_skip = pytest.mark.skipif(not runtest,
+ reason="avoid testing inconsistent platform "
+ "library implementations")
+
+# hex-string-to-float conversion, adapted from:
+# https://stackoverflow.com/questions/1592158/convert-hex-to-float
+def convert(s, datatype="np.float32"):
+ i = int(s, 16) # convert from hex to a Python int
+ if (datatype == "np.float64"):
+ cp = pointer(c_longlong(i)) # make this into a c long long integer
+ fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer
+ else:
+ cp = pointer(c_int(i)) # make this into a c integer
+ fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer
+
+ return fp.contents.value # dereference the pointer, get the float
+
+str_to_float = np.vectorize(convert)
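+
+# Editor's examples (standard IEEE-754 bit patterns, for illustration):
+# convert("3f800000") -> 1.0 via the float32 branch, and
+# convert("3ff0000000000000", "np.float64") -> 1.0 via the float64 branch.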
+
+class TestAccuracy:
+ @platform_skip
+ def test_validate_transcendentals(self):
+ with np.errstate(all='ignore'):
+ data_dir = path.join(path.dirname(__file__), 'data')
+ files = os.listdir(data_dir)
+ files = list(filter(lambda f: f.endswith('.csv'), files))
+ for filename in files:
+ filepath = path.join(data_dir, filename)
+ with open(filepath) as fid:
+ file_without_comments = (r for r in fid if r[0] not in ('$', '#'))
+ data = np.genfromtxt(file_without_comments,
+ dtype=('|S39','|S39','|S39',int),
+ names=('type','input','output','ulperr'),
+ delimiter=',',
+ skip_header=1)
+ npname = path.splitext(filename)[0].split('-')[3]
+ npfunc = getattr(np, npname)
+ for datatype in np.unique(data['type']):
+ data_subset = data[data['type'] == datatype]
+ inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
+ outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
+ perm = np.random.permutation(len(inval))
+ inval = inval[perm]
+ outval = outval[perm]
+ maxulperr = data_subset['ulperr'].max()
+ assert_array_max_ulp(npfunc(inval), outval, maxulperr)
+
+ @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
+ def test_validate_fp16_transcendentals(self, ufunc):
+ with np.errstate(all='ignore'):
+ arr = np.arange(65536, dtype=np.int16)
+ datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16)
+ datafp32 = datafp16.astype(np.float32)
+ assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32),
+ maxulp=1, dtype=np.float16)
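+ # Editor's note: reinterpreting all 2**16 int16 bit patterns as float16
+ # above makes this ulp comparison exhaustive over the entire dtype.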
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py
new file mode 100644
index 00000000..8aa9a28f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_umath_complex.py
@@ -0,0 +1,622 @@
+import sys
+import platform
+import pytest
+
+import numpy as np
+# import the c-extension module directly since _arg is not exported via umath
+import numpy.core._multiarray_umath as ncu
+from numpy.testing import (
+ assert_raises, assert_equal, assert_array_equal, assert_almost_equal, assert_array_max_ulp
+ )
+
+# TODO: branch cuts (use Pauli code)
+# TODO: conj 'symmetry'
+# TODO: FPU exceptions
+
+# At least on Windows the results of many complex functions are not conforming
+# to the C99 standard. See ticket 1574.
+# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
+# FIXME: this will probably change when we require full C99 compatibility
+with np.errstate(all='ignore'):
+ functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
+ or (np.log(complex(np.NZERO, 0)).imag != np.pi))
+# TODO: replace with a check on whether platform-provided C99 funcs are used
+xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
+
+# TODO: this can become an xfail once the generator functions are removed.
+platform_skip = pytest.mark.skipif(xfail_complex_tests,
+ reason="Inadequate C99 complex support")
+
+
+
+class TestCexp:
+ def test_simple(self):
+ check = check_complex_value
+ f = np.exp
+
+ check(f, 1, 0, np.exp(1), 0, False)
+ check(f, 0, 1, np.cos(1), np.sin(1), False)
+
+ ref = np.exp(1) * complex(np.cos(1), np.sin(1))
+ check(f, 1, 1, ref.real, ref.imag, False)
+
+ @platform_skip
+ def test_special_values(self):
+ # C99: Section G 6.3.1
+
+ check = check_complex_value
+ f = np.exp
+
+ # cexp(+-0 + 0i) is 1 + 0i
+ check(f, np.PZERO, 0, 1, 0, False)
+ check(f, np.NZERO, 0, 1, 0, False)
+
+ # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
+ # exception
+ check(f, 1, np.inf, np.nan, np.nan)
+ check(f, -1, np.inf, np.nan, np.nan)
+ check(f, 0, np.inf, np.nan, np.nan)
+
+ # cexp(inf + 0i) is inf + 0i
+ check(f, np.inf, 0, np.inf, 0)
+
+ # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
+ check(f, -np.inf, 1, np.PZERO, np.PZERO)
+ check(f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO)
+
+ # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
+ check(f, np.inf, 1, np.inf, np.inf)
+ check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
+
+ # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
+ def _check_ninf_inf(dummy):
+ msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
+ with np.errstate(invalid='ignore'):
+ z = f(np.array(complex(-np.inf, np.inf)))
+ if z.real != 0 or z.imag != 0:
+ raise AssertionError(msgform % (z.real, z.imag))
+
+ _check_ninf_inf(None)
+
+ # cexp(inf + inf i) is +-inf + NaNi and raises the 'invalid' FPU ex.
+ def _check_inf_inf(dummy):
+ msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
+ with np.errstate(invalid='ignore'):
+ z = f(np.array(complex(np.inf, np.inf)))
+ if not np.isinf(z.real) or not np.isnan(z.imag):
+ raise AssertionError(msgform % (z.real, z.imag))
+
+ _check_inf_inf(None)
+
+ # cexp(-inf + nan i) is +-0 +- 0i
+ def _check_ninf_nan(dummy):
+ msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
+ with np.errstate(invalid='ignore'):
+ z = f(np.array(complex(-np.inf, np.nan)))
+ if z.real != 0 or z.imag != 0:
+ raise AssertionError(msgform % (z.real, z.imag))
+
+ _check_ninf_nan(None)
+
+ # cexp(inf + nan i) is +-inf + nani
+ def _check_inf_nan(dummy):
+ msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)"
+ with np.errstate(invalid='ignore'):
+ z = f(np.array(complex(np.inf, np.nan)))
+ if not np.isinf(z.real) or not np.isnan(z.imag):
+ raise AssertionError(msgform % (z.real, z.imag))
+
+ _check_inf_nan(None)
+
+ # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
+ # ex)
+ check(f, np.nan, 1, np.nan, np.nan)
+ check(f, np.nan, -1, np.nan, np.nan)
+
+ check(f, np.nan, np.inf, np.nan, np.nan)
+ check(f, np.nan, -np.inf, np.nan, np.nan)
+
+ # cexp(nan + nani) is nan + nani
+ check(f, np.nan, np.nan, np.nan, np.nan)
+
+ # TODO: this can become an xfail once the generator functions are removed.
+ @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
+ def test_special_values2(self):
+ # XXX: most implementations get it wrong here (including glibc <= 2.10)
+ # cexp(nan + 0i) is nan + 0i
+ check = check_complex_value
+ f = np.exp
+
+ check(f, np.nan, 0, np.nan, 0)
+
+class TestClog:
+ def test_simple(self):
+ x = np.array([1+0j, 1+2j])
+ y_r = np.log(np.abs(x)) + 1j * np.angle(x)
+ y = np.log(x)
+ assert_almost_equal(y, y_r)
+
+ @platform_skip
+ @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
+ def test_special_values(self):
+ xl = []
+ yl = []
+
+ # From C99 std (Sec 6.3.2)
+ # XXX: check exceptions raised
+ # --- raising on 'invalid' currently fails, hence the commented-out
+ # assert_raises calls below.
+
+ # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
+ # floating-point exception.
+ with np.errstate(divide='raise'):
+ x = np.array([np.NZERO], dtype=complex)
+ y = complex(-np.inf, np.pi)
+ assert_raises(FloatingPointError, np.log, x)
+ with np.errstate(divide='ignore'):
+ assert_almost_equal(np.log(x), y)
+
+ xl.append(x)
+ yl.append(y)
+
+ # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
+ # floating-point exception.
+ with np.errstate(divide='raise'):
+ x = np.array([0], dtype=complex)
+ y = complex(-np.inf, 0)
+ assert_raises(FloatingPointError, np.log, x)
+ with np.errstate(divide='ignore'):
+ assert_almost_equal(np.log(x), y)
+
+ xl.append(x)
+ yl.append(y)
+
+ # clog(x + i inf) returns +inf + i pi/2, for finite x.
+ x = np.array([complex(1, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.5 * np.pi)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ x = np.array([complex(-1, np.inf)], dtype=complex)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(x + iNaN) returns NaN + iNaN and optionally raises the
+ # 'invalid' floating-point exception, for finite x.
+ with np.errstate(invalid='raise'):
+ x = np.array([complex(1., np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
+ #assert_raises(FloatingPointError, np.log, x)
+ with np.errstate(invalid='ignore'):
+ assert_almost_equal(np.log(x), y)
+
+ xl.append(x)
+ yl.append(y)
+
+ with np.errstate(invalid='raise'):
+ x = np.array([np.inf + 1j * np.nan], dtype=complex)
+ #assert_raises(FloatingPointError, np.log, x)
+ with np.errstate(invalid='ignore'):
+ assert_almost_equal(np.log(x), y)
+
+ xl.append(x)
+ yl.append(y)
+
+ # clog(-inf + iy) returns +inf + i pi, for finite positive-signed y.
+ x = np.array([-np.inf + 1j], dtype=complex)
+ y = complex(np.inf, np.pi)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
+ x = np.array([np.inf + 1j], dtype=complex)
+ y = complex(np.inf, 0)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(-inf + i inf) returns +inf + i 3pi/4.
+ x = np.array([complex(-np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.75 * np.pi)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(+inf + i inf) returns +inf + i pi/4.
+ x = np.array([complex(np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.25 * np.pi)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(+/- inf + iNaN) returns +inf + iNaN.
+ x = np.array([complex(np.inf, np.nan)], dtype=complex)
+ y = complex(np.inf, np.nan)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ x = np.array([complex(-np.inf, np.nan)], dtype=complex)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(NaN + iy) returns NaN + iNaN and optionally raises the
+ # 'invalid' floating-point exception, for finite y.
+ x = np.array([complex(np.nan, 1)], dtype=complex)
+ y = complex(np.nan, np.nan)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(NaN + i inf) returns +inf + iNaN.
+ x = np.array([complex(np.nan, np.inf)], dtype=complex)
+ y = complex(np.inf, np.nan)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(NaN + iNaN) returns NaN + iNaN.
+ x = np.array([complex(np.nan, np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
+ assert_almost_equal(np.log(x), y)
+ xl.append(x)
+ yl.append(y)
+
+ # clog(conj(z)) = conj(clog(z)).
+ xa = np.array(xl, dtype=complex)
+ ya = np.array(yl, dtype=complex)
+ with np.errstate(divide='ignore'):
+ for i in range(len(xa)):
+ assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
+
+
+class TestCsqrt:
+
+ def test_simple(self):
+ # sqrt(1)
+ check_complex_value(np.sqrt, 1, 0, 1, 0)
+
+ # sqrt(1i)
+ rres = 0.5*np.sqrt(2)
+ ires = rres
+ check_complex_value(np.sqrt, 0, 1, rres, ires, False)
+
+ # sqrt(-1)
+ check_complex_value(np.sqrt, -1, 0, 0, 1)
+
+ def test_simple_conjugate(self):
+ ref = np.conj(np.sqrt(complex(1, 1)))
+
+ def f(z):
+ return np.sqrt(np.conj(z))
+
+ check_complex_value(f, 1, 1, ref.real, ref.imag, False)
+
+ #def test_branch_cut(self):
+ # _check_branch_cut(f, -1, 0, 1, -1)
+
+ @platform_skip
+ def test_special_values(self):
+ # C99: Sec G 6.4.2
+
+ check = check_complex_value
+ f = np.sqrt
+
+ # csqrt(+-0 + 0i) is 0 + 0i
+ check(f, np.PZERO, 0, 0, 0)
+ check(f, np.NZERO, 0, 0, 0)
+
+ # csqrt(x + infi) is inf + infi for any x (including NaN)
+ check(f, 1, np.inf, np.inf, np.inf)
+ check(f, -1, np.inf, np.inf, np.inf)
+
+ check(f, np.PZERO, np.inf, np.inf, np.inf)
+ check(f, np.NZERO, np.inf, np.inf, np.inf)
+ check(f, np.inf, np.inf, np.inf, np.inf)
+ check(f, -np.inf, np.inf, np.inf, np.inf)
+ check(f, -np.nan, np.inf, np.inf, np.inf)
+
+ # csqrt(x + nani) is nan + nani for any finite x
+ check(f, 1, np.nan, np.nan, np.nan)
+ check(f, -1, np.nan, np.nan, np.nan)
+ check(f, 0, np.nan, np.nan, np.nan)
+
+ # csqrt(-inf + yi) is +0 + infi for any finite y > 0
+ check(f, -np.inf, 1, np.PZERO, np.inf)
+
+ # csqrt(inf + yi) is +inf + 0i for any finite y > 0
+ check(f, np.inf, 1, np.inf, np.PZERO)
+
+ # csqrt(-inf + nani) is nan +- infi (both +infi and -infi are valid)
+ def _check_ninf_nan(dummy):
+ msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
+ z = np.sqrt(np.array(complex(-np.inf, np.nan)))
+ # FIXME: ugly workaround for isinf bug.
+ with np.errstate(invalid='ignore'):
+ if not (np.isnan(z.real) and np.isinf(z.imag)):
+ raise AssertionError(msgform % (z.real, z.imag))
+
+ _check_ninf_nan(None)
+
+ # csqrt(+inf + nani) is inf + nani
+ check(f, np.inf, np.nan, np.inf, np.nan)
+
+ # csqrt(nan + yi) is nan + nani for any finite y (infinite y is
+ # handled by the x + infi case above)
+ check(f, np.nan, 0, np.nan, np.nan)
+ check(f, np.nan, 1, np.nan, np.nan)
+ check(f, np.nan, np.nan, np.nan, np.nan)
+
+ # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
+ # cuts first)
+
+class TestCpow:
+ def setup_method(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.olderr)
+
+ def test_simple(self):
+ x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
+ y_r = x ** 2
+ y = np.power(x, 2)
+ assert_almost_equal(y, y_r)
+
+ def test_scalar(self):
+ x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
+ y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
+ lx = list(range(len(x)))
+
+ # Hardcode the expected `builtins.complex` values,
+ # as complex exponentiation is broken as of bpo-44698
+ p_r = [
+ 1+0j,
+ 0.20787957635076193+0j,
+ 0.35812203996480685+0.6097119028618724j,
+ 0.12659112128185032+0.48847676699581527j,
+ complex(np.inf, np.nan),
+ complex(np.nan, np.nan),
+ ]
+
+ n_r = [x[i] ** y[i] for i in lx]
+ for i in lx:
+ assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
+
+ def test_array(self):
+ x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
+ y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
+ lx = list(range(len(x)))
+
+ # Hardcode the expected `builtins.complex` values,
+ # as complex exponentiation is broken as of bpo-44698
+ p_r = [
+ 1+0j,
+ 0.20787957635076193+0j,
+ 0.35812203996480685+0.6097119028618724j,
+ 0.12659112128185032+0.48847676699581527j,
+ complex(np.inf, np.nan),
+ complex(np.nan, np.nan),
+ ]
+
+ n_r = x ** y
+ for i in lx:
+ assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
+
+class TestCabs:
+ def setup_method(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.olderr)
+
+ def test_simple(self):
+ x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
+ y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
+ y = np.abs(x)
+ assert_almost_equal(y, y_r)
+
+ def test_fabs(self):
+ # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
+ x = np.array([1+0j], dtype=complex)
+ assert_array_equal(np.abs(x), np.real(x))
+
+ x = np.array([complex(1, np.NZERO)], dtype=complex)
+ assert_array_equal(np.abs(x), np.real(x))
+
+ x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
+ assert_array_equal(np.abs(x), np.real(x))
+
+ x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
+ assert_array_equal(np.abs(x), np.real(x))
+
+ def test_cabs_inf_nan(self):
+ x, y = [], []
+
+ # cabs(+-nan + nani) returns nan
+ x.append(np.nan)
+ y.append(np.nan)
+ check_real_value(np.abs, np.nan, np.nan, np.nan)
+
+ x.append(np.nan)
+ y.append(-np.nan)
+ check_real_value(np.abs, -np.nan, np.nan, np.nan)
+
+ # According to the C99 standard, if exactly one of the real/imaginary
+ # parts is inf and the other is nan, then cabs should return inf
+ x.append(np.inf)
+ y.append(np.nan)
+ check_real_value(np.abs, np.inf, np.nan, np.inf)
+
+ x.append(-np.inf)
+ y.append(np.nan)
+ check_real_value(np.abs, -np.inf, np.nan, np.inf)
+
+ # cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
+ def f(a):
+ return np.abs(np.conj(a))
+
+ def g(a, b):
+ return np.abs(complex(a, b))
+
+ xa = np.array(x, dtype=complex)
+ assert len(xa) == len(x) == len(y)
+ for xi, yi in zip(x, y):
+ ref = g(xi, yi)
+ check_real_value(f, xi, yi, ref)
+
+class TestCarg:
+ def test_simple(self):
+ check_real_value(ncu._arg, 1, 0, 0, False)
+ check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
+
+ check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
+ check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+
+ # TODO: this can become an xfail once the generator functions are removed.
+ @pytest.mark.skip(
+ reason="Complex arithmetic with signed zero fails on most platforms")
+ def test_zero(self):
+ # carg(-0 +- 0i) returns +- pi
+ check_real_value(ncu._arg, np.NZERO, np.PZERO, np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, np.NZERO, -np.pi, False)
+
+ # carg(+0 +- 0i) returns +- 0
+ check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
+ check_real_value(ncu._arg, np.PZERO, np.NZERO, np.NZERO)
+
+ # carg(x +- 0i) returns +- 0 for x > 0
+ check_real_value(ncu._arg, 1, np.PZERO, np.PZERO, False)
+ check_real_value(ncu._arg, 1, np.NZERO, np.NZERO, False)
+
+ # carg(x +- 0i) returns +- pi for x < 0
+ check_real_value(ncu._arg, -1, np.PZERO, np.pi, False)
+ check_real_value(ncu._arg, -1, np.NZERO, -np.pi, False)
+
+ # carg(+- 0 + yi) returns pi/2 for y > 0
+ check_real_value(ncu._arg, np.PZERO, 1, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, 1, 0.5 * np.pi, False)
+
+ # carg(+- 0 + yi) returns -pi/2 for y < 0
+ check_real_value(ncu._arg, np.PZERO, -1, -0.5 * np.pi, False)
+ check_real_value(ncu._arg, np.NZERO, -1, -0.5 * np.pi, False)
+
+ #def test_branch_cuts(self):
+ # _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
+
+ def test_special_values(self):
+ # carg(-np.inf +- yi) returns +-pi for finite y > 0
+ check_real_value(ncu._arg, -np.inf, 1, np.pi, False)
+ check_real_value(ncu._arg, -np.inf, -1, -np.pi, False)
+
+ # carg(np.inf +- yi) returns +-0 for finite y > 0
+ check_real_value(ncu._arg, np.inf, 1, np.PZERO, False)
+ check_real_value(ncu._arg, np.inf, -1, np.NZERO, False)
+
+ # carg(x +- np.infi) returns +-pi/2 for finite x
+ check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False)
+ check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False)
+
+ # carg(-np.inf +- np.infi) returns +-3pi/4
+ check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False)
+ check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False)
+
+ # carg(np.inf +- np.infi) returns +-pi/4
+ check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False)
+ check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False)
+
+ # carg(x + yi) returns np.nan if x or y is nan
+ check_real_value(ncu._arg, np.nan, 0, np.nan, False)
+ check_real_value(ncu._arg, 0, np.nan, np.nan, False)
+
+ check_real_value(ncu._arg, np.nan, np.inf, np.nan, False)
+ check_real_value(ncu._arg, np.inf, np.nan, np.nan, False)
+
+
+def check_real_value(f, x1, y1, x, exact=True):
+ z1 = np.array([complex(x1, y1)])
+ if exact:
+ assert_equal(f(z1), x)
+ else:
+ assert_almost_equal(f(z1), x)
+
+
+def check_complex_value(f, x1, y1, x2, y2, exact=True):
+ z1 = np.array([complex(x1, y1)])
+ z2 = complex(x2, y2)
+ with np.errstate(invalid='ignore'):
+ if exact:
+ assert_equal(f(z1), z2)
+ else:
+ assert_almost_equal(f(z1), z2)
+
+class TestSpecialComplexAVX:
+ @pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
+ @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+ def test_array(self, stride, astype):
+ arr = np.array([complex(np.nan , np.nan),
+ complex(np.nan , np.inf),
+ complex(np.inf , np.nan),
+ complex(np.inf , np.inf),
+ complex(0. , np.inf),
+ complex(np.inf , 0.),
+ complex(0. , 0.),
+ complex(0. , np.nan),
+ complex(np.nan , 0.)], dtype=astype)
+ abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype)
+ sq_true = np.array([complex(np.nan, np.nan),
+ complex(np.nan, np.nan),
+ complex(np.nan, np.nan),
+ complex(np.nan, np.inf),
+ complex(-np.inf, np.nan),
+ complex(np.inf, np.nan),
+ complex(0., 0.),
+ complex(np.nan, np.nan),
+ complex(np.nan, np.nan)], dtype=astype)
+ assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+ with np.errstate(invalid='ignore'):
+ assert_equal(np.square(arr[::stride]), sq_true[::stride])
+
+class TestComplexAbsoluteAVX:
+ @pytest.mark.parametrize("arraysize", [1,2,3,4,5,6,7,8,9,10,11,13,15,17,18,19])
+ @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+ @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+ # test to ensure masking and strides work as intended in the AVX implementation
+ def test_array(self, arraysize, stride, astype):
+ arr = np.ones(arraysize, dtype=astype)
+ abs_true = np.ones(arraysize, dtype=arr.real.dtype)
+ assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+
+# Test case taken as-is from https://github.com/numpy/numpy/issues/16660
+class TestComplexAbsoluteMixedDTypes:
+ @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+ @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+ @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
+    def test_array(self, stride, astype, func):
+ dtype = [('template_id', '<i8'), ('bank_chisq','<f4'),
+ ('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'),
+ ('cont_chisq', '<f4'), ('psd_var_val', '<f4'), ('sg_chisq','<f4'),
+ ('mycomplex', astype), ('time_index', '<i8')]
+ vec = np.array([
+ (0, 0., 0, -31.666483, 200, 0., 0., 1. , 3.0+4.0j , 613090),
+ (1, 0., 0, 260.91525 , 42, 0., 0., 1. , 5.0+12.0j , 787315),
+ (1, 0., 0, 52.15155 , 42, 0., 0., 1. , 8.0+15.0j , 806641),
+ (1, 0., 0, 52.430195, 42, 0., 0., 1. , 7.0+24.0j , 1363540),
+ (2, 0., 0, 304.43646 , 58, 0., 0., 1. , 20.0+21.0j , 787323),
+ (3, 0., 0, 299.42108 , 52, 0., 0., 1. , 12.0+35.0j , 787332),
+ (4, 0., 0, 39.4836 , 28, 0., 0., 9.182192, 9.0+40.0j , 787304),
+ (4, 0., 0, 76.83787 , 28, 0., 0., 1. , 28.0+45.0j, 1321869),
+ (5, 0., 0, 143.26366 , 24, 0., 0., 10.996129, 11.0+60.0j , 787299)], dtype=dtype)
+ myfunc = getattr(np, func)
+ a = vec['mycomplex']
+ g = myfunc(a[::stride])
+
+ b = vec['mycomplex'].copy()
+ h = myfunc(b[::stride])
+
+ assert_array_max_ulp(h.real, g.real, 1)
+ assert_array_max_ulp(h.imag, g.imag, 1)
diff --git a/venv/lib/python3.9/site-packages/numpy/core/tests/test_unicode.py b/venv/lib/python3.9/site-packages/numpy/core/tests/test_unicode.py
new file mode 100644
index 00000000..2d7c2818
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/tests/test_unicode.py
@@ -0,0 +1,368 @@
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_array_equal
+
+def buffer_length(arr):
+ if isinstance(arr, str):
+ if not arr:
+ charmax = 0
+ else:
+ charmax = max([ord(c) for c in arr])
+ if charmax < 256:
+ size = 1
+ elif charmax < 65536:
+ size = 2
+ else:
+ size = 4
+ return size * len(arr)
+ v = memoryview(arr)
+ if v.shape is None:
+ return len(v) * v.itemsize
+ else:
+ return np.prod(v.shape) * v.itemsize
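+
+# A quick illustration of the helper above (sizes follow from charmax, not
+# from the interpreter build, on PEP 393 Pythons):
+#   buffer_length('abc')     == 3    # charmax < 256   -> 1 byte per char
+#   buffer_length('\u0900')  == 2    # charmax < 65536 -> 2 bytes per char
+#   buffer_length(np.zeros(2, dtype='U3')) == 24   # 2 * 3 chars * 4 bytes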
+
+
+# In both cases below we need to make sure that the byte swapped value (as
+# UCS4) is still a valid unicode:
+# Value that can be represented in UCS2 interpreters
+ucs2_value = '\u0900'
+# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
+ucs4_value = '\U00100900'
+
+
+def test_string_cast():
+ str_arr = np.array(["1234", "1234\0\0"], dtype='S')
+ uni_arr1 = str_arr.astype('>U')
+ uni_arr2 = str_arr.astype('<U')
+
+ with pytest.warns(FutureWarning):
+ assert str_arr != uni_arr1
+ with pytest.warns(FutureWarning):
+ assert str_arr != uni_arr2
+
+ assert_array_equal(uni_arr1, uni_arr2)
+
+
+############################################################
+# Creation tests
+############################################################
+
+class CreateZeros:
+ """Check the creation of zero-valued arrays"""
+
+ def content_check(self, ua, ua_scalar, nbytes):
+
+ # Check the length of the unicode base type
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
+ # Check the length of the data buffer
+ assert_(buffer_length(ua) == nbytes)
+ # Small check that data in array element is ok
+ assert_(ua_scalar == '')
+ # Encode to ascii and double check
+ assert_(ua_scalar.encode('ascii') == b'')
+ # Check buffer lengths for scalars
+ assert_(buffer_length(ua_scalar) == 0)
+
+ def test_zeros0D(self):
+ # Check creation of 0-dimensional objects
+ ua = np.zeros((), dtype='U%s' % self.ulen)
+ self.content_check(ua, ua[()], 4*self.ulen)
+
+ def test_zerosSD(self):
+ # Check creation of single-dimensional objects
+ ua = np.zeros((2,), dtype='U%s' % self.ulen)
+ self.content_check(ua, ua[0], 4*self.ulen*2)
+ self.content_check(ua, ua[1], 4*self.ulen*2)
+
+ def test_zerosMD(self):
+ # Check creation of multi-dimensional objects
+ ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
+ self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
+ self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
+
+
+class TestCreateZeros_1(CreateZeros):
+ """Check the creation of zero-valued arrays (size 1)"""
+ ulen = 1
+
+
+class TestCreateZeros_2(CreateZeros):
+ """Check the creation of zero-valued arrays (size 2)"""
+ ulen = 2
+
+
+class TestCreateZeros_1009(CreateZeros):
+ """Check the creation of zero-valued arrays (size 1009)"""
+ ulen = 1009
+
+
+class CreateValues:
+ """Check the creation of unicode arrays with values"""
+
+ def content_check(self, ua, ua_scalar, nbytes):
+
+ # Check the length of the unicode base type
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
+ # Check the length of the data buffer
+ assert_(buffer_length(ua) == nbytes)
+ # Small check that data in array element is ok
+ assert_(ua_scalar == self.ucs_value*self.ulen)
+ # Encode to UTF-8 and double check
+ assert_(ua_scalar.encode('utf-8') ==
+ (self.ucs_value*self.ulen).encode('utf-8'))
+ # Check buffer lengths for scalars
+ if self.ucs_value == ucs4_value:
+            # In UCS2, the \U00100900 value will be represented using a
+            # surrogate *pair*
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
+ else:
+            # In UCS2, the \u0900 value will be represented using a
+            # regular 2-byte word
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
+
+ def test_values0D(self):
+ # Check creation of 0-dimensional objects with values
+ ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
+ self.content_check(ua, ua[()], 4*self.ulen)
+
+ def test_valuesSD(self):
+ # Check creation of single-dimensional objects with values
+ ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ self.content_check(ua, ua[0], 4*self.ulen*2)
+ self.content_check(ua, ua[1], 4*self.ulen*2)
+
+ def test_valuesMD(self):
+ # Check creation of multi-dimensional objects with values
+ ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen)
+ self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
+ self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
+
+
+class TestCreateValues_1_UCS2(CreateValues):
+ """Check the creation of valued arrays (size 1, UCS2 values)"""
+ ulen = 1
+ ucs_value = ucs2_value
+
+
+class TestCreateValues_1_UCS4(CreateValues):
+ """Check the creation of valued arrays (size 1, UCS4 values)"""
+ ulen = 1
+ ucs_value = ucs4_value
+
+
+class TestCreateValues_2_UCS2(CreateValues):
+ """Check the creation of valued arrays (size 2, UCS2 values)"""
+ ulen = 2
+ ucs_value = ucs2_value
+
+
+class TestCreateValues_2_UCS4(CreateValues):
+ """Check the creation of valued arrays (size 2, UCS4 values)"""
+ ulen = 2
+ ucs_value = ucs4_value
+
+
+class TestCreateValues_1009_UCS2(CreateValues):
+ """Check the creation of valued arrays (size 1009, UCS2 values)"""
+ ulen = 1009
+ ucs_value = ucs2_value
+
+
+class TestCreateValues_1009_UCS4(CreateValues):
+ """Check the creation of valued arrays (size 1009, UCS4 values)"""
+ ulen = 1009
+ ucs_value = ucs4_value
+
+
+############################################################
+# Assignment tests
+############################################################
+
+class AssignValues:
+ """Check the assignment of unicode arrays with values"""
+
+ def content_check(self, ua, ua_scalar, nbytes):
+
+ # Check the length of the unicode base type
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
+ # Check the length of the data buffer
+ assert_(buffer_length(ua) == nbytes)
+ # Small check that data in array element is ok
+ assert_(ua_scalar == self.ucs_value*self.ulen)
+ # Encode to UTF-8 and double check
+ assert_(ua_scalar.encode('utf-8') ==
+ (self.ucs_value*self.ulen).encode('utf-8'))
+ # Check buffer lengths for scalars
+ if self.ucs_value == ucs4_value:
+            # In UCS2, the \U00100900 value will be represented using a
+            # surrogate *pair*
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
+ else:
+            # In UCS2, the \u0900 value will be represented using a
+            # regular 2-byte word
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
+
+ def test_values0D(self):
+ # Check assignment of 0-dimensional objects with values
+ ua = np.zeros((), dtype='U%s' % self.ulen)
+ ua[()] = self.ucs_value*self.ulen
+ self.content_check(ua, ua[()], 4*self.ulen)
+
+ def test_valuesSD(self):
+ # Check assignment of single-dimensional objects with values
+ ua = np.zeros((2,), dtype='U%s' % self.ulen)
+ ua[0] = self.ucs_value*self.ulen
+ self.content_check(ua, ua[0], 4*self.ulen*2)
+ ua[1] = self.ucs_value*self.ulen
+ self.content_check(ua, ua[1], 4*self.ulen*2)
+
+ def test_valuesMD(self):
+ # Check assignment of multi-dimensional objects with values
+ ua = np.zeros((2, 3, 4), dtype='U%s' % self.ulen)
+ ua[0, 0, 0] = self.ucs_value*self.ulen
+ self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
+ ua[-1, -1, -1] = self.ucs_value*self.ulen
+ self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
+
+
+class TestAssignValues_1_UCS2(AssignValues):
+ """Check the assignment of valued arrays (size 1, UCS2 values)"""
+ ulen = 1
+ ucs_value = ucs2_value
+
+
+class TestAssignValues_1_UCS4(AssignValues):
+ """Check the assignment of valued arrays (size 1, UCS4 values)"""
+ ulen = 1
+ ucs_value = ucs4_value
+
+
+class TestAssignValues_2_UCS2(AssignValues):
+ """Check the assignment of valued arrays (size 2, UCS2 values)"""
+ ulen = 2
+ ucs_value = ucs2_value
+
+
+class TestAssignValues_2_UCS4(AssignValues):
+ """Check the assignment of valued arrays (size 2, UCS4 values)"""
+ ulen = 2
+ ucs_value = ucs4_value
+
+
+class TestAssignValues_1009_UCS2(AssignValues):
+ """Check the assignment of valued arrays (size 1009, UCS2 values)"""
+ ulen = 1009
+ ucs_value = ucs2_value
+
+
+class TestAssignValues_1009_UCS4(AssignValues):
+ """Check the assignment of valued arrays (size 1009, UCS4 values)"""
+ ulen = 1009
+ ucs_value = ucs4_value
+
+
+############################################################
+# Byteorder tests
+############################################################
+
+class ByteorderValues:
+ """Check the byteorder of unicode arrays in round-trip conversions"""
+
+ def test_values0D(self):
+ # Check byteorder of 0-dimensional objects
+ ua = np.array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
+ ua2 = ua.newbyteorder()
+ # This changes the interpretation of the data region (but not the
+ # actual data), therefore the returned scalars are not
+ # the same (they are byte-swapped versions of each other).
+ assert_(ua[()] != ua2[()])
+ ua3 = ua2.newbyteorder()
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+ def test_valuesSD(self):
+ # Check byteorder of single-dimensional objects
+ ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ ua2 = ua.newbyteorder()
+ assert_((ua != ua2).all())
+ assert_(ua[-1] != ua2[-1])
+ ua3 = ua2.newbyteorder()
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+ def test_valuesMD(self):
+ # Check byteorder of multi-dimensional objects
+ ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
+ dtype='U%s' % self.ulen)
+ ua2 = ua.newbyteorder()
+ assert_((ua != ua2).all())
+ assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
+ ua3 = ua2.newbyteorder()
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+ def test_values_cast(self):
+        # Check byteorder when casting the array, for a strided and a
+        # contiguous array:
+ test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ test2 = np.repeat(test1, 2)[::2]
+ for ua in (test1, test2):
+ ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
+ ua3 = ua2.astype(dtype=ua.dtype)
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+ def test_values_updowncast(self):
+        # Check byteorder when casting the array to a longer and a shorter
+        # string length, for strided and contiguous arrays
+ test1 = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
+ test2 = np.repeat(test1, 2)[::2]
+ for ua in (test1, test2):
+ # Cast to a longer type with zero padding
+ longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
+ ua2 = ua.astype(dtype=longer_type)
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
+ # Cast back again with truncating:
+ ua3 = ua2.astype(dtype=ua.dtype)
+ # Arrays must be equal after the round-trip
+ assert_equal(ua, ua3)
+
+
+class TestByteorder_1_UCS2(ByteorderValues):
+ """Check the byteorder in unicode (size 1, UCS2 values)"""
+ ulen = 1
+ ucs_value = ucs2_value
+
+
+class TestByteorder_1_UCS4(ByteorderValues):
+ """Check the byteorder in unicode (size 1, UCS4 values)"""
+ ulen = 1
+ ucs_value = ucs4_value
+
+
+class TestByteorder_2_UCS2(ByteorderValues):
+ """Check the byteorder in unicode (size 2, UCS2 values)"""
+ ulen = 2
+ ucs_value = ucs2_value
+
+
+class TestByteorder_2_UCS4(ByteorderValues):
+ """Check the byteorder in unicode (size 2, UCS4 values)"""
+ ulen = 2
+ ucs_value = ucs4_value
+
+
+class TestByteorder_1009_UCS2(ByteorderValues):
+ """Check the byteorder in unicode (size 1009, UCS2 values)"""
+ ulen = 1009
+ ucs_value = ucs2_value
+
+
+class TestByteorder_1009_UCS4(ByteorderValues):
+ """Check the byteorder in unicode (size 1009, UCS4 values)"""
+ ulen = 1009
+ ucs_value = ucs4_value
diff --git a/venv/lib/python3.9/site-packages/numpy/core/umath.py b/venv/lib/python3.9/site-packages/numpy/core/umath.py
new file mode 100644
index 00000000..6a5474ff
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/umath.py
@@ -0,0 +1,36 @@
+"""
+Create the numpy.core.umath namespace for backward compatibility. In v1.16
+the multiarray and umath c-extension modules were merged into a single
+_multiarray_umath extension module. So we replicate the old namespace
+by importing from the extension module.
+
+"""
+
+from . import _multiarray_umath
+from ._multiarray_umath import * # noqa: F403
+# These imports are needed for backward compatibility;
+# do not change them (see gh-11862).
+# _ones_like is semi-public and deliberately not added to __all__.
+from ._multiarray_umath import _UFUNC_API, _add_newdoc_ufunc, _ones_like
+
+__all__ = [
+ '_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG',
+ 'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT',
+ 'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN',
+ 'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID',
+ 'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT',
+ 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add',
+ 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
+ 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
+ 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide',
+ 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs',
+ 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp',
+ 'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside',
+ 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp',
+ 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2',
+ 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
+ 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative',
+ 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians',
+ 'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign',
+ 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan',
+ 'tanh', 'true_divide', 'trunc']
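+
+# Minimal usage sketch: code written against the pre-1.16 layout keeps
+# working through this shim, e.g.
+#   from numpy.core import umath
+#   umath.add(1, 2)    # -> 3, the same ufunc object as np.add
+#   umath.pi           # -> 3.141592653589793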
diff --git a/venv/lib/python3.9/site-packages/numpy/core/umath_tests.py b/venv/lib/python3.9/site-packages/numpy/core/umath_tests.py
new file mode 100644
index 00000000..90ab17e6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/core/umath_tests.py
@@ -0,0 +1,13 @@
+"""
+Shim for _umath_tests to allow a deprecation period for the new name.
+
+"""
+import warnings
+
+# 2018-04-04, numpy 1.15.0
+warnings.warn(("numpy.core.umath_tests is an internal NumPy "
+ "module and should not be imported. It will "
+ "be removed in a future NumPy release."),
+ category=DeprecationWarning, stacklevel=2)
+
+from ._umath_tests import *
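+
+# Illustration: the warning fires on first import only (later imports hit
+# the module cache), so test suites typically capture it like
+#   import warnings
+#   with warnings.catch_warnings(record=True) as w:
+#       warnings.simplefilter("always")
+#       import numpy.core.umath_tests  # noqa: F401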
diff --git a/venv/lib/python3.9/site-packages/numpy/ctypeslib.py b/venv/lib/python3.9/site-packages/numpy/ctypeslib.py
new file mode 100644
index 00000000..c4bafca1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ctypeslib.py
@@ -0,0 +1,547 @@
+"""
+============================
+``ctypes`` Utility Functions
+============================
+
+See Also
+--------
+load_library : Load a C library.
+ndpointer : Array restype/argtype with verification.
+as_ctypes : Create a ctypes array from an ndarray.
+as_array : Create an ndarray from a ctypes array.
+
+References
+----------
+.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
+
+Examples
+--------
+Load the C library:
+
+>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
+
+Our result type: an ndarray that must be of type double, 1-dimensional,
+and C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+... dtype=np.double,
+... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place. For example::
+
+ void foo_func(double* x, int length)
+ {
+ int i;
+ for (i = 0; i < length; i++) {
+ x[i] = i*i;
+ }
+ }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out)) #doctest: +SKIP
+
+"""
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
+ 'as_ctypes_type']
+
+import os
+from numpy import (
+ integer, ndarray, dtype as _dtype, asarray, frombuffer
+)
+from numpy.core.multiarray import _flagdict, flagsobj
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+if ctypes is None:
+ def _dummy(*args, **kwds):
+ """
+ Dummy object that raises an ImportError if ctypes is not available.
+
+ Raises
+ ------
+ ImportError
+ If ctypes is not available.
+
+ """
+ raise ImportError("ctypes is not available.")
+ load_library = _dummy
+ as_ctypes = _dummy
+ as_array = _dummy
+ from numpy import intp as c_intp
+ _ndptr_base = object
+else:
+ import numpy.core._internal as nic
+ c_intp = nic._getintp_ctype()
+ del nic
+ _ndptr_base = ctypes.c_void_p
+
+ # Adapted from Albert Strasheim
+ def load_library(libname, loader_path):
+ """
+ It is possible to load a library using
+
+ >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
+
+        But there are cross-platform considerations, such as library file
+        extensions, plus the fact that Windows will just load the first
+        library it finds with that name.
+ NumPy supplies the load_library function as a convenience.
+
+ .. versionchanged:: 1.20.0
+ Allow libname and loader_path to take any
+ :term:`python:path-like object`.
+
+ Parameters
+ ----------
+ libname : path-like
+ Name of the library, which can have 'lib' as a prefix,
+ but without an extension.
+ loader_path : path-like
+ Where the library can be found.
+
+ Returns
+ -------
+ ctypes.cdll[libpath] : library object
+ A ctypes library object
+
+ Raises
+ ------
+ OSError
+ If there is no library with the expected extension, or the
+ library is defective and cannot be loaded.
+ """
+ if ctypes.__version__ < '1.0.1':
+ import warnings
+ warnings.warn("All features of ctypes interface may not work "
+ "with ctypes < 1.0.1", stacklevel=2)
+
+ # Convert path-like objects into strings
+ libname = os.fsdecode(libname)
+ loader_path = os.fsdecode(loader_path)
+
+ ext = os.path.splitext(libname)[1]
+ if not ext:
+ # Try to load library with platform-specific name, otherwise
+ # default to libname.[so|pyd]. Sometimes, these files are built
+ # erroneously on non-linux platforms.
+ from numpy.distutils.misc_util import get_shared_lib_extension
+ so_ext = get_shared_lib_extension()
+ libname_ext = [libname + so_ext]
+ # mac, windows and linux >= py3.2 shared library and loadable
+ # module have different extensions so try both
+ so_ext2 = get_shared_lib_extension(is_python_ext=True)
+ if not so_ext2 == so_ext:
+ libname_ext.insert(0, libname + so_ext2)
+ else:
+ libname_ext = [libname]
+
+ loader_path = os.path.abspath(loader_path)
+ if not os.path.isdir(loader_path):
+ libdir = os.path.dirname(loader_path)
+ else:
+ libdir = loader_path
+
+ for ln in libname_ext:
+ libpath = os.path.join(libdir, ln)
+ if os.path.exists(libpath):
+ try:
+ return ctypes.cdll[libpath]
+ except OSError:
+ ## defective lib file
+ raise
+ ## if no successful return in the libname_ext loop:
+ raise OSError("no file with expected extension")
+
+
+def _num_fromflags(flaglist):
+ num = 0
+ for val in flaglist:
+ num += _flagdict[val]
+ return num
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+ 'OWNDATA', 'WRITEBACKIFCOPY']
+def _flags_fromnum(num):
+ res = []
+ for key in _flagnames:
+ value = _flagdict[key]
+ if (num & value):
+ res.append(key)
+ return res
+
+
+class _ndptr(_ndptr_base):
+ @classmethod
+ def from_param(cls, obj):
+ if not isinstance(obj, ndarray):
+ raise TypeError("argument must be an ndarray")
+ if cls._dtype_ is not None \
+ and obj.dtype != cls._dtype_:
+ raise TypeError("array must have data type %s" % cls._dtype_)
+ if cls._ndim_ is not None \
+ and obj.ndim != cls._ndim_:
+ raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+ if cls._shape_ is not None \
+ and obj.shape != cls._shape_:
+ raise TypeError("array must have shape %s" % str(cls._shape_))
+ if cls._flags_ is not None \
+ and ((obj.flags.num & cls._flags_) != cls._flags_):
+ raise TypeError("array must have flags %s" %
+ _flags_fromnum(cls._flags_))
+ return obj.ctypes
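+
+    # Validation sketch, assuming the dynamic subclass built by ndpointer()
+    # below:
+    #   p = ndpointer(dtype=np.float64, ndim=1)
+    #   p.from_param(np.zeros(3))        # ok -> returns the .ctypes attribute
+    #   p.from_param(np.zeros((2, 2)))   # TypeError: wrong number of dims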
+
+
+class _concrete_ndptr(_ndptr):
+ """
+ Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+ Notably, this means the pointer has enough information to reconstruct
+ the array, which is not generally true.
+ """
+ def _check_retval_(self):
+ """
+ This method is called when this class is used as the .restype
+ attribute for a shared-library function, to automatically wrap the
+ pointer into an array.
+ """
+ return self.contents
+
+ @property
+ def contents(self):
+ """
+ Get an ndarray viewing the data pointed to by this pointer.
+
+ This mirrors the `contents` attribute of a normal ctypes pointer
+ """
+ full_dtype = _dtype((self._dtype_, self._shape_))
+ full_ctype = ctypes.c_char * full_dtype.itemsize
+ buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+ return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
+# Factory for an array-checking class with from_param defined for
+# use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+ """
+ Array-checking restype/argtypes.
+
+ An ndpointer instance is used to describe an ndarray in restypes
+ and argtypes specifications. This approach is more flexible than
+ using, for example, ``POINTER(c_double)``, since several restrictions
+ can be specified, which are verified upon calling the ctypes function.
+ These include data type, number of dimensions, shape and flags. If a
+ given array does not satisfy the specified restrictions,
+ a ``TypeError`` is raised.
+
+ Parameters
+ ----------
+ dtype : data-type, optional
+ Array data-type.
+ ndim : int, optional
+ Number of array dimensions.
+ shape : tuple of ints, optional
+ Array shape.
+    flags : str or tuple of str, optional
+ Array flags; may be one or more of:
+
+ - C_CONTIGUOUS / C / CONTIGUOUS
+ - F_CONTIGUOUS / F / FORTRAN
+ - OWNDATA / O
+ - WRITEABLE / W
+ - ALIGNED / A
+ - WRITEBACKIFCOPY / X
+
+ Returns
+ -------
+ klass : ndpointer type object
+        A type object, which is an ``_ndptr`` instance containing
+ dtype, ndim, shape and flags information.
+
+ Raises
+ ------
+ TypeError
+ If a given array does not satisfy the specified restrictions.
+
+ Examples
+ --------
+ >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+ ... ndim=1,
+ ... flags='C_CONTIGUOUS')]
+ ... #doctest: +SKIP
+ >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+ ... #doctest: +SKIP
+
+ """
+
+ # normalize dtype to an Optional[dtype]
+ if dtype is not None:
+ dtype = _dtype(dtype)
+
+ # normalize flags to an Optional[int]
+ num = None
+ if flags is not None:
+ if isinstance(flags, str):
+ flags = flags.split(',')
+ elif isinstance(flags, (int, integer)):
+ num = flags
+ flags = _flags_fromnum(num)
+ elif isinstance(flags, flagsobj):
+ num = flags.num
+ flags = _flags_fromnum(num)
+ if num is None:
+ try:
+ flags = [x.strip().upper() for x in flags]
+ except Exception as e:
+ raise TypeError("invalid flags specification") from e
+ num = _num_fromflags(flags)
+
+ # normalize shape to an Optional[tuple]
+ if shape is not None:
+ try:
+ shape = tuple(shape)
+ except TypeError:
+ # single integer -> 1-tuple
+ shape = (shape,)
+
+ cache_key = (dtype, ndim, shape, num)
+
+ try:
+ return _pointer_type_cache[cache_key]
+ except KeyError:
+ pass
+
+ # produce a name for the new type
+ if dtype is None:
+ name = 'any'
+ elif dtype.names is not None:
+ name = str(id(dtype))
+ else:
+ name = dtype.str
+ if ndim is not None:
+ name += "_%dd" % ndim
+ if shape is not None:
+ name += "_"+"x".join(str(x) for x in shape)
+ if flags is not None:
+ name += "_"+"_".join(flags)
+
+ if dtype is not None and shape is not None:
+ base = _concrete_ndptr
+ else:
+ base = _ndptr
+
+ klass = type("ndpointer_%s"%name, (base,),
+ {"_dtype_": dtype,
+ "_shape_" : shape,
+ "_ndim_" : ndim,
+ "_flags_" : num})
+ _pointer_type_cache[cache_key] = klass
+ return klass
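+
+# Because of the `_pointer_type_cache` above, calls with an equal
+# (dtype, ndim, shape, flags) key return the identical class object, e.g.
+#   ndpointer(np.float64, ndim=1) is ndpointer(np.float64, ndim=1)  # True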
+
+
+if ctypes is not None:
+ def _ctype_ndarray(element_type, shape):
+ """ Create an ndarray of the given element type and shape """
+ for dim in shape[::-1]:
+ element_type = dim * element_type
+        # prevent the type name from including np.ctypeslib
+ element_type.__module__ = None
+ return element_type
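+
+    # e.g. _ctype_ndarray(ctypes.c_double, (2, 3)) builds (c_double * 3) * 2:
+    # dimensions are applied innermost-first, matching C row-major layout.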
+
+
+ def _get_scalar_type_map():
+ """
+ Return a dictionary mapping native endian scalar dtype to ctypes types
+ """
+ ct = ctypes
+ simple_types = [
+ ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+ ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+ ct.c_float, ct.c_double,
+ ct.c_bool,
+ ]
+ return {_dtype(ctype): ctype for ctype in simple_types}
+
+
+ _scalar_type_map = _get_scalar_type_map()
+
+
+ def _ctype_from_dtype_scalar(dtype):
+        # swapping twice ensures that `=` is promoted to <, >, or |
+ dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+ dtype_native = dtype.newbyteorder('=')
+ try:
+ ctype = _scalar_type_map[dtype_native]
+        except KeyError:
+ raise NotImplementedError(
+ "Converting {!r} to a ctypes type".format(dtype)
+ ) from None
+
+ if dtype_with_endian.byteorder == '>':
+ ctype = ctype.__ctype_be__
+ elif dtype_with_endian.byteorder == '<':
+ ctype = ctype.__ctype_le__
+
+ return ctype
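+
+    # e.g. _ctype_from_dtype_scalar(np.dtype('>i4')) first resolves the
+    # native 32-bit int type (c_int on most platforms), then returns its
+    # big-endian variant via __ctype_be__; '<i4' would use __ctype_le__.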
+
+
+ def _ctype_from_dtype_subarray(dtype):
+ element_dtype, shape = dtype.subdtype
+ ctype = _ctype_from_dtype(element_dtype)
+ return _ctype_ndarray(ctype, shape)
+
+
+ def _ctype_from_dtype_structured(dtype):
+ # extract offsets of each field
+ field_data = []
+ for name in dtype.names:
+ field_dtype, offset = dtype.fields[name][:2]
+ field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+ # ctypes doesn't care about field order
+ field_data = sorted(field_data, key=lambda f: f[0])
+
+ if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
+ # union, if multiple fields all at address 0
+ size = 0
+ _fields_ = []
+ for offset, name, ctype in field_data:
+ _fields_.append((name, ctype))
+ size = max(size, ctypes.sizeof(ctype))
+
+ # pad to the right size
+ if dtype.itemsize != size:
+ _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+ # we inserted manual padding, so always `_pack_`
+ return type('union', (ctypes.Union,), dict(
+ _fields_=_fields_,
+ _pack_=1,
+ __module__=None,
+ ))
+ else:
+ last_offset = 0
+ _fields_ = []
+ for offset, name, ctype in field_data:
+ padding = offset - last_offset
+ if padding < 0:
+ raise NotImplementedError("Overlapping fields")
+ if padding > 0:
+ _fields_.append(('', ctypes.c_char * padding))
+
+ _fields_.append((name, ctype))
+ last_offset = offset + ctypes.sizeof(ctype)
+
+            padding = dtype.itemsize - last_offset
+            if padding > 0:
+                _fields_.append(('', ctypes.c_char * padding))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('struct', (ctypes.Structure,), dict(
+                _fields_=_fields_,
+                _pack_=1,
+                __module__=None,
+            ))
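+
+    # Round-trip sketch for the struct branch above (assuming a simple,
+    # unaligned two-field dtype):
+    #   dt = np.dtype([('a', np.int32), ('b', np.float64)])
+    #   S = _ctype_from_dtype_structured(dt)       # ctypes.Structure subclass
+    #   [name for name, _ in S._fields_ if name]   # -> ['a', 'b']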
+
+
+ def _ctype_from_dtype(dtype):
+ if dtype.fields is not None:
+ return _ctype_from_dtype_structured(dtype)
+ elif dtype.subdtype is not None:
+ return _ctype_from_dtype_subarray(dtype)
+ else:
+ return _ctype_from_dtype_scalar(dtype)
+
+
+ def as_ctypes_type(dtype):
+ r"""
+ Convert a dtype into a ctypes type.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype to convert
+
+ Returns
+ -------
+ ctype
+ A ctype scalar, union, array, or struct
+
+ Raises
+ ------
+ NotImplementedError
+ If the conversion is not possible
+
+ Notes
+ -----
+ This function does not losslessly round-trip in either direction.
+
+ ``np.dtype(as_ctypes_type(dt))`` will:
+
+ - insert padding fields
+ - reorder fields to be sorted by offset
+ - discard field titles
+
+ ``as_ctypes_type(np.dtype(ctype))`` will:
+
+ - discard the class names of `ctypes.Structure`\ s and
+ `ctypes.Union`\ s
+ - convert single-element `ctypes.Union`\ s into single-element
+ `ctypes.Structure`\ s
+ - insert padding fields
+
+ """
+ return _ctype_from_dtype(_dtype(dtype))
+
+
+ def as_array(obj, shape=None):
+ """
+ Create a numpy array from a ctypes array or POINTER.
+
+ The numpy array shares the memory with the ctypes object.
+
+ The shape parameter must be given if converting from a ctypes POINTER.
+        The shape parameter is ignored if converting from a ctypes array.
+ """
+ if isinstance(obj, ctypes._Pointer):
+ # convert pointers to an array of the desired shape
+ if shape is None:
+ raise TypeError(
+ 'as_array() requires a shape argument when called on a '
+ 'pointer')
+ p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
+ obj = ctypes.cast(obj, p_arr_type).contents
+
+ return asarray(obj)
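+
+    # Pointer-wrapping sketch (the shape is required and trusted):
+    #   buf = (ctypes.c_double * 4)(1.0, 2.0, 3.0, 4.0)
+    #   p = ctypes.cast(buf, ctypes.POINTER(ctypes.c_double))
+    #   as_array(p, shape=(4,))    # float64 ndarray sharing buf's memory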
+
+
+ def as_ctypes(obj):
+ """Create and return a ctypes object from a numpy array. Actually
+ anything that exposes the __array_interface__ is accepted."""
+ ai = obj.__array_interface__
+ if ai["strides"]:
+ raise TypeError("strided arrays not supported")
+ if ai["version"] != 3:
+ raise TypeError("only __array_interface__ version 3 supported")
+ addr, readonly = ai["data"]
+ if readonly:
+ raise TypeError("readonly arrays unsupported")
+
+ # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+ # dtype.itemsize (gh-14214)
+ ctype_scalar = as_ctypes_type(ai["typestr"])
+ result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+ result = result_type.from_address(addr)
+ result.__keep = obj
+ return result
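+
+    # The returned ctypes object shares the ndarray's memory, e.g.
+    #   a = np.zeros(4)
+    #   c = as_ctypes(a)
+    #   c[0] = 1.0     # a[0] is now 1.0 as well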
diff --git a/venv/lib/python3.9/site-packages/numpy/ctypeslib.pyi b/venv/lib/python3.9/site-packages/numpy/ctypeslib.pyi
new file mode 100644
index 00000000..0313cd82
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ctypeslib.pyi
@@ -0,0 +1,251 @@
+# NOTE: Numpy's mypy plugin is used for importing the correct
+# platform-specific `ctypes._SimpleCData[int]` sub-type
+from ctypes import c_int64 as _c_intp
+
+import os
+import sys
+import ctypes
+from collections.abc import Iterable, Sequence
+from typing import (
+ Literal as L,
+ Any,
+ Union,
+ TypeVar,
+ Generic,
+ overload,
+ ClassVar,
+)
+
+from numpy import (
+ ndarray,
+ dtype,
+ generic,
+ bool_,
+ byte,
+ short,
+ intc,
+ int_,
+ longlong,
+ ubyte,
+ ushort,
+ uintc,
+ uint,
+ ulonglong,
+ single,
+ double,
+ longdouble,
+ void,
+)
+from numpy.core._internal import _ctypes
+from numpy.core.multiarray import flagsobj
+from numpy._typing import (
+ # Arrays
+ NDArray,
+ _ArrayLike,
+
+ # Shapes
+ _ShapeLike,
+
+ # DTypes
+ DTypeLike,
+ _DTypeLike,
+ _VoidDTypeLike,
+ _BoolCodes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntCodes,
+ _LongLongCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+)
+
+# TODO: Add a proper `_Shape` bound once we've got variadic typevars
+_DType = TypeVar("_DType", bound=dtype[Any])
+_DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any])
+_SCT = TypeVar("_SCT", bound=generic)
+
+_FlagsKind = L[
+ 'C_CONTIGUOUS', 'CONTIGUOUS', 'C',
+ 'F_CONTIGUOUS', 'FORTRAN', 'F',
+ 'ALIGNED', 'A',
+ 'WRITEABLE', 'W',
+ 'OWNDATA', 'O',
+ 'WRITEBACKIFCOPY', 'X',
+]
+
+# TODO: Add a shape typevar once we have variadic typevars (PEP 646)
+class _ndptr(ctypes.c_void_p, Generic[_DTypeOptional]):
+ # In practice these 4 classvars are defined in the dynamic class
+ # returned by `ndpointer`
+ _dtype_: ClassVar[_DTypeOptional]
+ _shape_: ClassVar[None]
+ _ndim_: ClassVar[None | int]
+ _flags_: ClassVar[None | list[_FlagsKind]]
+
+ @overload
+ @classmethod
+ def from_param(cls: type[_ndptr[None]], obj: ndarray[Any, Any]) -> _ctypes: ...
+ @overload
+ @classmethod
+ def from_param(cls: type[_ndptr[_DType]], obj: ndarray[Any, _DType]) -> _ctypes: ...
+
+class _concrete_ndptr(_ndptr[_DType]):
+ _dtype_: ClassVar[_DType]
+ _shape_: ClassVar[tuple[int, ...]]
+ @property
+ def contents(self) -> ndarray[Any, _DType]: ...
+
+def load_library(
+ libname: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+ loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+) -> ctypes.CDLL: ...
+
+__all__: list[str]
+
+c_intp = _c_intp
+
+@overload
+def ndpointer(
+ dtype: None = ...,
+ ndim: int = ...,
+ shape: None | _ShapeLike = ...,
+ flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_ndptr[None]]: ...
+@overload
+def ndpointer(
+ dtype: _DTypeLike[_SCT],
+ ndim: int = ...,
+ *,
+ shape: _ShapeLike,
+ flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_concrete_ndptr[dtype[_SCT]]]: ...
+@overload
+def ndpointer(
+ dtype: DTypeLike,
+ ndim: int = ...,
+ *,
+ shape: _ShapeLike,
+ flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_concrete_ndptr[dtype[Any]]]: ...
+@overload
+def ndpointer(
+ dtype: _DTypeLike[_SCT],
+ ndim: int = ...,
+ shape: None = ...,
+ flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_ndptr[dtype[_SCT]]]: ...
+@overload
+def ndpointer(
+ dtype: DTypeLike,
+ ndim: int = ...,
+ shape: None = ...,
+ flags: None | _FlagsKind | Iterable[_FlagsKind] | int | flagsobj = ...,
+) -> type[_ndptr[dtype[Any]]]: ...
+
+@overload
+def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[bool_] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ...
+@overload
+def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ...
+@overload
+def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ...
+@overload
+def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ...
+@overload
+def as_ctypes_type(dtype: _IntCodes | _DTypeLike[int_] | type[int | ctypes.c_long]) -> type[ctypes.c_long]: ...
+@overload
+def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ...
+@overload
+def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCodes | _DTypeLike[uint] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ...
+@overload
+def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ...
+@overload
+def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ...
+@overload
+def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ... # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes_type(dtype: str) -> type[Any]: ...
+
+@overload
+def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ...
+@overload
+def as_array(obj: _ArrayLike[_SCT], shape: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def as_array(obj: object, shape: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+@overload
+def as_ctypes(obj: bool_) -> ctypes.c_bool: ...
+@overload
+def as_ctypes(obj: byte) -> ctypes.c_byte: ...
+@overload
+def as_ctypes(obj: short) -> ctypes.c_short: ...
+@overload
+def as_ctypes(obj: intc) -> ctypes.c_int: ...
+@overload
+def as_ctypes(obj: int_) -> ctypes.c_long: ...
+@overload
+def as_ctypes(obj: longlong) -> ctypes.c_longlong: ...
+@overload
+def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ...
+@overload
+def as_ctypes(obj: ushort) -> ctypes.c_ushort: ...
+@overload
+def as_ctypes(obj: uintc) -> ctypes.c_uint: ...
+@overload
+def as_ctypes(obj: uint) -> ctypes.c_ulong: ...
+@overload
+def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ...
+@overload
+def as_ctypes(obj: single) -> ctypes.c_float: ...
+@overload
+def as_ctypes(obj: double) -> ctypes.c_double: ...
+@overload
+def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ...
+@overload
+def as_ctypes(obj: void) -> Any: ... # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes(obj: NDArray[bool_]) -> ctypes.Array[ctypes.c_bool]: ...
+@overload
+def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ...
+@overload
+def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ...
+@overload
+def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ...
+@overload
+def as_ctypes(obj: NDArray[int_]) -> ctypes.Array[ctypes.c_long]: ...
+@overload
+def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ...
+@overload
+def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ...
+@overload
+def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ...
+@overload
+def as_ctypes(obj: NDArray[uint]) -> ctypes.Array[ctypes.c_ulong]: ...
+@overload
+def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ...
+@overload
+def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ...
+@overload
+def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ... # `ctypes.Union` or `ctypes.Structure`
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/__config__.py b/venv/lib/python3.9/site-packages/numpy/distutils/__config__.py
new file mode 100644
index 00000000..2590dbb9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/__config__.py
@@ -0,0 +1,115 @@
+# This file is generated by numpy's setup.py
+# It contains system_info results at the time of building this package.
+__all__ = ["get_info","show"]
+
+
+import os
+import sys
+
+extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.add_dll_directory(extra_dll_dir)
+
+openblas64__info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+blas_ilp64_opt_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+openblas64__lapack_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+lapack_ilp64_opt_info={'libraries': ['openblas64_', 'openblas64_'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None), ('BLAS_SYMBOL_SUFFIX', '64_'), ('HAVE_BLAS_ILP64', None), ('HAVE_LAPACKE', None)], 'runtime_library_dirs': ['/usr/local/lib']}
+
+def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
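+
+# e.g. get_info('blas_ilp64_opt') and get_info('blas_ilp64_opt_info') both
+# return the dict assigned above; unknown names return {}.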
+
+def show():
+ """
+ Show libraries in the system on which NumPy was built.
+
+ Print information about various resources (libraries, library
+ directories, include directories, etc.) in the system on which
+ NumPy was built.
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ 1. Classes specifying the information to be printed are defined
+ in the `numpy.distutils.system_info` module.
+
+ Information may include:
+
+ * ``language``: language used to write the libraries (mostly
+ C or f77)
+ * ``libraries``: names of libraries found in the system
+ * ``library_dirs``: directories containing the libraries
+ * ``include_dirs``: directories containing library header files
+ * ``src_dirs``: directories containing library source files
+ * ``define_macros``: preprocessor macros used by
+ ``distutils.setup``
+ * ``baseline``: minimum CPU features required
+ * ``found``: dispatched features supported in the system
+ * ``not found``: dispatched features that are not supported
+ in the system
+
+ 2. NumPy BLAS/LAPACK Installation Notes
+
+ Installing a numpy wheel (``pip install numpy`` or force it
+ via ``pip install numpy --only-binary :numpy: numpy``) includes
+ an OpenBLAS implementation of the BLAS and LAPACK linear algebra
+ APIs. In this case, ``library_dirs`` reports the original build
+ time configuration as compiled with gcc/gfortran; at run time
+ the OpenBLAS library is in
+ ``site-packages/numpy.libs/`` (linux), or
+ ``site-packages/numpy/.dylibs/`` (macOS), or
+ ``site-packages/numpy/.libs/`` (windows).
+
+ Installing numpy from source
+ (``pip install numpy --no-binary numpy``) searches for BLAS and
+ LAPACK dynamic link libraries at build time as influenced by
+ environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
+ NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
+ or the optional file ``~/.numpy-site.cfg``.
+ NumPy remembers those locations and expects to load the same
+ libraries at run-time.
+ In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
+ library) is in the default build-time search order after
+ 'openblas'.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.show_config()
+ blas_opt_info:
+ language = c
+ define_macros = [('HAVE_CBLAS', None)]
+ libraries = ['openblas', 'openblas']
+ library_dirs = ['/usr/local/lib']
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+    for name, info_dict in globals().items():
+        if name[0] == "_" or not isinstance(info_dict, dict):
+            continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+        for k, v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+
+ print("Supported SIMD extensions in this NumPy install:")
+ print(" baseline = %s" % (','.join(__cpu_baseline__)))
+ print(" found = %s" % (','.join(features_found)))
+ print(" not found = %s" % (','.join(features_not_found)))
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.py
new file mode 100644
index 00000000..f74ed4d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.py
@@ -0,0 +1,64 @@
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+ misc_util
+ system_info
+ cpu_info
+ log
+ exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
+
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
+
+"""
+
+import warnings
+
+# Must import local ccompiler ASAP in order to get
+# customized CCompiler.spawn effective.
+from . import ccompiler
+from . import unixccompiler
+
+from .npy_pkg_config import *
+
+warnings.warn("\n\n"
+ " `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
+ " of the deprecation of `distutils` itself. It will be removed for\n"
+ " Python >= 3.12. For older Python versions it will remain present.\n"
+ " It is recommended to use `setuptools < 60.0` for those Python versions.\n"
+ " For more details, see:\n"
+ " https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
+ DeprecationWarning, stacklevel=2
+)
+del warnings
+
+# If numpy is installed, add distutils.test()
+try:
+ from . import __config__
+ # Normally numpy is installed if the above import works, but an interrupted
+ # in-place build could also have left a __config__.py. In that case the
+ # next import may still fail, so keep it inside the try block.
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+except ImportError:
+ pass
+
+
+def customized_fcompiler(plat=None, compiler=None):
+ from numpy.distutils.fcompiler import new_fcompiler
+ c = new_fcompiler(plat=plat, compiler=compiler)
+ c.customize()
+ return c
+
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+ c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
+ c.customize('')
+ return c
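+
+# Usage sketch (attribute shown is specific to unix-style compilers):
+#   cc = customized_ccompiler()
+#   cc.compiler_so    # customized compile command parts, e.g. ['gcc', ...]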
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi
new file mode 100644
index 00000000..3938d68d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py b/venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py
new file mode 100644
index 00000000..82abd5f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/_shell_utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for interacting with the shell, and consuming shell-style
+parameters provided in config files.
+"""
+import os
+import shlex
+import subprocess
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
+
+
+class CommandLineParser:
+ """
+ An object that knows how to split and join command-line arguments.
+
+ It must be true that ``argv == split(join(argv))`` for all ``argv``.
+    The reverse needn't be true - `join(split(cmd))` may result in the
+    addition or removal of unnecessary escaping.
+ """
+ @staticmethod
+ def join(argv):
+ """ Join a list of arguments into a command line string """
+ raise NotImplementedError
+
+ @staticmethod
+ def split(cmd):
+ """ Split a command line string into a list of arguments """
+ raise NotImplementedError
+
+
+class WindowsParser:
+ """
+ The parsing behavior used by `subprocess.call("string")` on Windows, which
+ matches the Microsoft C/C++ runtime.
+
+ Note that this is _not_ the behavior of cmd.
+ """
+ @staticmethod
+ def join(argv):
+ # note that list2cmdline is specific to the windows syntax
+ return subprocess.list2cmdline(argv)
+
+ @staticmethod
+ def split(cmd):
+ import ctypes # guarded import for systems without ctypes
+ try:
+ ctypes.windll
+ except AttributeError:
+ raise NotImplementedError
+
+ # Windows has special parsing rules for the executable (no quotes),
+ # that we do not care about - insert a dummy element
+ if not cmd:
+ return []
+ cmd = 'dummy ' + cmd
+
+ CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+ CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
+ CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
+
+ nargs = ctypes.c_int()
+ lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
+ args = [lpargs[i] for i in range(nargs.value)]
+ assert not ctypes.windll.kernel32.LocalFree(lpargs)
+
+ # strip the element we inserted
+ assert args[0] == "dummy"
+ return args[1:]
+
+
+class PosixParser:
+ """
+ The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
+ """
+ @staticmethod
+ def join(argv):
+ return ' '.join(quote(arg) for arg in argv)
+
+ @staticmethod
+ def split(cmd):
+ return shlex.split(cmd, posix=True)
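+
+# The CommandLineParser invariant, argv == split(join(argv)), in practice:
+#   argv = ['gcc', '-DMSG=a b', 'file.c']
+#   PosixParser.split(PosixParser.join(argv)) == argv    # True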
+
+
+if os.name == 'nt':
+ NativeParser = WindowsParser
+elif os.name == 'posix':
+ NativeParser = PosixParser
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py
new file mode 100644
index 00000000..afba7eb3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/armccompiler.py
@@ -0,0 +1,26 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class ArmCCompiler(UnixCCompiler):
+
+ """
+ Arm compiler.
+ """
+
+ compiler_type = 'arm'
+ cc_exe = 'armclang'
+ cxx_exe = 'armclang++'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+ cc_compiler = self.cc_exe
+ cxx_compiler = self.cxx_exe
+ self.set_executables(compiler=cc_compiler +
+ ' -O3 -fPIC',
+ compiler_so=cc_compiler +
+ ' -O3 -fPIC',
+ compiler_cxx=cxx_compiler +
+ ' -O3 -fPIC',
+ linker_exe=cc_compiler +
+ ' -lamath',
+ linker_so=cc_compiler +
+ ' -lamath -shared')
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py
new file mode 100644
index 00000000..f0487cb6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler.py
@@ -0,0 +1,814 @@
+import os
+import re
+import sys
+import shlex
+import time
+import subprocess
+from copy import copy
+from distutils import ccompiler
+from distutils.ccompiler import (
+ compiler_class, gen_lib_options, get_default_compiler, new_compiler,
+ CCompiler
+)
+from distutils.errors import (
+ DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
+ CompileError, UnknownFileError
+)
+from distutils.sysconfig import customize_compiler
+from distutils.version import LooseVersion
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import (
+ filepath_from_subprocess_output, forward_bytes_to_stdout
+)
+from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
+ get_num_build_jobs, \
+ _commandline_dep_string, \
+ sanitize_cxx_flags
+
+# globals for parallel build management
+import threading
+
+_job_semaphore = None
+_global_lock = threading.Lock()
+_processing_files = set()
+
+
+def _needs_build(obj, cc_args, extra_postargs, pp_opts):
+ """
+    Check if an object needs to be rebuilt based on its dependencies
+
+ Parameters
+ ----------
+ obj : str
+ object file
+
+ Returns
+ -------
+ bool
+ """
+ # defined in unixcompiler.py
+ dep_file = obj + '.d'
+ if not os.path.exists(dep_file):
+ return True
+
+ # dep_file is a makefile containing 'object: dependencies'
+ # formatted like posix shell (spaces escaped, \ line continuations)
+ # the last line contains the compiler commandline arguments as some
+ # projects may compile an extension multiple times with different
+ # arguments
+ with open(dep_file, "r") as f:
+ lines = f.readlines()
+
+    cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
+ last_cmdline = lines[-1]
+ if last_cmdline != cmdline:
+ return True
+
+ contents = ''.join(lines[:-1])
+ deps = [x for x in shlex.split(contents, posix=True)
+ if x != "\n" and not x.endswith(":")]
+
+ try:
+ t_obj = os.stat(obj).st_mtime
+
+ # check if any of the dependencies is newer than the object
+ # the dependencies includes the source used to create the object
+ for f in deps:
+ if os.stat(f).st_mtime > t_obj:
+ return True
+ except OSError:
+ # no object counts as newer (shouldn't happen if dep_file exists)
+ return True
+
+ return False
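+
+# Schematic of the '.d' file consumed above: a posix-escaped make rule,
+# with the exact compile command recorded as the last line so that flag
+# changes also trigger a rebuild:
+#   build/foo.o: \
+#    src/foo.c src/foo.h
+#   <command line written by _commandline_dep_string(...)>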
+
+
+def replace_method(klass, method_name, func):
+ # Py3k does not have unbound method anymore, MethodType does not work
+ m = lambda self, *args, **kw: func(self, *args, **kw)
+ setattr(klass, method_name, m)
+
+
+######################################################################
+## Method that subclasses may redefine. But don't call this method,
+## it is private to the CCompiler class and may return unexpected
+## results if used elsewhere. So, you have been warned.
+
+def CCompiler_find_executables(self):
+ """
+ Does nothing here, but is called by the get_version method and can be
+ overridden by subclasses. In particular it is redefined in the `FCompiler`
+ class where more documentation can be found.
+
+ """
+ pass
+
+
+replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
+
+
+# Using customized CCompiler.spawn.
+def CCompiler_spawn(self, cmd, display=None, env=None):
+ """
+ Execute a command in a sub-process.
+
+ Parameters
+ ----------
+ cmd : str
+ The command to execute.
+ display : str or sequence of str, optional
+ The text to add to the log file kept by `numpy.distutils`.
+ If not given, `display` is equal to `cmd`.
+ env : a dictionary for environment variables, optional
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ DistutilsExecError
+ If the command failed, i.e. the exit status was not 0.
+
+ """
+ env = env if env is not None else dict(os.environ)
+ if display is None:
+ display = cmd
+ if is_sequence(display):
+ display = ' '.join(list(display))
+ log.info(display)
+ try:
+ if self.verbose:
+ subprocess.check_output(cmd, env=env)
+ else:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
+ except subprocess.CalledProcessError as exc:
+ o = exc.output
+ s = exc.returncode
+ except OSError as e:
+ # OSError doesn't have the same hooks for the exception
+ # output, but exec_command() historically would use an
+ # empty string for EnvironmentError (base class for
+ # OSError)
+        # o = b''
+        # but that would leave the end-user without any useful context
+ o = f"\n\n{e}\n\n\n"
+ try:
+ o = o.encode(sys.stdout.encoding)
+ except AttributeError:
+ o = o.encode('utf8')
+ # status previously used by exec_command() for parent
+ # of OSError
+ s = 127
+ else:
+ # use a convenience return here so that any kind of
+ # caught exception will execute the default code after the
+ # try / except block, which handles various exceptions
+ return None
+
+ if is_sequence(cmd):
+ cmd = ' '.join(list(cmd))
+
+ if self.verbose:
+ forward_bytes_to_stdout(o)
+
+ if re.search(b'Too many open files', o):
+ msg = '\nTry rerunning setup command until build succeeds.'
+ else:
+ msg = ''
+ raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
+ (cmd, s, msg))
+
+replace_method(CCompiler, 'spawn', CCompiler_spawn)
+
+def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ """
+ Return the name of the object files for the given source files.
+
+ Parameters
+ ----------
+ source_filenames : list of str
+ The list of paths to source files. Paths can be either relative or
+        absolute; this is handled transparently.
+ strip_dir : bool, optional
+ Whether to strip the directory from the returned paths. If True,
+ the file name prepended by `output_dir` is returned. Default is False.
+ output_dir : str, optional
+ If given, this path is prepended to the returned paths to the
+ object files.
+
+ Returns
+ -------
+ obj_names : list of str
+ The list of paths to the object files corresponding to the source
+ files in `source_filenames`.
+
+ """
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ base, ext = os.path.splitext(os.path.normpath(src_name))
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if base.startswith('..'):
+ # Resolve starting relative path components, middle ones
+ # (if any) have been handled by os.path.normpath above.
+ i = base.rfind('..')+2
+ d = base[:i]
+ d = os.path.basename(os.path.abspath(d))
+ base = d + base[i:]
+ if ext not in self.src_extensions:
+ raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name))
+ if strip_dir:
+ base = os.path.basename(base)
+ obj_name = os.path.join(output_dir, base + self.obj_extension)
+ obj_names.append(obj_name)
+ return obj_names
+
+replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames)
+
+def CCompiler_compile(self, sources, output_dir=None, macros=None,
+ include_dirs=None, debug=0, extra_preargs=None,
+ extra_postargs=None, depends=None):
+ """
+ Compile one or more source files.
+
+ Please refer to the Python distutils API reference for more details.
+
+ Parameters
+ ----------
+ sources : list of str
+ A list of filenames
+ output_dir : str, optional
+ Path to the output directory.
+ macros : list of tuples
+ A list of macro definitions.
+ include_dirs : list of str, optional
+ The directories to add to the default include file search path for
+ this compilation only.
+ debug : bool, optional
+ Whether or not to output debug symbols in or alongside the object
+ file(s).
+ extra_preargs, extra_postargs : ?
+ Extra pre- and post-arguments.
+ depends : list of str, optional
+ A list of file names that all targets depend on.
+
+ Returns
+ -------
+ objects : list of str
+ A list of object file names, one per source file `sources`.
+
+ Raises
+ ------
+ CompileError
+ If compilation fails.
+
+ """
+ global _job_semaphore
+
+ jobs = get_num_build_jobs()
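+    # get_num_build_jobs() typically honors the build_ext `--parallel` option
+    # and the NPY_NUM_BUILD_JOBS environment variable (see
+    # numpy.distutils.misc_util).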
+
+ # setup semaphore to not exceed number of compile jobs when parallelized at
+ # extension level (python >= 3.5)
+ with _global_lock:
+ if _job_semaphore is None:
+ _job_semaphore = threading.Semaphore(jobs)
+
+ if not sources:
+ return []
+ from numpy.distutils.fcompiler import (FCompiler, is_f_file,
+ has_f90_header)
+ if isinstance(self, FCompiler):
+ display = []
+ for fc in ['f77', 'f90', 'fix']:
+ fcomp = getattr(self, 'compiler_'+fc)
+ if fcomp is None:
+ continue
+ display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp)))
+ display = '\n'.join(display)
+ else:
+ ccomp = self.compiler_so
+ display = "C compiler: %s\n" % (' '.join(ccomp),)
+ log.info(display)
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+ display = "compile options: '%s'" % (' '.join(cc_args))
+ if extra_postargs:
+ display += "\nextra options: '%s'" % (' '.join(extra_postargs))
+ log.info(display)
+
+ def single_compile(args):
+ obj, (src, ext) = args
+ if not _needs_build(obj, cc_args, extra_postargs, pp_opts):
+ return
+
+ # check if we are currently already processing the same object
+ # happens when using the same source in multiple extensions
+ while True:
+ # need explicit lock as there is no atomic check and add with GIL
+ with _global_lock:
+ # file not being worked on, start working
+ if obj not in _processing_files:
+ _processing_files.add(obj)
+ break
+ # wait for the processing to end
+ time.sleep(0.1)
+
+ try:
+ # retrieve slot from our #job semaphore and build
+ with _job_semaphore:
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+ finally:
+ # register being done processing
+ with _global_lock:
+ _processing_files.remove(obj)
+
+
+ if isinstance(self, FCompiler):
+ objects_to_build = list(build.keys())
+ f77_objects, other_objects = [], []
+ for obj in objects:
+ if obj in objects_to_build:
+ src, ext = build[obj]
+ if self.compiler_type=='absoft':
+ obj = cyg2win32(obj)
+ src = cyg2win32(src)
+ if is_f_file(src) and not has_f90_header(src):
+ f77_objects.append((obj, (src, ext)))
+ else:
+ other_objects.append((obj, (src, ext)))
+
+ # f77 objects can be built in parallel
+ build_items = f77_objects
+        # build f90 modules serially; module files are generated during
+ # compilation and may be used by files later in the list so the
+ # ordering is important
+ for o in other_objects:
+ single_compile(o)
+ else:
+ build_items = build.items()
+
+ if len(build) > 1 and jobs > 1:
+ # build parallel
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(jobs) as pool:
+ res = pool.map(single_compile, build_items)
+ list(res) # access result to raise errors
+ else:
+ # build serial
+ for o in build_items:
+ single_compile(o)
+
+ # Return *all* object filenames, not just the ones we just built.
+ return objects
+
+replace_method(CCompiler, 'compile', CCompiler_compile)
+
+def CCompiler_customize_cmd(self, cmd, ignore=()):
+ """
+ Customize compiler using distutils command.
+
+ Parameters
+ ----------
+ cmd : class instance
+ An instance inheriting from `distutils.cmd.Command`.
+ ignore : sequence of str, optional
+ List of `CCompiler` commands (without ``'set_'``) that should not be
+ altered. Strings that are checked for are:
+ ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs',
+ 'rpath', 'link_objects')``.
+
+ Returns
+ -------
+ None
+
+ """
+ log.info('customize %s using %s' % (self.__class__.__name__,
+ cmd.__class__.__name__))
+
+ if hasattr(self, 'compiler') and 'clang' in self.compiler[0]:
+ # clang defaults to a non-strict floating error point model.
+ # Since NumPy and most Python libs give warnings for these, override:
+ self.compiler.append('-ftrapping-math')
+ self.compiler_so.append('-ftrapping-math')
+
+ def allow(attr):
+ return getattr(cmd, attr, None) is not None and attr not in ignore
+
+ if allow('include_dirs'):
+ self.set_include_dirs(cmd.include_dirs)
+ if allow('define'):
+ for (name, value) in cmd.define:
+ self.define_macro(name, value)
+ if allow('undef'):
+ for macro in cmd.undef:
+ self.undefine_macro(macro)
+ if allow('libraries'):
+ self.set_libraries(self.libraries + cmd.libraries)
+ if allow('library_dirs'):
+ self.set_library_dirs(self.library_dirs + cmd.library_dirs)
+ if allow('rpath'):
+ self.set_runtime_library_dirs(cmd.rpath)
+ if allow('link_objects'):
+ self.set_link_objects(cmd.link_objects)
+
+replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd)
+
+def _compiler_to_string(compiler):
+ props = []
+ mx = 0
+ keys = list(compiler.executables.keys())
+ for key in ['version', 'libraries', 'library_dirs',
+ 'object_switch', 'compile_switch',
+ 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']:
+ if key not in keys:
+ keys.append(key)
+ for key in keys:
+ if hasattr(compiler, key):
+ v = getattr(compiler, key)
+ mx = max(mx, len(key))
+ props.append((key, repr(v)))
+ fmt = '%-' + repr(mx+1) + 's = %s'
+ lines = [fmt % prop for prop in props]
+ return '\n'.join(lines)
+
+def CCompiler_show_customization(self):
+ """
+ Print the compiler customizations to stdout.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ Printing is only done if the distutils log threshold is < 2.
+
+ """
+ try:
+ self.get_version()
+ except Exception:
+ pass
+ if log._global_log.threshold<2:
+ print('*'*80)
+ print(self.__class__)
+ print(_compiler_to_string(self))
+ print('*'*80)
+
+replace_method(CCompiler, 'show_customization', CCompiler_show_customization)
+
+def CCompiler_customize(self, dist, need_cxx=0):
+ """
+ Do any platform-specific customization of a compiler instance.
+
+ This method calls `distutils.sysconfig.customize_compiler` for
+    platform-specific customization, as well as optionally removing a flag
+    to suppress spurious warnings when C++ code is being compiled.
+
+ Parameters
+ ----------
+ dist : object
+ This parameter is not used for anything.
+ need_cxx : bool, optional
+ Whether or not C++ has to be compiled. If so (True), the
+ ``"-Wstrict-prototypes"`` option is removed to prevent spurious
+ warnings. Default is False.
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ All the default options used by distutils can be extracted with::
+
+ from distutils import sysconfig
+ sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS',
+ 'CCSHARED', 'LDSHARED', 'SO')
+
+ """
+ # See FCompiler.customize for suggested usage.
+ log.info('customize %s' % (self.__class__.__name__))
+ customize_compiler(self)
+ if need_cxx:
+ # In general, distutils uses -Wstrict-prototypes, but this option is
+ # not valid for C++ code, only for C. Remove it if it's there to
+ # avoid a spurious warning on every compilation.
+ try:
+ self.compiler_so.remove('-Wstrict-prototypes')
+ except (AttributeError, ValueError):
+ pass
+
+ if hasattr(self, 'compiler') and 'cc' in self.compiler[0]:
+ if not self.compiler_cxx:
+ if self.compiler[0].startswith('gcc'):
+ a, b = 'gcc', 'g++'
+ else:
+ a, b = 'cc', 'c++'
+ self.compiler_cxx = [self.compiler[0].replace(a, b)]\
+ + self.compiler[1:]
+ else:
+ if hasattr(self, 'compiler'):
+ log.warn("#### %s #######" % (self.compiler,))
+ if not hasattr(self, 'compiler_cxx'):
+ log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__)
+
+
+ # check if compiler supports gcc style automatic dependencies
+ # run on every extension so skip for known good compilers
+ if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or
+ 'g++' in self.compiler[0] or
+ 'clang' in self.compiler[0]):
+ self._auto_depends = True
+ elif os.name == 'posix':
+ import tempfile
+ import shutil
+ tmpdir = tempfile.mkdtemp()
+ try:
+ fn = os.path.join(tmpdir, "file.c")
+ with open(fn, "w") as f:
+ f.write("int a;\n")
+ self.compile([fn], output_dir=tmpdir,
+ extra_preargs=['-MMD', '-MF', fn + '.d'])
+ self._auto_depends = True
+ except CompileError:
+ self._auto_depends = False
+ finally:
+ shutil.rmtree(tmpdir)
+
+ return
+
+replace_method(CCompiler, 'customize', CCompiler_customize)
+
+def simple_version_match(pat=r'[-.\d]+', ignore='', start=''):
+ """
+ Simple matching of version numbers, for use in CCompiler and FCompiler.
+
+ Parameters
+ ----------
+ pat : str, optional
+ A regular expression matching version numbers.
+ Default is ``r'[-.\\d]+'``.
+ ignore : str, optional
+ A regular expression matching patterns to skip.
+ Default is ``''``, in which case nothing is skipped.
+ start : str, optional
+ A regular expression matching the start of where to start looking
+ for version numbers.
+ Default is ``''``, in which case searching is started at the
+ beginning of the version string given to `matcher`.
+
+ Returns
+ -------
+ matcher : callable
+ A function that is appropriate to use as the ``.version_match``
+ attribute of a `CCompiler` class. `matcher` takes a single parameter,
+ a version string.
+
+ """
+ def matcher(self, version_string):
+ # version string may appear in the second line, so getting rid
+ # of new lines:
+ version_string = version_string.replace('\n', ' ')
+ pos = 0
+ if start:
+ m = re.match(start, version_string)
+ if not m:
+ return None
+ pos = m.end()
+ while True:
+ m = re.search(pat, version_string[pos:])
+ if not m:
+ return None
+ if ignore and re.match(ignore, m.group(0)):
+ pos = m.end()
+ continue
+ break
+ return m.group(0)
+ return matcher
+
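+# Illustrative only (hypothetical version string): the returned matcher takes
+# (self, version_string) and extracts the first version-like token, e.g.
+#
+#   match = simple_version_match(start=r'Foo')
+#   match(None, 'Foo Compiler 1.2.3')  # -> '1.2.3'
+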
+def CCompiler_get_version(self, force=False, ok_status=[0]):
+ """
+ Return compiler version, or None if compiler is not available.
+
+ Parameters
+ ----------
+ force : bool, optional
+ If True, force a new determination of the version, even if the
+ compiler already has a version attribute. Default is False.
+ ok_status : list of int, optional
+ The list of status values returned by the version look-up process
+ for which a version string is returned. If the status value is not
+ in `ok_status`, None is returned. Default is ``[0]``.
+
+ Returns
+ -------
+ version : str or None
+ Version string, in the format of `distutils.version.LooseVersion`.
+
+ """
+ if not force and hasattr(self, 'version'):
+ return self.version
+ self.find_executables()
+ try:
+ version_cmd = self.version_cmd
+ except AttributeError:
+ return None
+ if not version_cmd or not version_cmd[0]:
+ return None
+ try:
+ matcher = self.version_match
+ except AttributeError:
+ try:
+ pat = self.version_pattern
+ except AttributeError:
+ return None
+ def matcher(version_string):
+ m = re.match(pat, version_string)
+ if not m:
+ return None
+ version = m.group('version')
+ return version
+
+ try:
+ output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ status = exc.returncode
+ except OSError:
+ # match the historical returns for a parent
+ # exception class caught by exec_command()
+ status = 127
+ output = b''
+ else:
+ # output isn't actually a filepath but we do this
+ # for now to match previous distutils behavior
+ output = filepath_from_subprocess_output(output)
+ status = 0
+
+ version = None
+ if status in ok_status:
+ version = matcher(output)
+ if version:
+ version = LooseVersion(version)
+ self.version = version
+ return version
+
+replace_method(CCompiler, 'get_version', CCompiler_get_version)
+
+def CCompiler_cxx_compiler(self):
+ """
+ Return the C++ compiler.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ cxx : class instance
+ The C++ compiler, as a `CCompiler` instance.
+
+ """
+ if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
+ return self
+
+ cxx = copy(self)
+ cxx.compiler_cxx = cxx.compiler_cxx
+ cxx.compiler_so = [cxx.compiler_cxx[0]] + \
+ sanitize_cxx_flags(cxx.compiler_so[1:])
+ if (sys.platform.startswith(('aix', 'os400')) and
+ 'ld_so_aix' in cxx.linker_so[0]):
+ # AIX needs the ld_so_aix script included with Python
+ cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \
+ + cxx.linker_so[2:]
+ if sys.platform.startswith('os400'):
+            # This is required by IBM i 7.4 and previous releases for PRId64 in printf() calls.
+            cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')
+            # This works around a bug in gcc 10.3, which fails to handle the TLS init.
+ cxx.compiler_so.append('-fno-extern-tls-init')
+ cxx.linker_so.append('-fno-extern-tls-init')
+ else:
+ cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]
+ return cxx
+
+replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
+
+compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
+ "Intel C Compiler for 32-bit applications")
+compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
+ "Intel C Itanium Compiler for Itanium-based applications")
+compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
+ "Intel C Compiler for 64-bit applications")
+compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
+ "Intel C Compiler for 32-bit applications on Windows")
+compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
+ "Intel C Compiler for 64-bit applications on Windows")
+compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
+ "PathScale Compiler for SiCortex-based applications")
+compiler_class['arm'] = ('armccompiler', 'ArmCCompiler',
+ "Arm C Compiler")
+
+ccompiler._default_compilers += (('linux.*', 'intel'),
+ ('linux.*', 'intele'),
+ ('linux.*', 'intelem'),
+ ('linux.*', 'pathcc'),
+ ('nt', 'intelw'),
+ ('nt', 'intelemw'))
+
+if sys.platform == 'win32':
+ compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
+ "Mingw32 port of GNU C Compiler for Win32"\
+ "(for MSC built Python)")
+ if mingw32():
+ # On windows platforms, we want to default to mingw32 (gcc)
+ # because msvc can't build blitz stuff.
+ log.info('Setting mingw32 as default compiler for nt.')
+ ccompiler._default_compilers = (('nt', 'mingw32'),) \
+ + ccompiler._default_compilers
+
+
+_distutils_new_compiler = new_compiler
+def new_compiler (plat=None,
+ compiler=None,
+ verbose=None,
+ dry_run=0,
+ force=0):
+ # Try first C compilers from numpy.distutils.
+ if verbose is None:
+ verbose = log.get_threshold() <= log.INFO
+ if plat is None:
+ plat = os.name
+ try:
+ if compiler is None:
+ compiler = get_default_compiler(plat)
+ (module_name, class_name, long_description) = compiler_class[compiler]
+ except KeyError:
+ msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler" % compiler
+ raise DistutilsPlatformError(msg)
+ module_name = "numpy.distutils." + module_name
+ try:
+ __import__ (module_name)
+ except ImportError as e:
+ msg = str(e)
+ log.info('%s in numpy.distutils; trying from distutils',
+ str(msg))
+ module_name = module_name[6:]
+ try:
+ __import__(module_name)
+ except ImportError as e:
+ msg = str(e)
+ raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
+ module_name)
+ try:
+ module = sys.modules[module_name]
+ klass = vars(module)[class_name]
+ except KeyError:
+ raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
+ "in module '%s'") % (class_name, module_name))
+ compiler = klass(None, dry_run, force)
+ compiler.verbose = verbose
+ log.debug('new_compiler returns %s' % (klass))
+ return compiler
+
+ccompiler.new_compiler = new_compiler
+
+_distutils_gen_lib_options = gen_lib_options
+def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
+ # the version of this function provided by CPython allows the following
+ # to return lists, which are unpacked automatically:
+ # - compiler.runtime_library_dir_option
+ # our version extends the behavior to:
+ # - compiler.library_dir_option
+ # - compiler.library_option
+ # - compiler.find_library_file
+ r = _distutils_gen_lib_options(compiler, library_dirs,
+ runtime_library_dirs, libraries)
+ lib_opts = []
+ for i in r:
+ if is_sequence(i):
+ lib_opts.extend(list(i))
+ else:
+ lib_opts.append(i)
+ return lib_opts
+ccompiler.gen_lib_options = gen_lib_options
+
+# Also fix up the various compiler modules, which do
+# from distutils.ccompiler import gen_lib_options
+# Don't bother with mwerks, as we don't support Classic Mac.
+for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
+ _m = sys.modules.get('distutils.' + _cc + 'compiler')
+ if _m is not None:
+ setattr(_m, 'gen_lib_options', gen_lib_options)
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py
new file mode 100644
index 00000000..da550722
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/ccompiler_opt.py
@@ -0,0 +1,2659 @@
+"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware
+optimization, starting from parsing the command arguments, to managing the
+relation between the CPU baseline and dispatch-able features,
+also generating the required C headers and ending with compiling
+the sources with proper compiler's flags.
+
+`CCompilerOpt` doesn't provide runtime detection for the CPU features,
+instead only focuses on the compiler side, but it creates abstract C headers
+that can be used later for the final runtime dispatching process."""
+
+import atexit
+import inspect
+import os
+import pprint
+import re
+import subprocess
+import textwrap
+
+# These flags are used to compile any C++ source within Numpy.
+# They are chosen to have very few runtime dependencies.
+NPY_CXX_FLAGS = [
+ '-std=c++11', # Minimal standard version
+ '-D__STDC_VERSION__=0', # for compatibility with C headers
+ '-fno-exceptions', # no exception support
+ '-fno-rtti'] # no runtime type information
+
+
+class _Config:
+ """An abstract class holds all configurable attributes of `CCompilerOpt`,
+ these class attributes can be used to change the default behavior
+ of `CCompilerOpt` in order to fit other requirements.
+
+ Attributes
+ ----------
+ conf_nocache : bool
+ Set True to disable memory and file cache.
+ Default is False.
+
+ conf_noopt : bool
+        Set True to force the optimization to be disabled;
+        in this case `CCompilerOpt` still generates all
+        expected headers in order to not break the build.
+ Default is False.
+
+ conf_cache_factors : list
+        Extra factors added to the primary caching factors. The caching
+        factors are used to determine whether changes have happened that
+        require discarding the cache and rebuilding it. The primary factors
+        are the arguments of `CCompilerOpt` and the `CCompiler` properties
+        (type, flags, etc).
+        Default is a list of two items: the time of last modification of
+        `ccompiler_opt` and the value of the attribute "conf_noopt".
+
+    conf_tmp_path : str
+        The path of the temporary directory. Default is an auto-created
+ temporary directory via ``tempfile.mkdtemp()``.
+
+ conf_check_path : str
+        The path of the testing files. Each added CPU feature must have a
+        **C** source file that contains at least one intrinsic or instruction
+        related to this feature, so it can be tested against the compiler.
+ Default is ``./distutils/checks``.
+
+ conf_target_groups : dict
+ Extra tokens that can be reached from dispatch-able sources through
+ the special mark ``@targets``. Default is an empty dictionary.
+
+ **Notes**:
+ - case-insensitive for tokens and group names
+            - the sign '#' must appear at the beginning of a group name, and only within ``@targets``
+
+ **Example**:
+ .. code-block:: console
+
+ $ "@targets #avx_group other_tokens" > group_inside.c
+
+ >>> CCompilerOpt.conf_target_groups["avx_group"] = \\
+ "$werror $maxopt avx2 avx512f avx512_skx"
+ >>> cco = CCompilerOpt(cc_instance)
+ >>> cco.try_dispatch(["group_inside.c"])
+
+ conf_c_prefix : str
+ The prefix of public C definitions. Default is ``"NPY_"``.
+
+ conf_c_prefix_ : str
+ The prefix of internal C definitions. Default is ``"NPY__"``.
+
+ conf_cc_flags : dict
+        Nested dictionaries defining several compiler flags
+        linked to some major functions; the main keys
+        represent compiler names and the sub-keys represent
+        flag names. The default already covers all supported
+        **C** compilers.
+
+ Sub-keys explained as follows:
+
+ "native": str or None
+ used by argument option `native`, to detect the current
+ machine support via the compiler.
+ "werror": str or None
+ utilized to treat warning as errors during testing CPU features
+ against the compiler and also for target's policy `$werror`
+ via dispatch-able sources.
+ "maxopt": str or None
+                utilized for the target's policy '$maxopt'; the value should
+                contain the maximum acceptable optimization flag for the
+                compiler, e.g. `'-O3'` for gcc.
+
+ **Notes**:
+ * case-sensitive for compiler names and flags
+ * use space to separate multiple flags
+            * every flag is tested against the compiler and skipped
+              if it's not applicable.
+
+ conf_min_features : dict
+        A dictionary defining the CPU features used for the
+        argument option `'min'`; the keys represent CPU architecture
+        names, e.g. `'x86'`. Default values provide the best effort
+        across a wide range of user platforms.
+
+ **Note**: case-sensitive for architecture names.
+
+ conf_features : dict
+        Nested dictionaries used for identifying the CPU features.
+        The primary key is a feature name or a group name
+        that gathers several features. Default values cover all
+        supported features but leave out the major options like "flags";
+        those undefined options are handled by the method
+        `conf_features_partial()`. The default value covers almost all
+        CPU features for *X86*, *IBM/Power64* and *ARM 7/8*.
+
+ Sub-keys explained as follows:
+
+ "implies" : str or list, optional,
+ List of CPU feature names to be implied by it,
+ the feature name must be defined within `conf_features`.
+ Default is None.
+
+ "flags": str or list, optional
+ List of compiler flags. Default is None.
+
+ "detect": str or list, optional
+                List of CPU feature names that are required to be detected
+                at runtime. By default, it's the feature name, or the features
+                in "group" if specified.
+
+ "implies_detect": bool, optional
+ If True, all "detect" of implied features will be combined.
+ Default is True. see `feature_detect()`.
+
+ "group": str or list, optional
+ Same as "implies" but doesn't require the feature name to be
+ defined within `conf_features`.
+
+ "interest": int, required
+ a key for sorting CPU features
+
+ "headers": str or list, optional
+ intrinsics C header file
+
+ "disable": str, optional
+                force-disable the feature; the string value should contain
+                the reason for disabling it.
+
+ "autovec": bool or None, optional
+                True or False to declare whether the CPU feature can be
+                auto-vectorized by the compiler.
+                By default (None), it is treated as True if the feature
+                contains at least one applicable flag. See `feature_can_autovec()`.
+
+ "extra_checks": str or list, optional
+ Extra test case names for the CPU feature that need to be tested
+ against the compiler.
+
+ Each test case must have a C file named ``extra_xxxx.c``, where
+ ``xxxx`` is the case name in lower case, under 'conf_check_path'.
+ It should contain at least one intrinsic or function related to the test case.
+
+                If the compiler is able to successfully compile the C file then `CCompilerOpt`
+ will add a C ``#define`` for it into the main dispatch header, e.g.
+ ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case.
+
+ **NOTES**:
+            * space can be used as a separator with options that support "str or list"
+            * case-sensitive for all values, and feature names must be in upper-case.
+            * if flags aren't applicable, they will be skipped rather than disabling the
+              CPU feature
+            * the CPU feature will be disabled if the compiler fails to compile
+              the test file
+ """
+ conf_nocache = False
+ conf_noopt = False
+ conf_cache_factors = None
+ conf_tmp_path = None
+ conf_check_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "checks"
+ )
+ conf_target_groups = {}
+ conf_c_prefix = 'NPY_'
+ conf_c_prefix_ = 'NPY__'
+ conf_cc_flags = dict(
+ gcc = dict(
+ # native should always fail on arm and ppc64,
+ # native usually works only with x86
+ native = '-march=native',
+ opt = '-O3',
+ werror = '-Werror',
+ ),
+ clang = dict(
+ native = '-march=native',
+ opt = "-O3",
+ # One of the following flags needs to be applicable for Clang to
+ # guarantee the sanity of the testing process, however in certain
+ # cases `-Werror` gets skipped during the availability test due to
+ # "unused arguments" warnings.
+ # see https://github.com/numpy/numpy/issues/19624
+ werror = '-Werror=switch -Werror',
+ ),
+ icc = dict(
+ native = '-xHost',
+ opt = '-O3',
+ werror = '-Werror',
+ ),
+ iccw = dict(
+ native = '/QxHost',
+ opt = '/O3',
+ werror = '/Werror',
+ ),
+ msvc = dict(
+ native = None,
+ opt = '/O2',
+ werror = '/WX',
+ )
+ )
+ conf_min_features = dict(
+ x86 = "SSE SSE2",
+ x64 = "SSE SSE2 SSE3",
+ ppc64 = '', # play it safe
+ ppc64le = "VSX VSX2",
+ s390x = '',
+ armhf = '', # play it safe
+ aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD"
+ )
+ conf_features = dict(
+ # X86
+ SSE = dict(
+ interest=1, headers="xmmintrin.h",
+ # enabling SSE without SSE2 is useless also
+ # it's non-optional for x86_64
+ implies="SSE2"
+ ),
+ SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"),
+ SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"),
+ SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"),
+ SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"),
+ POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"),
+ SSE42 = dict(interest=7, implies="POPCNT"),
+ AVX = dict(
+ interest=8, implies="SSE42", headers="immintrin.h",
+ implies_detect=False
+ ),
+ XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"),
+ FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"),
+ F16C = dict(interest=11, implies="AVX"),
+ FMA3 = dict(interest=12, implies="F16C"),
+ AVX2 = dict(interest=13, implies="F16C"),
+ AVX512F = dict(
+ interest=20, implies="FMA3 AVX2", implies_detect=False,
+ extra_checks="AVX512F_REDUCE"
+ ),
+ AVX512CD = dict(interest=21, implies="AVX512F"),
+ AVX512_KNL = dict(
+ interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
+ detect="AVX512_KNL", implies_detect=False
+ ),
+ AVX512_KNM = dict(
+ interest=41, implies="AVX512_KNL",
+ group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ",
+ detect="AVX512_KNM", implies_detect=False
+ ),
+ AVX512_SKX = dict(
+ interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
+ detect="AVX512_SKX", implies_detect=False,
+ extra_checks="AVX512BW_MASK AVX512DQ_MASK"
+ ),
+ AVX512_CLX = dict(
+ interest=43, implies="AVX512_SKX", group="AVX512VNNI",
+ detect="AVX512_CLX"
+ ),
+ AVX512_CNL = dict(
+ interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI",
+ detect="AVX512_CNL", implies_detect=False
+ ),
+ AVX512_ICL = dict(
+ interest=45, implies="AVX512_CLX AVX512_CNL",
+ group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ",
+ detect="AVX512_ICL", implies_detect=False
+ ),
+ # IBM/Power
+ ## Power7/ISA 2.06
+ VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"),
+ ## Power8/ISA 2.07
+ VSX2 = dict(interest=2, implies="VSX", implies_detect=False),
+ ## Power9/ISA 3.00
+ VSX3 = dict(interest=3, implies="VSX2", implies_detect=False),
+ ## Power10/ISA 3.1
+ VSX4 = dict(interest=4, implies="VSX3", implies_detect=False,
+ extra_checks="VSX4_MMA"),
+ # IBM/Z
+ ## VX(z13) support
+ VX = dict(interest=1, headers="vecintrin.h"),
+ ## Vector-Enhancements Facility
+ VXE = dict(interest=2, implies="VX", implies_detect=False),
+ ## Vector-Enhancements Facility 2
+ VXE2 = dict(interest=3, implies="VXE", implies_detect=False),
+ # ARM
+ NEON = dict(interest=1, headers="arm_neon.h"),
+ NEON_FP16 = dict(interest=2, implies="NEON"),
+ ## FMA
+ NEON_VFPV4 = dict(interest=3, implies="NEON_FP16"),
+ ## Advanced SIMD
+ ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False),
+ ## ARMv8.2 half-precision & vector arithm
+ ASIMDHP = dict(interest=5, implies="ASIMD"),
+ ## ARMv8.2 dot product
+ ASIMDDP = dict(interest=6, implies="ASIMD"),
+ ## ARMv8.2 Single & half-precision Multiply
+ ASIMDFHM = dict(interest=7, implies="ASIMDHP"),
+ )
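+    # For example, enabling SSE41 pulls in SSSE3 -> SSE3 -> SSE2 -> SSE
+    # through the "implies" chains defined above.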
+ def conf_features_partial(self):
+ """Return a dictionary of supported CPU features by the platform,
+ and accumulate the rest of undefined options in `conf_features`,
+ the returned dict has same rules and notes in
+ class attribute `conf_features`, also its override
+ any options that been set in 'conf_features'.
+ """
+ if self.cc_noopt:
+ # optimization is disabled
+ return {}
+
+ on_x86 = self.cc_on_x86 or self.cc_on_x64
+ is_unix = self.cc_is_gcc or self.cc_is_clang
+
+ if on_x86 and is_unix: return dict(
+ SSE = dict(flags="-msse"),
+ SSE2 = dict(flags="-msse2"),
+ SSE3 = dict(flags="-msse3"),
+ SSSE3 = dict(flags="-mssse3"),
+ SSE41 = dict(flags="-msse4.1"),
+ POPCNT = dict(flags="-mpopcnt"),
+ SSE42 = dict(flags="-msse4.2"),
+ AVX = dict(flags="-mavx"),
+ F16C = dict(flags="-mf16c"),
+ XOP = dict(flags="-mxop"),
+ FMA4 = dict(flags="-mfma4"),
+ FMA3 = dict(flags="-mfma"),
+ AVX2 = dict(flags="-mavx2"),
+ AVX512F = dict(flags="-mavx512f -mno-mmx"),
+ AVX512CD = dict(flags="-mavx512cd"),
+ AVX512_KNL = dict(flags="-mavx512er -mavx512pf"),
+ AVX512_KNM = dict(
+ flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq"
+ ),
+ AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"),
+ AVX512_CLX = dict(flags="-mavx512vnni"),
+ AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"),
+ AVX512_ICL = dict(
+ flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq"
+ )
+ )
+ if on_x86 and self.cc_is_icc: return dict(
+ SSE = dict(flags="-msse"),
+ SSE2 = dict(flags="-msse2"),
+ SSE3 = dict(flags="-msse3"),
+ SSSE3 = dict(flags="-mssse3"),
+ SSE41 = dict(flags="-msse4.1"),
+ POPCNT = {},
+ SSE42 = dict(flags="-msse4.2"),
+ AVX = dict(flags="-mavx"),
+ F16C = {},
+ XOP = dict(disable="Intel Compiler doesn't support it"),
+ FMA4 = dict(disable="Intel Compiler doesn't support it"),
+ # Intel Compiler doesn't support AVX2 or FMA3 independently
+ FMA3 = dict(
+ implies="F16C AVX2", flags="-march=core-avx2"
+ ),
+ AVX2 = dict(implies="FMA3", flags="-march=core-avx2"),
+ # Intel Compiler doesn't support AVX512F or AVX512CD independently
+ AVX512F = dict(
+ implies="AVX2 AVX512CD", flags="-march=common-avx512"
+ ),
+ AVX512CD = dict(
+ implies="AVX2 AVX512F", flags="-march=common-avx512"
+ ),
+ AVX512_KNL = dict(flags="-xKNL"),
+ AVX512_KNM = dict(flags="-xKNM"),
+ AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"),
+ AVX512_CLX = dict(flags="-xCASCADELAKE"),
+ AVX512_CNL = dict(flags="-xCANNONLAKE"),
+ AVX512_ICL = dict(flags="-xICELAKE-CLIENT"),
+ )
+ if on_x86 and self.cc_is_iccw: return dict(
+ SSE = dict(flags="/arch:SSE"),
+ SSE2 = dict(flags="/arch:SSE2"),
+ SSE3 = dict(flags="/arch:SSE3"),
+ SSSE3 = dict(flags="/arch:SSSE3"),
+ SSE41 = dict(flags="/arch:SSE4.1"),
+ POPCNT = {},
+ SSE42 = dict(flags="/arch:SSE4.2"),
+ AVX = dict(flags="/arch:AVX"),
+ F16C = {},
+ XOP = dict(disable="Intel Compiler doesn't support it"),
+ FMA4 = dict(disable="Intel Compiler doesn't support it"),
+ # Intel Compiler doesn't support FMA3 or AVX2 independently
+ FMA3 = dict(
+ implies="F16C AVX2", flags="/arch:CORE-AVX2"
+ ),
+ AVX2 = dict(
+ implies="FMA3", flags="/arch:CORE-AVX2"
+ ),
+ # Intel Compiler doesn't support AVX512F or AVX512CD independently
+ AVX512F = dict(
+ implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512"
+ ),
+ AVX512CD = dict(
+ implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512"
+ ),
+ AVX512_KNL = dict(flags="/Qx:KNL"),
+ AVX512_KNM = dict(flags="/Qx:KNM"),
+ AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"),
+ AVX512_CLX = dict(flags="/Qx:CASCADELAKE"),
+ AVX512_CNL = dict(flags="/Qx:CANNONLAKE"),
+ AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT")
+ )
+ if on_x86 and self.cc_is_msvc: return dict(
+ SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {},
+ SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {},
+ SSE3 = {},
+ SSSE3 = {},
+ SSE41 = {},
+ POPCNT = dict(headers="nmmintrin.h"),
+ SSE42 = {},
+ AVX = dict(flags="/arch:AVX"),
+ F16C = {},
+ XOP = dict(headers="ammintrin.h"),
+ FMA4 = dict(headers="ammintrin.h"),
+ # MSVC doesn't support FMA3 or AVX2 independently
+ FMA3 = dict(
+ implies="F16C AVX2", flags="/arch:AVX2"
+ ),
+ AVX2 = dict(
+ implies="F16C FMA3", flags="/arch:AVX2"
+ ),
+ # MSVC doesn't support AVX512F or AVX512CD independently,
+ # always generate instructions belong to (VL/VW/DQ)
+ AVX512F = dict(
+ implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512"
+ ),
+ AVX512CD = dict(
+ implies="AVX512F AVX512_SKX", flags="/arch:AVX512"
+ ),
+ AVX512_KNL = dict(
+ disable="MSVC compiler doesn't support it"
+ ),
+ AVX512_KNM = dict(
+ disable="MSVC compiler doesn't support it"
+ ),
+ AVX512_SKX = dict(flags="/arch:AVX512"),
+ AVX512_CLX = {},
+ AVX512_CNL = {},
+ AVX512_ICL = {}
+ )
+
+ on_power = self.cc_on_ppc64le or self.cc_on_ppc64
+ if on_power:
+ partial = dict(
+ VSX = dict(
+ implies=("VSX2" if self.cc_on_ppc64le else ""),
+ flags="-mvsx"
+ ),
+ VSX2 = dict(
+ flags="-mcpu=power8", implies_detect=False
+ ),
+ VSX3 = dict(
+ flags="-mcpu=power9 -mtune=power9", implies_detect=False
+ ),
+ VSX4 = dict(
+ flags="-mcpu=power10 -mtune=power10", implies_detect=False
+ )
+ )
+ if self.cc_is_clang:
+ partial["VSX"]["flags"] = "-maltivec -mvsx"
+ partial["VSX2"]["flags"] = "-mpower8-vector"
+ partial["VSX3"]["flags"] = "-mpower9-vector"
+ partial["VSX4"]["flags"] = "-mpower10-vector"
+
+ return partial
+
+ on_zarch = self.cc_on_s390x
+ if on_zarch:
+ partial = dict(
+ VX = dict(
+ flags="-march=arch11 -mzvector"
+ ),
+ VXE = dict(
+ flags="-march=arch12", implies_detect=False
+ ),
+ VXE2 = dict(
+ flags="-march=arch13", implies_detect=False
+ )
+ )
+
+ return partial
+
+
+ if self.cc_on_aarch64 and is_unix: return dict(
+ NEON = dict(
+ implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True
+ ),
+ NEON_FP16 = dict(
+ implies="NEON NEON_VFPV4 ASIMD", autovec=True
+ ),
+ NEON_VFPV4 = dict(
+ implies="NEON NEON_FP16 ASIMD", autovec=True
+ ),
+ ASIMD = dict(
+ implies="NEON NEON_FP16 NEON_VFPV4", autovec=True
+ ),
+ ASIMDHP = dict(
+ flags="-march=armv8.2-a+fp16"
+ ),
+ ASIMDDP = dict(
+ flags="-march=armv8.2-a+dotprod"
+ ),
+ ASIMDFHM = dict(
+ flags="-march=armv8.2-a+fp16fml"
+ ),
+ )
+ if self.cc_on_armhf and is_unix: return dict(
+ NEON = dict(
+ flags="-mfpu=neon"
+ ),
+ NEON_FP16 = dict(
+ flags="-mfpu=neon-fp16 -mfp16-format=ieee"
+ ),
+ NEON_VFPV4 = dict(
+ flags="-mfpu=neon-vfpv4",
+ ),
+ ASIMD = dict(
+ flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd",
+ ),
+ ASIMDHP = dict(
+ flags="-march=armv8.2-a+fp16"
+ ),
+ ASIMDDP = dict(
+ flags="-march=armv8.2-a+dotprod",
+ ),
+ ASIMDFHM = dict(
+ flags="-march=armv8.2-a+fp16fml"
+ )
+ )
+ # TODO: ARM MSVC
+ return {}
+
+ def __init__(self):
+ if self.conf_tmp_path is None:
+ import shutil
+ import tempfile
+ tmp = tempfile.mkdtemp()
+ def rm_temp():
+ try:
+ shutil.rmtree(tmp)
+ except OSError:
+ pass
+ atexit.register(rm_temp)
+ self.conf_tmp_path = tmp
+
+ if self.conf_cache_factors is None:
+ self.conf_cache_factors = [
+ os.path.getmtime(__file__),
+ self.conf_nocache
+ ]
+
+class _Distutils:
+ """A helper class that provides a collection of fundamental methods
+ implemented in a top of Python and NumPy Distutils.
+
+ The idea behind this class is to gather all methods that it may
+ need to override in case of reuse 'CCompilerOpt' in environment
+ different than of what NumPy has.
+
+ Parameters
+ ----------
+ ccompiler : `CCompiler`
+ The generate instance that returned from `distutils.ccompiler.new_compiler()`.
+ """
+ def __init__(self, ccompiler):
+ self._ccompiler = ccompiler
+
+ def dist_compile(self, sources, flags, ccompiler=None, **kwargs):
+ """Wrap CCompiler.compile()"""
+ assert(isinstance(sources, list))
+ assert(isinstance(flags, list))
+ flags = kwargs.pop("extra_postargs", []) + flags
+ if not ccompiler:
+ ccompiler = self._ccompiler
+
+ return ccompiler.compile(sources, extra_postargs=flags, **kwargs)
+
+ def dist_test(self, source, flags, macros=[]):
+ """Return True if 'CCompiler.compile()' able to compile
+ a source file with certain flags.
+ """
+ assert(isinstance(source, str))
+ from distutils.errors import CompileError
+        cc = self._ccompiler
+ bk_spawn = getattr(cc, 'spawn', None)
+ if bk_spawn:
+ cc_type = getattr(self._ccompiler, "compiler_type", "")
+ if cc_type in ("msvc",):
+ setattr(cc, 'spawn', self._dist_test_spawn_paths)
+ else:
+ setattr(cc, 'spawn', self._dist_test_spawn)
+ test = False
+ try:
+ self.dist_compile(
+ [source], flags, macros=macros, output_dir=self.conf_tmp_path
+ )
+ test = True
+ except CompileError as e:
+ self.dist_log(str(e), stderr=True)
+ if bk_spawn:
+ setattr(cc, 'spawn', bk_spawn)
+ return test
+
+ def dist_info(self):
+ """
+ Return a tuple containing info about (platform, compiler, extra_args),
+ required by the abstract class '_CCompiler' for discovering the
+ platform environment. This is also used as a cache factor in order
+ to detect any changes happening from outside.
+ """
+ if hasattr(self, "_dist_info"):
+ return self._dist_info
+
+ cc_type = getattr(self._ccompiler, "compiler_type", '')
+ if cc_type in ("intelem", "intelemw"):
+ platform = "x86_64"
+ elif cc_type in ("intel", "intelw", "intele"):
+ platform = "x86"
+ else:
+ from distutils.util import get_platform
+ platform = get_platform()
+
+ cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", ''))
+ if not cc_type or cc_type == "unix":
+ if hasattr(cc_info, "__iter__"):
+ compiler = cc_info[0]
+ else:
+ compiler = str(cc_info)
+ else:
+ compiler = cc_type
+
+ if hasattr(cc_info, "__iter__") and len(cc_info) > 1:
+ extra_args = ' '.join(cc_info[1:])
+ else:
+ extra_args = os.environ.get("CFLAGS", "")
+ extra_args += os.environ.get("CPPFLAGS", "")
+
+ self._dist_info = (platform, compiler, extra_args)
+ return self._dist_info
+
+ @staticmethod
+ def dist_error(*args):
+ """Raise a compiler error"""
+ from distutils.errors import CompileError
+ raise CompileError(_Distutils._dist_str(*args))
+
+ @staticmethod
+ def dist_fatal(*args):
+ """Raise a distutils error"""
+ from distutils.errors import DistutilsError
+ raise DistutilsError(_Distutils._dist_str(*args))
+
+ @staticmethod
+ def dist_log(*args, stderr=False):
+ """Print a console message"""
+ from numpy.distutils import log
+ out = _Distutils._dist_str(*args)
+ if stderr:
+ log.warn(out)
+ else:
+ log.info(out)
+
+ @staticmethod
+ def dist_load_module(name, path):
+ """Load a module from file, required by the abstract class '_Cache'."""
+ from .misc_util import exec_mod_from_location
+ try:
+ return exec_mod_from_location(name, path)
+ except Exception as e:
+ _Distutils.dist_log(e, stderr=True)
+ return None
+
+ @staticmethod
+ def _dist_str(*args):
+ """Return a string to print by log and errors."""
+ def to_str(arg):
+ if not isinstance(arg, str) and hasattr(arg, '__iter__'):
+ ret = []
+ for a in arg:
+ ret.append(to_str(a))
+ return '('+ ' '.join(ret) + ')'
+ return str(arg)
+
+ stack = inspect.stack()[2]
+ start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno)
+ out = ' '.join([
+ to_str(a)
+ for a in (*args,)
+ ])
+ return start + out
+
+ def _dist_test_spawn_paths(self, cmd, display=None):
+ """
+        Fix the msvc SDK ENV path, the same as distutils does;
+        without it we get "c1: fatal error C1356: unable to find mspdbcore.dll".
+ """
+ if not hasattr(self._ccompiler, "_paths"):
+ self._dist_test_spawn(cmd)
+ return
+ old_path = os.getenv("path")
+ try:
+ os.environ["path"] = self._ccompiler._paths
+ self._dist_test_spawn(cmd)
+ finally:
+ os.environ["path"] = old_path
+
+ _dist_warn_regex = re.compile(
+ # intel and msvc compilers don't raise
+ # fatal errors when flags are wrong or unsupported
+ ".*("
+ "warning D9002|" # msvc, it should be work with any language.
+ "invalid argument for option" # intel
+ ").*"
+ )
+ @staticmethod
+ def _dist_test_spawn(cmd, display=None):
+ try:
+ o = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
+ text=True)
+ if o and re.match(_Distutils._dist_warn_regex, o):
+ _Distutils.dist_error(
+ "Flags in command", cmd ,"aren't supported by the compiler"
+ ", output -> \n%s" % o
+ )
+ except subprocess.CalledProcessError as exc:
+ o = exc.output
+ s = exc.returncode
+ except OSError as e:
+ o = e
+ s = 127
+ else:
+ return None
+ _Distutils.dist_error(
+ "Command", cmd, "failed with exit status %d output -> \n%s" % (
+ s, o
+ ))
+
+_share_cache = {}
+class _Cache:
+ """An abstract class handles caching functionality, provides two
+ levels of caching, in-memory by share instances attributes among
+ each other and by store attributes into files.
+
+ **Note**:
+ any attributes that start with ``_`` or ``conf_`` will be ignored.
+
+ Parameters
+ ----------
+ cache_path : str or None
+        The path of the cache file; if None, the file cache is disabled.
+
+ *factors :
+        The caching factors to use in addition to `conf_cache_factors`.
+
+ Attributes
+ ----------
+ cache_private : set
+        Holds the attributes that need to be skipped by the "in-memory cache".
+
+ cache_infile : bool
+        Set during initialization of this class to indicate whether the cache
+        could be loaded from the cache path specified in 'cache_path'.
+ """
+
+ # skip attributes from cache
+ _cache_ignore = re.compile("^(_|conf_)")
+
+ def __init__(self, cache_path=None, *factors):
+ self.cache_me = {}
+ self.cache_private = set()
+ self.cache_infile = False
+ self._cache_path = None
+
+ if self.conf_nocache:
+ self.dist_log("cache is disabled by `Config`")
+ return
+
+ self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors)
+ self._cache_path = cache_path
+ if cache_path:
+ if os.path.exists(cache_path):
+ self.dist_log("load cache from file ->", cache_path)
+ cache_mod = self.dist_load_module("cache", cache_path)
+ if not cache_mod:
+ self.dist_log(
+ "unable to load the cache file as a module",
+ stderr=True
+ )
+ elif not hasattr(cache_mod, "hash") or \
+ not hasattr(cache_mod, "data"):
+ self.dist_log("invalid cache file", stderr=True)
+ elif self._cache_hash == cache_mod.hash:
+ self.dist_log("hit the file cache")
+ for attr, val in cache_mod.data.items():
+ setattr(self, attr, val)
+ self.cache_infile = True
+ else:
+ self.dist_log("miss the file cache")
+
+ if not self.cache_infile:
+ other_cache = _share_cache.get(self._cache_hash)
+ if other_cache:
+ self.dist_log("hit the memory cache")
+ for attr, val in other_cache.__dict__.items():
+ if attr in other_cache.cache_private or \
+ re.match(self._cache_ignore, attr):
+ continue
+ setattr(self, attr, val)
+
+ _share_cache[self._cache_hash] = self
+ atexit.register(self.cache_flush)
+
+ def __del__(self):
+ for h, o in _share_cache.items():
+ if o == self:
+ _share_cache.pop(h)
+ break
+
+ def cache_flush(self):
+ """
+ Force update the cache.
+ """
+ if not self._cache_path:
+ return
+ # TODO: don't write if the cache doesn't change
+ self.dist_log("write cache to path ->", self._cache_path)
+ cdict = self.__dict__.copy()
+ for attr in self.__dict__.keys():
+ if re.match(self._cache_ignore, attr):
+ cdict.pop(attr)
+
+ d = os.path.dirname(self._cache_path)
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ repr_dict = pprint.pformat(cdict, compact=True)
+ with open(self._cache_path, "w") as f:
+ f.write(textwrap.dedent("""\
+ # AUTOGENERATED DON'T EDIT
+ # Please make changes to the code generator \
+ (distutils/ccompiler_opt.py)
+ hash = {}
+ data = \\
+ """).format(self._cache_hash))
+ f.write(repr_dict)
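+
+        # The written cache file is importable Python of the form (values
+        # illustrative):
+        #   hash = 3141592653
+        #   data = \
+        #   {'cc_is_gcc': True, 'cc_march': 'x64', ...}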
+
+ def cache_hash(self, *factors):
+ # is there a built-in non-crypto hash?
+ # sdbm
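+        # equivalent to: hash = hash * 65599 + ord(char), since
+        # (h << 6) + (h << 16) - h == h * 65599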
+ chash = 0
+ for f in factors:
+ for char in str(f):
+ chash = ord(char) + (chash << 6) + (chash << 16) - chash
+ chash &= 0xFFFFFFFF
+ return chash
+
+ @staticmethod
+ def me(cb):
+ """
+ A static method that can be treated as a decorator to
+ dynamically cache certain methods.
+ """
+ def cache_wrap_me(self, *args, **kwargs):
+ # good for normal args
+ cache_key = str((
+ cb.__name__, *args, *kwargs.keys(), *kwargs.values()
+ ))
+ if cache_key in self.cache_me:
+ return self.cache_me[cache_key]
+ ccb = cb(self, *args, **kwargs)
+ self.cache_me[cache_key] = ccb
+ return ccb
+ return cache_wrap_me
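+
+    # Illustrative usage, as done for `cc_test_flags()` further below:
+    #
+    #   @_Cache.me
+    #   def cc_test_flags(self, flags):
+    #       ...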
+
+class _CCompiler:
+ """A helper class for `CCompilerOpt` containing all utilities that
+ related to the fundamental compiler's functions.
+
+ Attributes
+ ----------
+ cc_on_x86 : bool
+ True when the target architecture is 32-bit x86
+ cc_on_x64 : bool
+ True when the target architecture is 64-bit x86
+ cc_on_ppc64 : bool
+ True when the target architecture is 64-bit big-endian powerpc
+ cc_on_ppc64le : bool
+        True when the target architecture is 64-bit little-endian powerpc
+ cc_on_s390x : bool
+ True when the target architecture is IBM/ZARCH on linux
+ cc_on_armhf : bool
+ True when the target architecture is 32-bit ARMv7+
+ cc_on_aarch64 : bool
+ True when the target architecture is 64-bit Armv8-a+
+ cc_on_noarch : bool
+ True when the target architecture is unknown or not supported
+ cc_is_gcc : bool
+ True if the compiler is GNU or
+ if the compiler is unknown
+ cc_is_clang : bool
+ True if the compiler is Clang
+ cc_is_icc : bool
+ True if the compiler is Intel compiler (unix like)
+ cc_is_iccw : bool
+ True if the compiler is Intel compiler (msvc like)
+ cc_is_nocc : bool
+        True if the compiler isn't supported directly.
+        Note: this causes a fallback to gcc
+ cc_has_debug : bool
+ True if the compiler has debug flags
+ cc_has_native : bool
+ True if the compiler has native flags
+ cc_noopt : bool
+ True if the compiler has definition 'DISABLE_OPT*',
+ or 'cc_on_noarch' is True
+ cc_march : str
+ The target architecture name, or "unknown" if
+ the architecture isn't supported
+ cc_name : str
+ The compiler name, or "unknown" if the compiler isn't supported
+ cc_flags : dict
+ Dictionary containing the initialized flags of `_Config.conf_cc_flags`
+ """
+ def __init__(self):
+ if hasattr(self, "cc_is_cached"):
+ return
+ # attr regex compiler-expression
+ detect_arch = (
+ ("cc_on_x64", ".*(x|x86_|amd)64.*", ""),
+ ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""),
+ ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*",
+ "defined(__powerpc64__) && "
+ "defined(__LITTLE_ENDIAN__)"),
+ ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*",
+ "defined(__powerpc64__) && "
+ "defined(__BIG_ENDIAN__)"),
+ ("cc_on_aarch64", ".*(aarch64|arm64).*", ""),
+ ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || "
+ "defined(__ARM_ARCH_7A__)"),
+ ("cc_on_s390x", ".*s390x.*", ""),
+ # undefined platform
+ ("cc_on_noarch", "", ""),
+ )
+ detect_compiler = (
+ ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""),
+ ("cc_is_clang", ".*clang.*", ""),
+ # intel msvc like
+ ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""),
+ ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like
+ ("cc_is_msvc", ".*msvc.*", ""),
+            # an undefined compiler will be treated as gcc
+ ("cc_is_nocc", "", ""),
+ )
+ detect_args = (
+ ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
+ ("cc_has_native", ".*(-march=native|-xHost|/QxHost).*", ""),
+            # in case the class runs with -DNPY_DISABLE_OPTIMIZATION
+ ("cc_noopt", ".*DISABLE_OPT.*", ""),
+ )
+
+ dist_info = self.dist_info()
+ platform, compiler_info, extra_args = dist_info
+ # set False to all attrs
+ for section in (detect_arch, detect_compiler, detect_args):
+ for attr, rgex, cexpr in section:
+ setattr(self, attr, False)
+
+ for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)):
+ for attr, rgex, cexpr in detect:
+ if rgex and not re.match(rgex, searchin, re.IGNORECASE):
+ continue
+ if cexpr and not self.cc_test_cexpr(cexpr):
+ continue
+ setattr(self, attr, True)
+ break
+
+ for attr, rgex, cexpr in detect_args:
+ if rgex and not re.match(rgex, extra_args, re.IGNORECASE):
+ continue
+ if cexpr and not self.cc_test_cexpr(cexpr):
+ continue
+ setattr(self, attr, True)
+
+ if self.cc_on_noarch:
+ self.dist_log(
+ "unable to detect CPU architecture which lead to disable the optimization. "
+ f"check dist_info:<<\n{dist_info}\n>>",
+ stderr=True
+ )
+ self.cc_noopt = True
+
+ if self.conf_noopt:
+ self.dist_log("Optimization is disabled by the Config", stderr=True)
+ self.cc_noopt = True
+
+ if self.cc_is_nocc:
+ """
+            mingw can be treated as gcc, and so can xlc: even though it is
+            based on clang, it still has the same gcc optimization flags.
+ """
+ self.dist_log(
+ "unable to detect compiler type which leads to treating it as GCC. "
+ "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC."
+ f"check dist_info:<<\n{dist_info}\n>>",
+ stderr=True
+ )
+ self.cc_is_gcc = True
+
+ self.cc_march = "unknown"
+ for arch in ("x86", "x64", "ppc64", "ppc64le",
+ "armhf", "aarch64", "s390x"):
+ if getattr(self, "cc_on_" + arch):
+ self.cc_march = arch
+ break
+
+ self.cc_name = "unknown"
+ for name in ("gcc", "clang", "iccw", "icc", "msvc"):
+ if getattr(self, "cc_is_" + name):
+ self.cc_name = name
+ break
+
+ self.cc_flags = {}
+ compiler_flags = self.conf_cc_flags.get(self.cc_name)
+ if compiler_flags is None:
+ self.dist_fatal(
+ "undefined flag for compiler '%s', "
+ "leave an empty dict instead" % self.cc_name
+ )
+ for name, flags in compiler_flags.items():
+ self.cc_flags[name] = nflags = []
+ if flags:
+ assert(isinstance(flags, str))
+ flags = flags.split()
+ for f in flags:
+ if self.cc_test_flags([f]):
+ nflags.append(f)
+
+ self.cc_is_cached = True
+
+ @_Cache.me
+ def cc_test_flags(self, flags):
+ """
+ Returns True if the compiler supports 'flags'.
+ """
+ assert(isinstance(flags, list))
+ self.dist_log("testing flags", flags)
+ test_path = os.path.join(self.conf_check_path, "test_flags.c")
+ test = self.dist_test(test_path, flags)
+ if not test:
+ self.dist_log("testing failed", stderr=True)
+ return test
+
+ @_Cache.me
+ def cc_test_cexpr(self, cexpr, flags=[]):
+ """
+ Same as the above but supports compile-time expressions.
+ """
+ self.dist_log("testing compiler expression", cexpr)
+ test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c")
+ with open(test_path, "w") as fd:
+ fd.write(textwrap.dedent(f"""\
+ #if !({cexpr})
+ #error "unsupported expression"
+ #endif
+ int dummy;
+ """))
+ test = self.dist_test(test_path, flags)
+ if not test:
+ self.dist_log("testing failed", stderr=True)
+ return test
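+
+    # Illustrative call, mirroring how __init__ probes architectures above:
+    #
+    #   self.cc_test_cexpr(
+    #       "defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)"
+    #   )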
+
+ def cc_normalize_flags(self, flags):
+ """
+        Remove the conflicts caused by gathering the flags of implied features.
+
+ Parameters
+ ----------
+ 'flags' list, compiler flags
+ flags should be sorted from the lowest to the highest interest.
+
+ Returns
+ -------
+ list, filtered from any conflicts.
+
+ Examples
+ --------
+ >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod'])
+ ['armv8.2-a+fp16+dotprod']
+
+ >>> self.cc_normalize_flags(
+ ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2']
+ )
+ ['-march=core-avx2']
+ """
+ assert(isinstance(flags, list))
+ if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc:
+ return self._cc_normalize_unix(flags)
+
+ if self.cc_is_msvc or self.cc_is_iccw:
+ return self._cc_normalize_win(flags)
+ return flags
+
+ _cc_normalize_unix_mrgx = re.compile(
+ # 1- to check the highest of
+ r"^(-mcpu=|-march=|-x[A-Z0-9\-])"
+ )
+ _cc_normalize_unix_frgx = re.compile(
+        # 2- to remove any flags that start with
+ # -march, -mcpu, -x(INTEL) and '-m' without '='
+ r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|"
+ # exclude:
+ r"(?:-mzvector)"
+ )
+ _cc_normalize_unix_krgx = re.compile(
+ # 3- keep only the highest of
+ r"^(-mfpu|-mtune)"
+ )
+ _cc_normalize_arch_ver = re.compile(
+ r"[0-9.]"
+ )
+ def _cc_normalize_unix(self, flags):
+ def ver_flags(f):
+ # arch ver subflag
+ # -march=armv8.2-a+fp16fml
+ tokens = f.split('+')
+ ver = float('0' + ''.join(
+ re.findall(self._cc_normalize_arch_ver, tokens[0])
+ ))
+ return ver, tokens[0], tokens[1:]
+
+ if len(flags) <= 1:
+ return flags
+ # get the highest matched flag
+ for i, cur_flag in enumerate(reversed(flags)):
+ if not re.match(self._cc_normalize_unix_mrgx, cur_flag):
+ continue
+ lower_flags = flags[:-(i+1)]
+ upper_flags = flags[-i:]
+            filtered = list(filter(
+ self._cc_normalize_unix_frgx.search, lower_flags
+ ))
+ # gather subflags
+ ver, arch, subflags = ver_flags(cur_flag)
+ if ver > 0 and len(subflags) > 0:
+ for xflag in lower_flags:
+ xver, _, xsubflags = ver_flags(xflag)
+ if ver == xver:
+ subflags = xsubflags + subflags
+ cur_flag = arch + '+' + '+'.join(subflags)
+
+            flags = filtered + [cur_flag]
+ if i > 0:
+ flags += upper_flags
+ break
+
+ # to remove overridable flags
+ final_flags = []
+ matched = set()
+ for f in reversed(flags):
+ match = re.match(self._cc_normalize_unix_krgx, f)
+ if not match:
+ pass
+ elif match[0] in matched:
+ continue
+ else:
+ matched.add(match[0])
+ final_flags.insert(0, f)
+ return final_flags
+
+ _cc_normalize_win_frgx = re.compile(
+ r"^(?!(/arch\:|/Qx\:))"
+ )
+ _cc_normalize_win_mrgx = re.compile(
+ r"^(/arch|/Qx:)"
+ )
+ def _cc_normalize_win(self, flags):
+ for i, f in enumerate(reversed(flags)):
+ if not re.match(self._cc_normalize_win_mrgx, f):
+ continue
+ i += 1
+ return list(filter(
+ self._cc_normalize_win_frgx.search, flags[:-i]
+ )) + flags[-i:]
+ return flags
+
+class _Feature:
+ """A helper class for `CCompilerOpt` that managing CPU features.
+
+ Attributes
+ ----------
+ feature_supported : dict
+ Dictionary containing all CPU features supported by the
+ platform, according to the specified values in attribute
+ `_Config.conf_features` and `_Config.conf_features_partial()`.
+
+ feature_min : set
+ The minimum set of supported CPU features, according to
+ the specified values in attribute `_Config.conf_min_features`.
+ """
+ def __init__(self):
+ if hasattr(self, "feature_is_cached"):
+ return
+ self.feature_supported = pfeatures = self.conf_features_partial()
+ for feature_name in list(pfeatures.keys()):
+ feature = pfeatures[feature_name]
+ cfeature = self.conf_features[feature_name]
+ feature.update({
+ k:v for k,v in cfeature.items() if k not in feature
+ })
+ disabled = feature.get("disable")
+ if disabled is not None:
+ pfeatures.pop(feature_name)
+ self.dist_log(
+ "feature '%s' is disabled," % feature_name,
+ disabled, stderr=True
+ )
+ continue
+ # list is used internally for these options
+ for option in (
+ "implies", "group", "detect", "headers", "flags", "extra_checks"
+ ):
+ oval = feature.get(option)
+ if isinstance(oval, str):
+ feature[option] = oval.split()
+
+ self.feature_min = set()
+ min_f = self.conf_min_features.get(self.cc_march, "")
+ for F in min_f.upper().split():
+ if F in self.feature_supported:
+ self.feature_min.add(F)
+
+ self.feature_is_cached = True
+
+ def feature_names(self, names=None, force_flags=None, macros=[]):
+ """
+ Returns a set of CPU feature names supported by the platform and the **C** compiler.
+
+ Parameters
+ ----------
+ names : sequence or None, optional
+ Specify certain CPU features to test against the **C** compiler.
+ If None (default), all currently supported features are tested.
+ **Note**: feature names must be in upper-case.
+
+ force_flags : list or None, optional
+ If None (default), the default compiler flags for every CPU feature will
+ be used during the test.
+
+ macros : list of tuples, optional
+ A list of C macro definitions.
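+
+ Examples
+ --------
+ Illustrative only; the result depends on the platform and compiler:
+
+ >>> self.feature_names(["SSE2", "AVX2"])
+ {'SSE2', 'AVX2'}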
+ """
+ assert(
+ names is None or (
+ not isinstance(names, str) and
+ hasattr(names, "__iter__")
+ )
+ )
+ assert(force_flags is None or isinstance(force_flags, list))
+ if names is None:
+ names = self.feature_supported.keys()
+ supported_names = set()
+ for f in names:
+ if self.feature_is_supported(
+ f, force_flags=force_flags, macros=macros
+ ):
+ supported_names.add(f)
+ return supported_names
+
+ def feature_is_exist(self, name):
+ """
+ Returns True if a certain feature exists and is covered by
+ `_Config.conf_features`.
+
+ Parameters
+ ----------
+ 'name': str
+ feature name in uppercase.
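+
+ Examples
+ --------
+ Hypothetical results for an x86 configuration:
+
+ >>> self.feature_is_exist("AVX2")
+ True
+ >>> self.feature_is_exist("MMX")
+ False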
+ """
+ assert(name.isupper())
+ return name in self.conf_features
+
+ def feature_sorted(self, names, reverse=False):
+ """
+ Sort a list of CPU features from the lowest to the highest interest.
+
+ Parameters
+ ----------
+ 'names': sequence
+ sequence of supported feature names in uppercase.
+ 'reverse': bool, optional
+ If True, the sorted features are reversed (highest interest first).
+
+ Returns
+ -------
+ list, sorted CPU features
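+
+ Examples
+ --------
+ An illustrative call, assuming the usual x86 interest ranking:
+
+ >>> self.feature_sorted(["AVX2", "SSE2", "SSE41"])
+ ['SSE2', 'SSE41', 'AVX2']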
+ """
+ def sort_cb(k):
+ if isinstance(k, str):
+ return self.feature_supported[k]["interest"]
+ # multiple features
+ rank = max([self.feature_supported[f]["interest"] for f in k])
+ # FIXME: that's not a safe way to increase the rank for
+ # multi targets
+ rank += len(k) -1
+ return rank
+ return sorted(names, reverse=reverse, key=sort_cb)
+
+ def feature_implies(self, names, keep_origins=False):
+ """
+ Return a set of CPU features implied by 'names'.
+
+ Parameters
+ ----------
+ names : str or sequence of str
+ CPU feature name(s) in uppercase.
+
+ keep_origins : bool
+ If False (default), the returned set will not contain any
+ features from 'names'; origins can only appear in the result
+ when two features imply each other.
+
+ Examples
+ --------
+ >>> self.feature_implies("SSE3")
+ {'SSE', 'SSE2'}
+ >>> self.feature_implies("SSE2")
+ {'SSE'}
+ >>> self.feature_implies("SSE2", keep_origins=True)
+ # 'SSE2' found here since 'SSE' and 'SSE2' imply each other
+ {'SSE', 'SSE2'}
+ """
+ def get_implies(name, _caller=set()):
+ implies = set()
+ d = self.feature_supported[name]
+ for i in d.get("implies", []):
+ implies.add(i)
+ if i in _caller:
+ # infinite recursion guard since
+ # features can imply each other
+ continue
+ _caller.add(name)
+ implies = implies.union(get_implies(i, _caller))
+ return implies
+
+ if isinstance(names, str):
+ implies = get_implies(names)
+ names = [names]
+ else:
+ assert(hasattr(names, "__iter__"))
+ implies = set()
+ for n in names:
+ implies = implies.union(get_implies(n))
+ if not keep_origins:
+ implies.difference_update(names)
+ return implies
+
+ def feature_implies_c(self, names):
+ """same as feature_implies() but combining 'names'"""
+ if isinstance(names, str):
+ names = set((names,))
+ else:
+ names = set(names)
+ return names.union(self.feature_implies(names))
+
+ def feature_ahead(self, names):
+ """
+ Return a list of features from 'names' after removing any
+ implied features, keeping the origins.
+
+ Parameters
+ ----------
+ 'names': sequence
+ sequence of CPU feature names in uppercase.
+
+ Returns
+ -------
+ list of CPU features, preserving the order of 'names'
+
+ Examples
+ --------
+ >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
+ ["SSE41"]
+ # assume AVX2 and FMA3 imply each other and AVX2
+ # is the highest interest
+ >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+ ["AVX2"]
+ # assume AVX2 and FMA3 don't imply each other
+ >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
+ ["AVX2", "FMA3"]
+ """
+ assert(
+ not isinstance(names, str)
+ and hasattr(names, '__iter__')
+ )
+ implies = self.feature_implies(names, keep_origins=True)
+ ahead = [n for n in names if n not in implies]
+ if len(ahead) == 0:
+ # return the highest interested feature
+ # if all features imply each other
+ ahead = self.feature_sorted(names, reverse=True)[:1]
+ return ahead
+
+ def feature_untied(self, names):
+ """
+ Same as 'feature_ahead()' but, when two features imply each other,
+ keeps only the one with the highest interest.
+
+ Parameters
+ ----------
+ 'names': sequence
+ sequence of CPU feature names in uppercase.
+
+ Returns
+ -------
+ list of CPU features, preserving the order of 'names'
+
+ Examples
+ --------
+ >>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
+ ["SSE2", "SSE3", "SSE41"]
+ # assume AVX2 and FMA3 imply each other
+ >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
+ ["SSE2", "SSE3", "SSE41", "AVX2"]
+ """
+ assert(
+ not isinstance(names, str)
+ and hasattr(names, '__iter__')
+ )
+ final = []
+ for n in names:
+ implies = self.feature_implies(n)
+ tied = [
+ nn for nn in final
+ if nn in implies and n in self.feature_implies(nn)
+ ]
+ if tied:
+ tied = self.feature_sorted(tied + [n])
+ if n not in tied[1:]:
+ continue
+ final.remove(tied[:1][0])
+ final.append(n)
+ return final
+
+ def feature_get_til(self, names, keyisfalse):
+ """
+ Same as `feature_implies_c()` but stops collecting implied
+ features once the feature option named by the parameter
+ 'keyisfalse' is False; the returned features are also sorted.
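+
+ Examples
+ --------
+ A hypothetical call, assuming the 'implies_detect' option of
+ AVX512F is False so collecting stops there:
+
+ >>> self.feature_get_til(["AVX512_SKX"], "implies_detect")
+ ['AVX512F', 'AVX512CD', 'AVX512_SKX']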
+ """
+ def til(tnames):
+ # sort from highest to lowest interest then cut if "key" is False
+ tnames = self.feature_implies_c(tnames)
+ tnames = self.feature_sorted(tnames, reverse=True)
+ for i, n in enumerate(tnames):
+ if not self.feature_supported[n].get(keyisfalse, True):
+ tnames = tnames[:i+1]
+ break
+ return tnames
+
+ if isinstance(names, str) or len(names) <= 1:
+ names = til(names)
+ # normalize the sort
+ names.reverse()
+ return names
+
+ names = self.feature_ahead(names)
+ names = {t for n in names for t in til(n)}
+ return self.feature_sorted(names)
+
+ def feature_detect(self, names):
+ """
+ Return a list of CPU features that are required to be detected,
+ sorted from the lowest to the highest interest.
+ """
+ names = self.feature_get_til(names, "implies_detect")
+ detect = []
+ for n in names:
+ d = self.feature_supported[n]
+ detect += d.get("detect", d.get("group", [n]))
+ return detect
+
+ @_Cache.me
+ def feature_flags(self, names):
+ """
+ Return a list of CPU feature flags, sorted from the lowest
+ to the highest interest.
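+
+ Examples
+ --------
+ Hypothetical output for a GCC-like compiler on x86:
+
+ >>> self.feature_flags("AVX2")
+ ['-msse', '-msse2', ..., '-mavx2']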
+ """
+ names = self.feature_sorted(self.feature_implies_c(names))
+ flags = []
+ for n in names:
+ d = self.feature_supported[n]
+ f = d.get("flags", [])
+ if not f or not self.cc_test_flags(f):
+ continue
+ flags += f
+ return self.cc_normalize_flags(flags)
+
+ @_Cache.me
+ def feature_test(self, name, force_flags=None, macros=[]):
+ """
+ Test a certain CPU feature against the compiler through its own
+ check file.
+
+ Parameters
+ ----------
+ name : str
+ Supported CPU feature name.
+
+ force_flags : list or None, optional
+ If None (default), the returned flags from `feature_flags()`
+ will be used.
+
+ macros : list of tuples, optional
+ A list of C macro definitions.
+ """
+ if force_flags is None:
+ force_flags = self.feature_flags(name)
+
+ self.dist_log(
+ "testing feature '%s' with flags (%s)" % (
+ name, ' '.join(force_flags)
+ ))
+ # Each CPU feature must have a C source file containing at
+ # least one intrinsic or instruction related to this feature.
+ test_path = os.path.join(
+ self.conf_check_path, "cpu_%s.c" % name.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("feature test file is not exist", test_path)
+
+ test = self.dist_test(
+ test_path, force_flags + self.cc_flags["werror"], macros=macros
+ )
+ if not test:
+ self.dist_log("testing failed", stderr=True)
+ return test
+
+ @_Cache.me
+ def feature_is_supported(self, name, force_flags=None, macros=[]):
+ """
+ Check if a certain CPU feature is supported by the platform and compiler.
+
+ Parameters
+ ----------
+ name : str
+ CPU feature name in uppercase.
+
+ force_flags : list or None, optional
+ If None (default), the default compiler flags for every CPU feature will
+ be used during test.
+
+ macros : list of tuples, optional
+ A list of C macro definitions.
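+
+ Examples
+ --------
+ Hypothetical result; it depends on the build machine and compiler:
+
+ >>> self.feature_is_supported("SSE2")
+ True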
+ """
+ assert(name.isupper())
+ assert(force_flags is None or isinstance(force_flags, list))
+
+ supported = name in self.feature_supported
+ if supported:
+ for impl in self.feature_implies(name):
+ if not self.feature_test(impl, force_flags, macros=macros):
+ return False
+ if not self.feature_test(name, force_flags, macros=macros):
+ return False
+ return supported
+
+ @_Cache.me
+ def feature_can_autovec(self, name):
+ """
+ Check whether the feature can be auto-vectorized by the compiler.
+ """
+ assert(isinstance(name, str))
+ d = self.feature_supported[name]
+ can = d.get("autovec", None)
+ if can is None:
+ valid_flags = [
+ self.cc_test_flags([f]) for f in d.get("flags", [])
+ ]
+ can = valid_flags and any(valid_flags)
+ return can
+
+ @_Cache.me
+ def feature_extra_checks(self, name):
+ """
+ Return a list of supported extra checks after testing them against
+ the compiler.
+
+ Parameters
+ ----------
+ name : str
+ CPU feature name in uppercase.
+ """
+ assert isinstance(name, str)
+ d = self.feature_supported[name]
+ extra_checks = d.get("extra_checks", [])
+ if not extra_checks:
+ return []
+
+ self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+ flags = self.feature_flags(name)
+ available = []
+ not_available = []
+ for chk in extra_checks:
+ test_path = os.path.join(
+ self.conf_check_path, "extra_%s.c" % chk.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("extra check file does not exist", test_path)
+
+ is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+ if is_supported:
+ available.append(chk)
+ else:
+ not_available.append(chk)
+
+ if not_available:
+ self.dist_log("testing failed for checks", not_available, stderr=True)
+ return available
+
+ def feature_c_preprocessor(self, feature_name, tabs=0):
+ """
+ Generate C preprocessor definitions and include headers of a CPU feature.
+
+ Parameters
+ ----------
+ 'feature_name': str
+ CPU feature name in uppercase.
+ 'tabs': int
+ If > 0, indent the generated lines by the given number of tabs.
+
+ Returns
+ -------
+ str, generated C preprocessor
+
+ Examples
+ --------
+ >>> self.feature_c_preprocessor("SSE3")
+ /** SSE3 **/
+ #define NPY_HAVE_SSE3 1
+ #include <pmmintrin.h>
+ """
+ assert(feature_name.isupper())
+ feature = self.feature_supported.get(feature_name)
+ assert(feature is not None)
+
+ prepr = [
+ "/** %s **/" % feature_name,
+ "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
+ ]
+ prepr += [
+ "#include <%s>" % h for h in feature.get("headers", [])
+ ]
+
+ extra_defs = feature.get("group", [])
+ extra_defs += self.feature_extra_checks(feature_name)
+ for edef in extra_defs:
+ # Guard extra definitions in case of duplication with
+ # another feature
+ prepr += [
+ "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+ "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
+ "#endif",
+ ]
+
+ if tabs > 0:
+ prepr = [('\t'*tabs) + l for l in prepr]
+ return '\n'.join(prepr)
+
+class _Parse:
+ """A helper class that parsing main arguments of `CCompilerOpt`,
+ also parsing configuration statements in dispatch-able sources.
+
+ Parameters
+ ----------
+ cpu_baseline : str or None
+ minimal set of required CPU features or special options.
+
+ cpu_dispatch : str or None
+ dispatched set of additional CPU features or special options.
+
+ Special options can be:
+ - **MIN**: Enables the minimum CPU features, as specified via `_Config.conf_min_features`.
+ - **MAX**: Enables all CPU features supported by the compiler and platform.
+ - **NATIVE**: Enables all CPU features supported by the current machine.
+ - **NONE**: Enables nothing.
+ - **Operand +/-**: add or remove features, useful with options **MAX**, **MIN** and **NATIVE**.
+ NOTE: the '+' operand is accepted only for readability, since appending is the default.
+
+ NOTES:
+ - CPU features and special options are case-insensitive.
+ - Comma or space can be used as a separator.
+ - If the CPU feature is not supported by the user platform or compiler,
+ it will be skipped rather than raising a fatal error.
+ - Any CPU feature specified in 'cpu_dispatch' will be skipped if it is part of the CPU baseline features.
+ - 'cpu_baseline' force enables implied features.
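+
+ For example (illustrative values only):
+ - cpu_baseline="min avx2" enables the minimum features plus AVX2.
+ - cpu_dispatch="max -avx512f" enables all supported features except AVX512F.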
+
+ Attributes
+ ----------
+ parse_baseline_names : list
+ Final CPU baseline feature names (sorted from the lowest to the highest interest)
+ parse_baseline_flags : list
+ Compiler flags of baseline features
+ parse_dispatch_names : list
+ Final CPU dispatch-able feature names (sorted from the lowest to the highest interest)
+ parse_target_groups : dict
+ Dictionary containing initialized target groups that configured
+ through class attribute `conf_target_groups`.
+
+ The key represents the group name and the value is a tuple
+ containing three items:
+ - bool, True if group has the 'baseline' option.
+ - list, list of CPU features.
+ - list, list of extra compiler flags.
+
+ """
+ def __init__(self, cpu_baseline, cpu_dispatch):
+ self._parse_policies = dict(
+ # POLICY NAME, (HAVE, NOT HAVE, [DEPS])
+ KEEP_BASELINE = (
+ None, self._parse_policy_not_keepbase,
+ []
+ ),
+ KEEP_SORT = (
+ self._parse_policy_keepsort,
+ self._parse_policy_not_keepsort,
+ []
+ ),
+ MAXOPT = (
+ self._parse_policy_maxopt, None,
+ []
+ ),
+ WERROR = (
+ self._parse_policy_werror, None,
+ []
+ ),
+ AUTOVEC = (
+ self._parse_policy_autovec, None,
+ ["MAXOPT"]
+ )
+ )
+ if hasattr(self, "parse_is_cached"):
+ return
+
+ self.parse_baseline_names = []
+ self.parse_baseline_flags = []
+ self.parse_dispatch_names = []
+ self.parse_target_groups = {}
+
+ if self.cc_noopt:
+ # skip parsing baseline and dispatch args and keep parsing target groups
+ cpu_baseline = cpu_dispatch = None
+
+ self.dist_log("check requested baseline")
+ if cpu_baseline is not None:
+ cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline)
+ baseline_names = self.feature_names(cpu_baseline)
+ self.parse_baseline_flags = self.feature_flags(baseline_names)
+ self.parse_baseline_names = self.feature_sorted(
+ self.feature_implies_c(baseline_names)
+ )
+
+ self.dist_log("check requested dispatch-able features")
+ if cpu_dispatch is not None:
+ cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch)
+ cpu_dispatch = {
+ f for f in cpu_dispatch_
+ if f not in self.parse_baseline_names
+ }
+ conflict_baseline = cpu_dispatch_.difference(cpu_dispatch)
+ self.parse_dispatch_names = self.feature_sorted(
+ self.feature_names(cpu_dispatch)
+ )
+ if len(conflict_baseline) > 0:
+ self.dist_log(
+ "skip features", conflict_baseline, "since its part of baseline"
+ )
+
+ self.dist_log("initialize targets groups")
+ for group_name, tokens in self.conf_target_groups.items():
+ self.dist_log("parse target group", group_name)
+ GROUP_NAME = group_name.upper()
+ if not tokens or not tokens.strip():
+ # allow empty groups, useful when there's a need
+ # to disable a certain group since '_parse_target_tokens()'
+ # requires at least one valid target
+ self.parse_target_groups[GROUP_NAME] = (
+ False, [], []
+ )
+ continue
+ has_baseline, features, extra_flags = \
+ self._parse_target_tokens(tokens)
+ self.parse_target_groups[GROUP_NAME] = (
+ has_baseline, features, extra_flags
+ )
+
+ self.parse_is_cached = True
+
+ def parse_targets(self, source):
+ """
+ Fetch and parse the configuration statements required for
+ defining the targeted CPU features. Statements should be declared
+ at the top of the source, within a **C** comment, and start
+ with the special mark **@targets**.
+
+ Configuration statements are keyword-like tokens representing
+ CPU feature names, target groups and policies, combined
+ together to determine the required optimizations.
+
+ Parameters
+ ----------
+ source : str
+ the path of **C** source file.
+
+ Returns
+ -------
+ - bool, True if group has the 'baseline' option
+ - list, list of CPU features
+ - list, list of extra compiler flags
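+
+ Examples
+ --------
+ A dispatch-able source typically starts with a comment such as
+ the following sketch (feature names are illustrative):
+
+ /*@targets
+ * $maxopt baseline
+ * SSE42 AVX2 FMA3 (AVX512F AVX512CD)
+ */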
+ """
+ self.dist_log("looking for '@targets' inside -> ", source)
+ # get lines between /*@targets and */
+ with open(source) as fd:
+ tokens = ""
+ max_to_reach = 1000 # good enough, isn't it?
+ start_with = "@targets"
+ start_pos = -1
+ end_with = "*/"
+ end_pos = -1
+ for current_line, line in enumerate(fd):
+ if current_line == max_to_reach:
+ self.dist_fatal("reached the max of lines")
+ break
+ if start_pos == -1:
+ start_pos = line.find(start_with)
+ if start_pos == -1:
+ continue
+ start_pos += len(start_with)
+ tokens += line
+ end_pos = line.find(end_with)
+ if end_pos != -1:
+ end_pos += len(tokens) - len(line)
+ break
+
+ if start_pos == -1:
+ self.dist_fatal("expected to find '%s' within a C comment" % start_with)
+ if end_pos == -1:
+ self.dist_fatal("expected to end with '%s'" % end_with)
+
+ tokens = tokens[start_pos:end_pos]
+ return self._parse_target_tokens(tokens)
+
+ _parse_regex_arg = re.compile(r'\s|,|([+-])')
+ def _parse_arg_features(self, arg_name, req_features):
+ if not isinstance(req_features, str):
+ self.dist_fatal("expected a string in '%s'" % arg_name)
+
+ final_features = set()
+ # space and comma can be used as a separator
+ tokens = list(filter(None, re.split(self._parse_regex_arg, req_features)))
+ append = True # append is the default
+ for tok in tokens:
+ if tok[0] in ("#", "$"):
+ self.dist_fatal(
+ arg_name, "target groups and policies "
+ "aren't allowed from arguments, "
+ "only from dispatch-able sources"
+ )
+ if tok == '+':
+ append = True
+ continue
+ if tok == '-':
+ append = False
+ continue
+
+ TOK = tok.upper() # we use upper-case internally
+ features_to = set()
+ if TOK == "NONE":
+ pass
+ elif TOK == "NATIVE":
+ native = self.cc_flags["native"]
+ if not native:
+ self.dist_fatal(arg_name,
+ "native option isn't supported by the compiler"
+ )
+ features_to = self.feature_names(
+ force_flags=native, macros=[("DETECT_FEATURES", 1)]
+ )
+ elif TOK == "MAX":
+ features_to = self.feature_supported.keys()
+ elif TOK == "MIN":
+ features_to = self.feature_min
+ else:
+ if TOK in self.feature_supported:
+ features_to.add(TOK)
+ else:
+ if not self.feature_is_exist(TOK):
+ self.dist_fatal(arg_name,
+ ", '%s' isn't a known feature or option" % tok
+ )
+ if append:
+ final_features = final_features.union(features_to)
+ else:
+ final_features = final_features.difference(features_to)
+
+ append = True # back to default
+
+ return final_features
+
+ _parse_regex_target = re.compile(r'\s|[*,/]|([()])')
+ def _parse_target_tokens(self, tokens):
+ assert(isinstance(tokens, str))
+ final_targets = [] # to keep it sorted as specified
+ extra_flags = []
+ has_baseline = False
+
+ skipped = set()
+ policies = set()
+ multi_target = None
+
+ tokens = list(filter(None, re.split(self._parse_regex_target, tokens)))
+ if not tokens:
+ self.dist_fatal("expected one token at least")
+
+ for tok in tokens:
+ TOK = tok.upper()
+ ch = tok[0]
+ if ch in ('+', '-'):
+ self.dist_fatal(
+ "+/- are 'not' allowed from target's groups or @targets, "
+ "only from cpu_baseline and cpu_dispatch parms"
+ )
+ elif ch == '$':
+ if multi_target is not None:
+ self.dist_fatal(
+ "policies aren't allowed inside multi-target '()'"
+ ", only CPU features"
+ )
+ policies.add(self._parse_token_policy(TOK))
+ elif ch == '#':
+ if multi_target is not None:
+ self.dist_fatal(
+ "target groups aren't allowed inside multi-target '()'"
+ ", only CPU features"
+ )
+ has_baseline, final_targets, extra_flags = \
+ self._parse_token_group(TOK, has_baseline, final_targets, extra_flags)
+ elif ch == '(':
+ if multi_target is not None:
+ self.dist_fatal("unclosed multi-target, missing ')'")
+ multi_target = set()
+ elif ch == ')':
+ if multi_target is None:
+ self.dist_fatal("multi-target opener '(' wasn't found")
+ targets = self._parse_multi_target(multi_target)
+ if targets is None:
+ skipped.add(tuple(multi_target))
+ else:
+ if len(targets) == 1:
+ targets = targets[0]
+ if targets and targets not in final_targets:
+ final_targets.append(targets)
+ multi_target = None # back to default
+ else:
+ if TOK == "BASELINE":
+ if multi_target is not None:
+ self.dist_fatal("baseline isn't allowed inside multi-target '()'")
+ has_baseline = True
+ continue
+
+ if multi_target is not None:
+ multi_target.add(TOK)
+ continue
+
+ if not self.feature_is_exist(TOK):
+ self.dist_fatal("invalid target name '%s'" % TOK)
+
+ is_enabled = (
+ TOK in self.parse_baseline_names or
+ TOK in self.parse_dispatch_names
+ )
+ if is_enabled:
+ if TOK not in final_targets:
+ final_targets.append(TOK)
+ continue
+
+ skipped.add(TOK)
+
+ if multi_target is not None:
+ self.dist_fatal("unclosed multi-target, missing ')'")
+ if skipped:
+ self.dist_log(
+ "skip targets", skipped,
+ "not part of baseline or dispatch-able features"
+ )
+
+ final_targets = self.feature_untied(final_targets)
+
+ # add policy dependencies
+ for p in list(policies):
+ _, _, deps = self._parse_policies[p]
+ for d in deps:
+ if d in policies:
+ continue
+ self.dist_log(
+ "policy '%s' force enables '%s'" % (
+ p, d
+ ))
+ policies.add(d)
+
+ # apply the policy filters
+ for p, (have, nhave, _) in self._parse_policies.items():
+ func = None
+ if p in policies:
+ func = have
+ self.dist_log("policy '%s' is ON" % p)
+ else:
+ func = nhave
+ if not func:
+ continue
+ has_baseline, final_targets, extra_flags = func(
+ has_baseline, final_targets, extra_flags
+ )
+
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_token_policy(self, token):
+ """validate policy token"""
+ if len(token) <= 1 or token[-1:] == token[0]:
+ self.dist_fatal("'$' must stuck in the begin of policy name")
+ token = token[1:]
+ if token not in self._parse_policies:
+ self.dist_fatal(
+ "'%s' is an invalid policy name, available policies are" % token,
+ self._parse_policies.keys()
+ )
+ return token
+
+ def _parse_token_group(self, token, has_baseline, final_targets, extra_flags):
+ """validate group token"""
+ if len(token) <= 1 or token[-1:] == token[0]:
+ self.dist_fatal("'#' must stuck in the begin of group name")
+
+ token = token[1:]
+ ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get(
+ token, (False, None, [])
+ )
+ if gtargets is None:
+ self.dist_fatal(
+ "'%s' is an invalid target group name, " % token + \
+ "available target groups are",
+ self.parse_target_groups.keys()
+ )
+ if ghas_baseline:
+ has_baseline = True
+ # always keep sorting as specified
+ final_targets += [f for f in gtargets if f not in final_targets]
+ extra_flags += [f for f in gextra_flags if f not in extra_flags]
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_multi_target(self, targets):
+ """validate multi targets that defined between parentheses()"""
+ # remove any implied features and keep the origins
+ if not targets:
+ self.dist_fatal("empty multi-target '()'")
+ if not all([
+ self.feature_is_exist(tar) for tar in targets
+ ]):
+ self.dist_fatal("invalid target name in multi-target", targets)
+ if not all([
+ (
+ tar in self.parse_baseline_names or
+ tar in self.parse_dispatch_names
+ )
+ for tar in targets
+ ]):
+ return None
+ targets = self.feature_ahead(targets)
+ if not targets:
+ return None
+ # force sort multi targets, so they are comparable
+ targets = self.feature_sorted(targets)
+ targets = tuple(targets) # hashable
+ return targets
+
+ def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags):
+ """skip all baseline features"""
+ skipped = []
+ for tar in final_targets[:]:
+ is_base = False
+ if isinstance(tar, str):
+ is_base = tar in self.parse_baseline_names
+ else:
+ # multi targets
+ is_base = all([
+ f in self.parse_baseline_names
+ for f in tar
+ ])
+ if is_base:
+ skipped.append(tar)
+ final_targets.remove(tar)
+
+ if skipped:
+ self.dist_log("skip baseline features", skipped)
+
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags):
+ """leave a notice that $keep_sort is on"""
+ self.dist_log(
+ "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n"
+ "are 'not' sorted depend on the highest interest but"
+ "as specified in the dispatch-able source or the extra group"
+ )
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags):
+ """sorted depend on the highest interest"""
+ final_targets = self.feature_sorted(final_targets, reverse=True)
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags):
+ """append the compiler optimization flags"""
+ if self.cc_has_debug:
+ self.dist_log("debug mode is detected, policy 'maxopt' is skipped.")
+ elif self.cc_noopt:
+ self.dist_log("optimization is disabled, policy 'maxopt' is skipped.")
+ else:
+ flags = self.cc_flags["opt"]
+ if not flags:
+ self.dist_log(
+ "current compiler doesn't support optimization flags, "
+ "policy 'maxopt' is skipped", stderr=True
+ )
+ else:
+ extra_flags += flags
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_werror(self, has_baseline, final_targets, extra_flags):
+ """force warnings to treated as errors"""
+ flags = self.cc_flags["werror"]
+ if not flags:
+ self.dist_log(
+ "current compiler doesn't support werror flags, "
+ "warnings will 'not' treated as errors", stderr=True
+ )
+ else:
+ self.dist_log("compiler warnings are treated as errors")
+ extra_flags += flags
+ return has_baseline, final_targets, extra_flags
+
+ def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags):
+ """skip features that has no auto-vectorized support by compiler"""
+ skipped = []
+ for tar in final_targets[:]:
+ if isinstance(tar, str):
+ can = self.feature_can_autovec(tar)
+ else: # multiple target
+ can = all([
+ self.feature_can_autovec(t)
+ for t in tar
+ ])
+ if not can:
+ final_targets.remove(tar)
+ skipped.append(tar)
+
+ if skipped:
+ self.dist_log("skip non auto-vectorized features", skipped)
+
+ return has_baseline, final_targets, extra_flags
+
+class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
+ """
+ A helper class for `CCompiler` that aims to provide extra build options
+ to effectively control compiler optimizations that are directly
+ related to CPU features.
+ """
+ def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None):
+ _Config.__init__(self)
+ _Distutils.__init__(self, ccompiler)
+ _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch)
+ _CCompiler.__init__(self)
+ _Feature.__init__(self)
+ if not self.cc_noopt and self.cc_has_native:
+ self.dist_log(
+ "native flag is specified through environment variables. "
+ "force cpu-baseline='native'"
+ )
+ cpu_baseline = "native"
+ _Parse.__init__(self, cpu_baseline, cpu_dispatch)
+ # keep the requested features untouched, needed later for report
+ # and trace purposes
+ self._requested_baseline = cpu_baseline
+ self._requested_dispatch = cpu_dispatch
+ # key is the dispatch-able source and value is a tuple
+ # contains two items (has_baseline[boolean], dispatched-features[list])
+ self.sources_status = getattr(self, "sources_status", {})
+ # every instance should have a separate one
+ self.cache_private.add("sources_status")
+ # set it at the end to make sure the cache writing happens after
+ # initializing this class
+ self.hit_cache = hasattr(self, "hit_cache")
+
+ def is_cached(self):
+ """
+ Returns True if the class was loaded from the cache file
+ """
+ return self.cache_infile and self.hit_cache
+
+ def cpu_baseline_flags(self):
+ """
+ Returns a list of final CPU baseline compiler flags
+ """
+ return self.parse_baseline_flags
+
+ def cpu_baseline_names(self):
+ """
+ Returns a list of final CPU baseline feature names
+ """
+ return self.parse_baseline_names
+
+ def cpu_dispatch_names(self):
+ """
+ Returns a list of final CPU dispatch feature names
+ """
+ return self.parse_dispatch_names
+
+ def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs):
+ """
+ Compile one or more dispatch-able sources into object files,
+ also generating the abstract C config headers and macros that
+ are used later for the final runtime dispatching process.
+
+ The mechanism behind it is to take each source file specified
+ in 'sources' and branch it into several files depending on the
+ special configuration statements that must be declared at the
+ top of each source, which contain the targeted CPU features,
+ then compile every branched source with the proper compiler flags.
+
+ Parameters
+ ----------
+ sources : list
+ Must be a list of dispatch-able source file paths,
+ and configuration statements must be declared inside
+ each file.
+
+ src_dir : str
+ Path of parent directory for the generated headers and wrapped sources.
+ If None (default), the files will be generated in place.
+
+ ccompiler : CCompiler
+ Distutils `CCompiler` instance to be used for compilation.
+ If None (default), the provided instance during the initialization
+ will be used instead.
+
+ **kwargs : any
+ Arguments to pass on to the `CCompiler.compile()`
+
+ Returns
+ -------
+ list : generated object files
+
+ Raises
+ ------
+ CompileError
+ Raised by `CCompiler.compile()` on compilation failure.
+ DistutilsError
+ Raised when errors occur while checking the sanity of the configuration statements.
+
+ See Also
+ --------
+ parse_targets :
+ Parsing the configuration statements of dispatch-able sources.
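+
+ Examples
+ --------
+ A minimal sketch; the source path and requested features are
+ hypothetical:
+
+ >>> opt = CCompilerOpt(ccompiler, cpu_baseline="sse2", cpu_dispatch="avx2 avx512_skx")
+ >>> objects = opt.try_dispatch(["src/kernels.dispatch.c"], src_dir="build/src")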
+ """
+ to_compile = {}
+ baseline_flags = self.cpu_baseline_flags()
+ include_dirs = kwargs.setdefault("include_dirs", [])
+
+ for src in sources:
+ output_dir = os.path.dirname(src)
+ if src_dir:
+ if not output_dir.startswith(src_dir):
+ output_dir = os.path.join(src_dir, output_dir)
+ if output_dir not in include_dirs:
+ # To allow including the generated config header(*.dispatch.h)
+ # by the dispatch-able sources
+ include_dirs.append(output_dir)
+
+ has_baseline, targets, extra_flags = self.parse_targets(src)
+ nochange = self._generate_config(output_dir, src, targets, has_baseline)
+ for tar in targets:
+ tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange)
+ flags = tuple(extra_flags + self.feature_flags(tar))
+ to_compile.setdefault(flags, []).append(tar_src)
+
+ if has_baseline:
+ flags = tuple(extra_flags + baseline_flags)
+ to_compile.setdefault(flags, []).append(src)
+
+ self.sources_status[src] = (has_baseline, targets)
+
+ # For these reasons, the sources are compiled in a separate loop:
+ # - Gathering all sources with the same flags to benefit from
+ # parallel compiling as much as possible.
+ # - To generate all config headers of the dispatch-able sources
+ # before compilation, in case there are dependency relationships
+ # among them.
+ objects = []
+ for flags, srcs in to_compile.items():
+ objects += self.dist_compile(
+ srcs, list(flags), ccompiler=ccompiler, **kwargs
+ )
+ return objects
+
+ def generate_dispatch_header(self, header_path):
+ """
+ Generate the dispatch header which contains the #definitions and headers
+ for platform-specific instruction-sets for the enabled CPU baseline and
+ dispatch-able features.
+
+ It's highly recommended to take a look at the generated header,
+ as well as the source files generated via `try_dispatch()`,
+ in order to get the full picture.
+ """
+ self.dist_log("generate CPU dispatch header: (%s)" % header_path)
+
+ baseline_names = self.cpu_baseline_names()
+ dispatch_names = self.cpu_dispatch_names()
+ baseline_len = len(baseline_names)
+ dispatch_len = len(dispatch_names)
+
+ header_dir = os.path.dirname(header_path)
+ if not os.path.exists(header_dir):
+ self.dist_log(
+ f"dispatch header dir {header_dir} does not exist, creating it",
+ stderr=True
+ )
+ os.makedirs(header_dir)
+
+ with open(header_path, 'w') as f:
+ baseline_calls = ' \\\n'.join([
+ (
+ "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+ ) % (self.conf_c_prefix, f)
+ for f in baseline_names
+ ])
+ dispatch_calls = ' \\\n'.join([
+ (
+ "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))"
+ ) % (self.conf_c_prefix, f)
+ for f in dispatch_names
+ ])
+ f.write(textwrap.dedent("""\
+ /*
+ * AUTOGENERATED DON'T EDIT
+ * Please make changes to the code generator (distutils/ccompiler_opt.py)
+ */
+ #define {pfx}WITH_CPU_BASELINE "{baseline_str}"
+ #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}"
+ #define {pfx}WITH_CPU_BASELINE_N {baseline_len}
+ #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len}
+ #define {pfx}WITH_CPU_EXPAND_(X) X
+ #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) \\
+ {baseline_calls}
+ #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\
+ {dispatch_calls}
+ """).format(
+ pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names),
+ dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len,
+ dispatch_len=dispatch_len, baseline_calls=baseline_calls,
+ dispatch_calls=dispatch_calls
+ ))
+ baseline_pre = ''
+ for name in baseline_names:
+ baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n'
+
+ dispatch_pre = ''
+ for name in dispatch_names:
+ dispatch_pre += textwrap.dedent("""\
+ #ifdef {pfx}CPU_TARGET_{name}
+ {pre}
+ #endif /*{pfx}CPU_TARGET_{name}*/
+ """).format(
+ pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor(
+ name, tabs=1
+ ))
+
+ f.write(textwrap.dedent("""\
+ /******* baseline features *******/
+ {baseline_pre}
+ /******* dispatch features *******/
+ {dispatch_pre}
+ """).format(
+ pfx=self.conf_c_prefix_, baseline_pre=baseline_pre,
+ dispatch_pre=dispatch_pre
+ ))
+
+ def report(self, full=False):
+ report = []
+ platform_rows = []
+ baseline_rows = []
+ dispatch_rows = []
+ report.append(("Platform", platform_rows))
+ report.append(("", ""))
+ report.append(("CPU baseline", baseline_rows))
+ report.append(("", ""))
+ report.append(("CPU dispatch", dispatch_rows))
+
+ ########## platform ##########
+ platform_rows.append(("Architecture", (
+ "unsupported" if self.cc_on_noarch else self.cc_march)
+ ))
+ platform_rows.append(("Compiler", (
+ "unix-like" if self.cc_is_nocc else self.cc_name)
+ ))
+ ########## baseline ##########
+ if self.cc_noopt:
+ baseline_rows.append(("Requested", "optimization disabled"))
+ else:
+ baseline_rows.append(("Requested", repr(self._requested_baseline)))
+
+ baseline_names = self.cpu_baseline_names()
+ baseline_rows.append((
+ "Enabled", (' '.join(baseline_names) if baseline_names else "none")
+ ))
+ baseline_flags = self.cpu_baseline_flags()
+ baseline_rows.append((
+ "Flags", (' '.join(baseline_flags) if baseline_flags else "none")
+ ))
+ extra_checks = []
+ for name in baseline_names:
+ extra_checks += self.feature_extra_checks(name)
+ baseline_rows.append((
+ "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+ ))
+
+ ########## dispatch ##########
+ if self.cc_noopt:
+ baseline_rows.append(("Requested", "optimization disabled"))
+ else:
+ dispatch_rows.append(("Requested", repr(self._requested_dispatch)))
+
+ dispatch_names = self.cpu_dispatch_names()
+ dispatch_rows.append((
+ "Enabled", (' '.join(dispatch_names) if dispatch_names else "none")
+ ))
+ ########## Generated ##########
+ # TODO:
+ # - collect object names from 'try_dispatch()'
+ # then get the size of each object and print it
+ # - give more details about the features that were not
+ # generated due to lack of compiler support
+ # - find a better output design.
+ #
+ target_sources = {}
+ for source, (_, targets) in self.sources_status.items():
+ for tar in targets:
+ target_sources.setdefault(tar, []).append(source)
+
+ if not full or not target_sources:
+ generated = ""
+ for tar in self.feature_sorted(target_sources):
+ sources = target_sources[tar]
+ name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+ generated += name + "[%d] " % len(sources)
+ dispatch_rows.append(("Generated", generated[:-1] if generated else "none"))
+ else:
+ dispatch_rows.append(("Generated", ''))
+ for tar in self.feature_sorted(target_sources):
+ sources = target_sources[tar]
+ pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+ flags = ' '.join(self.feature_flags(tar))
+ implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
+ detect = ' '.join(self.feature_detect(tar))
+ extra_checks = []
+ for name in ((tar,) if isinstance(tar, str) else tar):
+ extra_checks += self.feature_extra_checks(name)
+ extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
+ dispatch_rows.append(('', ''))
+ dispatch_rows.append((pretty_name, implies))
+ dispatch_rows.append(("Flags", flags))
+ dispatch_rows.append(("Extra checks", extra_checks))
+ dispatch_rows.append(("Detect", detect))
+ for src in sources:
+ dispatch_rows.append(("", src))
+
+ ###############################
+ # TODO: add support for 'markdown' format
+ text = []
+ secs_len = [len(secs) for secs, _ in report]
+ cols_len = [len(col) for _, rows in report for col, _ in rows]
+ tab = ' ' * 2
+ pad = max(max(secs_len), max(cols_len))
+ for sec, rows in report:
+ if not sec:
+ text.append("") # empty line
+ continue
+ sec += ' ' * (pad - len(sec))
+ text.append(sec + tab + ': ')
+ for col, val in rows:
+ col += ' ' * (pad - len(col))
+ text.append(tab + col + ': ' + val)
+
+ return '\n'.join(text)
+
+ def _wrap_target(self, output_dir, dispatch_src, target, nochange=False):
+ assert(isinstance(target, (str, tuple)))
+ if isinstance(target, str):
+ ext_name = target_name = target
+ else:
+ # multi-target
+ ext_name = '.'.join(target)
+ target_name = '__'.join(target)
+
+ wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src))
+ wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower())
+ if nochange and os.path.exists(wrap_path):
+ return wrap_path
+
+ self.dist_log("wrap dispatch-able target -> ", wrap_path)
+ # sorting for readability
+ features = self.feature_sorted(self.feature_implies_c(target))
+ target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_
+ target_defs = [target_join + f for f in features]
+ target_defs = '\n'.join(target_defs)
+
+ with open(wrap_path, "w") as fd:
+ fd.write(textwrap.dedent("""\
+ /**
+ * AUTOGENERATED DON'T EDIT
+ * Please make changes to the code generator \
+ (distutils/ccompiler_opt.py)
+ */
+ #define {pfx}CPU_TARGET_MODE
+ #define {pfx}CPU_TARGET_CURRENT {target_name}
+ {target_defs}
+ #include "{path}"
+ """).format(
+ pfx=self.conf_c_prefix_, target_name=target_name,
+ path=os.path.abspath(dispatch_src), target_defs=target_defs
+ ))
+ return wrap_path
+
+ def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False):
+ config_path = os.path.basename(dispatch_src)
+ config_path = os.path.splitext(config_path)[0] + '.h'
+ config_path = os.path.join(output_dir, config_path)
+ # check if targets didn't change to avoid recompiling
+ cache_hash = self.cache_hash(targets, has_baseline)
+ try:
+ with open(config_path) as f:
+ last_hash = f.readline().split("cache_hash:")
+ if len(last_hash) == 2 and int(last_hash[1]) == cache_hash:
+ return True
+ except OSError:
+ pass
+
+ os.makedirs(os.path.dirname(config_path), exist_ok=True)
+
+ self.dist_log("generate dispatched config -> ", config_path)
+ dispatch_calls = []
+ for tar in targets:
+ if isinstance(tar, str):
+ target_name = tar
+ else: # multi target
+ target_name = '__'.join([t for t in tar])
+ req_detect = self.feature_detect(tar)
+ req_detect = '&&'.join([
+ "CHK(%s)" % f for f in req_detect
+ ])
+ dispatch_calls.append(
+ "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % (
+ self.conf_c_prefix_, req_detect, target_name
+ ))
+ dispatch_calls = ' \\\n'.join(dispatch_calls)
+
+ if has_baseline:
+ baseline_calls = (
+ "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))"
+ ) % self.conf_c_prefix_
+ else:
+ baseline_calls = ''
+
+ with open(config_path, "w") as fd:
+ fd.write(textwrap.dedent("""\
+ // cache_hash:{cache_hash}
+ /**
+ * AUTOGENERATED DON'T EDIT
+ * Please make changes to the code generator (distutils/ccompiler_opt.py)
+ */
+ #ifndef {pfx}CPU_DISPATCH_EXPAND_
+ #define {pfx}CPU_DISPATCH_EXPAND_(X) X
+ #endif
+ #undef {pfx}CPU_DISPATCH_BASELINE_CALL
+ #undef {pfx}CPU_DISPATCH_CALL
+ #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\
+ {baseline_calls}
+ #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\
+ {dispatch_calls}
+ """).format(
+ pfx=self.conf_c_prefix_, baseline_calls=baseline_calls,
+ dispatch_calls=dispatch_calls, cache_hash=cache_hash
+ ))
+ return False
+
+def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs):
+ """
+ Create a new instance of 'CCompilerOpt' and generate the dispatch header
+ which contains the #definitions and headers of platform-specific instruction-sets for
+ the enabled CPU baseline and dispatch-able features.
+
+ Parameters
+ ----------
+ compiler : CCompiler instance
+ dispatch_hpath : str
+ path of the dispatch header
+
+ **kwargs: passed as-is to `CCompilerOpt(...)`
+ Returns
+ -------
+ new instance of CCompilerOpt
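+
+ Examples
+ --------
+ A hypothetical usage from within a distutils build command:
+
+ >>> opt = new_ccompiler_opt(
+ ... self.compiler, "build/src/_cpu_dispatch.h",
+ ... cpu_baseline="min", cpu_dispatch="max"
+ ... )
+ >>> objects = opt.try_dispatch(["kernels.dispatch.c"])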
+ """
+ opt = CCompilerOpt(compiler, **kwargs)
+ if not os.path.exists(dispatch_hpath) or not opt.is_cached():
+ opt.generate_dispatch_header(dispatch_hpath)
+ return opt
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c
new file mode 100644
index 00000000..6bc9022a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimd.c
@@ -0,0 +1,27 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float *src = (float*)argv[argc-1];
+ float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+ /* MAXMIN */
+ int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
+ ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
+ /* ROUNDING */
+ ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
+#ifdef __aarch64__
+ {
+ double *src2 = (double*)argv[argc-1];
+ float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+ /* MAXMIN */
+ ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
+ ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
+ /* ROUNDING */
+ ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
+ }
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c
new file mode 100644
index 00000000..e7068ce0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c
@@ -0,0 +1,16 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ unsigned char *src = (unsigned char*)argv[argc-1];
+ uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
+ uint32x4_t va = vdupq_n_u32(3);
+ int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
+#ifdef __aarch64__
+ ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
new file mode 100644
index 00000000..54e32809
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float16_t *src = (float16_t*)argv[argc-1];
+ float *src2 = (float*)argv[argc-2];
+ float16x8_t vhp = vdupq_n_f16(src[0]);
+ float16x4_t vlhp = vdup_n_f16(src[1]);
+ float32x4_t vf = vdupq_n_f32(src2[0]);
+ float32x2_t vlf = vdup_n_f32(src2[1]);
+
+ int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+ ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
+
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c
new file mode 100644
index 00000000..e2de0306
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimdhp.c
@@ -0,0 +1,15 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float16_t *src = (float16_t*)argv[argc-1];
+ float16x8_t vhp = vdupq_n_f16(src[0]);
+ float16x4_t vlhp = vdup_n_f16(src[1]);
+
+ int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
+ ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c
new file mode 100644
index 00000000..26ae1846
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #ifndef __AVX__
+ #error "HOST/ARCH doesn't support AVX"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
+ return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c
new file mode 100644
index 00000000..ddde868f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #ifndef __AVX2__
+ #error "HOST/ARCH doesn't support AVX2"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
+ return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
new file mode 100644
index 00000000..81edcd06
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #ifndef __AVX512VNNI__
+ #error "HOST/ARCH doesn't support CascadeLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ /* VNNI */
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
new file mode 100644
index 00000000..5799f122
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
@@ -0,0 +1,24 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
+ #error "HOST/ARCH doesn't support CannonLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ /* IFMA */
+ a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
+ /* VMBI */
+ a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
new file mode 100644
index 00000000..3cf44d73
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #if !defined(__AVX512VBMI2__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
+ #error "HOST/ARCH doesn't support IceLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ /* VBMI2 */
+ a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
+ /* BITALG */
+ a = _mm512_popcnt_epi8(a);
+ /* VPOPCNTDQ */
+ a = _mm512_popcnt_epi64(a);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
new file mode 100644
index 00000000..b3f4f697
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
@@ -0,0 +1,25 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
+ #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ int base[128];
+ __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
+ /* ER */
+ __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
+ /* PF */
+ _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
+ return base[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
new file mode 100644
index 00000000..2c426462
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
@@ -0,0 +1,30 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
+ #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+ __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
+
+ /* 4FMAPS */
+ b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
+ /* 4VNNIW */
+ a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
+ /* VPOPCNTDQ */
+ a = _mm512_popcnt_epi64(a);
+
+ a = _mm512_add_epi32(a, _mm512_castps_si512(b));
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
new file mode 100644
index 00000000..8840efb7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of CPU features when the option
+ * native/host is enabled via `--cpu-baseline` or through the env var `CFLAGS`,
+ * otherwise the test will be broken and lead to enabling all possible features.
+ */
+ #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+ #error "HOST/ARCH doesn't support SkyLake AVX512 features"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+ /* VL */
+ __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
+ /* DQ */
+ __m512i b = _mm512_broadcast_i32x8(a);
+ /* BW */
+ b = _mm512_abs_epi16(b);
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c
new file mode 100644
index 00000000..5e29c79e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512cd.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __AVX512CD__
+ #error "HOST/ARCH doesn't support AVX512CD"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c
new file mode 100644
index 00000000..d0eb7b1a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512f.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __AVX512F__
+ #error "HOST/ARCH doesn't support AVX512F"
+ #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c
new file mode 100644
index 00000000..fdf36cec
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_f16c.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __F16C__
+ #error "HOST/ARCH doesn't support F16C"
+ #endif
+#endif
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
+ __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
+ return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c
new file mode 100644
index 00000000..bfeef22b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma3.c
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #if !defined(__FMA__) && !defined(__AVX2__)
+ #error "HOST/ARCH doesn't support FMA3"
+ #endif
+#endif
+
+#include <xmmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+ __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+ a = _mm256_fmadd_ps(a, a, a);
+ return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c
new file mode 100644
index 00000000..0ff17a48
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_fma4.c
@@ -0,0 +1,13 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+ #include <ammintrin.h>
+#else
+ #include <x86intrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+ __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+ a = _mm256_macc_ps(a, a, a);
+ return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c
new file mode 100644
index 00000000..8c64f864
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon.c
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ // pass operands via untraced pointers so the compiler cannot constant-fold
+ // them away, ensuring real instructions are emitted and tested against the linker.
+ float *src = (float*)argv[argc-1];
+ float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+ int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+#ifdef __aarch64__
+ double *src2 = (double*)argv[argc-2];
+ float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+ ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
new file mode 100644
index 00000000..f3b94977
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
@@ -0,0 +1,11 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ short *src = (short*)argv[argc-1];
+ float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
+ return (int)vgetq_lane_f32(v_z4, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
new file mode 100644
index 00000000..a039159d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
@@ -0,0 +1,21 @@
+#ifdef _MSC_VER
+ #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+ float *src = (float*)argv[argc-1];
+ float32x4_t v1 = vdupq_n_f32(src[0]);
+ float32x4_t v2 = vdupq_n_f32(src[1]);
+ float32x4_t v3 = vdupq_n_f32(src[2]);
+ int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+#ifdef __aarch64__
+ double *src2 = (double*)argv[argc-2];
+ float64x2_t vd1 = vdupq_n_f64(src2[0]);
+ float64x2_t vd2 = vdupq_n_f64(src2[1]);
+ float64x2_t vd3 = vdupq_n_f64(src2[2]);
+ ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+#endif
+ return ret;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c
new file mode 100644
index 00000000..813c461f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_popcnt.c
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+ #error "HOST/ARCH doesn't support POPCNT"
+ #endif
+#endif
+
+#ifdef _MSC_VER
+ #include <nmmintrin.h>
+#else
+ #include <popcntintrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+ // to make sure popcnt instructions are generated
+ // and tested against the assembler
+ unsigned long long a = *((unsigned long long*)argv[argc-1]);
+ unsigned int b = *((unsigned int*)argv[argc-2]);
+
+#if defined(_M_X64) || defined(__x86_64__)
+ a = _mm_popcnt_u64(a);
+#endif
+ b = _mm_popcnt_u32(b);
+ return (int)a + b;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c
new file mode 100644
index 00000000..602b74e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __SSE__
+ #error "HOST/ARCH doesn't support SSE"
+ #endif
+#endif
+
+#include <xmmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c
new file mode 100644
index 00000000..33826a9e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse2.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __SSE2__
+ #error "HOST/ARCH doesn't support SSE2"
+ #endif
+#endif
+
+#include <emmintrin.h>
+
+int main(void)
+{
+ __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+ return _mm_cvtsi128_si32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c
new file mode 100644
index 00000000..d47c20f7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __SSE3__
+ #error "HOST/ARCH doesn't support SSE3"
+ #endif
+#endif
+
+#include <pmmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c
new file mode 100644
index 00000000..7c80238a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse41.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __SSE4_1__
+ #error "HOST/ARCH doesn't support SSE41"
+ #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_floor_ps(_mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c
new file mode 100644
index 00000000..f60e18f3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_sse42.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __SSE4_2__
+ #error "HOST/ARCH doesn't support SSE42"
+ #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+ __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+ return (int)_mm_cvtss_f32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c
new file mode 100644
index 00000000..fde390d6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_ssse3.c
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+ /*
+ * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+ * whether or not the build options for those features are specified.
+ * Therefore, we must test the #definitions of the CPU features when the
+ * native/host option is enabled via `--cpu-baseline` or through the env var
+ * `CFLAGS`; otherwise the test is broken and ends up enabling all possible features.
+ */
+ #ifndef __SSSE3__
+ #error "HOST/ARCH doesn't support SSSE3"
+ #endif
+#endif
+
+#include <tmmintrin.h>
+
+int main(void)
+{
+ __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+ return (int)_mm_cvtsi128_si32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c
new file mode 100644
index 00000000..0b3f30d6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+ #define vsx_ld vec_vsx_ld
+ #define vsx_st vec_vsx_st
+#else
+ #define vsx_ld vec_xl
+ #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+ unsigned int zout[4];
+ unsigned int z4[] = {0, 0, 0, 0};
+ __vector unsigned int v_z4 = vsx_ld(0, z4);
+ vsx_st(v_z4, 0, zout);
+ return zout[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c
new file mode 100644
index 00000000..410fb29d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx2.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned long long v_uint64x2;
+
+int main(void)
+{
+ v_uint64x2 z2 = (v_uint64x2){0, 0};
+ z2 = (v_uint64x2)vec_cmpeq(z2, z2);
+ return (int)vec_extract(z2, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c
new file mode 100644
index 00000000..85752653
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx3.c
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+ v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+ z4 = vec_absd(z4, z4);
+ return (int)vec_extract(z4, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c
new file mode 100644
index 00000000..a6acc738
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vsx4.c
@@ -0,0 +1,14 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+ v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
+ v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
+ v_uint32x4 v3 = vec_mod(v1, v2);
+ return (int)vec_extractm(v3);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c
new file mode 100644
index 00000000..18fb7ef9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vx.c
@@ -0,0 +1,16 @@
+#if (__VEC__ < 10301) || (__ARCH__ < 11)
+ #error VX not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+ __vector double x = vec_abs(vec_xl(argc, (double*)argv));
+ __vector double y = vec_load_len((double*)argv, (unsigned int)argc);
+
+ x = vec_round(vec_ceil(x) + vec_floor(y));
+ __vector bool long long m = vec_cmpge(x, y);
+ __vector long long i = vec_signed(vec_sel(x, y, m));
+
+ return (int)vec_extract(i, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c
new file mode 100644
index 00000000..e6933adc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe.c
@@ -0,0 +1,25 @@
+#if (__VEC__ < 10302) || (__ARCH__ < 12)
+ #error VXE not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+ __vector float x = vec_nabs(vec_xl(argc, (float*)argv));
+ __vector float y = vec_load_len((float*)argv, (unsigned int)argc);
+
+ x = vec_round(vec_ceil(x) + vec_floor(y));
+ __vector bool int m = vec_cmpge(x, y);
+ x = vec_sel(x, y, m);
+
+ // need to test the existence of the intrinsic "vflls", since vec_doublee
+ // maps to the wrong intrinsic "vfll".
+ // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871
+#if defined(__GNUC__) && !defined(__clang__)
+ __vector long long i = vec_signed(__builtin_s390_vflls(x));
+#else
+ __vector long long i = vec_signed(vec_doublee(x));
+#endif
+
+ return (int)vec_extract(i, 0);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c
new file mode 100644
index 00000000..f36d5712
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_vxe2.c
@@ -0,0 +1,21 @@
+#if (__VEC__ < 10303) || (__ARCH__ < 13)
+ #error VXE2 not supported
+#endif
+
+#include <vecintrin.h>
+
+int main(int argc, char **argv)
+{
+ int val;
+ __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
+ __vector signed short search = { 'g', 'h', 'g', 'o' };
+ __vector unsigned char len = { 0 };
+ __vector unsigned char res = vec_search_string_cc(large, search, len, &val);
+ __vector float x = vec_xl(argc, (float*)argv);
+ __vector int i = vec_signed(x);
+
+ i = vec_srdb(vec_sldb(i, i, 2), i, 3);
+ val += (int)vec_extract(res, 1);
+ val += vec_extract(i, 0);
+ return val;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c
new file mode 100644
index 00000000..51d70cf2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_xop.c
@@ -0,0 +1,12 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+ #include <ammintrin.h>
+#else
+ #include <x86intrin.h>
+#endif
+
+int main(void)
+{
+ __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
+ return _mm_cvtsi128_si32(a);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 00000000..9cfd0c2a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ * - MSVC has supported it since vs2019, see
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+ m64 = _kor_mask64(m64, m64);
+ m64 = _kxor_mask64(m64, m64);
+ m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+ m64 = _mm512_kunpackd(m64, m64);
+ m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+ return (int)_cvtmask64_u64(m64);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
new file mode 100644
index 00000000..f0dc88bd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
@@ -0,0 +1,16 @@
+#include <immintrin.h>
+/**
+ * Test DQ mask operations due to:
+ * - MSVC has supported it since vs2019, see
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
+ m8 = _kor_mask8(m8, m8);
+ m8 = _kxor_mask8(m8, m8);
+ m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
+ return (int)_cvtmask8_u32(m8);
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 00000000..db01aaee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+ __m512 one_ps = _mm512_set1_ps(1.0f);
+ __m512d one_pd = _mm512_set1_pd(1.0);
+ __m512i one_i64 = _mm512_set1_epi64(1);
+ // add
+ float sum_ps = _mm512_reduce_add_ps(one_ps);
+ double sum_pd = _mm512_reduce_add_pd(one_pd);
+ int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+ // mul
+ sum_ps += _mm512_reduce_mul_ps(one_ps);
+ sum_pd += _mm512_reduce_mul_pd(one_pd);
+ sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+ // min
+ sum_ps += _mm512_reduce_min_ps(one_ps);
+ sum_pd += _mm512_reduce_min_pd(one_pd);
+ sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+ // max
+ sum_ps += _mm512_reduce_max_ps(one_ps);
+ sum_pd += _mm512_reduce_max_pd(one_pd);
+ sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+ // and
+ sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+ // or
+ sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+ return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
new file mode 100644
index 00000000..a70b2a9f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector float fv4sf_t;
+typedef __vector unsigned char vec_t;
+
+int main(void)
+{
+ __vector_quad acc0;
+ float a[4] = {0,1,2,3};
+ float b[4] = {0,1,2,3};
+ vec_t *va = (vec_t *) a;
+ vec_t *vb = (vec_t *) b;
+ __builtin_mma_xvf32ger(&acc0, va[0], vb[0]);
+ fv4sf_t result[4];
+ __builtin_mma_disassemble_acc((void *)result, &acc0);
+ fv4sf_t c0 = result[0];
+ return (int)((float*)&c0)[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c
new file mode 100644
index 00000000..b73a6f43
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/extra_vsx_asm.c
@@ -0,0 +1,36 @@
+/**
+ * Testing ASM VSX register number fixer '%x<n>'
+ *
+ * old versions of CLANG don't support %x<n> in the inline asm template,
+ * which fixes the register number when using any of the register constraints wa, wd, wf.
+ *
+ * xref:
+ * - https://bugs.llvm.org/show_bug.cgi?id=31837
+ * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+ */
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+ #define vsx_ld vec_vsx_ld
+ #define vsx_st vec_vsx_st
+#else
+ #define vsx_ld vec_xl
+ #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+ float z4[] = {0, 0, 0, 0};
+ signed int zout[] = {0, 0, 0, 0};
+
+ __vector float vz4 = vsx_ld(0, z4);
+ __vector signed int asm_ret = vsx_ld(0, zout);
+
+ __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
+
+ vsx_st(asm_ret, 0, zout);
+ return zout[0];
+}
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c b/venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c
new file mode 100644
index 00000000..4cd09d42
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/checks/test_flags.c
@@ -0,0 +1 @@
+int test_flags;
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py
new file mode 100644
index 00000000..3ba501de
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/__init__.py
@@ -0,0 +1,35 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands.
+
+"""
+
+__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
+
+distutils_all = [ #'build_py',
+ 'clean',
+ 'install_clib',
+ 'install_scripts',
+ 'bdist',
+ 'bdist_dumb',
+ 'bdist_wininst',
+ ]
+
+__import__('distutils.command', globals(), locals(), distutils_all)
+
+__all__ = ['build',
+ 'config_compiler',
+ 'config',
+ 'build_src',
+ 'build_py',
+ 'build_ext',
+ 'build_clib',
+ 'build_scripts',
+ 'install',
+ 'install_data',
+ 'install_headers',
+ 'install_lib',
+ 'bdist_rpm',
+ 'sdist',
+ ] + distutils_all
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py
new file mode 100644
index 00000000..b72d0cab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/autodist.py
@@ -0,0 +1,148 @@
+"""This module implements additional tests ala autoconf which can be useful.
+
+"""
+import textwrap
+
+# We put them here since they could be easily reused outside numpy.distutils
+
+def check_inline(cmd):
+ """Return the inline identifier (may be empty)."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #ifndef __cplusplus
+ static %(inline)s int static_func (void)
+ {
+ return 0;
+ }
+ %(inline)s int nostatic_func (void)
+ {
+ return 0;
+ }
+ #endif""")
+
+ for kw in ['inline', '__inline__', '__inline']:
+ st = cmd.try_compile(body % {'inline': kw}, None, None)
+ if st:
+ return kw
+
+ return ''
+
+
+def check_restrict(cmd):
+ """Return the restrict identifier (may be empty)."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ static int static_func (char * %(restrict)s a)
+ {
+ return 0;
+ }
+ """)
+
+ for kw in ['restrict', '__restrict__', '__restrict']:
+ st = cmd.try_compile(body % {'restrict': kw}, None, None)
+ if st:
+ return kw
+
+ return ''
+
+
+def check_compiler_gcc(cmd):
+ """Check if the compiler is GCC."""
+
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ int
+ main()
+ {
+ #if (! defined __GNUC__)
+ #error gcc required
+ #endif
+ return 0;
+ }
+ """)
+ return cmd.try_compile(body, None, None)
+
+
+def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
+ """
+ Check that the gcc version is at least the specified version."""
+
+ cmd._check_compiler()
+ version = '.'.join([str(major), str(minor), str(patchlevel)])
+ body = textwrap.dedent("""
+ int
+ main()
+ {
+ #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
+ (__GNUC_MINOR__ < %(minor)d) || \\
+ (__GNUC_PATCHLEVEL__ < %(patchlevel)d)
+ #error gcc >= %(version)s required
+ #endif
+ return 0;
+ }
+ """)
+ kw = {'version': version, 'major': major, 'minor': minor,
+ 'patchlevel': patchlevel}
+
+ return cmd.try_compile(body % kw, None, None)
+
+
+def check_gcc_function_attribute(cmd, attribute, name):
+ """Return True if the given function attribute is supported."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #pragma GCC diagnostic error "-Wattributes"
+ #pragma clang diagnostic error "-Wattributes"
+
+ int %s %s(void* unused)
+ {
+ return 0;
+ }
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (attribute, name)
+ return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
+ include):
+ """Return True if the given function attribute is supported with
+ intrinsics."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #include<%s>
+ int %s %s(void)
+ {
+ %s;
+ return 0;
+ }
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (include, attribute, name, code)
+ return cmd.try_compile(body, None, None) != 0
+
+
+def check_gcc_variable_attribute(cmd, attribute):
+ """Return True if the given variable attribute is supported."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #pragma GCC diagnostic error "-Wattributes"
+ #pragma clang diagnostic error "-Wattributes"
+
+ int %s foo;
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (attribute, )
+ return cmd.try_compile(body, None, None) != 0
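
The helpers above all follow the same pattern: render a small C body and hand it to the command's try_compile(). A hedged usage sketch, assuming `config_cmd` is a numpy.distutils config-style command instance (the wrapper function name is hypothetical):

    # Sketch: drive the autoconf-style checks from a config command object.
    from numpy.distutils.command.autodist import check_inline, check_restrict

    def detect_c_keywords(config_cmd):
        inline = check_inline(config_cmd)      # '' when no inline variant compiles
        restrict = check_restrict(config_cmd)  # '' when no restrict variant compiles
        return {'inline': inline, 'restrict': restrict}
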
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py
new file mode 100644
index 00000000..682e7a8e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/bdist_rpm.py
@@ -0,0 +1,22 @@
+import os
+import sys
+if 'setuptools' in sys.modules:
+ from setuptools.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+else:
+ from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
+
+class bdist_rpm(old_bdist_rpm):
+
+ def _make_spec_file(self):
+ spec_file = old_bdist_rpm._make_spec_file(self)
+
+ # Replace hardcoded setup.py script name
+ # with the real setup script name.
+ setup_py = os.path.basename(sys.argv[0])
+ if setup_py == 'setup.py':
+ return spec_file
+ new_spec_file = []
+ for line in spec_file:
+ line = line.replace('setup.py', setup_py)
+ new_spec_file.append(line)
+ return new_spec_file
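
The rewrite in `_make_spec_file` is a plain line-wise substitution; a toy illustration (the alternative script name is hypothetical):

    # Toy example of the spec-file rewrite performed above.
    spec_file = ["%build", "python setup.py build"]
    setup_py = "setup_alt.py"   # hypothetical setup script name
    new_spec_file = [line.replace('setup.py', setup_py) for line in spec_file]
    # new_spec_file == ["%build", "python setup_alt.py build"]
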
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build.py
new file mode 100644
index 00000000..80830d55
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build.py
@@ -0,0 +1,62 @@
+import os
+import sys
+from distutils.command.build import build as old_build
+from distutils.util import get_platform
+from numpy.distutils.command.config_compiler import show_fortran_compilers
+
+class build(old_build):
+
+ sub_commands = [('config_cc', lambda *args: True),
+ ('config_fc', lambda *args: True),
+ ('build_src', old_build.has_ext_modules),
+ ] + old_build.sub_commands
+
+ user_options = old_build.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
+ ('cpu-baseline=', None,
+ "specify a list of enabled baseline CPU optimizations"),
+ ('cpu-dispatch=', None,
+ "specify a list of dispatched CPU optimizations"),
+ ('disable-optimization', None,
+ "disable CPU optimized code(dispatch,simd,fast...)"),
+ ('simd-test=', None,
+ "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
+ ]
+
+ help_options = old_build.help_options + [
+ ('help-fcompiler', None, "list available Fortran compilers",
+ show_fortran_compilers),
+ ]
+
+ def initialize_options(self):
+ old_build.initialize_options(self)
+ self.fcompiler = None
+ self.warn_error = False
+ self.cpu_baseline = "min"
+ self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
+ self.disable_optimization = False
+ """
+ the '_simd' module is a very large. Adding more dispatched features
+ will increase binary size and compile time. By default we minimize
+ the targeted features to those most commonly used by the NumPy SIMD interface(NPYV),
+ NOTE: any specified features will be ignored if they're:
+ - part of the baseline(--cpu-baseline)
+ - not part of dispatch-able features(--cpu-dispatch)
+ - not supported by compiler or platform
+ """
+ self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F " \
+ "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2"
+
+ def finalize_options(self):
+ build_scripts = self.build_scripts
+ old_build.finalize_options(self)
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+ if build_scripts is None:
+ self.build_scripts = os.path.join(self.build_base,
+ 'scripts' + plat_specifier)
+
+ def run(self):
+ old_build.run(self)
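
The options registered above are ordinary distutils options, so they surface on the `setup.py build` command line. An illustrative invocation (the feature names are examples only, not recommendations):

    # e.g.: python setup.py build --cpu-baseline="sse42" --cpu-dispatch="avx2 avx512_skx"
    # Omitting both keeps the defaults set in initialize_options:
    # cpu_baseline="min", cpu_dispatch="max -xop -fma4".
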
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py
new file mode 100644
index 00000000..45201f98
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_clib.py
@@ -0,0 +1,469 @@
+""" Modified version of build_clib that handles fortran source files.
+"""
+import os
+from glob import glob
+import shutil
+from distutils.command.build_clib import build_clib as old_build_clib
+from distutils.errors import DistutilsSetupError, DistutilsError, \
+ DistutilsFileError
+
+from numpy.distutils import log
+from distutils.dep_util import newer_group
+from numpy.distutils.misc_util import (
+ filter_sources, get_lib_source_files, get_numpy_include_dirs,
+ has_cxx_sources, has_f_sources, is_sequence
+)
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt
+
+# Fix Python distutils bug sf #1718574:
+_l = old_build_clib.user_options
+for _i in range(len(_l)):
+ if _l[_i][0] in ['build-clib', 'build-temp']:
+ _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
+#
+
+
+class build_clib(old_build_clib):
+
+ description = "build C/C++/F libraries used by Python extensions"
+
+ user_options = old_build_clib.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ('inplace', 'i', 'Build in-place'),
+ ('parallel=', 'j',
+ "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
+ ('cpu-baseline=', None,
+ "specify a list of enabled baseline CPU optimizations"),
+ ('cpu-dispatch=', None,
+ "specify a list of dispatched CPU optimizations"),
+ ('disable-optimization', None,
+ "disable CPU optimized code(dispatch,simd,fast...)"),
+ ]
+
+ boolean_options = old_build_clib.boolean_options + \
+ ['inplace', 'warn-error', 'disable-optimization']
+
+ def initialize_options(self):
+ old_build_clib.initialize_options(self)
+ self.fcompiler = None
+ self.inplace = 0
+ self.parallel = None
+ self.warn_error = None
+ self.cpu_baseline = None
+ self.cpu_dispatch = None
+ self.disable_optimization = None
+
+
+ def finalize_options(self):
+ if self.parallel:
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError as e:
+ raise ValueError("--parallel/-j argument must be an integer") from e
+ old_build_clib.finalize_options(self)
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ ('cpu_baseline', 'cpu_baseline'),
+ ('cpu_dispatch', 'cpu_dispatch'),
+ ('disable_optimization', 'disable_optimization')
+ )
+
+ def have_f_sources(self):
+ for (lib_name, build_info) in self.libraries:
+ if has_f_sources(build_info.get('sources', [])):
+ return True
+ return False
+
+ def have_cxx_sources(self):
+ for (lib_name, build_info) in self.libraries:
+ if has_cxx_sources(build_info.get('sources', [])):
+ return True
+ return False
+
+ def run(self):
+ if not self.libraries:
+ return
+
+ # Make sure that library sources are complete.
+ languages = []
+
+ # Make sure that extension sources are complete.
+ self.run_command('build_src')
+
+ for (lib_name, build_info) in self.libraries:
+ l = build_info.get('language', None)
+ if l and l not in languages:
+ languages.append(l)
+
+ from distutils.ccompiler import new_compiler
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.compiler.customize(self.distribution,
+ need_cxx=self.have_cxx_sources())
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
+ libraries = self.libraries
+ self.libraries = None
+ self.compiler.customize_cmd(self)
+ self.libraries = libraries
+
+ self.compiler.show_customization()
+
+ if not self.disable_optimization:
+ dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
+ dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
+ opt_cache_path = os.path.abspath(
+ os.path.join(self.build_temp, 'ccompiler_opt_cache_clib.py')
+ )
+ if hasattr(self, "compiler_opt"):
+ # By default `CCompilerOpt` updates the cache at process exit,
+ # which may lead to duplicate building
+ # (see build_extension()/force_rebuild) if run() is called
+ # multiple times within the same os process/thread without
+ # giving the previous instances of `CCompilerOpt` a chance
+ # to update the cache.
+ self.compiler_opt.cache_flush()
+
+ self.compiler_opt = new_ccompiler_opt(
+ compiler=self.compiler, dispatch_hpath=dispatch_hpath,
+ cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
+ cache_path=opt_cache_path
+ )
+ def report(copt):
+ log.info("\n########### CLIB COMPILER OPTIMIZATION ###########")
+ log.info(copt.report(full=True))
+
+ import atexit
+ atexit.register(report, self.compiler_opt)
+
+ if self.have_f_sources():
+ from numpy.distutils.fcompiler import new_fcompiler
+ self._f_compiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90='f90' in languages,
+ c_compiler=self.compiler)
+ if self._f_compiler is not None:
+ self._f_compiler.customize(self.distribution)
+
+ libraries = self.libraries
+ self.libraries = None
+ self._f_compiler.customize_cmd(self)
+ self.libraries = libraries
+
+ self._f_compiler.show_customization()
+ else:
+ self._f_compiler = None
+
+ self.build_libraries(self.libraries)
+
+ if self.inplace:
+ for l in self.distribution.installed_libraries:
+ libname = self.compiler.library_filename(l.name)
+ source = os.path.join(self.build_clib, libname)
+ target = os.path.join(l.target_dir, libname)
+ self.mkpath(l.target_dir)
+ shutil.copy(source, target)
+
+ def get_source_files(self):
+ self.check_library_list(self.libraries)
+ filenames = []
+ for lib in self.libraries:
+ filenames.extend(get_lib_source_files(lib))
+ return filenames
+
+ def build_libraries(self, libraries):
+ for (lib_name, build_info) in libraries:
+ self.build_a_library(build_info, lib_name, libraries)
+
+ def assemble_flags(self, in_flags):
+ """ Assemble flags from flag list
+
+ Parameters
+ ----------
+ in_flags : None or sequence
+ None corresponds to empty list. Sequence elements can be strings
+ or callables that return lists of strings. Callable takes `self` as
+ single parameter.
+
+ Returns
+ -------
+ out_flags : list
+ """
+ if in_flags is None:
+ return []
+ out_flags = []
+ for in_flag in in_flags:
+ if callable(in_flag):
+ out_flags += in_flag(self)
+ else:
+ out_flags.append(in_flag)
+ return out_flags
+
+ def build_a_library(self, build_info, lib_name, libraries):
+ # default compilers
+ compiler = self.compiler
+ fcompiler = self._f_compiler
+
+ sources = build_info.get('sources')
+ if sources is None or not is_sequence(sources):
+ raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
+ "'sources' must be present and must be " +
+ "a list of source filenames") % lib_name)
+ sources = list(sources)
+
+ c_sources, cxx_sources, f_sources, fmodule_sources \
+ = filter_sources(sources)
+ requiref90 = not not fmodule_sources or \
+ build_info.get('language', 'c') == 'f90'
+
+ # save source type information so that build_ext can use it.
+ source_languages = []
+ if c_sources:
+ source_languages.append('c')
+ if cxx_sources:
+ source_languages.append('c++')
+ if requiref90:
+ source_languages.append('f90')
+ elif f_sources:
+ source_languages.append('f77')
+ build_info['source_languages'] = source_languages
+
+ lib_file = compiler.library_filename(lib_name,
+ output_dir=self.build_clib)
+ depends = sources + build_info.get('depends', [])
+
+ force_rebuild = self.force
+ if not self.disable_optimization and not self.compiler_opt.is_cached():
+ log.debug("Detected changes on compiler optimizations")
+ force_rebuild = True
+ if not (force_rebuild or newer_group(depends, lib_file, 'newer')):
+ log.debug("skipping '%s' library (up-to-date)", lib_name)
+ return
+ else:
+ log.info("building '%s' library", lib_name)
+
+ config_fc = build_info.get('config_fc', {})
+ if fcompiler is not None and config_fc:
+ log.info('using additional config_fc from setup script '
+ 'for fortran compiler: %s'
+ % (config_fc,))
+ from numpy.distutils.fcompiler import new_fcompiler
+ fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90=requiref90,
+ c_compiler=self.compiler)
+ if fcompiler is not None:
+ dist = self.distribution
+ base_config_fc = dist.get_option_dict('config_fc').copy()
+ base_config_fc.update(config_fc)
+ fcompiler.customize(base_config_fc)
+
+ # check availability of Fortran compilers
+ if (f_sources or fmodule_sources) and fcompiler is None:
+ raise DistutilsError("library %s has Fortran sources"
+ " but no Fortran compiler found" % (lib_name))
+
+ if fcompiler is not None:
+ fcompiler.extra_f77_compile_args = build_info.get(
+ 'extra_f77_compile_args') or []
+ fcompiler.extra_f90_compile_args = build_info.get(
+ 'extra_f90_compile_args') or []
+
+ macros = build_info.get('macros')
+ if macros is None:
+ macros = []
+ include_dirs = build_info.get('include_dirs')
+ if include_dirs is None:
+ include_dirs = []
+ # Flags can be strings, or callables that return a list of strings.
+ extra_postargs = self.assemble_flags(
+ build_info.get('extra_compiler_args'))
+ extra_cflags = self.assemble_flags(
+ build_info.get('extra_cflags'))
+ extra_cxxflags = self.assemble_flags(
+ build_info.get('extra_cxxflags'))
+
+ include_dirs.extend(get_numpy_include_dirs())
+ # where compiled F90 module files are:
+ module_dirs = build_info.get('module_dirs') or []
+ module_build_dir = os.path.dirname(lib_file)
+ if requiref90:
+ self.mkpath(module_build_dir)
+
+ if compiler.compiler_type == 'msvc':
+ # this hack works around the msvc compiler attributes
+ # problem, msvc uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+
+ # filtering C dispatch-table sources when optimization is not disabled,
+ # otherwise treated as normal sources.
+ copt_c_sources = []
+ copt_cxx_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
+ if not self.disable_optimization:
+ bsrc_dir = self.get_finalized_command("build_src").build_src
+ dispatch_hpath = os.path.join("numpy", "distutils", "include")
+ dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+ include_dirs.append(dispatch_hpath)
+
+ copt_build_src = None if self.inplace else bsrc_dir
+ for _srcs, _dst, _ext in (
+ ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+ ((c_sources, cxx_sources), copt_cxx_sources,
+ ('.dispatch.cpp', '.dispatch.cxx'))
+ ):
+ for _src in _srcs:
+ _dst += [
+ _src.pop(_src.index(s))
+ for s in _src[:] if s.endswith(_ext)
+ ]
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+ else:
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+ objects = []
+ if copt_cxx_sources:
+ log.info("compiling C++ dispatch-able sources")
+ cxx_compiler = compiler.cxx_compiler()  # needed by try_dispatch below
+ objects += self.compiler_opt.try_dispatch(
+ copt_cxx_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cxxflags,
+ ccompiler=cxx_compiler
+ )
+
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + extra_cflags)
+
+ if c_sources:
+ log.info("compiling C sources")
+ objects += compiler.compile(
+ c_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs +
+ copt_baseline_flags +
+ extra_cflags))
+
+ if cxx_sources:
+ log.info("compiling C++ sources")
+ cxx_compiler = compiler.cxx_compiler()
+ cxx_objects = cxx_compiler.compile(
+ cxx_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_postargs +
+ copt_baseline_flags +
+ extra_cxxflags))
+ objects.extend(cxx_objects)
+
+ if f_sources or fmodule_sources:
+ extra_postargs = []
+ f_objects = []
+
+ if requiref90:
+ if fcompiler.module_dir_switch is None:
+ existing_modules = glob('*.mod')
+ extra_postargs += fcompiler.module_options(
+ module_dirs, module_build_dir)
+
+ if fmodule_sources:
+ log.info("compiling Fortran 90 module sources")
+ f_objects += fcompiler.compile(fmodule_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+
+ if requiref90 and self._f_compiler.module_dir_switch is None:
+ # move new compiled F90 module files to module_build_dir
+ for f in glob('*.mod'):
+ if f in existing_modules:
+ continue
+ t = os.path.join(module_build_dir, f)
+ if os.path.abspath(f) == os.path.abspath(t):
+ continue
+ if os.path.isfile(t):
+ os.remove(t)
+ try:
+ self.move_file(f, module_build_dir)
+ except DistutilsFileError:
+ log.warn('failed to move %r to %r'
+ % (f, module_build_dir))
+
+ if f_sources:
+ log.info("compiling Fortran sources")
+ f_objects += fcompiler.compile(f_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+ else:
+ f_objects = []
+
+ if f_objects and not fcompiler.can_ccompiler_link(compiler):
+ # Default linker cannot link Fortran object files, and results
+ # need to be wrapped later. Instead of creating a real static
+ # library, just keep track of the object files.
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.fobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
+
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.cobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in objects))
+
+ # create empty "library" file for dependency tracking
+ lib_fname = os.path.join(self.build_clib,
+ lib_name + compiler.static_lib_extension)
+ with open(lib_fname, 'wb') as f:
+ pass
+ else:
+ # assume that default linker is suitable for
+ # linking Fortran object files
+ objects.extend(f_objects)
+ compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
+
+ # fix library dependencies
+ clib_libraries = build_info.get('libraries', [])
+ for lname, binfo in libraries:
+ if lname in clib_libraries:
+ clib_libraries.extend(binfo.get('libraries', []))
+ if clib_libraries:
+ build_info['libraries'] = clib_libraries
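
As documented in `assemble_flags`, extra flag lists may mix plain strings with callables that receive the command object and return more strings. A hedged sketch of a `build_info` entry exercising that (the file name and helper function are hypothetical):

    # Hypothetical build_info entry: strings pass through assemble_flags
    # unchanged; callables are invoked with the build_clib command instance.
    def _debug_aware_cflags(cmd):
        return [] if cmd.debug else ['-O3']

    build_info = {
        'sources': ['example.c'],
        'extra_cflags': ['-fno-strict-aliasing', _debug_aware_cflags],
    }
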
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py
new file mode 100644
index 00000000..6dc6b426
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_ext.py
@@ -0,0 +1,740 @@
+""" Modified version of build_ext that handles fortran source files.
+
+"""
+import os
+import subprocess
+from glob import glob
+
+from distutils.dep_util import newer_group
+from distutils.command.build_ext import build_ext as old_build_ext
+from distutils.errors import DistutilsFileError, DistutilsSetupError,\
+ DistutilsError
+from distutils.file_util import copy_file
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.system_info import combine_paths
+from numpy.distutils.misc_util import (
+ filter_sources, get_ext_source_files, get_numpy_include_dirs,
+ has_cxx_sources, has_f_sources, is_sequence
+)
+from numpy.distutils.command.config_compiler import show_fortran_compilers
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
+
+class build_ext (old_build_ext):
+
+ description = "build C/C++/F extensions (compile/link to build directory)"
+
+ user_options = old_build_ext.user_options + [
+ ('fcompiler=', None,
+ "specify the Fortran compiler type"),
+ ('parallel=', 'j',
+ "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
+ ('cpu-baseline=', None,
+ "specify a list of enabled baseline CPU optimizations"),
+ ('cpu-dispatch=', None,
+ "specify a list of dispatched CPU optimizations"),
+ ('disable-optimization', None,
+ "disable CPU optimized code(dispatch,simd,fast...)"),
+ ('simd-test=', None,
+ "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
+ ]
+
+ help_options = old_build_ext.help_options + [
+ ('help-fcompiler', None, "list available Fortran compilers",
+ show_fortran_compilers),
+ ]
+
+ boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
+
+ def initialize_options(self):
+ old_build_ext.initialize_options(self)
+ self.fcompiler = None
+ self.parallel = None
+ self.warn_error = None
+ self.cpu_baseline = None
+ self.cpu_dispatch = None
+ self.disable_optimization = None
+ self.simd_test = None
+
+ def finalize_options(self):
+ if self.parallel:
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError as e:
+ raise ValueError("--parallel/-j argument must be an integer") from e
+
+ # Ensure that self.include_dirs and self.distribution.include_dirs
+ # refer to the same list object. finalize_options will modify
+ # self.include_dirs, but self.distribution.include_dirs is used
+ # during the actual build.
+ # self.include_dirs is None unless paths are specified with
+ # --include-dirs.
+ # The include paths will be passed to the compiler in the order:
+ # numpy paths, --include-dirs paths, Python include path.
+ if isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+ incl_dirs = self.include_dirs or []
+ if self.distribution.include_dirs is None:
+ self.distribution.include_dirs = []
+ self.include_dirs = self.distribution.include_dirs
+ self.include_dirs.extend(incl_dirs)
+
+ old_build_ext.finalize_options(self)
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ ('cpu_baseline', 'cpu_baseline'),
+ ('cpu_dispatch', 'cpu_dispatch'),
+ ('disable_optimization', 'disable_optimization'),
+ ('simd_test', 'simd_test')
+ )
+ CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
+
+ def run(self):
+ if not self.extensions:
+ return
+
+ # Make sure that extension sources are complete.
+ self.run_command('build_src')
+
+ if self.distribution.has_c_libraries():
+ if self.inplace:
+ if self.distribution.have_run.get('build_clib'):
+ log.warn('build_clib already run, it is too late to '
+ 'ensure in-place build of build_clib')
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
+ else:
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
+ build_clib.inplace = 1
+ build_clib.ensure_finalized()
+ build_clib.run()
+ self.distribution.have_run['build_clib'] = 1
+
+ else:
+ self.run_command('build_clib')
+ build_clib = self.get_finalized_command('build_clib')
+ self.library_dirs.append(build_clib.build_clib)
+ else:
+ build_clib = None
+
+ # Not including C libraries to the list of
+ # extension libraries automatically to prevent
+ # bogus linking commands. Extensions must
+ # explicitly specify the C libraries that they use.
+
+ from distutils.ccompiler import new_compiler
+ from numpy.distutils.fcompiler import new_fcompiler
+
+ compiler_type = self.compiler
+ # Initialize C compiler:
+ self.compiler = new_compiler(compiler=compiler_type,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ self.compiler.customize(self.distribution)
+ self.compiler.customize_cmd(self)
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
+ self.compiler.show_customization()
+
+ if not self.disable_optimization:
+ dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
+ dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
+ opt_cache_path = os.path.abspath(
+ os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
+ )
+ if hasattr(self, "compiler_opt"):
+ # By default `CCompilerOpt` updates the cache at process exit,
+ # which may lead to duplicate building
+ # (see build_extension()/force_rebuild) if run() is called
+ # multiple times within the same os process/thread without
+ # giving the previous instances of `CCompilerOpt` a chance
+ # to update the cache.
+ self.compiler_opt.cache_flush()
+
+ self.compiler_opt = new_ccompiler_opt(
+ compiler=self.compiler, dispatch_hpath=dispatch_hpath,
+ cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
+ cache_path=opt_cache_path
+ )
+ def report(copt):
+ log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
+ log.info(copt.report(full=True))
+
+ import atexit
+ atexit.register(report, self.compiler_opt)
+
+ # Setup directory for storing generated extra DLL files on Windows
+ self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
+ if not os.path.isdir(self.extra_dll_dir):
+ os.makedirs(self.extra_dll_dir)
+
+ # Create mapping of libraries built by build_clib:
+ clibs = {}
+ if build_clib is not None:
+ for libname, build_info in build_clib.libraries or []:
+ if libname in clibs and clibs[libname] != build_info:
+ log.warn('library %r defined more than once,'
+ ' overwriting build_info\n%s... \nwith\n%s...'
+ % (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
+ clibs[libname] = build_info
+ # .. and distribution libraries:
+ for libname, build_info in self.distribution.libraries or []:
+ if libname in clibs:
+ # build_clib libraries have a precedence before distribution ones
+ continue
+ clibs[libname] = build_info
+
+ # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
+ # Update extension libraries, library_dirs, and macros.
+ all_languages = set()
+ for ext in self.extensions:
+ ext_languages = set()
+ c_libs = []
+ c_lib_dirs = []
+ macros = []
+ for libname in ext.libraries:
+ if libname in clibs:
+ binfo = clibs[libname]
+ c_libs += binfo.get('libraries', [])
+ c_lib_dirs += binfo.get('library_dirs', [])
+ for m in binfo.get('macros', []):
+ if m not in macros:
+ macros.append(m)
+
+ for l in clibs.get(libname, {}).get('source_languages', []):
+ ext_languages.add(l)
+ if c_libs:
+ new_c_libs = ext.libraries + c_libs
+ log.info('updating extension %r libraries from %r to %r'
+ % (ext.name, ext.libraries, new_c_libs))
+ ext.libraries = new_c_libs
+ ext.library_dirs = ext.library_dirs + c_lib_dirs
+ if macros:
+ log.info('extending extension %r defined_macros with %r'
+ % (ext.name, macros))
+ ext.define_macros = ext.define_macros + macros
+
+ # determine extension languages
+ if has_f_sources(ext.sources):
+ ext_languages.add('f77')
+ if has_cxx_sources(ext.sources):
+ ext_languages.add('c++')
+ l = ext.language or self.compiler.detect_language(ext.sources)
+ if l:
+ ext_languages.add(l)
+
+ # reset language attribute for choosing proper linker
+ #
+ # When we build extensions with multiple languages, we have to
+ # choose a linker. The rules here are:
+ # 1. if there is Fortran code, always prefer the Fortran linker,
+ # 2. otherwise prefer C++ over C,
+            #  3. users can force a particular linker by passing
+            #     `language='c'` (or 'c++', 'f90', 'f77') to their
+            #     config.add_extension() calls.
+ if 'c++' in ext_languages:
+ ext_language = 'c++'
+ else:
+ ext_language = 'c' # default
+
+ has_fortran = False
+ if 'f90' in ext_languages:
+ ext_language = 'f90'
+ has_fortran = True
+ elif 'f77' in ext_languages:
+ ext_language = 'f77'
+ has_fortran = True
+
+ if not ext.language or has_fortran:
+ if l and l != ext_language and ext.language:
+ log.warn('resetting extension %r language from %r to %r.' %
+ (ext.name, l, ext_language))
+
+ ext.language = ext_language
+
+ # global language
+ all_languages.update(ext_languages)
+
+ need_f90_compiler = 'f90' in all_languages
+ need_f77_compiler = 'f77' in all_languages
+ need_cxx_compiler = 'c++' in all_languages
+
+ # Initialize C++ compiler:
+ if need_cxx_compiler:
+ self._cxx_compiler = new_compiler(compiler=compiler_type,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ compiler = self._cxx_compiler
+ compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
+ compiler.customize_cmd(self)
+ compiler.show_customization()
+ self._cxx_compiler = compiler.cxx_compiler()
+ else:
+ self._cxx_compiler = None
+
+ # Initialize Fortran 77 compiler:
+ if need_f77_compiler:
+ ctype = self.fcompiler
+ self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90=False,
+ c_compiler=self.compiler)
+ fcompiler = self._f77_compiler
+ if fcompiler:
+ ctype = fcompiler.compiler_type
+ fcompiler.customize(self.distribution)
+ if fcompiler and fcompiler.get_version():
+ fcompiler.customize_cmd(self)
+ fcompiler.show_customization()
+ else:
+ self.warn('f77_compiler=%s is not available.' %
+ (ctype))
+ self._f77_compiler = None
+ else:
+ self._f77_compiler = None
+
+ # Initialize Fortran 90 compiler:
+ if need_f90_compiler:
+ ctype = self.fcompiler
+ self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90=True,
+ c_compiler=self.compiler)
+ fcompiler = self._f90_compiler
+ if fcompiler:
+ ctype = fcompiler.compiler_type
+ fcompiler.customize(self.distribution)
+ if fcompiler and fcompiler.get_version():
+ fcompiler.customize_cmd(self)
+ fcompiler.show_customization()
+ else:
+ self.warn('f90_compiler=%s is not available.' %
+ (ctype))
+ self._f90_compiler = None
+ else:
+ self._f90_compiler = None
+
+ # Build extensions
+ self.build_extensions()
+
+ # Copy over any extra DLL files
+        # FIXME: when there is more than one package, we blindly assume
+        # that every package needs all of the libraries, resulting in a
+        # larger wheel than necessary. This should be fixed, but the case
+        # is rare enough that it is not handled here.
+ pkg_roots = {
+ self.get_ext_fullname(ext.name).split('.')[0]
+ for ext in self.extensions
+ }
+ for pkg_root in pkg_roots:
+ shared_lib_dir = os.path.join(pkg_root, '.libs')
+ if not self.inplace:
+ shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
+ for fn in os.listdir(self.extra_dll_dir):
+ if not os.path.isdir(shared_lib_dir):
+ os.makedirs(shared_lib_dir)
+ if not fn.lower().endswith('.dll'):
+ continue
+ runtime_lib = os.path.join(self.extra_dll_dir, fn)
+ copy_file(runtime_lib, shared_lib_dir)
+
+ def swig_sources(self, sources, extensions=None):
+ # Do nothing. Swig sources have been handled in build_src command.
+ return sources
+
+ def build_extension(self, ext):
+ sources = ext.sources
+ if sources is None or not is_sequence(sources):
+ raise DistutilsSetupError(
+ ("in 'ext_modules' option (extension '%s'), " +
+ "'sources' must be present and must be " +
+ "a list of source filenames") % ext.name)
+ sources = list(sources)
+
+ if not sources:
+ return
+
+ fullname = self.get_ext_fullname(ext.name)
+ if self.inplace:
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[0:-1])
+ base = modpath[-1]
+ build_py = self.get_finalized_command('build_py')
+ package_dir = build_py.get_package_dir(package)
+ ext_filename = os.path.join(package_dir,
+ self.get_ext_filename(base))
+ else:
+ ext_filename = os.path.join(self.build_lib,
+ self.get_ext_filename(fullname))
+ depends = sources + ext.depends
+
+ force_rebuild = self.force
+ if not self.disable_optimization and not self.compiler_opt.is_cached():
+ log.debug("Detected changes on compiler optimizations")
+ force_rebuild = True
+ if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
+ log.debug("skipping '%s' extension (up-to-date)", ext.name)
+ return
+ else:
+ log.info("building '%s' extension", ext.name)
+
+ extra_args = ext.extra_compile_args or []
+ extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
+ extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []
+
+ macros = ext.define_macros[:]
+ for undef in ext.undef_macros:
+ macros.append((undef,))
+
+ c_sources, cxx_sources, f_sources, fmodule_sources = \
+ filter_sources(ext.sources)
+
+ if self.compiler.compiler_type == 'msvc':
+ if cxx_sources:
+ # Needed to compile kiva.agg._agg extension.
+ extra_args.append('/Zm1000')
+            # This hack works around the MSVC compiler attributes
+            # problem; MSVC uses its own convention :(
+ c_sources += cxx_sources
+ cxx_sources = []
+
+ # Set Fortran/C++ compilers for compilation and linking.
+ if ext.language == 'f90':
+ fcompiler = self._f90_compiler
+ elif ext.language == 'f77':
+ fcompiler = self._f77_compiler
+ else: # in case ext.language is c++, for instance
+ fcompiler = self._f90_compiler or self._f77_compiler
+ if fcompiler is not None:
+            fcompiler.extra_f77_compile_args = getattr(
+                ext, 'extra_f77_compile_args', None) or []
+            fcompiler.extra_f90_compile_args = getattr(
+                ext, 'extra_f90_compile_args', None) or []
+ cxx_compiler = self._cxx_compiler
+
+ # check for the availability of required compilers
+ if cxx_sources and cxx_compiler is None:
+            raise DistutilsError("extension %r has C++ sources "
+                                 "but no C++ compiler found" % (ext.name))
+ if (f_sources or fmodule_sources) and fcompiler is None:
+ raise DistutilsError("extension %r has Fortran sources "
+ "but no Fortran compiler found" % (ext.name))
+ if ext.language in ['f77', 'f90'] and fcompiler is None:
+ self.warn("extension %r has Fortran libraries "
+ "but no Fortran linker found, using default linker" % (ext.name))
+ if ext.language == 'c++' and cxx_compiler is None:
+ self.warn("extension %r has C++ libraries "
+ "but no C++ linker found, using default linker" % (ext.name))
+
+ kws = {'depends': ext.depends}
+ output_dir = self.build_temp
+
+ include_dirs = ext.include_dirs + get_numpy_include_dirs()
+
+    # Filter out C dispatch-table sources when optimization is not
+    # disabled; otherwise they are treated as normal sources.
+ copt_c_sources = []
+ copt_cxx_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
+ if not self.disable_optimization:
+ bsrc_dir = self.get_finalized_command("build_src").build_src
+ dispatch_hpath = os.path.join("numpy", "distutils", "include")
+ dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
+ include_dirs.append(dispatch_hpath)
+
+ copt_build_src = None if self.inplace else bsrc_dir
+ for _srcs, _dst, _ext in (
+ ((c_sources,), copt_c_sources, ('.dispatch.c',)),
+ ((c_sources, cxx_sources), copt_cxx_sources,
+ ('.dispatch.cpp', '.dispatch.cxx'))
+ ):
+ for _src in _srcs:
+ _dst += [
+ _src.pop(_src.index(s))
+ for s in _src[:] if s.endswith(_ext)
+ ]
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+ else:
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
+ c_objects = []
+ if copt_cxx_sources:
+ log.info("compiling C++ dispatch-able sources")
+ c_objects += self.compiler_opt.try_dispatch(
+ copt_cxx_sources,
+ output_dir=output_dir,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args + extra_cxxflags,
+ ccompiler=cxx_compiler,
+ **kws
+ )
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ c_objects += self.compiler_opt.try_dispatch(
+ copt_c_sources,
+ output_dir=output_dir,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args + extra_cflags,
+ **kws)
+ if c_sources:
+ log.info("compiling C sources")
+ c_objects += self.compiler.compile(
+ c_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_args + copt_baseline_flags +
+ extra_cflags),
+ **kws)
+ if cxx_sources:
+ log.info("compiling C++ sources")
+ c_objects += cxx_compiler.compile(
+ cxx_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=(extra_args + copt_baseline_flags +
+ extra_cxxflags),
+ **kws)
+
+ extra_postargs = []
+ f_objects = []
+ if fmodule_sources:
+ log.info("compiling Fortran 90 module sources")
+ module_dirs = ext.module_dirs[:]
+ module_build_dir = os.path.join(
+ self.build_temp, os.path.dirname(
+ self.get_ext_filename(fullname)))
+
+ self.mkpath(module_build_dir)
+ if fcompiler.module_dir_switch is None:
+ existing_modules = glob('*.mod')
+ extra_postargs += fcompiler.module_options(
+ module_dirs, module_build_dir)
+ f_objects += fcompiler.compile(fmodule_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs,
+ depends=ext.depends)
+
+ if fcompiler.module_dir_switch is None:
+ for f in glob('*.mod'):
+ if f in existing_modules:
+ continue
+ t = os.path.join(module_build_dir, f)
+ if os.path.abspath(f) == os.path.abspath(t):
+ continue
+ if os.path.isfile(t):
+ os.remove(t)
+ try:
+ self.move_file(f, module_build_dir)
+ except DistutilsFileError:
+ log.warn('failed to move %r to %r' %
+ (f, module_build_dir))
+ if f_sources:
+ log.info("compiling Fortran sources")
+ f_objects += fcompiler.compile(f_sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs,
+ depends=ext.depends)
+
+ if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
+ unlinkable_fobjects = f_objects
+ objects = c_objects
+ else:
+ unlinkable_fobjects = []
+ objects = c_objects + f_objects
+
+ if ext.extra_objects:
+ objects.extend(ext.extra_objects)
+ extra_args = ext.extra_link_args or []
+ libraries = self.get_libraries(ext)[:]
+ library_dirs = ext.library_dirs[:]
+
+ linker = self.compiler.link_shared_object
+ # Always use system linker when using MSVC compiler.
+ if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
+ # expand libraries with fcompiler libraries as we are
+ # not using fcompiler linker
+ self._libs_with_msvc_and_fortran(
+ fcompiler, libraries, library_dirs)
+ if ext.runtime_library_dirs:
+ # gcc adds RPATH to the link. On windows, copy the dll into
+ # self.extra_dll_dir instead.
+ for d in ext.runtime_library_dirs:
+ for f in glob(d + '/*.dll'):
+ copy_file(f, self.extra_dll_dir)
+ ext.runtime_library_dirs = []
+
+ elif ext.language in ['f77', 'f90'] and fcompiler is not None:
+ linker = fcompiler.link_shared_object
+ if ext.language == 'c++' and cxx_compiler is not None:
+ linker = cxx_compiler.link_shared_object
+
+ if fcompiler is not None:
+ objects, libraries = self._process_unlinkable_fobjects(
+ objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects)
+
+ linker(objects, ext_filename,
+ libraries=libraries,
+ library_dirs=library_dirs,
+ runtime_library_dirs=ext.runtime_library_dirs,
+ extra_postargs=extra_args,
+ export_symbols=self.get_export_symbols(ext),
+ debug=self.debug,
+ build_temp=self.build_temp,
+ target_lang=ext.language)
+
+ def _add_dummy_mingwex_sym(self, c_sources):
+ build_src = self.get_finalized_command("build_src").build_src
+ build_clib = self.get_finalized_command("build_clib").build_clib
+ objects = self.compiler.compile([os.path.join(build_src,
+ "gfortran_vs2003_hack.c")],
+ output_dir=self.build_temp)
+ self.compiler.create_static_lib(
+ objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+
+ def _process_unlinkable_fobjects(self, objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects):
+ libraries = list(libraries)
+ objects = list(objects)
+ unlinkable_fobjects = list(unlinkable_fobjects)
+
+ # Expand possible fake static libraries to objects;
+ # make sure to iterate over a copy of the list as
+ # "fake" libraries will be removed as they are
+ # encountered
+ for lib in libraries[:]:
+ for libdir in library_dirs:
+ fake_lib = os.path.join(libdir, lib + '.fobjects')
+ if os.path.isfile(fake_lib):
+ # Replace fake static library
+ libraries.remove(lib)
+ with open(fake_lib, 'r') as f:
+ unlinkable_fobjects.extend(f.read().splitlines())
+
+ # Expand C objects
+ c_lib = os.path.join(libdir, lib + '.cobjects')
+ with open(c_lib, 'r') as f:
+ objects.extend(f.read().splitlines())
+
+ # Wrap unlinkable objects to a linkable one
+ if unlinkable_fobjects:
+ fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
+ wrapped = fcompiler.wrap_unlinkable_objects(
+ fobjects, output_dir=self.build_temp,
+ extra_dll_dir=self.extra_dll_dir)
+ objects.extend(wrapped)
+
+ return objects, libraries
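For orientation, a minimal sketch of the '.fobjects'/'.cobjects' convention this method consumes; the library name and object paths below are illustrative, not part of this diff:

    # Hypothetical layout of a "fake" static library: build_clib writes one
    # absolute object path per line, and build_ext expands it here.
    import os
    import tempfile

    libdir = tempfile.mkdtemp()
    with open(os.path.join(libdir, 'mylib.fobjects'), 'w') as f:
        f.write('/abs/path/wrapped_routine.o\n')  # Fortran objects the C linker cannot handle
    with open(os.path.join(libdir, 'mylib.cobjects'), 'w') as f:
        f.write('/abs/path/helper.o\n')           # plain C objects, linked directly

    # _process_unlinkable_fobjects() then drops 'mylib' from the libraries
    # list, links the C objects directly, and asks the Fortran compiler to
    # wrap the unlinkable Fortran objects into something linkable.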
+
+ def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
+ c_library_dirs):
+ if fcompiler is None:
+ return
+
+ for libname in c_libraries:
+ if libname.startswith('msvc'):
+ continue
+ fileexists = False
+ for libdir in c_library_dirs or []:
+ libfile = os.path.join(libdir, '%s.lib' % (libname))
+ if os.path.isfile(libfile):
+ fileexists = True
+ break
+ if fileexists:
+ continue
+ # make g77-compiled static libs available to MSVC
+ fileexists = False
+ for libdir in c_library_dirs:
+ libfile = os.path.join(libdir, 'lib%s.a' % (libname))
+ if os.path.isfile(libfile):
+ # copy libname.a file to name.lib so that MSVC linker
+ # can find it
+ libfile2 = os.path.join(self.build_temp, libname + '.lib')
+ copy_file(libfile, libfile2)
+ if self.build_temp not in c_library_dirs:
+ c_library_dirs.append(self.build_temp)
+ fileexists = True
+ break
+ if fileexists:
+ continue
+ log.warn('could not find library %r in directories %s'
+ % (libname, c_library_dirs))
+
+ # Always use system linker when using MSVC compiler.
+ f_lib_dirs = []
+ for dir in fcompiler.library_dirs:
+ # correct path when compiling in Cygwin but with normal Win
+ # Python
+ if dir.startswith('/usr/lib'):
+ try:
+ dir = subprocess.check_output(['cygpath', '-w', dir])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ dir = filepath_from_subprocess_output(dir)
+ f_lib_dirs.append(dir)
+ c_library_dirs.extend(f_lib_dirs)
+
+ # make g77-compiled static libs available to MSVC
+ for lib in fcompiler.libraries:
+ if not lib.startswith('msvc'):
+ c_libraries.append(lib)
+ p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
+ if p:
+ dst_name = os.path.join(self.build_temp, lib + '.lib')
+ if not os.path.isfile(dst_name):
+ copy_file(p[0], dst_name)
+ if self.build_temp not in c_library_dirs:
+ c_library_dirs.append(self.build_temp)
+
+ def get_source_files(self):
+ self.check_extensions_list(self.extensions)
+ filenames = []
+ for ext in self.extensions:
+ filenames.extend(get_ext_source_files(ext))
+ return filenames
+
+ def get_outputs(self):
+ self.check_extensions_list(self.extensions)
+
+ outputs = []
+ for ext in self.extensions:
+ if not ext.sources:
+ continue
+ fullname = self.get_ext_fullname(ext.name)
+ outputs.append(os.path.join(self.build_lib,
+ self.get_ext_filename(fullname)))
+ return outputs
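As a usage sketch, the `language=` override documented in run() above is set per extension; this assumes the standard numpy.distutils Configuration API, and the package and file names are illustrative:

    # Minimal setup.py sketch: force the Fortran 90 linker for one extension.
    from numpy.distutils.core import setup
    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('mypkg', parent_package, top_path)
        # 'language' forces the linker choice ('c', 'c++', 'f77' or 'f90');
        # without it, the linker is picked by the rules in run().
        config.add_extension('solvers',
                             sources=['solvers.pyf', 'solvers.f90'],
                             language='f90')
        return config

    if __name__ == '__main__':
        setup(configuration=configuration)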
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py
new file mode 100644
index 00000000..d30dc5bf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_py.py
@@ -0,0 +1,31 @@
+from distutils.command.build_py import build_py as old_build_py
+from numpy.distutils.misc_util import is_string
+
+class build_py(old_build_py):
+
+ def run(self):
+ build_src = self.get_finalized_command('build_src')
+ if build_src.py_modules_dict and self.packages is None:
+            self.packages = list(build_src.py_modules_dict.keys())
+ old_build_py.run(self)
+
+ def find_package_modules(self, package, package_dir):
+ modules = old_build_py.find_package_modules(self, package, package_dir)
+
+ # Find build_src generated *.py files.
+ build_src = self.get_finalized_command('build_src')
+ modules += build_src.py_modules_dict.get(package, [])
+
+ return modules
+
+ def find_modules(self):
+ old_py_modules = self.py_modules[:]
+ new_py_modules = [_m for _m in self.py_modules if is_string(_m)]
+ self.py_modules[:] = new_py_modules
+ modules = old_build_py.find_modules(self)
+ self.py_modules[:] = old_py_modules
+
+ return modules
+
+    # XXX: Fix find_source_files to handle items in py_modules that are
+    # 3-tuples whose third element is the source file.
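For context, build_src stores generated modules in py_modules_dict as (package, module_base, source) 3-tuples, which find_package_modules() above merges back in. A sketch of such an entry, with illustrative names:

    # A generating function receives the target path and returns it (or None).
    def generate_version(target):
        with open(target, 'w') as f:
            f.write("version = '1.0'\n")
        return target

    # Entries of this shape may appear in the distribution's py_modules:
    py_modules = [('mypkg', 'version', generate_version)]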
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py
new file mode 100644
index 00000000..d5cadb27
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_scripts.py
@@ -0,0 +1,49 @@
+""" Modified version of build_scripts that handles building scripts from functions.
+
+"""
+from distutils.command.build_scripts import build_scripts as old_build_scripts
+from numpy.distutils import log
+from numpy.distutils.misc_util import is_string
+
+class build_scripts(old_build_scripts):
+
+ def generate_scripts(self, scripts):
+ new_scripts = []
+ func_scripts = []
+ for script in scripts:
+ if is_string(script):
+ new_scripts.append(script)
+ else:
+ func_scripts.append(script)
+ if not func_scripts:
+ return new_scripts
+
+ build_dir = self.build_dir
+ self.mkpath(build_dir)
+ for func in func_scripts:
+ script = func(build_dir)
+ if not script:
+ continue
+ if is_string(script):
+ log.info(" adding '%s' to scripts" % (script,))
+ new_scripts.append(script)
+ else:
+                for s in script:
+                    log.info(" adding '%s' to scripts" % (s,))
+ new_scripts.extend(list(script))
+ return new_scripts
+
+    def run(self):
+ if not self.scripts:
+ return
+
+ self.scripts = self.generate_scripts(self.scripts)
+ # Now make sure that the distribution object has this list of scripts.
+ # setuptools' develop command requires that this be a list of filenames,
+ # not functions.
+ self.distribution.scripts = self.scripts
+
+ return old_build_scripts.run(self)
+
+ def get_source_files(self):
+ from numpy.distutils.misc_util import get_script_files
+ return get_script_files(self.scripts)
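A minimal sketch of a "script function" as consumed by generate_scripts() above: it is called with the build directory and returns the generated script path (or a sequence of paths). The names below are illustrative:

    import os

    def make_launcher(build_dir):
        path = os.path.join(build_dir, 'mypkg-launch')
        with open(path, 'w') as f:
            f.write('#!/usr/bin/env python\nimport mypkg; mypkg.main()\n')
        return path

    # setup(..., scripts=['plain_script.py', make_launcher])
    # -- plain file names and functions may be mixed in the scripts list.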
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py
new file mode 100644
index 00000000..5581011f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/build_src.py
@@ -0,0 +1,773 @@
+""" Build swig and f2py sources.
+"""
+import os
+import re
+import sys
+import shlex
+import copy
+
+from distutils.command import build_ext
+from distutils.dep_util import newer_group, newer
+from distutils.util import get_platform
+from distutils.errors import DistutilsError, DistutilsSetupError
+
+
+# this import can't be done here, as it uses numpy stuff only available
+# after it's installed
+#import numpy.f2py
+from numpy.distutils import log
+from numpy.distutils.misc_util import (
+ fortran_ext_match, appendpath, is_string, is_sequence, get_cmd
+ )
+from numpy.distutils.from_template import process_file as process_f_file
+from numpy.distutils.conv_template import process_file as process_c_file
+
+def subst_vars(target, source, d):
+ """Substitute any occurrence of @foo@ by d['foo'] from source file into
+ target."""
+ var = re.compile('@([a-zA-Z_]+)@')
+ with open(source, 'r') as fs:
+ with open(target, 'w') as ft:
+ for l in fs:
+ m = var.search(l)
+ if m:
+ ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
+ else:
+ ft.write(l)
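A usage sketch for subst_vars(); the file names are illustrative. Note that only the first @var@ found on each line drives the substitution:

    with open('mylib.ini.in', 'w') as f:
        f.write('[default]\nprefix=@prefix@\n')
    subst_vars('mylib.ini', 'mylib.ini.in', {'prefix': '/usr/local'})
    with open('mylib.ini') as f:
        print(f.read())   # [default] ... prefix=/usr/local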
+
+class build_src(build_ext.build_ext):
+
+ description = "build sources from SWIG, F2PY files or a function"
+
+ user_options = [
+ ('build-src=', 'd', "directory to \"build\" sources to"),
+ ('f2py-opts=', None, "list of f2py command line options"),
+ ('swig=', None, "path to the SWIG executable"),
+ ('swig-opts=', None, "list of SWIG command line options"),
+ ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
+ ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
+ ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
+ ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ('inplace', 'i',
+ "ignore build-lib and put compiled extensions into the source " +
+ "directory alongside your pure Python modules"),
+ ('verbose-cfg', None,
+ "change logging level from WARN to INFO which will show all " +
+ "compiler output")
+ ]
+
+ boolean_options = ['force', 'inplace', 'verbose-cfg']
+
+ help_options = []
+
+ def initialize_options(self):
+ self.extensions = None
+ self.package = None
+ self.py_modules = None
+ self.py_modules_dict = None
+ self.build_src = None
+ self.build_lib = None
+ self.build_base = None
+ self.force = None
+ self.inplace = None
+ self.package_dir = None
+ self.f2pyflags = None # obsolete
+ self.f2py_opts = None
+ self.swigflags = None # obsolete
+ self.swig_opts = None
+ self.swig_cpp = None
+ self.swig = None
+ self.verbose_cfg = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_base', 'build_base'),
+ ('build_lib', 'build_lib'),
+ ('force', 'force'))
+ if self.package is None:
+ self.package = self.distribution.ext_package
+ self.extensions = self.distribution.ext_modules
+ self.libraries = self.distribution.libraries or []
+ self.py_modules = self.distribution.py_modules or []
+ self.data_files = self.distribution.data_files or []
+
+ if self.build_src is None:
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
+ self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
+
+ # py_modules_dict is used in build_py.find_package_modules
+ self.py_modules_dict = {}
+
+ if self.f2pyflags:
+ if self.f2py_opts:
+ log.warn('ignoring --f2pyflags as --f2py-opts already used')
+ else:
+ self.f2py_opts = self.f2pyflags
+ self.f2pyflags = None
+ if self.f2py_opts is None:
+ self.f2py_opts = []
+ else:
+ self.f2py_opts = shlex.split(self.f2py_opts)
+
+ if self.swigflags:
+ if self.swig_opts:
+ log.warn('ignoring --swigflags as --swig-opts already used')
+ else:
+ self.swig_opts = self.swigflags
+ self.swigflags = None
+
+ if self.swig_opts is None:
+ self.swig_opts = []
+ else:
+ self.swig_opts = shlex.split(self.swig_opts)
+
+ # use options from build_ext command
+ build_ext = self.get_finalized_command('build_ext')
+ if self.inplace is None:
+ self.inplace = build_ext.inplace
+ if self.swig_cpp is None:
+ self.swig_cpp = build_ext.swig_cpp
+ for c in ['swig', 'swig_opt']:
+ o = '--'+c.replace('_', '-')
+ v = getattr(build_ext, c, None)
+ if v:
+ if getattr(self, c):
+ log.warn('both build_src and build_ext define %s option' % (o))
+ else:
+ log.info('using "%s=%s" option from build_ext command' % (o, v))
+ setattr(self, c, v)
+
+ def run(self):
+ log.info("build_src")
+ if not (self.extensions or self.libraries):
+ return
+ self.build_sources()
+
+ def build_sources(self):
+
+ if self.inplace:
+ self.get_package_dir = \
+ self.get_finalized_command('build_py').get_package_dir
+
+ self.build_py_modules_sources()
+
+ for libname_info in self.libraries:
+ self.build_library_sources(*libname_info)
+
+ if self.extensions:
+ self.check_extensions_list(self.extensions)
+
+ for ext in self.extensions:
+ self.build_extension_sources(ext)
+
+ self.build_data_files_sources()
+ self.build_npy_pkg_config()
+
+ def build_data_files_sources(self):
+ if not self.data_files:
+ return
+ log.info('building data_files sources')
+ from numpy.distutils.misc_util import get_data_files
+ new_data_files = []
+ for data in self.data_files:
+ if isinstance(data, str):
+ new_data_files.append(data)
+ elif isinstance(data, tuple):
+ d, files = data
+ if self.inplace:
+ build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
+ else:
+ build_dir = os.path.join(self.build_src, d)
+ funcs = [f for f in files if hasattr(f, '__call__')]
+ files = [f for f in files if not hasattr(f, '__call__')]
+ for f in funcs:
+                if f.__code__.co_argcount == 1:
+ s = f(build_dir)
+ else:
+ s = f()
+ if s is not None:
+ if isinstance(s, list):
+ files.extend(s)
+ elif isinstance(s, str):
+ files.append(s)
+ else:
+ raise TypeError(repr(s))
+ filenames = get_data_files((d, files))
+ new_data_files.append((d, filenames))
+ else:
+ raise TypeError(repr(data))
+ self.data_files[:] = new_data_files
+
+
+ def _build_npy_pkg_config(self, info, gd):
+ template, install_dir, subst_dict = info
+ template_dir = os.path.dirname(template)
+ for k, v in gd.items():
+ subst_dict[k] = v
+
+ if self.inplace == 1:
+ generated_dir = os.path.join(template_dir, install_dir)
+ else:
+ generated_dir = os.path.join(self.build_src, template_dir,
+ install_dir)
+ generated = os.path.basename(os.path.splitext(template)[0])
+ generated_path = os.path.join(generated_dir, generated)
+ if not os.path.exists(generated_dir):
+ os.makedirs(generated_dir)
+
+ subst_vars(generated_path, template, subst_dict)
+
+ # Where to install relatively to install prefix
+ full_install_dir = os.path.join(template_dir, install_dir)
+ return full_install_dir, generated_path
+
+ def build_npy_pkg_config(self):
+ log.info('build_src: building npy-pkg config files')
+
+        # XXX: another ugly workaround to circumvent distutils brain
+        # damage. We need the install prefix here, but finalizing the
+        # options of the install command when only building sources causes
+        # an error. Instead, we copy the install command instance and
+        # finalize the copy, so that we do not disrupt how distutils wants
+        # to do things with the original install command instance.
+ install_cmd = copy.copy(get_cmd('install'))
+        if install_cmd.finalized != 1:
+ install_cmd.finalize_options()
+ build_npkg = False
+ if self.inplace == 1:
+ top_prefix = '.'
+ build_npkg = True
+ elif hasattr(install_cmd, 'install_libbase'):
+ top_prefix = install_cmd.install_libbase
+ build_npkg = True
+
+ if build_npkg:
+ for pkg, infos in self.distribution.installed_pkg_config.items():
+ pkg_path = self.distribution.package_dir[pkg]
+ prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
+ d = {'prefix': prefix}
+ for info in infos:
+ install_dir, generated = self._build_npy_pkg_config(info, d)
+ self.distribution.data_files.append((install_dir,
+ [generated]))
+
+ def build_py_modules_sources(self):
+ if not self.py_modules:
+ return
+ log.info('building py_modules sources')
+ new_py_modules = []
+ for source in self.py_modules:
+            if is_sequence(source) and len(source) == 3:
+ package, module_base, source = source
+ if self.inplace:
+ build_dir = self.get_package_dir(package)
+ else:
+ build_dir = os.path.join(self.build_src,
+ os.path.join(*package.split('.')))
+ if hasattr(source, '__call__'):
+ target = os.path.join(build_dir, module_base + '.py')
+ source = source(target)
+ if source is None:
+ continue
+ modules = [(package, module_base, source)]
+ if package not in self.py_modules_dict:
+ self.py_modules_dict[package] = []
+ self.py_modules_dict[package] += modules
+ else:
+ new_py_modules.append(source)
+ self.py_modules[:] = new_py_modules
+
+ def build_library_sources(self, lib_name, build_info):
+ sources = list(build_info.get('sources', []))
+
+ if not sources:
+ return
+
+ log.info('building library "%s" sources' % (lib_name))
+
+ sources = self.generate_sources(sources, (lib_name, build_info))
+
+ sources = self.template_sources(sources, (lib_name, build_info))
+
+ sources, h_files = self.filter_h_files(sources)
+
+ if h_files:
+ log.info('%s - nothing done with h_files = %s',
+ self.package, h_files)
+
+ #for f in h_files:
+ # self.distribution.headers.append((lib_name,f))
+
+ build_info['sources'] = sources
+ return
+
+ def build_extension_sources(self, ext):
+
+ sources = list(ext.sources)
+
+ log.info('building extension "%s" sources' % (ext.name))
+
+ fullname = self.get_ext_fullname(ext.name)
+
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[0:-1])
+
+ if self.inplace:
+ self.ext_target_dir = self.get_package_dir(package)
+
+ sources = self.generate_sources(sources, ext)
+ sources = self.template_sources(sources, ext)
+ sources = self.swig_sources(sources, ext)
+ sources = self.f2py_sources(sources, ext)
+ sources = self.pyrex_sources(sources, ext)
+
+ sources, py_files = self.filter_py_files(sources)
+
+ if package not in self.py_modules_dict:
+ self.py_modules_dict[package] = []
+ modules = []
+ for f in py_files:
+ module = os.path.splitext(os.path.basename(f))[0]
+ modules.append((package, module, f))
+ self.py_modules_dict[package] += modules
+
+ sources, h_files = self.filter_h_files(sources)
+
+ if h_files:
+ log.info('%s - nothing done with h_files = %s',
+ package, h_files)
+ #for f in h_files:
+ # self.distribution.headers.append((package,f))
+
+ ext.sources = sources
+
+ def generate_sources(self, sources, extension):
+ new_sources = []
+ func_sources = []
+ for source in sources:
+ if is_string(source):
+ new_sources.append(source)
+ else:
+ func_sources.append(source)
+ if not func_sources:
+ return new_sources
+ if self.inplace and not is_sequence(extension):
+ build_dir = self.ext_target_dir
+ else:
+ if is_sequence(extension):
+ name = extension[0]
+ # if 'include_dirs' not in extension[1]:
+ # extension[1]['include_dirs'] = []
+ # incl_dirs = extension[1]['include_dirs']
+ else:
+ name = extension.name
+ # incl_dirs = extension.include_dirs
+ #if self.build_src not in incl_dirs:
+ # incl_dirs.append(self.build_src)
+ build_dir = os.path.join(*([self.build_src]
+ +name.split('.')[:-1]))
+ self.mkpath(build_dir)
+
+ if self.verbose_cfg:
+ new_level = log.INFO
+ else:
+ new_level = log.WARN
+ old_level = log.set_threshold(new_level)
+
+ for func in func_sources:
+ source = func(extension, build_dir)
+ if not source:
+ continue
+ if is_sequence(source):
+                for s in source:
+                    log.info(" adding '%s' to sources." % (s,))
+ new_sources.extend(source)
+ else:
+ log.info(" adding '%s' to sources." % (source,))
+ new_sources.append(source)
+ log.set_threshold(old_level)
+ return new_sources
+
+ def filter_py_files(self, sources):
+ return self.filter_files(sources, ['.py'])
+
+ def filter_h_files(self, sources):
+ return self.filter_files(sources, ['.h', '.hpp', '.inc'])
+
+    def filter_files(self, sources, exts=[]):
+ new_sources = []
+ files = []
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext in exts:
+ files.append(source)
+ else:
+ new_sources.append(source)
+ return new_sources, files
+
+ def template_sources(self, sources, extension):
+ new_sources = []
+ if is_sequence(extension):
+ depends = extension[1].get('depends')
+ include_dirs = extension[1].get('include_dirs')
+ else:
+ depends = extension.depends
+ include_dirs = extension.include_dirs
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.src': # Template file
+ if self.inplace:
+ target_dir = os.path.dirname(base)
+ else:
+ target_dir = appendpath(self.build_src, os.path.dirname(base))
+ self.mkpath(target_dir)
+ target_file = os.path.join(target_dir, os.path.basename(base))
+ if (self.force or newer_group([source] + depends, target_file)):
+ if _f_pyf_ext_match(base):
+ log.info("from_template:> %s" % (target_file))
+ outstr = process_f_file(source)
+ else:
+ log.info("conv_template:> %s" % (target_file))
+ outstr = process_c_file(source)
+ with open(target_file, 'w') as fid:
+ fid.write(outstr)
+ if _header_ext_match(target_file):
+ d = os.path.dirname(target_file)
+ if d not in include_dirs:
+ log.info(" adding '%s' to include_dirs." % (d))
+ include_dirs.append(d)
+ new_sources.append(target_file)
+ else:
+ new_sources.append(source)
+ return new_sources
+
+ def pyrex_sources(self, sources, extension):
+ """Pyrex not supported; this remains for Cython support (see below)"""
+ new_sources = []
+ ext_name = extension.name.split('.')[-1]
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.pyx':
+ target_file = self.generate_a_pyrex_source(base, ext_name,
+ source,
+ extension)
+ new_sources.append(target_file)
+ else:
+ new_sources.append(source)
+ return new_sources
+
+ def generate_a_pyrex_source(self, base, ext_name, source, extension):
+ """Pyrex is not supported, but some projects monkeypatch this method.
+
+ That allows compiling Cython code, see gh-6955.
+ This method will remain here for compatibility reasons.
+ """
+ return []
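A sketch of the monkeypatching pattern the docstring refers to (gh-6955); this assumes Cython is installed and that cythonize() mirrors the source path under build_dir, so treat it as illustrative rather than a drop-in replacement:

    import os
    from numpy.distutils.command import build_src

    def generate_a_cython_source(self, base, ext_name, source, extension):
        from Cython.Build import cythonize
        # Writes base + '.c' under self.build_src, mirroring the source path.
        cythonize([source], build_dir=self.build_src)
        return os.path.join(self.build_src, base + '.c')

    build_src.build_src.generate_a_pyrex_source = generate_a_cython_source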
+
+ def f2py_sources(self, sources, extension):
+ new_sources = []
+ f2py_sources = []
+ f_sources = []
+ f2py_targets = {}
+ target_dirs = []
+ ext_name = extension.name.split('.')[-1]
+ skip_f2py = 0
+
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.pyf': # F2PY interface file
+ if self.inplace:
+ target_dir = os.path.dirname(base)
+ else:
+ target_dir = appendpath(self.build_src, os.path.dirname(base))
+ if os.path.isfile(source):
+ name = get_f2py_modulename(source)
+ if name != ext_name:
+ raise DistutilsSetupError('mismatch of extension names: %s '
+ 'provides %r but expected %r' % (
+ source, name, ext_name))
+ target_file = os.path.join(target_dir, name+'module.c')
+ else:
+ log.debug(' source %s does not exist: skipping f2py\'ing.' \
+ % (source))
+ name = ext_name
+ skip_f2py = 1
+ target_file = os.path.join(target_dir, name+'module.c')
+ if not os.path.isfile(target_file):
+ log.warn(' target %s does not exist:\n '\
+ 'Assuming %smodule.c was generated with '\
+ '"build_src --inplace" command.' \
+ % (target_file, name))
+ target_dir = os.path.dirname(base)
+ target_file = os.path.join(target_dir, name+'module.c')
+ if not os.path.isfile(target_file):
+ raise DistutilsSetupError("%r missing" % (target_file,))
+ log.info(' Yes! Using %r as up-to-date target.' \
+ % (target_file))
+ target_dirs.append(target_dir)
+ f2py_sources.append(source)
+ f2py_targets[source] = target_file
+ new_sources.append(target_file)
+ elif fortran_ext_match(ext):
+ f_sources.append(source)
+ else:
+ new_sources.append(source)
+
+ if not (f2py_sources or f_sources):
+ return new_sources
+
+ for d in target_dirs:
+ self.mkpath(d)
+
+ f2py_options = extension.f2py_options + self.f2py_opts
+
+ if self.distribution.libraries:
+ for name, build_info in self.distribution.libraries:
+ if name in extension.libraries:
+ f2py_options.extend(build_info.get('f2py_options', []))
+
+ log.info("f2py options: %s" % (f2py_options))
+
+ if f2py_sources:
+ if len(f2py_sources) != 1:
+ raise DistutilsSetupError(
+ 'only one .pyf file is allowed per extension module but got'\
+ ' more: %r' % (f2py_sources,))
+ source = f2py_sources[0]
+ target_file = f2py_targets[source]
+ target_dir = os.path.dirname(target_file) or '.'
+ depends = [source] + extension.depends
+ if (self.force or newer_group(depends, target_file, 'newer')) \
+ and not skip_f2py:
+ log.info("f2py: %s" % (source))
+ import numpy.f2py
+ numpy.f2py.run_main(f2py_options
+ + ['--build-dir', target_dir, source])
+ else:
+ log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
+ else:
+ #XXX TODO: --inplace support for sdist command
+ if is_sequence(extension):
+ name = extension[0]
+ else: name = extension.name
+ target_dir = os.path.join(*([self.build_src]
+ +name.split('.')[:-1]))
+ target_file = os.path.join(target_dir, ext_name + 'module.c')
+ new_sources.append(target_file)
+ depends = f_sources + extension.depends
+ if (self.force or newer_group(depends, target_file, 'newer')) \
+ and not skip_f2py:
+ log.info("f2py:> %s" % (target_file))
+ self.mkpath(target_dir)
+ import numpy.f2py
+ numpy.f2py.run_main(f2py_options + ['--lower',
+ '--build-dir', target_dir]+\
+ ['-m', ext_name]+f_sources)
+ else:
+ log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
+ % (target_file))
+
+ if not os.path.isfile(target_file):
+ raise DistutilsError("f2py target file %r not generated" % (target_file,))
+
+ build_dir = os.path.join(self.build_src, target_dir)
+ target_c = os.path.join(build_dir, 'fortranobject.c')
+ target_h = os.path.join(build_dir, 'fortranobject.h')
+ log.info(" adding '%s' to sources." % (target_c))
+ new_sources.append(target_c)
+ if build_dir not in extension.include_dirs:
+ log.info(" adding '%s' to include_dirs." % (build_dir))
+ extension.include_dirs.append(build_dir)
+
+ if not skip_f2py:
+ import numpy.f2py
+ d = os.path.dirname(numpy.f2py.__file__)
+ source_c = os.path.join(d, 'src', 'fortranobject.c')
+ source_h = os.path.join(d, 'src', 'fortranobject.h')
+ if newer(source_c, target_c) or newer(source_h, target_h):
+ self.mkpath(os.path.dirname(target_c))
+ self.copy_file(source_c, target_c)
+ self.copy_file(source_h, target_h)
+ else:
+ if not os.path.isfile(target_c):
+ raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
+ if not os.path.isfile(target_h):
+ raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
+
+ for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
+ filename = os.path.join(target_dir, ext_name + name_ext)
+ if os.path.isfile(filename):
+ log.info(" adding '%s' to sources." % (filename))
+ f_sources.append(filename)
+
+ return new_sources + f_sources
+
+ def swig_sources(self, sources, extension):
+ # Assuming SWIG 1.3.14 or later. See compatibility note in
+ # http://www.swig.org/Doc1.3/Python.html#Python_nn6
+
+ new_sources = []
+ swig_sources = []
+ swig_targets = {}
+ target_dirs = []
+ py_files = [] # swig generated .py files
+ target_ext = '.c'
+ if '-c++' in extension.swig_opts:
+ typ = 'c++'
+ is_cpp = True
+ extension.swig_opts.remove('-c++')
+ elif self.swig_cpp:
+ typ = 'c++'
+ is_cpp = True
+ else:
+ typ = None
+ is_cpp = False
+ skip_swig = 0
+ ext_name = extension.name.split('.')[-1]
+
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == '.i': # SWIG interface file
+                # the code below assumes that the sources list contains
+                # no more than one .i SWIG interface file
+ if self.inplace:
+ target_dir = os.path.dirname(base)
+ py_target_dir = self.ext_target_dir
+ else:
+ target_dir = appendpath(self.build_src, os.path.dirname(base))
+ py_target_dir = target_dir
+ if os.path.isfile(source):
+ name = get_swig_modulename(source)
+ if name != ext_name[1:]:
+ raise DistutilsSetupError(
+ 'mismatch of extension names: %s provides %r'
+ ' but expected %r' % (source, name, ext_name[1:]))
+ if typ is None:
+ typ = get_swig_target(source)
+ is_cpp = typ=='c++'
+ else:
+ typ2 = get_swig_target(source)
+ if typ2 is None:
+ log.warn('source %r does not define swig target, assuming %s swig target' \
+ % (source, typ))
+ elif typ!=typ2:
+ log.warn('expected %r but source %r defines %r swig target' \
+ % (typ, source, typ2))
+ if typ2=='c++':
+ log.warn('resetting swig target to c++ (some targets may have .c extension)')
+ is_cpp = True
+ else:
+ log.warn('assuming that %r has c++ swig target' % (source))
+ if is_cpp:
+ target_ext = '.cpp'
+ target_file = os.path.join(target_dir, '%s_wrap%s' \
+ % (name, target_ext))
+ else:
+ log.warn(' source %s does not exist: skipping swig\'ing.' \
+ % (source))
+ name = ext_name[1:]
+ skip_swig = 1
+ target_file = _find_swig_target(target_dir, name)
+ if not os.path.isfile(target_file):
+ log.warn(' target %s does not exist:\n '\
+ 'Assuming %s_wrap.{c,cpp} was generated with '\
+ '"build_src --inplace" command.' \
+ % (target_file, name))
+ target_dir = os.path.dirname(base)
+ target_file = _find_swig_target(target_dir, name)
+ if not os.path.isfile(target_file):
+ raise DistutilsSetupError("%r missing" % (target_file,))
+ log.warn(' Yes! Using %r as up-to-date target.' \
+ % (target_file))
+ target_dirs.append(target_dir)
+ new_sources.append(target_file)
+ py_files.append(os.path.join(py_target_dir, name+'.py'))
+ swig_sources.append(source)
+ swig_targets[source] = new_sources[-1]
+ else:
+ new_sources.append(source)
+
+ if not swig_sources:
+ return new_sources
+
+ if skip_swig:
+ return new_sources + py_files
+
+ for d in target_dirs:
+ self.mkpath(d)
+
+ swig = self.swig or self.find_swig()
+ swig_cmd = [swig, "-python"] + extension.swig_opts
+ if is_cpp:
+ swig_cmd.append('-c++')
+ for d in extension.include_dirs:
+ swig_cmd.append('-I'+d)
+ for source in swig_sources:
+ target = swig_targets[source]
+ depends = [source] + extension.depends
+ if self.force or newer_group(depends, target, 'newer'):
+ log.info("%s: %s" % (os.path.basename(swig) \
+ + (is_cpp and '++' or ''), source))
+ self.spawn(swig_cmd + self.swig_opts \
+ + ["-o", target, '-outdir', py_target_dir, source])
+ else:
+ log.debug(" skipping '%s' swig interface (up-to-date)" \
+ % (source))
+
+ return new_sources + py_files
+
+_f_pyf_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+_header_ext_match = re.compile(r'.*\.(inc|h|hpp)\Z', re.I).match
+
+#### SWIG related auxiliary functions ####
+_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
+ re.I).match
+_has_c_header = re.compile(r'-\*-\s*c\s*-\*-', re.I).search
+_has_cpp_header = re.compile(r'-\*-\s*c\+\+\s*-\*-', re.I).search
+
+def get_swig_target(source):
+ with open(source, 'r') as f:
+ result = None
+ line = f.readline()
+ if _has_cpp_header(line):
+ result = 'c++'
+ if _has_c_header(line):
+ result = 'c'
+ return result
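get_swig_target() only inspects the first line of the interface file for an Emacs-style mode cookie. A sketch, with an illustrative file name:

    with open('example.i', 'w') as f:
        f.write('// -*- c++ -*-\n%module example\n')
    print(get_swig_target('example.i'))      # -> 'c++'
    print(get_swig_modulename('example.i'))  # -> 'example'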
+
+def get_swig_modulename(source):
+ with open(source, 'r') as f:
+ name = None
+ for line in f:
+ m = _swig_module_name_match(line)
+ if m:
+ name = m.group('name')
+ break
+ return name
+
+def _find_swig_target(target_dir, name):
+ for ext in ['.cpp', '.c']:
+ target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
+ if os.path.isfile(target):
+ break
+ return target
+
+#### F2PY related auxiliary functions ####
+
+_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
+ re.I).match
+_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
+ r'__user__[\w_]*)', re.I).match
+
+def get_f2py_modulename(source):
+ name = None
+ with open(source) as f:
+ for line in f:
+ m = _f2py_module_name_match(line)
+ if m:
+ if _f2py_user_module_name_match(line): # skip *__user__* names
+ continue
+ name = m.group('name')
+ break
+ return name
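A sketch of the module-name scan on a minimal .pyf interface file (illustrative name); *__user__* blocks are skipped as shown above:

    with open('solvers.pyf', 'w') as f:
        f.write('python module solvers\n'
                '  interface\n'
                '  end interface\n'
                'end python module solvers\n')
    print(get_f2py_modulename('solvers.pyf'))  # -> 'solvers'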
+
+##########################################
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/config.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/config.py
new file mode 100644
index 00000000..fdb650d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/config.py
@@ -0,0 +1,516 @@
+# Added Fortran compiler support to config. Currently useful only for the
+# try_compile call. try_run works but is untested for most Fortran
+# compilers (they must define linker_exe first).
+# Pearu Peterson
+import os
+import signal
+import subprocess
+import sys
+import textwrap
+import warnings
+
+from distutils.command.config import config as old_config
+from distutils.command.config import LANG_EXT
+from distutils import log
+from distutils.file_util import copy_file
+from distutils.ccompiler import CompileError, LinkError
+import distutils
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.mingw32ccompiler import generate_manifest
+from numpy.distutils.command.autodist import (check_gcc_function_attribute,
+ check_gcc_function_attribute_with_intrinsics,
+ check_gcc_variable_attribute,
+ check_gcc_version_at_least,
+ check_inline,
+ check_restrict,
+ check_compiler_gcc)
+
+LANG_EXT['f77'] = '.f'
+LANG_EXT['f90'] = '.f90'
+
+class config(old_config):
+ old_config.user_options += [
+ ('fcompiler=', None, "specify the Fortran compiler type"),
+ ]
+
+ def initialize_options(self):
+ self.fcompiler = None
+ old_config.initialize_options(self)
+
+    def _check_compiler(self):
+ old_config._check_compiler(self)
+ from numpy.distutils.fcompiler import FCompiler, new_fcompiler
+
+ if sys.platform == 'win32' and (self.compiler.compiler_type in
+ ('msvc', 'intelw', 'intelemw')):
+            # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
+            # initialize() calls query_vcvarsall(), which throws an IOError
+            # and causes an error along the way without much information.
+            # We try to catch it here, hoping it is early enough, and print
+            # a helpful message instead of Error: None.
+ if not self.compiler.initialized:
+ try:
+ self.compiler.initialize()
+ except IOError as e:
+ msg = textwrap.dedent("""\
+ Could not initialize compiler instance: do you have Visual Studio
+ installed? If you are trying to build with MinGW, please use "python setup.py
+ build -c mingw32" instead. If you have Visual Studio installed, check it is
+ correctly installed, and the right version (VS 2015 as of this writing).
+
+ Original exception was: %s, and the Compiler class was %s
+ ============================================================================""") \
+ % (e, self.compiler.__class__.__name__)
+ print(textwrap.dedent("""\
+ ============================================================================"""))
+ raise distutils.errors.DistutilsPlatformError(msg) from e
+
+ # After MSVC is initialized, add an explicit /MANIFEST to linker
+ # flags. See issues gh-4245 and gh-4101 for details. Also
+ # relevant are issues 4431 and 16296 on the Python bug tracker.
+ from distutils import msvc9compiler
+ if msvc9compiler.get_build_version() >= 10:
+ for ldflags in [self.compiler.ldflags_shared,
+ self.compiler.ldflags_shared_debug]:
+ if '/MANIFEST' not in ldflags:
+ ldflags.append('/MANIFEST')
+
+ if not isinstance(self.fcompiler, FCompiler):
+ self.fcompiler = new_fcompiler(compiler=self.fcompiler,
+ dry_run=self.dry_run, force=1,
+ c_compiler=self.compiler)
+ if self.fcompiler is not None:
+ self.fcompiler.customize(self.distribution)
+ if self.fcompiler.get_version():
+ self.fcompiler.customize_cmd(self)
+ self.fcompiler.show_customization()
+
+ def _wrap_method(self, mth, lang, args):
+ from distutils.ccompiler import CompileError
+ from distutils.errors import DistutilsExecError
+ save_compiler = self.compiler
+ if lang in ['f77', 'f90']:
+ self.compiler = self.fcompiler
+ if self.compiler is None:
+ raise CompileError('%s compiler is not set' % (lang,))
+ try:
+ ret = mth(*((self,)+args))
+ except (DistutilsExecError, CompileError) as e:
+ self.compiler = save_compiler
+ raise CompileError from e
+ self.compiler = save_compiler
+ return ret
+
+    def _compile(self, body, headers, include_dirs, lang):
+ src, obj = self._wrap_method(old_config._compile, lang,
+ (body, headers, include_dirs, lang))
+ # _compile in unixcompiler.py sometimes creates .d dependency files.
+ # Clean them up.
+ self.temp_files.append(obj + '.d')
+ return src, obj
+
+    def _link(self, body,
+              headers, include_dirs,
+              libraries, library_dirs, lang):
+ if self.compiler.compiler_type=='msvc':
+ libraries = (libraries or [])[:]
+ library_dirs = (library_dirs or [])[:]
+ if lang in ['f77', 'f90']:
+ lang = 'c' # always use system linker when using MSVC compiler
+ if self.fcompiler:
+ for d in self.fcompiler.library_dirs or []:
+ # correct path when compiling in Cygwin but with
+ # normal Win Python
+ if d.startswith('/usr/lib'):
+ try:
+ d = subprocess.check_output(['cygpath',
+ '-w', d])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ d = filepath_from_subprocess_output(d)
+ library_dirs.append(d)
+ for libname in self.fcompiler.libraries or []:
+ if libname not in libraries:
+ libraries.append(libname)
+ for libname in libraries:
+ if libname.startswith('msvc'): continue
+ fileexists = False
+ for libdir in library_dirs or []:
+ libfile = os.path.join(libdir, '%s.lib' % (libname))
+ if os.path.isfile(libfile):
+ fileexists = True
+ break
+ if fileexists: continue
+ # make g77-compiled static libs available to MSVC
+ fileexists = False
+ for libdir in library_dirs:
+ libfile = os.path.join(libdir, 'lib%s.a' % (libname))
+ if os.path.isfile(libfile):
+ # copy libname.a file to name.lib so that MSVC linker
+ # can find it
+ libfile2 = os.path.join(libdir, '%s.lib' % (libname))
+ copy_file(libfile, libfile2)
+ self.temp_files.append(libfile2)
+ fileexists = True
+ break
+ if fileexists: continue
+ log.warn('could not find library %r in directories %s' \
+ % (libname, library_dirs))
+ elif self.compiler.compiler_type == 'mingw32':
+ generate_manifest(self)
+ return self._wrap_method(old_config._link, lang,
+ (body, headers, include_dirs,
+ libraries, library_dirs, lang))
+
+ def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
+ self._check_compiler()
+ return self.try_compile(
+ "/* we need a dummy line to make distutils happy */",
+ [header], include_dirs)
+
+ def check_decl(self, symbol,
+ headers=None, include_dirs=None):
+ self._check_compiler()
+ body = textwrap.dedent("""
+ int main(void)
+ {
+ #ifndef %s
+ (void) %s;
+ #endif
+ ;
+ return 0;
+ }""") % (symbol, symbol)
+
+ return self.try_compile(body, headers, include_dirs)
+
+ def check_macro_true(self, symbol,
+ headers=None, include_dirs=None):
+ self._check_compiler()
+ body = textwrap.dedent("""
+ int main(void)
+ {
+ #if %s
+ #else
+ #error false or undefined macro
+ #endif
+ ;
+ return 0;
+ }""") % (symbol,)
+
+ return self.try_compile(body, headers, include_dirs)
+
+ def check_type(self, type_name, headers=None, include_dirs=None,
+ library_dirs=None):
+ """Check type availability. Return True if the type can be compiled,
+ False otherwise"""
+ self._check_compiler()
+
+ # First check the type can be compiled
+ body = textwrap.dedent(r"""
+ int main(void) {
+ if ((%(name)s *) 0)
+ return 0;
+ if (sizeof (%(name)s))
+ return 0;
+ }
+ """) % {'name': type_name}
+
+ st = False
+ try:
+ try:
+                self._compile(body, headers, include_dirs, 'c')
+ st = True
+ except distutils.errors.CompileError:
+ st = False
+ finally:
+ self._clean()
+
+ return st
+
+    def check_type_size(self, type_name, headers=None, include_dirs=None,
+                        library_dirs=None, expected=None):
+ """Check size of a given type."""
+ self._check_compiler()
+
+ # First check the type can be compiled
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
+ self._compile(body % {'type': type_name},
+ headers, include_dirs, 'c')
+ self._clean()
+
+ if expected:
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
+ for size in expected:
+ try:
+ self._compile(body % {'type': type_name, 'size': size},
+ headers, include_dirs, 'c')
+ self._clean()
+ return size
+ except CompileError:
+ pass
+
+ # this fails to *compile* if size > sizeof(type)
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
+
+ # The principle is simple: we first find low and high bounds of size
+ # for the type, where low/high are looked up on a log scale. Then, we
+ # do a binary search to find the exact size between low and high
+ low = 0
+ mid = 0
+ while True:
+ try:
+ self._compile(body % {'type': type_name, 'size': mid},
+ headers, include_dirs, 'c')
+ self._clean()
+ break
+ except CompileError:
+ #log.info("failure to test for bound %d" % mid)
+ low = mid + 1
+ mid = 2 * mid + 1
+
+ high = mid
+ # Binary search:
+ while low != high:
+ mid = (high - low) // 2 + low
+ try:
+ self._compile(body % {'type': type_name, 'size': mid},
+ headers, include_dirs, 'c')
+ self._clean()
+ high = mid
+ except CompileError:
+ low = mid + 1
+ return low
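The search above, restated with compilation replaced by a boolean predicate to make the control flow easier to follow; fits(n) stands in for "the probe compiles with size n", i.e. sizeof(type) <= n:

    def probe_size(fits):
        low, mid = 0, 0
        while not fits(mid):          # exponential search for an upper bound
            low = mid + 1
            mid = 2 * mid + 1
        high = mid
        while low != high:            # binary search between the bounds
            mid = (high - low) // 2 + low
            if fits(mid):
                high = mid
            else:
                low = mid + 1
        return low

    assert probe_size(lambda n: n >= 8) == 8   # e.g. an 8-byte double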
+
+ def check_func(self, func,
+ headers=None, include_dirs=None,
+ libraries=None, library_dirs=None,
+ decl=False, call=False, call_args=None):
+ # clean up distutils's config a bit: add void to main(), and
+ # return a value.
+ self._check_compiler()
+ body = []
+ if decl:
+ if type(decl) == str:
+ body.append(decl)
+ else:
+ body.append("int %s (void);" % func)
+ # Handle MSVC intrinsics: force MS compiler to make a function call.
+ # Useful to test for some functions when built with optimization on, to
+ # avoid build error because the intrinsic and our 'fake' test
+ # declaration do not match.
+ body.append("#ifdef _MSC_VER")
+ body.append("#pragma function(%s)" % func)
+ body.append("#endif")
+ body.append("int main (void) {")
+ if call:
+ if call_args is None:
+ call_args = ''
+ body.append(" %s(%s);" % (func, call_args))
+ else:
+ body.append(" %s;" % func)
+ body.append(" return 0;")
+ body.append("}")
+ body = '\n'.join(body) + "\n"
+
+ return self.try_link(body, headers, include_dirs,
+ libraries, library_dirs)
+
+ def check_funcs_once(self, funcs,
+ headers=None, include_dirs=None,
+ libraries=None, library_dirs=None,
+ decl=False, call=False, call_args=None):
+ """Check a list of functions at once.
+
+        This is useful to speed things up, since all the functions in the
+        funcs list will be put in one compilation unit.
+
+        Parameters
+        ----------
+        funcs : seq
+            list of functions to test
+        include_dirs : seq
+            list of header paths
+        libraries : seq
+            list of libraries to link the code snippet to
+        library_dirs : seq
+            list of library paths
+        decl : dict
+            for each (function, value) pair, the declaration given by the
+            value is used for that function. If a function is not in the
+            dictionary, no declaration is used.
+        call : dict
+            for each item (f, value), if the value is True, a call to the
+            function f is emitted.
+        """
+ self._check_compiler()
+ body = []
+ if decl:
+ for f, v in decl.items():
+ if v:
+ body.append("int %s (void);" % f)
+
+ # Handle MS intrinsics. See check_func for more info.
+ body.append("#ifdef _MSC_VER")
+ for func in funcs:
+ body.append("#pragma function(%s)" % func)
+ body.append("#endif")
+
+ body.append("int main (void) {")
+ if call:
+ for f in funcs:
+ if f in call and call[f]:
+ if not (call_args and f in call_args and call_args[f]):
+ args = ''
+ else:
+ args = call_args[f]
+ body.append(" %s(%s);" % (f, args))
+ else:
+ body.append(" %s;" % f)
+ else:
+ for f in funcs:
+ body.append(" %s;" % f)
+ body.append(" return 0;")
+ body.append("}")
+ body = '\n'.join(body) + "\n"
+
+ return self.try_link(body, headers, include_dirs,
+ libraries, library_dirs)
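A hypothetical call, assuming `config` is the finalized config command instance inside a setup script; it probes several libm functions in a single compilation unit:

    config.check_funcs_once(
        ['sin', 'cos', 'tan'],
        libraries=['m'],
        call={'sin': True},            # emit sin(0.0) instead of a bare reference
        call_args={'sin': '0.0'},
    )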
+
+ def check_inline(self):
+ """Return the inline keyword recognized by the compiler, empty string
+ otherwise."""
+ return check_inline(self)
+
+ def check_restrict(self):
+ """Return the restrict keyword recognized by the compiler, empty string
+ otherwise."""
+ return check_restrict(self)
+
+ def check_compiler_gcc(self):
+ """Return True if the C compiler is gcc"""
+ return check_compiler_gcc(self)
+
+ def check_gcc_function_attribute(self, attribute, name):
+ return check_gcc_function_attribute(self, attribute, name)
+
+ def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
+ code, include):
+ return check_gcc_function_attribute_with_intrinsics(self, attribute,
+ name, code, include)
+
+ def check_gcc_variable_attribute(self, attribute):
+ return check_gcc_variable_attribute(self, attribute)
+
+ def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
+ """Return True if the GCC version is greater than or equal to the
+ specified version."""
+ return check_gcc_version_at_least(self, major, minor, patchlevel)
+
+ def get_output(self, body, headers=None, include_dirs=None,
+ libraries=None, library_dirs=None,
+ lang="c", use_tee=None):
+ """Try to compile, link to an executable, and run a program
+ built from 'body' and 'headers'. Returns the exit status code
+ of the program and its output.
+ """
+ # 2008-11-16, RemoveMe
+ warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "Usage of get_output is deprecated: please do not \n"
+ "use it anymore, and avoid configuration checks \n"
+ "involving running executable on the target machine.\n"
+ "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
+ DeprecationWarning, stacklevel=2)
+ self._check_compiler()
+ exitcode, output = 255, ''
+ try:
+ grabber = GrabStdout()
+ try:
+ src, obj, exe = self._link(body, headers, include_dirs,
+ libraries, library_dirs, lang)
+ grabber.restore()
+ except Exception:
+ output = grabber.data
+ grabber.restore()
+ raise
+ exe = os.path.join('.', exe)
+ try:
+ # specify cwd arg for consistency with
+ # historic usage pattern of exec_command()
+ # also, note that exe appears to be a string,
+ # which exec_command() handled, but we now
+ # use a list for check_output() -- this assumes
+ # that exe is always a single command
+ output = subprocess.check_output([exe], cwd='.')
+ except subprocess.CalledProcessError as exc:
+ exitstatus = exc.returncode
+ output = ''
+ except OSError:
+ # preserve the EnvironmentError exit status
+ # used historically in exec_command()
+ exitstatus = 127
+ output = ''
+            else:
+                # check_output() did not raise, so the exit status is 0;
+                # without this, exitstatus would be unbound below
+                exitstatus = 0
+                output = filepath_from_subprocess_output(output)
+ if hasattr(os, 'WEXITSTATUS'):
+ exitcode = os.WEXITSTATUS(exitstatus)
+ if os.WIFSIGNALED(exitstatus):
+ sig = os.WTERMSIG(exitstatus)
+ log.error('subprocess exited with signal %d' % (sig,))
+ if sig == signal.SIGINT:
+ # control-C
+ raise KeyboardInterrupt
+ else:
+ exitcode = exitstatus
+ log.info("success!")
+ except (CompileError, LinkError):
+ log.info("failure.")
+ self._clean()
+ return exitcode, output
+
+class GrabStdout:
+
+ def __init__(self):
+ self.sys_stdout = sys.stdout
+ self.data = ''
+ sys.stdout = self
+
+ def write (self, data):
+ self.sys_stdout.write(data)
+ self.data += data
+
+ def flush (self):
+ self.sys_stdout.flush()
+
+ def restore(self):
+ sys.stdout = self.sys_stdout
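+
+# Usage sketch for GrabStdout (illustrative): echo and record everything
+# written to sys.stdout while some code runs.
+#
+#   grabber = GrabStdout()
+#   try:
+#       print("hello")        # printed and captured
+#   finally:
+#       grabber.restore()
+#   captured = grabber.data   # == "hello\n"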
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py
new file mode 100644
index 00000000..44265bfc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/config_compiler.py
@@ -0,0 +1,126 @@
+from distutils.core import Command
+from numpy.distutils import log
+
+#XXX: Linker flags
+
+def show_fortran_compilers(_cache=None):
+ # Using cache to prevent infinite recursion.
+ if _cache:
+ return
+ elif _cache is None:
+ _cache = []
+ _cache.append(1)
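+    # (the mutable default argument persists across calls, so any
+    # recursive invocation triggered by show_fcompilers below
+    # returns immediately)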
+ from numpy.distutils.fcompiler import show_fcompilers
+ import distutils.core
+ dist = distutils.core._setup_distribution
+ show_fcompilers(dist)
+
+class config_fc(Command):
+    """ Distutils command to hold user-specified options
+    to Fortran compilers.
+
+    The config_fc command is used by the FCompiler.customize() method.
+    """
+
+ description = "specify Fortran 77/Fortran 90 compiler information"
+
+ user_options = [
+ ('fcompiler=', None, "specify Fortran compiler type"),
+ ('f77exec=', None, "specify F77 compiler command"),
+ ('f90exec=', None, "specify F90 compiler command"),
+ ('f77flags=', None, "specify F77 compiler flags"),
+ ('f90flags=', None, "specify F90 compiler flags"),
+ ('opt=', None, "specify optimization flags"),
+ ('arch=', None, "specify architecture specific optimization flags"),
+ ('debug', 'g', "compile with debugging information"),
+ ('noopt', None, "compile without optimization"),
+ ('noarch', None, "compile without arch-dependent optimization"),
+ ]
+
+ help_options = [
+ ('help-fcompiler', None, "list available Fortran compilers",
+ show_fortran_compilers),
+ ]
+
+ boolean_options = ['debug', 'noopt', 'noarch']
+
+ def initialize_options(self):
+ self.fcompiler = None
+ self.f77exec = None
+ self.f90exec = None
+ self.f77flags = None
+ self.f90flags = None
+ self.opt = None
+ self.arch = None
+ self.debug = None
+ self.noopt = None
+ self.noarch = None
+
+ def finalize_options(self):
+        log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options')
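+        # For example, `python setup.py config_fc --fcompiler=gnu95 build_ext`
+        # makes build_ext and the other commands gathered below see
+        # fcompiler='gnu95' unless they were given an explicit value.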
+ build_clib = self.get_finalized_command('build_clib')
+ build_ext = self.get_finalized_command('build_ext')
+ config = self.get_finalized_command('config')
+ build = self.get_finalized_command('build')
+ cmd_list = [self, config, build_clib, build_ext, build]
+ for a in ['fcompiler']:
+ l = []
+ for c in cmd_list:
+ v = getattr(c, a)
+ if v is not None:
+ if not isinstance(v, str): v = v.compiler_type
+ if v not in l: l.append(v)
+ if not l: v1 = None
+ else: v1 = l[0]
+ if len(l)>1:
+ log.warn(' commands have different --%s options: %s'\
+ ', using first in list as default' % (a, l))
+ if v1:
+ for c in cmd_list:
+ if getattr(c, a) is None: setattr(c, a, v1)
+
+ def run(self):
+ # Do nothing.
+ return
+
+class config_cc(Command):
+    """ Distutils command to hold user-specified options
+    to C/C++ compilers.
+    """
+
+ description = "specify C/C++ compiler information"
+
+ user_options = [
+ ('compiler=', None, "specify C/C++ compiler type"),
+ ]
+
+ def initialize_options(self):
+ self.compiler = None
+
+ def finalize_options(self):
+        log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options')
+ build_clib = self.get_finalized_command('build_clib')
+ build_ext = self.get_finalized_command('build_ext')
+ config = self.get_finalized_command('config')
+ build = self.get_finalized_command('build')
+ cmd_list = [self, config, build_clib, build_ext, build]
+ for a in ['compiler']:
+ l = []
+ for c in cmd_list:
+ v = getattr(c, a)
+ if v is not None:
+ if not isinstance(v, str): v = v.compiler_type
+ if v not in l: l.append(v)
+ if not l: v1 = None
+ else: v1 = l[0]
+ if len(l)>1:
+ log.warn(' commands have different --%s options: %s'\
+ ', using first in list as default' % (a, l))
+ if v1:
+ for c in cmd_list:
+ if getattr(c, a) is None: setattr(c, a, v1)
+ return
+
+ def run(self):
+ # Do nothing.
+ return
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py
new file mode 100644
index 00000000..af24baf2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/develop.py
@@ -0,0 +1,15 @@
+""" Override the develop command from setuptools so we can ensure that our
+generated files (from build_src or build_scripts) are properly converted to real
+files with filenames.
+
+"""
+from setuptools.command.develop import develop as old_develop
+
+class develop(old_develop):
+ __doc__ = old_develop.__doc__
+ def install_for_development(self):
+ # Build sources in-place, too.
+ self.reinitialize_command('build_src', inplace=1)
+ # Make sure scripts are built.
+ self.run_command('build_scripts')
+ old_develop.install_for_development(self)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py
new file mode 100644
index 00000000..14c62b4d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/egg_info.py
@@ -0,0 +1,25 @@
+import sys
+
+from setuptools.command.egg_info import egg_info as _egg_info
+
+class egg_info(_egg_info):
+ def run(self):
+ if 'sdist' in sys.argv:
+ import warnings
+ import textwrap
+            msg = textwrap.dedent("""
+                `build_src` is being run; this may lead to missing files
+                in your sdist!  You want to use distutils.sdist instead
+                of the setuptools version:
+
+                    from distutils.command.sdist import sdist
+                    cmdclass={'sdist': sdist}
+
+                See numpy's setup.py or gh-7131 for details.""")
+ warnings.warn(msg, UserWarning, stacklevel=2)
+
+ # We need to ensure that build_src has been executed in order to give
+ # setuptools' egg_info command real filenames instead of functions which
+ # generate files.
+ self.run_command("build_src")
+ _egg_info.run(self)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install.py
new file mode 100644
index 00000000..2eff2d14
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install.py
@@ -0,0 +1,79 @@
+import sys
+if 'setuptools' in sys.modules:
+ import setuptools.command.install as old_install_mod
+ have_setuptools = True
+else:
+ import distutils.command.install as old_install_mod
+ have_setuptools = False
+from distutils.file_util import write_file
+
+old_install = old_install_mod.install
+
+class install(old_install):
+
+ # Always run install_clib - the command is cheap, so no need to bypass it;
+ # but it's not run by setuptools -- so it's run again in install_data
+ sub_commands = old_install.sub_commands + [
+ ('install_clib', lambda x: True)
+ ]
+
+ def finalize_options (self):
+ old_install.finalize_options(self)
+ self.install_lib = self.install_libbase
+
+ def setuptools_run(self):
+ """ The setuptools version of the .run() method.
+
+        We must pull in the entire body of setuptools' run() so we can
+        adjust the level used in the _getframe() call, since we wrap that
+        call with one more stack level.
+ """
+ from distutils.command.install import install as distutils_install
+
+ # Explicit request for old-style install? Just do it
+ if self.old_and_unmanageable or self.single_version_externally_managed:
+ return distutils_install.run(self)
+
+ # Attempt to detect whether we were called from setup() or by another
+ # command. If we were called by setup(), our caller will be the
+ # 'run_command' method in 'distutils.dist', and *its* caller will be
+ # the 'run_commands' method. If we were called any other way, our
+ # immediate caller *might* be 'run_command', but it won't have been
+ # called by 'run_commands'. This is slightly kludgy, but seems to
+ # work.
+ #
+ caller = sys._getframe(3)
+ caller_module = caller.f_globals.get('__name__', '')
+ caller_name = caller.f_code.co_name
+
+ if caller_module != 'distutils.dist' or caller_name!='run_commands':
+ # We weren't called from the command line or setup(), so we
+ # should run in backward-compatibility mode to support bdist_*
+ # commands.
+ distutils_install.run(self)
+ else:
+ self.do_egg_install()
+
+ def run(self):
+ if not have_setuptools:
+ r = old_install.run(self)
+ else:
+ r = self.setuptools_run()
+ if self.record:
+ # bdist_rpm fails when INSTALLED_FILES contains
+ # paths with spaces. Such paths must be enclosed
+ # with double-quotes.
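+            # e.g. /opt/my app/foo.py -> "/opt/my app/foo.py"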
+ with open(self.record, 'r') as f:
+ lines = []
+ need_rewrite = False
+ for l in f:
+ l = l.rstrip()
+ if ' ' in l:
+ need_rewrite = True
+ l = '"%s"' % (l)
+ lines.append(l)
+ if need_rewrite:
+ self.execute(write_file,
+ (self.record, lines),
+ "re-writing list of installed files to '%s'" %
+ self.record)
+ return r
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py
new file mode 100644
index 00000000..aa2e5594
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_clib.py
@@ -0,0 +1,40 @@
+import os
+from distutils.core import Command
+from distutils.ccompiler import new_compiler
+from numpy.distutils.misc_util import get_cmd
+
+class install_clib(Command):
+ description = "Command to install installable C libraries"
+
+ user_options = []
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.outfiles = []
+
+ def finalize_options(self):
+ self.set_undefined_options('install', ('install_lib', 'install_dir'))
+
+ def run (self):
+ build_clib_cmd = get_cmd("build_clib")
+ if not build_clib_cmd.build_clib:
+ # can happen if the user specified `--skip-build`
+ build_clib_cmd.finalize_options()
+ build_dir = build_clib_cmd.build_clib
+
+ # We need the compiler to get the library name -> filename association
+ if not build_clib_cmd.compiler:
+ compiler = new_compiler(compiler=None)
+ compiler.customize(self.distribution)
+ else:
+ compiler = build_clib_cmd.compiler
+
+ for l in self.distribution.installed_libraries:
+ target_dir = os.path.join(self.install_dir, l.target_dir)
+ name = compiler.library_filename(l.name)
+ source = os.path.join(build_dir, name)
+ self.mkpath(target_dir)
+ self.outfiles.append(self.copy_file(source, target_dir)[0])
+
+ def get_outputs(self):
+ return self.outfiles
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py
new file mode 100644
index 00000000..0a2e68ae
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_data.py
@@ -0,0 +1,24 @@
+import sys
+have_setuptools = ('setuptools' in sys.modules)
+
+from distutils.command.install_data import install_data as old_install_data
+
+# Data installer with improved intelligence over distutils:
+# data files are copied into the project directory instead
+# of willy-nilly.
+class install_data (old_install_data):
+
+ def run(self):
+ old_install_data.run(self)
+
+ if have_setuptools:
+ # Run install_clib again, since setuptools does not run sub-commands
+ # of install automatically
+ self.run_command('install_clib')
+
+ def finalize_options (self):
+ self.set_undefined_options('install',
+ ('install_lib', 'install_dir'),
+ ('root', 'root'),
+ ('force', 'force'),
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py
new file mode 100644
index 00000000..bb4ad563
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/install_headers.py
@@ -0,0 +1,25 @@
+import os
+from distutils.command.install_headers import install_headers as old_install_headers
+
+class install_headers (old_install_headers):
+
+ def run (self):
+ headers = self.distribution.headers
+ if not headers:
+ return
+
+ prefix = os.path.dirname(self.install_dir)
+ for header in headers:
+ if isinstance(header, tuple):
+ # Kind of a hack, but I don't know where else to change this...
+ if header[0] == 'numpy.core':
+ header = ('numpy', header[1])
+ if os.path.splitext(header[1])[1] == '.inc':
+ continue
+ d = os.path.join(*([prefix]+header[0].split('.')))
+ header = header[1]
+ else:
+ d = self.install_dir
+ self.mkpath(d)
+ (out, _) = self.copy_file(header, d)
+ self.outfiles.append(out)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py b/venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py
new file mode 100644
index 00000000..e3419388
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/command/sdist.py
@@ -0,0 +1,27 @@
+import sys
+if 'setuptools' in sys.modules:
+ from setuptools.command.sdist import sdist as old_sdist
+else:
+ from distutils.command.sdist import sdist as old_sdist
+
+from numpy.distutils.misc_util import get_data_files
+
+class sdist(old_sdist):
+
+ def add_defaults (self):
+ old_sdist.add_defaults(self)
+
+ dist = self.distribution
+
+ if dist.has_data_files():
+ for data in dist.data_files:
+ self.filelist.extend(get_data_files(data))
+
+ if dist.has_headers():
+ headers = []
+ for h in dist.headers:
+ if isinstance(h, str): headers.append(h)
+ else: headers.append(h[1])
+ self.filelist.extend(headers)
+
+ return
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py b/venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py
new file mode 100644
index 00000000..c8933d1d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/conv_template.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+"""
+Takes a templated file .xxx.src and produces the .xxx file, where .xxx is
+.i or .c or .h, using the following template rules:
+
+/**begin repeat -- on a line by itself marks the start of a repeated code
+ segment
+/**end repeat**/ -- on a line by itself marks its end
+
+After the /**begin repeat and before the */, all the named templates are
+placed; these should all have the same number of replacements.
+
+Repeat blocks can be nested, with each nested block labeled with its depth,
+i.e.
+/**begin repeat1
+ *....
+ */
+/**end repeat1**/
+
+When using nested loops, you can optionally exclude particular
+combinations of the variables using (inside the comment portion of the inner loop):
+
+ :exclude: var1=value1, var2=value2, ...
+
+This will exclude the pattern where var1 is value1 and var2 is value2 when
+the result is being generated.
+
+
+In the main body, each replacement will use one entry from the list of
+named replacements.
+
+ Note that all #..# forms in a block must have the same number of
+ comma-separated entries.
+
+Example:
+
+ An input file containing
+
+ /**begin repeat
+ * #a = 1,2,3#
+ * #b = 1,2,3#
+ */
+
+ /**begin repeat1
+ * #c = ted, jim#
+ */
+ @a@, @b@, @c@
+ /**end repeat1**/
+
+ /**end repeat**/
+
+ produces
+
+    #line 1 "template.c.src"
+
+ /*
+ *********************************************************************
+ ** This file was autogenerated from a template DO NOT EDIT!!**
+ ** Changes should be made to the original source (.src) file **
+ *********************************************************************
+ */
+
+ #line 9
+ 1, 1, ted
+
+ #line 9
+ 1, 1, jim
+
+ #line 9
+ 2, 2, ted
+
+ #line 9
+ 2, 2, jim
+
+ #line 9
+ 3, 3, ted
+
+ #line 9
+ 3, 3, jim
+
+"""
+
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+# names for replacement that are already global.
+global_names = {}
+
+# header placed at the front of each processed file
+header =\
+"""
+/*
+ *****************************************************************************
+ ** This file was autogenerated from a template DO NOT EDIT!!!! **
+ ** Changes should be made to the original source (.src) file **
+ *****************************************************************************
+ */
+
+"""
+# Parse string for repeat loops
+def parse_structure(astr, level):
+    """
+    Return a list of tuples (block_start, body_start, body_end,
+    block_end, line) for each repeat block of the given nesting level,
+    where the first four entries are offsets into astr and line is the
+    line number of the body start, counted from the beginning of the
+    string starting at zero. Returns an empty list if no loops are found.
+
+    """
+ if level == 0 :
+ loopbeg = "/**begin repeat"
+ loopend = "/**end repeat**/"
+ else :
+ loopbeg = "/**begin repeat%d" % level
+ loopend = "/**end repeat%d**/" % level
+
+ ind = 0
+ line = 0
+ spanlist = []
+ while True:
+ start = astr.find(loopbeg, ind)
+ if start == -1:
+ break
+ start2 = astr.find("*/", start)
+ start2 = astr.find("\n", start2)
+ fini1 = astr.find(loopend, start2)
+ fini2 = astr.find("\n", fini1)
+ line += astr.count("\n", ind, start2+1)
+ spanlist.append((start, start2+1, fini1, fini2+1, line))
+ line += astr.count("\n", start2+1, fini2)
+ ind = fini2
+ spanlist.sort()
+ return spanlist
+
+
+def paren_repl(obj):
+ torep = obj.group(1)
+ numrep = obj.group(2)
+ return ','.join([torep]*int(numrep))
+
+parenrep = re.compile(r"\(([^)]*)\)\*(\d+)")
+plainrep = re.compile(r"([^*]+)\*(\d+)")
+def parse_values(astr):
+    # replaces all occurrences of '(a,b,c)*4' in astr
+    # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
+    # empty values, i.e., ()*4 yields ',,,'. The result is
+    # split at ',' and a list of values returned.
+ astr = parenrep.sub(paren_repl, astr)
+ # replaces occurrences of xxx*3 with xxx, xxx, xxx
+ astr = ','.join([plainrep.sub(paren_repl, x.strip())
+ for x in astr.split(',')])
+ return astr.split(',')
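+
+# For example (illustrative):
+#   parse_values("(a,b)*2, x*3") -> ['a', 'b', 'a', 'b', 'x', 'x', 'x']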
+
+
+stripast = re.compile(r"\n\s*\*?")
+named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
+exclude_vars_re = re.compile(r"(\w*)=(\w*)")
+exclude_re = re.compile(":exclude:")
+def parse_loop_header(loophead) :
+ """Find all named replacements in the header
+
+ Returns a list of dictionaries, one for each loop iteration,
+ where each key is a name to be substituted and the corresponding
+ value is the replacement string.
+
+    Exclusions given via ':exclude:' lines are parsed into dictionaries
+    of key/value pairs, e.g. [{'var1': 'value1', 'var2': 'value2'}, ...];
+    there can be more than one exclusion. Note that they are collected
+    here but not returned or applied.
+
+ """
+ # Strip out '\n' and leading '*', if any, in continuation lines.
+    # This should not affect code predating this change, as
+    # continuation lines were not allowed before.
+ loophead = stripast.sub("", loophead)
+ # parse out the names and lists of values
+ names = []
+ reps = named_re.findall(loophead)
+ nsub = None
+ for rep in reps:
+ name = rep[0]
+ vals = parse_values(rep[1])
+ size = len(vals)
+ if nsub is None :
+ nsub = size
+ elif nsub != size :
+ msg = "Mismatch in number of values, %d != %d\n%s = %s"
+ raise ValueError(msg % (nsub, size, name, vals))
+ names.append((name, vals))
+
+
+ # Find any exclude variables
+ excludes = []
+
+ for obj in exclude_re.finditer(loophead):
+ span = obj.span()
+ # find next newline
+ endline = loophead.find('\n', span[1])
+ substr = loophead[span[1]:endline]
+ ex_names = exclude_vars_re.findall(substr)
+ excludes.append(dict(ex_names))
+
+ # generate list of dictionaries, one for each template iteration
+ dlist = []
+ if nsub is None :
+ raise ValueError("No substitution variables found")
+ for i in range(nsub):
+ tmp = {name: vals[i] for name, vals in names}
+ dlist.append(tmp)
+ return dlist
+
+replace_re = re.compile(r"@(\w+)@")
+def parse_string(astr, env, level, line) :
+ lineno = "#line %d\n" % line
+
+ # local function for string replacement, uses env
+ def replace(match):
+ name = match.group(1)
+ try :
+ val = env[name]
+ except KeyError:
+ msg = 'line %d: no definition of key "%s"'%(line, name)
+ raise ValueError(msg) from None
+ return val
+
+ code = [lineno]
+ struct = parse_structure(astr, level)
+ if struct :
+ # recurse over inner loops
+ oldend = 0
+ newlevel = level + 1
+ for sub in struct:
+ pref = astr[oldend:sub[0]]
+ head = astr[sub[0]:sub[1]]
+ text = astr[sub[1]:sub[2]]
+ oldend = sub[3]
+ newline = line + sub[4]
+ code.append(replace_re.sub(replace, pref))
+ try :
+ envlist = parse_loop_header(head)
+ except ValueError as e:
+ msg = "line %d: %s" % (newline, e)
+ raise ValueError(msg)
+ for newenv in envlist :
+ newenv.update(env)
+ newcode = parse_string(text, newenv, newlevel, newline)
+ code.extend(newcode)
+ suff = astr[oldend:]
+ code.append(replace_re.sub(replace, suff))
+ else :
+ # replace keys
+ code.append(replace_re.sub(replace, astr))
+ code.append('\n')
+ return ''.join(code)
+
+def process_str(astr):
+ code = [header]
+ code.extend(parse_string(astr, global_names, 0, 1))
+ return ''.join(code)
+
+
+include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
+ r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
+
+def resolve_includes(source):
+ d = os.path.dirname(source)
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
+ else:
+ lines.append(line)
+ return lines
+
+def process_file(source):
+ lines = resolve_includes(source)
+ sourcefile = os.path.normcase(source).replace("\\", "\\\\")
+ try:
+ code = process_str(''.join(lines))
+ except ValueError as e:
+ raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None
+ return '#line 1 "%s"\n%s' % (sourcefile, code)
+
+
+def unique_key(adict):
+    # This obtains a key that is not already in the dictionary. It
+    # works by joining the first n letters of the existing keys and
+    # increasing n until the result is not itself an existing key
+    # -- not particularly quick.
+ allkeys = list(adict.keys())
+ done = False
+ n = 1
+ while not done:
+ newkey = "".join([x[:n] for x in allkeys])
+ if newkey in allkeys:
+ n += 1
+ else:
+ done = True
+ return newkey
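+
+# For example (illustrative): unique_key({'ab': 1, 'cd': 2}) joins the
+# 1-letter prefixes into 'ac', which is not an existing key, and returns it.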
+
+
+def main():
+ try:
+ file = sys.argv[1]
+ except IndexError:
+ fid = sys.stdin
+ outfile = sys.stdout
+ else:
+ fid = open(file, 'r')
+ (base, ext) = os.path.splitext(file)
+ newname = base
+ outfile = open(newname, 'w')
+
+ allstr = fid.read()
+ try:
+ writestr = process_str(allstr)
+ except ValueError as e:
+ raise ValueError("In %s loop at %s" % (file, e)) from None
+
+ outfile.write(writestr)
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/core.py b/venv/lib/python3.9/site-packages/numpy/distutils/core.py
new file mode 100644
index 00000000..c4a14e59
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/core.py
@@ -0,0 +1,215 @@
+import sys
+from distutils.core import Distribution
+
+if 'setuptools' in sys.modules:
+ have_setuptools = True
+ from setuptools import setup as old_setup
+    # easy_install imports math; it may be picked up from cwd
+ from setuptools.command import easy_install
+ try:
+ # very old versions of setuptools don't have this
+ from setuptools.command import bdist_egg
+ except ImportError:
+ have_setuptools = False
+else:
+ from distutils.core import setup as old_setup
+ have_setuptools = False
+
+import warnings
+import distutils.core
+import distutils.dist
+
+from numpy.distutils.extension import Extension # noqa: F401
+from numpy.distutils.numpy_distribution import NumpyDistribution
+from numpy.distutils.command import config, config_compiler, \
+ build, build_py, build_ext, build_clib, build_src, build_scripts, \
+ sdist, install_data, install_headers, install, bdist_rpm, \
+ install_clib
+from numpy.distutils.misc_util import is_sequence, is_string
+
+numpy_cmdclass = {'build': build.build,
+ 'build_src': build_src.build_src,
+ 'build_scripts': build_scripts.build_scripts,
+ 'config_cc': config_compiler.config_cc,
+ 'config_fc': config_compiler.config_fc,
+ 'config': config.config,
+ 'build_ext': build_ext.build_ext,
+ 'build_py': build_py.build_py,
+ 'build_clib': build_clib.build_clib,
+ 'sdist': sdist.sdist,
+ 'install_data': install_data.install_data,
+ 'install_headers': install_headers.install_headers,
+ 'install_clib': install_clib.install_clib,
+ 'install': install.install,
+ 'bdist_rpm': bdist_rpm.bdist_rpm,
+ }
+if have_setuptools:
+ # Use our own versions of develop and egg_info to ensure that build_src is
+ # handled appropriately.
+ from numpy.distutils.command import develop, egg_info
+ numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
+ numpy_cmdclass['develop'] = develop.develop
+ numpy_cmdclass['easy_install'] = easy_install.easy_install
+ numpy_cmdclass['egg_info'] = egg_info.egg_info
+
+def _dict_append(d, **kws):
+ for k, v in kws.items():
+ if k not in d:
+ d[k] = v
+ continue
+ dv = d[k]
+ if isinstance(dv, tuple):
+ d[k] = dv + tuple(v)
+ elif isinstance(dv, list):
+ d[k] = dv + list(v)
+ elif isinstance(dv, dict):
+ _dict_append(dv, **v)
+ elif is_string(dv):
+ d[k] = dv + v
+ else:
+ raise TypeError(repr(type(dv)))
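+
+# For example (illustrative):
+#   d = {'libraries': ['m']}
+#   _dict_append(d, libraries=['z'], name='pkg')
+#   # d == {'libraries': ['m', 'z'], 'name': 'pkg'}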
+
+def _command_line_ok(_cache=None):
+ """ Return True if command line does not contain any
+ help or display requests.
+ """
+ if _cache:
+ return _cache[0]
+ elif _cache is None:
+ _cache = []
+ ok = True
+ display_opts = ['--'+n for n in Distribution.display_option_names]
+ for o in Distribution.display_options:
+ if o[1]:
+ display_opts.append('-'+o[1])
+ for arg in sys.argv:
+ if arg.startswith('--help') or arg=='-h' or arg in display_opts:
+ ok = False
+ break
+ _cache.append(ok)
+ return ok
+
+def get_distribution(always=False):
+ dist = distutils.core._setup_distribution
+ # XXX Hack to get numpy installable with easy_install.
+    # The problem is easy_install runs its own setup(), which
+ # sets up distutils.core._setup_distribution. However,
+ # when our setup() runs, that gets overwritten and lost.
+ # We can't use isinstance, as the DistributionWithoutHelpCommands
+ # class is local to a function in setuptools.command.easy_install
+ if dist is not None and \
+ 'DistributionWithoutHelpCommands' in repr(dist):
+ dist = None
+ if always and dist is None:
+ dist = NumpyDistribution()
+ return dist
+
+def setup(**attr):
+
+ cmdclass = numpy_cmdclass.copy()
+
+ new_attr = attr.copy()
+ if 'cmdclass' in new_attr:
+ cmdclass.update(new_attr['cmdclass'])
+ new_attr['cmdclass'] = cmdclass
+
+ if 'configuration' in new_attr:
+        # To avoid calling configuration if there are any errors
+        # or help requests on the command line.
+ configuration = new_attr.pop('configuration')
+
+ old_dist = distutils.core._setup_distribution
+ old_stop = distutils.core._setup_stop_after
+ distutils.core._setup_distribution = None
+ distutils.core._setup_stop_after = "commandline"
+ try:
+ dist = setup(**new_attr)
+ finally:
+ distutils.core._setup_distribution = old_dist
+ distutils.core._setup_stop_after = old_stop
+ if dist.help or not _command_line_ok():
+ # probably displayed help, skip running any commands
+ return dist
+
+ # create setup dictionary and append to new_attr
+ config = configuration()
+ if hasattr(config, 'todict'):
+ config = config.todict()
+ _dict_append(new_attr, **config)
+
+ # Move extension source libraries to libraries
+ libraries = []
+ for ext in new_attr.get('ext_modules', []):
+ new_libraries = []
+ for item in ext.libraries:
+ if is_sequence(item):
+ lib_name, build_info = item
+ _check_append_ext_library(libraries, lib_name, build_info)
+ new_libraries.append(lib_name)
+ elif is_string(item):
+ new_libraries.append(item)
+ else:
+ raise TypeError("invalid description of extension module "
+ "library %r" % (item,))
+ ext.libraries = new_libraries
+ if libraries:
+ if 'libraries' not in new_attr:
+ new_attr['libraries'] = []
+ for item in libraries:
+ _check_append_library(new_attr['libraries'], item)
+
+ # sources in ext_modules or libraries may contain header files
+ if ('ext_modules' in new_attr or 'libraries' in new_attr) \
+ and 'headers' not in new_attr:
+ new_attr['headers'] = []
+
+ # Use our custom NumpyDistribution class instead of distutils' one
+ new_attr['distclass'] = NumpyDistribution
+
+ return old_setup(**new_attr)
+
+def _check_append_library(libraries, item):
+ for libitem in libraries:
+ if is_sequence(libitem):
+ if is_sequence(item):
+ if item[0]==libitem[0]:
+ if item[1] is libitem[1]:
+ return
+ warnings.warn("[0] libraries list contains %r with"
+ " different build_info" % (item[0],),
+ stacklevel=2)
+ break
+ else:
+ if item==libitem[0]:
+ warnings.warn("[1] libraries list contains %r with"
+ " no build_info" % (item[0],),
+ stacklevel=2)
+ break
+ else:
+ if is_sequence(item):
+ if item[0]==libitem:
+ warnings.warn("[2] libraries list contains %r with"
+ " no build_info" % (item[0],),
+ stacklevel=2)
+ break
+ else:
+ if item==libitem:
+ return
+ libraries.append(item)
+
+def _check_append_ext_library(libraries, lib_name, build_info):
+ for item in libraries:
+ if is_sequence(item):
+ if item[0]==lib_name:
+ if item[1] is build_info:
+ return
+ warnings.warn("[3] libraries list contains %r with"
+ " different build_info" % (lib_name,),
+ stacklevel=2)
+ break
+ elif item==lib_name:
+ warnings.warn("[4] libraries list contains %r with"
+ " no build_info" % (lib_name,),
+ stacklevel=2)
+ break
+ libraries.append((lib_name, build_info))
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py b/venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py
new file mode 100644
index 00000000..77620210
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/cpuinfo.py
@@ -0,0 +1,683 @@
+#!/usr/bin/env python3
+"""
+cpuinfo
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+Pearu Peterson
+
+"""
+__all__ = ['cpu']
+
+import os
+import platform
+import re
+import sys
+import types
+import warnings
+
+from subprocess import getstatusoutput
+
+
+def getoutput(cmd, successful_status=(0,), stacklevel=1):
+ try:
+ status, output = getstatusoutput(cmd)
+ except OSError as e:
+ warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
+ return False, ""
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
+ return True, output
+ return False, output
+
+def command_info(successful_status=(0,), stacklevel=1, **kw):
+ info = {}
+ for key in kw:
+ ok, output = getoutput(kw[key], successful_status=successful_status,
+ stacklevel=stacklevel+1)
+ if ok:
+ info[key] = output.strip()
+ return info
+
+def command_by_line(cmd, successful_status=(0,), stacklevel=1):
+ ok, output = getoutput(cmd, successful_status=successful_status,
+ stacklevel=stacklevel+1)
+ if not ok:
+ return
+ for line in output.splitlines():
+ yield line.strip()
+
+def key_value_from_command(cmd, sep, successful_status=(0,),
+ stacklevel=1):
+ d = {}
+ for line in command_by_line(cmd, successful_status=successful_status,
+ stacklevel=stacklevel+1):
+ l = [s.strip() for s in line.split(sep, 1)]
+ if len(l) == 2:
+ d[l[0]] = l[1]
+ return d
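+
+# For example (illustrative): key_value_from_command('sysctl hw', sep='=')
+# returns a dict such as {'hw.ncpu': '8', 'hw.memsize': '17179869184', ...}.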
+
+class CPUInfoBase:
+    """Holds CPU information and provides methods for querying
+    the availability of various CPU features.
+ """
+
+ def _try_call(self, func):
+ try:
+ return func()
+ except Exception:
+ pass
+
+ def __getattr__(self, name):
+ if not name.startswith('_'):
+ if hasattr(self, '_'+name):
+ attr = getattr(self, '_'+name)
+ if isinstance(attr, types.MethodType):
+ return lambda func=self._try_call,attr=attr : func(attr)
+ else:
+ return lambda : None
+ raise AttributeError(name)
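+
+    # Thus e.g. `cpu.has_sse()` resolves to `_try_call(self._has_sse)`,
+    # which returns None instead of raising if the probe fails.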
+
+ def _getNCPUs(self):
+ return 1
+
+ def __get_nbits(self):
+ abits = platform.architecture()[0]
+ nbits = re.compile(r'(\d+)bit').search(abits).group(1)
+ return nbits
+
+ def _is_32bit(self):
+ return self.__get_nbits() == '32'
+
+ def _is_64bit(self):
+ return self.__get_nbits() == '64'
+
+class LinuxCPUInfo(CPUInfoBase):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = [ {} ]
+ ok, output = getoutput('uname -m')
+ if ok:
+ info[0]['uname_m'] = output.strip()
+ try:
+ fo = open('/proc/cpuinfo')
+ except OSError as e:
+ warnings.warn(str(e), UserWarning, stacklevel=2)
+ else:
+ for line in fo:
+ name_value = [s.strip() for s in line.split(':', 1)]
+ if len(name_value) != 2:
+ continue
+ name, value = name_value
+ if not info or name in info[-1]: # next processor
+ info.append({})
+ info[-1][name] = value
+ fo.close()
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ # Athlon
+
+ def _is_AMD(self):
+ return self.info[0]['vendor_id']=='AuthenticAMD'
+
+ def _is_AthlonK6_2(self):
+ return self._is_AMD() and self.info[0]['model'] == '2'
+
+ def _is_AthlonK6_3(self):
+ return self._is_AMD() and self.info[0]['model'] == '3'
+
+ def _is_AthlonK6(self):
+ return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
+
+ def _is_AthlonK7(self):
+ return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
+
+ def _is_AthlonMP(self):
+ return re.match(r'.*?Athlon\(tm\) MP\b',
+ self.info[0]['model name']) is not None
+
+ def _is_AMD64(self):
+ return self.is_AMD() and self.info[0]['family'] == '15'
+
+ def _is_Athlon64(self):
+ return re.match(r'.*?Athlon\(tm\) 64\b',
+ self.info[0]['model name']) is not None
+
+ def _is_AthlonHX(self):
+ return re.match(r'.*?Athlon HX\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Opteron(self):
+ return re.match(r'.*?Opteron\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Hammer(self):
+ return re.match(r'.*?Hammer\b',
+ self.info[0]['model name']) is not None
+
+ # Alpha
+
+ def _is_Alpha(self):
+ return self.info[0]['cpu']=='Alpha'
+
+ def _is_EV4(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
+
+ def _is_EV5(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
+
+ def _is_EV56(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
+
+ def _is_PCA56(self):
+ return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
+
+ # Intel
+
+ #XXX
+ _is_i386 = _not_impl
+
+ def _is_Intel(self):
+ return self.info[0]['vendor_id']=='GenuineIntel'
+
+ def _is_i486(self):
+ return self.info[0]['cpu']=='i486'
+
+ def _is_i586(self):
+ return self.is_Intel() and self.info[0]['cpu family'] == '5'
+
+ def _is_i686(self):
+ return self.is_Intel() and self.info[0]['cpu family'] == '6'
+
+ def _is_Celeron(self):
+ return re.match(r'.*?Celeron',
+ self.info[0]['model name']) is not None
+
+ def _is_Pentium(self):
+ return re.match(r'.*?Pentium',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumII(self):
+ return re.match(r'.*?Pentium.*?II\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumPro(self):
+ return re.match(r'.*?PentiumPro\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumMMX(self):
+ return re.match(r'.*?Pentium.*?MMX\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumIII(self):
+ return re.match(r'.*?Pentium.*?III\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumIV(self):
+ return re.match(r'.*?Pentium.*?(IV|4)\b',
+ self.info[0]['model name']) is not None
+
+ def _is_PentiumM(self):
+ return re.match(r'.*?Pentium.*?M\b',
+ self.info[0]['model name']) is not None
+
+ def _is_Prescott(self):
+ return self.is_PentiumIV() and self.has_sse3()
+
+ def _is_Nocona(self):
+ return (self.is_Intel()
+ and (self.info[0]['cpu family'] == '6'
+ or self.info[0]['cpu family'] == '15')
+ and (self.has_sse3() and not self.has_ssse3())
+ and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)
+
+ def _is_Core2(self):
+ return (self.is_64bit() and self.is_Intel() and
+ re.match(r'.*?Core\(TM\)2\b',
+ self.info[0]['model name']) is not None)
+
+ def _is_Itanium(self):
+ return re.match(r'.*?Itanium\b',
+ self.info[0]['family']) is not None
+
+ def _is_XEON(self):
+ return re.match(r'.*?XEON\b',
+ self.info[0]['model name'], re.IGNORECASE) is not None
+
+ _is_Xeon = _is_XEON
+
+ # Varia
+
+ def _is_singleCPU(self):
+ return len(self.info) == 1
+
+ def _getNCPUs(self):
+ return len(self.info)
+
+ def _has_fdiv_bug(self):
+ return self.info[0]['fdiv_bug']=='yes'
+
+ def _has_f00f_bug(self):
+ return self.info[0]['f00f_bug']=='yes'
+
+ def _has_mmx(self):
+ return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
+
+ def _has_sse(self):
+ return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
+
+ def _has_sse2(self):
+ return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
+
+ def _has_sse3(self):
+ return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
+
+ def _has_ssse3(self):
+ return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
+
+ def _has_3dnow(self):
+ return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
+
+ def _has_3dnowext(self):
+ return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
+
+class IRIXCPUInfo(CPUInfoBase):
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = key_value_from_command('sysconf', sep=' ',
+ successful_status=(0, 1))
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _is_singleCPU(self):
+ return self.info.get('NUM_PROCESSORS') == '1'
+
+ def _getNCPUs(self):
+ return int(self.info.get('NUM_PROCESSORS', 1))
+
+ def __cputype(self, n):
+ return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
+ def _is_r2000(self): return self.__cputype(2000)
+ def _is_r3000(self): return self.__cputype(3000)
+ def _is_r3900(self): return self.__cputype(3900)
+ def _is_r4000(self): return self.__cputype(4000)
+ def _is_r4100(self): return self.__cputype(4100)
+ def _is_r4300(self): return self.__cputype(4300)
+ def _is_r4400(self): return self.__cputype(4400)
+ def _is_r4600(self): return self.__cputype(4600)
+ def _is_r4650(self): return self.__cputype(4650)
+ def _is_r5000(self): return self.__cputype(5000)
+ def _is_r6000(self): return self.__cputype(6000)
+ def _is_r8000(self): return self.__cputype(8000)
+ def _is_r10000(self): return self.__cputype(10000)
+ def _is_r12000(self): return self.__cputype(12000)
+ def _is_rorion(self): return self.__cputype('orion')
+
+ def get_ip(self):
+ try: return self.info.get('MACHINE')
+ except Exception: pass
+ def __machine(self, n):
+ return self.info.get('MACHINE').lower() == 'ip%s' % (n)
+ def _is_IP19(self): return self.__machine(19)
+ def _is_IP20(self): return self.__machine(20)
+ def _is_IP21(self): return self.__machine(21)
+ def _is_IP22(self): return self.__machine(22)
+ def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000()
+ def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000()
+ def _is_IP24(self): return self.__machine(24)
+ def _is_IP25(self): return self.__machine(25)
+ def _is_IP26(self): return self.__machine(26)
+ def _is_IP27(self): return self.__machine(27)
+ def _is_IP28(self): return self.__machine(28)
+ def _is_IP30(self): return self.__machine(30)
+ def _is_IP32(self): return self.__machine(32)
+ def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000()
+ def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000()
+
+
+class DarwinCPUInfo(CPUInfoBase):
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = command_info(arch='arch',
+ machine='machine')
+ info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=')
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _getNCPUs(self):
+ return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
+
+ def _is_Power_Macintosh(self):
+ return self.info['sysctl_hw']['hw.machine']=='Power Macintosh'
+
+ def _is_i386(self):
+ return self.info['arch']=='i386'
+ def _is_ppc(self):
+ return self.info['arch']=='ppc'
+
+ def __machine(self, n):
+ return self.info['machine'] == 'ppc%s'%n
+ def _is_ppc601(self): return self.__machine(601)
+ def _is_ppc602(self): return self.__machine(602)
+ def _is_ppc603(self): return self.__machine(603)
+ def _is_ppc603e(self): return self.__machine('603e')
+ def _is_ppc604(self): return self.__machine(604)
+ def _is_ppc604e(self): return self.__machine('604e')
+ def _is_ppc620(self): return self.__machine(620)
+ def _is_ppc630(self): return self.__machine(630)
+ def _is_ppc740(self): return self.__machine(740)
+ def _is_ppc7400(self): return self.__machine(7400)
+ def _is_ppc7450(self): return self.__machine(7450)
+ def _is_ppc750(self): return self.__machine(750)
+ def _is_ppc403(self): return self.__machine(403)
+ def _is_ppc505(self): return self.__machine(505)
+ def _is_ppc801(self): return self.__machine(801)
+ def _is_ppc821(self): return self.__machine(821)
+ def _is_ppc823(self): return self.__machine(823)
+ def _is_ppc860(self): return self.__machine(860)
+
+
+class SunOSCPUInfo(CPUInfoBase):
+
+ info = None
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = command_info(arch='arch',
+ mach='mach',
+ uname_i='uname_i',
+ isainfo_b='isainfo -b',
+ isainfo_n='isainfo -n',
+ )
+ info['uname_X'] = key_value_from_command('uname -X', sep='=')
+ for line in command_by_line('psrinfo -v 0'):
+ m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
+ if m:
+ info['processor'] = m.group('p')
+ break
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ def _is_i386(self):
+ return self.info['isainfo_n']=='i386'
+ def _is_sparc(self):
+ return self.info['isainfo_n']=='sparc'
+ def _is_sparcv9(self):
+ return self.info['isainfo_n']=='sparcv9'
+
+ def _getNCPUs(self):
+ return int(self.info['uname_X'].get('NumCPU', 1))
+
+ def _is_sun4(self):
+ return self.info['arch']=='sun4'
+
+ def _is_SUNW(self):
+ return re.match(r'SUNW', self.info['uname_i']) is not None
+ def _is_sparcstation5(self):
+ return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
+ def _is_ultra1(self):
+ return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
+ def _is_ultra250(self):
+ return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
+ def _is_ultra2(self):
+ return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
+ def _is_ultra30(self):
+ return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
+ def _is_ultra4(self):
+ return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
+ def _is_ultra5_10(self):
+ return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
+ def _is_ultra5(self):
+ return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
+ def _is_ultra60(self):
+ return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
+ def _is_ultra80(self):
+ return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
+ def _is_ultraenterprice(self):
+ return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
+ def _is_ultraenterprice10k(self):
+ return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
+ def _is_sunfire(self):
+ return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
+ def _is_ultra(self):
+ return re.match(r'.*Ultra', self.info['uname_i']) is not None
+
+ def _is_cpusparcv7(self):
+ return self.info['processor']=='sparcv7'
+ def _is_cpusparcv8(self):
+ return self.info['processor']=='sparcv8'
+ def _is_cpusparcv9(self):
+ return self.info['processor']=='sparcv9'
+
+class Win32CPUInfo(CPUInfoBase):
+
+ info = None
+ pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
+ # XXX: what does the value of
+ # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
+ # mean?
+
+ def __init__(self):
+ if self.info is not None:
+ return
+ info = []
+ try:
+ #XXX: Bad style to use so long `try:...except:...`. Fix it!
+ import winreg
+
+ prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+ r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+ chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
+ pnum=0
+ while True:
+ try:
+ proc=winreg.EnumKey(chnd, pnum)
+ except winreg.error:
+ break
+ else:
+ pnum+=1
+ info.append({"Processor":proc})
+ phnd=winreg.OpenKey(chnd, proc)
+ pidx=0
+ while True:
+ try:
+ name, value, vtpe=winreg.EnumValue(phnd, pidx)
+ except winreg.error:
+ break
+ else:
+ pidx=pidx+1
+ info[-1][name]=value
+ if name=="Identifier":
+ srch=prgx.search(value)
+ if srch:
+ info[-1]["Family"]=int(srch.group("FML"))
+ info[-1]["Model"]=int(srch.group("MDL"))
+ info[-1]["Stepping"]=int(srch.group("STP"))
+ except Exception as e:
+ print(e, '(ignoring)')
+ self.__class__.info = info
+
+ def _not_impl(self): pass
+
+ # Athlon
+
+ def _is_AMD(self):
+ return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+ def _is_Am486(self):
+ return self.is_AMD() and self.info[0]['Family']==4
+
+ def _is_Am5x86(self):
+ return self.is_AMD() and self.info[0]['Family']==4
+
+ def _is_AMDK5(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model'] in [0, 1, 2, 3]
+
+ def _is_AMDK6(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model'] in [6, 7]
+
+ def _is_AMDK6_2(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==8
+
+ def _is_AMDK6_3(self):
+ return self.is_AMD() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==9
+
+ def _is_AMDK7(self):
+ return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+ def _is_AMD64(self):
+ return self.is_AMD() and self.info[0]['Family'] == 15
+
+ # Intel
+
+ def _is_Intel(self):
+ return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+ def _is_i386(self):
+ return self.info[0]['Family']==3
+
+ def _is_i486(self):
+ return self.info[0]['Family']==4
+
+ def _is_i586(self):
+ return self.is_Intel() and self.info[0]['Family']==5
+
+ def _is_i686(self):
+ return self.is_Intel() and self.info[0]['Family']==6
+
+ def _is_Pentium(self):
+ return self.is_Intel() and self.info[0]['Family']==5
+
+ def _is_PentiumMMX(self):
+ return self.is_Intel() and self.info[0]['Family']==5 \
+ and self.info[0]['Model']==4
+
+ def _is_PentiumPro(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model']==1
+
+ def _is_PentiumII(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model'] in [3, 5, 6]
+
+ def _is_PentiumIII(self):
+ return self.is_Intel() and self.info[0]['Family']==6 \
+ and self.info[0]['Model'] in [7, 8, 9, 10, 11]
+
+ def _is_PentiumIV(self):
+ return self.is_Intel() and self.info[0]['Family']==15
+
+ def _is_PentiumM(self):
+ return self.is_Intel() and self.info[0]['Family'] == 6 \
+ and self.info[0]['Model'] in [9, 13, 14]
+
+ def _is_Core2(self):
+ return self.is_Intel() and self.info[0]['Family'] == 6 \
+ and self.info[0]['Model'] in [15, 16, 17]
+
+ # Varia
+
+ def _is_singleCPU(self):
+ return len(self.info) == 1
+
+ def _getNCPUs(self):
+ return len(self.info)
+
+ def _has_mmx(self):
+ if self.is_Intel():
+ return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+ or (self.info[0]['Family'] in [6, 15])
+ elif self.is_AMD():
+ return self.info[0]['Family'] in [5, 6, 15]
+ else:
+ return False
+
+ def _has_sse(self):
+ if self.is_Intel():
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [7, 8, 9, 10, 11])
+ or self.info[0]['Family']==15)
+ elif self.is_AMD():
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [6, 7, 8, 10])
+ or self.info[0]['Family']==15)
+ else:
+ return False
+
+ def _has_sse2(self):
+ if self.is_Intel():
+            # the probe method is named _is_PentiumIV, so the dynamic
+            # accessor is is_PentiumIV(), not is_Pentium4()
+            return self.is_PentiumIV() or self.is_PentiumM() \
+                   or self.is_Core2()
+ elif self.is_AMD():
+ return self.is_AMD64()
+ else:
+ return False
+
+ def _has_3dnow(self):
+ return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
+
+ def _has_3dnowext(self):
+ return self.is_AMD() and self.info[0]['Family'] in [6, 15]
+
+if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
+ cpuinfo = LinuxCPUInfo
+elif sys.platform.startswith('irix'):
+ cpuinfo = IRIXCPUInfo
+elif sys.platform == 'darwin':
+ cpuinfo = DarwinCPUInfo
+elif sys.platform.startswith('sunos'):
+ cpuinfo = SunOSCPUInfo
+elif sys.platform.startswith('win32'):
+ cpuinfo = Win32CPUInfo
+elif sys.platform.startswith('cygwin'):
+ cpuinfo = LinuxCPUInfo
+#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
+else:
+ cpuinfo = CPUInfoBase
+
+cpu = cpuinfo()
+
+#if __name__ == "__main__":
+#
+# cpu.is_blaa()
+# cpu.is_Intel()
+# cpu.is_Alpha()
+#
+# print('CPU information:'),
+# for name in dir(cpuinfo):
+# if name[0]=='_' and name[1]!='_':
+# r = getattr(cpu,name[1:])()
+# if r:
+# if r!=1:
+# print('%s=%s' %(name[1:],r))
+# else:
+# print(name[1:]),
+# print()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py b/venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py
new file mode 100644
index 00000000..a67453ab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/exec_command.py
@@ -0,0 +1,315 @@
+"""
+exec_command
+
+Implements exec_command function that is (almost) equivalent to
+commands.getstatusoutput function but on NT, DOS systems the
+returned status is actually correct (though, the returned status
+values may be different by a factor). In addition, exec_command
+takes keyword arguments for (re-)defining environment variables.
+
+Provides functions:
+
+ exec_command --- execute command in a specified directory and
+ in the modified environment.
+ find_executable --- locate a command using info from environment
+ variable PATH. Equivalent to posix `which`
+ command.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+Created: 11 January 2003
+
+Requires: Python 2.x
+
+Successfully tested on:
+
+======== ============ =================================================
+os.name sys.platform comments
+======== ============ =================================================
+posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
+ PyCrust 0.9.3, Idle 1.0.2
+posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
+posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
+posix darwin Darwin 7.2.0, Python 2.3
+nt win32 Windows Me
+ Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
+ Python 2.1.1 Idle 0.8
+nt win32 Windows 98, Python 2.1.1. Idle 0.8
+nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
+ fail i.e. redefining environment variables may
+ not work. FIXED: don't use cygwin echo!
+ Comment: also `cmd /c echo` will not work
+ but redefining environment variables do work.
+posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
+nt win32 Windows XP, Python 2.3.3
+======== ============ =================================================
+
+Known bugs:
+
+* Tests that send messages to stderr fail when executed from an MSYS prompt
+  because the messages are lost at some point.
+
+"""
+__all__ = ['exec_command', 'find_executable']
+
+import os
+import sys
+import subprocess
+import locale
+import warnings
+
+from numpy.distutils.misc_util import is_sequence, make_temp_file
+from numpy.distutils import log
+
+def filepath_from_subprocess_output(output):
+ """
+ Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
+
+ Inherited from `exec_command`, and possibly incorrect.
+ """
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ output = output.decode(mylocale, errors='replace')
+ output = output.replace('\r\n', '\n')
+ # Another historical oddity
+ if output[-1:] == '\n':
+ output = output[:-1]
+ return output
+
+
+def forward_bytes_to_stdout(val):
+ """
+ Forward bytes from a subprocess call to the console, without attempting to
+ decode them.
+
+ The assumption is that the subprocess call already returned bytes in
+ a suitable encoding.
+ """
+ if hasattr(sys.stdout, 'buffer'):
+ # use the underlying binary output if there is one
+ sys.stdout.buffer.write(val)
+ elif hasattr(sys.stdout, 'encoding'):
+ # round-trip the encoding if necessary
+ sys.stdout.write(val.decode(sys.stdout.encoding))
+ else:
+ # make a best-guess at the encoding
+ sys.stdout.write(val.decode('utf8', errors='replace'))
+
+
+def temp_file_name():
+ # 2019-01-30, 1.17
+ warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
+ 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
+ fo, name = make_temp_file()
+ fo.close()
+ return name
+
+def get_pythonexe():
+ pythonexe = sys.executable
+ if os.name in ['nt', 'dos']:
+ fdir, fn = os.path.split(pythonexe)
+ fn = fn.upper().replace('PYTHONW', 'PYTHON')
+ pythonexe = os.path.join(fdir, fn)
+ assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
+ return pythonexe
+
+def find_executable(exe, path=None, _cache={}):
+    """Return the full path of an executable, or None.
+
+ Symbolic links are not followed.
+ """
+ key = exe, path
+ try:
+ return _cache[key]
+ except KeyError:
+ pass
+ log.debug('find_executable(%r)' % exe)
+ orig_exe = exe
+
+ if path is None:
+ path = os.environ.get('PATH', os.defpath)
+ if os.name=='posix':
+ realpath = os.path.realpath
+ else:
+ realpath = lambda a:a
+
+ if exe.startswith('"'):
+ exe = exe[1:-1]
+
+ suffixes = ['']
+ if os.name in ['nt', 'dos', 'os2']:
+ fn, ext = os.path.splitext(exe)
+ extra_suffixes = ['.exe', '.com', '.bat']
+ if ext.lower() not in extra_suffixes:
+ suffixes = extra_suffixes
+
+ if os.path.isabs(exe):
+ paths = ['']
+ else:
+ paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
+
+ for path in paths:
+ fn = os.path.join(path, exe)
+ for s in suffixes:
+ f_ext = fn+s
+ if not os.path.islink(f_ext):
+ f_ext = realpath(f_ext)
+ if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
+ log.info('Found executable %s' % f_ext)
+ _cache[key] = f_ext
+ return f_ext
+
+ log.warn('Could not locate executable %s' % orig_exe)
+ return None
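+
+# Usage sketch (illustrative): find_executable('gcc') returns an absolute
+# path such as '/usr/bin/gcc' when gcc is on the PATH, and None otherwise.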
+
+############################################################
+
+def _preserve_environment( names ):
+ log.debug('_preserve_environment(%r)' % (names))
+ env = {name: os.environ.get(name) for name in names}
+ return env
+
+def _update_environment( **env ):
+ log.debug('_update_environment(...)')
+ for name, value in env.items():
+ os.environ[name] = value or ''
+
+def exec_command(command, execute_in='', use_shell=None, use_tee=None,
+ _with_python = 1, **env ):
+ """
+ Return (status,output) of executed command.
+
+ .. deprecated:: 1.17
+ Use subprocess.Popen instead
+
+ Parameters
+ ----------
+ command : str
+ A concatenated string of executable and arguments.
+ execute_in : str
+ Before running command ``cd execute_in`` and after ``cd -``.
+ use_shell : {bool, None}, optional
+ If True, execute ``sh -c command``. Default None (True)
+ use_tee : {bool, None}, optional
+ If True use tee. Default None (True)
+
+
+ Returns
+ -------
+ res : str
+ Both stdout and stderr messages.
+
+ Notes
+ -----
+    On NT and DOS systems the returned status is correct for external
+    commands. Wildcards will not work on non-posix systems or when
+    use_shell=0.
+
+ """
+ # 2019-01-30, 1.17
+ warnings.warn('exec_command is deprecated since NumPy v1.17, use '
+ 'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
+ log.debug('exec_command(%r,%s)' % (command,
+ ','.join(['%s=%r'%kv for kv in env.items()])))
+
+ if use_tee is None:
+ use_tee = os.name=='posix'
+ if use_shell is None:
+ use_shell = os.name=='posix'
+ execute_in = os.path.abspath(execute_in)
+ oldcwd = os.path.abspath(os.getcwd())
+
+ if __name__[-12:] == 'exec_command':
+ exec_dir = os.path.dirname(os.path.abspath(__file__))
+ elif os.path.isfile('exec_command.py'):
+ exec_dir = os.path.abspath('.')
+ else:
+ exec_dir = os.path.abspath(sys.argv[0])
+ if os.path.isfile(exec_dir):
+ exec_dir = os.path.dirname(exec_dir)
+
+ if oldcwd!=execute_in:
+ os.chdir(execute_in)
+ log.debug('New cwd: %s' % execute_in)
+ else:
+ log.debug('Retaining cwd: %s' % oldcwd)
+
+ oldenv = _preserve_environment( list(env.keys()) )
+ _update_environment( **env )
+
+ try:
+ st = _exec_command(command,
+ use_shell=use_shell,
+ use_tee=use_tee,
+ **env)
+ finally:
+ if oldcwd!=execute_in:
+ os.chdir(oldcwd)
+ log.debug('Restored cwd to %s' % oldcwd)
+ _update_environment(**oldenv)
+
+ return st
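+
+# Illustrative sketch only: the subprocess-based replacement suggested by
+# the deprecation warning, merging stderr into stdout as exec_command does.
+#
+#     import subprocess
+#     proc = subprocess.run('gfortran --version', shell=True,
+#                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+#                           text=True)
+#     status, output = proc.returncode, proc.stdout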
+
+
+def _exec_command(command, use_shell=None, use_tee = None, **env):
+ """
+ Internal workhorse for exec_command().
+ """
+ if use_shell is None:
+ use_shell = os.name=='posix'
+ if use_tee is None:
+ use_tee = os.name=='posix'
+
+ if os.name == 'posix' and use_shell:
+        # On POSIX, subprocess always uses /bin/sh; override it with $SHELL
+ sh = os.environ.get('SHELL', '/bin/sh')
+ if is_sequence(command):
+ command = [sh, '-c', ' '.join(command)]
+ else:
+ command = [sh, '-c', command]
+ use_shell = False
+
+ elif os.name == 'nt' and is_sequence(command):
+ # On Windows, join the string for CreateProcess() ourselves as
+ # subprocess does it a bit differently
+ command = ' '.join(_quote_arg(arg) for arg in command)
+
+ # Inherit environment by default
+ env = env or None
+ try:
+ # text is set to False so that communicate()
+ # will return bytes. We need to decode the output ourselves
+ # so that Python will not raise a UnicodeDecodeError when
+ # it encounters an invalid character; rather, we simply replace it
+ proc = subprocess.Popen(command, shell=use_shell, env=env, text=False,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ except OSError:
+ # Return 127, as os.spawn*() and /bin/sh do
+ return 127, ''
+
+ text, err = proc.communicate()
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ text = text.decode(mylocale, errors='replace')
+ text = text.replace('\r\n', '\n')
+ # Another historical oddity
+ if text[-1:] == '\n':
+ text = text[:-1]
+
+ if use_tee and text:
+ print(text)
+ return proc.returncode, text
+
+
+def _quote_arg(arg):
+ """
+ Quote the argument for safe use in a shell command line.
+ """
+    # If there is a quote in the string, assume the relevant parts of the
+    # string are already quoted (e.g. '-I"C:\\Program Files\\..."')
+ if '"' not in arg and ' ' in arg:
+ return '"%s"' % arg
+ return arg
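+
+# For example, _quote_arg('C:\\Program Files\\x') returns
+# '"C:\\Program Files\\x"', while an argument that already contains a
+# double quote is returned unchanged.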
+
+############################################################
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/extension.py b/venv/lib/python3.9/site-packages/numpy/distutils/extension.py
new file mode 100644
index 00000000..3ede013e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/extension.py
@@ -0,0 +1,107 @@
+"""distutils.extension
+
+Provides the Extension class, used to describe C/C++ extension
+modules in setup scripts.
+
+Overridden to support f2py.
+
+"""
+import re
+from distutils.extension import Extension as old_Extension
+
+
+cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
+fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+
+
+class Extension(old_Extension):
+ """
+ Parameters
+ ----------
+ name : str
+ Extension name.
+ sources : list of str
+ List of source file locations relative to the top directory of
+ the package.
+ extra_compile_args : list of str
+ Extra command line arguments to pass to the compiler.
+ extra_f77_compile_args : list of str
+ Extra command line arguments to pass to the fortran77 compiler.
+ extra_f90_compile_args : list of str
+ Extra command line arguments to pass to the fortran90 compiler.
+ """
+ def __init__(
+ self, name, sources,
+ include_dirs=None,
+ define_macros=None,
+ undef_macros=None,
+ library_dirs=None,
+ libraries=None,
+ runtime_library_dirs=None,
+ extra_objects=None,
+ extra_compile_args=None,
+ extra_link_args=None,
+ export_symbols=None,
+ swig_opts=None,
+ depends=None,
+ language=None,
+ f2py_options=None,
+ module_dirs=None,
+ extra_c_compile_args=None,
+ extra_cxx_compile_args=None,
+ extra_f77_compile_args=None,
+ extra_f90_compile_args=None,):
+
+ old_Extension.__init__(
+ self, name, [],
+ include_dirs=include_dirs,
+ define_macros=define_macros,
+ undef_macros=undef_macros,
+ library_dirs=library_dirs,
+ libraries=libraries,
+ runtime_library_dirs=runtime_library_dirs,
+ extra_objects=extra_objects,
+ extra_compile_args=extra_compile_args,
+ extra_link_args=extra_link_args,
+ export_symbols=export_symbols)
+
+ # Avoid assert statements checking that sources contains strings:
+ self.sources = sources
+
+ # Python 2.4 distutils new features
+ self.swig_opts = swig_opts or []
+ # swig_opts is assumed to be a list. Here we handle the case where it
+ # is specified as a string instead.
+ if isinstance(self.swig_opts, str):
+ import warnings
+ msg = "swig_opts is specified as a string instead of a list"
+ warnings.warn(msg, SyntaxWarning, stacklevel=2)
+ self.swig_opts = self.swig_opts.split()
+
+ # Python 2.3 distutils new features
+ self.depends = depends or []
+ self.language = language
+
+ # numpy_distutils features
+ self.f2py_options = f2py_options or []
+ self.module_dirs = module_dirs or []
+ self.extra_c_compile_args = extra_c_compile_args or []
+ self.extra_cxx_compile_args = extra_cxx_compile_args or []
+ self.extra_f77_compile_args = extra_f77_compile_args or []
+ self.extra_f90_compile_args = extra_f90_compile_args or []
+
+ return
+
+ def has_cxx_sources(self):
+ for source in self.sources:
+ if cxx_ext_re(str(source)):
+ return True
+ return False
+
+ def has_f2py_sources(self):
+ for source in self.sources:
+ if fortran_pyf_ext_re(source):
+ return True
+ return False
+
+# class Extension
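+
+# Illustrative sketch only (hypothetical package and file names): a typical
+# setup.py use of this subclass with f2py sources.
+#
+#     from numpy.distutils.extension import Extension
+#     ext = Extension('pkg.flib',
+#                     sources=['pkg/flib.pyf', 'pkg/flib.f90'],
+#                     extra_f90_compile_args=['-O3'])
+#     ext.has_f2py_sources()  # True, because of the .pyf source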
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py
new file mode 100644
index 00000000..ecba3e5d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/__init__.py
@@ -0,0 +1,1030 @@
+"""numpy.distutils.fcompiler
+
+Contains FCompiler, an abstract base class that defines the interface
+for the numpy.distutils Fortran compiler abstraction model.
+
+Terminology:
+
+To be consistent, where the term 'executable' is used, it means the single
+file, like 'gcc', that is executed, and should be a string. In contrast,
+'command' means the entire command line, like ['gcc', '-c', 'file.c'], and
+should be a list.
+
+But note that FCompiler.executables is actually a dictionary of commands.
+
+"""
+__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',
+ 'dummy_fortran_file']
+
+import os
+import sys
+import re
+
+from distutils.sysconfig import get_python_lib
+from distutils.fancy_getopt import FancyGetopt
+from distutils.errors import DistutilsModuleError, \
+ DistutilsExecError, CompileError, LinkError, DistutilsPlatformError
+from distutils.util import split_quoted, strtobool
+
+from numpy.distutils.ccompiler import CCompiler, gen_lib_options
+from numpy.distutils import log
+from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
+ make_temp_file, get_shared_lib_extension
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils import _shell_utils
+
+from .environment import EnvironmentConfig
+
+__metaclass__ = type
+
+class CompilerNotFound(Exception):
+ pass
+
+def flaglist(s):
+ if is_string(s):
+ return split_quoted(s)
+ else:
+ return s
+
+def str2bool(s):
+ if is_string(s):
+ return strtobool(s)
+ return bool(s)
+
+def is_sequence_of_strings(seq):
+ return is_sequence(seq) and all_strings(seq)
+
+class FCompiler(CCompiler):
+ """Abstract base class to define the interface that must be implemented
+ by real Fortran compiler classes.
+
+ Methods that subclasses may redefine:
+
+ update_executables(), find_executables(), get_version()
+ get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()
+ get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),
+ get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),
+ get_flags_arch_f90(), get_flags_debug_f90(),
+ get_flags_fix(), get_flags_linker_so()
+
+ DON'T call these methods (except get_version) after
+ constructing a compiler instance or inside any other method.
+ All methods, except update_executables() and find_executables(),
+ may call the get_version() method.
+
+ After constructing a compiler instance, always call customize(dist=None)
+ method that finalizes compiler construction and makes the following
+ attributes available:
+ compiler_f77
+ compiler_f90
+ compiler_fix
+ linker_so
+ archiver
+ ranlib
+ libraries
+ library_dirs
+ """
+
+ # These are the environment variables and distutils keys used.
+ # Each configuration description is
+ # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)
+ # The hook names are handled by the self._environment_hook method.
+ # - names starting with 'self.' call methods in this class
+ # - names starting with 'exe.' return the key in the executables dict
+ # - names like 'flags.YYY' return self.get_flag_YYY()
+ # convert is either None or a function to convert a string to the
+ # appropriate type used.
+
+ distutils_vars = EnvironmentConfig(
+ distutils_section='config_fc',
+ noopt = (None, None, 'noopt', str2bool, False),
+ noarch = (None, None, 'noarch', str2bool, False),
+ debug = (None, None, 'debug', str2bool, False),
+ verbose = (None, None, 'verbose', str2bool, False),
+ )
+
+ command_vars = EnvironmentConfig(
+ distutils_section='config_fc',
+ compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
+ compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
+ compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
+ version_cmd = ('exe.version_cmd', None, None, None, False),
+ linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
+ linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
+ archiver = (None, 'AR', 'ar', None, False),
+ ranlib = (None, 'RANLIB', 'ranlib', None, False),
+ )
+
+ flag_vars = EnvironmentConfig(
+ distutils_section='config_fc',
+ f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
+ f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
+ free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
+ fix = ('flags.fix', None, None, flaglist, False),
+ opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
+ opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
+ opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
+ arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
+ arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
+ arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
+ debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
+ debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
+ debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
+ flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
+ linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
+ linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
+ ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
+ )
+
+ language_map = {'.f': 'f77',
+ '.for': 'f77',
+ '.F': 'f77', # XXX: needs preprocessor
+ '.ftn': 'f77',
+ '.f77': 'f77',
+ '.f90': 'f90',
+ '.F90': 'f90', # XXX: needs preprocessor
+ '.f95': 'f90',
+ }
+ language_order = ['f90', 'f77']
+
+
+ # These will be set by the subclass
+
+ compiler_type = None
+ compiler_aliases = ()
+ version_pattern = None
+
+ possible_executables = []
+ executables = {
+ 'version_cmd': ["f77", "-v"],
+ 'compiler_f77': ["f77"],
+ 'compiler_f90': ["f90"],
+ 'compiler_fix': ["f90", "-fixed"],
+ 'linker_so': ["f90", "-shared"],
+ 'linker_exe': ["f90"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': None,
+ }
+
+ # If compiler does not support compiling Fortran 90 then it can
+ # suggest using another compiler. For example, gnu would suggest
+ # gnu95 compiler type when there are F90 sources.
+ suggested_f90_compiler = None
+
+ compile_switch = "-c"
+ object_switch = "-o " # Ending space matters! It will be stripped
+ # but if it is missing then object_switch
+ # will be prefixed to object file name by
+ # string concatenation.
+ library_switch = "-o " # Ditto!
+
+ # Switch to specify where module files are created and searched
+ # for USE statement. Normally it is a string and also here ending
+ # space matters. See above.
+ module_dir_switch = None
+
+ # Switch to specify where module files are searched for USE statement.
+ module_include_switch = '-I'
+
+ pic_flags = [] # Flags to create position-independent code
+
+ src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90', '.FOR']
+ obj_extension = ".o"
+
+ shared_lib_extension = get_shared_lib_extension()
+ static_lib_extension = ".a" # or .lib
+ static_lib_format = "lib%s%s" # or %s%s
+ shared_lib_format = "%s%s"
+ exe_extension = ""
+
+ _exe_cache = {}
+
+ _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',
+ 'compiler_fix', 'linker_so', 'linker_exe', 'archiver',
+ 'ranlib']
+
+ # This will be set by new_fcompiler when called in
+ # command/{build_ext.py, build_clib.py, config.py} files.
+ c_compiler = None
+
+ # extra_{f77,f90}_compile_args are set by build_ext.build_extension method
+ extra_f77_compile_args = []
+ extra_f90_compile_args = []
+
+ def __init__(self, *args, **kw):
+ CCompiler.__init__(self, *args, **kw)
+ self.distutils_vars = self.distutils_vars.clone(self._environment_hook)
+ self.command_vars = self.command_vars.clone(self._environment_hook)
+ self.flag_vars = self.flag_vars.clone(self._environment_hook)
+ self.executables = self.executables.copy()
+ for e in self._executable_keys:
+ if e not in self.executables:
+ self.executables[e] = None
+
+ # Some methods depend on .customize() being called first, so
+ # this keeps track of whether that's happened yet.
+ self._is_customised = False
+
+ def __copy__(self):
+ obj = self.__new__(self.__class__)
+ obj.__dict__.update(self.__dict__)
+ obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)
+ obj.command_vars = obj.command_vars.clone(obj._environment_hook)
+ obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)
+ obj.executables = obj.executables.copy()
+ return obj
+
+ def copy(self):
+ return self.__copy__()
+
+ # Use properties for the attributes used by CCompiler. Setting them
+ # as attributes from the self.executables dictionary is error-prone,
+ # so we get them from there each time.
+ def _command_property(key):
+ def fget(self):
+ assert self._is_customised
+ return self.executables[key]
+ return property(fget=fget)
+ version_cmd = _command_property('version_cmd')
+ compiler_f77 = _command_property('compiler_f77')
+ compiler_f90 = _command_property('compiler_f90')
+ compiler_fix = _command_property('compiler_fix')
+ linker_so = _command_property('linker_so')
+ linker_exe = _command_property('linker_exe')
+ archiver = _command_property('archiver')
+ ranlib = _command_property('ranlib')
+
+ # Make our terminology consistent.
+ def set_executable(self, key, value):
+ self.set_command(key, value)
+
+ def set_commands(self, **kw):
+ for k, v in kw.items():
+ self.set_command(k, v)
+
+ def set_command(self, key, value):
+        if key not in self._executable_keys:
+ raise ValueError(
+ "unknown executable '%s' for class %s" %
+ (key, self.__class__.__name__))
+ if is_string(value):
+ value = split_quoted(value)
+ assert value is None or is_sequence_of_strings(value[1:]), (key, value)
+ self.executables[key] = value
+
+ ######################################################################
+ ## Methods that subclasses may redefine. But don't call these methods!
+ ## They are private to FCompiler class and may return unexpected
+    ## results if used elsewhere. So, you have been warned.
+
+ def find_executables(self):
+ """Go through the self.executables dictionary, and attempt to
+ find and assign appropriate executables.
+
+ Executable names are looked for in the environment (environment
+        variables, the distutils.cfg, and command line), the 0th element of
+ the command list, and the self.possible_executables list.
+
+ Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77
+ or the Fortran 90 compiler executable is used, unless overridden
+ by an environment setting.
+
+ Subclasses should call this if overridden.
+ """
+ assert self._is_customised
+ exe_cache = self._exe_cache
+ def cached_find_executable(exe):
+ if exe in exe_cache:
+ return exe_cache[exe]
+ fc_exe = find_executable(exe)
+ exe_cache[exe] = exe_cache[fc_exe] = fc_exe
+ return fc_exe
+ def verify_command_form(name, value):
+ if value is not None and not is_sequence_of_strings(value):
+ raise ValueError(
+ "%s value %r is invalid in class %s" %
+ (name, value, self.__class__.__name__))
+ def set_exe(exe_key, f77=None, f90=None):
+ cmd = self.executables.get(exe_key, None)
+ if not cmd:
+ return None
+ # Note that we get cmd[0] here if the environment doesn't
+ # have anything set
+ exe_from_environ = getattr(self.command_vars, exe_key)
+ if not exe_from_environ:
+ possibles = [f90, f77] + self.possible_executables
+ else:
+ possibles = [exe_from_environ] + self.possible_executables
+
+ seen = set()
+ unique_possibles = []
+ for e in possibles:
+ if e == '<F77>':
+ e = f77
+ elif e == '<F90>':
+ e = f90
+ if not e or e in seen:
+ continue
+ seen.add(e)
+ unique_possibles.append(e)
+
+ for exe in unique_possibles:
+ fc_exe = cached_find_executable(exe)
+ if fc_exe:
+ cmd[0] = fc_exe
+ return fc_exe
+ self.set_command(exe_key, None)
+ return None
+
+ ctype = self.compiler_type
+ f90 = set_exe('compiler_f90')
+ if not f90:
+ f77 = set_exe('compiler_f77')
+ if f77:
+ log.warn('%s: no Fortran 90 compiler found' % ctype)
+ else:
+            raise CompilerNotFound('%s: neither f90 nor f77 compiler found' % ctype)
+ else:
+ f77 = set_exe('compiler_f77', f90=f90)
+ if not f77:
+ log.warn('%s: no Fortran 77 compiler found' % ctype)
+ set_exe('compiler_fix', f90=f90)
+
+ set_exe('linker_so', f77=f77, f90=f90)
+ set_exe('linker_exe', f77=f77, f90=f90)
+ set_exe('version_cmd', f77=f77, f90=f90)
+ set_exe('archiver')
+ set_exe('ranlib')
+
+ def update_executables(self):
+ """Called at the beginning of customisation. Subclasses should
+ override this if they need to set up the executables dictionary.
+
+ Note that self.find_executables() is run afterwards, so the
+ self.executables dictionary values can contain <F77> or <F90> as
+ the command, which will be replaced by the found F77 or F90
+ compiler.
+ """
+ pass
+
+ def get_flags(self):
+ """List of flags common to all compiler types."""
+ return [] + self.pic_flags
+
+ def _get_command_flags(self, key):
+ cmd = self.executables.get(key, None)
+ if cmd is None:
+ return []
+ return cmd[1:]
+
+ def get_flags_f77(self):
+ """List of Fortran 77 specific flags."""
+ return self._get_command_flags('compiler_f77')
+ def get_flags_f90(self):
+ """List of Fortran 90 specific flags."""
+ return self._get_command_flags('compiler_f90')
+ def get_flags_free(self):
+ """List of Fortran 90 free format specific flags."""
+ return []
+ def get_flags_fix(self):
+ """List of Fortran 90 fixed format specific flags."""
+ return self._get_command_flags('compiler_fix')
+ def get_flags_linker_so(self):
+ """List of linker flags to build a shared library."""
+ return self._get_command_flags('linker_so')
+ def get_flags_linker_exe(self):
+ """List of linker flags to build an executable."""
+ return self._get_command_flags('linker_exe')
+ def get_flags_ar(self):
+ """List of archiver flags. """
+ return self._get_command_flags('archiver')
+ def get_flags_opt(self):
+ """List of architecture independent compiler flags."""
+ return []
+ def get_flags_arch(self):
+ """List of architecture dependent compiler flags."""
+ return []
+ def get_flags_debug(self):
+ """List of compiler flags to compile with debugging information."""
+ return []
+
+ get_flags_opt_f77 = get_flags_opt_f90 = get_flags_opt
+ get_flags_arch_f77 = get_flags_arch_f90 = get_flags_arch
+ get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debug
+
+ def get_libraries(self):
+ """List of compiler libraries."""
+ return self.libraries[:]
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ return self.library_dirs[:]
+
+ def get_version(self, force=False, ok_status=[0]):
+ assert self._is_customised
+ version = CCompiler.get_version(self, force=force, ok_status=ok_status)
+ if version is None:
+ raise CompilerNotFound()
+ return version
+
+
+ ############################################################
+
+ ## Public methods:
+
+ def customize(self, dist = None):
+ """Customize Fortran compiler.
+
+ This method gets Fortran compiler specific information from
+ (i) class definition, (ii) environment, (iii) distutils config
+ files, and (iv) command line (later overrides earlier).
+
+        This method should always be called after constructing a
+        compiler instance. It is not done in __init__ because a
+        Distribution instance is needed for (iii) and (iv).
+ """
+ log.info('customize %s' % (self.__class__.__name__))
+
+ self._is_customised = True
+
+ self.distutils_vars.use_distribution(dist)
+ self.command_vars.use_distribution(dist)
+ self.flag_vars.use_distribution(dist)
+
+ self.update_executables()
+
+ # find_executables takes care of setting the compiler commands,
+ # version_cmd, linker_so, linker_exe, ar, and ranlib
+ self.find_executables()
+
+ noopt = self.distutils_vars.get('noopt', False)
+ noarch = self.distutils_vars.get('noarch', noopt)
+ debug = self.distutils_vars.get('debug', False)
+
+ f77 = self.command_vars.compiler_f77
+ f90 = self.command_vars.compiler_f90
+
+ f77flags = []
+ f90flags = []
+ freeflags = []
+ fixflags = []
+
+ if f77:
+ f77 = _shell_utils.NativeParser.split(f77)
+ f77flags = self.flag_vars.f77
+ if f90:
+ f90 = _shell_utils.NativeParser.split(f90)
+ f90flags = self.flag_vars.f90
+ freeflags = self.flag_vars.free
+ # XXX Assuming that free format is default for f90 compiler.
+ fix = self.command_vars.compiler_fix
+            # NOTE: when a Fortran compiler environment variable such as
+            # F90='gfortran --coverage' has been customized by CI or a
+            # user, this (and similar code elsewhere) probably just drops
+            # the --coverage flag instead of putting it somewhere more
+            # appropriate; such cases should eventually be tested and
+            # handled more robustly.
+ if fix:
+ fix = _shell_utils.NativeParser.split(fix)
+ fixflags = self.flag_vars.fix + f90flags
+
+ oflags, aflags, dflags = [], [], []
+ # examine get_flags_<tag>_<compiler> for extra flags
+ # only add them if the method is different from get_flags_<tag>
+ def get_flags(tag, flags):
+ # note that self.flag_vars.<tag> calls self.get_flags_<tag>()
+ flags.extend(getattr(self.flag_vars, tag))
+ this_get = getattr(self, 'get_flags_' + tag)
+ for name, c, flagvar in [('f77', f77, f77flags),
+ ('f90', f90, f90flags),
+ ('f90', fix, fixflags)]:
+ t = '%s_%s' % (tag, name)
+ if c and this_get is not getattr(self, 'get_flags_' + t):
+ flagvar.extend(getattr(self.flag_vars, t))
+ if not noopt:
+ get_flags('opt', oflags)
+ if not noarch:
+ get_flags('arch', aflags)
+ if debug:
+ get_flags('debug', dflags)
+
+ fflags = self.flag_vars.flags + dflags + oflags + aflags
+
+ if f77:
+ self.set_commands(compiler_f77=f77+f77flags+fflags)
+ if f90:
+ self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
+ if fix:
+ self.set_commands(compiler_fix=fix+fixflags+fflags)
+
+
+ #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
+ linker_so = self.linker_so
+ if linker_so:
+ linker_so_flags = self.flag_vars.linker_so
+ if sys.platform.startswith('aix'):
+ python_lib = get_python_lib(standard_lib=1)
+ ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
+ python_exp = os.path.join(python_lib, 'config', 'python.exp')
+ linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+ if sys.platform.startswith('os400'):
+ from distutils.sysconfig import get_config_var
+ python_config = get_config_var('LIBPL')
+ ld_so_aix = os.path.join(python_config, 'ld_so_aix')
+ python_exp = os.path.join(python_config, 'python.exp')
+ linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]
+ self.set_commands(linker_so=linker_so+linker_so_flags)
+
+ linker_exe = self.linker_exe
+ if linker_exe:
+ linker_exe_flags = self.flag_vars.linker_exe
+ self.set_commands(linker_exe=linker_exe+linker_exe_flags)
+
+ ar = self.command_vars.archiver
+ if ar:
+ arflags = self.flag_vars.ar
+ self.set_commands(archiver=[ar]+arflags)
+
+ self.set_library_dirs(self.get_library_dirs())
+ self.set_libraries(self.get_libraries())
+
+ def dump_properties(self):
+ """Print out the attributes of a compiler instance."""
+ props = []
+ for key in list(self.executables.keys()) + \
+ ['version', 'libraries', 'library_dirs',
+ 'object_switch', 'compile_switch']:
+ if hasattr(self, key):
+ v = getattr(self, key)
+ props.append((key, None, '= '+repr(v)))
+ props.sort()
+
+ pretty_printer = FancyGetopt(props)
+ for l in pretty_printer.generate_help("%s instance properties:" \
+ % (self.__class__.__name__)):
+ if l[:4]==' --':
+ l = ' ' + l[4:]
+ print(l)
+
+ ###################
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compile 'src' to product 'obj'."""
+ src_flags = {}
+ if is_f_file(src) and not has_f90_header(src):
+ flavor = ':f77'
+ compiler = self.compiler_f77
+ src_flags = get_f77flags(src)
+ extra_compile_args = self.extra_f77_compile_args or []
+ elif is_free_format(src):
+ flavor = ':f90'
+ compiler = self.compiler_f90
+ if compiler is None:
+                raise DistutilsExecError(
+                    'f90 not supported by %s (needed for %s)'
+                    % (self.__class__.__name__, src))
+ extra_compile_args = self.extra_f90_compile_args or []
+ else:
+ flavor = ':fix'
+ compiler = self.compiler_fix
+ if compiler is None:
+                raise DistutilsExecError(
+                    'f90 (fixed) not supported by %s (needed for %s)'
+                    % (self.__class__.__name__, src))
+ extra_compile_args = self.extra_f90_compile_args or []
+ if self.object_switch[-1]==' ':
+ o_args = [self.object_switch.strip(), obj]
+ else:
+ o_args = [self.object_switch.strip()+obj]
+
+ assert self.compile_switch.strip()
+ s_args = [self.compile_switch, src]
+
+ if extra_compile_args:
+ log.info('extra %s options: %r' \
+ % (flavor[1:], ' '.join(extra_compile_args)))
+
+ extra_flags = src_flags.get(self.compiler_type, [])
+ if extra_flags:
+ log.info('using compile options from source: %r' \
+ % ' '.join(extra_flags))
+
+ command = compiler + cc_args + extra_flags + s_args + o_args \
+ + extra_postargs + extra_compile_args
+
+ display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,
+ src)
+ try:
+ self.spawn(command, display=display)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise CompileError(msg) from None
+
+ def module_options(self, module_dirs, module_build_dir):
+ options = []
+ if self.module_dir_switch is not None:
+ if self.module_dir_switch[-1]==' ':
+ options.extend([self.module_dir_switch.strip(), module_build_dir])
+ else:
+ options.append(self.module_dir_switch.strip()+module_build_dir)
+ else:
+ print('XXX: module_build_dir=%r option ignored' % (module_build_dir))
+ print('XXX: Fix module_dir_switch for ', self.__class__.__name__)
+ if self.module_include_switch is not None:
+ for d in [module_build_dir]+module_dirs:
+ options.append('%s%s' % (self.module_include_switch, d))
+ else:
+ print('XXX: module_dirs=%r option ignored' % (module_dirs))
+ print('XXX: Fix module_include_switch for ', self.__class__.__name__)
+ return options
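+
+    # For example, with module_dir_switch='-module ' and
+    # module_include_switch='-I', module_options(['mods'], 'build/f90mods')
+    # returns ['-module', 'build/f90mods', '-Ibuild/f90mods', '-Imods'].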
+
+ def library_option(self, lib):
+ return "-l" + lib
+ def library_dir_option(self, dir):
+ return "-L" + dir
+
+ def link(self, target_desc, objects,
+ output_filename, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ libraries, library_dirs, runtime_library_dirs = \
+ self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
+
+ lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
+ libraries)
+ if is_string(output_dir):
+ output_filename = os.path.join(output_dir, output_filename)
+ elif output_dir is not None:
+ raise TypeError("'output_dir' must be a string or None")
+
+ if self._need_link(objects, output_filename):
+ if self.library_switch[-1]==' ':
+ o_args = [self.library_switch.strip(), output_filename]
+ else:
+ o_args = [self.library_switch.strip()+output_filename]
+
+ if is_string(self.objects):
+ ld_args = objects + [self.objects]
+ else:
+ ld_args = objects + self.objects
+ ld_args = ld_args + lib_opts + o_args
+ if debug:
+ ld_args[:0] = ['-g']
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+ self.mkpath(os.path.dirname(output_filename))
+ if target_desc == CCompiler.EXECUTABLE:
+ linker = self.linker_exe[:]
+ else:
+ linker = self.linker_so[:]
+ command = linker + ld_args
+ try:
+ self.spawn(command)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise LinkError(msg) from None
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def _environment_hook(self, name, hook_name):
+ if hook_name is None:
+ return None
+ if is_string(hook_name):
+ if hook_name.startswith('self.'):
+ hook_name = hook_name[5:]
+ hook = getattr(self, hook_name)
+ return hook()
+ elif hook_name.startswith('exe.'):
+ hook_name = hook_name[4:]
+ var = self.executables[hook_name]
+ if var:
+ return var[0]
+ else:
+ return None
+ elif hook_name.startswith('flags.'):
+ hook_name = hook_name[6:]
+ hook = getattr(self, 'get_flags_' + hook_name)
+ return hook()
+ else:
+ return hook_name()
+
+ def can_ccompiler_link(self, ccompiler):
+ """
+ Check if the given C compiler can link objects produced by
+ this compiler.
+ """
+ return True
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+
+ Parameters
+ ----------
+ objects : list
+ List of object files to include.
+ output_dir : str
+ Output directory to place generated object files.
+ extra_dll_dir : str
+ Output directory to place extra DLL files that need to be
+ included on Windows.
+
+ Returns
+ -------
+ converted_objects : list of str
+ List of converted object files.
+            Note that the number of output files is not necessarily
+            the same as the number of inputs.
+
+ """
+ raise NotImplementedError()
+
+ ## class FCompiler
+
+_default_compilers = (
+ # sys.platform mappings
+ ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
+ 'intelvem', 'intelem', 'flang')),
+ ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
+ ('linux.*', ('arm', 'gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag',
+ 'vast', 'compaq', 'intele', 'intelem', 'gnu', 'g95',
+ 'pathf95', 'nagfor', 'fujitsu')),
+ ('darwin.*', ('gnu95', 'nag', 'nagfor', 'absoft', 'ibm', 'intel', 'gnu',
+ 'g95', 'pg')),
+ ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
+ ('irix.*', ('mips', 'gnu', 'gnu95',)),
+ ('aix.*', ('ibm', 'gnu', 'gnu95',)),
+ # os.name mappings
+ ('posix', ('gnu', 'gnu95',)),
+ ('nt', ('gnu', 'gnu95',)),
+ ('mac', ('gnu95', 'gnu', 'pg')),
+ )
+
+fcompiler_class = None
+fcompiler_aliases = None
+
+def load_all_fcompiler_classes():
+ """Cache all the FCompiler classes found in modules in the
+ numpy.distutils.fcompiler package.
+ """
+ from glob import glob
+ global fcompiler_class, fcompiler_aliases
+ if fcompiler_class is not None:
+ return
+ pys = os.path.join(os.path.dirname(__file__), '*.py')
+ fcompiler_class = {}
+ fcompiler_aliases = {}
+ for fname in glob(pys):
+ module_name, ext = os.path.splitext(os.path.basename(fname))
+ module_name = 'numpy.distutils.fcompiler.' + module_name
+        __import__(module_name)
+ module = sys.modules[module_name]
+ if hasattr(module, 'compilers'):
+ for cname in module.compilers:
+ klass = getattr(module, cname)
+ desc = (klass.compiler_type, klass, klass.description)
+ fcompiler_class[klass.compiler_type] = desc
+ for alias in klass.compiler_aliases:
+ if alias in fcompiler_aliases:
+ raise ValueError("alias %r defined for both %s and %s"
+ % (alias, klass.__name__,
+ fcompiler_aliases[alias][1].__name__))
+ fcompiler_aliases[alias] = desc
+
+def _find_existing_fcompiler(compiler_types,
+ osname=None, platform=None,
+ requiref90=False,
+ c_compiler=None):
+ from numpy.distutils.core import get_distribution
+ dist = get_distribution(always=True)
+ for compiler_type in compiler_types:
+ v = None
+ try:
+ c = new_fcompiler(plat=platform, compiler=compiler_type,
+ c_compiler=c_compiler)
+ c.customize(dist)
+ v = c.get_version()
+ if requiref90 and c.compiler_f90 is None:
+ v = None
+ new_compiler = c.suggested_f90_compiler
+ if new_compiler:
+                    log.warn('Trying %r compiler as suggested by %r '
+                             'compiler for f90 support.' % (new_compiler,
+                                                            compiler_type))
+ c = new_fcompiler(plat=platform, compiler=new_compiler,
+ c_compiler=c_compiler)
+ c.customize(dist)
+ v = c.get_version()
+ if v is not None:
+ compiler_type = new_compiler
+ if requiref90 and c.compiler_f90 is None:
+ raise ValueError('%s does not support compiling f90 codes, '
+ 'skipping.' % (c.__class__.__name__))
+ except DistutilsModuleError:
+ log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)
+ except CompilerNotFound:
+ log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)
+ if v is not None:
+ return compiler_type
+ return None
+
+def available_fcompilers_for_platform(osname=None, platform=None):
+ if osname is None:
+ osname = os.name
+ if platform is None:
+ platform = sys.platform
+ matching_compiler_types = []
+ for pattern, compiler_type in _default_compilers:
+ if re.match(pattern, platform) or re.match(pattern, osname):
+ for ct in compiler_type:
+ if ct not in matching_compiler_types:
+ matching_compiler_types.append(ct)
+ if not matching_compiler_types:
+ matching_compiler_types.append('gnu')
+ return matching_compiler_types
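+
+# For example, on a typical Linux box (os.name == 'posix', sys.platform ==
+# 'linux') this returns the 'linux.*' list above, with any 'posix' entries
+# not already present appended in order.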
+
+def get_default_fcompiler(osname=None, platform=None, requiref90=False,
+ c_compiler=None):
+ """Determine the default Fortran compiler to use for the given
+ platform."""
+ matching_compiler_types = available_fcompilers_for_platform(osname,
+ platform)
+ log.info("get_default_fcompiler: matching types: '%s'",
+ matching_compiler_types)
+ compiler_type = _find_existing_fcompiler(matching_compiler_types,
+ osname=osname,
+ platform=platform,
+ requiref90=requiref90,
+ c_compiler=c_compiler)
+ return compiler_type
+
+# Flag to avoid rechecking for Fortran compiler every time
+failed_fcompilers = set()
+
+def new_fcompiler(plat=None,
+ compiler=None,
+ verbose=0,
+ dry_run=0,
+ force=0,
+ requiref90=False,
+ c_compiler = None):
+ """Generate an instance of some FCompiler subclass for the supplied
+ platform/compiler combination.
+ """
+ global failed_fcompilers
+ fcompiler_key = (plat, compiler)
+ if fcompiler_key in failed_fcompilers:
+ return None
+
+ load_all_fcompiler_classes()
+ if plat is None:
+ plat = os.name
+ if compiler is None:
+ compiler = get_default_fcompiler(plat, requiref90=requiref90,
+ c_compiler=c_compiler)
+ if compiler in fcompiler_class:
+ module_name, klass, long_description = fcompiler_class[compiler]
+ elif compiler in fcompiler_aliases:
+ module_name, klass, long_description = fcompiler_aliases[compiler]
+ else:
+ msg = "don't know how to compile Fortran code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler." % compiler
+ msg = msg + " Supported compilers are: %s)" \
+ % (','.join(fcompiler_class.keys()))
+ log.warn(msg)
+ failed_fcompilers.add(fcompiler_key)
+ return None
+
+ compiler = klass(verbose=verbose, dry_run=dry_run, force=force)
+ compiler.c_compiler = c_compiler
+ return compiler
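+
+# Usage sketch (hedged): how command code typically obtains a customized
+# compiler instance.
+#
+#     fc = new_fcompiler(compiler='gnu95')
+#     if fc is not None:
+#         fc.customize()            # must be called before using fc
+#         print(fc.get_version())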
+
+def show_fcompilers(dist=None):
+ """Print list of available compilers (used by the "--help-fcompiler"
+ option to "config_fc").
+ """
+ if dist is None:
+ from distutils.dist import Distribution
+ from numpy.distutils.command.config_compiler import config_fc
+ dist = Distribution()
+ dist.script_name = os.path.basename(sys.argv[0])
+ dist.script_args = ['config_fc'] + sys.argv[1:]
+ try:
+ dist.script_args.remove('--help-fcompiler')
+ except ValueError:
+ pass
+ dist.cmdclass['config_fc'] = config_fc
+ dist.parse_config_files()
+ dist.parse_command_line()
+ compilers = []
+ compilers_na = []
+ compilers_ni = []
+ if not fcompiler_class:
+ load_all_fcompiler_classes()
+ platform_compilers = available_fcompilers_for_platform()
+ for compiler in platform_compilers:
+ v = None
+ log.set_verbosity(-2)
+ try:
+ c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
+ c.customize(dist)
+ v = c.get_version()
+ except (DistutilsModuleError, CompilerNotFound) as e:
+ log.debug("show_fcompilers: %s not found" % (compiler,))
+ log.debug(repr(e))
+
+ if v is None:
+ compilers_na.append(("fcompiler="+compiler, None,
+ fcompiler_class[compiler][2]))
+ else:
+ c.dump_properties()
+ compilers.append(("fcompiler="+compiler, None,
+ fcompiler_class[compiler][2] + ' (%s)' % v))
+
+ compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))
+ compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])
+ for fc in compilers_ni]
+
+ compilers.sort()
+ compilers_na.sort()
+ compilers_ni.sort()
+ pretty_printer = FancyGetopt(compilers)
+ pretty_printer.print_help("Fortran compilers found:")
+ pretty_printer = FancyGetopt(compilers_na)
+ pretty_printer.print_help("Compilers available for this "
+ "platform, but not found:")
+ if compilers_ni:
+ pretty_printer = FancyGetopt(compilers_ni)
+ pretty_printer.print_help("Compilers not available on this platform:")
+ print("For compiler details, run 'config_fc --verbose' setup command.")
+
+
+def dummy_fortran_file():
+ fo, name = make_temp_file(suffix='.f')
+ fo.write(" subroutine dummy()\n end\n")
+ fo.close()
+ return name[:-2]
+
+
+is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match
+_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
+_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
+_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
+_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).match
+
+def is_free_format(file):
+ """Check if file is in free format Fortran."""
+ # f90 allows both fixed and free format, assuming fixed unless
+ # signs of free format are detected.
+ result = 0
+ with open(file, encoding='latin1') as f:
+ line = f.readline()
+ n = 10000 # the number of non-comment lines to scan for hints
+ if _has_f_header(line) or _has_fix_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n>0 and line:
+ line = line.rstrip()
+ if line and line[0]!='!':
+ n -= 1
+ if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+ result = 1
+ break
+ line = f.readline()
+ return result
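+
+# For example, a statement starting in column 7 (six leading blanks,
+# classic fixed form) leaves result at 0, while a first statement such as
+# 'module m' or a line ending in '&' flips it to 1 (free form).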
+
+def has_f90_header(src):
+ with open(src, encoding='latin1') as f:
+ line = f.readline()
+ return _has_f90_header(line) or _has_fix_header(line)
+
+_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
+def get_f77flags(src):
+ """
+    Search the first 20 lines of Fortran 77 code for the line pattern
+ `CF77FLAGS(<fcompiler type>)=<f77 flags>`
+ Return a dictionary {<fcompiler type>:<f77 flags>}.
+ """
+ flags = {}
+ with open(src, encoding='latin1') as f:
+ i = 0
+ for line in f:
+ i += 1
+ if i>20: break
+ m = _f77flags_re.match(line)
+ if not m: continue
+ fcname = m.group('fcname').strip()
+ fflags = m.group('fflags').strip()
+ flags[fcname] = split_quoted(fflags)
+ return flags
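+
+# For example, a Fortran 77 source whose header contains the line
+#     CF77FLAGS(gnu) = -fno-automatic
+# yields {'gnu': ['-fno-automatic']}.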
+
+# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
+
+if __name__ == '__main__':
+ show_fcompilers()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py
new file mode 100644
index 00000000..efe3a4cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/absoft.py
@@ -0,0 +1,156 @@
+
+# http://www.absoft.com/literature/osxuserguide.pdf
+# http://www.absoft.com/documentation.html
+
+# Notes:
+# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
+# generated extension modules (works for f2py v2.45.241_1936 and up)
+import os
+
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from numpy.distutils.misc_util import cyg2win32
+
+compilers = ['AbsoftFCompiler']
+
+class AbsoftFCompiler(FCompiler):
+
+ compiler_type = 'absoft'
+ description = 'Absoft Corp Fortran Compiler'
+ #version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
+ version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
+ r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
+
+ # on windows: f90 -V -c dummy.f
+ # f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16
+
+ # samt5735(8)$ f90 -V -c dummy.f
+ # f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
+ # Note that fink installs g77 as f77, so need to use f90 for detection.
+
+ executables = {
+ 'version_cmd' : None, # set by update_executables
+ 'compiler_f77' : ["f77"],
+ 'compiler_fix' : ["f90"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["<F90>"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ if os.name=='nt':
+ library_switch = '/out:' #No space after /out:!
+
+ module_dir_switch = None
+ module_include_switch = '-p'
+
+ def update_executables(self):
+ f = cyg2win32(dummy_fortran_file())
+ self.executables['version_cmd'] = ['<F90>', '-V', '-c',
+ f+'.f', '-o', f+'.o']
+
+ def get_flags_linker_so(self):
+ if os.name=='nt':
+ opt = ['/dll']
+ # The "-K shared" switches are being left in for pre-9.0 versions
+ # of Absoft though I don't think versions earlier than 9 can
+ # actually be used to build shared libraries. In fact, version
+ # 8 of Absoft doesn't recognize "-K shared" and will fail.
+ elif self.get_version() >= '9.0':
+ opt = ['-shared']
+ else:
+ opt = ["-K", "shared"]
+ return opt
+
+ def library_dir_option(self, dir):
+ if os.name=='nt':
+ return ['-link', '/PATH:%s' % (dir)]
+ return "-L" + dir
+
+ def library_option(self, lib):
+ if os.name=='nt':
+ return '%s.lib' % (lib)
+ return "-l" + lib
+
+ def get_library_dirs(self):
+ opt = FCompiler.get_library_dirs(self)
+ d = os.environ.get('ABSOFT')
+ if d:
+ if self.get_version() >= '10.0':
+ # use shared libraries, the static libraries were not compiled -fPIC
+ prefix = 'sh'
+ else:
+ prefix = ''
+ if cpu.is_64bit():
+ suffix = '64'
+ else:
+ suffix = ''
+ opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
+ return opt
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ if self.get_version() >= '11.0':
+ opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
+ elif self.get_version() >= '10.0':
+ opt.extend(['af90math', 'afio', 'af77math', 'U77'])
+ elif self.get_version() >= '8.0':
+ opt.extend(['f90math', 'fio', 'f77math', 'U77'])
+ else:
+ opt.extend(['fio', 'f90math', 'fmath', 'U77'])
+ if os.name =='nt':
+ opt.append('COMDLG32')
+ return opt
+
+ def get_flags(self):
+ opt = FCompiler.get_flags(self)
+ if os.name != 'nt':
+ opt.extend(['-s'])
+ if self.get_version():
+ if self.get_version()>='8.2':
+ opt.append('-fpic')
+ return opt
+
+ def get_flags_f77(self):
+ opt = FCompiler.get_flags_f77(self)
+ opt.extend(['-N22', '-N90', '-N110'])
+ v = self.get_version()
+ if os.name == 'nt':
+ if v and v>='8.0':
+ opt.extend(['-f', '-N15'])
+ else:
+ opt.append('-f')
+ if v:
+ if v<='4.6':
+ opt.append('-B108')
+ else:
+ # Though -N15 is undocumented, it works with
+ # Absoft 8.0 on Linux
+ opt.append('-N15')
+ return opt
+
+ def get_flags_f90(self):
+ opt = FCompiler.get_flags_f90(self)
+ opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
+ "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
+ if self.get_version():
+ if self.get_version()>'4.6':
+ opt.extend(["-YDEALLOC=ALL"])
+ return opt
+
+ def get_flags_fix(self):
+ opt = FCompiler.get_flags_fix(self)
+ opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
+ "-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
+ opt.extend(["-f", "fixed"])
+ return opt
+
+ def get_flags_opt(self):
+ opt = ['-O']
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='absoft').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py
new file mode 100644
index 00000000..3eb7e9af
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/arm.py
@@ -0,0 +1,71 @@
+import sys
+
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+from sys import platform
+from os.path import join, dirname, normpath
+
+compilers = ['ArmFlangCompiler']
+
+import functools
+
+class ArmFlangCompiler(FCompiler):
+ compiler_type = 'arm'
+ description = 'Arm Compiler'
+ version_pattern = r'\s*Arm.*version (?P<version>[\d.-]+).*'
+
+ ar_exe = 'lib.exe'
+ possible_executables = ['armflang']
+
+ executables = {
+ 'version_cmd': ["", "--version"],
+ 'compiler_f77': ["armflang", "-fPIC"],
+ 'compiler_fix': ["armflang", "-fPIC", "-ffixed-form"],
+ 'compiler_f90': ["armflang", "-fPIC"],
+ 'linker_so': ["armflang", "-fPIC", "-shared"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': None
+ }
+
+ pic_flags = ["-fPIC", "-DPIC"]
+ c_compiler = 'arm'
+ module_dir_switch = '-module ' # Don't remove ending space!
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ opt.extend(['flang', 'flangrti', 'ompstub'])
+ return opt
+
+ @functools.lru_cache(maxsize=128)
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ opt = FCompiler.get_library_dirs(self)
+ flang_dir = dirname(self.executables['compiler_f77'][0])
+ opt.append(normpath(join(flang_dir, '..', 'lib')))
+
+ return opt
+
+ def get_flags(self):
+ return []
+
+ def get_flags_free(self):
+ return []
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ return ['-O3']
+
+ def get_flags_arch(self):
+ return []
+
+ def runtime_library_dir_option(self, dir):
+ return '-Wl,-rpath=%s' % dir
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='armflang').get_version())
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py
new file mode 100644
index 00000000..01314c13
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/compaq.py
@@ -0,0 +1,120 @@
+
+#http://www.compaq.com/fortran/docs/
+import os
+import sys
+
+from numpy.distutils.fcompiler import FCompiler
+from distutils.errors import DistutilsPlatformError
+
+compilers = ['CompaqFCompiler']
+if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
+ # Otherwise we'd get a false positive on posix systems with
+ # case-insensitive filesystems (like darwin), because we'll pick
+ # up /bin/df
+ compilers.append('CompaqVisualFCompiler')
+
+class CompaqFCompiler(FCompiler):
+
+ compiler_type = 'compaq'
+ description = 'Compaq Fortran Compiler'
+ version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
+
+ if sys.platform[:5]=='linux':
+ fc_exe = 'fort'
+ else:
+ fc_exe = 'f90'
+
+ executables = {
+ 'version_cmd' : ['<F90>', "-version"],
+ 'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
+ 'compiler_fix' : [fc_exe, "-fixed"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : ['<F90>'],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ module_dir_switch = '-module ' # not tested
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ return ['-assume no2underscore', '-nomixed_str_len_arg']
+ def get_flags_debug(self):
+ return ['-g', '-check bounds']
+ def get_flags_opt(self):
+ return ['-O4', '-align dcommons', '-assume bigarrays',
+ '-assume nozsize', '-math_library fast']
+ def get_flags_arch(self):
+ return ['-arch host', '-tune host']
+ def get_flags_linker_so(self):
+ if sys.platform[:5]=='linux':
+ return ['-shared']
+ return ['-shared', '-Wl,-expect_unresolved,*']
+
+class CompaqVisualFCompiler(FCompiler):
+
+ compiler_type = 'compaqv'
+ description = 'DIGITAL or Compaq Visual Fortran Compiler'
+ version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
+ r' Version (?P<version>[^\s]*).*')
+
+ compile_switch = '/compile_only'
+ object_switch = '/object:'
+ library_switch = '/OUT:' #No space after /OUT:!
+
+ static_lib_extension = ".lib"
+ static_lib_format = "%s%s"
+ module_dir_switch = '/module:'
+ module_include_switch = '/I'
+
+ ar_exe = 'lib.exe'
+ fc_exe = 'DF'
+
+ if sys.platform=='win32':
+ from numpy.distutils.msvccompiler import MSVCCompiler
+
+ try:
+ m = MSVCCompiler()
+ m.initialize()
+ ar_exe = m.lib
+ except DistutilsPlatformError:
+ pass
+ except AttributeError as e:
+ if '_MSVCCompiler__root' in str(e):
+ print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
+ else:
+ raise
+ except OSError as e:
+ if not "vcvarsall.bat" in str(e):
+ print("Unexpected OSError in", __file__)
+ raise
+ except ValueError as e:
+ if not "'path'" in str(e):
+ print("Unexpected ValueError in", __file__)
+ raise
+
+ executables = {
+ 'version_cmd' : ['<F90>', "/what"],
+ 'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
+ 'compiler_fix' : [fc_exe, "/fixed"],
+ 'compiler_f90' : [fc_exe],
+ 'linker_so' : ['<F90>'],
+ 'archiver' : [ar_exe, "/OUT:"],
+ 'ranlib' : None
+ }
+
+ def get_flags(self):
+ return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
+ '/names:lowercase', '/assume:underscore']
+ def get_flags_opt(self):
+ return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
+ def get_flags_arch(self):
+ return ['/threads']
+ def get_flags_debug(self):
+ return ['/debug']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='compaq').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py
new file mode 100644
index 00000000..ecd4d998
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/environment.py
@@ -0,0 +1,88 @@
+import os
+from distutils.dist import Distribution
+
+__metaclass__ = type
+
+class EnvironmentConfig:
+ def __init__(self, distutils_section='ALL', **kw):
+ self._distutils_section = distutils_section
+ self._conf_keys = kw
+ self._conf = None
+ self._hook_handler = None
+
+ def dump_variable(self, name):
+ conf_desc = self._conf_keys[name]
+ hook, envvar, confvar, convert, append = conf_desc
+ if not convert:
+ convert = lambda x : x
+ print('%s.%s:' % (self._distutils_section, name))
+ v = self._hook_handler(name, hook)
+ print(' hook : %s' % (convert(v),))
+ if envvar:
+ v = os.environ.get(envvar, None)
+ print(' environ: %s' % (convert(v),))
+ if confvar and self._conf:
+ v = self._conf.get(confvar, (None, None))[1]
+ print(' config : %s' % (convert(v),))
+
+ def dump_variables(self):
+ for name in self._conf_keys:
+ self.dump_variable(name)
+
+ def __getattr__(self, name):
+ try:
+ conf_desc = self._conf_keys[name]
+ except KeyError:
+ raise AttributeError(
+ f"'EnvironmentConfig' object has no attribute '{name}'"
+ ) from None
+
+ return self._get_var(name, conf_desc)
+
+ def get(self, name, default=None):
+ try:
+ conf_desc = self._conf_keys[name]
+ except KeyError:
+ return default
+ var = self._get_var(name, conf_desc)
+ if var is None:
+ var = default
+ return var
+
+ def _get_var(self, name, conf_desc):
+ hook, envvar, confvar, convert, append = conf_desc
+ if convert is None:
+ convert = lambda x: x
+ var = self._hook_handler(name, hook)
+ if envvar is not None:
+ envvar_contents = os.environ.get(envvar)
+ if envvar_contents is not None:
+ envvar_contents = convert(envvar_contents)
+ if var and append:
+ if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
+ var.extend(envvar_contents)
+ else:
+ # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
+ # to keep old (overwrite flags rather than append to
+ # them) behavior
+ var = envvar_contents
+ else:
+ var = envvar_contents
+ if confvar is not None and self._conf:
+ if confvar in self._conf:
+ source, confvar_contents = self._conf[confvar]
+ var = convert(confvar_contents)
+ return var
+
+
+ def clone(self, hook_handler):
+ ec = self.__class__(distutils_section=self._distutils_section,
+ **self._conf_keys)
+ ec._hook_handler = hook_handler
+ return ec
+
+ def use_distribution(self, dist):
+ if isinstance(dist, Distribution):
+ self._conf = dist.get_option_dict(self._distutils_section)
+ else:
+ self._conf = dist
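+
+# Illustrative resolution order (as implemented in _get_var above) for a
+# key such as:
+#     f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True)
+# the 'flags.f77' hook supplies the base value; the F77FLAGS environment
+# variable appends to it (append=True), or replaces it when
+# NPY_DISTUTILS_APPEND_FLAGS=0; an 'f77flags' entry under the configured
+# distutils section takes final precedence.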
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py
new file mode 100644
index 00000000..ddce6745
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/fujitsu.py
@@ -0,0 +1,46 @@
+"""
+fujitsu
+
+Supports Fujitsu compiler function.
+This compiler is developed by Fujitsu and is used in A64FX on Fugaku.
+"""
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['FujitsuFCompiler']
+
+class FujitsuFCompiler(FCompiler):
+ compiler_type = 'fujitsu'
+ description = 'Fujitsu Fortran Compiler'
+
+ possible_executables = ['frt']
+ version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
+ # $ frt --version
+ # frt (FRT) x.x.x yyyymmdd
+
+ executables = {
+ 'version_cmd' : ["<F77>", "--version"],
+ 'compiler_f77' : ["frt", "-Fixed"],
+ 'compiler_fix' : ["frt", "-Fixed"],
+ 'compiler_f90' : ["frt"],
+ 'linker_so' : ["frt", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-KPIC']
+ module_dir_switch = '-M'
+ module_include_switch = '-I'
+
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_debug(self):
+ return ['-g']
+ def runtime_library_dir_option(self, dir):
+ return f'-Wl,-rpath={dir}'
+ def get_libraries(self):
+ return ['fj90f', 'fj90i', 'fjsrcinfo']
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+ print(customized_fcompiler('fujitsu').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py
new file mode 100644
index 00000000..e109a972
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/g95.py
@@ -0,0 +1,42 @@
+# http://g95.sourceforge.net/
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['G95FCompiler']
+
+class G95FCompiler(FCompiler):
+ compiler_type = 'g95'
+ description = 'G95 Fortran Compiler'
+
+# version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
+ # $ g95 --version
+ # G95 (GCC 4.0.3 (g95!) May 22 2006)
+
+ version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
+ # $ g95 --version
+ # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)
+
+ executables = {
+ 'version_cmd' : ["<F90>", "--version"],
+ 'compiler_f77' : ["g95", "-ffixed-form"],
+ 'compiler_fix' : ["g95", "-ffixed-form"],
+ 'compiler_f90' : ["g95"],
+ 'linker_so' : ["<F90>", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+ module_dir_switch = '-fmod='
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ return ['-fno-second-underscore']
+ def get_flags_opt(self):
+ return ['-O']
+ def get_flags_debug(self):
+ return ['-g']
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+ print(customized_fcompiler('g95').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py
new file mode 100644
index 00000000..3472b5d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/gnu.py
@@ -0,0 +1,555 @@
+import re
+import os
+import sys
+import warnings
+import platform
+import tempfile
+import hashlib
+import base64
+import subprocess
+from subprocess import Popen, PIPE, STDOUT
+from numpy.distutils.exec_command import filepath_from_subprocess_output
+from numpy.distutils.fcompiler import FCompiler
+from distutils.version import LooseVersion
+
+compilers = ['GnuFCompiler', 'Gnu95FCompiler']
+
+TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
+
+# XXX: handle cross compilation
+
+
+def is_win64():
+ return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+
+
+class GnuFCompiler(FCompiler):
+ compiler_type = 'gnu'
+ compiler_aliases = ('g77', )
+ description = 'GNU Fortran 77 compiler'
+
+ def gnu_version_match(self, version_string):
+ """Handle the different versions of GNU fortran compilers"""
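+        # Illustrative inputs and results (not exhaustive):
+        #   "4.8.5"              -> ('gfortran', '4.8.5')  (modern -dumpversion)
+        #   "GNU Fortran 0.5.25" -> ('g77', '0.5.25')      (old g77 banner)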
+ # Strip warning(s) that may be emitted by gfortran
+ while version_string.startswith('gfortran: warning'):
+ version_string =\
+ version_string[version_string.find('\n') + 1:].strip()
+
+ # Gfortran versions from after 2010 will output a simple string
+ # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
+ # gfortrans may still return long version strings (``-dumpversion`` was
+ # an alias for ``--version``)
+ if len(version_string) <= 20:
+ # Try to find a valid version string
+ m = re.search(r'([0-9.]+)', version_string)
+ if m:
+ # g77 provides a longer version string that starts with GNU
+ # Fortran
+ if version_string.startswith('GNU Fortran'):
+ return ('g77', m.group(1))
+
+ # gfortran only outputs a version string such as #.#.#, so check
+ # if the match is at the start of the string
+ elif m.start() == 0:
+ return ('gfortran', m.group(1))
+ else:
+ # Output probably from --version, try harder:
+ m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
+ if m:
+ return ('gfortran', m.group(1))
+ m = re.search(
+ r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
+ if m:
+ v = m.group(1)
+ if v.startswith('0') or v.startswith('2') or v.startswith('3'):
+ # the '0' is for early g77's
+ return ('g77', v)
+ else:
+ # at some point in the 4.x series, the ' 95' was dropped
+ # from the version string
+ return ('gfortran', v)
+
+ # If still nothing, raise an error to make the problem easy to find.
+ err = 'A valid Fortran version was not found in this string:\n'
+ raise ValueError(err + version_string)
+
+ def version_match(self, version_string):
+ v = self.gnu_version_match(version_string)
+ if not v or v[0] != 'g77':
+ return None
+ return v[1]
+
+ possible_executables = ['g77', 'f77']
+ executables = {
+ 'version_cmd' : [None, "-dumpversion"],
+ 'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
+ 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
+ 'compiler_fix' : None,
+ 'linker_so' : [None, "-g", "-Wall"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"],
+ 'linker_exe' : [None, "-g", "-Wall"]
+ }
+ module_dir_switch = None
+ module_include_switch = None
+
+ # Cygwin: f771: warning: -fPIC ignored for target (all code is
+ # position independent)
+ if os.name != 'nt' and sys.platform != 'cygwin':
+ pic_flags = ['-fPIC']
+
+ # use -mno-cygwin for g77 when Python is not Cygwin-Python
+ if sys.platform == 'win32':
+ for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
+ executables[key].append('-mno-cygwin')
+
+ g2c = 'g2c'
+ suggested_f90_compiler = 'gnu95'
+
+ def get_flags_linker_so(self):
+ opt = self.linker_so[1:]
+ if sys.platform == 'darwin':
+ target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
+ # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
+ # and leave it alone. But, distutils will complain if the
+ # environment's value is different from the one in the Python
+ # Makefile used to build Python. We let distutils handle this
+ # error checking.
+ if not target:
+ # If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
+ # we try to get it first from sysconfig and then
+                # fall back to setting it to 10.9. This is a reasonable default
+ # even when using the official Python dist and those derived
+ # from it.
+ import sysconfig
+ target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+ if not target:
+ target = '10.9'
+ s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
+ warnings.warn(s, stacklevel=2)
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target)
+ opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
+ else:
+ opt.append("-shared")
+ if sys.platform.startswith('sunos'):
+ # SunOS often has dynamically loaded symbols defined in the
+ # static library libg2c.a The linker doesn't like this. To
+ # ignore the problem, use the -mimpure-text flag. It isn't
+ # the safest thing, but seems to work. 'man gcc' says:
+ # ".. Instead of using -mimpure-text, you should compile all
+ # source code with -fpic or -fPIC."
+ opt.append('-mimpure-text')
+ return opt
+
+ def get_libgcc_dir(self):
+ try:
+ output = subprocess.check_output(self.compiler_f77 +
+ ['-print-libgcc-file-name'])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
+ return os.path.dirname(output)
+ return None
+
+ def get_libgfortran_dir(self):
+ if sys.platform[:5] == 'linux':
+ libgfortran_name = 'libgfortran.so'
+ elif sys.platform == 'darwin':
+ libgfortran_name = 'libgfortran.dylib'
+ else:
+ libgfortran_name = None
+
+ libgfortran_dir = None
+ if libgfortran_name:
+ find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
+ try:
+ output = subprocess.check_output(
+ self.compiler_f77 + find_lib_arg)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
+ libgfortran_dir = os.path.dirname(output)
+ return libgfortran_dir
+
+ def get_library_dirs(self):
+ opt = []
+ if sys.platform[:5] != 'linux':
+ d = self.get_libgcc_dir()
+ if d:
+ # if windows and not cygwin, libg2c lies in a different folder
+ if sys.platform == 'win32' and not d.startswith('/usr/lib'):
+ d = os.path.normpath(d)
+ path = os.path.join(d, "lib%s.a" % self.g2c)
+ if not os.path.exists(path):
+ root = os.path.join(d, *((os.pardir, ) * 4))
+ d2 = os.path.abspath(os.path.join(root, 'lib'))
+ path = os.path.join(d2, "lib%s.a" % self.g2c)
+ if os.path.exists(path):
+ opt.append(d2)
+ opt.append(d)
+ # For Macports / Linux, libgfortran and libgcc are not co-located
+ lib_gfortran_dir = self.get_libgfortran_dir()
+ if lib_gfortran_dir:
+ opt.append(lib_gfortran_dir)
+ return opt
+
+ def get_libraries(self):
+ opt = []
+ d = self.get_libgcc_dir()
+ if d is not None:
+ g2c = self.g2c + '-pic'
+ f = self.static_lib_format % (g2c, self.static_lib_extension)
+ if not os.path.isfile(os.path.join(d, f)):
+ g2c = self.g2c
+ else:
+ g2c = self.g2c
+
+ if g2c is not None:
+ opt.append(g2c)
+ c_compiler = self.c_compiler
+ if sys.platform == 'win32' and c_compiler and \
+ c_compiler.compiler_type == 'msvc':
+ opt.append('gcc')
+ if sys.platform == 'darwin':
+ opt.append('cc_dynamic')
+ return opt
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ v = self.get_version()
+        if v and LooseVersion(v) <= LooseVersion('3.3.3'):
+ # With this compiler version building Fortran BLAS/LAPACK
+ # with -O3 caused failures in lib.lapack heevr,syevr tests.
+ opt = ['-O2']
+ else:
+ opt = ['-O3']
+ opt.append('-funroll-loops')
+ return opt
+
+ def _c_arch_flags(self):
+ """ Return detected arch flags from CFLAGS """
+ import sysconfig
+ try:
+ cflags = sysconfig.get_config_vars()['CFLAGS']
+ except KeyError:
+ return []
+ arch_re = re.compile(r"-arch\s+(\w+)")
+ arch_flags = []
+ for arch in arch_re.findall(cflags):
+ arch_flags += ['-arch', arch]
+ return arch_flags
+
+ def get_flags_arch(self):
+ return []
+
+ def runtime_library_dir_option(self, dir):
+ if sys.platform == 'win32' or sys.platform == 'cygwin':
+ # Linux/Solaris/Unix support RPATH, Windows does not
+ raise NotImplementedError
+
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
+ if sys.platform == 'darwin':
+ return f'-Wl,-rpath,{dir}'
+ elif sys.platform.startswith(('aix', 'os400')):
+ # AIX RPATH is called LIBPATH
+ return f'-Wl,-blibpath:{dir}'
+ else:
+ return f'-Wl,-rpath={dir}'
+
+
+class Gnu95FCompiler(GnuFCompiler):
+ compiler_type = 'gnu95'
+ compiler_aliases = ('gfortran', )
+ description = 'GNU Fortran 95 compiler'
+
+ def version_match(self, version_string):
+ v = self.gnu_version_match(version_string)
+ if not v or v[0] != 'gfortran':
+ return None
+ v = v[1]
+ if LooseVersion(v) >= "4":
+ # gcc-4 series releases do not support -mno-cygwin option
+ pass
+ else:
+ # use -mno-cygwin flag for gfortran when Python is not
+ # Cygwin-Python
+ if sys.platform == 'win32':
+ for key in [
+ 'version_cmd', 'compiler_f77', 'compiler_f90',
+ 'compiler_fix', 'linker_so', 'linker_exe'
+ ]:
+ self.executables[key].append('-mno-cygwin')
+ return v
+
+ possible_executables = ['gfortran', 'f95']
+ executables = {
+ 'version_cmd' : ["<F90>", "-dumpversion"],
+ 'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
+ "-fno-second-underscore"],
+ 'compiler_f90' : [None, "-Wall", "-g",
+ "-fno-second-underscore"],
+        'compiler_fix' : [None, "-Wall", "-g", "-ffixed-form",
+ "-fno-second-underscore"],
+ 'linker_so' : ["<F90>", "-Wall", "-g"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"],
+ 'linker_exe' : [None, "-Wall"]
+ }
+
+ module_dir_switch = '-J'
+ module_include_switch = '-I'
+
+ if sys.platform.startswith(('aix', 'os400')):
+ executables['linker_so'].append('-lpthread')
+ if platform.architecture()[0][:2] == '64':
+        for key in ['compiler_f77', 'compiler_f90', 'compiler_fix',
+                    'linker_so', 'linker_exe']:
+ executables[key].append('-maix64')
+
+ g2c = 'gfortran'
+
+ def _universal_flags(self, cmd):
+ """Return a list of -arch flags for every supported architecture."""
+ if not sys.platform == 'darwin':
+ return []
+ arch_flags = []
+ # get arches the C compiler gets.
+ c_archs = self._c_arch_flags()
+ if "i386" in c_archs:
+ c_archs[c_archs.index("i386")] = "i686"
+ # check the arches the Fortran compiler supports, and compare with
+ # arch flags from C compiler
+ for arch in ["ppc", "i686", "x86_64", "ppc64", "s390x"]:
+ if _can_target(cmd, arch) and arch in c_archs:
+ arch_flags.extend(["-arch", arch])
+ return arch_flags
+
+ def get_flags(self):
+ flags = GnuFCompiler.get_flags(self)
+ arch_flags = self._universal_flags(self.compiler_f90)
+ if arch_flags:
+ flags[:0] = arch_flags
+ return flags
+
+ def get_flags_linker_so(self):
+ flags = GnuFCompiler.get_flags_linker_so(self)
+ arch_flags = self._universal_flags(self.linker_so)
+ if arch_flags:
+ flags[:0] = arch_flags
+ return flags
+
+ def get_library_dirs(self):
+ opt = GnuFCompiler.get_library_dirs(self)
+ if sys.platform == 'win32':
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ target = self.get_target()
+ if target:
+ d = os.path.normpath(self.get_libgcc_dir())
+ root = os.path.join(d, *((os.pardir, ) * 4))
+ path = os.path.join(root, "lib")
+ mingwdir = os.path.normpath(path)
+ if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
+ opt.append(mingwdir)
+ # For Macports / Linux, libgfortran and libgcc are not co-located
+ lib_gfortran_dir = self.get_libgfortran_dir()
+ if lib_gfortran_dir:
+ opt.append(lib_gfortran_dir)
+ return opt
+
+ def get_libraries(self):
+ opt = GnuFCompiler.get_libraries(self)
+ if sys.platform == 'darwin':
+ opt.remove('cc_dynamic')
+ if sys.platform == 'win32':
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ if "gcc" in opt:
+ i = opt.index("gcc")
+ opt.insert(i + 1, "mingwex")
+ opt.insert(i + 1, "mingw32")
+                # Same MSVC condition as above: report no extra libraries,
+                # since linking goes through the generated wrapper import
+                # library instead.
+                return []
+ return opt
+
+ def get_target(self):
+ try:
+ p = subprocess.Popen(
+ self.compiler_f77 + ['-v'],
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ stdout, stderr = p.communicate()
+ output = (stdout or b"") + (stderr or b"")
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
+ m = TARGET_R.search(output)
+ if m:
+ return m.group(1)
+ return ""
+
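+    # The digest computed here is embedded in the wrapper DLL/def/lib names
+    # in _link_wrapper_lib below, so unchanged inputs map to an existing
+    # wrapper and relinking can be skipped.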
+ def _hash_files(self, filenames):
+ h = hashlib.sha1()
+ for fn in filenames:
+ with open(fn, 'rb') as f:
+ while True:
+ block = f.read(131072)
+ if not block:
+ break
+ h.update(block)
+ text = base64.b32encode(h.digest())
+ text = text.decode('ascii')
+ return text.rstrip('=')
+
+ def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
+ chained_dlls, is_archive):
+ """Create a wrapper shared library for the given objects
+
+ Return an MSVC-compatible lib
+ """
+
+ c_compiler = self.c_compiler
+ if c_compiler.compiler_type != "msvc":
+ raise ValueError("This method only supports MSVC")
+
+ object_hash = self._hash_files(list(objects) + list(chained_dlls))
+
+ if is_win64():
+ tag = 'win_amd64'
+ else:
+ tag = 'win32'
+
+ basename = 'lib' + os.path.splitext(
+ os.path.basename(objects[0]))[0][:8]
+ root_name = basename + '.' + object_hash + '.gfortran-' + tag
+ dll_name = root_name + '.dll'
+ def_name = root_name + '.def'
+ lib_name = root_name + '.lib'
+ dll_path = os.path.join(extra_dll_dir, dll_name)
+ def_path = os.path.join(output_dir, def_name)
+ lib_path = os.path.join(output_dir, lib_name)
+
+ if os.path.isfile(lib_path):
+ # Nothing to do
+ return lib_path, dll_path
+
+ if is_archive:
+ objects = (["-Wl,--whole-archive"] + list(objects) +
+ ["-Wl,--no-whole-archive"])
+ self.link_shared_object(
+ objects,
+ dll_name,
+ output_dir=extra_dll_dir,
+ extra_postargs=list(chained_dlls) + [
+ '-Wl,--allow-multiple-definition',
+ '-Wl,--output-def,' + def_path,
+ '-Wl,--export-all-symbols',
+ '-Wl,--enable-auto-import',
+ '-static',
+ '-mlong-double-64',
+ ])
+
+ # No PowerPC!
+ if is_win64():
+ specifier = '/MACHINE:X64'
+ else:
+ specifier = '/MACHINE:X86'
+
+ # MSVC specific code
+ lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
+ if not c_compiler.initialized:
+ c_compiler.initialize()
+ c_compiler.spawn([c_compiler.lib] + lib_args)
+
+ return lib_path, dll_path
+
+ def can_ccompiler_link(self, compiler):
+ # MSVC cannot link objects compiled by GNU fortran
+ return compiler.compiler_type not in ("msvc", )
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+ """
+ if self.c_compiler.compiler_type == "msvc":
+ # Compile a DLL and return the lib for the DLL as
+ # the object. Also keep track of previous DLLs that
+ # we have compiled so that we can link against them.
+
+ # If there are .a archives, assume they are self-contained
+ # static libraries, and build separate DLLs for each
+ archives = []
+ plain_objects = []
+ for obj in objects:
+ if obj.lower().endswith('.a'):
+ archives.append(obj)
+ else:
+ plain_objects.append(obj)
+
+ chained_libs = []
+ chained_dlls = []
+ for archive in archives[::-1]:
+ lib, dll = self._link_wrapper_lib(
+ [archive],
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=True)
+ chained_libs.insert(0, lib)
+ chained_dlls.insert(0, dll)
+
+ if not plain_objects:
+ return chained_libs
+
+ lib, dll = self._link_wrapper_lib(
+ plain_objects,
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=False)
+ return [lib] + chained_libs
+ else:
+ raise ValueError("Unsupported C compiler")
+
+
+def _can_target(cmd, arch):
+ """Return true if the architecture supports the -arch flag"""
+ newcmd = cmd[:]
+ fid, filename = tempfile.mkstemp(suffix=".f")
+ os.close(fid)
+ try:
+ d = os.path.dirname(filename)
+ output = os.path.splitext(filename)[0] + ".o"
+ try:
+ newcmd.extend(["-arch", arch, "-c", filename])
+ p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
+ p.communicate()
+ return p.returncode == 0
+ finally:
+ if os.path.exists(output):
+ os.remove(output)
+ finally:
+ os.remove(filename)
+
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+
+ print(customized_fcompiler('gnu').get_version())
+ try:
+ print(customized_fcompiler('g95').get_version())
+ except Exception as e:
+ print(e)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py
new file mode 100644
index 00000000..09e6483b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py
@@ -0,0 +1,41 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['HPUXFCompiler']
+
+class HPUXFCompiler(FCompiler):
+
+ compiler_type = 'hpux'
+ description = 'HP Fortran 90 Compiler'
+ version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
+
+ executables = {
+ 'version_cmd' : ["f90", "+version"],
+ 'compiler_f77' : ["f90"],
+ 'compiler_fix' : ["f90"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["ld", "-b"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = None #XXX: fix me
+ module_include_switch = None #XXX: fix me
+ pic_flags = ['+Z']
+ def get_flags(self):
+ return self.pic_flags + ['+ppu', '+DD64']
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_libraries(self):
+ return ['m']
+ def get_library_dirs(self):
+ opt = ['/usr/lib/hpux64']
+ return opt
+ def get_version(self, force=0, ok_status=[256, 0, 1]):
+ # XXX status==256 may indicate 'unrecognized option' or
+ # 'no input file'. So, version_cmd needs more work.
+ return FCompiler.get_version(self, force, ok_status)
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(10)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='hpux').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py
new file mode 100644
index 00000000..eff24401
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/ibm.py
@@ -0,0 +1,97 @@
+import os
+import re
+import sys
+import subprocess
+
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils.misc_util import make_temp_file
+from distutils import log
+
+compilers = ['IBMFCompiler']
+
+class IBMFCompiler(FCompiler):
+ compiler_type = 'ibm'
+ description = 'IBM XL Fortran Compiler'
+ version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
+ #IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
+
+ executables = {
+ 'version_cmd' : ["<F77>", "-qversion"],
+ 'compiler_f77' : ["xlf"],
+ 'compiler_fix' : ["xlf90", "-qfixed"],
+ 'compiler_f90' : ["xlf90"],
+ 'linker_so' : ["xlf95"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_version(self,*args,**kwds):
+ version = FCompiler.get_version(self,*args,**kwds)
+
+ if version is None and sys.platform.startswith('aix'):
+ # use lslpp to find out xlf version
+ lslpp = find_executable('lslpp')
+ xlf = find_executable('xlf')
+            if (xlf and lslpp and
+                    os.path.exists(xlf) and os.path.exists(lslpp)):
+ try:
+                    # decode: the regex below expects str, not bytes
+                    o = subprocess.check_output(
+                        [lslpp, '-Lc', 'xlfcmp']).decode()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
+ if m: version = m.group('version')
+
+ xlf_dir = '/etc/opt/ibmcmp/xlf'
+ if version is None and os.path.isdir(xlf_dir):
+ # linux:
+ # If the output of xlf does not contain version info
+ # (that's the case with xlf 8.1, for instance) then
+ # let's try another method:
+ l = sorted(os.listdir(xlf_dir))
+ l.reverse()
+ l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
+ if l:
+ from distutils.version import LooseVersion
+ self.version = version = LooseVersion(l[0])
+ return version
+
+ def get_flags(self):
+ return ['-qextname']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_linker_so(self):
+ opt = []
+ if sys.platform=='darwin':
+ opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
+ else:
+ opt.append('-bshared')
+ version = self.get_version(ok_status=[0, 40])
+ if version is not None:
+ if sys.platform.startswith('aix'):
+ xlf_cfg = '/etc/xlf.cfg'
+ else:
+ xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
+ fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
+ log.info('Creating '+new_cfg)
+ with open(xlf_cfg, 'r') as fi:
+ crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match
+ for line in fi:
+ m = crt1_match(line)
+ if m:
+ fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
+ else:
+ fo.write(line)
+ fo.close()
+ opt.append('-F'+new_cfg)
+ return opt
+
+ def get_flags_opt(self):
+ return ['-O3']
+
+if __name__ == '__main__':
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+ print(customized_fcompiler(compiler='ibm').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py
new file mode 100644
index 00000000..1d606590
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/intel.py
@@ -0,0 +1,211 @@
+# http://developer.intel.com/software/products/compilers/flin/
+import sys
+
+from numpy.distutils.ccompiler import simple_version_match
+from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
+
+compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
+ 'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
+ 'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
+
+
+def intel_version_match(type):
+ # Match against the important stuff in the version string
+ return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,))
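+    # e.g. intel_version_match('32-bit|IA-32') is intended to match a banner
+    # such as (illustrative): "Intel(R) Fortran IA-32 Compiler ... Version 11.1"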
+
+
+class BaseIntelFCompiler(FCompiler):
+ def update_executables(self):
+ f = dummy_fortran_file()
+ self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
+ f + '.f', '-o', f + '.o']
+
+ def runtime_library_dir_option(self, dir):
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
+ return '-Wl,-rpath=%s' % dir
+
+
+class IntelFCompiler(BaseIntelFCompiler):
+
+ compiler_type = 'intel'
+ compiler_aliases = ('ifort',)
+ description = 'Intel Fortran Compiler for 32-bit apps'
+ version_match = intel_version_match('32-bit|IA-32')
+
+ possible_executables = ['ifort', 'ifc']
+
+ executables = {
+ 'version_cmd' : None, # set by update_executables
+ 'compiler_f77' : [None, "-72", "-w90", "-w95"],
+ 'compiler_f90' : [None],
+ 'compiler_fix' : [None, "-FI"],
+ 'linker_so' : ["<F90>", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ pic_flags = ['-fPIC']
+ module_dir_switch = '-module ' # Don't remove ending space!
+ module_include_switch = '-I'
+
+ def get_flags_free(self):
+ return ['-FR']
+
+ def get_flags(self):
+ return ['-fPIC']
+
+ def get_flags_opt(self): # Scipy test failures with -O2
+ v = self.get_version()
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ return ['-fp-model', 'strict', '-O1',
+ '-assume', 'minus0', '-{}'.format(mpopt)]
+
+ def get_flags_arch(self):
+ return []
+
+ def get_flags_linker_so(self):
+ opt = FCompiler.get_flags_linker_so(self)
+ v = self.get_version()
+ if v and v >= '8.0':
+ opt.append('-nofor_main')
+ if sys.platform == 'darwin':
+ # Here, it's -dynamiclib
+ try:
+ idx = opt.index('-shared')
+ opt.remove('-shared')
+ except ValueError:
+ idx = 0
+ opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
+ return opt
+
+
+class IntelItaniumFCompiler(IntelFCompiler):
+ compiler_type = 'intele'
+ compiler_aliases = ()
+ description = 'Intel Fortran Compiler for Itanium apps'
+
+ version_match = intel_version_match('Itanium|IA-64')
+
+ possible_executables = ['ifort', 'efort', 'efc']
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None, "-FI", "-w90", "-w95"],
+ 'compiler_fix' : [None, "-FI"],
+ 'compiler_f90' : [None],
+ 'linker_so' : ['<F90>', "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+
+class IntelEM64TFCompiler(IntelFCompiler):
+ compiler_type = 'intelem'
+ compiler_aliases = ()
+ description = 'Intel Fortran Compiler for 64-bit apps'
+
+ version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')
+
+ possible_executables = ['ifort', 'efort', 'efc']
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None, "-FI"],
+ 'compiler_fix' : [None, "-FI"],
+ 'compiler_f90' : [None],
+ 'linker_so' : ['<F90>', "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+# Is there no difference in the version string between the above compilers
+# and the Visual compilers?
+
+
+class IntelVisualFCompiler(BaseIntelFCompiler):
+ compiler_type = 'intelv'
+ description = 'Intel Visual Fortran Compiler for 32-bit apps'
+ version_match = intel_version_match('32-bit|IA-32')
+
+ def update_executables(self):
+ f = dummy_fortran_file()
+ self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
+ f + '.f', '/o', f + '.o']
+
+ ar_exe = 'lib.exe'
+ possible_executables = ['ifort', 'ifl']
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None],
+ 'compiler_fix' : [None],
+ 'compiler_f90' : [None],
+ 'linker_so' : [None],
+ 'archiver' : [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib' : None
+ }
+
+ compile_switch = '/c '
+ object_switch = '/Fo' # No space after /Fo!
+ library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '/module:' # No space after /module:
+ module_include_switch = '/I'
+
+ def get_flags(self):
+ opt = ['/nologo', '/MD', '/nbs', '/names:lowercase',
+ '/assume:underscore', '/fpp']
+ return opt
+
+ def get_flags_free(self):
+ return []
+
+ def get_flags_debug(self):
+ return ['/4Yb', '/d2']
+
+ def get_flags_opt(self):
+ return ['/O1', '/assume:minus0'] # Scipy test failures with /O2
+
+ def get_flags_arch(self):
+ return ["/arch:IA32", "/QaxSSE3"]
+
+ def runtime_library_dir_option(self, dir):
+ raise NotImplementedError
+
+
+class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
+ compiler_type = 'intelev'
+ description = 'Intel Visual Fortran Compiler for Itanium apps'
+
+ version_match = intel_version_match('Itanium')
+
+ possible_executables = ['efl'] # XXX this is a wild guess
+ ar_exe = IntelVisualFCompiler.ar_exe
+
+ executables = {
+ 'version_cmd' : None,
+ 'compiler_f77' : [None, "-FI", "-w90", "-w95"],
+ 'compiler_fix' : [None, "-FI", "-4L72", "-w"],
+ 'compiler_f90' : [None],
+ 'linker_so' : ['<F90>', "-shared"],
+ 'archiver' : [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib' : None
+ }
+
+
+class IntelEM64VisualFCompiler(IntelVisualFCompiler):
+ compiler_type = 'intelvem'
+ description = 'Intel Visual Fortran Compiler for 64-bit apps'
+
+ version_match = simple_version_match(start=r'Intel\(R\).*?64,')
+
+ def get_flags_arch(self):
+ return []
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='intel').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py
new file mode 100644
index 00000000..e9258382
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/lahey.py
@@ -0,0 +1,45 @@
+import os
+
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['LaheyFCompiler']
+
+class LaheyFCompiler(FCompiler):
+
+ compiler_type = 'lahey'
+ description = 'Lahey/Fujitsu Fortran 95 Compiler'
+ version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
+
+ executables = {
+ 'version_cmd' : ["<F90>", "--version"],
+ 'compiler_f77' : ["lf95", "--fix"],
+ 'compiler_fix' : ["lf95", "--fix"],
+ 'compiler_f90' : ["lf95"],
+ 'linker_so' : ["lf95", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ module_dir_switch = None #XXX Fix me
+ module_include_switch = None #XXX Fix me
+
+ def get_flags_opt(self):
+ return ['-O']
+ def get_flags_debug(self):
+ return ['-g', '--chk', '--chkglobal']
+ def get_library_dirs(self):
+ opt = []
+ d = os.environ.get('LAHEY')
+ if d:
+ opt.append(os.path.join(d, 'lib'))
+ return opt
+ def get_libraries(self):
+ opt = []
+ opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='lahey').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py
new file mode 100644
index 00000000..a0973804
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/mips.py
@@ -0,0 +1,54 @@
+from numpy.distutils.cpuinfo import cpu
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['MIPSFCompiler']
+
+class MIPSFCompiler(FCompiler):
+
+ compiler_type = 'mips'
+ description = 'MIPSpro Fortran Compiler'
+ version_pattern = r'MIPSpro Compilers: Version (?P<version>[^\s*,]*)'
+
+ executables = {
+ 'version_cmd' : ["<F90>", "-version"],
+ 'compiler_f77' : ["f77", "-f77"],
+ 'compiler_fix' : ["f90", "-fixedform"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["f90", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : None
+ }
+ module_dir_switch = None #XXX: fix me
+ module_include_switch = None #XXX: fix me
+ pic_flags = ['-KPIC']
+
+ def get_flags(self):
+ return self.pic_flags + ['-n32']
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_arch(self):
+ opt = []
+ for a in '19 20 21 22_4k 22_5k 24 25 26 27 28 30 32_5k 32_10k'.split():
+ if getattr(cpu, 'is_IP%s'%a)():
+ opt.append('-TARG:platform=IP%s' % a)
+ break
+ return opt
+ def get_flags_arch_f77(self):
+ r = None
+ if cpu.is_r10000(): r = 10000
+ elif cpu.is_r12000(): r = 12000
+ elif cpu.is_r8000(): r = 8000
+ elif cpu.is_r5000(): r = 5000
+ elif cpu.is_r4000(): r = 4000
+ if r is not None:
+ return ['r%s' % (r)]
+ return []
+ def get_flags_arch_f90(self):
+ r = self.get_flags_arch_f77()
+ if r:
+ r[0] = '-' + r[0]
+ return r
+
+if __name__ == '__main__':
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='mips').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py
new file mode 100644
index 00000000..939201f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nag.py
@@ -0,0 +1,87 @@
+import sys
+import re
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['NAGFCompiler', 'NAGFORCompiler']
+
+class BaseNAGFCompiler(FCompiler):
+ version_pattern = r'NAG.* Release (?P<version>[^(\s]*)'
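+    # e.g. (illustrative) "NAG Fortran Compiler Release 6.2(Chiyoda)" -> '6.2'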
+
+ def version_match(self, version_string):
+ m = re.search(self.version_pattern, version_string)
+ if m:
+ return m.group('version')
+ else:
+ return None
+
+ def get_flags_linker_so(self):
+ return ["-Wl,-shared"]
+ def get_flags_opt(self):
+ return ['-O4']
+ def get_flags_arch(self):
+ return []
+
+class NAGFCompiler(BaseNAGFCompiler):
+
+ compiler_type = 'nag'
+ description = 'NAGWare Fortran 95 Compiler'
+
+ executables = {
+ 'version_cmd' : ["<F90>", "-V"],
+ 'compiler_f77' : ["f95", "-fixed"],
+ 'compiler_fix' : ["f95", "-fixed"],
+ 'compiler_f90' : ["f95"],
+ 'linker_so' : ["<F90>"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_flags_linker_so(self):
+ if sys.platform == 'darwin':
+ return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return BaseNAGFCompiler.get_flags_linker_so(self)
+ def get_flags_arch(self):
+ version = self.get_version()
+ if version and version < '5.1':
+ return ['-target=native']
+ else:
+ return BaseNAGFCompiler.get_flags_arch(self)
+ def get_flags_debug(self):
+ return ['-g', '-gline', '-g90', '-nan', '-C']
+
+class NAGFORCompiler(BaseNAGFCompiler):
+
+ compiler_type = 'nagfor'
+ description = 'NAG Fortran Compiler'
+
+ executables = {
+ 'version_cmd' : ["nagfor", "-V"],
+ 'compiler_f77' : ["nagfor", "-fixed"],
+ 'compiler_fix' : ["nagfor", "-fixed"],
+ 'compiler_f90' : ["nagfor"],
+ 'linker_so' : ["nagfor"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+
+ def get_flags_linker_so(self):
+ if sys.platform == 'darwin':
+ return ['-unsharedrts',
+ '-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return BaseNAGFCompiler.get_flags_linker_so(self)
+ def get_flags_debug(self):
+ version = self.get_version()
+ if version and version > '6.1':
+ return ['-g', '-u', '-nan', '-C=all', '-thread_safe',
+ '-kind=unique', '-Warn=allocation', '-Warn=subnormal']
+ else:
+ return ['-g', '-nan', '-C=all', '-u', '-thread_safe']
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ compiler = customized_fcompiler(compiler='nagfor')
+ print(compiler.get_version())
+ print(compiler.get_flags_debug())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py
new file mode 100644
index 00000000..ef411fff
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/none.py
@@ -0,0 +1,28 @@
+from numpy.distutils.fcompiler import FCompiler
+from numpy.distutils import customized_fcompiler
+
+compilers = ['NoneFCompiler']
+
+class NoneFCompiler(FCompiler):
+
+ compiler_type = 'none'
+ description = 'Fake Fortran compiler'
+
+ executables = {'compiler_f77': None,
+ 'compiler_f90': None,
+ 'compiler_fix': None,
+ 'linker_so': None,
+ 'linker_exe': None,
+ 'archiver': None,
+ 'ranlib': None,
+ 'version_cmd': None,
+ }
+
+ def find_executables(self):
+ pass
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ print(customized_fcompiler(compiler='none').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py
new file mode 100644
index 00000000..212f3480
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/nv.py
@@ -0,0 +1,53 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['NVHPCFCompiler']
+
+class NVHPCFCompiler(FCompiler):
+ """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler
+
+ https://developer.nvidia.com/hpc-sdk
+
+    Since August 2020 the NVIDIA HPC SDK includes the compilers formerly
+    known as the Portland Group (PGI) compilers,
+    https://www.pgroup.com/index.htm.
+ See also `numpy.distutils.fcompiler.pg`.
+ """
+
+ compiler_type = 'nv'
+ description = 'NVIDIA HPC SDK'
+ version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'
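+    # $ nvfortran -V
+    # nvfortran x.y-z ...   (illustrative; "pgfortran (aka nvfortran) x.y-z"
+    # banners from the PGI-named drivers are matched as well)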
+
+ executables = {
+ 'version_cmd': ["<F90>", "-V"],
+ 'compiler_f77': ["nvfortran"],
+ 'compiler_fix': ["nvfortran", "-Mfixed"],
+ 'compiler_f90': ["nvfortran"],
+ 'linker_so': ["<F90>"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+
+ module_dir_switch = '-module '
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = ['-Minform=inform', '-Mnosecond_underscore']
+ return self.pic_flags + opt
+
+ def get_flags_opt(self):
+ return ['-fast']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='nv').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py
new file mode 100644
index 00000000..0768cb12
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pathf95.py
@@ -0,0 +1,33 @@
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['PathScaleFCompiler']
+
+class PathScaleFCompiler(FCompiler):
+
+ compiler_type = 'pathf95'
+ description = 'PathScale Fortran Compiler'
+ version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'
+
+ executables = {
+ 'version_cmd' : ["pathf95", "-version"],
+ 'compiler_f77' : ["pathf95", "-fixedform"],
+ 'compiler_fix' : ["pathf95", "-fixedform"],
+ 'compiler_f90' : ["pathf95"],
+ 'linker_so' : ["pathf95", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-fPIC']
+ module_dir_switch = '-module ' # Don't remove ending space!
+ module_include_switch = '-I'
+
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_debug(self):
+ return ['-g']
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='pathf95').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py
new file mode 100644
index 00000000..72442c4f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/pg.py
@@ -0,0 +1,128 @@
+# http://www.pgroup.com
+import sys
+import functools
+
+from numpy.distutils.fcompiler import FCompiler
+from sys import platform
+from os.path import join, dirname, normpath
+
+compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']
+
+
+class PGroupFCompiler(FCompiler):
+
+ compiler_type = 'pg'
+ description = 'Portland Group Fortran Compiler'
+ version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
+
+ if platform == 'darwin':
+ executables = {
+ 'version_cmd': ["<F77>", "-V"],
+ 'compiler_f77': ["pgfortran", "-dynamiclib"],
+ 'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
+ 'compiler_f90': ["pgfortran", "-dynamiclib"],
+ 'linker_so': ["libtool"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['']
+ else:
+ executables = {
+ 'version_cmd': ["<F77>", "-V"],
+ 'compiler_f77': ["pgfortran"],
+ 'compiler_fix': ["pgfortran", "-Mfixed"],
+ 'compiler_f90': ["pgfortran"],
+ 'linker_so': ["<F90>"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+
+ module_dir_switch = '-module '
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = ['-Minform=inform', '-Mnosecond_underscore']
+ return self.pic_flags + opt
+
+ def get_flags_opt(self):
+ return ['-fast']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ if platform == 'darwin':
+ def get_flags_linker_so(self):
+ return ["-dynamic", '-undefined', 'dynamic_lookup']
+
+ else:
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+
+class PGroupFlangCompiler(FCompiler):
+ compiler_type = 'flang'
+ description = 'Portland Group Fortran LLVM Compiler'
+ version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'
+
+ ar_exe = 'lib.exe'
+ possible_executables = ['flang']
+
+ executables = {
+ 'version_cmd': ["<F77>", "--version"],
+ 'compiler_f77': ["flang"],
+ 'compiler_fix': ["flang"],
+ 'compiler_f90': ["flang"],
+ 'linker_so': [None],
+ 'archiver': [ar_exe, "/verbose", "/OUT:"],
+ 'ranlib': None
+ }
+
+ library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '-module ' # Don't remove ending space!
+
+ def get_libraries(self):
+ opt = FCompiler.get_libraries(self)
+ opt.extend(['flang', 'flangrti', 'ompstub'])
+ return opt
+
+ @functools.lru_cache(maxsize=128)
+ def get_library_dirs(self):
+ """List of compiler library directories."""
+ opt = FCompiler.get_library_dirs(self)
+ flang_dir = dirname(self.executables['compiler_f77'][0])
+ opt.append(normpath(join(flang_dir, '..', 'lib')))
+
+ return opt
+
+ def get_flags(self):
+ return []
+
+ def get_flags_free(self):
+ return []
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_opt(self):
+ return ['-O3']
+
+ def get_flags_arch(self):
+ return []
+
+ def runtime_library_dir_option(self, dir):
+ raise NotImplementedError
+
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ if 'flang' in sys.argv:
+ print(customized_fcompiler(compiler='flang').get_version())
+ else:
+ print(customized_fcompiler(compiler='pg').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py
new file mode 100644
index 00000000..d039f0b2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/sun.py
@@ -0,0 +1,51 @@
+from numpy.distutils.ccompiler import simple_version_match
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['SunFCompiler']
+
+class SunFCompiler(FCompiler):
+
+ compiler_type = 'sun'
+ description = 'Sun or Forte Fortran 95 Compiler'
+ # ex:
+ # f90: Sun WorkShop 6 update 2 Fortran 95 6.2 Patch 111690-10 2003/08/28
+ version_match = simple_version_match(
+ start=r'f9[05]: (Sun|Forte|WorkShop).*Fortran 95')
+
+ executables = {
+ 'version_cmd' : ["<F90>", "-V"],
+ 'compiler_f77' : ["f90"],
+ 'compiler_fix' : ["f90", "-fixed"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["<F90>", "-Bdynamic", "-G"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = '-moddir='
+ module_include_switch = '-M'
+ pic_flags = ['-xcode=pic32']
+
+ def get_flags_f77(self):
+ ret = ["-ftrap=%none"]
+ if (self.get_version() or '') >= '7':
+ ret.append("-f77")
+ else:
+ ret.append("-fixed")
+ return ret
+ def get_opt(self):
+ return ['-fast', '-dalign']
+ def get_arch(self):
+ return ['-xtarget=generic']
+ def get_libraries(self):
+ opt = []
+ opt.extend(['fsu', 'sunmath', 'mvec'])
+ return opt
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='sun').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py
new file mode 100644
index 00000000..92a1647b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/fcompiler/vast.py
@@ -0,0 +1,52 @@
+import os
+
+from numpy.distutils.fcompiler.gnu import GnuFCompiler
+
+compilers = ['VastFCompiler']
+
+class VastFCompiler(GnuFCompiler):
+ compiler_type = 'vast'
+ compiler_aliases = ()
+ description = 'Pacific-Sierra Research Fortran 90 Compiler'
+ version_pattern = (r'\s*Pacific-Sierra Research vf90 '
+ r'(Personal|Professional)\s+(?P<version>[^\s]*)')
+
+    # VAST f90 does not support -o with -c, so object files are created
+    # in the current directory and then moved to the build directory.
+ object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
+
+ executables = {
+ 'version_cmd' : ["vf90", "-v"],
+ 'compiler_f77' : ["g77"],
+ 'compiler_fix' : ["f90", "-Wv,-ya"],
+ 'compiler_f90' : ["f90"],
+ 'linker_so' : ["<F90>"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ module_dir_switch = None #XXX Fix me
+ module_include_switch = None #XXX Fix me
+
+ def find_executables(self):
+ pass
+
+ def get_version_cmd(self):
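+        # The version is reported by the 'vf90' driver; derive its path from
+        # the f90 executable, e.g. (illustrative) /path/to/f90 -> /path/to/vf90.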
+ f90 = self.compiler_f90[0]
+ d, b = os.path.split(f90)
+ vf90 = os.path.join(d, 'v'+b)
+ return vf90
+
+ def get_flags_arch(self):
+ vast_version = self.get_version()
+ gnu = GnuFCompiler()
+ gnu.customize(None)
+ self.version = gnu.get_version()
+ opt = GnuFCompiler.get_flags_arch(self)
+ self.version = vast_version
+ return opt
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='vast').get_version())
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/from_template.py b/venv/lib/python3.9/site-packages/numpy/distutils/from_template.py
new file mode 100644
index 00000000..90d1f4c3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/from_template.py
@@ -0,0 +1,261 @@
+#!/usr/bin/env python3
+"""
+
+process_file(filename)
+
+  takes a templated file .xxx.src and produces the .xxx file, where .xxx
+  is .pyf, .f90 or .f, using the following template rules:
+
+ '<..>' denotes a template.
+
+ All function and subroutine blocks in a source file with names that
+ contain '<..>' will be replicated according to the rules in '<..>'.
+
+ The number of comma-separated words in '<..>' will determine the number of
+ replicates.
+
+ '<..>' may have two different forms, named and short. For example,
+
+ named:
+ <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
+ 'd', 's', 'z', and 'c' for each replicate of the block.
+
+ <_c> is already defined: <_c=s,d,c,z>
+ <_t> is already defined: <_t=real,double precision,complex,double complex>
+
+ short:
+ <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+ a block.
+
+  In general, '<..>' contains a comma-separated list of arbitrary
+  expressions. If an expression must contain a comma, '<' or '>',
+  prepend that character with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by the <index>-th expression (counting from 0).
+
+ Note that all '<..>' forms in a block must have the same number of
+ comma-separated entries.
+
+ Predefined named template rules:
+ <prefix=s,d,c,z>
+ <ftype=real,double precision,complex,double complex>
+ <ftypereal=real,double precision,\\0,\\1>
+ <ctype=float,double,complex_float,complex_double>
+ <ctypereal=float,double,\\0,\\1>
+
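+  For example (illustrative), a block
+
+    subroutine <prefix>swap(x)
+    <ftype> x
+    end subroutine <prefix>swap
+
+  is replicated four times, yielding sswap, dswap, cswap and zswap with x
+  declared as real, double precision, complex and double complex,
+  respectively.
+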
+"""
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+ """ Return a list of tuples for each function or subroutine each
+ tuple is the start and end of a subroutine or function to be
+ expanded.
+ """
+
+ spanlist = []
+ ind = 0
+ while True:
+ m = routine_start_re.search(astr, ind)
+ if m is None:
+ break
+ start = m.start()
+ if function_start_re.match(astr, start, m.end()):
+ while True:
+ i = astr.rfind('\n', ind, start)
+ if i==-1:
+ break
+ start = i
+            if astr[i:i+7]!='\n     $':
+ break
+ start += 1
+ m = routine_end_re.search(astr, m.end())
+ ind = end = m and m.end()-1 or len(astr)
+ spanlist.append((start, end))
+ return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+ reps = named_re.findall(astr)
+ names = {}
+ for rep in reps:
+ name = rep[0].strip() or unique_key(names)
+ repl = rep[1].replace(r'\,', '@comma@')
+ thelist = conv(repl)
+ names[name] = thelist
+ return names
+
+def find_and_remove_repl_patterns(astr):
+ names = find_repl_patterns(astr)
+ astr = re.subn(named_re, '', astr)[0]
+ return astr, names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+ b = astr.split(',')
+ l = [x.strip() for x in b]
+ for i in range(len(l)):
+ m = item_re.match(l[i])
+ if m:
+ j = int(m.group('index'))
+ l[i] = l[j]
+ return ','.join(l)
+
+def unique_key(adict):
+ """ Obtain a unique key given a dictionary."""
+ allkeys = list(adict.keys())
+ done = False
+ n = 1
+ while not done:
+ newkey = '__l%s' % (n)
+ if newkey in allkeys:
+ n += 1
+ else:
+ done = True
+ return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+ substr = substr.replace(r'\>', '@rightarrow@')
+ substr = substr.replace(r'\<', '@leftarrow@')
+ lnames = find_repl_patterns(substr)
+ substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
+
+ def listrepl(mobj):
+ thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+ if template_name_re.match(thelist):
+ return "<%s>" % (thelist)
+ name = None
+ for key in lnames.keys(): # see if list is already in dictionary
+ if lnames[key] == thelist:
+ name = key
+ if name is None: # this list is not in the dictionary yet
+ name = unique_key(lnames)
+ lnames[name] = thelist
+ return "<%s>" % name
+
+ substr = list_re.sub(listrepl, substr) # convert all lists to named templates
+ # newnames are constructed as needed
+
+ numsubs = None
+ base_rule = None
+ rules = {}
+ for r in template_re.findall(substr):
+ if r not in rules:
+ thelist = lnames.get(r, names.get(r, None))
+ if thelist is None:
+ raise ValueError('No replicates found for <%s>' % (r))
+ if r not in names and not thelist.startswith('_'):
+ names[r] = thelist
+ rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+ num = len(rule)
+
+ if numsubs is None:
+ numsubs = num
+ rules[r] = rule
+ base_rule = r
+ elif num == numsubs:
+ rules[r] = rule
+ else:
+ print("Mismatch in number of replacements (base <%s=%s>)"
+ " for <%s=%s>. Ignoring." %
+ (base_rule, ','.join(rules[base_rule]), r, thelist))
+ if not rules:
+ return substr
+
+ def namerepl(mobj):
+ name = mobj.group(1)
+ return rules.get(name, (k+1)*[name])[k]
+
+ newstr = ''
+ for k in range(numsubs):
+ newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+ newstr = newstr.replace('@rightarrow@', '>')
+ newstr = newstr.replace('@leftarrow@', '<')
+ return newstr
+
+def process_str(allstr):
+ newstr = allstr
+ writestr = ''
+
+ struct = parse_structure(newstr)
+
+ oldend = 0
+ names = {}
+ names.update(_special_names)
+ for sub in struct:
+ cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+ writestr += cleanedstr
+ names.update(defs)
+ writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+ oldend = sub[1]
+ writestr += newstr[oldend:]
+
+ return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+def resolve_includes(source):
+ d = os.path.dirname(source)
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
+ else:
+ lines.append(line)
+ return lines
+
+def process_file(source):
+ lines = resolve_includes(source)
+ return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ctype=float,double,complex_float,complex_double>
+<ftypereal=real,double precision,\\0,\\1>
+<ctypereal=float,double,\\0,\\1>
+''')
+
+def main():
+ try:
+ file = sys.argv[1]
+ except IndexError:
+ fid = sys.stdin
+ outfile = sys.stdout
+ else:
+ fid = open(file, 'r')
+ (base, ext) = os.path.splitext(file)
+ newname = base
+ outfile = open(newname, 'w')
+
+ allstr = fid.read()
+ writestr = process_str(allstr)
+ outfile.write(writestr)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py
new file mode 100644
index 00000000..0fa1c11d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/intelccompiler.py
@@ -0,0 +1,111 @@
+import platform
+
+from distutils.unixccompiler import UnixCCompiler
+from numpy.distutils.exec_command import find_executable
+from numpy.distutils.ccompiler import simple_version_match
+if platform.system() == 'Windows':
+ from numpy.distutils.msvc9compiler import MSVCCompiler
+
+
+class IntelCCompiler(UnixCCompiler):
+ """A modified Intel compiler compatible with a GCC-built Python."""
+ compiler_type = 'intel'
+ cc_exe = 'icc'
+    cc_args = '-fPIC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+
+ v = self.get_version()
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
+ '-fomit-frame-pointer -{}').format(mpopt)
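+        # e.g. (illustrative) for icc >= 15 this resolves to:
+        # "icc -fPIC -fp-model strict -O3 -fomit-frame-pointer -qopenmp"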
+ compiler = self.cc_exe
+
+ if platform.system() == 'Darwin':
+ shared_flag = '-Wl,-undefined,dynamic_lookup'
+ else:
+ shared_flag = '-shared'
+ self.set_executables(compiler=compiler,
+ compiler_so=compiler,
+ compiler_cxx=compiler,
+ archiver='xiar' + ' cru',
+ linker_exe=compiler + ' -shared-intel',
+ linker_so=compiler + ' ' + shared_flag +
+ ' -shared-intel')
+
+
+class IntelItaniumCCompiler(IntelCCompiler):
+ compiler_type = 'intele'
+
+ # On Itanium, the Intel Compiler used to be called ecc, let's search for
+ # it (now it's also icc, so ecc is last in the search).
+ for cc_exe in map(find_executable, ['icc', 'ecc']):
+ if cc_exe:
+ break
+
+
+class IntelEM64TCCompiler(UnixCCompiler):
+ """
+ A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
+ """
+ compiler_type = 'intelem'
+ cc_exe = 'icc -m64'
+ cc_args = '-fPIC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+
+ v = self.get_version()
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
+ '-fomit-frame-pointer -{}').format(mpopt)
+ compiler = self.cc_exe
+
+ if platform.system() == 'Darwin':
+ shared_flag = '-Wl,-undefined,dynamic_lookup'
+ else:
+ shared_flag = '-shared'
+ self.set_executables(compiler=compiler,
+ compiler_so=compiler,
+ compiler_cxx=compiler,
+ archiver='xiar' + ' cru',
+ linker_exe=compiler + ' -shared-intel',
+ linker_so=compiler + ' ' + shared_flag +
+ ' -shared-intel')
+
+
+if platform.system() == 'Windows':
+ class IntelCCompilerW(MSVCCompiler):
+ """
+ A modified Intel compiler compatible with an MSVC-built Python.
+ """
+ compiler_type = 'intelw'
+ compiler_cxx = 'icl'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ MSVCCompiler.__init__(self, verbose, dry_run, force)
+ version_match = simple_version_match(start=r'Intel\(R\).*?32,')
+ self.__version = version_match
+
+ def initialize(self, plat_name=None):
+ MSVCCompiler.initialize(self, plat_name)
+ self.cc = self.find_exe('icl.exe')
+ self.lib = self.find_exe('xilib')
+ self.linker = self.find_exe('xilink')
+ self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
+ '/Qstd=c99']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
+ '/Qstd=c99', '/Z7', '/D_DEBUG']
+
+ class IntelEM64TCCompilerW(IntelCCompilerW):
+ """
+ A modified Intel x86_64 compiler compatible with
+ a 64bit MSVC-built Python.
+ """
+ compiler_type = 'intelemw'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ MSVCCompiler.__init__(self, verbose, dry_run, force)
+ version_match = simple_version_match(start=r'Intel\(R\).*?64,')
+ self.__version = version_match
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py b/venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py
new file mode 100644
index 00000000..851682c6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/lib2def.py
@@ -0,0 +1,116 @@
+import re
+import sys
+import subprocess
+
+__doc__ = """This module generates a DEF file from the symbols in
+an MSVC-compiled DLL import library. It correctly discriminates between
+data and functions. The data is collected from the output of the program
+nm(1).
+
+Usage:
+ python lib2def.py [libname.lib] [output.def]
+or
+ python lib2def.py [libname.lib] > output.def
+
+libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout
+
+Author: Robert Kern <kernr@mail.ncifcrf.gov>
+Last Update: April 30, 1999
+"""
+
+__version__ = '0.1a'
+
+py_ver = "%d%d" % tuple(sys.version_info[:2])
+
+DEFAULT_NM = ['nm', '-Cs']
+
+DEF_HEADER = """LIBRARY python%s.dll
+;CODE PRELOAD MOVEABLE DISCARDABLE
+;DATA PRELOAD SINGLE
+
+EXPORTS
+""" % py_ver
+# the header of the DEF file
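+# For example (illustrative), output_def() below extends this header with
+# one "\t<symbol> DATA" line per data symbol, a blank line, and then one
+# "\t<symbol>" line per function symbol.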
+
+FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE)
+DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE)
+
+def parse_cmd():
+ """Parses the command-line arguments.
+
+libfile, deffile = parse_cmd()"""
+ if len(sys.argv) == 3:
+ if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def':
+ libfile, deffile = sys.argv[1:]
+ elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
+ deffile, libfile = sys.argv[1:]
+        else:
+            print("I'm assuming that your first argument is the library")
+            print("and the second is the DEF file.")
+            libfile, deffile = sys.argv[1:]
+ elif len(sys.argv) == 2:
+ if sys.argv[1][-4:] == '.def':
+ deffile = sys.argv[1]
+ libfile = 'python%s.lib' % py_ver
+ elif sys.argv[1][-4:] == '.lib':
+ deffile = None
+ libfile = sys.argv[1]
+ else:
+ libfile = 'python%s.lib' % py_ver
+ deffile = None
+ return libfile, deffile
+
+def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):
+ """Returns the output of nm_cmd via a pipe.
+
+nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
+ p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, text=True)
+ nm_output, nm_err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError('failed to run "%s": "%s"' % (
+ ' '.join(nm_cmd), nm_err))
+ return nm_output
+
+def parse_nm(nm_output):
+ """Returns a tuple of lists: dlist for the list of data
+symbols and flist for the list of function symbols.
+
+dlist, flist = parse_nm(nm_output)"""
+ data = DATA_RE.findall(nm_output)
+ func = FUNC_RE.findall(nm_output)
+
+ flist = []
+ for sym in data:
+ if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'):
+ flist.append(sym)
+
+ dlist = []
+ for sym in data:
+ if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'):
+ dlist.append(sym)
+
+ dlist.sort()
+ flist.sort()
+ return dlist, flist
+
+def output_def(dlist, flist, header, file=sys.stdout):
+    """Outputs the final DEF file to a file defaulting to stdout.
+
+output_def(dlist, flist, header, file=sys.stdout)"""
+ for data_sym in dlist:
+ header = header + '\t%s DATA\n' % data_sym
+ header = header + '\n' # blank line
+ for func_sym in flist:
+ header = header + '\t%s\n' % func_sym
+ file.write(header)
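+# A minimal sketch of the DEF file this produces (symbol names illustrative):
+#
+#     LIBRARY python39.dll
+#     ;CODE PRELOAD MOVEABLE DISCARDABLE
+#     ;DATA PRELOAD SINGLE
+#
+#     EXPORTS
+#         PyExc_ValueError DATA
+#
+#         PyObject_GetAttr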
+
+if __name__ == '__main__':
+ libfile, deffile = parse_cmd()
+ if deffile is None:
+ deffile = sys.stdout
+ else:
+ deffile = open(deffile, 'w')
+ nm_cmd = DEFAULT_NM + [str(libfile)]
+ nm_output = getnm(nm_cmd, shell=False)
+ dlist, flist = parse_nm(nm_output)
+ output_def(dlist, flist, DEF_HEADER, deffile)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py b/venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py
new file mode 100644
index 00000000..686e5ebd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/line_endings.py
@@ -0,0 +1,77 @@
+""" Functions for converting from DOS to UNIX line endings
+
+"""
+import os
+import re
+import sys
+
+
+def dos2unix(file):
+ "Replace CRLF with LF in argument files. Print names of changed files."
+ if os.path.isdir(file):
+ print(file, "Directory!")
+ return
+
+ with open(file, "rb") as fp:
+ data = fp.read()
+    if b'\0' in data:
+ print(file, "Binary!")
+ return
+
+    newdata = re.sub(b"\r\n", b"\n", data)
+ if newdata != data:
+ print('dos2unix:', file)
+ with open(file, "wb") as f:
+ f.write(newdata)
+ return file
+ else:
+ print(file, 'ok')
+
+def dos2unix_one_dir(modified_files, dir_name, file_names):
+ for file in file_names:
+ full_path = os.path.join(dir_name, file)
+ file = dos2unix(full_path)
+ if file is not None:
+ modified_files.append(file)
+
+def dos2unix_dir(dir_name):
+    # os.path.walk was removed in Python 3; os.walk provides the same
+    # (dirpath, filenames) pairs for the callback.
+    modified_files = []
+    for dirpath, dirnames, filenames in os.walk(dir_name):
+        dos2unix_one_dir(modified_files, dirpath, filenames)
+    return modified_files
+#----------------------------------
+
+def unix2dos(file):
+ "Replace LF with CRLF in argument files. Print names of changed files."
+ if os.path.isdir(file):
+ print(file, "Directory!")
+ return
+
+ with open(file, "rb") as fp:
+ data = fp.read()
+    if b'\0' in data:
+ print(file, "Binary!")
+ return
+    newdata = re.sub(b"\r\n", b"\n", data)
+    newdata = re.sub(b"\n", b"\r\n", newdata)
+ if newdata != data:
+ print('unix2dos:', file)
+ with open(file, "wb") as f:
+ f.write(newdata)
+ return file
+ else:
+ print(file, 'ok')
+
+def unix2dos_one_dir(modified_files, dir_name, file_names):
+    for file in file_names:
+        full_path = os.path.join(dir_name, file)
+        file = unix2dos(full_path)
+        if file is not None:
+            modified_files.append(file)
+
+def unix2dos_dir(dir_name):
+    modified_files = []
+    for dirpath, dirnames, filenames in os.walk(dir_name):
+        unix2dos_one_dir(modified_files, dirpath, filenames)
+    return modified_files
+
+if __name__ == "__main__":
+ dos2unix_dir(sys.argv[1])
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/log.py b/venv/lib/python3.9/site-packages/numpy/distutils/log.py
new file mode 100644
index 00000000..3347f56d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/log.py
@@ -0,0 +1,111 @@
+# Colored log
+import sys
+from distutils.log import * # noqa: F403
+from distutils.log import Log as old_Log
+from distutils.log import _global_log
+
+from numpy.distutils.misc_util import (red_text, default_text, cyan_text,
+ green_text, is_sequence, is_string)
+
+
+def _fix_args(args, flag=1):
+ if is_string(args):
+ return args.replace('%', '%%')
+ if flag and is_sequence(args):
+ return tuple([_fix_args(a, flag=0) for a in args])
+ return args
+
+
+class Log(old_Log):
+ def _log(self, level, msg, args):
+ if level >= self.threshold:
+ if args:
+ msg = msg % _fix_args(args)
+ print(_global_color_map[level](msg))
+ sys.stdout.flush()
+
+ def good(self, msg, *args):
+ """
+ If we log WARN messages, log this message as a 'nice' anti-warn
+ message.
+
+ """
+ if WARN >= self.threshold:
+ if args:
+ print(green_text(msg % _fix_args(args)))
+ else:
+ print(green_text(msg))
+ sys.stdout.flush()
+
+
+_global_log.__class__ = Log
+
+good = _global_log.good
+
+def set_threshold(level, force=False):
+    prev_level = _global_log.threshold
+    if prev_level > DEBUG or force:
+        _global_log.threshold = level
+        if level <= DEBUG:
+            info('set_threshold: setting threshold to DEBUG level,'
+                 ' it can be changed only with force argument')
+    else:
+        # If we're running at DEBUG, don't change the threshold, as there's
+        # likely a good reason why we're running at this level.
+        info('set_threshold: not changing threshold from DEBUG level'
+             ' %s to %s' % (prev_level, level))
+    return prev_level
+
+def get_threshold():
+ return _global_log.threshold
+
+def set_verbosity(v, force=False):
+ prev_level = _global_log.threshold
+ if v < 0:
+ set_threshold(ERROR, force)
+ elif v == 0:
+ set_threshold(WARN, force)
+ elif v == 1:
+ set_threshold(INFO, force)
+ elif v >= 2:
+ set_threshold(DEBUG, force)
+    return {FATAL: -2, ERROR: -1, WARN: 0, INFO: 1, DEBUG: 2}.get(prev_level, 1)
+
+
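+# Example: set_verbosity(-1) selects the ERROR threshold, 0 selects WARN,
+# 1 selects INFO, and 2 or more selects DEBUG; the previous verbosity is
+# returned, e.g. prev = set_verbosity(2).
+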
+_global_color_map = {
+    DEBUG: cyan_text,
+    INFO: default_text,
+    WARN: red_text,
+    ERROR: red_text,
+    FATAL: red_text
+}
+
+# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold.
+set_verbosity(0, force=True)
+
+
+_error = error
+_warn = warn
+_info = info
+_debug = debug
+
+
+def error(msg, *a, **kw):
+ _error(f"ERROR: {msg}", *a, **kw)
+
+
+def warn(msg, *a, **kw):
+ _warn(f"WARN: {msg}", *a, **kw)
+
+
+def info(msg, *a, **kw):
+ _info(f"INFO: {msg}", *a, **kw)
+
+
+def debug(msg, *a, **kw):
+ _debug(f"DEBUG: {msg}", *a, **kw)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c b/venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c
new file mode 100644
index 00000000..485a675d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/mingw/gfortran_vs2003_hack.c
@@ -0,0 +1,6 @@
+int _get_output_format(void)
+{
+ return 0;
+}
+
+int _imp____lc_codepage = 0;
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py
new file mode 100644
index 00000000..5d1c27a6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/mingw32ccompiler.py
@@ -0,0 +1,592 @@
+"""
+Support code for building Python extensions on Windows.
+
+ # NT stuff
+ # 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+ # 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+ # 3. Force windows to use g77
+
+"""
+import os
+import platform
+import sys
+import subprocess
+import re
+import textwrap
+
+# Overwrite certain distutils.ccompiler functions:
+import numpy.distutils.ccompiler # noqa: F401
+from numpy.distutils import log
+# NT stuff
+# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
+# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
+# --> this is done in numpy/distutils/ccompiler.py
+# 3. Force windows to use g77
+
+import distutils.cygwinccompiler
+from distutils.unixccompiler import UnixCCompiler
+from distutils.msvccompiler import get_build_version as get_build_msvc_version
+from distutils.errors import UnknownFileError
+from numpy.distutils.misc_util import (msvc_runtime_library,
+ msvc_runtime_version,
+ msvc_runtime_major,
+ get_build_architecture)
+
+def get_msvcr_replacement():
+ """Replacement for outdated version of get_msvcr from cygwinccompiler"""
+ msvcr = msvc_runtime_library()
+ return [] if msvcr is None else [msvcr]
+
+
+# Useful to generate table of symbols from a dll
+_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
+_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
+
+# the same as cygwin plus some additional parameters
+class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
+ """ A modified MingW32 compiler compatible with an MSVC built Python.
+
+ """
+
+ compiler_type = 'mingw32'
+
+    def __init__(self,
+                 verbose=0,
+                 dry_run=0,
+                 force=0):
+
+        distutils.cygwinccompiler.CygwinCCompiler.__init__(self, verbose,
+                                                           dry_run, force)
+
+ # **changes: eric jones 4/11/01
+ # 1. Check for import library on Windows. Build if it doesn't exist.
+
+ build_import_library()
+
+ # Check for custom msvc runtime library on Windows. Build if it doesn't exist.
+ msvcr_success = build_msvcr_library()
+ msvcr_dbg_success = build_msvcr_library(debug=True)
+ if msvcr_success or msvcr_dbg_success:
+ # add preprocessor statement for using customized msvcr lib
+ self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
+
+ # Define the MSVC version as hint for MinGW
+ msvcr_version = msvc_runtime_version()
+ if msvcr_version:
+ self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)
+
+        # MS_WIN64 should be defined when building for amd64 on windows,
+        # but python headers define it only for MS compilers, which has all
+        # kinds of bad consequences, like using Py_ModuleInit4 instead of
+        # Py_ModuleInit4_64, etc... So we add it here
+ if get_build_architecture() == 'AMD64':
+ self.set_executables(
+ compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
+ compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall '
+ '-Wstrict-prototypes',
+ linker_exe='gcc -g',
+ linker_so='gcc -g -shared')
+ else:
+ self.set_executables(
+ compiler='gcc -O2 -Wall',
+ compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
+ linker_exe='g++ ',
+ linker_so='g++ -shared')
+ # added for python2.3 support
+ # we can't pass it through set_executables because pre 2.2 would fail
+ self.compiler_cxx = ['g++']
+
+ # Maybe we should also append -mthreads, but then the finished dlls
+ # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
+ # thread-safe exception handling on `Mingw32')
+
+ # no additional libraries needed
+ #self.dll_libraries=[]
+ return
+
+ # __init__ ()
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir,
+ libraries,
+ library_dirs,
+ runtime_library_dirs,
+ export_symbols = None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ # Include the appropriate MSVC runtime library if Python was built
+ # with MSVC >= 7.0 (MinGW standard is msvcrt)
+ runtime_library = msvc_runtime_library()
+ if runtime_library:
+ if not libraries:
+ libraries = []
+ libraries.append(runtime_library)
+ args = (self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir,
+ libraries,
+ library_dirs,
+ runtime_library_dirs,
+ None, #export_symbols, we do this in our def-file
+ debug,
+ extra_preargs,
+ extra_postargs,
+ build_temp,
+ target_lang)
+ func = UnixCCompiler.link
+ func(*args[:func.__code__.co_argcount])
+ return
+
+    def object_filenames(self,
+                         source_filenames,
+                         strip_dir=0,
+                         output_dir=''):
+        if output_dir is None:
+            output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+            (base, ext) = os.path.splitext(os.path.normcase(src_name))
+
+ # added these lines to strip off windows drive letters
+ # without it, .o files are placed next to .c files
+ # instead of the build directory
+ drv, base = os.path.splitdrive(base)
+ if drv:
+ base = base[1:]
+
+            if ext not in (self.src_extensions + ['.rc', '.res']):
+                raise UnknownFileError(
+                    "unknown file type '%s' (from '%s')" % (ext, src_name))
+            if strip_dir:
+                base = os.path.basename(base)
+            if ext == '.res' or ext == '.rc':
+                # these need to be compiled to object files
+                obj_names.append(os.path.join(output_dir,
+                                              base + ext + self.obj_extension))
+            else:
+                obj_names.append(os.path.join(output_dir,
+                                              base + self.obj_extension))
+ return obj_names
+
+ # object_filenames ()
+
+
+def find_python_dll():
+ # We can't do much here:
+ # - find it in the virtualenv (sys.prefix)
+ # - find it in python main dir (sys.base_prefix, if in a virtualenv)
+ # - in system32,
+    # - otherwise (SxS), I don't know how to get it.
+ stems = [sys.prefix]
+ if sys.base_prefix != sys.prefix:
+ stems.append(sys.base_prefix)
+
+ sub_dirs = ['', 'lib', 'bin']
+ # generate possible combinations of directory trees and sub-directories
+ lib_dirs = []
+ for stem in stems:
+ for folder in sub_dirs:
+ lib_dirs.append(os.path.join(stem, folder))
+
+ # add system directory as well
+ if 'SYSTEMROOT' in os.environ:
+ lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))
+
+ # search in the file system for possible candidates
+ major_version, minor_version = tuple(sys.version_info[:2])
+ implementation = platform.python_implementation()
+ if implementation == 'CPython':
+ dllname = f'python{major_version}{minor_version}.dll'
+ elif implementation == 'PyPy':
+ dllname = f'libpypy{major_version}-c.dll'
+ else:
+ dllname = f'Unknown platform {implementation}'
+ print("Looking for %s" % dllname)
+ for folder in lib_dirs:
+ dll = os.path.join(folder, dllname)
+ if os.path.exists(dll):
+ return dll
+
+ raise ValueError("%s not found in %s" % (dllname, lib_dirs))
+
+def dump_table(dll):
+ st = subprocess.check_output(["objdump.exe", "-p", dll])
+ return st.split(b'\n')
+
+def generate_def(dll, dfile):
+ """Given a dll file location, get all its exported symbols and dump them
+ into the given def file.
+
+ The .def file will be overwritten"""
+ dump = dump_table(dll)
+ for i in range(len(dump)):
+ if _START.match(dump[i].decode()):
+ break
+ else:
+ raise ValueError("Symbol table not found")
+
+ syms = []
+ for j in range(i+1, len(dump)):
+ m = _TABLE.match(dump[j].decode())
+ if m:
+ syms.append((int(m.group(1).strip()), m.group(2)))
+ else:
+ break
+
+ if len(syms) == 0:
+ log.warn('No symbols found in %s' % dll)
+
+ with open(dfile, 'w') as d:
+ d.write('LIBRARY %s\n' % os.path.basename(dll))
+ d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
+ d.write(';DATA PRELOAD SINGLE\n')
+ d.write('\nEXPORTS\n')
+ for s in syms:
+ #d.write('@%d %s\n' % (s[0], s[1]))
+ d.write('%s\n' % s[1])
+
+def find_dll(dll_name):
+
+ arch = {'AMD64' : 'amd64',
+ 'Intel' : 'x86'}[get_build_architecture()]
+
+ def _find_dll_in_winsxs(dll_name):
+ # Walk through the WinSxS directory to find the dll.
+ winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
+ 'winsxs')
+ if not os.path.exists(winsxs_path):
+ return None
+ for root, dirs, files in os.walk(winsxs_path):
+ if dll_name in files and arch in root:
+ return os.path.join(root, dll_name)
+ return None
+
+ def _find_dll_in_path(dll_name):
+ # First, look in the Python directory, then scan PATH for
+ # the given dll name.
+ for path in [sys.prefix] + os.environ['PATH'].split(';'):
+ filepath = os.path.join(path, dll_name)
+ if os.path.exists(filepath):
+ return os.path.abspath(filepath)
+
+ return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)
+
+def build_msvcr_library(debug=False):
+ if os.name != 'nt':
+ return False
+
+    # If the version number is None, then we couldn't find the MSVC runtime at
+    # all, because we are running on a Python distribution that was custom
+    # compiled; trust that the compiler is the same as the one available to us
+    # now, and that it is capable of linking with the correct runtime without
+    # any extra options.
+ msvcr_ver = msvc_runtime_major()
+ if msvcr_ver is None:
+ log.debug('Skip building import library: '
+ 'Runtime is not compiled with MSVC')
+ return False
+
+ # Skip using a custom library for versions < MSVC 8.0
+ if msvcr_ver < 80:
+ log.debug('Skip building msvcr library:'
+ ' custom functionality not present')
+ return False
+
+ msvcr_name = msvc_runtime_library()
+ if debug:
+ msvcr_name += 'd'
+
+ # Skip if custom library already exists
+ out_name = "lib%s.a" % msvcr_name
+ out_file = os.path.join(sys.prefix, 'libs', out_name)
+ if os.path.isfile(out_file):
+ log.debug('Skip building msvcr library: "%s" exists' %
+ (out_file,))
+ return True
+
+ # Find the msvcr dll
+ msvcr_dll_name = msvcr_name + '.dll'
+ dll_file = find_dll(msvcr_dll_name)
+ if not dll_file:
+ log.warn('Cannot build msvcr library: "%s" not found' %
+ msvcr_dll_name)
+ return False
+
+ def_name = "lib%s.def" % msvcr_name
+ def_file = os.path.join(sys.prefix, 'libs', def_name)
+
+ log.info('Building msvcr library: "%s" (from %s)' \
+ % (out_file, dll_file))
+
+ # Generate a symbol definition file from the msvcr dll
+ generate_def(dll_file, def_file)
+
+ # Create a custom mingw library for the given symbol definitions
+ cmd = ['dlltool', '-d', def_file, '-l', out_file]
+ retcode = subprocess.call(cmd)
+
+ # Clean up symbol definitions
+ os.remove(def_file)
+
+ return (not retcode)
+
+def build_import_library():
+ if os.name != 'nt':
+ return
+
+ arch = get_build_architecture()
+ if arch == 'AMD64':
+ return _build_import_library_amd64()
+ elif arch == 'Intel':
+ return _build_import_library_x86()
+ else:
+ raise ValueError("Unhandled arch %s" % arch)
+
+def _check_for_import_lib():
+ """Check if an import library for the Python runtime already exists."""
+ major_version, minor_version = tuple(sys.version_info[:2])
+
+ # patterns for the file name of the library itself
+ patterns = ['libpython%d%d.a',
+ 'libpython%d%d.dll.a',
+ 'libpython%d.%d.dll.a']
+
+ # directory trees that may contain the library
+ stems = [sys.prefix]
+ if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
+ stems.append(sys.base_prefix)
+ elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+ stems.append(sys.real_prefix)
+
+ # possible subdirectories within those trees where it is placed
+ sub_dirs = ['libs', 'lib']
+
+ # generate a list of candidate locations
+ candidates = []
+ for pat in patterns:
+ filename = pat % (major_version, minor_version)
+ for stem_dir in stems:
+ for folder in sub_dirs:
+ candidates.append(os.path.join(stem_dir, folder, filename))
+
+ # test the filesystem to see if we can find any of these
+ for fullname in candidates:
+ if os.path.isfile(fullname):
+ # already exists, in location given
+ return (True, fullname)
+
+ # needs to be built, preferred location given first
+ return (False, candidates[0])
+
+def _build_import_library_amd64():
+ out_exists, out_file = _check_for_import_lib()
+ if out_exists:
+ log.debug('Skip building import library: "%s" exists', out_file)
+ return
+
+ # get the runtime dll for which we are building import library
+ dll_file = find_python_dll()
+ log.info('Building import library (arch=AMD64): "%s" (from %s)' %
+ (out_file, dll_file))
+
+ # generate symbol list from this library
+ def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+ def_file = os.path.join(sys.prefix, 'libs', def_name)
+ generate_def(dll_file, def_file)
+
+ # generate import library from this symbol list
+ cmd = ['dlltool', '-d', def_file, '-l', out_file]
+ subprocess.check_call(cmd)
+
+def _build_import_library_x86():
+ """ Build the import libraries for Mingw32-gcc on Windows
+ """
+ out_exists, out_file = _check_for_import_lib()
+ if out_exists:
+ log.debug('Skip building import library: "%s" exists', out_file)
+ return
+
+ lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
+ lib_file = os.path.join(sys.prefix, 'libs', lib_name)
+ if not os.path.isfile(lib_file):
+ # didn't find library file in virtualenv, try base distribution, too,
+ # and use that instead if found there. for Python 2.7 venvs, the base
+ # directory is in attribute real_prefix instead of base_prefix.
+ if hasattr(sys, 'base_prefix'):
+ base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+ elif hasattr(sys, 'real_prefix'):
+ base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
+ else:
+ base_lib = '' # os.path.isfile('') == False
+
+ if os.path.isfile(base_lib):
+ lib_file = base_lib
+ else:
+ log.warn('Cannot build import library: "%s" not found', lib_file)
+ return
+ log.info('Building import library (ARCH=x86): "%s"', out_file)
+
+ from numpy.distutils import lib2def
+
+ def_name = "python%d%d.def" % tuple(sys.version_info[:2])
+ def_file = os.path.join(sys.prefix, 'libs', def_name)
+ nm_output = lib2def.getnm(
+ lib2def.DEFAULT_NM + [lib_file], shell=False)
+ dlist, flist = lib2def.parse_nm(nm_output)
+ with open(def_file, 'w') as fid:
+ lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
+
+    dll_name = find_python_dll()
+
+ cmd = ["dlltool",
+ "--dllname", dll_name,
+ "--def", def_file,
+ "--output-lib", out_file]
+ status = subprocess.check_output(cmd)
+ if status:
+ log.warn('Failed to build import library for gcc. Linking will fail.')
+ return
+
+#=====================================
+# Dealing with Visual Studio MANIFESTS
+#=====================================
+
+# Functions to deal with Visual Studio manifests. Manifests are a mechanism to
+# enforce strong DLL versioning on Windows, and have nothing to do with
+# distutils MANIFEST. Manifests are XML files with version info, used by the
+# OS loader; they are necessary when linking against a DLL not in the system
+# path; in particular, the official python 2.6 binary is built against the
+# MS runtime 9 (the one from VS 2008), which is not available on most windows
+# systems; the python 2.6 installer does install it in the Win SxS (Side by
+# side) directory, but this requires the manifest for this to work. This is a
+# big mess, thanks MS for a wonderful system.
+
+# XXX: ideally, we should use exactly the same version as used by python. I
+# submitted a patch to get this version, but it was only included for python
+# 2.6.1 and above. So for versions below, we use a "best guess".
+_MSVCRVER_TO_FULLVER = {}
+if sys.platform == 'win32':
+ try:
+ import msvcrt
+        # I took one version in my SxS directory: no idea whether it is the
+        # right one, and we can't retrieve it from python
+ _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
+ _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
+ # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
+ # on Windows XP:
+ _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+ crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None)
+ if crt_ver is not None: # Available at least back to Python 3.3
+ maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups()
+ _MSVCRVER_TO_FULLVER[maj + min] = crt_ver
+ del maj, min
+ del crt_ver
+ except ImportError:
+ # If we are here, means python was not built with MSVC. Not sure what
+ # to do in that case: manifest building will fail, but it should not be
+ # used in that case anyway
+ log.warn('Cannot import msvcrt: using manifest will not be possible')
+
+def msvc_manifest_xml(maj, min):
+ """Given a major and minor version of the MSVCR, returns the
+ corresponding XML file."""
+ try:
+ fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
+ except KeyError:
+ raise ValueError("Version %d,%d of MSVCRT not supported yet" %
+ (maj, min)) from None
+    # Don't be fooled, it looks like XML, but it is not. In particular, it
+ # should not have any space before starting, and its size should be
+ # divisible by 4, most likely for alignment constraints when the xml is
+ # embedded in the binary...
+ # This template was copied directly from the python 2.6 binary (using
+ # strings.exe from mingw on python.exe).
+ template = textwrap.dedent("""\
+ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+ <dependency>
+ <dependentAssembly>
+ <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+ </dependentAssembly>
+ </dependency>
+ </assembly>""")
+
+ return template % {'fullver': fullver, 'maj': maj, 'min': min}
+
+def manifest_rc(name, type='dll'):
+ """Return the rc file used to generate the res file which will be embedded
+ as manifest for given manifest file name, of given type ('dll' or
+ 'exe').
+
+ Parameters
+ ----------
+ name : str
+ name of the manifest file to embed
+ type : str {'dll', 'exe'}
+ type of the binary which will embed the manifest
+
+ """
+ if type == 'dll':
+ rctype = 2
+ elif type == 'exe':
+ rctype = 1
+ else:
+ raise ValueError("Type %s not supported" % type)
+
+ return """\
+#include "winuser.h"
+%d RT_MANIFEST %s""" % (rctype, name)
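+# For example (file name illustrative), manifest_rc('foo.exe.manifest', 'exe')
+# returns:
+#
+#     #include "winuser.h"
+#     1 RT_MANIFEST foo.exe.manifest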
+
+def check_embedded_msvcr_match_linked(msver):
+ """msver is the ms runtime version used for the MANIFEST."""
+ # check msvcr major version are the same for linking and
+ # embedding
+    maj = msvc_runtime_major()
+    if maj:
+        if maj != int(msver):
+            raise ValueError(
+                "Discrepancy between linked msvcr "
+                "(%d) and the one about to be embedded "
+                "(%d)" % (int(msver), maj))
+
+def configtest_name(config):
+ base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c"))
+ return os.path.splitext(base)[0]
+
+def manifest_name(config):
+    # Get configtest name (including suffix)
+ root = configtest_name(config)
+ exext = config.compiler.exe_extension
+ return root + exext + ".manifest"
+
+def rc_name(config):
+ # Get configtest name (including suffix)
+ root = configtest_name(config)
+ return root + ".rc"
+
+def generate_manifest(config):
+ msver = get_build_msvc_version()
+ if msver is not None:
+ if msver >= 8:
+ check_embedded_msvcr_match_linked(msver)
+ ma_str, mi_str = str(msver).split('.')
+ # Write the manifest file
+ manxml = msvc_manifest_xml(int(ma_str), int(mi_str))
+ with open(manifest_name(config), "w") as man:
+ config.temp_files.append(manifest_name(config))
+ man.write(manxml)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py b/venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py
new file mode 100644
index 00000000..79ba0851
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/misc_util.py
@@ -0,0 +1,2493 @@
+import os
+import re
+import sys
+import copy
+import glob
+import atexit
+import tempfile
+import subprocess
+import shutil
+import multiprocessing
+import textwrap
+import importlib.util
+from threading import local as tlocal
+from functools import reduce
+
+import distutils
+from distutils.errors import DistutilsError
+
+# stores temporary directory of each thread to only create one per thread
+_tdata = tlocal()
+
+# store all created temporary directories so they can be deleted on exit
+_tmpdirs = []
+def clean_up_temporary_directory():
+ if _tmpdirs is not None:
+ for d in _tmpdirs:
+ try:
+ shutil.rmtree(d)
+ except OSError:
+ pass
+
+atexit.register(clean_up_temporary_directory)
+
+__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
+ 'dict_append', 'appendpath', 'generate_config_py',
+ 'get_cmd', 'allpath', 'get_mathlibs',
+ 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
+ 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings',
+ 'has_f_sources', 'has_cxx_sources', 'filter_sources',
+ 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
+ 'get_script_files', 'get_lib_source_files', 'get_data_files',
+ 'dot_join', 'get_frame', 'minrelpath', 'njoin',
+ 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
+ 'get_build_architecture', 'get_info', 'get_pkg_info',
+ 'get_num_build_jobs', 'sanitize_cxx_flags',
+ 'exec_mod_from_location']
+
+class InstallableLib:
+ """
+ Container to hold information on an installable library.
+
+ Parameters
+ ----------
+ name : str
+ Name of the installed library.
+ build_info : dict
+ Dictionary holding build information.
+ target_dir : str
+ Absolute path specifying where to install the library.
+
+ See Also
+ --------
+ Configuration.add_installed_library
+
+ Notes
+ -----
+ The three parameters are stored as attributes with the same names.
+
+ """
+ def __init__(self, name, build_info, target_dir):
+ self.name = name
+ self.build_info = build_info
+ self.target_dir = target_dir
+
+
+def get_num_build_jobs():
+ """
+ Get number of parallel build jobs set by the --parallel command line
+ argument of setup.py
+ If the command did not receive a setting the environment variable
+ NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
+    processors on the system, with a maximum of 8 (to prevent
+    overloading the system if there are a lot of CPUs).
+
+ Returns
+ -------
+ out : int
+ number of parallel jobs that can be run
+
+ """
+ from numpy.distutils.core import get_distribution
+ try:
+ cpu_count = len(os.sched_getaffinity(0))
+ except AttributeError:
+ cpu_count = multiprocessing.cpu_count()
+ cpu_count = min(cpu_count, 8)
+ envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
+ dist = get_distribution()
+ # may be None during configuration
+ if dist is None:
+ return envjobs
+
+ # any of these three may have the job set, take the largest
+ cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
+ getattr(dist.get_command_obj('build_ext'), 'parallel', None),
+ getattr(dist.get_command_obj('build_clib'), 'parallel', None))
+ if all(x is None for x in cmdattr):
+ return envjobs
+ else:
+ return max(x for x in cmdattr if x is not None)
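+# Usage sketch: either of the following makes get_num_build_jobs() return 4,
+# assuming no build command overrides it:
+#
+#     NPY_NUM_BUILD_JOBS=4 python setup.py build
+#     python setup.py build --parallel 4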
+
+def quote_args(args):
+ """Quote list of arguments.
+
+ .. deprecated:: 1.22.
+ """
+ import warnings
+ warnings.warn('"quote_args" is deprecated.',
+ DeprecationWarning, stacklevel=2)
+    # don't use _nt_quote_args as it does not check if
+    # args items already have quotes or not.
+ args = list(args)
+ for i in range(len(args)):
+ a = args[i]
+ if ' ' in a and a[0] not in '"\'':
+ args[i] = '"%s"' % (a)
+ return args
+
+def allpath(name):
+ "Convert a /-separated pathname to one using the OS's path separator."
+ split = name.split('/')
+ return os.path.join(*split)
+
+def rel_path(path, parent_path):
+ """Return path relative to parent_path."""
+ # Use realpath to avoid issues with symlinked dirs (see gh-7707)
+ pd = os.path.realpath(os.path.abspath(parent_path))
+ apath = os.path.realpath(os.path.abspath(path))
+ if len(apath) < len(pd):
+ return path
+ if apath == pd:
+ return ''
+ if pd == apath[:len(pd)]:
+ assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
+ path = apath[len(pd)+1:]
+ return path
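+# For example (POSIX paths for illustration), rel_path('/base/pkg/sub', '/base')
+# returns 'pkg/sub'; a path outside parent_path is returned unchanged.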
+
+def get_path_from_frame(frame, parent_path=None):
+ """Return path of the module given a frame object from the call stack.
+
+ Returned path is relative to parent_path when given,
+ otherwise it is absolute path.
+ """
+
+ # First, try to find if the file name is in the frame.
+ try:
+ caller_file = eval('__file__', frame.f_globals, frame.f_locals)
+ d = os.path.dirname(os.path.abspath(caller_file))
+ except NameError:
+ # __file__ is not defined, so let's try __name__. We try this second
+ # because setuptools spoofs __name__ to be '__main__' even though
+ # sys.modules['__main__'] might be something else, like easy_install(1).
+ caller_name = eval('__name__', frame.f_globals, frame.f_locals)
+ __import__(caller_name)
+ mod = sys.modules[caller_name]
+ if hasattr(mod, '__file__'):
+ d = os.path.dirname(os.path.abspath(mod.__file__))
+ else:
+ # we're probably running setup.py as execfile("setup.py")
+ # (likely we're building an egg)
+ d = os.path.abspath('.')
+
+ if parent_path is not None:
+ d = rel_path(d, parent_path)
+
+ return d or '.'
+
+def njoin(*path):
+    """Join two or more pathname components:
+    - convert a /-separated pathname to one using the OS's path separator.
+    - resolve `..` and `.` from path.
+
+    Either passing n arguments, as in njoin('a', 'b'), or a sequence of
+    n names, as in njoin(['a', 'b']), is handled, as is a mixture of such
+    arguments.
+ """
+ paths = []
+ for p in path:
+ if is_sequence(p):
+ # njoin(['a', 'b'], 'c')
+ paths.append(njoin(*p))
+ else:
+ assert is_string(p)
+ paths.append(p)
+ path = paths
+ if not path:
+ # njoin()
+ joined = ''
+ else:
+ # njoin('a', 'b')
+ joined = os.path.join(*path)
+ if os.path.sep != '/':
+ joined = joined.replace('/', os.path.sep)
+ return minrelpath(joined)
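+# For example (POSIX separator shown), njoin('a', ['b', 'c'], 'd') returns
+# 'a/b/c/d' and njoin('a/b', '../c') returns 'a/c'.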
+
+def get_mathlibs(path=None):
+ """Return the MATHLIB line from numpyconfig.h
+ """
+ if path is not None:
+ config_file = os.path.join(path, '_numpyconfig.h')
+ else:
+ # Look for the file in each of the numpy include directories.
+ dirs = get_numpy_include_dirs()
+ for path in dirs:
+ fn = os.path.join(path, '_numpyconfig.h')
+ if os.path.exists(fn):
+ config_file = fn
+ break
+ else:
+ raise DistutilsError('_numpyconfig.h not found in numpy include '
+ 'dirs %r' % (dirs,))
+
+ with open(config_file) as fid:
+ mathlibs = []
+ s = '#define MATHLIB'
+ for line in fid:
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
+ return mathlibs
+
+def minrelpath(path):
+ """Resolve `..` and '.' from path.
+ """
+ if not is_string(path):
+ return path
+ if '.' not in path:
+ return path
+ l = path.split(os.sep)
+ while l:
+ try:
+ i = l.index('.', 1)
+ except ValueError:
+ break
+ del l[i]
+ j = 1
+ while l:
+ try:
+ i = l.index('..', j)
+ except ValueError:
+ break
+ if l[i-1]=='..':
+ j += 1
+ else:
+ del l[i], l[i-1]
+ j = 1
+ if not l:
+ return ''
+ return os.sep.join(l)
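+# For example, minrelpath('a/b/../c/./d') reduces to 'a/c/d'.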
+
+def sorted_glob(fileglob):
+ """sorts output of python glob for https://bugs.python.org/issue30461
+ to allow extensions to have reproducible build results"""
+ return sorted(glob.glob(fileglob))
+
+def _fix_paths(paths, local_path, include_non_existing):
+ assert is_sequence(paths), repr(type(paths))
+ new_paths = []
+ assert not is_string(paths), repr(paths)
+ for n in paths:
+ if is_string(n):
+ if '*' in n or '?' in n:
+ p = sorted_glob(n)
+ p2 = sorted_glob(njoin(local_path, n))
+ if p2:
+ new_paths.extend(p2)
+ elif p:
+ new_paths.extend(p)
+ else:
+ if include_non_existing:
+ new_paths.append(n)
+ print('could not resolve pattern in %r: %r' %
+ (local_path, n))
+ else:
+ n2 = njoin(local_path, n)
+ if os.path.exists(n2):
+ new_paths.append(n2)
+ else:
+ if os.path.exists(n):
+ new_paths.append(n)
+ elif include_non_existing:
+ new_paths.append(n)
+ if not os.path.exists(n):
+ print('non-existing path in %r: %r' %
+ (local_path, n))
+
+ elif is_sequence(n):
+ new_paths.extend(_fix_paths(n, local_path, include_non_existing))
+ else:
+ new_paths.append(n)
+ return [minrelpath(p) for p in new_paths]
+
+def gpaths(paths, local_path='', include_non_existing=True):
+ """Apply glob to paths and prepend local_path if needed.
+ """
+ if is_string(paths):
+ paths = (paths,)
+ return _fix_paths(paths, local_path, include_non_existing)
+
+def make_temp_file(suffix='', prefix='', text=True):
+ if not hasattr(_tdata, 'tempdir'):
+ _tdata.tempdir = tempfile.mkdtemp()
+ _tmpdirs.append(_tdata.tempdir)
+ fid, name = tempfile.mkstemp(suffix=suffix,
+ prefix=prefix,
+ dir=_tdata.tempdir,
+ text=text)
+ fo = os.fdopen(fid, 'w')
+ return fo, name
+
+# Hooks for colored terminal output.
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
+def terminal_has_colors():
+ if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
+ # Avoid importing curses that causes illegal operation
+ # with a message:
+ # PYTHON2 caused an invalid page fault in
+ # module CYGNURSES7.DLL as 015f:18bbfc28
+ # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
+ # ssh to Win32 machine from debian
+ # curses.version is 2.2
+ # CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
+ return 0
+ if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+ try:
+ import curses
+ curses.setupterm()
+ if (curses.tigetnum("colors") >= 0
+ and curses.tigetnum("pairs") >= 0
+ and ((curses.tigetstr("setf") is not None
+ and curses.tigetstr("setb") is not None)
+ or (curses.tigetstr("setaf") is not None
+ and curses.tigetstr("setab") is not None)
+ or curses.tigetstr("scp") is not None)):
+ return 1
+ except Exception:
+ pass
+ return 0
+
+if terminal_has_colors():
+ _colour_codes = dict(black=0, red=1, green=2, yellow=3,
+ blue=4, magenta=5, cyan=6, white=7, default=9)
+ def colour_text(s, fg=None, bg=None, bold=False):
+ seq = []
+ if bold:
+ seq.append('1')
+ if fg:
+ fgcode = 30 + _colour_codes.get(fg.lower(), 0)
+ seq.append(str(fgcode))
+ if bg:
+ bgcode = 40 + _colour_codes.get(bg.lower(), 7)
+ seq.append(str(bgcode))
+ if seq:
+ return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
+ else:
+ return s
+else:
+    def colour_text(s, fg=None, bg=None, bold=False):
+        return s
+
+def default_text(s):
+ return colour_text(s, 'default')
+def red_text(s):
+ return colour_text(s, 'red')
+def green_text(s):
+ return colour_text(s, 'green')
+def yellow_text(s):
+ return colour_text(s, 'yellow')
+def cyan_text(s):
+ return colour_text(s, 'cyan')
+def blue_text(s):
+ return colour_text(s, 'blue')
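+# On a colour-capable terminal, red_text('error') returns the ANSI-wrapped
+# string '\x1b[31merror\x1b[0m'; without colour support the text is returned
+# unchanged.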
+
+#########################
+
+def cyg2win32(path: str) -> str:
+ """Convert a path from Cygwin-native to Windows-native.
+
+    Uses the cygpath utility (part of the Base install) to do the
+    actual conversion. On platforms other than Cygwin, the path is
+    returned unchanged.
+
+ Handles the default ``/cygdrive`` mount prefix as well as the
+ ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such
+ as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or
+ ``/home/username``
+
+ Parameters
+ ----------
+ path : str
+ The path to convert
+
+ Returns
+ -------
+ converted_path : str
+ The converted path
+
+ Notes
+ -----
+ Documentation for cygpath utility:
+ https://cygwin.com/cygwin-ug-net/cygpath.html
+ Documentation for the C function it wraps:
+ https://cygwin.com/cygwin-api/func-cygwin-conv-path.html
+
+ """
+ if sys.platform != "cygwin":
+ return path
+ return subprocess.check_output(
+ ["/usr/bin/cygpath", "--windows", path], text=True
+ )
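+# Illustrative example (the exact mapping depends on the Cygwin mount table,
+# and the subprocess output may retain cygpath's trailing newline):
+#     cyg2win32('/cygdrive/c/Users')  ->  'C:\\Users'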
+
+
+def mingw32():
+ """Return true when using mingw32 environment.
+ """
+ if sys.platform=='win32':
+ if os.environ.get('OSTYPE', '')=='msys':
+ return True
+ if os.environ.get('MSYSTEM', '')=='MINGW32':
+ return True
+ return False
+
+def msvc_runtime_version():
+ "Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
+ msc_pos = sys.version.find('MSC v.')
+ if msc_pos != -1:
+ msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
+ else:
+ msc_ver = None
+ return msc_ver
+
+def msvc_runtime_library():
+ "Return name of MSVC runtime library if Python was built with MSVC >= 7"
+    ver = msvc_runtime_major()
+ if ver:
+ if ver < 140:
+ return "msvcr%i" % ver
+ else:
+ return "vcruntime%i" % ver
+ else:
+ return None
+
+def msvc_runtime_major():
+ "Return major version of MSVC runtime coded like get_build_msvc_version"
+ major = {1300: 70, # MSVC 7.0
+ 1310: 71, # MSVC 7.1
+ 1400: 80, # MSVC 8
+ 1500: 90, # MSVC 9 (aka 2008)
+ 1600: 100, # MSVC 10 (aka 2010)
+ 1900: 140, # MSVC 14 (aka 2015)
+ }.get(msvc_runtime_version(), None)
+ return major
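+# Worked example: a sys.version containing 'MSC v.1900' gives
+# msvc_runtime_version() == 1900, msvc_runtime_major() == 140 and hence
+# msvc_runtime_library() == 'vcruntime140'.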
+
+#########################
+
+#XXX need support for .C that is also C++
+cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
+fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
+f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
+f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
+def _get_f90_modules(source):
+ """Return a list of Fortran f90 module names that
+ given source file defines.
+ """
+ if not f90_ext_match(source):
+ return []
+ modules = []
+ with open(source, 'r') as f:
+ for line in f:
+ m = f90_module_name_match(line)
+ if m:
+ name = m.group('name')
+ modules.append(name)
+ # break # XXX can we assume that there is one module per file?
+ return modules
+
+def is_string(s):
+ return isinstance(s, str)
+
+def all_strings(lst):
+ """Return True if all items in lst are string objects. """
+ for item in lst:
+ if not is_string(item):
+ return False
+ return True
+
+def is_sequence(seq):
+ if is_string(seq):
+ return False
+ try:
+ len(seq)
+ except Exception:
+ return False
+ return True
+
+def is_glob_pattern(s):
+ return is_string(s) and ('*' in s or '?' in s)
+
+def as_list(seq):
+ if is_sequence(seq):
+ return list(seq)
+ else:
+ return [seq]
+
+def get_language(sources):
+ # not used in numpy/scipy packages, use build_ext.detect_language instead
+ """Determine language value (c,f77,f90) from sources """
+ language = None
+ for source in sources:
+ if isinstance(source, str):
+ if f90_ext_match(source):
+ language = 'f90'
+ break
+ elif fortran_ext_match(source):
+ language = 'f77'
+ return language
+
+def has_f_sources(sources):
+ """Return True if sources contains Fortran files """
+ for source in sources:
+ if fortran_ext_match(source):
+ return True
+ return False
+
+def has_cxx_sources(sources):
+ """Return True if sources contains C++ files """
+ for source in sources:
+ if cxx_ext_match(source):
+ return True
+ return False
+
+def filter_sources(sources):
+ """Return four lists of filenames containing
+ C, C++, Fortran, and Fortran 90 module sources,
+ respectively.
+ """
+ c_sources = []
+ cxx_sources = []
+ f_sources = []
+ fmodule_sources = []
+ for source in sources:
+ if fortran_ext_match(source):
+ modules = _get_f90_modules(source)
+ if modules:
+ fmodule_sources.append(source)
+ else:
+ f_sources.append(source)
+ elif cxx_ext_match(source):
+ cxx_sources.append(source)
+ else:
+ c_sources.append(source)
+ return c_sources, cxx_sources, f_sources, fmodule_sources
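+# For example (hypothetical file names), filter_sources(['a.c', 'b.cpp',
+# 'c.f', 'd.f90']) classifies a.c as C, b.cpp as C++ and c.f as Fortran;
+# d.f90 is opened and counted as a module source only if it defines a module.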
+
+
+def _get_headers(directory_list):
+ # get *.h files from list of directories
+ headers = []
+ for d in directory_list:
+ head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
+ headers.extend(head)
+ return headers
+
+def _get_directories(list_of_sources):
+ # get unique directories from list of sources.
+ direcs = []
+ for f in list_of_sources:
+ d = os.path.split(f)
+ if d[0] != '' and not d[0] in direcs:
+ direcs.append(d[0])
+ return direcs
+
+def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
+ """
+ Return commandline representation used to determine if a file needs
+ to be recompiled
+ """
+ cmdline = 'commandline: '
+ cmdline += ' '.join(cc_args)
+ cmdline += ' '.join(extra_postargs)
+ cmdline += ' '.join(pp_opts) + '\n'
+ return cmdline
+
+
+def get_dependencies(sources):
+ #XXX scan sources for include statements
+ return _get_headers(_get_directories(sources))
+
+def is_local_src_dir(directory):
+ """Return true if directory is local directory.
+ """
+ if not is_string(directory):
+ return False
+ abs_dir = os.path.abspath(directory)
+ c = os.path.commonprefix([os.getcwd(), abs_dir])
+ new_dir = abs_dir[len(c):].split(os.sep)
+ if new_dir and not new_dir[0]:
+ new_dir = new_dir[1:]
+ if new_dir and new_dir[0]=='build':
+ return False
+ new_dir = os.sep.join(new_dir)
+ return os.path.isdir(new_dir)
+
+def general_source_files(top_path):
+ pruned_directories = {'CVS':1, '.svn':1, 'build':1}
+ prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
+ for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
+ pruned = [ d for d in dirnames if d not in pruned_directories ]
+ dirnames[:] = pruned
+ for f in filenames:
+ if not prune_file_pat.search(f):
+ yield os.path.join(dirpath, f)
+
+def general_source_directories_files(top_path):
+ """Return a directory name relative to top_path and
+ files contained.
+ """
+ pruned_directories = ['CVS', '.svn', 'build']
+ prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
+ for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
+ pruned = [ d for d in dirnames if d not in pruned_directories ]
+ dirnames[:] = pruned
+ for d in dirnames:
+ dpath = os.path.join(dirpath, d)
+ rpath = rel_path(dpath, top_path)
+ files = []
+ for f in os.listdir(dpath):
+ fn = os.path.join(dpath, f)
+ if os.path.isfile(fn) and not prune_file_pat.search(fn):
+ files.append(fn)
+ yield rpath, files
+ dpath = top_path
+ rpath = rel_path(dpath, top_path)
+ filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
+ if not prune_file_pat.search(f)]
+ files = [f for f in filenames if os.path.isfile(f)]
+ yield rpath, files
+
+
+def get_ext_source_files(ext):
+ # Get sources and any include files in the same directory.
+ filenames = []
+ sources = [_m for _m in ext.sources if is_string(_m)]
+ filenames.extend(sources)
+ filenames.extend(get_dependencies(sources))
+ for d in ext.depends:
+ if is_local_src_dir(d):
+ filenames.extend(list(general_source_files(d)))
+ elif os.path.isfile(d):
+ filenames.append(d)
+ return filenames
+
+def get_script_files(scripts):
+ scripts = [_m for _m in scripts if is_string(_m)]
+ return scripts
+
+def get_lib_source_files(lib):
+ filenames = []
+ sources = lib[1].get('sources', [])
+ sources = [_m for _m in sources if is_string(_m)]
+ filenames.extend(sources)
+ filenames.extend(get_dependencies(sources))
+ depends = lib[1].get('depends', [])
+ for d in depends:
+ if is_local_src_dir(d):
+ filenames.extend(list(general_source_files(d)))
+ elif os.path.isfile(d):
+ filenames.append(d)
+ return filenames
+
+def get_shared_lib_extension(is_python_ext=False):
+ """Return the correct file extension for shared libraries.
+
+ Parameters
+ ----------
+ is_python_ext : bool, optional
+ Whether the shared library is a Python extension. Default is False.
+
+ Returns
+ -------
+ so_ext : str
+ The shared library extension.
+
+ Notes
+ -----
+ For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
+ and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
+ POSIX systems according to PEP 3149.
+
+ """
+ confvars = distutils.sysconfig.get_config_vars()
+ so_ext = confvars.get('EXT_SUFFIX', '')
+
+ if not is_python_ext:
+ # hardcode known values, config vars (including SHLIB_SUFFIX) are
+ # unreliable (see #3182)
+ # darwin, windows and debug linux are wrong in 3.3.1 and older
+ if (sys.platform.startswith('linux') or
+ sys.platform.startswith('gnukfreebsd')):
+ so_ext = '.so'
+ elif sys.platform.startswith('darwin'):
+ so_ext = '.dylib'
+ elif sys.platform.startswith('win'):
+ so_ext = '.dll'
+ else:
+ # fall back to config vars for unknown platforms
+ # fix long extension for Python >=3.2, see PEP 3149.
+ if 'SOABI' in confvars:
+ # Does nothing unless SOABI config var exists
+ so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
+
+ return so_ext
+
+def get_data_files(data):
+ if is_string(data):
+ return [data]
+ sources = data[1]
+ filenames = []
+ for s in sources:
+ if hasattr(s, '__call__'):
+ continue
+ if is_local_src_dir(s):
+ filenames.extend(list(general_source_files(s)))
+ elif is_string(s):
+ if os.path.isfile(s):
+ filenames.append(s)
+ else:
+ print('Not existing data file:', s)
+ else:
+ raise TypeError(repr(s))
+ return filenames
+
+def dot_join(*args):
+ return '.'.join([a for a in args if a])
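+# For example, dot_join('numpy', '', 'distutils') returns 'numpy.distutils';
+# empty components are dropped.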
+
+def get_frame(level=0):
+ """Return frame object from call stack with given level.
+ """
+ try:
+ return sys._getframe(level+1)
+ except AttributeError:
+ frame = sys.exc_info()[2].tb_frame
+ for _ in range(level+1):
+ frame = frame.f_back
+ return frame
+
+
+######################
+
+class Configuration:
+
+ _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
+ 'libraries', 'headers', 'scripts', 'py_modules',
+ 'installed_libraries', 'define_macros']
+ _dict_keys = ['package_dir', 'installed_pkg_config']
+ _extra_keys = ['name', 'version']
+
+ numpy_include_dirs = []
+
+ def __init__(self,
+ package_name=None,
+ parent_name=None,
+ top_path=None,
+ package_path=None,
+ caller_level=1,
+ setup_name='setup.py',
+ **attrs):
+ """Construct configuration instance of a package.
+
+ package_name -- name of the package
+ Ex.: 'distutils'
+ parent_name -- name of the parent package
+ Ex.: 'numpy'
+ top_path -- directory of the toplevel package
+ Ex.: the directory where the numpy package source sits
+ package_path -- directory of package. Will be computed by magic from the
+ directory of the caller module if not specified
+ Ex.: the directory where numpy.distutils is
+ caller_level -- frame level to caller namespace, internal parameter.
+ """
+ self.name = dot_join(parent_name, package_name)
+ self.version = None
+
+ caller_frame = get_frame(caller_level)
+ self.local_path = get_path_from_frame(caller_frame, top_path)
+ # local_path -- directory of a file (usually setup.py) that
+ # defines a configuration() function.
+ if top_path is None:
+ top_path = self.local_path
+ self.local_path = ''
+ if package_path is None:
+ package_path = self.local_path
+ elif os.path.isdir(njoin(self.local_path, package_path)):
+ package_path = njoin(self.local_path, package_path)
+ if not os.path.isdir(package_path or '.'):
+ raise ValueError("%r is not a directory" % (package_path,))
+ self.top_path = top_path
+ self.package_path = package_path
+ # this is the relative path in the installed package
+ self.path_in_package = os.path.join(*self.name.split('.'))
+
+ self.list_keys = self._list_keys[:]
+ self.dict_keys = self._dict_keys[:]
+
+ for n in self.list_keys:
+ v = copy.copy(attrs.get(n, []))
+ setattr(self, n, as_list(v))
+
+ for n in self.dict_keys:
+ v = copy.copy(attrs.get(n, {}))
+ setattr(self, n, v)
+
+ known_keys = self.list_keys + self.dict_keys
+ self.extra_keys = self._extra_keys[:]
+ for n in attrs.keys():
+ if n in known_keys:
+ continue
+ a = attrs[n]
+ setattr(self, n, a)
+ if isinstance(a, list):
+ self.list_keys.append(n)
+ elif isinstance(a, dict):
+ self.dict_keys.append(n)
+ else:
+ self.extra_keys.append(n)
+
+ if os.path.exists(njoin(package_path, '__init__.py')):
+ self.packages.append(self.name)
+ self.package_dir[self.name] = package_path
+
+ self.options = dict(
+ ignore_setup_xxx_py = False,
+ assume_default_configuration = False,
+ delegate_options_to_subpackages = False,
+ quiet = False,
+ )
+
+ caller_instance = None
+ for i in range(1, 3):
+ try:
+ f = get_frame(i)
+ except ValueError:
+ break
+ try:
+ caller_instance = eval('self', f.f_globals, f.f_locals)
+ break
+ except NameError:
+ pass
+ if isinstance(caller_instance, self.__class__):
+ if caller_instance.options['delegate_options_to_subpackages']:
+ self.set_options(**caller_instance.options)
+
+ self.setup_name = setup_name
+
+ def todict(self):
+ """
+ Return a dictionary compatible with the keyword arguments of distutils
+ setup function.
+
+ Examples
+ --------
+ >>> setup(**config.todict()) #doctest: +SKIP
+ """
+
+ self._optimize_data_files()
+ d = {}
+ known_keys = self.list_keys + self.dict_keys + self.extra_keys
+ for n in known_keys:
+ a = getattr(self, n)
+ if a:
+ d[n] = a
+ return d
+
+ def info(self, message):
+ if not self.options['quiet']:
+ print(message)
+
+ def warn(self, message):
+ sys.stderr.write('Warning: %s\n' % (message,))
+
+ def set_options(self, **options):
+ """
+ Configure Configuration instance.
+
+ The following options are available:
+ - ignore_setup_xxx_py
+ - assume_default_configuration
+ - delegate_options_to_subpackages
+ - quiet
+
+ """
+ for key, value in options.items():
+ if key in self.options:
+ self.options[key] = value
+ else:
+ raise ValueError('Unknown option: '+key)
+
+ def get_distribution(self):
+ """Return the distutils distribution object for self."""
+ from numpy.distutils.core import get_distribution
+ return get_distribution()
+
+ def _wildcard_get_subpackage(self, subpackage_name,
+ parent_name,
+ caller_level = 1):
+ l = subpackage_name.split('.')
+ subpackage_path = njoin([self.local_path]+l)
+ dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
+ config_list = []
+ for d in dirs:
+ if not os.path.isfile(njoin(d, '__init__.py')):
+ continue
+ if 'build' in d.split(os.sep):
+ continue
+ n = '.'.join(d.split(os.sep)[-len(l):])
+ c = self.get_subpackage(n,
+ parent_name = parent_name,
+ caller_level = caller_level+1)
+ config_list.extend(c)
+ return config_list
+
+ def _get_configuration_from_setup_py(self, setup_py,
+ subpackage_name,
+ subpackage_path,
+ parent_name,
+ caller_level = 1):
+ # In case setup_py imports local modules:
+ sys.path.insert(0, os.path.dirname(setup_py))
+ try:
+ setup_name = os.path.splitext(os.path.basename(setup_py))[0]
+ n = dot_join(self.name, subpackage_name, setup_name)
+ setup_module = exec_mod_from_location(
+ '_'.join(n.split('.')), setup_py)
+ if not hasattr(setup_module, 'configuration'):
+ if not self.options['assume_default_configuration']:
+ self.warn('Assuming default configuration '\
+ '(%s does not define configuration())'\
+ % (setup_module))
+ config = Configuration(subpackage_name, parent_name,
+ self.top_path, subpackage_path,
+ caller_level = caller_level + 1)
+ else:
+ pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
+ args = (pn,)
+ if setup_module.configuration.__code__.co_argcount > 1:
+ args = args + (self.top_path,)
+ config = setup_module.configuration(*args)
+ if config.name!=dot_join(parent_name, subpackage_name):
+ self.warn('Subpackage %r configuration returned as %r' % \
+ (dot_join(parent_name, subpackage_name), config.name))
+ finally:
+ del sys.path[0]
+ return config
+
+ def get_subpackage(self,subpackage_name,
+ subpackage_path=None,
+ parent_name=None,
+ caller_level = 1):
+ """Return list of subpackage configurations.
+
+ Parameters
+ ----------
+ subpackage_name : str or None
+            Name of the subpackage for which to get the configuration. '*' in
+ subpackage_name is handled as a wildcard.
+ subpackage_path : str
+ If None, then the path is assumed to be the local path plus the
+ subpackage_name. If a setup.py file is not found in the
+ subpackage_path, then a default configuration is used.
+ parent_name : str
+ Parent name.
+ """
+ if subpackage_name is None:
+ if subpackage_path is None:
+ raise ValueError(
+ "either subpackage_name or subpackage_path must be specified")
+ subpackage_name = os.path.basename(subpackage_path)
+
+ # handle wildcards
+ l = subpackage_name.split('.')
+ if subpackage_path is None and '*' in subpackage_name:
+ return self._wildcard_get_subpackage(subpackage_name,
+ parent_name,
+ caller_level = caller_level+1)
+ assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
+ if subpackage_path is None:
+ subpackage_path = njoin([self.local_path] + l)
+ else:
+ subpackage_path = njoin([subpackage_path] + l[:-1])
+ subpackage_path = self.paths([subpackage_path])[0]
+ setup_py = njoin(subpackage_path, self.setup_name)
+ if not self.options['ignore_setup_xxx_py']:
+ if not os.path.isfile(setup_py):
+ setup_py = njoin(subpackage_path,
+ 'setup_%s.py' % (subpackage_name))
+ if not os.path.isfile(setup_py):
+ if not self.options['assume_default_configuration']:
+ self.warn('Assuming default configuration '\
+ '(%s/{setup_%s,setup}.py was not found)' \
+ % (os.path.dirname(setup_py), subpackage_name))
+ config = Configuration(subpackage_name, parent_name,
+ self.top_path, subpackage_path,
+ caller_level = caller_level+1)
+ else:
+ config = self._get_configuration_from_setup_py(
+ setup_py,
+ subpackage_name,
+ subpackage_path,
+ parent_name,
+ caller_level = caller_level + 1)
+ if config:
+ return [config]
+ else:
+ return []
+
+ def add_subpackage(self,subpackage_name,
+ subpackage_path=None,
+ standalone = False):
+ """Add a sub-package to the current Configuration instance.
+
+ This is useful in a setup.py script for adding sub-packages to a
+ package.
+
+ Parameters
+ ----------
+ subpackage_name : str
+ name of the subpackage
+ subpackage_path : str
+            if given, the subpackage path such that the subpackage is in
+            subpackage_path / subpackage_name. If None, the subpackage is
+            assumed to be located in the local path / subpackage_name.
+ standalone : bool
+ """
+
+ if standalone:
+ parent_name = None
+ else:
+ parent_name = self.name
+ config_list = self.get_subpackage(subpackage_name, subpackage_path,
+ parent_name = parent_name,
+ caller_level = 2)
+ if not config_list:
+ self.warn('No configuration returned, assuming unavailable.')
+ for config in config_list:
+ d = config
+ if isinstance(config, Configuration):
+ d = config.todict()
+ assert isinstance(d, dict), repr(type(d))
+
+ self.info('Appending %s configuration to %s' \
+ % (d.get('name'), self.name))
+ self.dict_append(**d)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add a subpackage '+ subpackage_name)
+
+ def add_data_dir(self, data_path):
+ """Recursively add files under data_path to data_files list.
+
+ Recursively add files under data_path to the list of data_files to be
+ installed (and distributed). The data_path can be a relative
+ path-name, an absolute path-name, or a 2-tuple whose first element
+ specifies where in the install directory the data directory should be
+ installed to.
+
+ Parameters
+ ----------
+ data_path : seq or str
+ Argument can be either
+
+ * 2-sequence (<datadir suffix>, <path to data directory>)
+ * path to data directory where python datadir suffix defaults
+ to package dir.
+
+ Notes
+ -----
+ Rules for installation paths::
+
+ foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+ (gun, foo/bar) -> parent/gun
+ foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+ (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> parent/gun
+ (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+ /foo/bar -> (bar, /foo/bar) -> parent/bar
+ (gun, /foo/bar) -> parent/gun
+ (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+ Examples
+ --------
+ For example suppose the source directory contains fun/foo.dat and
+ fun/bar/car.dat:
+
+ >>> self.add_data_dir('fun') #doctest: +SKIP
+ >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
+ >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+
+ Will install data-files to the locations::
+
+ <package install directory>/
+ fun/
+ foo.dat
+ bar/
+ car.dat
+ sun/
+ foo.dat
+ bar/
+ car.dat
+ gun/
+ foo.dat
+ car.dat
+
+ """
+ if is_sequence(data_path):
+ d, data_path = data_path
+ else:
+ d = None
+ if is_sequence(data_path):
+ for p in data_path:
+ self.add_data_dir((d, p))
+ return
+ if not is_string(data_path):
+ raise TypeError("not a string: %r" % (data_path,))
+ if d is None:
+ if os.path.isabs(data_path):
+ return self.add_data_dir((os.path.basename(data_path), data_path))
+ return self.add_data_dir((data_path, data_path))
+ paths = self.paths(data_path, include_non_existing=False)
+ if is_glob_pattern(data_path):
+ if is_glob_pattern(d):
+ pattern_list = allpath(d).split(os.sep)
+ pattern_list.reverse()
+ # /a/*//b/ -> /a/*/b
+ rl = list(range(len(pattern_list)-1)); rl.reverse()
+ for i in rl:
+ if not pattern_list[i]:
+ del pattern_list[i]
+ #
+ for path in paths:
+ if not os.path.isdir(path):
+ print('Not a directory, skipping', path)
+ continue
+ rpath = rel_path(path, self.local_path)
+ path_list = rpath.split(os.sep)
+ path_list.reverse()
+ target_list = []
+ i = 0
+ for s in pattern_list:
+ if is_glob_pattern(s):
+ if i>=len(path_list):
+ raise ValueError('cannot fill pattern %r with %r' \
+ % (d, path))
+ target_list.append(path_list[i])
+ else:
+ assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
+ target_list.append(s)
+ i += 1
+ if path_list[i:]:
+ self.warn('mismatch of pattern_list=%s and path_list=%s'\
+ % (pattern_list, path_list))
+ target_list.reverse()
+ self.add_data_dir((os.sep.join(target_list), path))
+ else:
+ for path in paths:
+ self.add_data_dir((d, path))
+ return
+ assert not is_glob_pattern(d), repr(d)
+
+ dist = self.get_distribution()
+ if dist is not None and dist.data_files is not None:
+ data_files = dist.data_files
+ else:
+ data_files = self.data_files
+
+ for path in paths:
+ for d1, f in list(general_source_directories_files(path)):
+ target_path = os.path.join(self.path_in_package, d, d1)
+ data_files.append((target_path, f))
+
+ def _optimize_data_files(self):
+ data_dict = {}
+ for p, files in self.data_files:
+ if p not in data_dict:
+ data_dict[p] = set()
+ for f in files:
+ data_dict[p].add(f)
+ self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
+
+ def add_data_files(self,*files):
+ """Add data files to configuration data_files.
+
+ Parameters
+ ----------
+ files : sequence
+ Argument(s) can be either
+
+ * 2-sequence (<datadir prefix>,<path to data file(s)>)
+ * paths to data files where python datadir prefix defaults
+ to package dir.
+
+ Notes
+ -----
+ The form of each element of the files sequence is very flexible
+ allowing many combinations of where to get the files from the package
+ and where they should ultimately be installed on the system. The most
+ basic usage is for an element of the files argument sequence to be a
+ simple filename. This will cause that file from the local path to be
+ installed to the installation path of the self.name package (package
+ path). The file argument can also be a relative path in which case the
+ entire relative path will be installed into the package directory.
+ Finally, the file can be an absolute path name in which case the file
+ will be found at the absolute path name but installed to the package
+ path.
+
+ This basic behavior can be augmented by passing a 2-tuple in as the
+ file argument. The first element of the tuple should specify the
+ relative path (under the package install directory) where the
+ remaining sequence of files should be installed to (it has nothing to
+ do with the file-names in the source distribution). The second element
+ of the tuple is the sequence of files that should be installed. The
+ files in this sequence can be filenames, relative paths, or absolute
+ paths. For absolute paths the file will be installed in the top-level
+ package installation directory (regardless of the first argument).
+ Filenames and relative path names will be installed in the package
+ install directory under the path name given as the first element of
+ the tuple.
+
+ Rules for installation paths:
+
+ #. file.txt -> (., file.txt)-> parent/file.txt
+ #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+ #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+ #. ``*``.txt -> parent/a.txt, parent/b.txt
+ #. foo/``*``.txt -> parent/foo/a.txt, parent/foo/b.txt
+ #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
+ #. (sun, file.txt) -> parent/sun/file.txt
+ #. (sun, bar/file.txt) -> parent/sun/file.txt
+ #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+ #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+ An additional feature is that the path to a data-file can actually be
+ a function that takes no arguments and returns the actual path(s) to
+ the data-files. This is useful when the data files are generated while
+ building the package.
+
+ Examples
+ --------
+ Add files to the list of data_files to be included with the package.
+
+ >>> self.add_data_files('foo.dat',
+ ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+ ... 'bar/cat.dat',
+ ... '/full/path/to/can.dat') #doctest: +SKIP
+
+ will install these data files to::
+
+ <package install directory>/
+ foo.dat
+ fun/
+ gun.dat
+ nun/
+ pun.dat
+ sun.dat
+ bar/
+ cat.dat
+ can.dat
+
+ where <package install directory> is the package (or sub-package)
+ directory such as '/usr/lib/python2.4/site-packages/mypackage'
+ ('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
+ '/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
+ ('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
+ """
+
+ if len(files)>1:
+ for f in files:
+ self.add_data_files(f)
+ return
+ assert len(files)==1
+ if is_sequence(files[0]):
+ d, files = files[0]
+ else:
+ d = None
+ if is_string(files):
+ filepat = files
+ elif is_sequence(files):
+ if len(files)==1:
+ filepat = files[0]
+ else:
+ for f in files:
+ self.add_data_files((d, f))
+ return
+ else:
+ raise TypeError(repr(type(files)))
+
+ if d is None:
+ if hasattr(filepat, '__call__'):
+ d = ''
+ elif os.path.isabs(filepat):
+ d = ''
+ else:
+ d = os.path.dirname(filepat)
+ self.add_data_files((d, files))
+ return
+
+ paths = self.paths(filepat, include_non_existing=False)
+ if is_glob_pattern(filepat):
+ if is_glob_pattern(d):
+ pattern_list = d.split(os.sep)
+ pattern_list.reverse()
+ for path in paths:
+ path_list = path.split(os.sep)
+ path_list.reverse()
+ path_list.pop() # filename
+ target_list = []
+ i = 0
+ for s in pattern_list:
+ if is_glob_pattern(s):
+ target_list.append(path_list[i])
+ i += 1
+ else:
+ target_list.append(s)
+ target_list.reverse()
+ self.add_data_files((os.sep.join(target_list), path))
+ else:
+ self.add_data_files((d, paths))
+ return
+ assert not is_glob_pattern(d), repr((d, filepat))
+
+ dist = self.get_distribution()
+ if dist is not None and dist.data_files is not None:
+ data_files = dist.data_files
+ else:
+ data_files = self.data_files
+
+ data_files.append((os.path.join(self.path_in_package, d), paths))
+
+ ### XXX Implement add_py_modules
+
+ def add_define_macros(self, macros):
+ """Add define macros to configuration
+
+ Add the given sequence of macro name and value duples to the beginning
+ of the define_macros list This list will be visible to all extension
+ modules of the current package.
+ """
+ dist = self.get_distribution()
+ if dist is not None:
+ if not hasattr(dist, 'define_macros'):
+ dist.define_macros = []
+ dist.define_macros.extend(macros)
+ else:
+ self.define_macros.extend(macros)
+
+
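+ # Example (illustrative; macro names are hypothetical): macros are
+ # (name, value) 2-tuples, with value None for a bare '#define NAME':
+ #
+ #     config.add_define_macros([('HAVE_FEATURE_X', None),
+ #                               ('MYPKG_VERSION', '"1.0"')])
+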
+ def add_include_dirs(self,*paths):
+ """Add paths to configuration include directories.
+
+ Add the given sequence of paths to the include_dirs list. This list
+ will be visible to all extension modules of the current package.
+ """
+ include_dirs = self.paths(paths)
+ dist = self.get_distribution()
+ if dist is not None:
+ if dist.include_dirs is None:
+ dist.include_dirs = []
+ dist.include_dirs.extend(include_dirs)
+ else:
+ self.include_dirs.extend(include_dirs)
+
+ def add_headers(self,*files):
+ """Add installable headers to configuration.
+
+ Add the given sequence of files to the headers list. By default,
+ headers will be installed under the
+ <python-include>/<self.name.replace('.','/')>/ directory. If an item
+ of files is a tuple, then its first element specifies the actual
+ installation location relative to the <python-include> path.
+
+ Parameters
+ ----------
+ files : str or seq
+ Argument(s) can be either:
+
+ * 2-sequence (<includedir suffix>,<path to header file(s)>)
+ * path(s) to header file(s) where python includedir suffix will
+ default to package name.
+ """
+ headers = []
+ for path in files:
+ if is_string(path):
+ for p in self.paths(path):
+ headers.append((self.name, p))
+ else:
+ if not isinstance(path, (tuple, list)) or len(path) != 2:
+ raise TypeError(repr(path))
+ for p in self.paths(path[1]):
+ headers.append((path[0], p))
+ dist = self.get_distribution()
+ if dist is not None:
+ if dist.headers is None:
+ dist.headers = []
+ dist.headers.extend(headers)
+ else:
+ self.headers.extend(headers)
+
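+ # Example (illustrative; file names are hypothetical): plain paths and
+ # (suffix, path) tuples may be mixed:
+ #
+ #     config.add_headers('include/mypkg.h')
+ #     config.add_headers(('mypkg/private', 'include/impl.h'))
+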
+ def paths(self,*paths,**kws):
+ """Apply glob to paths and prepend local_path if needed.
+
+ Applies glob.glob(...) to each path in the sequence (if needed) and
+ pre-pends the local_path if needed. Because this is called on all
+ source lists, this allows wildcard characters to be specified in lists
+ of sources for extension modules, libraries, and scripts, and allows
+ path-names to be relative to the source directory.
+
+ """
+ include_non_existing = kws.get('include_non_existing', True)
+ return gpaths(paths,
+ local_path = self.local_path,
+ include_non_existing=include_non_existing)
+
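+ # Example (illustrative): relative patterns are resolved against
+ # local_path, so for a hypothetical layout containing src/a.c and
+ # src/b.c:
+ #
+ #     config.paths('src/*.c')   # -> ['<local_path>/src/a.c',
+ #                               #     '<local_path>/src/b.c']
+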
+ def _fix_paths_dict(self, kw):
+ for k in kw.keys():
+ v = kw[k]
+ if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
+ 'module_dirs', 'extra_objects']:
+ new_v = self.paths(v)
+ kw[k] = new_v
+
+ def add_extension(self,name,sources,**kw):
+ """Add extension to configuration.
+
+ Create and add an Extension instance to the ext_modules list. This
+ method also takes the following optional keyword arguments that are
+ passed on to the Extension constructor.
+
+ Parameters
+ ----------
+ name : str
+ name of the extension
+ sources : seq
+ list of the sources. The list of sources may contain functions
+ (called source generators) which must take an extension instance
+ and a build directory as inputs and return a source file or list of
+ source files or None. If None is returned then no sources are
+ generated. If the Extension instance has no sources after
+ processing all source generators, then no extension module is
+ built.
+ include_dirs :
+ define_macros :
+ undef_macros :
+ library_dirs :
+ libraries :
+ runtime_library_dirs :
+ extra_objects :
+ extra_compile_args :
+ extra_link_args :
+ extra_f77_compile_args :
+ extra_f90_compile_args :
+ export_symbols :
+ swig_opts :
+ depends :
+ The depends list contains paths to files or directories that the
+ sources of the extension module depend on. If any path in the
+ depends list is newer than the extension module, then the module
+ will be rebuilt.
+ language :
+ f2py_options :
+ module_dirs :
+ extra_info : dict or list
+ dict or list of dicts of keywords to be appended to the keyword arguments.
+
+ Notes
+ -----
+ The self.paths(...) method is applied to all lists that may contain
+ paths.
+ """
+ ext_args = copy.copy(kw)
+ ext_args['name'] = dot_join(self.name, name)
+ ext_args['sources'] = sources
+
+ if 'extra_info' in ext_args:
+ extra_info = ext_args['extra_info']
+ del ext_args['extra_info']
+ if isinstance(extra_info, dict):
+ extra_info = [extra_info]
+ for info in extra_info:
+ assert isinstance(info, dict), repr(info)
+ dict_append(ext_args,**info)
+
+ self._fix_paths_dict(ext_args)
+
+ # Resolve out-of-tree dependencies
+ libraries = ext_args.get('libraries', [])
+ libnames = []
+ ext_args['libraries'] = []
+ for libname in libraries:
+ if isinstance(libname, tuple):
+ self._fix_paths_dict(libname[1])
+
+ # Handle library names of the form libname@relative/path/to/library
+ if '@' in libname:
+ lname, lpath = libname.split('@', 1)
+ lpath = os.path.abspath(njoin(self.local_path, lpath))
+ if os.path.isdir(lpath):
+ c = self.get_subpackage(None, lpath,
+ caller_level = 2)
+ if isinstance(c, Configuration):
+ c = c.todict()
+ for l in [l[0] for l in c.get('libraries', [])]:
+ llname = l.split('__OF__', 1)[0]
+ if llname == lname:
+ c.pop('name', None)
+ dict_append(ext_args,**c)
+ break
+ continue
+ libnames.append(libname)
+
+ ext_args['libraries'] = libnames + ext_args['libraries']
+ ext_args['define_macros'] = \
+ self.define_macros + ext_args.get('define_macros', [])
+
+ from numpy.distutils.core import Extension
+ ext = Extension(**ext_args)
+ self.ext_modules.append(ext)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add an extension '+name)
+ return ext
+
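+ # Example (illustrative sketch; the '_fast' module and its source file
+ # are hypothetical; get_info is from numpy.distutils.misc_util):
+ #
+ #     config.add_extension('_fast',
+ #                          sources=['src/_fastmodule.c'],
+ #                          define_macros=[('NDEBUG', None)],
+ #                          extra_info=get_info('npymath'))
+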
+ def add_library(self,name,sources,**build_info):
+ """
+ Add library to configuration.
+
+ Parameters
+ ----------
+ name : str
+ Name of the extension.
+ sources : sequence
+ List of the sources. The list of sources may contain functions
+ (called source generators) which must take an extension instance
+ and a build directory as inputs and return a source file or list of
+ source files or None. If None is returned then no sources are
+ generated. If the Extension instance has no sources after
+ processing all source generators, then no extension module is
+ built.
+ build_info : dict, optional
+ The following keys are allowed:
+
+ * depends
+ * macros
+ * include_dirs
+ * extra_compiler_args
+ * extra_f77_compile_args
+ * extra_f90_compile_args
+ * f2py_options
+ * language
+
+ """
+ self._add_library(name, sources, None, build_info)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add a library '+ name)
+
+ def _add_library(self, name, sources, install_dir, build_info):
+ """Common implementation for add_library and add_installed_library. Do
+ not use directly"""
+ build_info = copy.copy(build_info)
+ build_info['sources'] = sources
+
+ # Sometimes, depends is not set up to an empty list by default, and if
+ # depends is not given to add_library, distutils barfs (#1134)
+ if not 'depends' in build_info:
+ build_info['depends'] = []
+
+ self._fix_paths_dict(build_info)
+
+ # Add to libraries list so that it is built with build_clib
+ self.libraries.append((name, build_info))
+
+ def add_installed_library(self, name, sources, install_dir, build_info=None):
+ """
+ Similar to add_library, but the specified library is installed.
+
+ Most C libraries used with `distutils` are only used to build python
+ extensions, but libraries built through this method will be installed
+ so that they can be reused by third-party packages.
+
+ Parameters
+ ----------
+ name : str
+ Name of the installed library.
+ sources : sequence
+ List of the library's source files. See `add_library` for details.
+ install_dir : str
+ Path to install the library, relative to the current sub-package.
+ build_info : dict, optional
+ The following keys are allowed:
+
+ * depends
+ * macros
+ * include_dirs
+ * extra_compiler_args
+ * extra_f77_compile_args
+ * extra_f90_compile_args
+ * f2py_options
+ * language
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ add_library, add_npy_pkg_config, get_info
+
+ Notes
+ -----
+ The best way to encode the options required to link against the specified
+ C libraries is to use a "libname.ini" file, and use `get_info` to
+ retrieve the required options (see `add_npy_pkg_config` for more
+ information).
+
+ """
+ if not build_info:
+ build_info = {}
+
+ install_dir = os.path.join(self.package_path, install_dir)
+ self._add_library(name, sources, install_dir, build_info)
+ self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
+
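+ # Example (illustrative; 'mylib' and its source are hypothetical):
+ # install a reusable static library under a 'lib' subdirectory:
+ #
+ #     config.add_installed_library('mylib',
+ #                                  sources=['src/mylib.c'],
+ #                                  install_dir='lib')
+ #
+ # Paired with add_npy_pkg_config (below), third-party packages can then
+ # recover the link options via get_info('mylib').
+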
+ def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
+ """
+ Generate and install a npy-pkg config file from a template.
+
+ The config file generated from `template` is installed in the
+ given install directory, using `subst_dict` for variable substitution.
+
+ Parameters
+ ----------
+ template : str
+ The path of the template, relative to the current package path.
+ install_dir : str
+ Where to install the npy-pkg config file, relative to the current
+ package path.
+ subst_dict : dict, optional
+ If given, any string of the form ``@key@`` will be replaced by
+ ``subst_dict[key]`` in the template file when installed. The install
+ prefix is always available through the variable ``@prefix@``, since the
+ install prefix is not easy to get reliably from setup.py.
+
+ See also
+ --------
+ add_installed_library, get_info
+
+ Notes
+ -----
+ This works for both standard installs and in-place builds: for
+ in-place builds, ``@prefix@`` refers to the source directory.
+
+ Examples
+ --------
+ ::
+
+ config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
+
+ Assuming the foo.ini.in file has the following content::
+
+ [meta]
+ Name=@foo@
+ Version=1.0
+ Description=dummy description
+
+ [default]
+ Cflags=-I@prefix@/include
+ Libs=
+
+ The generated file will have the following content::
+
+ [meta]
+ Name=bar
+ Version=1.0
+ Description=dummy description
+
+ [default]
+ Cflags=-Iprefix_dir/include
+ Libs=
+
+ and will be installed as foo.ini in the 'lib' subpath.
+
+ When cross-compiling with numpy distutils, it might be necessary to
+ use modified npy-pkg-config files. Using the default/generated files
+ will link with the host libraries (i.e. libnpymath.a). For
+ cross-compilation you of course need to link with target libraries,
+ while using the host Python installation.
+
+ You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+ pkgdir value to the .ini files and set the NPY_PKG_CONFIG_PATH environment
+ variable to point to the directory with the modified npy-pkg-config
+ files.
+
+ Example npymath.ini modified for cross-compilation::
+
+ [meta]
+ Name=npymath
+ Description=Portable, core math library implementing C99 standard
+ Version=0.1
+
+ [variables]
+ pkgname=numpy.core
+ pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
+ prefix=${pkgdir}
+ libdir=${prefix}/lib
+ includedir=${prefix}/include
+
+ [default]
+ Libs=-L${libdir} -lnpymath
+ Cflags=-I${includedir}
+ Requires=mlib
+
+ [msvc]
+ Libs=/LIBPATH:${libdir} npymath.lib
+ Cflags=/INCLUDE:${includedir}
+ Requires=mlib
+
+ """
+ if subst_dict is None:
+ subst_dict = {}
+ template = os.path.join(self.package_path, template)
+
+ if self.name in self.installed_pkg_config:
+ self.installed_pkg_config[self.name].append((template, install_dir,
+ subst_dict))
+ else:
+ self.installed_pkg_config[self.name] = [(template, install_dir,
+ subst_dict)]
+
+
+ def add_scripts(self,*files):
+ """Add scripts to configuration.
+
+ Add the sequence of files to the beginning of the scripts list.
+ Scripts will be installed under the <prefix>/bin/ directory.
+
+ """
+ scripts = self.paths(files)
+ dist = self.get_distribution()
+ if dist is not None:
+ if dist.scripts is None:
+ dist.scripts = []
+ dist.scripts.extend(scripts)
+ else:
+ self.scripts.extend(scripts)
+
+ def dict_append(self,**dict):
+ for key in self.list_keys:
+ a = getattr(self, key)
+ a.extend(dict.get(key, []))
+ for key in self.dict_keys:
+ a = getattr(self, key)
+ a.update(dict.get(key, {}))
+ known_keys = self.list_keys + self.dict_keys + self.extra_keys
+ for key in dict.keys():
+ if key not in known_keys:
+ a = getattr(self, key, None)
+ if a and a==dict[key]: continue
+ self.warn('Inheriting attribute %r=%r from %r' \
+ % (key, dict[key], dict.get('name', '?')))
+ setattr(self, key, dict[key])
+ self.extra_keys.append(key)
+ elif key in self.extra_keys:
+ self.info('Ignoring attempt to set %r (from %r to %r)' \
+ % (key, getattr(self, key), dict[key]))
+ elif key in known_keys:
+ # key is already processed above
+ pass
+ else:
+ raise ValueError("Don't know about key=%r" % (key))
+
+ def __str__(self):
+ from pprint import pformat
+ known_keys = self.list_keys + self.dict_keys + self.extra_keys
+ s = '<'+5*'-' + '\n'
+ s += 'Configuration of '+self.name+':\n'
+ known_keys.sort()
+ for k in known_keys:
+ a = getattr(self, k, None)
+ if a:
+ s += '%s = %s\n' % (k, pformat(a))
+ s += 5*'-' + '>'
+ return s
+
+ def get_config_cmd(self):
+ """
+ Returns the numpy.distutils config command instance.
+ """
+ cmd = get_cmd('config')
+ cmd.ensure_finalized()
+ cmd.dump_source = 0
+ cmd.noisy = 0
+ old_path = os.environ.get('PATH')
+ if old_path:
+ path = os.pathsep.join(['.', old_path])
+ os.environ['PATH'] = path
+ return cmd
+
+ def get_build_temp_dir(self):
+ """
+ Return a path to a temporary directory where temporary files should be
+ placed.
+ """
+ cmd = get_cmd('build')
+ cmd.ensure_finalized()
+ return cmd.build_temp
+
+ def have_f77c(self):
+ """Check for availability of Fortran 77 compiler.
+
+ Use it inside source generating function to ensure that
+ setup distribution instance has been initialized.
+
+ Notes
+ -----
+ Returns True if a Fortran 77 compiler is available (i.e. a trivial
+ Fortran 77 program could be compiled successfully).
+ """
+ simple_fortran_subroutine = '''
+ subroutine simple
+ end
+ '''
+ config_cmd = self.get_config_cmd()
+ flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
+ return flag
+
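+ # Example (illustrative; 'flib' and its source are hypothetical): guard
+ # optional Fortran sources on compiler availability:
+ #
+ #     if config.have_f77c():
+ #         config.add_library('flib', sources=['src/flib.f'])
+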
+ def have_f90c(self):
+ """Check for availability of Fortran 90 compiler.
+
+ Use it inside source generating function to ensure that
+ setup distribution instance has been initialized.
+
+ Notes
+ -----
+ Returns True if a Fortran 90 compiler is available (i.e. a trivial
+ Fortran 90 program could be compiled successfully).
+ """
+ simple_fortran_subroutine = '''
+ subroutine simple
+ end
+ '''
+ config_cmd = self.get_config_cmd()
+ flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
+ return flag
+
+ def append_to(self, extlib):
+ """Append libraries, include_dirs to extension or library item.
+ """
+ if is_sequence(extlib):
+ lib_name, build_info = extlib
+ dict_append(build_info,
+ libraries=self.libraries,
+ include_dirs=self.include_dirs)
+ else:
+ from numpy.distutils.core import Extension
+ assert isinstance(extlib, Extension), repr(extlib)
+ extlib.libraries.extend(self.libraries)
+ extlib.include_dirs.extend(self.include_dirs)
+
+ def _get_svn_revision(self, path):
+ """Return path's SVN revision number.
+ """
+ try:
+ output = subprocess.check_output(['svnversion'], cwd=path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ m = re.match(rb'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
+ if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
+ entries = njoin(path, '_svn', 'entries')
+ else:
+ entries = njoin(path, '.svn', 'entries')
+ if os.path.isfile(entries):
+ with open(entries) as f:
+ fstr = f.read()
+ if fstr[:5] == '<?xml': # pre 1.4
+ m = re.search(r'revision="(?P<revision>\d+)"', fstr)
+ if m:
+ return int(m.group('revision'))
+ else: # non-xml entries file --- parse the revision that follows the 'dir' entry
+ m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
+ if m:
+ return int(m.group('revision'))
+ return None
+
+ def _get_hg_revision(self, path):
+ """Return path's Mercurial revision number.
+ """
+ try:
+ output = subprocess.check_output(
+ ['hg', 'identify', '--num'], cwd=path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ m = re.match(rb'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
+ branch_fn = njoin(path, '.hg', 'branch')
+ branch_cache_fn = njoin(path, '.hg', 'branch.cache')
+
+ if os.path.isfile(branch_fn):
+ branch0 = None
+ with open(branch_fn) as f:
+ revision0 = f.read().strip()
+
+ branch_map = {}
+ with open(branch_cache_fn, 'r') as f:
+ for line in f:
+ branch1, revision1 = line.split()[:2]
+ if revision1==revision0:
+ branch0 = branch1
+ try:
+ revision1 = int(revision1)
+ except ValueError:
+ continue
+ branch_map[branch1] = revision1
+
+ return branch_map.get(branch0)
+
+ return None
+
+
+ def get_version(self, version_file=None, version_variable=None):
+ """Try to get version string of a package.
+
+ Return a version string of the current package or None if the version
+ information could not be detected.
+
+ Notes
+ -----
+ This method scans files named
+ __version__.py, <packagename>_version.py, version.py, and
+ __svn_version__.py for string variables version, __version__, and
+ <packagename>_version, until a version number is found.
+ """
+ version = getattr(self, 'version', None)
+ if version is not None:
+ return version
+
+ # Get version from version file.
+ if version_file is None:
+ files = ['__version__.py',
+ self.name.split('.')[-1]+'_version.py',
+ 'version.py',
+ '__svn_version__.py',
+ '__hg_version__.py']
+ else:
+ files = [version_file]
+ if version_variable is None:
+ version_vars = ['version',
+ '__version__',
+ self.name.split('.')[-1]+'_version']
+ else:
+ version_vars = [version_variable]
+ for f in files:
+ fn = njoin(self.local_path, f)
+ if os.path.isfile(fn):
+ name = os.path.splitext(os.path.basename(fn))[0]
+ n = dot_join(self.name, name)
+ try:
+ version_module = exec_mod_from_location(
+ '_'.join(n.split('.')), fn)
+ except ImportError as e:
+ self.warn(str(e))
+ version_module = None
+ if version_module is None:
+ continue
+
+ for a in version_vars:
+ version = getattr(version_module, a, None)
+ if version is not None:
+ break
+
+ # Try the versioneer-style get_versions() interface, if present
+ try:
+ version = version_module.get_versions()['version']
+ except AttributeError:
+ pass
+
+ if version is not None:
+ break
+
+ if version is not None:
+ self.version = version
+ return version
+
+ # Get version as SVN or Mercurial revision number
+ revision = self._get_svn_revision(self.local_path)
+ if revision is None:
+ revision = self._get_hg_revision(self.local_path)
+
+ if revision is not None:
+ version = str(revision)
+ self.version = version
+
+ return version
+
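+ # Example (illustrative): given a hypothetical mypkg/version.py
+ # containing
+ #
+ #     version = '1.2.3'
+ #
+ # config.get_version() returns '1.2.3' and caches it as config.version.
+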
+ def make_svn_version_py(self, delete=True):
+ """Appends a data function to the data_files list that will generate
+ __svn_version__.py file to the current package directory.
+
+ Generate package __svn_version__.py file from SVN revision number,
+ it will be removed after python exits but will be available
+ when sdist, etc commands are executed.
+
+ Notes
+ -----
+ If __svn_version__.py existed before, nothing is done.
+
+ This is
+ intended for working with source directories that are in an SVN
+ repository.
+ """
+ target = njoin(self.local_path, '__svn_version__.py')
+ revision = self._get_svn_revision(self.local_path)
+ if os.path.isfile(target) or revision is None:
+ return
+ else:
+ def generate_svn_version_py():
+ if not os.path.isfile(target):
+ version = str(revision)
+ self.info('Creating %s (version=%r)' % (target, version))
+ with open(target, 'w') as f:
+ f.write('version = %r\n' % (version))
+
+ def rm_file(f=target,p=self.info):
+ if delete:
+ try: os.remove(f); p('removed '+f)
+ except OSError: pass
+ try: os.remove(f+'c'); p('removed '+f+'c')
+ except OSError: pass
+
+ atexit.register(rm_file)
+
+ return target
+
+ self.add_data_files(('', generate_svn_version_py()))
+
+ def make_hg_version_py(self, delete=True):
+ """Appends a data function to the data_files list that will generate
+ __hg_version__.py file to the current package directory.
+
+ Generate package __hg_version__.py file from Mercurial revision,
+ it will be removed after python exits but will be available
+ when sdist, etc commands are executed.
+
+ Notes
+ -----
+ If __hg_version__.py existed before, nothing is done.
+
+ This is intended for working with source directories that are
+ in an Mercurial repository.
+ """
+ target = njoin(self.local_path, '__hg_version__.py')
+ revision = self._get_hg_revision(self.local_path)
+ if os.path.isfile(target) or revision is None:
+ return
+ else:
+ def generate_hg_version_py():
+ if not os.path.isfile(target):
+ version = str(revision)
+ self.info('Creating %s (version=%r)' % (target, version))
+ with open(target, 'w') as f:
+ f.write('version = %r\n' % (version))
+
+ def rm_file(f=target,p=self.info):
+ if delete:
+ try: os.remove(f); p('removed '+f)
+ except OSError: pass
+ try: os.remove(f+'c'); p('removed '+f+'c')
+ except OSError: pass
+
+ atexit.register(rm_file)
+
+ return target
+
+ self.add_data_files(('', generate_hg_version_py()))
+
+ def make_config_py(self,name='__config__'):
+ """Generate package __config__.py file containing system_info
+ information used during building the package.
+
+ This file is installed to the package installation directory.
+
+ """
+ self.py_modules.append((self.name, name, generate_config_py))
+
+ def get_info(self,*names):
+ """Get resources information.
+
+ Return information (from system_info.get_info) for all of the names in
+ the argument list in a single dictionary.
+ """
+ from .system_info import get_info, dict_append
+ info_dict = {}
+ for a in names:
+ dict_append(info_dict,**get_info(a))
+ return info_dict
+
+
+def get_cmd(cmdname, _cache={}):
+ if cmdname not in _cache:
+ import distutils.core
+ dist = distutils.core._setup_distribution
+ if dist is None:
+ from distutils.errors import DistutilsInternalError
+ raise DistutilsInternalError(
+ 'setup distribution instance not initialized')
+ cmd = dist.get_command_obj(cmdname)
+ _cache[cmdname] = cmd
+ return _cache[cmdname]
+
+def get_numpy_include_dirs():
+ # numpy_include_dirs are set by numpy/core/setup.py, otherwise []
+ include_dirs = Configuration.numpy_include_dirs[:]
+ if not include_dirs:
+ import numpy
+ include_dirs = [ numpy.get_include() ]
+ # else running numpy/core/setup.py
+ return include_dirs
+
+def get_npy_pkg_dir():
+ """Return the path where to find the npy-pkg-config directory.
+
+ If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
+ is returned. Otherwise, a path inside the location of the numpy module is
+ returned.
+
+ The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining
+ customized npy-pkg-config .ini files for the cross-compilation
+ environment, and using them when cross-compiling.
+
+ """
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d is not None:
+ return d
+ spec = importlib.util.find_spec('numpy')
+ d = os.path.join(os.path.dirname(spec.origin),
+ 'core', 'lib', 'npy-pkg-config')
+ return d
+
+def get_pkg_info(pkgname, dirs=None):
+ """
+ Return library info for the given package.
+
+ Parameters
+ ----------
+ pkgname : str
+ Name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini).
+ dirs : sequence, optional
+ If given, should be a sequence of additional directories where to look
+ for npy-pkg-config files. Those directories are searched prior to the
+ NumPy directory.
+
+ Returns
+ -------
+ pkginfo : class instance
+ The `LibraryInfo` instance containing the build information.
+
+ Raises
+ ------
+ PkgNotFound
+ If the package is not found.
+
+ See Also
+ --------
+ Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+ get_info
+
+ """
+ from numpy.distutils.npy_pkg_config import read_config
+
+ if dirs:
+ dirs.append(get_npy_pkg_dir())
+ else:
+ dirs = [get_npy_pkg_dir()]
+ return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+ """
+ Return an info dict for a given C library.
+
+ The info dict contains the necessary options to use the C library.
+
+ Parameters
+ ----------
+ pkgname : str
+ Name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini).
+ dirs : sequence, optional
+ If given, should be a sequence of additional directories where to look
+ for npy-pkg-config files. Those directories are searched prior to the
+ NumPy directory.
+
+ Returns
+ -------
+ info : dict
+ The dictionary with build information.
+
+ Raises
+ ------
+ PkgNotFound
+ If the package is not found.
+
+ See Also
+ --------
+ Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+ get_pkg_info
+
+ Examples
+ --------
+ To get the necessary information for the npymath library from NumPy:
+
+ >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+ >>> npymath_info #doctest: +SKIP
+ {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+ ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
+
+ This info dict can then be used as input to a `Configuration` instance::
+
+ config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+ """
+ from numpy.distutils.npy_pkg_config import parse_flags
+ pkg_info = get_pkg_info(pkgname, dirs)
+
+ # Translate LibraryInfo instance into a build_info dict
+ info = parse_flags(pkg_info.cflags())
+ for k, v in parse_flags(pkg_info.libs()).items():
+ info[k].extend(v)
+
+ # add_extension's extra_info argument is strict about the keys it accepts
+ info['define_macros'] = info['macros']
+ del info['macros']
+ del info['ignored']
+
+ return info
+
+def is_bootstrapping():
+ import builtins
+
+ try:
+ builtins.__NUMPY_SETUP__
+ return True
+ except AttributeError:
+ return False
+
+
+#########################
+
+def default_config_dict(name = None, parent_name = None, local_path=None):
+ """Return a configuration dictionary for usage in
+ configuration() function defined in file setup_<name>.py.
+ """
+ import warnings
+ warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
+ 'deprecated default_config_dict(%r,%r,%r)'
+ % (name, parent_name, local_path,
+ name, parent_name, local_path,
+ ), stacklevel=2)
+ c = Configuration(name, parent_name, local_path)
+ return c.todict()
+
+
+def dict_append(d, **kws):
+ for k, v in kws.items():
+ if k in d:
+ ov = d[k]
+ if isinstance(ov, str):
+ d[k] = v
+ else:
+ d[k].extend(v)
+ else:
+ d[k] = v
+
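+ # Example (illustrative) of the merge semantics above: string values are
+ # overwritten, sequence values are extended in place:
+ #
+ #     d = {'libraries': ['m'], 'name': 'old'}
+ #     dict_append(d, libraries=['npymath'], name='new')
+ #     # d == {'libraries': ['m', 'npymath'], 'name': 'new'}
+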
+def appendpath(prefix, path):
+ if os.path.sep != '/':
+ prefix = prefix.replace('/', os.path.sep)
+ path = path.replace('/', os.path.sep)
+ drive = ''
+ if os.path.isabs(path):
+ drive = os.path.splitdrive(prefix)[0]
+ absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
+ pathdrive, path = os.path.splitdrive(path)
+ d = os.path.commonprefix([absprefix, path])
+ if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
+ or os.path.join(path[:len(d)], path[len(d):]) != path:
+ # Handle invalid paths
+ d = os.path.dirname(d)
+ subpath = path[len(d):]
+ if os.path.isabs(subpath):
+ subpath = subpath[1:]
+ else:
+ subpath = path
+ return os.path.normpath(njoin(drive + prefix, subpath))
+
+def generate_config_py(target):
+ """Generate config.py file containing system_info information
+ used during building the package.
+
+ Usage:
+ config['py_modules'].append((packagename, '__config__', generate_config_py))
+ """
+ from numpy.distutils.system_info import system_info
+ from distutils.dir_util import mkpath
+ mkpath(os.path.dirname(target))
+ with open(target, 'w') as f:
+ f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
+ f.write('# It contains system_info results at the time of building this package.\n')
+ f.write('__all__ = ["get_info","show"]\n\n')
+
+ # For gfortran+msvc combination, extra shared libraries may exist
+ f.write(textwrap.dedent("""
+ import os
+ import sys
+
+ extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+ if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.add_dll_directory(extra_dll_dir)
+
+ """))
+
+ for k, i in system_info.saved_results.items():
+ f.write('%s=%r\n' % (k, i))
+ f.write(textwrap.dedent(r'''
+ def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
+
+ def show():
+ """
+ Show libraries in the system on which NumPy was built.
+
+ Print information about various resources (libraries, library
+ directories, include directories, etc.) in the system on which
+ NumPy was built.
+
+ See Also
+ --------
+ get_include : Returns the directory containing NumPy C
+ header files.
+
+ Notes
+ -----
+ 1. Classes specifying the information to be printed are defined
+ in the `numpy.distutils.system_info` module.
+
+ Information may include:
+
+ * ``language``: language used to write the libraries (mostly
+ C or f77)
+ * ``libraries``: names of libraries found in the system
+ * ``library_dirs``: directories containing the libraries
+ * ``include_dirs``: directories containing library header files
+ * ``src_dirs``: directories containing library source files
+ * ``define_macros``: preprocessor macros used by
+ ``distutils.setup``
+ * ``baseline``: minimum CPU features required
+ * ``found``: dispatched features supported in the system
+ * ``not found``: dispatched features that are not supported
+ in the system
+
+ 2. NumPy BLAS/LAPACK Installation Notes
+
+ Installing a numpy wheel (``pip install numpy`` or force it
+ via ``pip install numpy --only-binary numpy``) includes
+ an OpenBLAS implementation of the BLAS and LAPACK linear algebra
+ APIs. In this case, ``library_dirs`` reports the original build
+ time configuration as compiled with gcc/gfortran; at run time
+ the OpenBLAS library is in
+ ``site-packages/numpy.libs/`` (linux), or
+ ``site-packages/numpy/.dylibs/`` (macOS), or
+ ``site-packages/numpy/.libs/`` (windows).
+
+ Installing numpy from source
+ (``pip install numpy --no-binary numpy``) searches for BLAS and
+ LAPACK dynamic link libraries at build time as influenced by
+ environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
+ NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
+ or the optional file ``~/.numpy-site.cfg``.
+ NumPy remembers those locations and expects to load the same
+ libraries at run-time.
+ In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
+ library) is in the default build-time search order after
+ 'openblas'.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.show_config()
+ blas_opt_info:
+ language = c
+ define_macros = [('HAVE_CBLAS', None)]
+ libraries = ['openblas', 'openblas']
+ library_dirs = ['/usr/local/lib']
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+ for name,info_dict in globals().items():
+ if name[0] == "_" or type(info_dict) is not type({}): continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+ for k,v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+
+ print("Supported SIMD extensions in this NumPy install:")
+ print(" baseline = %s" % (','.join(__cpu_baseline__)))
+ print(" found = %s" % (','.join(features_found)))
+ print(" not found = %s" % (','.join(features_not_found)))
+
+ '''))
+
+ return target
+
+def msvc_version(compiler):
+ """Return version major and minor of compiler instance if it is
+ MSVC, raise an exception otherwise."""
+ if not compiler.compiler_type == "msvc":
+ raise ValueError("Compiler instance is not msvc (%s)"\
+ % compiler.compiler_type)
+ return compiler._MSVCCompiler__version
+
+def get_build_architecture():
+ # Importing distutils.msvccompiler triggers a warning on non-Windows
+ # systems, so delay the import to here.
+ from distutils.msvccompiler import get_build_architecture
+ return get_build_architecture()
+
+
+_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'}
+
+
+def sanitize_cxx_flags(cxxflags):
+ '''
+ Some flags are valid for C but not C++. Prune them.
+ '''
+ return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
+
+
+def exec_mod_from_location(modname, modfile):
+ '''
+ Use importlib machinery to import a module `modname` from the file
+ `modfile`. Depending on the `spec.loader`, the module may not be
+ registered in sys.modules.
+ '''
+ spec = importlib.util.spec_from_file_location(modname, modfile)
+ foo = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(foo)
+ return foo
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py
new file mode 100644
index 00000000..68239495
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/msvc9compiler.py
@@ -0,0 +1,63 @@
+import os
+from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler
+
+from .system_info import platform_bits
+
+
+def _merge(old, new):
+ """Concatenate two environment paths avoiding repeats.
+
+ Here `old` is the environment string before the base class initialize
+ function is called and `new` is the string after the call. The new string
+ will be a fixed string if it is not obtained from the current environment,
+ or the same as the old string if obtained from the same environment. The aim
+ here is not to append the new string if it is already contained in the old
+ string so as to limit the growth of the environment string.
+
+ Parameters
+ ----------
+ old : string
+ Previous environment string.
+ new : string
+ New environment string.
+
+ Returns
+ -------
+ ret : string
+ Updated environment string.
+
+ """
+ if not old:
+ return new
+ if new in old:
+ return old
+
+ # Neither new nor old is empty. Give old priority.
+ return ';'.join([old, new])
+
+
+class MSVCCompiler(_MSVCCompiler):
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ _MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+ def initialize(self, plat_name=None):
+ # The 'lib' and 'include' variables may be overwritten
+ # by MSVCCompiler.initialize, so save them for later merge.
+ environ_lib = os.getenv('lib')
+ environ_include = os.getenv('include')
+ _MSVCCompiler.initialize(self, plat_name)
+
+ # Merge current and previous values of 'lib' and 'include'
+ os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
+ os.environ['include'] = _merge(environ_include, os.environ['include'])
+
+ # msvc9 building for 32 bits requires SSE2 to work around a
+ # compiler bug.
+ if platform_bits == 32:
+ self.compile_options += ['/arch:SSE2']
+ self.compile_options_debug += ['/arch:SSE2']
+
+ def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
+ ld_args.append('/MANIFEST')
+ _MSVCCompiler.manifest_setup_ldargs(self, output_filename,
+ build_temp, ld_args)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py
new file mode 100644
index 00000000..2b93221b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/msvccompiler.py
@@ -0,0 +1,76 @@
+import os
+from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler
+
+from .system_info import platform_bits
+
+
+def _merge(old, new):
+ """Concatenate two environment paths avoiding repeats.
+
+ Here `old` is the environment string before the base class initialize
+ function is called and `new` is the string after the call. The new string
+ will be a fixed string if it is not obtained from the current environment,
+ or the same as the old string if obtained from the same environment. The aim
+ here is not to append the new string if it is already contained in the old
+ string so as to limit the growth of the environment string.
+
+ Parameters
+ ----------
+ old : string
+ Previous environment string.
+ new : string
+ New environment string.
+
+ Returns
+ -------
+ ret : string
+ Updated environment string.
+
+ """
+ if new in old:
+ return old
+ if not old:
+ return new
+
+ # Neither new nor old is empty. Give old priority.
+ return ';'.join([old, new])
+
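+ # Example (illustrative) of the merge semantics:
+ #
+ #     _merge('', 'B')      # -> 'B'
+ #     _merge('A;B', 'B')   # -> 'A;B'   (already present)
+ #     _merge('A', 'B')     # -> 'A;B'
+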
+
+class MSVCCompiler(_MSVCCompiler):
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ _MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+ def initialize(self):
+ # The 'lib' and 'include' variables may be overwritten
+ # by MSVCCompiler.initialize, so save them for later merge.
+ environ_lib = os.getenv('lib', '')
+ environ_include = os.getenv('include', '')
+ _MSVCCompiler.initialize(self)
+
+ # Merge current and previous values of 'lib' and 'include'
+ os.environ['lib'] = _merge(environ_lib, os.environ['lib'])
+ os.environ['include'] = _merge(environ_include, os.environ['include'])
+
+ # msvc9 building for 32 bits requires SSE2 to work around a
+ # compiler bug.
+ if platform_bits == 32:
+ self.compile_options += ['/arch:SSE2']
+ self.compile_options_debug += ['/arch:SSE2']
+
+
+def lib_opts_if_msvc(build_cmd):
+ """ Add flags if we are using MSVC compiler
+
+ We can't see `build_cmd` in our scope, because we have not initialized
+ the distutils build command, so use this deferred calculation to run
+ when we are building the library.
+ """
+ if build_cmd.compiler.compiler_type != 'msvc':
+ return []
+ # Explicitly disable whole-program optimization.
+ flags = ['/GL-']
+ # Disable voltbl section for vc142 to allow link using mingw-w64; see:
+ # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171
+ if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']):
+ flags.append('-d2VolatileMetadata-')
+ return flags
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py b/venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py
new file mode 100644
index 00000000..f6e3ad39
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/npy_pkg_config.py
@@ -0,0 +1,437 @@
+import sys
+import re
+import os
+
+from configparser import RawConfigParser
+
+__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
+ 'read_config', 'parse_flags']
+
+_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
+
+class FormatError(OSError):
+ """
+ Exception thrown when there is a problem parsing a configuration file.
+
+ """
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+class PkgNotFound(OSError):
+ """Exception raised when a package can not be located."""
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+def parse_flags(line):
+ """
+ Parse a line from a config file containing compile flags.
+
+ Parameters
+ ----------
+ line : str
+ A single line containing one or more compile flags.
+
+ Returns
+ -------
+ d : dict
+ Dictionary of parsed flags, split into relevant categories.
+ These categories are the keys of `d`:
+
+ * 'include_dirs'
+ * 'library_dirs'
+ * 'libraries'
+ * 'macros'
+ * 'ignored'
+
+ """
+ d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+ 'macros': [], 'ignored': []}
+
+ flags = (' ' + line).split(' -')
+ for flag in flags:
+ flag = '-' + flag
+ if len(flag) > 1: # skip the empty token from the leading ' -' split
+ if flag.startswith('-I'):
+ d['include_dirs'].append(flag[2:].strip())
+ elif flag.startswith('-L'):
+ d['library_dirs'].append(flag[2:].strip())
+ elif flag.startswith('-l'):
+ d['libraries'].append(flag[2:].strip())
+ elif flag.startswith('-D'):
+ d['macros'].append(flag[2:].strip())
+ else:
+ d['ignored'].append(flag)
+
+ return d
+
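+ # Example (illustrative):
+ #
+ #     parse_flags('-I/usr/include -L/usr/lib -lm -DNDEBUG')
+ #     # -> {'include_dirs': ['/usr/include'], 'library_dirs': ['/usr/lib'],
+ #     #     'libraries': ['m'], 'macros': ['NDEBUG'], 'ignored': []}
+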
+def _escape_backslash(val):
+ return val.replace('\\', '\\\\')
+
+class LibraryInfo:
+ """
+ Object containing build information about a library.
+
+ Parameters
+ ----------
+ name : str
+ The library name.
+ description : str
+ Description of the library.
+ version : str
+ Version string.
+ sections : dict
+ The sections of the configuration file for the library. The keys are
+ the section headers, the values the text under each header.
+ vars : class instance
+ A `VariableSet` instance, which contains ``(name, value)`` pairs for
+ variables defined in the configuration file for the library.
+ requires : sequence, optional
+ The required libraries for the library to be installed.
+
+ Notes
+ -----
+ All input parameters (except "sections" which is a method) are available as
+ attributes of the same name.
+
+ """
+ def __init__(self, name, description, version, sections, vars, requires=None):
+ self.name = name
+ self.description = description
+ if requires:
+ self.requires = requires
+ else:
+ self.requires = []
+ self.version = version
+ self._sections = sections
+ self.vars = vars
+
+ def sections(self):
+ """
+ Return the section headers of the config file.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ keys : list of str
+ The list of section headers.
+
+ """
+ return list(self._sections.keys())
+
+ def cflags(self, section="default"):
+ val = self.vars.interpolate(self._sections[section]['cflags'])
+ return _escape_backslash(val)
+
+ def libs(self, section="default"):
+ val = self.vars.interpolate(self._sections[section]['libs'])
+ return _escape_backslash(val)
+
+ def __str__(self):
+ m = ['Name: %s' % self.name, 'Description: %s' % self.description]
+ if self.requires:
+ m.append('Requires: %s' % ",".join(self.requires))
+ else:
+ m.append('Requires:')
+ m.append('Version: %s' % self.version)
+
+ return "\n".join(m)
+
+class VariableSet:
+ """
+ Container object for the variables defined in a config file.
+
+ `VariableSet` can be used as a plain dictionary, with the variable names
+ as keys.
+
+ Parameters
+ ----------
+ d : dict
+ Dict of items in the "variables" section of the configuration file.
+
+ """
+ def __init__(self, d):
+ self._raw_data = dict([(k, v) for k, v in d.items()])
+
+ self._re = {}
+ self._re_sub = {}
+
+ self._init_parse()
+
+ def _init_parse(self):
+ for k, v in self._raw_data.items():
+ self._init_parse_var(k, v)
+
+ def _init_parse_var(self, name, value):
+ self._re[name] = re.compile(r'\$\{%s\}' % name)
+ self._re_sub[name] = value
+
+ def interpolate(self, value):
+ # Brute force: we keep interpolating until there is no '${var}' anymore
+ # or until interpolated string is equal to input string
+ def _interpolate(value):
+ for k in self._re.keys():
+ value = self._re[k].sub(self._re_sub[k], value)
+ return value
+ while _VAR.search(value):
+ nvalue = _interpolate(value)
+ if nvalue == value:
+ break
+ value = nvalue
+
+ return value
+
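+ # Example (illustrative):
+ #
+ #     vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
+ #     vs.interpolate('-L${libdir} -lm')   # -> '-L/usr/lib -lm'
+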
+ def variables(self):
+ """
+ Return the list of variable names.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ names : list of str
+ The names of all variables in the `VariableSet` instance.
+
+ """
+ return list(self._raw_data.keys())
+
+ # Emulate a dict to set/get variables values
+ def __getitem__(self, name):
+ return self._raw_data[name]
+
+ def __setitem__(self, name, value):
+ self._raw_data[name] = value
+ self._init_parse_var(name, value)
+
+def parse_meta(config):
+ if not config.has_section('meta'):
+ raise FormatError("No meta section found !")
+
+ d = dict(config.items('meta'))
+
+ for k in ['name', 'description', 'version']:
+ if not k in d:
+ raise FormatError("Option %s (section [meta]) is mandatory, "
+ "but not found" % k)
+
+ if not 'requires' in d:
+ d['requires'] = []
+
+ return d
+
+def parse_variables(config):
+ if not config.has_section('variables'):
+ raise FormatError("No variables section found !")
+
+ d = {}
+
+ for name, value in config.items("variables"):
+ d[name] = value
+
+ return VariableSet(d)
+
+ def parse_sections(config):
+ # Return the [meta] section plus the remaining (non-special) sections.
+ meta_d = parse_meta(config)
+ r = {s: dict(config.items(s))
+ for s in config.sections() if s not in ('meta', 'variables')}
+ return meta_d, r
+
+def pkg_to_filename(pkg_name):
+ return "%s.ini" % pkg_name
+
+def parse_config(filename, dirs=None):
+ if dirs:
+ filenames = [os.path.join(d, filename) for d in dirs]
+ else:
+ filenames = [filename]
+
+ config = RawConfigParser()
+
+ n = config.read(filenames)
+ if not n:
+ raise PkgNotFound("Could not find file(s) %s" % str(filenames))
+
+ # Parse meta and variables sections
+ meta = parse_meta(config)
+
+ vars = {}
+ if config.has_section('variables'):
+ for name, value in config.items("variables"):
+ vars[name] = _escape_backslash(value)
+
+ # Parse "normal" sections
+ secs = [s for s in config.sections() if not s in ['meta', 'variables']]
+ sections = {}
+
+ requires = {}
+ for s in secs:
+ d = {}
+ if config.has_option(s, "requires"):
+ requires[s] = config.get(s, 'requires')
+
+ for name, value in config.items(s):
+ d[name] = value
+ sections[s] = d
+
+ return meta, vars, sections, requires
+
+def _read_config_imp(filenames, dirs=None):
+ def _read_config(f):
+ meta, vars, sections, reqs = parse_config(f, dirs)
+ # recursively add sections and variables of required libraries
+ for rname, rvalue in reqs.items():
+ nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
+
+ # Update var dict for variables not in 'top' config file
+ for k, v in nvars.items():
+ if not k in vars:
+ vars[k] = v
+
+ # Update sec dict
+ for oname, ovalue in nsections[rname].items():
+ if ovalue:
+ sections[rname][oname] += ' %s' % ovalue
+
+ return meta, vars, sections, reqs
+
+ meta, vars, sections, reqs = _read_config(filenames)
+
+ # FIXME: document this. If pkgname is defined in the variables section, and
+ # there is no pkgdir variable defined, pkgdir is automatically defined to
+ # the path of pkgname. This requires the package to be imported to work
+ if not 'pkgdir' in vars and "pkgname" in vars:
+ pkgname = vars["pkgname"]
+ if not pkgname in sys.modules:
+ raise ValueError("You should import %s to get information on %s" %
+ (pkgname, meta["name"]))
+
+ mod = sys.modules[pkgname]
+ vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
+
+ return LibraryInfo(name=meta["name"], description=meta["description"],
+ version=meta["version"], sections=sections, vars=VariableSet(vars))
+
+ # Trivial cache for LibraryInfo instances. To be really efficient, the
+ # cache should be handled in read_config, since the same file can be
+ # parsed many times outside LibraryInfo creation, but I doubt this will
+ # be a problem in practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+ """
+ Return library info for a package from its configuration file.
+
+ Parameters
+ ----------
+ pkgname : str
+ Name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini).
+ dirs : sequence, optional
+ If given, should be a sequence of directories - usually including
+ the NumPy base directory - where to look for npy-pkg-config files.
+
+ Returns
+ -------
+ pkginfo : class instance
+ The `LibraryInfo` instance containing the build information.
+
+ Raises
+ ------
+ PkgNotFound
+ If the package is not found.
+
+ See Also
+ --------
+ misc_util.get_info, misc_util.get_pkg_info
+
+ Examples
+ --------
+ >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+ >>> type(npymath_info)
+ <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+ >>> print(npymath_info)
+ Name: npymath
+ Description: Portable, core math library implementing C99 standard
+ Requires:
+ Version: 0.1 #random
+
+ """
+ try:
+ return _CACHE[pkgname]
+ except KeyError:
+ v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+ _CACHE[pkgname] = v
+ return v
+
+# TODO:
+# - implements version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+ from optparse import OptionParser
+ import glob
+
+ parser = OptionParser()
+ parser.add_option("--cflags", dest="cflags", action="store_true",
+ help="output all preprocessor and compiler flags")
+ parser.add_option("--libs", dest="libs", action="store_true",
+ help="output all linker flags")
+ parser.add_option("--use-section", dest="section",
+ help="use this section instead of default for options")
+ parser.add_option("--version", dest="version", action="store_true",
+ help="output version")
+ parser.add_option("--atleast-version", dest="min_version",
+ help="Minimal version")
+ parser.add_option("--list-all", dest="list_all", action="store_true",
+ help="Minimal version")
+ parser.add_option("--define-variable", dest="define_variable",
+ help="Replace variable with the given value")
+
+ (options, args) = parser.parse_args(sys.argv)
+
+ if len(args) < 2:
+ raise ValueError("Expect package name on the command line:")
+
+ if options.list_all:
+ files = glob.glob("*.ini")
+ for f in files:
+ info = read_config(f)
+ print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+ pkg_name = args[1]
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d:
+ info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+ else:
+ info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+ if options.section:
+ section = options.section
+ else:
+ section = "default"
+
+ if options.define_variable:
+ m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+ if not m:
+ raise ValueError("--define-variable option should be of "
+ "the form --define-variable=foo=bar")
+ else:
+ name = m.group(1)
+ value = m.group(2)
+ info.vars[name] = value
+
+ if options.cflags:
+ print(info.cflags(section))
+ if options.libs:
+ print(info.libs(section))
+ if options.version:
+ print(info.version)
+ if options.min_version:
+ print(info.version >= options.min_version)
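+
+# Example invocations of this emulator (illustrative; run from a directory
+# containing the relevant .ini files, or with NPY_PKG_CONFIG_PATH set):
+#
+#   python npy_pkg_config.py --cflags npymath
+#   python npy_pkg_config.py --libs --use-section default npymath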
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py b/venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py
new file mode 100644
index 00000000..ea818265
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,17 @@
+# XXX: Handle setuptools ?
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+ def __init__(self, attrs = None):
+ # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+ self.scons_data = []
+ # A list of installable libraries
+ self.installed_libraries = []
+ # A dict of pkg_config files to generate/install
+ self.installed_pkg_config = {}
+ Distribution.__init__(self, attrs)
+
+ def has_scons_scripts(self):
+ return bool(self.scons_data)
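+
+# Illustrative sketch (not part of the original file): distutils selects
+# this class through the 'distclass' keyword, e.g.
+#
+#     from distutils.core import setup
+#     setup(name='example', distclass=NumpyDistribution)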
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py
new file mode 100644
index 00000000..48051810
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/pathccompiler.py
@@ -0,0 +1,21 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+ """
+ PathScale compiler compatible with a gcc-built Python.
+ """
+
+ compiler_type = 'pathcc'
+ cc_exe = 'pathcc'
+ cxx_exe = 'pathCC'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+ cc_compiler = self.cc_exe
+ cxx_compiler = self.cxx_exe
+ self.set_executables(compiler=cc_compiler,
+ compiler_so=cc_compiler,
+ compiler_cxx=cxx_compiler,
+ linker_exe=cc_compiler,
+ linker_so=cc_compiler + ' -shared')
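+
+# Illustrative note (not part of the original file): distutils selects a
+# compiler class by its compiler_type, so a build would pick this one with
+# e.g. 'python setup.py build --compiler=pathcc', assuming numpy.distutils
+# has registered 'pathcc' in its compiler table.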
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/setup.py b/venv/lib/python3.9/site-packages/numpy/distutils/setup.py
new file mode 100644
index 00000000..522756fc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/setup.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('distutils', parent_package, top_path)
+ config.add_subpackage('command')
+ config.add_subpackage('fcompiler')
+ config.add_subpackage('tests')
+ config.add_data_files('site.cfg')
+ config.add_data_files('mingw/gfortran_vs2003_hack.c')
+ config.add_data_dir('checks')
+ config.add_data_files('*.pyi')
+ config.make_config_py()
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/system_info.py b/venv/lib/python3.9/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 00000000..d5a1687d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,3172 @@
+#!/usr/bin/env python3
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Usage:
+ info_dict = get_info(<name>)
+ where <name> is a string 'atlas','x11','fftw','lapack','blas',
+ 'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+ see the definition of get_info() function below.
+
+ The returned info_dict is a dictionary compatible with
+ distutils.setup keyword arguments. If info_dict == {}, then the
+ requested resource is not available (system_info could not find it).
+
+ Several *_info classes use an environment variable to specify
+ the locations of software. When the corresponding environment
+ variable is set to 'None', the software will be ignored, even
+ when it is available on the system.
+
+Global parameters:
+ system_info.search_static_first - search static libraries (.a)
+ in precedence to shared ones (.so, .sl) if enabled.
+ system_info.verbosity - output the results to stdout if enabled.
+
+The file 'site.cfg' is looked for in
+
+1) Directory of main setup.py file being run.
+2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
+3) System wide directory (location of this file...)
+
+The first one found is used to get system configuration options. The
+format is that used by ConfigParser (i.e., Windows .INI style). The
+section ALL is not intended for general use.
+
+Appropriate defaults are used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. DEFAULT section in site.cfg
+ 4. System default search paths (see ``default_*`` variables below).
+Only the first complete match is returned.
+
+Currently, the following classes are available, along with their section names:
+
+ Numeric_info:Numeric
+ _numpy_info:Numeric
+ _pkg_config_info:None
+ accelerate_info:accelerate
+ agg2_info:agg2
+ amd_info:amd
+ atlas_3_10_blas_info:atlas
+ atlas_3_10_blas_threads_info:atlas
+ atlas_3_10_info:atlas
+ atlas_3_10_threads_info:atlas
+ atlas_blas_info:atlas
+ atlas_blas_threads_info:atlas
+ atlas_info:atlas
+ atlas_threads_info:atlas
+ blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
+ blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS)
+ blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)
+ blas_info:blas
+ blas_mkl_info:mkl
+ blas_opt_info:ALL # usage recommended
+ blas_src_info:blas_src
+ blis_info:blis
+ boost_python_info:boost_python
+ dfftw_info:fftw
+ dfftw_threads_info:fftw
+ djbfft_info:djbfft
+ f2py_info:ALL
+ fft_opt_info:ALL
+ fftw2_info:fftw
+ fftw3_info:fftw3
+ fftw_info:fftw
+ fftw_threads_info:fftw
+ flame_info:flame
+ freetype2_info:freetype2
+ gdk_2_info:gdk_2
+ gdk_info:gdk
+ gdk_pixbuf_2_info:gdk_pixbuf_2
+ gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2
+ gdk_x11_2_info:gdk_x11_2
+ gtkp_2_info:gtkp_2
+ gtkp_x11_2_info:gtkp_x11_2
+ lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
+ lapack_atlas_3_10_info:atlas
+ lapack_atlas_3_10_threads_info:atlas
+ lapack_atlas_info:atlas
+ lapack_atlas_threads_info:atlas
+ lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK)
+ lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)
+ lapack_info:lapack
+ lapack_mkl_info:mkl
+ lapack_opt_info:ALL # usage recommended
+ lapack_src_info:lapack_src
+ mkl_info:mkl
+ numarray_info:numarray
+ numerix_info:numerix
+ numpy_info:numpy
+ openblas64__info:openblas64_
+ openblas64__lapack_info:openblas64_
+ openblas_clapack_info:openblas
+ openblas_ilp64_info:openblas_ilp64
+ openblas_ilp64_lapack_info:openblas_ilp64
+ openblas_info:openblas
+ openblas_lapack_info:openblas
+ sfftw_info:fftw
+ sfftw_threads_info:fftw
+ system_info:ALL
+ umfpack_info:umfpack
+ wx_info:wx
+ x11_info:x11
+ xft_info:xft
+
+Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER
+and NPY_LAPACK_ORDER environment variables to determine the order in which
+specific BLAS and LAPACK libraries are searched for.
+
+This search (or autodetection) can be bypassed by defining the environment
+variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the
+exact linker flags to use (the language will be set to F77). This is useful,
+for example, for building against Netlib BLAS/LAPACK or stub files, in order
+to be able to switch BLAS and LAPACK implementations at runtime. If using
+this to build NumPy itself, it is recommended to also define NPY_CBLAS_LIBS
+(assuming your BLAS library has a CBLAS interface) to enable CBLAS usage for
+matrix multiplication (which is unoptimized otherwise).
+
+Example:
+----------
+[DEFAULT]
+# default section
+library_dirs = /usr/lib:/usr/local/lib:/opt/lib
+include_dirs = /usr/include:/usr/local/include:/opt/include
+src_dirs = /usr/local/src:/opt/src
+# search static libraries (.a) in preference to shared ones (.so)
+search_static_first = 0
+
+[fftw]
+libraries = rfftw, fftw
+
+[atlas]
+library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
+# for overriding the names of the atlas libraries
+libraries = lapack, f77blas, cblas, atlas
+
+[x11]
+library_dirs = /usr/X11R6/lib
+include_dirs = /usr/X11R6/include
+----------
+
+Note that the ``libraries`` key is the default setting for libraries.
+
+Authors:
+ Pearu Peterson <pearu@cens.ioc.ee>, February 2002
+ David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+"""
+import sys
+import os
+import re
+import copy
+import warnings
+import subprocess
+import textwrap
+
+from glob import glob
+from functools import reduce
+from configparser import NoOptionError
+from configparser import RawConfigParser as ConfigParser
+# It seems that some people are importing ConfigParser from here, so it is
+# good to keep the class name. Use of RawConfigParser is needed in
+# order to be able to load path names with a percent sign in them, like
+# `feature%2Fcool`, which is common in git flow branch names.
+
+from distutils.errors import DistutilsError
+from distutils.dist import Distribution
+import sysconfig
+from numpy.distutils import log
+from distutils.util import get_platform
+
+from numpy.distutils.exec_command import (
+ find_executable, filepath_from_subprocess_output,
+ )
+from numpy.distutils.misc_util import (is_sequence, is_string,
+ get_shared_lib_extension)
+from numpy.distutils.command.config import config as cmd_config
+from numpy.distutils import customized_ccompiler as _customized_ccompiler
+from numpy.distutils import _shell_utils
+import distutils.ccompiler
+import tempfile
+import shutil
+
+__all__ = ['system_info']
+
+# Determine number of bits
+import platform
+_bits = {'32bit': 32, '64bit': 64}
+platform_bits = _bits[platform.architecture()[0]]
+
+
+global_compiler = None
+
+def customized_ccompiler():
+ global global_compiler
+ if not global_compiler:
+ global_compiler = _customized_ccompiler()
+ return global_compiler
+
+
+def _c_string_literal(s):
+ """
+ Convert a python string into a literal suitable for inclusion into C code
+ """
+ # only these three characters are forbidden in C strings
+ s = s.replace('\\', r'\\')
+ s = s.replace('"', r'\"')
+ s = s.replace('\n', r'\n')
+ return '"{}"'.format(s)
+
+
+def libpaths(paths, bits):
+ """Return a list of library paths valid on 32 or 64 bit systems.
+
+ Inputs:
+ paths : sequence
+ A sequence of strings (typically paths)
+ bits : int
+ An integer, the only valid values are 32 or 64. A ValueError exception
+ is raised otherwise.
+
+ Examples:
+
+ Consider a list of directories
+ >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
+
+ For a 32-bit platform, this is already valid:
+ >>> np.distutils.system_info.libpaths(paths,32)
+ ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
+
+ On 64-bit platforms, each path is tried with a '64' suffix first:
+ >>> np.distutils.system_info.libpaths(paths,64)
+ ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
+ '/usr/lib64', '/usr/lib']
+ """
+ if bits not in (32, 64):
+ raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
+
+ # Handle 32bit case
+ if bits == 32:
+ return paths
+
+ # Handle 64bit case
+ out = []
+ for p in paths:
+ out.extend([p + '64', p])
+
+ return out
+
+
+if sys.platform == 'win32':
+ default_lib_dirs = ['C:\\',
+ os.path.join(sysconfig.get_config_var('exec_prefix'),
+ 'libs')]
+ default_runtime_dirs = []
+ default_include_dirs = []
+ default_src_dirs = ['.']
+ default_x11_lib_dirs = []
+ default_x11_include_dirs = []
+ _include_dirs = [
+ 'include',
+ 'include/suitesparse',
+ ]
+ _lib_dirs = [
+ 'lib',
+ ]
+
+ _include_dirs = [d.replace('/', os.sep) for d in _include_dirs]
+ _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs]
+ def add_system_root(library_root):
+ """Add a package manager root to the include directories"""
+ global default_lib_dirs
+ global default_include_dirs
+
+ library_root = os.path.normpath(library_root)
+
+ default_lib_dirs.extend(
+ os.path.join(library_root, d) for d in _lib_dirs)
+ default_include_dirs.extend(
+ os.path.join(library_root, d) for d in _include_dirs)
+
+ # vcpkg is the de facto package manager on Windows for C/C++
+ # libraries. If it is on the PATH, then we append its paths here.
+ vcpkg = shutil.which('vcpkg')
+ if vcpkg:
+ vcpkg_dir = os.path.dirname(vcpkg)
+ if platform.architecture()[0] == '32bit':
+ specifier = 'x86'
+ else:
+ specifier = 'x64'
+
+ vcpkg_installed = os.path.join(vcpkg_dir, 'installed')
+ for vcpkg_root in [
+ os.path.join(vcpkg_installed, specifier + '-windows'),
+ os.path.join(vcpkg_installed, specifier + '-windows-static'),
+ ]:
+ add_system_root(vcpkg_root)
+
+ # Conda is another popular package manager that provides libraries
+ conda = shutil.which('conda')
+ if conda:
+ conda_dir = os.path.dirname(conda)
+ add_system_root(os.path.join(conda_dir, '..', 'Library'))
+ add_system_root(os.path.join(conda_dir, 'Library'))
+
+else:
+ default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
+ '/opt/local/lib', '/sw/lib'], platform_bits)
+ default_runtime_dirs = []
+ default_include_dirs = ['/usr/local/include',
+ '/opt/include',
+ # path of umfpack under macports
+ '/opt/local/include/ufsparse',
+ '/opt/local/include', '/sw/include',
+ '/usr/include/suitesparse']
+ default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
+
+ default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
+ '/usr/lib'], platform_bits)
+ default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include']
+
+ if os.path.exists('/usr/lib/X11'):
+ globbed_x11_dir = glob('/usr/lib/*/libX11.so')
+ if globbed_x11_dir:
+ x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
+ default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
+ default_x11_include_dirs.extend(['/usr/lib/X11/include',
+ '/usr/include/X11'])
+
+ with open(os.devnull, 'w') as tmp:
+ try:
+ p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE,
+ stderr=tmp)
+ except (OSError, DistutilsError):
+ # OSError if gcc is not installed, or SandboxViolation (DistutilsError
+ # subclass) if an old setuptools bug is triggered (see gh-3160).
+ pass
+ else:
+ triplet = str(p.communicate()[0].decode().strip())
+ if p.returncode == 0:
+ # gcc supports the "-print-multiarch" option
+ default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+ default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+
+
+if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
+ default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
+ default_include_dirs.append(os.path.join(sys.prefix, 'include'))
+ default_src_dirs.append(os.path.join(sys.prefix, 'src'))
+
+default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
+default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
+default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
+default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
+
+so_ext = get_shared_lib_extension()
+
+
+def get_standard_file(fname):
+ """Returns a list of files named 'fname' from
+ 1) System-wide directory (directory-location of this module)
+ 2) User's HOME directory (os.environ['HOME'])
+ 3) Local directory
+ """
+ # System-wide file
+ filenames = []
+ try:
+ f = __file__
+ except NameError:
+ f = sys.argv[0]
+ sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
+ fname)
+ if os.path.isfile(sysfile):
+ filenames.append(sysfile)
+
+ # Home directory: look for the user config file
+ try:
+ f = os.path.expanduser('~')
+ except KeyError:
+ pass
+ else:
+ user_file = os.path.join(f, fname)
+ if os.path.isfile(user_file):
+ filenames.append(user_file)
+
+ # Local file
+ if os.path.isfile(fname):
+ filenames.append(os.path.abspath(fname))
+
+ return filenames
+
+
+def _parse_env_order(base_order, env):
+ """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
+
+ This method will sequence the environment variable and check for their
+ individual elements in `base_order`.
+
+ The items in the environment variable may be negated via '^item' or '!itema,itemb'.
+ It must start with ^/! to negate all options.
+
+ Raises
+ ------
+ ValueError: for mixed negated and non-negated orders or multiple negated orders
+
+ Parameters
+ ----------
+ base_order : list of str
+ the base list of orders
+ env : str
+ the environment variable to be parsed; if it is not set, `base_order` is returned
+
+ Returns
+ -------
+ allow_order : list of str
+ allowed orders in lower-case
+ unknown_order : list of str
+ for values not overlapping with `base_order`
+ """
+ order_str = os.environ.get(env, None)
+
+ # ensure all base-orders are lower-case (for easier comparison)
+ base_order = [order.lower() for order in base_order]
+ if order_str is None:
+ return base_order, []
+
+ neg = order_str.startswith('^') or order_str.startswith('!')
+ # Check format
+ order_str_l = list(order_str)
+ sum_neg = order_str_l.count('^') + order_str_l.count('!')
+ if neg:
+ if sum_neg > 1:
+ raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+ # remove prefix
+ order_str = order_str[1:]
+ elif sum_neg > 0:
+ raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}")
+
+ # Split and lower case
+ orders = order_str.lower().split(',')
+
+ # to inform the caller about non-overlapping elements
+ unknown_order = []
+
+ # if negated, we have to remove from the order
+ if neg:
+ allow_order = base_order.copy()
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order in allow_order:
+ allow_order.remove(order)
+
+ else:
+ allow_order = []
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order not in allow_order:
+ allow_order.append(order)
+
+ return allow_order, unknown_order
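+
+# Example (illustrative): with os.environ['NPY_BLAS_ORDER'] == '^openblas,atlas'
+# and base_order=['mkl', 'openblas', 'atlas'], _parse_env_order returns
+# (['mkl'], []): the negated entries are removed from the base order, and any
+# name not present in base_order would be reported in the second list.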
+
+
+def get_info(name, notfound_action=0):
+ """
+ notfound_action:
+ 0 - do nothing
+ 1 - display warning message
+ 2 - raise error
+ """
+ cl = {'armpl': armpl_info,
+ 'blas_armpl': blas_armpl_info,
+ 'lapack_armpl': lapack_armpl_info,
+ 'fftw3_armpl': fftw3_armpl_info,
+ 'atlas': atlas_info, # use lapack_opt or blas_opt instead
+ 'atlas_threads': atlas_threads_info, # ditto
+ 'atlas_blas': atlas_blas_info,
+ 'atlas_blas_threads': atlas_blas_threads_info,
+ 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
+ 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
+ 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
+ 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
+ 'atlas_3_10_blas': atlas_3_10_blas_info,
+ 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
+ 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
+ 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
+ 'flame': flame_info, # use lapack_opt instead
+ 'mkl': mkl_info,
+ # openblas which may or may not have embedded lapack
+ 'openblas': openblas_info, # use blas_opt instead
+ # openblas with embedded lapack
+ 'openblas_lapack': openblas_lapack_info, # use blas_opt instead
+ 'openblas_clapack': openblas_clapack_info, # use blas_opt instead
+ 'blis': blis_info, # use blas_opt instead
+ 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
+ 'blas_mkl': blas_mkl_info, # use blas_opt instead
+ 'accelerate': accelerate_info, # use blas_opt instead
+ 'openblas64_': openblas64__info,
+ 'openblas64__lapack': openblas64__lapack_info,
+ 'openblas_ilp64': openblas_ilp64_info,
+ 'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
+ 'x11': x11_info,
+ 'fft_opt': fft_opt_info,
+ 'fftw': fftw_info,
+ 'fftw2': fftw2_info,
+ 'fftw3': fftw3_info,
+ 'dfftw': dfftw_info,
+ 'sfftw': sfftw_info,
+ 'fftw_threads': fftw_threads_info,
+ 'dfftw_threads': dfftw_threads_info,
+ 'sfftw_threads': sfftw_threads_info,
+ 'djbfft': djbfft_info,
+ 'blas': blas_info, # use blas_opt instead
+ 'lapack': lapack_info, # use lapack_opt instead
+ 'lapack_src': lapack_src_info,
+ 'blas_src': blas_src_info,
+ 'numpy': numpy_info,
+ 'f2py': f2py_info,
+ 'Numeric': Numeric_info,
+ 'numeric': Numeric_info,
+ 'numarray': numarray_info,
+ 'numerix': numerix_info,
+ 'lapack_opt': lapack_opt_info,
+ 'lapack_ilp64_opt': lapack_ilp64_opt_info,
+ 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
+ 'lapack64__opt': lapack64__opt_info,
+ 'blas_opt': blas_opt_info,
+ 'blas_ilp64_opt': blas_ilp64_opt_info,
+ 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
+ 'blas64__opt': blas64__opt_info,
+ 'boost_python': boost_python_info,
+ 'agg2': agg2_info,
+ 'wx': wx_info,
+ 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
+ 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
+ 'gdk_pixbuf_2': gdk_pixbuf_2_info,
+ 'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
+ 'gdk': gdk_info,
+ 'gdk_2': gdk_2_info,
+ 'gdk-2.0': gdk_2_info,
+ 'gdk_x11_2': gdk_x11_2_info,
+ 'gdk-x11-2.0': gdk_x11_2_info,
+ 'gtkp_x11_2': gtkp_x11_2_info,
+ 'gtk+-x11-2.0': gtkp_x11_2_info,
+ 'gtkp_2': gtkp_2_info,
+ 'gtk+-2.0': gtkp_2_info,
+ 'xft': xft_info,
+ 'freetype2': freetype2_info,
+ 'umfpack': umfpack_info,
+ 'amd': amd_info,
+ }.get(name.lower(), system_info)
+ return cl().get_info(notfound_action)
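+
+def _example_lapack_flags():
+ # Illustrative sketch, not part of the original module: fetch the
+ # optimized LAPACK configuration, warning (notfound_action=1) when
+ # nothing is found; get_info() returns {} in that case.
+ info = get_info('lapack_opt', notfound_action=1)
+ return info.get('libraries', []), info.get('library_dirs', [])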
+
+
+class NotFoundError(DistutilsError):
+ """Some third-party program or library is not found."""
+
+
+class AliasedOptionError(DistutilsError):
+ """
+ Aliased options in a config file section must not coexist.
+ In section '{section}' we found multiple appearances of options {options}."""
+
+
+class AtlasNotFoundError(NotFoundError):
+ """
+ Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [atlas]) or by setting
+ the ATLAS environment variable."""
+
+
+class FlameNotFoundError(NotFoundError):
+ """
+ FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [flame])."""
+
+
+class LapackNotFoundError(NotFoundError):
+ """
+ Lapack (http://www.netlib.org/lapack/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [lapack]) or by setting
+ the LAPACK environment variable."""
+
+
+class LapackSrcNotFoundError(LapackNotFoundError):
+ """
+ Lapack (http://www.netlib.org/lapack/) sources not found.
+ Directories to search for the sources can be specified in the
+ numpy/distutils/site.cfg file (section [lapack_src]) or by setting
+ the LAPACK_SRC environment variable."""
+
+
+class LapackILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Lapack libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
+
+class BlasOptNotFoundError(NotFoundError):
+ """
+ Optimized (vendor) Blas libraries are not found.
+ Falling back to the netlib Blas library, which has worse performance.
+ Better performance can often be gained by switching the
+ Blas library."""
+
+class BlasNotFoundError(NotFoundError):
+ """
+ Blas (http://www.netlib.org/blas/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [blas]) or by setting
+ the BLAS environment variable."""
+
+class BlasILP64NotFoundError(NotFoundError):
+ """
+ 64-bit Blas libraries not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_, openblas_ilp64
+ """
+
+class BlasSrcNotFoundError(BlasNotFoundError):
+ """
+ Blas (http://www.netlib.org/blas/) sources not found.
+ Directories to search for the sources can be specified in the
+ numpy/distutils/site.cfg file (section [blas_src]) or by setting
+ the BLAS_SRC environment variable."""
+
+
+class FFTWNotFoundError(NotFoundError):
+ """
+ FFTW (http://www.fftw.org/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [fftw]) or by setting
+ the FFTW environment variable."""
+
+
+class DJBFFTNotFoundError(NotFoundError):
+ """
+ DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [djbfft]) or by setting
+ the DJBFFT environment variable."""
+
+
+class NumericNotFoundError(NotFoundError):
+ """
+ Numeric (https://www.numpy.org/) module not found.
+ Get it from above location, install it, and retry setup.py."""
+
+
+class X11NotFoundError(NotFoundError):
+ """X11 libraries not found."""
+
+
+class UmfpackNotFoundError(NotFoundError):
+ """
+ UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)
+ not found. Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [umfpack]) or by setting
+ the UMFPACK environment variable."""
+
+
+class system_info:
+
+ """ get_info() is the only public method. Don't use others.
+ """
+ dir_env_var = None
+ # XXX: search_static_first is disabled by default, may disappear in
+ # future unless it is proved to be useful.
+ search_static_first = 0
+ # The base-class section name is a random word "ALL" and is not really
+ # intended for general use. It cannot be None nor can it be DEFAULT as
+ # these break the ConfigParser. See gh-15338
+ section = 'ALL'
+ saved_results = {}
+
+ notfounderror = NotFoundError
+
+ def __init__(self,
+ default_lib_dirs=default_lib_dirs,
+ default_include_dirs=default_include_dirs,
+ ):
+ self.__class__.info = {}
+ self.local_prefixes = []
+ defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
+ 'include_dirs': os.pathsep.join(default_include_dirs),
+ 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
+ 'rpath': '',
+ 'src_dirs': os.pathsep.join(default_src_dirs),
+ 'search_static_first': str(self.search_static_first),
+ 'extra_compile_args': '', 'extra_link_args': ''}
+ self.cp = ConfigParser(defaults)
+ self.files = []
+ self.files.extend(get_standard_file('.numpy-site.cfg'))
+ self.files.extend(get_standard_file('site.cfg'))
+ self.parse_config_files()
+
+ if self.section is not None:
+ self.search_static_first = self.cp.getboolean(
+ self.section, 'search_static_first')
+ assert isinstance(self.search_static_first, int)
+
+ def parse_config_files(self):
+ self.cp.read(self.files)
+ if not self.cp.has_section(self.section):
+ if self.section is not None:
+ self.cp.add_section(self.section)
+
+ def calc_libraries_info(self):
+ libs = self.get_libraries()
+ dirs = self.get_lib_dirs()
+ # The extensions use runtime_library_dirs
+ r_dirs = self.get_runtime_lib_dirs()
+ # Intrinsic distutils use rpath, we simply append both entries
+ # as though they were one entry
+ r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
+ info = {}
+ for lib in libs:
+ i = self.check_libs(dirs, [lib])
+ if i is not None:
+ dict_append(info, **i)
+ else:
+ log.info('Library %s was not found. Ignoring' % (lib))
+
+ if r_dirs:
+ i = self.check_libs(r_dirs, [lib])
+ if i is not None:
+ # Swap the library keywords found over to runtime_library_dirs:
+ # the user is expected to have defined these libraries via
+ # library_dirs, and not necessarily via runtime_library_dirs
+ del i['libraries']
+ i['runtime_library_dirs'] = i.pop('library_dirs')
+ dict_append(info, **i)
+ else:
+ log.info('Runtime library %s was not found. Ignoring' % (lib))
+
+ return info
+
+ def set_info(self, **info):
+ if info:
+ lib_info = self.calc_libraries_info()
+ dict_append(info, **lib_info)
+ # Update extra information
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+ self.saved_results[self.__class__.__name__] = info
+
+ def get_option_single(self, *options):
+ """ Ensure that only one of `options` are found in the section
+
+ Parameters
+ ----------
+ *options : list of str
+ a list of options to be found in the section (``self.section``)
+
+ Returns
+ -------
+ str :
+ the option that is uniquely found in the section
+
+ Raises
+ ------
+ AliasedOptionError :
+ in case more than one of the options are found
+ """
+ found = [self.cp.has_option(self.section, opt) for opt in options]
+ if sum(found) == 1:
+ return options[found.index(True)]
+ elif sum(found) == 0:
+ # nothing was found anyway
+ return options[0]
+
+ # Else we have more than 1 key found
+ if AliasedOptionError.__doc__ is None:
+ raise AliasedOptionError()
+ raise AliasedOptionError(AliasedOptionError.__doc__.format(
+ section=self.section, options='[{}]'.format(', '.join(options))))
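+
+ # Example (illustrative): if a site.cfg section defines both 'atlas_libs'
+ # and 'libraries', get_option_single('atlas_libs', 'libraries') raises
+ # AliasedOptionError; if exactly one is present, its name is returned and
+ # can be handed to get_libs().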
+
+
+ def has_info(self):
+ return self.__class__.__name__ in self.saved_results
+
+ def calc_extra_info(self):
+ """ Updates the information in the current information with
+ respect to these flags:
+ extra_compile_args
+ extra_link_args
+ """
+ info = {}
+ for key in ['extra_compile_args', 'extra_link_args']:
+ # Get values
+ opt = self.cp.get(self.section, key)
+ opt = _shell_utils.NativeParser.split(opt)
+ if opt:
+ tmp = {key: opt}
+ dict_append(info, **tmp)
+ return info
+
+ def get_info(self, notfound_action=0):
+ """ Return a dictionary with items that are compatible
+ with numpy.distutils.setup keyword arguments.
+ """
+ flag = 0
+ if not self.has_info():
+ flag = 1
+ log.info(self.__class__.__name__ + ':')
+ if hasattr(self, 'calc_info'):
+ self.calc_info()
+ if notfound_action:
+ if not self.has_info():
+ if notfound_action == 1:
+ warnings.warn(self.notfounderror.__doc__, stacklevel=2)
+ elif notfound_action == 2:
+ raise self.notfounderror(self.notfounderror.__doc__)
+ else:
+ raise ValueError(repr(notfound_action))
+
+ if not self.has_info():
+ log.info(' NOT AVAILABLE')
+ self.set_info()
+ else:
+ log.info(' FOUND:')
+
+ res = self.saved_results.get(self.__class__.__name__)
+ if log.get_threshold() <= log.INFO and flag:
+ for k, v in res.items():
+ v = str(v)
+ if k in ['sources', 'libraries'] and len(v) > 270:
+ v = v[:120] + '...\n...\n...' + v[-120:]
+ log.info(' %s = %s', k, v)
+ log.info('')
+
+ return copy.deepcopy(res)
+
+ def get_paths(self, section, key):
+ dirs = self.cp.get(section, key).split(os.pathsep)
+ env_var = self.dir_env_var
+ if env_var:
+ if is_sequence(env_var):
+ e0 = env_var[-1]
+ for e in env_var:
+ if e in os.environ:
+ e0 = e
+ break
+ if env_var[0] != e0:
+ log.info('Setting %s=%s' % (env_var[0], e0))
+ env_var = e0
+ if env_var and env_var in os.environ:
+ d = os.environ[env_var]
+ if d == 'None':
+ log.info('Disabled %s: %s',
+ self.__class__.__name__, '(%s is None)'
+ % (env_var,))
+ return []
+ if os.path.isfile(d):
+ dirs = [os.path.dirname(d)] + dirs
+ l = getattr(self, '_lib_names', [])
+ if len(l) == 1:
+ b = os.path.basename(d)
+ b = os.path.splitext(b)[0]
+ if b[:3] == 'lib':
+ log.info('Replacing _lib_names[0]==%r with %r' \
+ % (self._lib_names[0], b[3:]))
+ self._lib_names[0] = b[3:]
+ else:
+ ds = d.split(os.pathsep)
+ ds2 = []
+ for d in ds:
+ if os.path.isdir(d):
+ ds2.append(d)
+ for dd in ['include', 'lib']:
+ d1 = os.path.join(d, dd)
+ if os.path.isdir(d1):
+ ds2.append(d1)
+ dirs = ds2 + dirs
+ default_dirs = self.cp.get(self.section, key).split(os.pathsep)
+ dirs.extend(default_dirs)
+ ret = []
+ for d in dirs:
+ if len(d) > 0 and not os.path.isdir(d):
+ warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)
+ continue
+
+ if d not in ret:
+ ret.append(d)
+
+ log.debug('( %s = %s )', key, ':'.join(ret))
+ return ret
+
+ def get_lib_dirs(self, key='library_dirs'):
+ return self.get_paths(self.section, key)
+
+ def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
+ path = self.get_paths(self.section, key)
+ if path == ['']:
+ path = []
+ return path
+
+ def get_include_dirs(self, key='include_dirs'):
+ return self.get_paths(self.section, key)
+
+ def get_src_dirs(self, key='src_dirs'):
+ return self.get_paths(self.section, key)
+
+ def get_libs(self, key, default):
+ try:
+ libs = self.cp.get(self.section, key)
+ except NoOptionError:
+ if not default:
+ return []
+ if is_string(default):
+ return [default]
+ return default
+ return [b for b in [a.strip() for a in libs.split(',')] if b]
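+
+ # Example (illustrative): a config value of 'lapack, f77blas , ' parses
+ # to ['lapack', 'f77blas'] -- entries are comma-split, stripped, and
+ # empty items dropped.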
+
+ def get_libraries(self, key='libraries'):
+ if hasattr(self, '_lib_names'):
+ return self.get_libs(key, default=self._lib_names)
+ else:
+ return self.get_libs(key, '')
+
+ def library_extensions(self):
+ c = customized_ccompiler()
+ static_exts = []
+ if c.compiler_type != 'msvc':
+ # MSVC doesn't understand binutils
+ static_exts.append('.a')
+ if sys.platform == 'win32':
+ static_exts.append('.lib') # .lib is used by MSVC and others
+ if self.search_static_first:
+ exts = static_exts + [so_ext]
+ else:
+ exts = [so_ext] + static_exts
+ if sys.platform == 'cygwin':
+ exts.append('.dll.a')
+ if sys.platform == 'darwin':
+ exts.append('.dylib')
+ return exts
+
+ def check_libs(self, lib_dirs, libs, opt_libs=[]):
+ """If static or shared libraries are available then return
+ their info dictionary.
+
+ Checks for all libraries as shared libraries first, then
+ static (or vice versa if self.search_static_first is True).
+ """
+ exts = self.library_extensions()
+ info = None
+ for ext in exts:
+ info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
+ if info is not None:
+ break
+ if not info:
+ log.info(' libraries %s not found in %s', ','.join(libs),
+ lib_dirs)
+ return info
+
+ def check_libs2(self, lib_dirs, libs, opt_libs=[]):
+ """If static or shared libraries are available then return
+ their info dictionary.
+
+ Checks each library for shared or static.
+ """
+ exts = self.library_extensions()
+ info = self._check_libs(lib_dirs, libs, opt_libs, exts)
+ if not info:
+ log.info(' libraries %s not found in %s', ','.join(libs),
+ lib_dirs)
+
+ return info
+
+ def _find_lib(self, lib_dir, lib, exts):
+ assert is_string(lib_dir)
+ # under windows first try without 'lib' prefix
+ if sys.platform == 'win32':
+ lib_prefixes = ['', 'lib']
+ else:
+ lib_prefixes = ['lib']
+ # for each library name, see if we can find a file for it.
+ for ext in exts:
+ for prefix in lib_prefixes:
+ p = self.combine_paths(lib_dir, prefix + lib + ext)
+ if p:
+ break
+ if p:
+ assert len(p) == 1
+ # ??? splitext on p[0] would do this for cygwin
+ # doesn't seem correct
+ if ext == '.dll.a':
+ lib += '.dll'
+ if ext == '.lib':
+ lib = prefix + lib
+ return lib
+
+ return False
+
+ def _find_libs(self, lib_dirs, libs, exts):
+ # make sure we preserve the order of libs, as it can be important
+ found_dirs, found_libs = [], []
+ for lib in libs:
+ for lib_dir in lib_dirs:
+ found_lib = self._find_lib(lib_dir, lib, exts)
+ if found_lib:
+ found_libs.append(found_lib)
+ if lib_dir not in found_dirs:
+ found_dirs.append(lib_dir)
+ break
+ return found_dirs, found_libs
+
+ def _check_libs(self, lib_dirs, libs, opt_libs, exts):
+ """Find mandatory and optional libs in expected paths.
+
+ Missing optional libraries are silently forgotten.
+ """
+ if not is_sequence(lib_dirs):
+ lib_dirs = [lib_dirs]
+ # First, try to find the mandatory libraries
+ found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
+ if len(found_libs) > 0 and len(found_libs) == len(libs):
+ # Now, check for optional libraries
+ opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
+ found_libs.extend(opt_found_libs)
+ for lib_dir in opt_found_dirs:
+ if lib_dir not in found_dirs:
+ found_dirs.append(lib_dir)
+ info = {'libraries': found_libs, 'library_dirs': found_dirs}
+ return info
+ else:
+ return None
+
+ def combine_paths(self, *args):
+ """Return a list of existing paths composed by all combinations
+ of items from the arguments.
+ """
+ return combine_paths(*args)
+
+
+class fft_opt_info(system_info):
+
+ def calc_info(self):
+ info = {}
+ fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
+ djbfft_info = get_info('djbfft')
+ if fftw_info:
+ dict_append(info, **fftw_info)
+ if djbfft_info:
+ dict_append(info, **djbfft_info)
+ self.set_info(**info)
+ return
+
+
+class fftw_info(system_info):
+ #variables to override
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name':'fftw3',
+ 'libs':['fftw3'],
+ 'includes':['fftw3.h'],
+ 'macros':[('SCIPY_FFTW3_H', None)]},
+ {'name':'fftw2',
+ 'libs':['rfftw', 'fftw'],
+ 'includes':['fftw.h', 'rfftw.h'],
+ 'macros':[('SCIPY_FFTW_H', None)]}]
+
+ def calc_ver_info(self, ver_param):
+ """Returns True on successful version detection, else False"""
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+
+ opt = self.get_option_single(self.section + '_libs', 'libraries')
+ libs = self.get_libs(opt, ver_param['libs'])
+ info = self.check_libs(lib_dirs, libs)
+ if info is not None:
+ flag = 0
+ for d in incl_dirs:
+ if len(self.combine_paths(d, ver_param['includes'])) \
+ == len(ver_param['includes']):
+ dict_append(info, include_dirs=[d])
+ flag = 1
+ break
+ if flag:
+ dict_append(info, define_macros=ver_param['macros'])
+ else:
+ info = None
+ if info is not None:
+ self.set_info(**info)
+ return True
+ else:
+ log.info(' %s not found' % (ver_param['name']))
+ return False
+
+ def calc_info(self):
+ for i in self.ver_info:
+ if self.calc_ver_info(i):
+ break
+
+
+class fftw2_info(fftw_info):
+ #variables to override
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name':'fftw2',
+ 'libs':['rfftw', 'fftw'],
+ 'includes':['fftw.h', 'rfftw.h'],
+ 'macros':[('SCIPY_FFTW_H', None)]}
+ ]
+
+
+class fftw3_info(fftw_info):
+ #variables to override
+ section = 'fftw3'
+ dir_env_var = 'FFTW3'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name':'fftw3',
+ 'libs':['fftw3'],
+ 'includes':['fftw3.h'],
+ 'macros':[('SCIPY_FFTW3_H', None)]},
+ ]
+
+
+class fftw3_armpl_info(fftw_info):
+ section = 'fftw3'
+ dir_env_var = 'ARMPL_DIR'
+ notfounderror = FFTWNotFoundError
+ ver_info = [{'name': 'fftw3',
+ 'libs': ['armpl_lp64_mp'],
+ 'includes': ['fftw3.h'],
+ 'macros': [('SCIPY_FFTW3_H', None)]}]
+
+
+class dfftw_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'dfftw',
+ 'libs':['drfftw', 'dfftw'],
+ 'includes':['dfftw.h', 'drfftw.h'],
+ 'macros':[('SCIPY_DFFTW_H', None)]}]
+
+
+class sfftw_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'sfftw',
+ 'libs':['srfftw', 'sfftw'],
+ 'includes':['sfftw.h', 'srfftw.h'],
+ 'macros':[('SCIPY_SFFTW_H', None)]}]
+
+
+class fftw_threads_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'fftw threads',
+ 'libs':['rfftw_threads', 'fftw_threads'],
+ 'includes':['fftw_threads.h', 'rfftw_threads.h'],
+ 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
+
+
+class dfftw_threads_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'dfftw threads',
+ 'libs':['drfftw_threads', 'dfftw_threads'],
+ 'includes':['dfftw_threads.h', 'drfftw_threads.h'],
+ 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
+
+
+class sfftw_threads_info(fftw_info):
+ section = 'fftw'
+ dir_env_var = 'FFTW'
+ ver_info = [{'name':'sfftw threads',
+ 'libs':['srfftw_threads', 'sfftw_threads'],
+ 'includes':['sfftw_threads.h', 'srfftw_threads.h'],
+ 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
+
+
+class djbfft_info(system_info):
+ section = 'djbfft'
+ dir_env_var = 'DJBFFT'
+ notfounderror = DJBFFTNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+ info = None
+ for d in lib_dirs:
+ p = self.combine_paths(d, ['djbfft.a'])
+ if p:
+ info = {'extra_objects': p}
+ break
+ p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
+ if p:
+ info = {'libraries': ['djbfft'], 'library_dirs': [d]}
+ break
+ if info is None:
+ return
+ for d in incl_dirs:
+ if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
+ dict_append(info, include_dirs=[d],
+ define_macros=[('SCIPY_DJBFFT_H', None)])
+ self.set_info(**info)
+ return
+ return
+
+
+class mkl_info(system_info):
+ section = 'mkl'
+ dir_env_var = 'MKLROOT'
+ _lib_mkl = ['mkl_rt']
+
+ def get_mkl_rootdir(self):
+ mklroot = os.environ.get('MKLROOT', None)
+ if mklroot is not None:
+ return mklroot
+ paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
+ ld_so_conf = '/etc/ld.so.conf'
+ if os.path.isfile(ld_so_conf):
+ with open(ld_so_conf, 'r') as f:
+ for d in f:
+ d = d.strip()
+ if d:
+ paths.append(d)
+ intel_mkl_dirs = []
+ for path in paths:
+ path_atoms = path.split(os.sep)
+ for m in path_atoms:
+ if m.startswith('mkl'):
+ d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
+ intel_mkl_dirs.append(d)
+ break
+ for d in paths:
+ dirs = glob(os.path.join(d, 'mkl', '*'))
+ dirs += glob(os.path.join(d, 'mkl*'))
+ for sub_dir in dirs:
+ if os.path.isdir(os.path.join(sub_dir, 'lib')):
+ return sub_dir
+ return None
+
+ def __init__(self):
+ mklroot = self.get_mkl_rootdir()
+ if mklroot is None:
+ system_info.__init__(self)
+ else:
+ from .cpuinfo import cpu
+ if cpu.is_Itanium():
+ plt = '64'
+ elif cpu.is_Intel() and cpu.is_64bit():
+ plt = 'intel64'
+ else:
+ plt = '32'
+ system_info.__init__(
+ self,
+ default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
+ default_include_dirs=[os.path.join(mklroot, 'include')])
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+ opt = self.get_option_single('mkl_libs', 'libraries')
+ mkl_libs = self.get_libs(opt, self._lib_mkl)
+ info = self.check_libs2(lib_dirs, mkl_libs)
+ if info is None:
+ return
+ dict_append(info,
+ define_macros=[('SCIPY_MKL_H', None),
+ ('HAVE_CBLAS', None)],
+ include_dirs=incl_dirs)
+ if sys.platform == 'win32':
+ pass # win32 has no pthread library
+ else:
+ dict_append(info, libraries=['pthread'])
+ self.set_info(**info)
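+
+# Example (illustrative): pointing the MKL search at a specific install via
+# the MKLROOT environment variable honored above; the path is an assumption:
+#
+#   MKLROOT=/opt/intel/mkl python setup.py build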
+
+
+class lapack_mkl_info(mkl_info):
+ pass
+
+
+class blas_mkl_info(mkl_info):
+ pass
+
+
+class armpl_info(system_info):
+ section = 'armpl'
+ dir_env_var = 'ARMPL_DIR'
+ _lib_armpl = ['armpl_lp64_mp']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ incl_dirs = self.get_include_dirs()
+ armpl_libs = self.get_libs('armpl_libs', self._lib_armpl)
+ info = self.check_libs2(lib_dirs, armpl_libs)
+ if info is None:
+ return
+ dict_append(info,
+ define_macros=[('SCIPY_MKL_H', None),
+ ('HAVE_CBLAS', None)],
+ include_dirs=incl_dirs)
+ self.set_info(**info)
+
+class lapack_armpl_info(armpl_info):
+ pass
+
+class blas_armpl_info(armpl_info):
+ pass
+
+
+class atlas_info(system_info):
+ section = 'atlas'
+ dir_env_var = 'ATLAS'
+ _lib_names = ['f77blas', 'cblas']
+ if sys.platform.startswith('freebsd'):
+ _lib_atlas = ['atlas_r']
+ _lib_lapack = ['alapack_r']
+ else:
+ _lib_atlas = ['atlas']
+ _lib_lapack = ['lapack']
+
+ notfounderror = AtlasNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
+ 'sse', '3dnow', 'sse2']) + [d])
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ info = {}
+ opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
+ lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
+ atlas = None
+ lapack = None
+ atlas_1 = None
+ for d in lib_dirs:
+ atlas = self.check_libs2(d, atlas_libs, [])
+ if atlas is not None:
+ lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
+ lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
+ if lapack is not None:
+ break
+ if atlas:
+ atlas_1 = atlas
+ log.info(self.__class__)
+ if atlas is None:
+ atlas = atlas_1
+ if atlas is None:
+ return
+ include_dirs = self.get_include_dirs()
+ h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
+ h = h[0]
+ if h:
+ h = os.path.dirname(h)
+ dict_append(info, include_dirs=[h])
+ info['language'] = 'c'
+ if lapack is not None:
+ dict_append(info, **lapack)
+ dict_append(info, **atlas)
+ elif 'lapack_atlas' in atlas['libraries']:
+ dict_append(info, **atlas)
+ dict_append(info,
+ define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
+ self.set_info(**info)
+ return
+ else:
+ dict_append(info, **atlas)
+ dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
+ message = textwrap.dedent("""
+ *********************************************************************
+ Could not find lapack library within the ATLAS installation.
+ *********************************************************************
+ """)
+ warnings.warn(message, stacklevel=2)
+ self.set_info(**info)
+ return
+
+ # Check if lapack library is complete, only warn if it is not.
+ lapack_dir = lapack['library_dirs'][0]
+ lapack_name = lapack['libraries'][0]
+ lapack_lib = None
+ lib_prefixes = ['lib']
+ if sys.platform == 'win32':
+ lib_prefixes.append('')
+ for e in self.library_extensions():
+ for prefix in lib_prefixes:
+ fn = os.path.join(lapack_dir, prefix + lapack_name + e)
+ if os.path.exists(fn):
+ lapack_lib = fn
+ break
+ if lapack_lib:
+ break
+ if lapack_lib is not None:
+ sz = os.stat(lapack_lib).st_size
+ if sz <= 4000 * 1024:
+ message = textwrap.dedent("""
+ *********************************************************************
+ Lapack library (from ATLAS) is probably incomplete:
+ size of %s is %sk (expected >4000k)
+
+ Follow the instructions in the KNOWN PROBLEMS section of the file
+ numpy/INSTALL.txt.
+ *********************************************************************
+ """) % (lapack_lib, sz / 1024)
+ warnings.warn(message, stacklevel=2)
+ else:
+ info['language'] = 'f77'
+
+ atlas_version, atlas_extra_info = get_atlas_version(**atlas)
+ dict_append(info, **atlas_extra_info)
+
+ self.set_info(**info)
+
+
+class atlas_blas_info(atlas_info):
+ _lib_names = ['f77blas', 'cblas']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ info = {}
+ opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
+ atlas = self.check_libs2(lib_dirs, atlas_libs, [])
+ if atlas is None:
+ return
+ include_dirs = self.get_include_dirs()
+ h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
+ h = h[0]
+ if h:
+ h = os.path.dirname(h)
+ dict_append(info, include_dirs=[h])
+ info['language'] = 'c'
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+
+ atlas_version, atlas_extra_info = get_atlas_version(**atlas)
+ dict_append(atlas, **atlas_extra_info)
+
+ dict_append(info, **atlas)
+
+ self.set_info(**info)
+ return
+
+
+class atlas_threads_info(atlas_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['ptf77blas', 'ptcblas']
+
+
+class atlas_blas_threads_info(atlas_blas_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['ptf77blas', 'ptcblas']
+
+
+class lapack_atlas_info(atlas_info):
+ _lib_names = ['lapack_atlas'] + atlas_info._lib_names
+
+
+class lapack_atlas_threads_info(atlas_threads_info):
+ _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
+
+
+class atlas_3_10_info(atlas_info):
+ _lib_names = ['satlas']
+ _lib_atlas = _lib_names
+ _lib_lapack = _lib_names
+
+
+class atlas_3_10_blas_info(atlas_3_10_info):
+ _lib_names = ['satlas']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ info = {}
+ opt = self.get_option_single('atlas_lib', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names)
+ atlas = self.check_libs2(lib_dirs, atlas_libs, [])
+ if atlas is None:
+ return
+ include_dirs = self.get_include_dirs()
+ h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
+ h = h[0]
+ if h:
+ h = os.path.dirname(h)
+ dict_append(info, include_dirs=[h])
+ info['language'] = 'c'
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+
+ atlas_version, atlas_extra_info = get_atlas_version(**atlas)
+ dict_append(atlas, **atlas_extra_info)
+
+ dict_append(info, **atlas)
+
+ self.set_info(**info)
+ return
+
+
+class atlas_3_10_threads_info(atlas_3_10_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['tatlas']
+ _lib_atlas = _lib_names
+ _lib_lapack = _lib_names
+
+
+class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
+ dir_env_var = ['PTATLAS', 'ATLAS']
+ _lib_names = ['tatlas']
+
+
+class lapack_atlas_3_10_info(atlas_3_10_info):
+ pass
+
+
+class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
+ pass
+
+
+class lapack_info(system_info):
+ section = 'lapack'
+ dir_env_var = 'LAPACK'
+ _lib_names = ['lapack']
+ notfounderror = LapackNotFoundError
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+
+ opt = self.get_option_single('lapack_libs', 'libraries')
+ lapack_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, lapack_libs, [])
+ if info is None:
+ return
+ info['language'] = 'f77'
+ self.set_info(**info)
+
+
+class lapack_src_info(system_info):
+ # LAPACK_SRC is deprecated, please do not use this!
+ # Build or install a LAPACK library via your package manager or from
+ # source separately.
+ section = 'lapack_src'
+ dir_env_var = 'LAPACK_SRC'
+ notfounderror = LapackSrcNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'dgesv.f')):
+ src_dir = d
+ break
+ if not src_dir:
+ # XXX: Get sources from netlib. Maybe ask first.
+ return
+ # The following is extracted from LAPACK-3.0/SRC/Makefile.
+ # Added missing names from lapack-lite-3.1.1/SRC/Makefile
+ # while keeping removed names for Lapack-3.0 compatibility.
+ allaux = '''
+ ilaenv ieeeck lsame lsamen xerbla
+ iparmq
+ ''' # *.f
+ laux = '''
+ bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
+ laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
+ lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
+ larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
+ lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
+ lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
+ stebz stedc steqr sterf
+
+ larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
+ lazq3 lazq4
+ ''' # [s|d]*.f
+ lasrc = '''
+ gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
+ gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
+ gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
+ geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
+ gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
+ gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
+ ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
+ hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
+ lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
+ lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
+ laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
+ lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
+ latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
+ pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
+ potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
+ pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
+ spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
+ sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
+ tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
+ trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
+ tzrqf tzrzf
+
+ lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
+ ''' # [s|c|d|z]*.f
+ sd_lasrc = '''
+ laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
+ org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
+ orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
+ ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
+ sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
+ stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
+ sygvx sytd2 sytrd
+ ''' # [s|d]*.f
+ cz_lasrc = '''
+ bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
+ heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
+ hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
+ hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
+ hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
+ laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
+ laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
+ spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
+ ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
+ unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
+ ''' # [c|z]*.f
+ #######
+ sclaux = laux + ' econd ' # s*.f
+ dzlaux = laux + ' secnd ' # d*.f
+ slasrc = lasrc + sd_lasrc # s*.f
+ dlasrc = lasrc + sd_lasrc # d*.f
+ clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
+ zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
+ oclasrc = ' icmax1 scsum1 ' # *.f
+ ozlasrc = ' izmax1 dzsum1 ' # *.f
+ sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ + ['c%s.f' % f for f in (clasrc).split()] \
+ + ['z%s.f' % f for f in (zlasrc).split()] \
+ + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
+ sources = [os.path.join(src_dir, f) for f in sources]
+ # Lapack 3.1:
+ src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
+ sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
+ # Lapack 3.2.1:
+ sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
+ sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
+ sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
+ # Should we check here actual existence of source files?
+ # Yes, the file listing is different between 3.0 and 3.1
+ # versions.
+ sources = [f for f in sources if os.path.isfile(f)]
+ info = {'sources': sources, 'language': 'f77'}
+ self.set_info(**info)
+
+atlas_version_c_text = r'''
+/* This file is generated from numpy/distutils/system_info.py */
+void ATL_buildinfo(void);
+int main(void) {
+ ATL_buildinfo();
+ return 0;
+}
+'''
+
+_cached_atlas_version = {}
+
+
+def get_atlas_version(**config):
+ libraries = config.get('libraries', [])
+ library_dirs = config.get('library_dirs', [])
+ key = (tuple(libraries), tuple(library_dirs))
+ if key in _cached_atlas_version:
+ return _cached_atlas_version[key]
+ c = cmd_config(Distribution())
+ atlas_version = None
+ info = {}
+ try:
+ s, o = c.get_output(atlas_version_c_text,
+ libraries=libraries, library_dirs=library_dirs,
+ )
+ if s and re.search(r'undefined reference to `_gfortran', o, re.M):
+ s, o = c.get_output(atlas_version_c_text,
+ libraries=libraries + ['gfortran'],
+ library_dirs=library_dirs,
+ )
+ if not s:
+ warnings.warn(textwrap.dedent("""
+ *****************************************************
+ Linkage with ATLAS requires gfortran. Use
+
+ python setup.py config_fc --fcompiler=gnu95 ...
+
+ when building extension libraries that use ATLAS.
+ Make sure that -lgfortran is used for C++ extensions.
+ *****************************************************
+ """), stacklevel=2)
+ dict_append(info, language='f90',
+ define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
+ except Exception: # failed to get version from file -- maybe on Windows
+ # look at directory name
+ for o in library_dirs:
+ m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
+ if m:
+ atlas_version = m.group('version')
+ if atlas_version is not None:
+ break
+
+ # final choice --- look at ATLAS_VERSION environment
+ # variable
+ if atlas_version is None:
+ atlas_version = os.environ.get('ATLAS_VERSION', None)
+ if atlas_version:
+ dict_append(info, define_macros=[(
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
+ ])
+ else:
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
+ return atlas_version or '?.?.?', info
+
+ if not s:
+ m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
+ if m:
+ atlas_version = m.group('version')
+ if atlas_version is None:
+ if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
+ atlas_version = '3.2.1_pre3.3.6'
+ else:
+ log.info('Status: %d', s)
+ log.info('Output: %s', o)
+
+    if atlas_version is None:
+        # we failed to determine the version
+        dict_append(info, define_macros=[('NO_ATLAS_INFO', 2)])
+    elif atlas_version == '3.2.1_pre3.3.6':
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
+ else:
+ dict_append(info, define_macros=[(
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
+ ])
+ result = _cached_atlas_version[key] = atlas_version, info
+ return result
+
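+# A minimal usage sketch (illustrative; paths are made up): on the normal
+# path the result is cached in _cached_atlas_version per (libraries,
+# library_dirs) key:
+#
+#   version, extra_info = get_atlas_version(libraries=['atlas'],
+#                                           library_dirs=['/usr/lib/atlas'])
+#
+# On success extra_info['define_macros'] carries ATLAS_INFO with the version
+# string; otherwise it carries one of the NO_ATLAS_INFO sentinels.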
+
+class lapack_opt_info(system_info):
+ notfounderror = LapackNotFoundError
+
+ # List of all known LAPACK libraries, in the default order
+ lapack_order = ['armpl', 'mkl', 'openblas', 'flame',
+ 'accelerate', 'atlas', 'lapack']
+ order_env_var_name = 'NPY_LAPACK_ORDER'
+
+ def _calc_info_armpl(self):
+ info = get_info('lapack_armpl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_mkl(self):
+ info = get_info('lapack_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_openblas(self):
+ info = get_info('openblas_lapack')
+ if info:
+ self.set_info(**info)
+ return True
+ info = get_info('openblas_clapack')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_flame(self):
+ info = get_info('flame')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_threads')
+ if not info:
+ info = get_info('atlas_3_10')
+ if not info:
+ info = get_info('atlas_threads')
+ if not info:
+ info = get_info('atlas')
+ if info:
+ # Figure out if ATLAS has lapack...
+ # If not we need the lapack library, but not BLAS!
+ l = info.get('define_macros', [])
+ if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
+ or ('ATLAS_WITHOUT_LAPACK', None) in l:
+ # Get LAPACK (with possible warnings)
+ # If not found we don't accept anything
+ # since we can't use ATLAS with LAPACK!
+ lapack_info = self._get_info_lapack()
+ if not lapack_info:
+ return False
+ dict_append(info, **lapack_info)
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_accelerate(self):
+ info = get_info('accelerate')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _get_info_blas(self):
+ # Default to get the optimized BLAS implementation
+ info = get_info('blas_opt')
+ if not info:
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+ info_src = get_info('blas_src')
+ if not info_src:
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return {}
+ dict_append(info, libraries=[('fblas_src', info_src)])
+ return info
+
+ def _get_info_lapack(self):
+ info = get_info('lapack')
+ if not info:
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)
+ info_src = get_info('lapack_src')
+ if not info_src:
+ warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return {}
+ dict_append(info, libraries=[('flapack_src', info_src)])
+ return info
+
+ def _calc_info_lapack(self):
+ info = self._get_info_lapack()
+ if info:
+ info_blas = self._get_info_blas()
+ dict_append(info, **info_blas)
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_from_envvar(self):
+ info = {}
+ info['language'] = 'f77'
+ info['libraries'] = []
+ info['include_dirs'] = []
+ info['define_macros'] = []
+ info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split()
+ self.set_info(**info)
+ return True
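+    # Usage sketch (values are made up): setting NPY_LAPACK_LIBS bypasses
+    # detection and passes raw linker flags straight through, e.g.
+    #
+    #   NPY_LAPACK_LIBS="-L/opt/lapack/lib -llapack" python setup.py build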
+
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
+ def calc_info(self):
+ lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("lapack_opt_info user defined "
+ "LAPACK order has unacceptable "
+ "values: {}".format(unknown_order))
+
+ if 'NPY_LAPACK_LIBS' in os.environ:
+ # Bypass autodetection, set language to F77 and use env var linker
+ # flags directly
+ self._calc_info_from_envvar()
+ return
+
+ for lapack in lapack_order:
+ if self._calc_info(lapack):
+ return
+
+ if 'lapack' not in lapack_order:
+ # Since the user may request *not* to use any library, we still need
+ # to raise warnings to signal missing packages!
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)
+ warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
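+    # Illustrative: the search order above can be steered via the environment
+    # before building, e.g. NPY_LAPACK_ORDER="openblas,atlas" probes only
+    # those two entries, in that order; any name not present in lapack_order
+    # comes back in unknown_order and triggers the ValueError above.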
+
+
+class _ilp64_opt_info_mixin:
+ symbol_suffix = None
+ symbol_prefix = None
+
+ def _check_info(self, info):
+ macros = dict(info.get('define_macros', []))
+ prefix = macros.get('BLAS_SYMBOL_PREFIX', '')
+ suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')
+
+ if self.symbol_prefix not in (None, prefix):
+ return False
+
+ if self.symbol_suffix not in (None, suffix):
+ return False
+
+ return bool(info)
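+    # Illustrative: an info dict such as
+    #   {'define_macros': [('BLAS_SYMBOL_SUFFIX', '64_')], 'libraries': [...]}
+    # passes for a subclass pinning symbol_suffix = '64_' (with symbol_prefix
+    # left as None), but fails one that pins symbol_suffix = ''.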
+
+
+class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = LapackILP64NotFoundError
+ lapack_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name + '_lapack')
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):
+ # Same as lapack_ilp64_opt_info, but fix symbol names
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class lapack64__opt_info(lapack_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
+class blas_opt_info(system_info):
+ notfounderror = BlasNotFoundError
+ # List of all known BLAS libraries, in the default order
+
+ blas_order = ['armpl', 'mkl', 'blis', 'openblas',
+ 'accelerate', 'atlas', 'blas']
+ order_env_var_name = 'NPY_BLAS_ORDER'
+
+ def _calc_info_armpl(self):
+ info = get_info('blas_armpl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_mkl(self):
+ info = get_info('blas_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_blis(self):
+ info = get_info('blis')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_openblas(self):
+ info = get_info('openblas')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_blas_threads')
+ if not info:
+ info = get_info('atlas_3_10_blas')
+ if not info:
+ info = get_info('atlas_blas_threads')
+ if not info:
+ info = get_info('atlas_blas')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_accelerate(self):
+ info = get_info('accelerate')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+ def _calc_info_blas(self):
+ # Warn about a non-optimized BLAS library
+ warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)
+ info = {}
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+
+ blas = get_info('blas')
+ if blas:
+ dict_append(info, **blas)
+ else:
+ # Not even BLAS was found!
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+
+ blas_src = get_info('blas_src')
+ if not blas_src:
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return False
+ dict_append(info, libraries=[('fblas_src', blas_src)])
+
+ self.set_info(**info)
+ return True
+
+ def _calc_info_from_envvar(self):
+ info = {}
+ info['language'] = 'f77'
+ info['libraries'] = []
+ info['include_dirs'] = []
+ info['define_macros'] = []
+ info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split()
+ if 'NPY_CBLAS_LIBS' in os.environ:
+ info['define_macros'].append(('HAVE_CBLAS', None))
+ info['extra_link_args'].extend(
+ os.environ['NPY_CBLAS_LIBS'].split())
+ self.set_info(**info)
+ return True
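+    # Usage sketch (paths are made up): both variables hold raw linker flags
+    # split on whitespace, and NPY_CBLAS_LIBS additionally defines HAVE_CBLAS:
+    #
+    #   NPY_BLAS_LIBS="-L/opt/blas/lib -lblas" \
+    #   NPY_CBLAS_LIBS="-lcblas" python setup.py build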
+
+ def _calc_info(self, name):
+ return getattr(self, '_calc_info_{}'.format(name))()
+
+ def calc_info(self):
+ blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order))
+
+ if 'NPY_BLAS_LIBS' in os.environ:
+ # Bypass autodetection, set language to F77 and use env var linker
+ # flags directly
+ self._calc_info_from_envvar()
+ return
+
+ for blas in blas_order:
+ if self._calc_info(blas):
+ return
+
+ if 'blas' not in blas_order:
+ # Since the user may request *not* to use any library, we still need
+ # to raise warnings to signal missing packages!
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
+
+
+class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):
+ notfounderror = BlasILP64NotFoundError
+ blas_order = ['openblas64_', 'openblas_ilp64']
+ order_env_var_name = 'NPY_BLAS_ILP64_ORDER'
+
+ def _calc_info(self, name):
+ info = get_info(name)
+ if self._check_info(info):
+ self.set_info(**info)
+ return True
+ return False
+
+
+class blas_ilp64_plain_opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = ''
+
+
+class blas64__opt_info(blas_ilp64_opt_info):
+ symbol_prefix = ''
+ symbol_suffix = '64_'
+
+
+class cblas_info(system_info):
+ section = 'cblas'
+ dir_env_var = 'CBLAS'
+ # No default as it's used only in blas_info
+ _lib_names = []
+ notfounderror = BlasNotFoundError
+
+
+class blas_info(system_info):
+ section = 'blas'
+ dir_env_var = 'BLAS'
+ _lib_names = ['blas']
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ opt = self.get_option_single('blas_libs', 'libraries')
+ blas_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, blas_libs, [])
+ if info is None:
+ return
+ else:
+ info['include_dirs'] = self.get_include_dirs()
+ if platform.system() == 'Windows':
+ # The check for windows is needed because get_cblas_libs uses the
+ # same compiler that was used to compile Python and msvc is
+ # often not installed when mingw is being used. This rough
+ # treatment is not desirable, but windows is tricky.
+ info['language'] = 'f77' # XXX: is it generally true?
+ # If cblas is given as an option, use those
+ cblas_info_obj = cblas_info()
+ cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
+ cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
+ if cblas_libs:
+ info['libraries'] = cblas_libs + blas_libs
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ else:
+ lib = self.get_cblas_libs(info)
+ if lib is not None:
+ info['language'] = 'c'
+ info['libraries'] = lib
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ self.set_info(**info)
+
+ def get_cblas_libs(self, info):
+ """ Check whether we can link with CBLAS interface
+
+ This method will search through several combinations of libraries
+ to check whether CBLAS is present:
+
+ 1. Libraries in ``info['libraries']``, as is
+ 2. As 1. but also explicitly adding ``'cblas'`` as a library
+ 3. As 1. but also explicitly adding ``'blas'`` as a library
+ 4. Check only library ``'cblas'``
+ 5. Check only library ``'blas'``
+
+ Parameters
+ ----------
+ info : dict
+ system information dictionary for compilation and linking
+
+ Returns
+ -------
+ libraries : list of str or None
+ a list of libraries that enables the use of CBLAS interface.
+ Returns None if not found or a compilation error occurs.
+
+ Since 1.17 returns a list.
+ """
+ # primitive cblas check by looking for the header and trying to link
+ # cblas or blas
+ c = customized_ccompiler()
+ tmpdir = tempfile.mkdtemp()
+ s = textwrap.dedent("""\
+ #include <cblas.h>
+ int main(int argc, const char *argv[])
+ {
+ double a[4] = {1,2,3,4};
+ double b[4] = {5,6,7,8};
+ return cblas_ddot(4, a, 1, b, 1) > 10;
+ }""")
+ src = os.path.join(tmpdir, 'source.c')
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+
+ try:
+ # check we can compile (find headers)
+ obj = c.compile([src], output_dir=tmpdir,
+ include_dirs=self.get_include_dirs())
+ except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
+ return None
+
+ # check we can link (find library)
+ # some systems have separate cblas and blas libs.
+ for libs in [info['libraries'], ['cblas'] + info['libraries'],
+ ['blas'] + info['libraries'], ['cblas'], ['blas']]:
+ try:
+ c.link_executable(obj, os.path.join(tmpdir, "a.out"),
+ libraries=libs,
+ library_dirs=info['library_dirs'],
+ extra_postargs=info.get('extra_link_args', []))
+ return libs
+ except distutils.ccompiler.LinkError:
+ pass
+ finally:
+ shutil.rmtree(tmpdir)
+ return None
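+    # Illustrative: on a system whose plain BLAS lacks the CBLAS symbols but
+    # which ships a separate libcblas, the link loop above succeeds on the
+    # ['cblas'] + info['libraries'] combination and returns that list; if
+    # even <cblas.h> cannot be found, the compile step fails and None is
+    # returned.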
+
+
+class openblas_info(blas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
+ _require_symbols = []
+ notfounderror = BlasNotFoundError
+
+ @property
+ def symbol_prefix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_prefix')
+ except NoOptionError:
+ return ''
+
+ @property
+ def symbol_suffix(self):
+ try:
+ return self.cp.get(self.section, 'symbol_suffix')
+ except NoOptionError:
+ return ''
+
+ def _calc_info(self):
+ c = customized_ccompiler()
+
+ lib_dirs = self.get_lib_dirs()
+
+ # Prefer to use libraries over openblas_libs
+ opt = self.get_option_single('openblas_libs', 'libraries')
+ openblas_libs = self.get_libs(opt, self._lib_names)
+
+ info = self.check_libs(lib_dirs, openblas_libs, [])
+
+ if c.compiler_type == "msvc" and info is None:
+ from numpy.distutils.fcompiler import new_fcompiler
+ f = new_fcompiler(c_compiler=c)
+ if f and f.compiler_type == 'gnu95':
+ # Try gfortran-compatible library files
+ info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
+ # Skip lapack check, we'd need build_ext to do it
+ skip_symbol_check = True
+ elif info:
+ skip_symbol_check = False
+ info['language'] = 'c'
+
+ if info is None:
+ return None
+
+ # Add extra info for OpenBLAS
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
+ if not (skip_symbol_check or self.check_symbols(info)):
+ return None
+
+ info['define_macros'] = [('HAVE_CBLAS', None)]
+ if self.symbol_prefix:
+ info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]
+ if self.symbol_suffix:
+ info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]
+
+ return info
+
+ def calc_info(self):
+ info = self._calc_info()
+ if info is not None:
+ self.set_info(**info)
+
+ def check_msvc_gfortran_libs(self, library_dirs, libraries):
+        # First, find the full path to each library file
+ library_paths = []
+ for library in libraries:
+ for library_dir in library_dirs:
+ # MinGW static ext will be .a
+ fullpath = os.path.join(library_dir, library + '.a')
+ if os.path.isfile(fullpath):
+ library_paths.append(fullpath)
+ break
+ else:
+ return None
+
+ # Generate numpy.distutils virtual static library file
+ basename = self.__class__.__name__
+ tmpdir = os.path.join(os.getcwd(), 'build', basename)
+ if not os.path.isdir(tmpdir):
+ os.makedirs(tmpdir)
+
+ info = {'library_dirs': [tmpdir],
+ 'libraries': [basename],
+ 'language': 'f77'}
+
+ fake_lib_file = os.path.join(tmpdir, basename + '.fobjects')
+ fake_clib_file = os.path.join(tmpdir, basename + '.cobjects')
+ with open(fake_lib_file, 'w') as f:
+ f.write("\n".join(library_paths))
+ with open(fake_clib_file, 'w') as f:
+ pass
+
+ return info
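+    # Note: the '.fobjects'/'.cobjects' pair written above is not a real
+    # archive; numpy.distutils' build commands treat it as a virtual static
+    # library named after this class and link the listed MinGW '.a' files
+    # directly.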
+
+ def check_symbols(self, info):
+ res = False
+ c = customized_ccompiler()
+
+ tmpdir = tempfile.mkdtemp()
+
+ prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
+ calls = "\n".join("%s%s%s();" % (self.symbol_prefix,
+ symbol_name,
+ self.symbol_suffix)
+ for symbol_name in self._require_symbols)
+ s = textwrap.dedent("""\
+ %(prototypes)s
+ int main(int argc, const char *argv[])
+ {
+ %(calls)s
+ return 0;
+ }""") % dict(prototypes=prototypes, calls=calls)
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ try:
+ extra_args = info['extra_link_args']
+ except Exception:
+ extra_args = []
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
+ res = True
+ except distutils.ccompiler.LinkError:
+ res = False
+ finally:
+ shutil.rmtree(tmpdir)
+ return res
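+    # Illustrative: with symbol_prefix='', symbol_suffix='64_' and
+    # _require_symbols=['dgemm_'], the probe program generated above is
+    #
+    #   void dgemm_64_();
+    #   int main(int argc, const char *argv[])
+    #   {
+    #       dgemm_64_();
+    #       return 0;
+    #   }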
+
+class openblas_lapack_info(openblas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
+ _require_symbols = ['zungqr_']
+ notfounderror = BlasNotFoundError
+
+class openblas_clapack_info(openblas_lapack_info):
+ _lib_names = ['openblas', 'lapack']
+
+class openblas_ilp64_info(openblas_info):
+ section = 'openblas_ilp64'
+ dir_env_var = 'OPENBLAS_ILP64'
+ _lib_names = ['openblas64']
+ _require_symbols = ['dgemm_', 'cblas_dgemm']
+ notfounderror = BlasILP64NotFoundError
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info is not None:
+ info['define_macros'] += [('HAVE_BLAS_ILP64', None)]
+ return info
+
+class openblas_ilp64_lapack_info(openblas_ilp64_info):
+ _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info:
+ info['define_macros'] += [('HAVE_LAPACKE', None)]
+ return info
+
+class openblas64__info(openblas_ilp64_info):
+ # ILP64 Openblas, with default symbol suffix
+ section = 'openblas64_'
+ dir_env_var = 'OPENBLAS64_'
+ _lib_names = ['openblas64_']
+ symbol_suffix = '64_'
+ symbol_prefix = ''
+
+class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):
+ pass
+
+class blis_info(blas_info):
+ section = 'blis'
+ dir_env_var = 'BLIS'
+ _lib_names = ['blis']
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ opt = self.get_option_single('blis_libs', 'libraries')
+ blis_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs2(lib_dirs, blis_libs, [])
+ if info is None:
+ return
+
+ # Add include dirs
+ incl_dirs = self.get_include_dirs()
+ dict_append(info,
+ language='c',
+ define_macros=[('HAVE_CBLAS', None)],
+ include_dirs=incl_dirs)
+ self.set_info(**info)
+
+
+class flame_info(system_info):
+ """ Usage of libflame for LAPACK operations
+
+ This requires libflame to be compiled with lapack wrappers:
+
+ ./configure --enable-lapack2flame ...
+
+ Be aware that libflame 5.1.0 has some missing names in the shared library, so
+ if you have problems, try the static flame library.
+ """
+ section = 'flame'
+ _lib_names = ['flame']
+ notfounderror = FlameNotFoundError
+
+    def check_embedded_lapack(self, info):
+        """ libflame does not necessarily have a wrapper for Fortran LAPACK; we need to check. """
+ c = customized_ccompiler()
+
+ tmpdir = tempfile.mkdtemp()
+ s = textwrap.dedent("""\
+ void zungqr_();
+ int main(int argc, const char *argv[])
+ {
+ zungqr_();
+ return 0;
+ }""")
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ extra_args = info.get('extra_link_args', [])
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
+ return True
+ except distutils.ccompiler.LinkError:
+ return False
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ flame_libs = self.get_libs('libraries', self._lib_names)
+
+ info = self.check_libs2(lib_dirs, flame_libs, [])
+ if info is None:
+ return
+
+ # Add the extra flag args to info
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
+ if self.check_embedded_lapack(info):
+ # check if the user has supplied all information required
+ self.set_info(**info)
+ else:
+ # Try and get the BLAS lib to see if we can get it to work
+ blas_info = get_info('blas_opt')
+ if not blas_info:
+                # since we already failed once, this isn't going to work either
+ return
+
+ # Now we need to merge the two dictionaries
+ for key in blas_info:
+ if isinstance(blas_info[key], list):
+ info[key] = info.get(key, []) + blas_info[key]
+ elif isinstance(blas_info[key], tuple):
+ info[key] = info.get(key, ()) + blas_info[key]
+ else:
+ info[key] = info.get(key, '') + blas_info[key]
+
+ # Now check again
+ if self.check_embedded_lapack(info):
+ self.set_info(**info)
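+    # Merge sketch (illustrative values): with info = {'libraries': ['flame']}
+    # and blas_info = {'libraries': ['openblas']}, the loop above yields
+    # info['libraries'] == ['flame', 'openblas']; tuples and strings are
+    # concatenated the same way by the type-matched branches.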
+
+
+class accelerate_info(system_info):
+ section = 'accelerate'
+ _lib_names = ['accelerate', 'veclib']
+ notfounderror = BlasNotFoundError
+
+ def calc_info(self):
+        # Make it possible to enable/disable from config file/env var
+ libraries = os.environ.get('ACCELERATE')
+ if libraries:
+ libraries = [libraries]
+ else:
+ libraries = self.get_libs('libraries', self._lib_names)
+ libraries = [lib.strip().lower() for lib in libraries]
+
+ if (sys.platform == 'darwin' and
+ not os.getenv('_PYTHON_HOST_PLATFORM', None)):
+ # Use the system BLAS from Accelerate or vecLib under OSX
+ args = []
+ link_args = []
+ if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
+ 'x86_64' in get_platform() or \
+ 'i386' in platform.platform():
+ intel = 1
+ else:
+ intel = 0
+ if (os.path.exists('/System/Library/Frameworks'
+ '/Accelerate.framework/') and
+ 'accelerate' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
+ elif (os.path.exists('/System/Library/Frameworks'
+ '/vecLib.framework/') and
+ 'veclib' in libraries):
+ if intel:
+ args.extend(['-msse3'])
+ args.extend([
+ '-I/System/Library/Frameworks/vecLib.framework/Headers'])
+ link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
+
+ if args:
+ self.set_info(extra_compile_args=args,
+ extra_link_args=link_args,
+ define_macros=[('NO_ATLAS_INFO', 3),
+ ('HAVE_CBLAS', None)])
+
+ return
+
+class blas_src_info(system_info):
+ # BLAS_SRC is deprecated, please do not use this!
+ # Build or install a BLAS library via your package manager or from
+ # source separately.
+ section = 'blas_src'
+ dir_env_var = 'BLAS_SRC'
+ notfounderror = BlasSrcNotFoundError
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['blas']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'daxpy.f')):
+ src_dir = d
+ break
+ if not src_dir:
+            #XXX: Get sources from netlib. Maybe ask first.
+ return
+ blas1 = '''
+ caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
+ dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
+ srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
+ dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
+ snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
+ scabs1
+ '''
+ blas2 = '''
+ cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
+ chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
+ dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
+ sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
+ stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
+ zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
+ ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
+ '''
+ blas3 = '''
+ cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
+ dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
+ ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
+ '''
+ sources = [os.path.join(src_dir, f + '.f') \
+ for f in (blas1 + blas2 + blas3).split()]
+        #XXX: should we check the actual existence of the source files here?
+ sources = [f for f in sources if os.path.isfile(f)]
+ info = {'sources': sources, 'language': 'f77'}
+ self.set_info(**info)
+
+
+class x11_info(system_info):
+ section = 'x11'
+ notfounderror = X11NotFoundError
+ _lib_names = ['X11']
+
+ def __init__(self):
+ system_info.__init__(self,
+ default_lib_dirs=default_x11_lib_dirs,
+ default_include_dirs=default_x11_include_dirs)
+
+ def calc_info(self):
+ if sys.platform in ['win32']:
+ return
+ lib_dirs = self.get_lib_dirs()
+ include_dirs = self.get_include_dirs()
+ opt = self.get_option_single('x11_libs', 'libraries')
+ x11_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, x11_libs, [])
+ if info is None:
+ return
+ inc_dir = None
+ for d in include_dirs:
+ if self.combine_paths(d, 'X11/X.h'):
+ inc_dir = d
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir])
+ self.set_info(**info)
+
+
+class _numpy_info(system_info):
+ section = 'Numeric'
+ modulename = 'Numeric'
+ notfounderror = NumericNotFoundError
+
+ def __init__(self):
+ include_dirs = []
+ try:
+ module = __import__(self.modulename)
+ prefix = []
+ for name in module.__file__.split(os.sep):
+ if name == 'lib':
+ break
+ prefix.append(name)
+
+ # Ask numpy for its own include path before attempting
+ # anything else
+ try:
+ include_dirs.append(getattr(module, 'get_include')())
+ except AttributeError:
+ pass
+
+ include_dirs.append(sysconfig.get_path('include'))
+ except ImportError:
+ pass
+ py_incl_dir = sysconfig.get_path('include')
+ include_dirs.append(py_incl_dir)
+ py_pincl_dir = sysconfig.get_path('platinclude')
+ if py_pincl_dir not in include_dirs:
+ include_dirs.append(py_pincl_dir)
+ for d in default_include_dirs:
+ d = os.path.join(d, os.path.basename(py_incl_dir))
+ if d not in include_dirs:
+ include_dirs.append(d)
+ system_info.__init__(self,
+ default_lib_dirs=[],
+ default_include_dirs=include_dirs)
+
+ def calc_info(self):
+ try:
+ module = __import__(self.modulename)
+ except ImportError:
+ return
+ info = {}
+ macros = []
+ for v in ['__version__', 'version']:
+ vrs = getattr(module, v, None)
+ if vrs is None:
+ continue
+ macros = [(self.modulename.upper() + '_VERSION',
+ _c_string_literal(vrs)),
+ (self.modulename.upper(), None)]
+ break
+ dict_append(info, define_macros=macros)
+ include_dirs = self.get_include_dirs()
+ inc_dir = None
+ for d in include_dirs:
+ if self.combine_paths(d,
+ os.path.join(self.modulename,
+ 'arrayobject.h')):
+ inc_dir = d
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir])
+ if info:
+ self.set_info(**info)
+ return
+
+
+class numarray_info(_numpy_info):
+ section = 'numarray'
+ modulename = 'numarray'
+
+
+class Numeric_info(_numpy_info):
+ section = 'Numeric'
+ modulename = 'Numeric'
+
+
+class numpy_info(_numpy_info):
+ section = 'numpy'
+ modulename = 'numpy'
+
+
+class numerix_info(system_info):
+ section = 'numerix'
+
+ def calc_info(self):
+ which = None, None
+ if os.getenv("NUMERIX"):
+ which = os.getenv("NUMERIX"), "environment var"
+ # If all the above fail, default to numpy.
+ if which[0] is None:
+ which = "numpy", "defaulted"
+            try:
+                import numpy  # noqa: F401
+                which = "numpy", "defaulted"
+            except ImportError as e:
+                msg1 = str(e)
+                try:
+                    import Numeric  # noqa: F401
+                    which = "numeric", "defaulted"
+                except ImportError as e:
+                    msg2 = str(e)
+                    try:
+                        import numarray  # noqa: F401
+                        which = "numarray", "defaulted"
+                    except ImportError as e:
+                        msg3 = str(e)
+                        log.info(msg1)
+                        log.info(msg2)
+                        log.info(msg3)
+ which = which[0].strip().lower(), which[1]
+ if which[0] not in ["numeric", "numarray", "numpy"]:
+ raise ValueError("numerix selector must be either 'Numeric' "
+ "or 'numarray' or 'numpy' but the value obtained"
+ " from the %s was '%s'." % (which[1], which[0]))
+ os.environ['NUMERIX'] = which[0]
+ self.set_info(**get_info(which[0]))
+
+
+class f2py_info(system_info):
+ def calc_info(self):
+ try:
+ import numpy.f2py as f2py
+ except ImportError:
+ return
+ f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
+ self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
+ include_dirs=[f2py_dir])
+ return
+
+
+class boost_python_info(system_info):
+ section = 'boost_python'
+ dir_env_var = 'BOOST'
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['boost*']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
+ 'module.cpp')):
+ src_dir = d
+ break
+ if not src_dir:
+ return
+ py_incl_dirs = [sysconfig.get_path('include')]
+ py_pincl_dir = sysconfig.get_path('platinclude')
+ if py_pincl_dir not in py_incl_dirs:
+ py_incl_dirs.append(py_pincl_dir)
+ srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
+ bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
+ bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
+ info = {'libraries': [('boost_python_src',
+ {'include_dirs': [src_dir] + py_incl_dirs,
+ 'sources':bpl_srcs}
+ )],
+ 'include_dirs': [src_dir],
+ }
+ if info:
+ self.set_info(**info)
+ return
+
+
+class agg2_info(system_info):
+ section = 'agg2'
+ dir_env_var = 'AGG2'
+
+ def get_paths(self, section, key):
+ pre_dirs = system_info.get_paths(self, section, key)
+ dirs = []
+ for d in pre_dirs:
+ dirs.extend([d] + self.combine_paths(d, ['agg2*']))
+ return [d for d in dirs if os.path.isdir(d)]
+
+ def calc_info(self):
+ src_dirs = self.get_src_dirs()
+ src_dir = ''
+ for d in src_dirs:
+ if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
+ src_dir = d
+ break
+ if not src_dir:
+ return
+ if sys.platform == 'win32':
+ agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
+ 'win32', 'agg_win32_bmp.cpp'))
+ else:
+ agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
+ agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
+ 'X11',
+ 'agg_platform_support.cpp')]
+
+ info = {'libraries':
+ [('agg2_src',
+ {'sources': agg2_srcs,
+ 'include_dirs': [os.path.join(src_dir, 'include')],
+ }
+ )],
+ 'include_dirs': [os.path.join(src_dir, 'include')],
+ }
+ if info:
+ self.set_info(**info)
+ return
+
+
+class _pkg_config_info(system_info):
+ section = None
+ config_env_var = 'PKG_CONFIG'
+ default_config_exe = 'pkg-config'
+ append_config_exe = ''
+ version_macro_name = None
+ release_macro_name = None
+ version_flag = '--modversion'
+ cflags_flag = '--cflags'
+
+ def get_config_exe(self):
+ if self.config_env_var in os.environ:
+ return os.environ[self.config_env_var]
+ return self.default_config_exe
+
+ def get_config_output(self, config_exe, option):
+ cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
+ try:
+ o = subprocess.check_output(cmd)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ o = filepath_from_subprocess_output(o)
+ return o
+
+ def calc_info(self):
+ config_exe = find_executable(self.get_config_exe())
+ if not config_exe:
+ log.warn('File not found: %s. Cannot determine %s info.' \
+ % (config_exe, self.section))
+ return
+ info = {}
+ macros = []
+ libraries = []
+ library_dirs = []
+ include_dirs = []
+ extra_link_args = []
+ extra_compile_args = []
+ version = self.get_config_output(config_exe, self.version_flag)
+ if version:
+ macros.append((self.__class__.__name__.split('.')[-1].upper(),
+ _c_string_literal(version)))
+ if self.version_macro_name:
+ macros.append((self.version_macro_name + '_%s'
+ % (version.replace('.', '_')), None))
+ if self.release_macro_name:
+ release = self.get_config_output(config_exe, '--release')
+ if release:
+ macros.append((self.release_macro_name + '_%s'
+ % (release.replace('.', '_')), None))
+ opts = self.get_config_output(config_exe, '--libs')
+ if opts:
+ for opt in opts.split():
+ if opt[:2] == '-l':
+ libraries.append(opt[2:])
+ elif opt[:2] == '-L':
+ library_dirs.append(opt[2:])
+ else:
+ extra_link_args.append(opt)
+ opts = self.get_config_output(config_exe, self.cflags_flag)
+ if opts:
+ for opt in opts.split():
+ if opt[:2] == '-I':
+ include_dirs.append(opt[2:])
+ elif opt[:2] == '-D':
+ if '=' in opt:
+ n, v = opt[2:].split('=')
+ macros.append((n, v))
+ else:
+ macros.append((opt[2:], None))
+ else:
+ extra_compile_args.append(opt)
+ if macros:
+ dict_append(info, define_macros=macros)
+ if libraries:
+ dict_append(info, libraries=libraries)
+ if library_dirs:
+ dict_append(info, library_dirs=library_dirs)
+ if include_dirs:
+ dict_append(info, include_dirs=include_dirs)
+ if extra_link_args:
+ dict_append(info, extra_link_args=extra_link_args)
+ if extra_compile_args:
+ dict_append(info, extra_compile_args=extra_compile_args)
+ if info:
+ self.set_info(**info)
+ return
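+    # Illustrative: for a hypothetical `pkg-config --libs foo` output of
+    #   -L/opt/foo/lib -lfoo -Wl,-rpath,/opt/foo/lib
+    # the loop above yields library_dirs=['/opt/foo/lib'],
+    # libraries=['foo'] and extra_link_args=['-Wl,-rpath,/opt/foo/lib'].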
+
+
+class wx_info(_pkg_config_info):
+ section = 'wx'
+ config_env_var = 'WX_CONFIG'
+ default_config_exe = 'wx-config'
+ append_config_exe = ''
+ version_macro_name = 'WX_VERSION'
+ release_macro_name = 'WX_RELEASE'
+ version_flag = '--version'
+ cflags_flag = '--cxxflags'
+
+
+class gdk_pixbuf_xlib_2_info(_pkg_config_info):
+ section = 'gdk_pixbuf_xlib_2'
+ append_config_exe = 'gdk-pixbuf-xlib-2.0'
+ version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
+
+
+class gdk_pixbuf_2_info(_pkg_config_info):
+ section = 'gdk_pixbuf_2'
+ append_config_exe = 'gdk-pixbuf-2.0'
+ version_macro_name = 'GDK_PIXBUF_VERSION'
+
+
+class gdk_x11_2_info(_pkg_config_info):
+ section = 'gdk_x11_2'
+ append_config_exe = 'gdk-x11-2.0'
+ version_macro_name = 'GDK_X11_VERSION'
+
+
+class gdk_2_info(_pkg_config_info):
+ section = 'gdk_2'
+ append_config_exe = 'gdk-2.0'
+ version_macro_name = 'GDK_VERSION'
+
+
+class gdk_info(_pkg_config_info):
+ section = 'gdk'
+ append_config_exe = 'gdk'
+ version_macro_name = 'GDK_VERSION'
+
+
+class gtkp_x11_2_info(_pkg_config_info):
+ section = 'gtkp_x11_2'
+ append_config_exe = 'gtk+-x11-2.0'
+ version_macro_name = 'GTK_X11_VERSION'
+
+
+class gtkp_2_info(_pkg_config_info):
+ section = 'gtkp_2'
+ append_config_exe = 'gtk+-2.0'
+ version_macro_name = 'GTK_VERSION'
+
+
+class xft_info(_pkg_config_info):
+ section = 'xft'
+ append_config_exe = 'xft'
+ version_macro_name = 'XFT_VERSION'
+
+
+class freetype2_info(_pkg_config_info):
+ section = 'freetype2'
+ append_config_exe = 'freetype2'
+ version_macro_name = 'FREETYPE2_VERSION'
+
+
+class amd_info(system_info):
+ section = 'amd'
+ dir_env_var = 'AMD'
+ _lib_names = ['amd']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+
+ opt = self.get_option_single('amd_libs', 'libraries')
+ amd_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, amd_libs, [])
+ if info is None:
+ return
+
+ include_dirs = self.get_include_dirs()
+
+ inc_dir = None
+ for d in include_dirs:
+ p = self.combine_paths(d, 'amd.h')
+ if p:
+ inc_dir = os.path.dirname(p[0])
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir],
+ define_macros=[('SCIPY_AMD_H', None)],
+ swig_opts=['-I' + inc_dir])
+
+ self.set_info(**info)
+ return
+
+
+class umfpack_info(system_info):
+ section = 'umfpack'
+ dir_env_var = 'UMFPACK'
+ notfounderror = UmfpackNotFoundError
+ _lib_names = ['umfpack']
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+
+ opt = self.get_option_single('umfpack_libs', 'libraries')
+ umfpack_libs = self.get_libs(opt, self._lib_names)
+ info = self.check_libs(lib_dirs, umfpack_libs, [])
+ if info is None:
+ return
+
+ include_dirs = self.get_include_dirs()
+
+ inc_dir = None
+ for d in include_dirs:
+ p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
+ if p:
+ inc_dir = os.path.dirname(p[0])
+ break
+ if inc_dir is not None:
+ dict_append(info, include_dirs=[inc_dir],
+ define_macros=[('SCIPY_UMFPACK_H', None)],
+ swig_opts=['-I' + inc_dir])
+
+ dict_append(info, **get_info('amd'))
+
+ self.set_info(**info)
+ return
+
+
+def combine_paths(*args, **kws):
+ """ Return a list of existing paths composed by all combinations of
+ items from arguments.
+ """
+ r = []
+ for a in args:
+ if not a:
+ continue
+ if is_string(a):
+ a = [a]
+ r.append(a)
+ args = r
+ if not args:
+ return []
+ if len(args) == 1:
+ result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
+ elif len(args) == 2:
+ result = []
+ for a0 in args[0]:
+ for a1 in args[1]:
+ result.extend(glob(os.path.join(a0, a1)))
+ else:
+ result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
+ log.debug('(paths: %s)', ','.join(result))
+ return result
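+# Example (illustrative paths): combine_paths('/usr/include', ['cblas*', 'atlas*'])
+# globs '/usr/include/cblas*' and '/usr/include/atlas*' and returns only the
+# paths that actually exist on disk.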
+
+language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
+inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
+
+
+def dict_append(d, **kws):
+ languages = []
+ for k, v in kws.items():
+ if k == 'language':
+ languages.append(v)
+ continue
+ if k in d:
+ if k in ['library_dirs', 'include_dirs',
+ 'extra_compile_args', 'extra_link_args',
+ 'runtime_library_dirs', 'define_macros']:
+ [d[k].append(vv) for vv in v if vv not in d[k]]
+ else:
+ d[k].extend(v)
+ else:
+ d[k] = v
+ if languages:
+ l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
+ d['language'] = l
+ return
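+# Example (illustrative): appends to the path-like keys listed above are
+# de-duplicated, so
+#   d = {'include_dirs': ['/usr/include']}
+#   dict_append(d, include_dirs=['/usr/include', '/opt/include'])
+# leaves d['include_dirs'] == ['/usr/include', '/opt/include'].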
+
+
+def parseCmdLine(argv=(None,)):
+ import optparse
+ parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
+ parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
+ default=False,
+ help='be verbose and print more messages')
+
+ opts, args = parser.parse_args(args=argv[1:])
+ return opts, args
+
+
+def show_all(argv=None):
+ import inspect
+ if argv is None:
+ argv = sys.argv
+ opts, args = parseCmdLine(argv)
+ if opts.verbose:
+ log.set_threshold(log.DEBUG)
+ else:
+ log.set_threshold(log.INFO)
+ show_only = []
+ for n in args:
+ if n[-5:] != '_info':
+ n = n + '_info'
+ show_only.append(n)
+ show_all = not show_only
+ _gdict_ = globals().copy()
+ for name, c in _gdict_.items():
+ if not inspect.isclass(c):
+ continue
+ if not issubclass(c, system_info) or c is system_info:
+ continue
+ if not show_all:
+ if name not in show_only:
+ continue
+ del show_only[show_only.index(name)]
+ conf = c()
+ conf.verbosity = 2
+ # we don't need the result, but we want
+ # the side effect of printing diagnostics
+ conf.get_info()
+ if show_only:
+ log.info('Info classes not defined: %s', ','.join(show_only))
+
+if __name__ == "__main__":
+ show_all()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py
new file mode 100644
index 00000000..372100fc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_build_ext.py
@@ -0,0 +1,74 @@
+'''Tests for numpy.distutils.build_ext.'''
+
+import os
+import subprocess
+import sys
+from textwrap import indent, dedent
+import pytest
+from numpy.testing import IS_WASM
+
+@pytest.mark.skipif(IS_WASM, reason="cannot start subprocess in wasm")
+@pytest.mark.slow
+def test_multi_fortran_libs_link(tmp_path):
+ '''
+ Ensures multiple "fake" static libraries are correctly linked.
+    See gh-18295.
+ '''
+
+ # We need to make sure we actually have an f77 compiler.
+ # This is nontrivial, so we'll borrow the utilities
+ # from f2py tests:
+ from numpy.f2py.tests.util import has_f77_compiler
+ if not has_f77_compiler():
+ pytest.skip('No F77 compiler found')
+
+ # make some dummy sources
+ with open(tmp_path / '_dummy1.f', 'w') as fid:
+ fid.write(indent(dedent('''\
+ FUNCTION dummy_one()
+ RETURN
+ END FUNCTION'''), prefix=' '*6))
+ with open(tmp_path / '_dummy2.f', 'w') as fid:
+ fid.write(indent(dedent('''\
+ FUNCTION dummy_two()
+ RETURN
+ END FUNCTION'''), prefix=' '*6))
+ with open(tmp_path / '_dummy.c', 'w') as fid:
+ # doesn't need to load - just needs to exist
+ fid.write('int PyInit_dummyext;')
+
+ # make a setup file
+ with open(tmp_path / 'setup.py', 'w') as fid:
+ srctree = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+ fid.write(dedent(f'''\
+ def configuration(parent_package="", top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration("", parent_package, top_path)
+ config.add_library("dummy1", sources=["_dummy1.f"])
+ config.add_library("dummy2", sources=["_dummy2.f"])
+ config.add_extension("dummyext", sources=["_dummy.c"], libraries=["dummy1", "dummy2"])
+ return config
+
+
+ if __name__ == "__main__":
+ import sys
+ sys.path.insert(0, r"{srctree}")
+ from numpy.distutils.core import setup
+ setup(**configuration(top_path="").todict())'''))
+
+    # build the test extension and "install" into a temporary directory
+ build_dir = tmp_path
+ subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
+ '--prefix', str(tmp_path / 'installdir'),
+ '--record', str(tmp_path / 'tmp_install_log.txt'),
+ ],
+ cwd=str(build_dir),
+ )
+ # get the path to the so
+ so = None
+ with open(tmp_path /'tmp_install_log.txt') as fid:
+ for line in fid:
+ if 'dummyext' in line:
+ so = line.strip()
+ break
+ assert so is not None
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py
new file mode 100644
index 00000000..657ebdb6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt.py
@@ -0,0 +1,808 @@
+import re, textwrap, os
+from os import sys, path
+from distutils.errors import DistutilsError
+
+is_standalone = __name__ == '__main__' and __package__ is None
+if is_standalone:
+ import unittest, contextlib, tempfile, shutil
+ sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
+ from ccompiler_opt import CCompilerOpt
+
+ # from numpy/testing/_private/utils.py
+ @contextlib.contextmanager
+ def tempdir(*args, **kwargs):
+ tmpdir = tempfile.mkdtemp(*args, **kwargs)
+ try:
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def assert_(expr, msg=''):
+ if not expr:
+ raise AssertionError(msg)
+else:
+ from numpy.distutils.ccompiler_opt import CCompilerOpt
+ from numpy.testing import assert_, tempdir
+
+# architectures and compilers to test
+arch_compilers = dict(
+ x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ ppc64 = ("gcc", "clang"),
+ ppc64le = ("gcc", "clang"),
+ armhf = ("gcc", "clang"),
+ aarch64 = ("gcc", "clang"),
+ s390x = ("gcc", "clang"),
+ noarch = ("gcc",)
+)
+
+class FakeCCompilerOpt(CCompilerOpt):
+ fake_info = ""
+ def __init__(self, trap_files="", trap_flags="", *args, **kwargs):
+ self.fake_trap_files = trap_files
+ self.fake_trap_flags = trap_flags
+ CCompilerOpt.__init__(self, None, **kwargs)
+
+ def __repr__(self):
+ return textwrap.dedent("""\
+ <<<<
+ march : {}
+ compiler : {}
+ ----------------
+ {}
+ >>>>
+ """).format(self.cc_march, self.cc_name, self.report())
+
+ def dist_compile(self, sources, flags, **kwargs):
+ assert(isinstance(sources, list))
+ assert(isinstance(flags, list))
+ if self.fake_trap_files:
+ for src in sources:
+ if re.match(self.fake_trap_files, src):
+ self.dist_error("source is trapped by a fake interface")
+ if self.fake_trap_flags:
+ for f in flags:
+ if re.match(self.fake_trap_flags, f):
+ self.dist_error("flag is trapped by a fake interface")
+ # fake objects
+ return zip(sources, [' '.join(flags)] * len(sources))
+
+ def dist_info(self):
+ return FakeCCompilerOpt.fake_info
+
+ @staticmethod
+ def dist_log(*args, stderr=False):
+ pass
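+    # Illustrative: no real compiler ever runs; "compiling"
+    # ['a.dispatch.avx2.c'] with flags ['-mavx2'] simply yields the pair
+    # ('a.dispatch.avx2.c', '-mavx2'), which is all the tests below need to
+    # inspect the flags that would have been used.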
+
+class _Test_CCompilerOpt:
+ arch = None # x86_64
+ cc = None # gcc
+
+ def setup_class(self):
+ FakeCCompilerOpt.conf_nocache = True
+ self._opt = None
+
+ def nopt(self, *args, **kwargs):
+ FakeCCompilerOpt.fake_info = (self.arch, self.cc, "")
+ return FakeCCompilerOpt(*args, **kwargs)
+
+ def opt(self):
+ if not self._opt:
+ self._opt = self.nopt()
+ return self._opt
+
+ def march(self):
+ return self.opt().cc_march
+
+ def cc_name(self):
+ return self.opt().cc_name
+
+ def get_targets(self, targets, groups, **kwargs):
+ FakeCCompilerOpt.conf_target_groups = groups
+ opt = self.nopt(
+ cpu_baseline=kwargs.get("baseline", "min"),
+ cpu_dispatch=kwargs.get("dispatch", "max"),
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ with tempdir() as tmpdir:
+ file = os.path.join(tmpdir, "test_targets.c")
+ with open(file, 'w') as f:
+ f.write(targets)
+ gtargets = []
+ gflags = {}
+ fake_objects = opt.try_dispatch([file])
+ for source, flags in fake_objects:
+ gtar = path.basename(source).split('.')[1:-1]
+ glen = len(gtar)
+ if glen == 0:
+ gtar = "baseline"
+ elif glen == 1:
+ gtar = gtar[0].upper()
+ else:
+                    # convert a multi-target into parenthesized string format,
+                    # equivalent to the configuration statement syntax.
+ gtar = ('('+' '.join(gtar)+')').upper()
+ gtargets.append(gtar)
+ gflags[gtar] = flags
+
+ has_baseline, targets = opt.sources_status[file]
+ targets = targets + ["baseline"] if has_baseline else targets
+ # convert tuple that represent multi-target into parentheses str format
+ targets = [
+ '('+' '.join(tar)+')' if isinstance(tar, tuple) else tar
+ for tar in targets
+ ]
+ if len(targets) != len(gtargets) or not all(t in gtargets for t in targets):
+ raise AssertionError(
+ "'sources_status' returns different targets than the compiled targets\n"
+ "%s != %s" % (targets, gtargets)
+ )
+        # return targets from 'sources_status' since the order matters
+ return targets, gflags
+
+ def arg_regex(self, **kwargs):
+ map2origin = dict(
+ x64 = "x86",
+ ppc64le = "ppc64",
+ aarch64 = "armhf",
+ clang = "gcc",
+ )
+ march = self.march(); cc_name = self.cc_name()
+ map_march = map2origin.get(march, march)
+ map_cc = map2origin.get(cc_name, cc_name)
+ for key in (
+ march, cc_name, map_march, map_cc,
+ march + '_' + cc_name,
+ map_march + '_' + cc_name,
+ march + '_' + map_cc,
+ map_march + '_' + map_cc,
+ ) :
+ regex = kwargs.pop(key, None)
+ if regex is not None:
+ break
+ if regex:
+ if isinstance(regex, dict):
+ for k, v in regex.items():
+ if v[-1:] not in ')}$?\\.+*':
+ regex[k] = v + '$'
+ else:
+ assert(isinstance(regex, str))
+ if regex[-1:] not in ')}$?\\.+*':
+ regex += '$'
+ return regex
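+    # Illustrative: for arch='x64' and cc='clang', a pattern passed as
+    # x86_gcc=... is still found, because map2origin maps x64 back to x86 and
+    # clang back to gcc before the key lookup above.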
+
+ def expect(self, dispatch, baseline="", **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ opt = self.nopt(
+ cpu_baseline=baseline, cpu_dispatch=dispatch,
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ features = ' '.join(opt.cpu_dispatch_names())
+ if not match:
+ if len(features) != 0:
+ raise AssertionError(
+ 'expected empty features, not "%s"' % features
+ )
+ return
+ if not re.match(match, features, re.IGNORECASE):
+ raise AssertionError(
+ 'dispatch features "%s" not match "%s"' % (features, match)
+ )
+
+ def expect_baseline(self, baseline, dispatch="", **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ opt = self.nopt(
+ cpu_baseline=baseline, cpu_dispatch=dispatch,
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ features = ' '.join(opt.cpu_baseline_names())
+ if not match:
+ if len(features) != 0:
+ raise AssertionError(
+ 'expected empty features, not "%s"' % features
+ )
+ return
+ if not re.match(match, features, re.IGNORECASE):
+ raise AssertionError(
+ 'baseline features "%s" not match "%s"' % (features, match)
+ )
+
+ def expect_flags(self, baseline, dispatch="", **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ opt = self.nopt(
+ cpu_baseline=baseline, cpu_dispatch=dispatch,
+ trap_files=kwargs.get("trap_files", ""),
+ trap_flags=kwargs.get("trap_flags", "")
+ )
+ flags = ' '.join(opt.cpu_baseline_flags())
+ if not match:
+ if len(flags) != 0:
+ raise AssertionError(
+ 'expected empty flags not "%s"' % flags
+ )
+ return
+ if not re.match(match, flags):
+ raise AssertionError(
+ 'flags "%s" not match "%s"' % (flags, match)
+ )
+
+ def expect_targets(self, targets, groups={}, **kwargs):
+ match = self.arg_regex(**kwargs)
+ if match is None:
+ return
+ targets, _ = self.get_targets(targets=targets, groups=groups, **kwargs)
+ targets = ' '.join(targets)
+ if not match:
+ if len(targets) != 0:
+ raise AssertionError(
+ 'expected empty targets, not "%s"' % targets
+ )
+ return
+ if not re.match(match, targets, re.IGNORECASE):
+ raise AssertionError(
+ 'targets "%s" not match "%s"' % (targets, match)
+ )
+
+ def expect_target_flags(self, targets, groups={}, **kwargs):
+ match_dict = self.arg_regex(**kwargs)
+ if match_dict is None:
+ return
+ assert(isinstance(match_dict, dict))
+ _, tar_flags = self.get_targets(targets=targets, groups=groups)
+
+ for match_tar, match_flags in match_dict.items():
+ if match_tar not in tar_flags:
+ raise AssertionError(
+ 'expected to find target "%s"' % match_tar
+ )
+ flags = tar_flags[match_tar]
+ if not match_flags:
+ if len(flags) != 0:
+ raise AssertionError(
+ 'expected to find empty flags in target "%s"' % match_tar
+ )
+ if not re.match(match_flags, flags):
+ raise AssertionError(
+ '"%s" flags "%s" not match "%s"' % (match_tar, flags, match_flags)
+ )
+
+ def test_interface(self):
+ wrong_arch = "ppc64" if self.arch != "ppc64" else "x86"
+ wrong_cc = "clang" if self.cc != "clang" else "icc"
+ opt = self.opt()
+ assert_(getattr(opt, "cc_on_" + self.arch))
+ assert_(not getattr(opt, "cc_on_" + wrong_arch))
+ assert_(getattr(opt, "cc_is_" + self.cc))
+ assert_(not getattr(opt, "cc_is_" + wrong_cc))
+
+ def test_args_empty(self):
+ for baseline, dispatch in (
+ ("", "none"),
+ (None, ""),
+ ("none +none", "none - none"),
+ ("none -max", "min - max"),
+ ("+vsx2 -VSX2", "vsx avx2 avx512f -max"),
+ ("max -vsx - avx + avx512f neon -MAX ",
+ "min -min + max -max -vsx + avx2 -avx2 +NONE")
+ ) :
+ opt = self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
+ assert(len(opt.cpu_baseline_names()) == 0)
+ assert(len(opt.cpu_dispatch_names()) == 0)
+
+ def test_args_validation(self):
+ if self.march() == "unknown":
+ return
+        # check the sanity of argument validation
+        for baseline, dispatch in (
+            ("unkown_feature - max +min", "unknown max min"), # unknown features
+            ("#avx2", "$vsx") # groups and policies aren't acceptable
+ ) :
+ try:
+ self.nopt(cpu_baseline=baseline, cpu_dispatch=dispatch)
+                raise AssertionError("expected an exception for invalid arguments")
+ except DistutilsError:
+ pass
+
+ def test_skip(self):
+        # only take what the platform supports and skip the others
+        # without raising exceptions
+ self.expect(
+ "sse vsx neon",
+ x86="sse", ppc64="vsx", armhf="neon", unknown=""
+ )
+ self.expect(
+ "sse41 avx avx2 vsx2 vsx3 neon_vfpv4 asimd",
+ x86 = "sse41 avx avx2",
+ ppc64 = "vsx2 vsx3",
+ armhf = "neon_vfpv4 asimd",
+ unknown = ""
+ )
+        # any feature in cpu_dispatch must be ignored if it's part of the baseline
+ self.expect(
+ "sse neon vsx", baseline="sse neon vsx",
+ x86="", ppc64="", armhf=""
+ )
+ self.expect(
+ "avx2 vsx3 asimdhp", baseline="avx2 vsx3 asimdhp",
+ x86="", ppc64="", armhf=""
+ )
+
+ def test_implies(self):
+        # the baseline combines implied features, so we rely
+        # on it instead of testing 'feature_implies()' directly
+ self.expect_baseline(
+ "fma3 avx2 asimd vsx3",
+ # .* between two spaces can validate features in between
+ x86 = "sse .* sse41 .* fma3.*avx2",
+ ppc64 = "vsx vsx2 vsx3",
+ armhf = "neon neon_fp16 neon_vfpv4 asimd"
+ )
+ """
+ special cases
+ """
+        # in icc and msvc, FMA3 and AVX2 can't be separated:
+        # each implies the other; the same goes for avx512f & cd
+ for f0, f1 in (
+ ("fma3", "avx2"),
+ ("avx512f", "avx512cd"),
+ ):
+ diff = ".* sse42 .* %s .*%s$" % (f0, f1)
+ self.expect_baseline(f0,
+ x86_gcc=".* sse42 .* %s$" % f0,
+ x86_icc=diff, x86_iccw=diff
+ )
+ self.expect_baseline(f1,
+ x86_gcc=".* avx .* %s$" % f1,
+ x86_icc=diff, x86_iccw=diff
+ )
+        # in msvc, the following features can't be separated either
+ for f in (("fma3", "avx2"), ("avx512f", "avx512cd", "avx512_skx")):
+ for ff in f:
+ self.expect_baseline(ff,
+ x86_msvc=".*%s" % ' '.join(f)
+ )
+
+ # in ppc64le VSX and VSX2 can't be separated
+ self.expect_baseline("vsx", ppc64le="vsx vsx2")
+        # in aarch64 the following features can't be separated
+ for f in ("neon", "neon_fp16", "neon_vfpv4", "asimd"):
+ self.expect_baseline(f, aarch64="neon neon_fp16 neon_vfpv4 asimd")
+
+ def test_args_options(self):
+ # max & native
+ for o in ("max", "native"):
+ if o == "native" and self.cc_name() == "msvc":
+ continue
+ self.expect(o,
+ trap_files=".*cpu_(sse|vsx|neon|vx).c",
+ x86="", ppc64="", armhf="", s390x=""
+ )
+ self.expect(o,
+ trap_files=".*cpu_(sse3|vsx2|neon_vfpv4|vxe).c",
+ x86="sse sse2", ppc64="vsx", armhf="neon neon_fp16",
+ aarch64="", ppc64le="", s390x="vx"
+ )
+ self.expect(o,
+ trap_files=".*cpu_(popcnt|vsx3).c",
+ x86="sse .* sse41", ppc64="vsx vsx2",
+ armhf="neon neon_fp16 .* asimd .*",
+ s390x="vx vxe vxe2"
+ )
+ self.expect(o,
+ x86_gcc=".* xop fma4 .* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+            # in icc, xop and fma4 aren't supported
+ x86_icc=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+ x86_iccw=".* avx512f .* avx512_knl avx512_knm avx512_skx .*",
+ # in msvc, avx512_knl avx512_knm aren't supported
+ x86_msvc=".* xop fma4 .* avx512f .* avx512_skx .*",
+ armhf=".* asimd asimdhp asimddp .*",
+ ppc64="vsx vsx2 vsx3 vsx4.*",
+ s390x="vx vxe vxe2.*"
+ )
+ # min
+ self.expect("min",
+ x86="sse sse2", x64="sse sse2 sse3",
+ armhf="", aarch64="neon neon_fp16 .* asimd",
+ ppc64="", ppc64le="vsx vsx2", s390x=""
+ )
+ self.expect(
+ "min", trap_files=".*cpu_(sse2|vsx2).c",
+ x86="", ppc64le=""
+ )
+        # an exception must be triggered if the native flag isn't supported
+        # when the "native" option is activated through the args
+ try:
+ self.expect("native",
+ trap_flags=".*(-march=native|-xHost|/QxHost).*",
+ x86=".*", ppc64=".*", armhf=".*", s390x=".*"
+ )
+ if self.march() != "unknown":
+                raise AssertionError(
+                    "expected an exception for %s" % self.march()
+                )
+ except DistutilsError:
+ if self.march() == "unknown":
+                raise AssertionError("expected no exceptions")
+
+ def test_flags(self):
+ self.expect_flags(
+ "sse sse2 vsx vsx2 neon neon_fp16 vx vxe",
+ x86_gcc="-msse -msse2", x86_icc="-msse -msse2",
+ x86_iccw="/arch:SSE2",
+ x86_msvc="/arch:SSE2" if self.march() == "x86" else "",
+ ppc64_gcc= "-mcpu=power8",
+ ppc64_clang="-maltivec -mvsx -mpower8-vector",
+ armhf_gcc="-mfpu=neon-fp16 -mfp16-format=ieee",
+ aarch64="",
+ s390x="-mzvector -march=arch12"
+ )
+ # testing normalize -march
+ self.expect_flags(
+ "asimd",
+ aarch64="",
+ armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8-a\+simd"
+ )
+ self.expect_flags(
+ "asimdhp",
+ aarch64_gcc=r"-march=armv8.2-a\+fp16",
+ armhf_gcc=r"-mfp16-format=ieee -mfpu=neon-fp-armv8 -march=armv8.2-a\+fp16"
+ )
+ self.expect_flags(
+ "asimddp", aarch64_gcc=r"-march=armv8.2-a\+dotprod"
+ )
+ self.expect_flags(
+ # asimdfhm implies asimdhp
+ "asimdfhm", aarch64_gcc=r"-march=armv8.2-a\+fp16\+fp16fml"
+ )
+ self.expect_flags(
+ "asimddp asimdhp asimdfhm",
+ aarch64_gcc=r"-march=armv8.2-a\+dotprod\+fp16\+fp16fml"
+ )
+ self.expect_flags(
+ "vx vxe vxe2",
+ s390x=r"-mzvector -march=arch13"
+ )
+
+ def test_targets_exceptions(self):
+ for targets in (
+ "bla bla", "/*@targets",
+ "/*@targets */",
+ "/*@targets unknown */",
+ "/*@targets $unknown_policy avx2 */",
+ "/*@targets #unknown_group avx2 */",
+ "/*@targets $ */",
+ "/*@targets # vsx */",
+ "/*@targets #$ vsx */",
+ "/*@targets vsx avx2 ) */",
+ "/*@targets vsx avx2 (avx2 */",
+ "/*@targets vsx avx2 () */",
+ "/*@targets vsx avx2 ($autovec) */", # no features
+ "/*@targets vsx avx2 (xxx) */",
+ "/*@targets vsx avx2 (baseline) */",
+ ) :
+ try:
+ self.expect_targets(
+ targets,
+ x86="", armhf="", ppc64="", s390x=""
+ )
+ if self.march() != "unknown":
+                    raise AssertionError(
+                        "expected an exception for %s" % self.march()
+                    )
+ except DistutilsError:
+ if self.march() == "unknown":
+                    raise AssertionError("expected no exceptions")
+
+ def test_targets_syntax(self):
+ for targets in (
+ "/*@targets $keep_baseline sse vsx neon vx*/",
+ "/*@targets,$keep_baseline,sse,vsx,neon vx*/",
+ "/*@targets*$keep_baseline*sse*vsx*neon*vx*/",
+ """
+ /*
+ ** @targets
+ ** $keep_baseline, sse vsx,neon, vx
+ */
+ """,
+ """
+ /*
+ ************@targets****************
+ ** $keep_baseline, sse vsx, neon, vx
+ ************************************
+ */
+ """,
+ """
+ /*
+ /////////////@targets/////////////////
+ //$keep_baseline//sse//vsx//neon//vx
+ /////////////////////////////////////
+ */
+ """,
+ """
+ /*
+ @targets
+ $keep_baseline
+ SSE VSX NEON VX*/
+ """
+        ):
+ self.expect_targets(targets,
+ x86="sse", ppc64="vsx", armhf="neon", s390x="vx", unknown=""
+ )
+
+ def test_targets(self):
+ # test skipping baseline features
+ self.expect_targets(
+ """
+ /*@targets
+ sse sse2 sse41 avx avx2 avx512f
+ vsx vsx2 vsx3 vsx4
+ neon neon_fp16 asimdhp asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="avx vsx2 asimd vx vxe",
+ x86="avx512f avx2", armhf="asimddp asimdhp", ppc64="vsx4 vsx3",
+ s390x="vxe2"
+ )
+ # test skipping non-dispatch features
+ self.expect_targets(
+ """
+ /*@targets
+ sse41 avx avx2 avx512f
+ vsx2 vsx3 vsx4
+ asimd asimdhp asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="", dispatch="sse41 avx2 vsx2 asimd asimddp vxe2",
+ x86="avx2 sse41", armhf="asimddp asimd", ppc64="vsx2", s390x="vxe2"
+ )
+        # test skipping features that aren't supported
+ self.expect_targets(
+ """
+ /*@targets
+ sse2 sse41 avx2 avx512f
+ vsx2 vsx3 vsx4
+ neon asimdhp asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="",
+ trap_files=".*(avx2|avx512f|vsx3|vsx4|asimddp|vxe2).c",
+ x86="sse41 sse2", ppc64="vsx2", armhf="asimdhp neon",
+ s390x="vxe vx"
+ )
+        # test skipping features that imply each other
+ self.expect_targets(
+ """
+ /*@targets
+ sse sse2 avx fma3 avx2 avx512f avx512cd
+ vsx vsx2 vsx3
+ neon neon_vfpv4 neon_fp16 neon_fp16 asimd asimdhp
+ asimddp asimdfhm
+ */
+ """,
+ baseline="",
+ x86_gcc="avx512cd avx512f avx2 fma3 avx sse2",
+ x86_msvc="avx512cd avx2 avx sse2",
+ x86_icc="avx512cd avx2 avx sse2",
+ x86_iccw="avx512cd avx2 avx sse2",
+ ppc64="vsx3 vsx2 vsx",
+ ppc64le="vsx3 vsx2",
+ armhf="asimdfhm asimddp asimdhp asimd neon_vfpv4 neon_fp16 neon",
+ aarch64="asimdfhm asimddp asimdhp asimd"
+ )
+
+ def test_targets_policies(self):
+ # 'keep_baseline', generate objects for baseline features
+ self.expect_targets(
+ """
+ /*@targets
+ $keep_baseline
+ sse2 sse42 avx2 avx512f
+ vsx2 vsx3
+ neon neon_vfpv4 asimd asimddp
+ vx vxe vxe2
+ */
+ """,
+ baseline="sse41 avx2 vsx2 asimd vsx3 vxe",
+ x86="avx512f avx2 sse42 sse2",
+ ppc64="vsx3 vsx2",
+ armhf="asimddp asimd neon_vfpv4 neon",
+            # neon, neon_vfpv4, asimd imply each other
+ aarch64="asimddp asimd",
+ s390x="vxe2 vxe vx"
+ )
+ # 'keep_sort', leave the sort as-is
+ self.expect_targets(
+ """
+ /*@targets
+ $keep_baseline $keep_sort
+ avx512f sse42 avx2 sse2
+ vsx2 vsx3
+ asimd neon neon_vfpv4 asimddp
+ vxe vxe2
+ */
+ """,
+ x86="avx512f sse42 avx2 sse2",
+ ppc64="vsx2 vsx3",
+ armhf="asimd neon neon_vfpv4 asimddp",
+            # neon, neon_vfpv4, asimd imply each other
+ aarch64="asimd asimddp",
+ s390x="vxe vxe2"
+ )
+ # 'autovec', skipping features that can't be
+ # vectorized by the compiler
+ self.expect_targets(
+ """
+ /*@targets
+ $keep_baseline $keep_sort $autovec
+ avx512f avx2 sse42 sse41 sse2
+ vsx3 vsx2
+ asimddp asimd neon_vfpv4 neon
+ */
+ """,
+ x86_gcc="avx512f avx2 sse42 sse41 sse2",
+ x86_icc="avx512f avx2 sse42 sse41 sse2",
+ x86_iccw="avx512f avx2 sse42 sse41 sse2",
+ x86_msvc="avx512f avx2 sse2"
+ if self.march() == 'x86' else "avx512f avx2",
+ ppc64="vsx3 vsx2",
+ armhf="asimddp asimd neon_vfpv4 neon",
+            # neon, neon_vfpv4, asimd imply each other
+ aarch64="asimddp asimd"
+ )
+ for policy in ("$maxopt", "$autovec"):
+            # '$maxopt' and '$autovec' set the maximum acceptable optimization flags
+ self.expect_target_flags(
+ "/*@targets baseline %s */" % policy,
+ gcc={"baseline":".*-O3.*"}, icc={"baseline":".*-O3.*"},
+ iccw={"baseline":".*/O3.*"}, msvc={"baseline":".*/O2.*"},
+ unknown={"baseline":".*"}
+ )
+
+ # 'werror', force compilers to treat warnings as errors
+ self.expect_target_flags(
+ "/*@targets baseline $werror */",
+ gcc={"baseline":".*-Werror.*"}, icc={"baseline":".*-Werror.*"},
+ iccw={"baseline":".*/Werror.*"}, msvc={"baseline":".*/WX.*"},
+ unknown={"baseline":".*"}
+ )
+
+ def test_targets_groups(self):
+ self.expect_targets(
+ """
+ /*@targets $keep_baseline baseline #test_group */
+ """,
+ groups=dict(
+ test_group=("""
+ $keep_baseline
+ asimddp sse2 vsx2 avx2 vsx3
+ avx512f asimdhp
+ """)
+ ),
+ x86="avx512f avx2 sse2 baseline",
+ ppc64="vsx3 vsx2 baseline",
+ armhf="asimddp asimdhp baseline"
+ )
+        # test skipping duplicates and sorting
+ self.expect_targets(
+ """
+ /*@targets
+ * sse42 avx avx512f
+ * #test_group_1
+ * vsx2
+ * #test_group_2
+ * asimddp asimdfhm
+ */
+ """,
+ groups=dict(
+ test_group_1=("""
+ VSX2 vsx3 asimd avx2 SSE41
+ """),
+ test_group_2=("""
+ vsx2 vsx3 asImd aVx2 sse41
+ """)
+ ),
+ x86="avx512f avx2 avx sse42 sse41",
+ ppc64="vsx3 vsx2",
+            # vsx2 is part of the default baseline of ppc64le (option "min")
+ ppc64le="vsx3",
+ armhf="asimdfhm asimddp asimd",
+            # asimd is part of the default baseline of aarch64 (option "min")
+ aarch64="asimdfhm asimddp"
+ )
+
+ def test_targets_multi(self):
+ self.expect_targets(
+ """
+ /*@targets
+ (avx512_clx avx512_cnl) (asimdhp asimddp)
+ */
+ """,
+ x86=r"\(avx512_clx avx512_cnl\)",
+ armhf=r"\(asimdhp asimddp\)",
+ )
+ # test skipping implied features and auto-sort
+ self.expect_targets(
+ """
+ /*@targets
+ f16c (sse41 avx sse42) (sse3 avx2 avx512f)
+ vsx2 (vsx vsx3 vsx2)
+ (neon neon_vfpv4 asimd asimdhp asimddp)
+ */
+ """,
+ x86="avx512f f16c avx",
+ ppc64="vsx3 vsx2",
+            ppc64le="vsx3", # vsx2 is part of the baseline
+ armhf=r"\(asimdhp asimddp\)",
+ )
+ # test skipping implied features and keep sort
+ self.expect_targets(
+ """
+ /*@targets $keep_sort
+ (sse41 avx sse42) (sse3 avx2 avx512f)
+ (vsx vsx3 vsx2)
+ (asimddp neon neon_vfpv4 asimd asimdhp)
+ (vx vxe vxe2)
+ */
+ """,
+ x86="avx avx512f",
+ ppc64="vsx3",
+ armhf=r"\(asimdhp asimddp\)",
+ s390x="vxe2"
+ )
+        # test compiler variety and avoiding duplicates
+ self.expect_targets(
+ """
+ /*@targets $keep_sort
+ fma3 avx2 (fma3 avx2) (avx2 fma3) avx2 fma3
+ */
+ """,
+ x86_gcc=r"fma3 avx2 \(fma3 avx2\)",
+ x86_icc="avx2", x86_iccw="avx2",
+ x86_msvc="avx2"
+ )
+
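+# Build the source of a TestCase subclass for one (arch, compiler) pair;
+# the generated classes are exec'd into the module namespace below.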
+def new_test(arch, cc):
+ if is_standalone: return textwrap.dedent("""\
+ class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt, unittest.TestCase):
+ arch = '{arch}'
+ cc = '{cc}'
+ def __init__(self, methodName="runTest"):
+ unittest.TestCase.__init__(self, methodName)
+ self.setup_class()
+ """).format(
+ class_name=arch + '_' + cc, arch=arch, cc=cc
+ )
+ return textwrap.dedent("""\
+ class TestCCompilerOpt_{class_name}(_Test_CCompilerOpt):
+ arch = '{arch}'
+ cc = '{cc}'
+ """).format(
+ class_name=arch + '_' + cc, arch=arch, cc=cc
+ )
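+# The block below is an intentionally disabled standalone debug snippet,
+# neutralized by wrapping it in a module-level string literal.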
+"""
+if 1 and is_standalone:
+ FakeCCompilerOpt.fake_info = "x86_icc"
+ cco = FakeCCompilerOpt(None, cpu_baseline="avx2")
+ print(' '.join(cco.cpu_baseline_names()))
+ print(cco.cpu_baseline_flags())
+ unittest.main()
+ sys.exit()
+"""
+for arch, compilers in arch_compilers.items():
+ for cc in compilers:
+ exec(new_test(arch, cc))
+
+if is_standalone:
+ unittest.main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py
new file mode 100644
index 00000000..d9e8b2b0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_ccompiler_opt_conf.py
@@ -0,0 +1,176 @@
+import unittest
+import sys
+from os import path
+
+is_standalone = __name__ == '__main__' and __package__ is None
+if is_standalone:
+ sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
+ from ccompiler_opt import CCompilerOpt
+else:
+ from numpy.distutils.ccompiler_opt import CCompilerOpt
+
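+# Architectures mapped to the compilers whose feature tables get validated.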
+arch_compilers = dict(
+ x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
+ ppc64 = ("gcc", "clang"),
+ ppc64le = ("gcc", "clang"),
+ armhf = ("gcc", "clang"),
+ aarch64 = ("gcc", "clang"),
+ narch = ("gcc",)
+)
+
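+# Stub compiler-options class: fakes platform detection and compilation so
+# the configuration tables can be checked without a real toolchain.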
+class FakeCCompilerOpt(CCompilerOpt):
+ fake_info = ("arch", "compiler", "extra_args")
+ def __init__(self, *args, **kwargs):
+ CCompilerOpt.__init__(self, None, **kwargs)
+ def dist_compile(self, sources, flags, **kwargs):
+ return sources
+ def dist_info(self):
+ return FakeCCompilerOpt.fake_info
+ @staticmethod
+ def dist_log(*args, stderr=False):
+ pass
+
+class _TestConfFeatures(FakeCCompilerOpt):
+    """A hook to check the sanity of configured features
+    before they are used by the abstract class '_Feature'.
+    """
+
+ def conf_features_partial(self):
+ conf_all = self.conf_features
+ for feature_name, feature in conf_all.items():
+ self.test_feature(
+ "attribute conf_features",
+ conf_all, feature_name, feature
+ )
+
+ conf_partial = FakeCCompilerOpt.conf_features_partial(self)
+ for feature_name, feature in conf_partial.items():
+ self.test_feature(
+ "conf_features_partial()",
+ conf_partial, feature_name, feature
+ )
+ return conf_partial
+
+ def test_feature(self, log, search_in, feature_name, feature_dict):
+ error_msg = (
+            "while validating '{}' within feature '{}', "
+ "march '{}' and compiler '{}'\n>> "
+ ).format(log, feature_name, self.cc_march, self.cc_name)
+
+ if not feature_name.isupper():
+ raise AssertionError(error_msg + "feature name must be in uppercase")
+
+ for option, val in feature_dict.items():
+ self.test_option_types(error_msg, option, val)
+ self.test_duplicates(error_msg, option, val)
+
+ self.test_implies(error_msg, search_in, feature_name, feature_dict)
+ self.test_group(error_msg, search_in, feature_name, feature_dict)
+ self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
+
+ def test_option_types(self, error_msg, option, val):
+ for tp, available in (
+ ((str, list), (
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
+ )),
+ ((str,), ("disable",)),
+ ((int,), ("interest",)),
+ ((bool,), ("implies_detect",)),
+ ((bool, type(None)), ("autovec",)),
+        ):
+ found_it = option in available
+ if not found_it:
+ continue
+ if not isinstance(val, tp):
+                error_tp = [t.__name__ for t in tp]
+ error_tp = ' or '.join(error_tp)
+ raise AssertionError(error_msg +
+                    "expected '%s' type for option '%s', not '%s'" % (
+ error_tp, option, type(val).__name__
+ ))
+ break
+
+ if not found_it:
+ raise AssertionError(error_msg + "invalid option name '%s'" % option)
+
+ def test_duplicates(self, error_msg, option, val):
+ if option not in (
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
+        ):
+            return
+
+ if isinstance(val, str):
+ val = val.split()
+
+ if len(val) != len(set(val)):
+ raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
+
+ def test_implies(self, error_msg, search_in, feature_name, feature_dict):
+        if feature_dict.get("disable") is not None:
+ return
+ implies = feature_dict.get("implies", "")
+ if not implies:
+ return
+ if isinstance(implies, str):
+ implies = implies.split()
+
+ if feature_name in implies:
+ raise AssertionError(error_msg + "feature implies itself")
+
+ for impl in implies:
+ impl_dict = search_in.get(impl)
+ if impl_dict is not None:
+ if "disable" in impl_dict:
+ raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
+ continue
+            raise AssertionError(error_msg + "implies non-existent feature '%s'" % impl)
+
+ def test_group(self, error_msg, search_in, feature_name, feature_dict):
+        if feature_dict.get("disable") is not None:
+ return
+ group = feature_dict.get("group", "")
+ if not group:
+ return
+ if isinstance(group, str):
+ group = group.split()
+
+ for f in group:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'group', '%s' already exists as a feature name" % f
+ )
+
+ def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
+        if feature_dict.get("disable") is not None:
+ return
+ extra_checks = feature_dict.get("extra_checks", "")
+ if not extra_checks:
+ return
+ if isinstance(extra_checks, str):
+ extra_checks = extra_checks.split()
+
+ for f in extra_checks:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
+ )
+
+class TestConfFeatures(unittest.TestCase):
+ def __init__(self, methodName="runTest"):
+ unittest.TestCase.__init__(self, methodName)
+ self._setup()
+
+ def _setup(self):
+ FakeCCompilerOpt.conf_nocache = True
+
+ def test_features(self):
+ for arch, compilers in arch_compilers.items():
+ for cc in compilers:
+ FakeCCompilerOpt.fake_info = (arch, cc, "")
+ _TestConfFeatures()
+
+if is_standalone:
+ unittest.main()
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py
new file mode 100644
index 00000000..d1a20056
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_exec_command.py
@@ -0,0 +1,217 @@
+import os
+import pytest
+import sys
+from tempfile import TemporaryFile
+
+from numpy.distutils import exec_command
+from numpy.distutils.exec_command import get_pythonexe
+from numpy.testing import tempdir, assert_, assert_warns, IS_WASM
+
+
+# In Python 3, stdout and stderr are text (unicode-compliant) devices, so
+# to emulate them we import StringIO from the io module.
+from io import StringIO
+
+class redirect_stdout:
+ """Context manager to redirect stdout for exec_command test."""
+ def __init__(self, stdout=None):
+ self._stdout = stdout or sys.stdout
+
+ def __enter__(self):
+ self.old_stdout = sys.stdout
+ sys.stdout = self._stdout
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._stdout.flush()
+ sys.stdout = self.old_stdout
+        # note: closing the redirect target won't close the real sys.stdout.
+ self._stdout.close()
+
+class redirect_stderr:
+ """Context manager to redirect stderr for exec_command test."""
+ def __init__(self, stderr=None):
+ self._stderr = stderr or sys.stderr
+
+ def __enter__(self):
+ self.old_stderr = sys.stderr
+ sys.stderr = self._stderr
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._stderr.flush()
+ sys.stderr = self.old_stderr
+        # note: closing the redirect target won't close the real sys.stderr.
+ self._stderr.close()
+
+class emulate_nonposix:
+ """Context manager to emulate os.name != 'posix' """
+ def __init__(self, osname='non-posix'):
+ self._new_name = osname
+
+ def __enter__(self):
+ self._old_name = os.name
+ os.name = self._new_name
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ os.name = self._old_name
+
+
+def test_exec_command_stdout():
+ # Regression test for gh-2999 and gh-2915.
+ # There are several packages (nose, scipy.weave.inline, Sage inline
+ # Fortran) that replace stdout, in which case it doesn't have a fileno
+ # method. This is tested here, with a do-nothing command that fails if the
+ # presence of fileno() is assumed in exec_command.
+
+ # The code has a special case for posix systems, so if we are on posix test
+ # both that the special case works and that the generic code works.
+
+ # Test posix version:
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(TemporaryFile()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+ if os.name == 'posix':
+ # Test general (non-posix) version:
+ with emulate_nonposix():
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(TemporaryFile()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+def test_exec_command_stderr():
+ # Test posix version:
+ with redirect_stdout(TemporaryFile(mode='w+')):
+ with redirect_stderr(StringIO()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+ if os.name == 'posix':
+ # Test general (non-posix) version:
+ with emulate_nonposix():
+ with redirect_stdout(TemporaryFile()):
+ with redirect_stderr(StringIO()):
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+class TestExecCommand:
+ def setup_method(self):
+ self.pyexe = get_pythonexe()
+
+ def check_nt(self, **kws):
+ s, o = exec_command.exec_command('cmd /C echo path=%path%')
+ assert_(s == 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
+ assert_(s == 0)
+ assert_(o == 'win32')
+
+ def check_posix(self, **kws):
+ s, o = exec_command.exec_command("echo Hello", **kws)
+ assert_(s == 0)
+ assert_(o == 'Hello')
+
+ s, o = exec_command.exec_command('echo $AAA', **kws)
+ assert_(s == 0)
+ assert_(o == '')
+
+ s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
+ assert_(s == 0)
+ assert_(o == 'Tere')
+
+ s, o = exec_command.exec_command('echo "$AAA"', **kws)
+ assert_(s == 0)
+ assert_(o == '')
+
+ if 'BBB' not in os.environ:
+ os.environ['BBB'] = 'Hi'
+ s, o = exec_command.exec_command('echo "$BBB"', **kws)
+ assert_(s == 0)
+ assert_(o == 'Hi')
+
+ s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
+ assert_(s == 0)
+ assert_(o == 'Hey')
+
+ s, o = exec_command.exec_command('echo "$BBB"', **kws)
+ assert_(s == 0)
+ assert_(o == 'Hi')
+
+ del os.environ['BBB']
+
+ s, o = exec_command.exec_command('echo "$BBB"', **kws)
+ assert_(s == 0)
+ assert_(o == '')
+
+
+ s, o = exec_command.exec_command('this_is_not_a_command', **kws)
+ assert_(s != 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command('echo path=$PATH', **kws)
+ assert_(s == 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
+ self.pyexe, **kws)
+ assert_(s == 0)
+ assert_(o == 'posix')
+
+    def check_basic(self, **kws):
+ s, o = exec_command.exec_command(
+ '"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
+ assert_(s != 0)
+ assert_(o != '')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys;sys.stderr.write(\'0\');'
+ 'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
+ self.pyexe, **kws)
+ assert_(s == 0)
+ assert_(o == '012')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
+ assert_(s == 15)
+ assert_(o == '')
+
+ s, o = exec_command.exec_command(
+            '"%s" -c "print(\'Heipa\')"' % self.pyexe, **kws)
+ assert_(s == 0)
+ assert_(o == 'Heipa')
+
+ def check_execute_in(self, **kws):
+ with tempdir() as tmpdir:
+ fn = "file"
+ tmpfile = os.path.join(tmpdir, fn)
+ with open(tmpfile, 'w') as f:
+ f.write('Hello')
+
+ s, o = exec_command.exec_command(
+ '"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
+ (self.pyexe, fn), **kws)
+ assert_(s != 0)
+ assert_(o != '')
+ s, o = exec_command.exec_command(
+ '"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
+ 'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
+ assert_(s == 0)
+ assert_(o == 'Hello')
+
+ def test_basic(self):
+ with redirect_stdout(StringIO()):
+ with redirect_stderr(StringIO()):
+ with assert_warns(DeprecationWarning):
+ if os.name == "posix":
+ self.check_posix(use_tee=0)
+ self.check_posix(use_tee=1)
+ elif os.name == "nt":
+ self.check_nt(use_tee=0)
+ self.check_nt(use_tee=1)
+ self.check_execute_in(use_tee=0)
+ self.check_execute_in(use_tee=1)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py
new file mode 100644
index 00000000..dd97f1e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler.py
@@ -0,0 +1,43 @@
+from numpy.testing import assert_
+import numpy.distutils.fcompiler
+
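+# Pairs of (fcompiler flag attribute, environment variable that overrides it).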
+customizable_flags = [
+ ('f77', 'F77FLAGS'),
+ ('f90', 'F90FLAGS'),
+ ('free', 'FREEFLAGS'),
+ ('arch', 'FARCH'),
+ ('debug', 'FDEBUG'),
+ ('flags', 'FFLAGS'),
+ ('linker_so', 'LDFLAGS'),
+]
+
+
+def test_fcompiler_flags(monkeypatch):
+ monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
+ flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
+
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ prev_flags = getattr(flag_vars, opt)
+
+ monkeypatch.setenv(envvar, new_flag)
+ new_flags = getattr(flag_vars, opt)
+
+ monkeypatch.delenv(envvar)
+ assert_(new_flags == [new_flag])
+
+ monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
+
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ prev_flags = getattr(flag_vars, opt)
+ monkeypatch.setenv(envvar, new_flag)
+ new_flags = getattr(flag_vars, opt)
+
+ monkeypatch.delenv(envvar)
+ if prev_flags is None:
+ assert_(new_flags == [new_flag])
+ else:
+ assert_(new_flags == prev_flags + [new_flag])
+
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py
new file mode 100644
index 00000000..0817ae58
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -0,0 +1,55 @@
+from numpy.testing import assert_
+
+import numpy.distutils.fcompiler
+
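+# Pairs of (raw compiler version output, expected parsed version).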
+g77_version_strings = [
+ ('GNU Fortran 0.5.25 20010319 (prerelease)', '0.5.25'),
+ ('GNU Fortran (GCC 3.2) 3.2 20020814 (release)', '3.2'),
+ ('GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)', '3.3.3'),
+ ('GNU Fortran (GCC) 3.3.3 (Debian 20040401)', '3.3.3'),
+ ('GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2'
+ ' 20030222 (Red Hat Linux 3.2.2-5)', '3.2.2'),
+]
+
+gfortran_version_strings = [
+ ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))',
+ '4.0.3'),
+ ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'),
+ ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'),
+ ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'),
+ ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'),
+ ('4.8.0', '4.8.0'),
+ ('4.0.3-7', '4.0.3'),
+ ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1",
+ '4.9.1'),
+ ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
+ "gfortran: warning: yet another warning\n4.9.1",
+ '4.9.1'),
+ ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
+]
+
+class TestG77Versions:
+ def test_g77_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
+ for vs, version in g77_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version, (vs, v))
+
+ def test_not_g77(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
+ for vs, _ in gfortran_version_strings:
+ v = fc.version_match(vs)
+ assert_(v is None, (vs, v))
+
+class TestGFortranVersions:
+ def test_gfortran_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
+ for vs, version in gfortran_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version, (vs, v))
+
+ def test_not_gfortran(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
+ for vs, _ in g77_version_strings:
+ v = fc.version_match(vs)
+ assert_(v is None, (vs, v))
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py
new file mode 100644
index 00000000..45c9cdac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_intel.py
@@ -0,0 +1,30 @@
+import numpy.distutils.fcompiler
+from numpy.testing import assert_
+
+
+intel_32bit_version_strings = [
+ ("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"
+ "running on Intel(R) 32, Version 11.1", '11.1'),
+]
+
+intel_64bit_version_strings = [
+ ("Intel(R) Fortran IA-64 Compiler Professional for applications"
+ "running on IA-64, Version 11.0", '11.0'),
+ ("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"
+ "running on Intel(R) 64, Version 11.1", '11.1')
+]
+
+class TestIntelFCompilerVersions:
+ def test_32bit_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
+ for vs, version in intel_32bit_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version)
+
+
+class TestIntelEM64TFCompilerVersions:
+ def test_64bit_version(self):
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
+ for vs, version in intel_64bit_version_strings:
+ v = fc.version_match(vs)
+ assert_(v == version)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py
new file mode 100644
index 00000000..2e04f526
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py
@@ -0,0 +1,22 @@
+from numpy.testing import assert_
+import numpy.distutils.fcompiler
+
+nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
+ '6.2(Chiyoda) Build 6200', '6.2'),
+ ('nagfor', 'NAG Fortran Compiler Release '
+ '6.1(Tozai) Build 6136', '6.1'),
+ ('nagfor', 'NAG Fortran Compiler Release '
+ '6.0(Hibiya) Build 1021', '6.0'),
+ ('nagfor', 'NAG Fortran Compiler Release '
+ '5.3.2(971)', '5.3.2'),
+ ('nag', 'NAGWare Fortran 95 compiler Release 5.1'
+ '(347,355-367,375,380-383,389,394,399,401-402,407,'
+ '431,435,437,446,459-460,463,472,494,496,503,508,'
+ '511,517,529,555,557,565)', '5.1')]
+
+class TestNagFCompilerVersions:
+ def test_version_match(self):
+ for comp, vs, version in nag_version_strings:
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
+ v = fc.version_match(vs)
+ assert_(v == version)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py
new file mode 100644
index 00000000..58817549
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_from_template.py
@@ -0,0 +1,44 @@
+
+from numpy.distutils.from_template import process_str
+from numpy.testing import assert_equal
+
+
+pyf_src = """
+python module foo
+ <_rd=real,double precision>
+ interface
+ subroutine <s,d>foosub(tol)
+ <_rd>, intent(in,out) :: tol
+ end subroutine <s,d>foosub
+ end interface
+end python module foo
+"""
+
+expected_pyf = """
+python module foo
+ interface
+ subroutine sfoosub(tol)
+ real, intent(in,out) :: tol
+ end subroutine sfoosub
+ subroutine dfoosub(tol)
+ double precision, intent(in,out) :: tol
+ end subroutine dfoosub
+ end interface
+end python module foo
+"""
+
+
+def normalize_whitespace(s):
+ """
+ Remove leading and trailing whitespace, and convert internal
+ stretches of whitespace to a single space.
+ """
+ return ' '.join(s.split())
+
+
+def test_from_template():
+ """Regression test for gh-10712."""
+ pyf = process_str(pyf_src)
+ normalized_pyf = normalize_whitespace(pyf)
+ normalized_expected_pyf = normalize_whitespace(expected_pyf)
+ assert_equal(normalized_pyf, normalized_expected_pyf)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py
new file mode 100644
index 00000000..72fddf37
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py
@@ -0,0 +1,34 @@
+import io
+import re
+from contextlib import redirect_stdout
+
+import pytest
+
+from numpy.distutils import log
+
+
+def setup_module():
+ f = io.StringIO() # changing verbosity also logs here, capture that
+ with redirect_stdout(f):
+ log.set_verbosity(2, force=True) # i.e. DEBUG
+
+
+def teardown_module():
+ log.set_verbosity(0, force=True) # the default
+
+
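+# Matches ANSI escape sequences so colorized log output can be compared
+# against plain expected strings.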
+r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
+
+
+@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"])
+def test_log_prefix(func_name):
+ func = getattr(log, func_name)
+ msg = f"{func_name} message"
+ f = io.StringIO()
+ with redirect_stdout(f):
+ func(msg)
+ out = f.getvalue()
+ assert out # sanity check
+ clean_out = r_ansi.sub("", out)
+    line = clean_out.splitlines()[0]
+ assert line == f"{func_name.upper()}: {msg}"
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py
new file mode 100644
index 00000000..ebedacb3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_mingw32ccompiler.py
@@ -0,0 +1,42 @@
+import shutil
+import subprocess
+import sys
+import pytest
+
+from numpy.distutils import mingw32ccompiler
+
+
+@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
+def test_build_import():
+ '''Test the mingw32ccompiler.build_import_library, which builds a
+ `python.a` from the MSVC `python.lib`
+ '''
+
+ # make sure `nm.exe` exists and supports the current python version. This
+ # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
+ try:
+ out = subprocess.check_output(['nm.exe', '--help'])
+ except FileNotFoundError:
+ pytest.skip("'nm.exe' not on path, is mingw installed?")
+ supported = out[out.find(b'supported targets:'):]
+ if sys.maxsize < 2**32:
+ if b'pe-i386' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 32-bit "
+ "dlls when using 32-bit python. Supported "
+ "formats: '%s'" % supported)
+ elif b'pe-x86-64' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 64-bit "
+ "dlls when using 64-bit python. Supported "
+ "formats: '%s'" % supported)
+ # Hide the import library to force a build
+ has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
+ if has_import_lib:
+ shutil.move(fullpath, fullpath + '.bak')
+
+ try:
+ # Whew, now we can actually test the function
+ mingw32ccompiler.build_import_library()
+
+ finally:
+ if has_import_lib:
+ shutil.move(fullpath + '.bak', fullpath)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py
new file mode 100644
index 00000000..605c8048
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_misc_util.py
@@ -0,0 +1,82 @@
+from os.path import join, sep, dirname
+
+from numpy.distutils.misc_util import (
+ appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
+ )
+from numpy.testing import (
+ assert_, assert_equal
+ )
+
+ajoin = lambda *paths: join(*((sep,)+paths))
+
+class TestAppendpath:
+
+ def test_1(self):
+ assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
+ assert_equal(appendpath('/prefix', 'name'), ajoin('prefix', 'name'))
+ assert_equal(appendpath('/prefix', '/name'), ajoin('prefix', 'name'))
+ assert_equal(appendpath('prefix', '/name'), join('prefix', 'name'))
+
+ def test_2(self):
+ assert_equal(appendpath('prefix/sub', 'name'),
+ join('prefix', 'sub', 'name'))
+ assert_equal(appendpath('prefix/sub', 'sup/name'),
+ join('prefix', 'sub', 'sup', 'name'))
+ assert_equal(appendpath('/prefix/sub', '/prefix/name'),
+ ajoin('prefix', 'sub', 'name'))
+
+ def test_3(self):
+ assert_equal(appendpath('/prefix/sub', '/prefix/sup/name'),
+ ajoin('prefix', 'sub', 'sup', 'name'))
+ assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sup/sup2/name'),
+ ajoin('prefix', 'sub', 'sub2', 'sup', 'sup2', 'name'))
+ assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
+ ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
+
+class TestMinrelpath:
+
+ def test_1(self):
+ n = lambda path: path.replace('/', sep)
+ assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
+ assert_equal(minrelpath('..'), '..')
+ assert_equal(minrelpath(n('aa/..')), '')
+ assert_equal(minrelpath(n('aa/../bb')), 'bb')
+ assert_equal(minrelpath(n('aa/bb/..')), 'aa')
+ assert_equal(minrelpath(n('aa/bb/../..')), '')
+ assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
+ assert_equal(minrelpath(n('.././..')), n('../..'))
+ assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
+
+class TestGpaths:
+
+ def test_gpaths(self):
+ local_path = minrelpath(join(dirname(__file__), '..'))
+ ls = gpaths('command/*.py', local_path)
+ assert_(join(local_path, 'command', 'build_src.py') in ls, repr(ls))
+ f = gpaths('system_info.py', local_path)
+ assert_(join(local_path, 'system_info.py') == f[0], repr(f))
+
+class TestSharedExtension:
+
+ def test_get_shared_lib_extension(self):
+ import sys
+ ext = get_shared_lib_extension(is_python_ext=False)
+ if sys.platform.startswith('linux'):
+ assert_equal(ext, '.so')
+ elif sys.platform.startswith('gnukfreebsd'):
+ assert_equal(ext, '.so')
+ elif sys.platform.startswith('darwin'):
+ assert_equal(ext, '.dylib')
+ elif sys.platform.startswith('win'):
+ assert_equal(ext, '.dll')
+ # just check for no crash
+ assert_(get_shared_lib_extension(is_python_ext=True))
+
+
+def test_installed_npymath_ini():
+ # Regression test for gh-7707. If npymath.ini wasn't installed, then this
+ # will give an error.
+ info = get_info('npymath')
+
+ assert isinstance(info, dict)
+ assert "define_macros" in info
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py
new file mode 100644
index 00000000..b287ebe2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_npy_pkg_config.py
@@ -0,0 +1,84 @@
+import os
+
+from numpy.distutils.npy_pkg_config import read_config, parse_flags
+from numpy.testing import temppath, assert_
+
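+# Minimal .ini fixtures: one with literal paths and one exercising
+# ${variable} substitution.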
+simple = """\
+[meta]
+Name = foo
+Description = foo lib
+Version = 0.1
+
+[default]
+cflags = -I/usr/include
+libs = -L/usr/lib
+"""
+simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
+ 'version': '0.1', 'name': 'foo'}
+
+simple_variable = """\
+[meta]
+Name = foo
+Description = foo lib
+Version = 0.1
+
+[variables]
+prefix = /foo/bar
+libdir = ${prefix}/lib
+includedir = ${prefix}/include
+
+[default]
+cflags = -I${includedir}
+libs = -L${libdir}
+"""
+simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
+ 'version': '0.1', 'name': 'foo'}
+
+class TestLibraryInfo:
+ def test_simple(self):
+ with temppath('foo.ini') as path:
+ with open(path, 'w') as f:
+ f.write(simple)
+ pkg = os.path.splitext(path)[0]
+ out = read_config(pkg)
+
+ assert_(out.cflags() == simple_d['cflags'])
+ assert_(out.libs() == simple_d['libflags'])
+ assert_(out.name == simple_d['name'])
+ assert_(out.version == simple_d['version'])
+
+ def test_simple_variable(self):
+ with temppath('foo.ini') as path:
+ with open(path, 'w') as f:
+ f.write(simple_variable)
+ pkg = os.path.splitext(path)[0]
+ out = read_config(pkg)
+
+ assert_(out.cflags() == simple_variable_d['cflags'])
+ assert_(out.libs() == simple_variable_d['libflags'])
+ assert_(out.name == simple_variable_d['name'])
+ assert_(out.version == simple_variable_d['version'])
+ out.vars['prefix'] = '/Users/david'
+ assert_(out.cflags() == '-I/Users/david/include')
+
+class TestParseFlags:
+ def test_simple_cflags(self):
+ d = parse_flags("-I/usr/include")
+ assert_(d['include_dirs'] == ['/usr/include'])
+
+ d = parse_flags("-I/usr/include -DFOO")
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
+
+ d = parse_flags("-I /usr/include -DFOO")
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
+
+ def test_simple_lflags(self):
+ d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
+
+ d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py
new file mode 100644
index 00000000..696d38dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_shell_utils.py
@@ -0,0 +1,79 @@
+import pytest
+import subprocess
+import json
+import sys
+
+from numpy.distutils import _shell_utils
+from numpy.testing import IS_WASM
+
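+# Command lines exercising plain names, path separators, embedded spaces,
+# and literal quotes.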
+argv_cases = [
+ [r'exe'],
+ [r'path/exe'],
+ [r'path\exe'],
+ [r'\\server\path\exe'],
+ [r'path to/exe'],
+ [r'path to\exe'],
+
+ [r'exe', '--flag'],
+ [r'path/exe', '--flag'],
+ [r'path\exe', '--flag'],
+ [r'path to/exe', '--flag'],
+ [r'path to\exe', '--flag'],
+
+ # flags containing literal quotes in their name
+ [r'path to/exe', '--flag-"quoted"'],
+ [r'path to\exe', '--flag-"quoted"'],
+ [r'path to/exe', '"--flag-quoted"'],
+ [r'path to\exe', '"--flag-quoted"'],
+]
+
+
+@pytest.fixture(params=[
+ _shell_utils.WindowsParser,
+ _shell_utils.PosixParser
+])
+def Parser(request):
+ return request.param
+
+
+@pytest.fixture
+def runner(Parser):
+ if Parser != _shell_utils.NativeParser:
+ pytest.skip('Unable to run with non-native parser')
+
+ if Parser == _shell_utils.WindowsParser:
+ return lambda cmd: subprocess.check_output(cmd)
+ elif Parser == _shell_utils.PosixParser:
+ # posix has no non-shell string parsing
+ return lambda cmd: subprocess.check_output(cmd, shell=True)
+ else:
+ raise NotImplementedError
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.parametrize('argv', argv_cases)
+def test_join_matches_subprocess(Parser, runner, argv):
+ """
+ Test that join produces strings understood by subprocess
+ """
+ # invoke python to return its arguments as json
+ cmd = [
+ sys.executable, '-c',
+ 'import json, sys; print(json.dumps(sys.argv[1:]))'
+ ]
+ joined = Parser.join(cmd + argv)
+ json_out = runner(joined).decode()
+ assert json.loads(json_out) == argv
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.parametrize('argv', argv_cases)
+def test_roundtrip(Parser, argv):
+ """
+ Test that split is the inverse operation of join
+ """
+ try:
+ joined = Parser.join(argv)
+ assert argv == Parser.split(joined)
+ except NotImplementedError:
+ pytest.skip("Not implemented")
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py
new file mode 100644
index 00000000..eb7235e0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/tests/test_system_info.py
@@ -0,0 +1,323 @@
+import os
+import shutil
+import pytest
+from tempfile import mkstemp, mkdtemp
+from subprocess import Popen, PIPE
+from distutils.errors import DistutilsError
+
+from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.distutils import ccompiler, customized_ccompiler
+from numpy.distutils.system_info import system_info, ConfigParser, mkl_info
+from numpy.distutils.system_info import AliasedOptionError
+from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
+from numpy.distutils import _shell_utils
+
+
+def get_class(name, notfound_action=1):
+ """
+ notfound_action:
+ 0 - do nothing
+ 1 - display warning message
+ 2 - raise error
+ """
+ cl = {'temp1': Temp1Info,
+ 'temp2': Temp2Info,
+ 'duplicate_options': DuplicateOptionInfo,
+ }.get(name.lower(), _system_info)
+ return cl()
+
+simple_site = """
+[ALL]
+library_dirs = {dir1:s}{pathsep:s}{dir2:s}
+libraries = {lib1:s},{lib2:s}
+extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
+runtime_library_dirs = {dir1:s}
+
+[temp1]
+library_dirs = {dir1:s}
+libraries = {lib1:s}
+runtime_library_dirs = {dir1:s}
+
+[temp2]
+library_dirs = {dir2:s}
+libraries = {lib2:s}
+extra_link_args = -Wl,-rpath={lib2_escaped:s}
+rpath = {dir2:s}
+
+[duplicate_options]
+mylib_libs = {lib1:s}
+libraries = {lib2:s}
+"""
+site_cfg = simple_site
+
+fakelib_c_text = """
+/* This file is generated from numpy/distutils/testing/test_system_info.py */
+#include<stdio.h>
+void foo(void) {
+ printf("Hello foo");
+}
+void bar(void) {
+ printf("Hello bar");
+}
+"""
+
+def have_compiler():
+ """ Return True if there appears to be an executable compiler
+ """
+ compiler = customized_ccompiler()
+ try:
+ cmd = compiler.compiler # Unix compilers
+ except AttributeError:
+ try:
+ if not compiler.initialized:
+ compiler.initialize() # MSVC is different
+ except (DistutilsError, ValueError):
+ return False
+ cmd = [compiler.cc]
+ try:
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ p.stdout.close()
+ p.stderr.close()
+ p.wait()
+ except OSError:
+ return False
+ return True
+
+
+HAVE_COMPILER = have_compiler()
+
+
+class _system_info(system_info):
+
+ def __init__(self,
+ default_lib_dirs=default_lib_dirs,
+ default_include_dirs=default_include_dirs,
+ verbosity=1,
+ ):
+ self.__class__.info = {}
+ self.local_prefixes = []
+ defaults = {'library_dirs': '',
+ 'include_dirs': '',
+ 'runtime_library_dirs': '',
+ 'rpath': '',
+ 'src_dirs': '',
+ 'search_static_first': "0",
+ 'extra_compile_args': '',
+ 'extra_link_args': ''}
+ self.cp = ConfigParser(defaults)
+ # We have to parse the config files afterwards
+ # to have a consistent temporary filepath
+
+ def _check_libs(self, lib_dirs, libs, opt_libs, exts):
+ """Override _check_libs to return with all dirs """
+ info = {'libraries': libs, 'library_dirs': lib_dirs}
+ return info
+
+
+class Temp1Info(_system_info):
+ """For testing purposes"""
+ section = 'temp1'
+
+
+class Temp2Info(_system_info):
+ """For testing purposes"""
+ section = 'temp2'
+
+class DuplicateOptionInfo(_system_info):
+ """For testing purposes"""
+ section = 'duplicate_options'
+
+
+class TestSystemInfoReading:
+
+ def setup_method(self):
+ """ Create the libraries """
+ # Create 2 sources and 2 libraries
+ self._dir1 = mkdtemp()
+ self._src1 = os.path.join(self._dir1, 'foo.c')
+ self._lib1 = os.path.join(self._dir1, 'libfoo.so')
+ self._dir2 = mkdtemp()
+ self._src2 = os.path.join(self._dir2, 'bar.c')
+ self._lib2 = os.path.join(self._dir2, 'libbar.so')
+ # Update local site.cfg
+        global site_cfg
+ site_cfg = simple_site.format(**{
+ 'dir1': self._dir1,
+ 'lib1': self._lib1,
+ 'dir2': self._dir2,
+ 'lib2': self._lib2,
+ 'pathsep': os.pathsep,
+ 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
+ })
+ # Write site.cfg
+ fd, self._sitecfg = mkstemp()
+ os.close(fd)
+ with open(self._sitecfg, 'w') as fd:
+ fd.write(site_cfg)
+ # Write the sources
+ with open(self._src1, 'w') as fd:
+ fd.write(fakelib_c_text)
+ with open(self._src2, 'w') as fd:
+ fd.write(fakelib_c_text)
+ # We create all class-instances
+
+ def site_and_parse(c, site_cfg):
+ c.files = [site_cfg]
+ c.parse_config_files()
+ return c
+ self.c_default = site_and_parse(get_class('default'), self._sitecfg)
+ self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
+ self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
+ self.c_dup_options = site_and_parse(get_class('duplicate_options'),
+ self._sitecfg)
+
+ def teardown_method(self):
+ # Do each removal separately
+ try:
+ shutil.rmtree(self._dir1)
+ except Exception:
+ pass
+ try:
+ shutil.rmtree(self._dir2)
+ except Exception:
+ pass
+ try:
+ os.remove(self._sitecfg)
+ except Exception:
+ pass
+
+ def test_all(self):
+ # Read in all information in the ALL block
+ tsi = self.c_default
+ assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
+ assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
+ assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
+ extra = tsi.calc_extra_info()
+ assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])
+
+ def test_temp1(self):
+ # Read in all information in the temp1 block
+ tsi = self.c_temp1
+ assert_equal(tsi.get_lib_dirs(), [self._dir1])
+ assert_equal(tsi.get_libraries(), [self._lib1])
+ assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
+
+ def test_temp2(self):
+ # Read in all information in the temp2 block
+ tsi = self.c_temp2
+ assert_equal(tsi.get_lib_dirs(), [self._dir2])
+ assert_equal(tsi.get_libraries(), [self._lib2])
+ # Now from rpath and not runtime_library_dirs
+ assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
+ extra = tsi.calc_extra_info()
+ assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
+
+ def test_duplicate_options(self):
+ # Ensure that duplicates are raising an AliasedOptionError
+ tsi = self.c_dup_options
+ assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries")
+ assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
+ assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])
+
+ @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
+ def test_compile1(self):
+ # Compile source and link the first source
+ c = customized_ccompiler()
+ previousDir = os.getcwd()
+ try:
+ # Change directory to not screw up directories
+ os.chdir(self._dir1)
+ c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
+ # Ensure that the object exists
+ assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
+ os.path.isfile(self._src1.replace('.c', '.obj')))
+ finally:
+ os.chdir(previousDir)
+
+ @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
+ @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
+                        reason="Fails with MSVC compiler")
+ def test_compile2(self):
+ # Compile source and link the second source
+ tsi = self.c_temp2
+ c = customized_ccompiler()
+ extra_link_args = tsi.calc_extra_info()['extra_link_args']
+ previousDir = os.getcwd()
+ try:
+ # Change directory to not screw up directories
+ os.chdir(self._dir2)
+ c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
+ extra_postargs=extra_link_args)
+ # Ensure that the object exists
+ assert_(os.path.isfile(self._src2.replace('.c', '.o')))
+ finally:
+ os.chdir(previousDir)
+
+ HAS_MKL = "mkl_rt" in mkl_info().calc_libraries_info().get("libraries", [])
+
+ @pytest.mark.xfail(HAS_MKL, reason=("`[DEFAULT]` override doesn't work if "
+ "numpy is built with MKL support"))
+ def test_overrides(self):
+ previousDir = os.getcwd()
+ cfg = os.path.join(self._dir1, 'site.cfg')
+ shutil.copy(self._sitecfg, cfg)
+ try:
+ os.chdir(self._dir1)
+ # Check that the '[ALL]' section does not override
+ # missing values from other sections
+ info = mkl_info()
+ lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
+ assert info.get_lib_dirs() != lib_dirs
+
+ # But if we copy the values to a '[mkl]' section the value
+ # is correct
+ with open(cfg, 'r') as fid:
+ mkl = fid.read().replace('[ALL]', '[mkl]', 1)
+ with open(cfg, 'w') as fid:
+ fid.write(mkl)
+ info = mkl_info()
+ assert info.get_lib_dirs() == lib_dirs
+
+ # Also, the values will be taken from a section named '[DEFAULT]'
+ with open(cfg, 'r') as fid:
+ dflt = fid.read().replace('[mkl]', '[DEFAULT]', 1)
+ with open(cfg, 'w') as fid:
+ fid.write(dflt)
+ info = mkl_info()
+ assert info.get_lib_dirs() == lib_dirs
+ finally:
+ os.chdir(previousDir)
+
+
+def test_distutils_parse_env_order(monkeypatch):
+ from numpy.distutils.system_info import _parse_env_order
+ env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER'
+
+ base_order = list('abcdef')
+
+ monkeypatch.setenv(env, 'b,i,e,f')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 3
+ assert order == list('bef')
+ assert len(unknown) == 1
+
+ # For when LAPACK/BLAS optimization is disabled
+ monkeypatch.setenv(env, '')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 0
+ assert len(unknown) == 0
+
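+    # A leading '^' or '!' negates the list: all entries of base_order
+    # except those named are kept, in their original order.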
+ for prefix in '^!':
+ monkeypatch.setenv(env, f'{prefix}b,i,e')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 4
+ assert order == list('acdf')
+ assert len(unknown) == 1
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, 'b,^e,i')
+ _parse_env_order(base_order, env)
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, '!b,^e,i')
+ _parse_env_order(base_order, env)
diff --git a/venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py b/venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py
new file mode 100644
index 00000000..4884960f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/distutils/unixccompiler.py
@@ -0,0 +1,141 @@
+"""
+unixccompiler - can handle very long argument lists for ar.
+
+"""
+import os
+import sys
+import subprocess
+import shlex
+
+from distutils.errors import CompileError, DistutilsExecError, LibError
+from distutils.unixccompiler import UnixCCompiler
+from numpy.distutils.ccompiler import replace_method
+from numpy.distutils.misc_util import _commandline_dep_string
+from numpy.distutils import log
+
+# Note that UnixCCompiler._compile appeared in Python 2.3
+def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+    """Compile a single source file with a Unix-style compiler."""
+ # HP ad-hoc fix, see ticket 1383
+ ccomp = self.compiler_so
+ if ccomp[0] == 'aCC':
+ # remove flags that will trigger ANSI-C mode for aCC
+ if '-Ae' in ccomp:
+ ccomp.remove('-Ae')
+ if '-Aa' in ccomp:
+ ccomp.remove('-Aa')
+ # add flags for (almost) sane C++ handling
+ ccomp += ['-AA']
+ self.compiler_so = ccomp
+ # ensure OPT environment variable is read
+ if 'OPT' in os.environ:
+ # XXX who uses this?
+ from sysconfig import get_config_vars
+ opt = shlex.join(shlex.split(os.environ['OPT']))
+ gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0]))
+ ccomp_s = shlex.join(self.compiler_so)
+ if opt not in ccomp_s:
+ ccomp_s = ccomp_s.replace(gcv_opt, opt)
+ self.compiler_so = shlex.split(ccomp_s)
+ llink_s = shlex.join(self.linker_so)
+ if opt not in llink_s:
+ self.linker_so = self.linker_so + shlex.split(opt)
+
+ display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
+
+ # gcc style automatic dependencies, outputs a makefile (-MF) that lists
+ # all headers needed by a c file as a side effect of compilation (-MMD)
+ if getattr(self, '_auto_depends', False):
+ deps = ['-MMD', '-MF', obj + '.d']
+ else:
+ deps = []
+
+ try:
+ self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps +
+ extra_postargs, display = display)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise CompileError(msg) from None
+
+ # add commandline flags to dependency file
+ if deps:
+ # After running the compiler, the file created will be in EBCDIC
+ # but will not be tagged as such. This tags it so the file does not
+ # have multiple different encodings being written to it
+ if sys.platform == 'zos':
+ subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d'])
+ with open(obj + '.d', 'a') as f:
+ f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts))
+
+replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
+
+
+def UnixCCompiler_create_static_lib(self, objects, output_libname,
+ output_dir=None, debug=0, target_lang=None):
+ """
+ Build a static library in a separate sub-process.
+
+ Parameters
+ ----------
+ objects : list or tuple of str
+ List of paths to object files used to build the static library.
+ output_libname : str
+ The library name as an absolute or relative (if `output_dir` is used)
+ path.
+ output_dir : str, optional
+ The path to the output directory. Default is None, in which case
+        the ``output_dir`` attribute of the UnixCCompiler instance is used.
+ debug : bool, optional
+ This parameter is not used.
+ target_lang : str, optional
+ This parameter is not used.
+
+ Returns
+ -------
+ None
+
+ """
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+
+ output_filename = \
+ self.library_filename(output_libname, output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ try:
+ # previous .a may be screwed up; best to remove it first
+ # and recreate.
+ # Also, ar on OS X doesn't handle updating universal archives
+ os.unlink(output_filename)
+ except OSError:
+ pass
+ self.mkpath(os.path.dirname(output_filename))
+ tmp_objects = objects + self.objects
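+        # Feed the archiver at most 50 objects per call so very long object
+        # lists don't overflow platform command-line length limits.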
+ while tmp_objects:
+ objects = tmp_objects[:50]
+ tmp_objects = tmp_objects[50:]
+ display = '%s: adding %d object files to %s' % (
+ os.path.basename(self.archiver[0]),
+ len(objects), output_filename)
+ self.spawn(self.archiver + [output_filename] + objects,
+ display = display)
+
+        # Not many Unices require ranlib anymore -- SunOS 4.x is, I
+        # think, the only major Unix that does. Maybe we need some
+ # platform intelligence here to skip ranlib if it's not
+ # needed -- or maybe Python's configure script took care of
+ # it for us, hence the check for leading colon.
+ if self.ranlib:
+ display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
+ output_filename)
+ try:
+ self.spawn(self.ranlib + [output_filename],
+ display = display)
+ except DistutilsExecError as e:
+ msg = str(e)
+ raise LibError(msg) from None
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+ return
+
+replace_method(UnixCCompiler, 'create_static_lib',
+ UnixCCompiler_create_static_lib)
diff --git a/venv/lib/python3.9/site-packages/numpy/doc/__init__.py b/venv/lib/python3.9/site-packages/numpy/doc/__init__.py
new file mode 100644
index 00000000..8a944fec
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/doc/__init__.py
@@ -0,0 +1,26 @@
+import os
+
+ref_dir = os.path.dirname(__file__)
+
+__all__ = sorted(f[:-3] for f in os.listdir(ref_dir) if f.endswith('.py') and
+ not f.startswith('__'))
+
+for f in __all__:
+ __import__(__name__ + '.' + f)
+
+del f, ref_dir
+
+__doc__ = """\
+Topical documentation
+=====================
+
+The following topics are available:
+%s
+
+You can view them by
+
+>>> help(np.doc.TOPIC) #doctest: +SKIP
+
+""" % '\n- '.join([''] + __all__)
+
+__all__.extend(['__doc__'])
diff --git a/venv/lib/python3.9/site-packages/numpy/doc/constants.py b/venv/lib/python3.9/site-packages/numpy/doc/constants.py
new file mode 100644
index 00000000..4db5c639
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/doc/constants.py
@@ -0,0 +1,412 @@
+"""
+=========
+Constants
+=========
+
+.. currentmodule:: numpy
+
+NumPy includes several constants:
+
+%(constant_list)s
+"""
+#
+# Note: the docstring is autogenerated.
+#
+import re
+import textwrap
+
+# Maintain same format as in numpy.add_newdocs
+constants = []
+def add_newdoc(module, name, doc):
+ constants.append((name, doc))
+
+add_newdoc('numpy', 'pi',
+ """
+ ``pi = 3.1415926535897932384626433...``
+
+ References
+ ----------
+ https://en.wikipedia.org/wiki/Pi
+
+ """)
+
+add_newdoc('numpy', 'e',
+ """
+ Euler's constant, base of natural logarithms, Napier's constant.
+
+ ``e = 2.71828182845904523536028747135266249775724709369995...``
+
+ See Also
+ --------
+ exp : Exponential function
+ log : Natural logarithm
+
+ References
+ ----------
+ https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
+
+ """)
+
+add_newdoc('numpy', 'euler_gamma',
+ """
+ ``γ = 0.5772156649015328606065120900824024310421...``
+
+ References
+ ----------
+ https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
+
+ """)
+
+add_newdoc('numpy', 'inf',
+ """
+ IEEE 754 floating point representation of (positive) infinity.
+
+ Returns
+ -------
+ y : float
+ A floating point representation of positive infinity.
+
+ See Also
+ --------
+ isinf : Shows which elements are positive or negative infinity
+
+ isposinf : Shows which elements are positive infinity
+
+ isneginf : Shows which elements are negative infinity
+
+ isnan : Shows which elements are Not a Number
+
+ isfinite : Shows which elements are finite (not one of Not a Number,
+ positive infinity and negative infinity)
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity,
+    and that positive infinity is not equivalent to negative infinity. But
+ infinity is equivalent to positive infinity.
+
+ `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
+
+ Examples
+ --------
+ >>> np.inf
+ inf
+ >>> np.array([1]) / 0.
+ array([ Inf])
+
+ """)
+
+add_newdoc('numpy', 'nan',
+ """
+ IEEE 754 floating point representation of Not a Number (NaN).
+
+ Returns
+ -------
+ y : A floating point representation of Not a Number.
+
+ See Also
+ --------
+ isnan : Shows which elements are Not a Number.
+
+ isfinite : Shows which elements are finite (not one of
+ Not a Number, positive infinity and negative infinity)
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+ `NaN` and `NAN` are aliases of `nan`.
+
+ Examples
+ --------
+ >>> np.nan
+ nan
+ >>> np.log(-1)
+ nan
+ >>> np.log([-1, 1, 2])
+ array([ NaN, 0. , 0.69314718])
+
+ """)
+
+add_newdoc('numpy', 'newaxis',
+ """
+ A convenient alias for None, useful for indexing arrays.
+
+ Examples
+ --------
+ >>> newaxis is None
+ True
+ >>> x = np.arange(3)
+ >>> x
+ array([0, 1, 2])
+ >>> x[:, newaxis]
+ array([[0],
+ [1],
+ [2]])
+ >>> x[:, newaxis, newaxis]
+ array([[[0]],
+ [[1]],
+ [[2]]])
+ >>> x[:, newaxis] * x
+ array([[0, 0, 0],
+ [0, 1, 2],
+ [0, 2, 4]])
+
+ Outer product, same as ``outer(x, y)``:
+
+ >>> y = np.arange(3, 6)
+ >>> x[:, newaxis] * y
+ array([[ 0, 0, 0],
+ [ 3, 4, 5],
+ [ 6, 8, 10]])
+
+ ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
+
+ >>> x[newaxis, :].shape
+ (1, 3)
+ >>> x[newaxis].shape
+ (1, 3)
+ >>> x[None].shape
+ (1, 3)
+ >>> x[:, newaxis].shape
+ (3, 1)
+
+ """)
+
+add_newdoc('numpy', 'NZERO',
+ """
+ IEEE 754 floating point representation of negative zero.
+
+ Returns
+ -------
+ y : float
+ A floating point representation of negative zero.
+
+ See Also
+ --------
+ PZERO : Defines positive zero.
+
+ isinf : Shows which elements are positive or negative infinity.
+
+ isposinf : Shows which elements are positive infinity.
+
+ isneginf : Shows which elements are negative infinity.
+
+ isnan : Shows which elements are Not a Number.
+
+ isfinite : Shows which elements are finite - not one of
+ Not a Number, positive infinity and negative infinity.
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). Negative zero is considered to be a finite number.
+
+ Examples
+ --------
+ >>> np.NZERO
+ -0.0
+ >>> np.PZERO
+ 0.0
+
+ >>> np.isfinite([np.NZERO])
+ array([ True])
+ >>> np.isnan([np.NZERO])
+ array([False])
+ >>> np.isinf([np.NZERO])
+ array([False])
+
+ """)
+
+add_newdoc('numpy', 'PZERO',
+ """
+ IEEE 754 floating point representation of positive zero.
+
+ Returns
+ -------
+ y : float
+ A floating point representation of positive zero.
+
+ See Also
+ --------
+ NZERO : Defines negative zero.
+
+ isinf : Shows which elements are positive or negative infinity.
+
+ isposinf : Shows which elements are positive infinity.
+
+ isneginf : Shows which elements are negative infinity.
+
+ isnan : Shows which elements are Not a Number.
+
+ isfinite : Shows which elements are finite - not one of
+ Not a Number, positive infinity and negative infinity.
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). Positive zero is considered to be a finite number.
+
+ Examples
+ --------
+ >>> np.PZERO
+ 0.0
+ >>> np.NZERO
+ -0.0
+
+ >>> np.isfinite([np.PZERO])
+ array([ True])
+ >>> np.isnan([np.PZERO])
+ array([False])
+ >>> np.isinf([np.PZERO])
+ array([False])
+
+ """)
+
+add_newdoc('numpy', 'NAN',
+ """
+ IEEE 754 floating point representation of Not a Number (NaN).
+
+ `NaN` and `NAN` are equivalent definitions of `nan`. Please use
+ `nan` instead of `NAN`.
+
+ See Also
+ --------
+ nan
+
+ """)
+
+add_newdoc('numpy', 'NaN',
+ """
+ IEEE 754 floating point representation of Not a Number (NaN).
+
+ `NaN` and `NAN` are equivalent definitions of `nan`. Please use
+ `nan` instead of `NaN`.
+
+ See Also
+ --------
+ nan
+
+ """)
+
+add_newdoc('numpy', 'NINF',
+ """
+ IEEE 754 floating point representation of negative infinity.
+
+ Returns
+ -------
+ y : float
+ A floating point representation of negative infinity.
+
+ See Also
+ --------
+ isinf : Shows which elements are positive or negative infinity
+
+ isposinf : Shows which elements are positive infinity
+
+ isneginf : Shows which elements are negative infinity
+
+ isnan : Shows which elements are Not a Number
+
+ isfinite : Shows which elements are finite (not one of Not a Number,
+ positive infinity and negative infinity)
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity,
+    that positive infinity is not equivalent to negative infinity, and that
+    infinity is equivalent to positive infinity.
+
+ Examples
+ --------
+ >>> np.NINF
+ -inf
+ >>> np.log(0)
+ -inf
+
+ """)
+
+add_newdoc('numpy', 'PINF',
+ """
+ IEEE 754 floating point representation of (positive) infinity.
+
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
+
+ See Also
+ --------
+ inf
+
+ """)
+
+add_newdoc('numpy', 'infty',
+ """
+ IEEE 754 floating point representation of (positive) infinity.
+
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
+
+ See Also
+ --------
+ inf
+
+ """)
+
+add_newdoc('numpy', 'Inf',
+ """
+ IEEE 754 floating point representation of (positive) infinity.
+
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
+
+ See Also
+ --------
+ inf
+
+ """)
+
+add_newdoc('numpy', 'Infinity',
+ """
+ IEEE 754 floating point representation of (positive) infinity.
+
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
+
+ See Also
+ --------
+ inf
+
+ """)
+
+
+if __doc__:
+ constants_str = []
+ constants.sort()
+ for name, doc in constants:
+ s = textwrap.dedent(doc).replace("\n", "\n ")
+
+ # Replace sections by rubrics
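+        # (A Sphinx rubric renders like a heading but does not start a new
+        # document section, which keeps the spliced-together module
+        # docstring structurally valid.)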
+ lines = s.split("\n")
+ new_lines = []
+ for line in lines:
+ m = re.match(r'^(\s+)[-=]+\s*$', line)
+ if m and new_lines:
+ prev = textwrap.dedent(new_lines.pop())
+ new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
+ new_lines.append('')
+ else:
+ new_lines.append(line)
+ s = "\n".join(new_lines)
+
+ # Done.
+ constants_str.append(""".. data:: %s\n %s""" % (name, s))
+ constants_str = "\n".join(constants_str)
+
+ __doc__ = __doc__ % dict(constant_list=constants_str)
+ del constants_str, name, doc
+ del line, lines, new_lines, m, s, prev
+
+del constants, add_newdoc
diff --git a/venv/lib/python3.9/site-packages/numpy/doc/ufuncs.py b/venv/lib/python3.9/site-packages/numpy/doc/ufuncs.py
new file mode 100644
index 00000000..c99e9abc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/doc/ufuncs.py
@@ -0,0 +1,137 @@
+"""
+===================
+Universal Functions
+===================
+
+Ufuncs are, generally speaking, mathematical functions or operations that are
+applied element-by-element to the contents of an array. That is, the result
+in each output array element only depends on the value in the corresponding
+input array (or arrays) and on no other array elements. NumPy comes with a
+large suite of ufuncs, and scipy extends that suite substantially. The simplest
+example is the addition operator: ::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+The numpy namespace exposes all the available ufuncs. Documentation on
+each specific ufunc may be found in its docstring. This document is
+intended to address the more general aspects of ufuncs common to most of
+them. All of the ufuncs that make use of Python operators (e.g., +, -,
+etc.) have equivalent functions defined (e.g. add() for +).
+
+Type coercion
+=============
+
+What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
+two different types? What is the type of the result? Typically, the result is
+the higher of the two types. For example: ::
+
+ float32 + float64 -> float64
+ int8 + int32 -> int32
+ int16 + float32 -> float32
+ float32 + complex64 -> complex64
+
+There are some less obvious cases, generally involving mixes of types
+(e.g. uints, ints and floats), where a type of equal bit size cannot
+represent all the information of the other type. Some examples are int32
+vs. float32 or uint32 vs. int32. In such cases the result is generally a
+higher type of larger size than both (if available). So: ::
+
+ int32 + float32 -> float64
+ uint32 + int32 -> int64
+
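+These promotion rules can be checked directly (an illustrative sketch;
+it assumes ``import numpy as np`` and the promotion behavior described
+here): ::
+
+ >>> (np.ones(2, dtype=np.int32) + np.ones(2, dtype=np.float32)).dtype
+ dtype('float64')
+ >>> (np.ones(2, dtype=np.uint32) + np.ones(2, dtype=np.int32)).dtype
+ dtype('int64')
+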
+Finally, the type coercion behavior when expressions involve Python
+scalars is different from that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type; instead, the type of the
+array prevails. The rule for Python scalars combined with arrays is:
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int), the result will be the array type equivalent of the Python
+scalar; otherwise, the result type will be that of the array.
+For example: ::
+
+ Python int + int8 -> int8
+ Python float + int8 -> float64
+
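+These scalar rules can likewise be verified (an illustrative sketch;
+it assumes ``import numpy as np`` and the value-based promotion rules
+described above): ::
+
+ >>> a = np.zeros(3, dtype=np.int8)
+ >>> (a + 2).dtype
+ dtype('int8')
+ >>> (a + 2.0).dtype
+ dtype('float64')
+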
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+ sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10)) # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+ array([ 5, 7, 9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([ 1, 2, 6, 24, 120, 720, 5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword.
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+ of an array, with the slices determined by pairs of consecutive indices.
+ See the np.ufunc.reduceat documentation for the full rules; an example
+ follows below.
+
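+For instance, with increasing indices each slice runs from one index up to
+the next, and the final index runs to the end of the array (an illustrative
+sketch; assumes ``import numpy as np``): ::
+
+ >>> np.add.reduceat(np.arange(8), [0, 4])
+ array([ 6, 22])
+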
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+ arr2. It will work on multidimensional arrays (the shape of the result is
+ the concatenation of the two input shapes): ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+ [0, 1, 2, 3],
+ [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array, which must have the expected
+output shape. Beware that if the type of the output array is lower than
+the type of the result, the result may be silently truncated or otherwise
+corrupted in the downcast to the lower type. Supplying an output array is
+useful when one wants to avoid creating large temporary arrays and instead
+reuse the same array memory repeatedly (at the expense of not being able
+to use more convenient operator notation in expressions). Note that when
+the output argument is used, the ufunc still returns a reference to the
+result.
+
+ >>> x = np.arange(2)
+ >>> np.add(np.arange(2),np.arange(2.),x)
+ array([0, 2])
+ >>> x
+ array([0, 2])
+
+and & or as ufuncs
+==================
+
+Invariably people try to use the Python 'and' and 'or' keywords as logical
+operators on arrays (and quite understandably), but Python treats these
+keywords specially and they cannot be overloaded with array equivalents,
+so using 'and' or 'or' with an array results in an error. There are two
+alternatives (see the example after this list):
+
+ 1) use the ufunc functions logical_and() and logical_or().
+ 2) use the bitwise operators & and \\|. The drawback of these is that if
+   the arguments are not boolean arrays, the result is likely incorrect.
+   On the other hand, most operands in this logical context are boolean
+   arrays anyway, so as long as one is careful, this is a convenient way
+   to apply these operators.
+
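+For example (an illustrative sketch; assumes ``import numpy as np``): ::
+
+ >>> a = np.array([True, False, True])
+ >>> b = np.array([True, True, False])
+ >>> np.logical_and(a, b)
+ array([ True, False, False])
+ >>> a & b
+ array([ True, False, False])
+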
+"""
diff --git a/venv/lib/python3.9/site-packages/numpy/dual.py b/venv/lib/python3.9/site-packages/numpy/dual.py
new file mode 100644
index 00000000..eb7e61aa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/dual.py
@@ -0,0 +1,83 @@
+"""
+.. deprecated:: 1.20
+
+*This module is deprecated. Instead of importing functions from*
+``numpy.dual``, *the functions should be imported directly from NumPy
+or SciPy*.
+
+Aliases for functions which may be accelerated by SciPy.
+
+SciPy_ can be built to use accelerated or otherwise improved libraries
+for FFTs, linear algebra, and special functions. This module allows
+developers to transparently support these accelerated functions when
+SciPy is available but still support users who have only installed
+NumPy.
+
+.. _SciPy : https://www.scipy.org
+
+"""
+import warnings
+
+
+warnings.warn('The module numpy.dual is deprecated. Instead of using dual, '
+ 'use the functions directly from numpy or scipy.',
+ category=DeprecationWarning,
+ stacklevel=2)
+
+# This module should be used for functions both in numpy and scipy if
+# you want to use the scipy version when it is available, and the numpy
+# version otherwise.
+# Usage --- from numpy.dual import fft, inv
+
+__all__ = ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2',
+ 'norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigvals',
+ 'eigh', 'eigvalsh', 'lstsq', 'pinv', 'cholesky', 'i0']
+
+import numpy.linalg as linpkg
+import numpy.fft as fftpkg
+from numpy.lib import i0
+import sys
+
+
+fft = fftpkg.fft
+ifft = fftpkg.ifft
+fftn = fftpkg.fftn
+ifftn = fftpkg.ifftn
+fft2 = fftpkg.fft2
+ifft2 = fftpkg.ifft2
+
+norm = linpkg.norm
+inv = linpkg.inv
+svd = linpkg.svd
+solve = linpkg.solve
+det = linpkg.det
+eig = linpkg.eig
+eigvals = linpkg.eigvals
+eigh = linpkg.eigh
+eigvalsh = linpkg.eigvalsh
+lstsq = linpkg.lstsq
+pinv = linpkg.pinv
+cholesky = linpkg.cholesky
+
+_restore_dict = {}
+
+def register_func(name, func):
+ if name not in __all__:
+ raise ValueError("{} not a dual function.".format(name))
+ f = sys._getframe(0).f_globals
+ _restore_dict[name] = f[name]
+ f[name] = func
+
+def restore_func(name):
+ if name not in __all__:
+ raise ValueError("{} not a dual function.".format(name))
+ try:
+ val = _restore_dict[name]
+ except KeyError:
+ return
+ else:
+ sys._getframe(0).f_globals[name] = val
+
+def restore_all():
+ for name in _restore_dict.keys():
+ restore_func(name)
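+
+# Illustrative usage sketch: with SciPy installed, an accelerated
+# implementation can be registered and later restored, e.g.
+#
+#     import numpy.dual, scipy.linalg
+#     numpy.dual.register_func('inv', scipy.linalg.inv)  # use SciPy's inv
+#     numpy.dual.restore_func('inv')  # back to numpy.linalg.inv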
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/__init__.py b/venv/lib/python3.9/site-packages/numpy/f2py/__init__.py
new file mode 100644
index 00000000..dbe3df27
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/__init__.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+"""Fortran to Python Interface Generator.
+
+"""
+__all__ = ['run_main', 'compile', 'get_include']
+
+import sys
+import subprocess
+import os
+
+from . import f2py2e
+from . import diagnose
+
+run_main = f2py2e.run_main
+main = f2py2e.main
+
+
+def compile(source,
+ modulename='untitled',
+ extra_args='',
+ verbose=True,
+ source_fn=None,
+ extension='.f',
+ full_output=False
+ ):
+ """
+ Build extension module from a Fortran 77 source string with f2py.
+
+ Parameters
+ ----------
+ source : str or bytes
+ Fortran source of module / subroutine to compile
+
+ .. versionchanged:: 1.16.0
+ Accept str as well as bytes
+
+ modulename : str, optional
+ The name of the compiled python module
+ extra_args : str or list, optional
+ Additional parameters passed to f2py
+
+ .. versionchanged:: 1.16.0
+ A list of args may also be provided.
+
+ verbose : bool, optional
+ Print f2py output to screen
+ source_fn : str, optional
+ Name of the file where the fortran source is written.
+ The default is to use a temporary file with the extension
+ provided by the ``extension`` parameter
+ extension : ``{'.f', '.f90'}``, optional
+ Filename extension if `source_fn` is not provided.
+ The extension tells which fortran standard is used.
+ The default is ``.f``, which implies F77 standard.
+
+ .. versionadded:: 1.11.0
+
+ full_output : bool, optional
+ If True, return a `subprocess.CompletedProcess` containing
+ the stdout and stderr of the compile process, instead of just
+ the status code.
+
+ .. versionadded:: 1.20.0
+
+
+ Returns
+ -------
+ result : int or `subprocess.CompletedProcess`
+ 0 on success, or a `subprocess.CompletedProcess` if
+ ``full_output=True``
+
+ Examples
+ --------
+ .. literalinclude:: ../../source/f2py/code/results/compile_session.dat
+ :language: python
+
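+    A minimal, illustrative session (assumes a working Fortran compiler
+    is installed; the module name ``hello`` is a placeholder)::
+
+        import numpy.f2py
+        fsource = '''
+              subroutine hello
+              print *, "Hello from Fortran!"
+              end
+        '''
+        # returns 0 on success (a CompletedProcess if full_output=True)
+        numpy.f2py.compile(fsource, modulename='hello', verbose=False)
+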
+ """
+ import tempfile
+ import shlex
+
+ if source_fn is None:
+ f, fname = tempfile.mkstemp(suffix=extension)
+ # f is a file descriptor so need to close it
+ # carefully -- not with .close() directly
+ os.close(f)
+ else:
+ fname = source_fn
+
+ if not isinstance(source, str):
+ source = str(source, 'utf-8')
+ try:
+ with open(fname, 'w') as f:
+ f.write(source)
+
+ args = ['-c', '-m', modulename, f.name]
+
+ if isinstance(extra_args, str):
+ is_posix = (os.name == 'posix')
+ extra_args = shlex.split(extra_args, posix=is_posix)
+
+ args.extend(extra_args)
+
+ c = [sys.executable,
+ '-c',
+ 'import numpy.f2py as f2py2e;f2py2e.main()'] + args
+ try:
+ cp = subprocess.run(c, capture_output=True)
+ except OSError:
+ # preserve historic status code used by exec_command()
+ cp = subprocess.CompletedProcess(c, 127, stdout=b'', stderr=b'')
+ else:
+ if verbose:
+ print(cp.stdout.decode())
+ finally:
+ if source_fn is None:
+ os.remove(fname)
+
+ if full_output:
+ return cp
+ else:
+ return cp.returncode
+
+
+def get_include():
+ """
+ Return the directory that contains the ``fortranobject.c`` and ``.h`` files.
+
+ .. note::
+
+ This function is not needed when building an extension with
+ `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files
+ in one go.
+
+ Python extension modules built with f2py-generated code need to use
+ ``fortranobject.c`` as a source file, and include the ``fortranobject.h``
+ header. This function can be used to obtain the directory containing
+ both of these files.
+
+ Returns
+ -------
+ include_path : str
+ Absolute path to the directory containing ``fortranobject.c`` and
+ ``fortranobject.h``.
+
+ Notes
+ -----
+ .. versionadded:: 1.21.1
+
+ Unless the build system you are using has specific support for f2py,
+ building a Python extension using a ``.pyf`` signature file is a two-step
+ process. For a module ``mymod``:
+
+ * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
+ generates ``_mymodmodule.c`` and (if needed)
+      ``_mymod-f2pywrappers.f`` files next to ``mymod.pyf``.
+ * Step 2: build your Python extension module. This requires the
+ following source files:
+
+ * ``_mymodmodule.c``
+ * ``_mymod-f2pywrappers.f`` (if it was generated in Step 1)
+ * ``fortranobject.c``
+
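+    For example, the two support files can be located as follows (an
+    illustrative sketch; the resulting path is installation dependent)::
+
+        import os
+        import numpy.f2py
+
+        src_dir = numpy.f2py.get_include()
+        os.path.isfile(os.path.join(src_dir, 'fortranobject.c'))  # -> True
+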
+ See Also
+ --------
+ numpy.get_include : function that returns the numpy include directory
+
+ """
+ return os.path.join(os.path.dirname(__file__), 'src')
+
+
+def __getattr__(attr):
+
+ # Avoid importing things that aren't needed for building
+ # which might import the main numpy module
+ if attr == "test":
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ return test
+
+ else:
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
+
+def __dir__():
+ return list(globals().keys() | {"test"})
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/f2py/__init__.pyi
new file mode 100644
index 00000000..6e3a82cf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/__init__.pyi
@@ -0,0 +1,43 @@
+import os
+import subprocess
+from collections.abc import Iterable
+from typing import Literal as L, Any, overload, TypedDict
+
+from numpy._pytesttester import PytestTester
+
+class _F2PyDictBase(TypedDict):
+ csrc: list[str]
+ h: list[str]
+
+class _F2PyDict(_F2PyDictBase, total=False):
+ fsrc: list[str]
+ ltx: list[str]
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ...
+
+@overload
+def compile( # type: ignore[misc]
+ source: str | bytes,
+ modulename: str = ...,
+ extra_args: str | list[str] = ...,
+ verbose: bool = ...,
+ source_fn: None | str | bytes | os.PathLike[Any] = ...,
+ extension: L[".f", ".f90"] = ...,
+ full_output: L[False] = ...,
+) -> int: ...
+@overload
+def compile(
+ source: str | bytes,
+ modulename: str = ...,
+ extra_args: str | list[str] = ...,
+ verbose: bool = ...,
+ source_fn: None | str | bytes | os.PathLike[Any] = ...,
+ extension: L[".f", ".f90"] = ...,
+ full_output: L[True] = ...,
+) -> subprocess.CompletedProcess[bytes]: ...
+
+def get_include() -> str: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/__main__.py b/venv/lib/python3.9/site-packages/numpy/f2py/__main__.py
new file mode 100644
index 00000000..936a753a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/__main__.py
@@ -0,0 +1,5 @@
+# See:
+# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
+from numpy.f2py.f2py2e import main
+
+main()
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/__version__.py b/venv/lib/python3.9/site-packages/numpy/f2py/__version__.py
new file mode 100644
index 00000000..e20d7c1d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/__version__.py
@@ -0,0 +1 @@
+from numpy.version import version
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py b/venv/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py
new file mode 100644
index 00000000..3f9b0cea
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/auxfuncs.py
@@ -0,0 +1,890 @@
+#!/usr/bin/env python3
+"""
+
+Auxiliary functions for f2py2e.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) LICENSE.
+
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/07/24 19:01:55 $
+Pearu Peterson
+
+"""
+import pprint
+import sys
+import types
+from functools import reduce
+
+from . import __version__
+from . import cfuncs
+
+__all__ = [
+ 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle',
+ 'getargs2', 'getcallprotoargument', 'getcallstatement',
+ 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode',
+ 'getusercode1', 'hasbody', 'hascallstatement', 'hascommon',
+ 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote',
+ 'isallocatable', 'isarray', 'isarrayofstrings',
+ 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray',
+ 'iscomplex',
+ 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn',
+ 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction',
+ 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux',
+ 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict',
+ 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace',
+ 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical',
+ 'islogicalfunction', 'islong_complex', 'islong_double',
+ 'islong_doublefunction', 'islong_long', 'islong_longfunction',
+ 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired',
+ 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring',
+ 'isstringarray', 'isstring_or_stringarray', 'isstringfunction',
+ 'issubroutine',
+ 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char',
+ 'isunsigned_chararray', 'isunsigned_long_long',
+ 'isunsigned_long_longarray', 'isunsigned_short',
+ 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess',
+ 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value'
+]
+
+
+f2py_version = __version__.version
+
+
+errmess = sys.stderr.write
+show = pprint.pprint
+
+options = {}
+debugoptions = []
+wrapfuncs = 1
+
+
+def outmess(t):
+ if options.get('verbose', 1):
+ sys.stdout.write(t)
+
+
+def debugcapi(var):
+ return 'capi' in debugoptions
+
+
+def _ischaracter(var):
+ return 'typespec' in var and var['typespec'] == 'character' and \
+ not isexternal(var)
+
+
+def _isstring(var):
+ return 'typespec' in var and var['typespec'] == 'character' and \
+ not isexternal(var)
+
+
+def ischaracter_or_characterarray(var):
+ return _ischaracter(var) and 'charselector' not in var
+
+
+def ischaracter(var):
+ return ischaracter_or_characterarray(var) and not isarray(var)
+
+
+def ischaracterarray(var):
+ return ischaracter_or_characterarray(var) and isarray(var)
+
+
+def isstring_or_stringarray(var):
+ return _ischaracter(var) and 'charselector' in var
+
+
+def isstring(var):
+ return isstring_or_stringarray(var) and not isarray(var)
+
+
+def isstringarray(var):
+ return isstring_or_stringarray(var) and isarray(var)
+
+
+def isarrayofstrings(var): # obsolete?
+ # leaving out '*' for now so that `character*(*) a(m)` and `character
+ # a(m,*)` are treated differently. Luckily `character**` is illegal.
+ return isstringarray(var) and var['dimension'][-1] == '(*)'
+
+
+def isarray(var):
+ return 'dimension' in var and not isexternal(var)
+
+
+def isscalar(var):
+ return not (isarray(var) or isstring(var) or isexternal(var))
+
+
+def iscomplex(var):
+ return isscalar(var) and \
+ var.get('typespec') in ['complex', 'double complex']
+
+
+def islogical(var):
+ return isscalar(var) and var.get('typespec') == 'logical'
+
+
+def isinteger(var):
+ return isscalar(var) and var.get('typespec') == 'integer'
+
+
+def isreal(var):
+ return isscalar(var) and var.get('typespec') == 'real'
+
+
+def get_kind(var):
+ try:
+ return var['kindselector']['*']
+ except KeyError:
+ try:
+ return var['kindselector']['kind']
+ except KeyError:
+ pass
+
+
+def isint1(var):
+ return var.get('typespec') == 'integer' \
+ and get_kind(var) == '1' and not isarray(var)
+
+
+def islong_long(var):
+ if not isscalar(var):
+ return 0
+ if var.get('typespec') not in ['integer', 'logical']:
+ return 0
+ return get_kind(var) == '8'
+
+
+def isunsigned_char(var):
+ if not isscalar(var):
+ return 0
+ if var.get('typespec') != 'integer':
+ return 0
+ return get_kind(var) == '-1'
+
+
+def isunsigned_short(var):
+ if not isscalar(var):
+ return 0
+ if var.get('typespec') != 'integer':
+ return 0
+ return get_kind(var) == '-2'
+
+
+def isunsigned(var):
+ if not isscalar(var):
+ return 0
+ if var.get('typespec') != 'integer':
+ return 0
+ return get_kind(var) == '-4'
+
+
+def isunsigned_long_long(var):
+ if not isscalar(var):
+ return 0
+ if var.get('typespec') != 'integer':
+ return 0
+ return get_kind(var) == '-8'
+
+
+def isdouble(var):
+ if not isscalar(var):
+ return 0
+ if not var.get('typespec') == 'real':
+ return 0
+ return get_kind(var) == '8'
+
+
+def islong_double(var):
+ if not isscalar(var):
+ return 0
+ if not var.get('typespec') == 'real':
+ return 0
+ return get_kind(var) == '16'
+
+
+def islong_complex(var):
+ if not iscomplex(var):
+ return 0
+ return get_kind(var) == '32'
+
+
+def iscomplexarray(var):
+ return isarray(var) and \
+ var.get('typespec') in ['complex', 'double complex']
+
+
+def isint1array(var):
+ return isarray(var) and var.get('typespec') == 'integer' \
+ and get_kind(var) == '1'
+
+
+def isunsigned_chararray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '-1'
+
+
+def isunsigned_shortarray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '-2'
+
+
+def isunsignedarray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '-4'
+
+
+def isunsigned_long_longarray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '-8'
+
+
+def issigned_chararray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '1'
+
+
+def issigned_shortarray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '2'
+
+
+def issigned_array(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '4'
+
+
+def issigned_long_longarray(var):
+ return isarray(var) and var.get('typespec') in ['integer', 'logical']\
+ and get_kind(var) == '8'
+
+
+def isallocatable(var):
+ return 'attrspec' in var and 'allocatable' in var['attrspec']
+
+
+def ismutable(var):
+ return not ('dimension' not in var or isstring(var))
+
+
+def ismoduleroutine(rout):
+ return 'modulename' in rout
+
+
+def ismodule(rout):
+ return 'block' in rout and 'module' == rout['block']
+
+
+def isfunction(rout):
+ return 'block' in rout and 'function' == rout['block']
+
+
+def isfunction_wrap(rout):
+ if isintent_c(rout):
+ return 0
+ return wrapfuncs and isfunction(rout) and (not isexternal(rout))
+
+
+def issubroutine(rout):
+ return 'block' in rout and 'subroutine' == rout['block']
+
+
+def issubroutine_wrap(rout):
+ if isintent_c(rout):
+ return 0
+ return issubroutine(rout) and hasassumedshape(rout)
+
+def isattr_value(var):
+ return 'value' in var.get('attrspec', [])
+
+
+def hasassumedshape(rout):
+ if rout.get('hasassumedshape'):
+ return True
+ for a in rout['args']:
+ for d in rout['vars'].get(a, {}).get('dimension', []):
+ if d == ':':
+ rout['hasassumedshape'] = True
+ return True
+ return False
+
+
+def requiresf90wrapper(rout):
+ return ismoduleroutine(rout) or hasassumedshape(rout)
+
+
+def isroutine(rout):
+ return isfunction(rout) or issubroutine(rout)
+
+
+def islogicalfunction(rout):
+ if not isfunction(rout):
+ return 0
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if a in rout['vars']:
+ return islogical(rout['vars'][a])
+ return 0
+
+
+def islong_longfunction(rout):
+ if not isfunction(rout):
+ return 0
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if a in rout['vars']:
+ return islong_long(rout['vars'][a])
+ return 0
+
+
+def islong_doublefunction(rout):
+ if not isfunction(rout):
+ return 0
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if a in rout['vars']:
+ return islong_double(rout['vars'][a])
+ return 0
+
+
+def iscomplexfunction(rout):
+ if not isfunction(rout):
+ return 0
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if a in rout['vars']:
+ return iscomplex(rout['vars'][a])
+ return 0
+
+
+def iscomplexfunction_warn(rout):
+ if iscomplexfunction(rout):
+ outmess("""\
+ **************************************************************
+ Warning: code with a function returning complex value
+ may not work correctly with your Fortran compiler.
+ When using GNU gcc/g77 compilers, codes should work
+ correctly for callbacks with:
+ f2py -c -DF2PY_CB_RETURNCOMPLEX
+ **************************************************************\n""")
+ return 1
+ return 0
+
+
+def isstringfunction(rout):
+ if not isfunction(rout):
+ return 0
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if a in rout['vars']:
+ return isstring(rout['vars'][a])
+ return 0
+
+
+def hasexternals(rout):
+ return 'externals' in rout and rout['externals']
+
+
+def isthreadsafe(rout):
+ return 'f2pyenhancements' in rout and \
+ 'threadsafe' in rout['f2pyenhancements']
+
+
+def hasvariables(rout):
+ return 'vars' in rout and rout['vars']
+
+
+def isoptional(var):
+ return ('attrspec' in var and 'optional' in var['attrspec'] and
+ 'required' not in var['attrspec']) and isintent_nothide(var)
+
+
+def isexternal(var):
+ return 'attrspec' in var and 'external' in var['attrspec']
+
+
+def isrequired(var):
+ return not isoptional(var) and isintent_nothide(var)
+
+
+def isintent_in(var):
+ if 'intent' not in var:
+ return 1
+ if 'hide' in var['intent']:
+ return 0
+ if 'inplace' in var['intent']:
+ return 0
+ if 'in' in var['intent']:
+ return 1
+ if 'out' in var['intent']:
+ return 0
+ if 'inout' in var['intent']:
+ return 0
+ if 'outin' in var['intent']:
+ return 0
+ return 1
+
+
+def isintent_inout(var):
+ return ('intent' in var and ('inout' in var['intent'] or
+ 'outin' in var['intent']) and 'in' not in var['intent'] and
+ 'hide' not in var['intent'] and 'inplace' not in var['intent'])
+
+
+def isintent_out(var):
+ return 'out' in var.get('intent', [])
+
+
+def isintent_hide(var):
+ return ('intent' in var and ('hide' in var['intent'] or
+ ('out' in var['intent'] and 'in' not in var['intent'] and
+ (not l_or(isintent_inout, isintent_inplace)(var)))))
+
+
+def isintent_nothide(var):
+ return not isintent_hide(var)
+
+
+def isintent_c(var):
+ return 'c' in var.get('intent', [])
+
+
+def isintent_cache(var):
+ return 'cache' in var.get('intent', [])
+
+
+def isintent_copy(var):
+ return 'copy' in var.get('intent', [])
+
+
+def isintent_overwrite(var):
+ return 'overwrite' in var.get('intent', [])
+
+
+def isintent_callback(var):
+ return 'callback' in var.get('intent', [])
+
+
+def isintent_inplace(var):
+ return 'inplace' in var.get('intent', [])
+
+
+def isintent_aux(var):
+ return 'aux' in var.get('intent', [])
+
+
+def isintent_aligned4(var):
+ return 'aligned4' in var.get('intent', [])
+
+
+def isintent_aligned8(var):
+ return 'aligned8' in var.get('intent', [])
+
+
+def isintent_aligned16(var):
+ return 'aligned16' in var.get('intent', [])
+
+
+isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT',
+ isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE',
+ isintent_cache: 'INTENT_CACHE',
+ isintent_c: 'INTENT_C', isoptional: 'OPTIONAL',
+ isintent_inplace: 'INTENT_INPLACE',
+ isintent_aligned4: 'INTENT_ALIGNED4',
+ isintent_aligned8: 'INTENT_ALIGNED8',
+ isintent_aligned16: 'INTENT_ALIGNED16',
+ }
+
+
+def isprivate(var):
+ return 'attrspec' in var and 'private' in var['attrspec']
+
+
+def hasinitvalue(var):
+ return '=' in var
+
+
+def hasinitvalueasstring(var):
+ if not hasinitvalue(var):
+ return 0
+ return var['='][0] in ['"', "'"]
+
+
+def hasnote(var):
+ return 'note' in var
+
+
+def hasresultnote(rout):
+ if not isfunction(rout):
+ return 0
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if a in rout['vars']:
+ return hasnote(rout['vars'][a])
+ return 0
+
+
+def hascommon(rout):
+ return 'common' in rout
+
+
+def containscommon(rout):
+ if hascommon(rout):
+ return 1
+ if hasbody(rout):
+ for b in rout['body']:
+ if containscommon(b):
+ return 1
+ return 0
+
+
+def containsmodule(block):
+ if ismodule(block):
+ return 1
+ if not hasbody(block):
+ return 0
+ for b in block['body']:
+ if containsmodule(b):
+ return 1
+ return 0
+
+
+def hasbody(rout):
+ return 'body' in rout
+
+
+def hascallstatement(rout):
+ return getcallstatement(rout) is not None
+
+
+def istrue(var):
+ return 1
+
+
+def isfalse(var):
+ return 0
+
+
+class F2PYError(Exception):
+ pass
+
+
+class throw_error:
+
+ def __init__(self, mess):
+ self.mess = mess
+
+ def __call__(self, var):
+ mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess)
+ raise F2PYError(mess)
+
+
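+# The following helpers compose predicate functions: for example,
+# l_and(isinteger, isintent_in)(var) evaluates
+# isinteger(var) and isintent_in(var); the eval-built lambdas bind each
+# f[i] as a default argument so the composed predicate is self-contained.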
+def l_and(*f):
+ l1, l2 = 'lambda v', []
+ for i in range(len(f)):
+ l1 = '%s,f%d=f[%d]' % (l1, i, i)
+ l2.append('f%d(v)' % (i))
+ return eval('%s:%s' % (l1, ' and '.join(l2)))
+
+
+def l_or(*f):
+ l1, l2 = 'lambda v', []
+ for i in range(len(f)):
+ l1 = '%s,f%d=f[%d]' % (l1, i, i)
+ l2.append('f%d(v)' % (i))
+ return eval('%s:%s' % (l1, ' or '.join(l2)))
+
+
+def l_not(f):
+ return eval('lambda v,f=f:not f(v)')
+
+
+def isdummyroutine(rout):
+ try:
+ return rout['f2pyenhancements']['fortranname'] == ''
+ except KeyError:
+ return 0
+
+
+def getfortranname(rout):
+ try:
+ name = rout['f2pyenhancements']['fortranname']
+ if name == '':
+ raise KeyError
+ if not name:
+ errmess('Failed to use fortranname from %s\n' %
+ (rout['f2pyenhancements']))
+ raise KeyError
+ except KeyError:
+ name = rout['name']
+ return name
+
+
+def getmultilineblock(rout, blockname, comment=1, counter=0):
+ try:
+ r = rout['f2pyenhancements'].get(blockname)
+ except KeyError:
+ return
+ if not r:
+ return
+ if counter > 0 and isinstance(r, str):
+ return
+ if isinstance(r, list):
+ if counter >= len(r):
+ return
+ r = r[counter]
+ if r[:3] == "'''":
+ if comment:
+ r = '\t/* start ' + blockname + \
+ ' multiline (' + repr(counter) + ') */\n' + r[3:]
+ else:
+ r = r[3:]
+ if r[-3:] == "'''":
+ if comment:
+ r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/'
+ else:
+ r = r[:-3]
+ else:
+ errmess("%s multiline block should end with `'''`: %s\n"
+ % (blockname, repr(r)))
+ return r
+
+
+def getcallstatement(rout):
+ return getmultilineblock(rout, 'callstatement')
+
+
+def getcallprotoargument(rout, cb_map={}):
+ r = getmultilineblock(rout, 'callprotoargument', comment=0)
+ if r:
+ return r
+ if hascallstatement(rout):
+ outmess(
+ 'warning: callstatement is defined without callprotoargument\n')
+ return
+ from .capi_maps import getctype
+ arg_types, arg_types2 = [], []
+ if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
+ arg_types.extend(['char*', 'size_t'])
+ for n in rout['args']:
+ var = rout['vars'][n]
+ if isintent_callback(var):
+ continue
+ if n in cb_map:
+ ctype = cb_map[n] + '_typedef'
+ else:
+ ctype = getctype(var)
+ if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
+ pass
+ elif isstring(var):
+ pass
+ else:
+ if not isattr_value(var):
+ ctype = ctype + '*'
+ if ((isstring(var)
+ or isarrayofstrings(var) # obsolete?
+ or isstringarray(var))):
+ arg_types2.append('size_t')
+ arg_types.append(ctype)
+
+ proto_args = ','.join(arg_types + arg_types2)
+ if not proto_args:
+ proto_args = 'void'
+ return proto_args
+
+
+def getusercode(rout):
+ return getmultilineblock(rout, 'usercode')
+
+
+def getusercode1(rout):
+ return getmultilineblock(rout, 'usercode', counter=1)
+
+
+def getpymethoddef(rout):
+ return getmultilineblock(rout, 'pymethoddef')
+
+
+def getargs(rout):
+ sortargs, args = [], []
+ if 'args' in rout:
+ args = rout['args']
+ if 'sortvars' in rout:
+ for a in rout['sortvars']:
+ if a in args:
+ sortargs.append(a)
+ for a in args:
+ if a not in sortargs:
+ sortargs.append(a)
+ else:
+ sortargs = rout['args']
+ return args, sortargs
+
+
+def getargs2(rout):
+ sortargs, args = [], rout.get('args', [])
+ auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])
+ and a not in args]
+ args = auxvars + args
+ if 'sortvars' in rout:
+ for a in rout['sortvars']:
+ if a in args:
+ sortargs.append(a)
+ for a in args:
+ if a not in sortargs:
+ sortargs.append(a)
+ else:
+ sortargs = auxvars + rout['args']
+ return args, sortargs
+
+
+def getrestdoc(rout):
+ if 'f2pymultilines' not in rout:
+ return None
+ k = None
+ if rout['block'] == 'python module':
+ k = rout['block'], rout['name']
+ return rout['f2pymultilines'].get(k, None)
+
+
+def gentitle(name):
+ ln = (80 - len(name) - 6) // 2
+ return '/*%s %s %s*/' % (ln * '*', name, ln * '*')
+
+
+def flatlist(lst):
+ if isinstance(lst, list):
+ return reduce(lambda x, y, f=flatlist: x + f(y), lst, [])
+ return [lst]
+
+
+def stripcomma(s):
+ if s and s[-1] == ',':
+ return s[:-1]
+ return s
+
+
+def replace(str, d, defaultsep=''):
+ if isinstance(d, list):
+ return [replace(str, _m, defaultsep) for _m in d]
+ if isinstance(str, list):
+ return [replace(_m, d, defaultsep) for _m in str]
+ for k in 2 * list(d.keys()):
+ if k == 'separatorsfor':
+ continue
+ if 'separatorsfor' in d and k in d['separatorsfor']:
+ sep = d['separatorsfor'][k]
+ else:
+ sep = defaultsep
+ if isinstance(d[k], list):
+ str = str.replace('#%s#' % (k), sep.join(flatlist(d[k])))
+ else:
+ str = str.replace('#%s#' % (k), d[k])
+ return str
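+
+# Illustrative example for replace(): the call
+#   replace('#name#(#args#)',
+#           {'name': 'foo', 'args': ['a', 'b'],
+#            'separatorsfor': {'args': ','}})
+# returns 'foo(a,b)'.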
+
+
+def dictappend(rd, ar):
+ if isinstance(ar, list):
+ for a in ar:
+ rd = dictappend(rd, a)
+ return rd
+ for k in ar.keys():
+ if k[0] == '_':
+ continue
+ if k in rd:
+ if isinstance(rd[k], str):
+ rd[k] = [rd[k]]
+ if isinstance(rd[k], list):
+ if isinstance(ar[k], list):
+ rd[k] = rd[k] + ar[k]
+ else:
+ rd[k].append(ar[k])
+ elif isinstance(rd[k], dict):
+ if isinstance(ar[k], dict):
+ if k == 'separatorsfor':
+ for k1 in ar[k].keys():
+ if k1 not in rd[k]:
+ rd[k][k1] = ar[k][k1]
+ else:
+ rd[k] = dictappend(rd[k], ar[k])
+ else:
+ rd[k] = ar[k]
+ return rd
+
+
+def applyrules(rules, d, var={}):
+ ret = {}
+ if isinstance(rules, list):
+ for r in rules:
+ rr = applyrules(r, d, var)
+ ret = dictappend(ret, rr)
+ if '_break' in rr:
+ break
+ return ret
+ if '_check' in rules and (not rules['_check'](var)):
+ return ret
+ if 'need' in rules:
+ res = applyrules({'needs': rules['need']}, d, var)
+ if 'needs' in res:
+ cfuncs.append_needs(res['needs'])
+
+ for k in rules.keys():
+ if k == 'separatorsfor':
+ ret[k] = rules[k]
+ continue
+ if isinstance(rules[k], str):
+ ret[k] = replace(rules[k], d)
+ elif isinstance(rules[k], list):
+ ret[k] = []
+ for i in rules[k]:
+ ar = applyrules({k: i}, d, var)
+ if k in ar:
+ ret[k].append(ar[k])
+ elif k[0] == '_':
+ continue
+ elif isinstance(rules[k], dict):
+ ret[k] = []
+ for k1 in rules[k].keys():
+ if isinstance(k1, types.FunctionType) and k1(var):
+ if isinstance(rules[k][k1], list):
+ for i in rules[k][k1]:
+ if isinstance(i, dict):
+ res = applyrules({'supertext': i}, d, var)
+ if 'supertext' in res:
+ i = res['supertext']
+ else:
+ i = ''
+ ret[k].append(replace(i, d))
+ else:
+ i = rules[k][k1]
+ if isinstance(i, dict):
+ res = applyrules({'supertext': i}, d)
+ if 'supertext' in res:
+ i = res['supertext']
+ else:
+ i = ''
+ ret[k].append(replace(i, d))
+ else:
+ errmess('applyrules: ignoring rule %s.\n' % repr(rules[k]))
+ if isinstance(ret[k], list):
+ if len(ret[k]) == 1:
+ ret[k] = ret[k][0]
+ if ret[k] == []:
+ del ret[k]
+ return ret
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py b/venv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py
new file mode 100644
index 00000000..aaae3a8e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/capi_maps.py
@@ -0,0 +1,880 @@
+#!/usr/bin/env python3
+"""
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 10:57:33 $
+Pearu Peterson
+
+"""
+from . import __version__
+f2py_version = __version__.version
+
+import copy
+import re
+import os
+from .crackfortran import markoutercomma
+from . import cb_rules
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+
+__all__ = [
+ 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign',
+ 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map',
+ 'cb_sign2map', 'cb_routsign2map', 'common_sign2map'
+]
+
+
+# Numarray and Numeric users should set this False
+using_newcore = True
+
+depargs = []
+lcb_map = {}
+lcb2_map = {}
+# forced casting: mainly caused by the fact that Python or Numeric
+# C/APIs do not support the corresponding C types.
+c2py_map = {'double': 'float',
+ 'float': 'float', # forced casting
+ 'long_double': 'float', # forced casting
+ 'char': 'int', # forced casting
+ 'signed_char': 'int', # forced casting
+ 'unsigned_char': 'int', # forced casting
+ 'short': 'int', # forced casting
+ 'unsigned_short': 'int', # forced casting
+ 'int': 'int', # forced casting
+ 'long': 'int',
+ 'long_long': 'long',
+ 'unsigned': 'int', # forced casting
+ 'complex_float': 'complex', # forced casting
+ 'complex_double': 'complex',
+ 'complex_long_double': 'complex', # forced casting
+ 'string': 'string',
+ 'character': 'bytes',
+ }
+c2capi_map = {'double': 'NPY_DOUBLE',
+ 'float': 'NPY_FLOAT',
+ 'long_double': 'NPY_DOUBLE', # forced casting
+ 'char': 'NPY_STRING',
+ 'unsigned_char': 'NPY_UBYTE',
+ 'signed_char': 'NPY_BYTE',
+ 'short': 'NPY_SHORT',
+ 'unsigned_short': 'NPY_USHORT',
+ 'int': 'NPY_INT',
+ 'unsigned': 'NPY_UINT',
+ 'long': 'NPY_LONG',
+ 'long_long': 'NPY_LONG', # forced casting
+ 'complex_float': 'NPY_CFLOAT',
+ 'complex_double': 'NPY_CDOUBLE',
+ 'complex_long_double': 'NPY_CDOUBLE', # forced casting
+ 'string': 'NPY_STRING',
+ 'character': 'NPY_CHAR'}
+
+# When building against the modern NumPy core (the default), the maps
+# above are replaced by the following:
+if using_newcore:
+ c2capi_map = {'double': 'NPY_DOUBLE',
+ 'float': 'NPY_FLOAT',
+ 'long_double': 'NPY_LONGDOUBLE',
+ 'char': 'NPY_BYTE',
+ 'unsigned_char': 'NPY_UBYTE',
+ 'signed_char': 'NPY_BYTE',
+ 'short': 'NPY_SHORT',
+ 'unsigned_short': 'NPY_USHORT',
+ 'int': 'NPY_INT',
+ 'unsigned': 'NPY_UINT',
+ 'long': 'NPY_LONG',
+ 'unsigned_long': 'NPY_ULONG',
+ 'long_long': 'NPY_LONGLONG',
+ 'unsigned_long_long': 'NPY_ULONGLONG',
+ 'complex_float': 'NPY_CFLOAT',
+ 'complex_double': 'NPY_CDOUBLE',
+ 'complex_long_double': 'NPY_CDOUBLE',
+ 'string': 'NPY_STRING',
+ 'character': 'NPY_STRING'}
+
+c2pycode_map = {'double': 'd',
+ 'float': 'f',
+ 'long_double': 'd', # forced casting
+ 'char': '1',
+ 'signed_char': '1',
+ 'unsigned_char': 'b',
+ 'short': 's',
+ 'unsigned_short': 'w',
+ 'int': 'i',
+ 'unsigned': 'u',
+ 'long': 'l',
+ 'long_long': 'L',
+ 'complex_float': 'F',
+ 'complex_double': 'D',
+ 'complex_long_double': 'D', # forced casting
+ 'string': 'c',
+ 'character': 'c'
+ }
+
+if using_newcore:
+ c2pycode_map = {'double': 'd',
+ 'float': 'f',
+ 'long_double': 'g',
+ 'char': 'b',
+ 'unsigned_char': 'B',
+ 'signed_char': 'b',
+ 'short': 'h',
+ 'unsigned_short': 'H',
+ 'int': 'i',
+ 'unsigned': 'I',
+ 'long': 'l',
+ 'unsigned_long': 'L',
+ 'long_long': 'q',
+ 'unsigned_long_long': 'Q',
+ 'complex_float': 'F',
+ 'complex_double': 'D',
+ 'complex_long_double': 'G',
+ 'string': 'S',
+ 'character': 'c'}
+
+# https://docs.python.org/3/c-api/arg.html#building-values
+# c2buildvalue_map is NumPy agnostic, so no need to bother with using_newcore
+c2buildvalue_map = {'double': 'd',
+ 'float': 'f',
+ 'char': 'b',
+ 'signed_char': 'b',
+ 'short': 'h',
+ 'int': 'i',
+ 'long': 'l',
+ 'long_long': 'L',
+ 'complex_float': 'N',
+ 'complex_double': 'N',
+ 'complex_long_double': 'N',
+ 'string': 'y',
+ 'character': 'c'}
+
+f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double',
+ '12': 'long_double', '16': 'long_double'},
+ 'integer': {'': 'int', '1': 'signed_char', '2': 'short',
+ '4': 'int', '8': 'long_long',
+ '-1': 'unsigned_char', '-2': 'unsigned_short',
+ '-4': 'unsigned', '-8': 'unsigned_long_long'},
+ 'complex': {'': 'complex_float', '8': 'complex_float',
+ '16': 'complex_double', '24': 'complex_long_double',
+ '32': 'complex_long_double'},
+ 'complexkind': {'': 'complex_float', '4': 'complex_float',
+ '8': 'complex_double', '12': 'complex_long_double',
+ '16': 'complex_long_double'},
+ 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int',
+ '8': 'long_long'},
+ 'double complex': {'': 'complex_double'},
+ 'double precision': {'': 'double'},
+ 'byte': {'': 'char'},
+ }
+
+f2cmap_default = copy.deepcopy(f2cmap_all)
+
+f2cmap_mapped = []
+
+def load_f2cmap_file(f2cmap_file):
+ global f2cmap_all
+
+ f2cmap_all = copy.deepcopy(f2cmap_default)
+
+ if f2cmap_file is None:
+ # Default value
+ f2cmap_file = '.f2py_f2cmap'
+ if not os.path.isfile(f2cmap_file):
+ return
+
+ # User defined additions to f2cmap_all.
+ # f2cmap_file must contain a dictionary of dictionaries, only. For
+ # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is
+ # interpreted as C 'float'. This feature is useful for F90/95 users if
+ # they use PARAMETERS in type specifications.
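+    # For instance (an illustrative sketch), a .f2py_f2cmap file containing
+    #     {'real': {'dp': 'double', 'sp': 'float'}}
+    # maps Fortran real(dp) to C double and real(sp) to C float, assuming
+    # dp and sp are kind PARAMETERs in the wrapped source. Note that the
+    # file contents are eval'd below as a Python dict literal.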
+ try:
+ outmess('Reading f2cmap from {!r} ...\n'.format(f2cmap_file))
+ with open(f2cmap_file, 'r') as f:
+ d = eval(f.read().lower(), {}, {})
+ for k, d1 in d.items():
+ for k1 in d1.keys():
+ d1[k1.lower()] = d1[k1]
+ d[k.lower()] = d[k]
+ for k in d.keys():
+ if k not in f2cmap_all:
+ f2cmap_all[k] = {}
+ for k1 in d[k].keys():
+ if d[k][k1] in c2py_map:
+ if k1 in f2cmap_all[k]:
+ outmess(
+ "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1]))
+ f2cmap_all[k][k1] = d[k][k1]
+ outmess('\tMapping "%s(kind=%s)" to "%s"\n' %
+ (k, k1, d[k][k1]))
+ f2cmap_mapped.append(d[k][k1])
+ else:
+ errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % (
+ k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
+ outmess('Successfully applied user defined f2cmap changes\n')
+ except Exception as msg:
+ errmess(
+ 'Failed to apply user defined f2cmap changes: %s. Skipping.\n' % (msg))
+
+cformat_map = {'double': '%g',
+ 'float': '%g',
+ 'long_double': '%Lg',
+ 'char': '%d',
+ 'signed_char': '%d',
+ 'unsigned_char': '%hhu',
+ 'short': '%hd',
+ 'unsigned_short': '%hu',
+ 'int': '%d',
+ 'unsigned': '%u',
+ 'long': '%ld',
+ 'unsigned_long': '%lu',
+ 'long_long': '%ld',
+ 'complex_float': '(%g,%g)',
+ 'complex_double': '(%g,%g)',
+ 'complex_long_double': '(%Lg,%Lg)',
+ 'string': '\\"%s\\"',
+ 'character': "'%c'",
+ }
+
+# Auxiliary functions
+
+
+def getctype(var):
+ """
+ Determines C type
+ """
+ ctype = 'void'
+ if isfunction(var):
+ if 'result' in var:
+ a = var['result']
+ else:
+ a = var['name']
+ if a in var['vars']:
+ return getctype(var['vars'][a])
+ else:
+ errmess('getctype: function %s has no return value?!\n' % a)
+ elif issubroutine(var):
+ return ctype
+ elif ischaracter_or_characterarray(var):
+ return 'character'
+ elif isstring_or_stringarray(var):
+ return 'string'
+ elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:
+ typespec = var['typespec'].lower()
+ f2cmap = f2cmap_all[typespec]
+ ctype = f2cmap[''] # default type
+ if 'kindselector' in var:
+ if '*' in var['kindselector']:
+ try:
+ ctype = f2cmap[var['kindselector']['*']]
+ except KeyError:
+ errmess('getctype: "%s %s %s" not supported.\n' %
+ (var['typespec'], '*', var['kindselector']['*']))
+ elif 'kind' in var['kindselector']:
+ if typespec + 'kind' in f2cmap_all:
+ f2cmap = f2cmap_all[typespec + 'kind']
+ try:
+ ctype = f2cmap[var['kindselector']['kind']]
+ except KeyError:
+ if typespec in f2cmap_all:
+ f2cmap = f2cmap_all[typespec]
+ try:
+ ctype = f2cmap[str(var['kindselector']['kind'])]
+ except KeyError:
+ errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'
+ % (typespec, var['kindselector']['kind'], ctype,
+ typespec, var['kindselector']['kind'], os.getcwd()))
+ else:
+ if not isexternal(var):
+ errmess('getctype: No C-type found in "%s", assuming void.\n' % var)
+ return ctype
+
+
+def f2cexpr(expr):
+ """Rewrite Fortran expression as f2py supported C expression.
+
+ Due to the lack of a proper expression parser in f2py, this
+ function uses a heuristic approach that assumes that Fortran
+ arithmetic expressions are valid C arithmetic expressions when
+ mapping Fortran function calls to the corresponding C function/CPP
+ macros calls.
+
+ """
+ # TODO: support Fortran `len` function with optional kind parameter
+ expr = re.sub(r'\blen\b', 'f2py_slen', expr)
+ return expr
+
+
+def getstrlength(var):
+ if isstringfunction(var):
+ if 'result' in var:
+ a = var['result']
+ else:
+ a = var['name']
+ if a in var['vars']:
+ return getstrlength(var['vars'][a])
+ else:
+ errmess('getstrlength: function %s has no return value?!\n' % a)
+ if not isstring(var):
+ errmess(
+ 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var)))
+ len = '1'
+ if 'charselector' in var:
+ a = var['charselector']
+ if '*' in a:
+ len = a['*']
+ elif 'len' in a:
+ len = f2cexpr(a['len'])
+ if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len):
+ if isintent_hide(var):
+ errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % (
+ repr(var)))
+ len = '-1'
+ return len
+
+
+def getarrdims(a, var, verbose=0):
+ ret = {}
+ if isstring(var) and not isarray(var):
+ ret['size'] = getstrlength(var)
+ ret['rank'] = '0'
+ ret['dims'] = ''
+ elif isscalar(var):
+ ret['size'] = '1'
+ ret['rank'] = '0'
+ ret['dims'] = ''
+ elif isarray(var):
+ dim = copy.copy(var['dimension'])
+ ret['size'] = '*'.join(dim)
+ try:
+ ret['size'] = repr(eval(ret['size']))
+ except Exception:
+ pass
+ ret['dims'] = ','.join(dim)
+ ret['rank'] = repr(len(dim))
+ ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1]
+ for i in range(len(dim)): # solve dim for dependencies
+ v = []
+ if dim[i] in depargs:
+ v = [dim[i]]
+ else:
+ for va in depargs:
+ if re.match(r'.*?\b%s\b.*' % va, dim[i]):
+ v.append(va)
+ for va in v:
+ if depargs.index(va) > depargs.index(a):
+ dim[i] = '*'
+ break
+ ret['setdims'], i = '', -1
+ for d in dim:
+ i = i + 1
+ if d not in ['*', ':', '(*)', '(:)']:
+ ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % (
+ ret['setdims'], i, d)
+ if ret['setdims']:
+ ret['setdims'] = ret['setdims'][:-1]
+ ret['cbsetdims'], i = '', -1
+ for d in var['dimension']:
+ i = i + 1
+ if d not in ['*', ':', '(*)', '(:)']:
+ ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (
+ ret['cbsetdims'], i, d)
+ elif isintent_in(var):
+ outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n'
+ % (d))
+ ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (
+ ret['cbsetdims'], i, 0)
+ elif verbose:
+ errmess(
+ 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d)))
+ if ret['cbsetdims']:
+ ret['cbsetdims'] = ret['cbsetdims'][:-1]
+# if not isintent_c(var):
+# var['dimension'].reverse()
+ return ret
+
+
+def getpydocsign(a, var):
+ global lcb_map
+ if isfunction(var):
+ if 'result' in var:
+ af = var['result']
+ else:
+ af = var['name']
+ if af in var['vars']:
+ return getpydocsign(af, var['vars'][af])
+ else:
+            errmess('getpydocsign: function %s has no return value?!\n' % af)
+ return '', ''
+ sig, sigout = a, a
+ opt = ''
+ if isintent_in(var):
+ opt = 'input'
+ elif isintent_inout(var):
+ opt = 'in/output'
+ out_a = a
+ if isintent_out(var):
+ for k in var['intent']:
+ if k[:4] == 'out=':
+ out_a = k[4:]
+ break
+ init = ''
+ ctype = getctype(var)
+
+ if hasinitvalue(var):
+ init, showinit = getinit(a, var)
+ init = ', optional\\n Default: %s' % showinit
+ if isscalar(var):
+ if isintent_inout(var):
+ sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype],
+ c2pycode_map[ctype], init)
+ else:
+ sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init)
+ sigout = '%s : %s' % (out_a, c2py_map[ctype])
+ elif isstring(var):
+ if isintent_inout(var):
+ sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % (
+ a, opt, getstrlength(var), init)
+ else:
+ sig = '%s : %s string(len=%s)%s' % (
+ a, opt, getstrlength(var), init)
+ sigout = '%s : string(len=%s)' % (out_a, getstrlength(var))
+ elif isarray(var):
+ dim = var['dimension']
+ rank = repr(len(dim))
+ sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank,
+ c2pycode_map[
+ ctype],
+ ','.join(dim), init)
+ if a == out_a:
+ sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\
+ % (a, rank, c2pycode_map[ctype], ','.join(dim))
+ else:
+ sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
+ % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a)
+ elif isexternal(var):
+ ua = ''
+ if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:
+ ua = lcb2_map[lcb_map[a]]['argname']
+ if not ua == a:
+ ua = ' => %s' % ua
+ else:
+ ua = ''
+ sig = '%s : call-back function%s' % (a, ua)
+ sigout = sig
+ else:
+ errmess(
+ 'getpydocsign: Could not resolve docsignature for "%s".\n' % a)
+ return sig, sigout
+
+
+def getarrdocsign(a, var):
+ ctype = getctype(var)
+ if isstring(var) and (not isarray(var)):
+ sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a,
+ getstrlength(var))
+ elif isscalar(var):
+ sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype],
+ c2pycode_map[ctype],)
+ elif isarray(var):
+ dim = var['dimension']
+ rank = repr(len(dim))
+ sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank,
+ c2pycode_map[
+ ctype],
+ ','.join(dim))
+ return sig
+
+
+def getinit(a, var):
+ if isstring(var):
+ init, showinit = '""', "''"
+ else:
+ init, showinit = '', ''
+ if hasinitvalue(var):
+ init = var['=']
+ showinit = init
+ if iscomplex(var) or iscomplexarray(var):
+ ret = {}
+
+ try:
+ v = var["="]
+ if ',' in v:
+ ret['init.r'], ret['init.i'] = markoutercomma(
+ v[1:-1]).split('@,@')
+ else:
+ v = eval(v, {}, {})
+ ret['init.r'], ret['init.i'] = str(v.real), str(v.imag)
+ except Exception:
+ raise ValueError(
+ 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a))
+ if isarray(var):
+ init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % (
+ ret['init.r'], ret['init.i'])
+ elif isstring(var):
+ if not init:
+ init, showinit = '""', "''"
+ if init[0] == "'":
+ init = '"%s"' % (init[1:-1].replace('"', '\\"'))
+ if init[0] == '"':
+ showinit = "'%s'" % (init[1:-1])
+ return init, showinit
+
+
+def get_elsize(var):
+ if isstring(var) or isstringarray(var):
+ elsize = getstrlength(var)
+ # override with user-specified length when available:
+ elsize = var['charselector'].get('f2py_len', elsize)
+ return elsize
+ if ischaracter(var) or ischaracterarray(var):
+ return '1'
+ # for numerical types, PyArray_New* functions ignore specified
+ # elsize, so we just return 1 and let elsize be determined at
+ # runtime, see fortranobject.c
+ return '1'
+
+
+def sign2map(a, var):
+ """
+ varname,ctype,atype
+ init,init.r,init.i,pytype
+ vardebuginfo,vardebugshowvalue,varshowvalue
+ varrformat
+
+ intent
+ """
+ out_a = a
+ if isintent_out(var):
+ for k in var['intent']:
+ if k[:4] == 'out=':
+ out_a = k[4:]
+ break
+ ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)}
+ intent_flags = []
+ for f, s in isintent_dict.items():
+ if f(var):
+ intent_flags.append('F2PY_%s' % s)
+ if intent_flags:
+ # TODO: Evaluate intent_flags here.
+ ret['intent'] = '|'.join(intent_flags)
+ else:
+ ret['intent'] = 'F2PY_INTENT_IN'
+ if isarray(var):
+ ret['varrformat'] = 'N'
+ elif ret['ctype'] in c2buildvalue_map:
+ ret['varrformat'] = c2buildvalue_map[ret['ctype']]
+ else:
+ ret['varrformat'] = 'O'
+ ret['init'], ret['showinit'] = getinit(a, var)
+ if hasinitvalue(var) and iscomplex(var) and not isarray(var):
+ ret['init.r'], ret['init.i'] = markoutercomma(
+ ret['init'][1:-1]).split('@,@')
+ if isexternal(var):
+ ret['cbnamekey'] = a
+ if a in lcb_map:
+ ret['cbname'] = lcb_map[a]
+ ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs']
+ ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs']
+ ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr']
+ ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr']
+ else:
+ ret['cbname'] = a
+ errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % (
+ a, list(lcb_map.keys())))
+ if isstring(var):
+ ret['length'] = getstrlength(var)
+ if isarray(var):
+ ret = dictappend(ret, getarrdims(a, var))
+ dim = copy.copy(var['dimension'])
+ if ret['ctype'] in c2capi_map:
+ ret['atype'] = c2capi_map[ret['ctype']]
+ ret['elsize'] = get_elsize(var)
+ # Debug info
+ if debugcapi(var):
+ il = [isintent_in, 'input', isintent_out, 'output',
+ isintent_inout, 'inoutput', isrequired, 'required',
+ isoptional, 'optional', isintent_hide, 'hidden',
+ iscomplex, 'complex scalar',
+ l_and(isscalar, l_not(iscomplex)), 'scalar',
+ isstring, 'string', isarray, 'array',
+ iscomplexarray, 'complex array', isstringarray, 'string array',
+ iscomplexfunction, 'complex function',
+ l_and(isfunction, l_not(iscomplexfunction)), 'function',
+ isexternal, 'callback',
+ isintent_callback, 'callback',
+ isintent_aux, 'auxiliary',
+ ]
+ rl = []
+ for i in range(0, len(il), 2):
+ if il[i](var):
+ rl.append(il[i + 1])
+ if isstring(var):
+ rl.append('slen(%s)=%s' % (a, ret['length']))
+ if isarray(var):
+ ddim = ','.join(
+ map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim))
+ rl.append('dims(%s)' % ddim)
+ if isexternal(var):
+ ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % (
+ a, ret['cbname'], ','.join(rl))
+ else:
+ ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % (
+ ret['ctype'], a, ret['showinit'], ','.join(rl))
+ if isscalar(var):
+ if ret['ctype'] in cformat_map:
+ ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % (
+ a, cformat_map[ret['ctype']])
+ if isstring(var):
+ ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % (
+ a, a)
+ if isexternal(var):
+ ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a)
+ if ret['ctype'] in cformat_map:
+ ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']])
+ ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
+ if isstring(var):
+ ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a)
+ ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
+ if hasnote(var):
+ ret['note'] = var['note']
+ return ret
+
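The intent handling at the top of `sign2map` composes C macro names by OR-ing every matching flag. An editor's sketch with a stubbed predicate table (the real `isintent_dict` lives in auxfuncs.py):

```python
# Editor's sketch: predicates keyed in a dict select F2PY_* intent
# flags, which are joined into a C bitwise-OR expression.
isintent_dict_stub = {
    (lambda v: 'in' in v['intent']): 'INTENT_IN',
    (lambda v: 'out' in v['intent']): 'INTENT_OUT',
    (lambda v: 'hide' in v['intent']): 'INTENT_HIDE',
}
var = {'intent': ['in', 'out']}
flags = ['F2PY_%s' % s for f, s in isintent_dict_stub.items() if f(var)]
print('|'.join(flags) or 'F2PY_INTENT_IN')  # F2PY_INTENT_IN|F2PY_INTENT_OUT
```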
+
+def routsign2map(rout):
+ """
+ name,NAME,begintitle,endtitle
+ rname,ctype,rformat
+ routdebugshowvalue
+ """
+ global lcb_map
+ name = rout['name']
+ fname = getfortranname(rout)
+ ret = {'name': name,
+ 'texname': name.replace('_', '\\_'),
+ 'name_lower': name.lower(),
+ 'NAME': name.upper(),
+ 'begintitle': gentitle(name),
+ 'endtitle': gentitle('end of %s' % name),
+ 'fortranname': fname,
+ 'FORTRANNAME': fname.upper(),
+ 'callstatement': getcallstatement(rout) or '',
+ 'usercode': getusercode(rout) or '',
+ 'usercode1': getusercode1(rout) or '',
+ }
+ if '_' in fname:
+ ret['F_FUNC'] = 'F_FUNC_US'
+ else:
+ ret['F_FUNC'] = 'F_FUNC'
+ if '_' in name:
+ ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US'
+ else:
+ ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC'
+ lcb_map = {}
+ if 'use' in rout:
+ for u in rout['use'].keys():
+ if u in cb_rules.cb_map:
+ for un in cb_rules.cb_map[u]:
+ ln = un[0]
+ if 'map' in rout['use'][u]:
+ for k in rout['use'][u]['map'].keys():
+ if rout['use'][u]['map'][k] == un[0]:
+ ln = k
+ break
+ lcb_map[ln] = un[1]
+ elif 'externals' in rout and rout['externals']:
+ errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % (
+ ret['name'], repr(rout['externals'])))
+ ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or ''
+ if isfunction(rout):
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ ret['rname'] = a
+ ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout)
+ ret['ctype'] = getctype(rout['vars'][a])
+ if hasresultnote(rout):
+ ret['resultnote'] = rout['vars'][a]['note']
+ rout['vars'][a]['note'] = ['See elsewhere.']
+ if ret['ctype'] in c2buildvalue_map:
+ ret['rformat'] = c2buildvalue_map[ret['ctype']]
+ else:
+ ret['rformat'] = 'O'
+ errmess('routsign2map: no c2buildvalue key for type %s\n' %
+ (repr(ret['ctype'])))
+ if debugcapi(rout):
+ if ret['ctype'] in cformat_map:
+ ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % (
+ a, cformat_map[ret['ctype']])
+ if isstringfunction(rout):
+ ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % (
+ a, a)
+ if isstringfunction(rout):
+ ret['rlength'] = getstrlength(rout['vars'][a])
+ if ret['rlength'] == '-1':
+ errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % (
+ repr(rout['name'])))
+ ret['rlength'] = '10'
+ if hasnote(rout):
+ ret['note'] = rout['note']
+ rout['note'] = ['See elsewhere.']
+ return ret
+
+
+def modsign2map(m):
+ """
+ modulename
+ """
+ if ismodule(m):
+ ret = {'f90modulename': m['name'],
+ 'F90MODULENAME': m['name'].upper(),
+ 'texf90modulename': m['name'].replace('_', '\\_')}
+ else:
+ ret = {'modulename': m['name'],
+ 'MODULENAME': m['name'].upper(),
+ 'texmodulename': m['name'].replace('_', '\\_')}
+ ret['restdoc'] = getrestdoc(m) or []
+ if hasnote(m):
+ ret['note'] = m['note']
+ ret['usercode'] = getusercode(m) or ''
+ ret['usercode1'] = getusercode1(m) or ''
+ if m['body']:
+ ret['interface_usercode'] = getusercode(m['body'][0]) or ''
+ else:
+ ret['interface_usercode'] = ''
+ ret['pymethoddef'] = getpymethoddef(m) or ''
+ if 'coutput' in m:
+ ret['coutput'] = m['coutput']
+ if 'f2py_wrapper_output' in m:
+ ret['f2py_wrapper_output'] = m['f2py_wrapper_output']
+ return ret
+
+
+def cb_sign2map(a, var, index=None):
+ ret = {'varname': a}
+ ret['varname_i'] = ret['varname']
+ ret['ctype'] = getctype(var)
+ if ret['ctype'] in c2capi_map:
+ ret['atype'] = c2capi_map[ret['ctype']]
+ ret['elsize'] = get_elsize(var)
+ if ret['ctype'] in cformat_map:
+ ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
+ if isarray(var):
+ ret = dictappend(ret, getarrdims(a, var))
+ ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
+ if hasnote(var):
+ ret['note'] = var['note']
+ var['note'] = ['See elsewhere.']
+ return ret
+
+
+def cb_routsign2map(rout, um):
+ """
+ name,begintitle,endtitle,argname
+ ctype,rctype,maxnofargs,nofoptargs,returncptr
+ """
+ ret = {'name': 'cb_%s_in_%s' % (rout['name'], um),
+ 'returncptr': ''}
+ if isintent_callback(rout):
+ if '_' in rout['name']:
+ F_FUNC = 'F_FUNC_US'
+ else:
+ F_FUNC = 'F_FUNC'
+ ret['callbackname'] = '%s(%s,%s)' \
+ % (F_FUNC,
+ rout['name'].lower(),
+ rout['name'].upper(),
+ )
+ ret['static'] = 'extern'
+ else:
+ ret['callbackname'] = ret['name']
+ ret['static'] = 'static'
+ ret['argname'] = rout['name']
+ ret['begintitle'] = gentitle(ret['name'])
+ ret['endtitle'] = gentitle('end of %s' % ret['name'])
+ ret['ctype'] = getctype(rout)
+ ret['rctype'] = 'void'
+ if ret['ctype'] == 'string':
+ ret['rctype'] = 'void'
+ else:
+ ret['rctype'] = ret['ctype']
+ if ret['rctype'] != 'void':
+ if iscomplexfunction(rout):
+ ret['returncptr'] = """
+#ifdef F2PY_CB_RETURNCOMPLEX
+return_value=
+#endif
+"""
+ else:
+ ret['returncptr'] = 'return_value='
+ if ret['ctype'] in cformat_map:
+ ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
+ if isstringfunction(rout):
+ ret['strlength'] = getstrlength(rout)
+ if isfunction(rout):
+ if 'result' in rout:
+ a = rout['result']
+ else:
+ a = rout['name']
+ if hasnote(rout['vars'][a]):
+ ret['note'] = rout['vars'][a]['note']
+ rout['vars'][a]['note'] = ['See elsewhere.']
+ ret['rname'] = a
+ ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout)
+ if iscomplexfunction(rout):
+ ret['rctype'] = """
+#ifdef F2PY_CB_RETURNCOMPLEX
+#ctype#
+#else
+void
+#endif
+"""
+ else:
+ if hasnote(rout):
+ ret['note'] = rout['note']
+ rout['note'] = ['See elsewhere.']
+ nofargs = 0
+ nofoptargs = 0
+ if 'args' in rout and 'vars' in rout:
+ for a in rout['args']:
+ var = rout['vars'][a]
+ if l_or(isintent_in, isintent_inout)(var):
+ nofargs = nofargs + 1
+ if isoptional(var):
+ nofoptargs = nofoptargs + 1
+ ret['maxnofargs'] = repr(nofargs)
+ ret['nofoptargs'] = repr(nofoptargs)
+ if hasnote(rout) and isfunction(rout) and 'result' in rout:
+ ret['routnote'] = rout['note']
+ rout['note'] = ['See elsewhere.']
+ return ret
+
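The loop at the end of `cb_routsign2map` derives the callback arity: `maxnofargs` counts intent(in)/intent(inout) arguments, `nofoptargs` the optional ones among them. An editor's sketch with a simplified `rout` layout (the real dicts carry many more keys):

```python
# Editor's sketch of the arity counting in cb_routsign2map.
rout = {'args': ['x', 'y', 'n'],
        'vars': {'x': {'intent': ['in']},
                 'y': {'intent': ['in'], 'optional': True},
                 'n': {'intent': ['hide']}}}
nofargs = nofoptargs = 0
for a in rout['args']:
    v = rout['vars'][a]
    if 'in' in v['intent'] or 'inout' in v['intent']:
        nofargs += 1
        if v.get('optional'):
            nofoptargs += 1
print(nofargs, nofoptargs)  # 2 1
```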
+
+def common_sign2map(a, var):  # obsolete
+ ret = {'varname': a, 'ctype': getctype(var)}
+ if isstringarray(var):
+ ret['ctype'] = 'char'
+ if ret['ctype'] in c2capi_map:
+ ret['atype'] = c2capi_map[ret['ctype']]
+ ret['elsize'] = get_elsize(var)
+ if ret['ctype'] in cformat_map:
+ ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
+ if isarray(var):
+ ret = dictappend(ret, getarrdims(a, var))
+ elif isstring(var):
+ ret['size'] = getstrlength(var)
+ ret['rank'] = '1'
+ ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
+ if hasnote(var):
+ ret['note'] = var['note']
+ var['note'] = ['See elsewhere.']
+    # for strings this reports a rank-0 signature, but the value is actually rank-1
+ ret['arrdocstr'] = getarrdocsign(a, var)
+ return ret
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/cb_rules.py b/venv/lib/python3.9/site-packages/numpy/f2py/cb_rules.py
new file mode 100644
index 00000000..761831e0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/cb_rules.py
@@ -0,0 +1,649 @@
+#!/usr/bin/env python3
+"""
+
+Build call-back mechanism for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/07/20 11:27:58 $
+Pearu Peterson
+
+"""
+from . import __version__
+from .auxfuncs import (
+ applyrules, debugcapi, dictappend, errmess, getargs, hasnote, isarray,
+ iscomplex, iscomplexarray, iscomplexfunction, isfunction, isintent_c,
+ isintent_hide, isintent_in, isintent_inout, isintent_nothide,
+ isintent_out, isoptional, isrequired, isscalar, isstring,
+ isstringfunction, issubroutine, l_and, l_not, l_or, outmess, replace,
+ stripcomma, throw_error
+)
+from . import cfuncs
+
+f2py_version = __version__.version
+
+
+################## Rules for callback function ##############
+
+cb_routine_rules = {
+ 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
+ 'body': """
+#begintitle#
+typedef struct {
+ PyObject *capi;
+ PyTupleObject *args_capi;
+ int nofargs;
+ jmp_buf jmpbuf;
+} #name#_t;
+
+#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS)
+
+static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL;
+
+static #name#_t *swap_active_#name#(#name#_t *ptr) {
+ #name#_t *prev = _active_#name#;
+ _active_#name# = ptr;
+ return prev;
+}
+
+static #name#_t *get_active_#name#(void) {
+ return _active_#name#;
+}
+
+#else
+
+static #name#_t *swap_active_#name#(#name#_t *ptr) {
+ char *key = "__f2py_cb_#name#";
+ return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr);
+}
+
+static #name#_t *get_active_#name#(void) {
+ char *key = "__f2py_cb_#name#";
+ return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key);
+}
+
+#endif
+
+/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
+#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
+ #name#_t cb_local = { NULL, NULL, 0 };
+ #name#_t *cb = NULL;
+ PyTupleObject *capi_arglist = NULL;
+ PyObject *capi_return = NULL;
+ PyObject *capi_tmp = NULL;
+ PyObject *capi_arglist_list = NULL;
+ int capi_j,capi_i = 0;
+ int capi_longjmp_ok = 1;
+#decl#
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_start_clock();
+#endif
+ cb = get_active_#name#();
+ if (cb == NULL) {
+ capi_longjmp_ok = 0;
+ cb = &cb_local;
+ }
+ capi_arglist = cb->args_capi;
+ CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
+ CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi);
+ if (cb->capi==NULL) {
+ capi_longjmp_ok = 0;
+ cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
+ CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi);
+ }
+ if (cb->capi==NULL) {
+ PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
+ goto capi_fail;
+ }
+ if (F2PyCapsule_Check(cb->capi)) {
+ #name#_typedef #name#_cptr;
+ #name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi);
+ #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#);
+ #return#
+ }
+ if (capi_arglist==NULL) {
+ capi_longjmp_ok = 0;
+ capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
+ if (capi_tmp) {
+ capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+ Py_DECREF(capi_tmp);
+ if (capi_arglist==NULL) {
+ PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
+ goto capi_fail;
+ }
+ } else {
+ PyErr_Clear();
+ capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\");
+ }
+ }
+ if (capi_arglist == NULL) {
+ PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\");
+ goto capi_fail;
+ }
+#setdims#
+#ifdef PYPY_VERSION
+#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value)
+ capi_arglist_list = PySequence_List(capi_arglist);
+ if (capi_arglist_list == NULL) goto capi_fail;
+#else
+#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value)
+#endif
+#pyobjfrom#
+#undef CAPI_ARGLIST_SETITEM
+#ifdef PYPY_VERSION
+ CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list);
+#else
+ CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist);
+#endif
+ CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\");
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_start_call_clock();
+#endif
+#ifdef PYPY_VERSION
+ capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list);
+ Py_DECREF(capi_arglist_list);
+ capi_arglist_list = NULL;
+#else
+ capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist);
+#endif
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_stop_call_clock();
+#endif
+ CFUNCSMESSPY(\"cb:capi_return=\",capi_return);
+ if (capi_return == NULL) {
+ fprintf(stderr,\"capi_return is NULL\\n\");
+ goto capi_fail;
+ }
+ if (capi_return == Py_None) {
+ Py_DECREF(capi_return);
+ capi_return = Py_BuildValue(\"()\");
+ }
+ else if (!PyTuple_Check(capi_return)) {
+ capi_return = Py_BuildValue(\"(N)\",capi_return);
+ }
+ capi_j = PyTuple_Size(capi_return);
+ capi_i = 0;
+#frompyobj#
+ CFUNCSMESS(\"cb:#name#:successful\\n\");
+ Py_DECREF(capi_return);
+#ifdef F2PY_REPORT_ATEXIT
+f2py_cb_stop_clock();
+#endif
+ goto capi_return_pt;
+capi_fail:
+ fprintf(stderr,\"Call-back #name# failed.\\n\");
+ Py_XDECREF(capi_return);
+ Py_XDECREF(capi_arglist_list);
+ if (capi_longjmp_ok) {
+ longjmp(cb->jmpbuf,-1);
+ }
+capi_return_pt:
+ ;
+#return#
+}
+#endtitle#
+""",
+ 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'],
+ 'maxnofargs': '#maxnofargs#',
+ 'nofoptargs': '#nofoptargs#',
+ 'docstr': """\
+ def #argname#(#docsignature#): return #docreturn#\\n\\
+#docstrsigns#""",
+ 'latexdocstr': """
+{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
+#routnote#
+
+#latexdocstrsigns#""",
+ 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#'
+}
+cb_rout_rules = [
+ { # Init
+ 'separatorsfor': {'decl': '\n',
+ 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n',
+ 'args_td': ',', 'optargs_td': '',
+ 'args_nm': ',', 'optargs_nm': '',
+ 'frompyobj': '\n', 'setdims': '\n',
+ 'docstrsigns': '\\n"\n"',
+ 'latexdocstrsigns': '\n',
+ 'latexdocstrreq': '\n', 'latexdocstropt': '\n',
+ 'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
+ },
+ 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/',
+ 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/',
+ 'args_td': [], 'optargs_td': '', 'strarglens_td': '',
+ 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '',
+ 'noargs': '',
+ 'setdims': '/*setdims*/',
+ 'docstrsigns': '', 'latexdocstrsigns': '',
+ 'docstrreq': ' Required arguments:',
+ 'docstropt': ' Optional arguments:',
+ 'docstrout': ' Return objects:',
+ 'docstrcbs': ' Call-back functions:',
+ 'docreturn': '', 'docsign': '', 'docsignopt': '',
+ 'latexdocstrreq': '\\noindent Required arguments:',
+ 'latexdocstropt': '\\noindent Optional arguments:',
+ 'latexdocstrout': '\\noindent Return objects:',
+ 'latexdocstrcbs': '\\noindent Call-back functions:',
+ 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
+ }, { # Function
+ 'decl': ' #ctype# return_value = 0;',
+ 'frompyobj': [
+ {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'},
+ '''\
+ if (capi_j>capi_i) {
+ GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,
+ "#ctype#_from_pyobj failed in converting return_value of"
+ " call-back function #name# to C #ctype#\\n");
+ } else {
+ fprintf(stderr,"Warning: call-back function #name# did not provide"
+ " return value (index=%d, type=#ctype#)\\n",capi_i);
+ }''',
+ {debugcapi:
+ ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'}
+ ],
+ 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'],
+ 'return': ' return return_value;',
+ '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction))
+ },
+ { # String function
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'},
+ 'args': '#ctype# return_value,int return_value_len',
+ 'args_nm': 'return_value,&return_value_len',
+ 'args_td': '#ctype# ,int',
+ 'frompyobj': [
+ {debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'},
+ """\
+ if (capi_j>capi_i) {
+ GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);
+ } else {
+ fprintf(stderr,"Warning: call-back function #name# did not provide"
+ " return value (index=%d, type=#ctype#)\\n",capi_i);
+ }""",
+ {debugcapi:
+ ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'}
+ ],
+ 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
+ 'string.h', 'GETSTRFROMPYTUPLE'],
+ 'return': 'return;',
+ '_check': isstringfunction
+ },
+ { # Complex function
+ 'optargs': """
+#ifndef F2PY_CB_RETURNCOMPLEX
+#ctype# *return_value
+#endif
+""",
+ 'optargs_nm': """
+#ifndef F2PY_CB_RETURNCOMPLEX
+return_value
+#endif
+""",
+ 'optargs_td': """
+#ifndef F2PY_CB_RETURNCOMPLEX
+#ctype# *
+#endif
+""",
+ 'decl': """
+#ifdef F2PY_CB_RETURNCOMPLEX
+ #ctype# return_value = {0, 0};
+#endif
+""",
+ 'frompyobj': [
+ {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'},
+ """\
+ if (capi_j>capi_i) {
+#ifdef F2PY_CB_RETURNCOMPLEX
+ GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,
+ \"#ctype#_from_pyobj failed in converting return_value of call-back\"
+ \" function #name# to C #ctype#\\n\");
+#else
+ GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,
+ \"#ctype#_from_pyobj failed in converting return_value of call-back\"
+ \" function #name# to C #ctype#\\n\");
+#endif
+ } else {
+ fprintf(stderr,
+ \"Warning: call-back function #name# did not provide\"
+ \" return value (index=%d, type=#ctype#)\\n\",capi_i);
+ }""",
+ {debugcapi: """\
+#ifdef F2PY_CB_RETURNCOMPLEX
+ fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i);
+#else
+ fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i);
+#endif
+"""}
+ ],
+ 'return': """
+#ifdef F2PY_CB_RETURNCOMPLEX
+ return return_value;
+#else
+ return;
+#endif
+""",
+ 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'},
+ 'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'],
+ '_check': iscomplexfunction
+ },
+ {'docstrout': ' #pydocsignout#',
+ 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {hasnote: '--- #note#'}],
+ 'docreturn': '#rname#,',
+ '_check': isfunction},
+ {'_check': issubroutine, 'return': 'return;'}
+]
+
+cb_arg_rules = [
+ { # Doc
+ 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'},
+ 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'},
+ 'docstrout': {isintent_out: ' #pydocsignout#'},
+ 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote: '--- #note#'}]},
+ 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote: '--- #note#'}]},
+ 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {l_and(hasnote, isintent_hide): '--- #note#',
+ l_and(hasnote, isintent_nothide): '--- See above.'}]},
+ 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'},
+ 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'},
+ 'depend': ''
+ },
+ {
+ 'args': {
+ l_and(isscalar, isintent_c): '#ctype# #varname_i#',
+ l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi',
+ isarray: '#ctype# *#varname_i#',
+ isstring: '#ctype# #varname_i#'
+ },
+ 'args_nm': {
+ l_and(isscalar, isintent_c): '#varname_i#',
+ l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi',
+ isarray: '#varname_i#',
+ isstring: '#varname_i#'
+ },
+ 'args_td': {
+ l_and(isscalar, isintent_c): '#ctype#',
+ l_and(isscalar, l_not(isintent_c)): '#ctype# *',
+ isarray: '#ctype# *',
+ isstring: '#ctype#'
+ },
+ 'need': {l_or(isscalar, isarray, isstring): '#ctype#'},
+ # untested with multiple args
+ 'strarglens': {isstring: ',int #varname_i#_cb_len'},
+ 'strarglens_td': {isstring: ',int'}, # untested with multiple args
+ # untested with multiple args
+ 'strarglens_nm': {isstring: ',#varname_i#_cb_len'},
+ },
+ { # Scalars
+ 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'},
+ 'error': {l_and(isintent_c, isintent_out,
+ throw_error('intent(c,out) is forbidden for callback scalar arguments')):
+ ''},
+ 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'},
+ {isintent_out:
+ ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'},
+ {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)):
+ ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'},
+ {l_and(debugcapi, l_and(l_not(iscomplex), l_not( isintent_c))):
+ ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'},
+ {l_and(debugcapi, l_and(iscomplex, isintent_c)):
+ ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'},
+ {l_and(debugcapi, l_and(iscomplex, l_not( isintent_c))):
+ ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'},
+ ],
+ 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']},
+ {debugcapi: 'CFUNCSMESS'}],
+ '_check': isscalar
+ }, {
+ 'pyobjfrom': [{isintent_in: """\
+ if (cb->nofargs>capi_i)
+ if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#)))
+ goto capi_fail;"""},
+ {isintent_inout: """\
+ if (cb->nofargs>capi_i)
+ if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi)))
+ goto capi_fail;"""}],
+ 'need': [{isintent_in: 'pyobj_from_#ctype#1'},
+ {isintent_inout: 'pyarr_from_p_#ctype#1'},
+ {iscomplex: '#ctype#'}],
+ '_check': l_and(isscalar, isintent_nothide),
+ '_optional': ''
+ }, { # String
+ 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'},
+ """ if (capi_j>capi_i)
+ GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""",
+ {debugcapi:
+ ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'},
+ ],
+ 'need': ['#ctype#', 'GETSTRFROMPYTUPLE',
+ {debugcapi: 'CFUNCSMESS'}, 'string.h'],
+ '_check': l_and(isstring, isintent_out)
+ }, {
+ 'pyobjfrom': [
+ {debugcapi:
+ (' fprintf(stderr,"debug-capi:cb:#varname#=#showvalueformat#:'
+ '%d:\\n",#varname_i#,#varname_i#_cb_len);')},
+ {isintent_in: """\
+ if (cb->nofargs>capi_i)
+ if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len)))
+ goto capi_fail;"""},
+ {isintent_inout: """\
+ if (cb->nofargs>capi_i) {
+ int #varname_i#_cb_dims[] = {#varname_i#_cb_len};
+ if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims)))
+ goto capi_fail;
+ }"""}],
+ 'need': [{isintent_in: 'pyobj_from_#ctype#1size'},
+ {isintent_inout: 'pyarr_from_p_#ctype#1'}],
+ '_check': l_and(isstring, isintent_nothide),
+ '_optional': ''
+ },
+ # Array ...
+ {
+ 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};',
+ 'setdims': ' #cbsetdims#;',
+ '_check': isarray,
+ '_depend': ''
+ },
+ {
+ 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'},
+ {isintent_c: """\
+ if (cb->nofargs>capi_i) {
+        /* tmp_arr will be inserted into capi_arglist_list, which is
+           destroyed (together with tmp_arr) when leaving the callback
+           function wrapper. */
+ PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,
+ #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#,
+ NPY_ARRAY_CARRAY,NULL);
+""",
+ l_not(isintent_c): """\
+ if (cb->nofargs>capi_i) {
+        /* tmp_arr will be inserted into capi_arglist_list, which is
+           destroyed (together with tmp_arr) when leaving the callback
+           function wrapper. */
+ PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,
+ #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#,
+ NPY_ARRAY_FARRAY,NULL);
+""",
+ },
+ """
+ if (tmp_arr==NULL)
+ goto capi_fail;
+ if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr))
+ goto capi_fail;
+}"""],
+ '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)),
+ '_optional': '',
+ }, {
+ 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'},
+ """ if (capi_j>capi_i) {
+ PyArrayObject *rv_cb_arr = NULL;
+ if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail;
+ rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""",
+ {isintent_c: '|F2PY_INTENT_C'},
+ """,capi_tmp);
+ if (rv_cb_arr == NULL) {
+ fprintf(stderr,\"rv_cb_arr is NULL\\n\");
+ goto capi_fail;
+ }
+ MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr));
+ if (capi_tmp != (PyObject *)rv_cb_arr) {
+ Py_DECREF(rv_cb_arr);
+ }
+ }""",
+ {debugcapi: ' fprintf(stderr,"<-.\\n");'},
+ ],
+ 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}],
+ '_check': l_and(isarray, isintent_out)
+ }, {
+ 'docreturn': '#varname#,',
+ '_check': isintent_out
+ }
+]
+
+################## Build call-back module #############
+cb_map = {}
+
+
+def buildcallbacks(m):
+ cb_map[m['name']] = []
+ for bi in m['body']:
+ if bi['block'] == 'interface':
+ for b in bi['body']:
+ if b:
+ buildcallback(b, m['name'])
+ else:
+ errmess('warning: empty body for %s\n' % (m['name']))
+
+
+def buildcallback(rout, um):
+ from . import capi_maps
+
+ outmess(' Constructing call-back function "cb_%s_in_%s"\n' %
+ (rout['name'], um))
+ args, depargs = getargs(rout)
+ capi_maps.depargs = depargs
+ var = rout['vars']
+ vrd = capi_maps.cb_routsign2map(rout, um)
+ rd = dictappend({}, vrd)
+ cb_map[um].append([rout['name'], rd['name']])
+ for r in cb_rout_rules:
+ if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
+ ar = applyrules(r, vrd, rout)
+ rd = dictappend(rd, ar)
+ savevrd = {}
+ for i, a in enumerate(args):
+ vrd = capi_maps.cb_sign2map(a, var[a], index=i)
+ savevrd[a] = vrd
+ for r in cb_arg_rules:
+ if '_depend' in r:
+ continue
+ if '_optional' in r and isoptional(var[a]):
+ continue
+ if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+ ar = applyrules(r, vrd, var[a])
+ rd = dictappend(rd, ar)
+ if '_break' in r:
+ break
+ for a in args:
+ vrd = savevrd[a]
+ for r in cb_arg_rules:
+ if '_depend' in r:
+ continue
+ if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])):
+ continue
+ if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+ ar = applyrules(r, vrd, var[a])
+ rd = dictappend(rd, ar)
+ if '_break' in r:
+ break
+ for a in depargs:
+ vrd = savevrd[a]
+ for r in cb_arg_rules:
+ if '_depend' not in r:
+ continue
+ if '_optional' in r:
+ continue
+ if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+ ar = applyrules(r, vrd, var[a])
+ rd = dictappend(rd, ar)
+ if '_break' in r:
+ break
+ if 'args' in rd and 'optargs' in rd:
+ if isinstance(rd['optargs'], list):
+ rd['optargs'] = rd['optargs'] + ["""
+#ifndef F2PY_CB_RETURNCOMPLEX
+,
+#endif
+"""]
+ rd['optargs_nm'] = rd['optargs_nm'] + ["""
+#ifndef F2PY_CB_RETURNCOMPLEX
+,
+#endif
+"""]
+ rd['optargs_td'] = rd['optargs_td'] + ["""
+#ifndef F2PY_CB_RETURNCOMPLEX
+,
+#endif
+"""]
+ if isinstance(rd['docreturn'], list):
+ rd['docreturn'] = stripcomma(
+ replace('#docreturn#', {'docreturn': rd['docreturn']}))
+ optargs = stripcomma(replace('#docsignopt#',
+ {'docsignopt': rd['docsignopt']}
+ ))
+ if optargs == '':
+ rd['docsignature'] = stripcomma(
+ replace('#docsign#', {'docsign': rd['docsign']}))
+ else:
+ rd['docsignature'] = replace('#docsign#[#docsignopt#]',
+ {'docsign': rd['docsign'],
+ 'docsignopt': optargs,
+ })
+ rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_')
+ rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ')
+ rd['docstrsigns'] = []
+ rd['latexdocstrsigns'] = []
+ for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
+ if k in rd and isinstance(rd[k], list):
+ rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
+ k = 'latex' + k
+ if k in rd and isinstance(rd[k], list):
+ rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
+ ['\\begin{description}'] + rd[k][1:] +\
+ ['\\end{description}']
+ if 'args' not in rd:
+ rd['args'] = ''
+ rd['args_td'] = ''
+ rd['args_nm'] = ''
+ if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')):
+ rd['noargs'] = 'void'
+
+ ar = applyrules(cb_routine_rules, rd)
+ cfuncs.callbacks[rd['name']] = ar['body']
+ if isinstance(ar['need'], str):
+ ar['need'] = [ar['need']]
+
+ if 'need' in rd:
+ for t in cfuncs.typedefs.keys():
+ if t in rd['need']:
+ ar['need'].append(t)
+
+ cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs']
+ ar['need'].append(rd['name'] + '_typedef')
+ cfuncs.needs[rd['name']] = ar['need']
+
+ capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'],
+ 'nofoptargs': ar['nofoptargs'],
+ 'docstr': ar['docstr'],
+ 'latexdocstr': ar['latexdocstr'],
+ 'argname': rd['argname']
+ }
+ outmess(' %s\n' % (ar['docstrshort']))
+ return
+################## Build call-back function #############
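Every table above (`cb_rout_rules`, `cb_arg_rules`) follows one pattern: an optional `_check` predicate gates the rule, and string values carry `#placeholder#` slots filled from a substitution map. A compressed editor's sketch of that pattern; the real `applyrules`/`dictappend` in auxfuncs.py also handle nested dicts, lists, and the `_depend`/`_optional` control keys:

```python
# Editor's sketch of the rule-dict machinery, heavily simplified.
def apply_rules_sketch(rules, subs, var):
    out = {}
    for rule in rules:
        if '_check' in rule and not rule['_check'](var):
            continue                  # rule does not apply to this var
        for key, text in rule.items():
            if key.startswith('_'):
                continue              # control keys are not emitted
            for name, value in subs.items():
                text = text.replace('#%s#' % name, value)
            out.setdefault(key, []).append(text)
    return out

rules = [{'decl': '    #ctype# #varname#;',
          '_check': lambda v: v.get('scalar', False)}]
print(apply_rules_sketch(rules, {'ctype': 'double', 'varname': 'x'},
                         {'scalar': True}))
# {'decl': ['    double x;']}
```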
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/cfuncs.py b/venv/lib/python3.9/site-packages/numpy/f2py/cfuncs.py
new file mode 100644
index 00000000..74169256
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/cfuncs.py
@@ -0,0 +1,1522 @@
+#!/usr/bin/env python3
+"""
+
+C declarations, CPP macros, and C functions for f2py2e.
+Only required declarations/macros/functions will be used.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 11:42:34 $
+Pearu Peterson
+
+"""
+import sys
+import copy
+
+from . import __version__
+
+f2py_version = __version__.version
+errmess = sys.stderr.write
+
+##################### Definitions ##################
+
+outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [],
+ 'userincludes': [],
+ 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [],
+ 'commonhooks': []}
+needs = {}
+includes0 = {'includes0': '/*need_includes0*/'}
+includes = {'includes': '/*need_includes*/'}
+userincludes = {'userincludes': '/*need_userincludes*/'}
+typedefs = {'typedefs': '/*need_typedefs*/'}
+typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'}
+cppmacros = {'cppmacros': '/*need_cppmacros*/'}
+cfuncs = {'cfuncs': '/*need_cfuncs*/'}
+callbacks = {'callbacks': '/*need_callbacks*/'}
+f90modhooks = {'f90modhooks': '/*need_f90modhooks*/',
+ 'initf90modhooksstatic': '/*initf90modhooksstatic*/',
+ 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/',
+ }
+commonhooks = {'commonhooks': '/*need_commonhooks*/',
+ 'initcommonhooks': '/*need_initcommonhooks*/',
+ }
+
+############ Includes ###################
+
+includes0['math.h'] = '#include <math.h>'
+includes0['string.h'] = '#include <string.h>'
+includes0['setjmp.h'] = '#include <setjmp.h>'
+
+includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
+#include "arrayobject.h"'''
+
+includes['arrayobject.h'] = '#include "fortranobject.h"'
+includes['stdarg.h'] = '#include <stdarg.h>'
+
+############# Type definitions ###############
+
+typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;'
+typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;'
+typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;'
+typedefs['signed_char'] = 'typedef signed char signed_char;'
+typedefs['long_long'] = """\
+#if defined(NPY_OS_WIN32)
+typedef __int64 long_long;
+#else
+typedef long long long_long;
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['unsigned_long_long'] = """\
+#if defined(NPY_OS_WIN32)
+typedef unsigned __int64 unsigned_long_long;
+#else
+typedef unsigned long long unsigned_long_long;
+#endif
+"""
+typedefs['long_double'] = """\
+#ifndef _LONG_DOUBLE
+typedef long double long_double;
+#endif
+"""
+typedefs[
+ 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;'
+typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;'
+typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;'
+typedefs['string'] = """typedef char * string;"""
+typedefs['character'] = """typedef char character;"""
+
+
+############### CPP macros ####################
+cppmacros['CFUNCSMESS'] = """\
+#ifdef DEBUGCFUNCS
+#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
+#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
+ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+ fprintf(stderr,\"\\n\");
+#else
+#define CFUNCSMESS(mess)
+#define CFUNCSMESSPY(mess,obj)
+#endif
+"""
+cppmacros['F_FUNC'] = """\
+#if defined(PREPEND_FORTRAN)
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) _##F
+#else
+#define F_FUNC(f,F) _##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) _##F##_
+#else
+#define F_FUNC(f,F) _##f##_
+#endif
+#endif
+#else
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F
+#else
+#define F_FUNC(f,F) f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_FUNC(f,F) F##_
+#else
+#define F_FUNC(f,F) f##_
+#endif
+#endif
+#endif
+#if defined(UNDERSCORE_G77)
+#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
+#else
+#define F_FUNC_US(f,F) F_FUNC(f,F)
+#endif
+"""
+cppmacros['F_WRAPPEDFUNC'] = """\
+#if defined(PREPEND_FORTRAN)
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
+#else
+#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
+#else
+#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
+#endif
+#endif
+#else
+#if defined(NO_APPEND_FORTRAN)
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
+#else
+#define F_WRAPPEDFUNC(f,F) f2pywrap##f
+#endif
+#else
+#if defined(UPPERCASE_FORTRAN)
+#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
+#else
+#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
+#endif
+#endif
+#endif
+#if defined(UNDERSCORE_G77)
+#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
+#else
+#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
+#endif
+"""
+cppmacros['F_MODFUNC'] = """\
+#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
+#if defined(NO_APPEND_FORTRAN)
+#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
+#else
+#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
+#endif
+#endif
+
+#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
+#if defined(NO_APPEND_FORTRAN)
+#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
+#else
+#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
+#endif
+#endif
+
+#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
+#if defined(NO_APPEND_FORTRAN)
+#define F_MODFUNCNAME(m,f) f ## .in. ## m
+#else
+#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
+#endif
+#endif
+/*
+#if defined(UPPERCASE_FORTRAN)
+#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
+#else
+#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
+#endif
+*/
+
+#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
+"""
+cppmacros['SWAPUNSAFE'] = """\
+#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
+ (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
+ (size_t)(a) = ((size_t)(a) ^ (size_t)(b))
+"""
+cppmacros['SWAP'] = """\
+#define SWAP(a,b,t) {\\
+ t *c;\\
+ c = a;\\
+ a = b;\\
+ b = c;}
+"""
+# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) &
+# NPY_ARRAY_C_CONTIGUOUS)'
+cppmacros['PRINTPYOBJERR'] = """\
+#define PRINTPYOBJERR(obj)\\
+ fprintf(stderr,\"#modulename#.error is related to \");\\
+ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+ fprintf(stderr,\"\\n\");
+"""
+cppmacros['MINMAX'] = """\
+#ifndef max
+#define max(a,b) ((a > b) ? (a) : (b))
+#endif
+#ifndef min
+#define min(a,b) ((a < b) ? (a) : (b))
+#endif
+#ifndef MAX
+#define MAX(a,b) ((a > b) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a,b) ((a < b) ? (a) : (b))
+#endif
+"""
+cppmacros['len..'] = """\
+/* See fortranobject.h for definitions. The macros here are provided for BC. */
+#define rank f2py_rank
+#define shape f2py_shape
+#define fshape f2py_shape
+#define len f2py_len
+#define flen f2py_flen
+#define slen f2py_slen
+#define size f2py_size
+"""
+cppmacros[
+ 'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyLong_FromLong(v))'
+cppmacros[
+ 'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyLong_FromLong(v))'
+needs['pyobj_from_int1'] = ['signed_char']
+cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyLong_FromLong(v))'
+cppmacros[
+ 'pyobj_from_long1'] = '#define pyobj_from_long1(v) (PyLong_FromLong(v))'
+needs['pyobj_from_long_long1'] = ['long_long']
+cppmacros['pyobj_from_long_long1'] = """\
+#ifdef HAVE_LONG_LONG
+#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v))
+#else
+#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long.
+#define pyobj_from_long_long1(v) (PyLong_FromLong(v))
+#endif
+"""
+needs['pyobj_from_long_double1'] = ['long_double']
+cppmacros[
+ 'pyobj_from_long_double1'] = '#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))'
+cppmacros[
+ 'pyobj_from_double1'] = '#define pyobj_from_double1(v) (PyFloat_FromDouble(v))'
+cppmacros[
+ 'pyobj_from_float1'] = '#define pyobj_from_float1(v) (PyFloat_FromDouble(v))'
+needs['pyobj_from_complex_long_double1'] = ['complex_long_double']
+cppmacros[
+ 'pyobj_from_complex_long_double1'] = '#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
+needs['pyobj_from_complex_double1'] = ['complex_double']
+cppmacros[
+ 'pyobj_from_complex_double1'] = '#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))'
+needs['pyobj_from_complex_float1'] = ['complex_float']
+cppmacros[
+ 'pyobj_from_complex_float1'] = '#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))'
+needs['pyobj_from_string1'] = ['string']
+cppmacros[
+ 'pyobj_from_string1'] = '#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))'
+needs['pyobj_from_string1size'] = ['string']
+cppmacros[
+ 'pyobj_from_string1size'] = '#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))'
+needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
+cppmacros['TRYPYARRAYTEMPLATE'] = """\
+/* New SciPy */
+#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break;
+#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;
+#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break;
+
+#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
+ PyArrayObject *arr = NULL;\\
+ if (!obj) return -2;\\
+ if (!PyArray_Check(obj)) return -1;\\
+ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
+ if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\
+ switch (PyArray_TYPE(arr)) {\\
+ case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\
+ case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\
+ default: return -2;\\
+ };\\
+ return 1
+"""
+
+needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR']
+cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
+#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;
+#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
+ PyArrayObject *arr = NULL;\\
+ if (!obj) return -2;\\
+ if (!PyArray_Check(obj)) return -1;\\
+ if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
+ if (PyArray_DESCR(arr)->type==typecode) {\\
+ *(ctype *)(PyArray_DATA(arr))=(*v).r;\\
+ *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\
+ return 1;\\
+ }\\
+ switch (PyArray_TYPE(arr)) {\\
+ case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\
+ *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\
+ break;\\
+ case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\
+ *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\
+ break;\\
+ case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\
+        case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 || (*v).i!=0); break;\\
+ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\
+ *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\
+ break;\\
+ case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\
+ default: return -2;\\
+ };\\
+ return -1;
+"""
+# cppmacros['NUMFROMARROBJ']="""\
+# define NUMFROMARROBJ(typenum,ctype) \\
+# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+# if (arr) {\\
+# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+# goto capi_fail;\\
+# } else {\\
+# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
+# }\\
+# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+# return 1;\\
+# }
+# """
+# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
+# cppmacros['CNUMFROMARROBJ']="""\
+# define CNUMFROMARROBJ(typenum,ctype) \\
+# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+# if (arr) {\\
+# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+# goto capi_fail;\\
+# } else {\\
+# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
+# }\\
+# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+# return 1;\\
+# }
+# """
+
+
+needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR']
+cppmacros['GETSTRFROMPYTUPLE'] = """\
+#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
+ PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
+ if (rv_cb_str == NULL)\\
+ goto capi_fail;\\
+ if (PyBytes_Check(rv_cb_str)) {\\
+ str[len-1]='\\0';\\
+ STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\
+ } else {\\
+ PRINTPYOBJERR(rv_cb_str);\\
+ PyErr_SetString(#modulename#_error,\"string object expected\");\\
+ goto capi_fail;\\
+ }\\
+ }
+"""
+cppmacros['GETSCALARFROMPYTUPLE'] = """\
+#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
+ if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
+ if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
+ goto capi_fail;\\
+ }
+"""
+
+cppmacros['FAILNULL'] = """\\
+#define FAILNULL(p) do { \\
+ if ((p) == NULL) { \\
+ PyErr_SetString(PyExc_MemoryError, "NULL pointer found"); \\
+ goto capi_fail; \\
+ } \\
+} while (0)
+"""
+needs['MEMCOPY'] = ['string.h', 'FAILNULL']
+cppmacros['MEMCOPY'] = """\
+#define MEMCOPY(to,from,n)\\
+ do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
+"""
+cppmacros['STRINGMALLOC'] = """\
+#define STRINGMALLOC(str,len)\\
+ if ((str = (string)malloc(len+1)) == NULL) {\\
+ PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
+ goto capi_fail;\\
+ } else {\\
+ (str)[len] = '\\0';\\
+ }
+"""
+cppmacros['STRINGFREE'] = """\
+#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
+"""
+needs['STRINGPADN'] = ['string.h']
+cppmacros['STRINGPADN'] = """\
+/*
+STRINGPADN replaces null values with padding values from the right.
+
+`to` must have size of at least N bytes.
+
+If `to[N-1]` holds the null value, replace it and all the
+immediately preceding nulls with the given padding.
+
+Swapping the last two arguments, STRINGPADN(to, N, PADDING, NULLVALUE),
+performs the inverse operation.
+*/
+#define STRINGPADN(to, N, NULLVALUE, PADDING) \\
+ do { \\
+ int _m = (N); \\
+ char *_to = (to); \\
+ for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) { \\
+ _to[_m] = PADDING; \\
+ } \\
+ } while (0)
+"""
+needs['STRINGCOPYN'] = ['string.h', 'FAILNULL']
+cppmacros['STRINGCOPYN'] = """\
+/*
+STRINGCOPYN copies N bytes.
+
+`to` and `from` buffers must have sizes of at least N bytes.
+*/
+#define STRINGCOPYN(to,from,N) \\
+ do { \\
+ int _m = (N); \\
+ char *_to = (to); \\
+ char *_from = (from); \\
+ FAILNULL(_to); FAILNULL(_from); \\
+ (void)strncpy(_to, _from, _m); \\
+ } while (0)
+"""
+needs['STRINGCOPY'] = ['string.h', 'FAILNULL']
+cppmacros['STRINGCOPY'] = """\
+#define STRINGCOPY(to,from)\\
+ do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0)
+"""
+cppmacros['CHECKGENERIC'] = """\
+#define CHECKGENERIC(check,tcheck,name) \\
+ if (!(check)) {\\
+ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+ /*goto capi_fail;*/\\
+ } else """
+cppmacros['CHECKARRAY'] = """\
+#define CHECKARRAY(check,tcheck,name) \\
+ if (!(check)) {\\
+ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+ /*goto capi_fail;*/\\
+ } else """
+cppmacros['CHECKSTRING'] = """\
+#define CHECKSTRING(check,tcheck,name,show,var)\\
+ if (!(check)) {\\
+ char errstring[256];\\
+ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
+ PyErr_SetString(#modulename#_error, errstring);\\
+ /*goto capi_fail;*/\\
+ } else """
+cppmacros['CHECKSCALAR'] = """\
+#define CHECKSCALAR(check,tcheck,name,show,var)\\
+ if (!(check)) {\\
+ char errstring[256];\\
+ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
+ PyErr_SetString(#modulename#_error,errstring);\\
+ /*goto capi_fail;*/\\
+ } else """
+# cppmacros['CHECKDIMS']="""\
+# define CHECKDIMS(dims,rank) \\
+# for (int i=0;i<(rank);i++)\\
+# if (dims[i]<0) {\\
+# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
+# goto capi_fail;\\
+# }
+# """
+cppmacros[
+ 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
+cppmacros['OLDPYNUM'] = """\
+#ifdef OLDPYNUM
+#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html
+#endif
+"""
+cppmacros["F2PY_THREAD_LOCAL_DECL"] = """\
+#ifndef F2PY_THREAD_LOCAL_DECL
+#if defined(_MSC_VER)
+#define F2PY_THREAD_LOCAL_DECL __declspec(thread)
+#elif defined(NPY_OS_MINGW)
+#define F2PY_THREAD_LOCAL_DECL __thread
+#elif defined(__STDC_VERSION__) \\
+ && (__STDC_VERSION__ >= 201112L) \\
+ && !defined(__STDC_NO_THREADS__) \\
+ && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\
+ && !defined(NPY_OS_OPENBSD) && !defined(NPY_OS_HAIKU)
+/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12,
+ see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html,
+   so `!defined(__STDC_NO_THREADS__)` may give a false positive for the
+   existence of `threads.h` when using an older release of glibc 2.12.
+   See gh-19437 for details on OpenBSD. */
+#include <threads.h>
+#define F2PY_THREAD_LOCAL_DECL thread_local
+#elif defined(__GNUC__) \\
+ && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4)))
+#define F2PY_THREAD_LOCAL_DECL __thread
+#endif
+#endif
+"""
+################# C functions ###############
+
+cfuncs['calcarrindex'] = """\
+static int calcarrindex(int *i,PyArrayObject *arr) {
+ int k,ii = i[0];
+ for (k=1; k < PyArray_NDIM(arr); k++)
+ ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
+ return ii;
+}"""
+cfuncs['calcarrindextr'] = """\
+static int calcarrindextr(int *i,PyArrayObject *arr) {
+ int k,ii = i[PyArray_NDIM(arr)-1];
+ for (k=1; k < PyArray_NDIM(arr); k++)
+ ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
+ return ii;
+}"""
+cfuncs['forcomb'] = """\
+static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
+static int initforcomb(npy_intp *dims,int nd,int tr) {
+ int k;
+ if (dims==NULL) return 0;
+ if (nd<0) return 0;
+ forcombcache.nd = nd;
+ forcombcache.d = dims;
+ forcombcache.tr = tr;
+ if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+ if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
+ for (k=1;k<nd;k++) {
+ forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
+ }
+ forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
+ return 1;
+}
+static int *nextforcomb(void) {
+ int j,*i,*i_tr,k;
+ int nd=forcombcache.nd;
+ if ((i=forcombcache.i) == NULL) return NULL;
+ if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
+ if (forcombcache.d == NULL) return NULL;
+ i[0]++;
+ if (i[0]==forcombcache.d[0]) {
+ j=1;
+ while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
+ if (j==nd) {
+ free(i);
+ free(i_tr);
+ return NULL;
+ }
+ for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
+ i[j]++;
+ i_tr[nd-j-1]++;
+ } else
+ i_tr[nd-1]++;
+ if (forcombcache.tr) return i_tr;
+ return i;
+}"""
+needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
+cfuncs['try_pyarr_from_string'] = """\
+/*
+  try_pyarr_from_string copies at most PyArray_NBYTES(obj) bytes of str
+  to the data of an `ndarray`.
+
+ If obj is an `ndarray`, it is assumed to be contiguous.
+
+ If the specified len==-1, str must be null-terminated.
+*/
+static int try_pyarr_from_string(PyObject *obj,
+ const string str, const int len) {
+#ifdef DEBUGCFUNCS
+fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n",
+ (char*)str,len, obj);
+#endif
+ if (PyArray_Check(obj)) {
+ PyArrayObject *arr = (PyArrayObject *)obj;
+ assert(ISCONTIGUOUS(arr));
+ string buf = PyArray_DATA(arr);
+ npy_intp n = len;
+ if (n == -1) {
+ /* Assuming null-terminated str. */
+ n = strlen(str);
+ }
+ if (n > PyArray_NBYTES(arr)) {
+ n = PyArray_NBYTES(arr);
+ }
+ STRINGCOPYN(buf, str, n);
+ return 1;
+ }
+capi_fail:
+ PRINTPYOBJERR(obj);
+ PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\");
+ return 0;
+}
+"""
+needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
+cfuncs['string_from_pyobj'] = """\
+/*
+ Create a new string buffer `str` of at most length `len` from a
+ Python string-like object `obj`.
+
+  The string buffer has the given size (len) or, when len == -1, the
+  size of the source object.
+
+ The string buffer is padded with blanks: in Fortran, trailing blanks
+ are insignificant contrary to C nulls.
+ */
+static int
+string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj,
+ const char *errmess)
+{
+ PyObject *tmp = NULL;
+ string buf = NULL;
+ npy_intp n = -1;
+#ifdef DEBUGCFUNCS
+fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",
+ (char*)str, *len, (char *)inistr, obj);
+#endif
+ if (obj == Py_None) {
+ n = strlen(inistr);
+ buf = inistr;
+ }
+ else if (PyArray_Check(obj)) {
+ PyArrayObject *arr = (PyArrayObject *)obj;
+ if (!ISCONTIGUOUS(arr)) {
+ PyErr_SetString(PyExc_ValueError,
+ \"array object is non-contiguous.\");
+ goto capi_fail;
+ }
+ n = PyArray_NBYTES(arr);
+ buf = PyArray_DATA(arr);
+ n = strnlen(buf, n);
+ }
+ else {
+ if (PyBytes_Check(obj)) {
+ tmp = obj;
+ Py_INCREF(tmp);
+ }
+ else if (PyUnicode_Check(obj)) {
+ tmp = PyUnicode_AsASCIIString(obj);
+ }
+ else {
+ PyObject *tmp2;
+ tmp2 = PyObject_Str(obj);
+ if (tmp2) {
+ tmp = PyUnicode_AsASCIIString(tmp2);
+ Py_DECREF(tmp2);
+ }
+ else {
+ tmp = NULL;
+ }
+ }
+ if (tmp == NULL) goto capi_fail;
+ n = PyBytes_GET_SIZE(tmp);
+ buf = PyBytes_AS_STRING(tmp);
+ }
+ if (*len == -1) {
+ /* TODO: change the type of `len` so that we can remove this */
+ if (n > NPY_MAX_INT) {
+ PyErr_SetString(PyExc_OverflowError,
+ "object too large for a 32-bit int");
+ goto capi_fail;
+ }
+ *len = n;
+ }
+ else if (*len < n) {
+ /* discard the last (len-n) bytes of input buf */
+ n = *len;
+ }
+ if (n < 0 || *len < 0 || buf == NULL) {
+ goto capi_fail;
+ }
+ STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1)
+ if (n < *len) {
+ /*
+ Pad fixed-width string with nulls. The caller will replace
+ nulls with blanks when the corresponding argument is not
+ intent(c).
+ */
+ memset(*str + n, '\\0', *len - n);
+ }
+ STRINGCOPYN(*str, buf, n);
+ Py_XDECREF(tmp);
+ return 1;
+capi_fail:
+ Py_XDECREF(tmp);
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err, errmess);
+ }
+ return 0;
+}
+"""
+
+cfuncs['character_from_pyobj'] = """\
+static int
+character_from_pyobj(character* v, PyObject *obj, const char *errmess) {
+ if (PyBytes_Check(obj)) {
+ /* empty bytes has trailing null, so dereferencing is always safe */
+ *v = PyBytes_AS_STRING(obj)[0];
+ return 1;
+ } else if (PyUnicode_Check(obj)) {
+ PyObject* tmp = PyUnicode_AsASCIIString(obj);
+ if (tmp != NULL) {
+ *v = PyBytes_AS_STRING(tmp)[0];
+ Py_DECREF(tmp);
+ return 1;
+ }
+ } else if (PyArray_Check(obj)) {
+ PyArrayObject* arr = (PyArrayObject*)obj;
+ if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) {
+ *v = PyArray_BYTES(arr)[0];
+ return 1;
+ } else if (F2PY_IS_UNICODE_ARRAY(arr)) {
+ // TODO: update when numpy will support 1-byte and
+ // 2-byte unicode dtypes
+ PyObject* tmp = PyUnicode_FromKindAndData(
+ PyUnicode_4BYTE_KIND,
+ PyArray_BYTES(arr),
+ (PyArray_NBYTES(arr)>0?1:0));
+ if (tmp != NULL) {
+ if (character_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ }
+ } else if (PySequence_Check(obj)) {
+ PyObject* tmp = PySequence_GetItem(obj,0);
+ if (tmp != NULL) {
+ if (character_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ }
+ {
+ char mess[F2PY_MESSAGE_BUFFER_SIZE];
+ strcpy(mess, errmess);
+ PyObject* err = PyErr_Occurred();
+ if (err == NULL) {
+ err = PyExc_TypeError;
+ }
+ sprintf(mess + strlen(mess),
+ " -- expected str|bytes|sequence-of-str-or-bytes, got ");
+ f2py_describe(obj, mess + strlen(mess));
+ PyErr_SetString(err, mess);
+ }
+ return 0;
+}
+"""
+
+needs['char_from_pyobj'] = ['int_from_pyobj']
+cfuncs['char_from_pyobj'] = """\
+static int
+char_from_pyobj(char* v, PyObject *obj, const char *errmess) {
+ int i = 0;
+ if (int_from_pyobj(&i, obj, errmess)) {
+ *v = (char)i;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char']
+cfuncs['signed_char_from_pyobj'] = """\
+static int
+signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) {
+ int i = 0;
+ if (int_from_pyobj(&i, obj, errmess)) {
+ *v = (signed_char)i;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+needs['short_from_pyobj'] = ['int_from_pyobj']
+cfuncs['short_from_pyobj'] = """\
+static int
+short_from_pyobj(short* v, PyObject *obj, const char *errmess) {
+ int i = 0;
+ if (int_from_pyobj(&i, obj, errmess)) {
+ *v = (short)i;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+cfuncs['int_from_pyobj'] = """\
+static int
+int_from_pyobj(int* v, PyObject *obj, const char *errmess)
+{
+ PyObject* tmp = NULL;
+
+ if (PyLong_Check(obj)) {
+ *v = Npy__PyLong_AsInt(obj);
+ return !(*v == -1 && PyErr_Occurred());
+ }
+
+ tmp = PyNumber_Long(obj);
+ if (tmp) {
+ *v = Npy__PyLong_AsInt(tmp);
+ Py_DECREF(tmp);
+ return !(*v == -1 && PyErr_Occurred());
+ }
+
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
+ /*pass*/;
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
+ if (int_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err, errmess);
+ }
+ return 0;
+}
+"""
+
+
+cfuncs['long_from_pyobj'] = """\
+static int
+long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
+ PyObject* tmp = NULL;
+
+ if (PyLong_Check(obj)) {
+ *v = PyLong_AsLong(obj);
+ return !(*v == -1 && PyErr_Occurred());
+ }
+
+ tmp = PyNumber_Long(obj);
+ if (tmp) {
+ *v = PyLong_AsLong(tmp);
+ Py_DECREF(tmp);
+ return !(*v == -1 && PyErr_Occurred());
+ }
+
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
+ /*pass*/;
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
+ if (long_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err, errmess);
+ }
+ return 0;
+}
+"""
+
+
+needs['long_long_from_pyobj'] = ['long_long']
+cfuncs['long_long_from_pyobj'] = """\
+static int
+long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
+{
+ PyObject* tmp = NULL;
+
+ if (PyLong_Check(obj)) {
+ *v = PyLong_AsLongLong(obj);
+ return !(*v == -1 && PyErr_Occurred());
+ }
+
+ tmp = PyNumber_Long(obj);
+ if (tmp) {
+ *v = PyLong_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return !(*v == -1 && PyErr_Occurred());
+ }
+
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
+ /*pass*/;
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
+ if (long_long_from_pyobj(v, tmp, errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err == NULL) {
+ err = #modulename#_error;
+ }
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
+}
+"""
+
+
+needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double']
+cfuncs['long_double_from_pyobj'] = """\
+static int
+long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess)
+{
+ double d=0;
+ if (PyArray_CheckScalar(obj)){
+ if PyArray_IsScalar(obj, LongDouble) {
+ PyArray_ScalarAsCtype(obj, v);
+ return 1;
+ }
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) {
+ (*v) = *((npy_longdouble *)PyArray_DATA(obj));
+ return 1;
+ }
+ }
+ if (double_from_pyobj(&d, obj, errmess)) {
+ *v = (long_double)d;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+cfuncs['double_from_pyobj'] = """\
+static int
+double_from_pyobj(double* v, PyObject *obj, const char *errmess)
+{
+ PyObject* tmp = NULL;
+ if (PyFloat_Check(obj)) {
+ *v = PyFloat_AsDouble(obj);
+ return !(*v == -1.0 && PyErr_Occurred());
+ }
+
+ tmp = PyNumber_Float(obj);
+ if (tmp) {
+ *v = PyFloat_AsDouble(tmp);
+ Py_DECREF(tmp);
+ return !(*v == -1.0 && PyErr_Occurred());
+ }
+
+ if (PyComplex_Check(obj)) {
+ PyErr_Clear();
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ }
+ else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
+ /*pass*/;
+ }
+ else if (PySequence_Check(obj)) {
+ PyErr_Clear();
+ tmp = PySequence_GetItem(obj, 0);
+ }
+
+ if (tmp) {
+ if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
+}
+"""
+
+
+needs['float_from_pyobj'] = ['double_from_pyobj']
+cfuncs['float_from_pyobj'] = """\
+static int
+float_from_pyobj(float* v, PyObject *obj, const char *errmess)
+{
+ double d=0.0;
+ if (double_from_pyobj(&d,obj,errmess)) {
+ *v = (float)d;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double',
+ 'complex_double_from_pyobj']
+cfuncs['complex_long_double_from_pyobj'] = """\
+static int
+complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess)
+{
+ complex_double cd = {0.0,0.0};
+ if (PyArray_CheckScalar(obj)){
+ if PyArray_IsScalar(obj, CLongDouble) {
+ PyArray_ScalarAsCtype(obj, v);
+ return 1;
+ }
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
+ (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
+ (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
+ return 1;
+ }
+ }
+ if (complex_double_from_pyobj(&cd,obj,errmess)) {
+ (*v).r = (long_double)cd.r;
+ (*v).i = (long_double)cd.i;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+needs['complex_double_from_pyobj'] = ['complex_double']
+cfuncs['complex_double_from_pyobj'] = """\
+static int
+complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) {
+ Py_complex c;
+ if (PyComplex_Check(obj)) {
+ c = PyComplex_AsCComplex(obj);
+ (*v).r = c.real;
+ (*v).i = c.imag;
+ return 1;
+ }
+ if (PyArray_IsScalar(obj, ComplexFloating)) {
+ if (PyArray_IsScalar(obj, CFloat)) {
+ npy_cfloat new;
+ PyArray_ScalarAsCtype(obj, &new);
+ (*v).r = (double)new.real;
+ (*v).i = (double)new.imag;
+ }
+ else if (PyArray_IsScalar(obj, CLongDouble)) {
+ npy_clongdouble new;
+ PyArray_ScalarAsCtype(obj, &new);
+ (*v).r = (double)new.real;
+ (*v).i = (double)new.imag;
+ }
+ else { /* if (PyArray_IsScalar(obj, CDouble)) */
+ PyArray_ScalarAsCtype(obj, v);
+ }
+ return 1;
+ }
+ if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
+ PyArrayObject *arr;
+ if (PyArray_Check(obj)) {
+ arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
+ }
+ else {
+ arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
+ }
+ if (arr == NULL) {
+ return 0;
+ }
+ (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
+ (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
+ Py_DECREF(arr);
+ return 1;
+ }
+ /* Python does not provide PyNumber_Complex function :-( */
+ (*v).i = 0.0;
+ if (PyFloat_Check(obj)) {
+ (*v).r = PyFloat_AsDouble(obj);
+ return !((*v).r == -1.0 && PyErr_Occurred());
+ }
+ if (PyLong_Check(obj)) {
+ (*v).r = PyLong_AsDouble(obj);
+ return !((*v).r == -1.0 && PyErr_Occurred());
+ }
+ if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) {
+ PyObject *tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ if (complex_double_from_pyobj(v,tmp,errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL)
+ err = PyExc_TypeError;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
+}
+"""
+
+
+needs['complex_float_from_pyobj'] = [
+ 'complex_float', 'complex_double_from_pyobj']
+cfuncs['complex_float_from_pyobj'] = """\
+static int
+complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess)
+{
+ complex_double cd={0.0,0.0};
+ if (complex_double_from_pyobj(&cd,obj,errmess)) {
+ (*v).r = (float)cd.r;
+ (*v).i = (float)cd.i;
+ return 1;
+ }
+ return 0;
+}
+"""
+
+
+cfuncs['try_pyarr_from_character'] = """\
+static int try_pyarr_from_character(PyObject* obj, character* v) {
+ PyArrayObject *arr = (PyArrayObject*)obj;
+ if (!obj) return -2;
+ if (PyArray_Check(obj)) {
+ if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) {
+ *(character *)(PyArray_DATA(arr)) = *v;
+ return 1;
+ }
+ }
+ {
+ char mess[F2PY_MESSAGE_BUFFER_SIZE];
+ PyObject* err = PyErr_Occurred();
+        if (err == NULL) {
+            err = PyExc_ValueError;
+            strcpy(mess, "try_pyarr_from_character failed"
+                   " -- expected bytes array-scalar|array, got ");
+            f2py_describe(obj, mess + strlen(mess));
+            /* set the error message only when no exception is pending,
+               as `mess` is uninitialized otherwise */
+            PyErr_SetString(err, mess);
+        }
+ }
+ return 0;
+}
+"""
+
+needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE']
+cfuncs[
+ 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
+needs['try_pyarr_from_unsigned_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char']
+cfuncs[
+ 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
+needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char']
+cfuncs[
+ 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
+needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE']
+cfuncs[
+ 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
+needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE']
+cfuncs[
+ 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
+needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE']
+cfuncs[
+ 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
+needs['try_pyarr_from_long_long'] = [
+ 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long']
+cfuncs[
+ 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
+needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE']
+cfuncs[
+ 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
+needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE']
+cfuncs[
+ 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
+needs['try_pyarr_from_complex_float'] = [
+ 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float']
+cfuncs[
+ 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
+needs['try_pyarr_from_complex_double'] = [
+ 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double']
+cfuncs[
+ 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
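+# The one-liners above instantiate TRYPYARRAYTEMPLATE/TRYCOMPLEXPYARRAYTEMPLATE
+# (cpp macros registered elsewhere in this module) once per C type; each
+# helper writes a C scalar back into `obj` when `obj` is a compatible
+# ndarray, which is how scalar results get copied back into Python-side
+# array arguments.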
+
+
+needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
+# create the list of arguments to be used when calling back to python
+cfuncs['create_cb_arglist'] = """\
+static int
+create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs,
+ const int nofoptargs, int *nofargs, PyTupleObject **args,
+ const char *errmess)
+{
+ PyObject *tmp = NULL;
+ PyObject *tmp_fun = NULL;
+ Py_ssize_t tot, opt, ext, siz, i, di = 0;
+ CFUNCSMESS(\"create_cb_arglist\\n\");
+ tot=opt=ext=siz=0;
+ /* Get the total number of arguments */
+ if (PyFunction_Check(fun)) {
+ tmp_fun = fun;
+ Py_INCREF(tmp_fun);
+ }
+ else {
+ di = 1;
+ if (PyObject_HasAttrString(fun,\"im_func\")) {
+ tmp_fun = PyObject_GetAttrString(fun,\"im_func\");
+ }
+ else if (PyObject_HasAttrString(fun,\"__call__\")) {
+ tmp = PyObject_GetAttrString(fun,\"__call__\");
+ if (PyObject_HasAttrString(tmp,\"im_func\"))
+ tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
+ else {
+ tmp_fun = fun; /* built-in function */
+ Py_INCREF(tmp_fun);
+ tot = maxnofargs;
+ if (PyCFunction_Check(fun)) {
+ /* In case the function has a co_argcount (like on PyPy) */
+ di = 0;
+ }
+ if (xa != NULL)
+ tot += PyTuple_Size((PyObject *)xa);
+ }
+ Py_XDECREF(tmp);
+ }
+ else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
+ tot = maxnofargs;
+ if (xa != NULL)
+ tot += PyTuple_Size((PyObject *)xa);
+ tmp_fun = fun;
+ Py_INCREF(tmp_fun);
+ }
+ else if (F2PyCapsule_Check(fun)) {
+ tot = maxnofargs;
+ if (xa != NULL)
+ ext = PyTuple_Size((PyObject *)xa);
+ if(ext>0) {
+ fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\");
+ goto capi_fail;
+ }
+ tmp_fun = fun;
+ Py_INCREF(tmp_fun);
+ }
+ }
+
+ if (tmp_fun == NULL) {
+ fprintf(stderr,
+ \"Call-back argument must be function|instance|instance.__call__|f2py-function \"
+ \"but got %s.\\n\",
+ ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
+ goto capi_fail;
+ }
+
+ if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
+ PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
+ Py_DECREF(tmp);
+ if (tmp_argcount == NULL) {
+ goto capi_fail;
+ }
+ tot = PyLong_AsSsize_t(tmp_argcount) - di;
+ Py_DECREF(tmp_argcount);
+ }
+ }
+ /* Get the number of optional arguments */
+ if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
+ if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
+ opt = PyTuple_Size(tmp);
+ Py_XDECREF(tmp);
+ }
+ /* Get the number of extra arguments */
+ if (xa != NULL)
+ ext = PyTuple_Size((PyObject *)xa);
+ /* Calculate the size of call-backs argument list */
+ siz = MIN(maxnofargs+ext,tot);
+ *nofargs = MAX(0,siz-ext);
+
+#ifdef DEBUGCFUNCS
+ fprintf(stderr,
+ \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
+ \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
+ maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
+#endif
+
+ if (siz < tot-opt) {
+ fprintf(stderr,
+ \"create_cb_arglist: Failed to build argument list \"
+ \"(siz) with enough arguments (tot-opt) required by \"
+ \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
+ siz, tot, opt);
+ goto capi_fail;
+ }
+
+ /* Initialize argument list */
+ *args = (PyTupleObject *)PyTuple_New(siz);
+ for (i=0;i<*nofargs;i++) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
+ }
+ if (xa != NULL)
+ for (i=(*nofargs);i<siz;i++) {
+ tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
+ Py_INCREF(tmp);
+ PyTuple_SET_ITEM(*args,i,tmp);
+ }
+ CFUNCSMESS(\"create_cb_arglist-end\\n\");
+ Py_DECREF(tmp_fun);
+ return 1;
+
+capi_fail:
+ if (PyErr_Occurred() == NULL)
+ PyErr_SetString(#modulename#_error, errmess);
+ Py_XDECREF(tmp_fun);
+ return 0;
+}
+"""
+
+
+def buildcfuncs():
+ from .capi_maps import c2capi_map
+ for k in c2capi_map.keys():
+ m = 'pyarr_from_p_%s1' % k
+ cppmacros[
+ m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k])
+ k = 'string'
+ m = 'pyarr_from_p_%s1' % k
+ # NPY_CHAR compatibility, NPY_STRING with itemsize 1
+ cppmacros[
+ m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % (m)
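+# For example, assuming c2capi_map maps 'double' to 'NPY_DOUBLE', the loop
+# above generates
+#     #define pyarr_from_p_double1(v) (PyArray_SimpleNewFromData(0,NULL,NPY_DOUBLE,(char *)v))
+# i.e. a macro that wraps a pointer to a C scalar as a 0-dimensional array.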
+
+
+############ Auxiliary functions for sorting needs ###################
+
+def append_needs(need, flag=1):
+ # This function modifies the contents of the global `outneeds` dict.
+ if isinstance(need, list):
+ for n in need:
+ append_needs(n, flag)
+ elif isinstance(need, str):
+ if not need:
+ return
+ if need in includes0:
+ n = 'includes0'
+ elif need in includes:
+ n = 'includes'
+ elif need in typedefs:
+ n = 'typedefs'
+ elif need in typedefs_generated:
+ n = 'typedefs_generated'
+ elif need in cppmacros:
+ n = 'cppmacros'
+ elif need in cfuncs:
+ n = 'cfuncs'
+ elif need in callbacks:
+ n = 'callbacks'
+ elif need in f90modhooks:
+ n = 'f90modhooks'
+ elif need in commonhooks:
+ n = 'commonhooks'
+ else:
+ errmess('append_needs: unknown need %s\n' % (repr(need)))
+ return
+ if need in outneeds[n]:
+ return
+ if flag:
+ tmp = {}
+ if need in needs:
+ for nn in needs[need]:
+ t = append_needs(nn, 0)
+ if isinstance(t, dict):
+ for nnn in t.keys():
+ if nnn in tmp:
+ tmp[nnn] = tmp[nnn] + t[nnn]
+ else:
+ tmp[nnn] = t[nnn]
+ for nn in tmp.keys():
+ for nnn in tmp[nn]:
+ if nnn not in outneeds[nn]:
+ outneeds[nn] = [nnn] + outneeds[nn]
+ outneeds[n].append(need)
+ else:
+ tmp = {}
+ if need in needs:
+ for nn in needs[need]:
+ t = append_needs(nn, flag)
+ if isinstance(t, dict):
+ for nnn in t.keys():
+ if nnn in tmp:
+ tmp[nnn] = t[nnn] + tmp[nnn]
+ else:
+ tmp[nnn] = t[nnn]
+ if n not in tmp:
+ tmp[n] = []
+ tmp[n].append(need)
+ return tmp
+ else:
+ errmess('append_needs: expected list or string but got :%s\n' %
+ (repr(need)))
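+# For example, append_needs('string_from_pyobj') also queues 'string',
+# 'STRINGMALLOC' and 'STRINGCOPYN' (see needs['string_from_pyobj'] above),
+# recursing through their own entries so that every prerequisite is placed
+# ahead of its dependents; the final ordering is done by get_needs() below.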
+
+
+def get_needs():
+ # This function modifies the contents of the global `outneeds` dict.
+ res = {}
+ for n in outneeds.keys():
+ out = []
+ saveout = copy.copy(outneeds[n])
+ while len(outneeds[n]) > 0:
+ if outneeds[n][0] not in needs:
+ out.append(outneeds[n][0])
+ del outneeds[n][0]
+ else:
+ flag = 0
+ for k in outneeds[n][1:]:
+ if k in needs[outneeds[n][0]]:
+ flag = 1
+ break
+ if flag:
+ outneeds[n] = outneeds[n][1:] + [outneeds[n][0]]
+ else:
+ out.append(outneeds[n][0])
+ del outneeds[n][0]
+ if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \
+ and outneeds[n] != []:
+ print(n, saveout)
+ errmess(
+ 'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
+ out = out + saveout
+ break
+ saveout = copy.copy(outneeds[n])
+ if out == []:
+ out = [n]
+ res[n] = out
+ return res
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/common_rules.py b/venv/lib/python3.9/site-packages/numpy/f2py/common_rules.py
new file mode 100644
index 00000000..5a488bf5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/common_rules.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python3
+"""
+
+Build the common block mechanism for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 10:57:33 $
+Pearu Peterson
+
+"""
+from . import __version__
+f2py_version = __version__.version
+
+from .auxfuncs import (
+ hasbody, hascommon, hasnote, isintent_hide, outmess
+)
+from . import capi_maps
+from . import func2subr
+from .crackfortran import rmbadname
+
+
+def findcommonblocks(block, top=1):
+ ret = []
+ if hascommon(block):
+ for key, value in block['common'].items():
+ vars_ = {v: block['vars'][v] for v in value}
+ ret.append((key, value, vars_))
+ elif hasbody(block):
+ for b in block['body']:
+ ret = ret + findcommonblocks(b, 0)
+ if top:
+ tret = []
+ names = []
+ for t in ret:
+ if t[0] not in names:
+ names.append(t[0])
+ tret.append(t)
+ return tret
+ return ret
+
+
+def buildhooks(m):
+ ret = {'commonhooks': [], 'initcommonhooks': [],
+ 'docs': ['"COMMON blocks:\\n"']}
+ fwrap = ['']
+
+ def fadd(line, s=fwrap):
+ s[0] = '%s\n %s' % (s[0], line)
+ chooks = ['']
+
+ def cadd(line, s=chooks):
+ s[0] = '%s\n%s' % (s[0], line)
+ ihooks = ['']
+
+ def iadd(line, s=ihooks):
+ s[0] = '%s\n%s' % (s[0], line)
+ doc = ['']
+
+ def dadd(line, s=doc):
+ s[0] = '%s\n%s' % (s[0], line)
+ for (name, vnames, vars) in findcommonblocks(m):
+ lower_name = name.lower()
+ hnames, inames = [], []
+ for n in vnames:
+ if isintent_hide(vars[n]):
+ hnames.append(n)
+ else:
+ inames.append(n)
+ if hnames:
+ outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % (
+ name, ','.join(inames), ','.join(hnames)))
+ else:
+ outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % (
+ name, ','.join(inames)))
+ fadd('subroutine f2pyinit%s(setupfunc)' % name)
+ fadd('external setupfunc')
+ for n in vnames:
+ fadd(func2subr.var2fixfortran(vars, n))
+ if name == '_BLNK_':
+ fadd('common %s' % (','.join(vnames)))
+ else:
+ fadd('common /%s/ %s' % (name, ','.join(vnames)))
+ fadd('call setupfunc(%s)' % (','.join(inames)))
+ fadd('end\n')
+ cadd('static FortranDataDef f2py_%s_def[] = {' % (name))
+ idims = []
+ for n in inames:
+ ct = capi_maps.getctype(vars[n])
+ elsize = capi_maps.get_elsize(vars[n])
+ at = capi_maps.c2capi_map[ct]
+ dm = capi_maps.getarrdims(n, vars[n])
+ if dm['dims']:
+ idims.append('(%s)' % (dm['dims']))
+ else:
+ idims.append('')
+ dms = dm['dims'].strip()
+ if not dms:
+ dms = '-1'
+ cadd('\t{\"%s\",%s,{{%s}},%s, %s},'
+ % (n, dm['rank'], dms, at, elsize))
+ cadd('\t{NULL}\n};')
+ inames1 = rmbadname(inames)
+ inames1_tps = ','.join(['char *' + s for s in inames1])
+ cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps))
+ cadd('\tint i_f2py=0;')
+ for n in inames1:
+ cadd('\tf2py_%s_def[i_f2py++].data = %s;' % (name, n))
+ cadd('}')
+ if '_' in lower_name:
+ F_FUNC = 'F_FUNC_US'
+ else:
+ F_FUNC = 'F_FUNC'
+ cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));'
+ % (F_FUNC, lower_name, name.upper(),
+ ','.join(['char*'] * len(inames1))))
+ cadd('static void f2py_init_%s(void) {' % name)
+ cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
+ % (F_FUNC, lower_name, name.upper(), name))
+ cadd('}\n')
+ iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name))
+ iadd('\tif (tmp == NULL) return NULL;')
+ iadd('\tif (F2PyDict_SetItemString(d, \"%s\", tmp) == -1) return NULL;'
+ % name)
+ iadd('\tPy_DECREF(tmp);')
+ tname = name.replace('_', '\\_')
+ dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname))
+ dadd('\\begin{description}')
+ for n in inames:
+ dadd('\\item[]{{}\\verb@%s@{}}' %
+ (capi_maps.getarrdocsign(n, vars[n])))
+ if hasnote(vars[n]):
+ note = vars[n]['note']
+ if isinstance(note, list):
+ note = '\n'.join(note)
+ dadd('--- %s' % (note))
+ dadd('\\end{description}')
+ ret['docs'].append(
+ '"\t/%s/ %s\\n"' % (name, ','.join(map(lambda v, d: v + d, inames, idims))))
+ ret['commonhooks'] = chooks
+ ret['initcommonhooks'] = ihooks
+ ret['latexdoc'] = doc[0]
+ if len(ret['docs']) <= 1:
+ ret['docs'] = ''
+ return ret, fwrap[0]
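+# For a block like `common /foo/ a, b` the fadd() calls above emit a Fortran
+# shim along these lines (illustrative only; names and type declarations
+# depend on the input source):
+#
+#       subroutine f2pyinitfoo(setupfunc)
+#       external setupfunc
+#       real a, b
+#       common /foo/ a, b
+#       call setupfunc(a, b)
+#       end
+#
+# which hands the C side the addresses of the COMMON block members.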
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/crackfortran.py b/venv/lib/python3.9/site-packages/numpy/f2py/crackfortran.py
new file mode 100644
index 00000000..63597cef
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/crackfortran.py
@@ -0,0 +1,3545 @@
+#!/usr/bin/env python3
+"""
+crackfortran --- read fortran (77,90) code and extract declaration information.
+
+Copyright 1999-2004 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/09/27 07:13:49 $
+Pearu Peterson
+
+
+Usage of crackfortran:
+======================
+Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
+ -m <module name for f77 routines>,--ignore-contains
+Functions: crackfortran, crack2fortran
+The following Fortran statements/constructions are supported
+(or will be if needed):
+ block data,byte,call,character,common,complex,contains,data,
+ dimension,double complex,double precision,end,external,function,
+ implicit,integer,intent,interface,intrinsic,
+ logical,module,optional,parameter,private,public,
+ program,real,(sequence?),subroutine,type,use,virtual,
+ include,pythonmodule
+Note: 'virtual' is mapped to 'dimension'.
+Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
+Note: code after 'contains' will be ignored until its scope ends.
+Note: 'common' statement is extended: dimensions are moved to variable definitions
+Note: f2py directive: <commentchar>f2py<line> is read as <line>
+Note: pythonmodule is introduced to represent a Python module
+
+Usage:
+ `postlist=crackfortran(files)`
+ `postlist` contains declaration information read from the list of files `files`.
+ `crack2fortran(postlist)` returns Fortran code to be saved to a pyf-file
+
+ `postlist` has the following structure:
+ *** it is a list of dictionaries containing `blocks':
+ B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
+ 'implicit','externals','interfaced','common','sortvars',
+ 'commonvars','note']}
+ B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
+ 'program' | 'block data' | 'type' | 'pythonmodule' |
+ 'abstract interface'
+ B['body'] --- list containing `subblocks' with the same structure as `blocks'
+ B['parent_block'] --- dictionary of a parent block:
+ C['body'][<index>]['parent_block'] is C
+ B['vars'] --- dictionary of variable definitions
+ B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
+ B['name'] --- name of the block (not if B['block']=='interface')
+ B['prefix'] --- prefix string (only if B['block']=='function')
+ B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
+ B['result'] --- name of the return value (only if B['block']=='function')
+ B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
+ B['externals'] --- list of variables being external
+ B['interfaced'] --- list of variables being external and defined
+ B['common'] --- dictionary of common blocks (list of objects)
+ B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
+ B['from'] --- string showing the 'parents' of the current block
+ B['use'] --- dictionary of modules used in current block:
+ {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
+ B['note'] --- list of LaTeX comments on the block
+ B['f2pyenhancements'] --- optional dictionary
+ {'threadsafe':'','fortranname':<name>,
+ 'callstatement':<C-expr>|<multi-line block>,
+ 'callprotoargument':<C-expr-list>,
+ 'usercode':<multi-line block>|<list of multi-line blocks>,
+           'pymethoddef':<multi-line block>
+ }
+ B['entry'] --- dictionary {entryname:argslist,..}
+ B['varnames'] --- list of variable names given in the order of reading the
+ Fortran code, useful for derived types.
+     B['saved_interface'] --- a string with the scanned routine signature; defines an explicit interface
+ *** Variable definition is a dictionary
+ D = B['vars'][<variable name>] =
+ {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
+ D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
+ 'double precision' | 'integer' | 'logical' | 'real' | 'type'
+ D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
+ 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
+ 'optional','required', etc)
+ K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
+ 'complex' | 'integer' | 'logical' | 'real' )
+ C = D['charselector'] = {['*','len','kind','f2py_len']}
+ (only if D['typespec']=='character')
+ D['='] --- initialization expression string
+ D['typename'] --- name of the type if D['typespec']=='type'
+ D['dimension'] --- list of dimension bounds
+ D['intent'] --- list of intent specifications
+     D['depend'] --- list of variable names on which the current variable depends
+     D['check'] --- list of C-expressions; if a C-expr returns zero, an exception is raised
+ D['note'] --- list of LaTeX comments on the variable
+ *** Meaning of kind/char selectors (a few examples):
+     D['typespec']*K['*']
+ D['typespec'](kind=K['kind'])
+ character*C['*']
+ character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len'])
+ (see also fortran type declaration statement formats below)
+
+Fortran 90 type declaration statement format (F77 is a subset of F90)
+====================================================================
+(Main source: IBM XL Fortran 5.1 Language Reference Manual)
+type declaration = <typespec> [[<attrspec>]::] <entitydecl>
+<typespec> = byte |
+ character[<charselector>] |
+ complex[<kindselector>] |
+ double complex |
+ double precision |
+ integer[<kindselector>] |
+ logical[<kindselector>] |
+ real[<kindselector>] |
+ type(<typename>)
+<charselector> = * <charlen> |
+ ([len=]<len>[,[kind=]<kind>]) |
+ (kind=<kind>[,len=<len>])
+<kindselector> = * <intlen> |
+ ([kind=]<kind>)
+<attrspec> = comma separated list of attributes.
+ Only the following attributes are used in
+ building up the interface:
+ external
+ (parameter --- affects '=' key)
+ optional
+ intent
+ Other attributes are ignored.
+<intentspec> = in | out | inout
+<arrayspec> = comma separated list of dimension bounds.
+<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
+ [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
+
+In addition, the following attributes are used: check,depend,note
+
+TODO:
+ * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
+ -> 'real x(2)')
+      The above may be solved by creating an appropriate preprocessor program, for example.
+
+"""
+import sys
+import string
+import fileinput
+import re
+import os
+import copy
+import platform
+import codecs
+try:
+ import charset_normalizer
+except ImportError:
+ charset_normalizer = None
+
+from . import __version__
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+from . import symbolic
+
+f2py_version = __version__.version
+
+# Global flags:
+strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
+sourcecodeform = 'fix' # 'fix','free'
+quiet = 0 # Be verbose if 0 (Obsolete: not used any more)
+verbose = 1 # Be quiet if 0, extra verbose if > 1.
+tabchar = 4 * ' '
+pyffilename = ''
+f77modulename = ''
+skipemptyends = 0 # for old F77 programs without 'program' statement
+ignorecontains = 1
+dolowercase = 1
+debug = []
+
+# Global variables
+beginpattern = ''
+currentfilename = ''
+expectbegin = 1
+f90modulevars = {}
+filepositiontext = ''
+gotnextfile = 1
+groupcache = None
+groupcounter = 0
+grouplist = {groupcounter: []}
+groupname = ''
+include_paths = []
+neededmodule = -1
+onlyfuncs = []
+previous_context = None
+skipblocksuntil = -1
+skipfuncs = []
+skipfunctions = []
+usermodules = []
+
+
+def reset_global_f2py_vars():
+ global groupcounter, grouplist, neededmodule, expectbegin
+ global skipblocksuntil, usermodules, f90modulevars, gotnextfile
+ global filepositiontext, currentfilename, skipfunctions, skipfuncs
+ global onlyfuncs, include_paths, previous_context
+ global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
+ global f77modulename, skipemptyends, ignorecontains, dolowercase, debug
+
+ # flags
+ strictf77 = 1
+ sourcecodeform = 'fix'
+ quiet = 0
+ verbose = 1
+ tabchar = 4 * ' '
+ pyffilename = ''
+ f77modulename = ''
+ skipemptyends = 0
+ ignorecontains = 1
+ dolowercase = 1
+ debug = []
+ # variables
+ groupcounter = 0
+ grouplist = {groupcounter: []}
+ neededmodule = -1
+ expectbegin = 1
+ skipblocksuntil = -1
+ usermodules = []
+ f90modulevars = {}
+ gotnextfile = 1
+ filepositiontext = ''
+ currentfilename = ''
+ skipfunctions = []
+ skipfuncs = []
+ onlyfuncs = []
+ include_paths = []
+ previous_context = None
+
+
+def outmess(line, flag=1):
+ global filepositiontext
+
+ if not verbose:
+ return
+ if not quiet:
+ if flag:
+ sys.stdout.write(filepositiontext)
+ sys.stdout.write(line)
+
+re._MAXCACHE = 50
+defaultimplicitrules = {}
+for c in "abcdefghopqrstuvwxyz$_":
+ defaultimplicitrules[c] = {'typespec': 'real'}
+for c in "ijklmn":
+ defaultimplicitrules[c] = {'typespec': 'integer'}
+badnames = {}
+invbadnames = {}
+for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
+ 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
+ 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
+ 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
+ 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
+ 'max', 'min',
+ 'flen', 'fshape',
+ 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
+ 'type', 'default']:
+ badnames[n] = n + '_bn'
+ invbadnames[n + '_bn'] = n
+
+
+def rmbadname1(name):
+ if name in badnames:
+ errmess('rmbadname1: Replacing "%s" with "%s".\n' %
+ (name, badnames[name]))
+ return badnames[name]
+ return name
+
+
+def rmbadname(names):
+ return [rmbadname1(_m) for _m in names]
+
+
+def undo_rmbadname1(name):
+ if name in invbadnames:
+ errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
+ % (name, invbadnames[name]))
+ return invbadnames[name]
+ return name
+
+
+def undo_rmbadname(names):
+ return [undo_rmbadname1(_m) for _m in names]
+
+
+def getextension(name):
+ i = name.rfind('.')
+ if i == -1:
+ return ''
+ if '\\' in name[i:]:
+ return ''
+ if '/' in name[i:]:
+ return ''
+ return name[i + 1:]
+
+is_f_file = re.compile(r'.*\.(for|ftn|f77|f)\Z', re.I).match
+_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search
+_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search
+_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search
+_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
+
+
+def openhook(filename, mode):
+ """Ensures that filename is opened with correct encoding parameter.
+
+    This function uses the charset_normalizer package, when available, to
+    determine the encoding of the file to be opened. When charset_normalizer
+    is not available, the function detects only UTF encodings (via a BOM);
+    otherwise, ASCII encoding is used as a fallback.
+ """
+ # Reads in the entire file. Robust detection of encoding.
+ # Correctly handles comments or late stage unicode characters
+ # gh-22871
+ if charset_normalizer is not None:
+ encoding = charset_normalizer.from_path(filename).best().encoding
+ else:
+ # hint: install charset_normalizer for correct encoding handling
+ # No need to read the whole file for trying with startswith
+ nbytes = min(32, os.path.getsize(filename))
+ with open(filename, 'rb') as fhandle:
+ raw = fhandle.read(nbytes)
+ if raw.startswith(codecs.BOM_UTF8):
+ encoding = 'UTF-8-SIG'
+ elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
+ encoding = 'UTF-32'
+ elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)):
+ encoding = 'UTF-16'
+ else:
+ # Fallback, without charset_normalizer
+ encoding = 'ascii'
+ return open(filename, mode, encoding=encoding)
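+# E.g. a file that begins with the bytes EF BB BF (the UTF-8 BOM) is opened
+# with encoding='UTF-8-SIG' by the fallback branch above; a file without a
+# recognizable BOM is treated as ASCII unless charset_normalizer is
+# installed.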
+
+
+def is_free_format(file):
+ """Check if file is in free format Fortran."""
+    # F90 allows both fixed and free format; assume fixed unless
+    # signs of free format are detected.
+ result = 0
+ with openhook(file, 'r') as f:
+ line = f.readline()
+ n = 15 # the number of non-comment lines to scan for hints
+ if _has_f_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n > 0 and line:
+ if line[0] != '!' and line.strip():
+ n -= 1
+ if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
+ result = 1
+ break
+ line = f.readline()
+ return result
+
+
+# Read fortran (77,90) code
+def readfortrancode(ffile, dowithline=show, istop=1):
+ """
+ Read fortran codes from files and
+    1) Get rid of comments, line continuations, and empty lines; lowercase the source.
+    2) Call dowithline(line) on every line.
+    3) Recursively call itself when the statement \"include '<filename>'\" is encountered.
+ """
+ global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
+ global beginpattern, quiet, verbose, dolowercase, include_paths
+
+ if not istop:
+ saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+ beginpattern, quiet, verbose, dolowercase
+ if ffile == []:
+ return
+ localdolowercase = dolowercase
+ # cont: set to True when the content of the last line read
+ # indicates statement continuation
+ cont = False
+ finalline = ''
+ ll = ''
+ includeline = re.compile(
+ r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
+ cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
+ cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
+ mline_mark = re.compile(r".*?'''")
+ if istop:
+ dowithline('', -1)
+ ll, l1 = '', ''
+ spacedigits = [' '] + [str(_m) for _m in range(10)]
+ filepositiontext = ''
+ fin = fileinput.FileInput(ffile, openhook=openhook)
+ while True:
+ try:
+ l = fin.readline()
+ except UnicodeDecodeError as msg:
+ raise Exception(
+ f'readfortrancode: reading {fin.filename()}#{fin.lineno()}'
+ f' failed with\n{msg}.\nIt is likely that installing charset_normalizer'
+ ' package will help f2py determine the input file encoding'
+ ' correctly.')
+ if not l:
+ break
+ if fin.isfirstline():
+ filepositiontext = ''
+ currentfilename = fin.filename()
+ gotnextfile = 1
+ l1 = l
+ strictf77 = 0
+ sourcecodeform = 'fix'
+ ext = os.path.splitext(currentfilename)[1]
+ if is_f_file(currentfilename) and \
+ not (_has_f90_header(l) or _has_fix_header(l)):
+ strictf77 = 1
+ elif is_free_format(currentfilename) and not _has_fix_header(l):
+ sourcecodeform = 'free'
+ if strictf77:
+ beginpattern = beginpattern77
+ else:
+ beginpattern = beginpattern90
+ outmess('\tReading file %s (format:%s%s)\n'
+ % (repr(currentfilename), sourcecodeform,
+ strictf77 and ',strict' or ''))
+
+ l = l.expandtabs().replace('\xa0', ' ')
+ # Get rid of newline characters
+ while not l == '':
+ if l[-1] not in "\n\r\f":
+ break
+ l = l[:-1]
+ if not strictf77:
+ (l, rl) = split_by_unquoted(l, '!')
+ l += ' '
+ if rl[:5].lower() == '!f2py': # f2py directive
+ l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
+ if l.strip() == '': # Skip empty line
+ if sourcecodeform == 'free':
+ # In free form, a statement continues in the next line
+ # that is not a comment line [3.3.2.4^1], lines with
+ # blanks are comment lines [3.3.2.3^1]. Hence, the
+ # line continuation flag must retain its state.
+ pass
+ else:
+ # In fixed form, statement continuation is determined
+ # by a non-blank character at the 6-th position. Empty
+ # line indicates a start of a new statement
+ # [3.3.3.3^1]. Hence, the line continuation flag must
+ # be reset.
+ cont = False
+ continue
+ if sourcecodeform == 'fix':
+ if l[0] in ['*', 'c', '!', 'C', '#']:
+ if l[1:5].lower() == 'f2py': # f2py directive
+ l = ' ' + l[5:]
+ else: # Skip comment line
+ cont = False
+ continue
+ elif strictf77:
+ if len(l) > 72:
+ l = l[:72]
+ if not (l[0] in spacedigits):
+ raise Exception('readfortrancode: Found non-(space,digit) char '
+ 'in the first column.\n\tAre you sure that '
+ 'this code is in fix form?\n\tline=%s' % repr(l))
+
+ if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
+ # Continuation of a previous line
+ ll = ll + l[6:]
+ finalline = ''
+ origfinalline = ''
+ else:
+ if not strictf77:
+ # F90 continuation
+ r = cont1.match(l)
+ if r:
+ l = r.group('line') # Continuation follows ..
+ if cont:
+ ll = ll + cont2.match(l).group('line')
+ finalline = ''
+ origfinalline = ''
+ else:
+ # clean up line beginning from possible digits.
+ l = ' ' + l[5:]
+ if localdolowercase:
+ finalline = ll.lower()
+ else:
+ finalline = ll
+ origfinalline = ll
+ ll = l
+ cont = (r is not None)
+ else:
+ # clean up line beginning from possible digits.
+ l = ' ' + l[5:]
+ if localdolowercase:
+ finalline = ll.lower()
+ else:
+ finalline = ll
+ origfinalline = ll
+ ll = l
+
+ elif sourcecodeform == 'free':
+ if not cont and ext == '.pyf' and mline_mark.match(l):
+ l = l + '\n'
+ while True:
+ lc = fin.readline()
+ if not lc:
+ errmess(
+ 'Unexpected end of file when reading multiline\n')
+ break
+ l = l + lc
+ if mline_mark.match(lc):
+ break
+ l = l.rstrip()
+ r = cont1.match(l)
+ if r:
+ l = r.group('line') # Continuation follows ..
+ if cont:
+ ll = ll + cont2.match(l).group('line')
+ finalline = ''
+ origfinalline = ''
+ else:
+ if localdolowercase:
+ finalline = ll.lower()
+ else:
+ finalline = ll
+ origfinalline = ll
+ ll = l
+ cont = (r is not None)
+ else:
+ raise ValueError(
+ "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
+ filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
+ fin.filelineno() - 1, currentfilename, l1)
+ m = includeline.match(origfinalline)
+ if m:
+ fn = m.group('name')
+ if os.path.isfile(fn):
+ readfortrancode(fn, dowithline=dowithline, istop=0)
+ else:
+ include_dirs = [
+ os.path.dirname(currentfilename)] + include_paths
+ foundfile = 0
+ for inc_dir in include_dirs:
+ fn1 = os.path.join(inc_dir, fn)
+ if os.path.isfile(fn1):
+ foundfile = 1
+ readfortrancode(fn1, dowithline=dowithline, istop=0)
+ break
+ if not foundfile:
+ outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
+ repr(fn), os.pathsep.join(include_dirs)))
+ else:
+ dowithline(finalline)
+ l1 = ll
+ if localdolowercase:
+ finalline = ll.lower()
+ else:
+ finalline = ll
+ origfinalline = ll
+ filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
+ fin.filelineno() - 1, currentfilename, l1)
+ m = includeline.match(origfinalline)
+ if m:
+ fn = m.group('name')
+ if os.path.isfile(fn):
+ readfortrancode(fn, dowithline=dowithline, istop=0)
+ else:
+ include_dirs = [os.path.dirname(currentfilename)] + include_paths
+ foundfile = 0
+ for inc_dir in include_dirs:
+ fn1 = os.path.join(inc_dir, fn)
+ if os.path.isfile(fn1):
+ foundfile = 1
+ readfortrancode(fn1, dowithline=dowithline, istop=0)
+ break
+ if not foundfile:
+ outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
+ repr(fn), os.pathsep.join(include_dirs)))
+ else:
+ dowithline(finalline)
+ filepositiontext = ''
+ fin.close()
+ if istop:
+ dowithline('', 1)
+ else:
+ gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+ beginpattern, quiet, verbose, dolowercase = saveglobals
+
+# Crack line
+beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
+ r'\s*(?P<this>(\b(%s)\b))' + \
+ r'\s*(?P<after>%s)\s*\Z'
+##
+fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
+typespattern = re.compile(
+ beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
+typespattern4implicit = re.compile(beforethisafter % (
+ '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
+#
+functionpattern = re.compile(beforethisafter % (
+ r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
+subroutinepattern = re.compile(beforethisafter % (
+ r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
+# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
+#
+groupbegins77 = r'program|block\s*data'
+beginpattern77 = re.compile(
+ beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
+groupbegins90 = groupbegins77 + \
+ r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|' + \
+ r'type(?!\s*\()'
+beginpattern90 = re.compile(
+ beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
+groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
+ r'endinterface|endsubroutine|endfunction')
+endpattern = re.compile(
+ beforethisafter % ('', groupends, groupends, r'.*'), re.I), 'end'
+endifs = r'end\s*(if|do|where|select|while|forall|associate|block|' + \
+ r'critical|enum|team)'
+endifpattern = re.compile(
+ beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
+#
+moduleprocedures = r'module\s*procedure'
+moduleprocedurepattern = re.compile(
+ beforethisafter % ('', moduleprocedures, moduleprocedures, r'.*'), re.I), \
+ 'moduleprocedure'
+implicitpattern = re.compile(
+ beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
+dimensionpattern = re.compile(beforethisafter % (
+ '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
+externalpattern = re.compile(
+ beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
+optionalpattern = re.compile(
+ beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
+requiredpattern = re.compile(
+ beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
+publicpattern = re.compile(
+ beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
+privatepattern = re.compile(
+ beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
+intrinsicpattern = re.compile(
+ beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
+intentpattern = re.compile(beforethisafter % (
+ '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
+parameterpattern = re.compile(
+ beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
+datapattern = re.compile(
+ beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
+callpattern = re.compile(
+ beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
+entrypattern = re.compile(
+ beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
+callfunpattern = re.compile(
+ beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
+commonpattern = re.compile(
+ beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
+usepattern = re.compile(
+ beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
+containspattern = re.compile(
+ beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
+formatpattern = re.compile(
+ beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
+# Non-fortran and f2py-specific statements
+f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
+ 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
+multilinepattern = re.compile(
+ r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
+##
+
+def split_by_unquoted(line, characters):
+ """
+ Splits the line into (line[:i], line[i:]),
+ where i is the index of first occurrence of one of the characters
+ not within quotes, or len(line) if no such index exists
+ """
+ assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
+ r = re.compile(
+ r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
+ r"(?P<after>{char}.*)\Z".format(
+ not_quoted="[^\"'{}]".format(re.escape(characters)),
+ char="[{}]".format(re.escape(characters)),
+ single_quoted=r"('([^'\\]|(\\.))*')",
+ double_quoted=r'("([^"\\]|(\\.))*")'))
+ m = r.match(line)
+ if m:
+ d = m.groupdict()
+ return (d["before"], d["after"])
+ return (line, "")
+
+def _simplifyargs(argsline):
+ a = []
+ for n in markoutercomma(argsline).split('@,@'):
+ for r in '(),':
+ n = n.replace(r, '_')
+ a.append(n)
+ return ','.join(a)
+
+crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I)
+
+
+def crackline(line, reset=0):
+ """
+ reset=-1 --- initialize
+ reset=0 --- crack the line
+    reset=1 --- final check whether a mismatch of blocks occurred
+
+ Cracked data is saved in grouplist[0].
+ """
+ global beginpattern, groupcounter, groupname, groupcache, grouplist
+ global filepositiontext, currentfilename, neededmodule, expectbegin
+ global skipblocksuntil, skipemptyends, previous_context, gotnextfile
+
+ _, has_semicolon = split_by_unquoted(line, ";")
+ if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
+ multilinepattern[0].match(line)):
+ # XXX: non-zero reset values need testing
+ assert reset == 0, repr(reset)
+ # split line on unquoted semicolons
+ line, semicolon_line = split_by_unquoted(line, ";")
+ while semicolon_line:
+ crackline(line, reset)
+ line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
+ crackline(line, reset)
+ return
+ if reset < 0:
+ groupcounter = 0
+ groupname = {groupcounter: ''}
+ groupcache = {groupcounter: {}}
+ grouplist = {groupcounter: []}
+ groupcache[groupcounter]['body'] = []
+ groupcache[groupcounter]['vars'] = {}
+ groupcache[groupcounter]['block'] = ''
+ groupcache[groupcounter]['name'] = ''
+ neededmodule = -1
+ skipblocksuntil = -1
+ return
+ if reset > 0:
+ fl = 0
+ if f77modulename and neededmodule == groupcounter:
+ fl = 2
+ while groupcounter > fl:
+ outmess('crackline: groupcounter=%s groupname=%s\n' %
+ (repr(groupcounter), repr(groupname)))
+ outmess(
+ 'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
+ grouplist[groupcounter - 1].append(groupcache[groupcounter])
+ grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+ del grouplist[groupcounter]
+ groupcounter = groupcounter - 1
+ if f77modulename and neededmodule == groupcounter:
+ grouplist[groupcounter - 1].append(groupcache[groupcounter])
+ grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+ del grouplist[groupcounter]
+ groupcounter = groupcounter - 1 # end interface
+ grouplist[groupcounter - 1].append(groupcache[groupcounter])
+ grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+ del grouplist[groupcounter]
+ groupcounter = groupcounter - 1 # end module
+ neededmodule = -1
+ return
+ if line == '':
+ return
+ flag = 0
+ for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
+ requiredpattern,
+ parameterpattern, datapattern, publicpattern, privatepattern,
+ intrinsicpattern,
+ endifpattern, endpattern,
+ formatpattern,
+ beginpattern, functionpattern, subroutinepattern,
+ implicitpattern, typespattern, commonpattern,
+ callpattern, usepattern, containspattern,
+ entrypattern,
+ f2pyenhancementspattern,
+ multilinepattern,
+ moduleprocedurepattern
+ ]:
+ m = pat[0].match(line)
+ if m:
+ break
+ flag = flag + 1
+ if not m:
+ re_1 = crackline_re_1
+ if 0 <= skipblocksuntil <= groupcounter:
+ return
+ if 'externals' in groupcache[groupcounter]:
+ for name in groupcache[groupcounter]['externals']:
+ if name in invbadnames:
+ name = invbadnames[name]
+ if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
+ continue
+ m1 = re.match(
+ r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
+ if m1:
+ m2 = re_1.match(m1.group('before'))
+ a = _simplifyargs(m1.group('args'))
+ if m2:
+ line = 'callfun %s(%s) result (%s)' % (
+ name, a, m2.group('result'))
+ else:
+ line = 'callfun %s(%s)' % (name, a)
+ m = callfunpattern[0].match(line)
+ if not m:
+ outmess(
+ 'crackline: could not resolve function call for line=%s.\n' % repr(line))
+ return
+ analyzeline(m, 'callfun', line)
+ return
+ if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
+ previous_context = None
+ outmess('crackline:%d: No pattern for line\n' % (groupcounter))
+ return
+ elif pat[1] == 'end':
+ if 0 <= skipblocksuntil < groupcounter:
+ groupcounter = groupcounter - 1
+ if skipblocksuntil <= groupcounter:
+ return
+ if groupcounter <= 0:
+ raise Exception('crackline: groupcounter(=%s) is nonpositive. '
+ 'Check the blocks.'
+ % (groupcounter))
+ m1 = beginpattern[0].match((line))
+ if (m1) and (not m1.group('this') == groupname[groupcounter]):
+ raise Exception('crackline: End group %s does not match with '
+ 'previous Begin group %s\n\t%s' %
+ (repr(m1.group('this')), repr(groupname[groupcounter]),
+ filepositiontext)
+ )
+ if skipblocksuntil == groupcounter:
+ skipblocksuntil = -1
+ grouplist[groupcounter - 1].append(groupcache[groupcounter])
+ grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+ del grouplist[groupcounter]
+ groupcounter = groupcounter - 1
+ if not skipemptyends:
+ expectbegin = 1
+ elif pat[1] == 'begin':
+ if 0 <= skipblocksuntil <= groupcounter:
+ groupcounter = groupcounter + 1
+ return
+ gotnextfile = 0
+ analyzeline(m, pat[1], line)
+ expectbegin = 0
+ elif pat[1] == 'endif':
+ pass
+ elif pat[1] == 'moduleprocedure':
+ analyzeline(m, pat[1], line)
+ elif pat[1] == 'contains':
+ if ignorecontains:
+ return
+ if 0 <= skipblocksuntil <= groupcounter:
+ return
+ skipblocksuntil = groupcounter
+ else:
+ if 0 <= skipblocksuntil <= groupcounter:
+ return
+ analyzeline(m, pat[1], line)
+
+
+def markouterparen(line):
+ l = ''
+ f = 0
+ for c in line:
+ if c == '(':
+ f = f + 1
+ if f == 1:
+ l = l + '@(@'
+ continue
+ elif c == ')':
+ f = f - 1
+ if f == 0:
+ l = l + '@)@'
+ continue
+ l = l + c
+ return l
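+# E.g. markouterparen("f(a(b),c)") -> "f@(@a(b),c@)@": only the outermost
+# pair of parentheses is marked, nested pairs are left untouched.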
+
+
+def markoutercomma(line, comma=','):
+ l = ''
+ f = 0
+ before, after = split_by_unquoted(line, comma + '()')
+ l += before
+ while after:
+ if (after[0] == comma) and (f == 0):
+ l += '@' + comma + '@'
+ else:
+ l += after[0]
+ if after[0] == '(':
+ f += 1
+ elif after[0] == ')':
+ f -= 1
+ before, after = split_by_unquoted(after[1:], comma + '()')
+ l += before
+ assert not f, repr((f, line, l))
+ return l
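+# E.g. markoutercomma("a,b(c,d),e") -> "a@,@b(c,d)@,@e"; a subsequent
+# split('@,@') then recovers the top-level items without breaking on the
+# comma nested inside b(...).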
+
+def unmarkouterparen(line):
+ r = line.replace('@(@', '(').replace('@)@', ')')
+ return r
+
+
+def appenddecl(decl, decl2, force=1):
+ if not decl:
+ decl = {}
+ if not decl2:
+ return decl
+ if decl is decl2:
+ return decl
+ for k in list(decl2.keys()):
+ if k == 'typespec':
+ if force or k not in decl:
+ decl[k] = decl2[k]
+ elif k == 'attrspec':
+ for l in decl2[k]:
+ decl = setattrspec(decl, l, force)
+ elif k == 'kindselector':
+ decl = setkindselector(decl, decl2[k], force)
+ elif k == 'charselector':
+ decl = setcharselector(decl, decl2[k], force)
+ elif k in ['=', 'typename']:
+ if force or k not in decl:
+ decl[k] = decl2[k]
+ elif k == 'note':
+ pass
+ elif k in ['intent', 'check', 'dimension', 'optional',
+ 'required', 'depend']:
+ errmess('appenddecl: "%s" not implemented.\n' % k)
+ else:
+ raise Exception('appenddecl: Unknown variable definition key: ' +
+ str(k))
+ return decl
+
+selectpattern = re.compile(
+ r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
+typedefpattern = re.compile(
+ r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)'
+ r'(?:\((?P<params>[\w,]*)\))?\Z', re.I)
+nameargspattern = re.compile(
+ r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
+operatorpattern = re.compile(
+ r'\s*(?P<scheme>(operator|assignment))'
+ r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I)
+callnameargspattern = re.compile(
+ r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
+real16pattern = re.compile(
+ r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
+real8pattern = re.compile(
+ r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
+
+_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
+
+
+def _is_intent_callback(vdecl):
+ for a in vdecl.get('attrspec', []):
+ if _intentcallbackpattern.match(a):
+ return 1
+ return 0
+
+
+def _resolvetypedefpattern(line):
+ line = ''.join(line.split()) # removes whitespace
+ m1 = typedefpattern.match(line)
+ if m1:
+ attrs = m1.group('attributes')
+ attrs = [a.lower() for a in attrs.split(',')] if attrs else []
+ return m1.group('name'), attrs, m1.group('params')
+ return None, [], None
+
+
+def _resolvenameargspattern(line):
+ line = markouterparen(line)
+ m1 = nameargspattern.match(line)
+ if m1:
+ return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
+ m1 = operatorpattern.match(line)
+ if m1:
+ name = m1.group('scheme') + '(' + m1.group('name') + ')'
+ return name, [], None, None
+ m1 = callnameargspattern.match(line)
+ if m1:
+ return m1.group('name'), m1.group('args'), None, None
+ return None, [], None, None
+
+
+def analyzeline(m, case, line):
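+    """Analyze a single statement that has been classified as case.
+
+    m is the pattern match for line; the extracted information is
+    accumulated in the global groupcache/grouplist structures.
+    """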
+ global groupcounter, groupname, groupcache, grouplist, filepositiontext
+ global currentfilename, f77modulename, neededinterface, neededmodule
+ global expectbegin, gotnextfile, previous_context
+
+ block = m.group('this')
+ if case != 'multiline':
+ previous_context = None
+ if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
+ and not skipemptyends and groupcounter < 1:
+ newname = os.path.basename(currentfilename).split('.')[0]
+ outmess(
+ 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
+ gotnextfile = 0
+ groupcounter = groupcounter + 1
+ groupname[groupcounter] = 'program'
+ groupcache[groupcounter] = {}
+ grouplist[groupcounter] = []
+ groupcache[groupcounter]['body'] = []
+ groupcache[groupcounter]['vars'] = {}
+ groupcache[groupcounter]['block'] = 'program'
+ groupcache[groupcounter]['name'] = newname
+ groupcache[groupcounter]['from'] = 'fromsky'
+ expectbegin = 0
+ if case in ['begin', 'call', 'callfun']:
+ # Crack line => block,name,args,result
+ block = block.lower()
+ if re.match(r'block\s*data', block, re.I):
+ block = 'block data'
+ elif re.match(r'python\s*module', block, re.I):
+ block = 'python module'
+ elif re.match(r'abstract\s*interface', block, re.I):
+ block = 'abstract interface'
+ if block == 'type':
+ name, attrs, _ = _resolvetypedefpattern(m.group('after'))
+ groupcache[groupcounter]['vars'][name] = dict(attrspec = attrs)
+ args = []
+ result = None
+ else:
+ name, args, result, _ = _resolvenameargspattern(m.group('after'))
+ if name is None:
+ if block == 'block data':
+ name = '_BLOCK_DATA_'
+ else:
+ name = ''
+ if block not in ['interface', 'block data', 'abstract interface']:
+ outmess('analyzeline: No name/args pattern found for line.\n')
+
+ previous_context = (block, name, groupcounter)
+ if args:
+ args = rmbadname([x.strip()
+ for x in markoutercomma(args).split('@,@')])
+ else:
+ args = []
+ if '' in args:
+ while '' in args:
+ args.remove('')
+ outmess(
+ 'analyzeline: argument list is malformed (missing argument).\n')
+
+ # end of crack line => block,name,args,result
+ needmodule = 0
+ needinterface = 0
+
+ if case in ['call', 'callfun']:
+ needinterface = 1
+ if 'args' not in groupcache[groupcounter]:
+ return
+ if name not in groupcache[groupcounter]['args']:
+ return
+ for it in grouplist[groupcounter]:
+ if it['name'] == name:
+ return
+ if name in groupcache[groupcounter]['interfaced']:
+ return
+ block = {'call': 'subroutine', 'callfun': 'function'}[case]
+ if f77modulename and neededmodule == -1 and groupcounter <= 1:
+ neededmodule = groupcounter + 2
+ needmodule = 1
+ if block not in ['interface', 'abstract interface']:
+ needinterface = 1
+ # Create new block(s)
+ groupcounter = groupcounter + 1
+ groupcache[groupcounter] = {}
+ grouplist[groupcounter] = []
+ if needmodule:
+ if verbose > 1:
+ outmess('analyzeline: Creating module block %s\n' %
+ repr(f77modulename), 0)
+ groupname[groupcounter] = 'module'
+ groupcache[groupcounter]['block'] = 'python module'
+ groupcache[groupcounter]['name'] = f77modulename
+ groupcache[groupcounter]['from'] = ''
+ groupcache[groupcounter]['body'] = []
+ groupcache[groupcounter]['externals'] = []
+ groupcache[groupcounter]['interfaced'] = []
+ groupcache[groupcounter]['vars'] = {}
+ groupcounter = groupcounter + 1
+ groupcache[groupcounter] = {}
+ grouplist[groupcounter] = []
+ if needinterface:
+ if verbose > 1:
+ outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
+ groupcounter), 0)
+ groupname[groupcounter] = 'interface'
+ groupcache[groupcounter]['block'] = 'interface'
+ groupcache[groupcounter]['name'] = 'unknown_interface'
+ groupcache[groupcounter]['from'] = '%s:%s' % (
+ groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
+ groupcache[groupcounter]['body'] = []
+ groupcache[groupcounter]['externals'] = []
+ groupcache[groupcounter]['interfaced'] = []
+ groupcache[groupcounter]['vars'] = {}
+ groupcounter = groupcounter + 1
+ groupcache[groupcounter] = {}
+ grouplist[groupcounter] = []
+ groupname[groupcounter] = block
+ groupcache[groupcounter]['block'] = block
+ if not name:
+ name = 'unknown_' + block.replace(' ', '_')
+ groupcache[groupcounter]['prefix'] = m.group('before')
+ groupcache[groupcounter]['name'] = rmbadname1(name)
+ groupcache[groupcounter]['result'] = result
+ if groupcounter == 1:
+ groupcache[groupcounter]['from'] = currentfilename
+ else:
+ if f77modulename and groupcounter == 3:
+ groupcache[groupcounter]['from'] = '%s:%s' % (
+ groupcache[groupcounter - 1]['from'], currentfilename)
+ else:
+ groupcache[groupcounter]['from'] = '%s:%s' % (
+ groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
+ for k in list(groupcache[groupcounter].keys()):
+ if not groupcache[groupcounter][k]:
+ del groupcache[groupcounter][k]
+
+ groupcache[groupcounter]['args'] = args
+ groupcache[groupcounter]['body'] = []
+ groupcache[groupcounter]['externals'] = []
+ groupcache[groupcounter]['interfaced'] = []
+ groupcache[groupcounter]['vars'] = {}
+ groupcache[groupcounter]['entry'] = {}
+ # end of creation
+ if block == 'type':
+ groupcache[groupcounter]['varnames'] = []
+
+ if case in ['call', 'callfun']: # set parents variables
+ if name not in groupcache[groupcounter - 2]['externals']:
+ groupcache[groupcounter - 2]['externals'].append(name)
+ groupcache[groupcounter]['vars'] = copy.deepcopy(
+ groupcache[groupcounter - 2]['vars'])
+ try:
+ del groupcache[groupcounter]['vars'][name][
+ groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
+ except Exception:
+ pass
+ if block in ['function', 'subroutine']: # set global attributes
+ try:
+ groupcache[groupcounter]['vars'][name] = appenddecl(
+ groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
+ except Exception:
+ pass
+ if case == 'callfun': # return type
+ if result and result in groupcache[groupcounter]['vars']:
+ if not name == result:
+ groupcache[groupcounter]['vars'][name] = appenddecl(
+ groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
+ # if groupcounter>1: # name is interfaced
+ try:
+ groupcache[groupcounter - 2]['interfaced'].append(name)
+ except Exception:
+ pass
+ if block == 'function':
+ t = typespattern[0].match(m.group('before') + ' ' + name)
+ if t:
+ typespec, selector, attr, edecl = cracktypespec0(
+ t.group('this'), t.group('after'))
+ updatevars(typespec, selector, attr, edecl)
+
+ if case in ['call', 'callfun']:
+ grouplist[groupcounter - 1].append(groupcache[groupcounter])
+ grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+ del grouplist[groupcounter]
+ groupcounter = groupcounter - 1 # end routine
+ grouplist[groupcounter - 1].append(groupcache[groupcounter])
+ grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+ del grouplist[groupcounter]
+ groupcounter = groupcounter - 1 # end interface
+
+ elif case == 'entry':
+ name, args, result, bind = _resolvenameargspattern(m.group('after'))
+ if name is not None:
+ if args:
+ args = rmbadname([x.strip()
+ for x in markoutercomma(args).split('@,@')])
+ else:
+ args = []
+ assert result is None, repr(result)
+ groupcache[groupcounter]['entry'][name] = args
+ previous_context = ('entry', name, groupcounter)
+ elif case == 'type':
+ typespec, selector, attr, edecl = cracktypespec0(
+ block, m.group('after'))
+ last_name = updatevars(typespec, selector, attr, edecl)
+ if last_name is not None:
+ previous_context = ('variable', last_name, groupcounter)
+ elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
+ edecl = groupcache[groupcounter]['vars']
+ ll = m.group('after').strip()
+ i = ll.find('::')
+ if i < 0 and case == 'intent':
+ i = markouterparen(ll).find('@)@') - 2
+ ll = ll[:i + 1] + '::' + ll[i + 1:]
+ i = ll.find('::')
+ if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
+ outmess('All arguments will have attribute %s%s\n' %
+ (m.group('this'), ll[:i]))
+ ll = ll + ','.join(groupcache[groupcounter]['args'])
+ if i < 0:
+ i = 0
+ pl = ''
+ else:
+ pl = ll[:i].strip()
+ ll = ll[i + 2:]
+ ch = markoutercomma(pl).split('@,@')
+ if len(ch) > 1:
+ pl = ch[0]
+ outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
+ ','.join(ch[1:])))
+ last_name = None
+
+ for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
+ m1 = namepattern.match(e)
+ if not m1:
+ if case in ['public', 'private']:
+ k = ''
+ else:
+ print(m.groupdict())
+ outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
+ case, repr(e)))
+ continue
+ else:
+ k = rmbadname1(m1.group('name'))
+ if case in ['public', 'private'] and \
+ (k == 'operator' or k == 'assignment'):
+ k += m1.group('after')
+ if k not in edecl:
+ edecl[k] = {}
+ if case == 'dimension':
+ ap = case + m1.group('after')
+ if case == 'intent':
+ ap = m.group('this') + pl
+ if _intentcallbackpattern.match(ap):
+ if k not in groupcache[groupcounter]['args']:
+ if groupcounter > 1:
+ if '__user__' not in groupcache[groupcounter - 2]['name']:
+ outmess(
+ 'analyzeline: missing __user__ module (possibly harmless)\n')
+ # fixes ticket 1693
+ if k != groupcache[groupcounter]['name']:
+ outmess('analyzeline: appending intent(callback) %s'
+ ' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
+ groupcache[groupcounter]['args'].append(k)
+ else:
+ errmess(
+ 'analyzeline: intent(callback) %s is ignored\n' % (k))
+ else:
+ errmess('analyzeline: intent(callback) %s is already'
+ ' in argument list\n' % (k))
+ if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
+ ap = case
+ if 'attrspec' in edecl[k]:
+ edecl[k]['attrspec'].append(ap)
+ else:
+ edecl[k]['attrspec'] = [ap]
+ if case == 'external':
+ if groupcache[groupcounter]['block'] == 'program':
+ outmess('analyzeline: ignoring program arguments\n')
+ continue
+ if k not in groupcache[groupcounter]['args']:
+ continue
+ if 'externals' not in groupcache[groupcounter]:
+ groupcache[groupcounter]['externals'] = []
+ groupcache[groupcounter]['externals'].append(k)
+ last_name = k
+ groupcache[groupcounter]['vars'] = edecl
+ if last_name is not None:
+ previous_context = ('variable', last_name, groupcounter)
+ elif case == 'moduleprocedure':
+ groupcache[groupcounter]['implementedby'] = \
+ [x.strip() for x in m.group('after').split(',')]
+ elif case == 'parameter':
+ edecl = groupcache[groupcounter]['vars']
+ ll = m.group('after').strip()[1:-1]
+ last_name = None
+ for e in markoutercomma(ll).split('@,@'):
+ try:
+ k, initexpr = [x.strip() for x in e.split('=')]
+ except Exception:
+ outmess(
+ 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
+ continue
+ params = get_parameters(edecl)
+ k = rmbadname1(k)
+ if k not in edecl:
+ edecl[k] = {}
+ if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
+ outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
+ k, edecl[k]['='], initexpr))
+ t = determineexprtype(initexpr, params)
+ if t:
+ if t.get('typespec') == 'real':
+ tt = list(initexpr)
+ for rm in real16pattern.finditer(initexpr):
+ tt[rm.start():rm.end()] = list(
+ initexpr[rm.start():rm.end()].lower().replace('d', 'e'))
+ initexpr = ''.join(tt)
+ elif t.get('typespec') == 'complex':
+ initexpr = initexpr[1:].lower().replace('d', 'e').\
+ replace(',', '+1j*(')
+ try:
+ v = eval(initexpr, {}, params)
+ except (SyntaxError, NameError, TypeError) as msg:
+ errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
+ % (initexpr, msg))
+ continue
+ edecl[k]['='] = repr(v)
+ if 'attrspec' in edecl[k]:
+ edecl[k]['attrspec'].append('parameter')
+ else:
+ edecl[k]['attrspec'] = ['parameter']
+ last_name = k
+ groupcache[groupcounter]['vars'] = edecl
+ if last_name is not None:
+ previous_context = ('variable', last_name, groupcounter)
+ elif case == 'implicit':
+ if m.group('after').strip().lower() == 'none':
+ groupcache[groupcounter]['implicit'] = None
+ elif m.group('after'):
+ if 'implicit' in groupcache[groupcounter]:
+ impl = groupcache[groupcounter]['implicit']
+ else:
+ impl = {}
+ if impl is None:
+ outmess(
+ 'analyzeline: Overwriting earlier "implicit none" statement.\n')
+ impl = {}
+ for e in markoutercomma(m.group('after')).split('@,@'):
+ decl = {}
+ m1 = re.match(
+ r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
+ if not m1:
+ outmess(
+ 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
+ continue
+ m2 = typespattern4implicit.match(m1.group('this'))
+ if not m2:
+ outmess(
+ 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
+ continue
+ typespec, selector, attr, edecl = cracktypespec0(
+ m2.group('this'), m2.group('after'))
+ kindselect, charselect, typename = cracktypespec(
+ typespec, selector)
+ decl['typespec'] = typespec
+ decl['kindselector'] = kindselect
+ decl['charselector'] = charselect
+ decl['typename'] = typename
+ for k in list(decl.keys()):
+ if not decl[k]:
+ del decl[k]
+ for r in markoutercomma(m1.group('after')).split('@,@'):
+ if '-' in r:
+ try:
+ begc, endc = [x.strip() for x in r.split('-')]
+ except Exception:
+ outmess(
+ 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
+ continue
+ else:
+ begc = endc = r.strip()
+ if not len(begc) == len(endc) == 1:
+ outmess(
+ 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
+ continue
+ for o in range(ord(begc), ord(endc) + 1):
+ impl[chr(o)] = decl
+ groupcache[groupcounter]['implicit'] = impl
+ elif case == 'data':
+ ll = []
+ dl = ''
+ il = ''
+ f = 0
+ fc = 1
+ inp = 0
+ for c in m.group('after'):
+ if not inp:
+ if c == "'":
+ fc = not fc
+ if c == '/' and fc:
+ f = f + 1
+ continue
+ if c == '(':
+ inp = inp + 1
+ elif c == ')':
+ inp = inp - 1
+ if f == 0:
+ dl = dl + c
+ elif f == 1:
+ il = il + c
+ elif f == 2:
+ dl = dl.strip()
+ if dl.startswith(','):
+ dl = dl[1:].strip()
+ ll.append([dl, il])
+ dl = c
+ il = ''
+ f = 0
+ if f == 2:
+ dl = dl.strip()
+ if dl.startswith(','):
+ dl = dl[1:].strip()
+ ll.append([dl, il])
+ vars = {}
+ if 'vars' in groupcache[groupcounter]:
+ vars = groupcache[groupcounter]['vars']
+ last_name = None
+ for l in ll:
+ l = [x.strip() for x in l]
+ if l[0][0] == ',':
+ l[0] = l[0][1:]
+ if l[0][0] == '(':
+ outmess(
+ 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
+ continue
+ i = 0
+ j = 0
+ llen = len(l[1])
+ for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
+ if v[0] == '(':
+ outmess(
+ 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
+ # XXX: subsequent init expressions may get wrong values.
+ # Ignoring since data statements are irrelevant for
+ # wrapping.
+ continue
+ fc = 0
+ while (i < llen) and (fc or not l[1][i] == ','):
+ if l[1][i] == "'":
+ fc = not fc
+ i = i + 1
+ i = i + 1
+ if v not in vars:
+ vars[v] = {}
+ if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
+ outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
+ v, vars[v]['='], l[1][j:i - 1]))
+ vars[v]['='] = l[1][j:i - 1]
+ j = i
+ last_name = v
+ groupcache[groupcounter]['vars'] = vars
+ if last_name is not None:
+ previous_context = ('variable', last_name, groupcounter)
+ elif case == 'common':
+ line = m.group('after').strip()
+ if not line[0] == '/':
+ line = '//' + line
+ cl = []
+ f = 0
+ bn = ''
+ ol = ''
+ for c in line:
+ if c == '/':
+ f = f + 1
+ continue
+ if f >= 3:
+ bn = bn.strip()
+ if not bn:
+ bn = '_BLNK_'
+ cl.append([bn, ol])
+ f = f - 2
+ bn = ''
+ ol = ''
+ if f % 2:
+ bn = bn + c
+ else:
+ ol = ol + c
+ bn = bn.strip()
+ if not bn:
+ bn = '_BLNK_'
+ cl.append([bn, ol])
+ commonkey = {}
+ if 'common' in groupcache[groupcounter]:
+ commonkey = groupcache[groupcounter]['common']
+ for c in cl:
+ if c[0] not in commonkey:
+ commonkey[c[0]] = []
+ for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
+ if i:
+ commonkey[c[0]].append(i)
+ groupcache[groupcounter]['common'] = commonkey
+ previous_context = ('common', bn, groupcounter)
+ elif case == 'use':
+ m1 = re.match(
+ r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
+ if m1:
+ mm = m1.groupdict()
+ if 'use' not in groupcache[groupcounter]:
+ groupcache[groupcounter]['use'] = {}
+ name = m1.group('name')
+ groupcache[groupcounter]['use'][name] = {}
+ isonly = 0
+ if 'list' in mm and mm['list'] is not None:
+ if 'notonly' in mm and mm['notonly'] is None:
+ isonly = 1
+ groupcache[groupcounter]['use'][name]['only'] = isonly
+ ll = [x.strip() for x in mm['list'].split(',')]
+ rl = {}
+ for l in ll:
+ if '=' in l:
+ m2 = re.match(
+ r'\A\s*(?P<local>\b\w+\b)\s*=\s*>\s*(?P<use>\b\w+\b)\s*\Z', l, re.I)
+ if m2:
+ rl[m2.group('local').strip()] = m2.group(
+ 'use').strip()
+ else:
+ outmess(
+ 'analyzeline: No local=>use pattern found in %s\n' % repr(l))
+ else:
+ rl[l] = l
+ groupcache[groupcounter]['use'][name]['map'] = rl
+ else:
+ print(m.groupdict())
+ outmess('analyzeline: Could not crack the use statement.\n')
+ elif case in ['f2pyenhancements']:
+ if 'f2pyenhancements' not in groupcache[groupcounter]:
+ groupcache[groupcounter]['f2pyenhancements'] = {}
+ d = groupcache[groupcounter]['f2pyenhancements']
+ if m.group('this') == 'usercode' and 'usercode' in d:
+ if isinstance(d['usercode'], str):
+ d['usercode'] = [d['usercode']]
+ d['usercode'].append(m.group('after'))
+ else:
+ d[m.group('this')] = m.group('after')
+ elif case == 'multiline':
+ if previous_context is None:
+ if verbose:
+ outmess('analyzeline: No context for multiline block.\n')
+ return
+ gc = groupcounter
+ appendmultiline(groupcache[gc],
+ previous_context[:2],
+ m.group('this'))
+ else:
+ if verbose > 1:
+ print(m.groupdict())
+ outmess('analyzeline: No code implemented for line.\n')
+
+
+def appendmultiline(group, context_name, ml):
+ if 'f2pymultilines' not in group:
+ group['f2pymultilines'] = {}
+ d = group['f2pymultilines']
+ if context_name not in d:
+ d[context_name] = []
+ d[context_name].append(ml)
+ return
+
+
+def cracktypespec0(typespec, ll):
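+    """Split a type declaration into its components.
+
+    Returns (typespec, selector, attr, rest) where selector is the
+    kind/char selector following the type name and attr is whatever
+    precedes '::' in the remainder rest.
+    """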
+ selector = None
+ attr = None
+ if re.match(r'double\s*complex', typespec, re.I):
+ typespec = 'double complex'
+ elif re.match(r'double\s*precision', typespec, re.I):
+ typespec = 'double precision'
+ else:
+ typespec = typespec.strip().lower()
+ m1 = selectpattern.match(markouterparen(ll))
+ if not m1:
+ outmess(
+ 'cracktypespec0: no kind/char_selector pattern found for line.\n')
+ return
+ d = m1.groupdict()
+ for k in list(d.keys()):
+ d[k] = unmarkouterparen(d[k])
+ if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
+ selector = d['this']
+ ll = d['after']
+ i = ll.find('::')
+ if i >= 0:
+ attr = ll[:i].strip()
+ ll = ll[i + 2:]
+ return typespec, selector, attr, ll
+#####
+namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I)
+kindselector = re.compile(
+ r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I)
+charselector = re.compile(
+ r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I)
+lenkindpattern = re.compile(
+ r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)'
+ r'|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)'
+ r'|(f2py_len\s*=\s*(?P<f2py_len>.*))|))\s*\Z', re.I)
+lenarraypattern = re.compile(
+ r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
+
+
+def removespaces(expr):
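+    """Remove blanks that are adjacent to an operator, bracket or
+    another blank; e.g. "a + b ( c )" becomes "a+b(c)".
+    """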
+ expr = expr.strip()
+ if len(expr) <= 1:
+ return expr
+ expr2 = expr[0]
+ for i in range(1, len(expr) - 1):
+ if (expr[i] == ' ' and
+ ((expr[i + 1] in "()[]{}=+-/* ") or
+ (expr[i - 1] in "()[]{}=+-/* "))):
+ continue
+ expr2 = expr2 + expr[i]
+ expr2 = expr2 + expr[-1]
+ return expr2
+
+
+def markinnerspaces(line):
+ """
+ The function replaces all spaces in the input variable line that are
+ enclosed in quotation marks with the token "@_@".
+
+ For instance, for the input "a 'b c'" the function returns "a 'b@_@c'".
+
+ Parameters
+ ----------
+ line : str
+
+ Returns
+ -------
+ str
+
+ """
+ fragment = ''
+ inside = False
+ current_quote = None
+ escaped = ''
+ for c in line:
+ if escaped == '\\' and c in ['\\', '\'', '"']:
+ fragment += c
+ escaped = c
+ continue
+ if not inside and c in ['\'', '"']:
+ current_quote = c
+ if c == current_quote:
+ inside = not inside
+ elif c == ' ' and inside:
+ fragment += '@_@'
+ continue
+ fragment += c
+ escaped = c # reset to non-backslash
+ return fragment
+
+
+def updatevars(typespec, selector, attrspec, entitydecl):
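+    """Apply one entity declaration to groupcache[groupcounter]['vars'].
+
+    typespec/selector/attrspec come from cracktypespec0; entitydecl is
+    the comma-separated entity list. Returns the last entity name that
+    was processed, or None.
+    """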
+ global groupcache, groupcounter
+
+ last_name = None
+ kindselect, charselect, typename = cracktypespec(typespec, selector)
+ if attrspec:
+ attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
+ l = []
+ c = re.compile(r'(?P<start>[a-zA-Z]+)')
+ for a in attrspec:
+ if not a:
+ continue
+ m = c.match(a)
+ if m:
+ s = m.group('start').lower()
+ a = s + a[len(s):]
+ l.append(a)
+ attrspec = l
+ el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
+ el1 = []
+ for e in el:
+ for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
+ if e1:
+ el1.append(e1.replace('@_@', ' '))
+ for e in el1:
+ m = namepattern.match(e)
+ if not m:
+ outmess(
+ 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
+ continue
+ ename = rmbadname1(m.group('name'))
+ edecl = {}
+ if ename in groupcache[groupcounter]['vars']:
+ edecl = groupcache[groupcounter]['vars'][ename].copy()
+ not_has_typespec = 'typespec' not in edecl
+ if not_has_typespec:
+ edecl['typespec'] = typespec
+ elif typespec and (not typespec == edecl['typespec']):
+ outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
+ ename, edecl['typespec'], typespec))
+ if 'kindselector' not in edecl:
+ edecl['kindselector'] = copy.copy(kindselect)
+ elif kindselect:
+ for k in list(kindselect.keys()):
+ if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
+ outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
+ k, ename, edecl['kindselector'][k], kindselect[k]))
+ else:
+ edecl['kindselector'][k] = copy.copy(kindselect[k])
+ if 'charselector' not in edecl and charselect:
+ if not_has_typespec:
+ edecl['charselector'] = charselect
+ else:
+ errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
+ % (ename, charselect))
+ elif charselect:
+ for k in list(charselect.keys()):
+ if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
+ outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
+ k, ename, edecl['charselector'][k], charselect[k]))
+ else:
+ edecl['charselector'][k] = copy.copy(charselect[k])
+ if 'typename' not in edecl:
+ edecl['typename'] = typename
+ elif typename and (not edecl['typename'] == typename):
+ outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
+ ename, edecl['typename'], typename))
+ if 'attrspec' not in edecl:
+ edecl['attrspec'] = copy.copy(attrspec)
+ elif attrspec:
+ for a in attrspec:
+ if a not in edecl['attrspec']:
+ edecl['attrspec'].append(a)
+ else:
+ edecl['typespec'] = copy.copy(typespec)
+ edecl['kindselector'] = copy.copy(kindselect)
+ edecl['charselector'] = copy.copy(charselect)
+ edecl['typename'] = typename
+ edecl['attrspec'] = copy.copy(attrspec)
+ if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']:
+ if 'externals' not in groupcache[groupcounter]:
+ groupcache[groupcounter]['externals'] = []
+ groupcache[groupcounter]['externals'].append(e)
+ if m.group('after'):
+ m1 = lenarraypattern.match(markouterparen(m.group('after')))
+ if m1:
+ d1 = m1.groupdict()
+ for lk in ['len', 'array', 'init']:
+ if d1[lk + '2'] is not None:
+ d1[lk] = d1[lk + '2']
+ del d1[lk + '2']
+ for k in list(d1.keys()):
+ if d1[k] is not None:
+ d1[k] = unmarkouterparen(d1[k])
+ else:
+ del d1[k]
+
+ if 'len' in d1:
+ if typespec in ['complex', 'integer', 'logical', 'real']:
+ if ('kindselector' not in edecl) or (not edecl['kindselector']):
+ edecl['kindselector'] = {}
+ edecl['kindselector']['*'] = d1['len']
+ del d1['len']
+ elif typespec == 'character':
+ if ('charselector' not in edecl) or (not edecl['charselector']):
+ edecl['charselector'] = {}
+ if 'len' in edecl['charselector']:
+ del edecl['charselector']['len']
+ edecl['charselector']['*'] = d1['len']
+ del d1['len']
+
+ if 'init' in d1:
+ if '=' in edecl and (not edecl['='] == d1['init']):
+ outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
+ ename, edecl['='], d1['init']))
+ else:
+ edecl['='] = d1['init']
+
+ if 'len' in d1 and 'array' in d1:
+ if d1['len'] == '':
+ d1['len'] = d1['array']
+ del d1['array']
+ else:
+ d1['array'] = d1['array'] + ',' + d1['len']
+ del d1['len']
+ errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
+ typespec, e, typespec, ename, d1['array']))
+
+ if 'array' in d1:
+ dm = 'dimension(%s)' % d1['array']
+ if 'attrspec' not in edecl or (not edecl['attrspec']):
+ edecl['attrspec'] = [dm]
+ else:
+ edecl['attrspec'].append(dm)
+ for dm1 in edecl['attrspec']:
+ if dm1[:9] == 'dimension' and dm1 != dm:
+ del edecl['attrspec'][-1]
+ errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
+ % (ename, dm1, dm))
+ break
+
+ else:
+ outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
+ ename + m.group('after')))
+ for k in list(edecl.keys()):
+ if not edecl[k]:
+ del edecl[k]
+ groupcache[groupcounter]['vars'][ename] = edecl
+ if 'varnames' in groupcache[groupcounter]:
+ groupcache[groupcounter]['varnames'].append(ename)
+ last_name = ename
+ return last_name
+
+
+def cracktypespec(typespec, selector):
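+    """Parse selector according to typespec.
+
+    Returns (kindselect, charselect, typename): a kind selector dict
+    for numeric types, a char selector dict for character, the type
+    name for derived types; unused slots are None.
+    """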
+ kindselect = None
+ charselect = None
+ typename = None
+ if selector:
+ if typespec in ['complex', 'integer', 'logical', 'real']:
+ kindselect = kindselector.match(selector)
+ if not kindselect:
+ outmess(
+ 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
+ return
+ kindselect = kindselect.groupdict()
+ kindselect['*'] = kindselect['kind2']
+ del kindselect['kind2']
+ for k in list(kindselect.keys()):
+ if not kindselect[k]:
+ del kindselect[k]
+ for k, i in list(kindselect.items()):
+ kindselect[k] = rmbadname1(i)
+ elif typespec == 'character':
+ charselect = charselector.match(selector)
+ if not charselect:
+ outmess(
+ 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
+ return
+ charselect = charselect.groupdict()
+ charselect['*'] = charselect['charlen']
+ del charselect['charlen']
+ if charselect['lenkind']:
+ lenkind = lenkindpattern.match(
+ markoutercomma(charselect['lenkind']))
+ lenkind = lenkind.groupdict()
+ for lk in ['len', 'kind']:
+ if lenkind[lk + '2']:
+ lenkind[lk] = lenkind[lk + '2']
+ charselect[lk] = lenkind[lk]
+ del lenkind[lk + '2']
+ if lenkind['f2py_len'] is not None:
+ # used to specify the length of assumed length strings
+ charselect['f2py_len'] = lenkind['f2py_len']
+ del charselect['lenkind']
+ for k in list(charselect.keys()):
+ if not charselect[k]:
+ del charselect[k]
+ for k, i in list(charselect.items()):
+ charselect[k] = rmbadname1(i)
+ elif typespec == 'type':
+ typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
+ if typename:
+ typename = typename.group('name')
+ else:
+ outmess('cracktypespec: no typename found in %s\n' %
+ (repr(typespec + selector)))
+ else:
+ outmess('cracktypespec: no selector used for %s\n' %
+ (repr(selector)))
+ return kindselect, charselect, typename
+######
+
+
+def setattrspec(decl, attr, force=0):
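+    """Append attr to decl['attrspec'] unless it is already present or
+    conflicts with an existing attribute (static/automatic and
+    public/private are mutually exclusive); force appends
+    unconditionally.
+    """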
+ if not decl:
+ decl = {}
+ if not attr:
+ return decl
+ if 'attrspec' not in decl:
+ decl['attrspec'] = [attr]
+ return decl
+ if force:
+ decl['attrspec'].append(attr)
+ if attr in decl['attrspec']:
+ return decl
+ if attr == 'static' and 'automatic' not in decl['attrspec']:
+ decl['attrspec'].append(attr)
+ elif attr == 'automatic' and 'static' not in decl['attrspec']:
+ decl['attrspec'].append(attr)
+ elif attr == 'public':
+ if 'private' not in decl['attrspec']:
+ decl['attrspec'].append(attr)
+ elif attr == 'private':
+ if 'public' not in decl['attrspec']:
+ decl['attrspec'].append(attr)
+ else:
+ decl['attrspec'].append(attr)
+ return decl
+
+
+def setkindselector(decl, sel, force=0):
+ if not decl:
+ decl = {}
+ if not sel:
+ return decl
+ if 'kindselector' not in decl:
+ decl['kindselector'] = sel
+ return decl
+ for k in list(sel.keys()):
+ if force or k not in decl['kindselector']:
+ decl['kindselector'][k] = sel[k]
+ return decl
+
+
+def setcharselector(decl, sel, force=0):
+ if not decl:
+ decl = {}
+ if not sel:
+ return decl
+ if 'charselector' not in decl:
+ decl['charselector'] = sel
+ return decl
+
+ for k in list(sel.keys()):
+ if force or k not in decl['charselector']:
+ decl['charselector'][k] = sel[k]
+ return decl
+
+
+def getblockname(block, unknown='unknown'):
+ if 'name' in block:
+ return block['name']
+ return unknown
+
+# post processing
+
+
+def setmesstext(block):
+ global filepositiontext
+
+ try:
+ filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
+ except Exception:
+ pass
+
+
+def get_usedict(block):
+ usedict = {}
+ if 'parent_block' in block:
+ usedict = get_usedict(block['parent_block'])
+ if 'use' in block:
+ usedict.update(block['use'])
+ return usedict
+
+
+def get_useparameters(block, param_map=None):
+ global f90modulevars
+
+ if param_map is None:
+ param_map = {}
+ usedict = get_usedict(block)
+ if not usedict:
+ return param_map
+ for usename, mapping in list(usedict.items()):
+ usename = usename.lower()
+ if usename not in f90modulevars:
+ outmess('get_useparameters: no information about module %s (used by %s)\n' %
+ (usename, block.get('name')))
+ continue
+ mvars = f90modulevars[usename]
+ params = get_parameters(mvars)
+ if not params:
+ continue
+ # XXX: apply mapping
+ if mapping:
+ errmess('get_useparameters: mapping for %s not impl.\n' % (mapping))
+ for k, v in list(params.items()):
+ if k in param_map:
+ outmess('get_useparameters: overriding parameter %s with'
+ ' value from module %s\n' % (repr(k), repr(usename)))
+ param_map[k] = v
+
+ return param_map
+
+
+def postcrack2(block, tab='', param_map=None):
+ global f90modulevars
+
+ if not f90modulevars:
+ return block
+ if isinstance(block, list):
+ ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
+ for g in block]
+ return ret
+ setmesstext(block)
+ outmess('%sBlock: %s\n' % (tab, block['name']), 0)
+
+ if param_map is None:
+ param_map = get_useparameters(block)
+
+ if param_map is not None and 'vars' in block:
+ vars = block['vars']
+ for n in list(vars.keys()):
+ var = vars[n]
+ if 'kindselector' in var:
+ kind = var['kindselector']
+ if 'kind' in kind:
+ val = kind['kind']
+ if val in param_map:
+ kind['kind'] = param_map[val]
+ new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
+ for b in block['body']]
+ block['body'] = new_body
+
+ return block
+
+
+def postcrack(block, args=None, tab=''):
+ """
+ TODO:
+ function return values
+ determine expression types if in argument list
+ """
+ global usermodules, onlyfunctions
+
+ if isinstance(block, list):
+ gret = []
+ uret = []
+ for g in block:
+ setmesstext(g)
+ g = postcrack(g, tab=tab + '\t')
+ # sort user routines to appear first
+ if 'name' in g and '__user__' in g['name']:
+ uret.append(g)
+ else:
+ gret.append(g)
+ return uret + gret
+ setmesstext(block)
+ if not isinstance(block, dict) or 'block' not in block:
+ raise Exception('postcrack: Expected block dictionary instead of ' +
+ str(block))
+ if 'name' in block and not block['name'] == 'unknown_interface':
+ outmess('%sBlock: %s\n' % (tab, block['name']), 0)
+ block = analyzeargs(block)
+ block = analyzecommon(block)
+ block['vars'] = analyzevars(block)
+ block['sortvars'] = sortvarnames(block['vars'])
+ if 'args' in block and block['args']:
+ args = block['args']
+ block['body'] = analyzebody(block, args, tab=tab)
+
+ userisdefined = []
+ if 'use' in block:
+ useblock = block['use']
+ for k in list(useblock.keys()):
+ if '__user__' in k:
+ userisdefined.append(k)
+ else:
+ useblock = {}
+ name = ''
+ if 'name' in block:
+ name = block['name']
+ # and not userisdefined: # Build a __user__ module
+ if 'externals' in block and block['externals']:
+ interfaced = []
+ if 'interfaced' in block:
+ interfaced = block['interfaced']
+ mvars = copy.copy(block['vars'])
+ if name:
+ mname = name + '__user__routines'
+ else:
+ mname = 'unknown__user__routines'
+ if mname in userisdefined:
+ i = 1
+ while '%s_%i' % (mname, i) in userisdefined:
+ i = i + 1
+ mname = '%s_%i' % (mname, i)
+ interface = {'block': 'interface', 'body': [],
+ 'vars': {}, 'name': name + '_user_interface'}
+ for e in block['externals']:
+ if e in interfaced:
+ edef = []
+ j = -1
+ for b in block['body']:
+ j = j + 1
+ if b['block'] == 'interface':
+ i = -1
+ for bb in b['body']:
+ i = i + 1
+ if 'name' in bb and bb['name'] == e:
+ edef = copy.copy(bb)
+ del b['body'][i]
+ break
+ if edef:
+ if not b['body']:
+ del block['body'][j]
+ del interfaced[interfaced.index(e)]
+ break
+ interface['body'].append(edef)
+ else:
+ if e in mvars and not isexternal(mvars[e]):
+ interface['vars'][e] = mvars[e]
+ if interface['vars'] or interface['body']:
+ block['interfaced'] = interfaced
+ mblock = {'block': 'python module', 'body': [
+ interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
+ useblock[mname] = {}
+ usermodules.append(mblock)
+ if useblock:
+ block['use'] = useblock
+ return block
+
+
+def sortvarnames(vars):
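+    """Return the names in vars, dependency-free names first and
+    dependent names ordered after the names they depend on; cycles
+    are reported and appended unsorted.
+    """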
+ indep = []
+ dep = []
+ for v in list(vars.keys()):
+ if 'depend' in vars[v] and vars[v]['depend']:
+ dep.append(v)
+ else:
+ indep.append(v)
+ n = len(dep)
+ i = 0
+ while dep: # XXX: How to catch dependence cycles correctly?
+ v = dep[0]
+ fl = 0
+ for w in dep[1:]:
+ if w in vars[v]['depend']:
+ fl = 1
+ break
+ if fl:
+ dep = dep[1:] + [v]
+ i = i + 1
+ if i > n:
+ errmess('sortvarnames: failed to compute dependencies because'
+ ' of cyclic dependencies between '
+ + ', '.join(dep) + '\n')
+ indep = indep + dep
+ break
+ else:
+ indep.append(v)
+ dep = dep[1:]
+ n = len(dep)
+ i = 0
+ return indep
+
+
+def analyzecommon(block):
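+    """Fold dimension specs of common-block members into the attrspec
+    of block['vars'], reduce block['common'] entries to plain names
+    and collect the member names in block['commonvars'].
+    """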
+ if not hascommon(block):
+ return block
+ commonvars = []
+ for k in list(block['common'].keys()):
+ comvars = []
+ for e in block['common'][k]:
+ m = re.match(
+ r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
+ if m:
+ dims = []
+ if m.group('dims'):
+ dims = [x.strip()
+ for x in markoutercomma(m.group('dims')).split('@,@')]
+ n = rmbadname1(m.group('name').strip())
+ if n in block['vars']:
+ if 'attrspec' in block['vars'][n]:
+ block['vars'][n]['attrspec'].append(
+ 'dimension(%s)' % (','.join(dims)))
+ else:
+ block['vars'][n]['attrspec'] = [
+ 'dimension(%s)' % (','.join(dims))]
+ else:
+ if dims:
+ block['vars'][n] = {
+ 'attrspec': ['dimension(%s)' % (','.join(dims))]}
+ else:
+ block['vars'][n] = {}
+ if n not in commonvars:
+ commonvars.append(n)
+ else:
+ n = e
+ errmess(
+ 'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
+ comvars.append(n)
+ block['common'][k] = comvars
+ if 'commonvars' not in block:
+ block['commonvars'] = commonvars
+ else:
+ block['commonvars'] = block['commonvars'] + commonvars
+ return block
+
+
+def analyzebody(block, args, tab=''):
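+    """Recursively postcrack the sub-blocks of block: 'python module'
+    blocks are moved into usermodules, Fortran module variables are
+    recorded in f90modulevars, and empty interfaces are dropped.
+    Returns the new body list.
+    """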
+ global usermodules, skipfuncs, onlyfuncs, f90modulevars
+
+ setmesstext(block)
+ body = []
+ for b in block['body']:
+ b['parent_block'] = block
+ if b['block'] in ['function', 'subroutine']:
+ if args is not None and b['name'] not in args:
+ continue
+ else:
+ as_ = b['args']
+ if b['name'] in skipfuncs:
+ continue
+ if onlyfuncs and b['name'] not in onlyfuncs:
+ continue
+ b['saved_interface'] = crack2fortrangen(
+ b, '\n' + ' ' * 6, as_interface=True)
+
+ else:
+ as_ = args
+ b = postcrack(b, as_, tab=tab + '\t')
+ if b['block'] in ['interface', 'abstract interface'] and \
+ not b['body'] and not b.get('implementedby'):
+ if 'f2pyenhancements' not in b:
+ continue
+ if b['block'].replace(' ', '') == 'pythonmodule':
+ usermodules.append(b)
+ else:
+ if b['block'] == 'module':
+ f90modulevars[b['name']] = b['vars']
+ body.append(b)
+ return body
+
+
+def buildimplicitrules(block):
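+    """Return (implicitrules, attrrules) for block: the default
+    letter-to-type mapping updated by its IMPLICIT statements, or
+    None in place of implicitrules after IMPLICIT NONE.
+    """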
+ setmesstext(block)
+ implicitrules = defaultimplicitrules
+ attrrules = {}
+ if 'implicit' in block:
+ if block['implicit'] is None:
+ implicitrules = None
+ if verbose > 1:
+ outmess(
+ 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
+ else:
+ for k in list(block['implicit'].keys()):
+ if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
+ implicitrules[k] = block['implicit'][k]
+ else:
+ attrrules[k] = block['implicit'][k]['typespec']
+ return implicitrules, attrrules
+
+
+def myeval(e, g=None, l=None):
+ """ Like `eval` but returns only integers and floats """
+ r = eval(e, g, l)
+ if type(r) in [int, float]:
+ return r
+ raise ValueError('r=%r' % (r))
+
+
+getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
+
+
+def getlincoef(e, xset): # e = a*x+b ; x in xset
+ """
+ Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in
+ xset.
+
+ >>> getlincoef('2*x + 1', {'x'})
+ (2, 1, 'x')
+ >>> getlincoef('3*x + x*2 + 2 + 1', {'x'})
+ (5, 3, 'x')
+ >>> getlincoef('0', {'x'})
+ (0, 0, None)
+ >>> getlincoef('0*x', {'x'})
+ (0, 0, 'x')
+ >>> getlincoef('x*x', {'x'})
+ (None, None, None)
+
+ This can be tricked by sufficiently complex expressions
+
+ >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'})
+ (2.0, 3.0, 'x')
+ """
+ try:
+ c = int(myeval(e, {}, {}))
+ return 0, c, None
+ except Exception:
+ pass
+ if getlincoef_re_1.match(e):
+ return 1, 0, e
+ len_e = len(e)
+ for x in xset:
+ if len(x) > len_e:
+ continue
+ if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
+ # skip function calls having x as an argument, e.g max(1, x)
+ continue
+ re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
+ m = re_1.match(e)
+ if m:
+ try:
+ m1 = re_1.match(e)
+ while m1:
+ ee = '%s(%s)%s' % (
+ m1.group('before'), 0, m1.group('after'))
+ m1 = re_1.match(ee)
+ b = myeval(ee, {}, {})
+ m1 = re_1.match(e)
+ while m1:
+ ee = '%s(%s)%s' % (
+ m1.group('before'), 1, m1.group('after'))
+ m1 = re_1.match(ee)
+ a = myeval(ee, {}, {}) - b
+ m1 = re_1.match(e)
+ while m1:
+ ee = '%s(%s)%s' % (
+ m1.group('before'), 0.5, m1.group('after'))
+ m1 = re_1.match(ee)
+ c = myeval(ee, {}, {})
+ # computing another point to be sure that expression is linear
+ m1 = re_1.match(e)
+ while m1:
+ ee = '%s(%s)%s' % (
+ m1.group('before'), 1.5, m1.group('after'))
+ m1 = re_1.match(ee)
+ c2 = myeval(ee, {}, {})
+ if (a * 0.5 + b == c and a * 1.5 + b == c2):
+ return a, b, x
+ except Exception:
+ pass
+ break
+ return None, None, None
+
+
+word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
+
+
+def _get_depend_dict(name, vars, deps):
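+    """Collect (and cache in deps) the names that the initialization
+    expression of name depends on, transitively.
+    """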
+ if name in vars:
+ words = vars[name].get('depend', [])
+
+ if '=' in vars[name] and not isstring(vars[name]):
+ for word in word_pattern.findall(vars[name]['=']):
+ # The word_pattern may return values that are not
+ # only variables, they can be string content for instance
+ if word not in words and word in vars and word != name:
+ words.append(word)
+ for word in words[:]:
+ for w in deps.get(word, []) \
+ or _get_depend_dict(word, vars, deps):
+ if w not in words:
+ words.append(w)
+ else:
+ outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
+ words = []
+ deps[name] = words
+ return words
+
+
+def _calc_depend_dict(vars):
+ names = list(vars.keys())
+ depend_dict = {}
+ for n in names:
+ _get_depend_dict(n, vars, depend_dict)
+ return depend_dict
+
+
+def get_sorted_names(vars):
+ """
+ """
+ depend_dict = _calc_depend_dict(vars)
+ names = []
+ for name in list(depend_dict.keys()):
+ if not depend_dict[name]:
+ names.append(name)
+ del depend_dict[name]
+ while depend_dict:
+ for name, lst in list(depend_dict.items()):
+ new_lst = [n for n in lst if n in depend_dict]
+ if not new_lst:
+ names.append(name)
+ del depend_dict[name]
+ else:
+ depend_dict[name] = new_lst
+ return [name for name in names if name in vars]
+
+
+def _kind_func(string):
+ # XXX: return something sensible.
+ if string[0] in "'\"":
+ string = string[1:-1]
+ if real16pattern.match(string):
+ return 8
+ elif real8pattern.match(string):
+ return 4
+ return 'kind(' + string + ')'
+
+
+def _selected_int_kind_func(r):
+ # XXX: This should be processor dependent
+ m = 10 ** r
+ if m <= 2 ** 8:
+ return 1
+ if m <= 2 ** 16:
+ return 2
+ if m <= 2 ** 32:
+ return 4
+ if m <= 2 ** 63:
+ return 8
+ if m <= 2 ** 128:
+ return 16
+ return -1
+
+
+def _selected_real_kind_func(p, r=0, radix=0):
+ # XXX: This should be processor dependent
+ # This is only good for 0 <= p <= 20
+ if p < 7:
+ return 4
+ if p < 16:
+ return 8
+ machine = platform.machine().lower()
+ if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
+ if p <= 20:
+ return 16
+ else:
+ if p < 19:
+ return 10
+ elif p <= 20:
+ return 16
+ return -1
+
+
+def get_parameters(vars, global_params={}):
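+    """Resolve the values of PARAMETER constants declared in vars.
+
+    Returns a name -> value mapping; kind(), selected_int_kind() and
+    selected_real_kind() are emulated by the helper functions above.
+    """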
+ params = copy.copy(global_params)
+ g_params = copy.copy(global_params)
+ for name, func in [('kind', _kind_func),
+ ('selected_int_kind', _selected_int_kind_func),
+ ('selected_real_kind', _selected_real_kind_func), ]:
+ if name not in g_params:
+ g_params[name] = func
+ param_names = []
+ for n in get_sorted_names(vars):
+ if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
+ param_names.append(n)
+ kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+ selected_int_kind_re = re.compile(
+ r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+ selected_kind_re = re.compile(
+ r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+ for n in param_names:
+ if '=' in vars[n]:
+ v = vars[n]['=']
+ if islogical(vars[n]):
+ v = v.lower()
+ for repl in [
+ ('.false.', 'False'),
+ ('.true.', 'True'),
+ # TODO: test .eq., .neq., etc replacements.
+ ]:
+ v = v.replace(*repl)
+ v = kind_re.sub(r'kind("\1")', v)
+ v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
+
+ # We need to act according to the data.
+ # The easy case is if the data has a kind-specifier,
+ # then we may easily remove those specifiers.
+ # However, it may be that the user uses other specifiers...(!)
+ is_replaced = False
+ if 'kindselector' in vars[n]:
+ if 'kind' in vars[n]['kindselector']:
+ orig_v_len = len(v)
+ v = v.replace('_' + vars[n]['kindselector']['kind'], '')
+ # Again, this will be true if even a single specifier
+ # has been replaced, see comment above.
+ is_replaced = len(v) < orig_v_len
+
+ if not is_replaced:
+ if not selected_kind_re.match(v):
+ v_ = v.split('_')
+ # In case there are additive parameters
+ if len(v_) > 1:
+ v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')
+
+ # Currently this will not work for complex numbers.
+ # There is missing code for extracting a complex number,
+ # which may be defined in either of these:
+ # a) (Re, Im)
+ # b) cmplx(Re, Im)
+ # c) dcmplx(Re, Im)
+ # d) cmplx(Re, Im, <prec>)
+
+ if isdouble(vars[n]):
+ tt = list(v)
+ for m in real16pattern.finditer(v):
+ tt[m.start():m.end()] = list(
+ v[m.start():m.end()].lower().replace('d', 'e'))
+ v = ''.join(tt)
+
+ elif iscomplex(vars[n]):
+ outmess(f'get_parameters[TODO]: '
+ f'implement evaluation of complex expression {v}\n')
+
+ # Handle _dp for gh-6624
+ # Also fixes gh-20460
+ if real16pattern.search(v):
+ v = 8
+ elif real8pattern.search(v):
+ v = 4
+ try:
+ params[n] = eval(v, g_params, params)
+
+ except Exception as msg:
+ params[n] = v
+ outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
+ if isstring(vars[n]) and isinstance(params[n], int):
+ params[n] = chr(params[n])
+ nl = n.lower()
+ if nl != n:
+ params[nl] = params[n]
+ else:
+ print(vars[n])
+ outmess(
+ 'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
+ return params
+
+
+def _eval_length(length, params):
+ if length in ['(:)', '(*)', '*']:
+ return '(*)'
+ return _eval_scalar(length, params)
+
+
+_is_kind_number = re.compile(r'\d+_').match
+
+
+def _eval_scalar(value, params):
+ if _is_kind_number(value):
+ value = value.split('_')[0]
+ try:
+ # TODO: use symbolic from PR #19805
+ value = eval(value, {}, params)
+ value = (repr if isinstance(value, str) else str)(value)
+ except (NameError, SyntaxError, TypeError):
+ return value
+ except Exception as msg:
+ errmess('"%s" in evaluating %r '
+ '(available names: %s)\n'
+ % (msg, value, list(params.keys())))
+ return value
+
+
+def analyzevars(block):
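+    """Complete the variable information of block: apply implicit
+    rules, evaluate kind/char selectors and dimensions against known
+    parameters, derive intent/depend/check attributes, and drop
+    variables that the wrapper does not need. Returns the finished
+    vars dictionary.
+    """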
+ global f90modulevars
+
+ setmesstext(block)
+ implicitrules, attrrules = buildimplicitrules(block)
+ vars = copy.copy(block['vars'])
+ if block['block'] == 'function' and block['name'] not in vars:
+ vars[block['name']] = {}
+ if '' in block['vars']:
+ del vars['']
+ if 'attrspec' in block['vars']['']:
+ gen = block['vars']['']['attrspec']
+ for n in set(vars) | set(b['name'] for b in block['body']):
+ for k in ['public', 'private']:
+ if k in gen:
+ vars[n] = setattrspec(vars.get(n, {}), k)
+ svars = []
+ args = block['args']
+ for a in args:
+ try:
+ vars[a]
+ svars.append(a)
+ except KeyError:
+ pass
+ for n in list(vars.keys()):
+ if n not in args:
+ svars.append(n)
+
+ params = get_parameters(vars, get_useparameters(block))
+
+ dep_matches = {}
+ name_match = re.compile(r'[A-Za-z][\w$]*').match
+ for v in list(vars.keys()):
+ m = name_match(v)
+ if m:
+ n = v[m.start():m.end()]
+ try:
+ dep_matches[n]
+ except KeyError:
+ dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
+ for n in svars:
+ if n[0] in list(attrrules.keys()):
+ vars[n] = setattrspec(vars[n], attrrules[n[0]])
+ if 'typespec' not in vars[n]:
+ if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
+ if implicitrules:
+ ln0 = n[0].lower()
+ for k in list(implicitrules[ln0].keys()):
+ if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
+ continue
+ if k not in vars[n]:
+ vars[n][k] = implicitrules[ln0][k]
+ elif k == 'attrspec':
+ for l in implicitrules[ln0][k]:
+ vars[n] = setattrspec(vars[n], l)
+ elif n in block['args']:
+ outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
+ repr(n), block['name']))
+ if 'charselector' in vars[n]:
+ if 'len' in vars[n]['charselector']:
+ l = vars[n]['charselector']['len']
+ try:
+ l = str(eval(l, {}, params))
+ except Exception:
+ pass
+ vars[n]['charselector']['len'] = l
+
+ if 'kindselector' in vars[n]:
+ if 'kind' in vars[n]['kindselector']:
+ l = vars[n]['kindselector']['kind']
+ try:
+ l = str(eval(l, {}, params))
+ except Exception:
+ pass
+ vars[n]['kindselector']['kind'] = l
+
+ dimension_exprs = {}
+ if 'attrspec' in vars[n]:
+ attr = vars[n]['attrspec']
+ attr.reverse()
+ vars[n]['attrspec'] = []
+ dim, intent, depend, check, note = None, None, None, None, None
+ for a in attr:
+ if a[:9] == 'dimension':
+ dim = (a[9:].strip())[1:-1]
+ elif a[:6] == 'intent':
+ intent = (a[6:].strip())[1:-1]
+ elif a[:6] == 'depend':
+ depend = (a[6:].strip())[1:-1]
+ elif a[:5] == 'check':
+ check = (a[5:].strip())[1:-1]
+ elif a[:4] == 'note':
+ note = (a[4:].strip())[1:-1]
+ else:
+ vars[n] = setattrspec(vars[n], a)
+ if intent:
+ if 'intent' not in vars[n]:
+ vars[n]['intent'] = []
+ for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
+ # Remove spaces so that 'in out' becomes 'inout'
+ tmp = c.replace(' ', '')
+ if tmp not in vars[n]['intent']:
+ vars[n]['intent'].append(tmp)
+ intent = None
+ if note:
+ note = note.replace('\\n\\n', '\n\n')
+ note = note.replace('\\n ', '\n')
+ if 'note' not in vars[n]:
+ vars[n]['note'] = [note]
+ else:
+ vars[n]['note'].append(note)
+ note = None
+ if depend is not None:
+ if 'depend' not in vars[n]:
+ vars[n]['depend'] = []
+ for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
+ if c not in vars[n]['depend']:
+ vars[n]['depend'].append(c)
+ depend = None
+ if check is not None:
+ if 'check' not in vars[n]:
+ vars[n]['check'] = []
+ for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
+ if c not in vars[n]['check']:
+ vars[n]['check'].append(c)
+ check = None
+ if dim and 'dimension' not in vars[n]:
+ vars[n]['dimension'] = []
+ for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
+ star = ':' if d == ':' else '*'
+ # Evaluate `d` with respect to params
+ if d in params:
+ d = str(params[d])
+ for p in params:
+ re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
+ m = re_1.match(d)
+ while m:
+ d = m.group('before') + \
+ str(params[p]) + m.group('after')
+ m = re_1.match(d)
+
+ if d == star:
+ dl = [star]
+ else:
+ dl = markoutercomma(d, ':').split('@:@')
+ if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
+ dl = ['*']
+ d = '*'
+ if len(dl) == 1 and dl[0] != star:
+ dl = ['1', dl[0]]
+ if len(dl) == 2:
+ d1, d2 = map(symbolic.Expr.parse, dl)
+ dsize = d2 - d1 + 1
+ d = dsize.tostring(language=symbolic.Language.C)
+ # find variables v that define d as a linear
+ # function, `d == a * v + b`, and store
+ # coefficients a and b for further analysis.
+ solver_and_deps = {}
+ for v in block['vars']:
+ s = symbolic.as_symbol(v)
+ if dsize.contains(s):
+ try:
+ a, b = dsize.linear_solve(s)
+
+ def solve_v(s, a=a, b=b):
+ return (s - b) / a
+
+ all_symbols = set(a.symbols())
+ all_symbols.update(b.symbols())
+ except RuntimeError as msg:
+ # d is not a linear function of v,
+ # however, if v can be determined
+ # from d using other means,
+ # implement the corresponding
+ # solve_v function here.
+ solve_v = None
+ all_symbols = set(dsize.symbols())
+ v_deps = set(
+ s.data for s in all_symbols
+ if s.data in vars)
+ solver_and_deps[v] = solve_v, list(v_deps)
+ # Note that dsize may contain symbols that are
+ # not defined in block['vars']. Here we assume
+ # these correspond to Fortran/C intrinsic
+ # functions or that are defined by other
+ # means. We'll let the compiler validate the
+ # definiteness of such symbols.
+ dimension_exprs[d] = solver_and_deps
+ vars[n]['dimension'].append(d)
+
+ if 'check' not in vars[n] and 'args' in block and n in block['args']:
+ # n is an argument that has no checks defined. Here we
+ # generate some consistency checks for n, and when n is an
+ # array, generate checks for its dimensions and construct
+ # initialization expressions.
+ n_deps = vars[n].get('depend', [])
+ n_checks = []
+ n_is_input = l_or(isintent_in, isintent_inout,
+ isintent_inplace)(vars[n])
+ if isarray(vars[n]): # n is array
+ for i, d in enumerate(vars[n]['dimension']):
+ coeffs_and_deps = dimension_exprs.get(d)
+ if coeffs_and_deps is None:
+ # d is `:` or `*` or a constant expression
+ pass
+ elif n_is_input:
+ # n is an input array argument and its shape
+ # may define variables used in dimension
+ # specifications.
+ for v, (solver, deps) in coeffs_and_deps.items():
+ def compute_deps(v, deps):
+ for v1 in coeffs_and_deps.get(v, [None, []])[1]:
+ if v1 not in deps:
+ deps.add(v1)
+ compute_deps(v1, deps)
+ all_deps = set()
+ compute_deps(v, all_deps)
+ if ((v in n_deps
+ or '=' in vars[v]
+ or 'depend' in vars[v])):
+ # Skip a variable that
+ # - n depends on
+ # - has user-defined initialization expression
+ # - has user-defined dependencies
+ continue
+ if solver is not None and v not in all_deps:
+ # v can be solved from d, hence, we
+ # make it an optional argument with
+ # initialization expression:
+ is_required = False
+ init = solver(symbolic.as_symbol(
+ f'shape({n}, {i})'))
+ init = init.tostring(
+ language=symbolic.Language.C)
+ vars[v]['='] = init
+ # n needs to be initialized before v. So,
+ # making v dependent on n and on any
+ # variables in solver or d.
+ vars[v]['depend'] = [n] + deps
+ if 'check' not in vars[v]:
+ # add check only when no
+ # user-specified checks exist
+ vars[v]['check'] = [
+ f'shape({n}, {i}) == {d}']
+ else:
+ # d is a non-linear function on v,
+ # hence, v must be a required input
+ # argument that n will depend on
+ is_required = True
+ if 'intent' not in vars[v]:
+ vars[v]['intent'] = []
+ if 'in' not in vars[v]['intent']:
+ vars[v]['intent'].append('in')
+ # v needs to be initialized before n
+ n_deps.append(v)
+ n_checks.append(
+ f'shape({n}, {i}) == {d}')
+ v_attr = vars[v].get('attrspec', [])
+ if not ('optional' in v_attr
+ or 'required' in v_attr):
+ v_attr.append(
+ 'required' if is_required else 'optional')
+ if v_attr:
+ vars[v]['attrspec'] = v_attr
+ if coeffs_and_deps is not None:
+ # extend v dependencies with ones specified in attrspec
+ for v, (solver, deps) in coeffs_and_deps.items():
+ v_deps = vars[v].get('depend', [])
+ for aa in vars[v].get('attrspec', []):
+ if aa.startswith('depend'):
+ aa = ''.join(aa.split())
+ v_deps.extend(aa[7:-1].split(','))
+ if v_deps:
+ vars[v]['depend'] = list(set(v_deps))
+ if n not in v_deps:
+ n_deps.append(v)
+ elif isstring(vars[n]):
+ if 'charselector' in vars[n]:
+ if '*' in vars[n]['charselector']:
+ length = _eval_length(vars[n]['charselector']['*'],
+ params)
+ vars[n]['charselector']['*'] = length
+ elif 'len' in vars[n]['charselector']:
+ length = _eval_length(vars[n]['charselector']['len'],
+ params)
+ del vars[n]['charselector']['len']
+ vars[n]['charselector']['*'] = length
+ if n_checks:
+ vars[n]['check'] = n_checks
+ if n_deps:
+ vars[n]['depend'] = list(set(n_deps))
+
+ if '=' in vars[n]:
+ if 'attrspec' not in vars[n]:
+ vars[n]['attrspec'] = []
+ if ('optional' not in vars[n]['attrspec']) and \
+ ('required' not in vars[n]['attrspec']):
+ vars[n]['attrspec'].append('optional')
+ if 'depend' not in vars[n]:
+ vars[n]['depend'] = []
+ for v, m in list(dep_matches.items()):
+ if m(vars[n]['=']):
+ vars[n]['depend'].append(v)
+ if not vars[n]['depend']:
+ del vars[n]['depend']
+ if isscalar(vars[n]):
+ vars[n]['='] = _eval_scalar(vars[n]['='], params)
+
+ for n in list(vars.keys()):
+ if n == block['name']: # n is block name
+ if 'note' in vars[n]:
+ block['note'] = vars[n]['note']
+ if block['block'] == 'function':
+ if 'result' in block and block['result'] in vars:
+ vars[n] = appenddecl(vars[n], vars[block['result']])
+ if 'prefix' in block:
+ pr = block['prefix']
+ pr1 = pr.replace('pure', '')
+ ispure = (not pr == pr1)
+ pr = pr1.replace('recursive', '')
+ isrec = (not pr == pr1)
+ m = typespattern[0].match(pr)
+ if m:
+ typespec, selector, attr, edecl = cracktypespec0(
+ m.group('this'), m.group('after'))
+ kindselect, charselect, typename = cracktypespec(
+ typespec, selector)
+ vars[n]['typespec'] = typespec
+ if kindselect:
+ if 'kind' in kindselect:
+ try:
+ kindselect['kind'] = eval(
+ kindselect['kind'], {}, params)
+ except Exception:
+ pass
+ vars[n]['kindselector'] = kindselect
+ if charselect:
+ vars[n]['charselector'] = charselect
+ if typename:
+ vars[n]['typename'] = typename
+ if ispure:
+ vars[n] = setattrspec(vars[n], 'pure')
+ if isrec:
+ vars[n] = setattrspec(vars[n], 'recursive')
+ else:
+ outmess(
+ 'analyzevars: prefix (%s) was not used\n' % repr(block['prefix']))
+ if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
+ if 'commonvars' in block:
+ neededvars = copy.copy(block['args'] + block['commonvars'])
+ else:
+ neededvars = copy.copy(block['args'])
+ for n in list(vars.keys()):
+ if l_or(isintent_callback, isintent_aux)(vars[n]):
+ neededvars.append(n)
+ if 'entry' in block:
+ neededvars.extend(list(block['entry'].keys()))
+ for k in list(block['entry'].keys()):
+ for n in block['entry'][k]:
+ if n not in neededvars:
+ neededvars.append(n)
+ if block['block'] == 'function':
+ if 'result' in block:
+ neededvars.append(block['result'])
+ else:
+ neededvars.append(block['name'])
+ if block['block'] in ['subroutine', 'function']:
+ name = block['name']
+ if name in vars and 'intent' in vars[name]:
+ block['intent'] = vars[name]['intent']
+ if block['block'] == 'type':
+ neededvars.extend(list(vars.keys()))
+ for n in list(vars.keys()):
+ if n not in neededvars:
+ del vars[n]
+ return vars
+
+
+analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
+
+
+def expr2name(a, block, args=[]):
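+    """Return a valid variable name for argument a of block.
+
+    An expression argument such as "n+1" is given a generated name
+    like "e_n_1_e" whose deduced type is registered in block['vars'];
+    plain names are returned unchanged after their vars entry is
+    ensured.
+    """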
+ orig_a = a
+ a_is_expr = not analyzeargs_re_1.match(a)
+ if a_is_expr: # `a` is an expression
+ implicitrules, attrrules = buildimplicitrules(block)
+ at = determineexprtype(a, block['vars'], implicitrules)
+ na = 'e_'
+ for c in a:
+ c = c.lower()
+ if c not in string.ascii_lowercase + string.digits:
+ c = '_'
+ na = na + c
+ if na[-1] == '_':
+ na = na + 'e'
+ else:
+ na = na + '_e'
+ a = na
+ while a in block['vars'] or a in block['args']:
+ a = a + 'r'
+ if a in args:
+ k = 1
+ while a + str(k) in args:
+ k = k + 1
+ a = a + str(k)
+ if a_is_expr:
+ block['vars'][a] = at
+ else:
+ if a not in block['vars']:
+ if orig_a in block['vars']:
+ block['vars'][a] = block['vars'][orig_a]
+ else:
+ block['vars'][a] = {}
+ if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
+ block['vars'][a] = setattrspec(block['vars'][a], 'external')
+ return a
+
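+# Illustrative trace (assumption, not upstream documentation): for the
+# expression 'a+b', expr2name derives the name 'e_a_b_e', infers its type
+# with determineexprtype, and registers it in block['vars'] so that the
+# expression can be handled like an ordinary argument.
+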
+
+def analyzeargs(block):
+ setmesstext(block)
+ implicitrules, _ = buildimplicitrules(block)
+ if 'args' not in block:
+ block['args'] = []
+ args = []
+ for a in block['args']:
+ a = expr2name(a, block, args)
+ args.append(a)
+ block['args'] = args
+ if 'entry' in block:
+ for k, args1 in list(block['entry'].items()):
+ for a in args1:
+ if a not in block['vars']:
+ block['vars'][a] = {}
+
+ for b in block['body']:
+ if b['name'] in args:
+ if 'externals' not in block:
+ block['externals'] = []
+ if b['name'] not in block['externals']:
+ block['externals'].append(b['name'])
+ if 'result' in block and block['result'] not in block['vars']:
+ block['vars'][block['result']] = {}
+ return block
+
+determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I)
+determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>\w+)|)\Z', re.I)
+determineexprtype_re_3 = re.compile(
+ r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>\w+)|)\Z', re.I)
+determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
+determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
+
+
+def _ensure_exprdict(r):
+ if isinstance(r, int):
+ return {'typespec': 'integer'}
+ if isinstance(r, float):
+ return {'typespec': 'real'}
+ if isinstance(r, complex):
+ return {'typespec': 'complex'}
+ if isinstance(r, dict):
+ return r
+ raise AssertionError(repr(r))
+
+
+def determineexprtype(expr, vars, rules={}):
+ if expr in vars:
+ return _ensure_exprdict(vars[expr])
+ expr = expr.strip()
+ if determineexprtype_re_1.match(expr):
+ return {'typespec': 'complex'}
+ m = determineexprtype_re_2.match(expr)
+ if m:
+ if 'name' in m.groupdict() and m.group('name'):
+ outmess(
+ 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
+ return {'typespec': 'integer'}
+ m = determineexprtype_re_3.match(expr)
+ if m:
+ if 'name' in m.groupdict() and m.group('name'):
+ outmess(
+ 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
+ return {'typespec': 'real'}
+ for op in ['+', '-', '*', '/']:
+ for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
+ if e in vars:
+ return _ensure_exprdict(vars[e])
+ t = {}
+ if determineexprtype_re_4.match(expr): # in parenthesis
+ t = determineexprtype(expr[1:-1], vars, rules)
+ else:
+ m = determineexprtype_re_5.match(expr)
+ if m:
+ rn = m.group('name')
+ t = determineexprtype(m.group('name'), vars, rules)
+ if t and 'attrspec' in t:
+ del t['attrspec']
+ if not t:
+ if rn[0] in rules:
+ return _ensure_exprdict(rules[rn[0]])
+ if expr[0] in '\'"':
+ return {'typespec': 'character', 'charselector': {'*': '*'}}
+ if not t:
+        outmess(
+            'determineexprtype: could not determine the type of expression %s.\n' % (repr(expr)))
+ return t
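+
+# Illustrative classifications (assumption, derived from the regexes above):
+#   '(1.,2.)' -> {'typespec': 'complex'}
+#   '42'      -> {'typespec': 'integer'}
+#   '3.5d0'   -> {'typespec': 'real'}
+#   "'abc'"   -> {'typespec': 'character', 'charselector': {'*': '*'}}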
+
+######
+
+
+def crack2fortrangen(block, tab='\n', as_interface=False):
+ global skipfuncs, onlyfuncs
+
+ setmesstext(block)
+ ret = ''
+ if isinstance(block, list):
+ for g in block:
+ if g and g['block'] in ['function', 'subroutine']:
+ if g['name'] in skipfuncs:
+ continue
+ if onlyfuncs and g['name'] not in onlyfuncs:
+ continue
+ ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
+ return ret
+ prefix = ''
+ name = ''
+ args = ''
+ blocktype = block['block']
+ if blocktype == 'program':
+ return ''
+ argsl = []
+ if 'name' in block:
+ name = block['name']
+ if 'args' in block:
+ vars = block['vars']
+ for a in block['args']:
+ a = expr2name(a, block, argsl)
+ if not isintent_callback(vars[a]):
+ argsl.append(a)
+ if block['block'] == 'function' or argsl:
+ args = '(%s)' % ','.join(argsl)
+ f2pyenhancements = ''
+ if 'f2pyenhancements' in block:
+ for k in list(block['f2pyenhancements'].keys()):
+ f2pyenhancements = '%s%s%s %s' % (
+ f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
+ intent_lst = block.get('intent', [])[:]
+ if blocktype == 'function' and 'callback' in intent_lst:
+ intent_lst.remove('callback')
+ if intent_lst:
+ f2pyenhancements = '%s%sintent(%s) %s' %\
+ (f2pyenhancements, tab + tabchar,
+ ','.join(intent_lst), name)
+ use = ''
+ if 'use' in block:
+ use = use2fortran(block['use'], tab + tabchar)
+ common = ''
+ if 'common' in block:
+ common = common2fortran(block['common'], tab + tabchar)
+ if name == 'unknown_interface':
+ name = ''
+ result = ''
+ if 'result' in block:
+ result = ' result (%s)' % block['result']
+ if block['result'] not in argsl:
+ argsl.append(block['result'])
+ body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface)
+ vars = vars2fortran(
+ block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
+ mess = ''
+ if 'from' in block and not as_interface:
+ mess = '! in %s' % block['from']
+ if 'entry' in block:
+ entry_stmts = ''
+ for k, i in list(block['entry'].items()):
+ entry_stmts = '%s%sentry %s(%s)' \
+ % (entry_stmts, tab + tabchar, k, ','.join(i))
+ body = body + entry_stmts
+ if blocktype == 'block data' and name == '_BLOCK_DATA_':
+ name = ''
+ ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
+ tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
+ return ret
+
+
+def common2fortran(common, tab=''):
+ ret = ''
+ for k in list(common.keys()):
+ if k == '_BLNK_':
+ ret = '%s%scommon %s' % (ret, tab, ','.join(common[k]))
+ else:
+ ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k]))
+ return ret
+
+
+def use2fortran(use, tab=''):
+ ret = ''
+ for m in list(use.keys()):
+ ret = '%s%suse %s,' % (ret, tab, m)
+ if use[m] == {}:
+ if ret and ret[-1] == ',':
+ ret = ret[:-1]
+ continue
+ if 'only' in use[m] and use[m]['only']:
+ ret = '%s only:' % (ret)
+ if 'map' in use[m] and use[m]['map']:
+ c = ' '
+ for k in list(use[m]['map'].keys()):
+ if k == use[m]['map'][k]:
+ ret = '%s%s%s' % (ret, c, k)
+ c = ','
+ else:
+ ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k])
+ c = ','
+ if ret and ret[-1] == ',':
+ ret = ret[:-1]
+ return ret
+
+
+def true_intent_list(var):
+ lst = var['intent']
+ ret = []
+ for intent in lst:
+ try:
+ f = globals()['isintent_%s' % intent]
+ except KeyError:
+ pass
+ else:
+ if f(var):
+ ret.append(intent)
+ return ret
+
+
+def vars2fortran(block, vars, args, tab='', as_interface=False):
+ """
+ TODO:
+ public sub
+ ...
+ """
+ setmesstext(block)
+ ret = ''
+ nout = []
+ for a in args:
+ if a in block['vars']:
+ nout.append(a)
+ if 'commonvars' in block:
+ for a in block['commonvars']:
+ if a in vars:
+ if a not in nout:
+ nout.append(a)
+ else:
+ errmess(
+ 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
+ if 'varnames' in block:
+ nout.extend(block['varnames'])
+ if not as_interface:
+ for a in list(vars.keys()):
+ if a not in nout:
+ nout.append(a)
+ for a in nout:
+ if 'depend' in vars[a]:
+ for d in vars[a]['depend']:
+ if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
+ errmess(
+ 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
+ if 'externals' in block and a in block['externals']:
+ if isintent_callback(vars[a]):
+ ret = '%s%sintent(callback) %s' % (ret, tab, a)
+ ret = '%s%sexternal %s' % (ret, tab, a)
+ if isoptional(vars[a]):
+ ret = '%s%soptional %s' % (ret, tab, a)
+ if a in vars and 'typespec' not in vars[a]:
+ continue
+ cont = 1
+ for b in block['body']:
+ if a == b['name'] and b['block'] == 'function':
+ cont = 0
+ break
+ if cont:
+ continue
+ if a not in vars:
+ show(vars)
+ outmess('vars2fortran: No definition for argument "%s".\n' % a)
+ continue
+ if a == block['name']:
+ if block['block'] != 'function' or block.get('result'):
+ # 1) skip declaring a variable that name matches with
+ # subroutine name
+ # 2) skip declaring function when its type is
+ # declared via `result` construction
+ continue
+ if 'typespec' not in vars[a]:
+ if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
+ if a in args:
+ ret = '%s%sexternal %s' % (ret, tab, a)
+ continue
+ show(vars[a])
+ outmess('vars2fortran: No typespec for argument "%s".\n' % a)
+ continue
+ vardef = vars[a]['typespec']
+ if vardef == 'type' and 'typename' in vars[a]:
+ vardef = '%s(%s)' % (vardef, vars[a]['typename'])
+ selector = {}
+ if 'kindselector' in vars[a]:
+ selector = vars[a]['kindselector']
+ elif 'charselector' in vars[a]:
+ selector = vars[a]['charselector']
+ if '*' in selector:
+ if selector['*'] in ['*', ':']:
+ vardef = '%s*(%s)' % (vardef, selector['*'])
+ else:
+ vardef = '%s*%s' % (vardef, selector['*'])
+ else:
+ if 'len' in selector:
+ vardef = '%s(len=%s' % (vardef, selector['len'])
+ if 'kind' in selector:
+ vardef = '%s,kind=%s)' % (vardef, selector['kind'])
+ else:
+ vardef = '%s)' % (vardef)
+ elif 'kind' in selector:
+ vardef = '%s(kind=%s)' % (vardef, selector['kind'])
+ c = ' '
+ if 'attrspec' in vars[a]:
+ attr = [l for l in vars[a]['attrspec']
+ if l not in ['external']]
+ if as_interface and 'intent(in)' in attr and 'intent(out)' in attr:
+                # In Fortran, specifying both intent(in) and intent(out)
+                # is a conflict; the combined in/out behaviour can be
+                # requested only via `!f2py intent(out) ..`.
+                # So, for the Fortran interface, we'll drop
+                # intent(out) to resolve the conflict.
+ attr.remove('intent(out)')
+ if attr:
+ vardef = '%s, %s' % (vardef, ','.join(attr))
+ c = ','
+ if 'dimension' in vars[a]:
+ vardef = '%s%sdimension(%s)' % (
+ vardef, c, ','.join(vars[a]['dimension']))
+ c = ','
+ if 'intent' in vars[a]:
+ lst = true_intent_list(vars[a])
+ if lst:
+ vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
+ c = ','
+ if 'check' in vars[a]:
+ vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
+ c = ','
+ if 'depend' in vars[a]:
+ vardef = '%s%sdepend(%s)' % (
+ vardef, c, ','.join(vars[a]['depend']))
+ c = ','
+ if '=' in vars[a]:
+ v = vars[a]['=']
+ if vars[a]['typespec'] in ['complex', 'double complex']:
+ try:
+ v = eval(v)
+ v = '(%s,%s)' % (v.real, v.imag)
+ except Exception:
+ pass
+ vardef = '%s :: %s=%s' % (vardef, a, v)
+ else:
+ vardef = '%s :: %s' % (vardef, a)
+ ret = '%s%s%s' % (ret, tab, vardef)
+ return ret
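+
+# Illustrative output (assumption): for vars == {'a': {'typespec': 'real',
+# 'attrspec': ['optional'], 'dimension': ['n']}} the generated declaration
+# line is
+#     real, optional,dimension(n) :: a
+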
+######
+
+
+# We expose post_processing_hooks as a global variable so that user
+# libraries can register their own hooks with f2py.
+post_processing_hooks = []
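+
+# Illustrative registration sketch (assumption, not upstream code; `my_hook`
+# is hypothetical). A hook follows the `visit` protocol of `traverse` below:
+#
+#     def my_hook(item, parents, result, *args, **kwargs):
+#         key, value = item
+#         ...  # return (key, new_value) to rewrite, or None to recurse
+#
+#     post_processing_hooks.append(my_hook)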
+
+
+def crackfortran(files):
+ global usermodules, post_processing_hooks
+
+ outmess('Reading fortran codes...\n', 0)
+ readfortrancode(files, crackline)
+ outmess('Post-processing...\n', 0)
+ usermodules = []
+ postlist = postcrack(grouplist[0])
+ outmess('Applying post-processing hooks...\n', 0)
+ for hook in post_processing_hooks:
+ outmess(f' {hook.__name__}\n', 0)
+ postlist = traverse(postlist, hook)
+ outmess('Post-processing (stage 2)...\n', 0)
+ postlist = postcrack2(postlist)
+ return usermodules + postlist
+
+
+def crack2fortran(block):
+ global f2py_version
+
+ pyf = crack2fortrangen(block) + '\n'
+ header = """! -*- f90 -*-
+! Note: the contents of this file are case sensitive.
+"""
+ footer = """
+! This file was auto-generated with f2py (version:%s).
+! See:
+! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
+""" % (f2py_version)
+ return header + pyf + footer
+
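+# Illustrative round trip (assumption; 'example.f90' is hypothetical):
+# crackfortran() parses sources into a list of block dictionaries and
+# crack2fortran() renders them back as a .pyf signature:
+#
+#     blocks = crackfortran(['example.f90'])
+#     print(crack2fortran(blocks))
+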
+
+def _is_visit_pair(obj):
+ return (isinstance(obj, tuple)
+ and len(obj) == 2
+ and isinstance(obj[0], (int, str)))
+
+
+def traverse(obj, visit, parents=[], result=None, *args, **kwargs):
+ '''Traverse f2py data structure with the following visit function:
+
+ def visit(item, parents, result, *args, **kwargs):
+ """
+
+    parents is a list of key-"f2py data structure" pairs from which
+    items are taken.
+
+    result is an f2py data structure that is filled with the
+    return value of the visit function.
+
+    item is a 2-tuple (index, value) if parents[-1][1] is a list
+    item is a 2-tuple (key, value) if parents[-1][1] is a dict
+
+    The return value of visit must be None, or of the same kind as
+    item, that is, if parents[-1][1] is a list, the return value must
+    be a 2-tuple (new_index, new_value), or if parents[-1][1] is a
+    dict, the return value must be a 2-tuple (new_key, new_value).
+
+ If new_index or new_value is None, the return value of visit
+ is ignored, that is, it will not be added to the result.
+
+ If the return value is None, the content of obj will be
+ traversed, otherwise not.
+ """
+ '''
+
+ if _is_visit_pair(obj):
+ if obj[0] == 'parent_block':
+ # avoid infinite recursion
+ return obj
+ new_result = visit(obj, parents, result, *args, **kwargs)
+ if new_result is not None:
+ assert _is_visit_pair(new_result)
+ return new_result
+ parent = obj
+ result_key, obj = obj
+ else:
+ parent = (None, obj)
+ result_key = None
+
+ if isinstance(obj, list):
+ new_result = []
+ for index, value in enumerate(obj):
+ new_index, new_item = traverse((index, value), visit,
+ parents=parents + [parent],
+ result=result, *args, **kwargs)
+ if new_index is not None:
+ new_result.append(new_item)
+ elif isinstance(obj, dict):
+ new_result = dict()
+ for key, value in obj.items():
+ new_key, new_value = traverse((key, value), visit,
+ parents=parents + [parent],
+ result=result, *args, **kwargs)
+ if new_key is not None:
+ new_result[new_key] = new_value
+ else:
+ new_result = obj
+
+ if result_key is None:
+ return new_result
+ return result_key, new_result
+
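+# Illustrative visit function (assumption, not upstream code): rename every
+# dict key 'foo' to 'bar', delegating everything else to the default
+# recursion by returning None:
+#
+#     def rename_foo(item, parents, result, *args, **kwargs):
+#         key, value = item
+#         if key == 'foo':
+#             return ('bar', value)
+#         return None
+#
+#     new_tree = traverse(tree, rename_foo)
+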
+
+def character_backward_compatibility_hook(item, parents, result,
+ *args, **kwargs):
+ """Previously, Fortran character was incorrectly treated as
+ character*1. This hook fixes the usage of the corresponding
+ variables in `check`, `dimension`, `=`, and `callstatement`
+ expressions.
+
+    The usage of `char*` in a `callprotoargument` expression can be left
+    unchanged because the C `character` type is a typedef of `char`;
+    new implementations, however, should use `character*` in the
+    corresponding expressions.
+
+ See https://github.com/numpy/numpy/pull/19388 for more information.
+
+ """
+ parent_key, parent_value = parents[-1]
+ key, value = item
+
+ def fix_usage(varname, value):
+ value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value)
+ value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]',
+ varname, value)
+ return value
+
+ if parent_key in ['dimension', 'check']:
+ assert parents[-3][0] == 'vars'
+ vars_dict = parents[-3][1]
+ elif key == '=':
+ assert parents[-2][0] == 'vars'
+ vars_dict = parents[-2][1]
+ else:
+ vars_dict = None
+
+ new_value = None
+ if vars_dict is not None:
+ new_value = value
+ for varname, vd in vars_dict.items():
+ if ischaracter(vd):
+ new_value = fix_usage(varname, new_value)
+ elif key == 'callstatement':
+ vars_dict = parents[-2][1]['vars']
+ new_value = value
+ for varname, vd in vars_dict.items():
+ if ischaracter(vd):
+ # replace all occurrences of `<varname>` with
+ # `&<varname>` in argument passing
+ new_value = re.sub(
+ r'(?<![&])\b' + varname + r'\b', '&' + varname, new_value)
+
+ if new_value is not None:
+ if new_value != value:
+            # We report the replacements here so that downstream
+            # software can update its sources accordingly. However,
+            # such updates are recommended only when backward
+            # compatibility with numpy 1.21 or older is not required.
+ outmess(f'character_bc_hook[{parent_key}.{key}]:'
+ f' replaced `{value}` -> `{new_value}`\n', 1)
+ return (key, new_value)
+
+
+post_processing_hooks.append(character_backward_compatibility_hook)
+
+
+if __name__ == "__main__":
+ files = []
+ funcs = []
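+    # Scanner state (assumption about intent, not upstream documentation):
+    # f routes plain arguments (1 -> files, 0 after ':' -> function names);
+    # f2 and f3 mark that the next argument is the -h output file or the
+    # -m module name, respectively.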
+ f = 1
+ f2 = 0
+ f3 = 0
+ showblocklist = 0
+ for l in sys.argv[1:]:
+ if l == '':
+ pass
+ elif l[0] == ':':
+ f = 0
+ elif l == '-quiet':
+ quiet = 1
+ verbose = 0
+ elif l == '-verbose':
+ verbose = 2
+ quiet = 0
+ elif l == '-fix':
+ if strictf77:
+ outmess(
+ 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
+ skipemptyends = 1
+ sourcecodeform = 'fix'
+ elif l == '-skipemptyends':
+ skipemptyends = 1
+ elif l == '--ignore-contains':
+ ignorecontains = 1
+ elif l == '-f77':
+ strictf77 = 1
+ sourcecodeform = 'fix'
+ elif l == '-f90':
+ strictf77 = 0
+ sourcecodeform = 'free'
+ skipemptyends = 1
+ elif l == '-h':
+ f2 = 1
+ elif l == '-show':
+ showblocklist = 1
+ elif l == '-m':
+ f3 = 1
+ elif l[0] == '-':
+ errmess('Unknown option %s\n' % repr(l))
+ elif f2:
+ f2 = 0
+ pyffilename = l
+ elif f3:
+ f3 = 0
+ f77modulename = l
+ elif f:
+ try:
+ open(l).close()
+ files.append(l)
+ except OSError as detail:
+ errmess(f'OSError: {detail!s}\n')
+ else:
+ funcs.append(l)
+ if not strictf77 and f77modulename and not skipemptyends:
+ outmess("""\
+ Warning: You have specified a module name for non-Fortran-77 code that
+ should not need one (except if you are scanning F90 code for non-module
+ blocks; in that case use the -skipemptyends flag and also make sure that
+ the files do not contain programs without a program statement).
+""", 0)
+
+ postlist = crackfortran(files)
+ if pyffilename:
+ outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
+ pyf = crack2fortran(postlist)
+ with open(pyffilename, 'w') as f:
+ f.write(pyf)
+ if showblocklist:
+ show(postlist)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/diagnose.py b/venv/lib/python3.9/site-packages/numpy/f2py/diagnose.py
new file mode 100644
index 00000000..21ee399f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/diagnose.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+import os
+import sys
+import tempfile
+
+
+def run_command(cmd):
+ print('Running %r:' % (cmd))
+ os.system(cmd)
+ print('------')
+
+
+def run():
+ _path = os.getcwd()
+ os.chdir(tempfile.gettempdir())
+ print('------')
+ print('os.name=%r' % (os.name))
+ print('------')
+ print('sys.platform=%r' % (sys.platform))
+ print('------')
+ print('sys.version:')
+ print(sys.version)
+ print('------')
+ print('sys.prefix:')
+ print(sys.prefix)
+ print('------')
+ print('sys.path=%r' % (':'.join(sys.path)))
+ print('------')
+
+ try:
+ import numpy
+ has_newnumpy = 1
+ except ImportError:
+ print('Failed to import new numpy:', sys.exc_info()[1])
+ has_newnumpy = 0
+
+ try:
+ from numpy.f2py import f2py2e
+ has_f2py2e = 1
+ except ImportError:
+ print('Failed to import f2py2e:', sys.exc_info()[1])
+ has_f2py2e = 0
+
+ try:
+ import numpy.distutils
+ has_numpy_distutils = 2
+ except ImportError:
+ try:
+ import numpy_distutils
+ has_numpy_distutils = 1
+ except ImportError:
+ print('Failed to import numpy_distutils:', sys.exc_info()[1])
+ has_numpy_distutils = 0
+
+ if has_newnumpy:
+ try:
+ print('Found new numpy version %r in %s' %
+ (numpy.__version__, numpy.__file__))
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+
+ if has_f2py2e:
+ try:
+ print('Found f2py2e version %r in %s' %
+ (f2py2e.__version__.version, f2py2e.__file__))
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+
+ if has_numpy_distutils:
+ try:
+ if has_numpy_distutils == 2:
+ print('Found numpy.distutils version %r in %r' % (
+ numpy.distutils.__version__,
+ numpy.distutils.__file__))
+ else:
+ print('Found numpy_distutils version %r in %r' % (
+ numpy_distutils.numpy_distutils_version.numpy_distutils_version,
+ numpy_distutils.__file__))
+ print('------')
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+ try:
+ if has_numpy_distutils == 1:
+ print(
+ 'Importing numpy_distutils.command.build_flib ...', end=' ')
+ import numpy_distutils.command.build_flib as build_flib
+ print('ok')
+ print('------')
+ try:
+ print(
+ 'Checking availability of supported Fortran compilers:')
+ for compiler_class in build_flib.all_compilers:
+ compiler_class(verbose=1).is_available()
+ print('------')
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+ except Exception as msg:
+ print(
+                    'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)')
+ print('------')
+ try:
+ if has_numpy_distutils == 2:
+ print('Importing numpy.distutils.fcompiler ...', end=' ')
+ import numpy.distutils.fcompiler as fcompiler
+ else:
+ print('Importing numpy_distutils.fcompiler ...', end=' ')
+ import numpy_distutils.fcompiler as fcompiler
+ print('ok')
+ print('------')
+ try:
+ print('Checking availability of supported Fortran compilers:')
+ fcompiler.show_fcompilers()
+ print('------')
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+ try:
+ if has_numpy_distutils == 2:
+ print('Importing numpy.distutils.cpuinfo ...', end=' ')
+ from numpy.distutils.cpuinfo import cpuinfo
+ print('ok')
+ print('------')
+ else:
+ try:
+ print(
+ 'Importing numpy_distutils.command.cpuinfo ...', end=' ')
+ from numpy_distutils.command.cpuinfo import cpuinfo
+ print('ok')
+ print('------')
+ except Exception as msg:
+ print('error:', msg, '(ignore it)')
+ print('Importing numpy_distutils.cpuinfo ...', end=' ')
+ from numpy_distutils.cpuinfo import cpuinfo
+ print('ok')
+ print('------')
+ cpu = cpuinfo()
+ print('CPU information:', end=' ')
+ for name in dir(cpuinfo):
+ if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
+ print(name[1:], end=' ')
+ print('------')
+ except Exception as msg:
+ print('error:', msg)
+ print('------')
+ os.chdir(_path)
+
+
+if __name__ == "__main__":
+ run()
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/f2py2e.py b/venv/lib/python3.9/site-packages/numpy/f2py/f2py2e.py
new file mode 100644
index 00000000..10508488
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/f2py2e.py
@@ -0,0 +1,704 @@
+#!/usr/bin/env python3
+"""
+
+f2py2e - Fortran to Python C/API generator. 2nd Edition.
+ See __usage__ below.
+
+Copyright 1999--2011 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/05/06 08:31:19 $
+Pearu Peterson
+
+"""
+import sys
+import os
+import pprint
+import re
+from pathlib import Path
+
+from . import crackfortran
+from . import rules
+from . import cb_rules
+from . import auxfuncs
+from . import cfuncs
+from . import f90mod_rules
+from . import __version__
+from . import capi_maps
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+errmess = sys.stderr.write
+# outmess=sys.stdout.write
+show = pprint.pprint
+outmess = auxfuncs.outmess
+
+__usage__ =\
+f"""Usage:
+
+1) To construct extension module sources:
+
+ f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
+ <fortran functions> ] \\
+ [: <fortran files> ...]
+
+2) To compile fortran files and build extension modules:
+
+ f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
+
+3) To generate signature files:
+
+ f2py -h <filename.pyf> ...< same options as in (1) >
+
+Description: This program generates a Python C/API file (<modulename>module.c)
+ that contains wrappers for given fortran functions so that they
+ can be called from Python. With the -c option the corresponding
+ extension modules are built.
+
+Options:
+
+ --2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
+ --2d-numeric Use f2py2e tool with Numeric support.
+ --2d-numarray Use f2py2e tool with Numarray support.
+ --g3-numpy Use 3rd generation f2py from the separate f2py package.
+ [NOT AVAILABLE YET]
+
+ -h <filename> Write signatures of the fortran routines to file <filename>
+ and exit. You can then edit <filename> and use it instead
+ of <fortran files>. If <filename>==stdout then the
+ signatures are printed to stdout.
+ <fortran functions> Names of fortran routines for which Python C/API
+ functions will be generated. Default is all that are found
+ in <fortran files>.
+ <fortran files> Paths to fortran/signature files that will be scanned for
+ <fortran functions> in order to determine their signatures.
+ skip: Ignore fortran functions that follow until `:'.
+ only: Use only fortran functions that follow until `:'.
+ : Get back to <fortran files> mode.
+
+ -m <modulename> Name of the module; f2py generates a Python/C API
+ file <modulename>module.c or extension module <modulename>.
+ Default is 'untitled'.
+
+ '-include<header>' Writes additional headers in the C wrapper, can be passed
+ multiple times, generates #include <header> each time.
+
+ --[no-]lower Do [not] lower the cases in <fortran files>. By default,
+ --lower is assumed with -h key, and --no-lower without -h key.
+
+ --build-dir <dirname> All f2py generated files are created in <dirname>.
+ Default is tempfile.mkdtemp().
+
+ --overwrite-signature Overwrite existing signature file.
+
+ --[no-]latex-doc Create (or not) <modulename>module.tex.
+ Default is --no-latex-doc.
+ --short-latex Create 'incomplete' LaTeX document (without commands
+ \\documentclass, \\tableofcontents, and \\begin{{document}},
+ \\end{{document}}).
+
+ --[no-]rest-doc Create (or not) <modulename>module.rst.
+ Default is --no-rest-doc.
+
+ --debug-capi Create C/API code that reports the state of the wrappers
+ during runtime. Useful for debugging.
+
+ --[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
+ functions. --wrap-functions is default because it ensures
+ maximum portability/compiler independence.
+
+ --include-paths <path1>:<path2>:... Search include files from the given
+ directories.
+
+ --help-link [..] List system resources found by system_info.py. See also
+ --link-<resource> switch below. [..] is optional list
+ of resources names. E.g. try 'f2py --help-link lapack_opt'.
+
+ --f2cmap <filename> Load Fortran-to-Python KIND specification from the given
+ file. Default: .f2py_f2cmap in current directory.
+
+ --quiet Run quietly.
+ --verbose Run with extra verbosity.
+ --skip-empty-wrappers Only generate wrapper files when needed.
+ -v Print f2py version ID and exit.
+
+
+numpy.distutils options (only effective with -c):
+
+ --fcompiler= Specify Fortran compiler type by vendor
+ --compiler= Specify C compiler type (as defined by distutils)
+
+ --help-fcompiler List available Fortran compilers and exit
+ --f77exec= Specify the path to F77 compiler
+ --f90exec= Specify the path to F90 compiler
+ --f77flags= Specify F77 compiler flags
+ --f90flags= Specify F90 compiler flags
+ --opt= Specify optimization flags
+ --arch= Specify architecture specific optimization flags
+ --noopt Compile without optimization
+ --noarch Compile without arch-dependent optimization
+ --debug Compile with debugging information
+
+Extra options (only effective with -c):
+
+ --link-<resource> Link extension module with <resource> as defined
+ by numpy.distutils/system_info.py. E.g. to link
+ with optimized LAPACK libraries (vecLib on MacOSX,
+ ATLAS elsewhere), use --link-lapack_opt.
+ See also --help-link switch.
+
+ -L/path/to/lib/ -l<libname>
+ -D<define> -U<name>
+ -I/path/to/include/
+ <filename>.o <filename>.so <filename>.a
+
+ Using the following macros may be required with non-gcc Fortran
+ compilers:
+ -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
+ -DUNDERSCORE_G77
+
+ When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
+ interface is printed out at exit (platforms: Linux).
+
+ When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
+ sent to stderr whenever F2PY interface makes a copy of an
+ array. Integer <int> sets the threshold for array sizes when
+ a message should be shown.
+
+Version: {f2py_version}
+numpy Version: {numpy_version}
+Requires: Python 3.5 or higher.
+License: NumPy license (see LICENSE.txt in the NumPy source code)
+Copyright 1999 - 2011 Pearu Peterson all rights reserved.
+https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e"""
+
+
+def scaninputline(inputline):
+ files, skipfuncs, onlyfuncs, debug = [], [], [], []
+ f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
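+    # Scanner state (assumption about intent, not upstream documentation):
+    # f routes plain arguments (1 -> files, 0 -> only: names, -1 -> skip:
+    # names); f2, f3, f6, f7, f8, f9 and f10 mark that the next argument is
+    # the value of -h, -m, --build-dir, --include-paths, --coutput,
+    # --f2py-wrapper-output or --f2cmap, respectively; f5 records
+    # --show-compilers.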
+ verbose = 1
+ emptygen = True
+ dolc = -1
+ dolatexdoc = 0
+ dorestdoc = 0
+ wrapfuncs = 1
+ buildpath = '.'
+ include_paths = []
+ signsfile, modulename = None, None
+ options = {'buildpath': buildpath,
+ 'coutput': None,
+ 'f2py_wrapper_output': None}
+ for l in inputline:
+ if l == '':
+ pass
+ elif l == 'only:':
+ f = 0
+ elif l == 'skip:':
+ f = -1
+ elif l == ':':
+ f = 1
+ elif l[:8] == '--debug-':
+ debug.append(l[8:])
+ elif l == '--lower':
+ dolc = 1
+ elif l == '--build-dir':
+ f6 = 1
+ elif l == '--no-lower':
+ dolc = 0
+ elif l == '--quiet':
+ verbose = 0
+ elif l == '--verbose':
+ verbose += 1
+ elif l == '--latex-doc':
+ dolatexdoc = 1
+ elif l == '--no-latex-doc':
+ dolatexdoc = 0
+ elif l == '--rest-doc':
+ dorestdoc = 1
+ elif l == '--no-rest-doc':
+ dorestdoc = 0
+ elif l == '--wrap-functions':
+ wrapfuncs = 1
+ elif l == '--no-wrap-functions':
+ wrapfuncs = 0
+ elif l == '--short-latex':
+ options['shortlatex'] = 1
+ elif l == '--coutput':
+ f8 = 1
+ elif l == '--f2py-wrapper-output':
+ f9 = 1
+ elif l == '--f2cmap':
+ f10 = 1
+ elif l == '--overwrite-signature':
+ options['h-overwrite'] = 1
+ elif l == '-h':
+ f2 = 1
+ elif l == '-m':
+ f3 = 1
+ elif l[:2] == '-v':
+ print(f2py_version)
+ sys.exit()
+ elif l == '--show-compilers':
+ f5 = 1
+ elif l[:8] == '-include':
+ cfuncs.outneeds['userincludes'].append(l[9:-1])
+ cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
+        elif l[:15] == '--include_paths':
+ outmess(
+ 'f2py option --include_paths is deprecated, use --include-paths instead.\n')
+ f7 = 1
+        elif l[:15] == '--include-paths':
+ f7 = 1
+ elif l == '--skip-empty-wrappers':
+ emptygen = False
+ elif l[0] == '-':
+ errmess('Unknown option %s\n' % repr(l))
+ sys.exit()
+ elif f2:
+ f2 = 0
+ signsfile = l
+ elif f3:
+ f3 = 0
+ modulename = l
+ elif f6:
+ f6 = 0
+ buildpath = l
+ elif f7:
+ f7 = 0
+ include_paths.extend(l.split(os.pathsep))
+ elif f8:
+ f8 = 0
+ options["coutput"] = l
+ elif f9:
+ f9 = 0
+ options["f2py_wrapper_output"] = l
+ elif f10:
+ f10 = 0
+ options["f2cmap_file"] = l
+ elif f == 1:
+ try:
+ with open(l):
+ pass
+ files.append(l)
+ except OSError as detail:
+ errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
+ elif f == -1:
+ skipfuncs.append(l)
+ elif f == 0:
+ onlyfuncs.append(l)
+ if not f5 and not files and not modulename:
+ print(__usage__)
+ sys.exit()
+ if not os.path.isdir(buildpath):
+ if not verbose:
+ outmess('Creating build directory %s\n' % (buildpath))
+ os.mkdir(buildpath)
+ if signsfile:
+ signsfile = os.path.join(buildpath, signsfile)
+ if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
+ errmess(
+ 'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
+ sys.exit()
+
+ options['emptygen'] = emptygen
+ options['debug'] = debug
+ options['verbose'] = verbose
+ if dolc == -1 and not signsfile:
+ options['do-lower'] = 0
+ else:
+ options['do-lower'] = dolc
+ if modulename:
+ options['module'] = modulename
+ if signsfile:
+ options['signsfile'] = signsfile
+ if onlyfuncs:
+ options['onlyfuncs'] = onlyfuncs
+ if skipfuncs:
+ options['skipfuncs'] = skipfuncs
+ options['dolatexdoc'] = dolatexdoc
+ options['dorestdoc'] = dorestdoc
+ options['wrapfuncs'] = wrapfuncs
+ options['buildpath'] = buildpath
+ options['include_paths'] = include_paths
+ options.setdefault('f2cmap_file', None)
+ return files, options
+
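+# Illustrative call (a sketch under the assumption that 'demo.f' exists on
+# disk; 'demo' is a hypothetical module name):
+#
+#     files, options = scaninputline(['-m', 'demo', '--lower', 'demo.f'])
+#     # files == ['demo.f'], options['module'] == 'demo',
+#     # options['do-lower'] == 1
+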
+
+def callcrackfortran(files, options):
+ rules.options = options
+ crackfortran.debug = options['debug']
+ crackfortran.verbose = options['verbose']
+ if 'module' in options:
+ crackfortran.f77modulename = options['module']
+ if 'skipfuncs' in options:
+ crackfortran.skipfuncs = options['skipfuncs']
+ if 'onlyfuncs' in options:
+ crackfortran.onlyfuncs = options['onlyfuncs']
+ crackfortran.include_paths[:] = options['include_paths']
+ crackfortran.dolowercase = options['do-lower']
+ postlist = crackfortran.crackfortran(files)
+ if 'signsfile' in options:
+ outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
+ pyf = crackfortran.crack2fortran(postlist)
+ if options['signsfile'][-6:] == 'stdout':
+ sys.stdout.write(pyf)
+ else:
+ with open(options['signsfile'], 'w') as f:
+ f.write(pyf)
+ if options["coutput"] is None:
+ for mod in postlist:
+ mod["coutput"] = "%smodule.c" % mod["name"]
+ else:
+ for mod in postlist:
+ mod["coutput"] = options["coutput"]
+ if options["f2py_wrapper_output"] is None:
+ for mod in postlist:
+ mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
+ else:
+ for mod in postlist:
+ mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
+ return postlist
+
+
+def buildmodules(lst):
+ cfuncs.buildcfuncs()
+ outmess('Building modules...\n')
+ modules, mnames, isusedby = [], [], {}
+ for item in lst:
+ if '__user__' in item['name']:
+ cb_rules.buildcallbacks(item)
+ else:
+ if 'use' in item:
+ for u in item['use'].keys():
+ if u not in isusedby:
+ isusedby[u] = []
+ isusedby[u].append(item['name'])
+ modules.append(item)
+ mnames.append(item['name'])
+ ret = {}
+ for module, name in zip(modules, mnames):
+ if name in isusedby:
+ outmess('\tSkipping module "%s" which is used by %s.\n' % (
+ name, ','.join('"%s"' % s for s in isusedby[name])))
+ else:
+ um = []
+ if 'use' in module:
+ for u in module['use'].keys():
+ if u in isusedby and u in mnames:
+ um.append(modules[mnames.index(u)])
+ else:
+ outmess(
+                            f'\tModule "{name}" uses nonexistent "{u}" '
+ 'which will be ignored.\n')
+ ret[name] = {}
+ dict_append(ret[name], rules.buildmodule(module, um))
+ return ret
+
+
+def dict_append(d_out, d_in):
+ for (k, v) in d_in.items():
+ if k not in d_out:
+ d_out[k] = []
+ if isinstance(v, list):
+ d_out[k] = d_out[k] + v
+ else:
+ d_out[k].append(v)
+
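+# Worked example (illustrative, not upstream documentation):
+#
+#     d = {'a': [1]}
+#     dict_append(d, {'a': [2], 'b': 3})
+#     # d == {'a': [1, 2], 'b': [3]}
+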
+
+def run_main(comline_list):
+ """
+ Equivalent to running::
+
+ f2py <args>
+
+    where ``<args> = ' '.join(<list>)``, but in Python. Unless
+ ``-h`` is used, this function returns a dictionary containing
+ information on generated modules and their dependencies on source
+ files.
+
+ You cannot build extension modules with this function, that is,
+ using ``-c`` is not allowed. Use the ``compile`` command instead.
+
+ Examples
+ --------
+ The command ``f2py -m scalar scalar.f`` can be executed from Python as
+ follows.
+
+ .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat
+ :language: python
+
+ """
+ crackfortran.reset_global_f2py_vars()
+ f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
+ fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
+ fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
+ files, options = scaninputline(comline_list)
+ auxfuncs.options = options
+ capi_maps.load_f2cmap_file(options['f2cmap_file'])
+ postlist = callcrackfortran(files, options)
+ isusedby = {}
+ for plist in postlist:
+ if 'use' in plist:
+ for u in plist['use'].keys():
+ if u not in isusedby:
+ isusedby[u] = []
+ isusedby[u].append(plist['name'])
+ for plist in postlist:
+ if plist['block'] == 'python module' and '__user__' in plist['name']:
+ if plist['name'] in isusedby:
+ # if not quiet:
+ outmess(
+ f'Skipping Makefile build for module "{plist["name"]}" '
+ 'which is used by {}\n'.format(
+ ','.join(f'"{s}"' for s in isusedby[plist['name']])))
+ if 'signsfile' in options:
+ if options['verbose'] > 1:
+ outmess(
+ 'Stopping. Edit the signature file and then run f2py on the signature file: ')
+ outmess('%s %s\n' %
+ (os.path.basename(sys.argv[0]), options['signsfile']))
+ return
+ for plist in postlist:
+ if plist['block'] != 'python module':
+ if 'python module' not in options:
+ errmess(
+                'Tip: If your original code is Fortran source then you must use the -m option.\n')
+ raise TypeError('All blocks must be python module blocks but got %s' % (
+ repr(plist['block'])))
+ auxfuncs.debugoptions = options['debug']
+ f90mod_rules.options = options
+ auxfuncs.wrapfuncs = options['wrapfuncs']
+
+ ret = buildmodules(postlist)
+
+ for mn in ret.keys():
+ dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
+ return ret
+
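+# Illustrative use from Python (a sketch mirroring the docstring example;
+# 'scalar.f' is assumed to exist):
+#
+#     from numpy.f2py import f2py2e
+#     ret = f2py2e.run_main('-m scalar scalar.f'.split())
+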
+
+def filter_files(prefix, suffix, files, remove_prefix=None):
+ """
+ Filter files by prefix and suffix.
+ """
+ filtered, rest = [], []
+ match = re.compile(prefix + r'.*' + suffix + r'\Z').match
+ if remove_prefix:
+ ind = len(prefix)
+ else:
+ ind = 0
+ for file in [x.strip() for x in files]:
+ if match(file):
+ filtered.append(file[ind:])
+ else:
+ rest.append(file)
+ return filtered, rest
+
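+# Example (illustrative):
+#
+#     filter_files('-I', '', ['-I/usr/include', 'a.f'], remove_prefix=1)
+#     # -> (['/usr/include'], ['a.f'])
+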
+
+def get_prefix(module):
+ p = os.path.dirname(os.path.dirname(module.__file__))
+ return p
+
+
+def run_compile():
+ """
+    Build the extension module(s) in one call: this implements the
+    ``-c`` command line mode.
+    """
+ import tempfile
+
+ i = sys.argv.index('-c')
+ del sys.argv[i]
+
+ remove_build_dir = 0
+ try:
+ i = sys.argv.index('--build-dir')
+ except ValueError:
+ i = None
+ if i is not None:
+ build_dir = sys.argv[i + 1]
+ del sys.argv[i + 1]
+ del sys.argv[i]
+ else:
+ remove_build_dir = 1
+ build_dir = tempfile.mkdtemp()
+
+ _reg1 = re.compile(r'--link-')
+ sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
+ if sysinfo_flags:
+ sysinfo_flags = [f[7:] for f in sysinfo_flags]
+
+ _reg2 = re.compile(
+ r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include')
+ f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
+ f2py_flags2 = []
+ fl = 0
+ for a in sys.argv[1:]:
+ if a in ['only:', 'skip:']:
+ fl = 1
+ elif a == ':':
+ fl = 0
+ if fl or a == ':':
+ f2py_flags2.append(a)
+ if f2py_flags2 and f2py_flags2[-1] != ':':
+ f2py_flags2.append(':')
+ f2py_flags.extend(f2py_flags2)
+
+ sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
+ _reg3 = re.compile(
+ r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)')
+ flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
+ _reg4 = re.compile(
+ r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))')
+ fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
+
+ del_list = []
+ for s in flib_flags:
+ v = '--fcompiler='
+ if s[:len(v)] == v:
+ from numpy.distutils import fcompiler
+ fcompiler.load_all_fcompiler_classes()
+ allowed_keys = list(fcompiler.fcompiler_class.keys())
+ nv = ov = s[len(v):].lower()
+ if ov not in allowed_keys:
+ vmap = {} # XXX
+ try:
+ nv = vmap[ov]
+ except KeyError:
+ if ov not in vmap.values():
+ print('Unknown vendor: "%s"' % (s[len(v):]))
+ nv = ov
+ i = flib_flags.index(s)
+ flib_flags[i] = '--fcompiler=' + nv
+ continue
+ for s in del_list:
+ i = flib_flags.index(s)
+ del flib_flags[i]
+ assert len(flib_flags) <= 2, repr(flib_flags)
+
+ _reg5 = re.compile(r'--(verbose)')
+ setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
+ sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
+
+ if '--quiet' in f2py_flags:
+ setup_flags.append('--quiet')
+
+ modulename = 'untitled'
+ sources = sys.argv[1:]
+
+ for optname in ['--include_paths', '--include-paths', '--f2cmap']:
+ if optname in sys.argv:
+ i = sys.argv.index(optname)
+ f2py_flags.extend(sys.argv[i:i + 2])
+ del sys.argv[i + 1], sys.argv[i]
+ sources = sys.argv[1:]
+
+ if '-m' in sys.argv:
+ i = sys.argv.index('-m')
+ modulename = sys.argv[i + 1]
+ del sys.argv[i + 1], sys.argv[i]
+ sources = sys.argv[1:]
+ else:
+ from numpy.distutils.command.build_src import get_f2py_modulename
+ pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
+ sources = pyf_files + sources
+ for f in pyf_files:
+ modulename = get_f2py_modulename(f)
+ if modulename:
+ break
+
+ extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources)
+ include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
+ library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
+ libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
+ undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
+ define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
+ for i in range(len(define_macros)):
+ name_value = define_macros[i].split('=', 1)
+ if len(name_value) == 1:
+ name_value.append(None)
+ if len(name_value) == 2:
+ define_macros[i] = tuple(name_value)
+ else:
+ print('Invalid use of -D:', name_value)
+
+ from numpy.distutils.system_info import get_info
+
+ num_info = {}
+ if num_info:
+ include_dirs.extend(num_info.get('include_dirs', []))
+
+ from numpy.distutils.core import setup, Extension
+ ext_args = {'name': modulename, 'sources': sources,
+ 'include_dirs': include_dirs,
+ 'library_dirs': library_dirs,
+ 'libraries': libraries,
+ 'define_macros': define_macros,
+ 'undef_macros': undef_macros,
+ 'extra_objects': extra_objects,
+ 'f2py_options': f2py_flags,
+ }
+
+ if sysinfo_flags:
+ from numpy.distutils.misc_util import dict_append
+ for n in sysinfo_flags:
+ i = get_info(n)
+ if not i:
+ outmess('No %s resources found in system'
+ ' (try `f2py --help-link`)\n' % (repr(n)))
+ dict_append(ext_args, **i)
+
+ ext = Extension(**ext_args)
+ sys.argv = [sys.argv[0]] + setup_flags
+ sys.argv.extend(['build',
+ '--build-temp', build_dir,
+ '--build-base', build_dir,
+ '--build-platlib', '.',
+ # disable CCompilerOpt
+ '--disable-optimization'])
+ if fc_flags:
+ sys.argv.extend(['config_fc'] + fc_flags)
+ if flib_flags:
+ sys.argv.extend(['build_ext'] + flib_flags)
+
+ setup(ext_modules=[ext])
+
+ if remove_build_dir and os.path.exists(build_dir):
+ import shutil
+ outmess('Removing build directory %s\n' % (build_dir))
+ shutil.rmtree(build_dir)
+
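+# Illustrative flow (assumption): `f2py -c -m demo demo.f90` reaches
+# run_compile() via main(); '-c' is stripped above, the remaining arguments
+# are partitioned into f2py/fcompiler/setup flags and sources, and
+# numpy.distutils builds the extension in a temporary build directory.
+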
+
+def main():
+ if '--help-link' in sys.argv[1:]:
+ sys.argv.remove('--help-link')
+ from numpy.distutils.system_info import show_all
+ show_all()
+ return
+
+ # Probably outdated options that were not working before 1.16
+ if '--g3-numpy' in sys.argv[1:]:
+ sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
+ sys.exit(1)
+ elif '--2e-numeric' in sys.argv[1:]:
+ sys.argv.remove('--2e-numeric')
+ elif '--2e-numarray' in sys.argv[1:]:
+        # Note that this errors because the -DNUMARRAY argument is
+        # not recognized. It is kept only for backward compatibility
+        # and the error message.
+ sys.argv.append("-DNUMARRAY")
+ sys.argv.remove('--2e-numarray')
+ elif '--2e-numpy' in sys.argv[1:]:
+ sys.argv.remove('--2e-numpy')
+ else:
+ pass
+
+ if '-c' in sys.argv[1:]:
+ run_compile()
+ else:
+ run_main(sys.argv[1:])
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py b/venv/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py
new file mode 100644
index 00000000..a3bb6a21
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/f90mod_rules.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python3
+"""
+
+Build F90 module support for f2py2e.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/02/03 19:30:23 $
+Pearu Peterson
+
+"""
+__version__ = "$Revision: 1.27 $"[10:-1]
+
+f2py_version = 'See `f2py -v`'
+
+import numpy as np
+
+from . import capi_maps
+from . import func2subr
+from .crackfortran import undo_rmbadname, undo_rmbadname1
+
+# The environment provided by auxfuncs.py is needed for some calls to eval.
+# As the needed functions cannot be determined by static inspection of the
+# code, it is safest to use import * pending a major refactoring of f2py.
+from .auxfuncs import *
+
+options = {}
+
+
+def findf90modules(m):
+ if ismodule(m):
+ return [m]
+ if not hasbody(m):
+ return []
+ ret = []
+ for b in m['body']:
+ if ismodule(b):
+ ret.append(b)
+ else:
+ ret = ret + findf90modules(b)
+ return ret
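+
+# e.g. for a block tree containing `module m1` and `module m2` anywhere in
+# its body, findf90modules returns both module blocks, collected recursively.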
+
+fgetdims1 = """\
+ external f2pysetdata
+ logical ns
+ integer r,i
+ integer(%d) s(*)
+ ns = .FALSE.
+ if (allocated(d)) then
+ do i=1,r
+ if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then
+ ns = .TRUE.
+ end if
+ end do
+ if (ns) then
+ deallocate(d)
+ end if
+ end if
+ if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize
+
+fgetdims2 = """\
+ end if
+ if (allocated(d)) then
+ do i=1,r
+ s(i) = size(d,i)
+ end do
+ end if
+ flag = 1
+ call f2pysetdata(d,allocated(d))"""
+
+fgetdims2_sa = """\
+ end if
+ if (allocated(d)) then
+ do i=1,r
+ s(i) = size(d,i)
+ end do
+ !s(r) must be equal to len(d(1))
+ end if
+ flag = 2
+ call f2pysetdata(d,allocated(d))"""
+
+
+def buildhooks(pymod):
+ from . import rules
+ ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [],
+ 'need': ['F_FUNC', 'arrayobject.h'],
+ 'separatorsfor': {'includes0': '\n', 'includes': '\n'},
+ 'docs': ['"Fortran 90/95 modules:\\n"'],
+ 'latexdoc': []}
+ fhooks = ['']
+
+ def fadd(line, s=fhooks):
+ s[0] = '%s\n %s' % (s[0], line)
+ doc = ['']
+
+ def dadd(line, s=doc):
+ s[0] = '%s\n%s' % (s[0], line)
+ for m in findf90modules(pymod):
+        sargs, fargs, efargs, modobjs = [], [], [], []
+        notvars, onlyvars = [m['name']], []
+ sargsp = []
+ ifargs = []
+ mfargs = []
+ if hasbody(m):
+ for b in m['body']:
+ notvars.append(b['name'])
+ for n in m['vars'].keys():
+ var = m['vars'][n]
+ if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)):
+ onlyvars.append(n)
+ mfargs.append(n)
+ outmess('\t\tConstructing F90 module support for "%s"...\n' %
+ (m['name']))
+ if onlyvars:
+ outmess('\t\t Variables: %s\n' % (' '.join(onlyvars)))
+ chooks = ['']
+
+ def cadd(line, s=chooks):
+ s[0] = '%s\n%s' % (s[0], line)
+ ihooks = ['']
+
+ def iadd(line, s=ihooks):
+ s[0] = '%s\n%s' % (s[0], line)
+
+ vrd = capi_maps.modsign2map(m)
+ cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name']))
+ dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name']))
+ if hasnote(m):
+ note = m['note']
+ if isinstance(note, list):
+ note = '\n'.join(note)
+ dadd(note)
+ if onlyvars:
+ dadd('\\begin{description}')
+ for n in onlyvars:
+ var = m['vars'][n]
+ modobjs.append(n)
+ ct = capi_maps.getctype(var)
+ at = capi_maps.c2capi_map[ct]
+ dm = capi_maps.getarrdims(n, var)
+ dms = dm['dims'].replace('*', '-1').strip()
+ dms = dms.replace(':', '-1').strip()
+ if not dms:
+ dms = '-1'
+ use_fgetdims2 = fgetdims2
+ cadd('\t{"%s",%s,{{%s}},%s, %s},' %
+ (undo_rmbadname1(n), dm['rank'], dms, at,
+ capi_maps.get_elsize(var)))
+ dadd('\\item[]{{}\\verb@%s@{}}' %
+ (capi_maps.getarrdocsign(n, var)))
+ if hasnote(var):
+ note = var['note']
+ if isinstance(note, list):
+ note = '\n'.join(note)
+ dadd('--- %s' % (note))
+ if isallocatable(var):
+ fargs.append('f2py_%s_getdims_%s' % (m['name'], n))
+ efargs.append(fargs[-1])
+ sargs.append(
+ 'void (*%s)(int*,int*,void(*)(char*,int*),int*)' % (n))
+ sargsp.append('void (*)(int*,int*,void(*)(char*,int*),int*)')
+ iadd('\tf2py_%s_def[i_f2py++].func = %s;' % (m['name'], n))
+ fadd('subroutine %s(r,s,f2pysetdata,flag)' % (fargs[-1]))
+ fadd('use %s, only: d => %s\n' %
+ (m['name'], undo_rmbadname1(n)))
+ fadd('integer flag\n')
+ fhooks[0] = fhooks[0] + fgetdims1
+ dms = range(1, int(dm['rank']) + 1)
+ fadd(' allocate(d(%s))\n' %
+ (','.join(['s(%s)' % i for i in dms])))
+ fhooks[0] = fhooks[0] + use_fgetdims2
+ fadd('end subroutine %s' % (fargs[-1]))
+ else:
+ fargs.append(n)
+ sargs.append('char *%s' % (n))
+ sargsp.append('char*')
+ iadd('\tf2py_%s_def[i_f2py++].data = %s;' % (m['name'], n))
+ if onlyvars:
+ dadd('\\end{description}')
+ if hasbody(m):
+ for b in m['body']:
+ if not isroutine(b):
+ outmess("f90mod_rules.buildhooks:"
+ f" skipping {b['block']} {b['name']}\n")
+ continue
+ modobjs.append('%s()' % (b['name']))
+ b['modulename'] = m['name']
+ api, wrap = rules.buildapi(b)
+ if isfunction(b):
+ fhooks[0] = fhooks[0] + wrap
+ fargs.append('f2pywrap_%s_%s' % (m['name'], b['name']))
+ ifargs.append(func2subr.createfuncwrapper(b, signature=1))
+ else:
+ if wrap:
+ fhooks[0] = fhooks[0] + wrap
+ fargs.append('f2pywrap_%s_%s' % (m['name'], b['name']))
+ ifargs.append(
+ func2subr.createsubrwrapper(b, signature=1))
+ else:
+ fargs.append(b['name'])
+ mfargs.append(fargs[-1])
+ api['externroutines'] = []
+ ar = applyrules(api, vrd)
+ ar['docs'] = []
+ ar['docshort'] = []
+ ret = dictappend(ret, ar)
+ cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)'
+ 'f2py_rout_#modulename#_%s_%s,'
+ 'doc_f2py_rout_#modulename#_%s_%s},')
+ % (b['name'], m['name'], b['name'], m['name'], b['name']))
+ sargs.append('char *%s' % (b['name']))
+ sargsp.append('char *')
+ iadd('\tf2py_%s_def[i_f2py++].data = %s;' %
+ (m['name'], b['name']))
+ cadd('\t{NULL}\n};\n')
+ iadd('}')
+ ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % (
+ m['name'], ','.join(sargs), ihooks[0])
+ if '_' in m['name']:
+ F_FUNC = 'F_FUNC_US'
+ else:
+ F_FUNC = 'F_FUNC'
+ iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));'
+ % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp)))
+ iadd('static void f2py_init_%s(void) {' % (m['name']))
+ iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
+ % (F_FUNC, m['name'], m['name'].upper(), m['name']))
+ iadd('}\n')
+ ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks
+ ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % (
+ m['name'], m['name'], m['name'])] + ret['initf90modhooks']
+ fadd('')
+ fadd('subroutine f2pyinit%s(f2pysetupfunc)' % (m['name']))
+ if mfargs:
+ for a in undo_rmbadname(mfargs):
+ fadd('use %s, only : %s' % (m['name'], a))
+ if ifargs:
+ fadd(' '.join(['interface'] + ifargs))
+ fadd('end interface')
+ fadd('external f2pysetupfunc')
+ if efargs:
+ for a in undo_rmbadname(efargs):
+ fadd('external %s' % (a))
+ fadd('call f2pysetupfunc(%s)' % (','.join(undo_rmbadname(fargs))))
+ fadd('end subroutine f2pyinit%s\n' % (m['name']))
+
+ dadd('\n'.join(ret['latexdoc']).replace(
+ r'\subsection{', r'\subsubsection{'))
+
+ ret['latexdoc'] = []
+ ret['docs'].append('"\t%s --- %s"' % (m['name'],
+ ','.join(undo_rmbadname(modobjs))))
+
+ ret['routine_defs'] = ''
+ ret['doc'] = []
+ ret['docshort'] = []
+ ret['latexdoc'] = doc[0]
+ if len(ret['docs']) <= 1:
+ ret['docs'] = ''
+ return ret, fhooks[0]
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/func2subr.py b/venv/lib/python3.9/site-packages/numpy/f2py/func2subr.py
new file mode 100644
index 00000000..2a05f065
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/func2subr.py
@@ -0,0 +1,303 @@
+#!/usr/bin/env python3
+"""
+
+Rules for building C/API module with f2py2e.
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2004/11/26 11:13:06 $
+Pearu Peterson
+
+"""
+import copy
+
+from .auxfuncs import (
+ getfortranname, isexternal, isfunction, isfunction_wrap, isintent_in,
+ isintent_out, islogicalfunction, ismoduleroutine, isscalar,
+ issubroutine, issubroutine_wrap, outmess, show
+)
+
+
+def var2fixfortran(vars, a, fa=None, f90mode=None):
+ if fa is None:
+ fa = a
+ if a not in vars:
+ show(vars)
+ outmess('var2fixfortran: No definition for argument "%s".\n' % a)
+ return ''
+ if 'typespec' not in vars[a]:
+ show(vars[a])
+ outmess('var2fixfortran: No typespec for argument "%s".\n' % a)
+ return ''
+ vardef = vars[a]['typespec']
+ if vardef == 'type' and 'typename' in vars[a]:
+ vardef = '%s(%s)' % (vardef, vars[a]['typename'])
+ selector = {}
+ lk = ''
+ if 'kindselector' in vars[a]:
+ selector = vars[a]['kindselector']
+ lk = 'kind'
+ elif 'charselector' in vars[a]:
+ selector = vars[a]['charselector']
+ lk = 'len'
+ if '*' in selector:
+ if f90mode:
+ if selector['*'] in ['*', ':', '(*)']:
+ vardef = '%s(len=*)' % (vardef)
+ else:
+ vardef = '%s(%s=%s)' % (vardef, lk, selector['*'])
+ else:
+ if selector['*'] in ['*', ':']:
+ vardef = '%s*(%s)' % (vardef, selector['*'])
+ else:
+ vardef = '%s*%s' % (vardef, selector['*'])
+ else:
+ if 'len' in selector:
+ vardef = '%s(len=%s' % (vardef, selector['len'])
+ if 'kind' in selector:
+ vardef = '%s,kind=%s)' % (vardef, selector['kind'])
+ else:
+ vardef = '%s)' % (vardef)
+ elif 'kind' in selector:
+ vardef = '%s(kind=%s)' % (vardef, selector['kind'])
+
+ vardef = '%s %s' % (vardef, fa)
+ if 'dimension' in vars[a]:
+ vardef = '%s(%s)' % (vardef, ','.join(vars[a]['dimension']))
+ return vardef
+
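+# Example rendering (illustrative; the variable dictionary is hypothetical):
+#
+#     var2fixfortran({'x': {'typespec': 'real',
+#                           'kindselector': {'kind': '8'},
+#                           'dimension': ['n']}}, 'x')
+#     # -> 'real(kind=8) x(n)'
+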
+
+def createfuncwrapper(rout, signature=0):
+ assert isfunction(rout)
+
+ extra_args = []
+ vars = rout['vars']
+ for a in rout['args']:
+ v = rout['vars'][a]
+ for i, d in enumerate(v.get('dimension', [])):
+ if d == ':':
+ dn = 'f2py_%s_d%s' % (a, i)
+ dv = dict(typespec='integer', intent=['hide'])
+ dv['='] = 'shape(%s, %s)' % (a, i)
+ extra_args.append(dn)
+ vars[dn] = dv
+ v['dimension'][i] = dn
+ rout['args'].extend(extra_args)
+ need_interface = bool(extra_args)
+
+ ret = ['']
+
+ def add(line, ret=ret):
+ ret[0] = '%s\n %s' % (ret[0], line)
+ name = rout['name']
+ fortranname = getfortranname(rout)
+ f90mode = ismoduleroutine(rout)
+ newname = '%sf2pywrap' % (name)
+
+ if newname not in vars:
+ vars[newname] = vars[name]
+ args = [newname] + rout['args'][1:]
+ else:
+ args = [newname] + rout['args']
+
+ l_tmpl = var2fixfortran(vars, name, '@@@NAME@@@', f90mode)
+ if l_tmpl[:13] == 'character*(*)':
+ if f90mode:
+ l_tmpl = 'character(len=10)' + l_tmpl[13:]
+ else:
+ l_tmpl = 'character*10' + l_tmpl[13:]
+ charselect = vars[name]['charselector']
+ if charselect.get('*', '') == '(*)':
+ charselect['*'] = '10'
+
+ l1 = l_tmpl.replace('@@@NAME@@@', newname)
+ rl = None
+
+ sargs = ', '.join(args)
+ if f90mode:
+ add('subroutine f2pywrap_%s_%s (%s)' %
+ (rout['modulename'], name, sargs))
+ if not signature:
+ add('use %s, only : %s' % (rout['modulename'], fortranname))
+ else:
+ add('subroutine f2pywrap%s (%s)' % (name, sargs))
+ if not need_interface:
+ add('external %s' % (fortranname))
+ rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname
+
+ if need_interface:
+ for line in rout['saved_interface'].split('\n'):
+ if line.lstrip().startswith('use ') and '__user__' not in line:
+ add(line)
+
+ args = args[1:]
+ dumped_args = []
+ for a in args:
+ if isexternal(vars[a]):
+ add('external %s' % (a))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args:
+ continue
+ if isscalar(vars[a]):
+ add(var2fixfortran(vars, a, f90mode=f90mode))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args:
+ continue
+ if isintent_in(vars[a]):
+ add(var2fixfortran(vars, a, f90mode=f90mode))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args:
+ continue
+ add(var2fixfortran(vars, a, f90mode=f90mode))
+
+ add(l1)
+ if rl is not None:
+ add(rl)
+
+ if need_interface:
+ if f90mode:
+ # f90 module already defines needed interface
+ pass
+ else:
+ add('interface')
+ add(rout['saved_interface'].lstrip())
+ add('end interface')
+
+ sargs = ', '.join([a for a in args if a not in extra_args])
+
+ if not signature:
+ if islogicalfunction(rout):
+ add('%s = .not.(.not.%s(%s))' % (newname, fortranname, sargs))
+ else:
+ add('%s = %s(%s)' % (newname, fortranname, sargs))
+ if f90mode:
+ add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
+ else:
+ add('end')
+ return ret[0]
+
+
+def createsubrwrapper(rout, signature=0):
+ assert issubroutine(rout)
+
+ extra_args = []
+ vars = rout['vars']
+ for a in rout['args']:
+ v = rout['vars'][a]
+ for i, d in enumerate(v.get('dimension', [])):
+ if d == ':':
+ dn = 'f2py_%s_d%s' % (a, i)
+ dv = dict(typespec='integer', intent=['hide'])
+ dv['='] = 'shape(%s, %s)' % (a, i)
+ extra_args.append(dn)
+ vars[dn] = dv
+ v['dimension'][i] = dn
+ rout['args'].extend(extra_args)
+ need_interface = bool(extra_args)
+
+ ret = ['']
+
+ def add(line, ret=ret):
+ ret[0] = '%s\n %s' % (ret[0], line)
+ name = rout['name']
+ fortranname = getfortranname(rout)
+ f90mode = ismoduleroutine(rout)
+
+ args = rout['args']
+
+ sargs = ', '.join(args)
+ if f90mode:
+ add('subroutine f2pywrap_%s_%s (%s)' %
+ (rout['modulename'], name, sargs))
+ if not signature:
+ add('use %s, only : %s' % (rout['modulename'], fortranname))
+ else:
+ add('subroutine f2pywrap%s (%s)' % (name, sargs))
+ if not need_interface:
+ add('external %s' % (fortranname))
+
+ if need_interface:
+ for line in rout['saved_interface'].split('\n'):
+ if line.lstrip().startswith('use ') and '__user__' not in line:
+ add(line)
+
+ dumped_args = []
+ for a in args:
+ if isexternal(vars[a]):
+ add('external %s' % (a))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args:
+ continue
+ if isscalar(vars[a]):
+ add(var2fixfortran(vars, a, f90mode=f90mode))
+ dumped_args.append(a)
+ for a in args:
+ if a in dumped_args:
+ continue
+ add(var2fixfortran(vars, a, f90mode=f90mode))
+
+ if need_interface:
+ if f90mode:
+ # f90 module already defines needed interface
+ pass
+ else:
+ add('interface')
+ for line in rout['saved_interface'].split('\n'):
+ if line.lstrip().startswith('use ') and '__user__' in line:
+ continue
+ add(line)
+ add('end interface')
+
+ sargs = ', '.join([a for a in args if a not in extra_args])
+
+ if not signature:
+ add('call %s(%s)' % (fortranname, sargs))
+ if f90mode:
+ add('end subroutine f2pywrap_%s_%s' % (rout['modulename'], name))
+ else:
+ add('end')
+ return ret[0]
+
+
+def assubr(rout):
+ if isfunction_wrap(rout):
+ fortranname = getfortranname(rout)
+ name = rout['name']
+ outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % (
+ name, fortranname))
+ rout = copy.copy(rout)
+ fname = name
+ rname = fname
+ if 'result' in rout:
+ rname = rout['result']
+ rout['vars'][fname] = rout['vars'][rname]
+ fvar = rout['vars'][fname]
+ if not isintent_out(fvar):
+ if 'intent' not in fvar:
+ fvar['intent'] = []
+ fvar['intent'].append('out')
+ flag = 1
+ for i in fvar['intent']:
+ if i.startswith('out='):
+ flag = 0
+ break
+ if flag:
+ fvar['intent'].append('out=%s' % (rname))
+ rout['args'][:] = [fname] + rout['args']
+ return rout, createfuncwrapper(rout)
+ if issubroutine_wrap(rout):
+ fortranname = getfortranname(rout)
+ name = rout['name']
+ outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n'
+ % (name, fortranname))
+ rout = copy.copy(rout)
+ return rout, createsubrwrapper(rout)
+ return rout, ''
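+
+# Hedged illustration (a sketch, not verbatim generator output; the names
+# `f` and `fwrap` are made up): for a Fortran function such as
+#
+#     real function f(a)
+#     real a
+#     f = a + 1.0
+#     end
+#
+# createfuncwrapper() emits a subroutine of roughly this shape, with the
+# result hoisted into the first dummy argument so that the C layer only
+# ever has to call subroutines:
+#
+#     subroutine f2pywrapf (fwrap, a)
+#     external f
+#     real a
+#     real fwrap
+#     fwrap = f(a)
+#     end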
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/rules.py b/venv/lib/python3.9/site-packages/numpy/f2py/rules.py
new file mode 100644
index 00000000..1bac8710
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/rules.py
@@ -0,0 +1,1571 @@
+#!/usr/bin/env python3
+"""
+
+Rules for building C/API module with f2py2e.
+
+Here is a skeleton of a new wrapper function (13Dec2001):
+
+wrapper_function(args)
+ declarations
+ get_python_arguments, say, `a' and `b'
+
+ get_a_from_python
+ if (successful) {
+
+ get_b_from_python
+ if (successful) {
+
+ callfortran
+ if (successful) {
+
+ put_a_to_python
+ if (successful) {
+
+ put_b_to_python
+ if (successful) {
+
+ buildvalue = ...
+
+ }
+
+ }
+
+ }
+
+ }
+ cleanup_b
+
+ }
+ cleanup_a
+
+ return buildvalue
+
+Copyright 1999,2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2005/08/30 08:58:42 $
+Pearu Peterson
+
+"""
+import os
+import sys
+import time
+import copy
+from pathlib import Path
+
+# __version__.version is now the same as the NumPy version
+from . import __version__
+
+from .auxfuncs import (
+ applyrules, debugcapi, dictappend, errmess, gentitle, getargs2,
+ hascallstatement, hasexternals, hasinitvalue, hasnote,
+ hasresultnote, isarray, isarrayofstrings, ischaracter,
+ ischaracterarray, ischaracter_or_characterarray, iscomplex,
+ iscomplexarray, iscomplexfunction, iscomplexfunction_warn,
+ isdummyroutine, isexternal, isfunction, isfunction_wrap, isint1,
+ isint1array, isintent_aux, isintent_c, isintent_callback,
+ isintent_copy, isintent_hide, isintent_inout, isintent_nothide,
+ isintent_out, isintent_overwrite, islogical, islong_complex,
+ islong_double, islong_doublefunction, islong_long,
+ islong_longfunction, ismoduleroutine, isoptional, isrequired,
+ isscalar, issigned_long_longarray, isstring, isstringarray,
+ isstringfunction, issubroutine, isattr_value,
+ issubroutine_wrap, isthreadsafe, isunsigned, isunsigned_char,
+ isunsigned_chararray, isunsigned_long_long,
+ isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray,
+ l_and, l_not, l_or, outmess, replace, stripcomma, requiresf90wrapper
+)
+
+from . import capi_maps
+from . import cfuncs
+from . import common_rules
+from . import use_rules
+from . import f90mod_rules
+from . import func2subr
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+
+options = {}
+sepdict = {}
+# for k in ['need_cfuncs']: sepdict[k]=','
+for k in ['decl',
+ 'frompyobj',
+ 'cleanupfrompyobj',
+ 'topyarr', 'method',
+ 'pyobjfrom', 'closepyobjfrom',
+ 'freemem',
+ 'userincludes',
+ 'includes0', 'includes', 'typedefs', 'typedefs_generated',
+ 'cppmacros', 'cfuncs', 'callbacks',
+ 'latexdoc',
+ 'restdoc',
+ 'routine_defs', 'externroutines',
+ 'initf2pywraphooks',
+ 'commonhooks', 'initcommonhooks',
+ 'f90modhooks', 'initf90modhooks']:
+ sepdict[k] = '\n'
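+
+# Hedged sketch of what the separators buy: the fragments collected under each
+# key are ultimately joined with the separator chosen above (the real plumbing
+# lives in applyrules/dictappend):
+#
+#     >>> decl_fragments = ['int a = 0;', 'double b = 0.;']
+#     >>> '\n'.join(decl_fragments)   # sepdict['decl'] is '\n'
+#     'int a = 0;\ndouble b = 0.;'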
+
+#################### Rules for C/API module #################
+
+generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
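+
+# Honoring SOURCE_DATE_EPOCH keeps the generation date embedded in the module
+# header below reproducible. Minimal sketch:
+#
+#     >>> import time
+#     >>> time.asctime(time.gmtime(0))   # i.e. SOURCE_DATE_EPOCH=0
+#     'Thu Jan  1 00:00:00 1970'
+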
+module_rules = {
+ 'modulebody': """\
+/* File: #modulename#module.c
+ * This file is auto-generated with f2py (version:#f2py_version#).
+ * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
+ * written by Pearu Peterson <pearu@cens.ioc.ee>.
+ * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
+ * Do not edit this file directly unless you know what you are doing!!!
+ */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#ifndef PY_SSIZE_T_CLEAN
+#define PY_SSIZE_T_CLEAN
+#endif /* PY_SSIZE_T_CLEAN */
+
+/* Unconditionally included */
+#include <Python.h>
+#include <numpy/npy_os.h>
+
+""" + gentitle("See f2py2e/cfuncs.py: includes") + """
+#includes#
+#includes0#
+
+""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
+static PyObject *#modulename#_error;
+static PyObject *#modulename#_module;
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
+#typedefs#
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
+#typedefs_generated#
+
+""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
+#cppmacros#
+
+""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
+#cfuncs#
+
+""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
+#userincludes#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
+#usercode#
+
+/* See f2py2e/rules.py */
+#externroutines#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
+#usercode1#
+
+""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
+#callbacks#
+
+""" + gentitle("See f2py2e/rules.py: buildapi") + """
+#body#
+
+""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
+#f90modhooks#
+
+""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
+
+""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
+#commonhooks#
+
+""" + gentitle("See f2py2e/rules.py") + """
+
+static FortranDataDef f2py_routine_defs[] = {
+#routine_defs#
+ {NULL}
+};
+
+static PyMethodDef f2py_module_methods[] = {
+#pymethoddef#
+ {NULL,NULL}
+};
+
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "#modulename#",
+ NULL,
+ -1,
+ f2py_module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyMODINIT_FUNC PyInit_#modulename#(void) {
+ int i;
+ PyObject *m,*d, *s, *tmp;
+ m = #modulename#_module = PyModule_Create(&moduledef);
+ Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+ import_array();
+ if (PyErr_Occurred())
+ {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
+ d = PyModule_GetDict(m);
+ s = PyUnicode_FromString(\"#f2py_version#\");
+ PyDict_SetItemString(d, \"__version__\", s);
+ Py_DECREF(s);
+ s = PyUnicode_FromString(
+ \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
+ PyDict_SetItemString(d, \"__doc__\", s);
+ Py_DECREF(s);
+ s = PyUnicode_FromString(\"""" + numpy_version + """\");
+ PyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
+ Py_DECREF(s);
+ #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+ /*
+ * Store the error object inside the dict, so that it could get deallocated.
+ * (in practice, this is a module, so it likely will not and cannot.)
+ */
+ PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+ Py_DECREF(#modulename#_error);
+ for(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+ tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+ PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+ Py_DECREF(tmp);
+ }
+#initf2pywraphooks#
+#initf90modhooks#
+#initcommonhooks#
+#interface_usercode#
+
+#ifdef F2PY_REPORT_ATEXIT
+ if (! PyErr_Occurred())
+ on_exit(f2py_report_on_exit,(void*)\"#modulename#\");
+#endif
+ return m;
+}
+#ifdef __cplusplus
+}
+#endif
+""",
+ 'separatorsfor': {'latexdoc': '\n\n',
+ 'restdoc': '\n\n'},
+ 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n',
+ '#modnote#\n',
+ '#latexdoc#'],
+ 'restdoc': ['Module #modulename#\n' + '=' * 80,
+ '\n#restdoc#']
+}
+
+defmod_rules = [
+ {'body': '/*eof body*/',
+ 'method': '/*eof method*/',
+ 'externroutines': '/*eof externroutines*/',
+ 'routine_defs': '/*eof routine_defs*/',
+ 'initf90modhooks': '/*eof initf90modhooks*/',
+ 'initf2pywraphooks': '/*eof initf2pywraphooks*/',
+ 'initcommonhooks': '/*eof initcommonhooks*/',
+ 'latexdoc': '',
+ 'restdoc': '',
+ 'modnote': {hasnote: '#note#', l_not(hasnote): ''},
+ }
+]
+
+routine_rules = {
+ 'separatorsfor': sepdict,
+ 'body': """
+#begintitle#
+static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
+/* #declfortranroutine# */
+static PyObject *#apiname#(const PyObject *capi_self,
+ PyObject *capi_args,
+ PyObject *capi_keywds,
+ #functype# (*f2py_func)(#callprotoargument#)) {
+ PyObject * volatile capi_buildvalue = NULL;
+ volatile int f2py_success = 1;
+#decl#
+ static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
+#usercode#
+#routdebugenter#
+#ifdef F2PY_REPORT_ATEXIT
+f2py_start_clock();
+#endif
+ if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
+ \"#argformat#|#keyformat##xaformat#:#pyname#\",\\
+ capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL;
+#frompyobj#
+/*end of frompyobj*/
+#ifdef F2PY_REPORT_ATEXIT
+f2py_start_call_clock();
+#endif
+#callfortranroutine#
+if (PyErr_Occurred())
+ f2py_success = 0;
+#ifdef F2PY_REPORT_ATEXIT
+f2py_stop_call_clock();
+#endif
+/*end of callfortranroutine*/
+ if (f2py_success) {
+#pyobjfrom#
+/*end of pyobjfrom*/
+ CFUNCSMESS(\"Building return value.\\n\");
+ capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
+/*closepyobjfrom*/
+#closepyobjfrom#
+ } /*if (f2py_success) after callfortranroutine*/
+/*cleanupfrompyobj*/
+#cleanupfrompyobj#
+ if (capi_buildvalue == NULL) {
+#routdebugfailure#
+ } else {
+#routdebugleave#
+ }
+ CFUNCSMESS(\"Freeing memory.\\n\");
+#freemem#
+#ifdef F2PY_REPORT_ATEXIT
+f2py_stop_clock();
+#endif
+ return capi_buildvalue;
+}
+#endtitle#
+""",
+ 'routine_defs': '#routine_def#',
+ 'initf2pywraphooks': '#initf2pywraphook#',
+ 'externroutines': '#declfortranroutine#',
+ 'doc': '#docreturn##name#(#docsignature#)',
+ 'docshort': '#docreturn##name#(#docsignatureshort#)',
+ 'docs': '" #docreturn##name#(#docsignature#)\\n"\n',
+ 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
+ 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
+ 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
+ """
+\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
+#routnote#
+
+#latexdocstrsigns#
+"""],
+ 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80,
+
+ ]
+}
+
+################## Rules for C/API function ##############
+
+rout_rules = [
+ { # Init
+ 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n',
+ 'routdebugleave': '\n', 'routdebugfailure': '\n',
+ 'setjmpbuf': ' || ',
+ 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n',
+ 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"',
+ 'latexdocstrsigns': '\n',
+ 'latexdocstrreq': '\n', 'latexdocstropt': '\n',
+ 'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
+ },
+ 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
+ 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
+ 'freemem': '/*freemem*/',
+ 'docsignshort': '', 'docsignoptshort': '',
+ 'docstrsigns': '', 'latexdocstrsigns': '',
+ 'docstrreq': '\\nParameters\\n----------',
+ 'docstropt': '\\nOther Parameters\\n----------------',
+ 'docstrout': '\\nReturns\\n-------',
+ 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
+ 'latexdocstrreq': '\\noindent Required arguments:',
+ 'latexdocstropt': '\\noindent Optional arguments:',
+ 'latexdocstrout': '\\noindent Return objects:',
+ 'latexdocstrcbs': '\\noindent Call-back functions:',
+ 'args_capi': '', 'keys_capi': '', 'functype': '',
+ 'frompyobj': '/*frompyobj*/',
+ # this list will be reversed
+ 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'],
+ 'pyobjfrom': '/*pyobjfrom*/',
+ # this list will be reversed
+ 'closepyobjfrom': ['/*end of closepyobjfrom*/'],
+ 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
+ 'routdebugenter': '/*routdebugenter*/',
+ 'routdebugfailure': '/*routdebugfailure*/',
+ 'callfortranroutine': '/*callfortranroutine*/',
+ 'argformat': '', 'keyformat': '', 'need_cfuncs': '',
+ 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
+ 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
+ 'initf2pywraphook': '',
+ 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
+ }, {
+ 'apiname': 'f2py_rout_#modulename#_#name#',
+ 'pyname': '#modulename#.#name#',
+ 'decl': '',
+ '_check': l_not(ismoduleroutine)
+ }, {
+ 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#',
+ 'pyname': '#modulename#.#f90modulename#.#name#',
+ 'decl': '',
+ '_check': ismoduleroutine
+ }, { # Subroutine
+ 'functype': 'void',
+ 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);',
+ ismoduleroutine: '',
+ isdummyroutine: ''
+ },
+ 'routine_def': {
+ l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)):
+ ' {\"#name#\",-1,{{-1}},0,0,(char *)'
+ ' #F_FUNC#(#fortranname#,#FORTRANNAME#),'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):
+ ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isdummyroutine):
+ ' {\"#name#\",-1,{{-1}},0,0,NULL,'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ },
+ 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
+ 'callfortranroutine': [
+ {debugcapi: [
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
+ {hasexternals: """\
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {hascallstatement: ''' #callstatement#;
+ /*(*f2py_func)(#callfortran#);*/'''},
+ {l_not(l_or(hascallstatement, isdummyroutine))
+ : ' (*f2py_func)(#callfortran#);'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: """ }"""}
+ ],
+ '_check': l_and(issubroutine, l_not(issubroutine_wrap)),
+ }, { # Wrapped function
+ 'functype': 'void',
+ 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
+ isdummyroutine: '',
+ },
+
+ 'routine_def': {
+ l_not(l_or(ismoduleroutine, isdummyroutine)):
+ ' {\"#name#\",-1,{{-1}},0,0,(char *)'
+ ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine:
+ ' {\"#name#\",-1,{{-1}},0,0,NULL,'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ },
+ 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
+ {
+ extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
+ PyObject* o = PyDict_GetItemString(d,"#name#");
+ tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
+ PyObject_SetAttrString(o,"_cpointer", tmp);
+ Py_DECREF(tmp);
+ s = PyUnicode_FromString("#name#");
+ PyObject_SetAttrString(o,"__name__", s);
+ Py_DECREF(s);
+ }
+ '''},
+ 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
+ 'callfortranroutine': [
+ {debugcapi: [
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
+ {hasexternals: """\
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {l_not(l_or(hascallstatement, isdummyroutine))
+ : ' (*f2py_func)(#callfortran#);'},
+ {hascallstatement:
+ ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'}
+ ],
+ '_check': isfunction_wrap,
+ }, { # Wrapped subroutine
+ 'functype': 'void',
+ 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
+ isdummyroutine: '',
+ },
+
+ 'routine_def': {
+ l_not(l_or(ismoduleroutine, isdummyroutine)):
+ ' {\"#name#\",-1,{{-1}},0,0,(char *)'
+ ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine:
+ ' {\"#name#\",-1,{{-1}},0,0,NULL,'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},',
+ },
+ 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
+ {
+ extern void #F_FUNC#(#name_lower#,#NAME#)(void);
+ PyObject* o = PyDict_GetItemString(d,"#name#");
+ tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
+ PyObject_SetAttrString(o,"_cpointer", tmp);
+ Py_DECREF(tmp);
+ s = PyUnicode_FromString("#name#");
+ PyObject_SetAttrString(o,"__name__", s);
+ Py_DECREF(s);
+ }
+ '''},
+ 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
+ 'callfortranroutine': [
+ {debugcapi: [
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
+ {hasexternals: """\
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {l_not(l_or(hascallstatement, isdummyroutine))
+ : ' (*f2py_func)(#callfortran#);'},
+ {hascallstatement:
+ ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'}
+ ],
+ '_check': issubroutine_wrap,
+ }, { # Function
+ 'functype': '#ctype#',
+ 'docreturn': {l_not(isintent_hide): '#rname#,'},
+ 'docstrout': '#pydocsignout#',
+ 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {hasresultnote: '--- #resultnote#'}],
+ 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
+#ifdef USESCOMPAQFORTRAN
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
+#else
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
+#endif
+"""},
+ {l_and(debugcapi, l_not(isstringfunction)): """\
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
+"""}
+ ],
+ '_check': l_and(isfunction, l_not(isfunction_wrap))
+ }, { # Scalar function
+ 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
+ isdummyroutine: ''
+ },
+ 'routine_def': {
+ l_and(l_not(l_or(ismoduleroutine, isintent_c)),
+ l_not(isdummyroutine)):
+ (' {\"#name#\",-1,{{-1}},0,0,(char *)'
+ ' #F_FUNC#(#fortranname#,#FORTRANNAME#),'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},'),
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):
+ (' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,'
+ ' (f2py_init_func)#apiname#,doc_#apiname#},'),
+ isdummyroutine:
+ ' {\"#name#\",-1,{{-1}},0,0,NULL,'
+ '(f2py_init_func)#apiname#,doc_#apiname#},',
+ },
+ 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};',
+ l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'},
+ {iscomplexfunction:
+ ' PyObject *#name#_return_value_capi = Py_None;'}
+ ],
+ 'callfortranroutine': [
+ {hasexternals: """\
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {hascallstatement: ''' #callstatement#;
+/* #name#_return_value = (*f2py_func)(#callfortran#);*/
+'''},
+ {l_not(l_or(hascallstatement, isdummyroutine))
+ : ' #name#_return_value = (*f2py_func)(#callfortran#);'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'},
+ {l_and(debugcapi, iscomplexfunction)
+ : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
+ {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
+ 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
+ 'need': [{l_not(isdummyroutine): 'F_FUNC'},
+ {iscomplexfunction: 'pyobj_from_#ctype#1'},
+ {islong_longfunction: 'long_long'},
+ {islong_doublefunction: 'long_double'}],
+ 'returnformat': {l_not(isintent_hide): '#rformat#'},
+ 'return': {iscomplexfunction: ',#name#_return_value_capi',
+ l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'},
+ '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap))
+ }, { # String function # in use for --no-wrap
+ 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
+ ' {\"#name#\",-1,{{-1}},0,0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isintent_c):
+ ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
+ },
+ 'decl': [' #ctype# #name#_return_value = NULL;',
+ ' int #name#_return_value_len = 0;'],
+ 'callfortran':'#name#_return_value,#name#_return_value_len,',
+ 'callfortranroutine':[' #name#_return_value_len = #rlength#;',
+                          ' if ((#name#_return_value = (string)malloc('
+                          + '#name#_return_value_len+1)) == NULL) {',
+ ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");',
+ ' f2py_success = 0;',
+ ' } else {',
+ " (#name#_return_value)[#name#_return_value_len] = '\\0';",
+ ' }',
+ ' if (f2py_success) {',
+ {hasexternals: """\
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ """\
+#ifdef USESCOMPAQFORTRAN
+ (*f2py_func)(#callcompaqfortran#);
+#else
+ (*f2py_func)(#callfortran#);
+#endif
+""",
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'},
+ {debugcapi:
+ ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
+ ' } /* if (f2py_success) after (string)malloc */',
+ ],
+ 'returnformat': '#rformat#',
+ 'return': ',#name#_return_value',
+ 'freemem': ' STRINGFREE(#name#_return_value);',
+ 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
+ '_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
+ },
+ { # Debugging
+ 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
+ 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
+ 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
+ '_check': debugcapi
+ }
+]
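+
+# Hedged note: each dict above contributes only when its '_check' predicate
+# accepts the routine; dicts without '_check' always contribute. The selection
+# loop in buildapi() below boils down to:
+#
+#     for r in rout_rules:
+#         if '_check' not in r or r['_check'](rout):
+#             rd = dictappend(rd, applyrules(r, vrd, rout))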
+
+################ Rules for arguments ##################
+
+typedef_need_dict = {islong_long: 'long_long',
+ islong_double: 'long_double',
+ islong_complex: 'complex_long_double',
+ isunsigned_char: 'unsigned_char',
+ isunsigned_short: 'unsigned_short',
+ isunsigned: 'unsigned',
+ isunsigned_long_long: 'unsigned_long_long',
+ isunsigned_chararray: 'unsigned_char',
+ isunsigned_shortarray: 'unsigned_short',
+ isunsigned_long_longarray: 'unsigned_long_long',
+ issigned_long_longarray: 'long_long',
+ isint1: 'signed_char',
+ ischaracter_or_characterarray: 'character',
+ }
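+
+# Hedged sketch: typedef_need_dict maps a type predicate to the typedef it
+# requires, so the typedef needs of a variable description `var` (one entry of
+# a routine's 'vars' dict) can be collected as:
+#
+#     needs = [need for check, need in typedef_need_dict.items() if check(var)]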
+
+aux_rules = [
+ {
+ 'separatorsfor': sepdict
+ },
+ { # Common
+ 'frompyobj': [' /* Processing auxiliary variable #varname# */',
+ {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
+ 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
+ 'need': typedef_need_dict,
+ },
+ # Scalars (not complex)
+ { # Common
+ 'decl': ' #ctype# #varname# = 0;',
+ 'need': {hasinitvalue: 'math.h'},
+ 'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
+ '_check': l_and(isscalar, l_not(iscomplex)),
+ },
+ {
+ 'return': ',#varname#',
+ 'docstrout': '#pydocsignout#',
+ 'docreturn': '#outvarname#,',
+ 'returnformat': '#varrformat#',
+ '_check': l_and(isscalar, l_not(iscomplex), isintent_out),
+ },
+ # Complex scalars
+ { # Common
+ 'decl': ' #ctype# #varname#;',
+ 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ '_check': iscomplex
+ },
+ # String
+ { # Common
+ 'decl': [' #ctype# #varname# = NULL;',
+ ' int slen(#varname#);',
+ ],
+ 'need':['len..'],
+ '_check':isstring
+ },
+ # Array
+ { # Common
+ 'decl': [' #ctype# *#varname# = NULL;',
+ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ ' const int #varname#_Rank = #rank#;',
+ ],
+ 'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
+ '_check': isarray
+ },
+ # Scalararray
+ { # Common
+ '_check': l_and(isarray, l_not(iscomplexarray))
+ }, { # Not hidden
+ '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
+ },
+ # Integer*1 array
+ {'need': '#ctype#',
+ '_check': isint1array,
+ '_depend': ''
+ },
+ # Integer*-1 array
+ {'need': '#ctype#',
+ '_check': l_or(isunsigned_chararray, isunsigned_char),
+ '_depend': ''
+ },
+ # Integer*-2 array
+ {'need': '#ctype#',
+ '_check': isunsigned_shortarray,
+ '_depend': ''
+ },
+ # Integer*-8 array
+ {'need': '#ctype#',
+ '_check': isunsigned_long_longarray,
+ '_depend': ''
+ },
+ # Complexarray
+ {'need': '#ctype#',
+ '_check': iscomplexarray,
+ '_depend': ''
+ },
+ # Stringarray
+ {
+ 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
+ 'need': 'string',
+ '_check': isstringarray
+ }
+]
+
+arg_rules = [
+ {
+ 'separatorsfor': sepdict
+ },
+ { # Common
+ 'frompyobj': [' /* Processing variable #varname# */',
+ {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
+ 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
+ '_depend': '',
+ 'need': typedef_need_dict,
+ },
+ # Doc signatures
+ {
+ 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'},
+ 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'},
+ 'docstrout': {isintent_out: '#pydocsignout#'},
+ 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote: '--- #note#'}]},
+ 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
+ {hasnote: '--- #note#'}]},
+ 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
+ {l_and(hasnote, isintent_hide): '--- #note#',
+ l_and(hasnote, isintent_nothide): '--- See above.'}]},
+ 'depend': ''
+ },
+ # Required/Optional arguments
+ {
+ 'kwlist': '"#varname#",',
+ 'docsign': '#varname#,',
+ '_check': l_and(isintent_nothide, l_not(isoptional))
+ },
+ {
+ 'kwlistopt': '"#varname#",',
+ 'docsignopt': '#varname#=#showinit#,',
+ 'docsignoptshort': '#varname#,',
+ '_check': l_and(isintent_nothide, isoptional)
+ },
+ # Docstring/BuildValue
+ {
+ 'docreturn': '#outvarname#,',
+ 'returnformat': '#varrformat#',
+ '_check': isintent_out
+ },
+ # Externals (call-back functions)
+ { # Common
+ 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'},
+ 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'},
+ 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'},
+ 'docstrcbs': '#cbdocstr#',
+ 'latexdocstrcbs': '\\item[] #cblatexdocstr#',
+ 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
+ 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };',
+ ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;',
+ ' PyTupleObject *#varname#_xa_capi = NULL;',
+ {l_not(isintent_callback):
+ ' #cbname#_typedef #varname#_cptr;'}
+ ],
+ 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'},
+ 'argformat': {isrequired: 'O'},
+ 'keyformat': {isoptional: 'O'},
+ 'xaformat': {isintent_nothide: 'O!'},
+ 'args_capi': {isrequired: ',&#varname#_cb.capi'},
+ 'keys_capi': {isoptional: ',&#varname#_cb.capi'},
+ 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi',
+ 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))',
+ 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'},
+ 'need': ['#cbname#', 'setjmp.h'],
+ '_check':isexternal
+ },
+ {
+ 'frompyobj': [{l_not(isintent_callback): """\
+if(F2PyCapsule_Check(#varname#_cb.capi)) {
+ #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi);
+} else {
+ #varname#_cptr = #cbname#;
+}
+"""}, {isintent_callback: """\
+if (#varname#_cb.capi==Py_None) {
+ #varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
+ if (#varname#_cb.capi) {
+ if (#varname#_xa_capi==NULL) {
+ if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
+ PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
+ if (capi_tmp) {
+ #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
+ Py_DECREF(capi_tmp);
+ }
+ else {
+ #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
+ }
+ if (#varname#_xa_capi==NULL) {
+ PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
+ return NULL;
+ }
+ }
+ }
+ }
+ if (#varname#_cb.capi==NULL) {
+ PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
+ return NULL;
+ }
+}
+"""},
+ """\
+ if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) {
+""",
+ {debugcapi: ["""\
+ fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs);
+ CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""",
+ {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
+ """\
+ CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\");
+ #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""",
+ ],
+ 'cleanupfrompyobj':
+ """\
+ CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\");
+ #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);
+ Py_DECREF(#varname#_cb.args_capi);
+ }""",
+ 'need': ['SWAP', 'create_cb_arglist'],
+ '_check':isexternal,
+ '_depend':''
+ },
+ # Scalars (not complex)
+ { # Common
+ 'decl': ' #ctype# #varname# = 0;',
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ 'callfortran': {l_or(isintent_c, isattr_value): '#varname#,', l_not(l_or(isintent_c, isattr_value)): '&#varname#,'},
+ 'return': {isintent_out: ',#varname#'},
+ '_check': l_and(isscalar, l_not(iscomplex))
+ }, {
+ 'need': {hasinitvalue: 'math.h'},
+ '_check': l_and(isscalar, l_not(iscomplex)),
+ }, { # Not hidden
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
+ 'argformat': {isrequired: 'O'},
+ 'keyformat': {isoptional: 'O'},
+ 'args_capi': {isrequired: ',&#varname#_capi'},
+ 'keys_capi': {isoptional: ',&#varname#_capi'},
+ 'pyobjfrom': {isintent_inout: """\
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+ if (f2py_success) {"""},
+ 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
+ 'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
+ '_check': l_and(isscalar, l_not(iscomplex), l_not(isstring),
+ isintent_nothide)
+ }, {
+ 'frompyobj': [
+ # hasinitvalue...
+ # if pyobj is None:
+ # varname = init
+ # else
+ # from_pyobj(varname)
+ #
+ # isoptional and noinitvalue...
+ # if pyobj is not None:
+ # from_pyobj(varname)
+ # else:
+ # varname is uninitialized
+ #
+ # ...
+ # from_pyobj(varname)
+ #
+ {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else',
+ '_depend': ''},
+ {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)',
+ '_depend': ''},
+ {l_not(islogical): '''\
+ f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
+ if (f2py_success) {'''},
+ {islogical: '''\
+ #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
+ f2py_success = 1;
+ if (f2py_success) {'''},
+ ],
+ 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/',
+ 'need': {l_not(islogical): '#ctype#_from_pyobj'},
+ '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
+ '_depend': ''
+ }, { # Hidden
+ 'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
+ 'need': typedef_need_dict,
+ '_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
+ '_depend': ''
+ }, { # Common
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ '_check': l_and(isscalar, l_not(iscomplex)),
+ '_depend': ''
+ },
+ # Complex scalars
+ { # Common
+ 'decl': ' #ctype# #varname#;',
+ 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ 'return': {isintent_out: ',#varname#_capi'},
+ '_check': iscomplex
+ }, { # Not hidden
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
+ 'argformat': {isrequired: 'O'},
+ 'keyformat': {isoptional: 'O'},
+ 'args_capi': {isrequired: ',&#varname#_capi'},
+ 'keys_capi': {isoptional: ',&#varname#_capi'},
+ 'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
+ 'pyobjfrom': {isintent_inout: """\
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+ if (f2py_success) {"""},
+ 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
+ '_check': l_and(iscomplex, isintent_nothide)
+ }, {
+ 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
+ {l_and(isoptional, l_not(hasinitvalue))
+ : ' if (#varname#_capi != Py_None)'},
+ ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
+ '\n if (f2py_success) {'],
+ 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/',
+ 'need': ['#ctype#_from_pyobj'],
+ '_check': l_and(iscomplex, isintent_nothide),
+ '_depend': ''
+ }, { # Hidden
+ 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'},
+ '_check': l_and(iscomplex, isintent_hide)
+ }, {
+ 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ '_check': l_and(iscomplex, isintent_hide),
+ '_depend': ''
+ }, { # Common
+ 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'},
+ 'need': ['pyobj_from_#ctype#1'],
+ '_check': iscomplex
+ }, {
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ '_check': iscomplex,
+ '_depend': ''
+ },
+ # String
+ { # Common
+ 'decl': [' #ctype# #varname# = NULL;',
+ ' int slen(#varname#);',
+ ' PyObject *#varname#_capi = Py_None;'],
+ 'callfortran':'#varname#,',
+ 'callfortranappend':'slen(#varname#),',
+ 'pyobjfrom':[
+ {debugcapi:
+ ' fprintf(stderr,'
+ '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ # The trailing null value for Fortran is blank.
+ {l_and(isintent_out, l_not(isintent_c)):
+ " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ ],
+ 'return': {isintent_out: ',#varname#'},
+ 'need': ['len..',
+ {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}],
+ '_check': isstring
+ }, { # Common
+ 'frompyobj': [
+ """\
+ slen(#varname#) = #elsize#;
+ f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
+"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#"""
+"""`#varname#\' of #pyname# to C #ctype#\");
+ if (f2py_success) {""",
+ # The trailing null value for Fortran is blank.
+ {l_not(isintent_c):
+ " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
+ ],
+ 'cleanupfrompyobj': """\
+ STRINGFREE(#varname#);
+ } /*if (f2py_success) of #varname#*/""",
+ 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE',
+ {l_not(isintent_c): 'STRINGPADN'}],
+ '_check':isstring,
+ '_depend':''
+ }, { # Not hidden
+ 'argformat': {isrequired: 'O'},
+ 'keyformat': {isoptional: 'O'},
+ 'args_capi': {isrequired: ',&#varname#_capi'},
+ 'keys_capi': {isoptional: ',&#varname#_capi'},
+ 'pyobjfrom': [
+ {l_and(isintent_inout, l_not(isintent_c)):
+ " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ {isintent_inout: '''\
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
+ slen(#varname#));
+ if (f2py_success) {'''}],
+ 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'},
+ 'need': {isintent_inout: 'try_pyarr_from_#ctype#',
+ l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'},
+ '_check': l_and(isstring, isintent_nothide)
+ }, { # Hidden
+ '_check': l_and(isstring, isintent_hide)
+ }, {
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ '_check': isstring,
+ '_depend': ''
+ },
+ # Array
+ { # Common
+ 'decl': [' #ctype# *#varname# = NULL;',
+ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ ' const int #varname#_Rank = #rank#;',
+ ' PyArrayObject *capi_#varname#_as_array = NULL;',
+ ' int capi_#varname#_intent = 0;',
+ {isstringarray: ' int slen(#varname#) = 0;'},
+ ],
+ 'callfortran':'#varname#,',
+ 'callfortranappend': {isstringarray: 'slen(#varname#),'},
+ 'return': {isintent_out: ',capi_#varname#_as_array'},
+ 'need': 'len..',
+ '_check': isarray
+ }, { # intent(overwrite) array
+ 'decl': ' int capi_overwrite_#varname# = 1;',
+ 'kwlistxa': '"overwrite_#varname#",',
+ 'xaformat': 'i',
+ 'keys_xa': ',&capi_overwrite_#varname#',
+ 'docsignxa': 'overwrite_#varname#=1,',
+ 'docsignxashort': 'overwrite_#varname#,',
+ 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
+ '_check': l_and(isarray, isintent_overwrite),
+ }, {
+ 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ '_check': l_and(isarray, isintent_overwrite),
+ '_depend': '',
+ },
+ { # intent(copy) array
+ 'decl': ' int capi_overwrite_#varname# = 0;',
+ 'kwlistxa': '"overwrite_#varname#",',
+ 'xaformat': 'i',
+ 'keys_xa': ',&capi_overwrite_#varname#',
+ 'docsignxa': 'overwrite_#varname#=0,',
+ 'docsignxashort': 'overwrite_#varname#,',
+ 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
+ '_check': l_and(isarray, isintent_copy),
+ }, {
+ 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ '_check': l_and(isarray, isintent_copy),
+ '_depend': '',
+ }, {
+ 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
+ '_check': isarray,
+ '_depend': ''
+ }, { # Not hidden
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
+ 'argformat': {isrequired: 'O'},
+ 'keyformat': {isoptional: 'O'},
+ 'args_capi': {isrequired: ',&#varname#_capi'},
+ 'keys_capi': {isoptional: ',&#varname#_capi'},
+ '_check': l_and(isarray, isintent_nothide)
+ }, {
+ 'frompyobj': [
+ ' #setdims#;',
+ ' capi_#varname#_intent |= #intent#;',
+ (' const char * capi_errmess = "#modulename#.#pyname#:'
+ ' failed to create array from the #nth# `#varname#`";'),
+ {isintent_hide:
+ ' capi_#varname#_as_array = ndarray_from_pyobj('
+ ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,'
+ ' capi_#varname#_intent,Py_None,capi_errmess);'},
+ {isintent_nothide:
+ ' capi_#varname#_as_array = ndarray_from_pyobj('
+ ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,'
+ ' capi_#varname#_intent,#varname#_capi,capi_errmess);'},
+ """\
+ if (capi_#varname#_as_array == NULL) {
+ PyObject* capi_err = PyErr_Occurred();
+ if (capi_err == NULL) {
+ capi_err = #modulename#_error;
+ PyErr_SetString(capi_err, capi_errmess);
+ }
+ } else {
+ #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_as_array));
+""",
+ {isstringarray:
+ ' slen(#varname#) = f2py_itemsize(#varname#);'},
+ {hasinitvalue: [
+ {isintent_nothide:
+ ' if (#varname#_capi == Py_None) {'},
+ {isintent_hide: ' {'},
+ {iscomplexarray: ' #ctype# capi_c;'},
+ """\
+ int *_i,capi_i=0;
+ CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
+ if (initforcomb(PyArray_DIMS(capi_#varname#_as_array),
+ PyArray_NDIM(capi_#varname#_as_array),1)) {
+ while ((_i = nextforcomb()))
+ #varname#[capi_i++] = #init#; /* fortran way */
+ } else {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(exc ? exc : #modulename#_error,
+ \"Initialization of #nth# #varname# failed (initforcomb).\");
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ f2py_success = 0;
+ }
+ }
+ if (f2py_success) {"""]},
+ ],
+ 'cleanupfrompyobj': [ # note that this list will be reversed
+ ' } '
+ '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */',
+ {l_not(l_or(isintent_out, isintent_hide)): """\
+ if((PyObject *)capi_#varname#_as_array!=#varname#_capi) {
+ Py_XDECREF(capi_#varname#_as_array); }"""},
+ {l_and(isintent_hide, l_not(isintent_out))
+ : """ Py_XDECREF(capi_#varname#_as_array);"""},
+ {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'},
+ ],
+ '_check': isarray,
+ '_depend': ''
+ },
+ # Scalararray
+ { # Common
+ '_check': l_and(isarray, l_not(iscomplexarray))
+ }, { # Not hidden
+ '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
+ },
+ # Integer*1 array
+ {'need': '#ctype#',
+ '_check': isint1array,
+ '_depend': ''
+ },
+ # Integer*-1 array
+ {'need': '#ctype#',
+ '_check': isunsigned_chararray,
+ '_depend': ''
+ },
+ # Integer*-2 array
+ {'need': '#ctype#',
+ '_check': isunsigned_shortarray,
+ '_depend': ''
+ },
+ # Integer*-8 array
+ {'need': '#ctype#',
+ '_check': isunsigned_long_longarray,
+ '_depend': ''
+ },
+ # Complexarray
+ {'need': '#ctype#',
+ '_check': iscomplexarray,
+ '_depend': ''
+ },
+ # Character
+ {
+ 'need': 'string',
+ '_check': ischaracter,
+ },
+ # Character array
+ {
+ 'need': 'string',
+ '_check': ischaracterarray,
+ },
+ # Stringarray
+ {
+ 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
+ 'need': 'string',
+ '_check': isstringarray
+ }
+]
+
+################# Rules for checking ###############
+
+check_rules = [
+ {
+ 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
+ 'need': 'len..'
+ }, {
+ 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+ 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/',
+ 'need': 'CHECKSCALAR',
+ '_check': l_and(isscalar, l_not(iscomplex)),
+ '_break': ''
+ }, {
+ 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+ 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/',
+ 'need': 'CHECKSTRING',
+ '_check': isstring,
+ '_break': ''
+ }, {
+ 'need': 'CHECKARRAY',
+ 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
+ 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/',
+ '_check': isarray,
+ '_break': ''
+ }, {
+ 'need': 'CHECKGENERIC',
+ 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
+ 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/',
+ }
+]
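+
+# Hedged illustration: a signature-file declaration such as
+#
+#     integer intent(in), check(n>0) :: n
+#
+# is routed through the CHECKSCALAR rule above, so the generated C wraps the
+# call in a guard of roughly the form
+#
+#     CHECKSCALAR(n>0,"n>0","1st argument n","n=%d",n) { ... }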
+
+########## Applying the rules. No need to modify what follows #############
+
+#################### Build C/API module #######################
+
+
+def buildmodule(m, um):
+ """
+ Return
+ """
+ outmess(' Building module "%s"...\n' % (m['name']))
+ ret = {}
+ mod_rules = defmod_rules[:]
+ vrd = capi_maps.modsign2map(m)
+ rd = dictappend({'f2py_version': f2py_version}, vrd)
+ funcwrappers = []
+ funcwrappers2 = [] # F90 codes
+ for n in m['interfaced']:
+ nb = None
+ for bi in m['body']:
+ if bi['block'] not in ['interface', 'abstract interface']:
+ errmess('buildmodule: Expected interface block. Skipping.\n')
+ continue
+ for b in bi['body']:
+ if b['name'] == n:
+ nb = b
+ break
+
+ if not nb:
+ print(
+ 'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n), file=sys.stderr)
+ continue
+ nb_list = [nb]
+ if 'entry' in nb:
+ for k, a in nb['entry'].items():
+ nb1 = copy.deepcopy(nb)
+ del nb1['entry']
+ nb1['name'] = k
+ nb1['args'] = a
+ nb_list.append(nb1)
+ for nb in nb_list:
+ # requiresf90wrapper must be called before buildapi as it
+ # rewrites assumed shape arrays as automatic arrays.
+ isf90 = requiresf90wrapper(nb)
+ # options is in scope here
+ if options['emptygen']:
+ b_path = options['buildpath']
+ m_name = vrd['modulename']
+                outmess('    Generating possibly empty wrappers\n')
+ Path(f"{b_path}/{vrd['coutput']}").touch()
+ if isf90:
+ # f77 + f90 wrappers
+ outmess(f' Maybe empty "{m_name}-f2pywrappers2.f90"\n')
+ Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch()
+ outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n')
+ Path(f'{b_path}/{m_name}-f2pywrappers.f').touch()
+ else:
+ # only f77 wrappers
+ outmess(f' Maybe empty "{m_name}-f2pywrappers.f"\n')
+ Path(f'{b_path}/{m_name}-f2pywrappers.f').touch()
+ api, wrap = buildapi(nb)
+ if wrap:
+ if isf90:
+ funcwrappers2.append(wrap)
+ else:
+ funcwrappers.append(wrap)
+ ar = applyrules(api, vrd)
+ rd = dictappend(rd, ar)
+
+ # Construct COMMON block support
+ cr, wrap = common_rules.buildhooks(m)
+ if wrap:
+ funcwrappers.append(wrap)
+ ar = applyrules(cr, vrd)
+ rd = dictappend(rd, ar)
+
+ # Construct F90 module support
+ mr, wrap = f90mod_rules.buildhooks(m)
+ if wrap:
+ funcwrappers2.append(wrap)
+ ar = applyrules(mr, vrd)
+ rd = dictappend(rd, ar)
+
+ for u in um:
+ ar = use_rules.buildusevars(u, m['use'][u['name']])
+ rd = dictappend(rd, ar)
+
+ needs = cfuncs.get_needs()
+ # Add mapped definitions
+    needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped
+ if cvar in typedef_need_dict.values()]
+ code = {}
+ for n in needs.keys():
+ code[n] = []
+ for k in needs[n]:
+ c = ''
+ if k in cfuncs.includes0:
+ c = cfuncs.includes0[k]
+ elif k in cfuncs.includes:
+ c = cfuncs.includes[k]
+ elif k in cfuncs.userincludes:
+ c = cfuncs.userincludes[k]
+ elif k in cfuncs.typedefs:
+ c = cfuncs.typedefs[k]
+ elif k in cfuncs.typedefs_generated:
+ c = cfuncs.typedefs_generated[k]
+ elif k in cfuncs.cppmacros:
+ c = cfuncs.cppmacros[k]
+ elif k in cfuncs.cfuncs:
+ c = cfuncs.cfuncs[k]
+ elif k in cfuncs.callbacks:
+ c = cfuncs.callbacks[k]
+ elif k in cfuncs.f90modhooks:
+ c = cfuncs.f90modhooks[k]
+ elif k in cfuncs.commonhooks:
+ c = cfuncs.commonhooks[k]
+ else:
+ errmess('buildmodule: unknown need %s.\n' % (repr(k)))
+ continue
+ code[n].append(c)
+ mod_rules.append(code)
+ for r in mod_rules:
+ if ('_check' in r and r['_check'](m)) or ('_check' not in r):
+ ar = applyrules(r, vrd, m)
+ rd = dictappend(rd, ar)
+ ar = applyrules(module_rules, rd)
+
+ fn = os.path.join(options['buildpath'], vrd['coutput'])
+ ret['csrc'] = fn
+ with open(fn, 'w') as f:
+ f.write(ar['modulebody'].replace('\t', 2 * ' '))
+ outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
+
+ if options['dorestdoc']:
+ fn = os.path.join(
+ options['buildpath'], vrd['modulename'] + 'module.rest')
+ with open(fn, 'w') as f:
+ f.write('.. -*- rest -*-\n')
+ f.write('\n'.join(ar['restdoc']))
+ outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' %
+ (options['buildpath'], vrd['modulename']))
+ if options['dolatexdoc']:
+ fn = os.path.join(
+ options['buildpath'], vrd['modulename'] + 'module.tex')
+ ret['ltx'] = fn
+ with open(fn, 'w') as f:
+ f.write(
+ '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
+ if 'shortlatex' not in options:
+ f.write(
+ '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
+ f.write('\n'.join(ar['latexdoc']))
+ if 'shortlatex' not in options:
+ f.write('\\end{document}')
+ outmess(' Documentation is saved to file "%s/%smodule.tex"\n' %
+ (options['buildpath'], vrd['modulename']))
+ if funcwrappers:
+ wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
+ ret['fsrc'] = wn
+ with open(wn, 'w') as f:
+ f.write('C -*- fortran -*-\n')
+ f.write(
+ 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
+ f.write(
+ 'C It contains Fortran 77 wrappers to fortran functions.\n')
+            lines = []
+            for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
+                if 0 <= l.find('!') < 66:
+                    # don't split comment lines
+                    lines.append(l + '\n')
+                elif l and l[0] == ' ':
+                    while len(l) >= 66:
+                        lines.append(l[:66] + '\n     &')
+                        l = l[66:]
+                    lines.append(l + '\n')
+                else:
+                    lines.append(l + '\n')
+            lines = ''.join(lines).replace('\n     &\n', '\n')
+ f.write(lines)
+ outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn))
+ if funcwrappers2:
+ wn = os.path.join(
+ options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
+ ret['fsrc'] = wn
+ with open(wn, 'w') as f:
+ f.write('! -*- f90 -*-\n')
+ f.write(
+ '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
+ f.write(
+ '! It contains Fortran 90 wrappers to fortran functions.\n')
+            lines = []
+            for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
+                if 0 <= l.find('!') < 72:
+                    # don't split comment lines
+                    lines.append(l + '\n')
+                elif len(l) > 72 and l[0] == ' ':
+                    lines.append(l[:72] + '&\n     &')
+                    l = l[72:]
+                    while len(l) > 66:
+                        lines.append(l[:66] + '&\n     &')
+                        l = l[66:]
+                    lines.append(l + '\n')
+                else:
+                    lines.append(l + '\n')
+            lines = ''.join(lines).replace('\n     &\n', '\n')
+ f.write(lines)
+ outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn))
+ return ret
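+
+# Hedged note on the line splitting above: fixed-form Fortran 77 ignores text
+# past column 72, so long wrapper lines are broken at column 66 and continued
+# with an '&' placed in column 6, while the free-form F90 wrappers end the
+# broken line with '&' and continue on the next:
+#
+#     >>> len('     &')   # five spaces put the continuation marker in column 6
+#     6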
+
+################## Build C/API function #############
+
+stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th',
+ 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
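+
+# Hedged note: suffixing by nth % 10 produces the usual ordinals but mislabels
+# the teens; the ordinal only ever appears in error-message wording.
+#
+#     >>> [repr(n) + stnd[n % 10] for n in (1, 2, 3, 11, 12)]
+#     ['1st', '2nd', '3rd', '11st', '12nd']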
+
+
+def buildapi(rout):
+ rout, wrap = func2subr.assubr(rout)
+ args, depargs = getargs2(rout)
+ capi_maps.depargs = depargs
+ var = rout['vars']
+
+ if ismoduleroutine(rout):
+ outmess(' Constructing wrapper function "%s.%s"...\n' %
+ (rout['modulename'], rout['name']))
+ else:
+ outmess(' Constructing wrapper function "%s"...\n' % (rout['name']))
+ # Routine
+ vrd = capi_maps.routsign2map(rout)
+ rd = dictappend({}, vrd)
+ for r in rout_rules:
+ if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
+ ar = applyrules(r, vrd, rout)
+ rd = dictappend(rd, ar)
+
+ # Args
+ nth, nthk = 0, 0
+ savevrd = {}
+ for a in args:
+ vrd = capi_maps.sign2map(a, var[a])
+ if isintent_aux(var[a]):
+ _rules = aux_rules
+ else:
+ _rules = arg_rules
+ if not isintent_hide(var[a]):
+ if not isoptional(var[a]):
+ nth = nth + 1
+ vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument'
+ else:
+ nthk = nthk + 1
+ vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword'
+ else:
+ vrd['nth'] = 'hidden'
+ savevrd[a] = vrd
+ for r in _rules:
+ if '_depend' in r:
+ continue
+ if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+ ar = applyrules(r, vrd, var[a])
+ rd = dictappend(rd, ar)
+ if '_break' in r:
+ break
+ for a in depargs:
+ if isintent_aux(var[a]):
+ _rules = aux_rules
+ else:
+ _rules = arg_rules
+ vrd = savevrd[a]
+ for r in _rules:
+ if '_depend' not in r:
+ continue
+ if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+ ar = applyrules(r, vrd, var[a])
+ rd = dictappend(rd, ar)
+ if '_break' in r:
+ break
+ if 'check' in var[a]:
+ for c in var[a]['check']:
+ vrd['check'] = c
+ ar = applyrules(check_rules, vrd, var[a])
+ rd = dictappend(rd, ar)
+ if isinstance(rd['cleanupfrompyobj'], list):
+ rd['cleanupfrompyobj'].reverse()
+ if isinstance(rd['closepyobjfrom'], list):
+ rd['closepyobjfrom'].reverse()
+ rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#',
+ {'docsign': rd['docsign'],
+ 'docsignopt': rd['docsignopt'],
+ 'docsignxa': rd['docsignxa']}))
+ optargs = stripcomma(replace('#docsignopt##docsignxa#',
+ {'docsignxa': rd['docsignxashort'],
+ 'docsignopt': rd['docsignoptshort']}
+ ))
+ if optargs == '':
+ rd['docsignatureshort'] = stripcomma(
+ replace('#docsign#', {'docsign': rd['docsign']}))
+ else:
+ rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]',
+ {'docsign': rd['docsign'],
+ 'docsignopt': optargs,
+ })
+ rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_')
+ rd['latexdocsignatureshort'] = rd[
+ 'latexdocsignatureshort'].replace(',', ', ')
+ cfs = stripcomma(replace('#callfortran##callfortranappend#', {
+ 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
+ if len(rd['callfortranappend']) > 1:
+ rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', {
+ 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
+ else:
+ rd['callcompaqfortran'] = cfs
+ rd['callfortran'] = cfs
+ if isinstance(rd['docreturn'], list):
+ rd['docreturn'] = stripcomma(
+ replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = '
+ rd['docstrsigns'] = []
+ rd['latexdocstrsigns'] = []
+ for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
+ if k in rd and isinstance(rd[k], list):
+ rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
+ k = 'latex' + k
+ if k in rd and isinstance(rd[k], list):
+ rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
+ ['\\begin{description}'] + rd[k][1:] +\
+ ['\\end{description}']
+
+ ar = applyrules(routine_rules, rd)
+    if ismoduleroutine(rout):
+        outmess('            %s\n' % (ar['docshort']))
+    else:
+        outmess('        %s\n' % (ar['docshort']))
+ return ar, wrap
+
+
+#################### EOF rules.py #######################
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/setup.py b/venv/lib/python3.9/site-packages/numpy/f2py/setup.py
new file mode 100644
index 00000000..499609f9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/setup.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+"""
+setup.py for installing F2PY
+
+Usage:
+ pip install .
+
+Copyright 2001-2005 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Revision: 1.32 $
+$Date: 2005/01/30 17:22:14 $
+Pearu Peterson
+
+"""
+from numpy.distutils.core import setup
+from numpy.distutils.misc_util import Configuration
+
+
+from __version__ import version
+
+
+def configuration(parent_package='', top_path=None):
+ config = Configuration('f2py', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/src')
+ config.add_data_files(
+ 'src/fortranobject.c',
+ 'src/fortranobject.h')
+ config.add_data_files('*.pyi')
+ return config
+
+
+if __name__ == "__main__":
+
+ config = configuration(top_path='')
+ config = config.todict()
+
+ config['classifiers'] = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: NumPy License',
+ 'Natural Language :: English',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: C',
+ 'Programming Language :: Fortran',
+ 'Programming Language :: Python',
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Software Development :: Code Generators',
+ ]
+ setup(version=version,
+ description="F2PY - Fortran to Python Interface Generator",
+ author="Pearu Peterson",
+ author_email="pearu@cens.ioc.ee",
+ maintainer="Pearu Peterson",
+ maintainer_email="pearu@cens.ioc.ee",
+ license="BSD",
+ platforms="Unix, Windows (mingw|cygwin), Mac OSX",
+ long_description="""\
+The Fortran to Python Interface Generator, or F2PY for short, is a
+command line tool (f2py) for generating Python C/API modules for
+wrapping Fortran 77/90/95 subroutines, accessing common blocks from
+Python, and calling Python functions from Fortran (call-backs).
+Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
+ url="https://numpy.org/doc/stable/f2py/",
+ keywords=['Fortran', 'f2py'],
+ **config)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c b/venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c
new file mode 100644
index 00000000..add6e8b6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,1422 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+ Author: Pearu Peterson <pearu@cens.ioc.ee>
+ $Revision: 1.52 $
+ $Date: 2005/07/11 07:44:20 $
+*/
+
+int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj)
+{
+ if (obj == NULL) {
+ fprintf(stderr, "Error loading %s\n", name);
+ if (PyErr_Occurred()) {
+ PyErr_Print();
+ PyErr_Clear();
+ }
+ return -1;
+ }
+ return PyDict_SetItemString(dict, name, obj);
+}
+
+/*
+ * Python-only fallback for thread-local callback pointers
+ */
+void *
+F2PySwapThreadLocalCallbackPtr(char *key, void *ptr)
+{
+ PyObject *local_dict, *value;
+ void *prev;
+
+ local_dict = PyThreadState_GetDict();
+ if (local_dict == NULL) {
+ Py_FatalError(
+ "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict "
+ "failed");
+ }
+
+ value = PyDict_GetItemString(local_dict, key);
+ if (value != NULL) {
+ prev = PyLong_AsVoidPtr(value);
+ if (PyErr_Occurred()) {
+ Py_FatalError(
+ "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+ }
+ }
+ else {
+ prev = NULL;
+ }
+
+ value = PyLong_FromVoidPtr((void *)ptr);
+ if (value == NULL) {
+ Py_FatalError(
+ "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed");
+ }
+
+ if (PyDict_SetItemString(local_dict, key, value) != 0) {
+ Py_FatalError(
+ "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed");
+ }
+
+ Py_DECREF(value);
+
+ return prev;
+}
+
+void *
+F2PyGetThreadLocalCallbackPtr(char *key)
+{
+ PyObject *local_dict, *value;
+ void *prev;
+
+ local_dict = PyThreadState_GetDict();
+ if (local_dict == NULL) {
+ Py_FatalError(
+ "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed");
+ }
+
+ value = PyDict_GetItemString(local_dict, key);
+ if (value != NULL) {
+ prev = PyLong_AsVoidPtr(value);
+ if (PyErr_Occurred()) {
+ Py_FatalError(
+ "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+ }
+ }
+ else {
+ prev = NULL;
+ }
+
+ return prev;
+}
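+/*
+ * A usage sketch (the key name and fn are hypothetical; f2py-generated
+ * wrapper code supplies its own): save and restore the callback pointer
+ * around a Fortran call so that nested calls see the right Python
+ * target:
+ *
+ *     void *prev = F2PySwapThreadLocalCallbackPtr("mod.cb", (void *)fn);
+ *     // ... call the Fortran routine, which may invoke the callback ...
+ *     F2PySwapThreadLocalCallbackPtr("mod.cb", prev);
+ */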
+
+static PyArray_Descr *
+get_descr_from_type_and_elsize(const int type_num, const int elsize) {
+ PyArray_Descr * descr = PyArray_DescrFromType(type_num);
+ if (type_num == NPY_STRING) {
+ // PyArray_DescrFromType returns descr with elsize = 0.
+ PyArray_DESCR_REPLACE(descr);
+ if (descr == NULL) {
+ return NULL;
+ }
+ descr->elsize = elsize;
+ }
+ return descr;
+}
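+/* Example (illustrative): get_descr_from_type_and_elsize(NPY_STRING, 5)
+   yields a descr equivalent to dtype('S5'); for any other type_num the
+   elsize argument is ignored. */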
+
+/************************* FortranObject *******************************/
+
+typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *);
+
+PyObject *
+PyFortranObject_New(FortranDataDef *defs, f2py_void_func init)
+{
+ int i;
+ PyFortranObject *fp = NULL;
+ PyObject *v = NULL;
+ if (init != NULL) { /* Initialize F90 module objects */
+ (*(init))();
+ }
+ fp = PyObject_New(PyFortranObject, &PyFortran_Type);
+ if (fp == NULL) {
+ return NULL;
+ }
+ if ((fp->dict = PyDict_New()) == NULL) {
+ Py_DECREF(fp);
+ return NULL;
+ }
+ fp->len = 0;
+ while (defs[fp->len].name != NULL) {
+ fp->len++;
+ }
+ if (fp->len == 0) {
+ goto fail;
+ }
+ fp->defs = defs;
+ for (i = 0; i < fp->len; i++) {
+ if (fp->defs[i].rank == -1) { /* Is Fortran routine */
+ v = PyFortranObject_NewAsAttr(&(fp->defs[i]));
+ if (v == NULL) {
+ goto fail;
+ }
+ PyDict_SetItemString(fp->dict, fp->defs[i].name, v);
+ Py_XDECREF(v);
+ }
+ else if ((fp->defs[i].data) !=
+ NULL) { /* Is Fortran variable or array (not allocatable) */
+ PyArray_Descr *
+ descr = get_descr_from_type_and_elsize(fp->defs[i].type,
+ fp->defs[i].elsize);
+ if (descr == NULL) {
+ goto fail;
+ }
+ v = PyArray_NewFromDescr(&PyArray_Type, descr, fp->defs[i].rank,
+ fp->defs[i].dims.d, NULL, fp->defs[i].data,
+ NPY_ARRAY_FARRAY, NULL);
+ if (v == NULL) {
+ Py_DECREF(descr);
+ goto fail;
+ }
+ PyDict_SetItemString(fp->dict, fp->defs[i].name, v);
+ Py_XDECREF(v);
+ }
+ }
+ return (PyObject *)fp;
+fail:
+ Py_XDECREF(fp);
+ return NULL;
+}
+
+PyObject *
+PyFortranObject_NewAsAttr(FortranDataDef *defs)
+{ /* used for calling F90 module routines */
+ PyFortranObject *fp = NULL;
+ fp = PyObject_New(PyFortranObject, &PyFortran_Type);
+ if (fp == NULL)
+ return NULL;
+ if ((fp->dict = PyDict_New()) == NULL) {
+ PyObject_Del(fp);
+ return NULL;
+ }
+ fp->len = 1;
+ fp->defs = defs;
+ if (defs->rank == -1) {
+ PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name));
+ } else if (defs->rank == 0) {
+ PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name));
+ } else {
+ PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name));
+ }
+ return (PyObject *)fp;
+}
+
+/* Fortran methods */
+
+static void
+fortran_dealloc(PyFortranObject *fp)
+{
+ Py_XDECREF(fp->dict);
+ PyObject_Del(fp);
+}
+
+/* Returns number of bytes consumed from buf, or -1 on error. */
+static Py_ssize_t
+format_def(char *buf, Py_ssize_t size, FortranDataDef def)
+{
+ char *p = buf;
+ int i;
+ npy_intp n;
+
+ n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]);
+ if (n < 0 || n >= size) {
+ return -1;
+ }
+ p += n;
+ size -= n;
+
+ for (i = 1; i < def.rank; i++) {
+ n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]);
+ if (n < 0 || n >= size) {
+ return -1;
+ }
+ p += n;
+ size -= n;
+ }
+
+ if (size <= 0) {
+ return -1;
+ }
+
+ *p++ = ')';
+ size--;
+
+ if (def.data == NULL) {
+ static const char notalloc[] = ", not allocated";
+ if ((size_t)size < sizeof(notalloc)) {
+ return -1;
+ }
+ memcpy(p, notalloc, sizeof(notalloc));
+ p += sizeof(notalloc);
+ size -= sizeof(notalloc);
+ }
+
+ return p - buf;
+}
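+/* Example (illustrative values): for def.rank == 2, dims.d == {3, 4}
+   and def.data == NULL, format_def writes "array(3,4), not allocated"
+   into buf. */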
+
+static PyObject *
+fortran_doc(FortranDataDef def)
+{
+ char *buf, *p;
+ PyObject *s = NULL;
+ Py_ssize_t n, origsize, size = 100;
+
+ if (def.doc != NULL) {
+ size += strlen(def.doc);
+ }
+ origsize = size;
+ buf = p = (char *)PyMem_Malloc(size);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
+
+ if (def.rank == -1) {
+ if (def.doc) {
+ n = strlen(def.doc);
+ if (n > size) {
+ goto fail;
+ }
+ memcpy(p, def.doc, n);
+ p += n;
+ size -= n;
+ }
+ else {
+ n = PyOS_snprintf(p, size, "%s - no docs available", def.name);
+ if (n < 0 || n >= size) {
+ goto fail;
+ }
+ p += n;
+ size -= n;
+ }
+ }
+ else {
+ PyArray_Descr *d = PyArray_DescrFromType(def.type);
+ n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type);
+ Py_DECREF(d);
+ if (n < 0 || n >= size) {
+ goto fail;
+ }
+ p += n;
+ size -= n;
+
+ if (def.data == NULL) {
+ n = format_def(p, size, def);
+ if (n < 0) {
+ goto fail;
+ }
+ p += n;
+ size -= n;
+ }
+ else if (def.rank > 0) {
+ n = format_def(p, size, def);
+ if (n < 0) {
+ goto fail;
+ }
+ p += n;
+ size -= n;
+ }
+ else {
+ n = strlen("scalar");
+ if (size < n) {
+ goto fail;
+ }
+ memcpy(p, "scalar", n);
+ p += n;
+ size -= n;
+ }
+ }
+ if (size <= 1) {
+ goto fail;
+ }
+ *p++ = '\n';
+ size--;
+
+ /* p now points one beyond the last character of the string in buf */
+ s = PyUnicode_FromStringAndSize(buf, p - buf);
+
+ PyMem_Free(buf);
+ return s;
+
+fail:
+ fprintf(stderr,
+ "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:"
+ " too long docstring required, increase size\n",
+ p - buf, origsize);
+ PyMem_Free(buf);
+ return NULL;
+}
+
+static FortranDataDef *save_def; /* save pointer of an allocatable array */
+static void
+set_data(char *d, npy_intp *f)
+{ /* callback from Fortran */
+ if (*f) /* In fortran f=allocated(d) */
+ save_def->data = d;
+ else
+ save_def->data = NULL;
+ /* printf("set_data: d=%p,f=%d\n",d,*f); */
+}
+
+static PyObject *
+fortran_getattr(PyFortranObject *fp, char *name)
+{
+ int i, j, k, flag;
+ if (fp->dict != NULL) {
+ PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name);
+ if (v == NULL && PyErr_Occurred()) {
+ return NULL;
+ }
+ else if (v != NULL) {
+ Py_INCREF(v);
+ return v;
+ }
+ }
+ for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name));
+ i++)
+ ;
+ if (j == 0)
+ if (fp->defs[i].rank != -1) { /* F90 allocatable array */
+ if (fp->defs[i].func == NULL)
+ return NULL;
+ for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1;
+ save_def = &fp->defs[i];
+ (*(fp->defs[i].func))(&fp->defs[i].rank, fp->defs[i].dims.d,
+ set_data, &flag);
+ if (flag == 2)
+ k = fp->defs[i].rank + 1;
+ else
+ k = fp->defs[i].rank;
+ if (fp->defs[i].data != NULL) { /* array is allocated */
+ PyObject *v = PyArray_New(
+ &PyArray_Type, k, fp->defs[i].dims.d, fp->defs[i].type,
+ NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL);
+ if (v == NULL)
+ return NULL;
+ /* Py_INCREF(v); */
+ return v;
+ }
+ else { /* array is not allocated */
+ Py_RETURN_NONE;
+ }
+ }
+ if (strcmp(name, "__dict__") == 0) {
+ Py_INCREF(fp->dict);
+ return fp->dict;
+ }
+ if (strcmp(name, "__doc__") == 0) {
+ PyObject *s = PyUnicode_FromString(""), *s2, *s3;
+ for (i = 0; i < fp->len; i++) {
+ s2 = fortran_doc(fp->defs[i]);
+ s3 = PyUnicode_Concat(s, s2);
+ Py_DECREF(s2);
+ Py_DECREF(s);
+ s = s3;
+ }
+ if (PyDict_SetItemString(fp->dict, name, s))
+ return NULL;
+ return s;
+ }
+ if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) {
+ PyObject *cobj =
+ F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL);
+ if (PyDict_SetItemString(fp->dict, name, cobj))
+ return NULL;
+ return cobj;
+ }
+ PyObject *str, *ret;
+ str = PyUnicode_FromString(name);
+ ret = PyObject_GenericGetAttr((PyObject *)fp, str);
+ Py_DECREF(str);
+ return ret;
+}
+
+static int
+fortran_setattr(PyFortranObject *fp, char *name, PyObject *v)
+{
+ int i, j, flag;
+ PyArrayObject *arr = NULL;
+ for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name));
+ i++)
+ ;
+ if (j == 0) {
+ if (fp->defs[i].rank == -1) {
+ PyErr_SetString(PyExc_AttributeError,
+ "over-writing fortran routine");
+ return -1;
+ }
+ if (fp->defs[i].func != NULL) { /* is allocatable array */
+ npy_intp dims[F2PY_MAX_DIMS];
+ int k;
+ save_def = &fp->defs[i];
+ if (v != Py_None) { /* set new value (reallocate if needed --
+ see f2py generated code for more
+ details ) */
+ for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1;
+ if ((arr = array_from_pyobj(fp->defs[i].type, dims,
+ fp->defs[i].rank, F2PY_INTENT_IN,
+ v)) == NULL)
+ return -1;
+ (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr),
+ set_data, &flag);
+ }
+ else { /* deallocate */
+ for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0;
+ (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data,
+ &flag);
+ for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1;
+ }
+ memcpy(fp->defs[i].dims.d, dims,
+ fp->defs[i].rank * sizeof(npy_intp));
+ }
+ else { /* not allocatable array */
+ if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d,
+ fp->defs[i].rank, F2PY_INTENT_IN,
+ v)) == NULL)
+ return -1;
+ }
+ if (fp->defs[i].data !=
+ NULL) { /* copy Python object to Fortran array */
+ npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,
+ PyArray_NDIM(arr));
+ if (s == -1)
+ s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr));
+ if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr),
+ s * PyArray_ITEMSIZE(arr))) == NULL) {
+ if ((PyObject *)arr != v) {
+ Py_DECREF(arr);
+ }
+ return -1;
+ }
+ if ((PyObject *)arr != v) {
+ Py_DECREF(arr);
+ }
+ }
+ else
+ return (fp->defs[i].func == NULL ? -1 : 0);
+ return 0; /* successful */
+ }
+ if (fp->dict == NULL) {
+ fp->dict = PyDict_New();
+ if (fp->dict == NULL)
+ return -1;
+ }
+ if (v == NULL) {
+ int rv = PyDict_DelItemString(fp->dict, name);
+ if (rv < 0)
+ PyErr_SetString(PyExc_AttributeError,
+ "delete non-existing fortran attribute");
+ return rv;
+ }
+ else
+ return PyDict_SetItemString(fp->dict, name, v);
+}
+
+static PyObject *
+fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw)
+{
+ int i = 0;
+ /* printf("fortran call
+ name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
+ fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
+ if (fp->defs[i].rank == -1) { /* is Fortran routine */
+ if (fp->defs[i].func == NULL) {
+ PyErr_Format(PyExc_RuntimeError, "no function to call");
+ return NULL;
+ }
+ else if (fp->defs[i].data == NULL)
+ /* dummy routine */
+ return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg,
+ kw, NULL);
+ else
+ return (*((fortranfunc)(fp->defs[i].func)))(
+ (PyObject *)fp, arg, kw, (void *)fp->defs[i].data);
+ }
+ PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
+ return NULL;
+}
+
+static PyObject *
+fortran_repr(PyFortranObject *fp)
+{
+ PyObject *name = NULL, *repr = NULL;
+ name = PyObject_GetAttrString((PyObject *)fp, "__name__");
+ PyErr_Clear();
+ if (name != NULL && PyUnicode_Check(name)) {
+ repr = PyUnicode_FromFormat("<fortran %U>", name);
+ }
+ else {
+ repr = PyUnicode_FromString("<fortran object>");
+ }
+ Py_XDECREF(name);
+ return repr;
+}
+
+PyTypeObject PyFortran_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0).tp_name = "fortran",
+ .tp_basicsize = sizeof(PyFortranObject),
+ .tp_dealloc = (destructor)fortran_dealloc,
+ .tp_getattr = (getattrfunc)fortran_getattr,
+ .tp_setattr = (setattrfunc)fortran_setattr,
+ .tp_repr = (reprfunc)fortran_repr,
+ .tp_call = (ternaryfunc)fortran_call,
+};
+
+/************************* f2py_report_atexit *******************************/
+
+#ifdef F2PY_REPORT_ATEXIT
+static int passed_time = 0;
+static int passed_counter = 0;
+static int passed_call_time = 0;
+static struct timeb start_time;
+static struct timeb stop_time;
+static struct timeb start_call_time;
+static struct timeb stop_call_time;
+static int cb_passed_time = 0;
+static int cb_passed_counter = 0;
+static int cb_passed_call_time = 0;
+static struct timeb cb_start_time;
+static struct timeb cb_stop_time;
+static struct timeb cb_start_call_time;
+static struct timeb cb_stop_call_time;
+
+extern void
+f2py_start_clock(void)
+{
+ ftime(&start_time);
+}
+extern void
+f2py_start_call_clock(void)
+{
+ f2py_stop_clock();
+ ftime(&start_call_time);
+}
+extern void
+f2py_stop_clock(void)
+{
+ ftime(&stop_time);
+ passed_time += 1000 * (stop_time.time - start_time.time);
+ passed_time += stop_time.millitm - start_time.millitm;
+}
+extern void
+f2py_stop_call_clock(void)
+{
+ ftime(&stop_call_time);
+ passed_call_time += 1000 * (stop_call_time.time - start_call_time.time);
+ passed_call_time += stop_call_time.millitm - start_call_time.millitm;
+ passed_counter += 1;
+ f2py_start_clock();
+}
+
+extern void
+f2py_cb_start_clock(void)
+{
+ ftime(&cb_start_time);
+}
+extern void
+f2py_cb_start_call_clock(void)
+{
+ f2py_cb_stop_clock();
+ ftime(&cb_start_call_time);
+}
+extern void
+f2py_cb_stop_clock(void)
+{
+ ftime(&cb_stop_time);
+ cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time);
+ cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
+}
+extern void
+f2py_cb_stop_call_clock(void)
+{
+ ftime(&cb_stop_call_time);
+ cb_passed_call_time +=
+ 1000 * (cb_stop_call_time.time - cb_start_call_time.time);
+ cb_passed_call_time +=
+ cb_stop_call_time.millitm - cb_start_call_time.millitm;
+ cb_passed_counter += 1;
+ f2py_cb_start_clock();
+}
+
+static int f2py_report_on_exit_been_here = 0;
+extern void
+f2py_report_on_exit(int exit_flag, void *name)
+{
+ if (f2py_report_on_exit_been_here) {
+ fprintf(stderr, " %s\n", (char *)name);
+ return;
+ }
+ f2py_report_on_exit_been_here = 1;
+ fprintf(stderr, " /-----------------------\\\n");
+ fprintf(stderr, " < F2PY performance report >\n");
+ fprintf(stderr, " \\-----------------------/\n");
+ fprintf(stderr, "Overall time spent in ...\n");
+ fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n",
+ passed_call_time);
+ fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n",
+ passed_counter, passed_time);
+ fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n",
+ cb_passed_call_time);
+ fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n",
+ cb_passed_counter, cb_passed_time);
+
+ fprintf(stderr,
+ "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n",
+ passed_call_time - cb_passed_call_time - cb_passed_time);
+ fprintf(stderr,
+ "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n");
+ fprintf(stderr, "Exit status: %d\n", exit_flag);
+ fprintf(stderr, "Modules : %s\n", (char *)name);
+}
+#endif
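+/* The instrumentation above is compiled in only when f2py wrappers are
+   built with -DF2PY_REPORT_ATEXIT; the report is then printed once, at
+   interpreter exit. */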
+
+/********************** report on array copy ****************************/
+
+#ifdef F2PY_REPORT_ON_ARRAY_COPY
+static void
+f2py_report_on_array_copy(PyArrayObject *arr)
+{
+ const npy_intp arr_size = PyArray_Size((PyObject *)arr);
+ if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) {
+ fprintf(stderr,
+ "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n",
+ arr_size, (npy_intp)PyArray_ITEMSIZE(arr));
+ }
+}
+static void
+f2py_report_on_array_copy_fromany(void)
+{
+ fprintf(stderr, "created an array from object\n");
+}
+
+#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \
+ f2py_report_on_array_copy((PyArrayObject *)arr)
+#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany()
+#else
+#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR
+#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY
+#endif
+
+/************************* array_from_obj *******************************/
+
+/*
+ * File: array_from_pyobj.c
+ *
+ * Description:
+ * ------------
+ * Provides the array_from_pyobj function, which returns a contiguous
+ * array object with the given dimensions and the required storage
+ * order, either row-major (C) or column-major (Fortran). The function
+ * array_from_pyobj is very flexible about its Python object argument,
+ * which can be any number, list, tuple, or array.
+ *
+ * array_from_pyobj is used in f2py generated Python extension
+ * modules.
+ *
+ * Author: Pearu Peterson <pearu@cens.ioc.ee>
+ * Created: 13-16 January 2002
+ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
+ */
+
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+ const int rank,
+ npy_intp *dims,
+ const char *errmess);
+
+static int
+find_first_negative_dimension(const int rank, const npy_intp *dims)
+{
+ for (int i = 0; i < rank; ++i) {
+ if (dims[i] < 0) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+#ifdef DEBUG_COPY_ND_ARRAY
+void
+dump_dims(int rank, npy_intp const *dims)
+{
+ int i;
+ printf("[");
+ for (i = 0; i < rank; ++i) {
+ printf("%3" NPY_INTP_FMT, dims[i]);
+ }
+ printf("]\n");
+}
+void
+dump_attrs(const PyArrayObject *obj)
+{
+ const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj;
+ int rank = PyArray_NDIM(arr);
+ npy_intp size = PyArray_Size((PyObject *)arr);
+ printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank,
+ arr->flags, size);
+ printf("\tstrides = ");
+ dump_dims(rank, arr->strides);
+ printf("\tdimensions = ");
+ dump_dims(rank, arr->dimensions);
+}
+#endif
+
+#define SWAPTYPE(a, b, t) \
+ { \
+ t c; \
+ c = (a); \
+ (a) = (b); \
+ (b) = c; \
+ }
+
+static int
+swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2)
+{
+ PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1,
+ *arr2 = (PyArrayObject_fields *)obj2;
+ SWAPTYPE(arr1->data, arr2->data, char *);
+ SWAPTYPE(arr1->nd, arr2->nd, int);
+ SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *);
+ SWAPTYPE(arr1->strides, arr2->strides, npy_intp *);
+ SWAPTYPE(arr1->base, arr2->base, PyObject *);
+ SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *);
+ SWAPTYPE(arr1->flags, arr2->flags, int);
+ /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
+ return 0;
+}
+
+#define ARRAY_ISCOMPATIBLE(arr,type_num) \
+ ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) || \
+ (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) || \
+ (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) || \
+ (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) || \
+ (PyArray_ISSTRING(arr) && PyTypeNum_ISSTRING(type_num)))
+
+static int
+get_elsize(PyObject *obj) {
+ /*
+ get_elsize determines array itemsize from a Python object. Returns
+ elsize if successful, -1 otherwise.
+
+ Supported types of the input are: numpy.ndarray, bytes, str, tuple,
+ list.
+ */
+
+ if (PyArray_Check(obj)) {
+ return PyArray_DESCR((PyArrayObject *)obj)->elsize;
+ } else if (PyBytes_Check(obj)) {
+ return PyBytes_GET_SIZE(obj);
+ } else if (PyUnicode_Check(obj)) {
+ return PyUnicode_GET_LENGTH(obj);
+ } else if (PySequence_Check(obj)) {
+ PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize");
+ if (fast != NULL) {
+ Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast);
+ int sz, elsize = 0;
+ for (i=0; i<n; i++) {
+ sz = get_elsize(PySequence_Fast_GET_ITEM(fast, i) /* borrowed */);
+ if (sz > elsize) {
+ elsize = sz;
+ }
+ }
+ Py_DECREF(fast);
+ return elsize;
+ }
+ }
+ return -1;
+}
+
+extern PyArrayObject *
+ndarray_from_pyobj(const int type_num,
+ const int elsize_,
+ npy_intp *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj,
+ const char *errmess) {
+ /*
+ * Return an array with given element type and shape from a Python
+ * object while taking into account the usage intent of the array.
+ *
+ * - element type is defined by type_num and elsize
+ * - shape is defined by dims and rank
+ *
+     * ndarray_from_pyobj is used to convert Python object arguments to
+     * numpy ndarrays with a given type and shape, whose data is then
+     * passed to the interfaced Fortran or C functions.
+ *
+     * errmess, if not NULL, contains the prefix of an error message
+     * for any exception raised within this function.
+ *
+     * A negative elsize value means that elsize is to be determined
+     * from the Python object at runtime.
+ *
+ * Note on strings
+ * ---------------
+ *
+ * String type (type_num == NPY_STRING) does not have fixed
+ * element size and, by default, the type object sets it to
+ * 0. Therefore, for string types, one has to use elsize
+ * argument. For other types, elsize value is ignored.
+ *
+ * NumPy defines the type of a fixed-width string as
+     * dtype('S<width>'). In addition, there is also dtype('c'), which
+     * appears as dtype('S1') (the two share the same type_num value)
+     * but is actually different (its .char attribute is 'S' or 'c',
+     * respectively).
+ *
+ * In Fortran, character arrays and strings are different
+ * concepts. The relation between Fortran types, NumPy dtypes,
+ * and type_num-elsize pairs, is defined as follows:
+ *
+ * character*5 foo | dtype('S5') | elsize=5, shape=()
+ * character(5) foo | dtype('S1') | elsize=1, shape=(5)
+ * character*5 foo(n) | dtype('S5') | elsize=5, shape=(n,)
+ * character(5) foo(n) | dtype('S1') | elsize=1, shape=(5, n)
+ * character*(*) foo | dtype('S') | elsize=-1, shape=()
+ *
+ * Note about reference counting
+ * -----------------------------
+ *
+ * If the caller returns the array to Python, it must be done with
+ * Py_BuildValue("N",arr). Otherwise, if obj!=arr then the caller
+ * must call Py_DECREF(arr).
+ *
+ * Note on intent(cache,out,..)
+ * ----------------------------
+     * Don't expect correct data when returning an intent(cache) array.
+ *
+ */
+ char mess[F2PY_MESSAGE_BUFFER_SIZE];
+ PyArrayObject *arr = NULL;
+ int elsize = (elsize_ < 0 ? get_elsize(obj) : elsize_);
+ if (elsize < 0) {
+ if (errmess != NULL) {
+ strcpy(mess, errmess);
+ }
+ sprintf(mess + strlen(mess),
+ " -- failed to determine element size from %s",
+ Py_TYPE(obj)->tp_name);
+ PyErr_SetString(PyExc_SystemError, mess);
+ return NULL;
+ }
+ PyArray_Descr * descr = get_descr_from_type_and_elsize(type_num, elsize); // new reference
+ if (descr == NULL) {
+ return NULL;
+ }
+ elsize = descr->elsize;
+ if ((intent & F2PY_INTENT_HIDE)
+ || ((intent & F2PY_INTENT_CACHE) && (obj == Py_None))
+ || ((intent & F2PY_OPTIONAL) && (obj == Py_None))
+ ) {
+ /* intent(cache), optional, intent(hide) */
+ int ineg = find_first_negative_dimension(rank, dims);
+ if (ineg >= 0) {
+ int i;
+            strcpy(mess, "failed to create intent(cache|hide)|optional array"
+                         " -- must have defined dimensions but got (");
+ for(i = 0; i < rank; ++i)
+ sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]);
+ strcat(mess, ")");
+ PyErr_SetString(PyExc_ValueError, mess);
+ Py_DECREF(descr);
+ return NULL;
+ }
+ arr = (PyArrayObject *) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, rank, dims,
+ NULL, NULL, !(intent & F2PY_INTENT_C), NULL);
+ if (arr == NULL) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ if (PyArray_ITEMSIZE(arr) != elsize) {
+ strcpy(mess, "failed to create intent(cache|hide)|optional array");
+ sprintf(mess+strlen(mess)," -- expected elsize=%d got %" NPY_INTP_FMT, elsize, (npy_intp)PyArray_ITEMSIZE(arr));
+ PyErr_SetString(PyExc_ValueError,mess);
+ Py_DECREF(arr);
+ return NULL;
+ }
+ if (!(intent & F2PY_INTENT_CACHE)) {
+ PyArray_FILLWBYTE(arr, 0);
+ }
+ return arr;
+ }
+
+ if (PyArray_Check(obj)) {
+ arr = (PyArrayObject *)obj;
+ if (intent & F2PY_INTENT_CACHE) {
+ /* intent(cache) */
+ if (PyArray_ISONESEGMENT(arr)
+ && PyArray_ITEMSIZE(arr) >= elsize) {
+ if (check_and_fix_dimensions(arr, rank, dims, errmess)) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(arr);
+ Py_DECREF(descr);
+ return arr;
+ }
+ strcpy(mess, "failed to initialize intent(cache) array");
+ if (!PyArray_ISONESEGMENT(arr))
+ strcat(mess, " -- input must be in one segment");
+ if (PyArray_ITEMSIZE(arr) < elsize)
+ sprintf(mess + strlen(mess),
+ " -- expected at least elsize=%d but got "
+ "%" NPY_INTP_FMT,
+ elsize, (npy_intp)PyArray_ITEMSIZE(arr));
+ PyErr_SetString(PyExc_ValueError, mess);
+ Py_DECREF(descr);
+ return NULL;
+ }
+
+        /* here we always have intent(in), intent(inout) or intent(inplace)
+         */
+
+ if (check_and_fix_dimensions(arr, rank, dims, errmess)) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ /*
+ printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent));
+ printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent));
+ int i;
+ for (i=1;i<=16;i++)
+ printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i));
+ */
+ if ((! (intent & F2PY_INTENT_COPY)) &&
+ PyArray_ITEMSIZE(arr) == elsize &&
+ ARRAY_ISCOMPATIBLE(arr,type_num) &&
+ F2PY_CHECK_ALIGNMENT(arr, intent)) {
+ if ((intent & F2PY_INTENT_INOUT || intent & F2PY_INTENT_INPLACE)
+ ? ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY(arr) : PyArray_ISFARRAY(arr))
+ : ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) : PyArray_ISFARRAY_RO(arr))) {
+ if ((intent & F2PY_INTENT_OUT)) {
+ Py_INCREF(arr);
+ }
+ /* Returning input array */
+ Py_DECREF(descr);
+ return arr;
+ }
+ }
+ if (intent & F2PY_INTENT_INOUT) {
+ strcpy(mess, "failed to initialize intent(inout) array");
+ /* Must use PyArray_IS*ARRAY because intent(inout) requires
+ * writable input */
+ if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr))
+ strcat(mess, " -- input not contiguous");
+ if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr))
+ strcat(mess, " -- input not fortran contiguous");
+ if (PyArray_ITEMSIZE(arr) != elsize)
+ sprintf(mess + strlen(mess),
+ " -- expected elsize=%d but got %" NPY_INTP_FMT,
+ elsize,
+ (npy_intp)PyArray_ITEMSIZE(arr)
+ );
+ if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) {
+ sprintf(mess + strlen(mess),
+ " -- input '%c' not compatible to '%c'",
+ PyArray_DESCR(arr)->type, descr->type);
+ }
+ if (!(F2PY_CHECK_ALIGNMENT(arr, intent)))
+ sprintf(mess + strlen(mess), " -- input not %d-aligned",
+ F2PY_GET_ALIGNMENT(intent));
+ PyErr_SetString(PyExc_ValueError, mess);
+ Py_DECREF(descr);
+ return NULL;
+ }
+
+        /* here we always have intent(in) or intent(inplace) */
+
+ {
+ PyArrayObject * retarr = (PyArrayObject *) \
+ PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr),
+ NULL, NULL, !(intent & F2PY_INTENT_C), NULL);
+ if (retarr==NULL) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+ if (PyArray_CopyInto(retarr, arr)) {
+ Py_DECREF(retarr);
+ return NULL;
+ }
+ if (intent & F2PY_INTENT_INPLACE) {
+ if (swap_arrays(arr,retarr)) {
+ Py_DECREF(retarr);
+ return NULL; /* XXX: set exception */
+ }
+ Py_XDECREF(retarr);
+ if (intent & F2PY_INTENT_OUT)
+ Py_INCREF(arr);
+ } else {
+ arr = retarr;
+ }
+ }
+ return arr;
+ }
+
+ if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) ||
+ (intent & F2PY_INTENT_CACHE)) {
+ PyErr_Format(PyExc_TypeError,
+ "failed to initialize intent(inout|inplace|cache) "
+ "array, input '%s' object is not an array",
+ Py_TYPE(obj)->tp_name);
+ Py_DECREF(descr);
+ return NULL;
+ }
+
+ {
+ F2PY_REPORT_ON_ARRAY_COPY_FROMANY;
+ arr = (PyArrayObject *)PyArray_FromAny(
+ obj, descr, 0, 0,
+ ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY
+ : NPY_ARRAY_FARRAY) |
+ NPY_ARRAY_FORCECAST,
+ NULL);
+ // Warning: in the case of NPY_STRING, PyArray_FromAny may
+ // reset descr->elsize, e.g. dtype('S0') becomes dtype('S1').
+ if (arr == NULL) {
+ Py_DECREF(descr);
+ return NULL;
+ }
+ if (type_num != NPY_STRING && PyArray_ITEMSIZE(arr) != elsize) {
+        // This is an internal sanity check: elsize was set to
+        // descr->elsize at the beginning of this function.
+ strcpy(mess, "failed to initialize intent(in) array");
+ sprintf(mess + strlen(mess),
+ " -- expected elsize=%d got %" NPY_INTP_FMT, elsize,
+ (npy_intp)PyArray_ITEMSIZE(arr));
+ PyErr_SetString(PyExc_ValueError, mess);
+ Py_DECREF(arr);
+ return NULL;
+ }
+ if (check_and_fix_dimensions(arr, rank, dims, errmess)) {
+ Py_DECREF(arr);
+ return NULL;
+ }
+ return arr;
+ }
+}
+
+extern PyArrayObject *
+array_from_pyobj(const int type_num,
+ npy_intp *dims,
+ const int rank,
+ const int intent,
+ PyObject *obj) {
+ /*
+ Same as ndarray_from_pyobj but with elsize determined from type,
+ if possible. Provided for backward compatibility.
+ */
+ PyArray_Descr* descr = PyArray_DescrFromType(type_num);
+ int elsize = descr->elsize;
+ Py_DECREF(descr);
+ return ndarray_from_pyobj(type_num, elsize, dims, rank, intent, obj, NULL);
+}
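+/*
+ * A call sketch (variable names and values are illustrative):
+ *
+ *     npy_intp dims[1] = {10};
+ *     PyArrayObject *a = array_from_pyobj(NPY_DOUBLE, dims, 1,
+ *                                         F2PY_INTENT_IN, obj);
+ *     // On success, a wraps Fortran-contiguous double data of shape
+ *     // (10,); if ((PyObject *)a != obj), Py_DECREF(a) when done.
+ */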
+
+/*****************************************/
+/* Helper functions for array_from_pyobj */
+/*****************************************/
+
+static int
+check_and_fix_dimensions(const PyArrayObject* arr, const int rank,
+ npy_intp *dims, const char *errmess)
+{
+ /*
+     * This function fills in the blanks (the -1 entries) in the dims
+     * list using the dimensions from arr. It also checks that the
+     * non-blank entries match the corresponding dimensions of arr.
+ *
+ * Returns 0 if the function is successful.
+ *
+ * If an error condition is detected, an exception is set and 1 is
+ * returned.
+ */
+ char mess[F2PY_MESSAGE_BUFFER_SIZE];
+ const npy_intp arr_size =
+ (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1;
+#ifdef DEBUG_COPY_ND_ARRAY
+ dump_attrs(arr);
+ printf("check_and_fix_dimensions:init: dims=");
+ dump_dims(rank, dims);
+#endif
+ if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
+ npy_intp new_size = 1;
+ int free_axe = -1;
+ int i;
+ npy_intp d;
+ /* Fill dims where -1 or 0; check dimensions; calc new_size; */
+ for (i = 0; i < PyArray_NDIM(arr); ++i) {
+ d = PyArray_DIM(arr, i);
+ if (dims[i] >= 0) {
+ if (d > 1 && dims[i] != d) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "%d-th dimension must be fixed to %" NPY_INTP_FMT
+ " but got %" NPY_INTP_FMT "\n",
+ i, dims[i], d);
+ return 1;
+ }
+ if (!dims[i])
+ dims[i] = 1;
+ }
+ else {
+ dims[i] = d ? d : 1;
+ }
+ new_size *= dims[i];
+ }
+ for (i = PyArray_NDIM(arr); i < rank; ++i)
+ if (dims[i] > 1) {
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be %" NPY_INTP_FMT
+ " but got 0 (not defined).\n",
+ i, dims[i]);
+ return 1;
+ }
+ else if (free_axe < 0)
+ free_axe = i;
+ else
+ dims[i] = 1;
+ if (free_axe >= 0) {
+ dims[free_axe] = arr_size / new_size;
+ new_size *= dims[free_axe];
+ }
+ if (new_size != arr_size) {
+ PyErr_Format(PyExc_ValueError,
+ "unexpected array size: new_size=%" NPY_INTP_FMT
+ ", got array with arr_size=%" NPY_INTP_FMT
+ " (maybe too many free indices)\n",
+ new_size, arr_size);
+ return 1;
+ }
+ }
+ else if (rank == PyArray_NDIM(arr)) {
+ npy_intp new_size = 1;
+ int i;
+ npy_intp d;
+ for (i = 0; i < rank; ++i) {
+ d = PyArray_DIM(arr, i);
+ if (dims[i] >= 0) {
+ if (d > 1 && d != dims[i]) {
+ if (errmess != NULL) {
+ strcpy(mess, errmess);
+ }
+ sprintf(mess + strlen(mess),
+ " -- %d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT,
+ i, dims[i], d);
+ PyErr_SetString(PyExc_ValueError, mess);
+ return 1;
+ }
+ if (!dims[i])
+ dims[i] = 1;
+ }
+ else
+ dims[i] = d;
+ new_size *= dims[i];
+ }
+ if (new_size != arr_size) {
+ PyErr_Format(PyExc_ValueError,
+ "unexpected array size: new_size=%" NPY_INTP_FMT
+ ", got array with arr_size=%" NPY_INTP_FMT "\n",
+ new_size, arr_size);
+ return 1;
+ }
+ }
+ else { /* [[1,2]] -> [[1],[2]] */
+ int i, j;
+ npy_intp d;
+ int effrank;
+ npy_intp size;
+ for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i)
+ if (PyArray_DIM(arr, i) > 1)
+ ++effrank;
+ if (dims[rank - 1] >= 0)
+ if (effrank > rank) {
+ PyErr_Format(PyExc_ValueError,
+ "too many axes: %d (effrank=%d), "
+ "expected rank=%d\n",
+ PyArray_NDIM(arr), effrank, rank);
+ return 1;
+ }
+
+ for (i = 0, j = 0; i < rank; ++i) {
+ while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j;
+ if (j >= PyArray_NDIM(arr))
+ d = 1;
+ else
+ d = PyArray_DIM(arr, j++);
+ if (dims[i] >= 0) {
+ if (d > 1 && d != dims[i]) {
+ if (errmess != NULL) {
+ strcpy(mess, errmess);
+ }
+ sprintf(mess + strlen(mess),
+ " -- %d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT
+ " (real index=%d)\n",
+ i, dims[i], d, j-1);
+ PyErr_SetString(PyExc_ValueError, mess);
+ return 1;
+ }
+ if (!dims[i])
+ dims[i] = 1;
+ }
+ else
+ dims[i] = d;
+ }
+
+ for (i = rank; i < PyArray_NDIM(arr);
+ ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */
+ while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j;
+ if (j >= PyArray_NDIM(arr))
+ d = 1;
+ else
+ d = PyArray_DIM(arr, j++);
+ dims[rank - 1] *= d;
+ }
+ for (i = 0, size = 1; i < rank; ++i) size *= dims[i];
+ if (size != arr_size) {
+ char msg[200];
+ int len;
+ snprintf(msg, sizeof(msg),
+ "unexpected array size: size=%" NPY_INTP_FMT
+ ", arr_size=%" NPY_INTP_FMT
+ ", rank=%d, effrank=%d, arr.nd=%d, dims=[",
+ size, arr_size, rank, effrank, PyArray_NDIM(arr));
+ for (i = 0; i < rank; ++i) {
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT,
+ dims[i]);
+ }
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=[");
+ for (i = 0; i < PyArray_NDIM(arr); ++i) {
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT,
+ PyArray_DIM(arr, i));
+ }
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " ]\n");
+ PyErr_SetString(PyExc_ValueError, msg);
+ return 1;
+ }
+ }
+#ifdef DEBUG_COPY_ND_ARRAY
+ printf("check_and_fix_dimensions:end: dims=");
+ dump_dims(rank, dims);
+#endif
+ return 0;
+}
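+/* Example (equal-rank case, illustrative): a request with rank == 2 and
+   dims == {2, -1} against an input array of shape (2, 3) fills in the
+   blank entry to give dims == {2, 3}. */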
+
+/* End of file: array_from_pyobj.c */
+
+/************************* copy_ND_array *******************************/
+
+extern int
+copy_ND_array(const PyArrayObject *arr, PyArrayObject *out)
+{
+ F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+ return PyArray_CopyInto(out, (PyArrayObject *)arr);
+}
+
+/********************* Various utility functions ***********************/
+
+extern int
+f2py_describe(PyObject *obj, char *buf) {
+ /*
+      Write the description of a Python object to buf. The caller must
+      provide a buffer large enough to hold the description.
+
+ Return 1 on success.
+ */
+ char localbuf[F2PY_MESSAGE_BUFFER_SIZE];
+ if (PyBytes_Check(obj)) {
+ sprintf(localbuf, "%d-%s", (npy_int)PyBytes_GET_SIZE(obj), Py_TYPE(obj)->tp_name);
+ } else if (PyUnicode_Check(obj)) {
+ sprintf(localbuf, "%d-%s", (npy_int)PyUnicode_GET_LENGTH(obj), Py_TYPE(obj)->tp_name);
+ } else if (PyArray_CheckScalar(obj)) {
+ PyArrayObject* arr = (PyArrayObject*)obj;
+ sprintf(localbuf, "%c%" NPY_INTP_FMT "-%s-scalar", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name);
+ } else if (PyArray_Check(obj)) {
+ int i;
+ PyArrayObject* arr = (PyArrayObject*)obj;
+ strcpy(localbuf, "(");
+ for (i=0; i<PyArray_NDIM(arr); i++) {
+ if (i) {
+ strcat(localbuf, " ");
+ }
+ sprintf(localbuf + strlen(localbuf), "%" NPY_INTP_FMT ",", PyArray_DIM(arr, i));
+ }
+ sprintf(localbuf + strlen(localbuf), ")-%c%" NPY_INTP_FMT "-%s", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name);
+ } else if (PySequence_Check(obj)) {
+ sprintf(localbuf, "%d-%s", (npy_int)PySequence_Length(obj), Py_TYPE(obj)->tp_name);
+ } else {
+ sprintf(localbuf, "%s instance", Py_TYPE(obj)->tp_name);
+ }
+ // TODO: detect the size of buf and make sure that size(buf) >= size(localbuf).
+ strcpy(buf, localbuf);
+ return 1;
+}
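+/* Example outputs (illustrative): b"abc" is described as "3-bytes"; a
+   float64 array of shape (2, 3) as "(2, 3,)-d8-numpy.ndarray". */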
+
+extern npy_intp
+f2py_size_impl(PyArrayObject* var, ...)
+{
+ npy_intp sz = 0;
+ npy_intp dim;
+ npy_intp rank;
+ va_list argp;
+ va_start(argp, var);
+ dim = va_arg(argp, npy_int);
+ if (dim==-1)
+ {
+ sz = PyArray_SIZE(var);
+ }
+ else
+ {
+ rank = PyArray_NDIM(var);
+ if (dim>=1 && dim<=rank)
+ sz = PyArray_DIM(var, dim-1);
+ else
+ fprintf(stderr, "f2py_size: 2nd argument value=%" NPY_INTP_FMT
+ " fails to satisfy 1<=value<=%" NPY_INTP_FMT
+ ". Result will be 0.\n", dim, rank);
+ }
+ va_end(argp);
+ return sz;
+}
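+/* Example (illustrative): for a (3, 4) array, the f2py_size(var) macro
+   declared in fortranobject.h yields 12, while f2py_size(var, 1) yields
+   3 (dimensions are 1-based, as in Fortran). */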
+
+/*********************************************/
+/* Compatibility functions for Python >= 3.0 */
+/*********************************************/
+
+PyObject *
+F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+ PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+void *
+F2PyCapsule_AsVoidPtr(PyObject *obj)
+{
+ void *ret = PyCapsule_GetPointer(obj, NULL);
+ if (ret == NULL) {
+ PyErr_Clear();
+ }
+ return ret;
+}
+
+int
+F2PyCapsule_Check(PyObject *ptr)
+{
+ return PyCapsule_CheckExact(ptr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+/************************* EOF fortranobject.c *******************************/
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h b/venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h
new file mode 100644
index 00000000..abd699c2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/src/fortranobject.h
@@ -0,0 +1,173 @@
+#ifndef Py_FORTRANOBJECT_H
+#define Py_FORTRANOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+#ifdef FORTRANOBJECT_C
+#define NO_IMPORT_ARRAY
+#endif
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API
+#include "numpy/arrayobject.h"
+#include "numpy/npy_3kcompat.h"
+
+#ifdef F2PY_REPORT_ATEXIT
+#include <sys/timeb.h>
+// clang-format off
+extern void f2py_start_clock(void);
+extern void f2py_stop_clock(void);
+extern void f2py_start_call_clock(void);
+extern void f2py_stop_call_clock(void);
+extern void f2py_cb_start_clock(void);
+extern void f2py_cb_stop_clock(void);
+extern void f2py_cb_start_call_clock(void);
+extern void f2py_cb_stop_call_clock(void);
+extern void f2py_report_on_exit(int, void *);
+// clang-format on
+#endif
+
+#ifdef DMALLOC
+#include "dmalloc.h"
+#endif
+
+/* Fortran object interface */
+
+/*
+123456789-123456789-123456789-123456789-123456789-123456789-123456789-12
+
+PyFortranObject represents various Fortran objects:
+Fortran (module) routines, COMMON blocks, module data.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+*/
+
+#define F2PY_MAX_DIMS 40
+#define F2PY_MESSAGE_BUFFER_SIZE 300 // Increase on "stack smashing detected"
+
+typedef void (*f2py_set_data_func)(char *, npy_intp *);
+typedef void (*f2py_void_func)(void);
+typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *);
+
+/*typedef void* (*f2py_c_func)(void*,...);*/
+
+typedef void *(*f2pycfunc)(void);
+
+typedef struct {
+ char *name; /* attribute (array||routine) name */
+ int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS,
+ || rank=-1 for Fortran routine */
+ struct {
+ npy_intp d[F2PY_MAX_DIMS];
+ } dims; /* dimensions of the array, || not used */
+ int type; /* PyArray_<type> || not used */
+ int elsize; /* Element size || not used */
+ char *data; /* pointer to array || Fortran routine */
+ f2py_init_func func; /* initialization function for
+ allocatable arrays:
+ func(&rank,dims,set_ptr_func,name,len(name))
+ || C/API wrapper for Fortran routine */
+ char *doc; /* documentation string; only recommended
+ for routines. */
+} FortranDataDef;
+
+typedef struct {
+ PyObject_HEAD
+ int len; /* Number of attributes */
+ FortranDataDef *defs; /* An array of FortranDataDef's */
+ PyObject *dict; /* Fortran object attribute dictionary */
+} PyFortranObject;
+
+#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type)
+#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran"))
+
+extern PyTypeObject PyFortran_Type;
+extern int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj);
+extern PyObject *
+PyFortranObject_New(FortranDataDef *defs, f2py_void_func init);
+extern PyObject *
+PyFortranObject_NewAsAttr(FortranDataDef *defs);
+
+PyObject *
+F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *));
+void *
+F2PyCapsule_AsVoidPtr(PyObject *obj);
+int
+F2PyCapsule_Check(PyObject *ptr);
+
+extern void *
+F2PySwapThreadLocalCallbackPtr(char *key, void *ptr);
+extern void *
+F2PyGetThreadLocalCallbackPtr(char *key);
+
+#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS)
+#define F2PY_INTENT_IN 1
+#define F2PY_INTENT_INOUT 2
+#define F2PY_INTENT_OUT 4
+#define F2PY_INTENT_HIDE 8
+#define F2PY_INTENT_CACHE 16
+#define F2PY_INTENT_COPY 32
+#define F2PY_INTENT_C 64
+#define F2PY_OPTIONAL 128
+#define F2PY_INTENT_INPLACE 256
+#define F2PY_INTENT_ALIGNED4 512
+#define F2PY_INTENT_ALIGNED8 1024
+#define F2PY_INTENT_ALIGNED16 2048
+
+#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0)
+#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4)
+#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8)
+#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16)
+
+#define F2PY_GET_ALIGNMENT(intent) \
+ (F2PY_ALIGN4(intent) \
+ ? 4 \
+ : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1)))
+#define F2PY_CHECK_ALIGNMENT(arr, intent) \
+ ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent))
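+/* Example: with intent = F2PY_INTENT_IN | F2PY_INTENT_ALIGNED8,
+   F2PY_GET_ALIGNMENT(intent) is 8, so F2PY_CHECK_ALIGNMENT(arr, intent)
+   holds only when PyArray_DATA(arr) is 8-byte aligned. */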
+#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_DESCR(arr)->elsize >= 1) \
+ || PyArray_DESCR(arr)->type_num == NPY_UINT8)
+#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE)
+
+extern PyArrayObject *
+ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims,
+ const int rank, const int intent, PyObject *obj,
+ const char *errmess);
+
+extern PyArrayObject *
+array_from_pyobj(const int type_num, npy_intp *dims, const int rank,
+ const int intent, PyObject *obj);
+extern int
+copy_ND_array(const PyArrayObject *in, PyArrayObject *out);
+
+#ifdef DEBUG_COPY_ND_ARRAY
+extern void
+dump_attrs(const PyArrayObject *arr);
+#endif
+
+ extern int f2py_describe(PyObject *obj, char *buf);
+
+ /* Utility CPP macros and functions that can be used in signature file
+ expressions. See signature-file.rst for documentation.
+ */
+
+#define f2py_itemsize(var) (PyArray_DESCR((capi_ ## var ## _as_array))->elsize)
+#define f2py_size(var, ...) f2py_size_impl((PyArrayObject *)(capi_ ## var ## _as_array), ## __VA_ARGS__, -1)
+#define f2py_rank(var) var ## _Rank
+#define f2py_shape(var,dim) var ## _Dims[dim]
+#define f2py_len(var) f2py_shape(var,0)
+#define f2py_fshape(var,dim) f2py_shape(var,rank(var)-dim-1)
+#define f2py_flen(var) f2py_fshape(var,0)
+#define f2py_slen(var) capi_ ## var ## _len
+
+ extern npy_intp f2py_size_impl(PyArrayObject* var, ...);
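+/* A usage sketch (the argument name `a` is hypothetical): in
+   f2py-generated wrapper code, f2py_rank(a) expands to a_Rank,
+   f2py_shape(a, 0) to a_Dims[0], and f2py_len(a) is the same as
+   f2py_shape(a, 0); the expanded names must exist as wrapper-local
+   variables. */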
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_FORTRANOBJECT_H */
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/symbolic.py b/venv/lib/python3.9/site-packages/numpy/f2py/symbolic.py
new file mode 100644
index 00000000..c2ab0f14
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/symbolic.py
@@ -0,0 +1,1510 @@
+"""Fortran/C symbolic expressions
+
+References:
+- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf
+"""
+
+# To analyze Fortran expressions, e.g. to resolve dimension
+# specifications, we implement a minimal symbolic engine that parses
+# expressions into a tree of expression instances. As a first
+# instance, we care only about arithmetic expressions involving
+# integers and operations like addition (+), subtraction (-),
+# multiplication (*), division (Fortran / is Python //, Fortran // is
+# concatenation), and exponentiation (**). In addition, .pyf files may
+# contain C expressions; support for these is implemented here as well.
+#
+# TODO: support logical constants (Op.BOOLEAN)
+# TODO: support logical operators (.AND., ...)
+# TODO: support defined operators (.MYOP., ...)
+#
+__all__ = ['Expr']
+
+
+import re
+import warnings
+from enum import Enum
+from math import gcd
+
+
+class Language(Enum):
+ """
+ Used as Expr.tostring language argument.
+ """
+ Python = 0
+ Fortran = 1
+ C = 2
+
+
+class Op(Enum):
+ """
+ Used as Expr op attribute.
+ """
+ INTEGER = 10
+ REAL = 12
+ COMPLEX = 15
+ STRING = 20
+ ARRAY = 30
+ SYMBOL = 40
+ TERNARY = 100
+ APPLY = 200
+ INDEXING = 210
+ CONCAT = 220
+ RELATIONAL = 300
+ TERMS = 1000
+ FACTORS = 2000
+ REF = 3000
+ DEREF = 3001
+
+
+class RelOp(Enum):
+ """
+    Used in an Op.RELATIONAL expression to specify the relational operator.
+ """
+ EQ = 1
+ NE = 2
+ LT = 3
+ LE = 4
+ GT = 5
+ GE = 6
+
+ @classmethod
+ def fromstring(cls, s, language=Language.C):
+ if language is Language.Fortran:
+ return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
+ '.lt.': RelOp.LT, '.le.': RelOp.LE,
+ '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
+ return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
+ '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
+
+ def tostring(self, language=Language.C):
+ if language is Language.Fortran:
+ return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
+ RelOp.LT: '.lt.', RelOp.LE: '.le.',
+ RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
+ return {RelOp.EQ: '==', RelOp.NE: '!=',
+ RelOp.LT: '<', RelOp.LE: '<=',
+ RelOp.GT: '>', RelOp.GE: '>='}[self]
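+    # For example, round-tripping between languages with the mappings
+    # above:
+    #
+    #     >>> RelOp.fromstring('.le.', language=Language.Fortran)
+    #     <RelOp.LE: 4>
+    #     >>> RelOp.LE.tostring(language=Language.C)
+    #     '<='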
+
+
+class ArithOp(Enum):
+ """
+ Used in Op.APPLY expression to specify the function part.
+ """
+ POS = 1
+ NEG = 2
+ ADD = 3
+ SUB = 4
+ MUL = 5
+ DIV = 6
+ POW = 7
+
+
+class OpError(Exception):
+ pass
+
+
+class Precedence(Enum):
+ """
+ Used as Expr.tostring precedence argument.
+ """
+ ATOM = 0
+ POWER = 1
+ UNARY = 2
+ PRODUCT = 3
+ SUM = 4
+ LT = 6
+ EQ = 7
+ LAND = 11
+ LOR = 12
+ TERNARY = 13
+ ASSIGN = 14
+ TUPLE = 15
+ NONE = 100
+
+
+integer_types = (int,)
+number_types = (int, float)
+
+
+def _pairs_add(d, k, v):
+ # Internal utility method for updating terms and factors data.
+ c = d.get(k)
+ if c is None:
+ d[k] = v
+ else:
+ c = c + v
+ if c:
+ d[k] = c
+ else:
+ del d[k]
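+# For example, with any hashable key k: _pairs_add({k: 2}, k, 1) updates
+# the entry to {k: 3}, while _pairs_add({k: 2}, k, -2) deletes it, so
+# zero-valued coefficients and exponents never linger in TERMS/FACTORS
+# data.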
+
+
+class ExprWarning(UserWarning):
+ pass
+
+
+def ewarn(message):
+ warnings.warn(message, ExprWarning, stacklevel=2)
+
+
+class Expr:
+    """Represents a Fortran expression as an op-data pair.
+
+ Expr instances are hashable and sortable.
+ """
+
+ @staticmethod
+ def parse(s, language=Language.C):
+        """Parse a Fortran expression to an Expr.
+ """
+ return fromstring(s, language=language)
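+    # A minimal usage sketch (the expression and result shown are
+    # illustrative):
+    #
+    #     >>> e = Expr.parse('3 * x + 1')
+    #     >>> e.substitute({Expr.parse('x'): Expr.parse('2')})
+    #     Expr(Op.INTEGER, (7, 4))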
+
+ def __init__(self, op, data):
+ assert isinstance(op, Op)
+
+ # sanity checks
+ if op is Op.INTEGER:
+ # data is a 2-tuple of numeric object and a kind value
+ # (default is 4)
+ assert isinstance(data, tuple) and len(data) == 2
+ assert isinstance(data[0], int)
+ assert isinstance(data[1], (int, str)), data
+ elif op is Op.REAL:
+ # data is a 2-tuple of numeric object and a kind value
+ # (default is 4)
+ assert isinstance(data, tuple) and len(data) == 2
+ assert isinstance(data[0], float)
+ assert isinstance(data[1], (int, str)), data
+ elif op is Op.COMPLEX:
+ # data is a 2-tuple of constant expressions
+ assert isinstance(data, tuple) and len(data) == 2
+ elif op is Op.STRING:
+ # data is a 2-tuple of quoted string and a kind value
+ # (default is 1)
+ assert isinstance(data, tuple) and len(data) == 2
+ assert (isinstance(data[0], str)
+ and data[0][::len(data[0])-1] in ('""', "''", '@@'))
+ assert isinstance(data[1], (int, str)), data
+ elif op is Op.SYMBOL:
+ # data is any hashable object
+ assert hash(data) is not None
+ elif op in (Op.ARRAY, Op.CONCAT):
+ # data is a tuple of expressions
+ assert isinstance(data, tuple)
+ assert all(isinstance(item, Expr) for item in data), data
+ elif op in (Op.TERMS, Op.FACTORS):
+ # data is {<term|base>:<coeff|exponent>} where dict values
+ # are nonzero Python integers
+ assert isinstance(data, dict)
+ elif op is Op.APPLY:
+ # data is (<function>, <operands>, <kwoperands>) where
+ # operands are Expr instances
+ assert isinstance(data, tuple) and len(data) == 3
+ # function is any hashable object
+ assert hash(data[0]) is not None
+ assert isinstance(data[1], tuple)
+ assert isinstance(data[2], dict)
+ elif op is Op.INDEXING:
+ # data is (<object>, <indices>)
+ assert isinstance(data, tuple) and len(data) == 2
+ # function is any hashable object
+ assert hash(data[0]) is not None
+ elif op is Op.TERNARY:
+ # data is (<cond>, <expr1>, <expr2>)
+ assert isinstance(data, tuple) and len(data) == 3
+ elif op in (Op.REF, Op.DEREF):
+ # data is Expr instance
+ assert isinstance(data, Expr)
+ elif op is Op.RELATIONAL:
+ # data is (<relop>, <left>, <right>)
+ assert isinstance(data, tuple) and len(data) == 3
+ else:
+ raise NotImplementedError(
+ f'unknown op or missing sanity check: {op}')
+
+ self.op = op
+ self.data = data
+
+ def __eq__(self, other):
+ return (isinstance(other, Expr)
+ and self.op is other.op
+ and self.data == other.data)
+
+ def __hash__(self):
+ if self.op in (Op.TERMS, Op.FACTORS):
+ data = tuple(sorted(self.data.items()))
+ elif self.op is Op.APPLY:
+ data = self.data[:2] + tuple(sorted(self.data[2].items()))
+ else:
+ data = self.data
+ return hash((self.op, data))
+
+ def __lt__(self, other):
+ if isinstance(other, Expr):
+ if self.op is not other.op:
+ return self.op.value < other.op.value
+ if self.op in (Op.TERMS, Op.FACTORS):
+ return (tuple(sorted(self.data.items()))
+ < tuple(sorted(other.data.items())))
+ if self.op is Op.APPLY:
+ if self.data[:2] != other.data[:2]:
+ return self.data[:2] < other.data[:2]
+ return tuple(sorted(self.data[2].items())) < tuple(
+ sorted(other.data[2].items()))
+ return self.data < other.data
+ return NotImplemented
+
+ def __le__(self, other): return self == other or self < other
+
+ def __gt__(self, other): return not (self <= other)
+
+ def __ge__(self, other): return not (self < other)
+
+ def __repr__(self):
+ return f'{type(self).__name__}({self.op}, {self.data!r})'
+
+ def __str__(self):
+ return self.tostring()
+
+ def tostring(self, parent_precedence=Precedence.NONE,
+ language=Language.Fortran):
+ """Return a string representation of Expr.
+ """
+ if self.op in (Op.INTEGER, Op.REAL):
+ precedence = (Precedence.SUM if self.data[0] < 0
+ else Precedence.ATOM)
+ r = str(self.data[0]) + (f'_{self.data[1]}'
+ if self.data[1] != 4 else '')
+ elif self.op is Op.COMPLEX:
+ r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
+ for item in self.data)
+ r = '(' + r + ')'
+ precedence = Precedence.ATOM
+ elif self.op is Op.SYMBOL:
+ precedence = Precedence.ATOM
+ r = str(self.data)
+ elif self.op is Op.STRING:
+ r = self.data[0]
+ if self.data[1] != 1:
+ r = self.data[1] + '_' + r
+ precedence = Precedence.ATOM
+ elif self.op is Op.ARRAY:
+ r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
+ for item in self.data)
+ r = '[' + r + ']'
+ precedence = Precedence.ATOM
+ elif self.op is Op.TERMS:
+ terms = []
+ for term, coeff in sorted(self.data.items()):
+ if coeff < 0:
+ op = ' - '
+ coeff = -coeff
+ else:
+ op = ' + '
+ if coeff == 1:
+ term = term.tostring(Precedence.SUM, language=language)
+ else:
+ if term == as_number(1):
+ term = str(coeff)
+ else:
+ term = f'{coeff} * ' + term.tostring(
+ Precedence.PRODUCT, language=language)
+ if terms:
+ terms.append(op)
+ elif op == ' - ':
+ terms.append('-')
+ terms.append(term)
+ r = ''.join(terms) or '0'
+ precedence = Precedence.SUM if terms else Precedence.ATOM
+ elif self.op is Op.FACTORS:
+ factors = []
+ tail = []
+ for base, exp in sorted(self.data.items()):
+ op = ' * '
+ if exp == 1:
+ factor = base.tostring(Precedence.PRODUCT,
+ language=language)
+ elif language is Language.C:
+ if exp in range(2, 10):
+ factor = base.tostring(Precedence.PRODUCT,
+ language=language)
+ factor = ' * '.join([factor] * exp)
+ elif exp in range(-10, 0):
+ factor = base.tostring(Precedence.PRODUCT,
+ language=language)
+ tail += [factor] * -exp
+ continue
+ else:
+ factor = base.tostring(Precedence.TUPLE,
+ language=language)
+ factor = f'pow({factor}, {exp})'
+ else:
+ factor = base.tostring(Precedence.POWER,
+ language=language) + f' ** {exp}'
+ if factors:
+ factors.append(op)
+ factors.append(factor)
+ if tail:
+ if not factors:
+ factors += ['1']
+ factors += ['/', '(', ' * '.join(tail), ')']
+ r = ''.join(factors) or '1'
+ precedence = Precedence.PRODUCT if factors else Precedence.ATOM
+ elif self.op is Op.APPLY:
+ name, args, kwargs = self.data
+ if name is ArithOp.DIV and language is Language.C:
+ numer, denom = [arg.tostring(Precedence.PRODUCT,
+ language=language)
+ for arg in args]
+ r = f'{numer} / {denom}'
+ precedence = Precedence.PRODUCT
+ else:
+ args = [arg.tostring(Precedence.TUPLE, language=language)
+ for arg in args]
+ args += [k + '=' + v.tostring(Precedence.NONE)
+ for k, v in kwargs.items()]
+ r = f'{name}({", ".join(args)})'
+ precedence = Precedence.ATOM
+ elif self.op is Op.INDEXING:
+ name = self.data[0]
+ args = [arg.tostring(Precedence.TUPLE, language=language)
+ for arg in self.data[1:]]
+ r = f'{name}[{", ".join(args)}]'
+ precedence = Precedence.ATOM
+ elif self.op is Op.CONCAT:
+ args = [arg.tostring(Precedence.PRODUCT, language=language)
+ for arg in self.data]
+ r = " // ".join(args)
+ precedence = Precedence.PRODUCT
+ elif self.op is Op.TERNARY:
+ cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
+ language=language)
+ for a in self.data]
+ if language is Language.C:
+ r = f'({cond}?{expr1}:{expr2})'
+ elif language is Language.Python:
+ r = f'({expr1} if {cond} else {expr2})'
+ elif language is Language.Fortran:
+ r = f'merge({expr1}, {expr2}, {cond})'
+ else:
+ raise NotImplementedError(
+ f'tostring for {self.op} and {language}')
+ precedence = Precedence.ATOM
+ elif self.op is Op.REF:
+ r = '&' + self.data.tostring(Precedence.UNARY, language=language)
+ precedence = Precedence.UNARY
+ elif self.op is Op.DEREF:
+ r = '*' + self.data.tostring(Precedence.UNARY, language=language)
+ precedence = Precedence.UNARY
+ elif self.op is Op.RELATIONAL:
+ rop, left, right = self.data
+ precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
+ else Precedence.LT)
+ left = left.tostring(precedence, language=language)
+ right = right.tostring(precedence, language=language)
+ rop = rop.tostring(language=language)
+ r = f'{left} {rop} {right}'
+ else:
+ raise NotImplementedError(f'tostring for op {self.op}')
+ if parent_precedence.value < precedence.value:
+ # If parent precedence is higher than operand precedence,
+            # the operand will be enclosed in parentheses.
+ return '(' + r + ')'
+ return r
+
+ def __pos__(self):
+ return self
+
+ def __neg__(self):
+ return self * -1
+
+ def __add__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ if self.op is other.op:
+ if self.op in (Op.INTEGER, Op.REAL):
+ return as_number(
+ self.data[0] + other.data[0],
+ max(self.data[1], other.data[1]))
+ if self.op is Op.COMPLEX:
+ r1, i1 = self.data
+ r2, i2 = other.data
+ return as_complex(r1 + r2, i1 + i2)
+ if self.op is Op.TERMS:
+ r = Expr(self.op, dict(self.data))
+ for k, v in other.data.items():
+ _pairs_add(r.data, k, v)
+ return normalize(r)
+ if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
+ return self + as_complex(other)
+ elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
+ return as_complex(self) + other
+ elif self.op is Op.REAL and other.op is Op.INTEGER:
+ return self + as_real(other, kind=self.data[1])
+ elif self.op is Op.INTEGER and other.op is Op.REAL:
+ return as_real(self, kind=other.data[1]) + other
+ return as_terms(self) + as_terms(other)
+ return NotImplemented
+
+ def __radd__(self, other):
+ if isinstance(other, number_types):
+ return as_number(other) + self
+ return NotImplemented
+
+ def __sub__(self, other):
+ return self + (-other)
+
+ def __rsub__(self, other):
+ if isinstance(other, number_types):
+ return as_number(other) - self
+ return NotImplemented
+
+ def __mul__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ if self.op is other.op:
+ if self.op in (Op.INTEGER, Op.REAL):
+ return as_number(self.data[0] * other.data[0],
+ max(self.data[1], other.data[1]))
+ elif self.op is Op.COMPLEX:
+ r1, i1 = self.data
+ r2, i2 = other.data
+ return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
+
+ if self.op is Op.FACTORS:
+ r = Expr(self.op, dict(self.data))
+ for k, v in other.data.items():
+ _pairs_add(r.data, k, v)
+ return normalize(r)
+ elif self.op is Op.TERMS:
+ r = Expr(self.op, {})
+ for t1, c1 in self.data.items():
+ for t2, c2 in other.data.items():
+ _pairs_add(r.data, t1 * t2, c1 * c2)
+ return normalize(r)
+
+ if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
+ return self * as_complex(other)
+ elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
+ return as_complex(self) * other
+ elif self.op is Op.REAL and other.op is Op.INTEGER:
+ return self * as_real(other, kind=self.data[1])
+ elif self.op is Op.INTEGER and other.op is Op.REAL:
+ return as_real(self, kind=other.data[1]) * other
+
+ if self.op is Op.TERMS:
+ return self * as_terms(other)
+ elif other.op is Op.TERMS:
+ return as_terms(self) * other
+
+ return as_factors(self) * as_factors(other)
+ return NotImplemented
+
+ def __rmul__(self, other):
+ if isinstance(other, number_types):
+ return as_number(other) * self
+ return NotImplemented
+
+ def __pow__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ if other.op is Op.INTEGER:
+ exponent = other.data[0]
+ # TODO: other kind not used
+ if exponent == 0:
+ return as_number(1)
+ if exponent == 1:
+ return self
+ if exponent > 0:
+ if self.op is Op.FACTORS:
+ r = Expr(self.op, {})
+ for k, v in self.data.items():
+ r.data[k] = v * exponent
+ return normalize(r)
+ return self * (self ** (exponent - 1))
+ elif exponent != -1:
+ return (self ** (-exponent)) ** -1
+ return Expr(Op.FACTORS, {self: exponent})
+ return as_apply(ArithOp.POW, self, other)
+ return NotImplemented
+
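+    # A usage sketch (editor's addition, not part of the original
+    # module): assuming `x = as_symbol('x')`, integer exponents are
+    # expanded eagerly by __pow__:
+    #
+    #     x ** 3    # -> Expr(Op.FACTORS, {x: 3})
+    #     x ** 0    # -> as_number(1)
+    #     x ** -1   # -> Expr(Op.FACTORS, {x: -1})
+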
+ def __truediv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ # Fortran / is different from Python /:
+            # - `/` performs truncating division for integer operands
+ return normalize(as_apply(ArithOp.DIV, self, other))
+ return NotImplemented
+
+ def __rtruediv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ return other / self
+ return NotImplemented
+
+ def __floordiv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ # Fortran // is different from Python //:
+            # - `//` is the concatenation operation for string operands
+ return normalize(Expr(Op.CONCAT, (self, other)))
+ return NotImplemented
+
+ def __rfloordiv__(self, other):
+ other = as_expr(other)
+ if isinstance(other, Expr):
+ return other // self
+ return NotImplemented
+
+ def __call__(self, *args, **kwargs):
+        # In Fortran, parentheses () are used both for function calls
+        # and for indexing operations.
+ #
+ # TODO: implement a method for deciding when __call__ should
+ # return an INDEXING expression.
+ return as_apply(self, *map(as_expr, args),
+ **dict((k, as_expr(v)) for k, v in kwargs.items()))
+
+ def __getitem__(self, index):
+ # Provided to support C indexing operations that .pyf files
+ # may contain.
+ index = as_expr(index)
+ if not isinstance(index, tuple):
+ index = index,
+ if len(index) > 1:
+ ewarn(f'C-index should be a single expression but got `{index}`')
+ return Expr(Op.INDEXING, (self,) + index)
+
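+    # A usage sketch (editor's addition, not part of the original
+    # module): with `a = as_symbol('a')`, calling yields an APPLY
+    # expression while subscripting yields an INDEXING expression:
+    #
+    #     a(as_number(1))    # -> as_apply(a, as_number(1))
+    #     a[as_number(1)]    # -> Expr(Op.INDEXING, (a, as_number(1)))
+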
+ def substitute(self, symbols_map):
+ """Recursively substitute symbols with values in symbols map.
+
+        The symbols map is a dictionary of symbol-expression pairs.
+ """
+ if self.op is Op.SYMBOL:
+ value = symbols_map.get(self)
+ if value is None:
+ return self
+ m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
+ if m:
+ # complement to fromstring method
+ items, paren = m.groups()
+ if paren in ['ROUNDDIV', 'SQUARE']:
+ return as_array(value)
+ assert paren == 'ROUND', (paren, value)
+ return value
+ if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
+ return self
+ if self.op in (Op.ARRAY, Op.COMPLEX):
+ return Expr(self.op, tuple(item.substitute(symbols_map)
+ for item in self.data))
+ if self.op is Op.CONCAT:
+ return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
+ for item in self.data)))
+ if self.op is Op.TERMS:
+ r = None
+ for term, coeff in self.data.items():
+ if r is None:
+ r = term.substitute(symbols_map) * coeff
+ else:
+ r += term.substitute(symbols_map) * coeff
+ if r is None:
+ ewarn('substitute: empty TERMS expression interpreted as'
+ ' int-literal 0')
+ return as_number(0)
+ return r
+ if self.op is Op.FACTORS:
+ r = None
+ for base, exponent in self.data.items():
+ if r is None:
+ r = base.substitute(symbols_map) ** exponent
+ else:
+ r *= base.substitute(symbols_map) ** exponent
+ if r is None:
+ ewarn('substitute: empty FACTORS expression interpreted'
+ ' as int-literal 1')
+ return as_number(1)
+ return r
+ if self.op is Op.APPLY:
+ target, args, kwargs = self.data
+ if isinstance(target, Expr):
+ target = target.substitute(symbols_map)
+ args = tuple(a.substitute(symbols_map) for a in args)
+ kwargs = dict((k, v.substitute(symbols_map))
+ for k, v in kwargs.items())
+ return normalize(Expr(self.op, (target, args, kwargs)))
+ if self.op is Op.INDEXING:
+ func = self.data[0]
+ if isinstance(func, Expr):
+ func = func.substitute(symbols_map)
+ args = tuple(a.substitute(symbols_map) for a in self.data[1:])
+ return normalize(Expr(self.op, (func,) + args))
+ if self.op is Op.TERNARY:
+ operands = tuple(a.substitute(symbols_map) for a in self.data)
+ return normalize(Expr(self.op, operands))
+ if self.op in (Op.REF, Op.DEREF):
+ return normalize(Expr(self.op, self.data.substitute(symbols_map)))
+ if self.op is Op.RELATIONAL:
+ rop, left, right = self.data
+ left = left.substitute(symbols_map)
+ right = right.substitute(symbols_map)
+ return normalize(Expr(self.op, (rop, left, right)))
+ raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
+
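+    # A usage sketch (editor's addition, not part of the original
+    # module): with `a, b = as_symbol('a'), as_symbol('b')`,
+    #
+    #     (a + b).substitute({a: as_number(1)})    # -> 1 + b
+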
+ def traverse(self, visit, *args, **kwargs):
+ """Traverse expression tree with visit function.
+
+ The visit function is applied to an expression with given args
+ and kwargs.
+
+        The traverse call returns the expression returned by visit when
+        it is not None; otherwise, it returns a new normalized expression
+        whose sub-expressions have been traverse-visited.
+ """
+ result = visit(self, *args, **kwargs)
+ if result is not None:
+ return result
+
+ if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
+ return self
+ elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
+ return normalize(Expr(self.op, tuple(
+ item.traverse(visit, *args, **kwargs)
+ for item in self.data)))
+ elif self.op in (Op.TERMS, Op.FACTORS):
+ data = {}
+ for k, v in self.data.items():
+ k = k.traverse(visit, *args, **kwargs)
+ v = (v.traverse(visit, *args, **kwargs)
+ if isinstance(v, Expr) else v)
+ if k in data:
+ v = data[k] + v
+ data[k] = v
+ return normalize(Expr(self.op, data))
+ elif self.op is Op.APPLY:
+ obj = self.data[0]
+ func = (obj.traverse(visit, *args, **kwargs)
+ if isinstance(obj, Expr) else obj)
+ operands = tuple(operand.traverse(visit, *args, **kwargs)
+ for operand in self.data[1])
+ kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
+ for k, v in self.data[2].items())
+ return normalize(Expr(self.op, (func, operands, kwoperands)))
+ elif self.op is Op.INDEXING:
+ obj = self.data[0]
+ obj = (obj.traverse(visit, *args, **kwargs)
+ if isinstance(obj, Expr) else obj)
+ indices = tuple(index.traverse(visit, *args, **kwargs)
+ for index in self.data[1:])
+ return normalize(Expr(self.op, (obj,) + indices))
+ elif self.op in (Op.REF, Op.DEREF):
+ return normalize(Expr(self.op,
+ self.data.traverse(visit, *args, **kwargs)))
+ elif self.op is Op.RELATIONAL:
+ rop, left, right = self.data
+ left = left.traverse(visit, *args, **kwargs)
+ right = right.traverse(visit, *args, **kwargs)
+ return normalize(Expr(self.op, (rop, left, right)))
+ raise NotImplementedError(f'traverse method for {self.op}')
+
+ def contains(self, other):
+ """Check if self contains other.
+ """
+ found = []
+
+ def visit(expr, found=found):
+ if found:
+ return expr
+ elif expr == other:
+ found.append(1)
+ return expr
+
+ self.traverse(visit)
+
+ return len(found) != 0
+
+ def symbols(self):
+ """Return a set of symbols contained in self.
+ """
+ found = set()
+
+ def visit(expr, found=found):
+ if expr.op is Op.SYMBOL:
+ found.add(expr)
+
+ self.traverse(visit)
+
+ return found
+
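+    # A usage sketch (editor's addition, not part of the original
+    # module):
+    #
+    #     fromstring('a + b*c').symbols()    # -> {a, b, c} as SYMBOL exprs
+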
+ def polynomial_atoms(self):
+ """Return a set of expressions used as atoms in polynomial self.
+ """
+ found = set()
+
+ def visit(expr, found=found):
+ if expr.op is Op.FACTORS:
+ for b in expr.data:
+ b.traverse(visit)
+ return expr
+ if expr.op in (Op.TERMS, Op.COMPLEX):
+ return
+ if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
+ if expr.data[0] is ArithOp.POW:
+ expr.data[1][0].traverse(visit)
+ return expr
+ return
+ if expr.op in (Op.INTEGER, Op.REAL):
+ return expr
+
+ found.add(expr)
+
+ if expr.op in (Op.INDEXING, Op.APPLY):
+ return expr
+
+ self.traverse(visit)
+
+ return found
+
+ def linear_solve(self, symbol):
+ """Return a, b such that a * symbol + b == self.
+
+ If self is not linear with respect to symbol, raise RuntimeError.
+ """
+ b = self.substitute({symbol: as_number(0)})
+ ax = self - b
+ a = ax.substitute({symbol: as_number(1)})
+
+ zero, _ = as_numer_denom(a * symbol - ax)
+
+ if zero != as_number(0):
+ raise RuntimeError(f'not a {symbol}-linear equation:'
+ f' {a} * {symbol} + {b} == {self}')
+ return a, b
+
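+    # A usage sketch (editor's addition, not part of the original
+    # module): with `x = as_symbol('x')`,
+    #
+    #     fromstring('2*x + 3').linear_solve(x)   # -> (2, 3) as constants
+    #     fromstring('x*x').linear_solve(x)       # raises RuntimeError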
+
+def normalize(obj):
+ """Normalize Expr and apply basic evaluation methods.
+ """
+ if not isinstance(obj, Expr):
+ return obj
+
+ if obj.op is Op.TERMS:
+ d = {}
+ for t, c in obj.data.items():
+ if c == 0:
+ continue
+ if t.op is Op.COMPLEX and c != 1:
+ t = t * c
+ c = 1
+ if t.op is Op.TERMS:
+ for t1, c1 in t.data.items():
+ _pairs_add(d, t1, c1 * c)
+ else:
+ _pairs_add(d, t, c)
+ if len(d) == 0:
+            # TODO: determine the correct kind
+ return as_number(0)
+ elif len(d) == 1:
+ (t, c), = d.items()
+ if c == 1:
+ return t
+ return Expr(Op.TERMS, d)
+
+ if obj.op is Op.FACTORS:
+ coeff = 1
+ d = {}
+ for b, e in obj.data.items():
+ if e == 0:
+ continue
+ if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1:
+ # expand integer powers of sums
+ b = b * (b ** (e - 1))
+ e = 1
+
+ if b.op in (Op.INTEGER, Op.REAL):
+ if e == 1:
+ coeff *= b.data[0]
+ elif e > 0:
+ coeff *= b.data[0] ** e
+ else:
+ _pairs_add(d, b, e)
+ elif b.op is Op.FACTORS:
+ if e > 0 and isinstance(e, integer_types):
+ for b1, e1 in b.data.items():
+ _pairs_add(d, b1, e1 * e)
+ else:
+ _pairs_add(d, b, e)
+ else:
+ _pairs_add(d, b, e)
+ if len(d) == 0 or coeff == 0:
+            # TODO: determine the correct kind
+ assert isinstance(coeff, number_types)
+ return as_number(coeff)
+ elif len(d) == 1:
+ (b, e), = d.items()
+ if e == 1:
+ t = b
+ else:
+ t = Expr(Op.FACTORS, d)
+ if coeff == 1:
+ return t
+ return Expr(Op.TERMS, {t: coeff})
+ elif coeff == 1:
+ return Expr(Op.FACTORS, d)
+ else:
+ return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff})
+
+ if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
+ dividend, divisor = obj.data[1]
+ t1, c1 = as_term_coeff(dividend)
+ t2, c2 = as_term_coeff(divisor)
+ if isinstance(c1, integer_types) and isinstance(c2, integer_types):
+ g = gcd(c1, c2)
+ c1, c2 = c1//g, c2//g
+ else:
+ c1, c2 = c1/c2, 1
+
+ if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV:
+ numer = t1.data[1][0] * c1
+ denom = t1.data[1][1] * t2 * c2
+ return as_apply(ArithOp.DIV, numer, denom)
+
+ if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV:
+ numer = t2.data[1][1] * t1 * c1
+ denom = t2.data[1][0] * c2
+ return as_apply(ArithOp.DIV, numer, denom)
+
+ d = dict(as_factors(t1).data)
+ for b, e in as_factors(t2).data.items():
+ _pairs_add(d, b, -e)
+ numer, denom = {}, {}
+ for b, e in d.items():
+ if e > 0:
+ numer[b] = e
+ else:
+ denom[b] = -e
+ numer = normalize(Expr(Op.FACTORS, numer)) * c1
+ denom = normalize(Expr(Op.FACTORS, denom)) * c2
+
+ if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1:
+ # TODO: denom kind not used
+ return numer
+ return as_apply(ArithOp.DIV, numer, denom)
+
+ if obj.op is Op.CONCAT:
+ lst = [obj.data[0]]
+ for s in obj.data[1:]:
+ last = lst[-1]
+ if (
+ last.op is Op.STRING
+ and s.op is Op.STRING
+ and last.data[0][0] in '"\''
+ and s.data[0][0] == last.data[0][-1]
+ ):
+ new_last = as_string(last.data[0][:-1] + s.data[0][1:],
+ max(last.data[1], s.data[1]))
+ lst[-1] = new_last
+ else:
+ lst.append(s)
+ if len(lst) == 1:
+ return lst[0]
+ return Expr(Op.CONCAT, tuple(lst))
+
+ if obj.op is Op.TERNARY:
+ cond, expr1, expr2 = map(normalize, obj.data)
+ if cond.op is Op.INTEGER:
+ return expr1 if cond.data[0] else expr2
+ return Expr(Op.TERNARY, (cond, expr1, expr2))
+
+ return obj
+
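+# A usage sketch (editor's addition, not part of the original module):
+# normalize collapses degenerate TERMS/FACTORS forms, e.g. with
+# `x = as_symbol('x')`:
+#
+#     normalize(Expr(Op.TERMS, {x: 1}))     # -> x
+#     normalize(Expr(Op.TERMS, {x: 0}))     # -> as_number(0)
+#     normalize(Expr(Op.FACTORS, {x: 1}))   # -> x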
+
+def as_expr(obj):
+ """Convert non-Expr objects to Expr objects.
+ """
+ if isinstance(obj, complex):
+ return as_complex(obj.real, obj.imag)
+ if isinstance(obj, number_types):
+ return as_number(obj)
+ if isinstance(obj, str):
+        # A STRING expression holds the string with its boundary quotes,
+        # hence repr is applied:
+ return as_string(repr(obj))
+ if isinstance(obj, tuple):
+ return tuple(map(as_expr, obj))
+ return obj
+
+
+def as_symbol(obj):
+ """Return object as SYMBOL expression (variable or unparsed expression).
+ """
+ return Expr(Op.SYMBOL, obj)
+
+
+def as_number(obj, kind=4):
+ """Return object as INTEGER or REAL constant.
+ """
+ if isinstance(obj, int):
+ return Expr(Op.INTEGER, (obj, kind))
+ if isinstance(obj, float):
+ return Expr(Op.REAL, (obj, kind))
+ if isinstance(obj, Expr):
+ if obj.op in (Op.INTEGER, Op.REAL):
+ return obj
+ raise OpError(f'cannot convert {obj} to INTEGER or REAL constant')
+
+
+def as_integer(obj, kind=4):
+ """Return object as INTEGER constant.
+ """
+ if isinstance(obj, int):
+ return Expr(Op.INTEGER, (obj, kind))
+ if isinstance(obj, Expr):
+ if obj.op is Op.INTEGER:
+ return obj
+ raise OpError(f'cannot convert {obj} to INTEGER constant')
+
+
+def as_real(obj, kind=4):
+ """Return object as REAL constant.
+ """
+ if isinstance(obj, int):
+ return Expr(Op.REAL, (float(obj), kind))
+ if isinstance(obj, float):
+ return Expr(Op.REAL, (obj, kind))
+ if isinstance(obj, Expr):
+ if obj.op is Op.REAL:
+ return obj
+ elif obj.op is Op.INTEGER:
+ return Expr(Op.REAL, (float(obj.data[0]), kind))
+ raise OpError(f'cannot convert {obj} to REAL constant')
+
+
+def as_string(obj, kind=1):
+ """Return object as STRING expression (string literal constant).
+ """
+ return Expr(Op.STRING, (obj, kind))
+
+
+def as_array(obj):
+ """Return object as ARRAY expression (array constant).
+ """
+ if isinstance(obj, Expr):
+ obj = obj,
+ return Expr(Op.ARRAY, obj)
+
+
+def as_complex(real, imag=0):
+ """Return object as COMPLEX expression (complex literal constant).
+ """
+ return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag)))
+
+
+def as_apply(func, *args, **kwargs):
+ """Return object as APPLY expression (function call, constructor, etc.)
+ """
+ return Expr(Op.APPLY,
+ (func, tuple(map(as_expr, args)),
+ dict((k, as_expr(v)) for k, v in kwargs.items())))
+
+
+def as_ternary(cond, expr1, expr2):
+ """Return object as TERNARY expression (cond?expr1:expr2).
+ """
+ return Expr(Op.TERNARY, (cond, expr1, expr2))
+
+
+def as_ref(expr):
+ """Return object as referencing expression.
+ """
+ return Expr(Op.REF, expr)
+
+
+def as_deref(expr):
+ """Return object as dereferencing expression.
+ """
+ return Expr(Op.DEREF, expr)
+
+
+def as_eq(left, right):
+ return Expr(Op.RELATIONAL, (RelOp.EQ, left, right))
+
+
+def as_ne(left, right):
+ return Expr(Op.RELATIONAL, (RelOp.NE, left, right))
+
+
+def as_lt(left, right):
+ return Expr(Op.RELATIONAL, (RelOp.LT, left, right))
+
+
+def as_le(left, right):
+ return Expr(Op.RELATIONAL, (RelOp.LE, left, right))
+
+
+def as_gt(left, right):
+ return Expr(Op.RELATIONAL, (RelOp.GT, left, right))
+
+
+def as_ge(left, right):
+ return Expr(Op.RELATIONAL, (RelOp.GE, left, right))
+
+
+def as_terms(obj):
+ """Return expression as TERMS expression.
+ """
+ if isinstance(obj, Expr):
+ obj = normalize(obj)
+ if obj.op is Op.TERMS:
+ return obj
+ if obj.op is Op.INTEGER:
+ return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]})
+ if obj.op is Op.REAL:
+ return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]})
+ return Expr(Op.TERMS, {obj: 1})
+ raise OpError(f'cannot convert {type(obj)} to terms Expr')
+
+
+def as_factors(obj):
+ """Return expression as FACTORS expression.
+ """
+ if isinstance(obj, Expr):
+ obj = normalize(obj)
+ if obj.op is Op.FACTORS:
+ return obj
+ if obj.op is Op.TERMS:
+ if len(obj.data) == 1:
+ (term, coeff), = obj.data.items()
+ if coeff == 1:
+ return Expr(Op.FACTORS, {term: 1})
+ return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1})
+ if ((obj.op is Op.APPLY
+ and obj.data[0] is ArithOp.DIV
+ and not obj.data[2])):
+ return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1})
+ return Expr(Op.FACTORS, {obj: 1})
+ raise OpError(f'cannot convert {type(obj)} to terms Expr')
+
+
+def as_term_coeff(obj):
+ """Return expression as term-coefficient pair.
+ """
+ if isinstance(obj, Expr):
+ obj = normalize(obj)
+ if obj.op is Op.INTEGER:
+ return as_integer(1, obj.data[1]), obj.data[0]
+ if obj.op is Op.REAL:
+ return as_real(1, obj.data[1]), obj.data[0]
+ if obj.op is Op.TERMS:
+ if len(obj.data) == 1:
+ (term, coeff), = obj.data.items()
+ return term, coeff
+ # TODO: find common divisor of coefficients
+ if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
+ t, c = as_term_coeff(obj.data[1][0])
+ return as_apply(ArithOp.DIV, t, obj.data[1][1]), c
+ return obj, 1
+ raise OpError(f'cannot convert {type(obj)} to term and coeff')
+
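+# A usage sketch (editor's addition, not part of the original module):
+#
+#     as_term_coeff(fromstring('3*x'))   # -> (x, 3)
+#     as_term_coeff(as_number(5))        # -> (as_integer(1), 5)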
+
+def as_numer_denom(obj):
+ """Return expression as numer-denom pair.
+ """
+ if isinstance(obj, Expr):
+ obj = normalize(obj)
+ if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL,
+ Op.INDEXING, Op.TERNARY):
+ return obj, as_number(1)
+ elif obj.op is Op.APPLY:
+ if obj.data[0] is ArithOp.DIV and not obj.data[2]:
+ numers, denoms = map(as_numer_denom, obj.data[1])
+ return numers[0] * denoms[1], numers[1] * denoms[0]
+ return obj, as_number(1)
+ elif obj.op is Op.TERMS:
+ numers, denoms = [], []
+ for term, coeff in obj.data.items():
+ n, d = as_numer_denom(term)
+ n = n * coeff
+ numers.append(n)
+ denoms.append(d)
+ numer, denom = as_number(0), as_number(1)
+ for i in range(len(numers)):
+ n = numers[i]
+ for j in range(len(numers)):
+ if i != j:
+ n *= denoms[j]
+ numer += n
+ denom *= denoms[i]
+ if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0:
+ numer, denom = -numer, -denom
+ return numer, denom
+ elif obj.op is Op.FACTORS:
+ numer, denom = as_number(1), as_number(1)
+ for b, e in obj.data.items():
+ bnumer, bdenom = as_numer_denom(b)
+ if e > 0:
+ numer *= bnumer ** e
+ denom *= bdenom ** e
+ elif e < 0:
+ numer *= bdenom ** (-e)
+ denom *= bnumer ** (-e)
+ return numer, denom
+ raise OpError(f'cannot convert {type(obj)} to numer and denom')
+
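+# A usage sketch (editor's addition, not part of the original module):
+#
+#     as_numer_denom(fromstring('x / y'))   # -> (x, y)
+#     as_numer_denom(as_number(5))          # -> (as_number(5), as_number(1))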
+
+def _counter():
+ # Used internally to generate unique dummy symbols
+ counter = 0
+ while True:
+ counter += 1
+ yield counter
+
+
+COUNTER = _counter()
+
+
+def eliminate_quotes(s):
+ """Replace quoted substrings of input string.
+
+ Return a new string and a mapping of replacements.
+ """
+ d = {}
+
+ def repl(m):
+ kind, value = m.groups()[:2]
+ if kind:
+ # remove trailing underscore
+ kind = kind[:-1]
+ p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]]
+ k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@'
+ d[k] = value
+ return k
+
+ new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format(
+ kind=r'\w[\w\d_]*',
+ single_quoted=r"('([^'\\]|(\\.))*')",
+ double_quoted=r'("([^"\\]|(\\.))*")'),
+ repl, s)
+
+ assert '"' not in new_s
+ assert "'" not in new_s
+
+ return new_s, d
+
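+# A usage sketch (editor's addition, not part of the original module);
+# `<n>` stands for a unique integer drawn from COUNTER:
+#
+#     eliminate_quotes("a + 'b'")
+#     # -> ("a + @__f2py_QUOTES_SINGLE_<n>@",
+#     #     {'@__f2py_QUOTES_SINGLE_<n>@': "'b'"})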
+
+def insert_quotes(s, d):
+ """Inverse of eliminate_quotes.
+ """
+ for k, v in d.items():
+ kind = k[:k.find('@')]
+ if kind:
+ kind += '_'
+ s = s.replace(k, kind + v)
+ return s
+
+
+def replace_parenthesis(s):
+ """Replace substrings of input that are enclosed in parenthesis.
+
+ Return a new string and a mapping of replacements.
+ """
+    # Find the first parenthesis pair in the string.
+
+    # Fortran delimiters are `(`, `)`, `[`, `]`, `(/`, `/)`, and `/`.
+    # We don't handle the `/` delimiter because it is not part of an
+    # expression.
+ left, right = None, None
+ mn_i = len(s)
+ for left_, right_ in (('(/', '/)'),
+ '()',
+ '{}', # to support C literal structs
+ '[]'):
+ i = s.find(left_)
+ if i == -1:
+ continue
+ if i < mn_i:
+ mn_i = i
+ left, right = left_, right_
+
+ if left is None:
+ return s, {}
+
+ i = mn_i
+ j = s.find(right, i)
+
+ while s.count(left, i + 1, j) != s.count(right, i + 1, j):
+ j = s.find(right, j + 1)
+ if j == -1:
+            raise ValueError(f'Mismatch of {left+right} parentheses in {s!r}')
+
+ p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left]
+
+ k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@'
+ v = s[i+len(left):j]
+ r, d = replace_parenthesis(s[j+len(right):])
+ d[k] = v
+ return s[:i] + k + r, d
+
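+# A usage sketch (editor's addition, not part of the original module);
+# `<n>` stands for a unique integer drawn from COUNTER:
+#
+#     replace_parenthesis('f(x) + y')
+#     # -> ('f@__f2py_PARENTHESIS_ROUND_<n>@ + y',
+#     #     {'@__f2py_PARENTHESIS_ROUND_<n>@': 'x'})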
+
+def _get_parenthesis_kind(s):
+ assert s.startswith('@__f2py_PARENTHESIS_'), s
+ return s.split('_')[4]
+
+
+def unreplace_parenthesis(s, d):
+ """Inverse of replace_parenthesis.
+ """
+ for k, v in d.items():
+ p = _get_parenthesis_kind(k)
+ left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p]
+ right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p]
+ s = s.replace(k, left + v + right)
+ return s
+
+
+def fromstring(s, language=Language.C):
+ """Create an expression from a string.
+
+    This is a "lazy" parser: only arithmetic operations are resolved;
+    non-arithmetic operations are treated as symbols.
+ """
+ r = _FromStringWorker(language=language).parse(s)
+ if isinstance(r, Expr):
+ return r
+ raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`')
+
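+# A usage sketch (editor's addition, not part of the original module):
+#
+#     fromstring('2*x + 3')             # -> Expr for 2*x + 3 (TERMS)
+#     fromstring('a .le. b',
+#                language=Language.Fortran)   # -> RELATIONAL expression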
+
+class _Pair:
+ # Internal class to represent a pair of expressions
+
+ def __init__(self, left, right):
+ self.left = left
+ self.right = right
+
+ def substitute(self, symbols_map):
+ left, right = self.left, self.right
+ if isinstance(left, Expr):
+ left = left.substitute(symbols_map)
+ if isinstance(right, Expr):
+ right = right.substitute(symbols_map)
+ return _Pair(left, right)
+
+ def __repr__(self):
+ return f'{type(self).__name__}({self.left}, {self.right})'
+
+
+class _FromStringWorker:
+
+ def __init__(self, language=Language.C):
+ self.original = None
+ self.quotes_map = None
+ self.language = language
+
+ def finalize_string(self, s):
+ return insert_quotes(s, self.quotes_map)
+
+ def parse(self, inp):
+ self.original = inp
+ unquoted, self.quotes_map = eliminate_quotes(inp)
+ return self.process(unquoted)
+
+ def process(self, s, context='expr'):
+ """Parse string within the given context.
+
+ The context may define the result in case of ambiguous
+ expressions. For instance, consider expressions `f(x, y)` and
+        `(x, y) + (a, b)` where `f` is a function and the pair `(x, y)`
+        denotes a complex number. With the context specified as "args" or
+        "expr", the subexpression `(x, y)` will be parsed as an
+        argument list or as a complex number, respectively.
+ """
+ if isinstance(s, (list, tuple)):
+ return type(s)(self.process(s_, context) for s_ in s)
+
+ assert isinstance(s, str), (type(s), s)
+
+        # replace subexpressions in parentheses with f2py @-names
+ r, raw_symbols_map = replace_parenthesis(s)
+ r = r.strip()
+
+ def restore(r):
+ # restores subexpressions marked with f2py @-names
+ if isinstance(r, (list, tuple)):
+ return type(r)(map(restore, r))
+ return unreplace_parenthesis(r, raw_symbols_map)
+
+ # comma-separated tuple
+ if ',' in r:
+ operands = restore(r.split(','))
+ if context == 'args':
+ return tuple(self.process(operands))
+ if context == 'expr':
+ if len(operands) == 2:
+ # complex number literal
+ return as_complex(*self.process(operands))
+ raise NotImplementedError(
+ f'parsing comma-separated list (context={context}): {r}')
+
+ # ternary operation
+ m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
+ if m:
+ assert context == 'expr', context
+ oper, expr1, expr2 = restore(m.groups())
+ oper = self.process(oper)
+ expr1 = self.process(expr1)
+ expr2 = self.process(expr2)
+ return as_ternary(oper, expr1, expr2)
+
+ # relational expression
+ if self.language is Language.Fortran:
+ m = re.match(
+ r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
+ else:
+ m = re.match(
+ r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
+ if m:
+ left, rop, right = m.groups()
+ if self.language is Language.Fortran:
+ rop = '.' + rop + '.'
+ left, right = self.process(restore((left, right)))
+ rop = RelOp.fromstring(rop, language=self.language)
+ return Expr(Op.RELATIONAL, (rop, left, right))
+
+ # keyword argument
+ m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
+ if m:
+ keyname, value = m.groups()
+ value = restore(value)
+ return _Pair(keyname, self.process(value))
+
+ # addition/subtraction operations
+ operands = re.split(r'((?<!\d[edED])[+-])', r)
+ if len(operands) > 1:
+ result = self.process(restore(operands[0] or '0'))
+ for op, operand in zip(operands[1::2], operands[2::2]):
+ operand = self.process(restore(operand))
+ op = op.strip()
+ if op == '+':
+ result += operand
+ else:
+ assert op == '-'
+ result -= operand
+ return result
+
+ # string concatenate operation
+ if self.language is Language.Fortran and '//' in r:
+ operands = restore(r.split('//'))
+ return Expr(Op.CONCAT,
+ tuple(self.process(operands)))
+
+ # multiplication/division operations
+ operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
+ (r if self.language is Language.C
+ else r.replace('**', '@__f2py_DOUBLE_STAR@')))
+ if len(operands) > 1:
+ operands = restore(operands)
+ if self.language is not Language.C:
+ operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
+ for operand in operands]
+ # Expression is an arithmetic product
+ result = self.process(operands[0])
+ for op, operand in zip(operands[1::2], operands[2::2]):
+ operand = self.process(operand)
+ op = op.strip()
+ if op == '*':
+ result *= operand
+ else:
+ assert op == '/'
+ result /= operand
+ return result
+
+ # referencing/dereferencing
+ if r.startswith('*') or r.startswith('&'):
+ op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
+ operand = self.process(restore(r[1:]))
+ return Expr(op, operand)
+
+ # exponentiation operations
+ if self.language is not Language.C and '**' in r:
+ operands = list(reversed(restore(r.split('**'))))
+ result = self.process(operands[0])
+ for operand in operands[1:]:
+ operand = self.process(operand)
+ result = operand ** result
+ return result
+
+ # int-literal-constant
+ m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
+ digit_string=r'\d+',
+ kind=r'_(\d+|\w[\w\d_]*)'), r)
+ if m:
+ value, _, kind = m.groups()
+ if kind and kind.isdigit():
+ kind = int(kind)
+ return as_integer(int(value), kind or 4)
+
+ # real-literal-constant
+ m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
+ .format(
+ significant=r'[.]\d+|\d+[.]\d*',
+ exponent=r'[edED][+-]?\d+',
+ kind=r'_(\d+|\w[\w\d_]*)'), r)
+ if m:
+ value, _, _, kind = m.groups()
+ if kind and kind.isdigit():
+ kind = int(kind)
+ value = value.lower()
+ if 'd' in value:
+ return as_real(float(value.replace('d', 'e')), kind or 8)
+ return as_real(float(value), kind or 4)
+
+ # string-literal-constant with kind parameter specification
+ if r in self.quotes_map:
+ kind = r[:r.find('@')]
+ return as_string(self.quotes_map[r], kind or 1)
+
+ # array constructor or literal complex constant or
+ # parenthesized expression
+ if r in raw_symbols_map:
+ paren = _get_parenthesis_kind(r)
+ items = self.process(restore(raw_symbols_map[r]),
+ 'expr' if paren == 'ROUND' else 'args')
+ if paren == 'ROUND':
+ if isinstance(items, Expr):
+ return items
+ if paren in ['ROUNDDIV', 'SQUARE']:
+                # Expression is an array constructor
+ if isinstance(items, Expr):
+ items = (items,)
+ return as_array(items)
+
+ # function call/indexing
+ m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
+ r)
+ if m:
+ target, args, paren = m.groups()
+ target = self.process(restore(target))
+ args = self.process(restore(args)[1:-1], 'args')
+ if not isinstance(args, tuple):
+ args = args,
+ if paren == 'ROUND':
+ kwargs = dict((a.left, a.right) for a in args
+ if isinstance(a, _Pair))
+ args = tuple(a for a in args if not isinstance(a, _Pair))
+                # Warning: this could also be a Fortran indexing operation.
+ return as_apply(target, *args, **kwargs)
+ else:
+ # Expression is a C/Python indexing operation
+ # (e.g. used in .pyf files)
+ assert paren == 'SQUARE'
+ return target[args]
+
+ # Fortran standard conforming identifier
+ m = re.match(r'\A\w[\w\d_]*\Z', r)
+ if m:
+ return as_symbol(r)
+
+ # fall-back to symbol
+ r = self.finalize_string(restore(r))
+ ewarn(
+ f'fromstring: treating {r!r} as symbol (original={self.original})')
+ return as_symbol(r)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90
new file mode 100644
index 00000000..76d16aae
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/foo.f90
@@ -0,0 +1,34 @@
+module ops_module
+
+ abstract interface
+ subroutine op(x, y, z)
+ integer, intent(in) :: x, y
+ integer, intent(out) :: z
+ end subroutine
+ end interface
+
+contains
+
+ subroutine foo(x, y, r1, r2)
+ integer, intent(in) :: x, y
+ integer, intent(out) :: r1, r2
+ procedure (op) add1, add2
+ procedure (op), pointer::p
+ p=>add1
+ call p(x, y, r1)
+ p=>add2
+ call p(x, y, r2)
+ end subroutine
+end module
+
+subroutine add1(x, y, z)
+ integer, intent(in) :: x, y
+ integer, intent(out) :: z
+ z = x + y
+end subroutine
+
+subroutine add2(x, y, z)
+ integer, intent(in) :: x, y
+ integer, intent(out) :: z
+ z = x + 2 * y
+end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90
new file mode 100644
index 00000000..36791e46
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90
@@ -0,0 +1,6 @@
+module test
+ abstract interface
+ subroutine foo()
+ end subroutine
+ end interface
+end module test
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
new file mode 100644
index 00000000..9a8b4a75
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -0,0 +1,230 @@
+/*
+ * This file was auto-generated with f2py (version:2_1330) and hand edited by
+ * Pearu for testing purposes. Do not edit this file unless you know what you
+ * are doing!!!
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*********************** See f2py2e/cfuncs.py: includes ***********************/
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "fortranobject.h"
+#include <math.h>
+
+static PyObject *wrap_error;
+static PyObject *wrap_module;
+
+/************************************ call ************************************/
+static char doc_f2py_rout_wrap_call[] = "\
+Function signature:\n\
+  arr = call(type_num,elsize,dims,intent,obj)\n\
+Required arguments:\n"
+" type_num : input int\n"
+" elsize : input int\n"
+" dims : input int-sequence\n"
+" intent : input int\n obj : input python object\n"
+"Return objects:\n"
+" arr : array";
+static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
+ PyObject *capi_args) {
+ PyObject * volatile capi_buildvalue = NULL;
+ int type_num = 0;
+ int elsize = 0;
+ npy_intp *dims = NULL;
+ PyObject *dims_capi = Py_None;
+ int rank = 0;
+ int intent = 0;
+ PyArrayObject *capi_arr_tmp = NULL;
+ PyObject *arr_capi = Py_None;
+ int i;
+
+ if (!PyArg_ParseTuple(capi_args,"iiOiO|:wrap.call",\
+ &type_num,&elsize,&dims_capi,&intent,&arr_capi))
+ return NULL;
+ rank = PySequence_Length(dims_capi);
+ dims = malloc(rank*sizeof(npy_intp));
+ for (i=0;i<rank;++i) {
+ PyObject *tmp;
+ tmp = PySequence_GetItem(dims_capi, i);
+ if (tmp == NULL) {
+ goto fail;
+ }
+ dims[i] = (npy_intp)PyLong_AsLong(tmp);
+ Py_DECREF(tmp);
+ if (dims[i] == -1 && PyErr_Occurred()) {
+ goto fail;
+ }
+ }
+ capi_arr_tmp = ndarray_from_pyobj(type_num,elsize,dims,rank,intent|F2PY_INTENT_OUT,arr_capi,"wrap.call failed");
+ if (capi_arr_tmp == NULL) {
+ free(dims);
+ return NULL;
+ }
+ capi_buildvalue = Py_BuildValue("N",capi_arr_tmp);
+ free(dims);
+ return capi_buildvalue;
+
+fail:
+ free(dims);
+ return NULL;
+}
+
+static char doc_f2py_rout_wrap_attrs[] = "\
+Function signature:\n\
+ arr = array_attrs(arr)\n\
+Required arguments:\n"
+" arr : input array object\n"
+"Return objects:\n"
+" data : data address in hex\n"
+" nd : int\n"
+" dimensions : tuple\n"
+" strides : tuple\n"
+" base : python object\n"
+" (kind,type,type_num,elsize,alignment) : 5-tuple\n"
+" flags : int\n"
+" itemsize : int\n"
+;
+static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
+ PyObject *capi_args) {
+ PyObject *arr_capi = Py_None;
+ PyArrayObject *arr = NULL;
+ PyObject *dimensions = NULL;
+ PyObject *strides = NULL;
+ char s[100];
+ int i;
+ memset(s,0,100);
+ if (!PyArg_ParseTuple(capi_args,"O!|:wrap.attrs",
+ &PyArray_Type,&arr_capi))
+ return NULL;
+ arr = (PyArrayObject *)arr_capi;
+ sprintf(s,"%p",PyArray_DATA(arr));
+ dimensions = PyTuple_New(PyArray_NDIM(arr));
+ strides = PyTuple_New(PyArray_NDIM(arr));
+ for (i=0;i<PyArray_NDIM(arr);++i) {
+ PyTuple_SetItem(dimensions,i,PyLong_FromLong(PyArray_DIM(arr,i)));
+ PyTuple_SetItem(strides,i,PyLong_FromLong(PyArray_STRIDE(arr,i)));
+ }
+ return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr),
+ dimensions,strides,
+ (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)),
+ PyArray_DESCR(arr)->kind,
+ PyArray_DESCR(arr)->type,
+ PyArray_TYPE(arr),
+ PyArray_ITEMSIZE(arr),
+ PyArray_DESCR(arr)->alignment,
+ PyArray_FLAGS(arr),
+ PyArray_ITEMSIZE(arr));
+}
+
+static PyMethodDef f2py_module_methods[] = {
+
+ {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call},
+ {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs},
+ {NULL,NULL}
+};
+
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "test_array_from_pyobj_ext",
+ NULL,
+ -1,
+ f2py_module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) {
+ PyObject *m,*d, *s;
+ m = wrap_module = PyModule_Create(&moduledef);
+ Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+ import_array();
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module wrap (failed to import numpy)");
+ d = PyModule_GetDict(m);
+ s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n"
+    " arr = call(type_num,elsize,dims,intent,obj)\n"
+ ".");
+ PyDict_SetItemString(d, "__doc__", s);
+ wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
+ Py_DECREF(s);
+
+#define ADDCONST(NAME, CONST) \
+ s = PyLong_FromLong(CONST); \
+ PyDict_SetItemString(d, NAME, s); \
+ Py_DECREF(s)
+
+ ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN);
+ ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT);
+ ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT);
+ ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE);
+ ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE);
+ ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY);
+ ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C);
+ ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL);
+ ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE);
+ ADDCONST("NPY_BOOL", NPY_BOOL);
+ ADDCONST("NPY_BYTE", NPY_BYTE);
+ ADDCONST("NPY_UBYTE", NPY_UBYTE);
+ ADDCONST("NPY_SHORT", NPY_SHORT);
+ ADDCONST("NPY_USHORT", NPY_USHORT);
+ ADDCONST("NPY_INT", NPY_INT);
+ ADDCONST("NPY_UINT", NPY_UINT);
+ ADDCONST("NPY_INTP", NPY_INTP);
+ ADDCONST("NPY_UINTP", NPY_UINTP);
+ ADDCONST("NPY_LONG", NPY_LONG);
+ ADDCONST("NPY_ULONG", NPY_ULONG);
+ ADDCONST("NPY_LONGLONG", NPY_LONGLONG);
+ ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG);
+ ADDCONST("NPY_FLOAT", NPY_FLOAT);
+ ADDCONST("NPY_DOUBLE", NPY_DOUBLE);
+ ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE);
+ ADDCONST("NPY_CFLOAT", NPY_CFLOAT);
+ ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE);
+ ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE);
+ ADDCONST("NPY_OBJECT", NPY_OBJECT);
+ ADDCONST("NPY_STRING", NPY_STRING);
+ ADDCONST("NPY_UNICODE", NPY_UNICODE);
+ ADDCONST("NPY_VOID", NPY_VOID);
+ ADDCONST("NPY_NTYPES", NPY_NTYPES);
+ ADDCONST("NPY_NOTYPE", NPY_NOTYPE);
+ ADDCONST("NPY_USERDEF", NPY_USERDEF);
+
+ ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS);
+ ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS);
+ ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA);
+ ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST);
+ ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY);
+ ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY);
+ ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED);
+ ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE);
+ ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY);
+
+ ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED);
+ ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS);
+ ADDCONST("CARRAY", NPY_ARRAY_CARRAY);
+ ADDCONST("FARRAY", NPY_ARRAY_FARRAY);
+ ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO);
+ ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO);
+ ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT);
+ ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL);
+
+#undef ADDCONST
+
+ if (PyErr_Occurred())
+ Py_FatalError("can't initialize module wrap");
+
+#ifdef F2PY_REPORT_ATEXIT
+ on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call");
+#endif
+
+ return m;
+}
+#ifdef __cplusplus
+}
+#endif
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap
new file mode 100644
index 00000000..2665f89b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap
@@ -0,0 +1 @@
+dict(real=dict(rk="double"))
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90
new file mode 100644
index 00000000..b301710f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_free.f90
@@ -0,0 +1,34 @@
+
+subroutine sum(x, res)
+ implicit none
+ real, intent(in) :: x(:)
+ real, intent(out) :: res
+
+ integer :: i
+
+ !print *, "sum: size(x) = ", size(x)
+
+ res = 0.0
+
+ do i = 1, size(x)
+ res = res + x(i)
+ enddo
+
+end subroutine sum
+
+function fsum(x) result (res)
+ implicit none
+ real, intent(in) :: x(:)
+ real :: res
+
+ integer :: i
+
+ !print *, "fsum: size(x) = ", size(x)
+
+ res = 0.0
+
+ do i = 1, size(x)
+ res = res + x(i)
+ enddo
+
+end function fsum
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90
new file mode 100644
index 00000000..cbe6317e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_mod.f90
@@ -0,0 +1,41 @@
+
+module mod
+
+contains
+
+subroutine sum(x, res)
+ implicit none
+ real, intent(in) :: x(:)
+ real, intent(out) :: res
+
+ integer :: i
+
+ !print *, "sum: size(x) = ", size(x)
+
+ res = 0.0
+
+ do i = 1, size(x)
+ res = res + x(i)
+ enddo
+
+end subroutine sum
+
+function fsum(x) result (res)
+ implicit none
+ real, intent(in) :: x(:)
+ real :: res
+
+ integer :: i
+
+ !print *, "fsum: size(x) = ", size(x)
+
+ res = 0.0
+
+ do i = 1, size(x)
+ res = res + x(i)
+ enddo
+
+end function fsum
+
+
+end module mod
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90
new file mode 100644
index 00000000..337465ac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/foo_use.f90
@@ -0,0 +1,19 @@
+subroutine sum_with_use(x, res)
+ use precision
+
+ implicit none
+
+ real(kind=rk), intent(in) :: x(:)
+ real(kind=rk), intent(out) :: res
+
+ integer :: i
+
+ !print *, "size(x) = ", size(x)
+
+ res = 0.0
+
+ do i = 1, size(x)
+ res = res + x(i)
+ enddo
+
+ end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90
new file mode 100644
index 00000000..ed6c70cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/assumed_shape/precision.f90
@@ -0,0 +1,4 @@
+module precision
+ integer, parameter :: rk = selected_real_kind(8)
+ integer, parameter :: ik = selected_real_kind(4)
+end module
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f
new file mode 100644
index 00000000..c8315f12
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/block_docstring/foo.f
@@ -0,0 +1,6 @@
+ SUBROUTINE FOO()
+ INTEGER BAR(2, 3)
+
+ COMMON /BLOCK/ BAR
+ RETURN
+ END
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f
new file mode 100644
index 00000000..ba397bb3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/foo.f
@@ -0,0 +1,62 @@
+ subroutine t(fun,a)
+ integer a
+cf2py intent(out) a
+ external fun
+ call fun(a)
+ end
+
+ subroutine func(a)
+cf2py intent(in,out) a
+ integer a
+ a = a + 11
+ end
+
+ subroutine func0(a)
+cf2py intent(out) a
+ integer a
+ a = 11
+ end
+
+ subroutine t2(a)
+cf2py intent(callback) fun
+ integer a
+cf2py intent(out) a
+ external fun
+ call fun(a)
+ end
+
+ subroutine string_callback(callback, a)
+ external callback
+ double precision callback
+ double precision a
+ character*1 r
+cf2py intent(out) a
+ r = 'r'
+ a = callback(r)
+ end
+
+ subroutine string_callback_array(callback, cu, lencu, a)
+ external callback
+ integer callback
+ integer lencu
+ character*8 cu(lencu)
+ integer a
+cf2py intent(out) a
+
+ a = callback(cu, lencu)
+ end
+
+ subroutine hidden_callback(a, r)
+ external global_f
+cf2py intent(callback, hide) global_f
+ integer a, r, global_f
+cf2py intent(out) r
+ r = global_f(a)
+ end
+
+ subroutine hidden_callback2(a, r)
+ external global_f
+ integer a, r, global_f
+cf2py intent(out) r
+ r = global_f(a)
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90
new file mode 100644
index 00000000..49853afd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh17797.f90
@@ -0,0 +1,7 @@
+function gh17797(f, y) result(r)
+ external f
+ integer(8) :: r, f
+ integer(8), dimension(:) :: y
+ r = f(0)
+ r = r + sum(y)
+end function gh17797
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90
new file mode 100644
index 00000000..92b6d754
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/callback/gh18335.f90
@@ -0,0 +1,17 @@
+ ! When gh18335_workaround is defined as an extension,
+ ! the issue cannot be reproduced.
+ !subroutine gh18335_workaround(f, y)
+ ! implicit none
+ ! external f
+ ! integer(kind=1) :: y(1)
+ ! call f(y)
+ !end subroutine gh18335_workaround
+
+ function gh18335(f) result (r)
+ implicit none
+ external f
+ integer(kind=1) :: y(1), r
+ y(1) = 123
+ call f(y)
+ r = y(1)
+ end function gh18335
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f
new file mode 100644
index 00000000..8b916ebe
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hi77.f
@@ -0,0 +1,3 @@
+ SUBROUTINE HI
+ PRINT*, "HELLO WORLD"
+ END SUBROUTINE
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90
new file mode 100644
index 00000000..981f8775
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/cli/hiworld.f90
@@ -0,0 +1,3 @@
+function hi()
+ print*, "Hello World"
+end function
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f
new file mode 100644
index 00000000..7ea7968f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/common/block.f
@@ -0,0 +1,11 @@
+ SUBROUTINE INITCB
+ DOUBLE PRECISION LONG
+ CHARACTER STRING
+ INTEGER OK
+
+ COMMON /BLOCK/ LONG, STRING, OK
+ LONG = 1.0
+ STRING = '2'
+ OK = 3
+ RETURN
+ END
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90
new file mode 100644
index 00000000..e2cbd445
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/accesstype.f90
@@ -0,0 +1,13 @@
+module foo
+ public
+ type, private, bind(c) :: a
+ integer :: i
+ end type a
+ type, bind(c) :: b_
+ integer :: j
+ end type b_
+ public :: b_
+ type :: c
+ integer :: k
+ end type c
+end module foo
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90
new file mode 100644
index 00000000..e327b25c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/foo_deps.f90
@@ -0,0 +1,6 @@
+module foo
+ type bar
+ character(len = 4) :: text
+ end type bar
+ type(bar), parameter :: abar = bar('abar')
+end module foo
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f
new file mode 100644
index 00000000..1bb2e674
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh15035.f
@@ -0,0 +1,16 @@
+ subroutine subb(k)
+ real(8), intent(inout) :: k(:)
+ k=k+1
+ endsubroutine
+
+ subroutine subc(w,k)
+ real(8), intent(in) :: w(:)
+ real(8), intent(out) :: k(size(w))
+ k=w+1
+ endsubroutine
+
+ function t0(value)
+ character value
+ character t0
+ t0 = value
+ endfunction
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f
new file mode 100644
index 00000000..99595384
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh17859.f
@@ -0,0 +1,12 @@
+ integer(8) function external_as_statement(fcn)
+ implicit none
+ external fcn
+ integer(8) :: fcn
+ external_as_statement = fcn(0)
+ end
+
+ integer(8) function external_as_attribute(fcn)
+ implicit none
+ integer(8), external :: fcn
+ external_as_attribute = fcn(0)
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90
new file mode 100644
index 00000000..31ea9327
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/gh2848.f90
@@ -0,0 +1,13 @@
+ subroutine gh2848( &
+ ! first 2 parameters
+ par1, par2,&
+ ! last 2 parameters
+ par3, par4)
+
+ integer, intent(in) :: par1, par2
+ integer, intent(out) :: par3, par4
+
+ par3 = par1
+ par4 = par2
+
+ end subroutine gh2848
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90
new file mode 100644
index 00000000..1d060a3d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/operators.f90
@@ -0,0 +1,49 @@
+module foo
+ type bar
+ character(len = 32) :: item
+ end type bar
+ interface operator(.item.)
+ module procedure item_int, item_real
+ end interface operator(.item.)
+ interface operator(==)
+ module procedure items_are_equal
+ end interface operator(==)
+ interface assignment(=)
+ module procedure get_int, get_real
+ end interface assignment(=)
+contains
+ function item_int(val) result(elem)
+ integer, intent(in) :: val
+ type(bar) :: elem
+
+ write(elem%item, "(I32)") val
+ end function item_int
+
+ function item_real(val) result(elem)
+ real, intent(in) :: val
+ type(bar) :: elem
+
+ write(elem%item, "(1PE32.12)") val
+ end function item_real
+
+ function items_are_equal(val1, val2) result(equal)
+ type(bar), intent(in) :: val1, val2
+ logical :: equal
+
+ equal = (val1%item == val2%item)
+ end function items_are_equal
+
+ subroutine get_real(rval, item)
+ real, intent(out) :: rval
+ type(bar), intent(in) :: item
+
+ read(item%item, *) rval
+ end subroutine get_real
+
+ subroutine get_int(rval, item)
+ integer, intent(out) :: rval
+ type(bar), intent(in) :: item
+
+ read(item%item, *) rval
+ end subroutine get_int
+end module foo
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90
new file mode 100644
index 00000000..2674c214
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/privatemod.f90
@@ -0,0 +1,11 @@
+module foo
+ private
+ integer :: a
+ public :: setA
+ integer :: b
+contains
+ subroutine setA(v)
+ integer, intent(in) :: v
+ a = v
+ end subroutine setA
+end module foo
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90
new file mode 100644
index 00000000..1db76e3f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/publicmod.f90
@@ -0,0 +1,10 @@
+module foo
+ public
+ integer, private :: a
+ public :: setA
+contains
+ subroutine setA(v)
+ integer, intent(in) :: v
+ a = v
+ end subroutine setA
+end module foo
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90
new file mode 100644
index 00000000..46bef7cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/pubprivmod.f90
@@ -0,0 +1,10 @@
+module foo
+ public
+ integer, private :: a
+ integer :: b
+contains
+ subroutine setA(v)
+ integer, intent(in) :: v
+ a = v
+ end subroutine setA
+end module foo
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90
new file mode 100644
index 00000000..13515ce9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/crackfortran/unicode_comment.f90
@@ -0,0 +1,4 @@
+subroutine foo(x)
+ real(8), intent(in) :: x
+ ! Écrit à l'écran la valeur de x
+end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap
new file mode 100644
index 00000000..a4425f88
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap
@@ -0,0 +1 @@
+dict(real=dict(real32='float', real64='double'), integer=dict(int64='long_long'))
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90
new file mode 100644
index 00000000..3f0e12c7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90
@@ -0,0 +1,9 @@
+ subroutine func1(n, x, res)
+ use, intrinsic :: iso_fortran_env, only: int64, real64
+ implicit none
+ integer(int64), intent(in) :: n
+ real(real64), intent(in) :: x(n)
+ real(real64), intent(out) :: res
+Cf2py intent(hide) :: n
+ res = sum(x)
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90
new file mode 100644
index 00000000..d3d15cfb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/kind/foo.f90
@@ -0,0 +1,20 @@
+
+
+subroutine selectedrealkind(p, r, res)
+ implicit none
+
+ integer, intent(in) :: p, r
+ !f2py integer :: r=0
+ integer, intent(out) :: res
+ res = selected_real_kind(p, r)
+
+end subroutine
+
+subroutine selectedintkind(p, res)
+ implicit none
+
+ integer, intent(in) :: p
+ integer, intent(out) :: res
+ res = selected_int_kind(p)
+
+end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f
new file mode 100644
index 00000000..c3474257
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo.f
@@ -0,0 +1,5 @@
+ subroutine bar11(a)
+cf2py intent(out) a
+ integer a
+ a = 11
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90
new file mode 100644
index 00000000..7543a6ac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_fixed.f90
@@ -0,0 +1,8 @@
+ module foo_fixed
+ contains
+ subroutine bar12(a)
+!f2py intent(out) a
+ integer a
+ a = 12
+ end subroutine bar12
+ end module foo_fixed
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90
new file mode 100644
index 00000000..c1b641f1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/mixed/foo_free.f90
@@ -0,0 +1,8 @@
+module foo_free
+contains
+ subroutine bar13(a)
+ !f2py intent(out) a
+ integer a
+ a = 13
+ end subroutine bar13
+end module foo_free
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod
new file mode 100644
index 00000000..8670a97e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/mod.mod
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90
new file mode 100644
index 00000000..4505e0cb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/module_data/module_data_docstring.f90
@@ -0,0 +1,12 @@
+module mod
+ integer :: i
+ integer :: x(4)
+ real, dimension(2,3) :: a
+ real, allocatable, dimension(:,:) :: b
+contains
+ subroutine foo
+ integer :: k
+ k = 1
+ a(1,2) = a(1,2)+3
+ end subroutine foo
+end module mod
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90
new file mode 100644
index 00000000..bf1fa928
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/negative_bounds/issue_20853.f90
@@ -0,0 +1,7 @@
+subroutine foo(is_, ie_, arr, tout)
+ implicit none
+ integer :: is_,ie_
+ real, intent(in) :: arr(is_:ie_)
+ real, intent(out) :: tout(is_:ie_)
+ tout = arr
+end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90
new file mode 100644
index 00000000..ac90cedc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_both.f90
@@ -0,0 +1,57 @@
+! Check that parameters are correctly intercepted.
+! Constants with kind-parameter suffixes are commonly
+! used, for instance Pi = 3._dp
+subroutine foo(x)
+ implicit none
+ integer, parameter :: sp = selected_real_kind(6)
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: ii = selected_int_kind(9)
+ integer, parameter :: il = selected_int_kind(18)
+ real(dp), intent(inout) :: x
+ dimension x(3)
+ real(sp), parameter :: three_s = 3._sp
+ real(dp), parameter :: three_d = 3._dp
+ integer(ii), parameter :: three_i = 3_ii
+ integer(il), parameter :: three_l = 3_il
+ x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l
+ x(2) = x(2) * three_s
+ x(3) = x(3) * three_l
+ return
+end subroutine
+
+
+subroutine foo_no(x)
+ implicit none
+ integer, parameter :: sp = selected_real_kind(6)
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: ii = selected_int_kind(9)
+ integer, parameter :: il = selected_int_kind(18)
+ real(dp), intent(inout) :: x
+ dimension x(3)
+ real(sp), parameter :: three_s = 3.
+ real(dp), parameter :: three_d = 3.
+ integer(ii), parameter :: three_i = 3
+ integer(il), parameter :: three_l = 3
+ x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l
+ x(2) = x(2) * three_s
+ x(3) = x(3) * three_l
+ return
+end subroutine
+
+subroutine foo_sum(x)
+ implicit none
+ integer, parameter :: sp = selected_real_kind(6)
+ integer, parameter :: dp = selected_real_kind(15)
+ integer, parameter :: ii = selected_int_kind(9)
+ integer, parameter :: il = selected_int_kind(18)
+ real(dp), intent(inout) :: x
+ dimension x(3)
+ real(sp), parameter :: three_s = 2._sp + 1._sp
+ real(dp), parameter :: three_d = 1._dp + 2._dp
+ integer(ii), parameter :: three_i = 2_ii + 1_ii
+ integer(il), parameter :: three_l = 1_il + 2_il
+ x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l
+ x(2) = x(2) * three_s
+ x(3) = x(3) * three_l
+ return
+end subroutine
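
This file and its parameter/* siblings verify that crackfortran folds PARAMETER constants (kind-suffixed literals, plain literals, and compound expressions such as 2_ii + 1_ii) into the generated wrappers. A hedged usage sketch, with a hypothetical module name params:

    import numpy as np
    import params                   # hypothetical wrapped module
    x = np.array([1.0, 2.0, 3.0])   # float64 matches real(dp)
    params.foo(x)                   # intent(inout): updated in place
    # x[0] += x[1]*3*3 + x[2]*3*3; x[1] *= 3; x[2] *= 3
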
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90
new file mode 100644
index 00000000..e51f5e9b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_compound.f90
@@ -0,0 +1,15 @@
+! Check that parameters are correctly intercepted.
+! Constants with kind-suffix separators are commonly
+! used, for instance Pi = 3._dp
+subroutine foo_compound_int(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(9)
+ integer(ii), intent(inout) :: x
+ dimension x(3)
+ integer(ii), parameter :: three = 3_ii
+ integer(ii), parameter :: two = 2_ii
+ integer(ii), parameter :: six = three * 1_ii * two
+
+ x(1) = x(1) + x(2) + x(3) * six
+ return
+end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90
new file mode 100644
index 00000000..aaa83d2e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_integer.f90
@@ -0,0 +1,22 @@
+! Check that parameters are correctly intercepted.
+! Constants with kind-suffix separators are commonly
+! used, for instance Pi = 3._dp
+subroutine foo_int(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(9)
+ integer(ii), intent(inout) :: x
+ dimension x(3)
+ integer(ii), parameter :: three = 3_ii
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
+
+subroutine foo_long(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(18)
+ integer(ii), intent(inout) :: x
+ dimension x(3)
+ integer(ii), parameter :: three = 3_ii
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90
new file mode 100644
index 00000000..62c9a5b9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_non_compound.f90
@@ -0,0 +1,23 @@
+! Check that parameters are correctly intercepted.
+! Specifically, that types of constants without
+! compound kind specs are correctly inferred
+! (adapted from Gibbs iteration code in pymc
+! for this test case).
+subroutine foo_non_compound_int(x)
+ implicit none
+ integer, parameter :: ii = selected_int_kind(9)
+
+ integer(ii) maxiterates
+ parameter (maxiterates=2)
+
+ integer(ii) maxseries
+ parameter (maxseries=2)
+
+ integer(ii) wasize
+ parameter (wasize=maxiterates*maxseries)
+ integer(ii), intent(inout) :: x
+ dimension x(wasize)
+
+ x(1) = x(1) + x(2) + x(3) + x(4) * wasize
+ return
+end subroutine
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90
new file mode 100644
index 00000000..02ac9dd9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/parameter/constant_real.f90
@@ -0,0 +1,23 @@
+! Check that parameters are correctly intercepted.
+! Constants with kind-suffix separators are commonly
+! used, for instance Pi = 3._dp
+subroutine foo_single(x)
+ implicit none
+ integer, parameter :: rp = selected_real_kind(6)
+ real(rp), intent(inout) :: x
+ dimension x(3)
+ real(rp), parameter :: three = 3._rp
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
+
+subroutine foo_double(x)
+ implicit none
+ integer, parameter :: rp = selected_real_kind(15)
+ real(rp), intent(inout) :: x
+ dimension x(3)
+ real(rp), parameter :: three = 3._rp
+ x(1) = x(1) + x(2) + x(3) * three
+ return
+end subroutine
+
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f
new file mode 100644
index 00000000..9dc1cfa4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/quoted_character/foo.f
@@ -0,0 +1,14 @@
+ SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6)
+ CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR
+ PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!",
+ 1 OPENPAR="(", CLOSEPAR=")")
+ CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
+Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
+ OUT1 = SINGLE
+ OUT2 = DOUBLE
+ OUT3 = SEMICOL
+ OUT4 = EXCLA
+ OUT5 = OPENPAR
+ OUT6 = CLOSEPAR
+ RETURN
+ END
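
FOO checks that the FORTRAN 77 parser treats quote-enclosed comment and statement characters ('!', ';', parentheses) as data rather than syntax. Since every argument is intent(out), the wrapper takes no input; a hedged sketch under a hypothetical module name quoted:

    import quoted                   # hypothetical wrapped module
    out = quoted.foo()              # six single characters, returned as bytes
    assert out == (b"'", b'"', b';', b'!', b'(', b')')
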
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90
new file mode 100644
index 00000000..80cdad90
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/regression/inout.f90
@@ -0,0 +1,9 @@
+! Check that intent(in out) translates as intent(inout).
+! The space-separated spelling is in common use.
+ subroutine foo(x)
+ implicit none
+ real(4), intent(in out) :: x
+ dimension x(3)
+ x(1) = x(1) + x(2) + x(3)
+ return
+ end
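
Because intent(in out) normalizes to intent(inout), the generated wrapper demands a writable array whose dtype exactly matches real(4). A hedged sketch with the hypothetical module name inoutmod:

    import numpy as np
    import inoutmod                                   # hypothetical wrapped module
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # must match real(4)
    inoutmod.foo(x)                                   # modified in place, no copy
    assert x[0] == 6.0                                # x(1) = 1 + 2 + 3
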
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f
new file mode 100644
index 00000000..facae101
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo77.f
@@ -0,0 +1,45 @@
+ function t0(value)
+ character value
+ character t0
+ t0 = value
+ end
+ function t1(value)
+ character*1 value
+ character*1 t1
+ t1 = value
+ end
+ function t5(value)
+ character*5 value
+ character*5 t5
+ t5 = value
+ end
+ function ts(value)
+ character*(*) value
+ character*(*) ts
+ ts = value
+ end
+
+ subroutine s0(t0,value)
+ character value
+ character t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s1(t1,value)
+ character*1 value
+ character*1 t1
+cf2py intent(out) t1
+ t1 = value
+ end
+ subroutine s5(t5,value)
+ character*5 value
+ character*5 t5
+cf2py intent(out) t5
+ t5 = value
+ end
+ subroutine ss(ts,value)
+ character*(*) value
+ character*10 ts
+cf2py intent(out) ts
+ ts = value
+ end
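
The return_* fixtures come in pairs: F77 functions returning a value, and subroutines whose first argument is promoted to a return value by the cf2py intent(out) directives. Both spellings wrap to equivalent Python calls; a hedged sketch under a hypothetical module name retchar:

    import retchar                            # hypothetical wrapped module
    assert retchar.t5(b'abcde') == b'abcde'   # character*5 function result
    assert retchar.s5(b'abcde') == b'abcde'   # intent(out) arg becomes return value
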
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90
new file mode 100644
index 00000000..36182bcf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_character/foo90.f90
@@ -0,0 +1,48 @@
+module f90_return_char
+ contains
+ function t0(value)
+ character :: value
+ character :: t0
+ t0 = value
+ end function t0
+ function t1(value)
+ character(len=1) :: value
+ character(len=1) :: t1
+ t1 = value
+ end function t1
+ function t5(value)
+ character(len=5) :: value
+ character(len=5) :: t5
+ t5 = value
+ end function t5
+ function ts(value)
+ character(len=*) :: value
+ character(len=10) :: ts
+ ts = value
+ end function ts
+
+ subroutine s0(t0,value)
+ character :: value
+ character :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s1(t1,value)
+ character(len=1) :: value
+ character(len=1) :: t1
+!f2py intent(out) t1
+ t1 = value
+ end subroutine s1
+ subroutine s5(t5,value)
+ character(len=5) :: value
+ character(len=5) :: t5
+!f2py intent(out) t5
+ t5 = value
+ end subroutine s5
+ subroutine ss(ts,value)
+ character(len=*) :: value
+ character(len=10) :: ts
+!f2py intent(out) ts
+ ts = value
+ end subroutine ss
+end module f90_return_char
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f
new file mode 100644
index 00000000..37a1ec84
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo77.f
@@ -0,0 +1,45 @@
+ function t0(value)
+ complex value
+ complex t0
+ t0 = value
+ end
+ function t8(value)
+ complex*8 value
+ complex*8 t8
+ t8 = value
+ end
+ function t16(value)
+ complex*16 value
+ complex*16 t16
+ t16 = value
+ end
+ function td(value)
+ double complex value
+ double complex td
+ td = value
+ end
+
+ subroutine s0(t0,value)
+ complex value
+ complex t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s8(t8,value)
+ complex*8 value
+ complex*8 t8
+cf2py intent(out) t8
+ t8 = value
+ end
+ subroutine s16(t16,value)
+ complex*16 value
+ complex*16 t16
+cf2py intent(out) t16
+ t16 = value
+ end
+ subroutine sd(td,value)
+ double complex value
+ double complex td
+cf2py intent(out) td
+ td = value
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90
new file mode 100644
index 00000000..adc27b47
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_complex/foo90.f90
@@ -0,0 +1,48 @@
+module f90_return_complex
+ contains
+ function t0(value)
+ complex :: value
+ complex :: t0
+ t0 = value
+ end function t0
+ function t8(value)
+ complex(kind=4) :: value
+ complex(kind=4) :: t8
+ t8 = value
+ end function t8
+ function t16(value)
+ complex(kind=8) :: value
+ complex(kind=8) :: t16
+ t16 = value
+ end function t16
+ function td(value)
+ double complex :: value
+ double complex :: td
+ td = value
+ end function td
+
+ subroutine s0(t0,value)
+ complex :: value
+ complex :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s8(t8,value)
+ complex(kind=4) :: value
+ complex(kind=4) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+ subroutine s16(t16,value)
+ complex(kind=8) :: value
+ complex(kind=8) :: t16
+!f2py intent(out) t16
+ t16 = value
+ end subroutine s16
+ subroutine sd(td,value)
+ double complex :: value
+ double complex :: td
+!f2py intent(out) td
+ td = value
+ end subroutine sd
+end module f90_return_complex
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f
new file mode 100644
index 00000000..1ab895b9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo77.f
@@ -0,0 +1,56 @@
+ function t0(value)
+ integer value
+ integer t0
+ t0 = value
+ end
+ function t1(value)
+ integer*1 value
+ integer*1 t1
+ t1 = value
+ end
+ function t2(value)
+ integer*2 value
+ integer*2 t2
+ t2 = value
+ end
+ function t4(value)
+ integer*4 value
+ integer*4 t4
+ t4 = value
+ end
+ function t8(value)
+ integer*8 value
+ integer*8 t8
+ t8 = value
+ end
+
+ subroutine s0(t0,value)
+ integer value
+ integer t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s1(t1,value)
+ integer*1 value
+ integer*1 t1
+cf2py intent(out) t1
+ t1 = value
+ end
+ subroutine s2(t2,value)
+ integer*2 value
+ integer*2 t2
+cf2py intent(out) t2
+ t2 = value
+ end
+ subroutine s4(t4,value)
+ integer*4 value
+ integer*4 t4
+cf2py intent(out) t4
+ t4 = value
+ end
+ subroutine s8(t8,value)
+ integer*8 value
+ integer*8 t8
+cf2py intent(out) t8
+ t8 = value
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90
new file mode 100644
index 00000000..ba9249aa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_integer/foo90.f90
@@ -0,0 +1,59 @@
+module f90_return_integer
+ contains
+ function t0(value)
+ integer :: value
+ integer :: t0
+ t0 = value
+ end function t0
+ function t1(value)
+ integer(kind=1) :: value
+ integer(kind=1) :: t1
+ t1 = value
+ end function t1
+ function t2(value)
+ integer(kind=2) :: value
+ integer(kind=2) :: t2
+ t2 = value
+ end function t2
+ function t4(value)
+ integer(kind=4) :: value
+ integer(kind=4) :: t4
+ t4 = value
+ end function t4
+ function t8(value)
+ integer(kind=8) :: value
+ integer(kind=8) :: t8
+ t8 = value
+ end function t8
+
+ subroutine s0(t0,value)
+ integer :: value
+ integer :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s1(t1,value)
+ integer(kind=1) :: value
+ integer(kind=1) :: t1
+!f2py intent(out) t1
+ t1 = value
+ end subroutine s1
+ subroutine s2(t2,value)
+ integer(kind=2) :: value
+ integer(kind=2) :: t2
+!f2py intent(out) t2
+ t2 = value
+ end subroutine s2
+ subroutine s4(t4,value)
+ integer(kind=4) :: value
+ integer(kind=4) :: t4
+!f2py intent(out) t4
+ t4 = value
+ end subroutine s4
+ subroutine s8(t8,value)
+ integer(kind=8) :: value
+ integer(kind=8) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+end module f90_return_integer
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f
new file mode 100644
index 00000000..ef530145
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo77.f
@@ -0,0 +1,56 @@
+ function t0(value)
+ logical value
+ logical t0
+ t0 = value
+ end
+ function t1(value)
+ logical*1 value
+ logical*1 t1
+ t1 = value
+ end
+ function t2(value)
+ logical*2 value
+ logical*2 t2
+ t2 = value
+ end
+ function t4(value)
+ logical*4 value
+ logical*4 t4
+ t4 = value
+ end
+c function t8(value)
+c logical*8 value
+c logical*8 t8
+c t8 = value
+c end
+
+ subroutine s0(t0,value)
+ logical value
+ logical t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s1(t1,value)
+ logical*1 value
+ logical*1 t1
+cf2py intent(out) t1
+ t1 = value
+ end
+ subroutine s2(t2,value)
+ logical*2 value
+ logical*2 t2
+cf2py intent(out) t2
+ t2 = value
+ end
+ subroutine s4(t4,value)
+ logical*4 value
+ logical*4 t4
+cf2py intent(out) t4
+ t4 = value
+ end
+c subroutine s8(t8,value)
+c logical*8 value
+c logical*8 t8
+cf2py intent(out) t8
+c t8 = value
+c end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90
new file mode 100644
index 00000000..a4526468
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_logical/foo90.f90
@@ -0,0 +1,59 @@
+module f90_return_logical
+ contains
+ function t0(value)
+ logical :: value
+ logical :: t0
+ t0 = value
+ end function t0
+ function t1(value)
+ logical(kind=1) :: value
+ logical(kind=1) :: t1
+ t1 = value
+ end function t1
+ function t2(value)
+ logical(kind=2) :: value
+ logical(kind=2) :: t2
+ t2 = value
+ end function t2
+ function t4(value)
+ logical(kind=4) :: value
+ logical(kind=4) :: t4
+ t4 = value
+ end function t4
+ function t8(value)
+ logical(kind=8) :: value
+ logical(kind=8) :: t8
+ t8 = value
+ end function t8
+
+ subroutine s0(t0,value)
+ logical :: value
+ logical :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s1(t1,value)
+ logical(kind=1) :: value
+ logical(kind=1) :: t1
+!f2py intent(out) t1
+ t1 = value
+ end subroutine s1
+ subroutine s2(t2,value)
+ logical(kind=2) :: value
+ logical(kind=2) :: t2
+!f2py intent(out) t2
+ t2 = value
+ end subroutine s2
+ subroutine s4(t4,value)
+ logical(kind=4) :: value
+ logical(kind=4) :: t4
+!f2py intent(out) t4
+ t4 = value
+ end subroutine s4
+ subroutine s8(t8,value)
+ logical(kind=8) :: value
+ logical(kind=8) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+end module f90_return_logical
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f
new file mode 100644
index 00000000..bf43dbf1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo77.f
@@ -0,0 +1,45 @@
+ function t0(value)
+ real value
+ real t0
+ t0 = value
+ end
+ function t4(value)
+ real*4 value
+ real*4 t4
+ t4 = value
+ end
+ function t8(value)
+ real*8 value
+ real*8 t8
+ t8 = value
+ end
+ function td(value)
+ double precision value
+ double precision td
+ td = value
+ end
+
+ subroutine s0(t0,value)
+ real value
+ real t0
+cf2py intent(out) t0
+ t0 = value
+ end
+ subroutine s4(t4,value)
+ real*4 value
+ real*4 t4
+cf2py intent(out) t4
+ t4 = value
+ end
+ subroutine s8(t8,value)
+ real*8 value
+ real*8 t8
+cf2py intent(out) t8
+ t8 = value
+ end
+ subroutine sd(td,value)
+ double precision value
+ double precision td
+cf2py intent(out) td
+ td = value
+ end
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90
new file mode 100644
index 00000000..df971998
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/return_real/foo90.f90
@@ -0,0 +1,48 @@
+module f90_return_real
+ contains
+ function t0(value)
+ real :: value
+ real :: t0
+ t0 = value
+ end function t0
+ function t4(value)
+ real(kind=4) :: value
+ real(kind=4) :: t4
+ t4 = value
+ end function t4
+ function t8(value)
+ real(kind=8) :: value
+ real(kind=8) :: t8
+ t8 = value
+ end function t8
+ function td(value)
+ double precision :: value
+ double precision :: td
+ td = value
+ end function td
+
+ subroutine s0(t0,value)
+ real :: value
+ real :: t0
+!f2py intent(out) t0
+ t0 = value
+ end subroutine s0
+ subroutine s4(t4,value)
+ real(kind=4) :: value
+ real(kind=4) :: t4
+!f2py intent(out) t4
+ t4 = value
+ end subroutine s4
+ subroutine s8(t8,value)
+ real(kind=8) :: value
+ real(kind=8) :: t8
+!f2py intent(out) t8
+ t8 = value
+ end subroutine s8
+ subroutine sd(td,value)
+ double precision :: value
+ double precision :: td
+!f2py intent(out) td
+ td = value
+ end subroutine sd
+end module f90_return_real
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90
new file mode 100644
index 00000000..5b66f8c4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/size/foo.f90
@@ -0,0 +1,44 @@
+
+subroutine foo(a, n, m, b)
+ implicit none
+
+ real, intent(in) :: a(n, m)
+ integer, intent(in) :: n, m
+ real, intent(out) :: b(size(a, 1))
+
+ integer :: i
+
+ do i = 1, size(b)
+ b(i) = sum(a(i,:))
+ enddo
+end subroutine
+
+subroutine trans(x,y)
+ implicit none
+ real, intent(in), dimension(:,:) :: x
+ real, intent(out), dimension( size(x,2), size(x,1) ) :: y
+ integer :: N, M, i, j
+ N = size(x,1)
+ M = size(x,2)
+ DO i=1,N
+ do j=1,M
+ y(j,i) = x(i,j)
+ END DO
+ END DO
+end subroutine trans
+
+subroutine flatten(x,y)
+ implicit none
+ real, intent(in), dimension(:,:) :: x
+ real, intent(out), dimension( size(x) ) :: y
+ integer :: N, M, i, j, k
+ N = size(x,1)
+ M = size(x,2)
+ k = 1
+ DO i=1,N
+ do j=1,M
+ y(k) = x(i,j)
+ k = k + 1
+ END DO
+ END DO
+end subroutine flatten
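
trans and flatten take assumed-shape arrays and size their outputs with size(), which f2py resolves from the input at call time. A hedged sketch under a hypothetical module name sizefuncs:

    import numpy as np
    import sizefuncs                             # hypothetical wrapped module
    x = np.arange(6, dtype=np.float32).reshape(2, 3)
    assert sizefuncs.trans(x).shape == (3, 2)    # dimension(size(x,2), size(x,1))
    assert sizefuncs.flatten(x).shape == (6,)    # dimension(size(x))
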
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90
new file mode 100644
index 00000000..bb7985ce
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/char.f90
@@ -0,0 +1,29 @@
+MODULE char_test
+
+CONTAINS
+
+SUBROUTINE change_strings(strings, n_strs, out_strings)
+ IMPLICIT NONE
+
+ ! Inputs
+ INTEGER, INTENT(IN) :: n_strs
+ CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings
+ CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings
+
+!f2py INTEGER, INTENT(IN) :: n_strs
+!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings
+!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings
+
+ ! Misc.
+ INTEGER*4 :: j
+
+
+ DO j=1, n_strs
+ out_strings(1,j) = strings(1,j)
+ out_strings(2,j) = 'A'
+ END DO
+
+END SUBROUTINE change_strings
+
+END MODULE char_test
+
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90
new file mode 100644
index 00000000..7fd15854
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/fixed_string.f90
@@ -0,0 +1,34 @@
+function sint(s) result(i)
+ implicit none
+ character(len=*) :: s
+ integer :: j, i
+ i = 0
+ do j=len(s), 1, -1
+ if (.not.((i.eq.0).and.(s(j:j).eq.' '))) then
+ i = i + ichar(s(j:j)) * 10 ** (j - 1)
+ endif
+ end do
+ return
+ end function sint
+
+ function test_in_bytes4(a) result (i)
+ implicit none
+ integer :: sint
+ character(len=4) :: a
+ integer :: i
+ i = sint(a)
+ a(1:1) = 'A'
+ return
+ end function test_in_bytes4
+
+ function test_inout_bytes4(a) result (i)
+ implicit none
+ integer :: sint
+ character(len=4), intent(inout) :: a
+ integer :: i
+ if (a(1:1).ne.' ') then
+ a(1:1) = 'E'
+ endif
+ i = sint(a)
+ return
+ end function test_inout_bytes4
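
sint encodes a fixed-length string as an integer by weighting each character code by a power of ten (position j contributes ichar * 10**(j-1)) while skipping trailing blanks. A direct Python mirror of the same logic, for illustration only:

    def sint(s: str) -> int:
        i = 0
        for j in range(len(s), 0, -1):             # scan right to left, 1-based j
            if not (i == 0 and s[j - 1] == ' '):   # ignore trailing blanks only
                i += ord(s[j - 1]) * 10 ** (j - 1)
        return i
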
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90
new file mode 100644
index 00000000..f8f07617
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/scalar_string.f90
@@ -0,0 +1,9 @@
+MODULE string_test
+
+ character(len=8) :: string
+ character string77 * 8
+
+ character(len=12), dimension(5,7) :: strarr
+ character strarr77(5,7) * 12
+
+END MODULE string_test
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f
new file mode 100644
index 00000000..5210ca4d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/string/string.f
@@ -0,0 +1,12 @@
+C FILE: STRING.F
+ SUBROUTINE FOO(A,B,C,D)
+ CHARACTER*5 A, B
+ CHARACTER*(*) C,D
+Cf2py intent(in) a,c
+Cf2py intent(inout) b,d
+ A(1:1) = 'A'
+ B(1:1) = 'B'
+ C(1:1) = 'C'
+ D(1:1) = 'D'
+ END
+C END OF FILE STRING.F
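
Here A and C are intent(in) while B and D are intent(inout), so B and D must arrive as writable byte buffers of the right length. A hedged sketch (the module name and exact buffer conventions are assumptions):

    import numpy as np
    import strmod                           # hypothetical wrapped module
    b = np.array(b'12345')                  # writable 'S5' buffer for B
    d = np.array(b'123')                    # buffer for assumed-length D
    strmod.foo(b'AAAAA', b, b'CCC', d)      # first chars of B, D become 'B', 'D'
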
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90 b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90
new file mode 100644
index 00000000..7d9dc0fd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/src/value_attrspec/gh21665.f90
@@ -0,0 +1,9 @@
+module fortfuncs
+ implicit none
+contains
+ subroutine square(x,y)
+ integer, intent(in), value :: x
+ integer, intent(out) :: y
+ y = x*x
+ end subroutine square
+end module fortfuncs
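
gh-21665 covers the value attribute: x is passed by value, so the wrapper accepts a plain Python int. A hedged sketch, assuming the extension is built under the hypothetical name gh21665:

    import gh21665                              # hypothetical wrapped module
    assert gh21665.fortfuncs.square(7) == 49    # Fortran module maps to an attribute
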
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py
new file mode 100644
index 00000000..42902913
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_abstract_interface.py
@@ -0,0 +1,25 @@
+from pathlib import Path
+import pytest
+import textwrap
+from . import util
+from numpy.f2py import crackfortran
+from numpy.testing import IS_WASM
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+class TestAbstractInterface(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")]
+
+ skip = ["add1", "add2"]
+
+ def test_abstract_interface(self):
+ assert self.module.ops_module.foo(3, 5) == (8, 13)
+
+ def test_parse_abstract_interface(self):
+ # Test gh18403
+ fpath = util.getpath("tests", "src", "abstract_interface",
+ "gh18403_mod.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ assert len(mod[0]["body"]) == 1
+ assert mod[0]["body"][0]["block"] == "abstract interface"
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py
new file mode 100644
index 00000000..2b8c8def
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_array_from_pyobj.py
@@ -0,0 +1,686 @@
+import os
+import sys
+import copy
+import platform
+import pytest
+
+import numpy as np
+
+from numpy.testing import assert_, assert_equal
+from numpy.core.multiarray import typeinfo as _typeinfo
+from . import util
+
+wrap = None
+
+# Extend core typeinfo with CHARACTER to test dtype('c')
+_ti = _typeinfo['STRING']
+typeinfo = dict(
+ CHARACTER=type(_ti)(('c', _ti.num, 8, _ti.alignment, _ti.type)),
+ **_typeinfo)
+
+
+def setup_module():
+ """
+ Build the required testing extension module
+
+ """
+ global wrap
+
+ # Check compiler availability first
+ if not util.has_c_compiler():
+ pytest.skip("No C compiler available")
+
+ if wrap is None:
+ config_code = """
+ config.add_extension('test_array_from_pyobj_ext',
+ sources=['wrapmodule.c', 'fortranobject.c'],
+ define_macros=[])
+ """
+ d = os.path.dirname(__file__)
+ src = [
+ util.getpath("tests", "src", "array_from_pyobj", "wrapmodule.c"),
+ util.getpath("src", "fortranobject.c"),
+ util.getpath("src", "fortranobject.h"),
+ ]
+ wrap = util.build_module_distutils(src, config_code,
+ "test_array_from_pyobj_ext")
+
+
+def flags_info(arr):
+ flags = wrap.array_attrs(arr)[6]
+ return flags2names(flags)
+
+
+def flags2names(flags):
+ info = []
+ for flagname in [
+ "CONTIGUOUS",
+ "FORTRAN",
+ "OWNDATA",
+ "ENSURECOPY",
+ "ENSUREARRAY",
+ "ALIGNED",
+ "NOTSWAPPED",
+ "WRITEABLE",
+ "WRITEBACKIFCOPY",
+ "UPDATEIFCOPY",
+ "BEHAVED",
+ "BEHAVED_RO",
+ "CARRAY",
+ "FARRAY",
+ ]:
+ if abs(flags) & getattr(wrap, flagname, 0):
+ info.append(flagname)
+ return info
+
+
+class Intent:
+ def __init__(self, intent_list=[]):
+ self.intent_list = intent_list[:]
+ flags = 0
+ for i in intent_list:
+ if i == "optional":
+ flags |= wrap.F2PY_OPTIONAL
+ else:
+ flags |= getattr(wrap, "F2PY_INTENT_" + i.upper())
+ self.flags = flags
+
+ def __getattr__(self, name):
+ name = name.lower()
+ if name == "in_":
+ name = "in"
+ return self.__class__(self.intent_list + [name])
+
+ def __str__(self):
+ return "intent(%s)" % (",".join(self.intent_list))
+
+ def __repr__(self):
+ return "Intent(%r)" % (self.intent_list)
+
+ def is_intent(self, *names):
+ for name in names:
+ if name not in self.intent_list:
+ return False
+ return True
+
+ def is_intent_exact(self, *names):
+ return len(self.intent_list) == len(names) and self.is_intent(*names)
+
+
+intent = Intent()
+
+_type_names = [
+ "BOOL",
+ "BYTE",
+ "UBYTE",
+ "SHORT",
+ "USHORT",
+ "INT",
+ "UINT",
+ "LONG",
+ "ULONG",
+ "LONGLONG",
+ "ULONGLONG",
+ "FLOAT",
+ "DOUBLE",
+ "CFLOAT",
+ "STRING1",
+ "STRING5",
+ "CHARACTER",
+]
+
+_cast_dict = {"BOOL": ["BOOL"]}
+_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"]
+_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"]
+_cast_dict["BYTE"] = ["BYTE"]
+_cast_dict["UBYTE"] = ["UBYTE"]
+_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"]
+_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"]
+_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"]
+_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"]
+
+_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"]
+_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"]
+
+_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"]
+_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"]
+
+_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"]
+_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"]
+
+_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"]
+
+_cast_dict['STRING1'] = ['STRING1']
+_cast_dict['STRING5'] = ['STRING5']
+_cast_dict['CHARACTER'] = ['CHARACTER']
+
+# 32-bit system malloc typically does not provide the alignment required by
+# 16-byte long double types; this means the inout intent cannot be satisfied
+# and several tests fail, as the alignment flag can be randomly true or false.
+# When numpy gains an aligned allocator the tests could be enabled again.
+#
+# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE.
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8)
+ and sys.platform != "win32"
+ and (platform.system(), platform.processor()) != ("Darwin", "arm")):
+ _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"])
+ _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [
+ "ULONG",
+ "FLOAT",
+ "DOUBLE",
+ "LONGDOUBLE",
+ ]
+ _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [
+ "CFLOAT",
+ "CDOUBLE",
+ "CLONGDOUBLE",
+ ]
+ _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"]
+
+
+class Type:
+ _type_cache = {}
+
+ def __new__(cls, name):
+ if isinstance(name, np.dtype):
+ dtype0 = name
+ name = None
+ for n, i in typeinfo.items():
+ if not isinstance(i, type) and dtype0.type is i.type:
+ name = n
+ break
+ obj = cls._type_cache.get(name.upper(), None)
+ if obj is not None:
+ return obj
+ obj = object.__new__(cls)
+ obj._init(name)
+ cls._type_cache[name.upper()] = obj
+ return obj
+
+ def _init(self, name):
+ self.NAME = name.upper()
+
+ if self.NAME == 'CHARACTER':
+ info = typeinfo[self.NAME]
+ self.type_num = getattr(wrap, 'NPY_STRING')
+ self.elsize = 1
+ self.dtype = np.dtype('c')
+ elif self.NAME.startswith('STRING'):
+ info = typeinfo[self.NAME[:6]]
+ self.type_num = getattr(wrap, 'NPY_STRING')
+ self.elsize = int(self.NAME[6:] or 0)
+ self.dtype = np.dtype(f'S{self.elsize}')
+ else:
+ info = typeinfo[self.NAME]
+ self.type_num = getattr(wrap, 'NPY_' + self.NAME)
+ self.elsize = info.bits // 8
+ self.dtype = np.dtype(info.type)
+
+ assert self.type_num == info.num
+ self.type = info.type
+ self.dtypechar = info.char
+
+ def __repr__(self):
+ return (f"Type({self.NAME})|type_num={self.type_num},"
+ f" dtype={self.dtype},"
+ f" type={self.type}, elsize={self.elsize},"
+ f" dtypechar={self.dtypechar}")
+
+ def cast_types(self):
+ return [self.__class__(_m) for _m in _cast_dict[self.NAME]]
+
+ def all_types(self):
+ return [self.__class__(_m) for _m in _type_names]
+
+ def smaller_types(self):
+ bits = typeinfo[self.NAME].alignment
+ types = []
+ for name in _type_names:
+ if typeinfo[name].alignment < bits:
+ types.append(Type(name))
+ return types
+
+ def equal_types(self):
+ bits = typeinfo[self.NAME].alignment
+ types = []
+ for name in _type_names:
+ if name == self.NAME:
+ continue
+ if typeinfo[name].alignment == bits:
+ types.append(Type(name))
+ return types
+
+ def larger_types(self):
+ bits = typeinfo[self.NAME].alignment
+ types = []
+ for name in _type_names:
+ if typeinfo[name].alignment > bits:
+ types.append(Type(name))
+ return types
+
+
+class Array:
+
+ def __repr__(self):
+ return (f'Array({self.type}, {self.dims}, {self.intent},'
+ f' {self.obj})|arr={self.arr}')
+
+ def __init__(self, typ, dims, intent, obj):
+ self.type = typ
+ self.dims = dims
+ self.intent = intent
+ self.obj_copy = copy.deepcopy(obj)
+ self.obj = obj
+
+ # arr.dtypechar may be different from typ.dtypechar
+ self.arr = wrap.call(typ.type_num,
+ typ.elsize,
+ dims, intent.flags, obj)
+
+ assert isinstance(self.arr, np.ndarray)
+
+ self.arr_attr = wrap.array_attrs(self.arr)
+
+ if len(dims) > 1:
+ if self.intent.is_intent("c"):
+ assert (intent.flags & wrap.F2PY_INTENT_C)
+ assert not self.arr.flags["FORTRAN"]
+ assert self.arr.flags["CONTIGUOUS"]
+ assert (not self.arr_attr[6] & wrap.FORTRAN)
+ else:
+ assert (not intent.flags & wrap.F2PY_INTENT_C)
+ assert self.arr.flags["FORTRAN"]
+ assert not self.arr.flags["CONTIGUOUS"]
+ assert (self.arr_attr[6] & wrap.FORTRAN)
+
+ if obj is None:
+ self.pyarr = None
+ self.pyarr_attr = None
+ return
+
+ if intent.is_intent("cache"):
+ assert isinstance(obj, np.ndarray), repr(type(obj))
+ self.pyarr = np.array(obj).reshape(*dims).copy()
+ else:
+ self.pyarr = np.array(
+ np.array(obj, dtype=typ.dtypechar).reshape(*dims),
+ order=self.intent.is_intent("c") and "C" or "F",
+ )
+ assert self.pyarr.dtype == typ
+ self.pyarr.setflags(write=self.arr.flags["WRITEABLE"])
+ assert self.pyarr.flags["OWNDATA"], (obj, intent)
+ self.pyarr_attr = wrap.array_attrs(self.pyarr)
+
+ if len(dims) > 1:
+ if self.intent.is_intent("c"):
+ assert not self.pyarr.flags["FORTRAN"]
+ assert self.pyarr.flags["CONTIGUOUS"]
+ assert (not self.pyarr_attr[6] & wrap.FORTRAN)
+ else:
+ assert self.pyarr.flags["FORTRAN"]
+ assert not self.pyarr.flags["CONTIGUOUS"]
+ assert (self.pyarr_attr[6] & wrap.FORTRAN)
+
+ assert self.arr_attr[1] == self.pyarr_attr[1] # nd
+ assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions
+ if self.arr_attr[1] <= 1:
+ assert self.arr_attr[3] == self.pyarr_attr[3], repr((
+ self.arr_attr[3],
+ self.pyarr_attr[3],
+ self.arr.tobytes(),
+ self.pyarr.tobytes(),
+ )) # strides
+ assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr((
+ self.arr_attr[5], self.pyarr_attr[5]
+ )) # descr
+ assert self.arr_attr[6] == self.pyarr_attr[6], repr((
+ self.arr_attr[6],
+ self.pyarr_attr[6],
+ flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]),
+ flags2names(self.arr_attr[6]),
+ intent,
+ )) # flags
+
+ if intent.is_intent("cache"):
+ assert self.arr_attr[5][3] >= self.type.elsize
+ else:
+ assert self.arr_attr[5][3] == self.type.elsize
+ assert (self.arr_equal(self.pyarr, self.arr))
+
+ if isinstance(self.obj, np.ndarray):
+ if typ.elsize == Type(obj.dtype).elsize:
+ if not intent.is_intent("copy") and self.arr_attr[1] <= 1:
+ assert self.has_shared_memory()
+
+ def arr_equal(self, arr1, arr2):
+ if arr1.shape != arr2.shape:
+ return False
+ return (arr1 == arr2).all()
+
+ def __str__(self):
+ return str(self.arr)
+
+ def has_shared_memory(self):
+ """Check that created array shares data with input array."""
+ if self.obj is self.arr:
+ return True
+ if not isinstance(self.obj, np.ndarray):
+ return False
+ obj_attr = wrap.array_attrs(self.obj)
+ return obj_attr[0] == self.arr_attr[0]
+
+
+class TestIntent:
+ def test_in_out(self):
+ assert str(intent.in_.out) == "intent(in,out)"
+ assert intent.in_.c.is_intent("c")
+ assert not intent.in_.c.is_intent_exact("c")
+ assert intent.in_.c.is_intent_exact("c", "in")
+ assert intent.in_.c.is_intent_exact("in", "c")
+ assert not intent.in_.is_intent("c")
+
+
+class TestSharedMemory:
+
+ @pytest.fixture(autouse=True, scope="class", params=_type_names)
+ def setup_type(self, request):
+ request.cls.type = Type(request.param)
+ request.cls.array = lambda self, dims, intent, obj: Array(
+ Type(request.param), dims, intent, obj)
+
+ @property
+ def num2seq(self):
+ if self.type.NAME.startswith('STRING'):
+ elsize = self.type.elsize
+ return ['1' * elsize, '2' * elsize]
+ return [1, 2]
+
+ @property
+ def num23seq(self):
+ if self.type.NAME.startswith('STRING'):
+ elsize = self.type.elsize
+ return [['1' * elsize, '2' * elsize, '3' * elsize],
+ ['4' * elsize, '5' * elsize, '6' * elsize]]
+ return [[1, 2, 3], [4, 5, 6]]
+
+ def test_in_from_2seq(self):
+ a = self.array([2], intent.in_, self.num2seq)
+ assert not a.has_shared_memory()
+
+ def test_in_from_2casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num2seq, dtype=t.dtype)
+ a = self.array([len(self.num2seq)], intent.in_, obj)
+ if t.elsize == self.type.elsize:
+ assert a.has_shared_memory(), repr((self.type.dtype, t.dtype))
+ else:
+ assert not a.has_shared_memory()
+
+ @pytest.mark.parametrize("write", ["w", "ro"])
+ @pytest.mark.parametrize("order", ["C", "F"])
+ @pytest.mark.parametrize("inp", ["2seq", "23seq"])
+ def test_in_nocopy(self, write, order, inp):
+ """Test if intent(in) array can be passed without copies"""
+ seq = getattr(self, "num" + inp)
+ obj = np.array(seq, dtype=self.type.dtype, order=order)
+ obj.setflags(write=(write == 'w'))
+ a = self.array(obj.shape,
+ ((order == 'C' and intent.in_.c) or intent.in_), obj)
+ assert a.has_shared_memory()
+
+ def test_inout_2seq(self):
+ obj = np.array(self.num2seq, dtype=self.type.dtype)
+ a = self.array([len(self.num2seq)], intent.inout, obj)
+ assert a.has_shared_memory()
+
+ try:
+ a = self.array([2], intent.in_.inout, self.num2seq)
+ except TypeError as msg:
+ if not str(msg).startswith(
+ "failed to initialize intent(inout|inplace|cache) array"):
+ raise
+ else:
+ raise SystemError("intent(inout) should have failed on sequence")
+
+ def test_f_inout_23seq(self):
+ obj = np.array(self.num23seq, dtype=self.type.dtype, order="F")
+ shape = (len(self.num23seq), len(self.num23seq[0]))
+ a = self.array(shape, intent.in_.inout, obj)
+ assert a.has_shared_memory()
+
+ obj = np.array(self.num23seq, dtype=self.type.dtype, order="C")
+ shape = (len(self.num23seq), len(self.num23seq[0]))
+ try:
+ a = self.array(shape, intent.in_.inout, obj)
+ except ValueError as msg:
+ if not str(msg).startswith(
+ "failed to initialize intent(inout) array"):
+ raise
+ else:
+ raise SystemError(
+ "intent(inout) should have failed on improper array")
+
+ def test_c_inout_23seq(self):
+ obj = np.array(self.num23seq, dtype=self.type.dtype)
+ shape = (len(self.num23seq), len(self.num23seq[0]))
+ a = self.array(shape, intent.in_.c.inout, obj)
+ assert a.has_shared_memory()
+
+ def test_in_copy_from_2casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num2seq, dtype=t.dtype)
+ a = self.array([len(self.num2seq)], intent.in_.copy, obj)
+ assert not a.has_shared_memory()
+
+ def test_c_in_from_23seq(self):
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_,
+ self.num23seq)
+ assert not a.has_shared_memory()
+
+ def test_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num23seq, dtype=t.dtype)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
+ assert not a.has_shared_memory()
+
+ def test_f_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num23seq, dtype=t.dtype, order="F")
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj)
+ if t.elsize == self.type.elsize:
+ assert a.has_shared_memory()
+ else:
+ assert not a.has_shared_memory()
+
+ def test_c_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num23seq, dtype=t.dtype)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj)
+ if t.elsize == self.type.elsize:
+ assert a.has_shared_memory()
+ else:
+ assert not a.has_shared_memory()
+
+ def test_f_copy_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num23seq, dtype=t.dtype, order="F")
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy,
+ obj)
+ assert not a.has_shared_memory()
+
+ def test_c_copy_in_from_23casttype(self):
+ for t in self.type.cast_types():
+ obj = np.array(self.num23seq, dtype=t.dtype)
+ a = self.array(
+ [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy,
+ obj)
+ assert not a.has_shared_memory()
+
+ def test_in_cache_from_2casttype(self):
+ for t in self.type.all_types():
+ if t.elsize != self.type.elsize:
+ continue
+ obj = np.array(self.num2seq, dtype=t.dtype)
+ shape = (len(self.num2seq), )
+ a = self.array(shape, intent.in_.c.cache, obj)
+ assert a.has_shared_memory()
+
+ a = self.array(shape, intent.in_.cache, obj)
+ assert a.has_shared_memory()
+
+ obj = np.array(self.num2seq, dtype=t.dtype, order="F")
+ a = self.array(shape, intent.in_.c.cache, obj)
+ assert a.has_shared_memory()
+
+ a = self.array(shape, intent.in_.cache, obj)
+ assert a.has_shared_memory(), repr(t.dtype)
+
+ try:
+ a = self.array(shape, intent.in_.cache, obj[::-1])
+ except ValueError as msg:
+ if not str(msg).startswith(
+ "failed to initialize intent(cache) array"):
+ raise
+ else:
+ raise SystemError(
+ "intent(cache) should have failed on multisegmented array")
+
+ def test_in_cache_from_2casttype_failure(self):
+ for t in self.type.all_types():
+ if t.NAME == 'STRING':
+ # string elsize is 0, so skipping the test
+ continue
+ if t.elsize >= self.type.elsize:
+ continue
+ obj = np.array(self.num2seq, dtype=t.dtype)
+ shape = (len(self.num2seq), )
+ try:
+                self.array(shape, intent.in_.cache, obj)  # expected to raise
+ except ValueError as msg:
+ if not str(msg).startswith(
+ "failed to initialize intent(cache) array"):
+ raise
+ else:
+ raise SystemError(
+ "intent(cache) should have failed on smaller array")
+
+ def test_cache_hidden(self):
+ shape = (2, )
+ a = self.array(shape, intent.cache.hide, None)
+ assert a.arr.shape == shape
+
+ shape = (2, 3)
+ a = self.array(shape, intent.cache.hide, None)
+ assert a.arr.shape == shape
+
+ shape = (-1, 3)
+ try:
+ a = self.array(shape, intent.cache.hide, None)
+ except ValueError as msg:
+ if not str(msg).startswith(
+ "failed to create intent(cache|hide)|optional array"):
+ raise
+ else:
+ raise SystemError(
+ "intent(cache) should have failed on undefined dimensions")
+
+ def test_hidden(self):
+ shape = (2, )
+ a = self.array(shape, intent.hide, None)
+ assert a.arr.shape == shape
+ assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
+
+ shape = (2, 3)
+ a = self.array(shape, intent.hide, None)
+ assert a.arr.shape == shape
+ assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
+ assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"]
+
+ shape = (2, 3)
+ a = self.array(shape, intent.c.hide, None)
+ assert a.arr.shape == shape
+ assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
+ assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"]
+
+ shape = (-1, 3)
+ try:
+ a = self.array(shape, intent.hide, None)
+ except ValueError as msg:
+ if not str(msg).startswith(
+ "failed to create intent(cache|hide)|optional array"):
+ raise
+ else:
+ raise SystemError(
+ "intent(hide) should have failed on undefined dimensions")
+
+ def test_optional_none(self):
+ shape = (2, )
+ a = self.array(shape, intent.optional, None)
+ assert a.arr.shape == shape
+ assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
+
+ shape = (2, 3)
+ a = self.array(shape, intent.optional, None)
+ assert a.arr.shape == shape
+ assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
+ assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"]
+
+ shape = (2, 3)
+ a = self.array(shape, intent.c.optional, None)
+ assert a.arr.shape == shape
+ assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype))
+ assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"]
+
+ def test_optional_from_2seq(self):
+ obj = self.num2seq
+ shape = (len(obj), )
+ a = self.array(shape, intent.optional, obj)
+ assert a.arr.shape == shape
+ assert not a.has_shared_memory()
+
+ def test_optional_from_23seq(self):
+ obj = self.num23seq
+ shape = (len(obj), len(obj[0]))
+ a = self.array(shape, intent.optional, obj)
+ assert a.arr.shape == shape
+ assert not a.has_shared_memory()
+
+ a = self.array(shape, intent.optional.c, obj)
+ assert a.arr.shape == shape
+ assert not a.has_shared_memory()
+
+ def test_inplace(self):
+ obj = np.array(self.num23seq, dtype=self.type.dtype)
+ assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"]
+ shape = obj.shape
+ a = self.array(shape, intent.inplace, obj)
+ assert obj[1][2] == a.arr[1][2], repr((obj, a.arr))
+ a.arr[1][2] = 54
+ assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype)
+ assert a.arr is obj
+ assert obj.flags["FORTRAN"] # obj attributes are changed inplace!
+ assert not obj.flags["CONTIGUOUS"]
+
+ def test_inplace_from_casttype(self):
+ for t in self.type.cast_types():
+ if t is self.type:
+ continue
+ obj = np.array(self.num23seq, dtype=t.dtype)
+ assert obj.dtype.type == t.type
+ assert obj.dtype.type is not self.type.type
+ assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"]
+ shape = obj.shape
+ a = self.array(shape, intent.inplace, obj)
+ assert obj[1][2] == a.arr[1][2], repr((obj, a.arr))
+ a.arr[1][2] = 54
+ assert obj[1][2] == a.arr[1][2] == np.array(54,
+ dtype=self.type.dtype)
+ assert a.arr is obj
+ assert obj.flags["FORTRAN"] # obj attributes changed inplace!
+ assert not obj.flags["CONTIGUOUS"]
+ assert obj.dtype.type is self.type.type # obj changed inplace!
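
The Intent helper in this test builds f2py intent flag sets by attribute chaining: intent.in_.c.copy reads as intent(in,c,copy), with the trailing underscore dodging the Python keyword. A self-contained illustration of the same chaining idea:

    class Chain:
        def __init__(self, parts=()):
            self.parts = tuple(parts)

        def __getattr__(self, name):
            # Each attribute access extends the chain; 'in_' becomes 'in'.
            return Chain(self.parts + (name.rstrip('_'),))

        def __str__(self):
            return "intent(%s)" % ",".join(self.parts)

    print(Chain().in_.c.copy)   # -> intent(in,c,copy)
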
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py
new file mode 100644
index 00000000..d4664cf8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_assumed_shape.py
@@ -0,0 +1,49 @@
+import os
+import pytest
+import tempfile
+
+from . import util
+
+
+class TestAssumedShapeSumExample(util.F2PyTest):
+ sources = [
+ util.getpath("tests", "src", "assumed_shape", "foo_free.f90"),
+ util.getpath("tests", "src", "assumed_shape", "foo_use.f90"),
+ util.getpath("tests", "src", "assumed_shape", "precision.f90"),
+ util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"),
+ util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"),
+ ]
+
+ @pytest.mark.slow
+ def test_all(self):
+ r = self.module.fsum([1, 2])
+ assert r == 3
+ r = self.module.sum([1, 2])
+ assert r == 3
+ r = self.module.sum_with_use([1, 2])
+ assert r == 3
+
+ r = self.module.mod.sum([1, 2])
+ assert r == 3
+ r = self.module.mod.fsum([1, 2])
+ assert r == 3
+
+
+class TestF2cmapOption(TestAssumedShapeSumExample):
+ def setup_method(self):
+ # Use a custom file name for .f2py_f2cmap
+ self.sources = list(self.sources)
+ f2cmap_src = self.sources.pop(-1)
+
+ self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False)
+ with open(f2cmap_src, "rb") as f:
+ self.f2cmap_file.write(f.read())
+ self.f2cmap_file.close()
+
+ self.sources.append(self.f2cmap_file.name)
+ self.options = ["--f2cmap", self.f2cmap_file.name]
+
+ super().setup_method()
+
+ def teardown_method(self):
+ os.unlink(self.f2cmap_file.name)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py
new file mode 100644
index 00000000..e0eacc03
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_block_docstring.py
@@ -0,0 +1,17 @@
+import sys
+import pytest
+from . import util
+
+from numpy.testing import IS_PYPY
+
+
+class TestBlockDocString(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "block_docstring", "foo.f")]
+
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+ def test_block_docstring(self):
+ expected = "bar : 'i'-array(2,3)\n"
+ assert self.module.block.__doc__ == expected
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py
new file mode 100644
index 00000000..018cea4f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_callback.py
@@ -0,0 +1,230 @@
+import math
+import textwrap
+import sys
+import pytest
+import threading
+import traceback
+import time
+
+import numpy as np
+from numpy.testing import IS_PYPY
+from . import util
+
+
+class TestF77Callback(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "callback", "foo.f")]
+
+ @pytest.mark.parametrize("name", "t,t2".split(","))
+ def test_all(self, name):
+ self.check_function(name)
+
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+ def test_docstring(self):
+ expected = textwrap.dedent("""\
+ a = t(fun,[fun_extra_args])
+
+ Wrapper for ``t``.
+
+ Parameters
+ ----------
+ fun : call-back function
+
+ Other Parameters
+ ----------------
+ fun_extra_args : input tuple, optional
+ Default: ()
+
+ Returns
+ -------
+ a : int
+
+ Notes
+ -----
+ Call-back functions::
+
+ def fun(): return a
+ Return objects:
+ a : int
+ """)
+ assert self.module.t.__doc__ == expected
+
+ def check_function(self, name):
+ t = getattr(self.module, name)
+ r = t(lambda: 4)
+ assert r == 4
+ r = t(lambda a: 5, fun_extra_args=(6, ))
+ assert r == 5
+ r = t(lambda a: a, fun_extra_args=(6, ))
+ assert r == 6
+ r = t(lambda a: 5 + a, fun_extra_args=(7, ))
+ assert r == 12
+ r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
+ assert r == 180
+ r = t(math.degrees, fun_extra_args=(math.pi, ))
+ assert r == 180
+
+ r = t(self.module.func, fun_extra_args=(6, ))
+ assert r == 17
+ r = t(self.module.func0)
+ assert r == 11
+ r = t(self.module.func0._cpointer)
+ assert r == 11
+
+ class A:
+ def __call__(self):
+ return 7
+
+ def mth(self):
+ return 9
+
+ a = A()
+ r = t(a)
+ assert r == 7
+ r = t(a.mth)
+ assert r == 9
+
+ @pytest.mark.skipif(sys.platform == 'win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ def test_string_callback(self):
+ def callback(code):
+ if code == "r":
+ return 0
+ else:
+ return 1
+
+ f = getattr(self.module, "string_callback")
+ r = f(callback)
+ assert r == 0
+
+ @pytest.mark.skipif(sys.platform == 'win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ def test_string_callback_array(self):
+ # See gh-10027
+ cu1 = np.zeros((1, ), "S8")
+ cu2 = np.zeros((1, 8), "c")
+ cu3 = np.array([""], "S8")
+
+ def callback(cu, lencu):
+ if cu.shape != (lencu,):
+ return 1
+ if cu.dtype != "S8":
+ return 2
+ if not np.all(cu == b""):
+ return 3
+ return 0
+
+ f = getattr(self.module, "string_callback_array")
+ for cu in [cu1, cu2, cu3]:
+ res = f(callback, cu, cu.size)
+ assert res == 0
+
+ def test_threadsafety(self):
+ # Segfaults if the callback handling is not threadsafe
+
+ errors = []
+
+ def cb():
+ # Sleep here to make it more likely for another thread
+ # to call their callback at the same time.
+ time.sleep(1e-3)
+
+ # Check reentrancy
+ r = self.module.t(lambda: 123)
+ assert r == 123
+
+ return 42
+
+ def runner(name):
+ try:
+ for j in range(50):
+ r = self.module.t(cb)
+ assert r == 42
+ self.check_function(name)
+ except Exception:
+ errors.append(traceback.format_exc())
+
+ threads = [
+ threading.Thread(target=runner, args=(arg, ))
+ for arg in ("t", "t2") for n in range(20)
+ ]
+
+ for t in threads:
+ t.start()
+
+ for t in threads:
+ t.join()
+
+ errors = "\n\n".join(errors)
+ if errors:
+ raise AssertionError(errors)
+
+ def test_hidden_callback(self):
+ try:
+ self.module.hidden_callback(2)
+ except Exception as msg:
+ assert str(msg).startswith("Callback global_f not defined")
+
+ try:
+ self.module.hidden_callback2(2)
+ except Exception as msg:
+ assert str(msg).startswith("cb: Callback global_f not defined")
+
+ self.module.global_f = lambda x: x + 1
+ r = self.module.hidden_callback(2)
+ assert r == 3
+
+ self.module.global_f = lambda x: x + 2
+ r = self.module.hidden_callback(2)
+ assert r == 4
+
+ del self.module.global_f
+ try:
+ self.module.hidden_callback(2)
+ except Exception as msg:
+ assert str(msg).startswith("Callback global_f not defined")
+
+ self.module.global_f = lambda x=0: x + 3
+ r = self.module.hidden_callback(2)
+ assert r == 5
+
+ # reproducer of gh18341
+ r = self.module.hidden_callback2(2)
+ assert r == 3
+
+
+class TestF77CallbackPythonTLS(TestF77Callback):
+ """
+ Callback tests using Python thread-local storage instead of
+ compiler-provided
+ """
+
+ options = ["-DF2PY_USE_PYTHON_TLS"]
+
+
+class TestF90Callback(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "callback", "gh17797.f90")]
+
+ def test_gh17797(self):
+ def incr(x):
+ return x + 123
+
+ y = np.array([1, 2, 3], dtype=np.int64)
+ r = self.module.gh17797(incr, y)
+ assert r == 123 + 1 + 2 + 3
+
+
+class TestGH18335(util.F2PyTest):
+ """The reproduction of the reported issue requires specific input that
+ extensions may break the issue conditions, so the reproducer is
+ implemented as a separate test class. Do not extend this test with
+ other tests!
+ """
+ sources = [util.getpath("tests", "src", "callback", "gh18335.f90")]
+
+ def test_gh18335(self):
+ def foo(x):
+ x[0] += 1
+
+ r = self.module.gh18335(foo)
+ assert r == 123 + 1
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py
new file mode 100644
index 00000000..79998b46
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_character.py
@@ -0,0 +1,592 @@
+import pytest
+import textwrap
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
+import numpy as np
+from numpy.f2py.tests import util
+
+
+class TestCharacterString(util.F2PyTest):
+ # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py']
+ suffix = '.f90'
+ fprefix = 'test_character_string'
+ length_list = ['1', '3', 'star']
+
+ code = ''
+ for length in length_list:
+ fsuffix = length
+ clength = dict(star='(*)').get(length, length)
+
+ code += textwrap.dedent(f"""
+
+ subroutine {fprefix}_input_{fsuffix}(c, o, n)
+ character*{clength}, intent(in) :: c
+ integer n
+ !f2py integer, depend(c), intent(hide) :: n = slen(c)
+ integer*1, dimension(n) :: o
+ !f2py intent(out) o
+ o = transfer(c, o)
+ end subroutine {fprefix}_input_{fsuffix}
+
+ subroutine {fprefix}_output_{fsuffix}(c, o, n)
+ character*{clength}, intent(out) :: c
+ integer n
+ integer*1, dimension(n), intent(in) :: o
+ !f2py integer, depend(o), intent(hide) :: n = len(o)
+ c = transfer(o, c)
+ end subroutine {fprefix}_output_{fsuffix}
+
+ subroutine {fprefix}_array_input_{fsuffix}(c, o, m, n)
+ integer m, i, n
+ character*{clength}, intent(in), dimension(m) :: c
+ !f2py integer, depend(c), intent(hide) :: m = len(c)
+ !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c)
+ integer*1, dimension(m, n), intent(out) :: o
+ do i=1,m
+ o(i, :) = transfer(c(i), o(i, :))
+ end do
+ end subroutine {fprefix}_array_input_{fsuffix}
+
+ subroutine {fprefix}_array_output_{fsuffix}(c, o, m, n)
+ character*{clength}, intent(out), dimension(m) :: c
+ integer n
+ integer*1, dimension(m, n), intent(in) :: o
+ !f2py character(f2py_len=n) :: c
+ !f2py integer, depend(o), intent(hide) :: m = len(o)
+ !f2py integer, depend(o), intent(hide) :: n = shape(o, 1)
+ do i=1,m
+ c(i) = transfer(o(i, :), c(i))
+ end do
+ end subroutine {fprefix}_array_output_{fsuffix}
+
+ subroutine {fprefix}_2d_array_input_{fsuffix}(c, o, m1, m2, n)
+ integer m1, m2, i, j, n
+ character*{clength}, intent(in), dimension(m1, m2) :: c
+ !f2py integer, depend(c), intent(hide) :: m1 = len(c)
+ !f2py integer, depend(c), intent(hide) :: m2 = shape(c, 1)
+ !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c)
+ integer*1, dimension(m1, m2, n), intent(out) :: o
+ do i=1,m1
+ do j=1,m2
+ o(i, j, :) = transfer(c(i, j), o(i, j, :))
+ end do
+ end do
+ end subroutine {fprefix}_2d_array_input_{fsuffix}
+ """)
+
+ @pytest.mark.parametrize("length", length_list)
+ def test_input(self, length):
+ fsuffix = {'(*)': 'star'}.get(length, length)
+ f = getattr(self.module, self.fprefix + '_input_' + fsuffix)
+
+ a = {'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length]
+
+ assert_array_equal(f(a), np.array(list(map(ord, a)), dtype='u1'))
+
+ @pytest.mark.parametrize("length", length_list[:-1])
+ def test_output(self, length):
+ fsuffix = length
+ f = getattr(self.module, self.fprefix + '_output_' + fsuffix)
+
+ a = {'1': 'a', '3': 'abc'}[length]
+
+ assert_array_equal(f(np.array(list(map(ord, a)), dtype='u1')),
+ a.encode())
+
+ @pytest.mark.parametrize("length", length_list)
+ def test_array_input(self, length):
+ fsuffix = length
+ f = getattr(self.module, self.fprefix + '_array_input_' + fsuffix)
+
+ a = np.array([{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length],
+ {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length],
+ ], dtype='S')
+
+ expected = np.array([[c for c in s] for s in a], dtype='u1')
+ assert_array_equal(f(a), expected)
+
+ @pytest.mark.parametrize("length", length_list)
+ def test_array_output(self, length):
+ fsuffix = length
+ f = getattr(self.module, self.fprefix + '_array_output_' + fsuffix)
+
+ expected = np.array(
+ [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length],
+ {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S')
+
+ a = np.array([[c for c in s] for s in expected], dtype='u1')
+ assert_array_equal(f(a), expected)
+
+ @pytest.mark.parametrize("length", length_list)
+ def test_2d_array_input(self, length):
+ fsuffix = length
+ f = getattr(self.module, self.fprefix + '_2d_array_input_' + fsuffix)
+
+ a = np.array([[{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length],
+ {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]],
+ [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length],
+ {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]],
+ dtype='S')
+ expected = np.array([[[c for c in item] for item in row] for row in a],
+ dtype='u1', order='F')
+ assert_array_equal(f(a), expected)
+
+
+class TestCharacter(util.F2PyTest):
+ # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py']
+ suffix = '.f90'
+ fprefix = 'test_character'
+
+ code = textwrap.dedent(f"""
+ subroutine {fprefix}_input(c, o)
+ character, intent(in) :: c
+ integer*1 o
+ !f2py intent(out) o
+ o = transfer(c, o)
+ end subroutine {fprefix}_input
+
+ subroutine {fprefix}_output(c, o)
+ character :: c
+ integer*1, intent(in) :: o
+ !f2py intent(out) c
+ c = transfer(o, c)
+ end subroutine {fprefix}_output
+
+ subroutine {fprefix}_input_output(c, o)
+ character, intent(in) :: c
+ character o
+ !f2py intent(out) o
+ o = c
+ end subroutine {fprefix}_input_output
+
+ subroutine {fprefix}_inout(c, n)
+ character :: c, n
+ !f2py intent(in) n
+ !f2py intent(inout) c
+ c = n
+ end subroutine {fprefix}_inout
+
+ function {fprefix}_return(o) result (c)
+ character :: c
+ character, intent(in) :: o
+ c = transfer(o, c)
+ end function {fprefix}_return
+
+ subroutine {fprefix}_array_input(c, o)
+ character, intent(in) :: c(3)
+ integer*1 o(3)
+ !f2py intent(out) o
+ integer i
+ do i=1,3
+ o(i) = transfer(c(i), o(i))
+ end do
+ end subroutine {fprefix}_array_input
+
+ subroutine {fprefix}_2d_array_input(c, o)
+ character, intent(in) :: c(2, 3)
+ integer*1 o(2, 3)
+ !f2py intent(out) o
+ integer i, j
+ do i=1,2
+ do j=1,3
+ o(i, j) = transfer(c(i, j), o(i, j))
+ end do
+ end do
+ end subroutine {fprefix}_2d_array_input
+
+ subroutine {fprefix}_array_output(c, o)
+ character :: c(3)
+ integer*1, intent(in) :: o(3)
+ !f2py intent(out) c
+ do i=1,3
+ c(i) = transfer(o(i), c(i))
+ end do
+ end subroutine {fprefix}_array_output
+
+ subroutine {fprefix}_array_inout(c, n)
+ character :: c(3), n(3)
+ !f2py intent(in) n(3)
+ !f2py intent(inout) c(3)
+ do i=1,3
+ c(i) = n(i)
+ end do
+ end subroutine {fprefix}_array_inout
+
+ subroutine {fprefix}_2d_array_inout(c, n)
+ character :: c(2, 3), n(2, 3)
+ !f2py intent(in) n(2, 3)
+       !f2py intent(inout) c(2, 3)
+ integer i, j
+ do i=1,2
+ do j=1,3
+ c(i, j) = n(i, j)
+ end do
+ end do
+ end subroutine {fprefix}_2d_array_inout
+
+ function {fprefix}_array_return(o) result (c)
+ character, dimension(3) :: c
+ character, intent(in) :: o(3)
+ do i=1,3
+ c(i) = o(i)
+ end do
+ end function {fprefix}_array_return
+
+ function {fprefix}_optional(o) result (c)
+ character, intent(in) :: o
+ !f2py character o = "a"
+ character :: c
+ c = o
+ end function {fprefix}_optional
+ """)
+
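+    # A note on the intrinsic used throughout the code above: transfer(source,
+    # mold) reinterprets the raw bytes of `source` as the type of `mold`, so
+    # with an integer*1 mold, transfer('a', o) yields 97 == ord('a'), which is
+    # exactly what the tests below assert.
+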
+ @pytest.mark.parametrize("dtype", ['c', 'S1'])
+ def test_input(self, dtype):
+ f = getattr(self.module, self.fprefix + '_input')
+
+ assert_equal(f(np.array('a', dtype=dtype)), ord('a'))
+ assert_equal(f(np.array(b'a', dtype=dtype)), ord('a'))
+ assert_equal(f(np.array(['a'], dtype=dtype)), ord('a'))
+ assert_equal(f(np.array('abc', dtype=dtype)), ord('a'))
+ assert_equal(f(np.array([['a']], dtype=dtype)), ord('a'))
+
+ def test_input_varia(self):
+ f = getattr(self.module, self.fprefix + '_input')
+
+ assert_equal(f('a'), ord('a'))
+ assert_equal(f(b'a'), ord(b'a'))
+ assert_equal(f(''), 0)
+ assert_equal(f(b''), 0)
+ assert_equal(f(b'\0'), 0)
+ assert_equal(f('ab'), ord('a'))
+ assert_equal(f(b'ab'), ord('a'))
+ assert_equal(f(['a']), ord('a'))
+
+ assert_equal(f(np.array(b'a')), ord('a'))
+ assert_equal(f(np.array([b'a'])), ord('a'))
+ a = np.array('a')
+ assert_equal(f(a), ord('a'))
+ a = np.array(['a'])
+ assert_equal(f(a), ord('a'))
+
+ try:
+ f([])
+ except IndexError as msg:
+ if not str(msg).endswith(' got 0-list'):
+ raise
+ else:
+ raise SystemError(f'{f.__name__} should have failed on empty list')
+
+ try:
+ f(97)
+ except TypeError as msg:
+ if not str(msg).endswith(' got int instance'):
+ raise
+ else:
+ raise SystemError(f'{f.__name__} should have failed on int value')
+
+ @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1'])
+ def test_array_input(self, dtype):
+ f = getattr(self.module, self.fprefix + '_array_input')
+
+ assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)),
+ np.array(list(map(ord, 'abc')), dtype='i1'))
+ assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)),
+ np.array(list(map(ord, 'abc')), dtype='i1'))
+
+ def test_array_input_varia(self):
+ f = getattr(self.module, self.fprefix + '_array_input')
+ assert_array_equal(f(['a', 'b', 'c']),
+ np.array(list(map(ord, 'abc')), dtype='i1'))
+ assert_array_equal(f([b'a', b'b', b'c']),
+ np.array(list(map(ord, 'abc')), dtype='i1'))
+
+ try:
+ f(['a', 'b', 'c', 'd'])
+ except ValueError as msg:
+ if not str(msg).endswith(
+ 'th dimension must be fixed to 3 but got 4'):
+ raise
+ else:
+ raise SystemError(
+ f'{f.__name__} should have failed on wrong input')
+
+ @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1'])
+ def test_2d_array_input(self, dtype):
+ f = getattr(self.module, self.fprefix + '_2d_array_input')
+
+ a = np.array([['a', 'b', 'c'],
+ ['d', 'e', 'f']], dtype=dtype, order='F')
+ expected = a.view(np.uint32 if dtype == 'U1' else np.uint8)
+ assert_array_equal(f(a), expected)
+
+ def test_output(self):
+ f = getattr(self.module, self.fprefix + '_output')
+
+ assert_equal(f(ord(b'a')), b'a')
+ assert_equal(f(0), b'\0')
+
+ def test_array_output(self):
+ f = getattr(self.module, self.fprefix + '_array_output')
+
+ assert_array_equal(f(list(map(ord, 'abc'))),
+ np.array(list('abc'), dtype='S1'))
+
+ def test_input_output(self):
+ f = getattr(self.module, self.fprefix + '_input_output')
+
+ assert_equal(f(b'a'), b'a')
+ assert_equal(f('a'), b'a')
+ assert_equal(f(''), b'\0')
+
+ @pytest.mark.parametrize("dtype", ['c', 'S1'])
+ def test_inout(self, dtype):
+ f = getattr(self.module, self.fprefix + '_inout')
+
+ a = np.array(list('abc'), dtype=dtype)
+ f(a, 'A')
+ assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype))
+ f(a[1:], 'B')
+ assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype))
+
+ a = np.array(['abc'], dtype=dtype)
+ f(a, 'A')
+ assert_array_equal(a, np.array(['Abc'], dtype=a.dtype))
+
+ def test_inout_varia(self):
+ f = getattr(self.module, self.fprefix + '_inout')
+ a = np.array('abc', dtype='S3')
+ f(a, 'A')
+ assert_array_equal(a, np.array('Abc', dtype=a.dtype))
+
+ a = np.array(['abc'], dtype='S3')
+ f(a, 'A')
+ assert_array_equal(a, np.array(['Abc'], dtype=a.dtype))
+
+ try:
+ f('abc', 'A')
+ except ValueError as msg:
+ if not str(msg).endswith(' got 3-str'):
+ raise
+ else:
+ raise SystemError(f'{f.__name__} should have failed on str value')
+
+ @pytest.mark.parametrize("dtype", ['c', 'S1'])
+ def test_array_inout(self, dtype):
+ f = getattr(self.module, self.fprefix + '_array_inout')
+ n = np.array(['A', 'B', 'C'], dtype=dtype, order='F')
+
+ a = np.array(['a', 'b', 'c'], dtype=dtype, order='F')
+ f(a, n)
+ assert_array_equal(a, n)
+
+ a = np.array(['a', 'b', 'c', 'd'], dtype=dtype)
+ f(a[1:], n)
+ assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype))
+
+ a = np.array([['a', 'b', 'c']], dtype=dtype, order='F')
+ f(a, n)
+ assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype))
+
+ a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F')
+ try:
+ f(a, n)
+ except ValueError as msg:
+ if not str(msg).endswith(
+ 'th dimension must be fixed to 3 but got 4'):
+ raise
+ else:
+ raise SystemError(
+ f'{f.__name__} should have failed on wrong input')
+
+ @pytest.mark.parametrize("dtype", ['c', 'S1'])
+ def test_2d_array_inout(self, dtype):
+ f = getattr(self.module, self.fprefix + '_2d_array_inout')
+ n = np.array([['A', 'B', 'C'],
+ ['D', 'E', 'F']],
+ dtype=dtype, order='F')
+ a = np.array([['a', 'b', 'c'],
+ ['d', 'e', 'f']],
+ dtype=dtype, order='F')
+ f(a, n)
+ assert_array_equal(a, n)
+
+ def test_return(self):
+ f = getattr(self.module, self.fprefix + '_return')
+
+ assert_equal(f('a'), b'a')
+
+ @pytest.mark.skip('fortran function returning array segfaults')
+ def test_array_return(self):
+ f = getattr(self.module, self.fprefix + '_array_return')
+
+ a = np.array(list('abc'), dtype='S1')
+ assert_array_equal(f(a), a)
+
+ def test_optional(self):
+ f = getattr(self.module, self.fprefix + '_optional')
+
+ assert_equal(f(), b"a")
+ assert_equal(f(b'B'), b"B")
+
+
+class TestMiscCharacter(util.F2PyTest):
+ # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py']
+ suffix = '.f90'
+ fprefix = 'test_misc_character'
+
+ code = textwrap.dedent(f"""
+ subroutine {fprefix}_gh18684(x, y, m)
+ character(len=5), dimension(m), intent(in) :: x
+ character*5, dimension(m), intent(out) :: y
+ integer i, m
+ !f2py integer, intent(hide), depend(x) :: m = f2py_len(x)
+ do i=1,m
+ y(i) = x(i)
+ end do
+ end subroutine {fprefix}_gh18684
+
+ subroutine {fprefix}_gh6308(x, i)
+ integer i
+ !f2py check(i>=0 && i<12) i
+ character*5 name, x
+ common name(12)
+ name(i + 1) = x
+ end subroutine {fprefix}_gh6308
+
+ subroutine {fprefix}_gh4519(x)
+ character(len=*), intent(in) :: x(:)
+ !f2py intent(out) x
+ integer :: i
+ do i=1, size(x)
+ print*, "x(",i,")=", x(i)
+ end do
+ end subroutine {fprefix}_gh4519
+
+ pure function {fprefix}_gh3425(x) result (y)
+ character(len=*), intent(in) :: x
+ character(len=len(x)) :: y
+ integer :: i
+ do i = 1, len(x)
+ j = iachar(x(i:i))
+ if (j>=iachar("a") .and. j<=iachar("z") ) then
+ y(i:i) = achar(j-32)
+ else
+ y(i:i) = x(i:i)
+ endif
+ end do
+ end function {fprefix}_gh3425
+
+ subroutine {fprefix}_character_bc_new(x, y, z)
+ character, intent(in) :: x
+ character, intent(out) :: y
+ !f2py character, depend(x) :: y = x
+ !f2py character, dimension((x=='a'?1:2)), depend(x), intent(out) :: z
+ character, dimension(*) :: z
+ !f2py character, optional, check(x == 'a' || x == 'b') :: x = 'a'
+ !f2py callstatement (*f2py_func)(&x, &y, z)
+ !f2py callprotoargument character*, character*, character*
+ if (y.eq.x) then
+ y = x
+ else
+ y = 'e'
+ endif
+ z(1) = 'c'
+ end subroutine {fprefix}_character_bc_new
+
+ subroutine {fprefix}_character_bc_old(x, y, z)
+ character, intent(in) :: x
+ character, intent(out) :: y
+ !f2py character, depend(x) :: y = x[0]
+ !f2py character, dimension((*x=='a'?1:2)), depend(x), intent(out) :: z
+ character, dimension(*) :: z
+ !f2py character, optional, check(*x == 'a' || x[0] == 'b') :: x = 'a'
+ !f2py callstatement (*f2py_func)(x, y, z)
+ !f2py callprotoargument char*, char*, char*
+ if (y.eq.x) then
+ y = x
+ else
+ y = 'e'
+ endif
+ z(1) = 'c'
+ end subroutine {fprefix}_character_bc_old
+ """)
+
+ def test_gh18684(self):
+ # Test character(len=5) and character*5 usages
+ f = getattr(self.module, self.fprefix + '_gh18684')
+ x = np.array(["abcde", "fghij"], dtype='S5')
+ y = f(x)
+
+ assert_array_equal(x, y)
+
+ def test_gh6308(self):
+ # Test character string array in a common block
+ f = getattr(self.module, self.fprefix + '_gh6308')
+
+ assert_equal(self.module._BLNK_.name.dtype, np.dtype('S5'))
+ assert_equal(len(self.module._BLNK_.name), 12)
+ f("abcde", 0)
+ assert_equal(self.module._BLNK_.name[0], b"abcde")
+ f("12345", 5)
+ assert_equal(self.module._BLNK_.name[5], b"12345")
+
+ def test_gh4519(self):
+ # Test array of assumed length strings
+ f = getattr(self.module, self.fprefix + '_gh4519')
+
+ for x, expected in [
+ ('a', dict(shape=(), dtype=np.dtype('S1'))),
+ ('text', dict(shape=(), dtype=np.dtype('S4'))),
+ (np.array(['1', '2', '3'], dtype='S1'),
+ dict(shape=(3,), dtype=np.dtype('S1'))),
+ (['1', '2', '34'],
+ dict(shape=(3,), dtype=np.dtype('S2'))),
+ (['', ''], dict(shape=(2,), dtype=np.dtype('S1')))]:
+ r = f(x)
+ for k, v in expected.items():
+ assert_equal(getattr(r, k), v)
+
+ def test_gh3425(self):
+ # Test returning a copy of assumed length string
+ f = getattr(self.module, self.fprefix + '_gh3425')
+ # f is equivalent to bytes.upper
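+        # (the Fortran loop shifts iachar codes of 'a'..'z' down by 32,
+        #  e.g. 'a' (97) becomes 'A' (65), and leaves other characters as-is)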
+
+ assert_equal(f('abC'), b'ABC')
+ assert_equal(f(''), b'')
+ assert_equal(f('abC12d'), b'ABC12D')
+
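+    # The _character_bc_{new,old} pair above exercises both generations of
+    # f2py character directives: the new style passes character arguments by
+    # address (callstatement with &x, callprotoargument character*), while the
+    # old style works with C char pointers directly (*x, x[0], char*).
+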
+ @pytest.mark.parametrize("state", ['new', 'old'])
+ def test_character_bc(self, state):
+ f = getattr(self.module, self.fprefix + '_character_bc_' + state)
+
+ c, a = f()
+ assert_equal(c, b'a')
+ assert_equal(len(a), 1)
+
+ c, a = f(b'b')
+ assert_equal(c, b'b')
+ assert_equal(len(a), 2)
+
+ assert_raises(Exception, lambda: f(b'c'))
+
+
+class TestStringScalarArr(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "string", "scalar_string.f90")]
+
+ @pytest.mark.slow
+ def test_char(self):
+ for out in (self.module.string_test.string,
+ self.module.string_test.string77):
+ expected = ()
+ assert out.shape == expected
+ expected = '|S8'
+ assert out.dtype == expected
+
+ @pytest.mark.slow
+ def test_char_arr(self):
+ for out in (self.module.string_test.strarr,
+ self.module.string_test.strarr77):
+            expected = (5, 7)
+ assert out.shape == expected
+ expected = '|S12'
+ assert out.dtype == expected
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py
new file mode 100644
index 00000000..8a4b221e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_common.py
@@ -0,0 +1,18 @@
+import os
+import sys
+import pytest
+
+import numpy as np
+from . import util
+
+
+class TestCommonBlock(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "common", "block.f")]
+
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
+ def test_common_block(self):
+ self.module.initcb()
+ assert self.module.block.long_bn == np.array(1.0, dtype=np.float64)
+ assert self.module.block.string_bn == np.array("2", dtype="|S1")
+ assert self.module.block.ok == np.array(3, dtype=np.int32)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py
new file mode 100644
index 00000000..3c16f319
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_compile_function.py
@@ -0,0 +1,117 @@
+"""See https://github.com/numpy/numpy/pull/11937.
+
+"""
+import sys
+import os
+import uuid
+from importlib import import_module
+import pytest
+
+import numpy.f2py
+
+from . import util
+
+
+def setup_module():
+ if not util.has_c_compiler():
+ pytest.skip("Needs C compiler")
+ if not util.has_f77_compiler():
+ pytest.skip("Needs FORTRAN 77 compiler")
+
+
+# extra_args can be a list (since gh-11937) or string.
+# also test absence of extra_args
+@pytest.mark.parametrize("extra_args",
+ [["--noopt", "--debug"], "--noopt --debug", ""])
+@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
+def test_f2py_init_compile(extra_args):
+ # flush through the f2py __init__ compile() function code path as a
+ # crude test for input handling following migration from
+ # exec_command() to subprocess.check_output() in gh-11937
+
+ # the Fortran 77 syntax requires 6 spaces before any commands, but
+    # more space may be added.
+ fsource = """
+ integer function foo()
+ foo = 10 + 5
+ return
+ end
+ """
+ # use various helper functions in util.py to enable robust build /
+ # compile and reimport cycle in test suite
+ moddir = util.get_module_dir()
+ modname = util.get_temp_module_name()
+
+ cwd = os.getcwd()
+ target = os.path.join(moddir, str(uuid.uuid4()) + ".f")
+ # try running compile() with and without a source_fn provided so
+ # that the code path where a temporary file for writing Fortran
+ # source is created is also explored
+ for source_fn in [target, None]:
+ # mimic the path changing behavior used by build_module() in
+ # util.py, but don't actually use build_module() because it has
+ # its own invocation of subprocess that circumvents the
+ # f2py.compile code block under test
+ with util.switchdir(moddir):
+ ret_val = numpy.f2py.compile(fsource,
+ modulename=modname,
+ extra_args=extra_args,
+ source_fn=source_fn)
+
+ # check for compile success return value
+ assert ret_val == 0
+
+ # we are not currently able to import the Python-Fortran
+ # interface module on Windows / Appveyor, even though we do get
+ # successful compilation on that platform with Python 3.x
+ if sys.platform != "win32":
+ # check for sensible result of Fortran function; that means
+ # we can import the module name in Python and retrieve the
+ # result of the sum operation
+ return_check = import_module(modname)
+ calc_result = return_check.foo()
+ assert calc_result == 15
+ # Removal from sys.modules, is not as such necessary. Even with
+ # removal, the module (dict) stays alive.
+ del sys.modules[modname]
+
+
+def test_f2py_init_compile_failure():
+ # verify an appropriate integer status value returned by
+ # f2py.compile() when invalid Fortran is provided
+ ret_val = numpy.f2py.compile(b"invalid")
+ assert ret_val == 1
+
+
+def test_f2py_init_compile_bad_cmd():
+ # verify that usage of invalid command in f2py.compile() returns
+ # status value of 127 for historic consistency with exec_command()
+ # error handling
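+    # (127 is also the conventional shell exit status for "command not
+    # found", which is effectively what a nonexistent sys.executable is)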
+
+ # patch the sys Python exe path temporarily to induce an OSError
+ # downstream NOTE: how bad of an idea is this patching?
+ try:
+ temp = sys.executable
+ sys.executable = "does not exist"
+
+ # the OSError should take precedence over invalid Fortran
+ ret_val = numpy.f2py.compile(b"invalid")
+ assert ret_val == 127
+ finally:
+ sys.executable = temp
+
+
+@pytest.mark.parametrize(
+ "fsource",
+ [
+ "program test_f2py\nend program test_f2py",
+ b"program test_f2py\nend program test_f2py",
+ ],
+)
+def test_compile_from_strings(tmpdir, fsource):
+ # Make sure we can compile str and bytes gh-12796
+ with util.switchdir(tmpdir):
+ ret_val = numpy.f2py.compile(fsource,
+ modulename="test_compile_from_strings",
+ extension=".f90")
+ assert ret_val == 0
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py
new file mode 100644
index 00000000..73ac4e27
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_crackfortran.py
@@ -0,0 +1,278 @@
+import importlib
+import codecs
+import unicodedata
+import pytest
+import numpy as np
+from numpy.f2py.crackfortran import markinnerspaces
+from . import util
+from numpy.f2py import crackfortran
+import textwrap
+
+
+class TestNoSpace(util.F2PyTest):
+ # issue gh-15035: add handling for endsubroutine, endfunction with no space
+ # between "end" and the block name
+ sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")]
+
+ def test_module(self):
+ k = np.array([1, 2, 3], dtype=np.float64)
+ w = np.array([1, 2, 3], dtype=np.float64)
+ self.module.subb(k)
+ assert np.allclose(k, w + 1)
+ self.module.subc([w, k])
+ assert np.allclose(k, w + 1)
+ assert self.module.t0("23") == b"2"
+
+
+class TestPublicPrivate:
+ def test_defaultPrivate(self):
+ fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ mod = mod[0]
+ assert "private" in mod["vars"]["a"]["attrspec"]
+ assert "public" not in mod["vars"]["a"]["attrspec"]
+ assert "private" in mod["vars"]["b"]["attrspec"]
+ assert "public" not in mod["vars"]["b"]["attrspec"]
+ assert "private" not in mod["vars"]["seta"]["attrspec"]
+ assert "public" in mod["vars"]["seta"]["attrspec"]
+
+ def test_defaultPublic(self, tmp_path):
+ fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ mod = mod[0]
+ assert "private" in mod["vars"]["a"]["attrspec"]
+ assert "public" not in mod["vars"]["a"]["attrspec"]
+ assert "private" not in mod["vars"]["seta"]["attrspec"]
+ assert "public" in mod["vars"]["seta"]["attrspec"]
+
+ def test_access_type(self, tmp_path):
+ fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ tt = mod[0]['vars']
+ assert set(tt['a']['attrspec']) == {'private', 'bind(c)'}
+ assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'}
+ assert set(tt['c']['attrspec']) == {'public'}
+
+
+class TestModuleProcedure:
+ def test_moduleOperators(self, tmp_path):
+ fpath = util.getpath("tests", "src", "crackfortran", "operators.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ mod = mod[0]
+ assert "body" in mod and len(mod["body"]) == 9
+ assert mod["body"][1]["name"] == "operator(.item.)"
+ assert "implementedby" in mod["body"][1]
+ assert mod["body"][1]["implementedby"] == \
+ ["item_int", "item_real"]
+ assert mod["body"][2]["name"] == "operator(==)"
+ assert "implementedby" in mod["body"][2]
+ assert mod["body"][2]["implementedby"] == ["items_are_equal"]
+ assert mod["body"][3]["name"] == "assignment(=)"
+ assert "implementedby" in mod["body"][3]
+ assert mod["body"][3]["implementedby"] == \
+ ["get_int", "get_real"]
+
+ def test_notPublicPrivate(self, tmp_path):
+ fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ mod = mod[0]
+ assert mod['vars']['a']['attrspec'] == ['private', ]
+ assert mod['vars']['b']['attrspec'] == ['public', ]
+ assert mod['vars']['seta']['attrspec'] == ['public', ]
+
+
+class TestExternal(util.F2PyTest):
+ # issue gh-17859: add external attribute support
+ sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")]
+
+ def test_external_as_statement(self):
+ def incr(x):
+ return x + 123
+
+ r = self.module.external_as_statement(incr)
+ assert r == 123
+
+ def test_external_as_attribute(self):
+ def incr(x):
+ return x + 123
+
+ r = self.module.external_as_attribute(incr)
+ assert r == 123
+
+
+class TestCrackFortran(util.F2PyTest):
+ # gh-2848: commented lines between parameters in subroutine parameter lists
+ sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")]
+
+ def test_gh2848(self):
+ r = self.module.gh2848(1, 2)
+ assert r == (1, 2)
+
+
+class TestMarkinnerspaces:
+ # gh-14118: markinnerspaces does not handle multiple quotations
+
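+    # markinnerspaces replaces spaces that occur inside quoted strings with
+    # the sentinel "@_@" so that later passes can split on the remaining,
+    # structural spaces; e.g. markinnerspaces("a 'b c' d") == "a 'b@_@c' d".
+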
+ def test_do_not_touch_normal_spaces(self):
+ test_list = ["a ", " a", "a b c", "'abcdefghij'"]
+ for i in test_list:
+ assert markinnerspaces(i) == i
+
+ def test_one_relevant_space(self):
+ assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'"
+ assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"'
+
+ def test_ignore_inner_quotes(self):
+ assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e"
+ assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e"
+
+ def test_multiple_relevant_spaces(self):
+ assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
+ assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
+
+
+class TestDimSpec(util.F2PyTest):
+ """This test suite tests various expressions that are used as dimension
+ specifications.
+
+ There exists two usage cases where analyzing dimensions
+ specifications are important.
+
+ In the first case, the size of output arrays must be defined based
+ on the inputs to a Fortran function. Because Fortran supports
+ arbitrary bases for indexing, for instance, `arr(lower:upper)`,
+ f2py has to evaluate an expression `upper - lower + 1` where
+ `lower` and `upper` are arbitrary expressions of input parameters.
+ The evaluation is performed in C, so f2py has to translate Fortran
+ expressions to valid C expressions (an alternative approach is
+ that a developer specifies the corresponding C expressions in a
+ .pyf file).
+
+ In the second case, when user provides an input array with a given
+ size but some hidden parameters used in dimensions specifications
+ need to be determined based on the input array size. This is a
+ harder problem because f2py has to solve the inverse problem: find
+ a parameter `p` such that `upper(p) - lower(p) + 1` equals to the
+ size of input array. In the case when this equation cannot be
+ solved (e.g. because the input array size is wrong), raise an
+ error before calling the Fortran function (that otherwise would
+ likely crash Python process when the size of input arrays is
+ wrong). f2py currently supports this case only when the equation
+ is linear with respect to unknown parameter.
+
+ """
+
+ suffix = ".f90"
+
+ code_template = textwrap.dedent("""
+ function get_arr_size_{count}(a, n) result (length)
+ integer, intent(in) :: n
+ integer, dimension({dimspec}), intent(out) :: a
+ integer length
+ length = size(a)
+ end function
+
+ subroutine get_inv_arr_size_{count}(a, n)
+ integer :: n
+ ! the value of n is computed in f2py wrapper
+ !f2py intent(out) n
+ integer, dimension({dimspec}), intent(in) :: a
+ if (a({first}).gt.0) then
+ print*, "a=", a
+ endif
+ end subroutine
+ """)
+
+ linear_dimspecs = [
+ "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)",
+ "2*n, n"
+ ]
+ nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
+ all_dimspecs = linear_dimspecs + nonlinear_dimspecs
+
+ code = ""
+ for count, dimspec in enumerate(all_dimspecs):
+ lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')]
+ code += code_template.format(
+ count=count,
+ dimspec=dimspec,
+ first=", ".join(lst),
+ )
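+    # For illustration: dimspec "3*n:20" yields first="3*n" (its lower
+    # bound), while the two-dimensional "2*n, n" yields first="1, 1" (the
+    # default lower bounds), so a({first}) in the template above always
+    # addresses the first element of the array.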
+
+ @pytest.mark.parametrize("dimspec", all_dimspecs)
+ def test_array_size(self, dimspec):
+
+ count = self.all_dimspecs.index(dimspec)
+ get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+
+ for n in [1, 2, 3, 4, 5]:
+ sz, a = get_arr_size(n)
+ assert a.size == sz
+
+ @pytest.mark.parametrize("dimspec", all_dimspecs)
+ def test_inv_array_size(self, dimspec):
+
+ count = self.all_dimspecs.index(dimspec)
+ get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+ get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
+
+ for n in [1, 2, 3, 4, 5]:
+ sz, a = get_arr_size(n)
+ if dimspec in self.nonlinear_dimspecs:
+                # n must be specified as input; the call will ensure
+                # that a and n are compatible:
+ n1 = get_inv_arr_size(a, n)
+ else:
+ # in case of linear dependence, n can be determined
+ # from the shape of a:
+ n1 = get_inv_arr_size(a)
+            # n1 may differ from n (for instance, when the size of `a`
+            # involves an integer division of `n`), but it must produce
+            # an array of the same size:
+ sz1, _ = get_arr_size(n1)
+ assert sz == sz1, (n, n1, sz, sz1)
+
+
+class TestModuleDeclaration:
+ def test_dependencies(self, tmp_path):
+ fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90")
+ mod = crackfortran.crackfortran([str(fpath)])
+ assert len(mod) == 1
+ assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
+
+
+class TestEval(util.F2PyTest):
+ def test_eval_scalar(self):
+ eval_scalar = crackfortran._eval_scalar
+
+ assert eval_scalar('123', {}) == '123'
+ assert eval_scalar('12 + 3', {}) == '15'
+ assert eval_scalar('a + b', dict(a=1, b=2)) == '3'
+ assert eval_scalar('"123"', {}) == "'123'"
+
+
+class TestFortranReader(util.F2PyTest):
+ @pytest.mark.parametrize("encoding",
+ ['ascii', 'utf-8', 'utf-16', 'utf-32'])
+ def test_input_encoding(self, tmp_path, encoding):
+ # gh-635
+ f_path = tmp_path / f"input_with_{encoding}_encoding.f90"
+ with f_path.open('w', encoding=encoding) as ff:
+ ff.write("""
+ subroutine foo()
+ end subroutine foo
+ """)
+ mod = crackfortran.crackfortran([str(f_path)])
+ assert mod[0]['name'] == 'foo'
+
+
+class TestUnicodeComment(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")]
+
+ @pytest.mark.skipif(
+ (importlib.util.find_spec("charset_normalizer") is None),
+ reason="test requires charset_normalizer which is not installed",
+ )
+ def test_encoding_comment(self):
+ self.module.foo(3)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py
new file mode 100644
index 00000000..6631dd82
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_docs.py
@@ -0,0 +1,55 @@
+import os
+import pytest
+import numpy as np
+from numpy.testing import assert_array_equal, assert_equal
+from . import util
+
+
+def get_docdir():
+ # assuming that documentation tests are run from a source
+ # directory
+ return os.path.abspath(os.path.join(
+ os.path.dirname(__file__),
+ '..', '..', '..',
+ 'doc', 'source', 'f2py', 'code'))
+
+
+pytestmark = pytest.mark.skipif(
+ not os.path.isdir(get_docdir()),
+ reason=('Could not find f2py documentation sources'
+            f' ({get_docdir()} does not exist)'))
+
+
+def _path(*a):
+ return os.path.join(*((get_docdir(),) + a))
+
+
+class TestDocAdvanced(util.F2PyTest):
+ # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py']
+ sources = [_path('asterisk1.f90'), _path('asterisk2.f90'),
+ _path('ftype.f')]
+
+ def test_asterisk1(self):
+ foo = getattr(self.module, 'foo1')
+ assert_equal(foo(), b'123456789A12')
+
+ def test_asterisk2(self):
+ foo = getattr(self.module, 'foo2')
+ assert_equal(foo(2), b'12')
+ assert_equal(foo(12), b'123456789A12')
+ assert_equal(foo(24), b'123456789A123456789B')
+
+ def test_ftype(self):
+ ftype = self.module
+ ftype.foo()
+ assert_equal(ftype.data.a, 0)
+ ftype.data.a = 3
+ ftype.data.x = [1, 2, 3]
+ assert_equal(ftype.data.a, 3)
+ assert_array_equal(ftype.data.x,
+ np.array([1, 2, 3], dtype=np.float32))
+ ftype.data.x[1] = 45
+ assert_array_equal(ftype.data.x,
+ np.array([1, 45, 3], dtype=np.float32))
+
+ # TODO: implement test methods for other example Fortran codes
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py
new file mode 100644
index 00000000..d2967e4f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2cmap.py
@@ -0,0 +1,15 @@
+from . import util
+import numpy as np
+
+class TestF2Cmap(util.F2PyTest):
+ sources = [
+ util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"),
+ util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap")
+ ]
+
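+    # The .f2py_f2cmap file maps Fortran kind declarations onto C types
+    # (e.g. real(kind=real64) -> double, integer(kind=int64) -> long_long;
+    # cf. test_f2cmap in test_f2py2e.py) so that the iso_fortran_env kinds
+    # used in the source wrap cleanly.
+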
+ # gh-15095
+ def test_long_long_map(self):
+ inp = np.ones(3)
+ out = self.module.func1(inp)
+ exp_out = 3
+ assert out == exp_out
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py
new file mode 100644
index 00000000..2c10f046
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_f2py2e.py
@@ -0,0 +1,769 @@
+import re
+import shlex
+import subprocess
+import sys
+import textwrap
+from pathlib import Path
+from collections import namedtuple
+
+import pytest
+
+from . import util
+from numpy.f2py.f2py2e import main as f2pycli
+
+#########################
+# CLI utils and classes #
+#########################
+
+PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf")
+
+
+def get_io_paths(fname_inp, mname="untitled"):
+ """Takes in a temporary file for testing and returns the expected output and input paths
+
+ Here expected output is essentially one of any of the possible generated
+ files.
+
+    .. note::
+
+ Since this does not actually run f2py, none of these are guaranteed to
+ exist, and module names are typically incorrect
+
+ Parameters
+ ----------
+ fname_inp : str
+ The input filename
+ mname : str, optional
+ The name of the module, untitled by default
+
+ Returns
+ -------
+ genp : NamedTuple PPaths
+ The possible paths which are generated, not all of which exist
+ """
+ bpath = Path(fname_inp)
+ return PPaths(
+ finp=bpath.with_suffix(".f"),
+ f90inp=bpath.with_suffix(".f90"),
+ pyf=bpath.with_suffix(".pyf"),
+ wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"),
+ wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"),
+ cmodf=bpath.with_name(f"{mname}module.c"),
+ )
+
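+# For a hypothetical get_io_paths("/tmp/hello.f90", mname="blah"), the
+# returned tuple would hold /tmp/hello.f, /tmp/hello.f90, /tmp/hello.pyf,
+# /tmp/blah-f2pywrappers.f, /tmp/blah-f2pywrappers2.f90 and /tmp/blahmodule.c,
+# only some of which exist after an actual f2py run.
+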
+
+##########################
+# CLI Fixtures and Tests #
+##########################
+
+
+@pytest.fixture(scope="session")
+def hello_world_f90(tmpdir_factory):
+ """Generates a single f90 file for testing"""
+ fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text()
+ fn = tmpdir_factory.getbasetemp() / "hello.f90"
+ fn.write_text(fdat, encoding="ascii")
+ return fn
+
+
+@pytest.fixture(scope="session")
+def hello_world_f77(tmpdir_factory):
+ """Generates a single f77 file for testing"""
+ fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text()
+ fn = tmpdir_factory.getbasetemp() / "hello.f"
+ fn.write_text(fdat, encoding="ascii")
+ return fn
+
+
+@pytest.fixture(scope="session")
+def retreal_f77(tmpdir_factory):
+ """Generates a single f77 file for testing"""
+ fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text()
+ fn = tmpdir_factory.getbasetemp() / "foo.f"
+ fn.write_text(fdat, encoding="ascii")
+ return fn
+
+
+@pytest.fixture(scope="session")
+def f2cmap_f90(tmpdir_factory):
+ """Generates a single f90 file for testing"""
+ fdat = util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90").read_text()
+ f2cmap = util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap").read_text()
+ fn = tmpdir_factory.getbasetemp() / "f2cmap.f90"
+ fmap = tmpdir_factory.getbasetemp() / "mapfile"
+ fn.write_text(fdat, encoding="ascii")
+ fmap.write_text(f2cmap, encoding="ascii")
+ return fn
+
+
+def test_gen_pyf(capfd, hello_world_f90, monkeypatch):
+ """Ensures that a signature file is generated via the CLI
+ CLI :: -h
+ """
+ ipath = Path(hello_world_f90)
+ opath = Path(hello_world_f90).stem + ".pyf"
+ monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli() # Generate wrappers
+ out, _ = capfd.readouterr()
+ assert "Saving signatures to file" in out
+ assert Path(f'{opath}').exists()
+
+
+def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch):
+ """Ensures that a signature file can be dumped to stdout
+ CLI :: -h
+ """
+ ipath = Path(hello_world_f90)
+ monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split())
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "Saving signatures to file" in out
+ assert "function hi() ! in " in out
+
+
+def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch):
+ """Ensures that the CLI refuses to overwrite signature files
+ CLI :: -h without --overwrite-signature
+ """
+ ipath = Path(hello_world_f90)
+ monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split())
+
+ with util.switchdir(ipath.parent):
+ Path("faker.pyf").write_text("Fake news", encoding="ascii")
+ with pytest.raises(SystemExit):
+ f2pycli() # Refuse to overwrite
+ _, err = capfd.readouterr()
+ assert "Use --overwrite-signature to overwrite" in err
+
+
+@pytest.mark.xfail
+def test_f2py_skip(capfd, retreal_f77, monkeypatch):
+ """Tests that functions can be skipped
+ CLI :: skip:
+ """
+ foutl = get_io_paths(retreal_f77, mname="test")
+ ipath = foutl.finp
+ toskip = "t0 t4 t8 sd s8 s4"
+ remaining = "td s0"
+ monkeypatch.setattr(
+ sys, "argv",
+ f'f2py {ipath} -m test skip: {toskip}'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, err = capfd.readouterr()
+ for skey in toskip.split():
+ assert (
+ f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.'
+ in err)
+ for rkey in remaining.split():
+ assert f'Constructing wrapper function "{rkey}"' in out
+
+
+def test_f2py_only(capfd, retreal_f77, monkeypatch):
+ """Test that functions can be kept by only:
+ CLI :: only:
+ """
+ foutl = get_io_paths(retreal_f77, mname="test")
+ ipath = foutl.finp
+ toskip = "t0 t4 t8 sd s8 s4"
+ tokeep = "td s0"
+ monkeypatch.setattr(
+ sys, "argv",
+ f'f2py {ipath} -m test only: {tokeep}'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, err = capfd.readouterr()
+ for skey in toskip.split():
+ assert (
+ f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.'
+ in err)
+ for rkey in tokeep.split():
+ assert f'Constructing wrapper function "{rkey}"' in out
+
+
+def test_file_processing_switch(capfd, hello_world_f90, retreal_f77,
+ monkeypatch):
+ """Tests that it is possible to return to file processing mode
+ CLI :: :
+ BUG: numpy-gh #20520
+ """
+ foutl = get_io_paths(retreal_f77, mname="test")
+ ipath = foutl.finp
+ toskip = "t0 t4 t8 sd s8 s4"
+ ipath2 = Path(hello_world_f90)
+ tokeep = "td s0 hi" # hi is in ipath2
+ mname = "blah"
+ monkeypatch.setattr(
+ sys,
+ "argv",
+ f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split(
+ ),
+ )
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, err = capfd.readouterr()
+ for skey in toskip.split():
+ assert (
+ f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.'
+ in err)
+ for rkey in tokeep.split():
+ assert f'Constructing wrapper function "{rkey}"' in out
+
+
+def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
+ """Checks the generation of files based on a module name
+ CLI :: -m
+ """
+ MNAME = "hi"
+ foutl = get_io_paths(hello_world_f90, mname=MNAME)
+ ipath = foutl.f90inp
+ monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
+ with util.switchdir(ipath.parent):
+ f2pycli()
+
+ # Always generate C module
+ assert Path.exists(foutl.cmodf)
+ # File contains a function, check for F77 wrappers
+ assert Path.exists(foutl.wrap77)
+
+
+def test_lower_cmod(capfd, hello_world_f77, monkeypatch):
+ """Lowers cases by flag or when -h is present
+
+ CLI :: --[no-]lower
+ """
+ foutl = get_io_paths(hello_world_f77, mname="test")
+ ipath = foutl.finp
+ capshi = re.compile(r"HI\(\)")
+ capslo = re.compile(r"hi\(\)")
+ # Case I: --lower is passed
+ monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split())
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert capslo.search(out) is not None
+ assert capshi.search(out) is None
+ # Case II: --no-lower is passed
+ monkeypatch.setattr(sys, "argv",
+ f'f2py {ipath} -m test --no-lower'.split())
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert capslo.search(out) is None
+ assert capshi.search(out) is not None
+
+
+def test_lower_sig(capfd, hello_world_f77, monkeypatch):
+ """Lowers cases in signature files by flag or when -h is present
+
+ CLI :: --[no-]lower -h
+ """
+ foutl = get_io_paths(hello_world_f77, mname="test")
+ ipath = foutl.finp
+ # Signature files
+ capshi = re.compile(r"Block: HI")
+ capslo = re.compile(r"Block: hi")
+ # Case I: --lower is implied by -h
+ # TODO: Clean up to prevent passing --overwrite-signature
+ monkeypatch.setattr(
+ sys,
+ "argv",
+ f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(),
+ )
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert capslo.search(out) is not None
+ assert capshi.search(out) is None
+
+ # Case II: --no-lower overrides -h
+ monkeypatch.setattr(
+ sys,
+ "argv",
+ f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower'
+ .split(),
+ )
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert capslo.search(out) is None
+ assert capshi.search(out) is not None
+
+
+def test_build_dir(capfd, hello_world_f90, monkeypatch):
+ """Ensures that the build directory can be specified
+
+ CLI :: --build-dir
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ odir = "tttmp"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --build-dir {odir}'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert f"Wrote C/API module \"{mname}\"" in out
+
+
+def test_overwrite(capfd, hello_world_f90, monkeypatch):
+ """Ensures that the build directory can be specified
+
+ CLI :: --overwrite-signature
+ """
+ ipath = Path(hello_world_f90)
+ monkeypatch.setattr(
+ sys, "argv",
+ f'f2py -h faker.pyf {ipath} --overwrite-signature'.split())
+
+ with util.switchdir(ipath.parent):
+ Path("faker.pyf").write_text("Fake news", encoding="ascii")
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "Saving signatures to file" in out
+
+
+def test_latexdoc(capfd, hello_world_f90, monkeypatch):
+ """Ensures that TeX documentation is written out
+
+ CLI :: --latex-doc
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --latex-doc'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "Documentation is saved to file" in out
+ with Path(f"{mname}module.tex").open() as otex:
+ assert "\\documentclass" in otex.read()
+
+
+def test_nolatexdoc(capfd, hello_world_f90, monkeypatch):
+ """Ensures that TeX documentation is written out
+
+ CLI :: --no-latex-doc
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --no-latex-doc'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "Documentation is saved to file" not in out
+
+
+def test_shortlatex(capfd, hello_world_f90, monkeypatch):
+ """Ensures that truncated documentation is written out
+
+ TODO: Test to ensure this has no effect without --latex-doc
+ CLI :: --latex-doc --short-latex
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(
+ sys,
+ "argv",
+ f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(),
+ )
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "Documentation is saved to file" in out
+ with Path(f"./{mname}module.tex").open() as otex:
+ assert "\\documentclass" not in otex.read()
+
+
+def test_restdoc(capfd, hello_world_f90, monkeypatch):
+ """Ensures that RsT documentation is written out
+
+ CLI :: --rest-doc
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --rest-doc'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "ReST Documentation is saved to file" in out
+ with Path(f"./{mname}module.rest").open() as orst:
+ assert r".. -*- rest -*-" in orst.read()
+
+
+def test_norestexdoc(capfd, hello_world_f90, monkeypatch):
+ """Ensures that TeX documentation is written out
+
+ CLI :: --no-rest-doc
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --no-rest-doc'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "ReST Documentation is saved to file" not in out
+
+
+def test_debugcapi(capfd, hello_world_f90, monkeypatch):
+ """Ensures that debugging wrappers are written
+
+ CLI :: --debug-capi
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --debug-capi'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ with Path(f"./{mname}module.c").open() as ocmod:
+ assert r"#define DEBUGCFUNCS" in ocmod.read()
+
+
+@pytest.mark.xfail(reason="Consistently fails on CI.")
+def test_debugcapi_bld(hello_world_f90, monkeypatch):
+ """Ensures that debugging wrappers work
+
+ CLI :: --debug-capi -c
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} -c --debug-capi'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"")
+ rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
+ eout = ' Hello World\n'
+ eerr = textwrap.dedent("""\
+debug-capi:Python C/API function blah.hi()
+debug-capi:float hi=:output,hidden,scalar
+debug-capi:hi=0
+debug-capi:Fortran subroutine `f2pywraphi(&hi)'
+debug-capi:hi=0
+debug-capi:Building return value.
+debug-capi:Python C/API function blah.hi: successful.
+debug-capi:Freeing memory.
+ """)
+ assert rout.stdout == eout
+ assert rout.stderr == eerr
+
+
+def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch):
+ """Ensures that fortran subroutine wrappers for F77 are included by default
+
+ CLI :: --[no]-wrap-functions
+ """
+ # Implied
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert r"Fortran 77 wrappers are saved to" in out
+
+ # Explicit
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --wrap-functions'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert r"Fortran 77 wrappers are saved to" in out
+
+
+def test_nowrapfunc(capfd, hello_world_f90, monkeypatch):
+ """Ensures that fortran subroutine wrappers for F77 can be disabled
+
+ CLI :: --no-wrap-functions
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(sys, "argv",
+ f'f2py -m {mname} {ipath} --no-wrap-functions'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert r"Fortran 77 wrappers are saved to" not in out
+
+
+def test_inclheader(capfd, hello_world_f90, monkeypatch):
+ """Add to the include directories
+
+ CLI :: -include
+ TODO: Document this in the help string
+ """
+ ipath = Path(hello_world_f90)
+ mname = "blah"
+ monkeypatch.setattr(
+ sys,
+ "argv",
+ f'f2py -m {mname} {ipath} -include<stdbool.h> -include<stdio.h> '.
+ split(),
+ )
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ with Path(f"./{mname}module.c").open() as ocmod:
+ ocmr = ocmod.read()
+ assert "#include <stdbool.h>" in ocmr
+ assert "#include <stdio.h>" in ocmr
+
+
+def test_inclpath():
+ """Add to the include directories
+
+ CLI :: --include-paths
+ """
+ # TODO: populate
+ pass
+
+
+def test_hlink():
+ """Add to the include directories
+
+ CLI :: --help-link
+ """
+ # TODO: populate
+ pass
+
+
+def test_f2cmap(capfd, f2cmap_f90, monkeypatch):
+ """Check that Fortran-to-Python KIND specs can be passed
+
+ CLI :: --f2cmap
+ """
+ ipath = Path(f2cmap_f90)
+ monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --f2cmap mapfile'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "Reading f2cmap from 'mapfile' ..." in out
+ assert "Mapping \"real(kind=real32)\" to \"float\"" in out
+ assert "Mapping \"real(kind=real64)\" to \"double\"" in out
+ assert "Mapping \"integer(kind=int64)\" to \"long_long\"" in out
+ assert "Successfully applied user defined f2cmap changes" in out
+
+
+def test_quiet(capfd, hello_world_f90, monkeypatch):
+ """Reduce verbosity
+
+ CLI :: --quiet
+ """
+ ipath = Path(hello_world_f90)
+ monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert len(out) == 0
+
+
+def test_verbose(capfd, hello_world_f90, monkeypatch):
+ """Increase verbosity
+
+ CLI :: --verbose
+ """
+ ipath = Path(hello_world_f90)
+ monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ assert "analyzeline" in out
+
+
+def test_version(capfd, monkeypatch):
+ """Ensure version
+
+ CLI :: -v
+ """
+ monkeypatch.setattr(sys, "argv", 'f2py -v'.split())
+ # TODO: f2py2e should not call sys.exit() after printing the version
+ with pytest.raises(SystemExit):
+ f2pycli()
+ out, _ = capfd.readouterr()
+ import numpy as np
+ assert np.__version__ == out.strip()
+
+
+@pytest.mark.xfail(reason="Consistently fails on CI.")
+def test_npdistop(hello_world_f90, monkeypatch):
+ """
+ CLI :: -c
+ """
+ ipath = Path(hello_world_f90)
+ monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split())
+
+ with util.switchdir(ipath.parent):
+ f2pycli()
+ cmd_run = shlex.split("python -c \"import blah; blah.hi()\"")
+ rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8')
+ eout = ' Hello World\n'
+ assert rout.stdout == eout
+
+
+# Numpy distutils flags
+# TODO: These should be tested separately
+
+
+def test_npd_fcompiler():
+ """
+ CLI :: -c --fcompiler
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_compiler():
+ """
+ CLI :: -c --compiler
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_help_fcompiler():
+ """
+ CLI :: -c --help-fcompiler
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_f77exec():
+ """
+ CLI :: -c --f77exec
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_f90exec():
+ """
+ CLI :: -c --f90exec
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_f77flags():
+ """
+ CLI :: -c --f77flags
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_f90flags():
+ """
+ CLI :: -c --f90flags
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_opt():
+ """
+ CLI :: -c --opt
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_arch():
+ """
+ CLI :: -c --arch
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_noopt():
+ """
+ CLI :: -c --noopt
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_noarch():
+ """
+ CLI :: -c --noarch
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_debug():
+ """
+ CLI :: -c --debug
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_link_auto():
+ """
+ CLI :: -c --link-<resource>
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_lib():
+ """
+ CLI :: -c -L/path/to/lib/ -l<libname>
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_define():
+ """
+ CLI :: -D<define>
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_undefine():
+ """
+ CLI :: -U<name>
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_incl():
+ """
+ CLI :: -I/path/to/include/
+ """
+ # TODO: populate
+ pass
+
+
+def test_npd_linker():
+ """
+ CLI :: <filename>.o <filename>.so <filename>.a
+ """
+ # TODO: populate
+ pass
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py
new file mode 100644
index 00000000..f0cb61fb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_kind.py
@@ -0,0 +1,26 @@
+import os
+import pytest
+
+from numpy.f2py.crackfortran import (
+ _selected_int_kind_func as selected_int_kind,
+ _selected_real_kind_func as selected_real_kind,
+)
+from . import util
+
+
+class TestKind(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "kind", "foo.f90")]
+
+ def test_all(self):
+ selectedrealkind = self.module.selectedrealkind
+ selectedintkind = self.module.selectedintkind
+
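+        # Fortran's selected_int_kind(r) returns the smallest integer kind
+        # that can represent all n with |n| < 10**r (or -1 if none exists),
+        # and selected_real_kind(p) is the analogue for decimal precision p;
+        # the crackfortran reimplementations should mirror these semantics.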
+ for i in range(40):
+ assert selectedintkind(i) == selected_int_kind(
+ i
+ ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}"
+
+ for i in range(20):
+ assert selectedrealkind(i) == selected_real_kind(
+ i
+ ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}"
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py
new file mode 100644
index 00000000..80653b7d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_mixed.py
@@ -0,0 +1,33 @@
+import os
+import textwrap
+import pytest
+
+from numpy.testing import IS_PYPY
+from . import util
+
+
+class TestMixed(util.F2PyTest):
+ sources = [
+ util.getpath("tests", "src", "mixed", "foo.f"),
+ util.getpath("tests", "src", "mixed", "foo_fixed.f90"),
+ util.getpath("tests", "src", "mixed", "foo_free.f90"),
+ ]
+
+ def test_all(self):
+ assert self.module.bar11() == 11
+ assert self.module.foo_fixed.bar12() == 12
+ assert self.module.foo_free.bar13() == 13
+
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+ def test_docstring(self):
+ expected = textwrap.dedent("""\
+ a = bar11()
+
+ Wrapper for ``bar11``.
+
+ Returns
+ -------
+ a : int
+ """)
+ assert self.module.bar11.__doc__ == expected
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py
new file mode 100644
index 00000000..28822d40
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_module_doc.py
@@ -0,0 +1,27 @@
+import os
+import sys
+import pytest
+import textwrap
+
+from . import util
+from numpy.testing import IS_PYPY
+
+
+class TestModuleDocString(util.F2PyTest):
+ sources = [
+ util.getpath("tests", "src", "module_data",
+ "module_data_docstring.f90")
+ ]
+
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
+ @pytest.mark.xfail(IS_PYPY,
+ reason="PyPy cannot modify tp_doc after PyType_Ready")
+ def test_module_docstring(self):
+ assert self.module.mod.__doc__ == textwrap.dedent("""\
+ i : 'i'-scalar
+ x : 'i'-array(4)
+ a : 'f'-array(2,3)
+ b : 'f'-array(-1,-1), not allocated\x00
+ foo()\n
+ Wrapper for ``foo``.\n\n""")
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py
new file mode 100644
index 00000000..2f620eaa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_parameter.py
@@ -0,0 +1,112 @@
+import os
+import pytest
+
+import numpy as np
+
+from . import util
+
+
+class TestParameters(util.F2PyTest):
+    # Check that Fortran PARAMETER constants are wrapped correctly
+ sources = [
+ util.getpath("tests", "src", "parameter", "constant_real.f90"),
+ util.getpath("tests", "src", "parameter", "constant_integer.f90"),
+ util.getpath("tests", "src", "parameter", "constant_both.f90"),
+ util.getpath("tests", "src", "parameter", "constant_compound.f90"),
+ util.getpath("tests", "src", "parameter", "constant_non_compound.f90"),
+ ]
+
+ @pytest.mark.slow
+ def test_constant_real_single(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float32)[::2]
+ pytest.raises(ValueError, self.module.foo_single, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float32)
+ self.module.foo_single(x)
+ assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
+
+ @pytest.mark.slow
+ def test_constant_real_double(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float64)[::2]
+ pytest.raises(ValueError, self.module.foo_double, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float64)
+ self.module.foo_double(x)
+ assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
+
+ @pytest.mark.slow
+ def test_constant_compound_int(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.int32)[::2]
+ pytest.raises(ValueError, self.module.foo_compound_int, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.int32)
+ self.module.foo_compound_int(x)
+ assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2])
+
+ @pytest.mark.slow
+ def test_constant_non_compound_int(self):
+ # check values
+ x = np.arange(4, dtype=np.int32)
+ self.module.foo_non_compound_int(x)
+ assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3])
+
+ @pytest.mark.slow
+ def test_constant_integer_int(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.int32)[::2]
+ pytest.raises(ValueError, self.module.foo_int, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.int32)
+ self.module.foo_int(x)
+ assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
+
+ @pytest.mark.slow
+ def test_constant_integer_long(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.int64)[::2]
+ pytest.raises(ValueError, self.module.foo_long, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.int64)
+ self.module.foo_long(x)
+ assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2])
+
+ @pytest.mark.slow
+ def test_constant_both(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float64)[::2]
+ pytest.raises(ValueError, self.module.foo, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float64)
+ self.module.foo(x)
+ assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
+
+ @pytest.mark.slow
+ def test_constant_no(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float64)[::2]
+ pytest.raises(ValueError, self.module.foo_no, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float64)
+ self.module.foo_no(x)
+ assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
+
+ @pytest.mark.slow
+ def test_constant_sum(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float64)[::2]
+ pytest.raises(ValueError, self.module.foo_sum, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float64)
+ self.module.foo_sum(x)
+ assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3])
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py
new file mode 100644
index 00000000..82671cd8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_quoted_character.py
@@ -0,0 +1,16 @@
+"""See https://github.com/numpy/numpy/pull/10676.
+
+"""
+import sys
+import pytest
+
+from . import util
+
+
+class TestQuotedCharacter(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "quoted_character", "foo.f")]
+
+ @pytest.mark.skipif(sys.platform == "win32",
+ reason="Fails with MinGW64 Gfortran (Issue #9673)")
+ def test_quoted_character(self):
+ assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")")
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py
new file mode 100644
index 00000000..044f952f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_regression.py
@@ -0,0 +1,66 @@
+import os
+import pytest
+
+import numpy as np
+
+from . import util
+
+
+class TestIntentInOut(util.F2PyTest):
+ # Check that intent(in out) translates as intent(inout)
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")]
+
+ @pytest.mark.slow
+ def test_inout(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float32)[::2]
+ pytest.raises(ValueError, self.module.foo, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float32)
+ self.module.foo(x)
+ assert np.allclose(x, [3, 1, 2])
+
+
+class TestNegativeBounds(util.F2PyTest):
+ # Check that negative bounds work correctly
+ sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]
+
+ @pytest.mark.slow
+ def test_negbound(self):
+ xvec = np.arange(12)
+ xlow = -6
+ xhigh = 4
+        # Calculate the upper bound, keeping Fortran's 1-based indexing in mind.
+ def ubound(xl, xh):
+ return xh - xl + 1
+ rval = self.module.foo(is_=xlow, ie_=xhigh,
+ arr=xvec[:ubound(xlow, xhigh)])
+        expval = np.arange(11, dtype=np.float32)
+ assert np.allclose(rval, expval)
+
+
+class TestNumpyVersionAttribute(util.F2PyTest):
+    # Check that the attribute __f2py_numpy_version__ is present
+    # in the compiled module and that it has the value np.__version__.
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")]
+
+ @pytest.mark.slow
+ def test_numpy_version_attribute(self):
+
+ # Check that self.module has an attribute named "__f2py_numpy_version__"
+ assert hasattr(self.module, "__f2py_numpy_version__")
+
+ # Check that the attribute __f2py_numpy_version__ is a string
+ assert isinstance(self.module.__f2py_numpy_version__, str)
+
+ # Check that __f2py_numpy_version__ has the value numpy.__version__
+ assert np.__version__ == self.module.__f2py_numpy_version__
+
+
+def test_include_path():
+ incdir = np.f2py.get_include()
+ fnames_in_dir = os.listdir(incdir)
+ for fname in ("fortranobject.c", "fortranobject.h"):
+ assert fname in fnames_in_dir
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py
new file mode 100644
index 00000000..36c1f10f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_character.py
@@ -0,0 +1,45 @@
+import pytest
+
+from numpy import array
+from . import util
+import platform
+
+IS_S390X = platform.machine() == "s390x"
+
+
+class TestReturnCharacter(util.F2PyTest):
+ def check_function(self, t, tname):
+ if tname in ["t0", "t1", "s0", "s1"]:
+ assert t("23") == b"2"
+ r = t("ab")
+ assert r == b"a"
+ r = t(array("ab"))
+ assert r == b"a"
+ r = t(array(77, "u1"))
+ assert r == b"M"
+ elif tname in ["ts", "ss"]:
+ assert t(23) == b"23"
+ assert t("123456789abcdef") == b"123456789a"
+ elif tname in ["t5", "s5"]:
+ assert t(23) == b"23"
+ assert t("ab") == b"ab"
+ assert t("123456789abcdef") == b"12345"
+ else:
+ raise NotImplementedError
+
+
+class TestFReturnCharacter(TestReturnCharacter):
+ sources = [
+ util.getpath("tests", "src", "return_character", "foo77.f"),
+ util.getpath("tests", "src", "return_character", "foo90.f90"),
+ ]
+
+ @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
+ @pytest.mark.parametrize("name", "t0,t1,t5,s0,s1,s5,ss".split(","))
+ def test_all_f77(self, name):
+ self.check_function(getattr(self.module, name), name)
+
+ @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
+ @pytest.mark.parametrize("name", "t0,t1,t5,ts,s0,s1,s5,ss".split(","))
+ def test_all_f90(self, name):
+ self.check_function(getattr(self.module.f90_return_char, name), name)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py
new file mode 100644
index 00000000..9df79632
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_complex.py
@@ -0,0 +1,65 @@
+import pytest
+
+from numpy import array
+from . import util
+
+
+class TestReturnComplex(util.F2PyTest):
+ def check_function(self, t, tname):
+ if tname in ["t0", "t8", "s0", "s8"]:
+ err = 1e-5
+ else:
+ err = 0.0
+ assert abs(t(234j) - 234.0j) <= err
+ assert abs(t(234.6) - 234.6) <= err
+ assert abs(t(234) - 234.0) <= err
+ assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err
+ # assert abs(t('234')-234.)<=err
+ # assert abs(t('234.6')-234.6)<=err
+ assert abs(t(-234) + 234.0) <= err
+ assert abs(t([234]) - 234.0) <= err
+ assert abs(t((234, )) - 234.0) <= err
+ assert abs(t(array(234)) - 234.0) <= err
+ assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err
+ assert abs(t(array([234])) - 234.0) <= err
+ assert abs(t(array([[234]])) - 234.0) <= err
+ assert abs(t(array([234]).astype("b")) + 22.0) <= err
+ assert abs(t(array([234], "h")) - 234.0) <= err
+ assert abs(t(array([234], "i")) - 234.0) <= err
+ assert abs(t(array([234], "l")) - 234.0) <= err
+ assert abs(t(array([234], "q")) - 234.0) <= err
+ assert abs(t(array([234], "f")) - 234.0) <= err
+ assert abs(t(array([234], "d")) - 234.0) <= err
+ assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err
+ assert abs(t(array([234], "D")) - 234.0) <= err
+
+ # pytest.raises(TypeError, t, array([234], 'a1'))
+ pytest.raises(TypeError, t, "abc")
+
+ pytest.raises(IndexError, t, [])
+ pytest.raises(IndexError, t, ())
+
+ pytest.raises(TypeError, t, t)
+ pytest.raises(TypeError, t, {})
+
+ try:
+ r = t(10**400)
+ assert repr(r) in ["(inf+0j)", "(Infinity+0j)"]
+ except OverflowError:
+ pass
+
+
+class TestFReturnComplex(TestReturnComplex):
+ sources = [
+ util.getpath("tests", "src", "return_complex", "foo77.f"),
+ util.getpath("tests", "src", "return_complex", "foo90.f90"),
+ ]
+
+ @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
+ def test_all_f77(self, name):
+ self.check_function(getattr(self.module, name), name)
+
+ @pytest.mark.parametrize("name", "t0,t8,t16,td,s0,s8,s16,sd".split(","))
+ def test_all_f90(self, name):
+ self.check_function(getattr(self.module.f90_return_complex, name),
+ name)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py
new file mode 100644
index 00000000..a43c677f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_integer.py
@@ -0,0 +1,55 @@
+import pytest
+
+from numpy import array
+from . import util
+
+
+class TestReturnInteger(util.F2PyTest):
+ def check_function(self, t, tname):
+ assert t(123) == 123
+ assert t(123.6) == 123
+ assert t("123") == 123
+ assert t(-123) == -123
+ assert t([123]) == 123
+ assert t((123, )) == 123
+ assert t(array(123)) == 123
+ assert t(array([123])) == 123
+ assert t(array([[123]])) == 123
+ assert t(array([123], "b")) == 123
+ assert t(array([123], "h")) == 123
+ assert t(array([123], "i")) == 123
+ assert t(array([123], "l")) == 123
+ assert t(array([123], "B")) == 123
+ assert t(array([123], "f")) == 123
+ assert t(array([123], "d")) == 123
+
+ # pytest.raises(ValueError, t, array([123],'S3'))
+ pytest.raises(ValueError, t, "abc")
+
+ pytest.raises(IndexError, t, [])
+ pytest.raises(IndexError, t, ())
+
+ pytest.raises(Exception, t, t)
+ pytest.raises(Exception, t, {})
+
+ if tname in ["t8", "s8"]:
+ pytest.raises(OverflowError, t, 100000000000000000000000)
+ pytest.raises(OverflowError, t, 10000000011111111111111.23)
+
+
+class TestFReturnInteger(TestReturnInteger):
+ sources = [
+ util.getpath("tests", "src", "return_integer", "foo77.f"),
+ util.getpath("tests", "src", "return_integer", "foo90.f90"),
+ ]
+
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
+ def test_all_f77(self, name):
+ self.check_function(getattr(self.module, name), name)
+
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
+ def test_all_f90(self, name):
+ self.check_function(getattr(self.module.f90_return_integer, name),
+ name)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py
new file mode 100644
index 00000000..92fb902a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_logical.py
@@ -0,0 +1,64 @@
+import pytest
+
+from numpy import array
+from . import util
+
+
+class TestReturnLogical(util.F2PyTest):
+ def check_function(self, t):
+ assert t(True) == 1
+ assert t(False) == 0
+ assert t(0) == 0
+ assert t(None) == 0
+ assert t(0.0) == 0
+ assert t(0j) == 0
+ assert t(1j) == 1
+ assert t(234) == 1
+ assert t(234.6) == 1
+ assert t(234.6 + 3j) == 1
+ assert t("234") == 1
+ assert t("aaa") == 1
+ assert t("") == 0
+ assert t([]) == 0
+ assert t(()) == 0
+ assert t({}) == 0
+ assert t(t) == 1
+ assert t(-234) == 1
+ assert t(10**100) == 1
+ assert t([234]) == 1
+ assert t((234, )) == 1
+ assert t(array(234)) == 1
+ assert t(array([234])) == 1
+ assert t(array([[234]])) == 1
+ assert t(array([127], "b")) == 1
+ assert t(array([234], "h")) == 1
+ assert t(array([234], "i")) == 1
+ assert t(array([234], "l")) == 1
+ assert t(array([234], "f")) == 1
+ assert t(array([234], "d")) == 1
+ assert t(array([234 + 3j], "F")) == 1
+ assert t(array([234], "D")) == 1
+ assert t(array(0)) == 0
+ assert t(array([0])) == 0
+ assert t(array([[0]])) == 0
+ assert t(array([0j])) == 0
+ assert t(array([1])) == 1
+ pytest.raises(ValueError, t, array([0, 0]))
+
+
+class TestFReturnLogical(TestReturnLogical):
+ sources = [
+ util.getpath("tests", "src", "return_logical", "foo77.f"),
+ util.getpath("tests", "src", "return_logical", "foo90.f90"),
+ ]
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("name", "t0,t1,t2,t4,s0,s1,s2,s4".split(","))
+ def test_all_f77(self, name):
+ self.check_function(getattr(self.module, name))
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("name",
+ "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","))
+ def test_all_f90(self, name):
+ self.check_function(getattr(self.module.f90_return_logical, name))
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py
new file mode 100644
index 00000000..9e76c151
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_return_real.py
@@ -0,0 +1,109 @@
+import platform
+import pytest
+import numpy as np
+
+from numpy import array
+from . import util
+
+
+class TestReturnReal(util.F2PyTest):
+ def check_function(self, t, tname):
+ if tname in ["t0", "t4", "s0", "s4"]:
+ err = 1e-5
+ else:
+ err = 0.0
+ assert abs(t(234) - 234.0) <= err
+ assert abs(t(234.6) - 234.6) <= err
+ assert abs(t("234") - 234) <= err
+ assert abs(t("234.6") - 234.6) <= err
+ assert abs(t(-234) + 234) <= err
+ assert abs(t([234]) - 234) <= err
+ assert abs(t((234, )) - 234.0) <= err
+ assert abs(t(array(234)) - 234.0) <= err
+ assert abs(t(array([234])) - 234.0) <= err
+ assert abs(t(array([[234]])) - 234.0) <= err
+ assert abs(t(array([234]).astype("b")) + 22) <= err
+ assert abs(t(array([234], "h")) - 234.0) <= err
+ assert abs(t(array([234], "i")) - 234.0) <= err
+ assert abs(t(array([234], "l")) - 234.0) <= err
+ assert abs(t(array([234], "B")) - 234.0) <= err
+ assert abs(t(array([234], "f")) - 234.0) <= err
+ assert abs(t(array([234], "d")) - 234.0) <= err
+ if tname in ["t0", "t4", "s0", "s4"]:
+ assert t(1e200) == t(1e300) # inf
+
+ # pytest.raises(ValueError, t, array([234], 'S1'))
+ pytest.raises(ValueError, t, "abc")
+
+ pytest.raises(IndexError, t, [])
+ pytest.raises(IndexError, t, ())
+
+ pytest.raises(Exception, t, t)
+ pytest.raises(Exception, t, {})
+
+ try:
+ r = t(10**400)
+ assert repr(r) in ["inf", "Infinity"]
+ except OverflowError:
+ pass
+
+
+@pytest.mark.skipif(
+ platform.system() == "Darwin",
+    reason="Prone to error when run with numpy/f2py/tests on macOS, "
+ "but not when run in isolation",
+)
+@pytest.mark.skipif(
+ np.dtype(np.intp).itemsize < 8,
+ reason="32-bit builds are buggy"
+)
+class TestCReturnReal(TestReturnReal):
+ suffix = ".pyf"
+ module_name = "c_ext_return_real"
+ code = """
+python module c_ext_return_real
+usercode \'\'\'
+float t4(float value) { return value; }
+void s4(float *t4, float value) { *t4 = value; }
+double t8(double value) { return value; }
+void s8(double *t8, double value) { *t8 = value; }
+\'\'\'
+interface
+ function t4(value)
+ real*4 intent(c) :: t4,value
+ end
+ function t8(value)
+ real*8 intent(c) :: t8,value
+ end
+ subroutine s4(t4,value)
+ intent(c) s4
+ real*4 intent(out) :: t4
+ real*4 intent(c) :: value
+ end
+ subroutine s8(t8,value)
+ intent(c) s8
+ real*8 intent(out) :: t8
+ real*8 intent(c) :: value
+ end
+end interface
+end python module c_ext_return_real
+ """
+
+ @pytest.mark.parametrize("name", "t4,t8,s4,s8".split(","))
+ def test_all(self, name):
+ self.check_function(getattr(self.module, name), name)
+
+
+class TestFReturnReal(TestReturnReal):
+ sources = [
+ util.getpath("tests", "src", "return_real", "foo77.f"),
+ util.getpath("tests", "src", "return_real", "foo90.f90"),
+ ]
+
+ @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
+ def test_all_f77(self, name):
+ self.check_function(getattr(self.module, name), name)
+
+ @pytest.mark.parametrize("name", "t0,t4,t8,td,s0,s4,s8,sd".split(","))
+ def test_all_f90(self, name):
+ self.check_function(getattr(self.module.f90_return_real, name), name)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py
new file mode 100644
index 00000000..6d499046
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_semicolon_split.py
@@ -0,0 +1,74 @@
+import platform
+import pytest
+import numpy as np
+
+from . import util
+
+
+@pytest.mark.skipif(
+ platform.system() == "Darwin",
+    reason="Prone to error when run with numpy/f2py/tests on macOS, "
+ "but not when run in isolation",
+)
+@pytest.mark.skipif(
+ np.dtype(np.intp).itemsize < 8,
+ reason="32-bit builds are buggy"
+)
+class TestMultiline(util.F2PyTest):
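+    # Regression check: the ';' inside the multiline usercode block below
+    # must not be treated as a statement separator when f2py parses the .pyf.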
+ suffix = ".pyf"
+ module_name = "multiline"
+ code = f"""
+python module {module_name}
+ usercode '''
+void foo(int* x) {{
+ char dummy = ';';
+ *x = 42;
+}}
+'''
+ interface
+ subroutine foo(x)
+ intent(c) foo
+ integer intent(out) :: x
+ end subroutine foo
+ end interface
+end python module {module_name}
+ """
+
+ def test_multiline(self):
+ assert self.module.foo() == 42
+
+
+@pytest.mark.skipif(
+ platform.system() == "Darwin",
+    reason="Prone to error when run with numpy/f2py/tests on macOS, "
+ "but not when run in isolation",
+)
+@pytest.mark.skipif(
+ np.dtype(np.intp).itemsize < 8,
+ reason="32-bit builds are buggy"
+)
+class TestCallstatement(util.F2PyTest):
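+    # The callstatement below is continued across lines with '&' and contains
+    # a bare ';'; both must reach the generated C wrapper unchanged.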
+ suffix = ".pyf"
+ module_name = "callstatement"
+ code = f"""
+python module {module_name}
+ usercode '''
+void foo(int* x) {{
+}}
+'''
+ interface
+ subroutine foo(x)
+ intent(c) foo
+ integer intent(out) :: x
+ callprotoargument int*
+ callstatement {{ &
+ ; &
+ x = 42; &
+ }}
+ end subroutine foo
+ end interface
+end python module {module_name}
+ """
+
+ def test_callstatement(self):
+ assert self.module.foo() == 42
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py
new file mode 100644
index 00000000..bd2c349d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_size.py
@@ -0,0 +1,45 @@
+import os
+import pytest
+import numpy as np
+
+from . import util
+
+
+class TestSizeSumExample(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "size", "foo.f90")]
+
+ @pytest.mark.slow
+ def test_all(self):
+ r = self.module.foo([[]])
+ assert r == [0]
+
+ r = self.module.foo([[1, 2]])
+ assert r == [3]
+
+ r = self.module.foo([[1, 2], [3, 4]])
+ assert np.allclose(r, [3, 7])
+
+ r = self.module.foo([[1, 2], [3, 4], [5, 6]])
+ assert np.allclose(r, [3, 7, 11])
+
+ @pytest.mark.slow
+ def test_transpose(self):
+ r = self.module.trans([[]])
+ assert np.allclose(r.T, np.array([[]]))
+
+ r = self.module.trans([[1, 2]])
+ assert np.allclose(r, [[1.], [2.]])
+
+ r = self.module.trans([[1, 2, 3], [4, 5, 6]])
+ assert np.allclose(r, [[1, 4], [2, 5], [3, 6]])
+
+ @pytest.mark.slow
+ def test_flatten(self):
+ r = self.module.flatten([[]])
+ assert np.allclose(r, [])
+
+ r = self.module.flatten([[1, 2]])
+ assert np.allclose(r, [1, 2])
+
+ r = self.module.flatten([[1, 2, 3], [4, 5, 6]])
+ assert np.allclose(r, [1, 2, 3, 4, 5, 6])
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py
new file mode 100644
index 00000000..9e937188
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_string.py
@@ -0,0 +1,100 @@
+import os
+import pytest
+import textwrap
+import numpy as np
+from . import util
+
+
+class TestString(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "string", "char.f90")]
+
+ @pytest.mark.slow
+ def test_char(self):
+ strings = np.array(["ab", "cd", "ef"], dtype="c").T
+ inp, out = self.module.char_test.change_strings(
+ strings, strings.shape[1])
+ assert inp == pytest.approx(strings)
+ expected = strings.copy()
+ expected[1, :] = "AAA"
+ assert out == pytest.approx(expected)
+
+
+class TestDocStringArguments(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "string", "string.f")]
+
+ def test_example(self):
+ a = np.array(b"123\0\0")
+ b = np.array(b"123\0\0")
+ c = np.array(b"123")
+ d = np.array(b"123")
+
+ self.module.foo(a, b, c, d)
+
+ assert a.tobytes() == b"123\0\0"
+ assert b.tobytes() == b"B23\0\0"
+ assert c.tobytes() == b"123"
+ assert d.tobytes() == b"D23"
+
+
+class TestFixedString(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "string", "fixed_string.f90")]
+
+ @staticmethod
+ def _sint(s, start=0, end=None):
+        """Return the content of a string buffer as an integer value,
+        weighting the byte at index ``j`` by ``10**j``.
+
+        For example:
+          _sint('1')    -> 49 (== ord('1'))
+          _sint('1234') -> 49 + 50*10 + 51*100 + 52*1000 == 57649
+        """
+ if isinstance(s, np.ndarray):
+ s = s.tobytes()
+ elif isinstance(s, str):
+ s = s.encode()
+ assert isinstance(s, bytes)
+ if end is None:
+ end = len(s)
+ i = 0
+ for j in range(start, min(end, len(s))):
+ i += s[j] * 10**j
+ return i
+
+ def _get_input(self, intent="in"):
+ if intent in ["in"]:
+ yield ""
+ yield "1"
+ yield "1234"
+ yield "12345"
+ yield b""
+ yield b"\0"
+ yield b"1"
+ yield b"\01"
+ yield b"1\0"
+ yield b"1234"
+ yield b"12345"
+ yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0')
+ yield np.array(b"") # array(b'', dtype='|S1')
+ yield np.array(b"\0")
+ yield np.array(b"1")
+ yield np.array(b"1\0")
+ yield np.array(b"\01")
+ yield np.array(b"1234")
+ yield np.array(b"123\0")
+ yield np.array(b"12345")
+
+ def test_intent_in(self):
+ for s in self._get_input():
+ r = self.module.test_in_bytes4(s)
+            # also checks that s is not changed in place
+ expected = self._sint(s, end=4)
+ assert r == expected, s
+
+ def test_intent_inout(self):
+ for s in self._get_input(intent="inout"):
+ rest = self._sint(s, start=4)
+ r = self.module.test_inout_bytes4(s)
+ expected = self._sint(s, end=4)
+ assert r == expected
+
+ # check that the rest of input string is preserved
+ assert rest == self._sint(s, start=4)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py
new file mode 100644
index 00000000..84527831
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_symbolic.py
@@ -0,0 +1,494 @@
+import pytest
+
+from numpy.f2py.symbolic import (
+ Expr,
+ Op,
+ ArithOp,
+ Language,
+ as_symbol,
+ as_number,
+ as_string,
+ as_array,
+ as_complex,
+ as_terms,
+ as_factors,
+ eliminate_quotes,
+ insert_quotes,
+ fromstring,
+ as_expr,
+ as_apply,
+ as_numer_denom,
+ as_ternary,
+ as_ref,
+ as_deref,
+ normalize,
+ as_eq,
+ as_ne,
+ as_lt,
+ as_gt,
+ as_le,
+ as_ge,
+)
+from . import util
+
+
+class TestSymbolic(util.F2PyTest):
+ def test_eliminate_quotes(self):
+ def worker(s):
+ r, d = eliminate_quotes(s)
+ s1 = insert_quotes(r, d)
+ assert s1 == s
+
+ for kind in ["", "mykind_"]:
+ worker(kind + '"1234" // "ABCD"')
+ worker(kind + '"1234" // ' + kind + '"ABCD"')
+ worker(kind + "\"1234\" // 'ABCD'")
+ worker(kind + '"1234" // ' + kind + "'ABCD'")
+ worker(kind + '"1\\"2\'AB\'34"')
+ worker("a = " + kind + "'1\\'2\"AB\"34'")
+
+ def test_sanity(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+
+ assert x.op == Op.SYMBOL
+ assert repr(x) == "Expr(Op.SYMBOL, 'x')"
+ assert x == x
+ assert x != y
+ assert hash(x) is not None
+
+ n = as_number(123)
+ m = as_number(456)
+ assert n.op == Op.INTEGER
+ assert repr(n) == "Expr(Op.INTEGER, (123, 4))"
+ assert n == n
+ assert n != m
+ assert hash(n) is not None
+
+ fn = as_number(12.3)
+ fm = as_number(45.6)
+ assert fn.op == Op.REAL
+ assert repr(fn) == "Expr(Op.REAL, (12.3, 4))"
+ assert fn == fn
+ assert fn != fm
+ assert hash(fn) is not None
+
+ c = as_complex(1, 2)
+ c2 = as_complex(3, 4)
+ assert c.op == Op.COMPLEX
+ assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4)),"
+ " Expr(Op.INTEGER, (2, 4))))")
+ assert c == c
+ assert c != c2
+ assert hash(c) is not None
+
+ s = as_string("'123'")
+ s2 = as_string('"ABC"')
+ assert s.op == Op.STRING
+ assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s)
+ assert s == s
+ assert s != s2
+
+ a = as_array((n, m))
+ b = as_array((n, ))
+ assert a.op == Op.ARRAY
+ assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4)),"
+ " Expr(Op.INTEGER, (456, 4))))")
+ assert a == a
+ assert a != b
+
+ t = as_terms(x)
+ u = as_terms(y)
+ assert t.op == Op.TERMS
+ assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})"
+ assert t == t
+ assert t != u
+ assert hash(t) is not None
+
+ v = as_factors(x)
+ w = as_factors(y)
+ assert v.op == Op.FACTORS
+ assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})"
+ assert v == v
+ assert w != v
+ assert hash(v) is not None
+
+ t = as_ternary(x, y, z)
+ u = as_ternary(x, z, y)
+ assert t.op == Op.TERNARY
+ assert t == t
+ assert t != u
+ assert hash(t) is not None
+
+ e = as_eq(x, y)
+ f = as_lt(x, y)
+ assert e.op == Op.RELATIONAL
+ assert e == e
+ assert e != f
+ assert hash(e) is not None
+
+ def test_tostring_fortran(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ n = as_number(123)
+ m = as_number(456)
+ a = as_array((n, m))
+ c = as_complex(n, m)
+
+ assert str(x) == "x"
+ assert str(n) == "123"
+ assert str(a) == "[123, 456]"
+ assert str(c) == "(123, 456)"
+
+ assert str(Expr(Op.TERMS, {x: 1})) == "x"
+ assert str(Expr(Op.TERMS, {x: 2})) == "2 * x"
+ assert str(Expr(Op.TERMS, {x: -1})) == "-x"
+ assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x"
+ assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y"
+ assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y"
+ assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y"
+ assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y"
+ assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y"
+
+ assert str(Expr(Op.FACTORS, {x: 1})) == "x"
+ assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2"
+ assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1"
+ assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2"
+ assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y"
+ assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3"
+
+ v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3})
+ assert str(v) == "x ** 2 * (x + y) ** 3", str(v)
+ v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3})
+ assert str(v) == "x ** 2 * (x * y) ** 3", str(v)
+
+ assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()"
+ assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)"
+ assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)"
+ assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]"
+
+ assert str(as_ternary(x, y, z)) == "merge(y, z, x)"
+ assert str(as_eq(x, y)) == "x .eq. y"
+ assert str(as_ne(x, y)) == "x .ne. y"
+ assert str(as_lt(x, y)) == "x .lt. y"
+ assert str(as_le(x, y)) == "x .le. y"
+ assert str(as_gt(x, y)) == "x .gt. y"
+ assert str(as_ge(x, y)) == "x .ge. y"
+
+ def test_tostring_c(self):
+ language = Language.C
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ n = as_number(123)
+
+ assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x"
+ assert (Expr(Op.FACTORS, {
+ x + y: 2
+ }).tostring(language=language) == "(x + y) * (x + y)")
+ assert Expr(Op.FACTORS, {
+ x: 12
+ }).tostring(language=language) == "pow(x, 12)"
+
+ assert as_apply(ArithOp.DIV, x,
+ y).tostring(language=language) == "x / y"
+ assert (as_apply(ArithOp.DIV, x,
+ x + y).tostring(language=language) == "x / (x + y)")
+ assert (as_apply(ArithOp.DIV, x - y, x +
+ y).tostring(language=language) == "(x - y) / (x + y)")
+ assert (x + (x - y) / (x + y) +
+ n).tostring(language=language) == "123 + x + (x - y) / (x + y)"
+
+ assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)"
+ assert as_eq(x, y).tostring(language=language) == "x == y"
+ assert as_ne(x, y).tostring(language=language) == "x != y"
+ assert as_lt(x, y).tostring(language=language) == "x < y"
+ assert as_le(x, y).tostring(language=language) == "x <= y"
+ assert as_gt(x, y).tostring(language=language) == "x > y"
+ assert as_ge(x, y).tostring(language=language) == "x >= y"
+
+ def test_operations(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+
+ assert x + x == Expr(Op.TERMS, {x: 2})
+ assert x - x == Expr(Op.INTEGER, (0, 4))
+ assert x + y == Expr(Op.TERMS, {x: 1, y: 1})
+ assert x - y == Expr(Op.TERMS, {x: 1, y: -1})
+ assert x * x == Expr(Op.FACTORS, {x: 2})
+ assert x * y == Expr(Op.FACTORS, {x: 1, y: 1})
+
+ assert +x == x
+ assert -x == Expr(Op.TERMS, {x: -1}), repr(-x)
+ assert 2 * x == Expr(Op.TERMS, {x: 2})
+ assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2})
+ assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3})
+ assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2})
+
+ assert x**2 == Expr(Op.FACTORS, {x: 2})
+ assert (x + y)**2 == Expr(
+ Op.TERMS,
+ {
+ Expr(Op.FACTORS, {x: 2}): 1,
+ Expr(Op.FACTORS, {y: 2}): 1,
+ Expr(Op.FACTORS, {
+ x: 1,
+ y: 1
+ }): 2,
+ },
+ )
+ assert (x + y) * x == x**2 + x * y
+ assert (x + y)**2 == x**2 + 2 * x * y + y**2
+ assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2
+ assert (x + y) * z == x * z + y * z
+ assert z * (x + y) == x * z + y * z
+
+ assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2))
+ assert (2 * x / 2) == x
+ assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2))
+ assert (4 * x / 2) == 2 * x
+ assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
+ assert (6 * x / 2) == 3 * x
+ assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2))
+ assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply(
+ ArithOp.DIV, 5 * y, 4 * x)
+ assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x,
+ as_number(2)), (15 * x / 6) / 5
+ assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5))
+
+ assert (x / 2.0) == Expr(Op.TERMS, {x: 0.5})
+
+ s = as_string('"ABC"')
+ t = as_string('"123"')
+
+ assert s // t == Expr(Op.STRING, ('"ABC123"', 1))
+ assert s // x == Expr(Op.CONCAT, (s, x))
+ assert x // s == Expr(Op.CONCAT, (x, s))
+
+ c = as_complex(1.0, 2.0)
+ assert -c == as_complex(-1.0, -2.0)
+ assert c + c == as_expr((1 + 2j) * 2)
+ assert c * c == as_expr((1 + 2j)**2)
+
+ def test_substitute(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ a = as_array((x, y))
+
+ assert x.substitute({x: y}) == y
+ assert (x + y).substitute({x: z}) == y + z
+ assert (x * y).substitute({x: z}) == y * z
+ assert (x**4).substitute({x: z}) == z**4
+ assert (x / y).substitute({x: z}) == z / y
+ assert x.substitute({x: y + z}) == y + z
+ assert a.substitute({x: y + z}) == as_array((y + z, y))
+
+ assert as_ternary(x, y,
+ z).substitute({x: y + z}) == as_ternary(y + z, y, z)
+ assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y)
+
+ def test_fromstring(self):
+
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ f = as_symbol("f")
+ s = as_string('"ABC"')
+ t = as_string('"123"')
+ a = as_array((x, y))
+
+ assert fromstring("x") == x
+ assert fromstring("+ x") == x
+ assert fromstring("- x") == -x
+ assert fromstring("x + y") == x + y
+ assert fromstring("x + 1") == x + 1
+ assert fromstring("x * y") == x * y
+ assert fromstring("x * 2") == x * 2
+ assert fromstring("x / y") == x / y
+ assert fromstring("x ** 2", language=Language.Python) == x**2
+ assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3
+ assert fromstring("(x + y) * z") == (x + y) * z
+
+ assert fromstring("f(x)") == f(x)
+ assert fromstring("f(x,y)") == f(x, y)
+ assert fromstring("f[x]") == f[x]
+ assert fromstring("f[x][y]") == f[x][y]
+
+ assert fromstring('"ABC"') == s
+ assert (normalize(
+ fromstring('"ABC" // "123" ',
+ language=Language.Fortran)) == s // t)
+ assert fromstring('f("ABC")') == f(s)
+ assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND")
+
+ assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)")
+ assert fromstring("f((/x, y/))") == f(a)
+ assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, ))
+
+ assert fromstring("123") == as_number(123)
+ assert fromstring("123_2") == as_number(123, 2)
+ assert fromstring("123_myintkind") == as_number(123, "myintkind")
+
+ assert fromstring("123.0") == as_number(123.0, 4)
+ assert fromstring("123.0_4") == as_number(123.0, 4)
+ assert fromstring("123.0_8") == as_number(123.0, 8)
+ assert fromstring("123.0e0") == as_number(123.0, 4)
+ assert fromstring("123.0d0") == as_number(123.0, 8)
+ assert fromstring("123d0") == as_number(123.0, 8)
+ assert fromstring("123e-0") == as_number(123.0, 4)
+ assert fromstring("123d+0") == as_number(123.0, 8)
+ assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind")
+ assert fromstring("3E4") == as_number(30000.0, 4)
+
+ assert fromstring("(1, 2)") == as_complex(1, 2)
+ assert fromstring("(1e2, PI)") == as_complex(as_number(100.0),
+ as_symbol("PI"))
+
+ assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2)))
+
+ assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"),
+ x,
+ y=as_number(1))
+ assert fromstring(
+ 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply(
+ as_symbol("PERSON"),
+ name=as_string('"John"'),
+ age=as_number(50),
+ shape=as_array((as_number(34), as_number(23))),
+ )
+
+ assert fromstring("x?y:z") == as_ternary(x, y, z)
+
+ assert fromstring("*x") == as_deref(x)
+ assert fromstring("**x") == as_deref(as_deref(x))
+ assert fromstring("&x") == as_ref(x)
+ assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y)
+ assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y)
+ assert fromstring("*x * *y") == as_deref(x) * as_deref(y)
+ assert fromstring("*x**y") == as_deref(x) * as_deref(y)
+
+ assert fromstring("x == y") == as_eq(x, y)
+ assert fromstring("x != y") == as_ne(x, y)
+ assert fromstring("x < y") == as_lt(x, y)
+ assert fromstring("x > y") == as_gt(x, y)
+ assert fromstring("x <= y") == as_le(x, y)
+ assert fromstring("x >= y") == as_ge(x, y)
+
+ assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y)
+ assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y)
+ assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y)
+ assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y)
+ assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y)
+ assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y)
+
+ def test_traverse(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+ f = as_symbol("f")
+
+ # Use traverse to substitute a symbol
+ def replace_visit(s, r=z):
+ if s == x:
+ return r
+
+ assert x.traverse(replace_visit) == z
+ assert y.traverse(replace_visit) == y
+ assert z.traverse(replace_visit) == z
+ assert (f(y)).traverse(replace_visit) == f(y)
+ assert (f(x)).traverse(replace_visit) == f(z)
+ assert (f[y]).traverse(replace_visit) == f[y]
+ assert (f[z]).traverse(replace_visit) == f[z]
+ assert (x + y + z).traverse(replace_visit) == (2 * z + y)
+ assert (x +
+ f(y, x - z)).traverse(replace_visit) == (z +
+ f(y, as_number(0)))
+ assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y)
+
+ # Use traverse to collect symbols, method 1
+ function_symbols = set()
+ symbols = set()
+
+ def collect_symbols(s):
+ if s.op is Op.APPLY:
+ oper = s.data[0]
+ function_symbols.add(oper)
+ if oper in symbols:
+ symbols.remove(oper)
+ elif s.op is Op.SYMBOL and s not in function_symbols:
+ symbols.add(s)
+
+ (x + f(y, x - z)).traverse(collect_symbols)
+ assert function_symbols == {f}
+ assert symbols == {x, y, z}
+
+ # Use traverse to collect symbols, method 2
+ def collect_symbols2(expr, symbols):
+ if expr.op is Op.SYMBOL:
+ symbols.add(expr)
+
+ symbols = set()
+ (x + f(y, x - z)).traverse(collect_symbols2, symbols)
+ assert symbols == {x, y, z, f}
+
+ # Use traverse to partially collect symbols
+ def collect_symbols3(expr, symbols):
+ if expr.op is Op.APPLY:
+ # skip traversing function calls
+ return expr
+ if expr.op is Op.SYMBOL:
+ symbols.add(expr)
+
+ symbols = set()
+ (x + f(y, x - z)).traverse(collect_symbols3, symbols)
+ assert symbols == {x}
+
+ def test_linear_solve(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ z = as_symbol("z")
+
+ assert x.linear_solve(x) == (as_number(1), as_number(0))
+ assert (x + 1).linear_solve(x) == (as_number(1), as_number(1))
+ assert (2 * x).linear_solve(x) == (as_number(2), as_number(0))
+ assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3))
+ assert as_number(3).linear_solve(x) == (as_number(0), as_number(3))
+ assert y.linear_solve(x) == (as_number(0), y)
+ assert (y * z).linear_solve(x) == (as_number(0), y * z)
+
+ assert (x + y).linear_solve(x) == (as_number(1), y)
+ assert (z * x + y).linear_solve(x) == (z, y)
+ assert ((z + y) * x + y).linear_solve(x) == (z + y, y)
+ assert (z * y * x + y).linear_solve(x) == (z * y, y)
+
+ pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x))
+
+ def test_as_numer_denom(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ n = as_number(123)
+
+ assert as_numer_denom(x) == (x, as_number(1))
+ assert as_numer_denom(x / n) == (x, n)
+ assert as_numer_denom(n / x) == (n, x)
+ assert as_numer_denom(x / y) == (x, y)
+ assert as_numer_denom(x * y) == (x * y, as_number(1))
+ assert as_numer_denom(n + x / y) == (x + n * y, y)
+ assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x)
+
+ def test_polynomial_atoms(self):
+ x = as_symbol("x")
+ y = as_symbol("y")
+ n = as_number(123)
+
+ assert x.polynomial_atoms() == {x}
+ assert n.polynomial_atoms() == set()
+ assert (y[x]).polynomial_atoms() == {y[x]}
+ assert (y(x)).polynomial_atoms() == {y(x)}
+ assert (y(x) + x).polynomial_atoms() == {y(x), x}
+ assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]}
+ assert (y(x)**x).polynomial_atoms() == {y(x)}
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py
new file mode 100644
index 00000000..83aaf6c9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/test_value_attrspec.py
@@ -0,0 +1,14 @@
+import os
+import pytest
+
+from . import util
+
+class TestValueAttr(util.F2PyTest):
+ sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")]
+
+ # gh-21665
+ def test_long_long_map(self):
+ inp = 2
+ out = self.module.fortfuncs.square(inp)
+ exp_out = 4
+ assert out == exp_out
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/tests/util.py b/venv/lib/python3.9/site-packages/numpy/f2py/tests/util.py
new file mode 100644
index 00000000..1534c4e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/tests/util.py
@@ -0,0 +1,419 @@
+"""
+Utility functions for
+
+- building and importing modules at test time, using a temporary location
+- detecting whether compilers are present
+- determining paths to tests
+
+"""
+import os
+import sys
+import subprocess
+import tempfile
+import shutil
+import atexit
+import textwrap
+import re
+import pytest
+import contextlib
+import numpy
+
+from pathlib import Path
+from numpy.compat import asbytes, asstr
+from numpy.testing import temppath, IS_WASM
+from importlib import import_module
+
+#
+# Maintaining a temporary module directory
+#
+
+_module_dir = None
+_module_num = 5403
+
+
+def _cleanup():
+ global _module_dir
+ if _module_dir is not None:
+ try:
+ sys.path.remove(_module_dir)
+ except ValueError:
+ pass
+ try:
+ shutil.rmtree(_module_dir)
+ except OSError:
+ pass
+ _module_dir = None
+
+
+def get_module_dir():
+ global _module_dir
+ if _module_dir is None:
+ _module_dir = tempfile.mkdtemp()
+ atexit.register(_cleanup)
+ if _module_dir not in sys.path:
+ sys.path.insert(0, _module_dir)
+ return _module_dir
+
+
+def get_temp_module_name():
+ # Assume single-threaded, and the module dir usable only by this thread
+ global _module_num
+ get_module_dir()
+ name = "_test_ext_module_%d" % _module_num
+ _module_num += 1
+ if name in sys.modules:
+ # this should not be possible, but check anyway
+ raise RuntimeError("Temporary module name already in use.")
+ return name
+
+
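+# Cache results keyed by the repr of the arguments, so each module is built at
+# most once per test run; exceptions are cached and re-raised as well, making
+# repeated failing builds fail fast instead of recompiling.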
+def _memoize(func):
+ memo = {}
+
+ def wrapper(*a, **kw):
+ key = repr((a, kw))
+ if key not in memo:
+ try:
+ memo[key] = func(*a, **kw)
+ except Exception as e:
+ memo[key] = e
+ raise
+ ret = memo[key]
+ if isinstance(ret, Exception):
+ raise ret
+ return ret
+
+ wrapper.__name__ = func.__name__
+ return wrapper
+
+
+#
+# Building modules
+#
+
+
+@_memoize
+def build_module(source_files, options=[], skip=[], only=[], module_name=None):
+ """
+    Compile and import an f2py module, built from the given files.
+
+ """
+
+ code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()"
+
+ d = get_module_dir()
+
+ # Copy files
+ dst_sources = []
+ f2py_sources = []
+ for fn in source_files:
+ if not os.path.isfile(fn):
+ raise RuntimeError("%s is not a file" % fn)
+ dst = os.path.join(d, os.path.basename(fn))
+ shutil.copyfile(fn, dst)
+ dst_sources.append(dst)
+
+ base, ext = os.path.splitext(dst)
+ if ext in (".f90", ".f", ".c", ".pyf"):
+ f2py_sources.append(dst)
+
+ assert f2py_sources
+
+ # Prepare options
+ if module_name is None:
+ module_name = get_temp_module_name()
+ f2py_opts = ["-c", "-m", module_name] + options + f2py_sources
+ if skip:
+ f2py_opts += ["skip:"] + skip
+ if only:
+ f2py_opts += ["only:"] + only
+
+ # Build
+ cwd = os.getcwd()
+ try:
+ os.chdir(d)
+ cmd = [sys.executable, "-c", code] + f2py_opts
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out, err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError("Running f2py failed: %s\n%s" %
+ (cmd[4:], asstr(out)))
+ finally:
+ os.chdir(cwd)
+
+ # Partial cleanup
+ for fn in dst_sources:
+ os.unlink(fn)
+
+ # Import
+ return import_module(module_name)
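+
+# A minimal usage sketch for ``build_module`` (illustrative only; assumes
+# ``path`` names an existing Fortran source file and a compiler is present;
+# ``demo_ext`` and ``some_subroutine`` are hypothetical names):
+#
+#     mod = build_module([path], module_name="demo_ext")
+#     mod.some_subroutine(...)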
+
+
+@_memoize
+def build_code(source_code,
+ options=[],
+ skip=[],
+ only=[],
+ suffix=None,
+ module_name=None):
+ """
+ Compile and import Fortran code using f2py.
+
+ """
+ if suffix is None:
+ suffix = ".f"
+ with temppath(suffix=suffix) as path:
+ with open(path, "w") as f:
+ f.write(source_code)
+ return build_module([path],
+ options=options,
+ skip=skip,
+ only=only,
+ module_name=module_name)
+
+
+#
+# Check if compilers are available at all...
+#
+
+_compiler_status = None
+
+
+def _get_compiler_status():
+ global _compiler_status
+ if _compiler_status is not None:
+ return _compiler_status
+
+ _compiler_status = (False, False, False)
+ if IS_WASM:
+ # Can't run compiler from inside WASM.
+ return _compiler_status
+
+ # XXX: this is really ugly. But I don't know how to invoke Distutils
+ # in a safer way...
+ code = textwrap.dedent(f"""\
+ import os
+ import sys
+ sys.path = {repr(sys.path)}
+
+ def configuration(parent_name='',top_path=None):
+ global config
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('', parent_name, top_path)
+ return config
+
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
+
+ config_cmd = config.get_config_cmd()
+ have_c = config_cmd.try_compile('void foo() {{}}')
+ print('COMPILERS:%%d,%%d,%%d' %% (have_c,
+ config.have_f77c(),
+ config.have_f90c()))
+ sys.exit(99)
+ """)
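+    # Note: the f-string above has already interpolated ``sys.path``; the
+    # old-style '%' substitution below only collapses the doubled '%%'
+    # escapes back to single '%' in the generated script.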
+ code = code % dict(syspath=repr(sys.path))
+
+ tmpdir = tempfile.mkdtemp()
+ try:
+ script = os.path.join(tmpdir, "setup.py")
+
+ with open(script, "w") as f:
+ f.write(code)
+
+ cmd = [sys.executable, "setup.py", "config"]
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ cwd=tmpdir)
+ out, err = p.communicate()
+ finally:
+ shutil.rmtree(tmpdir)
+
+ m = re.search(br"COMPILERS:(\d+),(\d+),(\d+)", out)
+ if m:
+ _compiler_status = (
+ bool(int(m.group(1))),
+ bool(int(m.group(2))),
+ bool(int(m.group(3))),
+ )
+ # Finished
+ return _compiler_status
+
+
+def has_c_compiler():
+ return _get_compiler_status()[0]
+
+
+def has_f77_compiler():
+ return _get_compiler_status()[1]
+
+
+def has_f90_compiler():
+ return _get_compiler_status()[2]
+
+
+#
+# Building with distutils
+#
+
+
+@_memoize
+def build_module_distutils(source_files, config_code, module_name, **kw):
+ """
+ Build a module via distutils and import it.
+
+ """
+ d = get_module_dir()
+
+ # Copy files
+ dst_sources = []
+ for fn in source_files:
+ if not os.path.isfile(fn):
+ raise RuntimeError("%s is not a file" % fn)
+ dst = os.path.join(d, os.path.basename(fn))
+ shutil.copyfile(fn, dst)
+ dst_sources.append(dst)
+
+ # Build script
+ config_code = textwrap.dedent(config_code).replace("\n", "\n ")
+
+ code = fr"""
+import os
+import sys
+sys.path = {repr(sys.path)}
+
+def configuration(parent_name='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('', parent_name, top_path)
+ {config_code}
+ return config
+
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
+ """
+ script = os.path.join(d, get_temp_module_name() + ".py")
+ dst_sources.append(script)
+ with open(script, "wb") as f:
+ f.write(asbytes(code))
+
+ # Build
+ cwd = os.getcwd()
+ try:
+ os.chdir(d)
+ cmd = [sys.executable, script, "build_ext", "-i"]
+ p = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out, err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError("Running distutils build failed: %s\n%s" %
+ (cmd[4:], asstr(out)))
+ finally:
+ os.chdir(cwd)
+
+ # Partial cleanup
+ for fn in dst_sources:
+ os.unlink(fn)
+
+ # Import
+ __import__(module_name)
+ return sys.modules[module_name]
+
+
+#
+# Unittest convenience
+#
+
+
+class F2PyTest:
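+    """Base class for f2py build-and-run tests.
+
+    Subclasses set ``sources`` (paths to Fortran/C/.pyf files) and/or
+    ``code`` (an inline source string written out with ``suffix``);
+    ``setup_method`` compiles them and exposes the resulting extension as
+    ``self.module``. A minimal sketch (illustrative only)::
+
+        class TestInout(F2PyTest):
+            sources = [getpath("tests", "src", "regression", "inout.f90")]
+
+            def test_inout(self):
+                x = numpy.arange(3, dtype=numpy.float32)
+                self.module.foo(x)
+    """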
+ code = None
+ sources = None
+ options = []
+ skip = []
+ only = []
+ suffix = ".f"
+ module = None
+
+ @property
+ def module_name(self):
+ cls = type(self)
+ return f'_{cls.__module__.rsplit(".",1)[-1]}_{cls.__name__}_ext_module'
+
+ def setup_method(self):
+ if sys.platform == "win32":
+ pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)")
+
+ if self.module is not None:
+ return
+
+ # Check compiler availability first
+ if not has_c_compiler():
+ pytest.skip("No C compiler available")
+
+ codes = []
+ if self.sources:
+ codes.extend(self.sources)
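+        # for inline ``code`` the suffix stands in for a filename, so the
+        # loop below can still decide which compilers are required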
+ if self.code is not None:
+ codes.append(self.suffix)
+
+ needs_f77 = False
+ needs_f90 = False
+ needs_pyf = False
+ for fn in codes:
+ if str(fn).endswith(".f"):
+ needs_f77 = True
+ elif str(fn).endswith(".f90"):
+ needs_f90 = True
+ elif str(fn).endswith(".pyf"):
+ needs_pyf = True
+ if needs_f77 and not has_f77_compiler():
+ pytest.skip("No Fortran 77 compiler available")
+ if needs_f90 and not has_f90_compiler():
+ pytest.skip("No Fortran 90 compiler available")
+ if needs_pyf and not (has_f90_compiler() or has_f77_compiler()):
+ pytest.skip("No Fortran compiler available")
+
+ # Build the module
+ if self.code is not None:
+ self.module = build_code(
+ self.code,
+ options=self.options,
+ skip=self.skip,
+ only=self.only,
+ suffix=self.suffix,
+ module_name=self.module_name,
+ )
+
+ if self.sources is not None:
+ self.module = build_module(
+ self.sources,
+ options=self.options,
+ skip=self.skip,
+ only=self.only,
+ module_name=self.module_name,
+ )
+
+
+#
+# Helper functions
+#
+
+
+def getpath(*a):
+    # numpy.f2py package directory
+ d = Path(numpy.f2py.__file__).parent.resolve()
+ return d.joinpath(*a)
+
+
+@contextlib.contextmanager
+def switchdir(path):
+ curpath = Path.cwd()
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ os.chdir(curpath)
diff --git a/venv/lib/python3.9/site-packages/numpy/f2py/use_rules.py b/venv/lib/python3.9/site-packages/numpy/f2py/use_rules.py
new file mode 100644
index 00000000..f1b71e83
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/f2py/use_rules.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+"""
+
+Build 'use others module data' mechanism for f2py2e.
+
+Unfinished.
+
+Copyright 2000 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+$Date: 2000/09/10 12:35:43 $
+Pearu Peterson
+
+"""
+__version__ = "$Revision: 1.3 $"[10:-1]
+
+f2py_version = 'See `f2py -v`'
+
+
+from .auxfuncs import (
+ applyrules, dictappend, gentitle, hasnote, outmess
+)
+
+
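+# Template fragments: the '#name#'-style placeholders are expanded by
+# ``applyrules`` from the dictionaries built in ``buildusevar`` below.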
+usemodule_rules = {
+ 'body': """
+#begintitle#
+static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\
+\t #name# = get_#name#()\\n\\
+Arguments:\\n\\
+#docstr#\";
+extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#);
+static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) {
+/*#decl#*/
+\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail;
+printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#));
+\treturn Py_BuildValue(\"\");
+capi_fail:
+\treturn NULL;
+}
+""",
+ 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},',
+ 'need': ['F_MODFUNC']
+}
+
+################
+
+
+def buildusevars(m, r):
+ ret = {}
+ outmess(
+ '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name']))
+ varsmap = {}
+ revmap = {}
+ if 'map' in r:
+ for k in r['map'].keys():
+ if r['map'][k] in revmap:
+ outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % (
+ r['map'][k], k, revmap[r['map'][k]]))
+ else:
+ revmap[r['map'][k]] = k
+ if 'only' in r and r['only']:
+ for v in r['map'].keys():
+ if r['map'][v] in m['vars']:
+
+ if revmap[r['map'][v]] == v:
+ varsmap[v] = r['map'][v]
+ else:
+ outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' %
+ (v, r['map'][v]))
+ else:
+ outmess(
+ '\t\t\tNo definition for variable "%s=>%s". Skipping.\n' % (v, r['map'][v]))
+ else:
+ for v in m['vars'].keys():
+ if v in revmap:
+ varsmap[v] = revmap[v]
+ else:
+ varsmap[v] = v
+ for v in varsmap.keys():
+ ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name']))
+ return ret
+
+
+def buildusevar(name, realname, vars, usemodulename):
+ outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % (
+ name, realname))
+ ret = {}
+ vrd = {'name': name,
+ 'realname': realname,
+ 'REALNAME': realname.upper(),
+ 'usemodulename': usemodulename,
+ 'USEMODULENAME': usemodulename.upper(),
+ 'texname': name.replace('_', '\\_'),
+ 'begintitle': gentitle('%s=>%s' % (name, realname)),
+ 'endtitle': gentitle('end of %s=>%s' % (name, realname)),
+ 'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename)
+ }
+ nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv',
+ 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'}
+ vrd['texnamename'] = name
+ for i in nummap.keys():
+ vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i])
+ if hasnote(vars[realname]):
+ vrd['note'] = vars[realname]['note']
+ rd = dictappend({}, vrd)
+
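+    # leftover debugging output (this module is marked "Unfinished" above)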
+ print(name, realname, vars[realname])
+ ret = applyrules(usemodule_rules, rd)
+ return ret
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/__init__.py b/venv/lib/python3.9/site-packages/numpy/fft/__init__.py
new file mode 100644
index 00000000..fd5e4758
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/__init__.py
@@ -0,0 +1,212 @@
+"""
+Discrete Fourier Transform (:mod:`numpy.fft`)
+=============================================
+
+.. currentmodule:: numpy.fft
+
+The SciPy module `scipy.fft` is a more comprehensive superset
+of ``numpy.fft``, which includes only a basic set of routines.
+
+Standard FFTs
+-------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fft Discrete Fourier transform.
+ ifft Inverse discrete Fourier transform.
+ fft2 Discrete Fourier transform in two dimensions.
+ ifft2 Inverse discrete Fourier transform in two dimensions.
+ fftn Discrete Fourier transform in N-dimensions.
+ ifftn Inverse discrete Fourier transform in N dimensions.
+
+Real FFTs
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ rfft Real discrete Fourier transform.
+ irfft Inverse real discrete Fourier transform.
+ rfft2 Real discrete Fourier transform in two dimensions.
+ irfft2 Inverse real discrete Fourier transform in two dimensions.
+ rfftn Real discrete Fourier transform in N dimensions.
+ irfftn Inverse real discrete Fourier transform in N dimensions.
+
+Hermitian FFTs
+--------------
+
+.. autosummary::
+ :toctree: generated/
+
+ hfft Hermitian discrete Fourier transform.
+ ihfft Inverse Hermitian discrete Fourier transform.
+
+Helper routines
+---------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fftfreq Discrete Fourier Transform sample frequencies.
+ rfftfreq DFT sample frequencies (for usage with rfft, irfft).
+ fftshift Shift zero-frequency component to center of spectrum.
+ ifftshift Inverse of fftshift.
+
+
+Background information
+----------------------
+
+Fourier analysis is fundamentally a method for expressing a function as a
+sum of periodic components, and for recovering the function from those
+components. When both the function and its Fourier transform are
+replaced with discretized counterparts, it is called the discrete Fourier
+transform (DFT). The DFT has become a mainstay of numerical computing in
+part because of a very fast algorithm for computing it, called the Fast
+Fourier Transform (FFT), which was known to Gauss (1805) and was brought
+to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
+provide an accessible introduction to Fourier analysis and its
+applications.
+
+Because the discrete Fourier transform separates its input into
+components that contribute at discrete frequencies, it has a great number
+of applications in digital signal processing, e.g., for filtering, and in
+this context the discretized input to the transform is customarily
+referred to as a *signal*, which exists in the *time domain*. The output
+is called a *spectrum* or *transform* and exists in the *frequency
+domain*.
+
+Implementation details
+----------------------
+
+There are many ways to define the DFT, varying in the sign of the
+exponent, normalization, etc. In this implementation, the DFT is defined
+as
+
+.. math::
+ A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
+ \\qquad k = 0,\\ldots,n-1.
+
+The DFT is in general defined for complex inputs and outputs, and a
+single-frequency component at linear frequency :math:`f` is
+represented by a complex exponential
+:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
+is the sampling interval.
+
+The values in the result follow so-called "standard" order: If ``A =
+fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
+the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
+contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
+negative-frequency terms, in order of decreasingly negative frequency.
+For an even number of input points, ``A[n/2]`` represents both positive and
+negative Nyquist frequency, and is also purely real for real input. For
+an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
+frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
+The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
+of corresponding elements in the output. The routine
+``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
+zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
+that shift.
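+
+For example, for ``n = 4`` (values as printed by NumPy)::
+
+    >>> import numpy as np
+    >>> np.fft.fftfreq(4)
+    array([ 0.  ,  0.25, -0.5 , -0.25])
+    >>> np.fft.fftshift(np.fft.fftfreq(4))
+    array([-0.5 , -0.25,  0.  ,  0.25])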
+
+When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
+is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
+The phase spectrum is obtained by ``np.angle(A)``.
+
+The inverse DFT is defined as
+
+.. math::
+ a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
+ \\qquad m = 0,\\ldots,n-1.
+
+It differs from the forward transform by the sign of the exponential
+argument and the default normalization by :math:`1/n`.
+
+Type Promotion
+--------------
+
+`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
+``complex128`` arrays respectively. For an FFT implementation that does not
+promote input arrays, see `scipy.fftpack`.
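+
+For example::
+
+    >>> import numpy as np
+    >>> np.fft.fft(np.ones(4, dtype=np.float32)).dtype
+    dtype('complex128')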
+
+Normalization
+-------------
+
+The argument ``norm`` indicates which direction of the pair of direct/inverse
+transforms is scaled and with what normalization factor.
+The default normalization (``"backward"``) has the direct (forward) transforms
+unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is
+possible to obtain unitary transforms by setting the keyword argument ``norm``
+to ``"ortho"`` so that both direct and inverse transforms are scaled by
+:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to
+``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse
+transforms unscaled (i.e. exactly opposite to the default ``"backward"``).
+`None` is an alias of the default option ``"backward"`` for backward
+compatibility.
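+
+For instance, for a length-4 input of ones, the zero-frequency term comes out
+as ``4`` (backward), ``4/sqrt(4) = 2`` (ortho) and ``4/4 = 1`` (forward)::
+
+    >>> import numpy as np
+    >>> x = np.ones(4)
+    >>> [np.fft.fft(x, norm=n)[0].real for n in ("backward", "ortho", "forward")]
+    [4.0, 2.0, 1.0]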
+
+Real and Hermitian transforms
+-----------------------------
+
+When the input is purely real, its transform is Hermitian, i.e., the
+component at frequency :math:`f_k` is the complex conjugate of the
+component at frequency :math:`-f_k`, which means that for real
+inputs there is no information in the negative frequency components that
+is not already available from the positive frequency components.
+The family of `rfft` functions is
+designed to operate on real inputs, and exploits this symmetry by
+computing only the positive frequency components, up to and including the
+Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
+output points. The inverses of this family assume the same symmetry of
+their input, and for an output of ``n`` points use ``n/2+1`` input points.
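+
+For example::
+
+    >>> import numpy as np
+    >>> np.fft.rfft(np.ones(8)).shape
+    (5,)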
+
+Correspondingly, when the spectrum is purely real, the signal is
+Hermitian. The `hfft` family of functions exploits this symmetry by
+using ``n/2+1`` complex points in the input (time) domain for ``n`` real
+points in the frequency domain.
+
+In higher dimensions, FFTs are used, e.g., for image analysis and
+filtering. The computational efficiency of the FFT means that it can
+also be a faster way to compute large convolutions, using the property
+that a convolution in the time domain is equivalent to a point-by-point
+multiplication in the frequency domain.
+
+Higher dimensions
+-----------------
+
+In two dimensions, the DFT is defined as
+
+.. math::
+ A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
+ a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
+ \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
+
+which extends in the obvious way to higher dimensions, and the inverses
+in higher dimensions also extend in the same way.
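+
+For example::
+
+    >>> import numpy as np
+    >>> np.fft.fft2(np.ones((3, 5))).shape
+    (3, 5)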
+
+References
+----------
+
+.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+ machine calculation of complex Fourier series," *Math. Comput.*
+ 19: 297-301.
+
+.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
+ 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
+ 12-13. Cambridge Univ. Press, Cambridge, UK.
+
+Examples
+--------
+
+For examples, see the various functions.
+
+"""
+
+from . import _pocketfft, helper
+from ._pocketfft import *
+from .helper import *
+
+__all__ = _pocketfft.__all__.copy()
+__all__ += helper.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/fft/__init__.pyi
new file mode 100644
index 00000000..5518aac1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/__init__.pyi
@@ -0,0 +1,29 @@
+from numpy._pytesttester import PytestTester
+
+from numpy.fft._pocketfft import (
+ fft as fft,
+ ifft as ifft,
+ rfft as rfft,
+ irfft as irfft,
+ hfft as hfft,
+ ihfft as ihfft,
+ rfftn as rfftn,
+ irfftn as irfftn,
+ rfft2 as rfft2,
+ irfft2 as irfft2,
+ fft2 as fft2,
+ ifft2 as ifft2,
+ fftn as fftn,
+ ifftn as ifftn,
+)
+
+from numpy.fft.helper import (
+ fftshift as fftshift,
+ ifftshift as ifftshift,
+ fftfreq as fftfreq,
+ rfftfreq as rfftfreq,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.py b/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.py
new file mode 100644
index 00000000..ad69f7c8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.py
@@ -0,0 +1,1424 @@
+"""
+Discrete Fourier Transforms
+
+Routines in this module:
+
+fft(a, n=None, axis=-1, norm="backward")
+ifft(a, n=None, axis=-1, norm="backward")
+rfft(a, n=None, axis=-1, norm="backward")
+irfft(a, n=None, axis=-1, norm="backward")
+hfft(a, n=None, axis=-1, norm="backward")
+ihfft(a, n=None, axis=-1, norm="backward")
+fftn(a, s=None, axes=None, norm="backward")
+ifftn(a, s=None, axes=None, norm="backward")
+rfftn(a, s=None, axes=None, norm="backward")
+irfftn(a, s=None, axes=None, norm="backward")
+fft2(a, s=None, axes=(-2,-1), norm="backward")
+ifft2(a, s=None, axes=(-2, -1), norm="backward")
+rfft2(a, s=None, axes=(-2,-1), norm="backward")
+irfft2(a, s=None, axes=(-2, -1), norm="backward")
+
+i = inverse transform
+r = transform of purely real data
+h = Hermitian transform
+n = n-dimensional transform
+2 = 2-dimensional transform
+(Note: 2D routines are just nD routines with different default
+behavior.)
+
+"""
+__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
+ 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
+
+import functools
+
+from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
+from . import _pocketfft_internal as pfi
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.fft')
+
+
+# `inv_norm` is a float by which the result of the transform needs to be
+# divided. This replaces the original, more intuitive `fct` parameter to avoid
+# divisions by zero (or alternatively additional checks) in the case of
+# zero-length axes during its computation.
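+# For example, fft(..., norm="ortho") obtains inv_norm = sqrt(n) from
+# _get_forward_norm below, so the raw transform is divided by sqrt(n); the
+# default "backward" convention yields inv_norm = 1, i.e. no scaling of the
+# forward transform.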
+def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
+ axis = normalize_axis_index(axis, a.ndim)
+ if n is None:
+ n = a.shape[axis]
+
+ fct = 1/inv_norm
+
+ if a.shape[axis] != n:
+ s = list(a.shape)
+ index = [slice(None)]*len(s)
+ if s[axis] > n:
+ index[axis] = slice(0, n)
+ a = a[tuple(index)]
+ else:
+ index[axis] = slice(0, s[axis])
+ s[axis] = n
+ z = zeros(s, a.dtype.char)
+ z[tuple(index)] = a
+ a = z
+
+ if axis == a.ndim-1:
+ r = pfi.execute(a, is_real, is_forward, fct)
+ else:
+ a = swapaxes(a, axis, -1)
+ r = pfi.execute(a, is_real, is_forward, fct)
+ r = swapaxes(r, axis, -1)
+ return r
+
+
+def _get_forward_norm(n, norm):
+ if n < 1:
+ raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
+
+ if norm is None or norm == "backward":
+ return 1
+ elif norm == "ortho":
+ return sqrt(n)
+ elif norm == "forward":
+ return n
+ raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+ '"ortho" or "forward".')
+
+
+def _get_backward_norm(n, norm):
+ if n < 1:
+ raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
+
+ if norm is None or norm == "backward":
+ return n
+ elif norm == "ortho":
+ return sqrt(n)
+ elif norm == "forward":
+ return 1
+ raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+ '"ortho" or "forward".')
+
+
+_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",
+ "ortho": "ortho", "forward": "backward"}
+
+
+def _swap_direction(norm):
+ try:
+ return _SWAP_DIRECTION_MAP[norm]
+ except KeyError:
+ raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+ '"ortho" or "forward".') from None
+
+
+def _fft_dispatcher(a, n=None, axis=None, norm=None):
+ return (a,)
+
+
+@array_function_dispatch(_fft_dispatcher)
+def fft(a, n=None, axis=-1, norm=None):
+ """
+ Compute the one-dimensional discrete Fourier Transform.
+
+ This function computes the one-dimensional *n*-point discrete Fourier
+ Transform (DFT) with the efficient Fast Fourier Transform (FFT)
+ algorithm [CT].
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, can be complex.
+ n : int, optional
+ Length of the transformed axis of the output.
+ If `n` is smaller than the length of the input, the input is cropped.
+ If it is larger, the input is padded with zeros. If `n` is not given,
+ the length of the input along the axis specified by `axis` is used.
+ axis : int, optional
+ Axis over which to compute the FFT. If not given, the last axis is
+ used.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is not a valid axis of `a`.
+
+ See Also
+ --------
+ numpy.fft : for definition of the DFT and conventions used.
+ ifft : The inverse of `fft`.
+ fft2 : The two-dimensional FFT.
+ fftn : The *n*-dimensional FFT.
+ rfftn : The *n*-dimensional FFT of real input.
+ fftfreq : Frequency bins for given FFT parameters.
+
+ Notes
+ -----
+ FFT (Fast Fourier Transform) refers to a way the discrete Fourier
+ Transform (DFT) can be calculated efficiently, by using symmetries in the
+ calculated terms. The symmetry is highest when `n` is a power of 2, and
+ the transform is therefore most efficient for these sizes.
+
+ The DFT is defined, with the conventions used in this implementation, in
+ the documentation for the `numpy.fft` module.
+
+ References
+ ----------
+ .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+ machine calculation of complex Fourier series," *Math. Comput.*
+ 19: 297-301.
+
+ Examples
+ --------
+ >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
+ array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
+ 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
+ -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
+ 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
+
+ In this example, real input has an FFT which is Hermitian, i.e., symmetric
+ in the real part and anti-symmetric in the imaginary part, as described in
+ the `numpy.fft` documentation:
+
+ >>> import matplotlib.pyplot as plt
+ >>> t = np.arange(256)
+ >>> sp = np.fft.fft(np.sin(t))
+ >>> freq = np.fft.fftfreq(t.shape[-1])
+ >>> plt.plot(freq, sp.real, freq, sp.imag)
+ [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.show()
+
+ """
+ a = asarray(a)
+ if n is None:
+ n = a.shape[axis]
+ inv_norm = _get_forward_norm(n, norm)
+ output = _raw_fft(a, n, axis, False, True, inv_norm)
+ return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ifft(a, n=None, axis=-1, norm=None):
+ """
+ Compute the one-dimensional inverse discrete Fourier Transform.
+
+ This function computes the inverse of the one-dimensional *n*-point
+ discrete Fourier transform computed by `fft`. In other words,
+ ``ifft(fft(a)) == a`` to within numerical accuracy.
+ For a general description of the algorithm and definitions,
+ see `numpy.fft`.
+
+ The input should be ordered in the same way as is returned by `fft`,
+ i.e.,
+
+ * ``a[0]`` should contain the zero frequency term,
+ * ``a[1:n//2]`` should contain the positive-frequency terms,
+ * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in
+ increasing order starting from the most negative frequency.
+
+ For an even number of input points, ``A[n//2]`` represents the sum of
+ the values at the positive and negative Nyquist frequencies, as the two
+ are aliased together. See `numpy.fft` for details.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, can be complex.
+ n : int, optional
+ Length of the transformed axis of the output.
+ If `n` is smaller than the length of the input, the input is cropped.
+ If it is larger, the input is padded with zeros. If `n` is not given,
+ the length of the input along the axis specified by `axis` is used.
+ See notes about padding issues.
+ axis : int, optional
+ Axis over which to compute the inverse DFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is not a valid axis of `a`.
+
+ See Also
+ --------
+ numpy.fft : An introduction, with definitions and general explanations.
+ fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
+ ifft2 : The two-dimensional inverse FFT.
+ ifftn : The n-dimensional inverse FFT.
+
+ Notes
+ -----
+ If the input parameter `n` is larger than the size of the input, the input
+ is padded by appending zeros at the end. Even though this is the common
+ approach, it might lead to surprising results. If a different padding is
+ desired, it must be performed before calling `ifft`.
+
+ Examples
+ --------
+ >>> np.fft.ifft([0, 4, 0, 0])
+ array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
+
+ Create and plot a band-limited signal with random phases:
+
+ >>> import matplotlib.pyplot as plt
+ >>> t = np.arange(400)
+ >>> n = np.zeros((400,), dtype=complex)
+ >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
+ >>> s = np.fft.ifft(n)
+ >>> plt.plot(t, s.real, label='real')
+ [<matplotlib.lines.Line2D object at ...>]
+ >>> plt.plot(t, s.imag, '--', label='imaginary')
+ [<matplotlib.lines.Line2D object at ...>]
+ >>> plt.legend()
+ <matplotlib.legend.Legend object at ...>
+ >>> plt.show()
+
+ """
+ a = asarray(a)
+ if n is None:
+ n = a.shape[axis]
+ inv_norm = _get_backward_norm(n, norm)
+ output = _raw_fft(a, n, axis, False, False, inv_norm)
+ return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def rfft(a, n=None, axis=-1, norm=None):
+ """
+ Compute the one-dimensional discrete Fourier Transform for real input.
+
+ This function computes the one-dimensional *n*-point discrete Fourier
+ Transform (DFT) of a real-valued array by means of an efficient algorithm
+ called the Fast Fourier Transform (FFT).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array
+ n : int, optional
+ Number of points along transformation axis in the input to use.
+ If `n` is smaller than the length of the input, the input is cropped.
+ If it is larger, the input is padded with zeros. If `n` is not given,
+ the length of the input along the axis specified by `axis` is used.
+ axis : int, optional
+ Axis over which to compute the FFT. If not given, the last axis is
+ used.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+ If `n` is odd, the length is ``(n+1)/2``.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is not a valid axis of `a`.
+
+ See Also
+ --------
+ numpy.fft : For definition of the DFT and conventions used.
+ irfft : The inverse of `rfft`.
+ fft : The one-dimensional FFT of general (complex) input.
+ fftn : The *n*-dimensional FFT.
+ rfftn : The *n*-dimensional FFT of real input.
+
+ Notes
+ -----
+ When the DFT is computed for purely real input, the output is
+ Hermitian-symmetric, i.e. the negative frequency terms are just the complex
+ conjugates of the corresponding positive-frequency terms, and the
+ negative-frequency terms are therefore redundant. This function does not
+ compute the negative frequency terms, and the length of the transformed
+ axis of the output is therefore ``n//2 + 1``.
+
+ When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
+ the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
+
+ If `n` is even, ``A[-1]`` contains the term representing both positive
+ and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
+ real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
+ the largest positive frequency (fs/2*(n-1)/n), and is complex in the
+ general case.
+
+ If the input `a` contains an imaginary part, it is silently discarded.
+
+ Examples
+ --------
+ >>> np.fft.fft([0, 1, 0, 0])
+ array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
+ >>> np.fft.rfft([0, 1, 0, 0])
+ array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
+
+ Notice how the final element of the `fft` output is the complex conjugate
+ of the second element, for real input. For `rfft`, this symmetry is
+ exploited to compute only the non-negative frequency terms.
+
+ """
+ a = asarray(a)
+ if n is None:
+ n = a.shape[axis]
+ inv_norm = _get_forward_norm(n, norm)
+ output = _raw_fft(a, n, axis, True, True, inv_norm)
+ return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def irfft(a, n=None, axis=-1, norm=None):
+ """
+ Computes the inverse of `rfft`.
+
+ This function computes the inverse of the one-dimensional *n*-point
+ discrete Fourier Transform of real input computed by `rfft`.
+ In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
+ accuracy. (See Notes below for why ``len(a)`` is necessary here.)
+
+ The input is expected to be in the form returned by `rfft`, i.e. the
+ real zero-frequency term followed by the complex positive frequency terms
+ in order of increasing frequency. Since the discrete Fourier Transform of
+ real input is Hermitian-symmetric, the negative frequency terms are taken
+ to be the complex conjugates of the corresponding positive frequency terms.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array.
+ n : int, optional
+ Length of the transformed axis of the output.
+ For `n` output points, ``n//2+1`` input points are necessary. If the
+ input is longer than this, it is cropped. If it is shorter than this,
+ it is padded with zeros. If `n` is not given, it is taken to be
+ ``2*(m-1)`` where ``m`` is the length of the input along the axis
+ specified by `axis`.
+ axis : int, optional
+ Axis over which to compute the inverse FFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ The length of the transformed axis is `n`, or, if `n` is not given,
+ ``2*(m-1)`` where ``m`` is the length of the transformed axis of the
+ input. To get an odd number of output points, `n` must be specified.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is not a valid axis of `a`.
+
+ See Also
+ --------
+ numpy.fft : For definition of the DFT and conventions used.
+ rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
+ fft : The one-dimensional FFT.
+ irfft2 : The inverse of the two-dimensional FFT of real input.
+ irfftn : The inverse of the *n*-dimensional FFT of real input.
+
+ Notes
+ -----
+ Returns the real valued `n`-point inverse discrete Fourier transform
+ of `a`, where `a` contains the non-negative frequency terms of a
+ Hermitian-symmetric sequence. `n` is the length of the result, not the
+ input.
+
+ If you specify an `n` such that `a` must be zero-padded or truncated, the
+ extra/removed values will be added/removed at high frequencies. One can
+ thus resample a series to `m` points via Fourier interpolation by:
+ ``a_resamp = irfft(rfft(a), m)``.
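+
+ As an illustrative sketch, note that under the default "backward"
+ normalization the inverse transform scales with the output length, so
+ recovering the original amplitudes after such a resampling requires an
+ explicit rescaling by ``m / len(a)``:
+
+ >>> a = np.array([0., 1., 0., -1.])  # one period of a sine
+ >>> a_resamp = np.fft.irfft(np.fft.rfft(a), 8) * (8 / len(a))
+ >>> np.allclose(a_resamp[::2], a)  # the original samples reappear
+ True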
+
+ The correct interpretation of the Hermitian input depends on the length of
+ the original data, as given by `n`. This is because each input shape could
+ correspond to either an odd or an even length signal. By default, `irfft`
+ assumes an even output length, which puts the last entry at the Nyquist
+ frequency, aliased with its symmetric counterpart. By Hermitian symmetry,
+ the value is thus treated as purely real. To avoid losing information, the
+ correct length of the real input **must** be given.
+
+ Examples
+ --------
+ >>> np.fft.ifft([1, -1j, -1, 1j])
+ array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
+ >>> np.fft.irfft([1, -1j, -1])
+ array([0., 1., 0., 0.])
+
+ Notice how the last term in the input to the ordinary `ifft` is the
+ complex conjugate of the second term, and the output has zero imaginary
+ part everywhere. When calling `irfft`, the negative frequencies are not
+ specified, and the output array is purely real.
+
+ """
+ a = asarray(a)
+ if n is None:
+ n = (a.shape[axis] - 1) * 2
+ inv_norm = _get_backward_norm(n, norm)
+ output = _raw_fft(a, n, axis, True, False, inv_norm)
+ return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def hfft(a, n=None, axis=-1, norm=None):
+ """
+ Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
+ spectrum.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array.
+ n : int, optional
+ Length of the transformed axis of the output. For `n` output
+ points, ``n//2 + 1`` input points are necessary. If the input is
+ longer than this, it is cropped. If it is shorter than this, it is
+ padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``
+ where ``m`` is the length of the input along the axis specified by
+ `axis`.
+ axis : int, optional
+ Axis over which to compute the FFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ The length of the transformed axis is `n`, or, if `n` is not given,
+ ``2*m - 2`` where ``m`` is the length of the transformed axis of
+ the input. To get an odd number of output points, `n` must be
+ specified, for instance as ``2*m - 1`` in the typical case.
+
+ Raises
+ ------
+ IndexError
+ If `axis` is not a valid axis of `a`.
+
+ See also
+ --------
+ rfft : Compute the one-dimensional FFT for real input.
+ ihfft : The inverse of `hfft`.
+
+ Notes
+ -----
+ `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+ opposite case: here the signal has Hermitian symmetry in the time
+ domain and is real in the frequency domain. So here it's `hfft` for
+ which you must supply the length of the result if it is to be odd:
+
+ * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+ * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+ The correct interpretation of the Hermitian input depends on the length of
+ the original data, as given by `n`. This is because each input shape could
+ correspond to either an odd or an even length signal. By default, `hfft`
+ assumes an even output length, which puts the last entry at the Nyquist
+ frequency, aliased with its symmetric counterpart. By Hermitian symmetry,
+ the value is thus treated as purely real. To avoid losing information, the
+ shape of the full signal **must** be given.
+
+ Examples
+ --------
+ >>> signal = np.array([1, 2, 3, 4, 3, 2])
+ >>> np.fft.fft(signal)
+ array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary
+ >>> np.fft.hfft(signal[:4]) # Input first half of signal
+ array([15., -4., 0., -1., 0., -4.])
+ >>> np.fft.hfft(signal, 6) # Input entire signal and truncate
+ array([15., -4., 0., -1., 0., -4.])
+
+
+ >>> signal = np.array([[1, 1.j], [-1.j, 2]])
+ >>> np.conj(signal.T) - signal # check Hermitian symmetry
+ array([[ 0.-0.j, -0.+0.j], # may vary
+ [ 0.+0.j, 0.-0.j]])
+ >>> freq_spectrum = np.fft.hfft(signal)
+ >>> freq_spectrum
+ array([[ 1., 1.],
+ [ 2., -2.]])
+
+ """
+ a = asarray(a)
+ if n is None:
+ n = (a.shape[axis] - 1) * 2
+ new_norm = _swap_direction(norm)
+ output = irfft(conjugate(a), n, axis, norm=new_norm)
+ return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ihfft(a, n=None, axis=-1, norm=None):
+ """
+ Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ n : int, optional
+ Length of the inverse FFT, the number of points along
+ transformation axis in the input to use. If `n` is smaller than
+ the length of the input, the input is cropped. If it is larger,
+ the input is padded with zeros. If `n` is not given, the length of
+ the input along the axis specified by `axis` is used.
+ axis : int, optional
+ Axis over which to compute the inverse FFT. If not given, the last
+ axis is used.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axis
+ indicated by `axis`, or the last one if `axis` is not specified.
+ The length of the transformed axis is ``n//2 + 1``.
+
+ See also
+ --------
+ hfft, irfft
+
+ Notes
+ -----
+ `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+ opposite case: here the signal has Hermitian symmetry in the time
+ domain and is real in the frequency domain. So here it's `hfft` for
+ which you must supply the length of the result if it is to be odd:
+
+ * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+ * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
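+
+ For instance, a minimal check of the odd-length round trip (the array
+ here is an arbitrary illustration):
+
+ >>> a = np.array([1.+0.j, 2.-1.j, 3.+2.j])
+ >>> np.allclose(np.fft.ihfft(np.fft.hfft(a, 2*len(a) - 1)), a)
+ True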
+
+ Examples
+ --------
+ >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
+ >>> np.fft.ifft(spectrum)
+ array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
+ >>> np.fft.ihfft(spectrum)
+ array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
+
+ """
+ a = asarray(a)
+ if n is None:
+ n = a.shape[axis]
+ new_norm = _swap_direction(norm)
+ output = conjugate(rfft(a, n, axis, norm=new_norm))
+ return output
+
+
+def _cook_nd_args(a, s=None, axes=None, invreal=0):
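+ # Normalize the (s, axes) pair for the n-dimensional transforms: default
+ # `s` to the input shape along `axes`, default `axes` to the last len(s)
+ # axes, and, when called for the inverse real transform (invreal=1) with
+ # no explicit `s`, recover an even-length last axis as 2*(m-1).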
+ if s is None:
+ shapeless = 1
+ if axes is None:
+ s = list(a.shape)
+ else:
+ s = take(a.shape, axes)
+ else:
+ shapeless = 0
+ s = list(s)
+ if axes is None:
+ axes = list(range(-len(s), 0))
+ if len(s) != len(axes):
+ raise ValueError("Shape and axes have different lengths.")
+ if invreal and shapeless:
+ s[-1] = (a.shape[axes[-1]] - 1) * 2
+ return s, axes
+
+
+def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
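+ # Compute an n-dimensional transform as repeated one-dimensional
+ # transforms of `function` along each requested axis, iterating over
+ # the axes in reverse order.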
+ a = asarray(a)
+ s, axes = _cook_nd_args(a, s, axes)
+ itl = list(range(len(axes)))
+ itl.reverse()
+ for ii in itl:
+ a = function(a, n=s[ii], axis=axes[ii], norm=norm)
+ return a
+
+
+def _fftn_dispatcher(a, s=None, axes=None, norm=None):
+ return (a,)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fftn(a, s=None, axes=None, norm=None):
+ """
+ Compute the N-dimensional discrete Fourier Transform.
+
+ This function computes the *N*-dimensional discrete Fourier Transform over
+ any number of axes in an *M*-dimensional array by means of the Fast Fourier
+ Transform (FFT).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, can be complex.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ This corresponds to ``n`` for ``fft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ Repeated indices in `axes` means that the transform over that axis is
+ performed multiple times.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `a`,
+ as explained in the parameters section above.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `a`.
+
+ See Also
+ --------
+ numpy.fft : Overall view of discrete Fourier transforms, with definitions
+ and conventions used.
+ ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
+ fft : The one-dimensional FFT, with definitions and conventions used.
+ rfftn : The *n*-dimensional FFT of real input.
+ fft2 : The two-dimensional FFT.
+ fftshift : Shifts zero-frequency terms to the center of the array.
+
+ Notes
+ -----
+ The output, analogously to `fft`, contains the term for zero frequency in
+ the low-order corner of all axes, the positive frequency terms in the
+ first half of all axes, the term for the Nyquist frequency in the middle
+ of all axes and the negative frequency terms in the second half of all
+ axes, in order of decreasingly negative frequency.
+
+ See `numpy.fft` for details, definitions and conventions used.
+
+ Examples
+ --------
+ >>> a = np.mgrid[:3, :3, :3][0]
+ >>> np.fft.fftn(a, axes=(1, 2))
+ array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[ 9.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[18.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]]])
+ >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
+ array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[-2.+0.j, -2.+0.j, -2.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]]])
+
+ >>> import matplotlib.pyplot as plt
+ >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+ ... 2 * np.pi * np.arange(200) / 34)
+ >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+ >>> FS = np.fft.fftn(S)
+ >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
+ <matplotlib.image.AxesImage object at 0x...>
+ >>> plt.show()
+
+ """
+ return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifftn(a, s=None, axes=None, norm=None):
+ """
+ Compute the N-dimensional inverse discrete Fourier Transform.
+
+ This function computes the inverse of the N-dimensional discrete
+ Fourier Transform over any number of axes in an M-dimensional array by
+ means of the Fast Fourier Transform (FFT). In other words,
+ ``ifftn(fftn(a)) == a`` to within numerical accuracy.
+ For a description of the definitions and conventions used, see `numpy.fft`.
+
+ The input, analogously to `ifft`, should be ordered in the same way as is
+ returned by `fftn`, i.e. it should have the term for zero frequency
+ in all axes in the low-order corner, the positive frequency terms in the
+ first half of all axes, the term for the Nyquist frequency in the middle
+ of all axes and the negative frequency terms in the second half of all
+ axes, in order of decreasingly negative frequency.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, can be complex.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ This corresponds to ``n`` for ``ifft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used. See notes for issue on `ifft` zero padding.
+ axes : sequence of ints, optional
+ Axes over which to compute the IFFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ Repeated indices in `axes` means that the inverse transform over that
+ axis is performed multiple times.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `a`,
+ as explained in the parameters section above.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `a`.
+
+ See Also
+ --------
+ numpy.fft : Overall view of discrete Fourier transforms, with definitions
+ and conventions used.
+ fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
+ ifft : The one-dimensional inverse FFT.
+ ifft2 : The two-dimensional inverse FFT.
+ ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+ of array.
+
+ Notes
+ -----
+ See `numpy.fft` for definitions and conventions used.
+
+ Zero-padding, analogously with `ifft`, is performed by appending zeros to
+ the input along the specified dimension. Although this is the common
+ approach, it might lead to surprising results. If another form of zero
+ padding is desired, it must be performed before `ifftn` is called.
+
+ Examples
+ --------
+ >>> a = np.eye(4)
+ >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+ array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
+
+
+ Create and plot an image with band-limited frequency content:
+
+ >>> import matplotlib.pyplot as plt
+ >>> n = np.zeros((200,200), dtype=complex)
+ >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+ >>> im = np.fft.ifftn(n).real
+ >>> plt.imshow(im)
+ <matplotlib.image.AxesImage object at 0x...>
+ >>> plt.show()
+
+ """
+ return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None):
+ """
+ Compute the 2-dimensional discrete Fourier Transform.
+
+ This function computes the *n*-dimensional discrete Fourier Transform
+ over any axes in an *M*-dimensional array by means of the
+ Fast Fourier Transform (FFT). By default, the transform is computed over
+ the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, can be complex
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ This corresponds to ``n`` for ``fft(x, n)``.
+ Along each axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last two
+ axes are used. A repeated index in `axes` means the transform over
+ that axis is performed multiple times. A one-element sequence means
+ that a one-dimensional FFT is performed.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or the last two axes if `axes` is not given.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length, or `axes` not given and
+ ``len(s) != 2``.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `a`.
+
+ See Also
+ --------
+ numpy.fft : Overall view of discrete Fourier transforms, with definitions
+ and conventions used.
+ ifft2 : The inverse two-dimensional FFT.
+ fft : The one-dimensional FFT.
+ fftn : The *n*-dimensional FFT.
+ fftshift : Shifts zero-frequency terms to the center of the array.
+ For two-dimensional input, swaps first and third quadrants, and second
+ and fourth quadrants.
+
+ Notes
+ -----
+ `fft2` is just `fftn` with a different default for `axes`.
+
+ The output, analogously to `fft`, contains the term for zero frequency in
+ the low-order corner of the transformed axes, the positive frequency terms
+ in the first half of these axes, the term for the Nyquist frequency in the
+ middle of the axes and the negative frequency terms in the second half of
+ the axes, in order of decreasingly negative frequency.
+
+ See `fftn` for details and a plotting example, and `numpy.fft` for
+ definitions and conventions used.
+
+
+ Examples
+ --------
+ >>> a = np.mgrid[:5, :5][0]
+ >>> np.fft.fft2(a)
+ array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary
+ 0. +0.j , 0. +0.j ],
+ [-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ]])
+
+ """
+ return _raw_fftnd(a, s, axes, fft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifft2(a, s=None, axes=(-2, -1), norm=None):
+ """
+ Compute the 2-dimensional inverse discrete Fourier Transform.
+
+ This function computes the inverse of the 2-dimensional discrete Fourier
+ Transform over any number of axes in an M-dimensional array by means of
+ the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
+ to within numerical accuracy. By default, the inverse transform is
+ computed over the last two axes of the input array.
+
+ The input, analogously to `ifft`, should be ordered in the same way as is
+ returned by `fft2`, i.e. it should have the term for zero frequency
+ in the low-order corner of the two axes, the positive frequency terms in
+ the first half of these axes, the term for the Nyquist frequency in the
+ middle of the axes and the negative frequency terms in the second half of
+ both axes, in order of decreasingly negative frequency.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, can be complex.
+ s : sequence of ints, optional
+ Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+ ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
+ Along each axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used. See notes for issue on `ifft` zero padding.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last two
+ axes are used. A repeated index in `axes` means the transform over
+ that axis is performed multiple times. A one-element sequence means
+ that a one-dimensional FFT is performed.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or the last two axes if `axes` is not given.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length, or `axes` not given and
+ ``len(s) != 2``.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `a`.
+
+ See Also
+ --------
+ numpy.fft : Overall view of discrete Fourier transforms, with definitions
+ and conventions used.
+ fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
+ ifftn : The inverse of the *n*-dimensional FFT.
+ fft : The one-dimensional FFT.
+ ifft : The one-dimensional inverse FFT.
+
+ Notes
+ -----
+ `ifft2` is just `ifftn` with a different default for `axes`.
+
+ See `ifftn` for details and a plotting example, and `numpy.fft` for
+ definition and conventions used.
+
+ Zero-padding, analogously with `ifft`, is performed by appending zeros to
+ the input along the specified dimension. Although this is the common
+ approach, it might lead to surprising results. If another form of zero
+ padding is desired, it must be performed before `ifft2` is called.
+
+ Examples
+ --------
+ >>> a = 4 * np.eye(4)
+ >>> np.fft.ifft2(a)
+ array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
+ [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+ [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
+
+ """
+ return _raw_fftnd(a, s, axes, ifft, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfftn(a, s=None, axes=None, norm=None):
+ """
+ Compute the N-dimensional discrete Fourier Transform for real input.
+
+ This function computes the N-dimensional discrete Fourier Transform over
+ any number of axes in an M-dimensional real array by means of the Fast
+ Fourier Transform (FFT). By default, all axes are transformed, with the
+ real transform performed over the last axis, while the remaining
+ transforms are complex.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, taken to be real.
+ s : sequence of ints, optional
+ Shape (length along each transformed axis) to use from the input.
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+ The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+ for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+ Along any axis, if the given shape is smaller than that of the input,
+ the input is cropped. If it is larger, the input is padded with zeros.
+ If `s` is not given, the shape of the input along the axes specified
+ by `axes` is used.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT. If not given, the last ``len(s)``
+ axes are used, or all axes if `s` is also not specified.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : complex ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `a`,
+ as explained in the parameters section above.
+ The length of the last axis transformed will be ``s[-1]//2+1``,
+ while the remaining transformed axes will have lengths according to
+ `s`, or unchanged from the input.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `a`.
+
+ See Also
+ --------
+ irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
+ of real input.
+ fft : The one-dimensional FFT, with definitions and conventions used.
+ rfft : The one-dimensional FFT of real input.
+ fftn : The n-dimensional FFT.
+ rfft2 : The two-dimensional FFT of real input.
+
+ Notes
+ -----
+ The transform for real input is performed over the last transformation
+ axis, as by `rfft`, then the transform over the remaining axes is
+ performed as by `fftn`. The order of the output is as for `rfft` for the
+ final transformation axis, and as for `fftn` for the remaining
+ transformation axes.
+
+ See `fft` for details, definitions and conventions used.
+
+ Examples
+ --------
+ >>> a = np.ones((2, 2, 2))
+ >>> np.fft.rfftn(a)
+ array([[[8.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
+
+ >>> np.fft.rfftn(a, axes=(2, 0))
+ array([[[4.+0.j, 0.+0.j], # may vary
+ [4.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
+
+ """
+ a = asarray(a)
+ s, axes = _cook_nd_args(a, s, axes)
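+ # Real-input transform over the last axis first (yielding s[-1]//2 + 1
+ # complex points along it), then full complex FFTs over the other axes.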
+ a = rfft(a, s[-1], axes[-1], norm)
+ for ii in range(len(axes)-1):
+ a = fft(a, s[ii], axes[ii], norm)
+ return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfft2(a, s=None, axes=(-2, -1), norm=None):
+ """
+ Compute the 2-dimensional FFT of a real array.
+
+ Parameters
+ ----------
+ a : array
+ Input array, taken to be real.
+ s : sequence of ints, optional
+ Shape of the FFT.
+ axes : sequence of ints, optional
+ Axes over which to compute the FFT.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : ndarray
+ The result of the real 2-D FFT.
+
+ See Also
+ --------
+ rfftn : Compute the N-dimensional discrete Fourier Transform for real
+ input.
+
+ Notes
+ -----
+ This is really just `rfftn` with different default behavior.
+ For more details see `rfftn`.
+
+ Examples
+ --------
+ >>> a = np.mgrid[:5, :5][0]
+ >>> np.fft.rfft2(a)
+ array([[ 50. +0.j , 0. +0.j , 0. +0.j ],
+ [-12.5+17.20477401j, 0. +0.j , 0. +0.j ],
+ [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ],
+ [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ],
+ [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]])
+ """
+ return rfftn(a, s, axes, norm)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfftn(a, s=None, axes=None, norm=None):
+ """
+ Computes the inverse of `rfftn`.
+
+ This function computes the inverse of the N-dimensional discrete
+ Fourier Transform for real input over any number of axes in an
+ M-dimensional array by means of the Fast Fourier Transform (FFT). In
+ other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
+ accuracy. (``a.shape`` is necessary here for the same reason that
+ ``len(a)`` is for `irfft`.)
+
+ The input should be ordered in the same way as is returned by `rfftn`,
+ i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
+ along all the other axes.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ s : sequence of ints, optional
+ Shape (length of each transformed axis) of the output
+ (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+ number of input points used along this axis, except for the last axis,
+ where ``s[-1]//2+1`` points of the input are used.
+ Along any axis, if the shape indicated by `s` is smaller than that of
+ the input, the input is cropped. If it is larger, the input is padded
+ with zeros. If `s` is not given, the shape of the input along the axes
+ specified by `axes` is used, except for the last axis, which is taken
+ to be ``2*(m-1)`` where ``m`` is the length of the input along that axis.
+ axes : sequence of ints, optional
+ Axes over which to compute the inverse FFT. If not given, the last
+ ``len(s)`` axes are used, or all axes if `s` is also not specified.
+ Repeated indices in `axes` means that the inverse transform over that
+ axis is performed multiple times.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : ndarray
+ The truncated or zero-padded input, transformed along the axes
+ indicated by `axes`, or by a combination of `s` and `a`,
+ as explained in the parameters section above.
+ The length of each transformed axis is as given by the corresponding
+ element of `s`, or the length of the input in every axis except for the
+ last one if `s` is not given. In the final transformed axis the length
+ of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
+ length of the final transformed axis of the input. To get an odd
+ number of output points in the final axis, `s` must be specified.
+
+ Raises
+ ------
+ ValueError
+ If `s` and `axes` have different length.
+ IndexError
+ If an element of `axes` is larger than the number of axes of `a`.
+
+ See Also
+ --------
+ rfftn : The forward n-dimensional FFT of real input,
+ of which `ifftn` is the inverse.
+ fft : The one-dimensional FFT, with definitions and conventions used.
+ irfft : The inverse of the one-dimensional FFT of real input.
+ irfft2 : The inverse of the two-dimensional FFT of real input.
+
+ Notes
+ -----
+ See `fft` for definitions and conventions used.
+
+ See `rfft` for definitions and conventions used for real input.
+
+ The correct interpretation of the Hermitian input depends on the shape of
+ the original data, as given by `s`. This is because each input shape could
+ correspond to either an odd or an even length signal. By default, `irfftn`
+ assumes an even output length, which puts the last entry at the Nyquist
+ frequency, aliased with its symmetric counterpart. When performing the
+ final complex-to-real transform, the last value is thus treated as purely
+ real. To avoid losing information, the correct shape of the real input
+ **must** be given.
+
+ Examples
+ --------
+ >>> a = np.zeros((3, 2, 2))
+ >>> a[0, 0, 0] = 3 * 2 * 2
+ >>> np.fft.irfftn(a)
+ array([[[1., 1.],
+ [1., 1.]],
+ [[1., 1.],
+ [1., 1.]],
+ [[1., 1.],
+ [1., 1.]]])
+
+ """
+ a = asarray(a)
+ s, axes = _cook_nd_args(a, s, axes, invreal=1)
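+ # Complex inverse transforms over the leading axes first, then a final
+ # complex-to-real inverse over the last axis restores s[-1] real points.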
+ for ii in range(len(axes)-1):
+ a = ifft(a, s[ii], axes[ii], norm)
+ a = irfft(a, s[-1], axes[-1], norm)
+ return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfft2(a, s=None, axes=(-2, -1), norm=None):
+ """
+ Computes the inverse of `rfft2`.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array
+ s : sequence of ints, optional
+ Shape of the real output to the inverse FFT.
+ axes : sequence of ints, optional
+ The axes over which to compute the inverse fft.
+ Default is the last two axes.
+ norm : {"backward", "ortho", "forward"}, optional
+ .. versionadded:: 1.10.0
+
+ Normalization mode (see `numpy.fft`). Default is "backward".
+ Indicates which direction of the forward/backward pair of transforms
+ is scaled and with what normalization factor.
+
+ .. versionadded:: 1.20.0
+
+ The "backward", "forward" values were added.
+
+ Returns
+ -------
+ out : ndarray
+ The result of the inverse real 2-D FFT.
+
+ See Also
+ --------
+ rfft2 : The forward two-dimensional FFT of real input,
+ of which `irfft2` is the inverse.
+ rfft : The one-dimensional FFT for real input.
+ irfft : The inverse of the one-dimensional FFT of real input.
+ irfftn : Compute the inverse of the N-dimensional FFT of real input.
+
+ Notes
+ -----
+ This is really `irfftn` with different defaults.
+ For more details see `irfftn`.
+
+ Examples
+ --------
+ >>> a = np.mgrid[:5, :5][0]
+ >>> A = np.fft.rfft2(a)
+ >>> np.fft.irfft2(A, s=a.shape)
+ array([[0., 0., 0., 0., 0.],
+ [1., 1., 1., 1., 1.],
+ [2., 2., 2., 2., 2.],
+ [3., 3., 3., 3., 3.],
+ [4., 4., 4., 4., 4.]])
+ """
+ return irfftn(a, s, axes, norm)
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi b/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi
new file mode 100644
index 00000000..2bd8b0ba
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft.pyi
@@ -0,0 +1,108 @@
+from collections.abc import Sequence
+from typing import Literal as L
+
+from numpy import complex128, float64
+from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
+
+_NormKind = L[None, "backward", "ortho", "forward"]
+
+__all__: list[str]
+
+def fft(
+ a: ArrayLike,
+ n: None | int = ...,
+ axis: int = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def ifft(
+ a: ArrayLike,
+ n: None | int = ...,
+ axis: int = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def rfft(
+ a: ArrayLike,
+ n: None | int = ...,
+ axis: int = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def irfft(
+ a: ArrayLike,
+ n: None | int = ...,
+ axis: int = ...,
+ norm: _NormKind = ...,
+) -> NDArray[float64]: ...
+
+# Input array must be compatible with `np.conjugate`
+def hfft(
+ a: _ArrayLikeNumber_co,
+ n: None | int = ...,
+ axis: int = ...,
+ norm: _NormKind = ...,
+) -> NDArray[float64]: ...
+
+def ihfft(
+ a: ArrayLike,
+ n: None | int = ...,
+ axis: int = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def fftn(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def ifftn(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def rfftn(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def irfftn(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[float64]: ...
+
+def fft2(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def ifft2(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def rfft2(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[complex128]: ...
+
+def irfft2(
+ a: ArrayLike,
+ s: None | Sequence[int] = ...,
+ axes: None | Sequence[int] = ...,
+ norm: _NormKind = ...,
+) -> NDArray[float64]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so
new file mode 100755
index 00000000..d99ff1f9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/_pocketfft_internal.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/helper.py b/venv/lib/python3.9/site-packages/numpy/fft/helper.py
new file mode 100644
index 00000000..927ee1af
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/helper.py
@@ -0,0 +1,221 @@
+"""
+Discrete Fourier Transforms - helper.py
+
+"""
+from numpy.core import integer, empty, arange, asarray, roll
+from numpy.core.overrides import array_function_dispatch, set_module
+
+# Created by Pearu Peterson, September 2002
+
+__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
+
+integer_types = (int, integer)
+
+
+def _fftshift_dispatcher(x, axes=None):
+ return (x,)
+
+
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
+def fftshift(x, axes=None):
+ """
+ Shift the zero-frequency component to the center of the spectrum.
+
+ This function swaps half-spaces for all axes listed (defaults to all).
+ Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ axes : int or shape tuple, optional
+ Axes over which to shift. Default is None, which shifts all axes.
+
+ Returns
+ -------
+ y : ndarray
+ The shifted array.
+
+ See Also
+ --------
+ ifftshift : The inverse of `fftshift`.
+
+ Examples
+ --------
+ >>> freqs = np.fft.fftfreq(10, 0.1)
+ >>> freqs
+ array([ 0., 1., 2., ..., -3., -2., -1.])
+ >>> np.fft.fftshift(freqs)
+ array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
+
+ Shift the zero-frequency component only along the second axis:
+
+ >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
+ >>> freqs
+ array([[ 0., 1., 2.],
+ [ 3., 4., -4.],
+ [-3., -2., -1.]])
+ >>> np.fft.fftshift(freqs, axes=(1,))
+ array([[ 2., 0., 1.],
+ [-4., 3., 4.],
+ [-1., -3., -2.]])
+
+ """
+ x = asarray(x)
+ if axes is None:
+ axes = tuple(range(x.ndim))
+ shift = [dim // 2 for dim in x.shape]
+ elif isinstance(axes, integer_types):
+ shift = x.shape[axes] // 2
+ else:
+ shift = [x.shape[ax] // 2 for ax in axes]
+
+ return roll(x, shift, axes)
+
+
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
+def ifftshift(x, axes=None):
+ """
+ The inverse of `fftshift`. Although identical for even-length `x`, the
+ functions differ by one sample for odd-length `x`.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+ axes : int or shape tuple, optional
+ Axes over which to calculate. Defaults to None, which shifts all axes.
+
+ Returns
+ -------
+ y : ndarray
+ The shifted array.
+
+ See Also
+ --------
+ fftshift : Shift zero-frequency component to the center of the spectrum.
+
+ Examples
+ --------
+ >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
+ >>> freqs
+ array([[ 0., 1., 2.],
+ [ 3., 4., -4.],
+ [-3., -2., -1.]])
+ >>> np.fft.ifftshift(np.fft.fftshift(freqs))
+ array([[ 0., 1., 2.],
+ [ 3., 4., -4.],
+ [-3., -2., -1.]])
+
+ """
+ x = asarray(x)
+ if axes is None:
+ axes = tuple(range(x.ndim))
+ shift = [-(dim // 2) for dim in x.shape]
+ elif isinstance(axes, integer_types):
+ shift = -(x.shape[axes] // 2)
+ else:
+ shift = [-(x.shape[ax] // 2) for ax in axes]
+
+ return roll(x, shift, axes)
+
+
+@set_module('numpy.fft')
+def fftfreq(n, d=1.0):
+ """
+ Return the Discrete Fourier Transform sample frequencies.
+
+ The returned float array `f` contains the frequency bin centers in cycles
+ per unit of the sample spacing (with zero at the start). For instance, if
+ the sample spacing is in seconds, then the frequency unit is cycles/second.
+
+ Given a window length `n` and a sample spacing `d`::
+
+ f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
+ f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
+
+ Parameters
+ ----------
+ n : int
+ Window length.
+ d : scalar, optional
+ Sample spacing (inverse of the sampling rate). Defaults to 1.
+
+ Returns
+ -------
+ f : ndarray
+ Array of length `n` containing the sample frequencies.
+
+ Examples
+ --------
+ >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
+ >>> fourier = np.fft.fft(signal)
+ >>> n = signal.size
+ >>> timestep = 0.1
+ >>> freq = np.fft.fftfreq(n, d=timestep)
+ >>> freq
+ array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])
+
+ """
+ if not isinstance(n, integer_types):
+ raise ValueError("n should be an integer")
+ val = 1.0 / (n * d)
+ results = empty(n, int)
+ N = (n-1)//2 + 1
+ p1 = arange(0, N, dtype=int)
+ results[:N] = p1
+ p2 = arange(-(n//2), 0, dtype=int)
+ results[N:] = p2
+ return results * val
+
+
+@set_module('numpy.fft')
+def rfftfreq(n, d=1.0):
+ """
+ Return the Discrete Fourier Transform sample frequencies
+ (for usage with rfft, irfft).
+
+ The returned float array `f` contains the frequency bin centers in cycles
+ per unit of the sample spacing (with zero at the start). For instance, if
+ the sample spacing is in seconds, then the frequency unit is cycles/second.
+
+ Given a window length `n` and a sample spacing `d`::
+
+ f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even
+ f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd
+
+ Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
+ the Nyquist frequency component is considered to be positive.
+
+ Parameters
+ ----------
+ n : int
+ Window length.
+ d : scalar, optional
+ Sample spacing (inverse of the sampling rate). Defaults to 1.
+
+ Returns
+ -------
+ f : ndarray
+ Array of length ``n//2 + 1`` containing the sample frequencies.
+
+ Examples
+ --------
+ >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
+ >>> fourier = np.fft.rfft(signal)
+ >>> n = signal.size
+ >>> sample_rate = 100
+ >>> freq = np.fft.fftfreq(n, d=1./sample_rate)
+ >>> freq
+ array([ 0., 10., 20., ..., -30., -20., -10.])
+ >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
+ >>> freq
+ array([ 0., 10., 20., 30., 40., 50.])
+
+ """
+ if not isinstance(n, integer_types):
+ raise ValueError("n should be an integer")
+ val = 1.0/(n*d)
+ N = n//2 + 1
+ results = arange(0, N, dtype=int)
+ return results * val
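+
+
+# Editorial sketch (not part of the upstream numpy source): rfftfreq matches
+# the first n//2 + 1 entries of fftfreq except for the sign of the Nyquist
+# bin when n is even:
+#
+#     >>> import numpy as np
+#     >>> np.fft.fftfreq(8)[:5]
+#     array([ 0.   ,  0.125,  0.25 ,  0.375, -0.5  ])
+#     >>> np.fft.rfftfreq(8)
+#     array([0.   , 0.125, 0.25 , 0.375, 0.5  ])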
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/helper.pyi b/venv/lib/python3.9/site-packages/numpy/fft/helper.pyi
new file mode 100644
index 00000000..9b652519
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/helper.pyi
@@ -0,0 +1,47 @@
+from typing import Any, TypeVar, overload
+
+from numpy import generic, integer, floating, complexfloating
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ _ShapeLike,
+ _ArrayLike,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+__all__: list[str]
+
+@overload
+def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+@overload
+def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+@overload
+def fftfreq(
+ n: int | integer[Any],
+ d: _ArrayLikeFloat_co = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def fftfreq(
+ n: int | integer[Any],
+ d: _ArrayLikeComplex_co = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def rfftfreq(
+ n: int | integer[Any],
+ d: _ArrayLikeFloat_co = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def rfftfreq(
+ n: int | integer[Any],
+ d: _ArrayLikeComplex_co = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/setup.py b/venv/lib/python3.9/site-packages/numpy/fft/setup.py
new file mode 100644
index 00000000..477948a0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/setup.py
@@ -0,0 +1,22 @@
+import sys
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('fft', parent_package, top_path)
+
+ config.add_subpackage('tests')
+
+ # AIX needs to be told to use large file support - at all times
+ defs = [('_LARGE_FILES', None)] if sys.platform[:3] == "aix" else []
+ # Configure pocketfft_internal
+ config.add_extension('_pocketfft_internal',
+ sources=['_pocketfft.c'],
+ define_macros=defs,
+ )
+
+ config.add_data_files('*.pyi')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
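+
+# Editorial note (not part of the upstream numpy source): numpy.distutils
+# calls `configuration` at build time; the returned Configuration object
+# registers the tests subpackage, the `_pocketfft_internal` C extension and
+# the bundled .pyi stubs.  A hypothetical direct inspection:
+#
+#     >>> cfg = configuration(parent_package='numpy')
+#     >>> sorted(cfg.todict())    # doctest: +SKIP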
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/fft/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py b/venv/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py
new file mode 100644
index 00000000..3fb700bb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/tests/test_helper.py
@@ -0,0 +1,167 @@
+"""Test functions for fftpack.helper module
+
+Copied from fftpack.helper by Pearu Peterson, October 2005
+
+"""
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from numpy import fft, pi
+
+
+class TestFFTShift:
+
+ def test_definition(self):
+ x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
+ y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
+ assert_array_almost_equal(fft.fftshift(x), y)
+ assert_array_almost_equal(fft.ifftshift(y), x)
+ x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
+ y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
+ assert_array_almost_equal(fft.fftshift(x), y)
+ assert_array_almost_equal(fft.ifftshift(y), x)
+
+ def test_inverse(self):
+ for n in [1, 4, 9, 100, 211]:
+ x = np.random.random((n,))
+ assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
+
+ def test_axes_keyword(self):
+ freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
+ shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
+ assert_array_almost_equal(fft.fftshift(freqs, axes=0),
+ fft.fftshift(freqs, axes=(0,)))
+ assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
+ assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
+ fft.ifftshift(shifted, axes=(0,)))
+
+ assert_array_almost_equal(fft.fftshift(freqs), shifted)
+ assert_array_almost_equal(fft.ifftshift(shifted), freqs)
+
+ def test_uneven_dims(self):
+ """ Test 2D input, which has uneven dimension sizes """
+ freqs = [
+ [0, 1],
+ [2, 3],
+ [4, 5]
+ ]
+
+ # shift in dimension 0
+ shift_dim0 = [
+ [4, 5],
+ [0, 1],
+ [2, 3]
+ ]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
+ assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
+ assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
+
+ # shift in dimension 1
+ shift_dim1 = [
+ [1, 0],
+ [3, 2],
+ [5, 4]
+ ]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
+ assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
+
+ # shift in both dimensions
+ shift_dim_both = [
+ [5, 4],
+ [1, 0],
+ [3, 2]
+ ]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
+ assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
+
+        # axes=None (default) shifts all dimensions
+ assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
+ assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
+
+ def test_equal_to_original(self):
+ """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
+ from numpy.core import asarray, concatenate, arange, take
+
+ def original_fftshift(x, axes=None):
+ """ How fftshift was implemented in v1.14"""
+ tmp = asarray(x)
+ ndim = tmp.ndim
+ if axes is None:
+ axes = list(range(ndim))
+ elif isinstance(axes, int):
+ axes = (axes,)
+ y = tmp
+ for k in axes:
+ n = tmp.shape[k]
+ p2 = (n + 1) // 2
+ mylist = concatenate((arange(p2, n), arange(p2)))
+ y = take(y, mylist, k)
+ return y
+
+ def original_ifftshift(x, axes=None):
+ """ How ifftshift was implemented in v1.14 """
+ tmp = asarray(x)
+ ndim = tmp.ndim
+ if axes is None:
+ axes = list(range(ndim))
+ elif isinstance(axes, int):
+ axes = (axes,)
+ y = tmp
+ for k in axes:
+ n = tmp.shape[k]
+ p2 = n - (n + 1) // 2
+ mylist = concatenate((arange(p2, n), arange(p2)))
+ y = take(y, mylist, k)
+ return y
+
+ # create possible 2d array combinations and try all possible keywords
+ # compare output to original functions
+ for i in range(16):
+ for j in range(16):
+ for axes_keyword in [0, 1, None, (0,), (0, 1)]:
+ inp = np.random.rand(i, j)
+
+ assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
+ original_fftshift(inp, axes_keyword))
+
+ assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
+ original_ifftshift(inp, axes_keyword))
+
+
+class TestFFTFreq:
+
+ def test_definition(self):
+ x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
+ assert_array_almost_equal(9*fft.fftfreq(9), x)
+ assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
+ x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
+ assert_array_almost_equal(10*fft.fftfreq(10), x)
+ assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
+
+
+class TestRFFTFreq:
+
+ def test_definition(self):
+ x = [0, 1, 2, 3, 4]
+ assert_array_almost_equal(9*fft.rfftfreq(9), x)
+ assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
+ x = [0, 1, 2, 3, 4, 5]
+ assert_array_almost_equal(10*fft.rfftfreq(10), x)
+ assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
+
+
+class TestIRFFTN:
+
+ def test_not_last_axis_success(self):
+ ar, ai = np.random.random((2, 16, 8, 32))
+ a = ar + 1j*ai
+
+ axes = (-2,)
+
+ # Should not raise error
+ fft.irfftn(a, axes=axes)
diff --git a/venv/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py b/venv/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py
new file mode 100644
index 00000000..122a9fac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/fft/tests/test_pocketfft.py
@@ -0,0 +1,308 @@
+import numpy as np
+import pytest
+from numpy.random import random
+from numpy.testing import (
+ assert_array_equal, assert_raises, assert_allclose, IS_WASM
+ )
+import threading
+import queue
+
+
+def fft1(x):
+ L = len(x)
+ phase = -2j * np.pi * (np.arange(L) / L)
+ phase = np.arange(L).reshape(-1, 1) * phase
+ return np.sum(x*np.exp(phase), axis=1)
+
+
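+# Editorial note (not part of the upstream numpy source): fft1 above is a
+# naive O(L^2) reference DFT.  Broadcasting builds the full L-by-L matrix of
+# phases exp(-2j*pi*k*n/L), so row k of the sum gives
+# X[k] = sum_n x[n] * exp(-2j*pi*k*n/L), which the pocketfft-backed
+# np.fft.fft must reproduce to within rounding.
+
+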
+class TestFFTShift:
+
+ def test_fft_n(self):
+ assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
+
+
+class TestFFT1D:
+
+ def test_identity(self):
+ maxlen = 512
+ x = random(maxlen) + 1j*random(maxlen)
+ xr = random(maxlen)
+ for i in range(1, maxlen):
+ assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
+ atol=1e-12)
+ assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),
+ xr[0:i], atol=1e-12)
+
+ def test_fft(self):
+ x = random(30) + 1j*random(30)
+ assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
+ assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)
+ assert_allclose(fft1(x) / np.sqrt(30),
+ np.fft.fft(x, norm="ortho"), atol=1e-6)
+ assert_allclose(fft1(x) / 30.,
+ np.fft.fft(x, norm="forward"), atol=1e-6)
+
+ @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))
+ def test_ifft(self, norm):
+ x = random(30) + 1j*random(30)
+ assert_allclose(
+ x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
+ atol=1e-6)
+ # Ensure we get the correct error message
+ with pytest.raises(ValueError,
+ match='Invalid number of FFT data points'):
+ np.fft.ifft([], norm=norm)
+
+ def test_fft2(self):
+ x = random((30, 20)) + 1j*random((30, 20))
+ assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
+ np.fft.fft2(x), atol=1e-6)
+ assert_allclose(np.fft.fft2(x),
+ np.fft.fft2(x, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
+ np.fft.fft2(x, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.fft2(x) / (30. * 20.),
+ np.fft.fft2(x, norm="forward"), atol=1e-6)
+
+ def test_ifft2(self):
+ x = random((30, 20)) + 1j*random((30, 20))
+ assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
+ np.fft.ifft2(x), atol=1e-6)
+ assert_allclose(np.fft.ifft2(x),
+ np.fft.ifft2(x, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
+ np.fft.ifft2(x, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.ifft2(x) * (30. * 20.),
+ np.fft.ifft2(x, norm="forward"), atol=1e-6)
+
+ def test_fftn(self):
+ x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+ assert_allclose(
+ np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
+ np.fft.fftn(x), atol=1e-6)
+ assert_allclose(np.fft.fftn(x),
+ np.fft.fftn(x, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
+ np.fft.fftn(x, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.),
+ np.fft.fftn(x, norm="forward"), atol=1e-6)
+
+ def test_ifftn(self):
+ x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+ assert_allclose(
+ np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
+ np.fft.ifftn(x), atol=1e-6)
+ assert_allclose(np.fft.ifftn(x),
+ np.fft.ifftn(x, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+ np.fft.ifftn(x, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),
+ np.fft.ifftn(x, norm="forward"), atol=1e-6)
+
+ def test_rfft(self):
+ x = random(30)
+ for n in [x.size, 2*x.size]:
+ for norm in [None, 'backward', 'ortho', 'forward']:
+ assert_allclose(
+ np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
+ np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
+ assert_allclose(
+ np.fft.rfft(x, n=n),
+ np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)
+ assert_allclose(
+ np.fft.rfft(x, n=n) / np.sqrt(n),
+ np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
+ assert_allclose(
+ np.fft.rfft(x, n=n) / n,
+ np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)
+
+ def test_irfft(self):
+ x = random(30)
+ assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
+ assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),
+ norm="backward"), atol=1e-6)
+ assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),
+ norm="ortho"), atol=1e-6)
+ assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),
+ norm="forward"), atol=1e-6)
+
+ def test_rfft2(self):
+ x = random((30, 20))
+ assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
+ assert_allclose(np.fft.rfft2(x),
+ np.fft.rfft2(x, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
+ np.fft.rfft2(x, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.rfft2(x) / (30. * 20.),
+ np.fft.rfft2(x, norm="forward"), atol=1e-6)
+
+ def test_irfft2(self):
+ x = random((30, 20))
+ assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
+ assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),
+ norm="backward"), atol=1e-6)
+ assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),
+ norm="ortho"), atol=1e-6)
+ assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),
+ norm="forward"), atol=1e-6)
+
+ def test_rfftn(self):
+ x = random((30, 20, 10))
+ assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
+ assert_allclose(np.fft.rfftn(x),
+ np.fft.rfftn(x, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
+ np.fft.rfftn(x, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),
+ np.fft.rfftn(x, norm="forward"), atol=1e-6)
+
+ def test_irfftn(self):
+ x = random((30, 20, 10))
+ assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
+ assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),
+ norm="backward"), atol=1e-6)
+ assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),
+ norm="ortho"), atol=1e-6)
+ assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),
+ norm="forward"), atol=1e-6)
+
+ def test_hfft(self):
+ x = random(14) + 1j*random(14)
+ x_herm = np.concatenate((random(1), x, random(1)))
+ x = np.concatenate((x_herm, x[::-1].conj()))
+ assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
+ assert_allclose(np.fft.hfft(x_herm),
+ np.fft.hfft(x_herm, norm="backward"), atol=1e-6)
+ assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
+ np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
+ assert_allclose(np.fft.hfft(x_herm) / 30.,
+ np.fft.hfft(x_herm, norm="forward"), atol=1e-6)
+
+ def test_ihfft(self):
+ x = random(14) + 1j*random(14)
+ x_herm = np.concatenate((random(1), x, random(1)))
+ x = np.concatenate((x_herm, x[::-1].conj()))
+ assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
+ assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
+ norm="backward"), norm="backward"), atol=1e-6)
+ assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
+ norm="ortho"), norm="ortho"), atol=1e-6)
+ assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
+ norm="forward"), norm="forward"), atol=1e-6)
+
+ @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
+ np.fft.rfftn, np.fft.irfftn])
+ def test_axes(self, op):
+ x = random((30, 20, 10))
+ axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
+ for a in axes:
+ op_tr = op(np.transpose(x, a))
+ tr_op = np.transpose(op(x, axes=a), a)
+ assert_allclose(op_tr, tr_op, atol=1e-6)
+
+ def test_all_1d_norm_preserving(self):
+ # verify that round-trip transforms are norm-preserving
+ x = random(30)
+ x_norm = np.linalg.norm(x)
+ n = x.size * 2
+ func_pairs = [(np.fft.fft, np.fft.ifft),
+ (np.fft.rfft, np.fft.irfft),
+ # hfft: order so the first function takes x.size samples
+ # (necessary for comparison to x_norm above)
+ (np.fft.ihfft, np.fft.hfft),
+ ]
+ for forw, back in func_pairs:
+ for n in [x.size, 2*x.size]:
+ for norm in [None, 'backward', 'ortho', 'forward']:
+ tmp = forw(x, n=n, norm=norm)
+ tmp = back(tmp, n=n, norm=norm)
+ assert_allclose(x_norm,
+ np.linalg.norm(tmp), atol=1e-6)
+
+ @pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
+ np.longdouble])
+ def test_dtypes(self, dtype):
+ # make sure that all input precisions are accepted and internally
+ # converted to 64bit
+ x = random(30).astype(dtype)
+ assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
+ assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ [np.float32, np.float64, np.complex64, np.complex128])
+@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
+@pytest.mark.parametrize(
+ "fft",
+ [np.fft.fft, np.fft.fft2, np.fft.fftn,
+ np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
+def test_fft_with_order(dtype, order, fft):
+ # Check that FFT/IFFT produces identical results for C, Fortran and
+ # non contiguous arrays
+ rng = np.random.RandomState(42)
+ X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+ # See discussion in pull/14178
+ _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
+ if order == 'F':
+ Y = np.asfortranarray(X)
+ else:
+ # Make a non contiguous array
+ Y = X[::-1]
+ X = np.ascontiguousarray(X[::-1])
+
+ if fft.__name__.endswith('fft'):
+ for axis in range(3):
+ X_res = fft(X, axis=axis)
+ Y_res = fft(Y, axis=axis)
+ assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+ elif fft.__name__.endswith(('fft2', 'fftn')):
+ axes = [(0, 1), (1, 2), (0, 2)]
+ if fft.__name__.endswith('fftn'):
+ axes.extend([(0,), (1,), (2,), None])
+ for ax in axes:
+ X_res = fft(X, axes=ax)
+ Y_res = fft(Y, axes=ax)
+ assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+ else:
+        raise ValueError("unexpected fft function: %s" % fft.__name__)
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start thread")
+class TestFFTThreadSafe:
+ threads = 16
+ input_shape = (800, 200)
+
+ def _test_mtsame(self, func, *args):
+ def worker(args, q):
+ q.put(func(*args))
+
+ q = queue.Queue()
+ expected = func(*args)
+
+ # Spin off a bunch of threads to call the same function simultaneously
+ t = [threading.Thread(target=worker, args=(args, q))
+ for i in range(self.threads)]
+ [x.start() for x in t]
+
+ [x.join() for x in t]
+ # Make sure all threads returned the correct value
+ for i in range(self.threads):
+ assert_array_equal(q.get(timeout=5), expected,
+ 'Function returned wrong value in multithreaded context')
+
+ def test_fft(self):
+ a = np.ones(self.input_shape) * 1+0j
+ self._test_mtsame(np.fft.fft, a)
+
+ def test_ifft(self):
+ a = np.ones(self.input_shape) * 1+0j
+ self._test_mtsame(np.fft.ifft, a)
+
+ def test_rfft(self):
+ a = np.ones(self.input_shape)
+ self._test_mtsame(np.fft.rfft, a)
+
+ def test_irfft(self):
+ a = np.ones(self.input_shape) * 1+0j
+ self._test_mtsame(np.fft.irfft, a)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/__init__.py b/venv/lib/python3.9/site-packages/numpy/lib/__init__.py
new file mode 100644
index 00000000..58166d4b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/__init__.py
@@ -0,0 +1,79 @@
+"""
+**Note:** almost all functions in the ``numpy.lib`` namespace
+are also present in the main ``numpy`` namespace. Please use the
+functions as ``np.<funcname>`` where possible.
+
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+It mostly contains basic functions that are used by several submodules and
+are useful to have in the main name-space.
+
+"""
+import math
+
+from numpy.version import version as __version__
+
+# Public submodules
+# Note: recfunctions and (maybe) format are public too, but not imported
+from . import mixins
+from . import scimath as emath
+
+# Private submodules
+# load module names. See https://github.com/networkx/networkx/issues/5838
+from . import type_check
+from . import index_tricks
+from . import function_base
+from . import nanfunctions
+from . import shape_base
+from . import stride_tricks
+from . import twodim_base
+from . import ufunclike
+from . import histograms
+from . import polynomial
+from . import utils
+from . import arraysetops
+from . import npyio
+from . import arrayterator
+from . import arraypad
+from . import _version
+
+from .type_check import *
+from .index_tricks import *
+from .function_base import *
+from .nanfunctions import *
+from .shape_base import *
+from .stride_tricks import *
+from .twodim_base import *
+from .ufunclike import *
+from .histograms import *
+
+from .polynomial import *
+from .utils import *
+from .arraysetops import *
+from .npyio import *
+from .arrayterator import Arrayterator
+from .arraypad import *
+from ._version import *
+from numpy.core._multiarray_umath import tracemalloc_domain
+
+__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator']
+__all__ += type_check.__all__
+__all__ += index_tricks.__all__
+__all__ += function_base.__all__
+__all__ += shape_base.__all__
+__all__ += stride_tricks.__all__
+__all__ += twodim_base.__all__
+__all__ += ufunclike.__all__
+__all__ += arraypad.__all__
+__all__ += polynomial.__all__
+__all__ += utils.__all__
+__all__ += arraysetops.__all__
+__all__ += npyio.__all__
+__all__ += nanfunctions.__all__
+__all__ += histograms.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
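+
+# Editorial note (not part of the upstream numpy source): because of the
+# star imports above, most names here are the very same objects as their
+# top-level counterparts, e.g.:
+#
+#     >>> import numpy as np
+#     >>> np.lib.median is np.median
+#     True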
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/lib/__init__.pyi
new file mode 100644
index 00000000..d3553bbc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/__init__.pyi
@@ -0,0 +1,245 @@
+import math as math
+from typing import Any
+
+from numpy._pytesttester import PytestTester
+
+from numpy import (
+ ndenumerate as ndenumerate,
+ ndindex as ndindex,
+)
+
+from numpy.version import version
+
+from numpy.lib import (
+ format as format,
+ mixins as mixins,
+ scimath as scimath,
+ stride_tricks as stride_tricks,
+)
+
+from numpy.lib._version import (
+ NumpyVersion as NumpyVersion,
+)
+
+from numpy.lib.arraypad import (
+ pad as pad,
+)
+
+from numpy.lib.arraysetops import (
+ ediff1d as ediff1d,
+ intersect1d as intersect1d,
+ setxor1d as setxor1d,
+ union1d as union1d,
+ setdiff1d as setdiff1d,
+ unique as unique,
+ in1d as in1d,
+ isin as isin,
+)
+
+from numpy.lib.arrayterator import (
+ Arrayterator as Arrayterator,
+)
+
+from numpy.lib.function_base import (
+ select as select,
+ piecewise as piecewise,
+ trim_zeros as trim_zeros,
+ copy as copy,
+ iterable as iterable,
+ percentile as percentile,
+ diff as diff,
+ gradient as gradient,
+ angle as angle,
+ unwrap as unwrap,
+ sort_complex as sort_complex,
+ disp as disp,
+ flip as flip,
+ rot90 as rot90,
+ extract as extract,
+ place as place,
+ vectorize as vectorize,
+ asarray_chkfinite as asarray_chkfinite,
+ average as average,
+ bincount as bincount,
+ digitize as digitize,
+ cov as cov,
+ corrcoef as corrcoef,
+ median as median,
+ sinc as sinc,
+ hamming as hamming,
+ hanning as hanning,
+ bartlett as bartlett,
+ blackman as blackman,
+ kaiser as kaiser,
+ trapz as trapz,
+ i0 as i0,
+ add_newdoc as add_newdoc,
+ add_docstring as add_docstring,
+ meshgrid as meshgrid,
+ delete as delete,
+ insert as insert,
+ append as append,
+ interp as interp,
+ add_newdoc_ufunc as add_newdoc_ufunc,
+ quantile as quantile,
+)
+
+from numpy.lib.histograms import (
+ histogram_bin_edges as histogram_bin_edges,
+ histogram as histogram,
+ histogramdd as histogramdd,
+)
+
+from numpy.lib.index_tricks import (
+ ravel_multi_index as ravel_multi_index,
+ unravel_index as unravel_index,
+ mgrid as mgrid,
+ ogrid as ogrid,
+ r_ as r_,
+ c_ as c_,
+ s_ as s_,
+ index_exp as index_exp,
+ ix_ as ix_,
+ fill_diagonal as fill_diagonal,
+ diag_indices as diag_indices,
+ diag_indices_from as diag_indices_from,
+)
+
+from numpy.lib.nanfunctions import (
+ nansum as nansum,
+ nanmax as nanmax,
+ nanmin as nanmin,
+ nanargmax as nanargmax,
+ nanargmin as nanargmin,
+ nanmean as nanmean,
+ nanmedian as nanmedian,
+ nanpercentile as nanpercentile,
+ nanvar as nanvar,
+ nanstd as nanstd,
+ nanprod as nanprod,
+ nancumsum as nancumsum,
+ nancumprod as nancumprod,
+ nanquantile as nanquantile,
+)
+
+from numpy.lib.npyio import (
+ savetxt as savetxt,
+ loadtxt as loadtxt,
+ genfromtxt as genfromtxt,
+ recfromtxt as recfromtxt,
+ recfromcsv as recfromcsv,
+ load as load,
+ save as save,
+ savez as savez,
+ savez_compressed as savez_compressed,
+ packbits as packbits,
+ unpackbits as unpackbits,
+ fromregex as fromregex,
+ DataSource as DataSource,
+)
+
+from numpy.lib.polynomial import (
+ poly as poly,
+ roots as roots,
+ polyint as polyint,
+ polyder as polyder,
+ polyadd as polyadd,
+ polysub as polysub,
+ polymul as polymul,
+ polydiv as polydiv,
+ polyval as polyval,
+ polyfit as polyfit,
+ RankWarning as RankWarning,
+ poly1d as poly1d,
+)
+
+from numpy.lib.shape_base import (
+ column_stack as column_stack,
+ row_stack as row_stack,
+ dstack as dstack,
+ array_split as array_split,
+ split as split,
+ hsplit as hsplit,
+ vsplit as vsplit,
+ dsplit as dsplit,
+ apply_over_axes as apply_over_axes,
+ expand_dims as expand_dims,
+ apply_along_axis as apply_along_axis,
+ kron as kron,
+ tile as tile,
+ get_array_wrap as get_array_wrap,
+ take_along_axis as take_along_axis,
+ put_along_axis as put_along_axis,
+)
+
+from numpy.lib.stride_tricks import (
+ broadcast_to as broadcast_to,
+ broadcast_arrays as broadcast_arrays,
+ broadcast_shapes as broadcast_shapes,
+)
+
+from numpy.lib.twodim_base import (
+ diag as diag,
+ diagflat as diagflat,
+ eye as eye,
+ fliplr as fliplr,
+ flipud as flipud,
+ tri as tri,
+ triu as triu,
+ tril as tril,
+ vander as vander,
+ histogram2d as histogram2d,
+ mask_indices as mask_indices,
+ tril_indices as tril_indices,
+ tril_indices_from as tril_indices_from,
+ triu_indices as triu_indices,
+ triu_indices_from as triu_indices_from,
+)
+
+from numpy.lib.type_check import (
+ mintypecode as mintypecode,
+ asfarray as asfarray,
+ real as real,
+ imag as imag,
+ iscomplex as iscomplex,
+ isreal as isreal,
+ iscomplexobj as iscomplexobj,
+ isrealobj as isrealobj,
+ nan_to_num as nan_to_num,
+ real_if_close as real_if_close,
+ typename as typename,
+ common_type as common_type,
+)
+
+from numpy.lib.ufunclike import (
+ fix as fix,
+ isposinf as isposinf,
+ isneginf as isneginf,
+)
+
+from numpy.lib.utils import (
+ issubclass_ as issubclass_,
+ issubsctype as issubsctype,
+ issubdtype as issubdtype,
+ deprecate as deprecate,
+ deprecate_with_doc as deprecate_with_doc,
+ get_include as get_include,
+ info as info,
+ source as source,
+ who as who,
+ lookfor as lookfor,
+ byte_bounds as byte_bounds,
+ safe_eval as safe_eval,
+ show_runtime as show_runtime,
+)
+
+from numpy.core.multiarray import (
+ tracemalloc_domain as tracemalloc_domain,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+__version__ = version
+emath = scimath
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/_datasource.py b/venv/lib/python3.9/site-packages/numpy/lib/_datasource.py
new file mode 100644
index 00000000..b7778234
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/_datasource.py
@@ -0,0 +1,704 @@
+"""A file interface for handling local and remote data files.
+
+The goal of datasource is to abstract some of the file system operations
+when dealing with data files so the researcher doesn't have to know all the
+low-level details. Through datasource, a researcher can obtain and use a
+file with one function call, regardless of location of the file.
+
+DataSource is meant to augment standard python libraries, not replace them.
+It should work seamlessly with standard file IO operations and the os
+module.
+
+DataSource files can originate locally or remotely:
+
+- local files : '/home/guido/src/local/data.txt'
+- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
+
+DataSource files can also be compressed or uncompressed. Currently only
+gzip, bz2 and xz are supported.
+
+Example::
+
+ >>> # Create a DataSource, use os.curdir (default) for local storage.
+ >>> from numpy import DataSource
+ >>> ds = DataSource()
+ >>>
+ >>> # Open a remote file.
+ >>> # DataSource downloads the file, stores it locally in:
+ >>> # './www.google.com/index.html'
+ >>> # opens the file and returns a file object.
+ >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
+ >>>
+ >>> # Use the file as you normally would
+ >>> fp.read() # doctest: +SKIP
+ >>> fp.close() # doctest: +SKIP
+
+"""
+import os
+import io
+
+from numpy.core.overrides import set_module
+
+
+_open = open
+
+
+def _check_mode(mode, encoding, newline):
+ """Check mode and that encoding and newline are compatible.
+
+ Parameters
+ ----------
+ mode : str
+ File open mode.
+ encoding : str
+ File encoding.
+ newline : str
+ Newline for text files.
+
+ """
+ if "t" in mode:
+ if "b" in mode:
+ raise ValueError("Invalid mode: %r" % (mode,))
+ else:
+ if encoding is not None:
+ raise ValueError("Argument 'encoding' not supported in binary mode")
+ if newline is not None:
+ raise ValueError("Argument 'newline' not supported in binary mode")
+
+
+# Using a class instead of a module-level dictionary
+# to reduce the initial 'import numpy' overhead by
+# deferring the import of lzma, bz2 and gzip until needed
+
+# TODO: .zip support, .tar support?
+class _FileOpeners:
+ """
+ Container for different methods to open (un-)compressed files.
+
+ `_FileOpeners` contains a dictionary that holds one method for each
+ supported file format. Attribute lookup is implemented in such a way
+ that an instance of `_FileOpeners` itself can be indexed with the keys
+ of that dictionary. Currently uncompressed files as well as files
+ compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
+
+ Notes
+ -----
+ `_file_openers`, an instance of `_FileOpeners`, is made available for
+ use in the `_datasource` module.
+
+ Examples
+ --------
+ >>> import gzip
+ >>> np.lib._datasource._file_openers.keys()
+ [None, '.bz2', '.gz', '.xz', '.lzma']
+ >>> np.lib._datasource._file_openers['.gz'] is gzip.open
+ True
+
+ """
+
+ def __init__(self):
+ self._loaded = False
+ self._file_openers = {None: io.open}
+
+ def _load(self):
+ if self._loaded:
+ return
+
+ try:
+ import bz2
+ self._file_openers[".bz2"] = bz2.open
+ except ImportError:
+ pass
+
+ try:
+ import gzip
+ self._file_openers[".gz"] = gzip.open
+ except ImportError:
+ pass
+
+ try:
+ import lzma
+ self._file_openers[".xz"] = lzma.open
+ self._file_openers[".lzma"] = lzma.open
+ except (ImportError, AttributeError):
+ # There are incompatible backports of lzma that do not have the
+ # lzma.open attribute, so catch that as well as ImportError.
+ pass
+
+ self._loaded = True
+
+ def keys(self):
+ """
+ Return the keys of currently supported file openers.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ keys : list
+ The keys are None for uncompressed files and the file extension
+ strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
+ methods.
+
+ """
+ self._load()
+ return list(self._file_openers.keys())
+
+ def __getitem__(self, key):
+ self._load()
+ return self._file_openers[key]
+
+_file_openers = _FileOpeners()
+
+def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
+ """
+ Open `path` with `mode` and return the file object.
+
+    If ``path`` is a URL, it will be downloaded, stored in the
+    `DataSource` `destpath` directory and opened from there.
+
+ Parameters
+ ----------
+ path : str
+ Local file path or URL to open.
+ mode : str, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
+ append. Available modes depend on the type of object specified by
+ path. Default is 'r'.
+ destpath : str, optional
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
+ encoding : {None, str}, optional
+ Open text file with given encoding. The default encoding will be
+ what `io.open` uses.
+ newline : {None, str}, optional
+ Newline to use when reading text file.
+
+ Returns
+ -------
+ out : file object
+ The opened file.
+
+ Notes
+ -----
+ This is a convenience function that instantiates a `DataSource` and
+ returns the file object from ``DataSource.open(path)``.
+
+ """
+
+ ds = DataSource(destpath)
+ return ds.open(path, mode, encoding=encoding, newline=newline)
+
+
+@set_module('numpy')
+class DataSource:
+ """
+ DataSource(destpath='.')
+
+ A generic data source file (file, http, ftp, ...).
+
+ DataSources can be local files or remote files/URLs. The files may
+ also be compressed or uncompressed. DataSource hides some of the
+ low-level details of downloading the file, allowing you to simply pass
+ in a valid file path (or URL) and obtain a file object.
+
+ Parameters
+ ----------
+ destpath : str or None, optional
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
+
+ Notes
+ -----
+    URLs require a scheme string (``http://``) to be used; without one they
+    will fail::
+
+ >>> repos = np.DataSource()
+ >>> repos.exists('www.google.com/index.html')
+ False
+ >>> repos.exists('http://www.google.com/index.html')
+ True
+
+ Temporary directories are deleted when the DataSource is deleted.
+
+ Examples
+ --------
+ ::
+
+ >>> ds = np.DataSource('/home/guido')
+ >>> urlname = 'http://www.google.com/'
+ >>> gfile = ds.open('http://www.google.com/')
+ >>> ds.abspath(urlname)
+ '/home/guido/www.google.com/index.html'
+
+ >>> ds = np.DataSource(None) # use with temporary file
+ >>> ds.open('/home/guido/foobar.txt')
+    <open file '/home/guido/foobar.txt', mode 'r' at 0x91d4430>
+ >>> ds.abspath('/home/guido/foobar.txt')
+ '/tmp/.../home/guido/foobar.txt'
+
+ """
+
+ def __init__(self, destpath=os.curdir):
+ """Create a DataSource with a local path at destpath."""
+ if destpath:
+ self._destpath = os.path.abspath(destpath)
+ self._istmpdest = False
+ else:
+ import tempfile # deferring import to improve startup time
+ self._destpath = tempfile.mkdtemp()
+ self._istmpdest = True
+
+ def __del__(self):
+ # Remove temp directories
+ if hasattr(self, '_istmpdest') and self._istmpdest:
+ import shutil
+
+ shutil.rmtree(self._destpath)
+
+ def _iszip(self, filename):
+ """Test if the filename is a zip file by looking at the file extension.
+
+ """
+ fname, ext = os.path.splitext(filename)
+ return ext in _file_openers.keys()
+
+ def _iswritemode(self, mode):
+ """Test if the given mode will open a file for writing."""
+
+ # Currently only used to test the bz2 files.
+ _writemodes = ("w", "+")
+ for c in mode:
+ if c in _writemodes:
+ return True
+ return False
+
+ def _splitzipext(self, filename):
+        """Split zip extension from filename and return base and extension.
+
+ Returns
+ -------
+ base, zip_ext : {tuple}
+
+ """
+
+ if self._iszip(filename):
+ return os.path.splitext(filename)
+ else:
+ return filename, None
+
+ def _possible_names(self, filename):
+ """Return a tuple containing compressed filename variations."""
+ names = [filename]
+ if not self._iszip(filename):
+ for zipext in _file_openers.keys():
+ if zipext:
+ names.append(filename+zipext)
+ return names
+
+ def _isurl(self, path):
+ """Test if path is a net location. Tests the scheme and netloc."""
+
+ # We do this here to reduce the 'import numpy' initial import time.
+ from urllib.parse import urlparse
+
+ # BUG : URLs require a scheme string ('http://') to be used.
+ # www.google.com will fail.
+ # Should we prepend the scheme for those that don't have it and
+        # test that also?  Similar to the way we append .gz and test for
+        # compressed versions of files.
+
+ scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+ return bool(scheme and netloc)
+
+ def _cache(self, path):
+ """Cache the file specified by path.
+
+ Creates a copy of the file in the datasource cache.
+
+ """
+ # We import these here because importing them is slow and
+ # a significant fraction of numpy's total import time.
+ import shutil
+ from urllib.request import urlopen
+
+ upath = self.abspath(path)
+
+ # ensure directory exists
+ if not os.path.exists(os.path.dirname(upath)):
+ os.makedirs(os.path.dirname(upath))
+
+ # TODO: Doesn't handle compressed files!
+ if self._isurl(path):
+ with urlopen(path) as openedurl:
+ with _open(upath, 'wb') as f:
+ shutil.copyfileobj(openedurl, f)
+ else:
+ shutil.copyfile(path, upath)
+ return upath
+
+ def _findfile(self, path):
+ """Searches for ``path`` and returns full path if found.
+
+        If path is a URL, _findfile will cache a local copy and return the
+ path to the cached file. If path is a local file, _findfile will
+ return a path to that local file.
+
+ The search will include possible compressed versions of the file
+ and return the first occurrence found.
+
+ """
+
+ # Build list of possible local file paths
+ if not self._isurl(path):
+ # Valid local paths
+ filelist = self._possible_names(path)
+ # Paths in self._destpath
+ filelist += self._possible_names(self.abspath(path))
+ else:
+ # Cached URLs in self._destpath
+ filelist = self._possible_names(self.abspath(path))
+ # Remote URLs
+ filelist = filelist + self._possible_names(path)
+
+ for name in filelist:
+ if self.exists(name):
+ if self._isurl(name):
+ name = self._cache(name)
+ return name
+ return None
+
+ def abspath(self, path):
+ """
+ Return absolute path of file in the DataSource directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file already exists locally or the location it would be
+        downloaded to when opened using the `open` method.
+
+ Parameters
+ ----------
+ path : str
+ Can be a local file or a remote URL.
+
+ Returns
+ -------
+ out : str
+ Complete path, including the `DataSource` destination directory.
+
+ Notes
+ -----
+ The functionality is based on `os.path.abspath`.
+
+ """
+ # We do this here to reduce the 'import numpy' initial import time.
+ from urllib.parse import urlparse
+
+ # TODO: This should be more robust. Handles case where path includes
+ # the destpath, but not other sub-paths. Failing case:
+ # path = /home/guido/datafile.txt
+ # destpath = /home/alex/
+ # upath = self.abspath(path)
+ # upath == '/home/alex/home/guido/datafile.txt'
+
+ # handle case where path includes self._destpath
+ splitpath = path.split(self._destpath, 2)
+ if len(splitpath) > 1:
+ path = splitpath[1]
+ scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+ netloc = self._sanitize_relative_path(netloc)
+ upath = self._sanitize_relative_path(upath)
+ return os.path.join(self._destpath, netloc, upath)
+
+ def _sanitize_relative_path(self, path):
+ """Return a sanitised relative path for which
+ os.path.abspath(os.path.join(base, path)).startswith(base)
+ """
+ last = None
+ path = os.path.normpath(path)
+ while path != last:
+ last = path
+ # Note: os.path.join treats '/' as os.sep on Windows
+ path = path.lstrip(os.sep).lstrip('/')
+ path = path.lstrip(os.pardir).lstrip('..')
+ drive, path = os.path.splitdrive(path) # for Windows
+ return path
+
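+    # Editorial sketch (not part of the upstream numpy source): the loop in
+    # _sanitize_relative_path repeatedly strips separators, parent references
+    # and drive letters until the path stops changing, so hostile inputs
+    # cannot escape destpath (example on POSIX):
+    #
+    #     >>> DataSource()._sanitize_relative_path('../../etc/passwd')
+    #     'etc/passwd'
+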
+ def exists(self, path):
+ """
+ Test if path exists.
+
+ Test if `path` exists as (and in this order):
+
+ - a local file.
+ - a remote URL that has been downloaded and stored locally in the
+ `DataSource` directory.
+ - a remote URL that has not been downloaded, but is valid and
+ accessible.
+
+ Parameters
+ ----------
+ path : str
+ Can be a local file or a remote URL.
+
+ Returns
+ -------
+ out : bool
+ True if `path` exists.
+
+ Notes
+ -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory or is a valid remote
+        URL. `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+ """
+
+ # First test for local path
+ if os.path.exists(path):
+ return True
+
+ # We import this here because importing urllib is slow and
+ # a significant fraction of numpy's total import time.
+ from urllib.request import urlopen
+ from urllib.error import URLError
+
+ # Test cached url
+ upath = self.abspath(path)
+ if os.path.exists(upath):
+ return True
+
+ # Test remote url
+ if self._isurl(path):
+ try:
+ netfile = urlopen(path)
+ netfile.close()
+                del netfile
+ return True
+ except URLError:
+ return False
+ return False
+
+ def open(self, path, mode='r', encoding=None, newline=None):
+ """
+ Open and return file-like object.
+
+        If `path` is a URL, it will be downloaded, stored in the
+ `DataSource` directory and opened from there.
+
+ Parameters
+ ----------
+ path : str
+ Local file path or URL to open.
+ mode : {'r', 'w', 'a'}, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
+ encoding : {None, str}, optional
+ Open text file with given encoding. The default encoding will be
+ what `io.open` uses.
+ newline : {None, str}, optional
+ Newline to use when reading text file.
+
+ Returns
+ -------
+ out : file object
+ File object.
+
+ """
+
+ # TODO: There is no support for opening a file for writing which
+ # doesn't exist yet (creating a file). Should there be?
+
+ # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+ # used to store URLs in self._destpath.
+
+ if self._isurl(path) and self._iswritemode(mode):
+ raise ValueError("URLs are not writeable")
+
+ # NOTE: _findfile will fail on a new file opened for writing.
+ found = self._findfile(path)
+ if found:
+            _fname, ext = self._splitzipext(found)
+            if ext == '.bz2':
+                # bz2 file objects reject "+" in the mode string; note that
+                # _splitzipext returns the extension with its leading dot
+                mode = mode.replace("+", "")
+ return _file_openers[ext](found, mode=mode,
+ encoding=encoding, newline=newline)
+ else:
+ raise FileNotFoundError(f"{path} not found.")
+
+
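+# Editorial sketch (not part of the upstream numpy source): a typical local
+# round trip; open() transparently falls back to a compressed sibling such
+# as data.txt.gz when data.txt itself does not exist (both file names here
+# are hypothetical):
+#
+#     >>> ds = DataSource(destpath=None)     # cache in a temp directory
+#     >>> with ds.open('data.txt') as f:     # may really open data.txt.gz
+#     ...     first = f.readline()
+
+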
+class Repository (DataSource):
+ """
+ Repository(baseurl, destpath='.')
+
+ A data repository where multiple DataSource's share a base
+ URL/directory.
+
+ `Repository` extends `DataSource` by prepending a base URL (or
+ directory) to all the files it handles. Use `Repository` when you will
+ be working with multiple files from one base URL. Initialize
+ `Repository` with the base URL, then refer to each file by its filename
+ only.
+
+ Parameters
+ ----------
+ baseurl : str
+ Path to the local directory or remote location that contains the
+ data files.
+ destpath : str or None, optional
+ Path to the directory where the source file gets downloaded to for
+ use. If `destpath` is None, a temporary directory will be created.
+ The default path is the current directory.
+
+ Examples
+ --------
+ To analyze all files in the repository, do something like this
+ (note: this is not self-contained code)::
+
+ >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+ >>> for filename in filelist:
+ ... fp = repos.open(filename)
+ ... fp.analyze()
+ ... fp.close()
+
+ Similarly you could use a URL for a repository::
+
+ >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+ """
+
+ def __init__(self, baseurl, destpath=os.curdir):
+ """Create a Repository with a shared url or directory of baseurl."""
+ DataSource.__init__(self, destpath=destpath)
+ self._baseurl = baseurl
+
+ def __del__(self):
+ DataSource.__del__(self)
+
+ def _fullpath(self, path):
+ """Return complete path for path. Prepends baseurl if necessary."""
+ splitpath = path.split(self._baseurl, 2)
+ if len(splitpath) == 1:
+ result = os.path.join(self._baseurl, path)
+ else:
+ result = path # path contains baseurl already
+ return result
+
+ def _findfile(self, path):
+ """Extend DataSource method to prepend baseurl to ``path``."""
+ return DataSource._findfile(self, self._fullpath(path))
+
+ def abspath(self, path):
+ """
+ Return absolute path of file in the Repository directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file already exists locally or the location it would be
+        downloaded to when opened using the `open` method.
+
+ Parameters
+ ----------
+ path : str
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
+
+ Returns
+ -------
+ out : str
+ Complete path, including the `DataSource` destination directory.
+
+ """
+ return DataSource.abspath(self, self._fullpath(path))
+
+ def exists(self, path):
+ """
+ Test if path exists prepending Repository base URL to path.
+
+ Test if `path` exists as (and in this order):
+
+ - a local file.
+ - a remote URL that has been downloaded and stored locally in the
+ `DataSource` directory.
+ - a remote URL that has not been downloaded, but is valid and
+ accessible.
+
+ Parameters
+ ----------
+ path : str
+ Can be a local file or a remote URL. This may, but does not
+ have to, include the `baseurl` with which the `Repository` was
+ initialized.
+
+ Returns
+ -------
+ out : bool
+ True if `path` exists.
+
+ Notes
+ -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory or is a valid remote
+        URL. `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+ """
+ return DataSource.exists(self, self._fullpath(path))
+
+ def open(self, path, mode='r', encoding=None, newline=None):
+ """
+ Open and return file-like object prepending Repository base URL.
+
+        If `path` is a URL, it will be downloaded, stored in the
+ DataSource directory and opened from there.
+
+ Parameters
+ ----------
+ path : str
+ Local file path or URL to open. This may, but does not have to,
+ include the `baseurl` with which the `Repository` was
+ initialized.
+ mode : {'r', 'w', 'a'}, optional
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+ 'a' to append. Available modes depend on the type of object
+ specified by `path`. Default is 'r'.
+ encoding : {None, str}, optional
+ Open text file with given encoding. The default encoding will be
+ what `io.open` uses.
+ newline : {None, str}, optional
+ Newline to use when reading text file.
+
+ Returns
+ -------
+ out : file object
+ File object.
+
+ """
+ return DataSource.open(self, self._fullpath(path), mode,
+ encoding=encoding, newline=newline)
+
+ def listdir(self):
+ """
+ List files in the source Repository.
+
+ Returns
+ -------
+ files : list of str
+ List of file names (not containing a directory part).
+
+ Notes
+ -----
+ Does not currently work for remote repositories.
+
+ """
+ if self._isurl(self._baseurl):
+ raise NotImplementedError(
+ "Directory listing of URLs, not supported yet.")
+ else:
+ return os.listdir(self._baseurl)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/_iotools.py b/venv/lib/python3.9/site-packages/numpy/lib/_iotools.py
new file mode 100644
index 00000000..4a5ac128
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/_iotools.py
@@ -0,0 +1,897 @@
+"""A collection of functions designed to help I/O with ascii files.
+
+"""
+__docformat__ = "restructuredtext en"
+
+import numpy as np
+import numpy.core.numeric as nx
+from numpy.compat import asbytes, asunicode
+
+
+def _decode_line(line, encoding=None):
+ """Decode bytes from binary input streams.
+
+    Defaults to decoding from 'latin1'. That differs from the behavior of
+    np.compat.asunicode, which decodes from 'ascii'.
+
+ Parameters
+ ----------
+ line : str or bytes
+ Line to be decoded.
+ encoding : str
+ Encoding used to decode `line`.
+
+ Returns
+ -------
+ decoded_line : str
+
+ """
+ if type(line) is bytes:
+ if encoding is None:
+ encoding = "latin1"
+ line = line.decode(encoding)
+
+ return line
+
+
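+# Editorial sketch (not part of the upstream numpy source): latin1 maps each
+# byte 0-255 to the code point of the same number, so decoding never fails:
+#
+#     >>> _decode_line(b'caf\xe9')
+#     'café'
+#     >>> _decode_line('already text')
+#     'already text'
+
+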
+def _is_string_like(obj):
+ """
+ Check whether obj behaves like a string.
+ """
+ try:
+ obj + ''
+ except (TypeError, ValueError):
+ return False
+ return True
+
+
+def _is_bytes_like(obj):
+ """
+ Check whether obj behaves like a bytes object.
+ """
+ try:
+ obj + b''
+ except (TypeError, ValueError):
+ return False
+ return True
+
+
+def has_nested_fields(ndtype):
+ """
+ Returns whether one or several fields of a dtype are nested.
+
+ Parameters
+ ----------
+ ndtype : dtype
+ Data-type of a structured array.
+
+ Raises
+ ------
+ AttributeError
+ If `ndtype` does not have a `names` attribute.
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
+ >>> np.lib._iotools.has_nested_fields(dt)
+ False
+
+ """
+ for name in ndtype.names or ():
+ if ndtype[name].names is not None:
+ return True
+ return False
+
+
+def flatten_dtype(ndtype, flatten_base=False):
+ """
+ Unpack a structured data-type by collapsing nested fields and/or fields
+ with a shape.
+
+ Note that the field names are lost.
+
+ Parameters
+ ----------
+ ndtype : dtype
+ The datatype to collapse
+ flatten_base : bool, optional
+ If True, transform a field with a shape into several fields. Default is
+ False.
+
+ Examples
+ --------
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ... ('block', int, (2, 3))])
+ >>> np.lib._iotools.flatten_dtype(dt)
+ [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
+ >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
+ [dtype('S4'),
+ dtype('float64'),
+ dtype('float64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64')]
+
+ """
+ names = ndtype.names
+ if names is None:
+ if flatten_base:
+ return [ndtype.base] * int(np.prod(ndtype.shape))
+ return [ndtype.base]
+ else:
+ types = []
+ for field in names:
+ info = ndtype.fields[field]
+ flat_dt = flatten_dtype(info[0], flatten_base)
+ types.extend(flat_dt)
+ return types
+
+
+class LineSplitter:
+ """
+ Object to split a string at a given delimiter or at given places.
+
+ Parameters
+ ----------
+ delimiter : str, int, or sequence of ints, optional
+ If a string, character used to delimit consecutive fields.
+ If an integer or a sequence of integers, width(s) of each field.
+ comments : str, optional
+ Character used to mark the beginning of a comment. Default is '#'.
+ autostrip : bool, optional
+ Whether to strip each individual field. Default is True.
+
+ """
+
+ def autostrip(self, method):
+ """
+ Wrapper to strip each member of the output of `method`.
+
+ Parameters
+ ----------
+ method : function
+ Function that takes a single argument and returns a sequence of
+ strings.
+
+ Returns
+ -------
+ wrapped : function
+ The result of wrapping `method`. `wrapped` takes a single input
+ argument and returns a list of strings that are stripped of
+ white-space.
+
+ """
+ return lambda input: [_.strip() for _ in method(input)]
+
+ def __init__(self, delimiter=None, comments='#', autostrip=True,
+ encoding=None):
+ delimiter = _decode_line(delimiter)
+ comments = _decode_line(comments)
+
+ self.comments = comments
+
+ # Delimiter is a character
+ if (delimiter is None) or isinstance(delimiter, str):
+ delimiter = delimiter or None
+ _handyman = self._delimited_splitter
+ # Delimiter is a list of field widths
+ elif hasattr(delimiter, '__iter__'):
+ _handyman = self._variablewidth_splitter
+ idx = np.cumsum([0] + list(delimiter))
+ delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
+ # Delimiter is a single integer
+ elif int(delimiter):
+ (_handyman, delimiter) = (
+ self._fixedwidth_splitter, int(delimiter))
+ else:
+ (_handyman, delimiter) = (self._delimited_splitter, None)
+ self.delimiter = delimiter
+ if autostrip:
+ self._handyman = self.autostrip(_handyman)
+ else:
+ self._handyman = _handyman
+ self.encoding = encoding
+
+ def _delimited_splitter(self, line):
+ """Chop off comments, strip, and split at delimiter. """
+ if self.comments is not None:
+ line = line.split(self.comments)[0]
+ line = line.strip(" \r\n")
+ if not line:
+ return []
+ return line.split(self.delimiter)
+
+ def _fixedwidth_splitter(self, line):
+ if self.comments is not None:
+ line = line.split(self.comments)[0]
+ line = line.strip("\r\n")
+ if not line:
+ return []
+ fixed = self.delimiter
+ slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
+ return [line[s] for s in slices]
+
+ def _variablewidth_splitter(self, line):
+ if self.comments is not None:
+ line = line.split(self.comments)[0]
+ if not line:
+ return []
+ slices = self.delimiter
+ return [line[s] for s in slices]
+
+ def __call__(self, line):
+ return self._handyman(_decode_line(line, self.encoding))
+
+
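+# Editorial sketch (not part of the upstream numpy source): the three private
+# splitters above are chosen once, in __init__, based on the delimiter type:
+#
+#     >>> LineSplitter(',')('1, 2 # trailing comment')
+#     ['1', '2']
+#     >>> LineSplitter(delimiter=3)('123456')    # fixed 3-character fields
+#     ['123', '456']
+
+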
+class NameValidator:
+ """
+ Object to validate a list of strings to use as field names.
+
+ The strings are stripped of any non alphanumeric character, and spaces
+ are replaced by '_'. During instantiation, the user can define a list
+    of names to exclude, as well as a list of invalid characters. Names in
+    the exclusion list have a '_' character appended.
+
+ Once an instance has been created, it can be called with a list of
+ names, and a list of valid names will be created. The `__call__`
+ method accepts an optional keyword "default" that sets the default name
+ in case of ambiguity. By default this is 'f', so that names will
+ default to `f0`, `f1`, etc.
+
+ Parameters
+ ----------
+ excludelist : sequence, optional
+ A list of names to exclude. This list is appended to the default
+        list ['return', 'file', 'print']. Excluded names have an
+        underscore appended: for example, `file` becomes `file_` if supplied.
+ deletechars : str, optional
+ A string combining invalid characters that must be deleted from the
+ names.
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
+ * If True, field names are case-sensitive.
+ * If False or 'upper', field names are converted to upper case.
+ * If 'lower', field names are converted to lower case.
+
+ The default value is True.
+ replace_space : '_', optional
+ Character(s) used in replacement of white spaces.
+
+ Notes
+ -----
+ Calling an instance of `NameValidator` is the same as calling its
+ method `validate`.
+
+ Examples
+ --------
+ >>> validator = np.lib._iotools.NameValidator()
+ >>> validator(['file', 'field2', 'with space', 'CaSe'])
+ ('file_', 'field2', 'with_space', 'CaSe')
+
+ >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
+ ... deletechars='q',
+ ... case_sensitive=False)
+ >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
+ ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
+
+ """
+
+ defaultexcludelist = ['return', 'file', 'print']
+ defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
+
+ def __init__(self, excludelist=None, deletechars=None,
+ case_sensitive=None, replace_space='_'):
+ # Process the exclusion list ..
+ if excludelist is None:
+ excludelist = []
+ excludelist.extend(self.defaultexcludelist)
+ self.excludelist = excludelist
+ # Process the list of characters to delete
+ if deletechars is None:
+ delete = self.defaultdeletechars
+ else:
+ delete = set(deletechars)
+ delete.add('"')
+ self.deletechars = delete
+ # Process the case option .....
+ if (case_sensitive is None) or (case_sensitive is True):
+ self.case_converter = lambda x: x
+ elif (case_sensitive is False) or case_sensitive.startswith('u'):
+ self.case_converter = lambda x: x.upper()
+ elif case_sensitive.startswith('l'):
+ self.case_converter = lambda x: x.lower()
+ else:
+ msg = 'unrecognized case_sensitive value %s.' % case_sensitive
+ raise ValueError(msg)
+
+ self.replace_space = replace_space
+
+ def validate(self, names, defaultfmt="f%i", nbfields=None):
+ """
+ Validate a list of strings as field names for a structured array.
+
+ Parameters
+ ----------
+ names : sequence of str
+ Strings to be validated.
+ defaultfmt : str, optional
+ Default format string, used if validating a given string
+ reduces its length to zero.
+ nbfields : integer, optional
+ Final number of validated names, used to expand or shrink the
+ initial list of names.
+
+ Returns
+ -------
+ validatednames : list of str
+ The list of validated field names.
+
+ Notes
+ -----
+ A `NameValidator` instance can be called directly, which is the
+ same as calling `validate`. For examples, see `NameValidator`.
+
+ """
+ # Initial checks ..............
+ if (names is None):
+ if (nbfields is None):
+ return None
+ names = []
+ if isinstance(names, str):
+ names = [names, ]
+ if nbfields is not None:
+ nbnames = len(names)
+ if (nbnames < nbfields):
+ names = list(names) + [''] * (nbfields - nbnames)
+ elif (nbnames > nbfields):
+ names = names[:nbfields]
+ # Set some shortcuts ...........
+ deletechars = self.deletechars
+ excludelist = self.excludelist
+ case_converter = self.case_converter
+ replace_space = self.replace_space
+ # Initializes some variables ...
+ validatednames = []
+ seen = dict()
+ nbempty = 0
+
+ for item in names:
+ item = case_converter(item).strip()
+ if replace_space:
+ item = item.replace(' ', replace_space)
+ item = ''.join([c for c in item if c not in deletechars])
+ if item == '':
+ item = defaultfmt % nbempty
+ while item in names:
+ nbempty += 1
+ item = defaultfmt % nbempty
+ nbempty += 1
+ elif item in excludelist:
+ item += '_'
+ cnt = seen.get(item, 0)
+ if cnt > 0:
+ validatednames.append(item + '_%d' % cnt)
+ else:
+ validatednames.append(item)
+ seen[item] = cnt + 1
+ return tuple(validatednames)
+
+ def __call__(self, names, defaultfmt="f%i", nbfields=None):
+ return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
+
+
+def str2bool(value):
+ """
+    Tries to transform a string that is supposed to represent a boolean
+    into an actual boolean.
+
+ Parameters
+ ----------
+ value : str
+ The string that is transformed to a boolean.
+
+ Returns
+ -------
+ boolval : bool
+ The boolean representation of `value`.
+
+ Raises
+ ------
+ ValueError
+ If the string is not 'True' or 'False' (case independent)
+
+ Examples
+ --------
+ >>> np.lib._iotools.str2bool('TRUE')
+ True
+ >>> np.lib._iotools.str2bool('false')
+ False
+
+ """
+ value = value.upper()
+ if value == 'TRUE':
+ return True
+ elif value == 'FALSE':
+ return False
+ else:
+ raise ValueError("Invalid boolean")
+
+
+class ConverterError(Exception):
+ """
+ Exception raised when an error occurs in a converter for string values.
+
+ """
+ pass
+
+
+class ConverterLockError(ConverterError):
+ """
+ Exception raised when an attempt is made to upgrade a locked converter.
+
+ """
+ pass
+
+
+class ConversionWarning(UserWarning):
+ """
+ Warning issued when a string converter has a problem.
+
+ Notes
+ -----
+ In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
+ is explicitly suppressed with the "invalid_raise" keyword.
+
+ """
+ pass
+
+
+class StringConverter:
+ """
+    Factory class for functions that transform a string into another
+    object (int, float).
+
+ After initialization, an instance can be called to transform a string
+ into another object. If the string is recognized as representing a
+ missing value, a default value is returned.
+
+ Attributes
+ ----------
+ func : function
+ Function used for the conversion.
+ default : any
+ Default value to return when the input corresponds to a missing
+ value.
+ type : type
+ Type of the output.
+ _status : int
+ Integer representing the order of the conversion.
+ _mapper : sequence of tuples
+ Sequence of tuples (dtype, function, default value) to evaluate in
+ order.
+ _locked : bool
+ Holds `locked` parameter.
+
+ Parameters
+ ----------
+ dtype_or_func : {None, dtype, function}, optional
+ If a `dtype`, specifies the input data type, used to define a basic
+ function and a default value for missing data. For example, when
+ `dtype` is float, the `func` attribute is set to `float` and the
+ default value to `np.nan`. If a function, this function is used to
+ convert a string to another object. In this case, it is recommended
+ to give an associated default value as input.
+ default : any, optional
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given, `StringConverter`
+ tries to supply a reasonable default value.
+ missing_values : {None, sequence of str}, optional
+ ``None`` or sequence of strings indicating a missing value. If ``None``
+ then missing values are indicated by empty entries. The default is
+ ``None``.
+ locked : bool, optional
+ Whether the StringConverter should be locked to prevent automatic
+ upgrade or not. Default is False.
+
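+    Examples
+    --------
+    A brief sketch of typical use (via this module's private API; the
+    plain `int` dtype resolves to a NumPy integer converter):
+
+    >>> convert = np.lib._iotools.StringConverter(int, default=-1)
+    >>> convert('42')
+    42
+    >>> convert('')
+    -1
+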
+ """
+ _mapper = [(nx.bool_, str2bool, False),
+ (nx.int_, int, -1),]
+
+ # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
+ if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+ _mapper.append((nx.int64, int, -1))
+
+ _mapper.extend([(nx.float64, float, nx.nan),
+ (nx.complex128, complex, nx.nan + 0j),
+ (nx.longdouble, nx.longdouble, nx.nan),
+ # If a non-default dtype is passed, fall back to generic
+ # ones (should only be used for the converter)
+ (nx.integer, int, -1),
+ (nx.floating, float, nx.nan),
+ (nx.complexfloating, complex, nx.nan + 0j),
+ # Last, try with the string types (must be last, because
+ # `_mapper[-1]` is used as default in some cases)
+ (nx.unicode_, asunicode, '???'),
+ (nx.string_, asbytes, '???'),
+ ])
+
+ @classmethod
+ def _getdtype(cls, val):
+ """Returns the dtype of the input variable."""
+ return np.array(val).dtype
+
+ @classmethod
+ def _getsubdtype(cls, val):
+ """Returns the type of the dtype of the input variable."""
+ return np.array(val).dtype.type
+
+ @classmethod
+ def _dtypeortype(cls, dtype):
+ """Returns dtype for datetime64 and type of dtype otherwise."""
+
+        # This is a bit annoying. We want to return the "general" type in
+        # most cases (i.e. "string" rather than "S10"), but we want to
+        # return the specific type for datetime64 (i.e. "datetime64[us]"
+        # rather than "datetime64").
+ if dtype.type == np.datetime64:
+ return dtype
+ return dtype.type
+
+ @classmethod
+ def upgrade_mapper(cls, func, default=None):
+ """
+ Upgrade the mapper of a StringConverter by adding a new function and
+ its corresponding default.
+
+ The input function (or sequence of functions) and its associated
+ default value (if any) is inserted in penultimate position of the
+ mapper. The corresponding type is estimated from the dtype of the
+ default value.
+
+ Parameters
+ ----------
+        func : function or sequence of functions
+            Function, or sequence of functions (or of ``(type, function,
+            default)`` tuples), to insert into the mapper.
+        default : any, optional
+            Default value associated with `func`, or sequence of defaults
+            matching a sequence of functions.
+
+ Examples
+ --------
+ >>> import dateutil.parser
+ >>> import datetime
+ >>> dateparser = dateutil.parser.parse
+ >>> defaultdate = datetime.date(2000, 1, 1)
+ >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+ """
+        # Func is a single function
+ if hasattr(func, '__call__'):
+ cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+ return
+ elif hasattr(func, '__iter__'):
+ if isinstance(func[0], (tuple, list)):
+ for _ in func:
+ cls._mapper.insert(-1, _)
+ return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                # Pad the defaults with None so that zip() pairs every
+                # function with a default
+                default.extend([None] * (len(func) - len(default)))
+ for fct, dft in zip(func, default):
+ cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+ @classmethod
+ def _find_map_entry(cls, dtype):
+ # if a converter for the specific dtype is available use that
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
+ if dtype.type == deftype:
+ return i, (deftype, func, default_def)
+
+ # otherwise find an inexact match
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
+ if np.issubdtype(dtype.type, deftype):
+ return i, (deftype, func, default_def)
+
+ raise LookupError
+
+ def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+ locked=False):
+ # Defines a lock for upgrade
+ self._locked = bool(locked)
+ # No input dtype: minimal initialization
+ if dtype_or_func is None:
+ self.func = str2bool
+ self._status = 0
+ self.default = default or False
+ dtype = np.dtype('bool')
+ else:
+ # Is the input a np.dtype ?
+ try:
+ self.func = None
+ dtype = np.dtype(dtype_or_func)
+ except TypeError:
+ # dtype_or_func must be a function, then
+ if not hasattr(dtype_or_func, '__call__'):
+ errmsg = ("The input argument `dtype` is neither a"
+ " function nor a dtype (got '%s' instead)")
+ raise TypeError(errmsg % type(dtype_or_func))
+ # Set the function
+ self.func = dtype_or_func
+ # If we don't have a default, try to guess it or set it to
+ # None
+ if default is None:
+ try:
+ default = self.func('0')
+ except ValueError:
+ default = None
+ dtype = self._getdtype(default)
+
+ # find the best match in our mapper
+ try:
+ self._status, (_, func, default_def) = self._find_map_entry(dtype)
+ except LookupError:
+ # no match
+ self.default = default
+ _, func, _ = self._mapper[-1]
+ self._status = 0
+ else:
+ # use the found default only if we did not already have one
+ if default is None:
+ self.default = default_def
+ else:
+ self.default = default
+
+ # If the input was a dtype, set the function to the last we saw
+ if self.func is None:
+ self.func = func
+
+ # If the status is 1 (int), change the function to
+ # something more robust.
+ if self.func == self._mapper[1][1]:
+ if issubclass(dtype.type, np.uint64):
+ self.func = np.uint64
+ elif issubclass(dtype.type, np.int64):
+ self.func = np.int64
+ else:
+ self.func = lambda x: int(float(x))
+ # Store the list of strings corresponding to missing values.
+ if missing_values is None:
+ self.missing_values = {''}
+ else:
+ if isinstance(missing_values, str):
+ missing_values = missing_values.split(",")
+ self.missing_values = set(list(missing_values) + [''])
+
+ self._callingfunction = self._strict_call
+ self.type = self._dtypeortype(dtype)
+ self._checked = False
+ self._initial_default = default
+
+ def _loose_call(self, value):
+ try:
+ return self.func(value)
+ except ValueError:
+ return self.default
+
+ def _strict_call(self, value):
+ try:
+
+ # We check if we can convert the value using the current function
+ new_value = self.func(value)
+
+ # In addition to having to check whether func can convert the
+ # value, we also have to make sure that we don't get overflow
+ # errors for integers.
+ if self.func is int:
+ try:
+ np.array(value, dtype=self.type)
+ except OverflowError:
+ raise ValueError
+
+ # We're still here so we can now return the new value
+ return new_value
+
+ except ValueError:
+ if value.strip() in self.missing_values:
+ if not self._status:
+ self._checked = False
+ return self.default
+ raise ValueError("Cannot convert string '%s'" % value)
+
+ def __call__(self, value):
+ return self._callingfunction(value)
+
+ def _do_upgrade(self):
+ # Raise an exception if we locked the converter...
+ if self._locked:
+ errmsg = "Converter is locked and cannot be upgraded"
+ raise ConverterLockError(errmsg)
+ _statusmax = len(self._mapper)
+        # Complain if we try to upgrade past the maximum
+ _status = self._status
+ if _status == _statusmax:
+ errmsg = "Could not find a valid conversion function"
+ raise ConverterError(errmsg)
+ elif _status < _statusmax - 1:
+ _status += 1
+ self.type, self.func, default = self._mapper[_status]
+ self._status = _status
+ if self._initial_default is not None:
+ self.default = self._initial_default
+ else:
+ self.default = default
+
+ def upgrade(self, value):
+ """
+ Find the best converter for a given string, and return the result.
+
+ The supplied string `value` is converted by testing different
+ converters in order. First the `func` method of the
+ `StringConverter` instance is tried, if this fails other available
+ converters are tried. The order in which these other converters
+ are tried is determined by the `_status` attribute of the instance.
+
+ Parameters
+ ----------
+ value : str
+ The string to convert.
+
+ Returns
+ -------
+ out : any
+ The result of converting `value` with the appropriate converter.
+
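+        Examples
+        --------
+        A sketch of the upgrade path (private API; a fresh converter
+        starts at the boolean entry of `_mapper` and is upgraded until a
+        conversion succeeds):
+
+        >>> convert = np.lib._iotools.StringConverter()
+        >>> convert.upgrade('3.14')
+        3.14
+        >>> convert.type
+        <class 'numpy.float64'>
+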
+ """
+ self._checked = True
+ try:
+ return self._strict_call(value)
+ except ValueError:
+ self._do_upgrade()
+ return self.upgrade(value)
+
+ def iterupgrade(self, value):
+ self._checked = True
+ if not hasattr(value, '__iter__'):
+ value = (value,)
+ _strict_call = self._strict_call
+ try:
+ for _m in value:
+ _strict_call(_m)
+ except ValueError:
+ self._do_upgrade()
+ self.iterupgrade(value)
+
+ def update(self, func, default=None, testing_value=None,
+ missing_values='', locked=False):
+ """
+ Set StringConverter attributes directly.
+
+ Parameters
+ ----------
+ func : function
+ Conversion function.
+ default : any, optional
+ Value to return by default, that is, when the string to be
+ converted is flagged as missing. If not given,
+ `StringConverter` tries to supply a reasonable default value.
+ testing_value : str, optional
+ A string representing a standard input value of the converter.
+ This string is used to help defining a reasonable default
+ value.
+ missing_values : {sequence of str, None}, optional
+ Sequence of strings indicating a missing value. If ``None``, then
+ the existing `missing_values` are cleared. The default is `''`.
+ locked : bool, optional
+ Whether the StringConverter should be locked to prevent
+ automatic upgrade or not. Default is False.
+
+ Notes
+ -----
+ `update` takes the same parameters as the constructor of
+ `StringConverter`, except that `func` does not accept a `dtype`
+ whereas `dtype_or_func` in the constructor does.
+
+ """
+ self.func = func
+ self._locked = locked
+
+ # Don't reset the default to None if we can avoid it
+ if default is not None:
+ self.default = default
+ self.type = self._dtypeortype(self._getdtype(default))
+ else:
+ try:
+ tester = func(testing_value or '1')
+ except (TypeError, ValueError):
+ tester = None
+ self.type = self._dtypeortype(self._getdtype(tester))
+
+ # Add the missing values to the existing set or clear it.
+ if missing_values is None:
+ # Clear all missing values even though the ctor initializes it to
+ # set(['']) when the argument is None.
+ self.missing_values = set()
+ else:
+ if not np.iterable(missing_values):
+ missing_values = [missing_values]
+ if not all(isinstance(v, str) for v in missing_values):
+ raise TypeError("missing_values must be strings or unicode")
+ self.missing_values.update(missing_values)
+
+
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+ """
+ Convenience function to create a `np.dtype` object.
+
+ The function processes the input `dtype` and matches it with the given
+ names.
+
+ Parameters
+ ----------
+ ndtype : var
+ Definition of the dtype. Can be any string or dictionary recognized
+ by the `np.dtype` function, or a sequence of types.
+ names : str or sequence, optional
+ Sequence of strings to use as field names for a structured dtype.
+ For convenience, `names` can be a string of a comma-separated list
+ of names.
+ defaultfmt : str, optional
+ Format string used to define missing names, such as ``"f%i"``
+ (default) or ``"fields_%02i"``.
+ validationargs : optional
+ A series of optional arguments used to initialize a
+ `NameValidator`.
+
+ Examples
+ --------
+ >>> np.lib._iotools.easy_dtype(float)
+ dtype('float64')
+ >>> np.lib._iotools.easy_dtype("i4, f8")
+ dtype([('f0', '<i4'), ('f1', '<f8')])
+ >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+ dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+ >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+ dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+ >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+ dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+ """
+ try:
+ ndtype = np.dtype(ndtype)
+ except TypeError:
+ validate = NameValidator(**validationargs)
+ nbfields = len(ndtype)
+ if names is None:
+ names = [''] * len(ndtype)
+ elif isinstance(names, str):
+ names = names.split(",")
+ names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
+ ndtype = np.dtype(dict(formats=ndtype, names=names))
+ else:
+ # Explicit names
+ if names is not None:
+ validate = NameValidator(**validationargs)
+ if isinstance(names, str):
+ names = names.split(",")
+ # Simple dtype: repeat to match the nb of names
+ if ndtype.names is None:
+ formats = tuple([ndtype.type] * len(names))
+ names = validate(names, defaultfmt=defaultfmt)
+ ndtype = np.dtype(list(zip(names, formats)))
+ # Structured dtype: just validate the names as needed
+ else:
+ ndtype.names = validate(names, nbfields=len(ndtype.names),
+ defaultfmt=defaultfmt)
+ # No implicit names
+ elif ndtype.names is not None:
+ validate = NameValidator(**validationargs)
+ # Default initial names : should we change the format ?
+ numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
+ if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
+ ndtype.names = validate([''] * len(ndtype.names),
+ defaultfmt=defaultfmt)
+ # Explicit initial names : just validate
+ else:
+ ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
+ return ndtype
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/_version.py b/venv/lib/python3.9/site-packages/numpy/lib/_version.py
new file mode 100644
index 00000000..bfac5f81
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/_version.py
@@ -0,0 +1,155 @@
+"""Utility to compare (NumPy) version strings.
+
+The NumpyVersion class allows properly comparing numpy version strings.
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+
+"""
+import re
+
+
+__all__ = ['NumpyVersion']
+
+
+class NumpyVersion():
+ """Parse and compare numpy version strings.
+
+ NumPy has the following versioning scheme (numbers given are examples; they
+ can be > 9 in principle):
+
+ - Released version: '1.8.0', '1.8.1', etc.
+ - Alpha: '1.8.0a1', '1.8.0a2', etc.
+ - Beta: '1.8.0b1', '1.8.0b2', etc.
+ - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
+ - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
+ - Development versions after a1: '1.8.0a1.dev-f1234afa',
+ '1.8.0b2.dev-f1234afa',
+ '1.8.1rc1.dev-f1234afa', etc.
+ - Development versions (no git hash available): '1.8.0.dev-Unknown'
+
+ Comparing needs to be done against a valid version string or other
+ `NumpyVersion` instance. Note that all development versions of the same
+ (pre-)release compare equal.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ vstring : str
+ NumPy version string (``np.__version__``).
+
+ Examples
+ --------
+ >>> from numpy.lib import NumpyVersion
+ >>> if NumpyVersion(np.__version__) < '1.7.0':
+ ... print('skip')
+ >>> # skip
+
+ >>> NumpyVersion('1.7') # raises ValueError, add ".0"
+ Traceback (most recent call last):
+ ...
+ ValueError: Not a valid numpy version string
+
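+    A development version compares below the corresponding release
+    (a small sketch):
+
+    >>> NumpyVersion('1.8.0.dev-f1234afa') < NumpyVersion('1.8.0')
+    True
+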
+ """
+
+ def __init__(self, vstring):
+ self.vstring = vstring
+ ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
+ if not ver_main:
+ raise ValueError("Not a valid numpy version string")
+
+ self.version = ver_main.group()
+ self.major, self.minor, self.bugfix = [int(x) for x in
+ self.version.split('.')]
+ if len(vstring) == ver_main.end():
+ self.pre_release = 'final'
+ else:
+ alpha = re.match(r'a\d', vstring[ver_main.end():])
+ beta = re.match(r'b\d', vstring[ver_main.end():])
+ rc = re.match(r'rc\d', vstring[ver_main.end():])
+ pre_rel = [m for m in [alpha, beta, rc] if m is not None]
+ if pre_rel:
+ self.pre_release = pre_rel[0].group()
+ else:
+ self.pre_release = ''
+
+        self.is_devversion = bool(re.search(r'\.dev', vstring))
+
+ def _compare_version(self, other):
+ """Compare major.minor.bugfix"""
+ if self.major == other.major:
+ if self.minor == other.minor:
+ if self.bugfix == other.bugfix:
+ vercmp = 0
+ elif self.bugfix > other.bugfix:
+ vercmp = 1
+ else:
+ vercmp = -1
+ elif self.minor > other.minor:
+ vercmp = 1
+ else:
+ vercmp = -1
+ elif self.major > other.major:
+ vercmp = 1
+ else:
+ vercmp = -1
+
+ return vercmp
+
+ def _compare_pre_release(self, other):
+ """Compare alpha/beta/rc/final."""
+ if self.pre_release == other.pre_release:
+ vercmp = 0
+ elif self.pre_release == 'final':
+ vercmp = 1
+ elif other.pre_release == 'final':
+ vercmp = -1
+ elif self.pre_release > other.pre_release:
+ vercmp = 1
+ else:
+ vercmp = -1
+
+ return vercmp
+
+ def _compare(self, other):
+ if not isinstance(other, (str, NumpyVersion)):
+ raise ValueError("Invalid object to compare with NumpyVersion.")
+
+ if isinstance(other, str):
+ other = NumpyVersion(other)
+
+ vercmp = self._compare_version(other)
+ if vercmp == 0:
+ # Same x.y.z version, check for alpha/beta/rc
+ vercmp = self._compare_pre_release(other)
+ if vercmp == 0:
+ # Same version and same pre-release, check if dev version
+ if self.is_devversion is other.is_devversion:
+ vercmp = 0
+ elif self.is_devversion:
+ vercmp = -1
+ else:
+ vercmp = 1
+
+ return vercmp
+
+ def __lt__(self, other):
+ return self._compare(other) < 0
+
+ def __le__(self, other):
+ return self._compare(other) <= 0
+
+ def __eq__(self, other):
+ return self._compare(other) == 0
+
+ def __ne__(self, other):
+ return self._compare(other) != 0
+
+ def __gt__(self, other):
+ return self._compare(other) > 0
+
+ def __ge__(self, other):
+ return self._compare(other) >= 0
+
+ def __repr__(self):
+ return "NumpyVersion(%s)" % self.vstring
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/_version.pyi b/venv/lib/python3.9/site-packages/numpy/lib/_version.pyi
new file mode 100644
index 00000000..1c82c99b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/_version.pyi
@@ -0,0 +1,17 @@
+__all__: list[str]
+
+class NumpyVersion:
+ vstring: str
+ version: str
+ major: int
+ minor: int
+ bugfix: int
+ pre_release: str
+ is_devversion: bool
+ def __init__(self, vstring: str) -> None: ...
+ def __lt__(self, other: str | NumpyVersion) -> bool: ...
+ def __le__(self, other: str | NumpyVersion) -> bool: ...
+ def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]
+ def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]
+ def __gt__(self, other: str | NumpyVersion) -> bool: ...
+ def __ge__(self, other: str | NumpyVersion) -> bool: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/arraypad.py b/venv/lib/python3.9/site-packages/numpy/lib/arraypad.py
new file mode 100644
index 00000000..28123246
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/arraypad.py
@@ -0,0 +1,877 @@
+"""
+The arraypad module contains a group of functions to pad values onto the edges
+of an n-dimensional array.
+
+"""
+import numpy as np
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib.index_tricks import ndindex
+
+
+__all__ = ['pad']
+
+
+###############################################################################
+# Private utility functions.
+
+
+def _round_if_needed(arr, dtype):
+ """
+ Rounds arr inplace if destination dtype is integer.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ dtype : dtype
+ The dtype of the destination array.
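+
+    Examples
+    --------
+    A small sketch (private helper; rounding happens in place and
+    prints nothing):
+
+    >>> a = np.array([0.4, 1.6])
+    >>> _round_if_needed(a, np.int64)
+    >>> a
+    array([0., 2.])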
+ """
+ if np.issubdtype(dtype, np.integer):
+ arr.round(out=arr)
+
+
+def _slice_at_axis(sl, axis):
+ """
+ Construct tuple of slices to slice an array in the given dimension.
+
+ Parameters
+ ----------
+ sl : slice
+ The slice for the given dimension.
+ axis : int
+ The axis to which `sl` is applied. All other dimensions are left
+ "unsliced".
+
+ Returns
+ -------
+ sl : tuple of slices
+        A tuple of slices with `sl` at position `axis`, full slices
+        before it and an ``Ellipsis`` after it.
+
+ Examples
+ --------
+ >>> _slice_at_axis(slice(None, 3, -1), 1)
+    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
+ """
+ return (slice(None),) * axis + (sl,) + (...,)
+
+
+def _view_roi(array, original_area_slice, axis):
+ """
+ Get a view of the current region of interest during iterative padding.
+
+ When padding multiple dimensions iteratively corner values are
+ unnecessarily overwritten multiple times. This function reduces the
+ working area for the first dimensions so that corners are excluded.
+
+ Parameters
+ ----------
+ array : ndarray
+ The array with the region of interest.
+ original_area_slice : tuple of slices
+ Denotes the area with original values of the unpadded array.
+ axis : int
+ The currently padded dimension assuming that `axis` is padded before
+ `axis` + 1.
+
+ Returns
+ -------
+ roi : ndarray
+ The region of interest of the original `array`.
+ """
+ axis += 1
+ sl = (slice(None),) * axis + original_area_slice[axis:]
+ return array[sl]
+
+
+def _pad_simple(array, pad_width, fill_value=None):
+ """
+ Pad array on all sides with either a single value or undefined values.
+
+ Parameters
+ ----------
+ array : ndarray
+ Array to grow.
+ pad_width : sequence of tuple[int, int]
+ Pad width on both sides for each dimension in `arr`.
+ fill_value : scalar, optional
+ If provided the padded area is filled with this value, otherwise
+        the pad area is left undefined.
+
+ Returns
+ -------
+ padded : ndarray
+        The padded array with the same dtype as `array`. Its order will default
+ to C-style if `array` is not F-contiguous.
+ original_area_slice : tuple
+ A tuple of slices pointing to the area of the original array.
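+
+    Examples
+    --------
+    A small sketch (private helper; a fill value makes the pad area
+    defined):
+
+    >>> padded, area = _pad_simple(np.array([1, 2]), [(1, 1)], fill_value=0)
+    >>> padded
+    array([0, 1, 2, 0])
+    >>> area
+    (slice(1, 3, None),)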
+ """
+ # Allocate grown array
+ new_shape = tuple(
+ left + size + right
+ for size, (left, right) in zip(array.shape, pad_width)
+ )
+ order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
+ padded = np.empty(new_shape, dtype=array.dtype, order=order)
+
+ if fill_value is not None:
+ padded.fill(fill_value)
+
+ # Copy old array into correct space
+ original_area_slice = tuple(
+ slice(left, left + size)
+ for size, (left, right) in zip(array.shape, pad_width)
+ )
+ padded[original_area_slice] = array
+
+ return padded, original_area_slice
+
+
+def _set_pad_area(padded, axis, width_pair, value_pair):
+ """
+ Set empty-padded area in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Array with the pad area which is modified inplace.
+ axis : int
+ Dimension with the pad area to set.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ value_pair : tuple of scalars or ndarrays
+ Values inserted into the pad area on each side. It must match or be
+ broadcastable to the shape of `arr`.
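+
+    Examples
+    --------
+    A small sketch (private helper; the 9s stand for the undefined pad
+    area and are overwritten in place):
+
+    >>> a = np.array([9, 1, 2, 9])
+    >>> _set_pad_area(a, 0, (1, 1), (0, 0))
+    >>> a
+    array([0, 1, 2, 0])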
+ """
+ left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
+ padded[left_slice] = value_pair[0]
+
+ right_slice = _slice_at_axis(
+ slice(padded.shape[axis] - width_pair[1], None), axis)
+ padded[right_slice] = value_pair[1]
+
+
+def _get_edges(padded, axis, width_pair):
+ """
+ Retrieve edge values from empty-padded array in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Empty-padded array.
+ axis : int
+ Dimension in which the edges are considered.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+
+ Returns
+ -------
+ left_edge, right_edge : ndarray
+ Edge values of the valid area in `padded` in the given dimension. Its
+ shape will always match `padded` except for the dimension given by
+ `axis` which will have a length of 1.
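+
+    Examples
+    --------
+    A small sketch (private helper; the 9s stand for the undefined pad
+    area):
+
+    >>> _get_edges(np.array([9, 1, 2, 3, 9]), 0, (1, 1))
+    (array([1]), array([3]))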
+ """
+ left_index = width_pair[0]
+ left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
+ left_edge = padded[left_slice]
+
+ right_index = padded.shape[axis] - width_pair[1]
+ right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
+ right_edge = padded[right_slice]
+
+ return left_edge, right_edge
+
+
+def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
+ """
+ Construct linear ramps for empty-padded array in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Empty-padded array.
+ axis : int
+ Dimension in which the ramps are constructed.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ end_value_pair : (scalar, scalar)
+ End values for the linear ramps which form the edge of the fully padded
+ array. These values are included in the linear ramps.
+
+ Returns
+ -------
+ left_ramp, right_ramp : ndarray
+ Linear ramps to set on both sides of `padded`.
+ """
+ edge_pair = _get_edges(padded, axis, width_pair)
+
+ left_ramp, right_ramp = (
+ np.linspace(
+ start=end_value,
+ stop=edge.squeeze(axis), # Dimension is replaced by linspace
+ num=width,
+ endpoint=False,
+ dtype=padded.dtype,
+ axis=axis
+ )
+ for end_value, edge, width in zip(
+ end_value_pair, edge_pair, width_pair
+ )
+ )
+
+ # Reverse linear space in appropriate dimension
+ right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
+
+ return left_ramp, right_ramp
+
+
+def _get_stats(padded, axis, width_pair, length_pair, stat_func):
+ """
+ Calculate statistic for the empty-padded array in given dimension.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Empty-padded array.
+ axis : int
+ Dimension in which the statistic is calculated.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+    length_pair : 2-element sequence of None or int
+        Gives the number of values in the valid area from each side that
+        are taken into account when calculating the statistic. If None,
+        the entire valid area in `padded` is considered.
+ stat_func : function
+ Function to compute statistic. The expected signature is
+ ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
+
+ Returns
+ -------
+ left_stat, right_stat : ndarray
+ Calculated statistic for both sides of `padded`.
+ """
+ # Calculate indices of the edges of the area with original values
+ left_index = width_pair[0]
+ right_index = padded.shape[axis] - width_pair[1]
+ # as well as its length
+ max_length = right_index - left_index
+
+ # Limit stat_lengths to max_length
+ left_length, right_length = length_pair
+ if left_length is None or max_length < left_length:
+ left_length = max_length
+ if right_length is None or max_length < right_length:
+ right_length = max_length
+
+ if (left_length == 0 or right_length == 0) \
+ and stat_func in {np.amax, np.amin}:
+ # amax and amin can't operate on an empty array,
+        # raise a more descriptive error here instead of the default one
+ raise ValueError("stat_length of 0 yields no value for padding")
+
+ # Calculate statistic for the left side
+ left_slice = _slice_at_axis(
+ slice(left_index, left_index + left_length), axis)
+ left_chunk = padded[left_slice]
+ left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
+ _round_if_needed(left_stat, padded.dtype)
+
+ if left_length == right_length == max_length:
+ # return early as right_stat must be identical to left_stat
+ return left_stat, left_stat
+
+ # Calculate statistic for the right side
+ right_slice = _slice_at_axis(
+ slice(right_index - right_length, right_index), axis)
+ right_chunk = padded[right_slice]
+ right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
+ _round_if_needed(right_stat, padded.dtype)
+
+ return left_stat, right_stat
+
+
+def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
+ """
+ Pad `axis` of `arr` with reflection.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Input array of arbitrary shape.
+ axis : int
+ Axis along which to pad `arr`.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ method : str
+ Controls method of reflection; options are 'even' or 'odd'.
+ include_edge : bool
+ If true, edge value is included in reflection, otherwise the edge
+ value forms the symmetric axis to the reflection.
+
+ Returns
+ -------
+ pad_amt : tuple of ints, length 2
+        New index positions of padding to do along the `axis`. If these
+        are both 0, padding in this dimension is finished.
+ """
+ left_pad, right_pad = width_pair
+ old_length = padded.shape[axis] - right_pad - left_pad
+
+ if include_edge:
+ # Edge is included, we need to offset the pad amount by 1
+ edge_offset = 1
+ else:
+ edge_offset = 0 # Edge is not included, no need to offset pad amount
+ old_length -= 1 # but must be omitted from the chunk
+
+ if left_pad > 0:
+ # Pad with reflected values on left side:
+ # First limit chunk size which can't be larger than pad area
+ chunk_length = min(old_length, left_pad)
+ # Slice right to left, stop on or next to edge, start relative to stop
+ stop = left_pad - edge_offset
+ start = stop + chunk_length
+ left_slice = _slice_at_axis(slice(start, stop, -1), axis)
+ left_chunk = padded[left_slice]
+
+ if method == "odd":
+ # Negate chunk and align with edge
+ edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
+ left_chunk = 2 * padded[edge_slice] - left_chunk
+
+ # Insert chunk into padded area
+ start = left_pad - chunk_length
+ stop = left_pad
+ pad_area = _slice_at_axis(slice(start, stop), axis)
+ padded[pad_area] = left_chunk
+ # Adjust pointer to left edge for next iteration
+ left_pad -= chunk_length
+
+ if right_pad > 0:
+ # Pad with reflected values on right side:
+ # First limit chunk size which can't be larger than pad area
+ chunk_length = min(old_length, right_pad)
+ # Slice right to left, start on or next to edge, stop relative to start
+ start = -right_pad + edge_offset - 2
+ stop = start - chunk_length
+ right_slice = _slice_at_axis(slice(start, stop, -1), axis)
+ right_chunk = padded[right_slice]
+
+ if method == "odd":
+ # Negate chunk and align with edge
+ edge_slice = _slice_at_axis(
+ slice(-right_pad - 1, -right_pad), axis)
+ right_chunk = 2 * padded[edge_slice] - right_chunk
+
+ # Insert chunk into padded area
+ start = padded.shape[axis] - right_pad
+ stop = start + chunk_length
+ pad_area = _slice_at_axis(slice(start, stop), axis)
+ padded[pad_area] = right_chunk
+ # Adjust pointer to right edge for next iteration
+ right_pad -= chunk_length
+
+ return left_pad, right_pad
+
+
+def _set_wrap_both(padded, axis, width_pair):
+ """
+ Pad `axis` of `arr` with wrapped values.
+
+ Parameters
+ ----------
+ padded : ndarray
+ Input array of arbitrary shape.
+ axis : int
+ Axis along which to pad `arr`.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+
+ Returns
+ -------
+ pad_amt : tuple of ints, length 2
+        New index positions of padding to do along the `axis`. If these
+        are both 0, padding in this dimension is finished.
+ """
+ left_pad, right_pad = width_pair
+ period = padded.shape[axis] - right_pad - left_pad
+
+ # If the current dimension of `arr` doesn't contain enough valid values
+ # (not part of the undefined pad area) we need to pad multiple times.
+ # Each time the pad area shrinks on both sides which is communicated with
+ # these variables.
+ new_left_pad = 0
+ new_right_pad = 0
+
+ if left_pad > 0:
+ # Pad with wrapped values on left side
+ # First slice chunk from right side of the non-pad area.
+ # Use min(period, left_pad) to ensure that chunk is not larger than
+ # pad area
+ right_slice = _slice_at_axis(
+ slice(-right_pad - min(period, left_pad),
+ -right_pad if right_pad != 0 else None),
+ axis
+ )
+ right_chunk = padded[right_slice]
+
+ if left_pad > period:
+ # Chunk is smaller than pad area
+ pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
+ new_left_pad = left_pad - period
+ else:
+ # Chunk matches pad area
+ pad_area = _slice_at_axis(slice(None, left_pad), axis)
+ padded[pad_area] = right_chunk
+
+ if right_pad > 0:
+ # Pad with wrapped values on right side
+ # First slice chunk from left side of the non-pad area.
+ # Use min(period, right_pad) to ensure that chunk is not larger than
+ # pad area
+ left_slice = _slice_at_axis(
+ slice(left_pad, left_pad + min(period, right_pad),), axis)
+ left_chunk = padded[left_slice]
+
+ if right_pad > period:
+ # Chunk is smaller than pad area
+ pad_area = _slice_at_axis(
+ slice(-right_pad, -right_pad + period), axis)
+ new_right_pad = right_pad - period
+ else:
+ # Chunk matches pad area
+ pad_area = _slice_at_axis(slice(-right_pad, None), axis)
+ padded[pad_area] = left_chunk
+
+ return new_left_pad, new_right_pad
+
+
+def _as_pairs(x, ndim, as_index=False):
+ """
+ Broadcast `x` to an array with the shape (`ndim`, 2).
+
+ A helper function for `pad` that prepares and validates arguments like
+ `pad_width` for iteration in pairs.
+
+ Parameters
+ ----------
+ x : {None, scalar, array-like}
+ The object to broadcast to the shape (`ndim`, 2).
+ ndim : int
+ Number of pairs the broadcasted `x` will have.
+ as_index : bool, optional
+ If `x` is not None, try to round each element of `x` to an integer
+ (dtype `np.intp`) and ensure every element is positive.
+
+ Returns
+ -------
+ pairs : nested iterables, shape (`ndim`, 2)
+ The broadcasted version of `x`.
+
+ Raises
+ ------
+ ValueError
+ If `as_index` is True and `x` contains negative elements.
+ Or if `x` is not broadcastable to the shape (`ndim`, 2).
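+
+    Examples
+    --------
+    A small sketch of the broadcasting behavior (private helper):
+
+    >>> _as_pairs(1, 2)
+    ((1, 1), (1, 1))
+    >>> _as_pairs((1, 2), 3)
+    ((1, 2), (1, 2), (1, 2))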
+ """
+ if x is None:
+ # Pass through None as a special case, otherwise np.round(x) fails
+ # with an AttributeError
+ return ((None, None),) * ndim
+
+ x = np.array(x)
+ if as_index:
+ x = np.round(x).astype(np.intp, copy=False)
+
+ if x.ndim < 3:
+ # Optimization: Possibly use faster paths for cases where `x` has
+ # only 1 or 2 elements. `np.broadcast_to` could handle these as well
+ # but is currently slower
+
+ if x.size == 1:
+ # x was supplied as a single value
+ x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
+ if as_index and x < 0:
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[0]),) * ndim
+
+ if x.size == 2 and x.shape != (2, 1):
+ # x was supplied with a single value for each side
+ # but except case when each dimension has a single value
+ # which should be broadcasted to a pair,
+ # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
+ x = x.ravel() # Ensure x[0], x[1] works
+ if as_index and (x[0] < 0 or x[1] < 0):
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[1]),) * ndim
+
+ if as_index and x.min() < 0:
+ raise ValueError("index can't contain negative values")
+
+ # Converting the array with `tolist` seems to improve performance
+ # when iterating and indexing the result (see usage in `pad`)
+ return np.broadcast_to(x, (ndim, 2)).tolist()
+
+
+def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
+ return (array,)
+
+
+###############################################################################
+# Public functions
+
+
+@array_function_dispatch(_pad_dispatcher, module='numpy')
+def pad(array, pad_width, mode='constant', **kwargs):
+ """
+ Pad an array.
+
+ Parameters
+ ----------
+ array : array_like of rank N
+ The array to pad.
+ pad_width : {sequence, array_like, int}
+ Number of values padded to the edges of each axis.
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
+ for each axis.
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after pad for each axis.
+ ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
+ for all axes.
+ mode : str or function, optional
+ One of the following string values or a user supplied function.
+
+ 'constant' (default)
+ Pads with a constant value.
+ 'edge'
+ Pads with the edge values of array.
+ 'linear_ramp'
+ Pads with the linear ramp between end_value and the
+ array edge value.
+ 'maximum'
+ Pads with the maximum value of all or part of the
+ vector along each axis.
+ 'mean'
+ Pads with the mean value of all or part of the
+ vector along each axis.
+ 'median'
+ Pads with the median value of all or part of the
+ vector along each axis.
+ 'minimum'
+ Pads with the minimum value of all or part of the
+ vector along each axis.
+ 'reflect'
+ Pads with the reflection of the vector mirrored on
+ the first and last values of the vector along each
+ axis.
+ 'symmetric'
+ Pads with the reflection of the vector mirrored
+ along the edge of the array.
+ 'wrap'
+ Pads with the wrap of the vector along the axis.
+ The first values are used to pad the end and the
+ end values are used to pad the beginning.
+ 'empty'
+ Pads with undefined values.
+
+ .. versionadded:: 1.17
+
+ <function>
+ Padding function, see Notes.
+ stat_length : sequence or int, optional
+ Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
+ values at edge of each axis used to calculate the statistic value.
+
+ ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
+ lengths for each axis.
+
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after statistic lengths for each axis.
+
+ ``(stat_length,)`` or ``int`` is a shortcut for
+ ``before = after = statistic`` length for all axes.
+
+ Default is ``None``, to use the entire axis.
+ constant_values : sequence or scalar, optional
+ Used in 'constant'. The values to set the padded values for each
+ axis.
+
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
+ for each axis.
+
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after constants for each axis.
+
+ ``(constant,)`` or ``constant`` is a shortcut for
+ ``before = after = constant`` for all axes.
+
+ Default is 0.
+ end_values : sequence or scalar, optional
+ Used in 'linear_ramp'. The values used for the ending value of the
+ linear_ramp and that will form the edge of the padded array.
+
+ ``((before_1, after_1), ... (before_N, after_N))`` unique end values
+ for each axis.
+
+ ``(before, after)`` or ``((before, after),)`` yields same before
+ and after end values for each axis.
+
+ ``(constant,)`` or ``constant`` is a shortcut for
+ ``before = after = constant`` for all axes.
+
+ Default is 0.
+ reflect_type : {'even', 'odd'}, optional
+ Used in 'reflect', and 'symmetric'. The 'even' style is the
+ default with an unaltered reflection around the edge value. For
+ the 'odd' style, the extended part of the array is created by
+ subtracting the reflected values from two times the edge value.
+
+ Returns
+ -------
+ pad : ndarray
+ Padded array of rank equal to `array` with shape increased
+ according to `pad_width`.
+
+ Notes
+ -----
+ .. versionadded:: 1.7.0
+
+ For an array with rank greater than 1, some of the padding of later
+ axes is calculated from padding of previous axes. This is easiest to
+ think about with a rank 2 array where the corners of the padded array
+ are calculated by using padded values from the first axis.
+
+ The padding function, if used, should modify a rank 1 array in-place. It
+ has the following signature::
+
+ padding_func(vector, iaxis_pad_width, iaxis, kwargs)
+
+ where
+
+ vector : ndarray
+ A rank 1 array already padded with zeros. Padded values are
+ vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
+ iaxis_pad_width : tuple
+ A 2-tuple of ints, iaxis_pad_width[0] represents the number of
+ values padded at the beginning of vector where
+ iaxis_pad_width[1] represents the number of values padded at
+ the end of vector.
+ iaxis : int
+ The axis currently being calculated.
+ kwargs : dict
+ Any keyword arguments the function requires.
+
+ Examples
+ --------
+ >>> a = [1, 2, 3, 4, 5]
+ >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
+ array([4, 4, 1, ..., 6, 6, 6])
+
+ >>> np.pad(a, (2, 3), 'edge')
+ array([1, 1, 1, ..., 5, 5, 5])
+
+ >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
+ array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
+
+ >>> np.pad(a, (2,), 'maximum')
+ array([5, 5, 1, 2, 3, 4, 5, 5, 5])
+
+ >>> np.pad(a, (2,), 'mean')
+ array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+ >>> np.pad(a, (2,), 'median')
+ array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+ >>> a = [[1, 2], [3, 4]]
+ >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
+ array([[1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1],
+ [3, 3, 3, 4, 3, 3, 3],
+ [1, 1, 1, 2, 1, 1, 1],
+ [1, 1, 1, 2, 1, 1, 1]])
+
+ >>> a = [1, 2, 3, 4, 5]
+ >>> np.pad(a, (2, 3), 'reflect')
+ array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
+
+ >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
+ array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
+
+ >>> np.pad(a, (2, 3), 'symmetric')
+ array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
+
+ >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
+ array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
+
+ >>> np.pad(a, (2, 3), 'wrap')
+ array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
+
+ >>> def pad_with(vector, pad_width, iaxis, kwargs):
+ ... pad_value = kwargs.get('padder', 10)
+ ... vector[:pad_width[0]] = pad_value
+ ... vector[-pad_width[1]:] = pad_value
+ >>> a = np.arange(6)
+ >>> a = a.reshape((2, 3))
+ >>> np.pad(a, 2, pad_with)
+ array([[10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 0, 1, 2, 10, 10],
+ [10, 10, 3, 4, 5, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10]])
+ >>> np.pad(a, 2, pad_with, padder=100)
+ array([[100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 0, 1, 2, 100, 100],
+ [100, 100, 3, 4, 5, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100]])
+ """
+ array = np.asarray(array)
+ pad_width = np.asarray(pad_width)
+
+    if pad_width.dtype.kind != 'i':
+ raise TypeError('`pad_width` must be of integral type.')
+
+ # Broadcast to shape (array.ndim, 2)
+ pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
+
+ if callable(mode):
+ # Old behavior: Use user-supplied function with np.apply_along_axis
+ function = mode
+ # Create a new zero padded array
+ padded, _ = _pad_simple(array, pad_width, fill_value=0)
+ # And apply along each axis
+
+ for axis in range(padded.ndim):
+ # Iterate using ndindex as in apply_along_axis, but assuming that
+ # function operates inplace on the padded array.
+
+ # view with the iteration axis at the end
+ view = np.moveaxis(padded, axis, -1)
+
+ # compute indices for the iteration axes, and append a trailing
+ # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
+ inds = ndindex(view.shape[:-1])
+ inds = (ind + (Ellipsis,) for ind in inds)
+ for ind in inds:
+ function(view[ind], pad_width[axis], axis, kwargs)
+
+ return padded
+
+ # Make sure that no unsupported keywords were passed for the current mode
+ allowed_kwargs = {
+ 'empty': [], 'edge': [], 'wrap': [],
+ 'constant': ['constant_values'],
+ 'linear_ramp': ['end_values'],
+ 'maximum': ['stat_length'],
+ 'mean': ['stat_length'],
+ 'median': ['stat_length'],
+ 'minimum': ['stat_length'],
+ 'reflect': ['reflect_type'],
+ 'symmetric': ['reflect_type'],
+ }
+ try:
+ unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
+ except KeyError:
+ raise ValueError("mode '{}' is not supported".format(mode)) from None
+ if unsupported_kwargs:
+ raise ValueError("unsupported keyword arguments for mode '{}': {}"
+ .format(mode, unsupported_kwargs))
+
+ stat_functions = {"maximum": np.amax, "minimum": np.amin,
+ "mean": np.mean, "median": np.median}
+
+ # Create array with final shape and original values
+ # (padded area is undefined)
+ padded, original_area_slice = _pad_simple(array, pad_width)
+ # And prepare iteration over all dimensions
+ # (zipping may be more readable than using enumerate)
+ axes = range(padded.ndim)
+
+ if mode == "constant":
+ values = kwargs.get("constant_values", 0)
+ values = _as_pairs(values, padded.ndim)
+ for axis, width_pair, value_pair in zip(axes, pad_width, values):
+ roi = _view_roi(padded, original_area_slice, axis)
+ _set_pad_area(roi, axis, width_pair, value_pair)
+
+ elif mode == "empty":
+ pass # Do nothing as _pad_simple already returned the correct result
+
+ elif array.size == 0:
+ # Only modes "constant" and "empty" can extend empty axes, all other
+ # modes depend on `array` not being empty
+ # -> ensure every empty axis is only "padded with 0"
+ for axis, width_pair in zip(axes, pad_width):
+ if array.shape[axis] == 0 and any(width_pair):
+ raise ValueError(
+ "can't extend empty axis {} using modes other than "
+ "'constant' or 'empty'".format(axis)
+ )
+ # passed, don't need to do anything more as _pad_simple already
+ # returned the correct result
+
+ elif mode == "edge":
+ for axis, width_pair in zip(axes, pad_width):
+ roi = _view_roi(padded, original_area_slice, axis)
+ edge_pair = _get_edges(roi, axis, width_pair)
+ _set_pad_area(roi, axis, width_pair, edge_pair)
+
+ elif mode == "linear_ramp":
+ end_values = kwargs.get("end_values", 0)
+ end_values = _as_pairs(end_values, padded.ndim)
+ for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
+ roi = _view_roi(padded, original_area_slice, axis)
+ ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
+ _set_pad_area(roi, axis, width_pair, ramp_pair)
+
+ elif mode in stat_functions:
+ func = stat_functions[mode]
+ length = kwargs.get("stat_length", None)
+ length = _as_pairs(length, padded.ndim, as_index=True)
+ for axis, width_pair, length_pair in zip(axes, pad_width, length):
+ roi = _view_roi(padded, original_area_slice, axis)
+ stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
+ _set_pad_area(roi, axis, width_pair, stat_pair)
+
+ elif mode in {"reflect", "symmetric"}:
+ method = kwargs.get("reflect_type", "even")
+        include_edge = (mode == "symmetric")
+ for axis, (left_index, right_index) in zip(axes, pad_width):
+ if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
+ # Extending singleton dimension for 'reflect' is legacy
+ # behavior; it really should raise an error.
+ edge_pair = _get_edges(padded, axis, (left_index, right_index))
+ _set_pad_area(
+ padded, axis, (left_index, right_index), edge_pair)
+ continue
+
+ roi = _view_roi(padded, original_area_slice, axis)
+ while left_index > 0 or right_index > 0:
+ # Iteratively pad until dimension is filled with reflected
+ # values. This is necessary if the pad area is larger than
+ # the length of the original values in the current dimension.
+ left_index, right_index = _set_reflect_both(
+ roi, axis, (left_index, right_index),
+ method, include_edge
+ )
+
+ elif mode == "wrap":
+ for axis, (left_index, right_index) in zip(axes, pad_width):
+ roi = _view_roi(padded, original_area_slice, axis)
+ while left_index > 0 or right_index > 0:
+ # Iteratively pad until dimension is filled with wrapped
+ # values. This is necessary if the pad area is larger than
+ # the length of the original values in the current dimension.
+ left_index, right_index = _set_wrap_both(
+ roi, axis, (left_index, right_index))
+
+ return padded
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/arraypad.pyi b/venv/lib/python3.9/site-packages/numpy/lib/arraypad.pyi
new file mode 100644
index 00000000..1ac6fc7d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/arraypad.pyi
@@ -0,0 +1,85 @@
+from typing import (
+ Literal as L,
+ Any,
+ overload,
+ TypeVar,
+ Protocol,
+)
+
+from numpy import generic
+
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _ArrayLikeInt,
+ _ArrayLike,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+class _ModeFunc(Protocol):
+ def __call__(
+ self,
+ vector: NDArray[Any],
+ iaxis_pad_width: tuple[int, int],
+ iaxis: int,
+ kwargs: dict[str, Any],
+ /,
+ ) -> None: ...
+
+_ModeKind = L[
+ "constant",
+ "edge",
+ "linear_ramp",
+ "maximum",
+ "mean",
+ "median",
+ "minimum",
+ "reflect",
+ "symmetric",
+ "wrap",
+ "empty",
+]
+
+__all__: list[str]
+
+# TODO: In practice each keyword argument is exclusive to one or more
+# specific modes. Consider adding more overloads to express this in the future.
+
+# Expand `**kwargs` into explicit keyword-only arguments
+@overload
+def pad(
+ array: _ArrayLike[_SCT],
+ pad_width: _ArrayLikeInt,
+ mode: _ModeKind = ...,
+ *,
+ stat_length: None | _ArrayLikeInt = ...,
+ constant_values: ArrayLike = ...,
+ end_values: ArrayLike = ...,
+ reflect_type: L["odd", "even"] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def pad(
+ array: ArrayLike,
+ pad_width: _ArrayLikeInt,
+ mode: _ModeKind = ...,
+ *,
+ stat_length: None | _ArrayLikeInt = ...,
+ constant_values: ArrayLike = ...,
+ end_values: ArrayLike = ...,
+ reflect_type: L["odd", "even"] = ...,
+) -> NDArray[Any]: ...
+@overload
+def pad(
+ array: _ArrayLike[_SCT],
+ pad_width: _ArrayLikeInt,
+ mode: _ModeFunc,
+ **kwargs: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def pad(
+ array: ArrayLike,
+ pad_width: _ArrayLikeInt,
+ mode: _ModeFunc,
+ **kwargs: Any,
+) -> NDArray[Any]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/arraysetops.py b/venv/lib/python3.9/site-packages/numpy/lib/arraysetops.py
new file mode 100644
index 00000000..300bbda2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/arraysetops.py
@@ -0,0 +1,981 @@
+"""
+Set operations for arrays based on sorting.
+
+Notes
+-----
+
+For floating point arrays, inaccurate results may appear due to the usual
+round-off and floating point comparison issues.
+
+Speed could be gained in some operations by an implementation of
+`numpy.sort` that could directly provide the permutation vectors, thus
+avoiding calls to `numpy.argsort`.
+
+Original author: Robert Cimrman
+
+"""
+import functools
+
+import numpy as np
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
+ 'in1d', 'isin'
+ ]
+
+
+def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
+ return (ary, to_end, to_begin)
+
+
+@array_function_dispatch(_ediff1d_dispatcher)
+def ediff1d(ary, to_end=None, to_begin=None):
+ """
+ The differences between consecutive elements of an array.
+
+ Parameters
+ ----------
+ ary : array_like
+ If necessary, will be flattened before the differences are taken.
+ to_end : array_like, optional
+ Number(s) to append at the end of the returned differences.
+ to_begin : array_like, optional
+ Number(s) to prepend at the beginning of the returned differences.
+
+ Returns
+ -------
+ ediff1d : ndarray
+ The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
+
+ See Also
+ --------
+ diff, gradient
+
+ Notes
+ -----
+ When applied to masked arrays, this function drops the mask information
+ if the `to_begin` and/or `to_end` parameters are used.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 4, 7, 0])
+ >>> np.ediff1d(x)
+ array([ 1, 2, 3, -7])
+
+ >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ array([-99, 1, 2, ..., -7, 88, 99])
+
+ The returned array is always 1D.
+
+ >>> y = [[1, 2, 4], [1, 6, 24]]
+ >>> np.ediff1d(y)
+ array([ 1, 2, -3, 5, 18])
+
+ """
+ # force a 1d array
+ ary = np.asanyarray(ary).ravel()
+
+ # enforce that the dtype of `ary` is used for the output
+ dtype_req = ary.dtype
+
+ # fast track default case
+ if to_begin is None and to_end is None:
+ return ary[1:] - ary[:-1]
+
+ if to_begin is None:
+ l_begin = 0
+ else:
+ to_begin = np.asanyarray(to_begin)
+ if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
+ raise TypeError("dtype of `to_begin` must be compatible "
+ "with input `ary` under the `same_kind` rule.")
+
+ to_begin = to_begin.ravel()
+ l_begin = len(to_begin)
+
+ if to_end is None:
+ l_end = 0
+ else:
+ to_end = np.asanyarray(to_end)
+ if not np.can_cast(to_end, dtype_req, casting="same_kind"):
+ raise TypeError("dtype of `to_end` must be compatible "
+ "with input `ary` under the `same_kind` rule.")
+
+ to_end = to_end.ravel()
+ l_end = len(to_end)
+
+ # do the calculation in place and copy to_begin and to_end
+ l_diff = max(len(ary) - 1, 0)
+ result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
+ result = ary.__array_wrap__(result)
+ if l_begin > 0:
+ result[:l_begin] = to_begin
+ if l_end > 0:
+ result[l_begin + l_diff:] = to_end
+ np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
+ return result
+
+
+def _unpack_tuple(x):
+ """ Unpacks one-element tuples for use as return values """
+ if len(x) == 1:
+ return x[0]
+ else:
+ return x
+
+
+def _unique_dispatcher(ar, return_index=None, return_inverse=None,
+ return_counts=None, axis=None, *, equal_nan=None):
+ return (ar,)
+
+
+@array_function_dispatch(_unique_dispatcher)
+def unique(ar, return_index=False, return_inverse=False,
+ return_counts=False, axis=None, *, equal_nan=True):
+ """
+ Find the unique elements of an array.
+
+ Returns the sorted unique elements of an array. There are three optional
+ outputs in addition to the unique elements:
+
+ * the indices of the input array that give the unique values
+ * the indices of the unique array that reconstruct the input array
+ * the number of times each unique value comes up in the input array
+
+ Parameters
+ ----------
+ ar : array_like
+ Input array. Unless `axis` is specified, this will be flattened if it
+ is not already 1-D.
+ return_index : bool, optional
+ If True, also return the indices of `ar` (along the specified axis,
+ if provided, or in the flattened array) that result in the unique array.
+ return_inverse : bool, optional
+ If True, also return the indices of the unique array (for the specified
+ axis, if provided) that can be used to reconstruct `ar`.
+ return_counts : bool, optional
+ If True, also return the number of times each unique item appears
+ in `ar`.
+ axis : int or None, optional
+ The axis to operate on. If None, `ar` will be flattened. If an integer,
+ the subarrays indexed by the given axis will be flattened and treated
+ as the elements of a 1-D array with the dimension of the given axis,
+ see the notes for more details. Object arrays or structured arrays
+ that contain objects are not supported if the `axis` kwarg is used. The
+ default is None.
+
+ .. versionadded:: 1.13.0
+
+ equal_nan : bool, optional
+ If True, collapses multiple NaN values in the return array into one.
+
+ .. versionadded:: 1.24
+
+ Returns
+ -------
+ unique : ndarray
+ The sorted unique values.
+ unique_indices : ndarray, optional
+ The indices of the first occurrences of the unique values in the
+ original array. Only provided if `return_index` is True.
+ unique_inverse : ndarray, optional
+ The indices to reconstruct the original array from the
+ unique array. Only provided if `return_inverse` is True.
+ unique_counts : ndarray, optional
+ The number of times each of the unique values comes up in the
+ original array. Only provided if `return_counts` is True.
+
+ .. versionadded:: 1.9.0
+
+ See Also
+ --------
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
+ repeat : Repeat elements of an array.
+
+ Notes
+ -----
+ When an axis is specified the subarrays indexed by the axis are sorted.
+ This is done by making the specified axis the first dimension of the array
+ (move the axis to the first dimension to keep the order of the other axes)
+ and then flattening the subarrays in C order. The flattened subarrays are
+ then viewed as a structured type with each element given a label, with the
+ effect that we end up with a 1-D array of structured types that can be
+ treated in the same way as any other 1-D array. The result is that the
+ flattened subarrays are sorted in lexicographic order starting with the
+ first element.
+
+ .. versionchanged:: 1.21
+ If nan values are in the input array, a single nan is put
+ to the end of the sorted unique values.
+
+ Also for complex arrays all NaN values are considered equivalent
+ (no matter whether the NaN is in the real or imaginary part).
+ As the representative for the returned array the smallest one in the
+ lexicographical order is chosen; see np.sort for how the lexicographical
+ order is defined for complex arrays.
+
+ Examples
+ --------
+ >>> np.unique([1, 1, 2, 2, 3, 3])
+ array([1, 2, 3])
+ >>> a = np.array([[1, 1], [2, 3]])
+ >>> np.unique(a)
+ array([1, 2, 3])
+
+ Return the unique rows of a 2D array
+
+ >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+ >>> np.unique(a, axis=0)
+ array([[1, 0, 0], [2, 3, 4]])
+
+ Return the indices of the original array that give the unique values:
+
+ >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+ >>> u, indices = np.unique(a, return_index=True)
+ >>> u
+ array(['a', 'b', 'c'], dtype='<U1')
+ >>> indices
+ array([0, 1, 3])
+ >>> a[indices]
+ array(['a', 'b', 'c'], dtype='<U1')
+
+ Reconstruct the input array from the unique values and inverse:
+
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+ >>> u, indices = np.unique(a, return_inverse=True)
+ >>> u
+ array([1, 2, 3, 4, 6])
+ >>> indices
+ array([0, 1, 4, 3, 1, 2, 1])
+ >>> u[indices]
+ array([1, 2, 6, 4, 2, 3, 2])
+
+ Reconstruct the input values from the unique values and counts:
+
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+ >>> values, counts = np.unique(a, return_counts=True)
+ >>> values
+ array([1, 2, 3, 4, 6])
+ >>> counts
+ array([1, 3, 1, 1, 1])
+ >>> np.repeat(values, counts)
+ array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
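+
+ With ``equal_nan=True`` (the default) multiple NaN values collapse to
+ one; with ``equal_nan=False`` each NaN is kept:
+
+ >>> a = np.array([1.0, np.nan, np.nan])
+ >>> np.unique(a)
+ array([ 1., nan])
+ >>> np.unique(a, equal_nan=False)
+ array([ 1., nan, nan])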
+
+ """
+ ar = np.asanyarray(ar)
+ if axis is None:
+ ret = _unique1d(ar, return_index, return_inverse, return_counts,
+ equal_nan=equal_nan)
+ return _unpack_tuple(ret)
+
+ # axis was specified and not None
+ try:
+ ar = np.moveaxis(ar, axis, 0)
+ except np.AxisError:
+ # this removes the "axis1" or "axis2" prefix from the error message
+ raise np.AxisError(axis, ar.ndim) from None
+
+ # Must reshape to a contiguous 2D array for this to work...
+ orig_shape, orig_dtype = ar.shape, ar.dtype
+ ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+ ar = np.ascontiguousarray(ar)
+ dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
+
+ # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+ # data type with `m` fields where each field has the data type of `ar`.
+ # In the following, we create the array `consolidated`, which has
+ # shape `(n,)` with data type `dtype`.
+ try:
+ if ar.shape[1] > 0:
+ consolidated = ar.view(dtype)
+ else:
+ # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+ # a data type with itemsize 0, and the call `ar.view(dtype)` will
+ # fail. Instead, we'll use `np.empty` to explicitly create the
+ # array with shape `(len(ar),)`. Because `dtype` in this case has
+ # itemsize 0, the total size of the result is still 0 bytes.
+ consolidated = np.empty(len(ar), dtype=dtype)
+ except TypeError as e:
+ # There's no good way to do this for object arrays, etc...
+ msg = 'The axis argument to unique is not supported for dtype {dt}'
+ raise TypeError(msg.format(dt=ar.dtype)) from e
+
+ def reshape_uniq(uniq):
+ n = len(uniq)
+ uniq = uniq.view(orig_dtype)
+ uniq = uniq.reshape(n, *orig_shape[1:])
+ uniq = np.moveaxis(uniq, 0, axis)
+ return uniq
+
+ output = _unique1d(consolidated, return_index,
+ return_inverse, return_counts, equal_nan=equal_nan)
+ output = (reshape_uniq(output[0]),) + output[1:]
+ return _unpack_tuple(output)
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+ return_counts=False, *, equal_nan=True):
+ """
+ Find the unique elements of an array, ignoring shape.
+ """
+ ar = np.asanyarray(ar).flatten()
+
+ optional_indices = return_index or return_inverse
+
+ if optional_indices:
+ perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
+ aux = ar[perm]
+ else:
+ ar.sort()
+ aux = ar
+ mask = np.empty(aux.shape, dtype=np.bool_)
+ mask[:1] = True
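+ # NaNs sort to the end of `aux`, so when `equal_nan` is set we mark only
+ # the first NaN in `mask`, collapsing the rest into a single entry.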
+ if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
+ np.isnan(aux[-1])):
+ if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent
+ aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
+ else:
+ aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
+ if aux_firstnan > 0:
+ mask[1:aux_firstnan] = (
+ aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
+ mask[aux_firstnan] = True
+ mask[aux_firstnan + 1:] = False
+ else:
+ mask[1:] = aux[1:] != aux[:-1]
+
+ ret = (aux[mask],)
+ if return_index:
+ ret += (perm[mask],)
+ if return_inverse:
+ imask = np.cumsum(mask) - 1
+ inv_idx = np.empty(mask.shape, dtype=np.intp)
+ inv_idx[perm] = imask
+ ret += (inv_idx,)
+ if return_counts:
+ idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
+ ret += (np.diff(idx),)
+ return ret
+
+
+def _intersect1d_dispatcher(
+ ar1, ar2, assume_unique=None, return_indices=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_intersect1d_dispatcher)
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
+ """
+ Find the intersection of two arrays.
+
+ Return the sorted, unique values that are in both of the input arrays.
+
+ Parameters
+ ----------
+ ar1, ar2 : array_like
+ Input arrays. Will be flattened if not already 1D.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
+ unique, incorrect results and out-of-bounds indices could result.
+ Default is False.
+ return_indices : bool
+ If True, the indices which correspond to the intersection of the two
+ arrays are returned. The first instance of a value is used if there are
+ multiple. Default is False.
+
+ .. versionadded:: 1.15.0
+
+ Returns
+ -------
+ intersect1d : ndarray
+ Sorted 1D array of common and unique elements.
+ comm1 : ndarray
+ The indices of the first occurrences of the common values in `ar1`.
+ Only provided if `return_indices` is True.
+ comm2 : ndarray
+ The indices of the first occurrences of the common values in `ar2`.
+ Only provided if `return_indices` is True.
+
+ See Also
+ --------
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
+
+ Examples
+ --------
+ >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
+ array([1, 3])
+
+ To intersect more than two arrays, use functools.reduce:
+
+ >>> from functools import reduce
+ >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+ array([3])
+
+ To return the indices of the values common to the input arrays
+ along with the intersected values:
+
+ >>> x = np.array([1, 1, 2, 3, 4])
+ >>> y = np.array([2, 1, 4, 6])
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+ >>> x_ind, y_ind
+ (array([0, 2, 4]), array([1, 0, 2]))
+ >>> xy, x[x_ind], y[y_ind]
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
+ """
+ ar1 = np.asanyarray(ar1)
+ ar2 = np.asanyarray(ar2)
+
+ if not assume_unique:
+ if return_indices:
+ ar1, ind1 = unique(ar1, return_index=True)
+ ar2, ind2 = unique(ar2, return_index=True)
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ else:
+ ar1 = ar1.ravel()
+ ar2 = ar2.ravel()
+
+ aux = np.concatenate((ar1, ar2))
+ if return_indices:
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
+ aux = aux[aux_sort_indices]
+ else:
+ aux.sort()
+
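+ # Both inputs are unique at this point, so after sorting the concatenation
+ # a common value shows up as a pair of adjacent duplicates.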
+ mask = aux[1:] == aux[:-1]
+ int1d = aux[:-1][mask]
+
+ if return_indices:
+ ar1_indices = aux_sort_indices[:-1][mask]
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+ if not assume_unique:
+ ar1_indices = ind1[ar1_indices]
+ ar2_indices = ind2[ar2_indices]
+
+ return int1d, ar1_indices, ar2_indices
+ else:
+ return int1d
+
+
+def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setxor1d_dispatcher)
+def setxor1d(ar1, ar2, assume_unique=False):
+ """
+ Find the set exclusive-or of two arrays.
+
+ Return the sorted, unique values that are in only one (not both) of the
+ input arrays.
+
+ Parameters
+ ----------
+ ar1, ar2 : array_like
+ Input arrays.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+
+ Returns
+ -------
+ setxor1d : ndarray
+ Sorted 1D array of unique values that are in only one of the input
+ arrays.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3, 2, 4])
+ >>> b = np.array([2, 3, 5, 7, 5])
+ >>> np.setxor1d(a,b)
+ array([1, 4, 5, 7])
+
+ """
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+
+ aux = np.concatenate((ar1, ar2))
+ if aux.size == 0:
+ return aux
+
+ aux.sort()
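+ # flag marks positions where a new value starts; an element is in exactly
+ # one input iff it starts a run (flag[:-1]) and the next value does too
+ # (flag[1:]), i.e. its run has length one.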
+ flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
+ return aux[flag[1:] & flag[:-1]]
+
+
+def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
+ kind=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_in1d_dispatcher)
+def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
+ """
+ Test whether each element of a 1-D array is also present in a second array.
+
+ Returns a boolean array the same length as `ar1` that is True
+ where an element of `ar1` is in `ar2` and False otherwise.
+
+ We recommend using :func:`isin` instead of `in1d` for new code.
+
+ Parameters
+ ----------
+ ar1 : (M,) array_like
+ Input array.
+ ar2 : array_like
+ The values against which to test each value of `ar1`.
+ assume_unique : bool, optional
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+ invert : bool, optional
+ If True, the values in the returned array are inverted (that is,
+ False where an element of `ar1` is in `ar2` and True otherwise).
+ Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
+ to (but is faster than) ``np.invert(in1d(a, b))``.
+ kind : {None, 'sort', 'table'}, optional
+ The algorithm to use. This will not affect the final result,
+ but will affect the speed and memory use. The default, None,
+ will select automatically based on memory considerations.
+
+ * If 'sort', will use a mergesort-based approach. This will have
+ a memory usage of roughly 6 times the sum of the sizes of
+ `ar1` and `ar2`, not accounting for size of dtypes.
+ * If 'table', will use a lookup table approach similar
+ to a counting sort. This is only available for boolean and
+ integer arrays. This will have a memory usage of the
+ size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+ has no effect when the 'table' option is used.
+ * If None, will automatically choose 'table' if
+ the required memory allocation is less than or equal to
+ 6 times the sum of the sizes of `ar1` and `ar2`,
+ otherwise will use 'sort'. This is done to not use
+ a large amount of memory by default, even though
+ 'table' may be faster in most cases. If 'table' is chosen,
+ `assume_unique` will have no effect.
+
+ .. versionadded:: 1.8.0
+
+ Returns
+ -------
+ in1d : (M,) ndarray, bool
+ The values `ar1[in1d]` are in `ar2`.
+
+ See Also
+ --------
+ isin : Version of this function that preserves the
+ shape of ar1.
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
+
+ Notes
+ -----
+ `in1d` can be considered as an element-wise function version of the
+ python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
+ equivalent to ``np.array([item in b for item in a])``.
+ However, this idea fails if `ar2` is a set or similar non-sequence
+ container, because ``asarray(ar2)`` then produces an object array
+ rather than the expected array of contained values.
+
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
+ following relationship is true:
+ ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+ but may use greater memory. The default value for `kind` will
+ be automatically selected based only on memory usage, so one may
+ manually set ``kind='table'`` if memory constraints can be relaxed.
+
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ >>> test = np.array([0, 1, 2, 5, 0])
+ >>> states = [0, 2]
+ >>> mask = np.in1d(test, states)
+ >>> mask
+ array([ True, False, True, False, True])
+ >>> test[mask]
+ array([0, 2, 0])
+ >>> mask = np.in1d(test, states, invert=True)
+ >>> mask
+ array([False, True, False, True, False])
+ >>> test[mask]
+ array([1, 5])
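+
+ For boolean and integer data, the lookup-table algorithm can be
+ requested explicitly; by construction it returns the same mask as the
+ default:
+
+ >>> np.in1d(test, states, kind='table')
+ array([ True, False, True, False, True])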
+ """
+ # Ravel both arrays, behavior for the first array could be different
+ ar1 = np.asarray(ar1).ravel()
+ ar2 = np.asarray(ar2).ravel()
+
+ # Ensure that iteration through object arrays yields size-1 arrays
+ if ar2.dtype == object:
+ ar2 = ar2.reshape(-1, 1)
+
+ if kind not in {None, 'sort', 'table'}:
+ raise ValueError(
+ f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")
+
+ # Can use the table method if all arrays are integers or boolean:
+ is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
+ use_table_method = is_int_arrays and kind in {None, 'table'}
+
+ if use_table_method:
+ if ar2.size == 0:
+ if invert:
+ return np.ones_like(ar1, dtype=bool)
+ else:
+ return np.zeros_like(ar1, dtype=bool)
+
+ # Convert booleans to uint8 so we can use the fast integer algorithm
+ if ar1.dtype == bool:
+ ar1 = ar1.astype(np.uint8)
+ if ar2.dtype == bool:
+ ar2 = ar2.astype(np.uint8)
+
+ ar2_min = np.min(ar2)
+ ar2_max = np.max(ar2)
+
+ ar2_range = int(ar2_max) - int(ar2_min)
+
+ # Constraints on whether we can actually use the table method:
+ # 1. Assert memory usage is not too large
+ below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
+ # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
+ range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
+ # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
+ if ar1.size > 0:
+ ar1_min = np.min(ar1)
+ ar1_max = np.max(ar1)
+
+ # After masking, the range of ar1 is guaranteed to be
+ # within the range of ar2:
+ ar1_upper = min(int(ar1_max), int(ar2_max))
+ ar1_lower = max(int(ar1_min), int(ar2_min))
+
+ range_safe_from_overflow &= all((
+ ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
+ ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
+ ))
+
+ # Optimal performance is for approximately
+ # log10(size) > (log10(range) - 2.27) / 0.927.
+ # However, here we set the requirement that by default
+ # the intermediate array can only be 6x
+ # the combined memory allocation of the original
+ # arrays. See discussion on
+ # https://github.com/numpy/numpy/pull/12065.
+
+ if (
+ range_safe_from_overflow and
+ (below_memory_constraint or kind == 'table')
+ ):
+
+ if invert:
+ outgoing_array = np.ones_like(ar1, dtype=bool)
+ else:
+ outgoing_array = np.zeros_like(ar1, dtype=bool)
+
+ # Make elements 1 where the integer exists in ar2
+ if invert:
+ isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
+ isin_helper_ar[ar2 - ar2_min] = 0
+ else:
+ isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
+ isin_helper_ar[ar2 - ar2_min] = 1
+
+ # Mask out elements we know won't work
+ basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
+ outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -
+ ar2_min]
+
+ return outgoing_array
+ elif kind == 'table': # not range_safe_from_overflow
+ raise RuntimeError(
+ "You have specified kind='table', "
+ "but the range of values in `ar2` or `ar1` exceed the "
+ "maximum integer of the datatype. "
+ "Please set `kind` to None or 'sort'."
+ )
+ elif kind == 'table':
+ raise ValueError(
+ "The 'table' method is only "
+ "supported for boolean or integer arrays. "
+ "Please select 'sort' or None for kind."
+ )
+
+ # Check if one of the arrays may contain arbitrary objects
+ contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
+
+ # This code is run when
+ # a) the first condition is true, making the code significantly faster
+ # b) the second condition is true (i.e. `ar1` or `ar2` may contain
+ # arbitrary objects), since then sorting is not guaranteed to work
+ if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
+ if invert:
+ mask = np.ones(len(ar1), dtype=bool)
+ for a in ar2:
+ mask &= (ar1 != a)
+ else:
+ mask = np.zeros(len(ar1), dtype=bool)
+ for a in ar2:
+ mask |= (ar1 == a)
+ return mask
+
+ # Otherwise use sorting
+ if not assume_unique:
+ ar1, rev_idx = np.unique(ar1, return_inverse=True)
+ ar2 = np.unique(ar2)
+
+ ar = np.concatenate((ar1, ar2))
+ # We need this to be a stable sort, so always use 'mergesort'
+ # here. The values from the first array should always come before
+ # the values from the second array.
+ order = ar.argsort(kind='mergesort')
+ sar = ar[order]
+ if invert:
+ bool_ar = (sar[1:] != sar[:-1])
+ else:
+ bool_ar = (sar[1:] == sar[:-1])
+ flag = np.concatenate((bool_ar, [invert]))
+ ret = np.empty(ar.shape, dtype=bool)
+ ret[order] = flag
+
+ if assume_unique:
+ return ret[:len(ar1)]
+ else:
+ return ret[rev_idx]
+
+
+def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
+ *, kind=None):
+ return (element, test_elements)
+
+
+@array_function_dispatch(_isin_dispatcher)
+def isin(element, test_elements, assume_unique=False, invert=False, *,
+ kind=None):
+ """
+ Calculates ``element in test_elements``, broadcasting over `element` only.
+ Returns a boolean array of the same shape as `element` that is True
+ where an element of `element` is in `test_elements` and False otherwise.
+
+ Parameters
+ ----------
+ element : array_like
+ Input array.
+ test_elements : array_like
+ The values against which to test each value of `element`.
+ This argument is flattened if it is an array or array_like.
+ See notes for behavior with non-array-like parameters.
+ assume_unique : bool, optional
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+ invert : bool, optional
+ If True, the values in the returned array are inverted, as if
+ calculating `element not in test_elements`. Default is False.
+ ``np.isin(a, b, invert=True)`` is equivalent to (but faster
+ than) ``np.invert(np.isin(a, b))``.
+ kind : {None, 'sort', 'table'}, optional
+ The algorithm to use. This will not affect the final result,
+ but will affect the speed and memory use. The default, None,
+ will select automatically based on memory considerations.
+
+ * If 'sort', will use a mergesort-based approach. This will have
+ a memory usage of roughly 6 times the sum of the sizes of
+ `ar1` and `ar2`, not accounting for size of dtypes.
+ * If 'table', will use a lookup table approach similar
+ to a counting sort. This is only available for boolean and
+ integer arrays. This will have a memory usage of the
+ size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+ has no effect when the 'table' option is used.
+ * If None, will automatically choose 'table' if
+ the required memory allocation is less than or equal to
+ 6 times the sum of the sizes of `ar1` and `ar2`,
+ otherwise will use 'sort'. This is done to not use
+ a large amount of memory by default, even though
+ 'table' may be faster in most cases. If 'table' is chosen,
+ `assume_unique` will have no effect.
+
+ Returns
+ -------
+ isin : ndarray, bool
+ Has the same shape as `element`. The values `element[isin]`
+ are in `test_elements`.
+
+ See Also
+ --------
+ in1d : Flattened version of this function.
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
+
+ Notes
+ -----
+
+ `isin` is an element-wise function version of the python keyword `in`.
+ ``isin(a, b)`` is roughly equivalent to
+ ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
+
+ `element` and `test_elements` are converted to arrays if they are not
+ already. If `test_elements` is a set (or other non-sequence collection)
+ it will be converted to an object array with one element, rather than an
+ array of the values contained in `test_elements`. This is a consequence
+ of the `array` constructor's way of handling non-sequence collections.
+ Converting the set to a list usually gives the desired behavior.
+
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
+ following relationship is true:
+ ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+ but may use greater memory. The default value for `kind` will
+ be automatically selected based only on memory usage, so one may
+ manually set ``kind='table'`` if memory constraints can be relaxed.
+
+ .. versionadded:: 1.13.0
+
+ Examples
+ --------
+ >>> element = 2*np.arange(4).reshape((2, 2))
+ >>> element
+ array([[0, 2],
+ [4, 6]])
+ >>> test_elements = [1, 2, 4, 8]
+ >>> mask = np.isin(element, test_elements)
+ >>> mask
+ array([[False, True],
+ [ True, False]])
+ >>> element[mask]
+ array([2, 4])
+
+ The indices of the matched values can be obtained with `nonzero`:
+
+ >>> np.nonzero(mask)
+ (array([0, 1]), array([1, 0]))
+
+ The test can also be inverted:
+
+ >>> mask = np.isin(element, test_elements, invert=True)
+ >>> mask
+ array([[ True, False],
+ [False, True]])
+ >>> element[mask]
+ array([0, 6])
+
+ Because of how `array` handles sets, the following does not
+ work as expected:
+
+ >>> test_set = {1, 2, 4, 8}
+ >>> np.isin(element, test_set)
+ array([[False, False],
+ [False, False]])
+
+ Casting the set to a list gives the expected result:
+
+ >>> np.isin(element, list(test_set))
+ array([[False, True],
+ [ True, False]])
+ """
+ element = np.asarray(element)
+ return in1d(element, test_elements, assume_unique=assume_unique,
+ invert=invert, kind=kind).reshape(element.shape)
+
+
+def _union1d_dispatcher(ar1, ar2):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_union1d_dispatcher)
+def union1d(ar1, ar2):
+ """
+ Find the union of two arrays.
+
+ Return the unique, sorted array of values that are in either of the two
+ input arrays.
+
+ Parameters
+ ----------
+ ar1, ar2 : array_like
+ Input arrays. They are flattened if they are not already 1D.
+
+ Returns
+ -------
+ union1d : ndarray
+ Unique, sorted union of the input arrays.
+
+ See Also
+ --------
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
+
+ Examples
+ --------
+ >>> np.union1d([-1, 0, 1], [-2, 0, 2])
+ array([-2, -1, 0, 1, 2])
+
+ To find the union of more than two arrays, use functools.reduce:
+
+ >>> from functools import reduce
+ >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+ array([1, 2, 3, 4, 6])
+ """
+ return unique(np.concatenate((ar1, ar2), axis=None))
+
+
+def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setdiff1d_dispatcher)
+def setdiff1d(ar1, ar2, assume_unique=False):
+ """
+ Find the set difference of two arrays.
+
+ Return the unique values in `ar1` that are not in `ar2`.
+
+ Parameters
+ ----------
+ ar1 : array_like
+ Input array.
+ ar2 : array_like
+ Input comparison array.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
+
+ Returns
+ -------
+ setdiff1d : ndarray
+ 1D array of values in `ar1` that are not in `ar2`. The result
+ is sorted when `assume_unique=False`, but otherwise only sorted
+ if the input is sorted.
+
+ See Also
+ --------
+ numpy.lib.arraysetops : Module with a number of other functions for
+ performing set operations on arrays.
+
+ Examples
+ --------
+ >>> a = np.array([1, 2, 3, 2, 4, 1])
+ >>> b = np.array([3, 4, 5, 6])
+ >>> np.setdiff1d(a, b)
+ array([1, 2])
+
+ """
+ if assume_unique:
+ ar1 = np.asarray(ar1).ravel()
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi b/venv/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi
new file mode 100644
index 00000000..aa1310a3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/arraysetops.pyi
@@ -0,0 +1,360 @@
+from typing import (
+ Literal as L,
+ Any,
+ TypeVar,
+ overload,
+ SupportsIndex,
+)
+
+from numpy import (
+ generic,
+ number,
+ bool_,
+ ushort,
+ ubyte,
+ uintc,
+ uint,
+ ulonglong,
+ short,
+ int8,
+ byte,
+ intc,
+ int_,
+ intp,
+ longlong,
+ half,
+ single,
+ double,
+ longdouble,
+ csingle,
+ cdouble,
+ clongdouble,
+ timedelta64,
+ datetime64,
+ object_,
+ str_,
+ bytes_,
+ void,
+)
+
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeNumber_co,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+_NumberType = TypeVar("_NumberType", bound=number[Any])
+
+# Explicitly set all allowed values to prevent accidental castings to
+# abstract dtypes (their common super-type).
+#
+# Only relevant if two or more arguments are parametrized (e.g. `setdiff1d`),
+# which could result in, for example, `int64` and `float64` producing a
+# `number[_64Bit]` array
+_SCTNoCast = TypeVar(
+ "_SCTNoCast",
+ bool_,
+ ushort,
+ ubyte,
+ uintc,
+ uint,
+ ulonglong,
+ short,
+ byte,
+ intc,
+ int_,
+ longlong,
+ half,
+ single,
+ double,
+ longdouble,
+ csingle,
+ cdouble,
+ clongdouble,
+ timedelta64,
+ datetime64,
+ object_,
+ str_,
+ bytes_,
+ void,
+)
+
+__all__: list[str]
+
+@overload
+def ediff1d(
+ ary: _ArrayLikeBool_co,
+ to_end: None | ArrayLike = ...,
+ to_begin: None | ArrayLike = ...,
+) -> NDArray[int8]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLike[_NumberType],
+ to_end: None | ArrayLike = ...,
+ to_begin: None | ArrayLike = ...,
+) -> NDArray[_NumberType]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLikeNumber_co,
+ to_end: None | ArrayLike = ...,
+ to_begin: None | ArrayLike = ...,
+) -> NDArray[Any]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co,
+ to_end: None | ArrayLike = ...,
+ to_begin: None | ArrayLike = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def ediff1d(
+ ary: _ArrayLikeObject_co,
+ to_end: None | ArrayLike = ...,
+ to_begin: None | ArrayLike = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[False] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> NDArray[Any]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[True] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[True] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[False] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[False] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[True] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[True] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[False] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[True] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[True] = ...,
+ return_inverse: L[False] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[False] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[False] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: _ArrayLike[_SCT],
+ return_index: L[True] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
+@overload
+def unique(
+ ar: ArrayLike,
+ return_index: L[True] = ...,
+ return_inverse: L[True] = ...,
+ return_counts: L[True] = ...,
+ axis: None | SupportsIndex = ...,
+ *,
+ equal_nan: bool = ...,
+) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
+
+@overload
+def intersect1d(
+ ar1: _ArrayLike[_SCTNoCast],
+ ar2: _ArrayLike[_SCTNoCast],
+ assume_unique: bool = ...,
+ return_indices: L[False] = ...,
+) -> NDArray[_SCTNoCast]: ...
+@overload
+def intersect1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = ...,
+ return_indices: L[False] = ...,
+) -> NDArray[Any]: ...
+@overload
+def intersect1d(
+ ar1: _ArrayLike[_SCTNoCast],
+ ar2: _ArrayLike[_SCTNoCast],
+ assume_unique: bool = ...,
+ return_indices: L[True] = ...,
+) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ...
+@overload
+def intersect1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = ...,
+ return_indices: L[True] = ...,
+) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
+
+@overload
+def setxor1d(
+ ar1: _ArrayLike[_SCTNoCast],
+ ar2: _ArrayLike[_SCTNoCast],
+ assume_unique: bool = ...,
+) -> NDArray[_SCTNoCast]: ...
+@overload
+def setxor1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = ...,
+) -> NDArray[Any]: ...
+
+def in1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = ...,
+ invert: bool = ...,
+ *,
+ kind: None | str = ...,
+) -> NDArray[bool_]: ...
+
+def isin(
+ element: ArrayLike,
+ test_elements: ArrayLike,
+ assume_unique: bool = ...,
+ invert: bool = ...,
+ *,
+ kind: None | str = ...,
+) -> NDArray[bool_]: ...
+
+@overload
+def union1d(
+ ar1: _ArrayLike[_SCTNoCast],
+ ar2: _ArrayLike[_SCTNoCast],
+) -> NDArray[_SCTNoCast]: ...
+@overload
+def union1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+) -> NDArray[Any]: ...
+
+@overload
+def setdiff1d(
+ ar1: _ArrayLike[_SCTNoCast],
+ ar2: _ArrayLike[_SCTNoCast],
+ assume_unique: bool = ...,
+) -> NDArray[_SCTNoCast]: ...
+@overload
+def setdiff1d(
+ ar1: ArrayLike,
+ ar2: ArrayLike,
+ assume_unique: bool = ...,
+) -> NDArray[Any]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/arrayterator.py b/venv/lib/python3.9/site-packages/numpy/lib/arrayterator.py
new file mode 100644
index 00000000..b9ea21f8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/arrayterator.py
@@ -0,0 +1,219 @@
+"""
+A buffered iterator for big arrays.
+
+This module solves the problem of iterating over a big file-based array
+without having to read it into memory. The `Arrayterator` class wraps
+an array object, and when iterated it will return sub-arrays with at most
+a user-specified number of elements.
+
+"""
+from operator import mul
+from functools import reduce
+
+__all__ = ['Arrayterator']
+
+
+class Arrayterator:
+ """
+ Buffered iterator for big arrays.
+
+ `Arrayterator` creates a buffered iterator for reading big arrays in small
+ contiguous blocks. The class is useful for objects stored in the
+ file system. It allows iteration over the object *without* reading
+ everything in memory; instead, small blocks are read and iterated over.
+
+ `Arrayterator` can be used with any object that supports multidimensional
+ slices. This includes NumPy arrays, but also variables from
+ Scientific.IO.NetCDF or pynetcdf for example.
+
+ Parameters
+ ----------
+ var : array_like
+ The object to iterate over.
+ buf_size : int, optional
+ The buffer size. If `buf_size` is supplied, the maximum amount of
+ data that will be read into memory is `buf_size` elements.
+ Default is None, which will read as many elements as possible
+ into memory.
+
+ Attributes
+ ----------
+ var
+ buf_size
+ start
+ stop
+ step
+ shape
+ flat
+
+ See Also
+ --------
+ ndenumerate : Multidimensional array iterator.
+ flatiter : Flat array iterator.
+ memmap : Create a memory-map to an array stored in a binary file on disk.
+
+ Notes
+ -----
+ The algorithm works by first finding a "running dimension", along which
+ the blocks will be extracted. Given an array of dimensions
+ ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+ first dimension will be used. If, on the other hand,
+ ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+ Blocks are extracted along this dimension, and when the last block is
+ returned the process continues from the next dimension, until all
+ elements have been read.
+
+ Examples
+ --------
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ >>> a_itor = np.lib.Arrayterator(a, 2)
+ >>> a_itor.shape
+ (3, 4, 5, 6)
+
+ Now we can iterate over ``a_itor``, and it will return arrays of size
+ two. Since `buf_size` was smaller than any dimension, the first
+ dimension will be iterated over first:
+
+ >>> for subarr in a_itor:
+ ... if not subarr.all():
+ ... print(subarr, subarr.shape) # doctest: +SKIP
+ >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+ """
+
+ def __init__(self, var, buf_size=None):
+ self.var = var
+ self.buf_size = buf_size
+
+ self.start = [0 for dim in var.shape]
+ self.stop = [dim for dim in var.shape]
+ self.step = [1 for dim in var.shape]
+
+ def __getattr__(self, attr):
+ return getattr(self.var, attr)
+
+ def __getitem__(self, index):
+ """
+ Return a new arrayterator.
+
+ """
+ # Fix index, handling ellipsis and incomplete slices.
+ if not isinstance(index, tuple):
+ index = (index,)
+ fixed = []
+ length, dims = len(index), self.ndim
+ for slice_ in index:
+ if slice_ is Ellipsis:
+ fixed.extend([slice(None)] * (dims-length+1))
+ length = len(fixed)
+ elif isinstance(slice_, int):
+ fixed.append(slice(slice_, slice_+1, 1))
+ else:
+ fixed.append(slice_)
+ index = tuple(fixed)
+ if len(index) < dims:
+ index += (slice(None),) * (dims-len(index))
+
+ # Return a new arrayterator object.
+ out = self.__class__(self.var, self.buf_size)
+ for i, (start, stop, step, slice_) in enumerate(
+ zip(self.start, self.stop, self.step, index)):
+ out.start[i] = start + (slice_.start or 0)
+ out.step[i] = step * (slice_.step or 1)
+ out.stop[i] = start + (slice_.stop or stop-start)
+ out.stop[i] = min(stop, out.stop[i])
+ return out
+
+ def __array__(self):
+ """
+ Return corresponding data.
+
+ """
+ slice_ = tuple(slice(*t) for t in zip(
+ self.start, self.stop, self.step))
+ return self.var[slice_]
+
+ @property
+ def flat(self):
+ """
+ A 1-D flat iterator for Arrayterator objects.
+
+ This iterator returns elements of the array to be iterated over in
+ `Arrayterator` one by one. It is similar to `flatiter`.
+
+ See Also
+ --------
+ Arrayterator
+ flatiter
+
+ Examples
+ --------
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ >>> a_itor = np.lib.Arrayterator(a, 2)
+
+ >>> for subarr in a_itor.flat:
+ ... if not subarr:
+ ... print(subarr, type(subarr))
+ ...
+ 0 <class 'numpy.int64'>
+
+ """
+ for block in self:
+ yield from block.flat
+
+ @property
+ def shape(self):
+ """
+ The shape of the array to be iterated over.
+
+ For an example, see `Arrayterator`.
+
+ """
+ return tuple(((stop-start-1)//step+1) for start, stop, step in
+ zip(self.start, self.stop, self.step))
+
+ def __iter__(self):
+ # Skip arrays with degenerate dimensions
+ if [dim for dim in self.shape if dim <= 0]:
+ return
+
+ start = self.start[:]
+ stop = self.stop[:]
+ step = self.step[:]
+ ndims = self.var.ndim
+
+ while True:
+ count = self.buf_size or reduce(mul, self.shape)
+
+ # iterate over each dimension, looking for the
+ # running dimension (i.e., the dimension along which
+ # the blocks will be built)
+ rundim = 0
+ for i in range(ndims-1, -1, -1):
+ # if count is zero we ran out of elements to read
+ # along higher dimensions, so we read only a single position
+ if count == 0:
+ stop[i] = start[i]+1
+ elif count <= self.shape[i]:
+ # limit along this dimension
+ stop[i] = start[i] + count*step[i]
+ rundim = i
+ else:
+ # read everything along this dimension
+ stop[i] = self.stop[i]
+ stop[i] = min(self.stop[i], stop[i])
+ count = count//self.shape[i]
+
+ # yield a block
+ slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+ yield self.var[slice_]
+
+ # Update start position, taking care of overflow to
+ # other dimensions
+ start[rundim] = stop[rundim] # start where we stopped
+ for i in range(ndims-1, 0, -1):
+ if start[i] >= self.stop[i]:
+ start[i] = self.start[i]
+ start[i-1] += self.step[i-1]
+ if start[0] >= self.stop[0]:
+ return
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi b/venv/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi
new file mode 100644
index 00000000..aa192fb7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/arrayterator.pyi
@@ -0,0 +1,49 @@
+from collections.abc import Generator
+from typing import (
+ Any,
+ TypeVar,
+ Union,
+ overload,
+)
+
+from numpy import ndarray, dtype, generic
+from numpy._typing import DTypeLike
+
+# TODO: Set a shape bound once we've got proper shape support
+_Shape = TypeVar("_Shape", bound=Any)
+_DType = TypeVar("_DType", bound=dtype[Any])
+_ScalarType = TypeVar("_ScalarType", bound=generic)
+
+_Index = Union[
+ Union[ellipsis, int, slice],
+ tuple[Union[ellipsis, int, slice], ...],
+]
+
+__all__: list[str]
+
+# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
+# but its `__getattr__` method does wrap around the former and thus has
+# access to all its methods
+
+class Arrayterator(ndarray[_Shape, _DType]):
+ var: ndarray[_Shape, _DType] # type: ignore[assignment]
+ buf_size: None | int
+ start: list[int]
+ stop: list[int]
+ step: list[int]
+
+ @property # type: ignore[misc]
+ def shape(self) -> tuple[int, ...]: ...
+ @property
+ def flat( # type: ignore[override]
+ self: ndarray[Any, dtype[_ScalarType]]
+ ) -> Generator[_ScalarType, None, None]: ...
+ def __init__(
+ self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
+ ) -> None: ...
+ @overload
+ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ...
+ @overload
+ def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ...
+ def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
+ def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/format.py b/venv/lib/python3.9/site-packages/numpy/lib/format.py
new file mode 100644
index 00000000..54fd0b0b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/format.py
@@ -0,0 +1,968 @@
+"""
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
+information about them.
+
+The ``.npy`` format is the standard binary file format in NumPy for
+persisting a *single* arbitrary NumPy array on disk. The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+ object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+ including shape and dtype on a machine of a different
+ architecture. Both little-endian and big-endian arrays are
+ supported, and a file with little-endian numbers will yield
+ a little-endian array on any machine reading the file. The
+ types are described in terms of their actual sizes. For example,
+ if a machine with a 64-bit C "long int" writes out an array with
+ "long ints", a reading machine with 32-bit C "long ints" will yield
+ an array with 64-bit integers.
+
+- Is straightforward to reverse engineer. Datasets often live longer than
+ the programs that created them. A competent developer should be
+ able to create a solution in their preferred programming language to
+ read most ``.npy`` files that they have been given without much
+ documentation.
+
+- Allows memory-mapping of the data. See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+ Python objects. Files with object arrays cannot be memory-mapped, but
+ they can be read from and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+ Subclasses will be accepted for writing, but only the array data will
+ be written out. A regular numpy.ndarray object will be created
+ upon reading the file.
+
+.. warning::
+
+ Due to limitations in the interpretation of structured dtypes, dtypes
+ with fields with empty names will have the names replaced by 'f0', 'f1',
+ etc. Such arrays will not round-trip through the format entirely
+ accurately. The data is intact; only the field names will differ. We are
+ working on a fix for this. This fix will not require a change in the
+ file format. The arrays with such structures can still be saved and
+ restored, and the correct dtype may be restored by using the
+ ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.io` will still
+be able to read and write Version 1.0 files.
+
+Format Version 1.0
+------------------
+
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
+
+The next 1 byte is an unsigned byte: the major version number of the file
+format, e.g. ``\\x01``.
+
+The next 1 byte is an unsigned byte: the minor version number of the file
+format, e.g. ``\\x00``. Note: the version of the file format is not tied
+to the version of the numpy package.
+
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
+
+The next HEADER_LEN bytes form the header data describing the array's
+format. It is an ASCII string which contains a Python literal expression
+of a dictionary. It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
+
+The dictionary contains three keys:
+
+ "descr" : dtype.descr
+ An object that can be passed as an argument to the `numpy.dtype`
+ constructor to create the array's dtype.
+ "fortran_order" : bool
+ Whether the array data is Fortran-contiguous or not. Since
+ Fortran-contiguous arrays are a common form of non-C-contiguity,
+ we allow them to be written directly to disk for efficiency.
+ "shape" : tuple of int
+ The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order. This is for convenience only. A writer SHOULD implement
+this if possible. A reader MUST NOT depend on this.
+
+Following the header comes the array data. If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array. Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
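+
+A minimal sketch (not part of the format itself) of reading a version 1.0
+header by hand, following the layout above; the names used are illustrative::
+
+ >>> import ast, io, struct
+ >>> import numpy as np
+ >>> buf = io.BytesIO()
+ >>> np.save(buf, np.arange(3)) # simple data, so version 1.0 is used
+ >>> _ = buf.seek(0)
+ >>> buf.read(6) == b'\\x93NUMPY' # magic string
+ True
+ >>> major, minor = buf.read(2) # one unsigned byte each
+ >>> (major, minor)
+ (1, 0)
+ >>> hlen = struct.unpack('<H', buf.read(2))[0] # little-endian ushort
+ >>> header = ast.literal_eval(buf.read(hlen).decode('latin1'))
+ >>> sorted(header)
+ ['descr', 'fortran_order', 'shape']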
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes. This can be exceeded by structured arrays with a large number of
+columns. The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so it supports structured types with any unicode
+field names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+import numpy
+import warnings
+from numpy.lib.utils import safe_eval
+from numpy.compat import (
+ isfileobj, os_fspath, pickle
+ )
+
+
+__all__ = []
+
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+ (1, 0): ('<H', 'latin1'),
+ (2, 0): ('<I', 'latin1'),
+ (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+def _check_version(version):
+ if version not in [(1, 0), (2, 0), (3, 0), None]:
+ msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+ raise ValueError(msg % (version,))
+
+def magic(major, minor):
+ """ Return the magic string for the given file format version.
+
+ Parameters
+ ----------
+ major : int in [0, 255]
+ minor : int in [0, 255]
+
+ Returns
+ -------
+ magic : bytes
+
+ Raises
+ ------
+ ValueError if the version cannot be formatted.
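+
+ Examples
+ --------
+ >>> magic(1, 0)
+ b'\\x93NUMPY\\x01\\x00'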
+ """
+ if major < 0 or major > 255:
+ raise ValueError("major version must be 0 <= major < 256")
+ if minor < 0 or minor > 255:
+ raise ValueError("minor version must be 0 <= minor < 256")
+ return MAGIC_PREFIX + bytes([major, minor])
+
+def read_magic(fp):
+ """ Read the magic string to get the version of the file format.
+
+ Parameters
+ ----------
+ fp : filelike object
+
+ Returns
+ -------
+ major : int
+ minor : int
+ """
+ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+ if magic_str[:-2] != MAGIC_PREFIX:
+ msg = "the magic string is not correct; expected %r, got %r"
+ raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+ major, minor = magic_str[-2:]
+ return major, minor
+
+def _has_metadata(dt):
+ if dt.metadata is not None:
+ return True
+ elif dt.names is not None:
+ return any(_has_metadata(dt[k]) for k in dt.names)
+ elif dt.subdtype is not None:
+ return _has_metadata(dt.base)
+ else:
+ return False
+
+def dtype_to_descr(dtype):
+ """
+ Get a serializable descriptor from the dtype.
+
+ The .descr attribute of a dtype object cannot be round-tripped through
+ the dtype() constructor. Simple types, like dtype('float32'), have
+ a descr which looks like a record array with one field with '' as
+ a name. The dtype() constructor interprets this as a request to give
+ a default name. Instead, we construct a descriptor that can be passed to
+ dtype().
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype of the array that will be written to disk.
+
+ Returns
+ -------
+ descr : object
+ An object that can be passed to `numpy.dtype()` in order to
+ replicate the input dtype.
+
+ """
+ if _has_metadata(dtype):
+ warnings.warn("metadata on a dtype may be saved or ignored, but will "
+ "raise if saved when read. Use another form of storage.",
+ UserWarning, stacklevel=2)
+ if dtype.names is not None:
+ # This is a record array. The .descr is fine. XXX: parts of the
+ # record array with an empty name, like padding bytes, still get
+ # fiddled with. This needs to be fixed in the C implementation of
+ # dtype().
+ return dtype.descr
+ else:
+ return dtype.str
+
+def descr_to_dtype(descr):
+ """
+ Returns a dtype based off the given description.
+
+ This is essentially the reverse of `dtype_to_descr()`. It will remove
+ the valueless padding fields (void fields with '' as their name, as
+ produced by ``dtype.descr`` for padded structured dtypes) and then
+ convert the description to its corresponding dtype.
+
+ Parameters
+ ----------
+ descr : object
+ The object retrieved by dtype.descr. Can be passed to
+ `numpy.dtype()` in order to replicate the input dtype.
+
+ Returns
+ -------
+ dtype : dtype
+ The dtype constructed by the description.
+
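+ Examples
+ --------
+ Expected reprs assume a little-endian platform:
+
+ >>> descr_to_dtype('<f8')
+ dtype('float64')
+ >>> descr_to_dtype([('x', '<i4'), ('y', '<f8')])
+ dtype([('x', '<i4'), ('y', '<f8')])
+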
+ """
+ if isinstance(descr, str):
+ # No padding removal needed
+ return numpy.dtype(descr)
+ elif isinstance(descr, tuple):
+ # subtype, will always have a shape descr[1]
+ dt = descr_to_dtype(descr[0])
+ return numpy.dtype((dt, descr[1]))
+
+ titles = []
+ names = []
+ formats = []
+ offsets = []
+ offset = 0
+ for field in descr:
+ if len(field) == 2:
+ name, descr_str = field
+ dt = descr_to_dtype(descr_str)
+ else:
+ name, descr_str, shape = field
+ dt = numpy.dtype((descr_to_dtype(descr_str), shape))
+
+ # Ignore padding bytes, which will be void bytes with '' as name
+ # Once support for blank names is removed, only "if name == ''" is needed
+ is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
+ if not is_pad:
+ title, name = name if isinstance(name, tuple) else (None, name)
+ titles.append(title)
+ names.append(name)
+ formats.append(dt)
+ offsets.append(offset)
+ offset += dt.itemsize
+
+ return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
+ 'offsets': offsets, 'itemsize': offset})
+
+def header_data_from_array_1_0(array):
+ """ Get the dictionary of header metadata from a numpy.ndarray.
+
+ Parameters
+ ----------
+ array : numpy.ndarray
+
+ Returns
+ -------
+ d : dict
+ This has the appropriate entries for writing its string representation
+ to the header of the file.
+ """
+ d = {'shape': array.shape}
+ if array.flags.c_contiguous:
+ d['fortran_order'] = False
+ elif array.flags.f_contiguous:
+ d['fortran_order'] = True
+ else:
+ # Totally non-contiguous data. We will have to make it C-contiguous
+ # before writing. Note that we need to test for C_CONTIGUOUS first
+ # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+ d['fortran_order'] = False
+
+ d['descr'] = dtype_to_descr(array.dtype)
+ return d
+
+
+def _wrap_header(header, version):
+ """
+ Takes a stringified header, and attaches the prefix and padding to it
+ """
+ import struct
+ assert version is not None
+ fmt, encoding = _header_size_info[version]
+ header = header.encode(encoding)
+ hlen = len(header) + 1
+ padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+ try:
+ header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+ except struct.error:
+ msg = "Header length {} too big for version={}".format(hlen, version)
+ raise ValueError(msg) from None
+
+ # Pad the header with spaces and a final newline such that the magic
+ # string, the header-length short and the header are aligned on a
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ return header_prefix + header + b' '*padlen + b'\n'
+
+
+def _wrap_header_guess_version(header):
+ """
+ Like `_wrap_header`, but chooses an appropriate version given the contents
+ """
+ try:
+ return _wrap_header(header, (1, 0))
+ except ValueError:
+ pass
+
+ try:
+ ret = _wrap_header(header, (2, 0))
+ except UnicodeEncodeError:
+ pass
+ else:
+ warnings.warn("Stored array in format 2.0. It can only be"
+ "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ return ret
+
+ header = _wrap_header(header, (3, 0))
+ warnings.warn("Stored array in format 3.0. It can only be "
+ "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+ return header
+
+
+def _write_array_header(fp, d, version=None):
+ """ Write the header for an array and returns the version used
+
+ Parameters
+ ----------
+ fp : filelike object
+ d : dict
+ This has the appropriate entries for writing its string representation
+ to the header of the file.
+ version : tuple or None
+ None means use oldest that works. Providing an explicit version will
+ raise a ValueError if the format does not allow saving this data.
+ Default: None
+ """
+ header = ["{"]
+ for key, value in sorted(d.items()):
+ # Need to use repr here, since we eval these when reading
+ header.append("'%s': %s, " % (key, repr(value)))
+ header.append("}")
+ header = "".join(header)
+
+ # Add some spare space so that the array header can be modified in-place
+ # when changing the array size, e.g. when growing it by appending data at
+ # the end.
+ shape = d['shape']
+ header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
+ shape[-1 if d['fortran_order'] else 0]
+ ))) if len(shape) > 0 else 0)
+
+ if version is None:
+ header = _wrap_header_guess_version(header)
+ else:
+ header = _wrap_header(header, version)
+ fp.write(header)
+
+def write_array_header_1_0(fp, d):
+ """ Write the header for an array using the 1.0 format.
+
+ Parameters
+ ----------
+ fp : filelike object
+ d : dict
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
+ """
+ _write_array_header(fp, d, (1, 0))
+
+
+def write_array_header_2_0(fp, d):
+ """ Write the header for an array using the 2.0 format.
+ The 2.0 format allows storing very large structured arrays.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ fp : filelike object
+ d : dict
+ This has the appropriate entries for writing its string
+ representation to the header of the file.
+ """
+ _write_array_header(fp, d, (2, 0))
+
+
+def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
+ """
+ Read an array header from a filelike object using the 1.0 file format
+ version.
+
+ This will leave the file object located just after the header.
+
+ Parameters
+ ----------
+ fp : filelike object
+        A file object or something with a `.read()` method like a file.
+    max_header_size : int, optional
+        Maximum allowed size of the header. Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:meth:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous. Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+ Raises
+ ------
+ ValueError
+ If the data is invalid.
+
+ """
+ return _read_array_header(
+ fp, version=(1, 0), max_header_size=max_header_size)
+
+
+def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
+ """
+ Read an array header from a filelike object using the 2.0 file format
+ version.
+
+ This will leave the file object located just after the header.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ fp : filelike object
+ A file object or something with a `.read()` method like a file.
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:meth:`ast.literal_eval()` for details.
+
+ Returns
+ -------
+ shape : tuple of int
+ The shape of the array.
+ fortran_order : bool
+ The array data will be written out directly if it is either
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
+ contiguous before writing it out.
+ dtype : dtype
+ The dtype of the file's data.
+
+ Raises
+ ------
+ ValueError
+ If the data is invalid.
+
+ """
+ return _read_array_header(
+ fp, version=(2, 0), max_header_size=max_header_size)
+
+
+def _filter_header(s):
+ """Clean up 'L' in npz header ints.
+
+ Cleans up the 'L' in strings representing integers. Needed to allow npz
+ headers produced in Python2 to be read in Python3.
+
+ Parameters
+ ----------
+ s : string
+ Npy file header.
+
+ Returns
+ -------
+ header : str
+ Cleaned up header.
+
+ """
+ import tokenize
+ from io import StringIO
+
+ tokens = []
+ last_token_was_number = False
+ for token in tokenize.generate_tokens(StringIO(s).readline):
+ token_type = token[0]
+ token_string = token[1]
+ if (last_token_was_number and
+ token_type == tokenize.NAME and
+ token_string == "L"):
+ continue
+ else:
+ tokens.append(token)
+ last_token_was_number = (token_type == tokenize.NUMBER)
+ return tokenize.untokenize(tokens)
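+
+# Sketch of the intent (illustrative): a Python 2 header such as
+#     "{'shape': (3L, 4L), 'fortran_order': False}"
+# is re-tokenized with the trailing 'L' suffixes dropped so that it can be
+# passed to safe_eval on Python 3.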
+
+
+def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
+ """
+ see read_array_header_1_0
+ """
+    # Read the unsigned, little-endian integer (16-bit for format version
+    # (1, 0), 32-bit for later versions) which holds the length of the header.
+ import struct
+ hinfo = _header_size_info.get(version)
+ if hinfo is None:
+ raise ValueError("Invalid version {!r}".format(version))
+ hlength_type, encoding = hinfo
+
+ hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+ header_length = struct.unpack(hlength_type, hlength_str)[0]
+ header = _read_bytes(fp, header_length, "array header")
+ header = header.decode(encoding)
+ if len(header) > max_header_size:
+ raise ValueError(
+ f"Header info length ({len(header)}) is large and may not be safe "
+ "to load securely.\n"
+ "To allow loading, adjust `max_header_size` or fully trust "
+ "the `.npy` file using `allow_pickle=True`.\n"
+ "For safety against large resource use or crashes, sandboxing "
+ "may be necessary.")
+
+ # The header is a pretty-printed string representation of a literal
+ # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
+ # boundary. The keys are strings.
+ # "shape" : tuple of int
+ # "fortran_order" : bool
+ # "descr" : dtype.descr
+ # Versions (2, 0) and (1, 0) could have been created by a Python 2
+ # implementation before header filtering was implemented.
+ if version <= (2, 0):
+ header = _filter_header(header)
+ try:
+ d = safe_eval(header)
+ except SyntaxError as e:
+ msg = "Cannot parse header: {!r}"
+ raise ValueError(msg.format(header)) from e
+ if not isinstance(d, dict):
+ msg = "Header is not a dictionary: {!r}"
+ raise ValueError(msg.format(d))
+
+ if EXPECTED_KEYS != d.keys():
+ keys = sorted(d.keys())
+ msg = "Header does not contain the correct keys: {!r}"
+ raise ValueError(msg.format(keys))
+
+ # Sanity-check the values.
+ if (not isinstance(d['shape'], tuple) or
+ not all(isinstance(x, int) for x in d['shape'])):
+ msg = "shape is not valid: {!r}"
+ raise ValueError(msg.format(d['shape']))
+ if not isinstance(d['fortran_order'], bool):
+ msg = "fortran_order is not a valid bool: {!r}"
+ raise ValueError(msg.format(d['fortran_order']))
+ try:
+ dtype = descr_to_dtype(d['descr'])
+ except TypeError as e:
+ msg = "descr is not a valid dtype descriptor: {!r}"
+ raise ValueError(msg.format(d['descr'])) from e
+
+ return d['shape'], d['fortran_order'], dtype
+
+
+def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
+ """
+ Write an array to an NPY file, including a header.
+
+ If the array is neither C-contiguous nor Fortran-contiguous AND the
+ file_like object is not a real file object, this function will have to
+ copy data in memory.
+
+ Parameters
+ ----------
+ fp : file_like object
+ An open, writable file object, or similar object with a
+ ``.write()`` method.
+ array : ndarray
+ The array to write to disk.
+ version : (int, int) or None, optional
+ The version number of the format. None means use the oldest
+ supported version that is able to store the data. Default: None
+ allow_pickle : bool, optional
+ Whether to allow writing pickled data. Default: True
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass to pickle.dump, excluding
+ 'protocol'. These are only useful when pickling objects in object
+ arrays on Python 3 to Python 2 compatible format.
+
+ Raises
+ ------
+ ValueError
+ If the array cannot be persisted. This includes the case of
+ allow_pickle=False and array being an object array.
+ Various other errors
+ If the array contains Python objects as part of its dtype, the
+ process of pickling them may raise various errors if the objects
+ are not picklable.
+
+ """
+ _check_version(version)
+ _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+ if array.itemsize == 0:
+ buffersize = 0
+ else:
+ # Set buffer size to 16 MiB to hide the Python loop overhead.
+ buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+ if array.dtype.hasobject:
+ # We contain Python objects so we cannot write out the data
+ # directly. Instead, we will pickle it out
+ if not allow_pickle:
+ raise ValueError("Object arrays cannot be saved when "
+ "allow_pickle=False")
+ if pickle_kwargs is None:
+ pickle_kwargs = {}
+ pickle.dump(array, fp, protocol=3, **pickle_kwargs)
+ elif array.flags.f_contiguous and not array.flags.c_contiguous:
+ if isfileobj(fp):
+ array.T.tofile(fp)
+ else:
+ for chunk in numpy.nditer(
+ array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+ buffersize=buffersize, order='F'):
+ fp.write(chunk.tobytes('C'))
+ else:
+ if isfileobj(fp):
+ array.tofile(fp)
+ else:
+ for chunk in numpy.nditer(
+ array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+ buffersize=buffersize, order='C'):
+ fp.write(chunk.tobytes('C'))
+
+
+def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
+ max_header_size=_MAX_HEADER_SIZE):
+ """
+ Read an array from an NPY file.
+
+ Parameters
+ ----------
+ fp : file_like object
+ If this is not a real file object, then this may take extra memory
+ and time.
+ allow_pickle : bool, optional
+        Whether to allow reading pickled data. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
+ pickle_kwargs : dict
+ Additional keyword arguments to pass to pickle.load. These are only
+ useful when loading object arrays saved on Python 2 when using
+ Python 3.
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:meth:`ast.literal_eval()` for details.
+ This option is ignored when `allow_pickle` is passed. In that case
+ the file is by definition trusted and the limit is unnecessary.
+
+ Returns
+ -------
+ array : ndarray
+ The array from the data on disk.
+
+ Raises
+ ------
+ ValueError
+ If the data is invalid, or allow_pickle=False and the file contains
+ an object array.
+
+ """
+ if allow_pickle:
+ # Effectively ignore max_header_size, since `allow_pickle` indicates
+ # that the input is fully trusted.
+ max_header_size = 2**64
+
+ version = read_magic(fp)
+ _check_version(version)
+ shape, fortran_order, dtype = _read_array_header(
+ fp, version, max_header_size=max_header_size)
+ if len(shape) == 0:
+ count = 1
+ else:
+ count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+ # Now read the actual data.
+ if dtype.hasobject:
+ # The array contained Python objects. We need to unpickle the data.
+ if not allow_pickle:
+ raise ValueError("Object arrays cannot be loaded when "
+ "allow_pickle=False")
+ if pickle_kwargs is None:
+ pickle_kwargs = {}
+ try:
+ array = pickle.load(fp, **pickle_kwargs)
+ except UnicodeError as err:
+ # Friendlier error message
+ raise UnicodeError("Unpickling a python object failed: %r\n"
+ "You may need to pass the encoding= option "
+ "to numpy.load" % (err,)) from err
+ else:
+ if isfileobj(fp):
+ # We can use the fast fromfile() function.
+ array = numpy.fromfile(fp, dtype=dtype, count=count)
+ else:
+ # This is not a real file. We have to read it the
+ # memory-intensive way.
+ # crc32 module fails on reads greater than 2 ** 32 bytes,
+ # breaking large reads from gzip streams. Chunk reads to
+ # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+ # of the read. In non-chunked case count < max_read_count, so
+ # only one read is performed.
+
+ # Use np.ndarray instead of np.empty since the latter does
+ # not correctly instantiate zero-width string dtypes; see
+ # https://github.com/numpy/numpy/pull/6430
+ array = numpy.ndarray(count, dtype=dtype)
+
+ if dtype.itemsize > 0:
+ # If dtype.itemsize == 0 then there's nothing more to read
+ max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
+
+ for i in range(0, count, max_read_count):
+ read_count = min(max_read_count, count - i)
+ read_size = int(read_count * dtype.itemsize)
+ data = _read_bytes(fp, read_size, "array data")
+ array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+ count=read_count)
+
+ if fortran_order:
+ array.shape = shape[::-1]
+ array = array.transpose()
+ else:
+ array.shape = shape
+
+ return array
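+
+# Round-trip sketch for write_array/read_array (illustrative usage; the
+# BytesIO buffer is an assumption, not part of the library):
+#
+#     >>> import io
+#     >>> buf = io.BytesIO()
+#     >>> write_array(buf, numpy.arange(6).reshape(2, 3))
+#     >>> _ = buf.seek(0)
+#     >>> read_array(buf)
+#     array([[0, 1, 2],
+#            [3, 4, 5]])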
+
+
+def open_memmap(filename, mode='r+', dtype=None, shape=None,
+ fortran_order=False, version=None, *,
+ max_header_size=_MAX_HEADER_SIZE):
+ """
+ Open a .npy file as a memory-mapped array.
+
+ This may be used to read an existing file or create a new one.
+
+ Parameters
+ ----------
+ filename : str or path-like
+ The name of the file on disk. This may *not* be a file-like
+ object.
+ mode : str, optional
+ The mode in which to open the file; the default is 'r+'. In
+ addition to the standard file modes, 'c' is also accepted to mean
+ "copy on write." See `memmap` for the available mode strings.
+ dtype : data-type, optional
+        The data type of the array if we are creating a new file in "write"
+        mode; otherwise, `dtype` is ignored. The default value is None, which
+        results in a data-type of `float64`.
+ shape : tuple of int
+ The shape of the array if we are creating a new file in "write"
+ mode, in which case this parameter is required. Otherwise, this
+ parameter is ignored and is thus optional.
+ fortran_order : bool, optional
+ Whether the array should be Fortran-contiguous (True) or
+ C-contiguous (False, the default) if we are creating a new file in
+ "write" mode.
+ version : tuple of int (major, minor) or None
+ If the mode is a "write" mode, then this is the version of the file
+ format used to create the file. None means use the oldest
+ supported version that is able to store the data. Default: None
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:meth:`ast.literal_eval()` for details.
+
+ Returns
+ -------
+ marray : memmap
+ The memory-mapped array.
+
+ Raises
+ ------
+ ValueError
+ If the data or the mode is invalid.
+ OSError
+ If the file is not found or cannot be opened correctly.
+
+ See Also
+ --------
+ numpy.memmap
+
+ """
+ if isfileobj(filename):
+ raise ValueError("Filename must be a string or a path-like object."
+ " Memmap cannot use existing file handles.")
+
+ if 'w' in mode:
+ # We are creating the file, not reading it.
+ # Check if we ought to create the file.
+ _check_version(version)
+ # Ensure that the given dtype is an authentic dtype object rather
+ # than just something that can be interpreted as a dtype object.
+ dtype = numpy.dtype(dtype)
+ if dtype.hasobject:
+ msg = "Array can't be memory-mapped: Python objects in dtype."
+ raise ValueError(msg)
+ d = dict(
+ descr=dtype_to_descr(dtype),
+ fortran_order=fortran_order,
+ shape=shape,
+ )
+ # If we got here, then it should be safe to create the file.
+ with open(os_fspath(filename), mode+'b') as fp:
+ _write_array_header(fp, d, version)
+ offset = fp.tell()
+ else:
+ # Read the header of the file first.
+ with open(os_fspath(filename), 'rb') as fp:
+ version = read_magic(fp)
+ _check_version(version)
+
+ shape, fortran_order, dtype = _read_array_header(
+ fp, version, max_header_size=max_header_size)
+ if dtype.hasobject:
+ msg = "Array can't be memory-mapped: Python objects in dtype."
+ raise ValueError(msg)
+ offset = fp.tell()
+
+ if fortran_order:
+ order = 'F'
+ else:
+ order = 'C'
+
+ # We need to change a write-only mode to a read-write mode since we've
+ # already written data to the file.
+ if mode == 'w+':
+ mode = 'r+'
+
+ marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+ mode=mode, offset=offset)
+
+ return marray
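+
+# Usage sketch (illustrative; the temporary-path handling is an assumption,
+# not part of the library):
+#
+#     >>> import os, tempfile
+#     >>> path = os.path.join(tempfile.mkdtemp(), 'example.npy')
+#     >>> m = open_memmap(path, mode='w+', dtype='float32', shape=(2, 2))
+#     >>> m[:] = 1.0
+#     >>> m.flush()
+#     >>> float(open_memmap(path, mode='r')[0, 0])
+#     1.0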
+
+
+def _read_bytes(fp, size, error_template="ran out of data"):
+ """
+    Read from a file-like object until `size` bytes are read.
+    Raises ValueError if EOF is encountered before `size` bytes have been
+    read. Non-blocking objects are only supported if they derive from io
+    objects.
+
+    Required because e.g. ZipExtFile in Python 2.6 can return less data
+    than requested.
+ """
+ data = bytes()
+ while True:
+ # io files (default in python3) return None or raise on
+ # would-block, python2 file will truncate, probably nothing can be
+ # done about that. note that regular files can't be non-blocking
+ try:
+ r = fp.read(size - len(data))
+ data += r
+ if len(r) == 0 or len(data) == size:
+ break
+ except BlockingIOError:
+ pass
+ if len(data) != size:
+ msg = "EOF: reading %s, expected %d bytes got %d"
+ raise ValueError(msg % (error_template, size, len(data)))
+ else:
+ return data
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/format.pyi b/venv/lib/python3.9/site-packages/numpy/lib/format.pyi
new file mode 100644
index 00000000..a4468f52
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/format.pyi
@@ -0,0 +1,22 @@
+from typing import Any, Literal, Final
+
+__all__: list[str]
+
+EXPECTED_KEYS: Final[set[str]]
+MAGIC_PREFIX: Final[bytes]
+MAGIC_LEN: Literal[8]
+ARRAY_ALIGN: Literal[64]
+BUFFER_SIZE: Literal[262144] # 2**18
+
+def magic(major, minor): ...
+def read_magic(fp): ...
+def dtype_to_descr(dtype): ...
+def descr_to_dtype(descr): ...
+def header_data_from_array_1_0(array): ...
+def write_array_header_1_0(fp, d): ...
+def write_array_header_2_0(fp, d): ...
+def read_array_header_1_0(fp): ...
+def read_array_header_2_0(fp): ...
+def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
+def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/function_base.py b/venv/lib/python3.9/site-packages/numpy/lib/function_base.py
new file mode 100644
index 00000000..84b96d54
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/function_base.py
@@ -0,0 +1,5614 @@
+import collections.abc
+import functools
+import re
+import sys
+import warnings
+
+import numpy as np
+import numpy.core.numeric as _nx
+from numpy.core import transpose
+from numpy.core.numeric import (
+ ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty,
+ ndarray, take, dot, where, intp, integer, isscalar, absolute
+ )
+from numpy.core.umath import (
+ pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
+ mod, exp, not_equal, subtract
+ )
+from numpy.core.fromnumeric import (
+ ravel, nonzero, partition, mean, any, sum
+ )
+from numpy.core.numerictypes import typecodes
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.core.function_base import add_newdoc
+from numpy.lib.twodim_base import diag
+from numpy.core.multiarray import (
+ _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
+ interp as compiled_interp, interp_complex as compiled_interp_complex
+ )
+from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
+
+import builtins
+
+# needed in this module for compatibility
+from numpy.lib.histograms import histogram, histogramdd # noqa: F401
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
+ 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
+ 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
+ 'bincount', 'digitize', 'cov', 'corrcoef',
+ 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
+ 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
+ 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
+ 'quantile'
+ ]
+
+# _QuantileMethods is a dictionary listing all the supported methods to
+# compute quantile/percentile.
+#
+# Below, virtual_index refers to the index of the element where the percentile
+# would be found in the sorted sample.
+# When the sample contains exactly the percentile wanted, the virtual_index is
+# an integer equal to the index of this element.
+# When the percentile wanted is in between two elements, the virtual_index
+# is made of an integer part (a.k.a 'i' or 'left') and a fractional part
+# (a.k.a 'g' or 'gamma').
+#
+# Each method in _QuantileMethods has two properties:
+# get_virtual_index : Callable
+#     The function used to compute the virtual_index.
+# fix_gamma : Callable
+#     A function used for discrete methods to force the index to a specific
+#     value.
+# A worked example of the default ('linear') method follows the dict below.
+_QuantileMethods = dict(
+ # --- HYNDMAN and FAN METHODS
+ # Discrete methods
+ inverted_cdf=dict(
+ get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles),
+ fix_gamma=lambda gamma, _: gamma, # should never be called
+ ),
+ averaged_inverted_cdf=dict(
+ get_virtual_index=lambda n, quantiles: (n * quantiles) - 1,
+ fix_gamma=lambda gamma, _: _get_gamma_mask(
+ shape=gamma.shape,
+ default_value=1.,
+ conditioned_value=0.5,
+ where=gamma == 0),
+ ),
+ closest_observation=dict(
+ get_virtual_index=lambda n, quantiles: _closest_observation(n,
+ quantiles),
+ fix_gamma=lambda gamma, _: gamma, # should never be called
+ ),
+ # Continuous methods
+ interpolated_inverted_cdf=dict(
+ get_virtual_index=lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 0, 1),
+ fix_gamma=lambda gamma, _: gamma,
+ ),
+ hazen=dict(
+ get_virtual_index=lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 0.5, 0.5),
+ fix_gamma=lambda gamma, _: gamma,
+ ),
+ weibull=dict(
+ get_virtual_index=lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 0, 0),
+ fix_gamma=lambda gamma, _: gamma,
+ ),
+ # Default method.
+ # To avoid some rounding issues, `(n-1) * quantiles` is preferred to
+ # `_compute_virtual_index(n, quantiles, 1, 1)`.
+ # They are mathematically equivalent.
+ linear=dict(
+ get_virtual_index=lambda n, quantiles: (n - 1) * quantiles,
+ fix_gamma=lambda gamma, _: gamma,
+ ),
+ median_unbiased=dict(
+ get_virtual_index=lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0),
+ fix_gamma=lambda gamma, _: gamma,
+ ),
+ normal_unbiased=dict(
+ get_virtual_index=lambda n, quantiles:
+ _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0),
+ fix_gamma=lambda gamma, _: gamma,
+ ),
+ # --- OTHER METHODS
+ lower=dict(
+ get_virtual_index=lambda n, quantiles: np.floor(
+ (n - 1) * quantiles).astype(np.intp),
+ fix_gamma=lambda gamma, _: gamma,
+ # should never be called, index dtype is int
+ ),
+ higher=dict(
+ get_virtual_index=lambda n, quantiles: np.ceil(
+ (n - 1) * quantiles).astype(np.intp),
+ fix_gamma=lambda gamma, _: gamma,
+ # should never be called, index dtype is int
+ ),
+ midpoint=dict(
+ get_virtual_index=lambda n, quantiles: 0.5 * (
+ np.floor((n - 1) * quantiles)
+ + np.ceil((n - 1) * quantiles)),
+ fix_gamma=lambda gamma, index: _get_gamma_mask(
+ shape=gamma.shape,
+ default_value=0.5,
+ conditioned_value=0.,
+ where=index % 1 == 0),
+ ),
+ nearest=dict(
+ get_virtual_index=lambda n, quantiles: np.around(
+ (n - 1) * quantiles).astype(np.intp),
+ fix_gamma=lambda gamma, _: gamma,
+ # should never be called, index dtype is int
+ ))
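+
+# Worked example for the default 'linear' method above (a sketch, not part of
+# the source): for a sorted sample of n = 5 values and quantile q = 0.3,
+# virtual_index = (n - 1) * q = 1.2, so the integer part i = 1 selects the
+# second element and gamma = 0.2 interpolates linearly towards the third:
+#
+#     result = sample[1] + 0.2 * (sample[2] - sample[1])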
+
+
+def _rot90_dispatcher(m, k=None, axes=None):
+ return (m,)
+
+
+@array_function_dispatch(_rot90_dispatcher)
+def rot90(m, k=1, axes=(0, 1)):
+ """
+ Rotate an array by 90 degrees in the plane specified by axes.
+
+ Rotation direction is from the first towards the second axis.
+
+ Parameters
+ ----------
+ m : array_like
+ Array of two or more dimensions.
+ k : integer
+ Number of times the array is rotated by 90 degrees.
+ axes : (2,) array_like
+ The array is rotated in the plane defined by the axes.
+ Axes must be different.
+
+ .. versionadded:: 1.12.0
+
+ Returns
+ -------
+ y : ndarray
+ A rotated view of `m`.
+
+ See Also
+ --------
+ flip : Reverse the order of elements in an array along the given axis.
+ fliplr : Flip an array horizontally.
+ flipud : Flip an array vertically.
+
+ Notes
+ -----
+ ``rot90(m, k=1, axes=(1,0))`` is the reverse of
+ ``rot90(m, k=1, axes=(0,1))``
+
+ ``rot90(m, k=1, axes=(1,0))`` is equivalent to
+ ``rot90(m, k=-1, axes=(0,1))``
+
+ Examples
+ --------
+ >>> m = np.array([[1,2],[3,4]], int)
+ >>> m
+ array([[1, 2],
+ [3, 4]])
+ >>> np.rot90(m)
+ array([[2, 4],
+ [1, 3]])
+ >>> np.rot90(m, 2)
+ array([[4, 3],
+ [2, 1]])
+ >>> m = np.arange(8).reshape((2,2,2))
+ >>> np.rot90(m, 1, (1,2))
+ array([[[1, 3],
+ [0, 2]],
+ [[5, 7],
+ [4, 6]]])
+
+ """
+ axes = tuple(axes)
+ if len(axes) != 2:
+ raise ValueError("len(axes) must be 2.")
+
+ m = asanyarray(m)
+
+ if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
+ raise ValueError("Axes must be different.")
+
+ if (axes[0] >= m.ndim or axes[0] < -m.ndim
+ or axes[1] >= m.ndim or axes[1] < -m.ndim):
+ raise ValueError("Axes={} out of range for array of ndim={}."
+ .format(axes, m.ndim))
+
+ k %= 4
+
+ if k == 0:
+ return m[:]
+ if k == 2:
+ return flip(flip(m, axes[0]), axes[1])
+
+ axes_list = arange(0, m.ndim)
+ (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
+ axes_list[axes[0]])
+
+ if k == 1:
+ return transpose(flip(m, axes[1]), axes_list)
+ else:
+ # k == 3
+ return flip(transpose(m, axes_list), axes[1])
+
+
+def _flip_dispatcher(m, axis=None):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
+def flip(m, axis=None):
+ """
+ Reverse the order of elements in an array along the given axis.
+
+ The shape of the array is preserved, but the elements are reordered.
+
+ .. versionadded:: 1.12.0
+
+ Parameters
+ ----------
+ m : array_like
+ Input array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to flip over. The default,
+ axis=None, will flip over all of the axes of the input array.
+ If axis is negative it counts from the last to the first axis.
+
+ If axis is a tuple of ints, flipping is performed on all of the axes
+ specified in the tuple.
+
+ .. versionchanged:: 1.15.0
+ None and tuples of axes are supported
+
+ Returns
+ -------
+ out : array_like
+ A view of `m` with the entries of axis reversed. Since a view is
+ returned, this operation is done in constant time.
+
+ See Also
+ --------
+ flipud : Flip an array vertically (axis=0).
+ fliplr : Flip an array horizontally (axis=1).
+
+ Notes
+ -----
+ flip(m, 0) is equivalent to flipud(m).
+
+ flip(m, 1) is equivalent to fliplr(m).
+
+ flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+
+ flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
+ positions.
+
+ flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
+ position 0 and position 1.
+
+ Examples
+ --------
+ >>> A = np.arange(8).reshape((2,2,2))
+ >>> A
+ array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+ >>> np.flip(A, 0)
+ array([[[4, 5],
+ [6, 7]],
+ [[0, 1],
+ [2, 3]]])
+ >>> np.flip(A, 1)
+ array([[[2, 3],
+ [0, 1]],
+ [[6, 7],
+ [4, 5]]])
+ >>> np.flip(A)
+ array([[[7, 6],
+ [5, 4]],
+ [[3, 2],
+ [1, 0]]])
+ >>> np.flip(A, (0, 2))
+ array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+ >>> A = np.random.randn(3,4,5)
+ >>> np.all(np.flip(A,2) == A[:,:,::-1,...])
+ True
+ """
+ if not hasattr(m, 'ndim'):
+ m = asarray(m)
+ if axis is None:
+ indexer = (np.s_[::-1],) * m.ndim
+ else:
+ axis = _nx.normalize_axis_tuple(axis, m.ndim)
+ indexer = [np.s_[:]] * m.ndim
+ for ax in axis:
+ indexer[ax] = np.s_[::-1]
+ indexer = tuple(indexer)
+ return m[indexer]
+
+
+@set_module('numpy')
+def iterable(y):
+ """
+ Check whether or not an object can be iterated over.
+
+ Parameters
+ ----------
+ y : object
+ Input object.
+
+ Returns
+ -------
+ b : bool
+ Return ``True`` if the object has an iterator method or is a
+ sequence and ``False`` otherwise.
+
+
+ Examples
+ --------
+ >>> np.iterable([1, 2, 3])
+ True
+ >>> np.iterable(2)
+ False
+
+ Notes
+ -----
+ In most cases, the results of ``np.iterable(obj)`` are consistent with
+ ``isinstance(obj, collections.abc.Iterable)``. One notable exception is
+ the treatment of 0-dimensional arrays::
+
+ >>> from collections.abc import Iterable
+ >>> a = np.array(1.0) # 0-dimensional numpy array
+ >>> isinstance(a, Iterable)
+ True
+ >>> np.iterable(a)
+ False
+
+ """
+ try:
+ iter(y)
+ except TypeError:
+ return False
+ return True
+
+
+def _average_dispatcher(a, axis=None, weights=None, returned=None, *,
+ keepdims=None):
+ return (a, weights)
+
+
+@array_function_dispatch(_average_dispatcher)
+def average(a, axis=None, weights=None, returned=False, *,
+ keepdims=np._NoValue):
+ """
+ Compute the weighted average along the specified axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing data to be averaged. If `a` is not an array, a
+ conversion is attempted.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to average `a`. The default,
+ axis=None, will average over all of the elements of the input array.
+ If axis is negative it counts from the last to the first axis.
+
+ .. versionadded:: 1.7.0
+
+ If axis is a tuple of ints, averaging is performed on all of the axes
+ specified in the tuple instead of a single axis or all the axes as
+ before.
+ weights : array_like, optional
+ An array of weights associated with the values in `a`. Each value in
+ `a` contributes to the average according to its associated weight.
+ The weights array can either be 1-D (in which case its length must be
+ the size of `a` along the given axis) or of the same shape as `a`.
+ If `weights=None`, then all data in `a` are assumed to have a
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
+ returned : bool, optional
+ Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
+ is returned, otherwise only the average is returned.
+ If `weights=None`, `sum_of_weights` is equivalent to the number of
+ elements over which the average is taken.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ *Note:* `keepdims` will not work with instances of `numpy.matrix`
+ or other classes whose methods do not support `keepdims`.
+
+ .. versionadded:: 1.23.0
+
+ Returns
+ -------
+ retval, [sum_of_weights] : array_type or double
+ Return the average along the specified axis. When `returned` is `True`,
+ return a tuple with the average as the first element and the sum
+ of the weights as the second element. `sum_of_weights` is of the
+        same type as `retval`. The result dtype follows a general pattern.
+        If `weights` is None, the result dtype will be that of `a`, or ``float64``
+        if `a` is integral. Otherwise, if `weights` is not None and `a` is non-
+        integral, the result type will be the type of lowest precision capable of
+        representing values of both `a` and `weights`. If `a` happens to be
+        integral, the previous rules still apply but the result dtype will
+        at least be ``float64``.
+
+ Raises
+ ------
+ ZeroDivisionError
+ When all weights along axis are zero. See `numpy.ma.average` for a
+ version robust to this type of error.
+ TypeError
+ When the length of 1D `weights` is not the same as the shape of `a`
+ along axis.
+
+ See Also
+ --------
+ mean
+
+ ma.average : average for masked arrays -- useful if your data contains
+ "missing" values
+ numpy.result_type : Returns the type that results from applying the
+ numpy type promotion rules to the arguments.
+
+ Examples
+ --------
+ >>> data = np.arange(1, 5)
+ >>> data
+ array([1, 2, 3, 4])
+ >>> np.average(data)
+ 2.5
+ >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
+ 4.0
+
+ >>> data = np.arange(6).reshape((3, 2))
+ >>> data
+ array([[0, 1],
+ [2, 3],
+ [4, 5]])
+ >>> np.average(data, axis=1, weights=[1./4, 3./4])
+ array([0.75, 2.75, 4.75])
+ >>> np.average(data, weights=[1./4, 3./4])
+ Traceback (most recent call last):
+ ...
+ TypeError: Axis must be specified when shapes of a and weights differ.
+
+ >>> a = np.ones(5, dtype=np.float128)
+ >>> w = np.ones(5, dtype=np.complex64)
+ >>> avg = np.average(a, weights=w)
+ >>> print(avg.dtype)
+ complex256
+
+ With ``keepdims=True``, the following result has shape (3, 1).
+
+ >>> np.average(data, axis=1, keepdims=True)
+ array([[0.5],
+ [2.5],
+ [4.5]])
+ """
+ a = np.asanyarray(a)
+
+ if keepdims is np._NoValue:
+ # Don't pass on the keepdims argument if one wasn't given.
+ keepdims_kw = {}
+ else:
+ keepdims_kw = {'keepdims': keepdims}
+
+ if weights is None:
+ avg = a.mean(axis, **keepdims_kw)
+ avg_as_array = np.asanyarray(avg)
+ scl = avg_as_array.dtype.type(a.size/avg_as_array.size)
+ else:
+ wgt = np.asanyarray(weights)
+
+ if issubclass(a.dtype.type, (np.integer, np.bool_)):
+ result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+ else:
+ result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+ # Sanity checks
+ if a.shape != wgt.shape:
+ if axis is None:
+ raise TypeError(
+ "Axis must be specified when shapes of a and weights "
+ "differ.")
+ if wgt.ndim != 1:
+ raise TypeError(
+ "1D weights expected when shapes of a and weights differ.")
+ if wgt.shape[0] != a.shape[axis]:
+ raise ValueError(
+ "Length of weights not compatible with specified axis.")
+
+ # setup wgt to broadcast along axis
+ wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
+ wgt = wgt.swapaxes(-1, axis)
+
+ scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
+ if np.any(scl == 0.0):
+ raise ZeroDivisionError(
+ "Weights sum to zero, can't be normalized")
+
+ avg = avg_as_array = np.multiply(a, wgt,
+ dtype=result_dtype).sum(axis, **keepdims_kw) / scl
+
+ if returned:
+ if scl.shape != avg_as_array.shape:
+ scl = np.broadcast_to(scl, avg_as_array.shape).copy()
+ return avg, scl
+ else:
+ return avg
+
+
+@set_module('numpy')
+def asarray_chkfinite(a, dtype=None, order=None):
+ """Convert the input to an array, checking for NaNs or Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists and ndarrays. Success requires no NaNs or Infs.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Memory layout. 'A' and 'K' depend on the order of input array a.
+ 'C' row-major (C-style),
+ 'F' column-major (Fortran-style) memory representation.
+ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+ 'K' (keep) preserve input order
+ Defaults to 'C'.
+
+ Returns
+ -------
+ out : ndarray
+ Array interpretation of `a`. No copy is performed if the input
+ is already an ndarray. If `a` is a subclass of ndarray, a base
+ class ndarray is returned.
+
+ Raises
+ ------
+ ValueError
+ Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
+
+ See Also
+ --------
+    asarray : Create an array.
+ asanyarray : Similar function which passes through subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array. If all elements are finite
+ ``asarray_chkfinite`` is identical to ``asarray``.
+
+ >>> a = [1, 2]
+ >>> np.asarray_chkfinite(a, dtype=float)
+ array([1., 2.])
+
+ Raises ValueError if array_like contains Nans or Infs.
+
+ >>> a = [1, 2, np.inf]
+ >>> try:
+ ... np.asarray_chkfinite(a)
+ ... except ValueError:
+ ... print('ValueError')
+ ...
+ ValueError
+
+ """
+ a = asarray(a, dtype=dtype, order=order)
+ if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
+ raise ValueError(
+ "array must not contain infs or NaNs")
+ return a
+
+
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+ yield x
+ # support the undocumented behavior of allowing scalars
+ if np.iterable(condlist):
+ yield from condlist
+
+
+@array_function_dispatch(_piecewise_dispatcher)
+def piecewise(x, condlist, funclist, *args, **kw):
+ """
+ Evaluate a piecewise-defined function.
+
+ Given a set of conditions and corresponding functions, evaluate each
+ function on the input data wherever its condition is true.
+
+ Parameters
+ ----------
+ x : ndarray or scalar
+ The input domain.
+ condlist : list of bool arrays or bool scalars
+ Each boolean array corresponds to a function in `funclist`. Wherever
+ `condlist[i]` is True, `funclist[i](x)` is used as the output value.
+
+ Each boolean array in `condlist` selects a piece of `x`,
+ and should therefore be of the same shape as `x`.
+
+ The length of `condlist` must correspond to that of `funclist`.
+ If one extra function is given, i.e. if
+ ``len(funclist) == len(condlist) + 1``, then that extra function
+ is the default value, used wherever all conditions are false.
+ funclist : list of callables, f(x,*args,**kw), or scalars
+ Each function is evaluated over `x` wherever its corresponding
+ condition is True. It should take a 1d array as input and give an 1d
+ array or a scalar value as output. If, instead of a callable,
+ a scalar is provided then a constant function (``lambda x: scalar``) is
+ assumed.
+ args : tuple, optional
+ Any further arguments given to `piecewise` are passed to the functions
+ upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
+ each function is called as ``f(x, 1, 'a')``.
+ kw : dict, optional
+ Keyword arguments used in calling `piecewise` are passed to the
+ functions upon execution, i.e., if called
+ ``piecewise(..., ..., alpha=1)``, then each function is called as
+ ``f(x, alpha=1)``.
+
+ Returns
+ -------
+ out : ndarray
+ The output is the same shape and type as x and is found by
+ calling the functions in `funclist` on the appropriate portions of `x`,
+ as defined by the boolean arrays in `condlist`. Portions not covered
+ by any condition have a default value of 0.
+
+
+ See Also
+ --------
+ choose, select, where
+
+ Notes
+ -----
+ This is similar to choose or select, except that functions are
+ evaluated on elements of `x` that satisfy the corresponding condition from
+ `condlist`.
+
+ The result is::
+
+ |--
+ |funclist[0](x[condlist[0]])
+ out = |funclist[1](x[condlist[1]])
+ |...
+          |funclist[n2-1](x[condlist[n2-1]])
+ |--
+
+ Examples
+ --------
+ Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
+
+ >>> x = np.linspace(-2.5, 2.5, 6)
+ >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
+ array([-1., -1., -1., 1., 1., 1.])
+
+ Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
+ ``x >= 0``.
+
+ >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
+ array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
+
+ Apply the same function to a scalar value.
+
+ >>> y = -2
+ >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])
+ array(2)
+
+ """
+ x = asanyarray(x)
+ n2 = len(funclist)
+
+ # undocumented: single condition is promoted to a list of one condition
+ if isscalar(condlist) or (
+ not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
+ condlist = [condlist]
+
+ condlist = asarray(condlist, dtype=bool)
+ n = len(condlist)
+
+ if n == n2 - 1: # compute the "otherwise" condition.
+ condelse = ~np.any(condlist, axis=0, keepdims=True)
+ condlist = np.concatenate([condlist, condelse], axis=0)
+ n += 1
+ elif n != n2:
+ raise ValueError(
+ "with {} condition(s), either {} or {} functions are expected"
+ .format(n, n, n+1)
+ )
+
+ y = zeros_like(x)
+ for cond, func in zip(condlist, funclist):
+ if not isinstance(func, collections.abc.Callable):
+ y[cond] = func
+ else:
+ vals = x[cond]
+ if vals.size > 0:
+ y[cond] = func(vals, *args, **kw)
+
+ return y
+
+
+def _select_dispatcher(condlist, choicelist, default=None):
+ yield from condlist
+ yield from choicelist
+
+
+@array_function_dispatch(_select_dispatcher)
+def select(condlist, choicelist, default=0):
+ """
+ Return an array drawn from elements in choicelist, depending on conditions.
+
+ Parameters
+ ----------
+ condlist : list of bool ndarrays
+ The list of conditions which determine from which array in `choicelist`
+ the output elements are taken. When multiple conditions are satisfied,
+ the first one encountered in `condlist` is used.
+ choicelist : list of ndarrays
+ The list of arrays from which the output elements are taken. It has
+ to be of the same length as `condlist`.
+ default : scalar, optional
+ The element inserted in `output` when all conditions evaluate to False.
+
+ Returns
+ -------
+ output : ndarray
+ The output at position m is the m-th element of the array in
+ `choicelist` where the m-th element of the corresponding array in
+ `condlist` is True.
+
+ See Also
+ --------
+ where : Return elements from one of two arrays depending on condition.
+ take, choose, compress, diag, diagonal
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> condlist = [x<3, x>3]
+ >>> choicelist = [x, x**2]
+ >>> np.select(condlist, choicelist, 42)
+ array([ 0, 1, 2, 42, 16, 25])
+
+ >>> condlist = [x<=4, x>3]
+ >>> choicelist = [x, x**2]
+ >>> np.select(condlist, choicelist, 55)
+ array([ 0, 1, 2, 3, 4, 25])
+
+ """
+ # Check the size of condlist and choicelist are the same, or abort.
+ if len(condlist) != len(choicelist):
+ raise ValueError(
+ 'list of cases must be same length as list of conditions')
+
+    # Reject the deprecated select([], []) case before any further work.
+ if len(condlist) == 0:
+ raise ValueError("select with an empty condition list is not possible")
+
+ choicelist = [np.asarray(choice) for choice in choicelist]
+
+ try:
+ intermediate_dtype = np.result_type(*choicelist)
+ except TypeError as e:
+ msg = f'Choicelist elements do not have a common dtype: {e}'
+ raise TypeError(msg) from None
+ default_array = np.asarray(default)
+ choicelist.append(default_array)
+
+ # need to get the result type before broadcasting for correct scalar
+ # behaviour
+ try:
+ dtype = np.result_type(intermediate_dtype, default_array)
+ except TypeError as e:
+ msg = f'Choicelists and default value do not have a common dtype: {e}'
+ raise TypeError(msg) from None
+
+ # Convert conditions to arrays and broadcast conditions and choices
+ # as the shape is needed for the result. Doing it separately optimizes
+ # for example when all choices are scalars.
+ condlist = np.broadcast_arrays(*condlist)
+ choicelist = np.broadcast_arrays(*choicelist)
+
+ # If cond array is not an ndarray in boolean format or scalar bool, abort.
+ for i, cond in enumerate(condlist):
+ if cond.dtype.type is not np.bool_:
+ raise TypeError(
+ 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
+
+ if choicelist[0].ndim == 0:
+ # This may be common, so avoid the call.
+ result_shape = condlist[0].shape
+ else:
+ result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
+
+ result = np.full(result_shape, choicelist[-1], dtype)
+
+ # Use np.copyto to burn each choicelist array onto result, using the
+ # corresponding condlist as a boolean mask. This is done in reverse
+ # order since the first choice should take precedence.
+ choicelist = choicelist[-2::-1]
+ condlist = condlist[::-1]
+ for choice, cond in zip(choicelist, condlist):
+ np.copyto(result, choice, where=cond)
+
+ return result
+
+
+def _copy_dispatcher(a, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_copy_dispatcher)
+def copy(a, order='K', subok=False):
+ """
+ Return an array copy of the given object.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ order : {'C', 'F', 'A', 'K'}, optional
+ Controls the memory layout of the copy. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
+ 'C' otherwise. 'K' means match the layout of `a` as closely
+ as possible. (Note that this function and :meth:`ndarray.copy` are very
+ similar, but have different default values for their order=
+ arguments.)
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise the
+ returned array will be forced to be a base-class array (defaults to False).
+
+ .. versionadded:: 1.19.0
+
+ Returns
+ -------
+ arr : ndarray
+ Array interpretation of `a`.
+
+ See Also
+ --------
+ ndarray.copy : Preferred method for creating an array copy
+
+ Notes
+ -----
+ This is equivalent to:
+
+ >>> np.array(a, copy=True) #doctest: +SKIP
+
+ Examples
+ --------
+ Create an array x, with a reference y and a copy z:
+
+ >>> x = np.array([1, 2, 3])
+ >>> y = x
+ >>> z = np.copy(x)
+
+ Note that, when we modify x, y changes, but not z:
+
+ >>> x[0] = 10
+ >>> x[0] == y[0]
+ True
+ >>> x[0] == z[0]
+ False
+
+    Note that np.copy clears a previously set WRITEABLE=False flag.
+
+ >>> a = np.array([1, 2, 3])
+ >>> a.flags["WRITEABLE"] = False
+ >>> b = np.copy(a)
+ >>> b.flags["WRITEABLE"]
+ True
+ >>> b[0] = 3
+ >>> b
+ array([3, 2, 3])
+
+ Note that np.copy is a shallow copy and will not copy object
+ elements within arrays. This is mainly important for arrays
+ containing Python objects. The new array will contain the
+ same object which may lead to surprises if that object can
+ be modified (is mutable):
+
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> b = np.copy(a)
+ >>> b[2][0] = 10
+ >>> a
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+
+ To ensure all elements within an ``object`` array are copied,
+ use `copy.deepcopy`:
+
+ >>> import copy
+ >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)
+ >>> c = copy.deepcopy(a)
+ >>> c[2][0] = 10
+ >>> c
+ array([1, 'm', list([10, 3, 4])], dtype=object)
+ >>> a
+ array([1, 'm', list([2, 3, 4])], dtype=object)
+
+ """
+ return array(a, order=order, subok=subok, copy=True)
+
+# Basic operations
+
+
+def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None):
+ yield f
+ yield from varargs
+
+
+@array_function_dispatch(_gradient_dispatcher)
+def gradient(f, *varargs, axis=None, edge_order=1):
+ """
+ Return the gradient of an N-dimensional array.
+
+ The gradient is computed using second order accurate central differences
+    in the interior points and either first or second order accurate one-sided
+    (forward or backward) differences at the boundaries.
+ The returned gradient hence has the same shape as the input array.
+
+ Parameters
+ ----------
+ f : array_like
+ An N-dimensional array containing samples of a scalar function.
+ varargs : list of scalar or array, optional
+ Spacing between f values. Default unitary spacing for all dimensions.
+ Spacing can be specified using:
+
+ 1. single scalar to specify a sample distance for all dimensions.
+ 2. N scalars to specify a constant sample distance for each dimension.
+ i.e. `dx`, `dy`, `dz`, ...
+ 3. N arrays to specify the coordinates of the values along each
+ dimension of F. The length of the array must match the size of
+ the corresponding dimension
+ 4. Any combination of N scalars/arrays with the meaning of 2. and 3.
+
+ If `axis` is given, the number of varargs must equal the number of axes.
+ Default: 1.
+
+ edge_order : {1, 2}, optional
+ Gradient is calculated using N-th order accurate differences
+ at the boundaries. Default: 1.
+
+ .. versionadded:: 1.9.1
+
+ axis : None or int or tuple of ints, optional
+ Gradient is calculated only along the given axis or axes
+ The default (axis = None) is to calculate the gradient for all the axes
+ of the input array. axis may be negative, in which case it counts from
+ the last to the first axis.
+
+ .. versionadded:: 1.11.0
+
+ Returns
+ -------
+ gradient : ndarray or list of ndarray
+ A list of ndarrays (or a single ndarray if there is only one dimension)
+ corresponding to the derivatives of f with respect to each dimension.
+ Each derivative has the same shape as f.
+
+ Examples
+ --------
+ >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
+ >>> np.gradient(f)
+ array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+ >>> np.gradient(f, 2)
+ array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+
+ Spacing can be also specified with an array that represents the coordinates
+ of the values F along the dimensions.
+ For instance a uniform spacing:
+
+ >>> x = np.arange(f.size)
+ >>> np.gradient(f, x)
+ array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+
+ Or a non uniform one:
+
+ >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
+ >>> np.gradient(f, x)
+ array([1. , 3. , 3.5, 6.7, 6.9, 2.5])
+
+ For two dimensional arrays, the return will be two arrays ordered by
+ axis. In this example the first array stands for the gradient in
+ rows and the second one in columns direction:
+
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
+ [array([[ 2., 2., -1.],
+ [ 2., 2., -1.]]), array([[1. , 2.5, 4. ],
+ [1. , 1. , 1. ]])]
+
+ In this example the spacing is also specified:
+ uniform for axis=0 and non uniform for axis=1
+
+ >>> dx = 2.
+ >>> y = [1., 1.5, 3.5]
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
+ [array([[ 1. , 1. , -0.5],
+ [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ],
+ [2. , 1.7, 0.5]])]
+
+ It is possible to specify how boundaries are treated using `edge_order`
+
+ >>> x = np.array([0, 1, 2, 3, 4])
+ >>> f = x**2
+ >>> np.gradient(f, edge_order=1)
+ array([1., 2., 4., 6., 7.])
+ >>> np.gradient(f, edge_order=2)
+ array([0., 2., 4., 6., 8.])
+
+ The `axis` keyword can be used to specify a subset of axes of which the
+ gradient is calculated
+
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)
+ array([[ 2., 2., -1.],
+ [ 2., 2., -1.]])
+
+ Notes
+ -----
+ Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
+ derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we
+ minimize the "consistency error" :math:`\\eta_{i}` between the true gradient
+ and its estimate from a linear combination of the neighboring grid-points:
+
+ .. math::
+
+ \\eta_{i} = f_{i}^{\\left(1\\right)} -
+ \\left[ \\alpha f\\left(x_{i}\\right) +
+ \\beta f\\left(x_{i} + h_{d}\\right) +
+ \\gamma f\\left(x_{i}-h_{s}\\right)
+ \\right]
+
+ By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
+ with their Taylor series expansion, this translates into solving
+ the following the linear system:
+
+ .. math::
+
+ \\left\\{
+ \\begin{array}{r}
+ \\alpha+\\beta+\\gamma=0 \\\\
+ \\beta h_{d}-\\gamma h_{s}=1 \\\\
+ \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
+ \\end{array}
+ \\right.
+
+ The resulting approximation of :math:`f_{i}^{(1)}` is the following:
+
+ .. math::
+
+ \\hat f_{i}^{(1)} =
+ \\frac{
+ h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
+ - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
+ { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ + h_{s}h_{d}^{2}}{h_{d}
+ + h_{s}}\\right)
+
+ It is worth noting that if :math:`h_{s}=h_{d}`
+ (i.e., data are evenly spaced)
+ we find the standard second order approximation:
+
+ .. math::
+
+ \\hat f_{i}^{(1)}=
+ \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ + \\mathcal{O}\\left(h^{2}\\right)
+
+ With a similar procedure the forward/backward approximations used for
+ boundaries can be derived.
+
+ References
+ ----------
+ .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
+ (Texts in Applied Mathematics). New York: Springer.
+ .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
+ in Geophysical Fluid Dynamics. New York: Springer.
+ .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
+ Arbitrarily Spaced Grids,
+ Mathematics of Computation 51, no. 184 : 699-706.
+ `PDF <http://www.ams.org/journals/mcom/1988-51-184/
+ S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
+ """
+ f = np.asanyarray(f)
+ N = f.ndim # number of dimensions
+
+ if axis is None:
+ axes = tuple(range(N))
+ else:
+ axes = _nx.normalize_axis_tuple(axis, N)
+
+ len_axes = len(axes)
+ n = len(varargs)
+ if n == 0:
+ # no spacing argument - use 1 in all axes
+ dx = [1.0] * len_axes
+ elif n == 1 and np.ndim(varargs[0]) == 0:
+ # single scalar for all axes
+ dx = varargs * len_axes
+ elif n == len_axes:
+ # scalar or 1d array for each axis
+ dx = list(varargs)
+ for i, distances in enumerate(dx):
+ distances = np.asanyarray(distances)
+ if distances.ndim == 0:
+ continue
+ elif distances.ndim != 1:
+ raise ValueError("distances must be either scalars or 1d")
+ if len(distances) != f.shape[axes[i]]:
+ raise ValueError("when 1d, distances must match "
+ "the length of the corresponding dimension")
+ if np.issubdtype(distances.dtype, np.integer):
+ # Convert numpy integer types to float64 to avoid modular
+ # arithmetic in np.diff(distances).
+ distances = distances.astype(np.float64)
+ diffx = np.diff(distances)
+ # if distances are constant reduce to the scalar case
+ # since it brings a consistent speedup
+ if (diffx == diffx[0]).all():
+ diffx = diffx[0]
+ dx[i] = diffx
+ else:
+ raise TypeError("invalid number of arguments")
+
+ if edge_order > 2:
+ raise ValueError("'edge_order' greater than 2 not supported")
+
+ # use central differences on interior and one-sided differences on the
+ # endpoints. This preserves second order-accuracy over the full domain.
+
+ outvals = []
+
+ # create slice objects --- initially all are [:, :, ..., :]
+ slice1 = [slice(None)]*N
+ slice2 = [slice(None)]*N
+ slice3 = [slice(None)]*N
+ slice4 = [slice(None)]*N
+
+ otype = f.dtype
+ if otype.type is np.datetime64:
+ # the timedelta dtype with the same unit information
+ otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
+ # view as timedelta to allow addition
+ f = f.view(otype)
+ elif otype.type is np.timedelta64:
+ pass
+ elif np.issubdtype(otype, np.inexact):
+ pass
+ else:
+ # All other types convert to floating point.
+ # First check if f is a numpy integer type; if so, convert f to float64
+ # to avoid modular arithmetic when computing the changes in f.
+ if np.issubdtype(otype, np.integer):
+ f = f.astype(np.float64)
+ otype = np.float64
+
+ for axis, ax_dx in zip(axes, dx):
+ if f.shape[axis] < edge_order + 1:
+ raise ValueError(
+ "Shape of array too small to calculate a numerical gradient, "
+ "at least (edge_order + 1) elements are required.")
+ # result allocation
+ out = np.empty_like(f, dtype=otype)
+
+ # spacing for the current axis
+ uniform_spacing = np.ndim(ax_dx) == 0
+
+ # Numerical differentiation: 2nd order interior
+ slice1[axis] = slice(1, -1)
+ slice2[axis] = slice(None, -2)
+ slice3[axis] = slice(1, -1)
+ slice4[axis] = slice(2, None)
+
+ if uniform_spacing:
+ out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)
+ else:
+ dx1 = ax_dx[0:-1]
+ dx2 = ax_dx[1:]
+ a = -(dx2)/(dx1 * (dx1 + dx2))
+ b = (dx2 - dx1) / (dx1 * dx2)
+ c = dx1 / (dx2 * (dx1 + dx2))
+ # fix the shape for broadcasting
+ shape = np.ones(N, dtype=int)
+ shape[axis] = -1
+ a.shape = b.shape = c.shape = shape
+ # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
+ out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
+
+ # Numerical differentiation: 1st order edges
+ if edge_order == 1:
+ slice1[axis] = 0
+ slice2[axis] = 1
+ slice3[axis] = 0
+ dx_0 = ax_dx if uniform_spacing else ax_dx[0]
+ # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
+
+ slice1[axis] = -1
+ slice2[axis] = -1
+ slice3[axis] = -2
+ dx_n = ax_dx if uniform_spacing else ax_dx[-1]
+ # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
+ out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
+
+ # Numerical differentiation: 2nd order edges
+ else:
+ slice1[axis] = 0
+ slice2[axis] = 0
+ slice3[axis] = 1
+ slice4[axis] = 2
+ if uniform_spacing:
+ a = -1.5 / ax_dx
+ b = 2. / ax_dx
+ c = -0.5 / ax_dx
+ else:
+ dx1 = ax_dx[0]
+ dx2 = ax_dx[1]
+ a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
+ b = (dx1 + dx2) / (dx1 * dx2)
+ c = - dx1 / (dx2 * (dx1 + dx2))
+ # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
+ out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
+
+ slice1[axis] = -1
+ slice2[axis] = -3
+ slice3[axis] = -2
+ slice4[axis] = -1
+ if uniform_spacing:
+ a = 0.5 / ax_dx
+ b = -2. / ax_dx
+ c = 1.5 / ax_dx
+ else:
+ dx1 = ax_dx[-2]
+ dx2 = ax_dx[-1]
+ a = (dx2) / (dx1 * (dx1 + dx2))
+ b = - (dx2 + dx1) / (dx1 * dx2)
+ c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
+ # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
+ out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
+
+ outvals.append(out)
+
+ # reset the slice object in this dimension to ":"
+ slice1[axis] = slice(None)
+ slice2[axis] = slice(None)
+ slice3[axis] = slice(None)
+ slice4[axis] = slice(None)
+
+ if len_axes == 1:
+ return outvals[0]
+ else:
+ return outvals
+
+
+def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
+ return (a, prepend, append)
+
+
+@array_function_dispatch(_diff_dispatcher)
+def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
+ """
+ Calculate the n-th discrete difference along the given axis.
+
+ The first difference is given by ``out[i] = a[i+1] - a[i]`` along
+ the given axis, higher differences are calculated by using `diff`
+ recursively.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array
+ n : int, optional
+ The number of times values are differenced. If zero, the input
+ is returned as-is.
+ axis : int, optional
+ The axis along which the difference is taken, default is the
+ last axis.
+ prepend, append : array_like, optional
+ Values to prepend or append to `a` along axis prior to
+ performing the difference. Scalar values are expanded to
+ arrays with length 1 in the direction of axis and the shape
+ of the input array along all other axes. Otherwise the
+ dimension and shape must match `a` except along axis.
+
+ .. versionadded:: 1.16.0
+
+ Returns
+ -------
+ diff : ndarray
+ The n-th differences. The shape of the output is the same as `a`
+ except along `axis` where the dimension is smaller by `n`. The
+ type of the output is the same as the type of the difference
+ between any two elements of `a`. This is the same as the type of
+ `a` in most cases. A notable exception is `datetime64`, which
+ results in a `timedelta64` output array.
+
+ See Also
+ --------
+ gradient, ediff1d, cumsum
+
+ Notes
+ -----
+ Type is preserved for boolean arrays, so the result will contain
+ `False` when consecutive elements are the same and `True` when they
+ differ.
+
+ For unsigned integer arrays, the results will also be unsigned. This
+ should not be surprising, as the result is consistent with
+ calculating the difference directly:
+
+ >>> u8_arr = np.array([1, 0], dtype=np.uint8)
+ >>> np.diff(u8_arr)
+ array([255], dtype=uint8)
+ >>> u8_arr[1,...] - u8_arr[0,...]
+ 255
+
+ If this is not desirable, then the array should be cast to a larger
+ integer type first:
+
+ >>> i16_arr = u8_arr.astype(np.int16)
+ >>> np.diff(i16_arr)
+ array([-1], dtype=int16)
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 4, 7, 0])
+ >>> np.diff(x)
+ array([ 1, 2, 3, -7])
+ >>> np.diff(x, n=2)
+ array([ 1, 1, -10])
+
+ >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
+ >>> np.diff(x)
+ array([[2, 3, 4],
+ [5, 1, 2]])
+ >>> np.diff(x, axis=0)
+ array([[-1, 2, 0, -2]])
+
+ >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ >>> np.diff(x)
+ array([1, 1], dtype='timedelta64[D]')
+
+ """
+ if n == 0:
+ return a
+ if n < 0:
+ raise ValueError(
+ "order must be non-negative but got " + repr(n))
+
+ a = asanyarray(a)
+ nd = a.ndim
+ if nd == 0:
+ raise ValueError("diff requires input that is at least one dimensional")
+ axis = normalize_axis_index(axis, nd)
+
+ combined = []
+ if prepend is not np._NoValue:
+ prepend = np.asanyarray(prepend)
+ if prepend.ndim == 0:
+ shape = list(a.shape)
+ shape[axis] = 1
+ prepend = np.broadcast_to(prepend, tuple(shape))
+ combined.append(prepend)
+
+ combined.append(a)
+
+ if append is not np._NoValue:
+ append = np.asanyarray(append)
+ if append.ndim == 0:
+ shape = list(a.shape)
+ shape[axis] = 1
+ append = np.broadcast_to(append, tuple(shape))
+ combined.append(append)
+
+ if len(combined) > 1:
+ a = np.concatenate(combined, axis)
+
+ slice1 = [slice(None)] * nd
+ slice2 = [slice(None)] * nd
+ slice1[axis] = slice(1, None)
+ slice2[axis] = slice(None, -1)
+ slice1 = tuple(slice1)
+ slice2 = tuple(slice2)
+
+ op = not_equal if a.dtype == np.bool_ else subtract
+ for _ in range(n):
+ a = op(a[slice1], a[slice2])
+
+ return a
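+# Sketch for intuition (not part of the library code): iterating the first
+# difference n times matches the alternating binomial sum; for n=2 the result
+# is a[i+2] - 2*a[i+1] + a[i], e.g.
+#
+#   >>> np.diff(np.array([1, 4, 9, 16]), n=2)
+#   array([2, 2])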
+
+
+def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):
+ return (x, xp, fp)
+
+
+@array_function_dispatch(_interp_dispatcher)
+def interp(x, xp, fp, left=None, right=None, period=None):
+ """
+ One-dimensional linear interpolation for monotonically increasing sample points.
+
+ Returns the one-dimensional piecewise linear interpolant to a function
+ with given discrete data points (`xp`, `fp`), evaluated at `x`.
+
+ Parameters
+ ----------
+ x : array_like
+ The x-coordinates at which to evaluate the interpolated values.
+
+ xp : 1-D sequence of floats
+ The x-coordinates of the data points, must be increasing if argument
+ `period` is not specified. Otherwise, `xp` is internally sorted after
+ normalizing the periodic boundaries with ``xp = xp % period``.
+
+ fp : 1-D sequence of float or complex
+ The y-coordinates of the data points, same length as `xp`.
+
+ left : optional float or complex corresponding to fp
+ Value to return for `x < xp[0]`, default is `fp[0]`.
+
+ right : optional float or complex corresponding to fp
+ Value to return for `x > xp[-1]`, default is `fp[-1]`.
+
+ period : None or float, optional
+ A period for the x-coordinates. This parameter allows the proper
+ interpolation of angular x-coordinates. Parameters `left` and `right`
+ are ignored if `period` is specified.
+
+ .. versionadded:: 1.10.0
+
+ Returns
+ -------
+ y : float or complex (corresponding to fp) or ndarray
+ The interpolated values, same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `xp` and `fp` have different lengths
+ If `xp` or `fp` are not 1-D sequences
+ If `period == 0`
+
+ See Also
+ --------
+ scipy.interpolate
+
+ Warnings
+ --------
+ The x-coordinate sequence is expected to be increasing, but this is not
+ explicitly enforced. However, if the sequence `xp` is non-increasing,
+ interpolation results are meaningless.
+
+ Note that, since NaN is unsortable, `xp` also cannot contain NaNs.
+
+ A simple check for `xp` being strictly increasing is::
+
+ np.all(np.diff(xp) > 0)
+
+ Examples
+ --------
+ >>> xp = [1, 2, 3]
+ >>> fp = [3, 2, 0]
+ >>> np.interp(2.5, xp, fp)
+ 1.0
+ >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
+ array([3. , 3. , 2.5 , 0.56, 0. ])
+ >>> UNDEF = -99.0
+ >>> np.interp(3.14, xp, fp, right=UNDEF)
+ -99.0
+
+ Plot an interpolant to the sine function:
+
+ >>> x = np.linspace(0, 2*np.pi, 10)
+ >>> y = np.sin(x)
+ >>> xvals = np.linspace(0, 2*np.pi, 50)
+ >>> yinterp = np.interp(xvals, x, y)
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(x, y, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.plot(xvals, yinterp, '-x')
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.show()
+
+ Interpolation with periodic x-coordinates:
+
+ >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ >>> xp = [190, -190, 350, -350]
+ >>> fp = [5, 10, 3, 4]
+ >>> np.interp(x, xp, fp, period=360)
+ array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75])
+
+ Complex interpolation:
+
+ >>> x = [1.5, 4.0]
+ >>> xp = [2,3,5]
+ >>> fp = [1.0j, 0, 2+3j]
+ >>> np.interp(x, xp, fp)
+ array([0.+1.j , 1.+1.5j])
+
+ """
+
+ fp = np.asarray(fp)
+
+ if np.iscomplexobj(fp):
+ interp_func = compiled_interp_complex
+ input_dtype = np.complex128
+ else:
+ interp_func = compiled_interp
+ input_dtype = np.float64
+
+ if period is not None:
+ if period == 0:
+ raise ValueError("period must be a non-zero value")
+ period = abs(period)
+ left = None
+ right = None
+
+ x = np.asarray(x, dtype=np.float64)
+ xp = np.asarray(xp, dtype=np.float64)
+ fp = np.asarray(fp, dtype=input_dtype)
+
+ if xp.ndim != 1 or fp.ndim != 1:
+ raise ValueError("Data points must be 1-D sequences")
+ if xp.shape[0] != fp.shape[0]:
+ raise ValueError("fp and xp are not of the same length")
+ # normalizing periodic boundaries
+ x = x % period
+ xp = xp % period
+ asort_xp = np.argsort(xp)
+ xp = xp[asort_xp]
+ fp = fp[asort_xp]
+ xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
+ fp = np.concatenate((fp[-1:], fp, fp[0:1]))
+
+ return interp_func(x, xp, fp, left, right)
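+# Note on the periodic branch above: after sorting, one sample is replicated
+# on each side (shifted by -period and +period) so that query points falling
+# between the last and first samples interpolate across the boundary.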
+
+
+def _angle_dispatcher(z, deg=None):
+ return (z,)
+
+
+@array_function_dispatch(_angle_dispatcher)
+def angle(z, deg=False):
+ """
+ Return the angle of the complex argument.
+
+ Parameters
+ ----------
+ z : array_like
+ A complex number or sequence of complex numbers.
+ deg : bool, optional
+ Return angle in degrees if True, radians if False (default).
+
+ Returns
+ -------
+ angle : ndarray or scalar
+ The counterclockwise angle from the positive real axis on the complex
+ plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
+
+ .. versionchanged:: 1.16.0
+ This function works on subclasses of ndarray like `ma.array`.
+
+ See Also
+ --------
+ arctan2
+ absolute
+
+ Notes
+ -----
+ Although the angle of the complex number 0 is undefined, ``numpy.angle(0)``
+ returns the value 0.
+
+ Examples
+ --------
+ >>> np.angle([1.0, 1.0j, 1+1j]) # in radians
+ array([ 0. , 1.57079633, 0.78539816]) # may vary
+ >>> np.angle(1+1j, deg=True) # in degrees
+ 45.0
+
+ """
+ z = asanyarray(z)
+ if issubclass(z.dtype.type, _nx.complexfloating):
+ zimag = z.imag
+ zreal = z.real
+ else:
+ zimag = 0
+ zreal = z
+
+ a = arctan2(zimag, zreal)
+ if deg:
+ a *= 180/pi
+ return a
+
+
+def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None):
+ return (p,)
+
+
+@array_function_dispatch(_unwrap_dispatcher)
+def unwrap(p, discont=None, axis=-1, *, period=2*pi):
+ r"""
+ Unwrap by taking the complement of large deltas with respect to the period.
+
+ This unwraps a signal `p` by changing elements which have an absolute
+ difference from their predecessor of more than ``max(discont, period/2)``
+ to their `period`-complementary values.
+
+ For the default case where `period` is :math:`2\pi` and `discont` is
+ :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences
+ are never greater than :math:`\pi` by adding :math:`2k\pi` for some
+ integer :math:`k`.
+
+ Parameters
+ ----------
+ p : array_like
+ Input array.
+ discont : float, optional
+ Maximum discontinuity between values, default is ``period/2``.
+ Values below ``period/2`` are treated as if they were ``period/2``.
+ To have an effect different from the default, `discont` should be
+ larger than ``period/2``.
+ axis : int, optional
+ Axis along which unwrap will operate, default is the last axis.
+ period : float, optional
+ Size of the range over which the input wraps. By default, it is
+ ``2 pi``.
+
+ .. versionadded:: 1.21.0
+
+ Returns
+ -------
+ out : ndarray
+ Output array.
+
+ See Also
+ --------
+ rad2deg, deg2rad
+
+ Notes
+ -----
+ If the discontinuity in `p` is smaller than ``period/2``,
+ but larger than `discont`, no unwrapping is done because taking
+ the complement would only make the discontinuity larger.
+
+ Examples
+ --------
+ >>> phase = np.linspace(0, np.pi, num=5)
+ >>> phase[3:] += np.pi
+ >>> phase
+ array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary
+ >>> np.unwrap(phase)
+ array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary
+ >>> np.unwrap([0, 1, 2, -1, 0], period=4)
+ array([0, 1, 2, 3, 4])
+ >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6)
+ array([1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4)
+ array([2, 3, 4, 5, 6, 7, 8, 9])
+ >>> phase_deg = np.mod(np.linspace(0, 720, 19), 360) - 180
+ >>> np.unwrap(phase_deg, period=360)
+ array([-180., -140., -100., -60., -20., 20., 60., 100., 140.,
+ 180., 220., 260., 300., 340., 380., 420., 460., 500.,
+ 540.])
+ """
+ p = asarray(p)
+ nd = p.ndim
+ dd = diff(p, axis=axis)
+ if discont is None:
+ discont = period/2
+ slice1 = [slice(None, None)]*nd # full slices
+ slice1[axis] = slice(1, None)
+ slice1 = tuple(slice1)
+ dtype = np.result_type(dd, period)
+ if _nx.issubdtype(dtype, _nx.integer):
+ interval_high, rem = divmod(period, 2)
+ boundary_ambiguous = rem == 0
+ else:
+ interval_high = period / 2
+ boundary_ambiguous = True
+ interval_low = -interval_high
+ ddmod = mod(dd - interval_low, period) + interval_low
+ if boundary_ambiguous:
+ # for `mask = (abs(dd) == period/2)`, the above line made
+ # `ddmod[mask] == -period/2`. correct these such that
+ # `ddmod[mask] == sign(dd[mask])*period/2`.
+ _nx.copyto(ddmod, interval_high,
+ where=(ddmod == interval_low) & (dd > 0))
+ ph_correct = ddmod - dd
+ _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
+ up = array(p, copy=True, dtype=dtype)
+ up[slice1] = p[slice1] + ph_correct.cumsum(axis)
+ return up
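+# Sketch (assuming the default period=2*pi): a jump larger than pi between
+# neighbours is shifted by the multiple of 2*pi that minimizes it, e.g.
+#
+#   >>> np.unwrap(np.array([0.0, 3*np.pi/2]))
+#   array([ 0.        , -1.57079633])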
+
+
+def _sort_complex(a):
+ return (a,)
+
+
+@array_function_dispatch(_sort_complex)
+def sort_complex(a):
+ """
+ Sort a complex array using the real part first, then the imaginary part.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array
+
+ Returns
+ -------
+ out : complex ndarray
+ Always returns a sorted complex array.
+
+ Examples
+ --------
+ >>> np.sort_complex([5, 3, 6, 2, 1])
+ array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
+
+ >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
+ array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
+
+ """
+ b = array(a, copy=True)
+ b.sort()
+ if not issubclass(b.dtype.type, _nx.complexfloating):
+ if b.dtype.char in 'bhBH':
+ return b.astype('F')
+ elif b.dtype.char == 'g':
+ return b.astype('G')
+ else:
+ return b.astype('D')
+ else:
+ return b
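+# The dtype promotion above maps small integer inputs ('b', 'h', 'B', 'H') to
+# single-precision complex ('F'), extended precision ('g') to 'G', and all
+# remaining non-complex dtypes to double-precision complex ('D').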
+
+
+def _trim_zeros(filt, trim=None):
+ return (filt,)
+
+
+@array_function_dispatch(_trim_zeros)
+def trim_zeros(filt, trim='fb'):
+ """
+ Trim the leading and/or trailing zeros from a 1-D array or sequence.
+
+ Parameters
+ ----------
+ filt : 1-D array or sequence
+ Input array.
+ trim : str, optional
+ A string with 'f' representing trim from front and 'b' to trim from
+ back. Default is 'fb', trim zeros from both front and back of the
+ array.
+
+ Returns
+ -------
+ trimmed : 1-D array or sequence
+ The result of trimming the input. The input data type is preserved.
+
+ Examples
+ --------
+ >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
+ >>> np.trim_zeros(a)
+ array([1, 2, 3, 0, 2, 1])
+
+ >>> np.trim_zeros(a, 'b')
+ array([0, 0, 0, ..., 0, 2, 1])
+
+ The input data type is preserved, list/tuple in means list/tuple out.
+
+ >>> np.trim_zeros([0, 1, 2, 0])
+ [1, 2]
+
+ """
+
+ first = 0
+ trim = trim.upper()
+ if 'F' in trim:
+ for i in filt:
+ if i != 0.:
+ break
+ else:
+ first = first + 1
+ last = len(filt)
+ if 'B' in trim:
+ for i in filt[::-1]:
+ if i != 0.:
+ break
+ else:
+ last = last - 1
+ return filt[first:last]
+
+
+def _extract_dispatcher(condition, arr):
+ return (condition, arr)
+
+
+@array_function_dispatch(_extract_dispatcher)
+def extract(condition, arr):
+ """
+ Return the elements of an array that satisfy some condition.
+
+ This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
+ `condition` is boolean, ``np.extract`` is equivalent to ``arr[condition]``.
+
+ Note that `place` does the exact opposite of `extract`.
+
+ Parameters
+ ----------
+ condition : array_like
+ An array whose nonzero or True entries indicate the elements of `arr`
+ to extract.
+ arr : array_like
+ Input array of the same size as `condition`.
+
+ Returns
+ -------
+ extract : ndarray
+ Rank 1 array of values from `arr` where `condition` is True.
+
+ See Also
+ --------
+ take, put, copyto, compress, place
+
+ Examples
+ --------
+ >>> arr = np.arange(12).reshape((3, 4))
+ >>> arr
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> condition = np.mod(arr, 3)==0
+ >>> condition
+ array([[ True, False, False, True],
+ [False, False, True, False],
+ [False, True, False, False]])
+ >>> np.extract(condition, arr)
+ array([0, 3, 6, 9])
+
+
+ If `condition` is boolean:
+
+ >>> arr[condition]
+ array([0, 3, 6, 9])
+
+ """
+ return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
+
+
+def _place_dispatcher(arr, mask, vals):
+ return (arr, mask, vals)
+
+
+@array_function_dispatch(_place_dispatcher)
+def place(arr, mask, vals):
+ """
+ Change elements of an array based on conditional and input values.
+
+ Similar to ``np.copyto(arr, vals, where=mask)``; the difference is that
+ `place` uses the first N elements of `vals`, where N is the number of
+ True values in `mask`, while `copyto` uses the elements where `mask`
+ is True.
+
+ Note that `extract` does the exact opposite of `place`.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Array to put data into.
+ mask : array_like
+ Boolean mask array. Must have the same size as `arr`.
+ vals : 1-D sequence
+ Values to put into `arr`. Only the first N elements are used, where
+ N is the number of True values in `mask`. If `vals` is smaller
+ than N, it will be repeated, and if elements of `arr` are to be
+ masked, this sequence must be non-empty.
+
+ See Also
+ --------
+ copyto, put, take, extract
+
+ Examples
+ --------
+ >>> arr = np.arange(6).reshape(2, 3)
+ >>> np.place(arr, arr>2, [44, 55])
+ >>> arr
+ array([[ 0, 1, 2],
+ [44, 55, 44]])
+
+ """
+ if not isinstance(arr, np.ndarray):
+ raise TypeError("argument 1 must be numpy.ndarray, "
+ "not {name}".format(name=type(arr).__name__))
+
+ return _insert(arr, mask, vals)
+
+
+def disp(mesg, device=None, linefeed=True):
+ """
+ Display a message on a device.
+
+ Parameters
+ ----------
+ mesg : str
+ Message to display.
+ device : object
+ Device to write the message to. If None, defaults to ``sys.stdout``,
+ which is very similar to ``print``. `device` needs to have ``write()``
+ and ``flush()`` methods.
+ linefeed : bool, optional
+ Option whether to print a line feed or not. Defaults to True.
+
+ Raises
+ ------
+ AttributeError
+ If `device` does not have a ``write()`` or ``flush()`` method.
+
+ Examples
+ --------
+ Besides ``sys.stdout``, a file-like object can also be used as it has
+ both required methods:
+
+ >>> from io import StringIO
+ >>> buf = StringIO()
+ >>> np.disp(u'"Display" in a file', device=buf)
+ >>> buf.getvalue()
+ '"Display" in a file\\n'
+
+ """
+ if device is None:
+ device = sys.stdout
+ if linefeed:
+ device.write('%s\n' % mesg)
+ else:
+ device.write('%s' % mesg)
+ device.flush()
+ return
+
+
+# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
+_DIMENSION_NAME = r'\w+'
+_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
+_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
+_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
+_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)
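+# For example, '(m,n),(n,p)->(m,p)' and '(),()->()' both match _SIGNATURE;
+# whitespace is stripped before matching (see _parse_gufunc_signature below).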
+
+
+def _parse_gufunc_signature(signature):
+ """
+ Parse string signatures for a generalized universal function.
+
+ Arguments
+ ---------
+ signature : string
+ Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``
+ for ``np.matmul``.
+
+ Returns
+ -------
+ Tuple of input and output core dimensions parsed from the signature, each
+ of the form List[Tuple[str, ...]].
+ """
+ signature = re.sub(r'\s+', '', signature)
+
+ if not re.match(_SIGNATURE, signature):
+ raise ValueError(
+ 'not a valid gufunc signature: {}'.format(signature))
+ return tuple([tuple(re.findall(_DIMENSION_NAME, arg))
+ for arg in re.findall(_ARGUMENT, arg_list)]
+ for arg_list in signature.split('->'))
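+# Illustration (a sketch, not a doctest run by the test suite):
+#
+#   >>> _parse_gufunc_signature('(m,n),(n,p)->(m,p)')
+#   ([('m', 'n'), ('n', 'p')], [('m', 'p')])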
+
+
+def _update_dim_sizes(dim_sizes, arg, core_dims):
+ """
+ Incrementally check and update core dimension sizes for a single argument.
+
+ Arguments
+ ---------
+ dim_sizes : Dict[str, int]
+ Sizes of existing core dimensions. Will be updated in-place.
+ arg : ndarray
+ Argument to examine.
+ core_dims : Tuple[str, ...]
+ Core dimensions for this argument.
+ """
+ if not core_dims:
+ return
+
+ num_core_dims = len(core_dims)
+ if arg.ndim < num_core_dims:
+ raise ValueError(
+ '%d-dimensional argument does not have enough '
+ 'dimensions for all core dimensions %r'
+ % (arg.ndim, core_dims))
+
+ core_shape = arg.shape[-num_core_dims:]
+ for dim, size in zip(core_dims, core_shape):
+ if dim in dim_sizes:
+ if size != dim_sizes[dim]:
+ raise ValueError(
+ 'inconsistent size for core dimension %r: %r vs %r'
+ % (dim, size, dim_sizes[dim]))
+ else:
+ dim_sizes[dim] = size
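+# Sketch of the bookkeeping above: an argument of shape (4, 2, 3) with core
+# dims ('n', 'p') matches its trailing axes, so dim_sizes becomes
+# {'n': 2, 'p': 3}; a later argument whose 'n' axis is not of size 2 raises
+# ValueError.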
+
+
+def _parse_input_dimensions(args, input_core_dims):
+ """
+ Parse broadcast and core dimensions for vectorize with a signature.
+
+ Arguments
+ ---------
+ args : Tuple[ndarray, ...]
+ Tuple of input arguments to examine.
+ input_core_dims : List[Tuple[str, ...]]
+ List of core dimensions corresponding to each input.
+
+ Returns
+ -------
+ broadcast_shape : Tuple[int, ...]
+ Common shape to broadcast all non-core dimensions to.
+ dim_sizes : Dict[str, int]
+ Common sizes for named core dimensions.
+ """
+ broadcast_args = []
+ dim_sizes = {}
+ for arg, core_dims in zip(args, input_core_dims):
+ _update_dim_sizes(dim_sizes, arg, core_dims)
+ ndim = arg.ndim - len(core_dims)
+ dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])
+ broadcast_args.append(dummy_array)
+ broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)
+ return broadcast_shape, dim_sizes
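+# The dummy arrays above are shape-only stand-ins whose contents are never
+# read; they exist solely so _broadcast_shape can combine the loop (non-core)
+# dimensions of all inputs without copying any data.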
+
+
+def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
+ """Helper for calculating broadcast shapes with core dimensions."""
+ return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims)
+ for core_dims in list_of_core_dims]
+
+
+def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,
+ results=None):
+ """Helper for creating output arrays in vectorize."""
+ shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
+ if dtypes is None:
+ dtypes = [None] * len(shapes)
+ if results is None:
+ arrays = tuple(np.empty(shape=shape, dtype=dtype)
+ for shape, dtype in zip(shapes, dtypes))
+ else:
+ arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype)
+ for result, shape, dtype
+ in zip(results, shapes, dtypes))
+ return arrays
+
+
+@set_module('numpy')
+class vectorize:
+ """
+ vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
+ signature=None)
+
+ Generalized function class.
+
+ Define a vectorized function which takes a nested sequence of objects or
+ numpy arrays as inputs and returns a single numpy array or a tuple of numpy
+ arrays. The vectorized function evaluates `pyfunc` over successive tuples
+ of the input arrays like the python map function, except it uses the
+ broadcasting rules of numpy.
+
+ The data type of the output of `vectorized` is determined by calling
+ the function with the first element of the input. This can be avoided
+ by specifying the `otypes` argument.
+
+ Parameters
+ ----------
+ pyfunc : callable
+ A python function or method.
+ otypes : str or list of dtypes, optional
+ The output data type. It must be specified as either a string of
+ typecode characters or a list of data type specifiers. There should
+ be one data type specifier for each output.
+ doc : str, optional
+ The docstring for the function. If None, the docstring will be the
+ ``pyfunc.__doc__``.
+ excluded : set, optional
+ Set of strings or integers representing the positional or keyword
+ arguments for which the function will not be vectorized. These will be
+ passed directly to `pyfunc` unmodified.
+
+ .. versionadded:: 1.7.0
+
+ cache : bool, optional
+ If `True`, then cache the first function call that determines the number
+ of outputs if `otypes` is not provided.
+
+ .. versionadded:: 1.7.0
+
+ signature : string, optional
+ Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
+ vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
+ be called with (and expected to return) arrays with shapes given by the
+ size of corresponding core dimensions. By default, ``pyfunc`` is
+ assumed to take scalars as input and output.
+
+ .. versionadded:: 1.12.0
+
+ Returns
+ -------
+ vectorized : callable
+ Vectorized function.
+
+ See Also
+ --------
+ frompyfunc : Takes an arbitrary Python function and returns a ufunc
+
+ Notes
+ -----
+ The `vectorize` function is provided primarily for convenience, not for
+ performance. The implementation is essentially a for loop.
+
+ If `otypes` is not specified, then a call to the function with the
+ first argument will be used to determine the number of outputs. The
+ results of this call will be cached if `cache` is `True` to prevent
+ calling the function twice. However, to implement the cache, the
+ original function must be wrapped which will slow down subsequent
+ calls, so only do this if your function is expensive.
+
+ Support for the keyword-argument interface and for the `excluded`
+ argument further degrades performance.
+
+ References
+ ----------
+ .. [1] :doc:`/reference/c-api/generalized-ufuncs`
+
+ Examples
+ --------
+ >>> def myfunc(a, b):
+ ... "Return a-b if a>b, otherwise return a+b"
+ ... if a > b:
+ ... return a - b
+ ... else:
+ ... return a + b
+
+ >>> vfunc = np.vectorize(myfunc)
+ >>> vfunc([1, 2, 3, 4], 2)
+ array([3, 4, 1, 2])
+
+ The docstring is taken from the input function to `vectorize` unless it
+ is specified:
+
+ >>> vfunc.__doc__
+ 'Return a-b if a>b, otherwise return a+b'
+ >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
+ >>> vfunc.__doc__
+ 'Vectorized `myfunc`'
+
+ The output type is determined by evaluating the first element of the input,
+ unless it is specified:
+
+ >>> out = vfunc([1, 2, 3, 4], 2)
+ >>> type(out[0])
+ <class 'numpy.int64'>
+ >>> vfunc = np.vectorize(myfunc, otypes=[float])
+ >>> out = vfunc([1, 2, 3, 4], 2)
+ >>> type(out[0])
+ <class 'numpy.float64'>
+
+ The `excluded` argument can be used to prevent vectorizing over certain
+ arguments. This can be useful for array-like arguments of a fixed length
+ such as the coefficients for a polynomial as in `polyval`:
+
+ >>> def mypolyval(p, x):
+ ... _p = list(p)
+ ... res = _p.pop(0)
+ ... while _p:
+ ... res = res*x + _p.pop(0)
+ ... return res
+ >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
+ >>> vpolyval(p=[1, 2, 3], x=[0, 1])
+ array([3, 6])
+
+ Positional arguments may also be excluded by specifying their position:
+
+ >>> vpolyval.excluded.add(0)
+ >>> vpolyval([1, 2, 3], x=[0, 1])
+ array([3, 6])
+
+ The `signature` argument allows for vectorizing functions that act on
+ non-scalar arrays of fixed length. For example, you can use it for a
+ vectorized calculation of Pearson correlation coefficient and its p-value:
+
+ >>> import scipy.stats
+ >>> pearsonr = np.vectorize(scipy.stats.pearsonr,
+ ... signature='(n),(n)->(),()')
+ >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
+ (array([ 1., -1.]), array([ 0., 0.]))
+
+ Or for a vectorized convolution:
+
+ >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
+ >>> convolve(np.eye(4), [1, 2, 1])
+ array([[1., 2., 1., 0., 0., 0.],
+ [0., 1., 2., 1., 0., 0.],
+ [0., 0., 1., 2., 1., 0.],
+ [0., 0., 0., 1., 2., 1.]])
+
+ """
+ def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
+ cache=False, signature=None):
+ self.pyfunc = pyfunc
+ self.cache = cache
+ self.signature = signature
+ self._ufunc = {} # Caching to improve default performance
+
+ if doc is None:
+ self.__doc__ = pyfunc.__doc__
+ else:
+ self.__doc__ = doc
+
+ if isinstance(otypes, str):
+ for char in otypes:
+ if char not in typecodes['All']:
+ raise ValueError("Invalid otype specified: %s" % (char,))
+ elif iterable(otypes):
+ otypes = ''.join([_nx.dtype(x).char for x in otypes])
+ elif otypes is not None:
+ raise ValueError("Invalid otype specification")
+ self.otypes = otypes
+
+ # Excluded variable support
+ if excluded is None:
+ excluded = set()
+ self.excluded = set(excluded)
+
+ if signature is not None:
+ self._in_and_out_core_dims = _parse_gufunc_signature(signature)
+ else:
+ self._in_and_out_core_dims = None
+
+ def __call__(self, *args, **kwargs):
+ """
+ Return arrays with the results of `pyfunc` broadcast (vectorized) over
+ `args` and `kwargs` not in `excluded`.
+ """
+ excluded = self.excluded
+ if not kwargs and not excluded:
+ func = self.pyfunc
+ vargs = args
+ else:
+ # The wrapper accepts only positional arguments: we use `names` and
+ # `inds` to mutate `the_args` and `kwargs` to pass to the original
+ # function.
+ nargs = len(args)
+
+ names = [_n for _n in kwargs if _n not in excluded]
+ inds = [_i for _i in range(nargs) if _i not in excluded]
+ the_args = list(args)
+
+ def func(*vargs):
+ for _n, _i in enumerate(inds):
+ the_args[_i] = vargs[_n]
+ kwargs.update(zip(names, vargs[len(inds):]))
+ return self.pyfunc(*the_args, **kwargs)
+
+ vargs = [args[_i] for _i in inds]
+ vargs.extend([kwargs[_n] for _n in names])
+
+ return self._vectorize_call(func=func, args=vargs)
+
+ def _get_ufunc_and_otypes(self, func, args):
+ """Return (ufunc, otypes)."""
+ # frompyfunc will fail if args is empty
+ if not args:
+ raise ValueError('args cannot be empty')
+
+ if self.otypes is not None:
+ otypes = self.otypes
+
+ # self._ufunc is a dictionary whose keys are the number of
+ # arguments (i.e. len(args)) and whose values are ufuncs created
+ # by frompyfunc. len(args) can be different for different calls if
+ # self.pyfunc has parameters with default values. We only use the
+ # cache when func is self.pyfunc, which occurs when the call uses
+ # only positional arguments and no arguments are excluded.
+
+ nin = len(args)
+ nout = len(self.otypes)
+ if func is not self.pyfunc or nin not in self._ufunc:
+ ufunc = frompyfunc(func, nin, nout)
+ else:
+ ufunc = None # We'll get it from self._ufunc
+ if func is self.pyfunc:
+ ufunc = self._ufunc.setdefault(nin, ufunc)
+ else:
+ # Get number of outputs and output types by calling the function on
+ # the first entries of args. We also cache the result to prevent
+ # the subsequent call when the ufunc is evaluated.
+ # Assumes that ufunc first evaluates the 0th elements in the input
+ # arrays (the input values are not checked to ensure this)
+ args = [asarray(arg) for arg in args]
+ if builtins.any(arg.size == 0 for arg in args):
+ raise ValueError('cannot call `vectorize` on size 0 inputs '
+ 'unless `otypes` is set')
+
+ inputs = [arg.flat[0] for arg in args]
+ outputs = func(*inputs)
+
+ # Performance note: profiling indicates that -- for simple
+ # functions at least -- this wrapping can almost double the
+ # execution time.
+ # Hence we make it optional.
+ if self.cache:
+ _cache = [outputs]
+
+ def _func(*vargs):
+ if _cache:
+ return _cache.pop()
+ else:
+ return func(*vargs)
+ else:
+ _func = func
+
+ if isinstance(outputs, tuple):
+ nout = len(outputs)
+ else:
+ nout = 1
+ outputs = (outputs,)
+
+ otypes = ''.join([asarray(outputs[_k]).dtype.char
+ for _k in range(nout)])
+
+ # Performance note: profiling indicates that creating the ufunc is
+ # not a significant cost compared with wrapping so it seems not
+ # worth trying to cache this.
+ ufunc = frompyfunc(_func, len(args), nout)
+
+ return ufunc, otypes
+
+ def _vectorize_call(self, func, args):
+ """Vectorized call to `func` over positional `args`."""
+ if self.signature is not None:
+ res = self._vectorize_call_with_signature(func, args)
+ elif not args:
+ res = func()
+ else:
+ ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
+
+ # Convert args to object arrays first
+ inputs = [asanyarray(a, dtype=object) for a in args]
+
+ outputs = ufunc(*inputs)
+
+ if ufunc.nout == 1:
+ res = asanyarray(outputs, dtype=otypes[0])
+ else:
+ res = tuple([asanyarray(x, dtype=t)
+ for x, t in zip(outputs, otypes)])
+ return res
+
+ def _vectorize_call_with_signature(self, func, args):
+ """Vectorized call over positional arguments with a signature."""
+ input_core_dims, output_core_dims = self._in_and_out_core_dims
+
+ if len(args) != len(input_core_dims):
+ raise TypeError('wrong number of positional arguments: '
+ 'expected %r, got %r'
+ % (len(input_core_dims), len(args)))
+ args = tuple(asanyarray(arg) for arg in args)
+
+ broadcast_shape, dim_sizes = _parse_input_dimensions(
+ args, input_core_dims)
+ input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
+ input_core_dims)
+ args = [np.broadcast_to(arg, shape, subok=True)
+ for arg, shape in zip(args, input_shapes)]
+
+ outputs = None
+ otypes = self.otypes
+ nout = len(output_core_dims)
+
+ for index in np.ndindex(*broadcast_shape):
+ results = func(*(arg[index] for arg in args))
+
+ n_results = len(results) if isinstance(results, tuple) else 1
+
+ if nout != n_results:
+ raise ValueError(
+ 'wrong number of outputs from pyfunc: expected %r, got %r'
+ % (nout, n_results))
+
+ if nout == 1:
+ results = (results,)
+
+ if outputs is None:
+ for result, core_dims in zip(results, output_core_dims):
+ _update_dim_sizes(dim_sizes, result, core_dims)
+
+ outputs = _create_arrays(broadcast_shape, dim_sizes,
+ output_core_dims, otypes, results)
+
+ for output, result in zip(outputs, results):
+ output[index] = result
+
+ if outputs is None:
+ # did not call the function even once
+ if otypes is None:
+ raise ValueError('cannot call `vectorize` on size 0 inputs '
+ 'unless `otypes` is set')
+ if builtins.any(dim not in dim_sizes
+ for dims in output_core_dims
+ for dim in dims):
+ raise ValueError('cannot call `vectorize` with a signature '
+ 'including new output dimensions on size 0 '
+ 'inputs')
+ outputs = _create_arrays(broadcast_shape, dim_sizes,
+ output_core_dims, otypes)
+
+ return outputs[0] if nout == 1 else outputs
+
+
+def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,
+ fweights=None, aweights=None, *, dtype=None):
+ return (m, y, fweights, aweights)
+
+
+@array_function_dispatch(_cov_dispatcher)
+def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
+ aweights=None, *, dtype=None):
+ """
+ Estimate a covariance matrix, given data and weights.
+
+ Covariance indicates the level to which two variables vary together.
+ If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
+ then the covariance matrix element :math:`C_{ij}` is the covariance of
+ :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
+ of :math:`x_i`.
+
+ See the notes for an outline of the algorithm.
+
+ Parameters
+ ----------
+ m : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `m` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same form
+ as that of `m`.
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : bool, optional
+ Default normalization (False) is by ``(N - 1)``, where ``N`` is the
+ number of observations given (unbiased estimate). If `bias` is True,
+ then normalization is by ``N``. These values can be overridden by using
+ the keyword ``ddof`` in numpy versions >= 1.5.
+ ddof : int, optional
+ If not ``None`` the default value implied by `bias` is overridden.
+ Note that ``ddof=1`` will return the unbiased estimate, even if both
+ `fweights` and `aweights` are specified, and ``ddof=0`` will return
+ the simple average. See the notes for the details. The default value
+ is ``None``.
+
+ .. versionadded:: 1.5
+ fweights : array_like, int, optional
+ 1-D array of integer frequency weights; the number of times each
+ observation vector should be repeated.
+
+ .. versionadded:: 1.10
+ aweights : array_like, optional
+ 1-D array of observation vector weights. These relative weights are
+ typically large for observations considered "important" and smaller for
+ observations considered less "important". If ``ddof=0`` the array of
+ weights can be used to assign probabilities to observation vectors.
+
+ .. versionadded:: 1.10
+ dtype : data-type, optional
+ Data-type of the result. By default, the return data-type will have
+ at least `numpy.float64` precision.
+
+ .. versionadded:: 1.20
+
+ Returns
+ -------
+ out : ndarray
+ The covariance matrix of the variables.
+
+ See Also
+ --------
+ corrcoef : Normalized covariance matrix
+
+ Notes
+ -----
+ Assume that the observations are in the columns of the observation
+ array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
+ steps to compute the weighted covariance are as follows::
+
+ >>> m = np.arange(10, dtype=np.float64)
+ >>> f = np.arange(10) * 2
+ >>> a = np.arange(10) ** 2.
+ >>> ddof = 1
+ >>> w = f * a
+ >>> v1 = np.sum(w)
+ >>> v2 = np.sum(w * a)
+ >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
+ >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
+
+ Note that when ``a == 1``, the normalization factor
+ ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
+ as it should.
+
+ Examples
+ --------
+ Consider two variables, :math:`x_0` and :math:`x_1`, which
+ correlate perfectly, but in opposite directions:
+
+ >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
+ >>> x
+ array([[0, 1, 2],
+ [2, 1, 0]])
+
+ Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
+ matrix shows this clearly:
+
+ >>> np.cov(x)
+ array([[ 1., -1.],
+ [-1., 1.]])
+
+ Note that element :math:`C_{0,1}`, which shows the correlation between
+ :math:`x_0` and :math:`x_1`, is negative.
+
+ Further, note how `x` and `y` are combined:
+
+ >>> x = [-2.1, -1, 4.3]
+ >>> y = [3, 1.1, 0.12]
+ >>> X = np.stack((x, y), axis=0)
+ >>> np.cov(X)
+ array([[11.71 , -4.286 ], # may vary
+ [-4.286 , 2.144133]])
+ >>> np.cov(x, y)
+ array([[11.71 , -4.286 ], # may vary
+ [-4.286 , 2.144133]])
+ >>> np.cov(x)
+ array(11.71)
+
+ """
+ # Check inputs
+ if ddof is not None and ddof != int(ddof):
+ raise ValueError(
+ "ddof must be integer")
+
+ # Handles complex arrays too
+ m = np.asarray(m)
+ if m.ndim > 2:
+ raise ValueError("m has more than 2 dimensions")
+
+ if y is not None:
+ y = np.asarray(y)
+ if y.ndim > 2:
+ raise ValueError("y has more than 2 dimensions")
+
+ if dtype is None:
+ if y is None:
+ dtype = np.result_type(m, np.float64)
+ else:
+ dtype = np.result_type(m, y, np.float64)
+
+ X = array(m, ndmin=2, dtype=dtype)
+ if not rowvar and X.shape[0] != 1:
+ X = X.T
+ if X.shape[0] == 0:
+ return np.array([]).reshape(0, 0)
+ if y is not None:
+ y = array(y, copy=False, ndmin=2, dtype=dtype)
+ if not rowvar and y.shape[0] != 1:
+ y = y.T
+ X = np.concatenate((X, y), axis=0)
+
+ if ddof is None:
+ if bias == 0:
+ ddof = 1
+ else:
+ ddof = 0
+
+ # Get the product of frequencies and weights
+ w = None
+ if fweights is not None:
+ fweights = np.asarray(fweights, dtype=float)
+ if not np.all(fweights == np.around(fweights)):
+ raise TypeError(
+ "fweights must be integer")
+ if fweights.ndim > 1:
+ raise RuntimeError(
+ "cannot handle multidimensional fweights")
+ if fweights.shape[0] != X.shape[1]:
+ raise RuntimeError(
+ "incompatible numbers of samples and fweights")
+ if any(fweights < 0):
+ raise ValueError(
+ "fweights cannot be negative")
+ w = fweights
+ if aweights is not None:
+ aweights = np.asarray(aweights, dtype=float)
+ if aweights.ndim > 1:
+ raise RuntimeError(
+ "cannot handle multidimensional aweights")
+ if aweights.shape[0] != X.shape[1]:
+ raise RuntimeError(
+ "incompatible numbers of samples and aweights")
+ if any(aweights < 0):
+ raise ValueError(
+ "aweights cannot be negative")
+ if w is None:
+ w = aweights
+ else:
+ w *= aweights
+
+ avg, w_sum = average(X, axis=1, weights=w, returned=True)
+ w_sum = w_sum[0]
+
+ # Determine the normalization
+ if w is None:
+ fact = X.shape[1] - ddof
+ elif ddof == 0:
+ fact = w_sum
+ elif aweights is None:
+ fact = w_sum - ddof
+ else:
+ fact = w_sum - ddof*sum(w*aweights)/w_sum
+
+ if fact <= 0:
+ warnings.warn("Degrees of freedom <= 0 for slice",
+ RuntimeWarning, stacklevel=3)
+ fact = 0.0
+
+ X -= avg[:, None]
+ if w is None:
+ X_T = X.T
+ else:
+ X_T = (X*w).T
+ c = dot(X, X_T.conj())
+ c *= np.true_divide(1, fact)
+ return c.squeeze()
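+# Quick self-check sketch for the unweighted case (w is None, ddof=1): the
+# result equals (Xc @ Xc.T.conj()) / (n - 1), where
+# Xc = X - X.mean(axis=1, keepdims=True) and n = X.shape[1].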
+
+
+def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *,
+ dtype=None):
+ return (x, y)
+
+
+@array_function_dispatch(_corrcoef_dispatcher)
+def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *,
+ dtype=None):
+ """
+ Return Pearson product-moment correlation coefficients.
+
+ Please refer to the documentation for `cov` for more detail. The
+ relationship between the correlation coefficient matrix, `R`, and the
+ covariance matrix, `C`, is
+
+ .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } }
+
+ The values of `R` are between -1 and 1, inclusive.
+
+ Parameters
+ ----------
+ x : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `x` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same
+ shape as `x`.
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.10.0
+ ddof : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.10.0
+ dtype : data-type, optional
+ Data-type of the result. By default, the return data-type will have
+ at least `numpy.float64` precision.
+
+ .. versionadded:: 1.20
+
+ Returns
+ -------
+ R : ndarray
+ The correlation coefficient matrix of the variables.
+
+ See Also
+ --------
+ cov : Covariance matrix
+
+ Notes
+ -----
+ Due to floating point rounding the resulting array may not be Hermitian,
+ the diagonal elements may not be 1, and the elements may not satisfy the
+ inequality abs(a) <= 1. The real and imaginary parts are clipped to the
+ interval [-1, 1] in an attempt to improve on that situation, but this is
+ not much help in the complex case.
+
+ This function accepts but discards arguments `bias` and `ddof`. This is
+ for backwards compatibility with previous versions of this function. These
+ arguments had no effect on the return values of the function and can be
+ safely ignored in this and previous versions of numpy.
+
+ Examples
+ --------
+ In this example we generate two random arrays, ``xarr`` and ``yarr``, and
+ compute the row-wise and column-wise Pearson correlation coefficients,
+ ``R``. Since ``rowvar`` is True by default, we first find the row-wise
+ Pearson correlation coefficients between the variables of ``xarr``.
+
+ >>> import numpy as np
+ >>> rng = np.random.default_rng(seed=42)
+ >>> xarr = rng.random((3, 3))
+ >>> xarr
+ array([[0.77395605, 0.43887844, 0.85859792],
+ [0.69736803, 0.09417735, 0.97562235],
+ [0.7611397 , 0.78606431, 0.12811363]])
+ >>> R1 = np.corrcoef(xarr)
+ >>> R1
+ array([[ 1. , 0.99256089, -0.68080986],
+ [ 0.99256089, 1. , -0.76492172],
+ [-0.68080986, -0.76492172, 1. ]])
+
+ If we add another set of variables and observations ``yarr``, we can
+ compute the row-wise Pearson correlation coefficients between the
+ variables in ``xarr`` and ``yarr``.
+
+ >>> yarr = rng.random((3, 3))
+ >>> yarr
+ array([[0.45038594, 0.37079802, 0.92676499],
+ [0.64386512, 0.82276161, 0.4434142 ],
+ [0.22723872, 0.55458479, 0.06381726]])
+ >>> R2 = np.corrcoef(xarr, yarr)
+ >>> R2
+ array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 ,
+ -0.99004057],
+ [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098,
+ -0.99981569],
+ [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355,
+ 0.77714685],
+ [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855,
+ -0.83571711],
+ [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. ,
+ 0.97517215],
+ [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215,
+ 1. ]])
+
+ Finally if we use the option ``rowvar=False``, the columns are now
+ being treated as the variables and we will find the column-wise Pearson
+ correlation coefficients between variables in ``xarr`` and ``yarr``.
+
+ >>> R3 = np.corrcoef(xarr, yarr, rowvar=False)
+ >>> R3
+ array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 ,
+ 0.22423734],
+ [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587,
+ -0.44069024],
+ [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648,
+ 0.75137473],
+ [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469,
+ 0.47536961],
+ [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. ,
+ -0.46666491],
+ [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491,
+ 1. ]])
+
+ """
+ if bias is not np._NoValue or ddof is not np._NoValue:
+ # 2015-03-15, 1.10
+ warnings.warn('bias and ddof have no effect and are deprecated',
+ DeprecationWarning, stacklevel=3)
+ c = cov(x, y, rowvar, dtype=dtype)
+ try:
+ d = diag(c)
+ except ValueError:
+ # scalar covariance
+ # nan if incorrect value (nan, inf, 0), 1 otherwise
+ return c / c
+ stddev = sqrt(d.real)
+ c /= stddev[:, None]
+ c /= stddev[None, :]
+
+ # Clip real and imaginary parts to [-1, 1]. This does not guarantee
+ # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
+ # excessive work.
+ np.clip(c.real, -1, 1, out=c.real)
+ if np.iscomplexobj(c):
+ np.clip(c.imag, -1, 1, out=c.imag)
+
+ return c
+
+
+@set_module('numpy')
+def blackman(M):
+ """
+ Return the Blackman window.
+
+ The Blackman window is a taper formed by using the first three
+ terms of a summation of cosines. It was designed to have as little
+ leakage as possible. It is close to optimal, only slightly worse than
+ a Kaiser window.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an empty
+ array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The window, with the maximum value normalized to one (the value one
+ appears only if the number of samples is odd).
+
+ See Also
+ --------
+ bartlett, hamming, hanning, kaiser
+
+ Notes
+ -----
+ The Blackman window is defined as
+
+ .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/(M-1)) + 0.08 \\cos(4\\pi n/(M-1))
+
+ Most references to the Blackman window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function. It is known as a
+ "near optimal" tapering function, almost as good (by some measures)
+ as the kaiser window.
+
+ References
+ ----------
+ Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
+ Dover Publications, New York.
+
+ Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
+ Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> np.blackman(12)
+ array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary
+ 4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
+ 9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
+ 1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
+
+ Plot the window and the frequency response:
+
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.blackman(51)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Blackman window")
+ Text(0.5, 1.0, 'Blackman window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> with np.errstate(divide='ignore', invalid='ignore'):
+ ... response = 20 * np.log10(mag)
+ ...
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of Blackman window")
+ Text(0.5, 1.0, 'Frequency response of Blackman window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> _ = plt.axis('tight')
+ >>> plt.show()
+
+ """
+ if M < 1:
+ return array([], dtype=np.result_type(M, 0.0))
+ if M == 1:
+ return ones(1, dtype=np.result_type(M, 0.0))
+ n = arange(1-M, M, 2)  # odd integers centered on zero -> symmetric window
+ return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
+
+
+@set_module('numpy')
+def bartlett(M):
+ """
+ Return the Bartlett window.
+
+ The Bartlett window is very similar to a triangular window, except
+ that the end points are at zero. It is often used in signal
+ processing for tapering a signal, without generating too much
+ ripple in the frequency domain.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+
+ Returns
+ -------
+ out : array
+ The triangular window, with the maximum value normalized to one
+ (the value one appears only if the number of samples is odd), with
+ the first and last samples equal to zero.
+
+ See Also
+ --------
+ blackman, hamming, hanning, kaiser
+
+ Notes
+ -----
+ The Bartlett window is defined as
+
+ .. math:: w(n) = \\frac{2}{M-1} \\left(
+ \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
+ \\right)
+
+ Most references to the Bartlett window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. Note that convolution with this window produces linear
+ interpolation. It is also known as an apodization (which means "removing
+ the foot", i.e. smoothing discontinuities at the beginning and end of the
+ sampled signal) or tapering function. The Fourier transform of the
+ Bartlett window is the product of two sinc functions. Note the excellent
+ discussion in Kanasewich [2]_.
+
+ References
+ ----------
+ .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+ Biometrika 37, 1-16, 1950.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+ The University of Alberta Press, 1975, pp. 109-110.
+ .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
+ Processing", Prentice-Hall, 1999, pp. 468-471.
+ .. [4] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+ .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+ "Numerical Recipes", Cambridge University Press, 1986, page 429.
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> np.bartlett(12)
+ array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary
+ 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
+ 0.18181818, 0. ])
+
+ Plot the window and its frequency response (requires matplotlib):
+
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.bartlett(51)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Bartlett window")
+ Text(0.5, 1.0, 'Bartlett window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> with np.errstate(divide='ignore', invalid='ignore'):
+ ... response = 20 * np.log10(mag)
+ ...
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of Bartlett window")
+ Text(0.5, 1.0, 'Frequency response of Bartlett window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> _ = plt.axis('tight')
+ >>> plt.show()
+
+ """
+ if M < 1:
+ return array([], dtype=np.result_type(M, 0.0))
+ if M == 1:
+ return ones(1, dtype=np.result_type(M, 0.0))
+ n = arange(1-M, M, 2)
+ return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
+
+
+@set_module('numpy')
+def hanning(M):
+ """
+ Return the Hanning window.
+
+ The Hanning window is a taper formed by using a weighted cosine.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+
+ Returns
+ -------
+ out : ndarray, shape(M,)
+ The window, with the maximum value normalized to one (the value
+ one appears only if `M` is odd).
+
+ See Also
+ --------
+ bartlett, blackman, hamming, kaiser
+
+ Notes
+ -----
+ The Hanning window is defined as
+
+ .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
+ \\qquad 0 \\leq n \\leq M-1
+
+ The Hanning window was named for Julius von Hann, an Austrian meteorologist.
+ It is also known as the Cosine Bell. Some authors prefer that it be
+ called a Hann window, to help avoid confusion with the very similar
+ Hamming window.
+
+ Most references to the Hanning window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+ .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+ spectra, Dover Publications, New York.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+ The University of Alberta Press, 1975, pp. 106-108.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+ .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+ "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+ Examples
+ --------
+ >>> np.hanning(12)
+ array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
+ 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
+ 0.07937323, 0. ])
+
+ Plot the window and its frequency response:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.hanning(51)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Hann window")
+ Text(0.5, 1.0, 'Hann window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> with np.errstate(divide='ignore', invalid='ignore'):
+ ... response = 20 * np.log10(mag)
+ ...
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of the Hann window")
+ Text(0.5, 1.0, 'Frequency response of the Hann window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> plt.axis('tight')
+ ...
+ >>> plt.show()
+
+ """
+ if M < 1:
+ return array([], dtype=np.result_type(M, 0.0))
+ if M == 1:
+ return ones(1, dtype=np.result_type(M, 0.0))
+ n = arange(1-M, M, 2)
+ return 0.5 + 0.5*cos(pi*n/(M-1))
+
+
+@set_module('numpy')
+def hamming(M):
+ """
+ Return the Hamming window.
+
+ The Hamming window is a taper formed by using a weighted cosine.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The window, with the maximum value normalized to one (the value
+ one appears only if the number of samples is odd).
+
+ See Also
+ --------
+ bartlett, blackman, hanning, kaiser
+
+ Notes
+ -----
+ The Hamming window is defined as
+
+ .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
+ \\qquad 0 \\leq n \\leq M-1
+
+ The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey
+ and is described in Blackman and Tukey. It was recommended for
+ smoothing the truncated autocovariance function in the time domain.
+ Most references to the Hamming window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+ .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
+ spectra, Dover Publications, New York.
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+ University of Alberta Press, 1975, pp. 109-110.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+ .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+ "Numerical Recipes", Cambridge University Press, 1986, page 425.
+
+ Examples
+ --------
+ >>> np.hamming(12)
+ array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary
+ 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
+ 0.15302337, 0.08 ])
+
+ Plot the window and the frequency response:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.hamming(51)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Hamming window")
+ Text(0.5, 1.0, 'Hamming window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> response = 20 * np.log10(mag)
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of Hamming window")
+ Text(0.5, 1.0, 'Frequency response of Hamming window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> plt.axis('tight')
+ ...
+ >>> plt.show()
+
+ """
+ if M < 1:
+ return array([], dtype=np.result_type(M, 0.0))
+ if M == 1:
+ return ones(1, dtype=np.result_type(M, 0.0))
+ n = arange(1-M, M, 2)
+ return 0.54 + 0.46*cos(pi*n/(M-1))
+
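+# Editorial sketch (not part of the original source): `hanning` and `hamming`
+# above are both instances of the generalized raised-cosine window
+#     w(k) = a0 - (1 - a0) * cos(2*pi*k / (M - 1)),   k = 0, ..., M-1,
+# with a0 = 0.5 for Hann and a0 = 0.54 for Hamming.  A quick check:
+#
+#     import numpy as np
+#     M, a0 = 12, 0.54
+#     k = np.arange(M)
+#     w = a0 - (1 - a0) * np.cos(2 * np.pi * k / (M - 1))
+#     assert np.allclose(w, np.hamming(M))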
+
+## Code from cephes for i0
+
+_i0A = [
+ -4.41534164647933937950E-18,
+ 3.33079451882223809783E-17,
+ -2.43127984654795469359E-16,
+ 1.71539128555513303061E-15,
+ -1.16853328779934516808E-14,
+ 7.67618549860493561688E-14,
+ -4.85644678311192946090E-13,
+ 2.95505266312963983461E-12,
+ -1.72682629144155570723E-11,
+ 9.67580903537323691224E-11,
+ -5.18979560163526290666E-10,
+ 2.65982372468238665035E-9,
+ -1.30002500998624804212E-8,
+ 6.04699502254191894932E-8,
+ -2.67079385394061173391E-7,
+ 1.11738753912010371815E-6,
+ -4.41673835845875056359E-6,
+ 1.64484480707288970893E-5,
+ -5.75419501008210370398E-5,
+ 1.88502885095841655729E-4,
+ -5.76375574538582365885E-4,
+ 1.63947561694133579842E-3,
+ -4.32430999505057594430E-3,
+ 1.05464603945949983183E-2,
+ -2.37374148058994688156E-2,
+ 4.93052842396707084878E-2,
+ -9.49010970480476444210E-2,
+ 1.71620901522208775349E-1,
+ -3.04682672343198398683E-1,
+ 6.76795274409476084995E-1
+ ]
+
+_i0B = [
+ -7.23318048787475395456E-18,
+ -4.83050448594418207126E-18,
+ 4.46562142029675999901E-17,
+ 3.46122286769746109310E-17,
+ -2.82762398051658348494E-16,
+ -3.42548561967721913462E-16,
+ 1.77256013305652638360E-15,
+ 3.81168066935262242075E-15,
+ -9.55484669882830764870E-15,
+ -4.15056934728722208663E-14,
+ 1.54008621752140982691E-14,
+ 3.85277838274214270114E-13,
+ 7.18012445138366623367E-13,
+ -1.79417853150680611778E-12,
+ -1.32158118404477131188E-11,
+ -3.14991652796324136454E-11,
+ 1.18891471078464383424E-11,
+ 4.94060238822496958910E-10,
+ 3.39623202570838634515E-9,
+ 2.26666899049817806459E-8,
+ 2.04891858946906374183E-7,
+ 2.89137052083475648297E-6,
+ 6.88975834691682398426E-5,
+ 3.36911647825569408990E-3,
+ 8.04490411014108831608E-1
+ ]
+
+
+def _chbevl(x, vals):
+ # Evaluate a Chebyshev series with coefficients `vals` at `x` using
+ # Clenshaw's backward recurrence: b0 = x*b1 - b2 + c_i.
+ b0 = vals[0]
+ b1 = 0.0
+
+ for i in range(1, len(vals)):
+ b2 = b1
+ b1 = b0
+ b0 = x*b1 - b2 + vals[i]
+
+ return 0.5*(b0 - b2)
+
+
+def _i0_1(x):
+ return exp(x) * _chbevl(x/2.0-2, _i0A)
+
+
+def _i0_2(x):
+ return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
+
+
+def _i0_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_i0_dispatcher)
+def i0(x):
+ """
+ Modified Bessel function of the first kind, order 0.
+
+ Usually denoted :math:`I_0`.
+
+ Parameters
+ ----------
+ x : array_like of float
+ Argument of the Bessel function.
+
+ Returns
+ -------
+ out : ndarray, shape = x.shape, dtype = float
+ The modified Bessel function evaluated at each of the elements of `x`.
+
+ See Also
+ --------
+ scipy.special.i0, scipy.special.iv, scipy.special.ive
+
+ Notes
+ -----
+ The scipy implementation is recommended over this function: it is a
+ proper ufunc written in C, and more than an order of magnitude faster.
+
+ We use the algorithm published by Clenshaw [1]_ and referenced by
+ Abramowitz and Stegun [2]_, for which the function domain is
+ partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
+ polynomial expansions are employed in each interval. Relative error on
+ the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
+ peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
+
+ References
+ ----------
+ .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
+ *National Physical Laboratory Mathematical Tables*, vol. 5, London:
+ Her Majesty's Stationery Office, 1962.
+ .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
+ Functions*, 10th printing, New York: Dover, 1964, pp. 379.
+ https://personal.math.ubc.ca/~cbm/aands/page_379.htm
+ .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero
+
+ Examples
+ --------
+ >>> np.i0(0.)
+ array(1.0)
+ >>> np.i0([0, 1, 2, 3])
+ array([1. , 1.26606588, 2.2795853 , 4.88079259])
+
+ """
+ x = np.asanyarray(x)
+ if x.dtype.kind == 'c':
+ raise TypeError("i0 not supported for complex values")
+ if x.dtype.kind != 'f':
+ x = x.astype(float)
+ x = np.abs(x)
+ return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])
+
+## End of cephes code for i0
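+
+# Editorial sketch (not part of the original source): the two Chebyshev
+# branches above meet at x == 8.  Assuming SciPy is installed, a quick
+# consistency check against its cephes-based implementation:
+#
+#     import numpy as np
+#     from scipy.special import i0 as sp_i0
+#     x = np.linspace(0.0, 30.0, 7)
+#     assert np.allclose(np.i0(x), sp_i0(x), rtol=1e-12)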
+
+
+@set_module('numpy')
+def kaiser(M, beta):
+ """
+ Return the Kaiser window.
+
+ The Kaiser window is a taper formed by using a Bessel function.
+
+ Parameters
+ ----------
+ M : int
+ Number of points in the output window. If zero or less, an
+ empty array is returned.
+ beta : float
+ Shape parameter for window.
+
+ Returns
+ -------
+ out : array
+ The window, with the maximum value normalized to one (the value
+ one appears only if the number of samples is odd).
+
+ See Also
+ --------
+ bartlett, blackman, hamming, hanning
+
+ Notes
+ -----
+ The Kaiser window is defined as
+
+ .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
+ \\right)/I_0(\\beta)
+
+ with
+
+ .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
+
+ where :math:`I_0` is the modified zeroth-order Bessel function.
+
+ The Kaiser window was named for Jim Kaiser, who discovered a simple
+ approximation to the DPSS window based on Bessel functions. The Kaiser
+ window is a very good approximation to the Digital Prolate Spheroidal
+ Sequence, or Slepian window, which is the window that maximizes the
+ energy in the main lobe of the window relative to total energy.
+
+ The Kaiser can approximate many other windows by varying the beta
+ parameter.
+
+ ==== =======================
+ beta Window shape
+ ==== =======================
+ 0 Rectangular
+ 5 Similar to a Hamming
+ 6 Similar to a Hanning
+ 8.6 Similar to a Blackman
+ ==== =======================
+
+ A beta value of 14 is probably a good starting point. Note that as beta
+ gets large, the window narrows, so the number of samples needs to be
+ large enough to sample the increasingly narrow spike; otherwise NaNs
+ will be returned.
+
+ Most references to the Kaiser window come from the signal processing
+ literature, where it is used as one of many windowing functions for
+ smoothing values. It is also known as an apodization (which means
+ "removing the foot", i.e. smoothing discontinuities at the beginning
+ and end of the sampled signal) or tapering function.
+
+ References
+ ----------
+ .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
+ digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
+ John Wiley and Sons, New York, (1966).
+ .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
+ University of Alberta Press, 1975, pp. 177-178.
+ .. [3] Wikipedia, "Window function",
+ https://en.wikipedia.org/wiki/Window_function
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> np.kaiser(12, 14)
+ array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
+ 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
+ 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
+ 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
+
+
+ Plot the window and the frequency response:
+
+ >>> from numpy.fft import fft, fftshift
+ >>> window = np.kaiser(51, 14)
+ >>> plt.plot(window)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Kaiser window")
+ Text(0.5, 1.0, 'Kaiser window')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("Sample")
+ Text(0.5, 0, 'Sample')
+ >>> plt.show()
+
+ >>> plt.figure()
+ <Figure size 640x480 with 0 Axes>
+ >>> A = fft(window, 2048) / 25.5
+ >>> mag = np.abs(fftshift(A))
+ >>> freq = np.linspace(-0.5, 0.5, len(A))
+ >>> response = 20 * np.log10(mag)
+ >>> response = np.clip(response, -100, 100)
+ >>> plt.plot(freq, response)
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Frequency response of Kaiser window")
+ Text(0.5, 1.0, 'Frequency response of Kaiser window')
+ >>> plt.ylabel("Magnitude [dB]")
+ Text(0, 0.5, 'Magnitude [dB]')
+ >>> plt.xlabel("Normalized frequency [cycles per sample]")
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> plt.axis('tight')
+ (-0.5, 0.5, -100.0, ...) # may vary
+ >>> plt.show()
+
+ """
+ if M == 1:
+ return np.ones(1, dtype=np.result_type(M, 0.0))
+ n = arange(0, M)
+ alpha = (M-1)/2.0
+ return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
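+
+# Editorial sketch (not part of the original source): per the beta table in
+# the docstring, beta == 0 makes the Bessel argument irrelevant and the
+# window reduces to a rectangular window, since i0(0) == 1:
+#
+#     import numpy as np
+#     assert np.allclose(np.kaiser(10, 0.0), np.ones(10))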
+
+
+def _sinc_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_sinc_dispatcher)
+def sinc(x):
+ r"""
+ Return the normalized sinc function.
+
+ The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument
+ :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not
+ only everywhere continuous but also infinitely differentiable.
+
+ .. note::
+
+ Note the normalization factor of ``pi`` used in the definition.
+ This is the most commonly used definition in signal processing.
+ Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
+ :math:`\sin(x)/x` that is more common in mathematics.
+
+ Parameters
+ ----------
+ x : ndarray
+ Array (possibly multi-dimensional) of values for which to calculate
+ ``sinc(x)``.
+
+ Returns
+ -------
+ out : ndarray
+ ``sinc(x)``, which has the same shape as the input.
+
+ Notes
+ -----
+ The name sinc is short for "sine cardinal" or "sinus cardinalis".
+
+ The sinc function is used in various signal processing applications,
+ including in anti-aliasing, in the construction of a Lanczos resampling
+ filter, and in interpolation.
+
+ For bandlimited interpolation of discrete-time signals, the ideal
+ interpolation kernel is proportional to the sinc function.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
+ Resource. http://mathworld.wolfram.com/SincFunction.html
+ .. [2] Wikipedia, "Sinc function",
+ https://en.wikipedia.org/wiki/Sinc_function
+
+ Examples
+ --------
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.linspace(-4, 4, 41)
+ >>> np.sinc(x)
+ array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary
+ -8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
+ 6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
+ 8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
+ -1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
+ 3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
+ 7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
+ 9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
+ 2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
+ -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
+ -3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
+ 1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
+ -5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
+ -4.92362781e-02, -3.89804309e-17])
+
+ >>> plt.plot(x, np.sinc(x))
+ [<matplotlib.lines.Line2D object at 0x...>]
+ >>> plt.title("Sinc Function")
+ Text(0.5, 1.0, 'Sinc Function')
+ >>> plt.ylabel("Amplitude")
+ Text(0, 0.5, 'Amplitude')
+ >>> plt.xlabel("X")
+ Text(0.5, 0, 'X')
+ >>> plt.show()
+
+ """
+ x = np.asanyarray(x)
+ # Replace exact zeros with a tiny value so that sin(y)/y is well defined
+ # there; sin(pi*1e-20)/(pi*1e-20) evaluates to 1.0 in double precision.
+ y = pi * where(x == 0, 1.0e-20, x)
+ return sin(y)/y
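+
+# Editorial example (not part of the original source), illustrating the
+# normalization note in the docstring: the unnormalized sinc sin(x)/x is
+# obtained by rescaling the argument, e.g.
+#
+#     import numpy as np
+#     np.sinc(2.0 / np.pi)   # == sin(2)/2 ~= 0.4546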
+
+
+def _msort_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_msort_dispatcher)
+def msort(a):
+ """
+ Return a copy of an array sorted along the first axis.
+
+ .. deprecated:: 1.24
+
+ msort is deprecated, use ``np.sort(a, axis=0)`` instead.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+
+ Returns
+ -------
+ sorted_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ sort
+
+ Notes
+ -----
+ ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 4], [3, 1]])
+ >>> np.msort(a) # sort along the first axis
+ array([[1, 1],
+ [3, 4]])
+
+ """
+ # 2022-10-20 1.24
+ warnings.warn(
+ "msort is deprecated, use np.sort(a, axis=0) instead",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ b = array(a, subok=True, copy=True)
+ b.sort(0)
+ return b
+
+
+def _ureduce(a, func, keepdims=False, **kwargs):
+ """
+ Internal Function.
+ Call `func` with `a` as first argument, swapping axes so that extended
+ (multi-axis) reduction works on functions that don't support it natively.
+
+ Returns the result, reinserting reduced axes when `keepdims` is True.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ func : callable
+ Reduction function capable of receiving a single axis argument.
+ It is called with `a` as first argument followed by `kwargs`.
+ kwargs : keyword arguments
+ additional keyword arguments to pass to `func`.
+
+ Returns
+ -------
+ result : ndarray
+ Result of ``func(a, **kwargs)``. If `keepdims` is True, the reduced
+ axes are reinserted as size-one dimensions so the result broadcasts
+ like a reduction with ``keepdims=True``; if `out` is given, it is
+ returned.
+
+ """
+ a = np.asanyarray(a)
+ axis = kwargs.get('axis', None)
+ out = kwargs.get('out', None)
+
+ if keepdims is np._NoValue:
+ keepdims = False
+
+ nd = a.ndim
+ if axis is not None:
+ axis = _nx.normalize_axis_tuple(axis, nd)
+
+ if keepdims:
+ if out is not None:
+ index_out = tuple(
+ 0 if i in axis else slice(None) for i in range(nd))
+ kwargs['out'] = out[(Ellipsis, ) + index_out]
+
+ if len(axis) == 1:
+ kwargs['axis'] = axis[0]
+ else:
+ keep = set(range(nd)) - set(axis)
+ nkeep = len(keep)
+ # swap axis that should not be reduced to front
+ for i, s in enumerate(sorted(keep)):
+ a = a.swapaxes(i, s)
+ # merge reduced axis
+ a = a.reshape(a.shape[:nkeep] + (-1,))
+ kwargs['axis'] = -1
+ else:
+ if keepdims:
+ if out is not None:
+ index_out = (0, ) * nd
+ kwargs['out'] = out[(Ellipsis, ) + index_out]
+
+ r = func(a, **kwargs)
+
+ if out is not None:
+ return out
+
+ if keepdims:
+ if axis is None:
+ index_r = (np.newaxis, ) * nd
+ else:
+ index_r = tuple(
+ np.newaxis if i in axis else slice(None)
+ for i in range(nd))
+ r = r[(Ellipsis, ) + index_r]
+
+ return r
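+
+# Editorial note (not part of the original source): the reductions below are
+# routed through `_ureduce`; e.g. `median` calls
+#     _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
+#              overwrite_input=overwrite_input)
+# and `_ureduce` reinserts the reduced axes as size-one dimensions when
+# keepdims=True.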
+
+
+def _median_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_median_dispatcher)
+def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+ """
+ Compute the median along the specified axis.
+
+ Returns the median of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the medians are computed. The default
+ is to compute the median along a flattened version of the array.
+ A sequence of axes is supported since version 1.9.0.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow use of memory of input array `a` for
+ calculations. The input array will be modified by the call to
+ `median`. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. If `overwrite_input` is ``True`` and `a` is not already an
+ `ndarray`, an error will be raised.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `arr`.
+
+ .. versionadded:: 1.9.0
+
+ Returns
+ -------
+ median : ndarray
+ A new array holding the result. If the input contains integers
+ or floats smaller than ``float64``, then the output data-type is
+ ``np.float64``. Otherwise, the data-type of the output is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean, percentile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+ ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
+ two middle values of ``V_sorted`` when ``N`` is even.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.median(a)
+ 3.5
+ >>> np.median(a, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.median(a, axis=1)
+ array([7., 2.])
+ >>> m = np.median(a, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.median(a, axis=0, out=m)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.median(b, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+ >>> b = a.copy()
+ >>> np.median(b, axis=None, overwrite_input=True)
+ 3.5
+ >>> assert not np.all(a==b)
+
+ """
+ return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
+ overwrite_input=overwrite_input)
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+ # can't reasonably be implemented in terms of percentile as we have to
+ # call mean so as not to break astropy
+ a = np.asanyarray(a)
+
+ # Set the partition indexes
+ if axis is None:
+ sz = a.size
+ else:
+ sz = a.shape[axis]
+ if sz % 2 == 0:
+ szh = sz // 2
+ kth = [szh - 1, szh]
+ else:
+ kth = [(sz - 1) // 2]
+ # Check if the array contains any nan's
+ if np.issubdtype(a.dtype, np.inexact):
+ kth.append(-1)
+
+ if overwrite_input:
+ if axis is None:
+ part = a.ravel()
+ part.partition(kth)
+ else:
+ a.partition(kth, axis=axis)
+ part = a
+ else:
+ part = partition(a, kth, axis=axis)
+
+ if part.shape == ():
+ # make 0-D arrays work
+ return part.item()
+ if axis is None:
+ axis = 0
+
+ indexer = [slice(None)] * part.ndim
+ index = part.shape[axis] // 2
+ if part.shape[axis] % 2 == 1:
+ # index with slice to allow mean (below) to work
+ indexer[axis] = slice(index, index+1)
+ else:
+ indexer[axis] = slice(index-1, index+1)
+ indexer = tuple(indexer)
+
+ # Use mean in both odd and even case to coerce data type,
+ # using out array if needed.
+ rout = mean(part[indexer], axis=axis, out=out)
+ # Check if the array contains any nan's
+ if np.issubdtype(a.dtype, np.inexact) and sz > 0:
+ # If nans are possible, warn and replace by nans like mean would.
+ rout = np.lib.utils._median_nancheck(part, rout, axis)
+
+ return rout
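+
+# Editorial worked example (not part of the original source): for a 1-d
+# partitioned `part` of even length 4, say [1, 2, 3, 4], `indexer` becomes
+# slice(1, 3) and the median is mean([2, 3]) == 2.5; for odd length the
+# slice selects a single element and `mean` merely coerces the dtype.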
+
+
+def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, interpolation=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_percentile_dispatcher)
+def percentile(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False,
+ *,
+ interpolation=None):
+ """
+ Compute the q-th percentile of the data along the specified axis.
+
+ Returns the q-th percentile(s) of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Percentile or sequence of percentiles to compute, which must be between
+ 0 and 100 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the percentiles are computed. The
+ default is to compute the percentile(s) along a flattened
+ version of the array.
+
+ .. versionchanged:: 1.9.0
+ A tuple of axes is supported
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+ `a` after this function completes is undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ percentile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ .. versionadded:: 1.9.0
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ percentile : scalar or ndarray
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple percentiles are given, first axis of
+ the result corresponds to the percentiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ median : equivalent to ``percentile(..., 50)``
+ nanpercentile
+ quantile : equivalent to percentile, except q in the range [0, 1].
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``n``, the q-th percentile of ``V`` is
+ the value ``q/100`` of the way from the minimum to the maximum in a
+ sorted copy of ``V``. The values and distances of the two nearest
+ neighbors as well as the `method` parameter will determine the
+ percentile if the normalized ranking does not match the location of
+ ``q`` exactly. This function is the same as the median if ``q=50``, the
+ same as the minimum if ``q=0`` and the same as the maximum if
+ ``q=100``.
+
+ The optional `method` parameter specifies the method to use when the
+ desired percentile lies between two indexes ``i`` and ``j = i + 1``.
+ In that case, we first determine ``i + g``, a virtual index that lies
+ between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the
+ fractional part of the index. The final result is, then, an interpolation
+ of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``,
+ ``i`` and ``j`` are modified using correction constants ``alpha`` and
+ ``beta`` whose choices depend on the ``method`` used. Finally, note that
+ since Python uses 0-based indexing, the code subtracts another 1 from the
+ index internally.
+
+ The following formula determines the virtual index ``i + g``, the location
+ of the percentile in the sorted sample:
+
+ .. math::
+ i + g = (q / 100) * ( n - alpha - beta + 1 ) + alpha
+
+ The different methods then work as follows:
+
+ inverted_cdf:
+ method 1 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 ; then take i
+
+ averaged_inverted_cdf:
+ method 2 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 ; then average between bounds
+
+ closest_observation:
+ method 3 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 and index is odd ; then take j
+ * if g = 0 and index is even ; then take i
+
+ interpolated_inverted_cdf:
+ method 4 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 0
+ * beta = 1
+
+ hazen:
+ method 5 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 1/2
+ * beta = 1/2
+
+ weibull:
+ method 6 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 0
+ * beta = 0
+
+ linear:
+ method 7 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 1
+ * beta = 1
+
+ median_unbiased:
+ method 8 of H&F [1]_.
+ This method is probably the best method if the sample
+ distribution function is unknown (see reference).
+ This method gives continuous results using:
+
+ * alpha = 1/3
+ * beta = 1/3
+
+ normal_unbiased:
+ method 9 of H&F [1]_.
+ This method is probably the best method if the sample
+ distribution function is known to be normal.
+ This method gives continuous results using:
+
+ * alpha = 3/8
+ * beta = 3/8
+
+ lower:
+ NumPy method kept for backwards compatibility.
+ Takes ``i`` as the interpolation point.
+
+ higher:
+ NumPy method kept for backwards compatibility.
+ Takes ``j`` as the interpolation point.
+
+ nearest:
+ NumPy method kept for backwards compatibility.
+ Takes ``i`` or ``j``, whichever is nearest.
+
+ midpoint:
+ NumPy method kept for backwards compatibility.
+ Uses ``(i + j) / 2``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.percentile(a, 50)
+ 3.5
+ >>> np.percentile(a, 50, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.percentile(a, 50, axis=1)
+ array([7., 2.])
+ >>> np.percentile(a, 50, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+
+ >>> m = np.percentile(a, 50, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.percentile(a, 50, axis=0, out=out)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+
+ >>> b = a.copy()
+ >>> np.percentile(b, 50, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a == b)
+
+ The different methods can be visualized graphically:
+
+ .. plot::
+
+ import matplotlib.pyplot as plt
+
+ a = np.arange(4)
+ p = np.linspace(0, 100, 6001)
+ ax = plt.gca()
+ lines = [
+ ('linear', '-', 'C0'),
+ ('inverted_cdf', ':', 'C1'),
+ # Almost the same as `inverted_cdf`:
+ ('averaged_inverted_cdf', '-.', 'C1'),
+ ('closest_observation', ':', 'C2'),
+ ('interpolated_inverted_cdf', '--', 'C1'),
+ ('hazen', '--', 'C3'),
+ ('weibull', '-.', 'C4'),
+ ('median_unbiased', '--', 'C5'),
+ ('normal_unbiased', '-.', 'C6'),
+ ]
+ for method, style, color in lines:
+ ax.plot(
+ p, np.percentile(a, p, method=method),
+ label=method, linestyle=style, color=color)
+ ax.set(
+ title='Percentiles for different methods and data: ' + str(a),
+ xlabel='Percentile',
+ ylabel='Estimated percentile value',
+ yticks=a)
+ ax.legend()
+ plt.show()
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = _check_interpolation_as_method(
+ method, interpolation, "percentile")
+ q = np.true_divide(q, 100)
+ q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105)
+ if not _quantile_is_valid(q):
+ raise ValueError("Percentiles must be in the range [0, 100]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims)
+
+
+def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, interpolation=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_quantile_dispatcher)
+def quantile(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False,
+ *,
+ interpolation=None):
+ """
+ Compute the q-th quantile of the data along the specified axis.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The default is
+ to compute the quantile(s) along a flattened version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape and buffer length as the expected output, but the
+ type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by
+ intermediate calculations, to save memory. In this case, the
+ contents of the input `a` after this function completes is
+ undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ quantile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single quantile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean
+ percentile : equivalent to quantile, but with q in the range [0, 100].
+ median : equivalent to ``quantile(..., 0.5)``
+ nanquantile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``n``, the q-th quantile of ``V`` is
+ the value ``q`` of the way from the minimum to the maximum in a
+ sorted copy of ``V``. The values and distances of the two nearest
+ neighbors as well as the `method` parameter will determine the
+ quantile if the normalized ranking does not match the location of
+ ``q`` exactly. This function is the same as the median if ``q=0.5``, the
+ same as the minimum if ``q=0.0`` and the same as the maximum if
+ ``q=1.0``.
+
+ The optional `method` parameter specifies the method to use when the
+ desired quantile lies between two indexes ``i`` and ``j = i + 1``.
+ In that case, we first determine ``i + g``, a virtual index that lies
+ between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the
+ fractional part of the index. The final result is, then, an interpolation
+ of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``,
+ ``i`` and ``j`` are modified using correction constants ``alpha`` and
+ ``beta`` whose choices depend on the ``method`` used. Finally, note that
+ since Python uses 0-based indexing, the code subtracts another 1 from the
+ index internally.
+
+ The following formula determines the virtual index ``i + g``, the location
+ of the quantile in the sorted sample:
+
+ .. math::
+ i + g = q * ( n - alpha - beta + 1 ) + alpha
+
+ The different methods then work as follows:
+
+ inverted_cdf:
+ method 1 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 ; then take i
+
+ averaged_inverted_cdf:
+ method 2 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 ; then average between bounds
+
+ closest_observation:
+ method 3 of H&F [1]_.
+ This method gives discontinuous results:
+
+ * if g > 0 ; then take j
+ * if g = 0 and index is odd ; then take j
+ * if g = 0 and index is even ; then take i
+
+ interpolated_inverted_cdf:
+ method 4 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 0
+ * beta = 1
+
+ hazen:
+ method 5 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 1/2
+ * beta = 1/2
+
+ weibull:
+ method 6 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 0
+ * beta = 0
+
+ linear:
+ method 7 of H&F [1]_.
+ This method gives continuous results using:
+
+ * alpha = 1
+ * beta = 1
+
+ median_unbiased:
+ method 8 of H&F [1]_.
+ This method is probably the best method if the sample
+ distribution function is unknown (see reference).
+ This method gives continuous results using:
+
+ * alpha = 1/3
+ * beta = 1/3
+
+ normal_unbiased:
+ method 9 of H&F [1]_.
+ This method is probably the best method if the sample
+ distribution function is known to be normal.
+ This method gives continuous results using:
+
+ * alpha = 3/8
+ * beta = 3/8
+
+ lower:
+ NumPy method kept for backwards compatibility.
+ Takes ``i`` as the interpolation point.
+
+ higher:
+ NumPy method kept for backwards compatibility.
+ Takes ``j`` as the interpolation point.
+
+ nearest:
+ NumPy method kept for backwards compatibility.
+ Takes ``i`` or ``j``, whichever is nearest.
+
+ midpoint:
+ NumPy method kept for backwards compatibility.
+ Uses ``(i + j) / 2``.
+
+ Examples
+ --------
+ >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+ >>> a
+ array([[10, 7, 4],
+ [ 3, 2, 1]])
+ >>> np.quantile(a, 0.5)
+ 3.5
+ >>> np.quantile(a, 0.5, axis=0)
+ array([6.5, 4.5, 2.5])
+ >>> np.quantile(a, 0.5, axis=1)
+ array([7., 2.])
+ >>> np.quantile(a, 0.5, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+ >>> m = np.quantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.quantile(a, 0.5, axis=0, out=out)
+ array([6.5, 4.5, 2.5])
+ >>> m
+ array([6.5, 4.5, 2.5])
+ >>> b = a.copy()
+ >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a == b)
+
+ See also `numpy.percentile` for a visualization of most methods.
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = _check_interpolation_as_method(
+ method, interpolation, "quantile")
+
+ q = np.asanyarray(q)
+ if not _quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _quantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims)
+
+
+def _quantile_unchecked(a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=False):
+ """Assumes that q is in [0, 1], and is an ndarray"""
+ return _ureduce(a,
+ func=_quantile_ureduce_func,
+ q=q,
+ keepdims=keepdims,
+ axis=axis,
+ out=out,
+ overwrite_input=overwrite_input,
+ method=method)
+
+
+def _quantile_is_valid(q):
+ # avoid expensive reductions, relevant for arrays with < O(1000) elements
+ if q.ndim == 1 and q.size < 10:
+ for i in range(q.size):
+ if not (0.0 <= q[i] <= 1.0):
+ return False
+ else:
+ if not (np.all(0 <= q) and np.all(q <= 1)):
+ return False
+ return True
+
+
+def _check_interpolation_as_method(method, interpolation, fname):
+ # Deprecated NumPy 1.22, 2021-11-08
+ warnings.warn(
+ f"the `interpolation=` argument to {fname} was renamed to "
+ "`method=`, which has additional options.\n"
+ "Users of the modes 'nearest', 'lower', 'higher', or "
+ "'midpoint' are encouraged to review the method they used. "
+ "(Deprecated NumPy 1.22)",
+ DeprecationWarning, stacklevel=4)
+ if method != "linear":
+ # sanity check, we assume this basically never happens
+ raise TypeError(
+ "You shall not pass both `method` and `interpolation`!\n"
+ "(`interpolation` is Deprecated in favor of `method`)")
+ return interpolation
+
+
+def _compute_virtual_index(n, quantiles, alpha: float, beta: float):
+ """
+ Compute the floating point indexes of an array for the linear
+ interpolation of quantiles.
+ n : array_like
+ The sample sizes.
+ quantiles : array_like
+ The quantiles values.
+ alpha : float
+ A constant used to correct the index computed.
+ beta : float
+ A constant used to correct the index computed.
+
+ alpha and beta values depend on the chosen method
+ (see quantile documentation)
+
+ Reference:
+ Hyndman&Fan paper "Sample Quantiles in Statistical Packages",
+ DOI: 10.1080/00031305.1996.10473566
+ """
+ return n * quantiles + (
+ alpha + quantiles * (1 - alpha - beta)
+ ) - 1
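+
+# Editorial worked example (not part of the original source): with n = 5
+# samples and quantiles = 0.25, the formula above gives
+#     linear  (alpha=1,   beta=1):   5*0.25 + (1 + 0.25*(-1)) - 1 = 1.0
+#     hazen   (alpha=1/2, beta=1/2): 5*0.25 + 0.5             - 1 = 0.75
+#     weibull (alpha=0,   beta=0):   5*0.25 + 0.25            - 1 = 0.5
+# so the same quantile lands on different (virtual) sorted positions
+# depending on the method.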
+
+
+def _get_gamma(virtual_indexes, previous_indexes, method):
+ """
+ Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation
+ of quantiles.
+
+ virtual_indexes : array_like
+ The indexes where the percentile is supposed to be found in the sorted
+ sample.
+ previous_indexes : array_like
+ The floor values of virtual_indexes.
+ method : dict
+ The interpolation method chosen, which may have a specific rule
+ modifying gamma.
+
+ gamma is usually the fractional part of virtual_indexes but can be modified
+ by the interpolation method.
+ """
+ gamma = np.asanyarray(virtual_indexes - previous_indexes)
+ gamma = method["fix_gamma"](gamma, virtual_indexes)
+ return np.asanyarray(gamma)
+
+
+def _lerp(a, b, t, out=None):
+ """
+ Compute the linear interpolation weighted by gamma on each point of
+ two same shape array.
+
+ a : array_like
+ Left bound.
+ b : array_like
+ Right bound.
+ t : array_like
+ The interpolation weight.
+ out : array_like
+ Output array.
+ """
+ diff_b_a = subtract(b, a)
+ # asanyarray is a stop-gap until gh-13105
+ lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out))
+ subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5)
+ if lerp_interpolation.ndim == 0 and out is None:
+ lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays
+ return lerp_interpolation
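+
+# Editorial note (not part of the original source): the second `subtract`
+# above rewrites the result as b - (1 - t)*(b - a) wherever t >= 0.5, so
+# that t == 1 yields exactly `b`; the naive form a + t*(b - a) need not,
+# due to floating-point rounding.  This is a common trick for
+# endpoint-exact linear interpolation.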
+
+
+def _get_gamma_mask(shape, default_value, conditioned_value, where):
+ out = np.full(shape, default_value)
+ np.copyto(out, conditioned_value, where=where, casting="unsafe")
+ return out
+
+
+def _discret_interpolation_to_boundaries(index, gamma_condition_fun):
+ previous = np.floor(index)
+ next = previous + 1
+ gamma = index - previous
+ res = _get_gamma_mask(shape=index.shape,
+ default_value=next,
+ conditioned_value=previous,
+ where=gamma_condition_fun(gamma, index)
+ ).astype(np.intp)
+ # Some methods can lead to out-of-bound integers, clip them:
+ res[res < 0] = 0
+ return res
+
+
+def _closest_observation(n, quantiles):
+ gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0)
+ return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5,
+ gamma_fun)
+
+
+def _inverted_cdf(n, quantiles):
+ gamma_fun = lambda gamma, _: (gamma == 0)
+ return _discret_interpolation_to_boundaries((n * quantiles) - 1,
+ gamma_fun)
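+
+# Editorial worked example (not part of the original source): for n = 4
+# sorted values and quantiles = 0.5, `_inverted_cdf` computes the virtual
+# index 4*0.5 - 1 = 1.0; gamma == 0, so the previous (floor) index 1 is
+# taken -- the second-smallest value, matching method 1 of H&F.  For
+# quantiles = 0.6 the index is 1.4, gamma > 0, and the next index 2 is
+# taken.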
+
+
+def _quantile_ureduce_func(
+ a: np.ndarray,
+ q: np.ndarray,
+ axis: int = None,
+ out=None,
+ overwrite_input: bool = False,
+ method="linear",
+) -> np.ndarray:
+ if q.ndim > 2:
+ # The code below works fine for nd, but it might not have useful
+ # semantics. For now, keep the supported dimensions the same as it was
+ # before.
+ raise ValueError("q must be a scalar or 1d")
+ if overwrite_input:
+ if axis is None:
+ axis = 0
+ arr = a.ravel()
+ else:
+ arr = a
+ else:
+ if axis is None:
+ axis = 0
+ arr = a.flatten()
+ else:
+ arr = a.copy()
+ result = _quantile(arr,
+ quantiles=q,
+ axis=axis,
+ method=method,
+ out=out)
+ return result
+
+
+def _get_indexes(arr, virtual_indexes, valid_values_count):
+ """
+ Get the valid indexes of arr neighbouring virtual_indexes.
+
+ Note: this is a companion function to the linear interpolation of
+ quantiles.
+
+ Returns
+ -------
+ (previous_indexes, next_indexes) : tuple
+ A tuple of the indexes neighbouring each virtual index.
+ """
+ previous_indexes = np.asanyarray(np.floor(virtual_indexes))
+ next_indexes = np.asanyarray(previous_indexes + 1)
+ indexes_above_bounds = virtual_indexes >= valid_values_count - 1
+ # When indexes is above max index, take the max value of the array
+ if indexes_above_bounds.any():
+ previous_indexes[indexes_above_bounds] = -1
+ next_indexes[indexes_above_bounds] = -1
+ # When indexes is below min index, take the min value of the array
+ indexes_below_bounds = virtual_indexes < 0
+ if indexes_below_bounds.any():
+ previous_indexes[indexes_below_bounds] = 0
+ next_indexes[indexes_below_bounds] = 0
+ if np.issubdtype(arr.dtype, np.inexact):
+ # After the sort, slices having NaNs will have for last element a NaN
+ virtual_indexes_nans = np.isnan(virtual_indexes)
+ if virtual_indexes_nans.any():
+ previous_indexes[virtual_indexes_nans] = -1
+ next_indexes[virtual_indexes_nans] = -1
+ previous_indexes = previous_indexes.astype(np.intp)
+ next_indexes = next_indexes.astype(np.intp)
+ return previous_indexes, next_indexes
+
+
+def _quantile(
+ arr: np.ndarray,
+ quantiles: np.ndarray,
+ axis: int = -1,
+ method="linear",
+ out=None,
+):
+ """
+ Private function that doesn't support extended axis or keepdims.
+ Those are handled by the caller through `_ureduce`; see `nanpercentile`
+ for parameter usage. It computes the quantiles of the array along the
+ given axis, interpolating between neighbouring points according to the
+ chosen `method`.
+
+ By default, the method is "linear" where alpha == beta == 1 which
+ performs the 7th method of Hyndman&Fan.
+ With "median_unbiased" we get alpha == beta == 1/3
+ thus the 8th method of Hyndman&Fan.
+ """
+ # --- Setup
+ arr = np.asanyarray(arr)
+ values_count = arr.shape[axis]
+ # The dimensions of `q` are prepended to the output shape, so we need the
+ # axis being sampled from `arr` to be first.
+ DATA_AXIS = 0
+ if axis != DATA_AXIS: # But moveaxis is slow, so only call it if axis!=0.
+ arr = np.moveaxis(arr, axis, destination=DATA_AXIS)
+ # --- Computation of indexes
+ # Index where to find the value in the sorted array.
+ # Virtual because it is a floating point value, not a valid index.
+ # The nearest neighbours are used for interpolation
+ try:
+ method = _QuantileMethods[method]
+ except KeyError:
+ raise ValueError(
+ f"{method!r} is not a valid method. Use one of: "
+ f"{_QuantileMethods.keys()}") from None
+ virtual_indexes = method["get_virtual_index"](values_count, quantiles)
+ virtual_indexes = np.asanyarray(virtual_indexes)
+ if np.issubdtype(virtual_indexes.dtype, np.integer):
+ # No interpolation needed, take the points along axis
+ if np.issubdtype(arr.dtype, np.inexact):
+ # may contain nan, which would sort to the end
+ arr.partition(concatenate((virtual_indexes.ravel(), [-1])), axis=0)
+ slices_having_nans = np.isnan(arr[-1])
+ else:
+ # cannot contain nan
+ arr.partition(virtual_indexes.ravel(), axis=0)
+ slices_having_nans = np.array(False, dtype=bool)
+ result = take(arr, virtual_indexes, axis=0, out=out)
+ else:
+ previous_indexes, next_indexes = _get_indexes(arr,
+ virtual_indexes,
+ values_count)
+ # --- Sorting
+ arr.partition(
+ np.unique(np.concatenate(([0, -1],
+ previous_indexes.ravel(),
+ next_indexes.ravel(),
+ ))),
+ axis=DATA_AXIS)
+ if np.issubdtype(arr.dtype, np.inexact):
+ slices_having_nans = np.isnan(
+ take(arr, indices=-1, axis=DATA_AXIS)
+ )
+ else:
+ slices_having_nans = None
+ # --- Get values from indexes
+ previous = np.take(arr, previous_indexes, axis=DATA_AXIS)
+ next = np.take(arr, next_indexes, axis=DATA_AXIS)
+ # --- Linear interpolation
+ gamma = _get_gamma(virtual_indexes, previous_indexes, method)
+ result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1)
+ gamma = gamma.reshape(result_shape)
+ result = _lerp(previous,
+ next,
+ gamma,
+ out=out)
+ if np.any(slices_having_nans):
+ if result.ndim == 0 and out is None:
+ # can't write to a scalar
+ result = arr.dtype.type(np.nan)
+ else:
+ result[..., slices_having_nans] = np.nan
+ return result
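+
+# Editorial note (not part of the original source): for inexact dtypes the
+# partitions above push any NaN to the end of each slice,
+# `slices_having_nans` records those slices, and their outputs are
+# overwritten with NaN.  So e.g. np.quantile([1., np.nan, 3.], 0.5)
+# evaluates to nan; use np.nanquantile to ignore NaNs instead.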
+
+
+def _trapz_dispatcher(y, x=None, dx=None, axis=None):
+ return (y, x)
+
+
+@array_function_dispatch(_trapz_dispatcher)
+def trapz(y, x=None, dx=1.0, axis=-1):
+ r"""
+ Integrate along the given axis using the composite trapezoidal rule.
+
+ If `x` is provided, the integration happens in sequence along its
+ elements - they are not sorted.
+
+ Integrate `y` (`x`) along each 1d slice on the given axis, compute
+ :math:`\int y(x) dx`.
+ When `x` is specified, this integrates along the parametric curve,
+ computing :math:`\int_t y(t) dt =
+ \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.
+
+ Parameters
+ ----------
+ y : array_like
+ Input array to integrate.
+ x : array_like, optional
+ The sample points corresponding to the `y` values. If `x` is None,
+ the sample points are assumed to be evenly spaced `dx` apart. The
+ default is None.
+ dx : scalar, optional
+ The spacing between sample points when `x` is None. The default is 1.
+ axis : int, optional
+ The axis along which to integrate.
+
+ Returns
+ -------
+ trapz : float or ndarray
+ Definite integral of `y` = n-dimensional array as approximated along
+ a single axis by the trapezoidal rule. If `y` is a 1-dimensional array,
+ then the result is a float. If `n` is greater than 1, then the result
+ is an `n`-1 dimensional array.
+
+ See Also
+ --------
+ sum, cumsum
+
+ Notes
+ -----
+ Image [2]_ illustrates the trapezoidal rule: the y-axis locations of the
+ points are taken from the `y` array, and the x-axis distances between
+ points default to 1.0 but can instead be supplied via the `x` array or
+ the `dx` scalar. The return value equals the combined area under the
+ red lines.
+
+
+ References
+ ----------
+ .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
+
+ .. [2] Illustration image:
+ https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+
+ Examples
+ --------
+ >>> np.trapz([1,2,3])
+ 4.0
+ >>> np.trapz([1,2,3], x=[4,6,8])
+ 8.0
+ >>> np.trapz([1,2,3], dx=2)
+ 8.0
+
+ Using a decreasing `x` corresponds to integrating in reverse:
+
+ >>> np.trapz([1,2,3], x=[8,6,4])
+ -8.0
+
+ More generally `x` is used to integrate along a parametric curve.
+ This finds the area of a circle, noting we repeat the sample which closes
+ the curve:
+
+ >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
+ >>> np.trapz(np.cos(theta), x=np.sin(theta))
+ 3.141571941375841
+
+ >>> a = np.arange(6).reshape(2, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> np.trapz(a, axis=0)
+ array([1.5, 2.5, 3.5])
+ >>> np.trapz(a, axis=1)
+ array([2., 8.])
+ """
+ y = asanyarray(y)
+ if x is None:
+ d = dx
+ else:
+ x = asanyarray(x)
+ if x.ndim == 1:
+ d = diff(x)
+ # reshape to correct shape
+ shape = [1]*y.ndim
+ shape[axis] = d.shape[0]
+ d = d.reshape(shape)
+ else:
+ d = diff(x, axis=axis)
+ nd = y.ndim
+ slice1 = [slice(None)]*nd
+ slice2 = [slice(None)]*nd
+ slice1[axis] = slice(1, None)
+ slice2[axis] = slice(None, -1)
+ try:
+ ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
+ except ValueError:
+ # Operations didn't work, cast to ndarray
+ d = np.asarray(d)
+ y = np.asarray(y)
+ ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
+ return ret
+
+
+def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
+ return xi
+
+
+# Based on scitools meshgrid
+@array_function_dispatch(_meshgrid_dispatcher)
+def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
+ """
+ Return coordinate matrices from coordinate vectors.
+
+ Make N-D coordinate arrays for vectorized evaluations of
+ N-D scalar/vector fields over N-D grids, given
+ one-dimensional coordinate arrays x1, x2,..., xn.
+
+ .. versionchanged:: 1.9
+ 1-D and 0-D cases are allowed.
+
+ Parameters
+ ----------
+ x1, x2,..., xn : array_like
+ 1-D arrays representing the coordinates of a grid.
+ indexing : {'xy', 'ij'}, optional
+ Cartesian ('xy', default) or matrix ('ij') indexing of output.
+ See Notes for more details.
+
+ .. versionadded:: 1.7.0
+ sparse : bool, optional
+ If True the shape of the returned coordinate array for dimension *i*
+ is reduced from ``(N1, ..., Ni, ... Nn)`` to
+ ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are
+ intended to be used with :ref:`basics.broadcasting`. When all
+ coordinates are used in an expression, broadcasting still leads to a
+ fully-dimensional result array.
+
+ Default is False.
+
+ .. versionadded:: 1.7.0
+ copy : bool, optional
+ If False, views into the original arrays are returned in order to
+ conserve memory. Default is True. Please note that
+ ``sparse=False, copy=False`` will likely return non-contiguous
+ arrays. Furthermore, more than one element of a broadcast array
+ may refer to a single memory location. If you need to write to the
+ arrays, make copies first.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ X1, X2,..., XN : ndarray
+ For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
+ returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
+ or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
+ with the elements of `xi` repeated to fill the matrix along
+ the first dimension for `x1`, the second for `x2` and so on.
+
+ Notes
+ -----
+ This function supports both indexing conventions through the indexing
+ keyword argument. Giving the string 'ij' returns a meshgrid with
+ matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
+ In the 2-D case with inputs of length M and N, the outputs are of shape
+ (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
+ with inputs of length M, N and P, outputs are of shape (N, M, P) for
+ 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
+ illustrated by the following code snippet::
+
+ xv, yv = np.meshgrid(x, y, indexing='ij')
+ for i in range(nx):
+ for j in range(ny):
+ # treat xv[i,j], yv[i,j]
+
+ xv, yv = np.meshgrid(x, y, indexing='xy')
+ for i in range(nx):
+ for j in range(ny):
+ # treat xv[j,i], yv[j,i]
+
+ In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
+
+ See Also
+ --------
+ mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
+ ogrid : Construct an open multi-dimensional "meshgrid" using indexing
+ notation.
+ how-to-index
+
+ Examples
+ --------
+ >>> nx, ny = (3, 2)
+ >>> x = np.linspace(0, 1, nx)
+ >>> y = np.linspace(0, 1, ny)
+ >>> xv, yv = np.meshgrid(x, y)
+ >>> xv
+ array([[0. , 0.5, 1. ],
+ [0. , 0.5, 1. ]])
+ >>> yv
+ array([[0., 0., 0.],
+ [1., 1., 1.]])
+
+ The result of `meshgrid` is a coordinate grid:
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none')
+ >>> plt.show()
+
+ You can create sparse output arrays to save memory and computation time.
+
+ >>> xv, yv = np.meshgrid(x, y, sparse=True)
+ >>> xv
+ array([[0. , 0.5, 1. ]])
+ >>> yv
+ array([[0.],
+ [1.]])
+
+ `meshgrid` is very useful to evaluate functions on a grid. If the
+ function depends on all coordinates, both dense and sparse outputs can be
+ used.
+
+ >>> x = np.linspace(-5, 5, 101)
+ >>> y = np.linspace(-5, 5, 101)
+ >>> # full coordinate arrays
+ >>> xx, yy = np.meshgrid(x, y)
+ >>> zz = np.sqrt(xx**2 + yy**2)
+ >>> xx.shape, yy.shape, zz.shape
+ ((101, 101), (101, 101), (101, 101))
+ >>> # sparse coordinate arrays
+ >>> xs, ys = np.meshgrid(x, y, sparse=True)
+ >>> zs = np.sqrt(xs**2 + ys**2)
+ >>> xs.shape, ys.shape, zs.shape
+ ((1, 101), (101, 1), (101, 101))
+ >>> np.array_equal(zz, zs)
+ True
+
+ >>> h = plt.contourf(x, y, zs)
+ >>> plt.axis('scaled')
+ >>> plt.colorbar()
+ >>> plt.show()
+ """
+ ndim = len(xi)
+
+ if indexing not in ['xy', 'ij']:
+ raise ValueError(
+ "Valid values for `indexing` are 'xy' and 'ij'.")
+
+ s0 = (1,) * ndim
+ output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
+ for i, x in enumerate(xi)]
+
+ if indexing == 'xy' and ndim > 1:
+ # switch first and second axis
+ output[0].shape = (1, -1) + s0[2:]
+ output[1].shape = (-1, 1) + s0[2:]
+
+ if not sparse:
+ # Return the full N-D matrix (not only the 1-D vector)
+ output = np.broadcast_arrays(*output, subok=True)
+
+ if copy:
+ output = [x.copy() for x in output]
+
+ return output
+
+
+def _delete_dispatcher(arr, obj, axis=None):
+ return (arr, obj)
+
+
+@array_function_dispatch(_delete_dispatcher)
+def delete(arr, obj, axis=None):
+ """
+ Return a new array with sub-arrays along an axis deleted. For a one
+ dimensional array, this returns those entries not returned by
+ `arr[obj]`.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array.
+ obj : slice, int or array of ints
+ Indicate indices of sub-arrays to remove along the specified axis.
+
+ .. versionchanged:: 1.19.0
+ Boolean indices are now treated as a mask of elements to remove,
+ rather than being cast to the integers 0 and 1.
+
+ axis : int, optional
+ The axis along which to delete the subarray defined by `obj`.
+ If `axis` is None, `obj` is applied to the flattened array.
+
+ Returns
+ -------
+ out : ndarray
+ A copy of `arr` with the elements specified by `obj` removed. Note
+ that `delete` does not occur in-place. If `axis` is None, `out` is
+ a flattened array.
+
+ See Also
+ --------
+ insert : Insert elements into an array.
+ append : Append elements at the end of an array.
+
+ Notes
+ -----
+ Often it is preferable to use a boolean mask. For example:
+
+ >>> arr = np.arange(12) + 1
+ >>> mask = np.ones(len(arr), dtype=bool)
+ >>> mask[[0,2,4]] = False
+ >>> result = arr[mask,...]
+
+ Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further
+ use of `mask`.
+
+ Examples
+ --------
+ >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
+ >>> arr
+ array([[ 1, 2, 3, 4],
+ [ 5, 6, 7, 8],
+ [ 9, 10, 11, 12]])
+ >>> np.delete(arr, 1, 0)
+ array([[ 1, 2, 3, 4],
+ [ 9, 10, 11, 12]])
+
+ >>> np.delete(arr, np.s_[::2], 1)
+ array([[ 2, 4],
+ [ 6, 8],
+ [10, 12]])
+ >>> np.delete(arr, [1,3,5], None)
+ array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
+
+ """
+ wrap = None
+ if type(arr) is not ndarray:
+ try:
+ wrap = arr.__array_wrap__
+ except AttributeError:
+ pass
+
+ arr = asarray(arr)
+ ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
+ if axis is None:
+ if ndim != 1:
+ arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
+ ndim = arr.ndim
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
+
+ slobj = [slice(None)]*ndim
+ N = arr.shape[axis]
+ newshape = list(arr.shape)
+
+ if isinstance(obj, slice):
+ start, stop, step = obj.indices(N)
+ xr = range(start, stop, step)
+ numtodel = len(xr)
+
+ if numtodel <= 0:
+ if wrap:
+ return wrap(arr.copy(order=arrorder))
+ else:
+ return arr.copy(order=arrorder)
+
+ # Invert if step is negative:
+ if step < 0:
+ step = -step
+ start = xr[-1]
+ stop = xr[0] + 1
+
+ newshape[axis] -= numtodel
+ new = empty(newshape, arr.dtype, arrorder)
+ # copy initial chunk
+ if start == 0:
+ pass
+ else:
+ slobj[axis] = slice(None, start)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ # copy end chunk
+ if stop == N:
+ pass
+ else:
+ slobj[axis] = slice(stop-numtodel, None)
+ slobj2 = [slice(None)]*ndim
+ slobj2[axis] = slice(stop, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ # copy middle pieces
+ if step == 1:
+ pass
+ else: # use array indexing.
+ keep = ones(stop-start, dtype=bool)
+ keep[:stop-start:step] = False
+ slobj[axis] = slice(start, stop-numtodel)
+ slobj2 = [slice(None)]*ndim
+ slobj2[axis] = slice(start, stop)
+ arr = arr[tuple(slobj2)]
+ slobj2[axis] = keep
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ if wrap:
+ return wrap(new)
+ else:
+ return new
+
+ if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
+ single_value = True
+ else:
+ single_value = False
+ _obj = obj
+ obj = np.asarray(obj)
+ # Allow `size == 0` so empty lists work, similar to indexing; but (as
+ # with indexing) this is really too generic:
+ if obj.size == 0 and not isinstance(_obj, np.ndarray):
+ obj = obj.astype(intp)
+ elif obj.size == 1 and obj.dtype.kind in "ui":
+ # For a size 1 integer array we can use the single-value path
+ # (most dtypes, except boolean, should just fail later).
+ obj = obj.item()
+ single_value = True
+
+ if single_value:
+ # optimization for a single value
+ if (obj < -N or obj >= N):
+ raise IndexError(
+ "index %i is out of bounds for axis %i with "
+ "size %i" % (obj, axis, N))
+ if (obj < 0):
+ obj += N
+ newshape[axis] -= 1
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj[axis] = slice(None, obj)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ slobj[axis] = slice(obj, None)
+ slobj2 = [slice(None)]*ndim
+ slobj2[axis] = slice(obj+1, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ else:
+ if obj.dtype == bool:
+ if obj.shape != (N,):
+ raise ValueError('boolean array argument obj to delete '
+ 'must be one dimensional and match the axis '
+ 'length of {}'.format(N))
+
+ # optimization, the other branch is slower
+ keep = ~obj
+ else:
+ keep = ones(N, dtype=bool)
+ keep[obj,] = False
+
+ slobj[axis] = keep
+ new = arr[tuple(slobj)]
+
+ if wrap:
+ return wrap(new)
+ else:
+ return new
+
+
+def _insert_dispatcher(arr, obj, values, axis=None):
+ return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
+def insert(arr, obj, values, axis=None):
+ """
+ Insert values along the given axis before the given indices.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input array.
+ obj : int, slice or sequence of ints
+ Object that defines the index or indices before which `values` is
+ inserted.
+
+ .. versionadded:: 1.8.0
+
+ Support for multiple insertions when `obj` is a single scalar or a
+ sequence with one element (similar to calling insert multiple
+ times).
+ values : array_like
+ Values to insert into `arr`. If the type of `values` is different
+ from that of `arr`, `values` is converted to the type of `arr`.
+ `values` should be shaped so that ``arr[...,obj,...] = values``
+ is legal.
+ axis : int, optional
+ Axis along which to insert `values`. If `axis` is None then `arr`
+ is flattened first.
+
+ Returns
+ -------
+ out : ndarray
+ A copy of `arr` with `values` inserted. Note that `insert`
+ does not occur in-place: a new array is returned. If
+ `axis` is None, `out` is a flattened array.
+
+ See Also
+ --------
+ append : Append elements at the end of an array.
+ concatenate : Join a sequence of arrays along an existing axis.
+ delete : Delete elements from an array.
+
+ Notes
+ -----
+    Note that for higher dimensional inserts ``obj=0`` behaves very
+    differently from ``obj=[0]``, just like ``arr[:,0,:] = values`` is
+    different from ``arr[:,[0],:] = values``.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 1], [2, 2], [3, 3]])
+ >>> a
+ array([[1, 1],
+ [2, 2],
+ [3, 3]])
+ >>> np.insert(a, 1, 5)
+ array([1, 5, 1, ..., 2, 3, 3])
+ >>> np.insert(a, 1, 5, axis=1)
+ array([[1, 5, 1],
+ [2, 5, 2],
+ [3, 5, 3]])
+
+ Difference between sequence and scalars:
+
+ >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
+ array([[1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]])
+ >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
+ ... np.insert(a, [1], [[1],[2],[3]], axis=1))
+ True
+
+ >>> b = a.flatten()
+ >>> b
+ array([1, 1, 2, 2, 3, 3])
+ >>> np.insert(b, [2, 2], [5, 6])
+ array([1, 1, 5, ..., 2, 3, 3])
+
+ >>> np.insert(b, slice(2, 4), [5, 6])
+ array([1, 1, 5, ..., 2, 3, 3])
+
+ >>> np.insert(b, [2, 2], [7.13, False]) # type casting
+ array([1, 1, 7, ..., 2, 3, 3])
+
+ >>> x = np.arange(8).reshape(2, 4)
+ >>> idx = (1, 3)
+ >>> np.insert(x, idx, 999, axis=1)
+ array([[ 0, 999, 1, 2, 999, 3],
+ [ 4, 999, 5, 6, 999, 7]])
+
+ """
+ wrap = None
+ if type(arr) is not ndarray:
+ try:
+ wrap = arr.__array_wrap__
+ except AttributeError:
+ pass
+
+ arr = asarray(arr)
+ ndim = arr.ndim
+ arrorder = 'F' if arr.flags.fnc else 'C'
+ if axis is None:
+ if ndim != 1:
+ arr = arr.ravel()
+ # needed for np.matrix, which is still not 1d after being ravelled
+ ndim = arr.ndim
+ axis = ndim - 1
+ else:
+ axis = normalize_axis_index(axis, ndim)
+ slobj = [slice(None)]*ndim
+ N = arr.shape[axis]
+ newshape = list(arr.shape)
+
+ if isinstance(obj, slice):
+ # turn it into a range object
+ indices = arange(*obj.indices(N), dtype=intp)
+ else:
+ # need to copy obj, because indices will be changed in-place
+ indices = np.array(obj)
+ if indices.dtype == bool:
+ # See also delete
+ # 2012-10-11, NumPy 1.8
+ warnings.warn(
+                "in the future insert will treat boolean arrays and "
+                "array-likes as a boolean index instead of casting them to "
+                "integers", FutureWarning, stacklevel=3)
+ indices = indices.astype(intp)
+ # Code after warning period:
+ #if obj.ndim != 1:
+ # raise ValueError('boolean array argument obj to insert '
+ # 'must be one dimensional')
+ #indices = np.flatnonzero(obj)
+ elif indices.ndim > 1:
+ raise ValueError(
+ "index array argument obj to insert must be one dimensional "
+ "or scalar")
+ if indices.size == 1:
+ index = indices.item()
+ if index < -N or index > N:
+ raise IndexError(f"index {obj} is out of bounds for axis {axis} "
+ f"with size {N}")
+ if (index < 0):
+ index += N
+
+ # There are some object array corner cases here, but we cannot avoid
+ # that:
+ values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
+ if indices.ndim == 0:
+        # broadcasting is very different here, since a[:,0,:] = ... behaves
+        # very differently from a[:,[0],:] = ...! This changes values so that
+        # it works like the second case. (here a[:,0:1,:])
+ values = np.moveaxis(values, 0, axis)
+ numnew = values.shape[axis]
+ newshape[axis] += numnew
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj[axis] = slice(None, index)
+ new[tuple(slobj)] = arr[tuple(slobj)]
+ slobj[axis] = slice(index, index+numnew)
+ new[tuple(slobj)] = values
+ slobj[axis] = slice(index+numnew, None)
+ slobj2 = [slice(None)] * ndim
+ slobj2[axis] = slice(index, None)
+ new[tuple(slobj)] = arr[tuple(slobj2)]
+ if wrap:
+ return wrap(new)
+ return new
+ elif indices.size == 0 and not isinstance(obj, np.ndarray):
+ # Can safely cast the empty list to intp
+ indices = indices.astype(intp)
+
+ indices[indices < 0] += N
+
+ numnew = len(indices)
+ order = indices.argsort(kind='mergesort') # stable sort
+ indices[order] += np.arange(numnew)
+
+ newshape[axis] += numnew
+ old_mask = ones(newshape[axis], dtype=bool)
+ old_mask[indices] = False
+
+ new = empty(newshape, arr.dtype, arrorder)
+ slobj2 = [slice(None)]*ndim
+ slobj[axis] = indices
+ slobj2[axis] = old_mask
+ new[tuple(slobj)] = values
+ new[tuple(slobj2)] = arr
+
+ if wrap:
+ return wrap(new)
+ return new
+
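+# Illustrative sketch (not part of the NumPy sources): the multi-index
+# branch above shifts the stably sorted insertion indices by their rank and
+# scatters through a boolean mask.  Reproducing the docstring example
+# ``np.insert([1, 1, 2, 2, 3, 3], [2, 2], [5, 6])`` by hand:
+#
+#     >>> arr = np.array([1, 1, 2, 2, 3, 3])
+#     >>> indices = np.array([2, 2]) + np.arange(2)   # stable shift -> [2, 3]
+#     >>> out = np.empty(arr.size + 2, dtype=arr.dtype)
+#     >>> mask = np.ones(out.size, dtype=bool)
+#     >>> mask[indices] = False
+#     >>> out[indices] = [5, 6]
+#     >>> out[mask] = arr
+#     >>> out
+#     array([1, 1, 5, 6, 2, 2, 3, 3])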
+
+def _append_dispatcher(arr, values, axis=None):
+ return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
+def append(arr, values, axis=None):
+ """
+ Append values to the end of an array.
+
+ Parameters
+ ----------
+ arr : array_like
+ Values are appended to a copy of this array.
+ values : array_like
+ These values are appended to a copy of `arr`. It must be of the
+ correct shape (the same shape as `arr`, excluding `axis`). If
+ `axis` is not specified, `values` can be any shape and will be
+ flattened before use.
+ axis : int, optional
+ The axis along which `values` are appended. If `axis` is not
+ given, both `arr` and `values` are flattened before use.
+
+ Returns
+ -------
+ append : ndarray
+ A copy of `arr` with `values` appended to `axis`. Note that
+ `append` does not occur in-place: a new array is allocated and
+ filled. If `axis` is None, `out` is a flattened array.
+
+ See Also
+ --------
+ insert : Insert elements into an array.
+ delete : Delete elements from an array.
+
+ Examples
+ --------
+ >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
+ array([1, 2, 3, ..., 7, 8, 9])
+
+ When `axis` is specified, `values` must have the correct shape.
+
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
+ array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
+ Traceback (most recent call last):
+ ...
+ ValueError: all the input arrays must have same number of dimensions, but
+ the array at index 0 has 2 dimension(s) and the array at index 1 has 1
+ dimension(s)
+
+ """
+ arr = asanyarray(arr)
+ if axis is None:
+ if arr.ndim != 1:
+ arr = arr.ravel()
+ values = ravel(values)
+ axis = arr.ndim-1
+ return concatenate((arr, values), axis=axis)
+
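+# Illustrative sketch (not part of the NumPy sources): with the default
+# ``axis=None``, append is just concatenation of the flattened inputs.
+#
+#     >>> a = np.array([[1, 2], [3, 4]])
+#     >>> np.array_equal(np.append(a, [5, 6]),
+#     ...                np.concatenate((a.ravel(), np.ravel([5, 6]))))
+#     True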
+
+def _digitize_dispatcher(x, bins, right=None):
+ return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
+def digitize(x, bins, right=False):
+ """
+ Return the indices of the bins to which each value in input array belongs.
+
+ ========= ============= ============================
+ `right` order of bins returned index `i` satisfies
+ ========= ============= ============================
+ ``False`` increasing ``bins[i-1] <= x < bins[i]``
+ ``True`` increasing ``bins[i-1] < x <= bins[i]``
+ ``False`` decreasing ``bins[i-1] > x >= bins[i]``
+ ``True`` decreasing ``bins[i-1] >= x > bins[i]``
+ ========= ============= ============================
+
+ If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+ returned as appropriate.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array to be binned. Prior to NumPy 1.10.0, this array had to
+ be 1-dimensional, but can now have any shape.
+ bins : array_like
+ Array of bins. It has to be 1-dimensional and monotonic.
+    right : bool, optional
+        Indicates whether the intervals include the right or the left bin
+        edge. Default behavior is (right==False), meaning the interval does
+        not include the right edge: it is closed on the left and open on
+        the right, i.e., bins[i-1] <= x < bins[i] for monotonically
+        increasing bins.
+
+ Returns
+ -------
+ indices : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique, searchsorted
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
+
+ .. versionadded:: 1.10.0
+
+    `np.digitize` is implemented in terms of `np.searchsorted`. This means
+    that a binary search is used to bin the values, which scales much better
+    for a larger number of bins than the previous linear search. It also
+    removes the requirement for the input array to be 1-dimensional.
+
+ For monotonically _increasing_ `bins`, the following are equivalent::
+
+ np.digitize(x, bins, right=True)
+ np.searchsorted(bins, x, side='left')
+
+    Note that as the order of the arguments is reversed, the side must be too.
+ The `searchsorted` call is marginally faster, as it does not do any
+ monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+ Examples
+ --------
+ >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+ >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+ >>> inds = np.digitize(x, bins)
+ >>> inds
+ array([1, 4, 3, 2])
+ >>> for n in range(x.size):
+ ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+ ...
+ 0.0 <= 0.2 < 1.0
+ 4.0 <= 6.4 < 10.0
+ 2.5 <= 3.0 < 4.0
+ 1.0 <= 1.6 < 2.5
+
+ >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+ >>> bins = np.array([0, 5, 10, 15, 20])
+    >>> np.digitize(x, bins, right=True)
+    array([1, 2, 3, 4, 4])
+    >>> np.digitize(x, bins, right=False)
+    array([1, 3, 3, 4, 5])
+ """
+ x = _nx.asarray(x)
+ bins = _nx.asarray(bins)
+
+ # here for compatibility, searchsorted below is happy to take this
+ if np.issubdtype(x.dtype, _nx.complexfloating):
+ raise TypeError("x may not be complex")
+
+ mono = _monotonicity(bins)
+ if mono == 0:
+ raise ValueError("bins must be monotonically increasing or decreasing")
+
+ # this is backwards because the arguments below are swapped
+ side = 'left' if right else 'right'
+ if mono == -1:
+ # reverse the bins, and invert the results
+ return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+ else:
+ return _nx.searchsorted(bins, x, side=side)
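+
+
+# Illustrative sketch (not part of the NumPy sources): for monotonically
+# decreasing bins, the code above flips the bins and mirrors the
+# searchsorted result.
+#
+#     >>> bins = np.array([4.0, 2.5, 1.0, 0.0])   # decreasing
+#     >>> x = np.array([0.2, 3.0, 1.6])
+#     >>> np.digitize(x, bins)
+#     array([3, 1, 2])
+#     >>> np.array_equal(np.digitize(x, bins),
+#     ...                len(bins) - np.searchsorted(bins[::-1], x,
+#     ...                                            side='right'))
+#     True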
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/function_base.pyi b/venv/lib/python3.9/site-packages/numpy/lib/function_base.pyi
new file mode 100644
index 00000000..687e4ab1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/function_base.pyi
@@ -0,0 +1,697 @@
+import sys
+from collections.abc import Sequence, Iterator, Callable, Iterable
+from typing import (
+ Literal as L,
+ Any,
+ TypeVar,
+ overload,
+ Protocol,
+ SupportsIndex,
+ SupportsInt,
+)
+
+if sys.version_info >= (3, 10):
+ from typing import TypeGuard
+else:
+ from typing_extensions import TypeGuard
+
+from numpy import (
+ vectorize as vectorize,
+ ufunc,
+ generic,
+ floating,
+ complexfloating,
+ intp,
+ float64,
+ complex128,
+ timedelta64,
+ datetime64,
+ object_,
+ _OrderKACF,
+)
+
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _ShapeLike,
+ _ScalarLike_co,
+ _DTypeLike,
+ _ArrayLike,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeObject_co,
+ _FloatLike_co,
+ _ComplexLike_co,
+)
+
+from numpy.core.function_base import (
+ add_newdoc as add_newdoc,
+)
+
+from numpy.core.multiarray import (
+ add_docstring as add_docstring,
+ bincount as bincount,
+)
+
+from numpy.core.umath import _add_newdoc_ufunc
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_2Tuple = tuple[_T, _T]
+
+class _TrimZerosSequence(Protocol[_T_co]):
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: slice, /) -> _T_co: ...
+ def __iter__(self) -> Iterator[Any]: ...
+
+class _SupportsWriteFlush(Protocol):
+ def write(self, s: str, /) -> object: ...
+ def flush(self) -> object: ...
+
+__all__: list[str]
+
+# NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
+def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...
+
+@overload
+def rot90(
+ m: _ArrayLike[_SCT],
+ k: int = ...,
+ axes: tuple[int, int] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def rot90(
+ m: ArrayLike,
+ k: int = ...,
+ axes: tuple[int, int] = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def flip(m: _SCT, axis: None = ...) -> _SCT: ...
+@overload
+def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+@overload
+def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
+
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeFloat_co= ...,
+ returned: L[False] = ...,
+ keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeComplex_co = ...,
+ returned: L[False] = ...,
+ keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def average(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ weights: None | Any = ...,
+ returned: L[False] = ...,
+ keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeFloat_co= ...,
+ returned: L[True] = ...,
+ keepdims: L[False] = ...,
+) -> _2Tuple[floating[Any]]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeComplex_co = ...,
+ returned: L[True] = ...,
+ keepdims: L[False] = ...,
+) -> _2Tuple[complexfloating[Any, Any]]: ...
+@overload
+def average(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ weights: None | Any = ...,
+ returned: L[True] = ...,
+ keepdims: L[False] = ...,
+) -> _2Tuple[Any]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ weights: None | Any = ...,
+ returned: L[False] = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ weights: None | Any = ...,
+ returned: L[True] = ...,
+ keepdims: bool = ...,
+) -> _2Tuple[Any]: ...
+
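+# Illustrative (not part of the stubs): how a type checker such as mypy
+# resolves the `average` overloads above; the revealed types are
+# approximate.
+#
+#     import numpy as np
+#     reveal_type(np.average([1.0, 2.0]))                 # floating[Any]
+#     reveal_type(np.average([1.0, 2.0], returned=True))  # tuple[floating[Any], floating[Any]]
+#     reveal_type(np.average([1.0, 2.0], axis=0))         # Any
+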
+@overload
+def asarray_chkfinite(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+
+# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
+# xref python/mypy#8645
+@overload
+def piecewise(
+ x: _ArrayLike[_SCT],
+ condlist: ArrayLike,
+ funclist: Sequence[Any | Callable[..., Any]],
+ *args: Any,
+ **kw: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def piecewise(
+ x: ArrayLike,
+ condlist: ArrayLike,
+ funclist: Sequence[Any | Callable[..., Any]],
+ *args: Any,
+ **kw: Any,
+) -> NDArray[Any]: ...
+
+def select(
+ condlist: Sequence[ArrayLike],
+ choicelist: Sequence[ArrayLike],
+ default: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def copy(
+ a: _ArrayType,
+ order: _OrderKACF,
+ subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+ a: _ArrayType,
+ order: _OrderKACF = ...,
+ *,
+ subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+ a: _ArrayLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def copy(
+ a: ArrayLike,
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[Any]: ...
+
+def gradient(
+ f: ArrayLike,
+ *varargs: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ edge_order: L[1, 2] = ...,
+) -> Any: ...
+
+@overload
+def diff(
+ a: _T,
+ n: L[0],
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> _T: ...
+@overload
+def diff(
+ a: ArrayLike,
+ n: int = ...,
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeFloat_co,
+ left: None | _FloatLike_co = ...,
+ right: None | _FloatLike_co = ...,
+ period: None | _FloatLike_co = ...,
+) -> NDArray[float64]: ...
+@overload
+def interp(
+ x: _ArrayLikeFloat_co,
+ xp: _ArrayLikeFloat_co,
+ fp: _ArrayLikeComplex_co,
+ left: None | _ComplexLike_co = ...,
+ right: None | _ComplexLike_co = ...,
+ period: None | _FloatLike_co = ...,
+) -> NDArray[complex128]: ...
+
+@overload
+def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
+@overload
+def angle(z: object_, deg: bool = ...) -> Any: ...
+@overload
+def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
+@overload
+def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
+
+@overload
+def unwrap(
+ p: _ArrayLikeFloat_co,
+ discont: None | float = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def unwrap(
+ p: _ArrayLikeObject_co,
+ discont: None | float = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[object_]: ...
+
+def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
+
+def trim_zeros(
+ filt: _TrimZerosSequence[_T],
+ trim: L["f", "b", "fb", "bf"] = ...,
+) -> _T: ...
+
+@overload
+def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+def disp(
+ mesg: object,
+ device: None | _SupportsWriteFlush = ...,
+ linefeed: bool = ...,
+) -> None: ...
+
+@overload
+def cov(
+ m: _ArrayLikeFloat_co,
+ y: None | _ArrayLikeFloat_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: _DTypeLike[_SCT],
+) -> NDArray[_SCT]: ...
+@overload
+def cov(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ bias: bool = ...,
+ ddof: None | SupportsIndex | SupportsInt = ...,
+ fweights: None | ArrayLike = ...,
+ aweights: None | ArrayLike = ...,
+ *,
+ dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+# NOTE `bias` and `ddof` have been deprecated
+@overload
+def corrcoef(
+ m: _ArrayLikeFloat_co,
+ y: None | _ArrayLikeFloat_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: _DTypeLike[_SCT],
+) -> NDArray[_SCT]: ...
+@overload
+def corrcoef(
+ m: _ArrayLikeComplex_co,
+ y: None | _ArrayLikeComplex_co = ...,
+ rowvar: bool = ...,
+ *,
+ dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+
+def kaiser(
+ M: _FloatLike_co,
+ beta: _FloatLike_co,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def sinc(x: _FloatLike_co) -> floating[Any]: ...
+@overload
+def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+@overload
+def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+# NOTE: Deprecated
+# def msort(a: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def median(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def median(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def median(
+ a: _ArrayLikeTD64_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def median(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def median(
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ overwrite_input: bool = ...,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+_MethodKind = L[
+ "inverted_cdf",
+ "averaged_inverted_cdf",
+ "closest_observation",
+ "interpolated_inverted_cdf",
+ "hazen",
+ "weibull",
+ "linear",
+ "median_unbiased",
+ "normal_unbiased",
+ "lower",
+ "higher",
+ "midpoint",
+ "nearest",
+]
+
+@overload
+def percentile(
+ a: _ArrayLikeFloat_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def percentile(
+ a: _ArrayLikeTD64_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def percentile(
+ a: _ArrayLikeDT64_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> datetime64: ...
+@overload
+def percentile(
+ a: _ArrayLikeObject_co,
+ q: _FloatLike_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def percentile(
+ a: _ArrayLikeFloat_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def percentile(
+ a: _ArrayLikeComplex_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def percentile(
+ a: _ArrayLikeTD64_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def percentile(
+ a: _ArrayLikeDT64_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def percentile(
+ a: _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: L[False] = ...,
+) -> NDArray[object_]: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None | _ShapeLike = ...,
+ out: None = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: bool = ...,
+) -> Any: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+ q: _ArrayLikeFloat_co,
+ axis: None | _ShapeLike = ...,
+ out: _ArrayType = ...,
+ overwrite_input: bool = ...,
+ method: _MethodKind = ...,
+ keepdims: bool = ...,
+) -> _ArrayType: ...
+
+# NOTE: Not an alias, but they do have identical signatures
+# (that we can reuse)
+quantile = percentile
+
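+# Illustrative (not part of the stubs): because of the assignment above,
+# `quantile` and `percentile` resolve to the same overloads for a checker.
+#
+#     import numpy as np
+#     reveal_type(np.percentile([1.0, 2.0], 50))   # floating[Any]
+#     reveal_type(np.quantile([1.0, 2.0], 0.5))    # floating[Any]
+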
+# TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
+def trapz(
+ y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+ x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
+ dx: float = ...,
+ axis: SupportsIndex = ...,
+) -> Any: ...
+
+def meshgrid(
+ *xi: ArrayLike,
+ copy: bool = ...,
+ sparse: bool = ...,
+ indexing: L["xy", "ij"] = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def delete(
+ arr: _ArrayLike[_SCT],
+ obj: slice | _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def delete(
+ arr: ArrayLike,
+ obj: slice | _ArrayLikeInt_co,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def insert(
+ arr: _ArrayLike[_SCT],
+ obj: slice | _ArrayLikeInt_co,
+ values: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def insert(
+ arr: ArrayLike,
+ obj: slice | _ArrayLikeInt_co,
+ values: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def append(
+ arr: ArrayLike,
+ values: ArrayLike,
+ axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def digitize(
+ x: _FloatLike_co,
+ bins: _ArrayLikeFloat_co,
+ right: bool = ...,
+) -> intp: ...
+@overload
+def digitize(
+ x: _ArrayLikeFloat_co,
+ bins: _ArrayLikeFloat_co,
+ right: bool = ...,
+) -> NDArray[intp]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/histograms.py b/venv/lib/python3.9/site-packages/numpy/lib/histograms.py
new file mode 100644
index 00000000..0dfa7b4c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/histograms.py
@@ -0,0 +1,1070 @@
+"""
+Histogram-related functions
+"""
+import contextlib
+import functools
+import operator
+import warnings
+
+import numpy as np
+from numpy.core import overrides
+
+__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+# range is a keyword argument to many functions, so save the builtin so they can
+# use it.
+_range = range
+
+
+def _ptp(x):
+ """Peak-to-peak value of x.
+
+ This implementation avoids the problem of signed integer arrays having a
+ peak-to-peak value that cannot be represented with the array's data type.
+ This function returns an unsigned value for signed integer arrays.
+ """
+ return _unsigned_subtract(x.max(), x.min())
+
+
+def _hist_bin_sqrt(x, range):
+ """
+ Square root histogram bin estimator.
+
+    Bin width is inversely proportional to the square root of the data size.
+    Used by many programs for its simplicity.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return _ptp(x) / np.sqrt(x.size)
+
+
+def _hist_bin_sturges(x, range):
+ """
+ Sturges histogram bin estimator.
+
+ A very simplistic estimator based on the assumption of normality of
+ the data. This estimator has poor performance for non-normal data,
+ which becomes especially obvious for large data sets. The estimate
+ depends only on size of the data.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return _ptp(x) / (np.log2(x.size) + 1.0)
+
+
+def _hist_bin_rice(x, range):
+ """
+ Rice histogram bin estimator.
+
+ Another simple estimator with no normality assumption. It has better
+ performance for large data than Sturges, but tends to overestimate
+ the number of bins. The number of bins is proportional to the cube
+ root of data size (asymptotically optimal). The estimate depends
+ only on size of the data.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
+
+
+def _hist_bin_scott(x, range):
+ """
+ Scott histogram bin estimator.
+
+ The binwidth is proportional to the standard deviation of the data
+ and inversely proportional to the cube root of data size
+ (asymptotically optimal).
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
+
+
+def _hist_bin_stone(x, range):
+ """
+ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
+
+ The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
+ The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
+ https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+    This paper by Stone appears to be the origin of this rule.
+ http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+ range : (float, float)
+ The lower and upper range of the bins.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+
+ n = x.size
+ ptp_x = _ptp(x)
+ if n <= 1 or ptp_x == 0:
+ return 0
+
+ def jhat(nbins):
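+        # Leave-one-out CV estimate of the integrated squared error,
+        # proportional to J(h) = (2 - (n + 1) * sum(p_k ** 2)) / h.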
+ hh = ptp_x / nbins
+ p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+ return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+ nbins_upper_bound = max(100, int(np.sqrt(n)))
+ nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+ if nbins == nbins_upper_bound:
+ warnings.warn("The number of bins estimated may be suboptimal.",
+ RuntimeWarning, stacklevel=3)
+ return ptp_x / nbins
+
+
+def _hist_bin_doane(x, range):
+ """
+ Doane's histogram bin estimator.
+
+ Improved version of Sturges' formula which works better for
+ non-normal data. See
+ stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ if x.size > 2:
+ sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
+ sigma = np.std(x)
+ if sigma > 0.0:
+ # These three operations add up to
+ # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
+ # but use only one temp array instead of three
+ temp = x - np.mean(x)
+ np.true_divide(temp, sigma, temp)
+ np.power(temp, 3, temp)
+ g1 = np.mean(temp)
+ return _ptp(x) / (1.0 + np.log2(x.size) +
+ np.log2(1.0 + np.absolute(g1) / sg1))
+ return 0.0
+
+
+def _hist_bin_fd(x, range):
+ """
+ The Freedman-Diaconis histogram bin estimator.
+
+ The Freedman-Diaconis rule uses interquartile range (IQR) to
+ estimate binwidth. It is considered a variation of the Scott rule
+ with more robustness as the IQR is less affected by outliers than
+ the standard deviation. However, the IQR depends on fewer points
+ than the standard deviation, so it is less accurate, especially for
+ long tailed distributions.
+
+ If the IQR is 0, this function returns 0 for the bin width.
+ Binwidth is inversely proportional to the cube root of data size
+ (asymptotically optimal).
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+ del range # unused
+ iqr = np.subtract(*np.percentile(x, [75, 25]))
+ return 2.0 * iqr * x.size ** (-1.0 / 3.0)
+
+
+def _hist_bin_auto(x, range):
+ """
+ Histogram bin estimator that uses the minimum width of the
+ Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
+ If the bin width from the FD estimator is 0, the Sturges estimator is used.
+
+ The FD estimator is usually the most robust method, but its width
+ estimate tends to be too large for small `x` and bad for data with limited
+ variance. The Sturges estimator is quite good for small (<1000) datasets
+ and is the default in the R language. This method gives good off-the-shelf
+ behaviour.
+
+ .. versionchanged:: 1.15.0
+ If there is limited variance the IQR can be 0, which results in the
+ FD bin width being 0 too. This is not a valid bin width, so
+ ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
+ If the IQR is 0, it's unlikely any variance-based estimators will be of
+ use, so we revert to the Sturges estimator, which only uses the size of the
+ dataset in its calculation.
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+
+ See Also
+ --------
+ _hist_bin_fd, _hist_bin_sturges
+ """
+ fd_bw = _hist_bin_fd(x, range)
+ sturges_bw = _hist_bin_sturges(x, range)
+ del range # unused
+ if fd_bw:
+ return min(fd_bw, sturges_bw)
+ else:
+ # limited variance, so we return a len dependent bw estimator
+ return sturges_bw
+
+# Private dict initialized at module load time
+_hist_bin_selectors = {'stone': _hist_bin_stone,
+ 'auto': _hist_bin_auto,
+ 'doane': _hist_bin_doane,
+ 'fd': _hist_bin_fd,
+ 'rice': _hist_bin_rice,
+ 'scott': _hist_bin_scott,
+ 'sqrt': _hist_bin_sqrt,
+ 'sturges': _hist_bin_sturges}
+
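+# Illustrative sketch (not part of the NumPy sources): each selector is a
+# callable mapping ``(data, range)`` to a bin width, so it can be invoked
+# directly; e.g. the 'sqrt' rule on 100 evenly spaced points.
+#
+#     >>> x = np.linspace(0.0, 1.0, 100)
+#     >>> _hist_bin_selectors['sqrt'](x, None)   # ptp(x) / sqrt(x.size)
+#     0.1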
+
+def _ravel_and_check_weights(a, weights):
+ """ Check a and weights have matching shapes, and ravel both """
+ a = np.asarray(a)
+
+ # Ensure that the array is a "subtractable" dtype
+ if a.dtype == np.bool_:
+ warnings.warn("Converting input from {} to {} for compatibility."
+ .format(a.dtype, np.uint8),
+ RuntimeWarning, stacklevel=3)
+ a = a.astype(np.uint8)
+
+ if weights is not None:
+ weights = np.asarray(weights)
+ if weights.shape != a.shape:
+ raise ValueError(
+ 'weights should have the same shape as a.')
+ weights = weights.ravel()
+ a = a.ravel()
+ return a, weights
+
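+# Illustrative sketch (not part of the NumPy sources): boolean input is
+# promoted to uint8 (with a RuntimeWarning) so the histogram code can
+# subtract values.
+#
+#     >>> a, _ = _ravel_and_check_weights(np.array([True, False]), None)
+#     >>> a.dtype
+#     dtype('uint8')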
+
+def _get_outer_edges(a, range):
+ """
+ Determine the outer bin edges to use, from either the data or the range
+ argument
+ """
+ if range is not None:
+ first_edge, last_edge = range
+ if first_edge > last_edge:
+ raise ValueError(
+ 'max must be larger than min in range parameter.')
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+ raise ValueError(
+ "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
+ elif a.size == 0:
+ # handle empty arrays. Can't determine range, so use 0-1.
+ first_edge, last_edge = 0, 1
+ else:
+ first_edge, last_edge = a.min(), a.max()
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+ raise ValueError(
+ "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
+
+ # expand empty range to avoid divide by zero
+ if first_edge == last_edge:
+ first_edge = first_edge - 0.5
+ last_edge = last_edge + 0.5
+
+ return first_edge, last_edge
+
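+# Illustrative sketch (not part of the NumPy sources): a degenerate range
+# is widened by half a unit on each side so the bin width is never zero.
+#
+#     >>> _get_outer_edges(np.array([1.0, 4.0]), None)
+#     (1.0, 4.0)
+#     >>> _get_outer_edges(np.array([3.0]), None)
+#     (2.5, 3.5)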
+
+def _unsigned_subtract(a, b):
+ """
+ Subtract two values where a >= b, and produce an unsigned result
+
+ This is needed when finding the difference between the upper and lower
+ bound of an int16 histogram
+ """
+ # coerce to a single type
+ signed_to_unsigned = {
+ np.byte: np.ubyte,
+ np.short: np.ushort,
+ np.intc: np.uintc,
+ np.int_: np.uint,
+ np.longlong: np.ulonglong
+ }
+ dt = np.result_type(a, b)
+ try:
+ dt = signed_to_unsigned[dt.type]
+ except KeyError:
+ return np.subtract(a, b, dtype=dt)
+ else:
+ # we know the inputs are integers, and we are deliberately casting
+ # signed to unsigned
+ return np.subtract(a, b, casting='unsafe', dtype=dt)
+
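+# Illustrative sketch (not part of the NumPy sources): in plain int8
+# arithmetic the peak-to-peak of [-128, 127] wraps around (and may emit a
+# RuntimeWarning), while the unsigned subtraction yields the exact width.
+#
+#     >>> a = np.array([-128, 127], dtype=np.int8)
+#     >>> int(a.max() - a.min())
+#     -1
+#     >>> int(_unsigned_subtract(a.max(), a.min()))
+#     255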
+
+def _get_bin_edges(a, bins, range, weights):
+ """
+ Computes the bins used internally by `histogram`.
+
+ Parameters
+ ==========
+ a : ndarray
+ Ravelled data array
+ bins, range
+ Forwarded arguments from `histogram`.
+ weights : ndarray, optional
+ Ravelled weights array, or None
+
+ Returns
+ =======
+ bin_edges : ndarray
+ Array of bin edges
+    uniform_bins : (Number, Number, int):
+        The lower bound, upper bound, and number of bins, used in the
+        optimized implementation of `histogram` that works on uniform bins.
+ """
+ # parse the overloaded bins argument
+ n_equal_bins = None
+ bin_edges = None
+
+ if isinstance(bins, str):
+ bin_name = bins
+ # if `bins` is a string for an automatic method,
+ # this will replace it with the number of bins calculated
+ if bin_name not in _hist_bin_selectors:
+ raise ValueError(
+ "{!r} is not a valid estimator for `bins`".format(bin_name))
+ if weights is not None:
+ raise TypeError("Automated estimation of the number of "
+ "bins is not supported for weighted data")
+
+ first_edge, last_edge = _get_outer_edges(a, range)
+
+ # truncate the range if needed
+ if range is not None:
+ keep = (a >= first_edge)
+ keep &= (a <= last_edge)
+ if not np.logical_and.reduce(keep):
+ a = a[keep]
+
+ if a.size == 0:
+ n_equal_bins = 1
+ else:
+ # Do not call selectors on empty arrays
+ width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
+ if width:
+ n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
+ else:
+ # Width can be zero for some estimators, e.g. FD when
+ # the IQR of the data is zero.
+ n_equal_bins = 1
+
+ elif np.ndim(bins) == 0:
+ try:
+ n_equal_bins = operator.index(bins)
+ except TypeError as e:
+ raise TypeError(
+ '`bins` must be an integer, a string, or an array') from e
+ if n_equal_bins < 1:
+ raise ValueError('`bins` must be positive, when an integer')
+
+ first_edge, last_edge = _get_outer_edges(a, range)
+
+ elif np.ndim(bins) == 1:
+ bin_edges = np.asarray(bins)
+ if np.any(bin_edges[:-1] > bin_edges[1:]):
+ raise ValueError(
+ '`bins` must increase monotonically, when an array')
+
+ else:
+ raise ValueError('`bins` must be 1d, when an array')
+
+ if n_equal_bins is not None:
+ # gh-10322 means that type resolution rules are dependent on array
+ # shapes. To avoid this causing problems, we pick a type now and stick
+ # with it throughout.
+ bin_type = np.result_type(first_edge, last_edge, a)
+ if np.issubdtype(bin_type, np.integer):
+ bin_type = np.result_type(bin_type, float)
+
+ # bin edges must be computed
+ bin_edges = np.linspace(
+ first_edge, last_edge, n_equal_bins + 1,
+ endpoint=True, dtype=bin_type)
+ return bin_edges, (first_edge, last_edge, n_equal_bins)
+ else:
+ return bin_edges, None
+
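+# Illustrative sketch (not part of the NumPy sources): an integer ``bins``
+# yields linspace edges plus the uniform-bin descriptor consumed by the
+# fast path in `histogram`.
+#
+#     >>> edges, uniform = _get_bin_edges(np.arange(5.0), 4, None, None)
+#     >>> edges
+#     array([0., 1., 2., 3., 4.])
+#     >>> uniform[2]   # number of equal-width bins
+#     4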
+
+def _search_sorted_inclusive(a, v):
+ """
+ Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+ In the context of a histogram, this makes the last bin edge inclusive
+ """
+ return np.concatenate((
+ a.searchsorted(v[:-1], 'left'),
+ a.searchsorted(v[-1:], 'right')
+ ))
+
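+# Illustrative sketch (not part of the NumPy sources): only the last edge
+# is searched from the right, which makes the final bin closed on both
+# sides.
+#
+#     >>> a = np.array([1.0, 2.0, 3.0, 4.0])
+#     >>> _search_sorted_inclusive(a, np.array([1.0, 2.5, 4.0]))
+#     array([0, 2, 4])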
+
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
+ r"""
+ Function to calculate only the edges of the bins used by the `histogram`
+ function.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data. The histogram is computed over the flattened array.
+ bins : int or sequence of scalars or str, optional
+ If `bins` is an int, it defines the number of equal-width
+ bins in the given range (10, by default). If `bins` is a
+ sequence, it defines the bin edges, including the rightmost
+ edge, allowing for non-uniform bin widths.
+
+ If `bins` is a string from the list below, `histogram_bin_edges` will use
+ the method chosen to calculate the optimal bin width and
+ consequently the number of bins (see `Notes` for more detail on
+ the estimators) from the data that falls within the requested
+ range. While the bin width will be optimal for the actual data
+ in the range, the number of bins will be computed to fill the
+ entire range, including the empty portions. For visualisation,
+ using the 'auto' option is suggested. Weighted data is not
+ supported for automated bin size selection.
+
+ 'auto'
+ Maximum of the 'sturges' and 'fd' estimators. Provides good
+ all around performance.
+
+ 'fd' (Freedman Diaconis Estimator)
+ Robust (resilient to outliers) estimator that takes into
+ account data variability and data size.
+
+ 'doane'
+ An improved version of Sturges' estimator that works better
+ with non-normal datasets.
+
+ 'scott'
+ Less robust estimator that takes into account data variability
+ and data size.
+
+ 'stone'
+ Estimator based on leave-one-out cross-validation estimate of
+ the integrated squared error. Can be regarded as a generalization
+ of Scott's rule.
+
+ 'rice'
+ Estimator does not take variability into account, only data
+ size. Commonly overestimates number of bins required.
+
+ 'sturges'
+ R's default method, only accounts for data size. Only
+ optimal for gaussian data and underestimates number of bins
+ for large non-gaussian datasets.
+
+ 'sqrt'
+ Square root (of data size) estimator, used by Excel and
+ other programs for its speed and simplicity.
+
+ range : (float, float), optional
+ The lower and upper range of the bins. If not provided, range
+ is simply ``(a.min(), a.max())``. Values outside the range are
+ ignored. The first element of the range must be less than or
+ equal to the second. `range` affects the automatic bin
+ computation as well. While bin width is computed to be optimal
+ based on the actual data within `range`, the bin count will fill
+ the entire range including portions containing no data.
+
+ weights : array_like, optional
+ An array of weights, of the same shape as `a`. Each value in
+ `a` only contributes its associated weight towards the bin count
+ (instead of 1). This is currently not used by any of the bin estimators,
+ but may be in the future.
+
+ Returns
+ -------
+ bin_edges : array of dtype float
+ The edges to pass into `histogram`
+
+ See Also
+ --------
+ histogram
+
+ Notes
+ -----
+ The methods to estimate the optimal number of bins are well founded
+ in literature, and are inspired by the choices R provides for
+ histogram visualisation. Note that having the number of bins
+ proportional to :math:`n^{1/3}` is asymptotically optimal, which is
+ why it appears in most estimators. These are simply plug-in methods
+ that give good starting points for number of bins. In the equations
+ below, :math:`h` is the binwidth and :math:`n_h` is the number of
+ bins. All estimators that compute bin counts are recast to bin width
+ using the `ptp` of the data. The final bin count is obtained from
+ ``np.round(np.ceil(range / h))``. The final bin width is often less
+ than what is returned by the estimators below.
+
+ 'auto' (maximum of the 'sturges' and 'fd' estimators)
+ A compromise to get a good value. For small datasets the Sturges
+ value will usually be chosen, while larger datasets will usually
+ default to FD. Avoids the overly conservative behaviour of FD
+ and Sturges for small and large datasets respectively.
+ Switchover point is usually :math:`a.size \approx 1000`.
+
+ 'fd' (Freedman Diaconis Estimator)
+ .. math:: h = 2 \frac{IQR}{n^{1/3}}
+
+ The binwidth is proportional to the interquartile range (IQR)
+ and inversely proportional to cube root of a.size. Can be too
+ conservative for small datasets, but is quite good for large
+ datasets. The IQR is very robust to outliers.
+
+ 'scott'
+ .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
+
+ The binwidth is proportional to the standard deviation of the
+ data and inversely proportional to cube root of ``x.size``. Can
+ be too conservative for small datasets, but is quite good for
+ large datasets. The standard deviation is not very robust to
+ outliers. Values are very similar to the Freedman-Diaconis
+ estimator in the absence of outliers.
+
+ 'rice'
+ .. math:: n_h = 2n^{1/3}
+
+ The number of bins is only proportional to cube root of
+ ``a.size``. It tends to overestimate the number of bins and it
+ does not take into account data variability.
+
+ 'sturges'
+ .. math:: n_h = \log _{2}(n) + 1
+
+ The number of bins is the base 2 log of ``a.size``. This
+ estimator assumes normality of data and is too conservative for
+ larger, non-normal datasets. This is the default method in R's
+ ``hist`` method.
+
+ 'doane'
+ .. math:: n_h = 1 + \log_{2}(n) +
+ \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
+
+ g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
+
+ \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
+
+ An improved version of Sturges' formula that produces better
+ estimates for non-normal datasets. This estimator attempts to
+ account for the skew of the data.
+
+ 'sqrt'
+ .. math:: n_h = \sqrt n
+
+ The simplest and fastest estimator. Only takes into account the
+ data size.
+
+ Examples
+ --------
+ >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
+ >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ array([0. , 0.25, 0.5 , 0.75, 1. ])
+ >>> np.histogram_bin_edges(arr, bins=2)
+ array([0. , 2.5, 5. ])
+
+ For consistency with histogram, an array of pre-computed bins is
+ passed through unmodified:
+
+ >>> np.histogram_bin_edges(arr, [1, 2])
+ array([1, 2])
+
+ This function allows one set of bins to be computed, and reused across
+ multiple histograms:
+
+ >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
+ >>> shared_bins
+ array([0., 1., 2., 3., 4., 5.])
+
+ >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
+ >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
+ >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
+
+ >>> hist_0; hist_1
+ array([1, 1, 0, 1, 0])
+ array([2, 0, 1, 1, 2])
+
+ Which gives more easily comparable results than using separate bins for
+ each histogram:
+
+ >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
+ >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
+ >>> hist_0; hist_1
+ array([1, 1, 1])
+ array([2, 1, 1, 2])
+ >>> bins_0; bins_1
+ array([0., 1., 2., 3.])
+ array([0. , 1.25, 2.5 , 3.75, 5. ])
+
+ """
+ a, weights = _ravel_and_check_weights(a, weights)
+ bin_edges, _ = _get_bin_edges(a, bins, range, weights)
+ return bin_edges
+
+
+def _histogram_dispatcher(
+ a, bins=None, range=None, density=None, weights=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_dispatcher)
+def histogram(a, bins=10, range=None, density=None, weights=None):
+ r"""
+ Compute the histogram of a dataset.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data. The histogram is computed over the flattened array.
+ bins : int or sequence of scalars or str, optional
+ If `bins` is an int, it defines the number of equal-width
+ bins in the given range (10, by default). If `bins` is a
+ sequence, it defines a monotonically increasing array of bin edges,
+ including the rightmost edge, allowing for non-uniform bin widths.
+
+ .. versionadded:: 1.11.0
+
+ If `bins` is a string, it defines the method used to calculate the
+ optimal bin width, as defined by `histogram_bin_edges`.
+
+ range : (float, float), optional
+ The lower and upper range of the bins. If not provided, range
+ is simply ``(a.min(), a.max())``. Values outside the range are
+ ignored. The first element of the range must be less than or
+ equal to the second. `range` affects the automatic bin
+ computation as well. While bin width is computed to be optimal
+ based on the actual data within `range`, the bin count will fill
+ the entire range including portions containing no data.
+ weights : array_like, optional
+ An array of weights, of the same shape as `a`. Each value in
+ `a` only contributes its associated weight towards the bin count
+ (instead of 1). If `density` is True, the weights are
+ normalized, so that the integral of the density over the range
+ remains 1.
+ density : bool, optional
+ If ``False``, the result will contain the number of samples in
+ each bin. If ``True``, the result is the value of the
+ probability *density* function at the bin, normalized such that
+ the *integral* over the range is 1. Note that the sum of the
+ histogram values will not be equal to 1 unless bins of unity
+ width are chosen; it is not a probability *mass* function.
+
+ Returns
+ -------
+ hist : array
+ The values of the histogram. See `density` and `weights` for a
+ description of the possible semantics.
+ bin_edges : array of dtype float
+ Return the bin edges ``(length(hist)+1)``.
+
+
+ See Also
+ --------
+ histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
+
+ Notes
+ -----
+ All but the last (righthand-most) bin is half-open. In other words,
+ if `bins` is::
+
+ [1, 2, 3, 4]
+
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
+ *includes* 4.
+
+
+ Examples
+ --------
+ >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
+ (array([0, 2, 1]), array([0, 1, 2, 3]))
+ >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
+ (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
+ >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
+ (array([1, 4, 1]), array([0, 1, 2, 3]))
+
+ >>> a = np.arange(5)
+ >>> hist, bin_edges = np.histogram(a, density=True)
+ >>> hist
+ array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
+ >>> hist.sum()
+ 2.4999999999999996
+ >>> np.sum(hist * np.diff(bin_edges))
+ 1.0
+
+ .. versionadded:: 1.11.0
+
+ Automated Bin Selection Methods example, using 2 peak random data
+ with 2000 points:
+
+ >>> import matplotlib.pyplot as plt
+ >>> rng = np.random.RandomState(10) # deterministic random data
+ >>> a = np.hstack((rng.normal(size=1000),
+ ... rng.normal(loc=5, scale=2, size=1000)))
+ >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram
+ >>> plt.title("Histogram with 'auto' bins")
+ Text(0.5, 1.0, "Histogram with 'auto' bins")
+ >>> plt.show()
+
+ """
+ a, weights = _ravel_and_check_weights(a, weights)
+
+ bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
+
+ # Histogram is an integer or a float array depending on the weights.
+ if weights is None:
+ ntype = np.dtype(np.intp)
+ else:
+ ntype = weights.dtype
+
+ # We set a block size, as this allows us to iterate over chunks when
+ # computing histograms, to minimize memory usage.
+ BLOCK = 65536
+
+ # The fast path uses bincount, but that only works for certain types
+ # of weight
+ simple_weights = (
+ weights is None or
+ np.can_cast(weights.dtype, np.double) or
+ np.can_cast(weights.dtype, complex)
+ )
+
+ if uniform_bins is not None and simple_weights:
+ # Fast algorithm for equal bins
+ # We now convert values of a to bin indices, under the assumption of
+ # equal bin widths (which is valid here).
+ first_edge, last_edge, n_equal_bins = uniform_bins
+
+ # Initialize empty histogram
+ n = np.zeros(n_equal_bins, ntype)
+
+ # Pre-compute histogram scaling factor
+ norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
+
+ # We iterate over blocks here for two reasons: the first is that for
+ # large arrays, it is actually faster (for example for a 10^8 array it
+ # is 2x as fast) and it results in a memory footprint 3x lower in the
+ # limit of large arrays.
+ for i in _range(0, len(a), BLOCK):
+ tmp_a = a[i:i+BLOCK]
+ if weights is None:
+ tmp_w = None
+ else:
+ tmp_w = weights[i:i + BLOCK]
+
+ # Only include values in the right range
+ keep = (tmp_a >= first_edge)
+ keep &= (tmp_a <= last_edge)
+ if not np.logical_and.reduce(keep):
+ tmp_a = tmp_a[keep]
+ if tmp_w is not None:
+ tmp_w = tmp_w[keep]
+
+ # This cast ensures no type promotions occur below, which gh-10322
+ # make unpredictable. Getting it wrong leads to precision errors
+ # like gh-8123.
+ tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
+
+ # Compute the bin indices, and for values that lie exactly on
+ # last_edge we need to subtract one
+ f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
+ indices = f_indices.astype(np.intp)
+ indices[indices == n_equal_bins] -= 1
+
+ # The index computation is not guaranteed to give exactly
+ # consistent results within ~1 ULP of the bin edges.
+ decrement = tmp_a < bin_edges[indices]
+ indices[decrement] -= 1
+ # The last bin includes the right edge. The other bins do not.
+ increment = ((tmp_a >= bin_edges[indices + 1])
+ & (indices != n_equal_bins - 1))
+ indices[increment] += 1
+
+ # We now compute the histogram using bincount
+ if ntype.kind == 'c':
+ n.real += np.bincount(indices, weights=tmp_w.real,
+ minlength=n_equal_bins)
+ n.imag += np.bincount(indices, weights=tmp_w.imag,
+ minlength=n_equal_bins)
+ else:
+ n += np.bincount(indices, weights=tmp_w,
+ minlength=n_equal_bins).astype(ntype)
+ else:
+ # Compute via cumulative histogram
+ cum_n = np.zeros(bin_edges.shape, ntype)
+ if weights is None:
+ for i in _range(0, len(a), BLOCK):
+ sa = np.sort(a[i:i+BLOCK])
+ cum_n += _search_sorted_inclusive(sa, bin_edges)
+ else:
+ zero = np.zeros(1, dtype=ntype)
+ for i in _range(0, len(a), BLOCK):
+ tmp_a = a[i:i+BLOCK]
+ tmp_w = weights[i:i+BLOCK]
+ sorting_index = np.argsort(tmp_a)
+ sa = tmp_a[sorting_index]
+ sw = tmp_w[sorting_index]
+ cw = np.concatenate((zero, sw.cumsum()))
+ bin_index = _search_sorted_inclusive(sa, bin_edges)
+ cum_n += cw[bin_index]
+
+ n = np.diff(cum_n)
+
+ if density:
+ db = np.array(np.diff(bin_edges), float)
+ return n/db/n.sum(), bin_edges
+
+ return n, bin_edges
+
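+# Illustrative sketch (not part of the NumPy sources): on the uniform fast
+# path, values equal to the last edge land in the final (closed) bin.
+#
+#     >>> hist, edges = np.histogram([0.0, 0.4, 1.0, 1.6, 2.0], bins=2,
+#     ...                            range=(0.0, 2.0))
+#     >>> hist
+#     array([2, 3])
+#     >>> edges
+#     array([0., 1., 2.])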
+
+def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
+ weights=None):
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
+ yield sample
+ else:
+ yield from sample
+ with contextlib.suppress(TypeError):
+ yield from bins
+ yield weights
+
+
+@array_function_dispatch(_histogramdd_dispatcher)
+def histogramdd(sample, bins=10, range=None, density=None, weights=None):
+ """
+ Compute the multidimensional histogram of some data.
+
+ Parameters
+ ----------
+    sample : (N, D) array, or (D, N) array_like
+ The data to be histogrammed.
+
+ Note the unusual interpretation of sample when an array_like:
+
+ * When an array, each row is a coordinate in a D-dimensional space -
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
+        * When an array_like, each element is the list of values for a
+          single coordinate - such as ``histogramdd((X, Y, Z))``.
+
+ The first form should be preferred.
+
+ bins : sequence or int, optional
+ The bin specification:
+
+ * A sequence of arrays describing the monotonically increasing bin
+ edges along each dimension.
+ * The number of bins for each dimension (nx, ny, ... =bins)
+ * The number of bins for all dimensions (nx=ny=...=bins).
+
+ range : sequence, optional
+ A sequence of length D, each an optional (lower, upper) tuple giving
+ the outer bin edges to be used if the edges are not given explicitly in
+ `bins`.
+ An entry of None in the sequence results in the minimum and maximum
+ values being used for the corresponding dimension.
+ The default, None, is equivalent to passing a tuple of D None values.
+ density : bool, optional
+ If False, the default, returns the number of samples in each bin.
+ If True, returns the probability *density* function at the bin,
+ ``bin_count / sample_count / bin_volume``.
+ weights : (N,) array_like, optional
+ An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
+ Weights are normalized to 1 if density is True. If density is False,
+ the values of the returned histogram are equal to the sum of the
+ weights belonging to the samples falling into each bin.
+
+ Returns
+ -------
+ H : ndarray
+ The multidimensional histogram of sample x. See density and weights
+ for the different possible semantics.
+ edges : list
+ A list of D arrays describing the bin edges for each dimension.
+
+ See Also
+ --------
+ histogram: 1-D histogram
+ histogram2d: 2-D histogram
+
+ Examples
+ --------
+ >>> r = np.random.randn(100,3)
+ >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
+ >>> H.shape, edges[0].size, edges[1].size, edges[2].size
+ ((5, 8, 4), 6, 9, 5)
+
+ """
+
+ try:
+ # Sample is an ND-array.
+ N, D = sample.shape
+ except (AttributeError, ValueError):
+ # Sample is a sequence of 1D arrays.
+ sample = np.atleast_2d(sample).T
+ N, D = sample.shape
+
+ nbin = np.empty(D, np.intp)
+ edges = D*[None]
+ dedges = D*[None]
+ if weights is not None:
+ weights = np.asarray(weights)
+
+ try:
+ M = len(bins)
+ if M != D:
+ raise ValueError(
+ 'The dimension of bins must be equal to the dimension of the '
+                'sample x.')
+ except TypeError:
+ # bins is an integer
+ bins = D*[bins]
+
+ # normalize the range argument
+ if range is None:
+ range = (None,) * D
+ elif len(range) != D:
+ raise ValueError('range argument must have one entry per dimension')
+
+ # Create edge arrays
+ for i in _range(D):
+ if np.ndim(bins[i]) == 0:
+ if bins[i] < 1:
+ raise ValueError(
+ '`bins[{}]` must be positive, when an integer'.format(i))
+ smin, smax = _get_outer_edges(sample[:,i], range[i])
+ try:
+ n = operator.index(bins[i])
+
+ except TypeError as e:
+ raise TypeError(
+ "`bins[{}]` must be an integer, when a scalar".format(i)
+ ) from e
+
+ edges[i] = np.linspace(smin, smax, n + 1)
+ elif np.ndim(bins[i]) == 1:
+ edges[i] = np.asarray(bins[i])
+ if np.any(edges[i][:-1] > edges[i][1:]):
+ raise ValueError(
+ '`bins[{}]` must be monotonically increasing, when an array'
+ .format(i))
+ else:
+ raise ValueError(
+ '`bins[{}]` must be a scalar or 1d array'.format(i))
+
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
+ dedges[i] = np.diff(edges[i])
+
+ # Compute the bin number each sample falls into.
+ Ncount = tuple(
+ # avoid np.digitize to work around gh-11022
+ np.searchsorted(edges[i], sample[:, i], side='right')
+ for i in _range(D)
+ )
+
+    # With side='right', values that fall on an edge are put in the right bin.
+ # For the rightmost bin, we want values equal to the right edge to be
+ # counted in the last bin, and not as an outlier.
+ for i in _range(D):
+ # Find which points are on the rightmost edge.
+ on_edge = (sample[:, i] == edges[i][-1])
+ # Shift these points one bin to the left.
+ Ncount[i][on_edge] -= 1
+
+ # Compute the sample indices in the flattened histogram matrix.
+ # This raises an error if the array is too large.
+ xy = np.ravel_multi_index(Ncount, nbin)
+
+ # Compute the number of repetitions in xy and assign it to the
+ # flattened histmat.
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
+
+ # Shape into a proper matrix
+ hist = hist.reshape(nbin)
+
+ # This preserves the (bad) behavior observed in gh-7845, for now.
+ hist = hist.astype(float, casting='safe')
+
+ # Remove outliers (indices 0 and -1 for each dimension).
+ core = D*(slice(1, -1),)
+ hist = hist[core]
+
+ if density:
+ # calculate the probability density function
+ s = hist.sum()
+ for i in _range(D):
+ shape = np.ones(D, int)
+ shape[i] = nbin[i] - 2
+ hist = hist / dedges[i].reshape(shape)
+ hist /= s
+
+ if (hist.shape != nbin - 2).any():
+ raise RuntimeError(
+ "Internal Shape Error")
+ return hist, edges
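+
+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# The per-dimension binning above leaves two "outlier" bins per axis and then
+# shifts samples that sit exactly on the rightmost edge back into the last
+# real bin.  A hypothetical 1-D rendition of that fix-up:
+def _demo_edge_fixup():
+    import numpy as np
+    edges = np.array([0.0, 1.0, 2.0])
+    x = np.array([-0.5, 0.0, 0.5, 1.0, 2.0, 2.5])
+    idx = np.searchsorted(edges, x, side='right')
+    idx[x == edges[-1]] -= 1  # the right edge belongs to the last bin
+    # 0 and len(edges) are the under-/overflow bins trimmed off later
+    assert idx.tolist() == [0, 1, 1, 2, 2, 3]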
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/histograms.pyi b/venv/lib/python3.9/site-packages/numpy/lib/histograms.pyi
new file mode 100644
index 00000000..ce02718a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/histograms.pyi
@@ -0,0 +1,47 @@
+from collections.abc import Sequence
+from typing import (
+ Literal as L,
+ Any,
+ SupportsIndex,
+)
+
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+)
+
+_BinKind = L[
+ "stone",
+ "auto",
+ "doane",
+ "fd",
+ "rice",
+ "scott",
+ "sqrt",
+ "sturges",
+]
+
+__all__: list[str]
+
+def histogram_bin_edges(
+ a: ArrayLike,
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
+ range: None | tuple[float, float] = ...,
+ weights: None | ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+def histogram(
+ a: ArrayLike,
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
+ range: None | tuple[float, float] = ...,
+ density: bool = ...,
+ weights: None | ArrayLike = ...,
+) -> tuple[NDArray[Any], NDArray[Any]]: ...
+
+def histogramdd(
+ sample: ArrayLike,
+ bins: SupportsIndex | ArrayLike = ...,
+ range: Sequence[tuple[float, float]] = ...,
+ density: None | bool = ...,
+ weights: None | ArrayLike = ...,
+) -> tuple[NDArray[Any], list[NDArray[Any]]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/index_tricks.py b/venv/lib/python3.9/site-packages/numpy/lib/index_tricks.py
new file mode 100644
index 00000000..95d5e3ed
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/index_tricks.py
@@ -0,0 +1,1021 @@
+import functools
+import sys
+import math
+import warnings
+
+import numpy.core.numeric as _nx
+from numpy.core.numeric import (
+ asarray, ScalarType, array, alltrue, cumprod, arange, ndim
+)
+from numpy.core.numerictypes import find_common_type, issubdtype
+
+import numpy.matrixlib as matrixlib
+from .function_base import diff
+from numpy.core.multiarray import ravel_multi_index, unravel_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides, linspace
+from numpy.lib.stride_tricks import as_strided
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
+ 'diag_indices', 'diag_indices_from'
+]
+
+
+def _ix__dispatcher(*args):
+ return args
+
+
+@array_function_dispatch(_ix__dispatcher)
+def ix_(*args):
+ """
+ Construct an open mesh from multiple sequences.
+
+ This function takes N 1-D sequences and returns N outputs with N
+ dimensions each, such that the shape is 1 in all but one dimension
+ and the dimension with the non-unit shape value cycles through all
+ N dimensions.
+
+ Using `ix_` one can quickly construct index arrays that will index
+ the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
+ ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
+
+ Parameters
+ ----------
+ args : 1-D sequences
+ Each sequence should be of integer or boolean type.
+ Boolean sequences will be interpreted as boolean masks for the
+ corresponding dimension (equivalent to passing in
+ ``np.nonzero(boolean_sequence)``).
+
+ Returns
+ -------
+ out : tuple of ndarrays
+ N arrays with N dimensions each, with N the number of input
+ sequences. Together these arrays form an open mesh.
+
+ See Also
+ --------
+ ogrid, mgrid, meshgrid
+
+ Examples
+ --------
+ >>> a = np.arange(10).reshape(2, 5)
+ >>> a
+ array([[0, 1, 2, 3, 4],
+ [5, 6, 7, 8, 9]])
+ >>> ixgrid = np.ix_([0, 1], [2, 4])
+ >>> ixgrid
+ (array([[0],
+ [1]]), array([[2, 4]]))
+ >>> ixgrid[0].shape, ixgrid[1].shape
+ ((2, 1), (1, 2))
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
+
+ >>> ixgrid = np.ix_([True, True], [2, 4])
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
+ >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
+
+ """
+ out = []
+ nd = len(args)
+ for k, new in enumerate(args):
+ if not isinstance(new, _nx.ndarray):
+ new = asarray(new)
+ if new.size == 0:
+ # Explicitly type empty arrays to avoid float default
+ new = new.astype(_nx.intp)
+ if new.ndim != 1:
+ raise ValueError("Cross index must be 1 dimensional")
+ if issubdtype(new.dtype, _nx.bool_):
+ new, = new.nonzero()
+ new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
+ out.append(new)
+ return tuple(out)
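+
+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# The reshape above places each sequence's length on its own axis, so the
+# returned arrays broadcast against one another to index the cross product:
+def _demo_open_mesh():
+    import numpy as np
+    a = np.arange(20).reshape(4, 5)
+    rows, cols = np.ix_([1, 3], [0, 2, 4])
+    assert rows.shape == (2, 1) and cols.shape == (1, 3)
+    expected = a[np.array([1, 3])[:, None], np.array([0, 2, 4])[None, :]]
+    assert np.array_equal(a[rows, cols], expected)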
+
+
+class nd_grid:
+ """
+ Construct a multi-dimensional "meshgrid".
+
+ ``grid = nd_grid()`` creates an instance which will return a mesh-grid
+ when indexed. The dimension and number of the output arrays are equal
+ to the number of indexing dimensions. If the step length is not a
+ complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then the
+ integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ If instantiated with an argument of ``sparse=True``, the mesh-grid is
+ open (or not fleshed out) so that only one-dimension of each returned
+ argument is greater than 1.
+
+ Parameters
+ ----------
+ sparse : bool, optional
+ Whether the grid is sparse or not. Default is False.
+
+ Notes
+ -----
+ Two instances of `nd_grid` are made available in the NumPy namespace,
+ `mgrid` and `ogrid`, approximately defined as::
+
+ mgrid = nd_grid(sparse=False)
+ ogrid = nd_grid(sparse=True)
+
+ Users should use these pre-defined instances instead of using `nd_grid`
+ directly.
+ """
+
+ def __init__(self, sparse=False):
+ self.sparse = sparse
+
+ def __getitem__(self, key):
+ try:
+ size = []
+ # Mimic the behavior of `np.arange` and use a data type
+ # which is at least as large as `np.int_`
+ num_list = [0]
+ for k in range(len(key)):
+ step = key[k].step
+ start = key[k].start
+ stop = key[k].stop
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ if isinstance(step, (_nx.complexfloating, complex)):
+ step = abs(step)
+ size.append(int(step))
+ else:
+ size.append(
+ int(math.ceil((stop - start) / (step*1.0))))
+ num_list += [start, stop, step]
+ typ = _nx.result_type(*num_list)
+ if self.sparse:
+ nn = [_nx.arange(_x, dtype=_t)
+ for _x, _t in zip(size, (typ,)*len(size))]
+ else:
+ nn = _nx.indices(size, typ)
+ for k, kk in enumerate(key):
+ step = kk.step
+ start = kk.start
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ if isinstance(step, (_nx.complexfloating, complex)):
+ step = int(abs(step))
+ if step != 1:
+ step = (kk.stop - start) / float(step - 1)
+ nn[k] = (nn[k]*step+start)
+ if self.sparse:
+ slobj = [_nx.newaxis]*len(size)
+ for k in range(len(size)):
+ slobj[k] = slice(None, None)
+ nn[k] = nn[k][tuple(slobj)]
+ slobj[k] = _nx.newaxis
+ return nn
+ except (IndexError, TypeError):
+ step = key.step
+ stop = key.stop
+ start = key.start
+ if start is None:
+ start = 0
+ if isinstance(step, (_nx.complexfloating, complex)):
+ # Prevent the (potential) creation of integer arrays
+ step_float = abs(step)
+ step = length = int(step_float)
+ if step != 1:
+ step = (key.stop-start)/float(step-1)
+ typ = _nx.result_type(start, stop, step_float)
+ return _nx.arange(0, length, 1, dtype=typ)*step + start
+ else:
+ return _nx.arange(start, stop, step)
+
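+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# The complex-step branch above is what makes ``mgrid[start:stop:nj]`` behave
+# like an inclusive linspace rather than an exclusive arange:
+def _demo_complex_step():
+    import numpy as np
+    assert np.allclose(np.mgrid[-1:1:5j], np.linspace(-1, 1, 5))
+    assert np.allclose(np.mgrid[0:4:1], np.arange(0, 4, 1))
+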
+
+class MGridClass(nd_grid):
+ """
+ `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
+
+    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
+ (or fleshed out) mesh-grid when indexed, so that each returned argument
+ has the same shape. The dimensions and number of the output arrays are
+ equal to the number of indexing dimensions. If the step length is not a
+ complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+ -------
+ mesh-grid `ndarrays` all of the same dimensions
+
+ See Also
+ --------
+ lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ ogrid : like mgrid but returns open (not fleshed out) mesh grids
+ meshgrid: return coordinate matrices from coordinate vectors
+ r_ : array concatenator
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> np.mgrid[0:5, 0:5]
+ array([[[0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4]],
+ [[0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4]]])
+ >>> np.mgrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+
+ """
+
+ def __init__(self):
+ super().__init__(sparse=False)
+
+
+mgrid = MGridClass()
+
+
+class OGridClass(nd_grid):
+ """
+ `nd_grid` instance which returns an open multi-dimensional "meshgrid".
+
+ An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
+ (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
+ of each returned array is greater than 1. The dimension and number of the
+ output arrays are equal to the number of indexing dimensions. If the step
+ length is not a complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+ -------
+ mesh-grid
+ `ndarrays` with only one dimension not equal to 1
+
+ See Also
+ --------
+ np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+ meshgrid: return coordinate matrices from coordinate vectors
+ r_ : array concatenator
+ :ref:`how-to-partition`
+
+ Examples
+ --------
+ >>> from numpy import ogrid
+ >>> ogrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+ >>> ogrid[0:5,0:5]
+ [array([[0],
+ [1],
+ [2],
+ [3],
+ [4]]), array([[0, 1, 2, 3, 4]])]
+
+ """
+
+ def __init__(self):
+ super().__init__(sparse=True)
+
+
+ogrid = OGridClass()
+
+
+class AxisConcatenator:
+ """
+ Translates slice objects to concatenation along an axis.
+
+ For detailed documentation on usage, see `r_`.
+ """
+ # allow ma.mr_ to override this
+ concatenate = staticmethod(_nx.concatenate)
+ makemat = staticmethod(matrixlib.matrix)
+
+ def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
+ self.axis = axis
+ self.matrix = matrix
+ self.trans1d = trans1d
+ self.ndmin = ndmin
+
+ def __getitem__(self, key):
+ # handle matrix builder syntax
+ if isinstance(key, str):
+ frame = sys._getframe().f_back
+ mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
+ return mymat
+
+ if not isinstance(key, tuple):
+ key = (key,)
+
+ # copy attributes, since they can be overridden in the first argument
+ trans1d = self.trans1d
+ ndmin = self.ndmin
+ matrix = self.matrix
+ axis = self.axis
+
+ objs = []
+ scalars = []
+ arraytypes = []
+ scalartypes = []
+
+ for k, item in enumerate(key):
+ scalar = False
+ if isinstance(item, slice):
+ step = item.step
+ start = item.start
+ stop = item.stop
+ if start is None:
+ start = 0
+ if step is None:
+ step = 1
+ if isinstance(step, (_nx.complexfloating, complex)):
+ size = int(abs(step))
+ newobj = linspace(start, stop, num=size)
+ else:
+ newobj = _nx.arange(start, stop, step)
+ if ndmin > 1:
+ newobj = array(newobj, copy=False, ndmin=ndmin)
+ if trans1d != -1:
+ newobj = newobj.swapaxes(-1, trans1d)
+ elif isinstance(item, str):
+ if k != 0:
+ raise ValueError("special directives must be the "
+ "first entry.")
+ if item in ('r', 'c'):
+ matrix = True
+ col = (item == 'c')
+ continue
+ if ',' in item:
+ vec = item.split(',')
+ try:
+ axis, ndmin = [int(x) for x in vec[:2]]
+ if len(vec) == 3:
+ trans1d = int(vec[2])
+ continue
+ except Exception as e:
+ raise ValueError(
+ "unknown special directive {!r}".format(item)
+ ) from e
+ try:
+ axis = int(item)
+ continue
+ except (ValueError, TypeError) as e:
+ raise ValueError("unknown special directive") from e
+ elif type(item) in ScalarType:
+ newobj = array(item, ndmin=ndmin)
+ scalars.append(len(objs))
+ scalar = True
+ scalartypes.append(newobj.dtype)
+ else:
+ item_ndim = ndim(item)
+ newobj = array(item, copy=False, subok=True, ndmin=ndmin)
+ if trans1d != -1 and item_ndim < ndmin:
+ k2 = ndmin - item_ndim
+ k1 = trans1d
+ if k1 < 0:
+ k1 += k2 + 1
+ defaxes = list(range(ndmin))
+ axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
+ newobj = newobj.transpose(axes)
+ objs.append(newobj)
+ if not scalar and isinstance(newobj, _nx.ndarray):
+ arraytypes.append(newobj.dtype)
+
+ # Ensure that scalars won't up-cast unless warranted
+ final_dtype = find_common_type(arraytypes, scalartypes)
+ if final_dtype is not None:
+ for k in scalars:
+ objs[k] = objs[k].astype(final_dtype)
+
+ res = self.concatenate(tuple(objs), axis=axis)
+
+ if matrix:
+ oldndim = res.ndim
+ res = self.makemat(res)
+ if oldndim == 1 and col:
+ res = res.T
+ return res
+
+ def __len__(self):
+ return 0
+
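+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# The directive parsing above means ``r_['0,2', x, y]`` is concatenation
+# along axis 0 after promoting each entry to at least two dimensions:
+def _demo_directives():
+    import numpy as np
+    built = np.r_['0,2', [1, 2, 3], [4, 5, 6]]
+    manual = np.concatenate((np.atleast_2d([1, 2, 3]),
+                             np.atleast_2d([4, 5, 6])), axis=0)
+    assert np.array_equal(built, manual)
+
+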
+# separate classes are used here instead of just making r_ = concatenator(0),
+# etc. because otherwise we couldn't get the doc string to come out right
+# in help(r_)
+
+
+class RClass(AxisConcatenator):
+ """
+ Translates slice objects to concatenation along the first axis.
+
+ This is a simple way to build up arrays quickly. There are two use cases.
+
+ 1. If the index expression contains comma separated arrays, then stack
+ them along their first axis.
+ 2. If the index expression contains slice notation or scalars then create
+ a 1-D array with a range indicated by the slice notation.
+
+ If slice notation is used, the syntax ``start:stop:step`` is equivalent
+ to ``np.arange(start, stop, step)`` inside of the brackets. However, if
+ ``step`` is an imaginary number (i.e. 100j) then its integer portion is
+ interpreted as a number-of-points desired and the start and stop are
+ inclusive. In other words ``start:stop:stepj`` is interpreted as
+ ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
+ After expansion of slice notation, all comma separated sequences are
+ concatenated together.
+
+ Optional character strings placed as the first element of the index
+ expression can be used to change the output. The strings 'r' or 'c' result
+ in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
+    matrix is produced. If the result is 1-D and 'c' is specified, then an N x 1
+ (column) matrix is produced. If the result is 2-D then both provide the
+ same matrix result.
+
+ A string integer specifies which axis to stack multiple comma separated
+ arrays along. A string of two comma-separated integers allows indication
+ of the minimum number of dimensions to force each entry into as the
+ second integer (the axis to concatenate along is still the first integer).
+
+ A string with three comma-separated integers allows specification of the
+ axis to concatenate along, the minimum number of dimensions to force the
+ entries to, and which axis should contain the start of the arrays which
+ are less than the specified number of dimensions. In other words the third
+ integer allows you to specify where the 1's should be placed in the shape
+ of the arrays that have their shapes upgraded. By default, they are placed
+ in the front of the shape tuple. The third argument allows you to specify
+ where the start of the array should be instead. Thus, a third argument of
+ '0' would place the 1's at the end of the array shape. Negative integers
+ specify where in the new shape tuple the last dimension of upgraded arrays
+ should be placed, so the default is '-1'.
+
+ Parameters
+ ----------
+ Not a function, so takes no parameters
+
+
+ Returns
+ -------
+ A concatenated ndarray or matrix.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ c_ : Translates slice objects to concatenation along the second axis.
+
+ Examples
+ --------
+ >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
+ array([1, 2, 3, ..., 4, 5, 6])
+ >>> np.r_[-1:1:6j, [0]*3, 5, 6]
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
+
+ String integers specify the axis to concatenate along or the minimum
+ number of dimensions to force entries into.
+
+ >>> a = np.array([[0, 1, 2], [3, 4, 5]])
+ >>> np.r_['-1', a, a] # concatenate along last axis
+ array([[0, 1, 2, 0, 1, 2],
+ [3, 4, 5, 3, 4, 5]])
+ >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
+ array([[1],
+ [2],
+ [3],
+ [4],
+ [5],
+ [6]])
+ >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+
+ Using 'r' or 'c' as a first string argument creates a matrix.
+
+ >>> np.r_['r',[1,2,3], [4,5,6]]
+ matrix([[1, 2, 3, 4, 5, 6]])
+
+ """
+
+ def __init__(self):
+ AxisConcatenator.__init__(self, 0)
+
+
+r_ = RClass()
+
+
+class CClass(AxisConcatenator):
+ """
+ Translates slice objects to concatenation along the second axis.
+
+ This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
+ useful because of its common occurrence. In particular, arrays will be
+ stacked along their last axis after being upgraded to at least 2-D with
+ 1's post-pended to the shape (column vectors made out of 1-D arrays).
+
+ See Also
+ --------
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ r_ : For more detailed documentation.
+
+ Examples
+ --------
+ >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+ >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
+ array([[1, 2, 3, ..., 4, 5, 6]])
+
+ """
+
+ def __init__(self):
+ AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
+
+
+c_ = CClass()
+
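+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# As the docstring above notes, ``c_`` is short-hand for ``r_['-1,2,0', ...]``,
+# so for 1-D inputs it matches ``np.column_stack``:
+def _demo_c_():
+    import numpy as np
+    x, y = np.array([1, 2, 3]), np.array([4, 5, 6])
+    assert np.array_equal(np.c_[x, y], np.r_['-1,2,0', x, y])
+    assert np.array_equal(np.c_[x, y], np.column_stack((x, y)))
+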
+
+@set_module('numpy')
+class ndenumerate:
+ """
+ Multidimensional index iterator.
+
+ Return an iterator yielding pairs of array coordinates and values.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+
+ See Also
+ --------
+ ndindex, flatiter
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> for index, x in np.ndenumerate(a):
+ ... print(index, x)
+ (0, 0) 1
+ (0, 1) 2
+ (1, 0) 3
+ (1, 1) 4
+
+ """
+
+ def __init__(self, arr):
+ self.iter = asarray(arr).flat
+
+ def __next__(self):
+ """
+ Standard iterator method, returns the index tuple and array value.
+
+ Returns
+ -------
+ coords : tuple of ints
+ The indices of the current iteration.
+ val : scalar
+ The array element of the current iteration.
+
+ """
+ return self.iter.coords, next(self.iter)
+
+ def __iter__(self):
+ return self
+
+
+@set_module('numpy')
+class ndindex:
+ """
+ An N-dimensional iterator object to index arrays.
+
+ Given the shape of an array, an `ndindex` instance iterates over
+ the N-dimensional index of the array. At each iteration a tuple
+ of indices is returned, the last dimension is iterated over first.
+
+ Parameters
+ ----------
+ shape : ints, or a single tuple of ints
+ The size of each dimension of the array can be passed as
+ individual parameters or as the elements of a tuple.
+
+ See Also
+ --------
+ ndenumerate, flatiter
+
+ Examples
+ --------
+ Dimensions as individual arguments
+
+ >>> for index in np.ndindex(3, 2, 1):
+ ... print(index)
+ (0, 0, 0)
+ (0, 1, 0)
+ (1, 0, 0)
+ (1, 1, 0)
+ (2, 0, 0)
+ (2, 1, 0)
+
+ Same dimensions - but in a tuple ``(3, 2, 1)``
+
+ >>> for index in np.ndindex((3, 2, 1)):
+ ... print(index)
+ (0, 0, 0)
+ (0, 1, 0)
+ (1, 0, 0)
+ (1, 1, 0)
+ (2, 0, 0)
+ (2, 1, 0)
+
+ """
+
+ def __init__(self, *shape):
+ if len(shape) == 1 and isinstance(shape[0], tuple):
+ shape = shape[0]
+ x = as_strided(_nx.zeros(1), shape=shape,
+ strides=_nx.zeros_like(shape))
+ self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
+ order='C')
+
+ def __iter__(self):
+ return self
+
+ def ndincr(self):
+ """
+ Increment the multi-dimensional index by one.
+
+ This method is for backward compatibility only: do not use.
+
+ .. deprecated:: 1.20.0
+ This method has been advised against since numpy 1.8.0, but only
+ started emitting DeprecationWarning as of this version.
+ """
+ # NumPy 1.20.0, 2020-09-08
+ warnings.warn(
+ "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
+ DeprecationWarning, stacklevel=2)
+ next(self)
+
+ def __next__(self):
+ """
+ Standard iterator method, updates the index and returns the index
+ tuple.
+
+ Returns
+ -------
+ val : tuple of ints
+ Returns a tuple containing the indices of the current
+ iteration.
+
+ """
+ next(self._it)
+ return self._it.multi_index
+
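+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# Iterating an ``ndindex`` in C order yields the same tuples as the cartesian
+# product of the per-axis ranges:
+def _demo_ndindex():
+    import itertools
+    import numpy as np
+    shape = (3, 2, 1)
+    assert list(np.ndindex(*shape)) == list(
+        itertools.product(*(range(n) for n in shape)))
+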
+
+# You can do all this with slice() plus a few special objects,
+# but there's a lot to remember. This version is simpler because
+# it uses the standard array indexing syntax.
+#
+# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
+# last revision: 1999-7-23
+#
+# Cosmetic changes by T. Oliphant 2001
+#
+#
+
+class IndexExpression:
+ """
+ A nicer way to build up index tuples for arrays.
+
+ .. note::
+ Use one of the two predefined instances `index_exp` or `s_`
+ rather than directly using `IndexExpression`.
+
+ For any index combination, including slicing and axis insertion,
+ ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
+ array `a`. However, ``np.index_exp[indices]`` can be used anywhere
+ in Python code and returns a tuple of slice objects that can be
+ used in the construction of complex index expressions.
+
+ Parameters
+ ----------
+ maketuple : bool
+ If True, always returns a tuple.
+
+ See Also
+ --------
+ index_exp : Predefined instance that always returns a tuple:
+ `index_exp = IndexExpression(maketuple=True)`.
+ s_ : Predefined instance without tuple conversion:
+ `s_ = IndexExpression(maketuple=False)`.
+
+ Notes
+ -----
+ You can do all this with `slice()` plus a few special objects,
+ but there's a lot to remember and this version is simpler because
+ it uses the standard array indexing syntax.
+
+ Examples
+ --------
+ >>> np.s_[2::2]
+ slice(2, None, 2)
+ >>> np.index_exp[2::2]
+ (slice(2, None, 2),)
+
+ >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
+ array([2, 4])
+
+ """
+
+ def __init__(self, maketuple):
+ self.maketuple = maketuple
+
+ def __getitem__(self, item):
+ if self.maketuple and not isinstance(item, tuple):
+ return (item,)
+ else:
+ return item
+
+
+index_exp = IndexExpression(maketuple=True)
+s_ = IndexExpression(maketuple=False)
+
+# End contribution from Konrad.
+
+
+# The following functions complement those in twodim_base, but are
+# applicable to N-dimensions.
+
+
+def _fill_diagonal_dispatcher(a, val, wrap=None):
+ return (a,)
+
+
+@array_function_dispatch(_fill_diagonal_dispatcher)
+def fill_diagonal(a, val, wrap=False):
+ """Fill the main diagonal of the given array of any dimensionality.
+
+ For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
+ locations with indices ``a[i, ..., i]`` all identical. This function
+    modifies the input array in-place; it does not return a value.
+
+ Parameters
+ ----------
+ a : array, at least 2-D.
+ Array whose diagonal is to be filled, it gets modified in-place.
+
+ val : scalar or array_like
+ Value(s) to write on the diagonal. If `val` is scalar, the value is
+ written along the diagonal. If array-like, the flattened `val` is
+ written along the diagonal, repeating if necessary to fill all
+ diagonal entries.
+
+ wrap : bool
+        For tall matrices in NumPy versions up to 1.6.2, the
+        diagonal "wrapped" after N columns. This option restores that
+        behavior; it affects only tall matrices.
+
+ See also
+ --------
+ diag_indices, diag_indices_from
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ This functionality can be obtained via `diag_indices`, but internally
+ this version uses a much faster implementation that never constructs the
+ indices and uses simple slicing.
+
+ Examples
+ --------
+ >>> a = np.zeros((3, 3), int)
+ >>> np.fill_diagonal(a, 5)
+ >>> a
+ array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+
+ The same function can operate on a 4-D array:
+
+ >>> a = np.zeros((3, 3, 3, 3), int)
+ >>> np.fill_diagonal(a, 4)
+
+ We only show a few blocks for clarity:
+
+ >>> a[0, 0]
+ array([[4, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> a[1, 1]
+ array([[0, 0, 0],
+ [0, 4, 0],
+ [0, 0, 0]])
+ >>> a[2, 2]
+ array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 4]])
+
+ The wrap option affects only tall matrices:
+
+ >>> # tall matrices no wrap
+ >>> a = np.zeros((5, 3), int)
+ >>> np.fill_diagonal(a, 4)
+ >>> a
+ array([[4, 0, 0],
+ [0, 4, 0],
+ [0, 0, 4],
+ [0, 0, 0],
+ [0, 0, 0]])
+
+ >>> # tall matrices wrap
+ >>> a = np.zeros((5, 3), int)
+ >>> np.fill_diagonal(a, 4, wrap=True)
+ >>> a
+ array([[4, 0, 0],
+ [0, 4, 0],
+ [0, 0, 4],
+ [0, 0, 0],
+ [4, 0, 0]])
+
+ >>> # wide matrices
+ >>> a = np.zeros((3, 5), int)
+ >>> np.fill_diagonal(a, 4, wrap=True)
+ >>> a
+ array([[4, 0, 0, 0, 0],
+ [0, 4, 0, 0, 0],
+ [0, 0, 4, 0, 0]])
+
+ The anti-diagonal can be filled by reversing the order of elements
+ using either `numpy.flipud` or `numpy.fliplr`.
+
+ >>> a = np.zeros((3, 3), int);
+ >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
+ >>> a
+ array([[0, 0, 1],
+ [0, 2, 0],
+ [3, 0, 0]])
+ >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
+ >>> a
+ array([[0, 0, 3],
+ [0, 2, 0],
+ [1, 0, 0]])
+
+ Note that the order in which the diagonal is filled varies depending
+ on the flip function.
+ """
+ if a.ndim < 2:
+ raise ValueError("array must be at least 2-d")
+ end = None
+ if a.ndim == 2:
+ # Explicit, fast formula for the common case. For 2-d arrays, we
+ # accept rectangular ones.
+ step = a.shape[1] + 1
+        # This is needed so that the diagonal of a tall matrix does not wrap.
+ if not wrap:
+ end = a.shape[1] * a.shape[1]
+ else:
+ # For more than d=2, the strided formula is only valid for arrays with
+ # all dimensions equal, so we check first.
+ if not alltrue(diff(a.shape) == 0):
+ raise ValueError("All dimensions of input must be of equal length")
+ step = 1 + (cumprod(a.shape[:-1])).sum()
+
+ # Write the value out into the diagonal.
+ a.flat[:end:step] = val
+
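+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# For a square (n, n) array, the strided slice used above visits flat offsets
+# 0, n + 1, 2 * (n + 1), ... -- exactly the main diagonal:
+def _demo_flat_diagonal():
+    import numpy as np
+    a = np.arange(16).reshape(4, 4)
+    assert np.array_equal(a.flat[::4 + 1], np.diag(a))
+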
+
+@set_module('numpy')
+def diag_indices(n, ndim=2):
+ """
+ Return the indices to access the main diagonal of an array.
+
+ This returns a tuple of indices that can be used to access the main
+ diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+ (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+ ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+ for ``i = [0..n-1]``.
+
+ Parameters
+ ----------
+ n : int
+ The size, along each dimension, of the arrays for which the returned
+ indices can be used.
+
+ ndim : int, optional
+ The number of dimensions.
+
+ See Also
+ --------
+ diag_indices_from
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ Create a set of indices to access the diagonal of a (4, 4) array:
+
+ >>> di = np.diag_indices(4)
+ >>> di
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+ >>> a[di] = 100
+ >>> a
+ array([[100, 1, 2, 3],
+ [ 4, 100, 6, 7],
+ [ 8, 9, 100, 11],
+ [ 12, 13, 14, 100]])
+
+ Now, we create indices to manipulate a 3-D array:
+
+ >>> d3 = np.diag_indices(2, 3)
+ >>> d3
+ (array([0, 1]), array([0, 1]), array([0, 1]))
+
+ And use it to set the diagonal of an array of zeros to 1:
+
+ >>> a = np.zeros((2, 2, 2), dtype=int)
+ >>> a[d3] = 1
+ >>> a
+ array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+
+ """
+ idx = arange(n)
+ return (idx,) * ndim
+
+
+def _diag_indices_from(arr):
+ return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
+def diag_indices_from(arr):
+ """
+ Return the indices to access the main diagonal of an n-dimensional array.
+
+ See `diag_indices` for full details.
+
+ Parameters
+ ----------
+ arr : array, at least 2-D
+
+ See Also
+ --------
+ diag_indices
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+
+ if not arr.ndim >= 2:
+ raise ValueError("input array must be at least 2-d")
+ # For more than d=2, the strided formula is only valid for arrays with
+ # all dimensions equal, so we check first.
+ if not alltrue(diff(arr.shape) == 0):
+ raise ValueError("All dimensions of input must be of equal length")
+
+ return diag_indices(arr.shape[0], arr.ndim)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi b/venv/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi
new file mode 100644
index 00000000..c9251abd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/index_tricks.pyi
@@ -0,0 +1,162 @@
+from collections.abc import Sequence
+from typing import (
+ Any,
+ TypeVar,
+ Generic,
+ overload,
+ Literal,
+ SupportsIndex,
+)
+
+from numpy import (
+ # Circumvent a naming conflict with `AxisConcatenator.matrix`
+ matrix as _Matrix,
+ ndenumerate as ndenumerate,
+ ndindex as ndindex,
+ ndarray,
+ dtype,
+ integer,
+ str_,
+ bytes_,
+ bool_,
+ int_,
+ float_,
+ complex_,
+ intp,
+ _OrderCF,
+ _ModeKind,
+)
+from numpy._typing import (
+ # Arrays
+ ArrayLike,
+ _NestedSequence,
+ _FiniteNestedSequence,
+ NDArray,
+ _ArrayLikeInt,
+
+ # DTypes
+ DTypeLike,
+ _SupportsDType,
+
+ # Shapes
+ _ShapeLike,
+)
+
+from numpy.core.multiarray import (
+ unravel_index as unravel_index,
+ ravel_multi_index as ravel_multi_index,
+)
+
+_T = TypeVar("_T")
+_DType = TypeVar("_DType", bound=dtype[Any])
+_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
+_TupType = TypeVar("_TupType", bound=tuple[Any, ...])
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+__all__: list[str]
+
+@overload
+def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
+@overload
+def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
+@overload
+def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
+@overload
+def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ...
+@overload
+def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
+@overload
+def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ...
+@overload
+def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ...
+
+class nd_grid(Generic[_BoolType]):
+ sparse: _BoolType
+ def __init__(self, sparse: _BoolType = ...) -> None: ...
+ @overload
+ def __getitem__(
+ self: nd_grid[Literal[False]],
+ key: slice | Sequence[slice],
+ ) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(
+ self: nd_grid[Literal[True]],
+ key: slice | Sequence[slice],
+ ) -> list[NDArray[Any]]: ...
+
+class MGridClass(nd_grid[Literal[False]]):
+ def __init__(self) -> None: ...
+
+mgrid: MGridClass
+
+class OGridClass(nd_grid[Literal[True]]):
+ def __init__(self) -> None: ...
+
+ogrid: OGridClass
+
+class AxisConcatenator:
+ axis: int
+ matrix: bool
+ ndmin: int
+ trans1d: int
+ def __init__(
+ self,
+ axis: int = ...,
+ matrix: bool = ...,
+ ndmin: int = ...,
+ trans1d: int = ...,
+ ) -> None: ...
+ @staticmethod
+ @overload
+ def concatenate( # type: ignore[misc]
+ *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
+ ) -> NDArray[Any]: ...
+ @staticmethod
+ @overload
+ def concatenate(
+ *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
+ ) -> _ArrayType: ...
+ @staticmethod
+ def makemat(
+ data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
+ ) -> _Matrix: ...
+
+ # TODO: Sort out this `__getitem__` method
+ def __getitem__(self, key: Any) -> Any: ...
+
+class RClass(AxisConcatenator):
+ axis: Literal[0]
+ matrix: Literal[False]
+ ndmin: Literal[1]
+ trans1d: Literal[-1]
+ def __init__(self) -> None: ...
+
+r_: RClass
+
+class CClass(AxisConcatenator):
+ axis: Literal[-1]
+ matrix: Literal[False]
+ ndmin: Literal[2]
+ trans1d: Literal[0]
+ def __init__(self) -> None: ...
+
+c_: CClass
+
+class IndexExpression(Generic[_BoolType]):
+ maketuple: _BoolType
+ def __init__(self, maketuple: _BoolType) -> None: ...
+ @overload
+ def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc]
+ @overload
+ def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
+ @overload
+ def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...
+
+index_exp: IndexExpression[Literal[True]]
+s_: IndexExpression[Literal[False]]
+
+def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ...
+def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
+def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...
+
+# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/mixins.py b/venv/lib/python3.9/site-packages/numpy/lib/mixins.py
new file mode 100644
index 00000000..c81239f6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/mixins.py
@@ -0,0 +1,176 @@
+"""Mixin classes for custom array types that don't inherit from ndarray."""
+from numpy.core import umath as um
+
+
+__all__ = ['NDArrayOperatorsMixin']
+
+
+def _disables_array_ufunc(obj):
+ """True when __array_ufunc__ is set to None."""
+ try:
+ return obj.__array_ufunc__ is None
+ except AttributeError:
+ return False
+
+
+def _binary_method(ufunc, name):
+ """Implement a forward binary method with a ufunc, e.g., __add__."""
+ def func(self, other):
+ if _disables_array_ufunc(other):
+ return NotImplemented
+ return ufunc(self, other)
+ func.__name__ = '__{}__'.format(name)
+ return func
+
+
+def _reflected_binary_method(ufunc, name):
+ """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+ def func(self, other):
+ if _disables_array_ufunc(other):
+ return NotImplemented
+ return ufunc(other, self)
+ func.__name__ = '__r{}__'.format(name)
+ return func
+
+
+def _inplace_binary_method(ufunc, name):
+ """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+ def func(self, other):
+ return ufunc(self, other, out=(self,))
+ func.__name__ = '__i{}__'.format(name)
+ return func
+
+
+def _numeric_methods(ufunc, name):
+ """Implement forward, reflected and inplace binary methods with a ufunc."""
+ return (_binary_method(ufunc, name),
+ _reflected_binary_method(ufunc, name),
+ _inplace_binary_method(ufunc, name))
+
+
+def _unary_method(ufunc, name):
+ """Implement a unary special method with a ufunc."""
+ def func(self):
+ return ufunc(self)
+ func.__name__ = '__{}__'.format(name)
+ return func
+
+
+class NDArrayOperatorsMixin:
+ """Mixin defining all operator special methods using __array_ufunc__.
+
+ This class implements the special methods for almost all of Python's
+ builtin operators defined in the `operator` module, including comparisons
+ (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+ deferring to the ``__array_ufunc__`` method, which subclasses must
+ implement.
+
+ It is useful for writing classes that do not inherit from `numpy.ndarray`,
+ but that should support arithmetic and numpy universal functions like
+ arrays as described in `A Mechanism for Overriding Ufuncs
+ <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+ class that simply wraps a NumPy array and ensures that the result of any
+ arithmetic operation is also an ``ArrayLike`` object::
+
+ class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+ def __init__(self, value):
+ self.value = np.asarray(value)
+
+ # One might also consider adding the built-in list type to this
+ # list, to support operations like np.add(array_like, list)
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ out = kwargs.get('out', ())
+ for x in inputs + out:
+ # Only support operations with instances of _HANDLED_TYPES.
+ # Use ArrayLike instead of type(self) for isinstance to
+ # allow subclasses that don't override __array_ufunc__ to
+ # handle ArrayLike objects.
+ if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+ return NotImplemented
+
+ # Defer to the implementation of the ufunc on unwrapped values.
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+ for x in inputs)
+ if out:
+ kwargs['out'] = tuple(
+ x.value if isinstance(x, ArrayLike) else x
+ for x in out)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
+
+ if type(result) is tuple:
+ # multiple return values
+ return tuple(type(self)(x) for x in result)
+ elif method == 'at':
+ # no return value
+ return None
+ else:
+ # one return value
+ return type(self)(result)
+
+ def __repr__(self):
+ return '%s(%r)' % (type(self).__name__, self.value)
+
+ In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+ the result is always another ``ArrayLike``:
+
+ >>> x = ArrayLike([1, 2, 3])
+ >>> x - 1
+ ArrayLike(array([0, 1, 2]))
+ >>> 1 - x
+ ArrayLike(array([ 0, -1, -2]))
+ >>> np.arange(3) - x
+ ArrayLike(array([-1, -1, -1]))
+ >>> x - np.arange(3)
+ ArrayLike(array([1, 1, 1]))
+
+ Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+ with arbitrary, unrecognized types. This ensures that interactions with
+ ArrayLike preserve a well-defined casting hierarchy.
+
+ .. versionadded:: 1.13
+ """
+ # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+ # overrides NEP.
+
+ # comparisons don't have reflected and in-place versions
+ __lt__ = _binary_method(um.less, 'lt')
+ __le__ = _binary_method(um.less_equal, 'le')
+ __eq__ = _binary_method(um.equal, 'eq')
+ __ne__ = _binary_method(um.not_equal, 'ne')
+ __gt__ = _binary_method(um.greater, 'gt')
+ __ge__ = _binary_method(um.greater_equal, 'ge')
+
+ # numeric methods
+ __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
+ __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
+ __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+ __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+ um.matmul, 'matmul')
+ # Python 3 does not use __div__, __rdiv__, or __idiv__
+ __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
+ um.true_divide, 'truediv')
+ __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
+ um.floor_divide, 'floordiv')
+ __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
+ __divmod__ = _binary_method(um.divmod, 'divmod')
+ __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
+ # __idivmod__ does not exist
+ # TODO: handle the optional third argument for __pow__?
+ __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
+ __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
+ um.left_shift, 'lshift')
+ __rshift__, __rrshift__, __irshift__ = _numeric_methods(
+ um.right_shift, 'rshift')
+ __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
+ __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
+ __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
+
+ # unary methods
+ __neg__ = _unary_method(um.negative, 'neg')
+ __pos__ = _unary_method(um.positive, 'pos')
+ __abs__ = _unary_method(um.absolute, 'abs')
+ __invert__ = _unary_method(um.invert, 'invert')
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/mixins.pyi b/venv/lib/python3.9/site-packages/numpy/lib/mixins.pyi
new file mode 100644
index 00000000..c5744213
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/mixins.pyi
@@ -0,0 +1,74 @@
+from abc import ABCMeta, abstractmethod
+from typing import Literal as L, Any
+
+from numpy import ufunc
+
+__all__: list[str]
+
+# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
+# even though it's reliant on subclasses implementing `__array_ufunc__`
+
+# NOTE: The accepted input- and output-types of the various dunders are
+# completely dependent on how `__array_ufunc__` is implemented.
+# As such, little type safety can be provided here.
+
+class NDArrayOperatorsMixin(metaclass=ABCMeta):
+ @abstractmethod
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+ def __lt__(self, other: Any) -> Any: ...
+ def __le__(self, other: Any) -> Any: ...
+ def __eq__(self, other: Any) -> Any: ...
+ def __ne__(self, other: Any) -> Any: ...
+ def __gt__(self, other: Any) -> Any: ...
+ def __ge__(self, other: Any) -> Any: ...
+ def __add__(self, other: Any) -> Any: ...
+ def __radd__(self, other: Any) -> Any: ...
+ def __iadd__(self, other: Any) -> Any: ...
+ def __sub__(self, other: Any) -> Any: ...
+ def __rsub__(self, other: Any) -> Any: ...
+ def __isub__(self, other: Any) -> Any: ...
+ def __mul__(self, other: Any) -> Any: ...
+ def __rmul__(self, other: Any) -> Any: ...
+ def __imul__(self, other: Any) -> Any: ...
+ def __matmul__(self, other: Any) -> Any: ...
+ def __rmatmul__(self, other: Any) -> Any: ...
+ def __imatmul__(self, other: Any) -> Any: ...
+ def __truediv__(self, other: Any) -> Any: ...
+ def __rtruediv__(self, other: Any) -> Any: ...
+ def __itruediv__(self, other: Any) -> Any: ...
+ def __floordiv__(self, other: Any) -> Any: ...
+ def __rfloordiv__(self, other: Any) -> Any: ...
+ def __ifloordiv__(self, other: Any) -> Any: ...
+ def __mod__(self, other: Any) -> Any: ...
+ def __rmod__(self, other: Any) -> Any: ...
+ def __imod__(self, other: Any) -> Any: ...
+ def __divmod__(self, other: Any) -> Any: ...
+ def __rdivmod__(self, other: Any) -> Any: ...
+ def __pow__(self, other: Any) -> Any: ...
+ def __rpow__(self, other: Any) -> Any: ...
+ def __ipow__(self, other: Any) -> Any: ...
+ def __lshift__(self, other: Any) -> Any: ...
+ def __rlshift__(self, other: Any) -> Any: ...
+ def __ilshift__(self, other: Any) -> Any: ...
+ def __rshift__(self, other: Any) -> Any: ...
+ def __rrshift__(self, other: Any) -> Any: ...
+ def __irshift__(self, other: Any) -> Any: ...
+ def __and__(self, other: Any) -> Any: ...
+ def __rand__(self, other: Any) -> Any: ...
+ def __iand__(self, other: Any) -> Any: ...
+ def __xor__(self, other: Any) -> Any: ...
+ def __rxor__(self, other: Any) -> Any: ...
+ def __ixor__(self, other: Any) -> Any: ...
+ def __or__(self, other: Any) -> Any: ...
+ def __ror__(self, other: Any) -> Any: ...
+ def __ior__(self, other: Any) -> Any: ...
+ def __neg__(self) -> Any: ...
+ def __pos__(self) -> Any: ...
+ def __abs__(self) -> Any: ...
+ def __invert__(self) -> Any: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.py b/venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.py
new file mode 100644
index 00000000..ae2dfa16
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.py
@@ -0,0 +1,1880 @@
+"""
+Functions that ignore NaN.
+
+Functions
+---------
+
+- `nanmin` -- minimum non-NaN value
+- `nanmax` -- maximum non-NaN value
+- `nanargmin` -- index of minimum non-NaN value
+- `nanargmax` -- index of maximum non-NaN value
+- `nansum` -- sum of non-NaN values
+- `nanprod` -- product of non-NaN values
+- `nancumsum` -- cumulative sum of non-NaN values
+- `nancumprod` -- cumulative product of non-NaN values
+- `nanmean` -- mean of non-NaN values
+- `nanvar` -- variance of non-NaN values
+- `nanstd` -- standard deviation of non-NaN values
+- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
+- `nanpercentile` -- qth percentile of non-NaN values
+
+"""
+import functools
+import warnings
+import numpy as np
+from numpy.lib import function_base
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
+ 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
+ 'nancumsum', 'nancumprod', 'nanquantile'
+ ]
+
+
+def _nan_mask(a, out=None):
+ """
+ Parameters
+ ----------
+ a : array-like
+ Input array with at least 1 dimension.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output and will prevent the allocation of a new array.
+
+ Returns
+ -------
+ y : bool ndarray or True
+ A bool array where ``np.nan`` positions are marked with ``False``
+ and other positions are marked with ``True``. If the type of ``a``
+ is such that it can't possibly contain ``np.nan``, returns ``True``.
+ """
+ # we assume that a is an array for this private function
+
+ if a.dtype.kind not in 'fc':
+ return True
+
+ y = np.isnan(a, out=out)
+ y = np.invert(y, out=y)
+ return y
+
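+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# ``_nan_mask`` short-circuits to ``True`` for dtypes that cannot hold NaN
+# and otherwise returns the elementwise "is not NaN" mask:
+def _demo_nan_mask():
+    import numpy as np
+    assert _nan_mask(np.array([1, 2, 3])) is True  # integers hold no NaN
+    mask = _nan_mask(np.array([1.0, np.nan, 3.0]))
+    assert mask.tolist() == [True, False, True]
+
+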
+def _replace_nan(a, val):
+ """
+ If `a` is of inexact type, make a copy of `a`, replace NaNs with
+ the `val` value, and return the copy together with a boolean mask
+ marking the locations where NaNs were present. If `a` is not of
+ inexact type, do nothing and return `a` together with a mask of None.
+
+ Note that scalars will end up as array scalars, which is important
+ for using the result as the value of the out argument in some
+ operations.
+
+ Parameters
+ ----------
+ a : array-like
+ Input array.
+ val : float
+ NaN values are set to val before doing the operation.
+
+ Returns
+ -------
+ y : ndarray
+ If `a` is of inexact type, return a copy of `a` with the NaNs
+ replaced by the fill value, otherwise return `a`.
+ mask: {bool, None}
+ If `a` is of inexact type, return a boolean mask marking locations of
+ NaNs, otherwise return None.
+
+ """
+ a = np.asanyarray(a)
+
+ if a.dtype == np.object_:
+ # object arrays do not support `isnan` (gh-9009), so make a guess
+ mask = np.not_equal(a, a, dtype=bool)
+ elif issubclass(a.dtype.type, np.inexact):
+ mask = np.isnan(a)
+ else:
+ mask = None
+
+ if mask is not None:
+ a = np.array(a, subok=True, copy=True)
+ np.copyto(a, val, where=mask)
+
+ return a, mask
+
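+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# The replace-then-reduce pattern enabled by ``_replace_nan`` underlies most
+# of the functions below; e.g. a nansum is an ordinary sum once NaNs have
+# been overwritten with the reduction's identity element:
+def _demo_replace_then_reduce():
+    import numpy as np
+    a, mask = _replace_nan(np.array([1.0, np.nan, 3.0]), 0)
+    assert a.tolist() == [1.0, 0.0, 3.0]
+    assert mask.tolist() == [False, True, False]
+    assert a.sum() == np.nansum([1.0, np.nan, 3.0]) == 4.0
+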
+
+def _copyto(a, val, mask):
+ """
+ Replace values in `a` with NaN where `mask` is True. This differs from
+ copyto in that it will deal with the case where `a` is a numpy scalar.
+
+ Parameters
+ ----------
+ a : ndarray or numpy scalar
+ Array or numpy scalar some of whose values are to be replaced
+ by val.
+ val : numpy scalar
+        Value used as a replacement.
+ mask : ndarray, scalar
+ Boolean array. Where True the corresponding element of `a` is
+ replaced by `val`. Broadcasts.
+
+ Returns
+ -------
+ res : ndarray, scalar
+ Array with elements replaced or scalar `val`.
+
+ """
+ if isinstance(a, np.ndarray):
+ np.copyto(a, val, where=mask, casting='unsafe')
+ else:
+ a = a.dtype.type(val)
+ return a
+
+
+def _remove_nan_1d(arr1d, overwrite_input=False):
+ """
+    Equivalent to ``arr1d[~np.isnan(arr1d)]``, but in a different order
+
+ Presumably faster as it incurs fewer copies
+
+ Parameters
+ ----------
+ arr1d : ndarray
+ Array to remove nans from
+ overwrite_input : bool
+ True if `arr1d` can be modified in place
+
+ Returns
+ -------
+ res : ndarray
+ Array with nan elements removed
+ overwrite_input : bool
+ True if `res` can be modified in place, given the constraint on the
+ input
+ """
+ if arr1d.dtype == object:
+ # object arrays do not support `isnan` (gh-9009), so make a guess
+ c = np.not_equal(arr1d, arr1d, dtype=bool)
+ else:
+ c = np.isnan(arr1d)
+
+ s = np.nonzero(c)[0]
+ if s.size == arr1d.size:
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=5)
+ return arr1d[:0], True
+ elif s.size == 0:
+ return arr1d, overwrite_input
+ else:
+ if not overwrite_input:
+ arr1d = arr1d.copy()
+ # select non-nans at end of array
+ enonan = arr1d[-s.size:][~c[-s.size:]]
+ # fill nans in beginning of array with non-nans of end
+ arr1d[s[:enonan.size]] = enonan
+
+ return arr1d[:-s.size], True
+
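+
+# --- Illustrative sketch; not part of the upstream module -------------------
+# ``_remove_nan_1d`` compacts in place: non-NaN values from the tail are
+# moved into the NaN slots and the now-redundant tail is sliced off, so the
+# result holds the same values as boolean masking, possibly reordered:
+def _demo_remove_nan_1d():
+    import numpy as np
+    res, _ = _remove_nan_1d(np.array([np.nan, 1.0, np.nan, 2.0, 3.0]))
+    assert sorted(res.tolist()) == [1.0, 2.0, 3.0]
+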
+
+def _divide_by_count(a, b, out=None):
+ """
+ Compute a/b ignoring invalid results. If `a` is an array the division
+ is done in place. If `a` is a scalar, then its type is preserved in the
+ output. If out is None, then a is used instead so that the division
+ is in place. Note that this is only called with `a` an inexact type.
+
+ Parameters
+ ----------
+ a : {ndarray, numpy scalar}
+ Numerator. Expected to be of inexact type but not checked.
+ b : {ndarray, numpy scalar}
+ Denominator.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary.
+
+ Returns
+ -------
+ ret : {ndarray, numpy scalar}
+ The return value is a/b. If `a` was an ndarray the division is done
+ in place. If `a` is a numpy scalar, the division preserves its type.
+
+ """
+ with np.errstate(invalid='ignore', divide='ignore'):
+ if isinstance(a, np.ndarray):
+ if out is None:
+ return np.divide(a, b, out=a, casting='unsafe')
+ else:
+ return np.divide(a, b, out=out, casting='unsafe')
+ else:
+ if out is None:
+ # Precaution against reduced object arrays
+ try:
+ return a.dtype.type(a / b)
+ except AttributeError:
+ return a / b
+ else:
+ # This is questionable, but currently a numpy scalar can
+ # be output to a zero dimensional array.
+ return np.divide(a, b, out=out, casting='unsafe')
+
+
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return minimum of an array or minimum along an axis, ignoring any NaNs.
+ When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
+    NaN is returned for that slice.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose minimum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the minimum is computed. The default is to compute
+ the minimum of the flattened array.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details.
+
+ .. versionadded:: 1.8.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `min` method
+        of sub-classes of `ndarray`. If the sub-class's method
+        does not implement `keepdims`, any exceptions will be raised.
+
+ .. versionadded:: 1.8.0
+ initial : scalar, optional
+ The maximum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ nanmin : ndarray
+ An array with the same shape as `a`, with the specified axis
+ removed. If `a` is a 0-d array, or if axis is None, an ndarray
+ scalar is returned. The same dtype as `a` is returned.
+
+ See Also
+ --------
+ nanmax :
+ The maximum value of an array along a given axis, ignoring any NaNs.
+ amin :
+ The minimum value of an array along a given axis, propagating any NaNs.
+ fmin :
+ Element-wise minimum of two arrays, ignoring any NaNs.
+ minimum :
+ Element-wise minimum of two arrays, propagating any NaNs.
+ isnan :
+ Shows which elements are Not a Number (NaN).
+ isfinite:
+ Shows which elements are neither NaN nor infinity.
+
+ amax, fmax, maximum
+
+ Notes
+ -----
+    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ Positive infinity is treated as a very large number and negative
+ infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.min.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanmin(a)
+ 1.0
+ >>> np.nanmin(a, axis=0)
+ array([1., 2.])
+ >>> np.nanmin(a, axis=1)
+ array([1., 3.])
+
+ When positive infinity and negative infinity are present:
+
+ >>> np.nanmin([1, 2, np.nan, np.inf])
+ 1.0
+ >>> np.nanmin([1, 2, np.nan, np.NINF])
+ -inf
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if initial is not np._NoValue:
+ kwargs['initial'] = initial
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
+ if type(a) is np.ndarray and a.dtype != np.object_:
+ # Fast, but not safe for subclasses of ndarray, or object arrays,
+ # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
+ res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
+ if np.isnan(res).any():
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=3)
+ else:
+ # Slow, but safe for subclasses of ndarray
+ a, mask = _replace_nan(a, +np.inf)
+ res = np.amin(a, axis=axis, out=out, **kwargs)
+ if mask is None:
+ return res
+
+ # Check for all-NaN axis
+ kwargs.pop("initial", None)
+ mask = np.all(mask, axis=axis, **kwargs)
+ if np.any(mask):
+ res = _copyto(res, np.nan, mask)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=3)
+ return res
+
+
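+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# A quick illustration of the behaviour documented above: nanmin skips
+# NaNs, but warns and yields NaN for an all-NaN slice. `_demo_nanmin` is a
+# hypothetical name used purely for illustration; it relies on the
+# module-level `np` import.
+def _demo_nanmin():
+    import warnings as _w
+    a = np.array([[np.nan, np.nan], [1.0, 2.0]])
+    with _w.catch_warnings(record=True) as caught:
+        _w.simplefilter("always")
+        res = np.nanmin(a, axis=1)  # row 0 is all-NaN -> nan, row 1 -> 1.0
+    assert np.isnan(res[0]) and res[1] == 1.0
+    assert any("All-NaN" in str(m.message) for m in caught)
+    return res
+
+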
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
+ """
+ Return the maximum of an array or maximum along an axis, ignoring any
+ NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
+ raised and NaN is returned for that slice.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose maximum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the maximum is computed. The default is to compute
+ the maximum of the flattened array.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details.
+
+ .. versionadded:: 1.8.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `max` method
+        of sub-classes of `ndarray`. If the sub-class' method
+        does not implement `keepdims`, any exceptions will be raised.
+
+ .. versionadded:: 1.8.0
+ initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ nanmax : ndarray
+ An array with the same shape as `a`, with the specified axis removed.
+ If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+ returned. The same dtype as `a` is returned.
+
+ See Also
+ --------
+ nanmin :
+ The minimum value of an array along a given axis, ignoring any NaNs.
+ amax :
+ The maximum value of an array along a given axis, propagating any NaNs.
+ fmax :
+ Element-wise maximum of two arrays, ignoring any NaNs.
+ maximum :
+ Element-wise maximum of two arrays, propagating any NaNs.
+ isnan :
+ Shows which elements are Not a Number (NaN).
+ isfinite:
+ Shows which elements are neither NaN nor infinity.
+
+ amin, fmin, minimum
+
+ Notes
+ -----
+    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+ Positive infinity is treated as a very large number and negative
+ infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type, the function is equivalent to np.max.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanmax(a)
+ 3.0
+ >>> np.nanmax(a, axis=0)
+ array([3., 2.])
+ >>> np.nanmax(a, axis=1)
+ array([2., 3.])
+
+ When positive infinity and negative infinity are present:
+
+ >>> np.nanmax([1, 2, np.nan, np.NINF])
+ 2.0
+ >>> np.nanmax([1, 2, np.nan, np.inf])
+ inf
+
+ """
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ if initial is not np._NoValue:
+ kwargs['initial'] = initial
+ if where is not np._NoValue:
+ kwargs['where'] = where
+
+ if type(a) is np.ndarray and a.dtype != np.object_:
+ # Fast, but not safe for subclasses of ndarray, or object arrays,
+ # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
+ res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
+ if np.isnan(res).any():
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=3)
+ else:
+ # Slow, but safe for subclasses of ndarray
+ a, mask = _replace_nan(a, -np.inf)
+ res = np.amax(a, axis=axis, out=out, **kwargs)
+ if mask is None:
+ return res
+
+ # Check for all-NaN axis
+ kwargs.pop("initial", None)
+ mask = np.all(mask, axis=axis, **kwargs)
+ if np.any(mask):
+ res = _copyto(res, np.nan, mask)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=3)
+ return res
+
+
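+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# The `initial` and `where` keywords documented above forward to
+# ``np.fmax.reduce``; a hypothetical demo of how they interact.
+def _demo_nanmax_initial_where():
+    # `initial` behaves like one extra element taking part in the maximum.
+    assert np.nanmax(np.array([1.0, 5.0, np.nan]), initial=10.0) == 10.0
+    # `where` selects elements; `initial` is then required so the reduction
+    # is defined even when nothing is selected in a slice.
+    assert np.nanmax(np.array([1.0, 5.0, 2.0]),
+                     where=np.array([True, False, True]),
+                     initial=-np.inf) == 2.0
+
+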
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Return the indices of the minimum values in the specified axis ignoring
+ NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
+ cannot be trusted if a slice contains only NaNs and Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ .. versionadded:: 1.22.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
+
+ See Also
+ --------
+ argmin, nanargmax
+
+ Examples
+ --------
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmin(a)
+ 0
+ >>> np.nanargmin(a)
+ 2
+ >>> np.nanargmin(a, axis=0)
+ array([1, 1])
+ >>> np.nanargmin(a, axis=1)
+ array([1, 0])
+
+ """
+ a, mask = _replace_nan(a, np.inf)
+ if mask is not None:
+ mask = np.all(mask, axis=axis)
+ if np.any(mask):
+ raise ValueError("All-NaN slice encountered")
+ res = np.argmin(a, axis=axis, out=out, keepdims=keepdims)
+ return res
+
+
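+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# Unlike nanmin/nanmax, an all-NaN slice here raises instead of warning,
+# as the docstring below states. Hypothetical demo name.
+def _demo_nanargmin_all_nan():
+    a = np.array([[np.nan, np.nan], [2.0, 3.0]])
+    assert np.nanargmin(a) == 2  # flattened index of the 2.0
+    try:
+        np.nanargmin(a, axis=1)  # row 0 contains only NaNs
+    except ValueError as exc:
+        assert "All-NaN" in str(exc)
+
+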
+def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmax_dispatcher)
+def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+ """
+ Return the indices of the maximum values in the specified axis ignoring
+ NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
+ results cannot be trusted if a slice contains only NaNs and -Infs.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data.
+ axis : int, optional
+ Axis along which to operate. By default flattened input is used.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
+
+ .. versionadded:: 1.22.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ index_array : ndarray
+ An array of indices or a single index value.
+
+ See Also
+ --------
+ argmax, nanargmin
+
+ Examples
+ --------
+ >>> a = np.array([[np.nan, 4], [2, 3]])
+ >>> np.argmax(a)
+ 0
+ >>> np.nanargmax(a)
+ 1
+ >>> np.nanargmax(a, axis=0)
+ array([1, 0])
+ >>> np.nanargmax(a, axis=1)
+ array([1, 1])
+
+ """
+ a, mask = _replace_nan(a, -np.inf)
+ if mask is not None:
+ mask = np.all(mask, axis=axis)
+ if np.any(mask):
+ raise ValueError("All-NaN slice encountered")
+ res = np.argmax(a, axis=axis, out=out, keepdims=keepdims)
+ return res
+
+
+def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nansum_dispatcher)
+def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+ Return the sum of array elements over a given axis treating Not a
+ Numbers (NaNs) as zero.
+
+    In NumPy versions <= 1.9.0 NaN is returned for slices that are all-NaN or
+ empty. In later versions zero is returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose sum is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the sum is computed. The default is to compute the
+ sum of the flattened array.
+ dtype : data-type, optional
+ The type of the returned array and of the accumulator in which the
+ elements are summed. By default, the dtype of `a` is used. An
+ exception is when `a` has an integer type with less precision than
+ the platform (u)intp. In that case, the default will be either
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+ bits. For inexact inputs, dtype must be inexact.
+
+ .. versionadded:: 1.8.0
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``. If provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
+
+ .. versionadded:: 1.8.0
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class' method
+        does not implement `keepdims`, any exceptions will be raised.
+
+ .. versionadded:: 1.8.0
+ initial : scalar, optional
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+    nansum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+ size as `a`, and the same shape as `a` if `axis` is not None
+ or `a` is a 1-d array.
+
+ See Also
+ --------
+ numpy.sum : Sum across array propagating NaNs.
+ isnan : Show which elements are NaN.
+ isfinite : Show which elements are not NaN or +/-inf.
+
+ Notes
+ -----
+ If both positive and negative infinity are present, the sum will be Not
+ A Number (NaN).
+
+ Examples
+ --------
+ >>> np.nansum(1)
+ 1
+ >>> np.nansum([1])
+ 1
+ >>> np.nansum([1, np.nan])
+ 1.0
+ >>> a = np.array([[1, 1], [1, np.nan]])
+ >>> np.nansum(a)
+ 3.0
+ >>> np.nansum(a, axis=0)
+ array([2., 1.])
+ >>> np.nansum([1, np.nan, np.inf])
+ inf
+ >>> np.nansum([1, np.nan, np.NINF])
+ -inf
+ >>> from numpy.testing import suppress_warnings
+ >>> with suppress_warnings() as sup:
+ ... sup.filter(RuntimeWarning)
+ ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+ nan
+
+ """
+ a, mask = _replace_nan(a, 0)
+ return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ initial=initial, where=where)
+
+
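+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# The two edge cases called out in the docstring above, as a hypothetical
+# demo: all-NaN slices sum to zero, mixed infinities give NaN.
+def _demo_nansum_edge_cases():
+    import warnings as _w
+    # Since NumPy 1.9, an all-NaN (or empty) slice sums to zero.
+    assert np.nansum(np.array([np.nan, np.nan])) == 0.0
+    # Both infinities present: inf + (-inf) is NaN.
+    with _w.catch_warnings():
+        _w.simplefilter("ignore", RuntimeWarning)
+        assert np.isnan(np.nansum([1, np.nan, np.inf, -np.inf]))
+
+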
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
+ """
+ Return the product of array elements over a given axis treating Not a
+ Numbers (NaNs) as ones.
+
+ One is returned for slices that are all-NaN or empty.
+
+ .. versionadded:: 1.10.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose product is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the product is computed. The default is to compute
+ the product of the flattened array.
+ dtype : data-type, optional
+ The type of the returned array and of the accumulator in which the
+        elements are multiplied. By default, the dtype of `a` is used. An
+ exception is when `a` has an integer type with less precision than
+ the platform (u)intp. In that case, the default will be either
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+ bits. For inexact inputs, dtype must be inexact.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``. If provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
+ can yield unexpected results.
+ keepdims : bool, optional
+ If True, the axes which are reduced are left in the result as
+ dimensions with size one. With this option, the result will
+        broadcast correctly against the original `a`.
+ initial : scalar, optional
+ The starting value for this product. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+ where : array_like of bool, optional
+ Elements to include in the product. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ nanprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case it is returned.
+
+ See Also
+ --------
+ numpy.prod : Product across array propagating NaNs.
+ isnan : Show which elements are NaN.
+
+ Examples
+ --------
+ >>> np.nanprod(1)
+ 1
+ >>> np.nanprod([1])
+ 1
+ >>> np.nanprod([1, np.nan])
+ 1.0
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nanprod(a)
+ 6.0
+ >>> np.nanprod(a, axis=0)
+ array([3., 2.])
+
+ """
+ a, mask = _replace_nan(a, 1)
+ return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ initial=initial, where=where)
+
+
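+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# NaNs count as ones here, so an all-NaN slice has product 1; hypothetical
+# demo name.
+def _demo_nanprod():
+    assert np.nanprod(np.array([np.nan, np.nan])) == 1.0
+    assert np.nanprod(np.array([2.0, np.nan, 3.0])) == 6.0
+
+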
+def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumsum_dispatcher)
+def nancumsum(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative sum of array elements over a given axis treating Not a
+ Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are
+ encountered and leading NaNs are replaced by zeros.
+
+ Zeros are returned for slices that are all-NaN or empty.
+
+ .. versionadded:: 1.12.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative sum is computed. The default
+ (None) is to compute the cumsum over the flattened array.
+ dtype : dtype, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If `dtype` is not specified, it defaults
+ to the dtype of `a`, unless `a` has an integer dtype with a
+ precision less than that of the default platform integer. In
+ that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
+ more details.
+
+ Returns
+ -------
+    nancumsum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+ size as `a`, and the same shape as `a` if `axis` is not None
+ or `a` is a 1-d array.
+
+ See Also
+ --------
+ numpy.cumsum : Cumulative sum across array propagating NaNs.
+ isnan : Show which elements are NaN.
+
+ Examples
+ --------
+ >>> np.nancumsum(1)
+ array([1])
+ >>> np.nancumsum([1])
+ array([1])
+ >>> np.nancumsum([1, np.nan])
+ array([1., 1.])
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nancumsum(a)
+ array([1., 3., 6., 6.])
+ >>> np.nancumsum(a, axis=0)
+ array([[1., 2.],
+ [4., 2.]])
+ >>> np.nancumsum(a, axis=1)
+ array([[1., 3.],
+ [3., 3.]])
+
+ """
+ a, mask = _replace_nan(a, 0)
+ return np.cumsum(a, axis=axis, dtype=dtype, out=out)
+
+
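+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# Leading NaNs become zeros and later NaNs leave the running sum unchanged,
+# exactly as the docstring above describes. Hypothetical demo name.
+def _demo_nancumsum():
+    out = np.nancumsum(np.array([np.nan, 1.0, np.nan, 2.0]))
+    assert np.array_equal(out, np.array([0.0, 1.0, 1.0, 3.0]))
+
+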
+def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumprod_dispatcher)
+def nancumprod(a, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative product of array elements over a given axis treating Not a
+ Numbers (NaNs) as one. The cumulative product does not change when NaNs are
+ encountered and leading NaNs are replaced by ones.
+
+ Ones are returned for slices that are all-NaN or empty.
+
+ .. versionadded:: 1.12.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int, optional
+ Axis along which the cumulative product is computed. By default
+ the input is flattened.
+ dtype : dtype, optional
+ Type of the returned array, as well as of the accumulator in which
+ the elements are multiplied. If *dtype* is not specified, it
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
+ a precision less than that of the default platform integer. In
+ that case, the default platform integer is used instead.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type of the resulting values will be cast if necessary.
+
+ Returns
+ -------
+ nancumprod : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case it is returned.
+
+ See Also
+ --------
+ numpy.cumprod : Cumulative product across array propagating NaNs.
+ isnan : Show which elements are NaN.
+
+ Examples
+ --------
+ >>> np.nancumprod(1)
+ array([1])
+ >>> np.nancumprod([1])
+ array([1])
+ >>> np.nancumprod([1, np.nan])
+ array([1., 1.])
+ >>> a = np.array([[1, 2], [3, np.nan]])
+ >>> np.nancumprod(a)
+ array([1., 2., 6., 6.])
+ >>> np.nancumprod(a, axis=0)
+ array([[1., 2.],
+ [3., 2.]])
+ >>> np.nancumprod(a, axis=1)
+ array([[1., 2.],
+ [3., 3.]])
+
+ """
+ a, mask = _replace_nan(a, 1)
+ return np.cumprod(a, axis=axis, dtype=dtype, out=out)
+
+
+def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ *, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmean_dispatcher)
+def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ *, where=np._NoValue):
+ """
+ Compute the arithmetic mean along the specified axis, ignoring NaNs.
+
+ Returns the average of the array elements. The average is taken over
+ the flattened array by default, otherwise over the specified axis.
+ `float64` intermediate and return values are used for integer inputs.
+
+ For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose mean is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the means are computed. The default is to compute
+ the mean of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the mean. For integer inputs, the default
+ is `float64`; for inexact inputs, it is the same as the input
+ dtype.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output, but the type will be cast if necessary. See
+ :ref:`ufuncs-output-type` for more details.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class' method
+        does not implement `keepdims`, any exceptions will be raised.
+ where : array_like of bool, optional
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+        otherwise a reference to the output array is returned. NaN is
+ returned for slices that contain only NaNs.
+
+ See Also
+ --------
+ average : Weighted average
+ mean : Arithmetic mean taken while not ignoring NaNs
+ var, nanvar
+
+ Notes
+ -----
+ The arithmetic mean is the sum of the non-NaN elements along the axis
+ divided by the number of non-NaN elements.
+
+ Note that for floating-point input, the mean is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32`. Specifying a
+ higher-precision accumulator using the `dtype` keyword can alleviate
+ this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanmean(a)
+ 2.6666666666666665
+ >>> np.nanmean(a, axis=0)
+ array([2., 4.])
+ >>> np.nanmean(a, axis=1)
+ array([1., 3.5]) # may vary
+
+ """
+ arr, mask = _replace_nan(a, 0)
+ if mask is None:
+ return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
+
+ if dtype is not None:
+ dtype = np.dtype(dtype)
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then dtype must be inexact")
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then out must be inexact")
+
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims,
+ where=where)
+ tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
+ avg = _divide_by_count(tot, cnt, out=out)
+
+ isbad = (cnt == 0)
+ if isbad.any():
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
+ # NaN is the only possible bad value, so no further
+ # action is needed to handle bad results.
+ return avg
+
+
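+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# nanmean is the sum of the non-NaN elements divided by their count, as the
+# Notes above state; a hypothetical demo of that identity.
+def _demo_nanmean_identity():
+    a = np.array([[1.0, np.nan], [3.0, 4.0]])
+    cnt = np.sum(~np.isnan(a))  # number of non-NaN elements
+    assert np.isclose(np.nanmean(a), np.nansum(a) / cnt)
+
+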
+def _nanmedian1d(arr1d, overwrite_input=False):
+ """
+ Private function for rank 1 arrays. Compute the median ignoring NaNs.
+ See nanmedian for parameter usage
+ """
+ arr1d_parsed, overwrite_input = _remove_nan_1d(
+ arr1d, overwrite_input=overwrite_input,
+ )
+
+ if arr1d_parsed.size == 0:
+ # Ensure that a nan-esque scalar of the appropriate type (and unit)
+ # is returned for `timedelta64` and `complexfloating`
+ return arr1d[-1]
+
+ return np.median(arr1d_parsed, overwrite_input=overwrite_input)
+
+
+def _nanmedian(a, axis=None, out=None, overwrite_input=False):
+ """
+ Private function that doesn't support extended axis or keepdims.
+ These methods are extended to this function using _ureduce
+ See nanmedian for parameter usage
+
+ """
+ if axis is None or a.ndim == 1:
+ part = a.ravel()
+ if out is None:
+ return _nanmedian1d(part, overwrite_input)
+ else:
+ out[...] = _nanmedian1d(part, overwrite_input)
+ return out
+ else:
+ # for small medians use sort + indexing which is still faster than
+ # apply_along_axis
+ # benchmarked with shuffled (50, 50, x) containing a few NaN
+ if a.shape[axis] < 600:
+ return _nanmedian_small(a, axis, out, overwrite_input)
+ result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
+ if out is not None:
+ out[...] = result
+ return result
+
+
+def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
+ """
+ sort + indexing median, faster for small medians along multiple
+ dimensions due to the high overhead of apply_along_axis
+
+ see nanmedian for parameter usage
+ """
+ a = np.ma.masked_array(a, np.isnan(a))
+ m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
+ for i in range(np.count_nonzero(m.mask.ravel())):
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=4)
+
+ fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
+ if out is not None:
+ out[...] = m.filled(fill_value)
+ return out
+ return m.filled(fill_value)
+
+
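+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# The masked-array route taken by _nanmedian_small is observable from the
+# public API: nanmedian agrees with np.ma.median over a NaN mask.
+# Hypothetical demo name.
+def _demo_nanmedian_masked_equivalence():
+    a = np.array([[10.0, np.nan, 4.0], [3.0, 2.0, 1.0]])
+    masked = np.ma.masked_array(a, np.isnan(a))
+    assert np.array_equal(np.nanmedian(a, axis=1),
+                          np.ma.median(masked, axis=1).filled(np.nan))
+
+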
+def _nanmedian_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmedian_dispatcher)
+def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
+ """
+ Compute the median along the specified axis, while ignoring NaNs.
+
+ Returns the median of the array elements.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the medians are computed. The default
+ is to compute the median along a flattened version of the array.
+ A sequence of axes is supported since version 1.9.0.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow use of memory of input array `a` for
+ calculations. The input array will be modified by the call to
+ `median`. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. If `overwrite_input` is ``True`` and `a` is not already an
+ `ndarray`, an error will be raised.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ Returns
+ -------
+ median : ndarray
+ A new array holding the result. If the input contains integers
+ or floats smaller than ``float64``, then the output data-type is
+ ``np.float64``. Otherwise, the data-type of the output is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ mean, median, percentile
+
+ Notes
+ -----
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two
+ middle values of ``V_sorted`` when ``N`` is even.
+
+ Examples
+ --------
+ >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
+ >>> a[0, 1] = np.nan
+ >>> a
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.median(a)
+ nan
+ >>> np.nanmedian(a)
+ 3.0
+ >>> np.nanmedian(a, axis=0)
+ array([6.5, 2. , 2.5])
+ >>> np.median(a, axis=1)
+ array([nan, 2.])
+ >>> b = a.copy()
+ >>> np.nanmedian(b, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+ >>> b = a.copy()
+ >>> np.nanmedian(b, axis=None, overwrite_input=True)
+ 3.0
+ >>> assert not np.all(a==b)
+
+ """
+ a = np.asanyarray(a)
+ # apply_along_axis in _nanmedian doesn't handle empty arrays well,
+    # so deal with them upfront
+ if a.size == 0:
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
+
+ return function_base._ureduce(a, func=_nanmedian, keepdims=keepdims,
+ axis=axis, out=out,
+ overwrite_input=overwrite_input)
+
+
+def _nanpercentile_dispatcher(
+ a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, interpolation=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_nanpercentile_dispatcher)
+def nanpercentile(
+ a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=np._NoValue,
+ *,
+ interpolation=None,
+):
+ """
+ Compute the qth percentile of the data along the specified axis,
+ while ignoring nan values.
+
+ Returns the qth percentile(s) of the array elements.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+ nan values to be ignored.
+ q : array_like of float
+ Percentile or sequence of percentiles to compute, which must be
+ between 0 and 100 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the percentiles are computed. The default
+ is to compute the percentile(s) along a flattened version of the
+ array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape and buffer length as the expected output, but the
+ type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by
+ intermediate calculations, to save memory. In this case, the
+        contents of the input `a` after this function completes are
+ undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ percentile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+        * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ percentile : scalar or ndarray
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple percentiles are given, first axis of
+ the result corresponds to the percentiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ nanmean
+ nanmedian : equivalent to ``nanpercentile(..., 50)``
+ percentile, median, mean
+    nanquantile : equivalent to nanpercentile, except `q` is in the
+        range [0, 1].
+
+ Notes
+ -----
+ For more information please see `numpy.percentile`
+
+ Examples
+ --------
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.percentile(a, 50)
+ nan
+ >>> np.nanpercentile(a, 50)
+ 3.0
+ >>> np.nanpercentile(a, 50, axis=0)
+ array([6.5, 2. , 2.5])
+ >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+ >>> m = np.nanpercentile(a, 50, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanpercentile(a, 50, axis=0, out=out)
+ array([6.5, 2. , 2.5])
+ >>> m
+ array([6.5, 2. , 2.5])
+
+ >>> b = a.copy()
+ >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = function_base._check_interpolation_as_method(
+ method, interpolation, "nanpercentile")
+
+ a = np.asanyarray(a)
+ q = np.true_divide(q, 100.0)
+ # undo any decay that the ufunc performed (see gh-13105)
+ q = np.asanyarray(q)
+ if not function_base._quantile_is_valid(q):
+ raise ValueError("Percentiles must be in the range [0, 100]")
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims)
+
+
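+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# How three of the `method` options documented above differ on the same
+# data: 'linear' interpolates, 'lower'/'higher' snap to observed values.
+# Hypothetical demo name.
+def _demo_nanpercentile_methods():
+    a = np.array([1.0, 2.0, 3.0, 4.0, np.nan])
+    assert np.nanpercentile(a, 50) == 2.5                    # 'linear'
+    assert np.nanpercentile(a, 50, method="lower") == 2.0
+    assert np.nanpercentile(a, 50, method="higher") == 3.0
+
+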
+def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ method=None, keepdims=None, *, interpolation=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_nanquantile_dispatcher)
+def nanquantile(
+ a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=np._NoValue,
+ *,
+ interpolation=None,
+):
+ """
+ Compute the qth quantile of the data along the specified axis,
+    while ignoring nan values.
+
+    Returns the qth quantile(s) of the array elements.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array, containing
+        nan values to be ignored.
+ q : array_like of float
+ Quantile or sequence of quantiles to compute, which must be between
+ 0 and 1 inclusive.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the quantiles are computed. The
+ default is to compute the quantile(s) along a flattened
+ version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow the input array `a` to be modified by intermediate
+ calculations, to save memory. In this case, the contents of the input
+        `a` after this function completes are undefined.
+ method : str, optional
+ This parameter specifies the method to use for estimating the
+ quantile. There are many different methods, some unique to NumPy.
+ See the notes for explanation. The options sorted by their R type
+ as summarized in the H&F paper [1]_ are:
+
+ 1. 'inverted_cdf'
+ 2. 'averaged_inverted_cdf'
+ 3. 'closest_observation'
+ 4. 'interpolated_inverted_cdf'
+ 5. 'hazen'
+ 6. 'weibull'
+ 7. 'linear' (default)
+ 8. 'median_unbiased'
+ 9. 'normal_unbiased'
+
+ The first three methods are discontinuous. NumPy further defines the
+ following discontinuous variations of the default 'linear' (7.) option:
+
+ * 'lower'
+        * 'higher'
+ * 'midpoint'
+ * 'nearest'
+
+ .. versionchanged:: 1.22.0
+ This argument was previously called "interpolation" and only
+ offered the "linear" default and last four options.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
+
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
+
+ interpolation : str, optional
+ Deprecated name for the method keyword argument.
+
+ .. deprecated:: 1.22.0
+
+ Returns
+ -------
+ quantile : scalar or ndarray
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple quantiles are given, first axis of
+ the result corresponds to the quantiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
+
+ See Also
+ --------
+ quantile
+    nanmean
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
+
+ Notes
+ -----
+ For more information please see `numpy.quantile`
+
+ Examples
+ --------
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ >>> a[0][1] = np.nan
+ >>> a
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
+ >>> np.quantile(a, 0.5)
+ nan
+ >>> np.nanquantile(a, 0.5)
+ 3.0
+ >>> np.nanquantile(a, 0.5, axis=0)
+ array([6.5, 2. , 2.5])
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
+ array([[7.],
+ [2.]])
+ >>> m = np.nanquantile(a, 0.5, axis=0)
+ >>> out = np.zeros_like(m)
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
+ array([6.5, 2. , 2.5])
+ >>> m
+ array([6.5, 2. , 2.5])
+ >>> b = a.copy()
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
+ array([7., 2.])
+ >>> assert not np.all(a==b)
+
+ References
+ ----------
+ .. [1] R. J. Hyndman and Y. Fan,
+ "Sample quantiles in statistical packages,"
+ The American Statistician, 50(4), pp. 361-365, 1996
+
+ """
+ if interpolation is not None:
+ method = function_base._check_interpolation_as_method(
+ method, interpolation, "nanquantile")
+
+ a = np.asanyarray(a)
+ q = np.asanyarray(q)
+ if not function_base._quantile_is_valid(q):
+ raise ValueError("Quantiles must be in the range [0, 1]")
+ return _nanquantile_unchecked(
+ a, q, axis, out, overwrite_input, method, keepdims)
+
+
+def _nanquantile_unchecked(
+ a,
+ q,
+ axis=None,
+ out=None,
+ overwrite_input=False,
+ method="linear",
+ keepdims=np._NoValue,
+):
+ """Assumes that q is in [0, 1], and is an ndarray"""
+ # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
+    # so deal with them upfront
+ if a.size == 0:
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
+ return function_base._ureduce(a,
+ func=_nanquantile_ureduce_func,
+ q=q,
+ keepdims=keepdims,
+ axis=axis,
+ out=out,
+ overwrite_input=overwrite_input,
+ method=method)
+
+
+def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
+ method="linear"):
+ """
+ Private function that doesn't support extended axis or keepdims.
+ These methods are extended to this function using _ureduce
+ See nanpercentile for parameter usage
+ """
+ if axis is None or a.ndim == 1:
+ part = a.ravel()
+ result = _nanquantile_1d(part, q, overwrite_input, method)
+ else:
+ result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
+ overwrite_input, method)
+ # apply_along_axis fills in collapsed axis with results.
+ # Move that axis to the beginning to match percentile's
+ # convention.
+ if q.ndim != 0:
+ result = np.moveaxis(result, axis, 0)
+
+ if out is not None:
+ out[...] = result
+ return result
+
+
+def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"):
+ """
+ Private function for rank 1 arrays. Compute quantile ignoring NaNs.
+ See nanpercentile for parameter usage
+ """
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
+ overwrite_input=overwrite_input)
+ if arr1d.size == 0:
+ # convert to scalar
+ return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]
+
+ return function_base._quantile_unchecked(
+ arr1d, q, overwrite_input=overwrite_input, method=method)
+
+
+def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanvar_dispatcher)
+def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
+ *, where=np._NoValue):
+ """
+ Compute the variance along the specified axis, while ignoring NaNs.
+
+ Returns the variance of the array elements, a measure of the spread of
+ a distribution. The variance is computed for the flattened array by
+ default, otherwise over the specified axis.
+
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
+ returned and a `RuntimeWarning` is raised.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing numbers whose variance is desired. If `a` is not an
+ array, a conversion is attempted.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the variance is computed. The default is to compute
+ the variance of the flattened array.
+ dtype : data-type, optional
+ Type to use in computing the variance. For arrays of integer type
+ the default is `float64`; for arrays of float types it is the same as
+ the array type.
+ out : ndarray, optional
+ Alternate output array in which to place the result. It must have
+ the same shape as the expected output, but the type is cast if
+ necessary.
+ ddof : int, optional
+ "Delta Degrees of Freedom": the divisor used in the calculation is
+ ``N - ddof``, where ``N`` represents the number of non-NaN
+ elements. By default `ddof` is zero.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ where : array_like of bool, optional
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
+ details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ variance : ndarray, see dtype parameter above
+ If `out` is None, return a new array containing the variance,
+ otherwise return a reference to the output array. If ddof is >= the
+ number of non-NaN elements in a slice or the slice contains only
+ NaNs, then the result for that slice is NaN.
+
+ See Also
+ --------
+ std : Standard deviation
+ mean : Average
+ var : Variance while not ignoring NaNs
+ nanstd, nanmean
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ The variance is the average of the squared deviations from the mean,
+ i.e., ``var = mean(abs(x - x.mean())**2)``.
+
+ The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+ instead. In standard statistical practice, ``ddof=1`` provides an
+ unbiased estimator of the variance of a hypothetical infinite
+ population. ``ddof=0`` provides a maximum likelihood estimate of the
+ variance for normally distributed variables.
+
+ Note that for complex numbers, the absolute value is taken before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the variance is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for `float32` (see example
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
+ keyword can alleviate this issue.
+
+ For this function to work on sub-classes of ndarray, they must define
+ `sum` with the kwarg `keepdims`
+
+ Examples
+ --------
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanvar(a)
+ 1.5555555555555554
+ >>> np.nanvar(a, axis=0)
+ array([1., 0.])
+ >>> np.nanvar(a, axis=1)
+ array([0., 0.25]) # may vary
+
+ """
+ arr, mask = _replace_nan(a, 0)
+ if mask is None:
+ return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims, where=where)
+
+ if dtype is not None:
+ dtype = np.dtype(dtype)
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then dtype must be inexact")
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
+ raise TypeError("If a is inexact, then out must be inexact")
+
+ # Compute mean
+ if type(arr) is np.matrix:
+ _keepdims = np._NoValue
+ else:
+ _keepdims = True
+    # matrix is special-cased for backward compatibility: these sums need
+    # to be computed with keepdims=True, but np.matrix raises an error when
+    # given that kwarg. Since matrix reductions always behave as if
+    # keepdims=True anyway, omitting the kwarg preserves the old behaviour.
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims,
+ where=where)
+ avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims, where=where)
+ avg = _divide_by_count(avg, cnt)
+
+ # Compute squared deviation from mean.
+ np.subtract(arr, avg, out=arr, casting='unsafe', where=where)
+ arr = _copyto(arr, 0, mask)
+ if issubclass(arr.dtype.type, np.complexfloating):
+ sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real
+ else:
+ sqr = np.multiply(arr, arr, out=arr, where=where)
+
+ # Compute variance.
+ var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+ where=where)
+
+ # Precaution against reduced object arrays
+ try:
+ var_ndim = var.ndim
+ except AttributeError:
+ var_ndim = np.ndim(var)
+ if var_ndim < cnt.ndim:
+ # Subclasses of ndarray may ignore keepdims, so check here.
+ cnt = cnt.squeeze(axis)
+ dof = cnt - ddof
+ var = _divide_by_count(var, dof)
+
+ isbad = (dof <= 0)
+ if np.any(isbad):
+ warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
+ stacklevel=3)
+ # NaN, inf, or negative numbers are all possible bad
+ # values, so explicitly replace them with NaN.
+ var = _copyto(var, np.nan, isbad)
+ return var
+
+
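+# --- Editorial sketch, not part of the NumPy sources ---------------------
+# The `ddof` divisor and the zero-degrees-of-freedom warning documented
+# above, in a hypothetical demo: with n non-NaN values, the population
+# variance (ddof=0) is (n-1)/n times the sample variance (ddof=1).
+def _demo_nanvar_ddof():
+    import warnings as _w
+    a = np.array([1.0, 2.0, np.nan, 4.0])  # n = 3 non-NaN values
+    assert np.isclose(np.nanvar(a), np.nanvar(a, ddof=1) * (3 - 1) / 3)
+    with _w.catch_warnings(record=True) as caught:
+        _w.simplefilter("always")
+        res = np.nanvar(np.array([1.0, np.nan]), ddof=1)  # dof = 1 - 1 = 0
+    assert np.isnan(res)
+    assert any("Degrees of freedom" in str(m.message) for m in caught)
+
+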
+def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
+ keepdims=None, *, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanstd_dispatcher)
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
+ *, where=np._NoValue):
+ """
+ Compute the standard deviation along the specified axis, while
+ ignoring NaNs.
+
+ Returns the standard deviation, a measure of the spread of a
+ distribution, of the non-NaN array elements. The standard deviation is
+ computed for the flattened array by default, otherwise over the
+ specified axis.
+
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
+ returned and a `RuntimeWarning` is raised.
+
+ .. versionadded:: 1.8.0
+
+ Parameters
+ ----------
+ a : array_like
+ Calculate the standard deviation of the non-NaN values.
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the standard deviation is computed. The default is
+ to compute the standard deviation of the flattened array.
+ dtype : dtype, optional
+ Type to use in computing the standard deviation. For arrays of
+ integer type the default is float64, for arrays of float types it
+ is the same as the array type.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type (of the
+ calculated values) will be cast if necessary.
+ ddof : int, optional
+ Means Delta Degrees of Freedom. The divisor used in calculations
+ is ``N - ddof``, where ``N`` represents the number of non-NaN
+ elements. By default `ddof` is zero.
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If this value is anything but the default it is passed through
+ as-is to the relevant functions of the sub-classes. If these
+ functions do not have a `keepdims` kwarg, a RuntimeError will
+ be raised.
+ where : array_like of bool, optional
+ Elements to include in the standard deviation.
+ See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.22.0
+
+ Returns
+ -------
+ standard_deviation : ndarray, see dtype parameter above.
+ If `out` is None, return a new array containing the standard
+ deviation, otherwise return a reference to the output array. If
+ ddof is >= the number of non-NaN elements in a slice or the slice
+ contains only NaNs, then the result for that slice is NaN.
+
+ See Also
+ --------
+ var, mean, std
+ nanvar, nanmean
+ :ref:`ufuncs-output-type`
+
+ Notes
+ -----
+ The standard deviation is the square root of the average of the squared
+ deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
+
+ The average squared deviation is normally calculated as
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
+ specified, the divisor ``N - ddof`` is used instead. In standard
+ statistical practice, ``ddof=1`` provides an unbiased estimator of the
+ variance of the infinite population. ``ddof=0`` provides a maximum
+ likelihood estimate of the variance for normally distributed variables.
+ The standard deviation computed in this function is the square root of
+ the estimated variance, so even with ``ddof=1``, it will not be an
+ unbiased estimate of the standard deviation per se.
+
+ Note that, for complex numbers, `std` takes the absolute value before
+ squaring, so that the result is always real and nonnegative.
+
+ For floating-point input, the *std* is computed using the same
+ precision the input has. Depending on the input data, this can cause
+ the results to be inaccurate, especially for float32 (see example
+ below). Specifying a higher-accuracy accumulator using the `dtype`
+ keyword can alleviate this issue.
+
+ Examples
+ --------
+ >>> a = np.array([[1, np.nan], [3, 4]])
+ >>> np.nanstd(a)
+ 1.247219128924647
+ >>> np.nanstd(a, axis=0)
+ array([1., 0.])
+ >>> np.nanstd(a, axis=1)
+ array([0., 0.5]) # may vary
+
+ """
+ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
+ keepdims=keepdims, where=where)
+ if isinstance(var, np.ndarray):
+ std = np.sqrt(var, out=var)
+ elif hasattr(var, 'dtype'):
+ std = var.dtype.type(np.sqrt(var))
+ else:
+ std = np.sqrt(var)
+ return std
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi b/venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi
new file mode 100644
index 00000000..8642055f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/nanfunctions.pyi
@@ -0,0 +1,38 @@
+from numpy.core.fromnumeric import (
+ amin,
+ amax,
+ argmin,
+ argmax,
+ sum,
+ prod,
+ cumsum,
+ cumprod,
+ mean,
+ var,
+ std
+)
+
+from numpy.lib.function_base import (
+ median,
+ percentile,
+ quantile,
+)
+
+__all__: list[str]
+
+# NOTE: In reality these functions are not aliases but distinct functions
+# with identical signatures.
+nanmin = amin
+nanmax = amax
+nanargmin = argmin
+nanargmax = argmax
+nansum = sum
+nanprod = prod
+nancumsum = cumsum
+nancumprod = cumprod
+nanmean = mean
+nanvar = var
+nanstd = std
+nanmedian = median
+nanpercentile = percentile
+nanquantile = quantile
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/npyio.py b/venv/lib/python3.9/site-packages/numpy/lib/npyio.py
new file mode 100644
index 00000000..3d695141
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/npyio.py
@@ -0,0 +1,2546 @@
+import os
+import re
+import functools
+import itertools
+import warnings
+import weakref
+import contextlib
+import operator
+from operator import itemgetter, index as opindex, methodcaller
+from collections.abc import Mapping
+
+import numpy as np
+from . import format
+from ._datasource import DataSource
+from numpy.core import overrides
+from numpy.core.multiarray import packbits, unpackbits
+from numpy.core._multiarray_umath import _load_from_filelike
+from numpy.core.overrides import set_array_function_like_doc, set_module
+from ._iotools import (
+ LineSplitter, NameValidator, StringConverter, ConverterError,
+ ConverterLockError, ConversionWarning, _is_string_like,
+ has_nested_fields, flatten_dtype, easy_dtype, _decode_line
+ )
+
+from numpy.compat import (
+ asbytes, asstr, asunicode, os_fspath, os_PathLike,
+ pickle
+ )
+
+
+__all__ = [
+ 'savetxt', 'loadtxt', 'genfromtxt',
+ 'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
+ 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
+ ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+class BagObj:
+ """
+ BagObj(obj)
+
+ Convert attribute look-ups to getitems on the object passed in.
+
+ Parameters
+ ----------
+ obj : class instance
+ Object on which attribute look-up is performed.
+
+ Examples
+ --------
+ >>> from numpy.lib.npyio import BagObj as BO
+ >>> class BagDemo:
+ ... def __getitem__(self, key): # An instance of BagObj(BagDemo)
+ ... # will call this method when any
+ ... # attribute look-up is required
+ ... result = "Doesn't matter what you want, "
+ ... return result + "you're gonna get this"
+ ...
+ >>> demo_obj = BagDemo()
+ >>> bagobj = BO(demo_obj)
+ >>> bagobj.hello_there
+ "Doesn't matter what you want, you're gonna get this"
+ >>> bagobj.I_can_be_anything
+ "Doesn't matter what you want, you're gonna get this"
+
+ """
+
+ def __init__(self, obj):
+ # Use weakref to make NpzFile objects collectable by refcount
+ self._obj = weakref.proxy(obj)
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, '_obj')[key]
+ except KeyError:
+ raise AttributeError(key) from None
+
+ def __dir__(self):
+ """
+ Enables dir(bagobj) to list the files in an NpzFile.
+
+ This also enables tab-completion in an interpreter or IPython.
+ """
+ return list(object.__getattribute__(self, '_obj').keys())
+
+
+def zipfile_factory(file, *args, **kwargs):
+ """
+ Create a ZipFile.
+
+ Allows for Zip64, and the `file` argument can accept file, str, or
+ pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
+ constructor.
+ """
+ if not hasattr(file, 'read'):
+ file = os_fspath(file)
+ import zipfile
+ kwargs['allowZip64'] = True
+ return zipfile.ZipFile(file, *args, **kwargs)
+
+
+class NpzFile(Mapping):
+ """
+ NpzFile(fid)
+
+ A dictionary-like object with lazy-loading of files in the zipped
+ archive provided on construction.
+
+ `NpzFile` is used to load files in the NumPy ``.npz`` data archive
+    format. It assumes that files in the archive have a ``.npy`` extension;
+ other files are ignored.
+
+ The arrays and file strings are lazily loaded on either
+ getitem access using ``obj['key']`` or attribute lookup using
+ ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
+ be obtained with ``obj.files`` and the ZipFile object itself using
+ ``obj.zip``.
+
+ Attributes
+ ----------
+ files : list of str
+ List of all files in the archive with a ``.npy`` extension.
+ zip : ZipFile instance
+ The ZipFile object initialized with the zipped archive.
+ f : BagObj instance
+        An object on which attribute lookup can be performed as an
+        alternative to getitem access on the `NpzFile` instance itself.
+ allow_pickle : bool, optional
+ Allow loading pickled data. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass on to pickle.load.
+ These are only useful when loading object arrays saved on
+ Python 2 when using Python 3.
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:meth:`ast.literal_eval()` for details.
+ This option is ignored when `allow_pickle` is passed. In that case
+ the file is by definition trusted and the limit is unnecessary.
+
+ Parameters
+ ----------
+ fid : file or str
+ The zipped archive to open. This is either a file-like object
+ or a string containing the path to the archive.
+ own_fid : bool, optional
+ Whether NpzFile should close the file handle.
+ Requires that `fid` is a file-like object.
+
+ Examples
+ --------
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+ >>> np.savez(outfile, x=x, y=y)
+ >>> _ = outfile.seek(0)
+
+ >>> npz = np.load(outfile)
+ >>> isinstance(npz, np.lib.npyio.NpzFile)
+ True
+ >>> sorted(npz.files)
+ ['x', 'y']
+ >>> npz['x'] # getitem access
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> npz.f.x # attribute lookup
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ """
+ # Make __exit__ safe if zipfile_factory raises an exception
+ zip = None
+ fid = None
+
+ def __init__(self, fid, own_fid=False, allow_pickle=False,
+ pickle_kwargs=None, *,
+ max_header_size=format._MAX_HEADER_SIZE):
+ # Import is postponed to here since zipfile depends on gzip, an
+ # optional component of the so-called standard library.
+ _zip = zipfile_factory(fid)
+ self._files = _zip.namelist()
+ self.files = []
+ self.allow_pickle = allow_pickle
+ self.max_header_size = max_header_size
+ self.pickle_kwargs = pickle_kwargs
+ for x in self._files:
+ if x.endswith('.npy'):
+ self.files.append(x[:-4])
+ else:
+ self.files.append(x)
+ self.zip = _zip
+ self.f = BagObj(self)
+ if own_fid:
+ self.fid = fid
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
+ """
+ Close the file.
+
+ """
+ if self.zip is not None:
+ self.zip.close()
+ self.zip = None
+ if self.fid is not None:
+ self.fid.close()
+ self.fid = None
+ self.f = None # break reference cycle
+
+ def __del__(self):
+ self.close()
+
+ # Implement the Mapping ABC
+ def __iter__(self):
+ return iter(self.files)
+
+ def __len__(self):
+ return len(self.files)
+
+ def __getitem__(self, key):
+ # FIXME: This seems like it will copy strings around
+ # more than is strictly necessary. The zipfile
+ # will read the string and then
+ # the format.read_array will copy the string
+ # to another place in memory.
+ # It would be better if the zipfile could read
+ # (or at least uncompress) the data
+ # directly into the array memory.
+ member = False
+ if key in self._files:
+ member = True
+ elif key in self.files:
+ member = True
+ key += '.npy'
+ if member:
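+ # Peek at the member's magic bytes: members written by ``save`` start
+ # with ``format.MAGIC_PREFIX`` and are parsed as arrays; any other
+ # member is returned as the raw bytes stored in the archive.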
+ fid = self.zip.open(key)
+ magic = fid.read(len(format.MAGIC_PREFIX))
+ fid.close()
+ if magic == format.MAGIC_PREFIX:
+ fid = self.zip.open(key)
+ return format.read_array(fid,
+ allow_pickle=self.allow_pickle,
+ pickle_kwargs=self.pickle_kwargs,
+ max_header_size=self.max_header_size)
+ else:
+ return self.zip.read(key)
+ else:
+ raise KeyError("%s is not a file in the archive" % key)
+
+
+@set_module('numpy')
+def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
+ encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
+ """
+ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+
+ .. warning:: Loading files that contain object arrays uses the ``pickle``
+ module, which is not secure against erroneous or maliciously
+ constructed data. Consider passing ``allow_pickle=False`` to
+ load data that is known not to contain object arrays for the
+ safer handling of untrusted sources.
+
+ Parameters
+ ----------
+ file : file-like object, string, or pathlib.Path
+ The file to read. File-like objects must support the
+ ``seek()`` and ``read()`` methods and must always
+ be opened in binary mode. Pickled files require that the
+ file-like object support the ``readline()`` method as well.
+ mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+ If not None, then memory-map the file, using the given mode (see
+ `numpy.memmap` for a detailed description of the modes). A
+ memory-mapped array is kept on disk. However, it can be accessed
+ and sliced like any ndarray. Memory mapping is especially useful
+ for accessing small fragments of large files without reading the
+ entire file into memory.
+ allow_pickle : bool, optional
+ Allow loading pickled object arrays stored in npy files. Reasons for
+ disallowing pickles include security, as loading pickled data can
+ execute arbitrary code. If pickles are disallowed, loading object
+ arrays will fail. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
+ fix_imports : bool, optional
+ Only useful when loading Python 2 generated pickled files on Python 3,
+ which includes npy/npz files containing object arrays. If `fix_imports`
+ is True, pickle will try to map the old Python 2 names to the new names
+ used in Python 3.
+ encoding : str, optional
+ What encoding to use when reading Python 2 strings. Only useful when
+ loading Python 2 generated pickled files in Python 3, which includes
+ npy/npz files containing object arrays. Values other than 'latin1',
+ 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+ data. Default: 'ASCII'
+ max_header_size : int, optional
+ Maximum allowed size of the header. Large headers may not be safe
+ to load securely and thus require explicitly passing a larger value.
+ See :py:func:`ast.literal_eval` for details.
+ This option is ignored when `allow_pickle` is passed. In that case
+ the file is by definition trusted and the limit is unnecessary.
+
+ Returns
+ -------
+ result : array, tuple, dict, etc.
+ Data stored in the file. For ``.npz`` files, the returned instance of
+ the `NpzFile` class must be closed to avoid leaking file descriptors.
+
+ Raises
+ ------
+ OSError
+ If the input file does not exist or cannot be read.
+ UnpicklingError
+ If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
+ ValueError
+ If the file contains an object array but ``allow_pickle=False`` was given.
+
+ See Also
+ --------
+ save, savez, savez_compressed, loadtxt
+ memmap : Create a memory-map to an array stored in a file on disk.
+ lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+ Notes
+ -----
+ - If the file contains pickle data, then whatever object is stored
+ in the pickle is returned.
+ - If the file is a ``.npy`` file, then a single array is returned.
+ - If the file is a ``.npz`` file, then a dictionary-like object is
+ returned, containing ``{filename: array}`` key-value pairs, one for
+ each file in the archive.
+ - If the file is a ``.npz`` file, the returned value supports the
+ context manager protocol in a similar fashion to the open function::
+
+ with load('foo.npz') as data:
+ a = data['a']
+
+ The underlying file descriptor is closed when exiting the 'with'
+ block.
+
+ Examples
+ --------
+ Store data to disk, and load it again:
+
+ >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
+ >>> np.load('/tmp/123.npy')
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ Store compressed data to disk, and load it again:
+
+ >>> a=np.array([[1, 2, 3], [4, 5, 6]])
+ >>> b=np.array([1, 2])
+ >>> np.savez('/tmp/123.npz', a=a, b=b)
+ >>> data = np.load('/tmp/123.npz')
+ >>> data['a']
+ array([[1, 2, 3],
+ [4, 5, 6]])
+ >>> data['b']
+ array([1, 2])
+ >>> data.close()
+
+ Mem-map the stored array, and then access the second row
+ directly from disk:
+
+ >>> X = np.load('/tmp/123.npy', mmap_mode='r')
+ >>> X[1, :]
+ memmap([4, 5, 6])
+
+ """
+ if encoding not in ('ASCII', 'latin1', 'bytes'):
+ # The 'encoding' value for pickle also affects what encoding
+ # the serialized binary data of NumPy arrays is loaded
+ # in. Pickle does not pass on the encoding information to
+ # NumPy. The unpickling code in numpy.core.multiarray is
+ # written to assume that unicode data appearing where binary
+ # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+ #
+ # Other encoding values can corrupt binary data, and we
+ # purposefully disallow them. For the same reason, the errors=
+ # argument is not exposed, as values other than 'strict' can
+ # similarly silently corrupt numerical data.
+ raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+ pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
+
+ with contextlib.ExitStack() as stack:
+ if hasattr(file, 'read'):
+ fid = file
+ own_fid = False
+ else:
+ fid = stack.enter_context(open(os_fspath(file), "rb"))
+ own_fid = True
+
+ # Code to distinguish between NumPy binary files and pickles.
+ _ZIP_PREFIX = b'PK\x03\x04'
+ _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
+ N = len(format.MAGIC_PREFIX)
+ magic = fid.read(N)
+ # If the file size is less than N, we need to make sure not
+ # to seek past the beginning of the file
+ fid.seek(-min(N, len(magic)), 1) # back-up
+ if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
+ # zip-file (assume .npz)
+ # Potentially transfer file ownership to NpzFile
+ stack.pop_all()
+ ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs,
+ max_header_size=max_header_size)
+ return ret
+ elif magic == format.MAGIC_PREFIX:
+ # .npy file
+ if mmap_mode:
+ if allow_pickle:
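+ # If pickles are allowed, the file is by definition trusted (see
+ # the docstring), so the header size limit is effectively lifted.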
+ max_header_size = 2**64
+ return format.open_memmap(file, mode=mmap_mode,
+ max_header_size=max_header_size)
+ else:
+ return format.read_array(fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs,
+ max_header_size=max_header_size)
+ else:
+ # Try a pickle
+ if not allow_pickle:
+ raise ValueError("Cannot load file containing pickled data "
+ "when allow_pickle=False")
+ try:
+ return pickle.load(fid, **pickle_kwargs)
+ except Exception as e:
+ raise pickle.UnpicklingError(
+ f"Failed to interpret file {file!r} as a pickle") from e
+
+
+def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+ return (arr,)
+
+
+@array_function_dispatch(_save_dispatcher)
+def save(file, arr, allow_pickle=True, fix_imports=True):
+ """
+ Save an array to a binary file in NumPy ``.npy`` format.
+
+ Parameters
+ ----------
+ file : file, str, or pathlib.Path
+ File or filename to which the data is saved. If file is a file-object,
+ then the filename is unchanged. If file is a string or Path, a ``.npy``
+ extension will be appended to the filename if it does not already
+ have one.
+ arr : array_like
+ Array data to be saved.
+ allow_pickle : bool, optional
+ Allow saving object arrays using Python pickles. Reasons for disallowing
+ pickles include security (loading pickled data can execute arbitrary
+ code) and portability (pickled objects may not be loadable on different
+ Python installations, for example if the stored objects require libraries
+ that are not available, and not all pickled data is compatible between
+ Python 2 and Python 3).
+ Default: True
+ fix_imports : bool, optional
+ Only useful in forcing objects in object arrays on Python 3 to be
+ pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
+ will try to map the new Python 3 names to the old module names used in
+ Python 2, so that the pickle data stream is readable with Python 2.
+
+ See Also
+ --------
+ savez : Save several arrays into a ``.npz`` archive
+ savetxt, load
+
+ Notes
+ -----
+ For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+ Data saved to an already-open file handle is written at the file's
+ current position, so repeated calls append arrays to the file.
+
+ Examples
+ --------
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+
+ >>> x = np.arange(10)
+ >>> np.save(outfile, x)
+
+ >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
+ >>> np.load(outfile)
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+
+ >>> with open('test.npy', 'wb') as f:
+ ... np.save(f, np.array([1, 2]))
+ ... np.save(f, np.array([1, 3]))
+ >>> with open('test.npy', 'rb') as f:
+ ... a = np.load(f)
+ ... b = np.load(f)
+ >>> print(a, b)
+ [1 2] [1 3]
+ """
+ if hasattr(file, 'write'):
+ file_ctx = contextlib.nullcontext(file)
+ else:
+ file = os_fspath(file)
+ if not file.endswith('.npy'):
+ file = file + '.npy'
+ file_ctx = open(file, "wb")
+
+ with file_ctx as fid:
+ arr = np.asanyarray(arr)
+ format.write_array(fid, arr, allow_pickle=allow_pickle,
+ pickle_kwargs=dict(fix_imports=fix_imports))
+
+
+def _savez_dispatcher(file, *args, **kwds):
+ yield from args
+ yield from kwds.values()
+
+
+@array_function_dispatch(_savez_dispatcher)
+def savez(file, *args, **kwds):
+ """Save several arrays into a single file in uncompressed ``.npz`` format.
+
+ Provide arrays as keyword arguments to store them under the
+ corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+ If arrays are specified as positional arguments, i.e., ``savez(fn,
+ x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+ Parameters
+ ----------
+ file : str or file
+ Either the filename (string) or an open file (file-like object)
+ where the data will be saved. If file is a string or a Path, the
+ ``.npz`` extension will be appended to the filename if it is not
+ already there.
+ args : Arguments, optional
+ Arrays to save to the file. Please use keyword arguments (see
+ `kwds` below) to assign names to arrays. Arrays specified as
+ args will be named "arr_0", "arr_1", and so on.
+ kwds : Keyword arguments, optional
+ Arrays to save to the file. Each array will be saved to the
+ output file with its corresponding keyword name.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ save : Save a single array to a binary file in NumPy format.
+ savetxt : Save an array to a file as plain text.
+ savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+ Notes
+ -----
+ The ``.npz`` file format is a zipped archive of files named after the
+ variables they contain. The archive is not compressed and each file
+ in the archive contains one variable in ``.npy`` format. For a
+ description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+ When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+ returned. This is a dictionary-like object which can be queried for
+ its list of arrays (with the ``.files`` attribute), and for the arrays
+ themselves.
+
+ Keys passed in `kwds` are used as filenames inside the ZIP archive.
+ Therefore, keys should be valid filenames; e.g., avoid keys that begin with
+ ``/`` or contain ``.``.
+
+ When naming variables with keyword arguments, it is not possible to name a
+ variable ``file``, as this would cause the ``file`` argument to be defined
+ twice in the call to ``savez``.
+
+ Examples
+ --------
+ >>> from tempfile import TemporaryFile
+ >>> outfile = TemporaryFile()
+ >>> x = np.arange(10)
+ >>> y = np.sin(x)
+
+ Using `savez` with \\*args, the arrays are saved with default names.
+
+ >>> np.savez(outfile, x, y)
+ >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
+ >>> npzfile = np.load(outfile)
+ >>> npzfile.files
+ ['arr_0', 'arr_1']
+ >>> npzfile['arr_0']
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ Using `savez` with \\**kwds, the arrays are saved with the keyword names.
+
+ >>> outfile = TemporaryFile()
+ >>> np.savez(outfile, x=x, y=y)
+ >>> _ = outfile.seek(0)
+ >>> npzfile = np.load(outfile)
+ >>> sorted(npzfile.files)
+ ['x', 'y']
+ >>> npzfile['x']
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ """
+ _savez(file, args, kwds, False)
+
+
+def _savez_compressed_dispatcher(file, *args, **kwds):
+ yield from args
+ yield from kwds.values()
+
+
+@array_function_dispatch(_savez_compressed_dispatcher)
+def savez_compressed(file, *args, **kwds):
+ """
+ Save several arrays into a single file in compressed ``.npz`` format.
+
+ Provide arrays as keyword arguments to store them under the
+ corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.
+
+ If arrays are specified as positional arguments, i.e.,
+ ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+ Parameters
+ ----------
+ file : str or file
+ Either the filename (string) or an open file (file-like object)
+ where the data will be saved. If file is a string or a Path, the
+ ``.npz`` extension will be appended to the filename if it is not
+ already there.
+ args : Arguments, optional
+ Arrays to save to the file. Please use keyword arguments (see
+ `kwds` below) to assign names to arrays. Arrays specified as
+ args will be named "arr_0", "arr_1", and so on.
+ kwds : Keyword arguments, optional
+ Arrays to save to the file. Each array will be saved to the
+ output file with its corresponding keyword name.
+
+ Returns
+ -------
+ None
+
+ See Also
+ --------
+ numpy.save : Save a single array to a binary file in NumPy format.
+ numpy.savetxt : Save an array to a file as plain text.
+ numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+ numpy.load : Load the files created by savez_compressed.
+
+ Notes
+ -----
+ The ``.npz`` file format is a zipped archive of files named after the
+ variables they contain. The archive is compressed with
+ ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
+ in ``.npy`` format. For a description of the ``.npy`` format, see
+ :py:mod:`numpy.lib.format`.
+
+ When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+ returned. This is a dictionary-like object which can be queried for
+ its list of arrays (with the ``.files`` attribute), and for the arrays
+ themselves.
+
+ Examples
+ --------
+ >>> test_array = np.random.rand(3, 2)
+ >>> test_vector = np.random.rand(4)
+ >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+ >>> loaded = np.load('/tmp/123.npz')
+ >>> print(np.array_equal(test_array, loaded['a']))
+ True
+ >>> print(np.array_equal(test_vector, loaded['b']))
+ True
+
+ """
+ _savez(file, args, kwds, True)
+
+
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
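+ """Shared implementation of `savez` and `savez_compressed`.
+
+ A sketch of the effect (illustrative): ``_savez(fn, (a,), {'y': b},
+ compress=False)`` writes an uncompressed zip archive containing the
+ members ``arr_0.npy`` and ``y.npy``.
+ """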
+ # Import is postponed to here since zipfile depends on gzip, an optional
+ # component of the so-called standard library.
+ import zipfile
+
+ if not hasattr(file, 'write'):
+ file = os_fspath(file)
+ if not file.endswith('.npz'):
+ file = file + '.npz'
+
+ namedict = kwds
+ for i, val in enumerate(args):
+ key = 'arr_%d' % i
+ if key in namedict:
+ raise ValueError(
+ "Cannot use un-named (positional) arrays together with the "
+ "keyword name %s" % key)
+ namedict[key] = val
+
+ if compress:
+ compression = zipfile.ZIP_DEFLATED
+ else:
+ compression = zipfile.ZIP_STORED
+
+ zipf = zipfile_factory(file, mode="w", compression=compression)
+
+ for key, val in namedict.items():
+ fname = key + '.npy'
+ val = np.asanyarray(val)
+ # always force zip64, gh-10776
+ with zipf.open(fname, 'w', force_zip64=True) as fid:
+ format.write_array(fid, val,
+ allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
+
+ zipf.close()
+
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+ """Just checks if the param ndmin is supported on
+ _ensure_ndmin_ndarray. It is intended to be used as
+ verification before running anything expensive.
+ e.g. loadtxt, genfromtxt
+ """
+ # Check correctness of the values of `ndmin`
+ if ndmin not in [0, 1, 2]:
+ raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+ """This is a helper function of loadtxt and genfromtxt to ensure
+ proper minimum dimension as requested
+
+ ndmin : int. Supported values: 0, 1, 2
+ ^^ whenever this changes, keep in sync with
+ _ensure_ndmin_ndarray_check_param
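+
+ For example (illustrative):
+
+ >>> _ensure_ndmin_ndarray(np.array(0.5), ndmin=1).shape
+ (1,)
+ >>> _ensure_ndmin_ndarray(np.array([1., 2.]), ndmin=2).shape
+ (2, 1)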
+ """
+ # Verify that the array has at least dimensions `ndmin`.
+ # Tweak the size and shape of the arrays - remove extraneous dimensions
+ if a.ndim > ndmin:
+ a = np.squeeze(a)
+ # and ensure we have the minimum number of dimensions asked for
+ # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+ if a.ndim < ndmin:
+ if ndmin == 1:
+ a = np.atleast_1d(a)
+ elif ndmin == 2:
+ a = np.atleast_2d(a).T
+
+ return a
+
+
+
+def _loadtxt_dispatcher(
+ fname, dtype=None, comments=None, delimiter=None,
+ converters=None, skiprows=None, usecols=None, unpack=None,
+ ndmin=None, encoding=None, max_rows=None, *, like=None):
+ return (like,)
+
+
+def _check_nonneg_int(value, name="argument"):
+ try:
+ operator.index(value)
+ except TypeError:
+ raise TypeError(f"{name} must be an integer") from None
+ if value < 0:
+ raise ValueError(f"{name} must be nonnegative")
+
+
+def _preprocess_comments(iterable, comments, encoding):
+ """
+ Generator that consumes an iterable of lines and strips out the
+ multiple (or multi-character) comments from each line.
+ This is a pre-processing step to achieve feature parity with loadtxt
+ (we assume this is a niche feature).
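+
+ For example (illustrative):
+
+ >>> lines = ["1 2 # x", "3 4 // y"]
+ >>> list(_preprocess_comments(lines, ["#", "//"], None))
+ ['1 2 ', '3 4 ']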
+ """
+ for line in iterable:
+ if isinstance(line, bytes):
+ # Need to handle conversion here, or the splitting would fail
+ line = line.decode(encoding)
+
+ for c in comments:
+ line = line.split(c, 1)[0]
+
+ yield line
+
+
+ # The number of rows we read in one go if confronted with a parametric
+ # dtype; can be overridden for testing.
+ _loadtxt_chunksize = 50000
+
+
+def _read(fname, *, delimiter=',', comment='#', quote='"',
+ imaginary_unit='j', usecols=None, skiplines=0,
+ max_rows=None, converters=None, ndmin=None, unpack=False,
+ dtype=np.float64, encoding="bytes"):
+ r"""
+ Read a NumPy array from a text file.
+
+ Parameters
+ ----------
+ fname : str or file object
+ The filename or the file to be read.
+ delimiter : str, optional
+ Field delimiter separating the values on a line of the file.
+ Default is a comma, ','. If None, any sequence of whitespace is
+ considered a delimiter.
+ comment : str or sequence of str or None, optional
+ Character that begins a comment. All text from the comment
+ character to the end of the line is ignored.
+ Multiple comments or multiple-character comment strings are supported,
+ but may be slower and `quote` must be empty if used.
+ Use None to disable all use of comments.
+ quote : str or None, optional
+ Character that is used to quote string fields. Default is '"'
+ (a double quote). Use None to disable quote support.
+ imaginary_unit : str, optional
+ Character that represents the imaginary unit ``sqrt(-1)``.
+ Default is 'j'.
+ usecols : array_like, optional
+ A one-dimensional array of integer column numbers. These are the
+ columns from the file to be included in the array. If this value
+ is not given, all the columns are used.
+ skiplines : int, optional
+ Number of lines to skip before interpreting the data in the file.
+ max_rows : int, optional
+ Maximum number of rows of data to read. Default is to read the
+ entire file.
+ converters : dict or callable, optional
+ A function to parse all column strings into the desired value, or
+ a dictionary mapping column number to a parser function.
+ E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+ Converters can also be used to provide a default value for missing
+ data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+ convert empty fields to 0.
+ Default: None
+ ndmin : int, optional
+ Minimum dimension of the array returned.
+ Allowed values are 0, 1 or 2. Default is 0.
+ unpack : bool, optional
+ If True, the returned array is transposed, so that arguments may be
+ unpacked using ``x, y, z = read(...)``. When used with a structured
+ data-type, arrays are returned for each field. Default is False.
+ dtype : numpy data type
+ A NumPy dtype instance, can be a structured dtype to map to the
+ columns of the file.
+ encoding : str, optional
+ Encoding used to decode the input file. The special value 'bytes'
+ (the default) enables backwards-compatible behavior for `converters`,
+ ensuring that inputs to the converter functions are encoded
+ bytes objects. The special value 'bytes' has no additional effect if
+ ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+ default system encoding is used.
+
+ Returns
+ -------
+ ndarray
+ NumPy array.
+
+ Examples
+ --------
+ First we create a file for the example.
+
+ >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
+ >>> with open('example1.csv', 'w') as f:
+ ... _ = f.write(s1)
+ >>> a1 = _read('example1.csv')
+ >>> a1
+ array([[1., 2., 3.],
+ [4., 5., 6.]])
+
+ The second example has columns with different data types, so a
+ one-dimensional array with a structured data type is returned.
+ The tab character is used as the field delimiter.
+
+ >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
+ >>> with open('example2.tsv', 'w') as f:
+ ... _ = f.write(s2)
+ >>> a2 = _read('example2.tsv', delimiter='\t',
+ ... dtype=np.dtype("f8, u1, S5"))
+ >>> a2
+ array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
+ dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
+ """
+ # Handle special 'bytes' keyword for encoding
+ byte_converters = False
+ if encoding == 'bytes':
+ encoding = None
+ byte_converters = True
+
+ if dtype is None:
+ raise TypeError("a dtype must be provided.")
+ dtype = np.dtype(dtype)
+
+ read_dtype_via_object_chunks = None
+ if dtype.kind in 'SUM' and (
+ dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
+ # This is a legacy "flexible" dtype. We do not truly support
+ # parametric dtypes currently (no dtype discovery step in the core),
+ # but have to support these for backward compatibility.
+ read_dtype_via_object_chunks = dtype
+ dtype = np.dtype(object)
+
+ if usecols is not None:
+ # Allow usecols to be a single int or a sequence of ints, the C-code
+ # handles the rest
+ try:
+ usecols = list(usecols)
+ except TypeError:
+ usecols = [usecols]
+
+ _ensure_ndmin_ndarray_check_param(ndmin)
+
+ if comment is None:
+ comments = None
+ else:
+ # assume comments are a sequence of strings
+ if "" in comment:
+ raise ValueError(
+ "comments cannot be an empty string. Use comments=None to "
+ "disable comments."
+ )
+ comments = tuple(comment)
+ comment = None
+ if len(comments) == 0:
+ comments = None # No comments at all
+ elif len(comments) == 1:
+ # If there is only one comment, and that comment has one character,
+ # the normal parsing can deal with it just fine.
+ if isinstance(comments[0], str) and len(comments[0]) == 1:
+ comment = comments[0]
+ comments = None
+ else:
+ # Input validation if there are multiple comment characters
+ if delimiter in comments:
+ raise TypeError(
+ f"Comment characters '{comments}' cannot include the "
+ f"delimiter '{delimiter}'"
+ )
+
+ # comment is now either a 1 or 0 character string or a tuple:
+ if comments is not None:
+ # Note: An earlier version supported two-character comments (and could
+ # have been extended to multiple characters); we assume this is
+ # rare enough not to optimize for.
+ if quote is not None:
+ raise ValueError(
+ "when multiple comments or a multi-character comment is "
+ "given, quotes are not supported. In this case quotechar "
+ "must be set to None.")
+
+ if len(imaginary_unit) != 1:
+ raise ValueError('len(imaginary_unit) must be 1.')
+
+ _check_nonneg_int(skiplines)
+ if max_rows is not None:
+ _check_nonneg_int(max_rows)
+ else:
+ # Passing -1 to the C code means "read the entire file".
+ max_rows = -1
+
+ fh_closing_ctx = contextlib.nullcontext()
+ filelike = False
+ try:
+ if isinstance(fname, os.PathLike):
+ fname = os.fspath(fname)
+ if isinstance(fname, str):
+ fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ if encoding is None:
+ encoding = getattr(fh, 'encoding', 'latin1')
+
+ fh_closing_ctx = contextlib.closing(fh)
+ data = fh
+ filelike = True
+ else:
+ if encoding is None:
+ encoding = getattr(fname, 'encoding', 'latin1')
+ data = iter(fname)
+ except TypeError as e:
+ raise ValueError(
+ f"fname must be a string, filehandle, list of strings,\n"
+ f"or generator. Got {type(fname)} instead.") from e
+
+ with fh_closing_ctx:
+ if comments is not None:
+ if filelike:
+ data = iter(data)
+ filelike = False
+ data = _preprocess_comments(data, comments, encoding)
+
+ if read_dtype_via_object_chunks is None:
+ arr = _load_from_filelike(
+ data, delimiter=delimiter, comment=comment, quote=quote,
+ imaginary_unit=imaginary_unit,
+ usecols=usecols, skiplines=skiplines, max_rows=max_rows,
+ converters=converters, dtype=dtype,
+ encoding=encoding, filelike=filelike,
+ byte_converters=byte_converters)
+
+ else:
+ # This branch reads the file into chunks of object arrays and then
+ # casts them to the desired actual dtype. This ensures correct
+ # string-length and datetime-unit discovery (like `arr.astype()`).
+ # Due to chunking, certain error reports are less clear, currently.
+ if filelike:
+ data = iter(data) # cannot chunk when reading from file
+
+ c_byte_converters = False
+ if read_dtype_via_object_chunks == "S":
+ c_byte_converters = True # Use latin1 rather than ascii
+
+ chunks = []
+ while max_rows != 0:
+ if max_rows < 0:
+ chunk_size = _loadtxt_chunksize
+ else:
+ chunk_size = min(_loadtxt_chunksize, max_rows)
+
+ next_arr = _load_from_filelike(
+ data, delimiter=delimiter, comment=comment, quote=quote,
+ imaginary_unit=imaginary_unit,
+ usecols=usecols, skiplines=skiplines, max_rows=chunk_size,
+ converters=converters, dtype=dtype,
+ encoding=encoding, filelike=filelike,
+ byte_converters=byte_converters,
+ c_byte_converters=c_byte_converters)
+ # Cast here already. We hope that this is better even for
+ # large files because the storage is more compact. It could
+ # be adapted (in principle the concatenate could cast).
+ chunks.append(next_arr.astype(read_dtype_via_object_chunks))
+
+ skiplines = 0 # Only have to skip for the first chunk
+ if max_rows >= 0:
+ max_rows -= chunk_size
+ if len(next_arr) < chunk_size:
+ # There was less data than requested, so we are done.
+ break
+
+ # At least one chunk is needed; an empty trailing chunk (read after
+ # the data ran out) may have the wrong shape, so drop it.
+ if len(chunks) > 1 and len(chunks[-1]) == 0:
+ del chunks[-1]
+ if len(chunks) == 1:
+ arr = chunks[0]
+ else:
+ arr = np.concatenate(chunks, axis=0)
+
+ # NOTE: ndmin works as advertised for structured dtypes, but normally
+ # these would return a 1D result plus the structured dimension,
+ # so ndmin=2 adds a third dimension even when no squeezing occurs.
+ # A `squeeze=False` could be a better solution (pandas uses squeeze).
+ arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
+
+ if arr.shape:
+ if arr.shape[0] == 0:
+ warnings.warn(
+ f'loadtxt: input contained no data: "{fname}"',
+ category=UserWarning,
+ stacklevel=3
+ )
+
+ if unpack:
+ # Unpack structured dtypes if requested:
+ dt = arr.dtype
+ if dt.names is not None:
+ # For structured arrays, return an array for each field.
+ return [arr[field] for field in dt.names]
+ else:
+ return arr.T
+ else:
+ return arr
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def loadtxt(fname, dtype=float, comments='#', delimiter=None,
+ converters=None, skiprows=0, usecols=None, unpack=False,
+ ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
+ like=None):
+ r"""
+ Load data from a text file.
+
+ Parameters
+ ----------
+ fname : file, str, pathlib.Path, list of str, generator
+ File, filename, list, or generator to read. If the filename
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+ that generators must return bytes or strings. The strings
+ in a list or produced by a generator are treated as lines.
+ dtype : data-type, optional
+ Data-type of the resulting array; default: float. If this is a
+ structured data-type, the resulting array will be 1-dimensional, and
+ each row will be interpreted as an element of the array. In this
+ case, the number of columns used must match the number of fields in
+ the data-type.
+ comments : str or sequence of str or None, optional
+ The characters or list of characters used to indicate the start of a
+ comment. None implies no comments. For backwards compatibility, byte
+ strings will be decoded as 'latin1'. The default is '#'.
+ delimiter : str, optional
+ The character used to separate the values. For backwards compatibility,
+ byte strings will be decoded as 'latin1'. The default is whitespace.
+
+ .. versionchanged:: 1.23.0
+ Only single character delimiters are supported. Newline characters
+ cannot be used as the delimiter.
+
+ converters : dict or callable, optional
+ Converter functions to customize value parsing. If `converters` is
+ callable, the function is applied to all columns, else it must be a
+ dict that maps column number to a parser function.
+ See examples for further details.
+ Default: None.
+
+ .. versionchanged:: 1.23.0
+ The ability to pass a single callable to be applied to all columns
+ was added.
+
+ skiprows : int, optional
+ Skip the first `skiprows` lines, including comments; default: 0.
+ usecols : int or sequence, optional
+ Which columns to read, with 0 being the first. For example,
+ ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+ The default, None, results in all columns being read.
+
+ .. versionchanged:: 1.11.0
+ When a single column has to be read it is possible to use
+ an integer instead of a tuple. E.g., ``usecols = 3`` reads the
+ fourth column the same way as ``usecols = (3,)`` would.
+ unpack : bool, optional
+ If True, the returned array is transposed, so that arguments may be
+ unpacked using ``x, y, z = loadtxt(...)``. When used with a
+ structured data-type, arrays are returned for each field.
+ Default is False.
+ ndmin : int, optional
+ The returned array will have at least `ndmin` dimensions.
+ Otherwise mono-dimensional axes will be squeezed.
+ Legal values: 0 (default), 1 or 2.
+
+ .. versionadded:: 1.6.0
+ encoding : str, optional
+ Encoding used to decode the input file. Does not apply to input
+ streams. The special value 'bytes' enables backward compatibility
+ workarounds that ensure you receive byte arrays as results if possible
+ and pass 'latin1' encoded strings to converters. Override this value to
+ receive unicode arrays and pass strings as input to converters. If set
+ to None, the system default is used. The default value is 'bytes'.
+
+ .. versionadded:: 1.14.0
+ max_rows : int, optional
+ Read `max_rows` rows of content after `skiprows` lines. The default is
+ to read all the rows. Note that empty rows containing no data such as
+ empty lines and comment lines are not counted towards `max_rows`,
+ while such lines are counted in `skiprows`.
+
+ .. versionadded:: 1.16.0
+
+ .. versionchanged:: 1.23.0
+ Lines containing no data, including comment lines (e.g., lines
+ starting with '#' or as specified via `comments`) are not counted
+ towards `max_rows`.
+ quotechar : unicode character or None, optional
+ The character used to denote the start and end of a quoted item.
+ Occurrences of the delimiter or comment characters are ignored within
+ a quoted item. The default value is ``quotechar=None``, which means
+ quoting support is disabled.
+
+ If two consecutive instances of `quotechar` are found within a quoted
+ field, the first is treated as an escape character. See examples.
+
+ .. versionadded:: 1.23.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Data read from the text file.
+
+ See Also
+ --------
+ load, fromstring, fromregex
+ genfromtxt : Load data with missing values handled as specified.
+ scipy.io.loadmat : reads MATLAB data files
+
+ Notes
+ -----
+ This function aims to be a fast reader for simply formatted files. The
+ `genfromtxt` function provides more sophisticated handling of, e.g.,
+ lines with missing values.
+
+ Each row in the input text file must have the same number of values if
+ all values are to be read. If the rows have unequal numbers of values, a
+ subset of up to n columns (where n is the smallest number of values
+ present in any row) can be read by specifying the columns via `usecols`.
+
+ .. versionadded:: 1.10.0
+
+ The strings produced by the Python float.hex method can be used as
+ input for floats.
+
+ Examples
+ --------
+ >>> from io import StringIO # StringIO behaves like a file object
+ >>> c = StringIO("0 1\n2 3")
+ >>> np.loadtxt(c)
+ array([[0., 1.],
+ [2., 3.]])
+
+ >>> d = StringIO("M 21 72\nF 35 58")
+ >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+ ... 'formats': ('S1', 'i4', 'f4')})
+ array([(b'M', 21, 72.), (b'F', 35, 58.)],
+ dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+ >>> c = StringIO("1,0,2\n3,0,4")
+ >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+ >>> x
+ array([1., 3.])
+ >>> y
+ array([2., 4.])
+
+ The `converters` argument is used to specify functions to preprocess the
+ text prior to parsing. `converters` can be a dictionary that maps
+ preprocessing functions to each column:
+
+ >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+ >>> conv = {
+ ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0
+ ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1
+ ... }
+ >>> np.loadtxt(s, delimiter=",", converters=conv)
+ array([[1., 3.],
+ [3., 5.]])
+
+ `converters` can be a callable instead of a dictionary, in which case it
+ is applied to all columns:
+
+ >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+ >>> import functools
+ >>> conv = functools.partial(int, base=16)
+ >>> np.loadtxt(s, converters=conv)
+ array([[222., 173.],
+ [192., 222.]])
+
+ This example shows how `converters` can be used to convert a field
+ with a trailing minus sign into a negative number.
+
+ >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+ >>> def conv(fld):
+ ... return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
+ ...
+ >>> np.loadtxt(s, converters=conv)
+ array([[ 10.01, -31.25],
+ [ 19.22, 64.31],
+ [-17.57, 63.94]])
+
+ Using a callable as the converter can be particularly useful for handling
+ values with different formatting, e.g. floats with underscores:
+
+ >>> s = StringIO("1 2.7 100_000")
+ >>> np.loadtxt(s, converters=float)
+ array([1.e+00, 2.7e+00, 1.e+05])
+
+ This idea can be extended to automatically handle values specified in
+ many different formats:
+
+ >>> def conv(val):
+ ... try:
+ ... return float(val)
+ ... except ValueError:
+ ... return float.fromhex(val)
+ >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+ >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
+ array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+ Note that with the default ``encoding="bytes"``, the inputs to the
+ converter function are latin-1 encoded byte strings. To deactivate the
+ implicit encoding prior to conversion, use ``encoding=None``:
+
+ >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+ >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
+ >>> np.loadtxt(s, converters=conv, encoding=None)
+ array([[ 10.01, -31.25],
+ [ 19.22, 64.31],
+ [-17.57, 63.94]])
+
+ Support for quoted fields is enabled with the `quotechar` parameter.
+ Comment and delimiter characters are ignored when they appear within a
+ quoted item delineated by `quotechar`:
+
+ >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+ >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
+ dtype=[('label', '<U12'), ('value', '<f8')])
+
+ Quoted fields can be separated by multiple whitespace characters:
+
+ >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+ >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
+ dtype=[('label', '<U12'), ('value', '<f8')])
+
+ Two consecutive quote characters within a quoted field are treated as a
+ single escaped character:
+
+ >>> s = StringIO('"Hello, my name is ""Monty""!"')
+ >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+ array('Hello, my name is "Monty"!', dtype='<U26')
+
+ Read subset of columns when all rows do not contain equal number of values:
+
+ >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+ >>> np.loadtxt(d, usecols=(0, 1))
+ array([[ 1., 2.],
+ [ 2., 4.],
+ [ 3., 9.],
+ [ 4., 16.]])
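+
+ `skiprows` and `max_rows` select a window of lines to parse; here the
+ header line is skipped and only two data rows are read:
+
+ >>> d = StringIO("# header\n1 2\n3 4\n5 6")
+ >>> np.loadtxt(d, skiprows=1, max_rows=2)
+ array([[1., 2.],
+ [3., 4.]])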
+
+ """
+
+ if like is not None:
+ return _loadtxt_with_like(
+ fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ converters=converters, skiprows=skiprows, usecols=usecols,
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
+ max_rows=max_rows, like=like
+ )
+
+ if dtype is None:
+ dtype = np.float64
+
+ comment = comments
+ # Control character type conversions for Py3 convenience
+ if comment is not None:
+ if isinstance(comment, (str, bytes)):
+ comment = [comment]
+ comment = [
+ x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+ if isinstance(delimiter, bytes):
+ delimiter = delimiter.decode('latin1')
+
+ arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+ converters=converters, skiplines=skiprows, usecols=usecols,
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
+ max_rows=max_rows, quote=quotechar)
+
+ return arr
+
+
+_loadtxt_with_like = array_function_dispatch(
+ _loadtxt_dispatcher, use_like=True
+)(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+ header=None, footer=None, comments=None,
+ encoding=None):
+ return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+ footer='', comments='# ', encoding=None):
+ """
+ Save an array to a text file.
+
+ Parameters
+ ----------
+ fname : filename or file handle
+ If the filename ends in ``.gz``, the file is automatically saved in
+ compressed gzip format. `loadtxt` understands gzipped files
+ transparently.
+ X : 1D or 2D array_like
+ Data to be saved to a text file.
+ fmt : str or sequence of strs, optional
+ A single format (%10.5f), a sequence of formats, or a
+ multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+ case `delimiter` is ignored. For complex `X`, the legal options
+ for `fmt` are:
+
+ * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+ like `' (%s+%sj)' % (fmt, fmt)`
+ * a full string specifying every real and imaginary part, e.g.
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+ * a list of specifiers, one per column - in this case, the real
+ and imaginary part must have separate specifiers,
+ e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+ delimiter : str, optional
+ String or character separating columns.
+ newline : str, optional
+ String or character separating lines.
+
+ .. versionadded:: 1.5.0
+ header : str, optional
+ String that will be written at the beginning of the file.
+
+ .. versionadded:: 1.7.0
+ footer : str, optional
+ String that will be written at the end of the file.
+
+ .. versionadded:: 1.7.0
+ comments : str, optional
+ String that will be prepended to the ``header`` and ``footer`` strings,
+ to mark them as comments. Default: '# ', as expected by e.g.
+ ``numpy.loadtxt``.
+
+ .. versionadded:: 1.7.0
+ encoding : {None, str}, optional
+ Encoding used to encode the output file. Does not apply to output
+ streams. If the encoding is something other than 'bytes' or 'latin1'
+ you will not be able to load the file in NumPy versions < 1.14. Default
+ is 'latin1'.
+
+ .. versionadded:: 1.14.0
+
+
+ See Also
+ --------
+ save : Save an array to a binary file in NumPy ``.npy`` format
+ savez : Save several arrays into an uncompressed ``.npz`` archive
+ savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+ Notes
+ -----
+ Further explanation of the `fmt` parameter
+ (``%[flag]width[.precision]specifier``):
+
+ flags:
+ ``-`` : left justify
+
+ ``+`` : Forces to precede result with + or -.
+
+ ``0`` : Left pad the number with zeros instead of space (see width).
+
+ width:
+ Minimum number of characters to be printed. The value is not truncated
+ if it has more characters.
+
+ precision:
+ - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+ digits.
+ - For ``e, E`` and ``f`` specifiers, the number of digits to print
+ after the decimal point.
+ - For ``g`` and ``G``, the maximum number of significant digits.
+ - For ``s``, the maximum number of characters.
+
+ specifiers:
+ ``c`` : character
+
+ ``d`` or ``i`` : signed decimal integer
+
+ ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+ ``f`` : decimal floating point
+
+ ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+ ``o`` : signed octal
+
+ ``s`` : string of characters
+
+ ``u`` : unsigned decimal integer
+
+ ``x,X`` : unsigned hexadecimal integer
+
+ This explanation of ``fmt`` is not complete, for an exhaustive
+ specification see [1]_.
+
+ References
+ ----------
+ .. [1] `Format Specification Mini-Language
+ <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+ Python Documentation.
+
+ Examples
+ --------
+ >>> x = y = z = np.arange(0.0,5.0,1.0)
+ >>> np.savetxt('test.out', x, delimiter=',') # X is an array
+ >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
+ >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
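+
+ A commented header line and a separate format per column can be
+ combined:
+
+ >>> np.savetxt('test.out', np.column_stack((x, y)),
+ ... fmt=['%d', '%.4f'], header='x y')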
+
+ """
+
+ # Py3 conversions first
+ if isinstance(fmt, bytes):
+ fmt = asstr(fmt)
+ delimiter = asstr(delimiter)
+
+ class WriteWrap:
+ """Convert to bytes on bytestream inputs.
+
+ """
+ def __init__(self, fh, encoding):
+ self.fh = fh
+ self.encoding = encoding
+ self.do_write = self.first_write
+
+ def close(self):
+ self.fh.close()
+
+ def write(self, v):
+ self.do_write(v)
+
+ def write_bytes(self, v):
+ if isinstance(v, bytes):
+ self.fh.write(v)
+ else:
+ self.fh.write(v.encode(self.encoding))
+
+ def write_normal(self, v):
+ self.fh.write(asunicode(v))
+
+ def first_write(self, v):
+ try:
+ self.write_normal(v)
+ self.write = self.write_normal
+ except TypeError:
+ # input is probably a bytestream
+ self.write_bytes(v)
+ self.write = self.write_bytes
+
+ own_fh = False
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
+ if _is_string_like(fname):
+ # datasource doesn't support creating a new file ...
+ open(fname, 'wt').close()
+ fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
+ own_fh = True
+ elif hasattr(fname, 'write'):
+ # wrap to handle byte output streams
+ fh = WriteWrap(fname, encoding or 'latin1')
+ else:
+ raise ValueError('fname must be a string or file handle')
+
+ try:
+ X = np.asarray(X)
+
+ # Handle 1-dimensional arrays
+ if X.ndim == 0 or X.ndim > 2:
+ raise ValueError(
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+ elif X.ndim == 1:
+ # Common case -- 1d array of numbers
+ if X.dtype.names is None:
+ X = np.atleast_2d(X).T
+ ncol = 1
+
+ # Complex dtype -- each field indicates a separate column
+ else:
+ ncol = len(X.dtype.names)
+ else:
+ ncol = X.shape[1]
+
+ iscomplex_X = np.iscomplexobj(X)
+ # `fmt` can be a string with multiple insertion points or a
+ # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+ if type(fmt) in (list, tuple):
+ if len(fmt) != ncol:
+ raise AttributeError('fmt has wrong shape. %s' % str(fmt))
+ format = asstr(delimiter).join(map(asstr, fmt))
+ elif isinstance(fmt, str):
+ n_fmt_chars = fmt.count('%')
+ error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
+ if n_fmt_chars == 1:
+ if iscomplex_X:
+ fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
+ else:
+ fmt = [fmt, ] * ncol
+ format = delimiter.join(fmt)
+ elif iscomplex_X and n_fmt_chars != (2 * ncol):
+ raise error
+ elif ((not iscomplex_X) and n_fmt_chars != ncol):
+ raise error
+ else:
+ format = fmt
+ else:
+ raise ValueError('invalid fmt: %r' % (fmt,))
+
+ if len(header) > 0:
+ header = header.replace('\n', '\n' + comments)
+ fh.write(comments + header + newline)
+ if iscomplex_X:
+ for row in X:
+ row2 = []
+ for number in row:
+ row2.append(number.real)
+ row2.append(number.imag)
+ s = format % tuple(row2) + newline
+ fh.write(s.replace('+-', '-'))
+ else:
+ for row in X:
+ try:
+ v = format % tuple(row) + newline
+ except TypeError as e:
+ raise TypeError("Mismatch between array dtype ('%s') and "
+ "format specifier ('%s')"
+ % (str(X.dtype), format)) from e
+ fh.write(v)
+
+ if len(footer) > 0:
+ footer = footer.replace('\n', '\n' + comments)
+ fh.write(comments + footer + newline)
+ finally:
+ if own_fh:
+ fh.close()
+
+
+@set_module('numpy')
+def fromregex(file, regexp, dtype, encoding=None):
+ r"""
+ Construct an array from a text file, using regular expression parsing.
+
+ The returned array is always a structured array, and is constructed from
+ all matches of the regular expression in the file. Groups in the regular
+ expression are converted to fields of the structured array.
+
+ Parameters
+ ----------
+ file : path or file
+ Filename or file object to read.
+
+ .. versionchanged:: 1.22.0
+ Now accepts `os.PathLike` implementations.
+ regexp : str or regexp
+ Regular expression used to parse the file.
+ Groups in the regular expression correspond to fields in the dtype.
+ dtype : dtype or list of dtypes
+ Dtype for the structured array; must be a structured datatype.
+ encoding : str, optional
+ Encoding used to decode the input file. Does not apply to input streams.
+
+ .. versionadded:: 1.14.0
+
+ Returns
+ -------
+ output : ndarray
+ The output array, containing the part of the content of `file` that
+ was matched by `regexp`. `output` is always a structured array.
+
+ Raises
+ ------
+ TypeError
+ When `dtype` is not a valid dtype for a structured array.
+
+ See Also
+ --------
+ fromstring, loadtxt
+
+ Notes
+ -----
+ Dtypes for structured arrays can be specified in several forms, but all
+ forms specify at least the data type and field name. For details see
+ `basics.rec`.
+
+ Examples
+ --------
+ >>> from io import StringIO
+ >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
+
+ >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything]
+ >>> output = np.fromregex(text, regexp,
+ ... [('num', np.int64), ('key', 'S3')])
+ >>> output
+ array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+ dtype=[('num', '<i8'), ('key', 'S3')])
+ >>> output['num']
+ array([1312, 1534, 444])
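+
+ A pattern with a single group is also accepted; the matches are read
+ using the single field's dtype and re-interpreted as a one-field
+ structured array:
+
+ >>> _ = text.seek(0)
+ >>> np.fromregex(text, r"(\d+)\s+...", [('num', np.int64)])
+ array([(1312,), (1534,), ( 444,)], dtype=[('num', '<i8')])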
+
+ """
+ own_fh = False
+ if not hasattr(file, "read"):
+ file = os.fspath(file)
+ file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+ own_fh = True
+
+ try:
+ if not isinstance(dtype, np.dtype):
+ dtype = np.dtype(dtype)
+ if dtype.names is None:
+ raise TypeError('dtype must be a structured datatype.')
+
+ content = file.read()
+ if isinstance(content, bytes) and isinstance(regexp, str):
+ regexp = asbytes(regexp)
+ elif isinstance(content, str) and isinstance(regexp, bytes):
+ regexp = asstr(regexp)
+
+ if not hasattr(regexp, 'match'):
+ regexp = re.compile(regexp)
+ seq = regexp.findall(content)
+ if seq and not isinstance(seq[0], tuple):
+ # Only one group is in the regexp.
+ # Create the new array as a single data-type and then
+ # re-interpret as a single-field structured array.
+ newdtype = np.dtype(dtype[dtype.names[0]])
+ output = np.array(seq, dtype=newdtype)
+ output.dtype = dtype
+ else:
+ output = np.array(seq, dtype=dtype)
+
+ return output
+ finally:
+ if own_fh:
+ file.close()
+
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None,
+ skip_header=None, skip_footer=None, converters=None,
+ missing_values=None, filling_values=None, usecols=None,
+ names=None, excludelist=None, deletechars=None,
+ replace_space=None, autostrip=None, case_sensitive=None,
+ defaultfmt=None, unpack=None, usemask=None, loose=None,
+ invalid_raise=None, max_rows=None, encoding=None,
+ *, ndmin=None, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+ skip_header=0, skip_footer=0, converters=None,
+ missing_values=None, filling_values=None, usecols=None,
+ names=None, excludelist=None,
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
+ replace_space='_', autostrip=False, case_sensitive=True,
+ defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+ invalid_raise=True, max_rows=None, encoding='bytes',
+ *, ndmin=0, like=None):
+ """
+ Load data from a text file, with missing values handled as specified.
+
+ Each line past the first `skip_header` lines is split at the `delimiter`
+ character, and characters following the `comments` character are discarded.
+
+ Parameters
+ ----------
+ fname : file, str, pathlib.Path, list of str, generator
+ File, filename, list, or generator to read. If the filename
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+ that generators must return bytes or strings. The strings
+ in a list or produced by a generator are treated as lines.
+ dtype : dtype, optional
+ Data type of the resulting array.
+ If None, the dtypes will be determined by the contents of each
+ column, individually.
+ comments : str, optional
+ The character used to indicate the start of a comment.
+ All the characters occurring on a line after a comment are discarded.
+ delimiter : str, int, or sequence, optional
+ The string used to separate values. By default, any consecutive
+ whitespaces act as delimiter. An integer or sequence of integers
+ can also be provided as width(s) of each field.
+ skiprows : int, optional
+ `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
+ skip_header : int, optional
+ The number of lines to skip at the beginning of the file.
+ skip_footer : int, optional
+ The number of lines to skip at the end of the file.
+ converters : variable, optional
+ The set of functions that convert the data of a column to a value.
+ The converters can also be used to provide a default value
+ for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+ missing : variable, optional
+ `missing` was removed in numpy 1.10. Please use `missing_values`
+ instead.
+ missing_values : variable, optional
+ The set of strings corresponding to missing data.
+ filling_values : variable, optional
+ The set of values to be used as default when the data are missing.
+ usecols : sequence, optional
+ Which columns to read, with 0 being the first. For example,
+ ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+ names : {None, True, str, sequence}, optional
+ If `names` is True, the field names are read from the first line after
+ the first `skip_header` lines. This line can optionally be preceded
+ by a comment delimiter. If `names` is a sequence or a single string of
+ comma-separated names, the names will be used to define the field names
+ in a structured dtype. If `names` is None, the names of the dtype
+ fields will be used, if any.
+ excludelist : sequence, optional
+ A list of names to exclude. This list is appended to the default list
+ ['return','file','print']. Excluded names are appended with an
+ underscore: for example, `file` would become `file_`.
+ deletechars : str, optional
+ A string combining invalid characters that must be deleted from the
+ names.
+ defaultfmt : str, optional
+ A format used to define default field names, such as "f%i" or "f_%02i".
+ autostrip : bool, optional
+ Whether to automatically strip white spaces from the variables.
+ replace_space : char, optional
+ Character(s) used in replacement of white spaces in the variable
+ names. By default, use a '_'.
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
+ If True, field names are case sensitive.
+ If False or 'upper', field names are converted to upper case.
+ If 'lower', field names are converted to lower case.
+ unpack : bool, optional
+ If True, the returned array is transposed, so that arguments may be
+ unpacked using ``x, y, z = genfromtxt(...)``. When used with a
+ structured data-type, arrays are returned for each field.
+ Default is False.
+ usemask : bool, optional
+ If True, return a masked array.
+ If False, return a regular array.
+ loose : bool, optional
+ If True, do not raise errors for invalid values.
+ invalid_raise : bool, optional
+ If True, an exception is raised if an inconsistency is detected in the
+ number of columns.
+ If False, a warning is emitted and the offending lines are skipped.
+ max_rows : int, optional
+ The maximum number of rows to read. Must not be used with skip_footer
+ at the same time. If given, the value must be at least 1. Default is
+ to read the entire file.
+
+ .. versionadded:: 1.10.0
+ encoding : str, optional
+ Encoding used to decode the input file. Does not apply when `fname` is
+ a file object. The special value 'bytes' enables backward compatibility
+ workarounds that ensure that you receive byte arrays when possible
+ and passes latin1 encoded strings to converters. Override this value to
+ receive unicode arrays and pass strings as input to converters. If set
+ to None the system default is used. The default value is 'bytes'.
+
+ .. versionadded:: 1.14.0
+ ndmin : int, optional
+ Same meaning as the corresponding parameter of `loadtxt`.
+
+ .. versionadded:: 1.23.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ out : ndarray
+ Data read from the text file. If `usemask` is True, this is a
+ masked array.
+
+ See Also
+ --------
+ numpy.loadtxt : equivalent function when no data is missing.
+
+ Notes
+ -----
+ * When spaces are used as delimiters, or when no delimiter has been given
+ as input, there should not be any missing data between two fields.
+ * When the variables are named (either by a flexible dtype or with `names`),
+ there must not be any header in the file (else a ValueError
+ exception is raised).
+ * Individual values are not stripped of spaces by default.
+ When using a custom converter, make sure the function does remove spaces.
+
+ References
+ ----------
+ .. [1] NumPy User Guide, section `I/O with NumPy
+ <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+ Examples
+ --------
+ >>> from io import StringIO
+ >>> import numpy as np
+
+ Comma delimited file with mixed dtype
+
+ >>> s = StringIO(u"1,1.3,abcde")
+ >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+ ... ('mystring','S5')], delimiter=",")
+ >>> data
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+ Using dtype = None
+
+ >>> _ = s.seek(0) # needed for StringIO example only
+ >>> data = np.genfromtxt(s, dtype=None,
+ ... names = ['myint','myfloat','mystring'], delimiter=",")
+ >>> data
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+ Specifying dtype and names
+
+ >>> _ = s.seek(0)
+ >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+ ... names=['myint','myfloat','mystring'], delimiter=",")
+ >>> data
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+ An example with fixed-width columns
+
+ >>> s = StringIO(u"11.3abcde")
+ >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+ ... delimiter=[1,3,5])
+ >>> data
+ array((1, 1.3, b'abcde'),
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+
+ An example to show comments
+
+ >>> f = StringIO('''
+ ... text,# of chars
+ ... hello world,11
+ ... numpy,5''')
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
+
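+ An illustrative sketch with a missing value and ``usemask=True`` (the
+ exact repr formatting may vary between versions):
+
+ >>> s = StringIO(u"1,,3")
+ >>> np.genfromtxt(s, delimiter=",", usemask=True)
+ masked_array(data=[1.0, --, 3.0],
+ mask=[False, True, False],
+ fill_value=1e+20)
+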
+ """
+
+ if like is not None:
+ return _genfromtxt_with_like(
+ fname, dtype=dtype, comments=comments, delimiter=delimiter,
+ skip_header=skip_header, skip_footer=skip_footer,
+ converters=converters, missing_values=missing_values,
+ filling_values=filling_values, usecols=usecols, names=names,
+ excludelist=excludelist, deletechars=deletechars,
+ replace_space=replace_space, autostrip=autostrip,
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+ unpack=unpack, usemask=usemask, loose=loose,
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+ ndmin=ndmin,
+ like=like
+ )
+
+ _ensure_ndmin_ndarray_check_param(ndmin)
+
+ if max_rows is not None:
+ if skip_footer:
+ raise ValueError(
+ "The keywords 'skip_footer' and 'max_rows' can not be "
+ "specified at the same time.")
+ if max_rows < 1:
+ raise ValueError("'max_rows' must be at least 1.")
+
+ if usemask:
+ from numpy.ma import MaskedArray, make_mask_descr
+ # Check the input dictionary of converters
+ user_converters = converters or {}
+ if not isinstance(user_converters, dict):
+ raise TypeError(
+ "The input argument 'converters' should be a valid dictionary "
+ "(got '%s' instead)" % type(user_converters))
+
+ if encoding == 'bytes':
+ encoding = None
+ byte_converters = True
+ else:
+ byte_converters = False
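+ # (Clarifying note, not in the original source.) 'bytes' is the legacy
+ # default: the file is decoded as latin1, and string columns are encoded
+ # back to bytes further below, mimicking the Python 2 behaviour.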
+
+ # Initialize the filehandle, the LineSplitter and the NameValidator
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
+ if isinstance(fname, str):
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ fid_ctx = contextlib.closing(fid)
+ else:
+ fid = fname
+ fid_ctx = contextlib.nullcontext(fid)
+ try:
+ fhd = iter(fid)
+ except TypeError as e:
+ raise TypeError(
+ "fname must be a string, a filehandle, a sequence of strings,\n"
+ f"or an iterator of strings. Got {type(fname)} instead."
+ ) from e
+ with fid_ctx:
+ split_line = LineSplitter(delimiter=delimiter, comments=comments,
+ autostrip=autostrip, encoding=encoding)
+ validate_names = NameValidator(excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
+
+ # Skip the first `skip_header` rows
+ try:
+ for i in range(skip_header):
+ next(fhd)
+
+ # Keep on until we find the first valid values
+ first_values = None
+
+ while not first_values:
+ first_line = _decode_line(next(fhd), encoding)
+ if (names is True) and (comments is not None):
+ if comments in first_line:
+ first_line = (
+ ''.join(first_line.split(comments)[1:]))
+ first_values = split_line(first_line)
+ except StopIteration:
+ # return an empty array if the datafile is empty
+ first_line = ''
+ first_values = []
+ warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
+
+ # Should we take the first values as names?
+ if names is True:
+ fval = first_values[0].strip()
+ if comments is not None:
+ if fval in comments:
+ del first_values[0]
+
+ # Check the columns to use: make sure `usecols` is a list
+ if usecols is not None:
+ try:
+ usecols = [_.strip() for _ in usecols.split(",")]
+ except AttributeError:
+ try:
+ usecols = list(usecols)
+ except TypeError:
+ usecols = [usecols, ]
+ nbcols = len(usecols or first_values)
+
+ # Check the names and overwrite the dtype.names if needed
+ if names is True:
+ names = validate_names([str(_.strip()) for _ in first_values])
+ first_line = ''
+ elif _is_string_like(names):
+ names = validate_names([_.strip() for _ in names.split(',')])
+ elif names:
+ names = validate_names(names)
+ # Get the dtype
+ if dtype is not None:
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
+ excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
+ # Make sure `names` is a list (for Python 2.5 compatibility)
+ if names is not None:
+ names = list(names)
+
+ if usecols:
+ for (i, current) in enumerate(usecols):
+ # if usecols is a list of names, convert to a list of indices
+ if _is_string_like(current):
+ usecols[i] = names.index(current)
+ elif current < 0:
+ usecols[i] = current + len(first_values)
+ # If the dtype is not None, make sure we update it
+ if (dtype is not None) and (len(dtype) > nbcols):
+ descr = dtype.descr
+ dtype = np.dtype([descr[_] for _ in usecols])
+ names = list(dtype.names)
+ # If `names` is not None, update the names
+ elif (names is not None) and (len(names) > nbcols):
+ names = [names[_] for _ in usecols]
+ elif (names is not None) and (dtype is not None):
+ names = list(dtype.names)
+
+ # Process the missing values ...............................
+ # Rename missing_values for convenience
+ user_missing_values = missing_values or ()
+ if isinstance(user_missing_values, bytes):
+ user_missing_values = user_missing_values.decode('latin1')
+
+ # Define the list of missing_values (one column: one list)
+ missing_values = [list(['']) for _ in range(nbcols)]
+
+ # We have a dictionary: process it field by field
+ if isinstance(user_missing_values, dict):
+ # Loop on the items
+ for (key, val) in user_missing_values.items():
+ # Is the key a string?
+ if _is_string_like(key):
+ try:
+ # Transform it into an integer
+ key = names.index(key)
+ except ValueError:
+ # We couldn't find it: the name must have been dropped
+ continue
+ # Redefine the key as needed if it's a column number
+ if usecols:
+ try:
+ key = usecols.index(key)
+ except ValueError:
+ pass
+ # Transform the value as a list of string
+ if isinstance(val, (list, tuple)):
+ val = [str(_) for _ in val]
+ else:
+ val = [str(val), ]
+ # Add the value(s) to the current list of missing
+ if key is None:
+ # None acts as default
+ for miss in missing_values:
+ miss.extend(val)
+ else:
+ missing_values[key].extend(val)
+ # We have a sequence: each item matches a column
+ elif isinstance(user_missing_values, (list, tuple)):
+ for (value, entry) in zip(user_missing_values, missing_values):
+ value = str(value)
+ if value not in entry:
+ entry.append(value)
+ # We have a string: apply it to all entries
+ elif isinstance(user_missing_values, str):
+ user_value = user_missing_values.split(",")
+ for entry in missing_values:
+ entry.extend(user_value)
+ # We have something else: apply it to all entries
+ else:
+ for entry in missing_values:
+ entry.extend([str(user_missing_values)])
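+ # Illustrative sketch (comment only, not in the original source): with
+ # nbcols == 2 the accepted forms map to per-column lists as
+ #   missing_values="N/A"      -> [['', 'N/A'], ['', 'N/A']]
+ #   missing_values=("-", "?") -> [['', '-'], ['', '?']]
+ #   missing_values={0: "-"}   -> [['', '-'], ['']]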
+
+ # Process the filling_values ...............................
+ # Rename the input for convenience
+ user_filling_values = filling_values
+ if user_filling_values is None:
+ user_filling_values = []
+ # Define the default
+ filling_values = [None] * nbcols
+ # We have a dictionary: update each entry individually
+ if isinstance(user_filling_values, dict):
+ for (key, val) in user_filling_values.items():
+ if _is_string_like(key):
+ try:
+ # Transform it into an integer
+ key = names.index(key)
+ except ValueError:
+ # We couldn't find it: the name must have been dropped,
+ continue
+ # Redefine the key if it's a column number and usecols is defined
+ if usecols:
+ try:
+ key = usecols.index(key)
+ except ValueError:
+ pass
+ # Add the value to the list
+ filling_values[key] = val
+ # We have a sequence: update on a one-to-one basis
+ elif isinstance(user_filling_values, (list, tuple)):
+ n = len(user_filling_values)
+ if (n <= nbcols):
+ filling_values[:n] = user_filling_values
+ else:
+ filling_values = user_filling_values[:nbcols]
+ # We have something else: use it for all entries
+ else:
+ filling_values = [user_filling_values] * nbcols
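+ # Illustrative sketch (comment only, not in the original source): with
+ # nbcols == 2,
+ #   filling_values=0       -> [0, 0]
+ #   filling_values=[0]     -> [0, None]
+ #   filling_values={1: -1} -> [None, -1]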
+
+ # Initialize the converters ................................
+ if dtype is None:
+ # Note: we can't use a [...]*nbcols, as we would have 3 times the same
+ # ... converter, instead of 3 different converters.
+ converters = [StringConverter(None, missing_values=miss, default=fill)
+ for (miss, fill) in zip(missing_values, filling_values)]
+ else:
+ dtype_flat = flatten_dtype(dtype, flatten_base=True)
+ # Initialize the converters
+ if len(dtype_flat) > 1:
+ # Flexible type : get a converter from each dtype
+ zipit = zip(dtype_flat, missing_values, filling_values)
+ converters = [StringConverter(dt, locked=True,
+ missing_values=miss, default=fill)
+ for (dt, miss, fill) in zipit]
+ else:
+ # Set to a default converter (but w/ different missing values)
+ zipit = zip(missing_values, filling_values)
+ converters = [StringConverter(dtype, locked=True,
+ missing_values=miss, default=fill)
+ for (miss, fill) in zipit]
+ # Update the converters to use the user-defined ones
+ uc_update = []
+ for (j, conv) in user_converters.items():
+ # If the converter is specified by column names, use the index instead
+ if _is_string_like(j):
+ try:
+ j = names.index(j)
+ i = j
+ except ValueError:
+ continue
+ elif usecols:
+ try:
+ i = usecols.index(j)
+ except ValueError:
+ # Unused converter specified
+ continue
+ else:
+ i = j
+ # Find the value to test - first_line is not filtered by usecols:
+ if len(first_line):
+ testing_value = first_values[j]
+ else:
+ testing_value = None
+ if conv is bytes:
+ user_conv = asbytes
+ elif byte_converters:
+ # converters may use decode to work around numpy's old behaviour,
+ # so encode the string again before passing to the user converter
+ def tobytes_first(x, conv):
+ if type(x) is bytes:
+ return conv(x)
+ return conv(x.encode("latin1"))
+ user_conv = functools.partial(tobytes_first, conv=conv)
+ else:
+ user_conv = conv
+ converters[i].update(user_conv, locked=True,
+ testing_value=testing_value,
+ default=filling_values[i],
+ missing_values=missing_values[i],)
+ uc_update.append((i, user_conv))
+ # Make sure we have the corrected keys in user_converters...
+ user_converters.update(uc_update)
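+ # Illustrative note (not in the original source): with names=['a', 'b']
+ # and converters={'a': func}, the loop above rewrites the key 'a' to
+ # column index 0, so converters[0] is the one updated with func.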
+
+ # FIXME: possible error, as the following variable is never used.
+ # miss_chars = [_.missing_values for _ in converters]
+
+ # Initialize the output lists ...
+ # ... rows
+ rows = []
+ append_to_rows = rows.append
+ # ... masks
+ if usemask:
+ masks = []
+ append_to_masks = masks.append
+ # ... invalid
+ invalid = []
+ append_to_invalid = invalid.append
+
+ # Parse each line
+ for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
+ values = split_line(line)
+ nbvalues = len(values)
+ # Skip an empty line
+ if nbvalues == 0:
+ continue
+ if usecols:
+ # Select only the columns we need
+ try:
+ values = [values[_] for _ in usecols]
+ except IndexError:
+ append_to_invalid((i + skip_header + 1, nbvalues))
+ continue
+ elif nbvalues != nbcols:
+ append_to_invalid((i + skip_header + 1, nbvalues))
+ continue
+ # Store the values
+ append_to_rows(tuple(values))
+ if usemask:
+ append_to_masks(tuple([v.strip() in m
+ for (v, m) in zip(values,
+ missing_values)]))
+ if len(rows) == max_rows:
+ break
+
+ # Upgrade the converters (if needed)
+ if dtype is None:
+ for (i, converter) in enumerate(converters):
+ current_column = [itemgetter(i)(_m) for _m in rows]
+ try:
+ converter.iterupgrade(current_column)
+ except ConverterLockError:
+ errmsg = "Converter #%i is locked and cannot be upgraded: " % i
+ current_column = map(itemgetter(i), rows)
+ for (j, value) in enumerate(current_column):
+ try:
+ converter.upgrade(value)
+ except (ConverterError, ValueError):
+ errmsg += "(occurred line #%i for value '%s')"
+ errmsg %= (j + 1 + skip_header, value)
+ raise ConverterError(errmsg)
+
+ # Check that we don't have invalid values
+ nbinvalid = len(invalid)
+ if nbinvalid > 0:
+ nbrows = len(rows) + nbinvalid - skip_footer
+ # Construct the error message
+ template = " Line #%%i (got %%i columns instead of %i)" % nbcols
+ if skip_footer > 0:
+ nbinvalid_skipped = len([_ for _ in invalid
+ if _[0] > nbrows + skip_header])
+ invalid = invalid[:nbinvalid - nbinvalid_skipped]
+ skip_footer -= nbinvalid_skipped
+#
+# nbrows -= skip_footer
+# errmsg = [template % (i, nb)
+# for (i, nb) in invalid if i < nbrows]
+# else:
+ errmsg = [template % (i, nb)
+ for (i, nb) in invalid]
+ if len(errmsg):
+ errmsg.insert(0, "Some errors were detected!")
+ errmsg = "\n".join(errmsg)
+ # Raise an exception?
+ if invalid_raise:
+ raise ValueError(errmsg)
+ # Issue a warning?
+ else:
+ warnings.warn(errmsg, ConversionWarning, stacklevel=2)
+
+ # Strip the last skip_footer data
+ if skip_footer > 0:
+ rows = rows[:-skip_footer]
+ if usemask:
+ masks = masks[:-skip_footer]
+
+ # Convert each value according to the converter:
+ # We want to modify the list in place to avoid creating a new one...
+ if loose:
+ rows = list(
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
+ else:
+ rows = list(
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
+ for (i, conv) in enumerate(converters)]))
+
+ # Reset the dtype
+ data = rows
+ if dtype is None:
+ # Get the dtypes from the types of the converters
+ column_types = [conv.type for conv in converters]
+ # Find the columns with strings...
+ strcolidx = [i for (i, v) in enumerate(column_types)
+ if v == np.unicode_]
+
+ if byte_converters and strcolidx:
+ # convert strings back to bytes for backward compatibility
+ warnings.warn(
+ "Reading unicode strings without specifying the encoding "
+ "argument is deprecated. Set the encoding, use None for the "
+ "system default.",
+ np.VisibleDeprecationWarning, stacklevel=2)
+ def encode_unicode_cols(row_tup):
+ row = list(row_tup)
+ for i in strcolidx:
+ row[i] = row[i].encode('latin1')
+ return tuple(row)
+
+ try:
+ data = [encode_unicode_cols(r) for r in data]
+ except UnicodeEncodeError:
+ pass
+ else:
+ for i in strcolidx:
+ column_types[i] = np.bytes_
+
+ # Update string types to be the right length
+ sized_column_types = column_types[:]
+ for i, col_type in enumerate(column_types):
+ if np.issubdtype(col_type, np.character):
+ n_chars = max(len(row[i]) for row in data)
+ sized_column_types[i] = (col_type, n_chars)
+
+ if names is None:
+ # If the dtype is uniform (before sizing strings)
+ base = {
+ c_type
+ for c, c_type in zip(converters, column_types)
+ if c._checked}
+ if len(base) == 1:
+ uniform_type, = base
+ (ddtype, mdtype) = (uniform_type, bool)
+ else:
+ ddtype = [(defaultfmt % i, dt)
+ for (i, dt) in enumerate(sized_column_types)]
+ if usemask:
+ mdtype = [(defaultfmt % i, bool)
+ for (i, dt) in enumerate(sized_column_types)]
+ else:
+ ddtype = list(zip(names, sized_column_types))
+ mdtype = list(zip(names, [bool] * len(sized_column_types)))
+ output = np.array(data, dtype=ddtype)
+ if usemask:
+ outputmask = np.array(masks, dtype=mdtype)
+ else:
+ # Overwrite the initial dtype names if needed
+ if names and dtype.names is not None:
+ dtype.names = names
+ # Case 1. We have a structured type
+ if len(dtype_flat) > 1:
+ # Nested dtype, e.g. [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
+ # First, create the array using a flattened dtype:
+ # [('a', int), ('b1', int), ('b2', float)]
+ # Then, view the array using the specified dtype.
+ if 'O' in (_.char for _ in dtype_flat):
+ if has_nested_fields(dtype):
+ raise NotImplementedError(
+ "Nested fields involving objects are not supported...")
+ else:
+ output = np.array(data, dtype=dtype)
+ else:
+ rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
+ output = rows.view(dtype)
+ # Now, process the rowmasks the same way
+ if usemask:
+ rowmasks = np.array(
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
+ # Construct the new dtype
+ mdtype = make_mask_descr(dtype)
+ outputmask = rowmasks.view(mdtype)
+ # Case #2. We have a basic dtype
+ else:
+ # We used some user-defined converters
+ if user_converters:
+ ishomogeneous = True
+ descr = []
+ for i, ttype in enumerate([conv.type for conv in converters]):
+ # Keep the dtype of the current converter
+ if i in user_converters:
+ ishomogeneous &= (ttype == dtype.type)
+ if np.issubdtype(ttype, np.character):
+ ttype = (ttype, max(len(row[i]) for row in data))
+ descr.append(('', ttype))
+ else:
+ descr.append(('', dtype))
+ # So we changed the dtype?
+ if not ishomogeneous:
+ # We have more than one field
+ if len(descr) > 1:
+ dtype = np.dtype(descr)
+ # We have only one field: drop the name if not needed.
+ else:
+ dtype = np.dtype(ttype)
+ #
+ output = np.array(data, dtype)
+ if usemask:
+ if dtype.names is not None:
+ mdtype = [(_, bool) for _ in dtype.names]
+ else:
+ mdtype = bool
+ outputmask = np.array(masks, dtype=mdtype)
+ # Try to take care of the missing data we missed
+ names = output.dtype.names
+ if usemask and names:
+ for (name, conv) in zip(names, converters):
+ missing_values = [conv(_) for _ in conv.missing_values
+ if _ != '']
+ for mval in missing_values:
+ outputmask[name] |= (output[name] == mval)
+ # Construct the final array
+ if usemask:
+ output = output.view(MaskedArray)
+ output._mask = outputmask
+
+ output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
+
+ if unpack:
+ if names is None:
+ return output.T
+ elif len(names) == 1:
+ # squeeze single-name dtypes too
+ return output[names[0]]
+ else:
+ # For structured arrays with multiple fields,
+ # return an array for each field.
+ return [output[field] for field in names]
+ return output
+
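+# Hedged usage sketch (comment only, not part of the original source):
+#
+#     from io import StringIO
+#     x, y = np.genfromtxt(StringIO("1 2\n3 4"), unpack=True)
+#     # x -> array([1., 3.]);  y -> array([2., 4.])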
+
+_genfromtxt_with_like = array_function_dispatch(
+ _genfromtxt_dispatcher, use_like=True
+)(genfromtxt)
+
+
+def recfromtxt(fname, **kwargs):
+ """
+ Load ASCII data from a file and return it in a record array.
+
+ If ``usemask=False`` a standard `recarray` is returned;
+ if ``usemask=True`` a MaskedRecords array is returned.
+
+ Parameters
+ ----------
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
+
+ See Also
+ --------
+ numpy.genfromtxt : generic function
+
+ Notes
+ -----
+ By default, `dtype` is None, which means that the data-type of the output
+ array will be determined from the data.
+
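+ Examples
+ --------
+ A minimal, illustrative sketch (exact field dtypes depend on the data):
+
+ >>> rec = np.recfromtxt(["A 1", "B 2"], names=['a', 'b'])
+ >>> rec.b
+ array([1, 2])
+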
+ """
+ kwargs.setdefault("dtype", None)
+ usemask = kwargs.get('usemask', False)
+ output = genfromtxt(fname, **kwargs)
+ if usemask:
+ from numpy.ma.mrecords import MaskedRecords
+ output = output.view(MaskedRecords)
+ else:
+ output = output.view(np.recarray)
+ return output
+
+
+def recfromcsv(fname, **kwargs):
+ """
+ Load ASCII data stored in a comma-separated file.
+
+ The returned array is a record array (if ``usemask=False``, see
+ `recarray`) or a masked record array (if ``usemask=True``,
+ see `ma.mrecords.MaskedRecords`).
+
+ Parameters
+ ----------
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
+
+ See Also
+ --------
+ numpy.genfromtxt : generic function to load ASCII data.
+
+ Notes
+ -----
+ By default, `dtype` is None, which means that the data-type of the output
+ array will be determined from the data.
+
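+ Examples
+ --------
+ A minimal, illustrative sketch; ``names=True`` reads field names from the
+ header row (lower-cased by default):
+
+ >>> rec = np.recfromcsv(["A,B", "1,2.5", "2,3.5"])
+ >>> rec.b
+ array([2.5, 3.5])
+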
+ """
+ # Set default kwargs for genfromtxt as relevant to csv import.
+ kwargs.setdefault("case_sensitive", "lower")
+ kwargs.setdefault("names", True)
+ kwargs.setdefault("delimiter", ",")
+ kwargs.setdefault("dtype", None)
+ output = genfromtxt(fname, **kwargs)
+
+ usemask = kwargs.get("usemask", False)
+ if usemask:
+ from numpy.ma.mrecords import MaskedRecords
+ output = output.view(MaskedRecords)
+ else:
+ output = output.view(np.recarray)
+ return output
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/npyio.pyi b/venv/lib/python3.9/site-packages/numpy/lib/npyio.pyi
new file mode 100644
index 00000000..8007b2dc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/npyio.pyi
@@ -0,0 +1,327 @@
+import os
+import sys
+import zipfile
+import types
+from re import Pattern
+from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
+from typing import (
+ Literal as L,
+ Any,
+ TypeVar,
+ Generic,
+ IO,
+ overload,
+ Protocol,
+)
+
+from numpy import (
+ DataSource as DataSource,
+ ndarray,
+ recarray,
+ dtype,
+ generic,
+ float64,
+ void,
+ record,
+)
+
+from numpy.ma.mrecords import MaskedRecords
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NDArray,
+ _DTypeLike,
+ _SupportsArrayFunc,
+)
+
+from numpy.core.multiarray import (
+ packbits as packbits,
+ unpackbits as unpackbits,
+)
+
+_T = TypeVar("_T")
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
+_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
+
+class _SupportsGetItem(Protocol[_T_contra, _T_co]):
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
+
+class _SupportsRead(Protocol[_CharType_co]):
+ def read(self) -> _CharType_co: ...
+
+class _SupportsReadSeek(Protocol[_CharType_co]):
+ def read(self, n: int, /) -> _CharType_co: ...
+ def seek(self, offset: int, whence: int, /) -> object: ...
+
+class _SupportsWrite(Protocol[_CharType_contra]):
+ def write(self, s: _CharType_contra, /) -> object: ...
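+
+# (Illustrative note, not in the original stub.) These are structural
+# protocols: any object with a matching method satisfies them; e.g. an
+# io.StringIO instance satisfies _SupportsWrite[str] via its write method.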
+
+__all__: list[str]
+
+class BagObj(Generic[_T_co]):
+ def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
+ def __getattribute__(self, key: str) -> _T_co: ...
+ def __dir__(self) -> list[str]: ...
+
+class NpzFile(Mapping[str, NDArray[Any]]):
+ zip: zipfile.ZipFile
+ fid: None | IO[str]
+ files: list[str]
+ allow_pickle: bool
+ pickle_kwargs: None | Mapping[str, Any]
+ # Represent `f` as a mutable property so we can access the type of `self`
+ @property
+ def f(self: _T) -> BagObj[_T]: ...
+ @f.setter
+ def f(self: _T, value: BagObj[_T]) -> None: ...
+ def __init__(
+ self,
+ fid: IO[str],
+ own_fid: bool = ...,
+ allow_pickle: bool = ...,
+ pickle_kwargs: None | Mapping[str, Any] = ...,
+ ) -> None: ...
+ def __enter__(self: _T) -> _T: ...
+ def __exit__(
+ self,
+ exc_type: None | type[BaseException],
+ exc_value: None | BaseException,
+ traceback: None | types.TracebackType,
+ /,
+ ) -> None: ...
+ def close(self) -> None: ...
+ def __del__(self) -> None: ...
+ def __iter__(self) -> Iterator[str]: ...
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: str) -> NDArray[Any]: ...
+
+# NOTE: Returns a `NpzFile` if file is a zip file;
+# returns an `ndarray`/`memmap` otherwise
+def load(
+ file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
+ mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
+ allow_pickle: bool = ...,
+ fix_imports: bool = ...,
+ encoding: L["ASCII", "latin1", "bytes"] = ...,
+) -> Any: ...
+
+def save(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ arr: ArrayLike,
+ allow_pickle: bool = ...,
+ fix_imports: bool = ...,
+) -> None: ...
+
+def savez(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ *args: ArrayLike,
+ **kwds: ArrayLike,
+) -> None: ...
+
+def savez_compressed(
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
+ *args: ArrayLike,
+ **kwds: ArrayLike,
+) -> None: ...
+
+# File-like objects only have to implement `__iter__` and,
+# optionally, `encoding`
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: None = ...,
+ comments: None | str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ quotechar: None | str = ...,
+ like: None | _SupportsArrayFunc = ...
+) -> NDArray[float64]: ...
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: _DTypeLike[_SCT],
+ comments: None | str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ quotechar: None | str = ...,
+ like: None | _SupportsArrayFunc = ...
+) -> NDArray[_SCT]: ...
+@overload
+def loadtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: DTypeLike,
+ comments: None | str | Sequence[str] = ...,
+ delimiter: None | str = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ skiprows: int = ...,
+ usecols: int | Sequence[int] = ...,
+ unpack: bool = ...,
+ ndmin: L[0, 1, 2] = ...,
+ encoding: None | str = ...,
+ max_rows: None | int = ...,
+ *,
+ quotechar: None | str = ...,
+ like: None | _SupportsArrayFunc = ...
+) -> NDArray[Any]: ...
+
+def savetxt(
+ fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
+ X: ArrayLike,
+ fmt: str | Sequence[str] = ...,
+ delimiter: str = ...,
+ newline: str = ...,
+ header: str = ...,
+ footer: str = ...,
+ comments: str = ...,
+ encoding: None | str = ...,
+) -> None: ...
+
+@overload
+def fromregex(
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+ regexp: str | bytes | Pattern[Any],
+ dtype: _DTypeLike[_SCT],
+ encoding: None | str = ...
+) -> NDArray[_SCT]: ...
+@overload
+def fromregex(
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+ regexp: str | bytes | Pattern[Any],
+ dtype: DTypeLike,
+ encoding: None | str = ...
+) -> NDArray[Any]: ...
+
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: None = ...,
+ comments: str = ...,
+ delimiter: None | str | int | Iterable[int] = ...,
+ skip_header: int = ...,
+ skip_footer: int = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ missing_values: Any = ...,
+ filling_values: Any = ...,
+ usecols: None | Sequence[int] = ...,
+ names: L[None, True] | str | Collection[str] = ...,
+ excludelist: None | Sequence[str] = ...,
+ deletechars: str = ...,
+ replace_space: str = ...,
+ autostrip: bool = ...,
+ case_sensitive: bool | L['upper', 'lower'] = ...,
+ defaultfmt: str = ...,
+ unpack: None | bool = ...,
+ usemask: bool = ...,
+ loose: bool = ...,
+ invalid_raise: bool = ...,
+ max_rows: None | int = ...,
+ encoding: str = ...,
+ *,
+ ndmin: L[0, 1, 2] = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: _DTypeLike[_SCT],
+ comments: str = ...,
+ delimiter: None | str | int | Iterable[int] = ...,
+ skip_header: int = ...,
+ skip_footer: int = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ missing_values: Any = ...,
+ filling_values: Any = ...,
+ usecols: None | Sequence[int] = ...,
+ names: L[None, True] | str | Collection[str] = ...,
+ excludelist: None | Sequence[str] = ...,
+ deletechars: str = ...,
+ replace_space: str = ...,
+ autostrip: bool = ...,
+ case_sensitive: bool | L['upper', 'lower'] = ...,
+ defaultfmt: str = ...,
+ unpack: None | bool = ...,
+ usemask: bool = ...,
+ loose: bool = ...,
+ invalid_raise: bool = ...,
+ max_rows: None | int = ...,
+ encoding: str = ...,
+ *,
+ ndmin: L[0, 1, 2] = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def genfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ dtype: DTypeLike,
+ comments: str = ...,
+ delimiter: None | str | int | Iterable[int] = ...,
+ skip_header: int = ...,
+ skip_footer: int = ...,
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+ missing_values: Any = ...,
+ filling_values: Any = ...,
+ usecols: None | Sequence[int] = ...,
+ names: L[None, True] | str | Collection[str] = ...,
+ excludelist: None | Sequence[str] = ...,
+ deletechars: str = ...,
+ replace_space: str = ...,
+ autostrip: bool = ...,
+ case_sensitive: bool | L['upper', 'lower'] = ...,
+ defaultfmt: str = ...,
+ unpack: None | bool = ...,
+ usemask: bool = ...,
+ loose: bool = ...,
+ invalid_raise: bool = ...,
+ max_rows: None | int = ...,
+ encoding: str = ...,
+ *,
+ ndmin: L[0, 1, 2] = ...,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def recfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[False] = ...,
+ **kwargs: Any,
+) -> recarray[Any, dtype[record]]: ...
+@overload
+def recfromtxt(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[True],
+ **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
+
+@overload
+def recfromcsv(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[False] = ...,
+ **kwargs: Any,
+) -> recarray[Any, dtype[record]]: ...
+@overload
+def recfromcsv(
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+ *,
+ usemask: L[True],
+ **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/polynomial.py b/venv/lib/python3.9/site-packages/numpy/lib/polynomial.py
new file mode 100644
index 00000000..6aa70886
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/polynomial.py
@@ -0,0 +1,1452 @@
+"""
+Functions to operate on polynomials.
+
+"""
+__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
+ 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
+ 'polyfit', 'RankWarning']
+
+import functools
+import re
+import warnings
+import numpy.core.numeric as NX
+
+from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
+ ones)
+from numpy.core import overrides
+from numpy.core.overrides import set_module
+from numpy.lib.twodim_base import diag, vander
+from numpy.lib.function_base import trim_zeros
+from numpy.lib.type_check import iscomplex, real, imag, mintypecode
+from numpy.linalg import eigvals, lstsq, inv
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+@set_module('numpy')
+class RankWarning(UserWarning):
+ """
+ Issued by `polyfit` when the Vandermonde matrix is rank deficient.
+
+ For more information, a way to suppress the warning, and an example of
+ `RankWarning` being issued, see `polyfit`.
+
+ """
+ pass
+
+
+def _poly_dispatcher(seq_of_zeros):
+ return seq_of_zeros
+
+
+@array_function_dispatch(_poly_dispatcher)
+def poly(seq_of_zeros):
+ """
+ Find the coefficients of a polynomial with the given sequence of roots.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Returns the coefficients of the polynomial whose leading coefficient
+ is one for the given sequence of zeros (multiple roots must be included
+ in the sequence as many times as their multiplicity; see Examples).
+ A square matrix (or array, which will be treated as a matrix) can also
+ be given, in which case the coefficients of the characteristic polynomial
+ of the matrix are returned.
+
+ Parameters
+ ----------
+ seq_of_zeros : array_like, shape (N,) or (N, N)
+ A sequence of polynomial roots, or a square array or matrix object.
+
+ Returns
+ -------
+ c : ndarray
+ 1D array of polynomial coefficients from highest to lowest degree:
+
+ ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
+ where c[0] always equals 1.
+
+ Raises
+ ------
+ ValueError
+ If input is the wrong shape (the input must be a 1-D or square
+ 2-D array).
+
+ See Also
+ --------
+ polyval : Compute polynomial values.
+ roots : Return the roots of a polynomial.
+ polyfit : Least squares polynomial fit.
+ poly1d : A one-dimensional polynomial class.
+
+ Notes
+ -----
+ Specifying the roots of a polynomial still leaves one degree of
+ freedom, typically represented by an undetermined leading
+ coefficient. [1]_ In the case of this function, that coefficient -
+ the first one in the returned array - is always taken as one. (If
+ for some reason you have one other point, the only automatic way
+ presently to leverage that information is to use ``polyfit``.)
+
+ The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
+ matrix **A** is given by
+
+ :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
+
+ where **I** is the `n`-by-`n` identity matrix. [2]_
+
+ References
+ ----------
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
+ Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
+
+ .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
+ Academic Press, pg. 182, 1980.
+
+ Examples
+ --------
+ Given a sequence of a polynomial's zeros:
+
+ >>> np.poly((0, 0, 0)) # Multiple root example
+ array([1., 0., 0., 0.])
+
+ The line above represents z**3 + 0*z**2 + 0*z + 0.
+
+ >>> np.poly((-1./2, 0, 1./2))
+ array([ 1. , 0. , -0.25, 0. ])
+
+ The line above represents z**3 - z/4
+
+ >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
+
+ Given a square array object:
+
+ >>> P = np.array([[0, 1./3], [-1./2, 0]])
+ >>> np.poly(P)
+ array([1. , 0. , 0.16666667])
+
+ Note how in all cases the leading coefficient is always 1.
+
+ """
+ seq_of_zeros = atleast_1d(seq_of_zeros)
+ sh = seq_of_zeros.shape
+
+ if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
+ seq_of_zeros = eigvals(seq_of_zeros)
+ elif len(sh) == 1:
+ dt = seq_of_zeros.dtype
+ # Let object arrays slip through, e.g. for arbitrary precision
+ if dt != object:
+ seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
+ else:
+ raise ValueError("input must be 1d or non-empty square 2d array.")
+
+ if len(seq_of_zeros) == 0:
+ return 1.0
+ dt = seq_of_zeros.dtype
+ a = ones((1,), dtype=dt)
+ for zero in seq_of_zeros:
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
+
+ if issubclass(a.dtype.type, NX.complexfloating):
+ # if complex roots are all complex conjugates, the roots are real.
+ roots = NX.asarray(seq_of_zeros, complex)
+ if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
+ a = a.real.copy()
+
+ return a
+
+
+def _roots_dispatcher(p):
+ return p
+
+
+@array_function_dispatch(_roots_dispatcher)
+def roots(p):
+ """
+ Return the roots of a polynomial with coefficients given in p.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ The values in the rank-1 array `p` are coefficients of a polynomial.
+ If the length of `p` is n+1 then the polynomial is described by::
+
+ p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
+
+ Parameters
+ ----------
+ p : array_like
+ Rank-1 array of polynomial coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ An array containing the roots of the polynomial.
+
+ Raises
+ ------
+ ValueError
+ When `p` cannot be converted to a rank-1 array.
+
+ See also
+ --------
+ poly : Find the coefficients of a polynomial with a given sequence
+ of roots.
+ polyval : Compute polynomial values.
+ polyfit : Least squares polynomial fit.
+ poly1d : A one-dimensional polynomial class.
+
+ Notes
+ -----
+ The algorithm relies on computing the eigenvalues of the
+ companion matrix [1]_.
+
+ References
+ ----------
+ .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
+ Cambridge University Press, 1999, pp. 146-7.
+
+ Examples
+ --------
+ >>> coeff = [3.2, 2, 1]
+ >>> np.roots(coeff)
+ array([-0.3125+0.46351241j, -0.3125-0.46351241j])
+
+ """
+ # If input is scalar, this makes it an array
+ p = atleast_1d(p)
+ if p.ndim != 1:
+ raise ValueError("Input must be a rank-1 array.")
+
+ # find non-zero array entries
+ non_zero = NX.nonzero(NX.ravel(p))[0]
+
+ # Return an empty array if polynomial is all zeros
+ if len(non_zero) == 0:
+ return NX.array([])
+
+ # find the number of trailing zeros -- this is the number of roots at 0.
+ trailing_zeros = len(p) - non_zero[-1] - 1
+
+ # strip leading and trailing zeros
+ p = p[int(non_zero[0]):int(non_zero[-1])+1]
+
+ # casting: if incoming array isn't floating point, make it floating point.
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
+ p = p.astype(float)
+
+ N = len(p)
+ if N > 1:
+ # build companion matrix and find its eigenvalues (the roots)
+ A = diag(NX.ones((N-2,), p.dtype), -1)
+ A[0,:] = -p[1:] / p[0]
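+ # Illustrative (comment only): for p = [1, -3, 2] this builds
+ #     A = [[ 3., -2.],
+ #          [ 1.,  0.]]
+ # whose eigenvalues, 2 and 1, are the roots of x**2 - 3*x + 2.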
+ roots = eigvals(A)
+ else:
+ roots = NX.array([])
+
+ # tack any zeros onto the back of the array
+ roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
+ return roots
+
+
+def _polyint_dispatcher(p, m=None, k=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyint_dispatcher)
+def polyint(p, m=1, k=None):
+ """
+ Return an antiderivative (indefinite integral) of a polynomial.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ The returned order `m` antiderivative `P` of polynomial `p` satisfies
+ :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
+ integration constants `k`. The constants determine the low-order
+ polynomial part
+
+ .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
+
+ of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
+
+ Parameters
+ ----------
+ p : array_like or poly1d
+ Polynomial to integrate.
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
+ m : int, optional
+ Order of the antiderivative. (Default: 1)
+ k : list of `m` scalars or scalar, optional
+ Integration constants. They are given in the order of integration:
+ those corresponding to highest-order terms come first.
+
+ If ``None`` (default), all constants are assumed to be zero.
+ If `m = 1`, a single scalar can be given instead of a list.
+
+ See Also
+ --------
+ polyder : derivative of a polynomial
+ poly1d.integ : equivalent method
+
+ Examples
+ --------
+ The defining property of the antiderivative:
+
+ >>> p = np.poly1d([1,1,1])
+ >>> P = np.polyint(p)
+ >>> P
+ poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
+ >>> np.polyder(P) == p
+ True
+
+ The integration constants default to zero, but can be specified:
+
+ >>> P = np.polyint(p, 3)
+ >>> P(0)
+ 0.0
+ >>> np.polyder(P)(0)
+ 0.0
+ >>> np.polyder(P, 2)(0)
+ 0.0
+ >>> P = np.polyint(p, 3, k=[6,5,3])
+ >>> P
+ poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
+
+ Note that 3 = 6 / 2!, and that the constants are given in the order of
+ integrations. Constant of the highest-order polynomial term comes first:
+
+ >>> np.polyder(P, 2)(0)
+ 6.0
+ >>> np.polyder(P, 1)(0)
+ 5.0
+ >>> P(0)
+ 3.0
+
+ """
+ m = int(m)
+ if m < 0:
+ raise ValueError("Order of integral must be positive (see polyder)")
+ if k is None:
+ k = NX.zeros(m, float)
+ k = atleast_1d(k)
+ if len(k) == 1 and m > 1:
+ k = k[0]*NX.ones(m, float)
+ if len(k) < m:
+ raise ValueError(
+ "k must be a scalar or a rank-1 array of length 1 or >m.")
+
+ truepoly = isinstance(p, poly1d)
+ p = NX.asarray(p)
+ if m == 0:
+ if truepoly:
+ return poly1d(p)
+ return p
+ else:
+ # Note: this must work also with object and integer arrays
+ y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
+ val = polyint(y, m - 1, k=k[1:])
+ if truepoly:
+ return poly1d(val)
+ return val
+
+
+def _polyder_dispatcher(p, m=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyder_dispatcher)
+def polyder(p, m=1):
+ """
+ Return the derivative of the specified order of a polynomial.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Parameters
+ ----------
+ p : poly1d or sequence
+ Polynomial to differentiate.
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
+ m : int, optional
+ Order of differentiation (default: 1)
+
+ Returns
+ -------
+ der : poly1d
+ A new polynomial representing the derivative.
+
+ See Also
+ --------
+ polyint : Anti-derivative of a polynomial.
+ poly1d : Class for one-dimensional polynomials.
+
+ Examples
+ --------
+ The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
+
+ >>> p = np.poly1d([1,1,1,1])
+ >>> p2 = np.polyder(p)
+ >>> p2
+ poly1d([3, 2, 1])
+
+ which evaluates to:
+
+ >>> p2(2.)
+ 17.0
+
+ We can verify this, approximating the derivative with
+ ``(f(x + h) - f(x))/h``:
+
+ >>> (p(2. + 0.001) - p(2.)) / 0.001
+ 17.007000999997857
+
+ The fourth-order derivative of a third-order polynomial is zero:
+
+ >>> np.polyder(p, 2)
+ poly1d([6, 2])
+ >>> np.polyder(p, 3)
+ poly1d([6])
+ >>> np.polyder(p, 4)
+ poly1d([0])
+
+ """
+ m = int(m)
+ if m < 0:
+ raise ValueError("Order of derivative must be positive (see polyint)")
+
+ truepoly = isinstance(p, poly1d)
+ p = NX.asarray(p)
+ n = len(p) - 1
+ y = p[:-1] * NX.arange(n, 0, -1)
+ if m == 0:
+ val = p
+ else:
+ val = polyder(y, m - 1)
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
+ return (x, y, w)
+
+
+@array_function_dispatch(_polyfit_dispatcher)
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+ """
+ Least squares polynomial fit.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
+ to points `(x, y)`. Returns a vector of coefficients `p` that minimizes
+ the squared error in the order `deg`, `deg-1`, ... `0`.
+
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
+ method is recommended for new code as it is more stable numerically. See
+ the documentation of the method for more information.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int
+ Degree of the fitting polynomial
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (M,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+ cov : bool or str, optional
+ If given and not `False`, return not just the estimate but also its
+ covariance matrix. By default, the covariance matrix is scaled by
+ chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed
+ to be unreliable except in a relative sense and everything is scaled
+ such that the reduced chi2 is unity. This scaling is omitted if
+ ``cov='unscaled'``, as is relevant for the case that the weights are
+ w = 1/sigma, with sigma known to be a reliable estimate of the
+ uncertainty.
+
+ Returns
+ -------
+ p : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
+ coefficients for `k`-th data set are in ``p[:,k]``.
+
+ residuals, rank, singular_values, rcond
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the effective rank of the scaled Vandermonde
+ coefficient matrix
+ - singular_values -- singular values of the scaled Vandermonde
+ coefficient matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ V : ndarray, shape (M,M) or (M,M,K)
+ Present only if ``full == False`` and ``cov == True``. The covariance
+ matrix of the polynomial coefficient estimates. The diagonal of
+ this matrix are the variance estimates for each coefficient. If y
+ is a 2-D array, then the covariance matrix for the `k`-th data set
+ are in ``V[:,:,k]``
+
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full == False``.
+
+ The warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ polyval : Compute polynomial values.
+ linalg.lstsq : Computes a least-squares fit.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution minimizes the squared error
+
+ .. math::
+ E = \\sum_{j=0}^k |p(x_j) - y_j|^2
+
+ in the equations::
+
+ x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
+ x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
+ ...
+ x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
+
+ The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
+
+ `polyfit` issues a `RankWarning` when the least-squares fit is badly
+ conditioned. This implies that the best fit is not well-defined due
+ to numerical error. The results may be improved by lowering the polynomial
+ degree or by replacing `x` with ``x - x.mean()``. The `rcond` parameter
+ can also be set to a value smaller than its default, but the resulting
+ fit may be spurious: including contributions from the small singular
+ values can add numerical noise to the result.
+
+ Note that fitting polynomial coefficients is inherently badly conditioned
+ when the degree of the polynomial is large or the interval of sample points
+ is badly centered. The quality of the fit should always be checked in these
+ cases. When polynomial fits are not satisfactory, splines may be a good
+ alternative.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+ .. [2] Wikipedia, "Polynomial interpolation",
+ https://en.wikipedia.org/wiki/Polynomial_interpolation
+
+ Examples
+ --------
+ >>> import warnings
+ >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
+ >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
+ >>> z = np.polyfit(x, y, 3)
+ >>> z
+ array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
+
+ It is convenient to use `poly1d` objects for dealing with polynomials:
+
+ >>> p = np.poly1d(z)
+ >>> p(0.5)
+ 0.6143849206349179 # may vary
+ >>> p(3.5)
+ -0.34732142857143039 # may vary
+ >>> p(10)
+ 22.579365079365115 # may vary
+
+ High-order polynomials may oscillate wildly:
+
+ >>> with warnings.catch_warnings():
+ ... warnings.simplefilter('ignore', np.RankWarning)
+ ... p30 = np.poly1d(np.polyfit(x, y, 30))
+ ...
+ >>> p30(4)
+ -0.80000000000000204 # may vary
+ >>> p30(5)
+ -0.99999999999999445 # may vary
+ >>> p30(4.5)
+ -0.10547061179440398 # may vary
+
+ Illustration:
+
+ >>> import matplotlib.pyplot as plt
+ >>> xp = np.linspace(-2, 6, 100)
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
+ >>> plt.ylim(-2,2)
+ (-2, 2)
+ >>> plt.show()
+
+ """
+ order = int(deg) + 1
+ x = NX.asarray(x) + 0.0
+ y = NX.asarray(y) + 0.0
+
+ # check arguments.
+ if deg < 0:
+ raise ValueError("expected deg >= 0")
+ if x.ndim != 1:
+ raise TypeError("expected 1D vector for x")
+ if x.size == 0:
+ raise TypeError("expected non-empty vector for x")
+ if y.ndim < 1 or y.ndim > 2:
+ raise TypeError("expected 1D or 2D array for y")
+ if x.shape[0] != y.shape[0]:
+ raise TypeError("expected x and y to have same length")
+
+ # set rcond
+ if rcond is None:
+ rcond = len(x)*finfo(x.dtype).eps
+
+ # set up least squares equation for powers of x
+ lhs = vander(x, order)
+ rhs = y
+
+ # apply weighting
+ if w is not None:
+ w = NX.asarray(w) + 0.0
+ if w.ndim != 1:
+ raise TypeError("expected a 1-d array for weights")
+ if w.shape[0] != y.shape[0]:
+ raise TypeError("expected w and y to have the same length")
+ lhs *= w[:, NX.newaxis]
+ if rhs.ndim == 2:
+ rhs *= w[:, NX.newaxis]
+ else:
+ rhs *= w
+
+ # scale lhs to improve condition number and solve
+ scale = NX.sqrt((lhs*lhs).sum(axis=0))
+ lhs /= scale
+ c, resids, rank, s = lstsq(lhs, rhs, rcond)
+ c = (c.T/scale).T # broadcast scale coefficients
+
+ # warn on rank reduction, which indicates an ill conditioned matrix
+ if rank != order and not full:
+ msg = "Polyfit may be poorly conditioned"
+ warnings.warn(msg, RankWarning, stacklevel=4)
+
+ if full:
+ return c, resids, rank, s, rcond
+ elif cov:
+ Vbase = inv(dot(lhs.T, lhs))
+ Vbase /= NX.outer(scale, scale)
+ if cov == "unscaled":
+ fac = 1
+ else:
+ if len(x) <= order:
+ raise ValueError("the number of data points must exceed order "
+ "to scale the covariance matrix")
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
+ # it was decided that the "- 2" (originally justified by "Bayesian
+ # uncertainty analysis") is not what the user expects
+ # (see gh-11196 and gh-11197)
+ fac = resids / (len(x) - order)
+ if y.ndim == 1:
+ return c, Vbase * fac
+ else:
+ return c, Vbase[:,:, NX.newaxis] * fac
+ else:
+ return c
+
+
+def _polyval_dispatcher(p, x):
+ return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
+def polyval(p, x):
+ """
+ Evaluate a polynomial at specific values.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ If `p` is of length N, this function returns the value:
+
+ ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
+
+ If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
+ If `x` is another polynomial then the composite polynomial ``p(x(t))``
+ is returned.
+
+ Parameters
+ ----------
+ p : array_like or poly1d object
+ 1D array of polynomial coefficients (including coefficients equal
+ to zero) from highest degree to the constant term, or an
+ instance of poly1d.
+ x : array_like or poly1d object
+ A number, an array of numbers, or an instance of poly1d, at
+ which to evaluate `p`.
+
+ Returns
+ -------
+ values : ndarray or poly1d
+ If `x` is a poly1d instance, the result is the composition of the two
+ polynomials, i.e., `x` is "substituted" in `p` and the simplified
+ result is returned. In addition, the type of `x` - array_like or
+ poly1d - governs the type of the output: `x` array_like => `values`
+ array_like, `x` a poly1d object => `values` is also.
+
+ See Also
+ --------
+ poly1d: A polynomial class.
+
+ Notes
+ -----
+ Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
+ for polynomials of high degree the values may be inaccurate due to
+ rounding errors. Use carefully.
+
+ If `x` is a subtype of `ndarray` the return value will be of the same type.
+
+ References
+ ----------
+ .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
+ trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
+ Reinhold Co., 1985, pg. 720.
+
+ Examples
+ --------
+ >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
+ 76
+ >>> np.polyval([3,0,1], np.poly1d(5))
+ poly1d([76])
+ >>> np.polyval(np.poly1d([3,0,1]), 5)
+ 76
+ >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
+ poly1d([76])
+
+ """
+ p = NX.asarray(p)
+ if isinstance(x, poly1d):
+ y = 0
+ else:
+ x = NX.asanyarray(x)
+ y = NX.zeros_like(x)
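+ # Illustrative (comment only): for p = [3, 0, 1] and x = 5 the loop
+ # below computes ((0*5 + 3)*5 + 0)*5 + 1 = 76 (Horner's scheme).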
+ for pv in p:
+ y = y * x + pv
+ return y
+
+
+def _binary_op_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polyadd(a1, a2):
+ """
+ Find the sum of two polynomials.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Returns the polynomial resulting from the sum of two input polynomials.
+ Each input must be either a poly1d object or a 1D sequence of polynomial
+ coefficients, from highest to lowest degree.
+
+ Parameters
+ ----------
+ a1, a2 : array_like or poly1d object
+ Input polynomials.
+
+ Returns
+ -------
+ out : ndarray or poly1d object
+ The sum of the inputs. If either input is a poly1d object, then the
+ output is also a poly1d object. Otherwise, it is a 1D array of
+ polynomial coefficients from highest to lowest degree.
+
+ See Also
+ --------
+ poly1d : A one-dimensional polynomial class.
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
+
+ Examples
+ --------
+ >>> np.polyadd([1, 2], [9, 5, 4])
+ array([9, 6, 6])
+
+ Using poly1d objects:
+
+ >>> p1 = np.poly1d([1, 2])
+ >>> p2 = np.poly1d([9, 5, 4])
+ >>> print(p1)
+ 1 x + 2
+ >>> print(p2)
+ 2
+ 9 x + 5 x + 4
+ >>> print(np.polyadd(p1, p2))
+ 2
+ 9 x + 6 x + 6
+
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1 = atleast_1d(a1)
+ a2 = atleast_1d(a2)
+ diff = len(a2) - len(a1)
+ if diff == 0:
+ val = a1 + a2
+ elif diff > 0:
+ zr = NX.zeros(diff, a1.dtype)
+ val = NX.concatenate((zr, a1)) + a2
+ else:
+ zr = NX.zeros(abs(diff), a2.dtype)
+ val = a1 + NX.concatenate((zr, a2))
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polysub(a1, a2):
+ """
+ Difference (subtraction) of two polynomials.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
+ `a1` and `a2` can be either array_like sequences of the polynomials'
+ coefficients (including coefficients equal to zero), or `poly1d` objects.
+
+ Parameters
+ ----------
+ a1, a2 : array_like or poly1d
+ Minuend and subtrahend polynomials, respectively.
+
+ Returns
+ -------
+ out : ndarray or poly1d
+ Array or `poly1d` object of the difference polynomial's coefficients.
+
+ See Also
+ --------
+ polyval, polydiv, polymul, polyadd
+
+ Examples
+ --------
+ .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
+
+ >>> np.polysub([2, 10, -2], [3, 10, -4])
+ array([-1, 0, 2])
+
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1 = atleast_1d(a1)
+ a2 = atleast_1d(a2)
+ diff = len(a2) - len(a1)
+ if diff == 0:
+ val = a1 - a2
+ elif diff > 0:
+ zr = NX.zeros(diff, a1.dtype)
+ val = NX.concatenate((zr, a1)) - a2
+ else:
+ zr = NX.zeros(abs(diff), a2.dtype)
+ val = a1 - NX.concatenate((zr, a2))
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def polymul(a1, a2):
+ """
+ Find the product of two polynomials.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ Finds the polynomial resulting from the multiplication of the two input
+ polynomials. Each input must be either a poly1d object or a 1D sequence
+ of polynomial coefficients, from highest to lowest degree.
+
+ Parameters
+ ----------
+ a1, a2 : array_like or poly1d object
+ Input polynomials.
+
+ Returns
+ -------
+ out : ndarray or poly1d object
+ The polynomial resulting from the multiplication of the inputs. If
+ either inputs is a poly1d object, then the output is also a poly1d
+ object. Otherwise, it is a 1D array of polynomial coefficients from
+ highest to lowest degree.
+
+ See Also
+ --------
+ poly1d : A one-dimensional polynomial class.
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
+ convolve : Array convolution. Same output as polymul, but has parameter
+ for overlap mode.
+
+ Examples
+ --------
+ >>> np.polymul([1, 2, 3], [9, 5, 1])
+ array([ 9, 23, 38, 17, 3])
+
+ Using poly1d objects:
+
+ >>> p1 = np.poly1d([1, 2, 3])
+ >>> p2 = np.poly1d([9, 5, 1])
+ >>> print(p1)
+ 2
+ 1 x + 2 x + 3
+ >>> print(p2)
+ 2
+ 9 x + 5 x + 1
+ >>> print(np.polymul(p1, p2))
+ 4 3 2
+ 9 x + 23 x + 38 x + 17 x + 3
+
+ """
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
+ a1, a2 = poly1d(a1), poly1d(a2)
+ val = NX.convolve(a1, a2)
+ if truepoly:
+ val = poly1d(val)
+ return val
+
+
+def _polydiv_dispatcher(u, v):
+ return (u, v)
+
+
+@array_function_dispatch(_polydiv_dispatcher)
+def polydiv(u, v):
+ """
+ Returns the quotient and remainder of polynomial division.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ The input arrays are the coefficients (including any coefficients
+ equal to zero) of the "numerator" (dividend) and "denominator"
+ (divisor) polynomials, respectively.
+
+ Parameters
+ ----------
+ u : array_like or poly1d
+ Dividend polynomial's coefficients.
+
+ v : array_like or poly1d
+ Divisor polynomial's coefficients.
+
+ Returns
+ -------
+ q : ndarray
+ Coefficients, including those equal to zero, of the quotient.
+ r : ndarray
+ Coefficients, including those equal to zero, of the remainder.
+
+ See Also
+ --------
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
+ polyval
+
+ Notes
+ -----
+ Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
+ not equal `v.ndim`. In other words, all four possible combinations -
+ ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
+ ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
+
+ Examples
+ --------
+ .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
+
+ >>> x = np.array([3.0, 5.0, 2.0])
+ >>> y = np.array([2.0, 1.0])
+ >>> np.polydiv(x, y)
+ (array([1.5 , 1.75]), array([0.25]))
+
+ """
+ truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
+ u = atleast_1d(u) + 0.0
+ v = atleast_1d(v) + 0.0
+ # w has the common type
+ w = u[0] + v[0]
+ m = len(u) - 1
+ n = len(v) - 1
+ scale = 1. / v[0]
+ q = NX.zeros((max(m - n + 1, 1),), w.dtype)
+ r = u.astype(w.dtype)
+ for k in range(0, m-n+1):
+ d = scale * r[k]
+ q[k] = d
+ r[k:k+n+1] -= d*v
+ while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
+ r = r[1:]
+ if truepoly:
+ return poly1d(q), poly1d(r)
+ return q, r
+
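+ # A worked trace of the synthetic division above (a sketch): for
+ # u = [3., 5., 2.] and v = [2., 1.], scale = 0.5; iteration k=0 emits
+ # q[0] = 1.5 and reduces r to [0., 3.5, 2.], and iteration k=1 emits
+ # q[1] = 1.75, leaving r = [0., 0., 0.25]; stripping the leading
+ # near-zero entries gives r = [0.25], matching the docstring example.
+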
+_poly_mat = re.compile(r"\*\*([0-9]*)")
+def _raise_power(astr, wrap=70):
+ n = 0
+ line1 = ''
+ line2 = ''
+ output = ' '
+ while True:
+ mat = _poly_mat.search(astr, n)
+ if mat is None:
+ break
+ span = mat.span()
+ power = mat.groups()[0]
+ partstr = astr[n:span[0]]
+ n = span[1]
+ toadd2 = partstr + ' '*(len(power)-1)
+ toadd1 = ' '*(len(partstr)-1) + power
+ if ((len(line2) + len(toadd2) > wrap) or
+ (len(line1) + len(toadd1) > wrap)):
+ output += line1 + "\n" + line2 + "\n "
+ line1 = toadd1
+ line2 = toadd2
+ else:
+ line2 += partstr + ' '*(len(power)-1)
+ line1 += ' '*(len(partstr)-1) + power
+ output += line1 + "\n" + line2
+ return output + astr[n:]
+
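+ # Example of the layout produced above (a sketch):
+ # _raise_power('1 x**2 + 2 x + 3') returns a two-line string in which each
+ # exponent is raised onto the line above its term:
+ #    2
+ # 1 x + 2 x + 3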
+
+@set_module('numpy')
+class poly1d:
+ """
+ A one-dimensional polynomial class.
+
+ .. note::
+ This forms part of the old polynomial API. Since version 1.4, the
+ new polynomial API defined in `numpy.polynomial` is preferred.
+ A summary of the differences can be found in the
+ :doc:`transition guide </reference/routines.polynomials>`.
+
+ A convenience class, used to encapsulate "natural" operations on
+ polynomials so that said operations may take on their customary
+ form in code (see Examples).
+
+ Parameters
+ ----------
+ c_or_r : array_like
+ The polynomial's coefficients, in decreasing powers, or if
+ the value of the second parameter is True, the polynomial's
+ roots (values where the polynomial evaluates to 0). For example,
+ ``poly1d([1, 2, 3])`` returns an object that represents
+ :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
+ one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
+ r : bool, optional
+ If True, `c_or_r` specifies the polynomial's roots; the default
+ is False.
+ variable : str, optional
+ Changes the variable used when printing `p` from `x` to `variable`
+ (see Examples).
+
+ Examples
+ --------
+ Construct the polynomial :math:`x^2 + 2x + 3`:
+
+ >>> p = np.poly1d([1, 2, 3])
+ >>> print(np.poly1d(p))
+ 2
+ 1 x + 2 x + 3
+
+ Evaluate the polynomial at :math:`x = 0.5`:
+
+ >>> p(0.5)
+ 4.25
+
+ Find the roots:
+
+ >>> p.r
+ array([-1.+1.41421356j, -1.-1.41421356j])
+ >>> p(p.r)
+ array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
+
+ These numbers in the previous line represent (0, 0) to machine precision
+
+ Show the coefficients:
+
+ >>> p.c
+ array([1, 2, 3])
+
+ Display the order (the leading zero-coefficients are removed):
+
+ >>> p.order
+ 2
+
+ Show the coefficient of the k-th power in the polynomial
+ (which is equivalent to ``p.c[-(k+1)]``):
+
+ >>> p[1]
+ 2
+
+ Polynomials can be added, subtracted, multiplied, and divided
+ (returns quotient and remainder):
+
+ >>> p * p
+ poly1d([ 1, 4, 10, 12, 9])
+
+ >>> (p**3 + 4) / p
+ (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
+
+ ``asarray(p)`` gives the coefficient array, so polynomials can be
+ used in all functions that accept arrays:
+
+ >>> p**2 # square of polynomial
+ poly1d([ 1, 4, 10, 12, 9])
+
+ >>> np.square(p) # square of individual coefficients
+ array([1, 4, 9])
+
+ The variable used in the string representation of `p` can be modified,
+ using the `variable` parameter:
+
+ >>> p = np.poly1d([1,2,3], variable='z')
+ >>> print(p)
+ 2
+ 1 z + 2 z + 3
+
+ Construct a polynomial from its roots:
+
+ >>> np.poly1d([1, 2], True)
+ poly1d([ 1., -3., 2.])
+
+ This is the same polynomial as obtained by:
+
+ >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
+ poly1d([ 1, -3, 2])
+
+ """
+ __hash__ = None
+
+ @property
+ def coeffs(self):
+ """ The polynomial coefficients """
+ return self._coeffs
+
+ @coeffs.setter
+ def coeffs(self, value):
+ # allowing this no-op assignment makes in-place ops like p.coeffs *= 2 legal
+ if value is not self._coeffs:
+ raise AttributeError("Cannot set attribute")
+
+ @property
+ def variable(self):
+ """ The name of the polynomial variable """
+ return self._variable
+
+ # calculated attributes
+ @property
+ def order(self):
+ """ The order or degree of the polynomial """
+ return len(self._coeffs) - 1
+
+ @property
+ def roots(self):
+ """ The roots of the polynomial, where self(x) == 0 """
+ return roots(self._coeffs)
+
+ # our internal _coeffs property needs to be backed by __dict__['coeffs']
+ # for scipy to work correctly.
+ @property
+ def _coeffs(self):
+ return self.__dict__['coeffs']
+ @_coeffs.setter
+ def _coeffs(self, coeffs):
+ self.__dict__['coeffs'] = coeffs
+
+ # alias attributes
+ r = roots
+ c = coef = coefficients = coeffs
+ o = order
+
+ def __init__(self, c_or_r, r=False, variable=None):
+ if isinstance(c_or_r, poly1d):
+ self._variable = c_or_r._variable
+ self._coeffs = c_or_r._coeffs
+
+ if set(c_or_r.__dict__) - set(self.__dict__):
+ msg = ("In the future extra properties will not be copied "
+ "across when constructing one poly1d from another")
+ warnings.warn(msg, FutureWarning, stacklevel=2)
+ self.__dict__.update(c_or_r.__dict__)
+
+ if variable is not None:
+ self._variable = variable
+ return
+ if r:
+ c_or_r = poly(c_or_r)
+ c_or_r = atleast_1d(c_or_r)
+ if c_or_r.ndim > 1:
+ raise ValueError("Polynomial must be 1d only.")
+ c_or_r = trim_zeros(c_or_r, trim='f')
+ if len(c_or_r) == 0:
+ c_or_r = NX.array([0], dtype=c_or_r.dtype)
+ self._coeffs = c_or_r
+ if variable is None:
+ variable = 'x'
+ self._variable = variable
+
+ def __array__(self, t=None):
+ if t:
+ return NX.asarray(self.coeffs, t)
+ else:
+ return NX.asarray(self.coeffs)
+
+ def __repr__(self):
+ vals = repr(self.coeffs)
+ vals = vals[6:-1]
+ return "poly1d(%s)" % vals
+
+ def __len__(self):
+ return self.order
+
+ def __str__(self):
+ thestr = "0"
+ var = self.variable
+
+ # Remove leading zeros
+ coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
+ N = len(coeffs)-1
+
+ def fmt_float(q):
+ s = '%.4g' % q
+ if s.endswith('.0000'):
+ s = s[:-5]
+ return s
+
+ for k, coeff in enumerate(coeffs):
+ if not iscomplex(coeff):
+ coefstr = fmt_float(real(coeff))
+ elif real(coeff) == 0:
+ coefstr = '%sj' % fmt_float(imag(coeff))
+ else:
+ coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
+ fmt_float(imag(coeff)))
+
+ power = (N-k)
+ if power == 0:
+ if coefstr != '0':
+ newstr = '%s' % (coefstr,)
+ else:
+ if k == 0:
+ newstr = '0'
+ else:
+ newstr = ''
+ elif power == 1:
+ if coefstr == '0':
+ newstr = ''
+ elif coefstr == 'b':
+ newstr = var
+ else:
+ newstr = '%s %s' % (coefstr, var)
+ else:
+ if coefstr == '0':
+ newstr = ''
+ elif coefstr == 'b':
+ newstr = '%s**%d' % (var, power,)
+ else:
+ newstr = '%s %s**%d' % (coefstr, var, power)
+
+ if k > 0:
+ if newstr != '':
+ if newstr.startswith('-'):
+ thestr = "%s - %s" % (thestr, newstr[1:])
+ else:
+ thestr = "%s + %s" % (thestr, newstr)
+ else:
+ thestr = newstr
+ return _raise_power(thestr)
+
+ def __call__(self, val):
+ return polyval(self.coeffs, val)
+
+ def __neg__(self):
+ return poly1d(-self.coeffs)
+
+ def __pos__(self):
+ return self
+
+ def __mul__(self, other):
+ if isscalar(other):
+ return poly1d(self.coeffs * other)
+ else:
+ other = poly1d(other)
+ return poly1d(polymul(self.coeffs, other.coeffs))
+
+ def __rmul__(self, other):
+ if isscalar(other):
+ return poly1d(other * self.coeffs)
+ else:
+ other = poly1d(other)
+ return poly1d(polymul(self.coeffs, other.coeffs))
+
+ def __add__(self, other):
+ other = poly1d(other)
+ return poly1d(polyadd(self.coeffs, other.coeffs))
+
+ def __radd__(self, other):
+ other = poly1d(other)
+ return poly1d(polyadd(self.coeffs, other.coeffs))
+
+ def __pow__(self, val):
+ if not isscalar(val) or int(val) != val or val < 0:
+ raise ValueError("Power to non-negative integers only.")
+ res = [1]
+ for _ in range(val):
+ res = polymul(self.coeffs, res)
+ return poly1d(res)
+
+ def __sub__(self, other):
+ other = poly1d(other)
+ return poly1d(polysub(self.coeffs, other.coeffs))
+
+ def __rsub__(self, other):
+ other = poly1d(other)
+ return poly1d(polysub(other.coeffs, self.coeffs))
+
+ def __div__(self, other):
+ if isscalar(other):
+ return poly1d(self.coeffs/other)
+ else:
+ other = poly1d(other)
+ return polydiv(self, other)
+
+ __truediv__ = __div__
+
+ def __rdiv__(self, other):
+ if isscalar(other):
+ return poly1d(other/self.coeffs)
+ else:
+ other = poly1d(other)
+ return polydiv(other, self)
+
+ __rtruediv__ = __rdiv__
+
+ def __eq__(self, other):
+ if not isinstance(other, poly1d):
+ return NotImplemented
+ if self.coeffs.shape != other.coeffs.shape:
+ return False
+ return (self.coeffs == other.coeffs).all()
+
+ def __ne__(self, other):
+ if not isinstance(other, poly1d):
+ return NotImplemented
+ return not self.__eq__(other)
+
+
+ def __getitem__(self, val):
+ ind = self.order - val
+ if val > self.order:
+ return self.coeffs.dtype.type(0)
+ if val < 0:
+ return self.coeffs.dtype.type(0)
+ return self.coeffs[ind]
+
+ def __setitem__(self, key, val):
+ ind = self.order - key
+ if key < 0:
+ raise ValueError("Does not support negative powers.")
+ if key > self.order:
+ zr = NX.zeros(key-self.order, self.coeffs.dtype)
+ self._coeffs = NX.concatenate((zr, self.coeffs))
+ ind = 0
+ self._coeffs[ind] = val
+ return
+
+ def __iter__(self):
+ return iter(self.coeffs)
+
+ def integ(self, m=1, k=0):
+ """
+ Return an antiderivative (indefinite integral) of this polynomial.
+
+ Refer to `polyint` for full documentation.
+
+ See Also
+ --------
+ polyint : equivalent function
+
+ """
+ return poly1d(polyint(self.coeffs, m=m, k=k))
+
+ def deriv(self, m=1):
+ """
+ Return a derivative of this polynomial.
+
+ Refer to `polyder` for full documentation.
+
+ See Also
+ --------
+ polyder : equivalent function
+
+ """
+ return poly1d(polyder(self.coeffs, m=m))
+
+# Stuff to do on module import
+
+warnings.simplefilter('always', RankWarning)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/polynomial.pyi b/venv/lib/python3.9/site-packages/numpy/lib/polynomial.pyi
new file mode 100644
index 00000000..14bbaf39
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/polynomial.pyi
@@ -0,0 +1,303 @@
+from typing import (
+ Literal as L,
+ overload,
+ Any,
+ SupportsInt,
+ SupportsIndex,
+ TypeVar,
+ NoReturn,
+)
+
+from numpy import (
+ RankWarning as RankWarning,
+ poly1d as poly1d,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ bool_,
+ int32,
+ int64,
+ float64,
+ complex128,
+ object_,
+)
+
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+
+_2Tup = tuple[_T, _T]
+_5Tup = tuple[
+ _T,
+ NDArray[float64],
+ NDArray[int32],
+ NDArray[float64],
+ NDArray[float64],
+]
+
+__all__: list[str]
+
+def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
+
+# Returns either a float or complex array depending on the input values.
+# See `np.linalg.eigvals`.
+def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
+
+@overload
+def polyint(
+ p: poly1d,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
+) -> poly1d: ...
+@overload
+def polyint(
+ p: _ArrayLikeFloat_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeFloat_co = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyint(
+ p: _ArrayLikeComplex_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeComplex_co = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyint(
+ p: _ArrayLikeObject_co,
+ m: SupportsInt | SupportsIndex = ...,
+ k: None | _ArrayLikeObject_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyder(
+ p: poly1d,
+ m: SupportsInt | SupportsIndex = ...,
+) -> poly1d: ...
+@overload
+def polyder(
+ p: _ArrayLikeFloat_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyder(
+ p: _ArrayLikeComplex_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyder(
+ p: _ArrayLikeObject_co,
+ m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[False] = ...,
+) -> NDArray[float64]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[False] = ...,
+) -> NDArray[complex128]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[False] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[complex128]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[True] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ deg: SupportsIndex | SupportsInt,
+ rcond: None | float = ...,
+ full: L[True] = ...,
+ w: None | _ArrayLikeFloat_co = ...,
+ cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[complex128]]: ...
+
+@overload
+def polyval(
+ p: _ArrayLikeBool_co,
+ x: _ArrayLikeBool_co,
+) -> NDArray[int64]: ...
+@overload
+def polyval(
+ p: _ArrayLikeUInt_co,
+ x: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeInt_co,
+ x: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeFloat_co,
+ x: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeComplex_co,
+ x: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyval(
+ p: _ArrayLikeObject_co,
+ x: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polyadd(
+ a1: poly1d,
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ a2: poly1d,
+) -> poly1d: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeBool_co,
+ a2: _ArrayLikeBool_co,
+) -> NDArray[bool_]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeUInt_co,
+ a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeInt_co,
+ a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeFloat_co,
+ a2: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeComplex_co,
+ a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyadd(
+ a1: _ArrayLikeObject_co,
+ a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polysub(
+ a1: poly1d,
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polysub(
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ a2: poly1d,
+) -> poly1d: ...
+@overload
+def polysub(
+ a1: _ArrayLikeBool_co,
+ a2: _ArrayLikeBool_co,
+) -> NoReturn: ...
+@overload
+def polysub(
+ a1: _ArrayLikeUInt_co,
+ a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeInt_co,
+ a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeFloat_co,
+ a2: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeComplex_co,
+ a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polysub(
+ a1: _ArrayLikeObject_co,
+ a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+# NOTE: Not an alias, but they do have the same signature (that we can reuse)
+polymul = polyadd
+
+@overload
+def polydiv(
+ u: poly1d,
+ v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ v: poly1d,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeFloat_co,
+ v: _ArrayLikeFloat_co,
+) -> _2Tup[NDArray[floating[Any]]]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeComplex_co,
+ v: _ArrayLikeComplex_co,
+) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
+@overload
+def polydiv(
+ u: _ArrayLikeObject_co,
+ v: _ArrayLikeObject_co,
+) -> _2Tup[NDArray[Any]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/recfunctions.py b/venv/lib/python3.9/site-packages/numpy/lib/recfunctions.py
new file mode 100644
index 00000000..6afcf1b7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/recfunctions.py
@@ -0,0 +1,1590 @@
+"""
+Collection of utilities to manipulate structured arrays.
+
+Most of these functions were initially implemented by John Hunter for
+matplotlib. They have been rewritten and extended for convenience.
+
+"""
+import itertools
+import numpy as np
+import numpy.ma as ma
+from numpy import ndarray, recarray
+from numpy.ma import MaskedArray
+from numpy.ma.mrecords import MaskedRecords
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib._iotools import _is_string_like
+
+_check_fill_value = np.ma.core._check_fill_value
+
+
+__all__ = [
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
+ 'join_by', 'merge_arrays', 'rec_append_fields',
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+ 'rename_fields', 'repack_fields', 'require_fields',
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
+ ]
+
+
+def _recursive_fill_fields_dispatcher(input, output):
+ return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
+def recursive_fill_fields(input, output):
+ """
+ Fills the fields of `output` with the corresponding fields of `input`,
+ with support for nested structures.
+
+ Parameters
+ ----------
+ input : ndarray
+ Input array.
+ output : ndarray
+ Output array.
+
+ Notes
+ -----
+ * `output` should be at least the same size as `input`
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
+ >>> b = np.zeros((3,), dtype=a.dtype)
+ >>> rfn.recursive_fill_fields(a, b)
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
+
+ """
+ newdtype = output.dtype
+ for field in newdtype.names:
+ try:
+ current = input[field]
+ except ValueError:
+ continue
+ if current.dtype.names is not None:
+ recursive_fill_fields(current, output[field])
+ else:
+ output[field][:len(current)] = current
+ return output
+
+
+def _get_fieldspec(dtype):
+ """
+ Produce a list of name/dtype pairs corresponding to the dtype fields
+
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
+ string. As a result, this handles subarray dtypes
+
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
+ this (deliberately) discards field offsets.
+
+ Examples
+ --------
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
+ >>> dt.descr
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+ >>> _get_fieldspec(dt)
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
+
+ """
+ if dtype.names is None:
+ # .descr returns a nameless field, so we should too
+ return [('', dtype)]
+ else:
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
+ # keep any titles, if present
+ return [
+ (name if len(f) == 2 else (f[2], name), f[0])
+ for name, f in fields
+ ]
+
+
+def get_names(adtype):
+ """
+ Returns the field names of the input datatype as a tuple. Input datatype
+ must have fields, otherwise an error is raised.
+
+ Parameters
+ ----------
+ adtype : dtype
+ Input datatype
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
+ ('A',)
+ >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
+ ('A', 'B')
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+ >>> rfn.get_names(adtype)
+ ('a', ('b', ('ba', 'bb')))
+ """
+ listnames = []
+ names = adtype.names
+ for name in names:
+ current = adtype[name]
+ if current.names is not None:
+ listnames.append((name, tuple(get_names(current))))
+ else:
+ listnames.append(name)
+ return tuple(listnames)
+
+
+def get_names_flat(adtype):
+ """
+ Returns the field names of the input datatype as a tuple. Input datatype
+ must have fields, otherwise an error is raised.
+ Nested structures are flattened beforehand.
+
+ Parameters
+ ----------
+ adtype : dtype
+ Input datatype
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
+ False
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
+ ('A', 'B')
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+ >>> rfn.get_names_flat(adtype)
+ ('a', 'b', 'ba', 'bb')
+ """
+ listnames = []
+ names = adtype.names
+ for name in names:
+ listnames.append(name)
+ current = adtype[name]
+ if current.names is not None:
+ listnames.extend(get_names_flat(current))
+ return tuple(listnames)
+
+
+def flatten_descr(ndtype):
+ """
+ Flatten a structured data-type description.
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+ >>> rfn.flatten_descr(ndtype)
+ (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+ """
+ names = ndtype.names
+ if names is None:
+ return (('', ndtype),)
+ else:
+ descr = []
+ for field in names:
+ (typ, _) = ndtype.fields[field]
+ if typ.names is not None:
+ descr.extend(flatten_descr(typ))
+ else:
+ descr.append((field, typ))
+ return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+ newdtype = []
+ if flatten:
+ for a in seqarrays:
+ newdtype.extend(flatten_descr(a.dtype))
+ else:
+ for a in seqarrays:
+ current = a.dtype
+ if current.names is not None and len(current.names) == 1:
+ # special case - dtypes of 1 field are flattened
+ newdtype.extend(_get_fieldspec(current))
+ else:
+ newdtype.append(('', current))
+ return np.dtype(newdtype)
+
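+ # A minimal sketch of the behavior above: plain (unnamed) dtypes are
+ # appended as ('', dtype) pairs, and np.dtype then assigns the default
+ # field names f0, f1, ...:
+ # >>> _zip_dtype([np.zeros(2, np.int64), np.zeros(2, np.float64)])
+ # dtype([('f0', '<i8'), ('f1', '<f8')])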
+
+def _zip_descr(seqarrays, flatten=False):
+ """
+ Combine the dtype description of a series of arrays.
+
+ Parameters
+ ----------
+ seqarrays : sequence of arrays
+ Sequence of arrays
+ flatten : {boolean}, optional
+ Whether to collapse nested descriptions.
+ """
+ return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+ """
+ Returns a dictionary with fields indexing lists of their parent fields.
+
+ This function is used to simplify access to fields nested in other fields.
+
+ Parameters
+ ----------
+ adtype : np.dtype
+ Input datatype
+ lastname : optional
+ Last processed field name (used internally during recursion).
+ parents : dictionary
+ Dictionary of parent fields (used internally during recursion).
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> ndtype = np.dtype([('A', int),
+ ... ('B', [('BA', int),
+ ... ('BB', [('BBA', int), ('BBB', int)])])])
+ >>> rfn.get_fieldstructure(ndtype)
+ ... # XXX: possible regression, order of BBA and BBB is swapped
+ {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+ """
+ if parents is None:
+ parents = {}
+ names = adtype.names
+ for name in names:
+ current = adtype[name]
+ if current.names is not None:
+ if lastname:
+ parents[name] = [lastname, ]
+ else:
+ parents[name] = []
+ parents.update(get_fieldstructure(current, name, parents))
+ else:
+ lastparent = [_ for _ in (parents.get(lastname, []) or [])]
+ if lastparent:
+ lastparent.append(lastname)
+ elif lastname:
+ lastparent = [lastname, ]
+ parents[name] = lastparent or []
+ return parents
+
+
+def _izip_fields_flat(iterable):
+ """
+ Returns an iterator of concatenated fields from a sequence of arrays,
+ collapsing any nested structure.
+
+ """
+ for element in iterable:
+ if isinstance(element, np.void):
+ yield from _izip_fields_flat(tuple(element))
+ else:
+ yield element
+
+
+def _izip_fields(iterable):
+ """
+ Returns an iterator of concatenated fields from a sequence of arrays.
+
+ """
+ for element in iterable:
+ if (hasattr(element, '__iter__') and
+ not isinstance(element, str)):
+ yield from _izip_fields(element)
+ elif isinstance(element, np.void) and len(tuple(element)) == 1:
+ # the body is the same as in the branch above; only the condition differs
+ yield from _izip_fields(element)
+ else:
+ yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+ """
+ Returns an iterator of concatenated items from a sequence of arrays.
+
+ Parameters
+ ----------
+ seqarrays : sequence of arrays
+ Sequence of arrays.
+ fill_value : {None, integer}
+ Value used to pad shorter iterables.
+ flatten : {True, False}, optional
+ Whether to collapse nested fields.
+ """
+
+ # Should we flatten the items, or just use a nested approach
+ if flatten:
+ zipfunc = _izip_fields_flat
+ else:
+ zipfunc = _izip_fields
+
+ for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
+ yield tuple(zipfunc(tup))
+
+
+def _fix_output(output, usemask=True, asrecarray=False):
+ """
+ Private function: return a recarray, a ndarray, a MaskedArray
+ or a MaskedRecords depending on the input parameters
+ """
+ if not isinstance(output, MaskedArray):
+ usemask = False
+ if usemask:
+ if asrecarray:
+ output = output.view(MaskedRecords)
+ else:
+ output = ma.filled(output)
+ if asrecarray:
+ output = output.view(recarray)
+ return output
+
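+ # Summary of the mapping above: given a MaskedArray input,
+ # (usemask=True, asrecarray=True) -> MaskedRecords,
+ # (usemask=True, asrecarray=False) -> MaskedArray,
+ # (usemask=False, asrecarray=True) -> recarray,
+ # (usemask=False, asrecarray=False) -> plain ndarray.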
+
+def _fix_defaults(output, defaults=None):
+ """
+ Update the fill_value and masked data of `output`
+ from the defaults given in the dictionary `defaults`.
+ """
+ names = output.dtype.names
+ (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
+ for (k, v) in (defaults or {}).items():
+ if k in names:
+ fill_value[k] = v
+ data[k][mask[k]] = v
+ return output
+
+
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+ usemask=None, asrecarray=None):
+ return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+ usemask=False, asrecarray=False):
+ """
+ Merge arrays field by field.
+
+ Parameters
+ ----------
+ seqarrays : sequence of ndarrays
+ Sequence of arrays
+ fill_value : {float}, optional
+ Filling value used to pad missing data on the shorter arrays.
+ flatten : {False, True}, optional
+ Whether to collapse nested fields.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (MaskedRecords) or not.
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
+ ... np.array([10., 20., 30.])), usemask=False)
+ array([(1, 10.0), (2, 20.0), (-1, 30.0)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
+ ... np.array([10., 20., 30.])),
+ ... usemask=False, asrecarray=True)
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('a', '<i8'), ('f1', '<f8')])
+
+ Notes
+ -----
+ * Without a mask, the missing value will be filled with a default
+ that depends on the corresponding field type:
+
+ * ``-1`` for integers
+ * ``-1.0`` for floating point numbers
+ * ``'-'`` for characters
+ * ``'-1'`` for strings
+ * ``True`` for boolean values
+
+ These default values were determined empirically.
+ """
+ # Only one item in the input sequence?
+ if (len(seqarrays) == 1):
+ seqarrays = np.asanyarray(seqarrays[0])
+ # Do we have a single ndarray as input?
+ if isinstance(seqarrays, (ndarray, np.void)):
+ seqdtype = seqarrays.dtype
+ # Make sure we have named fields
+ if seqdtype.names is None:
+ seqdtype = np.dtype([('', seqdtype)])
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
+ # Minimal processing needed: just make sure everything's a-ok
+ seqarrays = seqarrays.ravel()
+ # Find what type of array we must return
+ if usemask:
+ if asrecarray:
+ seqtype = MaskedRecords
+ else:
+ seqtype = MaskedArray
+ elif asrecarray:
+ seqtype = recarray
+ else:
+ seqtype = ndarray
+ return seqarrays.view(dtype=seqdtype, type=seqtype)
+ else:
+ seqarrays = (seqarrays,)
+ else:
+ # Make sure we have arrays in the input sequence
+ seqarrays = [np.asanyarray(_m) for _m in seqarrays]
+ # Find the sizes of the inputs and their maximum
+ sizes = tuple(a.size for a in seqarrays)
+ maxlength = max(sizes)
+ # Get the dtype of the output (flattening if needed)
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
+ # Initialize the sequences for data and mask
+ seqdata = []
+ seqmask = []
+ # If we expect some kind of MaskedArray, make a special loop.
+ if usemask:
+ for (a, n) in zip(seqarrays, sizes):
+ nbmissing = (maxlength - n)
+ # Get the data and mask
+ data = a.ravel().__array__()
+ mask = ma.getmaskarray(a).ravel()
+ # Get the filling value (if needed)
+ if nbmissing:
+ fval = _check_fill_value(fill_value, a.dtype)
+ if isinstance(fval, (ndarray, np.void)):
+ if len(fval.dtype) == 1:
+ fval = fval.item()[0]
+ fmsk = True
+ else:
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
+ fmsk = np.ones((1,), dtype=mask.dtype)
+ else:
+ fval = None
+ fmsk = True
+ # Store an iterator padding the input to the expected length
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
+ seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
+ # Create an iterator for the data
+ data = tuple(_izip_records(seqdata, flatten=flatten))
+ output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
+ mask=list(_izip_records(seqmask, flatten=flatten)))
+ if asrecarray:
+ output = output.view(MaskedRecords)
+ else:
+ # Same as before, but without the mask we don't need...
+ for (a, n) in zip(seqarrays, sizes):
+ nbmissing = (maxlength - n)
+ data = a.ravel().__array__()
+ if nbmissing:
+ fval = _check_fill_value(fill_value, a.dtype)
+ if isinstance(fval, (ndarray, np.void)):
+ if len(fval.dtype) == 1:
+ fval = fval.item()[0]
+ else:
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
+ else:
+ fval = None
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
+ dtype=newdtype, count=maxlength)
+ if asrecarray:
+ output = output.view(recarray)
+ # And we're done...
+ return output
+
+
+def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
+ return (base,)
+
+
+@array_function_dispatch(_drop_fields_dispatcher)
+def drop_fields(base, drop_names, usemask=True, asrecarray=False):
+ """
+ Return a new array with fields in `drop_names` dropped.
+
+ Nested fields are supported.
+
+ .. versionchanged:: 1.18.0
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
+ rather than returning ``None`` as it did previously.
+
+ Parameters
+ ----------
+ base : array
+ Input array
+ drop_names : string or sequence
+ String or sequence of strings corresponding to the names of the
+ fields to drop.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
+ a plain ndarray or masked array with flexible dtype. The default
+ is False.
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
+ >>> rfn.drop_fields(a, 'a')
+ array([((2., 3),), ((5., 6),)],
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
+ >>> rfn.drop_fields(a, 'ba')
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
+ >>> rfn.drop_fields(a, ['ba', 'bb'])
+ array([(1,), (4,)], dtype=[('a', '<i8')])
+ """
+ if _is_string_like(drop_names):
+ drop_names = [drop_names]
+ else:
+ drop_names = set(drop_names)
+
+ def _drop_descr(ndtype, drop_names):
+ names = ndtype.names
+ newdtype = []
+ for name in names:
+ current = ndtype[name]
+ if name in drop_names:
+ continue
+ if current.names is not None:
+ descr = _drop_descr(current, drop_names)
+ if descr:
+ newdtype.append((name, descr))
+ else:
+ newdtype.append((name, current))
+ return newdtype
+
+ newdtype = _drop_descr(base.dtype, drop_names)
+
+ output = np.empty(base.shape, dtype=newdtype)
+ output = recursive_fill_fields(base, output)
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
+ """
+ Return a new array keeping only the fields in `keep_names`,
+ and preserving the order of those fields.
+
+ Parameters
+ ----------
+ base : array
+ Input array
+ keep_names : string or sequence
+ String or sequence of strings corresponding to the names of the
+ fields to keep. Order of the names will be preserved.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
+ a plain ndarray or masked array with flexible dtype. The default
+ is False.
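+
+ Examples
+ --------
+ A minimal illustration of this private helper (hypothetical field names):
+
+ >>> a = np.array([(1, 10., 'A')],
+ ... dtype=[('a', np.int64), ('b', np.float64), ('c', 'U1')])
+ >>> _keep_fields(a, ['c', 'a'], usemask=False)
+ array([('A', 1)], dtype=[('c', '<U1'), ('a', '<i8')])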
+ """
+ newdtype = [(n, base.dtype[n]) for n in keep_names]
+ output = np.empty(base.shape, dtype=newdtype)
+ output = recursive_fill_fields(base, output)
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _rec_drop_fields_dispatcher(base, drop_names):
+ return (base,)
+
+
+@array_function_dispatch(_rec_drop_fields_dispatcher)
+def rec_drop_fields(base, drop_names):
+ """
+ Returns a new numpy.recarray with fields in `drop_names` dropped.
+ """
+ return drop_fields(base, drop_names, usemask=False, asrecarray=True)
+
+
+def _rename_fields_dispatcher(base, namemapper):
+ return (base,)
+
+
+@array_function_dispatch(_rename_fields_dispatcher)
+def rename_fields(base, namemapper):
+ """
+ Rename the fields from a flexible-datatype ndarray or recarray.
+
+ Nested fields are supported.
+
+ Parameters
+ ----------
+ base : ndarray
+ Input array whose fields must be modified.
+ namemapper : dictionary
+ Dictionary mapping old field names to their new version.
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
+ ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
+ >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
+
+ """
+ def _recursive_rename_fields(ndtype, namemapper):
+ newdtype = []
+ for name in ndtype.names:
+ newname = namemapper.get(name, name)
+ current = ndtype[name]
+ if current.names is not None:
+ newdtype.append(
+ (newname, _recursive_rename_fields(current, namemapper))
+ )
+ else:
+ newdtype.append((newname, current))
+ return newdtype
+ newdtype = _recursive_rename_fields(base.dtype, namemapper)
+ return base.view(newdtype)
+
+
+def _append_fields_dispatcher(base, names, data, dtypes=None,
+ fill_value=None, usemask=None, asrecarray=None):
+ yield base
+ yield from data
+
+
+@array_function_dispatch(_append_fields_dispatcher)
+def append_fields(base, names, data, dtypes=None,
+ fill_value=-1, usemask=True, asrecarray=False):
+ """
+ Add new fields to an existing array.
+
+ The names of the new fields are given with the `names` argument,
+ the corresponding values with the `data` argument.
+ If a single field is appended, `names`, `data` and `dtypes` do not have
+ to be lists but just values.
+
+ Parameters
+ ----------
+ base : array
+ Input array to extend.
+ names : string, sequence
+ String or sequence of strings corresponding to the names
+ of the new fields.
+ data : array or sequence of arrays
+ Array or sequence of arrays storing the fields to add to the base.
+ dtypes : sequence of datatypes, optional
+ Datatype or sequence of datatypes.
+ If None, the datatypes are estimated from the `data`.
+ fill_value : {float}, optional
+ Filling value used to pad missing data on the shorter arrays.
+ usemask : {False, True}, optional
+ Whether to return a masked array or not.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (MaskedRecords) or not.
+
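+ Examples
+ --------
+ A minimal illustration (hypothetical field names), appending a single
+ floating-point field ``'b'`` to a one-field base array:
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.array([(1,), (2,), (3,)], dtype=[('a', np.int64)])
+ >>> rfn.append_fields(a, 'b', np.array([10., 20., 30.]), usemask=False)
+ array([(1, 10.), (2, 20.), (3, 30.)], dtype=[('a', '<i8'), ('b', '<f8')])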
+ """
+ # Check the names
+ if isinstance(names, (tuple, list)):
+ if len(names) != len(data):
+ msg = "The number of arrays does not match the number of names"
+ raise ValueError(msg)
+ elif isinstance(names, str):
+ names = [names, ]
+ data = [data, ]
+ #
+ if dtypes is None:
+ data = [np.array(a, copy=False, subok=True) for a in data]
+ data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
+ else:
+ if not isinstance(dtypes, (tuple, list)):
+ dtypes = [dtypes, ]
+ if len(data) != len(dtypes):
+ if len(dtypes) == 1:
+ dtypes = dtypes * len(data)
+ else:
+ msg = "The dtypes argument must be None, a dtype, or a list."
+ raise ValueError(msg)
+ data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
+ for (a, n, d) in zip(data, names, dtypes)]
+ #
+ base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
+ if len(data) > 1:
+ data = merge_arrays(data, flatten=True, usemask=usemask,
+ fill_value=fill_value)
+ else:
+ data = data.pop()
+ #
+ output = ma.masked_all(
+ max(len(base), len(data)),
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
+ output = recursive_fill_fields(base, output)
+ output = recursive_fill_fields(data, output)
+ #
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
+ yield base
+ yield from data
+
+
+@array_function_dispatch(_rec_append_fields_dispatcher)
+def rec_append_fields(base, names, data, dtypes=None):
+ """
+ Add new fields to an existing array.
+
+ The names of the new fields are given with the `names` argument,
+ the corresponding values with the `data` argument.
+ If a single field is appended, `names`, `data` and `dtypes` do not have
+ to be lists but just values.
+
+ Parameters
+ ----------
+ base : array
+ Input array to extend.
+ names : string, sequence
+ String or sequence of strings corresponding to the names
+ of the new fields.
+ data : array or sequence of arrays
+ Array or sequence of arrays storing the fields to add to the base.
+ dtypes : sequence of datatypes, optional
+ Datatype or sequence of datatypes.
+ If None, the datatypes are estimated from the `data`.
+
+ See Also
+ --------
+ append_fields
+
+ Returns
+ -------
+ appended_array : np.recarray
+ """
+ return append_fields(base, names, data=data, dtypes=dtypes,
+ asrecarray=True, usemask=False)
+
+
+def _repack_fields_dispatcher(a, align=None, recurse=None):
+ return (a,)
+
+
+@array_function_dispatch(_repack_fields_dispatcher)
+def repack_fields(a, align=False, recurse=False):
+ """
+ Re-pack the fields of a structured array or dtype in memory.
+
+ The memory layout of structured datatypes allows fields at arbitrary
+ byte offsets. This means the fields can be separated by padding bytes,
+ their offsets can be non-monotonically increasing, and they can overlap.
+
+ This method removes any overlaps and reorders the fields in memory so they
+ have increasing byte offsets, and adds or removes padding bytes depending
+ on the `align` option, which behaves like the `align` option to
+ `numpy.dtype`.
+
+ If `align=False`, this method produces a "packed" memory layout in which
+ each field starts at the byte the previous field ended, and any padding
+ bytes are removed.
+
+ If `align=True`, this method produces an "aligned" memory layout in which
+ each field's offset is a multiple of its alignment, and the total itemsize
+ is a multiple of the largest alignment, by adding padding bytes as needed.
+
+ Parameters
+ ----------
+ a : ndarray or dtype
+ array or dtype for which to repack the fields.
+ align : boolean
+ If true, use an "aligned" memory layout, otherwise use a "packed" layout.
+ recurse : boolean
+ If True, also repack nested structures.
+
+ Returns
+ -------
+ repacked : ndarray or dtype
+ Copy of `a` with fields repacked, or `a` itself if no repacking was
+ needed.
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> def print_offsets(d):
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
+ ... print("itemsize:", d.itemsize)
+ ...
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
+ >>> dt
+ dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
+'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
+ >>> print_offsets(dt)
+ offsets: [0, 8, 16]
+ itemsize: 24
+ >>> packed_dt = rfn.repack_fields(dt)
+ >>> packed_dt
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
+ >>> print_offsets(packed_dt)
+ offsets: [0, 1, 9]
+ itemsize: 17
+
+ """
+ if not isinstance(a, np.dtype):
+ dt = repack_fields(a.dtype, align=align, recurse=recurse)
+ return a.astype(dt, copy=False)
+
+ if a.names is None:
+ return a
+
+ fieldinfo = []
+ for name in a.names:
+ tup = a.fields[name]
+ if recurse:
+ fmt = repack_fields(tup[0], align=align, recurse=True)
+ else:
+ fmt = tup[0]
+
+ if len(tup) == 3:
+ name = (tup[2], name)
+
+ fieldinfo.append((name, fmt))
+
+ dt = np.dtype(fieldinfo, align=align)
+ return np.dtype((a.type, dt))
+
+def _get_fields_and_offsets(dt, offset=0):
+ """
+ Returns a flat list of (dtype, count, offset) tuples of all the
+ scalar fields in the dtype "dt", including nested fields, in left
+ to right order.
+ """
+
+ # counts up elements in subarrays, including nested subarrays, and returns
+ # base dtype and count
+ def count_elem(dt):
+ count = 1
+ while dt.shape != ():
+ for size in dt.shape:
+ count *= size
+ dt = dt.base
+ return dt, count
+
+ fields = []
+ for name in dt.names:
+ field = dt.fields[name]
+ f_dt, f_offset = field[0], field[1]
+ f_dt, n = count_elem(f_dt)
+
+ if f_dt.names is None:
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
+ else:
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+ size = f_dt.itemsize
+
+ for i in range(n):
+ if i == 0:
+ # optimization: avoid list comprehension if no subarray
+ fields.extend(subfields)
+ else:
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
+ return fields
+
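+ # A minimal sketch of the flattening above (hypothetical dtype): for
+ # dt = np.dtype([('a', 'i4'), ('b', 'f4', (2,))]) this returns
+ # [(dtype(('<i4', (1,))), 1, 0), (dtype(('<f4', (2,))), 2, 4)], i.e. one
+ # (subarray dtype, element count, byte offset) triple per scalar field.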
+
+def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
+ casting=None):
+ return (arr,)
+
+@array_function_dispatch(_structured_to_unstructured_dispatcher)
+def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
+ """
+ Converts an n-D structured array into an (n+1)-D unstructured array.
+
+ The new array will have a new last dimension equal in size to the
+ number of field-elements of the input array. If not supplied, the output
+ datatype is determined from the numpy type promotion rules applied to all
+ the field datatypes.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ as single field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Structured array or dtype to convert. Cannot contain object datatype.
+ dtype : dtype, optional
+ The dtype of the output unstructured array.
+ copy : bool, optional
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
+
+ Returns
+ -------
+ unstructured : ndarray
+ Unstructured array with one more dimension.
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a
+ array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+ (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+ >>> rfn.structured_to_unstructured(a)
+ array([[0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.]])
+
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ fields = _get_fields_and_offsets(arr.dtype)
+ n_fields = len(fields)
+ if n_fields == 0 and dtype is None:
+ raise ValueError("arr has no fields. Unable to guess dtype")
+ elif n_fields == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("arr with no fields is not supported")
+
+ dts, counts, offsets = zip(*fields)
+ names = ['f{}'.format(n) for n in range(n_fields)]
+
+ if dtype is None:
+ out_dtype = np.result_type(*[dt.base for dt in dts])
+ else:
+ out_dtype = dtype
+
+ # Use a series of views and casts to convert to an unstructured array:
+
+ # first view using flattened fields (doesn't work for object arrays)
+ # Note: dts may include a shape for subarrays
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': arr.dtype.itemsize})
+ arr = arr.view(flattened_fields)
+
+ # next cast to a packed format with all fields converted to new dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
+ arr = arr.astype(packed_fields, copy=copy, casting=casting)
+
+ # finally is it safe to view the packed fields as the unstructured type
+ return arr.view((out_dtype, (sum(counts),)))
+
+
+def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
+ align=None, copy=None, casting=None):
+ return (arr,)
+
+@array_function_dispatch(_unstructured_to_structured_dispatcher)
+def unstructured_to_structured(arr, dtype=None, names=None, align=False,
+ copy=False, casting='unsafe'):
+ """
+ Converts an n-D unstructured array into an (n-1)-D structured array.
+
+ The last dimension of the input array is converted into a structure, with
+ number of field-elements equal to the size of the last dimension of the
+ input array. By default all output fields have the input array's dtype, but
+ an output structured dtype with an equal number of fields-elements can be
+ supplied instead.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ towards the number of field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Unstructured array or dtype to convert.
+ dtype : dtype, optional
+ The structured dtype of the output array
+ names : list of strings, optional
+ If dtype is not supplied, this specifies the field names for the output
+ dtype, in order. The field dtypes will be the same as the input array.
+ align : boolean, optional
+ Whether to create an aligned memory layout.
+ copy : bool, optional
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
+ copy. If false, and `dtype` requirements are satisfied, a view is
+ returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
+ data casting may occur.
+
+ Returns
+ -------
+ structured : ndarray
+ Structured array with fewer dimensions.
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a = np.arange(20).reshape((4,5))
+ >>> a
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]])
+ >>> rfn.unstructured_to_structured(a, dt)
+ array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+
+ """
+ if arr.shape == ():
+ raise ValueError('arr must have at least one dimension')
+ n_elem = arr.shape[-1]
+ if n_elem == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("last axis with size 0 is not supported")
+
+ if dtype is None:
+ if names is None:
+ names = ['f{}'.format(n) for n in range(n_elem)]
+ out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
+ fields = _get_fields_and_offsets(out_dtype)
+ dts, counts, offsets = zip(*fields)
+ else:
+ if names is not None:
+ raise ValueError("don't supply both dtype and names")
+ # if dtype is the args of np.dtype, construct it
+ dtype = np.dtype(dtype)
+ # sanity check of the input dtype
+ fields = _get_fields_and_offsets(dtype)
+ if len(fields) == 0:
+ dts, counts, offsets = [], [], []
+ else:
+ dts, counts, offsets = zip(*fields)
+
+ if n_elem != sum(counts):
+ raise ValueError('The length of the last dimension of arr must '
+ 'be equal to the number of fields in dtype')
+ out_dtype = dtype
+ if align and not out_dtype.isalignedstruct:
+ raise ValueError("align was True but dtype is not aligned")
+
+ names = ['f{}'.format(n) for n in range(len(fields))]
+
+ # Use a series of views and casts to convert to a structured array:
+
+ # first view as a packed structured array of one dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
+ arr = np.ascontiguousarray(arr).view(packed_fields)
+
+ # next cast to an unpacked but flattened format with varied dtypes
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': out_dtype.itemsize})
+ arr = arr.astype(flattened_fields, copy=copy, casting=casting)
+
+ # finally view as the final nested dtype and remove the last axis
+ return arr.view(out_dtype)[..., 0]
+
+def _apply_along_fields_dispatcher(func, arr):
+ return (arr,)
+
+@array_function_dispatch(_apply_along_fields_dispatcher)
+def apply_along_fields(func, arr):
+ """
+ Apply function 'func' as a reduction across fields of a structured array.
+
+ This is similar to `apply_along_axis`, but treats the fields of a
+ structured array as an extra axis. The fields are all first cast to a
+ common type following the type-promotion rules from `numpy.result_type`
+ applied to the field's dtypes.
+
+ Parameters
+ ----------
+ func : function
+ Function to apply on the "field" dimension. This function must
+ support an `axis` argument, like np.mean, np.sum, etc.
+ arr : ndarray
+ Structured array for which to apply func.
+
+ Returns
+ -------
+ out : ndarray
+ Result of the reduction operation
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> rfn.apply_along_fields(np.mean, b)
+ array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ uarr = structured_to_unstructured(arr)
+ return func(uarr, axis=-1)
+ # works and avoids axis requirement, but very, very slow:
+ #return np.apply_along_axis(func, -1, uarr)
+
+def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
+ return dst, src
+
+@array_function_dispatch(_assign_fields_by_name_dispatcher)
+def assign_fields_by_name(dst, src, zero_unassigned=True):
+ """
+ Assigns values from one structured array to another by field name.
+
+ Normally in numpy >= 1.14, assignment of one structured array to another
+ copies fields "by position", meaning that the first field from the src is
+ copied to the first field of the dst, and so on, regardless of field name.
+
+ This function instead copies "by field name", such that fields in the dst
+ are assigned from the identically named field in the src. This applies
+ recursively for nested structures. This is how structure assignment worked
+ in numpy >= 1.6 to <= 1.13.
+
+ Parameters
+ ----------
+ dst : ndarray
+ src : ndarray
+ The destination and source arrays of the assignment, respectively.
+ zero_unassigned : bool, optional
+ If True, fields in the dst for which there was no matching
+ field in the src are filled with the value 0 (zero). This
+ was the behavior of numpy <= 1.13. If False, those fields
+ are not modified.
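+
+ Examples
+ --------
+ A minimal illustration (hypothetical field names); fields are matched by
+ name even though their order differs between the two arrays:
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> dst = np.zeros(2, dtype=[('a', 'i4'), ('b', 'f4')])
+ >>> src = np.array([(1., 10), (2., 20)], dtype=[('b', 'f4'), ('a', 'i4')])
+ >>> rfn.assign_fields_by_name(dst, src)
+ >>> dst
+ array([(10, 1.), (20, 2.)], dtype=[('a', '<i4'), ('b', '<f4')])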
+ """
+
+ if dst.dtype.names is None:
+ dst[...] = src
+ return
+
+ for name in dst.dtype.names:
+ if name not in src.dtype.names:
+ if zero_unassigned:
+ dst[name] = 0
+ else:
+ assign_fields_by_name(dst[name], src[name],
+ zero_unassigned)
+
+def _require_fields_dispatcher(array, required_dtype):
+ return (array,)
+
+@array_function_dispatch(_require_fields_dispatcher)
+def require_fields(array, required_dtype):
+ """
+ Casts a structured array to a new dtype using assignment by field-name.
+
+ This function assigns from the old to the new array by name, so the
+ value of a field in the output array is the value of the field with the
+ same name in the source array. This has the effect of creating a new
+ ndarray containing only the fields "required" by the required_dtype.
+
+ If a field name in the required_dtype does not exist in the
+ input array, that field is created and set to 0 in the output array.
+
+ Parameters
+ ----------
+ a : ndarray
+ array to cast
+ required_dtype : dtype
+ datatype for output array
+
+ Returns
+ -------
+ out : ndarray
+ array with the new dtype, with field values copied from the fields in
+ the input array with the same name
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
+ dtype=[('b', '<f4'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
+ dtype=[('b', '<f4'), ('newf', 'u1')])
+
+ """
+ out = np.empty(array.shape, dtype=required_dtype)
+ assign_fields_by_name(out, array)
+ return out
+
+
+def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+ asrecarray=None, autoconvert=None):
+ return arrays
+
+
+@array_function_dispatch(_stack_arrays_dispatcher)
+def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
+ autoconvert=False):
+ """
+ Superposes arrays field by field.
+
+ Parameters
+ ----------
+ arrays : array or sequence
+ Sequence of input arrays.
+ defaults : dictionary, optional
+ Dictionary mapping field names to the corresponding default values.
+ usemask : {True, False}, optional
+ Whether to return a MaskedArray (or MaskedRecords if
+ `asrecarray==True`) or a ndarray.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
+ autoconvert : {False, True}, optional
+ Whether to automatically cast the type of the field to the maximum.
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> x = np.array([1, 2,])
+ >>> rfn.stack_arrays(x) is x
+ True
+ >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
+ >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+ ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
+ >>> test = rfn.stack_arrays((z,zz))
+ >>> test
+ masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+ (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+ mask=[(False, False, True), (False, False, True),
+ (False, False, False), (False, False, False),
+ (False, False, False)],
+ fill_value=(b'N/A', 1.e+20, 1.e+20),
+ dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
+
+ """
+ if isinstance(arrays, ndarray):
+ return arrays
+ elif len(arrays) == 1:
+ return arrays[0]
+ seqarrays = [np.asanyarray(a).ravel() for a in arrays]
+ nrecords = [len(a) for a in seqarrays]
+ ndtype = [a.dtype for a in seqarrays]
+ fldnames = [d.names for d in ndtype]
+ #
+ dtype_l = ndtype[0]
+ newdescr = _get_fieldspec(dtype_l)
+ names = [n for n, d in newdescr]
+ for dtype_n in ndtype[1:]:
+ for fname, fdtype in _get_fieldspec(dtype_n):
+ if fname not in names:
+ newdescr.append((fname, fdtype))
+ names.append(fname)
+ else:
+ nameidx = names.index(fname)
+ _, cdtype = newdescr[nameidx]
+ if autoconvert:
+ newdescr[nameidx] = (fname, max(fdtype, cdtype))
+ elif fdtype != cdtype:
+ raise TypeError("Incompatible type '%s' <> '%s'" %
+ (cdtype, fdtype))
+ # Only one field: use concatenate
+ if len(newdescr) == 1:
+ output = ma.concatenate(seqarrays)
+ else:
+ #
+ output = ma.masked_all((np.sum(nrecords),), newdescr)
+ offset = np.cumsum(np.r_[0, nrecords])
+ seen = []
+ for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
+ names = a.dtype.names
+ if names is None:
+ output['f%i' % len(seen)][i:j] = a
+ else:
+ for name in n:
+ output[name][i:j] = a[name]
+ if name not in seen:
+ seen.append(name)
+ #
+ return _fix_output(_fix_defaults(output, defaults),
+ usemask=usemask, asrecarray=asrecarray)
+
+
+def _find_duplicates_dispatcher(
+ a, key=None, ignoremask=None, return_index=None):
+ return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
+def find_duplicates(a, key=None, ignoremask=True, return_index=False):
+ """
+ Find the duplicates in a structured array along a given key
+
+ Parameters
+ ----------
+ a : array-like
+ Input array
+ key : {string, None}, optional
+ Name of the fields along which to check the duplicates.
+ If None, the search is performed by records
+ ignoremask : {True, False}, optional
+ Whether masked data should be discarded or considered as duplicates.
+ return_index : {False, True}, optional
+ Whether to return the indices of the duplicated values.
+
+ Examples
+ --------
+ >>> from numpy.lib import recfunctions as rfn
+ >>> ndtype = [('a', int)]
+ >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
+ ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+ >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
+ (masked_array(data=[(1,), (1,), (2,), (2,)],
+ mask=[(False,), (False,), (False,), (False,)],
+ fill_value=(999999,),
+ dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
+ """
+ a = np.asanyarray(a).ravel()
+ # Get a dictionary of fields
+ fields = get_fieldstructure(a.dtype)
+ # Get the sorting data (by selecting the corresponding field)
+ base = a
+ if key:
+ for f in fields[key]:
+ base = base[f]
+ base = base[key]
+ # Get the sorting indices and the sorted data
+ sortidx = base.argsort()
+ sortedbase = base[sortidx]
+ sorteddata = sortedbase.filled()
+ # Compare the sorting data
+ flag = (sorteddata[:-1] == sorteddata[1:])
+ # If masked data must be ignored, set the flag to false where needed
+ if ignoremask:
+ sortedmask = sortedbase.recordmask
+ flag[sortedmask[1:]] = False
+ flag = np.concatenate(([False], flag))
+ # We need to take the point on the left as well (else we're missing it)
+ flag[:-1] = flag[:-1] + flag[1:]
+ duplicates = a[sortidx][flag]
+ if return_index:
+ return (duplicates, sortidx[flag])
+ else:
+ return duplicates
+
+
+def _join_by_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None, usemask=None, asrecarray=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_join_by_dispatcher)
+def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+ defaults=None, usemask=True, asrecarray=False):
+ """
+ Join arrays `r1` and `r2` on key `key`.
+
+ The key should be either a string or a sequence of strings corresponding
+ to the fields used to join the array. An exception is raised if the
+ `key` field cannot be found in the two input arrays. Neither `r1` nor
+ `r2` should have any duplicates along `key`: the presence of duplicates
+ will make the output quite unreliable. Note that duplicates are not
+ looked for by the algorithm.
+
+ Parameters
+ ----------
+ key : {string, sequence}
+ A string or a sequence of strings corresponding to the fields used
+ for comparison.
+ r1, r2 : arrays
+ Structured arrays.
+ jointype : {'inner', 'outer', 'leftouter'}, optional
+ If 'inner', returns the elements common to both r1 and r2.
+ If 'outer', returns the common elements as well as the elements of
+ r1 not in r2 and the elements of r2 not in r1.
+ If 'leftouter', returns the common elements and the elements of r1
+ not in r2.
+ r1postfix : string, optional
+ String appended to the names of the fields of r1 that are present
+ in r2 but absent from the key.
+ r2postfix : string, optional
+ String appended to the names of the fields of r2 that are present
+ in r1 but absent from the key.
+ defaults : {dictionary}, optional
+ Dictionary mapping field names to the corresponding default values.
+ usemask : {True, False}, optional
+ Whether to return a MaskedArray (or MaskedRecords if
+ `asrecarray==True`) or a ndarray.
+ asrecarray : {False, True}, optional
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
+ or just a flexible-type ndarray.
+
+ Notes
+ -----
+ * The output is sorted along the key.
+ * A temporary array is formed by dropping the fields not in the key for
+ the two arrays and concatenating the result. This array is then
+ sorted, and the common entries selected. The output is constructed by
+ filling the fields with the selected entries. Matching is not
+ preserved if there are some duplicates...
+
+ """
+ # Check jointype
+ if jointype not in ('inner', 'outer', 'leftouter'):
+ raise ValueError(
+ "The 'jointype' argument should be in 'inner', "
+ "'outer' or 'leftouter' (got '%s' instead)" % jointype
+ )
+ # If we have a single key, put it in a tuple
+ if isinstance(key, str):
+ key = (key,)
+
+ # Check the keys
+ if len(set(key)) != len(key):
+ dup = next(x for n,x in enumerate(key) if x in key[n+1:])
+ raise ValueError("duplicate join key %r" % dup)
+ for name in key:
+ if name not in r1.dtype.names:
+ raise ValueError('r1 does not have key field %r' % name)
+ if name not in r2.dtype.names:
+ raise ValueError('r2 does not have key field %r' % name)
+
+ # Make sure we work with ravelled arrays
+ r1 = r1.ravel()
+ r2 = r2.ravel()
+ # Fixme: nb2 below is never used. Commenting out for pyflakes.
+ # (nb1, nb2) = (len(r1), len(r2))
+ nb1 = len(r1)
+ (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
+
+ # Check the names for collision
+ collisions = (set(r1names) & set(r2names)) - set(key)
+ if collisions and not (r1postfix or r2postfix):
+ msg = "r1 and r2 contain common names, r1postfix and r2postfix "
+ msg += "can't both be empty"
+ raise ValueError(msg)
+
+ # Make temporary arrays of just the keys
+ # (use order of keys in `r1` for back-compatibility)
+ key1 = [n for n in r1names if n in key]
+ r1k = _keep_fields(r1, key1)
+ r2k = _keep_fields(r2, key1)
+
+ # Concatenate the two arrays for comparison
+ aux = ma.concatenate((r1k, r2k))
+ idx_sort = aux.argsort(order=key)
+ aux = aux[idx_sort]
+ #
+ # Get the common keys
+ flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
+ flag_in[:-1] = flag_in[1:] + flag_in[:-1]
+ idx_in = idx_sort[flag_in]
+ idx_1 = idx_in[(idx_in < nb1)]
+ idx_2 = idx_in[(idx_in >= nb1)] - nb1
+ (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+ if jointype == 'inner':
+ (r1spc, r2spc) = (0, 0)
+ elif jointype == 'outer':
+ idx_out = idx_sort[~flag_in]
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+ idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+ elif jointype == 'leftouter':
+ idx_out = idx_sort[~flag_in]
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+ # Select the entries from each input
+ (s1, s2) = (r1[idx_1], r2[idx_2])
+ #
+ # Build the new description of the output array .......
+ # Start with the key fields
+ ndtype = _get_fieldspec(r1k.dtype)
+
+ # Add the fields from r1
+ for fname, fdtype in _get_fieldspec(r1.dtype):
+ if fname not in key:
+ ndtype.append((fname, fdtype))
+
+ # Add the fields from r2
+ for fname, fdtype in _get_fieldspec(r2.dtype):
+ # Have we seen the current name already?
+ # we need to rebuild this list every time
+ names = list(name for name, dtype in ndtype)
+ try:
+ nameidx = names.index(fname)
+ except ValueError:
+ #... we haven't: just add the description to the current list
+ ndtype.append((fname, fdtype))
+ else:
+ # collision
+ _, cdtype = ndtype[nameidx]
+ if fname in key:
+ # The current field is part of the key: take the largest dtype
+ ndtype[nameidx] = (fname, max(fdtype, cdtype))
+ else:
+ # The current field is not part of the key: add the suffixes,
+ # and place the new field adjacent to the old one
+ ndtype[nameidx:nameidx + 1] = [
+ (fname + r1postfix, cdtype),
+ (fname + r2postfix, fdtype)
+ ]
+ # Rebuild a dtype from the new fields
+ ndtype = np.dtype(ndtype)
+ # Find the largest number of common fields:
+ # r1cmn and r2cmn should be equal, but...
+ cmn = max(r1cmn, r2cmn)
+ # Construct an empty array
+ output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
+ names = output.dtype.names
+ for f in r1names:
+ selected = s1[f]
+ if f not in names or (f in r2names and not r2postfix and f not in key):
+ f += r1postfix
+ current = output[f]
+ current[:r1cmn] = selected[:r1cmn]
+ if jointype in ('outer', 'leftouter'):
+ current[cmn:cmn + r1spc] = selected[r1cmn:]
+ for f in r2names:
+ selected = s2[f]
+ if f not in names or (f in r1names and not r1postfix and f not in key):
+ f += r2postfix
+ current = output[f]
+ current[:r2cmn] = selected[:r2cmn]
+ if (jointype == 'outer') and r2spc:
+ current[-r2spc:] = selected[r2cmn:]
+ # Sort and finalize the output
+ output.sort(order=key)
+ kwargs = dict(usemask=usemask, asrecarray=asrecarray)
+ return _fix_output(_fix_defaults(output, defaults), **kwargs)
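+
+# Minimal usage sketch (illustrative comment, not part of the upstream
+# module), assuming two structured arrays that share only the key field:
+#
+# >>> import numpy as np
+# >>> from numpy.lib import recfunctions as rfn
+# >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
+# ...              dtype=[('key', 'i4'), ('x', 'f8')])
+# >>> b = np.array([(2, 200.), (3, 300.), (4, 400.)],
+# ...              dtype=[('key', 'i4'), ('y', 'f8')])
+# >>> rfn.join_by('key', a, b, jointype='inner', usemask=False)
+# array([(2, 20., 200.), (3, 30., 300.)],
+#       dtype=[('key', '<i4'), ('x', '<f8'), ('y', '<f8')])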
+
+
+def _rec_join_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
+def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+ defaults=None):
+ """
+ Join arrays `r1` and `r2` on keys.
+ Alternative to join_by that always returns a np.recarray.
+
+ See Also
+ --------
+ join_by : equivalent function
+ """
+ kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
+ defaults=defaults, usemask=False, asrecarray=True)
+ return join_by(key, r1, r2, **kwargs)
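+
+# Usage sketch (illustrative comment, not part of the upstream module):
+# rec_join returns a plain np.recarray, so joined fields also support
+# attribute access:
+#
+# >>> rfn.rec_join('key', a, b).y   # with a, b as in the join_by sketch
+# array([200., 300.])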
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/scimath.py b/venv/lib/python3.9/site-packages/numpy/lib/scimath.py
new file mode 100644
index 00000000..b7ef0d71
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/scimath.py
@@ -0,0 +1,625 @@
+"""
+Wrapper functions for more user-friendly calling of certain math functions
+whose output data-type is different than the input data-type in certain
+domains of the input.
+
+For example, for functions like `log` with branch cuts, the versions in this
+module provide the mathematically valid answers in the complex plane::
+
+ >>> import math
+ >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
+ True
+
+Similarly, `sqrt`, other base logarithms, `power` and trig functions are
+correctly handled. See their respective docstrings for specific examples.
+
+Functions
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ sqrt
+ log
+ log2
+ logn
+ log10
+ power
+ arccos
+ arcsin
+ arctanh
+
+"""
+import numpy.core.numeric as nx
+import numpy.core.numerictypes as nt
+from numpy.core.numeric import asarray, any
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib.type_check import isreal
+
+
+__all__ = [
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
+ 'arctanh'
+ ]
+
+
+_ln2 = nx.log(2.0)
+
+
+def _tocomplex(arr):
+ """Convert its input `arr` to a complex array.
+
+ The input is returned as a complex array of the smallest type that will fit
+ the original data: types like single, byte, short, etc. become csingle,
+ while others become cdouble.
+
+ A copy of the input is always made.
+
+ Parameters
+ ----------
+ arr : array
+
+ Returns
+ -------
+ array
+ An array with the same data as the input, but in complex form.
+
+ Examples
+ --------
+
+ First, consider an input of type short:
+
+ >>> a = np.array([1,2,3],np.short)
+
+ >>> ac = np.lib.scimath._tocomplex(a); ac
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+
+ >>> ac.dtype
+ dtype('complex64')
+
+ If the input is of type double, the output is correspondingly of the
+ complex double type as well:
+
+ >>> b = np.array([1,2,3],np.double)
+
+ >>> bc = np.lib.scimath._tocomplex(b); bc
+ array([1.+0.j, 2.+0.j, 3.+0.j])
+
+ >>> bc.dtype
+ dtype('complex128')
+
+ Note that even if the input was complex to begin with, a copy is still
+ made, since the astype() method always copies:
+
+ >>> c = np.array([1,2,3],np.csingle)
+
+ >>> cc = np.lib.scimath._tocomplex(c); cc
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+
+ >>> c *= 2; c
+ array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
+
+ >>> cc
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+ """
+ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
+ nt.ushort, nt.csingle)):
+ return arr.astype(nt.csingle)
+ else:
+ return arr.astype(nt.cdouble)
+
+
+def _fix_real_lt_zero(x):
+ """Convert `x` to complex if it has real, negative components.
+
+ Otherwise, output is just the array version of the input (via asarray).
+
+ Parameters
+ ----------
+ x : array_like
+
+ Returns
+ -------
+ array
+
+ Examples
+ --------
+ >>> np.lib.scimath._fix_real_lt_zero([1,2])
+ array([1, 2])
+
+ >>> np.lib.scimath._fix_real_lt_zero([-1,2])
+ array([-1.+0.j, 2.+0.j])
+
+ """
+ x = asarray(x)
+ if any(isreal(x) & (x < 0)):
+ x = _tocomplex(x)
+ return x
+
+
+def _fix_int_lt_zero(x):
+ """Convert `x` to double if it has real, negative components.
+
+ Otherwise, output is just the array version of the input (via asarray).
+
+ Parameters
+ ----------
+ x : array_like
+
+ Returns
+ -------
+ array
+
+ Examples
+ --------
+ >>> np.lib.scimath._fix_int_lt_zero([1,2])
+ array([1, 2])
+
+ >>> np.lib.scimath._fix_int_lt_zero([-1,2])
+ array([-1., 2.])
+ """
+ x = asarray(x)
+ if any(isreal(x) & (x < 0)):
+ x = x * 1.0
+ return x
+
+
+def _fix_real_abs_gt_1(x):
+ """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
+
+ Otherwise, output is just the array version of the input (via asarray).
+
+ Parameters
+ ----------
+ x : array_like
+
+ Returns
+ -------
+ array
+
+ Examples
+ --------
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
+ array([0, 1])
+
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
+ array([0.+0.j, 2.+0.j])
+ """
+ x = asarray(x)
+ if any(isreal(x) & (abs(x) > 1)):
+ x = _tocomplex(x)
+ return x
+
+
+def _unary_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def sqrt(x):
+ """
+ Compute the square root of x.
+
+ For negative input elements, a complex value is returned
+ (unlike `numpy.sqrt` which returns NaN).
+
+ Parameters
+ ----------
+ x : array_like
+ The input value(s).
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The square root of `x`. If `x` was a scalar, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.sqrt
+
+ Examples
+ --------
+ For real, non-negative inputs this works just like `numpy.sqrt`:
+
+ >>> np.emath.sqrt(1)
+ 1.0
+ >>> np.emath.sqrt([1, 4])
+ array([1., 2.])
+
+ But it automatically handles negative inputs:
+
+ >>> np.emath.sqrt(-1)
+ 1j
+ >>> np.emath.sqrt([-1,4])
+ array([0.+1.j, 2.+0.j])
+
+ Different results are expected because floating-point 0.0 and -0.0
+ are distinct.
+
+ For more control, explicitly use complex() as follows:
+
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
+ 2j
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
+ -2j
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.sqrt(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log(x):
+ """
+ Compute the natural logarithm of `x`.
+
+ Return the "principal value" (for a description of this, see `numpy.log`)
+ of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+ returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+ complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like
+ The value(s) whose log is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.log
+
+ Notes
+ -----
+ For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+ (note, however, that otherwise `numpy.log` and this `log` are identical,
+ i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+ notably, the complex principal value if ``x.imag != 0``).
+
+ Examples
+ --------
+ >>> np.emath.log(np.exp(1))
+ 1.0
+
+ Negative arguments are handled "correctly" (recall that
+ ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
+
+ >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
+ True
+
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.log(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log10(x):
+ """
+ Compute the logarithm base 10 of `x`.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+ is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+ returns ``inf``). Otherwise, the complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like or scalar
+ The value(s) whose log base 10 is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.log10
+
+ Notes
+ -----
+ For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+ (note, however, that otherwise `numpy.log10` and this `log10` are
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+ and, notably, the complex principal value if ``x.imag != 0``).
+
+ Examples
+ --------
+
+ (We set the printing precision so the example can be auto-tested)
+
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.log10(10**1)
+ 1.0
+
+ >>> np.emath.log10([-10**1, -10**2, 10**2])
+ array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
+
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.log10(x)
+
+
+def _logn_dispatcher(n, x):
+ return (n, x,)
+
+
+@array_function_dispatch(_logn_dispatcher)
+def logn(n, x):
+ """
+ Take log base n of x.
+
+ If `x` contains negative inputs, the answer is computed and returned in the
+ complex domain.
+
+ Parameters
+ ----------
+ n : array_like
+ The base(s) in which the log is taken.
+ x : array_like
+ The value(s) whose log base `n` is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log base `n` of the `x` value(s). If `x` was a scalar, so is
+ `out`, otherwise an array is returned.
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.logn(2, [4, 8])
+ array([2., 3.])
+ >>> np.emath.logn(2, [-4, -8, 8])
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+
+ """
+ x = _fix_real_lt_zero(x)
+ n = _fix_real_lt_zero(n)
+ return nx.log(x)/nx.log(n)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log2(x):
+ """
+ Compute the logarithm base 2 of `x`.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+ a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+ ``inf``). Otherwise, the complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like
+ The value(s) whose log base 2 is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.log2
+
+ Notes
+ -----
+ For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+ (note, however, that otherwise `numpy.log2` and this `log2` are
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+ and, notably, the complex principal value if ``x.imag != 0``).
+
+ Examples
+ --------
+ We set the printing precision so the example can be auto-tested:
+
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.log2(8)
+ 3.0
+ >>> np.emath.log2([-4, -8, 8])
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+
+ """
+ x = _fix_real_lt_zero(x)
+ return nx.log2(x)
+
+
+def _power_dispatcher(x, p):
+ return (x, p)
+
+
+@array_function_dispatch(_power_dispatcher)
+def power(x, p):
+ """
+ Return x raised to the power p (``x**p``).
+
+ If `x` contains negative values, the output is converted to the
+ complex domain.
+
+ Parameters
+ ----------
+ x : array_like
+ The input value(s).
+ p : array_like of ints
+ The power(s) to which `x` is raised. If `x` contains multiple values,
+ `p` has to either be a scalar, or contain the same number of values
+ as `x`. In the latter case, the result is
+ ``x[0]**p[0], x[1]**p[1], ...``.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
+ otherwise an array is returned.
+
+ See Also
+ --------
+ numpy.power
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.power([2, 4], 2)
+ array([ 4, 16])
+ >>> np.emath.power([2, 4], -2)
+ array([0.25 , 0.0625])
+ >>> np.emath.power([-2, 4], 2)
+ array([ 4.-0.j, 16.+0.j])
+
+ """
+ x = _fix_real_lt_zero(x)
+ p = _fix_int_lt_zero(p)
+ return nx.power(x, p)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arccos(x):
+ """
+ Compute the inverse cosine of x.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+ `abs(x) <= 1`, this is a real number in the closed interval
+ :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
+
+ Parameters
+ ----------
+ x : array_like or scalar
+ The value(s) whose arccos is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+ is `out`, otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.arccos
+
+ Notes
+ -----
+ For an arccos() that returns ``NAN`` when real `x` is not in the
+ interval ``[-1,1]``, use `numpy.arccos`.
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.arccos(1) # a scalar is returned
+ 0.0
+
+ >>> np.emath.arccos([1,2])
+ array([0.-0.j , 0.-1.317j])
+
+ """
+ x = _fix_real_abs_gt_1(x)
+ return nx.arccos(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arcsin(x):
+ """
+ Compute the inverse sine of x.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+ `abs(x) <= 1`, this is a real number in the closed interval
+ :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
+ returned.
+
+ Parameters
+ ----------
+ x : array_like or scalar
+ The value(s) whose arcsin is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+ is `out`, otherwise an array object is returned.
+
+ See Also
+ --------
+ numpy.arcsin
+
+ Notes
+ -----
+ For an arcsin() that returns ``NAN`` when real `x` is not in the
+ interval ``[-1,1]``, use `numpy.arcsin`.
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4)
+
+ >>> np.emath.arcsin(0)
+ 0.0
+
+ >>> np.emath.arcsin([0,1])
+ array([0. , 1.5708])
+
+ """
+ x = _fix_real_abs_gt_1(x)
+ return nx.arcsin(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arctanh(x):
+ """
+ Compute the inverse hyperbolic tangent of `x`.
+
+ Return the "principal value" (for a description of this, see
+ `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
+ ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
+ complex, the result is complex. Finally, `x = 1` returns ``inf`` and
+ ``x = -1`` returns ``-inf``.
+
+ Parameters
+ ----------
+ x : array_like
+ The value(s) whose arctanh is (are) required.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+ a scalar so is `out`, otherwise an array is returned.
+
+
+ See Also
+ --------
+ numpy.arctanh
+
+ Notes
+ -----
+ For an arctanh() that returns ``NAN`` when real `x` is not in the
+ interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
+ return +/-inf for ``x = +/-1``).
+
+ Examples
+ --------
+ >>> np.set_printoptions(precision=4)
+
+ >>> from numpy.testing import suppress_warnings
+ >>> with suppress_warnings() as sup:
+ ... sup.filter(RuntimeWarning)
+ ... np.emath.arctanh(np.eye(2))
+ array([[inf, 0.],
+ [ 0., inf]])
+ >>> np.emath.arctanh([1j])
+ array([0.+0.7854j])
+
+ """
+ x = _fix_real_abs_gt_1(x)
+ return nx.arctanh(x)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/scimath.pyi b/venv/lib/python3.9/site-packages/numpy/lib/scimath.pyi
new file mode 100644
index 00000000..589feb15
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/scimath.pyi
@@ -0,0 +1,94 @@
+from typing import overload, Any
+
+from numpy import complexfloating
+
+from numpy._typing import (
+ NDArray,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ComplexLike_co,
+ _FloatLike_co,
+)
+
+__all__: list[str]
+
+@overload
+def sqrt(x: _FloatLike_co) -> Any: ...
+@overload
+def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
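+
+# Illustrative note (an assumption, not part of the upstream stub): under
+# these overloads a type checker resolves, e.g.,
+#
+#     reveal_type(sqrt(-1.0))  # Any (a real scalar may produce a complex result)
+#     reveal_type(sqrt(1j))    # complexfloating[Any, Any]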
+
+@overload
+def log(x: _FloatLike_co) -> Any: ...
+@overload
+def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log10(x: _FloatLike_co) -> Any: ...
+@overload
+def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def log2(x: _FloatLike_co) -> Any: ...
+@overload
+def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
+@overload
+def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
+@overload
+def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arccos(x: _FloatLike_co) -> Any: ...
+@overload
+def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arcsin(x: _FloatLike_co) -> Any: ...
+@overload
+def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def arctanh(x: _FloatLike_co) -> Any: ...
+@overload
+def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+@overload
+def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/setup.py b/venv/lib/python3.9/site-packages/numpy/lib/setup.py
new file mode 100644
index 00000000..7520b72d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/setup.py
@@ -0,0 +1,12 @@
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration('lib', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
+ config.add_data_files('*.pyi')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
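+
+# Usage sketch (illustrative comment): this configuration is normally picked
+# up by numpy's top-level numpy.distutils build via add_subpackage('lib');
+# running this file directly builds only the subpackage, e.g.
+# `python setup.py build`.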
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/shape_base.py b/venv/lib/python3.9/site-packages/numpy/lib/shape_base.py
new file mode 100644
index 00000000..ab91423d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/shape_base.py
@@ -0,0 +1,1280 @@
+import functools
+
+import numpy.core.numeric as _nx
+from numpy.core.numeric import (
+ asarray, zeros, outer, concatenate, array, asanyarray
+ )
+from numpy.core.fromnumeric import reshape, transpose
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
+from numpy.core import vstack, atleast_3d
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.core.shape_base import _arrays_for_stack_dispatcher
+from numpy.lib.index_tricks import ndindex
+from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
+
+
+__all__ = [
+ 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
+ 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
+ 'put_along_axis'
+ ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _make_along_axis_idx(arr_shape, indices, axis):
+ # compute dimensions to iterate over
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
+ raise IndexError('`indices` must be an integer array')
+ if len(arr_shape) != indices.ndim:
+ raise ValueError(
+ "`indices` and `arr` must have the same number of dimensions")
+ shape_ones = (1,) * indices.ndim
+ dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
+
+ # build a fancy index, consisting of orthogonal aranges, with the
+ # requested index inserted at the right location
+ fancy_index = []
+ for dim, n in zip(dest_dims, arr_shape):
+ if dim is None:
+ fancy_index.append(indices)
+ else:
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+ return tuple(fancy_index)
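+
+# Illustrative sketch (comment only, not part of the upstream module):
+# for a (2, 3) array with axis=1, the non-axis dimension becomes a
+# broadcastable column of row indices while `indices` fills the axis:
+#
+# >>> import numpy as np
+# >>> idx = _make_along_axis_idx((2, 3), np.array([[0, 2, 1], [1, 0, 2]]), 1)
+# >>> idx[0]
+# array([[0],
+#        [1]])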
+
+
+def _take_along_axis_dispatcher(arr, indices, axis):
+ return (arr, indices)
+
+
+@array_function_dispatch(_take_along_axis_dispatcher)
+def take_along_axis(arr, indices, axis):
+ """
+ Take values from the input array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to look up values in the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr : ndarray (Ni..., M, Nk...)
+ Source array
+ indices : ndarray (Ni..., J, Nk...)
+ Indices to take along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions Ni and Nk only need to broadcast
+ against `arr`.
+ axis : int
+ The axis to take 1d slices along. If axis is None, the input array is
+ treated as if it had first been flattened to 1d, for consistency with
+ `sort` and `argsort`.
+
+ Returns
+ -------
+ out : ndarray (Ni..., J, Nk...)
+ The indexed result.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+ out = np.empty(Ni + (J,) + Nk)
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ out_1d = out [ii + s_[:,] + kk]
+ for j in range(J):
+ out_1d[j] = a_1d[indices_1d[j]]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ out_1d[:] = a_1d[indices_1d]
+
+ See Also
+ --------
+ take : Take along an axis, using the same indices for every 1d slice
+ put_along_axis :
+ Put values into the destination array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can sort either by using sort directly, or argsort and this function
+
+ >>> np.sort(a, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+ >>> ai = np.argsort(a, axis=1); ai
+ array([[0, 2, 1],
+ [1, 2, 0]])
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 20, 30],
+ [40, 50, 60]])
+
+ The same works for max and min, if you expand the dimensions:
+
+ >>> np.expand_dims(np.max(a, axis=1), axis=1)
+ array([[30],
+ [60]])
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]])
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[30],
+ [60]])
+
+ If we want to get the max and min at the same time, we can stack the
+ indices first
+
+ >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
+ >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+ >>> ai
+ array([[0, 1],
+ [1, 0]])
+ >>> np.take_along_axis(a, ai, axis=1)
+ array([[10, 30],
+ [40, 60]])
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ arr_shape = (len(arr),) # flatiter has no .shape
+ axis = 0
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def _put_along_axis_dispatcher(arr, indices, values, axis):
+ return (arr, indices, values)
+
+
+@array_function_dispatch(_put_along_axis_dispatcher)
+def put_along_axis(arr, indices, values, axis):
+ """
+ Put values into the destination array by matching 1d index and data slices.
+
+ This iterates over matching 1d slices oriented along the specified axis in
+ the index and data arrays, and uses the former to place values into the
+ latter. These slices can be different lengths.
+
+ Functions returning an index along an axis, like `argsort` and
+ `argpartition`, produce suitable indices for this function.
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ arr : ndarray (Ni..., M, Nk...)
+ Destination array.
+ indices : ndarray (Ni..., J, Nk...)
+ Indices to change along each 1d slice of `arr`. This must match the
+ dimension of arr, but dimensions in Ni and Nk may be 1 to broadcast
+ against `arr`.
+ values : array_like (Ni..., J, Nk...)
+ Values to insert at those indices. Its shape and dimension are
+ broadcast to match that of `indices`.
+ axis : int
+ The axis to take 1d slices along. If axis is None, the destination
+ array is treated as if a flattened 1d view had been created of it.
+
+ Notes
+ -----
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+ J = indices.shape[axis] # Need not equal M
+
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ a_1d = a [ii + s_[:,] + kk]
+ indices_1d = indices[ii + s_[:,] + kk]
+ values_1d = values [ii + s_[:,] + kk]
+ for j in range(J):
+ a_1d[indices_1d[j]] = values_1d[j]
+
+ Equivalently, eliminating the inner loop, the last two lines would be::
+
+ a_1d[indices_1d] = values_1d
+
+ See Also
+ --------
+ take_along_axis :
+ Take values from the input array by matching 1d index and data slices
+
+ Examples
+ --------
+
+ For this sample array
+
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+ We can replace the maximum values with:
+
+ >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
+ >>> ai
+ array([[1],
+ [0]])
+ >>> np.put_along_axis(a, ai, 99, axis=1)
+ >>> a
+ array([[10, 99, 20],
+ [99, 40, 50]])
+
+ """
+ # normalize inputs
+ if axis is None:
+ arr = arr.flat
+ axis = 0
+ arr_shape = (len(arr),) # flatiter has no .shape
+ else:
+ axis = normalize_axis_index(axis, arr.ndim)
+ arr_shape = arr.shape
+
+ # use the fancy index
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
+
+def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
+ return (arr,)
+
+
+@array_function_dispatch(_apply_along_axis_dispatcher)
+def apply_along_axis(func1d, axis, arr, *args, **kwargs):
+ """
+ Apply a function to 1-D slices along the given axis.
+
+ Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
+ and `a` is a 1-D slice of `arr` along `axis`.
+
+ This is equivalent to (but faster than) the following use of `ndindex` and
+ `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ f = func1d(arr[ii + s_[:,] + kk])
+ Nj = f.shape
+ for jj in ndindex(Nj):
+ out[ii + jj + kk] = f[jj]
+
+ Equivalently, eliminating the inner loop, this can be expressed as::
+
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+ for ii in ndindex(Ni):
+ for kk in ndindex(Nk):
+ out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
+
+ Parameters
+ ----------
+ func1d : function (M,) -> (Nj...)
+ This function should accept 1-D arrays. It is applied to 1-D
+ slices of `arr` along the specified axis.
+ axis : integer
+ Axis along which `arr` is sliced.
+ arr : ndarray (Ni..., M, Nk...)
+ Input array.
+ args : any
+ Additional arguments to `func1d`.
+ kwargs : any
+ Additional named arguments to `func1d`.
+
+ .. versionadded:: 1.9.0
+
+
+ Returns
+ -------
+ out : ndarray (Ni..., Nj..., Nk...)
+ The output array. The shape of `out` is identical to the shape of
+ `arr`, except along the `axis` dimension. This axis is removed, and
+ replaced with new dimensions equal to the shape of the return value
+ of `func1d`. So if `func1d` returns a scalar `out` will have one
+ fewer dimensions than `arr`.
+
+ See Also
+ --------
+ apply_over_axes : Apply a function repeatedly over multiple axes.
+
+ Examples
+ --------
+ >>> def my_func(a):
+ ... \"\"\"Average first and last element of a 1-D array\"\"\"
+ ... return (a[0] + a[-1]) * 0.5
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+ >>> np.apply_along_axis(my_func, 0, b)
+ array([4., 5., 6.])
+ >>> np.apply_along_axis(my_func, 1, b)
+ array([2., 5., 8.])
+
+ For a function that returns a 1D array, the number of dimensions in
+ `outarr` is the same as `arr`.
+
+ >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
+ >>> np.apply_along_axis(sorted, 1, b)
+ array([[1, 7, 8],
+ [3, 4, 9],
+ [2, 5, 6]])
+
+ For a function that returns a higher dimensional array, those dimensions
+ are inserted in place of the `axis` dimension.
+
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+ >>> np.apply_along_axis(np.diag, -1, b)
+ array([[[1, 0, 0],
+ [0, 2, 0],
+ [0, 0, 3]],
+ [[4, 0, 0],
+ [0, 5, 0],
+ [0, 0, 6]],
+ [[7, 0, 0],
+ [0, 8, 0],
+ [0, 0, 9]]])
+ """
+ # handle negative axes
+ arr = asanyarray(arr)
+ nd = arr.ndim
+ axis = normalize_axis_index(axis, nd)
+
+ # arr, with the iteration axis at the end
+ in_dims = list(range(nd))
+ inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
+
+ # compute indices for the iteration axes, and append a trailing ellipsis to
+ # prevent 0d arrays decaying to scalars, which fixes gh-8642
+ inds = ndindex(inarr_view.shape[:-1])
+ inds = (ind + (Ellipsis,) for ind in inds)
+
+ # invoke the function on the first item
+ try:
+ ind0 = next(inds)
+ except StopIteration:
+ raise ValueError(
+ 'Cannot apply_along_axis when any iteration dimensions are 0'
+ ) from None
+ res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
+
+ # build a buffer for storing evaluations of func1d.
+ # remove the requested axis, and add the new ones on the end.
+ # laid out so that each write is contiguous.
+ # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
+ buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
+
+ # permutation of axes such that out = buff.transpose(buff_permute)
+ buff_dims = list(range(buff.ndim))
+ buff_permute = (
+ buff_dims[0 : axis] +
+ buff_dims[buff.ndim-res.ndim : buff.ndim] +
+ buff_dims[axis : buff.ndim-res.ndim]
+ )
+
+ # matrices have a nasty __array_prepare__ and __array_wrap__
+ if not isinstance(res, matrix):
+ buff = res.__array_prepare__(buff)
+
+ # save the first result, then compute and save all remaining results
+ buff[ind0] = res
+ for ind in inds:
+ buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
+
+ if not isinstance(res, matrix):
+ # wrap the array, to preserve subclasses
+ buff = res.__array_wrap__(buff)
+
+ # finally, rotate the inserted axes back to where they belong
+ return transpose(buff, buff_permute)
+
+ else:
+ # matrices have to be transposed first, because they collapse dimensions!
+ out_arr = transpose(buff, buff_permute)
+ return res.__array_wrap__(out_arr)
+
+
+def _apply_over_axes_dispatcher(func, a, axes):
+ return (a,)
+
+
+@array_function_dispatch(_apply_over_axes_dispatcher)
+def apply_over_axes(func, a, axes):
+ """
+ Apply a function repeatedly over multiple axes.
+
+ `func` is called as `res = func(a, axis)`, where `axis` is the first
+ element of `axes`. The result `res` of the function call must have
+ either the same dimensions as `a` or one less dimension. If `res`
+ has one less dimension than `a`, a dimension is inserted before
+ `axis`. The call to `func` is then repeated for each axis in `axes`,
+ with `res` as the first argument.
+
+ Parameters
+ ----------
+ func : function
+ This function must take two arguments, `func(a, axis)`.
+ a : array_like
+ Input array.
+ axes : array_like
+ Axes over which `func` is applied; the elements must be integers.
+
+ Returns
+ -------
+ apply_over_axis : ndarray
+ The output array. The number of dimensions is the same as `a`,
+ but the shape can be different. This depends on whether `func`
+ changes the shape of its output with respect to its input.
+
+ See Also
+ --------
+ apply_along_axis :
+ Apply a function to 1-D slices of an array along the given axis.
+
+ Notes
+ -----
+ This function is equivalent to tuple axis arguments to reorderable ufuncs
+ with keepdims=True. Tuple axis arguments to ufuncs have been available since
+ version 1.7.0.
+
+ Examples
+ --------
+ >>> a = np.arange(24).reshape(2,3,4)
+ >>> a
+ array([[[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]],
+ [[12, 13, 14, 15],
+ [16, 17, 18, 19],
+ [20, 21, 22, 23]]])
+
+ Sum over axes 0 and 2. The result has the same number of dimensions
+ as the original array:
+
+ >>> np.apply_over_axes(np.sum, a, [0,2])
+ array([[[ 60],
+ [ 92],
+ [124]]])
+
+ Tuple axis arguments to ufuncs are equivalent:
+
+ >>> np.sum(a, axis=(0,2), keepdims=True)
+ array([[[ 60],
+ [ 92],
+ [124]]])
+
+ """
+ val = asarray(a)
+ N = a.ndim
+ if array(axes).ndim == 0:
+ axes = (axes,)
+ for axis in axes:
+ if axis < 0:
+ axis = N + axis
+ args = (val, axis)
+ res = func(*args)
+ if res.ndim == val.ndim:
+ val = res
+ else:
+ res = expand_dims(res, axis)
+ if res.ndim == val.ndim:
+ val = res
+ else:
+ raise ValueError("function is not returning "
+ "an array of the correct shape")
+ return val
+
+
+def _expand_dims_dispatcher(a, axis):
+ return (a,)
+
+
+@array_function_dispatch(_expand_dims_dispatcher)
+def expand_dims(a, axis):
+ """
+ Expand the shape of an array.
+
+ Insert a new axis that will appear at the `axis` position in the expanded
+ array shape.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ axis : int or tuple of ints
+ Position in the expanded axes where the new axis (or axes) is placed.
+
+ .. deprecated:: 1.13.0
+ Passing an axis where ``axis > a.ndim`` will be treated as
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+ be treated as ``axis == 0``. This behavior is deprecated.
+
+ .. versionchanged:: 1.18.0
+ A tuple of axes is now supported. Out of range axes as
+ described above are now forbidden and raise an `AxisError`.
+
+ Returns
+ -------
+ result : ndarray
+ View of `a` with the number of dimensions increased.
+
+ See Also
+ --------
+ squeeze : The inverse operation, removing singleton dimensions
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
+ doc.indexing, atleast_1d, atleast_2d, atleast_3d
+
+ Examples
+ --------
+ >>> x = np.array([1, 2])
+ >>> x.shape
+ (2,)
+
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=0)
+ >>> y
+ array([[1, 2]])
+ >>> y.shape
+ (1, 2)
+
+ The following is equivalent to ``x[:, np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=1)
+ >>> y
+ array([[1],
+ [2]])
+ >>> y.shape
+ (2, 1)
+
+ ``axis`` may also be a tuple:
+
+ >>> y = np.expand_dims(x, axis=(0, 1))
+ >>> y
+ array([[[1, 2]]])
+
+ >>> y = np.expand_dims(x, axis=(2, 0))
+ >>> y
+ array([[[1],
+ [2]]])
+
+ Note that some examples may use ``None`` instead of ``np.newaxis``. These
+ are the same objects:
+
+ >>> np.newaxis is None
+ True
+
+ """
+ if isinstance(a, matrix):
+ a = asarray(a)
+ else:
+ a = asanyarray(a)
+
+ if type(axis) not in (tuple, list):
+ axis = (axis,)
+
+ out_ndim = len(axis) + a.ndim
+ axis = normalize_axis_tuple(axis, out_ndim)
+
+ shape_it = iter(a.shape)
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+
+ return a.reshape(shape)
+
+
+row_stack = vstack
+
+
+def _column_stack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_column_stack_dispatcher)
+def column_stack(tup):
+ """
+ Stack 1-D arrays as columns into a 2-D array.
+
+ Take a sequence of 1-D arrays and stack them as columns
+ to make a single 2-D array. 2-D arrays are stacked as-is,
+ just like with `hstack`. 1-D arrays are turned into 2-D columns
+ first.
+
+ Parameters
+ ----------
+ tup : sequence of 1-D or 2-D arrays.
+ Arrays to stack. All of them must have the same first dimension.
+
+ Returns
+ -------
+ stacked : 2-D array
+ The array formed by stacking the given arrays.
+
+ See Also
+ --------
+ stack, hstack, vstack, concatenate
+
+ Examples
+ --------
+ >>> a = np.array((1,2,3))
+ >>> b = np.array((2,3,4))
+ >>> np.column_stack((a,b))
+ array([[1, 2],
+ [2, 3],
+ [3, 4]])
+
+ """
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+
+ arrays = []
+ for v in tup:
+ arr = asanyarray(v)
+ if arr.ndim < 2:
+ arr = array(arr, copy=False, subok=True, ndmin=2).T
+ arrays.append(arr)
+ return _nx.concatenate(arrays, 1)
+
+
+def _dstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_dstack_dispatcher)
+def dstack(tup):
+ """
+ Stack arrays in sequence depth wise (along third axis).
+
+ This is equivalent to concatenation along the third axis after 2-D arrays
+ of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
+ `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
+ `dsplit`.
+
+ This function makes most sense for arrays with up to 3 dimensions. For
+ instance, for pixel-data with a height (first axis), width (second axis),
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+ `block` provide more general stacking and concatenation operations.
+
+ Parameters
+ ----------
+ tup : sequence of arrays
+ The arrays must have the same shape along all but the third axis.
+ 1-D or 2-D arrays must have the same shape.
+
+ Returns
+ -------
+ stacked : ndarray
+ The array formed by stacking the given arrays, will be at least 3-D.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ block : Assemble an nd-array from nested lists of blocks.
+ vstack : Stack arrays in sequence vertically (row wise).
+ hstack : Stack arrays in sequence horizontally (column wise).
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
+ dsplit : Split array along third axis.
+
+ Examples
+ --------
+ >>> a = np.array((1,2,3))
+ >>> b = np.array((2,3,4))
+ >>> np.dstack((a,b))
+ array([[[1, 2],
+ [2, 3],
+ [3, 4]]])
+
+ >>> a = np.array([[1],[2],[3]])
+ >>> b = np.array([[2],[3],[4]])
+ >>> np.dstack((a,b))
+ array([[[1, 2]],
+ [[2, 3]],
+ [[3, 4]]])
+
+ """
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+
+ arrs = atleast_3d(*tup)
+ if not isinstance(arrs, list):
+ arrs = [arrs]
+ return _nx.concatenate(arrs, 2)
+
+
+def _replace_zero_by_x_arrays(sub_arys):
+ for i in range(len(sub_arys)):
+ if _nx.ndim(sub_arys[i]) == 0:
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+ elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+ return sub_arys
+
+
+def _array_split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_array_split_dispatcher)
+def array_split(ary, indices_or_sections, axis=0):
+ """
+ Split an array into multiple sub-arrays.
+
+ Please refer to the ``split`` documentation. The only difference
+ between these functions is that ``array_split`` allows
+ `indices_or_sections` to be an integer that does *not* equally
+ divide the axis. For an array of length l that should be split
+ into n sections, it returns l % n sub-arrays of size l//n + 1
+ and the rest of size l//n.
+
+ See Also
+ --------
+ split : Split array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> x = np.arange(8.0)
+ >>> np.array_split(x, 3)
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
+
+ >>> x = np.arange(9)
+ >>> np.array_split(x, 4)
+ [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
+
+ """
+ try:
+ Ntotal = ary.shape[axis]
+ except AttributeError:
+ Ntotal = len(ary)
+ try:
+ # handle array case.
+ Nsections = len(indices_or_sections) + 1
+ div_points = [0] + list(indices_or_sections) + [Ntotal]
+ except TypeError:
+ # indices_or_sections is a scalar, not an array.
+ Nsections = int(indices_or_sections)
+ if Nsections <= 0:
+ raise ValueError('number of sections must be larger than 0.') from None
+ Neach_section, extras = divmod(Ntotal, Nsections)
+ section_sizes = ([0] +
+ extras * [Neach_section+1] +
+ (Nsections-extras) * [Neach_section])
+ div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
+
+ sub_arys = []
+ sary = _nx.swapaxes(ary, axis, 0)
+ for i in range(Nsections):
+ st = div_points[i]
+ end = div_points[i + 1]
+ sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
+
+ return sub_arys
+
+
+def _split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(ary, indices_or_sections, axis=0):
+ """
+ Split an array into multiple sub-arrays as views into `ary`.
+
+ Parameters
+ ----------
+ ary : ndarray
+ Array to be divided into sub-arrays.
+ indices_or_sections : int or 1-D array
+ If `indices_or_sections` is an integer, N, the array will be divided
+ into N equal arrays along `axis`. If such a split is not possible,
+ an error is raised.
+
+ If `indices_or_sections` is a 1-D array of sorted integers, the entries
+ indicate where along `axis` the array is split. For example,
+ ``[2, 3]`` would, for ``axis=0``, result in
+
+ - ary[:2]
+ - ary[2:3]
+ - ary[3:]
+
+ If an index exceeds the dimension of the array along `axis`,
+ an empty sub-array is returned correspondingly.
+ axis : int, optional
+ The axis along which to split, default is 0.
+
+ Returns
+ -------
+ sub-arrays : list of ndarrays
+ A list of sub-arrays as views into `ary`.
+
+ Raises
+ ------
+ ValueError
+ If `indices_or_sections` is given as an integer, but
+ a split does not result in equal division.
+
+ See Also
+ --------
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size. Does not raise an exception if
+ an equal division cannot be made.
+ hsplit : Split array into multiple sub-arrays horizontally (column-wise).
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ concatenate : Join a sequence of arrays along an existing axis.
+ stack : Join a sequence of arrays along a new axis.
+ hstack : Stack arrays in sequence horizontally (column wise).
+ vstack : Stack arrays in sequence vertically (row wise).
+ dstack : Stack arrays in sequence depth wise (along third dimension).
+
+ Examples
+ --------
+ >>> x = np.arange(9.0)
+ >>> np.split(x, 3)
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
+
+ >>> x = np.arange(8.0)
+ >>> np.split(x, [3, 5, 6, 10])
+ [array([0., 1., 2.]),
+ array([3., 4.]),
+ array([5.]),
+ array([6., 7.]),
+ array([], dtype=float64)]
+
+ """
+ try:
+ len(indices_or_sections)
+ except TypeError:
+ sections = indices_or_sections
+ N = ary.shape[axis]
+ if N % sections:
+ raise ValueError(
+ 'array split does not result in an equal division') from None
+ return array_split(ary, indices_or_sections, axis)
+
+
+def _hvdsplit_dispatcher(ary, indices_or_sections):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def hsplit(ary, indices_or_sections):
+ """
+ Split an array into multiple sub-arrays horizontally (column-wise).
+
+ Please refer to the `split` documentation. `hsplit` is equivalent
+ to `split` with ``axis=1``; the array is always split along the second
+ axis, except for 1-D arrays, where it is split at ``axis=0``.
+
+ See Also
+ --------
+ split : Split an array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> x = np.arange(16.0).reshape(4, 4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+ >>> np.hsplit(x, 2)
+ [array([[ 0., 1.],
+ [ 4., 5.],
+ [ 8., 9.],
+ [12., 13.]]),
+ array([[ 2., 3.],
+ [ 6., 7.],
+ [10., 11.],
+ [14., 15.]])]
+ >>> np.hsplit(x, np.array([3, 6]))
+ [array([[ 0., 1., 2.],
+ [ 4., 5., 6.],
+ [ 8., 9., 10.],
+ [12., 13., 14.]]),
+ array([[ 3.],
+ [ 7.],
+ [11.],
+ [15.]]),
+ array([], shape=(4, 0), dtype=float64)]
+
+ With a higher dimensional array the split is still along the second axis.
+
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
+ >>> x
+ array([[[0., 1.],
+ [2., 3.]],
+ [[4., 5.],
+ [6., 7.]]])
+ >>> np.hsplit(x, 2)
+ [array([[[0., 1.]],
+ [[4., 5.]]]),
+ array([[[2., 3.]],
+ [[6., 7.]]])]
+
+ With a 1-D array, the split is along axis 0.
+
+ >>> x = np.array([0, 1, 2, 3, 4, 5])
+ >>> np.hsplit(x, 2)
+ [array([0, 1, 2]), array([3, 4, 5])]
+
+ """
+ if _nx.ndim(ary) == 0:
+ raise ValueError('hsplit only works on arrays of 1 or more dimensions')
+ if ary.ndim > 1:
+ return split(ary, indices_or_sections, 1)
+ else:
+ return split(ary, indices_or_sections, 0)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def vsplit(ary, indices_or_sections):
+ """
+ Split an array into multiple sub-arrays vertically (row-wise).
+
+ Please refer to the ``split`` documentation. ``vsplit`` is equivalent
+ to ``split`` with ``axis=0`` (default); the array is always split along
+ the first axis regardless of the array dimension.
+
+ See Also
+ --------
+ split : Split an array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> x = np.arange(16.0).reshape(4, 4)
+ >>> x
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
+ >>> np.vsplit(x, 2)
+ [array([[0., 1., 2., 3.],
+ [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])]
+ >>> np.vsplit(x, np.array([3, 6]))
+ [array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
+
+ With a higher dimensional array the split is still along the first axis.
+
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
+ >>> x
+ array([[[0., 1.],
+ [2., 3.]],
+ [[4., 5.],
+ [6., 7.]]])
+ >>> np.vsplit(x, 2)
+ [array([[[0., 1.],
+ [2., 3.]]]), array([[[4., 5.],
+ [6., 7.]]])]
+
+ """
+ if _nx.ndim(ary) < 2:
+ raise ValueError('vsplit only works on arrays of 2 or more dimensions')
+ return split(ary, indices_or_sections, 0)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def dsplit(ary, indices_or_sections):
+ """
+ Split array into multiple sub-arrays along the 3rd axis (depth).
+
+ Please refer to the `split` documentation. `dsplit` is equivalent
+ to `split` with ``axis=2``; the array is always split along the third
+ axis, provided the array dimension is greater than or equal to 3.
+
+ See Also
+ --------
+ split : Split an array into multiple sub-arrays of equal size.
+
+ Examples
+ --------
+ >>> x = np.arange(16.0).reshape(2, 2, 4)
+ >>> x
+ array([[[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.]],
+ [[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]]])
+ >>> np.dsplit(x, 2)
+ [array([[[ 0., 1.],
+ [ 4., 5.]],
+ [[ 8., 9.],
+ [12., 13.]]]), array([[[ 2., 3.],
+ [ 6., 7.]],
+ [[10., 11.],
+ [14., 15.]]])]
+ >>> np.dsplit(x, np.array([3, 6]))
+ [array([[[ 0., 1., 2.],
+ [ 4., 5., 6.]],
+ [[ 8., 9., 10.],
+ [12., 13., 14.]]]),
+ array([[[ 3.],
+ [ 7.]],
+ [[11.],
+ [15.]]]),
+ array([], shape=(2, 2, 0), dtype=float64)]
+ """
+ if _nx.ndim(ary) < 3:
+ raise ValueError('dsplit only works on arrays of 3 or more dimensions')
+ return split(ary, indices_or_sections, 2)
+
+def get_array_prepare(*args):
+ """Find the wrapper for the array with the highest priority.
+
+ In case of ties, the leftmost wins. If no wrapper is found, return None.
+ """
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
+ x.__array_prepare__) for i, x in enumerate(args)
+ if hasattr(x, '__array_prepare__'))
+ if wrappers:
+ return wrappers[-1][-1]
+ return None
+
+def get_array_wrap(*args):
+ """Find the wrapper for the array with the highest priority.
+
+ In case of ties, the leftmost wins. If no wrapper is found, return None.
+ """
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
+ x.__array_wrap__) for i, x in enumerate(args)
+ if hasattr(x, '__array_wrap__'))
+ if wrappers:
+ return wrappers[-1][-1]
+ return None
+
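+ # Editor's note: the sort key (priority, -i, wrapper) above makes the
+ # leftmost argument win ties, because a smaller index i gives a larger -i;
+ # after the ascending sort, wrappers[-1] is the highest-priority (and, on
+ # ties, leftmost) entry, and wrappers[-1][-1] is its bound method. The same
+ # reasoning applies to get_array_prepare above.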
+
+def _kron_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_kron_dispatcher)
+def kron(a, b):
+ """
+ Kronecker product of two arrays.
+
+ Computes the Kronecker product, a composite array made of blocks of the
+ second array scaled by the first.
+
+ Parameters
+ ----------
+ a, b : array_like
+
+ Returns
+ -------
+ out : ndarray
+
+ See Also
+ --------
+ outer : The outer product
+
+ Notes
+ -----
+ The function assumes that the number of dimensions of `a` and `b`
+ are the same, if necessary prepending the smaller with ones.
+ If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``,
+ the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
+ The elements are products of elements from `a` and `b`, organized
+ explicitly by::
+
+ kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
+
+ where::
+
+ kt = it * st + jt, t = 0,...,N
+
+ In the common 2-D case (N=1), the block structure can be visualized::
+
+ [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
+ [ ... ... ],
+ [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
+
+
+ Examples
+ --------
+ >>> np.kron([1,10,100], [5,6,7])
+ array([ 5, 6, 7, ..., 500, 600, 700])
+ >>> np.kron([5,6,7], [1,10,100])
+ array([ 5, 50, 500, ..., 7, 70, 700])
+
+ >>> np.kron(np.eye(2), np.ones((2,2)))
+ array([[1., 1., 0., 0.],
+ [1., 1., 0., 0.],
+ [0., 0., 1., 1.],
+ [0., 0., 1., 1.]])
+
+ >>> a = np.arange(100).reshape((2,5,2,5))
+ >>> b = np.arange(24).reshape((2,3,4))
+ >>> c = np.kron(a,b)
+ >>> c.shape
+ (2, 10, 6, 20)
+ >>> I = (1,3,0,2)
+ >>> J = (0,2,1)
+ >>> J1 = (0,) + J # extend to ndim=4
+ >>> S1 = (1,) + b.shape
+ >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
+ >>> c[K] == a[I]*b[J]
+ True
+
+ """
+ # How it works:
+ # 1. Equalise the shapes by prepending the smaller array's shape with 1s
+ # 2. Expand the shapes of both arrays by adding new axes at
+ #    odd positions for the 1st array and even positions for the 2nd
+ # 3. Compute the product of the modified arrays
+ # 4. The innermost array elements now contain the rows of
+ #    the Kronecker product
+ # 5. Reshape the result to kron's shape, which is the same as the
+ #    elementwise product of the shapes of the two arrays.
+ b = asanyarray(b)
+ a = array(a, copy=False, subok=True, ndmin=b.ndim)
+ is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
+ ndb, nda = b.ndim, a.ndim
+ nd = max(ndb, nda)
+
+ if (nda == 0 or ndb == 0):
+ return _nx.multiply(a, b)
+
+ as_ = a.shape
+ bs = b.shape
+ if not a.flags.contiguous:
+ a = reshape(a, as_)
+ if not b.flags.contiguous:
+ b = reshape(b, bs)
+
+ # Equalise the shapes by prepending the smaller one with 1s
+ as_ = (1,)*max(0, ndb-nda) + as_
+ bs = (1,)*max(0, nda-ndb) + bs
+
+ # Insert empty dimensions
+ a_arr = expand_dims(a, axis=tuple(range(ndb-nda)))
+ b_arr = expand_dims(b, axis=tuple(range(nda-ndb)))
+
+ # Compute the product
+ a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2)))
+ b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2)))
+ # In case of `mat`, convert result to `array`
+ result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
+
+ # Reshape back
+ result = result.reshape(_nx.multiply(as_, bs))
+
+ return result if not is_any_mat else matrix(result, copy=False)
+
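+ # Editor's sketch (assuming numpy as np): for equal-ndim inputs, the output
+ # shape is the elementwise product of the input shapes, matching the
+ # reshape in step 5 above:
+ #
+ #     >>> a, b = np.ones((2, 3)), np.ones((4, 5))
+ #     >>> np.kron(a, b).shape == tuple(np.multiply(a.shape, b.shape))
+ #     True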
+
+def _tile_dispatcher(A, reps):
+ return (A, reps)
+
+
+@array_function_dispatch(_tile_dispatcher)
+def tile(A, reps):
+ """
+ Construct an array by repeating A the number of times given by reps.
+
+ If `reps` has length ``d``, the result will have dimension of
+ ``max(d, A.ndim)``.
+
+ If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
+ axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
+ or shape (1, 1, 3) for 3-D replication. If this is not the desired
+ behavior, promote `A` to d-dimensions manually before calling this
+ function.
+
+ If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.
+ Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
+ (1, 1, 2, 2).
+
+ Note: Although tile may be used for broadcasting, it is strongly
+ recommended to use numpy's broadcasting operations and functions.
+
+ Parameters
+ ----------
+ A : array_like
+ The input array.
+ reps : array_like
+ The number of repetitions of `A` along each axis.
+
+ Returns
+ -------
+ c : ndarray
+ The tiled output array.
+
+ See Also
+ --------
+ repeat : Repeat elements of an array.
+ broadcast_to : Broadcast an array to a new shape
+
+ Examples
+ --------
+ >>> a = np.array([0, 1, 2])
+ >>> np.tile(a, 2)
+ array([0, 1, 2, 0, 1, 2])
+ >>> np.tile(a, (2, 2))
+ array([[0, 1, 2, 0, 1, 2],
+ [0, 1, 2, 0, 1, 2]])
+ >>> np.tile(a, (2, 1, 2))
+ array([[[0, 1, 2, 0, 1, 2]],
+ [[0, 1, 2, 0, 1, 2]]])
+
+ >>> b = np.array([[1, 2], [3, 4]])
+ >>> np.tile(b, 2)
+ array([[1, 2, 1, 2],
+ [3, 4, 3, 4]])
+ >>> np.tile(b, (2, 1))
+ array([[1, 2],
+ [3, 4],
+ [1, 2],
+ [3, 4]])
+
+ >>> c = np.array([1,2,3,4])
+ >>> np.tile(c,(4,1))
+ array([[1, 2, 3, 4],
+ [1, 2, 3, 4],
+ [1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ """
+ try:
+ tup = tuple(reps)
+ except TypeError:
+ tup = (reps,)
+ d = len(tup)
+ if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
+ # Fixes the problem that the function does not make a copy if A is a
+ # numpy array and the repetitions are 1 in all dimensions
+ return _nx.array(A, copy=True, subok=True, ndmin=d)
+ else:
+ # Note that no copy of zero-sized arrays is made. However since they
+ # have no data there is no risk of an inadvertent overwrite.
+ c = _nx.array(A, copy=False, subok=True, ndmin=d)
+ if (d < c.ndim):
+ tup = (1,)*(c.ndim-d) + tup
+ shape_out = tuple(s*t for s, t in zip(c.shape, tup))
+ n = c.size
+ if n > 0:
+ for dim_in, nrep in zip(c.shape, tup):
+ if nrep != 1:
+ c = c.reshape(-1, n).repeat(nrep, 0)
+ n //= dim_in
+ return c.reshape(shape_out)
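+
+ # Editor's walk-through of the repeat/reshape loop above: each pass tiles
+ # one axis. With A.shape == (2, 3) and reps == (2, 3), c starts as (2, 3)
+ # with n == 6; the first pass does c.reshape(-1, 6).repeat(2, 0), giving
+ # shape (2, 6) and n == 3; the second does c.reshape(-1, 3).repeat(3, 0),
+ # giving shape (12, 3); the final reshape to shape_out == (4, 9) is the
+ # tiled result.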
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/shape_base.pyi b/venv/lib/python3.9/site-packages/numpy/lib/shape_base.pyi
new file mode 100644
index 00000000..1b718da2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/shape_base.pyi
@@ -0,0 +1,215 @@
+from collections.abc import Callable, Sequence
+from typing import TypeVar, Any, overload, SupportsIndex, Protocol
+
+from numpy import (
+ generic,
+ integer,
+ ufunc,
+ bool_,
+ unsignedinteger,
+ signedinteger,
+ floating,
+ complexfloating,
+ object_,
+)
+
+from numpy._typing import (
+ ArrayLike,
+ NDArray,
+ _ShapeLike,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeUInt_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
+)
+
+from numpy.core.shape_base import vstack
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+# The signatures of `__array_wrap__` and `__array_prepare__` are the same;
+# give them unique names for the sake of clarity
+class _ArrayWrap(Protocol):
+ def __call__(
+ self,
+ array: NDArray[Any],
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+ /,
+ ) -> Any: ...
+
+class _ArrayPrepare(Protocol):
+ def __call__(
+ self,
+ array: NDArray[Any],
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+ /,
+ ) -> Any: ...
+
+class _SupportsArrayWrap(Protocol):
+ @property
+ def __array_wrap__(self) -> _ArrayWrap: ...
+
+class _SupportsArrayPrepare(Protocol):
+ @property
+ def __array_prepare__(self) -> _ArrayPrepare: ...
+
+__all__: list[str]
+
+row_stack = vstack
+
+def take_along_axis(
+ arr: _SCT | NDArray[_SCT],
+ indices: NDArray[integer[Any]],
+ axis: None | int,
+) -> NDArray[_SCT]: ...
+
+def put_along_axis(
+ arr: NDArray[_SCT],
+ indices: NDArray[integer[Any]],
+ values: ArrayLike,
+ axis: None | int,
+) -> None: ...
+
+# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
+# xref python/mypy#8645
+@overload
+def apply_along_axis(
+ func1d: Callable[..., _ArrayLike[_SCT]],
+ axis: SupportsIndex,
+ arr: ArrayLike,
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def apply_along_axis(
+ func1d: Callable[..., ArrayLike],
+ axis: SupportsIndex,
+ arr: ArrayLike,
+ *args: Any,
+ **kwargs: Any,
+) -> NDArray[Any]: ...
+
+def apply_over_axes(
+ func: Callable[[NDArray[Any], int], NDArray[_SCT]],
+ a: ArrayLike,
+ axes: int | Sequence[int],
+) -> NDArray[_SCT]: ...
+
+@overload
+def expand_dims(
+ a: _ArrayLike[_SCT],
+ axis: _ShapeLike,
+) -> NDArray[_SCT]: ...
+@overload
+def expand_dims(
+ a: ArrayLike,
+ axis: _ShapeLike,
+) -> NDArray[Any]: ...
+
+@overload
+def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
+@overload
+def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+@overload
+def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
+@overload
+def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+@overload
+def array_split(
+ ary: _ArrayLike[_SCT],
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def array_split(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def split(
+ ary: _ArrayLike[_SCT],
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def split(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+ axis: SupportsIndex = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def hsplit(
+ ary: _ArrayLike[_SCT],
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def hsplit(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def vsplit(
+ ary: _ArrayLike[_SCT],
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def vsplit(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def dsplit(
+ ary: _ArrayLike[_SCT],
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def dsplit(
+ ary: ArrayLike,
+ indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ...
+@overload
+def get_array_prepare(*args: object) -> None | _ArrayPrepare: ...
+
+@overload
+def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
+@overload
+def get_array_wrap(*args: object) -> None | _ArrayWrap: ...
+
+@overload
+def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
+@overload
+def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
+
+@overload
+def tile(
+ A: _ArrayLike[_SCT],
+ reps: int | Sequence[int],
+) -> NDArray[_SCT]: ...
+@overload
+def tile(
+ A: ArrayLike,
+ reps: int | Sequence[int],
+) -> NDArray[Any]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.py b/venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.py
new file mode 100644
index 00000000..6794ad55
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.py
@@ -0,0 +1,547 @@
+"""
+Utilities that manipulate strides to achieve desirable effects.
+
+An explanation of strides can be found in the "ndarray.rst" file in the
+NumPy reference guide.
+
+"""
+import numpy as np
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.core.overrides import array_function_dispatch, set_module
+
+__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
+
+
+class DummyArray:
+ """Dummy object that just exists to hang __array_interface__ dictionaries
+ and possibly keep alive a reference to a base array.
+ """
+
+ def __init__(self, interface, base=None):
+ self.__array_interface__ = interface
+ self.base = base
+
+
+def _maybe_view_as_subclass(original_array, new_array):
+ if type(original_array) is not type(new_array):
+ # if input was an ndarray subclass and subclasses were OK,
+ # then view the result as that subclass.
+ new_array = new_array.view(type=type(original_array))
+ # Since we have done something akin to a view from original_array, we
+ # should let the subclass finalize (if it has it implemented, i.e., is
+ # not None).
+ if new_array.__array_finalize__:
+ new_array.__array_finalize__(original_array)
+ return new_array
+
+
+def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
+ """
+ Create a view into the array with the given shape and strides.
+
+ .. warning:: This function has to be used with extreme care, see notes.
+
+ Parameters
+ ----------
+ x : ndarray
+ Array from which to create the new view.
+ shape : sequence of int, optional
+ The shape of the new array. Defaults to ``x.shape``.
+ strides : sequence of int, optional
+ The strides of the new array. Defaults to ``x.strides``.
+ subok : bool, optional
+ .. versionadded:: 1.10
+
+ If True, subclasses are preserved.
+ writeable : bool, optional
+ .. versionadded:: 1.12
+
+ If set to False, the returned array will always be readonly.
+ Otherwise it will be writable if the original array was. It
+ is advisable to set this to False if possible (see Notes).
+
+ Returns
+ -------
+ view : ndarray
+
+ See also
+ --------
+ broadcast_to : broadcast an array to a given shape.
+ reshape : reshape an array.
+ lib.stride_tricks.sliding_window_view :
+ user-friendly and safe function for the creation of sliding window views.
+
+ Notes
+ -----
+ ``as_strided`` creates a view into the array given the exact strides
+ and shape. This means it manipulates the internal data structure of
+ ndarray and, if done incorrectly, the array elements can point to
+ invalid memory and can corrupt results or crash your program.
+ It is advisable to always use the original ``x.strides`` when
+ calculating new strides to avoid reliance on a contiguous memory
+ layout.
+
+ Furthermore, arrays created with this function often contain
+ self-overlapping memory, so that two or more elements share the same
+ memory location.
+ Vectorized write operations on such arrays will typically be
+ unpredictable. They may even give different results for small, large,
+ or transposed arrays.
+
+ Since writing to these arrays has to be tested and done with great
+ care, you may want to use ``writeable=False`` to avoid accidental write
+ operations.
+
+ For these reasons it is advisable to avoid ``as_strided`` when
+ possible.
+ """
+ # first convert input to array, possibly keeping subclass
+ x = np.array(x, copy=False, subok=subok)
+ interface = dict(x.__array_interface__)
+ if shape is not None:
+ interface['shape'] = tuple(shape)
+ if strides is not None:
+ interface['strides'] = tuple(strides)
+
+ array = np.asarray(DummyArray(interface, base=x))
+ # The route via `__interface__` does not preserve structured
+ # dtypes. Since dtype should remain unchanged, we set it explicitly.
+ array.dtype = x.dtype
+
+ view = _maybe_view_as_subclass(x, array)
+
+ if view.flags.writeable and not writeable:
+ view.flags.writeable = False
+
+ return view
+
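+ # Editor's sketch (assuming numpy as np): a minimal, comparatively safe use
+ # that derives the new strides from x.strides, as the Notes recommend, and
+ # disables writes:
+ #
+ #     >>> x = np.arange(5)
+ #     >>> as_strided(x, shape=(3, 3),
+ #     ...            strides=(x.strides[0], x.strides[0]), writeable=False)
+ #     array([[0, 1, 2],
+ #            [1, 2, 3],
+ #            [2, 3, 4]])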
+
+def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
+ subok=None, writeable=None):
+ return (x,)
+
+
+@array_function_dispatch(_sliding_window_view_dispatcher)
+def sliding_window_view(x, window_shape, axis=None, *,
+ subok=False, writeable=False):
+ """
+ Create a sliding window view into the array with the given window shape.
+
+ Also known as rolling or moving window, the window slides across all
+ dimensions of the array and extracts subsets of the array at all window
+ positions.
+
+ .. versionadded:: 1.20.0
+
+ Parameters
+ ----------
+ x : array_like
+ Array to create the sliding window view from.
+ window_shape : int or tuple of int
+ Size of window over each axis that takes part in the sliding window.
+ If `axis` is not present, must have same length as the number of input
+ array dimensions. Single integers `i` are treated as if they were the
+ tuple `(i,)`.
+ axis : int or tuple of int, optional
+ Axis or axes along which the sliding window is applied.
+ By default, the sliding window is applied to all axes and
+ `window_shape[i]` will refer to axis `i` of `x`.
+ If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
+ the axis `axis[i]` of `x`.
+ Single integers `i` are treated as if they were the tuple `(i,)`.
+ subok : bool, optional
+ If True, sub-classes will be passed-through, otherwise the returned
+ array will be forced to be a base-class array (default).
+ writeable : bool, optional
+ When true, allow writing to the returned view. The default is false,
+ as this should be used with caution: the returned view contains the
+ same memory location multiple times, so writing to one location will
+ cause others to change.
+
+ Returns
+ -------
+ view : ndarray
+ Sliding window view of the array. The sliding window dimensions are
+ inserted at the end, and the original dimensions are trimmed as
+ required by the size of the sliding window.
+ That is, ``view.shape = x_shape_trimmed + window_shape``, where
+ ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
+ than the corresponding window size.
+
+ See Also
+ --------
+ lib.stride_tricks.as_strided: A lower-level and less safe routine for
+ creating arbitrary views from custom shape and strides.
+ broadcast_to: broadcast an array to a given shape.
+
+ Notes
+ -----
+ For many applications using a sliding window view can be convenient, but
+ potentially very slow. Often specialized solutions exist, for example:
+
+ - `scipy.signal.fftconvolve`
+
+ - filtering functions in `scipy.ndimage`
+
+ - moving window functions provided by
+ `bottleneck <https://github.com/pydata/bottleneck>`_.
+
+ As a rough estimate, a sliding window approach with an input size of `N`
+ and a window size of `W` will scale as `O(N*W)`, whereas a specialized
+ algorithm can frequently achieve `O(N)`. That means the sliding window
+ variant for a window size of 100 can be 100 times slower than a more
+ specialized version.
+
+ Nevertheless, for small window sizes, when no custom algorithm exists, or
+ as a prototyping and development tool, this function can be a good solution.
+
+ Examples
+ --------
+ >>> x = np.arange(6)
+ >>> x.shape
+ (6,)
+ >>> v = sliding_window_view(x, 3)
+ >>> v.shape
+ (4, 3)
+ >>> v
+ array([[0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 4],
+ [3, 4, 5]])
+
+ This also works in more dimensions, e.g.
+
+ >>> i, j = np.ogrid[:3, :4]
+ >>> x = 10*i + j
+ >>> x.shape
+ (3, 4)
+ >>> x
+ array([[ 0, 1, 2, 3],
+ [10, 11, 12, 13],
+ [20, 21, 22, 23]])
+ >>> shape = (2,2)
+ >>> v = sliding_window_view(x, shape)
+ >>> v.shape
+ (2, 3, 2, 2)
+ >>> v
+ array([[[[ 0, 1],
+ [10, 11]],
+ [[ 1, 2],
+ [11, 12]],
+ [[ 2, 3],
+ [12, 13]]],
+ [[[10, 11],
+ [20, 21]],
+ [[11, 12],
+ [21, 22]],
+ [[12, 13],
+ [22, 23]]]])
+
+ The axis can be specified explicitly:
+
+ >>> v = sliding_window_view(x, 3, 0)
+ >>> v.shape
+ (1, 4, 3)
+ >>> v
+ array([[[ 0, 10, 20],
+ [ 1, 11, 21],
+ [ 2, 12, 22],
+ [ 3, 13, 23]]])
+
+ The same axis can be used several times. In that case, every use reduces
+ the corresponding original dimension:
+
+ >>> v = sliding_window_view(x, (2, 3), (1, 1))
+ >>> v.shape
+ (3, 1, 2, 3)
+ >>> v
+ array([[[[ 0, 1, 2],
+ [ 1, 2, 3]]],
+ [[[10, 11, 12],
+ [11, 12, 13]]],
+ [[[20, 21, 22],
+ [21, 22, 23]]]])
+
+ Combining with stepped slicing (`::step`), this can be used to take sliding
+ views which skip elements:
+
+ >>> x = np.arange(7)
+ >>> sliding_window_view(x, 5)[:, ::2]
+ array([[0, 2, 4],
+ [1, 3, 5],
+ [2, 4, 6]])
+
+ or views which move by multiple elements
+
+ >>> x = np.arange(7)
+ >>> sliding_window_view(x, 3)[::2, :]
+ array([[0, 1, 2],
+ [2, 3, 4],
+ [4, 5, 6]])
+
+ A common application of `sliding_window_view` is the calculation of running
+ statistics. The simplest example is the
+ `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
+
+ >>> x = np.arange(6)
+ >>> x.shape
+ (6,)
+ >>> v = sliding_window_view(x, 3)
+ >>> v.shape
+ (4, 3)
+ >>> v
+ array([[0, 1, 2],
+ [1, 2, 3],
+ [2, 3, 4],
+ [3, 4, 5]])
+ >>> moving_average = v.mean(axis=-1)
+ >>> moving_average
+ array([1., 2., 3., 4.])
+
+ Note that a sliding window approach is often **not** optimal (see Notes).
+ """
+ window_shape = (tuple(window_shape)
+ if np.iterable(window_shape)
+ else (window_shape,))
+ # first convert input to array, possibly keeping subclass
+ x = np.array(x, copy=False, subok=subok)
+
+ window_shape_array = np.array(window_shape)
+ if np.any(window_shape_array < 0):
+ raise ValueError('`window_shape` cannot contain negative values')
+
+ if axis is None:
+ axis = tuple(range(x.ndim))
+ if len(window_shape) != len(axis):
+ raise ValueError(f'Since axis is `None`, must provide '
+ f'window_shape for all dimensions of `x`; '
+ f'got {len(window_shape)} window_shape elements '
+ f'and `x.ndim` is {x.ndim}.')
+ else:
+ axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
+ if len(window_shape) != len(axis):
+ raise ValueError(f'Must provide matching length window_shape and '
+ f'axis; got {len(window_shape)} window_shape '
+ f'elements and {len(axis)} axes elements.')
+
+ out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
+
+ # note: same axis can be windowed repeatedly
+ x_shape_trimmed = list(x.shape)
+ for ax, dim in zip(axis, window_shape):
+ if x_shape_trimmed[ax] < dim:
+ raise ValueError(
+ 'window shape cannot be larger than input array shape')
+ x_shape_trimmed[ax] -= dim - 1
+ out_shape = tuple(x_shape_trimmed) + window_shape
+ return as_strided(x, strides=out_strides, shape=out_shape,
+ subok=subok, writeable=writeable)
+
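+ # Editor's note on the shape arithmetic above: for x.shape == (6,) and
+ # window_shape == (3,), x_shape_trimmed becomes [4] and out_shape is
+ # (4, 3), matching the first docstring example; out_strides repeats the
+ # original stride, so consecutive windows overlap by window - 1 elements
+ # and no data is copied.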
+
+def _broadcast_to(array, shape, subok, readonly):
+ shape = tuple(shape) if np.iterable(shape) else (shape,)
+ array = np.array(array, copy=False, subok=subok)
+ if not shape and array.shape:
+ raise ValueError('cannot broadcast a non-scalar to a scalar array')
+ if any(size < 0 for size in shape):
+ raise ValueError('all elements of broadcast shape must be non-'
+ 'negative')
+ extras = []
+ it = np.nditer(
+ (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
+ op_flags=['readonly'], itershape=shape, order='C')
+ with it:
+ # never really has writebackifcopy semantics
+ broadcast = it.itviews[0]
+ result = _maybe_view_as_subclass(array, broadcast)
+ # In a future version this will go away
+ if not readonly and array.flags._writeable_no_warn:
+ result.flags.writeable = True
+ result.flags._warn_on_write = True
+ return result
+
+
+def _broadcast_to_dispatcher(array, shape, subok=None):
+ return (array,)
+
+
+@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
+def broadcast_to(array, shape, subok=False):
+ """Broadcast an array to a new shape.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to broadcast.
+ shape : tuple or int
+ The shape of the desired array. A single integer ``i`` is interpreted
+ as ``(i,)``.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+
+ Returns
+ -------
+ broadcast : array
+ A readonly view on the original array with the given shape. It is
+ typically not contiguous. Furthermore, more than one element of a
+ broadcasted array may refer to a single memory location.
+
+ Raises
+ ------
+ ValueError
+ If the array is not compatible with the new shape according to NumPy's
+ broadcasting rules.
+
+ See Also
+ --------
+ broadcast
+ broadcast_arrays
+ broadcast_shapes
+
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> np.broadcast_to(x, (3, 3))
+ array([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]])
+ """
+ return _broadcast_to(array, shape, subok=subok, readonly=True)
+
+
+def _broadcast_shape(*args):
+ """Returns the shape of the arrays that would result from broadcasting the
+ supplied arrays against each other.
+ """
+ # use the old iterator because np.nditer does not handle size 0 arrays
+ # consistently
+ b = np.broadcast(*args[:32])
+ # unfortunately, it cannot handle 32 or more arguments directly
+ for pos in range(32, len(args), 31):
+ # ironically, np.broadcast does not properly handle np.broadcast
+ # objects (it treats them as scalars)
+ # use broadcasting to avoid allocating the full array
+ b = broadcast_to(0, b.shape)
+ b = np.broadcast(b, *args[pos:(pos + 31)])
+ return b.shape
+
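+ # Editor's sketch (assuming numpy as np): under the 32-operand limit the
+ # chunked reduction above is equivalent to a single np.broadcast call:
+ #
+ #     >>> np.broadcast(np.empty((1, 2)), np.empty((3, 1))).shape
+ #     (3, 2)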
+
+@set_module('numpy')
+def broadcast_shapes(*args):
+ """
+ Broadcast the input shapes into a single shape.
+
+ :ref:`Learn more about broadcasting here <basics.broadcasting>`.
+
+ .. versionadded:: 1.20.0
+
+ Parameters
+ ----------
+ `*args` : tuples of ints, or ints
+ The shapes to be broadcast against each other.
+
+ Returns
+ -------
+ tuple
+ Broadcasted shape.
+
+ Raises
+ ------
+ ValueError
+ If the shapes are not compatible and cannot be broadcast according
+ to NumPy's broadcasting rules.
+
+ See Also
+ --------
+ broadcast
+ broadcast_arrays
+ broadcast_to
+
+ Examples
+ --------
+ >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
+ (3, 2)
+
+ >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
+ (5, 6, 7)
+ """
+ arrays = [np.empty(x, dtype=[]) for x in args]
+ return _broadcast_shape(*arrays)
+
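+ # Editor's note: ``dtype=[]`` gives each placeholder array an itemsize of
+ # zero, so ``np.empty`` allocates no element storage regardless of the
+ # requested shape; only the shapes take part in the broadcast.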
+
+def _broadcast_arrays_dispatcher(*args, subok=None):
+ return args
+
+
+@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
+def broadcast_arrays(*args, subok=False):
+ """
+ Broadcast any number of arrays against each other.
+
+ Parameters
+ ----------
+ `*args` : array_likes
+ The arrays to broadcast.
+
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned arrays will be forced to be a base-class array (default).
+
+ Returns
+ -------
+ broadcasted : list of arrays
+ These arrays are views on the original arrays. They are typically
+ not contiguous. Furthermore, more than one element of a
+ broadcasted array may refer to a single memory location. If you need
+ to write to the arrays, make copies first. While you can set the
+ ``writeable`` flag True, writing to a single output value may end up
+ changing more than one location in the output array.
+
+ .. deprecated:: 1.17
+ The output is currently marked so that if written to, a deprecation
+ warning will be emitted. A future version will set the
+ ``writeable`` flag False so writing to it will raise an error.
+
+ See Also
+ --------
+ broadcast
+ broadcast_to
+ broadcast_shapes
+
+ Examples
+ --------
+ >>> x = np.array([[1,2,3]])
+ >>> y = np.array([[4],[5]])
+ >>> np.broadcast_arrays(x, y)
+ [array([[1, 2, 3],
+ [1, 2, 3]]), array([[4, 4, 4],
+ [5, 5, 5]])]
+
+ Here is a useful idiom for getting contiguous copies instead of
+ non-contiguous views.
+
+ >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
+ [array([[1, 2, 3],
+ [1, 2, 3]]), array([[4, 4, 4],
+ [5, 5, 5]])]
+
+ """
+ # nditer is not used here to avoid the limit of 32 arrays.
+ # Otherwise, something like the following one-liner would suffice:
+ # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
+ # order='C').itviews
+
+ args = [np.array(_m, copy=False, subok=subok) for _m in args]
+
+ shape = _broadcast_shape(*args)
+
+ if all(array.shape == shape for array in args):
+ # Common case where nothing needs to be broadcasted.
+ return args
+
+ return [_broadcast_to(array, shape, subok=subok, readonly=False)
+ for array in args]
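+
+ # Editor's sketch (assuming numpy as np): when the shapes already match, the
+ # original arrays are returned unchanged; otherwise views are produced:
+ #
+ #     >>> a, b = np.broadcast_arrays(np.ones((1, 3)), np.ones((2, 1)))
+ #     >>> a.shape, b.shape
+ #     ((2, 3), (2, 3))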
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi b/venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi
new file mode 100644
index 00000000..4c9a98e8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/stride_tricks.pyi
@@ -0,0 +1,80 @@
+from collections.abc import Iterable
+from typing import Any, TypeVar, overload, SupportsIndex
+
+from numpy import generic
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ _ShapeLike,
+ _Shape,
+ _ArrayLike
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+__all__: list[str]
+
+class DummyArray:
+ __array_interface__: dict[str, Any]
+ base: None | NDArray[Any]
+ def __init__(
+ self,
+ interface: dict[str, Any],
+ base: None | NDArray[Any] = ...,
+ ) -> None: ...
+
+@overload
+def as_strided(
+ x: _ArrayLike[_SCT],
+ shape: None | Iterable[int] = ...,
+ strides: None | Iterable[int] = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def as_strided(
+ x: ArrayLike,
+ shape: None | Iterable[int] = ...,
+ strides: None | Iterable[int] = ...,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def sliding_window_view(
+ x: _ArrayLike[_SCT],
+ window_shape: int | Iterable[int],
+ axis: None | SupportsIndex = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def sliding_window_view(
+ x: ArrayLike,
+ window_shape: int | Iterable[int],
+ axis: None | SupportsIndex = ...,
+ *,
+ subok: bool = ...,
+ writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def broadcast_to(
+ array: _ArrayLike[_SCT],
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def broadcast_to(
+ array: ArrayLike,
+ shape: int | Iterable[int],
+ subok: bool = ...,
+) -> NDArray[Any]: ...
+
+def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...
+
+def broadcast_arrays(
+ *args: ArrayLike,
+ subok: bool = ...,
+) -> list[NDArray[Any]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npy b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npy
new file mode 100644
index 00000000..12936c92
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npy
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npz b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npz
new file mode 100644
index 00000000..68a3b53a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py2-objarr.npz
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npy b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npy
new file mode 100644
index 00000000..6776074b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npy
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npz b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npz
new file mode 100644
index 00000000..05eac0b7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/py3-objarr.npz
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/data/python3.npy b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/python3.npy
new file mode 100644
index 00000000..7c6997dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/python3.npy
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/data/win64python2.npy b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/win64python2.npy
new file mode 100644
index 00000000..d9bc36af
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/data/win64python2.npy
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py
new file mode 100644
index 00000000..c8149abc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test__datasource.py
@@ -0,0 +1,350 @@
+import os
+import pytest
+from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
+from shutil import rmtree
+
+import numpy.lib._datasource as datasource
+from numpy.testing import assert_, assert_equal, assert_raises
+
+import urllib.request as urllib_request
+from urllib.parse import urlparse
+from urllib.error import URLError
+
+
+def urlopen_stub(url, data=None):
+ '''Stub to replace urlopen for testing.'''
+ if url == valid_httpurl():
+ tmpfile = NamedTemporaryFile(prefix='urltmp_')
+ return tmpfile
+ else:
+ raise URLError('Name or service not known')
+
+# setup and teardown
+old_urlopen = None
+
+
+def setup_module():
+ global old_urlopen
+
+ old_urlopen = urllib_request.urlopen
+ urllib_request.urlopen = urlopen_stub
+
+
+def teardown_module():
+ urllib_request.urlopen = old_urlopen
+
+# A valid website for more robust testing
+http_path = 'http://www.google.com/'
+http_file = 'index.html'
+
+http_fakepath = 'http://fake.abc.web/site/'
+http_fakefile = 'fake.txt'
+
+malicious_files = ['/etc/shadow', '../../shadow',
+ '..\\system.dat', 'c:\\windows\\system.dat']
+
+magic_line = b'three is the magic number'
+
+
+# Utility functions used by many tests
+def valid_textfile(filedir):
+ # Generate and return a valid temporary file.
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
+ os.close(fd)
+ return path
+
+
+def invalid_textfile(filedir):
+ # Generate and return an invalid filename.
+ fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
+ os.close(fd)
+ os.remove(path)
+ return path
+
+
+def valid_httpurl():
+ return http_path+http_file
+
+
+def invalid_httpurl():
+ return http_fakepath+http_fakefile
+
+
+def valid_baseurl():
+ return http_path
+
+
+def invalid_baseurl():
+ return http_fakepath
+
+
+def valid_httpfile():
+ return http_file
+
+
+def invalid_httpfile():
+ return http_fakefile
+
+
+class TestDataSourceOpen:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.ds = datasource.DataSource(self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.ds
+
+ def test_ValidHTTP(self):
+ fh = self.ds.open(valid_httpurl())
+ assert_(fh)
+ fh.close()
+
+ def test_InvalidHTTP(self):
+ url = invalid_httpurl()
+ assert_raises(OSError, self.ds.open, url)
+ try:
+ self.ds.open(url)
+ except OSError as e:
+ # Regression test for bug fixed in r4342.
+ assert_(e.errno is None)
+
+ def test_InvalidHTTPCacheURLError(self):
+ assert_raises(URLError, self.ds._cache, invalid_httpurl())
+
+ def test_ValidFile(self):
+ local_file = valid_textfile(self.tmpdir)
+ fh = self.ds.open(local_file)
+ assert_(fh)
+ fh.close()
+
+ def test_InvalidFile(self):
+ invalid_file = invalid_textfile(self.tmpdir)
+ assert_raises(OSError, self.ds.open, invalid_file)
+
+ def test_ValidGzipFile(self):
+ try:
+ import gzip
+ except ImportError:
+ # We don't have the gzip capabilities to test.
+ pytest.skip()
+ # Test datasource's internal file_opener for Gzip files.
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
+ fp = gzip.open(filepath, 'w')
+ fp.write(magic_line)
+ fp.close()
+ fp = self.ds.open(filepath)
+ result = fp.readline()
+ fp.close()
+ assert_equal(magic_line, result)
+
+ def test_ValidBz2File(self):
+ try:
+ import bz2
+ except ImportError:
+ # We don't have the bz2 capabilities to test.
+ pytest.skip()
+ # Test datasource's internal file_opener for BZip2 files.
+ filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
+ fp = bz2.BZ2File(filepath, 'w')
+ fp.write(magic_line)
+ fp.close()
+ fp = self.ds.open(filepath)
+ result = fp.readline()
+ fp.close()
+ assert_equal(magic_line, result)
+
+
+class TestDataSourceExists:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.ds = datasource.DataSource(self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.ds
+
+ def test_ValidHTTP(self):
+ assert_(self.ds.exists(valid_httpurl()))
+
+ def test_InvalidHTTP(self):
+ assert_equal(self.ds.exists(invalid_httpurl()), False)
+
+ def test_ValidFile(self):
+ # Test valid file in destpath
+ tmpfile = valid_textfile(self.tmpdir)
+ assert_(self.ds.exists(tmpfile))
+ # Test valid local file not in destpath
+ localdir = mkdtemp()
+ tmpfile = valid_textfile(localdir)
+ assert_(self.ds.exists(tmpfile))
+ rmtree(localdir)
+
+ def test_InvalidFile(self):
+ tmpfile = invalid_textfile(self.tmpdir)
+ assert_equal(self.ds.exists(tmpfile), False)
+
+
+class TestDataSourceAbspath:
+ def setup_method(self):
+ self.tmpdir = os.path.abspath(mkdtemp())
+ self.ds = datasource.DataSource(self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.ds
+
+ def test_ValidHTTP(self):
+ scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
+ local_path = os.path.join(self.tmpdir, netloc,
+ upath.strip(os.sep).strip('/'))
+ assert_equal(local_path, self.ds.abspath(valid_httpurl()))
+
+ def test_ValidFile(self):
+ tmpfile = valid_textfile(self.tmpdir)
+ tmpfilename = os.path.split(tmpfile)[-1]
+ # Test with filename only
+ assert_equal(tmpfile, self.ds.abspath(tmpfilename))
+ # Test filename with complete path
+ assert_equal(tmpfile, self.ds.abspath(tmpfile))
+
+ def test_InvalidHTTP(self):
+ scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
+ invalidhttp = os.path.join(self.tmpdir, netloc,
+ upath.strip(os.sep).strip('/'))
+ assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
+
+ def test_InvalidFile(self):
+ invalidfile = valid_textfile(self.tmpdir)
+ tmpfile = valid_textfile(self.tmpdir)
+ tmpfilename = os.path.split(tmpfile)[-1]
+ # Test with filename only
+ assert_(invalidfile != self.ds.abspath(tmpfilename))
+ # Test filename with complete path
+ assert_(invalidfile != self.ds.abspath(tmpfile))
+
+ def test_sandboxing(self):
+ tmpfile = valid_textfile(self.tmpdir)
+ tmpfilename = os.path.split(tmpfile)[-1]
+
+ tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
+
+ assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))
+ assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))
+ assert_(tmp_path(tmpfile).startswith(self.tmpdir))
+ assert_(tmp_path(tmpfilename).startswith(self.tmpdir))
+ for fn in malicious_files:
+ assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
+ assert_(tmp_path(fn).startswith(self.tmpdir))
+
+ def test_windows_os_sep(self):
+ orig_os_sep = os.sep
+ try:
+ os.sep = '\\'
+ self.test_ValidHTTP()
+ self.test_ValidFile()
+ self.test_InvalidHTTP()
+ self.test_InvalidFile()
+ self.test_sandboxing()
+ finally:
+ os.sep = orig_os_sep
+
+
+class TestRepositoryAbspath:
+ def setup_method(self):
+ self.tmpdir = os.path.abspath(mkdtemp())
+ self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.repos
+
+ def test_ValidHTTP(self):
+ scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
+ local_path = os.path.join(self.repos._destpath, netloc,
+ upath.strip(os.sep).strip('/'))
+ filepath = self.repos.abspath(valid_httpfile())
+ assert_equal(local_path, filepath)
+
+ def test_sandboxing(self):
+ tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
+ assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
+ for fn in malicious_files:
+ assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
+ assert_(tmp_path(fn).startswith(self.tmpdir))
+
+ def test_windows_os_sep(self):
+ orig_os_sep = os.sep
+ try:
+ os.sep = '\\'
+ self.test_ValidHTTP()
+ self.test_sandboxing()
+ finally:
+ os.sep = orig_os_sep
+
+
+class TestRepositoryExists:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+ self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+ del self.repos
+
+ def test_ValidFile(self):
+ # Create local temp file
+ tmpfile = valid_textfile(self.tmpdir)
+ assert_(self.repos.exists(tmpfile))
+
+ def test_InvalidFile(self):
+ tmpfile = invalid_textfile(self.tmpdir)
+ assert_equal(self.repos.exists(tmpfile), False)
+
+ def test_RemoveHTTPFile(self):
+ assert_(self.repos.exists(valid_httpurl()))
+
+ def test_CachedHTTPFile(self):
+ localfile = valid_httpurl()
+ # Create a locally cached temp file with a URL-based
+ # directory structure. This is similar to what Repository.open
+ # would do.
+ scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
+ local_path = os.path.join(self.repos._destpath, netloc)
+ os.mkdir(local_path, 0o0700)
+ tmpfile = valid_textfile(local_path)
+ assert_(self.repos.exists(tmpfile))
+
+
+class TestOpenFunc:
+ def setup_method(self):
+ self.tmpdir = mkdtemp()
+
+ def teardown_method(self):
+ rmtree(self.tmpdir)
+
+ def test_DataSourceOpen(self):
+ local_file = valid_textfile(self.tmpdir)
+ # Test case where destpath is passed in
+ fp = datasource.open(local_file, destpath=self.tmpdir)
+ assert_(fp)
+ fp.close()
+ # Test case where default destpath is used
+ fp = datasource.open(local_file)
+ assert_(fp)
+ fp.close()
+
+def test_del_attr_handling():
+ # DataSource __del__ can be called
+ # even if __init__ fails when the
+ # Exception object is caught by the
+ # caller, as happens in the refguide_check
+ # is_deprecated() function
+
+ ds = datasource.DataSource()
+ # simulate failed __init__ by removing key attribute
+ # produced within __init__ and expected by __del__
+ del ds._istmpdest
+ # should not raise an AttributeError if __del__
+ # gracefully handles failed __init__:
+ ds.__del__()
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py
new file mode 100644
index 00000000..a5b78702
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test__iotools.py
@@ -0,0 +1,353 @@
+import time
+from datetime import date
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_allclose, assert_raises,
+ )
+from numpy.lib._iotools import (
+ LineSplitter, NameValidator, StringConverter,
+ has_nested_fields, easy_dtype, flatten_dtype
+ )
+
+
+class TestLineSplitter:
+ "Tests the LineSplitter class."
+
+ def test_no_delimiter(self):
+ "Test LineSplitter w/o delimiter"
+ strg = " 1 2 3 4 5 # test"
+ test = LineSplitter()(strg)
+ assert_equal(test, ['1', '2', '3', '4', '5'])
+ test = LineSplitter('')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '5'])
+
+ def test_space_delimiter(self):
+ "Test space delimiter"
+ strg = " 1 2 3 4 5 # test"
+ test = LineSplitter(' ')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+ test = LineSplitter(' ')(strg)
+ assert_equal(test, ['1 2 3 4', '5'])
+
+ def test_tab_delimiter(self):
+ "Test tab delimiter"
+ strg = " 1\t 2\t 3\t 4\t 5 6"
+ test = LineSplitter('\t')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '5 6'])
+ strg = " 1 2\t 3 4\t 5 6"
+ test = LineSplitter('\t')(strg)
+ assert_equal(test, ['1 2', '3 4', '5 6'])
+
+ def test_other_delimiter(self):
+ "Test LineSplitter on delimiter"
+ strg = "1,2,3,4,,5"
+ test = LineSplitter(',')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+ #
+ strg = " 1,2,3,4,,5 # test"
+ test = LineSplitter(',')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+ # gh-11028 bytes comment/delimiters should get encoded
+ strg = b" 1,2,3,4,,5 % test"
+ test = LineSplitter(delimiter=b',', comments=b'%')(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5'])
+
+ def test_constant_fixed_width(self):
+ "Test LineSplitter w/ fixed-width fields"
+ strg = " 1 2 3 4 5 # test"
+ test = LineSplitter(3)(strg)
+ assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
+ #
+ strg = " 1 3 4 5 6# test"
+ test = LineSplitter(20)(strg)
+ assert_equal(test, ['1 3 4 5 6'])
+ #
+ strg = " 1 3 4 5 6# test"
+ test = LineSplitter(30)(strg)
+ assert_equal(test, ['1 3 4 5 6'])
+
+ def test_variable_fixed_width(self):
+ strg = " 1 3 4 5 6# test"
+ test = LineSplitter((3, 6, 6, 3))(strg)
+ assert_equal(test, ['1', '3', '4 5', '6'])
+ #
+ strg = " 1 3 4 5 6# test"
+ test = LineSplitter((6, 6, 9))(strg)
+ assert_equal(test, ['1', '3 4', '5 6'])
+
+# -----------------------------------------------------------------------------
+
+
+class TestNameValidator:
+
+ def test_case_sensitivity(self):
+ "Test case sensitivity"
+ names = ['A', 'a', 'b', 'c']
+ test = NameValidator().validate(names)
+ assert_equal(test, ['A', 'a', 'b', 'c'])
+ test = NameValidator(case_sensitive=False).validate(names)
+ assert_equal(test, ['A', 'A_1', 'B', 'C'])
+ test = NameValidator(case_sensitive='upper').validate(names)
+ assert_equal(test, ['A', 'A_1', 'B', 'C'])
+ test = NameValidator(case_sensitive='lower').validate(names)
+ assert_equal(test, ['a', 'a_1', 'b', 'c'])
+
+ # check exceptions
+ assert_raises(ValueError, NameValidator, case_sensitive='foobar')
+
+ def test_excludelist(self):
+ "Test excludelist"
+ names = ['dates', 'data', 'Other Data', 'mask']
+ validator = NameValidator(excludelist=['dates', 'data', 'mask'])
+ test = validator.validate(names)
+ assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
+
+ def test_missing_names(self):
+ "Test validate missing names"
+ namelist = ('a', 'b', 'c')
+ validator = NameValidator()
+ assert_equal(validator(namelist), ['a', 'b', 'c'])
+ namelist = ('', 'b', 'c')
+ assert_equal(validator(namelist), ['f0', 'b', 'c'])
+ namelist = ('a', 'b', '')
+ assert_equal(validator(namelist), ['a', 'b', 'f0'])
+ namelist = ('', 'f0', '')
+ assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
+
+ def test_validate_nb_names(self):
+ "Test validate nb names"
+ namelist = ('a', 'b', 'c')
+ validator = NameValidator()
+ assert_equal(validator(namelist, nbfields=1), ('a',))
+ assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
+ ['a', 'b', 'c', 'g0', 'g1'])
+
+ def test_validate_wo_names(self):
+ "Test validate no names"
+ namelist = None
+ validator = NameValidator()
+ assert_(validator(namelist) is None)
+ assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
+
+# -----------------------------------------------------------------------------
+
+
+def _bytes_to_date(s):
+ return date(*time.strptime(s, "%Y-%m-%d")[:3])
+
+
+class TestStringConverter:
+ "Test StringConverter"
+
+ def test_creation(self):
+ "Test creation of a StringConverter"
+ converter = StringConverter(int, -99999)
+ assert_equal(converter._status, 1)
+ assert_equal(converter.default, -99999)
+
+ def test_upgrade(self):
+ "Tests the upgrade method."
+
+ converter = StringConverter()
+ assert_equal(converter._status, 0)
+
+ # test int
+ assert_equal(converter.upgrade('0'), 0)
+ assert_equal(converter._status, 1)
+
+ # On systems where long defaults to 32-bit, the statuses will be
+ # offset by one, so we check for this here.
+ import numpy.core.numeric as nx
+ status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
+
+ # test int > 2**32
+ assert_equal(converter.upgrade('17179869184'), 17179869184)
+ assert_equal(converter._status, 1 + status_offset)
+
+ # test float
+ assert_allclose(converter.upgrade('0.'), 0.0)
+ assert_equal(converter._status, 2 + status_offset)
+
+ # test complex
+ assert_equal(converter.upgrade('0j'), complex('0j'))
+ assert_equal(converter._status, 3 + status_offset)
+
+ # test str
+ # note that the longdouble type has been skipped, so the
+ # _status increases by 2. Everything should succeed with
+ # unicode conversion (8).
+ for s in ['a', b'a']:
+ res = converter.upgrade(s)
+ assert_(type(res) is str)
+ assert_equal(res, 'a')
+ assert_equal(converter._status, 8 + status_offset)
+
+ def test_missing(self):
+ "Tests the use of missing values."
+ converter = StringConverter(missing_values=('missing',
+ 'missed'))
+ converter.upgrade('0')
+ assert_equal(converter('0'), 0)
+ assert_equal(converter(''), converter.default)
+ assert_equal(converter('missing'), converter.default)
+ assert_equal(converter('missed'), converter.default)
+ try:
+ converter('miss')
+ except ValueError:
+ pass
+
+ def test_upgrademapper(self):
+ "Tests updatemapper"
+ dateparser = _bytes_to_date
+ _original_mapper = StringConverter._mapper[:]
+ try:
+ StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
+ convert = StringConverter(dateparser, date(2000, 1, 1))
+ test = convert('2001-01-01')
+ assert_equal(test, date(2001, 1, 1))
+ test = convert('2009-01-01')
+ assert_equal(test, date(2009, 1, 1))
+ test = convert('')
+ assert_equal(test, date(2000, 1, 1))
+ finally:
+ StringConverter._mapper = _original_mapper
+
+ def test_string_to_object(self):
+ "Make sure that string-to-object functions are properly recognized"
+ old_mapper = StringConverter._mapper[:] # copy of list
+ conv = StringConverter(_bytes_to_date)
+ assert_equal(conv._mapper, old_mapper)
+ assert_(hasattr(conv, 'default'))
+
+ def test_keep_default(self):
+ "Make sure we don't lose an explicit default"
+ converter = StringConverter(None, missing_values='',
+ default=-999)
+ converter.upgrade('3.14159265')
+ assert_equal(converter.default, -999)
+ assert_equal(converter.type, np.dtype(float))
+ #
+ converter = StringConverter(
+ None, missing_values='', default=0)
+ converter.upgrade('3.14159265')
+ assert_equal(converter.default, 0)
+ assert_equal(converter.type, np.dtype(float))
+
+ def test_keep_default_zero(self):
+ "Check that we don't lose a default of 0"
+ converter = StringConverter(int, default=0,
+ missing_values="N/A")
+ assert_equal(converter.default, 0)
+
+ def test_keep_missing_values(self):
+ "Check that we're not losing missing values"
+ converter = StringConverter(int, default=0,
+ missing_values="N/A")
+ assert_equal(
+ converter.missing_values, {'', 'N/A'})
+
+ def test_int64_dtype(self):
+ "Check that int64 integer types can be specified"
+ converter = StringConverter(np.int64, default=0)
+ val = "-9223372036854775807"
+ assert_(converter(val) == -9223372036854775807)
+ val = "9223372036854775807"
+ assert_(converter(val) == 9223372036854775807)
+
+ def test_uint64_dtype(self):
+ "Check that uint64 integer types can be specified"
+ converter = StringConverter(np.uint64, default=0)
+ val = "9223372043271415339"
+ assert_(converter(val) == 9223372043271415339)
+
+
+class TestMiscFunctions:
+
+ def test_has_nested_dtype(self):
+ "Test has_nested_dtype"
+ ndtype = np.dtype(float)
+ assert_equal(has_nested_fields(ndtype), False)
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ assert_equal(has_nested_fields(ndtype), False)
+ ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
+ assert_equal(has_nested_fields(ndtype), True)
+
+ def test_easy_dtype(self):
+ "Test ndtype on dtypes"
+ # Simple case
+ ndtype = float
+ assert_equal(easy_dtype(ndtype), np.dtype(float))
+ # As string w/o names
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype),
+ np.dtype([('f0', "i4"), ('f1', "f8")]))
+ # As string w/o names but different default format
+ assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
+ np.dtype([('field_000', "i4"), ('field_001', "f8")]))
+ # As string w/ names
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype, names="a, b"),
+ np.dtype([('a', "i4"), ('b', "f8")]))
+ # As string w/ names (too many)
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype, names="a, b, c"),
+ np.dtype([('a', "i4"), ('b', "f8")]))
+ # As string w/ names (not enough)
+ ndtype = "i4, f8"
+ assert_equal(easy_dtype(ndtype, names=", b"),
+ np.dtype([('f0', "i4"), ('b', "f8")]))
+ # ... (with different default format)
+ assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
+ np.dtype([('a', "i4"), ('f00', "f8")]))
+ # As list of tuples w/o names
+ ndtype = [('A', int), ('B', float)]
+ assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
+ # As list of tuples w/ names
+ assert_equal(easy_dtype(ndtype, names="a,b"),
+ np.dtype([('a', int), ('b', float)]))
+ # As list of tuples w/ not enough names
+ assert_equal(easy_dtype(ndtype, names="a"),
+ np.dtype([('a', int), ('f0', float)]))
+ # As list of tuples w/ too many names
+ assert_equal(easy_dtype(ndtype, names="a,b,c"),
+ np.dtype([('a', int), ('b', float)]))
+ # As list of types w/o names
+ ndtype = (int, float, float)
+ assert_equal(easy_dtype(ndtype),
+ np.dtype([('f0', int), ('f1', float), ('f2', float)]))
+ # As list of types w names
+ ndtype = (int, float, float)
+ assert_equal(easy_dtype(ndtype, names="a, b, c"),
+ np.dtype([('a', int), ('b', float), ('c', float)]))
+ # As simple dtype w/ names
+ ndtype = np.dtype(float)
+ assert_equal(easy_dtype(ndtype, names="a, b, c"),
+ np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
+ # As simple dtype w/o names (but multiple fields)
+ ndtype = np.dtype(float)
+ assert_equal(
+ easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
+ np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
+
+ def test_flatten_dtype(self):
+ "Testing flatten_dtype"
+ # Standard dtype
+ dt = np.dtype([("a", "f8"), ("b", "f8")])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [float, float])
+ # Recursive dtype
+ dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
+ # dtype with shaped fields
+ dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [float, int])
+ dt_flat = flatten_dtype(dt, True)
+ assert_equal(dt_flat, [float] * 2 + [int] * 3)
+ # dtype w/ titles
+ dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
+ dt_flat = flatten_dtype(dt)
+ assert_equal(dt_flat, [float, float])
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test__version.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test__version.py
new file mode 100644
index 00000000..e6d41ad9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test__version.py
@@ -0,0 +1,64 @@
+"""Tests for the NumpyVersion class.
+
+"""
+from numpy.testing import assert_, assert_raises
+from numpy.lib import NumpyVersion
+
+
+def test_main_versions():
+ assert_(NumpyVersion('1.8.0') == '1.8.0')
+ for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:
+ assert_(NumpyVersion('1.8.0') < ver)
+
+ for ver in ['1.7.0', '1.7.1', '0.9.9']:
+ assert_(NumpyVersion('1.8.0') > ver)
+
+
+def test_version_1_point_10():
+ # regression test for gh-2998.
+ assert_(NumpyVersion('1.9.0') < '1.10.0')
+ assert_(NumpyVersion('1.11.0') < '1.11.1')
+ assert_(NumpyVersion('1.11.0') == '1.11.0')
+ assert_(NumpyVersion('1.99.11') < '1.99.12')
+
+
+def test_alpha_beta_rc():
+ assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
+ for ver in ['1.8.0', '1.8.0rc2']:
+ assert_(NumpyVersion('1.8.0rc1') < ver)
+
+ for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
+ assert_(NumpyVersion('1.8.0rc1') > ver)
+
+ assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
+
+
+def test_dev_version():
+ assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
+ for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
+ assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
+
+ assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
+
+
+def test_dev_a_b_rc_mixed():
+ assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
+ assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
+
+
+def test_dev0_version():
+ assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
+ for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+ assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
+
+ assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
+
+
+def test_dev0_a_b_rc_mixed():
+ assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
+ assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
+
+
+def test_raises():
+ for ver in ['1.9', '1,9.0', '1.7.x']:
+ assert_raises(ValueError, NumpyVersion, ver)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py
new file mode 100644
index 00000000..a5968157
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraypad.py
@@ -0,0 +1,1363 @@
+"""Tests for the array padding functions.
+
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import assert_array_equal, assert_allclose, assert_equal
+from numpy.lib.arraypad import _as_pairs
+
+
+_numeric_dtypes = (
+ np.sctypes["uint"]
+ + np.sctypes["int"]
+ + np.sctypes["float"]
+ + np.sctypes["complex"]
+)
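+# Maps every padding mode to the mode-specific keyword arguments np.pad
+# accepts for it (with their default values); the parametrized tests below
+# iterate over this mapping.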
+_all_modes = {
+ 'constant': {'constant_values': 0},
+ 'edge': {},
+ 'linear_ramp': {'end_values': 0},
+ 'maximum': {'stat_length': None},
+ 'mean': {'stat_length': None},
+ 'median': {'stat_length': None},
+ 'minimum': {'stat_length': None},
+ 'reflect': {'reflect_type': 'even'},
+ 'symmetric': {'reflect_type': 'even'},
+ 'wrap': {},
+ 'empty': {}
+}
+
+
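+# _as_pairs is a private helper of numpy.lib.arraypad: it broadcasts its
+# input to an (N, 2) array of per-axis (before, after) pairs, e.g. a scalar
+# 3 becomes [[3, 3]] * N.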
+class TestAsPairs:
+ def test_single_value(self):
+ """Test casting for a single value."""
+ expected = np.array([[3, 3]] * 10)
+ for x in (3, [3], [[3]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # Test with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(obj, 10),
+ np.array([[obj, obj]] * 10)
+ )
+
+ def test_two_values(self):
+ """Test proper casting for two different values."""
+ # Broadcasting in the first dimension with numbers
+ expected = np.array([[3, 4]] * 10)
+ for x in ([3, 4], [[3, 4]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # and with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(["a", obj], 10),
+ np.array([["a", obj]] * 10)
+ )
+
+ # Broadcasting in the second / last dimension with numbers
+ assert_equal(
+ _as_pairs([[3], [4]], 2),
+ np.array([[3, 3], [4, 4]])
+ )
+ # and with dtype=object
+ assert_equal(
+ _as_pairs([["a"], [obj]], 2),
+ np.array([["a", "a"], [obj, obj]])
+ )
+
+ def test_with_none(self):
+ expected = ((None, None), (None, None), (None, None))
+ assert_equal(
+ _as_pairs(None, 3, as_index=False),
+ expected
+ )
+ assert_equal(
+ _as_pairs(None, 3, as_index=True),
+ expected
+ )
+
+ def test_pass_through(self):
+ """Test if `x` already matching desired output are passed through."""
+ expected = np.arange(12).reshape((6, 2))
+ assert_equal(
+ _as_pairs(expected, 6),
+ expected
+ )
+
+ def test_as_index(self):
+ """Test results if `as_index=True`."""
+ assert_equal(
+ _as_pairs([2.6, 3.3], 10, as_index=True),
+ np.array([[3, 3]] * 10, dtype=np.intp)
+ )
+ assert_equal(
+ _as_pairs([2.6, 4.49], 10, as_index=True),
+ np.array([[3, 4]] * 10, dtype=np.intp)
+ )
+ for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
+ [[1, 2]] * 9 + [[1, -2]]):
+ with pytest.raises(ValueError, match="negative values"):
+ _as_pairs(x, 10, as_index=True)
+
+ def test_exceptions(self):
+ """Ensure faulty usage is discovered."""
+ with pytest.raises(ValueError, match="more dimensions than allowed"):
+ _as_pairs([[[3]]], 10)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs([[1, 2], [3, 4]], 3)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs(np.ones((2, 3)), 3)
+
+
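+# These tests cover cases where np.pad can take a shortcut: zero-width
+# padding should return the input unchanged for every mode, a statistic
+# over a length-1 window reduces to 'edge', and a stat_length longer than
+# the axis is clipped to the default (whole-axis) behavior.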
+class TestConditionalShortcuts:
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_zero_padding_shortcuts(self, mode):
+ test = np.arange(120).reshape(4, 5, 6)
+ pad_amt = [(0, 0) for _ in test.shape]
+ assert_array_equal(test, np.pad(test, pad_amt, mode=mode))
+
+ @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
+ def test_shallow_statistic_range(self, mode):
+ test = np.arange(120).reshape(4, 5, 6)
+ pad_amt = [(1, 1) for _ in test.shape]
+ assert_array_equal(np.pad(test, pad_amt, mode='edge'),
+ np.pad(test, pad_amt, mode=mode, stat_length=1))
+
+ @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
+ def test_clip_statistic_range(self, mode):
+ test = np.arange(30).reshape(5, 6)
+ pad_amt = [(3, 3) for _ in test.shape]
+ assert_array_equal(np.pad(test, pad_amt, mode=mode),
+ np.pad(test, pad_amt, mode=mode, stat_length=30))
+
+
+class TestStatistic:
+ def test_check_mean_stat_length(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
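+ # stat_length=((2, 3),): the left pad is the mean of the first two
+ # values (0.5) and the right pad the mean of the last three (98.0).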
+ b = np.array(
+ [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
+ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
+ 0.5, 0.5, 0.5, 0.5, 0.5,
+
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+
+ 98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
+ 98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
+ ])
+ assert_array_equal(a, b)
+
+ def test_check_maximum_1(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'maximum')
+ b = np.array(
+ [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_maximum_2(self):
+ a = np.arange(100) + 1
+ a = np.pad(a, (25, 20), 'maximum')
+ b = np.array(
+ [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100,
+
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_maximum_stat_length(self):
+ a = np.arange(100) + 1
+ a = np.pad(a, (25, 20), 'maximum', stat_length=10)
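+ # Only the 10 values nearest each edge are considered: max(1..10) == 10
+ # on the left, max(91..100) == 100 on the right.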
+ b = np.array(
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10,
+
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_minimum_1(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'minimum')
+ b = np.array(
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_minimum_2(self):
+ a = np.arange(100) + 2
+ a = np.pad(a, (25, 20), 'minimum')
+ b = np.array(
+ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2,
+
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
+
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_minimum_stat_length(self):
+ a = np.arange(100) + 1
+ a = np.pad(a, (25, 20), 'minimum', stat_length=10)
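+ # min(1..10) == 1 pads the left edge, min(91..100) == 91 the right.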
+ b = np.array(
+ [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1,
+
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+
+ 91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
+ 91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, (25, 20), 'median')
+ b = np.array(
+ [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5,
+
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median_01(self):
+ a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
+ a = np.pad(a, 1, 'median')
+ b = np.array(
+ [[4, 4, 5, 4, 4],
+
+ [3, 3, 1, 4, 3],
+ [5, 4, 5, 9, 5],
+ [8, 9, 8, 2, 8],
+
+ [4, 4, 5, 4, 4]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median_02(self):
+ a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
+ a = np.pad(a.T, 1, 'median').T
+ b = np.array(
+ [[5, 4, 5, 4, 5],
+
+ [3, 3, 1, 4, 3],
+ [5, 4, 5, 9, 5],
+ [8, 9, 8, 2, 8],
+
+ [5, 4, 5, 4, 5]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_median_stat_length(self):
+ a = np.arange(100).astype('f')
+ a[1] = 2.
+ a[97] = 96.
+ a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
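+ # Median of the first three values [0, 2, 2] is 2; median of the last
+ # five [95, 96, 96, 98, 99] is 96.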
+ b = np.array(
+ [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
+ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
+ 2., 2., 2., 2., 2.,
+
+ 0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
+
+ 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
+ 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_mean_shape_one(self):
+ a = [[4, 5, 6]]
+ a = np.pad(a, (5, 7), 'mean', stat_length=2)
+ b = np.array(
+ [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
+ [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_mean_2(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, (25, 20), 'mean')
+ b = np.array(
+ [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5,
+
+ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
+ 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
+ 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
+ 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
+ 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
+ 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
+ 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
+ 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
+ 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
+ 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
+
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
+ 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.parametrize("mode", [
+ "mean",
+ "median",
+ "minimum",
+ "maximum"
+ ])
+ def test_same_prepend_append(self, mode):
+ """ Test that appended and prepended values are equal """
+ # This test is constructed to trigger floating point rounding errors in
+ # a way that caused gh-11216 for mode=='mean'
+ a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
+ a = np.pad(a, (1, 1), mode)
+ assert_equal(a[0], a[-1])
+
+ @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"])
+ @pytest.mark.parametrize(
+ "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]
+ )
+ def test_check_negative_stat_length(self, mode, stat_length):
+ arr = np.arange(30).reshape((6, 5))
+ match = "index can't contain negative values"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, 2, mode, stat_length=stat_length)
+
+ def test_simple_stat_length(self):
+ a = np.arange(30)
+ a = np.reshape(a, (6, 5))
+ a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
+ b = np.array(
+ [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+
+ [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
+ [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
+
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
+ @pytest.mark.filterwarnings(
+ "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning"
+ )
+ @pytest.mark.parametrize("mode", ["mean", "median"])
+ def test_zero_stat_length_valid(self, mode):
+ arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
+ expected = np.array([np.nan, 1., 2., np.nan, np.nan])
+ assert_equal(arr, expected)
+
+ @pytest.mark.parametrize("mode", ["minimum", "maximum"])
+ def test_zero_stat_length_invalid(self, mode):
+ match = "stat_length of 0 yields no value for padding"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=(1, 0))
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=(1, 0))
+
+
+class TestConstant:
+ def test_check_constant(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
+ b = np.array(
+ [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_constant_zeros(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'constant')
+ b = np.array(
+ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_constant_float(self):
+ # If input array is int, but constant_values are float, the dtype of
+ # the array to be padded is kept
+ arr = np.arange(30).reshape(5, 6)
+ test = np.pad(arr, (1, 2), mode='constant',
+ constant_values=1.1)
+ expected = np.array(
+ [[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
+
+ [ 1, 0, 1, 2, 3, 4, 5, 1, 1],
+ [ 1, 6, 7, 8, 9, 10, 11, 1, 1],
+ [ 1, 12, 13, 14, 15, 16, 17, 1, 1],
+ [ 1, 18, 19, 20, 21, 22, 23, 1, 1],
+ [ 1, 24, 25, 26, 27, 28, 29, 1, 1],
+
+ [ 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ [ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_constant_float2(self):
+ # If input array is float, and constant_values are float, the dtype of
+ # the array to be padded is kept - here retaining the float constants
+ arr = np.arange(30).reshape(5, 6)
+ arr_float = arr.astype(np.float64)
+ test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',
+ constant_values=1.1)
+ expected = np.array(
+ [[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
+
+ [ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
+ [ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
+ [ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
+ [ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
+ [ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],
+
+ [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
+ [ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_constant_float3(self):
+ a = np.arange(100, dtype=float)
+ a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
+ b = np.array(
+ [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
+ -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
+ -1.1, -1.1, -1.1, -1.1, -1.1,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
+ -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
+ )
+ assert_allclose(a, b)
+
+ def test_check_constant_odd_pad_amount(self):
+ arr = np.arange(30).reshape(5, 6)
+ test = np.pad(arr, ((1,), (2,)), mode='constant',
+ constant_values=3)
+ expected = np.array(
+ [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
+
+ [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
+ [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
+ [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
+ [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
+ [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
+
+ [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_constant_pad_2d(self):
+ arr = np.arange(4).reshape(2, 2)
+ test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant',
+ constant_values=((1, 2), (3, 4)))
+ expected = np.array(
+ [[3, 1, 1, 4, 4, 4],
+ [3, 0, 1, 4, 4, 4],
+ [3, 2, 3, 4, 4, 4],
+ [3, 2, 2, 4, 4, 4],
+ [3, 2, 2, 4, 4, 4]]
+ )
+ assert_allclose(test, expected)
+
+ def test_check_large_integers(self):
+ uint64_max = 2 ** 64 - 1
+ arr = np.full(5, uint64_max, dtype=np.uint64)
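+ # arr.min() is uint64_max itself; the constant must survive padding
+ # without being cast through a narrower or signed intermediate type.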
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, uint64_max, dtype=np.uint64)
+ assert_array_equal(test, expected)
+
+ int64_max = 2 ** 63 - 1
+ arr = np.full(5, int64_max, dtype=np.int64)
+ test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
+ expected = np.full(7, int64_max, dtype=np.int64)
+ assert_array_equal(test, expected)
+
+ def test_check_object_array(self):
+ arr = np.empty(1, dtype=object)
+ obj_a = object()
+ arr[0] = obj_a
+ obj_b = object()
+ obj_c = object()
+ arr = np.pad(arr, pad_width=1, mode='constant',
+ constant_values=(obj_b, obj_c))
+
+ expected = np.empty((3,), dtype=object)
+ expected[0] = obj_b
+ expected[1] = obj_a
+ expected[2] = obj_c
+
+ assert_array_equal(arr, expected)
+
+ def test_pad_empty_dimension(self):
+ arr = np.zeros((3, 0, 2))
+ result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
+ assert result.shape == (3, 4, 4)
+
+
+class TestLinearRamp:
+ def test_check_simple(self):
+ a = np.arange(100).astype('f')
+ a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
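+ # The left pad ramps linearly from end_value 4 down to the edge value 0
+ # over 25 samples; the right pad ramps from 99 down to 5 over 20 samples.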
+ b = np.array(
+ [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
+ 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
+ 0.80, 0.64, 0.48, 0.32, 0.16,
+
+ 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
+ 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
+ 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
+ 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
+ 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
+ 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
+ 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
+ 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
+ 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
+ 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
+
+ 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
+ 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
+ )
+ assert_allclose(a, b, rtol=1e-5, atol=1e-5)
+
+ def test_check_2d(self):
+ arr = np.arange(20).reshape(4, 5).astype(np.float64)
+ test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
+ expected = np.array(
+ [[0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
+ [0., 0., 0., 1., 2., 3., 4., 2., 0.],
+ [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
+ [0., 5., 10., 11., 12., 13., 14., 7., 0.],
+ [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
+ [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+ assert_allclose(test, expected)
+
+ @pytest.mark.xfail(exceptions=(AssertionError,))
+ def test_object_array(self):
+ from fractions import Fraction
+ arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
+ actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)
+
+ # deliberately chosen to have a non-power-of-2 denominator such that
+ # rounding to floats causes a failure.
+ expected = np.array([
+ Fraction( 0, 12),
+ Fraction( 3, 12),
+ Fraction( 6, 12),
+ Fraction(-6, 12),
+ Fraction(-4, 12),
+ Fraction(-2, 12),
+ Fraction(-0, 12),
+ ])
+ assert_equal(actual, expected)
+
+ def test_end_values(self):
+ """Ensure that end values are exact."""
+ a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
+ assert_equal(a[:, 0], 0.)
+ assert_equal(a[:, -1], 0.)
+ assert_equal(a[0, :], 0.)
+ assert_equal(a[-1, :], 0.)
+
+ @pytest.mark.parametrize("dtype", _numeric_dtypes)
+ def test_negative_difference(self, dtype):
+ """
+ Check correct behavior of unsigned dtypes when there is a negative
+ difference between the edge to pad and `end_values`. Check both
+ directions so the test is independent of the implementation. Also test
+ all other dtypes, in case dtype casting interferes with complex
+ dtypes. See gh-14191.
+ """
+ x = np.array([3], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=0)
+ expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
+ assert_equal(result, expected)
+
+ x = np.array([0], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=3)
+ expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
+ assert_equal(result, expected)
+
+
+class TestReflect:
+ def test_check_simple(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'reflect')
+ b = np.array(
+ [25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
+ 88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_odd_method(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')
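+ # Odd reflection mirrors through the edge value: each padded entry is
+ # 2 * edge - reflected_value, hence the negative run on the left.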
+ b = np.array(
+ [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
+ -15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
+ -5, -4, -3, -2, -1,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_large_pad(self):
+ a = [[4, 5, 6], [6, 7, 8]]
+ a = np.pad(a, (5, 7), 'reflect')
+ b = np.array(
+ [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_shape(self):
+ a = [[4, 5, 6]]
+ a = np.pad(a, (5, 7), 'reflect')
+ b = np.array(
+ [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
+ [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_01(self):
+ a = np.pad([1, 2, 3], 2, 'reflect')
+ b = np.array([3, 2, 1, 2, 3, 2, 1])
+ assert_array_equal(a, b)
+
+ def test_check_02(self):
+ a = np.pad([1, 2, 3], 3, 'reflect')
+ b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
+ assert_array_equal(a, b)
+
+ def test_check_03(self):
+ a = np.pad([1, 2, 3], 4, 'reflect')
+ b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
+ assert_array_equal(a, b)
+
+
+class TestEmptyArray:
+ """Check how padding behaves on arrays with an empty dimension."""
+
+ @pytest.mark.parametrize(
+ # Keep parametrization ordered, otherwise pytest-xdist might believe
+ # that different tests were collected during parallelization
+ "mode", sorted(_all_modes.keys() - {"constant", "empty"})
+ )
+ def test_pad_empty_dimension(self, mode):
+ match = ("can't extend empty axis 0 using modes other than 'constant' "
+ "or 'empty'")
+ with pytest.raises(ValueError, match=match):
+ np.pad([], 4, mode=mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad(np.ndarray(0), 4, mode=mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)
+
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_pad_non_empty_dimension(self, mode):
+ result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
+ assert result.shape == (8, 0, 4)
+
+
+class TestSymmetric:
+ def test_check_simple(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'symmetric')
+ b = np.array(
+ [24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
+ 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
+ 4, 3, 2, 1, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
+ 89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_odd_method(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
+ b = np.array(
+ [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
+ -14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
+ -4, -3, -2, -1, 0,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_large_pad(self):
+ a = [[4, 5, 6], [6, 7, 8]]
+ a = np.pad(a, (5, 7), 'symmetric')
+ b = np.array(
+ [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
+ )
+
+ assert_array_equal(a, b)
+
+ def test_check_large_pad_odd(self):
+ a = [[4, 5, 6], [6, 7, 8]]
+ a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
+ b = np.array(
+ [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
+ [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
+ [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
+ [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
+ [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
+
+ [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
+ [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
+
+ [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
+ [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
+ [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
+ [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
+ [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
+ [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
+ [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_shape(self):
+ a = [[4, 5, 6]]
+ a = np.pad(a, (5, 7), 'symmetric')
+ b = np.array(
+ [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
+ [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_01(self):
+ a = np.pad([1, 2, 3], 2, 'symmetric')
+ b = np.array([2, 1, 1, 2, 3, 3, 2])
+ assert_array_equal(a, b)
+
+ def test_check_02(self):
+ a = np.pad([1, 2, 3], 3, 'symmetric')
+ b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
+ assert_array_equal(a, b)
+
+ def test_check_03(self):
+ a = np.pad([1, 2, 3], 6, 'symmetric')
+ b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
+ assert_array_equal(a, b)
+
+
+class TestWrap:
+ def test_check_simple(self):
+ a = np.arange(100)
+ a = np.pad(a, (25, 20), 'wrap')
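+ # 'wrap' is periodic: the left pad is the last 25 values (75..99) and
+ # the right pad the first 20 values (0..19).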
+ b = np.array(
+ [75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_large_pad(self):
+ a = np.arange(12)
+ a = np.reshape(a, (3, 4))
+ a = np.pad(a, (10, 12), 'wrap')
+ b = np.array(
+ [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11],
+ [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
+ 3, 0, 1, 2, 3, 0, 1, 2, 3],
+ [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
+ 7, 4, 5, 6, 7, 4, 5, 6, 7],
+ [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
+ 11, 8, 9, 10, 11, 8, 9, 10, 11]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_01(self):
+ a = np.pad([1, 2, 3], 3, 'wrap')
+ b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ assert_array_equal(a, b)
+
+ def test_check_02(self):
+ a = np.pad([1, 2, 3], 4, 'wrap')
+ b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
+ assert_array_equal(a, b)
+
+ def test_pad_with_zero(self):
+ a = np.ones((3, 5))
+ b = np.pad(a, (0, 5), mode="wrap")
+ assert_array_equal(a, b[:-5, :-5])
+
+ def test_repeated_wrapping(self):
+ """
+ Check wrapping on each side individually if the wrapped area is longer
+ than the original array.
+ """
+ a = np.arange(5)
+ b = np.pad(a, (12, 0), mode="wrap")
+ assert_array_equal(np.r_[a, a, a, a][3:], b)
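+ # Tiling four copies of `a` and slicing reproduces the expected result
+ # when the pad is wider than the array itself.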
+
+ a = np.arange(5)
+ b = np.pad(a, (0, 12), mode="wrap")
+ assert_array_equal(np.r_[a, a, a, a][:-3], b)
+
+
+class TestEdge:
+ def test_check_simple(self):
+ a = np.arange(12)
+ a = np.reshape(a, (4, 3))
+ a = np.pad(a, ((2, 3), (3, 2)), 'edge')
+ b = np.array(
+ [[0, 0, 0, 0, 1, 2, 2, 2],
+ [0, 0, 0, 0, 1, 2, 2, 2],
+
+ [0, 0, 0, 0, 1, 2, 2, 2],
+ [3, 3, 3, 3, 4, 5, 5, 5],
+ [6, 6, 6, 6, 7, 8, 8, 8],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11]]
+ )
+ assert_array_equal(a, b)
+
+ def test_check_width_shape_1_2(self):
+ # Check a pad_width of the form ((1, 2),).
+ # Regression test for issue gh-7808.
+ a = np.array([1, 2, 3])
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.array([1, 1, 2, 3, 3, 3])
+ assert_array_equal(padded, expected)
+
+ a = np.array([[1, 2, 3], [4, 5, 6]])
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.pad(a, ((1, 2), (1, 2)), 'edge')
+ assert_array_equal(padded, expected)
+
+ a = np.arange(24).reshape(2, 3, 4)
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
+ assert_array_equal(padded, expected)
+
+
+class TestEmpty:
+ def test_simple(self):
+ arr = np.arange(24).reshape(4, 6)
+ result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
+ assert result.shape == (9, 10)
+ assert_equal(arr, result[2:-3, 3:-1])
+
+ def test_pad_empty_dimension(self):
+ arr = np.zeros((3, 0, 2))
+ result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")
+ assert result.shape == (3, 4, 4)
+
+
+def test_legacy_vector_functionality():
+ def _padwithtens(vector, pad_width, iaxis, kwargs):
+ vector[:pad_width[0]] = 10
+ vector[-pad_width[1]:] = 10
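+ # Legacy callable-mode API: np.pad invokes the function once for each
+ # 1-D slice along each axis, passing the padded vector, the
+ # (before, after) widths, the axis index, and the kwargs dict; the
+ # function fills the pad regions in place.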
+
+ a = np.arange(6).reshape(2, 3)
+ a = np.pad(a, 2, _padwithtens)
+ b = np.array(
+ [[10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+
+ [10, 10, 0, 1, 2, 10, 10],
+ [10, 10, 3, 4, 5, 10, 10],
+
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10]]
+ )
+ assert_array_equal(a, b)
+
+
+def test_unicode_mode():
+ a = np.pad([1], 2, mode='constant')
+ b = np.array([0, 0, 1, 0, 0])
+ assert_array_equal(a, b)
+
+
+@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
+def test_object_input(mode):
+ # Regression test for issue gh-11395.
+ a = np.full((4, 3), fill_value=None)
+ pad_amt = ((2, 3), (3, 2))
+ b = np.full((9, 8), fill_value=None)
+ assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
+
+
+class TestPadWidth:
+ @pytest.mark.parametrize("pad_width", [
+ (4, 5, 6, 7),
+ ((1,), (2,), (3,)),
+ ((1, 2), (3, 4), (5, 6)),
+ ((3, 4, 5), (0, 1, 2)),
+ ])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_misshaped_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "operands could not be broadcast together"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, pad_width, mode)
+
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_misshaped_pad_width_2(self, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = ("input operand has more dimensions than allowed by the axis "
+ "remapping")
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode)
+
+ @pytest.mark.parametrize(
+ "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_negative_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "index can't contain negative values"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, pad_width, mode)
+
+ @pytest.mark.parametrize("pad_width, dtype", [
+ ("3", None),
+ ("word", None),
+ (None, None),
+ (object(), None),
+ (3.4, None),
+ (((2, 3, 4), (3, 2)), object),
+ (complex(1, -1), None),
+ (((-2.1, 3), (3, 2)), None),
+ ])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_bad_type(self, pad_width, dtype, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "`pad_width` must be of integral type."
+ if dtype is not None:
+ # avoid DeprecationWarning when not specifying dtype
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width, dtype=dtype), mode)
+ else:
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, pad_width, mode)
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width), mode)
+
+ def test_pad_width_as_ndarray(self):
+ a = np.arange(12)
+ a = np.reshape(a, (4, 3))
+ a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')
+ b = np.array(
+ [[0, 0, 0, 0, 1, 2, 2, 2],
+ [0, 0, 0, 0, 1, 2, 2, 2],
+
+ [0, 0, 0, 0, 1, 2, 2, 2],
+ [3, 3, 3, 3, 4, 5, 5, 5],
+ [6, 6, 6, 6, 7, 8, 8, 8],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11],
+ [9, 9, 9, 9, 10, 11, 11, 11]]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_zero_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape(6, 5)
+ assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_kwargs(mode):
+ """Test behavior of pad's kwargs for the given mode."""
+ allowed = _all_modes[mode]
+ not_allowed = {}
+ for kwargs in _all_modes.values():
+ if kwargs != allowed:
+ not_allowed.update(kwargs)
+ # Test if allowed keyword arguments pass
+ np.pad([1, 2, 3], 1, mode, **allowed)
+ # Test if prohibited keyword arguments of other modes raise an error
+ for key, value in not_allowed.items():
+ match = "unsupported keyword arguments for mode '{}'".format(mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1, 2, 3], 1, mode, **{key: value})
+
+
+def test_constant_zero_default():
+ arr = np.array([1, 1])
+ assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0])
+
+
+@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
+def test_unsupported_mode(mode):
+ match= "mode '{}' is not supported".format(mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1, 2, 3], 4, mode=mode)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_non_contiguous_array(mode):
+ arr = np.arange(24).reshape(4, 6)[::2, ::2]
+ result = np.pad(arr, (2, 3), mode)
+ assert result.shape == (7, 8)
+ assert_equal(result[2:-3, 2:-3], arr)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_memory_layout_persistence(mode):
+ """Test if C and F order is preserved for all pad modes."""
+ x = np.ones((5, 10), order='C')
+ assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
+ x = np.ones((5, 10), order='F')
+ assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_dtype_persistence(dtype, mode):
+ arr = np.zeros((3, 2, 1), dtype=dtype)
+ result = np.pad(arr, 1, mode=mode)
+ assert result.dtype == dtype
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py
new file mode 100644
index 00000000..a180accb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arraysetops.py
@@ -0,0 +1,944 @@
+"""Test functions for 1D array set operations.
+
+"""
+import numpy as np
+
+from numpy.testing import (assert_array_equal, assert_equal,
+ assert_raises, assert_raises_regex)
+from numpy.lib.arraysetops import (
+ ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
+ )
+import pytest
+
+
+class TestSetOps:
+
+ def test_intersect1d(self):
+ # unique inputs
+ a = np.array([5, 7, 1, 2])
+ b = np.array([2, 4, 3, 1, 5])
+
+ ec = np.array([1, 2, 5])
+ c = intersect1d(a, b, assume_unique=True)
+ assert_array_equal(c, ec)
+
+ # non-unique inputs
+ a = np.array([5, 5, 7, 1, 2])
+ b = np.array([2, 1, 4, 3, 3, 1, 5])
+
+ ed = np.array([1, 2, 5])
+ c = intersect1d(a, b)
+ assert_array_equal(c, ed)
+ assert_array_equal([], intersect1d([], []))
+
+ def test_intersect1d_array_like(self):
+ # See gh-11772
+ class Test:
+ def __array__(self):
+ return np.arange(3)
+
+ a = Test()
+ res = intersect1d(a, a)
+ assert_array_equal(res, a)
+ res = intersect1d([1, 2, 3], [1, 2, 3])
+ assert_array_equal(res, [1, 2, 3])
+
+ def test_intersect1d_indices(self):
+ # unique inputs
+ a = np.array([1, 2, 3, 4])
+ b = np.array([2, 1, 4, 6])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ee = np.array([1, 2, 4])
+ assert_array_equal(c, ee)
+ assert_array_equal(a[i1], ee)
+ assert_array_equal(b[i2], ee)
+
+ # non-unique inputs
+ a = np.array([1, 2, 2, 3, 4, 3, 2])
+ b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ef = np.array([1, 2, 3, 4])
+ assert_array_equal(c, ef)
+ assert_array_equal(a[i1], ef)
+ assert_array_equal(b[i2], ef)
+
+ # non-1d, unique inputs
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 6, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ # non-1d inputs, not assumed to be unique
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ c, i1, i2 = intersect1d(a, b, return_indices=True)
+ ui1 = np.unravel_index(i1, a.shape)
+ ui2 = np.unravel_index(i2, b.shape)
+ ea = np.array([2, 7, 8])
+ assert_array_equal(ea, a[ui1])
+ assert_array_equal(ea, b[ui2])
+
+ def test_setxor1d(self):
+ a = np.array([5, 7, 1, 2])
+ b = np.array([2, 4, 3, 1, 5])
+
+ ec = np.array([3, 4, 7])
+ c = setxor1d(a, b)
+ assert_array_equal(c, ec)
+
+ a = np.array([1, 2, 3])
+ b = np.array([6, 5, 4])
+
+ ec = np.array([1, 2, 3, 4, 5, 6])
+ c = setxor1d(a, b)
+ assert_array_equal(c, ec)
+
+ a = np.array([1, 8, 2, 3])
+ b = np.array([6, 5, 4, 8])
+
+ ec = np.array([1, 2, 3, 4, 5, 6])
+ c = setxor1d(a, b)
+ assert_array_equal(c, ec)
+
+ assert_array_equal([], setxor1d([], []))
+
+ def test_ediff1d(self):
+ zero_elem = np.array([])
+ one_elem = np.array([1])
+ two_elem = np.array([1, 2])
+
+ assert_array_equal([], ediff1d(zero_elem))
+ assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
+ assert_array_equal([0], ediff1d(zero_elem, to_end=0))
+ assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
+ assert_array_equal([], ediff1d(one_elem))
+ assert_array_equal([1], ediff1d(two_elem))
+ assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
+ assert_array_equal([5, 6, 1, 7, 8],
+ ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
+ assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
+ assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
+ assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
+ assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
+
+ @pytest.mark.parametrize("ary, prepend, append, expected", [
+ # should fail because trying to cast
+ # np.nan standard floating point value
+ # into an integer array:
+ (np.array([1, 2, 3], dtype=np.int64),
+ None,
+ np.nan,
+ 'to_end'),
+ # should fail because attempting
+ # to downcast to int type:
+ (np.array([1, 2, 3], dtype=np.int64),
+ np.array([5, 7, 2], dtype=np.float32),
+ None,
+ 'to_begin'),
+ # should fail because attempting to cast
+ # two special floating point values
+ # to integers (on both sides of ary),
+ # `to_begin` is in the error message as the impl checks this first:
+ (np.array([1., 3., 9.], dtype=np.int8),
+ np.nan,
+ np.nan,
+ 'to_begin'),
+ ])
+ def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):
+ # verify resolution of gh-11490
+
+ # specifically, raise an appropriate
+ # Exception when attempting to append or
+ # prepend with an incompatible type
+ msg = 'dtype of `{}` must be compatible'.format(expected)
+ with assert_raises_regex(TypeError, msg):
+ ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+
+ @pytest.mark.parametrize(
+ "ary,prepend,append,expected",
+ [
+ (np.array([1, 2, 3], dtype=np.int16),
+ 2**16, # will be cast to int16 under same kind rule.
+ 2**16 + 4,
+ np.array([0, 1, 1, 4], dtype=np.int16)),
+ (np.array([1, 2, 3], dtype=np.float32),
+ np.array([5], dtype=np.float64),
+ None,
+ np.array([5, 1, 1], dtype=np.float32)),
+ (np.array([1, 2, 3], dtype=np.int32),
+ 0,
+ 0,
+ np.array([0, 1, 1, 0], dtype=np.int32)),
+ (np.array([1, 2, 3], dtype=np.int64),
+ 3,
+ -9,
+ np.array([3, 1, 1, -9], dtype=np.int64)),
+ ]
+ )
+ def test_ediff1d_scalar_handling(self,
+ ary,
+ prepend,
+ append,
+ expected):
+ # maintain backwards-compatibility
+ # of scalar prepend / append behavior
+ # in ediff1d following fix for gh-11490
+ actual = np.ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+ assert_equal(actual, expected)
+ assert actual.dtype == expected.dtype
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_isin(self, kind):
+ # The tests for in1d cover most of isin's behavior. If in1d is ever
+ # removed, those tests would need to be changed to exercise isin
+ # instead.
+ def _isin_slow(a, b):
+ b = np.asarray(b).flatten().tolist()
+ return a in b
+ isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
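+ # np.vectorize applies the scalar membership test elementwise, giving
+ # a slow but straightforward reference implementation to compare
+ # against.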
+
+ def assert_isin_equal(a, b):
+ x = isin(a, b, kind=kind)
+ y = isin_slow(a, b)
+ assert_array_equal(x, y)
+
+ # multidimensional arrays in both arguments
+ a = np.arange(24).reshape([2, 3, 4])
+ b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
+ assert_isin_equal(a, b)
+
+ # array-likes as both arguments
+ c = [(9, 8), (7, 6)]
+ d = (9, 7)
+ assert_isin_equal(c, d)
+
+ # zero-d array:
+ f = np.array(3)
+ assert_isin_equal(f, b)
+ assert_isin_equal(a, f)
+ assert_isin_equal(f, f)
+
+ # scalar:
+ assert_isin_equal(5, b)
+ assert_isin_equal(a, 6)
+ assert_isin_equal(5, 6)
+
+ # empty array-like:
+ if kind != "table":
+ # An empty list will become float64,
+ # which is invalid for kind="table"
+ x = []
+ assert_isin_equal(x, b)
+ assert_isin_equal(a, x)
+ assert_isin_equal(x, x)
+
+ # empty array with various types:
+ for dtype in [bool, np.int64, np.float64]:
+ if kind == "table" and dtype == np.float64:
+ continue
+
+ if dtype in {np.int64, np.float64}:
+ ar = np.array([10, 20, 30], dtype=dtype)
+ elif dtype in {bool}:
+ ar = np.array([True, False, False])
+
+ empty_array = np.array([], dtype=dtype)
+
+ assert_isin_equal(empty_array, ar)
+ assert_isin_equal(ar, empty_array)
+ assert_isin_equal(empty_array, empty_array)
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d(self, kind):
+ # we use two different sizes for the b array here to test the
+ # two different paths in in1d().
+ for mult in (1, 10):
+ # One check without np.array to make sure lists are handled correctly
+ a = [5, 7, 1, 2]
+ b = [2, 4, 3, 1, 5] * mult
+ ec = np.array([True, False, True, True])
+ c = in1d(a, b, assume_unique=True, kind=kind)
+ assert_array_equal(c, ec)
+
+ a[0] = 8
+ ec = np.array([False, False, True, True])
+ c = in1d(a, b, assume_unique=True, kind=kind)
+ assert_array_equal(c, ec)
+
+ a[0], a[3] = 4, 8
+ ec = np.array([True, False, True, False])
+ c = in1d(a, b, assume_unique=True, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+ b = [2, 3, 4] * mult
+ ec = [False, True, False, True, True, True, True, True, True,
+ False, True, False, False, False]
+ c = in1d(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ b = b + [5, 5, 4] * mult
+ ec = [True, True, True, True, True, True, True, True, True, True,
+ True, False, True, True]
+ c = in1d(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 7, 1, 2])
+ b = np.array([2, 4, 3, 1, 5] * mult)
+ ec = np.array([True, False, True, True])
+ c = in1d(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 7, 1, 1, 2])
+ b = np.array([2, 4, 3, 3, 1, 5] * mult)
+ ec = np.array([True, False, True, True, True])
+ c = in1d(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5, 5])
+ b = np.array([2, 2] * mult)
+ ec = np.array([False, False])
+ c = in1d(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ a = np.array([5])
+ b = np.array([2])
+ ec = np.array([False])
+ c = in1d(a, b, kind=kind)
+ assert_array_equal(c, ec)
+
+ if kind in {None, "sort"}:
+ assert_array_equal(in1d([], [], kind=kind), [])
+
+ def test_in1d_char_array(self):
+ a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+ b = np.array(['a', 'c'])
+
+ ec = np.array([True, False, True, False, False, True, False, False])
+ c = in1d(a, b)
+
+ assert_array_equal(c, ec)
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_invert(self, kind):
+ "Test in1d's invert parameter"
+ # We use two different sizes for the b array here to test the
+ # two different paths in in1d().
+ for mult in (1, 10):
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+ b = [2, 3, 4] * mult
+ assert_array_equal(np.invert(in1d(a, b, kind=kind)),
+ in1d(a, b, invert=True, kind=kind))
+
+ # float:
+ if kind in {None, "sort"}:
+ for mult in (1, 10):
+ a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5],
+ dtype=np.float32)
+ b = [2, 3, 4] * mult
+ b = np.array(b, dtype=np.float32)
+ assert_array_equal(np.invert(in1d(a, b, kind=kind)),
+ in1d(a, b, invert=True, kind=kind))
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_ravel(self, kind):
+ # Test that in1d ravels its input arrays. This is not documented
+ # behavior, however; the test only ensures consistency.
+ a = np.arange(6).reshape(2, 3)
+ b = np.arange(3, 9).reshape(3, 2)
+ long_b = np.arange(3, 63).reshape(30, 2)
+ ec = np.array([False, False, False, True, True, True])
+
+ assert_array_equal(in1d(a, b, assume_unique=True, kind=kind),
+ ec)
+ assert_array_equal(in1d(a, b, assume_unique=False,
+ kind=kind),
+ ec)
+ assert_array_equal(in1d(a, long_b, assume_unique=True,
+ kind=kind),
+ ec)
+ assert_array_equal(in1d(a, long_b, assume_unique=False,
+ kind=kind),
+ ec)
+
+ def test_in1d_hit_alternate_algorithm(self):
+ """Hit the standard isin code with integers"""
+ # Need extreme range to hit standard code
+ # This hits it without the use of kind='table'
+ a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)
+ b = np.array([2, 3, 4, 1e9], dtype=np.int64)
+ expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)
+ assert_array_equal(expected, in1d(a, b))
+ assert_array_equal(np.invert(expected), in1d(a, b, invert=True))
+
+ a = np.array([5, 7, 1, 2], dtype=np.int64)
+ b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)
+ ec = np.array([True, False, True, True])
+ c = in1d(a, b, assume_unique=True)
+ assert_array_equal(c, ec)
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_boolean(self, kind):
+ """Test that in1d works for boolean input"""
+ a = np.array([True, False])
+ b = np.array([False, False, False])
+ expected = np.array([False, True])
+ assert_array_equal(expected,
+ in1d(a, b, kind=kind))
+ assert_array_equal(np.invert(expected),
+ in1d(a, b, invert=True, kind=kind))
+
+ @pytest.mark.parametrize("kind", [None, "sort"])
+ def test_in1d_timedelta(self, kind):
+ """Test that in1d works for timedelta input"""
+ rstate = np.random.RandomState(0)
+ a = rstate.randint(0, 100, size=10)
+ b = rstate.randint(0, 100, size=10)
+ truth = in1d(a, b)
+ a_timedelta = a.astype("timedelta64[s]")
+ b_timedelta = b.astype("timedelta64[s]")
+ assert_array_equal(truth, in1d(a_timedelta, b_timedelta, kind=kind))
+
+ def test_in1d_table_timedelta_fails(self):
+ a = np.array([0, 1, 2], dtype="timedelta64[s]")
+ b = a
+ # Make sure it raises a ValueError:
+ with pytest.raises(ValueError):
+ in1d(a, b, kind="table")
+
+ @pytest.mark.parametrize(
+ "dtype1,dtype2",
+ [
+ (np.int8, np.int16),
+ (np.int16, np.int8),
+ (np.uint8, np.uint16),
+ (np.uint16, np.uint8),
+ (np.uint8, np.int16),
+ (np.int16, np.uint8),
+ ]
+ )
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_mixed_dtype(self, dtype1, dtype2, kind):
+ """Test that in1d works as expected for mixed dtype input."""
+ is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
+ ar1 = np.array([0, 0, 1, 1], dtype=dtype1)
+
+ if is_dtype2_signed:
+ ar2 = np.array([-128, 0, 127], dtype=dtype2)
+ else:
+ ar2 = np.array([127, 0, 255], dtype=dtype2)
+
+ expected = np.array([True, True, False, False])
+
+ expect_failure = kind == "table" and any((
+ dtype1 == np.int8 and dtype2 == np.int16,
+ dtype1 == np.int16 and dtype2 == np.int8
+ ))
+
+ if expect_failure:
+ with pytest.raises(RuntimeError, match="exceed the maximum"):
+ in1d(ar1, ar2, kind=kind)
+ else:
+ assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
+
+ @pytest.mark.parametrize("kind", [None, "sort", "table"])
+ def test_in1d_mixed_boolean(self, kind):
+ """Test that in1d works as expected for bool/int input."""
+ for dtype in np.typecodes["AllInteger"]:
+ a = np.array([True, False, False], dtype=bool)
+ b = np.array([0, 0, 0, 0], dtype=dtype)
+ expected = np.array([False, True, True], dtype=bool)
+ assert_array_equal(in1d(a, b, kind=kind), expected)
+
+ a, b = b, a
+ expected = np.array([True, True, True, True], dtype=bool)
+ assert_array_equal(in1d(a, b, kind=kind), expected)
+
+ def test_in1d_first_array_is_object(self):
+ ar1 = [None]
+ ar2 = np.array([1]*10)
+ expected = np.array([False])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_second_array_is_object(self):
+ ar1 = 1
+ ar2 = np.array([None]*10)
+ expected = np.array([False])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_both_arrays_are_object(self):
+ ar1 = [None]
+ ar2 = np.array([None]*10)
+ expected = np.array([True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_both_arrays_have_structured_dtype(self):
+ # Test arrays of a structured data type containing an integer field
+ # and a field of dtype `object` allowing for arbitrary Python objects
+ dt = np.dtype([('field1', int), ('field2', object)])
+ ar1 = np.array([(1, None)], dtype=dt)
+ ar2 = np.array([(1, None)]*10, dtype=dt)
+ expected = np.array([True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+
+ def test_in1d_with_arrays_containing_tuples(self):
+ ar1 = np.array([(1,), 2], dtype=object)
+ ar2 = np.array([(1,), 2], dtype=object)
+ expected = np.array([True, True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.in1d(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ # An integer is appended at the end of the array to make sure
+ # the array constructor builds an object array of tuples; once the
+ # array is created, the integer is sliced off again. This works
+ # around a bug in the array constructor that mishandles inputs
+ # consisting only of tuples.
+ ar1 = np.array([(1,), (2, 1), 1], dtype=object)
+ ar1 = ar1[:-1]
+ ar2 = np.array([(1,), (2, 1), 1], dtype=object)
+ ar2 = ar2[:-1]
+ expected = np.array([True, True])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.in1d(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ ar1 = np.array([(1,), (2, 3), 1], dtype=object)
+ ar1 = ar1[:-1]
+ ar2 = np.array([(1,), 2], dtype=object)
+ expected = np.array([True, False])
+ result = np.in1d(ar1, ar2)
+ assert_array_equal(result, expected)
+ result = np.in1d(ar1, ar2, invert=True)
+ assert_array_equal(result, np.invert(expected))
+
+ def test_in1d_errors(self):
+ """Test that in1d raises expected errors."""
+
+ # Error 1: `kind` is not one of 'sort', 'table', or None.
+ ar1 = np.array([1, 2, 3, 4, 5])
+ ar2 = np.array([2, 4, 6, 8, 10])
+ assert_raises(ValueError, in1d, ar1, ar2, kind='quicksort')
+
+ # Error 2: `kind="table"` does not work for non-integral arrays.
+ obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)
+ obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)
+ assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind='table')
+
+ for dtype in [np.int32, np.int64]:
+ ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)
+ # The range of this array will overflow:
+ overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)
+
+ # Error 3: `kind="table"` will trigger a RuntimeError
+ # if an integer overflow is expected when computing the
+ # range of ar2.
+ assert_raises(
+ RuntimeError,
+ in1d, ar1, overflow_ar2, kind='table'
+ )
+
+ # Non-error: `kind=None` will *not* trigger a RuntimeError
+ # if there is an integer overflow; it falls back to
+ # the `sort` algorithm instead.
+ result = np.in1d(ar1, overflow_ar2, kind=None)
+ assert_array_equal(result, [True] + [False] * 4)
+ result = np.in1d(ar1, overflow_ar2, kind='sort')
+ assert_array_equal(result, [True] + [False] * 4)
+
+ def test_union1d(self):
+ a = np.array([5, 4, 7, 1, 2])
+ b = np.array([2, 4, 3, 3, 2, 1, 5])
+
+ ec = np.array([1, 2, 3, 4, 5, 7])
+ c = union1d(a, b)
+ assert_array_equal(c, ec)
+
+ # Tests gh-10340: arguments to union1d should be
+ # flattened if they are not already 1D.
+ x = np.array([[0, 1, 2], [3, 4, 5]])
+ y = np.array([0, 1, 2, 3, 4])
+ ez = np.array([0, 1, 2, 3, 4, 5])
+ z = union1d(x, y)
+ assert_array_equal(z, ez)
+
+ assert_array_equal([], union1d([], []))
+
+ def test_setdiff1d(self):
+ a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
+ b = np.array([2, 4, 3, 3, 2, 1, 5])
+
+ ec = np.array([6, 7])
+ c = setdiff1d(a, b)
+ assert_array_equal(c, ec)
+
+ a = np.arange(21)
+ b = np.arange(19)
+ ec = np.array([19, 20])
+ c = setdiff1d(a, b)
+ assert_array_equal(c, ec)
+
+ assert_array_equal([], setdiff1d([], []))
+ a = np.array((), np.uint32)
+ assert_equal(setdiff1d(a, []).dtype, np.uint32)
+
+ def test_setdiff1d_unique(self):
+ a = np.array([3, 2, 1])
+ b = np.array([7, 5, 2])
+ expected = np.array([3, 1])
+ actual = setdiff1d(a, b, assume_unique=True)
+ assert_equal(actual, expected)
+
+ def test_setdiff1d_char_array(self):
+ a = np.array(['a', 'b', 'c'])
+ b = np.array(['a', 'b', 's'])
+ assert_array_equal(setdiff1d(a, b), np.array(['c']))
+
+ def test_manyways(self):
+ a = np.array([5, 7, 1, 2, 8])
+ b = np.array([9, 8, 2, 4, 3, 1, 5])
+
+ c1 = setxor1d(a, b)
+ aux1 = intersect1d(a, b)
+ aux2 = union1d(a, b)
+ c2 = setdiff1d(aux2, aux1)
+ assert_array_equal(c1, c2)
+
+
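+# The kind="table" branch exercised above can be pictured as building a
+# boolean lookup table spanning the integer range of ar2 and indexing it
+# with ar1. Below is a minimal sketch of that idea (an assumption about
+# the strategy, not numpy's actual implementation); it also shows why a
+# huge ar2 range, as in test_in1d_errors, has to be rejected: the table
+# needs (ar2.max() - ar2.min() + 1) entries.
+def _in1d_table_sketch(ar1, ar2):
+    ar1, ar2 = np.asarray(ar1), np.asarray(ar2)
+    lo, hi = int(ar2.min()), int(ar2.max())
+    table = np.zeros(hi - lo + 1, dtype=bool)  # one flag per value in range
+    table[ar2 - lo] = True
+    out = np.zeros(ar1.shape, dtype=bool)
+    in_range = (ar1 >= lo) & (ar1 <= hi)
+    out[in_range] = table[ar1[in_range] - lo]
+    return out
+
+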
+class TestUnique:
+
+ def test_unique_1d(self):
+
+ def check_all(a, b, i1, i2, c, dt):
+ base_msg = 'check {0} failed for type {1}'
+
+ msg = base_msg.format('values', dt)
+ v = unique(a)
+ assert_array_equal(v, b, msg)
+
+ msg = base_msg.format('return_index', dt)
+ v, j = unique(a, True, False, False)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j, i1, msg)
+
+ msg = base_msg.format('return_inverse', dt)
+ v, j = unique(a, False, True, False)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j, i2, msg)
+
+ msg = base_msg.format('return_counts', dt)
+ v, j = unique(a, False, False, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j, c, msg)
+
+ msg = base_msg.format('return_index and return_inverse', dt)
+ v, j1, j2 = unique(a, True, True, False)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i1, msg)
+ assert_array_equal(j2, i2, msg)
+
+ msg = base_msg.format('return_index and return_counts', dt)
+ v, j1, j2 = unique(a, True, False, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i1, msg)
+ assert_array_equal(j2, c, msg)
+
+ msg = base_msg.format('return_inverse and return_counts', dt)
+ v, j1, j2 = unique(a, False, True, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i2, msg)
+ assert_array_equal(j2, c, msg)
+
+ msg = base_msg.format(('return_index, return_inverse '
+ 'and return_counts'), dt)
+ v, j1, j2, j3 = unique(a, True, True, True)
+ assert_array_equal(v, b, msg)
+ assert_array_equal(j1, i1, msg)
+ assert_array_equal(j2, i2, msg)
+ assert_array_equal(j3, c, msg)
+
+ a = [5, 7, 1, 2, 1, 5, 7]*10
+ b = [1, 2, 5, 7]
+ i1 = [2, 3, 0, 1]
+ i2 = [2, 3, 0, 1, 0, 2, 3]*10
+ c = np.multiply([2, 1, 2, 2], 10)
+
+ # test for numeric arrays
+ types = []
+ types.extend(np.typecodes['AllInteger'])
+ types.extend(np.typecodes['AllFloat'])
+ types.append('datetime64[D]')
+ types.append('timedelta64[D]')
+ for dt in types:
+ aa = np.array(a, dt)
+ bb = np.array(b, dt)
+ check_all(aa, bb, i1, i2, c, dt)
+
+ # test for object arrays
+ dt = 'O'
+ aa = np.empty(len(a), dt)
+ aa[:] = a
+ bb = np.empty(len(b), dt)
+ bb[:] = b
+ check_all(aa, bb, i1, i2, c, dt)
+
+ # test for structured arrays
+ dt = [('', 'i'), ('', 'i')]
+ aa = np.array(list(zip(a, a)), dt)
+ bb = np.array(list(zip(b, b)), dt)
+ check_all(aa, bb, i1, i2, c, dt)
+
+ # test for ticket #2799
+ aa = [1. + 0.j, 1 - 1.j, 1]
+ assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])
+
+ # test for ticket #4785
+ a = [(1, 2), (1, 2), (2, 3)]
+ unq = [1, 2, 3]
+ inv = [0, 1, 0, 1, 1, 2]
+ a1 = unique(a)
+ assert_array_equal(a1, unq)
+ a2, a2_inv = unique(a, return_inverse=True)
+ assert_array_equal(a2, unq)
+ assert_array_equal(a2_inv, inv)
+
+ # test for chararrays with return_inverse (gh-5099)
+ a = np.chararray(5)
+ a[...] = ''
+ a2, a2_inv = np.unique(a, return_inverse=True)
+ assert_array_equal(a2_inv, np.zeros(5))
+
+ # test for ticket #9137
+ a = []
+ a1_idx = np.unique(a, return_index=True)[1]
+ a2_inv = np.unique(a, return_inverse=True)[1]
+ a3_idx, a3_inv = np.unique(a, return_index=True,
+ return_inverse=True)[1:]
+ assert_equal(a1_idx.dtype, np.intp)
+ assert_equal(a2_inv.dtype, np.intp)
+ assert_equal(a3_idx.dtype, np.intp)
+ assert_equal(a3_inv.dtype, np.intp)
+
+ # test for ticket 2111 - float
+ a = [2.0, np.nan, 1.0, np.nan]
+ ua = [1.0, 2.0, np.nan]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - complex
+ a = [2.0-1j, np.nan, 1.0+1j, complex(0.0, np.nan), complex(1.0, np.nan)]
+ ua = [1.0+1j, 2.0-1j, complex(0.0, np.nan)]
+ ua_idx = [2, 0, 3]
+ ua_inv = [1, 2, 0, 2, 2]
+ ua_cnt = [1, 1, 3]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - datetime64
+ nat = np.datetime64('nat')
+ a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat]
+ ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for ticket 2111 - timedelta
+ nat = np.timedelta64('nat')
+ a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat]
+ ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat]
+ ua_idx = [2, 0, 1]
+ ua_inv = [1, 2, 0, 2]
+ ua_cnt = [1, 1, 2]
+ assert_equal(np.unique(a), ua)
+ assert_equal(np.unique(a, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))
+
+ # test for gh-19300
+ all_nans = [np.nan] * 4
+ ua = [np.nan]
+ ua_idx = [0]
+ ua_inv = [0, 0, 0, 0]
+ ua_cnt = [4]
+ assert_equal(np.unique(all_nans), ua)
+ assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx))
+ assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv))
+ assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt))
+
+ def test_unique_axis_errors(self):
+ assert_raises(TypeError, self._run_axis_tests, object)
+ assert_raises(TypeError, self._run_axis_tests,
+ [('a', int), ('b', object)])
+
+ assert_raises(np.AxisError, unique, np.arange(10), axis=2)
+ assert_raises(np.AxisError, unique, np.arange(10), axis=-2)
+
+ def test_unique_axis_list(self):
+ msg = "Unique failed on list of lists"
+ inp = [[0, 1, 0], [0, 1, 0]]
+ inp_arr = np.asarray(inp)
+ assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
+ assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)
+
+ def test_unique_axis(self):
+ types = []
+ types.extend(np.typecodes['AllInteger'])
+ types.extend(np.typecodes['AllFloat'])
+ types.append('datetime64[D]')
+ types.append('timedelta64[D]')
+ types.append([('a', int), ('b', int)])
+ types.append([('a', int), ('b', float)])
+
+ for dtype in types:
+ self._run_axis_tests(dtype)
+
+ msg = 'Non-bitwise-equal booleans test failed'
+ data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)
+ result = np.array([[False, True], [True, True]], dtype=bool)
+ assert_array_equal(unique(data, axis=0), result, msg)
+
+ msg = 'Negative zero equality test failed'
+ data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])
+ result = np.array([[-0.0, 0.0]])
+ assert_array_equal(unique(data, axis=0), result, msg)
+
+ @pytest.mark.parametrize("axis", [0, -1])
+ def test_unique_1d_with_axis(self, axis):
+ x = np.array([4, 3, 2, 3, 2, 1, 2, 2])
+ uniq = unique(x, axis=axis)
+ assert_array_equal(uniq, [1, 2, 3, 4])
+
+ def test_unique_axis_zeros(self):
+ # issue 15559
+ single_zero = np.empty(shape=(2, 0), dtype=np.int8)
+ uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,
+ return_inverse=True, return_counts=True)
+
+ # there's 1 element of shape (0,) along axis 0
+ assert_equal(uniq.dtype, single_zero.dtype)
+ assert_array_equal(uniq, np.empty(shape=(1, 0)))
+ assert_array_equal(idx, np.array([0]))
+ assert_array_equal(inv, np.array([0, 0]))
+ assert_array_equal(cnt, np.array([2]))
+
+ # there are 0 elements of shape (2,) along axis 1
+ uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,
+ return_inverse=True, return_counts=True)
+
+ assert_equal(uniq.dtype, single_zero.dtype)
+ assert_array_equal(uniq, np.empty(shape=(2, 0)))
+ assert_array_equal(idx, np.array([]))
+ assert_array_equal(inv, np.array([]))
+ assert_array_equal(cnt, np.array([]))
+
+ # test a "complicated" shape
+ shape = (0, 2, 0, 3, 0, 4, 0)
+ multiple_zeros = np.empty(shape=shape)
+ for axis in range(len(shape)):
+ expected_shape = list(shape)
+ if shape[axis] == 0:
+ expected_shape[axis] = 0
+ else:
+ expected_shape[axis] = 1
+
+ assert_array_equal(unique(multiple_zeros, axis=axis),
+ np.empty(shape=expected_shape))
+
+ def test_unique_masked(self):
+ # issue 8664
+ x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],
+ dtype='uint8')
+ y = np.ma.masked_equal(x, 0)
+
+ v = np.unique(y)
+ v2, i, c = np.unique(y, return_index=True, return_counts=True)
+
+ msg = 'Unique returned different results when asked for index'
+ assert_array_equal(v.data, v2.data, msg)
+ assert_array_equal(v.mask, v2.mask, msg)
+
+ def test_unique_sort_order_with_axis(self):
+ # These tests fail if sorting along axis is done by treating subarrays
+ # as unsigned byte strings. See gh-10495.
+ fmt = "sort order incorrect for integer type '%s'"
+ for dt in 'bhilq':
+ a = np.array([[-1], [0]], dt)
+ b = np.unique(a, axis=0)
+ assert_array_equal(a, b, fmt % dt)
+
+ def _run_axis_tests(self, dtype):
+ data = np.array([[0, 1, 0, 0],
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [1, 0, 0, 0]]).astype(dtype)
+
+ msg = 'Unique with 1d array and axis=0 failed'
+ result = np.array([0, 1])
+ assert_array_equal(unique(data), result.astype(dtype), msg)
+
+ msg = 'Unique with 2d array and axis=0 failed'
+ result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])
+ assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)
+
+ msg = 'Unique with 2d array and axis=1 failed'
+ result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
+ assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
+
+ msg = 'Unique with 3d array and axis=2 failed'
+ data3d = np.array([[[1, 1],
+ [1, 0]],
+ [[0, 1],
+ [0, 0]]]).astype(dtype)
+ result = np.take(data3d, [1, 0], axis=2)
+ assert_array_equal(unique(data3d, axis=2), result, msg)
+
+ uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
+ return_inverse=True, return_counts=True)
+ msg = "Unique's return_index=True failed with axis=0"
+ assert_array_equal(data[idx], uniq, msg)
+ msg = "Unique's return_inverse=True failed with axis=0"
+ assert_array_equal(uniq[inv], data, msg)
+ msg = "Unique's return_counts=True failed with axis=0"
+ assert_array_equal(cnt, np.array([2, 2]), msg)
+
+ uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,
+ return_inverse=True, return_counts=True)
+ msg = "Unique's return_index=True failed with axis=1"
+ assert_array_equal(data[:, idx], uniq, msg)
+ msg = "Unique's return_inverse=True failed with axis=1"
+ assert_array_equal(uniq[:, inv], data, msg)
+ msg = "Unique's return_counts=True failed with axis=1"
+ assert_array_equal(cnt, np.array([2, 1, 1]), msg)
+
+ def test_unique_nanequals(self):
+ # issue 20326
+ a = np.array([1, 1, np.nan, np.nan, np.nan])
+ unq = np.unique(a)
+ not_unq = np.unique(a, equal_nan=False)
+ assert_array_equal(unq, np.array([1, np.nan]))
+ assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan]))
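+
+
+# A quick illustration of the equal_nan semantics checked above (not part
+# of the upstream suite): by default all NaNs collapse into one entry
+# because unique() treats them as equal, while equal_nan=False keeps each
+# NaN, since nan != nan under IEEE-754 comparison rules.
+def _demo_unique_equal_nan():
+    a = np.array([1.0, np.nan, np.nan, np.nan])
+    assert np.unique(a).size == 2                    # [1., nan]
+    assert np.unique(a, equal_nan=False).size == 4   # [1., nan, nan, nan]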
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py
new file mode 100644
index 00000000..c00ed13d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_arrayterator.py
@@ -0,0 +1,46 @@
+from operator import mul
+from functools import reduce
+
+import numpy as np
+from numpy.random import randint
+from numpy.lib import Arrayterator
+from numpy.testing import assert_
+
+
+def test():
+ np.random.seed(np.arange(10))
+
+ # Create a random array
+ ndims = randint(5)+1
+ shape = tuple(randint(10)+1 for dim in range(ndims))
+ els = reduce(mul, shape)
+ a = np.arange(els)
+ a.shape = shape
+
+ buf_size = randint(2*els)
+ b = Arrayterator(a, buf_size)
+
+ # Check that each block has at most ``buf_size`` elements
+ for block in b:
+ assert_(len(block.flat) <= (buf_size or els))
+
+ # Check that all elements are iterated correctly
+ assert_(list(b.flat) == list(a.flat))
+
+ # Slice arrayterator
+ start = [randint(dim) for dim in shape]
+ stop = [randint(dim)+1 for dim in shape]
+ step = [randint(dim)+1 for dim in shape]
+ slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+ c = b[slice_]
+ d = a[slice_]
+
+ # Check that each block has at most ``buf_size`` elements
+ for block in c:
+ assert_(len(block.flat) <= (buf_size or els))
+
+ # Check that the arrayterator is sliced correctly
+ assert_(np.all(c.__array__() == d))
+
+ # Check that all elements are iterated correctly
+ assert_(list(c.flat) == list(d.flat))
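+
+
+# A minimal usage sketch of what the randomized test above verifies
+# (illustrative only, not part of the upstream suite): every buffered
+# block holds at most buf_size elements, and iterating visits every
+# element of the underlying array in order.
+def _demo_arrayterator():
+    a = np.arange(24).reshape(2, 3, 4)
+    it = Arrayterator(a, buf_size=5)
+    assert all(block.size <= 5 for block in it)
+    assert list(it.flat) == list(a.flat)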
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py
new file mode 100644
index 00000000..838f999a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_financial_expired.py
@@ -0,0 +1,11 @@
+import pytest
+import numpy as np
+
+
+def test_financial_expired():
+ match = 'NEP 32'
+ with pytest.warns(DeprecationWarning, match=match):
+ func = np.fv
+ with pytest.raises(RuntimeError, match=match):
+ func(1, 2, 3)
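+
+
+# Per NEP 32 the financial functions moved to the separate
+# numpy-financial package; accessing the old names warns and calling
+# them raises. A hypothetical sketch of such a stub factory
+# (illustrative names only, not numpy's internals):
+def _expired_stub(name):
+    import warnings
+    warnings.warn(f"{name} is deprecated (NEP 32)", DeprecationWarning,
+                  stacklevel=2)
+
+    def stub(*args, **kwargs):
+        raise RuntimeError(f"{name} expired per NEP 32; "
+                           "use the numpy-financial package instead")
+    return stub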
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_format.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_format.py
new file mode 100644
index 00000000..6f6406cf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_format.py
@@ -0,0 +1,1027 @@
+# doctest
+r''' Test the .npy file format.
+
+Set up:
+
+ >>> import sys
+ >>> from io import BytesIO
+ >>> from numpy.lib import format
+ >>>
+ >>> scalars = [
+ ... np.uint8,
+ ... np.int8,
+ ... np.uint16,
+ ... np.int16,
+ ... np.uint32,
+ ... np.int32,
+ ... np.uint64,
+ ... np.int64,
+ ... np.float32,
+ ... np.float64,
+ ... np.complex64,
+ ... np.complex128,
+ ... object,
+ ... ]
+ >>>
+ >>> basic_arrays = []
+ >>>
+ >>> for scalar in scalars:
+ ... for endian in '<>':
+ ... dtype = np.dtype(scalar).newbyteorder(endian)
+ ... basic = np.arange(15).astype(dtype)
+ ... basic_arrays.extend([
+ ... np.array([], dtype=dtype),
+ ... np.array(10, dtype=dtype),
+ ... basic,
+ ... basic.reshape((3,5)),
+ ... basic.reshape((3,5)).T,
+ ... basic.reshape((3,5))[::-1,::2],
+ ... ])
+ ...
+ >>>
+ >>> Pdescr = [
+ ... ('x', 'i4', (2,)),
+ ... ('y', 'f8', (2, 2)),
+ ... ('z', 'u1')]
+ >>>
+ >>>
+ >>> PbufferT = [
+ ... ([3,2], [[6.,4.],[6.,4.]], 8),
+ ... ([4,3], [[7.,5.],[7.,5.]], 9),
+ ... ]
+ >>>
+ >>>
+ >>> Ndescr = [
+ ... ('x', 'i4', (2,)),
+ ... ('Info', [
+ ... ('value', 'c16'),
+ ... ('y2', 'f8'),
+ ... ('Info2', [
+ ... ('name', 'S2'),
+ ... ('value', 'c16', (2,)),
+ ... ('y3', 'f8', (2,)),
+ ... ('z3', 'u4', (2,))]),
+ ... ('name', 'S2'),
+ ... ('z2', 'b1')]),
+ ... ('color', 'S2'),
+ ... ('info', [
+ ... ('Name', 'U8'),
+ ... ('Value', 'c16')]),
+ ... ('y', 'f8', (2, 2)),
+ ... ('z', 'u1')]
+ >>>
+ >>>
+ >>> NbufferT = [
+ ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
+ ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
+ ... ]
+ >>>
+ >>>
+ >>> record_arrays = [
+ ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
+ ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
+ ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
+ ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ ... ]
+
+Test the magic string writing.
+
+ >>> format.magic(1, 0)
+ '\x93NUMPY\x01\x00'
+ >>> format.magic(0, 0)
+ '\x93NUMPY\x00\x00'
+ >>> format.magic(255, 255)
+ '\x93NUMPY\xff\xff'
+ >>> format.magic(2, 5)
+ '\x93NUMPY\x02\x05'
+
+Test the magic string reading.
+
+ >>> format.read_magic(BytesIO(format.magic(1, 0)))
+ (1, 0)
+ >>> format.read_magic(BytesIO(format.magic(0, 0)))
+ (0, 0)
+ >>> format.read_magic(BytesIO(format.magic(255, 255)))
+ (255, 255)
+ >>> format.read_magic(BytesIO(format.magic(2, 5)))
+ (2, 5)
+
+Test the header writing.
+
+ >>> for arr in basic_arrays + record_arrays:
+ ... f = BytesIO()
+ ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
+ ... print(repr(f.getvalue()))
+ ...
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
+ "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
+ "v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+ "\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+ "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+ "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
+'''
+import sys
+import os
+import warnings
+import pytest
+from io import BytesIO
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_array_equal, assert_raises, assert_raises_regex,
+ assert_warns, IS_PYPY, IS_WASM
+ )
+from numpy.testing._private.utils import requires_memory
+from numpy.lib import format
+
+
+# Generate some basic arrays to test with.
+scalars = [
+ np.uint8,
+ np.int8,
+ np.uint16,
+ np.int16,
+ np.uint32,
+ np.int32,
+ np.uint64,
+ np.int64,
+ np.float32,
+ np.float64,
+ np.complex64,
+ np.complex128,
+ object,
+]
+basic_arrays = []
+for scalar in scalars:
+ for endian in '<>':
+ dtype = np.dtype(scalar).newbyteorder(endian)
+ basic = np.arange(1500).astype(dtype)
+ basic_arrays.extend([
+ # Empty
+ np.array([], dtype=dtype),
+ # Rank-0
+ np.array(10, dtype=dtype),
+ # 1-D
+ basic,
+ # 2-D C-contiguous
+ basic.reshape((30, 50)),
+ # 2-D F-contiguous
+ basic.reshape((30, 50)).T,
+ # 2-D non-contiguous
+ basic.reshape((30, 50))[::-1, ::2],
+ ])
+
+# More complicated record arrays.
+# This is the structure of the table used for plain objects:
+#
+# +-+-+-+
+# |x|y|z|
+# +-+-+-+
+
+# Structure of a plain array description:
+Pdescr = [
+ ('x', 'i4', (2,)),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+# A plain list of tuples with values for testing:
+PbufferT = [
+ # x y z
+ ([3, 2], [[6., 4.], [6., 4.]], 8),
+ ([4, 3], [[7., 5.], [7., 5.]], 9),
+ ]
+
+
+# This is the structure of the table used for nested objects (DON'T PANIC!):
+#
+# +-+---------------------------------+-----+----------+-+-+
+# |x|Info |color|info |y|z|
+# | +-----+--+----------------+----+--+ +----+-----+ | |
+# | |value|y2|Info2 |name|z2| |Name|Value| | |
+# | | | +----+-----+--+--+ | | | | | | |
+# | | | |name|value|y3|z3| | | | | | | |
+# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
+#
+
+# The corresponding nested array description:
+Ndescr = [
+ ('x', 'i4', (2,)),
+ ('Info', [
+ ('value', 'c16'),
+ ('y2', 'f8'),
+ ('Info2', [
+ ('name', 'S2'),
+ ('value', 'c16', (2,)),
+ ('y3', 'f8', (2,)),
+ ('z3', 'u4', (2,))]),
+ ('name', 'S2'),
+ ('z2', 'b1')]),
+ ('color', 'S2'),
+ ('info', [
+ ('Name', 'U8'),
+ ('Value', 'c16')]),
+ ('y', 'f8', (2, 2)),
+ ('z', 'u1')]
+
+NbufferT = [
+ # x Info color info y z
+ # value y2 Info2 name z2 Name Value
+ # name value y3 z3
+ ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
+ 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
+ ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
+ 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
+ ]
+
+record_arrays = [
+ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
+ np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
+ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
+ np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
+]
+
+
+# A BytesIO subclass that reads a random number of bytes at a time
+class BytesIOSRandomSize(BytesIO):
+ def read(self, size=None):
+ import random
+ size = random.randint(1, size)
+ return super().read(size)
+
+
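+# BytesIOSRandomSize returns short reads on purpose: a robust reader has
+# to keep requesting bytes until the full block arrives. A minimal sketch
+# of the retry loop read_array needs to survive such streams
+# (illustrative, not numpy's actual code):
+def _read_exact(fp, n):
+    data = b''
+    while len(data) < n:
+        chunk = fp.read(n - len(data))
+        if not chunk:
+            raise ValueError("EOF reached before reading %d bytes" % n)
+        data += chunk
+    return data
+
+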
+def roundtrip(arr):
+ f = BytesIO()
+ format.write_array(f, arr)
+ f2 = BytesIO(f.getvalue())
+ arr2 = format.read_array(f2, allow_pickle=True)
+ return arr2
+
+
+def roundtrip_randsize(arr):
+ f = BytesIO()
+ format.write_array(f, arr)
+ f2 = BytesIOSRandomSize(f.getvalue())
+ arr2 = format.read_array(f2)
+ return arr2
+
+
+def roundtrip_truncated(arr):
+ f = BytesIO()
+ format.write_array(f, arr)
+ # the BytesIO is one byte short
+ f2 = BytesIO(f.getvalue()[0:-1])
+ arr2 = format.read_array(f2)
+ return arr2
+
+
+def assert_equal_(o1, o2):
+ assert_(o1 == o2)
+
+
+def test_roundtrip():
+ for arr in basic_arrays + record_arrays:
+ arr2 = roundtrip(arr)
+ assert_array_equal(arr, arr2)
+
+
+def test_roundtrip_randsize():
+ for arr in basic_arrays + record_arrays:
+ if arr.dtype != object:
+ arr2 = roundtrip_randsize(arr)
+ assert_array_equal(arr, arr2)
+
+
+def test_roundtrip_truncated():
+ for arr in basic_arrays:
+ if arr.dtype != object:
+ assert_raises(ValueError, roundtrip_truncated, arr)
+
+
+def test_long_str():
+ # check items larger than internal buffer size, gh-4027
+ long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))
+ long_str_arr2 = roundtrip(long_str_arr)
+ assert_array_equal(long_str_arr, long_str_arr2)
+
+
+@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
+@pytest.mark.slow
+def test_memmap_roundtrip(tmpdir):
+ for i, arr in enumerate(basic_arrays + record_arrays):
+ if arr.dtype.hasobject:
+ # Skip these since they can't be mmap'ed.
+ continue
+ # Write it out normally and through mmap.
+ nfn = os.path.join(tmpdir, f'normal{i}.npy')
+ mfn = os.path.join(tmpdir, f'memmap{i}.npy')
+ with open(nfn, 'wb') as fp:
+ format.write_array(fp, arr)
+
+ fortran_order = (
+ arr.flags.f_contiguous and not arr.flags.c_contiguous)
+ ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
+ shape=arr.shape, fortran_order=fortran_order)
+ ma[...] = arr
+ ma.flush()
+
+ # Check that both of these files' contents are the same.
+ with open(nfn, 'rb') as fp:
+ normal_bytes = fp.read()
+ with open(mfn, 'rb') as fp:
+ memmap_bytes = fp.read()
+ assert_equal_(normal_bytes, memmap_bytes)
+
+ # Check that reading the file using memmap works.
+ ma = format.open_memmap(nfn, mode='r')
+ ma.flush()
+
+
+def test_compressed_roundtrip(tmpdir):
+ arr = np.random.rand(200, 200)
+ npz_file = os.path.join(tmpdir, 'compressed.npz')
+ np.savez_compressed(npz_file, arr=arr)
+ with np.load(npz_file) as npz:
+ arr1 = npz['arr']
+ assert_array_equal(arr, arr1)
+
+
+# aligned
+dt1 = np.dtype('i1, i4, i1', align=True)
+# non-aligned, explicit offsets
+dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6]})
+# nested struct-in-struct
+dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
+# field with '' name
+dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
+# titles
+dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6], 'titles': ['aa', 'bb']})
+# empty
+dt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})
+
+@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])
+def test_load_padded_dtype(tmpdir, dt):
+ arr = np.zeros(3, dt)
+ for i in range(3):
+ arr[i] = i + 5
+ npz_file = os.path.join(tmpdir, 'aligned.npz')
+ np.savez(npz_file, arr=arr)
+ with np.load(npz_file) as npz:
+ arr1 = npz['arr']
+ assert_array_equal(arr, arr1)
+
+
+@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup")
+def test_python2_python3_interoperability():
+ fname = 'win64python2.npy'
+ path = os.path.join(os.path.dirname(__file__), 'data', fname)
+ data = np.load(path)
+ assert_array_equal(data, np.ones(2))
+
+def test_pickle_python2_python3():
+ # Test that object arrays saved on Python 2 load correctly on both
+ # Python 2 and Python 3.
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ expected = np.array([None, range, '\u512a\u826f',
+ b'\xe4\xb8\x8d\xe8\x89\xaf'],
+ dtype=object)
+
+ for fname in ['py2-objarr.npy', 'py2-objarr.npz',
+ 'py3-objarr.npy', 'py3-objarr.npz']:
+ path = os.path.join(data_dir, fname)
+
+ for encoding in ['bytes', 'latin1']:
+ data_f = np.load(path, allow_pickle=True, encoding=encoding)
+ if fname.endswith('.npz'):
+ data = data_f['x']
+ data_f.close()
+ else:
+ data = data_f
+
+ if encoding == 'latin1' and fname.startswith('py2'):
+ assert_(isinstance(data[3], str))
+ assert_array_equal(data[:-1], expected[:-1])
+ # mojibake occurs
+ assert_array_equal(data[-1].encode(encoding), expected[-1])
+ else:
+ assert_(isinstance(data[3], bytes))
+ assert_array_equal(data, expected)
+
+ if fname.startswith('py2'):
+ if fname.endswith('.npz'):
+ data = np.load(path, allow_pickle=True)
+ assert_raises(UnicodeError, data.__getitem__, 'x')
+ data.close()
+ data = np.load(path, allow_pickle=True, fix_imports=False,
+ encoding='latin1')
+ assert_raises(ImportError, data.__getitem__, 'x')
+ data.close()
+ else:
+ assert_raises(UnicodeError, np.load, path,
+ allow_pickle=True)
+ assert_raises(ImportError, np.load, path,
+ allow_pickle=True, fix_imports=False,
+ encoding='latin1')
+
+
+def test_pickle_disallow(tmpdir):
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ path = os.path.join(data_dir, 'py2-objarr.npy')
+ assert_raises(ValueError, np.load, path,
+ allow_pickle=False, encoding='latin1')
+
+ path = os.path.join(data_dir, 'py2-objarr.npz')
+ with np.load(path, allow_pickle=False, encoding='latin1') as f:
+ assert_raises(ValueError, f.__getitem__, 'x')
+
+ path = os.path.join(tmpdir, 'pickle-disabled.npy')
+ assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
+ allow_pickle=False)
+
+@pytest.mark.parametrize('dt', [
+ np.dtype(np.dtype([('a', np.int8),
+ ('b', np.int16),
+ ('c', np.int32),
+ ], align=True),
+ (3,)),
+ np.dtype([('x', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8,
+ },
+ (3,)),
+ (4,),
+ )]),
+ np.dtype([('x',
+ ('<f8', (5,)),
+ (2,),
+ )]),
+ np.dtype([('x', np.dtype((
+ np.dtype((
+ np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8}),
+ (3,)
+ )),
+ (4,)
+ )))
+ ]),
+ np.dtype([
+ ('a', np.dtype((
+ np.dtype((
+ np.dtype((
+ np.dtype([
+ ('a', int),
+ ('b', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8})),
+ ]),
+ (3,),
+ )),
+ (4,),
+ )),
+ (5,),
+ )))
+ ]),
+ ])
+def test_descr_to_dtype(dt):
+ dt1 = format.descr_to_dtype(dt.descr)
+ assert_equal_(dt1, dt)
+ arr1 = np.zeros(3, dt)
+ arr2 = roundtrip(arr1)
+ assert_array_equal(arr1, arr2)
+
+def test_version_2_0():
+ f = BytesIO()
+ # requires more than 2 bytes for the header
+ dt = [(("%d" % i) * 100, float) for i in range(500)]
+ d = np.ones(1000, dtype=dt)
+
+ format.write_array(f, d, version=(2, 0))
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', UserWarning)
+ format.write_array(f, d)
+ assert_(w[0].category is UserWarning)
+
+ # check alignment of data portion
+ f.seek(0)
+ header = f.readline()
+ assert_(len(header) % format.ARRAY_ALIGN == 0)
+
+ f.seek(0)
+ n = format.read_array(f, max_header_size=200000)
+ assert_array_equal(d, n)
+
+ # 1.0 requested but data cannot be saved this way
+ assert_raises(ValueError, format.write_array, f, d, (1, 0))
+
+
+@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")
+def test_version_2_0_memmap(tmpdir):
+ # requires more than 2 bytes for the header
+ dt = [(("%d" % i) * 100, float) for i in range(500)]
+ d = np.ones(1000, dtype=dt)
+ tf1 = os.path.join(tmpdir, 'version2_01.npy')
+ tf2 = os.path.join(tmpdir, 'version2_02.npy')
+
+ # 1.0 requested but data cannot be saved this way
+ assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype,
+ shape=d.shape, version=(1, 0))
+
+ ma = format.open_memmap(tf1, mode='w+', dtype=d.dtype,
+ shape=d.shape, version=(2, 0))
+ ma[...] = d
+ ma.flush()
+ ma = format.open_memmap(tf1, mode='r', max_header_size=200000)
+ assert_array_equal(ma, d)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', UserWarning)
+ ma = format.open_memmap(tf2, mode='w+', dtype=d.dtype,
+ shape=d.shape, version=None)
+ assert_(w[0].category is UserWarning)
+ ma[...] = d
+ ma.flush()
+
+ ma = format.open_memmap(tf2, mode='r', max_header_size=200000)
+
+ assert_array_equal(ma, d)
+
+@pytest.mark.parametrize("mmap_mode", ["r", None])
+def test_huge_header(tmpdir, mmap_mode):
+ f = os.path.join(tmpdir, 'large_header.npy')
+ arr = np.array(1, dtype="i,"*10000+"i")
+
+ with pytest.warns(UserWarning, match=".*format 2.0"):
+ np.save(f, arr)
+
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f, mmap_mode=mmap_mode)
+
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f, mmap_mode=mmap_mode, max_header_size=20000)
+
+ res = np.load(f, mmap_mode=mmap_mode, allow_pickle=True)
+ assert_array_equal(res, arr)
+
+ res = np.load(f, mmap_mode=mmap_mode, max_header_size=180000)
+ assert_array_equal(res, arr)
+
+def test_huge_header_npz(tmpdir):
+ f = os.path.join(tmpdir, 'large_header.npz')
+ arr = np.array(1, dtype="i,"*10000+"i")
+
+ with pytest.warns(UserWarning, match=".*format 2.0"):
+ np.savez(f, arr=arr)
+
+ # Only getting the array from the file actually reads it
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f)["arr"]
+
+ with pytest.raises(ValueError, match="Header.*large"):
+ np.load(f, max_header_size=20000)["arr"]
+
+ res = np.load(f, allow_pickle=True)["arr"]
+ assert_array_equal(res, arr)
+
+ res = np.load(f, max_header_size=180000)["arr"]
+ assert_array_equal(res, arr)
+
+def test_write_version():
+ f = BytesIO()
+ arr = np.arange(1)
+ # These should pass.
+ format.write_array(f, arr, version=(1, 0))
+ format.write_array(f, arr)
+
+ format.write_array(f, arr, version=None)
+ format.write_array(f, arr)
+
+ format.write_array(f, arr, version=(2, 0))
+ format.write_array(f, arr)
+
+ # These should all fail.
+ bad_versions = [
+ (1, 1),
+ (0, 0),
+ (0, 1),
+ (2, 2),
+ (255, 255),
+ ]
+ for version in bad_versions:
+ with assert_raises_regex(ValueError,
+ 'we only support format version.*'):
+ format.write_array(f, arr, version=version)
+
+
+bad_version_magic = [
+ b'\x93NUMPY\x01\x01',
+ b'\x93NUMPY\x00\x00',
+ b'\x93NUMPY\x00\x01',
+ b'\x93NUMPY\x02\x00',
+ b'\x93NUMPY\x02\x02',
+ b'\x93NUMPY\xff\xff',
+]
+malformed_magic = [
+ b'\x92NUMPY\x01\x00',
+ b'\x00NUMPY\x01\x00',
+ b'\x93numpy\x01\x00',
+ b'\x93MATLB\x01\x00',
+ b'\x93NUMPY\x01',
+ b'\x93NUMPY',
+ b'',
+]
+
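+# The magic preamble probed by these fixtures is the six-byte prefix
+# b'\x93NUMPY' followed by one unsigned byte each for the major and minor
+# format version. A sketch of the layout, mirroring what format.magic
+# produces:
+def _magic_sketch(major, minor):
+    assert 0 <= major < 256 and 0 <= minor < 256
+    return b'\x93NUMPY' + bytes([major, minor])
+
+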
+def test_read_magic():
+ s1 = BytesIO()
+ s2 = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+
+ format.write_array(s1, arr, version=(1, 0))
+ format.write_array(s2, arr, version=(2, 0))
+
+ s1.seek(0)
+ s2.seek(0)
+
+ version1 = format.read_magic(s1)
+ version2 = format.read_magic(s2)
+
+ assert_(version1 == (1, 0))
+ assert_(version2 == (2, 0))
+
+ assert_(s1.tell() == format.MAGIC_LEN)
+ assert_(s2.tell() == format.MAGIC_LEN)
+
+def test_read_magic_bad_magic():
+ for magic in malformed_magic:
+ f = BytesIO(magic)
+ assert_raises(ValueError, format.read_array, f)
+
+
+def test_read_version_1_0_bad_magic():
+ for magic in bad_version_magic + malformed_magic:
+ f = BytesIO(magic)
+ assert_raises(ValueError, format.read_array, f)
+
+
+def test_bad_magic_args():
+ assert_raises(ValueError, format.magic, -1, 1)
+ assert_raises(ValueError, format.magic, 256, 1)
+ assert_raises(ValueError, format.magic, 1, -1)
+ assert_raises(ValueError, format.magic, 1, 256)
+
+
+def test_large_header():
+ s = BytesIO()
+ d = {'shape': tuple(), 'fortran_order': False, 'descr': '<i8'}
+ format.write_array_header_1_0(s, d)
+
+ s = BytesIO()
+ d['descr'] = [('x'*256*256, '<i8')]
+ assert_raises(ValueError, format.write_array_header_1_0, s, d)
+
+
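+# A hedged note on why the oversized header above must fail: the 1.0
+# format stores the header length in an unsigned 16-bit little-endian
+# field, so a padded header cannot exceed 2**16 - 1 bytes; larger headers
+# need format version 2.0, which widens the field to 32 bits.
+def _v1_header_limit():
+    import struct
+    assert struct.calcsize('<H') == 2   # 1.0 length field
+    assert struct.calcsize('<I') == 4   # 2.0 length field
+    return 2**16 - 1
+
+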
+def test_read_array_header_1_0():
+ s = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+ format.write_array(s, arr, version=(1, 0))
+
+ s.seek(format.MAGIC_LEN)
+ shape, fortran, dtype = format.read_array_header_1_0(s)
+
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
+ assert_((shape, fortran, dtype) == ((3, 6), False, float))
+
+
+def test_read_array_header_2_0():
+ s = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+ format.write_array(s, arr, version=(2, 0))
+
+ s.seek(format.MAGIC_LEN)
+ shape, fortran, dtype = format.read_array_header_2_0(s)
+
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
+ assert_((shape, fortran, dtype) == ((3, 6), False, float))
+
+
+def test_bad_header():
+ # header of length less than 2 should fail
+ s = BytesIO()
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+ s = BytesIO(b'1')
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+ # header shorter than indicated size should fail
+ s = BytesIO(b'\x01\x00')
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+ # headers without the exact keys required should fail
+ # d = {"shape": (1, 2),
+ # "descr": "x"}
+ s = BytesIO(
+ b"\x93NUMPY\x01\x006\x00{'descr': 'x', 'shape': (1, 2), }" +
+ b" \n"
+ )
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+ d = {"shape": (1, 2),
+ "fortran_order": False,
+ "descr": "x",
+ "extrakey": -1}
+ s = BytesIO()
+ format.write_array_header_1_0(s, d)
+ assert_raises(ValueError, format.read_array_header_1_0, s)
+
+
+def test_large_file_support(tmpdir):
+ if sys.platform in ('win32', 'cygwin'):
+ pytest.skip("Unknown if Windows has sparse filesystems")
+ # try creating a large sparse file
+ tf_name = os.path.join(tmpdir, 'sparse_file')
+ try:
+ # Seeking past the end would work too, but truncate on Linux
+ # somewhat increases the chances that we have a sparse filesystem
+ # and can avoid actually writing 5GB.
+ import subprocess as sp
+ sp.check_call(["truncate", "-s", "5368709120", tf_name])
+ except Exception:
+ pytest.skip("Could not create 5GB large file")
+ # write a small array to the end
+ with open(tf_name, "wb") as f:
+ f.seek(5368709120)
+ d = np.arange(5)
+ np.save(f, d)
+ # read it back
+ with open(tf_name, "rb") as f:
+ f.seek(5368709120)
+ r = np.load(f)
+ assert_array_equal(r, d)
+
+
+@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy")
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
+ reason="test requires 64-bit system")
+@pytest.mark.slow
+@requires_memory(free_bytes=2 * 2**30)
+def test_large_archive(tmpdir):
+ # Regression test for saving arrays whose dimensions have a product
+ # that does not fit in an int32. See gh-7598 for details.
+ shape = (2**30, 2)
+ try:
+ a = np.empty(shape, dtype=np.uint8)
+ except MemoryError:
+ pytest.skip("Could not create large file")
+
+ fname = os.path.join(tmpdir, "large_archive")
+
+ with open(fname, "wb") as f:
+ np.savez(f, arr=a)
+
+ del a
+
+ with open(fname, "rb") as f:
+ new_a = np.load(f)["arr"]
+
+ assert new_a.shape == shape
+
+
+def test_empty_npz(tmpdir):
+ # Test for gh-9989
+ fname = os.path.join(tmpdir, "nothing.npz")
+ np.savez(fname)
+ with np.load(fname) as nps:
+ pass
+
+
+def test_unicode_field_names(tmpdir):
+ # gh-7391
+ arr = np.array([
+ (1, 3),
+ (1, 2),
+ (1, 3),
+ (1, 2)
+ ], dtype=[
+ ('int', int),
+ ('\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
+ ])
+ fname = os.path.join(tmpdir, "unicode.npy")
+ with open(fname, 'wb') as f:
+ format.write_array(f, arr, version=(3, 0))
+ with open(fname, 'rb') as f:
+ arr2 = format.read_array(f)
+ assert_array_equal(arr, arr2)
+
+ # check that the user is notified that version 3.0 was selected
+ with open(fname, 'wb') as f:
+ with assert_warns(UserWarning):
+ format.write_array(f, arr, version=None)
+
+def test_header_growth_axis():
+ for is_fortran_array, dtype_space, expected_header_length in [
+ [False, 22, 128], [False, 23, 192], [True, 23, 128], [True, 24, 192]
+ ]:
+ for size in [10**i for i in range(format.GROWTH_AXIS_MAX_DIGITS)]:
+ fp = BytesIO()
+ format.write_array_header_1_0(fp, {
+ 'shape': (2, size) if is_fortran_array else (size, 2),
+ 'fortran_order': is_fortran_array,
+ 'descr': np.dtype([(' '*dtype_space, int)])
+ })
+
+ assert len(fp.getvalue()) == expected_header_length
+
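+# What the expected lengths above encode: the whole preamble (magic,
+# length field, padded header) is rounded up to a multiple of
+# format.ARRAY_ALIGN so that the data portion starts aligned, which
+# matters for memory-mapping. A small illustrative check, not part of
+# the suite:
+def _preamble_is_aligned(arr):
+    fp = BytesIO()
+    format.write_array(fp, arr)
+    fp.seek(0)
+    preamble = fp.readline()  # magic + length field + padded header
+    return len(preamble) % format.ARRAY_ALIGN == 0
+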
+@pytest.mark.parametrize('dt, fail', [
+ (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
+ metadata={'some': 'stuff'})]}), True),
+ (np.dtype(int, metadata={'some': 'stuff'}), False),
+ (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
+ # recursive: metadata on the field of a dtype
+ (np.dtype({'names': ['a', 'b'], 'formats': [
+ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
+ ]}), False)
+ ])
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_metadata_dtype(dt, fail):
+ # gh-14142
+ arr = np.ones(10, dtype=dt)
+ buf = BytesIO()
+ with assert_warns(UserWarning):
+ np.save(buf, arr)
+ buf.seek(0)
+ if fail:
+ with assert_raises(ValueError):
+ np.load(buf)
+ else:
+ arr2 = np.load(buf)
+ # BUG: assert_array_equal does not check metadata
+ from numpy.lib.format import _has_metadata
+ assert_array_equal(arr, arr2)
+ assert _has_metadata(arr.dtype)
+ assert not _has_metadata(arr2.dtype)
+
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_function_base.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_function_base.py
new file mode 100644
index 00000000..1bb4c4ef
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_function_base.py
@@ -0,0 +1,3980 @@
+import operator
+import warnings
+import sys
+import decimal
+from fractions import Fraction
+import math
+import pytest
+import hypothesis
+from hypothesis.extra.numpy import arrays
+import hypothesis.strategies as st
+
+
+import numpy as np
+from numpy import ma
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,
+ assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, IS_WASM
+ )
+import numpy.lib.function_base as nfb
+from numpy.random import rand
+from numpy.lib import (
+ add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
+ delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
+ i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,
+ select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
+ )
+from numpy.core.numeric import normalize_axis_tuple
+
+
+def get_mat(n):
+ data = np.arange(n)
+ data = np.add.outer(data, data)
+ return data
+
+
+def _make_complex(real, imag):
+ """
+ Like real + 1j * imag, but behaves as expected when imag contains non-finite
+ values
+ """
+ ret = np.zeros(np.broadcast(real, imag).shape, np.complex_)
+ ret.real = real
+ ret.imag = imag
+ return ret
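+
+# A note on why the naive expression fails (a sketch under IEEE semantics):
+# ``1 + 1j * np.inf`` evaluates to ``nan+infj``, because the complex
+# multiply computes ``0 * inf`` for the real part, whereas
+# ``_make_complex(1, np.inf)`` keeps the real part at 1 by assigning
+# ``.real`` and ``.imag`` separately.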
+
+
+class TestRot90:
+ def test_basic(self):
+ assert_raises(ValueError, rot90, np.ones(4))
+ assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
+ assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
+ assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
+ assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1))
+
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b1 = [[2, 5],
+ [1, 4],
+ [0, 3]]
+ b2 = [[5, 4, 3],
+ [2, 1, 0]]
+ b3 = [[3, 0],
+ [4, 1],
+ [5, 2]]
+ b4 = [[0, 1, 2],
+ [3, 4, 5]]
+
+ for k in range(-3, 13, 4):
+ assert_equal(rot90(a, k=k), b1)
+ for k in range(-2, 13, 4):
+ assert_equal(rot90(a, k=k), b2)
+ for k in range(-1, 13, 4):
+ assert_equal(rot90(a, k=k), b3)
+ for k in range(0, 13, 4):
+ assert_equal(rot90(a, k=k), b4)
+
+ assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a)
+ assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1)))
+
+ def test_axes(self):
+ a = np.ones((50, 40, 3))
+ assert_equal(rot90(a).shape, (40, 50, 3))
+ assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1)))
+ assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1)))
+
+ def test_rotation_axes(self):
+ a = np.arange(8).reshape((2,2,2))
+
+ a_rot90_01 = [[[2, 3],
+ [6, 7]],
+ [[0, 1],
+ [4, 5]]]
+ a_rot90_12 = [[[1, 3],
+ [0, 2]],
+ [[5, 7],
+ [4, 6]]]
+ a_rot90_20 = [[[4, 0],
+ [6, 2]],
+ [[5, 1],
+ [7, 3]]]
+ a_rot90_10 = [[[4, 5],
+ [0, 1]],
+ [[6, 7],
+ [2, 3]]]
+
+ assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)
+ assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)
+ assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)
+
+ for k in range(1,5):
+ assert_equal(rot90(a, k=k, axes=(2, 0)),
+ rot90(a_rot90_20, k=k-1, axes=(2, 0)))
+
+
+class TestFlip:
+
+ def test_axes(self):
+ assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
+
+ def test_basic_lr(self):
+ a = get_mat(4)
+ b = a[:, ::-1]
+ assert_equal(np.flip(a, 1), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[2, 1, 0],
+ [5, 4, 3]]
+ assert_equal(np.flip(a, 1), b)
+
+ def test_basic_ud(self):
+ a = get_mat(4)
+ b = a[::-1, :]
+ assert_equal(np.flip(a, 0), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[3, 4, 5],
+ [0, 1, 2]]
+ assert_equal(np.flip(a, 0), b)
+
+ def test_3d_swap_axis0(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ b = np.array([[[4, 5],
+ [6, 7]],
+ [[0, 1],
+ [2, 3]]])
+
+ assert_equal(np.flip(a, 0), b)
+
+ def test_3d_swap_axis1(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ b = np.array([[[2, 3],
+ [0, 1]],
+ [[6, 7],
+ [4, 5]]])
+
+ assert_equal(np.flip(a, 1), b)
+
+ def test_3d_swap_axis2(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ b = np.array([[[1, 0],
+ [3, 2]],
+ [[5, 4],
+ [7, 6]]])
+
+ assert_equal(np.flip(a, 2), b)
+
+ def test_4d(self):
+ a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
+ for i in range(a.ndim):
+ assert_equal(np.flip(a, i),
+ np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
+
+ def test_default_axis(self):
+ a = np.array([[1, 2, 3],
+ [4, 5, 6]])
+ b = np.array([[6, 5, 4],
+ [3, 2, 1]])
+ assert_equal(np.flip(a), b)
+
+ def test_multiple_axes(self):
+ a = np.array([[[0, 1],
+ [2, 3]],
+ [[4, 5],
+ [6, 7]]])
+
+ assert_equal(np.flip(a, axis=()), a)
+
+ b = np.array([[[5, 4],
+ [7, 6]],
+ [[1, 0],
+ [3, 2]]])
+
+ assert_equal(np.flip(a, axis=(0, 2)), b)
+
+ c = np.array([[[3, 2],
+ [1, 0]],
+ [[7, 6],
+ [5, 4]]])
+
+ assert_equal(np.flip(a, axis=(1, 2)), c)
+
+
+class TestAny:
+
+ def test_basic(self):
+ y1 = [0, 0, 1, 0]
+ y2 = [0, 0, 0, 0]
+ y3 = [1, 0, 1, 0]
+ assert_(np.any(y1))
+ assert_(np.any(y3))
+ assert_(not np.any(y2))
+
+ def test_nd(self):
+ y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
+ assert_(np.any(y1))
+ assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
+ assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
+
+
+class TestAll:
+
+ def test_basic(self):
+ y1 = [0, 1, 1, 0]
+ y2 = [0, 0, 0, 0]
+ y3 = [1, 1, 1, 1]
+ assert_(not np.all(y1))
+ assert_(np.all(y3))
+ assert_(not np.all(y2))
+ assert_(np.all(~np.array(y2)))
+
+ def test_nd(self):
+ y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
+ assert_(not np.all(y1))
+ assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
+ assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
+
+
+class TestCopy:
+
+ def test_basic(self):
+ a = np.array([[1, 2], [3, 4]])
+ a_copy = np.copy(a)
+ assert_array_equal(a, a_copy)
+ a_copy[0, 0] = 10
+ assert_equal(a[0, 0], 1)
+ assert_equal(a_copy[0, 0], 10)
+
+ def test_order(self):
+ # It turns out that people rely on np.copy() preserving order by
+ # default; changing this broke scikit-learn:
+ # github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa
+ a = np.array([[1, 2], [3, 4]])
+ assert_(a.flags.c_contiguous)
+ assert_(not a.flags.f_contiguous)
+ a_fort = np.array([[1, 2], [3, 4]], order="F")
+ assert_(not a_fort.flags.c_contiguous)
+ assert_(a_fort.flags.f_contiguous)
+ a_copy = np.copy(a)
+ assert_(a_copy.flags.c_contiguous)
+ assert_(not a_copy.flags.f_contiguous)
+ a_fort_copy = np.copy(a_fort)
+ assert_(not a_fort_copy.flags.c_contiguous)
+ assert_(a_fort_copy.flags.f_contiguous)
+
+ def test_subok(self):
+ mx = ma.ones(5)
+ assert_(not ma.isMaskedArray(np.copy(mx, subok=False)))
+ assert_(ma.isMaskedArray(np.copy(mx, subok=True)))
+ # Default behavior
+ assert_(not ma.isMaskedArray(np.copy(mx)))
+
+
+class TestAverage:
+
+ def test_basic(self):
+ y1 = np.array([1, 2, 3])
+ assert_(average(y1, axis=0) == 2.)
+ y2 = np.array([1., 2., 3.])
+ assert_(average(y2, axis=0) == 2.)
+ y3 = [0., 0., 0.]
+ assert_(average(y3, axis=0) == 0.)
+
+ y4 = np.ones((4, 4))
+ y4[0, 1] = 0
+ y4[1, 0] = 2
+ assert_almost_equal(y4.mean(0), average(y4, 0))
+ assert_almost_equal(y4.mean(1), average(y4, 1))
+
+ y5 = rand(5, 5)
+ assert_almost_equal(y5.mean(0), average(y5, 0))
+ assert_almost_equal(y5.mean(1), average(y5, 1))
+
+ @pytest.mark.parametrize(
+ 'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+ [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+ ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+ [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+ )
+ def test_basic_keepdims(self, x, axis, expected_avg,
+ weights, expected_wavg, expected_wsum):
+ avg = np.average(x, axis=axis, keepdims=True)
+ assert avg.shape == np.shape(expected_avg)
+ assert_array_equal(avg, expected_avg)
+
+ wavg = np.average(x, axis=axis, weights=weights, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+
+ wavg, wsum = np.average(x, axis=axis, weights=weights, returned=True,
+ keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+ assert wsum.shape == np.shape(expected_wsum)
+ assert_array_equal(wsum, expected_wsum)
+
+ def test_weights(self):
+ y = np.arange(10)
+ w = np.arange(10)
+ actual = average(y, weights=w)
+ desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum()
+ assert_almost_equal(actual, desired)
+
+ y1 = np.array([[1, 2, 3], [4, 5, 6]])
+ w0 = [1, 2]
+ actual = average(y1, weights=w0, axis=0)
+ desired = np.array([3., 4., 5.])
+ assert_almost_equal(actual, desired)
+
+ w1 = [0, 0, 1]
+ actual = average(y1, weights=w1, axis=1)
+ desired = np.array([3., 6.])
+ assert_almost_equal(actual, desired)
+
+        # This should raise an error. Can we test for that?
+ # assert_equal(average(y1, weights=w1), 9./2.)
+
+ # 2D Case
+ w2 = [[0, 0, 1], [0, 0, 2]]
+ desired = np.array([3., 6.])
+ assert_array_equal(average(y1, weights=w2, axis=1), desired)
+ assert_equal(average(y1, weights=w2), 5.)
+
+ y3 = rand(5).astype(np.float32)
+ w3 = rand(5).astype(np.float64)
+
+ assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))
+
+ # test weights with `keepdims=False` and `keepdims=True`
+ x = np.array([2, 3, 4]).reshape(3, 1)
+ w = np.array([4, 5, 6]).reshape(3, 1)
+
+ actual = np.average(x, weights=w, axis=1, keepdims=False)
+ desired = np.array([2., 3., 4.])
+ assert_array_equal(actual, desired)
+
+ actual = np.average(x, weights=w, axis=1, keepdims=True)
+ desired = np.array([[2.], [3.], [4.]])
+ assert_array_equal(actual, desired)
+
+ def test_returned(self):
+ y = np.array([[1, 2, 3], [4, 5, 6]])
+
+ # No weights
+ avg, scl = average(y, returned=True)
+ assert_equal(scl, 6.)
+
+ avg, scl = average(y, 0, returned=True)
+ assert_array_equal(scl, np.array([2., 2., 2.]))
+
+ avg, scl = average(y, 1, returned=True)
+ assert_array_equal(scl, np.array([3., 3.]))
+
+ # With weights
+ w0 = [1, 2]
+ avg, scl = average(y, weights=w0, axis=0, returned=True)
+ assert_array_equal(scl, np.array([3., 3., 3.]))
+
+ w1 = [1, 2, 3]
+ avg, scl = average(y, weights=w1, axis=1, returned=True)
+ assert_array_equal(scl, np.array([6., 6.]))
+
+ w2 = [[0, 0, 1], [1, 2, 3]]
+ avg, scl = average(y, weights=w2, axis=1, returned=True)
+ assert_array_equal(scl, np.array([1., 6.]))
+
+ def test_subclasses(self):
+ class subclass(np.ndarray):
+ pass
+ a = np.array([[1,2],[3,4]]).view(subclass)
+ w = np.array([[1,2],[3,4]]).view(subclass)
+
+ assert_equal(type(np.average(a)), subclass)
+ assert_equal(type(np.average(a, weights=w)), subclass)
+
+ def test_upcasting(self):
+ typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
+ ('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
+ for at, wt, rt in typs:
+ a = np.array([[1,2],[3,4]], dtype=at)
+ w = np.array([[1,2],[3,4]], dtype=wt)
+ assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
+
+ def test_object_dtype(self):
+ a = np.array([decimal.Decimal(x) for x in range(10)])
+ w = np.array([decimal.Decimal(1) for _ in range(10)])
+ w /= w.sum()
+ assert_almost_equal(a.mean(0), average(a, weights=w))
+
+ def test_average_class_without_dtype(self):
+ # see gh-21988
+ a = np.array([Fraction(1, 5), Fraction(3, 5)])
+ assert_equal(np.average(a), Fraction(2, 5))
+
+class TestSelect:
+ choices = [np.array([1, 2, 3]),
+ np.array([4, 5, 6]),
+ np.array([7, 8, 9])]
+ conditions = [np.array([False, False, False]),
+ np.array([False, True, False]),
+ np.array([False, False, True])]
+
+ def _select(self, cond, values, default=0):
+ output = []
+ for m in range(len(cond)):
+ output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
+ return output
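+
+    # (_select is a pure-Python reference: for each position it collects
+    # the value of every condition that holds, else the default --
+    # equivalent to np.select whenever at most one condition is True per
+    # element, as in the fixtures above)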
+
+ def test_basic(self):
+ choices = self.choices
+ conditions = self.conditions
+ assert_array_equal(select(conditions, choices, default=15),
+ self._select(conditions, choices, default=15))
+
+ assert_equal(len(choices), 3)
+ assert_equal(len(conditions), 3)
+
+ def test_broadcasting(self):
+ conditions = [np.array(True), np.array([False, True, False])]
+ choices = [1, np.arange(12).reshape(4, 3)]
+ assert_array_equal(select(conditions, choices), np.ones((4, 3)))
+ # default can broadcast too:
+ assert_equal(select([True], [0], default=[0]).shape, (1,))
+
+ def test_return_dtype(self):
+ assert_equal(select(self.conditions, self.choices, 1j).dtype,
+ np.complex_)
+        # But the conditions need to be stronger than the scalar default
+        # if it is scalar.
+ choices = [choice.astype(np.int8) for choice in self.choices]
+ assert_equal(select(self.conditions, choices).dtype, np.int8)
+
+ d = np.array([1, 2, 3, np.nan, 5, 7])
+ m = np.isnan(d)
+ assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
+
+ def test_deprecated_empty(self):
+ assert_raises(ValueError, select, [], [], 3j)
+ assert_raises(ValueError, select, [], [])
+
+ def test_non_bool_deprecation(self):
+ choices = self.choices
+ conditions = self.conditions[:]
+ conditions[0] = conditions[0].astype(np.int_)
+ assert_raises(TypeError, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.uint8)
+ assert_raises(TypeError, select, conditions, choices)
+ assert_raises(TypeError, select, conditions, choices)
+
+ def test_many_arguments(self):
+ # This used to be limited by NPY_MAXARGS == 32
+ conditions = [np.array([False])] * 100
+ choices = [np.array([1])] * 100
+ select(conditions, choices)
+
+
+class TestInsert:
+
+ def test_basic(self):
+ a = [1, 2, 3]
+ assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
+ assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
+ assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+ assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
+ assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
+ assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
+ assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
+ b = np.array([0, 1], dtype=np.float64)
+ assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
+ assert_equal(insert(b, [], []), b)
+ # Bools will be treated differently in the future:
+ # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9])
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', FutureWarning)
+ assert_equal(
+ insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3])
+ assert_(w[0].category is FutureWarning)
+
+ def test_multidim(self):
+ a = [[1, 1, 1]]
+ r = [[2, 2, 2],
+ [1, 1, 1]]
+ assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
+ assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
+ assert_equal(insert(a, 0, 2, axis=0), r)
+ assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
+
+ a = np.array([[1, 1], [2, 2], [3, 3]])
+ b = np.arange(1, 4).repeat(3).reshape(3, 3)
+ c = np.concatenate(
+ (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
+ a[:, 1:2]), axis=1)
+ assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
+ assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
+ # scalars behave differently, in this case exactly opposite:
+ assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
+ assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
+
+ a = np.arange(4).reshape(2, 2)
+ assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
+ assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)
+
+ # negative axis value
+ a = np.arange(24).reshape((2, 3, 4))
+ assert_equal(insert(a, 1, a[:,:, 3], axis=-1),
+ insert(a, 1, a[:,:, 3], axis=2))
+ assert_equal(insert(a, 1, a[:, 2,:], axis=-2),
+ insert(a, 1, a[:, 2,:], axis=1))
+
+ # invalid axis value
+ assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3)
+ assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4)
+
+ # negative axis value
+ a = np.arange(24).reshape((2, 3, 4))
+ assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
+ insert(a, 1, a[:, :, 3], axis=2))
+ assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
+ insert(a, 1, a[:, 2, :], axis=1))
+
+ def test_0d(self):
+ a = np.array(1)
+ with pytest.raises(np.AxisError):
+ insert(a, [], 2, axis=0)
+ with pytest.raises(TypeError):
+ insert(a, [], 2, axis="nonsense")
+
+ def test_subclass(self):
+ class SubClass(np.ndarray):
+ pass
+ a = np.arange(10).view(SubClass)
+ assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+ assert_(isinstance(np.insert(a, [], []), SubClass))
+ assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
+ assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
+ assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
+ # This is an error in the future:
+ a = np.array(1).view(SubClass)
+ assert_(isinstance(np.insert(a, 0, [0]), SubClass))
+
+ def test_index_array_copied(self):
+ x = np.array([1, 1, 1])
+ np.insert([0, 1, 2], x, [3, 4, 5])
+ assert_equal(x, np.array([1, 1, 1]))
+
+ def test_structured_array(self):
+ a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
+ dtype=[('foo', 'i'), ('bar', 'a1')])
+ val = (4, 'd')
+ b = np.insert(a, 0, val)
+ assert_array_equal(b[0], np.array(val, dtype=b.dtype))
+ val = [(4, 'd')] * 2
+ b = np.insert(a, [0, 2], val)
+ assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
+
+ def test_index_floats(self):
+ with pytest.raises(IndexError):
+ np.insert([0, 1, 2], np.array([1.0, 2.0]), [10, 20])
+ with pytest.raises(IndexError):
+ np.insert([0, 1, 2], np.array([], dtype=float), [])
+
+ @pytest.mark.parametrize('idx', [4, -4])
+ def test_index_out_of_bounds(self, idx):
+ with pytest.raises(IndexError, match='out of bounds'):
+ np.insert([0, 1, 2], [idx], [3, 4])
+
+
+class TestAmax:
+
+ def test_basic(self):
+ a = [3, 4, 5, 10, -3, -5, 6.0]
+ assert_equal(np.amax(a), 10.0)
+ b = [[3, 6.0, 9.0],
+ [4, 10.0, 5.0],
+ [8, 3.0, 2.0]]
+ assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])
+ assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
+
+
+class TestAmin:
+
+ def test_basic(self):
+ a = [3, 4, 5, 10, -3, -5, 6.0]
+ assert_equal(np.amin(a), -5.0)
+ b = [[3, 6.0, 9.0],
+ [4, 10.0, 5.0],
+ [8, 3.0, 2.0]]
+ assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])
+ assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
+
+
+class TestPtp:
+
+ def test_basic(self):
+ a = np.array([3, 4, 5, 10, -3, -5, 6.0])
+ assert_equal(a.ptp(axis=0), 15.0)
+ b = np.array([[3, 6.0, 9.0],
+ [4, 10.0, 5.0],
+ [8, 3.0, 2.0]])
+ assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])
+ assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
+
+ assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
+ assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])
+
+
+class TestCumsum:
+
+ def test_basic(self):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+ for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
+ np.uint32, np.float32, np.float64, np.complex64,
+ np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+
+ tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
+ assert_array_equal(np.cumsum(a, axis=0), tgt)
+
+ tgt = np.array(
+ [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
+ assert_array_equal(np.cumsum(a2, axis=0), tgt)
+
+ tgt = np.array(
+ [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
+ assert_array_equal(np.cumsum(a2, axis=1), tgt)
+
+
+class TestProd:
+
+ def test_basic(self):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+ for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
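+            # NOTE: ctype is a dtype class here, never the legacy one-char
+            # type codes '1'/'b', so the branch below is effectively dead
+            # for the dtypes tested; it is kept for parity with old code.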
+ if ctype in ['1', 'b']:
+ assert_raises(ArithmeticError, np.prod, a)
+ assert_raises(ArithmeticError, np.prod, a2, 1)
+ else:
+ assert_equal(a.prod(axis=0), 26400)
+ assert_array_equal(a2.prod(axis=0),
+ np.array([50, 36, 84, 180], ctype))
+ assert_array_equal(a2.prod(axis=-1),
+ np.array([24, 1890, 600], ctype))
+
+
+class TestCumprod:
+
+ def test_basic(self):
+ ba = [1, 2, 10, 11, 6, 5, 4]
+ ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
+ for ctype in [np.int16, np.uint16, np.int32, np.uint32,
+ np.float32, np.float64, np.complex64, np.complex128]:
+ a = np.array(ba, ctype)
+ a2 = np.array(ba2, ctype)
+ if ctype in ['1', 'b']:
+ assert_raises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a2, 1)
+ assert_raises(ArithmeticError, np.cumprod, a)
+ else:
+ assert_array_equal(np.cumprod(a, axis=-1),
+ np.array([1, 2, 20, 220,
+ 1320, 6600, 26400], ctype))
+ assert_array_equal(np.cumprod(a2, axis=0),
+ np.array([[1, 2, 3, 4],
+ [5, 12, 21, 36],
+ [50, 36, 84, 180]], ctype))
+ assert_array_equal(np.cumprod(a2, axis=-1),
+ np.array([[1, 2, 6, 24],
+ [5, 30, 210, 1890],
+ [10, 30, 120, 600]], ctype))
+
+
+class TestDiff:
+
+ def test_basic(self):
+ x = [1, 4, 6, 7, 12]
+ out = np.array([3, 2, 1, 5])
+ out2 = np.array([-1, -1, 4])
+ out3 = np.array([0, 5])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+ assert_array_equal(diff(x, n=3), out3)
+
+ x = [1.1, 2.2, 3.0, -0.2, -0.1]
+ out = np.array([1.1, 0.8, -3.2, 0.1])
+ assert_almost_equal(diff(x), out)
+
+ x = [True, True, False, False]
+ out = np.array([False, True, False])
+ out2 = np.array([True, True])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+
+ def test_axis(self):
+ x = np.zeros((10, 20, 30))
+ x[:, 1::2, :] = 1
+ exp = np.ones((10, 19, 30))
+ exp[:, 1::2, :] = -1
+ assert_array_equal(diff(x), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
+ assert_array_equal(diff(x, axis=1), exp)
+ assert_array_equal(diff(x, axis=-2), exp)
+ assert_raises(np.AxisError, diff, x, axis=3)
+ assert_raises(np.AxisError, diff, x, axis=-4)
+
+ x = np.array(1.11111111111, np.float64)
+ assert_raises(ValueError, diff, x)
+
+ def test_nd(self):
+ x = 20 * rand(10, 20, 30)
+ out1 = x[:, :, 1:] - x[:, :, :-1]
+ out2 = out1[:, :, 1:] - out1[:, :, :-1]
+ out3 = x[1:, :, :] - x[:-1, :, :]
+ out4 = out3[1:, :, :] - out3[:-1, :, :]
+ assert_array_equal(diff(x), out1)
+ assert_array_equal(diff(x, n=2), out2)
+ assert_array_equal(diff(x, axis=0), out3)
+ assert_array_equal(diff(x, n=2, axis=0), out4)
+
+ def test_n(self):
+ x = list(range(3))
+ assert_raises(ValueError, diff, x, n=-1)
+ output = [diff(x, n=n) for n in range(1, 5)]
+ expected = [[1, 1], [0], [], []]
+ assert_(diff(x, n=0) is x)
+ for n, (expected, out) in enumerate(zip(expected, output), start=1):
+ assert_(type(out) is np.ndarray)
+ assert_array_equal(out, expected)
+ assert_equal(out.dtype, np.int_)
+ assert_equal(len(out), max(0, len(x) - n))
+
+ def test_times(self):
+ x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ expected = [
+ np.array([1, 1], dtype='timedelta64[D]'),
+ np.array([0], dtype='timedelta64[D]'),
+ ]
+ expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
+ for n, exp in enumerate(expected, start=1):
+ out = diff(x, n=n)
+ assert_array_equal(out, exp)
+ assert_equal(out.dtype, exp.dtype)
+
+ def test_subclass(self):
+ x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
+ mask=[[False, False], [True, False],
+ [False, True], [True, True], [False, False]])
+ out = diff(x)
+ assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
+ assert_array_equal(out.mask, [[False], [True],
+ [True], [True], [False]])
+ assert_(type(out) is type(x))
+
+ out3 = diff(x, n=3)
+ assert_array_equal(out3.data, [[], [], [], [], []])
+ assert_array_equal(out3.mask, [[], [], [], [], []])
+ assert_(type(out3) is type(x))
+
+ def test_prepend(self):
+ x = np.arange(5) + 1
+ assert_array_equal(diff(x, prepend=0), np.ones(5))
+ assert_array_equal(diff(x, prepend=[0]), np.ones(5))
+ assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)
+ assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))
+
+ x = np.arange(4).reshape(2, 2)
+ result = np.diff(x, axis=1, prepend=0)
+ expected = [[0, 1], [2, 1]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=1, prepend=[[0], [0]])
+ assert_array_equal(result, expected)
+
+ result = np.diff(x, axis=0, prepend=0)
+ expected = [[0, 1], [2, 2]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=0, prepend=[[0, 0]])
+ assert_array_equal(result, expected)
+
+ assert_raises(ValueError, np.diff, x, prepend=np.zeros((3,3)))
+
+ assert_raises(np.AxisError, diff, x, prepend=0, axis=3)
+
+ def test_append(self):
+ x = np.arange(5)
+ result = diff(x, append=0)
+ expected = [1, 1, 1, 1, -4]
+ assert_array_equal(result, expected)
+ result = diff(x, append=[0])
+ assert_array_equal(result, expected)
+ result = diff(x, append=[0, 2])
+ expected = expected + [2]
+ assert_array_equal(result, expected)
+
+ x = np.arange(4).reshape(2, 2)
+ result = np.diff(x, axis=1, append=0)
+ expected = [[1, -1], [1, -3]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=1, append=[[0], [0]])
+ assert_array_equal(result, expected)
+
+ result = np.diff(x, axis=0, append=0)
+ expected = [[2, 2], [-2, -3]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=0, append=[[0, 0]])
+ assert_array_equal(result, expected)
+
+ assert_raises(ValueError, np.diff, x, append=np.zeros((3,3)))
+
+ assert_raises(np.AxisError, diff, x, append=0, axis=3)
+
+
+class TestDelete:
+
+ def setup_method(self):
+ self.a = np.arange(5)
+ self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
+
+ def _check_inverse_of_slicing(self, indices):
+ a_del = delete(self.a, indices)
+ nd_a_del = delete(self.nd_a, indices, axis=1)
+ msg = 'Delete failed for obj: %r' % indices
+ assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
+ err_msg=msg)
+ xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])
+ assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg)
+
+ def test_slices(self):
+ lims = [-6, -2, 0, 1, 2, 4, 5]
+ steps = [-3, -1, 1, 3]
+ for start in lims:
+ for stop in lims:
+ for step in steps:
+ s = slice(start, stop, step)
+ self._check_inverse_of_slicing(s)
+
+ def test_fancy(self):
+ self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
+ with pytest.raises(IndexError):
+ delete(self.a, [100])
+ with pytest.raises(IndexError):
+ delete(self.a, [-100])
+
+ self._check_inverse_of_slicing([0, -1, 2, 2])
+
+ self._check_inverse_of_slicing([True, False, False, True, False])
+
+ # not legal, indexing with these would change the dimension
+ with pytest.raises(ValueError):
+ delete(self.a, True)
+ with pytest.raises(ValueError):
+ delete(self.a, False)
+
+ # not enough items
+ with pytest.raises(ValueError):
+ delete(self.a, [False]*4)
+
+ def test_single(self):
+ self._check_inverse_of_slicing(0)
+ self._check_inverse_of_slicing(-4)
+
+ def test_0d(self):
+ a = np.array(1)
+ with pytest.raises(np.AxisError):
+ delete(a, [], axis=0)
+ with pytest.raises(TypeError):
+ delete(a, [], axis="nonsense")
+
+ def test_subclass(self):
+ class SubClass(np.ndarray):
+ pass
+ a = self.a.view(SubClass)
+ assert_(isinstance(delete(a, 0), SubClass))
+ assert_(isinstance(delete(a, []), SubClass))
+ assert_(isinstance(delete(a, [0, 1]), SubClass))
+ assert_(isinstance(delete(a, slice(1, 2)), SubClass))
+ assert_(isinstance(delete(a, slice(1, -2)), SubClass))
+
+ def test_array_order_preserve(self):
+ # See gh-7113
+ k = np.arange(10).reshape(2, 5, order='F')
+ m = delete(k, slice(60, None), axis=1)
+
+ # 'k' is Fortran ordered, and 'm' should have the
+ # same ordering as 'k' and NOT become C ordered
+ assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
+ assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
+
+ def test_index_floats(self):
+ with pytest.raises(IndexError):
+ np.delete([0, 1, 2], np.array([1.0, 2.0]))
+ with pytest.raises(IndexError):
+ np.delete([0, 1, 2], np.array([], dtype=float))
+
+ @pytest.mark.parametrize("indexer", [np.array([1]), [1]])
+ def test_single_item_array(self, indexer):
+ a_del_int = delete(self.a, 1)
+ a_del = delete(self.a, indexer)
+ assert_equal(a_del_int, a_del)
+
+ nd_a_del_int = delete(self.nd_a, 1, axis=1)
+ nd_a_del = delete(self.nd_a, np.array([1]), axis=1)
+ assert_equal(nd_a_del_int, nd_a_del)
+
+ def test_single_item_array_non_int(self):
+ # Special handling for integer arrays must not affect non-integer ones.
+ # If `False` was cast to `0` it would delete the element:
+ res = delete(np.ones(1), np.array([False]))
+ assert_array_equal(res, np.ones(1))
+
+ # Test the more complicated (with axis) case from gh-21840
+ x = np.ones((3, 1))
+ false_mask = np.array([False], dtype=bool)
+ true_mask = np.array([True], dtype=bool)
+
+ res = delete(x, false_mask, axis=-1)
+ assert_array_equal(res, x)
+ res = delete(x, true_mask, axis=-1)
+ assert_array_equal(res, x[:, :0])
+
+ # Object or e.g. timedeltas should *not* be allowed
+ with pytest.raises(IndexError):
+ delete(np.ones(2), np.array([0], dtype=object))
+
+ with pytest.raises(IndexError):
+            # timedeltas are sometimes "integral", but clearly not allowed:
+ delete(np.ones(2), np.array([0], dtype="m8[ns]"))
+
+
+class TestGradient:
+
+ def test_basic(self):
+ v = [[1, 1], [3, 4]]
+ x = np.array(v)
+ dx = [np.array([[2., 3.], [2., 3.]]),
+ np.array([[0., 0.], [1., 1.]])]
+ assert_array_equal(gradient(x), dx)
+ assert_array_equal(gradient(v), dx)
+
+ def test_args(self):
+ dx = np.cumsum(np.ones(5))
+ dx_uneven = [1., 2., 5., 9., 11.]
+ f_2d = np.arange(25).reshape(5, 5)
+
+ # distances must be scalars or have size equal to gradient[axis]
+ gradient(np.arange(5), 3.)
+ gradient(np.arange(5), np.array(3.))
+ gradient(np.arange(5), dx)
+        # dy is set equal to dx because it is a scalar
+ gradient(f_2d, 1.5)
+ gradient(f_2d, np.array(1.5))
+
+ gradient(f_2d, dx_uneven, dx_uneven)
+ # mix between even and uneven spaces and
+ # mix between scalar and vector
+ gradient(f_2d, dx, 2)
+
+ # 2D but axis specified
+ gradient(f_2d, dx, axis=1)
+
+ # 2d coordinate arguments are not yet allowed
+ assert_raises_regex(ValueError, '.*scalars or 1d',
+ gradient, f_2d, np.stack([dx]*2, axis=-1), 1)
+
+ def test_badargs(self):
+ f_2d = np.arange(25).reshape(5, 5)
+ x = np.cumsum(np.ones(5))
+
+ # wrong sizes
+ assert_raises(ValueError, gradient, f_2d, x, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))
+ assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))
+ # wrong number of arguments
+ assert_raises(TypeError, gradient, f_2d, x)
+ assert_raises(TypeError, gradient, f_2d, x, axis=(0,1))
+ assert_raises(TypeError, gradient, f_2d, x, x, x)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, 1)
+ assert_raises(TypeError, gradient, f_2d, x, x, axis=1)
+ assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)
+
+ def test_datetime64(self):
+ # Make sure gradient() can handle special types like datetime64
+ x = np.array(
+ ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
+ '1910-10-12', '1910-12-12', '1912-12-12'],
+ dtype='datetime64[D]')
+ dx = np.array(
+ [-5, -3, 0, 31, 61, 396, 731],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
+
+ def test_masked(self):
+ # Make sure that gradient supports subclasses like masked arrays
+ x = np.ma.array([[1, 1], [3, 4]],
+ mask=[[False, False], [False, False]])
+ out = gradient(x)[0]
+ assert_equal(type(out), type(x))
+ # And make sure that the output and input don't have aliased mask
+ # arrays
+ assert_(x._mask is not out._mask)
+ # Also check that edge_order=2 doesn't alter the original mask
+ x2 = np.ma.arange(5)
+ x2[2] = np.ma.masked
+ np.gradient(x2, edge_order=2)
+ assert_array_equal(x2.mask, [False, False, True, False, False])
+
+ def test_second_order_accurate(self):
+        # Testing that the relative numerical error is less than 3% for
+        # this example problem. This corresponds to second-order
+        # accurate finite differences for all interior and boundary
+        # points.
+ x = np.linspace(0, 1, 10)
+ dx = x[1] - x[0]
+ y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
+ analytical = 6 * x ** 2 + 8 * x + 2
+ num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
+ assert_(np.all(num_error < 0.03) == True)
+
+ # test with unevenly spaced
+ np.random.seed(0)
+ x = np.sort(np.random.random(10))
+ y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
+ analytical = 6 * x ** 2 + 8 * x + 2
+ num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)
+ assert_(np.all(num_error < 0.03) == True)
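+
+        # (for uniform spacing dx, np.gradient uses the second-order
+        # central difference (y[i+1] - y[i-1]) / (2*dx) in the interior;
+        # edge_order=2 switches the boundary points to second-order
+        # one-sided stencils, which is why the error stays below 3% at
+        # the edges as well)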
+
+ def test_spacing(self):
+ f = np.array([0, 2., 3., 4., 5., 5.])
+ f = np.tile(f, (6,1)) + f.reshape(-1, 1)
+ x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
+ x_even = np.arange(6.)
+
+ fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
+ fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
+ fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
+ fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))
+
+ # evenly spaced
+ for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
+ res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
+ res2 = gradient(f, x_even, x_even,
+ axis=(0,1), edge_order=edge_order)
+ res3 = gradient(f, x_even, x_even,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_array_equal(res2, res3)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, 1., axis=0, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_almost_equal(res2, exp_res.T)
+
+ res1 = gradient(f, 1., axis=1, edge_order=edge_order)
+ res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
+ assert_(res1.shape == res2.shape)
+ assert_array_equal(res2, exp_res)
+
+ # unevenly spaced
+ for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
+ res1 = gradient(f, x_uneven, x_uneven,
+ axis=(0,1), edge_order=edge_order)
+ res2 = gradient(f, x_uneven, x_uneven,
+ axis=None, edge_order=edge_order)
+ assert_array_equal(res1, res2)
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
+ res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res.T)
+
+ res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
+ assert_almost_equal(res1, exp_res)
+
+ # mixed
+ res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
+ res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord1.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord1)
+
+ res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
+ res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
+ assert_array_equal(res1[0], res2[1])
+ assert_array_equal(res1[1], res2[0])
+ assert_almost_equal(res1[0], fdx_even_ord2.T)
+ assert_almost_equal(res1[1], fdx_uneven_ord2)
+
+ def test_specific_axes(self):
+ # Testing that gradient can work on a given axis only
+ v = [[1, 1], [3, 4]]
+ x = np.array(v)
+ dx = [np.array([[2., 3.], [2., 3.]]),
+ np.array([[0., 0.], [1., 1.]])]
+ assert_array_equal(gradient(x, axis=0), dx[0])
+ assert_array_equal(gradient(x, axis=1), dx[1])
+ assert_array_equal(gradient(x, axis=-1), dx[1])
+ assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])
+
+ # test axis=None which means all axes
+ assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
+ # and is the same as no axis keyword given
+ assert_almost_equal(gradient(x, axis=None), gradient(x))
+
+ # test vararg order
+ assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),
+ [dx[1]/2.0, dx[0]/3.0])
+ # test maximal number of varargs
+ assert_raises(TypeError, gradient, x, 1, 2, axis=1)
+
+ assert_raises(np.AxisError, gradient, x, axis=3)
+ assert_raises(np.AxisError, gradient, x, axis=-3)
+ # assert_raises(TypeError, gradient, x, axis=[1,])
+
+ def test_timedelta64(self):
+ # Make sure gradient() can handle special types like timedelta64
+ x = np.array(
+ [-5, -3, 10, 12, 61, 321, 300],
+ dtype='timedelta64[D]')
+ dx = np.array(
+ [2, 7, 7, 25, 154, 119, -21],
+ dtype='timedelta64[D]')
+ assert_array_equal(gradient(x), dx)
+ assert_(dx.dtype == np.dtype('timedelta64[D]'))
+
+ def test_inexact_dtypes(self):
+ for dt in [np.float16, np.float32, np.float64]:
+ # dtypes should not be promoted in a different way to what diff does
+ x = np.array([1, 2, 3], dtype=dt)
+ assert_equal(gradient(x).dtype, np.diff(x).dtype)
+
+ def test_values(self):
+        # needs at least 2 points for edge_order == 1
+        gradient(np.arange(2), edge_order=1)
+        # needs at least 3 points for edge_order == 2
+        gradient(np.arange(3), edge_order=2)
+
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
+ assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
+
+ @pytest.mark.parametrize('f_dtype', [np.uint8, np.uint16,
+ np.uint32, np.uint64])
+ def test_f_decreasing_unsigned_int(self, f_dtype):
+ f = np.array([5, 4, 3, 2, 1], dtype=f_dtype)
+ g = gradient(f)
+ assert_array_equal(g, [-1]*len(f))
+
+ @pytest.mark.parametrize('f_dtype', [np.int8, np.int16,
+ np.int32, np.int64])
+ def test_f_signed_int_big_jump(self, f_dtype):
+ maxint = np.iinfo(f_dtype).max
+ x = np.array([1, 3])
+ f = np.array([-1, maxint], dtype=f_dtype)
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [(maxint + 1) // 2]*2)
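+
+        # (the raw difference maxint - (-1) would overflow f_dtype;
+        # gradient promotes integer inputs to a floating type before
+        # differencing, so with the spacing of 2 the slope comes out to
+        # (maxint + 1) / 2, matching the assertion)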
+
+ @pytest.mark.parametrize('x_dtype', [np.uint8, np.uint16,
+ np.uint32, np.uint64])
+ def test_x_decreasing_unsigned(self, x_dtype):
+ x = np.array([3, 2, 1], dtype=x_dtype)
+ f = np.array([0, 2, 4])
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [-2]*len(x))
+
+ @pytest.mark.parametrize('x_dtype', [np.int8, np.int16,
+ np.int32, np.int64])
+ def test_x_signed_int_big_jump(self, x_dtype):
+ minint = np.iinfo(x_dtype).min
+ maxint = np.iinfo(x_dtype).max
+ x = np.array([-1, maxint], dtype=x_dtype)
+ f = np.array([minint // 2, 0])
+ dfdx = gradient(f, x)
+ assert_array_equal(dfdx, [0.5, 0.5])
+
+
+class TestAngle:
+
+ def test_basic(self):
+ x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
+ 1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
+ y = angle(x)
+ yo = [
+ np.arctan(3.0 / 1.0),
+ np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
+ -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
+ z = angle(x, deg=True)
+ zo = np.array(yo) * 180 / np.pi
+ assert_array_almost_equal(y, yo, 11)
+ assert_array_almost_equal(z, zo, 11)
+
+ def test_subclass(self):
+ x = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])
+ x[1] = np.ma.masked
+ expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
+ expected[1] = np.ma.masked
+ actual = angle(x)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual.mask, expected.mask)
+ assert_equal(actual, expected)
+
+
+class TestTrimZeros:
+
+ a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
+ b = a.astype(float)
+ c = a.astype(complex)
+ d = a.astype(object)
+
+ def values(self):
+ attr_names = ('a', 'b', 'c', 'd')
+ return (getattr(self, name) for name in attr_names)
+
+ def test_basic(self):
+ slc = np.s_[2:-1]
+ for arr in self.values():
+ res = trim_zeros(arr)
+ assert_array_equal(res, arr[slc])
+
+ def test_leading_skip(self):
+ slc = np.s_[:-1]
+ for arr in self.values():
+ res = trim_zeros(arr, trim='b')
+ assert_array_equal(res, arr[slc])
+
+ def test_trailing_skip(self):
+ slc = np.s_[2:]
+ for arr in self.values():
+ res = trim_zeros(arr, trim='F')
+ assert_array_equal(res, arr[slc])
+
+ def test_all_zero(self):
+ for _arr in self.values():
+ arr = np.zeros_like(_arr, dtype=_arr.dtype)
+
+ res1 = trim_zeros(arr, trim='B')
+ assert len(res1) == 0
+
+ res2 = trim_zeros(arr, trim='f')
+ assert len(res2) == 0
+
+ def test_size_zero(self):
+ arr = np.zeros(0)
+ res = trim_zeros(arr)
+ assert_array_equal(arr, res)
+
+ @pytest.mark.parametrize(
+ 'arr',
+ [np.array([0, 2**62, 0]),
+ np.array([0, 2**63, 0]),
+ np.array([0, 2**64, 0])]
+ )
+ def test_overflow(self, arr):
+ slc = np.s_[1:2]
+ res = trim_zeros(arr)
+ assert_array_equal(res, arr[slc])
+
+ def test_no_trim(self):
+ arr = np.array([None, 1, None])
+ res = trim_zeros(arr)
+ assert_array_equal(arr, res)
+
+ def test_list_to_list(self):
+ res = trim_zeros(self.a.tolist())
+ assert isinstance(res, list)
+
+
+class TestExtins:
+
+ def test_basic(self):
+ a = np.array([1, 3, 2, 1, 2, 3, 3])
+ b = extract(a > 1, a)
+ assert_array_equal(b, [3, 2, 2, 3, 3])
+
+ def test_place(self):
+ # Make sure that non-np.ndarray objects
+ # raise an error instead of doing nothing
+ assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])
+
+ a = np.array([1, 4, 3, 2, 5, 8, 7])
+ place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
+ assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])
+
+ place(a, np.zeros(7), [])
+ assert_array_equal(a, np.arange(1, 8))
+
+ place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])
+ assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])
+ assert_raises_regex(ValueError, "Cannot insert from an empty array",
+ lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))
+
+ # See Issue #6974
+ a = np.array(['12', '34'])
+ place(a, [0, 1], '9')
+ assert_array_equal(a, ['12', '9'])
+
+ def test_both(self):
+ a = rand(10)
+ mask = a > 0.5
+ ac = a.copy()
+ c = extract(mask, a)
+ place(a, mask, 0)
+ place(a, mask, c)
+ assert_array_equal(a, ac)
+
+
+# _foo1 and _foo2 are used in some tests in TestVectorize.
+
+def _foo1(x, y=1.0):
+ return y*math.floor(x)
+
+
+def _foo2(x, y=1.0, z=0.0):
+ return y*math.floor(x) + z
+
+
+class TestVectorize:
+
+ def test_simple(self):
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ f = vectorize(addsubtract)
+ r = f([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_scalar(self):
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ f = vectorize(addsubtract)
+ r = f([0, 3, 6, 9], 5)
+ assert_array_equal(r, [5, 8, 1, 4])
+
+ def test_large(self):
+ x = np.linspace(-3, 2, 10000)
+ f = vectorize(lambda x: x)
+ y = f(x)
+ assert_array_equal(y, x)
+
+ def test_ufunc(self):
+ f = vectorize(math.cos)
+ args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
+ r1 = f(args)
+ r2 = np.cos(args)
+ assert_array_almost_equal(r1, r2)
+
+ def test_keywords(self):
+
+ def foo(a, b=1):
+ return a + b
+
+ f = vectorize(foo)
+ args = np.array([1, 2, 3])
+ r1 = f(args)
+ r2 = np.array([2, 3, 4])
+ assert_array_equal(r1, r2)
+ r1 = f(args, 2)
+ r2 = np.array([3, 4, 5])
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order1(self):
+ # gh-1620: The second call of f would crash with
+ # `ValueError: invalid number of arguments`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0), 1.0)
+ r2 = f(np.arange(3.0))
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order2(self):
+ # gh-1620: The second call of f would crash with
+ # `ValueError: non-broadcastable output operand with shape ()
+ # doesn't match the broadcast shape (3,)`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0))
+ r2 = f(np.arange(3.0), 1.0)
+ assert_array_equal(r1, r2)
+
+ def test_keywords_with_otypes_order3(self):
+ # gh-1620: The third call of f would crash with
+ # `ValueError: invalid number of arguments`.
+ f = vectorize(_foo1, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(np.arange(3.0))
+ r2 = f(np.arange(3.0), y=1.0)
+ r3 = f(np.arange(3.0))
+ assert_array_equal(r1, r2)
+ assert_array_equal(r1, r3)
+
+ def test_keywords_with_otypes_several_kwd_args1(self):
+ # gh-1620 Make sure different uses of keyword arguments
+ # don't break the vectorized function.
+ f = vectorize(_foo2, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(10.4, z=100)
+ r2 = f(10.4, y=-1)
+ r3 = f(10.4)
+ assert_equal(r1, _foo2(10.4, z=100))
+ assert_equal(r2, _foo2(10.4, y=-1))
+ assert_equal(r3, _foo2(10.4))
+
+ def test_keywords_with_otypes_several_kwd_args2(self):
+ # gh-1620 Make sure different uses of keyword arguments
+ # don't break the vectorized function.
+ f = vectorize(_foo2, otypes=[float])
+ # We're testing the caching of ufuncs by vectorize, so the order
+ # of these function calls is an important part of the test.
+ r1 = f(z=100, x=10.4, y=-1)
+ r2 = f(1, 2, 3)
+ assert_equal(r1, _foo2(z=100, x=10.4, y=-1))
+ assert_equal(r2, _foo2(1, 2, 3))
+
+ def test_keywords_no_func_code(self):
+ # This needs to test a function that has keywords but
+ # no func_code attribute, since otherwise vectorize will
+ # inspect the func_code.
+ import random
+ try:
+ vectorize(random.randrange) # Should succeed
+ except Exception:
+ raise AssertionError()
+
+ def test_keywords2_ticket_2100(self):
+ # Test kwarg support: enhancement ticket 2100
+
+ def foo(a, b=1):
+ return a + b
+
+ f = vectorize(foo)
+ args = np.array([1, 2, 3])
+ r1 = f(a=args)
+ r2 = np.array([2, 3, 4])
+ assert_array_equal(r1, r2)
+ r1 = f(b=1, a=args)
+ assert_array_equal(r1, r2)
+ r1 = f(args, b=2)
+ r2 = np.array([3, 4, 5])
+ assert_array_equal(r1, r2)
+
+ def test_keywords3_ticket_2100(self):
+ # Test excluded with mixed positional and kwargs: ticket 2100
+ def mypolyval(x, p):
+ _p = list(p)
+ res = _p.pop(0)
+ while _p:
+ res = res * x + _p.pop(0)
+ return res
+
+ vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
+ ans = [3, 6]
+ assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
+ assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
+ assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
+
+ def test_keywords4_ticket_2100(self):
+        # Test vectorizing a function with no positional args.
+ @vectorize
+ def f(**kw):
+ res = 1.0
+ for _k in kw:
+ res *= kw[_k]
+ return res
+
+ assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
+
+ def test_keywords5_ticket_2100(self):
+        # Test vectorizing a function with no keyword args.
+ @vectorize
+ def f(*v):
+ return np.prod(v)
+
+ assert_array_equal(f([1, 2], [3, 4]), [3, 8])
+
+ def test_coverage1_ticket_2100(self):
+ def foo():
+ return 1
+
+ f = vectorize(foo)
+ assert_array_equal(f(), 1)
+
+ def test_assigning_docstring(self):
+ def foo(x):
+ """Original documentation"""
+ return x
+
+ f = vectorize(foo)
+ assert_equal(f.__doc__, foo.__doc__)
+
+ doc = "Provided documentation"
+ f = vectorize(foo, doc=doc)
+ assert_equal(f.__doc__, doc)
+
+ def test_UnboundMethod_ticket_1156(self):
+ # Regression test for issue 1156
+ class Foo:
+ b = 2
+
+ def bar(self, a):
+ return a ** self.b
+
+ assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
+ np.arange(9) ** 2)
+ assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
+ np.arange(9) ** 2)
+
+ def test_execution_order_ticket_1487(self):
+ # Regression test for dependence on execution order: issue 1487
+ f1 = vectorize(lambda x: x)
+ res1a = f1(np.arange(3))
+ res1b = f1(np.arange(0.1, 3))
+ f2 = vectorize(lambda x: x)
+ res2b = f2(np.arange(0.1, 3))
+ res2a = f2(np.arange(3))
+ assert_equal(res1a, res2a)
+ assert_equal(res1b, res2b)
+
+ def test_string_ticket_1892(self):
+ # Test vectorization over strings: issue 1892.
+ f = np.vectorize(lambda x: x)
+ s = '0123456789' * 10
+ assert_equal(s, f(s))
+
+ def test_cache(self):
+        # Ensure that the vectorized func is called exactly once per argument.
+ _calls = [0]
+
+ @vectorize
+ def f(x):
+ _calls[0] += 1
+ return x ** 2
+
+ f.cache = True
+ x = np.arange(5)
+ assert_array_equal(f(x), x * x)
+ assert_equal(_calls[0], len(x))
+
+ def test_otypes(self):
+ f = np.vectorize(lambda x: x)
+ f.otypes = 'i'
+ x = np.arange(5)
+ assert_array_equal(f(x), x)
+
+ def test_parse_gufunc_signature(self):
+ assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x,y)->()'),
+ ([('x', 'y')], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'),
+ ([('x',), ('y',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x)->(y)'),
+ ([('x',)], [('y',)]))
+ assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'),
+ ([('x',)], [('y',), ()]))
+ assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
+ ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
+        # Tests to check that whitespace is ignored
+ assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'),
+ ([('x', 'y')], [()]))
+ assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'),
+ ([('x',), ('y',)], [()]))
+ assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '),
+ ([('x',)], [('y',)]))
+ assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'),
+ ([('x',)], [('y',), ()]))
+ assert_equal(nfb._parse_gufunc_signature(
+ '( ), ( a, b,c ) ,( d) -> (d , e)'),
+ ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
+
+ with assert_raises(ValueError):
+ nfb._parse_gufunc_signature('(x)(y)->()')
+ with assert_raises(ValueError):
+ nfb._parse_gufunc_signature('(x),(y)->')
+ with assert_raises(ValueError):
+ nfb._parse_gufunc_signature('((x))->(x)')
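+
+        # (the accepted grammar: comma-separated parenthesized tuples of
+        # core-dimension names for the inputs, '->', then the same for
+        # the outputs; whitespace is insignificant, while nested
+        # parentheses, a missing arrow and a missing output list are all
+        # rejected, as exercised above)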
+
+ def test_signature_simple(self):
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ f = vectorize(addsubtract, signature='(),()->()')
+ r = f([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_signature_mean_last(self):
+ def mean(a):
+ return a.mean()
+
+ f = vectorize(mean, signature='(n)->()')
+ r = f([[1, 3], [2, 4]])
+ assert_array_equal(r, [2, 3])
+
+ def test_signature_center(self):
+ def center(a):
+ return a - a.mean()
+
+ f = vectorize(center, signature='(n)->(n)')
+ r = f([[1, 3], [2, 4]])
+ assert_array_equal(r, [[-1, 1], [-1, 1]])
+
+ def test_signature_two_outputs(self):
+ f = vectorize(lambda x: (x, x), signature='()->(),()')
+ r = f([1, 2, 3])
+ assert_(isinstance(r, tuple) and len(r) == 2)
+ assert_array_equal(r[0], [1, 2, 3])
+ assert_array_equal(r[1], [1, 2, 3])
+
+ def test_signature_outer(self):
+ f = vectorize(np.outer, signature='(a),(b)->(a,b)')
+ r = f([1, 2], [1, 2, 3])
+ assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])
+
+ r = f([[[1, 2]]], [1, 2, 3])
+ assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])
+
+ r = f([[1, 0], [2, 0]], [1, 2, 3])
+ assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]],
+ [[2, 4, 6], [0, 0, 0]]])
+
+ r = f([1, 2], [[1, 2, 3], [0, 0, 0]])
+ assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]],
+ [[0, 0, 0], [0, 0, 0]]])
+
+ def test_signature_computed_size(self):
+ f = vectorize(lambda x: x[:-1], signature='(n)->(m)')
+ r = f([1, 2, 3])
+ assert_array_equal(r, [1, 2])
+
+ r = f([[1, 2, 3], [2, 3, 4]])
+ assert_array_equal(r, [[1, 2], [2, 3]])
+
+ def test_signature_excluded(self):
+
+ def foo(a, b=1):
+ return a + b
+
+ f = vectorize(foo, signature='()->()', excluded={'b'})
+ assert_array_equal(f([1, 2, 3]), [2, 3, 4])
+ assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])
+
+ def test_signature_otypes(self):
+ f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64'])
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+
+ def test_signature_invalid_inputs(self):
+ f = vectorize(operator.add, signature='(n),(n)->(n)')
+ with assert_raises_regex(TypeError, 'wrong number of positional'):
+ f([1, 2])
+ with assert_raises_regex(
+ ValueError, 'does not have enough dimensions'):
+ f(1, 2)
+ with assert_raises_regex(
+ ValueError, 'inconsistent size for core dimension'):
+ f([1, 2], [1, 2, 3])
+
+ f = vectorize(operator.add, signature='()->()')
+ with assert_raises_regex(TypeError, 'wrong number of positional'):
+ f(1, 2)
+
+ def test_signature_invalid_outputs(self):
+
+ f = vectorize(lambda x: x[:-1], signature='(n)->(n)')
+ with assert_raises_regex(
+ ValueError, 'inconsistent size for core dimension'):
+ f([1, 2, 3])
+
+ f = vectorize(lambda x: x, signature='()->(),()')
+ with assert_raises_regex(ValueError, 'wrong number of outputs'):
+ f(1)
+
+ f = vectorize(lambda x: (x, x), signature='()->()')
+ with assert_raises_regex(ValueError, 'wrong number of outputs'):
+ f([1, 2])
+
+ def test_size_zero_output(self):
+ # see issue 5868
+ f = np.vectorize(lambda x: x)
+ x = np.zeros([0, 5], dtype=int)
+ with assert_raises_regex(ValueError, 'otypes'):
+ f(x)
+
+ f.otypes = 'i'
+ assert_array_equal(f(x), x)
+
+ f = np.vectorize(lambda x: x, signature='()->()')
+ with assert_raises_regex(ValueError, 'otypes'):
+ f(x)
+
+ f = np.vectorize(lambda x: x, signature='()->()', otypes='i')
+ assert_array_equal(f(x), x)
+
+ f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i')
+ assert_array_equal(f(x), x)
+
+ f = np.vectorize(lambda x: x, signature='(n)->(n)')
+ assert_array_equal(f(x.T), x.T)
+
+ f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i')
+ with assert_raises_regex(ValueError, 'new output dimensions'):
+ f(x)
+
+ def test_subclasses(self):
+ class subclass(np.ndarray):
+ pass
+
+ m = np.array([[1., 0., 0.],
+ [0., 0., 1.],
+ [0., 1., 0.]]).view(subclass)
+ v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass)
+ # generalized (gufunc)
+ matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)')
+ r = matvec(m, v)
+ assert_equal(type(r), subclass)
+ assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]])
+
+ # element-wise (ufunc)
+ mult = np.vectorize(lambda x, y: x*y)
+ r = mult(m, v)
+ assert_equal(type(r), subclass)
+ assert_equal(r, m * v)
+
+
+class TestLeaks:
+ class A:
+ iters = 20
+
+ def bound(self, *args):
+ return 0
+
+ @staticmethod
+ def unbound(*args):
+ return 0
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ @pytest.mark.parametrize('name, incr', [
+ ('bound', A.iters),
+ ('unbound', 0),
+ ])
+ def test_frompyfunc_leaks(self, name, incr):
+        # exposed in gh-11867 as np.vectorize, but the problem stems from
+        # frompyfunc.
+ # class.attribute = np.frompyfunc(<method>) creates a
+ # reference cycle if <method> is a bound class method. It requires a
+ # gc collection cycle to break the cycle (on CPython 3)
+ import gc
+ A_func = getattr(self.A, name)
+ gc.disable()
+ try:
+ refcount = sys.getrefcount(A_func)
+ for i in range(self.A.iters):
+ a = self.A()
+ a.f = np.frompyfunc(getattr(a, name), 1, 1)
+ out = a.f(np.arange(10))
+ a = None
+ # A.func is part of a reference cycle if incr is non-zero
+ assert_equal(sys.getrefcount(A_func), refcount + incr)
+ for i in range(5):
+ gc.collect()
+ assert_equal(sys.getrefcount(A_func), refcount)
+ finally:
+ gc.enable()
+
+
+class TestDigitize:
+
+ def test_forward(self):
+ x = np.arange(-6, 5)
+ bins = np.arange(-5, 5)
+ assert_array_equal(digitize(x, bins), np.arange(11))
+
+ def test_reverse(self):
+ x = np.arange(5, -6, -1)
+ bins = np.arange(5, -5, -1)
+ assert_array_equal(digitize(x, bins), np.arange(11))
+
+ def test_random(self):
+ x = rand(10)
+ bin = np.linspace(x.min(), x.max(), 10)
+ assert_(np.all(digitize(x, bin) != 0))
+
+ def test_right_basic(self):
+ x = [1, 5, 4, 10, 8, 11, 0]
+ bins = [1, 5, 10]
+ default_answer = [1, 2, 1, 3, 2, 3, 0]
+ assert_array_equal(digitize(x, bins), default_answer)
+ right_answer = [0, 1, 1, 2, 2, 3, 0]
+ assert_array_equal(digitize(x, bins, True), right_answer)
+
+ def test_right_open(self):
+ x = np.arange(-6, 5)
+ bins = np.arange(-6, 4)
+ assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+ def test_right_open_reverse(self):
+ x = np.arange(5, -6, -1)
+ bins = np.arange(4, -6, -1)
+ assert_array_equal(digitize(x, bins, True), np.arange(11))
+
+ def test_right_open_random(self):
+ x = rand(10)
+ bins = np.linspace(x.min(), x.max(), 10)
+ assert_(np.all(digitize(x, bins, True) != 10))
+
+ def test_monotonic(self):
+ x = [-1, 0, 1, 2]
+ bins = [0, 0, 1]
+ assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
+ assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
+ bins = [1, 1, 0]
+ assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
+ assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
+ bins = [1, 1, 1, 1]
+ assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
+ assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
+ bins = [0, 0, 1, 0]
+ assert_raises(ValueError, digitize, x, bins)
+ bins = [1, 1, 0, 1]
+ assert_raises(ValueError, digitize, x, bins)
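+
+        # (for increasing bins, digitize returns i with
+        # bins[i-1] <= x < bins[i] when right=False, or
+        # bins[i-1] < x <= bins[i] when right=True; for decreasing bins
+        # the inequalities flip, and bins that are neither monotonically
+        # increasing nor decreasing raise ValueError, as checked above)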
+
+ def test_casting_error(self):
+ x = [1, 2, 3 + 1.j]
+ bins = [1, 2, 3]
+ assert_raises(TypeError, digitize, x, bins)
+ x, bins = bins, x
+ assert_raises(TypeError, digitize, x, bins)
+
+ def test_return_type(self):
+ # Functions returning indices should always return base ndarrays
+ class A(np.ndarray):
+ pass
+ a = np.arange(5).view(A)
+ b = np.arange(1, 3).view(A)
+ assert_(not isinstance(digitize(b, a, False), A))
+ assert_(not isinstance(digitize(b, a, True), A))
+
+ def test_large_integers_increasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
+
+ @pytest.mark.xfail(
+ reason="gh-11022: np.core.multiarray._monoticity loses precision")
+ def test_large_integers_decreasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
+
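+
+# Standalone sketch (illustration only) of the bin-edge semantics exercised
+# above: for increasing bins, np.digitize returns the index i satisfying
+# bins[i-1] <= x < bins[i], and bins[i-1] < x <= bins[i] when right=True.
+# The helper name is hypothetical.
+def _demo_digitize_edges():
+    out = np.digitize([0.9, 1.0, 5.0], [1, 5, 10])                     # [0, 1, 2]
+    out_right = np.digitize([0.9, 1.0, 5.0], [1, 5, 10], right=True)   # [0, 0, 1]
+    return out, out_right
+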
+
+class TestUnwrap:
+
+ def test_simple(self):
+ # check that unwrap removes jumps greater than 2*pi
+ assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
+ # check that unwrap maintains continuity
+ assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
+
+ def test_period(self):
+ # check that unwrap removes jumps greater than 255
+ assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2])
+ # check that unwrap maintains continuity
+ assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255))
+ # check simple case
+ simple_seq = np.array([0, 75, 150, 225, 300])
+ wrap_seq = np.mod(simple_seq, 255)
+ assert_array_equal(unwrap(wrap_seq, period=255), simple_seq)
+ # check custom discont value
+ uneven_seq = np.array([0, 75, 150, 225, 300, 430])
+ wrap_uneven = np.mod(uneven_seq, 250)
+ no_discont = unwrap(wrap_uneven, period=250)
+ assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180])
+ sm_discont = unwrap(wrap_uneven, period=250, discont=140)
+ assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430])
+ assert sm_discont.dtype == wrap_uneven.dtype
+
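+
+# Standalone sketch (illustration only) of the period logic tested above:
+# unwrap shifts samples by integer multiples of `period` (default 2*pi)
+# whenever consecutive differences exceed `discont` (default period/2).
+# The helper name is hypothetical.
+def _demo_unwrap_period():
+    wrapped = np.mod([0, 75, 150, 225, 300], 255)   # last sample wraps to 45
+    return np.unwrap(wrapped, period=255)           # [0, 75, 150, 225, 300]
+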
+
+@pytest.mark.parametrize(
+ "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"]
+)
+@pytest.mark.parametrize("M", [0, 1, 10])
+class TestFilterwindows:
+
+ def test_hanning(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hanning(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+
+ def test_hamming(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hamming(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+
+ def test_bartlett(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = bartlett(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+
+ def test_blackman(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = blackman(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+
+ def test_kaiser(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = kaiser(scalar, 0)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 10, 15)
+
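+
+# Standalone sketch (illustration only) of the dtype rule the parametrized
+# tests above assert: each window function promotes its argument's dtype
+# against float64, while object scalars fall back to plain float64. The
+# helper name is hypothetical.
+def _demo_window_dtype():
+    w = np.hanning(np.float16(10))
+    return w.dtype    # result_type(float16, float64) -> float64
+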
+
+class TestTrapz:
+
+ def test_simple(self):
+ x = np.arange(-10, 10, .1)
+ r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
+ # check integral of normal equals 1
+ assert_almost_equal(r, 1, 7)
+
+ def test_ndim(self):
+ x = np.linspace(0, 1, 3)
+ y = np.linspace(0, 2, 8)
+ z = np.linspace(0, 3, 13)
+
+ wx = np.ones_like(x) * (x[1] - x[0])
+ wx[0] /= 2
+ wx[-1] /= 2
+ wy = np.ones_like(y) * (y[1] - y[0])
+ wy[0] /= 2
+ wy[-1] /= 2
+ wz = np.ones_like(z) * (z[1] - z[0])
+ wz[0] /= 2
+ wz[-1] /= 2
+
+ q = x[:, None, None] + y[None, :, None] + z[None, None, :]
+
+ qx = (q * wx[:, None, None]).sum(axis=0)
+ qy = (q * wy[None, :, None]).sum(axis=1)
+ qz = (q * wz[None, None, :]).sum(axis=2)
+
+ # n-d `x`
+ r = trapz(q, x=x[:, None, None], axis=0)
+ assert_almost_equal(r, qx)
+ r = trapz(q, x=y[None, :, None], axis=1)
+ assert_almost_equal(r, qy)
+ r = trapz(q, x=z[None, None, :], axis=2)
+ assert_almost_equal(r, qz)
+
+ # 1-d `x`
+ r = trapz(q, x=x, axis=0)
+ assert_almost_equal(r, qx)
+ r = trapz(q, x=y, axis=1)
+ assert_almost_equal(r, qy)
+ r = trapz(q, x=z, axis=2)
+ assert_almost_equal(r, qz)
+
+ def test_masked(self):
+ # Testing that masked arrays behave as if the function is 0 where
+ # masked
+ x = np.arange(5)
+ y = x * x
+ mask = x == 2
+ ym = np.ma.array(y, mask=mask)
+ r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
+ assert_almost_equal(trapz(ym, x), r)
+
+ xm = np.ma.array(x, mask=mask)
+ assert_almost_equal(trapz(ym, xm), r)
+
+ xm = np.ma.array(x, mask=mask)
+ assert_almost_equal(trapz(y, xm), r)
+
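+
+# Standalone sketch (illustration only) of the weight construction used in
+# test_ndim above: trapezoidal integration along an axis is a weighted sum
+# in which interior samples get weight dx and the two endpoints get dx/2.
+# The helper name is hypothetical.
+def _demo_trapz_weights():
+    x = np.linspace(0, 1, 5)
+    y = x ** 2
+    w = np.full_like(x, x[1] - x[0])
+    w[0] /= 2
+    w[-1] /= 2
+    return np.trapz(y, x), (y * w).sum()    # both 0.34375
+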
+
+class TestSinc:
+
+ def test_simple(self):
+ assert_(sinc(0) == 1)
+ w = sinc(np.linspace(-1, 1, 100))
+ # check symmetry
+ assert_array_almost_equal(w, flipud(w), 7)
+
+ def test_array_like(self):
+ x = [0, 0.5]
+ y1 = sinc(np.array(x))
+ y2 = sinc(list(x))
+ y3 = sinc(tuple(x))
+ assert_array_equal(y1, y2)
+ assert_array_equal(y1, y3)
+
+
+class TestUnique:
+
+ def test_simple(self):
+ x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
+ assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
+ assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
+ x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
+ assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))
+ x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
+ assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
+
+
+class TestCheckFinite:
+
+ def test_simple(self):
+ a = [1, 2, 3]
+ b = [1, 2, np.inf]
+ c = [1, 2, np.nan]
+ np.lib.asarray_chkfinite(a)
+ assert_raises(ValueError, np.lib.asarray_chkfinite, b)
+ assert_raises(ValueError, np.lib.asarray_chkfinite, c)
+
+ def test_dtype_order(self):
+ # Regression test for missing dtype and order arguments
+ a = [1, 2, 3]
+ a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64)
+ assert_(a.dtype == np.float64)
+
+
+class TestCorrCoef:
+ A = np.array(
+ [[0.15391142, 0.18045767, 0.14197213],
+ [0.70461506, 0.96474128, 0.27906989],
+ [0.9297531, 0.32296769, 0.19267156]])
+ B = np.array(
+ [[0.10377691, 0.5417086, 0.49807457],
+ [0.82872117, 0.77801674, 0.39226705],
+ [0.9314666, 0.66800209, 0.03538394]])
+ res1 = np.array(
+ [[1., 0.9379533, -0.04931983],
+ [0.9379533, 1., 0.30007991],
+ [-0.04931983, 0.30007991, 1.]])
+ res2 = np.array(
+ [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
+ [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],
+ [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],
+ [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113],
+ [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
+ [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])
+
+ def test_non_array(self):
+ assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
+ [[1., -1.], [-1., 1.]])
+
+ def test_simple(self):
+ tgt1 = corrcoef(self.A)
+ assert_almost_equal(tgt1, self.res1)
+ assert_(np.all(np.abs(tgt1) <= 1.0))
+
+ tgt2 = corrcoef(self.A, self.B)
+ assert_almost_equal(tgt2, self.res2)
+ assert_(np.all(np.abs(tgt2) <= 1.0))
+
+ def test_ddof(self):
+ # ddof raises DeprecationWarning
+ with suppress_warnings() as sup:
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
+ sup.filter(DeprecationWarning)
+ # ddof has no or negligible effect on the function
+ assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
+ assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
+ assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
+ assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
+
+ def test_bias(self):
+ # bias raises DeprecationWarning
+ with suppress_warnings() as sup:
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
+ assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
+ sup.filter(DeprecationWarning)
+ # bias has no or negligible effect on the function
+ assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
+
+ def test_complex(self):
+ x = np.array([[1, 2, 3], [1j, 2j, 3j]])
+ res = corrcoef(x)
+ tgt = np.array([[1., -1.j], [1.j, 1.]])
+ assert_allclose(res, tgt)
+ assert_(np.all(np.abs(res) <= 1.0))
+
+ def test_xy(self):
+ x = np.array([[1, 2, 3]])
+ y = np.array([[1j, 2j, 3j]])
+ assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))
+
+ def test_empty(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('always', RuntimeWarning)
+ assert_array_equal(corrcoef(np.array([])), np.nan)
+ assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),
+ np.array([]).reshape(0, 0))
+ assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
+ np.array([[np.nan, np.nan], [np.nan, np.nan]]))
+
+ def test_extreme(self):
+ x = [[1e-100, 1e100], [1e100, 1e-100]]
+ with np.errstate(all='raise'):
+ c = corrcoef(x)
+ assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))
+ assert_(np.all(np.abs(c) <= 1.0))
+
+ @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble])
+ def test_corrcoef_dtype(self, test_type):
+ cast_A = self.A.astype(test_type)
+ res = corrcoef(cast_A, dtype=test_type)
+ assert test_type == res.dtype
+
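+
+# Standalone sketch (illustration only) of the relationship the fixtures
+# above encode: corrcoef is the covariance matrix rescaled by the outer
+# product of the per-row standard deviations, which is why every entry is
+# asserted to lie in [-1, 1]. The helper name is hypothetical.
+def _demo_corrcoef_from_cov(rows):
+    c = np.cov(rows)
+    d = np.sqrt(np.diag(c))
+    return c / np.outer(d, d)    # matches np.corrcoef(rows) up to rounding
+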
+
+class TestCov:
+ x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
+ res1 = np.array([[1., -1.], [-1., 1.]])
+ x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
+ frequencies = np.array([1, 4, 1])
+ x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T
+ res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])
+ unit_frequencies = np.ones(3, dtype=np.int_)
+ weights = np.array([1.0, 4.0, 1.0])
+ res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]])
+ unit_weights = np.ones(3)
+ x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])
+
+ def test_basic(self):
+ assert_allclose(cov(self.x1), self.res1)
+
+ def test_complex(self):
+ x = np.array([[1, 2, 3], [1j, 2j, 3j]])
+ res = np.array([[1., -1.j], [1.j, 1.]])
+ assert_allclose(cov(x), res)
+ assert_allclose(cov(x, aweights=np.ones(3)), res)
+
+ def test_xy(self):
+ x = np.array([[1, 2, 3]])
+ y = np.array([[1j, 2j, 3j]])
+ assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))
+
+ def test_empty(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('always', RuntimeWarning)
+ assert_array_equal(cov(np.array([])), np.nan)
+ assert_array_equal(cov(np.array([]).reshape(0, 2)),
+ np.array([]).reshape(0, 0))
+ assert_array_equal(cov(np.array([]).reshape(2, 0)),
+ np.array([[np.nan, np.nan], [np.nan, np.nan]]))
+
+ def test_wrong_ddof(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('always', RuntimeWarning)
+ assert_array_equal(cov(self.x1, ddof=5),
+ np.array([[np.inf, -np.inf],
+ [-np.inf, np.inf]]))
+
+ def test_1D_rowvar(self):
+ assert_allclose(cov(self.x3), cov(self.x3, rowvar=False))
+ y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
+ assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False))
+
+ def test_1D_variance(self):
+ assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
+
+ def test_fweights(self):
+ assert_allclose(cov(self.x2, fweights=self.frequencies),
+ cov(self.x2_repeats))
+ assert_allclose(cov(self.x1, fweights=self.frequencies),
+ self.res2)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies),
+ self.res1)
+ nonint = self.frequencies + 0.5
+ assert_raises(TypeError, cov, self.x1, fweights=nonint)
+ f = np.ones((2, 3), dtype=np.int_)
+ assert_raises(RuntimeError, cov, self.x1, fweights=f)
+ f = np.ones(2, dtype=np.int_)
+ assert_raises(RuntimeError, cov, self.x1, fweights=f)
+ f = -1 * np.ones(3, dtype=np.int_)
+ assert_raises(ValueError, cov, self.x1, fweights=f)
+
+ def test_aweights(self):
+ assert_allclose(cov(self.x1, aweights=self.weights), self.res3)
+ assert_allclose(cov(self.x1, aweights=3.0 * self.weights),
+ cov(self.x1, aweights=self.weights))
+ assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1)
+ w = np.ones((2, 3))
+ assert_raises(RuntimeError, cov, self.x1, aweights=w)
+ w = np.ones(2)
+ assert_raises(RuntimeError, cov, self.x1, aweights=w)
+ w = -1.0 * np.ones(3)
+ assert_raises(ValueError, cov, self.x1, aweights=w)
+
+ def test_unit_fweights_and_aweights(self):
+ assert_allclose(cov(self.x2, fweights=self.frequencies,
+ aweights=self.unit_weights),
+ cov(self.x2_repeats))
+ assert_allclose(cov(self.x1, fweights=self.frequencies,
+ aweights=self.unit_weights),
+ self.res2)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=self.unit_weights),
+ self.res1)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=self.weights),
+ self.res3)
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=3.0 * self.weights),
+ cov(self.x1, aweights=self.weights))
+ assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
+ aweights=self.unit_weights),
+ self.res1)
+
+ @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble])
+ def test_cov_dtype(self, test_type):
+ cast_x1 = self.x1.astype(test_type)
+ res = cov(cast_x1, dtype=test_type)
+ assert test_type == res.dtype
+
+
+class Test_I0:
+
+ def test_simple(self):
+ assert_almost_equal(
+ i0(0.5),
+ np.array(1.0634833707413234))
+
+ # need at least one test above 8, as the implementation is piecewise
+ A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0])
+ expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847])
+ assert_almost_equal(i0(A), expected)
+ assert_almost_equal(i0(-A), expected)
+
+ B = np.array([[0.827002, 0.99959078],
+ [0.89694769, 0.39298162],
+ [0.37954418, 0.05206293],
+ [0.36465447, 0.72446427],
+ [0.48164949, 0.50324519]])
+ assert_almost_equal(
+ i0(B),
+ np.array([[1.17843223, 1.26583466],
+ [1.21147086, 1.03898290],
+ [1.03633899, 1.00067775],
+ [1.03352052, 1.13557954],
+ [1.05884290, 1.06432317]]))
+ # Regression test for gh-11205
+ i0_0 = np.i0([0.])
+ assert_equal(i0_0.shape, (1,))
+ assert_array_equal(np.i0([0.]), np.array([1.]))
+
+ def test_non_array(self):
+ a = np.arange(4)
+
+ class array_like:
+ __array_interface__ = a.__array_interface__
+
+ def __array_wrap__(self, arr):
+ return self
+
+ # E.g. pandas series survive ufunc calls through array-wrap:
+ assert isinstance(np.abs(array_like()), array_like)
+ exp = np.i0(a)
+ res = np.i0(array_like())
+
+ assert_array_equal(exp, res)
+
+ def test_complex(self):
+ a = np.array([0, 1 + 2j])
+ with pytest.raises(TypeError, match="i0 not supported for complex values"):
+ res = i0(a)
+
+
+class TestKaiser:
+
+ def test_simple(self):
+ assert_(np.isfinite(kaiser(1, 1.0)))
+ assert_almost_equal(kaiser(0, 1.0),
+ np.array([]))
+ assert_almost_equal(kaiser(2, 1.0),
+ np.array([0.78984831, 0.78984831]))
+ assert_almost_equal(kaiser(5, 1.0),
+ np.array([0.78984831, 0.94503323, 1.,
+ 0.94503323, 0.78984831]))
+ assert_almost_equal(kaiser(5, 1.56789),
+ np.array([0.58285404, 0.88409679, 1.,
+ 0.88409679, 0.58285404]))
+
+ def test_int_beta(self):
+ kaiser(3, 4)
+
+
+class TestMsort:
+
+ def test_simple(self):
+ A = np.array([[0.44567325, 0.79115165, 0.54900530],
+ [0.36844147, 0.37325583, 0.96098397],
+ [0.64864341, 0.52929049, 0.39172155]])
+ with pytest.warns(DeprecationWarning, match="msort is deprecated"):
+ assert_almost_equal(
+ msort(A),
+ np.array([[0.36844147, 0.37325583, 0.39172155],
+ [0.44567325, 0.52929049, 0.54900530],
+ [0.64864341, 0.79115165, 0.96098397]]))
+
+
+class TestMeshgrid:
+
+ def test_simple(self):
+ [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
+ assert_array_equal(X, np.array([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]]))
+ assert_array_equal(Y, np.array([[4, 4, 4],
+ [5, 5, 5],
+ [6, 6, 6],
+ [7, 7, 7]]))
+
+ def test_single_input(self):
+ [X] = meshgrid([1, 2, 3, 4])
+ assert_array_equal(X, np.array([1, 2, 3, 4]))
+
+ def test_no_input(self):
+ args = []
+ assert_array_equal([], meshgrid(*args))
+ assert_array_equal([], meshgrid(*args, copy=False))
+
+ def test_indexing(self):
+ x = [1, 2, 3]
+ y = [4, 5, 6, 7]
+ [X, Y] = meshgrid(x, y, indexing='ij')
+ assert_array_equal(X, np.array([[1, 1, 1, 1],
+ [2, 2, 2, 2],
+ [3, 3, 3, 3]]))
+ assert_array_equal(Y, np.array([[4, 5, 6, 7],
+ [4, 5, 6, 7],
+ [4, 5, 6, 7]]))
+
+ # Test expected shapes:
+ z = [8, 9]
+ assert_(meshgrid(x, y)[0].shape == (4, 3))
+ assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))
+ assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
+ assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))
+
+ assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')
+
+ def test_sparse(self):
+ [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
+ assert_array_equal(X, np.array([[1, 2, 3]]))
+ assert_array_equal(Y, np.array([[4], [5], [6], [7]]))
+
+ def test_invalid_arguments(self):
+ # Test that meshgrid complains about invalid arguments
+ # Regression test for issue #4755:
+ # https://github.com/numpy/numpy/issues/4755
+ assert_raises(TypeError, meshgrid,
+ [1, 2, 3], [4, 5, 6, 7], indices='ij')
+
+ def test_return_type(self):
+ # Test for appropriate dtype in returned arrays.
+ # Regression test for issue #5297
+ # https://github.com/numpy/numpy/issues/5297
+ x = np.arange(0, 10, dtype=np.float32)
+ y = np.arange(10, 20, dtype=np.float64)
+
+ X, Y = np.meshgrid(x, y)
+
+ assert_(X.dtype == x.dtype)
+ assert_(Y.dtype == y.dtype)
+
+ # copy
+ X, Y = np.meshgrid(x, y, copy=True)
+
+ assert_(X.dtype == x.dtype)
+ assert_(Y.dtype == y.dtype)
+
+ # sparse
+ X, Y = np.meshgrid(x, y, sparse=True)
+
+ assert_(X.dtype == x.dtype)
+ assert_(Y.dtype == y.dtype)
+
+ def test_writeback(self):
+ # Issue 8561
+ X = np.array([1.1, 2.2])
+ Y = np.array([3.3, 4.4])
+ x, y = np.meshgrid(X, Y, sparse=False, copy=True)
+
+ x[0, :] = 0
+ assert_equal(x[0, :], 0)
+ assert_equal(x[1, :], X)
+
+ def test_nd_shape(self):
+ a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6)))
+ expected_shape = (2, 1, 3, 4, 5)
+ assert_equal(a.shape, expected_shape)
+ assert_equal(b.shape, expected_shape)
+ assert_equal(c.shape, expected_shape)
+ assert_equal(d.shape, expected_shape)
+ assert_equal(e.shape, expected_shape)
+
+ def test_nd_values(self):
+ a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5])
+ assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]])
+ assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]])
+ assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]])
+
+ def test_nd_indexing(self):
+ a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij')
+ assert_equal(a, [[[0, 0, 0], [0, 0, 0]]])
+ assert_equal(b, [[[1, 1, 1], [2, 2, 2]]])
+ assert_equal(c, [[[3, 4, 5], [3, 4, 5]]])
+
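+
+# Standalone sketch (illustration only) of the sparse behaviour tested
+# above: sparse=True returns broadcastable arrays with singleton dimensions
+# instead of full grids, trading memory for an implicit broadcast. The
+# helper name is hypothetical.
+def _demo_meshgrid_sparse():
+    X, Y = np.meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
+    assert X.shape == (1, 3) and Y.shape == (4, 1)
+    return X + Y    # broadcasts to the full (4, 3) grid
+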
+
+class TestPiecewise:
+
+ def test_simple(self):
+ # Condition is single bool list
+ x = piecewise([0, 0], [True, False], [1])
+ assert_array_equal(x, [1, 0])
+
+ # List of conditions: single bool list
+ x = piecewise([0, 0], [[True, False]], [1])
+ assert_array_equal(x, [1, 0])
+
+ # Condition is a single bool array
+ x = piecewise([0, 0], np.array([True, False]), [1])
+ assert_array_equal(x, [1, 0])
+
+ # Condition is single int array
+ x = piecewise([0, 0], np.array([1, 0]), [1])
+ assert_array_equal(x, [1, 0])
+
+ # List of conditions: int array
+ x = piecewise([0, 0], [np.array([1, 0])], [1])
+ assert_array_equal(x, [1, 0])
+
+ x = piecewise([0, 0], [[False, True]], [lambda x:-1])
+ assert_array_equal(x, [0, -1])
+
+ assert_raises_regex(ValueError, '1 or 2 functions are expected',
+ piecewise, [0, 0], [[False, True]], [])
+ assert_raises_regex(ValueError, '1 or 2 functions are expected',
+ piecewise, [0, 0], [[False, True]], [1, 2, 3])
+
+ def test_two_conditions(self):
+ x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
+ assert_array_equal(x, [3, 4])
+
+ def test_scalar_domains_three_conditions(self):
+ x = piecewise(3, [True, False, False], [4, 2, 0])
+ assert_equal(x, 4)
+
+ def test_default(self):
+ # No value specified for x[1], should be 0
+ x = piecewise([1, 2], [True, False], [2])
+ assert_array_equal(x, [2, 0])
+
+ # Should set x[1] to 3
+ x = piecewise([1, 2], [True, False], [2, 3])
+ assert_array_equal(x, [2, 3])
+
+ def test_0d(self):
+ x = np.array(3)
+ y = piecewise(x, x > 3, [4, 0])
+ assert_(y.ndim == 0)
+ assert_(y == 0)
+
+ x = 5
+ y = piecewise(x, [True, False], [1, 0])
+ assert_(y.ndim == 0)
+ assert_(y == 1)
+
+ # With 3 ranges (this used to fail)
+ y = piecewise(x, [False, False, True], [1, 2, 3])
+ assert_array_equal(y, 3)
+
+ def test_0d_comparison(self):
+ x = 3
+ y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.
+ assert_equal(y, 4)
+
+ # With 3 ranges (this used to fail)
+ x = 4
+ y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
+ assert_array_equal(y, 2)
+
+ assert_raises_regex(ValueError, '2 or 3 functions are expected',
+ piecewise, x, [x <= 3, x > 3], [1])
+ assert_raises_regex(ValueError, '2 or 3 functions are expected',
+ piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1])
+
+ def test_0d_0d_condition(self):
+ x = np.array(3)
+ c = np.array(x > 3)
+ y = piecewise(x, [c], [1, 2])
+ assert_equal(y, 2)
+
+ def test_multidimensional_extrafunc(self):
+ x = np.array([[-2.5, -1.5, -0.5],
+ [0.5, 1.5, 2.5]])
+ y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
+ assert_array_equal(y, np.array([[-1., -1., -1.],
+ [3., 3., 1.]]))
+
+ def test_subclasses(self):
+ class subclass(np.ndarray):
+ pass
+ x = np.arange(5.).view(subclass)
+ r = piecewise(x, [x<2., x>=4], [-1., 1., 0.])
+ assert_equal(type(r), subclass)
+ assert_equal(r, [-1., -1., 0., 0., 1.])
+
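+
+# Standalone sketch (illustration only) of the extra-function convention
+# tested above: with n conditions, piecewise accepts n or n + 1 functions,
+# the optional last one acting as the default where no condition holds.
+# The helper name is hypothetical.
+def _demo_piecewise_default():
+    x = np.array([-1.5, 0.5, 2.5])
+    # x < 0 -> -1, x >= 2 -> 1, otherwise the trailing default 3
+    return np.piecewise(x, [x < 0, x >= 2], [-1, 1, 3])    # [-1., 3., 1.]
+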
+
+class TestBincount:
+
+ def test_simple(self):
+ y = np.bincount(np.arange(4))
+ assert_array_equal(y, np.ones(4))
+
+ def test_simple2(self):
+ y = np.bincount(np.array([1, 5, 2, 4, 1]))
+ assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))
+
+ def test_simple_weight(self):
+ x = np.arange(4)
+ w = np.array([0.2, 0.3, 0.5, 0.1])
+ y = np.bincount(x, w)
+ assert_array_equal(y, w)
+
+ def test_simple_weight2(self):
+ x = np.array([1, 2, 4, 5, 2])
+ w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
+ y = np.bincount(x, w)
+ assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
+
+ def test_with_minlength(self):
+ x = np.array([0, 1, 0, 1, 1])
+ y = np.bincount(x, minlength=3)
+ assert_array_equal(y, np.array([2, 3, 0]))
+ x = []
+ y = np.bincount(x, minlength=0)
+ assert_array_equal(y, np.array([]))
+
+ def test_with_minlength_smaller_than_maxvalue(self):
+ x = np.array([0, 1, 1, 2, 2, 3, 3])
+ y = np.bincount(x, minlength=2)
+ assert_array_equal(y, np.array([1, 2, 2, 2]))
+ y = np.bincount(x, minlength=0)
+ assert_array_equal(y, np.array([1, 2, 2, 2]))
+
+ def test_with_minlength_and_weights(self):
+ x = np.array([1, 2, 4, 5, 2])
+ w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
+ y = np.bincount(x, w, 8)
+ assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))
+
+ def test_empty(self):
+ x = np.array([], dtype=int)
+ y = np.bincount(x)
+ assert_array_equal(x, y)
+
+ def test_empty_with_minlength(self):
+ x = np.array([], dtype=int)
+ y = np.bincount(x, minlength=5)
+ assert_array_equal(y, np.zeros(5, dtype=int))
+
+ def test_with_incorrect_minlength(self):
+ x = np.array([], dtype=int)
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
+ lambda: np.bincount(x, minlength="foobar"))
+ assert_raises_regex(ValueError,
+ "must not be negative",
+ lambda: np.bincount(x, minlength=-1))
+
+ x = np.arange(5)
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
+ lambda: np.bincount(x, minlength="foobar"))
+ assert_raises_regex(ValueError,
+ "must not be negative",
+ lambda: np.bincount(x, minlength=-1))
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_dtype_reference_leaks(self):
+ # gh-6805
+ intp_refcount = sys.getrefcount(np.dtype(np.intp))
+ double_refcount = sys.getrefcount(np.dtype(np.double))
+
+ for j in range(10):
+ np.bincount([1, 2, 3])
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
+ assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
+
+ for j in range(10):
+ np.bincount([1, 2, 3], [4, 5, 6])
+ assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
+ assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
+
+ @pytest.mark.parametrize("vals", [[[2, 2]], 2])
+ def test_error_not_1d(self, vals):
+ # Test that values has to be 1-D (both as array and nested list)
+ vals_arr = np.asarray(vals)
+ with assert_raises(ValueError):
+ np.bincount(vals_arr)
+ with assert_raises(ValueError):
+ np.bincount(vals)
+
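+
+# Standalone sketch (illustration only) of the weighted counting tested
+# above: with weights, bin i accumulates the sum of w over x == i instead
+# of a plain occurrence count, and minlength only pads the result, never
+# truncates it. The helper name is hypothetical.
+def _demo_bincount_weights():
+    x = np.array([1, 2, 4, 5, 2])
+    w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
+    return np.bincount(x, w, minlength=8)    # [0, .2, .5, 0, .5, .1, 0, 0]
+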
+
+class TestInterp:
+
+ def test_exceptions(self):
+ assert_raises(ValueError, interp, 0, [], [])
+ assert_raises(ValueError, interp, 0, [0], [1, 2])
+ assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
+ assert_raises(ValueError, interp, 0, [], [], period=360)
+ assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
+
+ def test_basic(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5)
+ x0 = np.linspace(0, 1, 50)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+
+ def test_right_left_behavior(self):
+ # Needs range of sizes to test different code paths.
+ # size == 1 is special-cased, 1 < size < 5 is linear search, and
+ # size >= 5 goes through local search and possibly binary search.
+ for size in range(1, 10):
+ xp = np.arange(size, dtype=np.double)
+ yp = np.ones(size, dtype=np.double)
+ incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
+ decpts = incpts[::-1]
+
+ incres = interp(incpts, xp, yp)
+ decres = interp(decpts, xp, yp)
+ inctgt = np.array([1, 1, 1, 1], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0)
+ decres = interp(decpts, xp, yp, left=0)
+ inctgt = np.array([0, 1, 1, 1], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, right=2)
+ decres = interp(decpts, xp, yp, right=2)
+ inctgt = np.array([1, 1, 1, 2], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0, right=2)
+ decres = interp(decpts, xp, yp, left=0, right=2)
+ inctgt = np.array([0, 1, 1, 2], dtype=float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ def test_scalar_interpolation_point(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5)
+ x0 = 0
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = .3
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = np.float32(.3)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = np.float64(.3)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+ x0 = np.nan
+ assert_almost_equal(np.interp(x0, x, y), x0)
+
+ def test_non_finite_behavior_exact_x(self):
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
+ fp = [1, 2, np.nan, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
+
+ @pytest.fixture(params=[
+ lambda x: np.float_(x),
+ lambda x: _make_complex(x, 0),
+ lambda x: _make_complex(0, x),
+ lambda x: _make_complex(x, np.multiply(x, -2))
+ ], ids=[
+ 'real',
+ 'complex-real',
+ 'complex-imag',
+ 'complex-both'
+ ])
+ def sc(self, request):
+ """ scale function used by the below tests """
+ return request.param
+
+ def test_non_finite_any_nan(self, sc):
+ """ test that nans are propagated """
+ assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan))
+
+ def test_non_finite_inf(self, sc):
+ """ Test that interp between opposite infs gives nan """
+ assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan))
+
+ # unless the y values are equal
+ assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10))
+
+ def test_non_finite_half_inf_xf(self, sc):
+ """ Test that interp where both axes have a bound at inf gives nan """
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan))
+
+ def test_non_finite_half_inf_x(self, sc):
+ """ Test interp where the x axis has a bound at inf """
+ assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
+ assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0))
+ assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
+
+ def test_non_finite_half_inf_f(self, sc):
+ """ Test interp where the f axis has a bound at inf """
+ assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf))
+
+ def test_complex_interp(self):
+ # test complex interpolation
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j
+ x0 = 0.3
+ y0 = x0 + (1+x0)*1.0j
+ assert_almost_equal(np.interp(x0, x, y), y0)
+ # test complex left and right
+ x0 = -1
+ left = 2 + 3.0j
+ assert_almost_equal(np.interp(x0, x, y, left=left), left)
+ x0 = 2.0
+ right = 2 + 3.0j
+ assert_almost_equal(np.interp(x0, x, y, right=right), right)
+ # test complex non finite
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2+1j, np.inf, 4]
+ y = [1, 2+1j, np.inf+0.5j, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), y)
+ # test complex periodic
+ x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ xp = [190, -190, 350, -350]
+ fp = [5+1.0j, 10+2j, 3+3j, 4+4j]
+ y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j,
+ 3.5+3.5j, 3.75+3.75j]
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+
+ def test_zero_dimensional_interpolation_point(self):
+ x = np.linspace(0, 1, 5)
+ y = np.linspace(0, 1, 5)
+ x0 = np.array(.3)
+ assert_almost_equal(np.interp(x0, x, y), x0)
+
+ xp = np.array([0, 2, 4])
+ fp = np.array([1, -1, 1])
+
+ actual = np.interp(np.array(1), xp, fp)
+ assert_equal(actual, 0)
+ assert_(isinstance(actual, np.float64))
+
+ actual = np.interp(np.array(4.5), xp, fp, period=4)
+ assert_equal(actual, 0.5)
+ assert_(isinstance(actual, np.float64))
+
+ def test_if_len_x_is_small(self):
+ xp = np.arange(0, 10, 0.0001)
+ fp = np.sin(xp)
+ assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
+
+ def test_period(self):
+ x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ xp = [190, -190, 350, -350]
+ fp = [5, 10, 3, 4]
+ y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+ x = np.array(x, order='F').reshape(2, -1)
+ y = np.array(y, order='C').reshape(2, -1)
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+
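+
+# Standalone sketch (illustration only) of the periodic mode tested above:
+# with period=360, the xp coordinates are reduced modulo the period and
+# sorted, so samples at 190 and -190 interpolate smoothly across the
+# wrap-around. The helper name is hypothetical.
+def _demo_interp_period():
+    xp = [190, -190, 350, -350]
+    fp = [5, 10, 3, 4]
+    return np.interp([-180, 185], xp, fp, period=360)    # [7.5, 6.25]
+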
+
+class TestPercentile:
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.percentile(x, 0), 0.)
+ assert_equal(np.percentile(x, 100), 3.5)
+ assert_equal(np.percentile(x, 50), 1.75)
+ x[1] = np.nan
+ assert_equal(np.percentile(x, 0), np.nan)
+ assert_equal(np.percentile(x, 0, method='nearest'), np.nan)
+
+ def test_fraction(self):
+ x = [Fraction(i, 2) for i in range(8)]
+
+ p = np.percentile(x, Fraction(0))
+ assert_equal(p, Fraction(0))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, Fraction(100))
+ assert_equal(p, Fraction(7, 2))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, Fraction(50))
+ assert_equal(p, Fraction(7, 4))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, [Fraction(50)])
+ assert_equal(p, np.array([Fraction(7, 4)]))
+ assert_equal(type(p), np.ndarray)
+
+ def test_api(self):
+ d = np.ones(5)
+ np.percentile(d, 5, None, None, False)
+ np.percentile(d, 5, None, None, False, 'linear')
+ o = np.ones((1,))
+ np.percentile(d, 5, None, o, False, 'linear')
+
+ def test_2D(self):
+ x = np.array([[1, 1, 1],
+ [1, 1, 1],
+ [4, 4, 3],
+ [1, 1, 1],
+ [1, 1, 1]])
+ assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_linear_nan_1D(self, dtype):
+ # METHOD 1 of H&F
+ arr = np.asarray([15.0, np.NAN, 35.0, 40.0, 50.0], dtype=dtype)
+ res = np.percentile(
+ arr,
+ 40.0,
+ method="linear")
+ np.testing.assert_equal(res, np.NAN)
+ np.testing.assert_equal(res.dtype, arr.dtype)
+
+ H_F_TYPE_CODES = [(int_type, np.float64)
+ for int_type in np.typecodes["AllInteger"]
+ ] + [(np.float16, np.float16),
+ (np.float32, np.float32),
+ (np.float64, np.float64),
+ (np.longdouble, np.longdouble),
+ (np.complex64, np.complex64),
+ (np.complex128, np.complex128),
+ (np.clongdouble, np.clongdouble),
+ (np.dtype("O"), np.float64)]
+
+ @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES)
+ @pytest.mark.parametrize(["method", "expected"],
+ [("inverted_cdf", 20),
+ ("averaged_inverted_cdf", 27.5),
+ ("closest_observation", 20),
+ ("interpolated_inverted_cdf", 20),
+ ("hazen", 27.5),
+ ("weibull", 26),
+ ("linear", 29),
+ ("median_unbiased", 27),
+ ("normal_unbiased", 27.125),
+ ])
+ def test_linear_interpolation(self,
+ method,
+ expected,
+ input_dtype,
+ expected_dtype):
+ expected_dtype = np.dtype(expected_dtype)
+ if np._get_promotion_state() == "legacy":
+ expected_dtype = np.promote_types(expected_dtype, np.float64)
+
+ arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype)
+ actual = np.percentile(arr, 40.0, method=method)
+
+ np.testing.assert_almost_equal(
+ actual, expected_dtype.type(expected), 14)
+
+ if method in ["inverted_cdf", "closest_observation"]:
+ if input_dtype == "O":
+ np.testing.assert_equal(np.asarray(actual).dtype, np.float64)
+ else:
+ np.testing.assert_equal(np.asarray(actual).dtype,
+ np.dtype(input_dtype))
+ else:
+ np.testing.assert_equal(np.asarray(actual).dtype,
+ np.dtype(expected_dtype))
+
+ TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"
+
+ @pytest.mark.parametrize("dtype", TYPE_CODES)
+ def test_lower_higher(self, dtype):
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 50,
+ method='lower'), 4)
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 50,
+ method='higher'), 5)
+
+ @pytest.mark.parametrize("dtype", TYPE_CODES)
+ def test_midpoint(self, dtype):
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 51,
+ method='midpoint'), 4.5)
+ assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50,
+ method='midpoint'), 5)
+ assert_equal(np.percentile(np.arange(11, dtype=dtype), 51,
+ method='midpoint'), 5.5)
+ assert_equal(np.percentile(np.arange(11, dtype=dtype), 50,
+ method='midpoint'), 5)
+
+ @pytest.mark.parametrize("dtype", TYPE_CODES)
+ def test_nearest(self, dtype):
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 51,
+ method='nearest'), 5)
+ assert_equal(np.percentile(np.arange(10, dtype=dtype), 49,
+ method='nearest'), 4)
+
+ def test_linear_interpolation_extrapolation(self):
+ arr = np.random.rand(5)
+
+ actual = np.percentile(arr, 100)
+ np.testing.assert_equal(actual, arr.max())
+
+ actual = np.percentile(arr, 0)
+ np.testing.assert_equal(actual, arr.min())
+
+ def test_sequence(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])
+
+ def test_axis(self):
+ x = np.arange(12).reshape(3, 4)
+
+ assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])
+
+ r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
+ assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)
+
+ r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
+ assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)
+
+ # ensure qth axis is always first as with np.array(old_percentile(..))
+ x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+ assert_equal(np.percentile(x, (25, 50)).shape, (2,))
+ assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
+ assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
+ assert_equal(
+ np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6))
+ assert_equal(np.percentile(x, (25, 50),
+ method="higher").shape, (2,))
+ assert_equal(np.percentile(x, (25, 50, 75),
+ method="higher").shape, (3,))
+ assert_equal(np.percentile(x, (25, 50), axis=0,
+ method="higher").shape, (2, 4, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=1,
+ method="higher").shape, (2, 3, 5, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=2,
+ method="higher").shape, (2, 3, 4, 6))
+ assert_equal(np.percentile(x, (25, 50), axis=3,
+ method="higher").shape, (2, 3, 4, 5))
+ assert_equal(np.percentile(x, (25, 50, 75), axis=1,
+ method="higher").shape, (3, 3, 5, 6))
+
+ def test_scalar_q(self):
+ # test for no empty dimensions for compatibility with old percentile
+ x = np.arange(12).reshape(3, 4)
+ assert_equal(np.percentile(x, 50), 5.5)
+ assert_(np.isscalar(np.percentile(x, 50)))
+ r0 = np.array([4., 5., 6., 7.])
+ assert_equal(np.percentile(x, 50, axis=0), r0)
+ assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
+ r1 = np.array([1.5, 5.5, 9.5])
+ assert_almost_equal(np.percentile(x, 50, axis=1), r1)
+ assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)
+
+ out = np.empty(1)
+ assert_equal(np.percentile(x, 50, out=out), 5.5)
+ assert_equal(out, 5.5)
+ out = np.empty(4)
+ assert_equal(np.percentile(x, 50, axis=0, out=out), r0)
+ assert_equal(out, r0)
+ out = np.empty(3)
+ assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
+ assert_equal(out, r1)
+
+ # test for no empty dimensions for compatibility with old percentile
+ x = np.arange(12).reshape(3, 4)
+ assert_equal(np.percentile(x, 50, method='lower'), 5.)
+ assert_(np.isscalar(np.percentile(x, 50)))
+ r0 = np.array([4., 5., 6., 7.])
+ c0 = np.percentile(x, 50, method='lower', axis=0)
+ assert_equal(c0, r0)
+ assert_equal(c0.shape, r0.shape)
+ r1 = np.array([1., 5., 9.])
+ c1 = np.percentile(x, 50, method='lower', axis=1)
+ assert_almost_equal(c1, r1)
+ assert_equal(c1.shape, r1.shape)
+
+ out = np.empty((), dtype=x.dtype)
+ c = np.percentile(x, 50, method='lower', out=out)
+ assert_equal(c, 5)
+ assert_equal(out, 5)
+ out = np.empty(4, dtype=x.dtype)
+ c = np.percentile(x, 50, method='lower', axis=0, out=out)
+ assert_equal(c, r0)
+ assert_equal(out, r0)
+ out = np.empty(3, dtype=x.dtype)
+ c = np.percentile(x, 50, method='lower', axis=1, out=out)
+ assert_equal(c, r1)
+ assert_equal(out, r1)
+
+ def test_exception(self):
+ assert_raises(ValueError, np.percentile, [1, 2], 56,
+ method='foobar')
+ assert_raises(ValueError, np.percentile, [1], 101)
+ assert_raises(ValueError, np.percentile, [1], -1)
+ assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])
+ assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])
+
+ def test_percentile_list(self):
+ assert_equal(np.percentile([1, 2, 3], 0), 1)
+
+ def test_percentile_out(self):
+ x = np.array([1, 2, 3])
+ y = np.zeros((3,))
+ p = (1, 2, 3)
+ np.percentile(x, p, out=y)
+ assert_equal(np.percentile(x, p), y)
+
+ x = np.array([[1, 2, 3],
+ [4, 5, 6]])
+
+ y = np.zeros((3, 3))
+ np.percentile(x, p, axis=0, out=y)
+ assert_equal(np.percentile(x, p, axis=0), y)
+
+ y = np.zeros((3, 2))
+ np.percentile(x, p, axis=1, out=y)
+ assert_equal(np.percentile(x, p, axis=1), y)
+
+ x = np.arange(12).reshape(3, 4)
+ # q.dim > 1, float
+ r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])
+ out = np.empty((2, 4))
+ assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0)
+ assert_equal(out, r0)
+ r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])
+ out = np.empty((2, 3))
+ assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)
+ assert_equal(out, r1)
+
+ # q.dim > 1, int
+ r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
+ out = np.empty((2, 4), dtype=x.dtype)
+ c = np.percentile(x, (25, 50), method='lower', axis=0, out=out)
+ assert_equal(c, r0)
+ assert_equal(out, r0)
+ r1 = np.array([[0, 4, 8], [1, 5, 9]])
+ out = np.empty((2, 3), dtype=x.dtype)
+ c = np.percentile(x, (25, 50), method='lower', axis=1, out=out)
+ assert_equal(c, r1)
+ assert_equal(out, r1)
+
+ def test_percentile_empty_dim(self):
+ # empty dims are preserved
+ d = np.arange(11 * 2).reshape(11, 1, 2, 1)
+ assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))
+ assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))
+ assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))
+ assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))
+ assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))
+ assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))
+ assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))
+ assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))
+
+ assert_array_equal(np.percentile(d, 50, axis=2,
+ method='midpoint').shape,
+ (11, 1, 1))
+ assert_array_equal(np.percentile(d, 50, axis=-2,
+ method='midpoint').shape,
+ (11, 1, 1))
+
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape,
+ (2, 1, 2, 1))
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape,
+ (2, 11, 2, 1))
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape,
+ (2, 11, 1, 1))
+ assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape,
+ (2, 11, 1, 2))
+
+ def test_percentile_no_overwrite(self):
+ a = np.array([2, 3, 4, 1])
+ np.percentile(a, [50], overwrite_input=False)
+ assert_equal(a, np.array([2, 3, 4, 1]))
+
+ a = np.array([2, 3, 4, 1])
+ np.percentile(a, [50])
+ assert_equal(a, np.array([2, 3, 4, 1]))
+
+ def test_no_p_overwrite(self):
+ p = np.linspace(0., 100., num=5)
+ np.percentile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, np.linspace(0., 100., num=5))
+ p = np.linspace(0., 100., num=5).tolist()
+ np.percentile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, np.linspace(0., 100., num=5).tolist())
+
+ def test_percentile_overwrite(self):
+ a = np.array([2, 3, 4, 1])
+ b = np.percentile(a, [50], overwrite_input=True)
+ assert_equal(b, np.array([2.5]))
+
+ b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
+ assert_equal(b, np.array([2.5]))
+
+ def test_extended_axis(self):
+ o = np.random.normal(size=(71, 23))
+ x = np.dstack([o] * 10)
+ assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
+ x = np.moveaxis(x, -1, 0)
+ assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
+ x = x.swapaxes(0, 1).copy()
+ assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
+ x = x.swapaxes(0, 1).copy()
+
+ assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
+ np.percentile(x, [25, 60], axis=None))
+ assert_equal(np.percentile(x, [25, 60], axis=(0,)),
+ np.percentile(x, [25, 60], axis=0))
+
+ d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
+ np.random.shuffle(d.ravel())
+ assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],
+ np.percentile(d[:, :, :, 0].flatten(), 25))
+ assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
+ np.percentile(d[:, :, 1, :].flatten(), [10, 90]))
+ assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
+ np.percentile(d[:, :, 2, :].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
+ np.percentile(d[2, :, :, :].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
+ np.percentile(d[2, 1, :, :].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
+ np.percentile(d[2, :, :, 1].flatten(), 25))
+ assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
+ np.percentile(d[2, :, 2, :].flatten(), 25))
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=4, q=25)
+ assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25)
+ # each of these refers to the same axis twice
+ assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)
+ assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25)
+ assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25)
+
+ def test_keepdims(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,
+ (1, 1, 7, 11))
+ assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,
+ (1, 5, 7, 1))
+ assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,
+ (3, 1, 7, 11))
+ assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,
+ (1, 1, 7, 1))
+
+ assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),
+ keepdims=True).shape, (2, 1, 1, 7, 1))
+ assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
+ keepdims=True).shape, (2, 1, 5, 7, 1))
+
+ @pytest.mark.parametrize('q', [7, [1, 7]])
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1,),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, q, axis):
+ d = np.ones((3, 5, 7, 11))
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
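+
+# Standalone sketch (illustration only) of the fweights equivalence tested
+# above: integer frequency weights behave exactly like repeating each
+# observation that many times. The helper name is hypothetical.
+def _demo_cov_fweights():
+    x = np.array([[0.0, 1.0, 2.0]])
+    f = np.array([1, 4, 1])
+    repeated = np.array([[0.0, 1.0, 1.0, 1.0, 1.0, 2.0]])
+    return np.cov(x, fweights=f), np.cov(repeated)    # equal matrices
+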
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ shape_out = np.shape(q) + shape_out
+
+ out = np.empty(shape_out)
+ result = np.percentile(d, q, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ def test_out(self):
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ assert_equal(np.percentile(d, 0, 0, out=o), o)
+ assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.percentile(d, 1, 1, out=o), o)
+ assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o)
+
+ o = np.zeros(())
+ assert_equal(np.percentile(d, 2, out=o), o)
+ assert_equal(np.percentile(d, 2, method='nearest', out=o), o)
+
+ def test_out_nan(self):
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ d[2, 1] = np.nan
+ assert_equal(np.percentile(d, 0, 0, out=o), o)
+ assert_equal(
+ np.percentile(d, 0, 0, method='nearest', out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.percentile(d, 1, 1, out=o), o)
+ assert_equal(
+ np.percentile(d, 1, 1, method='nearest', out=o), o)
+ o = np.zeros(())
+ assert_equal(np.percentile(d, 1, out=o), o)
+ assert_equal(
+ np.percentile(d, 1, method='nearest', out=o), o)
+
+ def test_nan_behavior(self):
+ a = np.arange(24, dtype=float)
+ a[2] = np.nan
+ assert_equal(np.percentile(a, 0.3), np.nan)
+ assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
+ assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
+ np.array([np.nan] * 2))
+
+ a = np.arange(24, dtype=float).reshape(2, 3, 4)
+ a[1, 2, 3] = np.nan
+ a[1, 1, 2] = np.nan
+
+ # no axis
+ assert_equal(np.percentile(a, 0.3), np.nan)
+ assert_equal(np.percentile(a, 0.3).ndim, 0)
+
+ # axis0 zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)
+ b[2, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.percentile(a, 0.3, 0), b)
+
+ # axis0 not zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+ [0.3, 0.6], 0)
+ b[:, 2, 3] = np.nan
+ b[:, 1, 2] = np.nan
+ assert_equal(np.percentile(a, [0.3, 0.6], 0), b)
+
+ # axis1 zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
+ b[1, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.percentile(a, 0.3, 1), b)
+ # axis1 not zerod
+ b = np.percentile(
+ np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
+ b[:, 1, 3] = np.nan
+ b[:, 1, 2] = np.nan
+ assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
+
+ # axis02 zerod
+ b = np.percentile(
+ np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
+ b[1] = np.nan
+ b[2] = np.nan
+ assert_equal(np.percentile(a, 0.3, (0, 2)), b)
+ # axis02 not zerod
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+ [0.3, 0.6], (0, 2))
+ b[:, 1] = np.nan
+ b[:, 2] = np.nan
+ assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
+ # axis02 not zerod with method='nearest'
+ b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
+ [0.3, 0.6], (0, 2), method='nearest')
+ b[:, 1] = np.nan
+ b[:, 2] = np.nan
+ assert_equal(np.percentile(
+ a, [0.3, 0.6], (0, 2), method='nearest'), b)
+
+ def test_nan_q(self):
+ # GH18830
+ with pytest.raises(ValueError, match="Percentiles must be in"):
+ np.percentile([1, 2, 3, 4.0], np.nan)
+ with pytest.raises(ValueError, match="Percentiles must be in"):
+ np.percentile([1, 2, 3, 4.0], [np.nan])
+ q = np.linspace(1.0, 99.0, 16)
+ q[0] = np.nan
+ with pytest.raises(ValueError, match="Percentiles must be in"):
+ np.percentile([1, 2, 3, 4.0], q)
+
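+
+# Standalone sketch (illustration only) of the method dispatch exercised in
+# test_linear_interpolation above: the estimator is chosen via the `method`
+# keyword, and on the same data the discrete methods bracket the default
+# "linear" answer. The helper name is hypothetical.
+def _demo_percentile_methods():
+    arr = [15.0, 20.0, 35.0, 40.0, 50.0]
+    return {m: np.percentile(arr, 40.0, method=m)
+            for m in ("lower", "linear", "higher", "nearest", "midpoint")}
+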
+
+class TestQuantile:
+ # most of this is already tested by TestPercentile
+
+ def test_max_ulp(self):
+ x = [0.0, 0.2, 0.4]
+ a = np.quantile(x, 0.45)
+ # The default linear method would result in 0 + 0.2 * (0.45 / 0.5) = 0.18.
+ # 0.18 is not exactly representable and the formula leads to a 1 ULP
+ # different result. Ensure the result is within 1 ULP of 0.18, see gh-20331.
+ np.testing.assert_array_max_ulp(a, 0.18, maxulp=1)
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.quantile(x, 0), 0.)
+ assert_equal(np.quantile(x, 1), 3.5)
+ assert_equal(np.quantile(x, 0.5), 1.75)
+
+ @pytest.mark.xfail(reason="See gh-19154")
+ def test_correct_quantile_value(self):
+ a = np.array([True])
+ tf_quant = np.quantile(True, False)
+ assert_equal(tf_quant, a[0])
+ assert_equal(type(tf_quant), a.dtype)
+ a = np.array([False, True, True])
+ quant_res = np.quantile(a, a)
+ assert_array_equal(quant_res, a)
+ assert_equal(quant_res.dtype, a.dtype)
+
+ def test_fraction(self):
+ # fractional input, integral quantile
+ x = [Fraction(i, 2) for i in range(8)]
+ q = np.quantile(x, 0)
+ assert_equal(q, 0)
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, 1)
+ assert_equal(q, Fraction(7, 2))
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, Fraction(1, 2))
+ assert_equal(q, Fraction(7, 4))
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, [Fraction(1, 2)])
+ assert_equal(q, np.array([Fraction(7, 4)]))
+ assert_equal(type(q), np.ndarray)
+
+ q = np.quantile(x, [[Fraction(1, 2)]])
+ assert_equal(q, np.array([[Fraction(7, 4)]]))
+ assert_equal(type(q), np.ndarray)
+
+ # repeat with integral input but fractional quantile
+ x = np.arange(8)
+ assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))
+
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.quantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.quantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+ def test_quantile_preserve_int_type(self, dtype):
+ res = np.quantile(np.array([1, 2], dtype=dtype), [0.5],
+ method="nearest")
+ assert res.dtype == dtype
+
+ @pytest.mark.parametrize("method",
+ ['inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
+ 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
+ 'median_unbiased', 'normal_unbiased',
+ 'nearest', 'lower', 'higher', 'midpoint'])
+ def test_quantile_monotonic(self, method):
+ # GH 14685
+ # test that the return value of quantile is monotonic if p0 is ordered
+ # Also tests that the boundary values are not mishandled.
+ p0 = np.linspace(0, 1, 101)
+ quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9,
+ 8, 8, 7]) * 0.1, p0, method=method)
+ assert_equal(np.sort(quantile), quantile)
+
+ # Also test one where the number of data points is clearly divisible:
+ quantile = np.quantile([0., 1., 2., 3.], p0, method=method)
+ assert_equal(np.sort(quantile), quantile)
+
+ @hypothesis.given(
+ arr=arrays(dtype=np.float64,
+ shape=st.integers(min_value=3, max_value=1000),
+ elements=st.floats(allow_infinity=False, allow_nan=False,
+ min_value=-1e300, max_value=1e300)))
+ def test_quantile_monotonic_hypo(self, arr):
+ p0 = np.arange(0, 1, 0.01)
+ quantile = np.quantile(arr, p0)
+ assert_equal(np.sort(quantile), quantile)
+
+ def test_quantile_scalar_nan(self):
+ a = np.array([[10., 7., 4.], [3., 2., 1.]])
+ a[0][1] = np.nan
+ actual = np.quantile(a, 0.5)
+ assert np.isscalar(actual)
+ assert_equal(np.quantile(a, 0.5), np.nan)
+
+
+class TestLerp:
+ @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ t1=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ a=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300),
+ b=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300))
+ def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b):
+ l0 = nfb._lerp(a, b, t0)
+ l1 = nfb._lerp(a, b, t1)
+ if t0 == t1 or a == b:
+ assert l0 == l1 # uninteresting
+ elif (t0 < t1) == (a < b):
+ assert l0 <= l1
+ else:
+ assert l0 >= l1
+
+ @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ a=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300),
+ b=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300))
+ def test_linear_interpolation_formula_bounded(self, t, a, b):
+ if a <= b:
+ assert a <= nfb._lerp(a, b, t) <= b
+ else:
+ assert b <= nfb._lerp(a, b, t) <= a
+
+ @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=0, max_value=1),
+ a=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300),
+ b=st.floats(allow_nan=False, allow_infinity=False,
+ min_value=-1e300, max_value=1e300))
+ def test_linear_interpolation_formula_symmetric(self, t, a, b):
+ # double subtraction is needed to remove the extra precision of t < 0.5
+ left = nfb._lerp(a, b, 1 - (1 - t))
+ right = nfb._lerp(b, a, 1 - t)
+ assert_allclose(left, right)
+
+ def test_linear_interpolation_formula_0d_inputs(self):
+ a = np.array(2)
+ b = np.array(5)
+ t = np.array(0.2)
+ assert nfb._lerp(a, b, t) == 2.6
+
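+
+# Standalone reimplementation sketch (an assumption for illustration only;
+# nfb._lerp is numpy-internal) of the two-sided interpolation formula whose
+# monotonic/bounded/symmetric properties the class above checks: anchoring
+# at a for t < 0.5 and at b otherwise keeps both endpoints exact.
+def _demo_lerp(a, b, t):
+    return a + t * (b - a) if t < 0.5 else b - (b - a) * (1 - t)
+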
+
+class TestMedian:
+
+ def test_basic(self):
+ a0 = np.array(1)
+ a1 = np.arange(2)
+ a2 = np.arange(6).reshape(2, 3)
+ assert_equal(np.median(a0), 1)
+ assert_allclose(np.median(a1), 0.5)
+ assert_allclose(np.median(a2), 2.5)
+ assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
+ assert_equal(np.median(a2, axis=1), [1, 4])
+ assert_allclose(np.median(a2, axis=None), 2.5)
+
+ a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
+ assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
+ a = np.array([0.0463301, 0.0444502, 0.141249])
+ assert_equal(a[0], np.median(a))
+ a = np.array([0.0444502, 0.141249, 0.0463301])
+ assert_equal(a[-1], np.median(a))
+ # check array scalar result
+ assert_equal(np.median(a).ndim, 0)
+ a[1] = np.nan
+ assert_equal(np.median(a).ndim, 0)
+
+ def test_axis_keyword(self):
+ a3 = np.array([[2, 3],
+ [0, 1],
+ [6, 7],
+ [4, 5]])
+ for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
+ orig = a.copy()
+ np.median(a, axis=None)
+ for ax in range(a.ndim):
+ np.median(a, axis=ax)
+ assert_array_equal(a, orig)
+
+ assert_allclose(np.median(a3, axis=0), [3, 4])
+ assert_allclose(np.median(a3.T, axis=1), [3, 4])
+ assert_allclose(np.median(a3), 3.5)
+ assert_allclose(np.median(a3, axis=None), 3.5)
+ assert_allclose(np.median(a3.T), 3.5)
+
+ def test_overwrite_keyword(self):
+ a3 = np.array([[2, 3],
+ [0, 1],
+ [6, 7],
+ [4, 5]])
+ a0 = np.array(1)
+ a1 = np.arange(2)
+ a2 = np.arange(6).reshape(2, 3)
+ assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
+ assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
+ assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
+ assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0),
+ [1.5, 2.5, 3.5])
+ assert_allclose(
+ np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
+ assert_allclose(
+ np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
+ assert_allclose(
+ np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
+ assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1),
+ [3, 4])
+
+ a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
+ np.random.shuffle(a4.ravel())
+ assert_allclose(np.median(a4, axis=None),
+ np.median(a4.copy(), axis=None, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=0),
+ np.median(a4.copy(), axis=0, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=1),
+ np.median(a4.copy(), axis=1, overwrite_input=True))
+ assert_allclose(np.median(a4, axis=2),
+ np.median(a4.copy(), axis=2, overwrite_input=True))
+
+ def test_array_like(self):
+ x = [1, 2, 3]
+ assert_almost_equal(np.median(x), 2)
+ x2 = [x]
+ assert_almost_equal(np.median(x2), 2)
+ assert_allclose(np.median(x2, axis=0), x)
+
+ def test_subclass(self):
+ # gh-3846
+ class MySubClass(np.ndarray):
+
+ def __new__(cls, input_array, info=None):
+ obj = np.asarray(input_array).view(cls)
+ obj.info = info
+ return obj
+
+ def mean(self, axis=None, dtype=None, out=None):
+ return -7
+
+ a = MySubClass([1, 2, 3])
+ assert_equal(np.median(a), -7)
+
+ @pytest.mark.parametrize('arr',
+ ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.))
+ def test_subclass2(self, arr):
+ """Check that we return subclasses, even if a NaN scalar."""
+ class MySubclass(np.ndarray):
+ pass
+
+ m = np.median(np.array(arr).view(MySubclass))
+ assert isinstance(m, MySubclass)
+
+ def test_out(self):
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ assert_equal(np.median(d, 0, out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.median(d, 1, out=o), o)
+ o = np.zeros(())
+ assert_equal(np.median(d, out=o), o)
+
+ def test_out_nan(self):
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ o = np.zeros((4,))
+ d = np.ones((3, 4))
+ d[2, 1] = np.nan
+ assert_equal(np.median(d, 0, out=o), o)
+ o = np.zeros((3,))
+ assert_equal(np.median(d, 1, out=o), o)
+ o = np.zeros(())
+ assert_equal(np.median(d, out=o), o)
+
+ def test_nan_behavior(self):
+ a = np.arange(24, dtype=float)
+ a[2] = np.nan
+ assert_equal(np.median(a), np.nan)
+ assert_equal(np.median(a, axis=0), np.nan)
+
+ a = np.arange(24, dtype=float).reshape(2, 3, 4)
+ a[1, 2, 3] = np.nan
+ a[1, 1, 2] = np.nan
+
+ # no axis
+ assert_equal(np.median(a), np.nan)
+ assert_equal(np.median(a).ndim, 0)
+
+ # axis0
+ b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
+ b[2, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.median(a, 0), b)
+
+ # axis1
+ b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
+ b[1, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.median(a, 1), b)
+
+ # axis02
+ b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
+ b[1] = np.nan
+ b[2] = np.nan
+ assert_equal(np.median(a, (0, 2)), b)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly")
+ def test_empty(self):
+ # mean(empty array) emits two warnings: empty slice and divide by 0
+ a = np.array([], dtype=float)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.median(a), np.nan)
+ assert_(w[0].category is RuntimeWarning)
+ assert_equal(len(w), 2)
+
+ # multiple dimensions
+ a = np.array([], dtype=float, ndmin=3)
+ # no axis
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.median(a), np.nan)
+ assert_(w[0].category is RuntimeWarning)
+
+ # axis 0 and 1
+ b = np.array([], dtype=float, ndmin=2)
+ assert_equal(np.median(a, axis=0), b)
+ assert_equal(np.median(a, axis=1), b)
+
+ # axis 2
+ b = np.array(np.nan, dtype=float, ndmin=2)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.median(a, axis=2), b)
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_object(self):
+ o = np.arange(7.)
+        # compare inside assert_; a second positional argument to
+        # assert_ is only a failure message, so the original call with
+        # a trailing ", float" could never fail
+        assert_(type(np.median(o.astype(object))) is float)
+        o[2] = np.nan
+        assert_(type(np.median(o.astype(object))) is float)
+
+ def test_extended_axis(self):
+ o = np.random.normal(size=(71, 23))
+ x = np.dstack([o] * 10)
+ assert_equal(np.median(x, axis=(0, 1)), np.median(o))
+ x = np.moveaxis(x, -1, 0)
+ assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
+ x = x.swapaxes(0, 1).copy()
+ assert_equal(np.median(x, axis=(0, -1)), np.median(o))
+
+ assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
+ assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
+ assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
+
+ d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
+ np.random.shuffle(d.ravel())
+ assert_equal(np.median(d, axis=(0, 1, 2))[0],
+ np.median(d[:,:,:, 0].flatten()))
+ assert_equal(np.median(d, axis=(0, 1, 3))[1],
+ np.median(d[:,:, 1,:].flatten()))
+ assert_equal(np.median(d, axis=(3, 1, -4))[2],
+ np.median(d[:,:, 2,:].flatten()))
+ assert_equal(np.median(d, axis=(3, 1, 2))[2],
+ np.median(d[2,:,:,:].flatten()))
+ assert_equal(np.median(d, axis=(3, 2))[2, 1],
+ np.median(d[2, 1,:,:].flatten()))
+ assert_equal(np.median(d, axis=(1, -2))[2, 1],
+ np.median(d[2,:,:, 1].flatten()))
+ assert_equal(np.median(d, axis=(1, 3))[2, 2],
+ np.median(d[2,:, 2,:].flatten()))
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(np.AxisError, np.median, d, axis=-5)
+ assert_raises(np.AxisError, np.median, d, axis=(0, -5))
+ assert_raises(np.AxisError, np.median, d, axis=4)
+ assert_raises(np.AxisError, np.median, d, axis=(0, 4))
+ assert_raises(ValueError, np.median, d, axis=(1, 1))
+
+ def test_keepdims(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_equal(np.median(d, axis=None, keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
+ (1, 1, 7, 11))
+ assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
+ (1, 5, 7, 1))
+ assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
+ (3, 1, 7, 11))
+ assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
+ (1, 1, 1, 1))
+ assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
+ (1, 1, 7, 1))
+
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, axis):
+ d = np.ones((3, 5, 7, 11))
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = np.empty(shape_out)
+ result = np.median(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
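+
+    def test_normalize_axis_tuple_sketch(self):
+        # Illustrative sketch, not from the upstream suite: shows the
+        # axis normalization that test_keepdims_out relies on, namely
+        # that scalars become 1-tuples and negative axes wrap modulo
+        # the number of dimensions.
+        assert normalize_axis_tuple(1, 4) == (1,)
+        assert normalize_axis_tuple((-3, -1), 4) == (1, 3)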
+
+
+class TestAdd_newdoc_ufunc:
+
+ def test_ufunc_arg(self):
+ assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
+ assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah")
+
+ def test_string_arg(self):
+ assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
+
+
+class TestAdd_newdoc:
+
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+ @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
+ def test_add_doc(self):
+ # test that np.add_newdoc did attach a docstring successfully:
+ tgt = "Current flat index into the array."
+ assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
+ assert_(len(np.core.ufunc.identity.__doc__) > 300)
+ assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
+
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+ def test_errors_are_ignored(self):
+ prev_doc = np.core.flatiter.index.__doc__
+        # Nothing changed, but the error was silently ignored; this
+        # should probably raise a warning (or even an error) in the
+        # future.
+ np.add_newdoc("numpy.core", "flatiter", ("index", "bad docstring"))
+ assert prev_doc == np.core.flatiter.index.__doc__
+
+
+class TestAddDocstring:
+ # Test should possibly be moved, but it also fits to be close to
+ # the newdoc tests...
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+ @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc")
+ def test_add_same_docstring(self):
+ # test for attributes (which are C-level defined)
+ np.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__)
+ # And typical functions:
+ def func():
+ """docstring"""
+ return
+
+ np.add_docstring(func, func.__doc__)
+
+ @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+ def test_different_docstring_fails(self):
+ # test for attributes (which are C-level defined)
+ with assert_raises(RuntimeError):
+ np.add_docstring(np.ndarray.flat, "different docstring")
+ # And typical functions:
+ def func():
+ """docstring"""
+ return
+
+ with assert_raises(RuntimeError):
+ np.add_docstring(func, "different docstring")
+
+
+class TestSortComplex:
+
+ @pytest.mark.parametrize("type_in, type_out", [
+ ('l', 'D'),
+ ('h', 'F'),
+ ('H', 'F'),
+ ('b', 'F'),
+ ('B', 'F'),
+ ('g', 'G'),
+ ])
+ def test_sort_real(self, type_in, type_out):
+ # sort_complex() type casting for real input types
+ a = np.array([5, 3, 6, 2, 1], dtype=type_in)
+ actual = np.sort_complex(a)
+ expected = np.sort(a).astype(type_out)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
+
+ def test_sort_complex(self):
+ # sort_complex() handling of complex input
+ a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
+ expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
+ actual = np.sort_complex(a)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py
new file mode 100644
index 00000000..87e6e1d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_histograms.py
@@ -0,0 +1,808 @@
+import numpy as np
+
+from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_max_ulp, assert_raises_regex, suppress_warnings,
+ )
+from numpy.testing._private.utils import requires_memory
+import pytest
+
+
+class TestHistogram:
+
+ def setup_method(self):
+ pass
+
+ def teardown_method(self):
+ pass
+
+ def test_simple(self):
+ n = 100
+ v = np.random.rand(n)
+ (a, b) = histogram(v)
+ # check if the sum of the bins equals the number of samples
+ assert_equal(np.sum(a, axis=0), n)
+ # check that the bin counts are evenly spaced when the data is from
+ # a linear function
+ (a, b) = histogram(np.linspace(0, 10, 100))
+ assert_array_equal(a, 10)
+
+ def test_one_bin(self):
+ # Ticket 632
+ hist, edges = histogram([1, 2, 3, 4], [1, 2])
+ assert_array_equal(hist, [2, ])
+ assert_array_equal(edges, [1, 2])
+ assert_raises(ValueError, histogram, [1, 2], bins=0)
+ h, e = histogram([1, 2], bins=1)
+ assert_equal(h, np.array([2]))
+ assert_allclose(e, np.array([1., 2.]))
+
+ def test_density(self):
+ # Check that the integral of the density equals 1.
+ n = 100
+ v = np.random.rand(n)
+ a, b = histogram(v, density=True)
+ area = np.sum(a * np.diff(b))
+ assert_almost_equal(area, 1)
+
+ # Check with non-constant bin widths
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, 10]
+ a, b = histogram(v, bins, density=True)
+ assert_array_equal(a, .1)
+ assert_equal(np.sum(a * np.diff(b)), 1)
+
+ # Test that passing False works too
+ a, b = histogram(v, bins, density=False)
+ assert_array_equal(a, [1, 2, 3, 4])
+
+ # Variable bin widths are especially useful to deal with
+ # infinities.
+ v = np.arange(10)
+ bins = [0, 1, 3, 6, np.inf]
+ a, b = histogram(v, bins, density=True)
+ assert_array_equal(a, [.1, .1, .1, 0.])
+
+ # Taken from a bug report from N. Becker on the numpy-discussion
+ # mailing list Aug. 6, 2010.
+ counts, dmy = np.histogram(
+ [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
+ assert_equal(counts, [.25, 0])
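+
+    def test_density_formula_sketch(self):
+        # Illustrative sketch, not from the upstream suite: with
+        # density=True the normalization is assumed to be
+        # counts / (n_samples * bin_width) when every sample falls
+        # inside the bins, which is why the integrals above equal 1.
+        v = np.arange(10)
+        bins = [0, 1, 3, 6, 10]
+        counts, edges = histogram(v, bins)
+        density, _ = histogram(v, bins, density=True)
+        assert_allclose(density, counts / (v.size * np.diff(edges)))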
+
+ def test_outliers(self):
+ # Check that outliers are not tallied
+ a = np.arange(10) + .5
+
+ # Lower outliers
+ h, b = histogram(a, range=[0, 9])
+ assert_equal(h.sum(), 9)
+
+ # Upper outliers
+ h, b = histogram(a, range=[1, 10])
+ assert_equal(h.sum(), 9)
+
+ # Normalization
+ h, b = histogram(a, range=[1, 9], density=True)
+ assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
+
+ # Weights
+ w = np.arange(10) + .5
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+ assert_equal((h * np.diff(b)).sum(), 1)
+
+ h, b = histogram(a, bins=8, range=[1, 9], weights=w)
+ assert_equal(h, w[1:-1])
+
+ def test_arr_weights_mismatch(self):
+ a = np.arange(10) + .5
+ w = np.arange(11) + .5
+ with assert_raises_regex(ValueError, "same shape as"):
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+
+ def test_type(self):
+ # Check the type of the returned histogram
+ a = np.arange(10) + .5
+ h, b = histogram(a)
+ assert_(np.issubdtype(h.dtype, np.integer))
+
+ h, b = histogram(a, density=True)
+ assert_(np.issubdtype(h.dtype, np.floating))
+
+ h, b = histogram(a, weights=np.ones(10, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
+
+ h, b = histogram(a, weights=np.ones(10, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
+
+ def test_f32_rounding(self):
+ # gh-4799, check that the rounding of the edges works with float32
+ x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
+ y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
+ counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
+ assert_equal(counts_hist.sum(), 3.)
+
+ def test_bool_conversion(self):
+ # gh-12107
+ # Reference integer histogram
+ a = np.array([1, 1, 0], dtype=np.uint8)
+ int_hist, int_edges = np.histogram(a)
+
+        # Should raise a warning on booleans
+ # Ensure that the histograms are equivalent, need to suppress
+ # the warnings to get the actual outputs
+ with suppress_warnings() as sup:
+ rec = sup.record(RuntimeWarning, 'Converting input from .*')
+ hist, edges = np.histogram([True, True, False])
+ # A warning should be issued
+ assert_equal(len(rec), 1)
+ assert_array_equal(hist, int_hist)
+ assert_array_equal(edges, int_edges)
+
+ def test_weights(self):
+ v = np.random.rand(100)
+ w = np.ones(100) * 5
+ a, b = histogram(v)
+ na, nb = histogram(v, density=True)
+ wa, wb = histogram(v, weights=w)
+ nwa, nwb = histogram(v, weights=w, density=True)
+ assert_array_almost_equal(a * 5, wa)
+ assert_array_almost_equal(na, nwa)
+
+ # Check weights are properly applied.
+ v = np.linspace(0, 10, 10)
+ w = np.concatenate((np.zeros(5), np.ones(5)))
+ wa, wb = histogram(v, bins=np.arange(11), weights=w)
+ assert_array_almost_equal(wa, w)
+
+ # Check with integer weights
+ wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
+ assert_array_equal(wa, [4, 5, 0, 1])
+ wa, wb = histogram(
+ [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
+ assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
+
+ # Check weights with non-uniform bin widths
+ a, b = histogram(
+ np.arange(9), [0, 1, 3, 6, 10],
+ weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
+ assert_almost_equal(a, [.2, .1, .1, .075])
+
+ def test_exotic_weights(self):
+
+ # Test the use of weights that are not integer or floats, but e.g.
+ # complex numbers or object types.
+
+ # Complex weights
+ values = np.array([1.3, 2.5, 2.3])
+ weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
+
+ # Check with custom bins
+ wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+ assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+ # Check with even bins
+ wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+ assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+ # Decimal weights
+ from decimal import Decimal
+ values = np.array([1.3, 2.5, 2.3])
+ weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
+
+ # Check with custom bins
+ wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+ assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+ # Check with even bins
+ wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+ assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+ def test_no_side_effects(self):
+ # This is a regression test that ensures that values passed to
+ # ``histogram`` are unchanged.
+ values = np.array([1.3, 2.5, 2.3])
+ np.histogram(values, range=[-10, 10], bins=100)
+ assert_array_almost_equal(values, [1.3, 2.5, 2.3])
+
+ def test_empty(self):
+ a, b = histogram([], bins=([0, 1]))
+ assert_array_equal(a, np.array([0]))
+ assert_array_equal(b, np.array([0, 1]))
+
+    def test_error_binnum_type(self):
+        # Test that the right error is raised if the bins argument is a float
+ vals = np.linspace(0.0, 1.0, num=100)
+ histogram(vals, 5)
+ assert_raises(TypeError, histogram, vals, 2.4)
+
+ def test_finite_range(self):
+ # Normal ranges should be fine
+ vals = np.linspace(0.0, 1.0, num=100)
+        histogram(vals, range=[0.25, 0.75])
+        assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75])
+        assert_raises(ValueError, histogram, vals, range=[0.25, np.inf])
+
+ def test_invalid_range(self):
+ # start of range must be < end of range
+ vals = np.linspace(0.0, 1.0, num=100)
+ with assert_raises_regex(ValueError, "max must be larger than"):
+ np.histogram(vals, range=[0.1, 0.01])
+
+ def test_bin_edge_cases(self):
+ # Ensure that floating-point computations correctly place edge cases.
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
+ hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
+ mask = hist > 0
+ left_edges = edges[:-1][mask]
+ right_edges = edges[1:][mask]
+ for x, left, right in zip(arr, left_edges, right_edges):
+ assert_(x >= left)
+ assert_(x < right)
+
+ def test_last_bin_inclusive_range(self):
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
+ assert_equal(hist[-1], 1)
+
+ def test_bin_array_dims(self):
+ # gracefully handle bins object > 1 dimension
+ vals = np.linspace(0.0, 1.0, num=100)
+ bins = np.array([[0, 0.5], [0.6, 1.0]])
+ with assert_raises_regex(ValueError, "must be 1d"):
+ np.histogram(vals, bins=bins)
+
+ def test_unsigned_monotonicity_check(self):
+ # Ensures ValueError is raised if bins not increasing monotonically
+ # when bins contain unsigned values (see #9222)
+ arr = np.array([2])
+ bins = np.array([1, 3, 1], dtype='uint64')
+ with assert_raises(ValueError):
+ hist, edges = np.histogram(arr, bins=bins)
+
+ def test_object_array_of_0d(self):
+ # gh-7864
+ assert_raises(ValueError,
+ histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
+ assert_raises(ValueError,
+ histogram, [np.array(0.4) for i in range(10)] + [np.inf])
+
+ # these should not crash
+ np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
+ np.histogram([np.array(0.5) for i in range(10)] + [.5])
+
+ def test_some_nan_values(self):
+ # gh-7503
+ one_nan = np.array([0, 1, np.nan])
+ all_nan = np.array([np.nan, np.nan])
+
+ # the internal comparisons with NaN give warnings
+ sup = suppress_warnings()
+ sup.filter(RuntimeWarning)
+ with sup:
+ # can't infer range with nan
+ assert_raises(ValueError, histogram, one_nan, bins='auto')
+ assert_raises(ValueError, histogram, all_nan, bins='auto')
+
+ # explicit range solves the problem
+ h, b = histogram(one_nan, bins='auto', range=(0, 1))
+ assert_equal(h.sum(), 2) # nan is not counted
+ h, b = histogram(all_nan, bins='auto', range=(0, 1))
+ assert_equal(h.sum(), 0) # nan is not counted
+
+ # as does an explicit set of bins
+ h, b = histogram(one_nan, bins=[0, 1])
+ assert_equal(h.sum(), 2) # nan is not counted
+ h, b = histogram(all_nan, bins=[0, 1])
+ assert_equal(h.sum(), 0) # nan is not counted
+
+ def test_datetime(self):
+ begin = np.datetime64('2000-01-01', 'D')
+ offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
+ bins = np.array([0, 2, 7, 20])
+ dates = begin + offsets
+ date_bins = begin + bins
+
+ td = np.dtype('timedelta64[D]')
+
+ # Results should be the same for integer offsets or datetime values.
+ # For now, only explicit bins are supported, since linspace does not
+ # work on datetimes or timedeltas
+ d_count, d_edge = histogram(dates, bins=date_bins)
+ t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
+ i_count, i_edge = histogram(offsets, bins=bins)
+
+ assert_equal(d_count, i_count)
+ assert_equal(t_count, i_count)
+
+ assert_equal((d_edge - begin).astype(int), i_edge)
+ assert_equal(t_edge.astype(int), i_edge)
+
+ assert_equal(d_edge.dtype, dates.dtype)
+ assert_equal(t_edge.dtype, td)
+
+ def do_signed_overflow_bounds(self, dtype):
+ exponent = 8 * np.dtype(dtype).itemsize - 1
+ arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
+ hist, e = histogram(arr, bins=2)
+ assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
+ assert_equal(hist, [1, 1])
+
+ def test_signed_overflow_bounds(self):
+ self.do_signed_overflow_bounds(np.byte)
+ self.do_signed_overflow_bounds(np.short)
+ self.do_signed_overflow_bounds(np.intc)
+ self.do_signed_overflow_bounds(np.int_)
+ self.do_signed_overflow_bounds(np.longlong)
+
+ def do_precision_lower_bound(self, float_small, float_large):
+ eps = np.finfo(float_large).eps
+
+ arr = np.array([1.0], float_small)
+ range = np.array([1.0 + eps, 2.0], float_large)
+
+ # test is looking for behavior when the bounds change between dtypes
+ if range.astype(float_small)[0] != 1:
+ return
+
+ # previously crashed
+ count, x_loc = np.histogram(arr, bins=1, range=range)
+ assert_equal(count, [1])
+
+ # gh-10322 means that the type comes from arr - this may change
+ assert_equal(x_loc.dtype, float_small)
+
+ def do_precision_upper_bound(self, float_small, float_large):
+ eps = np.finfo(float_large).eps
+
+ arr = np.array([1.0], float_small)
+ range = np.array([0.0, 1.0 - eps], float_large)
+
+ # test is looking for behavior when the bounds change between dtypes
+ if range.astype(float_small)[-1] != 1:
+ return
+
+ # previously crashed
+ count, x_loc = np.histogram(arr, bins=1, range=range)
+ assert_equal(count, [1])
+
+ # gh-10322 means that the type comes from arr - this may change
+ assert_equal(x_loc.dtype, float_small)
+
+ def do_precision(self, float_small, float_large):
+ self.do_precision_lower_bound(float_small, float_large)
+ self.do_precision_upper_bound(float_small, float_large)
+
+ def test_precision(self):
+ # not looping results in a useful stack trace upon failure
+ self.do_precision(np.half, np.single)
+ self.do_precision(np.half, np.double)
+ self.do_precision(np.half, np.longdouble)
+ self.do_precision(np.single, np.double)
+ self.do_precision(np.single, np.longdouble)
+ self.do_precision(np.double, np.longdouble)
+
+ def test_histogram_bin_edges(self):
+ hist, e = histogram([1, 2, 3, 4], [1, 2])
+ edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
+ assert_array_equal(edges, e)
+
+ arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
+ hist, e = histogram(arr, bins=30, range=(-0.5, 5))
+ edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
+ assert_array_equal(edges, e)
+
+ hist, e = histogram(arr, bins='auto', range=(0, 1))
+ edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
+ assert_array_equal(edges, e)
+
+ @requires_memory(free_bytes=1e10)
+ @pytest.mark.slow
+ def test_big_arrays(self):
+ sample = np.zeros([100000000, 3])
+ xbins = 400
+ ybins = 400
+ zbins = np.arange(16000)
+ hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
+ assert_equal(type(hist), type((1, 2)))
+
+
+class TestHistogramOptimBinNums:
+ """
+    Provide test coverage for the built-in estimators of the optimal
+    number of bins.
+ """
+
+ def test_empty(self):
+ estimator_list = ['fd', 'scott', 'rice', 'sturges',
+ 'doane', 'sqrt', 'auto', 'stone']
+ # check it can deal with empty data
+ for estimator in estimator_list:
+ a, b = histogram([], bins=estimator)
+ assert_array_equal(a, np.array([0]))
+ assert_array_equal(b, np.array([0, 1]))
+
+ def test_simple(self):
+ """
+ Straightforward testing with a mixture of linspace data (for
+ consistency). All test values have been precomputed and the values
+ shouldn't change
+ """
+ # Some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
+ 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
+ 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
+ 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
+ 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
+ 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
+
+ for testlen, expectedResults in basic_test.items():
+            # Create some sort of non-uniform data to test with
+            # (a two-peak uniform mixture)
+ x1 = np.linspace(-10, -1, testlen // 5 * 2)
+ x2 = np.linspace(1, 10, testlen // 5 * 3)
+ x = np.concatenate((x1, x2))
+ for estimator, numbins in expectedResults.items():
+ a, b = np.histogram(x, estimator)
+ assert_equal(len(a), numbins, err_msg="For the {0} estimator "
+ "with datasize of {1}".format(estimator, testlen))
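+
+    def test_estimator_formula_sketch(self):
+        # Illustrative sketch, not from the upstream suite: the
+        # 'sturges' and 'rice' entries in basic_test above are assumed
+        # to follow the textbook bin counts ceil(log2(n) + 1) and
+        # ceil(2 * n**(1/3)) for this evenly spaced data.
+        for n, sturges, rice in [(50, 7, 8), (500, 10, 16), (5000, 14, 35)]:
+            assert int(np.ceil(np.log2(n) + 1)) == sturges
+            assert int(np.ceil(2 * n ** (1 / 3))) == rice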
+
+ def test_small(self):
+ """
+ Smaller datasets have the potential to cause issues with the data
+ adaptive methods, especially the FD method. All bin numbers have been
+ precalculated.
+ """
+ small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+ 'doane': 1, 'sqrt': 1, 'stone': 1},
+ 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
+ 'doane': 1, 'sqrt': 2, 'stone': 1},
+ 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
+ 'doane': 3, 'sqrt': 2, 'stone': 1}}
+
+ for testlen, expectedResults in small_dat.items():
+ testdat = np.arange(testlen)
+ for estimator, expbins in expectedResults.items():
+ a, b = np.histogram(testdat, estimator)
+ assert_equal(len(a), expbins, err_msg="For the {0} estimator "
+ "with datasize of {1}".format(estimator, testlen))
+
+ def test_incorrect_methods(self):
+ """
+        Check that a ValueError is raised when an unknown string is passed in
+ """
+ check_list = ['mad', 'freeman', 'histograms', 'IQR']
+ for estimator in check_list:
+ assert_raises(ValueError, histogram, [1, 2, 3], estimator)
+
+ def test_novariance(self):
+ """
+ Check that methods handle no variance in data
+ Primarily for Scott and FD as the SD and IQR are both 0 in this case
+ """
+ novar_dataset = np.ones(100)
+ novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
+ 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
+
+ for estimator, numbins in novar_resultdict.items():
+ a, b = np.histogram(novar_dataset, estimator)
+ assert_equal(len(a), numbins, err_msg="{0} estimator, "
+ "No Variance test".format(estimator))
+
+ def test_limited_variance(self):
+ """
+        Check that when the IQR is 0 but variance exists, we return the
+        sturges value and not the fd value.
+ """
+ lim_var_data = np.ones(1000)
+ lim_var_data[:3] = 0
+ lim_var_data[-4:] = 100
+
+ edges_auto = histogram_bin_edges(lim_var_data, 'auto')
+ assert_equal(edges_auto, np.linspace(0, 100, 12))
+
+ edges_fd = histogram_bin_edges(lim_var_data, 'fd')
+ assert_equal(edges_fd, np.array([0, 100]))
+
+ edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
+ assert_equal(edges_sturges, np.linspace(0, 100, 12))
+
+ def test_outlier(self):
+ """
+ Check the FD, Scott and Doane with outliers.
+
+ The FD estimates a smaller binwidth since it's less affected by
+ outliers. Since the range is so (artificially) large, this means more
+        bins, most of which will be empty, but the data of interest is usually
+        unaffected. The Scott estimator is more affected and returns fewer bins,
+ despite most of the variance being in one area of the data. The Doane
+ estimator lies somewhere between the other two.
+ """
+ xcenter = np.linspace(-10, 10, 50)
+ outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
+
+ outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
+
+ for estimator, numbins in outlier_resultdict.items():
+ a, b = np.histogram(outlier_dataset, estimator)
+ assert_equal(len(a), numbins)
+
+ def test_scott_vs_stone(self):
+        """Verify that Scott's rule and Stone's rule converge for normally distributed data."""
+
+ def nbins_ratio(seed, size):
+ rng = np.random.RandomState(seed)
+ x = rng.normal(loc=0, scale=2, size=size)
+ a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
+ return a / (a + b)
+
+ ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
+ for seed in range(10)]
+
+ # the average difference between the two methods decreases as the dataset size increases.
+ avg = abs(np.mean(ll, axis=0) - 0.5)
+ assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
+
+ def test_simple_range(self):
+ """
+ Straightforward testing with a mixture of linspace data (for
+ consistency). Adding in a 3rd mixture that will then be
+ completely ignored. All test values have been precomputed and
+        they shouldn't change.
+ """
+ # some basic sanity checking, with some fixed data.
+ # Checking for the correct number of bins
+ basic_test = {
+ 50: {'fd': 8, 'scott': 8, 'rice': 15,
+ 'sturges': 14, 'auto': 14, 'stone': 8},
+ 500: {'fd': 15, 'scott': 16, 'rice': 32,
+ 'sturges': 20, 'auto': 20, 'stone': 80},
+ 5000: {'fd': 33, 'scott': 33, 'rice': 69,
+ 'sturges': 27, 'auto': 33, 'stone': 80}
+ }
+
+ for testlen, expectedResults in basic_test.items():
+            # create some sort of non-uniform data to test with
+            # (a three-peak uniform mixture)
+ x1 = np.linspace(-10, -1, testlen // 5 * 2)
+ x2 = np.linspace(1, 10, testlen // 5 * 3)
+ x3 = np.linspace(-100, -50, testlen)
+ x = np.hstack((x1, x2, x3))
+ for estimator, numbins in expectedResults.items():
+                a, b = np.histogram(x, estimator, range=(-20, 20))
+ msg = "For the {0} estimator".format(estimator)
+ msg += " with datasize of {0}".format(testlen)
+ assert_equal(len(a), numbins, err_msg=msg)
+
+ @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
+ 'stone', 'rice', 'sturges'])
+ def test_signed_integer_data(self, bins):
+ # Regression test for gh-14379.
+ a = np.array([-2, 0, 127], dtype=np.int8)
+ hist, edges = np.histogram(a, bins=bins)
+ hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
+ assert_array_equal(hist, hist32)
+ assert_array_equal(edges, edges32)
+
+ def test_simple_weighted(self):
+ """
+ Check that weighted data raises a TypeError
+ """
+ estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
+ for estimator in estimator_list:
+ assert_raises(TypeError, histogram, [1, 2, 3],
+ estimator, weights=[1, 2, 3])
+
+
+class TestHistogramdd:
+
+ def test_simple(self):
+ x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
+ [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
+ H, edges = histogramdd(x, (2, 3, 3),
+ range=[[-1, 1], [0, 3], [0, 3]])
+ answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
+ [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
+ assert_array_equal(H, answer)
+
+ # Check normalization
+ ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
+ H, edges = histogramdd(x, bins=ed, density=True)
+ assert_(np.all(H == answer / 12.))
+
+ # Check that H has the correct shape.
+ H, edges = histogramdd(x, (2, 3, 4),
+ range=[[-1, 1], [0, 3], [0, 4]],
+ density=True)
+ answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
+ [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
+ assert_array_almost_equal(H, answer / 6., 4)
+ # Check that a sequence of arrays is accepted and H has the correct
+ # shape.
+ z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
+ H, edges = histogramdd(
+ z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
+ answer = np.array([[[0, 0], [0, 0], [0, 0]],
+ [[0, 1], [0, 0], [1, 0]],
+ [[0, 1], [0, 0], [0, 0]],
+ [[0, 0], [0, 0], [0, 0]]])
+ assert_array_equal(H, answer)
+
+ Z = np.zeros((5, 5, 5))
+ Z[list(range(5)), list(range(5)), list(range(5))] = 1.
+ H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
+ assert_array_equal(H, Z)
+
+ def test_shape_3d(self):
+ # All possible permutations for bins of different lengths in 3D.
+ bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
+ (4, 5, 6))
+ r = np.random.rand(10, 3)
+ for b in bins:
+ H, edges = histogramdd(r, b)
+ assert_(H.shape == b)
+
+ def test_shape_4d(self):
+ # All possible permutations for bins of different lengths in 4D.
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
+ (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
+ (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
+ (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
+ (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
+ (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
+
+ r = np.random.rand(10, 4)
+ for b in bins:
+ H, edges = histogramdd(r, b)
+ assert_(H.shape == b)
+
+ def test_weights(self):
+ v = np.random.rand(100, 2)
+ hist, edges = histogramdd(v)
+ n_hist, edges = histogramdd(v, density=True)
+ w_hist, edges = histogramdd(v, weights=np.ones(100))
+ assert_array_equal(w_hist, hist)
+ w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
+ assert_array_equal(w_hist, n_hist)
+ w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
+ assert_array_equal(w_hist, 2 * hist)
+
+ def test_identical_samples(self):
+ x = np.zeros((10, 2), int)
+ hist, edges = histogramdd(x, bins=2)
+ assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
+
+ def test_empty(self):
+ a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
+ assert_array_max_ulp(a, np.array([[0.]]))
+ a, b = np.histogramdd([[], [], []], bins=2)
+ assert_array_max_ulp(a, np.zeros((2, 2, 2)))
+
+ def test_bins_errors(self):
+ # There are two ways to specify bins. Check for the right errors
+ # when mixing those.
+ x = np.arange(8).reshape(2, 4)
+ assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
+ assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
+ assert_raises(
+ ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
+ assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
+
+ def test_inf_edges(self):
+ # Test using +/-inf bin edges works. See #1788.
+ with np.errstate(invalid='ignore'):
+ x = np.arange(6).reshape(3, 2)
+ expected = np.array([[1, 0], [0, 1], [0, 1]])
+ h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
+ assert_allclose(h, expected)
+ h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
+ assert_allclose(h, expected)
+ h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
+ assert_allclose(h, expected)
+
+ def test_rightmost_binedge(self):
+        # Test an event very close to the rightmost bin edge. See GitHub issue #4266
+ x = [0.9999999995]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 1.)
+ x = [1.0]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 1.)
+ x = [1.0000000001]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 0.0)
+ x = [1.0001]
+ bins = [[0., 0.5, 1.0]]
+ hist, _ = histogramdd(x, bins=bins)
+ assert_(hist[0] == 0.0)
+ assert_(hist[1] == 0.0)
+
+ def test_finite_range(self):
+ vals = np.random.random((100, 3))
+ histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
+
+ def test_equal_edges(self):
+ """ Test that adjacent entries in an edge array can be equal """
+ x = np.array([0, 1, 2])
+ y = np.array([0, 1, 2])
+ x_edges = np.array([0, 2, 2])
+ y_edges = 1
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ hist_expected = np.array([
+ [2.],
+ [1.], # x == 2 falls in the final bin
+ ])
+ assert_equal(hist, hist_expected)
+
+ def test_edge_dtype(self):
+ """ Test that if an edge array is input, its type is preserved """
+ x = np.array([0, 10, 20])
+ y = x / 10
+ x_edges = np.array([0, 5, 15, 20])
+ y_edges = x_edges / 10
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(edges[0].dtype, x_edges.dtype)
+ assert_equal(edges[1].dtype, y_edges.dtype)
+
+ def test_large_integers(self):
+ big = 2**60 # Too large to represent with a full precision float
+
+ x = np.array([0], np.int64)
+ x_edges = np.array([-1, +1], np.int64)
+ y = big + x
+ y_edges = big + x_edges
+
+ hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
+
+ assert_equal(hist[0, 0], 1)
+
+ def test_density_non_uniform_2d(self):
+ # Defines the following grid:
+ #
+ # 0 2 8
+ # 0+-+-----+
+ # + | +
+ # + | +
+ # 6+-+-----+
+ # 8+-+-----+
+ x_edges = np.array([0, 2, 8])
+ y_edges = np.array([0, 6, 8])
+ relative_areas = np.array([
+ [3, 9],
+ [1, 3]])
+
+ # ensure the number of points in each region is proportional to its area
+ x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
+ y = np.array([7] + [1]*3 + [7]*3 + [1]*9)
+
+ # sanity check that the above worked as intended
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
+ assert_equal(hist, relative_areas)
+
+ # resulting histogram should be uniform, since counts and areas are proportional
+ hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
+ assert_equal(hist, 1 / (8*8))
+
+ def test_density_non_uniform_1d(self):
+ # compare to histogram to show the results are the same
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ hist, edges = histogram(v, bins, density=True)
+ hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
+ assert_equal(hist, hist_dd)
+ assert_equal(edges, edges_dd[0])
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py
new file mode 100644
index 00000000..b599cb34
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_index_tricks.py
@@ -0,0 +1,551 @@
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_raises_regex,
+ )
+from numpy.lib.index_tricks import (
+ mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
+ index_exp, ndindex, r_, s_, ix_
+ )
+
+
+class TestRavelUnravelIndex:
+ def test_basic(self):
+ assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
+
+ # test that new shape argument works properly
+ assert_equal(np.unravel_index(indices=2,
+ shape=(2, 2)),
+ (1, 0))
+
+ # test that an invalid second keyword argument
+ # is properly handled, including the old name `dims`.
+ with assert_raises(TypeError):
+ np.unravel_index(indices=2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(254, ims=(17, 94))
+
+ with assert_raises(TypeError):
+ np.unravel_index(254, dims=(17, 94))
+
+ assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
+ assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
+ assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
+ assert_raises(ValueError, np.unravel_index, -1, (2, 2))
+ assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
+ assert_raises(ValueError, np.unravel_index, 4, (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
+ assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
+ assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))
+
+ assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4])
+ assert_equal(
+ np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4)
+
+ arr = np.array([[3, 6, 6], [4, 5, 1]])
+ assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
+ assert_equal(
+ np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
+ assert_equal(
+ np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
+ assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
+ [12, 13, 13])
+ assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)
+
+ assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
+ [[3, 6, 6], [4, 5, 1]])
+ assert_equal(
+ np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
+ [[3, 6, 6], [4, 5, 1]])
+ assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
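+
+    def test_inverse_relationship_sketch(self):
+        # Illustrative sketch, not from the upstream suite:
+        # unravel_index and ravel_multi_index are inverses for in-range
+        # indices, which the spot checks above rely on.
+        shape = (4, 3, 6)
+        for flat in [0, 17, 4 * 3 * 6 - 1]:
+            coords = np.unravel_index(flat, shape)
+            assert np.ravel_multi_index(coords, shape) == flat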
+
+ def test_empty_indices(self):
+ msg1 = 'indices must be integral: the provided empty sequence was'
+ msg2 = 'only int indices permitted'
+ assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
+ assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
+ assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
+ (10, 3, 5))
+ assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)),
+ [[], [], []])
+ assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
+ (10, 3))
+ assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),
+ (10, 3))
+ assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
+ (np.array([]), np.array([])), (5, 3))
+ assert_equal(np.ravel_multi_index(
+ (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])
+ assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
+ (5, 3)), [])
+
+ def test_big_indices(self):
+ # ravel_multi_index for big indices (issue #7546)
+ if np.intp == np.int64:
+ arr = ([1, 29], [3, 5], [3, 117], [19, 2],
+ [2379, 1284], [2, 2], [0, 1])
+ assert_equal(
+ np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
+ [5627771580, 117259570957])
+
+ # test unravel_index for big indices (issue #9538)
+ assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1))
+
+ # test overflow checking for too big array (issue #7546)
+ dummy_arr = ([0],[0])
+ half_max = np.iinfo(np.intp).max // 2
+ assert_equal(
+ np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
+ assert_raises(ValueError,
+ np.ravel_multi_index, dummy_arr, (half_max+1, 2))
+ assert_equal(
+ np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
+ assert_raises(ValueError,
+ np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F')
+
+ def test_dtypes(self):
+ # Test with different data types
+ for dtype in [np.int16, np.uint16, np.int32,
+ np.uint32, np.int64, np.uint64]:
+ coords = np.array(
+ [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
+ shape = (5, 8)
+ uncoords = 8*coords[0]+coords[1]
+ assert_equal(np.ravel_multi_index(coords, shape), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape))
+ uncoords = coords[0]+5*coords[1]
+ assert_equal(
+ np.ravel_multi_index(coords, shape, order='F'), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
+
+ coords = np.array(
+ [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
+ dtype=dtype)
+ shape = (5, 8, 10)
+ uncoords = 10*(8*coords[0]+coords[1])+coords[2]
+ assert_equal(np.ravel_multi_index(coords, shape), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape))
+ uncoords = coords[0]+5*(coords[1]+8*coords[2])
+ assert_equal(
+ np.ravel_multi_index(coords, shape, order='F'), uncoords)
+ assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
+
+ def test_clipmodes(self):
+ # Test clipmodes
+ assert_equal(
+ np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
+ np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
+ assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
+ mode=(
+ 'wrap', 'raise', 'clip', 'raise')),
+ np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
+ assert_raises(
+ ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))
+
+ def test_writeability(self):
+ # See gh-7269
+ x, y = np.unravel_index([1, 2, 3], (4, 5))
+ assert_(x.flags.writeable)
+ assert_(y.flags.writeable)
+
+ def test_0d(self):
+ # gh-580
+ x = np.unravel_index(0, ())
+ assert_equal(x, ())
+
+ assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
+ assert_raises_regex(
+ ValueError, "out of bounds", np.unravel_index, [1], ())
+
+ @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
+ def test_empty_array_ravel(self, mode):
+ res = np.ravel_multi_index(
+ np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
+        assert res.shape == (0,)
+
+ with assert_raises(ValueError):
+ np.ravel_multi_index(
+ np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
+
+ def test_empty_array_unravel(self):
+ res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
+ # res is a tuple of three empty arrays
+        assert len(res) == 3
+        assert all(a.shape == (0,) for a in res)
+
+ with assert_raises(ValueError):
+ np.unravel_index([1], (2, 1, 0))
+
+
+class TestGrid:
+ def test_basic(self):
+ a = mgrid[-1:1:10j]
+ b = mgrid[-1:1:0.1]
+ assert_(a.shape == (10,))
+ assert_(b.shape == (20,))
+ assert_(a[0] == -1)
+ assert_almost_equal(a[-1], 1)
+ assert_(b[0] == -1)
+ assert_almost_equal(b[1]-b[0], 0.1, 11)
+ assert_almost_equal(b[-1], b[0]+19*0.1, 11)
+ assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)
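+
+    def test_step_semantics_sketch(self):
+        # Illustrative sketch, not from the upstream suite: a complex
+        # step such as 10j is an inclusive point count (like
+        # np.linspace), while a real step such as 0.1 is an exclusive
+        # spacing (like np.arange), which explains the shapes checked
+        # in test_basic.
+        assert_array_almost_equal(mgrid[-1:1:10j], np.linspace(-1, 1, 10))
+        assert_array_almost_equal(mgrid[-1:1:0.1], np.arange(-1, 1, 0.1))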
+
+ def test_linspace_equivalence(self):
+ y, st = np.linspace(2, 10, retstep=True)
+ assert_almost_equal(st, 8/49.0)
+ assert_array_almost_equal(y, mgrid[2:10:50j], 13)
+
+ def test_nd(self):
+ c = mgrid[-1:1:10j, -2:2:10j]
+ d = mgrid[-1:1:0.1, -2:2:0.2]
+ assert_(c.shape == (2, 10, 10))
+ assert_(d.shape == (2, 20, 20))
+ assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
+ assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd'))
+ assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
+ assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11)
+ assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
+ 0.1*np.ones(20, 'd'), 11)
+ assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
+ 0.2*np.ones(20, 'd'), 11)
+
+ def test_sparse(self):
+ grid_full = mgrid[-1:1:10j, -2:2:10j]
+ grid_sparse = ogrid[-1:1:10j, -2:2:10j]
+
+ # sparse grids can be made dense by broadcasting
+ grid_broadcast = np.broadcast_arrays(*grid_sparse)
+ for f, b in zip(grid_full, grid_broadcast):
+ assert_equal(f, b)
+
+ @pytest.mark.parametrize("start, stop, step, expected", [
+ (None, 10, 10j, (200, 10)),
+ (-10, 20, None, (1800, 30)),
+ ])
+ def test_mgrid_size_none_handling(self, start, stop, step, expected):
+        # regression test for None value handling of
+ # start and step values used by mgrid;
+ # internally, this aims to cover previously
+ # unexplored code paths in nd_grid()
+ grid = mgrid[start:stop:step, start:stop:step]
+ # need a smaller grid to explore one of the
+ # untested code paths
+ grid_small = mgrid[start:stop:step]
+ assert_equal(grid.size, expected[0])
+ assert_equal(grid_small.size, expected[1])
+
+ def test_accepts_npfloating(self):
+ # regression test for #16466
+ grid64 = mgrid[0.1:0.33:0.1, ]
+ grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ]
+ assert_(grid32.dtype == np.float64)
+ assert_array_almost_equal(grid64, grid32)
+
+ # different code path for single slice
+ grid64 = mgrid[0.1:0.33:0.1]
+ grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)]
+ assert_(grid32.dtype == np.float64)
+ assert_array_almost_equal(grid64, grid32)
+
+ def test_accepts_longdouble(self):
+ # regression tests for #16945
+ grid64 = mgrid[0.1:0.33:0.1, ]
+ grid128 = mgrid[
+ np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1),
+ ]
+ assert_(grid128.dtype == np.longdouble)
+ assert_array_almost_equal(grid64, grid128)
+
+ grid128c_a = mgrid[0:np.longdouble(1):3.4j]
+ grid128c_b = mgrid[0:np.longdouble(1):3.4j, ]
+ assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble)
+ assert_array_equal(grid128c_a, grid128c_b[0])
+
+ # different code path for single slice
+ grid64 = mgrid[0.1:0.33:0.1]
+ grid128 = mgrid[
+ np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1)
+ ]
+ assert_(grid128.dtype == np.longdouble)
+ assert_array_almost_equal(grid64, grid128)
+
+ def test_accepts_npcomplexfloating(self):
+ # Related to #16466
+ assert_array_almost_equal(
+ mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ]
+ )
+
+ # different code path for single slice
+ assert_array_almost_equal(
+ mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)]
+ )
+
+ # Related to #16945
+ grid64_a = mgrid[0.1:0.3:3.3j]
+ grid64_b = mgrid[0.1:0.3:3.3j, ][0]
+ assert_(grid64_a.dtype == grid64_b.dtype == np.float64)
+ assert_array_equal(grid64_a, grid64_b)
+
+ grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)]
+ grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0]
+ assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble)
+        assert_array_equal(grid128_a, grid128_b)
+
+
+class TestConcatenator:
+ def test_1d(self):
+ assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
+ b = np.ones(5)
+ c = r_[b, 0, 0, b]
+ assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
+
+ def test_mixed_type(self):
+ g = r_[10.1, 1:10]
+ assert_(g.dtype == 'f8')
+
+ def test_more_mixed_type(self):
+ g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
+ assert_(g.dtype == 'f8')
+
+ def test_complex_step(self):
+ # Regression test for #12262
+ g = r_[0:36:100j]
+ assert_(g.shape == (100,))
+
+ # Related to #16466
+ g = r_[0:36:np.complex64(100j)]
+ assert_(g.shape == (100,))
+
+ def test_2d(self):
+ b = np.random.rand(5, 5)
+ c = np.random.rand(5, 5)
+ d = r_['1', b, c] # append columns
+ assert_(d.shape == (5, 10))
+ assert_array_equal(d[:, :5], b)
+ assert_array_equal(d[:, 5:], c)
+ d = r_[b, c]
+ assert_(d.shape == (10, 5))
+ assert_array_equal(d[:5, :], b)
+ assert_array_equal(d[5:, :], c)
+
+ def test_0d(self):
+ assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
+ assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
+ assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
+
+
+class TestNdenumerate:
+ def test_basic(self):
+ a = np.array([[1, 2], [3, 4]])
+ assert_equal(list(ndenumerate(a)),
+ [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
+
+
+class TestIndexExpression:
+ def test_regression_1(self):
+ # ticket #1196
+ a = np.arange(2)
+ assert_equal(a[:-1], a[s_[:-1]])
+ assert_equal(a[:-1], a[index_exp[:-1]])
+
+ def test_simple_1(self):
+ a = np.random.rand(4, 5, 6)
+
+ assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])
+ assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
+
+
+class TestIx_:
+ def test_regression_1(self):
+ # Test empty untyped inputs create outputs of indexing type, gh-5804
+ a, = np.ix_(range(0))
+ assert_equal(a.dtype, np.intp)
+
+ a, = np.ix_([])
+ assert_equal(a.dtype, np.intp)
+
+ # but if the type is specified, don't change it
+ a, = np.ix_(np.array([], dtype=np.float32))
+ assert_equal(a.dtype, np.float32)
+
+ def test_shape_and_dtype(self):
+ sizes = (4, 5, 3, 2)
+ # Test both lists and arrays
+ for func in (range, np.arange):
+ arrays = np.ix_(*[func(sz) for sz in sizes])
+ for k, (a, sz) in enumerate(zip(arrays, sizes)):
+ assert_equal(a.shape[k], sz)
+ assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
+ assert_(np.issubdtype(a.dtype, np.integer))
+
+ def test_bool(self):
+ bool_a = [True, False, True, True]
+ int_a, = np.nonzero(bool_a)
+ assert_equal(np.ix_(bool_a)[0], int_a)
+
+ def test_1d_only(self):
+ idx2d = [[1, 2, 3], [4, 5, 6]]
+ assert_raises(ValueError, np.ix_, idx2d)
+
+ def test_repeated_input(self):
+ length_of_vector = 5
+ x = np.arange(length_of_vector)
+ out = ix_(x, x)
+ assert_equal(out[0].shape, (length_of_vector, 1))
+ assert_equal(out[1].shape, (1, length_of_vector))
+ # check that input shape is not modified
+ assert_equal(x.shape, (length_of_vector,))
+
+
+def test_c_():
+ a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
+ assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
+
+
+class TestFillDiagonal:
+ def test_basic(self):
+ a = np.zeros((3, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+ )
+
+ def test_tall_matrix(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ )
+
+ def test_tall_matrix_wrap(self):
+ a = np.zeros((10, 3), int)
+ fill_diagonal(a, 5, True)
+ assert_array_equal(
+ a, np.array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5],
+ [0, 0, 0],
+ [5, 0, 0],
+ [0, 5, 0]])
+ )
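+
+    def test_wrap_period_sketch(self):
+        # Illustrative sketch, not from the upstream suite: with
+        # wrap=True on a tall matrix the diagonal restarts every
+        # ncols + 1 rows, matching the pattern spelled out above.
+        a = np.zeros((10, 3), int)
+        fill_diagonal(a, 5, True)
+        rows, cols = np.nonzero(a)
+        assert_array_equal(cols, rows % 4)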
+
+ def test_wide_matrix(self):
+ a = np.zeros((3, 10), int)
+ fill_diagonal(a, 5)
+ assert_array_equal(
+ a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
+ )
+
+ def test_operate_4d_array(self):
+ a = np.zeros((3, 3, 3, 3), int)
+ fill_diagonal(a, 4)
+ i = np.array([0, 1, 2])
+ assert_equal(np.where(a != 0), (i, i, i, i))
+
+ def test_low_dim_handling(self):
+ # raise error with low dimensionality
+ a = np.zeros(3, int)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ fill_diagonal(a, 5)
+
+ def test_hetero_shape_handling(self):
+ # raise error with high dimensionality and
+ # shape mismatch
+        a = np.zeros((3, 3, 7, 3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ fill_diagonal(a, 2)
+
+
+def test_diag_indices():
+ di = diag_indices(4)
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ a[di] = 100
+ assert_array_equal(
+ a, np.array([[100, 2, 3, 4],
+ [5, 100, 7, 8],
+ [9, 10, 100, 12],
+ [13, 14, 15, 100]])
+ )
+
+ # Now, we create indices to manipulate a 3-d array:
+ d3 = diag_indices(2, 3)
+
+ # And use it to set the diagonal of a zeros array to 1:
+ a = np.zeros((2, 2, 2), int)
+ a[d3] = 1
+ assert_array_equal(
+ a, np.array([[[1, 0],
+ [0, 0]],
+ [[0, 0],
+ [0, 1]]])
+ )
+
+
+class TestDiagIndicesFrom:
+
+ def test_diag_indices_from(self):
+ x = np.random.random((4, 4))
+ r, c = diag_indices_from(x)
+ assert_array_equal(r, np.arange(4))
+ assert_array_equal(c, np.arange(4))
+
+ def test_error_small_input(self):
+ x = np.ones(7)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ diag_indices_from(x)
+
+ def test_error_shape_mismatch(self):
+ x = np.zeros((3, 3, 2, 3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ diag_indices_from(x)
+
+
+def test_ndindex():
+ x = list(ndindex(1, 2, 3))
+ expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
+ assert_array_equal(x, expected)
+
+ x = list(ndindex((1, 2, 3)))
+ assert_array_equal(x, expected)
+
+ # Test use of scalars and tuples
+ x = list(ndindex((3,)))
+ assert_array_equal(x, list(ndindex(3)))
+
+ # Make sure size argument is optional
+ x = list(ndindex())
+ assert_equal(x, [()])
+
+ x = list(ndindex(()))
+ assert_equal(x, [()])
+
+ # Make sure 0-sized ndindex works correctly
+ x = list(ndindex(*[0]))
+ assert_equal(x, [])
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_io.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_io.py
new file mode 100644
index 00000000..3af2e6f4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_io.py
@@ -0,0 +1,2749 @@
+import sys
+import gc
+import gzip
+import os
+import threading
+import time
+import warnings
+import io
+import re
+import pytest
+from pathlib import Path
+from tempfile import NamedTemporaryFile
+from io import BytesIO, StringIO
+from datetime import datetime
+import locale
+from multiprocessing import Value, get_context
+from ctypes import c_bool
+
+import numpy as np
+import numpy.ma as ma
+from numpy.lib._iotools import ConverterError, ConversionWarning
+from numpy.compat import asbytes
+from numpy.ma.testutils import assert_equal
+from numpy.testing import (
+ assert_warns, assert_, assert_raises_regex, assert_raises,
+ assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
+ HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings,
+ break_cycles, IS_WASM
+ )
+from numpy.testing._private.utils import requires_memory
+
+
+class TextIO(BytesIO):
+ """Helper IO class.
+
+ Writes encode strings to bytes if needed; reads return bytes.
+ This makes it easier to emulate files opened in binary mode
+ without needing to explicitly convert strings to bytes when
+ setting up the test data.
+
+ """
+ def __init__(self, s=""):
+ BytesIO.__init__(self, asbytes(s))
+
+ def write(self, s):
+ BytesIO.write(self, asbytes(s))
+
+ def writelines(self, lines):
+ BytesIO.writelines(self, [asbytes(s) for s in lines])
+
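+# Illustrative usage (not part of the original suite): TextIO accepts str
+# on write but always returns bytes on read, like a binary-mode file:
+#
+# c = TextIO()
+# c.write('1 2\n') # str input is encoded via asbytes()
+# c.seek(0)
+# c.read() # -> b'1 2\n'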
+
+IS_64BIT = sys.maxsize > 2**32
+try:
+ import bz2
+ HAS_BZ2 = True
+except ImportError:
+ HAS_BZ2 = False
+try:
+ import lzma
+ HAS_LZMA = True
+except ImportError:
+ HAS_LZMA = False
+
+
+def strptime(s, fmt=None):
+ """
+ Parse a date string (bytes or str) with ``time.strptime`` and return
+ a ``datetime`` carrying only the date components (year, month, day).
+
+ """
+ if isinstance(s, bytes):
+ s = s.decode("latin1")
+ return datetime(*time.strptime(s, fmt)[:3])
+
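+# For example (illustrative), the helper accepts bytes or str and discards
+# the time-of-day fields:
+# strptime("2009-02-03 12:00:00Z", "%Y-%m-%d %H:%M:%SZ")
+# -> datetime.datetime(2009, 2, 3, 0, 0)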
+
+class RoundtripTest:
+ def roundtrip(self, save_func, *args, **kwargs):
+ """
+ save_func : callable
+ Function used to save arrays to file.
+ *args : tuple of arrays
+ Arrays stored to file.
+ save_kwds : dict, optional
+ Keyword parameters passed on to `save_func`.
+ load_kwds : dict, optional
+ Keyword parameters passed on to `numpy.load`.
+ file_on_disk : bool, optional
+ If true, store the file on disk instead of in a
+ BytesIO buffer.
+
+ """
+ save_kwds = kwargs.get('save_kwds', {})
+ load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
+ file_on_disk = kwargs.get('file_on_disk', False)
+
+ if file_on_disk:
+ target_file = NamedTemporaryFile(delete=False)
+ load_file = target_file.name
+ else:
+ target_file = BytesIO()
+ load_file = target_file
+
+ try:
+ arr = args
+
+ save_func(target_file, *arr, **save_kwds)
+ target_file.flush()
+ target_file.seek(0)
+
+ if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
+ target_file.close()
+
+ arr_reloaded = np.load(load_file, **load_kwds)
+
+ self.arr = arr
+ self.arr_reloaded = arr_reloaded
+ finally:
+ if not isinstance(target_file, BytesIO):
+ target_file.close()
+ # holds an open file descriptor so it can't be deleted on win
+ if 'arr_reloaded' in locals():
+ if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
+ os.remove(target_file.name)
+
+ def check_roundtrips(self, a):
+ self.roundtrip(a)
+ self.roundtrip(a, file_on_disk=True)
+ self.roundtrip(np.asfortranarray(a))
+ self.roundtrip(np.asfortranarray(a), file_on_disk=True)
+ if a.shape[0] > 1:
+ # neither C nor Fortran contiguous for 2D arrays or more
+ self.roundtrip(np.asfortranarray(a)[1:])
+ self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
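+ # Illustrative note: slicing the first axis of a Fortran-ordered 2-D
+ # array yields a view that is neither C- nor F-contiguous, e.g.
+ # np.asfortranarray(np.ones((3, 2)))[1:] has both
+ # flags.c_contiguous and flags.f_contiguous set to False.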
+
+ def test_array(self):
+ a = np.array([], float)
+ self.check_roundtrips(a)
+
+ a = np.array([[1, 2], [3, 4]], float)
+ self.check_roundtrips(a)
+
+ a = np.array([[1, 2], [3, 4]], int)
+ self.check_roundtrips(a)
+
+ a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
+ self.check_roundtrips(a)
+
+ a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
+ self.check_roundtrips(a)
+
+ def test_array_object(self):
+ a = np.array([], object)
+ self.check_roundtrips(a)
+
+ a = np.array([[1, 2], [3, 4]], object)
+ self.check_roundtrips(a)
+
+ def test_1D(self):
+ a = np.array([1, 2, 3, 4], int)
+ self.roundtrip(a)
+
+ @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
+ def test_mmap(self):
+ a = np.array([[1, 2.5], [4, 7.3]])
+ self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
+
+ a = np.asfortranarray([[1, 2.5], [4, 7.3]])
+ self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
+
+ def test_record(self):
+ a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ self.check_roundtrips(a)
+
+ @pytest.mark.slow
+ def test_format_2_0(self):
+ dt = [(("%d" % i) * 100, float) for i in range(500)]
+ a = np.ones(1000, dtype=dt)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings('always', '', UserWarning)
+ self.check_roundtrips(a)
+
+
+class TestSaveLoad(RoundtripTest):
+ def roundtrip(self, *args, **kwargs):
+ RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
+ assert_equal(self.arr[0], self.arr_reloaded)
+ assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
+ assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
+
+
+class TestSavezLoad(RoundtripTest):
+ def roundtrip(self, *args, **kwargs):
+ RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
+ try:
+ for n, arr in enumerate(self.arr):
+ reloaded = self.arr_reloaded['arr_%d' % n]
+ assert_equal(arr, reloaded)
+ assert_equal(arr.dtype, reloaded.dtype)
+ assert_equal(arr.flags.fnc, reloaded.flags.fnc)
+ finally:
+ # delete tempfile, must be done here on windows
+ if self.arr_reloaded.fid:
+ self.arr_reloaded.fid.close()
+ os.remove(self.arr_reloaded.fid.name)
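+ # Note (illustrative): np.savez stores positional arrays under the
+ # keys 'arr_0', 'arr_1', ...; keyword arguments keep their own names,
+ # which is why the loop above indexes self.arr_reloaded['arr_%d' % n].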
+
+ @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy")
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ @pytest.mark.slow
+ def test_big_arrays(self):
+ L = (1 << 31) + 100000
+ a = np.empty(L, dtype=np.uint8)
+ with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
+ np.savez(tmp, a=a)
+ del a
+ npfile = np.load(tmp)
+ a = npfile['a'] # Should succeed
+ npfile.close()
+ del a # Avoid pyflakes unused variable warning.
+
+ def test_multiple_arrays(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ self.roundtrip(a, b)
+
+ def test_named_arrays(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ c = BytesIO()
+ np.savez(c, file_a=a, file_b=b)
+ c.seek(0)
+ l = np.load(c)
+ assert_equal(a, l['file_a'])
+ assert_equal(b, l['file_b'])
+
+ def test_named_arrays_with_like(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ c = BytesIO()
+ np.savez(c, file_a=a, like=b)
+ c.seek(0)
+ l = np.load(c)
+ assert_equal(a, l['file_a'])
+ assert_equal(b, l['like'])
+
+ def test_BagObj(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ c = BytesIO()
+ np.savez(c, file_a=a, file_b=b)
+ c.seek(0)
+ l = np.load(c)
+ assert_equal(sorted(dir(l.f)), ['file_a', 'file_b'])
+ assert_equal(a, l.f.file_a)
+ assert_equal(b, l.f.file_b)
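+ # Note (illustrative): l.f is a small convenience namespace (numpy's
+ # npyio.BagObj) that exposes each stored array as an attribute of the
+ # loaded NpzFile.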
+
+ @pytest.mark.skipif(IS_WASM, reason="Cannot start thread")
+ def test_savez_filename_clashes(self):
+ # Test that issue #852 is fixed and that savez works correctly
+ # in a multithreaded environment
+
+ def writer(error_list):
+ with temppath(suffix='.npz') as tmp:
+ arr = np.random.randn(500, 500)
+ try:
+ np.savez(tmp, arr=arr)
+ except OSError as err:
+ error_list.append(err)
+
+ errors = []
+ threads = [threading.Thread(target=writer, args=(errors,))
+ for j in range(3)]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+
+ if errors:
+ raise AssertionError(errors)
+
+ def test_not_closing_opened_fid(self):
+ # Test that issue #2178 is fixed:
+ # verify that we can still seek on the opened file after np.load
+ with temppath(suffix='.npz') as tmp:
+ with open(tmp, 'wb') as fp:
+ np.savez(fp, data='LOVELY LOAD')
+ with open(tmp, 'rb', 10000) as fp:
+ fp.seek(0)
+ assert_(not fp.closed)
+ np.load(fp)['data']
+ # fp must not get closed by .load
+ assert_(not fp.closed)
+ fp.seek(0)
+ assert_(not fp.closed)
+
+ @pytest.mark.slow_pypy
+ def test_closing_fid(self):
+ # Test that issue #1517 (too many opened files) stays fixed.
+ # It might be a "weak" test, since it failed to trigger on
+ # e.g. Debian sid of 2012 Jul 05, but it was reported to trigger
+ # the failure on Ubuntu 10.04:
+ # http://projects.scipy.org/numpy/ticket/1517#comment:2
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, data='LOVELY LOAD')
+ # We need to check if the garbage collector can properly close
+ # numpy npz file returned by np.load when their reference count
+ # goes to zero. Python 3 running in debug mode raises a
+ # ResourceWarning when file closing is left to the garbage
+ # collector, so we catch the warnings.
+ with suppress_warnings() as sup:
+ sup.filter(ResourceWarning) # TODO: specify exact message
+ for i in range(1, 1025):
+ try:
+ np.load(tmp)["data"]
+ except Exception as e:
+ msg = "Failed to load data from a file: %s" % e
+ raise AssertionError(msg)
+ finally:
+ if IS_PYPY:
+ gc.collect()
+
+ def test_closing_zipfile_after_load(self):
+ # Check that zipfile owns the file and can close it. This needs to
+ # pass a file name to load for the test. On Windows, a failure will
+ # cause a second error to be raised when the attempt to remove
+ # the open file is made.
+ prefix = 'numpy_test_closing_zipfile_after_load_'
+ with temppath(suffix='.npz', prefix=prefix) as tmp:
+ np.savez(tmp, lab='place holder')
+ data = np.load(tmp)
+ fp = data.zip.fp
+ data.close()
+ assert_(fp.closed)
+
+
+class TestSaveTxt:
+ def test_array(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ fmt = "%.18e"
+ c = BytesIO()
+ np.savetxt(c, a, fmt=fmt)
+ c.seek(0)
+ assert_equal(c.readlines(),
+ [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
+ asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
+
+ a = np.array([[1, 2], [3, 4]], int)
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+
+ def test_1D(self):
+ a = np.array([1, 2, 3, 4], int)
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%d')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+
+ def test_0D_3D(self):
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, np.array(1))
+ assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+ def test_structured(self):
+ a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+
+ def test_structured_padded(self):
+ # gh-13297
+ a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[
+ ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
+ ])
+ c = BytesIO()
+ np.savetxt(c, a[['foo', 'baz']], fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
+
+ def test_multifield_view(self):
+ a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
+ v = a[['x', 'z']]
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ np.save(path, v)
+ data = np.load(path)
+ assert_array_equal(data, v)
+
+ def test_delimiter(self):
+ a = np.array([[1., 2.], [3., 4.]])
+ c = BytesIO()
+ np.savetxt(c, a, delimiter=',', fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
+
+ def test_format(self):
+ a = np.array([(1, 2), (3, 4)])
+ c = BytesIO()
+ # Sequence of formats
+ np.savetxt(c, a, fmt=['%02d', '%3.1f'])
+ c.seek(0)
+ assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
+
+ # A single multiformat string
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%02d : %3.1f')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
+
+ # Specify delimiter, should be overridden
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
+
+ # Bad fmt, should raise a ValueError
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, a, fmt=99)
+
+ def test_header_footer(self):
+ # Test the functionality of the header and footer keyword argument.
+
+ c = BytesIO()
+ a = np.array([(1, 2), (3, 4)], dtype=int)
+ test_header_footer = 'Test header / footer'
+ # Test the header keyword argument
+ np.savetxt(c, a, fmt='%1d', header=test_header_footer)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
+ # Test the footer keyword argument
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
+ # Test the commentstr keyword argument used on the header
+ c = BytesIO()
+ commentstr = '% '
+ np.savetxt(c, a, fmt='%1d',
+ header=test_header_footer, comments=commentstr)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
+ # Test the commentstr keyword argument used on the footer
+ c = BytesIO()
+ commentstr = '% '
+ np.savetxt(c, a, fmt='%1d',
+ footer=test_header_footer, comments=commentstr)
+ c.seek(0)
+ assert_equal(c.read(),
+ asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
+
+ def test_file_roundtrip(self):
+ with temppath() as name:
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(name, a)
+ b = np.loadtxt(name)
+ assert_array_equal(a, b)
+
+ def test_complex_arrays(self):
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re + 1.0j * im
+
+ # One format only
+ c = BytesIO()
+ np.savetxt(c, a, fmt=' %+.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
+ b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
+
+ # One format for each real and imaginary part
+ c = BytesIO()
+ np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
+ b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
+
+ # One format for each complex number
+ c = BytesIO()
+ np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
+ b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
+
+ def test_complex_negative_exponent(self):
+ # Prior to 1.15, some formats generated x+-yj; see gh-7895
+ ncols = 2
+ nrows = 2
+ a = np.zeros((ncols, nrows), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.3e')
+ c.seek(0)
+ lines = c.readlines()
+ assert_equal(
+ lines,
+ [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
+ b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
+
+
+ def test_custom_writer(self):
+
+ class CustomWriter(list):
+ def write(self, text):
+ self.extend(text.split(b'\n'))
+
+ w = CustomWriter()
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(w, a)
+ b = np.loadtxt(w)
+ assert_array_equal(a, b)
+
+ def test_unicode(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode_)
+ with tempdir() as tmpdir:
+ # set encoding as on windows it may not be unicode even on py3
+ np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
+ encoding='UTF-8')
+
+ def test_unicode_roundtrip(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode_)
+ # our gzip wrapper supports an encoding parameter
+ suffixes = ['', '.gz']
+ if HAS_BZ2:
+ suffixes.append('.bz2')
+ if HAS_LZMA:
+ suffixes.extend(['.xz', '.lzma'])
+ with tempdir() as tmpdir:
+ for suffix in suffixes:
+ np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
+ fmt=['%s'], encoding='UTF-16-LE')
+ b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
+ encoding='UTF-16-LE', dtype=np.unicode_)
+ assert_array_equal(a, b)
+
+ def test_unicode_bytestream(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode_)
+ s = BytesIO()
+ np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+ s.seek(0)
+ assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
+
+ def test_unicode_stringstream(self):
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ a = np.array([utf8], dtype=np.unicode_)
+ s = StringIO()
+ np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
+ s.seek(0)
+ assert_equal(s.read(), utf8 + '\n')
+
+ @pytest.mark.parametrize("fmt", ["%f", b"%f"])
+ @pytest.mark.parametrize("iotype", [StringIO, BytesIO])
+ def test_unicode_and_bytes_fmt(self, fmt, iotype):
+ # string type of fmt should not matter, see also gh-4053
+ a = np.array([1.])
+ s = iotype()
+ np.savetxt(s, a, fmt=fmt)
+ s.seek(0)
+ if iotype is StringIO:
+ assert_equal(s.read(), "%f\n" % 1.)
+ else:
+ assert_equal(s.read(), b"%f\n" % 1.)
+
+ @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work")
+ @pytest.mark.slow
+ @requires_memory(free_bytes=7e9)
+ def test_large_zip(self):
+ def check_large_zip(memoryerror_raised):
+ memoryerror_raised.value = False
+ try:
+ # The test takes at least 6GB of memory, writes a file larger
+ # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile``
+ test_data = np.asarray([np.random.rand(
+ np.random.randint(50, 100), 4)
+ for i in range(800000)], dtype=object)
+ with tempdir() as tmpdir:
+ np.savez(os.path.join(tmpdir, 'test.npz'),
+ test_data=test_data)
+ except MemoryError:
+ memoryerror_raised.value = True
+ raise
+ # run in a subprocess to ensure memory is released on PyPy, see gh-15775
+ # Use an object in shared memory to re-raise the MemoryError exception
+ # in our process if needed, see gh-16889
+ memoryerror_raised = Value(c_bool)
+
+ # Since Python 3.8, the default start method for multiprocessing on
+ # macOS has been changed from 'fork' to 'spawn', which changes the
+ # memory-sharing model and would make check_large_zip fail
+ ctx = get_context('fork')
+ p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
+ p.start()
+ p.join()
+ if memoryerror_raised.value:
+ raise MemoryError("Child process raised a MemoryError exception")
+ # -9 indicates a SIGKILL, probably an OOM.
+ if p.exitcode == -9:
+ pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient")
+ assert p.exitcode == 0
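+ # Note (illustrative): archives larger than 4 GB need the ZIP64
+ # extensions; np.savez goes through zipfile with allowZip64 enabled,
+ # which is what the write above exercises.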
+
+class LoadTxtBase:
+ def check_compressed(self, fopen, suffixes):
+ # Test that we can load data from a compressed file
+ wanted = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ for suffix in suffixes:
+ with temppath(suffix=suffix) as name:
+ with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
+ f.write(data)
+ res = self.loadfunc(name, encoding='UTF-32-LE')
+ assert_array_equal(res, wanted)
+ with fopen(name, "rt", encoding='UTF-32-LE') as f:
+ res = self.loadfunc(f)
+ assert_array_equal(res, wanted)
+
+ def test_compressed_gzip(self):
+ self.check_compressed(gzip.open, ('.gz',))
+
+ @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
+ def test_compressed_bz2(self):
+ self.check_compressed(bz2.open, ('.bz2',))
+
+ @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
+ def test_compressed_lzma(self):
+ self.check_compressed(lzma.open, ('.xz', '.lzma'))
+
+ def test_encoding(self):
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write('0.\n1.\n2.'.encode("UTF-16"))
+ x = self.loadfunc(path, encoding="UTF-16")
+ assert_array_equal(x, [0., 1., 2.])
+
+ def test_stringload(self):
+ # umlauts (German non-ASCII characters)
+ nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write(nonascii.encode("UTF-16"))
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
+ assert_array_equal(x, nonascii)
+
+ def test_binary_decode(self):
+ utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+ v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
+ assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
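+ # Note (illustrative): the leading b'\xff\xfe' is the UTF-16-LE byte
+ # order mark; the payload decodes to three space-separated Cyrillic
+ # characters, hence an array of three one-character strings.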
+
+ def test_converters_decode(self):
+ # test converters that decode strings
+ c = TextIO()
+ c.write(b'\xcf\x96')
+ c.seek(0)
+ x = self.loadfunc(c, dtype=np.unicode_,
+ converters={0: lambda x: x.decode('UTF-8')})
+ a = np.array([b'\xcf\x96'.decode('UTF-8')])
+ assert_array_equal(x, a)
+
+ def test_converters_nodecode(self):
+ # test native string converters enabled by setting an encoding
+ utf8 = b'\xcf\x96'.decode('UTF-8')
+ with temppath() as path:
+ with io.open(path, 'wt', encoding='UTF-8') as f:
+ f.write(utf8)
+ x = self.loadfunc(path, dtype=np.unicode_,
+ converters={0: lambda x: x + 't'},
+ encoding='UTF-8')
+ a = np.array([utf8 + 't'])
+ assert_array_equal(x, a)
+
+
+class TestLoadTxt(LoadTxtBase):
+ loadfunc = staticmethod(np.loadtxt)
+
+ def setup_method(self):
+ # lower chunksize for testing
+ self.orig_chunk = np.lib.npyio._loadtxt_chunksize
+ np.lib.npyio._loadtxt_chunksize = 1
+
+ def teardown_method(self):
+ np.lib.npyio._loadtxt_chunksize = self.orig_chunk
+
+ def test_record(self):
+ c = TextIO()
+ c.write('1 2\n3 4')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
+ a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ assert_array_equal(x, a)
+
+ d = TextIO()
+ d.write('M 64 75.0\nF 25 60.0')
+ d.seek(0)
+ mydescriptor = {'names': ('gender', 'age', 'weight'),
+ 'formats': ('S1', 'i4', 'f4')}
+ b = np.array([('M', 64.0, 75.0),
+ ('F', 25.0, 60.0)], dtype=mydescriptor)
+ y = np.loadtxt(d, dtype=mydescriptor)
+ assert_array_equal(y, b)
+
+ def test_array(self):
+ c = TextIO()
+ c.write('1 2\n3 4')
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int)
+ a = np.array([[1, 2], [3, 4]], int)
+ assert_array_equal(x, a)
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float)
+ a = np.array([[1, 2], [3, 4]], float)
+ assert_array_equal(x, a)
+
+ def test_1D(self):
+ c = TextIO()
+ c.write('1\n2\n3\n4\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int)
+ a = np.array([1, 2, 3, 4], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('1,2,3,4\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',')
+ a = np.array([1, 2, 3, 4], int)
+ assert_array_equal(x, a)
+
+ def test_missing(self):
+ c = TextIO()
+ c.write('1,2,3,,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)})
+ a = np.array([1, 2, 3, -999, 5], int)
+ assert_array_equal(x, a)
+
+ def test_converters_with_usecols(self):
+ c = TextIO()
+ c.write('1,2,3,,5\n6,7,8,9,10\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)},
+ usecols=(1, 3,))
+ a = np.array([[2, -999], [7, 9]], int)
+ assert_array_equal(x, a)
+
+ def test_comments_unicode(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments='#')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_comments_byte(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments=b'#')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_comments_multiple(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments=['#', '@', '//'])
+ a = np.array([[1, 2, 3], [4, 5, 6]], int)
+ assert_array_equal(x, a)
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_comments_multi_chars(self):
+ c = TextIO()
+ c.write('/* comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments='/*')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ # Check that '/*' is not transformed to ['/', '*']
+ c = TextIO()
+ c.write('*/ comment\n1,2,3,5\n')
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
+ comments='/*')
+
+ def test_skiprows(self):
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_usecols(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ c = BytesIO()
+ np.savetxt(c, a)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(1,))
+ assert_array_equal(x, a[:, 1])
+
+ a = np.array([[1, 2, 3], [3, 4, 5]], float)
+ c = BytesIO()
+ np.savetxt(c, a)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(1, 2))
+ assert_array_equal(x, a[:, 1:])
+
+ # Testing with arrays instead of tuples.
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
+ assert_array_equal(x, a[:, 1:])
+
+ # Testing with an integer instead of a sequence
+ for int_type in [int, np.int8, np.int16,
+ np.int32, np.int64, np.uint8, np.uint16,
+ np.uint32, np.uint64]:
+ to_read = int_type(1)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=to_read)
+ assert_array_equal(x, a[:, 1])
+
+ # Testing with some crazy custom integer type
+ class CrazyInt:
+ def __index__(self):
+ return 1
+
+ crazy_int = CrazyInt()
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=crazy_int)
+ assert_array_equal(x, a[:, 1])
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
+ assert_array_equal(x, a[:, 1])
+
+ # Checking with dtypes defined converters.
+ data = '''JOE 70.1 25.3
+ BOB 60.5 27.9
+ '''
+ c = TextIO(data)
+ names = ['stid', 'temp']
+ dtypes = ['S4', 'f8']
+ arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
+ assert_equal(arr['stid'], [b"JOE", b"BOB"])
+ assert_equal(arr['temp'], [25.3, 27.9])
+
+ # Testing non-ints in usecols
+ c.seek(0)
+ bogus_idx = 1.5
+ assert_raises_regex(
+ TypeError,
+ '^usecols must be.*%s' % type(bogus_idx).__name__,
+ np.loadtxt, c, usecols=bogus_idx
+ )
+
+ assert_raises_regex(
+ TypeError,
+ '^usecols must be.*%s' % type(bogus_idx).__name__,
+ np.loadtxt, c, usecols=[0, bogus_idx, 0]
+ )
+
+ def test_bad_usecols(self):
+ with pytest.raises(OverflowError):
+ np.loadtxt(["1\n"], usecols=[2**64], delimiter=",")
+ with pytest.raises((ValueError, OverflowError)):
+ # Overflow error on 32bit platforms
+ np.loadtxt(["1\n"], usecols=[2**62], delimiter=",")
+ with pytest.raises(TypeError,
+ match="If a structured dtype .*. But 1 usecols were given and "
+ "the number of fields is 3."):
+ np.loadtxt(["1,1\n"], dtype="i,(2)i", usecols=[0], delimiter=",")
+
+ def test_fancy_dtype(self):
+ c = TextIO()
+ c.write('1,2,3.0\n4,5,6.0\n')
+ c.seek(0)
+ dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ x = np.loadtxt(c, dtype=dt, delimiter=',')
+ a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
+ assert_array_equal(x, a)
+
+ def test_shaped_dtype(self):
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
+ dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ('block', int, (2, 3))])
+ x = np.loadtxt(c, dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_3d_shaped_dtype(self):
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
+ dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ('block', int, (2, 2, 3))])
+ x = np.loadtxt(c, dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0,
+ [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_str_dtype(self):
+ # see gh-8033
+ c = ["str1", "str2"]
+
+ for dt in (str, np.bytes_):
+ a = np.array(["str1", "str2"], dtype=dt)
+ x = np.loadtxt(c, dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_empty_file(self):
+ with pytest.warns(UserWarning, match="input contained no data"):
+ c = TextIO()
+ x = np.loadtxt(c)
+ assert_equal(x.shape, (0,))
+ x = np.loadtxt(c, dtype=np.int64)
+ assert_equal(x.shape, (0,))
+ assert_(x.dtype == np.int64)
+
+ def test_unused_converter(self):
+ c = TextIO()
+ c.writelines(['1 21\n', '3 42\n'])
+ c.seek(0)
+ data = np.loadtxt(c, usecols=(1,),
+ converters={0: lambda s: int(s, 16)})
+ assert_array_equal(data, [21, 42])
+
+ c.seek(0)
+ data = np.loadtxt(c, usecols=(1,),
+ converters={1: lambda s: int(s, 16)})
+ assert_array_equal(data, [33, 66])
+
+ def test_dtype_with_object(self):
+ # Test using an explicit dtype with an object
+ data = """ 1; 2001-01-01
+ 2; 2002-01-31 """
+ ndtype = [('idx', int), ('code', object)]
+ func = lambda s: strptime(s.strip(), "%Y-%m-%d")
+ converters = {1: func}
+ test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
+ converters=converters)
+ control = np.array(
+ [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
+ dtype=ndtype)
+ assert_equal(test, control)
+
+ def test_uint64_type(self):
+ tgt = (9223372043271415339, 9223372043271415853)
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=np.uint64)
+ assert_equal(res, tgt)
+
+ def test_int64_type(self):
+ tgt = (-9223372036854775807, 9223372036854775807)
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=np.int64)
+ assert_equal(res, tgt)
+
+ def test_from_float_hex(self):
+ # IEEE doubles and floats only, otherwise the float32
+ # conversion may fail.
+ tgt = np.logspace(-10, 10, 5).astype(np.float32)
+ tgt = np.hstack((tgt, -tgt)).astype(float)
+ inp = '\n'.join(map(float.hex, tgt))
+ c = TextIO()
+ c.write(inp)
+ for dt in [float, np.float32]:
+ c.seek(0)
+ res = np.loadtxt(
+ c, dtype=dt, converters=float.fromhex, encoding="latin1")
+ assert_equal(res, tgt, err_msg="%s" % dt)
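+ # Note (illustrative): float.hex()/float.fromhex() round-trip binary
+ # floats exactly, e.g. float.fromhex(float.hex(0.1)) == 0.1, which is
+ # why the comparison above can use exact equality.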
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_default_float_converter_no_default_hex_conversion(self):
+ """
+ Ensure that fromhex is only used for values with the correct prefix and
+ is not called by default. Regression test related to gh-19598.
+ """
+ c = TextIO("a b c")
+ with pytest.raises(ValueError,
+ match=".*convert string 'a' to float64 at row 0, column 1"):
+ np.loadtxt(c)
+
+ @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+ def test_default_float_converter_exception(self):
+ """
+ Ensure that the exception message raised during failed floating point
+ conversion is correct. Regression test related to gh-19598.
+ """
+ c = TextIO("qrs tuv") # Invalid values for default float converter
+ with pytest.raises(ValueError,
+ match="could not convert string 'qrs' to float64"):
+ np.loadtxt(c)
+
+ def test_from_complex(self):
+ tgt = (complex(1, 1), complex(1, -1))
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, tgt)
+
+ def test_complex_misformatted(self):
+ # test for backward compatibility
+ # some complex formats used to generate x+-yj
+ a = np.zeros((2, 2), dtype=np.complex128)
+ re = np.pi
+ im = np.e
+ a[:] = re - 1.0j * im
+ c = BytesIO()
+ np.savetxt(c, a, fmt='%.16e')
+ c.seek(0)
+ txt = c.read()
+ c.seek(0)
+ # misformat the sign on the imaginary part, gh 7895
+ txt_bad = txt.replace(b'e+00-', b'e00+-')
+ assert_(txt_bad != txt)
+ c.write(txt_bad)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=complex)
+ assert_equal(res, a)
+
+ def test_universal_newline(self):
+ with temppath() as name:
+ with open(name, 'w') as f:
+ f.write('1 21\r3 42\r')
+ data = np.loadtxt(name)
+ assert_array_equal(data, [[1, 21], [3, 42]])
+
+ def test_empty_field_after_tab(self):
+ c = TextIO()
+ c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
+ c.seek(0)
+ dt = {'names': ('x', 'y', 'z', 'comment'),
+ 'formats': ('<i4', '<i4', '<f4', '|S8')}
+ x = np.loadtxt(c, dtype=dt, delimiter='\t')
+ a = np.array([b'start ', b' ', b''])
+ assert_array_equal(x['comment'], a)
+
+ def test_unpack_structured(self):
+ txt = TextIO("M 21 72\nF 35 58")
+ dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
+ a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
+ assert_(a.dtype.str == '|S1')
+ assert_(b.dtype.str == '<i4')
+ assert_(c.dtype.str == '<f4')
+ assert_array_equal(a, np.array([b'M', b'F']))
+ assert_array_equal(b, np.array([21, 35]))
+ assert_array_equal(c, np.array([72., 58.]))
+
+ def test_ndmin_keyword(self):
+ c = TextIO()
+ c.write('1,2,3\n4,5,6')
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, ndmin=3)
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
+ a = np.array([[1, 2, 3], [4, 5, 6]])
+ assert_array_equal(x, a)
+
+ d = TextIO()
+ d.write('0,1,2')
+ d.seek(0)
+ x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
+ assert_(x.shape == (1, 3))
+ d.seek(0)
+ x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
+ assert_(x.shape == (3,))
+ d.seek(0)
+ x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
+ assert_(x.shape == (3,))
+
+ e = TextIO()
+ e.write('0\n1\n2')
+ e.seek(0)
+ x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
+ assert_(x.shape == (3, 1))
+ e.seek(0)
+ x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
+ assert_(x.shape == (3,))
+ e.seek(0)
+ x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
+ assert_(x.shape == (3,))
+
+ # Test ndmin kw with empty file.
+ with pytest.warns(UserWarning, match="input contained no data"):
+ f = TextIO()
+ assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
+ assert_(np.loadtxt(f, ndmin=1).shape == (0,))
+
+ def test_generator_source(self):
+ def count():
+ for i in range(10):
+ yield "%d" % i
+
+ res = np.loadtxt(count())
+ assert_array_equal(res, np.arange(10))
+
+ def test_bad_line(self):
+ c = TextIO()
+ c.write('1 2 3\n4 5 6\n2 3')
+ c.seek(0)
+
+ # Check for an exception and that it reports the offending line number
+ assert_raises_regex(ValueError, "3", np.loadtxt, c)
+
+ def test_none_as_string(self):
+ # gh-5155, None should work as string when format demands it
+ c = TextIO()
+ c.write('100,foo,200\n300,None,400')
+ c.seek(0)
+ dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
+ np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
+
+ @pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
+ reason="Wrong preferred encoding")
+ def test_binary_load(self):
+ butf8 = b"5,6,7,\xc3\x95scarscar\r\n15,2,3,hello\r\n"\
+ b"20,2,3,\xc3\x95scar\r\n"
+ sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write(butf8)
+ with open(path, "rb") as f:
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
+ assert_array_equal(x, sutf8)
+ # test broken latin1 conversion people now rely on
+ with open(path, "rb") as f:
+ x = np.loadtxt(f, encoding="UTF-8", dtype="S")
+ x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
+ assert_array_equal(x, np.array(x, dtype="S"))
+
+ def test_max_rows(self):
+ c = TextIO()
+ c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ max_rows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_with_skiprows(self):
+ c = TextIO()
+ c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=2)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_with_read_continuation(self):
+ c = TextIO()
+ c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ max_rows=2)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
+ assert_array_equal(x, a)
+ # test continuation
+ x = np.loadtxt(c, dtype=int, delimiter=',')
+ a = np.array([2, 1, 4, 5], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_larger(self):
+ # test max_rows greater than the number of rows
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=6)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
+ assert_array_equal(x, a)
+
+ @pytest.mark.parametrize(["skip", "data"], [
+ (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]),
+ # "Bad" lines that do not end in newlines:
+ (1, ["ignored", "1,2", "", "3,4"]),
+ (1, StringIO("ignored\n1,2\n\n3,4")),
+ # Same as above, but do not skip any lines:
+ (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]),
+ (0, ["-1,0", "1,2", "", "3,4"]),
+ (0, StringIO("-1,0\n1,2\n\n3,4"))])
+ def test_max_rows_empty_lines(self, skip, data):
+ with pytest.warns(UserWarning,
+ match=f"Input line 3.*max_rows={3-skip}"):
+ res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+ max_rows=3-skip)
+ assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:])
+
+ if isinstance(data, StringIO):
+ data.seek(0)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", UserWarning)
+ with pytest.raises(UserWarning):
+ np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",",
+ max_rows=3-skip)
+
+class Testfromregex:
+ def test_record(self):
+ c = TextIO()
+ c.write('1.312 foo\n1.534 bar\n4.444 qux')
+ c.seek(0)
+
+ dt = [('num', np.float64), ('val', 'S3')]
+ x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
+ a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_record_2(self):
+ c = TextIO()
+ c.write('1312 foo\n1534 bar\n4444 qux')
+ c.seek(0)
+
+ dt = [('num', np.int32), ('val', 'S3')]
+ x = np.fromregex(c, r"(\d+)\s+(...)", dt)
+ a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_record_3(self):
+ c = TextIO()
+ c.write('1312 foo\n1534 bar\n4444 qux')
+ c.seek(0)
+
+ dt = [('num', np.float64)]
+ x = np.fromregex(c, r"(\d+)\s+...", dt)
+ a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
+ assert_array_equal(x, a)
+
+ @pytest.mark.parametrize("path_type", [str, Path])
+ def test_record_unicode(self, path_type):
+ utf8 = b'\xcf\x96'
+ with temppath() as str_path:
+ path = path_type(str_path)
+ with open(path, 'wb') as f:
+ f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
+
+ dt = [('num', np.float64), ('val', 'U4')]
+ x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
+ a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
+ (4.444, 'qux')], dtype=dt)
+ assert_array_equal(x, a)
+
+ regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
+ x = np.fromregex(path, regexp, dt, encoding='UTF-8')
+ assert_array_equal(x, a)
+
+ def test_compiled_bytes(self):
+ regexp = re.compile(b'(\\d)')
+ c = BytesIO(b'123')
+ dt = [('num', np.float64)]
+ a = np.array([1, 2, 3], dtype=dt)
+ x = np.fromregex(c, regexp, dt)
+ assert_array_equal(x, a)
+
+ def test_bad_dtype_not_structured(self):
+ regexp = re.compile(b'(\\d)')
+ c = BytesIO(b'123')
+ with pytest.raises(TypeError, match='structured datatype'):
+ np.fromregex(c, regexp, dtype=np.float64)
+
+
+#####--------------------------------------------------------------------------
+
+
+class TestFromTxt(LoadTxtBase):
+ loadfunc = staticmethod(np.genfromtxt)
+
+ def test_record(self):
+ # Test w/ explicit dtype
+ data = TextIO('1 2\n3 4')
+ test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
+ control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+ assert_equal(test, control)
+ #
+ data = TextIO('M 64.0 75.0\nF 25.0 60.0')
+ descriptor = {'names': ('gender', 'age', 'weight'),
+ 'formats': ('S1', 'i4', 'f4')}
+ control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
+ dtype=descriptor)
+ test = np.genfromtxt(data, dtype=descriptor)
+ assert_equal(test, control)
+
+ def test_array(self):
+ # Test outputting a standard ndarray
+ data = TextIO('1 2\n3 4')
+ control = np.array([[1, 2], [3, 4]], dtype=int)
+ test = np.genfromtxt(data, dtype=int)
+ assert_array_equal(test, control)
+ #
+ data.seek(0)
+ control = np.array([[1, 2], [3, 4]], dtype=float)
+ test = np.loadtxt(data, dtype=float)
+ assert_array_equal(test, control)
+
+ def test_1D(self):
+ # Test squeezing to 1D
+ control = np.array([1, 2, 3, 4], int)
+ #
+ data = TextIO('1\n2\n3\n4\n')
+ test = np.genfromtxt(data, dtype=int)
+ assert_array_equal(test, control)
+ #
+ data = TextIO('1,2,3,4\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',')
+ assert_array_equal(test, control)
+
+ def test_comments(self):
+ # Test the stripping of comments
+ control = np.array([1, 2, 3, 5], int)
+ # Comment on its own line
+ data = TextIO('# comment\n1,2,3,5\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
+ assert_equal(test, control)
+ # Comment at the end of a line
+ data = TextIO('1,2,3,5# comment\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
+ assert_equal(test, control)
+
+ def test_skiprows(self):
+ # Test row skipping
+ control = np.array([1, 2, 3, 5], int)
+ kwargs = dict(dtype=int, delimiter=',')
+ #
+ data = TextIO('comment\n1,2,3,5\n')
+ test = np.genfromtxt(data, skip_header=1, **kwargs)
+ assert_equal(test, control)
+ #
+ data = TextIO('# comment\n1,2,3,5\n')
+ test = np.loadtxt(data, skiprows=1, **kwargs)
+ assert_equal(test, control)
+
+ def test_skip_footer(self):
+ data = ["# %i" % i for i in range(1, 6)]
+ data.append("A, B, C")
+ data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
+ data[-1] = "99,99"
+ kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
+ test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
+ ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
+ dtype=[(_, float) for _ in "ABC"])
+ assert_equal(test, ctrl)
+
+ def test_skip_footer_with_invalid(self):
+ with suppress_warnings() as sup:
+ sup.filter(ConversionWarning)
+ basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
+ # Footer too small to get rid of all invalid values
+ assert_raises(ValueError, np.genfromtxt,
+ TextIO(basestr), skip_footer=1)
+ a = np.genfromtxt(
+ TextIO(basestr), skip_footer=1, invalid_raise=False)
+ assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
+ #
+ a = np.genfromtxt(TextIO(basestr), skip_footer=3)
+ assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
+ #
+ basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
+ a = np.genfromtxt(
+ TextIO(basestr), skip_footer=1, invalid_raise=False)
+ assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
+ a = np.genfromtxt(
+ TextIO(basestr), skip_footer=3, invalid_raise=False)
+ assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
+
+ def test_header(self):
+ # Test retrieving a header
+ data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, dtype=None, names=True)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ control = {'gender': np.array([b'M', b'F']),
+ 'age': np.array([64.0, 25.0]),
+ 'weight': np.array([75.0, 60.0])}
+ assert_equal(test['gender'], control['gender'])
+ assert_equal(test['age'], control['age'])
+ assert_equal(test['weight'], control['weight'])
+
+ def test_auto_dtype(self):
+ # Test the automatic definition of the output dtype
+ data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ control = [np.array([b'A', b'BCD']),
+ np.array([64, 25]),
+ np.array([75.0, 60.0]),
+ np.array([3 + 4j, 5 + 6j]),
+ np.array([True, False]), ]
+ assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
+ for (i, ctrl) in enumerate(control):
+ assert_equal(test['f%i' % i], ctrl)
+
+ def test_auto_dtype_uniform(self):
+ # Tests whether the output dtype can be uniformized
+ data = TextIO('1 2 3 4\n5 6 7 8\n')
+ test = np.genfromtxt(data, dtype=None)
+ control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
+ assert_equal(test, control)
+
+ def test_fancy_dtype(self):
+ # Check that a nested dtype isn't MIA
+ data = TextIO('1,2,3.0\n4,5,6.0\n')
+ fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
+ control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
+ assert_equal(test, control)
+
+ def test_names_overwrite(self):
+ # Test overwriting the names of the dtype
+ descriptor = {'names': ('g', 'a', 'w'),
+ 'formats': ('S1', 'i4', 'f4')}
+ data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
+ names = ('gender', 'age', 'weight')
+ test = np.genfromtxt(data, dtype=descriptor, names=names)
+ descriptor['names'] = names
+ control = np.array([('M', 64.0, 75.0),
+ ('F', 25.0, 60.0)], dtype=descriptor)
+ assert_equal(test, control)
+
+ def test_bad_fname(self):
+ with pytest.raises(TypeError, match='fname must be a string,'):
+ np.genfromtxt(123)
+
+ def test_commented_header(self):
+ # Check that names can be retrieved even if the line is commented out.
+ data = TextIO("""
+#gender age weight
+M 21 72.100000
+F 35 58.330000
+M 33 21.99
+ """)
+ # The # is part of the first name and should be deleted automatically.
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, names=True, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
+ dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
+ assert_equal(test, ctrl)
+ # Ditto, but we should get rid of the first element
+ data = TextIO(b"""
+# gender age weight
+M 21 72.100000
+F 35 58.330000
+M 33 21.99
+ """)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, names=True, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test, ctrl)
+
+ def test_names_and_comments_none(self):
+ # Tests case when names is true but comments is None (gh-10780)
+ data = TextIO('col1 col2\n 1 2\n 3 4')
+ test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
+ control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
+ assert_equal(test, control)
+
+ def test_file_is_closed_on_error(self):
+ # gh-13200
+ with tempdir() as tmpdir:
+ fpath = os.path.join(tmpdir, "test.csv")
+ with open(fpath, "wb") as f:
+ f.write('\N{GREEK PI SYMBOL}'.encode())
+
+ # ResourceWarnings are emitted from a destructor, so won't be
+ # detected by regular propagation to errors.
+ with assert_no_warnings():
+ with pytest.raises(UnicodeDecodeError):
+ np.genfromtxt(fpath, encoding="ascii")
+
+ def test_autonames_and_usecols(self):
+ # Tests names and usecols
+ data = TextIO('A B C D\n aaaa 121 45 9.1')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
+ names=True, dtype=None)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ control = np.array(('aaaa', 45, 9.1),
+ dtype=[('A', '|S4'), ('C', int), ('D', float)])
+ assert_equal(test, control)
+
+ def test_converters_with_usecols(self):
+ # Test the combination user-defined converters and usecol
+ data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)},
+ usecols=(1, 3,))
+ control = np.array([[2, -999], [7, 9]], int)
+ assert_equal(test, control)
+
+ def test_converters_with_usecols_and_names(self):
+ # Tests names and usecols
+ data = TextIO('A B C D\n aaaa 121 45 9.1')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
+ dtype=None,
+ converters={'C': lambda s: 2 * int(s)})
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ control = np.array(('aaaa', 90, 9.1),
+ dtype=[('A', '|S4'), ('C', int), ('D', float)])
+ assert_equal(test, control)
+
+ def test_converters_cornercases(self):
+ # Test the conversion to datetime.
+ converter = {
+ 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
+ data = TextIO('2009-02-03 12:00:00Z, 72214.0')
+ test = np.genfromtxt(data, delimiter=',', dtype=None,
+ names=['date', 'stid'], converters=converter)
+ control = np.array((datetime(2009, 2, 3), 72214.),
+ dtype=[('date', np.object_), ('stid', float)])
+ assert_equal(test, control)
+
+ def test_converters_cornercases2(self):
+ # Test the conversion to datetime64.
+ converter = {
+ 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
+ data = TextIO('2009-02-03 12:00:00Z, 72214.0')
+ test = np.genfromtxt(data, delimiter=',', dtype=None,
+ names=['date', 'stid'], converters=converter)
+ control = np.array((datetime(2009, 2, 3), 72214.),
+ dtype=[('date', 'datetime64[us]'), ('stid', float)])
+ assert_equal(test, control)
+
+ def test_unused_converter(self):
+ # Test whether unused converters are forgotten
+ data = TextIO("1 21\n 3 42\n")
+ test = np.genfromtxt(data, usecols=(1,),
+ converters={0: lambda s: int(s, 16)})
+ assert_equal(test, [21, 42])
+ #
+ data.seek(0)
+ test = np.genfromtxt(data, usecols=(1,),
+ converters={1: lambda s: int(s, 16)})
+ assert_equal(test, [33, 66])
+
+ def test_invalid_converter(self):
+ strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
+ (b'r' not in x.lower() and x.strip() or 0.0))
+ strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
+ (b'%' not in x.lower() and x.strip() or 0.0))
+ s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
+ "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
+ "D02N03,10/10/2004,R 1,,7,145.55")
+ kwargs = dict(
+ converters={2: strip_per, 3: strip_rand}, delimiter=",",
+ dtype=None)
+ assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
+
+ def test_tricky_converter_bug1666(self):
+ # Test some corner cases
+ s = TextIO('q1,2\nq3,4')
+ cnv = lambda s: float(s[1:])
+ test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
+ control = np.array([[1., 2.], [3., 4.]])
+ assert_equal(test, control)
+
+ def test_dtype_with_converters(self):
+ dstr = "2009; 23; 46"
+ test = np.genfromtxt(TextIO(dstr,),
+ delimiter=";", dtype=float, converters={0: bytes})
+ control = np.array([('2009', 23., 46)],
+ dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
+ assert_equal(test, control)
+ test = np.genfromtxt(TextIO(dstr,),
+ delimiter=";", dtype=float, converters={0: float})
+ control = np.array([2009., 23., 46],)
+ assert_equal(test, control)
+
+ def test_dtype_with_converters_and_usecols(self):
+ dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
+ dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3}
+ dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')]
+ conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
+ test = np.recfromcsv(TextIO(dstr), dtype=dtyp, delimiter=',',
+ names=None, converters=conv)
+ control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)],
+ dtype=dtyp)
+ assert_equal(test, control)
+ dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')]
+ test = np.recfromcsv(TextIO(dstr), dtype=dtyp, delimiter=',',
+ usecols=(0, 1, 3), names=None, converters=conv)
+ control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp)
+ assert_equal(test, control)
+
+ def test_dtype_with_object(self):
+ # Test using an explicit dtype with an object
+ data = """ 1; 2001-01-01
+ 2; 2002-01-31 """
+ ndtype = [('idx', int), ('code', object)]
+ func = lambda s: strptime(s.strip(), "%Y-%m-%d")
+ converters = {1: func}
+ test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
+ converters=converters)
+ control = np.array(
+ [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
+ dtype=ndtype)
+ assert_equal(test, control)
+
+ ndtype = [('nest', [('idx', int), ('code', object)])]
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
+ test = np.genfromtxt(TextIO(data), delimiter=";",
+ dtype=ndtype, converters=converters)
+
+ # nested but empty fields also aren't supported
+ ndtype = [('idx', int), ('code', object), ('nest', [])]
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
+ test = np.genfromtxt(TextIO(data), delimiter=";",
+ dtype=ndtype, converters=converters)
+
+ def test_dtype_with_object_no_converter(self):
+ # Object without a converter uses bytes:
+ parsed = np.genfromtxt(TextIO("1"), dtype=object)
+ assert parsed[()] == b"1"
+ parsed = np.genfromtxt(TextIO("string"), dtype=object)
+ assert parsed[()] == b"string"
+
+ def test_userconverters_with_explicit_dtype(self):
+ # Test user_converters w/ explicit (standard) dtype
+ data = TextIO('skip,skip,2001-01-01,1.0,skip')
+ test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
+ usecols=(2, 3), converters={2: bytes})
+ control = np.array([('2001-01-01', 1.)],
+ dtype=[('', '|S10'), ('', float)])
+ assert_equal(test, control)
+
+ def test_utf8_userconverters_with_explicit_dtype(self):
+ utf8 = b'\xcf\x96'
+ with temppath() as path:
+ with open(path, 'wb') as f:
+ f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
+ test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
+ usecols=(2, 3), converters={2: np.compat.unicode},
+ encoding='UTF-8')
+ control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
+ dtype=[('', '|U11'), ('', float)])
+ assert_equal(test, control)
+
+ def test_spacedelimiter(self):
+ # Test space delimiter
+ data = TextIO("1 2 3 4 5\n6 7 8 9 10")
+ test = np.genfromtxt(data)
+ control = np.array([[1., 2., 3., 4., 5.],
+ [6., 7., 8., 9., 10.]])
+ assert_equal(test, control)
+
+ def test_integer_delimiter(self):
+ # Test using an integer for delimiter
+ data = " 1 2 3\n 4 5 67\n890123 4"
+ test = np.genfromtxt(TextIO(data), delimiter=3)
+ control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
+ assert_equal(test, control)
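+ # Note (illustrative): an integer delimiter makes genfromtxt split
+ # each line into fixed-width fields of that many characters, so
+ # "890123 4" parses as "890" | "123" | " 4".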
+
+ def test_missing(self):
+ data = TextIO('1,2,3,,5\n')
+ test = np.genfromtxt(data, dtype=int, delimiter=',',
+ converters={3: lambda s: int(s or - 999)})
+ control = np.array([1, 2, 3, -999, 5], int)
+ assert_equal(test, control)
+
+ def test_missing_with_tabs(self):
+ # Test w/ a delimiter tab
+ txt = "1\t2\t3\n\t2\t\n1\t\t3"
+ test = np.genfromtxt(TextIO(txt), delimiter="\t",
+ usemask=True,)
+ ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
+ ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
+ assert_equal(test.data, ctrl_d)
+ assert_equal(test.mask, ctrl_m)
+
+ def test_usecols(self):
+ # Test the selection of columns
+ # Select 1 column
+ control = np.array([[1, 2], [3, 4]], float)
+ data = TextIO()
+ np.savetxt(data, control)
+ data.seek(0)
+ test = np.genfromtxt(data, dtype=float, usecols=(1,))
+ assert_equal(test, control[:, 1])
+ #
+ control = np.array([[1, 2, 3], [3, 4, 5]], float)
+ data = TextIO()
+ np.savetxt(data, control)
+ data.seek(0)
+ test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
+ assert_equal(test, control[:, 1:])
+ # Testing with arrays instead of tuples.
+ data.seek(0)
+ test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
+ assert_equal(test, control[:, 1:])
+
+ def test_usecols_as_css(self):
+ # Test giving usecols with a comma-separated string
+ data = "1 2 3\n4 5 6"
+ test = np.genfromtxt(TextIO(data),
+ names="a, b, c", usecols="a, c")
+ ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
+ assert_equal(test, ctrl)
+
+ def test_usecols_with_structured_dtype(self):
+ # Test usecols with an explicit structured dtype
+ data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
+ names = ['stid', 'temp']
+ dtypes = ['S4', 'f8']
+ test = np.genfromtxt(
+ data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
+ assert_equal(test['stid'], [b"JOE", b"BOB"])
+ assert_equal(test['temp'], [25.3, 27.9])
+
+ def test_usecols_with_integer(self):
+ # Test usecols with an integer
+ test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
+ assert_equal(test, np.array([1., 4.]))
+
+ def test_usecols_with_named_columns(self):
+ # Test usecols with named columns
+ ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
+ data = "1 2 3\n4 5 6"
+ kwargs = dict(names="a, b, c")
+ test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
+ assert_equal(test, ctrl)
+ test = np.genfromtxt(TextIO(data),
+ usecols=('a', 'c'), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_empty_file(self):
+ # Test that an empty file raises the proper warning.
+ with suppress_warnings() as sup:
+ sup.filter(message="genfromtxt: Empty input file:")
+ data = TextIO()
+ test = np.genfromtxt(data)
+ assert_equal(test, np.array([]))
+
+ # when skip_header > 0
+ test = np.genfromtxt(data, skip_header=1)
+ assert_equal(test, np.array([]))
+
+ def test_fancy_dtype_alt(self):
+ # Check that a nested dtype isn't MIA
+ data = TextIO('1,2,3.0\n4,5,6.0\n')
+ fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
+ control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
+ assert_equal(test, control)
+
+ def test_shaped_dtype(self):
+ c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
+ dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+ ('block', int, (2, 3))])
+ x = np.genfromtxt(c, dtype=dt)
+ a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
+ dtype=dt)
+ assert_array_equal(x, a)
+
+ def test_withmissing(self):
+ data = TextIO('A,B\n0,1\n2,N/A')
+ kwargs = dict(delimiter=",", missing_values="N/A", names=True)
+ test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ #
+ data.seek(0)
+ test = np.genfromtxt(data, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', float), ('B', float)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_user_missing_values(self):
+ data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
+ basekwargs = dict(dtype=None, delimiter=",", names=True,)
+ mdtype = [('A', int), ('B', float), ('C', complex)]
+ #
+ test = np.genfromtxt(TextIO(data), missing_values="N/A",
+ **basekwargs)
+ control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
+ (-9, 2.2, -999j), (3, -99, 3j)],
+ mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
+ dtype=mdtype)
+ assert_equal(test, control)
+ #
+ basekwargs['dtype'] = mdtype
+ test = np.genfromtxt(TextIO(data),
+ missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
+ control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
+ (-9, 2.2, -999j), (3, -99, 3j)],
+ mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
+ dtype=mdtype)
+ assert_equal(test, control)
+ #
+ test = np.genfromtxt(TextIO(data),
+ missing_values={0: -9, 'B': -99, 'C': -999j},
+ usemask=True,
+ **basekwargs)
+ control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
+ (-9, 2.2, -999j), (3, -99, 3j)],
+ mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
+ dtype=mdtype)
+ assert_equal(test, control)
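+
+ # As exercised above, the missing_values dict may key a column by
+ # index (0), by name ('B'), or mix both styles; each entry marks that
+ # column's matching token as missing.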
+
+ def test_user_filling_values(self):
+ # Test with missing and filling values
+ ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
+ data = "N/A, 2, 3\n4, ,???"
+ kwargs = dict(delimiter=",",
+ dtype=int,
+ names="a,b,c",
+ missing_values={0: "N/A", 'b': " ", 2: "???"},
+ filling_values={0: 0, 'b': 0, 2: -999})
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ ctrl = np.array([(0, 2, 3), (4, 0, -999)],
+ dtype=[(_, int) for _ in "abc"])
+ assert_equal(test, ctrl)
+ #
+ test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
+ ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
+ assert_equal(test, ctrl)
+
+ data2 = "1,2,*,4\n5,*,7,8\n"
+ test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
+ missing_values="*", filling_values=0)
+ ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
+ assert_equal(test, ctrl)
+ test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
+ missing_values="*", filling_values=-1)
+ ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
+ assert_equal(test, ctrl)
+
+ def test_withmissing_float(self):
+ data = TextIO('A,B\n0,1.5\n2,-999.00')
+ test = np.genfromtxt(data, dtype=None, delimiter=',',
+ missing_values='-999.0', names=True, usemask=True)
+ control = ma.array([(0, 1.5), (2, -1.)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', float)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_with_masked_column_uniform(self):
+ # Test masked column
+ data = TextIO('1 2 3\n4 5 6\n')
+ test = np.genfromtxt(data, dtype=None,
+ missing_values='2,5', usemask=True)
+ control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
+ assert_equal(test, control)
+
+ def test_with_masked_column_various(self):
+ # Test masked column
+ data = TextIO('True 2 3\nFalse 5 6\n')
+ test = np.genfromtxt(data, dtype=None,
+ missing_values='2,5', usemask=True)
+ control = ma.array([(1, 2, 3), (0, 5, 6)],
+ mask=[(0, 1, 0), (0, 1, 0)],
+ dtype=[('f0', bool), ('f1', bool), ('f2', int)])
+ assert_equal(test, control)
+
+ def test_invalid_raise(self):
+ # Test invalid raise
+ data = ["1, 1, 1, 1, 1"] * 50
+ for i in range(5):
+ data[10 * i] = "2, 2, 2, 2 2"
+ data.insert(0, "a, b, c, d, e")
+ mdata = TextIO("\n".join(data))
+
+ kwargs = dict(delimiter=",", dtype=None, names=True)
+ def f():
+ return np.genfromtxt(mdata, invalid_raise=False, **kwargs)
+ mtest = assert_warns(ConversionWarning, f)
+ assert_equal(len(mtest), 45)
+ assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
+ #
+ mdata.seek(0)
+ assert_raises(ValueError, np.genfromtxt, mdata,
+ delimiter=",", names=True)
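+
+ # Arithmetic behind the 45 above: of the 50 data rows, the 5 reading
+ # "2, 2, 2, 2 2" (note the missing comma) tokenize to 4 fields instead
+ # of 5, so invalid_raise=False drops them: 50 - 5 == 45.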
+
+ def test_invalid_raise_with_usecols(self):
+ # Test invalid_raise with usecols
+ data = ["1, 1, 1, 1, 1"] * 50
+ for i in range(5):
+ data[10 * i] = "2, 2, 2, 2 2"
+ data.insert(0, "a, b, c, d, e")
+ mdata = TextIO("\n".join(data))
+
+ kwargs = dict(delimiter=",", dtype=None, names=True,
+ invalid_raise=False)
+ def f():
+ return np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
+ mtest = assert_warns(ConversionWarning, f)
+ assert_equal(len(mtest), 45)
+ assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
+ #
+ mdata.seek(0)
+ mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
+ assert_equal(len(mtest), 50)
+ control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
+ control[[10 * _ for _ in range(5)]] = (2, 2)
+ assert_equal(mtest, control)
+
+ def test_inconsistent_dtype(self):
+ # Test inconsistent dtype
+ data = ["1, 1, 1, 1, -1.1"] * 50
+ mdata = TextIO("\n".join(data))
+
+ converters = {4: lambda x: "(%s)" % x.decode()}
+ kwargs = dict(delimiter=",", converters=converters,
+ dtype=[(_, int) for _ in 'abcde'],)
+ assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
+
+ def test_default_field_format(self):
+ # Test default format
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=None, defaultfmt="f%02i")
+ ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
+ dtype=[("f00", int), ("f01", int), ("f02", float)])
+ assert_equal(mtest, ctrl)
+
+ def test_single_dtype_wo_names(self):
+ # Test single dtype w/o names
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=float, defaultfmt="f%02i")
+ ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
+ assert_equal(mtest, ctrl)
+
+ def test_single_dtype_w_explicit_names(self):
+ # Test single dtype w explicit names
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=float, names="a, b, c")
+ ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
+ dtype=[(_, float) for _ in "abc"])
+ assert_equal(mtest, ctrl)
+
+ def test_single_dtype_w_implicit_names(self):
+ # Test single dtype w implicit names
+ data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data),
+ delimiter=",", dtype=float, names=True)
+ ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
+ dtype=[(_, float) for _ in "abc"])
+ assert_equal(mtest, ctrl)
+
+ def test_easy_structured_dtype(self):
+ # Test easy structured dtype
+ data = "0, 1, 2.3\n4, 5, 6.7"
+ mtest = np.genfromtxt(TextIO(data), delimiter=",",
+ dtype=(int, float, float), defaultfmt="f_%02i")
+ ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
+ dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
+ assert_equal(mtest, ctrl)
+
+ def test_autostrip(self):
+ # Test autostrip
+ data = "01/01/2003 , 1.3, abcde"
+ kwargs = dict(delimiter=",", dtype=None)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ mtest = np.genfromtxt(TextIO(data), **kwargs)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
+ dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
+ assert_equal(mtest, ctrl)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
+ dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
+ assert_equal(mtest, ctrl)
+
+ def test_replace_space(self):
+ # Test the 'replace_space' option
+ txt = "A.A, B (B), C:C\n1, 2, 3.14"
+ # Test default: replace ' ' by '_' and delete non-alphanum chars
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=None)
+ ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
+ ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no replace, no delete
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=None,
+ replace_space='', deletechars='')
+ ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
+ ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no delete (spaces are replaced by _)
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=None,
+ deletechars='')
+ ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
+ ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+
+ def test_replace_space_known_dtype(self):
+ # Test the 'replace_space' (and related) options when dtype != None
+ txt = "A.A, B (B), C:C\n1, 2, 3"
+ # Test default: replace ' ' by '_' and delete non-alphanum chars
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int)
+ ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no replace, no delete
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int,
+ replace_space='', deletechars='')
+ ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no delete (spaces are replaced by _)
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int,
+ deletechars='')
+ ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
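+
+ # A minimal sketch of the default field-name cleanup (an approximation,
+ # not NumPy's exact NameValidator logic):
+ # def clean(name):
+ # name = name.strip().replace(" ", "_")
+ # return "".join(c for c in name if c.isalnum() or c == "_")
+ # clean("B (B)") == "B_B"; with deletechars='' only spaces are replaced.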
+
+ def test_incomplete_names(self):
+ # Test w/ incomplete names
+ data = "A,,C\n0,1,2\n3,4,5"
+ kwargs = dict(delimiter=",", names=True)
+ # w/ dtype=None
+ ctrl = np.array([(0, 1, 2), (3, 4, 5)],
+ dtype=[(_, int) for _ in ('A', 'f0', 'C')])
+ test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
+ assert_equal(test, ctrl)
+ # w/ default dtype
+ ctrl = np.array([(0, 1, 2), (3, 4, 5)],
+ dtype=[(_, float) for _ in ('A', 'f0', 'C')])
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_names_auto_completion(self):
+ # Make sure that names are properly completed
+ data = "1 2 3\n 4 5 6"
+ test = np.genfromtxt(TextIO(data),
+ dtype=(int, float, int), names="a")
+ ctrl = np.array([(1, 2, 3), (4, 5, 6)],
+ dtype=[('a', int), ('f0', float), ('f1', int)])
+ assert_equal(test, ctrl)
+
+ def test_names_with_usecols_bug1636(self):
+ # Make sure we pick up the right names w/ usecols
+ data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
+ ctrl_names = ("A", "C", "E")
+ test = np.genfromtxt(TextIO(data),
+ dtype=(int, int, int), delimiter=",",
+ usecols=(0, 2, 4), names=True)
+ assert_equal(test.dtype.names, ctrl_names)
+ #
+ test = np.genfromtxt(TextIO(data),
+ dtype=(int, int, int), delimiter=",",
+ usecols=("A", "C", "E"), names=True)
+ assert_equal(test.dtype.names, ctrl_names)
+ #
+ test = np.genfromtxt(TextIO(data),
+ dtype=int, delimiter=",",
+ usecols=("A", "C", "E"), names=True)
+ assert_equal(test.dtype.names, ctrl_names)
+
+ def test_fixed_width_names(self):
+ # Test fixed-width w/ names
+ data = " A B C\n 0 1 2.3\n 45 67 9."
+ kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
+ ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
+ dtype=[('A', int), ('B', int), ('C', float)])
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+ #
+ kwargs = dict(delimiter=5, names=True, dtype=None)
+ ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
+ dtype=[('A', int), ('B', int), ('C', float)])
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_filling_values(self):
+ # Test missing values
+ data = b"1, 2, 3\n1, , 5\n0, 6, \n"
+ kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
+ ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
+ test = np.genfromtxt(TextIO(data), **kwargs)
+ assert_equal(test, ctrl)
+
+ def test_comments_is_none(self):
+ # Github issue 329 (None was previously being converted to 'None').
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test[1], b'testNonetherestofthedata')
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test[1], b' testNonetherestofthedata')
+
+ def test_latin1(self):
+ latin1 = b'\xf6\xfc\xf6'
+ norm = b"norm1,norm2,norm3\n"
+ enc = b"test1,testNonethe" + latin1 + b",test3\n"
+ s = norm + enc + norm
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test[1, 0], b"test1")
+ assert_equal(test[1, 1], b"testNonethe" + latin1)
+ assert_equal(test[1, 2], b"test3")
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',',
+ encoding='latin1')
+ assert_equal(test[1, 0], "test1")
+ assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1'))
+ assert_equal(test[1, 2], "test3")
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ assert_equal(test['f0'], 0)
+ assert_equal(test['f1'], b"testNonethe" + latin1)
+
+ def test_binary_decode_autodtype(self):
+ utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
+ v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
+ assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
+
+ def test_utf8_byte_encoding(self):
+ utf8 = b"\xcf\x96"
+ norm = b"norm1,norm2,norm3\n"
+ enc = b"test1,testNonethe" + utf8 + b",test3\n"
+ s = norm + enc + norm
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
+ test = np.genfromtxt(TextIO(s),
+ dtype=None, comments=None, delimiter=',')
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctl = np.array([
+ [b'norm1', b'norm2', b'norm3'],
+ [b'test1', b'testNonethe' + utf8, b'test3'],
+ [b'norm1', b'norm2', b'norm3']])
+ assert_array_equal(test, ctl)
+
+ def test_utf8_file(self):
+ utf8 = b"\xcf\x96"
+ with temppath() as path:
+ with open(path, "wb") as f:
+ f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="UTF-8")
+ ctl = np.array([
+ ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
+ ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
+ dtype=np.unicode_)
+ assert_array_equal(test, ctl)
+
+ # test a mixed dtype
+ with open(path, "wb") as f:
+ f.write(b"0,testNonethe" + utf8)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',', encoding="UTF-8")
+ assert_equal(test['f0'], 0)
+ assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
+
+ def test_utf8_file_nodtype_unicode(self):
+ # bytes encoding with non-latin1 -> unicode upcast
+ utf8 = '\u03d6'
+ latin1 = '\xf6\xfc\xf6'
+
+ # Skip the test if the utf8 test string cannot be encoded with the
+ # preferred encoding (assumed to be the default encoding of io.open).
+ # This could eventually use pytest.mark.xfail(raises=...) instead.
+ try:
+ encoding = locale.getpreferredencoding()
+ utf8.encode(encoding)
+ except (UnicodeError, ImportError):
+ pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
+ 'unable to encode utf8 in preferred encoding')
+
+ with temppath() as path:
+ with io.open(path, "wt") as f:
+ f.write("norm1,norm2,norm3\n")
+ f.write("norm1," + latin1 + ",norm3\n")
+ f.write("test1,testNonethe" + utf8 + ",test3\n")
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '',
+ np.VisibleDeprecationWarning)
+ test = np.genfromtxt(path, dtype=None, comments=None,
+ delimiter=',')
+ # Check for warning when encoding not specified.
+ assert_(w[0].category is np.VisibleDeprecationWarning)
+ ctl = np.array([
+ ["norm1", "norm2", "norm3"],
+ ["norm1", latin1, "norm3"],
+ ["test1", "testNonethe" + utf8, "test3"]],
+ dtype=np.unicode_)
+ assert_array_equal(test, ctl)
+
+ def test_recfromtxt(self):
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ kwargs = dict(delimiter=",", missing_values="N/A", names=True)
+ test = np.recfromtxt(data, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+ #
+ data = TextIO('A,B\n0,1\n2,N/A')
+ test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_equal(test.A, [0, 2])
+
+ def test_recfromcsv(self):
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
+ test = np.recfromcsv(data, dtype=None, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+ #
+ data = TextIO('A,B\n0,1\n2,N/A')
+ test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
+ control = ma.array([(0, 1), (2, -1)],
+ mask=[(False, False), (False, True)],
+ dtype=[('A', int), ('B', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_equal(test.A, [0, 2])
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ test = np.recfromcsv(data, missing_values='N/A',)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('a', int), ('b', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+ #
+ data = TextIO('A,B\n0,1\n2,3')
+ dtype = [('a', int), ('b', float)]
+ test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=dtype)
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+ # gh-10394
+ data = TextIO('color\n"red"\n"blue"')
+ test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
+ control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test, control)
+
+ def test_max_rows(self):
+ # Test the `max_rows` keyword argument.
+ data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
+ txt = TextIO(data)
+ a1 = np.genfromtxt(txt, max_rows=3)
+ a2 = np.genfromtxt(txt)
+ assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
+ assert_equal(a2, [[7, 8], [9, 10]])
+
+ # max_rows must be at least 1.
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
+
+ # An input with several invalid rows.
+ data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
+
+ test = np.genfromtxt(TextIO(data), max_rows=2)
+ control = np.array([[1., 1.], [2., 2.]])
+ assert_equal(test, control)
+
+ # Test keywords conflict
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
+ max_rows=4)
+
+ # Test with an invalid row inside max_rows: raises by default
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
+
+ # Test with invalid rows and invalid_raise=False
+ with suppress_warnings() as sup:
+ sup.filter(ConversionWarning)
+
+ test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
+ control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+ assert_equal(test, control)
+
+ test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
+ control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+ assert_equal(test, control)
+
+ # Structured array with field names.
+ data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
+
+ # Test with header, names and comments
+ txt = TextIO(data)
+ test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
+ control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
+ dtype=[('c', '<f8'), ('d', '<f8')])
+ assert_equal(test, control)
+ # To continue reading the same "file", don't use skip_header or
+ # names, and use the previously determined dtype.
+ test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
+ control = np.array([(4.0, 4.0), (5.0, 5.0)],
+ dtype=[('c', '<f8'), ('d', '<f8')])
+ assert_equal(test, control)
+
+ def test_gft_using_filename(self):
+ # Test that we can load data from a filename as well as a file
+ # object
+ tgt = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ with temppath() as name:
+ with open(name, 'w') as f:
+ f.write(data)
+ res = np.genfromtxt(name)
+ assert_array_equal(res, tgt)
+
+ def test_gft_from_gzip(self):
+ # Test that we can load data from a gzipped file
+ wanted = np.arange(6).reshape((2, 3))
+ linesep = ('\n', '\r\n', '\r')
+
+ for sep in linesep:
+ data = '0 1 2' + sep + '3 4 5'
+ s = BytesIO()
+ with gzip.GzipFile(fileobj=s, mode='w') as g:
+ g.write(asbytes(data))
+
+ with temppath(suffix='.gz') as name:
+ with open(name, 'wb') as f:
+ f.write(s.getvalue())
+ assert_array_equal(np.genfromtxt(name), wanted)
+
+ def test_gft_using_generator(self):
+ # genfromtxt doesn't work with unicode here, so the generator yields bytes.
+ def count():
+ for i in range(10):
+ yield asbytes("%d" % i)
+
+ res = np.genfromtxt(count())
+ assert_array_equal(res, np.arange(10))
+
+ def test_auto_dtype_largeint(self):
+ # Regression test for numpy/numpy#5635 whereby large integers could
+ # cause OverflowErrors.
+
+ # Test the automatic definition of the output dtype
+ #
+ # 2**66 = 73786976294838206464 => should convert to float
+ # 2**34 = 17179869184 => should convert to int64
+ # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
+ # int64 on 64-bit systems)
+
+ data = TextIO('73786976294838206464 17179869184 1024')
+
+ test = np.genfromtxt(data, dtype=None)
+
+ assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
+
+ assert_(test.dtype['f0'] == float)
+ assert_(test.dtype['f1'] == np.int64)
+ assert_(test.dtype['f2'] == np.int_)
+
+ assert_allclose(test['f0'], 73786976294838206464.)
+ assert_equal(test['f1'], 17179869184)
+ assert_equal(test['f2'], 1024)
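+
+ # The bounds driving the inference, for reference:
+ # np.iinfo(np.int64).max == 9223372036854775807 < 2**66, hence float;
+ # 2**34 == 17179869184 > np.iinfo(np.int32).max == 2147483647, so it
+ # needs int64 rather than a 32-bit int.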
+
+ def test_unpack_float_data(self):
+ txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0")
+ a, b, c = np.loadtxt(txt, delimiter=",", unpack=True)
+ assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0]))
+ assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0]))
+ assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0]))
+
+ def test_unpack_structured(self):
+ # Regression test for gh-4341
+ # Unpacking should work on structured arrays
+ txt = TextIO("M 21 72\nF 35 58")
+ dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
+ a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_equal(a.dtype, np.dtype('S1'))
+ assert_equal(b.dtype, np.dtype('i4'))
+ assert_equal(c.dtype, np.dtype('f4'))
+ assert_array_equal(a, np.array([b'M', b'F']))
+ assert_array_equal(b, np.array([21, 35]))
+ assert_array_equal(c, np.array([72., 58.]))
+
+ def test_unpack_auto_dtype(self):
+ # Regression test for gh-4341
+ # Unpacking should work when dtype=None
+ txt = TextIO("M 21 72.\nF 35 58.")
+ expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
+ test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
+ for arr, result in zip(expected, test):
+ assert_array_equal(arr, result)
+ assert_equal(arr.dtype, result.dtype)
+
+ def test_unpack_single_name(self):
+ # Regression test for gh-4341
+ # Unpacking should work when structured dtype has only one field
+ txt = TextIO("21\n35")
+ dt = {'names': ('a',), 'formats': ('i4',)}
+ expected = np.array([21, 35], dtype=np.int32)
+ test = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_array_equal(expected, test)
+ assert_equal(expected.dtype, test.dtype)
+
+ def test_squeeze_scalar(self):
+ # Regression test for gh-4341
+ # Unpacking a scalar should give zero-dim output,
+ # even if dtype is structured
+ txt = TextIO("1")
+ dt = {'names': ('a',), 'formats': ('i4',)}
+ expected = np.array((1,), dtype=np.int32)
+ test = np.genfromtxt(txt, dtype=dt, unpack=True)
+ assert_array_equal(expected, test)
+ assert_equal((), test.shape)
+ assert_equal(expected.dtype, test.dtype)
+
+ @pytest.mark.parametrize("ndim", [0, 1, 2])
+ def test_ndmin_keyword(self, ndim: int):
+ # ndmin should behave the same here as in loadtxt, since the two
+ # functions should agree for non-missing values
+ txt = "42"
+
+ a = np.loadtxt(StringIO(txt), ndmin=ndim)
+ b = np.genfromtxt(StringIO(txt), ndmin=ndim)
+
+ assert_array_equal(a, b)
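+
+ # For the scalar input "42", the expected shapes are (mirroring the
+ # documented loadtxt behaviour):
+ # ndmin=0 -> (), ndmin=1 -> (1,), ndmin=2 -> (1, 1).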
+
+
+class TestPathUsage:
+ # Test that pathlib.Path can be used
+ def test_loadtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ a = np.array([[1.1, 2], [3, 4]])
+ np.savetxt(path, a)
+ x = np.loadtxt(path)
+ assert_array_equal(x, a)
+
+ def test_save_load(self):
+ # Test that pathlib.Path instances can be used with save.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path)
+ assert_array_equal(data, a)
+
+ def test_save_load_memmap(self):
+ # Test that pathlib.Path instances can be loaded mem-mapped.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path, mmap_mode='r')
+ assert_array_equal(data, a)
+ # close the mem-mapped file
+ del data
+ if IS_PYPY:
+ break_cycles()
+ break_cycles()
+
+ @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly")
+ def test_save_load_memmap_readwrite(self):
+ # Test that pathlib.Path instances can be written mem-mapped.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ b = np.load(path, mmap_mode='r+')
+ a[0][0] = 5
+ b[0][0] = 5
+ del b # closes the file
+ if IS_PYPY:
+ break_cycles()
+ break_cycles()
+ data = np.load(path)
+ assert_array_equal(data, a)
+
+ def test_savez_load(self):
+ # Test that pathlib.Path instances can be used with savez.
+ with temppath(suffix='.npz') as path:
+ path = Path(path)
+ np.savez(path, lab='place holder')
+ with np.load(path) as data:
+ assert_array_equal(data['lab'], 'place holder')
+
+ def test_savez_compressed_load(self):
+ # Test that pathlib.Path instances can be used with savez.
+ with temppath(suffix='.npz') as path:
+ path = Path(path)
+ np.savez_compressed(path, lab='place holder')
+ data = np.load(path)
+ assert_array_equal(data['lab'], 'place holder')
+ data.close()
+
+ def test_genfromtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ a = np.array([(1, 2), (3, 4)])
+ np.savetxt(path, a)
+ data = np.genfromtxt(path)
+ assert_array_equal(a, data)
+
+ def test_recfromtxt(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ with path.open('w') as f:
+ f.write('A,B\n0,1\n2,3')
+
+ kwargs = dict(delimiter=",", missing_values="N/A", names=True)
+ test = np.recfromtxt(path, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+ def test_recfromcsv(self):
+ with temppath(suffix='.txt') as path:
+ path = Path(path)
+ with path.open('w') as f:
+ f.write('A,B\n0,1\n2,3')
+
+ kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
+ test = np.recfromcsv(path, dtype=None, **kwargs)
+ control = np.array([(0, 1), (2, 3)],
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
+ assert_equal(test, control)
+
+
+def test_gzip_load():
+ a = np.random.random((5, 5))
+
+ s = BytesIO()
+ f = gzip.GzipFile(fileobj=s, mode="w")
+
+ np.save(f, a)
+ f.close()
+ s.seek(0)
+
+ f = gzip.GzipFile(fileobj=s, mode="r")
+ assert_array_equal(np.load(f), a)
+
+
+# These next two classes encode the minimal API needed to save()/load()
+# arrays; the `test_ducktyping` test below ensures they work correctly.
+class JustWriter:
+ def __init__(self, base):
+ self.base = base
+
+ def write(self, s):
+ return self.base.write(s)
+
+ def flush(self):
+ return self.base.flush()
+
+class JustReader:
+ def __init__(self, base):
+ self.base = base
+
+ def read(self, n):
+ return self.base.read(n)
+
+ def seek(self, off, whence=0):
+ return self.base.seek(off, whence)
+
+
+def test_ducktyping():
+ a = np.random.random((5, 5))
+
+ s = BytesIO()
+ f = JustWriter(s)
+
+ np.save(f, a)
+ f.flush()
+ s.seek(0)
+
+ f = JustReader(s)
+ assert_array_equal(np.load(f), a)
+
+
+def test_gzip_loadtxt():
+ # Thanks to a Windows quirk, we can't use NamedTemporaryFile: a file
+ # created by it cannot be reopened by another open call. So we first
+ # build the gzipped bytes of the test reference array, write them to
+ # a securely opened file, and then read that file back with loadtxt.
+ s = BytesIO()
+ g = gzip.GzipFile(fileobj=s, mode='w')
+ g.write(b'1 2 3\n')
+ g.close()
+
+ s.seek(0)
+ with temppath(suffix='.gz') as name:
+ with open(name, 'wb') as f:
+ f.write(s.read())
+ res = np.loadtxt(name)
+ s.close()
+
+ assert_array_equal(res, [1, 2, 3])
+
+
+def test_gzip_loadtxt_from_string():
+ s = BytesIO()
+ f = gzip.GzipFile(fileobj=s, mode="w")
+ f.write(b'1 2 3\n')
+ f.close()
+ s.seek(0)
+
+ f = gzip.GzipFile(fileobj=s, mode="r")
+ assert_array_equal(np.loadtxt(f), [1, 2, 3])
+
+
+def test_npzfile_dict():
+ s = BytesIO()
+ x = np.zeros((3, 3))
+ y = np.zeros((3, 3))
+
+ np.savez(s, x=x, y=y)
+ s.seek(0)
+
+ z = np.load(s)
+
+ assert_('x' in z)
+ assert_('y' in z)
+ assert_('x' in z.keys())
+ assert_('y' in z.keys())
+
+ for f, a in z.items():
+ assert_(f in ['x', 'y'])
+ assert_equal(a.shape, (3, 3))
+
+ assert_(len(z.items()) == 2)
+
+ for f in z:
+ assert_(f in ['x', 'y'])
+
+ assert_('x' in z.keys())
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_load_refcount():
+ # Check that objects returned by np.load are directly freed based on
+ # their refcount, rather than needing the gc to collect them.
+
+ f = BytesIO()
+ np.savez(f, [1, 2, 3])
+ f.seek(0)
+
+ with assert_no_gc_cycles():
+ np.load(f)
+
+ f.seek(0)
+ dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+ with assert_no_gc_cycles():
+ x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py
new file mode 100644
index 00000000..8a5b044b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_loadtxt.py
@@ -0,0 +1,1039 @@
+"""
+Tests specific to `np.loadtxt`, added when loadtxt was moved to a C-backed
+implementation.
+These tests complement those found in `test_io.py`.
+"""
+
+import sys
+import os
+import pytest
+from tempfile import NamedTemporaryFile, mkstemp
+from io import StringIO
+
+import numpy as np
+from numpy.ma.testutils import assert_equal
+from numpy.testing import assert_array_equal, HAS_REFCOUNT, IS_PYPY
+
+
+def test_scientific_notation():
+ """Test that both 'e' and 'E' are parsed correctly."""
+ data = StringIO(
+ (
+ "1.0e-1,2.0E1,3.0\n"
+ "4.0e-2,5.0E-1,6.0\n"
+ "7.0e-3,8.0E1,9.0\n"
+ "0.0e-4,1.0E-1,2.0"
+ )
+ )
+ expected = np.array(
+ [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]]
+ )
+ assert_array_equal(np.loadtxt(data, delimiter=","), expected)
+
+
+@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"])
+def test_comment_multiple_chars(comment):
+ content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n"
+ txt = StringIO(content.replace("#", comment))
+ a = np.loadtxt(txt, delimiter=",", comments=comment)
+ assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]])
+
+
+@pytest.fixture
+def mixed_types_structured():
+ """
+ Fixture providing heterogeneous input data with a structured dtype, along
+ with the associated structured array.
+ """
+ data = StringIO(
+ (
+ "1000;2.4;alpha;-34\n"
+ "2000;3.1;beta;29\n"
+ "3500;9.9;gamma;120\n"
+ "4090;8.1;delta;0\n"
+ "5001;4.4;epsilon;-99\n"
+ "6543;7.8;omega;-1\n"
+ )
+ )
+ dtype = np.dtype(
+ [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]
+ )
+ expected = np.array(
+ [
+ (1000, 2.4, "alpha", -34),
+ (2000, 3.1, "beta", 29),
+ (3500, 9.9, "gamma", 120),
+ (4090, 8.1, "delta", 0),
+ (5001, 4.4, "epsilon", -99),
+ (6543, 7.8, "omega", -1)
+ ],
+ dtype=dtype
+ )
+ return data, dtype, expected
+
+
+@pytest.mark.parametrize('skiprows', [0, 1, 2, 3])
+def test_structured_dtype_and_skiprows_no_empty_lines(
+ skiprows, mixed_types_structured):
+ data, dtype, expected = mixed_types_structured
+ a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows)
+ assert_array_equal(a, expected[skiprows:])
+
+
+def test_unpack_structured(mixed_types_structured):
+ data, dtype, expected = mixed_types_structured
+
+ a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True)
+ assert_array_equal(a, expected["f0"])
+ assert_array_equal(b, expected["f1"])
+ assert_array_equal(c, expected["f2"])
+ assert_array_equal(d, expected["f3"])
+
+
+def test_structured_dtype_with_shape():
+ dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)])
+ data = StringIO("0,1,2,3\n6,7,8,9\n")
+ expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype)
+ assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected)
+
+
+def test_structured_dtype_with_multi_shape():
+ dtype = np.dtype([("a", "u1", (2, 2))])
+ data = StringIO("0 1 2 3\n")
+ expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype)
+ assert_array_equal(np.loadtxt(data, dtype=dtype), expected)
+
+
+def test_nested_structured_subarray():
+ # Test from gh-16678
+ point = np.dtype([('x', float), ('y', float)])
+ dt = np.dtype([('code', int), ('points', point, (2,))])
+ data = StringIO("100,1,2,3,4\n200,5,6,7,8\n")
+ expected = np.array(
+ [
+ (100, [(1., 2.), (3., 4.)]),
+ (200, [(5., 6.), (7., 8.)]),
+ ],
+ dtype=dt
+ )
+ assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected)
+
+
+def test_structured_dtype_offsets():
+ # An aligned structured dtype will have additional padding
+ dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True)
+ data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n")
+ expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt)
+ assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected)
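+
+ # Worked example of the padding: with align=True the i4 fields land on
+ # 4-byte boundaries, giving offsets 0, 4, 8, 12, 16, 20 and
+ # dt.itemsize == 24 instead of the packed 15 bytes.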
+
+
+@pytest.mark.parametrize("param", ("skiprows", "max_rows"))
+def test_exception_negative_row_limits(param):
+ """skiprows and max_rows should raise for negative parameters."""
+ with pytest.raises(ValueError, match="argument must be nonnegative"):
+ np.loadtxt("foo.bar", **{param: -3})
+
+
+@pytest.mark.parametrize("param", ("skiprows", "max_rows"))
+def test_exception_noninteger_row_limits(param):
+ with pytest.raises(TypeError, match="argument must be an integer"):
+ np.loadtxt("foo.bar", **{param: 1.0})
+
+
+@pytest.mark.parametrize(
+ "data, shape",
+ [
+ ("1 2 3 4 5\n", (1, 5)), # Single row
+ ("1\n2\n3\n4\n5\n", (5, 1)), # Single column
+ ]
+)
+def test_ndmin_single_row_or_col(data, shape):
+ arr = np.array([1, 2, 3, 4, 5])
+ arr2d = arr.reshape(shape)
+
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr)
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr)
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr)
+ assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d)
+
+
+@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"])
+def test_bad_ndmin(badval):
+ with pytest.raises(ValueError, match="Illegal value of ndmin keyword"):
+ np.loadtxt("foo.bar", ndmin=badval)
+
+
+@pytest.mark.parametrize(
+ "ws",
+ (
+ " ", # space
+ "\t", # tab
+ "\u2003", # em
+ "\u00A0", # non-break
+ "\u3000", # ideographic space
+ )
+)
+def test_blank_lines_spaces_delimit(ws):
+ txt = StringIO(
+ f"1 2{ws}30\n\n{ws}\n"
+ f"4 5 60{ws}\n {ws} \n"
+ f"7 8 {ws} 90\n # comment\n"
+ f"3 2 1"
+ )
+ # NOTE: It is unclear that the ` # comment` line should succeed, except
+ # for delimiter=None, which treats any whitespace as a delimiter (and
+ # maybe should just be implemented closer to Python's str.split()).
+ expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])
+ assert_equal(
+ np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected
+ )
+
+
+def test_blank_lines_normal_delimiter():
+ txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1')
+ expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])
+ assert_equal(
+ np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected
+ )
+
+
+@pytest.mark.parametrize("dtype", (float, object))
+def test_maxrows_no_blank_lines(dtype):
+ txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0")
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2)
+ assert_equal(res.dtype, dtype)
+ assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype))
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2")))
+def test_exception_message_bad_values(dtype):
+ txt = StringIO("1,2\n3,XXX\n5,6")
+ msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2"
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(txt, dtype=dtype, delimiter=",")
+
+
+def test_converters_negative_indices():
+ txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0')
+ conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
+ expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]])
+ res = np.loadtxt(
+ txt, dtype=np.float64, delimiter=",", converters=conv, encoding=None
+ )
+ assert_equal(res, expected)
+
+
+def test_converters_negative_indices_with_usecols():
+ txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n')
+ conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}
+ expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]])
+ res = np.loadtxt(
+ txt,
+ dtype=np.float64,
+ delimiter=",",
+ converters=conv,
+ usecols=[0, -1],
+ encoding=None,
+ )
+ assert_equal(res, expected)
+
+ # Second test with variable number of rows:
+ res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",",
+ usecols=[0, -1], converters={-1: (lambda x: -1)})
+ assert_array_equal(res, [[0, -1], [0, -1]])
+
+
+def test_ragged_usecols():
+ # usecols, and negative ones, work even with varying number of columns.
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
+ expected = np.array([[0, 0], [0, 0], [0, 0]])
+ res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])
+ assert_equal(res, expected)
+
+ txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n")
+ with pytest.raises(ValueError,
+ match="invalid column index -2 at row 2 with 1 columns"):
+ # There is no -2 column in the second row:
+ np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])
+
+
+def test_empty_usecols():
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
+ res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[])
+ assert res.shape == (3,)
+ assert res.dtype == np.dtype([])
+
+
+@pytest.mark.parametrize("c1", ["a", "の", "🫕"])
+@pytest.mark.parametrize("c2", ["a", "の", "🫕"])
+def test_large_unicode_characters(c1, c2):
+ # c1 and c2 span ascii, 16bit and 32bit range.
+ txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g")
+ res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",")
+ expected = np.array(
+ [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")],
+ dtype=np.dtype('U12')
+ )
+ assert_equal(res, expected)
+
+
+def test_unicode_with_converter():
+ txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n")
+ conv = {0: lambda s: s.upper()}
+ res = np.loadtxt(
+ txt,
+ dtype=np.dtype("U12"),
+ converters=conv,
+ delimiter=",",
+ encoding=None
+ )
+ expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']])
+ assert_equal(res, expected)
+
+
+def test_converter_with_structured_dtype():
+ txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n')
+ dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')])
+ conv = {0: lambda s: int(10*float(s)), -1: lambda s: s.upper()}
+ res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv)
+ expected = np.array(
+ [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt
+ )
+ assert_equal(res, expected)
+
+
+def test_converter_with_unicode_dtype():
+ """
+ With the default 'bytes' encoding, tokens are encoded prior to being
+ passed to the converter. This means that the output of the converter may
+ be bytes instead of unicode as expected by `read_rows`.
+
+ This test checks that outputs from the above scenario are properly decoded
+ prior to parsing by `read_rows`.
+ """
+ txt = StringIO('abc,def\nrst,xyz')
+ conv = bytes.upper
+ res = np.loadtxt(
+ txt, dtype=np.dtype("U3"), converters=conv, delimiter=",")
+ expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
+ assert_equal(res, expected)
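+
+ # The round trip sketched: 'abc' is encoded to b'abc', bytes.upper
+ # returns b'ABC', and the result is decoded back to 'ABC' before being
+ # stored in the U3 output.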
+
+
+def test_read_huge_row():
+ row = "1.5, 2.5," * 50000
+ row = row[:-1] + "\n"
+ txt = StringIO(row * 2)
+ res = np.loadtxt(txt, delimiter=",", dtype=float)
+ assert_equal(res, np.tile([1.5, 2.5], (2, 50000)))
+
+
+@pytest.mark.parametrize("dtype", "edfgFDG")
+def test_huge_float(dtype):
+ # Covers a non-optimized path that is rarely taken:
+ field = "0" * 1000 + ".123456789"
+ dtype = np.dtype(dtype)
+ value = np.loadtxt([field], dtype=dtype)[()]
+ assert value == dtype.type("0.123456789")
+
+
+@pytest.mark.parametrize(
+ ("given_dtype", "expected_dtype"),
+ [
+ ("S", np.dtype("S5")),
+ ("U", np.dtype("U5")),
+ ],
+)
+def test_string_no_length_given(given_dtype, expected_dtype):
+ """
+ The given dtype is just 'S' or 'U' with no length. In these cases, the
+ length of the resulting dtype is determined by the longest string found
+ in the file.
+ """
+ txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n")
+ res = np.loadtxt(txt, dtype=given_dtype, delimiter=",")
+ expected = np.array(
+ [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype
+ )
+ assert_equal(res, expected)
+ assert_equal(res.dtype, expected_dtype)
+
+
+def test_float_conversion():
+ """
+ Some tests that the conversion to float64 works as accurately as the
+ Python built-in `float` function. In a naive version of the float parser,
+ these strings resulted in values that were off by an ULP or two.
+ """
+ strings = [
+ '0.9999999999999999',
+ '9876543210.123456',
+ '5.43215432154321e+300',
+ '0.901',
+ '0.333',
+ ]
+ txt = StringIO('\n'.join(strings))
+ res = np.loadtxt(txt)
+ expected = np.array([float(s) for s in strings])
+ assert_equal(res, expected)
+
+
+def test_bool():
+ # Simple test for bool via integer
+ txt = StringIO("1, 0\n10, -1")
+ res = np.loadtxt(txt, dtype=bool, delimiter=",")
+ assert res.dtype == bool
+ assert_array_equal(res, [[True, False], [True, True]])
+ # Make sure we use only 1 and 0 on the byte level:
+ assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]])
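+
+ # The uint8 view works because NumPy bools store one byte per element,
+ # e.g. np.array([True, False]).view(np.uint8) -> array([1, 0]).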
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")
+def test_integer_signs(dtype):
+ dtype = np.dtype(dtype)
+ assert np.loadtxt(["+2"], dtype=dtype) == 2
+ if dtype.kind == "u":
+ with pytest.raises(ValueError):
+ np.loadtxt(["-1\n"], dtype=dtype)
+ else:
+ assert np.loadtxt(["-2\n"], dtype=dtype) == -2
+
+ for sign in ["++", "+-", "--", "-+"]:
+ with pytest.raises(ValueError):
+ np.loadtxt([f"{sign}2\n"], dtype=dtype)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")
+def test_implicit_cast_float_to_int_fails(dtype):
+ txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6")
+ with pytest.raises(ValueError):
+ np.loadtxt(txt, dtype=dtype, delimiter=",")
+
+
+@pytest.mark.parametrize("dtype", (np.complex64, np.complex128))
+@pytest.mark.parametrize("with_parens", (False, True))
+def test_complex_parsing(dtype, with_parens):
+ s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)"
+ if not with_parens:
+ s = s.replace("(", "").replace(")", "")
+
+ res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",")
+ expected = np.array(
+ [[1.0-2.5j, 3.75, 7-5j], [4.0, -1900j, 0]], dtype=dtype
+ )
+ assert_equal(res, expected)
+
+
+def test_read_from_generator():
+ def gen():
+ for i in range(4):
+ yield f"{i},{2*i},{i**2}"
+
+ res = np.loadtxt(gen(), dtype=int, delimiter=",")
+ expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]])
+ assert_equal(res, expected)
+
+
+def test_read_from_generator_multitype():
+ def gen():
+ for i in range(3):
+ yield f"{i} {i / 4}"
+
+ res = np.loadtxt(gen(), dtype="i, d", delimiter=" ")
+ expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d")
+ assert_equal(res, expected)
+
+
+def test_read_from_bad_generator():
+ def gen():
+ for entry in ["1,2", b"3, 5", 12738]:
+ yield entry
+
+ with pytest.raises(
+ TypeError, match=r"non-string returned while reading data"):
+ np.loadtxt(gen(), dtype="i, i", delimiter=",")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+def test_object_cleanup_on_read_error():
+ sentinel = object()
+ already_read = 0
+
+ def conv(x):
+ nonlocal already_read
+ if already_read > 4999:
+ raise ValueError("failed half-way through!")
+ already_read += 1
+ return sentinel
+
+ txt = StringIO("x\n" * 10000)
+
+ with pytest.raises(ValueError, match="at row 5000, column 1"):
+ np.loadtxt(txt, dtype=object, converters={0: conv})
+
+ assert sys.getrefcount(sentinel) == 2
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_character_not_bytes_compatible():
+ """Test exception when a character cannot be encoded as 'S'."""
+ data = StringIO("–") # == \u2013
+ with pytest.raises(ValueError):
+ np.loadtxt(data, dtype="S5")
+
+
+@pytest.mark.parametrize("conv", (0, [float], ""))
+def test_invalid_converter(conv):
+ msg = (
+ "converters must be a dictionary mapping columns to converter "
+ "functions or a single callable."
+ )
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(StringIO("1 2\n3 4"), converters=conv)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_converters_dict_raises_non_integer_key():
+ with pytest.raises(TypeError, match="keys of the converters dict"):
+ np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int})
+ with pytest.raises(TypeError, match="keys of the converters dict"):
+ np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0)
+
+
+@pytest.mark.parametrize("bad_col_ind", (3, -3))
+def test_converters_dict_raises_non_col_key(bad_col_ind):
+ data = StringIO("1 2\n3 4")
+ with pytest.raises(ValueError, match="converter specified for column"):
+ np.loadtxt(data, converters={bad_col_ind: int})
+
+
+def test_converters_dict_raises_val_not_callable():
+ with pytest.raises(TypeError,
+ match="values of the converters dictionary must be callable"):
+ np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1})
+
+
+@pytest.mark.parametrize("q", ('"', "'", "`"))
+def test_quoted_field(q):
+ txt = StringIO(
+ f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n"
+ )
+ dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
+ expected = np.array(
+ [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
+ )
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q)
+ assert_array_equal(res, expected)
+
+
+@pytest.mark.parametrize("q", ('"', "'", "`"))
+def test_quoted_field_with_whitespace_delimiter(q):
+ txt = StringIO(
+ f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n"
+ )
+ dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])
+ expected = np.array(
+ [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype
+ )
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q)
+ assert_array_equal(res, expected)
+
+
+def test_quote_support_default():
+ """Support for quoted fields is disabled by default."""
+ txt = StringIO('"lat,long", 45, 30\n')
+ dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
+
+ with pytest.raises(ValueError, match="the number of columns changed"):
+ np.loadtxt(txt, dtype=dtype, delimiter=",")
+
+ # Enable quoting support with non-None value for quotechar param
+ txt.seek(0)
+ expected = np.array([("lat,long", 45., 30.)], dtype=dtype)
+
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')
+ assert_array_equal(res, expected)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_quotechar_multichar_error():
+ txt = StringIO("1,2\n3,4")
+ msg = r".*must be a single unicode character or None"
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, delimiter=",", quotechar="''")
+
+
+def test_comment_multichar_error_with_quote():
+ txt = StringIO("1,2\n3,4")
+ msg = (
+ "when multiple comments or a multi-character comment is given, "
+ "quotes are not supported."
+ )
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(txt, delimiter=",", comments="123", quotechar='"')
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"')
+
+ # A single character string in a tuple is unpacked though:
+ res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'")
+ assert_equal(res, [[1, 2], [3, 4]])
+
+
+def test_structured_dtype_with_quotes():
+ data = StringIO(
+ (
+ "1000;2.4;'alpha';-34\n"
+ "2000;3.1;'beta';29\n"
+ "3500;9.9;'gamma';120\n"
+ "4090;8.1;'delta';0\n"
+ "5001;4.4;'epsilon';-99\n"
+ "6543;7.8;'omega';-1\n"
+ )
+ )
+ dtype = np.dtype(
+ [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]
+ )
+ expected = np.array(
+ [
+ (1000, 2.4, "alpha", -34),
+ (2000, 3.1, "beta", 29),
+ (3500, 9.9, "gamma", 120),
+ (4090, 8.1, "delta", 0),
+ (5001, 4.4, "epsilon", -99),
+ (6543, 7.8, "omega", -1)
+ ],
+ dtype=dtype
+ )
+ res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'")
+ assert_array_equal(res, expected)
+
+
+def test_quoted_field_is_not_empty():
+ txt = StringIO('1\n\n"4"\n""')
+ expected = np.array(["1", "4", ""], dtype="U1")
+ res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')
+ assert_equal(res, expected)
+
+
+def test_quoted_field_is_not_empty_nonstrict():
+ # Same as test_quoted_field_is_not_empty but check that we are not strict
+ # about missing closing quote (this is the `csv.reader` default also)
+ txt = StringIO('1\n\n"4"\n"')
+ expected = np.array(["1", "4", ""], dtype="U1")
+ res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')
+ assert_equal(res, expected)
+
+
+def test_consecutive_quotechar_escaped():
+ txt = StringIO('"Hello, my name is ""Monty""!"')
+ expected = np.array('Hello, my name is "Monty"!', dtype="U40")
+ res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"')
+ assert_equal(res, expected)
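+
+ # This follows the csv-style escaping convention: a doubled quotechar
+ # inside a quoted field denotes a literal quote, so '""Monty""' reads
+ # back as '"Monty"'.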
+
+
+@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n"))
+@pytest.mark.parametrize("ndmin", (0, 1, 2))
+@pytest.mark.parametrize("usecols", [None, (1, 2, 3)])
+def test_warn_on_no_data(data, ndmin, usecols):
+ """Check that a UserWarning is emitted when no data is read from input."""
+ if usecols is not None:
+ expected_shape = (0, 3)
+ elif ndmin == 2:
+ expected_shape = (0, 1) # guess a single column?!
+ else:
+ expected_shape = (0,)
+
+ txt = StringIO(data)
+ with pytest.warns(UserWarning, match="input contained no data"):
+ res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)
+ assert res.shape == expected_shape
+
+ with NamedTemporaryFile(mode="w") as fh:
+ fh.write(data)
+ fh.seek(0)
+ with pytest.warns(UserWarning, match="input contained no data"):
+ res = np.loadtxt(fh.name, ndmin=ndmin, usecols=usecols)
+ assert res.shape == expected_shape
+
+
+@pytest.mark.parametrize("skiprows", (2, 3))
+def test_warn_on_skipped_data(skiprows):
+ data = "1 2 3\n4 5 6"
+ txt = StringIO(data)
+ with pytest.warns(UserWarning, match="input contained no data"):
+ np.loadtxt(txt, skiprows=skiprows)
+
+
+@pytest.mark.parametrize(["dtype", "value"], [
+ ("i2", 0x0001), ("u2", 0x0001),
+ ("i4", 0x00010203), ("u4", 0x00010203),
+ ("i8", 0x0001020304050607), ("u8", 0x0001020304050607),
+ # The following values are constructed to lead to unique bytes:
+ ("float16", 3.07e-05),
+ ("float32", 9.2557e-41), ("complex64", 9.2557e-41+2.8622554e-29j),
+ ("float64", -1.758571353180402e-24),
+ # Here and below, the repr side-steps a small loss of precision in
+ # complex `str` in PyPy (which is probably fine, as repr works):
+ ("complex128", repr(5.406409232372729e-29-1.758571353180402e-24j)),
+ # Use integer values that fit into double. Everything else leads to
+ # problems due to longdoubles going via double and decimal strings
+ # causing rounding errors.
+ ("longdouble", 0x01020304050607),
+ ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))),
+ ("U2", "\U00010203\U000a0b0c")])
+@pytest.mark.parametrize("swap", [True, False])
+def test_byteswapping_and_unaligned(dtype, value, swap):
+ # Try to create "interesting" values within the valid unicode range:
+ dtype = np.dtype(dtype)
+ data = [f"x,{value}\n"] # repr as PyPy `str` truncates some
+ if swap:
+ dtype = dtype.newbyteorder()
+ full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False)
+ # The above ensures that the interesting "b" field is unaligned:
+ assert full_dt.fields["b"][1] == 1
+ res = np.loadtxt(data, dtype=full_dt, delimiter=",", encoding=None,
+ max_rows=1) # max-rows prevents over-allocation
+ assert res["b"] == dtype.type(value)
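+
+ # With align=False, "a" (1 byte) is packed directly before "b", so "b"
+ # starts at offset 1 -- deliberately misaligned for any dtype wider
+ # than one byte.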
+
+
+@pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + "efdFD" + "?")
+def test_unicode_whitespace_stripping(dtype):
+ # Test that all numeric types (and bool) strip whitespace correctly.
+ # \u202F is a narrow no-break space; `\n` is just whitespace if quoted.
+ # float128 is skipped for now, as it did not always support this and
+ # has no "custom" parsing:
+ txt = StringIO(' 3 ,"\u202F2\n"')
+ res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')
+ assert_array_equal(res, np.array([3, 2]).astype(dtype))
+
+
+@pytest.mark.parametrize("dtype", "FD")
+def test_unicode_whitespace_stripping_complex(dtype):
+ # Complex has a few extra cases since it has two components and
+ # parentheses
+ line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n"
+ data = [line, line.replace(" ", "\u202F")]
+ res = np.loadtxt(data, dtype=dtype, delimiter=',')
+ assert_array_equal(res, np.array([[1, 2+3j, 4+5j, 6-7j, 8j, 9j]] * 2))
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype", "FD")
+@pytest.mark.parametrize("field",
+ ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"])
+def test_bad_complex(dtype, field):
+ with pytest.raises(ValueError):
+ np.loadtxt([field + "\n"], dtype=dtype, delimiter=",")
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + "efgdFDG" + "?")
+def test_nul_character_error(dtype):
+ # Test that a \0 character is correctly recognized as an error even if
+ # what comes before is valid (not everything gets parsed internally).
+ if dtype.lower() == "g":
+ pytest.xfail("longdouble/clongdouble assignment may misbehave.")
+ with pytest.raises(ValueError):
+ np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"')
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+@pytest.mark.parametrize("dtype",
+ np.typecodes["AllInteger"] + "efgdFDG" + "?")
+def test_no_thousands_support(dtype):
+ # Mainly documents behaviour: Python itself accepts underscores such
+ # as 1_1 in numeric strings, but loadtxt does not. (e and G may end up
+ # using a different conversion path and accept it; that is a bug, but
+ # it happens...)
+ if dtype == "e":
+ pytest.skip("half assignment currently uses Python float converter")
+ if dtype in "eG":
+ pytest.xfail("clongdouble assignment is buggy (uses `complex`?).")
+
+ assert int("1_1") == float("1_1") == complex("1_1") == 11
+ with pytest.raises(ValueError):
+ np.loadtxt(["1_1\n"], dtype=dtype)
+
+
+@pytest.mark.parametrize("data", [
+ ["1,2\n", "2\n,3\n"],
+ ["1,2\n", "2\r,3\n"]])
+def test_bad_newline_in_iterator(data):
+ # In NumPy <=1.22 this was accepted, because newlines were completely
+ # ignored when the input was an iterable. This could be changed, but right
+ # now, we raise an error.
+ msg = "Found an unquoted embedded newline within a single line"
+ with pytest.raises(ValueError, match=msg):
+ np.loadtxt(data, delimiter=",")
+
+
+@pytest.mark.parametrize("data", [
+ ["1,2\n", "2,3\r\n"], # a universal newline
+ ["1,2\n", "'2\n',3\n"], # a quoted newline
+ ["1,2\n", "'2\r',3\n"],
+ ["1,2\n", "'2\r\n',3\n"],
+])
+def test_good_newline_in_iterator(data):
+ # The quoted newlines will be untransformed here, but are just whitespace.
+ res = np.loadtxt(data, delimiter=",", quotechar="'")
+ assert_array_equal(res, [[1., 2.], [2., 3.]])
+
+
+@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"])
+def test_universal_newlines_quoted(newline):
+ # Check that universal newline support within the tokenizer is not applied
+ # to quoted fields. (note that lines must end in newline or quoted
+ # fields will not include a newline at all)
+ data = ['1,"2\n"\n', '3,"4\n', '1"\n']
+ data = [row.replace("\n", newline) for row in data]
+ res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"')
+ assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']])
+
+
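+# Editorial sketch: a quoted field keeps an embedded "\r" verbatim, while
+# an unquoted one would be rejected or normalised as a universal newline.
+def _example_quoted_carriage_return():
+    res = np.loadtxt(['1,"a\rb"\n'], dtype=object, delimiter=",",
+                     quotechar='"')
+    assert res[1] == "a\rb"
+    return res
+
+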
+def test_null_character():
+ # Basic tests to check that the NUL character is not special:
+ res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000")
+ assert_array_equal(res, [[1, 2, 3], [4, 5, 6]])
+
+ # Also not as part of a field (avoid unicode/arrays as unicode strips \0)
+ res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"],
+ delimiter=",", dtype=object)
+ assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]]
+
+
+def test_iterator_fails_getting_next_line():
+ class BadSequence:
+ def __len__(self):
+ return 100
+
+ def __getitem__(self, item):
+ if item == 50:
+ raise RuntimeError("Bad things happened!")
+ return f"{item}, {item+1}"
+
+ with pytest.raises(RuntimeError, match="Bad things happened!"):
+ np.loadtxt(BadSequence(), dtype=int, delimiter=",")
+
+
+class TestCReaderUnitTests:
+    # These are internal tests for paths that should not be possible to hit
+ # unless things go very very wrong somewhere.
+ def test_not_an_filelike(self):
+ with pytest.raises(AttributeError, match=".*read"):
+ np.core._multiarray_umath._load_from_filelike(
+ object(), dtype=np.dtype("i"), filelike=True)
+
+ def test_filelike_read_fails(self):
+ # Can only be reached if loadtxt opens the file, so it is hard to do
+ # via the public interface (although maybe not impossible considering
+ # the current "DataClass" backing).
+ class BadFileLike:
+ counter = 0
+
+ def read(self, size):
+ self.counter += 1
+ if self.counter > 20:
+ raise RuntimeError("Bad bad bad!")
+ return "1,2,3\n"
+
+ with pytest.raises(RuntimeError, match="Bad bad bad!"):
+ np.core._multiarray_umath._load_from_filelike(
+ BadFileLike(), dtype=np.dtype("i"), filelike=True)
+
+ def test_filelike_bad_read(self):
+ # Can only be reached if loadtxt opens the file, so it is hard to do
+ # via the public interface (although maybe not impossible considering
+ # the current "DataClass" backing).
+
+ class BadFileLike:
+ counter = 0
+
+ def read(self, size):
+ return 1234 # not a string!
+
+ with pytest.raises(TypeError,
+ match="non-string returned while reading data"):
+ np.core._multiarray_umath._load_from_filelike(
+ BadFileLike(), dtype=np.dtype("i"), filelike=True)
+
+ def test_not_an_iter(self):
+ with pytest.raises(TypeError,
+ match="error reading from object, expected an iterable"):
+ np.core._multiarray_umath._load_from_filelike(
+ object(), dtype=np.dtype("i"), filelike=False)
+
+ def test_bad_type(self):
+ with pytest.raises(TypeError, match="internal error: dtype must"):
+ np.core._multiarray_umath._load_from_filelike(
+ object(), dtype="i", filelike=False)
+
+ def test_bad_encoding(self):
+ with pytest.raises(TypeError, match="encoding must be a unicode"):
+ np.core._multiarray_umath._load_from_filelike(
+ object(), dtype=np.dtype("i"), filelike=False, encoding=123)
+
+ @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"])
+ def test_manual_universal_newlines(self, newline):
+        # This is currently not available to users, because we should always
+        # open files with universal newlines enabled (`newline=None`).
+        # (And reading from an iterator uses slightly different code paths.)
+        # We have no real support for `newline="\r"` or `newline="\n"` as the
+        # user cannot specify those options.
+ data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline),
+ newline="")
+
+ res = np.core._multiarray_umath._load_from_filelike(
+ data, dtype=np.dtype("U10"), filelike=True,
+ quote='"', comment="#", skiplines=1)
+ assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "])
+
+
+def test_delimiter_comment_collision_raises():
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",")
+
+
+def test_delimiter_quotechar_collision_raises():
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")
+
+
+def test_comment_quotechar_collision_raises():
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#")
+
+
+def test_delimiter_and_multiple_comments_collision_raises():
+ with pytest.raises(
+ TypeError, match="Comment characters.*cannot include the delimiter"
+ ):
+ np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","])
+
+
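+# Editorial sketch: with distinct control characters the same call parses
+# cleanly; the collision errors above fire only when two of them overlap.
+def _example_distinct_control_chars():
+    txt = StringIO("# header\n1;2;3\n")
+    return np.loadtxt(txt, delimiter=";", comments="#", quotechar="'")
+
+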
+@pytest.mark.parametrize(
+ "ws",
+ (
+ " ", # space
+ "\t", # tab
+ "\u2003", # em
+ "\u00A0", # non-break
+ "\u3000", # ideographic space
+ )
+)
+def test_collision_with_default_delimiter_raises(ws):
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws)
+ with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+ np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws)
+
+
+@pytest.mark.parametrize("nl", ("\n", "\r"))
+def test_control_character_newline_raises(nl):
+ txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}")
+ msg = "control character.*cannot be a newline"
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, delimiter=nl)
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, comments=nl)
+ with pytest.raises(TypeError, match=msg):
+ np.loadtxt(txt, quotechar=nl)
+
+
+@pytest.mark.parametrize(
+ ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"),
+ [
+ ("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes
+ ("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str
+ ],
+)
+@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize
+def test_parametric_unit_discovery(
+ generic_data, long_datum, unitless_dtype, expected_dtype, nrows
+):
+    """Check that the correct unit (e.g. month, day, second) or string
+    length is discovered from the data when a user specifies a unitless
+    dtype."""
+    # The long final entry drives the discovery, e.g. unit "D" (days)
+ data = [generic_data] * 50000 + [long_datum]
+ expected = np.array(data, dtype=expected_dtype)
+
+ # file-like path
+ txt = StringIO("\n".join(data))
+ a = np.loadtxt(txt, dtype=unitless_dtype)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+ # file-obj path
+ fd, fname = mkstemp()
+ os.close(fd)
+ with open(fname, "w") as fh:
+ fh.write("\n".join(data))
+ a = np.loadtxt(fname, dtype=unitless_dtype)
+ os.remove(fname)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+
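+# Editorial sketch: unit discovery in one shot -- the day-resolution entry
+# forces "M8[D]" even though the first row only carries month resolution.
+def _example_datetime_unit_discovery():
+    arr = np.loadtxt(StringIO("2012-03\n2013-01-15"), dtype="M8")
+    assert arr.dtype == np.dtype("M8[D]")
+    return arr
+
+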
+def test_str_dtype_unit_discovery_with_converter():
+ data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"]
+ expected = np.array(
+ ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17"
+ )
+ conv = lambda s: s.strip("XXX")
+
+ # file-like path
+ txt = StringIO("\n".join(data))
+ a = np.loadtxt(txt, dtype="U", converters=conv, encoding=None)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+ # file-obj path
+ fd, fname = mkstemp()
+ os.close(fd)
+ with open(fname, "w") as fh:
+ fh.write("\n".join(data))
+ a = np.loadtxt(fname, dtype="U", converters=conv, encoding=None)
+ os.remove(fname)
+ assert a.dtype == expected.dtype
+ assert_equal(a, expected)
+
+
+@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
+ reason="PyPy bug in error formatting")
+def test_control_character_empty():
+ with pytest.raises(TypeError, match="Text reading control character must"):
+ np.loadtxt(StringIO("1 2 3"), delimiter="")
+ with pytest.raises(TypeError, match="Text reading control character must"):
+ np.loadtxt(StringIO("1 2 3"), quotechar="")
+ with pytest.raises(ValueError, match="comments cannot be an empty string"):
+ np.loadtxt(StringIO("1 2 3"), comments="")
+ with pytest.raises(ValueError, match="comments cannot be an empty string"):
+ np.loadtxt(StringIO("1 2 3"), comments=["#", ""])
+
+
+def test_control_characters_as_bytes():
+ """Byte control characters (comments, delimiter) are supported."""
+ a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")
+ assert_equal(a, [1, 2, 3])
+
+
+@pytest.mark.filterwarnings('ignore::UserWarning')
+def test_field_growing_cases():
+    # Test appending/growing of empty fields (each field still takes one
+    # character) to check that appending the final field causes no issues.
+ res = np.loadtxt([""], delimiter=",", dtype=bytes)
+ assert len(res) == 0
+
+ for i in range(1, 1024):
+ res = np.loadtxt(["," * i], delimiter=",", dtype=bytes)
+ assert len(res) == i+1
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py
new file mode 100644
index 00000000..63205876
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_mixins.py
@@ -0,0 +1,216 @@
+import numbers
+import operator
+
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+
+# NOTE: This class should be kept as an exact copy of the example from the
+# docstring for NDArrayOperatorsMixin.
+
+class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+ def __init__(self, value):
+ self.value = np.asarray(value)
+
+ # One might also consider adding the built-in list type to this
+ # list, to support operations like np.add(array_like, list)
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ out = kwargs.get('out', ())
+ for x in inputs + out:
+ # Only support operations with instances of _HANDLED_TYPES.
+ # Use ArrayLike instead of type(self) for isinstance to
+ # allow subclasses that don't override __array_ufunc__ to
+ # handle ArrayLike objects.
+ if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+ return NotImplemented
+
+ # Defer to the implementation of the ufunc on unwrapped values.
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+ for x in inputs)
+ if out:
+ kwargs['out'] = tuple(
+ x.value if isinstance(x, ArrayLike) else x
+ for x in out)
+ result = getattr(ufunc, method)(*inputs, **kwargs)
+
+ if type(result) is tuple:
+ # multiple return values
+ return tuple(type(self)(x) for x in result)
+ elif method == 'at':
+ # no return value
+ return None
+ else:
+ # one return value
+ return type(self)(result)
+
+ def __repr__(self):
+ return '%s(%r)' % (type(self).__name__, self.value)
+
+
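+# Editorial sketch: the mixin forwards Python operators to __array_ufunc__,
+# so arithmetic on ArrayLike wraps the ufunc result back into ArrayLike.
+def _example_arraylike_usage():
+    x = ArrayLike([1.0, 2.0])
+    y = x + 1  # dispatches to np.add via NDArrayOperatorsMixin
+    assert isinstance(y, ArrayLike)
+    return y
+
+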
+def wrap_array_like(result):
+ if type(result) is tuple:
+ return tuple(ArrayLike(r) for r in result)
+ else:
+ return ArrayLike(result)
+
+
+def _assert_equal_type_and_value(result, expected, err_msg=None):
+ assert_equal(type(result), type(expected), err_msg=err_msg)
+ if isinstance(result, tuple):
+ assert_equal(len(result), len(expected), err_msg=err_msg)
+ for result_item, expected_item in zip(result, expected):
+ _assert_equal_type_and_value(result_item, expected_item, err_msg)
+ else:
+ assert_equal(result.value, expected.value, err_msg=err_msg)
+ assert_equal(getattr(result.value, 'dtype', None),
+ getattr(expected.value, 'dtype', None), err_msg=err_msg)
+
+
+_ALL_BINARY_OPERATORS = [
+ operator.lt,
+ operator.le,
+ operator.eq,
+ operator.ne,
+ operator.gt,
+ operator.ge,
+ operator.add,
+ operator.sub,
+ operator.mul,
+ operator.truediv,
+ operator.floordiv,
+ operator.mod,
+ divmod,
+ pow,
+ operator.lshift,
+ operator.rshift,
+ operator.and_,
+ operator.xor,
+ operator.or_,
+]
+
+
+class TestNDArrayOperatorsMixin:
+
+ def test_array_like_add(self):
+
+ def check(result):
+ _assert_equal_type_and_value(result, ArrayLike(0))
+
+ check(ArrayLike(0) + 0)
+ check(0 + ArrayLike(0))
+
+ check(ArrayLike(0) + np.array(0))
+ check(np.array(0) + ArrayLike(0))
+
+ check(ArrayLike(np.array(0)) + 0)
+ check(0 + ArrayLike(np.array(0)))
+
+ check(ArrayLike(np.array(0)) + np.array(0))
+ check(np.array(0) + ArrayLike(np.array(0)))
+
+ def test_inplace(self):
+ array_like = ArrayLike(np.array([0]))
+ array_like += 1
+ _assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
+
+ array = np.array([0])
+ array += ArrayLike(1)
+ _assert_equal_type_and_value(array, ArrayLike(np.array([1])))
+
+ def test_opt_out(self):
+
+ class OptOut:
+ """Object that opts out of __array_ufunc__."""
+ __array_ufunc__ = None
+
+ def __add__(self, other):
+ return self
+
+ def __radd__(self, other):
+ return self
+
+ array_like = ArrayLike(1)
+ opt_out = OptOut()
+
+ # supported operations
+ assert_(array_like + opt_out is opt_out)
+ assert_(opt_out + array_like is opt_out)
+
+ # not supported
+ with assert_raises(TypeError):
+ # don't use the Python default, array_like = array_like + opt_out
+ array_like += opt_out
+ with assert_raises(TypeError):
+ array_like - opt_out
+ with assert_raises(TypeError):
+ opt_out - array_like
+
+ def test_subclass(self):
+
+ class SubArrayLike(ArrayLike):
+ """Should take precedence over ArrayLike."""
+
+ x = ArrayLike(0)
+ y = SubArrayLike(1)
+ _assert_equal_type_and_value(x + y, y)
+ _assert_equal_type_and_value(y + x, y)
+
+ def test_object(self):
+ x = ArrayLike(0)
+ obj = object()
+ with assert_raises(TypeError):
+ x + obj
+ with assert_raises(TypeError):
+ obj + x
+ with assert_raises(TypeError):
+ x += obj
+
+ def test_unary_methods(self):
+ array = np.array([-1, 0, 1, 2])
+ array_like = ArrayLike(array)
+ for op in [operator.neg,
+ operator.pos,
+ abs,
+ operator.invert]:
+ _assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
+
+ def test_forward_binary_methods(self):
+ array = np.array([-1, 0, 1, 2])
+ array_like = ArrayLike(array)
+ for op in _ALL_BINARY_OPERATORS:
+ expected = wrap_array_like(op(array, 1))
+ actual = op(array_like, 1)
+ err_msg = 'failed for operator {}'.format(op)
+ _assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+
+ def test_reflected_binary_methods(self):
+ for op in _ALL_BINARY_OPERATORS:
+ expected = wrap_array_like(op(2, 1))
+ actual = op(2, ArrayLike(1))
+ err_msg = 'failed for operator {}'.format(op)
+ _assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+
+ def test_matmul(self):
+ array = np.array([1, 2], dtype=np.float64)
+ array_like = ArrayLike(array)
+ expected = ArrayLike(np.float64(5))
+ _assert_equal_type_and_value(expected, np.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array, array_like))
+
+ def test_ufunc_at(self):
+ array = ArrayLike(np.array([1, 2, 3, 4]))
+ assert_(np.negative.at(array, np.array([0, 1])) is None)
+ _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
+
+ def test_ufunc_two_outputs(self):
+ mantissa, exponent = np.frexp(2 ** -3)
+ expected = (ArrayLike(mantissa), ArrayLike(exponent))
+ _assert_equal_type_and_value(
+ np.frexp(ArrayLike(2 ** -3)), expected)
+ _assert_equal_type_and_value(
+ np.frexp(ArrayLike(np.array(2 ** -3))), expected)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py
new file mode 100644
index 00000000..7cdcff32
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_nanfunctions.py
@@ -0,0 +1,1246 @@
+import warnings
+import pytest
+import inspect
+
+import numpy as np
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.lib.nanfunctions import _nan_mask, _replace_nan
+from numpy.testing import (
+ assert_, assert_equal, assert_almost_equal, assert_raises,
+ assert_array_equal, suppress_warnings
+ )
+
+
+# Test data
+_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
+ [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
+ [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
+ [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
+
+
+# Rows of _ndat with nans removed
+_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
+ np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
+ np.array([0.1042, -0.5954]),
+ np.array([0.1610, 0.1859, 0.3146])]
+
+# Rows of _ndat with nans converted to ones
+_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
+ [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
+ [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
+ [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
+
+# Rows of _ndat with nans converted to zeros
+_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
+ [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
+ [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
+ [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
+
+
+class TestSignatureMatch:
+ NANFUNCS = {
+ np.nanmin: np.amin,
+ np.nanmax: np.amax,
+ np.nanargmin: np.argmin,
+ np.nanargmax: np.argmax,
+ np.nansum: np.sum,
+ np.nanprod: np.prod,
+ np.nancumsum: np.cumsum,
+ np.nancumprod: np.cumprod,
+ np.nanmean: np.mean,
+ np.nanmedian: np.median,
+ np.nanpercentile: np.percentile,
+ np.nanquantile: np.quantile,
+ np.nanvar: np.var,
+ np.nanstd: np.std,
+ }
+ IDS = [k.__name__ for k in NANFUNCS]
+
+ @staticmethod
+ def get_signature(func, default="..."):
+ """Construct a signature and replace all default parameter-values."""
+ prm_list = []
+ signature = inspect.signature(func)
+ for prm in signature.parameters.values():
+ if prm.default is inspect.Parameter.empty:
+ prm_list.append(prm)
+ else:
+ prm_list.append(prm.replace(default=default))
+ return inspect.Signature(prm_list)
+
+ @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS)
+ def test_signature_match(self, nan_func, func):
+ # Ignore the default parameter-values as they can sometimes differ
+ # between the two functions (*e.g.* one has `False` while the other
+ # has `np._NoValue`)
+ signature = self.get_signature(func)
+ nan_signature = self.get_signature(nan_func)
+ np.testing.assert_equal(signature, nan_signature)
+
+ def test_exhaustiveness(self):
+ """Validate that all nan functions are actually tested."""
+ np.testing.assert_equal(
+ set(self.IDS), set(np.lib.nanfunctions.__all__)
+ )
+
+
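+# Editorial sketch: after get_signature replaces every default with "...",
+# np.sum and np.nansum compare equal even though their raw defaults differ.
+def _example_signature_normalisation():
+    sig = TestSignatureMatch.get_signature(np.nansum)
+    assert all(p.default in ("...", inspect.Parameter.empty)
+               for p in sig.parameters.values())
+    return sig
+
+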
+class TestNanFunctions_MinMax:
+
+ nanfuncs = [np.nanmin, np.nanmax]
+ stdfuncs = [np.min, np.max]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for axis in [None, 0, 1]:
+ tgt = rf(mat, axis=axis, keepdims=True)
+ res = nf(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.zeros(3)
+ tgt = rf(mat, axis=1)
+ res = nf(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = rf(mat, axis=1).dtype.type
+ res = nf(mat, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, axis=None).dtype.type
+ res = nf(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_result_values(self):
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ tgt = [rf(d) for d in _rdat]
+ res = nf(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ match = "All-NaN slice encountered"
+ for func in self.nanfuncs:
+ with pytest.warns(RuntimeWarning, match=match):
+ out = func(array, axis=axis)
+ assert np.isnan(out).all()
+ assert out.dtype == array.dtype
+
+ def test_masked(self):
+ mat = np.ma.fix_invalid(_ndat)
+ msk = mat._mask.copy()
+ for f in [np.nanmin]:
+ res = f(mat, axis=1)
+ tgt = f(_ndat, axis=1)
+ assert_equal(res, tgt)
+ assert_equal(mat._mask, msk)
+ assert_(not np.isinf(mat).any())
+
+ def test_scalar(self):
+ for f in self.nanfuncs:
+ assert_(f(0.) == 0.)
+
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
+ # Check that it works and that type and
+ # shape are preserved
+ mine = np.eye(3).view(MyNDArray)
+ for f in self.nanfuncs:
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
+ # check that rows of nan are dealt with for subclasses (#4628)
+ mine[1] = np.nan
+ for f in self.nanfuncs:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(not np.any(np.isnan(res)))
+ assert_(len(w) == 0)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(np.isnan(res[1]) and not np.isnan(res[0])
+ and not np.isnan(res[2]))
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mine)
+ assert_(res.shape == ())
+                assert_(not np.isnan(res))
+ assert_(len(w) == 0)
+
+ def test_object_array(self):
+ arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
+ assert_equal(np.nanmin(arr), 1.0)
+ assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ # assert_equal does not work on object arrays of nan
+ assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_initial(self, dtype):
+ class MyNDArray(np.ndarray):
+ pass
+
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ initial = 100 if f is np.nanmax else 0
+
+ ret1 = f(ar, initial=initial)
+ assert ret1.dtype == dtype
+ assert ret1 == initial
+
+ ret2 = f(ar.view(MyNDArray), initial=initial)
+ assert ret2.dtype == dtype
+ assert ret2 == initial
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ class MyNDArray(np.ndarray):
+ pass
+
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool_)
+ where[:, 0] = False
+
+ for f in self.nanfuncs:
+ reference = 4 if f is np.nanmin else 8
+
+ ret1 = f(ar, where=where, initial=5)
+ assert ret1.dtype == dtype
+ assert ret1 == reference
+
+ ret2 = f(ar.view(MyNDArray), where=where, initial=5)
+ assert ret2.dtype == dtype
+ assert ret2 == reference
+
+
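+# Editorial sketch: `where` masks out column 0 and `initial` seeds the
+# reduction, mirroring the test above -- the surviving minimum is 4.
+def _example_nanmin_where_initial():
+    ar = np.arange(9.0).reshape(3, 3)
+    ar[0, :] = np.nan
+    where = np.ones_like(ar, dtype=bool)
+    where[:, 0] = False
+    return np.nanmin(ar, where=where, initial=5)  # -> 4.0
+
+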
+class TestNanFunctions_ArgminArgmax:
+
+ nanfuncs = [np.nanargmin, np.nanargmax]
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_result_values(self):
+ for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
+ for row in _ndat:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "invalid value encountered in")
+ ind = f(row)
+ val = row[ind]
+ # comparing with NaN is tricky as the result
+ # is always false except for NaN != NaN
+ assert_(not np.isnan(val))
+ assert_(not fcmp(val, row).any())
+ assert_(not np.equal(val, row[:ind]).any())
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ for func in self.nanfuncs:
+ with pytest.raises(ValueError, match="All-NaN slice encountered"):
+ func(array, axis=axis)
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for f in self.nanfuncs:
+ for axis in [0, None]:
+ assert_raises(ValueError, f, mat, axis=axis)
+ for axis in [1]:
+ res = f(mat, axis=axis)
+ assert_equal(res, np.zeros(0))
+
+ def test_scalar(self):
+ for f in self.nanfuncs:
+ assert_(f(0.) == 0.)
+
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
+ # Check that it works and that type and
+ # shape are preserved
+ mine = np.eye(3).view(MyNDArray)
+ for f in self.nanfuncs:
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == (3,))
+ res = f(mine)
+ assert_(res.shape == ())
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_keepdims(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ reference = 5 if f is np.nanargmin else 8
+ ret = f(ar, keepdims=True)
+ assert ret.ndim == ar.ndim
+ assert ret == reference
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_out(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ out = np.zeros((), dtype=np.intp)
+ reference = 5 if f is np.nanargmin else 8
+ ret = f(ar, out=out)
+ assert ret is out
+ assert ret == reference
+
+
+_TEST_ARRAYS = {
+ "0d": np.array(5),
+ "1d": np.array([127, 39, 93, 87, 46])
+}
+for _v in _TEST_ARRAYS.values():
+ _v.setflags(write=False)
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O",
+)
+@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys())
+class TestNanFunctions_NumberTypes:
+ nanfuncs = {
+ np.nanmin: np.min,
+ np.nanmax: np.max,
+ np.nanargmin: np.argmin,
+ np.nanargmax: np.argmax,
+ np.nansum: np.sum,
+ np.nanprod: np.prod,
+ np.nancumsum: np.cumsum,
+ np.nancumprod: np.cumprod,
+ np.nanmean: np.mean,
+ np.nanmedian: np.median,
+ np.nanvar: np.var,
+ np.nanstd: np.std,
+ }
+ nanfunc_ids = [i.__name__ for i in nanfuncs]
+
+ @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids)
+ @np.errstate(over="ignore")
+ def test_nanfunc(self, mat, dtype, nanfunc, func):
+ mat = mat.astype(dtype)
+ tgt = func(mat)
+ out = nanfunc(mat)
+
+ assert_almost_equal(out, tgt)
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
+
+ @pytest.mark.parametrize(
+ "nanfunc,func",
+ [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)],
+ ids=["nanquantile", "nanpercentile"],
+ )
+ def test_nanfunc_q(self, mat, dtype, nanfunc, func):
+ mat = mat.astype(dtype)
+ tgt = func(mat, q=1)
+ out = nanfunc(mat, q=1)
+
+ assert_almost_equal(out, tgt)
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
+
+ @pytest.mark.parametrize(
+ "nanfunc,func",
+ [(np.nanvar, np.var), (np.nanstd, np.std)],
+ ids=["nanvar", "nanstd"],
+ )
+ def test_nanfunc_ddof(self, mat, dtype, nanfunc, func):
+ mat = mat.astype(dtype)
+ tgt = func(mat, ddof=0.5)
+ out = nanfunc(mat, ddof=0.5)
+
+ assert_almost_equal(out, tgt)
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
+
+
+class SharedNanFunctionsTestsMixin:
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ for f in self.nanfuncs:
+ f(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for axis in [None, 0, 1]:
+ tgt = rf(mat, axis=axis, keepdims=True)
+ res = nf(mat, axis=axis, keepdims=True)
+ assert_(res.ndim == tgt.ndim)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.zeros(3)
+ tgt = rf(mat, axis=1)
+ res = nf(mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_dtype_from_dtype(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ with suppress_warnings() as sup:
+ if nf in {np.nanstd, np.nanvar} and c in 'FDG':
+ # Giving the warning is a small bug, see gh-8000
+ sup.filter(np.ComplexWarning)
+ tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
+ res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
+ res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_char(self):
+ mat = np.eye(3)
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ with suppress_warnings() as sup:
+ if nf in {np.nanstd, np.nanvar} and c in 'FDG':
+ # Giving the warning is a small bug, see gh-8000
+ sup.filter(np.ComplexWarning)
+ tgt = rf(mat, dtype=c, axis=1).dtype.type
+ res = nf(mat, dtype=c, axis=1).dtype.type
+ assert_(res is tgt)
+ # scalar case
+ tgt = rf(mat, dtype=c, axis=None).dtype.type
+ res = nf(mat, dtype=c, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_dtype_from_input(self):
+ codes = 'efdgFDG'
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ for c in codes:
+ mat = np.eye(3, dtype=c)
+ tgt = rf(mat, axis=1).dtype.type
+ res = nf(mat, axis=1).dtype.type
+ assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
+ # scalar case
+ tgt = rf(mat, axis=None).dtype.type
+ res = nf(mat, axis=None).dtype.type
+ assert_(res is tgt)
+
+ def test_result_values(self):
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ tgt = [rf(d) for d in _rdat]
+ res = nf(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_scalar(self):
+ for f in self.nanfuncs:
+ assert_(f(0.) == 0.)
+
+ def test_subclass(self):
+ class MyNDArray(np.ndarray):
+ pass
+
+ # Check that it works and that type and
+ # shape are preserved
+ array = np.eye(3)
+ mine = array.view(MyNDArray)
+ for f in self.nanfuncs:
+ expected_shape = f(array, axis=0).shape
+ res = f(mine, axis=0)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array, axis=1).shape
+ res = f(mine, axis=1)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+ expected_shape = f(array).shape
+ res = f(mine)
+ assert_(isinstance(res, MyNDArray))
+ assert_(res.shape == expected_shape)
+
+
+class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
+
+ nanfuncs = [np.nansum, np.nanprod]
+ stdfuncs = [np.sum, np.prod]
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ for func, identity in zip(self.nanfuncs, [0, 1]):
+ out = func(array, axis=axis)
+ assert np.all(out == identity)
+ assert out.dtype == array.dtype
+
+ def test_empty(self):
+ for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
+ mat = np.zeros((0, 3))
+ tgt = [tgt_value]*3
+ res = f(mat, axis=0)
+ assert_equal(res, tgt)
+ tgt = []
+ res = f(mat, axis=1)
+ assert_equal(res, tgt)
+ tgt = tgt_value
+ res = f(mat, axis=None)
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_initial(self, dtype):
+ ar = np.arange(9).astype(dtype)
+ ar[:5] = np.nan
+
+ for f in self.nanfuncs:
+ reference = 28 if f is np.nansum else 3360
+ ret = f(ar, initial=2)
+ assert ret.dtype == dtype
+ assert ret == reference
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool_)
+ where[:, 0] = False
+
+ for f in self.nanfuncs:
+ reference = 26 if f is np.nansum else 2240
+ ret = f(ar, where=where, initial=2)
+ assert ret.dtype == dtype
+ assert ret == reference
+
+
+class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
+
+ nanfuncs = [np.nancumsum, np.nancumprod]
+ stdfuncs = [np.cumsum, np.cumprod]
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan)
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ for func, identity in zip(self.nanfuncs, [0, 1]):
+ out = func(array)
+ assert np.all(out == identity)
+ assert out.dtype == array.dtype
+
+ def test_empty(self):
+ for f, tgt_value in zip(self.nanfuncs, [0, 1]):
+ mat = np.zeros((0, 3))
+ tgt = tgt_value*np.ones((0, 3))
+ res = f(mat, axis=0)
+ assert_equal(res, tgt)
+ tgt = mat
+ res = f(mat, axis=1)
+ assert_equal(res, tgt)
+ tgt = np.zeros((0))
+ res = f(mat, axis=None)
+ assert_equal(res, tgt)
+
+ def test_keepdims(self):
+ for f, g in zip(self.nanfuncs, self.stdfuncs):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = f(mat, axis=axis, out=None)
+ res = g(mat, axis=axis, out=None)
+ assert_(res.ndim == tgt.ndim)
+
+ for f in self.nanfuncs:
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ rs = np.random.RandomState(0)
+ d[rs.rand(*d.shape) < 0.5] = np.nan
+ res = f(d, axis=None)
+ assert_equal(res.shape, (1155,))
+ for axis in np.arange(4):
+ res = f(d, axis=axis)
+ assert_equal(res.shape, (3, 5, 7, 11))
+
+ def test_result_values(self):
+ for axis in (-2, -1, 0, 1, None):
+ tgt = np.cumprod(_ndat_ones, axis=axis)
+ res = np.nancumprod(_ndat, axis=axis)
+ assert_almost_equal(res, tgt)
+            tgt = np.cumsum(_ndat_zeros, axis=axis)
+ res = np.nancumsum(_ndat, axis=axis)
+ assert_almost_equal(res, tgt)
+
+ def test_out(self):
+ mat = np.eye(3)
+ for nf, rf in zip(self.nanfuncs, self.stdfuncs):
+ resout = np.eye(3)
+ for axis in (-2, -1, 0, 1):
+ tgt = rf(mat, axis=axis)
+ res = nf(mat, axis=axis, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+
+class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
+
+ nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
+ stdfuncs = [np.mean, np.var, np.std]
+
+ def test_dtype_error(self):
+ for f in self.nanfuncs:
+ for dtype in [np.bool_, np.int_, np.object_]:
+ assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
+
+ def test_out_dtype_error(self):
+ for f in self.nanfuncs:
+ for dtype in [np.bool_, np.int_, np.object_]:
+ out = np.empty(_ndat.shape[0], dtype=dtype)
+ assert_raises(TypeError, f, _ndat, axis=1, out=out)
+
+ def test_ddof(self):
+ nanfuncs = [np.nanvar, np.nanstd]
+ stdfuncs = [np.var, np.std]
+ for nf, rf in zip(nanfuncs, stdfuncs):
+ for ddof in [0, 1]:
+ tgt = [rf(d, ddof=ddof) for d in _rdat]
+ res = nf(_ndat, axis=1, ddof=ddof)
+ assert_almost_equal(res, tgt)
+
+ def test_ddof_too_big(self):
+ nanfuncs = [np.nanvar, np.nanstd]
+ stdfuncs = [np.var, np.std]
+ dsize = [len(d) for d in _rdat]
+ for nf, rf in zip(nanfuncs, stdfuncs):
+ for ddof in range(5):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ sup.filter(np.ComplexWarning)
+ tgt = [ddof >= d for d in dsize]
+ res = nf(_ndat, axis=1, ddof=ddof)
+ assert_equal(np.isnan(res), tgt)
+ if any(tgt):
+ assert_(len(sup.log) == 1)
+ else:
+ assert_(len(sup.log) == 0)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)"
+ for func in self.nanfuncs:
+ with pytest.warns(RuntimeWarning, match=match):
+ out = func(array, axis=axis)
+ assert np.isnan(out).all()
+
+ # `nanvar` and `nanstd` convert complex inputs to their
+ # corresponding floating dtype
+ if func is np.nanmean:
+ assert out.dtype == array.dtype
+ else:
+ assert out.dtype == np.abs(array).dtype
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for f in self.nanfuncs:
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(f(mat, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(f(mat, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ def test_where(self, dtype):
+ ar = np.arange(9).reshape(3, 3).astype(dtype)
+ ar[0, :] = np.nan
+ where = np.ones_like(ar, dtype=np.bool_)
+ where[:, 0] = False
+
+ for f, f_std in zip(self.nanfuncs, self.stdfuncs):
+ reference = f_std(ar[where][2:])
+ dtype_reference = dtype if f is np.nanmean else ar.real.dtype
+
+ ret = f(ar, where=where)
+ assert ret.dtype == dtype_reference
+ np.testing.assert_allclose(ret, reference)
+
+
+_TIME_UNITS = (
+ "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"
+)
+
+# All `inexact` + `timedelta64` type codes
+_TYPE_CODES = list(np.typecodes["AllFloat"])
+_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]
+
+
+class TestNanFunctions_Median:
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ np.nanmedian(ndat)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
+ res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
+ assert_(res.ndim == tgt.ndim)
+
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ res = np.nanmedian(d, axis=None, keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanmedian(d, axis=(0, 1), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 11))
+ res = np.nanmedian(d, axis=(0, 3), keepdims=True)
+ assert_equal(res.shape, (1, 5, 7, 1))
+ res = np.nanmedian(d, axis=(1,), keepdims=True)
+ assert_equal(res.shape, (3, 1, 7, 11))
+ res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 1))
+
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+ def test_keepdims_out(self, axis):
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = np.empty(shape_out)
+ result = np.nanmedian(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
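+    # Editorial sketch: normalize_axis_tuple maps the negative axes accepted
+    # above onto the positional indices used to build `shape_out`.
+    def _example_axis_normalisation(self):
+        assert normalize_axis_tuple((-3, -1), 4) == (1, 3)
+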
+ def test_out(self):
+ mat = np.random.rand(3, 3)
+ nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
+ resout = np.zeros(3)
+ tgt = np.median(mat, axis=1)
+ res = np.nanmedian(nan_mat, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ # 0-d output:
+ resout = np.zeros(())
+ tgt = np.median(mat, axis=None)
+ res = np.nanmedian(nan_mat, axis=None, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_small_large(self):
+ # test the small and large code paths, current cutoff 400 elements
+ for s in [5, 20, 51, 200, 1000]:
+ d = np.random.randn(4, s)
+ # Randomly set some elements to NaN:
+ w = np.random.randint(0, d.size, size=d.size // 5)
+ d.ravel()[w] = np.nan
+            d[:, 0] = 1.  # ensure at least one good value
+ # use normal median without nans to compare
+ tgt = []
+ for x in d:
+ nonan = np.compress(~np.isnan(x), x)
+ tgt.append(np.median(nonan, overwrite_input=True))
+
+ assert_array_equal(np.nanmedian(d, axis=-1), tgt)
+
+ def test_result_values(self):
+ tgt = [np.median(d) for d in _rdat]
+ res = np.nanmedian(_ndat, axis=1)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", _TYPE_CODES)
+ def test_allnans(self, dtype, axis):
+ mat = np.full((3, 3), np.nan).astype(dtype)
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+
+ output = np.nanmedian(mat, axis=axis)
+ assert output.dtype == mat.dtype
+ assert np.isnan(output).all()
+
+ if axis is None:
+ assert_(len(sup.log) == 1)
+ else:
+ assert_(len(sup.log) == 3)
+
+ # Check scalar
+ scalar = np.array(np.nan).astype(dtype)[()]
+ output_scalar = np.nanmedian(scalar)
+ assert output_scalar.dtype == scalar.dtype
+ assert np.isnan(output_scalar)
+
+ if axis is None:
+ assert_(len(sup.log) == 2)
+ else:
+ assert_(len(sup.log) == 4)
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+ def test_scalar(self):
+ assert_(np.nanmedian(0.) == 0.)
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(np.AxisError, np.nanmedian, d, axis=-5)
+ assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5))
+ assert_raises(np.AxisError, np.nanmedian, d, axis=4)
+ assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4))
+ assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
+
+ def test_float_special(self):
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ for inf in [np.inf, -np.inf]:
+ a = np.array([[inf, np.nan], [np.nan, np.nan]])
+ assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])
+ assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])
+ assert_equal(np.nanmedian(a), inf)
+
+ # minimum fill value check
+ a = np.array([[np.nan, np.nan, inf],
+ [np.nan, np.nan, inf]])
+ assert_equal(np.nanmedian(a), inf)
+ assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
+ assert_equal(np.nanmedian(a, axis=1), inf)
+
+ # no mask path
+ a = np.array([[inf, inf], [inf, inf]])
+ assert_equal(np.nanmedian(a, axis=1), inf)
+
+ a = np.array([[inf, 7, -inf, -9],
+ [-10, np.nan, np.nan, 5],
+ [4, np.nan, np.nan, inf]],
+ dtype=np.float32)
+ if inf > 0:
+ assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
+ assert_equal(np.nanmedian(a), 4.5)
+ else:
+ assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
+ assert_equal(np.nanmedian(a), -2.5)
+ assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])
+
+ for i in range(0, 10):
+ for j in range(1, 10):
+ a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
+ assert_equal(np.nanmedian(a), inf)
+ assert_equal(np.nanmedian(a, axis=1), inf)
+ assert_equal(np.nanmedian(a, axis=0),
+ ([np.nan] * i) + [inf] * j)
+
+ a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
+ assert_equal(np.nanmedian(a), -inf)
+ assert_equal(np.nanmedian(a, axis=1), -inf)
+ assert_equal(np.nanmedian(a, axis=0),
+ ([np.nan] * i) + [-inf] * j)
+
+
+class TestNanFunctions_Percentile:
+
+ def test_mutation(self):
+ # Check that passed array is not modified.
+ ndat = _ndat.copy()
+ np.nanpercentile(ndat, 30)
+ assert_equal(ndat, _ndat)
+
+ def test_keepdims(self):
+ mat = np.eye(3)
+ for axis in [None, 0, 1]:
+ tgt = np.percentile(mat, 70, axis=axis, out=None,
+ overwrite_input=False)
+ res = np.nanpercentile(mat, 70, axis=axis, out=None,
+ overwrite_input=False)
+ assert_(res.ndim == tgt.ndim)
+
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning)
+ res = np.nanpercentile(d, 90, axis=None, keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 11))
+ res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
+ assert_equal(res.shape, (1, 5, 7, 1))
+ res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
+ assert_equal(res.shape, (3, 1, 7, 11))
+ res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 1, 1))
+ res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
+ assert_equal(res.shape, (1, 1, 7, 1))
+
+ @pytest.mark.parametrize('q', [7, [1, 7]])
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1,),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")
+ def test_keepdims_out(self, q, axis):
+ d = np.ones((3, 5, 7, 11))
+ # Randomly set some elements to NaN:
+ w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
+ w = w.astype(np.intp)
+ d[tuple(w)] = np.nan
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ shape_out = np.shape(q) + shape_out
+
+ out = np.empty(shape_out)
+ result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ def test_out(self):
+ mat = np.random.rand(3, 3)
+ nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
+ resout = np.zeros(3)
+ tgt = np.percentile(mat, 42, axis=1)
+ res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ # 0-d output:
+ resout = np.zeros(())
+ tgt = np.percentile(mat, 42, axis=None)
+ res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+ res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
+ assert_almost_equal(res, resout)
+ assert_almost_equal(res, tgt)
+
+ def test_result_values(self):
+ tgt = [np.percentile(d, 28) for d in _rdat]
+ res = np.nanpercentile(_ndat, 28, axis=1)
+ assert_almost_equal(res, tgt)
+ # Transpose the array to fit the output convention of numpy.percentile
+ tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
+ res = np.nanpercentile(_ndat, (28, 98), axis=1)
+ assert_almost_equal(res, tgt)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
+ out = np.nanpercentile(array, 60, axis=axis)
+ assert np.isnan(out).all()
+ assert out.dtype == array.dtype
+
+ def test_empty(self):
+ mat = np.zeros((0, 3))
+ for axis in [0, None]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
+ assert_(len(w) == 1)
+ assert_(issubclass(w[0].category, RuntimeWarning))
+ for axis in [1]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
+ assert_(len(w) == 0)
+
+ def test_scalar(self):
+ assert_equal(np.nanpercentile(0., 100), 0.)
+ a = np.arange(6)
+ r = np.nanpercentile(a, 50, axis=0)
+ assert_equal(r, 2.5)
+ assert_(np.isscalar(r))
+
+ def test_extended_axis_invalid(self):
+ d = np.ones((3, 5, 7, 11))
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5)
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4)
+ assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4))
+ assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
+
+ def test_multiple_percentiles(self):
+ perc = [50, 100]
+ mat = np.ones((4, 3))
+ nan_mat = np.nan * mat
+ # For checking consistency in higher dimensional case
+ large_mat = np.ones((3, 4, 5))
+ large_mat[:, 0:2:4, :] = 0
+ large_mat[:, :, 3:] *= 2
+ for axis in [None, 0, 1]:
+ for keepdim in [False, True]:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "All-NaN slice encountered")
+ val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
+ nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
+ keepdims=keepdim)
+ assert_equal(nan_val.shape, val.shape)
+
+ val = np.percentile(large_mat, perc, axis=axis,
+ keepdims=keepdim)
+ nan_val = np.nanpercentile(large_mat, perc, axis=axis,
+ keepdims=keepdim)
+ assert_equal(nan_val, val)
+
+ megamat = np.ones((3, 4, 5, 6))
+ assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
+
+
+class TestNanFunctions_Quantile:
+ # most of this is already tested by TestPercentile
+
+ def test_regression(self):
+ ar = np.arange(24).reshape(2, 3, 4).astype(float)
+ ar[0][1] = np.nan
+
+ assert_equal(np.nanquantile(ar, q=0.5), np.nanpercentile(ar, q=50))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=0),
+ np.nanpercentile(ar, q=50, axis=0))
+ assert_equal(np.nanquantile(ar, q=0.5, axis=1),
+ np.nanpercentile(ar, q=50, axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
+ np.nanpercentile(ar, q=[50], axis=1))
+ assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
+ np.nanpercentile(ar, q=[25, 50, 75], axis=1))
+
+ def test_basic(self):
+ x = np.arange(8) * 0.5
+ assert_equal(np.nanquantile(x, 0), 0.)
+ assert_equal(np.nanquantile(x, 1), 3.5)
+ assert_equal(np.nanquantile(x, 0.5), 1.75)
+
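+    # Editorial sketch: quantile q and percentile 100*q name the same point,
+    # which is all the regression test above relies on.
+    def _example_quantile_percentile_identity(self):
+        x = np.arange(8) * 0.5
+        assert np.nanquantile(x, 0.25) == np.nanpercentile(x, 25)
+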
+ def test_no_p_overwrite(self):
+ # this is worth retesting, because quantile does not make a copy
+ p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
+ p = p0.copy()
+ np.nanquantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ p0 = p0.tolist()
+ p = p.tolist()
+ np.nanquantile(np.arange(100.), p, method="midpoint")
+ assert_array_equal(p, p0)
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("array", [
+ np.array(np.nan),
+ np.full((3, 3), np.nan),
+ ], ids=["0d", "2d"])
+ def test_allnans(self, axis, dtype, array):
+ if axis is not None and array.ndim == 0:
+        pytest.skip("`axis != None` not supported for 0d arrays")
+
+ array = array.astype(dtype)
+ with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
+ out = np.nanquantile(array, 1, axis=axis)
+ assert np.isnan(out).all()
+ assert out.dtype == array.dtype
+
+@pytest.mark.parametrize("arr, expected", [
+ # array of floats with some nans
+ (np.array([np.nan, 5.0, np.nan, np.inf]),
+ np.array([False, True, False, True])),
+ # int64 array that can't possibly have nans
+ (np.array([1, 5, 7, 9], dtype=np.int64),
+ True),
+ # bool array that can't possibly have nans
+ (np.array([False, True, False, True]),
+ True),
+ # 2-D complex array with nans
+ (np.array([[np.nan, 5.0],
+ [np.nan, np.inf]], dtype=np.complex64),
+ np.array([[False, True],
+ [False, True]])),
+ ])
+def test__nan_mask(arr, expected):
+ for out in [None, np.empty(arr.shape, dtype=np.bool_)]:
+ actual = _nan_mask(arr, out=out)
+ assert_equal(actual, expected)
+ # the above won't distinguish between True proper
+ # and an array of True values; we want True proper
+ # for types that can't possibly contain NaN
+ if type(expected) is not np.ndarray:
+ assert actual is True
+
+
+def test__replace_nan():
+ """ Test that _replace_nan returns the original array if there are no
+ NaNs, not a copy.
+ """
+ for dtype in [np.bool_, np.int32, np.int64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 0)
+ assert mask is None
+ # do not make a copy if there are no nans
+ assert result is arr
+
+ for dtype in [np.float32, np.float64]:
+ arr = np.array([0, 1], dtype=dtype)
+ result, mask = _replace_nan(arr, 2)
+ assert (mask == False).all()
+ # mask is not None, so we make a copy
+ assert result is not arr
+ assert_equal(result, arr)
+
+ arr_nan = np.array([0, 1, np.nan], dtype=dtype)
+ result_nan, mask_nan = _replace_nan(arr_nan, 2)
+ assert_equal(mask_nan, np.array([False, False, True]))
+ assert result_nan is not arr_nan
+ assert_equal(result_nan, np.array([0, 1, 2]))
+ assert np.isnan(arr_nan[-1])
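+
+
+# Editorial sketch: _replace_nan is the shared building block -- swap NaNs
+# for a fill value suited to the reduction and keep the mask for fix-ups.
+def _example_nan_aware_sum(a):
+    # 0.0 is the additive identity, so summing the filled array drops NaNs.
+    filled, mask = _replace_nan(np.asarray(a, dtype=float), 0.0)
+    return filled.sum(), mask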
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py
new file mode 100644
index 00000000..5b07f41c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_packbits.py
@@ -0,0 +1,376 @@
+import numpy as np
+from numpy.testing import assert_array_equal, assert_equal, assert_raises
+import pytest
+from itertools import chain
+
+def test_packbits():
+ # Copied from the docstring.
+ a = [[[1, 0, 1], [0, 1, 0]],
+ [[1, 1, 0], [0, 0, 1]]]
+ for dt in '?bBhHiIlLqQ':
+ arr = np.array(a, dtype=dt)
+ b = np.packbits(arr, axis=-1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]]))
+
+ assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
+
+
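+# Editorial sketch: eight bits pack into one byte; bitorder decides which
+# end of the byte holds the first bit.
+def _example_packbits_roundtrip():
+    bits = np.array([1, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
+    assert np.packbits(bits)[0] == 0b10100000  # "big" is the default
+    assert np.packbits(bits, bitorder="little")[0] == 0b00000101
+    assert_array_equal(np.unpackbits(np.packbits(bits)), bits)
+
+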
+def test_packbits_empty():
+ shapes = [
+ (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
+ (0, 0, 20), (0, 0, 0),
+ ]
+ for dt in '?bBhHiIlLqQ':
+ for shape in shapes:
+ a = np.empty(shape, dtype=dt)
+ b = np.packbits(a)
+ assert_equal(b.dtype, np.uint8)
+ assert_equal(b.shape, (0,))
+
+
+def test_packbits_empty_with_axis():
+ # Original shapes and lists of packed shapes for different axes.
+ shapes = [
+ ((0,), [(0,)]),
+ ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
+ ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
+ ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
+ ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
+ ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
+ ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
+ ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
+ ]
+ for dt in '?bBhHiIlLqQ':
+ for in_shape, out_shapes in shapes:
+ for ax, out_shape in enumerate(out_shapes):
+ a = np.empty(in_shape, dtype=dt)
+ b = np.packbits(a, axis=ax)
+ assert_equal(b.dtype, np.uint8)
+ assert_equal(b.shape, out_shape)
+
+@pytest.mark.parametrize('bitorder', ('little', 'big'))
+def test_packbits_large(bitorder):
+ # test data large enough for 16 byte vectorization
+ a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
+ 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
+ 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
+ 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
+ 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
+ 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
+ 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
+ 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
+ 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
+ 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
+ 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
+ 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
+ a = a.repeat(3)
+ for dtype in '?bBhHiIlLqQ':
+ arr = np.array(a, dtype=dtype)
+ b = np.packbits(arr, axis=None, bitorder=bitorder)
+ assert_equal(b.dtype, np.uint8)
+ r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
+ 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
+ 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
+ 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
+ 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
+ 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
+ 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
+ 129, 248, 227, 129, 199, 31, 128]
+ if bitorder == 'big':
+ assert_array_equal(b, r)
+    # the round trip matches once the zero padding (input size rounded up
+    # to a multiple of 8) is trimmed
+ assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)
+
+ # check last byte of different remainders (16 byte vectorization)
+ b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
+ assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199,
+ 198, 196, 192])
+
+
+ arr = arr.reshape(36, 25)
+ b = np.packbits(arr, axis=0)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
+ 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105,
+ 107, 75, 74, 88],
+ [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
+ 83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
+ 41, 104, 122, 90, 18],
+ [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
+ 150, 150, 146, 210, 210, 246, 255, 255, 223,
+ 151, 21, 17, 17, 131, 163],
+ [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
+ 92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
+ 202, 234, 170, 168],
+ [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
+ 240, 208, 144, 128, 160, 224, 240, 208, 144,
+ 144, 176, 240, 224, 192, 128]])
+
+ b = np.packbits(arr, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[252, 127, 192, 0],
+ [ 7, 252, 15, 128],
+ [240, 0, 28, 0],
+ [255, 128, 0, 128],
+ [192, 31, 255, 128],
+ [142, 63, 0, 0],
+ [255, 240, 7, 0],
+ [ 7, 224, 14, 0],
+ [126, 0, 224, 0],
+ [255, 255, 199, 0],
+ [ 56, 28, 126, 0],
+ [113, 248, 227, 128],
+ [227, 142, 63, 0],
+ [ 0, 28, 112, 0],
+ [ 15, 248, 3, 128],
+ [ 28, 126, 56, 0],
+ [ 56, 255, 241, 128],
+ [240, 7, 224, 0],
+ [227, 129, 192, 128],
+ [255, 255, 254, 0],
+ [126, 0, 224, 0],
+ [ 3, 241, 248, 0],
+ [ 0, 255, 241, 128],
+ [128, 0, 255, 128],
+ [224, 1, 255, 128],
+ [248, 252, 126, 0],
+ [ 0, 7, 3, 128],
+ [224, 113, 248, 0],
+ [ 0, 252, 127, 128],
+ [142, 63, 224, 0],
+ [224, 14, 63, 0],
+ [ 7, 3, 128, 0],
+ [113, 255, 255, 128],
+ [ 28, 113, 199, 0],
+ [ 7, 227, 142, 0],
+ [ 14, 56, 252, 0]])
+
+ arr = arr.T.copy()
+ b = np.packbits(arr, axis=0)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
+ 56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
+ 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
+ 7, 113, 28, 7, 14],
+ [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
+ 28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
+ 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
+ 3, 255, 113, 227, 56],
+ [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
+ 227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
+ 248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
+ 128, 255, 199, 142, 252],
+ [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
+ 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
+ 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])
+
+ b = np.packbits(arr, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, [[190, 72, 113, 214, 0],
+ [186, 216, 120, 210, 128],
+ [178, 248, 248, 210, 128],
+ [178, 241, 216, 64, 192],
+ [150, 227, 152, 68, 80],
+ [215, 195, 24, 5, 112],
+ [ 87, 202, 60, 5, 48],
+ [ 83, 90, 52, 1, 160],
+ [ 83, 90, 182, 72, 160],
+ [195, 83, 150, 88, 224],
+ [199, 83, 150, 92, 240],
+ [206, 119, 150, 92, 208],
+ [204, 127, 146, 78, 144],
+ [204, 109, 210, 110, 128],
+ [140, 73, 210, 39, 160],
+ [140, 64, 246, 181, 224],
+ [136, 208, 255, 149, 240],
+ [136, 244, 255, 220, 208],
+ [ 8, 189, 223, 222, 144],
+ [ 40, 45, 151, 218, 144],
+ [105, 41, 21, 218, 176],
+ [107, 104, 17, 202, 240],
+ [ 75, 122, 17, 234, 224],
+ [ 74, 90, 131, 170, 192],
+ [ 88, 18, 163, 168, 128]])
+
+
+ # result is the same if input is multiplied with a nonzero value
+ for dtype in 'bBhHiIlLqQ':
+ arr = np.array(a, dtype=dtype)
+ rnd = np.random.randint(low=np.iinfo(dtype).min,
+ high=np.iinfo(dtype).max, size=arr.size,
+ dtype=dtype)
+ rnd[rnd == 0] = 1
+ arr *= rnd.astype(dtype)
+ b = np.packbits(arr, axis=-1)
+ assert_array_equal(np.unpackbits(b)[:-4], a)
+
+ assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
+
+
+def test_packbits_very_large():
+    # test with some larger arrays, gh-8637
+    # the code paths are covered earlier, but a larger array makes a crash
+    # on the bug more likely
+ for s in range(950, 1050):
+ for dt in '?bBhHiIlLqQ':
+ x = np.ones((200, s), dtype=bool)
+ np.packbits(x, axis=1)
+
+
+def test_unpackbits():
+ # Copied from the docstring.
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]]))
+
+def test_pack_unpack_order():
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ b_little = np.unpackbits(a, axis=1, bitorder='little')
+ b_big = np.unpackbits(a, axis=1, bitorder='big')
+ assert_array_equal(b, b_big)
+ assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
+ assert_array_equal(b[:,::-1], b_little)
+ assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
+ assert_raises(ValueError, np.unpackbits, a, bitorder='r')
+ assert_raises(TypeError, np.unpackbits, a, bitorder=10)
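+
+
+# A small sketch of what the two bit orders mean for a single byte: 'big'
+# emits the most significant bit first and 'little' the least significant,
+# so the little-endian expansion is just the big-endian one reversed.
+def _demo_bitorder():
+    byte = np.array([2], dtype=np.uint8)          # 0b00000010
+    big = np.unpackbits(byte, bitorder='big')
+    little = np.unpackbits(byte, bitorder='little')
+    assert_array_equal(big, [0, 0, 0, 0, 0, 0, 1, 0])
+    assert_array_equal(little, big[::-1])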
+
+
+
+def test_unpackbits_empty():
+ a = np.empty((0,), dtype=np.uint8)
+ b = np.unpackbits(a)
+ assert_equal(b.dtype, np.uint8)
+ assert_array_equal(b, np.empty((0,)))
+
+
+def test_unpackbits_empty_with_axis():
+ # Lists of packed shapes for different axes and unpacked shapes.
+ shapes = [
+ ([(0,)], (0,)),
+ ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
+ ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
+ ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
+ ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
+ ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
+ ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
+ ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
+ ]
+ for in_shapes, out_shape in shapes:
+ for ax, in_shape in enumerate(in_shapes):
+ a = np.empty(in_shape, dtype=np.uint8)
+ b = np.unpackbits(a, axis=ax)
+ assert_equal(b.dtype, np.uint8)
+ assert_equal(b.shape, out_shape)
+
+
+def test_unpackbits_large():
+    # test all possible byte values by comparing against the
+    # already-tested packbits
+ d = np.arange(277, dtype=np.uint8)
+ assert_array_equal(np.packbits(np.unpackbits(d)), d)
+ assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
+ d = np.tile(d, (3, 1))
+ assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
+ d = d.T.copy()
+ assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
+
+
+class TestCount():
+ x = np.array([
+ [1, 0, 1, 0, 0, 1, 0],
+ [0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 1],
+ [1, 1, 0, 0, 0, 1, 1],
+ [1, 0, 1, 0, 1, 0, 1],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ ], dtype=np.uint8)
+ padded1 = np.zeros(57, dtype=np.uint8)
+ padded1[:49] = x.ravel()
+ padded1b = np.zeros(57, dtype=np.uint8)
+ padded1b[:49] = x[::-1].copy().ravel()
+ padded2 = np.zeros((9, 9), dtype=np.uint8)
+ padded2[:7, :7] = x
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
+ def test_roundtrip(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ # test complete invertibility of packbits and unpackbits with count
+ packed = np.packbits(self.x, bitorder=bitorder)
+ unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
+ assert_equal(unpacked.dtype, np.uint8)
+ assert_array_equal(unpacked, self.padded1[:cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ ])
+ def test_count(self, kwargs):
+ packed = np.packbits(self.x)
+ unpacked = np.unpackbits(packed, **kwargs)
+ assert_equal(unpacked.dtype, np.uint8)
+ assert_array_equal(unpacked, self.padded1[:-1])
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+    # when count < 0 the cutoff is count - 1 because of the extra zero of
+    # padding
+ @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
+ def test_roundtrip_axis(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
+ unpacked0 = np.unpackbits(packed0, axis=0, count=count,
+ bitorder=bitorder)
+ assert_equal(unpacked0.dtype, np.uint8)
+ assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
+ unpacked1 = np.unpackbits(packed1, axis=1, count=count,
+ bitorder=bitorder)
+ assert_equal(unpacked1.dtype, np.uint8)
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ {'bitorder' : 'little'},
+ {'bitorder': 'little', 'count': None},
+ {'bitorder' : 'big'},
+ {'bitorder': 'big', 'count': None},
+ ])
+ def test_axis_count(self, kwargs):
+ packed0 = np.packbits(self.x, axis=0)
+ unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
+ assert_equal(unpacked0.dtype, np.uint8)
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
+ else:
+ assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1)
+ unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
+ assert_equal(unpacked1.dtype, np.uint8)
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
+ else:
+ assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])
+
+ def test_bad_count(self):
+ packed0 = np.packbits(self.x, axis=0)
+ assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
+ packed1 = np.packbits(self.x, axis=1)
+ assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
+ packed = np.packbits(self.x)
+ assert_raises(ValueError, np.unpackbits, packed, count=-57)
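+
+
+# A minimal sketch of the `count` semantics exercised by TestCount: a
+# positive count keeps that many leading bits, a negative count drops that
+# many trailing padding bits, and count=None returns every unpacked bit.
+def _demo_unpackbits_count():
+    packed = np.packbits([1, 0, 1])               # one byte, 5 padding bits
+    assert_array_equal(np.unpackbits(packed, count=3), [1, 0, 1])
+    assert_array_equal(np.unpackbits(packed, count=-5), [1, 0, 1])
+    assert_equal(np.unpackbits(packed, count=None).size, 8)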
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py
new file mode 100644
index 00000000..3734344d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py
@@ -0,0 +1,303 @@
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_raises, assert_allclose
+ )
+
+import pytest
+
+# `poly1d` has some support for `bool_` and `timedelta64`,
+# but it is limited and they are therefore excluded here
+TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"
+
+
+class TestPolynomial:
+ def test_poly1d_str_and_repr(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(repr(p), 'poly1d([1., 2., 3.])')
+ assert_equal(str(p),
+ ' 2\n'
+ '1 x + 2 x + 3')
+
+ q = np.poly1d([3., 2, 1])
+ assert_equal(repr(q), 'poly1d([3., 2., 1.])')
+ assert_equal(str(q),
+ ' 2\n'
+ '3 x + 2 x + 1')
+
+ r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
+ assert_equal(str(r),
+ ' 3 2\n'
+ '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')
+
+ assert_equal(str(np.poly1d([-3, -2, -1])),
+ ' 2\n'
+ '-3 x - 2 x - 1')
+
+ def test_poly1d_resolution(self):
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
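+        # worked by hand: p(5) = 1*25 + 2*5 + 3 = 38, q(5) = 3*25 + 2*5 + 1 = 86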
+ assert_equal(p(0), 3.0)
+ assert_equal(p(5), 38.0)
+ assert_equal(q(0), 1.0)
+ assert_equal(q(5), 86.0)
+
+ def test_poly1d_math(self):
+        # use simple coefficients to keep the expected values easy to check
+ p = np.poly1d([1., 2, 4])
+ q = np.poly1d([4., 2, 1])
+ assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
+ assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.]))
+ assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.]))
+
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
+ assert_equal(p + q, np.poly1d([4., 4., 4.]))
+ assert_equal(p - q, np.poly1d([-2., 0., 2.]))
+ assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
+ assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
+ assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
+ assert_equal(p.deriv(), np.poly1d([2., 2.]))
+ assert_equal(p.deriv(2), np.poly1d([2.]))
+ assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
+ (np.poly1d([1., -1.]), np.poly1d([0.])))
+
+ @pytest.mark.parametrize("type_code", TYPE_CODES)
+ def test_poly1d_misc(self, type_code: str) -> None:
+ dtype = np.dtype(type_code)
+ ar = np.array([1, 2, 3], dtype=dtype)
+ p = np.poly1d(ar)
+
+ # `__eq__`
+ assert_equal(np.asarray(p), ar)
+ assert_equal(np.asarray(p).dtype, dtype)
+ assert_equal(len(p), 2)
+
+ # `__getitem__`
+ comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0}
+ for index, ref in comparison_dct.items():
+ scalar = p[index]
+ assert_equal(scalar, ref)
+ if dtype == np.object_:
+ assert isinstance(scalar, int)
+ else:
+ assert_equal(scalar.dtype, dtype)
+
+ def test_poly1d_variable_arg(self):
+ q = np.poly1d([1., 2, 3], variable='y')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 y + 2 y + 3')
+ q = np.poly1d([1., 2, 3], variable='lambda')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 lambda + 2 lambda + 3')
+
+ def test_poly(self):
+ assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
+ [1, -3, -2, 6])
+
+ # From matlab docs
+ A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
+ assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])
+
+ # Should produce real output for perfect conjugates
+ assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
+ assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j,
+ 1-2j, 1.+3.5j, 1-3.5j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j])))
+ assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
+ assert_(np.isrealobj(np.poly([1j, -1j])))
+ assert_(np.isrealobj(np.poly([1, -1])))
+
+ assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))
+
+ np.random.seed(42)
+ a = np.random.randn(100) + 1j*np.random.randn(100)
+ assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
+
+ def test_roots(self):
+ assert_array_equal(np.roots([1, 0, 0]), [0, 0])
+
+ def test_str_leading_zeros(self):
+ p = np.poly1d([4, 3, 2, 1])
+ p[3] = 0
+ assert_equal(str(p),
+ " 2\n"
+ "3 x + 2 x + 1")
+
+ p = np.poly1d([1, 2])
+ p[0] = 0
+ p[1] = 0
+ assert_equal(str(p), " \n0")
+
+ def test_polyfit(self):
+ c = np.array([3., 2., 1.])
+ x = np.linspace(0, 2, 7)
+ y = np.polyval(c, x)
+ err = [1, -1, 1, -1, 1, -1, 1]
+ weights = np.arange(8, 1, -1)**2/7.0
+
+ # Check exception when too few points for variance estimate. Note that
+ # the estimate requires the number of data points to exceed
+ # degree + 1
+ assert_raises(ValueError, np.polyfit,
+ [1], [1], deg=0, cov=True)
+
+ # check 1D case
+ m, cov = np.polyfit(x, y+err, 2, cov=True)
+ est = [3.8571, 0.2857, 1.619]
+ assert_almost_equal(est, m, decimal=4)
+ val0 = [[ 1.4694, -2.9388, 0.8163],
+ [-2.9388, 6.3673, -2.1224],
+ [ 0.8163, -2.1224, 1.161 ]]
+ assert_almost_equal(val0, cov, decimal=4)
+
+ m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
+ val = [[ 4.3964, -5.0052, 0.4878],
+ [-5.0052, 6.8067, -0.9089],
+ [ 0.4878, -0.9089, 0.3337]]
+ assert_almost_equal(val, cov2, decimal=4)
+
+ m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled")
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+ val = [[ 0.1473, -0.1677, 0.0163],
+ [-0.1677, 0.228 , -0.0304],
+ [ 0.0163, -0.0304, 0.0112]]
+ assert_almost_equal(val, cov3, decimal=4)
+
+ # check 2D (n,1) case
+ y = y[:, np.newaxis]
+ c = c[:, np.newaxis]
+ assert_almost_equal(c, np.polyfit(x, y, 2))
+ # check 2D (n,2) case
+ yy = np.concatenate((y, y), axis=1)
+ cc = np.concatenate((c, c), axis=1)
+ assert_almost_equal(cc, np.polyfit(x, yy, 2))
+
+ m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
+ assert_almost_equal(est, m[:, 0], decimal=4)
+ assert_almost_equal(est, m[:, 1], decimal=4)
+ assert_almost_equal(val0, cov[:, :, 0], decimal=4)
+ assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+
+        # check order 1 (deg=0) case, where the analytic results are simple
+ np.random.seed(123)
+ y = np.random.normal(size=(4, 10000))
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+ # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # Without scaling, since reduced chi2 is 1, the result should be the same.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
+ deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.5)
+        # If we misestimate our errors, scaling leaves the result unchanged:
+ w = np.full(y.shape[0], 1./0.5)
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # But if we do not scale, our estimate for the error in the mean will
+ # differ.
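+        # (the weights claim sigma = 0.5, so sigma_mean = 0.5/sqrt(4) = 0.25)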
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.25)
+
+ def test_objects(self):
+ from decimal import Decimal
+ p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
+ p2 = p * Decimal('1.333333333333333')
+ assert_(p2[1] == Decimal("3.9999999999999990"))
+ p2 = p.deriv()
+ assert_(p2[1] == Decimal('8.0'))
+ p2 = p.integ()
+ assert_(p2[3] == Decimal("1.333333333333333333333333333"))
+ assert_(p2[2] == Decimal('1.5'))
+ assert_(np.issubdtype(p2.coeffs.dtype, np.object_))
+ p = np.poly([Decimal(1), Decimal(2)])
+ assert_equal(np.poly([Decimal(1), Decimal(2)]),
+ [1, Decimal(-3), Decimal(2)])
+
+ def test_complex(self):
+ p = np.poly1d([3j, 2j, 1j])
+ p2 = p.integ()
+ assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())
+ p2 = p.deriv()
+ assert_((p2.coeffs == [6j, 2j]).all())
+
+ def test_integ_coeffs(self):
+ p = np.poly1d([3, 2, 1])
+ p2 = p.integ(3, k=[9, 7, 6])
+ assert_(
+            (p2.coeffs == [1/20, 1/12, 1/6, 9/2, 7, 6]).all())
+
+ def test_zero_dims(self):
+ try:
+ np.poly(np.zeros((0, 0)))
+ except ValueError:
+ pass
+
+ def test_poly_int_overflow(self):
+ """
+ Regression test for gh-5096.
+ """
+ v = np.arange(1, 21)
+ assert_almost_equal(np.poly(v), np.poly(np.diag(v)))
+
+ def test_zero_poly_dtype(self):
+ """
+ Regression test for gh-16354.
+ """
+ z = np.array([0, 0, 0])
+ p = np.poly1d(z.astype(np.int64))
+ assert_equal(p.coeffs.dtype, np.int64)
+
+ p = np.poly1d(z.astype(np.float32))
+ assert_equal(p.coeffs.dtype, np.float32)
+
+ p = np.poly1d(z.astype(np.complex64))
+ assert_equal(p.coeffs.dtype, np.complex64)
+
+ def test_poly_eq(self):
+ p = np.poly1d([1, 2, 3])
+ p2 = np.poly1d([1, 2, 4])
+ assert_equal(p == None, False)
+ assert_equal(p != None, True)
+ assert_equal(p == p, True)
+ assert_equal(p == p2, False)
+ assert_equal(p != p2, True)
+
+ def test_polydiv(self):
+ b = np.poly1d([2, 6, 6, 1])
+ a = np.poly1d([-1j, (1+2j), -(2+1j), 1])
+ q, r = np.polydiv(b, a)
+ assert_equal(q.coeffs.dtype, np.complex128)
+ assert_equal(r.coeffs.dtype, np.complex128)
+ assert_equal(q*a + r, b)
+
+ c = [1, 2, 3]
+ d = np.poly1d([1, 2, 3])
+ s, t = np.polydiv(c, d)
+ assert isinstance(s, np.poly1d)
+ assert isinstance(t, np.poly1d)
+ u, v = np.polydiv(d, c)
+ assert isinstance(u, np.poly1d)
+ assert isinstance(v, np.poly1d)
+
+ def test_poly_coeffs_mutable(self):
+ """ Coefficients should be modifiable """
+ p = np.poly1d([1, 2, 3])
+
+ p.coeffs += 1
+ assert_equal(p.coeffs, [2, 3, 4])
+
+ p.coeffs[2] += 10
+ assert_equal(p.coeffs, [2, 3, 14])
+
+ # this never used to be allowed - let's not add features to deprecated
+ # APIs
+ assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))
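+
+
+# A minimal module-level sketch of the division identity behind test_polydiv
+# above: np.polydiv returns (q, r) such that q*d + r == n.
+def _demo_polydiv_identity():
+    n = np.poly1d([1., 3., 2.])                   # x**2 + 3*x + 2
+    d = np.poly1d([1., 1.])                       # x + 1
+    q, r = np.polydiv(n, d)
+    assert_equal(q, np.poly1d([1., 2.]))
+    assert_equal(r, np.poly1d([0.]))
+    assert_equal(q * d + r, n)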
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py
new file mode 100644
index 00000000..0c919a53
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_recfunctions.py
@@ -0,0 +1,987 @@
+import pytest
+
+import numpy as np
+import numpy.ma as ma
+from numpy.ma.mrecords import MaskedRecords
+from numpy.ma.testutils import assert_equal
+from numpy.testing import assert_, assert_raises
+from numpy.lib.recfunctions import (
+ drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
+ find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
+ repack_fields, unstructured_to_structured, structured_to_unstructured,
+ apply_along_fields, require_fields, assign_fields_by_name)
+get_fieldspec = np.lib.recfunctions._get_fieldspec
+get_names = np.lib.recfunctions.get_names
+get_names_flat = np.lib.recfunctions.get_names_flat
+zip_descr = np.lib.recfunctions._zip_descr
+zip_dtype = np.lib.recfunctions._zip_dtype
+
+
+class TestRecFunctions:
+ # Misc tests
+
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array([('A', 1.), ('B', 2.)],
+ dtype=[('A', '|S3'), ('B', float)])
+ w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ self.data = (w, x, y, z)
+
+ def test_zip_descr(self):
+ # Test zip_descr
+ (w, x, y, z) = self.data
+
+ # Std array
+ test = zip_descr((x, x), flatten=True)
+ assert_equal(test,
+ np.dtype([('', int), ('', int)]))
+ test = zip_descr((x, x), flatten=False)
+ assert_equal(test,
+ np.dtype([('', int), ('', int)]))
+
+ # Std & flexible-dtype
+ test = zip_descr((x, z), flatten=True)
+ assert_equal(test,
+ np.dtype([('', int), ('A', '|S3'), ('B', float)]))
+ test = zip_descr((x, z), flatten=False)
+ assert_equal(test,
+ np.dtype([('', int),
+ ('', [('A', '|S3'), ('B', float)])]))
+
+ # Standard & nested dtype
+ test = zip_descr((x, w), flatten=True)
+ assert_equal(test,
+ np.dtype([('', int),
+ ('a', int),
+ ('ba', float), ('bb', int)]))
+ test = zip_descr((x, w), flatten=False)
+ assert_equal(test,
+ np.dtype([('', int),
+ ('', [('a', int),
+ ('b', [('ba', float), ('bb', int)])])]))
+
+ def test_drop_fields(self):
+ # Test drop_fields
+ a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+
+ # A basic field
+ test = drop_fields(a, 'a')
+ control = np.array([((2, 3.0),), ((5, 6.0),)],
+ dtype=[('b', [('ba', float), ('bb', int)])])
+ assert_equal(test, control)
+
+ # Another basic field (but nesting two fields)
+ test = drop_fields(a, 'b')
+ control = np.array([(1,), (4,)], dtype=[('a', int)])
+ assert_equal(test, control)
+
+ # A nested sub-field
+ test = drop_fields(a, ['ba', ])
+ control = np.array([(1, (3.0,)), (4, (6.0,))],
+ dtype=[('a', int), ('b', [('bb', int)])])
+ assert_equal(test, control)
+
+ # All the nested sub-field from a field: zap that field
+ test = drop_fields(a, ['ba', 'bb'])
+ control = np.array([(1,), (4,)], dtype=[('a', int)])
+ assert_equal(test, control)
+
+ # dropping all fields results in an array with no fields
+ test = drop_fields(a, ['a', 'b'])
+ control = np.array([(), ()], dtype=[])
+ assert_equal(test, control)
+
+ def test_rename_fields(self):
+ # Test rename fields
+ a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
+ dtype=[('a', int),
+ ('b', [('ba', float), ('bb', (float, 2))])])
+ test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
+ newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
+ control = a.view(newdtype)
+ assert_equal(test.dtype, newdtype)
+ assert_equal(test, control)
+
+ def test_get_names(self):
+ # Test get_names
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ test = get_names(ndtype)
+ assert_equal(test, ('A', 'B'))
+
+ ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
+ test = get_names(ndtype)
+ assert_equal(test, ('a', ('b', ('ba', 'bb'))))
+
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names(ndtype)
+ assert_equal(test, ('a', ('b', ())))
+
+ ndtype = np.dtype([])
+ test = get_names(ndtype)
+ assert_equal(test, ())
+
+ def test_get_names_flat(self):
+ # Test get_names_flat
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('A', 'B'))
+
+ ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('a', 'b', 'ba', 'bb'))
+
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('a', 'b'))
+
+ ndtype = np.dtype([])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ())
+
+ def test_get_fieldstructure(self):
+ # Test get_fieldstructure
+
+ # No nested fields
+ ndtype = np.dtype([('A', '|S3'), ('B', float)])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {'A': [], 'B': []})
+
+ # One 1-nested field
+ ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
+
+ # One 2-nested fields
+ ndtype = np.dtype([('A', int),
+ ('B', [('BA', int),
+ ('BB', [('BBA', int), ('BBB', int)])])])
+ test = get_fieldstructure(ndtype)
+ control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
+ 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+ assert_equal(test, control)
+
+ # 0 fields
+ ndtype = np.dtype([])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {})
+
+ def test_find_duplicates(self):
+ # Test find_duplicates
+ a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
+ (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
+ mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
+ (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
+ dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
+
+ test = find_duplicates(a, ignoremask=False, return_index=True)
+ control = [0, 2]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='A', return_index=True)
+ control = [0, 1, 2, 3, 5]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='B', return_index=True)
+ control = [0, 1, 2, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='BA', return_index=True)
+ control = [0, 1, 2, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, key='BB', return_index=True)
+ control = [0, 1, 2, 3, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ def test_find_duplicates_ignoremask(self):
+ # Test the ignoremask option of find_duplicates
+ ndtype = [('a', int)]
+ a = ma.array([1, 1, 1, 2, 2, 3, 3],
+ mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+ test = find_duplicates(a, ignoremask=True, return_index=True)
+ control = [0, 1, 3, 4]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ test = find_duplicates(a, ignoremask=False, return_index=True)
+ control = [0, 1, 2, 3, 4, 6]
+ assert_equal(sorted(test[-1]), control)
+ assert_equal(test[0], a[test[-1]])
+
+ def test_repack_fields(self):
+ dt = np.dtype('u1,f4,i8', align=True)
+ a = np.zeros(2, dtype=dt)
+
+ assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
+ assert_equal(repack_fields(a).itemsize, 13)
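+        # packed itemsize is 1 + 4 + 8 == 13; the aligned layout pads to 16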
+ assert_equal(repack_fields(repack_fields(dt), align=True), dt)
+
+ # make sure type is preserved
+ dt = np.dtype((np.record, dt))
+ assert_(repack_fields(dt).type is np.record)
+
+ def test_structured_to_unstructured(self):
+ a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ out = structured_to_unstructured(a)
+ assert_equal(out, np.zeros((4,5), dtype='f8'))
+
+ b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
+ out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
+ assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
+
+ c = np.arange(20).reshape((4,5))
+ out = unstructured_to_structured(c, a.dtype)
+ want = np.array([( 0, ( 1., 2), [ 3., 4.]),
+ ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]),
+ (15, (16., 17), [18., 19.])],
+ dtype=[('a', 'i4'),
+ ('b', [('f0', 'f4'), ('f1', 'u2')]),
+ ('c', 'f4', (2,))])
+ assert_equal(out, want)
+
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ assert_equal(apply_along_fields(np.mean, d),
+ np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
+ assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
+ np.array([ 3. , 5.5, 9. , 11. ]))
+
+ # check that for uniform field dtypes we get a view, not a copy:
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
+ # including uniform fields with subarrays unpacked
+ d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
+ (8, [9, 10], [[11, 12], [13, 14]])],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2)))])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
+ # test that nested fields with identical names don't break anything
+ point = np.dtype([('x', int), ('y', int)])
+ triangle = np.dtype([('a', point), ('b', point), ('c', point)])
+ arr = np.zeros(10, triangle)
+ res = structured_to_unstructured(arr, dtype=int)
+ assert_equal(res, np.zeros((10, 6), dtype=int))
+
+
+ # test nested combinations of subarrays and structured arrays, gh-13333
+ def subarray(dt, shape):
+ return np.dtype((dt, shape))
+
+ def structured(*dts):
+ return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])
+
+ def inspect(dt, dtype=None):
+ arr = np.zeros((), dt)
+ ret = structured_to_unstructured(arr, dtype=dtype)
+ backarr = unstructured_to_structured(ret, dt)
+ return ret.shape, ret.dtype, backarr.dtype
+
+ dt = structured(subarray(structured(np.int32, np.int32), 3))
+ assert_equal(inspect(dt), ((6,), np.int32, dt))
+
+ dt = structured(subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((4,), np.int32, dt))
+
+ dt = structured(np.int32)
+ assert_equal(inspect(dt), ((1,), np.int32, dt))
+
+ dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((5,), np.int32, dt))
+
+ dt = structured()
+ assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
+
+ # these currently don't work, but we may make it work in the future
+ assert_raises(NotImplementedError, structured_to_unstructured,
+ np.zeros(3, dt), dtype=np.int32)
+ assert_raises(NotImplementedError, unstructured_to_structured,
+ np.zeros((3,0), dtype=np.int32))
+
+ def test_unstructured_to_structured(self):
+        # test that dtype may be given as the arguments of np.dtype
+ a = np.zeros((20, 2))
+ test_dtype_args = [('x', float), ('y', float)]
+ test_dtype = np.dtype(test_dtype_args)
+        field1 = unstructured_to_structured(a, dtype=test_dtype_args)  # list form
+        field2 = unstructured_to_structured(a, dtype=test_dtype)  # np.dtype form
+ assert_equal(field1, field2)
+
+ def test_field_assignment_by_name(self):
+ a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ newdt = [('b', 'f4'), ('c', 'u1')]
+
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+
+ b = np.array([(1,2), (3,4)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
+
+ # test nested fields
+ a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
+ newdt = [('a', [('c', 'u1')])]
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+ b = np.array([((2,),), ((3,),)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
+
+ # test unstructured code path for 0d arrays
+ a, b = np.array(3), np.array(0)
+ assign_fields_by_name(b, a)
+ assert_equal(b[()], 3)
+
+
+class TestRecursiveFillFields:
+ # Test recursive_fill_fields.
+ def test_simple_flexible(self):
+ # Test recursive_fill_fields on flexible-array
+ a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
+ b = np.zeros((3,), dtype=a.dtype)
+ test = recursive_fill_fields(a, b)
+ control = np.array([(1, 10.), (2, 20.), (0, 0.)],
+ dtype=[('A', int), ('B', float)])
+ assert_equal(test, control)
+
+ def test_masked_flexible(self):
+ # Test recursive_fill_fields on masked flexible-array
+ a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
+ dtype=[('A', int), ('B', float)])
+ b = ma.zeros((3,), dtype=a.dtype)
+ test = recursive_fill_fields(a, b)
+ control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
+ mask=[(0, 1), (1, 0), (0, 0)],
+ dtype=[('A', int), ('B', float)])
+ assert_equal(test, control)
+
+
+class TestMergeArrays:
+ # Test merge_arrays
+
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array(
+ [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
+ w = np.array(
+ [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
+ self.data = (w, x, y, z)
+
+ def test_solo(self):
+ # Test merge_arrays on a single array.
+ (_, x, _, z) = self.data
+
+ test = merge_arrays(x)
+ control = np.array([(1,), (2,)], dtype=[('f0', int)])
+ assert_equal(test, control)
+ test = merge_arrays((x,))
+ assert_equal(test, control)
+
+ test = merge_arrays(z, flatten=False)
+ assert_equal(test, z)
+ test = merge_arrays(z, flatten=True)
+ assert_equal(test, z)
+
+ def test_solo_w_flatten(self):
+        # Test merge_arrays on a single array w/ & w/o flattening
+ w = self.data[0]
+ test = merge_arrays(w, flatten=False)
+ assert_equal(test, w)
+
+ test = merge_arrays(w, flatten=True)
+ control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
+ dtype=[('a', int), ('ba', float), ('bb', int)])
+ assert_equal(test, control)
+
+ def test_standard(self):
+        # Test merging two standard (unstructured) arrays
+ (_, x, y, _) = self.data
+ test = merge_arrays((x, y), usemask=False)
+ control = np.array([(1, 10), (2, 20), (-1, 30)],
+ dtype=[('f0', int), ('f1', int)])
+ assert_equal(test, control)
+
+ test = merge_arrays((x, y), usemask=True)
+ control = ma.array([(1, 10), (2, 20), (-1, 30)],
+ mask=[(0, 0), (0, 0), (1, 0)],
+ dtype=[('f0', int), ('f1', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_flatten(self):
+ # Test standard & flexible
+ (_, x, _, z) = self.data
+ test = merge_arrays((x, z), flatten=True)
+ control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
+ dtype=[('f0', int), ('A', '|S3'), ('B', float)])
+ assert_equal(test, control)
+
+ test = merge_arrays((x, z), flatten=False)
+ control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
+ dtype=[('f0', int),
+ ('f1', [('A', '|S3'), ('B', float)])])
+ assert_equal(test, control)
+
+ def test_flatten_wflexible(self):
+ # Test flatten standard & nested
+ (w, x, _, _) = self.data
+ test = merge_arrays((x, w), flatten=True)
+ control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
+ dtype=[('f0', int),
+ ('a', int), ('ba', float), ('bb', int)])
+ assert_equal(test, control)
+
+ test = merge_arrays((x, w), flatten=False)
+ controldtype = [('f0', int),
+ ('f1', [('a', int),
+ ('b', [('ba', float), ('bb', int), ('bc', [])])])]
+ control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
+ dtype=controldtype)
+ assert_equal(test, control)
+
+ def test_wmasked_arrays(self):
+ # Test merge_arrays masked arrays
+ (_, x, _, _) = self.data
+ mx = ma.array([1, 2, 3], mask=[1, 0, 0])
+ test = merge_arrays((x, mx), usemask=True)
+ control = ma.array([(1, 1), (2, 2), (-1, 3)],
+ mask=[(0, 1), (0, 0), (1, 0)],
+ dtype=[('f0', int), ('f1', int)])
+ assert_equal(test, control)
+ test = merge_arrays((x, mx), usemask=True, asrecarray=True)
+ assert_equal(test, control)
+ assert_(isinstance(test, MaskedRecords))
+
+ def test_w_singlefield(self):
+ # Test single field
+ test = merge_arrays((np.array([1, 2]).view([('a', int)]),
+ np.array([10., 20., 30.])),)
+ control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
+ mask=[(0, 0), (0, 0), (1, 0)],
+ dtype=[('a', int), ('f1', float)])
+ assert_equal(test, control)
+
+ def test_w_shorter_flex(self):
+ # Test merge_arrays w/ a shorter flexndarray.
+ z = self.data[-1]
+
+        # FIXME: this test looks incomplete and broken
+ #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
+ #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
+ # dtype=[('A', '|S3'), ('B', float), ('C', int)])
+ #assert_equal(test, control)
+
+ # Hack to avoid pyflakes warnings about unused variables
+ merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
+ np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
+ dtype=[('A', '|S3'), ('B', float), ('C', int)])
+
+ def test_singlerecord(self):
+ (_, x, y, z) = self.data
+ test = merge_arrays((x[0], y[0], z[0]), usemask=False)
+ control = np.array([(1, 10, ('A', 1))],
+ dtype=[('f0', int),
+ ('f1', int),
+ ('f2', [('A', '|S3'), ('B', float)])])
+ assert_equal(test, control)
+
+
+class TestAppendFields:
+ # Test append_fields
+
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array(
+ [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
+ w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ self.data = (w, x, y, z)
+
+ def test_append_single(self):
+ # Test simple case
+ (_, x, _, _) = self.data
+ test = append_fields(x, 'A', data=[10, 20, 30])
+ control = ma.array([(1, 10), (2, 20), (-1, 30)],
+ mask=[(0, 0), (0, 0), (1, 0)],
+ dtype=[('f0', int), ('A', int)],)
+ assert_equal(test, control)
+
+ def test_append_double(self):
+ # Test simple case
+ (_, x, _, _) = self.data
+ test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
+ control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
+ mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
+ dtype=[('f0', int), ('A', int), ('B', int)],)
+ assert_equal(test, control)
+
+ def test_append_on_flex(self):
+ # Test append_fields on flexible type arrays
+ z = self.data[-1]
+ test = append_fields(z, 'C', data=[10, 20, 30])
+ control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
+ mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
+ dtype=[('A', '|S3'), ('B', float), ('C', int)],)
+ assert_equal(test, control)
+
+ def test_append_on_nested(self):
+ # Test append_fields on nested fields
+ w = self.data[0]
+ test = append_fields(w, 'C', data=[10, 20, 30])
+ control = ma.array([(1, (2, 3.0), 10),
+ (4, (5, 6.0), 20),
+ (-1, (-1, -1.), 30)],
+                            mask=[(0, (0, 0), 0), (0, (0, 0), 0),
+                                  (1, (1, 1), 0)],
+ dtype=[('a', int),
+ ('b', [('ba', float), ('bb', int)]),
+ ('C', int)],)
+ assert_equal(test, control)
+
+
+class TestStackArrays:
+ # Test stack_arrays
+ def setup_method(self):
+ x = np.array([1, 2, ])
+ y = np.array([10, 20, 30])
+ z = np.array(
+ [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
+ w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ self.data = (w, x, y, z)
+
+ def test_solo(self):
+ # Test stack_arrays on single arrays
+ (_, x, _, _) = self.data
+ test = stack_arrays((x,))
+ assert_equal(test, x)
+ assert_(test is x)
+
+ test = stack_arrays(x)
+ assert_equal(test, x)
+ assert_(test is x)
+
+ def test_unnamed_fields(self):
+ # Tests combinations of arrays w/o named fields
+ (_, x, y, _) = self.data
+
+ test = stack_arrays((x, x), usemask=False)
+ control = np.array([1, 2, 1, 2])
+ assert_equal(test, control)
+
+ test = stack_arrays((x, y), usemask=False)
+ control = np.array([1, 2, 10, 20, 30])
+ assert_equal(test, control)
+
+ test = stack_arrays((y, x), usemask=False)
+ control = np.array([10, 20, 30, 1, 2])
+ assert_equal(test, control)
+
+ def test_unnamed_and_named_fields(self):
+ # Test combination of arrays w/ & w/o named fields
+ (_, x, _, z) = self.data
+
+ test = stack_arrays((x, z))
+ control = ma.array([(1, -1, -1), (2, -1, -1),
+ (-1, 'A', 1), (-1, 'B', 2)],
+ mask=[(0, 1, 1), (0, 1, 1),
+ (1, 0, 0), (1, 0, 0)],
+ dtype=[('f0', int), ('A', '|S3'), ('B', float)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ test = stack_arrays((z, x))
+ control = ma.array([('A', 1, -1), ('B', 2, -1),
+ (-1, -1, 1), (-1, -1, 2), ],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (1, 1, 0), (1, 1, 0)],
+ dtype=[('A', '|S3'), ('B', float), ('f2', int)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ test = stack_arrays((z, z, x))
+ control = ma.array([('A', 1, -1), ('B', 2, -1),
+ ('A', 1, -1), ('B', 2, -1),
+ (-1, -1, 1), (-1, -1, 2), ],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (0, 0, 1), (0, 0, 1),
+ (1, 1, 0), (1, 1, 0)],
+ dtype=[('A', '|S3'), ('B', float), ('f2', int)])
+ assert_equal(test, control)
+
+ def test_matching_named_fields(self):
+ # Test combination of arrays w/ matching field names
+ (_, x, _, z) = self.data
+ zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)])
+ test = stack_arrays((z, zz))
+        control = ma.array([('A', 1, -1), ('B', 2, -1),
+                            ('a', 10., 100.), ('b', 20., 200.),
+                            ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (0, 0, 0), (0, 0, 0), (0, 0, 0)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ test = stack_arrays((z, zz, x))
+ ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
+ control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
+ ('a', 10., 100., -1), ('b', 20., 200., -1),
+ ('c', 30., 300., -1),
+ (-1, -1, -1, 1), (-1, -1, -1, 2)],
+ dtype=ndtype,
+ mask=[(0, 0, 1, 1), (0, 0, 1, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
+ (1, 1, 1, 0), (1, 1, 1, 0)])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_defaults(self):
+ # Test defaults: no exception raised if keys of defaults are not fields.
+ (_, _, _, z) = self.data
+ zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)])
+ defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
+ test = stack_arrays((z, zz), defaults=defaults)
+        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
+                            ('a', 10., 100.), ('b', 20., 200.),
+                            ('c', 30., 300.)],
+ dtype=[('A', '|S3'), ('B', float), ('C', float)],
+ mask=[(0, 0, 1), (0, 0, 1),
+ (0, 0, 0), (0, 0, 0), (0, 0, 0)])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ def test_autoconversion(self):
+ # Tests autoconversion
+ adtype = [('A', int), ('B', bool), ('C', float)]
+ a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
+ bdtype = [('A', int), ('B', float), ('C', float)]
+ b = ma.array([(4, 5, 6)], dtype=bdtype)
+ control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
+ dtype=bdtype)
+ test = stack_arrays((a, b), autoconvert=True)
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ with assert_raises(TypeError):
+ stack_arrays((a, b), autoconvert=False)
+
+ def test_checktitles(self):
+ # Test using titles in the field names
+ adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
+ a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
+ bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
+ b = ma.array([(4, 5, 6)], dtype=bdtype)
+ test = stack_arrays((a, b))
+ control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
+ dtype=bdtype)
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+
+ def test_subdtype(self):
+ z = np.array([
+ ('A', 1), ('B', 2)
+ ], dtype=[('A', '|S3'), ('B', float, (1,))])
+ zz = np.array([
+ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
+
+ res = stack_arrays((z, zz))
+ expected = ma.array(
+ data=[
+ (b'A', [1.0], 0),
+ (b'B', [2.0], 0),
+ (b'a', [10.0], 100.0),
+ (b'b', [20.0], 200.0),
+ (b'c', [30.0], 300.0)],
+ mask=[
+ (False, [False], True),
+ (False, [False], True),
+ (False, [False], False),
+ (False, [False], False),
+ (False, [False], False)
+ ],
+ dtype=zz.dtype
+ )
+ assert_equal(res.dtype, expected.dtype)
+ assert_equal(res, expected)
+ assert_equal(res.mask, expected.mask)
+
+
+class TestJoinBy:
+ def setup_method(self):
+ self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('c', int)])
+ self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('d', int)])
+
+ def test_inner_join(self):
+ # Basic test of join_by
+ a, b = self.a, self.b
+
+ test = join_by('a', a, b, jointype='inner')
+ control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
+ (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
+ (9, 59, 69, 109, 104)],
+ dtype=[('a', int), ('b1', int), ('b2', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_join(self):
+ a, b = self.a, self.b
+
+        # FIXME: this test is broken
+ #test = join_by(('a', 'b'), a, b)
+ #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
+ # (7, 57, 107, 102), (8, 58, 108, 103),
+ # (9, 59, 109, 104)],
+ # dtype=[('a', int), ('b', int),
+ # ('c', int), ('d', int)])
+ #assert_equal(test, control)
+
+ # Hack to avoid pyflakes unused variable warnings
+ join_by(('a', 'b'), a, b)
+ np.array([(5, 55, 105, 100), (6, 56, 106, 101),
+ (7, 57, 107, 102), (8, 58, 108, 103),
+ (9, 59, 109, 104)],
+ dtype=[('a', int), ('b', int),
+ ('c', int), ('d', int)])
+
+ def test_join_subdtype(self):
+ # tests the bug in https://stackoverflow.com/q/44769632/102441
+ foo = np.array([(1,)],
+ dtype=[('key', int)])
+ bar = np.array([(1, np.array([1,2,3]))],
+ dtype=[('key', int), ('value', 'uint16', 3)])
+ res = join_by('key', foo, bar)
+ assert_equal(res, bar.view(ma.MaskedArray))
+
+ def test_outer_join(self):
+ a, b = self.a, self.b
+
+ test = join_by(('a', 'b'), a, b, 'outer')
+ control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
+ (2, 52, 102, -1), (3, 53, 103, -1),
+ (4, 54, 104, -1), (5, 55, 105, -1),
+ (5, 65, -1, 100), (6, 56, 106, -1),
+ (6, 66, -1, 101), (7, 57, 107, -1),
+ (7, 67, -1, 102), (8, 58, 108, -1),
+ (8, 68, -1, 103), (9, 59, 109, -1),
+ (9, 69, -1, 104), (10, 70, -1, 105),
+ (11, 71, -1, 106), (12, 72, -1, 107),
+ (13, 73, -1, 108), (14, 74, -1, 109)],
+ mask=[(0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 0, 1),
+ (0, 0, 1, 0), (0, 0, 1, 0),
+ (0, 0, 1, 0), (0, 0, 1, 0),
+ (0, 0, 1, 0), (0, 0, 1, 0)],
+ dtype=[('a', int), ('b', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_leftouter_join(self):
+ a, b = self.a, self.b
+
+ test = join_by(('a', 'b'), a, b, 'leftouter')
+ control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
+ (2, 52, 102, -1), (3, 53, 103, -1),
+ (4, 54, 104, -1), (5, 55, 105, -1),
+ (6, 56, 106, -1), (7, 57, 107, -1),
+ (8, 58, 108, -1), (9, 59, 109, -1)],
+ mask=[(0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1),
+ (0, 0, 0, 1), (0, 0, 0, 1)],
+ dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_different_field_order(self):
+ # gh-8940
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ # this should not give a FutureWarning:
+ j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+ assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+ def test_duplicate_keys(self):
+ a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+ b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+ assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+ def test_same_name_different_dtypes_key(self):
+ a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ expected_dtype = np.dtype([
+ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_same_name_different_dtypes(self):
+ # gh-9338
+ a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
+ expected_dtype = np.dtype([
+ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_subarray_key(self):
+ a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
+ a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
+
+ b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
+ b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
+
+ expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
+ expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
+
+ res = join_by('pos', a, b)
+ assert_equal(res.dtype, expected_dtype)
+ assert_equal(res, expected)
+
+ def test_padded_dtype(self):
+ dt = np.dtype('i1,f4', align=True)
+ dt.names = ('k', 'v')
+        assert_equal(len(dt.descr), 3)  # padding field is inserted
+
+ a = np.array([(1, 3), (3, 2)], dt)
+ b = np.array([(1, 1), (2, 2)], dt)
+ res = join_by('k', a, b)
+
+ # no padding fields remain
+ expected_dtype = np.dtype([
+ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
+ ])
+
+ assert_equal(res.dtype, expected_dtype)
+
+
+class TestJoinBy2:
+ @classmethod
+ def setup_method(cls):
+ cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('c', int)])
+ cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
+ np.arange(100, 110))),
+ dtype=[('a', int), ('b', int), ('d', int)])
+
+ def test_no_r1postfix(self):
+ # Basic test of join_by no_r1postfix
+ a, b = self.a, self.b
+
+ test = join_by(
+ 'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
+ control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
+ (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
+ (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
+ (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
+ (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
+ dtype=[('a', int), ('b', int), ('b2', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_no_postfix(self):
+ assert_raises(ValueError, join_by, 'a', self.a, self.b,
+ r1postfix='', r2postfix='')
+
+ def test_no_r2postfix(self):
+ # Basic test of join_by no_r2postfix
+ a, b = self.a, self.b
+
+ test = join_by(
+ 'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
+ control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
+ (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
+ (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
+ (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
+ (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
+ dtype=[('a', int), ('b1', int), ('b', int),
+ ('c', int), ('d', int)])
+ assert_equal(test, control)
+
+ def test_two_keys_two_vars(self):
+ a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
+ np.arange(50, 60), np.arange(10, 20))),
+ dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
+
+ b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
+ np.arange(65, 75), np.arange(0, 10))),
+ dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
+
+ control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
+ (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
+ (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
+ (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
+ (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
+ dtype=[('k', int), ('a', int), ('b1', int),
+ ('b2', int), ('c1', int), ('c2', int)])
+ test = join_by(
+ ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test, control)
+
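+
+# A small sketch of the postfix rules tested above: non-key fields present
+# in both inputs get r1postfix/r2postfix appended (default '1' and '2'),
+# which is why two empty postfixes on a shared field name must raise.
+def _demo_join_by_postfixes():
+    a = np.array([(0, 1)], dtype=[('k', int), ('v', int)])
+    b = np.array([(0, 2)], dtype=[('k', int), ('v', int)])
+    res = join_by('k', a, b, jointype='inner')
+    assert res.dtype.names == ('k', 'v1', 'v2')
+
+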
+class TestAppendFieldsObj:
+ """
+ Test append_fields with arrays containing objects
+ """
+ # https://github.com/numpy/numpy/issues/2346
+
+ def setup_method(self):
+ from datetime import date
+ self.data = dict(obj=date(2000, 1, 1))
+
+ def test_append_to_objects(self):
+ "Test append_fields when the base array contains objects"
+ obj = self.data['obj']
+ x = np.array([(obj, 1.), (obj, 2.)],
+ dtype=[('A', object), ('B', float)])
+ y = np.array([10, 20], dtype=int)
+ test = append_fields(x, 'C', data=y, usemask=False)
+ control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
+ dtype=[('A', object), ('B', float), ('C', int)])
+ assert_equal(test, control)
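+
+
+# A minimal end-to-end sketch of the round trip exercised throughout this
+# file: a structured array with uniform field dtypes flattens to a view of
+# the original data and converts back without copying.
+def _demo_structured_round_trip():
+    arr = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+    unstruct = structured_to_unstructured(arr)
+    assert_(unstruct.base is arr)                 # a view, not a copy
+    assert_equal(unstructured_to_structured(unstruct, arr.dtype), arr)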
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_regression.py
new file mode 100644
index 00000000..55df2a67
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_regression.py
@@ -0,0 +1,247 @@
+import os
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
+ assert_raises, _assert_valid_refcount,
+ )
+
+
+class TestRegression:
+ def test_poly1d(self):
+ # Ticket #28
+ assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
+ np.poly1d([-1, 1]))
+
+ def test_cov_parameters(self):
+ # Ticket #91
+ x = np.random.random((3, 3))
+ y = x.copy()
+ np.cov(x, rowvar=True)
+ np.cov(y, rowvar=False)
+ assert_array_equal(x, y)
+
+ def test_mem_digitize(self):
+ # Ticket #95
+ for i in range(100):
+ np.digitize([1, 2, 3, 4], [1, 3])
+ np.digitize([0, 1, 2, 3, 4], [1, 3])
+
+ def test_unique_zero_sized(self):
+ # Ticket #205
+ assert_array_equal([], np.unique(np.array([])))
+
+ def test_mem_vectorise(self):
+ # Ticket #325
+ vt = np.vectorize(lambda *args: args)
+ vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
+        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)),
+           np.zeros((1, 1, 2)), np.zeros((2, 2)))
+
+ def test_mgrid_single_element(self):
+ # Ticket #339
+ assert_array_equal(np.mgrid[0:0:1j], [0])
+ assert_array_equal(np.mgrid[0:0], [])
+
+ def test_refcount_vectorize(self):
+ # Ticket #378
+ def p(x, y):
+ return 123
+ v = np.vectorize(p)
+ _assert_valid_refcount(v)
+
+ def test_poly1d_nan_roots(self):
+ # Ticket #396
+ p = np.poly1d([np.nan, np.nan, 1], r=False)
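+        # accessing p.r runs an eigenvalue solve, which fails on NaN input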
+ assert_raises(np.linalg.LinAlgError, getattr, p, "r")
+
+ def test_mem_polymul(self):
+ # Ticket #448
+ np.polymul([], [1.])
+
+ def test_mem_string_concat(self):
+ # Ticket #469
+ x = np.array([])
+ np.append(x, 'asdasd\tasdasd')
+
+ def test_poly_div(self):
+ # Ticket #553
+ u = np.poly1d([1, 2, 3])
+ v = np.poly1d([1, 2, 3, 4, 5])
+ q, r = np.polydiv(u, v)
+ assert_equal(q*v + r, u)
+
+ def test_poly_eq(self):
+ # Ticket #554
+ x = np.poly1d([1, 2, 3])
+ y = np.poly1d([3, 4])
+ assert_(x != y)
+ assert_(x == x)
+
+ def test_polyfit_build(self):
+ # Ticket #628
+ ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
+ 9.95368241e+00, -3.14526520e+02]
+ x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
+ 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176]
+ y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
+ 6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
+ 13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
+ 7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
+ 6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
+ 6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
+ 8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
+ tested = np.polyfit(x, y, 4)
+ assert_array_almost_equal(ref, tested)
+
+ def test_polydiv_type(self):
+ # Check that polydiv works for complex types
+ msg = "Wrong type, should be complex"
+ x = np.ones(3, dtype=complex)
+ q, r = np.polydiv(x, x)
+ assert_(q.dtype == complex, msg)
+ msg = "Wrong type, should be float"
+ x = np.ones(3, dtype=int)
+ q, r = np.polydiv(x, x)
+ assert_(q.dtype == float, msg)
+
+ def test_histogramdd_too_many_bins(self):
+ # Ticket 928.
+ assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)
+
+ def test_polyint_type(self):
+ # Ticket #944
+ msg = "Wrong type, should be complex"
+ x = np.ones(3, dtype=complex)
+ assert_(np.polyint(x).dtype == complex, msg)
+ msg = "Wrong type, should be float"
+ x = np.ones(3, dtype=int)
+ assert_(np.polyint(x).dtype == float, msg)
+
+ def test_ndenumerate_crash(self):
+ # Ticket 1140
+ # Shouldn't crash:
+ list(np.ndenumerate(np.array([[]])))
+
+ def test_asfarray_none(self):
+ # Test for changeset r5065
+ assert_array_equal(np.array([np.nan]), np.asfarray([None]))
+
+ def test_large_fancy_indexing(self):
+ # Large enough to fail on 64-bit.
+ nbits = np.dtype(np.intp).itemsize * 8
+ thesize = int((2**nbits)**(1.0/5.0)+1)
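+ # (editor's note) thesize**5 just exceeds 2**nbits, so the fancy
+ # index below would address more elements than intp can represent.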
+
+ def dp():
+ n = 3
+ a = np.ones((n,)*5)
+ i = np.random.randint(0, n, size=thesize)
+ a[np.ix_(i, i, i, i, i)] = 0
+
+ def dp2():
+ n = 3
+ a = np.ones((n,)*5)
+ i = np.random.randint(0, n, size=thesize)
+ a[np.ix_(i, i, i, i, i)]
+
+ assert_raises(ValueError, dp)
+ assert_raises(ValueError, dp2)
+
+ def test_void_coercion(self):
+ dt = np.dtype([('a', 'f4'), ('b', 'i4')])
+ x = np.zeros((1,), dt)
+ assert_(np.r_[x, x].dtype == dt)
+
+ def test_who_with_0dim_array(self):
+ # ticket #1243
+ import os
+ import sys
+
+ oldstdout = sys.stdout
+ sys.stdout = open(os.devnull, 'w')
+ try:
+ try:
+ np.who({'foo': np.array(1)})
+ except Exception:
+ raise AssertionError("ticket #1243")
+ finally:
+ sys.stdout.close()
+ sys.stdout = oldstdout
+
+ def test_include_dirs(self):
+ # As a sanity check, just test that get_include
+ # includes something reasonable. Somewhat
+ # related to ticket #1405.
+ include_dirs = [np.get_include()]
+ for path in include_dirs:
+ assert_(isinstance(path, str))
+ assert_(path != '')
+
+ def test_polyder_return_type(self):
+ # Ticket #1249
+ assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
+ assert_(isinstance(np.polyder([1], 0), np.ndarray))
+ assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
+ assert_(isinstance(np.polyder([1], 1), np.ndarray))
+
+ def test_append_fields_dtype_list(self):
+ # Ticket #1676
+ from numpy.lib.recfunctions import append_fields
+
+ base = np.array([1, 2, 3], dtype=np.int32)
+ names = ['a', 'b', 'c']
+ data = np.eye(3).astype(np.int32)
+ dlist = [np.float64, np.int32, np.int32]
+ try:
+ append_fields(base, names, data, dlist)
+ except Exception:
+ raise AssertionError()
+
+ def test_loadtxt_fields_subarrays(self):
+ # For ticket #1936
+ from io import StringIO
+
+ dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+ x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+ dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
+ x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))
+
+ dt = [("a", 'u1', (2, 2))]
+ x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))
+
+ dt = [("a", 'u1', (2, 3, 2))]
+ x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
+ data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
+ assert_equal(x, np.array(data, dtype=dt))
+
+ def test_nansum_with_boolean(self):
+ # gh-2978
+ a = np.zeros(2, dtype=bool)
+ try:
+ np.nansum(a)
+ except Exception:
+ raise AssertionError()
+
+ def test_py3_compat(self):
+ # gh-2561
+ # Test that the old-style class check is bypassed in Python 3
+ class C():
+ """Old-style class in Python 2, a normal class in Python 3"""
+ pass
+
+ out = open(os.devnull, 'w')
+ try:
+ np.info(C(), output=out)
+ except AttributeError:
+ raise AssertionError()
+ finally:
+ out.close()
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py
new file mode 100644
index 00000000..76058cf2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_shape_base.py
@@ -0,0 +1,787 @@
+import numpy as np
+import functools
+import sys
+import pytest
+
+from numpy.lib.shape_base import (
+ apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
+ vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
+ put_along_axis
+ )
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
+ )
+
+
+IS_64BIT = sys.maxsize > 2**32
+
+
+def _add_keepdims(func):
+ """ hack in keepdims behavior into a function taking an axis """
+ @functools.wraps(func)
+ def wrapped(a, axis, **kwargs):
+ res = func(a, axis=axis, **kwargs)
+ if axis is None:
+ axis = 0 # res is now a scalar, so we can insert this anywhere
+ return np.expand_dims(res, axis=axis)
+ return wrapped
+
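+
+# A minimal sketch of what _add_keepdims provides (editor's illustration;
+# this underscore-prefixed helper is hypothetical and is not collected by
+# pytest): the reduced axis is re-inserted so the indices keep the shape
+# that take_along_axis expects.
+def _example_add_keepdims():
+ a = np.arange(6).reshape(2, 3)
+ idx = _add_keepdims(np.argmax)(a, axis=1)
+ assert idx.shape == (2, 1)  # axis kept, with length one
+ assert_array_equal(take_along_axis(a, idx, axis=1), [[2], [5]])
+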
+
+class TestTakeAlongAxis:
+ def test_argequivalent(self):
+ """ Test it translates from arg<func> to <func> """
+ from numpy.random import rand
+ a = rand(3, 4, 5)
+
+ funcs = [
+ (np.sort, np.argsort, dict()),
+ (_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
+ (_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
+ (np.partition, np.argpartition, dict(kth=2)),
+ ]
+
+ for func, argfunc, kwargs in funcs:
+ for axis in list(range(a.ndim)) + [None]:
+ a_func = func(a, axis=axis, **kwargs)
+ ai_func = argfunc(a, axis=axis, **kwargs)
+ assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+ def test_invalid(self):
+ """ Test it errors when indices has too few dimensions """
+ a = np.ones((10, 10))
+ ai = np.ones((10, 2), dtype=np.intp)
+
+ # sanity check
+ take_along_axis(a, ai, axis=1)
+
+ # not enough indices
+ assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+ # bool arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+ # float arrays not allowed
+ assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+ # invalid axis
+ assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
+
+ def test_empty(self):
+ """ Test everything is ok with empty results, even with inserted dims """
+ a = np.ones((3, 4, 5))
+ ai = np.ones((3, 0, 5), dtype=np.intp)
+
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, ai.shape)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.ones((1, 2, 5), dtype=np.intp)
+ actual = take_along_axis(a, ai, axis=1)
+ assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis:
+ def test_replace_max(self):
+ a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+ for axis in list(range(a_base.ndim)) + [None]:
+ # we mutate this in the loop
+ a = a_base.copy()
+
+ # replace the max with a small value
+ i_max = _add_keepdims(np.argmax)(a, axis=axis)
+ put_along_axis(a, i_max, -99, axis=axis)
+
+ # find the new minimum, which should sit where the old max was
+ i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+ assert_equal(i_min, i_max)
+
+ def test_broadcast(self):
+ """ Test that non-indexing dimensions are broadcast in both directions """
+ a = np.ones((3, 4, 1))
+ ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+ put_along_axis(a, ai, 20, axis=1)
+ assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+
+class TestApplyAlongAxis:
+ def test_simple(self):
+ a = np.ones((20, 10), 'd')
+ assert_array_equal(
+ apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
+
+ def test_simple101(self):
+ a = np.ones((10, 101), 'd')
+ assert_array_equal(
+ apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
+
+ def test_3d(self):
+ a = np.arange(27).reshape((3, 3, 3))
+ assert_array_equal(apply_along_axis(np.sum, 0, a),
+ [[27, 30, 33], [36, 39, 42], [45, 48, 51]])
+
+ def test_preserve_subclass(self):
+ def double(row):
+ return row * 2
+
+ class MyNDArray(np.ndarray):
+ pass
+
+ m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
+ expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
+
+ result = apply_along_axis(double, 0, m)
+ assert_(isinstance(result, MyNDArray))
+ assert_array_equal(result, expected)
+
+ result = apply_along_axis(double, 1, m)
+ assert_(isinstance(result, MyNDArray))
+ assert_array_equal(result, expected)
+
+ def test_subclass(self):
+ class MinimalSubclass(np.ndarray):
+ data = 1
+
+ def minimal_function(array):
+ return array.data
+
+ a = np.zeros((6, 3)).view(MinimalSubclass)
+
+ assert_array_equal(
+ apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
+ )
+
+ def test_scalar_array(self, cls=np.ndarray):
+ a = np.ones((6, 3)).view(cls)
+ res = apply_along_axis(np.sum, 0, a)
+ assert_(isinstance(res, cls))
+ assert_array_equal(res, np.array([6, 6, 6]).view(cls))
+
+ def test_0d_array(self, cls=np.ndarray):
+ def sum_to_0d(x):
+ """ Sum x, returning a 0d array of the same class """
+ assert_equal(x.ndim, 1)
+ return np.squeeze(np.sum(x, keepdims=True))
+ a = np.ones((6, 3)).view(cls)
+ res = apply_along_axis(sum_to_0d, 0, a)
+ assert_(isinstance(res, cls))
+ assert_array_equal(res, np.array([6, 6, 6]).view(cls))
+
+ res = apply_along_axis(sum_to_0d, 1, a)
+ assert_(isinstance(res, cls))
+ assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))
+
+ def test_axis_insertion(self, cls=np.ndarray):
+ def f1to2(x):
+ """produces an asymmetric non-square matrix from x"""
+ assert_equal(x.ndim, 1)
+ return (x[::-1] * x[1:,None]).view(cls)
+
+ a2d = np.arange(6*3).reshape((6, 3))
+
+ # 2d insertion along first axis
+ actual = apply_along_axis(f1to2, 0, a2d)
+ expected = np.stack([
+ f1to2(a2d[:,i]) for i in range(a2d.shape[1])
+ ], axis=-1).view(cls)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual, expected)
+
+ # 2d insertion along last axis
+ actual = apply_along_axis(f1to2, 1, a2d)
+ expected = np.stack([
+ f1to2(a2d[i,:]) for i in range(a2d.shape[0])
+ ], axis=0).view(cls)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual, expected)
+
+ # 3d insertion along middle axis
+ a3d = np.arange(6*5*3).reshape((6, 5, 3))
+
+ actual = apply_along_axis(f1to2, 1, a3d)
+ expected = np.stack([
+ np.stack([
+ f1to2(a3d[i,:,j]) for i in range(a3d.shape[0])
+ ], axis=0)
+ for j in range(a3d.shape[2])
+ ], axis=-1).view(cls)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual, expected)
+
+ def test_subclass_preservation(self):
+ class MinimalSubclass(np.ndarray):
+ pass
+ self.test_scalar_array(MinimalSubclass)
+ self.test_0d_array(MinimalSubclass)
+ self.test_axis_insertion(MinimalSubclass)
+
+ def test_axis_insertion_ma(self):
+ def f1to2(x):
+ """produces an asymmetric non-square matrix from x"""
+ assert_equal(x.ndim, 1)
+ res = x[::-1] * x[1:,None]
+ return np.ma.masked_where(res%5==0, res)
+ a = np.arange(6*3).reshape((6, 3))
+ res = apply_along_axis(f1to2, 0, a)
+ assert_(isinstance(res, np.ma.masked_array))
+ assert_equal(res.ndim, 3)
+ assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask)
+ assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask)
+ assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask)
+
+ def test_tuple_func1d(self):
+ def sample_1d(x):
+ return x[1], x[0]
+ res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
+ assert_array_equal(res, np.array([[2, 1], [4, 3]]))
+
+ def test_empty(self):
+ # can't apply_along_axis when there's no chance to call the function
+ def never_call(x):
+ assert_(False) # should never be reached
+
+ a = np.empty((0, 0))
+ assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
+ assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)
+
+ # but it's sometimes ok with some non-zero dimensions
+ def empty_to_1(x):
+ assert_(len(x) == 0)
+ return 1
+
+ a = np.empty((10, 0))
+ actual = np.apply_along_axis(empty_to_1, 1, a)
+ assert_equal(actual, np.ones(10))
+ assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)
+
+ def test_with_iterable_object(self):
+ # from issue 5248
+ d = np.array([
+ [{1, 11}, {2, 22}, {3, 33}],
+ [{4, 44}, {5, 55}, {6, 66}]
+ ])
+ actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
+ expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
+
+ assert_equal(actual, expected)
+
+ # issue 8642 - assert_equal doesn't detect this!
+ for i in np.ndindex(actual.shape):
+ assert_equal(type(actual[i]), type(expected[i]))
+
+
+class TestApplyOverAxes:
+ def test_simple(self):
+ a = np.arange(24).reshape(2, 3, 4)
+ aoa_a = apply_over_axes(np.sum, a, [0, 2])
+ assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
+
+
+class TestExpandDims:
+ def test_functionality(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ for axis in range(-5, 4):
+ b = expand_dims(a, axis)
+ assert_(b.shape[axis] == 1)
+ assert_(np.squeeze(b).shape == s)
+
+ def test_axis_tuple(self):
+ a = np.empty((3, 3, 3))
+ assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
+ assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
+ assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
+ assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)
+
+ def test_axis_out_of_range(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ assert_raises(np.AxisError, expand_dims, a, -6)
+ assert_raises(np.AxisError, expand_dims, a, 5)
+
+ a = np.empty((3, 3, 3))
+ assert_raises(np.AxisError, expand_dims, a, (0, -6))
+ assert_raises(np.AxisError, expand_dims, a, (0, 5))
+
+ def test_repeated_axis(self):
+ a = np.empty((3, 3, 3))
+ assert_raises(ValueError, expand_dims, a, axis=(1, 1))
+
+ def test_subclasses(self):
+ a = np.arange(10).reshape((2, 5))
+ a = np.ma.array(a, mask=a%3 == 0)
+
+ expanded = np.expand_dims(a, axis=1)
+ assert_(isinstance(expanded, np.ma.MaskedArray))
+ assert_equal(expanded.shape, (2, 1, 5))
+ assert_equal(expanded.mask.shape, (2, 1, 5))
+
+
+class TestArraySplit:
+ def test_integer_0_split(self):
+ a = np.arange(10)
+ assert_raises(ValueError, array_split, a, 0)
+
+ def test_integer_split(self):
+ a = np.arange(10)
+ res = array_split(a, 1)
+ desired = [np.arange(10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 2)
+ desired = [np.arange(5), np.arange(5, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 3)
+ desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 4)
+ desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
+ np.arange(8, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 5)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 8), np.arange(8, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 6)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 7)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 8)
+ desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
+ np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
+ np.arange(8, 9), np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 9)
+ desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
+ np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
+ np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 10)
+ desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
+ np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10)]
+ compare_results(res, desired)
+
+ res = array_split(a, 11)
+ desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
+ np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
+ np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+ np.arange(9, 10), np.array([])]
+ compare_results(res, desired)
+
+ def test_integer_split_2D_rows(self):
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3, axis=0)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
+ # Same thing for manual splits:
+ res = array_split(a, [0, 1], axis=0)
+ tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
+ np.array([np.arange(10)])]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
+ def test_integer_split_2D_cols(self):
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3, axis=-1)
+ desired = [np.array([np.arange(4), np.arange(4)]),
+ np.array([np.arange(4, 7), np.arange(4, 7)]),
+ np.array([np.arange(7, 10), np.arange(7, 10)])]
+ compare_results(res, desired)
+
+ def test_integer_split_2D_default(self):
+ """ This will fail if we change default axis
+ """
+ a = np.array([np.arange(10), np.arange(10)])
+ res = array_split(a, 3)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
+ assert_(a.dtype.type is res[-1].dtype.type)
+ # perhaps should check higher dimensions
+
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ def test_integer_split_2D_rows_greater_max_int32(self):
+ a = np.broadcast_to([0], (1 << 32, 2))
+ res = array_split(a, 4)
+ chunk = np.broadcast_to([0], (1 << 30, 2))
+ tgt = [chunk] * 4
+ for i in range(len(tgt)):
+ assert_equal(res[i].shape, tgt[i].shape)
+
+ def test_index_split_simple(self):
+ a = np.arange(10)
+ indices = [1, 5, 7]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
+ np.arange(7, 10)]
+ compare_results(res, desired)
+
+ def test_index_split_low_bound(self):
+ a = np.arange(10)
+ indices = [0, 5, 7]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10)]
+ compare_results(res, desired)
+
+ def test_index_split_high_bound(self):
+ a = np.arange(10)
+ indices = [0, 5, 7, 10, 12]
+ res = array_split(a, indices, axis=-1)
+ desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+ np.arange(7, 10), np.array([]), np.array([])]
+ compare_results(res, desired)
+
+
+class TestSplit:
+ # The split function is essentially the same as array_split,
+ # except that it tests whether splitting will result in an
+ # equal split. Only that case is tested here.
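+ # e.g. array_split(np.arange(10), 3) yields pieces of lengths 4, 3, 3,
+ # while split(np.arange(10), 3) raises ValueError since 10 % 3 != 0.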
+
+ def test_equal_split(self):
+ a = np.arange(10)
+ res = split(a, 2)
+ desired = [np.arange(5), np.arange(5, 10)]
+ compare_results(res, desired)
+
+ def test_unequal_split(self):
+ a = np.arange(10)
+ assert_raises(ValueError, split, a, 3)
+
+
+class TestColumnStack:
+ def test_non_iterable(self):
+ assert_raises(TypeError, column_stack, 1)
+
+ def test_1D_arrays(self):
+ # example from docstring
+ a = np.array((1, 2, 3))
+ b = np.array((2, 3, 4))
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_2D_arrays(self):
+ # same as hstack 2D docstring example
+ a = np.array([[1], [2], [3]])
+ b = np.array([[2], [3], [4]])
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ column_stack((np.arange(3) for _ in range(2)))
+
+
+class TestDstack:
+ def test_non_iterable(self):
+ assert_raises(TypeError, dstack, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ b = np.array(2)
+ res = dstack([a, b])
+ desired = np.array([[[1, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_1D_array(self):
+ a = np.array([1])
+ b = np.array([2])
+ res = dstack([a, b])
+ desired = np.array([[[1, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array(self):
+ a = np.array([[1], [2]])
+ b = np.array([[1], [2]])
+ res = dstack([a, b])
+ desired = np.array([[[1, 1]], [[2, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_2D_array2(self):
+ a = np.array([1, 2])
+ b = np.array([1, 2])
+ res = dstack([a, b])
+ desired = np.array([[[1, 1], [2, 2]]])
+ assert_array_equal(res, desired)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ dstack((np.arange(3) for _ in range(2)))
+
+
+# array_split has more comprehensive test of splitting.
+# only do simple test on hsplit, vsplit, and dsplit
+class TestHsplit:
+ """Only testing for integer splits.
+
+ """
+ def test_non_iterable(self):
+ assert_raises(ValueError, hsplit, 1, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ assert_raises(ValueError, hsplit, a, 2)
+
+ def test_1D_array(self):
+ a = np.array([1, 2, 3, 4])
+ res = hsplit(a, 2)
+ desired = [np.array([1, 2]), np.array([3, 4])]
+ compare_results(res, desired)
+
+ def test_2D_array(self):
+ a = np.array([[1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ res = hsplit(a, 2)
+ desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
+ compare_results(res, desired)
+
+
+class TestVsplit:
+ """Only testing for integer splits.
+
+ """
+ def test_non_iterable(self):
+ assert_raises(ValueError, vsplit, 1, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ assert_raises(ValueError, vsplit, a, 2)
+
+ def test_1D_array(self):
+ a = np.array([1, 2, 3, 4])
+ assert_raises(ValueError, vsplit, a, 2)
+
+ def test_2D_array(self):
+ a = np.array([[1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ res = vsplit(a, 2)
+ desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
+ compare_results(res, desired)
+
+
+class TestDsplit:
+ # Only testing for integer splits.
+ def test_non_iterable(self):
+ assert_raises(ValueError, dsplit, 1, 1)
+
+ def test_0D_array(self):
+ a = np.array(1)
+ assert_raises(ValueError, dsplit, a, 2)
+
+ def test_1D_array(self):
+ a = np.array([1, 2, 3, 4])
+ assert_raises(ValueError, dsplit, a, 2)
+
+ def test_2D_array(self):
+ a = np.array([[1, 2, 3, 4],
+ [1, 2, 3, 4]])
+ assert_raises(ValueError, dsplit, a, 2)
+
+ def test_3D_array(self):
+ a = np.array([[[1, 2, 3, 4],
+ [1, 2, 3, 4]],
+ [[1, 2, 3, 4],
+ [1, 2, 3, 4]]])
+ res = dsplit(a, 2)
+ desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
+ np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
+ compare_results(res, desired)
+
+
+class TestSqueeze:
+ def test_basic(self):
+ from numpy.random import rand
+
+ a = rand(20, 10, 10, 1, 1)
+ b = rand(20, 1, 10, 1, 20)
+ c = rand(1, 1, 20, 10)
+ assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
+ assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
+ assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))
+
+ # Squeezing to 0-dim should still give an ndarray
+ a = [[[1.5]]]
+ res = np.squeeze(a)
+ assert_equal(res, 1.5)
+ assert_equal(res.ndim, 0)
+ assert_equal(type(res), np.ndarray)
+
+
+class TestKron:
+ def test_basic(self):
+ # Using 0-dimensional ndarray
+ a = np.array(1)
+ b = np.array([[1, 2], [3, 4]])
+ k = np.array([[1, 2], [3, 4]])
+ assert_array_equal(np.kron(a, b), k)
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array(1)
+ assert_array_equal(np.kron(a, b), k)
+
+ # Using 1-dimensional ndarray
+ a = np.array([3])
+ b = np.array([[1, 2], [3, 4]])
+ k = np.array([[3, 6], [9, 12]])
+ assert_array_equal(np.kron(a, b), k)
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array([3])
+ assert_array_equal(np.kron(a, b), k)
+
+ # Using 3-dimensional ndarray
+ a = np.array([[[1]], [[2]]])
+ b = np.array([[1, 2], [3, 4]])
+ k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
+ assert_array_equal(np.kron(a, b), k)
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array([[[1]], [[2]]])
+ k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])
+ assert_array_equal(np.kron(a, b), k)
+
+ def test_return_type(self):
+ class myarray(np.ndarray):
+ __array_priority__ = 1.0
+
+ a = np.ones([2, 2])
+ ma = myarray(a.shape, a.dtype, a.data)
+ assert_equal(type(kron(a, a)), np.ndarray)
+ assert_equal(type(kron(ma, ma)), myarray)
+ assert_equal(type(kron(a, ma)), myarray)
+ assert_equal(type(kron(ma, a)), myarray)
+
+ @pytest.mark.parametrize(
+ "array_class", [np.asarray, np.mat]
+ )
+ def test_kron_smoke(self, array_class):
+ a = array_class(np.ones([3, 3]))
+ b = array_class(np.ones([3, 3]))
+ k = array_class(np.ones([9, 9]))
+
+ assert_array_equal(np.kron(a, b), k)
+
+ def test_kron_ma(self):
+ x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
+ k = np.ma.array(np.diag([1, 4, 4, 16]),
+ mask=~np.array(np.identity(4), dtype=bool))
+
+ assert_array_equal(k, np.kron(x, x))
+
+ @pytest.mark.parametrize(
+ "shape_a,shape_b", [
+ ((1, 1), (1, 1)),
+ ((1, 2, 3), (4, 5, 6)),
+ ((2, 2), (2, 2, 2)),
+ ((1, 0), (1, 1)),
+ ((2, 0, 2), (2, 2)),
+ ((2, 0, 0, 2), (2, 0, 2)),
+ ])
+ def test_kron_shape(self, shape_a, shape_b):
+ a = np.ones(shape_a)
+ b = np.ones(shape_b)
+ normalised_shape_a = (1,) * max(0, len(shape_b)-len(shape_a)) + shape_a
+ normalised_shape_b = (1,) * max(0, len(shape_a)-len(shape_b)) + shape_b
+ expected_shape = np.multiply(normalised_shape_a, normalised_shape_b)
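+ # e.g. shapes (2, 2) and (2, 2, 2) normalise to (1, 2, 2) and (2, 2, 2),
+ # giving a kron result of shape (2, 4, 4): the elementwise product.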
+
+ k = np.kron(a, b)
+ assert np.array_equal(
+ k.shape, expected_shape), "Unexpected shape from kron"
+
+
+class TestTile:
+ def test_basic(self):
+ a = np.array([0, 1, 2])
+ b = [[1, 2], [3, 4]]
+ assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
+ assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
+ assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
+ assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
+ assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
+ assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
+ [1, 2, 1, 2], [3, 4, 3, 4]])
+
+ def test_tile_one_repetition_on_array_gh4679(self):
+ a = np.arange(5)
+ b = tile(a, 1)
+ b += 2
+ assert_equal(a, np.arange(5))
+
+ def test_empty(self):
+ a = np.array([[[]]])
+ b = np.array([[], []])
+ c = tile(b, 2).shape
+ d = tile(a, (3, 2, 5)).shape
+ assert_equal(c, (2, 0))
+ assert_equal(d, (3, 2, 0))
+
+ def test_kroncompare(self):
+ from numpy.random import randint
+
+ reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
+ shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
+ for s in shape:
+ b = randint(0, 10, size=s)
+ for r in reps:
+ a = np.ones(r, b.dtype)
+ large = tile(b, r)
+ klarge = kron(a, b)
+ assert_equal(large, klarge)
+
+
+class TestMayShareMemory:
+ def test_basic(self):
+ d = np.ones((50, 60))
+ d2 = np.ones((30, 60, 6))
+ assert_(np.may_share_memory(d, d))
+ assert_(np.may_share_memory(d, d[::-1]))
+ assert_(np.may_share_memory(d, d[::2]))
+ assert_(np.may_share_memory(d, d[1:, ::-1]))
+
+ assert_(not np.may_share_memory(d[::-1], d2))
+ assert_(not np.may_share_memory(d[::2], d2))
+ assert_(not np.may_share_memory(d[1:, ::-1], d2))
+ assert_(np.may_share_memory(d2[1:, ::-1], d2))
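+ # (editor's note) may_share_memory is deliberately conservative: by
+ # default it only compares memory bounds, so it can return True even
+ # when the actual elements never overlap.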
+
+
+# Utility
+def compare_results(res, desired):
+ """Compare lists of arrays."""
+ if len(res) != len(desired):
+ raise ValueError("Iterables have different lengths")
+ # On Python 3.10+, zip(..., strict=True) from PEP 618 could replace this check.
+ for x, y in zip(res, desired):
+ assert_array_equal(x, y)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py
new file mode 100644
index 00000000..efec5d24
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_stride_tricks.py
@@ -0,0 +1,645 @@
+import numpy as np
+from numpy.core._rational_tests import rational
+from numpy.testing import (
+ assert_equal, assert_array_equal, assert_raises, assert_,
+ assert_raises_regex, assert_warns,
+ )
+from numpy.lib.stride_tricks import (
+ as_strided, broadcast_arrays, _broadcast_shape, broadcast_to,
+ broadcast_shapes, sliding_window_view,
+ )
+import pytest
+
+
+def assert_shapes_correct(input_shapes, expected_shape):
+ # Broadcast a list of arrays with the given input shapes and check the
+ # common output shape.
+
+ inarrays = [np.zeros(s) for s in input_shapes]
+ outarrays = broadcast_arrays(*inarrays)
+ outshapes = [a.shape for a in outarrays]
+ expected = [expected_shape] * len(inarrays)
+ assert_equal(outshapes, expected)
+
+
+def assert_incompatible_shapes_raise(input_shapes):
+ # Broadcast a list of arrays with the given (incompatible) input shapes
+ # and check that they raise a ValueError.
+
+ inarrays = [np.zeros(s) for s in input_shapes]
+ assert_raises(ValueError, broadcast_arrays, *inarrays)
+
+
+def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
+ # Broadcast two shapes against each other and check that the data layout
+ # is the same as if a ufunc did the broadcasting.
+
+ x0 = np.zeros(shape0, dtype=int)
+ # Note that multiply.reduce's identity element is 1.0, so when shape1==(),
+ # this gives the desired n==1.
+ n = int(np.multiply.reduce(shape1))
+ x1 = np.arange(n).reshape(shape1)
+ if transposed:
+ x0 = x0.T
+ x1 = x1.T
+ if flipped:
+ x0 = x0[::-1]
+ x1 = x1[::-1]
+ # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the
+ # result should be exactly the same as the broadcasted view of x1.
+ y = x0 + x1
+ b0, b1 = broadcast_arrays(x0, x1)
+ assert_array_equal(y, b1)
+
+
+def test_same():
+ x = np.arange(10)
+ y = np.arange(10)
+ bx, by = broadcast_arrays(x, y)
+ assert_array_equal(x, bx)
+ assert_array_equal(y, by)
+
+def test_broadcast_kwargs():
+ # ensure that a TypeError is appropriately raised when
+ # np.broadcast_arrays() is called with any keyword
+ # argument other than 'subok'
+ x = np.arange(10)
+ y = np.arange(10)
+
+ with assert_raises_regex(TypeError, 'got an unexpected keyword'):
+ broadcast_arrays(x, y, dtype='float64')
+
+
+def test_one_off():
+ x = np.array([[1, 2, 3]])
+ y = np.array([[1], [2], [3]])
+ bx, by = broadcast_arrays(x, y)
+ bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
+ by0 = bx0.T
+ assert_array_equal(bx0, bx)
+ assert_array_equal(by0, by)
+
+
+def test_same_input_shapes():
+ # Check that the final shape is just the input shape.
+
+ data = [
+ (),
+ (1,),
+ (3,),
+ (0, 1),
+ (0, 3),
+ (1, 0),
+ (3, 0),
+ (1, 3),
+ (3, 1),
+ (3, 3),
+ ]
+ for shape in data:
+ input_shapes = [shape]
+ # Single input.
+ assert_shapes_correct(input_shapes, shape)
+ # Double input.
+ input_shapes2 = [shape, shape]
+ assert_shapes_correct(input_shapes2, shape)
+ # Triple input.
+ input_shapes3 = [shape, shape, shape]
+ assert_shapes_correct(input_shapes3, shape)
+
+
+def test_two_compatible_by_ones_input_shapes():
+ # Check that two different input shapes of the same length, but some have
+ # ones, broadcast to the correct shape.
+
+ data = [
+ [[(1,), (3,)], (3,)],
+ [[(1, 3), (3, 3)], (3, 3)],
+ [[(3, 1), (3, 3)], (3, 3)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 1), (3, 3)], (3, 3)],
+ [[(1, 1), (1, 3)], (1, 3)],
+ [[(1, 1), (3, 1)], (3, 1)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+ assert_shapes_correct(input_shapes, expected_shape)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_shapes_correct(input_shapes[::-1], expected_shape)
+
+
+def test_two_compatible_by_prepending_ones_input_shapes():
+ # Check that two different input shapes (of different lengths) broadcast
+ # to the correct shape.
+
+ data = [
+ [[(), (3,)], (3,)],
+ [[(3,), (3, 3)], (3, 3)],
+ [[(3,), (3, 1)], (3, 3)],
+ [[(1,), (3, 3)], (3, 3)],
+ [[(), (3, 3)], (3, 3)],
+ [[(1, 1), (3,)], (1, 3)],
+ [[(1,), (3, 1)], (3, 1)],
+ [[(1,), (1, 3)], (1, 3)],
+ [[(), (1, 3)], (1, 3)],
+ [[(), (3, 1)], (3, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+ assert_shapes_correct(input_shapes, expected_shape)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_shapes_correct(input_shapes[::-1], expected_shape)
+
+
+def test_incompatible_shapes_raise_valueerror():
+ # Check that a ValueError is raised for incompatible shapes.
+
+ data = [
+ [(3,), (4,)],
+ [(2, 3), (2,)],
+ [(3,), (3,), (4,)],
+ [(1, 3, 4), (2, 3, 3)],
+ ]
+ for input_shapes in data:
+ assert_incompatible_shapes_raise(input_shapes)
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_incompatible_shapes_raise(input_shapes[::-1])
+
+
+def test_same_as_ufunc():
+ # Check that the data layout is the same as if a ufunc did the operation.
+
+ data = [
+ [[(1,), (3,)], (3,)],
+ [[(1, 3), (3, 3)], (3, 3)],
+ [[(3, 1), (3, 3)], (3, 3)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 1), (3, 3)], (3, 3)],
+ [[(1, 1), (1, 3)], (1, 3)],
+ [[(1, 1), (3, 1)], (3, 1)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ [[(), (3,)], (3,)],
+ [[(3,), (3, 3)], (3, 3)],
+ [[(3,), (3, 1)], (3, 3)],
+ [[(1,), (3, 3)], (3, 3)],
+ [[(), (3, 3)], (3, 3)],
+ [[(1, 1), (3,)], (1, 3)],
+ [[(1,), (3, 1)], (3, 1)],
+ [[(1,), (1, 3)], (1, 3)],
+ [[(), (1, 3)], (1, 3)],
+ [[(), (3, 1)], (3, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ ]
+ for input_shapes, expected_shape in data:
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1])
+ # Reverse the input shapes since broadcasting should be symmetric.
+ assert_same_as_ufunc(input_shapes[1], input_shapes[0])
+ # Try them transposed, too.
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
+ # ... and flipped for non-rank-0 inputs in order to test negative
+ # strides.
+ if () not in input_shapes:
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
+ assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
+
+
+def test_broadcast_to_succeeds():
+ data = [
+ [np.array(0), (0,), np.array(0)],
+ [np.array(0), (1,), np.zeros(1)],
+ [np.array(0), (3,), np.zeros(3)],
+ [np.ones(1), (1,), np.ones(1)],
+ [np.ones(1), (2,), np.ones(2)],
+ [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
+ [np.arange(3), (3,), np.arange(3)],
+ [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
+ [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
+ # test if shape is not a tuple
+ [np.ones(0), 0, np.ones(0)],
+ [np.ones(1), 1, np.ones(1)],
+ [np.ones(1), 2, np.ones(2)],
+ # these cases with size 0 are strange, but they reproduce the behavior
+ # of broadcasting with ufuncs (see test_same_as_ufunc above)
+ [np.ones(1), (0,), np.ones(0)],
+ [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
+ [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
+ ]
+ for input_array, shape, expected in data:
+ actual = broadcast_to(input_array, shape)
+ assert_array_equal(expected, actual)
+
+
+def test_broadcast_to_raises():
+ data = [
+ [(0,), ()],
+ [(1,), ()],
+ [(3,), ()],
+ [(3,), (1,)],
+ [(3,), (2,)],
+ [(3,), (4,)],
+ [(1, 2), (2, 1)],
+ [(1, 1), (1,)],
+ [(1,), -1],
+ [(1,), (-1,)],
+ [(1, 2), (-1, 2)],
+ ]
+ for orig_shape, target_shape in data:
+ arr = np.zeros(orig_shape)
+ assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
+
+
+def test_broadcast_shape():
+ # tests internal _broadcast_shape
+ # _broadcast_shape is already exercised indirectly by broadcast_arrays
+ # _broadcast_shape is also exercised by the public broadcast_shapes function
+ assert_equal(_broadcast_shape(), ())
+ assert_equal(_broadcast_shape([1, 2]), (2,))
+ assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
+ assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
+ assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
+ assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
+
+ # regression tests for gh-5862
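+ # (np.broadcast handles at most 32 arrays per call, so shape folding
+ # has to be chunked; these cases deliberately cross that boundary)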
+ assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
+ bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
+ assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
+
+
+def test_broadcast_shapes_succeeds():
+ # tests public broadcast_shapes
+ data = [
+ [[], ()],
+ [[()], ()],
+ [[(7,)], (7,)],
+ [[(1, 2), (2,)], (1, 2)],
+ [[(1, 1)], (1, 1)],
+ [[(1, 1), (3, 4)], (3, 4)],
+ [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
+ [[(5, 6, 1)], (5, 6, 1)],
+ [[(1, 3), (3, 1)], (3, 3)],
+ [[(1, 0), (0, 0)], (0, 0)],
+ [[(0, 1), (0, 0)], (0, 0)],
+ [[(1, 0), (0, 1)], (0, 0)],
+ [[(1, 1), (0, 0)], (0, 0)],
+ [[(1, 1), (1, 0)], (1, 0)],
+ [[(1, 1), (0, 1)], (0, 1)],
+ [[(), (0,)], (0,)],
+ [[(0,), (0, 0)], (0, 0)],
+ [[(0,), (0, 1)], (0, 0)],
+ [[(1,), (0, 0)], (0, 0)],
+ [[(), (0, 0)], (0, 0)],
+ [[(1, 1), (0,)], (1, 0)],
+ [[(1,), (0, 1)], (0, 1)],
+ [[(1,), (1, 0)], (1, 0)],
+ [[(), (1, 0)], (1, 0)],
+ [[(), (0, 1)], (0, 1)],
+ [[(1,), (3,)], (3,)],
+ [[2, (3, 2)], (3, 2)],
+ ]
+ for input_shapes, target_shape in data:
+ assert_equal(broadcast_shapes(*input_shapes), target_shape)
+
+ assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))
+ assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))
+
+ # regression tests for gh-5862
+ assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))
+
+
+def test_broadcast_shapes_raises():
+ # tests public broadcast_shapes
+ data = [
+ [(3,), (4,)],
+ [(2, 3), (2,)],
+ [(3,), (3,), (4,)],
+ [(1, 3, 4), (2, 3, 3)],
+ [(1, 2), (3, 1), (3, 2), (10, 5)],
+ [2, (2, 3)],
+ ]
+ for input_shapes in data:
+ assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes))
+
+ bad_args = [(2,)] * 32 + [(3,)] * 32
+ assert_raises(ValueError, lambda: broadcast_shapes(*bad_args))
+
+
+def test_as_strided():
+ a = np.array([None])
+ a_view = as_strided(a)
+ expected = np.array([None])
+ assert_array_equal(a_view, expected)
+
+ a = np.array([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
+ expected = np.array([1, 3])
+ assert_array_equal(a_view, expected)
+
+ a = np.array([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
+ expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
+ assert_array_equal(a_view, expected)
+
+ # Regression test for gh-5081
+ dt = np.dtype([('num', 'i4'), ('obj', 'O')])
+ a = np.empty((4,), dtype=dt)
+ a['num'] = np.arange(1, 5)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ expected_num = [[1, 2, 3, 4]] * 3
+ expected_obj = [[None]*4]*3
+ assert_equal(a_view.dtype, dt)
+ assert_array_equal(expected_num, a_view['num'])
+ assert_array_equal(expected_obj, a_view['obj'])
+
+ # Make sure that void types without fields are kept unchanged
+ a = np.empty((4,), dtype='V4')
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+
+ # Make sure that the one dtype that could fail (a void with an empty field name) is handled properly
+ dt = np.dtype({'names': [''], 'formats': ['V4']})
+ a = np.empty((4,), dtype=dt)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+
+ # Custom dtypes should not be lost (gh-9161)
+ r = [rational(i) for i in range(4)]
+ a = np.array(r, dtype=rational)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+ assert_array_equal([r] * 3, a_view)
+
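+
+# A minimal sketch of the aliasing the strides above rely on (editor's
+# illustration; the underscore-prefixed helper is hypothetical and is not
+# collected by pytest): a zero stride makes every row share the same memory.
+def _example_as_strided_alias():
+ base = np.arange(4)
+ view = as_strided(base, shape=(2, 4), strides=(0, base.itemsize))
+ view[0, 0] = 99  # writing through the view...
+ assert base[0] == 99 and view[1, 0] == 99  # ...updates every aliased row
+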
+
+class TestSlidingWindowView:
+ def test_1d(self):
+ arr = np.arange(5)
+ arr_view = sliding_window_view(arr, 2)
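+ # (editor's note) this is a read-only view, no copy: the window axis is
+ # appended after the original axes.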
+ expected = np.array([[0, 1],
+ [1, 2],
+ [2, 3],
+ [3, 4]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d(self):
+ i, j = np.ogrid[:3, :4]
+ arr = 10*i + j
+ shape = (2, 2)
+ arr_view = sliding_window_view(arr, shape)
+ expected = np.array([[[[0, 1], [10, 11]],
+ [[1, 2], [11, 12]],
+ [[2, 3], [12, 13]]],
+ [[[10, 11], [20, 21]],
+ [[11, 12], [21, 22]],
+ [[12, 13], [22, 23]]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d_with_axis(self):
+ i, j = np.ogrid[:3, :4]
+ arr = 10*i + j
+ arr_view = sliding_window_view(arr, 3, 0)
+ expected = np.array([[[0, 10, 20],
+ [1, 11, 21],
+ [2, 12, 22],
+ [3, 13, 23]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d_repeated_axis(self):
+ i, j = np.ogrid[:3, :4]
+ arr = 10*i + j
+ arr_view = sliding_window_view(arr, (2, 3), (1, 1))
+ expected = np.array([[[[0, 1, 2],
+ [1, 2, 3]]],
+ [[[10, 11, 12],
+ [11, 12, 13]]],
+ [[[20, 21, 22],
+ [21, 22, 23]]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_2d_without_axis(self):
+ i, j = np.ogrid[:4, :4]
+ arr = 10*i + j
+ shape = (2, 3)
+ arr_view = sliding_window_view(arr, shape)
+ expected = np.array([[[[0, 1, 2], [10, 11, 12]],
+ [[1, 2, 3], [11, 12, 13]]],
+ [[[10, 11, 12], [20, 21, 22]],
+ [[11, 12, 13], [21, 22, 23]]],
+ [[[20, 21, 22], [30, 31, 32]],
+ [[21, 22, 23], [31, 32, 33]]]])
+ assert_array_equal(arr_view, expected)
+
+ def test_errors(self):
+ i, j = np.ogrid[:4, :4]
+ arr = 10*i + j
+ with pytest.raises(ValueError, match='cannot contain negative values'):
+ sliding_window_view(arr, (-1, 3))
+ with pytest.raises(
+ ValueError,
+ match='must provide window_shape for all dimensions of `x`'):
+ sliding_window_view(arr, (1,))
+ with pytest.raises(
+ ValueError,
+ match='Must provide matching length window_shape and axis'):
+ sliding_window_view(arr, (1, 3, 4), axis=(0, 1))
+ with pytest.raises(
+ ValueError,
+ match='window shape cannot be larger than input array'):
+ sliding_window_view(arr, (5, 5))
+
+ def test_writeable(self):
+ arr = np.arange(5)
+ view = sliding_window_view(arr, 2, writeable=False)
+ assert_(not view.flags.writeable)
+ with pytest.raises(
+ ValueError,
+ match='assignment destination is read-only'):
+ view[0, 0] = 3
+ view = sliding_window_view(arr, 2, writeable=True)
+ assert_(view.flags.writeable)
+ view[0, 1] = 3
+ assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))
+
+ def test_subok(self):
+ class MyArray(np.ndarray):
+ pass
+
+ arr = np.arange(5).view(MyArray)
+ assert_(not isinstance(sliding_window_view(arr, 2,
+ subok=False),
+ MyArray))
+ assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))
+ # Default behavior
+ assert_(not isinstance(sliding_window_view(arr, 2), MyArray))
+
+
+def test_as_strided_writeable():
+ arr = np.ones(10)
+ view = as_strided(arr, writeable=False)
+ assert_(not view.flags.writeable)
+
+ # Check that writeable also is fine:
+ view = as_strided(arr, writeable=True)
+ assert_(view.flags.writeable)
+ view[...] = 3
+ assert_array_equal(arr, np.full_like(arr, 3))
+
+ # Test that things do not break down for readonly:
+ arr.flags.writeable = False
+ view = as_strided(arr, writeable=False)
+ view = as_strided(arr, writeable=True)
+ assert_(not view.flags.writeable)
+
+
+class VerySimpleSubClass(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, subok=True, **kwargs).view(cls)
+
+
+class SimpleSubClass(VerySimpleSubClass):
+ def __new__(cls, *args, **kwargs):
+ self = np.array(*args, subok=True, **kwargs).view(cls)
+ self.info = 'simple'
+ return self
+
+ def __array_finalize__(self, obj):
+ self.info = getattr(obj, 'info', '') + ' finalized'
+
+
+def test_subclasses():
+ # test that subclass is preserved only if subok=True
+ a = VerySimpleSubClass([1, 2, 3, 4])
+ assert_(type(a) is VerySimpleSubClass)
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
+ assert_(type(a_view) is np.ndarray)
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
+ assert_(type(a_view) is VerySimpleSubClass)
+ # test that if a subclass has __array_finalize__, it is used
+ a = SimpleSubClass([1, 2, 3, 4])
+ a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+
+ # similar tests for broadcast_arrays
+ b = np.arange(len(a)).reshape(-1, 1)
+ a_view, b_view = broadcast_arrays(a, b)
+ assert_(type(a_view) is np.ndarray)
+ assert_(type(b_view) is np.ndarray)
+ assert_(a_view.shape == b_view.shape)
+ a_view, b_view = broadcast_arrays(a, b, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(type(b_view) is np.ndarray)
+ assert_(a_view.shape == b_view.shape)
+
+ # and for broadcast_to
+ shape = (2, 4)
+ a_view = broadcast_to(a, shape)
+ assert_(type(a_view) is np.ndarray)
+ assert_(a_view.shape == shape)
+ a_view = broadcast_to(a, shape, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(a_view.shape == shape)
+
+
+def test_writeable():
+ # broadcast_to should return a readonly array
+ original = np.array([1, 2, 3])
+ result = broadcast_to(original, (2, 3))
+ assert_equal(result.flags.writeable, False)
+ assert_raises(ValueError, result.__setitem__, slice(None), 0)
+
+ # but the result of broadcast_arrays needs to be writeable, to
+ # preserve backwards compatibility
+ for is_broadcast, results in [(False, broadcast_arrays(original,)),
+ (True, broadcast_arrays(0, original))]:
+ for result in results:
+ # This will change to False in a future version
+ if is_broadcast:
+ with assert_warns(FutureWarning):
+ assert_equal(result.flags.writeable, True)
+ with assert_warns(DeprecationWarning):
+ result[:] = 0
+ # Warning not emitted, writing to the array resets it
+ assert_equal(result.flags.writeable, True)
+ else:
+ # No warning:
+ assert_equal(result.flags.writeable, True)
+
+ for results in [broadcast_arrays(original),
+ broadcast_arrays(0, original)]:
+ for result in results:
+ # resets the warn_on_write DeprecationWarning
+ result.flags.writeable = True
+ # check: no warning emitted
+ assert_equal(result.flags.writeable, True)
+ result[:] = 0
+
+ # keep readonly input readonly
+ original.flags.writeable = False
+ _, result = broadcast_arrays(0, original)
+ assert_equal(result.flags.writeable, False)
+
+ # regression test for GH6491
+ shape = (2,)
+ strides = [0]
+ tricky_array = as_strided(np.array(0), shape, strides)
+ other = np.zeros((1,))
+ first, second = broadcast_arrays(tricky_array, other)
+ assert_(first.shape == second.shape)
+
+
+def test_writeable_memoryview():
+ # The result of broadcast_arrays exports as a non-writeable memoryview
+ # because otherwise there is no good way to opt in to the new behaviour
+ # (i.e. you would need to set writeable to False explicitly).
+ # See gh-13929.
+ original = np.array([1, 2, 3])
+
+ for is_broadcast, results in [(False, broadcast_arrays(original,)),
+ (True, broadcast_arrays(0, original))]:
+ for result in results:
+ # This will change to False in a future version
+ if is_broadcast:
+ # memoryview(result, writable=True) will give warning but cannot
+ # be tested using the python API.
+ assert memoryview(result).readonly
+ else:
+ assert not memoryview(result).readonly
+
+
+def test_reference_types():
+ input_array = np.array('a', dtype=object)
+ expected = np.array(['a'] * 3, dtype=object)
+ actual = broadcast_to(input_array, (3,))
+ assert_array_equal(expected, actual)
+
+ actual, _ = broadcast_arrays(input_array, np.ones(3))
+ assert_array_equal(expected, actual)
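+
+# (editor's note) broadcast_to returns a zero-stride view, so for object
+# dtype all three elements above reference the very same 'a' object.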
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py
new file mode 100644
index 00000000..141f508f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_twodim_base.py
@@ -0,0 +1,548 @@
+"""Test functions for matrix module
+
+"""
+from numpy.testing import (
+ assert_equal, assert_array_equal, assert_array_max_ulp,
+ assert_array_almost_equal, assert_raises, assert_
+ )
+
+from numpy import (
+ arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
+ tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
+ tril_indices_from, vander,
+ )
+
+import numpy as np
+
+
+from numpy.core.tests.test_overrides import requires_array_function
+
+
+import pytest
+
+
+def get_mat(n):
+ data = arange(n)
+ data = add.outer(data, data)
+ return data
+
+
+class TestEye:
+ def test_basic(self):
+ assert_equal(eye(4),
+ array([[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1]]))
+
+ assert_equal(eye(4, dtype='f'),
+ array([[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1]], 'f'))
+
+ assert_equal(eye(3) == 1,
+ eye(3, dtype=bool))
+
+ def test_uint64(self):
+ # Regression test for gh-9982
+ assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]]))
+ assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)),
+ array([[0, 1, 0, 0], [0, 0, 1, 0]]))
+
+ def test_diag(self):
+ assert_equal(eye(4, k=1),
+ array([[0, 1, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1],
+ [0, 0, 0, 0]]))
+
+ assert_equal(eye(4, k=-1),
+ array([[0, 0, 0, 0],
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0]]))
+
+ def test_2d(self):
+ assert_equal(eye(4, 3),
+ array([[1, 0, 0],
+ [0, 1, 0],
+ [0, 0, 1],
+ [0, 0, 0]]))
+
+ assert_equal(eye(3, 4),
+ array([[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0]]))
+
+ def test_diag2d(self):
+ assert_equal(eye(3, 4, k=2),
+ array([[0, 0, 1, 0],
+ [0, 0, 0, 1],
+ [0, 0, 0, 0]]))
+
+ assert_equal(eye(4, 3, k=-2),
+ array([[0, 0, 0],
+ [0, 0, 0],
+ [1, 0, 0],
+ [0, 1, 0]]))
+
+ def test_eye_bounds(self):
+ assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
+ assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
+ assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
+ assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
+ assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
+ assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
+
+ def test_strings(self):
+ assert_equal(eye(2, 2, dtype='S3'),
+ [[b'1', b''], [b'', b'1']])
+
+ def test_bool(self):
+ assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
+
+ def test_order(self):
+ mat_c = eye(4, 3, k=-1)
+ mat_f = eye(4, 3, k=-1, order='F')
+ assert_equal(mat_c, mat_f)
+ assert mat_c.flags.c_contiguous
+ assert not mat_c.flags.f_contiguous
+ assert not mat_f.flags.c_contiguous
+ assert mat_f.flags.f_contiguous
+
+
+class TestDiag:
+ def test_vector(self):
+ vals = (100 * arange(5)).astype('l')
+ b = zeros((5, 5))
+ for k in range(5):
+ b[k, k] = vals[k]
+ assert_equal(diag(vals), b)
+ b = zeros((7, 7))
+ c = b.copy()
+ for k in range(5):
+ b[k, k + 2] = vals[k]
+ c[k + 2, k] = vals[k]
+ assert_equal(diag(vals, k=2), b)
+ assert_equal(diag(vals, k=-2), c)
+
+ def test_matrix(self, vals=None):
+ if vals is None:
+ vals = (100 * get_mat(5) + 1).astype('l')
+ b = zeros((5,))
+ for k in range(5):
+ b[k] = vals[k, k]
+ assert_equal(diag(vals), b)
+ b = b * 0
+ for k in range(3):
+ b[k] = vals[k, k + 2]
+ assert_equal(diag(vals, 2), b[:3])
+ for k in range(3):
+ b[k] = vals[k + 2, k]
+ assert_equal(diag(vals, -2), b[:3])
+
+ def test_fortran_order(self):
+ vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
+ self.test_matrix(vals)
+
+ def test_diag_bounds(self):
+ A = [[1, 2], [3, 4], [5, 6]]
+ assert_equal(diag(A, k=2), [])
+ assert_equal(diag(A, k=1), [2])
+ assert_equal(diag(A, k=0), [1, 4])
+ assert_equal(diag(A, k=-1), [3, 6])
+ assert_equal(diag(A, k=-2), [5])
+ assert_equal(diag(A, k=-3), [])
+
+ def test_failure(self):
+ assert_raises(ValueError, diag, [[[1]]])
+
+
+class TestFliplr:
+ def test_basic(self):
+ assert_raises(ValueError, fliplr, ones(4))
+ a = get_mat(4)
+ b = a[:, ::-1]
+ assert_equal(fliplr(a), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[2, 1, 0],
+ [5, 4, 3]]
+ assert_equal(fliplr(a), b)
+
+
+class TestFlipud:
+ def test_basic(self):
+ a = get_mat(4)
+ b = a[::-1, :]
+ assert_equal(flipud(a), b)
+ a = [[0, 1, 2],
+ [3, 4, 5]]
+ b = [[3, 4, 5],
+ [0, 1, 2]]
+ assert_equal(flipud(a), b)
+
+
+class TestHistogram2d:
+ def test_simple(self):
+ x = array(
+ [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
+ y = array(
+ [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
+ xedges = np.linspace(0, 1, 10)
+ yedges = np.linspace(0, 1, 10)
+ H = histogram2d(x, y, (xedges, yedges))[0]
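+ # (editor's note) histogram2d bins x along the first axis of H, hence
+ # the transpose when comparing with the y-major `answer` below.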
+ answer = array(
+ [[0, 0, 0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 1, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, 1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0]])
+ assert_array_equal(H.T, answer)
+ H = histogram2d(x, y, xedges)[0]
+ assert_array_equal(H.T, answer)
+ H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
+ assert_array_equal(H, eye(10, 10))
+ assert_array_equal(xedges, np.linspace(0, 9, 11))
+ assert_array_equal(yedges, np.linspace(0, 9, 11))
+
+ def test_asym(self):
+ x = array([1, 1, 2, 3, 4, 4, 4, 5])
+ y = array([1, 3, 2, 0, 1, 2, 3, 4])
+ H, xed, yed = histogram2d(
+ x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
+ answer = array(
+ [[0., 0, 0, 0, 0],
+ [0, 1, 0, 1, 0],
+ [0, 0, 1, 0, 0],
+ [1, 0, 0, 0, 0],
+ [0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 1]])
+ assert_array_almost_equal(H, answer/8., 3)
+ assert_array_equal(xed, np.linspace(0, 6, 7))
+ assert_array_equal(yed, np.linspace(0, 5, 6))
+
+ def test_density(self):
+ x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
+ y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
+ H, xed, yed = histogram2d(
+ x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
+ answer = array([[1, 1, .5],
+ [1, 1, .5],
+ [.5, .5, .25]])/9.
+ assert_array_almost_equal(H, answer, 3)
+
+ def test_all_outliers(self):
+ r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
+ H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
+ assert_array_equal(H, 0)
+
+ def test_empty(self):
+ a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
+ assert_array_max_ulp(a, array([[0.]]))
+
+ a, edge1, edge2 = histogram2d([], [], bins=4)
+ assert_array_max_ulp(a, np.zeros((4, 4)))
+
+ def test_binparameter_combination(self):
+ x = array(
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ 0.59944483, 1])
+ y = array(
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ 0.15886423, 1])
+ edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
+ H, xe, ye = histogram2d(x, y, (edges, 4))
+ answer = array(
+ [[2., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [1., 0., 0., 0.],
+ [0., 1., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
+ H, xe, ye = histogram2d(x, y, (4, edges))
+ answer = array(
+ [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
+
+ @requires_array_function
+ def test_dispatch(self):
+ class ShouldDispatch:
+ def __array_function__(self, function, types, args, kwargs):
+ return types, args, kwargs
+
+ xy = [1, 2]
+ s_d = ShouldDispatch()
+ r = histogram2d(s_d, xy)
+ # Cannot use assert_equal since that dispatches...
+ assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
+ r = histogram2d(xy, s_d)
+ assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
+ r = histogram2d(xy, xy, bins=s_d)
+ assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=s_d)))
+ r = histogram2d(xy, xy, bins=[s_d, 5])
+ assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5])))
+ assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
+ r = histogram2d(xy, xy, weights=s_d)
+ assert_(r == ((ShouldDispatch,), (xy, xy), dict(weights=s_d)))
+
+ @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)])
+ def test_bad_length(self, x_len, y_len):
+ x, y = np.ones(x_len), np.ones(y_len)
+ with pytest.raises(ValueError,
+ match='x and y must have the same length.'):
+ histogram2d(x, y)
+
+
+class TestTri:
+ def test_dtype(self):
+ out = array([[1, 0, 0],
+ [1, 1, 0],
+ [1, 1, 1]])
+ assert_array_equal(tri(3), out)
+ assert_array_equal(tri(3, dtype=bool), out.astype(bool))
+
+
+def test_tril_triu_ndim2():
+ for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
+ a = np.ones((2, 2), dtype=dtype)
+ b = np.tril(a)
+ c = np.triu(a)
+ assert_array_equal(b, [[1, 0], [1, 1]])
+ assert_array_equal(c, b.T)
+ # should return the same dtype as the original array
+ assert_equal(b.dtype, a.dtype)
+ assert_equal(c.dtype, a.dtype)
+
+
+def test_tril_triu_ndim3():
+ for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
+ a = np.array([
+ [[1, 1], [1, 1]],
+ [[1, 1], [1, 0]],
+ [[1, 1], [0, 0]],
+ ], dtype=dtype)
+ a_tril_desired = np.array([
+ [[1, 0], [1, 1]],
+ [[1, 0], [1, 0]],
+ [[1, 0], [0, 0]],
+ ], dtype=dtype)
+ a_triu_desired = np.array([
+ [[1, 1], [0, 1]],
+ [[1, 1], [0, 0]],
+ [[1, 1], [0, 0]],
+ ], dtype=dtype)
+ a_triu_observed = np.triu(a)
+ a_tril_observed = np.tril(a)
+ assert_array_equal(a_triu_observed, a_triu_desired)
+ assert_array_equal(a_tril_observed, a_tril_desired)
+ assert_equal(a_triu_observed.dtype, a.dtype)
+ assert_equal(a_tril_observed.dtype, a.dtype)
+
+
+def test_tril_triu_with_inf():
+ # Issue 4859
+ arr = np.array([[1, 1, np.inf],
+ [1, 1, 1],
+ [np.inf, 1, 1]])
+ out_tril = np.array([[1, 0, 0],
+ [1, 1, 0],
+ [np.inf, 1, 1]])
+ out_triu = out_tril.T
+ assert_array_equal(np.triu(arr), out_triu)
+ assert_array_equal(np.tril(arr), out_tril)
+
+
+def test_tril_triu_dtype():
+ # Issue 4916
+ # tril and triu should return the same dtype as input
+ for c in np.typecodes['All']:
+ if c == 'V':
+ continue
+ arr = np.zeros((3, 3), dtype=c)
+ assert_equal(np.triu(arr).dtype, arr.dtype)
+ assert_equal(np.tril(arr).dtype, arr.dtype)
+
+ # check special cases
+ arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
+ ['2004-01-01T12:00', '2003-01-03T13:45']],
+ dtype='datetime64')
+ assert_equal(np.triu(arr).dtype, arr.dtype)
+ assert_equal(np.tril(arr).dtype, arr.dtype)
+
+ arr = np.zeros((3, 3), dtype='f4,f4')
+ assert_equal(np.triu(arr).dtype, arr.dtype)
+ assert_equal(np.tril(arr).dtype, arr.dtype)
+
+
+def test_mask_indices():
+ # simple test without offset
+ iu = mask_indices(3, np.triu)
+ a = np.arange(9).reshape(3, 3)
+ assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
+ # Now with an offset
+ iu1 = mask_indices(3, np.triu, 1)
+ assert_array_equal(a[iu1], array([1, 2, 5]))
+
+
+def test_tril_indices():
+ # indices without and with offset
+ il1 = tril_indices(4)
+ il2 = tril_indices(4, k=2)
+ il3 = tril_indices(4, m=5)
+ il4 = tril_indices(4, k=2, m=5)
+
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ b = np.arange(1, 21).reshape(4, 5)
+
+ # indexing:
+ assert_array_equal(a[il1],
+ array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ assert_array_equal(b[il3],
+ array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
+
+ # And for assigning values:
+ a[il1] = -1
+ assert_array_equal(a,
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]]))
+ b[il3] = -1
+ assert_array_equal(b,
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
+ [-1, -1, -1, 14, 15],
+ [-1, -1, -1, -1, 20]]))
+ # These cover almost the whole array (two diagonals right of the main one):
+ a[il2] = -10
+ assert_array_equal(a,
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]]))
+ b[il4] = -10
+ assert_array_equal(b,
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
+ [-10, -10, -10, -10, -10],
+ [-10, -10, -10, -10, -10]]))
+
+
+class TestTriuIndices:
+ def test_triu_indices(self):
+ iu1 = triu_indices(4)
+ iu2 = triu_indices(4, k=2)
+ iu3 = triu_indices(4, m=5)
+ iu4 = triu_indices(4, k=2, m=5)
+
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ b = np.arange(1, 21).reshape(4, 5)
+
+ # Both for indexing:
+ assert_array_equal(a[iu1],
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ assert_array_equal(b[iu3],
+ array([1, 2, 3, 4, 5, 7, 8, 9,
+ 10, 13, 14, 15, 19, 20]))
+
+ # And for assigning values:
+ a[iu1] = -1
+ assert_array_equal(a,
+ array([[-1, -1, -1, -1],
+ [5, -1, -1, -1],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
+ b[iu3] = -1
+ assert_array_equal(b,
+ array([[-1, -1, -1, -1, -1],
+ [6, -1, -1, -1, -1],
+ [11, 12, -1, -1, -1],
+ [16, 17, 18, -1, -1]]))
+
+ # These cover almost the whole array (two diagonals right of the
+ # main one):
+ a[iu2] = -10
+ assert_array_equal(a,
+ array([[-1, -1, -10, -10],
+ [5, -1, -1, -10],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
+ b[iu4] = -10
+ assert_array_equal(b,
+ array([[-1, -1, -10, -10, -10],
+ [6, -1, -1, -10, -10],
+ [11, 12, -1, -1, -10],
+ [16, 17, 18, -1, -1]]))
+
+
+class TestTrilIndicesFrom:
+ def test_exceptions(self):
+ assert_raises(ValueError, tril_indices_from, np.ones((2,)))
+ assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
+ # assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
+
+
+class TestTriuIndicesFrom:
+ def test_exceptions(self):
+ assert_raises(ValueError, triu_indices_from, np.ones((2,)))
+ assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
+ # assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
+
+
+class TestVander:
+ def test_basic(self):
+ c = np.array([0, 1, -2, 3])
+ v = vander(c)
+ powers = np.array([[0, 0, 0, 0, 1],
+ [1, 1, 1, 1, 1],
+ [16, -8, 4, -2, 1],
+ [81, 27, 9, 3, 1]])
+ # Check default value of N:
+ assert_array_equal(v, powers[:, 1:])
+ # Check a range of N values, including 0 and 5 (greater than default)
+ m = powers.shape[1]
+ for n in range(6):
+ v = vander(c, N=n)
+ assert_array_equal(v, powers[:, m-n:m])
+
+ def test_dtypes(self):
+ c = array([11, -12, 13], dtype=np.int8)
+ v = vander(c)
+ expected = np.array([[121, 11, 1],
+ [144, -12, 1],
+ [169, 13, 1]])
+ assert_array_equal(v, expected)
+
+ c = array([1.0+1j, 1.0-1j])
+ v = vander(c, N=3)
+ expected = np.array([[2j, 1+1j, 1],
+ [-2j, 1-1j, 1]])
+ # The data is floating point, but the values are small integers,
+ # so assert_array_equal *should* be safe here (rather than, say,
+ # assert_array_almost_equal).
+ assert_array_equal(v, expected)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py
new file mode 100644
index 00000000..3f4ca630
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_type_check.py
@@ -0,0 +1,478 @@
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises
+ )
+from numpy.lib.type_check import (
+ common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
+ nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
+ )
+
+
+def assert_all(x):
+ assert_(np.all(x), x)
+
+
+class TestCommonType:
+ def test_basic(self):
+ ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
+ af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
+ af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
+ af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
+ acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)
+ acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)
+ assert_(common_type(ai32) == np.float64)
+ assert_(common_type(af16) == np.float16)
+ assert_(common_type(af32) == np.float32)
+ assert_(common_type(af64) == np.float64)
+ assert_(common_type(acs) == np.csingle)
+ assert_(common_type(acd) == np.cdouble)
+
+
+class TestMintypecode:
+
+ def test_default_1(self):
+ for itype in '1bcsuwil':
+ assert_equal(mintypecode(itype), 'd')
+ assert_equal(mintypecode('f'), 'f')
+ assert_equal(mintypecode('d'), 'd')
+ assert_equal(mintypecode('F'), 'F')
+ assert_equal(mintypecode('D'), 'D')
+
+ def test_default_2(self):
+ for itype in '1bcsuwil':
+ assert_equal(mintypecode(itype+'f'), 'f')
+ assert_equal(mintypecode(itype+'d'), 'd')
+ assert_equal(mintypecode(itype+'F'), 'F')
+ assert_equal(mintypecode(itype+'D'), 'D')
+ assert_equal(mintypecode('ff'), 'f')
+ assert_equal(mintypecode('fd'), 'd')
+ assert_equal(mintypecode('fF'), 'F')
+ assert_equal(mintypecode('fD'), 'D')
+ assert_equal(mintypecode('df'), 'd')
+ assert_equal(mintypecode('dd'), 'd')
+ #assert_equal(mintypecode('dF',savespace=1),'F')
+ assert_equal(mintypecode('dF'), 'D')
+ assert_equal(mintypecode('dD'), 'D')
+ assert_equal(mintypecode('Ff'), 'F')
+ #assert_equal(mintypecode('Fd',savespace=1),'F')
+ assert_equal(mintypecode('Fd'), 'D')
+ assert_equal(mintypecode('FF'), 'F')
+ assert_equal(mintypecode('FD'), 'D')
+ assert_equal(mintypecode('Df'), 'D')
+ assert_equal(mintypecode('Dd'), 'D')
+ assert_equal(mintypecode('DF'), 'D')
+ assert_equal(mintypecode('DD'), 'D')
+
+ def test_default_3(self):
+ assert_equal(mintypecode('fdF'), 'D')
+ #assert_equal(mintypecode('fdF',savespace=1),'F')
+ assert_equal(mintypecode('fdD'), 'D')
+ assert_equal(mintypecode('fFD'), 'D')
+ assert_equal(mintypecode('dFD'), 'D')
+
+ assert_equal(mintypecode('ifd'), 'd')
+ assert_equal(mintypecode('ifF'), 'F')
+ assert_equal(mintypecode('ifD'), 'D')
+ assert_equal(mintypecode('idF'), 'D')
+ #assert_equal(mintypecode('idF',savespace=1),'F')
+ assert_equal(mintypecode('idD'), 'D')
+
+
+class TestIsscalar:
+
+ def test_basic(self):
+ assert_(np.isscalar(3))
+ assert_(not np.isscalar([3]))
+ assert_(not np.isscalar((3,)))
+ assert_(np.isscalar(3j))
+ assert_(np.isscalar(4.0))
+
+
+class TestReal:
+
+ def test_real(self):
+ y = np.random.rand(10,)
+ assert_array_equal(y, np.real(y))
+
+ y = np.array(1)
+ out = np.real(y)
+ assert_array_equal(y, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1
+ out = np.real(y)
+ assert_equal(y, out)
+ assert_(not isinstance(out, np.ndarray))
+
+ def test_cmplx(self):
+ y = np.random.rand(10,)+1j*np.random.rand(10,)
+ assert_array_equal(y.real, np.real(y))
+
+ y = np.array(1 + 1j)
+ out = np.real(y)
+ assert_array_equal(y.real, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1 + 1j
+ out = np.real(y)
+ assert_equal(1.0, out)
+ assert_(not isinstance(out, np.ndarray))
+
+
+class TestImag:
+
+ def test_real(self):
+ y = np.random.rand(10,)
+ assert_array_equal(0, np.imag(y))
+
+ y = np.array(1)
+ out = np.imag(y)
+ assert_array_equal(0, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1
+ out = np.imag(y)
+ assert_equal(0, out)
+ assert_(not isinstance(out, np.ndarray))
+
+ def test_cmplx(self):
+ y = np.random.rand(10,)+1j*np.random.rand(10,)
+ assert_array_equal(y.imag, np.imag(y))
+
+ y = np.array(1 + 1j)
+ out = np.imag(y)
+ assert_array_equal(y.imag, out)
+ assert_(isinstance(out, np.ndarray))
+
+ y = 1 + 1j
+ out = np.imag(y)
+ assert_equal(1.0, out)
+ assert_(not isinstance(out, np.ndarray))
+
+
+class TestIscomplex:
+
+ def test_fail(self):
+ z = np.array([-1, 0, 1])
+ res = iscomplex(z)
+ assert_(not np.sometrue(res, axis=0))
+
+ def test_pass(self):
+ z = np.array([-1j, 1, 0])
+ res = iscomplex(z)
+ assert_array_equal(res, [1, 0, 0])
+
+
+class TestIsreal:
+
+ def test_pass(self):
+ z = np.array([-1, 0, 1j])
+ res = isreal(z)
+ assert_array_equal(res, [1, 1, 0])
+
+ def test_fail(self):
+ z = np.array([-1j, 1, 0])
+ res = isreal(z)
+ assert_array_equal(res, [0, 1, 1])
+
+
+class TestIscomplexobj:
+
+ def test_basic(self):
+ z = np.array([-1, 0, 1])
+ assert_(not iscomplexobj(z))
+ z = np.array([-1j, 0, -1])
+ assert_(iscomplexobj(z))
+
+ def test_scalar(self):
+ assert_(not iscomplexobj(1.0))
+ assert_(iscomplexobj(1+0j))
+
+ def test_list(self):
+ assert_(iscomplexobj([3, 1+0j, True]))
+ assert_(not iscomplexobj([3, 1, True]))
+
+ def test_duck(self):
+ class DummyComplexArray:
+ @property
+ def dtype(self):
+ return np.dtype(complex)
+ dummy = DummyComplexArray()
+ assert_(iscomplexobj(dummy))
+
+ def test_pandas_duck(self):
+ # This tests a custom np.dtype duck-typed class, such as used by pandas
+ # (pandas.core.dtypes)
+ class PdComplex(np.complex128):
+ pass
+ class PdDtype:
+ name = 'category'
+ names = None
+ type = PdComplex
+ kind = 'c'
+ str = '<c16'
+ base = np.dtype('complex128')
+ class DummyPd:
+ @property
+ def dtype(self):
+ return PdDtype
+ dummy = DummyPd()
+ assert_(iscomplexobj(dummy))
+
+ def test_custom_dtype_duck(self):
+ class MyArray(list):
+ @property
+ def dtype(self):
+ return complex
+
+ a = MyArray([1+0j, 2+0j, 3+0j])
+ assert_(iscomplexobj(a))
+
+
+class TestIsrealobj:
+ def test_basic(self):
+ z = np.array([-1, 0, 1])
+ assert_(isrealobj(z))
+ z = np.array([-1j, 0, -1])
+ assert_(not isrealobj(z))
+
+
+class TestIsnan:
+
+ def test_goodvalues(self):
+ z = np.array((-1., 0., 1.))
+ res = np.isnan(z) == 0
+ assert_all(np.all(res, axis=0))
+
+ def test_posinf(self):
+ with np.errstate(divide='ignore'):
+ assert_all(np.isnan(np.array((1.,))/0.) == 0)
+
+ def test_neginf(self):
+ with np.errstate(divide='ignore'):
+ assert_all(np.isnan(np.array((-1.,))/0.) == 0)
+
+ def test_ind(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isnan(np.array((0.,))/0.) == 1)
+
+ def test_integer(self):
+ assert_all(np.isnan(1) == 0)
+
+ def test_complex(self):
+ assert_all(np.isnan(1+1j) == 0)
+
+ def test_complex1(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isnan(np.array(0+0j)/0.) == 1)
+
+
+class TestIsfinite:
+ # Fixme, wrong place, isfinite now ufunc
+
+ def test_goodvalues(self):
+ z = np.array((-1., 0., 1.))
+ res = np.isfinite(z) == 1
+ assert_all(np.all(res, axis=0))
+
+ def test_posinf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((1.,))/0.) == 0)
+
+ def test_neginf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((-1.,))/0.) == 0)
+
+ def test_ind(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array((0.,))/0.) == 0)
+
+ def test_integer(self):
+ assert_all(np.isfinite(1) == 1)
+
+ def test_complex(self):
+ assert_all(np.isfinite(1+1j) == 1)
+
+ def test_complex1(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
+
+
+class TestIsinf:
+ # Fixme, wrong place, isinf now ufunc
+
+ def test_goodvalues(self):
+ z = np.array((-1., 0., 1.))
+ res = np.isinf(z) == 0
+ assert_all(np.all(res, axis=0))
+
+ def test_posinf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((1.,))/0.) == 1)
+
+ def test_posinf_scalar(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array(1.,)/0.) == 1)
+
+ def test_neginf(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((-1.,))/0.) == 1)
+
+ def test_neginf_scalar(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array(-1.)/0.) == 1)
+
+ def test_ind(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_all(np.isinf(np.array((0.,))/0.) == 0)
+
+
+class TestIsposinf:
+
+ def test_generic(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = isposinf(np.array((-1., 0, 1))/0.)
+ assert_(vals[0] == 0)
+ assert_(vals[1] == 0)
+ assert_(vals[2] == 1)
+
+
+class TestIsneginf:
+
+ def test_generic(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = isneginf(np.array((-1., 0, 1))/0.)
+ assert_(vals[0] == 1)
+ assert_(vals[1] == 0)
+ assert_(vals[2] == 0)
+
+
+class TestNanToNum:
+
+ def test_generic(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1))/0.)
+        assert_all(vals[0] < -1e10)
+        assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10)
+        assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same tests but with nan, posinf and neginf keywords
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1))/0.,
+ nan=10, posinf=20, neginf=30)
+ assert_equal(vals, [30, 10, 20])
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same test but in-place
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = np.array((-1., 0, 1))/0.
+ result = nan_to_num(vals, copy=False)
+
+ assert_(result is vals)
+        assert_all(vals[0] < -1e10)
+        assert_all(np.isfinite(vals[0]))
+        assert_(vals[1] == 0)
+        assert_all(vals[2] > 1e10)
+        assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ # perform the same test but in-place
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = np.array((-1., 0, 1))/0.
+ result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
+
+ assert_(result is vals)
+ assert_equal(vals, [30, 10, 20])
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_equal(type(vals), np.ndarray)
+
+ def test_array(self):
+ vals = nan_to_num([1])
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
+ vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
+
+ def test_integer(self):
+ vals = nan_to_num(1)
+ assert_all(vals == 1)
+ assert_equal(type(vals), np.int_)
+ vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1)
+ assert_equal(type(vals), np.int_)
+
+ def test_float(self):
+ vals = nan_to_num(1.0)
+ assert_all(vals == 1.0)
+ assert_equal(type(vals), np.float_)
+ vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1.1)
+ assert_equal(type(vals), np.float_)
+
+ def test_complex_good(self):
+ vals = nan_to_num(1+1j)
+ assert_all(vals == 1+1j)
+ assert_equal(type(vals), np.complex_)
+ vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1+1j)
+ assert_equal(type(vals), np.complex_)
+
+ def test_complex_bad(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ v = 1 + 1j
+ v += np.array(0+1.j)/0.
+ vals = nan_to_num(v)
+ # !! This is actually (unexpectedly) zero
+ assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex_)
+
+ def test_complex_bad2(self):
+ with np.errstate(divide='ignore', invalid='ignore'):
+ v = 1 + 1j
+ v += np.array(-1+1.j)/0.
+ vals = nan_to_num(v)
+ assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex_)
+ # Fixme
+ #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
+ # !! This is actually (unexpectedly) positive
+ # !! inf. Comment out for now, and see if it
+ # !! changes
+ #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
+
+ def test_do_not_rewrite_previous_keyword(self):
+ # This is done to test that when, for instance, nan=np.inf then these
+ # values are not rewritten by posinf keyword to the posinf value.
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999)
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_all(vals[0] < -1e10)
+ assert_equal(vals[[1, 2]], [np.inf, 999])
+ assert_equal(type(vals), np.ndarray)
+
+
+class TestRealIfClose:
+
+ def test_basic(self):
+ a = np.random.rand(10)
+ b = real_if_close(a+1e-15j)
+ assert_all(isrealobj(b))
+ assert_array_equal(a, b)
+ b = real_if_close(a+1e-7j)
+ assert_all(iscomplexobj(b))
+ b = real_if_close(a+1e-7j, tol=1e-6)
+ assert_all(isrealobj(b))
+
+
+class TestArrayConversion:
+
+ def test_asfarray(self):
+ a = asfarray(np.array([1, 2, 3]))
+ assert_equal(a.__class__, np.ndarray)
+ assert_(np.issubdtype(a.dtype, np.floating))
+
+ # previously this would infer dtypes from arrays, unlike every single
+ # other numpy function
+ assert_raises(TypeError,
+ asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py
new file mode 100644
index 00000000..c280b696
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_ufunclike.py
@@ -0,0 +1,104 @@
+import numpy as np
+import numpy.core as nx
+import numpy.lib.ufunclike as ufl
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_warns, assert_raises
+)
+
+
+class TestUfunclike:
+
+ def test_isposinf(self):
+ a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
+ out = nx.zeros(a.shape, bool)
+ tgt = nx.array([True, False, False, False, False, False])
+
+ res = ufl.isposinf(a)
+ assert_equal(res, tgt)
+ res = ufl.isposinf(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+
+ a = a.astype(np.complex_)
+ with assert_raises(TypeError):
+ ufl.isposinf(a)
+
+ def test_isneginf(self):
+ a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
+ out = nx.zeros(a.shape, bool)
+ tgt = nx.array([False, True, False, False, False, False])
+
+ res = ufl.isneginf(a)
+ assert_equal(res, tgt)
+ res = ufl.isneginf(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+
+ a = a.astype(np.complex_)
+ with assert_raises(TypeError):
+ ufl.isneginf(a)
+
+ def test_fix(self):
+ a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
+ out = nx.zeros(a.shape, float)
+ tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])
+
+ res = ufl.fix(a)
+ assert_equal(res, tgt)
+ res = ufl.fix(a, out)
+ assert_equal(res, tgt)
+ assert_equal(out, tgt)
+ assert_equal(ufl.fix(3.14), 3)
+
+ def test_fix_with_subclass(self):
+ class MyArray(nx.ndarray):
+ def __new__(cls, data, metadata=None):
+ res = nx.array(data, copy=True).view(cls)
+ res.metadata = metadata
+ return res
+
+ def __array_wrap__(self, obj, context=None):
+ if isinstance(obj, MyArray):
+ obj.metadata = self.metadata
+ return obj
+
+ def __array_finalize__(self, obj):
+ self.metadata = getattr(obj, 'metadata', None)
+ return self
+
+ a = nx.array([1.1, -1.1])
+ m = MyArray(a, metadata='foo')
+ f = ufl.fix(m)
+ assert_array_equal(f, nx.array([1, -1]))
+ assert_(isinstance(f, MyArray))
+ assert_equal(f.metadata, 'foo')
+
+ # check 0d arrays don't decay to scalars
+        m0d = m[0, ...]
+ m0d.metadata = 'bar'
+ f0d = ufl.fix(m0d)
+ assert_(isinstance(f0d, MyArray))
+ assert_equal(f0d.metadata, 'bar')
+
+ def test_deprecated(self):
+ # NumPy 1.13.0, 2017-04-26
+ assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
+ assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
+ assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))
+
+ def test_scalar(self):
+ x = np.inf
+ actual = np.isposinf(x)
+ expected = np.True_
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+ x = -3.4
+ actual = np.fix(x)
+ expected = np.float64(-3.0)
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+ out = np.array(0.0)
+ actual = np.fix(x, out=out)
+ assert_(actual is out)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py
new file mode 100644
index 00000000..6ad4bfe6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/tests/test_utils.py
@@ -0,0 +1,178 @@
+import inspect
+import sys
+import pytest
+
+from numpy.core import arange
+from numpy.testing import assert_, assert_equal, assert_raises_regex
+from numpy.lib import deprecate, deprecate_with_doc
+import numpy.lib.utils as utils
+
+from io import StringIO
+
+
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+@pytest.mark.skipif(
+ sys.version_info == (3, 10, 0, "candidate", 1),
+ reason="Broken as of bpo-44524",
+)
+def test_lookfor():
+ out = StringIO()
+ utils.lookfor('eigenvalue', module='numpy', output=out,
+ import_modules=False)
+ out = out.getvalue()
+ assert_('numpy.linalg.eig' in out)
+
+
+@deprecate
+def old_func(self, x):
+ return x
+
+
+@deprecate(message="Rather use new_func2")
+def old_func2(self, x):
+ return x
+
+
+def old_func3(self, x):
+ return x
+new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3")
+
+
+def old_func4(self, x):
+ """Summary.
+
+ Further info.
+ """
+ return x
+new_func4 = deprecate(old_func4)
+
+
+def old_func5(self, x):
+ """Summary.
+
+ Bizarre indentation.
+ """
+ return x
+new_func5 = deprecate(old_func5, message="This function is\ndeprecated.")
+
+
+def old_func6(self, x):
+ """
+ Also in PEP-257.
+ """
+ return x
+new_func6 = deprecate(old_func6)
+
+
+@deprecate_with_doc(msg="Rather use new_func7")
+def old_func7(self, x):
+ return x
+
+
+def test_deprecate_decorator():
+ assert_('deprecated' in old_func.__doc__)
+
+
+def test_deprecate_decorator_message():
+ assert_('Rather use new_func2' in old_func2.__doc__)
+
+
+def test_deprecate_fn():
+ assert_('old_func3' in new_func3.__doc__)
+ assert_('new_func3' in new_func3.__doc__)
+
+
+def test_deprecate_with_doc_decorator_message():
+ assert_('Rather use new_func7' in old_func7.__doc__)
+
+
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings")
+@pytest.mark.parametrize('old_func, new_func', [
+ (old_func4, new_func4),
+ (old_func5, new_func5),
+ (old_func6, new_func6),
+])
+def test_deprecate_help_indentation(old_func, new_func):
+ _compare_docs(old_func, new_func)
+ # Ensure we don't mess up the indentation
+ for knd, func in (('old', old_func), ('new', new_func)):
+ for li, line in enumerate(func.__doc__.split('\n')):
+ if li == 0:
+                assert line.startswith('    ') or not line.startswith(' '), knd
+            elif line:
+                assert line.startswith('    '), knd
+
+
+def _compare_docs(old_func, new_func):
+ old_doc = inspect.getdoc(old_func)
+ new_doc = inspect.getdoc(new_func)
+ index = new_doc.index('\n\n') + 2
+ assert_equal(new_doc[index:], old_doc)
+
+
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings")
+def test_deprecate_preserve_whitespace():
+ assert_('\n Bizarre' in new_func5.__doc__)
+
+
+def test_deprecate_module():
+ assert_(old_func.__module__ == __name__)
+
+
+def test_safe_eval_nameconstant():
+ # Test if safe_eval supports Python 3.4 _ast.NameConstant
+ utils.safe_eval('None')
+
+
+class TestByteBounds:
+
+ def test_byte_bounds(self):
+ # pointer difference matches size * itemsize
+ # due to contiguity
+ a = arange(12).reshape(3, 4)
+ low, high = utils.byte_bounds(a)
+ assert_equal(high - low, a.size * a.itemsize)
+
+ def test_unusual_order_positive_stride(self):
+ a = arange(12).reshape(3, 4)
+ b = a.T
+ low, high = utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_unusual_order_negative_stride(self):
+ a = arange(12).reshape(3, 4)
+ b = a.T[::-1]
+ low, high = utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_strided(self):
+ a = arange(12)
+ b = a[::2]
+ low, high = utils.byte_bounds(b)
+        # with a stride of 2, every other element is skipped, so the span
+        # falls one itemsize short of covering 2 * b.size elements
+        assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
+
+
+def test_assert_raises_regex_context_manager():
+ with assert_raises_regex(ValueError, 'no deprecation warning'):
+ raise ValueError('no deprecation warning')
+
+
+def test_info_method_heading():
+ # info(class) should only print "Methods:" heading if methods exist
+
+ class NoPublicMethods:
+ pass
+
+ class WithPublicMethods:
+ def first_method():
+ pass
+
+ def _has_method_heading(cls):
+ out = StringIO()
+ utils.info(cls, output=out)
+ return 'Methods:' in out.getvalue()
+
+ assert _has_method_heading(WithPublicMethods)
+ assert not _has_method_heading(NoPublicMethods)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/twodim_base.py b/venv/lib/python3.9/site-packages/numpy/lib/twodim_base.py
new file mode 100644
index 00000000..a5f421e5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/twodim_base.py
@@ -0,0 +1,1128 @@
+""" Basic functions for manipulating 2d arrays
+
+"""
+import functools
+import operator
+
+from numpy.core.numeric import (
+ asanyarray, arange, zeros, greater_equal, multiply, ones,
+ asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
+ diagonal, nonzero, indices
+ )
+from numpy.core.overrides import set_array_function_like_doc, set_module
+from numpy.core import overrides
+from numpy.core import iinfo
+from numpy.lib.stride_tricks import broadcast_to
+
+
+__all__ = [
+ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
+ 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
+ 'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+i1 = iinfo(int8)
+i2 = iinfo(int16)
+i4 = iinfo(int32)
+
+
+def _min_int(low, high):
+ """ get small int that fits the range """
+ if high <= i1.max and low >= i1.min:
+ return int8
+ if high <= i2.max and low >= i2.min:
+ return int16
+ if high <= i4.max and low >= i4.min:
+ return int32
+ return int64
+
+
+def _flip_dispatcher(m):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
+def fliplr(m):
+ """
+ Reverse the order of elements along axis 1 (left/right).
+
+ For a 2-D array, this flips the entries in each row in the left/right
+ direction. Columns are preserved, but appear in a different order than
+ before.
+
+ Parameters
+ ----------
+ m : array_like
+ Input array, must be at least 2-D.
+
+ Returns
+ -------
+ f : ndarray
+ A view of `m` with the columns reversed. Since a view
+ is returned, this operation is :math:`\\mathcal O(1)`.
+
+ See Also
+ --------
+ flipud : Flip array in the up/down direction.
+ flip : Flip array in one or more dimensions.
+ rot90 : Rotate array counterclockwise.
+
+ Notes
+ -----
+ Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
+ Requires the array to be at least 2-D.
+
+ Examples
+ --------
+ >>> A = np.diag([1.,2.,3.])
+ >>> A
+ array([[1., 0., 0.],
+ [0., 2., 0.],
+ [0., 0., 3.]])
+ >>> np.fliplr(A)
+ array([[0., 0., 1.],
+ [0., 2., 0.],
+ [3., 0., 0.]])
+
+ >>> A = np.random.randn(2,3,5)
+ >>> np.all(np.fliplr(A) == A[:,::-1,...])
+ True
+
+ """
+ m = asanyarray(m)
+ if m.ndim < 2:
+ raise ValueError("Input must be >= 2-d.")
+ return m[:, ::-1]
+
+
+@array_function_dispatch(_flip_dispatcher)
+def flipud(m):
+ """
+ Reverse the order of elements along axis 0 (up/down).
+
+ For a 2-D array, this flips the entries in each column in the up/down
+ direction. Rows are preserved, but appear in a different order than before.
+
+ Parameters
+ ----------
+ m : array_like
+ Input array.
+
+ Returns
+ -------
+ out : array_like
+ A view of `m` with the rows reversed. Since a view is
+ returned, this operation is :math:`\\mathcal O(1)`.
+
+ See Also
+ --------
+ fliplr : Flip array in the left/right direction.
+ flip : Flip array in one or more dimensions.
+ rot90 : Rotate array counterclockwise.
+
+ Notes
+ -----
+ Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
+ Requires the array to be at least 1-D.
+
+ Examples
+ --------
+ >>> A = np.diag([1.0, 2, 3])
+ >>> A
+ array([[1., 0., 0.],
+ [0., 2., 0.],
+ [0., 0., 3.]])
+ >>> np.flipud(A)
+ array([[0., 0., 3.],
+ [0., 2., 0.],
+ [1., 0., 0.]])
+
+ >>> A = np.random.randn(2,3,5)
+ >>> np.all(np.flipud(A) == A[::-1,...])
+ True
+
+ >>> np.flipud([1,2])
+ array([2, 1])
+
+ """
+ m = asanyarray(m)
+ if m.ndim < 1:
+ raise ValueError("Input must be >= 1-d.")
+ return m[::-1, ...]
+
+
+def _eye_dispatcher(N, M=None, k=None, dtype=None, order=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
+ """
+ Return a 2-D array with ones on the diagonal and zeros elsewhere.
+
+ Parameters
+ ----------
+ N : int
+ Number of rows in the output.
+ M : int, optional
+ Number of columns in the output. If None, defaults to `N`.
+ k : int, optional
+ Index of the diagonal: 0 (the default) refers to the main diagonal,
+ a positive value refers to an upper diagonal, and a negative value
+ to a lower diagonal.
+ dtype : data-type, optional
+ Data-type of the returned array.
+ order : {'C', 'F'}, optional
+ Whether the output should be stored in row-major (C-style) or
+ column-major (Fortran-style) order in memory.
+
+ .. versionadded:: 1.14.0
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ I : ndarray of shape (N,M)
+ An array where all elements are equal to zero, except for the `k`-th
+ diagonal, whose values are equal to one.
+
+ See Also
+ --------
+ identity : (almost) equivalent function
+ diag : diagonal 2-D array from a 1-D array specified by the user.
+
+ Examples
+ --------
+ >>> np.eye(2, dtype=int)
+ array([[1, 0],
+ [0, 1]])
+ >>> np.eye(3, k=1)
+ array([[0., 1., 0.],
+ [0., 0., 1.],
+ [0., 0., 0.]])
+
+ """
+ if like is not None:
+ return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
+ if M is None:
+ M = N
+ m = zeros((N, M), dtype=dtype, order=order)
+ if k >= M:
+ return m
+ # Ensure M and k are integers, so we don't get any surprise casting
+ # results in the expressions `M-k` and `M+1` used below. This avoids
+ # a problem with inputs with type (for example) np.uint64.
+ M = operator.index(M)
+ k = operator.index(k)
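+    # In row-major order, consecutive elements of a diagonal are M+1 apart
+    # in the flat array; the k-th diagonal starts at flat index k when
+    # k >= 0, and at (-k)*M when k < 0.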
+ if k >= 0:
+ i = k
+ else:
+ i = (-k) * M
+ m[:M-k].flat[i::M+1] = 1
+ return m
+
+
+_eye_with_like = array_function_dispatch(
+ _eye_dispatcher, use_like=True
+)(eye)
+
+
+def _diag_dispatcher(v, k=None):
+ return (v,)
+
+
+@array_function_dispatch(_diag_dispatcher)
+def diag(v, k=0):
+ """
+ Extract a diagonal or construct a diagonal array.
+
+ See the more detailed documentation for ``numpy.diagonal`` if you use this
+ function to extract a diagonal and wish to write to the resulting array;
+ whether it returns a copy or a view depends on what version of numpy you
+ are using.
+
+ Parameters
+ ----------
+ v : array_like
+ If `v` is a 2-D array, return a copy of its `k`-th diagonal.
+ If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
+ diagonal.
+ k : int, optional
+ Diagonal in question. The default is 0. Use `k>0` for diagonals
+ above the main diagonal, and `k<0` for diagonals below the main
+ diagonal.
+
+ Returns
+ -------
+ out : ndarray
+ The extracted diagonal or constructed diagonal array.
+
+ See Also
+ --------
+ diagonal : Return specified diagonals.
+ diagflat : Create a 2-D array with the flattened input as a diagonal.
+ trace : Sum along diagonals.
+ triu : Upper triangle of an array.
+ tril : Lower triangle of an array.
+
+ Examples
+ --------
+ >>> x = np.arange(9).reshape((3,3))
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+
+ >>> np.diag(x)
+ array([0, 4, 8])
+ >>> np.diag(x, k=1)
+ array([1, 5])
+ >>> np.diag(x, k=-1)
+ array([3, 7])
+
+ >>> np.diag(np.diag(x))
+ array([[0, 0, 0],
+ [0, 4, 0],
+ [0, 0, 8]])
+
+ """
+ v = asanyarray(v)
+ s = v.shape
+ if len(s) == 1:
+ n = s[0]+abs(k)
+ res = zeros((n, n), v.dtype)
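+        # same flat-index trick as in `eye` above: entries of the k-th
+        # diagonal are n+1 apart in the flattened array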
+ if k >= 0:
+ i = k
+ else:
+ i = (-k) * n
+ res[:n-k].flat[i::n+1] = v
+ return res
+ elif len(s) == 2:
+ return diagonal(v, k)
+ else:
+ raise ValueError("Input must be 1- or 2-d.")
+
+
+@array_function_dispatch(_diag_dispatcher)
+def diagflat(v, k=0):
+ """
+ Create a two-dimensional array with the flattened input as a diagonal.
+
+ Parameters
+ ----------
+ v : array_like
+ Input data, which is flattened and set as the `k`-th
+ diagonal of the output.
+ k : int, optional
+ Diagonal to set; 0, the default, corresponds to the "main" diagonal,
+ a positive (negative) `k` giving the number of the diagonal above
+ (below) the main.
+
+ Returns
+ -------
+ out : ndarray
+ The 2-D output array.
+
+ See Also
+ --------
+ diag : MATLAB work-alike for 1-D and 2-D arrays.
+ diagonal : Return specified diagonals.
+ trace : Sum along diagonals.
+
+ Examples
+ --------
+ >>> np.diagflat([[1,2], [3,4]])
+ array([[1, 0, 0, 0],
+ [0, 2, 0, 0],
+ [0, 0, 3, 0],
+ [0, 0, 0, 4]])
+
+ >>> np.diagflat([1,2], 1)
+ array([[0, 1, 0],
+ [0, 0, 2],
+ [0, 0, 0]])
+
+ """
+ try:
+ wrap = v.__array_wrap__
+ except AttributeError:
+ wrap = None
+ v = asarray(v).ravel()
+ s = len(v)
+ n = s + abs(k)
+ res = zeros((n, n), v.dtype)
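+    # flat index of element (r, c) in the n x n result is r*n + c, so the
+    # k-th diagonal sits at i+k+i*n (k >= 0) or i+(i-k)*n (k < 0)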
+ if (k >= 0):
+ i = arange(0, n-k, dtype=intp)
+ fi = i+k+i*n
+ else:
+ i = arange(0, n+k, dtype=intp)
+ fi = i+(i-k)*n
+ res.flat[fi] = v
+ if not wrap:
+ return res
+ return wrap(res)
+
+
+def _tri_dispatcher(N, M=None, k=None, dtype=None, *, like=None):
+ return (like,)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def tri(N, M=None, k=0, dtype=float, *, like=None):
+ """
+ An array with ones at and below the given diagonal and zeros elsewhere.
+
+ Parameters
+ ----------
+ N : int
+ Number of rows in the array.
+ M : int, optional
+ Number of columns in the array.
+ By default, `M` is taken equal to `N`.
+ k : int, optional
+ The sub-diagonal at and below which the array is filled.
+ `k` = 0 is the main diagonal, while `k` < 0 is below it,
+ and `k` > 0 is above. The default is 0.
+ dtype : dtype, optional
+ Data type of the returned array. The default is float.
+ ${ARRAY_FUNCTION_LIKE}
+
+ .. versionadded:: 1.20.0
+
+ Returns
+ -------
+ tri : ndarray of shape (N, M)
+ Array with its lower triangle filled with ones and zero elsewhere;
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
+
+ Examples
+ --------
+ >>> np.tri(3, 5, 2, dtype=int)
+ array([[1, 1, 1, 0, 0],
+ [1, 1, 1, 1, 0],
+ [1, 1, 1, 1, 1]])
+
+ >>> np.tri(3, 5, -1)
+ array([[0., 0., 0., 0., 0.],
+ [1., 0., 0., 0., 0.],
+ [1., 1., 0., 0., 0.]])
+
+ """
+ if like is not None:
+ return _tri_with_like(N, M=M, k=k, dtype=dtype, like=like)
+
+ if M is None:
+ M = N
+
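+    # Build the boolean mask m[i, j] = (i >= j - k), i.e. ones where
+    # j <= i + k, comparing index ranges held in the smallest integer
+    # dtypes that fit them.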
+ m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
+ arange(-k, M-k, dtype=_min_int(-k, M - k)))
+
+ # Avoid making a copy if the requested type is already bool
+ m = m.astype(dtype, copy=False)
+
+ return m
+
+
+_tri_with_like = array_function_dispatch(
+ _tri_dispatcher, use_like=True
+)(tri)
+
+
+def _trilu_dispatcher(m, k=None):
+ return (m,)
+
+
+@array_function_dispatch(_trilu_dispatcher)
+def tril(m, k=0):
+ """
+ Lower triangle of an array.
+
+ Return a copy of an array with elements above the `k`-th diagonal zeroed.
+ For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two
+ axes.
+
+ Parameters
+ ----------
+ m : array_like, shape (..., M, N)
+ Input array.
+ k : int, optional
+ Diagonal above which to zero elements. `k = 0` (the default) is the
+ main diagonal, `k < 0` is below it and `k > 0` is above.
+
+ Returns
+ -------
+ tril : ndarray, shape (..., M, N)
+ Lower triangle of `m`, of same shape and data-type as `m`.
+
+ See Also
+ --------
+ triu : same thing, only for the upper triangle
+
+ Examples
+ --------
+ >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+ array([[ 0, 0, 0],
+ [ 4, 0, 0],
+ [ 7, 8, 0],
+ [10, 11, 12]])
+
+ >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))
+ array([[[ 0, 0, 0, 0, 0],
+ [ 5, 6, 0, 0, 0],
+ [10, 11, 12, 0, 0],
+ [15, 16, 17, 18, 0]],
+ [[20, 0, 0, 0, 0],
+ [25, 26, 0, 0, 0],
+ [30, 31, 32, 0, 0],
+ [35, 36, 37, 38, 0]],
+ [[40, 0, 0, 0, 0],
+ [45, 46, 0, 0, 0],
+ [50, 51, 52, 0, 0],
+ [55, 56, 57, 58, 0]]])
+
+ """
+ m = asanyarray(m)
+ mask = tri(*m.shape[-2:], k=k, dtype=bool)
+
+ return where(mask, m, zeros(1, m.dtype))
+
+
+@array_function_dispatch(_trilu_dispatcher)
+def triu(m, k=0):
+ """
+ Upper triangle of an array.
+
+ Return a copy of an array with the elements below the `k`-th diagonal
+ zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
+ final two axes.
+
+ Please refer to the documentation for `tril` for further details.
+
+ See Also
+ --------
+ tril : lower triangle of an array
+
+ Examples
+ --------
+ >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+ array([[ 1, 2, 3],
+ [ 4, 5, 6],
+ [ 0, 8, 9],
+ [ 0, 0, 12]])
+
+ >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
+ array([[[ 0, 1, 2, 3, 4],
+ [ 0, 6, 7, 8, 9],
+ [ 0, 0, 12, 13, 14],
+ [ 0, 0, 0, 18, 19]],
+ [[20, 21, 22, 23, 24],
+ [ 0, 26, 27, 28, 29],
+ [ 0, 0, 32, 33, 34],
+ [ 0, 0, 0, 38, 39]],
+ [[40, 41, 42, 43, 44],
+ [ 0, 46, 47, 48, 49],
+ [ 0, 0, 52, 53, 54],
+ [ 0, 0, 0, 58, 59]]])
+
+ """
+ m = asanyarray(m)
+ mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
+
+ return where(mask, zeros(1, m.dtype), m)
+
+
+def _vander_dispatcher(x, N=None, increasing=None):
+ return (x,)
+
+
+# Originally borrowed from John Hunter and matplotlib
+@array_function_dispatch(_vander_dispatcher)
+def vander(x, N=None, increasing=False):
+ """
+ Generate a Vandermonde matrix.
+
+ The columns of the output matrix are powers of the input vector. The
+ order of the powers is determined by the `increasing` boolean argument.
+ Specifically, when `increasing` is False, the `i`-th output column is
+ the input vector raised element-wise to the power of ``N - i - 1``. Such
+ a matrix with a geometric progression in each row is named for Alexandre-
+ Theophile Vandermonde.
+
+ Parameters
+ ----------
+ x : array_like
+ 1-D input array.
+ N : int, optional
+ Number of columns in the output. If `N` is not specified, a square
+ array is returned (``N = len(x)``).
+ increasing : bool, optional
+ Order of the powers of the columns. If True, the powers increase
+ from left to right, if False (the default) they are reversed.
+
+ .. versionadded:: 1.9.0
+
+ Returns
+ -------
+ out : ndarray
+ Vandermonde matrix. If `increasing` is False, the first column is
+ ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
+ True, the columns are ``x^0, x^1, ..., x^(N-1)``.
+
+ See Also
+ --------
+ polynomial.polynomial.polyvander
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3, 5])
+ >>> N = 3
+ >>> np.vander(x, N)
+ array([[ 1, 1, 1],
+ [ 4, 2, 1],
+ [ 9, 3, 1],
+ [25, 5, 1]])
+
+ >>> np.column_stack([x**(N-1-i) for i in range(N)])
+ array([[ 1, 1, 1],
+ [ 4, 2, 1],
+ [ 9, 3, 1],
+ [25, 5, 1]])
+
+ >>> x = np.array([1, 2, 3, 5])
+ >>> np.vander(x)
+ array([[ 1, 1, 1, 1],
+ [ 8, 4, 2, 1],
+ [ 27, 9, 3, 1],
+ [125, 25, 5, 1]])
+ >>> np.vander(x, increasing=True)
+ array([[ 1, 1, 1, 1],
+ [ 1, 2, 4, 8],
+ [ 1, 3, 9, 27],
+ [ 1, 5, 25, 125]])
+
+ The determinant of a square Vandermonde matrix is the product
+ of the differences between the values of the input vector:
+
+ >>> np.linalg.det(np.vander(x))
+ 48.000000000000043 # may vary
+ >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
+ 48
+
+ """
+ x = asarray(x)
+ if x.ndim != 1:
+ raise ValueError("x must be a one-dimensional array or sequence.")
+ if N is None:
+ N = len(x)
+
+ v = empty((len(x), N), dtype=promote_types(x.dtype, int))
+ tmp = v[:, ::-1] if not increasing else v
+
+ if N > 0:
+ tmp[:, 0] = 1
+ if N > 1:
+ tmp[:, 1:] = x[:, None]
+ multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
+
+ return v
+
+
+def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,
+ weights=None):
+ yield x
+ yield y
+
+ # This terrible logic is adapted from the checks in histogram2d
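+    # (a two-element bins spec may itself contain array-likes, which must
+    # also take part in dispatch)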
+ try:
+ N = len(bins)
+ except TypeError:
+ N = 1
+ if N == 2:
+ yield from bins # bins=[x, y]
+ else:
+ yield bins
+
+ yield weights
+
+
+@array_function_dispatch(_histogram2d_dispatcher)
+def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
+ """
+ Compute the bi-dimensional histogram of two data samples.
+
+ Parameters
+ ----------
+ x : array_like, shape (N,)
+ An array containing the x coordinates of the points to be
+ histogrammed.
+ y : array_like, shape (N,)
+ An array containing the y coordinates of the points to be
+ histogrammed.
+ bins : int or array_like or [int, int] or [array, array], optional
+ The bin specification:
+
+ * If int, the number of bins for the two dimensions (nx=ny=bins).
+ * If array_like, the bin edges for the two dimensions
+ (x_edges=y_edges=bins).
+ * If [int, int], the number of bins in each dimension
+ (nx, ny = bins).
+ * If [array, array], the bin edges in each dimension
+ (x_edges, y_edges = bins).
+ * A combination [int, array] or [array, int], where int
+ is the number of bins and array is the bin edges.
+
+ range : array_like, shape(2,2), optional
+ The leftmost and rightmost edges of the bins along each dimension
+ (if not specified explicitly in the `bins` parameters):
+ ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
+ will be considered outliers and not tallied in the histogram.
+ density : bool, optional
+ If False, the default, returns the number of samples in each bin.
+ If True, returns the probability *density* function at the bin,
+ ``bin_count / sample_count / bin_area``.
+ weights : array_like, shape(N,), optional
+ An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
+ Weights are normalized to 1 if `density` is True. If `density` is
+ False, the values of the returned histogram are equal to the sum of
+ the weights belonging to the samples falling into each bin.
+
+ Returns
+ -------
+ H : ndarray, shape(nx, ny)
+ The bi-dimensional histogram of samples `x` and `y`. Values in `x`
+ are histogrammed along the first dimension and values in `y` are
+ histogrammed along the second dimension.
+ xedges : ndarray, shape(nx+1,)
+ The bin edges along the first dimension.
+ yedges : ndarray, shape(ny+1,)
+ The bin edges along the second dimension.
+
+ See Also
+ --------
+ histogram : 1D histogram
+ histogramdd : Multidimensional histogram
+
+ Notes
+ -----
+ When `density` is True, then the returned histogram is the sample
+ density, defined such that the sum over bins of the product
+ ``bin_value * bin_area`` is 1.
+
+ Please note that the histogram does not follow the Cartesian convention
+ where `x` values are on the abscissa and `y` values on the ordinate
+ axis. Rather, `x` is histogrammed along the first dimension of the
+ array (vertical), and `y` along the second dimension of the array
+ (horizontal). This ensures compatibility with `histogramdd`.
+
+ Examples
+ --------
+ >>> from matplotlib.image import NonUniformImage
+ >>> import matplotlib.pyplot as plt
+
+ Construct a 2-D histogram with variable bin width. First define the bin
+ edges:
+
+ >>> xedges = [0, 1, 3, 5]
+ >>> yedges = [0, 2, 3, 4, 6]
+
+ Next we create a histogram H with random bin content:
+
+ >>> x = np.random.normal(2, 1, 100)
+ >>> y = np.random.normal(1, 1, 100)
+ >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
+ >>> # Histogram does not follow Cartesian convention (see Notes),
+ >>> # therefore transpose H for visualization purposes.
+ >>> H = H.T
+
+ :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
+
+ >>> fig = plt.figure(figsize=(7, 3))
+ >>> ax = fig.add_subplot(131, title='imshow: square bins')
+ >>> plt.imshow(H, interpolation='nearest', origin='lower',
+ ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+ <matplotlib.image.AxesImage object at 0x...>
+
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
+
+ >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
+ ... aspect='equal')
+ >>> X, Y = np.meshgrid(xedges, yedges)
+ >>> ax.pcolormesh(X, Y, H)
+ <matplotlib.collections.QuadMesh object at 0x...>
+
+ :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
+ display actual bin edges with interpolation:
+
+ >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
+ ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
+ >>> im = NonUniformImage(ax, interpolation='bilinear')
+ >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
+ >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
+ >>> im.set_data(xcenters, ycenters, H)
+ >>> ax.add_image(im)
+ >>> plt.show()
+
+ It is also possible to construct a 2-D histogram without specifying bin
+ edges:
+
+ >>> # Generate non-symmetric test data
+ >>> n = 10000
+ >>> x = np.linspace(1, 100, n)
+ >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
+ >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
+ >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
+
+ Now we can plot the histogram using
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
+ :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
+
+ >>> # Plot histogram using pcolormesh
+ >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
+ >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
+ >>> ax1.plot(x, 2*np.log(x), 'k-')
+ >>> ax1.set_xlim(x.min(), x.max())
+ >>> ax1.set_ylim(y.min(), y.max())
+ >>> ax1.set_xlabel('x')
+ >>> ax1.set_ylabel('y')
+ >>> ax1.set_title('histogram2d')
+ >>> ax1.grid()
+
+ >>> # Create hexbin plot for comparison
+ >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
+ >>> ax2.plot(x, 2*np.log(x), 'k-')
+ >>> ax2.set_title('hexbin')
+ >>> ax2.set_xlim(x.min(), x.max())
+ >>> ax2.set_xlabel('x')
+ >>> ax2.grid()
+
+ >>> plt.show()
+ """
+ from numpy import histogramdd
+
+ if len(x) != len(y):
+ raise ValueError('x and y must have the same length.')
+
+ try:
+ N = len(bins)
+ except TypeError:
+ N = 1
+
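+    # A bare sequence of bin edges (length other than 1 or 2) is shared by
+    # both dimensions; all other specs are forwarded to histogramdd as-is.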
+ if N != 1 and N != 2:
+ xedges = yedges = asarray(bins)
+ bins = [xedges, yedges]
+ hist, edges = histogramdd([x, y], bins, range, density, weights)
+ return hist, edges[0], edges[1]
+
+
+@set_module('numpy')
+def mask_indices(n, mask_func, k=0):
+ """
+ Return the indices to access (n, n) arrays, given a masking function.
+
+ Assume `mask_func` is a function that, for a square array a of size
+ ``(n, n)`` with a possible offset argument `k`, when called as
+ ``mask_func(a, k)`` returns a new array with zeros in certain locations
+ (functions like `triu` or `tril` do precisely this). Then this function
+ returns the indices where the non-zero values would be located.
+
+ Parameters
+ ----------
+ n : int
+ The returned indices will be valid to access arrays of shape (n, n).
+ mask_func : callable
+ A function whose call signature is similar to that of `triu`, `tril`.
+ That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
+ `k` is an optional argument to the function.
+ k : scalar
+ An optional argument which is passed through to `mask_func`. Functions
+ like `triu`, `tril` take a second argument that is interpreted as an
+ offset.
+
+ Returns
+ -------
+ indices : tuple of arrays.
+ The `n` arrays of indices corresponding to the locations where
+ ``mask_func(np.ones((n, n)), k)`` is True.
+
+ See Also
+ --------
+ triu, tril, triu_indices, tril_indices
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ These are the indices that would allow you to access the upper triangular
+ part of any 3x3 array:
+
+ >>> iu = np.mask_indices(3, np.triu)
+
+ For example, if `a` is a 3x3 array:
+
+ >>> a = np.arange(9).reshape(3, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> a[iu]
+ array([0, 1, 2, 4, 5, 8])
+
+ An offset can be passed also to the masking function. This gets us the
+ indices starting on the first diagonal right of the main one:
+
+ >>> iu1 = np.mask_indices(3, np.triu, 1)
+
+ with which we now extract only three elements:
+
+ >>> a[iu1]
+ array([1, 2, 5])
+
+ """
+ m = ones((n, n), int)
+ a = mask_func(m, k)
+ return nonzero(a != 0)
+
+
+@set_module('numpy')
+def tril_indices(n, k=0, m=None):
+ """
+ Return the indices for the lower-triangle of an (n, m) array.
+
+ Parameters
+ ----------
+ n : int
+ The row dimension of the arrays for which the returned
+ indices will be valid.
+ k : int, optional
+ Diagonal offset (see `tril` for details).
+ m : int, optional
+ .. versionadded:: 1.9.0
+
+ The column dimension of the arrays for which the returned
+ arrays will be valid.
+ By default `m` is taken equal to `n`.
+
+
+ Returns
+ -------
+ inds : tuple of arrays
+ The indices for the triangle. The returned tuple contains two arrays,
+ each with the indices along one dimension of the array.
+
+ See also
+ --------
+ triu_indices : similar function, for upper-triangular.
+ mask_indices : generic function accepting an arbitrary mask function.
+ tril, triu
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ Compute two different sets of indices to access 4x4 arrays, one for the
+ lower triangular part starting at the main diagonal, and one starting two
+ diagonals further right:
+
+ >>> il1 = np.tril_indices(4)
+ >>> il2 = np.tril_indices(4, 2)
+
+ Here is how they can be used with a sample array:
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Both for indexing:
+
+ >>> a[il1]
+ array([ 0, 4, 5, ..., 13, 14, 15])
+
+ And for assigning values:
+
+ >>> a[il1] = -1
+ >>> a
+ array([[-1, 1, 2, 3],
+ [-1, -1, 6, 7],
+ [-1, -1, -1, 11],
+ [-1, -1, -1, -1]])
+
+ These cover almost the whole array (two diagonals right of the main one):
+
+ >>> a[il2] = -10
+ >>> a
+ array([[-10, -10, -10, 3],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]])
+
+ """
+ tri_ = tri(n, m, k=k, dtype=bool)
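+    # Return the (row, col) index grids at the positions the mask selects.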
+
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
+ for inds in indices(tri_.shape, sparse=True))
+
+
+def _trilu_indices_form_dispatcher(arr, k=None):
+ return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def tril_indices_from(arr, k=0):
+ """
+ Return the indices for the lower-triangle of arr.
+
+ See `tril_indices` for full details.
+
+ Parameters
+ ----------
+ arr : array_like
+ The indices will be valid for square arrays whose dimensions are
+ the same as arr.
+ k : int, optional
+ Diagonal offset (see `tril` for details).
+
+ See Also
+ --------
+ tril_indices, tril
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
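+    Examples
+    --------
+    A small illustration on a 4x4 array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> il = np.tril_indices_from(a)
+    >>> a[il]
+    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
+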
+ """
+ if arr.ndim != 2:
+ raise ValueError("input array must be 2-d")
+ return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+@set_module('numpy')
+def triu_indices(n, k=0, m=None):
+ """
+ Return the indices for the upper-triangle of an (n, m) array.
+
+ Parameters
+ ----------
+ n : int
+ The size of the arrays for which the returned indices will
+ be valid.
+ k : int, optional
+ Diagonal offset (see `triu` for details).
+ m : int, optional
+ .. versionadded:: 1.9.0
+
+ The column dimension of the arrays for which the returned
+ arrays will be valid.
+ By default `m` is taken equal to `n`.
+
+
+ Returns
+ -------
+ inds : tuple, shape(2) of ndarrays, shape(`n`)
+ The indices for the triangle. The returned tuple contains two arrays,
+ each with the indices along one dimension of the array. Can be used
+ to slice a ndarray of shape(`n`, `n`).
+
+ See also
+ --------
+ tril_indices : similar function, for lower-triangular.
+ mask_indices : generic function accepting an arbitrary mask function.
+ triu, tril
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ Compute two different sets of indices to access 4x4 arrays, one for the
+ upper triangular part starting at the main diagonal, and one starting two
+ diagonals further right:
+
+ >>> iu1 = np.triu_indices(4)
+ >>> iu2 = np.triu_indices(4, 2)
+
+ Here is how they can be used with a sample array:
+
+ >>> a = np.arange(16).reshape(4, 4)
+ >>> a
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11],
+ [12, 13, 14, 15]])
+
+ Both for indexing:
+
+ >>> a[iu1]
+ array([ 0, 1, 2, ..., 10, 11, 15])
+
+ And for assigning values:
+
+ >>> a[iu1] = -1
+ >>> a
+ array([[-1, -1, -1, -1],
+ [ 4, -1, -1, -1],
+ [ 8, 9, -1, -1],
+ [12, 13, 14, -1]])
+
+ These cover only a small part of the whole array (two diagonals right
+ of the main one):
+
+ >>> a[iu2] = -10
+ >>> a
+ array([[ -1, -1, -10, -10],
+ [ 4, -1, -1, -10],
+ [ 8, 9, -1, -1],
+ [ 12, 13, 14, -1]])
+
+ """
+ tri_ = ~tri(n, m, k=k - 1, dtype=bool)
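+    # The upper triangle at offset k is the complement of the lower
+    # triangle at offset k - 1.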
+
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
+ for inds in indices(tri_.shape, sparse=True))
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def triu_indices_from(arr, k=0):
+ """
+ Return the indices for the upper-triangle of arr.
+
+ See `triu_indices` for full details.
+
+ Parameters
+ ----------
+ arr : ndarray, shape(N, N)
+ The indices will be valid for square arrays.
+ k : int, optional
+ Diagonal offset (see `triu` for details).
+
+ Returns
+ -------
+ triu_indices_from : tuple, shape(2) of ndarray, shape(N)
+ Indices for the upper-triangle of `arr`.
+
+ See Also
+ --------
+ triu_indices, triu
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
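+    Examples
+    --------
+    A small illustration on a 4x4 array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> iu = np.triu_indices_from(a)
+    >>> a[iu]
+    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
+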
+ """
+ if arr.ndim != 2:
+ raise ValueError("input array must be 2-d")
+ return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi b/venv/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi
new file mode 100644
index 00000000..1b3b94bd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/twodim_base.pyi
@@ -0,0 +1,239 @@
+from collections.abc import Callable, Sequence
+from typing import (
+ Any,
+ overload,
+ TypeVar,
+ Union,
+)
+
+from numpy import (
+ generic,
+ number,
+ bool_,
+ timedelta64,
+ datetime64,
+ int_,
+ intp,
+ float64,
+ signedinteger,
+ floating,
+ complexfloating,
+ object_,
+ _OrderCF,
+)
+
+from numpy._typing import (
+ DTypeLike,
+ _DTypeLike,
+ ArrayLike,
+ _ArrayLike,
+ NDArray,
+ _SupportsArrayFunc,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+
+# The returned arrays dtype must be compatible with `np.equal`
+_MaskFunc = Callable[
+ [NDArray[int_], _T],
+ NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]],
+]
+
+__all__: list[str]
+
+@overload
+def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def fliplr(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def flipud(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def eye(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: None = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def eye(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def eye(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: DTypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def tri(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: None = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...
+) -> NDArray[float64]: ...
+@overload
+def tri(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: _DTypeLike[_SCT] = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...
+) -> NDArray[_SCT]: ...
+@overload
+def tri(
+ N: int,
+ M: None | int = ...,
+ k: int = ...,
+ dtype: DTypeLike = ...,
+ *,
+ like: None | _SupportsArrayFunc = ...
+) -> NDArray[Any]: ...
+
+@overload
+def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def vander( # type: ignore[misc]
+ x: _ArrayLikeInt_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def vander( # type: ignore[misc]
+ x: _ArrayLikeFloat_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def vander(
+ x: _ArrayLikeComplex_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def vander(
+ x: _ArrayLikeObject_co,
+ N: None | int = ...,
+ increasing: bool = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def histogram2d( # type: ignore[misc]
+ x: _ArrayLikeFloat_co,
+ y: _ArrayLikeFloat_co,
+ bins: int | Sequence[int] = ...,
+ range: None | _ArrayLikeFloat_co = ...,
+ density: None | bool = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[floating[Any]],
+ NDArray[floating[Any]],
+]: ...
+@overload
+def histogram2d(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ bins: int | Sequence[int] = ...,
+ range: None | _ArrayLikeFloat_co = ...,
+ density: None | bool = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[complexfloating[Any, Any]],
+ NDArray[complexfloating[Any, Any]],
+]: ...
+@overload # TODO: Sort out `bins`
+def histogram2d(
+ x: _ArrayLikeComplex_co,
+ y: _ArrayLikeComplex_co,
+ bins: Sequence[_ArrayLikeInt_co],
+ range: None | _ArrayLikeFloat_co = ...,
+ density: None | bool = ...,
+ weights: None | _ArrayLikeFloat_co = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[Any],
+ NDArray[Any],
+]: ...
+
+# NOTE: we're assuming/demanding here the `mask_func` returns
+# an ndarray of shape `(n, n)`; otherwise there is the possibility
+# of the output tuple having more or less than 2 elements
+@overload
+def mask_indices(
+ n: int,
+ mask_func: _MaskFunc[int],
+ k: int = ...,
+) -> tuple[NDArray[intp], NDArray[intp]]: ...
+@overload
+def mask_indices(
+ n: int,
+ mask_func: _MaskFunc[_T],
+ k: _T,
+) -> tuple[NDArray[intp], NDArray[intp]]: ...
+
+def tril_indices(
+ n: int,
+ k: int = ...,
+ m: None | int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def tril_indices_from(
+ arr: NDArray[Any],
+ k: int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices(
+ n: int,
+ k: int = ...,
+ m: None | int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices_from(
+ arr: NDArray[Any],
+ k: int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/type_check.py b/venv/lib/python3.9/site-packages/numpy/lib/type_check.py
new file mode 100644
index 00000000..94d525f5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/type_check.py
@@ -0,0 +1,735 @@
+"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py
+
+"""
+import functools
+import warnings
+
+__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
+ 'isreal', 'nan_to_num', 'real', 'real_if_close',
+ 'typename', 'asfarray', 'mintypecode',
+ 'common_type']
+
+import numpy.core.numeric as _nx
+from numpy.core.numeric import asarray, asanyarray, isnan, zeros
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from .ufunclike import isneginf, isposinf
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
+
+
+@set_module('numpy')
+def mintypecode(typechars, typeset='GDFgdf', default='d'):
+ """
+ Return the character for the minimum-size type to which given types can
+ be safely cast.
+
+ The returned type character must represent the smallest size dtype such
+ that an array of the returned type can handle the data from an array of
+ all types in `typechars` (or if `typechars` is an array, then its
+ dtype.char).
+
+ Parameters
+ ----------
+ typechars : list of str or array_like
+ If a list of strings, each string should represent a dtype.
+ If array_like, the character representation of the array dtype is used.
+ typeset : str or list of str, optional
+ The set of characters that the returned character is chosen from.
+ The default set is 'GDFgdf'.
+ default : str, optional
+ The default character, this is returned if none of the characters in
+ `typechars` matches a character in `typeset`.
+
+ Returns
+ -------
+ typechar : str
+ The character representing the minimum-size type that was found.
+
+ See Also
+ --------
+ dtype, sctype2char, maximum_sctype
+
+ Examples
+ --------
+ >>> np.mintypecode(['d', 'f', 'S'])
+ 'd'
+ >>> x = np.array([1.1, 2-3.j])
+ >>> np.mintypecode(x)
+ 'D'
+
+ >>> np.mintypecode('abceh', default='G')
+ 'G'
+
+ """
+ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+ for t in typechars)
+ intersection = set(t for t in typecodes if t in typeset)
+ if not intersection:
+ return default
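+    # 'F' (single-precision complex) cannot hold 'd' (double) values without
+    # losing precision, so that pairing is promoted to 'D' (double complex).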
+ if 'F' in intersection and 'd' in intersection:
+ return 'D'
+ return min(intersection, key=_typecodes_by_elsize.index)
+
+
+def _asfarray_dispatcher(a, dtype=None):
+ return (a,)
+
+
+@array_function_dispatch(_asfarray_dispatcher)
+def asfarray(a, dtype=_nx.float_):
+ """
+ Return an array converted to a float type.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array.
+ dtype : str or dtype object, optional
+ Float type code to coerce input array `a`. If `dtype` is one of the
+ 'int' dtypes, it is replaced with float64.
+
+ Returns
+ -------
+ out : ndarray
+ The input `a` as a float ndarray.
+
+ Examples
+ --------
+ >>> np.asfarray([2, 3])
+ array([2., 3.])
+ >>> np.asfarray([2, 3], dtype='float')
+ array([2., 3.])
+ >>> np.asfarray([2, 3], dtype='int8')
+ array([2., 3.])
+
+ """
+ if not _nx.issubdtype(dtype, _nx.inexact):
+ dtype = _nx.float_
+ return asarray(a, dtype=dtype)
+
+
+def _real_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_real_dispatcher)
+def real(val):
+ """
+ Return the real part of the complex argument.
+
+ Parameters
+ ----------
+ val : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The real component of the complex argument. If `val` is real, the type
+ of `val` is used for the output. If `val` has complex elements, the
+ returned type is float.
+
+ See Also
+ --------
+ real_if_close, imag, angle
+
+ Examples
+ --------
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
+ >>> a.real
+ array([1., 3., 5.])
+ >>> a.real = 9
+ >>> a
+ array([9.+2.j, 9.+4.j, 9.+6.j])
+ >>> a.real = np.array([9, 8, 7])
+ >>> a
+ array([9.+2.j, 8.+4.j, 7.+6.j])
+ >>> np.real(1 + 1j)
+ 1.0
+
+ """
+ try:
+ return val.real
+ except AttributeError:
+ return asanyarray(val).real
+
+
+def _imag_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_imag_dispatcher)
+def imag(val):
+ """
+ Return the imaginary part of the complex argument.
+
+ Parameters
+ ----------
+ val : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ The imaginary component of the complex argument. If `val` is real,
+ the type of `val` is used for the output. If `val` has complex
+ elements, the returned type is float.
+
+ See Also
+ --------
+ real, angle, real_if_close
+
+ Examples
+ --------
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
+ >>> a.imag
+ array([2., 4., 6.])
+ >>> a.imag = np.array([8, 10, 12])
+ >>> a
+ array([1. +8.j, 3.+10.j, 5.+12.j])
+ >>> np.imag(1 + 1j)
+ 1.0
+
+ """
+ try:
+ return val.imag
+ except AttributeError:
+ return asanyarray(val).imag
+
+
+def _is_type_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def iscomplex(x):
+ """
+    Return a bool array that is True where the input element is complex.
+
+ What is tested is whether the input has a non-zero imaginary part, not if
+ the input type is complex.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray of bools
+ Output array.
+
+ See Also
+ --------
+ isreal
+ iscomplexobj : Return True if x is a complex type or an array of complex
+ numbers.
+
+ Examples
+ --------
+ >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
+ array([ True, False, False, False, False, True])
+
+ """
+ ax = asanyarray(x)
+ if issubclass(ax.dtype.type, _nx.complexfloating):
+ return ax.imag != 0
+ res = zeros(ax.shape, bool)
+ return res[()] # convert to scalar if needed
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def isreal(x):
+ """
+    Return a bool array that is True where the input element is real.
+
+    If an element has a complex type with zero imaginary part, the return
+    value for that element is True.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array.
+
+ Returns
+ -------
+ out : ndarray, bool
+ Boolean array of same shape as `x`.
+
+ Notes
+ -----
+    `isreal` may behave unexpectedly for string or object arrays (see examples).
+
+ See Also
+ --------
+ iscomplex
+ isrealobj : Return True if x is not a complex type.
+
+ Examples
+ --------
+ >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
+ >>> np.isreal(a)
+ array([False, True, True, True, True, False])
+
+ The function does not work on string arrays.
+
+ >>> a = np.array([2j, "a"], dtype="U")
+ >>> np.isreal(a) # Warns about non-elementwise comparison
+ False
+
+ Returns True for all elements in input array of ``dtype=object`` even if
+ any of the elements is complex.
+
+ >>> a = np.array([1, "2", 3+4j], dtype=object)
+ >>> np.isreal(a)
+ array([ True, True, True])
+
+    `isreal` should not be used with object arrays.
+
+ >>> a = np.array([1+2j, 2+1j], dtype=object)
+ >>> np.isreal(a)
+ array([ True, True])
+
+ """
+ return imag(x) == 0
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def iscomplexobj(x):
+ """
+ Check for a complex type or an array of complex numbers.
+
+ The type of the input is checked, not the value. Even if the input
+ has an imaginary part equal to zero, `iscomplexobj` evaluates to True.
+
+ Parameters
+ ----------
+ x : any
+ The input can be of any type and shape.
+
+ Returns
+ -------
+ iscomplexobj : bool
+ The return value, True if `x` is of a complex type or has at least
+ one complex element.
+
+ See Also
+ --------
+ isrealobj, iscomplex
+
+ Examples
+ --------
+ >>> np.iscomplexobj(1)
+ False
+ >>> np.iscomplexobj(1+0j)
+ True
+ >>> np.iscomplexobj([3, 1+0j, True])
+ True
+
+ """
+ try:
+ dtype = x.dtype
+ type_ = dtype.type
+ except AttributeError:
+ type_ = asarray(x).dtype.type
+ return issubclass(type_, _nx.complexfloating)
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def isrealobj(x):
+ """
+    Return True if x is not a complex type nor an array of complex numbers.
+
+ The type of the input is checked, not the value. So even if the input
+ has an imaginary part equal to zero, `isrealobj` evaluates to False
+ if the data type is complex.
+
+ Parameters
+ ----------
+ x : any
+ The input can be of any type and shape.
+
+ Returns
+ -------
+ y : bool
+ The return value, False if `x` is of a complex type.
+
+ See Also
+ --------
+ iscomplexobj, isreal
+
+ Notes
+ -----
+    The function is only meant for arrays with numerical values but it
+    accepts all other objects. Since it assumes array input, the return
+    value for other objects may be True.
+
+ >>> np.isrealobj('A string')
+ True
+ >>> np.isrealobj(False)
+ True
+ >>> np.isrealobj(None)
+ True
+
+ Examples
+ --------
+ >>> np.isrealobj(1)
+ True
+ >>> np.isrealobj(1+0j)
+ False
+ >>> np.isrealobj([3, 1+0j, True])
+ False
+
+ """
+ return not iscomplexobj(x)
+
+#-----------------------------------------------------------------------------
+
+def _getmaxmin(t):
+ from numpy.core import getlimits
+ f = getlimits.finfo(t)
+ return f.max, f.min
+
+
+def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
+ return (x,)
+
+
+@array_function_dispatch(_nan_to_num_dispatcher)
+def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
+ """
+ Replace NaN with zero and infinity with large finite numbers (default
+ behaviour) or with the numbers defined by the user using the `nan`,
+ `posinf` and/or `neginf` keywords.
+
+ If `x` is inexact, NaN is replaced by zero or by the user defined value in
+ `nan` keyword, infinity is replaced by the largest finite floating point
+ values representable by ``x.dtype`` or by the user defined value in
+ `posinf` keyword and -infinity is replaced by the most negative finite
+ floating point values representable by ``x.dtype`` or by the user defined
+ value in `neginf` keyword.
+
+ For complex dtypes, the above is applied to each of the real and
+ imaginary components of `x` separately.
+
+ If `x` is not inexact, then no replacements are made.
+
+ Parameters
+ ----------
+ x : scalar or array_like
+ Input data.
+ copy : bool, optional
+ Whether to create a copy of `x` (True) or to replace values
+ in-place (False). The in-place operation only occurs if
+ casting to an array does not require a copy.
+ Default is True.
+
+ .. versionadded:: 1.13
+ nan : int, float, optional
+ Value to be used to fill NaN values. If no value is passed
+ then NaN values will be replaced with 0.0.
+
+ .. versionadded:: 1.17
+ posinf : int, float, optional
+ Value to be used to fill positive infinity values. If no value is
+ passed then positive infinity values will be replaced with a very
+ large number.
+
+ .. versionadded:: 1.17
+ neginf : int, float, optional
+ Value to be used to fill negative infinity values. If no value is
+ passed then negative infinity values will be replaced with a very
+ small (or negative) number.
+
+ .. versionadded:: 1.17
+
+
+
+ Returns
+ -------
+ out : ndarray
+ `x`, with the non-finite values replaced. If `copy` is False, this may
+ be `x` itself.
+
+ See Also
+ --------
+ isinf : Shows which elements are positive or negative infinity.
+ isneginf : Shows which elements are negative infinity.
+ isposinf : Shows which elements are positive infinity.
+ isnan : Shows which elements are Not a Number (NaN).
+ isfinite : Shows which elements are finite (not NaN, not infinity)
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+ Examples
+ --------
+ >>> np.nan_to_num(np.inf)
+ 1.7976931348623157e+308
+ >>> np.nan_to_num(-np.inf)
+ -1.7976931348623157e+308
+ >>> np.nan_to_num(np.nan)
+ 0.0
+ >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+ >>> np.nan_to_num(x)
+ array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
+ -1.28000000e+002, 1.28000000e+002])
+ >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+ array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
+ -1.2800000e+02, 1.2800000e+02])
+ >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+ >>> np.nan_to_num(y)
+ array([ 1.79769313e+308 +0.00000000e+000j, # may vary
+ 0.00000000e+000 +0.00000000e+000j,
+ 0.00000000e+000 +1.79769313e+308j])
+ >>> np.nan_to_num(y, nan=111111, posinf=222222)
+ array([222222.+111111.j, 111111. +0.j, 111111.+222222.j])
+ """
+ x = _nx.array(x, subok=True, copy=copy)
+ xtype = x.dtype.type
+
+ isscalar = (x.ndim == 0)
+
+ if not issubclass(xtype, _nx.inexact):
+ return x[()] if isscalar else x
+
+ iscomplex = issubclass(xtype, _nx.complexfloating)
+
+ dest = (x.real, x.imag) if iscomplex else (x,)
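+    # x.real and x.imag are views into x, so the copyto calls below update
+    # the original array component-wise.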
+ maxf, minf = _getmaxmin(x.real.dtype)
+ if posinf is not None:
+ maxf = posinf
+ if neginf is not None:
+ minf = neginf
+ for d in dest:
+ idx_nan = isnan(d)
+ idx_posinf = isposinf(d)
+ idx_neginf = isneginf(d)
+ _nx.copyto(d, nan, where=idx_nan)
+ _nx.copyto(d, maxf, where=idx_posinf)
+ _nx.copyto(d, minf, where=idx_neginf)
+ return x[()] if isscalar else x
+
+#-----------------------------------------------------------------------------
+
+def _real_if_close_dispatcher(a, tol=None):
+ return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
+ """
+ If input is complex with all imaginary parts close to zero, return
+ real parts.
+
+ "Close to zero" is defined as `tol` * (machine epsilon of the type for
+ `a`).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ tol : float
+ Tolerance in machine epsilons for the complex part of the elements
+ in the array.
+
+ Returns
+ -------
+ out : ndarray
+ If `a` is real, the type of `a` is used for the output. If `a`
+ has complex elements, the returned type is float.
+
+ See Also
+ --------
+ real, imag, angle
+
+ Notes
+ -----
+ Machine epsilon varies from machine to machine and between data types
+ but Python floats on most platforms have a machine epsilon equal to
+ 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
+ out the machine epsilon for floats.
+
+ Examples
+ --------
+ >>> np.finfo(float).eps
+ 2.2204460492503131e-16 # may vary
+
+ >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
+ array([2.1, 5.2])
+ >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
+    array([2.1+4.e-13j, 5.2+3.e-15j])
+
+ """
+ a = asanyarray(a)
+ if not issubclass(a.dtype.type, _nx.complexfloating):
+ return a
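+    # A tol greater than 1 is interpreted as a multiple of the machine
+    # epsilon of the array's dtype; smaller values are used as an absolute
+    # tolerance as given.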
+ if tol > 1:
+ from numpy.core import getlimits
+ f = getlimits.finfo(a.dtype.type)
+ tol = f.eps * tol
+ if _nx.all(_nx.absolute(a.imag) < tol):
+ a = a.real
+ return a
+
+
+#-----------------------------------------------------------------------------
+
+_namefromtype = {'S1': 'character',
+ '?': 'bool',
+ 'b': 'signed char',
+ 'B': 'unsigned char',
+ 'h': 'short',
+ 'H': 'unsigned short',
+ 'i': 'integer',
+ 'I': 'unsigned integer',
+ 'l': 'long integer',
+ 'L': 'unsigned long integer',
+ 'q': 'long long integer',
+ 'Q': 'unsigned long long integer',
+ 'f': 'single precision',
+ 'd': 'double precision',
+ 'g': 'long precision',
+ 'F': 'complex single precision',
+ 'D': 'complex double precision',
+ 'G': 'complex long double precision',
+ 'S': 'string',
+ 'U': 'unicode',
+ 'V': 'void',
+ 'O': 'object'
+ }
+
+@set_module('numpy')
+def typename(char):
+ """
+ Return a description for the given data type code.
+
+ Parameters
+ ----------
+ char : str
+ Data type code.
+
+ Returns
+ -------
+ out : str
+ Description of the input data type code.
+
+ See Also
+ --------
+ dtype, typecodes
+
+ Examples
+ --------
+ >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
+ ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
+ >>> for typechar in typechars:
+ ... print(typechar, ' : ', np.typename(typechar))
+ ...
+ S1 : character
+ ? : bool
+ B : unsigned char
+ D : complex double precision
+ G : complex long double precision
+ F : complex single precision
+ I : unsigned integer
+ H : unsigned short
+ L : unsigned long integer
+ O : object
+ Q : unsigned long long integer
+ S : string
+ U : unicode
+ V : void
+ b : signed char
+ d : double precision
+ g : long precision
+ f : single precision
+ i : integer
+ h : short
+ l : long integer
+ q : long long integer
+
+ """
+ return _namefromtype[char]
+
+#-----------------------------------------------------------------------------
+
+# Determine the "minimum common type" for a group of arrays.
+array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble],
+ [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]]
+array_precision = {_nx.half: 0,
+ _nx.single: 1,
+ _nx.double: 2,
+ _nx.longdouble: 3,
+ _nx.csingle: 1,
+ _nx.cdouble: 2,
+ _nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+ return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
+def common_type(*arrays):
+ """
+ Return a scalar type which is common to the input arrays.
+
+ The return type will always be an inexact (i.e. floating point) scalar
+ type, even if all the arrays are integer arrays. If one of the inputs is
+ an integer array, the minimum precision type that is returned is a
+ 64-bit floating point dtype.
+
+ All input arrays except int64 and uint64 can be safely cast to the
+ returned dtype without loss of information.
+
+ Parameters
+ ----------
+ array1, array2, ... : ndarrays
+ Input arrays.
+
+ Returns
+ -------
+ out : data type code
+ Data type code.
+
+ See Also
+ --------
+ dtype, mintypecode
+
+ Examples
+ --------
+ >>> np.common_type(np.arange(2, dtype=np.float32))
+ <class 'numpy.float32'>
+ >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+ <class 'numpy.float64'>
+ >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+ <class 'numpy.complex128'>
+
+ """
+ is_complex = False
+ precision = 0
+ for a in arrays:
+ t = a.dtype.type
+ if iscomplexobj(a):
+ is_complex = True
+ if issubclass(t, _nx.integer):
+ p = 2 # array_precision[_nx.double]
+ else:
+ p = array_precision.get(t, None)
+ if p is None:
+ raise TypeError("can't get common type for non-numeric array")
+ precision = max(precision, p)
+ if is_complex:
+ return array_type[1][precision]
+ else:
+ return array_type[0][precision]
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/type_check.pyi b/venv/lib/python3.9/site-packages/numpy/lib/type_check.pyi
new file mode 100644
index 00000000..b04da21d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/type_check.pyi
@@ -0,0 +1,222 @@
+from collections.abc import Container, Iterable
+from typing import (
+ Literal as L,
+ Any,
+ overload,
+ TypeVar,
+ Protocol,
+)
+
+from numpy import (
+ dtype,
+ generic,
+ bool_,
+ floating,
+ float64,
+ complexfloating,
+ integer,
+)
+
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NBitBase,
+ NDArray,
+ _64Bit,
+ _SupportsDType,
+ _ScalarLike_co,
+ _ArrayLike,
+ _DTypeLikeComplex,
+)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+class _SupportsReal(Protocol[_T_co]):
+ @property
+ def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+ @property
+ def imag(self) -> _T_co: ...
+
+__all__: list[str]
+
+def mintypecode(
+ typechars: Iterable[str | ArrayLike],
+ typeset: Container[str] = ...,
+ default: str = ...,
+) -> str: ...
+
+# `asfarray` ignores dtypes if they're not inexact
+
+@overload
+def asfarray(
+ a: object,
+ dtype: None | type[float] = ...,
+) -> NDArray[float64]: ...
+@overload
+def asfarray( # type: ignore[misc]
+ a: Any,
+ dtype: _DTypeLikeComplex,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def asfarray(
+ a: Any,
+ dtype: DTypeLike,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def real(val: _SupportsReal[_T]) -> _T: ...
+@overload
+def real(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def imag(val: _SupportsImag[_T]) -> _T: ...
+@overload
+def imag(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def iscomplex(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc]
+@overload
+def iscomplex(x: ArrayLike) -> NDArray[bool_]: ...
+
+@overload
+def isreal(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc]
+@overload
+def isreal(x: ArrayLike) -> NDArray[bool_]: ...
+
+def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+@overload
+def nan_to_num( # type: ignore[misc]
+ x: _SCT,
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> _SCT: ...
+@overload
+def nan_to_num(
+ x: _ScalarLike_co,
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> Any: ...
+@overload
+def nan_to_num(
+ x: _ArrayLike[_SCT],
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def nan_to_num(
+ x: ArrayLike,
+ copy: bool = ...,
+ nan: float = ...,
+ posinf: None | float = ...,
+ neginf: None | float = ...,
+) -> NDArray[Any]: ...
+
+# If one passes a complex array to `real_if_close`, then one is reasonably
+# expected to verify the output dtype (so we can return an unsafe union here)
+
+@overload
+def real_if_close( # type: ignore[misc]
+ a: _ArrayLike[complexfloating[_NBit1, _NBit1]],
+ tol: float = ...,
+) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ...
+@overload
+def real_if_close(
+ a: _ArrayLike[_SCT],
+ tol: float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def real_if_close(
+ a: ArrayLike,
+ tol: float = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def typename(char: L['S1']) -> L['character']: ...
+@overload
+def typename(char: L['?']) -> L['bool']: ...
+@overload
+def typename(char: L['b']) -> L['signed char']: ...
+@overload
+def typename(char: L['B']) -> L['unsigned char']: ...
+@overload
+def typename(char: L['h']) -> L['short']: ...
+@overload
+def typename(char: L['H']) -> L['unsigned short']: ...
+@overload
+def typename(char: L['i']) -> L['integer']: ...
+@overload
+def typename(char: L['I']) -> L['unsigned integer']: ...
+@overload
+def typename(char: L['l']) -> L['long integer']: ...
+@overload
+def typename(char: L['L']) -> L['unsigned long integer']: ...
+@overload
+def typename(char: L['q']) -> L['long long integer']: ...
+@overload
+def typename(char: L['Q']) -> L['unsigned long long integer']: ...
+@overload
+def typename(char: L['f']) -> L['single precision']: ...
+@overload
+def typename(char: L['d']) -> L['double precision']: ...
+@overload
+def typename(char: L['g']) -> L['long precision']: ...
+@overload
+def typename(char: L['F']) -> L['complex single precision']: ...
+@overload
+def typename(char: L['D']) -> L['complex double precision']: ...
+@overload
+def typename(char: L['G']) -> L['complex long double precision']: ...
+@overload
+def typename(char: L['S']) -> L['string']: ...
+@overload
+def typename(char: L['U']) -> L['unicode']: ...
+@overload
+def typename(char: L['V']) -> L['void']: ...
+@overload
+def typename(char: L['O']) -> L['object']: ...
+
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ integer[Any]
+ ]]
+) -> type[floating[_64Bit]]: ...
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ floating[_NBit1]
+ ]]
+) -> type[floating[_NBit1]]: ...
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ integer[Any] | floating[_NBit1]
+ ]]
+) -> type[floating[_NBit1 | _64Bit]]: ...
+@overload
+def common_type( # type: ignore[misc]
+ *arrays: _SupportsDType[dtype[
+ floating[_NBit1] | complexfloating[_NBit2, _NBit2]
+ ]]
+) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ...
+@overload
+def common_type(
+ *arrays: _SupportsDType[dtype[
+ integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2]
+ ]]
+) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/ufunclike.py b/venv/lib/python3.9/site-packages/numpy/lib/ufunclike.py
new file mode 100644
index 00000000..a93c4773
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/ufunclike.py
@@ -0,0 +1,268 @@
+"""
+Module of functions that are like ufuncs in acting on arrays and optionally
+storing results in an output array.
+
+"""
+__all__ = ['fix', 'isneginf', 'isposinf']
+
+import numpy.core.numeric as nx
+from numpy.core.overrides import (
+ array_function_dispatch, ARRAY_FUNCTION_ENABLED,
+)
+import warnings
+import functools
+
+
+def _deprecate_out_named_y(f):
+ """
+ Allow the out argument to be passed as the name `y` (deprecated)
+
+ In future, this decorator should be removed.
+ """
+ @functools.wraps(f)
+ def func(x, out=None, **kwargs):
+ if 'y' in kwargs:
+ if 'out' in kwargs:
+ raise TypeError(
+ "{} got multiple values for argument 'out'/'y'"
+ .format(f.__name__)
+ )
+ out = kwargs.pop('y')
+ # NumPy 1.13.0, 2017-04-26
+ warnings.warn(
+ "The name of the out argument to {} has changed from `y` to "
+ "`out`, to match other ufuncs.".format(f.__name__),
+ DeprecationWarning, stacklevel=3)
+ return f(x, out=out, **kwargs)
+
+ return func
+
+
+def _fix_out_named_y(f):
+ """
+ Allow the out argument to be passed as the name `y` (deprecated)
+
+ This decorator should only be used if _deprecate_out_named_y is used on
+ a corresponding dispatcher function.
+ """
+ @functools.wraps(f)
+ def func(x, out=None, **kwargs):
+ if 'y' in kwargs:
+ # we already did error checking in _deprecate_out_named_y
+ out = kwargs.pop('y')
+ return f(x, out=out, **kwargs)
+
+ return func
+
+
+def _fix_and_maybe_deprecate_out_named_y(f):
+ """
+ Use the appropriate decorator, depending upon if dispatching is being used.
+ """
+ if ARRAY_FUNCTION_ENABLED:
+ return _fix_out_named_y(f)
+ else:
+ return _deprecate_out_named_y(f)
+
+
+@_deprecate_out_named_y
+def _dispatcher(x, out=None):
+ return (x, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_and_maybe_deprecate_out_named_y
+def fix(x, out=None):
+ """
+ Round to nearest integer towards zero.
+
+ Round an array of floats element-wise to nearest integer towards zero.
+ The rounded values are returned as floats.
+
+ Parameters
+ ----------
+ x : array_like
+ An array of floats to be rounded
+ out : ndarray, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated array is returned.
+
+ Returns
+ -------
+ out : ndarray of floats
+ A float array with the same dimensions as the input.
+ If second argument is not supplied then a float array is returned
+ with the rounded values.
+
+ If a second argument is supplied the result is stored there.
+ The return value `out` is then a reference to that array.
+
+ See Also
+ --------
+ rint, trunc, floor, ceil
+ around : Round to given number of decimals
+
+ Examples
+ --------
+ >>> np.fix(3.14)
+ 3.0
+ >>> np.fix(3)
+ 3.0
+ >>> np.fix([2.1, 2.9, -2.1, -2.9])
+ array([ 2., 2., -2., -2.])
+
+ """
+ # promote back to an array if flattened
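+    # ceil rounds negative values toward zero; non-negative entries are then
+    # overwritten with floor, so every element is rounded toward zero.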
+ res = nx.asanyarray(nx.ceil(x, out=out))
+ res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))
+
+ # when no out argument is passed and no subclasses are involved, flatten
+ # scalars
+ if out is None and type(res) is nx.ndarray:
+ res = res[()]
+ return res
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_and_maybe_deprecate_out_named_y
+def isposinf(x, out=None):
+ """
+ Test element-wise for positive infinity, return result as bool array.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ out : array_like, optional
+ A location into which the result is stored. If provided, it must have a
+ shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated boolean array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ A boolean array with the same dimensions as the input.
+ If second argument is not supplied then a boolean array is returned
+ with values True where the corresponding element of the input is
+ positive infinity and values False where the element of the input is
+ not positive infinity.
+
+ If a second argument is supplied the result is stored there. If the
+ type of that array is a numeric type the result is represented as zeros
+ and ones, if the type is boolean then as False and True.
+ The return value `out` is then a reference to that array.
+
+ See Also
+ --------
+ isinf, isneginf, isfinite, isnan
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754).
+
+ Errors result if the second argument is also supplied when x is a scalar
+ input, if first and second arguments have different shapes, or if the
+    first argument has complex values.
+
+ Examples
+ --------
+ >>> np.isposinf(np.PINF)
+ True
+ >>> np.isposinf(np.inf)
+ True
+ >>> np.isposinf(np.NINF)
+ False
+ >>> np.isposinf([-np.inf, 0., np.inf])
+ array([False, False, True])
+
+ >>> x = np.array([-np.inf, 0., np.inf])
+ >>> y = np.array([2, 2, 2])
+ >>> np.isposinf(x, y)
+ array([0, 0, 1])
+ >>> y
+ array([0, 0, 1])
+
+ """
+ is_inf = nx.isinf(x)
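+    # Positive infinity is an infinite value whose sign bit is clear; signbit
+    # raises TypeError for complex input, which is reported more clearly below.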
+ try:
+ signbit = ~nx.signbit(x)
+ except TypeError as e:
+ dtype = nx.asanyarray(x).dtype
+ raise TypeError(f'This operation is not supported for {dtype} values '
+ 'because it would be ambiguous.') from e
+ else:
+ return nx.logical_and(is_inf, signbit, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_and_maybe_deprecate_out_named_y
+def isneginf(x, out=None):
+ """
+ Test element-wise for negative infinity, return result as bool array.
+
+ Parameters
+ ----------
+ x : array_like
+ The input array.
+ out : array_like, optional
+ A location into which the result is stored. If provided, it must have a
+ shape that the input broadcasts to. If not provided or None, a
+ freshly-allocated boolean array is returned.
+
+ Returns
+ -------
+ out : ndarray
+ A boolean array with the same dimensions as the input.
+ If second argument is not supplied then a numpy boolean array is
+ returned with values True where the corresponding element of the
+ input is negative infinity and values False where the element of
+ the input is not negative infinity.
+
+ If a second argument is supplied the result is stored there. If the
+ type of that array is a numeric type the result is represented as
+ zeros and ones, if the type is boolean then as False and True. The
+ return value `out` is then a reference to that array.
+
+ See Also
+ --------
+ isinf, isposinf, isnan, isfinite
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754).
+
+ Errors result if the second argument is also supplied when x is a scalar
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
+
+ Examples
+ --------
+ >>> np.isneginf(np.NINF)
+ True
+ >>> np.isneginf(np.inf)
+ False
+ >>> np.isneginf(np.PINF)
+ False
+ >>> np.isneginf([-np.inf, 0., np.inf])
+ array([ True, False, False])
+
+ >>> x = np.array([-np.inf, 0., np.inf])
+ >>> y = np.array([2, 2, 2])
+ >>> np.isneginf(x, y)
+ array([1, 0, 0])
+ >>> y
+ array([1, 0, 0])
+
+ """
+ is_inf = nx.isinf(x)
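+    # Mirror of isposinf: negative infinity is an infinite value whose sign
+    # bit is set.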
+ try:
+ signbit = nx.signbit(x)
+ except TypeError as e:
+ dtype = nx.asanyarray(x).dtype
+ raise TypeError(f'This operation is not supported for {dtype} values '
+ 'because it would be ambiguous.') from e
+ else:
+ return nx.logical_and(is_inf, signbit, out)
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi b/venv/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi
new file mode 100644
index 00000000..82537e2a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/ufunclike.pyi
@@ -0,0 +1,66 @@
+from typing import Any, overload, TypeVar
+
+from numpy import floating, bool_, object_, ndarray
+from numpy._typing import (
+ NDArray,
+ _FloatLike_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeObject_co,
+)
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+__all__: list[str]
+
+@overload
+def fix( # type: ignore[misc]
+ x: _FloatLike_co,
+ out: None = ...,
+) -> floating[Any]: ...
+@overload
+def fix(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def fix(
+ x: _ArrayLikeObject_co,
+ out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def fix(
+ x: _ArrayLikeFloat_co | _ArrayLikeObject_co,
+ out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def isposinf( # type: ignore[misc]
+ x: _FloatLike_co,
+ out: None = ...,
+) -> bool_: ...
+@overload
+def isposinf(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def isposinf(
+ x: _ArrayLikeFloat_co,
+ out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def isneginf( # type: ignore[misc]
+ x: _FloatLike_co,
+ out: None = ...,
+) -> bool_: ...
+@overload
+def isneginf(
+ x: _ArrayLikeFloat_co,
+ out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def isneginf(
+ x: _ArrayLikeFloat_co,
+ out: _ArrayType,
+) -> _ArrayType: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/user_array.py b/venv/lib/python3.9/site-packages/numpy/lib/user_array.py
new file mode 100644
index 00000000..0e96b477
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/user_array.py
@@ -0,0 +1,286 @@
+"""
+Standard container-class for easy multiple-inheritance.
+
+Prefer inheriting from ndarray instead of using this class, as this class
+is not complete.
+
+"""
+from numpy.core import (
+ array, asarray, absolute, add, subtract, multiply, divide,
+ remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
+ bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
+ greater_equal, shape, reshape, arange, sin, sqrt, transpose
+)
+
+
+class container:
+ """
+ container(data, dtype=None, copy=True)
+
+ Standard container-class for easy multiple-inheritance.
+
+ Methods
+ -------
+ copy
+ tostring
+ byteswap
+ astype
+
+ """
+ def __init__(self, data, dtype=None, copy=True):
+ self.array = array(data, dtype, copy=copy)
+
+ def __repr__(self):
+ if self.ndim > 0:
+ return self.__class__.__name__ + repr(self.array)[len("array"):]
+ else:
+ return self.__class__.__name__ + "(" + repr(self.array) + ")"
+
+ def __array__(self, t=None):
+ if t:
+ return self.array.astype(t)
+ return self.array
+
+ # Array as sequence
+ def __len__(self):
+ return len(self.array)
+
+ def __getitem__(self, index):
+ return self._rc(self.array[index])
+
+ def __setitem__(self, index, value):
+ self.array[index] = asarray(value, self.dtype)
+
+ def __abs__(self):
+ return self._rc(absolute(self.array))
+
+ def __neg__(self):
+ return self._rc(-self.array)
+
+ def __add__(self, other):
+ return self._rc(self.array + asarray(other))
+
+ __radd__ = __add__
+
+ def __iadd__(self, other):
+ add(self.array, other, self.array)
+ return self
+
+ def __sub__(self, other):
+ return self._rc(self.array - asarray(other))
+
+ def __rsub__(self, other):
+ return self._rc(asarray(other) - self.array)
+
+ def __isub__(self, other):
+ subtract(self.array, other, self.array)
+ return self
+
+ def __mul__(self, other):
+ return self._rc(multiply(self.array, asarray(other)))
+
+ __rmul__ = __mul__
+
+ def __imul__(self, other):
+ multiply(self.array, other, self.array)
+ return self
+
+ def __div__(self, other):
+ return self._rc(divide(self.array, asarray(other)))
+
+ def __rdiv__(self, other):
+ return self._rc(divide(asarray(other), self.array))
+
+ def __idiv__(self, other):
+ divide(self.array, other, self.array)
+ return self
+
+ def __mod__(self, other):
+ return self._rc(remainder(self.array, other))
+
+ def __rmod__(self, other):
+ return self._rc(remainder(other, self.array))
+
+ def __imod__(self, other):
+ remainder(self.array, other, self.array)
+ return self
+
+ def __divmod__(self, other):
+ return (self._rc(divide(self.array, other)),
+ self._rc(remainder(self.array, other)))
+
+ def __rdivmod__(self, other):
+ return (self._rc(divide(other, self.array)),
+ self._rc(remainder(other, self.array)))
+
+ def __pow__(self, other):
+ return self._rc(power(self.array, asarray(other)))
+
+ def __rpow__(self, other):
+ return self._rc(power(asarray(other), self.array))
+
+ def __ipow__(self, other):
+ power(self.array, other, self.array)
+ return self
+
+ def __lshift__(self, other):
+ return self._rc(left_shift(self.array, other))
+
+ def __rshift__(self, other):
+ return self._rc(right_shift(self.array, other))
+
+ def __rlshift__(self, other):
+ return self._rc(left_shift(other, self.array))
+
+ def __rrshift__(self, other):
+ return self._rc(right_shift(other, self.array))
+
+ def __ilshift__(self, other):
+ left_shift(self.array, other, self.array)
+ return self
+
+ def __irshift__(self, other):
+ right_shift(self.array, other, self.array)
+ return self
+
+ def __and__(self, other):
+ return self._rc(bitwise_and(self.array, other))
+
+ def __rand__(self, other):
+ return self._rc(bitwise_and(other, self.array))
+
+ def __iand__(self, other):
+ bitwise_and(self.array, other, self.array)
+ return self
+
+ def __xor__(self, other):
+ return self._rc(bitwise_xor(self.array, other))
+
+ def __rxor__(self, other):
+ return self._rc(bitwise_xor(other, self.array))
+
+ def __ixor__(self, other):
+ bitwise_xor(self.array, other, self.array)
+ return self
+
+ def __or__(self, other):
+ return self._rc(bitwise_or(self.array, other))
+
+ def __ror__(self, other):
+ return self._rc(bitwise_or(other, self.array))
+
+ def __ior__(self, other):
+ bitwise_or(self.array, other, self.array)
+ return self
+
+ def __pos__(self):
+ return self._rc(self.array)
+
+ def __invert__(self):
+ return self._rc(invert(self.array))
+
+ def _scalarfunc(self, func):
+ if self.ndim == 0:
+ return func(self[0])
+ else:
+ raise TypeError(
+ "only rank-0 arrays can be converted to Python scalars.")
+
+ def __complex__(self):
+ return self._scalarfunc(complex)
+
+ def __float__(self):
+ return self._scalarfunc(float)
+
+ def __int__(self):
+ return self._scalarfunc(int)
+
+ def __hex__(self):
+ return self._scalarfunc(hex)
+
+ def __oct__(self):
+ return self._scalarfunc(oct)
+
+ def __lt__(self, other):
+ return self._rc(less(self.array, other))
+
+ def __le__(self, other):
+ return self._rc(less_equal(self.array, other))
+
+ def __eq__(self, other):
+ return self._rc(equal(self.array, other))
+
+ def __ne__(self, other):
+ return self._rc(not_equal(self.array, other))
+
+ def __gt__(self, other):
+ return self._rc(greater(self.array, other))
+
+ def __ge__(self, other):
+ return self._rc(greater_equal(self.array, other))
+
+ def copy(self):
+ ""
+ return self._rc(self.array.copy())
+
+ def tostring(self):
+ ""
+ return self.array.tostring()
+
+ def tobytes(self):
+ ""
+ return self.array.tobytes()
+
+ def byteswap(self):
+ ""
+ return self._rc(self.array.byteswap())
+
+ def astype(self, typecode):
+ ""
+ return self._rc(self.array.astype(typecode))
+
+ def _rc(self, a):
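+        # "Re-container" helper: zero-dimensional results pass through as
+        # scalars, anything else is wrapped back into this container class.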
+ if len(shape(a)) == 0:
+ return a
+ else:
+ return self.__class__(a)
+
+ def __array_wrap__(self, *args):
+ return self.__class__(args[0])
+
+ def __setattr__(self, attr, value):
+ if attr == 'array':
+ object.__setattr__(self, attr, value)
+ return
+ try:
+ self.array.__setattr__(attr, value)
+ except AttributeError:
+ object.__setattr__(self, attr, value)
+
+ # Only called after other approaches fail.
+ def __getattr__(self, attr):
+ if (attr == 'array'):
+ return object.__getattribute__(self, attr)
+ return self.array.__getattribute__(attr)
+
+#############################################################
+# Test of class container
+#############################################################
+if __name__ == '__main__':
+ temp = reshape(arange(10000), (100, 100))
+
+ ua = container(temp)
+ # new object created begin test
+ print(dir(ua))
+ print(shape(ua), ua.shape) # I have changed Numeric.py
+
+ ua_small = ua[:3, :5]
+ print(ua_small)
+ # this did not change ua[0,0], which is not normal behavior
+ ua_small[0, 0] = 10
+ print(ua_small[0, 0], ua[0, 0])
+ print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
+ print(less(ua_small, 103), type(less(ua_small, 103)))
+ print(type(ua_small * reshape(arange(15), shape(ua_small))))
+ print(reshape(ua_small, (5, 3)))
+ print(transpose(ua_small))
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/utils.py b/venv/lib/python3.9/site-packages/numpy/lib/utils.py
new file mode 100644
index 00000000..afde8cc6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/utils.py
@@ -0,0 +1,1148 @@
+import os
+import sys
+import textwrap
+import types
+import re
+import warnings
+import functools
+
+from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
+from numpy.core.overrides import set_module
+from numpy.core import ndarray, ufunc, asarray
+import numpy as np
+
+__all__ = [
+ 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
+ 'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
+ 'lookfor', 'byte_bounds', 'safe_eval', 'show_runtime'
+ ]
+
+
+def show_runtime():
+ """
+    Print information about various resources in the system,
+    including available intrinsic support and the BLAS/LAPACK
+    library in use.
+
+ See Also
+ --------
+ show_config : Show libraries in the system on which NumPy was built.
+
+ Notes
+ -----
+    1. Information is derived with the help of the `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
+       library.
+    2. SIMD related information is derived from ``__cpu_features__``,
+       ``__cpu_baseline__`` and ``__cpu_dispatch__``.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.show_runtime()
+ [{'simd_extensions': {'baseline': ['SSE', 'SSE2', 'SSE3'],
+ 'found': ['SSSE3',
+ 'SSE41',
+ 'POPCNT',
+ 'SSE42',
+ 'AVX',
+ 'F16C',
+ 'FMA3',
+ 'AVX2'],
+ 'not_found': ['AVX512F',
+ 'AVX512CD',
+ 'AVX512_KNL',
+ 'AVX512_KNM',
+ 'AVX512_SKX',
+ 'AVX512_CLX',
+ 'AVX512_CNL',
+ 'AVX512_ICL']}},
+ {'architecture': 'Zen',
+ 'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.20.so',
+ 'internal_api': 'openblas',
+ 'num_threads': 12,
+ 'prefix': 'libopenblas',
+ 'threading_layer': 'pthreads',
+ 'user_api': 'blas',
+ 'version': '0.3.20'}]
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+ from pprint import pprint
+ config_found = []
+ features_found, features_not_found = [], []
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ features_found.append(feature)
+ else:
+ features_not_found.append(feature)
+ config_found.append({
+ "simd_extensions": {
+ "baseline": __cpu_baseline__,
+ "found": features_found,
+ "not_found": features_not_found
+ }
+ })
+ try:
+ from threadpoolctl import threadpool_info
+ config_found.extend(threadpool_info())
+ except ImportError:
+ print("WARNING: `threadpoolctl` not found in system!"
+ " Install it by `pip install threadpoolctl`."
+ " Once installed, try `np.show_runtime` again"
+ " for more detailed build information")
+ pprint(config_found)
+
+
+def get_include():
+ """
+ Return the directory that contains the NumPy \\*.h header files.
+
+ Extension modules that need to compile against NumPy should use this
+ function to locate the appropriate include directory.
+
+ Notes
+ -----
+ When using ``distutils``, for example in ``setup.py``::
+
+ import numpy as np
+ ...
+ Extension('extension_name', ...
+ include_dirs=[np.get_include()])
+ ...
+
+ """
+ import numpy
+ if numpy.show_config is None:
+ # running from numpy source directory
+ d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
+ else:
+ # using installed numpy core headers
+ import numpy.core as core
+ d = os.path.join(os.path.dirname(core.__file__), 'include')
+ return d
+
+
+class _Deprecate:
+ """
+ Decorator class to deprecate old functions.
+
+ Refer to `deprecate` for details.
+
+ See Also
+ --------
+ deprecate
+
+ """
+
+ def __init__(self, old_name=None, new_name=None, message=None):
+ self.old_name = old_name
+ self.new_name = new_name
+ self.message = message
+
+ def __call__(self, func, *args, **kwargs):
+ """
+        Decorator call. Refer to ``deprecate``.
+
+ """
+ old_name = self.old_name
+ new_name = self.new_name
+ message = self.message
+
+ if old_name is None:
+ old_name = func.__name__
+ if new_name is None:
+ depdoc = "`%s` is deprecated!" % old_name
+ else:
+ depdoc = "`%s` is deprecated, use `%s` instead!" % \
+ (old_name, new_name)
+
+ if message is not None:
+ depdoc += "\n" + message
+
+ @functools.wraps(func)
+ def newfunc(*args, **kwds):
+ warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
+ return func(*args, **kwds)
+
+ newfunc.__name__ = old_name
+ doc = func.__doc__
+ if doc is None:
+ doc = depdoc
+ else:
+ lines = doc.expandtabs().split('\n')
+ indent = _get_indent(lines[1:])
+ if lines[0].lstrip():
+ # Indent the original first line to let inspect.cleandoc()
+ # dedent the docstring despite the deprecation notice.
+ doc = indent * ' ' + doc
+ else:
+ # Remove the same leading blank lines as cleandoc() would.
+ skip = len(lines[0]) + 1
+ for line in lines[1:]:
+ if len(line) > indent:
+ break
+ skip += len(line) + 1
+ doc = doc[skip:]
+ depdoc = textwrap.indent(depdoc, ' ' * indent)
+ doc = '\n\n'.join([depdoc, doc])
+ newfunc.__doc__ = doc
+
+ return newfunc
+
+
+def _get_indent(lines):
+ """
+ Determines the leading whitespace that could be removed from all the lines.
+ """
+ indent = sys.maxsize
+ for line in lines:
+ content = len(line.lstrip())
+ if content:
+ indent = min(indent, len(line) - content)
+ if indent == sys.maxsize:
+ indent = 0
+ return indent
+
+
+def deprecate(*args, **kwargs):
+ """
+    Issues a DeprecationWarning, adds a warning to `old_name`'s
+    docstring, rebinds ``old_name.__name__`` and returns the new
+    function object.
+
+ This function may also be used as a decorator.
+
+ Parameters
+ ----------
+ func : function
+ The function to be deprecated.
+ old_name : str, optional
+ The name of the function to be deprecated. Default is None, in
+ which case the name of `func` is used.
+ new_name : str, optional
+ The new name for the function. Default is None, in which case the
+ deprecation message is that `old_name` is deprecated. If given, the
+ deprecation message is that `old_name` is deprecated and `new_name`
+ should be used instead.
+ message : str, optional
+ Additional explanation of the deprecation. Displayed in the
+ docstring after the warning.
+
+ Returns
+ -------
+ old_func : function
+ The deprecated function.
+
+ Examples
+ --------
+ Note that ``olduint`` returns a value after printing Deprecation
+ Warning:
+
+ >>> olduint = np.deprecate(np.uint)
+ DeprecationWarning: `uint64` is deprecated! # may vary
+ >>> olduint(6)
+ 6
+
+ """
+ # Deprecate may be run as a function or as a decorator
+ # If run as a function, we initialise the decorator class
+ # and execute its __call__ method.
+
+ if args:
+ fn = args[0]
+ args = args[1:]
+
+ return _Deprecate(*args, **kwargs)(fn)
+ else:
+ return _Deprecate(*args, **kwargs)
+
+
+def deprecate_with_doc(msg):
+ """
+ Deprecates a function and includes the deprecation in its docstring.
+
+    This function is used as a decorator. It returns an object that, when
+    called with the to-be-decorated function as its argument, issues a
+    DeprecationWarning, adds the warning to that function's docstring, and
+    returns the new function object.
+
+ See Also
+ --------
+ deprecate : Decorate a function such that it issues a `DeprecationWarning`
+
+ Parameters
+ ----------
+ msg : str
+ Additional explanation of the deprecation. Displayed in the
+ docstring after the warning.
+
+ Returns
+ -------
+ obj : object
+
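+    Examples
+    --------
+    A usage sketch (added for illustration):
+
+    >>> @np.deprecate_with_doc("Use `new_func` instead.")
+    ... def old_func(x):
+    ...     return x
+    ...
+    >>> print(old_func.__doc__)
+    `old_func` is deprecated!
+    Use `new_func` instead.
+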
+ """
+ return _Deprecate(message=msg)
+
+
+#--------------------------------------------
+# Determine if two arrays can share memory
+#--------------------------------------------
+
+def byte_bounds(a):
+ """
+ Returns pointers to the end-points of an array.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array. It must conform to the Python-side of the array
+ interface.
+
+ Returns
+ -------
+ (low, high) : tuple of 2 integers
+ The first integer is the first byte of the array, the second
+ integer is just past the last byte of the array. If `a` is not
+ contiguous it will not use every byte between the (`low`, `high`)
+ values.
+
+ Examples
+ --------
+ >>> I = np.eye(2, dtype='f'); I.dtype
+ dtype('float32')
+ >>> low, high = np.byte_bounds(I)
+ >>> high - low == I.size*I.itemsize
+ True
+ >>> I = np.eye(2); I.dtype
+ dtype('float64')
+ >>> low, high = np.byte_bounds(I)
+ >>> high - low == I.size*I.itemsize
+ True
+
+ """
+ ai = a.__array_interface__
+ a_data = ai['data'][0]
+ astrides = ai['strides']
+ ashape = ai['shape']
+ bytes_a = asarray(a).dtype.itemsize
+
+ a_low = a_high = a_data
+ if astrides is None:
+ # contiguous case
+ a_high += a.size * bytes_a
+ else:
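+        # Negative strides extend the block below the data pointer and
+        # positive strides extend it above; the final itemsize accounts for
+        # the last element itself.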
+ for shape, stride in zip(ashape, astrides):
+ if stride < 0:
+ a_low += (shape-1)*stride
+ else:
+ a_high += (shape-1)*stride
+ a_high += bytes_a
+ return a_low, a_high
+
+
+#-----------------------------------------------------------------------------
+# Function for output and information on the variables used.
+#-----------------------------------------------------------------------------
+
+
+def who(vardict=None):
+ """
+ Print the NumPy arrays in the given dictionary.
+
+    If no dictionary is passed in, or `vardict` is None, the NumPy arrays
+    in the globals() dictionary (all NumPy arrays in the namespace) are
+    printed.
+
+ Parameters
+ ----------
+ vardict : dict, optional
+ A dictionary possibly containing ndarrays. Default is globals().
+
+ Returns
+ -------
+ out : None
+ Returns 'None'.
+
+ Notes
+ -----
+ Prints out the name, shape, bytes and type of all of the ndarrays
+ present in `vardict`.
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> b = np.ones(20)
+ >>> np.who()
+ Name Shape Bytes Type
+ ===========================================================
+ a 10 80 int64
+ b 20 160 float64
+ Upper bound on total bytes = 240
+
+ >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
+ ... 'idx':5}
+ >>> np.who(d)
+ Name Shape Bytes Type
+ ===========================================================
+ x 2 16 float64
+ y 3 24 float64
+ Upper bound on total bytes = 40
+
+ """
+ if vardict is None:
+ frame = sys._getframe().f_back
+ vardict = frame.f_globals
+ sta = []
+ cache = {}
+ for name in vardict.keys():
+ if isinstance(vardict[name], ndarray):
+ var = vardict[name]
+ idv = id(var)
+ if idv in cache.keys():
+ namestr = name + " (%s)" % cache[idv]
+ original = 0
+ else:
+ cache[idv] = name
+ namestr = name
+ original = 1
+ shapestr = " x ".join(map(str, var.shape))
+ bytestr = str(var.nbytes)
+ sta.append([namestr, shapestr, bytestr, var.dtype.name,
+ original])
+
+ maxname = 0
+ maxshape = 0
+ maxbyte = 0
+ totalbytes = 0
+ for val in sta:
+ if maxname < len(val[0]):
+ maxname = len(val[0])
+ if maxshape < len(val[1]):
+ maxshape = len(val[1])
+ if maxbyte < len(val[2]):
+ maxbyte = len(val[2])
+ if val[4]:
+ totalbytes += int(val[2])
+
+ if len(sta) > 0:
+ sp1 = max(10, maxname)
+ sp2 = max(10, maxshape)
+ sp3 = max(10, maxbyte)
+ prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
+ print(prval + "\n" + "="*(len(prval)+5) + "\n")
+
+ for val in sta:
+ print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
+ val[1], ' '*(sp2-len(val[1])+5),
+ val[2], ' '*(sp3-len(val[2])+5),
+ val[3]))
+ print("\nUpper bound on total bytes = %d" % totalbytes)
+ return
+
+#-----------------------------------------------------------------------------
+
+
+# NOTE: pydoc defines a help function which works similarly to this
+# except it uses a pager to take over the screen.
+
+# combine name and arguments and split to multiple lines of width
+# characters. End lines on a comma and begin argument list indented with
+# the rest of the arguments.
+def _split_line(name, arguments, width):
+ firstwidth = len(name)
+ k = firstwidth
+ newstr = name
+ sepstr = ", "
+ arglist = arguments.split(sepstr)
+ for argument in arglist:
+ if k == firstwidth:
+ addstr = ""
+ else:
+ addstr = sepstr
+ k = k + len(argument) + len(addstr)
+ if k > width:
+ k = firstwidth + 1 + len(argument)
+ newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
+ else:
+ newstr = newstr + addstr + argument
+ return newstr
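+
+# Illustrative sketch of the wrapping rule above, using a made-up signature:
+# lines break after a comma once `width` is exceeded, and continuation lines
+# are indented two columns past the name.
+#
+# >>> print(_split_line("polyval", "(p, x, y, z, w)", 12))
+# polyval(p, x,
+#          y, z,
+#          w)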
+
+_namedict = None
+_dictlist = None
+
+# Traverse all modules reachable from the given module's namespace,
+# building a mapping from module name to its __dict__.
+def _makenamedict(module='numpy'):
+ module = __import__(module, globals(), locals(), [])
+ thedict = {module.__name__:module.__dict__}
+ dictlist = [module.__name__]
+ totraverse = [module.__dict__]
+ while True:
+ if len(totraverse) == 0:
+ break
+ thisdict = totraverse.pop(0)
+ for x in thisdict.keys():
+ if isinstance(thisdict[x], types.ModuleType):
+ modname = thisdict[x].__name__
+ if modname not in dictlist:
+ moddict = thisdict[x].__dict__
+ dictlist.append(modname)
+ totraverse.append(moddict)
+ thedict[modname] = moddict
+ return thedict, dictlist
+
+
+def _info(obj, output=None):
+ """Provide information about ndarray obj.
+
+ Parameters
+ ----------
+ obj : ndarray
+ Must be ndarray, not checked.
+    output : file-like, optional
+        Where the printed output goes. Defaults to ``sys.stdout``.
+
+ Notes
+ -----
+ Copied over from the numarray module prior to its removal.
+ Adapted somewhat as only numpy is an option now.
+
+ Called by info.
+
+ """
+ extra = ""
+ tic = ""
+ bp = lambda x: x
+ cls = getattr(obj, '__class__', type(obj))
+ nm = getattr(cls, '__name__', cls)
+ strides = obj.strides
+ endian = obj.dtype.byteorder
+
+ if output is None:
+ output = sys.stdout
+
+ print("class: ", nm, file=output)
+ print("shape: ", obj.shape, file=output)
+ print("strides: ", strides, file=output)
+ print("itemsize: ", obj.itemsize, file=output)
+ print("aligned: ", bp(obj.flags.aligned), file=output)
+ print("contiguous: ", bp(obj.flags.contiguous), file=output)
+ print("fortran: ", obj.flags.fortran, file=output)
+ print(
+ "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
+ file=output
+ )
+ print("byteorder: ", end=' ', file=output)
+ if endian in ['|', '=']:
+ print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
+ byteswap = False
+ elif endian == '>':
+ print("%sbig%s" % (tic, tic), file=output)
+ byteswap = sys.byteorder != "big"
+ else:
+ print("%slittle%s" % (tic, tic), file=output)
+ byteswap = sys.byteorder != "little"
+ print("byteswap: ", bp(byteswap), file=output)
+ print("type: %s" % obj.dtype, file=output)
+
+
+@set_module('numpy')
+def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
+ """
+ Get help information for a function, class, or module.
+
+ Parameters
+ ----------
+ object : object or str, optional
+ Input object or name to get information about. If `object` is a
+ numpy object, its docstring is given. If it is a string, available
+ modules are searched for matching objects. If None, information
+ about `info` itself is returned.
+ maxwidth : int, optional
+ Printing width.
+ output : file like object, optional
+ File like object that the output is written to, default is
+ ``None``, in which case ``sys.stdout`` will be used.
+ The object has to be opened in 'w' or 'a' mode.
+ toplevel : str, optional
+ Start search at this level.
+
+ See Also
+ --------
+ source, lookfor
+
+ Notes
+ -----
+ When used interactively with an object, ``np.info(obj)`` is equivalent
+ to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
+ prompt.
+
+ Examples
+ --------
+ >>> np.info(np.polyval) # doctest: +SKIP
+ polyval(p, x)
+ Evaluate the polynomial p at x.
+ ...
+
+ When using a string for `object` it is possible to get multiple results.
+
+ >>> np.info('fft') # doctest: +SKIP
+ *** Found in numpy ***
+ Core FFT routines
+ ...
+ *** Found in numpy.fft ***
+ fft(a, n=None, axis=-1)
+ ...
+ *** Repeat reference found in numpy.fft.fftpack ***
+ *** Total of 3 references found. ***
+
+ """
+ global _namedict, _dictlist
+ # Local import to speed up numpy's import time.
+ import pydoc
+ import inspect
+
+ if (hasattr(object, '_ppimport_importer') or
+ hasattr(object, '_ppimport_module')):
+ object = object._ppimport_module
+ elif hasattr(object, '_ppimport_attr'):
+ object = object._ppimport_attr
+
+ if output is None:
+ output = sys.stdout
+
+ if object is None:
+ info(info)
+ elif isinstance(object, ndarray):
+ _info(object, output=output)
+ elif isinstance(object, str):
+ if _namedict is None:
+ _namedict, _dictlist = _makenamedict(toplevel)
+ numfound = 0
+ objlist = []
+ for namestr in _dictlist:
+ try:
+ obj = _namedict[namestr][object]
+ if id(obj) in objlist:
+ print("\n "
+ "*** Repeat reference found in %s *** " % namestr,
+ file=output
+ )
+ else:
+ objlist.append(id(obj))
+ print(" *** Found in %s ***" % namestr, file=output)
+ info(obj)
+ print("-"*maxwidth, file=output)
+ numfound += 1
+ except KeyError:
+ pass
+ if numfound == 0:
+ print("Help for %s not found." % object, file=output)
+ else:
+ print("\n "
+ "*** Total of %d references found. ***" % numfound,
+ file=output
+ )
+
+ elif inspect.isfunction(object) or inspect.ismethod(object):
+ name = object.__name__
+ try:
+ arguments = str(inspect.signature(object))
+ except Exception:
+ arguments = "()"
+
+ if len(name+arguments) > maxwidth:
+ argstr = _split_line(name, arguments, maxwidth)
+ else:
+ argstr = name + arguments
+
+ print(" " + argstr + "\n", file=output)
+ print(inspect.getdoc(object), file=output)
+
+ elif inspect.isclass(object):
+ name = object.__name__
+ try:
+ arguments = str(inspect.signature(object))
+ except Exception:
+ arguments = "()"
+
+ if len(name+arguments) > maxwidth:
+ argstr = _split_line(name, arguments, maxwidth)
+ else:
+ argstr = name + arguments
+
+ print(" " + argstr + "\n", file=output)
+ doc1 = inspect.getdoc(object)
+ if doc1 is None:
+ if hasattr(object, '__init__'):
+ print(inspect.getdoc(object.__init__), file=output)
+ else:
+ print(inspect.getdoc(object), file=output)
+
+ methods = pydoc.allmethods(object)
+
+ public_methods = [meth for meth in methods if meth[0] != '_']
+ if public_methods:
+ print("\n\nMethods:\n", file=output)
+ for meth in public_methods:
+ thisobj = getattr(object, meth, None)
+ if thisobj is not None:
+ methstr, other = pydoc.splitdoc(
+ inspect.getdoc(thisobj) or "None"
+ )
+ print(" %s -- %s" % (meth, methstr), file=output)
+
+ elif hasattr(object, '__doc__'):
+ print(inspect.getdoc(object), file=output)
+
+
+@set_module('numpy')
+def source(object, output=sys.stdout):
+ """
+ Print or write to a file the source code for a NumPy object.
+
+ The source code is only returned for objects written in Python. Many
+ functions and classes are defined in C and will therefore not return
+ useful information.
+
+ Parameters
+ ----------
+ object : numpy object
+ Input object. This can be any object (function, class, module,
+ ...).
+ output : file object, optional
+        If `output` is not supplied then the source code is printed to the
+        screen (``sys.stdout``). The file object must be opened in either
+        write ('w') or append ('a') mode.
+
+ See Also
+ --------
+ lookfor, info
+
+ Examples
+ --------
+ >>> np.source(np.interp) #doctest: +SKIP
+ In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
+ def interp(x, xp, fp, left=None, right=None):
+ \"\"\".... (full docstring printed)\"\"\"
+ if isinstance(x, (float, int, number)):
+ return compiled_interp([x], xp, fp, left, right).item()
+ else:
+ return compiled_interp(x, xp, fp, left, right)
+
+ The source code is only returned for objects written in Python.
+
+ >>> np.source(np.array) #doctest: +SKIP
+ Not available for this object.
+
+ """
+ # Local import to speed up numpy's import time.
+ import inspect
+ try:
+ print("In file: %s\n" % inspect.getsourcefile(object), file=output)
+ print(inspect.getsource(object), file=output)
+ except Exception:
+ print("Not available for this object.", file=output)
+
+
+# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
+# where kind: "func", "class", "module", "object"
+# and index: index in breadth-first namespace traversal
+_lookfor_caches = {}
+
+# regexp whose match indicates that the string may contain a function
+# signature
+_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
+
+
+@set_module('numpy')
+def lookfor(what, module=None, import_modules=True, regenerate=False,
+ output=None):
+ """
+ Do a keyword search on docstrings.
+
+ A list of objects that matched the search is displayed,
+ sorted by relevance. All given keywords need to be found in the
+ docstring for it to be returned as a result, but the order does
+ not matter.
+
+ Parameters
+ ----------
+ what : str
+ String containing words to look for.
+ module : str or list, optional
+ Name of module(s) whose docstrings to go through.
+ import_modules : bool, optional
+ Whether to import sub-modules in packages. Default is True.
+ regenerate : bool, optional
+ Whether to re-generate the docstring cache. Default is False.
+ output : file-like, optional
+ File-like object to write the output to. If omitted, use a pager.
+
+ See Also
+ --------
+ source, info
+
+ Notes
+ -----
+ Relevance is determined only roughly, by checking if the keywords occur
+ in the function name, at the start of a docstring, etc.
+
+ Examples
+ --------
+ >>> np.lookfor('binary representation') # doctest: +SKIP
+ Search results for 'binary representation'
+ ------------------------------------------
+ numpy.binary_repr
+ Return the binary representation of the input number as a string.
+ numpy.core.setup_common.long_double_representation
+ Given a binary dump as given by GNU od -b, look for long double
+ numpy.base_repr
+ Return a string representation of a number in the given base system.
+ ...
+
+ """
+ import pydoc
+
+ # Cache
+ cache = _lookfor_generate_cache(module, import_modules, regenerate)
+
+ # Search
+ # XXX: maybe using a real stemming search engine would be better?
+ found = []
+ whats = str(what).lower().split()
+ if not whats:
+ return
+
+ for name, (docstring, kind, index) in cache.items():
+ if kind in ('module', 'object'):
+ # don't show modules or objects
+ continue
+ doc = docstring.lower()
+ if all(w in doc for w in whats):
+ found.append(name)
+
+ # Relevance sort
+ # XXX: this is full Harrison-Stetson heuristics now,
+ # XXX: it probably could be improved
+
+ kind_relevance = {'func': 1000, 'class': 1000,
+ 'module': -1000, 'object': -1000}
+
+ def relevance(name, docstr, kind, index):
+ r = 0
+ # do the keywords occur within the start of the docstring?
+ first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
+ r += sum([200 for w in whats if w in first_doc])
+ # do the keywords occur in the function name?
+ r += sum([30 for w in whats if w in name])
+ # is the full name long?
+ r += -len(name) * 5
+ # is the object of bad type?
+ r += kind_relevance.get(kind, -1000)
+ # is the object deep in namespace hierarchy?
+ r += -name.count('.') * 10
+ r += max(-index / 100, -100)
+ return r
+
+ def relevance_value(a):
+ return relevance(a, *cache[a])
+ found.sort(key=relevance_value)
+
+ # Pretty-print
+ s = "Search results for '%s'" % (' '.join(whats))
+ help_text = [s, "-"*len(s)]
+ for name in found[::-1]:
+ doc, kind, ix = cache[name]
+
+ doclines = [line.strip() for line in doc.strip().split("\n")
+ if line.strip()]
+
+ # find a suitable short description
+ try:
+ first_doc = doclines[0].strip()
+ if _function_signature_re.search(first_doc):
+ first_doc = doclines[1].strip()
+ except IndexError:
+ first_doc = ""
+ help_text.append("%s\n %s" % (name, first_doc))
+
+ if not found:
+ help_text.append("Nothing found.")
+
+ # Output
+ if output is not None:
+ output.write("\n".join(help_text))
+ elif len(help_text) > 10:
+ pager = pydoc.getpager()
+ pager("\n".join(help_text))
+ else:
+ print("\n".join(help_text))
+
+def _lookfor_generate_cache(module, import_modules, regenerate):
+ """
+ Generate docstring cache for given module.
+
+ Parameters
+ ----------
+ module : str, None, module
+ Module for which to generate docstring cache
+ import_modules : bool
+ Whether to import sub-modules in packages.
+ regenerate : bool
+ Re-generate the docstring cache
+
+ Returns
+ -------
+ cache : dict {obj_full_name: (docstring, kind, index), ...}
+ Docstring cache for the module, either cached one (regenerate=False)
+ or newly generated.
+
+ """
+ # Local import to speed up numpy's import time.
+ import inspect
+
+ from io import StringIO
+
+ if module is None:
+ module = "numpy"
+
+ if isinstance(module, str):
+ try:
+ __import__(module)
+ except ImportError:
+ return {}
+ module = sys.modules[module]
+ elif isinstance(module, list) or isinstance(module, tuple):
+ cache = {}
+ for mod in module:
+ cache.update(_lookfor_generate_cache(mod, import_modules,
+ regenerate))
+ return cache
+
+ if id(module) in _lookfor_caches and not regenerate:
+ return _lookfor_caches[id(module)]
+
+ # walk items and collect docstrings
+ cache = {}
+ _lookfor_caches[id(module)] = cache
+ seen = {}
+ index = 0
+ stack = [(module.__name__, module)]
+ while stack:
+ name, item = stack.pop(0)
+ if id(item) in seen:
+ continue
+ seen[id(item)] = True
+
+ index += 1
+ kind = "object"
+
+ if inspect.ismodule(item):
+ kind = "module"
+ try:
+ _all = item.__all__
+ except AttributeError:
+ _all = None
+
+ # import sub-packages
+ if import_modules and hasattr(item, '__path__'):
+ for pth in item.__path__:
+ for mod_path in os.listdir(pth):
+ this_py = os.path.join(pth, mod_path)
+ init_py = os.path.join(pth, mod_path, '__init__.py')
+ if (os.path.isfile(this_py) and
+ mod_path.endswith('.py')):
+ to_import = mod_path[:-3]
+ elif os.path.isfile(init_py):
+ to_import = mod_path
+ else:
+ continue
+ if to_import == '__init__':
+ continue
+
+ try:
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ try:
+ sys.stdout = StringIO()
+ sys.stderr = StringIO()
+ __import__("%s.%s" % (name, to_import))
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+ except KeyboardInterrupt:
+ # Assume keyboard interrupt came from a user
+ raise
+ except BaseException:
+ # Ignore also SystemExit and pytests.importorskip
+ # `Skipped` (these are BaseExceptions; gh-22345)
+ continue
+
+ for n, v in _getmembers(item):
+ try:
+ item_name = getattr(v, '__name__', "%s.%s" % (name, n))
+ mod_name = getattr(v, '__module__', None)
+ except NameError:
+ # ref. SWIG's global cvars
+ # NameError: Unknown C global variable
+ item_name = "%s.%s" % (name, n)
+ mod_name = None
+ if '.' not in item_name and mod_name:
+ item_name = "%s.%s" % (mod_name, item_name)
+
+ if not item_name.startswith(name + '.'):
+ # don't crawl "foreign" objects
+ if isinstance(v, ufunc):
+ # ... unless they are ufuncs
+ pass
+ else:
+ continue
+ elif not (inspect.ismodule(v) or _all is None or n in _all):
+ continue
+ stack.append(("%s.%s" % (name, n), v))
+ elif inspect.isclass(item):
+ kind = "class"
+ for n, v in _getmembers(item):
+ stack.append(("%s.%s" % (name, n), v))
+ elif hasattr(item, "__call__"):
+ kind = "func"
+
+ try:
+ doc = inspect.getdoc(item)
+ except NameError:
+ # ref SWIG's NameError: Unknown C global variable
+ doc = None
+ if doc is not None:
+ cache[name] = (doc, kind, index)
+
+ return cache
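+
+# Illustrative sketch of the resulting cache shape (exact contents vary with
+# the numpy version; the index records breadth-first discovery order):
+#
+# >>> cache = _lookfor_generate_cache("numpy", False, False)  # doctest: +SKIP
+# >>> doc, kind, index = cache["numpy.mean"]                  # doctest: +SKIP
+# >>> kind
+# 'func'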
+
+def _getmembers(item):
+ import inspect
+ try:
+ members = inspect.getmembers(item)
+ except Exception:
+ members = [(x, getattr(item, x)) for x in dir(item)
+ if hasattr(item, x)]
+ return members
+
+
+def safe_eval(source):
+ """
+ Protected string evaluation.
+
+ Evaluate a string containing a Python literal expression without
+ allowing the execution of arbitrary non-literal code.
+
+ .. warning::
+
+        This function is identical to :py:func:`ast.literal_eval` and
+ has the same security implications. It may not always be safe
+ to evaluate large input strings.
+
+ Parameters
+ ----------
+ source : str
+ The string to evaluate.
+
+ Returns
+ -------
+ obj : object
+ The result of evaluating `source`.
+
+ Raises
+ ------
+ SyntaxError
+ If the code has invalid Python syntax, or if it contains
+ non-literal code.
+
+ Examples
+ --------
+ >>> np.safe_eval('1')
+ 1
+ >>> np.safe_eval('[1, 2, 3]')
+ [1, 2, 3]
+ >>> np.safe_eval('{"foo": ("bar", 10.0)}')
+ {'foo': ('bar', 10.0)}
+
+ >>> np.safe_eval('import os')
+ Traceback (most recent call last):
+ ...
+ SyntaxError: invalid syntax
+
+ >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
+ Traceback (most recent call last):
+ ...
+ ValueError: malformed node or string: <_ast.Call object at 0x...>
+
+ """
+ # Local import to speed up numpy's import time.
+ import ast
+ return ast.literal_eval(source)
+
+
+def _median_nancheck(data, result, axis):
+ """
+    Utility function that checks the sorted input data for NaN values in the
+    last position along `axis` and replaces the corresponding median result
+    with NaN. The input result can also be a MaskedArray.
+
+ Parameters
+ ----------
+ data : array
+ Sorted input data to median function
+ result : Array or MaskedArray
+ Result of median function.
+ axis : int
+ Axis along which the median was computed.
+
+ Returns
+ -------
+ result : scalar or ndarray
+ Median or NaN in axes which contained NaN in the input. If the input
+ was an array, NaN will be inserted in-place. If a scalar, either the
+ input itself or a scalar NaN.
+ """
+ if data.size == 0:
+ return result
+ n = np.isnan(data.take(-1, axis=axis))
+ # masked NaN values are ok
+ if np.ma.isMaskedArray(n):
+ n = n.filled(False)
+ if np.count_nonzero(n.ravel()) > 0:
+ # Without given output, it is possible that the current result is a
+ # numpy scalar, which is not writeable. If so, just return nan.
+ if isinstance(result, np.generic):
+ return data.dtype.type(np.nan)
+
+ result[n] = np.nan
+ return result
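+
+# Sketch of why checking only the last element along `axis` suffices: the
+# input is assumed sorted, and NaN sorts to the end, so any NaN in a lane
+# shows up in its final slot.
+#
+# >>> d = np.sort(np.array([[1.0, np.nan], [1.0, 2.0]]), axis=1)
+# >>> np.isnan(d.take(-1, axis=1))   # one flag per row
+# array([ True, False])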
+
+def _opt_info():
+ """
+    Returns a string containing the CPU features supported by the current
+    build.
+
+    The string format is as follows:
+        - dispatched features that are supported by the running machine
+          end with `*`.
+        - dispatched features that are *not* supported by the running machine
+          end with `?`.
+        - the remaining features represent the baseline.
+ """
+ from numpy.core._multiarray_umath import (
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
+ )
+
+ if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
+ return ''
+
+ enabled_features = ' '.join(__cpu_baseline__)
+ for feature in __cpu_dispatch__:
+ if __cpu_features__[feature]:
+ enabled_features += f" {feature}*"
+ else:
+ enabled_features += f" {feature}?"
+
+ return enabled_features
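+
+# Hypothetical output for illustration only (the actual string depends on
+# how numpy was built and on the machine it runs on): baseline features are
+# listed plain, dispatched ones carry the markers described above.
+#
+# >>> _opt_info()  # doctest: +SKIP
+# 'SSE SSE2 SSE3 SSSE3* POPCNT* AVX2* AVX512F?'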
+#-----------------------------------------------------------------------------
diff --git a/venv/lib/python3.9/site-packages/numpy/lib/utils.pyi b/venv/lib/python3.9/site-packages/numpy/lib/utils.pyi
new file mode 100644
index 00000000..52ca9277
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/lib/utils.pyi
@@ -0,0 +1,91 @@
+from ast import AST
+from collections.abc import Callable, Mapping, Sequence
+from typing import (
+ Any,
+ overload,
+ TypeVar,
+ Protocol,
+)
+
+from numpy import ndarray, generic
+
+from numpy.core.numerictypes import (
+ issubclass_ as issubclass_,
+ issubdtype as issubdtype,
+ issubsctype as issubsctype,
+)
+
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_FuncType = TypeVar("_FuncType", bound=Callable[..., Any])
+
+# A file-like object opened in `w` mode
+class _SupportsWrite(Protocol[_T_contra]):
+ def write(self, s: _T_contra, /) -> Any: ...
+
+__all__: list[str]
+
+class _Deprecate:
+ old_name: None | str
+ new_name: None | str
+ message: None | str
+ def __init__(
+ self,
+ old_name: None | str = ...,
+ new_name: None | str = ...,
+ message: None | str = ...,
+ ) -> None: ...
+ # NOTE: `__call__` can in principle take arbitrary `*args` and `**kwargs`,
+ # even though they aren't used for anything
+ def __call__(self, func: _FuncType) -> _FuncType: ...
+
+def get_include() -> str: ...
+
+@overload
+def deprecate(
+ *,
+ old_name: None | str = ...,
+ new_name: None | str = ...,
+ message: None | str = ...,
+) -> _Deprecate: ...
+@overload
+def deprecate(
+ func: _FuncType,
+ /,
+ old_name: None | str = ...,
+ new_name: None | str = ...,
+ message: None | str = ...,
+) -> _FuncType: ...
+
+def deprecate_with_doc(msg: None | str) -> _Deprecate: ...
+
+# NOTE: In practice `byte_bounds` can (potentially) take any object
+# implementing the `__array_interface__` protocol. The caveat is
+# that certain keys, marked as optional in the spec, must be present for
+# `byte_bounds`. This concerns `"strides"` and `"data"`.
+def byte_bounds(a: generic | ndarray[Any, Any]) -> tuple[int, int]: ...
+
+def who(vardict: None | Mapping[str, ndarray[Any, Any]] = ...) -> None: ...
+
+def info(
+ object: object = ...,
+ maxwidth: int = ...,
+ output: None | _SupportsWrite[str] = ...,
+ toplevel: str = ...,
+) -> None: ...
+
+def source(
+ object: object,
+ output: None | _SupportsWrite[str] = ...,
+) -> None: ...
+
+def lookfor(
+ what: str,
+ module: None | str | Sequence[str] = ...,
+ import_modules: bool = ...,
+ regenerate: bool = ...,
+    output: None | _SupportsWrite[str] = ...,
+) -> None: ...
+
+def safe_eval(source: str | AST) -> Any: ...
+
+def show_runtime() -> None: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/__init__.py b/venv/lib/python3.9/site-packages/numpy/linalg/__init__.py
new file mode 100644
index 00000000..93943de3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/__init__.py
@@ -0,0 +1,80 @@
+"""
+``numpy.linalg``
+================
+
+The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
+low level implementations of standard linear algebra algorithms. Those
+libraries may be provided by NumPy itself using C versions of a subset of their
+reference implementations but, when possible, highly optimized libraries that
+take advantage of specialized processor functionality are preferred. Examples
+of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
+are multithreaded and processor dependent, environment variables and external
+packages such as threadpoolctl may be needed to control the number of threads
+or specify the processor architecture.
+
+- OpenBLAS: https://www.openblas.net/
+- threadpoolctl: https://github.com/joblib/threadpoolctl
+
+Please note that the most-used linear algebra functions in NumPy are present in
+the main ``numpy`` namespace rather than in ``numpy.linalg``. These are:
+``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
+``einsum_path`` and ``kron``.
+
+Functions present in numpy.linalg are listed below.
+
+
+Matrix and vector products
+--------------------------
+
+ multi_dot
+ matrix_power
+
+Decompositions
+--------------
+
+ cholesky
+ qr
+ svd
+
+Matrix eigenvalues
+------------------
+
+ eig
+ eigh
+ eigvals
+ eigvalsh
+
+Norms and other numbers
+-----------------------
+
+ norm
+ cond
+ det
+ matrix_rank
+ slogdet
+
+Solving equations and inverting matrices
+----------------------------------------
+
+ solve
+ tensorsolve
+ lstsq
+ inv
+ pinv
+ tensorinv
+
+Exceptions
+----------
+
+ LinAlgError
+
+"""
+# To get sub-modules
+from . import linalg
+from .linalg import *
+
+__all__ = linalg.__all__.copy()
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/linalg/__init__.pyi
new file mode 100644
index 00000000..d9acd558
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/__init__.pyi
@@ -0,0 +1,30 @@
+from numpy.linalg.linalg import (
+ matrix_power as matrix_power,
+ solve as solve,
+ tensorsolve as tensorsolve,
+ tensorinv as tensorinv,
+ inv as inv,
+ cholesky as cholesky,
+ eigvals as eigvals,
+ eigvalsh as eigvalsh,
+ pinv as pinv,
+ slogdet as slogdet,
+ det as det,
+ svd as svd,
+ eig as eig,
+ eigh as eigh,
+ lstsq as lstsq,
+ norm as norm,
+ qr as qr,
+ cond as cond,
+ matrix_rank as matrix_rank,
+ multi_dot as multi_dot,
+)
+
+from numpy._pytesttester import PytestTester
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+class LinAlgError(Exception): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so
new file mode 100755
index 00000000..93c6a983
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/_umath_linalg.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so
new file mode 100755
index 00000000..ff74708f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/lapack_lite.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/linalg.py b/venv/lib/python3.9/site-packages/numpy/linalg/linalg.py
new file mode 100644
index 00000000..69719332
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/linalg.py
@@ -0,0 +1,2795 @@
+"""Lite version of scipy.linalg.
+
+Notes
+-----
+This module is a lite version of the linalg.py module in SciPy which
+contains high-level Python interface to the LAPACK library. The lite
+version only accesses the following LAPACK functions: dgesv, zgesv,
+dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
+zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
+"""
+
+__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
+ 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
+ 'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
+ 'LinAlgError', 'multi_dot']
+
+import functools
+import operator
+import warnings
+
+from numpy.core import (
+ array, asarray, zeros, empty, empty_like, intc, single, double,
+ csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
+ add, multiply, sqrt, sum, isfinite,
+ finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
+ atleast_2d, intp, asanyarray, object_, matmul,
+ swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
+ reciprocal
+)
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.lib.twodim_base import triu, eye
+from numpy.linalg import _umath_linalg
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.linalg')
+
+
+fortran_int = intc
+
+
+@set_module('numpy.linalg')
+class LinAlgError(Exception):
+ """
+ Generic Python-exception-derived object raised by linalg functions.
+
+ General purpose exception class, derived from Python's exception.Exception
+ class, programmatically raised in linalg functions when a Linear
+ Algebra-related condition would prevent further correct execution of the
+ function.
+
+ Parameters
+ ----------
+ None
+
+ Examples
+ --------
+ >>> from numpy import linalg as LA
+ >>> LA.inv(np.zeros((2,2)))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "...linalg.py", line 350,
+ in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
+ File "...linalg.py", line 249,
+ in solve
+ raise LinAlgError('Singular matrix')
+ numpy.linalg.LinAlgError: Singular matrix
+
+ """
+
+
+def _determine_error_states():
+ errobj = geterrobj()
+ bufsize = errobj[0]
+
+ with errstate(invalid='call', over='ignore',
+ divide='ignore', under='ignore'):
+ invalid_call_errmask = geterrobj()[1]
+
+ return [bufsize, invalid_call_errmask, None]
+
+# Dealing with errors in _umath_linalg
+_linalg_error_extobj = _determine_error_states()
+del _determine_error_states
+
+def _raise_linalgerror_singular(err, flag):
+ raise LinAlgError("Singular matrix")
+
+def _raise_linalgerror_nonposdef(err, flag):
+ raise LinAlgError("Matrix is not positive definite")
+
+def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
+ raise LinAlgError("Eigenvalues did not converge")
+
+def _raise_linalgerror_svd_nonconvergence(err, flag):
+ raise LinAlgError("SVD did not converge")
+
+def _raise_linalgerror_lstsq(err, flag):
+ raise LinAlgError("SVD did not converge in Linear Least Squares")
+
+def _raise_linalgerror_qr(err, flag):
+ raise LinAlgError("Incorrect argument found while performing "
+ "QR factorization")
+
+def get_linalg_error_extobj(callback):
+ extobj = list(_linalg_error_extobj) # make a copy
+ extobj[2] = callback
+ return extobj
+
+def _makearray(a):
+ new = asarray(a)
+ wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
+ return new, wrap
+
+def isComplexType(t):
+ return issubclass(t, complexfloating)
+
+_real_types_map = {single : single,
+ double : double,
+ csingle : single,
+ cdouble : double}
+
+_complex_types_map = {single : csingle,
+ double : cdouble,
+ csingle : csingle,
+ cdouble : cdouble}
+
+def _realType(t, default=double):
+ return _real_types_map.get(t, default)
+
+def _complexType(t, default=cdouble):
+ return _complex_types_map.get(t, default)
+
+def _commonType(*arrays):
+ # in lite version, use higher precision (always double or cdouble)
+ result_type = single
+ is_complex = False
+ for a in arrays:
+ if issubclass(a.dtype.type, inexact):
+ if isComplexType(a.dtype.type):
+ is_complex = True
+ rt = _realType(a.dtype.type, default=None)
+ if rt is None:
+ # unsupported inexact scalar
+ raise TypeError("array type %s is unsupported in linalg" %
+ (a.dtype.name,))
+ else:
+ rt = double
+ if rt is double:
+ result_type = double
+ if is_complex:
+ t = cdouble
+ result_type = _complex_types_map[result_type]
+ else:
+ t = double
+ return t, result_type
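+
+# A short sketch of the promotion rule above (names as defined in this
+# module): the computation always happens in double/cdouble, while the
+# result type keeps the precision of the inputs.
+#
+# >>> a32 = np.zeros((2, 2), dtype=np.float32)
+# >>> c64 = np.zeros((2, 2), dtype=np.complex64)
+# >>> _commonType(a32)        # (computation type, result type)
+# (<class 'numpy.float64'>, <class 'numpy.float32'>)
+# >>> _commonType(a32, c64)
+# (<class 'numpy.complex128'>, <class 'numpy.complex64'>)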
+
+
+def _to_native_byte_order(*arrays):
+ ret = []
+ for arr in arrays:
+ if arr.dtype.byteorder not in ('=', '|'):
+ ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
+ else:
+ ret.append(arr)
+ if len(ret) == 1:
+ return ret[0]
+ else:
+ return ret
+
+
+def _assert_2d(*arrays):
+ for a in arrays:
+ if a.ndim != 2:
+ raise LinAlgError('%d-dimensional array given. Array must be '
+ 'two-dimensional' % a.ndim)
+
+def _assert_stacked_2d(*arrays):
+ for a in arrays:
+ if a.ndim < 2:
+ raise LinAlgError('%d-dimensional array given. Array must be '
+ 'at least two-dimensional' % a.ndim)
+
+def _assert_stacked_square(*arrays):
+ for a in arrays:
+ m, n = a.shape[-2:]
+ if m != n:
+ raise LinAlgError('Last 2 dimensions of the array must be square')
+
+def _assert_finite(*arrays):
+ for a in arrays:
+ if not isfinite(a).all():
+ raise LinAlgError("Array must not contain infs or NaNs")
+
+def _is_empty_2d(arr):
+ # check size first for efficiency
+ return arr.size == 0 and product(arr.shape[-2:]) == 0
+
+
+def transpose(a):
+ """
+ Transpose each matrix in a stack of matrices.
+
+ Unlike np.transpose, this only swaps the last two axes, rather than all of
+ them
+
+ Parameters
+ ----------
+ a : (...,M,N) array_like
+
+ Returns
+ -------
+ aT : (...,N,M) ndarray
+ """
+ return swapaxes(a, -1, -2)
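+
+# Quick sketch: only the last two axes are swapped, so a stack of matrices
+# keeps its leading (stack) dimensions, unlike np.transpose, which would
+# reverse all axes.
+#
+# >>> transpose(np.ones((3, 4, 5))).shape
+# (3, 5, 4)
+# >>> np.transpose(np.ones((3, 4, 5))).shape
+# (5, 4, 3)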
+
+# Linear equations
+
+def _tensorsolve_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensorsolve_dispatcher)
+def tensorsolve(a, b, axes=None):
+ """
+ Solve the tensor equation ``a x = b`` for x.
+
+ It is assumed that all indices of `x` are summed over in the product,
+ together with the rightmost indices of `a`, as is done in, for example,
+ ``tensordot(a, x, axes=x.ndim)``.
+
+ Parameters
+ ----------
+ a : array_like
+ Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
+ the shape of that sub-tensor of `a` consisting of the appropriate
+ number of its rightmost indices, and must be such that
+ ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
+ 'square').
+ b : array_like
+ Right-hand tensor, which can be of any shape.
+ axes : tuple of ints, optional
+ Axes in `a` to reorder to the right, before inversion.
+ If None (default), no reordering is done.
+
+ Returns
+ -------
+ x : ndarray, shape Q
+
+ Raises
+ ------
+ LinAlgError
+ If `a` is singular or not 'square' (in the above sense).
+
+ See Also
+ --------
+ numpy.tensordot, tensorinv, numpy.einsum
+
+ Examples
+ --------
+ >>> a = np.eye(2*3*4)
+ >>> a.shape = (2*3, 4, 2, 3, 4)
+ >>> b = np.random.randn(2*3, 4)
+ >>> x = np.linalg.tensorsolve(a, b)
+ >>> x.shape
+ (2, 3, 4)
+ >>> np.allclose(np.tensordot(a, x, axes=3), b)
+ True
+
+ """
+ a, wrap = _makearray(a)
+ b = asarray(b)
+ an = a.ndim
+
+ if axes is not None:
+ allaxes = list(range(0, an))
+ for k in axes:
+ allaxes.remove(k)
+ allaxes.insert(an, k)
+ a = a.transpose(allaxes)
+
+ oldshape = a.shape[-(an-b.ndim):]
+ prod = 1
+ for k in oldshape:
+ prod *= k
+
+ if a.size != prod ** 2:
+ raise LinAlgError(
+ "Input arrays must satisfy the requirement \
+ prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
+ )
+
+ a = a.reshape(prod, prod)
+ b = b.ravel()
+ res = wrap(solve(a, b))
+ res.shape = oldshape
+ return res
+
+
+def _solve_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_solve_dispatcher)
+def solve(a, b):
+ """
+ Solve a linear matrix equation, or system of linear scalar equations.
+
+ Computes the "exact" solution, `x`, of the well-determined, i.e., full
+ rank, linear matrix equation `ax = b`.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Coefficient matrix.
+ b : {(..., M,), (..., M, K)}, array_like
+ Ordinate or "dependent variable" values.
+
+ Returns
+ -------
+ x : {(..., M,), (..., M, K)} ndarray
+ Solution to the system a x = b. Returned shape is identical to `b`.
+
+ Raises
+ ------
+ LinAlgError
+ If `a` is singular or not square.
+
+ See Also
+ --------
+ scipy.linalg.solve : Similar function in SciPy.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The solutions are computed using LAPACK routine ``_gesv``.
+
+ `a` must be square and of full-rank, i.e., all rows (or, equivalently,
+ columns) must be linearly independent; if either is not true, use
+ `lstsq` for the least-squares best "solution" of the
+ system/equation.
+
+ References
+ ----------
+ .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
+ FL, Academic Press, Inc., 1980, pg. 22.
+
+ Examples
+ --------
+ Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``:
+
+ >>> a = np.array([[1, 2], [3, 5]])
+ >>> b = np.array([1, 2])
+ >>> x = np.linalg.solve(a, b)
+ >>> x
+ array([-1., 1.])
+
+ Check that the solution is correct:
+
+ >>> np.allclose(np.dot(a, x), b)
+ True
+
+ """
+ a, _ = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ b, wrap = _makearray(b)
+ t, result_t = _commonType(a, b)
+
+ # We use the b = (..., M,) logic, only if the number of extra dimensions
+ # match exactly
+ if b.ndim == a.ndim - 1:
+ gufunc = _umath_linalg.solve1
+ else:
+ gufunc = _umath_linalg.solve
+
+ signature = 'DD->D' if isComplexType(t) else 'dd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
+ r = gufunc(a, b, signature=signature, extobj=extobj)
+
+ return wrap(r.astype(result_t, copy=False))
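+
+# Sketch of the dispatch rule above: `b` is treated as a stack of vectors
+# only when ``b.ndim == a.ndim - 1``; otherwise its last two axes are
+# interpreted as (M, K) matrices.
+#
+# >>> a = np.stack([np.eye(2)] * 3)                 # shape (3, 2, 2)
+# >>> np.linalg.solve(a, np.ones((3, 2))).shape     # stacked vectors
+# (3, 2)
+# >>> np.linalg.solve(a, np.ones((3, 2, 4))).shape  # stacked matrices
+# (3, 2, 4)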
+
+
+def _tensorinv_dispatcher(a, ind=None):
+ return (a,)
+
+
+@array_function_dispatch(_tensorinv_dispatcher)
+def tensorinv(a, ind=2):
+ """
+ Compute the 'inverse' of an N-dimensional array.
+
+ The result is an inverse for `a` relative to the tensordot operation
+    ``tensordot(a, b, ind)``, i.e., up to floating-point accuracy,
+ ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
+ tensordot operation.
+
+ Parameters
+ ----------
+ a : array_like
+        Tensor to 'invert'. Its shape must be 'square', i.e.,
+ ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
+ ind : int, optional
+ Number of first indices that are involved in the inverse sum.
+ Must be a positive integer, default is 2.
+
+ Returns
+ -------
+ b : ndarray
+ `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
+
+ Raises
+ ------
+ LinAlgError
+ If `a` is singular or not 'square' (in the above sense).
+
+ See Also
+ --------
+ numpy.tensordot, tensorsolve
+
+ Examples
+ --------
+ >>> a = np.eye(4*6)
+ >>> a.shape = (4, 6, 8, 3)
+ >>> ainv = np.linalg.tensorinv(a, ind=2)
+ >>> ainv.shape
+ (8, 3, 4, 6)
+ >>> b = np.random.randn(4, 6)
+ >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
+ True
+
+ >>> a = np.eye(4*6)
+ >>> a.shape = (24, 8, 3)
+ >>> ainv = np.linalg.tensorinv(a, ind=1)
+ >>> ainv.shape
+ (8, 3, 24)
+ >>> b = np.random.randn(24)
+ >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
+ True
+
+ """
+ a = asarray(a)
+ oldshape = a.shape
+ prod = 1
+ if ind > 0:
+ invshape = oldshape[ind:] + oldshape[:ind]
+ for k in oldshape[ind:]:
+ prod *= k
+ else:
+ raise ValueError("Invalid ind argument.")
+ a = a.reshape(prod, -1)
+ ia = inv(a)
+ return ia.reshape(*invshape)
+
+
+# Matrix inversion
+
+def _unary_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def inv(a):
+ """
+ Compute the (multiplicative) inverse of a matrix.
+
+ Given a square matrix `a`, return the matrix `ainv` satisfying
+ ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Matrix to be inverted.
+
+ Returns
+ -------
+ ainv : (..., M, M) ndarray or matrix
+ (Multiplicative) inverse of the matrix `a`.
+
+ Raises
+ ------
+ LinAlgError
+ If `a` is not square or inversion fails.
+
+ See Also
+ --------
+ scipy.linalg.inv : Similar function in SciPy.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ Examples
+ --------
+ >>> from numpy.linalg import inv
+ >>> a = np.array([[1., 2.], [3., 4.]])
+ >>> ainv = inv(a)
+ >>> np.allclose(np.dot(a, ainv), np.eye(2))
+ True
+ >>> np.allclose(np.dot(ainv, a), np.eye(2))
+ True
+
+ If a is a matrix object, then the return value is a matrix as well:
+
+ >>> ainv = inv(np.matrix(a))
+ >>> ainv
+ matrix([[-2. , 1. ],
+ [ 1.5, -0.5]])
+
+ Inverses of several matrices can be computed at once:
+
+ >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
+ >>> inv(a)
+ array([[[-2. , 1. ],
+ [ 1.5 , -0.5 ]],
+ [[-1.25, 0.75],
+ [ 0.75, -0.25]]])
+
+ """
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ t, result_t = _commonType(a)
+
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
+ ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
+ return wrap(ainv.astype(result_t, copy=False))
+
+
+def _matrix_power_dispatcher(a, n):
+ return (a,)
+
+
+@array_function_dispatch(_matrix_power_dispatcher)
+def matrix_power(a, n):
+ """
+ Raise a square matrix to the (integer) power `n`.
+
+    For positive integers `n`, the power is computed by repeated matrix
+    squarings and matrix multiplications. If ``n == 0``, the identity matrix
+    of the same shape as `a` is returned. If ``n < 0``, the inverse
+    is computed and then raised to the power ``abs(n)``.
+
+ .. note:: Stacks of object matrices are not currently supported.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Matrix to be "powered".
+ n : int
+ The exponent can be any integer or long integer, positive,
+ negative, or zero.
+
+ Returns
+ -------
+    a**n : (..., M, M) ndarray or matrix object
+        The return value is the same shape and type as `a`;
+        if the exponent is positive or zero then the type of the
+        elements is the same as those of `a`. If the exponent is
+        negative the elements are floating-point.
+
+ Raises
+ ------
+ LinAlgError
+ For matrices that are not square or that (for negative powers) cannot
+ be inverted numerically.
+
+ Examples
+ --------
+ >>> from numpy.linalg import matrix_power
+ >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
+ >>> matrix_power(i, 3) # should = -i
+ array([[ 0, -1],
+ [ 1, 0]])
+ >>> matrix_power(i, 0)
+ array([[1, 0],
+ [0, 1]])
+ >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
+ array([[ 0., 1.],
+ [-1., 0.]])
+
+ Somewhat more sophisticated example
+
+ >>> q = np.zeros((4, 4))
+ >>> q[0:2, 0:2] = -i
+ >>> q[2:4, 2:4] = i
+ >>> q # one of the three quaternion units not equal to 1
+ array([[ 0., -1., 0., 0.],
+ [ 1., 0., 0., 0.],
+ [ 0., 0., 0., 1.],
+ [ 0., 0., -1., 0.]])
+ >>> matrix_power(q, 2) # = -np.eye(4)
+ array([[-1., 0., 0., 0.],
+ [ 0., -1., 0., 0.],
+ [ 0., 0., -1., 0.],
+ [ 0., 0., 0., -1.]])
+
+ """
+ a = asanyarray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+
+ try:
+ n = operator.index(n)
+ except TypeError as e:
+ raise TypeError("exponent must be an integer") from e
+
+ # Fall back on dot for object arrays. Object arrays are not supported by
+ # the current implementation of matmul using einsum
+ if a.dtype != object:
+ fmatmul = matmul
+ elif a.ndim == 2:
+ fmatmul = dot
+ else:
+ raise NotImplementedError(
+ "matrix_power not supported for stacks of object arrays")
+
+ if n == 0:
+ a = empty_like(a)
+ a[...] = eye(a.shape[-2], dtype=a.dtype)
+ return a
+
+ elif n < 0:
+ a = inv(a)
+ n = abs(n)
+
+ # short-cuts.
+ if n == 1:
+ return a
+
+ elif n == 2:
+ return fmatmul(a, a)
+
+ elif n == 3:
+ return fmatmul(fmatmul(a, a), a)
+
+ # Use binary decomposition to reduce the number of matrix multiplications.
+ # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
+ # increasing powers of 2, and multiply into the result as needed.
+ z = result = None
+ while n > 0:
+ z = a if z is None else fmatmul(z, z)
+ n, bit = divmod(n, 2)
+ if bit:
+ result = z if result is None else fmatmul(result, z)
+
+ return result
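+
+# Sketch of the binary decomposition above for ``n == 5`` (binary 101): z
+# steps through a, a**2, a**4, and only the powers whose bit is set are
+# multiplied into `result`, so a**5 is formed as a @ a**4 with three
+# multiplications instead of four.
+#
+# >>> a = np.array([[2, 0], [0, 3]])
+# >>> np.allclose(np.linalg.matrix_power(a, 5), a @ a @ a @ a @ a)
+# True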
+
+
+# Cholesky decomposition
+
+
+@array_function_dispatch(_unary_dispatcher)
+def cholesky(a):
+ """
+ Cholesky decomposition.
+
+ Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
+ where `L` is lower-triangular and .H is the conjugate transpose operator
+ (which is the ordinary transpose if `a` is real-valued). `a` must be
+ Hermitian (symmetric if real-valued) and positive-definite. No
+ checking is performed to verify whether `a` is Hermitian or not.
+ In addition, only the lower-triangular and diagonal elements of `a`
+ are used. Only `L` is actually returned.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Hermitian (symmetric if all elements are real), positive-definite
+ input matrix.
+
+ Returns
+ -------
+ L : (..., M, M) array_like
+ Lower-triangular Cholesky factor of `a`. Returns a matrix object if
+ `a` is a matrix object.
+
+ Raises
+ ------
+ LinAlgError
+ If the decomposition fails, for example, if `a` is not
+ positive-definite.
+
+ See Also
+ --------
+ scipy.linalg.cholesky : Similar function in SciPy.
+ scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
+ positive-definite matrix.
+ scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
+ `scipy.linalg.cho_solve`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The Cholesky decomposition is often used as a fast way of solving
+
+ .. math:: A \\mathbf{x} = \\mathbf{b}
+
+ (when `A` is both Hermitian/symmetric and positive-definite).
+
+ First, we solve for :math:`\\mathbf{y}` in
+
+ .. math:: L \\mathbf{y} = \\mathbf{b},
+
+ and then for :math:`\\mathbf{x}` in
+
+ .. math:: L.H \\mathbf{x} = \\mathbf{y}.
+
+ Examples
+ --------
+ >>> A = np.array([[1,-2j],[2j,5]])
+ >>> A
+ array([[ 1.+0.j, -0.-2.j],
+ [ 0.+2.j, 5.+0.j]])
+ >>> L = np.linalg.cholesky(A)
+ >>> L
+ array([[1.+0.j, 0.+0.j],
+ [0.+2.j, 1.+0.j]])
+ >>> np.dot(L, L.T.conj()) # verify that L * L.H = A
+ array([[1.+0.j, 0.-2.j],
+ [0.+2.j, 5.+0.j]])
+ >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
+ >>> np.linalg.cholesky(A) # an ndarray object is returned
+ array([[1.+0.j, 0.+0.j],
+ [0.+2.j, 1.+0.j]])
+ >>> # But a matrix object is returned if A is a matrix object
+ >>> np.linalg.cholesky(np.matrix(A))
+ matrix([[ 1.+0.j, 0.+0.j],
+ [ 0.+2.j, 1.+0.j]])
+
+ """
+ extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
+ gufunc = _umath_linalg.cholesky_lo
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ t, result_t = _commonType(a)
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ r = gufunc(a, signature=signature, extobj=extobj)
+ return wrap(r.astype(result_t, copy=False))
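+
+# A minimal sketch of the two-step solve described in the Notes above. For
+# brevity it uses np.linalg.solve on the triangular factors; a real
+# implementation would use a dedicated triangular solver such as
+# scipy.linalg.solve_triangular.
+#
+# >>> A = np.array([[4.0, 2.0], [2.0, 3.0]])   # symmetric positive-definite
+# >>> b = np.array([1.0, 2.0])
+# >>> L = np.linalg.cholesky(A)
+# >>> y = np.linalg.solve(L, b)                # L y = b (forward solve)
+# >>> x = np.linalg.solve(L.T.conj(), y)       # L.H x = y (back solve)
+# >>> np.allclose(A @ x, b)
+# True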
+
+
+# QR decomposition
+
+def _qr_dispatcher(a, mode=None):
+ return (a,)
+
+
+@array_function_dispatch(_qr_dispatcher)
+def qr(a, mode='reduced'):
+ """
+ Compute the qr factorization of a matrix.
+
+ Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
+ upper-triangular.
+
+ Parameters
+ ----------
+ a : array_like, shape (..., M, N)
+ An array-like object with the dimensionality of at least 2.
+ mode : {'reduced', 'complete', 'r', 'raw'}, optional
+ If K = min(M, N), then
+
+ * 'reduced' : returns q, r with dimensions
+ (..., M, K), (..., K, N) (default)
+ * 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)
+ * 'r' : returns r only with dimensions (..., K, N)
+ * 'raw' : returns h, tau with dimensions (..., N, M), (..., K,)
+
+        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8;
+        see the notes for more information. The default is 'reduced', and to
+        maintain backward compatibility with earlier versions of numpy both
+        it and the old default 'full' can be omitted. Note that the array h
+        returned in 'raw' mode is transposed for calling Fortran. The
+ 'economic' mode is deprecated. The modes 'full' and 'economic' may
+ be passed using only the first letter for backwards compatibility,
+ but all others must be spelled out. See the Notes for more
+ explanation.
+
+
+ Returns
+ -------
+ q : ndarray of float or complex, optional
+ A matrix with orthonormal columns. When mode = 'complete' the
+ result is an orthogonal/unitary matrix depending on whether or not
+ a is real/complex. The determinant may be either +/- 1 in that
+ case. In case the number of dimensions in the input array is
+ greater than 2 then a stack of the matrices with above properties
+ is returned.
+ r : ndarray of float or complex, optional
+ The upper-triangular matrix or a stack of upper-triangular
+ matrices if the number of dimensions in the input array is greater
+ than 2.
+ (h, tau) : ndarrays of np.double or np.cdouble, optional
+ The array h contains the Householder reflectors that generate q
+ along with r. The tau array contains scaling factors for the
+ reflectors. In the deprecated 'economic' mode only h is returned.
+
+ Raises
+ ------
+ LinAlgError
+ If factoring fails.
+
+ See Also
+ --------
+ scipy.linalg.qr : Similar function in SciPy.
+ scipy.linalg.rq : Compute RQ decomposition of a matrix.
+
+ Notes
+ -----
+ This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
+ ``dorgqr``, and ``zungqr``.
+
+ For more information on the qr factorization, see for example:
+ https://en.wikipedia.org/wiki/QR_factorization
+
+ Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
+ `a` is of type `matrix`, all the return values will be matrices too.
+
+ New 'reduced', 'complete', and 'raw' options for mode were added in
+ NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
+ addition the options 'full' and 'economic' were deprecated. Because
+ 'full' was the previous default and 'reduced' is the new default,
+ backward compatibility can be maintained by letting `mode` default.
+ The 'raw' option was added so that LAPACK routines that can multiply
+ arrays by q using the Householder reflectors can be used. Note that in
+ this case the returned arrays are of type np.double or np.cdouble and
+ the h array is transposed to be FORTRAN compatible. No routines using
+ the 'raw' return are currently exposed by numpy, but some are available
+ in lapack_lite and just await the necessary work.
+
+ Examples
+ --------
+ >>> a = np.random.randn(9, 6)
+ >>> q, r = np.linalg.qr(a)
+ >>> np.allclose(a, np.dot(q, r)) # a does equal qr
+ True
+ >>> r2 = np.linalg.qr(a, mode='r')
+ >>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
+ True
+ >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input
+ >>> q, r = np.linalg.qr(a)
+ >>> q.shape
+ (3, 2, 2)
+ >>> r.shape
+ (3, 2, 2)
+ >>> np.allclose(a, np.matmul(q, r))
+ True
+
+ Example illustrating a common use of `qr`: solving of least squares
+ problems
+
+    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
+    the following data: {(0,1), (1,2), (1,2), (2,3)}. (Graph the points
+    and you'll see that it should be y0 = 1, m = 1.) The answer is provided
+    by solving the over-determined matrix equation ``Ax = b``, where::
+
+        A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
+        x = array([[m], [y0]])
+        b = array([[1], [2], [2], [3]])
+
+ If A = qr such that q is orthonormal (which is always possible via
+ Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
+ however, we simply use `lstsq`.)
+
+ >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
+ >>> A
+ array([[0, 1],
+ [1, 1],
+ [1, 1],
+ [2, 1]])
+ >>> b = np.array([1, 2, 2, 3])
+ >>> q, r = np.linalg.qr(A)
+ >>> p = np.dot(q.T, b)
+ >>> np.dot(np.linalg.inv(r), p)
+ array([ 1., 1.])
+
+ """
+ if mode not in ('reduced', 'complete', 'r', 'raw'):
+ if mode in ('f', 'full'):
+ # 2013-04-01, 1.8
+ msg = "".join((
+ "The 'full' option is deprecated in favor of 'reduced'.\n",
+ "For backward compatibility let mode default."))
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
+ mode = 'reduced'
+ elif mode in ('e', 'economic'):
+ # 2013-04-01, 1.8
+ msg = "The 'economic' option is deprecated."
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
+ mode = 'economic'
+ else:
+ raise ValueError(f"Unrecognized mode '{mode}'")
+
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ m, n = a.shape[-2:]
+ t, result_t = _commonType(a)
+ a = a.astype(t, copy=True)
+ a = _to_native_byte_order(a)
+ mn = min(m, n)
+
+ if m <= n:
+ gufunc = _umath_linalg.qr_r_raw_m
+ else:
+ gufunc = _umath_linalg.qr_r_raw_n
+
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
+ tau = gufunc(a, signature=signature, extobj=extobj)
+
+ # handle modes that don't return q
+ if mode == 'r':
+ r = triu(a[..., :mn, :])
+ r = r.astype(result_t, copy=False)
+ return wrap(r)
+
+ if mode == 'raw':
+ q = transpose(a)
+ q = q.astype(result_t, copy=False)
+ tau = tau.astype(result_t, copy=False)
+ return wrap(q), tau
+
+ if mode == 'economic':
+ a = a.astype(result_t, copy=False)
+ return wrap(a)
+
+ # mc is the number of columns in the resulting q
+ # matrix. If the mode is complete then it is
+ # same as number of rows, and if the mode is reduced,
+ # then it is the minimum of number of rows and columns.
+ if mode == 'complete' and m > n:
+ mc = m
+ gufunc = _umath_linalg.qr_complete
+ else:
+ mc = mn
+ gufunc = _umath_linalg.qr_reduced
+
+ signature = 'DD->D' if isComplexType(t) else 'dd->d'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_qr)
+ q = gufunc(a, tau, signature=signature, extobj=extobj)
+ r = triu(a[..., :mc, :])
+
+ q = q.astype(result_t, copy=False)
+ r = r.astype(result_t, copy=False)
+
+ return wrap(q), wrap(r)
+
+# Eigenvalues
+
+
+@array_function_dispatch(_unary_dispatcher)
+def eigvals(a):
+ """
+ Compute the eigenvalues of a general matrix.
+
+ Main difference between `eigvals` and `eig`: the eigenvectors aren't
+ returned.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ A complex- or real-valued matrix whose eigenvalues will be computed.
+
+ Returns
+ -------
+ w : (..., M,) ndarray
+ The eigenvalues, each repeated according to its multiplicity.
+ They are not necessarily ordered, nor are they necessarily
+ real for real matrices.
+
+ Raises
+ ------
+ LinAlgError
+ If the eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eig : eigenvalues and right eigenvectors of general arrays
+ eigvalsh : eigenvalues of real symmetric or complex Hermitian
+ (conjugate symmetric) arrays.
+ eigh : eigenvalues and eigenvectors of real symmetric or complex
+ Hermitian (conjugate symmetric) arrays.
+ scipy.linalg.eigvals : Similar function in SciPy.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ This is implemented using the ``_geev`` LAPACK routines which compute
+ the eigenvalues and eigenvectors of general square arrays.
+
+ Examples
+ --------
+ Illustration, using the fact that the eigenvalues of a diagonal matrix
+ are its diagonal elements, that multiplying a matrix on the left
+ by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
+ of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
+ if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
+ ``A``:
+
+ >>> from numpy import linalg as LA
+ >>> x = np.random.random()
+ >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
+ >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
+ (1.0, 1.0, 0.0)
+
+ Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
+
+ >>> D = np.diag((-1,1))
+ >>> LA.eigvals(D)
+ array([-1., 1.])
+ >>> A = np.dot(Q, D)
+ >>> A = np.dot(A, Q.T)
+ >>> LA.eigvals(A)
+ array([ 1., -1.]) # random
+
+ """
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
+ t, result_t = _commonType(a)
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ signature = 'D->D' if isComplexType(t) else 'd->D'
+ w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
+
+ if not isComplexType(t):
+ if all(w.imag == 0):
+ w = w.real
+ result_t = _realType(result_t)
+ else:
+ result_t = _complexType(result_t)
+
+ return w.astype(result_t, copy=False)
+
+
+def _eigvalsh_dispatcher(a, UPLO=None):
+ return (a,)
+
+
+@array_function_dispatch(_eigvalsh_dispatcher)
+def eigvalsh(a, UPLO='L'):
+ """
+ Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
+
+ Main difference from eigh: the eigenvectors are not computed.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ A complex- or real-valued matrix whose eigenvalues are to be
+ computed.
+ UPLO : {'L', 'U'}, optional
+ Specifies whether the calculation is done with the lower triangular
+ part of `a` ('L', default) or the upper triangular part ('U').
+ Irrespective of this value only the real parts of the diagonal will
+ be considered in the computation to preserve the notion of a Hermitian
+ matrix. It therefore follows that the imaginary part of the diagonal
+ will always be treated as zero.
+
+ Returns
+ -------
+ w : (..., M,) ndarray
+ The eigenvalues in ascending order, each repeated according to
+ its multiplicity.
+
+ Raises
+ ------
+ LinAlgError
+ If the eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
+ (conjugate symmetric) arrays.
+ eigvals : eigenvalues of general real or complex arrays.
+ eig : eigenvalues and right eigenvectors of general real or complex
+ arrays.
+ scipy.linalg.eigvalsh : Similar function in SciPy.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
+
+ Examples
+ --------
+ >>> from numpy import linalg as LA
+ >>> a = np.array([[1, -2j], [2j, 5]])
+ >>> LA.eigvalsh(a)
+ array([ 0.17157288, 5.82842712]) # may vary
+
+ >>> # demonstrate the treatment of the imaginary part of the diagonal
+ >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
+ >>> a
+ array([[5.+2.j, 9.-2.j],
+ [0.+2.j, 2.-1.j]])
+ >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
+ >>> # with:
+ >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
+ >>> b
+ array([[5.+0.j, 0.-2.j],
+ [0.+2.j, 2.+0.j]])
+ >>> wa = LA.eigvalsh(a)
+ >>> wb = LA.eigvals(b)
+ >>> wa; wb
+ array([1., 6.])
+ array([6.+0.j, 1.+0.j])
+
+ """
+ UPLO = UPLO.upper()
+ if UPLO not in ('L', 'U'):
+ raise ValueError("UPLO argument must be 'L' or 'U'")
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ if UPLO == 'L':
+ gufunc = _umath_linalg.eigvalsh_lo
+ else:
+ gufunc = _umath_linalg.eigvalsh_up
+
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ t, result_t = _commonType(a)
+ signature = 'D->d' if isComplexType(t) else 'd->d'
+ w = gufunc(a, signature=signature, extobj=extobj)
+ return w.astype(_realType(result_t), copy=False)
+
+def _convertarray(a):
+ t, result_t = _commonType(a)
+ a = a.astype(t).T.copy()
+ return a, t, result_t
+
+
+# Eigenvectors
+
+
+@array_function_dispatch(_unary_dispatcher)
+def eig(a):
+ """
+ Compute the eigenvalues and right eigenvectors of a square array.
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Matrices for which the eigenvalues and right eigenvectors will
+ be computed
+
+ Returns
+ -------
+ w : (..., M) array
+ The eigenvalues, each repeated according to its multiplicity.
+ The eigenvalues are not necessarily ordered. The resulting
+ array will be of complex type, unless the imaginary part is
+ zero in which case it will be cast to a real type. When `a`
+ is real the resulting eigenvalues will be real (0 imaginary
+ part) or occur in conjugate pairs
+
+ v : (..., M, M) array
+ The normalized (unit "length") eigenvectors, such that the
+ column ``v[:,i]`` is the eigenvector corresponding to the
+ eigenvalue ``w[i]``.
+
+ Raises
+ ------
+ LinAlgError
+ If the eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eigvals : eigenvalues of a non-symmetric array.
+ eigh : eigenvalues and eigenvectors of a real symmetric or complex
+ Hermitian (conjugate symmetric) array.
+ eigvalsh : eigenvalues of a real symmetric or complex Hermitian
+ (conjugate symmetric) array.
+ scipy.linalg.eig : Similar function in SciPy that also solves the
+ generalized eigenvalue problem.
+ scipy.linalg.schur : Best choice for unitary and other non-Hermitian
+ normal matrices.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ This is implemented using the ``_geev`` LAPACK routines which compute
+ the eigenvalues and eigenvectors of general square arrays.
+
+ The number `w` is an eigenvalue of `a` if there exists a vector
+ `v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
+ `v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
+ for :math:`i \\in \\{0,...,M-1\\}`.
+
+ The array `v` of eigenvectors may not be of maximum rank, that is, some
+ of the columns may be linearly dependent, although round-off error may
+ obscure that fact. If the eigenvalues are all different, then theoretically
+ the eigenvectors are linearly independent and `a` can be diagonalized by
+    a similarity transformation using `v`, i.e., ``inv(v) @ a @ v`` is diagonal.
+
+ For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
+ is preferred because the matrix `v` is guaranteed to be unitary, which is
+ not the case when using `eig`. The Schur factorization produces an
+ upper triangular matrix rather than a diagonal matrix, but for normal
+ matrices only the diagonal of the upper triangular matrix is needed, the
+ rest is roundoff error.
+
+ Finally, it is emphasized that `v` consists of the *right* (as in
+ right-hand side) eigenvectors of `a`. A vector `y` satisfying
+ ``y.T @ a = z * y.T`` for some number `z` is called a *left*
+ eigenvector of `a`, and, in general, the left and right eigenvectors
+ of a matrix are not necessarily the (perhaps conjugate) transposes
+ of each other.
+
+ References
+ ----------
+ G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
+ Academic Press, Inc., 1980, Various pp.
+
+ Examples
+ --------
+ >>> from numpy import linalg as LA
+
+ (Almost) trivial example with real e-values and e-vectors.
+
+ >>> w, v = LA.eig(np.diag((1, 2, 3)))
+ >>> w; v
+ array([1., 2., 3.])
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
+
+ Real matrix possessing complex e-values and e-vectors; note that the
+ e-values are complex conjugates of each other.
+
+ >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
+ >>> w; v
+ array([1.+1.j, 1.-1.j])
+ array([[0.70710678+0.j , 0.70710678-0.j ],
+ [0. -0.70710678j, 0. +0.70710678j]])
+
+ Complex-valued matrix with real e-values (but complex-valued e-vectors);
+ note that ``a.conj().T == a``, i.e., `a` is Hermitian.
+
+ >>> a = np.array([[1, 1j], [-1j, 1]])
+ >>> w, v = LA.eig(a)
+ >>> w; v
+ array([2.+0.j, 0.+0.j])
+ array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
+ [ 0.70710678+0.j , -0. +0.70710678j]])
+
+ Be careful about round-off error!
+
+ >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
+ >>> # Theor. e-values are 1 +/- 1e-9
+ >>> w, v = LA.eig(a)
+ >>> w; v
+ array([1., 1.])
+ array([[1., 0.],
+ [0., 1.]])
+
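+    As an illustrative check of the defining relation ``a @ v = v * w``:
+
+    >>> a = np.array([[0., 1.], [1., 0.]])
+    >>> w, v = LA.eig(a)
+    >>> np.allclose(a @ v, v * w)
+    True
+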
+ """
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
+ t, result_t = _commonType(a)
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ signature = 'D->DD' if isComplexType(t) else 'd->DD'
+ w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
+
+ if not isComplexType(t) and all(w.imag == 0.0):
+ w = w.real
+ vt = vt.real
+ result_t = _realType(result_t)
+ else:
+ result_t = _complexType(result_t)
+
+ vt = vt.astype(result_t, copy=False)
+ return w.astype(result_t, copy=False), wrap(vt)
+
+
+@array_function_dispatch(_eigvalsh_dispatcher)
+def eigh(a, UPLO='L'):
+ """
+ Return the eigenvalues and eigenvectors of a complex Hermitian
+ (conjugate symmetric) or a real symmetric matrix.
+
+ Returns two objects, a 1-D array containing the eigenvalues of `a`, and
+ a 2-D square array or matrix (depending on the input type) of the
+ corresponding eigenvectors (in columns).
+
+ Parameters
+ ----------
+ a : (..., M, M) array
+ Hermitian or real symmetric matrices whose eigenvalues and
+ eigenvectors are to be computed.
+ UPLO : {'L', 'U'}, optional
+ Specifies whether the calculation is done with the lower triangular
+ part of `a` ('L', default) or the upper triangular part ('U').
+        Irrespective of this value, only the real parts of the diagonal will
+ be considered in the computation to preserve the notion of a Hermitian
+ matrix. It therefore follows that the imaginary part of the diagonal
+ will always be treated as zero.
+
+ Returns
+ -------
+ w : (..., M) ndarray
+ The eigenvalues in ascending order, each repeated according to
+ its multiplicity.
+ v : {(..., M, M) ndarray, (..., M, M) matrix}
+ The column ``v[:, i]`` is the normalized eigenvector corresponding
+ to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
+ a matrix object.
+
+ Raises
+ ------
+ LinAlgError
+ If the eigenvalue computation does not converge.
+
+ See Also
+ --------
+ eigvalsh : eigenvalues of real symmetric or complex Hermitian
+ (conjugate symmetric) arrays.
+ eig : eigenvalues and right eigenvectors for non-symmetric arrays.
+ eigvals : eigenvalues of non-symmetric arrays.
+ scipy.linalg.eigh : Similar function in SciPy (but also solves the
+ generalized eigenvalue problem).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
+ ``_heevd``.
+
+ The eigenvalues of real symmetric or complex Hermitian matrices are
+ always real. [1]_ The array `v` of (column) eigenvectors is unitary
+ and `a`, `w`, and `v` satisfy the equations
+ ``dot(a, v[:, i]) = w[i] * v[:, i]``.
+
+ References
+ ----------
+ .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
+ FL, Academic Press, Inc., 1980, pg. 222.
+
+ Examples
+ --------
+ >>> from numpy import linalg as LA
+ >>> a = np.array([[1, -2j], [2j, 5]])
+ >>> a
+ array([[ 1.+0.j, -0.-2.j],
+ [ 0.+2.j, 5.+0.j]])
+ >>> w, v = LA.eigh(a)
+ >>> w; v
+ array([0.17157288, 5.82842712])
+ array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
+ [ 0. +0.38268343j, 0. -0.92387953j]])
+
+ >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
+ array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
+ >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
+ array([0.+0.j, 0.+0.j])
+
+ >>> A = np.matrix(a) # what happens if input is a matrix object
+ >>> A
+ matrix([[ 1.+0.j, -0.-2.j],
+ [ 0.+2.j, 5.+0.j]])
+ >>> w, v = LA.eigh(A)
+ >>> w; v
+ array([0.17157288, 5.82842712])
+ matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
+ [ 0. +0.38268343j, 0. -0.92387953j]])
+
+ >>> # demonstrate the treatment of the imaginary part of the diagonal
+ >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
+ >>> a
+ array([[5.+2.j, 9.-2.j],
+ [0.+2.j, 2.-1.j]])
+ >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
+ >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
+ >>> b
+ array([[5.+0.j, 0.-2.j],
+ [0.+2.j, 2.+0.j]])
+ >>> wa, va = LA.eigh(a)
+ >>> wb, vb = LA.eig(b)
+ >>> wa; wb
+ array([1., 6.])
+ array([6.+0.j, 1.+0.j])
+ >>> va; vb
+ array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
+ [ 0. +0.89442719j, 0. -0.4472136j ]])
+ array([[ 0.89442719+0.j , -0. +0.4472136j],
+ [-0. +0.4472136j, 0.89442719+0.j ]])
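+
+    As an illustrative check, the matrix of eigenvectors returned by
+    `eigh` is unitary:
+
+    >>> w, v = LA.eigh(np.array([[2., 1.], [1., 2.]]))
+    >>> np.allclose(v.conj().T @ v, np.eye(2))
+    True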
+ """
+ UPLO = UPLO.upper()
+ if UPLO not in ('L', 'U'):
+ raise ValueError("UPLO argument must be 'L' or 'U'")
+
+ a, wrap = _makearray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ t, result_t = _commonType(a)
+
+ extobj = get_linalg_error_extobj(
+ _raise_linalgerror_eigenvalues_nonconvergence)
+ if UPLO == 'L':
+ gufunc = _umath_linalg.eigh_lo
+ else:
+ gufunc = _umath_linalg.eigh_up
+
+ signature = 'D->dD' if isComplexType(t) else 'd->dd'
+ w, vt = gufunc(a, signature=signature, extobj=extobj)
+ w = w.astype(_realType(result_t), copy=False)
+ vt = vt.astype(result_t, copy=False)
+ return w, wrap(vt)
+
+
+# Singular value decomposition
+
+def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
+ return (a,)
+
+
+@array_function_dispatch(_svd_dispatcher)
+def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
+ """
+ Singular Value Decomposition.
+
+ When `a` is a 2D array, and ``full_matrices=False``, then it is
+ factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where
+ `u` and the Hermitian transpose of `vh` are 2D arrays with
+ orthonormal columns and `s` is a 1D array of `a`'s singular
+ values. When `a` is higher-dimensional, SVD is applied in
+ stacked mode as explained below.
+
+ Parameters
+ ----------
+ a : (..., M, N) array_like
+ A real or complex array with ``a.ndim >= 2``.
+ full_matrices : bool, optional
+ If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
+ ``(..., N, N)``, respectively. Otherwise, the shapes are
+ ``(..., M, K)`` and ``(..., K, N)``, respectively, where
+ ``K = min(M, N)``.
+ compute_uv : bool, optional
+ Whether or not to compute `u` and `vh` in addition to `s`. True
+ by default.
+ hermitian : bool, optional
+ If True, `a` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ u : { (..., M, M), (..., M, K) } array
+ Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
+ size as those of the input `a`. The size of the last two dimensions
+ depends on the value of `full_matrices`. Only returned when
+ `compute_uv` is True.
+ s : (..., K) array
+ Vector(s) with the singular values, within each vector sorted in
+ descending order. The first ``a.ndim - 2`` dimensions have the same
+ size as those of the input `a`.
+ vh : { (..., N, N), (..., K, N) } array
+ Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
+ size as those of the input `a`. The size of the last two dimensions
+ depends on the value of `full_matrices`. Only returned when
+ `compute_uv` is True.
+
+ Raises
+ ------
+ LinAlgError
+ If SVD computation does not converge.
+
+ See Also
+ --------
+ scipy.linalg.svd : Similar function in SciPy.
+ scipy.linalg.svdvals : Compute singular values of a matrix.
+
+ Notes
+ -----
+
+ .. versionchanged:: 1.8.0
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The decomposition is performed using LAPACK routine ``_gesdd``.
+
+ SVD is usually described for the factorization of a 2D matrix :math:`A`.
+ The higher-dimensional case will be discussed below. In the 2D case, SVD is
+ written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
+ :math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
+ contains the singular values of `a` and `u` and `vh` are unitary. The rows
+ of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
+ the eigenvectors of :math:`A A^H`. In both cases the corresponding
+ (possibly non-zero) eigenvalues are given by ``s**2``.
+
+ If `a` has more than two dimensions, then broadcasting rules apply, as
+ explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
+ working in "stacked" mode: it iterates over all indices of the first
+ ``a.ndim - 2`` dimensions and for each combination SVD is applied to the
+ last two indices. The matrix `a` can be reconstructed from the
+ decomposition with either ``(u * s[..., None, :]) @ vh`` or
+ ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
+ function ``np.matmul`` for python versions below 3.5.)
+
+ If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
+ all the return values.
+
+ Examples
+ --------
+ >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
+ >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
+
+ Reconstruction based on full SVD, 2D case:
+
+ >>> u, s, vh = np.linalg.svd(a, full_matrices=True)
+ >>> u.shape, s.shape, vh.shape
+ ((9, 9), (6,), (6, 6))
+ >>> np.allclose(a, np.dot(u[:, :6] * s, vh))
+ True
+ >>> smat = np.zeros((9, 6), dtype=complex)
+ >>> smat[:6, :6] = np.diag(s)
+ >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
+ True
+
+ Reconstruction based on reduced SVD, 2D case:
+
+ >>> u, s, vh = np.linalg.svd(a, full_matrices=False)
+ >>> u.shape, s.shape, vh.shape
+ ((9, 6), (6,), (6, 6))
+ >>> np.allclose(a, np.dot(u * s, vh))
+ True
+ >>> smat = np.diag(s)
+ >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
+ True
+
+ Reconstruction based on full SVD, 4D case:
+
+ >>> u, s, vh = np.linalg.svd(b, full_matrices=True)
+ >>> u.shape, s.shape, vh.shape
+ ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
+ >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
+ True
+ >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
+ True
+
+ Reconstruction based on reduced SVD, 4D case:
+
+ >>> u, s, vh = np.linalg.svd(b, full_matrices=False)
+ >>> u.shape, s.shape, vh.shape
+ ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
+ >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
+ True
+ >>> np.allclose(b, np.matmul(u, s[..., None] * vh))
+ True
+
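+    As an illustrative check of the ``hermitian`` shortcut, both code
+    paths agree on the singular values of a symmetric matrix:
+
+    >>> h = np.array([[1., 2.], [2., 1.]])
+    >>> np.allclose(np.linalg.svd(h, compute_uv=False),
+    ...             np.linalg.svd(h, compute_uv=False, hermitian=True))
+    True
+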
+ """
+ import numpy as _nx
+ a, wrap = _makearray(a)
+
+ if hermitian:
+ # note: lapack svd returns eigenvalues with s ** 2 sorted descending,
+        # but eigh returns s sorted ascending, so we re-order the eigenvalues
+ # and related arrays to have the correct order
+ if compute_uv:
+ s, u = eigh(a)
+ sgn = sign(s)
+ s = abs(s)
+ sidx = argsort(s)[..., ::-1]
+ sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
+ s = _nx.take_along_axis(s, sidx, axis=-1)
+ u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
+ # singular values are unsigned, move the sign into v
+ vt = transpose(u * sgn[..., None, :]).conjugate()
+ return wrap(u), s, wrap(vt)
+ else:
+ s = eigvalsh(a)
+ s = abs(s)
+ return sort(s)[..., ::-1]
+
+ _assert_stacked_2d(a)
+ t, result_t = _commonType(a)
+
+ extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
+
+ m, n = a.shape[-2:]
+ if compute_uv:
+ if full_matrices:
+ if m < n:
+ gufunc = _umath_linalg.svd_m_f
+ else:
+ gufunc = _umath_linalg.svd_n_f
+ else:
+ if m < n:
+ gufunc = _umath_linalg.svd_m_s
+ else:
+ gufunc = _umath_linalg.svd_n_s
+
+ signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
+ u, s, vh = gufunc(a, signature=signature, extobj=extobj)
+ u = u.astype(result_t, copy=False)
+ s = s.astype(_realType(result_t), copy=False)
+ vh = vh.astype(result_t, copy=False)
+ return wrap(u), s, wrap(vh)
+ else:
+ if m < n:
+ gufunc = _umath_linalg.svd_m
+ else:
+ gufunc = _umath_linalg.svd_n
+
+ signature = 'D->d' if isComplexType(t) else 'd->d'
+ s = gufunc(a, signature=signature, extobj=extobj)
+ s = s.astype(_realType(result_t), copy=False)
+ return s
+
+
+def _cond_dispatcher(x, p=None):
+ return (x,)
+
+
+@array_function_dispatch(_cond_dispatcher)
+def cond(x, p=None):
+ """
+ Compute the condition number of a matrix.
+
+ This function is capable of returning the condition number using
+ one of seven different norms, depending on the value of `p` (see
+ Parameters below).
+
+ Parameters
+ ----------
+ x : (..., M, N) array_like
+ The matrix whose condition number is sought.
+ p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
+ Order of the norm used in the condition number computation:
+
+ ===== ============================
+ p norm for matrices
+ ===== ============================
+ None 2-norm, computed directly using the ``SVD``
+ 'fro' Frobenius norm
+ inf max(sum(abs(x), axis=1))
+ -inf min(sum(abs(x), axis=1))
+ 1 max(sum(abs(x), axis=0))
+ -1 min(sum(abs(x), axis=0))
+ 2 2-norm (largest sing. value)
+ -2 smallest singular value
+ ===== ============================
+
+ inf means the `numpy.inf` object, and the Frobenius norm is
+ the root-of-sum-of-squares norm.
+
+ Returns
+ -------
+ c : {float, inf}
+ The condition number of the matrix. May be infinite.
+
+ See Also
+ --------
+ numpy.linalg.norm
+
+ Notes
+ -----
+ The condition number of `x` is defined as the norm of `x` times the
+ norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
+ (root-of-sum-of-squares) or one of a number of other matrix norms.
+
+ References
+ ----------
+ .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
+ Academic Press, Inc., 1980, pg. 285.
+
+ Examples
+ --------
+ >>> from numpy import linalg as LA
+ >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
+ >>> a
+ array([[ 1, 0, -1],
+ [ 0, 1, 0],
+ [ 1, 0, 1]])
+ >>> LA.cond(a)
+ 1.4142135623730951
+ >>> LA.cond(a, 'fro')
+ 3.1622776601683795
+ >>> LA.cond(a, np.inf)
+ 2.0
+ >>> LA.cond(a, -np.inf)
+ 1.0
+ >>> LA.cond(a, 1)
+ 2.0
+ >>> LA.cond(a, -1)
+ 1.0
+ >>> LA.cond(a, 2)
+ 1.4142135623730951
+ >>> LA.cond(a, -2)
+ 0.70710678118654746 # may vary
+ >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
+ 0.70710678118654746 # may vary
+
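+    As an illustrative check, the default condition number equals the
+    ratio of the largest to the smallest singular value:
+
+    >>> s = LA.svd(a, compute_uv=False)
+    >>> np.allclose(LA.cond(a), s[0] / s[-1])
+    True
+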
+ """
+ x = asarray(x) # in case we have a matrix
+ if _is_empty_2d(x):
+ raise LinAlgError("cond is not defined on empty arrays")
+ if p is None or p == 2 or p == -2:
+ s = svd(x, compute_uv=False)
+ with errstate(all='ignore'):
+ if p == -2:
+ r = s[..., -1] / s[..., 0]
+ else:
+ r = s[..., 0] / s[..., -1]
+ else:
+ # Call inv(x) ignoring errors. The result array will
+ # contain nans in the entries where inversion failed.
+ _assert_stacked_2d(x)
+ _assert_stacked_square(x)
+ t, result_t = _commonType(x)
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ with errstate(all='ignore'):
+ invx = _umath_linalg.inv(x, signature=signature)
+ r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
+ r = r.astype(result_t, copy=False)
+
+ # Convert nans to infs unless the original array had nan entries
+ r = asarray(r)
+ nan_mask = isnan(r)
+ if nan_mask.any():
+ nan_mask &= ~isnan(x).any(axis=(-2, -1))
+ if r.ndim > 0:
+ r[nan_mask] = Inf
+ elif nan_mask:
+ r[()] = Inf
+
+ # Convention is to return scalars instead of 0d arrays
+ if r.ndim == 0:
+ r = r[()]
+
+ return r
+
+
+def _matrix_rank_dispatcher(A, tol=None, hermitian=None):
+ return (A,)
+
+
+@array_function_dispatch(_matrix_rank_dispatcher)
+def matrix_rank(A, tol=None, hermitian=False):
+ """
+ Return matrix rank of array using SVD method
+
+ Rank of the array is the number of singular values of the array that are
+ greater than `tol`.
+
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
+ Parameters
+ ----------
+ A : {(M,), (..., M, N)} array_like
+ Input vector or stack of matrices.
+ tol : (...) array_like, float, optional
+ Threshold below which SVD values are considered zero. If `tol` is
+ None, and ``S`` is an array with singular values for `M`, and
+ ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+ set to ``S.max() * max(M, N) * eps``.
+
+ .. versionchanged:: 1.14
+ Broadcasted against the stack of matrices
+ hermitian : bool, optional
+ If True, `A` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.14
+
+ Returns
+ -------
+ rank : (...) array_like
+ Rank of A.
+
+ Notes
+ -----
+ The default threshold to detect rank deficiency is a test on the magnitude
+ of the singular values of `A`. By default, we identify singular values less
+ than ``S.max() * max(M, N) * eps`` as indicating rank deficiency (with
+ the symbols defined above). This is the algorithm MATLAB uses [1]. It also
+ appears in *Numerical recipes* in the discussion of SVD solutions for linear
+ least squares [2].
+
+ This default threshold is designed to detect rank deficiency accounting for
+ the numerical errors of the SVD computation. Imagine that there is a column
+ in `A` that is an exact (in floating point) linear combination of other
+ columns in `A`. Computing the SVD on `A` will not produce a singular value
+ exactly equal to 0 in general: any difference of the smallest SVD value from
+ 0 will be caused by numerical imprecision in the calculation of the SVD.
+ Our threshold for small SVD values takes this numerical imprecision into
+ account, and the default threshold will detect such numerical rank
+ deficiency. The threshold may declare a matrix `A` rank deficient even if
+ the linear combination of some columns of `A` is not exactly equal to
+ another column of `A` but only numerically very close to another column of
+ `A`.
+
+ We chose our default threshold because it is in wide use. Other thresholds
+ are possible. For example, elsewhere in the 2007 edition of *Numerical
+ recipes* there is an alternative threshold of ``S.max() *
+ np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
+ this threshold as being based on "expected roundoff error" (p 71).
+
+ The thresholds above deal with floating point roundoff error in the
+ calculation of the SVD. However, you may have more information about the
+ sources of error in `A` that would make you consider other tolerance values
+ to detect *effective* rank deficiency. The most useful measure of the
+ tolerance depends on the operations you intend to use on your matrix. For
+ example, if your data come from uncertain measurements with uncertainties
+ greater than floating point epsilon, choosing a tolerance near that
+ uncertainty may be preferable. The tolerance may be absolute if the
+ uncertainties are absolute rather than relative.
+
+ References
+ ----------
+ .. [1] MATLAB reference documentation, "Rank"
+ https://www.mathworks.com/help/techdoc/ref/rank.html
+ .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
+ "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
+ page 795.
+
+ Examples
+ --------
+ >>> from numpy.linalg import matrix_rank
+ >>> matrix_rank(np.eye(4)) # Full rank matrix
+ 4
+ >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
+ >>> matrix_rank(I)
+ 3
+ >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
+ 1
+ >>> matrix_rank(np.zeros((4,)))
+ 0
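+
+    An explicit absolute tolerance may also be passed; the cutoff used
+    below is arbitrary and purely illustrative:
+
+    >>> matrix_rank(np.eye(4) * 1e-10, tol=1e-8)
+    0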
+ """
+ A = asarray(A)
+ if A.ndim < 2:
+ return int(not all(A==0))
+ S = svd(A, compute_uv=False, hermitian=hermitian)
+ if tol is None:
+ tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * finfo(S.dtype).eps
+ else:
+ tol = asarray(tol)[..., newaxis]
+ return count_nonzero(S > tol, axis=-1)
+
+
+# Generalized inverse
+
+def _pinv_dispatcher(a, rcond=None, hermitian=None):
+ return (a,)
+
+
+@array_function_dispatch(_pinv_dispatcher)
+def pinv(a, rcond=1e-15, hermitian=False):
+ """
+ Compute the (Moore-Penrose) pseudo-inverse of a matrix.
+
+ Calculate the generalized inverse of a matrix using its
+ singular-value decomposition (SVD) and including all
+ *large* singular values.
+
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
+ Parameters
+ ----------
+ a : (..., M, N) array_like
+ Matrix or stack of matrices to be pseudo-inverted.
+ rcond : (...) array_like of float
+ Cutoff for small singular values.
+ Singular values less than or equal to
+ ``rcond * largest_singular_value`` are set to zero.
+ Broadcasts against the stack of matrices.
+ hermitian : bool, optional
+ If True, `a` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ B : (..., N, M) ndarray
+ The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+ is `B`.
+
+ Raises
+ ------
+ LinAlgError
+ If the SVD computation does not converge.
+
+ See Also
+ --------
+ scipy.linalg.pinv : Similar function in SciPy.
+ scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
+ Hermitian matrix.
+
+ Notes
+ -----
+ The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
+ defined as: "the matrix that 'solves' [the least-squares problem]
+ :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
+ :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
+
+ It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
+ value decomposition of A, then
+ :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
+ orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
+    of A's so-called singular values (followed, typically, by
+ zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
+ consisting of the reciprocals of A's singular values
+ (again, followed by zeros). [1]_
+
+ References
+ ----------
+ .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
+ FL, Academic Press, Inc., 1980, pp. 139-142.
+
+ Examples
+ --------
+ The following example checks that ``a * a+ * a == a`` and
+ ``a+ * a * a+ == a+``:
+
+ >>> a = np.random.randn(9, 6)
+ >>> B = np.linalg.pinv(a)
+ >>> np.allclose(a, np.dot(a, np.dot(B, a)))
+ True
+ >>> np.allclose(B, np.dot(B, np.dot(a, B)))
+ True
+
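+    As an illustrative cross-check, for this full-column-rank `a` the
+    pseudo-inverse reproduces the least-squares solution from `lstsq`:
+
+    >>> b = np.random.randn(9)
+    >>> np.allclose(np.dot(B, b), np.linalg.lstsq(a, b, rcond=None)[0])
+    True
+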
+ """
+ a, wrap = _makearray(a)
+ rcond = asarray(rcond)
+ if _is_empty_2d(a):
+ m, n = a.shape[-2:]
+ res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
+ return wrap(res)
+ a = a.conjugate()
+ u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
+
+ # discard small singular values
+ cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
+ large = s > cutoff
+ s = divide(1, s, where=large, out=s)
+ s[~large] = 0
+
+ res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
+ return wrap(res)
+
+
+# Determinant
+
+
+@array_function_dispatch(_unary_dispatcher)
+def slogdet(a):
+ """
+ Compute the sign and (natural) logarithm of the determinant of an array.
+
+ If an array has a very small or very large determinant, then a call to
+ `det` may overflow or underflow. This routine is more robust against such
+ issues, because it computes the logarithm of the determinant rather than
+ the determinant itself.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Input array, has to be a square 2-D array.
+
+ Returns
+ -------
+ sign : (...) array_like
+ A number representing the sign of the determinant. For a real matrix,
+ this is 1, 0, or -1. For a complex matrix, this is a complex number
+ with absolute value 1 (i.e., it is on the unit circle), or else 0.
+ logdet : (...) array_like
+ The natural log of the absolute value of the determinant.
+
+ If the determinant is zero, then `sign` will be 0 and `logdet` will be
+ -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
+
+ See Also
+ --------
+ det
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ .. versionadded:: 1.6.0
+
+ The determinant is computed via LU factorization using the LAPACK
+ routine ``z/dgetrf``.
+
+
+ Examples
+ --------
+ The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> (sign, logdet) = np.linalg.slogdet(a)
+ >>> (sign, logdet)
+ (-1, 0.69314718055994529) # may vary
+ >>> sign * np.exp(logdet)
+ -2.0
+
+ Computing log-determinants for a stack of matrices:
+
+ >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+ >>> a.shape
+ (3, 2, 2)
+ >>> sign, logdet = np.linalg.slogdet(a)
+ >>> (sign, logdet)
+ (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
+ >>> sign * np.exp(logdet)
+ array([-2., -3., -8.])
+
+ This routine succeeds where ordinary `det` does not:
+
+ >>> np.linalg.det(np.eye(500) * 0.1)
+ 0.0
+ >>> np.linalg.slogdet(np.eye(500) * 0.1)
+ (1, -1151.2925464970228)
+
+ """
+ a = asarray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ t, result_t = _commonType(a)
+ real_t = _realType(result_t)
+ signature = 'D->Dd' if isComplexType(t) else 'd->dd'
+ sign, logdet = _umath_linalg.slogdet(a, signature=signature)
+ sign = sign.astype(result_t, copy=False)
+ logdet = logdet.astype(real_t, copy=False)
+ return sign, logdet
+
+
+@array_function_dispatch(_unary_dispatcher)
+def det(a):
+ """
+ Compute the determinant of an array.
+
+ Parameters
+ ----------
+ a : (..., M, M) array_like
+ Input array to compute determinants for.
+
+ Returns
+ -------
+ det : (...) array_like
+ Determinant of `a`.
+
+ See Also
+ --------
+ slogdet : Another way to represent the determinant, more suitable
+ for large matrices where underflow/overflow may occur.
+ scipy.linalg.det : Similar function in SciPy.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.8.0
+
+ Broadcasting rules apply, see the `numpy.linalg` documentation for
+ details.
+
+ The determinant is computed via LU factorization using the LAPACK
+ routine ``z/dgetrf``.
+
+ Examples
+ --------
+    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
+
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> np.linalg.det(a)
+ -2.0 # may vary
+
+ Computing determinants for a stack of matrices:
+
+ >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
+ >>> a.shape
+ (3, 2, 2)
+ >>> np.linalg.det(a)
+ array([-2., -3., -8.])
+
+ """
+ a = asarray(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ t, result_t = _commonType(a)
+ signature = 'D->D' if isComplexType(t) else 'd->d'
+ r = _umath_linalg.det(a, signature=signature)
+ r = r.astype(result_t, copy=False)
+ return r
+
+
+# Linear Least Squares
+
+def _lstsq_dispatcher(a, b, rcond=None):
+ return (a, b)
+
+
+@array_function_dispatch(_lstsq_dispatcher)
+def lstsq(a, b, rcond="warn"):
+ r"""
+ Return the least-squares solution to a linear matrix equation.
+
+ Computes the vector `x` that approximately solves the equation
+ ``a @ x = b``. The equation may be under-, well-, or over-determined
+ (i.e., the number of linearly independent rows of `a` can be less than,
+ equal to, or greater than its number of linearly independent columns).
+ If `a` is square and of full rank, then `x` (but for round-off error)
+ is the "exact" solution of the equation. Else, `x` minimizes the
+ Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing
+ solutions, the one with the smallest 2-norm :math:`||x||` is returned.
+
+ Parameters
+ ----------
+ a : (M, N) array_like
+ "Coefficient" matrix.
+ b : {(M,), (M, K)} array_like
+ Ordinate or "dependent variable" values. If `b` is two-dimensional,
+ the least-squares solution is calculated for each of the `K` columns
+ of `b`.
+ rcond : float, optional
+ Cut-off ratio for small singular values of `a`.
+ For the purposes of rank determination, singular values are treated
+ as zero if they are smaller than `rcond` times the largest singular
+ value of `a`.
+
+ .. versionchanged:: 1.14.0
+ If not set, a FutureWarning is given. The previous default
+ of ``-1`` will use the machine precision as `rcond` parameter,
+ the new default will use the machine precision times `max(M, N)`.
+ To silence the warning and use the new default, use ``rcond=None``,
+ to keep using the old behavior, use ``rcond=-1``.
+
+ Returns
+ -------
+ x : {(N,), (N, K)} ndarray
+ Least-squares solution. If `b` is two-dimensional,
+ the solutions are in the `K` columns of `x`.
+ residuals : {(1,), (K,), (0,)} ndarray
+ Sums of squared residuals: Squared Euclidean 2-norm for each column in
+ ``b - a @ x``.
+ If the rank of `a` is < N or M <= N, this is an empty array.
+ If `b` is 1-dimensional, this is a (1,) shape array.
+ Otherwise the shape is (K,).
+ rank : int
+ Rank of matrix `a`.
+ s : (min(M, N),) ndarray
+ Singular values of `a`.
+
+ Raises
+ ------
+ LinAlgError
+ If computation does not converge.
+
+ See Also
+ --------
+ scipy.linalg.lstsq : Similar function in SciPy.
+
+ Notes
+ -----
+ If `b` is a matrix, then all array results are returned as matrices.
+
+ Examples
+ --------
+ Fit a line, ``y = mx + c``, through some noisy data-points:
+
+ >>> x = np.array([0, 1, 2, 3])
+ >>> y = np.array([-1, 0.2, 0.9, 2.1])
+
+ By examining the coefficients, we see that the line should have a
+ gradient of roughly 1 and cut the y-axis at, more or less, -1.
+
+ We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
+ and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
+
+ >>> A = np.vstack([x, np.ones(len(x))]).T
+ >>> A
+ array([[ 0., 1.],
+ [ 1., 1.],
+ [ 2., 1.],
+ [ 3., 1.]])
+
+ >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
+ >>> m, c
+    (1.0, -0.95) # may vary
+
+ Plot the data along with the fitted line:
+
+ >>> import matplotlib.pyplot as plt
+ >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
+ >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
+ >>> _ = plt.legend()
+ >>> plt.show()
+
+ """
+ a, _ = _makearray(a)
+ b, wrap = _makearray(b)
+ is_1d = b.ndim == 1
+ if is_1d:
+ b = b[:, newaxis]
+ _assert_2d(a, b)
+ m, n = a.shape[-2:]
+ m2, n_rhs = b.shape[-2:]
+ if m != m2:
+ raise LinAlgError('Incompatible dimensions')
+
+ t, result_t = _commonType(a, b)
+ result_real_t = _realType(result_t)
+
+ # Determine default rcond value
+ if rcond == "warn":
+ # 2017-08-19, 1.14.0
+ warnings.warn("`rcond` parameter will change to the default of "
+ "machine precision times ``max(M, N)`` where M and N "
+ "are the input matrix dimensions.\n"
+ "To use the future default and silence this warning "
+ "we advise to pass `rcond=None`, to keep using the old, "
+ "explicitly pass `rcond=-1`.",
+ FutureWarning, stacklevel=3)
+ rcond = -1
+ if rcond is None:
+ rcond = finfo(t).eps * max(n, m)
+
+ if m <= n:
+ gufunc = _umath_linalg.lstsq_m
+ else:
+ gufunc = _umath_linalg.lstsq_n
+
+ signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
+ extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
+ if n_rhs == 0:
+ # lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
+ b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
+ x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
+ if m == 0:
+ x[...] = 0
+ if n_rhs == 0:
+ # remove the item we added
+ x = x[..., :n_rhs]
+ resids = resids[..., :n_rhs]
+
+ # remove the axis we added
+ if is_1d:
+ x = x.squeeze(axis=-1)
+ # we probably should squeeze resids too, but we can't
+ # without breaking compatibility.
+
+ # as documented
+ if rank != n or m <= n:
+ resids = array([], result_real_t)
+
+ # coerce output arrays
+ s = s.astype(result_real_t, copy=False)
+ resids = resids.astype(result_real_t, copy=False)
+    x = x.astype(result_t, copy=True)  # Copying lets the gufunc's result memory be freed
+ return wrap(x), wrap(resids), rank, s
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op):
+ """Compute a function of the singular values of the 2-D matrices in `x`.
+
+ This is a private utility function used by `numpy.linalg.norm()`.
+
+ Parameters
+ ----------
+ x : ndarray
+ row_axis, col_axis : int
+ The axes of `x` that hold the 2-D matrices.
+ op : callable
+        This should be either `numpy.amin`, `numpy.amax`, or `numpy.sum`.
+
+ Returns
+ -------
+ result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+        The return values are the minimum, maximum, or sum of the
+        singular values of the matrices, depending on whether `op`
+        is `numpy.amin`, `numpy.amax`, or `numpy.sum`.
+
+ """
+ y = moveaxis(x, (row_axis, col_axis), (-2, -1))
+ result = op(svd(y, compute_uv=False), axis=-1)
+ return result
+
+
+def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
+ return (x,)
+
+
+@array_function_dispatch(_norm_dispatcher)
+def norm(x, ord=None, axis=None, keepdims=False):
+ """
+ Matrix or vector norm.
+
+ This function is able to return one of eight different matrix norms,
+ or one of an infinite number of vector norms (described below), depending
+ on the value of the ``ord`` parameter.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
+ is None. If both `axis` and `ord` are None, the 2-norm of
+ ``x.ravel`` will be returned.
+ ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
+ Order of the norm (see table under ``Notes``). inf means numpy's
+ `inf` object. The default is None.
+ axis : {None, int, 2-tuple of ints}, optional.
+ If `axis` is an integer, it specifies the axis of `x` along which to
+ compute the vector norms. If `axis` is a 2-tuple, it specifies the
+ axes that hold 2-D matrices, and the matrix norms of these matrices
+ are computed. If `axis` is None then either a vector norm (when `x`
+ is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
+ is None.
+
+ .. versionadded:: 1.8.0
+
+ keepdims : bool, optional
+ If this is set to True, the axes which are normed over are left in the
+ result as dimensions with size one. With this option the result will
+ broadcast correctly against the original `x`.
+
+ .. versionadded:: 1.10.0
+
+ Returns
+ -------
+ n : float or ndarray
+ Norm of the matrix or vector(s).
+
+ See Also
+ --------
+ scipy.linalg.norm : Similar function in SciPy.
+
+ Notes
+ -----
+ For values of ``ord < 1``, the result is, strictly speaking, not a
+ mathematical 'norm', but it may still be useful for various numerical
+ purposes.
+
+ The following norms can be calculated:
+
+ ===== ============================ ==========================
+ ord norm for matrices norm for vectors
+ ===== ============================ ==========================
+ None Frobenius norm 2-norm
+ 'fro' Frobenius norm --
+ 'nuc' nuclear norm --
+ inf max(sum(abs(x), axis=1)) max(abs(x))
+ -inf min(sum(abs(x), axis=1)) min(abs(x))
+ 0 -- sum(x != 0)
+ 1 max(sum(abs(x), axis=0)) as below
+ -1 min(sum(abs(x), axis=0)) as below
+ 2 2-norm (largest sing. value) as below
+ -2 smallest singular value as below
+ other -- sum(abs(x)**ord)**(1./ord)
+ ===== ============================ ==========================
+
+ The Frobenius norm is given by [1]_:
+
+ :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
+
+ The nuclear norm is the sum of the singular values.
+
+ Both the Frobenius and nuclear norm orders are only defined for
+ matrices and raise a ValueError when ``x.ndim != 2``.
+
+ References
+ ----------
+ .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
+ Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
+
+ Examples
+ --------
+ >>> from numpy import linalg as LA
+ >>> a = np.arange(9) - 4
+ >>> a
+ array([-4, -3, -2, ..., 2, 3, 4])
+ >>> b = a.reshape((3, 3))
+ >>> b
+ array([[-4, -3, -2],
+ [-1, 0, 1],
+ [ 2, 3, 4]])
+
+ >>> LA.norm(a)
+ 7.745966692414834
+ >>> LA.norm(b)
+ 7.745966692414834
+ >>> LA.norm(b, 'fro')
+ 7.745966692414834
+ >>> LA.norm(a, np.inf)
+ 4.0
+ >>> LA.norm(b, np.inf)
+ 9.0
+ >>> LA.norm(a, -np.inf)
+ 0.0
+ >>> LA.norm(b, -np.inf)
+ 2.0
+
+ >>> LA.norm(a, 1)
+ 20.0
+ >>> LA.norm(b, 1)
+ 7.0
+ >>> LA.norm(a, -1)
+    0.0
+ >>> LA.norm(b, -1)
+ 6.0
+ >>> LA.norm(a, 2)
+ 7.745966692414834
+ >>> LA.norm(b, 2)
+ 7.3484692283495345
+
+ >>> LA.norm(a, -2)
+ 0.0
+ >>> LA.norm(b, -2)
+    1.8570331885190563e-16 # may vary
+ >>> LA.norm(a, 3)
+ 5.8480354764257312 # may vary
+ >>> LA.norm(a, -3)
+ 0.0
+
+ Using the `axis` argument to compute vector norms:
+
+ >>> c = np.array([[ 1, 2, 3],
+ ... [-1, 1, 4]])
+ >>> LA.norm(c, axis=0)
+ array([ 1.41421356, 2.23606798, 5. ])
+ >>> LA.norm(c, axis=1)
+ array([ 3.74165739, 4.24264069])
+ >>> LA.norm(c, ord=1, axis=1)
+ array([ 6., 6.])
+
+ Using the `axis` argument to compute matrix norms:
+
+ >>> m = np.arange(8).reshape(2,2,2)
+ >>> LA.norm(m, axis=(1,2))
+ array([ 3.74165739, 11.22497216])
+ >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
+ (3.7416573867739413, 11.224972160321824)
+
+ """
+ x = asarray(x)
+
+ if not issubclass(x.dtype.type, (inexact, object_)):
+ x = x.astype(float)
+
+ # Immediately handle some default, simple, fast, and common cases.
+ if axis is None:
+ ndim = x.ndim
+ if ((ord is None) or
+ (ord in ('f', 'fro') and ndim == 2) or
+ (ord == 2 and ndim == 1)):
+
+ x = x.ravel(order='K')
+ if isComplexType(x.dtype.type):
+ x_real = x.real
+ x_imag = x.imag
+ sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag)
+ else:
+ sqnorm = x.dot(x)
+ ret = sqrt(sqnorm)
+ if keepdims:
+ ret = ret.reshape(ndim*[1])
+ return ret
+
+ # Normalize the `axis` argument to a tuple.
+ nd = x.ndim
+ if axis is None:
+ axis = tuple(range(nd))
+ elif not isinstance(axis, tuple):
+ try:
+ axis = int(axis)
+ except Exception as e:
+ raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
+ axis = (axis,)
+
+ if len(axis) == 1:
+ if ord == Inf:
+ return abs(x).max(axis=axis, keepdims=keepdims)
+ elif ord == -Inf:
+ return abs(x).min(axis=axis, keepdims=keepdims)
+ elif ord == 0:
+ # Zero norm
+ return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
+ elif ord == 1:
+ # special case for speedup
+ return add.reduce(abs(x), axis=axis, keepdims=keepdims)
+ elif ord is None or ord == 2:
+ # special case for speedup
+ s = (x.conj() * x).real
+ return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
+ # None of the str-type keywords for ord ('fro', 'nuc')
+ # are valid for vectors
+ elif isinstance(ord, str):
+ raise ValueError(f"Invalid norm order '{ord}' for vectors")
+ else:
+ absx = abs(x)
+ absx **= ord
+ ret = add.reduce(absx, axis=axis, keepdims=keepdims)
+ ret **= reciprocal(ord, dtype=ret.dtype)
+ return ret
+ elif len(axis) == 2:
+ row_axis, col_axis = axis
+ row_axis = normalize_axis_index(row_axis, nd)
+ col_axis = normalize_axis_index(col_axis, nd)
+ if row_axis == col_axis:
+ raise ValueError('Duplicate axes given.')
+ if ord == 2:
+ ret = _multi_svd_norm(x, row_axis, col_axis, amax)
+ elif ord == -2:
+ ret = _multi_svd_norm(x, row_axis, col_axis, amin)
+ elif ord == 1:
+ if col_axis > row_axis:
+ col_axis -= 1
+ ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
+ elif ord == Inf:
+ if row_axis > col_axis:
+ row_axis -= 1
+ ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
+ elif ord == -1:
+ if col_axis > row_axis:
+ col_axis -= 1
+ ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
+ elif ord == -Inf:
+ if row_axis > col_axis:
+ row_axis -= 1
+ ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
+ elif ord in [None, 'fro', 'f']:
+ ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
+ elif ord == 'nuc':
+ ret = _multi_svd_norm(x, row_axis, col_axis, sum)
+ else:
+ raise ValueError("Invalid norm order for matrices.")
+ if keepdims:
+ ret_shape = list(x.shape)
+ ret_shape[axis[0]] = 1
+ ret_shape[axis[1]] = 1
+ ret = ret.reshape(ret_shape)
+ return ret
+ else:
+ raise ValueError("Improper number of dimensions to norm.")
+
+
+# multi_dot
+
+def _multidot_dispatcher(arrays, *, out=None):
+ yield from arrays
+ yield out
+
+
+@array_function_dispatch(_multidot_dispatcher)
+def multi_dot(arrays, *, out=None):
+ """
+ Compute the dot product of two or more arrays in a single function call,
+ while automatically selecting the fastest evaluation order.
+
+ `multi_dot` chains `numpy.dot` and uses optimal parenthesization
+ of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
+ this can speed up the multiplication a lot.
+
+ If the first argument is 1-D it is treated as a row vector.
+ If the last argument is 1-D it is treated as a column vector.
+ The other arguments must be 2-D.
+
+ Think of `multi_dot` as::
+
+ def multi_dot(arrays): return functools.reduce(np.dot, arrays)
+
+
+ Parameters
+ ----------
+ arrays : sequence of array_like
+ If the first argument is 1-D it is treated as row vector.
+ If the last argument is 1-D it is treated as column vector.
+ The other arguments must be 2-D.
+ out : ndarray, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a, b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ .. versionadded:: 1.19.0
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of the supplied arrays.
+
+ See Also
+ --------
+ numpy.dot : dot multiplication with two arguments.
+
+ References
+ ----------
+
+ .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
+ .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
+
+ Examples
+ --------
+ `multi_dot` allows you to write::
+
+ >>> from numpy.linalg import multi_dot
+ >>> # Prepare some data
+ >>> A = np.random.random((10000, 100))
+ >>> B = np.random.random((100, 1000))
+ >>> C = np.random.random((1000, 5))
+ >>> D = np.random.random((5, 333))
+ >>> # the actual dot multiplication
+ >>> _ = multi_dot([A, B, C, D])
+
+ instead of::
+
+ >>> _ = np.dot(np.dot(np.dot(A, B), C), D)
+ >>> # or
+ >>> _ = A.dot(B).dot(C).dot(D)
+
+ Notes
+ -----
+ The cost for a matrix multiplication can be calculated with the
+ following function::
+
+ def cost(A, B):
+ return A.shape[0] * A.shape[1] * B.shape[1]
+
+ Assume we have three matrices
+ :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
+
+ The costs for the two different parenthesizations are as follows::
+
+ cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
+ cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
+
+ """
+ n = len(arrays)
+ # optimization only makes sense for len(arrays) > 2
+ if n < 2:
+ raise ValueError("Expecting at least two arrays.")
+ elif n == 2:
+ return dot(arrays[0], arrays[1], out=out)
+
+ arrays = [asanyarray(a) for a in arrays]
+
+ # save original ndim to reshape the result array into the proper form later
+ ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
+ # Explicitly convert vectors to 2D arrays to keep the logic of the internal
+ # _multi_dot_* functions as simple as possible.
+ if arrays[0].ndim == 1:
+ arrays[0] = atleast_2d(arrays[0])
+ if arrays[-1].ndim == 1:
+ arrays[-1] = atleast_2d(arrays[-1]).T
+ _assert_2d(*arrays)
+
+ # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
+ if n == 3:
+ result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
+ else:
+ order = _multi_dot_matrix_chain_order(arrays)
+ result = _multi_dot(arrays, order, 0, n - 1, out=out)
+
+ # return proper shape
+ if ndim_first == 1 and ndim_last == 1:
+ return result[0, 0] # scalar
+ elif ndim_first == 1 or ndim_last == 1:
+ return result.ravel() # 1-D
+ else:
+ return result
+
+
+def _multi_dot_three(A, B, C, out=None):
+ """
+ Find the best order for three arrays and do the multiplication.
+
+ For three arguments `_multi_dot_three` is approximately 15 times faster
+    than `_multi_dot_matrix_chain_order`.
+
+ """
+ a0, a1b0 = A.shape
+ b1c0, c1 = C.shape
+ # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
+ cost1 = a0 * b1c0 * (a1b0 + c1)
+ # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
+ cost2 = a1b0 * c1 * (a0 + b1c0)
+
+ if cost1 < cost2:
+ return dot(dot(A, B), C, out=out)
+ else:
+ return dot(A, dot(B, C), out=out)
+
+
+def _multi_dot_matrix_chain_order(arrays, return_costs=False):
+ """
+    Return a np.array that encodes the optimal order of multiplications.
+
+ The optimal order array is then used by `_multi_dot()` to do the
+ multiplication.
+
+    Also return the cost matrix if `return_costs` is `True`.
+
+ The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
+ Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
+
+ cost[i, j] = min([
+ cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
+ for k in range(i, j)])
+
+ """
+ n = len(arrays)
+ # p stores the dimensions of the matrices
+ # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
+ p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
+ # m is a matrix of costs of the subproblems
+ # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
+ m = zeros((n, n), dtype=double)
+ # s is the actual ordering
+ # s[i, j] is the value of k at which we split the product A_i..A_j
+ s = empty((n, n), dtype=intp)
+
+ for l in range(1, n):
+ for i in range(n - l):
+ j = i + l
+ m[i, j] = Inf
+ for k in range(i, j):
+ q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
+ if q < m[i, j]:
+ m[i, j] = q
+ s[i, j] = k # Note that Cormen uses 1-based index
+
+ return (s, m) if return_costs else s
+
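+# Illustrative note (an aside, not part of the algorithm): for the chain
+# A (10x100), B (100x5), C (5x50) used in `multi_dot`'s Notes, the
+# dimension list is p = [10, 100, 5, 50] and the optimal split is
+# s[0, 2] == 1, i.e. (AB)C, the 7500-multiplication parenthesization:
+#
+#     arrays = [np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))]
+#     _multi_dot_matrix_chain_order(arrays)[0, 2]  # -> 1
+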
+
+def _multi_dot(arrays, order, i, j, out=None):
+ """Actually do the multiplication with the given order."""
+ if i == j:
+ # the initial call with non-None out should never get here
+ assert out is None
+
+ return arrays[i]
+ else:
+ return dot(_multi_dot(arrays, order, i, order[i, j]),
+ _multi_dot(arrays, order, order[i, j] + 1, j),
+ out=out)
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/linalg.pyi b/venv/lib/python3.9/site-packages/numpy/linalg/linalg.pyi
new file mode 100644
index 00000000..20cdb708
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/linalg.pyi
@@ -0,0 +1,282 @@
+from collections.abc import Iterable
+from typing import (
+ Literal as L,
+ overload,
+ TypeVar,
+ Any,
+ SupportsIndex,
+ SupportsInt,
+)
+
+from numpy import (
+ generic,
+ floating,
+ complexfloating,
+ int32,
+ float64,
+ complex128,
+)
+
+from numpy.linalg import LinAlgError as LinAlgError
+
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ _ArrayLikeInt_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_2Tuple = tuple[_T, _T]
+_ModeKind = L["reduced", "complete", "r", "raw"]
+
+__all__: list[str]
+
+@overload
+def tensorsolve(
+ a: _ArrayLikeInt_co,
+ b: _ArrayLikeInt_co,
+ axes: None | Iterable[int] =...,
+) -> NDArray[float64]: ...
+@overload
+def tensorsolve(
+ a: _ArrayLikeFloat_co,
+ b: _ArrayLikeFloat_co,
+ axes: None | Iterable[int] =...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def tensorsolve(
+ a: _ArrayLikeComplex_co,
+ b: _ArrayLikeComplex_co,
+ axes: None | Iterable[int] =...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def solve(
+ a: _ArrayLikeInt_co,
+ b: _ArrayLikeInt_co,
+) -> NDArray[float64]: ...
+@overload
+def solve(
+ a: _ArrayLikeFloat_co,
+ b: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def solve(
+ a: _ArrayLikeComplex_co,
+ b: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def tensorinv(
+ a: _ArrayLikeInt_co,
+ ind: int = ...,
+) -> NDArray[float64]: ...
+@overload
+def tensorinv(
+ a: _ArrayLikeFloat_co,
+ ind: int = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def tensorinv(
+ a: _ArrayLikeComplex_co,
+ ind: int = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ...
+@overload
+def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+@overload
+def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+# TODO: The supported input and output dtypes are dependent on the value of `n`.
+# For example: `n < 0` always casts integer types to float64
+def matrix_power(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ n: SupportsIndex,
+) -> NDArray[Any]: ...
+
+@overload
+def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ...
+@overload
+def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+@overload
+def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[float64]]: ...
+@overload
+def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[floating[Any]]]: ...
+@overload
+def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: ...
+
+@overload
+def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ...
+@overload
+def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]] | NDArray[complexfloating[Any, Any]]: ...
+@overload
+def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+@overload
+def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ...
+@overload
+def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating[Any]]: ...
+
+@overload
+def eig(a: _ArrayLikeInt_co) -> _2Tuple[NDArray[float64]] | _2Tuple[NDArray[complex128]]: ...
+@overload
+def eig(a: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]] | _2Tuple[NDArray[complexfloating[Any, Any]]]: ...
+@overload
+def eig(a: _ArrayLikeComplex_co) -> _2Tuple[NDArray[complexfloating[Any, Any]]]: ...
+
+@overload
+def eigh(
+ a: _ArrayLikeInt_co,
+ UPLO: L["L", "U", "l", "u"] = ...,
+) -> tuple[NDArray[float64], NDArray[float64]]: ...
+@overload
+def eigh(
+ a: _ArrayLikeFloat_co,
+ UPLO: L["L", "U", "l", "u"] = ...,
+) -> tuple[NDArray[floating[Any]], NDArray[floating[Any]]]: ...
+@overload
+def eigh(
+ a: _ArrayLikeComplex_co,
+ UPLO: L["L", "U", "l", "u"] = ...,
+) -> tuple[NDArray[floating[Any]], NDArray[complexfloating[Any, Any]]]: ...
+
+@overload
+def svd(
+ a: _ArrayLikeInt_co,
+ full_matrices: bool = ...,
+ compute_uv: L[True] = ...,
+ hermitian: bool = ...,
+) -> tuple[
+ NDArray[float64],
+ NDArray[float64],
+ NDArray[float64],
+]: ...
+@overload
+def svd(
+ a: _ArrayLikeFloat_co,
+ full_matrices: bool = ...,
+ compute_uv: L[True] = ...,
+ hermitian: bool = ...,
+) -> tuple[
+ NDArray[floating[Any]],
+ NDArray[floating[Any]],
+ NDArray[floating[Any]],
+]: ...
+@overload
+def svd(
+ a: _ArrayLikeComplex_co,
+ full_matrices: bool = ...,
+ compute_uv: L[True] = ...,
+ hermitian: bool = ...,
+) -> tuple[
+ NDArray[complexfloating[Any, Any]],
+ NDArray[floating[Any]],
+ NDArray[complexfloating[Any, Any]],
+]: ...
+@overload
+def svd(
+ a: _ArrayLikeInt_co,
+ full_matrices: bool = ...,
+ compute_uv: L[False] = ...,
+ hermitian: bool = ...,
+) -> NDArray[float64]: ...
+@overload
+def svd(
+ a: _ArrayLikeComplex_co,
+ full_matrices: bool = ...,
+ compute_uv: L[False] = ...,
+ hermitian: bool = ...,
+) -> NDArray[floating[Any]]: ...
+
+# TODO: Returns a scalar for 2D arrays and
+# a ``(x.ndim - 2)``-dimensional array otherwise
+def cond(x: _ArrayLikeComplex_co, p: None | float | L["fro", "nuc"] = ...) -> Any: ...
+
+# TODO: Returns `int` for <2D arrays and `intp` otherwise
+def matrix_rank(
+ A: _ArrayLikeComplex_co,
+ tol: None | _ArrayLikeFloat_co = ...,
+ hermitian: bool = ...,
+) -> Any: ...
+
+@overload
+def pinv(
+ a: _ArrayLikeInt_co,
+ rcond: _ArrayLikeFloat_co = ...,
+ hermitian: bool = ...,
+) -> NDArray[float64]: ...
+@overload
+def pinv(
+ a: _ArrayLikeFloat_co,
+ rcond: _ArrayLikeFloat_co = ...,
+ hermitian: bool = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def pinv(
+ a: _ArrayLikeComplex_co,
+ rcond: _ArrayLikeFloat_co = ...,
+ hermitian: bool = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of ``(a.ndim - 2)``-dimensional arrays otherwise
+def slogdet(a: _ArrayLikeComplex_co) -> _2Tuple[Any]: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of ``(a.ndim - 2)``-dimensional arrays otherwise
+def det(a: _ArrayLikeComplex_co) -> Any: ...
+
+@overload
+def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: None | float = ...) -> tuple[
+ NDArray[float64],
+ NDArray[float64],
+ int32,
+ NDArray[float64],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: None | float = ...) -> tuple[
+ NDArray[floating[Any]],
+ NDArray[floating[Any]],
+ int32,
+ NDArray[floating[Any]],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: None | float = ...) -> tuple[
+ NDArray[complexfloating[Any, Any]],
+ NDArray[floating[Any]],
+ int32,
+ NDArray[floating[Any]],
+]: ...
+
+@overload
+def norm(
+ x: ArrayLike,
+ ord: None | float | L["fro", "nuc"] = ...,
+ axis: None = ...,
+ keepdims: bool = ...,
+) -> floating[Any]: ...
+@overload
+def norm(
+ x: ArrayLike,
+ ord: None | float | L["fro", "nuc"] = ...,
+ axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
+ keepdims: bool = ...,
+) -> Any: ...
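+
+# Per the overloads above, a full reduction is typed as a floating
+# scalar, while any explicit `axis` yields `Any` (x assumed an array):
+#     np.linalg.norm(x)          # floating[Any]
+#     np.linalg.norm(x, axis=0)  # Any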
+
+# TODO: Returns a scalar or array
+def multi_dot(
+ arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co],
+ *,
+ out: None | NDArray[Any] = ...,
+) -> Any: ...
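+
+# Note on `multi_dot` (illustrative): it evaluates a chain of matrix
+# products in the cheapest association order, so for conformable
+# matrices A, B and C, `np.linalg.multi_dot([A, B, C])` equals
+# `A @ B @ C` but may be much faster.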
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/setup.py b/venv/lib/python3.9/site-packages/numpy/linalg/setup.py
new file mode 100644
index 00000000..1c4e1295
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/setup.py
@@ -0,0 +1,92 @@
+import os
+import sys
+import sysconfig
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
+ from numpy.distutils.system_info import get_info, system_info
+ config = Configuration('linalg', parent_package, top_path)
+
+ config.add_subpackage('tests')
+
+ # Configure lapack_lite
+
+ src_dir = 'lapack_lite'
+ lapack_lite_src = [
+ os.path.join(src_dir, 'python_xerbla.c'),
+ os.path.join(src_dir, 'f2c_z_lapack.c'),
+ os.path.join(src_dir, 'f2c_c_lapack.c'),
+ os.path.join(src_dir, 'f2c_d_lapack.c'),
+ os.path.join(src_dir, 'f2c_s_lapack.c'),
+ os.path.join(src_dir, 'f2c_lapack.c'),
+ os.path.join(src_dir, 'f2c_blas.c'),
+ os.path.join(src_dir, 'f2c_config.c'),
+ os.path.join(src_dir, 'f2c.c'),
+ ]
+ all_sources = config.paths(lapack_lite_src)
+
+ if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
+ lapack_info = get_info('lapack_ilp64_opt', 2)
+ else:
+        lapack_info = get_info('lapack_opt', 0)  # may be {} when not found
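+
+    # Illustrative shell usage (an assumption, not executed here): the
+    # ILP64 branch above is selected at build time via
+    #     NPY_USE_BLAS_ILP64=1 python setup.py build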
+
+ use_lapack_lite = not lapack_info
+
+ if use_lapack_lite:
+        # This makes numpy.distutils record in numpy.__config__
+        # that lapack_lite is being used
+ class numpy_linalg_lapack_lite(system_info):
+ def calc_info(self):
+ info = {'language': 'c'}
+ size_t_size = sysconfig.get_config_var("SIZEOF_SIZE_T")
+ if size_t_size:
+ maxsize = 2**(size_t_size - 1) - 1
+ else:
+ # We prefer using sysconfig as it allows cross-compilation
+ # but the information may be missing (e.g. on windows).
+ maxsize = sys.maxsize
+ if maxsize > 2**32:
+ # Build lapack-lite in 64-bit integer mode.
+ # The suffix is arbitrary (lapack_lite symbols follow it),
+ # but use the "64_" convention here.
+ info['define_macros'] = [
+ ('HAVE_BLAS_ILP64', None),
+ ('BLAS_SYMBOL_SUFFIX', '64_')
+ ]
+ self.set_info(**info)
+
+ lapack_info = numpy_linalg_lapack_lite().get_info(2)
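+        # The recorded configuration can later be inspected with
+        # numpy.show_config() (an illustrative check, not used by the build).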
+
+ def get_lapack_lite_sources(ext, build_dir):
+ if use_lapack_lite:
+ print("### Warning: Using unoptimized lapack ###")
+ return all_sources
+ else:
+ if sys.platform == 'win32':
+ print("### Warning: python_xerbla.c is disabled ###")
+ return []
+ return [all_sources[0]]
+
+ config.add_extension(
+ 'lapack_lite',
+ sources=['lapack_litemodule.c', get_lapack_lite_sources],
+ depends=['lapack_lite/f2c.h'],
+ extra_info=lapack_info,
+ )
+
+ # umath_linalg module
+ config.add_extension(
+ '_umath_linalg',
+ sources=['umath_linalg.cpp', get_lapack_lite_sources],
+ depends=['lapack_lite/f2c.h'],
+ extra_info=lapack_info,
+ extra_cxx_compile_args=NPY_CXX_FLAGS,
+ libraries=['npymath'],
+ )
+ config.add_data_files('*.pyi')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
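+
+# This file is normally driven by the parent numpy.distutils build; a
+# direct invocation for illustration would be `python setup.py build`.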
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/linalg/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py b/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py
new file mode 100644
index 00000000..cd4c1083
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_deprecations.py
@@ -0,0 +1,20 @@
+"""Test deprecation and future warnings.
+
+"""
+import numpy as np
+from numpy.testing import assert_warns
+
+
+def test_qr_mode_full_future_warning():
+    """Check that the 'full' and 'economic' QR modes are deprecated.
+
+    In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were
+    deprecated; each of them (and their single-letter aliases) should now
+    raise a DeprecationWarning.
+
+ """
+ a = np.eye(2)
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')
+ assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py b/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py
new file mode 100644
index 00000000..b1dbd4c2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_linalg.py
@@ -0,0 +1,2186 @@
+""" Test functions for linalg module
+
+"""
+import os
+import sys
+import itertools
+import traceback
+import textwrap
+import subprocess
+import pytest
+
+import numpy as np
+from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
+from numpy.core import swapaxes
+from numpy import multiply, atleast_2d, inf, asarray
+from numpy import linalg
+from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
+from numpy.linalg.linalg import _multi_dot_matrix_chain_order
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_array_equal,
+ assert_almost_equal, assert_allclose, suppress_warnings,
+ assert_raises_regex, HAS_LAPACK64, IS_WASM
+ )
+
+
+def consistent_subclass(out, in_):
+ # For ndarray subclass input, our output should have the same subclass
+ # (non-ndarray input gets converted to ndarray).
+ return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+ else np.ndarray)
+
+
+old_assert_almost_equal = assert_almost_equal
+
+
+def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
+ if asarray(a).dtype.type in (single, csingle):
+ decimal = single_decimal
+ else:
+ decimal = double_decimal
+ old_assert_almost_equal(a, b, decimal=decimal, **kw)
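+
+
+# With the override above, single-precision results are compared to 6
+# decimal places and double-precision results to 12; e.g. an absolute
+# error of 1e-7 passes for float32 operands but fails for float64.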
+
+
+def get_real_dtype(dtype):
+ return {single: single, double: double,
+ csingle: single, cdouble: double}[dtype]
+
+
+def get_complex_dtype(dtype):
+ return {single: csingle, double: cdouble,
+ csingle: csingle, cdouble: cdouble}[dtype]
+
+
+def get_rtol(dtype):
+ # Choose a safe rtol
+ if dtype in (single, csingle):
+ return 1e-5
+ else:
+ return 1e-11
+
+
+# used to categorize tests
+all_tags = {
+ 'square', 'nonsquare', 'hermitian', # mutually exclusive
+ 'generalized', 'size-0', 'strided' # optional additions
+}
+
+
+class LinalgCase:
+ def __init__(self, name, a, b, tags=set()):
+ """
+ A bundle of arguments to be passed to a test case, with an identifying
+ name, the operands a and b, and a set of tags to filter the tests
+ """
+ assert_(isinstance(name, str))
+ self.name = name
+ self.a = a
+ self.b = b
+ self.tags = frozenset(tags) # prevent shared tags
+
+ def check(self, do):
+ """
+ Run the function `do` on this test case, expanding arguments
+ """
+ do(self.a, self.b, tags=self.tags)
+
+ def __repr__(self):
+ return f'<LinalgCase: {self.name}>'
+
+
+def apply_tag(tag, cases):
+ """
+ Add the given tag (a string) to each of the cases (a list of LinalgCase
+ objects)
+ """
+ assert tag in all_tags, "Invalid tag"
+ for case in cases:
+ case.tags = case.tags | {tag}
+ return cases
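+
+
+# Illustrative use of the helpers above (values assumed):
+#     case = LinalgCase("demo", np.eye(2), np.ones(2))
+#     apply_tag('square', [case])   # case.tags now includes 'square'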
+
+
+#
+# Base test cases
+#
+
+np.random.seed(1234)
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+ LinalgCase("single",
+ array([[1., 2.], [3., 4.]], dtype=single),
+ array([2., 1.], dtype=single)),
+ LinalgCase("double",
+ array([[1., 2.], [3., 4.]], dtype=double),
+ array([2., 1.], dtype=double)),
+ LinalgCase("double_2",
+ array([[1., 2.], [3., 4.]], dtype=double),
+ array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
+ LinalgCase("csingle",
+ array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
+ array([2. + 1j, 1. + 2j], dtype=csingle)),
+ LinalgCase("cdouble",
+ array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
+ array([2. + 1j, 1. + 2j], dtype=cdouble)),
+ LinalgCase("cdouble_2",
+ array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
+ array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
+ LinalgCase("0x0",
+ np.empty((0, 0), dtype=double),
+ np.empty((0,), dtype=double),
+ tags={'size-0'}),
+ LinalgCase("8x8",
+ np.random.rand(8, 8),
+ np.random.rand(8)),
+ LinalgCase("1x1",
+ np.random.rand(1, 1),
+ np.random.rand(1)),
+ LinalgCase("nonarray",
+ [[1, 2], [3, 4]],
+ [2, 1]),
+])
+
+# non-square test-cases
+CASES += apply_tag('nonsquare', [
+ LinalgCase("single_nsq_1",
+ array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
+ array([2., 1.], dtype=single)),
+ LinalgCase("single_nsq_2",
+ array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
+ array([2., 1., 3.], dtype=single)),
+ LinalgCase("double_nsq_1",
+ array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
+ array([2., 1.], dtype=double)),
+ LinalgCase("double_nsq_2",
+ array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
+ array([2., 1., 3.], dtype=double)),
+ LinalgCase("csingle_nsq_1",
+ array(
+ [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
+ array([2. + 1j, 1. + 2j], dtype=csingle)),
+ LinalgCase("csingle_nsq_2",
+ array(
+ [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
+ array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
+ LinalgCase("cdouble_nsq_1",
+ array(
+ [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
+ array([2. + 1j, 1. + 2j], dtype=cdouble)),
+ LinalgCase("cdouble_nsq_2",
+ array(
+ [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
+ array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
+ LinalgCase("cdouble_nsq_1_2",
+ array(
+ [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
+ array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
+ LinalgCase("cdouble_nsq_2_2",
+ array(
+ [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
+ array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
+ LinalgCase("8x11",
+ np.random.rand(8, 11),
+ np.random.rand(8)),
+ LinalgCase("1x5",
+ np.random.rand(1, 5),
+ np.random.rand(1)),
+ LinalgCase("5x1",
+ np.random.rand(5, 1),
+ np.random.rand(5)),
+ LinalgCase("0x4",
+ np.random.rand(0, 4),
+ np.random.rand(0),
+ tags={'size-0'}),
+ LinalgCase("4x0",
+ np.random.rand(4, 0),
+ np.random.rand(4),
+ tags={'size-0'}),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+ LinalgCase("hsingle",
+ array([[1., 2.], [2., 1.]], dtype=single),
+ None),
+ LinalgCase("hdouble",
+ array([[1., 2.], [2., 1.]], dtype=double),
+ None),
+ LinalgCase("hcsingle",
+ array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
+ None),
+ LinalgCase("hcdouble",
+ array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
+ None),
+ LinalgCase("hempty",
+ np.empty((0, 0), dtype=double),
+ None,
+ tags={'size-0'}),
+ LinalgCase("hnonarray",
+ [[1, 2], [2, 1]],
+ None),
+ LinalgCase("matrix_b_only",
+ array([[1., 2.], [2., 1.]]),
+ None),
+ LinalgCase("hmatrix_1x1",
+ np.random.rand(1, 1),
+ None),
+])
+
+
+#
+# Gufunc test cases
+#
+def _make_generalized_cases():
+ new_cases = []
+
+ for case in CASES:
+ if not isinstance(case.a, np.ndarray):
+ continue
+
+ a = np.array([case.a, 2 * case.a, 3 * case.a])
+ if case.b is None:
+ b = None
+ else:
+ b = np.array([case.b, 7 * case.b, 6 * case.b])
+ new_case = LinalgCase(case.name + "_tile3", a, b,
+ tags=case.tags | {'generalized'})
+ new_cases.append(new_case)
+
+ a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
+ if case.b is None:
+ b = None
+ else:
+ b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
+ new_case = LinalgCase(case.name + "_tile213", a, b,
+ tags=case.tags | {'generalized'})
+ new_cases.append(new_case)
+
+ return new_cases
+
+
+CASES += _make_generalized_cases()
+
+
+#
+# Generate stride combination variations of the above
+#
+def _stride_comb_iter(x):
+ """
+ Generate cartesian product of strides for all axes
+ """
+
+ if not isinstance(x, np.ndarray):
+ yield x, "nop"
+ return
+
+ stride_set = [(1,)] * x.ndim
+ stride_set[-1] = (1, 3, -4)
+ if x.ndim > 1:
+ stride_set[-2] = (1, 3, -4)
+ if x.ndim > 2:
+ stride_set[-3] = (1, -4)
+
+ for repeats in itertools.product(*tuple(stride_set)):
+ new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
+ slices = tuple([slice(None, None, repeat) for repeat in repeats])
+
+ # new array with different strides, but same data
+ xi = np.empty(new_shape, dtype=x.dtype)
+ xi.view(np.uint32).fill(0xdeadbeef)
+ xi = xi[slices]
+ xi[...] = x
+ xi = xi.view(x.__class__)
+ assert_(np.all(xi == x))
+ yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
+
+    # also generate zero strides if possible
+ if x.ndim >= 1 and x.shape[-1] == 1:
+ s = list(x.strides)
+ s[-1] = 0
+ xi = np.lib.stride_tricks.as_strided(x, strides=s)
+ yield xi, "stride_xxx_0"
+ if x.ndim >= 2 and x.shape[-2] == 1:
+ s = list(x.strides)
+ s[-2] = 0
+ xi = np.lib.stride_tricks.as_strided(x, strides=s)
+ yield xi, "stride_xxx_0_x"
+ if x.ndim >= 2 and x.shape[:-2] == (1, 1):
+ s = list(x.strides)
+ s[-1] = 0
+ s[-2] = 0
+ xi = np.lib.stride_tricks.as_strided(x, strides=s)
+ yield xi, "stride_xxx_0_0"
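+
+
+# For example, a 1-D length-3 input to _stride_comb_iter yields views
+# stepped by 1, 3 and -4 along the last axis (labelled "stride_+1",
+# "stride_+3", "stride_-4"), each comparing equal to the original data.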
+
+
+def _make_strided_cases():
+ new_cases = []
+ for case in CASES:
+ for a, a_label in _stride_comb_iter(case.a):
+ for b, b_label in _stride_comb_iter(case.b):
+ new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
+ tags=case.tags | {'strided'})
+ new_cases.append(new_case)
+ return new_cases
+
+
+CASES += _make_strided_cases()
+
+
+#
+# Test different routines against the above cases
+#
+class LinalgTestCase:
+ TEST_CASES = CASES
+
+ def check_cases(self, require=set(), exclude=set()):
+ """
+        Run `self.do` on each case whose tags include all of the tags in
+        require and none of the tags in exclude
+ """
+ for case in self.TEST_CASES:
+ # filter by require and exclude
+ if case.tags & require != require:
+ continue
+ if case.tags & exclude:
+ continue
+
+ try:
+ case.check(self.do)
+ except Exception as e:
+ msg = f'In test case: {case!r}\n\n'
+ msg += traceback.format_exc()
+ raise AssertionError(msg) from e
+
+
+class LinalgSquareTestCase(LinalgTestCase):
+
+ def test_sq_cases(self):
+ self.check_cases(require={'square'},
+ exclude={'generalized', 'size-0'})
+
+ def test_empty_sq_cases(self):
+ self.check_cases(require={'square', 'size-0'},
+ exclude={'generalized'})
+
+
+class LinalgNonsquareTestCase(LinalgTestCase):
+
+ def test_nonsq_cases(self):
+ self.check_cases(require={'nonsquare'},
+ exclude={'generalized', 'size-0'})
+
+ def test_empty_nonsq_cases(self):
+ self.check_cases(require={'nonsquare', 'size-0'},
+ exclude={'generalized'})
+
+
+class HermitianTestCase(LinalgTestCase):
+
+ def test_herm_cases(self):
+ self.check_cases(require={'hermitian'},
+ exclude={'generalized', 'size-0'})
+
+ def test_empty_herm_cases(self):
+ self.check_cases(require={'hermitian', 'size-0'},
+ exclude={'generalized'})
+
+
+class LinalgGeneralizedSquareTestCase(LinalgTestCase):
+
+ @pytest.mark.slow
+ def test_generalized_sq_cases(self):
+ self.check_cases(require={'generalized', 'square'},
+ exclude={'size-0'})
+
+ @pytest.mark.slow
+ def test_generalized_empty_sq_cases(self):
+ self.check_cases(require={'generalized', 'square', 'size-0'})
+
+
+class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
+
+ @pytest.mark.slow
+ def test_generalized_nonsq_cases(self):
+ self.check_cases(require={'generalized', 'nonsquare'},
+ exclude={'size-0'})
+
+ @pytest.mark.slow
+ def test_generalized_empty_nonsq_cases(self):
+ self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
+
+
+class HermitianGeneralizedTestCase(LinalgTestCase):
+
+ @pytest.mark.slow
+ def test_generalized_herm_cases(self):
+ self.check_cases(require={'generalized', 'hermitian'},
+ exclude={'size-0'})
+
+ @pytest.mark.slow
+ def test_generalized_empty_herm_cases(self):
+ self.check_cases(require={'generalized', 'hermitian', 'size-0'},
+ exclude={'none'})
+
+
+def dot_generalized(a, b):
+ a = asarray(a)
+ if a.ndim >= 3:
+ if a.ndim == b.ndim:
+ # matrix x matrix
+ new_shape = a.shape[:-1] + b.shape[-1:]
+ elif a.ndim == b.ndim + 1:
+ # matrix x vector
+ new_shape = a.shape[:-1]
+ else:
+ raise ValueError("Not implemented...")
+ r = np.empty(new_shape, dtype=np.common_type(a, b))
+ for c in itertools.product(*map(range, a.shape[:-2])):
+ r[c] = dot(a[c], b[c])
+ return r
+ else:
+ return dot(a, b)
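+
+
+# dot_generalized applies `dot` over the stacked leading axes: e.g. for
+# a.shape == (3, 2, 2) and b.shape == (3, 2) the result has shape (3, 2),
+# mirroring the gufunc broadcasting of np.linalg itself.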
+
+
+def identity_like_generalized(a):
+ a = asarray(a)
+ if a.ndim >= 3:
+ r = np.empty(a.shape, dtype=a.dtype)
+ r[...] = identity(a.shape[-2])
+ return r
+ else:
+ return identity(a.shape[0])
+
+
+class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+    # Kept apart from TestSolve so it can be reused for testing with matrices.
+ def do(self, a, b, tags):
+ x = linalg.solve(a, b)
+ assert_almost_equal(b, dot_generalized(a, x))
+ assert_(consistent_subclass(x, b))
+
+
+class TestSolve(SolveCases):
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.solve(x, x).dtype, dtype)
+
+ def test_0_size(self):
+ class ArraySubclass(np.ndarray):
+ pass
+ # Test system of 0x0 matrices
+ a = np.arange(8).reshape(2, 2, 2)
+ b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
+
+ expected = linalg.solve(a, b)[:, 0:0, :]
+ result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
+ assert_array_equal(result, expected)
+ assert_(isinstance(result, ArraySubclass))
+
+ # Test errors for non-square and only b's dimension being 0
+ assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
+ assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
+
+ # Test broadcasting error
+ b = np.arange(6).reshape(1, 3, 2) # broadcasting error
+ assert_raises(ValueError, linalg.solve, a, b)
+ assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
+
+ # Test zero "single equations" with 0x0 matrices.
+ b = np.arange(2).reshape(1, 2).view(ArraySubclass)
+ expected = linalg.solve(a, b)[:, 0:0]
+ result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
+ assert_array_equal(result, expected)
+ assert_(isinstance(result, ArraySubclass))
+
+ b = np.arange(3).reshape(1, 3)
+ assert_raises(ValueError, linalg.solve, a, b)
+ assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
+ assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
+
+ def test_0_size_k(self):
+        # Test the multiple-equation case with zero right-hand sides (K=0).
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.arange(4).reshape(1, 2, 2)
+ b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
+
+ expected = linalg.solve(a, b)[:, :, 0:0]
+ result = linalg.solve(a, b[:, :, 0:0])
+ assert_array_equal(result, expected)
+ assert_(isinstance(result, ArraySubclass))
+
+ # test both zero.
+ expected = linalg.solve(a, b)[:, 0:0, 0:0]
+ result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
+ assert_array_equal(result, expected)
+ assert_(isinstance(result, ArraySubclass))
+
+
+class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+
+ def do(self, a, b, tags):
+ a_inv = linalg.inv(a)
+ assert_almost_equal(dot_generalized(a, a_inv),
+ identity_like_generalized(a))
+ assert_(consistent_subclass(a_inv, a))
+
+
+class TestInv(InvCases):
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.inv(x).dtype, dtype)
+
+ def test_0_size(self):
+ # Check that all kinds of 0-sized arrays work
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
+ res = linalg.inv(a)
+ assert_(res.dtype.type is np.float64)
+ assert_equal(a.shape, res.shape)
+ assert_(isinstance(res, ArraySubclass))
+
+ a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
+ res = linalg.inv(a)
+ assert_(res.dtype.type is np.complex64)
+ assert_equal(a.shape, res.shape)
+ assert_(isinstance(res, ArraySubclass))
+
+
+class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+
+ def do(self, a, b, tags):
+ ev = linalg.eigvals(a)
+ evalues, evectors = linalg.eig(a)
+ assert_almost_equal(ev, evalues)
+
+
+class TestEigvals(EigvalsCases):
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.eigvals(x).dtype, dtype)
+ x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
+ assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
+
+ def test_0_size(self):
+ # Check that all kinds of 0-sized arrays work
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
+ res = linalg.eigvals(a)
+ assert_(res.dtype.type is np.float64)
+ assert_equal((0, 1), res.shape)
+        # This is just for documentation; it might make sense to change:
+ assert_(isinstance(res, np.ndarray))
+
+ a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
+ res = linalg.eigvals(a)
+ assert_(res.dtype.type is np.complex64)
+ assert_equal((0,), res.shape)
+        # This is just for documentation; it might make sense to change:
+ assert_(isinstance(res, np.ndarray))
+
+
+class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+
+ def do(self, a, b, tags):
+ evalues, evectors = linalg.eig(a)
+ assert_allclose(dot_generalized(a, evectors),
+ np.asarray(evectors) * np.asarray(evalues)[..., None, :],
+ rtol=get_rtol(evalues.dtype))
+ assert_(consistent_subclass(evectors, a))
+
+
+class TestEig(EigCases):
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w, v = np.linalg.eig(x)
+ assert_equal(w.dtype, dtype)
+ assert_equal(v.dtype, dtype)
+
+ x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
+ w, v = np.linalg.eig(x)
+ assert_equal(w.dtype, get_complex_dtype(dtype))
+ assert_equal(v.dtype, get_complex_dtype(dtype))
+
+ def test_0_size(self):
+ # Check that all kinds of 0-sized arrays work
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
+ res, res_v = linalg.eig(a)
+ assert_(res_v.dtype.type is np.float64)
+ assert_(res.dtype.type is np.float64)
+ assert_equal(a.shape, res_v.shape)
+ assert_equal((0, 1), res.shape)
+        # This is just for documentation; it might make sense to change:
+        assert_(isinstance(res, np.ndarray))
+
+ a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
+ res, res_v = linalg.eig(a)
+ assert_(res_v.dtype.type is np.complex64)
+ assert_(res.dtype.type is np.complex64)
+ assert_equal(a.shape, res_v.shape)
+ assert_equal((0,), res.shape)
+        # This is just for documentation; it might make sense to change:
+        assert_(isinstance(res, np.ndarray))
+
+
+class SVDBaseTests:
+ hermitian = False
+
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ u, s, vh = linalg.svd(x)
+ assert_equal(u.dtype, dtype)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+ assert_equal(vh.dtype, dtype)
+ s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+
+
+class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+
+ def do(self, a, b, tags):
+ u, s, vt = linalg.svd(a, False)
+ assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
+ np.asarray(vt)),
+ rtol=get_rtol(u.dtype))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+
+class TestSVD(SVDCases, SVDBaseTests):
+ def test_empty_identity(self):
+ """ Empty input should put an identity matrix in u or vh """
+ x = np.empty((4, 0))
+ u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
+ assert_equal(u.shape, (4, 4))
+ assert_equal(vh.shape, (0, 0))
+ assert_equal(u, np.eye(4))
+
+ x = np.empty((0, 4))
+ u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
+ assert_equal(u.shape, (0, 0))
+ assert_equal(vh.shape, (4, 4))
+ assert_equal(vh, np.eye(4))
+
+
+class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
+
+ def do(self, a, b, tags):
+ u, s, vt = linalg.svd(a, False, hermitian=True)
+ assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
+ np.asarray(vt)),
+ rtol=get_rtol(u.dtype))
+ def hermitian(mat):
+ axes = list(range(mat.ndim))
+ axes[-1], axes[-2] = axes[-2], axes[-1]
+ return np.conj(np.transpose(mat, axes=axes))
+
+ assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))
+ assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))
+ assert_equal(np.sort(s)[..., ::-1], s)
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+
+class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
+ hermitian = True
+
+
+class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+    # cond(x, p): the +-2 norms (and None) are checked via SVD, other p via inv
+
+ def do(self, a, b, tags):
+ c = asarray(a) # a might be a matrix
+ if 'size-0' in tags:
+ assert_raises(LinAlgError, linalg.cond, c)
+ return
+
+ # +-2 norms
+ s = linalg.svd(c, compute_uv=False)
+ assert_almost_equal(
+ linalg.cond(a), s[..., 0] / s[..., -1],
+ single_decimal=5, double_decimal=11)
+ assert_almost_equal(
+ linalg.cond(a, 2), s[..., 0] / s[..., -1],
+ single_decimal=5, double_decimal=11)
+ assert_almost_equal(
+ linalg.cond(a, -2), s[..., -1] / s[..., 0],
+ single_decimal=5, double_decimal=11)
+
+ # Other norms
+ cinv = np.linalg.inv(c)
+ assert_almost_equal(
+ linalg.cond(a, 1),
+ abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
+ single_decimal=5, double_decimal=11)
+ assert_almost_equal(
+ linalg.cond(a, -1),
+ abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
+ single_decimal=5, double_decimal=11)
+ assert_almost_equal(
+ linalg.cond(a, np.inf),
+ abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
+ single_decimal=5, double_decimal=11)
+ assert_almost_equal(
+ linalg.cond(a, -np.inf),
+ abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
+ single_decimal=5, double_decimal=11)
+ assert_almost_equal(
+ linalg.cond(a, 'fro'),
+ np.sqrt((abs(c)**2).sum(-1).sum(-1)
+ * (abs(cinv)**2).sum(-1).sum(-1)),
+ single_decimal=5, double_decimal=11)
+
+
+class TestCond(CondCases):
+ def test_basic_nonsvd(self):
+ # Smoketest the non-svd norms
+ A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
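+        # e.g. cond(A, inf) = ||A||_inf * ||inv(A)||_inf = 3 * (4/3) = 4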
+ assert_almost_equal(linalg.cond(A, inf), 4)
+ assert_almost_equal(linalg.cond(A, -inf), 2/3)
+ assert_almost_equal(linalg.cond(A, 1), 4)
+ assert_almost_equal(linalg.cond(A, -1), 0.5)
+ assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
+
+ def test_singular(self):
+ # Singular matrices have infinite condition number for
+ # positive norms, and negative norms shouldn't raise
+ # exceptions
+ As = [np.zeros((2, 2)), np.ones((2, 2))]
+ p_pos = [None, 1, 2, 'fro']
+ p_neg = [-1, -2]
+ for A, p in itertools.product(As, p_pos):
+ # Inversion may not hit exact infinity, so just check the
+ # number is large
+ assert_(linalg.cond(A, p) > 1e15)
+ for A, p in itertools.product(As, p_neg):
+ linalg.cond(A, p)
+
+ @pytest.mark.xfail(True, run=False,
+ reason="Platform/LAPACK-dependent failure, "
+ "see gh-18914")
+ def test_nan(self):
+ # nans should be passed through, not converted to infs
+ ps = [None, 1, -1, 2, -2, 'fro']
+ p_pos = [None, 1, 2, 'fro']
+
+ A = np.ones((2, 2))
+ A[0,1] = np.nan
+ for p in ps:
+ c = linalg.cond(A, p)
+ assert_(isinstance(c, np.float_))
+ assert_(np.isnan(c))
+
+ A = np.ones((3, 2, 2))
+ A[1,0,1] = np.nan
+ for p in ps:
+ c = linalg.cond(A, p)
+ assert_(np.isnan(c[1]))
+ if p in p_pos:
+ assert_(c[0] > 1e15)
+ assert_(c[2] > 1e15)
+ else:
+ assert_(not np.isnan(c[0]))
+ assert_(not np.isnan(c[2]))
+
+ def test_stacked_singular(self):
+ # Check behavior when only some of the stacked matrices are
+ # singular
+ np.random.seed(1234)
+ A = np.random.rand(2, 2, 2, 2)
+ A[0,0] = 0
+ A[1,1] = 0
+
+ for p in (None, 1, 2, 'fro', -1, -2):
+ c = linalg.cond(A, p)
+ assert_equal(c[0,0], np.inf)
+ assert_equal(c[1,1], np.inf)
+ assert_(np.isfinite(c[0,1]))
+ assert_(np.isfinite(c[1,0]))
+
+
+class PinvCases(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
+
+ def do(self, a, b, tags):
+ a_ginv = linalg.pinv(a)
+ # `a @ a_ginv == I` does not hold if a is singular
+ dot = dot_generalized
+ assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
+ assert_(consistent_subclass(a_ginv, a))
+
+
+class TestPinv(PinvCases):
+ pass
+
+
+class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
+
+ def do(self, a, b, tags):
+ a_ginv = linalg.pinv(a, hermitian=True)
+ # `a @ a_ginv == I` does not hold if a is singular
+ dot = dot_generalized
+ assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
+ assert_(consistent_subclass(a_ginv, a))
+
+
+class TestPinvHermitian(PinvHermitianCases):
+ pass
+
+
+class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+
+ def do(self, a, b, tags):
+ d = linalg.det(a)
+ (s, ld) = linalg.slogdet(a)
+ if asarray(a).dtype.type in (single, double):
+ ad = asarray(a).astype(double)
+ else:
+ ad = asarray(a).astype(cdouble)
+ ev = linalg.eigvals(ad)
+ assert_almost_equal(d, multiply.reduce(ev, axis=-1))
+ assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
+
+ s = np.atleast_1d(s)
+ ld = np.atleast_1d(ld)
+ m = (s != 0)
+ assert_almost_equal(np.abs(s[m]), 1)
+ assert_equal(ld[~m], -inf)
+
+
+class TestDet(DetCases):
+ def test_zero(self):
+ assert_equal(linalg.det([[0.0]]), 0.0)
+ assert_equal(type(linalg.det([[0.0]])), double)
+ assert_equal(linalg.det([[0.0j]]), 0.0)
+ assert_equal(type(linalg.det([[0.0j]])), cdouble)
+
+ assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
+ assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
+ assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
+ assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
+ assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
+ assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
+
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(np.linalg.det(x).dtype, dtype)
+ ph, s = np.linalg.slogdet(x)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+ assert_equal(ph.dtype, dtype)
+
+ def test_0_size(self):
+ a = np.zeros((0, 0), dtype=np.complex64)
+ res = linalg.det(a)
+ assert_equal(res, 1.)
+ assert_(res.dtype.type is np.complex64)
+ res = linalg.slogdet(a)
+ assert_equal(res, (1, 0))
+ assert_(res[0].dtype.type is np.complex64)
+ assert_(res[1].dtype.type is np.float32)
+
+ a = np.zeros((0, 0), dtype=np.float64)
+ res = linalg.det(a)
+ assert_equal(res, 1.)
+ assert_(res.dtype.type is np.float64)
+ res = linalg.slogdet(a)
+ assert_equal(res, (1, 0))
+ assert_(res[0].dtype.type is np.float64)
+ assert_(res[1].dtype.type is np.float64)
+
+
+class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
+
+ def do(self, a, b, tags):
+ arr = np.asarray(a)
+ m, n = arr.shape
+ u, s, vt = linalg.svd(a, False)
+ x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
+ if m == 0:
+ assert_((x == 0).all())
+ if m <= n:
+ assert_almost_equal(b, dot(a, x))
+ assert_equal(rank, m)
+ else:
+ assert_equal(rank, n)
+ assert_almost_equal(sv, sv.__array_wrap__(s))
+ if rank == n and m > n:
+ expect_resids = (
+ np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
+ expect_resids = np.asarray(expect_resids)
+ if np.asarray(b).ndim == 1:
+ expect_resids.shape = (1,)
+ assert_equal(residuals.shape, expect_resids.shape)
+ else:
+ expect_resids = np.array([]).view(type(x))
+ assert_almost_equal(residuals, expect_resids)
+ assert_(np.issubdtype(residuals.dtype, np.floating))
+ assert_(consistent_subclass(x, b))
+ assert_(consistent_subclass(residuals, b))
+
+
+class TestLstsq(LstsqCases):
+ def test_future_rcond(self):
+ a = np.array([[0., 1., 0., 1., 2., 0.],
+ [0., 2., 0., 0., 1., 0.],
+ [1., 0., 1., 0., 0., 4.],
+ [0., 0., 0., 2., 3., 0.]]).T
+
+ b = np.array([1, 0, 0, 0, 0, 0])
+ with suppress_warnings() as sup:
+ w = sup.record(FutureWarning, "`rcond` parameter will change")
+ x, residuals, rank, s = linalg.lstsq(a, b)
+ assert_(rank == 4)
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
+ assert_(rank == 4)
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
+ assert_(rank == 3)
+ # Warning should be raised exactly once (first command)
+ assert_(len(w) == 1)
+
+ @pytest.mark.parametrize(["m", "n", "n_rhs"], [
+ (4, 2, 2),
+ (0, 4, 1),
+ (0, 4, 2),
+ (4, 0, 1),
+ (4, 0, 2),
+ (4, 2, 0),
+ (0, 0, 0)
+ ])
+ def test_empty_a_b(self, m, n, n_rhs):
+ a = np.arange(m * n).reshape(m, n)
+ b = np.ones((m, n_rhs))
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
+ if m == 0:
+ assert_((x == 0).all())
+ assert_equal(x.shape, (n, n_rhs))
+ assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
+ if m > n and n_rhs > 0:
+            # residuals are exactly the squared column norms of r = b - a @ x
+ r = b - np.dot(a, x)
+ assert_almost_equal(residuals, (r * r).sum(axis=-2))
+ assert_equal(rank, min(m, n))
+ assert_equal(s.shape, (min(m, n),))
+
+ def test_incompatible_dims(self):
+ # use modified version of docstring example
+ x = np.array([0, 1, 2, 3])
+ y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
+ A = np.vstack([x, np.ones(len(x))]).T
+ with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
+ linalg.lstsq(A, y, rcond=None)
+
+
+@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
+class TestMatrixPower:
+
+ rshft_0 = np.eye(4)
+ rshft_1 = rshft_0[[3, 0, 1, 2]]
+ rshft_2 = rshft_0[[2, 3, 0, 1]]
+ rshft_3 = rshft_0[[1, 2, 3, 0]]
+ rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
+ noninv = array([[1, 0], [0, 0]])
+ stacked = np.block([[[rshft_0]]]*2)
+    # FIXME: the 'e' dtype might work in the future
+ dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
+
+ def test_large_power(self, dt):
+ rshft = self.rshft_1.astype(dt)
+ assert_equal(
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
+ assert_equal(
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
+ assert_equal(
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
+ assert_equal(
+ matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
+
+ def test_power_is_zero(self, dt):
+ def tz(M):
+ mz = matrix_power(M, 0)
+ assert_equal(mz, identity_like_generalized(M))
+ assert_equal(mz.dtype, M.dtype)
+
+ for mat in self.rshft_all:
+ tz(mat.astype(dt))
+ if dt != object:
+ tz(self.stacked.astype(dt))
+
+ def test_power_is_one(self, dt):
+ def tz(mat):
+ mz = matrix_power(mat, 1)
+ assert_equal(mz, mat)
+ assert_equal(mz.dtype, mat.dtype)
+
+ for mat in self.rshft_all:
+ tz(mat.astype(dt))
+ if dt != object:
+ tz(self.stacked.astype(dt))
+
+ def test_power_is_two(self, dt):
+ def tz(mat):
+ mz = matrix_power(mat, 2)
+ mmul = matmul if mat.dtype != object else dot
+ assert_equal(mz, mmul(mat, mat))
+ assert_equal(mz.dtype, mat.dtype)
+
+ for mat in self.rshft_all:
+ tz(mat.astype(dt))
+ if dt != object:
+ tz(self.stacked.astype(dt))
+
+ def test_power_is_minus_one(self, dt):
+ def tz(mat):
+ invmat = matrix_power(mat, -1)
+ mmul = matmul if mat.dtype != object else dot
+ assert_almost_equal(
+ mmul(invmat, mat), identity_like_generalized(mat))
+
+ for mat in self.rshft_all:
+ if dt not in self.dtnoinv:
+ tz(mat.astype(dt))
+
+ def test_exceptions_bad_power(self, dt):
+ mat = self.rshft_0.astype(dt)
+ assert_raises(TypeError, matrix_power, mat, 1.5)
+ assert_raises(TypeError, matrix_power, mat, [1])
+
+ def test_exceptions_non_square(self, dt):
+ assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
+ assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
+ assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_exceptions_not_invertible(self, dt):
+ if dt in self.dtnoinv:
+ return
+ mat = self.noninv.astype(dt)
+ assert_raises(LinAlgError, matrix_power, mat, -1)
+
+
+class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
+
+ def do(self, a, b, tags):
+ # note that eigenvalue arrays returned by eig must be sorted since
+ # their order isn't guaranteed.
+ ev = linalg.eigvalsh(a, 'L')
+ evalues, evectors = linalg.eig(a)
+ evalues.sort(axis=-1)
+ assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))
+
+ ev2 = linalg.eigvalsh(a, 'U')
+ assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
+
+
+class TestEigvalsh:
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w = np.linalg.eigvalsh(x)
+ assert_equal(w.dtype, get_real_dtype(dtype))
+
+ def test_invalid(self):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
+ assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
+ assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
+ assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
+
+ def test_UPLO(self):
+ Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
+ Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
+ tgt = np.array([-1, 1], dtype=np.double)
+ rtol = get_rtol(np.double)
+
+ # Check default is 'L'
+ w = np.linalg.eigvalsh(Klo)
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'L'
+ w = np.linalg.eigvalsh(Klo, UPLO='L')
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'l'
+ w = np.linalg.eigvalsh(Klo, UPLO='l')
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'U'
+ w = np.linalg.eigvalsh(Kup, UPLO='U')
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'u'
+ w = np.linalg.eigvalsh(Kup, UPLO='u')
+ assert_allclose(w, tgt, rtol=rtol)
+
+ def test_0_size(self):
+ # Check that all kinds of 0-sized arrays work
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
+ res = linalg.eigvalsh(a)
+ assert_(res.dtype.type is np.float64)
+ assert_equal((0, 1), res.shape)
+        # This is just for documentation; it might make sense to change:
+ assert_(isinstance(res, np.ndarray))
+
+ a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
+ res = linalg.eigvalsh(a)
+ assert_(res.dtype.type is np.float32)
+ assert_equal((0,), res.shape)
+        # This is just for documentation; it might make sense to change:
+ assert_(isinstance(res, np.ndarray))
+
+
+class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
+
+ def do(self, a, b, tags):
+ # note that eigenvalue arrays returned by eig must be sorted since
+ # their order isn't guaranteed.
+ ev, evc = linalg.eigh(a)
+ evalues, evectors = linalg.eig(a)
+ evalues.sort(axis=-1)
+ assert_almost_equal(ev, evalues)
+
+ assert_allclose(dot_generalized(a, evc),
+ np.asarray(ev)[..., None, :] * np.asarray(evc),
+ rtol=get_rtol(ev.dtype))
+
+ ev2, evc2 = linalg.eigh(a, 'U')
+ assert_almost_equal(ev2, evalues)
+
+ assert_allclose(dot_generalized(a, evc2),
+ np.asarray(ev2)[..., None, :] * np.asarray(evc2),
+ rtol=get_rtol(ev.dtype), err_msg=repr(a))
+
+
+class TestEigh:
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w, v = np.linalg.eigh(x)
+ assert_equal(w.dtype, get_real_dtype(dtype))
+ assert_equal(v.dtype, dtype)
+
+ def test_invalid(self):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
+ assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
+ assert_raises(ValueError, np.linalg.eigh, x, "lower")
+ assert_raises(ValueError, np.linalg.eigh, x, "upper")
+
+ def test_UPLO(self):
+ Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
+ Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
+ tgt = np.array([-1, 1], dtype=np.double)
+ rtol = get_rtol(np.double)
+
+ # Check default is 'L'
+ w, v = np.linalg.eigh(Klo)
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'L'
+ w, v = np.linalg.eigh(Klo, UPLO='L')
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'l'
+ w, v = np.linalg.eigh(Klo, UPLO='l')
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'U'
+ w, v = np.linalg.eigh(Kup, UPLO='U')
+ assert_allclose(w, tgt, rtol=rtol)
+ # Check 'u'
+ w, v = np.linalg.eigh(Kup, UPLO='u')
+ assert_allclose(w, tgt, rtol=rtol)
+
+ def test_0_size(self):
+ # Check that all kinds of 0-sized arrays work
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
+ res, res_v = linalg.eigh(a)
+ assert_(res_v.dtype.type is np.float64)
+ assert_(res.dtype.type is np.float64)
+ assert_equal(a.shape, res_v.shape)
+ assert_equal((0, 1), res.shape)
+        # This is just for documentation; it might make sense to change:
+        assert_(isinstance(res, np.ndarray))
+
+ a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
+ res, res_v = linalg.eigh(a)
+ assert_(res_v.dtype.type is np.complex64)
+ assert_(res.dtype.type is np.float32)
+ assert_equal(a.shape, res_v.shape)
+ assert_equal((0,), res.shape)
+        # This is just for documentation; it might make sense to change:
+        assert_(isinstance(res, np.ndarray))
+
+
+class _TestNormBase:
+ dt = None
+ dec = None
+
+ @staticmethod
+ def check_dtype(x, res):
+ if issubclass(x.dtype.type, np.inexact):
+ assert_equal(res.dtype, x.real.dtype)
+ else:
+ # For integer input, don't have to test float precision of output.
+ assert_(issubclass(res.dtype.type, np.floating))
+
+
+class _TestNormGeneral(_TestNormBase):
+
+ def test_empty(self):
+ assert_equal(norm([]), 0.0)
+ assert_equal(norm(array([], dtype=self.dt)), 0.0)
+ assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
+
+ def test_vector_return_type(self):
+ a = np.array([1, 0, 1])
+
+ exact_types = np.typecodes['AllInteger']
+ inexact_types = np.typecodes['AllFloat']
+
+ all_types = exact_types + inexact_types
+
+ for each_type in all_types:
+ at = a.astype(each_type)
+
+ an = norm(at, -np.inf)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 0.0)
+
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "divide by zero encountered")
+ an = norm(at, -1)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 0.0)
+
+ an = norm(at, 0)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 2)
+
+ an = norm(at, 1)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 2)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))
+
+ an = norm(at, 4)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))
+
+ an = norm(at, np.inf)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 1.0)
+
+ def test_vector(self):
+ a = [1, 2, 3, 4]
+ b = [-1, -2, -3, -4]
+ c = [-1, 2, -3, 4]
+
+ def _test(v):
+ np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, inf), 4.0,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, -inf), 1.0,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, 1), 10.0,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
+ decimal=self.dec)
+ np.testing.assert_almost_equal(norm(v, 0), 4,
+ decimal=self.dec)
+
+ for v in (a, b, c,):
+ _test(v)
+
+ for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
+ array(c, dtype=self.dt)):
+ _test(v)
+
+ def test_axis(self):
+ # Vector norms.
+ # Compare the use of `axis` with computing the norm of each row
+ # or column separately.
+ A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
+ expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
+ assert_almost_equal(norm(A, ord=order, axis=0), expected0)
+ expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
+ assert_almost_equal(norm(A, ord=order, axis=1), expected1)
+
+ # Matrix norms.
+ B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
+ nd = B.ndim
+ for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
+ for axis in itertools.combinations(range(-nd, nd), 2):
+ row_axis, col_axis = axis
+ if row_axis < 0:
+ row_axis += nd
+ if col_axis < 0:
+ col_axis += nd
+ if row_axis == col_axis:
+ assert_raises(ValueError, norm, B, ord=order, axis=axis)
+ else:
+ n = norm(B, ord=order, axis=axis)
+
+ # The logic using k_index only works for nd = 3.
+ # This has to be changed if nd is increased.
+ k_index = nd - (row_axis + col_axis)
+ if row_axis < col_axis:
+ expected = [norm(B[:].take(k, axis=k_index), ord=order)
+ for k in range(B.shape[k_index])]
+ else:
+ expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
+ for k in range(B.shape[k_index])]
+ assert_almost_equal(n, expected)
+
+ def test_keepdims(self):
+ A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
+
+ allclose_err = 'order {0}, axis = {1}'
+ shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
+
+ # check the order=None, axis=None case
+ expected = norm(A, ord=None, axis=None)
+ found = norm(A, ord=None, axis=None, keepdims=True)
+ assert_allclose(np.squeeze(found), expected,
+ err_msg=allclose_err.format(None, None))
+ expected_shape = (1, 1, 1)
+ assert_(found.shape == expected_shape,
+ shape_err.format(found.shape, expected_shape, None, None))
+
+ # Vector norms.
+ for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
+ for k in range(A.ndim):
+ expected = norm(A, ord=order, axis=k)
+ found = norm(A, ord=order, axis=k, keepdims=True)
+ assert_allclose(np.squeeze(found), expected,
+ err_msg=allclose_err.format(order, k))
+ expected_shape = list(A.shape)
+ expected_shape[k] = 1
+ expected_shape = tuple(expected_shape)
+ assert_(found.shape == expected_shape,
+ shape_err.format(found.shape, expected_shape, order, k))
+
+ # Matrix norms.
+ for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
+ for k in itertools.permutations(range(A.ndim), 2):
+ expected = norm(A, ord=order, axis=k)
+ found = norm(A, ord=order, axis=k, keepdims=True)
+ assert_allclose(np.squeeze(found), expected,
+ err_msg=allclose_err.format(order, k))
+ expected_shape = list(A.shape)
+ expected_shape[k[0]] = 1
+ expected_shape[k[1]] = 1
+ expected_shape = tuple(expected_shape)
+ assert_(found.shape == expected_shape,
+ shape_err.format(found.shape, expected_shape, order, k))
+
+
+class _TestNorm2D(_TestNormBase):
+ # Define the part for 2d arrays separately, so we can subclass this
+ # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.
+ array = np.array
+
+ def test_matrix_empty(self):
+ assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)
+
+ def test_matrix_return_type(self):
+ a = self.array([[1, 0, 1], [0, 1, 1]])
+
+ exact_types = np.typecodes['AllInteger']
+
+ # float32, complex64, float64, complex128 types are the only types
+ # allowed by `linalg`, which performs the matrix operations used
+ # within `norm`.
+ inexact_types = 'fdFD'
+
+ all_types = exact_types + inexact_types
+
+ for each_type in all_types:
+ at = a.astype(each_type)
+
+ an = norm(at, -np.inf)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 2.0)
+
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning, "divide by zero encountered")
+ an = norm(at, -1)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, 1)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 2)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 3.0**(1.0/2.0))
+
+ an = norm(at, -2)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 1.0)
+
+ an = norm(at, np.inf)
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'fro')
+ self.check_dtype(at, an)
+ assert_almost_equal(an, 2.0)
+
+ an = norm(at, 'nuc')
+ self.check_dtype(at, an)
+ # Lower bar needed to support low precision floats.
+ # They end up being off by 1 in the 7th place.
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)
+
+ def test_matrix_2x2(self):
+ A = self.array([[1, 3], [5, 7]], dtype=self.dt)
+ assert_almost_equal(norm(A), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 10.0)
+ assert_almost_equal(norm(A, inf), 12.0)
+ assert_almost_equal(norm(A, -inf), 4.0)
+ assert_almost_equal(norm(A, 1), 10.0)
+ assert_almost_equal(norm(A, -1), 6.0)
+ assert_almost_equal(norm(A, 2), 9.1231056256176615)
+ assert_almost_equal(norm(A, -2), 0.87689437438234041)
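+        # (the +-2 norms above are the largest/smallest singular values of A)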
+
+ assert_raises(ValueError, norm, A, 'nofro')
+ assert_raises(ValueError, norm, A, -3)
+ assert_raises(ValueError, norm, A, 0)
+
+ def test_matrix_3x3(self):
+ # This test has been added because the 2x2 example
+ # happened to have equal nuclear norm and induced 1-norm.
+ # The 1/10 scaling factor accommodates the absolute tolerance
+ # used in assert_almost_equal.
+ A = (1 / 10) * \
+ self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
+ assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
+ assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
+ assert_almost_equal(norm(A, inf), 1.1)
+ assert_almost_equal(norm(A, -inf), 0.6)
+ assert_almost_equal(norm(A, 1), 1.0)
+ assert_almost_equal(norm(A, -1), 0.4)
+ assert_almost_equal(norm(A, 2), 0.88722940323461277)
+ assert_almost_equal(norm(A, -2), 0.19456584790481812)
+
+ def test_bad_args(self):
+ # Check that bad arguments raise the appropriate exceptions.
+
+ A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
+ B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
+
+ # Using `axis=<integer>` or passing in a 1-D array implies vector
+ # norms are being computed, so also using `ord='fro'`
+ # or `ord='nuc'` or any other string raises a ValueError.
+ assert_raises(ValueError, norm, A, 'fro', 0)
+ assert_raises(ValueError, norm, A, 'nuc', 0)
+ assert_raises(ValueError, norm, [3, 4], 'fro', None)
+ assert_raises(ValueError, norm, [3, 4], 'nuc', None)
+ assert_raises(ValueError, norm, [3, 4], 'test', None)
+
+ # Similarly, norm should raise an exception when ord is any finite
+ # number other than 1, 2, -1 or -2 when computing matrix norms.
+ for order in [0, 3]:
+ assert_raises(ValueError, norm, A, order, None)
+ assert_raises(ValueError, norm, A, order, (0, 1))
+ assert_raises(ValueError, norm, B, order, (1, 2))
+
+ # Invalid axis
+ assert_raises(np.AxisError, norm, B, None, 3)
+ assert_raises(np.AxisError, norm, B, None, (2, 3))
+ assert_raises(ValueError, norm, B, None, (0, 1, 2))
+
+
+class _TestNorm(_TestNorm2D, _TestNormGeneral):
+ pass
+
+
+class TestNorm_NonSystematic:
+
+ def test_longdouble_norm(self):
+ # Non-regression test: p-norm of longdouble would previously raise
+ # UnboundLocalError.
+ x = np.arange(10, dtype=np.longdouble)
+ old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)
+
+ def test_intmin(self):
+ # Non-regression test: p-norm of signed integer would previously do
+ # float cast and abs in the wrong order.
+ x = np.array([-2 ** 31], dtype=np.int32)
+ old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)
+
+ def test_complex_high_ord(self):
+ # gh-4156
+ d = np.empty((2,), dtype=np.clongdouble)
+ d[0] = 6 + 7j
+ d[1] = -6 + 7j
+ res = 11.615898132184
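+        # == (2 * abs(6 + 7j)**3) ** (1/3) == (2 * 85**1.5) ** (1/3),
+        # i.e. the ord-3 norm of d worked out by hand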
+ old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
+ d = d.astype(np.complex128)
+ old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
+ d = d.astype(np.complex64)
+ old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
+
+
+# Separate definitions so we can use them for matrix tests.
+class _TestNormDoubleBase(_TestNormBase):
+ dt = np.double
+ dec = 12
+
+
+class _TestNormSingleBase(_TestNormBase):
+ dt = np.float32
+ dec = 6
+
+
+class _TestNormInt64Base(_TestNormBase):
+ dt = np.int64
+ dec = 12
+
+
+class TestNormDouble(_TestNorm, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingle(_TestNorm, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64(_TestNorm, _TestNormInt64Base):
+ pass
+
+
+class TestMatrixRank:
+
+ def test_matrix_rank(self):
+ # Full rank matrix
+ assert_equal(4, matrix_rank(np.eye(4)))
+ # rank deficient matrix
+ I = np.eye(4)
+ I[-1, -1] = 0.
+ assert_equal(matrix_rank(I), 3)
+ # All zeros - zero rank
+ assert_equal(matrix_rank(np.zeros((4, 4))), 0)
+ # 1 dimension - rank 1 unless all 0
+ assert_equal(matrix_rank([1, 0, 0, 0]), 1)
+ assert_equal(matrix_rank(np.zeros((4,))), 0)
+ # accepts array-like
+ assert_equal(matrix_rank([1]), 1)
+ # greater than 2 dimensions treated as stacked matrices
+ ms = np.array([I, np.eye(4), np.zeros((4,4))])
+ assert_equal(matrix_rank(ms), np.array([3, 4, 0]))
+ # works on scalar
+ assert_equal(matrix_rank(1), 1)
+
+ def test_symmetric_rank(self):
+ assert_equal(4, matrix_rank(np.eye(4), hermitian=True))
+ assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))
+ assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))
+ # rank deficient matrix
+ I = np.eye(4)
+ I[-1, -1] = 0.
+ assert_equal(3, matrix_rank(I, hermitian=True))
+ # manually supplied tolerance
+ I[-1, -1] = 1e-8
+ assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))
+ assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))
+
+
+def test_reduced_rank():
+ # Test matrices with reduced rank
+ rng = np.random.RandomState(20120714)
+ for i in range(100):
+ # Make a rank deficient matrix
+ X = rng.normal(size=(40, 10))
+ X[:, 0] = X[:, 1] + X[:, 2]
+ # Assert that matrix_rank detected deficiency
+ assert_equal(matrix_rank(X), 9)
+ X[:, 3] = X[:, 4] + X[:, 5]
+ assert_equal(matrix_rank(X), 8)
+
+
+class TestQR:
+    # Define the array class here, so these tests can be rerun on matrices elsewhere.
+ array = np.array
+
+ def check_qr(self, a):
+ # This test expects the argument `a` to be an ndarray or
+ # a subclass of an ndarray of inexact type.
+ a_type = type(a)
+ a_dtype = a.dtype
+ m, n = a.shape
+ k = min(m, n)
+
+ # mode == 'complete'
+ q, r = linalg.qr(a, mode='complete')
+ assert_(q.dtype == a_dtype)
+ assert_(r.dtype == a_dtype)
+ assert_(isinstance(q, a_type))
+ assert_(isinstance(r, a_type))
+ assert_(q.shape == (m, m))
+ assert_(r.shape == (m, n))
+ assert_almost_equal(dot(q, r), a)
+ assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
+ assert_almost_equal(np.triu(r), r)
+
+ # mode == 'reduced'
+ q1, r1 = linalg.qr(a, mode='reduced')
+ assert_(q1.dtype == a_dtype)
+ assert_(r1.dtype == a_dtype)
+ assert_(isinstance(q1, a_type))
+ assert_(isinstance(r1, a_type))
+ assert_(q1.shape == (m, k))
+ assert_(r1.shape == (k, n))
+ assert_almost_equal(dot(q1, r1), a)
+ assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
+ assert_almost_equal(np.triu(r1), r1)
+
+ # mode == 'r'
+ r2 = linalg.qr(a, mode='r')
+ assert_(r2.dtype == a_dtype)
+ assert_(isinstance(r2, a_type))
+ assert_almost_equal(r2, r1)
+
+
+ @pytest.mark.parametrize(["m", "n"], [
+ (3, 0),
+ (0, 3),
+ (0, 0)
+ ])
+ def test_qr_empty(self, m, n):
+ k = min(m, n)
+ a = np.empty((m, n))
+
+ self.check_qr(a)
+
+ h, tau = np.linalg.qr(a, mode='raw')
+ assert_equal(h.dtype, np.double)
+ assert_equal(tau.dtype, np.double)
+ assert_equal(h.shape, (n, m))
+ assert_equal(tau.shape, (k,))
+
+ def test_mode_raw(self):
+ # The factorization is not unique and varies between libraries,
+ # so it is not possible to check against known values. Functional
+ # testing is a possibility, but awaits the exposure of more
+ # of the functions in lapack_lite. Consequently, this test is
+ # very limited in scope. Note that the results are in FORTRAN
+ # order, hence the h arrays are transposed.
+ a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
+
+ # Test double
+ h, tau = linalg.qr(a, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ assert_(h.shape == (2, 3))
+ assert_(tau.shape == (2,))
+
+ h, tau = linalg.qr(a.T, mode='raw')
+ assert_(h.dtype == np.double)
+ assert_(tau.dtype == np.double)
+ assert_(h.shape == (3, 2))
+ assert_(tau.shape == (2,))
+
+ def test_mode_all_but_economic(self):
+ a = self.array([[1, 2], [3, 4]])
+ b = self.array([[1, 2], [3, 4], [5, 6]])
+ for dt in "fd":
+ m1 = a.astype(dt)
+ m2 = b.astype(dt)
+ self.check_qr(m1)
+ self.check_qr(m2)
+ self.check_qr(m2.T)
+
+ for dt in "fd":
+ m1 = 1 + 1j * a.astype(dt)
+ m2 = 1 + 1j * b.astype(dt)
+ self.check_qr(m1)
+ self.check_qr(m2)
+ self.check_qr(m2.T)
+
+ def check_qr_stacked(self, a):
+ # This test expects the argument `a` to be an ndarray or
+ # a subclass of an ndarray of inexact type.
+ a_type = type(a)
+ a_dtype = a.dtype
+ m, n = a.shape[-2:]
+ k = min(m, n)
+
+ # mode == 'complete'
+ q, r = linalg.qr(a, mode='complete')
+ assert_(q.dtype == a_dtype)
+ assert_(r.dtype == a_dtype)
+ assert_(isinstance(q, a_type))
+ assert_(isinstance(r, a_type))
+ assert_(q.shape[-2:] == (m, m))
+ assert_(r.shape[-2:] == (m, n))
+ assert_almost_equal(matmul(q, r), a)
+ I_mat = np.identity(q.shape[-1])
+ stack_I_mat = np.broadcast_to(I_mat,
+ q.shape[:-2] + (q.shape[-1],)*2)
+ assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat)
+ assert_almost_equal(np.triu(r[..., :, :]), r)
+
+ # mode == 'reduced'
+ q1, r1 = linalg.qr(a, mode='reduced')
+ assert_(q1.dtype == a_dtype)
+ assert_(r1.dtype == a_dtype)
+ assert_(isinstance(q1, a_type))
+ assert_(isinstance(r1, a_type))
+ assert_(q1.shape[-2:] == (m, k))
+ assert_(r1.shape[-2:] == (k, n))
+ assert_almost_equal(matmul(q1, r1), a)
+ I_mat = np.identity(q1.shape[-1])
+ stack_I_mat = np.broadcast_to(I_mat,
+ q1.shape[:-2] + (q1.shape[-1],)*2)
+ assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),
+ stack_I_mat)
+ assert_almost_equal(np.triu(r1[..., :, :]), r1)
+
+ # mode == 'r'
+ r2 = linalg.qr(a, mode='r')
+ assert_(r2.dtype == a_dtype)
+ assert_(isinstance(r2, a_type))
+ assert_almost_equal(r2, r1)
+
+ @pytest.mark.parametrize("size", [
+ (3, 4), (4, 3), (4, 4),
+ (3, 0), (0, 3)])
+ @pytest.mark.parametrize("outer_size", [
+ (2, 2), (2,), (2, 3, 4)])
+ @pytest.mark.parametrize("dt", [
+ np.single, np.double,
+ np.csingle, np.cdouble])
+ def test_stacked_inputs(self, outer_size, size, dt):
+
+ A = np.random.normal(size=outer_size + size).astype(dt)
+ B = np.random.normal(size=outer_size + size).astype(dt)
+ self.check_qr_stacked(A)
+ self.check_qr_stacked(A + 1.j*B)
+
+
+class TestCholesky:
+ # TODO: are there no other tests for cholesky?
+
+ @pytest.mark.parametrize(
+ 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
+ )
+ @pytest.mark.parametrize(
+ 'dtype', (np.float32, np.float64, np.complex64, np.complex128)
+ )
+ def test_basic_property(self, shape, dtype):
+ # Check A = L L^H
+ np.random.seed(1)
+ a = np.random.randn(*shape)
+ if np.issubdtype(dtype, np.complexfloating):
+ a = a + 1j*np.random.randn(*shape)
+
+ t = list(range(len(shape)))
+ t[-2:] = -1, -2
+
+ a = np.matmul(a.transpose(t).conj(), a)
+ a = np.asarray(a, dtype=dtype)
+
+ c = np.linalg.cholesky(a)
+
+ b = np.matmul(c, c.transpose(t).conj())
+ with np._no_nep50_warning():
+ atol = 500 * a.shape[0] * np.finfo(dtype).eps
+ assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}')
+
+ def test_0_size(self):
+ class ArraySubclass(np.ndarray):
+ pass
+ a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
+ res = linalg.cholesky(a)
+ assert_equal(a.shape, res.shape)
+ assert_(res.dtype.type is np.float64)
+ # for documentation purposes:
+ assert_(isinstance(res, np.ndarray))
+
+ a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
+ res = linalg.cholesky(a)
+ assert_equal(a.shape, res.shape)
+ assert_(res.dtype.type is np.complex64)
+ assert_(isinstance(res, np.ndarray))
+
+
+def test_byteorder_check():
+ # Byte order check should pass for native order
+ if sys.byteorder == 'little':
+ native = '<'
+ else:
+ native = '>'
+
+ for dtt in (np.float32, np.float64):
+ arr = np.eye(4, dtype=dtt)
+ n_arr = arr.newbyteorder(native)
+ sw_arr = arr.newbyteorder('S').byteswap()
+ assert_equal(arr.dtype.byteorder, '=')
+ for routine in (linalg.inv, linalg.det, linalg.pinv):
+ # Normal call
+ res = routine(arr)
+ # Native but not '='
+ assert_array_equal(res, routine(n_arr))
+ # Swapped
+ assert_array_equal(res, routine(sw_arr))
+
+
+@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+def test_generalized_raise_multiloop():
+ # It should raise an error even if the error doesn't occur in the
+ # last iteration of the ufunc inner loop
+
+ invertible = np.array([[1, 2], [3, 4]])
+ non_invertible = np.array([[1, 1], [1, 1]])
+
+ x = np.zeros([4, 4, 2, 2])[1::2]
+ x[...] = invertible
+ x[0, 0] = non_invertible
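+ # The singular matrix sits in the first inner-loop entry, so the
+ # error has to be carried across the later, invertible iterations.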
+
+ assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
+
+
+def test_xerbla_override():
+ # Check that our xerbla has been successfully linked in. If it is not,
+ # the default xerbla routine is called, which prints a message to stdout
+ # and may, or may not, abort the process depending on the LAPACK package.
+
+ XERBLA_OK = 255
+
+ try:
+ pid = os.fork()
+ except (OSError, AttributeError):
+ # fork failed, or not running on POSIX
+ pytest.skip("Not POSIX or fork failed.")
+
+ if pid == 0:
+ # child; close i/o file handles
+ os.close(1)
+ os.close(0)
+ # Avoid producing core files.
+ import resource
+ resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
+ # These calls may abort.
+ try:
+ np.linalg.lapack_lite.xerbla()
+ except ValueError:
+ pass
+ except Exception:
+ os._exit(os.EX_CONFIG)
+
+ try:
+ a = np.array([[1.]])
+ np.linalg.lapack_lite.dorgqr(
+ 1, 1, 1, a,
+ 0, # <- invalid value
+ a, a, 0, 0)
+ except ValueError as e:
+ if "DORGQR parameter number 5" in str(e):
+ # success; use a distinct exit code to mark it, since a
+ # FORTRAN STOP would itself exit with a success status.
+ os._exit(XERBLA_OK)
+
+ # Did not abort, but our xerbla was not linked in.
+ os._exit(os.EX_CONFIG)
+ else:
+ # parent
+ pid, status = os.wait()
+ if os.WEXITSTATUS(status) != XERBLA_OK:
+ pytest.skip('Numpy xerbla not linked in.')
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+@pytest.mark.slow
+def test_sdot_bug_8577():
+ # Regression test that loading certain other libraries does not
+ # lead to wrong results in float32 linear algebra.
+ #
+ # There's a bug gh-8577 on OSX that can trigger this, and perhaps
+ # there are also other situations in which it occurs.
+ #
+ # Do the check in a separate process.
+
+ bad_libs = ['PyQt5.QtWidgets', 'IPython']
+
+ template = textwrap.dedent("""
+ import sys
+ {before}
+ try:
+ import {bad_lib}
+ except ImportError:
+ sys.exit(0)
+ {after}
+ x = np.ones(2, dtype=np.float32)
+ sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
+ """)
+
+ for bad_lib in bad_libs:
+ code = template.format(before="import numpy as np", after="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+ # Swapped import order
+ code = template.format(after="import numpy as np", before="",
+ bad_lib=bad_lib)
+ subprocess.check_call([sys.executable, "-c", code])
+
+
+class TestMultiDot:
+
+ def test_basic_function_with_three_arguments(self):
+ # multi_dot with three arguments uses a fast hand-coded algorithm to
+ # determine the optimal order. Therefore test it separately.
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+
+ assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
+ assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
+
+ def test_basic_function_with_two_arguments(self):
+ # separate code path with two arguments
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+
+ assert_almost_equal(multi_dot([A, B]), A.dot(B))
+ assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
+
+ def test_basic_function_with_dynamic_programming_optimization(self):
+ # multi_dot with four or more arguments uses the dynamic programming
+ # optimization and therefore deserves a separate test.
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D = np.random.random((2, 1))
+ assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
+
+ def test_vector_as_first_argument(self):
+ # The first argument can be 1-D
+ A1d = np.random.random(2) # 1-D
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D = np.random.random((2, 2))
+
+ # the result should be 1-D
+ assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
+
+ def test_vector_as_last_argument(self):
+ # The last argument can be 1-D
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D1d = np.random.random(2) # 1-D
+
+ # the result should be 1-D
+ assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
+
+ def test_vector_as_first_and_last_argument(self):
+ # The first and last arguments can be 1-D
+ A1d = np.random.random(2) # 1-D
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D1d = np.random.random(2) # 1-D
+
+ # the result should be a scalar
+ assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
+
+ def test_three_arguments_and_out(self):
+ # multi_dot with three arguments uses a fast hand-coded algorithm to
+ # determine the optimal order. Therefore test it separately.
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+
+ out = np.zeros((6, 2))
+ ret = multi_dot([A, B, C], out=out)
+ assert out is ret
+ assert_almost_equal(out, A.dot(B).dot(C))
+ assert_almost_equal(out, np.dot(A, np.dot(B, C)))
+
+ def test_two_arguments_and_out(self):
+ # separate code path with two arguments
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ out = np.zeros((6, 6))
+ ret = multi_dot([A, B], out=out)
+ assert out is ret
+ assert_almost_equal(out, A.dot(B))
+ assert_almost_equal(out, np.dot(A, B))
+
+ def test_dynamic_programming_optimization_and_out(self):
+ # multi_dot with four or more arguments uses the dynamic programming
+ # optimization and therefore deserves a separate test
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D = np.random.random((2, 1))
+ out = np.zeros((6, 1))
+ ret = multi_dot([A, B, C, D], out=out)
+ assert out is ret
+ assert_almost_equal(out, A.dot(B).dot(C).dot(D))
+
+ def test_dynamic_programming_logic(self):
+ # Test for the dynamic programming part
+ # This test is directly taken from Cormen et al.,
+ # "Introduction to Algorithms", page 376.
+ arrays = [np.random.random((30, 35)),
+ np.random.random((35, 15)),
+ np.random.random((15, 5)),
+ np.random.random((5, 10)),
+ np.random.random((10, 20)),
+ np.random.random((20, 25))]
+ m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
+ [0., 0., 2625., 4375., 7125., 10500.],
+ [0., 0., 0., 750., 2500., 5375.],
+ [0., 0., 0., 0., 1000., 3500.],
+ [0., 0., 0., 0., 0., 5000.],
+ [0., 0., 0., 0., 0., 0.]])
+ s_expected = np.array([[0, 1, 1, 3, 3, 3],
+ [0, 0, 2, 3, 3, 3],
+ [0, 0, 0, 3, 3, 3],
+ [0, 0, 0, 0, 4, 5],
+ [0, 0, 0, 0, 0, 5],
+ [0, 0, 0, 0, 0, 0]], dtype=int)
+ s_expected -= 1 # Cormen uses 1-based indexing; Python does not.
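+ # For reference, a sketch of the recurrence these tables encode
+ # (Cormen et al.): with chain dimensions p = [30, 35, 15, 5, 10, 20, 25],
+ # m[i, j] = min over i <= k < j of m[i, k] + m[k+1, j] + p[i-1]*p[k]*p[j],
+ # and s[i, j] records the k attaining that minimum.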
+
+ s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
+
+ # Only the upper triangular part (without the diagonal) is interesting.
+ assert_almost_equal(np.triu(s[:-1, 1:]),
+ np.triu(s_expected[:-1, 1:]))
+ assert_almost_equal(np.triu(m), np.triu(m_expected))
+
+ def test_too_few_input_arrays(self):
+ assert_raises(ValueError, multi_dot, [])
+ assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
+
+
+class TestTensorinv:
+
+ @pytest.mark.parametrize("arr, ind", [
+ (np.ones((4, 6, 8, 2)), 2),
+ (np.ones((3, 3, 2)), 1),
+ ])
+ def test_non_square_handling(self, arr, ind):
+ with assert_raises(LinAlgError):
+ linalg.tensorinv(arr, ind=ind)
+
+ @pytest.mark.parametrize("shape, ind", [
+ # examples from docstring
+ ((4, 6, 8, 3), 2),
+ ((24, 8, 3), 1),
+ ])
+ def test_tensorinv_shape(self, shape, ind):
+ a = np.eye(24)
+ a.shape = shape
+ ainv = linalg.tensorinv(a=a, ind=ind)
+ expected = a.shape[ind:] + a.shape[:ind]
+ actual = ainv.shape
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("ind", [
+ 0, -2,
+ ])
+ def test_tensorinv_ind_limit(self, ind):
+ a = np.eye(24)
+ a.shape = (4, 6, 8, 3)
+ with assert_raises(ValueError):
+ linalg.tensorinv(a=a, ind=ind)
+
+ def test_tensorinv_result(self):
+ # mimic a docstring example
+ a = np.eye(24)
+ a.shape = (24, 8, 3)
+ ainv = linalg.tensorinv(a, ind=1)
+ b = np.ones(24)
+ assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
+
+
+class TestTensorsolve:
+
+ @pytest.mark.parametrize("a, axes", [
+ (np.ones((4, 6, 8, 2)), None),
+ (np.ones((3, 3, 2)), (0, 2)),
+ ])
+ def test_non_square_handling(self, a, axes):
+ with assert_raises(LinAlgError):
+ b = np.ones(a.shape[:2])
+ linalg.tensorsolve(a, b, axes=axes)
+
+ @pytest.mark.parametrize("shape",
+ [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)],
+ )
+ def test_tensorsolve_result(self, shape):
+ a = np.random.randn(*shape)
+ b = np.ones(a.shape[:2])
+ x = np.linalg.tensorsolve(a, b)
+ assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b)
+
+
+def test_unsupported_commontype():
+ # linalg gracefully handles unsupported type
+ arr = np.array([[1, -2], [2, 5]], dtype='float16')
+ with assert_raises_regex(TypeError, "unsupported in linalg"):
+ linalg.cholesky(arr)
+
+
+#@pytest.mark.slow
+#@pytest.mark.xfail(not HAS_LAPACK64, run=False,
+# reason="Numpy not compiled with 64-bit BLAS/LAPACK")
+#@requires_memory(free_bytes=16e9)
+@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")
+def test_blas64_dot():
+ n = 2**32
+ a = np.zeros([1, n], dtype=np.float32)
+ b = np.ones([1, 1], dtype=np.float32)
+ a[0,-1] = 1
+ c = np.dot(b, a)
+ assert_equal(c[0,-1], 1)
+
+
+@pytest.mark.xfail(not HAS_LAPACK64,
+ reason="Numpy not compiled with 64-bit BLAS/LAPACK")
+def test_blas64_geqrf_lwork_smoketest():
+ # Smoke test LAPACK geqrf lwork call with 64-bit integers
+ dtype = np.float64
+ lapack_routine = np.linalg.lapack_lite.dgeqrf
+
+ m = 2**32 + 1
+ n = 2**32 + 1
+ lda = m
+
+ # Dummy arrays, not referenced by the lapack routine, so don't
+ # need to be of the right size
+ a = np.zeros([1, 1], dtype=dtype)
+ work = np.zeros([1], dtype=dtype)
+ tau = np.zeros([1], dtype=dtype)
+
+ # Size query: lwork=-1 asks LAPACK to write the optimal workspace
+ # size into work[0] instead of performing the factorization
+ results = lapack_routine(m, n, a, lda, tau, work, -1, 0)
+ assert_equal(results['info'], 0)
+ assert_equal(results['m'], m)
+ assert_equal(results['n'], n)
+
+ # Should result in an integer of a reasonable size
+ lwork = int(work.item())
+ assert_(2**32 < lwork < 2**42)
diff --git a/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py
new file mode 100644
index 00000000..7ed932bc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/linalg/tests/test_regression.py
@@ -0,0 +1,148 @@
+""" Test functions for linalg module
+"""
+import warnings
+
+import numpy as np
+from numpy import linalg, arange, float64, array, dot, transpose
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_array_less
+)
+
+
+class TestRegression:
+
+ def test_eig_build(self):
+ # Ticket #652
+ rva = array([1.03221168e+02 + 0.j,
+ -1.91843603e+01 + 0.j,
+ -6.04004526e-01 + 15.84422474j,
+ -6.04004526e-01 - 15.84422474j,
+ -1.13692929e+01 + 0.j,
+ -6.57612485e-01 + 10.41755503j,
+ -6.57612485e-01 - 10.41755503j,
+ 1.82126812e+01 + 0.j,
+ 1.06011014e+01 + 0.j,
+ 7.80732773e+00 + 0.j,
+ -7.65390898e-01 + 0.j,
+ 1.51971555e-15 + 0.j,
+ -1.51308713e-15 + 0.j])
+ a = arange(13 * 13, dtype=float64)
+ a.shape = (13, 13)
+ a = a % 17
+ va, ve = linalg.eig(a)
+ va.sort()
+ rva.sort()
+ assert_array_almost_equal(va, rva)
+
+ def test_eigh_build(self):
+ # Ticket 662.
+ rvals = [68.60568999, 89.57756725, 106.67185574]
+
+ cov = array([[77.70273908, 3.51489954, 15.64602427],
+ [3.51489954, 88.97013878, -1.07431931],
+ [15.64602427, -1.07431931, 98.18223512]])
+
+ vals, vecs = linalg.eigh(cov)
+ assert_array_almost_equal(vals, rvals)
+
+ def test_svd_build(self):
+ # Ticket 627.
+ a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
+ m, n = a.shape
+ u, s, vh = linalg.svd(a)
+
+ b = dot(transpose(u[:, n:]), a)
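+ # The trailing columns u[:, n:] span the left null space of a, so
+ # transpose(u[:, n:]) @ a should vanish (a 2x2 zero block here).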
+
+ assert_array_almost_equal(b, np.zeros((2, 2)))
+
+ def test_norm_vector_badarg(self):
+ # Regression for #786: Frobenius norm for vectors raises
+ # ValueError.
+ assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+
+ def test_lapack_endian(self):
+ # For bug #1482
+ a = array([[5.7998084, -2.1825367],
+ [-2.1825367, 9.85910595]], dtype='>f8')
+ b = array(a, dtype='<f8')
+
+ ap = linalg.cholesky(a)
+ bp = linalg.cholesky(b)
+ assert_array_equal(ap, bp)
+
+ def test_large_svd_32bit(self):
+ # See gh-4442, 64bit would require very large/slow matrices.
+ x = np.eye(1000, 66)
+ np.linalg.svd(x)
+
+ def test_svd_no_uv(self):
+ # gh-4733
+ for shape in (3, 4), (4, 4), (4, 3):
+ for t in float, complex:
+ a = np.ones(shape, dtype=t)
+ w = linalg.svd(a, compute_uv=False)
+ c = np.count_nonzero(np.absolute(w) > 0.5)
+ assert_equal(c, 1)
+ assert_equal(np.linalg.matrix_rank(a), 1)
+ assert_array_less(1, np.linalg.norm(a, ord=2))
+
+ def test_norm_object_array(self):
+ # gh-7575
+ testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
+
+ norm = linalg.norm(testvector)
+ assert_array_equal(norm, [0, 1])
+ assert_(norm.dtype == np.dtype('float64'))
+
+ norm = linalg.norm(testvector, ord=1)
+ assert_array_equal(norm, [0, 1])
+ assert_(norm.dtype != np.dtype('float64'))
+
+ norm = linalg.norm(testvector, ord=2)
+ assert_array_equal(norm, [0, 1])
+ assert_(norm.dtype == np.dtype('float64'))
+
+ assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+ assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+ assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ assert_raises((AttributeError, DeprecationWarning),
+ linalg.norm, testvector, ord=0)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-2)
+
+ testmatrix = np.array([[np.array([0, 1]), 0, 0],
+ [0, 0, 0]], dtype=object)
+
+ norm = linalg.norm(testmatrix)
+ assert_array_equal(norm, [0, 1])
+ assert_(norm.dtype == np.dtype('float64'))
+
+ norm = linalg.norm(testmatrix, ord='fro')
+ assert_array_equal(norm, [0, 1])
+ assert_(norm.dtype == np.dtype('float64'))
+
+ assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+ assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+ assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
+
+ def test_lstsq_complex_larger_rhs(self):
+ # gh-9891
+ size = 20
+ n_rhs = 70
+ G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
+ u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
+ b = G.dot(u)
+ # This should work without segmentation fault.
+ u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
+ # check results just in case
+ assert_array_almost_equal(u_lstsq, u)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/__init__.py b/venv/lib/python3.9/site-packages/numpy/ma/__init__.py
new file mode 100644
index 00000000..870cc4ef
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/__init__.py
@@ -0,0 +1,54 @@
+"""
+=============
+Masked Arrays
+=============
+
+Arrays sometimes contain invalid or missing data. When doing operations
+on such arrays, we wish to suppress invalid values, which is the purpose masked
+arrays fulfill (an example of typical use is given below).
+
+For example, examine the following array:
+
+>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
+
+When we try to calculate the mean of the data, the result is undetermined:
+
+>>> np.mean(x)
+nan
+
+The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
+any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
+masked arrays:
+
+>>> m = np.ma.masked_array(x, np.isnan(x))
+>>> m
+masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
+ mask = [False False False True False False False True],
+ fill_value=1e+20)
+
+Here, we construct a masked array that suppresses all ``NaN`` values. We
+may now proceed to calculate the mean of the other values:
+
+>>> np.mean(m)
+2.6666666666666665
+
+.. [1] Not-a-Number, a floating point value that is the result of an
+ invalid operation.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+.. moduleauthor:: Jarrod Millman
+
+"""
+from . import core
+from .core import *
+
+from . import extras
+from .extras import *
+
+__all__ = ['core', 'extras']
+__all__ += core.__all__
+__all__ += extras.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/ma/__init__.pyi
new file mode 100644
index 00000000..7f5cb56a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/__init__.pyi
@@ -0,0 +1,235 @@
+from numpy._pytesttester import PytestTester
+
+from numpy.ma import extras as extras
+
+from numpy.ma.core import (
+ MAError as MAError,
+ MaskError as MaskError,
+ MaskType as MaskType,
+ MaskedArray as MaskedArray,
+ abs as abs,
+ absolute as absolute,
+ add as add,
+ all as all,
+ allclose as allclose,
+ allequal as allequal,
+ alltrue as alltrue,
+ amax as amax,
+ amin as amin,
+ angle as angle,
+ anom as anom,
+ anomalies as anomalies,
+ any as any,
+ append as append,
+ arange as arange,
+ arccos as arccos,
+ arccosh as arccosh,
+ arcsin as arcsin,
+ arcsinh as arcsinh,
+ arctan as arctan,
+ arctan2 as arctan2,
+ arctanh as arctanh,
+ argmax as argmax,
+ argmin as argmin,
+ argsort as argsort,
+ around as around,
+ array as array,
+ asanyarray as asanyarray,
+ asarray as asarray,
+ bitwise_and as bitwise_and,
+ bitwise_or as bitwise_or,
+ bitwise_xor as bitwise_xor,
+ bool_ as bool_,
+ ceil as ceil,
+ choose as choose,
+ clip as clip,
+ common_fill_value as common_fill_value,
+ compress as compress,
+ compressed as compressed,
+ concatenate as concatenate,
+ conjugate as conjugate,
+ convolve as convolve,
+ copy as copy,
+ correlate as correlate,
+ cos as cos,
+ cosh as cosh,
+ count as count,
+ cumprod as cumprod,
+ cumsum as cumsum,
+ default_fill_value as default_fill_value,
+ diag as diag,
+ diagonal as diagonal,
+ diff as diff,
+ divide as divide,
+ empty as empty,
+ empty_like as empty_like,
+ equal as equal,
+ exp as exp,
+ expand_dims as expand_dims,
+ fabs as fabs,
+ filled as filled,
+ fix_invalid as fix_invalid,
+ flatten_mask as flatten_mask,
+ flatten_structured_array as flatten_structured_array,
+ floor as floor,
+ floor_divide as floor_divide,
+ fmod as fmod,
+ frombuffer as frombuffer,
+ fromflex as fromflex,
+ fromfunction as fromfunction,
+ getdata as getdata,
+ getmask as getmask,
+ getmaskarray as getmaskarray,
+ greater as greater,
+ greater_equal as greater_equal,
+ harden_mask as harden_mask,
+ hypot as hypot,
+ identity as identity,
+ ids as ids,
+ indices as indices,
+ inner as inner,
+ innerproduct as innerproduct,
+ isMA as isMA,
+ isMaskedArray as isMaskedArray,
+ is_mask as is_mask,
+ is_masked as is_masked,
+ isarray as isarray,
+ left_shift as left_shift,
+ less as less,
+ less_equal as less_equal,
+ log as log,
+ log10 as log10,
+ log2 as log2,
+ logical_and as logical_and,
+ logical_not as logical_not,
+ logical_or as logical_or,
+ logical_xor as logical_xor,
+ make_mask as make_mask,
+ make_mask_descr as make_mask_descr,
+ make_mask_none as make_mask_none,
+ mask_or as mask_or,
+ masked as masked,
+ masked_array as masked_array,
+ masked_equal as masked_equal,
+ masked_greater as masked_greater,
+ masked_greater_equal as masked_greater_equal,
+ masked_inside as masked_inside,
+ masked_invalid as masked_invalid,
+ masked_less as masked_less,
+ masked_less_equal as masked_less_equal,
+ masked_not_equal as masked_not_equal,
+ masked_object as masked_object,
+ masked_outside as masked_outside,
+ masked_print_option as masked_print_option,
+ masked_singleton as masked_singleton,
+ masked_values as masked_values,
+ masked_where as masked_where,
+ max as max,
+ maximum as maximum,
+ maximum_fill_value as maximum_fill_value,
+ mean as mean,
+ min as min,
+ minimum as minimum,
+ minimum_fill_value as minimum_fill_value,
+ mod as mod,
+ multiply as multiply,
+ mvoid as mvoid,
+ ndim as ndim,
+ negative as negative,
+ nomask as nomask,
+ nonzero as nonzero,
+ not_equal as not_equal,
+ ones as ones,
+ outer as outer,
+ outerproduct as outerproduct,
+ power as power,
+ prod as prod,
+ product as product,
+ ptp as ptp,
+ put as put,
+ putmask as putmask,
+ ravel as ravel,
+ remainder as remainder,
+ repeat as repeat,
+ reshape as reshape,
+ resize as resize,
+ right_shift as right_shift,
+ round as round,
+ round_ as round_,
+ set_fill_value as set_fill_value,
+ shape as shape,
+ sin as sin,
+ sinh as sinh,
+ size as size,
+ soften_mask as soften_mask,
+ sometrue as sometrue,
+ sort as sort,
+ sqrt as sqrt,
+ squeeze as squeeze,
+ std as std,
+ subtract as subtract,
+ sum as sum,
+ swapaxes as swapaxes,
+ take as take,
+ tan as tan,
+ tanh as tanh,
+ trace as trace,
+ transpose as transpose,
+ true_divide as true_divide,
+ var as var,
+ where as where,
+ zeros as zeros,
+)
+
+from numpy.ma.extras import (
+ apply_along_axis as apply_along_axis,
+ apply_over_axes as apply_over_axes,
+ atleast_1d as atleast_1d,
+ atleast_2d as atleast_2d,
+ atleast_3d as atleast_3d,
+ average as average,
+ clump_masked as clump_masked,
+ clump_unmasked as clump_unmasked,
+ column_stack as column_stack,
+ compress_cols as compress_cols,
+ compress_nd as compress_nd,
+ compress_rowcols as compress_rowcols,
+ compress_rows as compress_rows,
+ count_masked as count_masked,
+ corrcoef as corrcoef,
+ cov as cov,
+ diagflat as diagflat,
+ dot as dot,
+ dstack as dstack,
+ ediff1d as ediff1d,
+ flatnotmasked_contiguous as flatnotmasked_contiguous,
+ flatnotmasked_edges as flatnotmasked_edges,
+ hsplit as hsplit,
+ hstack as hstack,
+ isin as isin,
+ in1d as in1d,
+ intersect1d as intersect1d,
+ mask_cols as mask_cols,
+ mask_rowcols as mask_rowcols,
+ mask_rows as mask_rows,
+ masked_all as masked_all,
+ masked_all_like as masked_all_like,
+ median as median,
+ mr_ as mr_,
+ ndenumerate as ndenumerate,
+ notmasked_contiguous as notmasked_contiguous,
+ notmasked_edges as notmasked_edges,
+ polyfit as polyfit,
+ row_stack as row_stack,
+ setdiff1d as setdiff1d,
+ setxor1d as setxor1d,
+ stack as stack,
+ unique as unique,
+ union1d as union1d,
+ vander as vander,
+ vstack as vstack,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/bench.py b/venv/lib/python3.9/site-packages/numpy/ma/bench.py
new file mode 100644
index 00000000..56865683
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/bench.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+
+import timeit
+import numpy
+from numpy import ma  # needed so the timeit setup strings below can import it
+
+
+###############################################################################
+#                               Global variables                              #
+###############################################################################
+
+
+# Small arrays
+xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
+ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
+zs = xs + 1j * ys
+m1 = [[True, False, False], [False, False, True]]
+m2 = [[True, False, True], [False, False, True]]
+nmxs = numpy.ma.array(xs, mask=m1)
+nmys = numpy.ma.array(ys, mask=m2)
+nmzs = numpy.ma.array(zs, mask=m1)
+
+# Big arrays
+xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
+yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
+zl = xl + 1j * yl
+maskx = xl > 0.8
+masky = yl < -0.8
+nmxl = numpy.ma.array(xl, mask=maskx)
+nmyl = numpy.ma.array(yl, mask=masky)
+nmzl = numpy.ma.array(zl, mask=maskx)
+
+
+###############################################################################
+#                                  Functions                                  #
+###############################################################################
+
+
+def timer(s, v='', nloop=500, nrep=3):
+ units = ["s", "ms", "µs", "ns"]
+ scaling = [1, 1e3, 1e6, 1e9]
+ print("%s : %-50s : " % (v, s), end=' ')
+ varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
+ setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
+ Timer = timeit.Timer(stmt=s, setup=setup)
+ best = min(Timer.repeat(nrep, nloop)) / nloop
+ if best > 0.0:
+ order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
+ else:
+ order = 3
+ print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
+ 3,
+ best * scaling[order],
+ units[order]))
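+# Unit-selection sketch: for best = 2.5e-4 s, floor(log10(best)) = -4 and
+# -(-4 // 3) = 2, so the timing above is reported as 250 µs per loop.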
+
+
+def compare_functions_1v(func, nloop=500,
+ xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
+ funcname = func.__name__
+ print("-"*50)
+ print(f'{funcname} on small arrays')
+ module, data = "numpy.ma", "nmxs"
+ timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+
+ print("%s on large arrays" % funcname)
+ module, data = "numpy.ma", "nmxl"
+ timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+ return
+
+def compare_methods(methodname, args, vars='x', nloop=500, test=True,
+ xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
+ print("-"*50)
+ print(f'{methodname} on small arrays')
+ data, ver = f'nm{vars}s', 'numpy.ma'
+ timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
+
+ print("%s on large arrays" % methodname)
+ data, ver = "nm%sl" % vars, 'numpy.ma'
+ timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
+ return
+
+def compare_functions_2v(func, nloop=500, test=True,
+ xs=xs, nmxs=nmxs,
+ ys=ys, nmys=nmys,
+ xl=xl, nmxl=nmxl,
+ yl=yl, nmyl=nmyl):
+ funcname = func.__name__
+ print("-"*50)
+ print(f'{funcname} on small arrays')
+ module, data = "numpy.ma", "nmxs,nmys"
+ timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+
+ print(f'{funcname} on large arrays')
+ module, data = "numpy.ma", "nmxl,nmyl"
+ timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
+ return
+
+
+if __name__ == '__main__':
+ compare_functions_1v(numpy.sin)
+ compare_functions_1v(numpy.log)
+ compare_functions_1v(numpy.sqrt)
+
+ compare_functions_2v(numpy.multiply)
+ compare_functions_2v(numpy.divide)
+ compare_functions_2v(numpy.power)
+
+ compare_methods('ravel', '', nloop=1000)
+ compare_methods('conjugate', '', 'z', nloop=1000)
+ compare_methods('transpose', '', nloop=1000)
+ compare_methods('compressed', '', nloop=1000)
+ compare_methods('__getitem__', '0', nloop=1000)
+ compare_methods('__getitem__', '(0,0)', nloop=1000)
+ compare_methods('__getitem__', '[0,-1]', nloop=1000)
+ compare_methods('__setitem__', '0, 17', nloop=1000, test=False)
+ compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False)
+
+ print("-"*50)
+ print("__setitem__ on small arrays")
+ timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000)
+
+ print("-"*50)
+ print("__setitem__ on large arrays")
+ timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000)
+
+ print("-"*50)
+ print("where on small arrays")
+ timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000)
+ print("-"*50)
+ print("where on large arrays")
+ timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/core.py b/venv/lib/python3.9/site-packages/numpy/ma/core.py
new file mode 100644
index 00000000..acf1a044
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/core.py
@@ -0,0 +1,8404 @@
+"""
+numpy.ma : a package to handle missing or invalid values.
+
+This package was initially written for numarray by Paul F. Dubois
+at Lawrence Livermore National Laboratory.
+In 2006, the package was completely rewritten by Pierre Gerard-Marchant
+(University of Georgia) to make the MaskedArray class a subclass of ndarray,
+and to improve support of structured arrays.
+
+
+Copyright 1999, 2000, 2001 Regents of the University of California.
+Released for unlimited redistribution.
+
+* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
+* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
+ (pgmdevlist_AT_gmail_DOT_com)
+* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+# pylint: disable-msg=E1002
+import builtins
+import inspect
+import operator
+import warnings
+import textwrap
+import re
+from functools import reduce
+
+import numpy as np
+import numpy.core.umath as umath
+import numpy.core.numerictypes as ntypes
+from numpy.core import multiarray as mu
+from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
+from numpy import array as narray
+from numpy.lib.function_base import angle
+from numpy.compat import (
+ getargspec, formatargspec, long, unicode, bytes
+ )
+from numpy import expand_dims
+from numpy.core.numeric import normalize_axis_tuple
+
+
+__all__ = [
+ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
+ 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
+ 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos',
+ 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
+ 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray',
+ 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil',
+ 'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
+ 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
+ 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
+ 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',
+ 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
+ 'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
+ 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
+ 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
+ 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
+ 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
+ 'less', 'less_equal', 'log', 'log10', 'log2',
+ 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
+ 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
+ 'masked_array', 'masked_equal', 'masked_greater',
+ 'masked_greater_equal', 'masked_inside', 'masked_invalid',
+ 'masked_less', 'masked_less_equal', 'masked_not_equal',
+ 'masked_object', 'masked_outside', 'masked_print_option',
+ 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
+ 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
+ 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
+ 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod',
+ 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
+ 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
+ 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
+ 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
+ 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
+ 'var', 'where', 'zeros', 'zeros_like',
+ ]
+
+MaskType = np.bool_
+nomask = MaskType(0)
+
+class MaskedArrayFutureWarning(FutureWarning):
+ pass
+
+def _deprecate_argsort_axis(arr):
+ """
+ Adjust the axis passed to argsort, warning if necessary
+
+ Parameters
+ ----------
+ arr
+ The array which argsort was called on
+
+ np.ma.argsort has a long-term bug where the default of the axis argument
+ is wrong (gh-8701), which now must be kept for backwards compatibility.
+ Thankfully, this only makes a difference when arrays are 2- or more-
+ dimensional, so we only need a warning then.
+ """
+ if arr.ndim <= 1:
+ # no warning needed - but switch to -1 anyway, to avoid surprising
+ # subclasses, which are more likely to implement scalar axes.
+ return -1
+ else:
+ # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
+ warnings.warn(
+ "In the future the default for argsort will be axis=-1, not the "
+ "current None, to match its documentation and np.argsort. "
+ "Explicitly pass -1 or None to silence this warning.",
+ MaskedArrayFutureWarning, stacklevel=3)
+ return None
+
+
+def doc_note(initialdoc, note):
+ """
+ Adds a Notes section to an existing docstring.
+
+ """
+ if initialdoc is None:
+ return
+ if note is None:
+ return initialdoc
+
+ notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc))
+ notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note)
+
+ return ''.join(notesplit[:1] + [notedoc] + notesplit[1:])
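+# Splicing sketch: doc_note("Summary.\n\nNotes\n-----\nOld.", "New") yields
+# the summary, then a fresh Notes section containing "New", followed by the
+# body of the original notes (whose header was consumed by the split above).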
+
+
+def get_object_signature(obj):
+ """
+ Get the signature from obj
+
+ """
+ try:
+ sig = formatargspec(*getargspec(obj))
+ except TypeError:
+ sig = ''
+ return sig
+
+
+###############################################################################
+#                                 Exceptions                                  #
+###############################################################################
+
+
+class MAError(Exception):
+ """
+ Class for masked array related errors.
+
+ """
+ pass
+
+
+class MaskError(MAError):
+ """
+ Class for mask related errors.
+
+ """
+ pass
+
+
+###############################################################################
+#                               Filling options                               #
+###############################################################################
+
+
+# b: boolean - c: complex - f: floats - i: integer - O: object - S: bytes -
+# u: unsigned integer - V: void - U: string
+default_filler = {'b': True,
+ 'c': 1.e20 + 0.0j,
+ 'f': 1.e20,
+ 'i': 999999,
+ 'O': '?',
+ 'S': b'N/A',
+ 'u': 999999,
+ 'V': b'???',
+ 'U': 'N/A'
+ }
+
+# Add datetime64 and timedelta64 types
+for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps",
+ "fs", "as"]:
+ default_filler["M8[" + v + "]"] = np.datetime64("NaT", v)
+ default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v)
+
+float_types_list = [np.half, np.single, np.double, np.longdouble,
+ np.csingle, np.cdouble, np.clongdouble]
+max_filler = ntypes._minvals
+max_filler.update([(k, -np.inf) for k in float_types_list[:4]])
+max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]])
+
+min_filler = ntypes._maxvals
+min_filler.update([(k, +np.inf) for k in float_types_list[:4]])
+min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]])
+
+del float_types_list
+
+def _recursive_fill_value(dtype, f):
+ """
+ Recursively produce a fill value for `dtype`, calling f on scalar dtypes
+ """
+ if dtype.names is not None:
+ # We wrap into `array` here, which ensures we use NumPy cast rules
+ # for integer casts, this allows the use of 99999 as a fill value
+ # for int8.
+ # TODO: This is probably a mess, but should best preserve behavior?
+ vals = tuple(
+ np.array(_recursive_fill_value(dtype[name], f))
+ for name in dtype.names)
+ return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d
+ elif dtype.subdtype:
+ subtype, shape = dtype.subdtype
+ subval = _recursive_fill_value(subtype, f)
+ return np.full(shape, subval)
+ else:
+ return f(dtype)
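+# A minimal sketch (with the default scalar fillers defined above): for a
+# structured dtype such as np.dtype([('a', int), ('b', float)]) the recursion
+# visits each field and yields a void scalar like (999999, 1e+20); for a
+# subarray dtype such as np.dtype(('f8', 3)) it yields np.full(3, 1e+20).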
+
+
+def _get_dtype_of(obj):
+ """ Convert the argument for *_fill_value into a dtype """
+ if isinstance(obj, np.dtype):
+ return obj
+ elif hasattr(obj, 'dtype'):
+ return obj.dtype
+ else:
+ return np.asanyarray(obj).dtype
+
+
+def default_fill_value(obj):
+ """
+ Return the default fill value for the argument object.
+
+ The default filling value depends on the datatype of the input
+ array or the type of the input scalar:
+
+ ======== ========
+ datatype default
+ ======== ========
+ bool True
+ int 999999
+ float 1.e20
+ complex 1.e20+0j
+ object '?'
+ string 'N/A'
+ ======== ========
+
+ For structured types, a structured scalar is returned, with each field the
+ default fill value for its type.
+
+ For subarray types, the fill value is an array of the same size containing
+ the default scalar fill value.
+
+ Parameters
+ ----------
+ obj : ndarray, dtype or scalar
+ The array data-type or scalar for which the default fill value
+ is returned.
+
+ Returns
+ -------
+ fill_value : scalar
+ The default fill value.
+
+ Examples
+ --------
+ >>> np.ma.default_fill_value(1)
+ 999999
+ >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
+ 1e+20
+ >>> np.ma.default_fill_value(np.dtype(complex))
+ (1e+20+0j)
+
+ """
+ def _scalar_fill_value(dtype):
+ if dtype.kind in 'Mm':
+ return default_filler.get(dtype.str[1:], '?')
+ else:
+ return default_filler.get(dtype.kind, '?')
+
+ dtype = _get_dtype_of(obj)
+ return _recursive_fill_value(dtype, _scalar_fill_value)
+
+
+def _extremum_fill_value(obj, extremum, extremum_name):
+
+ def _scalar_fill_value(dtype):
+ try:
+ return extremum[dtype]
+ except KeyError as e:
+ raise TypeError(
+ f"Unsuitable type {dtype} for calculating {extremum_name}."
+ ) from None
+
+ dtype = _get_dtype_of(obj)
+ return _recursive_fill_value(dtype, _scalar_fill_value)
+
+
+def minimum_fill_value(obj):
+ """
+ Return the maximum value that can be represented by the dtype of an object.
+
+ This function is useful for calculating a fill value suitable for
+ taking the minimum of an array with a given dtype.
+
+ Parameters
+ ----------
+ obj : ndarray, dtype or scalar
+ An object that can be queried for its numeric type.
+
+ Returns
+ -------
+ val : scalar
+ The maximum representable value.
+
+ Raises
+ ------
+ TypeError
+ If `obj` isn't a suitable numeric type.
+
+ See Also
+ --------
+ maximum_fill_value : The inverse function.
+ set_fill_value : Set the filling value of a masked array.
+ MaskedArray.fill_value : Return current fill value.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.int8()
+ >>> ma.minimum_fill_value(a)
+ 127
+ >>> a = np.int32()
+ >>> ma.minimum_fill_value(a)
+ 2147483647
+
+ An array of numeric data can also be passed.
+
+ >>> a = np.array([1, 2, 3], dtype=np.int8)
+ >>> ma.minimum_fill_value(a)
+ 127
+ >>> a = np.array([1, 2, 3], dtype=np.float32)
+ >>> ma.minimum_fill_value(a)
+ inf
+
+ """
+ return _extremum_fill_value(obj, min_filler, "minimum")
+
+
+def maximum_fill_value(obj):
+ """
+ Return the minimum value that can be represented by the dtype of an object.
+
+ This function is useful for calculating a fill value suitable for
+ taking the maximum of an array with a given dtype.
+
+ Parameters
+ ----------
+ obj : ndarray, dtype or scalar
+ An object that can be queried for its numeric type.
+
+ Returns
+ -------
+ val : scalar
+ The minimum representable value.
+
+ Raises
+ ------
+ TypeError
+ If `obj` isn't a suitable numeric type.
+
+ See Also
+ --------
+ minimum_fill_value : The inverse function.
+ set_fill_value : Set the filling value of a masked array.
+ MaskedArray.fill_value : Return current fill value.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.int8()
+ >>> ma.maximum_fill_value(a)
+ -128
+ >>> a = np.int32()
+ >>> ma.maximum_fill_value(a)
+ -2147483648
+
+ An array of numeric data can also be passed.
+
+ >>> a = np.array([1, 2, 3], dtype=np.int8)
+ >>> ma.maximum_fill_value(a)
+ -128
+ >>> a = np.array([1, 2, 3], dtype=np.float32)
+ >>> ma.maximum_fill_value(a)
+ -inf
+
+ """
+ return _extremum_fill_value(obj, max_filler, "maximum")
+
+
+def _recursive_set_fill_value(fillvalue, dt):
+ """
+ Create a fill value for a structured dtype.
+
+ Parameters
+ ----------
+ fillvalue : scalar or array_like
+ Scalar or array representing the fill value. If it is of shorter
+ length than the number of fields in dt, it will be resized.
+ dt : dtype
+ The structured dtype for which to create the fill value.
+
+ Returns
+ -------
+ val : tuple
+ A tuple of values corresponding to the structured fill value.
+
+ """
+ fillvalue = np.resize(fillvalue, len(dt.names))
+ output_value = []
+ for (fval, name) in zip(fillvalue, dt.names):
+ cdtype = dt[name]
+ if cdtype.subdtype:
+ cdtype = cdtype.subdtype[0]
+
+ if cdtype.names is not None:
+ output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
+ else:
+ output_value.append(np.array(fval, dtype=cdtype).item())
+ return tuple(output_value)
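+# Sketch: for dt = np.dtype([('x', int), ('y', float)]),
+# _recursive_set_fill_value(0, dt) resizes the scalar over both fields and
+# returns (0, 0.0).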
+
+
+def _check_fill_value(fill_value, ndtype):
+ """
+ Private function validating the given `fill_value` for the given dtype.
+
+ If fill_value is None, it is set to the default corresponding to the dtype.
+
+ If fill_value is not None, its value is forced to the given dtype.
+
+ The result is always a 0d array.
+
+ """
+ ndtype = np.dtype(ndtype)
+ if fill_value is None:
+ fill_value = default_fill_value(ndtype)
+ elif ndtype.names is not None:
+ if isinstance(fill_value, (ndarray, np.void)):
+ try:
+ fill_value = np.array(fill_value, copy=False, dtype=ndtype)
+ except ValueError as e:
+ err_msg = "Unable to transform %s to dtype %s"
+ raise ValueError(err_msg % (fill_value, ndtype)) from e
+ else:
+ fill_value = np.asarray(fill_value, dtype=object)
+ fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
+ dtype=ndtype)
+ else:
+ if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'):
+ # Note this check doesn't work if fill_value is not a scalar
+ err_msg = "Cannot set fill value of string with array of dtype %s"
+ raise TypeError(err_msg % ndtype)
+ else:
+ # In case we want to convert 1e20 to int.
+ # Also in case of converting string arrays.
+ try:
+ fill_value = np.array(fill_value, copy=False, dtype=ndtype)
+ except (OverflowError, ValueError) as e:
+ # Raise TypeError instead of OverflowError or ValueError.
+ # OverflowError is seldom used, and the real problem here is
+ # that the passed fill_value is not compatible with the ndtype.
+ err_msg = "Cannot convert fill_value %s to dtype %s"
+ raise TypeError(err_msg % (fill_value, ndtype)) from e
+ return np.array(fill_value)
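+# Behavior sketch (values follow default_filler above):
+# _check_fill_value(None, int) returns np.array(999999) (a 0d array), while
+# _check_fill_value("oops", float) raises TypeError, since string fill
+# values are only accepted for 'OSVU' dtypes.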
+
+
+def set_fill_value(a, fill_value):
+ """
+ Set the filling value of a, if a is a masked array.
+
+ This function changes the fill value of the masked array `a` in place.
+ If `a` is not a masked array, the function returns silently, without
+ doing anything.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ fill_value : dtype
+ Filling value. A consistency test is performed to make sure
+ the value is compatible with the dtype of `a`.
+
+ Returns
+ -------
+ None
+ Nothing returned by this function.
+
+ See Also
+ --------
+ maximum_fill_value : Return the fill value suitable for taking the maximum of an array.
+ MaskedArray.fill_value : Return current fill value.
+ MaskedArray.set_fill_value : Equivalent method.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(5)
+ >>> a
+ array([0, 1, 2, 3, 4])
+ >>> a = ma.masked_where(a < 3, a)
+ >>> a
+ masked_array(data=[--, --, --, 3, 4],
+ mask=[ True, True, True, False, False],
+ fill_value=999999)
+ >>> ma.set_fill_value(a, -999)
+ >>> a
+ masked_array(data=[--, --, --, 3, 4],
+ mask=[ True, True, True, False, False],
+ fill_value=-999)
+
+ Nothing happens if `a` is not a masked array.
+
+ >>> a = list(range(5))
+ >>> a
+ [0, 1, 2, 3, 4]
+ >>> ma.set_fill_value(a, 100)
+ >>> a
+ [0, 1, 2, 3, 4]
+ >>> a = np.arange(5)
+ >>> a
+ array([0, 1, 2, 3, 4])
+ >>> ma.set_fill_value(a, 100)
+ >>> a
+ array([0, 1, 2, 3, 4])
+
+ """
+ if isinstance(a, MaskedArray):
+ a.set_fill_value(fill_value)
+ return
+
+
+def get_fill_value(a):
+ """
+ Return the filling value of a, if any. Otherwise, returns the
+ default filling value for that type.
+
+ """
+ if isinstance(a, MaskedArray):
+ result = a.fill_value
+ else:
+ result = default_fill_value(a)
+ return result
+
+
+def common_fill_value(a, b):
+ """
+ Return the common filling value of two masked arrays, if any.
+
+ If ``a.fill_value == b.fill_value``, return the fill value,
+ otherwise return None.
+
+ Parameters
+ ----------
+ a, b : MaskedArray
+ The masked arrays for which to compare fill values.
+
+ Returns
+ -------
+ fill_value : scalar or None
+ The common fill value, or None.
+
+ Examples
+ --------
+ >>> x = np.ma.array([0, 1.], fill_value=3)
+ >>> y = np.ma.array([0, 1.], fill_value=3)
+ >>> np.ma.common_fill_value(x, y)
+ 3.0
+
+ """
+ t1 = get_fill_value(a)
+ t2 = get_fill_value(b)
+ if t1 == t2:
+ return t1
+ return None
+
+
+def filled(a, fill_value=None):
+ """
+ Return input as an array with masked data replaced by a fill value.
+
+ If `a` is not a `MaskedArray`, `a` itself is returned.
+ If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
+ ``a.fill_value``.
+
+ Parameters
+ ----------
+ a : MaskedArray or array_like
+ An input object.
+ fill_value : array_like, optional.
+ Can be scalar or non-scalar. If non-scalar, the
+ resulting filled array should be broadcastable
+ over input array. Default is None.
+
+ Returns
+ -------
+ a : ndarray
+ The filled array.
+
+ See Also
+ --------
+ compressed
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
+ ... [1, 0, 0],
+ ... [0, 0, 0]])
+ >>> x.filled()
+ array([[999999, 1, 2],
+ [999999, 4, 5],
+ [ 6, 7, 8]])
+ >>> x.filled(fill_value=333)
+ array([[333, 1, 2],
+ [333, 4, 5],
+ [ 6, 7, 8]])
+ >>> x.filled(fill_value=np.arange(3))
+ array([[0, 1, 2],
+ [0, 4, 5],
+ [6, 7, 8]])
+
+ """
+ if hasattr(a, 'filled'):
+ return a.filled(fill_value)
+
+ elif isinstance(a, ndarray):
+ # Should we check for contiguity ? and a.flags['CONTIGUOUS']:
+ return a
+ elif isinstance(a, dict):
+ return np.array(a, 'O')
+ else:
+ return np.array(a)
+
+
+def get_masked_subclass(*arrays):
+ """
+ Return the youngest subclass of MaskedArray from a list of (masked) arrays.
+
+ In case of siblings, the first listed takes over.
+
+ """
+ if len(arrays) == 1:
+ arr = arrays[0]
+ if isinstance(arr, MaskedArray):
+ rcls = type(arr)
+ else:
+ rcls = MaskedArray
+ else:
+ arrcls = [type(a) for a in arrays]
+ rcls = arrcls[0]
+ if not issubclass(rcls, MaskedArray):
+ rcls = MaskedArray
+ for cls in arrcls[1:]:
+ if issubclass(cls, rcls):
+ rcls = cls
+ # Don't return MaskedConstant as result: revert to MaskedArray
+ if rcls.__name__ == 'MaskedConstant':
+ return MaskedArray
+ return rcls
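+# Resolution sketch (MySub is a hypothetical subclass):
+#   class MySub(MaskedArray): ...
+#   get_masked_subclass(MySub([1]), masked_array([2]))  -> MySub
+#   get_masked_subclass(np.arange(3))                   -> MaskedArray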
+
+
+def getdata(a, subok=True):
+ """
+ Return the data of a masked array as an ndarray.
+
+ Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
+ else return `a` as an ndarray or a subclass thereof (depending on `subok`).
+
+ Parameters
+ ----------
+ a : array_like
+ Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
+ subok : bool
+ Whether to force the output to be a `pure` ndarray (False) or to
+ return a subclass of ndarray if appropriate (True, default).
+
+ See Also
+ --------
+ getmask : Return the mask of a masked array, or nomask.
+ getmaskarray : Return the mask of a masked array, or full array of False.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+ >>> a
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=2)
+ >>> ma.getdata(a)
+ array([[1, 2],
+ [3, 4]])
+
+ Equivalently use the ``MaskedArray`` `data` attribute.
+
+ >>> a.data
+ array([[1, 2],
+ [3, 4]])
+
+ """
+ try:
+ data = a._data
+ except AttributeError:
+ data = np.array(a, copy=False, subok=subok)
+ if not subok:
+ return data.view(ndarray)
+ return data
+
+
+get_data = getdata
+
+
+def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
+ """
+ Return input with invalid data masked and replaced by a fill value.
+
+ Invalid data means values of `nan`, `inf`, etc.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array, a (subclass of) ndarray.
+ mask : sequence, optional
+ Mask. Must be convertible to an array of booleans with the same
+ shape as `data`. True indicates masked (i.e. invalid) data.
+ copy : bool, optional
+ Whether to use a copy of `a` (True) or to fix `a` in place (False).
+ Default is True.
+ fill_value : scalar, optional
+ Value used for fixing invalid data. Default is None, in which case
+ the ``a.fill_value`` is used.
+
+ Returns
+ -------
+ b : MaskedArray
+ The input array with invalid entries fixed.
+
+ Notes
+ -----
+ A copy is performed by default.
+
+ Examples
+ --------
+ >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
+ >>> x
+ masked_array(data=[--, -1.0, nan, inf],
+ mask=[ True, False, False, False],
+ fill_value=1e+20)
+ >>> np.ma.fix_invalid(x)
+ masked_array(data=[--, -1.0, --, --],
+ mask=[ True, False, True, True],
+ fill_value=1e+20)
+
+ >>> fixed = np.ma.fix_invalid(x)
+ >>> fixed.data
+ array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])
+ >>> x.data
+ array([ 1., -1., nan, inf])
+
+ """
+ a = masked_array(a, copy=copy, mask=mask, subok=True)
+ invalid = np.logical_not(np.isfinite(a._data))
+ if not invalid.any():
+ return a
+ a._mask |= invalid
+ if fill_value is None:
+ fill_value = a.fill_value
+ a._data[invalid] = fill_value
+ return a
+
+def is_string_or_list_of_strings(val):
+ return (isinstance(val, str) or
+ (isinstance(val, list) and val and
+ builtins.all(isinstance(s, str) for s in val)))
+
+###############################################################################
+#                                   Ufuncs                                    #
+###############################################################################
+
+
+ufunc_domain = {}
+ufunc_fills = {}
+
+
+class _DomainCheckInterval:
+ """
+ Define a valid interval, so that :
+
+ ``domain_check_interval(a,b)(x) == True`` where
+ ``x < a`` or ``x > b``.
+
+ """
+
+ def __init__(self, a, b):
+ "domain_check_interval(a,b)(x) = true where x < a or y > b"
+ if a > b:
+ (a, b) = (b, a)
+ self.a = a
+ self.b = b
+
+ def __call__(self, x):
+ "Execute the call behavior."
+ # nans at masked positions cause RuntimeWarnings, even though
+ # they are masked. To avoid this we suppress warnings.
+ with np.errstate(invalid='ignore'):
+ return umath.logical_or(umath.greater(x, self.b),
+ umath.less(x, self.a))
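+# Quick sketch: _DomainCheckInterval(-1, 1)(np.array([-2., 0., 2.]))
+# -> array([ True, False,  True]); values outside [-1, 1] are flagged.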
+
+
+class _DomainTan:
+ """
+ Define a valid interval for the `tan` function, so that:
+
+ ``domain_tan(eps)(x) == True`` where ``abs(cos(x)) < eps``
+
+ """
+
+ def __init__(self, eps):
+ "domain_tan(eps) = true where abs(cos(x)) < eps)"
+ self.eps = eps
+
+ def __call__(self, x):
+ "Executes the call behavior."
+ with np.errstate(invalid='ignore'):
+ return umath.less(umath.absolute(umath.cos(x)), self.eps)
+
+
+class _DomainSafeDivide:
+ """
+ Define a domain for safe division.
+
+ """
+
+ def __init__(self, tolerance=None):
+ self.tolerance = tolerance
+
+ def __call__(self, a, b):
+ # Delay the selection of the tolerance to here in order to reduce numpy
+ # import times. The calculation of these parameters is a substantial
+ # component of numpy's import time.
+ if self.tolerance is None:
+ self.tolerance = np.finfo(float).tiny
+ # don't call ma ufuncs from __array_wrap__ which would fail for scalars
+ a, b = np.asarray(a), np.asarray(b)
+ with np.errstate(invalid='ignore'):
+ return umath.absolute(a) * self.tolerance >= umath.absolute(b)
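+# Intuition sketch: with the default tolerance (np.finfo(float).tiny),
+# _DomainSafeDivide()(1.0, 1e-320) flags the pair because
+# abs(1.0) * tiny >= abs(1e-320), i.e. 1.0 / 1e-320 would overflow.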
+
+
+class _DomainGreater:
+ """
+ DomainGreater(v)(x) is True where x <= v.
+
+ """
+
+ def __init__(self, critical_value):
+ "DomainGreater(v)(x) = true where x <= v"
+ self.critical_value = critical_value
+
+ def __call__(self, x):
+ "Executes the call behavior."
+ with np.errstate(invalid='ignore'):
+ return umath.less_equal(x, self.critical_value)
+
+
+class _DomainGreaterEqual:
+ """
+ DomainGreaterEqual(v)(x) is True where x < v.
+
+ """
+
+ def __init__(self, critical_value):
+ "DomainGreaterEqual(v)(x) = true where x < v"
+ self.critical_value = critical_value
+
+ def __call__(self, x):
+ "Executes the call behavior."
+ with np.errstate(invalid='ignore'):
+ return umath.less(x, self.critical_value)
+
+
+class _MaskedUFunc:
+ def __init__(self, ufunc):
+ self.f = ufunc
+ self.__doc__ = ufunc.__doc__
+ self.__name__ = ufunc.__name__
+
+ def __str__(self):
+ return f"Masked version of {self.f}"
+
+
+class _MaskedUnaryOperation(_MaskedUFunc):
+ """
+ Defines masked version of unary operations, where invalid values are
+ pre-masked.
+
+ Parameters
+ ----------
+ mufunc : callable
+ The function for which to define a masked version. Made available
+ as ``_MaskedUnaryOperation.f``.
+ fill : scalar, optional
+ Filling value, default is 0.
+ domain : class instance
+ Domain for the function. Should be one of the ``_Domain*``
+ classes. Default is None.
+
+ """
+
+ def __init__(self, mufunc, fill=0, domain=None):
+ super().__init__(mufunc)
+ self.fill = fill
+ self.domain = domain
+ ufunc_domain[mufunc] = domain
+ ufunc_fills[mufunc] = fill
+
+ def __call__(self, a, *args, **kwargs):
+ """
+ Execute the call behavior.
+
+ """
+ d = getdata(a)
+ # Deal with domain
+ if self.domain is not None:
+ # Case 1.1. : Domained function
+ # nans at masked positions cause RuntimeWarnings, even though
+ # they are masked. To avoid this we suppress warnings.
+ with np.errstate(divide='ignore', invalid='ignore'):
+ result = self.f(d, *args, **kwargs)
+ # Make a mask
+ m = ~umath.isfinite(result)
+ m |= self.domain(d)
+ m |= getmask(a)
+ else:
+ # Case 1.2. : Function without a domain
+ # Get the result and the mask
+ with np.errstate(divide='ignore', invalid='ignore'):
+ result = self.f(d, *args, **kwargs)
+ m = getmask(a)
+
+ if not result.ndim:
+ # Case 2.1. : The result is scalar
+ if m:
+ return masked
+ return result
+
+ if m is not nomask:
+ # Case 2.2. The result is an array
+ # We need to fill the invalid data back with the input. Now,
+ # that's plain silly: in C, we would just skip the element and
+ # keep the original, but we have to do it this way in Python.
+
+ # In case result has a lower dtype than the inputs (as in
+ # equal)
+ try:
+ np.copyto(result, d, where=m)
+ except TypeError:
+ pass
+ # Transform to a (subclass of) MaskedArray
+ masked_result = result.view(get_masked_subclass(a))
+ masked_result._mask = m
+ masked_result._update_from(a)
+ return masked_result
+
+
+class _MaskedBinaryOperation(_MaskedUFunc):
+ """
+ Define masked version of binary operations, where invalid
+ values are pre-masked.
+
+ Parameters
+ ----------
+ mbfunc : function
+ The function for which to define a masked version. Made available
+ as ``_MaskedBinaryOperation.f``.
+ fillx : scalar, optional
+ Filling value for the first argument, default is 0.
+ filly : scalar, optional
+ Filling value for the second argument, default is 0.
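+
+ For illustration, ``add`` (defined later in this file) is an instance
+ of this class; masked entries in either operand propagate to the
+ result:
+
+ >>> import numpy.ma as ma
+ >>> a = ma.array([1, 2, 3], mask=[False, True, False])
+ >>> ma.add(a, a).mask
+ array([False,  True, False])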
+
+ """
+
+ def __init__(self, mbfunc, fillx=0, filly=0):
+ """
+ mbfunc(fillx, filly) must be defined.
+
+ mbfunc(x, filly) = x for all x to enable reduce.
+
+ """
+ super().__init__(mbfunc)
+ self.fillx = fillx
+ self.filly = filly
+ ufunc_domain[mbfunc] = None
+ ufunc_fills[mbfunc] = (fillx, filly)
+
+ def __call__(self, a, b, *args, **kwargs):
+ """
+ Execute the call behavior.
+
+ """
+ # Get the data, as ndarray
+ (da, db) = (getdata(a), getdata(b))
+ # Get the result
+ with np.errstate(divide='ignore', invalid='ignore'):
+ result = self.f(da, db, *args, **kwargs)
+ # Get the mask for the result
+ (ma, mb) = (getmask(a), getmask(b))
+ if ma is nomask:
+ if mb is nomask:
+ m = nomask
+ else:
+ m = umath.logical_or(getmaskarray(a), mb)
+ elif mb is nomask:
+ m = umath.logical_or(ma, getmaskarray(b))
+ else:
+ m = umath.logical_or(ma, mb)
+
+ # Case 1. : scalar
+ if not result.ndim:
+ if m:
+ return masked
+ return result
+
+ # Case 2. : array
+ # Revert result to da where masked
+ if m is not nomask and m.any():
+ # any errors, just abort; impossible to guarantee masked values
+ try:
+ np.copyto(result, da, casting='unsafe', where=m)
+ except Exception:
+ pass
+
+ # Transforms to a (subclass of) MaskedArray
+ masked_result = result.view(get_masked_subclass(a, b))
+ masked_result._mask = m
+ if isinstance(a, MaskedArray):
+ masked_result._update_from(a)
+ elif isinstance(b, MaskedArray):
+ masked_result._update_from(b)
+ return masked_result
+
+ def reduce(self, target, axis=0, dtype=None):
+ """
+ Reduce `target` along the given `axis`.
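+
+ A small illustrative example, using the module-level ``add`` instance;
+ masked entries are replaced by the ``filly`` fill value (0 for ``add``)
+ before reducing:
+
+ >>> import numpy.ma as ma
+ >>> a = ma.array([1, 2, 3], mask=[False, True, False])
+ >>> ma.add.reduce(a)
+ 4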
+
+ """
+ tclass = get_masked_subclass(target)
+ m = getmask(target)
+ t = filled(target, self.filly)
+ if t.shape == ():
+ t = t.reshape(1)
+ if m is not nomask:
+ m = make_mask(m, copy=True)
+ m.shape = (1,)
+
+ if m is nomask:
+ tr = self.f.reduce(t, axis)
+ mr = nomask
+ else:
+ tr = self.f.reduce(t, axis, dtype=dtype)
+ mr = umath.logical_and.reduce(m, axis)
+
+ if not tr.shape:
+ if mr:
+ return masked
+ else:
+ return tr
+ masked_tr = tr.view(tclass)
+ masked_tr._mask = mr
+ return masked_tr
+
+ def outer(self, a, b):
+ """
+ Return the function applied to the outer product of a and b.
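+
+ Illustrative example with unmasked inputs, using the module-level
+ ``add`` instance:
+
+ >>> import numpy.ma as ma
+ >>> ma.add.outer([1, 2], [10, 20])
+ masked_array(
+ data=[[11, 21],
+ [12, 22]],
+ mask=False,
+ fill_value=999999)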
+
+ """
+ (da, db) = (getdata(a), getdata(b))
+ d = self.f.outer(da, db)
+ ma = getmask(a)
+ mb = getmask(b)
+ if ma is nomask and mb is nomask:
+ m = nomask
+ else:
+ ma = getmaskarray(a)
+ mb = getmaskarray(b)
+ m = umath.logical_or.outer(ma, mb)
+ if (not m.ndim) and m:
+ return masked
+ if m is not nomask:
+ np.copyto(d, da, where=m)
+ if not d.shape:
+ return d
+ masked_d = d.view(get_masked_subclass(a, b))
+ masked_d._mask = m
+ return masked_d
+
+ def accumulate(self, target, axis=0):
+ """Accumulate `target` along `axis` after filling with y fill
+ value.
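+
+ For illustration, with the module-level ``add`` instance:
+
+ >>> import numpy.ma as ma
+ >>> ma.add.accumulate([1, 2, 3])
+ masked_array(data=[1, 3, 6],
+ mask=False,
+ fill_value=999999)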
+
+ """
+ tclass = get_masked_subclass(target)
+ t = filled(target, self.filly)
+ result = self.f.accumulate(t, axis)
+ masked_result = result.view(tclass)
+ return masked_result
+
+
+class _DomainedBinaryOperation(_MaskedUFunc):
+ """
+ Define binary operations that have a domain, like divide.
+
+ They have no reduce, outer or accumulate.
+
+ Parameters
+ ----------
+ mbfunc : function
+ The function for which to define a masked version. Made available
+ as ``_DomainedBinaryOperation.f``.
+ domain : class instance
+ Default domain for the function. Should be one of the ``_Domain*``
+ classes.
+ fillx : scalar, optional
+ Filling value for the first argument, default is 0.
+ filly : scalar, optional
+ Filling value for the second argument, default is 0.
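+
+ For illustration, ``divide`` (defined later in this file) is an
+ instance of this class; division by zero falls outside the
+ ``_DomainSafeDivide`` domain and is masked:
+
+ >>> import numpy.ma as ma
+ >>> ma.divide([1.0, 2.0], [2.0, 0.0])
+ masked_array(data=[0.5, --],
+ mask=[False,  True],
+ fill_value=1e+20)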
+
+ """
+
+ def __init__(self, dbfunc, domain, fillx=0, filly=0):
+ """abfunc(fillx, filly) must be defined.
+ abfunc(x, filly) = x for all x to enable reduce.
+ """
+ super().__init__(dbfunc)
+ self.domain = domain
+ self.fillx = fillx
+ self.filly = filly
+ ufunc_domain[dbfunc] = domain
+ ufunc_fills[dbfunc] = (fillx, filly)
+
+ def __call__(self, a, b, *args, **kwargs):
+ "Execute the call behavior."
+ # Get the data
+ (da, db) = (getdata(a), getdata(b))
+ # Get the result
+ with np.errstate(divide='ignore', invalid='ignore'):
+ result = self.f(da, db, *args, **kwargs)
+ # Get the mask as a combination of the source masks and invalid
+ m = ~umath.isfinite(result)
+ m |= getmask(a)
+ m |= getmask(b)
+ # Apply the domain
+ domain = ufunc_domain.get(self.f, None)
+ if domain is not None:
+ m |= domain(da, db)
+ # Take care of the scalar case first
+ if not m.ndim:
+ if m:
+ return masked
+ else:
+ return result
+ # When the mask is True, put back da if possible
+ # any errors, just abort; impossible to guarantee masked values
+ try:
+ np.copyto(result, 0, casting='unsafe', where=m)
+ # avoid using "*" since this may be overlaid
+ masked_da = umath.multiply(m, da)
+ # only add back if it can be cast safely
+ if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
+ result += masked_da
+ except Exception:
+ pass
+
+ # Transforms to a (subclass of) MaskedArray
+ masked_result = result.view(get_masked_subclass(a, b))
+ masked_result._mask = m
+ if isinstance(a, MaskedArray):
+ masked_result._update_from(a)
+ elif isinstance(b, MaskedArray):
+ masked_result._update_from(b)
+ return masked_result
+
+
+# Unary ufuncs
+exp = _MaskedUnaryOperation(umath.exp)
+conjugate = _MaskedUnaryOperation(umath.conjugate)
+sin = _MaskedUnaryOperation(umath.sin)
+cos = _MaskedUnaryOperation(umath.cos)
+arctan = _MaskedUnaryOperation(umath.arctan)
+arcsinh = _MaskedUnaryOperation(umath.arcsinh)
+sinh = _MaskedUnaryOperation(umath.sinh)
+cosh = _MaskedUnaryOperation(umath.cosh)
+tanh = _MaskedUnaryOperation(umath.tanh)
+abs = absolute = _MaskedUnaryOperation(umath.absolute)
+angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base
+fabs = _MaskedUnaryOperation(umath.fabs)
+negative = _MaskedUnaryOperation(umath.negative)
+floor = _MaskedUnaryOperation(umath.floor)
+ceil = _MaskedUnaryOperation(umath.ceil)
+around = _MaskedUnaryOperation(np.round_)
+logical_not = _MaskedUnaryOperation(umath.logical_not)
+
+# Domained unary ufuncs
+sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
+ _DomainGreaterEqual(0.0))
+log = _MaskedUnaryOperation(umath.log, 1.0,
+ _DomainGreater(0.0))
+log2 = _MaskedUnaryOperation(umath.log2, 1.0,
+ _DomainGreater(0.0))
+log10 = _MaskedUnaryOperation(umath.log10, 1.0,
+ _DomainGreater(0.0))
+tan = _MaskedUnaryOperation(umath.tan, 0.0,
+ _DomainTan(1e-35))
+arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
+ _DomainCheckInterval(-1.0, 1.0))
+arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
+ _DomainCheckInterval(-1.0, 1.0))
+arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
+ _DomainGreaterEqual(1.0))
+arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
+ _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
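+
+ # Illustrative behavior of the domained unary ufuncs above (doctest
+ # style; assumes the default numpy print options):
+ #   >>> import numpy.ma as ma
+ #   >>> ma.sqrt([-1.0, 4.0]).mask
+ #   array([ True, False])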
+
+# Binary ufuncs
+add = _MaskedBinaryOperation(umath.add)
+subtract = _MaskedBinaryOperation(umath.subtract)
+multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
+arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
+equal = _MaskedBinaryOperation(umath.equal)
+equal.reduce = None
+not_equal = _MaskedBinaryOperation(umath.not_equal)
+not_equal.reduce = None
+less_equal = _MaskedBinaryOperation(umath.less_equal)
+less_equal.reduce = None
+greater_equal = _MaskedBinaryOperation(umath.greater_equal)
+greater_equal.reduce = None
+less = _MaskedBinaryOperation(umath.less)
+less.reduce = None
+greater = _MaskedBinaryOperation(umath.greater)
+greater.reduce = None
+logical_and = _MaskedBinaryOperation(umath.logical_and)
+alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
+logical_or = _MaskedBinaryOperation(umath.logical_or)
+sometrue = logical_or.reduce
+logical_xor = _MaskedBinaryOperation(umath.logical_xor)
+bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
+bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
+bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
+hypot = _MaskedBinaryOperation(umath.hypot)
+
+# Domained binary ufuncs
+divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
+true_divide = _DomainedBinaryOperation(umath.true_divide,
+ _DomainSafeDivide(), 0, 1)
+floor_divide = _DomainedBinaryOperation(umath.floor_divide,
+ _DomainSafeDivide(), 0, 1)
+remainder = _DomainedBinaryOperation(umath.remainder,
+ _DomainSafeDivide(), 0, 1)
+fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
+mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
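+
+ # Illustrative behavior of the domained binary ufuncs above:
+ #   >>> import numpy.ma as ma
+ #   >>> ma.remainder([5, 7], [3, 0]).mask
+ #   array([False,  True])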
+
+
+###############################################################################
+# Mask creation functions #
+###############################################################################
+
+
+def _replace_dtype_fields_recursive(dtype, primitive_dtype):
+ "Private function allowing recursion in _replace_dtype_fields."
+ _recurse = _replace_dtype_fields_recursive
+
+ # Do we have some named fields?
+ if dtype.names is not None:
+ descr = []
+ for name in dtype.names:
+ field = dtype.fields[name]
+ if len(field) == 3:
+ # Prepend the title to the name
+ name = (field[-1], name)
+ descr.append((name, _recurse(field[0], primitive_dtype)))
+ new_dtype = np.dtype(descr)
+
+ # Is this some kind of composite a la (float,2)
+ elif dtype.subdtype:
+ descr = list(dtype.subdtype)
+ descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
+ new_dtype = np.dtype(tuple(descr))
+
+ # this is a primitive type, so do a direct replacement
+ else:
+ new_dtype = primitive_dtype
+
+ # preserve identity of dtypes
+ if new_dtype == dtype:
+ new_dtype = dtype
+
+ return new_dtype
+
+
+def _replace_dtype_fields(dtype, primitive_dtype):
+ """
+ Construct a dtype description list from a given dtype.
+
+ Returns a new dtype object, with all fields and subtypes in the given type
+ recursively replaced with `primitive_dtype`.
+
+ Arguments are coerced to dtypes first.
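+
+ A sketch of the behavior (this is a private helper, and the '|b1'
+ spelling of the boolean repr may vary between numpy versions):
+
+ >>> from numpy.ma.core import _replace_dtype_fields
+ >>> _replace_dtype_fields(np.dtype([('x', np.float32), ('y', np.int64)]), np.bool_)
+ dtype([('x', '|b1'), ('y', '|b1')])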
+ """
+ dtype = np.dtype(dtype)
+ primitive_dtype = np.dtype(primitive_dtype)
+ return _replace_dtype_fields_recursive(dtype, primitive_dtype)
+
+
+def make_mask_descr(ndtype):
+ """
+ Construct a dtype description list from a given dtype.
+
+ Returns a new dtype object, with the type of all fields in `ndtype`
+ converted to a boolean type.
+
+ Parameters
+ ----------
+ ndtype : dtype
+ The dtype to convert.
+
+ Returns
+ -------
+ result : dtype
+ A dtype that looks like `ndtype`, in which the type of every field is boolean.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> dtype = np.dtype({'names':['foo', 'bar'],
+ ... 'formats':[np.float32, np.int64]})
+ >>> dtype
+ dtype([('foo', '<f4'), ('bar', '<i8')])
+ >>> ma.make_mask_descr(dtype)
+ dtype([('foo', '|b1'), ('bar', '|b1')])
+ >>> ma.make_mask_descr(np.float32)
+ dtype('bool')
+
+ """
+ return _replace_dtype_fields(ndtype, MaskType)
+
+
+def getmask(a):
+ """
+ Return the mask of a masked array, or nomask.
+
+ Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
+ mask is not `nomask`, else return `nomask`. To guarantee a full array
+ of booleans of the same shape as `a`, use `getmaskarray`.
+
+ Parameters
+ ----------
+ a : array_like
+ Input `MaskedArray` for which the mask is required.
+
+ See Also
+ --------
+ getdata : Return the data of a masked array as an ndarray.
+ getmaskarray : Return the mask of a masked array, or full array of False.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+ >>> a
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=2)
+ >>> ma.getmask(a)
+ array([[False, True],
+ [False, False]])
+
+ Equivalently use the `MaskedArray` `mask` attribute.
+
+ >>> a.mask
+ array([[False, True],
+ [False, False]])
+
+ Result when mask == ``nomask``
+
+ >>> b = ma.masked_array([[1,2],[3,4]])
+ >>> b
+ masked_array(
+ data=[[1, 2],
+ [3, 4]],
+ mask=False,
+ fill_value=999999)
+ >>> ma.nomask
+ False
+ >>> ma.getmask(b) == ma.nomask
+ True
+ >>> b.mask == ma.nomask
+ True
+
+ """
+ return getattr(a, '_mask', nomask)
+
+
+get_mask = getmask
+
+
+def getmaskarray(arr):
+ """
+ Return the mask of a masked array, or full boolean array of False.
+
+ Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
+ the mask is not `nomask`, else return a full boolean array of False of
+ the same shape as `arr`.
+
+ Parameters
+ ----------
+ arr : array_like
+ Input `MaskedArray` for which the mask is required.
+
+ See Also
+ --------
+ getmask : Return the mask of a masked array, or nomask.
+ getdata : Return the data of a masked array as an ndarray.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.masked_equal([[1,2],[3,4]], 2)
+ >>> a
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=2)
+ >>> ma.getmaskarray(a)
+ array([[False, True],
+ [False, False]])
+
+ Result when mask == ``nomask``
+
+ >>> b = ma.masked_array([[1,2],[3,4]])
+ >>> b
+ masked_array(
+ data=[[1, 2],
+ [3, 4]],
+ mask=False,
+ fill_value=999999)
+ >>> ma.getmaskarray(b)
+ array([[False, False],
+ [False, False]])
+
+ """
+ mask = getmask(arr)
+ if mask is nomask:
+ mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
+ return mask
+
+
+def is_mask(m):
+ """
+ Return True if m is a valid, standard mask.
+
+ This function does not check the contents of the input, only that the
+ type is MaskType. In particular, this function returns False if the
+ mask has a flexible dtype.
+
+ Parameters
+ ----------
+ m : array_like
+ Array to test.
+
+ Returns
+ -------
+ result : bool
+ True if `m.dtype.type` is MaskType, False otherwise.
+
+ See Also
+ --------
+ ma.isMaskedArray : Test whether input is an instance of MaskedArray.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
+ >>> m
+ masked_array(data=[--, 1, --, 2, 3],
+ mask=[ True, False, True, False, False],
+ fill_value=0)
+ >>> ma.is_mask(m)
+ False
+ >>> ma.is_mask(m.mask)
+ True
+
+ Input must be an ndarray (or have similar attributes)
+ for it to be considered a valid mask.
+
+ >>> m = [False, True, False]
+ >>> ma.is_mask(m)
+ False
+ >>> m = np.array([False, True, False])
+ >>> m
+ array([False, True, False])
+ >>> ma.is_mask(m)
+ True
+
+ Arrays with structured dtypes don't return True.
+
+ >>> dtype = np.dtype({'names':['monty', 'pithon'],
+ ... 'formats':[bool, bool]})
+ >>> dtype
+ dtype([('monty', '|b1'), ('pithon', '|b1')])
+ >>> m = np.array([(True, False), (False, True), (True, False)],
+ ... dtype=dtype)
+ >>> m
+ array([( True, False), (False, True), ( True, False)],
+ dtype=[('monty', '?'), ('pithon', '?')])
+ >>> ma.is_mask(m)
+ False
+
+ """
+ try:
+ return m.dtype.type is MaskType
+ except AttributeError:
+ return False
+
+
+def _shrink_mask(m):
+ """
+ Shrink a mask to nomask if possible.
+ """
+ if m.dtype.names is None and not m.any():
+ return nomask
+ else:
+ return m
+
+
+def make_mask(m, copy=False, shrink=True, dtype=MaskType):
+ """
+ Create a boolean mask from an array.
+
+ Return `m` as a boolean mask, creating a copy if necessary or requested.
+ The function can accept any sequence that is convertible to integers,
+ or ``nomask``. The contents need not be 0s and 1s: values of 0 are
+ interpreted as False, everything else as True.
+
+ Parameters
+ ----------
+ m : array_like
+ Potential mask.
+ copy : bool, optional
+ Whether to return a copy of `m` (True) or `m` itself (False).
+ shrink : bool, optional
+ Whether to shrink `m` to ``nomask`` if all its values are False.
+ dtype : dtype, optional
+ Data-type of the output mask. By default, the output mask has a
+ dtype of MaskType (bool). If the dtype is flexible, each field has
+ a boolean dtype. This is ignored when `m` is ``nomask``, in which
+ case ``nomask`` is always returned.
+
+ Returns
+ -------
+ result : ndarray
+ A boolean mask derived from `m`.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> m = [True, False, True, True]
+ >>> ma.make_mask(m)
+ array([ True, False, True, True])
+ >>> m = [1, 0, 1, 1]
+ >>> ma.make_mask(m)
+ array([ True, False, True, True])
+ >>> m = [1, 0, 2, -3]
+ >>> ma.make_mask(m)
+ array([ True, False, True, True])
+
+ Effect of the `shrink` parameter.
+
+ >>> m = np.zeros(4)
+ >>> m
+ array([0., 0., 0., 0.])
+ >>> ma.make_mask(m)
+ False
+ >>> ma.make_mask(m, shrink=False)
+ array([False, False, False, False])
+
+ Using a flexible `dtype`.
+
+ >>> m = [1, 0, 1, 1]
+ >>> n = [0, 1, 0, 0]
+ >>> arr = []
+ >>> for man, mouse in zip(m, n):
+ ... arr.append((man, mouse))
+ >>> arr
+ [(1, 0), (0, 1), (1, 0), (1, 0)]
+ >>> dtype = np.dtype({'names':['man', 'mouse'],
+ ... 'formats':[np.int64, np.int64]})
+ >>> arr = np.array(arr, dtype=dtype)
+ >>> arr
+ array([(1, 0), (0, 1), (1, 0), (1, 0)],
+ dtype=[('man', '<i8'), ('mouse', '<i8')])
+ >>> ma.make_mask(arr, dtype=dtype)
+ array([(True, False), (False, True), (True, False), (True, False)],
+ dtype=[('man', '|b1'), ('mouse', '|b1')])
+
+ """
+ if m is nomask:
+ return nomask
+
+ # Make sure the input dtype is valid.
+ dtype = make_mask_descr(dtype)
+
+ # legacy boolean special case: "existence of fields implies true"
+ if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+ return np.ones(m.shape, dtype=dtype)
+
+ # Fill the mask in case there are missing data; turn it into an ndarray.
+ result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
+ # Off with the masks! Shrink the result to nomask when possible.
+ if shrink:
+ result = _shrink_mask(result)
+ return result
+
+
+def make_mask_none(newshape, dtype=None):
+ """
+ Return a boolean mask of the given shape, filled with False.
+
+ This function returns a boolean ndarray with all entries False, that can
+ be used in common mask manipulations. If a complex dtype is specified, the
+ type of each field is converted to a boolean type.
+
+ Parameters
+ ----------
+ newshape : tuple
+ A tuple indicating the shape of the mask.
+ dtype : {None, dtype}, optional
+ If None, use a MaskType instance. Otherwise, use a new datatype with
+ the same fields as `dtype`, converted to boolean types.
+
+ Returns
+ -------
+ result : ndarray
+ An ndarray of appropriate shape and dtype, filled with False.
+
+ See Also
+ --------
+ make_mask : Create a boolean mask from an array.
+ make_mask_descr : Construct a dtype description list from a given dtype.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> ma.make_mask_none((3,))
+ array([False, False, False])
+
+ Defining a more complex dtype.
+
+ >>> dtype = np.dtype({'names':['foo', 'bar'],
+ ... 'formats':[np.float32, np.int64]})
+ >>> dtype
+ dtype([('foo', '<f4'), ('bar', '<i8')])
+ >>> ma.make_mask_none((3,), dtype=dtype)
+ array([(False, False), (False, False), (False, False)],
+ dtype=[('foo', '|b1'), ('bar', '|b1')])
+
+ """
+ if dtype is None:
+ result = np.zeros(newshape, dtype=MaskType)
+ else:
+ result = np.zeros(newshape, dtype=make_mask_descr(dtype))
+ return result
+
+
+def _recursive_mask_or(m1, m2, newmask):
+ names = m1.dtype.names
+ for name in names:
+ current1 = m1[name]
+ if current1.dtype.names is not None:
+ _recursive_mask_or(current1, m2[name], newmask[name])
+ else:
+ umath.logical_or(current1, m2[name], newmask[name])
+
+
+def mask_or(m1, m2, copy=False, shrink=True):
+ """
+ Combine two masks with the ``logical_or`` operator.
+
+ The result may be a view on `m1` or `m2` if the other is `nomask`
+ (i.e. False).
+
+ Parameters
+ ----------
+ m1, m2 : array_like
+ Input masks.
+ copy : bool, optional
+ If copy is False and one of the inputs is `nomask`, return a view
+ of the other input mask. Defaults to False.
+ shrink : bool, optional
+ Whether to shrink the output to `nomask` if all its values are
+ False. Defaults to True.
+
+ Returns
+ -------
+ mask : ndarray or nomask
+ The result masks values that are masked in either `m1` or `m2`.
+
+ Raises
+ ------
+ ValueError
+ If `m1` and `m2` have different flexible dtypes.
+
+ Examples
+ --------
+ >>> m1 = np.ma.make_mask([0, 1, 1, 0])
+ >>> m2 = np.ma.make_mask([1, 0, 0, 0])
+ >>> np.ma.mask_or(m1, m2)
+ array([ True, True, True, False])
+
+ """
+
+ if (m1 is nomask) or (m1 is False):
+ dtype = getattr(m2, 'dtype', MaskType)
+ return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
+ if (m2 is nomask) or (m2 is False):
+ dtype = getattr(m1, 'dtype', MaskType)
+ return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
+ if m1 is m2 and is_mask(m1):
+ return m1
+ (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
+ if dtype1 != dtype2:
+ raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
+ if dtype1.names is not None:
+ # Allocate an output mask array with the properly broadcast shape.
+ newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
+ _recursive_mask_or(m1, m2, newmask)
+ return newmask
+ return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
+
+
+def flatten_mask(mask):
+ """
+ Returns a completely flattened version of the mask, where nested fields
+ are collapsed.
+
+ Parameters
+ ----------
+ mask : array_like
+ Input array, which will be interpreted as booleans.
+
+ Returns
+ -------
+ flattened_mask : ndarray of bools
+ The flattened input.
+
+ Examples
+ --------
+ >>> mask = np.array([0, 0, 1])
+ >>> np.ma.flatten_mask(mask)
+ array([False, False, True])
+
+ >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
+ >>> np.ma.flatten_mask(mask)
+ array([False, False, False, True])
+
+ >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
+ >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
+ >>> np.ma.flatten_mask(mask)
+ array([False, False, False, False, False, True])
+
+ """
+
+ def _flatmask(mask):
+ "Flatten the mask and returns a (maybe nested) sequence of booleans."
+ mnames = mask.dtype.names
+ if mnames is not None:
+ return [flatten_mask(mask[name]) for name in mnames]
+ else:
+ return mask
+
+ def _flatsequence(sequence):
+ "Generates a flattened version of the sequence."
+ try:
+ for element in sequence:
+ if hasattr(element, '__iter__'):
+ yield from _flatsequence(element)
+ else:
+ yield element
+ except TypeError:
+ yield sequence
+
+ mask = np.asarray(mask)
+ flattened = _flatsequence(_flatmask(mask))
+ return np.array(list(flattened), dtype=bool)
+
+
+def _check_mask_axis(mask, axis, keepdims=np._NoValue):
+ "Check whether there are masked values along the given axis"
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+ if mask is not nomask:
+ return mask.all(axis=axis, **kwargs)
+ return nomask
+
+
+###############################################################################
+# Masking functions #
+###############################################################################
+
+def masked_where(condition, a, copy=True):
+ """
+ Mask an array where a condition is met.
+
+ Return `a` as an array masked where `condition` is True.
+ Any masked values of `a` or `condition` are also masked in the output.
+
+ Parameters
+ ----------
+ condition : array_like
+ Masking condition. When `condition` tests floating point values for
+ equality, consider using ``masked_values`` instead.
+ a : array_like
+ Array to mask.
+ copy : bool
+ If True (default) make a copy of `a` in the result. If False modify
+ `a` in place and return a view.
+
+ Returns
+ -------
+ result : MaskedArray
+ The result of masking `a` where `condition` is True.
+
+ See Also
+ --------
+ masked_values : Mask using floating point equality.
+ masked_equal : Mask where equal to a given value.
+ masked_not_equal : Mask where `not` equal to a given value.
+ masked_less_equal : Mask where less than or equal to a given value.
+ masked_greater_equal : Mask where greater than or equal to a given value.
+ masked_less : Mask where less than a given value.
+ masked_greater : Mask where greater than a given value.
+ masked_inside : Mask inside a given interval.
+ masked_outside : Mask outside a given interval.
+ masked_invalid : Mask invalid values (NaNs or infs).
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_where(a <= 2, a)
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
+
+ Mask array `b` conditional on `a`.
+
+ >>> b = ['a', 'b', 'c', 'd']
+ >>> ma.masked_where(a == 2, b)
+ masked_array(data=['a', 'b', --, 'd'],
+ mask=[False, False, True, False],
+ fill_value='N/A',
+ dtype='<U1')
+
+ Effect of the `copy` argument.
+
+ >>> c = ma.masked_where(a <= 2, a)
+ >>> c
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
+ >>> c[0] = 99
+ >>> c
+ masked_array(data=[99, --, --, 3],
+ mask=[False, True, True, False],
+ fill_value=999999)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> c = ma.masked_where(a <= 2, a, copy=False)
+ >>> c[0] = 99
+ >>> c
+ masked_array(data=[99, --, --, 3],
+ mask=[False, True, True, False],
+ fill_value=999999)
+ >>> a
+ array([99, 1, 2, 3])
+
+ When `condition` or `a` contain masked values.
+
+ >>> a = np.arange(4)
+ >>> a = ma.masked_where(a == 2, a)
+ >>> a
+ masked_array(data=[0, 1, --, 3],
+ mask=[False, False, True, False],
+ fill_value=999999)
+ >>> b = np.arange(4)
+ >>> b = ma.masked_where(b == 0, b)
+ >>> b
+ masked_array(data=[--, 1, 2, 3],
+ mask=[ True, False, False, False],
+ fill_value=999999)
+ >>> ma.masked_where(a == 3, b)
+ masked_array(data=[--, 1, --, --],
+ mask=[ True, False, True, True],
+ fill_value=999999)
+
+ """
+ # Make sure that condition is a valid standard-type mask.
+ cond = make_mask(condition, shrink=False)
+ a = np.array(a, copy=copy, subok=True)
+
+ (cshape, ashape) = (cond.shape, a.shape)
+ if cshape and cshape != ashape:
+ raise IndexError("Inconsistent shape between the condition and the input"
+ " (got %s and %s)" % (cshape, ashape))
+ if hasattr(a, '_mask'):
+ cond = mask_or(cond, a._mask)
+ cls = type(a)
+ else:
+ cls = MaskedArray
+ result = a.view(cls)
+ # Assign to *.mask so that structured masks are handled correctly.
+ result.mask = _shrink_mask(cond)
+ # There is no view of a boolean so when 'a' is a MaskedArray with nomask
+ # the update to the result's mask has no effect.
+ if not copy and hasattr(a, '_mask') and getmask(a) is nomask:
+ a._mask = result._mask.view()
+ return result
+
+
+def masked_greater(x, value, copy=True):
+ """
+ Mask an array where greater than a given value.
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = (x > value).
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_greater(a, 2)
+ masked_array(data=[0, 1, 2, --],
+ mask=[False, False, False, True],
+ fill_value=999999)
+
+ """
+ return masked_where(greater(x, value), x, copy=copy)
+
+
+def masked_greater_equal(x, value, copy=True):
+ """
+ Mask an array where greater than or equal to a given value.
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = (x >= value).
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_greater_equal(a, 2)
+ masked_array(data=[0, 1, --, --],
+ mask=[False, False, True, True],
+ fill_value=999999)
+
+ """
+ return masked_where(greater_equal(x, value), x, copy=copy)
+
+
+def masked_less(x, value, copy=True):
+ """
+ Mask an array where less than a given value.
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = (x < value).
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_less(a, 2)
+ masked_array(data=[--, --, 2, 3],
+ mask=[ True, True, False, False],
+ fill_value=999999)
+
+ """
+ return masked_where(less(x, value), x, copy=copy)
+
+
+def masked_less_equal(x, value, copy=True):
+ """
+ Mask an array where less than or equal to a given value.
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = (x <= value).
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_less_equal(a, 2)
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
+
+ """
+ return masked_where(less_equal(x, value), x, copy=copy)
+
+
+def masked_not_equal(x, value, copy=True):
+ """
+ Mask an array where `not` equal to a given value.
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = (x != value).
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_not_equal(a, 2)
+ masked_array(data=[--, --, 2, --],
+ mask=[ True, True, False, True],
+ fill_value=999999)
+
+ """
+ return masked_where(not_equal(x, value), x, copy=copy)
+
+
+def masked_equal(x, value, copy=True):
+ """
+ Mask an array where equal to a given value.
+
+ Return a MaskedArray, masked where the data in array `x` are
+ equal to `value`. The fill_value of the returned MaskedArray
+ is set to `value`.
+
+ For floating point arrays, consider using ``masked_values(x, value)``.
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+ masked_values : Mask using floating point equality.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(4)
+ >>> a
+ array([0, 1, 2, 3])
+ >>> ma.masked_equal(a, 2)
+ masked_array(data=[0, 1, --, 3],
+ mask=[False, False, True, False],
+ fill_value=2)
+
+ """
+ output = masked_where(equal(x, value), x, copy=copy)
+ output.fill_value = value
+ return output
+
+
+def masked_inside(x, v1, v2, copy=True):
+ """
+ Mask an array inside a given interval.
+
+ Shortcut to ``masked_where``, where `condition` is True for `x` inside
+ the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
+ can be given in either order.
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Notes
+ -----
+ The array `x` is prefilled with its filling value.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
+ >>> ma.masked_inside(x, -0.3, 0.3)
+ masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
+ mask=[False, False, True, True, False, False],
+ fill_value=1e+20)
+
+ The order of `v1` and `v2` doesn't matter.
+
+ >>> ma.masked_inside(x, 0.3, -0.3)
+ masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
+ mask=[False, False, True, True, False, False],
+ fill_value=1e+20)
+
+ """
+ if v2 < v1:
+ (v1, v2) = (v2, v1)
+ xf = filled(x)
+ condition = (xf >= v1) & (xf <= v2)
+ return masked_where(condition, x, copy=copy)
+
+
+def masked_outside(x, v1, v2, copy=True):
+ """
+ Mask an array outside a given interval.
+
+ Shortcut to ``masked_where``, where `condition` is True for `x` outside
+ the interval [v1,v2] (x < v1)|(x > v2).
+ The boundaries `v1` and `v2` can be given in either order.
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Notes
+ -----
+ The array `x` is prefilled with its filling value.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
+ >>> ma.masked_outside(x, -0.3, 0.3)
+ masked_array(data=[--, --, 0.01, 0.2, --, --],
+ mask=[ True, True, False, False, True, True],
+ fill_value=1e+20)
+
+ The order of `v1` and `v2` doesn't matter.
+
+ >>> ma.masked_outside(x, 0.3, -0.3)
+ masked_array(data=[--, --, 0.01, 0.2, --, --],
+ mask=[ True, True, False, False, True, True],
+ fill_value=1e+20)
+
+ """
+ if v2 < v1:
+ (v1, v2) = (v2, v1)
+ xf = filled(x)
+ condition = (xf < v1) | (xf > v2)
+ return masked_where(condition, x, copy=copy)
+
+
+def masked_object(x, value, copy=True, shrink=True):
+ """
+ Mask the array `x` where the data are exactly equal to value.
+
+ This function is similar to `masked_values`, but only suitable
+ for object arrays: for floating point, use `masked_values` instead.
+
+ Parameters
+ ----------
+ x : array_like
+ Array to mask
+ value : object
+ Comparison value
+ copy : {True, False}, optional
+ Whether to return a copy of `x`.
+ shrink : {True, False}, optional
+ Whether to collapse a mask full of False to nomask
+
+ Returns
+ -------
+ result : MaskedArray
+ The result of masking `x` where equal to `value`.
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+ masked_equal : Mask where equal to a given value (integers).
+ masked_values : Mask using floating point equality.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> food = np.array(['green_eggs', 'ham'], dtype=object)
+ >>> # don't eat spoiled food
+ >>> eat = ma.masked_object(food, 'green_eggs')
+ >>> eat
+ masked_array(data=[--, 'ham'],
+ mask=[ True, False],
+ fill_value='green_eggs',
+ dtype=object)
+ >>> # plain ol' ham is boring
+ >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
+ >>> eat = ma.masked_object(fresh_food, 'green_eggs')
+ >>> eat
+ masked_array(data=['cheese', 'ham', 'pineapple'],
+ mask=False,
+ fill_value='green_eggs',
+ dtype=object)
+
+ Note that `mask` is set to ``nomask`` if possible.
+
+ >>> eat
+ masked_array(data=['cheese', 'ham', 'pineapple'],
+ mask=False,
+ fill_value='green_eggs',
+ dtype=object)
+
+ """
+ if isMaskedArray(x):
+ condition = umath.equal(x._data, value)
+ mask = x._mask
+ else:
+ condition = umath.equal(np.asarray(x), value)
+ mask = nomask
+ mask = mask_or(mask, make_mask(condition, shrink=shrink))
+ return masked_array(x, mask=mask, copy=copy, fill_value=value)
+
+
+def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
+ """
+ Mask using floating point equality.
+
+ Return a MaskedArray, masked where the data in array `x` are approximately
+ equal to `value`, determined using `isclose`. The default tolerances for
+ `masked_values` are the same as those for `isclose`.
+
+ For integer types, exact equality is used, in the same way as
+ `masked_equal`.
+
+ The fill_value is set to `value` and the mask is set to ``nomask`` if
+ possible.
+
+ Parameters
+ ----------
+ x : array_like
+ Array to mask.
+ value : float
+ Masking value.
+ rtol, atol : float, optional
+ Tolerance parameters passed on to `isclose`
+ copy : bool, optional
+ Whether to return a copy of `x`.
+ shrink : bool, optional
+ Whether to collapse a mask full of False to ``nomask``.
+
+ Returns
+ -------
+ result : MaskedArray
+ The result of masking `x` where approximately equal to `value`.
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+ masked_equal : Mask where equal to a given value (integers).
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = np.array([1, 1.1, 2, 1.1, 3])
+ >>> ma.masked_values(x, 1.1)
+ masked_array(data=[1.0, --, 2.0, --, 3.0],
+ mask=[False, True, False, True, False],
+ fill_value=1.1)
+
+ Note that `mask` is set to ``nomask`` if possible.
+
+ >>> ma.masked_values(x, 2.1)
+ masked_array(data=[1. , 1.1, 2. , 1.1, 3. ],
+ mask=False,
+ fill_value=2.1)
+
+ Unlike `masked_equal`, `masked_values` can perform approximate equalities.
+
+ >>> ma.masked_values(x, 2.1, atol=1e-1)
+ masked_array(data=[1.0, 1.1, --, 1.1, 3.0],
+ mask=[False, False, True, False, False],
+ fill_value=2.1)
+
+ """
+ xnew = filled(x, value)
+ if np.issubdtype(xnew.dtype, np.floating):
+ mask = np.isclose(xnew, value, atol=atol, rtol=rtol)
+ else:
+ mask = umath.equal(xnew, value)
+ ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)
+ if shrink:
+ ret.shrink_mask()
+ return ret
+
+
+def masked_invalid(a, copy=True):
+ """
+ Mask an array where invalid values occur (NaNs or infs).
+
+ This function is a shortcut to ``masked_where``, with
+ `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
+ Only applies to arrays with a dtype where NaNs or infs make sense
+ (i.e. floating point types), but accepts any array_like object.
+
+ See Also
+ --------
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(5, dtype=float)
+ >>> a[2] = np.NaN
+ >>> a[3] = np.PINF
+ >>> a
+ array([ 0., 1., nan, inf, 4.])
+ >>> ma.masked_invalid(a)
+ masked_array(data=[0.0, 1.0, --, --, 4.0],
+ mask=[False, False, True, True, False],
+ fill_value=1e+20)
+
+ """
+ a = np.array(a, copy=False, subok=True)
+ res = masked_where(~(np.isfinite(a)), a, copy=copy)
+ # masked_invalid previously never returned nomask as a mask and doing so
+ # threw off matplotlib (gh-22842). So use shrink=False:
+ if res._mask is nomask:
+ res._mask = make_mask_none(res.shape, res.dtype)
+ return res
+
+###############################################################################
+# Printing options #
+###############################################################################
+
+
+class _MaskedPrintOption:
+ """
+ Handle the string used to represent missing data in a masked array.
+
+ """
+
+ def __init__(self, display):
+ """
+ Create the masked_print_option object.
+
+ """
+ self._display = display
+ self._enabled = True
+
+ def display(self):
+ """
+ Display the string to print for masked values.
+
+ """
+ return self._display
+
+ def set_display(self, s):
+ """
+ Set the string to print for masked values.
+
+ """
+ self._display = s
+
+ def enabled(self):
+ """
+ Is the use of the display value enabled?
+
+ """
+ return self._enabled
+
+ def enable(self, shrink=1):
+ """
+ Enable (if `shrink` is true) or disable the display of masked values.
+
+ """
+ self._enabled = shrink
+
+ def __str__(self):
+ return str(self._display)
+
+ __repr__ = __str__
+
+ # If you index a single masked location, you get this object.
+masked_print_option = _MaskedPrintOption('--')
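+ # Illustrative: the display string can be changed globally, e.g.
+ #   >>> np.ma.masked_print_option.set_display('N/A')
+ # after which masked entries print as 'N/A' instead of '--'.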
+
+
+def _recursive_printoption(result, mask, printopt):
+ """
+ Puts `printopt` in `result` where `mask` is True.
+
+ Private function allowing for recursion
+
+ """
+ names = result.dtype.names
+ if names is not None:
+ for name in names:
+ curdata = result[name]
+ curmask = mask[name]
+ _recursive_printoption(curdata, curmask, printopt)
+ else:
+ np.copyto(result, printopt, where=mask)
+ return
+
+# For better or worse, these end in a newline
+_legacy_print_templates = dict(
+ long_std=textwrap.dedent("""\
+ masked_%(name)s(data =
+ %(data)s,
+ %(nlen)s mask =
+ %(mask)s,
+ %(nlen)s fill_value = %(fill)s)
+ """),
+ long_flx=textwrap.dedent("""\
+ masked_%(name)s(data =
+ %(data)s,
+ %(nlen)s mask =
+ %(mask)s,
+ %(nlen)s fill_value = %(fill)s,
+ %(nlen)s dtype = %(dtype)s)
+ """),
+ short_std=textwrap.dedent("""\
+ masked_%(name)s(data = %(data)s,
+ %(nlen)s mask = %(mask)s,
+ %(nlen)s fill_value = %(fill)s)
+ """),
+ short_flx=textwrap.dedent("""\
+ masked_%(name)s(data = %(data)s,
+ %(nlen)s mask = %(mask)s,
+ %(nlen)s fill_value = %(fill)s,
+ %(nlen)s dtype = %(dtype)s)
+ """)
+)
+
+###############################################################################
+# MaskedArray class #
+###############################################################################
+
+
+def _recursive_filled(a, mask, fill_value):
+ """
+ Recursively fill `a` with `fill_value`.
+
+ """
+ names = a.dtype.names
+ for name in names:
+ current = a[name]
+ if current.dtype.names is not None:
+ _recursive_filled(current, mask[name], fill_value[name])
+ else:
+ np.copyto(current, fill_value[name], where=mask[name])
+
+
+def flatten_structured_array(a):
+ """
+ Flatten a structured array.
+
+ The data type of the output is chosen such that it can represent all of the
+ (nested) fields.
+
+ Parameters
+ ----------
+ a : structured array
+
+ Returns
+ -------
+ output : masked array or ndarray
+ A flattened masked array if the input is a masked array, otherwise a
+ standard ndarray.
+
+ Examples
+ --------
+ >>> ndtype = [('a', int), ('b', float)]
+ >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
+ >>> np.ma.flatten_structured_array(a)
+ array([[1., 1.],
+ [2., 2.]])
+
+ """
+
+ def flatten_sequence(iterable):
+ """
+ Flattens a compound of nested iterables.
+
+ """
+ for elm in iter(iterable):
+ if hasattr(elm, '__iter__'):
+ yield from flatten_sequence(elm)
+ else:
+ yield elm
+
+ a = np.asanyarray(a)
+ inishape = a.shape
+ a = a.ravel()
+ if isinstance(a, MaskedArray):
+ out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
+ out = out.view(MaskedArray)
+ out._mask = np.array([tuple(flatten_sequence(d.item()))
+ for d in getmaskarray(a)])
+ else:
+ out = np.array([tuple(flatten_sequence(d.item())) for d in a])
+ if len(inishape) > 1:
+ newshape = list(out.shape)
+ newshape[0] = inishape
+ out.shape = tuple(flatten_sequence(newshape))
+ return out
+
+
+def _arraymethod(funcname, onmask=True):
+ """
+ Return a class method wrapper around a basic array method.
+
+ Creates a class method which returns a masked array, where the new
+ ``_data`` array is the output of the corresponding basic method called
+ on the original ``_data``.
+
+ If `onmask` is True, the new mask is the output of the method called
+ on the initial mask. Otherwise, the new mask is just a reference
+ to the initial mask.
+
+ Parameters
+ ----------
+ funcname : str
+ Name of the function to apply on data.
+ onmask : bool
+ Whether the mask must be processed also (True) or left
+ alone (False). Default is True.
+
+ Returns
+ -------
+ method : instancemethod
+ Class method wrapper of the specified basic array method.
+
+ """
+ def wrapped_method(self, *args, **params):
+ result = getattr(self._data, funcname)(*args, **params)
+ result = result.view(type(self))
+ result._update_from(self)
+ mask = self._mask
+ if not onmask:
+ result.__setmask__(mask)
+ elif mask is not nomask:
+ # __setmask__ makes a copy, which we don't want
+ result._mask = getattr(mask, funcname)(*args, **params)
+ return result
+ methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
+ if methdoc is not None:
+ wrapped_method.__doc__ = methdoc.__doc__
+ wrapped_method.__name__ = funcname
+ return wrapped_method
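+
+ # For illustration, `MaskedArray` binds several basic methods through this
+ # wrapper further down in this file, in the style of::
+ #
+ #     copy = _arraymethod('copy')
+ #     swapaxes = _arraymethod('swapaxes')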
+
+
+class MaskedIterator:
+ """
+ Flat iterator object to iterate over masked arrays.
+
+ A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
+ `x`. It allows iterating over the array as if it were a 1-D array,
+ either in a for-loop or by calling its `next` method.
+
+ Iteration is done in C-contiguous style, with the last index varying the
+ fastest. The iterator can also be indexed using basic slicing or
+ advanced indexing.
+
+ See Also
+ --------
+ MaskedArray.flat : Return a flat iterator over an array.
+ MaskedArray.flatten : Returns a flattened copy of an array.
+
+ Notes
+ -----
+ `MaskedIterator` is not exported by the `ma` module. Instead of
+ instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(6).reshape(2, 3))
+ >>> fl = x.flat
+ >>> type(fl)
+ <class 'numpy.ma.core.MaskedIterator'>
+ >>> for item in fl:
+ ... print(item)
+ ...
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+
+ Extracting more than a single element by indexing the `MaskedIterator`
+ returns a masked array:
+
+ >>> fl[2:4]
+ masked_array(data=[2, 3],
+ mask=False,
+ fill_value=999999)
+
+ """
+
+ def __init__(self, ma):
+ self.ma = ma
+ self.dataiter = ma._data.flat
+
+ if ma._mask is nomask:
+ self.maskiter = None
+ else:
+ self.maskiter = ma._mask.flat
+
+ def __iter__(self):
+ return self
+
+ def __getitem__(self, indx):
+ result = self.dataiter.__getitem__(indx).view(type(self.ma))
+ if self.maskiter is not None:
+ _mask = self.maskiter.__getitem__(indx)
+ if isinstance(_mask, ndarray):
+ # set shape to match that of data; this is needed for matrices
+ _mask.shape = result.shape
+ result._mask = _mask
+ elif isinstance(_mask, np.void):
+ return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
+ elif _mask: # Just a scalar, masked
+ return masked
+ return result
+
+ # This won't work if ravel makes a copy
+ def __setitem__(self, index, value):
+ self.dataiter[index] = getdata(value)
+ if self.maskiter is not None:
+ self.maskiter[index] = getmaskarray(value)
+
+ def __next__(self):
+ """
+ Return the next value, or raise StopIteration.
+
+ Examples
+ --------
+ >>> x = np.ma.array([3, 2], mask=[0, 1])
+ >>> fl = x.flat
+ >>> next(fl)
+ 3
+ >>> next(fl)
+ masked
+ >>> next(fl)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+
+ """
+ d = next(self.dataiter)
+ if self.maskiter is not None:
+ m = next(self.maskiter)
+ if isinstance(m, np.void):
+ return mvoid(d, mask=m, hardmask=self.ma._hardmask)
+ elif m: # Just a scalar, masked
+ return masked
+ return d
+
+
+class MaskedArray(ndarray):
+ """
+ An array class with possibly masked values.
+
+ Masked values of True exclude the corresponding element from any
+ computation.
+
+ Construction::
+
+ x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True,
+ ndmin=0, fill_value=None, keep_mask=True, hard_mask=None,
+ shrink=True, order=None)
+
+ Parameters
+ ----------
+ data : array_like
+ Input data.
+ mask : sequence, optional
+ Mask. Must be convertible to an array of booleans with the same
+ shape as `data`. True indicates a masked (i.e. invalid) data.
+ dtype : dtype, optional
+ Data type of the output.
+ If `dtype` is None, the type of the data argument (``data.dtype``)
+ is used. If `dtype` is not None and different from ``data.dtype``,
+ a copy is performed.
+ copy : bool, optional
+ Whether to copy the input data (True), or to use a reference instead.
+ Default is False.
+ subok : bool, optional
+ Whether to return a subclass of `MaskedArray` if possible (True) or a
+ plain `MaskedArray`. Default is True.
+ ndmin : int, optional
+ Minimum number of dimensions. Default is 0.
+ fill_value : scalar, optional
+ Value used to fill in the masked values when necessary.
+ If None, a default based on the data-type is used.
+ keep_mask : bool, optional
+ Whether to combine `mask` with the mask of the input data, if any
+ (True), or to use only `mask` for the output (False). Default is True.
+ hard_mask : bool, optional
+ Whether to use a hard mask or not. With a hard mask, masked values
+ cannot be unmasked. Default is False.
+ shrink : bool, optional
+ Whether to force compression of an empty mask. Default is True.
+ order : {'C', 'F', 'A'}, optional
+ Specify the order of the array. If order is 'C', then the array
+ will be in C-contiguous order (last-index varies the fastest).
+ If order is 'F', then the returned array will be in
+ Fortran-contiguous order (first-index varies the fastest).
+ If order is 'A' (default), then the returned array may be
+ in any order (either C-, Fortran-contiguous, or even discontiguous),
+ unless a copy is required, in which case it will be C-contiguous.
+
+ Examples
+ --------
+
+ The ``mask`` can be initialized with an array of boolean values
+ with the same shape as ``data``.
+
+ >>> data = np.arange(6).reshape((2, 3))
+ >>> np.ma.MaskedArray(data, mask=[[False, True, False],
+ ... [False, False, True]])
+ masked_array(
+ data=[[0, --, 2],
+ [3, 4, --]],
+ mask=[[False, True, False],
+ [False, False, True]],
+ fill_value=999999)
+
+ Alternatively, the ``mask`` can be initialized to a homogeneous boolean
+ array with the same shape as ``data`` by passing in a scalar
+ boolean value:
+
+ >>> np.ma.MaskedArray(data, mask=False)
+ masked_array(
+ data=[[0, 1, 2],
+ [3, 4, 5]],
+ mask=[[False, False, False],
+ [False, False, False]],
+ fill_value=999999)
+
+ >>> np.ma.MaskedArray(data, mask=True)
+ masked_array(
+ data=[[--, --, --],
+ [--, --, --]],
+ mask=[[ True, True, True],
+ [ True, True, True]],
+ fill_value=999999,
+ dtype=int64)
+
+ .. note::
+ The recommended practice for initializing ``mask`` with a scalar
+ boolean value is to use ``True``/``False`` rather than
+ ``np.True_``/``np.False_``. The reason is :attr:`nomask`
+ is represented internally as ``np.False_``.
+
+ >>> np.False_ is np.ma.nomask
+ True
+
+ """
+
+ __array_priority__ = 15
+ _defaultmask = nomask
+ _defaulthardmask = False
+ _baseclass = ndarray
+
+ # Maximum number of elements per axis used when printing an array. The
+ # 1d case is handled separately because we need more values in this case.
+ _print_width = 100
+ _print_width_1d = 1500
+
+ def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
+ subok=True, ndmin=0, fill_value=None, keep_mask=True,
+ hard_mask=None, shrink=True, order=None):
+ """
+ Create a new masked array from scratch.
+
+ Notes
+ -----
+ A masked array can also be created by taking a .view(MaskedArray).
+
+ """
+ # Process data.
+ _data = np.array(data, dtype=dtype, copy=copy,
+ order=order, subok=True, ndmin=ndmin)
+ _baseclass = getattr(data, '_baseclass', type(_data))
+ # Check that we're not erasing the mask.
+ if isinstance(data, MaskedArray) and (data.shape != _data.shape):
+ copy = True
+
+ # Here, we copy the _view_, so that we can attach new properties to it;
+ # we must never do .view(MaskedConstant), as that would create a new
+ # instance of np.ma.masked, which would make identity comparisons fail
+ if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant):
+ _data = ndarray.view(_data, type(data))
+ else:
+ _data = ndarray.view(_data, cls)
+
+ # Handle the case where data is not a subclass of ndarray, but
+ # still has the _mask attribute like MaskedArrays
+ if hasattr(data, '_mask') and not isinstance(data, ndarray):
+ _data._mask = data._mask
+ # FIXME: should we set `_data._sharedmask = True`?
+ # Process mask.
+ # Type of the mask
+ mdtype = make_mask_descr(_data.dtype)
+
+ if mask is nomask:
+ # Case 1. : no mask in input.
+ # Erase the current mask?
+ if not keep_mask:
+ # With a reduced version
+ if shrink:
+ _data._mask = nomask
+ # With full version
+ else:
+ _data._mask = np.zeros(_data.shape, dtype=mdtype)
+ # Check whether we missed something
+ elif isinstance(data, (tuple, list)):
+ try:
+ # If data is a sequence of masked array
+ mask = np.array(
+ [getmaskarray(np.asanyarray(m, dtype=_data.dtype))
+ for m in data], dtype=mdtype)
+ except ValueError:
+ # If data is nested
+ mask = nomask
+ # Force shrinking of the mask if needed (and possible)
+ if (mdtype == MaskType) and mask.any():
+ _data._mask = mask
+ _data._sharedmask = False
+ else:
+ _data._sharedmask = not copy
+ if copy:
+ _data._mask = _data._mask.copy()
+ # Reset the shape of the original mask
+ if getmask(data) is not nomask:
+ data._mask.shape = data.shape
+ else:
+ # Case 2. : With a mask in input.
+ # If mask is boolean, create an array of True or False
+ if mask is True and mdtype == MaskType:
+ mask = np.ones(_data.shape, dtype=mdtype)
+ elif mask is False and mdtype == MaskType:
+ mask = np.zeros(_data.shape, dtype=mdtype)
+ else:
+ # Read the mask with the current mdtype
+ try:
+ mask = np.array(mask, copy=copy, dtype=mdtype)
+ # Or assume it's a sequence of bool/int
+ except TypeError:
+ mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+ dtype=mdtype)
+ # Make sure the mask and the data have the same shape
+ if mask.shape != _data.shape:
+ (nd, nm) = (_data.size, mask.size)
+ if nm == 1:
+ mask = np.resize(mask, _data.shape)
+ elif nm == nd:
+ mask = np.reshape(mask, _data.shape)
+ else:
+ msg = "Mask and data not compatible: data size is %i, " + \
+ "mask size is %i."
+ raise MaskError(msg % (nd, nm))
+ copy = True
+ # Set the mask to the new value
+ if _data._mask is nomask:
+ _data._mask = mask
+ _data._sharedmask = not copy
+ else:
+ if not keep_mask:
+ _data._mask = mask
+ _data._sharedmask = not copy
+ else:
+ if _data.dtype.names is not None:
+ def _recursive_or(a, b):
+ "do a|=b on each field of a, recursively"
+ for name in a.dtype.names:
+ (af, bf) = (a[name], b[name])
+ if af.dtype.names is not None:
+ _recursive_or(af, bf)
+ else:
+ af |= bf
+
+ _recursive_or(_data._mask, mask)
+ else:
+ _data._mask = np.logical_or(mask, _data._mask)
+ _data._sharedmask = False
+ # Update fill_value.
+ if fill_value is None:
+ fill_value = getattr(data, '_fill_value', None)
+ # But don't run the check unless we have something to check.
+ if fill_value is not None:
+ _data._fill_value = _check_fill_value(fill_value, _data.dtype)
+ # Process extra options ..
+ if hard_mask is None:
+ _data._hardmask = getattr(data, '_hardmask', False)
+ else:
+ _data._hardmask = hard_mask
+ _data._baseclass = _baseclass
+ return _data
+
+ def _update_from(self, obj):
+ """
+ Copies some attributes of obj to self.
+
+ """
+ if isinstance(obj, ndarray):
+ _baseclass = type(obj)
+ else:
+ _baseclass = ndarray
+ # We need to copy the _basedict to avoid backward propagation
+ _optinfo = {}
+ _optinfo.update(getattr(obj, '_optinfo', {}))
+ _optinfo.update(getattr(obj, '_basedict', {}))
+ if not isinstance(obj, MaskedArray):
+ _optinfo.update(getattr(obj, '__dict__', {}))
+ _dict = dict(_fill_value=getattr(obj, '_fill_value', None),
+ _hardmask=getattr(obj, '_hardmask', False),
+ _sharedmask=getattr(obj, '_sharedmask', False),
+ _isfield=getattr(obj, '_isfield', False),
+ _baseclass=getattr(obj, '_baseclass', _baseclass),
+ _optinfo=_optinfo,
+ _basedict=_optinfo)
+ self.__dict__.update(_dict)
+ self.__dict__.update(_optinfo)
+ return
+
+ def __array_finalize__(self, obj):
+ """
+ Finalizes the masked array.
+
+ """
+ # Get main attributes.
+ self._update_from(obj)
+
+ # We have to decide how to initialize self.mask, based on
+ # obj.mask. This is very difficult. There might be some
+ # correspondence between the elements in the array we are being
+ # created from (= obj) and us. Or there might not. This method can
+ # be called in all kinds of places for all kinds of reasons -- could
+ # be empty_like, could be slicing, could be a ufunc, could be a view.
+ # The numpy subclassing interface simply doesn't give us any way
+ # to know, which means that at best this method will be based on
+ # guesswork and heuristics. To make things worse, there isn't even any
+ # clear consensus about what the desired behavior is. For instance,
+ # most users think that np.empty_like(marr) -- which goes via this
+ # method -- should return a masked array with an empty mask (see
+ # gh-3404 and linked discussions), but others disagree, and they have
+ # existing code which depends on empty_like returning an array that
+ # matches the input mask.
+ #
+ # Historically our algorithm was: if the template object mask had the
+ # same *number of elements* as us, then we used *its mask object
+ # itself* as our mask, so that writes to us would also write to the
+ # original array. This is horribly broken in multiple ways.
+ #
+ # Now what we do instead is, if the template object mask has the same
+ # number of elements as us, and we do not have the same base pointer
+ # as the template object (b/c views like arr[...] should keep the same
+ # mask), then we make a copy of the template object mask and use
+ # that. This is also horribly broken but somewhat less so. Maybe.
+ if isinstance(obj, ndarray):
+ # XX: This looks like a bug -- shouldn't it check self.dtype
+ # instead?
+ if obj.dtype.names is not None:
+ _mask = getmaskarray(obj)
+ else:
+ _mask = getmask(obj)
+
+ # If self and obj point to exactly the same data, then probably
+ # self is a simple view of obj (e.g., self = obj[...]), so they
+ # should share the same mask. (This isn't 100% reliable, e.g. self
+ # could be the first row of obj, or have strange strides, but as a
+ # heuristic it's not bad.) In all other cases, we make a copy of
+ # the mask, so that future modifications to 'self' do not end up
+ # side-effecting 'obj' as well.
+ if (_mask is not nomask and obj.__array_interface__["data"][0]
+ != self.__array_interface__["data"][0]):
+ # We should make a copy. But we could get here via astype,
+ # in which case the mask might need a new dtype as well
+ # (e.g., changing to or from a structured dtype), and the
+ # order could have changed. So, change the mask type if
+ # needed and use astype instead of copy.
+ if self.dtype == obj.dtype:
+ _mask_dtype = _mask.dtype
+ else:
+ _mask_dtype = make_mask_descr(self.dtype)
+
+ if self.flags.c_contiguous:
+ order = "C"
+ elif self.flags.f_contiguous:
+ order = "F"
+ else:
+ order = "K"
+
+ _mask = _mask.astype(_mask_dtype, order)
+ else:
+ # Take a view so shape changes, etc., do not propagate back.
+ _mask = _mask.view()
+ else:
+ _mask = nomask
+
+ self._mask = _mask
+ # Finalize the mask
+ if self._mask is not nomask:
+ try:
+ self._mask.shape = self.shape
+ except ValueError:
+ self._mask = nomask
+ except (TypeError, AttributeError):
+ # When _mask.shape is not writable (because it's a void)
+ pass
+
+ # Finalize the fill_value
+ if self._fill_value is not None:
+ self._fill_value = _check_fill_value(self._fill_value, self.dtype)
+ elif self.dtype.names is not None:
+ # Finalize the default fill_value for structured arrays
+ self._fill_value = _check_fill_value(None, self.dtype)
+
+ def __array_wrap__(self, obj, context=None):
+ """
+ Special hook for ufuncs.
+
+ Wraps the numpy array and sets the mask according to context.
+
+ """
+ if obj is self: # for in-place operations
+ result = obj
+ else:
+ result = obj.view(type(self))
+ result._update_from(self)
+
+ if context is not None:
+ result._mask = result._mask.copy()
+ func, args, out_i = context
+ # args sometimes contains outputs (gh-10459), which we don't want
+ input_args = args[:func.nin]
+ m = reduce(mask_or, [getmaskarray(arg) for arg in input_args])
+ # Get the domain mask
+ domain = ufunc_domain.get(func, None)
+ if domain is not None:
+ # Take the domain, and make sure it's a ndarray
+ with np.errstate(divide='ignore', invalid='ignore'):
+ d = filled(domain(*input_args), True)
+
+ if d.any():
+ # Fill the result where the domain is wrong
+ try:
+ # Binary domain: take the last value
+ fill_value = ufunc_fills[func][-1]
+ except TypeError:
+ # Unary domain: just use this one
+ fill_value = ufunc_fills[func]
+ except KeyError:
+ # Domain not recognized, use fill_value instead
+ fill_value = self.fill_value
+
+ np.copyto(result, fill_value, where=d)
+
+ # Update the mask
+ if m is nomask:
+ m = d
+ else:
+ # Don't modify inplace, we risk back-propagation
+ m = (m | d)
+
+ # Make sure the mask has the proper size
+ if result is not self and result.shape == () and m:
+ return masked
+ else:
+ result._mask = m
+ result._sharedmask = False
+
+ return result
+
+ def view(self, dtype=None, type=None, fill_value=None):
+ """
+ Return a view of the MaskedArray data.
+
+ Parameters
+ ----------
+ dtype : data-type or ndarray sub-class, optional
+ Data-type descriptor of the returned view, e.g., float32 or int16.
+ The default, None, results in the view having the same data-type
+ as `a`. As with ``ndarray.view``, dtype can also be specified as
+ an ndarray sub-class, which then specifies the type of the
+ returned object (this is equivalent to setting the ``type``
+ parameter).
+ type : Python type, optional
+ Type of the returned view, either ndarray or a subclass. The
+ default None results in type preservation.
+ fill_value : scalar, optional
+ The value to use for invalid entries (None by default).
+ If None, then this argument is inferred from the passed `dtype`, or
+ in its absence the original array, as discussed in the notes below.
+
+ See Also
+ --------
+ numpy.ndarray.view : Equivalent method on ndarray object.
+
+ Notes
+ -----
+
+ ``a.view()`` is used in two different ways:
+
+ ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+ of the array's memory with a different data-type. This can cause a
+ reinterpretation of the bytes of memory.
+
+ ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+ returns an instance of `ndarray_subclass` that looks at the same array
+ (same shape, dtype, etc.) This does not cause a reinterpretation of the
+ memory.
+
+ If `fill_value` is not specified, but `dtype` is specified (and is not
+ an ndarray sub-class), the `fill_value` of the MaskedArray will be
+ reset. If neither `fill_value` nor `dtype` are specified (or if
+ `dtype` is an ndarray sub-class), then the fill value is preserved.
+ Finally, if `fill_value` is specified, but `dtype` is not, the fill
+ value is set to the specified value.
+
+ For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+ bytes per entry than the previous dtype (for example, converting a
+ regular array to a structured array), then the behavior of the view
+ cannot be predicted just from the superficial appearance of ``a`` (shown
+ by ``print(a)``). It also depends on exactly how ``a`` is stored in
+ memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
+ defined as a slice or transpose, etc., the view may give different
+ results.
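+
+ Examples
+ --------
+ A small sketch of the fill-value rules above (assuming a float
+ array, whose default fill value is ``1e+20``):
+
+ >>> x = np.ma.array([1.0, 2.0], mask=[True, False], fill_value=-1.0)
+ >>> x.view().fill_value # no arguments: preserved
+ -1.0
+ >>> x.view(dtype=np.float64).fill_value # dtype given: reset to default
+ 1e+20
+ >>> x.view(fill_value=99.0).fill_value # explicit value wins
+ 99.0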
+ """
+
+ if dtype is None:
+ if type is None:
+ output = ndarray.view(self)
+ else:
+ output = ndarray.view(self, type)
+ elif type is None:
+ try:
+ if issubclass(dtype, ndarray):
+ output = ndarray.view(self, dtype)
+ dtype = None
+ else:
+ output = ndarray.view(self, dtype)
+ except TypeError:
+ output = ndarray.view(self, dtype)
+ else:
+ output = ndarray.view(self, dtype, type)
+
+ # also make the mask be a view (so attr changes to the view's
+ # mask do not affect the original object's mask)
+ # (especially important to avoid affecting np.masked singleton)
+ if getmask(output) is not nomask:
+ output._mask = output._mask.view()
+
+ # Make sure to reset the _fill_value if needed
+ if getattr(output, '_fill_value', None) is not None:
+ if fill_value is None:
+ if dtype is None:
+ pass # leave _fill_value as is
+ else:
+ output._fill_value = None
+ else:
+ output.fill_value = fill_value
+ return output
+
+ def __getitem__(self, indx):
+ """
+ x.__getitem__(y) <==> x[y]
+
+ Return the item described by i, as a masked array.
+
+ """
+ # We could directly use ndarray.__getitem__ on self.
+ # But then we would have to modify __array_finalize__ to prevent the
+ # mask from being reshaped if it hasn't been set up properly yet.
+ # So it's easier to stick with the current version.
+ dout = self.data[indx]
+ _mask = self._mask
+
+ def _is_scalar(m):
+ return not isinstance(m, np.ndarray)
+
+ def _scalar_heuristic(arr, elem):
+ """
+ Return whether `elem` is a scalar result of indexing `arr`, or None
+ if undecidable without promoting nomask to a full mask
+ """
+ # obviously a scalar
+ if not isinstance(elem, np.ndarray):
+ return True
+
+ # object array scalar indexing can return anything
+ elif arr.dtype.type is np.object_:
+ if arr.dtype is not elem.dtype:
+ # elem is an array, but dtypes do not match, so must be
+ # an element
+ return True
+
+ # well-behaved subclass that only returns 0d arrays when
+ # expected - this is not a scalar
+ elif type(arr).__getitem__ == ndarray.__getitem__:
+ return False
+
+ return None
+
+ if _mask is not nomask:
+ # _mask cannot be a subclass, so it tells us whether we should
+ # expect a scalar. It also cannot be of dtype object.
+ mout = _mask[indx]
+ scalar_expected = _is_scalar(mout)
+
+ else:
+ # attempt to apply the heuristic to avoid constructing a full mask
+ mout = nomask
+ scalar_expected = _scalar_heuristic(self.data, dout)
+ if scalar_expected is None:
+ # heuristics have failed
+ # construct a full array, so we can be certain. This is costly.
+ # we could also fall back on ndarray.__getitem__(self.data, indx)
+ scalar_expected = _is_scalar(getmaskarray(self)[indx])
+
+ # Did we extract a single item?
+ if scalar_expected:
+ # A record
+ if isinstance(dout, np.void):
+ # We should always re-cast to mvoid, otherwise users can
+ # change masks on rows that already have masked values, but not
+ # on rows that have no masked values, which is inconsistent.
+ return mvoid(dout, mask=mout, hardmask=self._hardmask)
+
+ # special case introduced in gh-5962
+ elif (self.dtype.type is np.object_ and
+ isinstance(dout, np.ndarray) and
+ dout is not masked):
+ # If masked, turn into a MaskedArray, with everything masked.
+ if mout:
+ return MaskedArray(dout, mask=True)
+ else:
+ return dout
+
+ # Just a scalar
+ else:
+ if mout:
+ return masked
+ else:
+ return dout
+ else:
+ # Force dout to MA
+ dout = dout.view(type(self))
+ # Inherit attributes from self
+ dout._update_from(self)
+ # Check the fill_value
+ if is_string_or_list_of_strings(indx):
+ if self._fill_value is not None:
+ dout._fill_value = self._fill_value[indx]
+
+ # Something like gh-15895 has happened if this check fails.
+ # _fill_value should always be an ndarray.
+ if not isinstance(dout._fill_value, np.ndarray):
+ raise RuntimeError('Internal NumPy error.')
+ # If we're indexing a multidimensional field in a
+ # structured array (such as dtype("(2,)i2,(2,)i1")),
+ # dimensionality goes up (M[field].ndim == M.ndim +
+ # M.dtype[field].ndim). That's fine for
+ # M[field] but problematic for M[field].fill_value
+ # which should have shape () to avoid breaking several
+ # methods. There is no great way out, so set to
+ # first element. See issue #6723.
+ if dout._fill_value.ndim > 0:
+ if not (dout._fill_value ==
+ dout._fill_value.flat[0]).all():
+ warnings.warn(
+ "Upon accessing multidimensional field "
+ f"{indx!s}, need to keep dimensionality "
+ "of fill_value at 0. Discarding "
+ "heterogeneous fill_value and setting "
+ f"all to {dout._fill_value[0]!s}.",
+ stacklevel=2)
+ # Need to use `.flat[0:1].squeeze(...)` instead of just
+ # `.flat[0]` to ensure the result is a 0d array and not
+ # a scalar.
+ dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0)
+ dout._isfield = True
+ # Update the mask if needed
+ if mout is not nomask:
+ # set shape to match that of data; this is needed for matrices
+ dout._mask = reshape(mout, dout.shape)
+ dout._sharedmask = True
+ # Note: Don't try to check for m.any(), that'll take too long
+ return dout
+
+ # setitem may put NaNs into integer arrays or occasionally overflow a
+ # float. But this may happen at masked positions, so avoid the otherwise
+ # correct warnings (as is typical also in masked calculations).
+ @np.errstate(over='ignore', invalid='ignore')
+ def __setitem__(self, indx, value):
+ """
+ x.__setitem__(i, y) <==> x[i]=y
+
+ Set item described by index. If value is masked, masks those
+ locations.
+
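+ Examples
+ --------
+ A minimal sketch (default integer fill value assumed):
+
+ >>> x = np.ma.array([1, 2, 3])
+ >>> x[1] = np.ma.masked
+ >>> x
+ masked_array(data=[1, --, 3],
+ mask=[False, True, False],
+ fill_value=999999)
+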
+ """
+ if self is masked:
+ raise MaskError('Cannot alter the masked element.')
+ _data = self._data
+ _mask = self._mask
+ if isinstance(indx, str):
+ _data[indx] = value
+ if _mask is nomask:
+ self._mask = _mask = make_mask_none(self.shape, self.dtype)
+ _mask[indx] = getmask(value)
+ return
+
+ _dtype = _data.dtype
+
+ if value is masked:
+ # The mask wasn't set: create a full version.
+ if _mask is nomask:
+ _mask = self._mask = make_mask_none(self.shape, _dtype)
+ # Now, set the mask to its value.
+ if _dtype.names is not None:
+ _mask[indx] = tuple([True] * len(_dtype.names))
+ else:
+ _mask[indx] = True
+ return
+
+ # Get the _data part of the new value
+ dval = getattr(value, '_data', value)
+ # Get the _mask part of the new value
+ mval = getmask(value)
+ if _dtype.names is not None and mval is nomask:
+ mval = tuple([False] * len(_dtype.names))
+ if _mask is nomask:
+ # Set the data, then the mask
+ _data[indx] = dval
+ if mval is not nomask:
+ _mask = self._mask = make_mask_none(self.shape, _dtype)
+ _mask[indx] = mval
+ elif not self._hardmask:
+ # Set the data, then the mask
+ if (isinstance(indx, masked_array) and
+ not isinstance(value, masked_array)):
+ _data[indx.data] = dval
+ else:
+ _data[indx] = dval
+ _mask[indx] = mval
+ elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
+ indx = indx * umath.logical_not(_mask)
+ _data[indx] = dval
+ else:
+ if _dtype.names is not None:
+ err_msg = "Flexible 'hard' masks are not yet supported."
+ raise NotImplementedError(err_msg)
+ mindx = mask_or(_mask[indx], mval, copy=True)
+ dindx = self._data[indx]
+ if dindx.size > 1:
+ np.copyto(dindx, dval, where=~mindx)
+ elif mindx is nomask:
+ dindx = dval
+ _data[indx] = dindx
+ _mask[indx] = mindx
+ return
+
+ # Define so that we can overwrite the setter.
+ @property
+ def dtype(self):
+ return super().dtype
+
+ @dtype.setter
+ def dtype(self, dtype):
+ super(MaskedArray, type(self)).dtype.__set__(self, dtype)
+ if self._mask is not nomask:
+ self._mask = self._mask.view(make_mask_descr(dtype), ndarray)
+ # Try to reset the shape of the mask (if we don't have a void).
+ # This raises a ValueError if the dtype change won't work.
+ try:
+ self._mask.shape = self.shape
+ except (AttributeError, TypeError):
+ pass
+
+ @property
+ def shape(self):
+ return super().shape
+
+ @shape.setter
+ def shape(self, shape):
+ super(MaskedArray, type(self)).shape.__set__(self, shape)
+ # Cannot use self._mask, since it may not (yet) exist when a
+ # masked matrix sets the shape.
+ if getmask(self) is not nomask:
+ self._mask.shape = self.shape
+
+ def __setmask__(self, mask, copy=False):
+ """
+ Set the mask.
+
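+ A short sketch; assigning to the ``mask`` property delegates here:
+
+ >>> x = np.ma.array([1, 2, 3])
+ >>> x.mask = [0, 1, 0]
+ >>> x
+ masked_array(data=[1, --, 3],
+ mask=[False, True, False],
+ fill_value=999999)
+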
+ """
+ idtype = self.dtype
+ current_mask = self._mask
+ if mask is masked:
+ mask = True
+
+ if current_mask is nomask:
+ # Make sure the mask is set
+ # Just don't do anything if there's nothing to do.
+ if mask is nomask:
+ return
+ current_mask = self._mask = make_mask_none(self.shape, idtype)
+
+ if idtype.names is None:
+ # No named fields.
+ # Hardmask: don't unmask the data
+ if self._hardmask:
+ current_mask |= mask
+ # Softmask: set everything to False
+ # If it's obviously a compatible scalar, use a quick update
+ # method.
+ elif isinstance(mask, (int, float, np.bool_, np.number)):
+ current_mask[...] = mask
+ # Otherwise fall back to the slower, general purpose way.
+ else:
+ current_mask.flat = mask
+ else:
+ # Named fields.
+ mdtype = current_mask.dtype
+ mask = np.array(mask, copy=False)
+ # Mask is a singleton
+ if not mask.ndim:
+ # It's a boolean : make a record
+ if mask.dtype.kind == 'b':
+ mask = np.array(tuple([mask.item()] * len(mdtype)),
+ dtype=mdtype)
+ # It's a record: make sure the dtype is correct
+ else:
+ mask = mask.astype(mdtype)
+ # Mask is a sequence
+ else:
+ # Make sure the new mask is a ndarray with the proper dtype
+ try:
+ mask = np.array(mask, copy=copy, dtype=mdtype)
+ # Or assume it's a sequence of bool/int
+ except TypeError:
+ mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+ dtype=mdtype)
+ # Hardmask: don't unmask the data
+ if self._hardmask:
+ for n in idtype.names:
+ current_mask[n] |= mask[n]
+ # Softmask: set everything to False
+ # If it's obviously a compatible scalar, use a quick update
+ # method.
+ elif isinstance(mask, (int, float, np.bool_, np.number)):
+ current_mask[...] = mask
+ # Otherwise fall back to the slower, general purpose way.
+ else:
+ current_mask.flat = mask
+ # Reshape if needed
+ if current_mask.shape:
+ current_mask.shape = self.shape
+ return
+
+ _set_mask = __setmask__
+
+ @property
+ def mask(self):
+ """ Current mask. """
+
+ # We could try to force a reshape, but that wouldn't work in some
+ # cases.
+ # Return a view so that the dtype and shape cannot be changed in place
+ # This still preserves nomask by identity
+ return self._mask.view()
+
+ @mask.setter
+ def mask(self, value):
+ self.__setmask__(value)
+
+ @property
+ def recordmask(self):
+ """
+ Get the mask of the array if it has no named fields. For
+ structured arrays, returns a ndarray of booleans where entries are
+ ``True`` if **all** the fields are masked, ``False`` otherwise:
+
+ >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)],
+ ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
+ ... dtype=[('a', int), ('b', int)])
+ >>> x.recordmask
+ array([False, False, True, False, False])
+ """
+
+ _mask = self._mask.view(ndarray)
+ if _mask.dtype.names is None:
+ return _mask
+ return np.all(flatten_structured_array(_mask), axis=-1)
+
+ @recordmask.setter
+ def recordmask(self, mask):
+ raise NotImplementedError("Coming soon: setting the mask per records!")
+
+ def harden_mask(self):
+ """
+ Force the mask to hard, preventing unmasking by assignment.
+
+ Whether the mask of a masked array is hard or soft is determined by
+ its `~ma.MaskedArray.hardmask` property. `harden_mask` sets
+ `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified
+ self).
+
+ See Also
+ --------
+ ma.MaskedArray.hardmask
+ ma.MaskedArray.soften_mask
+
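+ Examples
+ --------
+ A minimal sketch (default integer fill value assumed):
+
+ >>> x = np.ma.array([1, 2], mask=[True, False]).harden_mask()
+ >>> x[0] = 10 # ignored: the entry stays masked
+ >>> x
+ masked_array(data=[--, 2],
+ mask=[ True, False],
+ fill_value=999999)
+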
+ """
+ self._hardmask = True
+ return self
+
+ def soften_mask(self):
+ """
+ Force the mask to soft (default), allowing unmasking by assignment.
+
+ Whether the mask of a masked array is hard or soft is determined by
+ its `~ma.MaskedArray.hardmask` property. `soften_mask` sets
+ `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified
+ self).
+
+ See Also
+ --------
+ ma.MaskedArray.hardmask
+ ma.MaskedArray.harden_mask
+
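+ Examples
+ --------
+ A minimal sketch (default integer fill value assumed):
+
+ >>> x = np.ma.array([1, 2], mask=[True, False]).soften_mask()
+ >>> x[0] = 10 # soft mask: assignment unmasks the entry
+ >>> x
+ masked_array(data=[10, 2],
+ mask=[False, False],
+ fill_value=999999)
+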
+ """
+ self._hardmask = False
+ return self
+
+ @property
+ def hardmask(self):
+ """
+ Specifies whether values can be unmasked through assignments.
+
+ By default, assigning definite values to masked array entries will
+ unmask them. When `hardmask` is ``True``, the mask will not change
+ through assignments.
+
+ See Also
+ --------
+ ma.MaskedArray.harden_mask
+ ma.MaskedArray.soften_mask
+
+ Examples
+ --------
+ >>> x = np.arange(10)
+ >>> m = np.ma.masked_array(x, x>5)
+ >>> assert not m.hardmask
+
+ Since `m` has a soft mask, assigning an element value unmasks that
+ element:
+
+ >>> m[8] = 42
+ >>> m
+ masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --],
+ mask=[False, False, False, False, False, False,
+ True, True, False, True],
+ fill_value=999999)
+
+ After hardening, the mask is not affected by assignments:
+
+ >>> hardened = np.ma.harden_mask(m)
+ >>> assert m.hardmask and hardened is m
+ >>> m[:] = 23
+ >>> m
+ masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --],
+ mask=[False, False, False, False, False, False,
+ True, True, False, True],
+ fill_value=999999)
+
+ """
+ return self._hardmask
+
+ def unshare_mask(self):
+ """
+ Copy the mask and set the `sharedmask` flag to ``False``.
+
+ Whether the mask is shared between masked arrays can be seen from
+ the `sharedmask` property. `unshare_mask` ensures the mask is not
+ shared. A copy of the mask is only made if it was shared.
+
+ See Also
+ --------
+ sharedmask
+
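+ Examples
+ --------
+ A minimal sketch:
+
+ >>> x = np.ma.array([1, 2], mask=[0, 1])
+ >>> y = x[:] # the view shares x's mask
+ >>> y.unshare_mask() is y # copies the mask, clears the flag
+ True
+ >>> y.sharedmask
+ False
+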
+ """
+ if self._sharedmask:
+ self._mask = self._mask.copy()
+ self._sharedmask = False
+ return self
+
+ @property
+ def sharedmask(self):
+ """ Share status of the mask (read-only). """
+ return self._sharedmask
+
+ def shrink_mask(self):
+ """
+ Reduce a mask to nomask when possible.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
+ >>> x.mask
+ array([[False, False],
+ [False, False]])
+ >>> x.shrink_mask()
+ masked_array(
+ data=[[1, 2],
+ [3, 4]],
+ mask=False,
+ fill_value=999999)
+ >>> x.mask
+ False
+
+ """
+ self._mask = _shrink_mask(self._mask)
+ return self
+
+ @property
+ def baseclass(self):
+ """ Class of the underlying data (read-only). """
+ return self._baseclass
+
+ def _get_data(self):
+ """
+ Returns the underlying data, as a view of the masked array.
+
+ If the underlying data is a subclass of :class:`numpy.ndarray`, it is
+ returned as such.
+
+ >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+ >>> x.data
+ matrix([[1, 2],
+ [3, 4]])
+
+ The type of the data can be accessed through the :attr:`baseclass`
+ attribute.
+ """
+ return ndarray.view(self, self._baseclass)
+
+ _data = property(fget=_get_data)
+ data = property(fget=_get_data)
+
+ @property
+ def flat(self):
+ """ Return a flat iterator, or set a flattened version of self to value. """
+ return MaskedIterator(self)
+
+ @flat.setter
+ def flat(self, value):
+ y = self.ravel()
+ y[:] = value
+
+ @property
+ def fill_value(self):
+ """
+ The filling value of the masked array is a scalar. When setting it,
+ None resets the fill value to a default based on the data type.
+
+ Examples
+ --------
+ >>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
+ ... np.ma.array([0, 1], dtype=dt).get_fill_value()
+ ...
+ 999999
+ 999999
+ 1e+20
+ (1e+20+0j)
+
+ >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
+ >>> x.fill_value
+ -inf
+ >>> x.fill_value = np.pi
+ >>> x.fill_value
+ 3.1415926535897931 # may vary
+
+ Reset to default:
+
+ >>> x.fill_value = None
+ >>> x.fill_value
+ 1e+20
+
+ """
+ if self._fill_value is None:
+ self._fill_value = _check_fill_value(None, self.dtype)
+
+ # Temporary workaround to account for the fact that str and bytes
+ # scalars cannot be indexed with (), whereas all other numpy
+ # scalars can. See issues #7259 and #7267.
+ # The if-block can be removed after #7267 has been fixed.
+ if isinstance(self._fill_value, ndarray):
+ return self._fill_value[()]
+ return self._fill_value
+
+ @fill_value.setter
+ def fill_value(self, value=None):
+ target = _check_fill_value(value, self.dtype)
+ if not target.ndim == 0:
+ # 2019-11-12, 1.18.0
+ warnings.warn(
+ "Non-scalar arrays for the fill value are deprecated. Use "
+ "arrays with scalar values instead. The filled function "
+ "still supports any array as `fill_value`.",
+ DeprecationWarning, stacklevel=2)
+
+ _fill_value = self._fill_value
+ if _fill_value is None:
+ # Create the attribute if it was undefined
+ self._fill_value = target
+ else:
+ # Don't overwrite the attribute, just fill it (for propagation)
+ _fill_value[()] = target
+
+ # kept for compatibility
+ get_fill_value = fill_value.fget
+ set_fill_value = fill_value.fset
+
+ def filled(self, fill_value=None):
+ """
+ Return a copy of self, with masked values filled with a given value.
+ **However**, if there are no masked values to fill, self will be
+ returned instead as an ndarray.
+
+ Parameters
+ ----------
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or non-scalar.
+ If non-scalar, the resulting ndarray must be broadcastable over the
+ input array. Default is None, in which case the `fill_value`
+ attribute of the array is used instead.
+
+ Returns
+ -------
+ filled_array : ndarray
+ A copy of ``self`` with invalid entries replaced by *fill_value*
+ (be it the function argument or the attribute of ``self``), or
+ ``self`` itself as an ndarray if there are no invalid entries to
+ be replaced.
+
+ Notes
+ -----
+ The result is **not** a MaskedArray!
+
+ Examples
+ --------
+ >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+ >>> x.filled()
+ array([ 1, 2, -999, 4, -999])
+ >>> x.filled(fill_value=1000)
+ array([ 1, 2, 1000, 4, 1000])
+ >>> type(x.filled())
+ <class 'numpy.ndarray'>
+
+ Subclassing is preserved. This means that if, e.g., the data part of
+ the masked array is a recarray, `filled` returns a recarray:
+
+ >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
+ >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
+ >>> m.filled()
+ rec.array([(999999, 2), ( -3, 999999)],
+ dtype=[('f0', '<i8'), ('f1', '<i8')])
+ """
+ m = self._mask
+ if m is nomask:
+ return self._data
+
+ if fill_value is None:
+ fill_value = self.fill_value
+ else:
+ fill_value = _check_fill_value(fill_value, self.dtype)
+
+ if self is masked_singleton:
+ return np.asanyarray(fill_value)
+
+ if m.dtype.names is not None:
+ result = self._data.copy('K')
+ _recursive_filled(result, self._mask, fill_value)
+ elif not m.any():
+ return self._data
+ else:
+ result = self._data.copy('K')
+ try:
+ np.copyto(result, fill_value, where=m)
+ except (TypeError, AttributeError):
+ fill_value = narray(fill_value, dtype=object)
+ d = result.astype(object)
+ result = np.choose(m, (d, fill_value))
+ except IndexError:
+ # ok, if scalar
+ if self._data.shape:
+ raise
+ elif m:
+ result = np.array(fill_value, dtype=self.dtype)
+ else:
+ result = self._data
+ return result
+
+ def compressed(self):
+ """
+ Return all the non-masked data as a 1-D array.
+
+ Returns
+ -------
+ data : ndarray
+ A new `ndarray` holding the non-masked data is returned.
+
+ Notes
+ -----
+ The result is **not** a MaskedArray!
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
+ >>> x.compressed()
+ array([0, 1])
+ >>> type(x.compressed())
+ <class 'numpy.ndarray'>
+
+ """
+ data = ndarray.ravel(self._data)
+ if self._mask is not nomask:
+ data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
+ return data
+
+ def compress(self, condition, axis=None, out=None):
+ """
+ Return `a` where condition is ``True``.
+
+ If condition is a `~ma.MaskedArray`, missing values are considered
+ as ``False``.
+
+ Parameters
+ ----------
+ condition : array_like
+ Boolean 1-d array selecting which entries to return. If the length
+ of `condition` is less than the size of `a` along the given axis,
+ then the output is truncated to the length of the condition array.
+ axis : {None, int}, optional
+ Axis along which the operation must be performed.
+ out : {None, ndarray}, optional
+ Alternative output array in which to place the result. It must have
+ the same shape as the expected output but the type will be cast if
+ necessary.
+
+ Returns
+ -------
+ result : MaskedArray
+ A :class:`~ma.MaskedArray` object.
+
+ Notes
+ -----
+ Please note the difference with :meth:`compressed`!
+ The output of :meth:`compress` has a mask, the output of
+ :meth:`compressed` does not.
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.compress([1, 0, 1])
+ masked_array(data=[1, 3],
+ mask=[False, False],
+ fill_value=999999)
+
+ >>> x.compress([1, 0, 1], axis=1)
+ masked_array(
+ data=[[1, 3],
+ [--, --],
+ [7, 9]],
+ mask=[[False, False],
+ [ True, True],
+ [False, False]],
+ fill_value=999999)
+
+ """
+ # Get the basic components
+ (_data, _mask) = (self._data, self._mask)
+
+ # Force the condition to a regular ndarray and forget the missing
+ # values.
+ condition = np.asarray(condition)
+
+ _new = _data.compress(condition, axis=axis, out=out).view(type(self))
+ _new._update_from(self)
+ if _mask is not nomask:
+ _new._mask = _mask.compress(condition, axis=axis)
+ return _new
+
+ def _insert_masked_print(self):
+ """
+ Replace masked values with masked_print_option, casting all innermost
+ dtypes to object.
+ """
+ if masked_print_option.enabled():
+ mask = self._mask
+ if mask is nomask:
+ res = self._data
+ else:
+ # convert to object array to make filled work
+ data = self._data
+ # For big arrays, to avoid a costly conversion to the
+ # object dtype, extract the corners before the conversion.
+ print_width = (self._print_width if self.ndim > 1
+ else self._print_width_1d)
+ for axis in range(self.ndim):
+ if data.shape[axis] > print_width:
+ ind = print_width // 2
+ arr = np.split(data, (ind, -ind), axis=axis)
+ data = np.concatenate((arr[0], arr[2]), axis=axis)
+ arr = np.split(mask, (ind, -ind), axis=axis)
+ mask = np.concatenate((arr[0], arr[2]), axis=axis)
+
+ rdtype = _replace_dtype_fields(self.dtype, "O")
+ res = data.astype(rdtype)
+ _recursive_printoption(res, mask, masked_print_option)
+ else:
+ res = self.filled(self.fill_value)
+ return res
+
+ def __str__(self):
+ return str(self._insert_masked_print())
+
+ def __repr__(self):
+ """
+ Literal string representation.
+
+ """
+ if self._baseclass is np.ndarray:
+ name = 'array'
+ else:
+ name = self._baseclass.__name__
+
+ # 2016-11-19: Demoted to legacy format
+ if np.core.arrayprint._get_legacy_print_mode() <= 113:
+ is_long = self.ndim > 1
+ parameters = dict(
+ name=name,
+ nlen=" " * len(name),
+ data=str(self),
+ mask=str(self._mask),
+ fill=str(self.fill_value),
+ dtype=str(self.dtype)
+ )
+ is_structured = bool(self.dtype.names)
+ key = '{}_{}'.format(
+ 'long' if is_long else 'short',
+ 'flx' if is_structured else 'std'
+ )
+ return _legacy_print_templates[key] % parameters
+
+ prefix = f"masked_{name}("
+
+ dtype_needed = (
+ not np.core.arrayprint.dtype_is_implied(self.dtype) or
+ np.all(self.mask) or
+ self.size == 0
+ )
+
+ # determine which keyword args need to be shown
+ keys = ['data', 'mask', 'fill_value']
+ if dtype_needed:
+ keys.append('dtype')
+
+ # array has only one row (non-column)
+ is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1])
+
+ # choose what to indent each keyword with
+ min_indent = 2
+ if is_one_row:
+ # first key on the same line as the type, remaining keys
+ # aligned by equals
+ indents = {}
+ indents[keys[0]] = prefix
+ for k in keys[1:]:
+ n = builtins.max(min_indent, len(prefix + keys[0]) - len(k))
+ indents[k] = ' ' * n
+ prefix = '' # absorbed into the first indent
+ else:
+ # each key on its own line, indented by two spaces
+ indents = {k: ' ' * min_indent for k in keys}
+ prefix = prefix + '\n' # first key on the next line
+
+ # format the field values
+ reprs = {}
+ reprs['data'] = np.array2string(
+ self._insert_masked_print(),
+ separator=", ",
+ prefix=indents['data'] + 'data=',
+ suffix=',')
+ reprs['mask'] = np.array2string(
+ self._mask,
+ separator=", ",
+ prefix=indents['mask'] + 'mask=',
+ suffix=',')
+ reprs['fill_value'] = repr(self.fill_value)
+ if dtype_needed:
+ reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype)
+
+ # join keys with values and indentations
+ result = ',\n'.join(
+ '{}{}={}'.format(indents[k], k, reprs[k])
+ for k in keys
+ )
+ return prefix + result + ')'
+
+ def _delegate_binop(self, other):
+ # This emulates the logic in
+ # private/binop_override.h:forward_binop_should_defer
+ if isinstance(other, type(self)):
+ return False
+ array_ufunc = getattr(other, "__array_ufunc__", False)
+ if array_ufunc is False:
+ other_priority = getattr(other, "__array_priority__", -1000000)
+ return self.__array_priority__ < other_priority
+ else:
+ # If array_ufunc is not None, it will be called inside the ufunc;
+ # None explicitly tells us to not call the ufunc, i.e., defer.
+ return array_ufunc is None
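+ # Illustration: a hypothetical subclass that sets
+ # __array_ufunc__ = None asks not to be handled by our ufuncs, so
+ # we return NotImplemented and Python falls back to, e.g.,
+ # other.__radd__(self).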
+
+ def _comparison(self, other, compare):
+ """Compare self with other using operator.eq or operator.ne.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
+ """
+ omask = getmask(other)
+ smask = self.mask
+ mask = mask_or(smask, omask, copy=True)
+
+ odata = getdata(other)
+ if mask.dtype.names is not None:
+ # only == and != are reasonably defined for structured dtypes,
+ # so give up early for all other comparisons:
+ if compare not in (operator.eq, operator.ne):
+ return NotImplemented
+ # For possibly masked structured arrays we need to be careful,
+ # since the standard structured array comparison will use all
+ # fields, masked or not. To avoid masked fields influencing the
+ # outcome, we set all masked fields in self to other, so they'll
+ # count as equal. To prepare, we ensure we have the right shape.
+ broadcast_shape = np.broadcast(self, odata).shape
+ sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True)
+ sbroadcast._mask = mask
+ sdata = sbroadcast.filled(odata)
+ # Now take care of the mask; the merged mask should have an item
+ # masked if all fields were masked (in one and/or other).
+ mask = (mask == np.ones((), mask.dtype))
+
+ else:
+ # For regular arrays, just use the data as they come.
+ sdata = self.data
+
+ check = compare(sdata, odata)
+
+ if isinstance(check, (np.bool_, bool)):
+ return masked if mask else check
+
+ if mask is not nomask and compare in (operator.eq, operator.ne):
+ # Adjust elements that were masked, which should be treated
+ # as equal if masked in both, unequal if masked in one.
+ # Note that this works automatically for structured arrays too.
+ # Ignore this for operations other than `==` and `!=`
+ check = np.where(mask, compare(smask, omask), check)
+ if mask.shape != check.shape:
+ # Guarantee consistency of the shape, making a copy since the
+ # mask may need to get written to later.
+ mask = np.broadcast_to(mask, check.shape).copy()
+
+ check = check.view(type(self))
+ check._update_from(self)
+ check._mask = mask
+
+ # Cast fill value to bool_ if needed. If it cannot be cast, the
+ # default boolean fill value is used.
+ if check._fill_value is not None:
+ try:
+ fill = _check_fill_value(check._fill_value, np.bool_)
+ except (TypeError, ValueError):
+ fill = _check_fill_value(None, np.bool_)
+ check._fill_value = fill
+
+ return check
+
+ def __eq__(self, other):
+ """Check whether other equals self elementwise.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
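+
+ Examples
+ --------
+ A small sketch (boolean results get a boolean fill value):
+
+ >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> b = np.ma.array([1, 0, 0], mask=[0, 1, 1])
+ >>> a == b
+ masked_array(data=[True, --, --],
+ mask=[False, True, True],
+ fill_value=True)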
+ """
+ return self._comparison(other, operator.eq)
+
+ def __ne__(self, other):
+ """Check whether other does not equal self elementwise.
+
+ When either of the elements is masked, the result is masked as well,
+ but the underlying boolean data are still set, with self and other
+ considered equal if both are masked, and unequal otherwise.
+
+ For structured arrays, all fields are combined, with masked values
+ ignored. The result is masked if all fields were masked, with self
+ and other considered equal only if both were fully masked.
+ """
+ return self._comparison(other, operator.ne)
+
+ # All other comparisons:
+ def __le__(self, other):
+ return self._comparison(other, operator.le)
+
+ def __lt__(self, other):
+ return self._comparison(other, operator.lt)
+
+ def __ge__(self, other):
+ return self._comparison(other, operator.ge)
+
+ def __gt__(self, other):
+ return self._comparison(other, operator.gt)
+
+ def __add__(self, other):
+ """
+ Add self to other, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return add(self, other)
+
+ def __radd__(self, other):
+ """
+ Add other to self, and return a new masked array.
+
+ """
+ # In analogy with __rsub__ and __rdiv__, use original order:
+ # we get here from `other + self`.
+ return add(other, self)
+
+ def __sub__(self, other):
+ """
+ Subtract other from self, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return subtract(self, other)
+
+ def __rsub__(self, other):
+ """
+ Subtract self from other, and return a new masked array.
+
+ """
+ return subtract(other, self)
+
+ def __mul__(self, other):
+ "Multiply self by other, and return a new masked array."
+ if self._delegate_binop(other):
+ return NotImplemented
+ return multiply(self, other)
+
+ def __rmul__(self, other):
+ """
+ Multiply other by self, and return a new masked array.
+
+ """
+ # In analogy with __rsub__ and __rdiv__, use original order:
+ # we get here from `other * self`.
+ return multiply(other, self)
+
+ def __div__(self, other):
+ """
+ Divide other into self, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return divide(self, other)
+
+ def __truediv__(self, other):
+ """
+ Divide other into self, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return true_divide(self, other)
+
+ def __rtruediv__(self, other):
+ """
+ Divide self into other, and return a new masked array.
+
+ """
+ return true_divide(other, self)
+
+ def __floordiv__(self, other):
+ """
+ Divide other into self, and return a new masked array.
+
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return floor_divide(self, other)
+
+ def __rfloordiv__(self, other):
+ """
+ Divide self into other, and return a new masked array.
+
+ """
+ return floor_divide(other, self)
+
+ def __pow__(self, other):
+ """
+ Raise self to the power other, masking the potential NaNs/Infs
+
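+ Examples
+ --------
+ A small sketch; a negative base with a fractional exponent gives an
+ invalid, hence masked, result (float default fill value assumed):
+
+ >>> np.ma.array([-1.0, 4.0]) ** 0.5
+ masked_array(data=[--, 2.0],
+ mask=[ True, False],
+ fill_value=1e+20)
+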
+ """
+ if self._delegate_binop(other):
+ return NotImplemented
+ return power(self, other)
+
+ def __rpow__(self, other):
+ """
+ Raise other to the power self, masking the potential NaNs/Infs
+
+ """
+ return power(other, self)
+
+ def __iadd__(self, other):
+ """
+ Add other to self in-place.
+
+ """
+ m = getmask(other)
+ if self._mask is nomask:
+ if m is not nomask and m.any():
+ self._mask = make_mask_none(self.shape, self.dtype)
+ self._mask += m
+ else:
+ if m is not nomask:
+ self._mask += m
+ other_data = getdata(other)
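+ # Replace the operand at masked positions with the operation's
+ # identity (0 for addition) so the underlying data stays
+ # well-defined there; the in-place operators below use the same
+ # pattern with their own identities.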
+ other_data = np.where(self._mask, other_data.dtype.type(0), other_data)
+ self._data.__iadd__(other_data)
+ return self
+
+ def __isub__(self, other):
+ """
+ Subtract other from self in-place.
+
+ """
+ m = getmask(other)
+ if self._mask is nomask:
+ if m is not nomask and m.any():
+ self._mask = make_mask_none(self.shape, self.dtype)
+ self._mask += m
+ elif m is not nomask:
+ self._mask += m
+ other_data = getdata(other)
+ other_data = np.where(self._mask, other_data.dtype.type(0), other_data)
+ self._data.__isub__(other_data)
+ return self
+
+ def __imul__(self, other):
+ """
+ Multiply self by other in-place.
+
+ """
+ m = getmask(other)
+ if self._mask is nomask:
+ if m is not nomask and m.any():
+ self._mask = make_mask_none(self.shape, self.dtype)
+ self._mask += m
+ elif m is not nomask:
+ self._mask += m
+ other_data = getdata(other)
+ other_data = np.where(self._mask, other_data.dtype.type(1), other_data)
+ self._data.__imul__(other_data)
+ return self
+
+ def __idiv__(self, other):
+ """
+ Divide self by other in-place.
+
+ """
+ other_data = getdata(other)
+ dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
+ other_mask = getmask(other)
+ new_mask = mask_or(other_mask, dom_mask)
+ # The following 4 lines control the domain filling
+ if dom_mask.any():
+ (_, fval) = ufunc_fills[np.divide]
+ other_data = np.where(
+ dom_mask, other_data.dtype.type(fval), other_data)
+ self._mask |= new_mask
+ other_data = np.where(self._mask, other_data.dtype.type(1), other_data)
+ self._data.__idiv__(other_data)
+ return self
+
+ def __ifloordiv__(self, other):
+ """
+ Floor divide self by other in-place.
+
+ """
+ other_data = getdata(other)
+ dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
+ other_mask = getmask(other)
+ new_mask = mask_or(other_mask, dom_mask)
+ # The following 3 lines control the domain filling
+ if dom_mask.any():
+ (_, fval) = ufunc_fills[np.floor_divide]
+ other_data = np.where(
+ dom_mask, other_data.dtype.type(fval), other_data)
+ self._mask |= new_mask
+ other_data = np.where(self._mask, other_data.dtype.type(1), other_data)
+ self._data.__ifloordiv__(other_data)
+ return self
+
+ def __itruediv__(self, other):
+ """
+ True divide self by other in-place.
+
+ """
+ other_data = getdata(other)
+ dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
+ other_mask = getmask(other)
+ new_mask = mask_or(other_mask, dom_mask)
+ # The following 3 lines control the domain filling
+ if dom_mask.any():
+ (_, fval) = ufunc_fills[np.true_divide]
+ other_data = np.where(
+ dom_mask, other_data.dtype.type(fval), other_data)
+ self._mask |= new_mask
+ other_data = np.where(self._mask, other_data.dtype.type(1), other_data)
+ self._data.__itruediv__(other_data)
+ return self
+
+ def __ipow__(self, other):
+ """
+ Raise self to the power other, in place.
+
+ """
+ other_data = getdata(other)
+ other_data = np.where(self._mask, other_data.dtype.type(1), other_data)
+ other_mask = getmask(other)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ self._data.__ipow__(other_data)
+ invalid = np.logical_not(np.isfinite(self._data))
+ if invalid.any():
+ if self._mask is not nomask:
+ self._mask |= invalid
+ else:
+ self._mask = invalid
+ np.copyto(self._data, self.fill_value, where=invalid)
+ new_mask = mask_or(other_mask, invalid)
+ self._mask = mask_or(self._mask, new_mask)
+ return self
+
+ def __float__(self):
+ """
+ Convert to float.
+
+ """
+ if self.size > 1:
+ raise TypeError("Only length-1 arrays can be converted "
+ "to Python scalars")
+ elif self._mask:
+ warnings.warn("Warning: converting a masked element to nan.", stacklevel=2)
+ return np.nan
+ return float(self.item())
+
+ def __int__(self):
+ """
+ Convert to int.
+
+ """
+ if self.size > 1:
+ raise TypeError("Only length-1 arrays can be converted "
+ "to Python scalars")
+ elif self._mask:
+ raise MaskError('Cannot convert masked element to a Python int.')
+ return int(self.item())
+
+ @property
+ def imag(self):
+ """
+ The imaginary part of the masked array.
+
+ This property is a view on the imaginary part of this `MaskedArray`.
+
+ See Also
+ --------
+ real
+
+ Examples
+ --------
+ >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
+ >>> x.imag
+ masked_array(data=[1.0, --, 1.6],
+ mask=[False, True, False],
+ fill_value=1e+20)
+
+ """
+ result = self._data.imag.view(type(self))
+ result.__setmask__(self._mask)
+ return result
+
+ # kept for compatibility
+ get_imag = imag.fget
+
+ @property
+ def real(self):
+ """
+ The real part of the masked array.
+
+ This property is a view on the real part of this `MaskedArray`.
+
+ See Also
+ --------
+ imag
+
+ Examples
+ --------
+ >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
+ >>> x.real
+ masked_array(data=[1.0, --, 3.45],
+ mask=[False, True, False],
+ fill_value=1e+20)
+
+ """
+ result = self._data.real.view(type(self))
+ result.__setmask__(self._mask)
+ return result
+
+ # kept for compatibility
+ get_real = real.fget
+
+ def count(self, axis=None, keepdims=np._NoValue):
+ """
+ Count the non-masked elements of the array along the given axis.
+
+ Parameters
+ ----------
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the count is performed.
+ The default, None, performs the count over all
+ the dimensions of the input array. `axis` may be negative, in
+ which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.10.0
+
+ If this is a tuple of ints, the count is performed on multiple
+ axes, instead of a single axis or all the axes as before.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ Returns
+ -------
+ result : ndarray or scalar
+ An array with the same shape as the input array, with the specified
+ axis removed. If the array is a 0-d array, or if `axis` is None, a
+ scalar is returned.
+
+ See Also
+ --------
+ ma.count_masked : Count masked elements in array or along a given axis.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.arange(6).reshape((2, 3))
+ >>> a[1, :] = ma.masked
+ >>> a
+ masked_array(
+ data=[[0, 1, 2],
+ [--, --, --]],
+ mask=[[False, False, False],
+ [ True, True, True]],
+ fill_value=999999)
+ >>> a.count()
+ 3
+
+ When the `axis` keyword is specified an array of appropriate size is
+ returned.
+
+ >>> a.count(axis=0)
+ array([1, 1, 1])
+ >>> a.count(axis=1)
+ array([3, 0])
+
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ m = self._mask
+ # special case for matrices (we assume no other subclasses modify
+ # their dimensions)
+ if isinstance(self.data, np.matrix):
+ if m is nomask:
+ m = np.zeros(self.shape, dtype=np.bool_)
+ m = m.view(type(self.data))
+
+ if m is nomask:
+ # compare to _count_reduce_items in _methods.py
+
+ if self.shape == ():
+ if axis not in (None, 0):
+ raise np.AxisError(axis=axis, ndim=self.ndim)
+ return 1
+ elif axis is None:
+ if kwargs.get('keepdims', False):
+ return np.array(self.size, dtype=np.intp, ndmin=self.ndim)
+ return self.size
+
+ axes = normalize_axis_tuple(axis, self.ndim)
+ items = 1
+ for ax in axes:
+ items *= self.shape[ax]
+
+ if kwargs.get('keepdims', False):
+ out_dims = list(self.shape)
+ for a in axes:
+ out_dims[a] = 1
+ else:
+ out_dims = [d for n, d in enumerate(self.shape)
+ if n not in axes]
+ # make sure to return a 0-d array if axis is supplied
+ return np.full(out_dims, items, dtype=np.intp)
+
+ # take care of the masked singleton
+ if self is masked:
+ return 0
+
+ return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
+
+ def ravel(self, order='C'):
+ """
+ Returns a 1D version of self, as a view.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ The elements of `a` are read using this index order. 'C' means to
+ index the elements in C-like order, with the last axis index
+ changing fastest, back to the first axis index changing slowest.
+ 'F' means to index the elements in Fortran-like index order, with
+ the first index changing fastest, and the last index changing
+ slowest. Note that the 'C' and 'F' options take no account of the
+ memory layout of the underlying array, and only refer to the order
+ of axis indexing. 'A' means to read the elements in Fortran-like
+ index order if `a` is Fortran *contiguous* in memory, C-like order
+ otherwise. 'K' means to read the elements in the order they occur
+ in memory, except for reversing the data when strides are negative.
+ By default, 'C' index order is used.
+ (Masked arrays currently use 'A' on the data when 'K' is passed.)
+
+ Returns
+ -------
+ MaskedArray
+ Output view is of shape ``(self.size,)`` (or
+ ``(np.ma.product(self.shape),)``).
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.ravel()
+ masked_array(data=[1, --, 3, --, 5, --, 7, --, 9],
+ mask=[False, True, False, True, False, True, False, True,
+ False],
+ fill_value=999999)
+
+ """
+ # The order of _data and _mask could be different (it shouldn't be
+ # normally). Passing order `K` or `A` would be incorrect.
+ # So we ignore the mask memory order.
+ # TODO: We don't actually support K, so use A instead. We could
+ # try to guess it correctly by sorting strides, or deprecate the option.
+ if order in "kKaA":
+ order = "C" if self._data.flags.fnc else "F"
+ r = ndarray.ravel(self._data, order=order).view(type(self))
+ r._update_from(self)
+ if self._mask is not nomask:
+ r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape)
+ else:
+ r._mask = nomask
+ return r
+
+ def reshape(self, *s, **kwargs):
+ """
+ Give a new shape to the array without changing its data.
+
+ Returns a masked array containing the same data, but with a new shape.
+ The result is a view on the original array; if this is not possible, a
+ ValueError is raised.
+
+ Parameters
+ ----------
+ shape : int or tuple of ints
+ The new shape should be compatible with the original shape. If an
+ integer is supplied, then the result will be a 1-D array of that
+ length.
+ order : {'C', 'F'}, optional
+ Determines whether the array data should be viewed as in C
+ (row-major) or FORTRAN (column-major) order.
+
+ Returns
+ -------
+ reshaped_array : array
+ A new view on the array.
+
+ See Also
+ --------
+ reshape : Equivalent function in the masked array module.
+ numpy.ndarray.reshape : Equivalent method on ndarray object.
+ numpy.reshape : Equivalent function in the NumPy module.
+
+ Notes
+ -----
+ The reshaping operation cannot guarantee that a copy will not be made,
+ to modify the shape in place, use ``a.shape = s``
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
+ >>> x
+ masked_array(
+ data=[[--, 2],
+ [3, --]],
+ mask=[[ True, False],
+ [False, True]],
+ fill_value=999999)
+ >>> x = x.reshape((4,1))
+ >>> x
+ masked_array(
+ data=[[--],
+ [2],
+ [3],
+ [--]],
+ mask=[[ True],
+ [False],
+ [False],
+ [ True]],
+ fill_value=999999)
+
+ """
+ kwargs.update(order=kwargs.get('order', 'C'))
+ result = self._data.reshape(*s, **kwargs).view(type(self))
+ result._update_from(self)
+ mask = self._mask
+ if mask is not nomask:
+ result._mask = mask.reshape(*s, **kwargs)
+ return result
+
+ def resize(self, newshape, refcheck=True, order=False):
+ """
+ .. warning::
+
+ This method does nothing, except raise a ValueError exception. A
+ masked array does not own its data and therefore cannot safely be
+ resized in place. Use the `numpy.ma.resize` function instead.
+
+ This method is difficult to implement safely and may be deprecated in
+ future releases of NumPy.
+
+ """
+ # Note : the 'order' keyword looks broken, let's just drop it
+ errmsg = "A masked array does not own its data "\
+ "and therefore cannot be resized.\n" \
+ "Use the numpy.ma.resize function instead."
+ raise ValueError(errmsg)
+
+ def put(self, indices, values, mode='raise'):
+ """
+ Set storage-indexed locations to corresponding values.
+
+ Sets self._data.flat[n] = values[n] for each n in indices.
+ If `values` is shorter than `indices` then it will repeat.
+ If `values` has some masked entries, the initial mask is updated
+ accordingly; otherwise the corresponding entries are unmasked.
+
+ Parameters
+ ----------
+ indices : 1-D array_like
+ Target indices, interpreted as integers.
+ values : array_like
+ Values to place in self._data copy at target indices.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+ 'raise' : raise an error.
+ 'wrap' : wrap around.
+ 'clip' : clip to the range.
+
+ Notes
+ -----
+ `values` can be a scalar or length 1 array.
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.put([0,4,8],[10,20,30])
+ >>> x
+ masked_array(
+ data=[[10, --, 3],
+ [--, 20, --],
+ [7, --, 30]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+
+ >>> x.put(4,999)
+ >>> x
+ masked_array(
+ data=[[10, --, 3],
+ [--, 999, --],
+ [7, --, 30]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+
+ """
+ # Hard mask: Get rid of the values/indices that fall on masked data
+ if self._hardmask and self._mask is not nomask:
+ mask = self._mask[indices]
+ indices = narray(indices, copy=False)
+ values = narray(values, copy=False, subok=True)
+ values.resize(indices.shape)
+ indices = indices[~mask]
+ values = values[~mask]
+
+ self._data.put(indices, values, mode=mode)
+
+ # short circuit if neither self nor values are masked
+ if self._mask is nomask and getmask(values) is nomask:
+ return
+
+ m = getmaskarray(self)
+
+ if getmask(values) is nomask:
+ m.put(indices, False, mode=mode)
+ else:
+ m.put(indices, values._mask, mode=mode)
+ m = make_mask(m, copy=False, shrink=True)
+ self._mask = m
+ return
+
+ def ids(self):
+ """
+ Return the addresses of the data and mask areas.
+
+ Parameters
+ ----------
+ None
+
+ Examples
+ --------
+ >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
+ >>> x.ids()
+ (166670640, 166659832) # may vary
+
+ If the array has no mask, the address of `nomask` is returned. This address
+ is typically not close to the data in memory:
+
+ >>> x = np.ma.array([1, 2, 3])
+ >>> x.ids()
+ (166691080, 3083169284) # may vary
+
+ """
+ if self._mask is nomask:
+ return (self.ctypes.data, id(nomask))
+ return (self.ctypes.data, self._mask.ctypes.data)
+
+ def iscontiguous(self):
+ """
+ Return a boolean indicating whether the data is contiguous.
+
+ Parameters
+ ----------
+ None
+
+ Examples
+ --------
+ >>> x = np.ma.array([1, 2, 3])
+ >>> x.iscontiguous()
+ True
+
+ `iscontiguous` returns one of the flags of the masked array:
+
+ >>> x.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : True
+ OWNDATA : False
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+
+ """
+ return self.flags['CONTIGUOUS']
+
+ def all(self, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Returns True if all elements evaluate to True.
+
+ The output array is masked where all the values along the given axis
+ are masked: if the output would have been a scalar and all the
+ values are masked, then the output is `masked`.
+
+ Refer to `numpy.all` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.all : corresponding function for ndarrays
+ numpy.all : equivalent function
+
+ Examples
+ --------
+ >>> np.ma.array([1,2,3]).all()
+ True
+ >>> a = np.ma.array([1,2,3], mask=True)
+ >>> (a.all() is np.ma.masked)
+ True
+
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ mask = _check_mask_axis(self._mask, axis, **kwargs)
+ if out is None:
+ d = self.filled(True).all(axis=axis, **kwargs).view(type(self))
+ if d.ndim:
+ d.__setmask__(mask)
+ elif mask:
+ return masked
+ return d
+ self.filled(True).all(axis=axis, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ if out.ndim or mask:
+ out.__setmask__(mask)
+ return out
+
+ def any(self, axis=None, out=None, keepdims=np._NoValue):
+ """
+ Returns True if any of the elements of `a` evaluate to True.
+
+ Masked values are considered as False during computation.
+
+ Refer to `numpy.any` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.any : corresponding function for ndarrays
+ numpy.any : equivalent function
+
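+ Examples
+ --------
+ A minimal sketch; masked entries count as False:
+
+ >>> np.ma.array([0, 2, 0], mask=[0, 1, 0]).any()
+ False
+ >>> np.ma.array([0, 2, 0]).any()
+ True
+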
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ mask = _check_mask_axis(self._mask, axis, **kwargs)
+ if out is None:
+ d = self.filled(False).any(axis=axis, **kwargs).view(type(self))
+ if d.ndim:
+ d.__setmask__(mask)
+ elif mask:
+ d = masked
+ return d
+ self.filled(False).any(axis=axis, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ if out.ndim or mask:
+ out.__setmask__(mask)
+ return out
+
+ def nonzero(self):
+ """
+ Return the indices of unmasked elements that are not zero.
+
+ Returns a tuple of arrays, one for each dimension, containing the
+ indices of the non-zero elements in that dimension. The corresponding
+ non-zero values can be obtained with::
+
+ a[a.nonzero()]
+
+ To group the indices by element, rather than dimension, use
+ instead::
+
+ np.transpose(a.nonzero())
+
+ The result of this is always a 2d array, with a row for each non-zero
+ element.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ tuple_of_arrays : tuple
+ Indices of elements that are non-zero.
+
+ See Also
+ --------
+ numpy.nonzero :
+ Function operating on ndarrays.
+ flatnonzero :
+ Return indices that are non-zero in the flattened version of the input
+ array.
+ numpy.ndarray.nonzero :
+ Equivalent ndarray method.
+ count_nonzero :
+ Counts the number of non-zero elements in the input array.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = ma.array(np.eye(3))
+ >>> x
+ masked_array(
+ data=[[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]],
+ mask=False,
+ fill_value=1e+20)
+ >>> x.nonzero()
+ (array([0, 1, 2]), array([0, 1, 2]))
+
+ Masked elements are ignored.
+
+ >>> x[1, 1] = ma.masked
+ >>> x
+ masked_array(
+ data=[[1.0, 0.0, 0.0],
+ [0.0, --, 0.0],
+ [0.0, 0.0, 1.0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1e+20)
+ >>> x.nonzero()
+ (array([0, 2]), array([0, 2]))
+
+ Indices can also be grouped by element.
+
+ >>> np.transpose(x.nonzero())
+ array([[0, 0],
+ [2, 2]])
+
+ A common use for ``nonzero`` is to find the indices of an array where
+ a condition is True. Given an array `a`, the condition ``a > 3`` is a
+ boolean array and, since False is interpreted as 0, ``ma.nonzero(a > 3)``
+ yields the indices of `a` where the condition is true.
+
+ >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
+ >>> a > 3
+ masked_array(
+ data=[[False, False, False],
+ [ True, True, True],
+ [ True, True, True]],
+ mask=False,
+ fill_value=True)
+ >>> ma.nonzero(a > 3)
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ The ``nonzero`` method of the condition array can also be called.
+
+ >>> (a > 3).nonzero()
+ (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+ """
+ return narray(self.filled(0), copy=False).nonzero()
+
+ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+ """
+ (this docstring should be overwritten)
+ """
+ #!!!: implement out + test!
+ m = self._mask
+ if m is nomask:
+ result = super().trace(offset=offset, axis1=axis1, axis2=axis2,
+ out=out)
+ return result.astype(dtype)
+ else:
+ D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
+ return D.astype(dtype).filled(0).sum(axis=-1, out=out)
+ trace.__doc__ = ndarray.trace.__doc__
+
+ def dot(self, b, out=None, strict=False):
+ """
+ a.dot(b, out=None)
+
+ Masked dot product of two arrays. Note that `out` and `strict` are
+ located in different positions than in `ma.dot`. In order to
+ maintain compatibility with the functional version, it is
+ recommended that the optional arguments be treated as keyword only.
+ At some point that may be mandatory.
+
+ .. versionadded:: 1.10.0
+
+ Parameters
+ ----------
+ b : masked_array_like
+ Input array.
+ out : masked_array, optional
+ Output argument. This must have the exact kind that would be
+ returned if it was not used. In particular, it must have the
+ right type, must be C-contiguous, and its dtype must be the
+ dtype that would be returned for `ma.dot(a,b)`. This is a
+ performance feature. Therefore, if these conditions are not
+ met, an exception is raised, instead of attempting to be
+ flexible.
+ strict : bool, optional
+ Whether masked data are propagated (True) or set to 0 (False)
+ for the computation. Default is False. Propagating the mask
+ means that if a masked value appears in a row or column, the
+ whole row or column is considered masked.
+
+ .. versionadded:: 1.10.2
+
+ See Also
+ --------
+ numpy.ma.dot : equivalent function
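+
+ Examples
+ --------
+ An illustrative sketch; the example values below are assumed, not
+ taken from the original docstring.
+
+ >>> a = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
+ >>> b = np.ma.array([[1, 0], [0, 1]])
+ >>> a.dot(b)
+ masked_array(
+ data=[[1, 0],
+ [3, 4]],
+ mask=[[False, False],
+ [False, False]],
+ fill_value=999999)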
+
+ """
+ return dot(self, b, out=out, strict=strict)
+
+ def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+ """
+ Return the sum of the array elements over the given axis.
+
+ Masked elements are set to 0 internally.
+
+ Refer to `numpy.sum` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.sum : corresponding function for ndarrays
+ numpy.sum : equivalent function
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.sum()
+ 25
+ >>> x.sum(axis=1)
+ masked_array(data=[4, 5, 16],
+ mask=[False, False, False],
+ fill_value=999999)
+ >>> x.sum(axis=0)
+ masked_array(data=[8, 5, 12],
+ mask=[False, False, False],
+ fill_value=999999)
+ >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+ <class 'numpy.int64'>
+
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ _mask = self._mask
+ newmask = _check_mask_axis(_mask, axis, **kwargs)
+ # No explicit output
+ if out is None:
+ result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+ rndim = getattr(result, 'ndim', 0)
+ if rndim:
+ result = result.view(type(self))
+ result.__setmask__(newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ outmask = getmask(out)
+ if outmask is nomask:
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ return out
+
+ def cumsum(self, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative sum of the array elements over the given axis.
+
+ Masked values are set to 0 internally during the computation.
+ However, their position is saved, and the result will be masked at
+ the same locations.
+
+ Refer to `numpy.cumsum` for full documentation.
+
+ Notes
+ -----
+ The mask is lost if `out` is not a valid :class:`ma.MaskedArray`!
+
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ See Also
+ --------
+ numpy.ndarray.cumsum : corresponding function for ndarrays
+ numpy.cumsum : equivalent function
+
+ Examples
+ --------
+ >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+ >>> marr.cumsum()
+ masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+ mask=[False, False, False, True, True, True, False, False,
+ False, False],
+ fill_value=999999)
+
+ """
+ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(self.mask)
+ return out
+ result = result.view(type(self))
+ result.__setmask__(self._mask)
+ return result
+
+ def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+ """
+ Return the product of the array elements over the given axis.
+
+ Masked elements are set to 1 internally for computation.
+
+ Refer to `numpy.prod` for full documentation.
+
+ Notes
+ -----
+ Arithmetic is modular when using integer types, and no error is raised
+ on overflow.
+
+ See Also
+ --------
+ numpy.ndarray.prod : corresponding function for ndarrays
+ numpy.prod : equivalent function
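+
+ Examples
+ --------
+ A short sketch with assumed values; masked entries contribute 1.
+
+ >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> x.prod()
+ 3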
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ _mask = self._mask
+ newmask = _check_mask_axis(_mask, axis, **kwargs)
+ # No explicit output
+ if out is None:
+ result = self.filled(1).prod(axis, dtype=dtype, **kwargs)
+ rndim = getattr(result, 'ndim', 0)
+ if rndim:
+ result = result.view(type(self))
+ result.__setmask__(newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ outmask = getmask(out)
+ if outmask is nomask:
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ return out
+ product = prod
+
+ def cumprod(self, axis=None, dtype=None, out=None):
+ """
+ Return the cumulative product of the array elements over the given axis.
+
+ Masked values are set to 1 internally during the computation.
+ However, their position is saved, and the result will be masked at
+ the same locations.
+
+ Refer to `numpy.cumprod` for full documentation.
+
+ Notes
+ -----
+ The mask is lost if `out` is not a valid MaskedArray!
+
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ See Also
+ --------
+ numpy.ndarray.cumprod : corresponding function for ndarrays
+ numpy.cumprod : equivalent function
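+
+ Examples
+ --------
+ A short sketch with assumed values:
+
+ >>> marr = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
+ >>> marr.cumprod()
+ masked_array(data=[1, --, 3, 12],
+ mask=[False, True, False, False],
+ fill_value=999999)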
+ """
+ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(self._mask)
+ return out
+ result = result.view(type(self))
+ result.__setmask__(self._mask)
+ return result
+
+ def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+ """
+ Returns the average of the array elements along the given axis.
+
+ Masked entries are ignored, and result elements which are not
+ finite will be masked.
+
+ Refer to `numpy.mean` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.mean : corresponding function for ndarrays
+ numpy.mean : Equivalent function
+ numpy.ma.average : Weighted average.
+
+ Examples
+ --------
+ >>> a = np.ma.array([1,2,3], mask=[False, False, True])
+ >>> a
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
+ >>> a.mean()
+ 1.5
+
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+ if self._mask is nomask:
+ result = super().mean(axis=axis, dtype=dtype, **kwargs)[()]
+ else:
+ is_float16_result = False
+ if dtype is None:
+ if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool_)):
+ dtype = mu.dtype('f8')
+ elif issubclass(self.dtype.type, ntypes.float16):
+ dtype = mu.dtype('f4')
+ is_float16_result = True
+ dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
+ cnt = self.count(axis=axis, **kwargs)
+ if cnt.shape == () and (cnt == 0):
+ result = masked
+ elif is_float16_result:
+ result = self.dtype.type(dsum * 1. / cnt)
+ else:
+ result = dsum * 1. / cnt
+ if out is not None:
+ out.flat = result
+ if isinstance(out, MaskedArray):
+ outmask = getmask(out)
+ if outmask is nomask:
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = getmask(result)
+ return out
+ return result
+
+ def anom(self, axis=None, dtype=None):
+ """
+ Compute the anomalies (deviations from the arithmetic mean)
+ along the given axis.
+
+ Returns an array of anomalies, with the same shape as the input and
+ where the arithmetic mean is computed along the given axis.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis over which the anomalies are taken.
+ The default is to use the mean of the flattened array as reference.
+ dtype : dtype, optional
+ Type to use in computing the anomalies. For arrays of integer type
+ the default is float32; for arrays of float types it is the same as
+ the array type.
+
+ See Also
+ --------
+ mean : Compute the mean of the array.
+
+ Examples
+ --------
+ >>> a = np.ma.array([1,2,3])
+ >>> a.anom()
+ masked_array(data=[-1., 0., 1.],
+ mask=False,
+ fill_value=1e+20)
+
+ """
+ m = self.mean(axis, dtype)
+ if not axis:
+ return self - m
+ else:
+ return self - expand_dims(m, axis)
+
+ def var(self, axis=None, dtype=None, out=None, ddof=0,
+ keepdims=np._NoValue):
+ """
+ Returns the variance of the array elements along the given axis.
+
+ Masked entries are ignored, and result elements which are not
+ finite will be masked.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.var : corresponding function for ndarrays
+ numpy.var : Equivalent function
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ # Easy case: nomask, business as usual
+ if self._mask is nomask:
+ ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof,
+ **kwargs)[()]
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(nomask)
+ return out
+ return ret
+
+ # Some data are masked, yay!
+ cnt = self.count(axis=axis, **kwargs) - ddof
+ danom = self - self.mean(axis, dtype, keepdims=True)
+ if iscomplexobj(self):
+ danom = umath.absolute(danom) ** 2
+ else:
+ danom *= danom
+ dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
+ # Apply the mask if it's not a scalar
+ if dvar.ndim:
+ dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
+ dvar._update_from(self)
+ elif getmask(dvar):
+ # Make sure that masked is returned when the scalar is masked.
+ dvar = masked
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.flat = 0
+ out.__setmask__(True)
+ elif out.dtype.kind in 'biu':
+ errmsg = "Masked data information would be lost in one or "\
+ "more location."
+ raise MaskError(errmsg)
+ else:
+ out.flat = np.nan
+ return out
+ # In case we have an explicit output
+ if out is not None:
+ # Set the data
+ out.flat = dvar
+ # Set the mask if needed
+ if isinstance(out, MaskedArray):
+ out.__setmask__(dvar.mask)
+ return out
+ return dvar
+ var.__doc__ = np.var.__doc__
+
+ def std(self, axis=None, dtype=None, out=None, ddof=0,
+ keepdims=np._NoValue):
+ """
+ Returns the standard deviation of the array elements along the given axis.
+
+ Masked entries are ignored.
+
+ Refer to `numpy.std` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.std : corresponding function for ndarrays
+ numpy.std : Equivalent function
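+
+ Examples
+ --------
+ An illustrative sketch with assumed values; the masked entry is
+ excluded from the computation.
+
+ >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
+ >>> float(x.std())
+ 0.5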
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ dvar = self.var(axis, dtype, out, ddof, **kwargs)
+ if dvar is not masked:
+ if out is not None:
+ np.power(out, 0.5, out=out, casting='unsafe')
+ return out
+ dvar = sqrt(dvar)
+ return dvar
+
+ def round(self, decimals=0, out=None):
+ """
+ Return each element rounded to the given number of decimals.
+
+ Refer to `numpy.around` for full documentation.
+
+ See Also
+ --------
+ numpy.ndarray.round : corresponding function for ndarrays
+ numpy.around : equivalent function
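+
+ Examples
+ --------
+ A short sketch with assumed values:
+
+ >>> x = np.ma.array([1.11, 2.55, 3.99], mask=[0, 1, 0])
+ >>> x.round(1)
+ masked_array(data=[1.1, --, 4.0],
+ mask=[False, True, False],
+ fill_value=1e+20)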
+ """
+ result = self._data.round(decimals=decimals, out=out).view(type(self))
+ if result.ndim > 0:
+ result._mask = self._mask
+ result._update_from(self)
+ elif self._mask:
+ # Return masked when the scalar is masked
+ result = masked
+ # No explicit output: we're done
+ if out is None:
+ return result
+ if isinstance(out, MaskedArray):
+ out.__setmask__(self._mask)
+ return out
+
+ def argsort(self, axis=np._NoValue, kind=None, order=None,
+ endwith=True, fill_value=None):
+ """
+ Return an ndarray of indices that sort the array along the
+ specified axis. Masked values are filled beforehand to
+ `fill_value`.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which to sort. If None, the default, the flattened array
+ is used.
+
+ .. versionchanged:: 1.13.0
+ Previously, the default was documented to be -1, but that was
+ in error. At some future date, the default will change to -1, as
+ originally intended.
+ Until then, the axis should be given explicitly when
+ ``arr.ndim > 1``, to avoid a FutureWarning.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ The sorting algorithm used.
+ order : list, optional
+ When `a` is an array with fields defined, this argument specifies
+ which fields to compare first, second, etc. Not all fields need be
+ specified.
+ endwith : {True, False}, optional
+ Whether missing values (if any) should be treated as the largest values
+ (True) or the smallest values (False).
+ When the array contains unmasked values at the same extremes of the
+ datatype, the ordering of these values and the masked values is
+ undefined.
+ fill_value : scalar or None, optional
+ Value used internally for the masked values.
+ If ``fill_value`` is not None, it supersedes ``endwith``.
+
+ Returns
+ -------
+ index_array : ndarray, int
+ Array of indices that sort `a` along the specified axis.
+ In other words, ``a[index_array]`` yields a sorted `a`.
+
+ See Also
+ --------
+ ma.MaskedArray.sort : Describes sorting algorithms used.
+ lexsort : Indirect stable sort with multiple keys.
+ numpy.ndarray.sort : Inplace sort.
+
+ Notes
+ -----
+ See `sort` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> a = np.ma.array([3,2,1], mask=[False, False, True])
+ >>> a
+ masked_array(data=[3, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
+ >>> a.argsort()
+ array([1, 0, 2])
+
+ """
+
+ # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
+ if axis is np._NoValue:
+ axis = _deprecate_argsort_axis(self)
+
+ if fill_value is None:
+ if endwith:
+ # nan > inf
+ if np.issubdtype(self.dtype, np.floating):
+ fill_value = np.nan
+ else:
+ fill_value = minimum_fill_value(self)
+ else:
+ fill_value = maximum_fill_value(self)
+
+ filled = self.filled(fill_value)
+ return filled.argsort(axis=axis, kind=kind, order=order)
+
+ def argmin(self, axis=None, fill_value=None, out=None, *,
+ keepdims=np._NoValue):
+ """
+ Return array of indices to the minimum values along the given axis.
+
+ Parameters
+ ----------
+ axis : {None, integer}
+ If None, the index is into the flattened array, otherwise along
+ the specified axis
+ fill_value : scalar or None, optional
+ Value used to fill in the masked values. If None, the output of
+ minimum_fill_value(self._data) is used instead.
+ out : {None, array}, optional
+ Array into which the result can be placed. Its type is preserved
+ and it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ ndarray or scalar
+ For multi-dimensional input, returns a new ndarray of indices to
+ the minimum values along the given axis. Otherwise, returns a
+ scalar index to the minimum value along the given axis.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
+ >>> x.shape = (2,2)
+ >>> x
+ masked_array(
+ data=[[--, --],
+ [2, 3]],
+ mask=[[ True, True],
+ [False, False]],
+ fill_value=999999)
+ >>> x.argmin(axis=0, fill_value=-1)
+ array([0, 0])
+ >>> x.argmin(axis=0, fill_value=9)
+ array([1, 1])
+
+ """
+ if fill_value is None:
+ fill_value = minimum_fill_value(self)
+ d = self.filled(fill_value).view(ndarray)
+ keepdims = False if keepdims is np._NoValue else bool(keepdims)
+ return d.argmin(axis, out=out, keepdims=keepdims)
+
+ def argmax(self, axis=None, fill_value=None, out=None, *,
+ keepdims=np._NoValue):
+ """
+ Returns array of indices of the maximum values along the given axis.
+ Masked values are treated as if they had the value fill_value.
+
+ Parameters
+ ----------
+ axis : {None, integer}
+ If None, the index is into the flattened array, otherwise along
+ the specified axis
+ fill_value : scalar or None, optional
+ Value used to fill in the masked values. If None, the output of
+ maximum_fill_value(self._data) is used instead.
+ out : {None, array}, optional
+ Array into which the result can be placed. Its type is preserved
+ and it must be of the right shape to hold the output.
+
+ Returns
+ -------
+ index_array : {integer_array}
+
+ Examples
+ --------
+ >>> a = np.arange(6).reshape(2,3)
+ >>> a.argmax()
+ 5
+ >>> a.argmax(0)
+ array([1, 1, 1])
+ >>> a.argmax(1)
+ array([2, 2])
+
+ """
+ if fill_value is None:
+ fill_value = maximum_fill_value(self._data)
+ d = self.filled(fill_value).view(ndarray)
+ keepdims = False if keepdims is np._NoValue else bool(keepdims)
+ return d.argmax(axis, out=out, keepdims=keepdims)
+
+ def sort(self, axis=-1, kind=None, order=None,
+ endwith=True, fill_value=None):
+ """
+ Sort the array, in-place.
+
+ Parameters
+ ----------
+ a : array_like
+ Array to be sorted.
+ axis : int, optional
+ Axis along which to sort. If None, the array is flattened before
+ sorting. The default is -1, which sorts along the last axis.
+ kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+ The sorting algorithm used.
+ order : list, optional
+ When `a` is a structured array, this argument specifies which fields
+ to compare first, second, and so on. This list does not need to
+ include all of the fields.
+ endwith : {True, False}, optional
+ Whether missing values (if any) should be treated as the largest values
+ (True) or the smallest values (False).
+ When the array contains unmasked values at the same extremes of the
+ datatype, the ordering of these values and the masked values is
+ undefined.
+ fill_value : scalar or None, optional
+ Value used internally for the masked values.
+ If ``fill_value`` is not None, it supersedes ``endwith``.
+
+ Returns
+ -------
+ sorted_array : ndarray
+ Array of the same type and shape as `a`.
+
+ See Also
+ --------
+ numpy.ndarray.sort : Method to sort an array in-place.
+ argsort : Indirect sort.
+ lexsort : Indirect stable sort on multiple keys.
+ searchsorted : Find elements in a sorted array.
+
+ Notes
+ -----
+ See ``sort`` for notes on the different sorting algorithms.
+
+ Examples
+ --------
+ >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
+ >>> # Default
+ >>> a.sort()
+ >>> a
+ masked_array(data=[1, 3, 5, --, --],
+ mask=[False, False, False, True, True],
+ fill_value=999999)
+
+ >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
+ >>> # Put missing values in the front
+ >>> a.sort(endwith=False)
+ >>> a
+ masked_array(data=[--, --, 1, 3, 5],
+ mask=[ True, True, False, False, False],
+ fill_value=999999)
+
+ >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
+ >>> # fill_value takes over endwith
+ >>> a.sort(endwith=False, fill_value=3)
+ >>> a
+ masked_array(data=[1, --, --, 3, 5],
+ mask=[False, True, True, False, False],
+ fill_value=999999)
+
+ """
+ if self._mask is nomask:
+ ndarray.sort(self, axis=axis, kind=kind, order=order)
+ return
+
+ if self is masked:
+ return
+
+ sidx = self.argsort(axis=axis, kind=kind, order=order,
+ fill_value=fill_value, endwith=endwith)
+
+ self[...] = np.take_along_axis(self, sidx, axis=axis)
+
+ def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+ """
+ Return the minimum along a given axis.
+
+ Parameters
+ ----------
+ axis : None or int or tuple of ints, optional
+ Axis along which to operate. By default, ``axis`` is None and the
+ flattened input is used.
+ .. versionadded:: 1.7.0
+ If this is a tuple of ints, the minimum is selected over multiple
+ axes, instead of a single axis or all the axes as before.
+ out : array_like, optional
+ Alternative output array in which to place the result. Must be of
+ the same shape and buffer length as the expected output.
+ fill_value : scalar or None, optional
+ Value used to fill in the masked values.
+ If None, use the output of `minimum_fill_value`.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ Returns
+ -------
+ amin : array_like
+ New array holding the result.
+ If ``out`` was specified, ``out`` is returned.
+
+ See Also
+ --------
+ ma.minimum_fill_value
+ Returns the minimum filling value for a given datatype.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]]
+ >>> mask = [[1, 1, 0], [0, 0, 1]]
+ >>> masked_x = ma.masked_array(x, mask)
+ >>> masked_x
+ masked_array(
+ data=[[--, --, 3.0],
+ [0.2, -0.7, --]],
+ mask=[[ True, True, False],
+ [False, False, True]],
+ fill_value=1e+20)
+ >>> ma.min(masked_x)
+ -0.7
+ >>> ma.min(masked_x, axis=-1)
+ masked_array(data=[3.0, -0.7],
+ mask=[False, False],
+ fill_value=1e+20)
+ >>> ma.min(masked_x, axis=0, keepdims=True)
+ masked_array(data=[[0.2, -0.7, 3.0]],
+ mask=[[False, False, False]],
+ fill_value=1e+20)
+ >>> mask = [[1, 1, 1,], [1, 1, 1]]
+ >>> masked_x = ma.masked_array(x, mask)
+ >>> ma.min(masked_x, axis=0)
+ masked_array(data=[--, --, --],
+ mask=[ True, True, True],
+ fill_value=1e+20,
+ dtype=float64)
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ _mask = self._mask
+ newmask = _check_mask_axis(_mask, axis, **kwargs)
+ if fill_value is None:
+ fill_value = minimum_fill_value(self)
+ # No explicit output
+ if out is None:
+ result = self.filled(fill_value).min(
+ axis=axis, out=out, **kwargs).view(type(self))
+ if result.ndim:
+ # Set the mask
+ result.__setmask__(newmask)
+ # Get rid of Infs
+ if newmask.ndim:
+ np.copyto(result, result.fill_value, where=newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ outmask = getmask(out)
+ if outmask is nomask:
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ else:
+ if out.dtype.kind in 'biu':
+ errmsg = "Masked data information would be lost in one or more"\
+ " location."
+ raise MaskError(errmsg)
+ np.copyto(out, np.nan, where=newmask)
+ return out
+
+ def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+ """
+ Return the maximum along a given axis.
+
+ Parameters
+ ----------
+ axis : None or int or tuple of ints, optional
+ Axis along which to operate. By default, ``axis`` is None and the
+ flattened input is used.
+ .. versionadded:: 1.7.0
+ If this is a tuple of ints, the maximum is selected over multiple
+ axes, instead of a single axis or all the axes as before.
+ out : array_like, optional
+ Alternative output array in which to place the result. Must
+ be of the same shape and buffer length as the expected output.
+ fill_value : scalar or None, optional
+ Value used to fill in the masked values.
+ If None, use the output of `maximum_fill_value`.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ Returns
+ -------
+ amax : array_like
+ New array holding the result.
+ If ``out`` was specified, ``out`` is returned.
+
+ See Also
+ --------
+ ma.maximum_fill_value
+ Returns the maximum filling value for a given datatype.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = [[-1., 2.5], [4., -2.], [3., 0.]]
+ >>> mask = [[0, 0], [1, 0], [1, 0]]
+ >>> masked_x = ma.masked_array(x, mask)
+ >>> masked_x
+ masked_array(
+ data=[[-1.0, 2.5],
+ [--, -2.0],
+ [--, 0.0]],
+ mask=[[False, False],
+ [ True, False],
+ [ True, False]],
+ fill_value=1e+20)
+ >>> ma.max(masked_x)
+ 2.5
+ >>> ma.max(masked_x, axis=0)
+ masked_array(data=[-1.0, 2.5],
+ mask=[False, False],
+ fill_value=1e+20)
+ >>> ma.max(masked_x, axis=1, keepdims=True)
+ masked_array(
+ data=[[2.5],
+ [-2.0],
+ [0.0]],
+ mask=[[False],
+ [False],
+ [False]],
+ fill_value=1e+20)
+ >>> mask = [[1, 1], [1, 1], [1, 1]]
+ >>> masked_x = ma.masked_array(x, mask)
+ >>> ma.max(masked_x, axis=1)
+ masked_array(data=[--, --, --],
+ mask=[ True, True, True],
+ fill_value=1e+20,
+ dtype=float64)
+ """
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ _mask = self._mask
+ newmask = _check_mask_axis(_mask, axis, **kwargs)
+ if fill_value is None:
+ fill_value = maximum_fill_value(self)
+ # No explicit output
+ if out is None:
+ result = self.filled(fill_value).max(
+ axis=axis, out=out, **kwargs).view(type(self))
+ if result.ndim:
+ # Set the mask
+ result.__setmask__(newmask)
+ # Get rid of Infs
+ if newmask.ndim:
+ np.copyto(result, result.fill_value, where=newmask)
+ elif newmask:
+ result = masked
+ return result
+ # Explicit output
+ result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
+ if isinstance(out, MaskedArray):
+ outmask = getmask(out)
+ if outmask is nomask:
+ outmask = out._mask = make_mask_none(out.shape)
+ outmask.flat = newmask
+ else:
+ if out.dtype.kind in 'biu':
+ errmsg = "Masked data information would be lost in one or more"\
+ " location."
+ raise MaskError(errmsg)
+ np.copyto(out, np.nan, where=newmask)
+ return out
+
+ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False):
+ """
+ Return (maximum - minimum) along the given dimension
+ (i.e. peak-to-peak value).
+
+ .. warning::
+ `ptp` preserves the data type of the array. This means the
+ return value for an input of signed integers with n bits
+ (e.g. `np.int8`, `np.int16`, etc) is also a signed integer
+ with n bits. In that case, peak-to-peak values greater than
+ ``2**(n-1)-1`` will be returned as negative values. An example
+ with a work-around is shown below.
+
+ Parameters
+ ----------
+ axis : {None, int}, optional
+ Axis along which to find the peaks. If None (default) the
+ flattened array is used.
+ out : {None, array_like}, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary.
+ fill_value : scalar or None, optional
+ Value used to fill in the masked values.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
+
+ Returns
+ -------
+ ptp : ndarray.
+ A new array holding the result, unless ``out`` was
+ specified, in which case a reference to ``out`` is returned.
+
+ Examples
+ --------
+ >>> x = np.ma.MaskedArray([[4, 9, 2, 10],
+ ... [6, 9, 7, 12]])
+
+ >>> x.ptp(axis=1)
+ masked_array(data=[8, 6],
+ mask=False,
+ fill_value=999999)
+
+ >>> x.ptp(axis=0)
+ masked_array(data=[2, 0, 5, 2],
+ mask=False,
+ fill_value=999999)
+
+ >>> x.ptp()
+ 10
+
+ This example shows that a negative value can be returned when
+ the input is an array of signed integers.
+
+ >>> y = np.ma.MaskedArray([[1, 127],
+ ... [0, 127],
+ ... [-1, 127],
+ ... [-2, 127]], dtype=np.int8)
+ >>> y.ptp(axis=1)
+ masked_array(data=[ 126, 127, -128, -127],
+ mask=False,
+ fill_value=999999,
+ dtype=int8)
+
+ A work-around is to use the `view()` method to view the result as
+ unsigned integers with the same bit width:
+
+ >>> y.ptp(axis=1).view(np.uint8)
+ masked_array(data=[126, 127, 128, 129],
+ mask=False,
+ fill_value=999999,
+ dtype=uint8)
+ """
+ if out is None:
+ result = self.max(axis=axis, fill_value=fill_value,
+ keepdims=keepdims)
+ result -= self.min(axis=axis, fill_value=fill_value,
+ keepdims=keepdims)
+ return result
+ out.flat = self.max(axis=axis, out=out, fill_value=fill_value,
+ keepdims=keepdims)
+ min_value = self.min(axis=axis, fill_value=fill_value,
+ keepdims=keepdims)
+ np.subtract(out, min_value, out=out, casting='unsafe')
+ return out
+
+ def partition(self, *args, **kwargs):
+ warnings.warn("Warning: 'partition' will ignore the 'mask' "
+ f"of the {self.__class__.__name__}.",
+ stacklevel=2)
+ return super().partition(*args, **kwargs)
+
+ def argpartition(self, *args, **kwargs):
+ warnings.warn("Warning: 'argpartition' will ignore the 'mask' "
+ f"of the {self.__class__.__name__}.",
+ stacklevel=2)
+ return super().argpartition(*args, **kwargs)
+
+ def take(self, indices, axis=None, out=None, mode='raise'):
+ """
+ """
+ (_data, _mask) = (self._data, self._mask)
+ cls = type(self)
+ # Make sure the indices are not masked
+ maskindices = getmask(indices)
+ if maskindices is not nomask:
+ indices = indices.filled(0)
+ # Get the data, promoting scalars to 0d arrays with [...] so that
+ # .view works correctly
+ if out is None:
+ out = _data.take(indices, axis=axis, mode=mode)[...].view(cls)
+ else:
+ np.take(_data, indices, axis=axis, mode=mode, out=out)
+ # Get the mask
+ if isinstance(out, MaskedArray):
+ if _mask is nomask:
+ outmask = maskindices
+ else:
+ outmask = _mask.take(indices, axis=axis, mode=mode)
+ outmask |= maskindices
+ out.__setmask__(outmask)
+ # demote 0d arrays back to scalars, for consistency with ndarray.take
+ return out[()]
+
+ # Array methods
+ copy = _arraymethod('copy')
+ diagonal = _arraymethod('diagonal')
+ flatten = _arraymethod('flatten')
+ repeat = _arraymethod('repeat')
+ squeeze = _arraymethod('squeeze')
+ swapaxes = _arraymethod('swapaxes')
+ T = property(fget=lambda self: self.transpose())
+ transpose = _arraymethod('transpose')
+
+ def tolist(self, fill_value=None):
+ """
+ Return the data portion of the masked array as a hierarchical Python list.
+
+ Data items are converted to the nearest compatible Python type.
+ Masked values are converted to `fill_value`. If `fill_value` is None,
+ the corresponding entries in the output list will be ``None``.
+
+ Parameters
+ ----------
+ fill_value : scalar, optional
+ The value to use for invalid entries. Default is None.
+
+ Returns
+ -------
+ result : list
+ The Python list representation of the masked array.
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
+ >>> x.tolist()
+ [[1, None, 3], [None, 5, None], [7, None, 9]]
+ >>> x.tolist(-999)
+ [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
+
+ """
+ _mask = self._mask
+ # No mask? Just return .data.tolist?
+ if _mask is nomask:
+ return self._data.tolist()
+ # Explicit fill_value: fill the array and get the list
+ if fill_value is not None:
+ return self.filled(fill_value).tolist()
+ # Structured array.
+ names = self.dtype.names
+ if names:
+ result = self._data.astype([(_, object) for _ in names])
+ for n in names:
+ result[n][_mask[n]] = None
+ return result.tolist()
+ # Standard arrays.
+ if _mask is nomask:
+ return [None]
+ # Set temps to save time when dealing w/ marrays.
+ inishape = self.shape
+ result = np.array(self._data.ravel(), dtype=object)
+ result[_mask.ravel()] = None
+ result.shape = inishape
+ return result.tolist()
+
+ def tostring(self, fill_value=None, order='C'):
+ r"""
+ A compatibility alias for `tobytes`, with exactly the same behavior.
+
+ Despite its name, it returns `bytes` not `str`\ s.
+
+ .. deprecated:: 1.19.0
+ """
+ # 2020-03-30, Numpy 1.19.0
+ warnings.warn(
+ "tostring() is deprecated. Use tobytes() instead.",
+ DeprecationWarning, stacklevel=2)
+
+ return self.tobytes(fill_value, order=order)
+
+ def tobytes(self, fill_value=None, order='C'):
+ """
+ Return the array data as a string containing the raw bytes in the array.
+
+ The array is filled with a fill value before the string conversion.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ fill_value : scalar, optional
+ Value used to fill in the masked values. Default is None, in which
+ case `MaskedArray.fill_value` is used.
+ order : {'C','F','A'}, optional
+ Order of the data item in the copy. Default is 'C'.
+
+ - 'C' -- C order (row major).
+ - 'F' -- Fortran order (column major).
+ - 'A' -- Any, current order of array.
+ - None -- Same as 'A'.
+
+ See Also
+ --------
+ numpy.ndarray.tobytes
+ tolist, tofile
+
+ Notes
+ -----
+ As with `ndarray.tobytes`, information about the shape, dtype, etc.,
+ and also about the `fill_value`, will be lost.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+ >>> x.tobytes()
+ b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
+
+ """
+ return self.filled(fill_value).tobytes(order=order)
+
+ def tofile(self, fid, sep="", format="%s"):
+ """
+ Save a masked array to a file in binary format.
+
+ .. warning::
+ This function is not implemented yet.
+
+ Raises
+ ------
+ NotImplementedError
+ When `tofile` is called.
+
+ """
+ raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
+
+ def toflex(self):
+ """
+ Transforms a masked array into a flexible-type array.
+
+ The flexible type array that is returned will have two fields:
+
+ * the ``_data`` field stores the ``_data`` part of the array.
+ * the ``_mask`` field stores the ``_mask`` part of the array.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ record : ndarray
+ A new flexible-type `ndarray` with two fields: the first element
+ containing a value, the second element containing the corresponding
+ mask boolean. The returned record shape matches self.shape.
+
+ Notes
+ -----
+ A side-effect of transforming a masked array into a flexible `ndarray` is
+ that meta information (``fill_value``, ...) will be lost.
+
+ Examples
+ --------
+ >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.toflex()
+ array([[(1, False), (2, True), (3, False)],
+ [(4, True), (5, False), (6, True)],
+ [(7, False), (8, True), (9, False)]],
+ dtype=[('_data', '<i8'), ('_mask', '?')])
+
+ """
+ # Get the basic dtype.
+ ddtype = self.dtype
+ # Make sure we have a mask
+ _mask = self._mask
+ if _mask is None:
+ _mask = make_mask_none(self.shape, ddtype)
+ # And get its dtype
+ mdtype = self._mask.dtype
+
+ record = np.ndarray(shape=self.shape,
+ dtype=[('_data', ddtype), ('_mask', mdtype)])
+ record['_data'] = self._data
+ record['_mask'] = self._mask
+ return record
+ torecords = toflex
+
+ # Pickling
+ def __getstate__(self):
+ """Return the internal state of the masked array, for pickling
+ purposes.
+
+ """
+ cf = 'CF'[self.flags.fnc]
+ data_state = super().__reduce__()[2]
+ return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
+
+ def __setstate__(self, state):
+ """Restore the internal state of the masked array, for
+ pickling purposes. ``state`` is typically the output of the
+ ``__getstate__`` output, and is a 5-tuple:
+
+ - class name
+ - a tuple giving the shape of the data
+ - a typecode for the data
+ - a binary string for the data
+ - a binary string for the mask.
+
+ """
+ (_, shp, typ, isf, raw, msk, flv) = state
+ super().__setstate__((shp, typ, isf, raw))
+ self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
+ self.fill_value = flv
+
+ def __reduce__(self):
+ """Return a 3-tuple for pickling a MaskedArray.
+
+ """
+ return (_mareconstruct,
+ (self.__class__, self._baseclass, (0,), 'b',),
+ self.__getstate__())
+
+ def __deepcopy__(self, memo=None):
+ from copy import deepcopy
+ copied = MaskedArray.__new__(type(self), self, copy=True)
+ if memo is None:
+ memo = {}
+ memo[id(self)] = copied
+ for (k, v) in self.__dict__.items():
+ copied.__dict__[k] = deepcopy(v, memo)
+ return copied
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+ """Internal function that builds a new MaskedArray from the
+ information stored in a pickle.
+
+ """
+ _data = ndarray.__new__(baseclass, baseshape, basetype)
+ _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
+ return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+class mvoid(MaskedArray):
+ """
+ Fake a 'void' object to use for masked arrays with structured dtypes.
+ """
+
+ def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
+ hardmask=False, copy=False, subok=True):
+ _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
+ _data = _data.view(self)
+ _data._hardmask = hardmask
+ if mask is not nomask:
+ if isinstance(mask, np.void):
+ _data._mask = mask
+ else:
+ try:
+ # Mask is already a 0D array
+ _data._mask = np.void(mask)
+ except TypeError:
+ # Transform the mask to a void
+ mdtype = make_mask_descr(dtype)
+ _data._mask = np.array(mask, dtype=mdtype)[()]
+ if fill_value is not None:
+ _data.fill_value = fill_value
+ return _data
+
+ @property
+ def _data(self):
+ # Make sure that the _data part is a np.void
+ return super()._data[()]
+
+ def __getitem__(self, indx):
+ """
+ Get the index.
+
+ """
+ m = self._mask
+ if isinstance(m[indx], ndarray):
+ # Can happen when indx is a multi-dimensional field:
+ # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+ # False],)], dtype=[("A", ">i2", (2,))])
+ # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+ # and we cannot say masked/unmasked.
+ # The result is no longer mvoid!
+ # See also issue #6724.
+ return masked_array(
+ data=self._data[indx], mask=m[indx],
+ fill_value=self._fill_value[indx],
+ hard_mask=self._hardmask)
+ if m is not nomask and m[indx]:
+ return masked
+ return self._data[indx]
+
+ def __setitem__(self, indx, value):
+ self._data[indx] = value
+ if self._hardmask:
+ self._mask[indx] |= getattr(value, "_mask", False)
+ else:
+ self._mask[indx] = getattr(value, "_mask", False)
+
+ def __str__(self):
+ m = self._mask
+ if m is nomask:
+ return str(self._data)
+
+ rdtype = _replace_dtype_fields(self._data.dtype, "O")
+ data_arr = super()._data
+ res = data_arr.astype(rdtype)
+ _recursive_printoption(res, self._mask, masked_print_option)
+ return str(res)
+
+ __repr__ = __str__
+
+ def __iter__(self):
+ "Defines an iterator for mvoid"
+ (_data, _mask) = (self._data, self._mask)
+ if _mask is nomask:
+ yield from _data
+ else:
+ for (d, m) in zip(_data, _mask):
+ if m:
+ yield masked
+ else:
+ yield d
+
+ def __len__(self):
+ return self._data.__len__()
+
+ def filled(self, fill_value=None):
+ """
+ Return a copy with masked fields filled with a given value.
+
+ Parameters
+ ----------
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or
+ non-scalar. In the latter case, the filled array should
+ be broadcastable over the input array. Default is None, in
+ which case the `fill_value` attribute is used instead.
+
+ Returns
+ -------
+ filled_void
+ A `np.void` object
+
+ See Also
+ --------
+ MaskedArray.filled
+
+ """
+ return asarray(self).filled(fill_value)[()]
+
+ def tolist(self):
+ """
+ Transforms the mvoid object into a tuple.
+
+ Masked fields are replaced by None.
+
+ Returns
+ -------
+ returned_tuple
+ Tuple of fields
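+
+ Examples
+ --------
+ A small sketch with an assumed structured array:
+
+ >>> x = np.ma.array([(1, 2.0)], mask=[(0, 1)],
+ ... dtype=[('a', int), ('b', float)])
+ >>> x[0].tolist()
+ (1, None)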
+ """
+ _mask = self._mask
+ if _mask is nomask:
+ return self._data.tolist()
+ result = []
+ for (d, m) in zip(self._data, self._mask):
+ if m:
+ result.append(None)
+ else:
+ # .item() makes sure we return a standard Python object
+ result.append(d.item())
+ return tuple(result)
+
+
+##############################################################################
+# Shortcuts #
+##############################################################################
+
+
+def isMaskedArray(x):
+ """
+ Test whether input is an instance of MaskedArray.
+
+ This function returns True if `x` is an instance of MaskedArray
+ and returns False otherwise. Any object is accepted as input.
+
+ Parameters
+ ----------
+ x : object
+ Object to test.
+
+ Returns
+ -------
+ result : bool
+ True if `x` is a MaskedArray.
+
+ See Also
+ --------
+ isMA : Alias to isMaskedArray.
+ isarray : Alias to isMaskedArray.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.eye(3, 3)
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+ >>> m = ma.masked_values(a, 0)
+ >>> m
+ masked_array(
+ data=[[1.0, --, --],
+ [--, 1.0, --],
+ [--, --, 1.0]],
+ mask=[[False, True, True],
+ [ True, False, True],
+ [ True, True, False]],
+ fill_value=0.0)
+ >>> ma.isMaskedArray(a)
+ False
+ >>> ma.isMaskedArray(m)
+ True
+ >>> ma.isMaskedArray([0, 1, 2])
+ False
+
+ """
+ return isinstance(x, MaskedArray)
+
+
+isarray = isMaskedArray
+isMA = isMaskedArray # backward compatibility
+
+
+class MaskedConstant(MaskedArray):
+ # the lone np.ma.masked instance
+ __singleton = None
+
+ @classmethod
+ def __has_singleton(cls):
+ # second case ensures `cls.__singleton` is not just a view on the
+ # superclass singleton
+ return cls.__singleton is not None and type(cls.__singleton) is cls
+
+ def __new__(cls):
+ if not cls.__has_singleton():
+ # We define the masked singleton as a float for higher precedence.
+ # Note that it can be tricky sometimes w/ type comparison
+ data = np.array(0.)
+ mask = np.array(True)
+
+ # prevent any modifications
+ data.flags.writeable = False
+ mask.flags.writeable = False
+
+ # don't fall back on MaskedArray.__new__(MaskedConstant), since
+ # that might confuse it - this way, the construction is entirely
+ # within our control
+ cls.__singleton = MaskedArray(data, mask=mask).view(cls)
+
+ return cls.__singleton
+
+ def __array_finalize__(self, obj):
+ if not self.__has_singleton():
+ # this handles the `.view` in __new__, which we want to copy across
+ # properties normally
+ return super().__array_finalize__(obj)
+ elif self is self.__singleton:
+ # not clear how this can happen, play it safe
+ pass
+ else:
+ # everywhere else, we want to downcast to MaskedArray, to prevent a
+ # duplicate maskedconstant.
+ self.__class__ = MaskedArray
+ MaskedArray.__array_finalize__(self, obj)
+
+ def __array_prepare__(self, obj, context=None):
+ return self.view(MaskedArray).__array_prepare__(obj, context)
+
+ def __array_wrap__(self, obj, context=None):
+ return self.view(MaskedArray).__array_wrap__(obj, context)
+
+ def __str__(self):
+ return str(masked_print_option._display)
+
+ def __repr__(self):
+ if self is MaskedConstant.__singleton:
+ return 'masked'
+ else:
+ # it's a subclass, or something is wrong, make it obvious
+ return object.__repr__(self)
+
+ def __format__(self, format_spec):
+ # Replace ndarray.__format__ with the default, which supports no format characters.
+ # Supporting format characters is unwise here, because we do not know what type
+ # the user was expecting - better to not guess.
+ try:
+ return object.__format__(self, format_spec)
+ except TypeError:
+ # 2020-03-23, NumPy 1.19.0
+ warnings.warn(
+ "Format strings passed to MaskedConstant are ignored, but in future may "
+ "error or produce different behavior",
+ FutureWarning, stacklevel=2
+ )
+ return object.__format__(self, "")
+
+ def __reduce__(self):
+ """Override of MaskedArray's __reduce__.
+ """
+ return (self.__class__, ())
+
+ # inplace operations have no effect. We have to override them to avoid
+ # trying to modify the readonly data and mask arrays
+ def __iop__(self, other):
+ return self
+ __iadd__ = \
+ __isub__ = \
+ __imul__ = \
+ __ifloordiv__ = \
+ __itruediv__ = \
+ __ipow__ = \
+ __iop__
+ del __iop__ # don't leave this around
+
+ def copy(self, *args, **kwargs):
+ """ Copy is a no-op on the maskedconstant, as it is a scalar """
+ # maskedconstant is a scalar, so copy doesn't need to copy. There's
+ # precedent for this with `np.bool_` scalars.
+ return self
+
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
+ def __setattr__(self, attr, value):
+ if not self.__has_singleton():
+ # allow the singleton to be initialized
+ return super().__setattr__(attr, value)
+ elif self is self.__singleton:
+ raise AttributeError(
+ f"attributes of {self!r} are not writeable")
+ else:
+ # duplicate instance - we can end up here from __array_finalize__,
+ # where we set the __class__ attribute
+ return super().__setattr__(attr, value)
+
+
+masked = masked_singleton = MaskedConstant()
+masked_array = MaskedArray
+
+
+def array(data, dtype=None, copy=False, order=None,
+ mask=nomask, fill_value=None, keep_mask=True,
+ hard_mask=False, shrink=True, subok=True, ndmin=0):
+ """
+ Shortcut to MaskedArray.
+
+ The options are in a different order for convenience and backwards
+ compatibility.
+
+ """
+ return MaskedArray(data, mask=mask, dtype=dtype, copy=copy,
+ subok=subok, keep_mask=keep_mask,
+ hard_mask=hard_mask, fill_value=fill_value,
+ ndmin=ndmin, shrink=shrink, order=order)
+array.__doc__ = masked_array.__doc__
+
+
+def is_masked(x):
+ """
+ Determine whether input has masked values.
+
+ Accepts any object as input, but always returns False unless the
+ input is a MaskedArray containing masked values.
+
+ Parameters
+ ----------
+ x : array_like
+ Array to check for masked values.
+
+ Returns
+ -------
+ result : bool
+ True if `x` is a MaskedArray with masked values, False otherwise.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
+ >>> x
+ masked_array(data=[--, 1, --, 2, 3],
+ mask=[ True, False, True, False, False],
+ fill_value=0)
+ >>> ma.is_masked(x)
+ True
+ >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
+ >>> x
+ masked_array(data=[0, 1, 0, 2, 3],
+ mask=False,
+ fill_value=42)
+ >>> ma.is_masked(x)
+ False
+
+ Always returns False if `x` isn't a MaskedArray.
+
+ >>> x = [False, True, False]
+ >>> ma.is_masked(x)
+ False
+ >>> x = 'a string'
+ >>> ma.is_masked(x)
+ False
+
+ """
+ m = getmask(x)
+ if m is nomask:
+ return False
+ elif m.any():
+ return True
+ return False
+
+
+##############################################################################
+# Extrema functions #
+##############################################################################
+
+
+class _extrema_operation(_MaskedUFunc):
+ """
+ Generic class for maximum/minimum functions.
+
+ .. note::
+ This is the base class for `_maximum_operation` and
+ `_minimum_operation`.
+
+ """
+ def __init__(self, ufunc, compare, fill_value):
+ super().__init__(ufunc)
+ self.compare = compare
+ self.fill_value_func = fill_value
+
+ def __call__(self, a, b):
+ "Executes the call behavior."
+
+ return where(self.compare(a, b), a, b)
+
+ def reduce(self, target, axis=np._NoValue):
+ "Reduce target along the given axis."
+ target = narray(target, copy=False, subok=True)
+ m = getmask(target)
+
+ if axis is np._NoValue and target.ndim > 1:
+ # 2017-05-06, Numpy 1.13.0: warn on axis default
+ warnings.warn(
+ f"In the future the default for ma.{self.__name__}.reduce will be axis=0, "
+ f"not the current None, to match np.{self.__name__}.reduce. "
+ "Explicitly pass 0 or None to silence this warning.",
+ MaskedArrayFutureWarning, stacklevel=2)
+ axis = None
+
+ if axis is not np._NoValue:
+ kwargs = dict(axis=axis)
+ else:
+ kwargs = dict()
+
+ if m is nomask:
+ t = self.f.reduce(target, **kwargs)
+ else:
+ target = target.filled(
+ self.fill_value_func(target)).view(type(target))
+ t = self.f.reduce(target, **kwargs)
+ m = umath.logical_and.reduce(m, **kwargs)
+ if hasattr(t, '_mask'):
+ t._mask = m
+ elif m:
+ t = masked
+ return t
+
+ def outer(self, a, b):
+ "Return the function applied to the outer product of a and b."
+ ma = getmask(a)
+ mb = getmask(b)
+ if ma is nomask and mb is nomask:
+ m = nomask
+ else:
+ ma = getmaskarray(a)
+ mb = getmaskarray(b)
+ m = logical_or.outer(ma, mb)
+ result = self.f.outer(filled(a), filled(b))
+ if not isinstance(result, MaskedArray):
+ result = result.view(MaskedArray)
+ result._mask = m
+ return result
+
+def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ try:
+ return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs)
+ except (AttributeError, TypeError):
+ # If obj doesn't have a min method, or if the method doesn't accept a
+ # fill_value argument
+ return asanyarray(obj).min(axis=axis, fill_value=fill_value,
+ out=out, **kwargs)
+min.__doc__ = MaskedArray.min.__doc__
+
+def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+ try:
+ return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs)
+ except (AttributeError, TypeError):
+ # If obj doesn't have a max method, or if the method doesn't accept a
+ # fill_value argument
+ return asanyarray(obj).max(axis=axis, fill_value=fill_value,
+ out=out, **kwargs)
+max.__doc__ = MaskedArray.max.__doc__
+
+
+def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
+ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+ try:
+ return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs)
+ except (AttributeError, TypeError):
+ # If obj doesn't have a ptp method or if the method doesn't accept
+ # a fill_value argument
+ return asanyarray(obj).ptp(axis=axis, fill_value=fill_value,
+ out=out, **kwargs)
+ptp.__doc__ = MaskedArray.ptp.__doc__
+
+
+##############################################################################
+# Definition of functions from the corresponding methods #
+##############################################################################
+
+
+class _frommethod:
+ """
+ Define functions from existing MaskedArray methods.
+
+ Parameters
+ ----------
+ methodname : str
+ Name of the method to transform.
+
+ """
+
+ def __init__(self, methodname, reversed=False):
+ self.__name__ = methodname
+ self.__doc__ = self.getdoc()
+ self.reversed = reversed
+
+ def getdoc(self):
+ "Return the doc of the function (from the doc of the method)."
+ meth = getattr(MaskedArray, self.__name__, None) or\
+ getattr(np, self.__name__, None)
+ signature = self.__name__ + get_object_signature(meth)
+ if meth is not None:
+ doc = """ %s\n%s""" % (
+ signature, getattr(meth, '__doc__', None))
+ return doc
+
+ def __call__(self, a, *args, **params):
+ if self.reversed:
+ args = list(args)
+ a, args[0] = args[0], a
+
+ marr = asanyarray(a)
+ method_name = self.__name__
+ method = getattr(type(marr), method_name, None)
+ if method is None:
+ # use the corresponding np function
+ method = getattr(np, method_name)
+
+ return method(marr, *args, **params)
+
+
+all = _frommethod('all')
+anomalies = anom = _frommethod('anom')
+any = _frommethod('any')
+compress = _frommethod('compress', reversed=True)
+cumprod = _frommethod('cumprod')
+cumsum = _frommethod('cumsum')
+copy = _frommethod('copy')
+diagonal = _frommethod('diagonal')
+harden_mask = _frommethod('harden_mask')
+ids = _frommethod('ids')
+maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
+mean = _frommethod('mean')
+minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
+nonzero = _frommethod('nonzero')
+prod = _frommethod('prod')
+product = _frommethod('prod')
+ravel = _frommethod('ravel')
+repeat = _frommethod('repeat')
+shrink_mask = _frommethod('shrink_mask')
+soften_mask = _frommethod('soften_mask')
+std = _frommethod('std')
+sum = _frommethod('sum')
+swapaxes = _frommethod('swapaxes')
+#take = _frommethod('take')
+trace = _frommethod('trace')
+var = _frommethod('var')
+
+count = _frommethod('count')
+
+def take(a, indices, axis=None, out=None, mode='raise'):
+ """
+ """
+ a = masked_array(a)
+ return a.take(indices, axis=axis, out=out, mode=mode)
+
+
+def power(a, b, third=None):
+ """
+ Returns the element-wise base array raised to the power from the
+ second array.
+
+ This is the masked array version of `numpy.power`. For details see
+ `numpy.power`.
+
+ See Also
+ --------
+ numpy.power
+
+ Notes
+ -----
+ The *out* argument to `numpy.power` is not supported, `third` has to be
+ None.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = [11.2, -3.973, 0.801, -1.41]
+ >>> mask = [0, 0, 0, 1]
+ >>> masked_x = ma.masked_array(x, mask)
+ >>> masked_x
+ masked_array(data=[11.2, -3.973, 0.801, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ >>> ma.power(masked_x, 2)
+ masked_array(data=[125.43999999999998, 15.784728999999999,
+ 0.6416010000000001, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ >>> y = [-0.5, 2, 0, 17]
+ >>> masked_y = ma.masked_array(y, mask)
+ >>> masked_y
+ masked_array(data=[-0.5, 2.0, 0.0, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ >>> ma.power(masked_x, masked_y)
+ masked_array(data=[0.29880715233359845, 15.784728999999999, 1.0, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+
+ """
+ if third is not None:
+ raise MaskError("3-argument power not supported.")
+ # Get the masks
+ ma = getmask(a)
+ mb = getmask(b)
+ m = mask_or(ma, mb)
+ # Get the rawdata
+ fa = getdata(a)
+ fb = getdata(b)
+ # Get the type of the result (so that we preserve subclasses)
+ if isinstance(a, MaskedArray):
+ basetype = type(a)
+ else:
+ basetype = MaskedArray
+ # Get the result and view it as a (subclass of) MaskedArray
+ with np.errstate(divide='ignore', invalid='ignore'):
+ result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
+ result._update_from(a)
+ # Find where we're in trouble w/ NaNs and Infs
+ invalid = np.logical_not(np.isfinite(result.view(ndarray)))
+ # Add the initial mask
+ if m is not nomask:
+ if not result.ndim:
+ return masked
+ result._mask = np.logical_or(m, invalid)
+ # Fix the invalid parts
+ if invalid.any():
+ if not result.ndim:
+ return masked
+ elif result._mask is nomask:
+ result._mask = invalid
+ result._data[invalid] = result.fill_value
+ return result
+
+argmin = _frommethod('argmin')
+argmax = _frommethod('argmax')
+
+def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):
+ "Function version of the eponymous method."
+ a = np.asanyarray(a)
+
+ # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
+ if axis is np._NoValue:
+ axis = _deprecate_argsort_axis(a)
+
+ if isinstance(a, MaskedArray):
+ return a.argsort(axis=axis, kind=kind, order=order,
+ endwith=endwith, fill_value=fill_value)
+ else:
+ return a.argsort(axis=axis, kind=kind, order=order)
+argsort.__doc__ = MaskedArray.argsort.__doc__
+
+def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None):
+ """
+ Return a sorted copy of the masked array.
+
+ Equivalent to creating a copy of the array
+ and applying the MaskedArray ``sort()`` method.
+
+ Refer to ``MaskedArray.sort`` for the full documentation
+
+ See Also
+ --------
+ MaskedArray.sort : equivalent method
+ """
+ a = np.array(a, copy=True, subok=True)
+ if axis is None:
+ a = a.flatten()
+ axis = 0
+
+ if isinstance(a, MaskedArray):
+ a.sort(axis=axis, kind=kind, order=order,
+ endwith=endwith, fill_value=fill_value)
+ else:
+ a.sort(axis=axis, kind=kind, order=order)
+ return a
+
+
+def compressed(x):
+ """
+ Return all the non-masked data as a 1-D array.
+
+ This function is equivalent to calling the "compressed" method of a
+ `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details.
+
+ See Also
+ --------
+ ma.MaskedArray.compressed : Equivalent method.
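+
+ Examples
+ --------
+ A short sketch with assumed values:
+
+ >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> np.ma.compressed(x)
+ array([1, 3])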
+
+ """
+ return asanyarray(x).compressed()
+
+
+def concatenate(arrays, axis=0):
+ """
+ Concatenate a sequence of arrays along the given axis.
+
+ Parameters
+ ----------
+ arrays : sequence of array_like
+ The arrays must have the same shape, except in the dimension
+ corresponding to `axis` (the first, by default).
+ axis : int, optional
+ The axis along which the arrays will be joined. Default is 0.
+
+ Returns
+ -------
+ result : MaskedArray
+ The concatenated array with any masked entries preserved.
+
+ See Also
+ --------
+ numpy.concatenate : Equivalent function in the top-level NumPy module.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.arange(3)
+ >>> a[1] = ma.masked
+ >>> b = ma.arange(2, 5)
+ >>> a
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
+ >>> b
+ masked_array(data=[2, 3, 4],
+ mask=False,
+ fill_value=999999)
+ >>> ma.concatenate([a, b])
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
+
+ """
+ d = np.concatenate([getdata(a) for a in arrays], axis)
+ rcls = get_masked_subclass(*arrays)
+ data = d.view(rcls)
+ # Check whether one of the arrays has a non-empty mask.
+ for x in arrays:
+ if getmask(x) is not nomask:
+ break
+ else:
+ return data
+ # OK, so we have to concatenate the masks
+ dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
+ dm = dm.reshape(d.shape)
+
+ # If we decide to keep a '_shrinkmask' option, we want to check that
+ # all of them are True, and then check for dm.any()
+ data._mask = _shrink_mask(dm)
+ return data
+
+
+def diag(v, k=0):
+ """
+ Extract a diagonal or construct a diagonal array.
+
+ This function is the equivalent of `numpy.diag` that takes masked
+ values into account, see `numpy.diag` for details.
+
+ See Also
+ --------
+ numpy.diag : Equivalent function for ndarrays.
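+
+ Examples
+ --------
+ An illustrative sketch with assumed values:
+
+ >>> x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
+ >>> np.ma.diag(x)
+ masked_array(data=[1, 4],
+ mask=[False, False],
+ fill_value=999999)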
+
+ """
+ output = np.diag(v, k).view(MaskedArray)
+ if getmask(v) is not nomask:
+ output._mask = np.diag(v._mask, k)
+ return output
+
+
+def left_shift(a, n):
+ """
+ Shift the bits of an integer to the left.
+
+ This is the masked array version of `numpy.left_shift`, for details
+ see that function.
+
+ See Also
+ --------
+ numpy.left_shift
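+
+ Examples
+ --------
+ A short sketch with assumed values; masked entries stay masked.
+
+ >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> np.ma.left_shift(x, 1)
+ masked_array(data=[2, --, 6],
+ mask=[False, True, False],
+ fill_value=999999)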
+
+ """
+ m = getmask(a)
+ if m is nomask:
+ d = umath.left_shift(filled(a), n)
+ return masked_array(d)
+ else:
+ d = umath.left_shift(filled(a, 0), n)
+ return masked_array(d, mask=m)
+
+
+def right_shift(a, n):
+ """
+ Shift the bits of an integer to the right.
+
+ This is the masked array version of `numpy.right_shift`, for details
+ see that function.
+
+ See Also
+ --------
+ numpy.right_shift
+
+ """
+ m = getmask(a)
+ if m is nomask:
+ d = umath.right_shift(filled(a), n)
+ return masked_array(d)
+ else:
+ d = umath.right_shift(filled(a, 0), n)
+ return masked_array(d, mask=m)
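+
+# Editor's sketch for both shift wrappers (illustrative only): masked slots
+# are filled with 0 before shifting and the original mask is re-applied, so
+# left_shift(np.ma.array([1, 2, 3], mask=[0, 1, 0]), 2) carries data
+# [4, --, 12] with mask [False, True, False]; right_shift is symmetric.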
+
+
+def put(a, indices, values, mode='raise'):
+ """
+ Set storage-indexed locations to corresponding values.
+
+ This function is equivalent to `MaskedArray.put`, see that method
+ for details.
+
+ See Also
+ --------
+ MaskedArray.put
+
+ """
+ # We can't use 'frommethod', the order of arguments is different
+ try:
+ return a.put(indices, values, mode=mode)
+ except AttributeError:
+ return narray(a, copy=False).put(indices, values, mode=mode)
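+
+# Editor's sketch (illustrative only): values are written through the mask,
+# so a written position ends up unmasked.
+# >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+# >>> np.ma.put(a, [1], [20])
+# >>> a[1]
+# 20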
+
+
+def putmask(a, mask, values): # , mode='raise'):
+ """
+ Changes elements of an array based on conditional and input values.
+
+ This is the masked array version of `numpy.putmask`, for details see
+ `numpy.putmask`.
+
+ See Also
+ --------
+ numpy.putmask
+
+ Notes
+ -----
+ Using a masked array as `values` will **not** transform a `ndarray` into
+ a `MaskedArray`.
+
+ """
+ # We can't use 'frommethod', the order of arguments is different
+ if not isinstance(a, MaskedArray):
+ a = a.view(MaskedArray)
+ (valdata, valmask) = (getdata(values), getmask(values))
+ if getmask(a) is nomask:
+ if valmask is not nomask:
+ a._sharedmask = True
+ a._mask = make_mask_none(a.shape, a.dtype)
+ np.copyto(a._mask, valmask, where=mask)
+ elif a._hardmask:
+ if valmask is not nomask:
+ m = a._mask.copy()
+ np.copyto(m, valmask, where=mask)
+ a.mask |= m
+ else:
+ if valmask is nomask:
+ valmask = getmaskarray(values)
+ np.copyto(a._mask, valmask, where=mask)
+ np.copyto(a._data, valdata, where=mask)
+ return
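+
+# Editor's sketch (illustrative only): only positions where `mask` is True
+# are written, and writing unmasked values clears the target mask there.
+# >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+# >>> np.ma.putmask(a, [True, False, True], [10, 20, 30])
+# >>> a.data
+# array([10,  2, 30])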
+
+
+def transpose(a, axes=None):
+ """
+ Permute the dimensions of an array.
+
+ This function is exactly equivalent to `numpy.transpose`.
+
+ See Also
+ --------
+ numpy.transpose : Equivalent function in top-level NumPy module.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = ma.arange(4).reshape((2,2))
+ >>> x[1, 1] = ma.masked
+ >>> x
+ masked_array(
+ data=[[0, 1],
+ [2, --]],
+ mask=[[False, False],
+ [False, True]],
+ fill_value=999999)
+
+ >>> ma.transpose(x)
+ masked_array(
+ data=[[0, 2],
+ [1, --]],
+ mask=[[False, False],
+ [False, True]],
+ fill_value=999999)
+ """
+ # We can't use 'frommethod', as 'transpose' doesn't take keywords
+ try:
+ return a.transpose(axes)
+ except AttributeError:
+ return narray(a, copy=False).transpose(axes).view(MaskedArray)
+
+
+def reshape(a, new_shape, order='C'):
+ """
+ Returns an array containing the same data with a new shape.
+
+ Refer to `MaskedArray.reshape` for full documentation.
+
+ See Also
+ --------
+ MaskedArray.reshape : equivalent function
+
+ """
+ # We can't use 'frommethod': it whines about some parameters.
+ try:
+ return a.reshape(new_shape, order=order)
+ except AttributeError:
+ _tmp = narray(a, copy=False).reshape(new_shape, order=order)
+ return _tmp.view(MaskedArray)
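+
+# Editor's sketch (illustrative only): data and mask are reshaped together,
+# e.g. reshaping [1, --, 3, 4] to (2, 2) gives mask [[False, True],
+# [False, False]].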
+
+
+def resize(x, new_shape):
+ """
+ Return a new masked array with the specified size and shape.
+
+ This is the masked equivalent of the `numpy.resize` function. The new
+ array is filled with repeated copies of `x` (in the order that the
+ data are stored in memory). If `x` is masked, the new array will be
+ masked, and the new mask will be a repetition of the old one.
+
+ See Also
+ --------
+ numpy.resize : Equivalent function in the top level NumPy module.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.array([[1, 2] ,[3, 4]])
+ >>> a[0, 1] = ma.masked
+ >>> a
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=999999)
+ >>> np.resize(a, (3, 3))
+ masked_array(
+ data=[[1, 2, 3],
+ [4, 1, 2],
+ [3, 4, 1]],
+ mask=False,
+ fill_value=999999)
+ >>> ma.resize(a, (3, 3))
+ masked_array(
+ data=[[1, --, 3],
+ [4, 1, --],
+ [3, 4, 1]],
+ mask=[[False, True, False],
+ [False, False, True],
+ [False, False, False]],
+ fill_value=999999)
+
+ A MaskedArray is always returned, regardless of the input type.
+
+ >>> a = np.array([[1, 2] ,[3, 4]])
+ >>> ma.resize(a, (3, 3))
+ masked_array(
+ data=[[1, 2, 3],
+ [4, 1, 2],
+ [3, 4, 1]],
+ mask=False,
+ fill_value=999999)
+
+ """
+ # We can't use _frommethods here, as N.resize is notoriously whiny.
+ m = getmask(x)
+ if m is not nomask:
+ m = np.resize(m, new_shape)
+ result = np.resize(x, new_shape).view(get_masked_subclass(x))
+ if result.ndim:
+ result._mask = m
+ return result
+
+
+def ndim(obj):
+ """
+ maskedarray version of the numpy function.
+
+ """
+ return np.ndim(getdata(obj))
+
+ndim.__doc__ = np.ndim.__doc__
+
+
+def shape(obj):
+ "maskedarray version of the numpy function."
+ return np.shape(getdata(obj))
+shape.__doc__ = np.shape.__doc__
+
+
+def size(obj, axis=None):
+ "maskedarray version of the numpy function."
+ return np.size(getdata(obj), axis)
+size.__doc__ = np.size.__doc__
+
+
+##############################################################################
+# Extra functions #
+##############################################################################
+
+
+def where(condition, x=_NoValue, y=_NoValue):
+ """
+ Return a masked array with elements from `x` or `y`, depending on condition.
+
+ .. note::
+ When only `condition` is provided, this function is identical to
+ `nonzero`. The rest of this documentation covers only the case where
+ all three arguments are provided.
+
+ Parameters
+ ----------
+ condition : array_like, bool
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like, optional
+ Values from which to choose. `x`, `y` and `condition` need to be
+ broadcastable to some shape.
+
+ Returns
+ -------
+ out : MaskedArray
+ A masked array with `masked` elements where the condition is masked,
+ elements from `x` where `condition` is True, and elements from `y`
+ elsewhere.
+
+ See Also
+ --------
+ numpy.where : Equivalent function in the top-level NumPy module.
+ nonzero : The function that is called when x and y are omitted
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
+ ... [1, 0, 1],
+ ... [0, 1, 0]])
+ >>> x
+ masked_array(
+ data=[[0.0, --, 2.0],
+ [--, 4.0, --],
+ [6.0, --, 8.0]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=1e+20)
+ >>> np.ma.where(x > 5, x, -3.1416)
+ masked_array(
+ data=[[-3.1416, --, -3.1416],
+ [--, -3.1416, --],
+ [6.0, --, 8.0]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=1e+20)
+
+ """
+
+ # handle the single-argument case
+ missing = (x is _NoValue, y is _NoValue).count(True)
+ if missing == 1:
+ raise ValueError("Must provide both 'x' and 'y' or neither.")
+ if missing == 2:
+ return nonzero(condition)
+
+ # we only care whether the condition is true: false or masked picks y
+ cf = filled(condition, False)
+ xd = getdata(x)
+ yd = getdata(y)
+
+ # we need the full arrays here for correct final dimensions
+ cm = getmaskarray(condition)
+ xm = getmaskarray(x)
+ ym = getmaskarray(y)
+
+ # deal with the fact that masked.dtype == float64, but we don't actually
+ # want to treat it as that.
+ if x is masked and y is not masked:
+ xd = np.zeros((), dtype=yd.dtype)
+ xm = np.ones((), dtype=ym.dtype)
+ elif y is masked and x is not masked:
+ yd = np.zeros((), dtype=xd.dtype)
+ ym = np.ones((), dtype=xm.dtype)
+
+ data = np.where(cf, xd, yd)
+ mask = np.where(cf, xm, ym)
+ mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)
+
+ # collapse the mask, for backwards compatibility
+ mask = _shrink_mask(mask)
+
+ return masked_array(data, mask=mask)
+
+
+def choose(indices, choices, out=None, mode='raise'):
+ """
+ Use an index array to construct a new array from a list of choices.
+
+ Given an array of integers and a list of ``n`` choice arrays, this method
+ will create a new array that merges each of the choice arrays. Where a
+ value in `indices` is ``i``, the new array will have the value that
+ ``choices[i]`` contains at the same position.
+
+ Parameters
+ ----------
+ indices : ndarray of ints
+ This array must contain integers in ``[0, n-1]``, where n is the
+ number of choices.
+ choices : sequence of arrays
+ Choice arrays. The index array and all of the choices should be
+ broadcastable to the same shape.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and `dtype`.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices will behave.
+
+ * 'raise' : raise an error
+ * 'wrap' : wrap around
+ * 'clip' : clip to the range
+
+ Returns
+ -------
+ merged_array : array
+
+ See Also
+ --------
+ numpy.choose : Equivalent function in the top-level NumPy module.
+
+ Examples
+ --------
+ >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
+ >>> a = np.array([2, 1, 0])
+ >>> np.ma.choose(a, choice)
+ masked_array(data=[3, 2, 1],
+ mask=False,
+ fill_value=999999)
+
+ """
+ def fmask(x):
+ "Returns the filled array, or True if masked."
+ if x is masked:
+ return True
+ return filled(x)
+
+ def nmask(x):
+ "Returns the mask, True if ``masked``, False if ``nomask``."
+ if x is masked:
+ return True
+ return getmask(x)
+ # Get the indices.
+ c = filled(indices, 0)
+ # Get the masks.
+ masks = [nmask(x) for x in choices]
+ data = [fmask(x) for x in choices]
+ # Construct the mask
+ outputmask = np.choose(c, masks, mode=mode)
+ outputmask = make_mask(mask_or(outputmask, getmask(indices)),
+ copy=False, shrink=True)
+ # Get the choices.
+ d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
+ if out is not None:
+ if isinstance(out, MaskedArray):
+ out.__setmask__(outputmask)
+ return out
+ d.__setmask__(outputmask)
+ return d
+
+
+def round_(a, decimals=0, out=None):
+ """
+ Return a copy of a, rounded to 'decimals' places.
+
+ When 'decimals' is negative, it specifies the number of positions
+ to the left of the decimal point. The real and imaginary parts of
+ complex numbers are rounded separately. Nothing is done if the
+ array is not of float type and 'decimals' is greater than or equal
+ to 0.
+
+ Parameters
+ ----------
+ decimals : int
+ Number of decimals to round to. May be negative.
+ out : array_like
+ Existing array to use for output.
+ If not given, returns a default copy of a.
+
+ Notes
+ -----
+ If out is given and does not have a mask attribute, the mask of a
+ is lost!
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> x = [11.2, -3.973, 0.801, -1.41]
+ >>> mask = [0, 0, 0, 1]
+ >>> masked_x = ma.masked_array(x, mask)
+ >>> masked_x
+ masked_array(data=[11.2, -3.973, 0.801, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ >>> ma.round_(masked_x)
+ masked_array(data=[11.0, -4.0, 1.0, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ >>> ma.round(masked_x, decimals=1)
+ masked_array(data=[11.2, -4.0, 0.8, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ >>> ma.round_(masked_x, decimals=-1)
+ masked_array(data=[10.0, -0.0, 0.0, --],
+ mask=[False, False, False, True],
+ fill_value=1e+20)
+ """
+ if out is None:
+ return np.round_(a, decimals, out)
+ else:
+ np.round_(getdata(a), decimals, out)
+ if hasattr(out, '_mask'):
+ out._mask = getmask(a)
+ return out
+round = round_
+
+
+# Needed by dot, so move here from extras.py. It will still be exported
+# from extras.py for compatibility.
+def mask_rowcols(a, axis=None):
+ """
+ Mask rows and/or columns of a 2D array that contain masked values.
+
+ Mask whole rows and/or columns of a 2D array that contain
+ masked values. The masking behavior is selected using the
+ `axis` parameter.
+
+ - If `axis` is None, rows *and* columns are masked.
+ - If `axis` is 0, only rows are masked.
+ - If `axis` is 1 or -1, only columns are masked.
+
+ Parameters
+ ----------
+ a : array_like, MaskedArray
+ The array to mask. Must be a 2D array. If not a MaskedArray
+ instance (or if no array elements are masked), the result is a
+ MaskedArray with `mask` set to `nomask` (False).
+ axis : int, optional
+ Axis along which to perform the operation. If None, applies to a
+ flattened version of the array.
+
+ Returns
+ -------
+ a : MaskedArray
+ A modified version of the input array, masked depending on the value
+ of the `axis` parameter.
+
+ Raises
+ ------
+ NotImplementedError
+ If input array `a` is not 2D.
+
+ See Also
+ --------
+ mask_rows : Mask rows of a 2D array that contain masked values.
+ mask_cols : Mask cols of a 2D array that contain masked values.
+ masked_where : Mask where a condition is met.
+
+ Notes
+ -----
+ The input array's mask is modified by this function.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.zeros((3, 3), dtype=int)
+ >>> a[1, 1] = 1
+ >>> a
+ array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]])
+ >>> a = ma.masked_equal(a, 1)
+ >>> a
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
+ >>> ma.mask_rowcols(a)
+ masked_array(
+ data=[[0, --, 0],
+ [--, --, --],
+ [0, --, 0]],
+ mask=[[False, True, False],
+ [ True, True, True],
+ [False, True, False]],
+ fill_value=1)
+
+ """
+ a = array(a, subok=False)
+ if a.ndim != 2:
+ raise NotImplementedError("mask_rowcols works for 2D arrays only.")
+ m = getmask(a)
+ # Nothing is masked: return a
+ if m is nomask or not m.any():
+ return a
+ maskedval = m.nonzero()
+ a._mask = a._mask.copy()
+ if not axis:
+ a[np.unique(maskedval[0])] = masked
+ if axis in [None, 1, -1]:
+ a[:, np.unique(maskedval[1])] = masked
+ return a
+
+
+# Include masked dot here to avoid import problems in getting it from
+# extras.py. Note that it is not included in __all__, but rather exported
+# from extras in order to avoid backward compatibility problems.
+def dot(a, b, strict=False, out=None):
+ """
+ Return the dot product of two arrays.
+
+ This function is the equivalent of `numpy.dot` that takes masked values
+ into account. Note that `strict` and `out` are in different positions
+ than in the method version. In order to maintain compatibility with the
+ corresponding method, it is recommended that the optional arguments be
+ treated as keyword only. At some point that may be mandatory.
+
+ .. note::
+ Works only with 2-D arrays at the moment.
+
+
+ Parameters
+ ----------
+ a, b : masked_array_like
+ Input arrays.
+ strict : bool, optional
+ Whether masked data are propagated (True) or set to 0 (False) for
+ the computation. Default is False. Propagating the mask means that
+ if a masked value appears in a row or column, the whole row or
+ column is considered masked.
+ out : masked_array, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ .. versionadded:: 1.10.2
+
+ See Also
+ --------
+ numpy.dot : Equivalent function for ndarrays.
+
+ Examples
+ --------
+ >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
+ >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
+ >>> np.ma.dot(a, b)
+ masked_array(
+ data=[[21, 26],
+ [45, 64]],
+ mask=[[False, False],
+ [False, False]],
+ fill_value=999999)
+ >>> np.ma.dot(a, b, strict=True)
+ masked_array(
+ data=[[--, --],
+ [--, 64]],
+ mask=[[ True, True],
+ [ True, False]],
+ fill_value=999999)
+
+ """
+ # !!!: Works only with 2D arrays. There should be a way to get it to run
+ # with higher dimension
+ if strict and (a.ndim == 2) and (b.ndim == 2):
+ a = mask_rowcols(a, 0)
+ b = mask_rowcols(b, 1)
+ am = ~getmaskarray(a)
+ bm = ~getmaskarray(b)
+
+ if out is None:
+ d = np.dot(filled(a, 0), filled(b, 0))
+ m = ~np.dot(am, bm)
+ if d.ndim == 0:
+ d = np.asarray(d)
+ r = d.view(get_masked_subclass(a, b))
+ r.__setmask__(m)
+ return r
+ else:
+ d = np.dot(filled(a, 0), filled(b, 0), out._data)
+ if out.mask.shape != d.shape:
+ out._mask = np.empty(d.shape, MaskType)
+ np.dot(am, bm, out._mask)
+ np.logical_not(out._mask, out._mask)
+ return out
+
+
+def inner(a, b):
+ """
+ Returns the inner product of a and b for arrays of floating point types.
+
+ Like the generic NumPy equivalent, the product sum is over the last
+ dimension of `a` and `b`. The first argument is not conjugated.
+
+ """
+ fa = filled(a, 0)
+ fb = filled(b, 0)
+ if fa.ndim == 0:
+ fa.shape = (1,)
+ if fb.ndim == 0:
+ fb.shape = (1,)
+ return np.inner(fa, fb).view(MaskedArray)
+inner.__doc__ = doc_note(np.inner.__doc__,
+ "Masked values are replaced by 0.")
+innerproduct = inner
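+
+# Editor's sketch (illustrative only): masked entries contribute 0 to the
+# product sum, so [1, --, 3] . [1, 1, 1] evaluates to 1*1 + 0 + 3*1 = 4.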
+
+
+def outer(a, b):
+ "maskedarray version of the numpy function."
+ fa = filled(a, 0).ravel()
+ fb = filled(b, 0).ravel()
+ d = np.outer(fa, fb)
+ ma = getmask(a)
+ mb = getmask(b)
+ if ma is nomask and mb is nomask:
+ return masked_array(d)
+ ma = getmaskarray(a)
+ mb = getmaskarray(b)
+ m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
+ return masked_array(d, mask=m)
+outer.__doc__ = doc_note(np.outer.__doc__,
+ "Masked values are replaced by 0.")
+outerproduct = outer
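+
+# Editor's sketch (illustrative only): an element of the outer product is
+# masked whenever either factor is masked; with a = [1, --], the entire
+# second row of outer(a, [3, 4]) is masked.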
+
+
+def _convolve_or_correlate(f, a, v, mode, propagate_mask):
+ """
+ Helper function for ma.correlate and ma.convolve
+ """
+ if propagate_mask:
+ # results which are contributed to by either item in any pair being invalid
+ mask = (
+ f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
+ | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
+ )
+ data = f(getdata(a), getdata(v), mode=mode)
+ else:
+ # results which are not contributed to by any pair of valid elements
+ mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)
+ data = f(filled(a, 0), filled(v, 0), mode=mode)
+
+ return masked_array(data, mask=mask)
+
+
+def correlate(a, v, mode='valid', propagate_mask=True):
+ """
+ Cross-correlation of two 1-dimensional sequences.
+
+ Parameters
+ ----------
+ a, v : array_like
+ Input sequences.
+ mode : {'valid', 'same', 'full'}, optional
+ Refer to the `np.convolve` docstring. Note that the default
+ is 'valid', unlike `convolve`, which uses 'full'.
+ propagate_mask : bool
+ If True, then a result element is masked if any masked element
+ contributes towards it. If False, then a result element is only
+ masked if no non-masked element contributes towards it.
+
+ Returns
+ -------
+ out : MaskedArray
+ Discrete cross-correlation of `a` and `v`.
+
+ See Also
+ --------
+ numpy.correlate : Equivalent function in the top-level NumPy module.
+ """
+ return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
+
+
+def convolve(a, v, mode='full', propagate_mask=True):
+ """
+ Returns the discrete, linear convolution of two one-dimensional sequences.
+
+ Parameters
+ ----------
+ a, v : array_like
+ Input sequences.
+ mode : {'valid', 'same', 'full'}, optional
+ Refer to the `np.convolve` docstring.
+ propagate_mask : bool
+ If True, then if any masked element is included in the sum for a result
+ element, then the result is masked.
+ If False, then the result element is only masked if no non-masked
+ elements contribute towards it.
+
+ Returns
+ -------
+ out : MaskedArray
+ Discrete, linear convolution of `a` and `v`.
+
+ See Also
+ --------
+ numpy.convolve : Equivalent function in the top-level NumPy module.
+ """
+ return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
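+
+# Editor's sketch contrasting the two modes (illustrative only; `correlate`
+# behaves analogously with its 'valid' default):
+# >>> a = np.ma.array([1, 1, 1], mask=[0, 1, 0])
+# >>> np.ma.convolve(a, [1, 1])                       # [1, --, --, 1]
+# >>> np.ma.convolve(a, [1, 1], propagate_mask=False) # [1, 1, 1, 1]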
+
+
+def allequal(a, b, fill_value=True):
+ """
+ Return True if all entries of a and b are equal, using
+ fill_value as a truth value where either or both are masked.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to compare.
+ fill_value : bool, optional
+ Whether masked values in a or b are considered equal (True) or not
+ (False).
+
+ Returns
+ -------
+ y : bool
+ Returns True if all entries of `a` and `b` are equal, with masked
+ values treated according to `fill_value`; False otherwise.
+
+ See Also
+ --------
+ all, any
+ numpy.ma.allclose
+
+ Examples
+ --------
+ >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
+ >>> a
+ masked_array(data=[10000000000.0, 1e-07, --],
+ mask=[False, False, True],
+ fill_value=1e+20)
+
+ >>> b = np.array([1e10, 1e-7, -42.0])
+ >>> b
+ array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
+ >>> np.ma.allequal(a, b, fill_value=False)
+ False
+ >>> np.ma.allequal(a, b)
+ True
+
+ """
+ m = mask_or(getmask(a), getmask(b))
+ if m is nomask:
+ x = getdata(a)
+ y = getdata(b)
+ d = umath.equal(x, y)
+ return d.all()
+ elif fill_value:
+ x = getdata(a)
+ y = getdata(b)
+ d = umath.equal(x, y)
+ dm = array(d, mask=m, copy=False)
+ return dm.filled(True).all(None)
+ else:
+ return False
+
+
+def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
+ """
+ Returns True if two arrays are element-wise equal within a tolerance.
+
+ This function is equivalent to `allclose` except that masked values
+ are treated as equal (default) or unequal, depending on the `masked_equal`
+ argument.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to compare.
+ masked_equal : bool, optional
+ Whether masked values in `a` and `b` are considered equal (True) or not
+ (False). They are considered equal by default.
+ rtol : float, optional
+ Relative tolerance. The relative difference is equal to ``rtol * b``.
+ Default is 1e-5.
+ atol : float, optional
+ Absolute tolerance. The absolute difference is equal to `atol`.
+ Default is 1e-8.
+
+ Returns
+ -------
+ y : bool
+ Returns True if the two arrays are equal within the given
+ tolerance, False otherwise. If either array contains NaN, then
+ False is returned.
+
+ See Also
+ --------
+ all, any
+ numpy.allclose : the non-masked `allclose`.
+
+ Notes
+ -----
+ If the following equation is element-wise True, then `allclose` returns
+ True::
+
+ absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
+
+ Return True if all elements of `a` and `b` are equal subject to
+ given tolerances.
+
+ Examples
+ --------
+ >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
+ >>> a
+ masked_array(data=[10000000000.0, 1e-07, --],
+ mask=[False, False, True],
+ fill_value=1e+20)
+ >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
+ False
+
+ >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+ >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
+ True
+ >>> np.ma.allclose(a, b, masked_equal=False)
+ False
+
+ Masked values are not compared directly.
+
+ >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+ >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
+ True
+ >>> np.ma.allclose(a, b, masked_equal=False)
+ False
+
+ """
+ x = masked_array(a, copy=False)
+ y = masked_array(b, copy=False)
+
+ # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+ # casting of x later.
+ # NOTE: We explicitly allow timedelta, which used to work. This could
+ # possibly be deprecated. See also gh-18286.
+ # timedelta works if `atol` is an integer or also a timedelta,
+ # although the default tolerances are unlikely to be useful in that case.
+ if y.dtype.kind != "m":
+ dtype = np.result_type(y, 1.)
+ if y.dtype != dtype:
+ y = masked_array(y, dtype=dtype, copy=False)
+
+ m = mask_or(getmask(x), getmask(y))
+ xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
+ # If we have some infs, they should fall at the same place.
+ if not np.all(xinf == filled(np.isinf(y), False)):
+ return False
+ # No infs at all
+ if not np.any(xinf):
+ d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+ masked_equal)
+ return np.all(d)
+
+ if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
+ return False
+ x = x[~xinf]
+ y = y[~xinf]
+
+ d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+ masked_equal)
+
+ return np.all(d)
+
+
+def asarray(a, dtype=None, order=None):
+ """
+ Convert the input to a masked array of the given data-type.
+
+ No copy is performed if the input is already an `ndarray`. If `a` is
+ a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to a masked array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists, ndarrays and masked arrays.
+ dtype : dtype, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major ('C') or column-major ('FORTRAN') memory
+ representation. Default is 'C'.
+
+ Returns
+ -------
+ out : MaskedArray
+ Masked array interpretation of `a`.
+
+ See Also
+ --------
+ asanyarray : Similar to `asarray`, but conserves subclasses.
+
+ Examples
+ --------
+ >>> x = np.arange(10.).reshape(2, 5)
+ >>> x
+ array([[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]])
+ >>> np.ma.asarray(x)
+ masked_array(
+ data=[[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]],
+ mask=False,
+ fill_value=1e+20)
+ >>> type(np.ma.asarray(x))
+ <class 'numpy.ma.core.MaskedArray'>
+
+ """
+ order = order or 'C'
+ return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
+ subok=False, order=order)
+
+
+def asanyarray(a, dtype=None):
+ """
+ Convert the input to a masked array, conserving subclasses.
+
+ If `a` is a subclass of `MaskedArray`, its class is conserved.
+ No copy is performed if the input is already an `ndarray`.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array.
+ dtype : dtype, optional
+ By default, the data-type is inferred from the input data.
+
+ Returns
+ -------
+ out : MaskedArray
+ MaskedArray interpretation of `a`.
+
+ See Also
+ --------
+ asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+ Examples
+ --------
+ >>> x = np.arange(10.).reshape(2, 5)
+ >>> x
+ array([[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]])
+ >>> np.ma.asanyarray(x)
+ masked_array(
+ data=[[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]],
+ mask=False,
+ fill_value=1e+20)
+ >>> type(np.ma.asanyarray(x))
+ <class 'numpy.ma.core.MaskedArray'>
+
+ """
+ # workaround for #8666, to preserve identity. Ideally the bottom line
+ # would handle this for us.
+ if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
+ return a
+ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+##############################################################################
+# Pickling #
+##############################################################################
+
+
+def fromfile(file, dtype=float, count=-1, sep=''):
+ raise NotImplementedError(
+ "fromfile() not yet implemented for a MaskedArray.")
+
+
+def fromflex(fxarray):
+ """
+ Build a masked array from a suitable flexible-type array.
+
+ The input array has to have a data-type with ``_data`` and ``_mask``
+ fields. This type of array is output by `MaskedArray.toflex`.
+
+ Parameters
+ ----------
+ fxarray : ndarray
+ The structured input array, containing ``_data`` and ``_mask``
+ fields. If present, other fields are discarded.
+
+ Returns
+ -------
+ result : MaskedArray
+ The constructed masked array.
+
+ See Also
+ --------
+ MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+ >>> rec = x.toflex()
+ >>> rec
+ array([[(0, False), (1, True), (2, False)],
+ [(3, True), (4, False), (5, True)],
+ [(6, False), (7, True), (8, False)]],
+ dtype=[('_data', '<i8'), ('_mask', '?')])
+ >>> x2 = np.ma.fromflex(rec)
+ >>> x2
+ masked_array(
+ data=[[0, --, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+
+ Extra fields can be present in the structured array but are discarded:
+
+ >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+ >>> rec2 = np.zeros((2, 2), dtype=dt)
+ >>> rec2
+ array([[(0, False, 0.), (0, False, 0.)],
+ [(0, False, 0.), (0, False, 0.)]],
+ dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
+ >>> y = np.ma.fromflex(rec2)
+ >>> y
+ masked_array(
+ data=[[0, 0],
+ [0, 0]],
+ mask=[[False, False],
+ [False, False]],
+ fill_value=999999,
+ dtype=int32)
+
+ """
+ return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+class _convert2ma:
+
+ """
+ Convert functions from numpy to numpy.ma.
+
+ Parameters
+ ----------
+ _methodname : string
+ Name of the method to transform.
+
+ """
+ __doc__ = None
+
+ def __init__(self, funcname, np_ret, np_ma_ret, params=None):
+ self._func = getattr(np, funcname)
+ self.__doc__ = self.getdoc(np_ret, np_ma_ret)
+ self._extras = params or {}
+
+ def getdoc(self, np_ret, np_ma_ret):
+ "Return the doc of the function (from the doc of the method)."
+ doc = getattr(self._func, '__doc__', None)
+ sig = get_object_signature(self._func)
+ if doc:
+ doc = self._replace_return_type(doc, np_ret, np_ma_ret)
+ # Add the signature of the function at the beginning of the doc
+ if sig:
+ sig = "%s%s\n" % (self._func.__name__, sig)
+ doc = sig + doc
+ return doc
+
+ def _replace_return_type(self, doc, np_ret, np_ma_ret):
+ """
+ Replace documentation of ``np`` function's return type.
+
+ Replaces it with the proper type for the ``np.ma`` function.
+
+ Parameters
+ ----------
+ doc : str
+ The documentation of the ``np`` method.
+ np_ret : str
+ The return type string of the ``np`` method that we want to
+ replace. (e.g. "out : ndarray")
+ np_ma_ret : str
+ The return type string of the ``np.ma`` method.
+ (e.g. "out : MaskedArray")
+ """
+ if np_ret not in doc:
+ raise RuntimeError(
+ f"Failed to replace `{np_ret}` with `{np_ma_ret}`. "
+ f"The documentation string for return type, {np_ret}, is not "
+ f"found in the docstring for `np.{self._func.__name__}`. "
+ f"Fix the docstring for `np.{self._func.__name__}` or "
+ "update the expected string for return type."
+ )
+
+ return doc.replace(np_ret, np_ma_ret)
+
+ def __call__(self, *args, **params):
+ # Find the common parameters to the call and the definition
+ _extras = self._extras
+ common_params = set(params).intersection(_extras)
+ # Drop the common parameters from the call
+ for p in common_params:
+ _extras[p] = params.pop(p)
+ # Get the result
+ result = self._func.__call__(*args, **params).view(MaskedArray)
+ if "fill_value" in common_params:
+ result.fill_value = _extras.get("fill_value", None)
+ if "hardmask" in common_params:
+ # The extras dict stores this flag under the key "hardmask";
+ # looking up "hard_mask" would always fall back to False.
+ result._hardmask = bool(_extras.get("hardmask", False))
+ return result
+
+
+arange = _convert2ma(
+ 'arange',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='arange : ndarray',
+ np_ma_ret='arange : MaskedArray',
+)
+clip = _convert2ma(
+ 'clip',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='clipped_array : ndarray',
+ np_ma_ret='clipped_array : MaskedArray',
+)
+diff = _convert2ma(
+ 'diff',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='diff : ndarray',
+ np_ma_ret='diff : MaskedArray',
+)
+empty = _convert2ma(
+ 'empty',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+empty_like = _convert2ma(
+ 'empty_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+frombuffer = _convert2ma(
+ 'frombuffer',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+fromfunction = _convert2ma(
+ 'fromfunction',
+ np_ret='fromfunction : any',
+ np_ma_ret='fromfunction : MaskedArray',
+)
+identity = _convert2ma(
+ 'identity',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+indices = _convert2ma(
+ 'indices',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='grid : one ndarray or tuple of ndarrays',
+ np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays',
+)
+ones = _convert2ma(
+ 'ones',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+ones_like = _convert2ma(
+ 'ones_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+squeeze = _convert2ma(
+ 'squeeze',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='squeezed : ndarray',
+ np_ma_ret='squeezed : MaskedArray',
+)
+zeros = _convert2ma(
+ 'zeros',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+zeros_like = _convert2ma(
+ 'zeros_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
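+
+# Editor's note (illustrative only): each factory above calls the NumPy
+# function of the same name, views the result as a MaskedArray, and first
+# peels off the extra ``fill_value``/``hardmask`` keywords, e.g.:
+# >>> z = np.ma.zeros((2,), fill_value=-1.0)
+# >>> z.fill_value
+# -1.0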
+
+
+def append(a, b, axis=None):
+ """Append values to the end of an array.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ a : array_like
+ Values are appended to a copy of this array.
+ b : array_like
+ These values are appended to a copy of `a`. It must be of the
+ correct shape (the same shape as `a`, excluding `axis`). If `axis`
+ is not specified, `b` can be any shape and will be flattened
+ before use.
+ axis : int, optional
+ The axis along which `b` is appended. If `axis` is not given,
+ both `a` and `b` are flattened before use.
+
+ Returns
+ -------
+ append : MaskedArray
+ A copy of `a` with `b` appended to `axis`. Note that `append`
+ does not occur in-place: a new array is allocated and filled. If
+ `axis` is None, the result is a flattened array.
+
+ See Also
+ --------
+ numpy.append : Equivalent function in the top-level NumPy module.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = ma.masked_values([1, 2, 3], 2)
+ >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
+ >>> ma.append(a, b)
+ masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9],
+ mask=[False, True, False, False, False, False, True, False,
+ False],
+ fill_value=999999)
+ """
+ return concatenate([a, b], axis)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/core.pyi b/venv/lib/python3.9/site-packages/numpy/ma/core.pyi
new file mode 100644
index 00000000..d5d4f7f3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/core.pyi
@@ -0,0 +1,472 @@
+from collections.abc import Callable
+from typing import Any, TypeVar
+from numpy import ndarray, dtype, float64
+
+from numpy import (
+ amax as amax,
+ amin as amin,
+ bool_ as bool_,
+ expand_dims as expand_dims,
+ diff as diff,
+ clip as clip,
+ indices as indices,
+ ones_like as ones_like,
+ squeeze as squeeze,
+ zeros_like as zeros_like,
+)
+
+from numpy.lib.function_base import (
+ angle as angle,
+)
+
+# TODO: Set the `bound` to something more suitable once we
+# have proper shape support
+_ShapeType = TypeVar("_ShapeType", bound=Any)
+_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)
+
+__all__: list[str]
+
+MaskType = bool_
+nomask: bool_
+
+class MaskedArrayFutureWarning(FutureWarning): ...
+class MAError(Exception): ...
+class MaskError(MAError): ...
+
+def default_fill_value(obj): ...
+def minimum_fill_value(obj): ...
+def maximum_fill_value(obj): ...
+def set_fill_value(a, fill_value): ...
+def common_fill_value(a, b): ...
+def filled(a, fill_value=...): ...
+def getdata(a, subok=...): ...
+get_data = getdata
+
+def fix_invalid(a, mask=..., copy=..., fill_value=...): ...
+
+class _MaskedUFunc:
+ f: Any
+ __doc__: Any
+ __name__: Any
+ def __init__(self, ufunc): ...
+
+class _MaskedUnaryOperation(_MaskedUFunc):
+ fill: Any
+ domain: Any
+ def __init__(self, mufunc, fill=..., domain=...): ...
+ def __call__(self, a, *args, **kwargs): ...
+
+class _MaskedBinaryOperation(_MaskedUFunc):
+ fillx: Any
+ filly: Any
+ def __init__(self, mbfunc, fillx=..., filly=...): ...
+ def __call__(self, a, b, *args, **kwargs): ...
+ def reduce(self, target, axis=..., dtype=...): ...
+ def outer(self, a, b): ...
+ def accumulate(self, target, axis=...): ...
+
+class _DomainedBinaryOperation(_MaskedUFunc):
+ domain: Any
+ fillx: Any
+ filly: Any
+ def __init__(self, dbfunc, domain, fillx=..., filly=...): ...
+ def __call__(self, a, b, *args, **kwargs): ...
+
+exp: _MaskedUnaryOperation
+conjugate: _MaskedUnaryOperation
+sin: _MaskedUnaryOperation
+cos: _MaskedUnaryOperation
+arctan: _MaskedUnaryOperation
+arcsinh: _MaskedUnaryOperation
+sinh: _MaskedUnaryOperation
+cosh: _MaskedUnaryOperation
+tanh: _MaskedUnaryOperation
+abs: _MaskedUnaryOperation
+absolute: _MaskedUnaryOperation
+fabs: _MaskedUnaryOperation
+negative: _MaskedUnaryOperation
+floor: _MaskedUnaryOperation
+ceil: _MaskedUnaryOperation
+around: _MaskedUnaryOperation
+logical_not: _MaskedUnaryOperation
+sqrt: _MaskedUnaryOperation
+log: _MaskedUnaryOperation
+log2: _MaskedUnaryOperation
+log10: _MaskedUnaryOperation
+tan: _MaskedUnaryOperation
+arcsin: _MaskedUnaryOperation
+arccos: _MaskedUnaryOperation
+arccosh: _MaskedUnaryOperation
+arctanh: _MaskedUnaryOperation
+
+add: _MaskedBinaryOperation
+subtract: _MaskedBinaryOperation
+multiply: _MaskedBinaryOperation
+arctan2: _MaskedBinaryOperation
+equal: _MaskedBinaryOperation
+not_equal: _MaskedBinaryOperation
+less_equal: _MaskedBinaryOperation
+greater_equal: _MaskedBinaryOperation
+less: _MaskedBinaryOperation
+greater: _MaskedBinaryOperation
+logical_and: _MaskedBinaryOperation
+alltrue: _MaskedBinaryOperation
+logical_or: _MaskedBinaryOperation
+sometrue: Callable[..., Any]
+logical_xor: _MaskedBinaryOperation
+bitwise_and: _MaskedBinaryOperation
+bitwise_or: _MaskedBinaryOperation
+bitwise_xor: _MaskedBinaryOperation
+hypot: _MaskedBinaryOperation
+divide: _MaskedBinaryOperation
+true_divide: _MaskedBinaryOperation
+floor_divide: _MaskedBinaryOperation
+remainder: _MaskedBinaryOperation
+fmod: _MaskedBinaryOperation
+mod: _MaskedBinaryOperation
+
+def make_mask_descr(ndtype): ...
+def getmask(a): ...
+get_mask = getmask
+
+def getmaskarray(arr): ...
+def is_mask(m): ...
+def make_mask(m, copy=..., shrink=..., dtype=...): ...
+def make_mask_none(newshape, dtype=...): ...
+def mask_or(m1, m2, copy=..., shrink=...): ...
+def flatten_mask(mask): ...
+def masked_where(condition, a, copy=...): ...
+def masked_greater(x, value, copy=...): ...
+def masked_greater_equal(x, value, copy=...): ...
+def masked_less(x, value, copy=...): ...
+def masked_less_equal(x, value, copy=...): ...
+def masked_not_equal(x, value, copy=...): ...
+def masked_equal(x, value, copy=...): ...
+def masked_inside(x, v1, v2, copy=...): ...
+def masked_outside(x, v1, v2, copy=...): ...
+def masked_object(x, value, copy=..., shrink=...): ...
+def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ...
+def masked_invalid(a, copy=...): ...
+
+class _MaskedPrintOption:
+ def __init__(self, display): ...
+ def display(self): ...
+ def set_display(self, s): ...
+ def enabled(self): ...
+ def enable(self, shrink=...): ...
+
+masked_print_option: _MaskedPrintOption
+
+def flatten_structured_array(a): ...
+
+class MaskedIterator:
+ ma: Any
+ dataiter: Any
+ maskiter: Any
+ def __init__(self, ma): ...
+ def __iter__(self): ...
+ def __getitem__(self, indx): ...
+ def __setitem__(self, index, value): ...
+ def __next__(self): ...
+
+class MaskedArray(ndarray[_ShapeType, _DType_co]):
+ __array_priority__: Any
+ def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ...
+ def __array_finalize__(self, obj): ...
+ def __array_wrap__(self, obj, context=...): ...
+ def view(self, dtype=..., type=..., fill_value=...): ...
+ def __getitem__(self, indx): ...
+ def __setitem__(self, indx, value): ...
+ @property
+ def dtype(self): ...
+ @dtype.setter
+ def dtype(self, dtype): ...
+ @property
+ def shape(self): ...
+ @shape.setter
+ def shape(self, shape): ...
+ def __setmask__(self, mask, copy=...): ...
+ @property
+ def mask(self): ...
+ @mask.setter
+ def mask(self, value): ...
+ @property
+ def recordmask(self): ...
+ @recordmask.setter
+ def recordmask(self, mask): ...
+ def harden_mask(self): ...
+ def soften_mask(self): ...
+ @property
+ def hardmask(self): ...
+ def unshare_mask(self): ...
+ @property
+ def sharedmask(self): ...
+ def shrink_mask(self): ...
+ @property
+ def baseclass(self): ...
+ data: Any
+ @property
+ def flat(self): ...
+ @flat.setter
+ def flat(self, value): ...
+ @property
+ def fill_value(self): ...
+ @fill_value.setter
+ def fill_value(self, value=...): ...
+ get_fill_value: Any
+ set_fill_value: Any
+ def filled(self, fill_value=...): ...
+ def compressed(self): ...
+ def compress(self, condition, axis=..., out=...): ...
+ def __eq__(self, other): ...
+ def __ne__(self, other): ...
+ def __ge__(self, other): ...
+ def __gt__(self, other): ...
+ def __le__(self, other): ...
+ def __lt__(self, other): ...
+ def __add__(self, other): ...
+ def __radd__(self, other): ...
+ def __sub__(self, other): ...
+ def __rsub__(self, other): ...
+ def __mul__(self, other): ...
+ def __rmul__(self, other): ...
+ def __div__(self, other): ...
+ def __truediv__(self, other): ...
+ def __rtruediv__(self, other): ...
+ def __floordiv__(self, other): ...
+ def __rfloordiv__(self, other): ...
+ def __pow__(self, other): ...
+ def __rpow__(self, other): ...
+ def __iadd__(self, other): ...
+ def __isub__(self, other): ...
+ def __imul__(self, other): ...
+ def __idiv__(self, other): ...
+ def __ifloordiv__(self, other): ...
+ def __itruediv__(self, other): ...
+ def __ipow__(self, other): ...
+ def __float__(self): ...
+ def __int__(self): ...
+ @property # type: ignore[misc]
+ def imag(self): ...
+ get_imag: Any
+ @property # type: ignore[misc]
+ def real(self): ...
+ get_real: Any
+ def count(self, axis=..., keepdims=...): ...
+ def ravel(self, order=...): ...
+ def reshape(self, *s, **kwargs): ...
+ def resize(self, newshape, refcheck=..., order=...): ...
+ def put(self, indices, values, mode=...): ...
+ def ids(self): ...
+ def iscontiguous(self): ...
+ def all(self, axis=..., out=..., keepdims=...): ...
+ def any(self, axis=..., out=..., keepdims=...): ...
+ def nonzero(self): ...
+ def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ...
+ def dot(self, b, out=..., strict=...): ...
+ def sum(self, axis=..., dtype=..., out=..., keepdims=...): ...
+ def cumsum(self, axis=..., dtype=..., out=...): ...
+ def prod(self, axis=..., dtype=..., out=..., keepdims=...): ...
+ product: Any
+ def cumprod(self, axis=..., dtype=..., out=...): ...
+ def mean(self, axis=..., dtype=..., out=..., keepdims=...): ...
+ def anom(self, axis=..., dtype=...): ...
+ def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
+ def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
+ def round(self, decimals=..., out=...): ...
+ def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
+ def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ...
+ def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ...
+ def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
+ def min(self, axis=..., out=..., fill_value=..., keepdims=...): ...
+ # NOTE: deprecated
+ # def tostring(self, fill_value=..., order=...): ...
+ def max(self, axis=..., out=..., fill_value=..., keepdims=...): ...
+ def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ...
+ def partition(self, *args, **kwargs): ...
+ def argpartition(self, *args, **kwargs): ...
+ def take(self, indices, axis=..., out=..., mode=...): ...
+ copy: Any
+ diagonal: Any
+ flatten: Any
+ repeat: Any
+ squeeze: Any
+ swapaxes: Any
+ T: Any
+ transpose: Any
+ def tolist(self, fill_value=...): ...
+ def tobytes(self, fill_value=..., order=...): ...
+ def tofile(self, fid, sep=..., format=...): ...
+ def toflex(self): ...
+ torecords: Any
+ def __reduce__(self): ...
+ def __deepcopy__(self, memo=...): ...
+
+class mvoid(MaskedArray[_ShapeType, _DType_co]):
+ def __new__(
+ self,
+ data,
+ mask=...,
+ dtype=...,
+ fill_value=...,
+ hardmask=...,
+ copy=...,
+ subok=...,
+ ): ...
+ def __getitem__(self, indx): ...
+ def __setitem__(self, indx, value): ...
+ def __iter__(self): ...
+ def __len__(self): ...
+ def filled(self, fill_value=...): ...
+ def tolist(self): ...
+
+def isMaskedArray(x): ...
+isarray = isMaskedArray
+isMA = isMaskedArray
+
+# 0D float64 array
+class MaskedConstant(MaskedArray[Any, dtype[float64]]):
+ def __new__(cls): ...
+ __class__: Any
+ def __array_finalize__(self, obj): ...
+ def __array_prepare__(self, obj, context=...): ...
+ def __array_wrap__(self, obj, context=...): ...
+ def __format__(self, format_spec): ...
+ def __reduce__(self): ...
+ def __iop__(self, other): ...
+ __iadd__: Any
+ __isub__: Any
+ __imul__: Any
+ __ifloordiv__: Any
+ __itruediv__: Any
+ __ipow__: Any
+ def copy(self, *args, **kwargs): ...
+ def __copy__(self): ...
+ def __deepcopy__(self, memo): ...
+ def __setattr__(self, attr, value): ...
+
+masked: MaskedConstant
+masked_singleton: MaskedConstant
+masked_array = MaskedArray
+
+def array(
+ data,
+ dtype=...,
+ copy=...,
+ order=...,
+ mask=...,
+ fill_value=...,
+ keep_mask=...,
+ hard_mask=...,
+ shrink=...,
+ subok=...,
+ ndmin=...,
+): ...
+def is_masked(x): ...
+
+class _extrema_operation(_MaskedUFunc):
+ compare: Any
+ fill_value_func: Any
+ def __init__(self, ufunc, compare, fill_value): ...
+ # NOTE: in practice `b` has a default value, but users should
+ # explicitly provide a value here as the default is deprecated
+ def __call__(self, a, b): ...
+ def reduce(self, target, axis=...): ...
+ def outer(self, a, b): ...
+
+def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ...
+def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ...
+def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ...
+
+class _frommethod:
+ __name__: Any
+ __doc__: Any
+ reversed: Any
+ def __init__(self, methodname, reversed=...): ...
+ def getdoc(self): ...
+ def __call__(self, a, *args, **params): ...
+
+all: _frommethod
+anomalies: _frommethod
+anom: _frommethod
+any: _frommethod
+compress: _frommethod
+cumprod: _frommethod
+cumsum: _frommethod
+copy: _frommethod
+diagonal: _frommethod
+harden_mask: _frommethod
+ids: _frommethod
+mean: _frommethod
+nonzero: _frommethod
+prod: _frommethod
+product: _frommethod
+ravel: _frommethod
+repeat: _frommethod
+soften_mask: _frommethod
+std: _frommethod
+sum: _frommethod
+swapaxes: _frommethod
+trace: _frommethod
+var: _frommethod
+count: _frommethod
+argmin: _frommethod
+argmax: _frommethod
+
+minimum: _extrema_operation
+maximum: _extrema_operation
+
+def take(a, indices, axis=..., out=..., mode=...): ...
+def power(a, b, third=...): ...
+def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
+def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
+def compressed(x): ...
+def concatenate(arrays, axis=...): ...
+def diag(v, k=...): ...
+def left_shift(a, n): ...
+def right_shift(a, n): ...
+def put(a, indices, values, mode=...): ...
+def putmask(a, mask, values): ...
+def transpose(a, axes=...): ...
+def reshape(a, new_shape, order=...): ...
+def resize(x, new_shape): ...
+def ndim(obj): ...
+def shape(obj): ...
+def size(obj, axis=...): ...
+def where(condition, x=..., y=...): ...
+def choose(indices, choices, out=..., mode=...): ...
+def round_(a, decimals=..., out=...): ...
+round = round_
+
+def inner(a, b): ...
+innerproduct = inner
+
+def outer(a, b): ...
+outerproduct = outer
+
+def correlate(a, v, mode=..., propagate_mask=...): ...
+def convolve(a, v, mode=..., propagate_mask=...): ...
+def allequal(a, b, fill_value=...): ...
+def allclose(a, b, masked_equal=..., rtol=..., atol=...): ...
+def asarray(a, dtype=..., order=...): ...
+def asanyarray(a, dtype=...): ...
+def fromflex(fxarray): ...
+
+class _convert2ma:
+ __doc__: Any
+ def __init__(self, funcname, params=...): ...
+ def getdoc(self): ...
+ def __call__(self, *args, **params): ...
+
+arange: _convert2ma
+empty: _convert2ma
+empty_like: _convert2ma
+frombuffer: _convert2ma
+fromfunction: _convert2ma
+identity: _convert2ma
+ones: _convert2ma
+zeros: _convert2ma
+
+def append(a, b, axis=...): ...
+def dot(a, b, strict=..., out=...): ...
+def mask_rowcols(a, axis=...): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/extras.py b/venv/lib/python3.9/site-packages/numpy/ma/extras.py
new file mode 100644
index 00000000..41bce0f2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/extras.py
@@ -0,0 +1,2045 @@
+"""
+Masked arrays add-ons.
+
+A collection of utilities for `numpy.ma`.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
+
+"""
+__all__ = [
+ 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
+ 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack',
+ 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows',
+ 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d',
+ 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack',
+ 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows',
+ 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate',
+ 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
+ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
+ ]
+
+import itertools
+import warnings
+
+from . import core as ma
+from .core import (
+ MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
+ getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
+ nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
+ mask_rowcols
+ )
+
+import numpy as np
+from numpy import ndarray, array as nxarray
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.lib.function_base import _ureduce
+from numpy.lib.index_tricks import AxisConcatenator
+
+
+def issequence(seq):
+ """
+ Is seq a sequence (ndarray, list or tuple)?
+
+ """
+ return isinstance(seq, (ndarray, tuple, list))
+
+
+def count_masked(arr, axis=None):
+ """
+ Count the number of masked elements along the given axis.
+
+ Parameters
+ ----------
+ arr : array_like
+ An array with (possibly) masked elements.
+ axis : int, optional
+ Axis along which to count. If None (default), a flattened
+ version of the array is used.
+
+ Returns
+ -------
+ count : int, ndarray
+ The total number of masked elements (axis=None) or the number
+ of masked elements along each slice of the given axis.
+
+ See Also
+ --------
+ MaskedArray.count : Count non-masked elements.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(9).reshape((3,3))
+ >>> a = ma.array(a)
+ >>> a[1, 0] = ma.masked
+ >>> a[1, 2] = ma.masked
+ >>> a[2, 1] = ma.masked
+ >>> a
+ masked_array(
+ data=[[0, 1, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, False, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> ma.count_masked(a)
+ 3
+
+ When the `axis` keyword is used an array is returned.
+
+ >>> ma.count_masked(a, axis=0)
+ array([1, 1, 1])
+ >>> ma.count_masked(a, axis=1)
+ array([0, 2, 1])
+
+ """
+ m = getmaskarray(arr)
+ return m.sum(axis)
+
+
+def masked_all(shape, dtype=float):
+ """
+ Empty masked array with all elements masked.
+
+ Return an empty masked array of the given shape and dtype, where all the
+ data are masked.
+
+ Parameters
+ ----------
+ shape : int or tuple of ints
+ Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``.
+ dtype : dtype, optional
+ Data type of the output.
+
+ Returns
+ -------
+ a : MaskedArray
+ A masked array with all data masked.
+
+ See Also
+ --------
+ masked_all_like : Empty masked array modelled on an existing array.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> ma.masked_all((3, 3))
+ masked_array(
+ data=[[--, --, --],
+ [--, --, --],
+ [--, --, --]],
+ mask=[[ True, True, True],
+ [ True, True, True],
+ [ True, True, True]],
+ fill_value=1e+20,
+ dtype=float64)
+
+ The `dtype` parameter defines the underlying data type.
+
+ >>> a = ma.masked_all((3, 3))
+ >>> a.dtype
+ dtype('float64')
+ >>> a = ma.masked_all((3, 3), dtype=np.int32)
+ >>> a.dtype
+ dtype('int32')
+
+ """
+ a = masked_array(np.empty(shape, dtype),
+ mask=np.ones(shape, make_mask_descr(dtype)))
+ return a
+
+
+def masked_all_like(arr):
+ """
+ Empty masked array with the properties of an existing array.
+
+ Return an empty masked array of the same shape and dtype as
+ the array `arr`, where all the data are masked.
+
+ Parameters
+ ----------
+ arr : ndarray
+ An array describing the shape and dtype of the required MaskedArray.
+
+ Returns
+ -------
+ a : MaskedArray
+ A masked array with all data masked.
+
+ Raises
+ ------
+ AttributeError
+ If `arr` doesn't have a shape attribute (i.e., it is not an ndarray).
+
+ See Also
+ --------
+ masked_all : Empty masked array with all elements masked.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> arr = np.zeros((2, 3), dtype=np.float32)
+ >>> arr
+ array([[0., 0., 0.],
+ [0., 0., 0.]], dtype=float32)
+ >>> ma.masked_all_like(arr)
+ masked_array(
+ data=[[--, --, --],
+ [--, --, --]],
+ mask=[[ True, True, True],
+ [ True, True, True]],
+ fill_value=1e+20,
+ dtype=float32)
+
+ The dtype of the masked array matches the dtype of `arr`.
+
+ >>> arr.dtype
+ dtype('float32')
+ >>> ma.masked_all_like(arr).dtype
+ dtype('float32')
+
+ """
+ a = np.empty_like(arr).view(MaskedArray)
+ a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
+ return a
+
+
+#####--------------------------------------------------------------------------
+#---- --- Standard functions ---
+#####--------------------------------------------------------------------------
+class _fromnxfunction:
+ """
+ Defines a wrapper to adapt NumPy functions to masked arrays.
+
+
+ An instance of `_fromnxfunction` can be called with the same parameters
+ as the wrapped NumPy function. The docstring of the new function is
+ adapted from the wrapped function as well; see `getdoc`.
+
+ This class should not be used directly. Instead, one of its extensions that
+ provides support for a specific type of input should be used.
+
+ Parameters
+ ----------
+ funcname : str
+ The name of the function to be adapted. The function should be
+ in the NumPy namespace (i.e. ``np.funcname``).
+
+ """
+
+ def __init__(self, funcname):
+ self.__name__ = funcname
+ self.__doc__ = self.getdoc()
+
+ def getdoc(self):
+ """
+ Retrieve the docstring and signature from the function.
+
+ The ``__doc__`` attribute of the function is used as the docstring for
+ the new masked array version of the function. A note on application
+ of the function to the mask is appended.
+
+ Parameters
+ ----------
+ None
+
+ """
+ npfunc = getattr(np, self.__name__, None)
+ doc = getattr(npfunc, '__doc__', None)
+ if doc:
+ sig = self.__name__ + ma.get_object_signature(npfunc)
+ doc = ma.doc_note(doc, "The function is applied to both the _data "
+ "and the _mask, if any.")
+ return '\n\n'.join((sig, doc))
+ return
+
+ def __call__(self, *args, **params):
+ pass
+
+
+class _fromnxfunction_single(_fromnxfunction):
+ """
+ A version of `_fromnxfunction` that is called with a single array
+ argument followed by auxiliary args that are passed verbatim for
+ both the data and mask calls.
+ """
+ def __call__(self, x, *args, **params):
+ func = getattr(np, self.__name__)
+ if isinstance(x, ndarray):
+ _d = func(x.__array__(), *args, **params)
+ _m = func(getmaskarray(x), *args, **params)
+ return masked_array(_d, mask=_m)
+ else:
+ _d = func(np.asarray(x), *args, **params)
+ _m = func(getmaskarray(x), *args, **params)
+ return masked_array(_d, mask=_m)
+
+
+class _fromnxfunction_seq(_fromnxfunction):
+ """
+ A version of `_fromnxfunction` that is called with a single sequence
+ of arrays followed by auxiliary args that are passed verbatim for
+ both the data and mask calls.
+ """
+ def __call__(self, x, *args, **params):
+ func = getattr(np, self.__name__)
+ _d = func(tuple([np.asarray(a) for a in x]), *args, **params)
+ _m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
+ return masked_array(_d, mask=_m)
+
+
+class _fromnxfunction_args(_fromnxfunction):
+ """
+ A version of `_fromnxfunction` that is called with multiple array
+ arguments. The first non-array-like input marks the beginning of the
+ arguments that are passed verbatim for both the data and mask calls.
+ Array arguments are processed independently and the results are
+ returned in a list. If only one array is found, the return value is
+ just the processed array instead of a list.
+ """
+ def __call__(self, *args, **params):
+ func = getattr(np, self.__name__)
+ arrays = []
+ args = list(args)
+ while len(args) > 0 and issequence(args[0]):
+ arrays.append(args.pop(0))
+ res = []
+ for x in arrays:
+ _d = func(np.asarray(x), *args, **params)
+ _m = func(getmaskarray(x), *args, **params)
+ res.append(masked_array(_d, mask=_m))
+ if len(arrays) == 1:
+ return res[0]
+ return res
+
+
+class _fromnxfunction_allargs(_fromnxfunction):
+ """
+ A version of `_fromnxfunction` that is called with multiple array
+ arguments. Similar to `_fromnxfunction_args` except that all args
+ are converted to arrays even if they are not so already. This makes
+ it possible to process scalars as 1-D arrays. Only keyword arguments
+ are passed through verbatim for the data and mask calls. Array
+ arguments are processed independently and the results are returned
+ in a list. If only one arg is present, the return value is just the
+ processed array instead of a list.
+ """
+ def __call__(self, *args, **params):
+ func = getattr(np, self.__name__)
+ res = []
+ for x in args:
+ _d = func(np.asarray(x), **params)
+ _m = func(getmaskarray(x), **params)
+ res.append(masked_array(_d, mask=_m))
+ if len(args) == 1:
+ return res[0]
+ return res
+
+
+atleast_1d = _fromnxfunction_allargs('atleast_1d')
+atleast_2d = _fromnxfunction_allargs('atleast_2d')
+atleast_3d = _fromnxfunction_allargs('atleast_3d')
+
+vstack = row_stack = _fromnxfunction_seq('vstack')
+hstack = _fromnxfunction_seq('hstack')
+column_stack = _fromnxfunction_seq('column_stack')
+dstack = _fromnxfunction_seq('dstack')
+stack = _fromnxfunction_seq('stack')
+
+hsplit = _fromnxfunction_single('hsplit')
+
+diagflat = _fromnxfunction_single('diagflat')
+
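+# A doctest-style sketch of the wrappers above (illustrative only; shapes
+# are shown because they stay stable across NumPy versions):
+#
+# >>> m = np.ma.array([1, 2, 3], mask=[0, 1, 0])
+# >>> np.ma.atleast_2d(m).shape       # data and mask are promoted together
+# (1, 3)
+# >>> np.ma.vstack([m, m]).shape      # the seq version stacks data and mask
+# (2, 3)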
+
+#####--------------------------------------------------------------------------
+#----
+#####--------------------------------------------------------------------------
+def flatten_inplace(seq):
+ """Flatten a sequence in place."""
+ k = 0
+ while (k != len(seq)):
+ while hasattr(seq[k], '__iter__'):
+ seq[k:(k + 1)] = seq[k]
+ k += 1
+ return seq
+
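+# Sketch of flatten_inplace on a hypothetical nested list (illustrative):
+# it splices iterables into the list until no element is iterable, so it
+# must not be fed strings, which would recurse indefinitely.
+#
+# >>> flatten_inplace([1, [2, [3, 4]], 5])
+# [1, 2, 3, 4, 5]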
+
+def apply_along_axis(func1d, axis, arr, *args, **kwargs):
+ """
+ (This docstring will be overwritten)
+ """
+ arr = array(arr, copy=False, subok=True)
+ nd = arr.ndim
+ axis = normalize_axis_index(axis, nd)
+ ind = [0] * (nd - 1)
+ i = np.zeros(nd, 'O')
+ indlist = list(range(nd))
+ indlist.remove(axis)
+ i[axis] = slice(None, None)
+ outshape = np.asarray(arr.shape).take(indlist)
+ i.put(indlist, ind)
+ res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
+ # if res is a number, then we have a smaller output array
+ asscalar = np.isscalar(res)
+ if not asscalar:
+ try:
+ len(res)
+ except TypeError:
+ asscalar = True
+ # Note: we shouldn't set the dtype of the output from the first result
+ # so we force the type to object, and build a list of dtypes. We'll
+ # just take the largest, to avoid some downcasting
+ dtypes = []
+ if asscalar:
+ dtypes.append(np.asarray(res).dtype)
+ outarr = zeros(outshape, object)
+ outarr[tuple(ind)] = res
+ Ntot = np.product(outshape)
+ k = 1
+ while k < Ntot:
+ # increment the index
+ ind[-1] += 1
+ n = -1
+ while (ind[n] >= outshape[n]) and (n > (1 - nd)):
+ ind[n - 1] += 1
+ ind[n] = 0
+ n -= 1
+ i.put(indlist, ind)
+ res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
+ outarr[tuple(ind)] = res
+ dtypes.append(asarray(res).dtype)
+ k += 1
+ else:
+ res = array(res, copy=False, subok=True)
+ j = i.copy()
+ j[axis] = ([slice(None, None)] * res.ndim)
+ j.put(indlist, ind)
+ Ntot = np.product(outshape)
+ holdshape = outshape
+ outshape = list(arr.shape)
+ outshape[axis] = res.shape
+ dtypes.append(asarray(res).dtype)
+ outshape = flatten_inplace(outshape)
+ outarr = zeros(outshape, object)
+ outarr[tuple(flatten_inplace(j.tolist()))] = res
+ k = 1
+ while k < Ntot:
+ # increment the index
+ ind[-1] += 1
+ n = -1
+ while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
+ ind[n - 1] += 1
+ ind[n] = 0
+ n -= 1
+ i.put(indlist, ind)
+ j.put(indlist, ind)
+ res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
+ outarr[tuple(flatten_inplace(j.tolist()))] = res
+ dtypes.append(asarray(res).dtype)
+ k += 1
+ max_dtypes = np.dtype(np.asarray(dtypes).max())
+ if not hasattr(arr, '_mask'):
+ result = np.asarray(outarr, dtype=max_dtypes)
+ else:
+ result = asarray(outarr, dtype=max_dtypes)
+ result.fill_value = ma.default_fill_value(result)
+ return result
+apply_along_axis.__doc__ = np.apply_along_axis.__doc__
+
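+# A short usage sketch (illustrative; .tolist() keeps the output stable):
+# masked entries are ignored by the masked reduction passed as func1d.
+#
+# >>> x = np.ma.arange(6).reshape(2, 3)
+# >>> x[0, 1] = np.ma.masked
+# >>> np.ma.apply_along_axis(np.ma.sum, 1, x).tolist()
+# [2, 12]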
+
+def apply_over_axes(func, a, axes):
+ """
+ (This docstring will be overwritten)
+ """
+ val = asarray(a)
+ N = a.ndim
+ if array(axes).ndim == 0:
+ axes = (axes,)
+ for axis in axes:
+ if axis < 0:
+ axis = N + axis
+ args = (val, axis)
+ res = func(*args)
+ if res.ndim == val.ndim:
+ val = res
+ else:
+ res = ma.expand_dims(res, axis)
+ if res.ndim == val.ndim:
+ val = res
+ else:
+ raise ValueError("function is not returning "
+ "an array of the correct shape")
+ return val
+
+
+if apply_over_axes.__doc__ is not None:
+ apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
+ :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
+ """
+
+ Examples
+ --------
+ >>> a = np.ma.arange(24).reshape(2,3,4)
+ >>> a[:,0,1] = np.ma.masked
+ >>> a[:,1,:] = np.ma.masked
+ >>> a
+ masked_array(
+ data=[[[0, --, 2, 3],
+ [--, --, --, --],
+ [8, 9, 10, 11]],
+ [[12, --, 14, 15],
+ [--, --, --, --],
+ [20, 21, 22, 23]]],
+ mask=[[[False, True, False, False],
+ [ True, True, True, True],
+ [False, False, False, False]],
+ [[False, True, False, False],
+ [ True, True, True, True],
+ [False, False, False, False]]],
+ fill_value=999999)
+ >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
+ masked_array(
+ data=[[[46],
+ [--],
+ [124]]],
+ mask=[[[False],
+ [ True],
+ [False]]],
+ fill_value=999999)
+
+ Tuple axis arguments to ufuncs are equivalent:
+
+ >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
+ masked_array(
+ data=[[[46],
+ [--],
+ [124]]],
+ mask=[[[False],
+ [ True],
+ [False]]],
+ fill_value=999999)
+ """
+
+
+def average(a, axis=None, weights=None, returned=False, *,
+ keepdims=np._NoValue):
+ """
+ Return the weighted average of an array over the given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ Data to be averaged.
+ Masked entries are not taken into account in the computation.
+ axis : int, optional
+ Axis along which to average `a`. If None, averaging is done over
+ the flattened array.
+ weights : array_like, optional
+ The importance that each element has in the computation of the average.
+ The weights array can either be 1-D (in which case its length must be
+ the size of `a` along the given axis) or of the same shape as `a`.
+ If ``weights=None``, then all data in `a` are assumed to have a
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
+ returned : bool, optional
+ Flag indicating whether a tuple ``(result, sum of weights)``
+ should be returned as output (True), or just the result (False).
+ Default is False.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ *Note:* `keepdims` will not work with instances of `numpy.matrix`
+ or other classes whose methods do not support `keepdims`.
+
+ .. versionadded:: 1.23.0
+
+ Returns
+ -------
+ average, [sum_of_weights] : (tuple of) scalar or MaskedArray
+ The average along the specified axis. When `returned` is True,
+ return a tuple with the average as the first element and the sum
+ of the weights as the second element. The return type is `np.float64`
+ if `a` is of integer type or of a float type smaller than `float64`;
+ otherwise it is the input data-type. When a tuple is returned,
+ `sum_of_weights` is always `float64`.
+
+ Examples
+ --------
+ >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
+ >>> np.ma.average(a, weights=[3, 1, 0, 0])
+ 1.25
+
+ >>> x = np.ma.arange(6.).reshape(3, 2)
+ >>> x
+ masked_array(
+ data=[[0., 1.],
+ [2., 3.],
+ [4., 5.]],
+ mask=False,
+ fill_value=1e+20)
+ >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
+ ... returned=True)
+ >>> avg
+ masked_array(data=[2.6666666666666665, 3.6666666666666665],
+ mask=[False, False],
+ fill_value=1e+20)
+
+ With ``keepdims=True``, the following result has shape (3, 1).
+
+ >>> np.ma.average(x, axis=1, keepdims=True)
+ masked_array(
+ data=[[0.5],
+ [2.5],
+ [4.5]],
+ mask=False,
+ fill_value=1e+20)
+ """
+ a = asarray(a)
+ m = getmask(a)
+
+ # inspired by 'average' in numpy/lib/function_base.py
+
+ if keepdims is np._NoValue:
+ # Don't pass on the keepdims argument if one wasn't given.
+ keepdims_kw = {}
+ else:
+ keepdims_kw = {'keepdims': keepdims}
+
+ if weights is None:
+ avg = a.mean(axis, **keepdims_kw)
+ scl = avg.dtype.type(a.count(axis))
+ else:
+ wgt = asarray(weights)
+
+ if issubclass(a.dtype.type, (np.integer, np.bool_)):
+ result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+ else:
+ result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+ # Sanity checks
+ if a.shape != wgt.shape:
+ if axis is None:
+ raise TypeError(
+ "Axis must be specified when shapes of a and weights "
+ "differ.")
+ if wgt.ndim != 1:
+ raise TypeError(
+ "1D weights expected when shapes of a and weights differ.")
+ if wgt.shape[0] != a.shape[axis]:
+ raise ValueError(
+ "Length of weights not compatible with specified axis.")
+
+ # setup wgt to broadcast along axis
+ wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True)
+ wgt = wgt.swapaxes(-1, axis)
+
+ if m is not nomask:
+ wgt = wgt*(~a.mask)
+ wgt.mask |= a.mask
+
+ scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
+ avg = np.multiply(a, wgt,
+ dtype=result_dtype).sum(axis, **keepdims_kw) / scl
+
+ if returned:
+ if scl.shape != avg.shape:
+ scl = np.broadcast_to(scl, avg.shape).copy()
+ return avg, scl
+ else:
+ return avg
+
+
+def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+ """
+ Compute the median along the specified axis.
+
+ Returns the median of the array elements.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array or object that can be converted to an array.
+ axis : int, optional
+ Axis along which the medians are computed. The default (None) is
+ to compute the median along a flattened version of the array.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary.
+ overwrite_input : bool, optional
+ If True, then allow use of memory of input array (a) for
+ calculations. The input array will be modified by the call to
+ median. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. Note that, if `overwrite_input` is True, and the input
+ is not already an `ndarray`, an error will be raised.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the input array.
+
+ .. versionadded:: 1.10.0
+
+ Returns
+ -------
+ median : ndarray
+ A new array holding the result is returned unless out is
+ specified, in which case a reference to out is returned.
+ The return data-type is `float64` for integers and for floats smaller
+ than `float64`; otherwise it is the input data-type.
+
+ See Also
+ --------
+ mean
+
+ Notes
+ -----
+ Given a vector ``V`` with ``N`` non-masked values, the median of ``V``
+ is the middle value of a sorted copy of ``V`` (call it ``Vs``), i.e.
+ ``Vs[(N-1)/2]`` when ``N`` is odd, or ``(Vs[N/2 - 1] + Vs[N/2])/2``
+ when ``N`` is even.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
+ >>> np.ma.median(x)
+ 1.5
+
+ >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+ >>> np.ma.median(x)
+ 2.5
+ >>> np.ma.median(x, axis=-1, overwrite_input=True)
+ masked_array(data=[2.0, 5.0],
+ mask=[False, False],
+ fill_value=1e+20)
+
+ """
+ if not hasattr(a, 'mask'):
+ m = np.median(getdata(a, subok=True), axis=axis,
+ out=out, overwrite_input=overwrite_input,
+ keepdims=keepdims)
+ if isinstance(m, np.ndarray) and 1 <= m.ndim:
+ return masked_array(m, copy=False)
+ else:
+ return m
+
+ return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
+ overwrite_input=overwrite_input)
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+ # when an unmasked NaN is present return it, so we need to sort the NaN
+ # values behind the mask
+ if np.issubdtype(a.dtype, np.inexact):
+ fill_value = np.inf
+ else:
+ fill_value = None
+ if overwrite_input:
+ if axis is None:
+ asorted = a.ravel()
+ asorted.sort(fill_value=fill_value)
+ else:
+ a.sort(axis=axis, fill_value=fill_value)
+ asorted = a
+ else:
+ asorted = sort(a, axis=axis, fill_value=fill_value)
+
+ if axis is None:
+ axis = 0
+ else:
+ axis = normalize_axis_index(axis, asorted.ndim)
+
+ if asorted.shape[axis] == 0:
+ # for empty axis integer indices fail so use slicing to get same result
+ # as median (which is mean of empty slice = nan)
+ indexer = [slice(None)] * asorted.ndim
+ indexer[axis] = slice(0, 0)
+ indexer = tuple(indexer)
+ return np.ma.mean(asorted[indexer], axis=axis, out=out)
+
+ if asorted.ndim == 1:
+ idx, odd = divmod(count(asorted), 2)
+ mid = asorted[idx + odd - 1:idx + 1]
+ if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
+ # avoid inf / x = masked
+ s = mid.sum(out=out)
+ if not odd:
+ s = np.true_divide(s, 2., casting='safe', out=out)
+ s = np.lib.utils._median_nancheck(asorted, s, axis)
+ else:
+ s = mid.mean(out=out)
+
+ # if result is masked either the input contained enough
+ # minimum_fill_value so that it would be the median or all values
+ # masked
+ if np.ma.is_masked(s) and not np.all(asorted.mask):
+ return np.ma.minimum_fill_value(asorted)
+ return s
+
+ counts = count(asorted, axis=axis, keepdims=True)
+ h = counts // 2
+
+ # duplicate high if odd number of elements so mean does nothing
+ odd = counts % 2 == 1
+ l = np.where(odd, h, h-1)
+
+ lh = np.concatenate([l,h], axis=axis)
+
+ # get low and high median
+ low_high = np.take_along_axis(asorted, lh, axis=axis)
+
+ def replace_masked(s):
+ # Replace masked entries with minimum_fill_value unless all values
+ # are masked. This is required because the sort order of values equal
+ # to or larger than the fill value is undefined, so a valid value may
+ # otherwise end up elsewhere, e.g. [4, --, inf].
+ if np.ma.is_masked(s):
+ rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
+ s.data[rep] = np.ma.minimum_fill_value(asorted)
+ s.mask[rep] = False
+
+ replace_masked(low_high)
+
+ if np.issubdtype(asorted.dtype, np.inexact):
+ # avoid inf / x = masked
+ s = np.ma.sum(low_high, axis=axis, out=out)
+ np.true_divide(s.data, 2., casting='unsafe', out=s.data)
+
+ s = np.lib.utils._median_nancheck(asorted, s, axis)
+ else:
+ s = np.ma.mean(low_high, axis=axis, out=out)
+
+ return s
+
+
+def compress_nd(x, axis=None):
+ """Suppress slices from multiple dimensions which contain masked values.
+
+ Parameters
+ ----------
+ x : array_like, MaskedArray
+ The array to operate on. If not a MaskedArray instance (or if no array
+ elements are masked), `x` is interpreted as a MaskedArray with `mask`
+ set to `nomask`.
+ axis : tuple of ints or int, optional
+ Which dimensions to suppress slices from can be configured with this
+ parameter.
+ - If axis is a tuple of ints, those are the axes to suppress slices from.
+ - If axis is an int, then that is the only axis to suppress slices from.
+ - If axis is None, all axes are selected.
+
+ Returns
+ -------
+ compress_array : ndarray
+ The compressed array.
+ """
+ x = asarray(x)
+ m = getmask(x)
+ # Set axis to tuple of ints
+ if axis is None:
+ axis = tuple(range(x.ndim))
+ else:
+ axis = normalize_axis_tuple(axis, x.ndim)
+
+ # Nothing is masked: return x
+ if m is nomask or not m.any():
+ return x._data
+ # All is masked: return empty
+ if m.all():
+ return nxarray([])
+ # Filter elements through boolean indexing
+ data = x._data
+ for ax in axis:
+ axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
+ data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
+ return data
+
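+# Sketch (illustrative): dropping every slice along axis 0 that contains
+# a masked element.
+#
+# >>> y = np.ma.arange(4).reshape(2, 2)
+# >>> y[0, 0] = np.ma.masked
+# >>> np.ma.compress_nd(y, axis=0)
+# array([[2, 3]])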
+
+def compress_rowcols(x, axis=None):
+ """
+ Suppress the rows and/or columns of a 2-D array that contain
+ masked values.
+
+ The suppression behavior is selected with the `axis` parameter.
+
+ - If axis is None, both rows and columns are suppressed.
+ - If axis is 0, only rows are suppressed.
+ - If axis is 1 or -1, only columns are suppressed.
+
+ Parameters
+ ----------
+ x : array_like, MaskedArray
+ The array to operate on. If not a MaskedArray instance (or if no array
+ elements are masked), `x` is interpreted as a MaskedArray with
+ `mask` set to `nomask`. Must be a 2D array.
+ axis : int, optional
+ Axis along which to perform the operation. Default is None.
+
+ Returns
+ -------
+ compressed_array : ndarray
+ The compressed array.
+
+ Examples
+ --------
+ >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
+ ... [1, 0, 0],
+ ... [0, 0, 0]])
+ >>> x
+ masked_array(
+ data=[[--, 1, 2],
+ [--, 4, 5],
+ [6, 7, 8]],
+ mask=[[ True, False, False],
+ [ True, False, False],
+ [False, False, False]],
+ fill_value=999999)
+
+ >>> np.ma.compress_rowcols(x)
+ array([[7, 8]])
+ >>> np.ma.compress_rowcols(x, 0)
+ array([[6, 7, 8]])
+ >>> np.ma.compress_rowcols(x, 1)
+ array([[1, 2],
+ [4, 5],
+ [7, 8]])
+
+ """
+ if asarray(x).ndim != 2:
+ raise NotImplementedError("compress_rowcols works for 2D arrays only.")
+ return compress_nd(x, axis=axis)
+
+
+def compress_rows(a):
+ """
+ Suppress whole rows of a 2-D array that contain masked values.
+
+ This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
+ `compress_rowcols` for details.
+
+ See Also
+ --------
+ compress_rowcols
+
+ """
+ a = asarray(a)
+ if a.ndim != 2:
+ raise NotImplementedError("compress_rows works for 2D arrays only.")
+ return compress_rowcols(a, 0)
+
+
+def compress_cols(a):
+ """
+ Suppress whole columns of a 2-D array that contain masked values.
+
+ This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
+ `compress_rowcols` for details.
+
+ See Also
+ --------
+ compress_rowcols
+
+ """
+ a = asarray(a)
+ if a.ndim != 2:
+ raise NotImplementedError("compress_cols works for 2D arrays only.")
+ return compress_rowcols(a, 1)
+
+
+def mask_rows(a, axis=np._NoValue):
+ """
+ Mask rows of a 2D array that contain masked values.
+
+ This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
+
+ See Also
+ --------
+ mask_rowcols : Mask rows and/or columns of a 2D array.
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.zeros((3, 3), dtype=int)
+ >>> a[1, 1] = 1
+ >>> a
+ array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]])
+ >>> a = ma.masked_equal(a, 1)
+ >>> a
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
+
+ >>> ma.mask_rows(a)
+ masked_array(
+ data=[[0, 0, 0],
+ [--, --, --],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [ True, True, True],
+ [False, False, False]],
+ fill_value=1)
+
+ """
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
+ return mask_rowcols(a, 0)
+
+
+def mask_cols(a, axis=np._NoValue):
+ """
+ Mask columns of a 2D array that contain masked values.
+
+ This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
+
+ See Also
+ --------
+ mask_rowcols : Mask rows and/or columns of a 2D array.
+ masked_where : Mask where a condition is met.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.zeros((3, 3), dtype=int)
+ >>> a[1, 1] = 1
+ >>> a
+ array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]])
+ >>> a = ma.masked_equal(a, 1)
+ >>> a
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
+ >>> ma.mask_cols(a)
+ masked_array(
+ data=[[0, --, 0],
+ [0, --, 0],
+ [0, --, 0]],
+ mask=[[False, True, False],
+ [False, True, False],
+ [False, True, False]],
+ fill_value=1)
+
+ """
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
+ return mask_rowcols(a, 1)
+
+
+#####--------------------------------------------------------------------------
+#---- --- arraysetops ---
+#####--------------------------------------------------------------------------
+
+def ediff1d(arr, to_end=None, to_begin=None):
+ """
+ Compute the differences between consecutive elements of an array.
+
+ This function is the equivalent of `numpy.ediff1d` that takes masked
+ values into account, see `numpy.ediff1d` for details.
+
+ See Also
+ --------
+ numpy.ediff1d : Equivalent function for ndarrays.
+
+ """
+ arr = ma.asanyarray(arr).flat
+ ed = arr[1:] - arr[:-1]
+ arrays = [ed]
+ #
+ if to_begin is not None:
+ arrays.insert(0, to_begin)
+ if to_end is not None:
+ arrays.append(to_end)
+ #
+ if len(arrays) != 1:
+ # We'll save ourselves a copy of a potentially large array in the common
+ # case where neither to_begin nor to_end was given.
+ ed = hstack(arrays)
+ #
+ return ed
+
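+# Sketch (illustrative; .tolist() renders masked entries as None): a
+# difference is masked whenever either neighbour is masked.
+#
+# >>> z = np.ma.array([1, 2, 4, 7], mask=[0, 1, 0, 0])
+# >>> np.ma.ediff1d(z).tolist()
+# [None, None, 3]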
+
+def unique(ar1, return_index=False, return_inverse=False):
+ """
+ Finds the unique elements of an array.
+
+ Masked values are considered the same element (masked). The output array
+ is always a masked array. See `numpy.unique` for more details.
+
+ See Also
+ --------
+ numpy.unique : Equivalent function for ndarrays.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = [1, 2, 1000, 2, 3]
+ >>> mask = [0, 0, 1, 0, 0]
+ >>> masked_a = ma.masked_array(a, mask)
+ >>> masked_a
+ masked_array(data=[1, 2, --, 2, 3],
+ mask=[False, False, True, False, False],
+ fill_value=999999)
+ >>> ma.unique(masked_a)
+ masked_array(data=[1, 2, 3, --],
+ mask=[False, False, False, True],
+ fill_value=999999)
+ >>> ma.unique(masked_a, return_index=True)
+ (masked_array(data=[1, 2, 3, --],
+ mask=[False, False, False, True],
+ fill_value=999999), array([0, 1, 4, 2]))
+ >>> ma.unique(masked_a, return_inverse=True)
+ (masked_array(data=[1, 2, 3, --],
+ mask=[False, False, False, True],
+ fill_value=999999), array([0, 1, 3, 1, 2]))
+ >>> ma.unique(masked_a, return_index=True, return_inverse=True)
+ (masked_array(data=[1, 2, 3, --],
+ mask=[False, False, False, True],
+ fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2]))
+ """
+ output = np.unique(ar1,
+ return_index=return_index,
+ return_inverse=return_inverse)
+ if isinstance(output, tuple):
+ output = list(output)
+ output[0] = output[0].view(MaskedArray)
+ output = tuple(output)
+ else:
+ output = output.view(MaskedArray)
+ return output
+
+
+def intersect1d(ar1, ar2, assume_unique=False):
+ """
+ Returns the unique elements common to both arrays.
+
+ Masked values are considered equal to one another.
+ The output is always a masked array.
+
+ See `numpy.intersect1d` for more details.
+
+ See Also
+ --------
+ numpy.intersect1d : Equivalent function for ndarrays.
+
+ Examples
+ --------
+ >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+ >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+ >>> np.ma.intersect1d(x, y)
+ masked_array(data=[1, 3, --],
+ mask=[False, False, True],
+ fill_value=999999)
+
+ """
+ if assume_unique:
+ aux = ma.concatenate((ar1, ar2))
+ else:
+ # Might be faster than unique( intersect1d( ar1, ar2 ) )?
+ aux = ma.concatenate((unique(ar1), unique(ar2)))
+ aux.sort()
+ return aux[:-1][aux[1:] == aux[:-1]]
+
+
+def setxor1d(ar1, ar2, assume_unique=False):
+ """
+ Set exclusive-or of 1-D arrays with unique elements.
+
+ The output is always a masked array. See `numpy.setxor1d` for more details.
+
+ See Also
+ --------
+ numpy.setxor1d : Equivalent function for ndarrays.
+
+ """
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+
+ aux = ma.concatenate((ar1, ar2))
+ if aux.size == 0:
+ return aux
+ aux.sort()
+ auxf = aux.filled()
+# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
+ flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
+# flag2 = ediff1d( flag ) == 0
+ flag2 = (flag[1:] == flag[:-1])
+ return aux[flag2]
+
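+# Sketch (illustrative): elements in exactly one of the two arrays.
+#
+# >>> np.ma.setxor1d(np.ma.array([1, 2, 3]), np.ma.array([2, 3, 4])).tolist()
+# [1, 4]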
+
+def in1d(ar1, ar2, assume_unique=False, invert=False):
+ """
+ Test whether each element of an array is also present in a second
+ array.
+
+ The output is always a masked array. See `numpy.in1d` for more details.
+
+ We recommend using :func:`isin` instead of `in1d` for new code.
+
+ See Also
+ --------
+ isin : Version of this function that preserves the shape of ar1.
+ numpy.in1d : Equivalent function for ndarrays.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+ if not assume_unique:
+ ar1, rev_idx = unique(ar1, return_inverse=True)
+ ar2 = unique(ar2)
+
+ ar = ma.concatenate((ar1, ar2))
+ # We need this to be a stable sort, so always use 'mergesort'
+ # here. The values from the first array should always come before
+ # the values from the second array.
+ order = ar.argsort(kind='mergesort')
+ sar = ar[order]
+ if invert:
+ bool_ar = (sar[1:] != sar[:-1])
+ else:
+ bool_ar = (sar[1:] == sar[:-1])
+ flag = ma.concatenate((bool_ar, [invert]))
+ indx = order.argsort(kind='mergesort')[:len(ar1)]
+
+ if assume_unique:
+ return flag[indx]
+ else:
+ return flag[indx][rev_idx]
+
+
+def isin(element, test_elements, assume_unique=False, invert=False):
+ """
+ Calculates `element in test_elements`, broadcasting over
+ `element` only.
+
+ The output is always a masked array of the same shape as `element`.
+ See `numpy.isin` for more details.
+
+ See Also
+ --------
+ in1d : Flattened version of this function.
+ numpy.isin : Equivalent function for ndarrays.
+
+ Notes
+ -----
+ .. versionadded:: 1.13.0
+
+ """
+ element = ma.asarray(element)
+ return in1d(element, test_elements, assume_unique=assume_unique,
+ invert=invert).reshape(element.shape)
+
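+# Sketch (illustrative): isin keeps the shape of `element`, unlike in1d.
+#
+# >>> np.ma.isin(np.ma.array([[0, 1], [2, 5]]), [1, 5]).tolist()
+# [[False, True], [False, True]]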
+
+def union1d(ar1, ar2):
+ """
+ Union of two arrays.
+
+ The output is always a masked array. See `numpy.union1d` for more details.
+
+ See Also
+ --------
+ numpy.union1d : Equivalent function for ndarrays.
+
+ """
+ return unique(ma.concatenate((ar1, ar2), axis=None))
+
+
+def setdiff1d(ar1, ar2, assume_unique=False):
+ """
+ Set difference of 1D arrays with unique elements.
+
+ The output is always a masked array. See `numpy.setdiff1d` for more
+ details.
+
+ See Also
+ --------
+ numpy.setdiff1d : Equivalent function for ndarrays.
+
+ Examples
+ --------
+ >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
+ >>> np.ma.setdiff1d(x, [1, 2])
+ masked_array(data=[3, --],
+ mask=[False, True],
+ fill_value=999999)
+
+ """
+ if assume_unique:
+ ar1 = ma.asarray(ar1).ravel()
+ else:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
+
+
+###############################################################################
+# Covariance #
+###############################################################################
+
+
+def _covhelper(x, y=None, rowvar=True, allow_masked=True):
+ """
+ Private function for the computation of covariance and correlation
+ coefficients.
+
+ """
+ x = ma.array(x, ndmin=2, copy=True, dtype=float)
+ xmask = ma.getmaskarray(x)
+ # Quick exit if we can't process masked data
+ if not allow_masked and xmask.any():
+ raise ValueError("Cannot process masked data.")
+ #
+ if x.shape[0] == 1:
+ rowvar = True
+ # Make sure that rowvar is either 0 or 1
+ rowvar = int(bool(rowvar))
+ axis = 1 - rowvar
+ if rowvar:
+ tup = (slice(None), None)
+ else:
+ tup = (None, slice(None))
+ #
+ if y is None:
+ xnotmask = np.logical_not(xmask).astype(int)
+ else:
+ y = array(y, copy=False, ndmin=2, dtype=float)
+ ymask = ma.getmaskarray(y)
+ if not allow_masked and ymask.any():
+ raise ValueError("Cannot process masked data.")
+ if xmask.any() or ymask.any():
+ if y.shape == x.shape:
+ # Define some common mask
+ common_mask = np.logical_or(xmask, ymask)
+ if common_mask is not nomask:
+ xmask = x._mask = y._mask = ymask = common_mask
+ x._sharedmask = False
+ y._sharedmask = False
+ x = ma.concatenate((x, y), axis)
+ xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
+ x -= x.mean(axis=rowvar)[tup]
+ return (x, xnotmask, rowvar)
+
+
+def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
+ """
+ Estimate the covariance matrix.
+
+ Except for the handling of missing data this function does the same as
+ `numpy.cov`. For more details and examples, see `numpy.cov`.
+
+ By default, masked values are recognized as such. If `x` and `y` have the
+ same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
+ ``y[i,j]`` will also be masked.
+ Setting `allow_masked` to False will raise an exception if values are
+ missing in either of the input arrays.
+
+ Parameters
+ ----------
+ x : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `x` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same
+ shape as `x`.
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : bool, optional
+ Default normalization (False) is by ``(N-1)``, where ``N`` is the
+ number of observations given (unbiased estimate). If `bias` is True,
+ then normalization is by ``N``. This keyword can be overridden by
+ the keyword ``ddof`` in numpy versions >= 1.5.
+ allow_masked : bool, optional
+ If True, masked values are propagated pair-wise: if a value is masked
+ in `x`, the corresponding value is masked in `y`.
+ If False, raises a `ValueError` exception when some values are missing.
+ ddof : {None, int}, optional
+ If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
+ the number of observations; this overrides the value implied by
+ ``bias``. The default value is ``None``.
+
+ .. versionadded:: 1.5
+
+ Raises
+ ------
+ ValueError
+ Raised if some values are missing and `allow_masked` is False.
+
+ See Also
+ --------
+ numpy.cov
+
+ """
+ # Check inputs
+ if ddof is not None and ddof != int(ddof):
+ raise ValueError("ddof must be an integer")
+ # Set up ddof
+ if ddof is None:
+ if bias:
+ ddof = 0
+ else:
+ ddof = 1
+
+ (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
+ if not rowvar:
+ fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
+ result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
+ else:
+ fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
+ result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
+ return result
+
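+# Sketch (illustrative, no masked values, so it matches numpy.cov): two
+# perfectly correlated rows give unit (co)variances with the default ddof=1.
+#
+# >>> np.ma.cov(np.ma.array([[0., 1., 2.], [3., 4., 5.]])).tolist()
+# [[1.0, 1.0], [1.0, 1.0]]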
+
+def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
+ ddof=np._NoValue):
+ """
+ Return Pearson product-moment correlation coefficients.
+
+ Except for the handling of missing data this function does the same as
+ `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
+
+ Parameters
+ ----------
+ x : array_like
+ A 1-D or 2-D array containing multiple variables and observations.
+ Each row of `x` represents a variable, and each column a single
+ observation of all those variables. Also see `rowvar` below.
+ y : array_like, optional
+ An additional set of variables and observations. `y` has the same
+ shape as `x`.
+ rowvar : bool, optional
+ If `rowvar` is True (default), then each row represents a
+ variable, with observations in the columns. Otherwise, the relationship
+ is transposed: each column represents a variable, while the rows
+ contain observations.
+ bias : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.10.0
+ allow_masked : bool, optional
+ If True, masked values are propagated pair-wise: if a value is masked
+ in `x`, the corresponding value is masked in `y`.
+ If False, raises an exception. Because `bias` is deprecated, this
+ argument needs to be treated as keyword only to avoid a warning.
+ ddof : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.10.0
+
+ See Also
+ --------
+ numpy.corrcoef : Equivalent function in top-level NumPy module.
+ cov : Estimate the covariance matrix.
+
+ Notes
+ -----
+ This function accepts but discards arguments `bias` and `ddof`. This is
+ for backwards compatibility with previous versions of this function. These
+ arguments had no effect on the return values of the function and can be
+ safely ignored in this and previous versions of numpy.
+ """
+ msg = 'bias and ddof have no effect and are deprecated'
+ if bias is not np._NoValue or ddof is not np._NoValue:
+ # 2015-03-15, 1.10
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ # Get the data
+ (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
+ # Compute the covariance matrix
+ if not rowvar:
+ fact = np.dot(xnotmask.T, xnotmask) * 1.
+ c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
+ else:
+ fact = np.dot(xnotmask, xnotmask.T) * 1.
+ c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
+ # Check whether we have a scalar
+ try:
+ diag = ma.diagonal(c)
+ except ValueError:
+ return 1
+ #
+ if xnotmask.all():
+ _denom = ma.sqrt(ma.multiply.outer(diag, diag))
+ else:
+ _denom = diagflat(diag)
+ _denom._sharedmask = False # We know return is always a copy
+ n = x.shape[1 - rowvar]
+ if rowvar:
+ for i in range(n - 1):
+ for j in range(i + 1, n):
+ _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
+ _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
+ else:
+ for i in range(n - 1):
+ for j in range(i + 1, n):
+ _x = mask_cols(
+ vstack((x[:, i], x[:, j]))).var(axis=1)
+ _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
+ return c / _denom
+
+#####--------------------------------------------------------------------------
+#---- --- Concatenation helpers ---
+#####--------------------------------------------------------------------------
+
+class MAxisConcatenator(AxisConcatenator):
+ """
+ Translate slice objects to concatenation along an axis.
+
+ For documentation on usage, see `mr_class`.
+
+ See Also
+ --------
+ mr_class
+
+ """
+ concatenate = staticmethod(concatenate)
+
+ @classmethod
+ def makemat(cls, arr):
+ # There used to be a view as np.matrix here, but we may eventually
+ # deprecate that class. In preparation, we use the unmasked version
+ # to construct the matrix (with copy=False for backwards compatibility
+ # with the .view)
+ data = super().makemat(arr.data, copy=False)
+ return array(data, mask=arr.mask)
+
+ def __getitem__(self, key):
+ # matrix builder syntax, like 'a, b; c, d'
+ if isinstance(key, str):
+ raise MAError("Unavailable for masked array.")
+
+ return super().__getitem__(key)
+
+
+class mr_class(MAxisConcatenator):
+ """
+ Translate slice objects to concatenation along the first axis.
+
+ This is the masked array version of `lib.index_tricks.RClass`.
+
+ See Also
+ --------
+ lib.index_tricks.RClass
+
+ Examples
+ --------
+ >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
+ masked_array(data=[1, 2, 3, ..., 4, 5, 6],
+ mask=False,
+ fill_value=999999)
+
+ """
+ def __init__(self):
+ MAxisConcatenator.__init__(self, 0)
+
+mr_ = mr_class()
+
+
+#####--------------------------------------------------------------------------
+#---- Find unmasked data ---
+#####--------------------------------------------------------------------------
+
+def ndenumerate(a, compressed=True):
+ """
+ Multidimensional index iterator.
+
+ Return an iterator yielding pairs of array coordinates and values,
+ skipping elements that are masked. With `compressed=False`,
+ `ma.masked` is yielded as the value of masked elements. This
+ behavior differs from that of `numpy.ndenumerate`, which yields the
+ value of the underlying data array.
+
+ Notes
+ -----
+ .. versionadded:: 1.23.0
+
+ Parameters
+ ----------
+ a : array_like
+ An array with (possibly) masked elements.
+ compressed : bool, optional
+ If True (default), masked elements are skipped.
+
+ See Also
+ --------
+ numpy.ndenumerate : Equivalent function ignoring any mask.
+
+ Examples
+ --------
+ >>> a = np.ma.arange(9).reshape((3, 3))
+ >>> a[1, 0] = np.ma.masked
+ >>> a[1, 2] = np.ma.masked
+ >>> a[2, 1] = np.ma.masked
+ >>> a
+ masked_array(
+ data=[[0, 1, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, False, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> for index, x in np.ma.ndenumerate(a):
+ ... print(index, x)
+ (0, 0) 0
+ (0, 1) 1
+ (0, 2) 2
+ (1, 1) 4
+ (2, 0) 6
+ (2, 2) 8
+
+ >>> for index, x in np.ma.ndenumerate(a, compressed=False):
+ ... print(index, x)
+ (0, 0) 0
+ (0, 1) 1
+ (0, 2) 2
+ (1, 0) --
+ (1, 1) 4
+ (1, 2) --
+ (2, 0) 6
+ (2, 1) --
+ (2, 2) 8
+ """
+ for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat):
+ if not mask:
+ yield it
+ elif not compressed:
+ yield it[0], masked
+
+
+def flatnotmasked_edges(a):
+ """
+ Find the indices of the first and last unmasked values.
+
+ Expects a 1-D `MaskedArray`, returns None if all values are masked.
+
+ Parameters
+ ----------
+ a : array_like
+ Input 1-D `MaskedArray`
+
+ Returns
+ -------
+ edges : ndarray or None
+ The indices of first and last non-masked value in the array.
+ Returns None if all values are masked.
+
+ See Also
+ --------
+ flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
+ clump_masked, clump_unmasked
+
+ Notes
+ -----
+ Only accepts 1-D arrays.
+
+ Examples
+ --------
+ >>> a = np.ma.arange(10)
+ >>> np.ma.flatnotmasked_edges(a)
+ array([0, 9])
+
+ >>> mask = (a < 3) | (a > 8) | (a == 5)
+ >>> a[mask] = np.ma.masked
+ >>> np.array(a[~a.mask])
+ array([3, 4, 6, 7, 8])
+
+ >>> np.ma.flatnotmasked_edges(a)
+ array([3, 8])
+
+ >>> a[:] = np.ma.masked
+ >>> print(np.ma.flatnotmasked_edges(a))
+ None
+
+ """
+ m = getmask(a)
+ if m is nomask or not np.any(m):
+ return np.array([0, a.size - 1])
+ unmasked = np.flatnonzero(~m)
+ if len(unmasked) > 0:
+ return unmasked[[0, -1]]
+ else:
+ return None
+
+
+def notmasked_edges(a, axis=None):
+ """
+ Find the indices of the first and last unmasked values along an axis.
+
+ If all values are masked, return None. Otherwise, return a list
+ of two tuples, corresponding to the indices of the first and last
+ unmasked values respectively.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array.
+ axis : int, optional
+ Axis along which to perform the operation.
+ If None (default), applies to a flattened version of the array.
+
+ Returns
+ -------
+ edges : ndarray or list
+ An array of start and end indexes if there are any masked data in
+ the array. If there are no masked data in the array, `edges` is a
+ list of the first and last index.
+
+ See Also
+ --------
+ flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
+ clump_masked, clump_unmasked
+
+ Examples
+ --------
+ >>> a = np.arange(9).reshape((3, 3))
+ >>> m = np.zeros_like(a)
+ >>> m[1:, 1:] = 1
+
+ >>> am = np.ma.array(a, mask=m)
+ >>> np.array(am[~am.mask])
+ array([0, 1, 2, 3, 6])
+
+ >>> np.ma.notmasked_edges(am)
+ array([0, 6])
+
+ """
+ a = asarray(a)
+ if axis is None or a.ndim == 1:
+ return flatnotmasked_edges(a)
+ m = getmaskarray(a)
+ idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
+ return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
+ tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
+
+
+def flatnotmasked_contiguous(a):
+ """
+ Find contiguous unmasked data in a masked array.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array.
+
+ Returns
+ -------
+ slice_list : list
+ A sorted sequence of `slice` objects (start index, end index).
+
+ .. versionchanged:: 1.15.0
+ Now returns an empty list instead of None for a fully masked array
+
+ See Also
+ --------
+ flatnotmasked_edges, notmasked_contiguous, notmasked_edges
+ clump_masked, clump_unmasked
+
+ Notes
+ -----
+ Only accepts 2-D arrays at most.
+
+ Examples
+ --------
+ >>> a = np.ma.arange(10)
+ >>> np.ma.flatnotmasked_contiguous(a)
+ [slice(0, 10, None)]
+
+ >>> mask = (a < 3) | (a > 8) | (a == 5)
+ >>> a[mask] = np.ma.masked
+ >>> np.array(a[~a.mask])
+ array([3, 4, 6, 7, 8])
+
+ >>> np.ma.flatnotmasked_contiguous(a)
+ [slice(3, 5, None), slice(6, 9, None)]
+ >>> a[:] = np.ma.masked
+ >>> np.ma.flatnotmasked_contiguous(a)
+ []
+
+ """
+ m = getmask(a)
+ if m is nomask:
+ return [slice(0, a.size)]
+ i = 0
+ result = []
+ for (k, g) in itertools.groupby(m.ravel()):
+ n = len(list(g))
+ if not k:
+ result.append(slice(i, i + n))
+ i += n
+ return result
+
+
+def notmasked_contiguous(a, axis=None):
+ """
+ Find contiguous unmasked data in a masked array along the given axis.
+
+ Parameters
+ ----------
+ a : array_like
+ The input array.
+ axis : int, optional
+ Axis along which to perform the operation.
+ If None (default), applies to a flattened version of the array, and this
+ is the same as `flatnotmasked_contiguous`.
+
+ Returns
+ -------
+ endpoints : list
+ A list of slices (start and end indexes) of unmasked indexes
+ in the array.
+
+ If the input is 2d and axis is specified, the result is a list of lists.
+
+ See Also
+ --------
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+ clump_masked, clump_unmasked
+
+ Notes
+ -----
+ Only accepts 2-D arrays at most.
+
+ Examples
+ --------
+ >>> a = np.arange(12).reshape((3, 4))
+ >>> mask = np.zeros_like(a)
+ >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
+ >>> ma = np.ma.array(a, mask=mask)
+ >>> ma
+ masked_array(
+ data=[[0, --, 2, 3],
+ [--, --, --, 7],
+ [8, --, --, 11]],
+ mask=[[False, True, False, False],
+ [ True, True, True, False],
+ [False, True, True, False]],
+ fill_value=999999)
+ >>> np.array(ma[~ma.mask])
+ array([ 0, 2, 3, 7, 8, 11])
+
+ >>> np.ma.notmasked_contiguous(ma)
+ [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
+
+ >>> np.ma.notmasked_contiguous(ma, axis=0)
+ [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
+
+ >>> np.ma.notmasked_contiguous(ma, axis=1)
+ [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
+
+ """
+ a = asarray(a)
+ nd = a.ndim
+ if nd > 2:
+ raise NotImplementedError("Currently limited to at most 2D array.")
+ if axis is None or nd == 1:
+ return flatnotmasked_contiguous(a)
+ #
+ result = []
+ #
+ other = (axis + 1) % 2
+ idx = [0, 0]
+ idx[axis] = slice(None, None)
+ #
+ for i in range(a.shape[other]):
+ idx[other] = i
+ result.append(flatnotmasked_contiguous(a[tuple(idx)]))
+ return result
+
+
+def _ezclump(mask):
+ """
+ Finds the clumps (groups of data with the same values) for a 1D bool array.
+
+ Returns a series of slices.
+ """
+ if mask.ndim > 1:
+ mask = mask.ravel()
+ idx = (mask[1:] ^ mask[:-1]).nonzero()
+ idx = idx[0] + 1
+
+ if mask[0]:
+ if len(idx) == 0:
+ return [slice(0, mask.size)]
+
+ r = [slice(0, idx[0])]
+ r.extend((slice(left, right)
+ for left, right in zip(idx[1:-1:2], idx[2::2])))
+ else:
+ if len(idx) == 0:
+ return []
+
+ r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
+
+ if mask[-1]:
+ r.append(slice(idx[-1], mask.size))
+ return r
+
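+# Sketch (illustrative): clumps of True values in a boolean mask.
+#
+# >>> _ezclump(np.array([True, True, False, True]))
+# [slice(0, 2, None), slice(3, 4, None)]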
+
+def clump_unmasked(a):
+ """
+ Return list of slices corresponding to the unmasked clumps of a 1-D array.
+ (A "clump" is defined as a contiguous region of the array).
+
+ Parameters
+ ----------
+ a : ndarray
+ A one-dimensional masked array.
+
+ Returns
+ -------
+ slices : list of slice
+ The list of slices, one for each continuous region of unmasked
+ elements in `a`.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ See Also
+ --------
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+ notmasked_contiguous, clump_masked
+
+ Examples
+ --------
+ >>> a = np.ma.masked_array(np.arange(10))
+ >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+ >>> np.ma.clump_unmasked(a)
+ [slice(3, 6, None), slice(7, 8, None)]
+
+ """
+ mask = getattr(a, '_mask', nomask)
+ if mask is nomask:
+ return [slice(0, a.size)]
+ return _ezclump(~mask)
+
+
+def clump_masked(a):
+ """
+ Returns a list of slices corresponding to the masked clumps of a 1-D array.
+ (A "clump" is defined as a contiguous region of the array).
+
+ Parameters
+ ----------
+ a : ndarray
+ A one-dimensional masked array.
+
+ Returns
+ -------
+ slices : list of slice
+ The list of slices, one for each continuous region of masked elements
+ in `a`.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ See Also
+ --------
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
+ notmasked_contiguous, clump_unmasked
+
+ Examples
+ --------
+ >>> a = np.ma.masked_array(np.arange(10))
+ >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
+ >>> np.ma.clump_masked(a)
+ [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
+
+ """
+ mask = ma.getmask(a)
+ if mask is nomask:
+ return []
+ return _ezclump(mask)
+
+
+###############################################################################
+# Polynomial fit #
+###############################################################################
+
+
+def vander(x, n=None):
+ """
+ Masked values in the input array result in rows of zeros.
+
+ """
+ _vander = np.vander(x, n)
+ m = getmask(x)
+ if m is not nomask:
+ _vander[m] = 0
+ return _vander
+
+vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
+
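+# Sketch (illustrative; .tolist() for a stable repr): the masked middle
+# entry zeroes out the corresponding Vandermonde row.
+#
+# >>> np.ma.vander(np.ma.array([1, 2, 3], mask=[0, 1, 0]), 3).tolist()
+# [[1, 1, 1], [0, 0, 0], [9, 3, 1]]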
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
+ """
+ Any masked values in x are propagated to y, and vice-versa.
+
+ """
+ x = asarray(x)
+ y = asarray(y)
+
+ m = getmask(x)
+ if y.ndim == 1:
+ m = mask_or(m, getmask(y))
+ elif y.ndim == 2:
+ my = getmask(mask_rows(y))
+ if my is not nomask:
+ m = mask_or(m, my[:, 0])
+ else:
+ raise TypeError("Expected a 1D or 2D array for y!")
+
+ if w is not None:
+ w = asarray(w)
+ if w.ndim != 1:
+ raise TypeError("expected a 1-d array for weights")
+ if w.shape[0] != y.shape[0]:
+ raise TypeError("expected w and y to have the same length")
+ m = mask_or(m, getmask(w))
+
+ if m is not nomask:
+ not_m = ~m
+ if w is not None:
+ w = w[not_m]
+ return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
+ else:
+ return np.polyfit(x, y, deg, rcond, full, w, cov)
+
+polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
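+
+# Sketch (illustrative; rounding guards float fuzz): the masked x-value and
+# its y-partner are dropped before the fit, leaving an exact line y = 2x + 1.
+#
+# >>> xs = np.ma.array([0., 1., 2., 3.], mask=[0, 0, 1, 0])
+# >>> ys = np.array([1., 3., 99., 7.])
+# >>> np.ma.polyfit(xs, ys, 1).round(6).tolist()
+# [2.0, 1.0]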
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/extras.pyi b/venv/lib/python3.9/site-packages/numpy/ma/extras.pyi
new file mode 100644
index 00000000..56228b92
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/extras.pyi
@@ -0,0 +1,85 @@
+from typing import Any
+from numpy.lib.index_tricks import AxisConcatenator
+
+from numpy.ma.core import (
+ dot as dot,
+ mask_rowcols as mask_rowcols,
+)
+
+__all__: list[str]
+
+def count_masked(arr, axis=...): ...
+def masked_all(shape, dtype=...): ...
+def masked_all_like(arr): ...
+
+class _fromnxfunction:
+ __name__: Any
+ __doc__: Any
+ def __init__(self, funcname): ...
+ def getdoc(self): ...
+ def __call__(self, *args, **params): ...
+
+class _fromnxfunction_single(_fromnxfunction):
+ def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_seq(_fromnxfunction):
+ def __call__(self, x, *args, **params): ...
+
+class _fromnxfunction_allargs(_fromnxfunction):
+ def __call__(self, *args, **params): ...
+
+atleast_1d: _fromnxfunction_allargs
+atleast_2d: _fromnxfunction_allargs
+atleast_3d: _fromnxfunction_allargs
+
+vstack: _fromnxfunction_seq
+row_stack: _fromnxfunction_seq
+hstack: _fromnxfunction_seq
+column_stack: _fromnxfunction_seq
+dstack: _fromnxfunction_seq
+stack: _fromnxfunction_seq
+
+hsplit: _fromnxfunction_single
+diagflat: _fromnxfunction_single
+
+def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
+def apply_over_axes(func, a, axes): ...
+def average(a, axis=..., weights=..., returned=..., keepdims=...): ...
+def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
+def compress_nd(x, axis=...): ...
+def compress_rowcols(x, axis=...): ...
+def compress_rows(a): ...
+def compress_cols(a): ...
+def mask_rows(a, axis=...): ...
+def mask_cols(a, axis=...): ...
+def ediff1d(arr, to_end=..., to_begin=...): ...
+def unique(ar1, return_index=..., return_inverse=...): ...
+def intersect1d(ar1, ar2, assume_unique=...): ...
+def setxor1d(ar1, ar2, assume_unique=...): ...
+def in1d(ar1, ar2, assume_unique=..., invert=...): ...
+def isin(element, test_elements, assume_unique=..., invert=...): ...
+def union1d(ar1, ar2): ...
+def setdiff1d(ar1, ar2, assume_unique=...): ...
+def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
+
+class MAxisConcatenator(AxisConcatenator):
+ concatenate: Any
+ @classmethod
+ def makemat(cls, arr): ...
+ def __getitem__(self, key): ...
+
+class mr_class(MAxisConcatenator):
+ def __init__(self): ...
+
+mr_: mr_class
+
+def ndenumerate(a, compressed=...): ...
+def flatnotmasked_edges(a): ...
+def notmasked_edges(a, axis=...): ...
+def flatnotmasked_contiguous(a): ...
+def notmasked_contiguous(a, axis=...): ...
+def clump_unmasked(a): ...
+def clump_masked(a): ...
+def vander(x, n=...): ...
+def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/mrecords.py b/venv/lib/python3.9/site-packages/numpy/ma/mrecords.py
new file mode 100644
index 00000000..1e8103bc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/mrecords.py
@@ -0,0 +1,783 @@
+""":mod:`numpy.ma..mrecords`
+
+Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
+where fields can be accessed as attributes.
+Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
+and the masking of individual fields.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+
+"""
+# We should make sure that no field is called '_mask', 'mask', '_fieldmask',
+# or any other reserved keyword. An idea would be to not bother in the
+# first place, and instead rename the invalid fields with a trailing
+# underscore. Maybe we could just overload the parser function?
+
+from numpy.ma import (
+ MAError, MaskedArray, masked, nomask, masked_array, getdata,
+ getmaskarray, filled
+)
+import numpy.ma as ma
+import warnings
+
+import numpy as np
+from numpy import (
+ bool_, dtype, ndarray, recarray, array as narray
+)
+from numpy.core.records import (
+ fromarrays as recfromarrays, fromrecords as recfromrecords
+)
+
+_byteorderconv = np.core.records._byteorderconv
+
+
+_check_fill_value = ma.core._check_fill_value
+
+
+__all__ = [
+ 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
+ 'fromtextfile', 'addfield',
+]
+
+reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
+
+
+def _checknames(descr, names=None):
+ """
+ Checks that the field names in ``descr`` are not reserved keywords.
+
+ If one is, a default name 'f%i' is substituted. If the argument
+ `names` is not None, updates the field names to valid names.
+
+ """
+ ndescr = len(descr)
+ default_names = ['f%i' % i for i in range(ndescr)]
+ if names is None:
+ new_names = default_names
+ else:
+ if isinstance(names, (tuple, list)):
+ new_names = names
+ elif isinstance(names, str):
+ new_names = names.split(',')
+ else:
+ raise NameError(f'illegal input names {names!r}')
+ nnames = len(new_names)
+ if nnames < ndescr:
+ new_names += default_names[nnames:]
+ ndescr = []
+ for (n, d, t) in zip(new_names, default_names, descr.descr):
+ if n in reserved_fields:
+ if t[0] in reserved_fields:
+ ndescr.append((d, t[1]))
+ else:
+ ndescr.append(t)
+ else:
+ ndescr.append((n, t[1]))
+ return np.dtype(ndescr)
+
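+# Sketch (illustrative; explicit dtype strings keep the repr portable):
+# a requested reserved name falls back to the original field name.
+#
+# >>> _checknames(np.dtype([('_mask', '<i4'), ('b', '<f8')]), names='x,_data')
+# dtype([('x', '<i4'), ('b', '<f8')])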
+
+def _get_fieldmask(self):
+ mdescr = [(n, '|b1') for n in self.dtype.names]
+ fdmask = np.empty(self.shape, dtype=mdescr)
+ fdmask.flat = tuple([False] * len(mdescr))
+ return fdmask
+
+
+class MaskedRecords(MaskedArray):
+ """
+
+ Attributes
+ ----------
+ _data : recarray
+ Underlying data, as a record array.
+ _mask : boolean array
+ Mask of the records. A record is masked when all its fields are
+ masked.
+ _fieldmask : boolean recarray
+ Record array of booleans, setting the mask of each individual field
+ of each record.
+ _fill_value : record
+ Filling values for each field.
+
+ """
+
+ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
+ formats=None, names=None, titles=None,
+ byteorder=None, aligned=False,
+ mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
+ copy=False,
+ **options):
+
+ self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
+ strides=strides, formats=formats, names=names,
+ titles=titles, byteorder=byteorder,
+ aligned=aligned,)
+
+ mdtype = ma.make_mask_descr(self.dtype)
+ if mask is nomask or not np.size(mask):
+ if not keep_mask:
+ self._mask = tuple([False] * len(mdtype))
+ else:
+ mask = np.array(mask, copy=copy)
+ if mask.shape != self.shape:
+ (nd, nm) = (self.size, mask.size)
+ if nm == 1:
+ mask = np.resize(mask, self.shape)
+ elif nm == nd:
+ mask = np.reshape(mask, self.shape)
+ else:
+ msg = "Mask and data not compatible: data size is %i, " + \
+ "mask size is %i."
+ raise MAError(msg % (nd, nm))
+ if not keep_mask:
+ self.__setmask__(mask)
+ self._sharedmask = True
+ else:
+ if mask.dtype == mdtype:
+ _mask = mask
+ else:
+ _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+ dtype=mdtype)
+ self._mask = _mask
+ return self
+
+ def __array_finalize__(self, obj):
+ # Make sure we have a _fieldmask by default
+ _mask = getattr(obj, '_mask', None)
+ if _mask is None:
+ objmask = getattr(obj, '_mask', nomask)
+ _dtype = ndarray.__getattribute__(self, 'dtype')
+ if objmask is nomask:
+ _mask = ma.make_mask_none(self.shape, dtype=_dtype)
+ else:
+ mdescr = ma.make_mask_descr(_dtype)
+ _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
+ dtype=mdescr).view(recarray)
+ # Update some of the attributes
+ _dict = self.__dict__
+ _dict.update(_mask=_mask)
+ self._update_from(obj)
+ if _dict['_baseclass'] == ndarray:
+ _dict['_baseclass'] = recarray
+ return
+
+ @property
+ def _data(self):
+ """
+ Returns the data as a recarray.
+
+ """
+ return ndarray.view(self, recarray)
+
+ @property
+ def _fieldmask(self):
+ """
+ Alias to mask.
+
+ """
+ return self._mask
+
+ def __len__(self):
+ """
+ Returns the length.
+
+ """
+ # We have more than one record
+ if self.ndim:
+ return len(self._data)
+ # We have only one record: return the number of fields
+ return len(self.dtype)
+
+ def __getattribute__(self, attr):
+ try:
+ return object.__getattribute__(self, attr)
+ except AttributeError:
+ # attr must be a fieldname
+ pass
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields
+ try:
+ res = fielddict[attr][:2]
+ except (TypeError, KeyError) as e:
+ raise AttributeError(
+ f'record array has no attribute {attr}') from e
+ # So far, so good
+ _localdict = ndarray.__getattribute__(self, '__dict__')
+ _data = ndarray.view(self, _localdict['_baseclass'])
+ obj = _data.getfield(*res)
+ if obj.dtype.names is not None:
+ raise NotImplementedError("MaskedRecords is currently limited to "
+ "simple records.")
+ # Get some special attributes
+ # Reset the object's mask
+ hasmasked = False
+ _mask = _localdict.get('_mask', None)
+ if _mask is not None:
+ try:
+ _mask = _mask[attr]
+ except IndexError:
+ # Couldn't find a mask: use the default (nomask)
+ pass
+ tp_len = len(_mask.dtype)
+ hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
+ if (obj.shape or hasmasked):
+ obj = obj.view(MaskedArray)
+ obj._baseclass = ndarray
+ obj._isfield = True
+ obj._mask = _mask
+ # Reset the field values
+ _fill_value = _localdict.get('_fill_value', None)
+ if _fill_value is not None:
+ try:
+ obj._fill_value = _fill_value[attr]
+ except ValueError:
+ obj._fill_value = None
+ else:
+ obj = obj.item()
+ return obj
+
+ def __setattr__(self, attr, val):
+ """
+ Sets the attribute attr to the value val.
+
+ """
+ # Should we call __setmask__ first?
+ if attr in ['mask', 'fieldmask']:
+ self.__setmask__(val)
+ return
+ # Create a shortcut (so that we don't have to call getattr all the time)
+ _localdict = object.__getattribute__(self, '__dict__')
+ # Check whether we're creating a new field
+ newattr = attr not in _localdict
+ try:
+ # Is attr a generic attribute ?
+ ret = object.__setattr__(self, attr, val)
+ except Exception:
+ # Not a generic attribute: exit if it's not a valid field
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
+ optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
+ if not (attr in fielddict or attr in optinfo):
+ raise
+ else:
+ # Get the list of names
+ fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
+ # Check the attribute
+ if attr not in fielddict:
+ return ret
+ if newattr:
+ # We just added this one or this setattr worked on an
+ # internal attribute.
+ try:
+ object.__delattr__(self, attr)
+ except Exception:
+ return ret
+ # Let's try to set the field
+ try:
+ res = fielddict[attr][:2]
+ except (TypeError, KeyError) as e:
+ raise AttributeError(
+ f'record array has no attribute {attr}') from e
+
+ if val is masked:
+ _fill_value = _localdict['_fill_value']
+ if _fill_value is not None:
+ dval = _localdict['_fill_value'][attr]
+ else:
+ dval = val
+ mval = True
+ else:
+ dval = filled(val)
+ mval = getmaskarray(val)
+ obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
+ _localdict['_mask'].__setitem__(attr, mval)
+ return obj
+
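+ # Sketch (reusing the hypothetical ``rec`` above): assigning
+ # ``ma.masked`` to a field name masks that whole field.
+ #
+ # >>> r2 = rec.copy()
+ # >>> r2.b = ma.masked
+ # >>> r2._mask['b'].all()
+ # True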
+ def __getitem__(self, indx):
+ """
+ Returns all the fields sharing the same fieldname base.
+
+ The fieldname base is either `_data` or `_mask`.
+
+ """
+ _localdict = self.__dict__
+ _mask = ndarray.__getattribute__(self, '_mask')
+ _data = ndarray.view(self, _localdict['_baseclass'])
+ # We want a field
+ if isinstance(indx, str):
+ # Make sure _sharedmask is True to propagate back to _fieldmask.
+ # Don't use _set_mask: it makes copies that break propagation.
+ # Don't force the mask to nomask: that wrecks easy masking.
+ obj = _data[indx].view(MaskedArray)
+ obj._mask = _mask[indx]
+ obj._sharedmask = True
+ fval = _localdict['_fill_value']
+ if fval is not None:
+ obj._fill_value = fval[indx]
+ # Force to masked if the mask is True
+ if not obj.ndim and obj._mask:
+ return masked
+ return obj
+ # We want some elements.
+ # First, the data.
+ obj = np.array(_data[indx], copy=False).view(mrecarray)
+ obj._mask = np.array(_mask[indx], copy=False).view(recarray)
+ return obj
+
+ def __setitem__(self, indx, value):
+ """
+ Sets the given record to value.
+
+ """
+ MaskedArray.__setitem__(self, indx, value)
+ if isinstance(indx, str):
+ self._mask[indx] = ma.getmaskarray(value)
+
+ def __str__(self):
+ """
+ Calculates the string representation.
+
+ """
+ if self.size > 1:
+ mstr = [f"({','.join([str(i) for i in s])})"
+ for s in zip(*[getattr(self, f) for f in self.dtype.names])]
+ return f"[{', '.join(mstr)}]"
+ else:
+ mstr = [str(getattr(self, f)) for f in self.dtype.names]
+ return f"({', '.join(mstr)})"
+
+ def __repr__(self):
+ """
+ Calculates the repr representation.
+
+ """
+ _names = self.dtype.names
+ fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
+ reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
+ reprstr.insert(0, 'masked_records(')
+ reprstr.extend([fmt % (' fill_value', self.fill_value),
+ ' )'])
+ return str("\n".join(reprstr))
+
+ def view(self, dtype=None, type=None):
+ """
+ Returns a view of the mrecarray.
+
+ """
+ # OK, basic copy-paste from MaskedArray.view.
+ if dtype is None:
+ if type is None:
+ output = ndarray.view(self)
+ else:
+ output = ndarray.view(self, type)
+ # Here again.
+ elif type is None:
+ try:
+ # If `dtype` is a class, take a direct view of it; for genuine
+ # dtype descriptors, issubclass raises TypeError, which routes
+ # them to the conversion logic in the except clause below.
+ if issubclass(dtype, ndarray):
+ output = ndarray.view(self, dtype)
+ else:
+ output = ndarray.view(self, dtype)
+ except TypeError:
+ dtype = np.dtype(dtype)
+ # we need to revert to MaskedArray, but keeping the possibility
+ # of subclasses (eg, TimeSeriesRecords), so we'll force a type
+ # set to the first parent
+ if dtype.fields is None:
+ basetype = self.__class__.__bases__[0]
+ output = self.__array__().view(dtype, basetype)
+ output._update_from(self)
+ else:
+ output = ndarray.view(self, dtype)
+ output._fill_value = None
+ else:
+ output = ndarray.view(self, dtype, type)
+ # Update the mask, just like in MaskedArray.view
+ if (getattr(output, '_mask', nomask) is not nomask):
+ mdtype = ma.make_mask_descr(output.dtype)
+ output._mask = self._mask.view(mdtype, ndarray)
+ output._mask.shape = output.shape
+ return output
+
+ def harden_mask(self):
+ """
+ Forces the mask to hard.
+
+ """
+ self._hardmask = True
+
+ def soften_mask(self):
+ """
+ Forces the mask to soft.
+
+ """
+ self._hardmask = False
+
+ def copy(self):
+ """
+ Returns a copy of the masked record.
+
+ """
+ copied = self._data.copy().view(type(self))
+ copied._mask = self._mask.copy()
+ return copied
+
+ def tolist(self, fill_value=None):
+ """
+ Return the data portion of the array as a list.
+
+ Data items are converted to the nearest compatible Python type.
+ Masked values are converted to fill_value. If fill_value is None,
+ the corresponding entries in the output list will be ``None``.
+
+ """
+ if fill_value is not None:
+ return self.filled(fill_value).tolist()
+ result = narray(self.filled().tolist(), dtype=object)
+ mask = narray(self._mask.tolist())
+ result[mask] = None
+ return result.tolist()
+
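+ # Sketch (same hypothetical ``rec``): masked cells come back as None;
+ # with this implementation each record is returned as a plain list.
+ #
+ # >>> rec.tolist()
+ # [[1, 4.0], [None, 5.0], [3, 6.0]]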
+ def __getstate__(self):
+ """Return the internal state of the masked array.
+
+ This is for pickling.
+
+ """
+ state = (1,
+ self.shape,
+ self.dtype,
+ self.flags.fnc,
+ self._data.tobytes(),
+ self._mask.tobytes(),
+ self._fill_value,
+ )
+ return state
+
+ def __setstate__(self, state):
+ """
+ Restore the internal state of the masked array.
+
+ This is for pickling. ``state`` is typically the output of
+ ``__getstate__``, and is a 7-tuple:
+
+ - a version number (currently 1)
+ - a tuple giving the shape of the data
+ - a typecode for the data
+ - a boolean giving the Fortran-contiguity flag
+ - a binary string for the data
+ - a binary string for the mask
+ - the fill_value.
+
+ """
+ (ver, shp, typ, isf, raw, msk, flv) = state
+ ndarray.__setstate__(self, (shp, typ, isf, raw))
+ mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
+ self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
+ self.fill_value = flv
+
+ def __reduce__(self):
+ """
+ Return a 3-tuple for pickling a MaskedArray.
+
+ """
+ return (_mrreconstruct,
+ (self.__class__, self._baseclass, (0,), 'b',),
+ self.__getstate__())
+
+
+def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
+ """
+ Build a new MaskedArray from the information stored in a pickle.
+
+ """
+ _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
+ _mask = ndarray.__new__(ndarray, baseshape, 'b1')
+ return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
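+ # Sketch: pickling round-trips through ``__reduce__``,
+ # ``_mrreconstruct`` and ``__setstate__`` (hypothetical ``rec`` again).
+ #
+ # >>> import pickle
+ # >>> rec2 = pickle.loads(pickle.dumps(rec))
+ # >>> rec2.a[1] is ma.masked
+ # True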
+mrecarray = MaskedRecords
+
+
+###############################################################################
+# Constructors #
+###############################################################################
+
+
+def fromarrays(arraylist, dtype=None, shape=None, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None,
+ fill_value=None):
+ """
+ Creates a mrecarray from a (flat) list of masked arrays.
+
+ Parameters
+ ----------
+ arraylist : sequence
+ A list of (masked) arrays. Each element of the sequence is first converted
+ to a masked array if needed. If a 2D array is passed as an argument, it is
+ processed row by row.
+ dtype : {None, dtype}, optional
+ Data type descriptor.
+ shape : {None, integer}, optional
+ Number of records. If None, shape is defined from the shape of the
+ first array in the list.
+ formats : {None, sequence}, optional
+ Sequence of formats for each individual field. If None, the formats will
+ be autodetected by inspecting the fields and selecting the highest dtype
+ possible.
+ names : {None, sequence}, optional
+ Sequence of the names of each field.
+ fill_value : {None, sequence}, optional
+ Sequence of data to be used as filling values.
+
+ Notes
+ -----
+ Lists of tuples should be preferred over lists of lists for faster processing.
+
+ """
+ datalist = [getdata(x) for x in arraylist]
+ masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
+ _array = recfromarrays(datalist,
+ dtype=dtype, shape=shape, formats=formats,
+ names=names, titles=titles, aligned=aligned,
+ byteorder=byteorder).view(mrecarray)
+ _array._mask.flat = list(zip(*masklist))
+ if fill_value is not None:
+ _array.fill_value = fill_value
+ return _array
+
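+ # Sketch (illustrative): the optional ``fill_value`` is forwarded to the
+ # result, so ``filled()`` substitutes it for masked cells.
+ #
+ # >>> rec = fromarrays([ma.array([1, 2], mask=[0, 1])],
+ # ... names='a', fill_value=(-999,))
+ # >>> rec.filled()[1] # the masked slot takes the fill value
+ # (-999,)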
+
+def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
+ titles=None, aligned=False, byteorder=None,
+ fill_value=None, mask=nomask):
+ """
+ Creates a MaskedRecords from a list of records.
+
+ Parameters
+ ----------
+ reclist : sequence
+ A list of records. Each element of the sequence is first converted
+ to a masked array if needed. If a 2D array is passed as an argument, it is
+ processed row by row.
+ dtype : {None, dtype}, optional
+ Data type descriptor.
+ shape : {None,int}, optional
+ Number of records. If None, ``shape`` is defined from the shape of the
+ first array in the list.
+ formats : {None, sequence}, optional
+ Sequence of formats for each individual field. If None, the formats will
+ be autodetected by inspecting the fields and selecting the highest dtype
+ possible.
+ names : {None, sequence}, optional
+ Sequence of the names of each field.
+ fill_value : {None, sequence}, optional
+ Sequence of data to be used as filling values.
+ mask : {nomask, sequence}, optional
+ External mask to apply on the data.
+
+ Notes
+ -----
+ Lists of tuples should be preferred over lists of lists for faster processing.
+
+ """
+ # Grab the initial _fieldmask, if needed:
+ _mask = getattr(reclist, '_mask', None)
+ # Get the list of records.
+ if isinstance(reclist, ndarray):
+ # Make sure we don't have some hidden mask
+ if isinstance(reclist, MaskedArray):
+ reclist = reclist.filled().view(ndarray)
+ # Grab the initial dtype, just in case
+ if dtype is None:
+ dtype = reclist.dtype
+ reclist = reclist.tolist()
+ mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+ names=names, titles=titles,
+ aligned=aligned, byteorder=byteorder).view(mrecarray)
+ # Set the fill_value if needed
+ if fill_value is not None:
+ mrec.fill_value = fill_value
+ # Now, let's deal w/ the mask
+ if mask is not nomask:
+ mask = np.array(mask, copy=False)
+ maskrecordlength = len(mask.dtype)
+ if maskrecordlength:
+ mrec._mask.flat = mask
+ elif mask.ndim == 2:
+ mrec._mask.flat = [tuple(m) for m in mask]
+ else:
+ mrec.__setmask__(mask)
+ if _mask is not None:
+ mrec._mask[:] = _mask
+ return mrec
+
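+ # Sketch: an explicit 2D ``mask`` is applied record by record.
+ #
+ # >>> rec = fromrecords([(1, 'a'), (2, 'b')], names='x,y',
+ # ... mask=[(0, 1), (0, 0)])
+ # >>> rec.y[0] is ma.masked
+ # True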
+
+def _guessvartypes(arr):
+ """
+ Tries to guess the dtypes of the str_ ndarray `arr`.
+
+ Guesses by testing element-wise conversion. Returns a list of dtypes.
+ The array is first converted to ndarray. If the array is 2D, the test
+ is performed on the first row. An exception is raised if the array is
+ 3D or more.
+
+ """
+ vartypes = []
+ arr = np.asarray(arr)
+ if arr.ndim == 2:
+ arr = arr[0]
+ elif arr.ndim > 2:
+ raise ValueError("The array should be 2D at most!")
+ # Start the conversion loop.
+ for f in arr:
+ try:
+ int(f)
+ except (ValueError, TypeError):
+ try:
+ float(f)
+ except (ValueError, TypeError):
+ try:
+ complex(f)
+ except (ValueError, TypeError):
+ vartypes.append(arr.dtype)
+ else:
+ vartypes.append(np.dtype(complex))
+ else:
+ vartypes.append(np.dtype(float))
+ else:
+ vartypes.append(np.dtype(int))
+ return vartypes
+
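+ # Sketch: guessing falls through int -> float -> complex and keeps the
+ # original string dtype when nothing converts (the integer repr below
+ # is platform-dependent).
+ #
+ # >>> _guessvartypes(np.array(['1', '1.5', '1j', 'abc']))
+ # [dtype('int64'), dtype('float64'), dtype('complex128'), dtype('<U3')]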
+
+def openfile(fname):
+ """
+ Returns an open file handle for the file `fname`.
+
+ """
+ # A file handle
+ if hasattr(fname, 'readline'):
+ return fname
+ # Try to open the file and guess its type
+ try:
+ f = open(fname)
+ except FileNotFoundError as e:
+ raise FileNotFoundError(f"No such file: '{fname}'") from e
+ if f.readline()[:2] != "\\x":
+ f.seek(0, 0)
+ return f
+ f.close()
+ raise NotImplementedError("Wow, binary file")
+
+
+def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
+ varnames=None, vartypes=None,
+ *, delimitor=np._NoValue): # backwards compatibility
+ """
+ Creates a mrecarray from data stored in the file `filename`.
+
+ Parameters
+ ----------
+ fname : {file name/handle}
+ File name, or handle of an opened file.
+ delimiter : {None, string}, optional
+ Character used to separate columns in the file.
+ If None, any (group of) whitespace character(s) will be used.
+ commentchar : {'#', string}, optional
+ Character used to mark the start of a comment.
+ missingchar : {'', string}, optional
+ String indicating missing data, and used to create the masks.
+ varnames : {None, sequence}, optional
+ Sequence of the variable names. If None, a list will be created from
+ the first non-empty line of the file.
+ vartypes : {None, sequence}, optional
+ Sequence of the variable dtypes. If None, it will be estimated from
+ the first non-commented line.
+
+ Ultra simple format: the variable names are on a single header line.
+ """
+ if delimitor is not np._NoValue:
+ if delimiter is not None:
+ raise TypeError("fromtextfile() got multiple values for argument "
+ "'delimiter'")
+ # NumPy 1.22.0, 2021-09-23
+ warnings.warn("The 'delimitor' keyword argument of "
+ "numpy.ma.mrecords.fromtextfile() is deprecated "
+ "since NumPy 1.22.0, use 'delimiter' instead.",
+ DeprecationWarning, stacklevel=2)
+ delimiter = delimitor
+
+ # Try to open the file.
+ ftext = openfile(fname)
+
+ # Get the first non-empty line as the varnames
+ while True:
+ line = ftext.readline()
+ firstline = line[:line.find(commentchar)].strip()
+ _varnames = firstline.split(delimiter)
+ if len(_varnames) > 1:
+ break
+ if varnames is None:
+ varnames = _varnames
+
+ # Get the data.
+ _variables = masked_array([line.strip().split(delimiter) for line in ftext
+ if line[0] != commentchar and len(line) > 1])
+ (_, nfields) = _variables.shape
+ ftext.close()
+
+ # Try to guess the dtype.
+ if vartypes is None:
+ vartypes = _guessvartypes(_variables[0])
+ else:
+ vartypes = [np.dtype(v) for v in vartypes]
+ if len(vartypes) != nfields:
+ msg = "Attempting to %i dtypes for %i fields!"
+ msg += " Reverting to default."
+ warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
+ vartypes = _guessvartypes(_variables[0])
+
+ # Construct the descriptor.
+ mdescr = list(zip(varnames, vartypes))
+ mfillv = [ma.default_fill_value(f) for f in vartypes]
+
+ # Get the data and the mask.
+ # We just need a list of masked_arrays. It's easier to create it like that:
+ _mask = (_variables.T == missingchar)
+ _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
+ for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
+
+ return fromarrays(_datalist, dtype=mdescr)
+
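+ # Sketch with a hypothetical file ``data.txt`` holding a header line of
+ # names and one record per line; cells equal to ``missingchar`` become
+ # masked (the masked cell sits in a string-typed column here):
+ #
+ # name score
+ # alpha 1
+ # - 2
+ #
+ # >>> rec = fromtextfile('data.txt', delimiter=' ', missingchar='-')
+ # >>> rec.name[1] is ma.masked
+ # True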
+
+def addfield(mrecord, newfield, newfieldname=None):
+ """Adds a new field to the masked record array
+
+ Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
+ is None, the new field name is set to 'fi', where `i` is the number of
+ existing fields.
+
+ """
+ _data = mrecord._data
+ _mask = mrecord._mask
+ if newfieldname is None or newfieldname in reserved_fields:
+ newfieldname = 'f%i' % len(_data.dtype)
+ newfield = ma.array(newfield)
+ # Get the new data.
+ # Create a new empty recarray
+ newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
+ newdata = recarray(_data.shape, newdtype)
+ # Add the existing fields
+ for f in _data.dtype.fields.values():
+ newdata.setfield(_data.getfield(*f), *f)
+ # Add the new field
+ newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
+ newdata = newdata.view(MaskedRecords)
+ # Get the new mask
+ # Create a new empty recarray
+ newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
+ newmask = recarray(_data.shape, newmdtype)
+ # Add the old masks
+ for f in _mask.dtype.fields.values():
+ newmask.setfield(_mask.getfield(*f), *f)
+ # Add the mask of the new field
+ newmask.setfield(getmaskarray(newfield),
+ *newmask.dtype.fields[newfieldname])
+ newdata._mask = newmask
+ return newdata
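+
+ # Sketch (hypothetical two-field ``rec``): with no ``newfieldname`` the
+ # appended field is auto-named from the current field count.
+ #
+ # >>> from numpy.ma.mrecords import addfield
+ # >>> bigger = addfield(rec, ma.array([10, 20], mask=[0, 1]))
+ # >>> bigger.dtype.names[-1]
+ # 'f2'
+ # >>> bigger.f2[1] is ma.masked
+ # True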
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/mrecords.pyi b/venv/lib/python3.9/site-packages/numpy/ma/mrecords.pyi
new file mode 100644
index 00000000..264807e0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/mrecords.pyi
@@ -0,0 +1,90 @@
+from typing import Any, TypeVar
+
+from numpy import dtype
+from numpy.ma import MaskedArray
+
+__all__: list[str]
+
+# TODO: Set the `bound` to something more suitable once we
+# have proper shape support
+_ShapeType = TypeVar("_ShapeType", bound=Any)
+_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)
+
+class MaskedRecords(MaskedArray[_ShapeType, _DType_co]):
+ def __new__(
+ cls,
+ shape,
+ dtype=...,
+ buf=...,
+ offset=...,
+ strides=...,
+ formats=...,
+ names=...,
+ titles=...,
+ byteorder=...,
+ aligned=...,
+ mask=...,
+ hard_mask=...,
+ fill_value=...,
+ keep_mask=...,
+ copy=...,
+ **options,
+ ): ...
+ _mask: Any
+ _fill_value: Any
+ @property
+ def _data(self): ...
+ @property
+ def _fieldmask(self): ...
+ def __array_finalize__(self, obj): ...
+ def __len__(self): ...
+ def __getattribute__(self, attr): ...
+ def __setattr__(self, attr, val): ...
+ def __getitem__(self, indx): ...
+ def __setitem__(self, indx, value): ...
+ def view(self, dtype=..., type=...): ...
+ def harden_mask(self): ...
+ def soften_mask(self): ...
+ def copy(self): ...
+ def tolist(self, fill_value=...): ...
+ def __reduce__(self): ...
+
+mrecarray = MaskedRecords
+
+def fromarrays(
+ arraylist,
+ dtype=...,
+ shape=...,
+ formats=...,
+ names=...,
+ titles=...,
+ aligned=...,
+ byteorder=...,
+ fill_value=...,
+): ...
+
+def fromrecords(
+ reclist,
+ dtype=...,
+ shape=...,
+ formats=...,
+ names=...,
+ titles=...,
+ aligned=...,
+ byteorder=...,
+ fill_value=...,
+ mask=...,
+): ...
+
+def fromtextfile(
+ fname,
+ delimiter=...,
+ commentchar=...,
+ missingchar=...,
+ varnames=...,
+ vartypes=...,
+ # NOTE: deprecated: NumPy 1.22.0, 2021-09-23
+ # delimitor=...,
+): ...
+
+def addfield(mrecord, newfield, newfieldname=...): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/setup.py b/venv/lib/python3.9/site-packages/numpy/ma/setup.py
new file mode 100644
index 00000000..018d38cd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/setup.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('ma', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_files('*.pyi')
+ return config
+
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ config = configuration(top_path='').todict()
+ setup(**config)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_core.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_core.py
new file mode 100644
index 00000000..6c03e0ba
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_core.py
@@ -0,0 +1,5564 @@
+# pylint: disable-msg=W0400,W0511,W0611,W0612,W0614,R0201,E1102
+"""Tests suite for MaskedArray & subclassing.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+"""
+__author__ = "Pierre GF Gerard-Marchant"
+
+import sys
+import warnings
+import operator
+import itertools
+import textwrap
+import pytest
+
+from functools import reduce
+
+
+import numpy as np
+import numpy.ma.core
+import numpy.core.fromnumeric as fromnumeric
+import numpy.core.umath as umath
+from numpy.testing import (
+ assert_raises, assert_warns, suppress_warnings, IS_WASM
+ )
+from numpy.testing._private.utils import requires_memory
+from numpy import ndarray
+from numpy.compat import asbytes
+from numpy.ma.testutils import (
+ assert_, assert_array_equal, assert_equal, assert_almost_equal,
+ assert_equal_records, fail_if_equal, assert_not_equal,
+ assert_mask_equal
+ )
+from numpy.ma.core import (
+ MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
+ allclose, allequal, alltrue, angle, anom, arange, arccos, arccosh, arctan2,
+ arcsin, arctan, argsort, array, asarray, choose, concatenate,
+ conjugate, cos, cosh, count, default_fill_value, diag, divide, doc_note,
+ empty, empty_like, equal, exp, flatten_mask, filled, fix_invalid,
+ flatten_structured_array, fromflex, getmask, getmaskarray, greater,
+ greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
+ log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
+ masked_equal, masked_greater, masked_greater_equal, masked_inside,
+ masked_less, masked_less_equal, masked_not_equal, masked_outside,
+ masked_print_option, masked_values, masked_where, max, maximum,
+ maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
+ mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put,
+ putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort,
+ sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like,
+ )
+from numpy.compat import pickle
+
+pi = np.pi
+
+
+suppress_copy_mask_on_assignment = suppress_warnings()
+suppress_copy_mask_on_assignment.filter(
+ numpy.ma.core.MaskedArrayFutureWarning,
+ "setting an item on a masked array which has a shared mask will not copy")
+
+
+# For parametrized numeric testing
+num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD']
+num_ids = [dt_.char for dt_ in num_dts]
+
+
+class TestMaskedArray:
+ # Base test class for MaskedArrays.
+
+ def setup_method(self):
+ # Base data definition.
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ a10 = 10.
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = masked_array(x, mask=m1)
+ ym = masked_array(y, mask=m2)
+ z = np.array([-.5, 0., .5, .8])
+ zm = masked_array(z, mask=[0, 1, 0, 0])
+ xf = np.where(m1, 1e+20, x)
+ xm.set_fill_value(1e+20)
+ self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
+
+ def test_basicattributes(self):
+ # Tests some basic array attributes.
+ a = array([1, 3, 2])
+ b = array([1, 3, 2], mask=[1, 0, 1])
+ assert_equal(a.ndim, 1)
+ assert_equal(b.ndim, 1)
+ assert_equal(a.size, 3)
+ assert_equal(b.size, 3)
+ assert_equal(a.shape, (3,))
+ assert_equal(b.shape, (3,))
+
+ def test_basic0d(self):
+ # Checks masking a scalar
+ x = masked_array(0)
+ assert_equal(str(x), '0')
+ x = masked_array(0, mask=True)
+ assert_equal(str(x), str(masked_print_option))
+ x = masked_array(0, mask=False)
+ assert_equal(str(x), '0')
+ x = array(0, mask=1)
+ assert_(x.filled().dtype is x._data.dtype)
+
+ def test_basic1d(self):
+ # Test of basic array creation and properties in 1 dimension.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_((xm - ym).filled(0).any())
+ fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
+ s = x.shape
+ assert_equal(np.shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.dtype, x.dtype)
+ assert_equal(zm.dtype, z.dtype)
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
+ assert_array_equal(xm, xf)
+ assert_array_equal(filled(xm, 1.e20), xf)
+ assert_array_equal(x, xm)
+
+ def test_basic2d(self):
+ # Test of basic array creation and properties in 2 dimensions.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ for s in [(4, 3), (6, 2)]:
+ x.shape = s
+ y.shape = s
+ xm.shape = s
+ ym.shape = s
+ xf.shape = s
+
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
+ assert_equal(xm, xf)
+ assert_equal(filled(xm, 1.e20), xf)
+ assert_equal(x, xm)
+
+ def test_concatenate_basic(self):
+ # Tests concatenations.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ # basic concatenation
+ assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
+ assert_equal(np.concatenate((x, y)), concatenate((x, y)))
+ assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
+ assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
+
+ def test_concatenate_alongaxis(self):
+ # Tests concatenations.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ # Concatenation along an axis
+ s = (3, 4)
+ x.shape = y.shape = xm.shape = ym.shape = s
+ assert_equal(xm.mask, np.reshape(m1, s))
+ assert_equal(ym.mask, np.reshape(m2, s))
+ xmym = concatenate((xm, ym), 1)
+ assert_equal(np.concatenate((x, y), 1), xmym)
+ assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
+
+ x = zeros(2)
+ y = array(ones(2), mask=[False, True])
+ z = concatenate((x, y))
+ assert_array_equal(z, [0, 0, 1, 1])
+ assert_array_equal(z.mask, [False, False, False, True])
+ z = concatenate((y, x))
+ assert_array_equal(z, [1, 1, 0, 0])
+ assert_array_equal(z.mask, [False, True, False, False])
+
+ def test_concatenate_flexible(self):
+ # Tests the concatenation on flexible arrays.
+ data = masked_array(list(zip(np.random.rand(10),
+ np.arange(10))),
+ dtype=[('a', float), ('b', int)])
+
+ test = concatenate([data[:5], data[5:]])
+ assert_equal_records(test, data)
+
+ def test_creation_ndmin(self):
+ # Check the use of ndmin
+ x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
+ assert_equal(x.shape, (1, 3))
+ assert_equal(x._data, [[1, 2, 3]])
+ assert_equal(x._mask, [[1, 0, 0]])
+
+ def test_creation_ndmin_from_maskedarray(self):
+ # Make sure we're not losing the original mask w/ ndmin
+ x = array([1, 2, 3])
+ x[-1] = masked
+ xx = array(x, ndmin=2, dtype=float)
+ assert_equal(x.shape, x._mask.shape)
+ assert_equal(xx.shape, xx._mask.shape)
+
+ def test_creation_maskcreation(self):
+ # Tests how masks are initialized at the creation of Maskedarrays.
+ data = arange(24, dtype=float)
+ data[[3, 6, 15]] = masked
+ dma_1 = MaskedArray(data)
+ assert_equal(dma_1.mask, data.mask)
+ dma_2 = MaskedArray(dma_1)
+ assert_equal(dma_2.mask, dma_1.mask)
+ dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
+ fail_if_equal(dma_3.mask, dma_1.mask)
+
+ x = array([1, 2, 3], mask=True)
+ assert_equal(x._mask, [True, True, True])
+ x = array([1, 2, 3], mask=False)
+ assert_equal(x._mask, [False, False, False])
+ y = array([1, 2, 3], mask=x._mask, copy=False)
+ assert_(np.may_share_memory(x.mask, y.mask))
+ y = array([1, 2, 3], mask=x._mask, copy=True)
+ assert_(not np.may_share_memory(x.mask, y.mask))
+
+ def test_masked_singleton_array_creation_warns(self):
+ # The first call works, but ideally should not; there may be no
+ # way around this as long as `np.ma.masked` is an ndarray.
+ np.array(np.ma.masked)
+ with pytest.warns(UserWarning):
+ # Tries to create a float array, using `float(np.ma.masked)`.
+ # We may want to define this as invalid behaviour in the future!
+ # (requiring np.ma.masked to be a known NumPy scalar probably
+ # with a DType.)
+ np.array([3., np.ma.masked])
+
+ def test_creation_with_list_of_maskedarrays(self):
+ # Tests creating a masked array from a list of masked arrays.
+ x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
+ data = array((x, x[::-1]))
+ assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
+ assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
+
+ x.mask = nomask
+ data = array((x, x[::-1]))
+ assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
+ assert_(data.mask is nomask)
+
+ def test_creation_with_list_of_maskedarrays_no_bool_cast(self):
+ # Tests the regression in gh-18551
+ masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False])
+ normal_int = np.arange(2)
+ res = np.ma.asarray([masked_str, normal_int], dtype="U21")
+ assert_array_equal(res.mask, [[True, False], [False, False]])
+
+ # The above only failed due to a long chain of oddities; try also with
+ # an object array that cannot always be converted to bool:
+ class NotBool():
+ def __bool__(self):
+ raise ValueError("not a bool!")
+ masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False])
+ # Check that the NotBool actually fails like we would expect:
+ with pytest.raises(ValueError, match="not a bool!"):
+ np.asarray([masked_obj], dtype=bool)
+
+ res = np.ma.asarray([masked_obj, normal_int])
+ assert_array_equal(res.mask, [[True, False], [False, False]])
+
+ def test_creation_from_ndarray_with_padding(self):
+ x = np.array([('A', 0)], dtype={'names':['f0','f1'],
+ 'formats':['S4','i8'],
+ 'offsets':[0,8]})
+ array(x) # used to fail due to 'V' padding field in x.dtype.descr
+
+ def test_unknown_keyword_parameter(self):
+ with pytest.raises(TypeError, match="unexpected keyword argument"):
+ MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled.
+
+ def test_asarray(self):
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ xm.fill_value = -9999
+ xm._hardmask = True
+ xmm = asarray(xm)
+ assert_equal(xmm._data, xm._data)
+ assert_equal(xmm._mask, xm._mask)
+ assert_equal(xmm.fill_value, xm.fill_value)
+ assert_equal(xmm._hardmask, xm._hardmask)
+
+ def test_asarray_default_order(self):
+ # See Issue #6646
+ m = np.eye(3).T
+ assert_(not m.flags.c_contiguous)
+
+ new_m = asarray(m)
+ assert_(new_m.flags.c_contiguous)
+
+ def test_asarray_enforce_order(self):
+ # See Issue #6646
+ m = np.eye(3).T
+ assert_(not m.flags.c_contiguous)
+
+ new_m = asarray(m, order='C')
+ assert_(new_m.flags.c_contiguous)
+
+ def test_fix_invalid(self):
+ # Checks fix_invalid.
+ with np.errstate(invalid='ignore'):
+ data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
+ data_fixed = fix_invalid(data)
+ assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
+ assert_equal(data_fixed._mask, [1., 0., 1.])
+
+ def test_maskedelement(self):
+ # Test of masked element
+ x = arange(6)
+ x[1] = masked
+ assert_(str(masked) == '--')
+ assert_(x[1] is masked)
+ assert_equal(filled(x[1], 0), 0)
+
+ def test_set_element_as_object(self):
+ # Tests setting elements with object
+ a = empty(1, dtype=object)
+ x = (1, 2, 3, 4, 5)
+ a[0] = x
+ assert_equal(a[0], x)
+ assert_(a[0] is x)
+
+ import datetime
+ dt = datetime.datetime.now()
+ a[0] = dt
+ assert_(a[0] is dt)
+
+ def test_indexing(self):
+ # Tests conversions and indexing
+ x1 = np.array([1, 2, 4, 3])
+ x2 = array(x1, mask=[1, 0, 0, 0])
+ x3 = array(x1, mask=[0, 1, 0, 1])
+ x4 = array(x1)
+ # test conversion to strings
+ str(x2) # raises?
+ repr(x2) # raises?
+ assert_equal(np.sort(x1), sort(x2, endwith=False))
+ # tests of indexing
+ assert_(type(x2[1]) is type(x1[1]))
+ assert_(x1[1] == x2[1])
+ assert_(x2[0] is masked)
+ assert_equal(x1[2], x2[2])
+ assert_equal(x1[2:5], x2[2:5])
+ assert_equal(x1[:], x2[:])
+ assert_equal(x1[1:], x3[1:])
+ x1[2] = 9
+ x2[2] = 9
+ assert_equal(x1, x2)
+ x1[1:3] = 99
+ x2[1:3] = 99
+ assert_equal(x1, x2)
+ x2[1] = masked
+ assert_equal(x1, x2)
+ x2[1:3] = masked
+ assert_equal(x1, x2)
+ x2[:] = x1
+ x2[1] = masked
+ assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
+ x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
+ x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
+ assert_(allequal(x4, array([1, 2, 3, 4])))
+ x1 = np.arange(5) * 1.0
+ x2 = masked_values(x1, 3.0)
+ assert_equal(x1, x2)
+ assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
+ assert_equal(3.0, x2.fill_value)
+ x1 = array([1, 'hello', 2, 3], object)
+ x2 = np.array([1, 'hello', 2, 3], object)
+ s1 = x1[1]
+ s2 = x2[1]
+ assert_equal(type(s2), str)
+ assert_equal(type(s1), str)
+ assert_equal(s1, s2)
+ assert_(x1[1:1].shape == (0,))
+
+ def test_setitem_no_warning(self):
+ # Setitem shouldn't warn, because the assignment might be masked
+ # and warning for a masked assignment is weird (see gh-23000).
+ # (That is, when the value is masked; otherwise a warning would be
+ # acceptable but is not given currently.)
+ x = np.ma.arange(60).reshape((6, 10))
+ index = (slice(1, 5, 2), [7, 5])
+ value = np.ma.masked_all((2, 2))
+ value._data[...] = np.inf # not a valid integer...
+ x[index] = value
+ # The masked scalar is special cased, but test anyway (it's NaN):
+ x[...] = np.ma.masked
+ # Finally, a large value that cannot be cast to the float32 `x`
+ x = np.ma.arange(3., dtype=np.float32)
+ value = np.ma.array([2e234, 1, 1], mask=[True, False, False])
+ x[...] = value
+ x[[0, 1, 2]] = value
+
+ @suppress_copy_mask_on_assignment
+ def test_copy(self):
+ # Tests of some subtle points of copying and sizing.
+ n = [0, 0, 1, 0, 0]
+ m = make_mask(n)
+ m2 = make_mask(m)
+ assert_(m is m2)
+ m3 = make_mask(m, copy=True)
+ assert_(m is not m3)
+
+ x1 = np.arange(5)
+ y1 = array(x1, mask=m)
+ assert_equal(y1._data.__array_interface__, x1.__array_interface__)
+ assert_(allequal(x1, y1.data))
+ assert_equal(y1._mask.__array_interface__, m.__array_interface__)
+
+ y1a = array(y1)
+ # Default for masked array is not to copy; see gh-10318.
+ assert_(y1a._data.__array_interface__ ==
+ y1._data.__array_interface__)
+ assert_(y1a._mask.__array_interface__ ==
+ y1._mask.__array_interface__)
+
+ y2 = array(x1, mask=m3)
+ assert_(y2._data.__array_interface__ == x1.__array_interface__)
+ assert_(y2._mask.__array_interface__ == m3.__array_interface__)
+ assert_(y2[2] is masked)
+ y2[2] = 9
+ assert_(y2[2] is not masked)
+ assert_(y2._mask.__array_interface__ == m3.__array_interface__)
+ assert_(allequal(y2.mask, 0))
+
+ y2a = array(x1, mask=m, copy=1)
+ assert_(y2a._data.__array_interface__ != x1.__array_interface__)
+ #assert_( y2a._mask is not m)
+ assert_(y2a._mask.__array_interface__ != m.__array_interface__)
+ assert_(y2a[2] is masked)
+ y2a[2] = 9
+ assert_(y2a[2] is not masked)
+ #assert_( y2a._mask is not m)
+ assert_(y2a._mask.__array_interface__ != m.__array_interface__)
+ assert_(allequal(y2a.mask, 0))
+
+ y3 = array(x1 * 1.0, mask=m)
+ assert_(filled(y3).dtype is (x1 * 1.0).dtype)
+
+ x4 = arange(4)
+ x4[2] = masked
+ y4 = resize(x4, (8,))
+ assert_equal(concatenate([x4, x4]), y4)
+ assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
+ y5 = repeat(x4, (2, 2, 2, 2), axis=0)
+ assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
+ y6 = repeat(x4, 2, axis=0)
+ assert_equal(y5, y6)
+ y7 = x4.repeat((2, 2, 2, 2), axis=0)
+ assert_equal(y5, y7)
+ y8 = x4.repeat(2, 0)
+ assert_equal(y5, y8)
+
+ y9 = x4.copy()
+ assert_equal(y9._data, x4._data)
+ assert_equal(y9._mask, x4._mask)
+
+ x = masked_array([1, 2, 3], mask=[0, 1, 0])
+ # Copy is False by default
+ y = masked_array(x)
+ assert_equal(y._data.ctypes.data, x._data.ctypes.data)
+ assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)
+ y = masked_array(x, copy=True)
+ assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
+ assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
+
+ def test_copy_0d(self):
+ # gh-9430
+ x = np.ma.array(43, mask=True)
+ xc = x.copy()
+ assert_equal(xc.mask, True)
+
+ def test_copy_on_python_builtins(self):
+ # Tests copy works on python builtins (issue#8019)
+ assert_(isMaskedArray(np.ma.copy([1,2,3])))
+ assert_(isMaskedArray(np.ma.copy((1,2,3))))
+
+ def test_copy_immutable(self):
+ # Tests that the copy method is immutable, GitHub issue #5247
+ a = np.ma.array([1, 2, 3])
+ b = np.ma.array([4, 5, 6])
+ a_copy_method = a.copy
+ b.copy
+ assert_equal(a_copy_method(), [1, 2, 3])
+
+ def test_deepcopy(self):
+ from copy import deepcopy
+ a = array([0, 1, 2], mask=[False, True, False])
+ copied = deepcopy(a)
+ assert_equal(copied.mask, a.mask)
+ assert_not_equal(id(a._mask), id(copied._mask))
+
+ copied[1] = 1
+ assert_equal(copied.mask, [0, 0, 0])
+ assert_equal(a.mask, [0, 1, 0])
+
+ copied = deepcopy(a)
+ assert_equal(copied.mask, a.mask)
+ copied.mask[1] = False
+ assert_equal(copied.mask, [0, 0, 0])
+ assert_equal(a.mask, [0, 1, 0])
+
+ def test_format(self):
+ a = array([0, 1, 2], mask=[False, True, False])
+ assert_equal(format(a), "[0 -- 2]")
+ assert_equal(format(masked), "--")
+ assert_equal(format(masked, ""), "--")
+
+ # Postponed from PR #15410, perhaps address in the future.
+ # assert_equal(format(masked, " >5"), " --")
+ # assert_equal(format(masked, " <5"), "-- ")
+
+ # Expect a FutureWarning for using format_spec with MaskedElement
+ with assert_warns(FutureWarning):
+ with_format_string = format(masked, " >5")
+ assert_equal(with_format_string, "--")
+
+ def test_str_repr(self):
+ a = array([0, 1, 2], mask=[False, True, False])
+ assert_equal(str(a), '[0 -- 2]')
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)''')
+ )
+
+ # arrays with a continuation
+ a = np.ma.arange(2000)
+ a[1:50] = np.ma.masked
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(data=[0, --, --, ..., 1997, 1998, 1999],
+ mask=[False, True, True, ..., False, False, False],
+ fill_value=999999)''')
+ )
+
+ # line-wrapped 1d arrays are correctly aligned
+ a = np.ma.arange(20)
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19],
+ mask=False,
+ fill_value=999999)''')
+ )
+
+ # 2d arrays cause wrapping
+ a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8)
+ a[1,1] = np.ma.masked
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(
+ data=[[1, 2, 3],
+ [4, --, 6]],
+ mask=[[False, False, False],
+ [False, True, False]],
+ fill_value=999999,
+ dtype=int8)''')
+ )
+
+ # but not if they're a row vector
+ assert_equal(
+ repr(a[:1]),
+ textwrap.dedent('''\
+ masked_array(data=[[1, 2, 3]],
+ mask=[[False, False, False]],
+ fill_value=999999,
+ dtype=int8)''')
+ )
+
+ # dtype=int is implied, so not shown
+ assert_equal(
+ repr(a.astype(int)),
+ textwrap.dedent('''\
+ masked_array(
+ data=[[1, 2, 3],
+ [4, --, 6]],
+ mask=[[False, False, False],
+ [False, True, False]],
+ fill_value=999999)''')
+ )
+
+ def test_str_repr_legacy(self):
+ oldopts = np.get_printoptions()
+ np.set_printoptions(legacy='1.13')
+ try:
+ a = array([0, 1, 2], mask=[False, True, False])
+ assert_equal(str(a), '[0 -- 2]')
+ assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
+ ' mask = [False True False],\n'
+ ' fill_value = 999999)\n')
+
+ a = np.ma.arange(2000)
+ a[1:50] = np.ma.masked
+ assert_equal(
+ repr(a),
+ 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
+ ' mask = [False True True ..., False False False],\n'
+ ' fill_value = 999999)\n'
+ )
+ finally:
+ np.set_printoptions(**oldopts)
+
+ def test_0d_unicode(self):
+ u = 'caf\xe9'
+ utype = type(u)
+
+ arr_nomask = np.ma.array(u)
+ arr_masked = np.ma.array(u, mask=True)
+
+ assert_equal(utype(arr_nomask), u)
+ assert_equal(utype(arr_masked), '--')
+
+ def test_pickling(self):
+ # Tests pickling
+ for dtype in (int, float, str, object):
+ a = arange(10).astype(dtype)
+ a.fill_value = 999
+
+ masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked
+ True, # Fully masked
+ False) # Fully unmasked
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for mask in masks:
+ a.mask = mask
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled._data, a._data)
+ if dtype in (object, int):
+ assert_equal(a_pickled.fill_value, 999)
+ else:
+ assert_equal(a_pickled.fill_value, dtype(999))
+ assert_array_equal(a_pickled.mask, mask)
+
+ def test_pickling_subbaseclass(self):
+ # Test pickling w/ a subclass of ndarray
+ x = np.array([(1.0, 2), (3.0, 4)],
+ dtype=[('x', float), ('y', int)]).view(np.recarray)
+ a = masked_array(x, mask=[(True, False), (False, True)])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.recarray))
+
+ def test_pickling_maskedconstant(self):
+ # Test pickling MaskedConstant
+ mc = np.ma.masked
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto))
+ assert_equal(mc_pickled._baseclass, mc._baseclass)
+ assert_equal(mc_pickled._mask, mc._mask)
+ assert_equal(mc_pickled._data, mc._data)
+
+ def test_pickling_wstructured(self):
+ # Tests pickling w/ structured array
+ a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
+ dtype=[('a', int), ('b', float)])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+
+ def test_pickling_keepalignment(self):
+ # Tests pickling w/ F_CONTIGUOUS arrays
+ a = arange(10)
+ a.shape = (-1, 2)
+ b = a.T
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test = pickle.loads(pickle.dumps(b, protocol=proto))
+ assert_equal(test, b)
+
+ def test_single_element_subscript(self):
+ # Tests single element subscripts of Maskedarrays.
+ a = array([1, 3, 2])
+ b = array([1, 3, 2], mask=[1, 0, 1])
+ assert_equal(a[0].shape, ())
+ assert_equal(b[0].shape, ())
+ assert_equal(b[1].shape, ())
+
+ def test_topython(self):
+ # Tests some communication issues with Python.
+ assert_equal(1, int(array(1)))
+ assert_equal(1.0, float(array(1)))
+ assert_equal(1, int(array([[[1]]])))
+ assert_equal(1.0, float(array([[1]])))
+ assert_raises(TypeError, float, array([1, 1]))
+
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning, 'Warning: converting a masked element')
+ assert_(np.isnan(float(array([1], mask=[1]))))
+
+ a = array([1, 2, 3], mask=[1, 0, 0])
+ assert_raises(TypeError, lambda: float(a))
+ assert_equal(float(a[-1]), 3.)
+ assert_(np.isnan(float(a[0])))
+ assert_raises(TypeError, int, a)
+ assert_equal(int(a[-1]), 3)
+ assert_raises(MAError, lambda:int(a[0]))
+
+ def test_oddfeatures_1(self):
+ # Test of other odd features
+ x = arange(20)
+ x = x.reshape(4, 5)
+ x.flat[5] = 12
+ assert_(x[1, 0] == 12)
+ z = x + 10j * x
+ assert_equal(z.real, x)
+ assert_equal(z.imag, 10 * x)
+ assert_equal((z * conjugate(z)).real, 101 * x * x)
+ z.imag[...] = 0.0
+
+ x = arange(10)
+ x[3] = masked
+ assert_(str(x[3]) == str(masked))
+ c = x >= 8
+ assert_(count(where(c, masked, masked)) == 0)
+ assert_(shape(where(c, masked, masked)) == c.shape)
+
+ z = masked_where(c, x)
+ assert_(z.dtype is x.dtype)
+ assert_(z[3] is masked)
+ assert_(z[4] is not masked)
+ assert_(z[7] is not masked)
+ assert_(z[8] is masked)
+ assert_(z[9] is masked)
+ assert_equal(x, z)
+
+ def test_oddfeatures_2(self):
+ # Tests some more features.
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+
+ @suppress_copy_mask_on_assignment
+ def test_oddfeatures_3(self):
+ # Tests some generic features
+ atest = array([10], mask=True)
+ btest = array([20])
+ idx = atest.mask
+ atest[idx] = btest[idx]
+ assert_equal(atest, [20])
+
+ def test_filled_with_object_dtype(self):
+ a = np.ma.masked_all(1, dtype='O')
+ assert_equal(a.filled('x')[0], 'x')
+
+ def test_filled_with_flexible_dtype(self):
+ # Test filled w/ flexible dtype
+ flexi = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+ flexi[0] = masked
+ assert_equal(flexi.filled(),
+ np.array([(default_fill_value(0),
+ default_fill_value('0'),
+ default_fill_value(0.),)], dtype=flexi.dtype))
+ flexi[0] = masked
+ assert_equal(flexi.filled(1),
+ np.array([(1, '1', 1.)], dtype=flexi.dtype))
+
+ def test_filled_with_mvoid(self):
+ # Test filled w/ mvoid
+ ndtype = [('a', int), ('b', float)]
+ a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)
+ # Filled using default
+ test = a.filled()
+ assert_equal(tuple(test), (1, default_fill_value(1.)))
+ # Explicit fill_value
+ test = a.filled((-1, -1))
+ assert_equal(tuple(test), (1, -1))
+ # Using predefined filling values
+ a.fill_value = (-999, -999)
+ assert_equal(tuple(a.filled()), (1, -999))
+
+ def test_filled_with_nested_dtype(self):
+ # Test filled w/ nested dtype
+ ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
+ a = array([(1, (1, 1)), (2, (2, 2))],
+ mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)
+ test = a.filled(0)
+ control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)
+ assert_equal(test, control)
+
+ test = a['B'].filled(0)
+ control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
+ assert_equal(test, control)
+
+ # test if mask gets set correctly (see #6760)
+ Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
+ assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
+ ('f1', 'i1', (2, 2))], (2, 2))]))
+ assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
+ ('f1', '?', (2, 2))], (2, 2))]))
+
+ def test_filled_with_f_order(self):
+ # Test filled w/ F-contiguous array
+ a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
+ mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
+ order='F') # this is currently ignored
+ assert_(a.flags['F_CONTIGUOUS'])
+ assert_(a.filled(0).flags['F_CONTIGUOUS'])
+
+ def test_optinfo_propagation(self):
+ # Checks that _optinfo dictionary isn't back-propagated
+ x = array([1, 2, 3, ], dtype=float)
+ x._optinfo['info'] = '???'
+ y = x.copy()
+ assert_equal(y._optinfo['info'], '???')
+ y._optinfo['info'] = '!!!'
+ assert_equal(x._optinfo['info'], '???')
+
+ def test_optinfo_forward_propagation(self):
+ a = array([1,2,2,4])
+ a._optinfo["key"] = "value"
+ assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], a[:2]._optinfo["key"])
+ assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"])
+ assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"])
+
+ def test_fancy_printoptions(self):
+ # Test printing a masked array w/ fancy dtype.
+ fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
+ test = array([(1, (2, 3.0)), (4, (5, 6.0))],
+ mask=[(1, (0, 1)), (0, (1, 0))],
+ dtype=fancydtype)
+ control = "[(--, (2, --)) (4, (--, 6.0))]"
+ assert_equal(str(test), control)
+
+ # Test 0-d array with multi-dimensional dtype
+ t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]],
+ 0.0),
+ mask = (False, [[True, False, True],
+ [False, False, True]],
+ False),
+ dtype = "int, (2,3)float, float")
+ control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
+ assert_equal(str(t_2d0), control)
+
+ def test_flatten_structured_array(self):
+ # Test flatten_structured_array on arrays
+ # On ndarray
+ ndtype = [('a', int), ('b', float)]
+ a = np.array([(1, 1), (2, 2)], dtype=ndtype)
+ test = flatten_structured_array(a)
+ control = np.array([[1., 1.], [2., 2.]], dtype=float)
+ assert_equal(test, control)
+ assert_equal(test.dtype, control.dtype)
+ # On masked_array
+ a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+ test = flatten_structured_array(a)
+ control = array([[1., 1.], [2., 2.]],
+ mask=[[0, 1], [1, 0]], dtype=float)
+ assert_equal(test, control)
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test.mask, control.mask)
+ # On masked array with nested structure
+ ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]
+ a = array([(1, (1, 1.1)), (2, (2, 2.2))],
+ mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
+ test = flatten_structured_array(a)
+ control = array([[1., 1., 1.1], [2., 2., 2.2]],
+ mask=[[0, 1, 0], [1, 0, 1]], dtype=float)
+ assert_equal(test, control)
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test.mask, control.mask)
+ # Keeping the initial shape
+ ndtype = [('a', int), ('b', float)]
+ a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
+ test = flatten_structured_array(a)
+ control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)
+ assert_equal(test, control)
+ assert_equal(test.dtype, control.dtype)
+
+ def test_void0d(self):
+ # Test creating a mvoid object
+ ndtype = [('a', int), ('b', int)]
+ a = np.array([(1, 2,)], dtype=ndtype)[0]
+ f = mvoid(a)
+ assert_(isinstance(f, mvoid))
+
+ a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]
+ assert_(isinstance(a, mvoid))
+
+ a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+ f = mvoid(a._data[0], a._mask[0])
+ assert_(isinstance(f, mvoid))
+
+ def test_mvoid_getitem(self):
+ # Test mvoid.__getitem__
+ ndtype = [('a', int), ('b', int)]
+ a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+ dtype=ndtype)
+ # w/o mask
+ f = a[0]
+ assert_(isinstance(f, mvoid))
+ assert_equal((f[0], f['a']), (1, 1))
+ assert_equal(f['b'], 2)
+ # w/ mask
+ f = a[1]
+ assert_(isinstance(f, mvoid))
+ assert_(f[0] is masked)
+ assert_(f['a'] is masked)
+ assert_equal(f[1], 4)
+
+ # exotic dtype
+ A = masked_array(data=[([0,1],)],
+ mask=[([True, False],)],
+ dtype=[("A", ">i2", (2,))])
+ assert_equal(A[0]["A"], A["A"][0])
+ assert_equal(A[0]["A"], masked_array(data=[0, 1],
+ mask=[True, False], dtype=">i2"))
+
+ def test_mvoid_iter(self):
+ # Test iteration on __getitem__
+ ndtype = [('a', int), ('b', int)]
+ a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+ dtype=ndtype)
+ # w/o mask
+ assert_equal(list(a[0]), [1, 2])
+ # w/ mask
+ assert_equal(list(a[1]), [masked, 4])
+
+ def test_mvoid_print(self):
+ # Test printing a mvoid
+ mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
+ assert_equal(str(mx[0]), "(1, 1)")
+ mx['b'][0] = masked
+ ini_display = masked_print_option._display
+ masked_print_option.set_display("-X-")
+ try:
+ assert_equal(str(mx[0]), "(1, -X-)")
+ assert_equal(repr(mx[0]), "(1, -X-)")
+ finally:
+ masked_print_option.set_display(ini_display)
+
+ # also check if there are object datatypes (see gh-7493)
+ mx = array([(1,), (2,)], dtype=[('a', 'O')])
+ assert_equal(str(mx[0]), "(1,)")
+
+ def test_mvoid_multidim_print(self):
+
+ # regression test for gh-6019
+ t_ma = masked_array(data = [([1, 2, 3],)],
+ mask = [([False, True, False],)],
+ fill_value = ([999999, 999999, 999999],),
+ dtype = [('a', '<i4', (3,))])
+ assert_(str(t_ma[0]) == "([1, --, 3],)")
+ assert_(repr(t_ma[0]) == "([1, --, 3],)")
+
+ # additional tests with structured arrays
+
+ t_2d = masked_array(data = [([[1, 2], [3,4]],)],
+ mask = [([[False, True], [True, False]],)],
+ dtype = [('a', '<i4', (2,2))])
+ assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
+ assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
+
+ t_0d = masked_array(data = [(1,2)],
+ mask = [(True,False)],
+ dtype = [('a', '<i4'), ('b', '<i4')])
+ assert_(str(t_0d[0]) == "(--, 2)")
+ assert_(repr(t_0d[0]) == "(--, 2)")
+
+ t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
+ mask = [([[False, True], [True, False]], False)],
+ dtype = [('a', '<i4', (2,2)), ('b', float)])
+ assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
+ assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
+
+ t_ne = masked_array(data=[(1, (1, 1))],
+ mask=[(True, (True, False))],
+ dtype = [('a', '<i4'), ('b', 'i4,i4')])
+ assert_(str(t_ne[0]) == "(--, (--, 1))")
+ assert_(repr(t_ne[0]) == "(--, (--, 1))")
+
+ def test_object_with_array(self):
+ mx1 = masked_array([1.], mask=[True])
+ mx2 = masked_array([1., 2.])
+ mx = masked_array([mx1, mx2], mask=[False, True], dtype=object)
+ assert_(mx[0] is mx1)
+ assert_(mx[1] is not mx2)
+ assert_(np.all(mx[1].data == mx2.data))
+ assert_(np.all(mx[1].mask))
+ # check that we return a view.
+ mx[1].data[0] = 0.
+ assert_(mx2[0] == 0.)
+
+
+class TestMaskedArrayArithmetic:
+ # Base test class for MaskedArray arithmetic.
+
+ def setup_method(self):
+ # Base data definition.
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ a10 = 10.
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = masked_array(x, mask=m1)
+ ym = masked_array(y, mask=m2)
+ z = np.array([-.5, 0., .5, .8])
+ zm = masked_array(z, mask=[0, 1, 0, 0])
+ xf = np.where(m1, 1e+20, x)
+ xm.set_fill_value(1e+20)
+ self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
+ self.err_status = np.geterr()
+ np.seterr(divide='ignore', invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.err_status)
+
+ def test_basic_arithmetic(self):
+ # Test of basic arithmetic.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ a2d = array([[1, 2], [0, 4]])
+ a2dm = masked_array(a2d, [[0, 0], [1, 0]])
+ assert_equal(a2d * a2d, a2d * a2dm)
+ assert_equal(a2d + a2d, a2d + a2dm)
+ assert_equal(a2d - a2d, a2d - a2dm)
+ for s in [(12,), (4, 3), (2, 6)]:
+ x = x.reshape(s)
+ y = y.reshape(s)
+ xm = xm.reshape(s)
+ ym = ym.reshape(s)
+ xf = xf.reshape(s)
+ assert_equal(-x, -xm)
+ assert_equal(x + y, xm + ym)
+ assert_equal(x - y, xm - ym)
+ assert_equal(x * y, xm * ym)
+ assert_equal(x / y, xm / ym)
+ assert_equal(a10 + y, a10 + ym)
+ assert_equal(a10 - y, a10 - ym)
+ assert_equal(a10 * y, a10 * ym)
+ assert_equal(a10 / y, a10 / ym)
+ assert_equal(x + a10, xm + a10)
+ assert_equal(x - a10, xm - a10)
+ assert_equal(x * a10, xm * a10)
+ assert_equal(x / a10, xm / a10)
+ assert_equal(x ** 2, xm ** 2)
+ assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)
+ assert_equal(x ** y, xm ** ym)
+ assert_equal(np.add(x, y), add(xm, ym))
+ assert_equal(np.subtract(x, y), subtract(xm, ym))
+ assert_equal(np.multiply(x, y), multiply(xm, ym))
+ assert_equal(np.divide(x, y), divide(xm, ym))
+
+ def test_divide_on_different_shapes(self):
+ x = arange(6, dtype=float)
+ x.shape = (2, 3)
+ y = arange(3, dtype=float)
+
+ z = x / y
+ assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
+ assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
+
+ z = x / y[None,:]
+ assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])
+ assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])
+
+ y = arange(2, dtype=float)
+ z = x / y[:, None]
+ assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])
+ assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
+
+ def test_mixed_arithmetic(self):
+ # Tests mixed arithmetic.
+ na = np.array([1])
+ ma = array([1])
+ assert_(isinstance(na + ma, MaskedArray))
+ assert_(isinstance(ma + na, MaskedArray))
+
+ def test_limits_arithmetic(self):
+ tiny = np.finfo(float).tiny
+ a = array([tiny, 1. / tiny, 0.])
+ assert_equal(getmaskarray(a / 2), [0, 0, 0])
+ assert_equal(getmaskarray(2 / a), [1, 0, 1])
+
+ def test_masked_singleton_arithmetic(self):
+ # Tests some scalar arithmetic on MaskedArrays.
+ # Masked singleton should remain masked no matter what
+ xm = array(0, mask=1)
+ assert_((1 / array(0)).mask)
+ assert_((1 + xm).mask)
+ assert_((-xm).mask)
+ assert_(maximum(xm, xm).mask)
+ assert_(minimum(xm, xm).mask)
+
+ def test_masked_singleton_equality(self):
+ # Tests (in)equality on masked singleton
+ a = array([1, 2, 3], mask=[1, 1, 0])
+ assert_((a[0] == 0) is masked)
+ assert_((a[0] != 0) is masked)
+ assert_equal((a[-1] == 0), False)
+ assert_equal((a[-1] != 0), True)
+
+ def test_arithmetic_with_masked_singleton(self):
+ # Checks that there's no collapsing to masked
+ x = masked_array([1, 2])
+ y = x * masked
+ assert_equal(y.shape, x.shape)
+ assert_equal(y._mask, [True, True])
+ y = x[0] * masked
+ assert_(y is masked)
+ y = x + masked
+ assert_equal(y.shape, x.shape)
+ assert_equal(y._mask, [True, True])
+
+ def test_arithmetic_with_masked_singleton_on_1d_singleton(self):
+ # Check that we're not losing the shape of a singleton
+ x = masked_array([1, ])
+ y = x + masked
+ assert_equal(y.shape, x.shape)
+ assert_equal(y.mask, [True, ])
+
+ def test_scalar_arithmetic(self):
+ x = array(0, mask=0)
+ assert_equal(x.filled().ctypes.data, x.ctypes.data)
+ # Make sure we don't lose the shape in some circumstances
+ xm = array((0, 0)) / 0.
+ assert_equal(xm.shape, (2,))
+ assert_equal(xm.mask, [1, 1])
+
+ def test_basic_ufuncs(self):
+ # Test various functions such as sin, cos.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ assert_equal(np.cos(x), cos(xm))
+ assert_equal(np.cosh(x), cosh(xm))
+ assert_equal(np.sin(x), sin(xm))
+ assert_equal(np.sinh(x), sinh(xm))
+ assert_equal(np.tan(x), tan(xm))
+ assert_equal(np.tanh(x), tanh(xm))
+ assert_equal(np.sqrt(abs(x)), sqrt(xm))
+ assert_equal(np.log(abs(x)), log(xm))
+ assert_equal(np.log10(abs(x)), log10(xm))
+ assert_equal(np.exp(x), exp(xm))
+ assert_equal(np.arcsin(z), arcsin(zm))
+ assert_equal(np.arccos(z), arccos(zm))
+ assert_equal(np.arctan(z), arctan(zm))
+ assert_equal(np.arctan2(x, y), arctan2(xm, ym))
+ assert_equal(np.absolute(x), absolute(xm))
+ assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))
+ assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))
+ assert_equal(np.equal(x, y), equal(xm, ym))
+ assert_equal(np.not_equal(x, y), not_equal(xm, ym))
+ assert_equal(np.less(x, y), less(xm, ym))
+ assert_equal(np.greater(x, y), greater(xm, ym))
+ assert_equal(np.less_equal(x, y), less_equal(xm, ym))
+ assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))
+ assert_equal(np.conjugate(x), conjugate(xm))
+
+ def test_count_func(self):
+ # Tests count
+ assert_equal(1, count(1))
+ assert_equal(0, count(array(1, mask=[1])))
+
+ ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+ res = count(ott)
+ assert_(res.dtype.type is np.intp)
+ assert_equal(3, res)
+
+ ott = ott.reshape((2, 2))
+ res = count(ott)
+ assert_(res.dtype.type is np.intp)
+ assert_equal(3, res)
+ res = count(ott, 0)
+ assert_(isinstance(res, ndarray))
+ assert_equal([1, 2], res)
+ assert_(getmask(res) is nomask)
+
+ ott = array([0., 1., 2., 3.])
+ res = count(ott, 0)
+ assert_(isinstance(res, ndarray))
+ assert_(res.dtype.type is np.intp)
+ assert_raises(np.AxisError, ott.count, axis=1)
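+
+ # Illustrative sketch (editor's addition, not an original test):
+ # count returns a scalar without an axis and an ndarray along one,
+ # counting only the unmasked entries.
+ def _demo_count(self):
+ m = array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
+ assert_equal(count(m), 3)
+ assert_equal(count(m, axis=1), [1, 2])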
+
+ def test_count_on_python_builtins(self):
+ # Tests that count works on Python builtins (issue #8019)
+ assert_equal(3, count([1,2,3]))
+ assert_equal(2, count((1,2)))
+
+ def test_minmax_func(self):
+ # Tests minimum and maximum.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ # built-in max/min don't work on shaped arrays, so test on raveled data
+ xr = np.ravel(x)
+ xmr = ravel(xm)
+ # the following are true because the data were carefully selected
+ assert_equal(max(xr), maximum.reduce(xmr))
+ assert_equal(min(xr), minimum.reduce(xmr))
+
+ assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
+ assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
+ x = arange(5)
+ y = arange(5) - 2
+ x[3] = masked
+ y[0] = masked
+ assert_equal(minimum(x, y), where(less(x, y), x, y))
+ assert_equal(maximum(x, y), where(greater(x, y), x, y))
+ assert_(minimum.reduce(x) == 0)
+ assert_(maximum.reduce(x) == 4)
+
+ x = arange(4).reshape(2, 2)
+ x[-1, -1] = masked
+ assert_equal(maximum.reduce(x, axis=None), 2)
+
+ def test_minimummaximum_func(self):
+ a = np.ones((2, 2))
+ aminimum = minimum(a, a)
+ assert_(isinstance(aminimum, MaskedArray))
+ assert_equal(aminimum, np.minimum(a, a))
+
+ aminimum = minimum.outer(a, a)
+ assert_(isinstance(aminimum, MaskedArray))
+ assert_equal(aminimum, np.minimum.outer(a, a))
+
+ amaximum = maximum(a, a)
+ assert_(isinstance(amaximum, MaskedArray))
+ assert_equal(amaximum, np.maximum(a, a))
+
+ amaximum = maximum.outer(a, a)
+ assert_(isinstance(amaximum, MaskedArray))
+ assert_equal(amaximum, np.maximum.outer(a, a))
+
+ def test_minmax_reduce(self):
+ # Test np.maximum.reduce on an array with an all-False mask
+ a = array([1, 2, 3], mask=[False, False, False])
+ b = np.maximum.reduce(a)
+ assert_equal(b, 3)
+
+ def test_minmax_funcs_with_output(self):
+ # Tests the min/max functions with explicit outputs
+ mask = np.random.rand(12).round()
+ xm = array(np.random.uniform(0, 10, 12), mask=mask)
+ xm.shape = (3, 4)
+ for funcname in ('min', 'max'):
+ # Initialize
+ npfunc = getattr(np, funcname)
+ mafunc = getattr(numpy.ma.core, funcname)
+ # Use the np version
+ nout = np.empty((4,), dtype=int)
+ try:
+ result = npfunc(xm, axis=0, out=nout)
+ except MaskError:
+ pass
+ nout = np.empty((4,), dtype=float)
+ result = npfunc(xm, axis=0, out=nout)
+ assert_(result is nout)
+ # Use the ma version
+ nout.fill(-999)
+ result = mafunc(xm, axis=0, out=nout)
+ assert_(result is nout)
+
+ def test_minmax_methods(self):
+ # Additional tests on max/min
+ (_, _, _, _, _, xm, _, _, _, _) = self.d
+ xm.shape = (xm.size,)
+ assert_equal(xm.max(), 10)
+ assert_(xm[0].max() is masked)
+ assert_(xm[0].max(0) is masked)
+ assert_(xm[0].max(-1) is masked)
+ assert_equal(xm.min(), -10.)
+ assert_(xm[0].min() is masked)
+ assert_(xm[0].min(0) is masked)
+ assert_(xm[0].min(-1) is masked)
+ assert_equal(xm.ptp(), 20.)
+ assert_(xm[0].ptp() is masked)
+ assert_(xm[0].ptp(0) is masked)
+ assert_(xm[0].ptp(-1) is masked)
+
+ x = array([1, 2, 3], mask=True)
+ assert_(x.min() is masked)
+ assert_(x.max() is masked)
+ assert_(x.ptp() is masked)
+
+ def test_minmax_dtypes(self):
+ # Additional tests on max/min for non-standard float and complex dtypes
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ a10 = 10.
+ an10 = -10.0
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ xm = masked_array(x, mask=m1)
+ xm.set_fill_value(1e+20)
+ float_dtypes = [np.half, np.single, np.double,
+ np.longdouble, np.cfloat, np.cdouble, np.clongdouble]
+ for float_dtype in float_dtypes:
+ assert_equal(masked_array(x, mask=m1, dtype=float_dtype).max(),
+ float_dtype(a10))
+ assert_equal(masked_array(x, mask=m1, dtype=float_dtype).min(),
+ float_dtype(an10))
+
+ assert_equal(xm.min(), an10)
+ assert_equal(xm.max(), a10)
+
+ # Complex types only test
+ for float_dtype in float_dtypes[-3:]:
+ ym = masked_array([1e20+1j, 1e20-2j, 1e20-1j], mask=[0, 1, 0],
+ dtype=float_dtype)
+ assert_equal(ym.min(), float_dtype(1e20-1j))
+ assert_equal(ym.max(), float_dtype(1e20+1j))
+
+ zm = masked_array([np.inf+2j, np.inf+3j, -np.inf-1j], mask=[0, 1, 0],
+ dtype=float_dtype)
+ assert_equal(zm.min(), float_dtype(-np.inf-1j))
+ assert_equal(zm.max(), float_dtype(np.inf+2j))
+
+ cmax = np.inf - 1j * np.finfo(np.float64).max
+ assert masked_array([-cmax, 0], mask=[0, 1]).max() == -cmax
+ assert masked_array([cmax, 0], mask=[0, 1]).min() == cmax
+
+ def test_addsumprod(self):
+ # Tests add, sum, product.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ assert_equal(np.add.reduce(x), add.reduce(x))
+ assert_equal(np.add.accumulate(x), add.accumulate(x))
+ assert_equal(4, sum(array(4), axis=0))
+ assert_equal(np.sum(x, axis=0), sum(x, axis=0))
+ assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
+ assert_equal(np.sum(x, 0), sum(x, 0))
+ assert_equal(np.product(x, axis=0), product(x, axis=0))
+ assert_equal(np.product(x, 0), product(x, 0))
+ assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
+ s = (3, 4)
+ x.shape = y.shape = xm.shape = ym.shape = s
+ if len(s) > 1:
+ assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
+ assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
+ assert_equal(np.sum(x, 1), sum(x, 1))
+ assert_equal(np.product(x, 1), product(x, 1))
+
+ def test_binops_d2D(self):
+ # Test binary operations on 2D data
+ a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+ test = a * b
+ control = array([[2., 3.], [2., 2.], [3., 3.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b * a
+ control = array([[2., 3.], [4., 5.], [6., 7.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ a = array([[1.], [2.], [3.]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ test = a * b
+ control = array([[2, 3], [8, 10], [18, 3]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b * a
+ control = array([[2, 3], [8, 10], [18, 7]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ def test_domained_binops_d2D(self):
+ # Test domained binary operations on 2D data
+ a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+ test = a / b
+ control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b / a
+ control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ a = array([[1.], [2.], [3.]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ test = a / b
+ control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b / a
+ control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ def test_noshrinking(self):
+ # Check that we don't shrink a mask when not wanted
+ # Binary operations
+ a = masked_array([1., 2., 3.], mask=[False, False, False],
+ shrink=False)
+ b = a + 1
+ assert_equal(b.mask, [0, 0, 0])
+ # In place binary operation
+ a += 1
+ assert_equal(a.mask, [0, 0, 0])
+ # Domained binary operation
+ b = a / 1.
+ assert_equal(b.mask, [0, 0, 0])
+ # In place binary operation
+ a /= 1.
+ assert_equal(a.mask, [0, 0, 0])
+
+ def test_ufunc_nomask(self):
+ # check the case where ufuncs should set the mask to false
+ m = np.ma.array([1])
+ # check we don't get array([False], dtype=bool)
+ assert_equal(np.true_divide(m, 5).mask.shape, ())
+
+ def test_noshrink_on_creation(self):
+ # Check that the mask is not shrunk on array creation when not wanted
+ a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False)
+ assert_equal(a.mask, [0, 0, 0])
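+
+ # Illustrative sketch (editor's addition, not an original test): by
+ # default masked_values shrinks an all-False mask to nomask, while
+ # shrink=False keeps the full boolean array, as checked above.
+ def _demo_shrink_flag(self):
+ assert_(np.ma.masked_values([1., 2.], 1.5).mask is np.ma.nomask)
+ assert_equal(np.ma.masked_values([1., 2.], 1.5, shrink=False).mask,
+ [0, 0])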
+
+ def test_mod(self):
+ # Tests mod
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ assert_equal(mod(x, y), mod(xm, ym))
+ test = mod(ym, xm)
+ assert_equal(test, np.mod(ym, xm))
+ assert_equal(test.mask, mask_or(xm.mask, ym.mask))
+ test = mod(xm, ym)
+ assert_equal(test, np.mod(xm, ym))
+ assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))
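+
+ # Illustrative sketch (editor's addition, not an original test): the
+ # mask of mod combines both input masks with the divisor's zero
+ # domain, as asserted above.
+ def _demo_mod_domain(self):
+ test = mod(array([1, 2, 3]), array([0, 2, 0], mask=[0, 1, 0]))
+ assert_equal(test.mask, [1, 1, 1])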
+
+ def test_TakeTransposeInnerOuter(self):
+ # Test of take, transpose, inner, outer products
+ x = arange(24)
+ y = np.arange(24)
+ x[5:6] = masked
+ x = x.reshape(2, 3, 4)
+ y = y.reshape(2, 3, 4)
+ assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
+ assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
+ assert_equal(np.inner(filled(x, 0), filled(y, 0)),
+ inner(x, y))
+ assert_equal(np.outer(filled(x, 0), filled(y, 0)),
+ outer(x, y))
+ y = array(['abc', 1, 'def', 2, 3], object)
+ y[2] = masked
+ t = take(y, [0, 3, 4])
+ assert_(t[0] == 'abc')
+ assert_(t[1] == 2)
+ assert_(t[2] == 3)
+
+ def test_imag_real(self):
+ # Check complex
+ xx = array([1 + 10j, 20 + 2j], mask=[1, 0])
+ assert_equal(xx.imag, [10, 2])
+ assert_equal(xx.imag.filled(), [1e+20, 2])
+ assert_equal(xx.imag.dtype, xx._data.imag.dtype)
+ assert_equal(xx.real, [1, 20])
+ assert_equal(xx.real.filled(), [1e+20, 20])
+ assert_equal(xx.real.dtype, xx._data.real.dtype)
+
+ def test_methods_with_output(self):
+ xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+ xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+ funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)
+
+ for funcname in funclist:
+ npfunc = getattr(np, funcname)
+ xmmeth = getattr(xm, funcname)
+ # A ndarray as explicit input
+ output = np.empty(4, dtype=float)
+ output.fill(-9999)
+ result = npfunc(xm, axis=0, out=output)
+ # ... the result should be the given output
+ assert_(result is output)
+ assert_equal(result, xmmeth(axis=0, out=output))
+
+ output = empty(4, dtype=int)
+ result = xmmeth(axis=0, out=output)
+ assert_(result is output)
+ assert_(output[0] is masked)
+
+ def test_eq_on_structured(self):
+ # Test the equality of structured arrays
+ ndtype = [('A', int), ('B', int)]
+ a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+
+ test = (a == a)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
+ test = (a == a[0])
+ assert_equal(test.data, [True, False])
+ assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
+ b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+ test = (a == b)
+ assert_equal(test.data, [False, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = (a[0] == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+ test = (a == b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
+ # complicated dtype, 2-dimensional array.
+ ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
+ a = array([[(1, (1, 1)), (2, (2, 2))],
+ [(3, (3, 3)), (4, (4, 4))]],
+ mask=[[(0, (1, 0)), (0, (0, 1))],
+ [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)
+ test = (a[0, 0] == a)
+ assert_equal(test.data, [[True, False], [False, False]])
+ assert_equal(test.mask, [[False, False], [False, True]])
+ assert_(test.fill_value == True)
+
+ def test_ne_on_structured(self):
+ # Test inequality on structured arrays
+ ndtype = [('A', int), ('B', int)]
+ a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+
+ test = (a != a)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
+ test = (a != a[0])
+ assert_equal(test.data, [False, True])
+ assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
+ b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
+ test = (a != b)
+ assert_equal(test.data, [True, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = (a[0] != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
+ test = (a != b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
+ # complicated dtype, 2-dimensional array.
+ ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
+ a = array([[(1, (1, 1)), (2, (2, 2))],
+ [(3, (3, 3)), (4, (4, 4))]],
+ mask=[[(0, (1, 0)), (0, (0, 1))],
+ [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype)
+ test = (a[0, 0] != a)
+ assert_equal(test.data, [[False, True], [True, True]])
+ assert_equal(test.mask, [[False, False], [False, True]])
+ assert_(test.fill_value == True)
+
+ def test_eq_ne_structured_extra(self):
+ # ensure simple examples are symmetric and make sense.
+ # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465
+ dt = np.dtype('i4,i4')
+ for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),
+ mvoid((1, 2), mask=(0, 1), dtype=dt),
+ mvoid((1, 2), mask=(1, 0), dtype=dt),
+ mvoid((1, 2), mask=(1, 1), dtype=dt)):
+ ma1 = m1.view(MaskedArray)
+ r1 = ma1.view('2i4')
+ for m2 in (np.array((1, 1), dtype=dt),
+ mvoid((1, 1), dtype=dt),
+ mvoid((1, 0), mask=(0, 1), dtype=dt),
+ mvoid((3, 2), mask=(0, 1), dtype=dt)):
+ ma2 = m2.view(MaskedArray)
+ r2 = ma2.view('2i4')
+ eq_expected = (r1 == r2).all()
+ assert_equal(m1 == m2, eq_expected)
+ assert_equal(m2 == m1, eq_expected)
+ assert_equal(ma1 == m2, eq_expected)
+ assert_equal(m1 == ma2, eq_expected)
+ assert_equal(ma1 == ma2, eq_expected)
+ # Also check it is the same if we do it element by element.
+ el_by_el = [m1[name] == m2[name] for name in dt.names]
+ assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)
+ ne_expected = (r1 != r2).any()
+ assert_equal(m1 != m2, ne_expected)
+ assert_equal(m2 != m1, ne_expected)
+ assert_equal(ma1 != m2, ne_expected)
+ assert_equal(m1 != ma2, ne_expected)
+ assert_equal(ma1 != ma2, ne_expected)
+ el_by_el = [m1[name] != m2[name] for name in dt.names]
+ assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)
+
+ @pytest.mark.parametrize('dt', ['S', 'U'])
+ @pytest.mark.parametrize('fill', [None, 'A'])
+ def test_eq_for_strings(self, dt, fill):
+ # Test equality for string dtypes
+ a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+ test = (a == a)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a == a[0])
+ assert_equal(test.data, [True, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+ test = (a == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ test = (a[0] == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = (b == a[0])
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt', ['S', 'U'])
+ @pytest.mark.parametrize('fill', [None, 'A'])
+ def test_ne_for_strings(self, dt, fill):
+ # Test inequality for string dtypes
+ a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+ test = (a != a)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a != a[0])
+ assert_equal(test.data, [False, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+ test = (a != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ test = (a[0] != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = (b != a[0])
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('fill', [None, 1])
+ def test_eq_for_numeric(self, dt1, dt2, fill):
+ # Test equality for numeric dtypes
+ a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
+
+ test = (a == a)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a == a[0])
+ assert_equal(test.data, [True, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
+ test = (a == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ test = (a[0] == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = (b == a[0])
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('fill', [None, 1])
+ def test_ne_for_numeric(self, dt1, dt2, fill):
+ # Test inequality for numeric dtypes
+ a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
+
+ test = (a != a)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a != a[0])
+ assert_equal(test.data, [False, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
+ test = (a != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ test = (a[0] != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = (b != a[0])
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('fill', [None, 1])
+ @pytest.mark.parametrize('op',
+ [operator.le, operator.lt, operator.ge, operator.gt])
+ def test_comparisons_for_numeric(self, op, dt1, dt2, fill):
+ # Test ordering comparisons (<, <=, >, >=) for numeric dtypes
+ a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
+
+ test = op(a, a)
+ assert_equal(test.data, op(a._data, a._data))
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = op(a, a[0])
+ assert_equal(test.data, op(a._data, a._data[0]))
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
+ test = op(a, b)
+ assert_equal(test.data, op(a._data, b._data))
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ test = op(a[0], b)
+ assert_equal(test.data, op(a._data[0], b._data))
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ test = op(b, a[0])
+ assert_equal(test.data, op(b._data, a._data[0]))
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('op',
+ [operator.le, operator.lt, operator.ge, operator.gt])
+ @pytest.mark.parametrize('fill', [None, "N/A"])
+ def test_comparisons_strings(self, op, fill):
+ # See gh-21770, mask propagation is broken for strings (and some other
+ # cases) so we explicitly test strings here.
+ # In principle only == and != may need special handling...
+ ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill)
+ ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill)
+ assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data))
+
+ def test_eq_with_None(self):
+ # Really, comparisons with None should not be done, but check them
+ # anyway. Note that pep8 will flag these tests.
+ # Deprecation is in place for arrays, and when it happens this
+ # test will fail (and have to be changed accordingly).
+
+ # With partial mask
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "Comparison to `None`")
+ a = array([None, 1], mask=[0, 1])
+ assert_equal(a == None, array([True, False], mask=[0, 1]))
+ assert_equal(a.data == None, [True, False])
+ assert_equal(a != None, array([False, True], mask=[0, 1]))
+ # With nomask
+ a = array([None, 1], mask=False)
+ assert_equal(a == None, [True, False])
+ assert_equal(a != None, [False, True])
+ # With complete mask
+ a = array([None, 2], mask=True)
+ assert_equal(a == None, array([False, True], mask=True))
+ assert_equal(a != None, array([True, False], mask=True))
+ # Fully masked, even comparison to None should return "masked"
+ a = masked
+ assert_equal(a == None, masked)
+
+ def test_eq_with_scalar(self):
+ a = array(1)
+ assert_equal(a == 1, True)
+ assert_equal(a == 0, False)
+ assert_equal(a != 1, False)
+ assert_equal(a != 0, True)
+ b = array(1, mask=True)
+ assert_equal(b == 0, masked)
+ assert_equal(b == 1, masked)
+ assert_equal(b != 0, masked)
+ assert_equal(b != 1, masked)
+
+ def test_eq_different_dimensions(self):
+ m1 = array([1, 1], mask=[0, 1])
+ # test comparison with both masked and regular arrays.
+ for m2 in (array([[0, 1], [1, 2]]),
+ np.array([[0, 1], [1, 2]])):
+ test = (m1 == m2)
+ assert_equal(test.data, [[False, False],
+ [True, False]])
+ assert_equal(test.mask, [[False, True],
+ [False, True]])
+
+ def test_numpyarithmetic(self):
+ # Check that the mask is not back-propagated when using numpy functions
+ a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
+ control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
+ mask=[1, 1, 0, 0, 1])
+
+ test = log(a)
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_equal(a.mask, [0, 0, 0, 0, 1])
+
+ test = np.log(a)
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_equal(a.mask, [0, 0, 0, 0, 1])
+
+
+class TestMaskedArrayAttributes:
+
+ def test_keepmask(self):
+ # Tests the keep mask flag
+ x = masked_array([1, 2, 3], mask=[1, 0, 0])
+ mx = masked_array(x)
+ assert_equal(mx.mask, x.mask)
+ mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)
+ assert_equal(mx.mask, [0, 1, 0])
+ mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)
+ assert_equal(mx.mask, [1, 1, 0])
+ # keep_mask defaults to True
+ mx = masked_array(x, mask=[0, 1, 0])
+ assert_equal(mx.mask, [1, 1, 0])
+
+ def test_hardmask(self):
+ # Test hard_mask
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ xh = array(d, mask=m, hard_mask=True)
+ # Copy so that updating xs doesn't also update d (whose data xh shares)
+ xs = array(d, mask=m, hard_mask=False, copy=True)
+ xh[[1, 4]] = [10, 40]
+ xs[[1, 4]] = [10, 40]
+ assert_equal(xh._data, [0, 10, 2, 3, 4])
+ assert_equal(xs._data, [0, 10, 2, 3, 40])
+ assert_equal(xs.mask, [0, 0, 0, 1, 0])
+ assert_(xh._hardmask)
+ assert_(not xs._hardmask)
+ xh[1:4] = [10, 20, 30]
+ xs[1:4] = [10, 20, 30]
+ assert_equal(xh._data, [0, 10, 20, 3, 4])
+ assert_equal(xs._data, [0, 10, 20, 30, 40])
+ assert_equal(xs.mask, nomask)
+ xh[0] = masked
+ xs[0] = masked
+ assert_equal(xh.mask, [1, 0, 0, 1, 1])
+ assert_equal(xs.mask, [1, 0, 0, 0, 0])
+ xh[:] = 1
+ xs[:] = 1
+ assert_equal(xh._data, [0, 1, 1, 3, 4])
+ assert_equal(xs._data, [1, 1, 1, 1, 1])
+ assert_equal(xh.mask, [1, 0, 0, 1, 1])
+ assert_equal(xs.mask, nomask)
+ # Switch to soft mask
+ xh.soften_mask()
+ xh[:] = arange(5)
+ assert_equal(xh._data, [0, 1, 2, 3, 4])
+ assert_equal(xh.mask, nomask)
+ # Switch back to hard mask
+ xh.harden_mask()
+ xh[xh < 3] = masked
+ assert_equal(xh._data, [0, 1, 2, 3, 4])
+ assert_equal(xh._mask, [1, 1, 1, 0, 0])
+ xh[filled(xh > 1, False)] = 5
+ assert_equal(xh._data, [0, 1, 2, 5, 5])
+ assert_equal(xh._mask, [1, 1, 1, 0, 0])
+
+ xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)
+ xh[0] = 0
+ assert_equal(xh._data, [[1, 0], [3, 4]])
+ assert_equal(xh._mask, [[1, 0], [0, 0]])
+ xh[-1, -1] = 5
+ assert_equal(xh._data, [[1, 0], [3, 5]])
+ assert_equal(xh._mask, [[1, 0], [0, 0]])
+ xh[filled(xh < 5, False)] = 2
+ assert_equal(xh._data, [[1, 2], [2, 5]])
+ assert_equal(xh._mask, [[1, 0], [0, 0]])
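+
+ # Illustrative sketch (editor's addition, not an original test): with
+ # a hard mask, assignments to masked entries are silently ignored; a
+ # soft mask lets them through and unmasks the entry.
+ def _demo_hardmask_semantics(self):
+ hard = array([1, 2, 3], mask=[0, 1, 0], hard_mask=True)
+ hard[1] = 99
+ assert_equal(hard._data, [1, 2, 3])
+ assert_equal(hard.mask, [0, 1, 0])
+ soft = array([1, 2, 3], mask=[0, 1, 0], hard_mask=False)
+ soft[1] = 99
+ assert_equal(soft._data, [1, 99, 3])
+ assert_equal(soft.mask, [0, 0, 0])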
+
+ def test_hardmask_again(self):
+ # Another test of hardmask
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ xh = array(d, mask=m, hard_mask=True)
+ xh[4:5] = 999
+ xh[0:1] = 999
+ assert_equal(xh._data, [999, 1, 2, 3, 4])
+
+ def test_hardmask_oncemore_yay(self):
+ # OK, yet another test of hardmask
+ # Make sure that harden_mask/soften_mask/unshare_mask return self
+ a = array([1, 2, 3], mask=[1, 0, 0])
+ b = a.harden_mask()
+ assert_equal(a, b)
+ b[0] = 0
+ assert_equal(a, b)
+ assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))
+ a = b.soften_mask()
+ a[0] = 0
+ assert_equal(a, b)
+ assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))
+
+ def test_smallmask(self):
+ # Checks the behaviour of _smallmask
+ a = arange(10)
+ a[1] = masked
+ a[1] = 1
+ assert_equal(a._mask, nomask)
+ a = arange(10)
+ a._smallmask = False
+ a[1] = masked
+ a[1] = 1
+ assert_equal(a._mask, zeros(10))
+
+ def test_shrink_mask(self):
+ # Tests .shrink_mask()
+ a = array([1, 2, 3], mask=[0, 0, 0])
+ b = a.shrink_mask()
+ assert_equal(a, b)
+ assert_equal(a.mask, nomask)
+
+ # The mask cannot be shrunk on structured types, so shrink_mask is a no-op
+ a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)])
+ b = a.copy()
+ a.shrink_mask()
+ assert_equal(a.mask, b.mask)
+
+ def test_flat(self):
+ # Test that flat can return all types of items [#4585, #4615]
+ # test 2-D record array
+ # ... on structured array w/ masked records
+ x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')],
+ [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]],
+ dtype=[('a', int), ('b', float), ('c', '|S8')])
+ x['a'][0, 1] = masked
+ x['b'][1, 0] = masked
+ x['c'][0, 2] = masked
+ x[-1, -1] = masked
+ xflat = x.flat
+ assert_equal(xflat[0], x[0, 0])
+ assert_equal(xflat[1], x[0, 1])
+ assert_equal(xflat[2], x[0, 2])
+ assert_equal(xflat[:3], x[0])
+ assert_equal(xflat[3], x[1, 0])
+ assert_equal(xflat[4], x[1, 1])
+ assert_equal(xflat[5], x[1, 2])
+ assert_equal(xflat[3:], x[1])
+ assert_equal(xflat[-1], x[-1, -1])
+ i = 0
+ j = 0
+ for xf in xflat:
+ assert_equal(xf, x[j, i])
+ i += 1
+ if i >= x.shape[-1]:
+ i = 0
+ j += 1
+
+ def test_assign_dtype(self):
+ # check that the mask's dtype is updated when dtype is changed
+ a = np.zeros(4, dtype='f4,i4')
+
+ m = np.ma.array(a)
+ m.dtype = np.dtype('f4')
+ repr(m) # raises?
+ assert_equal(m.dtype, np.dtype('f4'))
+
+ # check that dtype changes that would change the shape of the mask
+ # incompatibly are not allowed
+ def assign():
+ m = np.ma.array(a)
+ m.dtype = np.dtype('f8')
+ assert_raises(ValueError, assign)
+
+ b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises?
+ assert_equal(b.dtype, np.dtype('f4'))
+
+ # check that nomask is preserved
+ a = np.zeros(4, dtype='f4')
+ m = np.ma.array(a)
+ m.dtype = np.dtype('f4,i4')
+ assert_equal(m.dtype, np.dtype('f4,i4'))
+ assert_equal(m._mask, np.ma.nomask)
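+
+ # Illustrative sketch (editor's addition, not an original test): for
+ # structured data the mask dtype mirrors the data dtype, with one
+ # boolean per field.
+ def _demo_mask_dtype(self):
+ m = array([(1, 1.)], mask=[(0, 1)], dtype=[('a', int), ('b', float)])
+ assert_equal(m.mask.dtype.names, ('a', 'b'))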
+
+
+class TestFillingValues:
+
+ def test_check_on_scalar(self):
+ # Test _check_fill_value set to valid and invalid values
+ _check_fill_value = np.ma.core._check_fill_value
+
+ fval = _check_fill_value(0, int)
+ assert_equal(fval, 0)
+ fval = _check_fill_value(None, int)
+ assert_equal(fval, default_fill_value(0))
+
+ fval = _check_fill_value(0, "|S3")
+ assert_equal(fval, b"0")
+ fval = _check_fill_value(None, "|S3")
+ assert_equal(fval, default_fill_value(b"camelot!"))
+ assert_raises(TypeError, _check_fill_value, 1e+20, int)
+ assert_raises(TypeError, _check_fill_value, 'stuff', int)
+
+ def test_check_on_fields(self):
+ # Tests _check_fill_value with records
+ _check_fill_value = np.ma.core._check_fill_value
+ ndtype = [('a', int), ('b', float), ('c', "|S3")]
+ # A check on a list should return a single record
+ fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
+ assert_(isinstance(fval, ndarray))
+ assert_equal(fval.item(), [-999, -12345678.9, b"???"])
+ # A check on None should output the defaults
+ fval = _check_fill_value(None, ndtype)
+ assert_(isinstance(fval, ndarray))
+ assert_equal(fval.item(), [default_fill_value(0),
+ default_fill_value(0.),
+ asbytes(default_fill_value("0"))])
+ # Using a structured type as fill_value should work
+ fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
+ fval = _check_fill_value(fill_val, ndtype)
+ assert_(isinstance(fval, ndarray))
+ assert_equal(fval.item(), [-999, -12345678.9, b"???"])
+
+ # Using a flexible type w/ a different type shouldn't matter
+ # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured
+ # types by position
+ fill_val = np.array((-999, -12345678.9, "???"),
+ dtype=[("A", int), ("B", float), ("C", "|S3")])
+ fval = _check_fill_value(fill_val, ndtype)
+ assert_(isinstance(fval, ndarray))
+ assert_equal(fval.item(), [-999, -12345678.9, b"???"])
+
+ # Using an object-array shouldn't matter either
+ fill_val = np.ndarray(shape=(1,), dtype=object)
+ fill_val[0] = (-999, -12345678.9, b"???")
+ fval = _check_fill_value(fill_val, object)
+ assert_(isinstance(fval, ndarray))
+ assert_equal(fval.item(), [-999, -12345678.9, b"???"])
+ # NOTE: This test was never run properly as "fill_value" rather than
+ # "fill_val" was assigned. Written properly, it fails.
+ #fill_val = np.array((-999, -12345678.9, "???"))
+ #fval = _check_fill_value(fill_val, ndtype)
+ #assert_(isinstance(fval, ndarray))
+ #assert_equal(fval.item(), [-999, -12345678.9, b"???"])
+ # One-field-only flexible type should work as well
+ ndtype = [("a", int)]
+ fval = _check_fill_value(-999999999, ndtype)
+ assert_(isinstance(fval, ndarray))
+ assert_equal(fval.item(), (-999999999,))
+
+ def test_fillvalue_conversion(self):
+ # Tests the behavior of fill_value during conversion
+ # A tailored _optinfo comment is attached to check that special
+ # attributes are properly carried over by the conversion
+ a = array([b'3', b'4', b'5'])
+ a._optinfo.update({'comment':"updated!"})
+
+ b = array(a, dtype=int)
+ assert_equal(b._data, [3, 4, 5])
+ assert_equal(b.fill_value, default_fill_value(0))
+
+ b = array(a, dtype=float)
+ assert_equal(b._data, [3, 4, 5])
+ assert_equal(b.fill_value, default_fill_value(0.))
+
+ b = a.astype(int)
+ assert_equal(b._data, [3, 4, 5])
+ assert_equal(b.fill_value, default_fill_value(0))
+ assert_equal(b._optinfo['comment'], "updated!")
+
+ b = a.astype([('a', '|S3')])
+ assert_equal(b['a']._data, a._data)
+ assert_equal(b['a'].fill_value, a.fill_value)
+
+ def test_default_fill_value(self):
+ # check all calling conventions
+ f1 = default_fill_value(1.)
+ f2 = default_fill_value(np.array(1.))
+ f3 = default_fill_value(np.array(1.).dtype)
+ assert_equal(f1, f2)
+ assert_equal(f1, f3)
+
+ def test_default_fill_value_structured(self):
+ fields = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+
+ f1 = default_fill_value(fields)
+ f2 = default_fill_value(fields.dtype)
+ expected = np.array((default_fill_value(0),
+ default_fill_value('0'),
+ default_fill_value(0.)), dtype=fields.dtype)
+ assert_equal(f1, expected)
+ assert_equal(f2, expected)
+
+ def test_default_fill_value_void(self):
+ dt = np.dtype([('v', 'V7')])
+ f = default_fill_value(dt)
+ assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v']))
+
+ def test_fillvalue(self):
+ # Yet more fun with the fill_value
+ data = masked_array([1, 2, 3], fill_value=-999)
+ series = data[[0, 2, 1]]
+ assert_equal(series._fill_value, data._fill_value)
+
+ mtype = [('f', float), ('s', '|S3')]
+ x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)
+ x.fill_value = 999
+ assert_equal(x.fill_value.item(), [999., b'999'])
+ assert_equal(x['f'].fill_value, 999)
+ assert_equal(x['s'].fill_value, b'999')
+
+ x.fill_value = (9, '???')
+ assert_equal(x.fill_value.item(), (9, b'???'))
+ assert_equal(x['f'].fill_value, 9)
+ assert_equal(x['s'].fill_value, b'???')
+
+ x = array([1, 2, 3.1])
+ x.fill_value = 999
+ assert_equal(np.asarray(x.fill_value).dtype, float)
+ assert_equal(x.fill_value, 999.)
+ assert_equal(x._fill_value, np.array(999.))
+
+ def test_subarray_fillvalue(self):
+ # gh-10483 test multi-field index fill value
+ fields = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "Numpy has detected")
+ subfields = fields[['i', 'f']]
+ assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+ # test comparison does not raise:
+ subfields[1:] == subfields[:-1]
+
+ def test_fillvalue_exotic_dtype(self):
+ # Tests yet more exotic flexible dtypes
+ _check_fill_value = np.ma.core._check_fill_value
+ ndtype = [('i', int), ('s', '|S8'), ('f', float)]
+ control = np.array((default_fill_value(0),
+ default_fill_value('0'),
+ default_fill_value(0.),),
+ dtype=ndtype)
+ assert_equal(_check_fill_value(None, ndtype), control)
+ # The shape shouldn't matter
+ ndtype = [('f0', float, (2, 2))]
+ control = np.array((default_fill_value(0.),),
+ dtype=[('f0', float)]).astype(ndtype)
+ assert_equal(_check_fill_value(None, ndtype), control)
+ control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
+ assert_equal(_check_fill_value(0, ndtype), control)
+
+ ndtype = np.dtype("int, (2,3)float, float")
+ control = np.array((default_fill_value(0),
+ default_fill_value(0.),
+ default_fill_value(0.),),
+ dtype="int, float, float").astype(ndtype)
+ test = _check_fill_value(None, ndtype)
+ assert_equal(test, control)
+ control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
+ assert_equal(_check_fill_value(0, ndtype), control)
+ # but when indexing, fill value should become scalar not tuple
+ # See issue #6723
+ M = masked_array(control)
+ assert_equal(M["f1"].fill_value.ndim, 0)
+
+ def test_fillvalue_datetime_timedelta(self):
+ # Test default fillvalue for datetime64 and timedelta64 types.
+ # See issue #4476, this would return '?' which would cause errors
+ # elsewhere
+
+ for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
+ "h", "D", "W", "M", "Y"):
+ control = numpy.datetime64("NaT", timecode)
+ test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
+ np.testing.assert_equal(test, control)
+
+ control = numpy.timedelta64("NaT", timecode)
+ test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
+ np.testing.assert_equal(test, control)
+
+ def test_extremum_fill_value(self):
+ # Tests extremum fill values for flexible type.
+ a = array([(1, (2, 3)), (4, (5, 6))],
+ dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
+ test = a.fill_value
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test['A'], default_fill_value(a['A']))
+ assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
+ assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
+
+ test = minimum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], minimum_fill_value(a['A']))
+ assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
+ assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
+ assert_equal(test[1], minimum_fill_value(a['B']))
+
+ test = maximum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], maximum_fill_value(a['A']))
+ assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
+ assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
+ assert_equal(test[1], maximum_fill_value(a['B']))
+
+ def test_extremum_fill_value_subdtype(self):
+ a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)])
+
+ test = minimum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], np.full(3, minimum_fill_value(a['value'])))
+
+ test = maximum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], np.full(3, maximum_fill_value(a['value'])))
+
+ def test_fillvalue_individual_fields(self):
+ # Test setting fill_value on individual fields
+ ndtype = [('a', int), ('b', int)]
+ # Explicit fill_value
+ a = array(list(zip([1, 2, 3], [4, 5, 6])),
+ fill_value=(-999, -999), dtype=ndtype)
+ aa = a['a']
+ aa.set_fill_value(10)
+ assert_equal(aa._fill_value, np.array(10))
+ assert_equal(tuple(a.fill_value), (10, -999))
+ a.fill_value['b'] = -10
+ assert_equal(tuple(a.fill_value), (10, -10))
+ # Implicit fill_value
+ t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=ndtype)
+ tt = t['a']
+ tt.set_fill_value(10)
+ assert_equal(tt._fill_value, np.array(10))
+ assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))
+
+ def test_fillvalue_implicit_structured_array(self):
+ # Check that fill_value is always defined for structured arrays
+ ndtype = ('b', float)
+ adtype = ('a', float)
+ a = array([(1.,), (2.,)], mask=[(False,), (False,)],
+ fill_value=(np.nan,), dtype=np.dtype([adtype]))
+ b = empty(a.shape, dtype=[adtype, ndtype])
+ b['a'] = a['a']
+ b['a'].set_fill_value(a['a'].fill_value)
+ f = b._fill_value[()]
+ assert_(np.isnan(f[0]))
+ assert_equal(f[-1], default_fill_value(1.))
+
+ def test_fillvalue_as_arguments(self):
+ # Test adding a fill_value parameter to empty/ones/zeros
+ a = empty(3, fill_value=999.)
+ assert_equal(a.fill_value, 999.)
+
+ a = ones(3, fill_value=999., dtype=float)
+ assert_equal(a.fill_value, 999.)
+
+ a = zeros(3, fill_value=0., dtype=complex)
+ assert_equal(a.fill_value, 0.)
+
+ a = identity(3, fill_value=0., dtype=complex)
+ assert_equal(a.fill_value, 0.)
+
+ def test_shape_argument(self):
+ # Test that shape can be provided as an argument
+ # GH issue 6106
+ a = empty(shape=(3, ))
+ assert_equal(a.shape, (3, ))
+
+ a = ones(shape=(3, ), dtype=float)
+ assert_equal(a.shape, (3, ))
+
+ a = zeros(shape=(3, ), dtype=complex)
+ assert_equal(a.shape, (3, ))
+
+ def test_fillvalue_in_view(self):
+ # Test the behavior of fill_value in view
+
+ # Create initial masked array
+ x = array([1, 2, 3], fill_value=1, dtype=np.int64)
+
+ # Check that fill_value is preserved by default
+ y = x.view()
+ assert_(y.fill_value == 1)
+
+ # Check that fill_value is preserved when an ndarray sub-class with
+ # a _fill_value attribute is passed positionally (as the type)
+ y = x.view(MaskedArray)
+ assert_(y.fill_value == 1)
+
+ # Check that fill_value is preserved when the sub-class is passed
+ # via the explicit type= keyword (by default, the first argument is
+ # dtype, not type)
+ y = x.view(type=MaskedArray)
+ assert_(y.fill_value == 1)
+
+ # Check that code does not crash if passed an ndarray sub-class that
+ # does not have a _fill_value attribute
+ y = x.view(np.ndarray)
+ y = x.view(type=np.ndarray)
+
+ # Check that fill_value can be overridden with view
+ y = x.view(MaskedArray, fill_value=2)
+ assert_(y.fill_value == 2)
+
+ # Check that fill_value can be overridden with view (using type=)
+ y = x.view(type=MaskedArray, fill_value=2)
+ assert_(y.fill_value == 2)
+
+ # Check that fill_value gets reset if passed a dtype but not a
+ # fill_value. This is because even though in some cases one can safely
+ # cast the fill_value, e.g. if taking an int64 view of an int32 array,
+ # in other cases, this cannot be done (e.g. int32 view of an int64
+ # array with a large fill_value).
+ y = x.view(dtype=np.int32)
+ assert_(y.fill_value == 999999)
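+
+ # Illustrative sketch (editor's addition, not an original test) of the
+ # comment above: an int32 view of an int64 array cannot safely keep a
+ # large int64 fill_value, hence the reset to the integer default.
+ def _demo_fillvalue_reset_on_dtype_view(self):
+ big = array([1, 2], fill_value=2**40, dtype=np.int64)
+ assert_equal(big.view(dtype=np.int32).fill_value, 999999)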
+
+ def test_fillvalue_bytes_or_str(self):
+ # Test whether fill values work as expected for structured dtypes
+ # containing bytes or str. See issue #7259.
+ a = empty(shape=(3, ), dtype="(2)3S,(2)3U")
+ assert_equal(a["f0"].fill_value, default_fill_value(b"spam"))
+ assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
+
+
+class TestUfuncs:
+ # Test class for the application of ufuncs on MaskedArrays.
+
+ def setup_method(self):
+ # Base data definition.
+ self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
+ array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
+ self.err_status = np.geterr()
+ np.seterr(divide='ignore', invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.err_status)
+
+ def test_testUfuncRegression(self):
+ # Tests new ufuncs on MaskedArrays.
+ for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
+ 'sin', 'cos', 'tan',
+ 'arcsin', 'arccos', 'arctan',
+ 'sinh', 'cosh', 'tanh',
+ 'arcsinh',
+ 'arccosh',
+ 'arctanh',
+ 'absolute', 'fabs', 'negative',
+ 'floor', 'ceil',
+ 'logical_not',
+ 'add', 'subtract', 'multiply',
+ 'divide', 'true_divide', 'floor_divide',
+ 'remainder', 'fmod', 'hypot', 'arctan2',
+ 'equal', 'not_equal', 'less_equal', 'greater_equal',
+ 'less', 'greater',
+ 'logical_and', 'logical_or', 'logical_xor',
+ ]:
+ try:
+ uf = getattr(umath, f)
+ except AttributeError:
+ uf = getattr(fromnumeric, f)
+ mf = getattr(numpy.ma.core, f)
+ args = self.d[:uf.nin]
+ ur = uf(*args)
+ mr = mf(*args)
+ assert_equal(ur.filled(0), mr.filled(0), f)
+ assert_mask_equal(ur.mask, mr.mask, err_msg=f)
+
+ def test_reduce(self):
+ # Tests reduce on MaskedArrays.
+ a = self.d[0]
+ assert_(not alltrue(a, axis=0))
+ assert_(sometrue(a, axis=0))
+ assert_equal(sum(a[:3], axis=0), 0)
+ assert_equal(product(a, axis=0), 0)
+ assert_equal(add.reduce(a), pi)
+
+ def test_minmax(self):
+ # Tests extrema on MaskedArrays.
+ a = arange(1, 13).reshape(3, 4)
+ amask = masked_where(a < 5, a)
+ assert_equal(amask.max(), a.max())
+ assert_equal(amask.min(), 5)
+ assert_equal(amask.max(0), a.max(0))
+ assert_equal(amask.min(0), [5, 6, 7, 8])
+ assert_(amask.max(1)[0].mask)
+ assert_(amask.min(1)[0].mask)
+
+ def test_ndarray_mask(self):
+ # Check that the mask of the result is an ndarray (not a MaskedArray)
+ a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
+ test = np.sqrt(a)
+ control = masked_array([-1, 0, 1, np.sqrt(2), -1],
+ mask=[1, 0, 0, 0, 1])
+ assert_equal(test, control)
+ assert_equal(test.mask, control.mask)
+ assert_(not isinstance(test.mask, MaskedArray))
+
+ def test_treatment_of_NotImplemented(self):
+ # Check that NotImplemented is returned at appropriate places
+
+ a = masked_array([1., 2.], mask=[1, 0])
+ assert_raises(TypeError, operator.mul, a, "abc")
+ assert_raises(TypeError, operator.truediv, a, "abc")
+
+ class MyClass:
+ __array_priority__ = a.__array_priority__ + 1
+
+ def __mul__(self, other):
+ return "My mul"
+
+ def __rmul__(self, other):
+ return "My rmul"
+
+ me = MyClass()
+ assert_(me * a == "My mul")
+ assert_(a * me == "My rmul")
+
+ # and that __array_priority__ is respected
+ class MyClass2:
+ __array_priority__ = 100
+
+ def __mul__(self, other):
+ return "Me2mul"
+
+ def __rmul__(self, other):
+ return "Me2rmul"
+
+ def __rdiv__(self, other):
+ return "Me2rdiv"
+
+ __rtruediv__ = __rdiv__
+
+ me_too = MyClass2()
+ assert_(a.__mul__(me_too) is NotImplemented)
+ assert_(all(multiply.outer(a, me_too) == "Me2rmul"))
+ assert_(a.__truediv__(me_too) is NotImplemented)
+ assert_(me_too * a == "Me2mul")
+ assert_(a * me_too == "Me2rmul")
+ assert_(a / me_too == "Me2rdiv")
+
+ def test_no_masked_nan_warnings(self):
+ # check that a nan in masked position does not
+ # cause ufunc warnings
+
+ m = np.ma.array([0.5, np.nan], mask=[0,1])
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+
+ # test unary and binary ufuncs
+ exp(m)
+ add(m, 1)
+ m > 0
+
+ # test different unary domains
+ sqrt(m)
+ log(m)
+ tan(m)
+ arcsin(m)
+ arccos(m)
+ arccosh(m)
+
+ # test binary domains
+ divide(m, 2)
+
+ # also check that allclose uses ma ufuncs, to avoid warning
+ allclose(m, 0.5)
+
+
+class TestMaskedArrayInPlaceArithmetic:
+ # Test in-place MaskedArray arithmetic
+
+ def setup_method(self):
+ x = arange(10)
+ y = arange(10)
+ xm = arange(10)
+ xm[2] = masked
+ self.intdata = (x, y, xm)
+ self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+ self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ self.othertypes = [np.dtype(_).type for _ in self.othertypes]
+ self.uint8data = (
+ x.astype(np.uint8),
+ y.astype(np.uint8),
+ xm.astype(np.uint8)
+ )
+
+ def test_inplace_addition_scalar(self):
+ # Test of inplace additions
+ (x, y, xm) = self.intdata
+ xm[2] = masked
+ x += 1
+ assert_equal(x, y + 1)
+ xm += 1
+ assert_equal(xm, y + 1)
+
+ (x, _, xm) = self.floatdata
+ id1 = x.data.ctypes.data
+ x += 1.
+ assert_(id1 == x.data.ctypes.data)
+ assert_equal(x, y + 1.)
+
+ def test_inplace_addition_array(self):
+ # Test of inplace additions
+ (x, y, xm) = self.intdata
+ m = xm.mask
+ a = arange(10, dtype=np.int16)
+ a[-1] = masked
+ x += a
+ xm += a
+ assert_equal(x, y + a)
+ assert_equal(xm, y + a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ def test_inplace_subtraction_scalar(self):
+ # Test of inplace subtractions
+ (x, y, xm) = self.intdata
+ x -= 1
+ assert_equal(x, y - 1)
+ xm -= 1
+ assert_equal(xm, y - 1)
+
+ def test_inplace_subtraction_array(self):
+ # Test of inplace subtractions
+ (x, y, xm) = self.floatdata
+ m = xm.mask
+ a = arange(10, dtype=float)
+ a[-1] = masked
+ x -= a
+ xm -= a
+ assert_equal(x, y - a)
+ assert_equal(xm, y - a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ def test_inplace_multiplication_scalar(self):
+ # Test of inplace multiplication
+ (x, y, xm) = self.floatdata
+ x *= 2.0
+ assert_equal(x, y * 2)
+ xm *= 2.0
+ assert_equal(xm, y * 2)
+
+ def test_inplace_multiplication_array(self):
+ # Test of inplace multiplication
+ (x, y, xm) = self.floatdata
+ m = xm.mask
+ a = arange(10, dtype=float)
+ a[-1] = masked
+ x *= a
+ xm *= a
+ assert_equal(x, y * a)
+ assert_equal(xm, y * a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ def test_inplace_division_scalar_int(self):
+ # Test of inplace division
+ (x, y, xm) = self.intdata
+ x = arange(10) * 2
+ xm = arange(10) * 2
+ xm[2] = masked
+ x //= 2
+ assert_equal(x, y)
+ xm //= 2
+ assert_equal(xm, y)
+
+ def test_inplace_division_scalar_float(self):
+ # Test of inplace division
+ (x, y, xm) = self.floatdata
+ x /= 2.0
+ assert_equal(x, y / 2.0)
+ xm /= arange(10)
+ assert_equal(xm, ones((10,)))
+
+ def test_inplace_division_array_float(self):
+ # Test of inplace division
+ (x, y, xm) = self.floatdata
+ m = xm.mask
+ a = arange(10, dtype=float)
+ a[-1] = masked
+ x /= a
+ xm /= a
+ assert_equal(x, y / a)
+ assert_equal(xm, y / a)
+ assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))
+
+ def test_inplace_division_misc(self):
+
+ x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]
+ y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = masked_array(x, mask=m1)
+ ym = masked_array(y, mask=m2)
+
+ z = xm / ym
+ assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
+ assert_equal(z._data,
+ [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
+
+ xm = xm.copy()
+ xm /= ym
+ assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
+ assert_equal(xm._data,
+ [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])
+
+ def test_datafriendly_add(self):
+ # Test keeping data w/ (inplace) addition
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ # Test add w/ scalar
+ xx = x + 1
+ assert_equal(xx.data, [2, 3, 3])
+ assert_equal(xx.mask, [0, 0, 1])
+ # Test iadd w/ scalar
+ x += 1
+ assert_equal(x.data, [2, 3, 3])
+ assert_equal(x.mask, [0, 0, 1])
+ # Test add w/ array
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x + array([1, 2, 3], mask=[1, 0, 0])
+ assert_equal(xx.data, [1, 4, 3])
+ assert_equal(xx.mask, [1, 0, 1])
+ # Test iadd w/ array
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ x += array([1, 2, 3], mask=[1, 0, 0])
+ assert_equal(x.data, [1, 4, 3])
+ assert_equal(x.mask, [1, 0, 1])
+
+ def test_datafriendly_sub(self):
+ # Test keeping data w/ (inplace) subtraction
+ # Test sub w/ scalar
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x - 1
+ assert_equal(xx.data, [0, 1, 3])
+ assert_equal(xx.mask, [0, 0, 1])
+ # Test isub w/ scalar
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ x -= 1
+ assert_equal(x.data, [0, 1, 3])
+ assert_equal(x.mask, [0, 0, 1])
+ # Test sub w/ array
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x - array([1, 2, 3], mask=[1, 0, 0])
+ assert_equal(xx.data, [1, 0, 3])
+ assert_equal(xx.mask, [1, 0, 1])
+ # Test isub w/ array
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ x -= array([1, 2, 3], mask=[1, 0, 0])
+ assert_equal(x.data, [1, 0, 3])
+ assert_equal(x.mask, [1, 0, 1])
+
+ def test_datafriendly_mul(self):
+ # Test keeping data w/ (inplace) multiplication
+ # Test mul w/ scalar
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x * 2
+ assert_equal(xx.data, [2, 4, 3])
+ assert_equal(xx.mask, [0, 0, 1])
+ # Test imul w/ scalar
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ x *= 2
+ assert_equal(x.data, [2, 4, 3])
+ assert_equal(x.mask, [0, 0, 1])
+ # Test mul w/ array
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x * array([10, 20, 30], mask=[1, 0, 0])
+ assert_equal(xx.data, [1, 40, 3])
+ assert_equal(xx.mask, [1, 0, 1])
+ # Test imul w/ array
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ x *= array([10, 20, 30], mask=[1, 0, 0])
+ assert_equal(x.data, [1, 40, 3])
+ assert_equal(x.mask, [1, 0, 1])
+
+ def test_datafriendly_div(self):
+ # Test keeping data w/ (inplace) division
+ # Test div on scalar
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x / 2.
+ assert_equal(xx.data, [1 / 2., 2 / 2., 3])
+ assert_equal(xx.mask, [0, 0, 1])
+ # Test idiv on scalar
+ x = array([1., 2., 3.], mask=[0, 0, 1])
+ x /= 2.
+ assert_equal(x.data, [1 / 2., 2 / 2., 3])
+ assert_equal(x.mask, [0, 0, 1])
+ # Test div on array
+ x = array([1., 2., 3.], mask=[0, 0, 1])
+ xx = x / array([10., 20., 30.], mask=[1, 0, 0])
+ assert_equal(xx.data, [1., 2. / 20., 3.])
+ assert_equal(xx.mask, [1, 0, 1])
+ # Test idiv on array
+ x = array([1., 2., 3.], mask=[0, 0, 1])
+ x /= array([10., 20., 30.], mask=[1, 0, 0])
+ assert_equal(x.data, [1., 2 / 20., 3.])
+ assert_equal(x.mask, [1, 0, 1])
+
+ def test_datafriendly_pow(self):
+ # Test keeping data w/ (inplace) power
+ # Test pow on scalar
+ x = array([1., 2., 3.], mask=[0, 0, 1])
+ xx = x ** 2.5
+ assert_equal(xx.data, [1., 2. ** 2.5, 3.])
+ assert_equal(xx.mask, [0, 0, 1])
+ # Test ipow on scalar
+ x **= 2.5
+ assert_equal(x.data, [1., 2. ** 2.5, 3])
+ assert_equal(x.mask, [0, 0, 1])
+
+ def test_datafriendly_add_arrays(self):
+ a = array([[1, 1], [3, 3]])
+ b = array([1, 1], mask=[0, 0])
+ a += b
+ assert_equal(a, [[2, 2], [4, 4]])
+ if a.mask is not nomask:
+ assert_equal(a.mask, [[0, 0], [0, 0]])
+
+ a = array([[1, 1], [3, 3]])
+ b = array([1, 1], mask=[0, 1])
+ a += b
+ assert_equal(a, [[2, 2], [4, 4]])
+ assert_equal(a.mask, [[0, 1], [0, 1]])
+
+ def test_datafriendly_sub_arrays(self):
+ a = array([[1, 1], [3, 3]])
+ b = array([1, 1], mask=[0, 0])
+ a -= b
+ assert_equal(a, [[0, 0], [2, 2]])
+ if a.mask is not nomask:
+ assert_equal(a.mask, [[0, 0], [0, 0]])
+
+ a = array([[1, 1], [3, 3]])
+ b = array([1, 1], mask=[0, 1])
+ a -= b
+ assert_equal(a, [[0, 0], [2, 2]])
+ assert_equal(a.mask, [[0, 1], [0, 1]])
+
+ def test_datafriendly_mul_arrays(self):
+ a = array([[1, 1], [3, 3]])
+ b = array([1, 1], mask=[0, 0])
+ a *= b
+ assert_equal(a, [[1, 1], [3, 3]])
+ if a.mask is not nomask:
+ assert_equal(a.mask, [[0, 0], [0, 0]])
+
+ a = array([[1, 1], [3, 3]])
+ b = array([1, 1], mask=[0, 1])
+ a *= b
+ assert_equal(a, [[1, 1], [3, 3]])
+ assert_equal(a.mask, [[0, 1], [0, 1]])
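+
+ # Illustrative sketch (editor's addition, not an original test) of the
+ # "data friendly" behavior above: positions masked in either operand
+ # keep the left-hand operand's original data.
+ def _demo_datafriendly(self):
+ x = array([1, 2, 3], mask=[0, 0, 1])
+ xx = x + array([10, 20, 30], mask=[1, 0, 0])
+ assert_equal(xx.data, [1, 22, 3])
+ assert_equal(xx.mask, [1, 0, 1])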
+
+ def test_inplace_addition_scalar_type(self):
+ # Test of inplace additions
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ xm[2] = masked
+ x += t(1)
+ assert_equal(x, y + t(1))
+ xm += t(1)
+ assert_equal(xm, y + t(1))
+
+ def test_inplace_addition_array_type(self):
+ # Test of inplace additions
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x += a
+ xm += a
+ assert_equal(x, y + a)
+ assert_equal(xm, y + a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ def test_inplace_subtraction_scalar_type(self):
+ # Test of inplace subtractions
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x -= t(1)
+ assert_equal(x, y - t(1))
+ xm -= t(1)
+ assert_equal(xm, y - t(1))
+
+ def test_inplace_subtraction_array_type(self):
+ # Test of inplace subtractions
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x -= a
+ xm -= a
+ assert_equal(x, y - a)
+ assert_equal(xm, y - a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ def test_inplace_multiplication_scalar_type(self):
+ # Test of inplace multiplication
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x *= t(2)
+ assert_equal(x, y * t(2))
+ xm *= t(2)
+ assert_equal(xm, y * t(2))
+
+ def test_inplace_multiplication_array_type(self):
+ # Test of inplace multiplication
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x *= a
+ xm *= a
+ assert_equal(x, y * a)
+ assert_equal(xm, y * a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ def test_inplace_floor_division_scalar_type(self):
+ # Test of inplace division
+ # Check for TypeError in case of unsupported types
+ unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]}
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x = arange(10, dtype=t) * t(2)
+ xm = arange(10, dtype=t) * t(2)
+ xm[2] = masked
+ try:
+ x //= t(2)
+ xm //= t(2)
+ assert_equal(x, y)
+ assert_equal(xm, y)
+ except TypeError:
+ msg = f"Supported type {t} throwing TypeError"
+ assert t in unsupported, msg
+
+ def test_inplace_floor_division_array_type(self):
+ # Test of inplace division
+ # Check for TypeError in case of unsupported types
+ unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]}
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ try:
+ x //= a
+ xm //= a
+ assert_equal(x, y // a)
+ assert_equal(xm, y // a)
+ assert_equal(
+ xm.mask,
+ mask_or(mask_or(m, a.mask), (a == t(0)))
+ )
+ except TypeError:
+ msg = f"Supported type {t} throwing TypeError"
+ assert t in unsupported, msg
+
+ def test_inplace_division_scalar_type(self):
+ # Test of inplace division
+ for t in self.othertypes:
+ with suppress_warnings() as sup:
+ sup.record(UserWarning)
+
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x = arange(10, dtype=t) * t(2)
+ xm = arange(10, dtype=t) * t(2)
+ xm[2] = masked
+
+ # May get a DeprecationWarning or a TypeError.
+ #
+ # This is a consequence of the fact that this is true division,
+ # which requires casting to float for the calculation and
+ # casting back to the original type. It is only raised for
+ # integer types. Whether it is an error or a warning depends
+ # only on how stringent the casting rules are.
+ #
+ # Both are handled the same way below.
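+ # Illustrative sketch (not part of the original test): under
+ # strict casting rules, an in-place integer true division like
+ #   a = np.ma.array([2, 4], dtype=np.int16)
+ #   a /= 2
+ # computes in float64 and must cast back to int16, which NumPy
+ # may reject with a TypeError or flag with a DeprecationWarning.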
+ try:
+ x /= t(2)
+ assert_equal(x, y)
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e), stacklevel=1)
+ try:
+ xm /= t(2)
+ assert_equal(xm, y)
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e), stacklevel=1)
+
+ if issubclass(t, np.integer):
+ assert_equal(len(sup.log), 2, f'Failed on type={t}.')
+ else:
+ assert_equal(len(sup.log), 0, f'Failed on type={t}.')
+
+ def test_inplace_division_array_type(self):
+ # Test of inplace division
+ for t in self.othertypes:
+ with suppress_warnings() as sup:
+ sup.record(UserWarning)
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+
+ # May get a DeprecationWarning or a TypeError.
+ #
+ # This is a consequence of the fact that this is true division,
+ # which requires casting to float for the calculation and
+ # casting back to the original type. It is only raised for
+ # integer types. Whether it is an error or a warning depends
+ # only on how stringent the casting rules are.
+ #
+ # Both are handled the same way below.
+ try:
+ x /= a
+ assert_equal(x, y / a)
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e), stacklevel=1)
+ try:
+ xm /= a
+ assert_equal(xm, y / a)
+ assert_equal(
+ xm.mask,
+ mask_or(mask_or(m, a.mask), (a == t(0)))
+ )
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e), stacklevel=1)
+
+ if issubclass(t, np.integer):
+ assert_equal(len(sup.log), 2, f'Failed on type={t}.')
+ else:
+ assert_equal(len(sup.log), 0, f'Failed on type={t}.')
+
+ def test_inplace_pow_type(self):
+ # Test keeping data w/ (inplace) power
+ for t in self.othertypes:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("error")
+ # Test pow on scalar
+ x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
+ xx = x ** t(2)
+ xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
+ assert_equal(xx.data, xx_r.data)
+ assert_equal(xx.mask, xx_r.mask)
+ # Test ipow on scalar
+ x **= t(2)
+ assert_equal(x.data, xx_r.data)
+ assert_equal(x.mask, xx_r.mask)
+
+
+class TestMaskedArrayMethods:
+ # Test class for miscellaneous MaskedArray methods.
+ def setup_method(self):
+ # Base data definition.
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
+ 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
+ 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
+ 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
+ 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
+ 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
+ X = x.reshape(6, 6)
+ XX = x.reshape(3, 2, 2, 3)
+
+ m = np.array([0, 1, 0, 1, 0, 0,
+ 1, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 0, 0,
+ 0, 0, 1, 0, 1, 0])
+ mx = array(data=x, mask=m)
+ mX = array(data=X, mask=m.reshape(X.shape))
+ mXX = array(data=XX, mask=m.reshape(XX.shape))
+
+ m2 = np.array([1, 1, 0, 1, 0, 0,
+ 1, 1, 1, 1, 0, 1,
+ 0, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1])
+ m2x = array(data=x, mask=m2)
+ m2X = array(data=X, mask=m2.reshape(X.shape))
+ m2XX = array(data=XX, mask=m2.reshape(XX.shape))
+ self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
+
+ def test_generic_methods(self):
+ # Tests some MaskedArray methods.
+ a = array([1, 3, 2])
+ assert_equal(a.any(), a._data.any())
+ assert_equal(a.all(), a._data.all())
+ assert_equal(a.argmax(), a._data.argmax())
+ assert_equal(a.argmin(), a._data.argmin())
+ assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
+ assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
+ assert_equal(a.conj(), a._data.conj())
+ assert_equal(a.conjugate(), a._data.conjugate())
+
+ m = array([[1, 2], [3, 4]])
+ assert_equal(m.diagonal(), m._data.diagonal())
+ assert_equal(a.sum(), a._data.sum())
+ assert_equal(a.take([1, 2]), a._data.take([1, 2]))
+ assert_equal(m.transpose(), m._data.transpose())
+
+ def test_allclose(self):
+ # Tests allclose on arrays
+ a = np.random.rand(10)
+ b = a + np.random.rand(10) * 1e-8
+ assert_(allclose(a, b))
+ # Test allclose w/ infs
+ a[0] = np.inf
+ assert_(not allclose(a, b))
+ b[0] = np.inf
+ assert_(allclose(a, b))
+ # Test allclose w/ masked
+ a = masked_array(a)
+ a[-1] = masked
+ assert_(allclose(a, b, masked_equal=True))
+ assert_(not allclose(a, b, masked_equal=False))
+ # Test comparison w/ scalar
+ a *= 1e-8
+ a[0] = 0
+ assert_(allclose(a, 0, masked_equal=True))
+
+ # Test that the function works for MIN_INT integer typed arrays
+ a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
+ assert_(allclose(a, a))
+
+ def test_allclose_timedelta(self):
+ # Allclose currently works for timedelta64 as long as `atol` is
+ # an integer or a timedelta64
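+ # E.g. (illustrative): np.timedelta64(1, "us") would equally be
+ # accepted as atol; a float atol has no safe cast to timedelta64,
+ # which is presumably why atol is passed explicitly below.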
+ a = np.array([[1, 2, 3, 4]], dtype="m8[ns]")
+ assert allclose(a, a, atol=0)
+ assert allclose(a, a, atol=np.timedelta64(1, "ns"))
+
+ def test_allany(self):
+ # Checks the any/all methods/functions.
+ x = np.array([[0.13, 0.26, 0.90],
+ [0.28, 0.33, 0.63],
+ [0.31, 0.87, 0.70]])
+ m = np.array([[True, False, False],
+ [False, False, False],
+ [True, True, False]], dtype=np.bool_)
+ mx = masked_array(x, mask=m)
+ mxbig = (mx > 0.5)
+ mxsmall = (mx < 0.5)
+
+ assert_(not mxbig.all())
+ assert_(mxbig.any())
+ assert_equal(mxbig.all(0), [False, False, True])
+ assert_equal(mxbig.all(1), [False, False, True])
+ assert_equal(mxbig.any(0), [False, False, True])
+ assert_equal(mxbig.any(1), [True, True, True])
+
+ assert_(not mxsmall.all())
+ assert_(mxsmall.any())
+ assert_equal(mxsmall.all(0), [True, True, False])
+ assert_equal(mxsmall.all(1), [False, False, False])
+ assert_equal(mxsmall.any(0), [True, True, False])
+ assert_equal(mxsmall.any(1), [True, True, False])
+
+ def test_allany_oddities(self):
+ # Some fun with all and any
+ store = empty((), dtype=bool)
+ full = array([1, 2, 3], mask=True)
+
+ assert_(full.all() is masked)
+ full.all(out=store)
+ assert_(store)
+ assert_equal(store._mask, True)
+ assert_(store is not masked)
+
+ store = empty((), dtype=bool)
+ assert_(full.any() is masked)
+ full.any(out=store)
+ assert_(not store)
+ assert_equal(store._mask, True)
+ assert_(store is not masked)
+
+ def test_argmax_argmin(self):
+ # Tests argmin & argmax on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+
+ assert_equal(mx.argmin(), 35)
+ assert_equal(mX.argmin(), 35)
+ assert_equal(m2x.argmin(), 4)
+ assert_equal(m2X.argmin(), 4)
+ assert_equal(mx.argmax(), 28)
+ assert_equal(mX.argmax(), 28)
+ assert_equal(m2x.argmax(), 31)
+ assert_equal(m2X.argmax(), 31)
+
+ assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])
+ assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])
+ assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])
+ assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])
+
+ assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5])
+ assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])
+ assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])
+ assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])
+
+ def test_clip(self):
+ # Tests clip on MaskedArrays.
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
+ 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
+ 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
+ 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
+ 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
+ 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
+ m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])
+ mx = array(x, mask=m)
+ clipped = mx.clip(2, 8)
+ assert_equal(clipped.mask, mx.mask)
+ assert_equal(clipped._data, x.clip(2, 8))
+ assert_equal(clipped._data, mx._data.clip(2, 8))
+
+ def test_clip_out(self):
+ # gh-14140
+ a = np.arange(10)
+ m = np.ma.MaskedArray(a, mask=[0, 1] * 5)
+ m.clip(0, 5, out=m)
+ assert_equal(m.mask, [0, 1] * 5)
+
+ def test_compress(self):
+ # test compress
+ a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
+ condition = (a > 1.5) & (a < 3.5)
+ assert_equal(a.compress(condition), [2., 3.])
+
+ a[[2, 3]] = masked
+ b = a.compress(condition)
+ assert_equal(b._data, [2., 3.])
+ assert_equal(b._mask, [0, 1])
+ assert_equal(b.fill_value, 9999)
+ assert_equal(b, a[condition])
+
+ condition = (a < 4.)
+ b = a.compress(condition)
+ assert_equal(b._data, [1., 2., 3.])
+ assert_equal(b._mask, [0, 0, 1])
+ assert_equal(b.fill_value, 9999)
+ assert_equal(b, a[condition])
+
+ a = masked_array([[10, 20, 30], [40, 50, 60]],
+ mask=[[0, 0, 1], [1, 0, 0]])
+ b = a.compress(a.ravel() >= 22)
+ assert_equal(b._data, [30, 40, 50, 60])
+ assert_equal(b._mask, [1, 1, 0, 0])
+
+ x = np.array([3, 1, 2])
+ b = a.compress(x >= 2, axis=1)
+ assert_equal(b._data, [[10, 30], [40, 60]])
+ assert_equal(b._mask, [[0, 1], [1, 0]])
+
+ def test_compressed(self):
+ # Tests compressed
+ a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
+ b = a.compressed()
+ assert_equal(b, a)
+ a[0] = masked
+ b = a.compressed()
+ assert_equal(b, [2, 3, 4])
+
+ def test_empty(self):
+ # Tests empty/like
+ datatype = [('a', int), ('b', float), ('c', '|S8')]
+ a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
+ dtype=datatype)
+ assert_equal(len(a.fill_value.item()), len(datatype))
+
+ b = empty_like(a)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ b = empty(len(a), dtype=datatype)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ # check empty_like mask handling
+ a = masked_array([1, 2, 3], mask=[False, True, False])
+ b = empty_like(a)
+ assert_(not np.may_share_memory(a.mask, b.mask))
+ b = a.view(masked_array)
+ assert_(np.may_share_memory(a.mask, b.mask))
+
+ def test_zeros(self):
+ # Tests zeros/like
+ datatype = [('a', int), ('b', float), ('c', '|S8')]
+ a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
+ dtype=datatype)
+ assert_equal(len(a.fill_value.item()), len(datatype))
+
+ b = zeros(len(a), dtype=datatype)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ b = zeros_like(a)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ # check zeros_like mask handling
+ a = masked_array([1, 2, 3], mask=[False, True, False])
+ b = zeros_like(a)
+ assert_(not np.may_share_memory(a.mask, b.mask))
+ b = a.view()
+ assert_(np.may_share_memory(a.mask, b.mask))
+
+ def test_ones(self):
+ # Tests ones/like
+ datatype = [('a', int), ('b', float), ('c', '|S8')]
+ a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
+ dtype=datatype)
+ assert_equal(len(a.fill_value.item()), len(datatype))
+
+ b = ones(len(a), dtype=datatype)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ b = ones_like(a)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ # check ones_like mask handling
+ a = masked_array([1, 2, 3], mask=[False, True, False])
+ b = ones_like(a)
+ assert_(not np.may_share_memory(a.mask, b.mask))
+ b = a.view()
+ assert_(np.may_share_memory(a.mask, b.mask))
+
+ @suppress_copy_mask_on_assignment
+ def test_put(self):
+ # Tests put.
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ x = array(d, mask=m)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ x[[1, 4]] = [10, 40]
+ assert_(x[3] is masked)
+ assert_(x[4] is not masked)
+ assert_equal(x, [0, 10, 2, -1, 40])
+
+ x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
+ i = [0, 2, 4, 6]
+ x.put(i, [6, 4, 2, 0])
+ assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9]))
+ assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
+ x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
+ assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
+
+ x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
+ put(x, i, [6, 4, 2, 0])
+ assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9]))
+ assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
+ put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))
+ assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])
+
+ def test_put_nomask(self):
+ # GitHub issue 6425
+ x = zeros(10)
+ z = array([3., -1.], mask=[False, True])
+
+ x.put([1, 2], z)
+ assert_(x[0] is not masked)
+ assert_equal(x[0], 0)
+ assert_(x[1] is not masked)
+ assert_equal(x[1], 3)
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_equal(x[3], 0)
+
+ def test_put_hardmask(self):
+ # Tests put on hardmask
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ xh = array(d + 1, mask=m, hard_mask=True, copy=True)
+ xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])
+ assert_equal(xh._data, [3, 4, 2, 4, 5])
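+ # The hard mask keeps entries 3 and 4 untouched: put() silently
+ # discards the values aimed at masked positions, so only the
+ # unmasked slots 0, 1 and 2 receive the new data.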
+
+ def test_putmask(self):
+ x = arange(6) + 1
+ mx = array(x, mask=[0, 0, 0, 1, 1, 1])
+ mask = [0, 0, 1, 0, 0, 1]
+ # w/o mask, w/o masked values
+ xx = x.copy()
+ putmask(xx, mask, 99)
+ assert_equal(xx, [1, 2, 99, 4, 5, 99])
+ # w/ mask, w/o masked values
+ mxx = mx.copy()
+ putmask(mxx, mask, 99)
+ assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])
+ assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])
+ # w/o mask, w/ masked values
+ values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])
+ xx = x.copy()
+ putmask(xx, mask, values)
+ assert_equal(xx._data, [1, 2, 30, 4, 5, 60])
+ assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])
+ # w/ mask, w/ masked values
+ mxx = mx.copy()
+ putmask(mxx, mask, values)
+ assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])
+ assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])
+ # w/ mask, w/ masked values + hardmask
+ mxx = mx.copy()
+ mxx.harden_mask()
+ putmask(mxx, mask, values)
+ assert_equal(mxx, [1, 2, 30, 4, 5, 60])
+
+ def test_ravel(self):
+ # Tests ravel
+ a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
+ aravel = a.ravel()
+ assert_equal(aravel._mask.shape, aravel.shape)
+ a = array([0, 0], mask=[1, 1])
+ aravel = a.ravel()
+ assert_equal(aravel._mask.shape, a.shape)
+ # Checks that small_mask is preserved
+ a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
+ assert_equal(a.ravel()._mask, [0, 0, 0, 0])
+ # Test that the fill_value is preserved
+ a.fill_value = -99
+ a.shape = (2, 2)
+ ar = a.ravel()
+ assert_equal(ar._mask, [0, 0, 0, 0])
+ assert_equal(ar._data, [1, 2, 3, 4])
+ assert_equal(ar.fill_value, -99)
+ # Test index ordering
+ assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
+ assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
+
+ @pytest.mark.parametrize("order", "AKCF")
+ @pytest.mark.parametrize("data_order", "CF")
+ def test_ravel_order(self, order, data_order):
+ # Ravelling must always ravel the mask and the data in the same
+ # order, to avoid misaligning the two in the ravel result.
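+ # E.g. with the data in Fortran order and the mask in C order,
+ # raveling each with its own 'A'-order would flatten them
+ # differently and pair values with the wrong mask entries.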
+ arr = np.ones((5, 10), order=data_order)
+ arr[0, :] = 0
+ mask = np.ones((10, 5), dtype=bool, order=data_order).T
+ mask[0, :] = False
+ x = array(arr, mask=mask)
+ assert x._data.flags.fnc != x._mask.flags.fnc
+ assert (x.filled(0) == 0).all()
+ raveled = x.ravel(order)
+ assert (raveled.filled(0) == 0).all()
+
+ def test_reshape(self):
+ # Tests reshape
+ x = arange(4)
+ x[0] = masked
+ y = x.reshape(2, 2)
+ assert_equal(y.shape, (2, 2,))
+ assert_equal(y._mask.shape, (2, 2,))
+ assert_equal(x.shape, (4,))
+ assert_equal(x._mask.shape, (4,))
+
+ def test_sort(self):
+ # Test sort
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+ sortedx = sort(x)
+ assert_equal(sortedx._data, [1, 2, 3, 4])
+ assert_equal(sortedx._mask, [0, 0, 0, 1])
+
+ sortedx = sort(x, endwith=False)
+ assert_equal(sortedx._data, [4, 1, 2, 3])
+ assert_equal(sortedx._mask, [1, 0, 0, 0])
+
+ x.sort()
+ assert_equal(x._data, [1, 2, 3, 4])
+ assert_equal(x._mask, [0, 0, 0, 1])
+
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+ x.sort(endwith=False)
+ assert_equal(x._data, [4, 1, 2, 3])
+ assert_equal(x._mask, [1, 0, 0, 0])
+
+ x = [1, 4, 2, 3]
+ sortedx = sort(x)
+ assert_(not isinstance(sortedx, MaskedArray))
+
+ x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
+ sortedx = sort(x, endwith=False)
+ assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
+ x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
+ sortedx = sort(x, endwith=False)
+ assert_equal(sortedx._data, [1, 2, -2, -1, 0])
+ assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+
+ x = array([0, -1], dtype=np.int8)
+ sortedx = sort(x, kind="stable")
+ assert_equal(sortedx, array([-1, 0], dtype=np.int8))
+
+ def test_stable_sort(self):
+ x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+ expected = array([0, 3, 1, 4, 2, 5])
+ computed = argsort(x, kind='stable')
+ assert_equal(computed, expected)
+
+ def test_argsort_matches_sort(self):
+ x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+ for kwargs in [dict(),
+ dict(endwith=True),
+ dict(endwith=False),
+ dict(fill_value=2),
+ dict(fill_value=2, endwith=True),
+ dict(fill_value=2, endwith=False)]:
+ sortedx = sort(x, **kwargs)
+ argsortedx = x[argsort(x, **kwargs)]
+ assert_equal(sortedx._data, argsortedx._data)
+ assert_equal(sortedx._mask, argsortedx._mask)
+
+ def test_sort_2d(self):
+ # Check sort of 2D array.
+ # 2D array w/o mask
+ a = masked_array([[8, 4, 1], [2, 0, 9]])
+ a.sort(0)
+ assert_equal(a, [[2, 0, 1], [8, 4, 9]])
+ a = masked_array([[8, 4, 1], [2, 0, 9]])
+ a.sort(1)
+ assert_equal(a, [[1, 4, 8], [0, 2, 9]])
+ # 2D array w/mask
+ a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
+ a.sort(0)
+ assert_equal(a, [[2, 0, 1], [8, 4, 9]])
+ assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])
+ a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])
+ a.sort(1)
+ assert_equal(a, [[1, 4, 8], [0, 2, 9]])
+ assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])
+ # 3D
+ a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],
+ [[1, 2, 3], [7, 8, 9], [4, 5, 6]],
+ [[7, 8, 9], [1, 2, 3], [4, 5, 6]],
+ [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])
+ a[a % 4 == 0] = masked
+ am = a.copy()
+ an = a.filled(99)
+ am.sort(0)
+ an.sort(0)
+ assert_equal(am, an)
+ am = a.copy()
+ an = a.filled(99)
+ am.sort(1)
+ an.sort(1)
+ assert_equal(am, an)
+ am = a.copy()
+ an = a.filled(99)
+ am.sort(2)
+ an.sort(2)
+ assert_equal(am, an)
+
+ def test_sort_flexible(self):
+ # Test sort on structured dtype.
+ a = array(
+ data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
+ mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
+ dtype=[('A', int), ('B', int)])
+ mask_last = array(
+ data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
+ mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
+ dtype=[('A', int), ('B', int)])
+ mask_first = array(
+ data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],
+ mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],
+ dtype=[('A', int), ('B', int)])
+
+ test = sort(a)
+ assert_equal(test, mask_last)
+ assert_equal(test.mask, mask_last.mask)
+
+ test = sort(a, endwith=False)
+ assert_equal(test, mask_first)
+ assert_equal(test.mask, mask_first.mask)
+
+ # Test sort on dtype with subarray (gh-8069)
+ # Just check that the sort does not error; structured array subarrays
+ # are treated as byte strings, which leads to behavior that differs
+ # with endianness and `endwith`.
+ dt = np.dtype([('v', int, 2)])
+ a = a.view(dt)
+ test = sort(a)
+ test = sort(a, endwith=False)
+
+ def test_argsort(self):
+ # Test argsort
+ a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])
+ assert_equal(np.argsort(a), argsort(a))
+
+ def test_squeeze(self):
+ # Check squeeze
+ data = masked_array([[1, 2, 3]])
+ assert_equal(data.squeeze(), [1, 2, 3])
+ data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
+ assert_equal(data.squeeze(), [1, 2, 3])
+ assert_equal(data.squeeze()._mask, [1, 1, 1])
+
+ # normal ndarrays return a view
+ arr = np.array([[1]])
+ arr_sq = arr.squeeze()
+ assert_equal(arr_sq, 1)
+ arr_sq[...] = 2
+ assert_equal(arr[0,0], 2)
+
+ # so maskedarrays should too
+ m_arr = masked_array([[1]], mask=True)
+ m_arr_sq = m_arr.squeeze()
+ assert_(m_arr_sq is not np.ma.masked)
+ assert_equal(m_arr_sq.mask, True)
+ m_arr_sq[...] = 2
+ assert_equal(m_arr[0,0], 2)
+
+ def test_swapaxes(self):
+ # Tests swapaxes on MaskedArrays.
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
+ 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
+ 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
+ 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
+ 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
+ 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
+ m = np.array([0, 1, 0, 1, 0, 0,
+ 1, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 0, 0,
+ 0, 0, 1, 0, 1, 0])
+ mX = array(x, mask=m).reshape(6, 6)
+ mXX = mX.reshape(3, 2, 2, 3)
+
+ mXswapped = mX.swapaxes(0, 1)
+ assert_equal(mXswapped[-1], mX[:, -1])
+
+ mXXswapped = mXX.swapaxes(0, 2)
+ assert_equal(mXXswapped.shape, (2, 2, 3, 3))
+
+ def test_take(self):
+ # Tests take
+ x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])
+ assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))
+ assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])
+ assert_equal(x.take([[0, 1], [0, 1]]),
+ masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
+
+ # assert_equal crashes when passed np.ma.masked
+ assert_(x[1] is np.ma.masked)
+ assert_(x.take(1) is np.ma.masked)
+
+ x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0]])
+ assert_equal(x.take([0, 2], axis=1),
+ array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
+ assert_equal(take(x, [0, 2], axis=1),
+ array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))
+
+ def test_take_masked_indices(self):
+ # Test take w/ masked indices
+ a = np.array((40, 18, 37, 9, 22))
+ indices = np.arange(3)[None,:] + np.arange(5)[:, None]
+ mindices = array(indices, mask=(indices >= len(a)))
+ # No mask
+ test = take(a, mindices, mode='clip')
+ ctrl = array([[40, 18, 37],
+ [18, 37, 9],
+ [37, 9, 22],
+ [9, 22, 22],
+ [22, 22, 22]])
+ assert_equal(test, ctrl)
+ # Masked indices
+ test = take(a, mindices)
+ ctrl = array([[40, 18, 37],
+ [18, 37, 9],
+ [37, 9, 22],
+ [9, 22, 40],
+ [22, 40, 40]])
+ ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, ctrl.mask)
+ # Masked input + masked indices
+ a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))
+ test = take(a, mindices)
+ ctrl[0, 1] = ctrl[1, 0] = masked
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, ctrl.mask)
+
+ def test_tolist(self):
+ # Tests tolist
+ # ... on 1D
+ x = array(np.arange(12))
+ x[[1, -2]] = masked
+ xlist = x.tolist()
+ assert_(xlist[1] is None)
+ assert_(xlist[-2] is None)
+ # ... on 2D
+ x.shape = (3, 4)
+ xlist = x.tolist()
+ ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]
+ assert_equal(xlist[0], [0, None, 2, 3])
+ assert_equal(xlist[1], [4, 5, 6, 7])
+ assert_equal(xlist[2], [8, 9, None, 11])
+ assert_equal(xlist, ctrl)
+ # ... on structured array w/ masked records
+ x = array(list(zip([1, 2, 3],
+ [1.1, 2.2, 3.3],
+ ['one', 'two', 'thr'])),
+ dtype=[('a', int), ('b', float), ('c', '|S8')])
+ x[-1] = masked
+ assert_equal(x.tolist(),
+ [(1, 1.1, b'one'),
+ (2, 2.2, b'two'),
+ (None, None, None)])
+ # ... on structured array w/ masked fields
+ a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],
+ dtype=[('a', int), ('b', int)])
+ test = a.tolist()
+ assert_equal(test, [[1, None], [3, 4]])
+ # ... on mvoid
+ a = a[0]
+ test = a.tolist()
+ assert_equal(test, [1, None])
+
+ def test_tolist_specialcase(self):
+ # Test mvoid.tolist: make sure we return a standard Python object
+ a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])
+ # w/o mask: each entry is a np.void whose elements are standard Python
+ for entry in a:
+ for item in entry.tolist():
+ assert_(not isinstance(item, np.generic))
+ # w/ mask: each entry is a ma.void whose elements should be
+ # standard Python
+ a.mask[0] = (0, 1)
+ for entry in a:
+ for item in entry.tolist():
+ assert_(not isinstance(item, np.generic))
+
+ def test_toflex(self):
+ # Test the conversion to records
+ data = arange(10)
+ record = data.toflex()
+ assert_equal(record['_data'], data._data)
+ assert_equal(record['_mask'], data._mask)
+
+ data[[0, 1, 2, -1]] = masked
+ record = data.toflex()
+ assert_equal(record['_data'], data._data)
+ assert_equal(record['_mask'], data._mask)
+
+ ndtype = [('i', int), ('s', '|S3'), ('f', float)]
+ data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),
+ 'ABCDEFGHIJKLM',
+ np.random.rand(10))],
+ dtype=ndtype)
+ data[[0, 1, 2, -1]] = masked
+ record = data.toflex()
+ assert_equal(record['_data'], data._data)
+ assert_equal(record['_mask'], data._mask)
+
+ ndtype = np.dtype("int, (2,3)float, float")
+ data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),
+ np.random.rand(10),
+ np.random.rand(10))],
+ dtype=ndtype)
+ data[[0, 1, 2, -1]] = masked
+ record = data.toflex()
+ assert_equal_records(record['_data'], data._data)
+ assert_equal_records(record['_mask'], data._mask)
+
+ def test_fromflex(self):
+ # Test the reconstruction of a masked_array from a record
+ a = array([1, 2, 3])
+ test = fromflex(a.toflex())
+ assert_equal(test, a)
+ assert_equal(test.mask, a.mask)
+
+ a = array([1, 2, 3], mask=[0, 0, 1])
+ test = fromflex(a.toflex())
+ assert_equal(test, a)
+ assert_equal(test.mask, a.mask)
+
+ a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],
+ dtype=[('A', int), ('B', float)])
+ test = fromflex(a.toflex())
+ assert_equal(test, a)
+ assert_equal(test.data, a.data)
+
+ def test_arraymethod(self):
+ # Test a _arraymethod w/ n argument
+ marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])
+ control = masked_array([[1], [2], [3], [4], [5]],
+ mask=[0, 0, 1, 0, 0])
+ assert_equal(marray.T, control)
+ assert_equal(marray.transpose(), control)
+
+ assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
+
+ def test_arraymethod_0d(self):
+ # gh-9430
+ x = np.ma.array(42, mask=True)
+ assert_equal(x.T.mask, x.mask)
+ assert_equal(x.T.data, x.data)
+
+ def test_transpose_view(self):
+ x = np.ma.array([[1, 2, 3], [4, 5, 6]])
+ x[0,1] = np.ma.masked
+ xt = x.T
+
+ xt[1,0] = 10
+ xt[0,1] = np.ma.masked
+
+ assert_equal(x.data, xt.T.data)
+ assert_equal(x.mask, xt.T.mask)
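+ # xt is a view: both the data write and the mask update made
+ # through xt are visible in x, so transposing back round-trips
+ # the data and mask unchanged.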
+
+ def test_diagonal_view(self):
+ x = np.ma.zeros((3,3))
+ x[0,0] = 10
+ x[1,1] = np.ma.masked
+ x[2,2] = 20
+ xd = x.diagonal()
+ x[1,1] = 15
+ assert_equal(xd.mask, x.diagonal().mask)
+ assert_equal(xd.data, x.diagonal().data)
+
+
+class TestMaskedArrayMathMethods:
+
+ def setup_method(self):
+ # Base data definition.
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
+ 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
+ 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
+ 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
+ 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
+ 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
+ X = x.reshape(6, 6)
+ XX = x.reshape(3, 2, 2, 3)
+
+ m = np.array([0, 1, 0, 1, 0, 0,
+ 1, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 0, 0,
+ 0, 0, 1, 0, 1, 0])
+ mx = array(data=x, mask=m)
+ mX = array(data=X, mask=m.reshape(X.shape))
+ mXX = array(data=XX, mask=m.reshape(XX.shape))
+
+ m2 = np.array([1, 1, 0, 1, 0, 0,
+ 1, 1, 1, 1, 0, 1,
+ 0, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1])
+ m2x = array(data=x, mask=m2)
+ m2X = array(data=X, mask=m2.reshape(X.shape))
+ m2XX = array(data=XX, mask=m2.reshape(XX.shape))
+ self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
+
+ def test_cumsumprod(self):
+ # Tests cumsum & cumprod on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ mXcp = mX.cumsum(0)
+ assert_equal(mXcp._data, mX.filled(0).cumsum(0))
+ mXcp = mX.cumsum(1)
+ assert_equal(mXcp._data, mX.filled(0).cumsum(1))
+
+ mXcp = mX.cumprod(0)
+ assert_equal(mXcp._data, mX.filled(1).cumprod(0))
+ mXcp = mX.cumprod(1)
+ assert_equal(mXcp._data, mX.filled(1).cumprod(1))
+
+ def test_cumsumprod_with_output(self):
+ # Tests cumsum/cumprod w/ output
+ xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+ xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+ for funcname in ('cumsum', 'cumprod'):
+ npfunc = getattr(np, funcname)
+ xmmeth = getattr(xm, funcname)
+
+ # A ndarray as explicit input
+ output = np.empty((3, 4), dtype=float)
+ output.fill(-9999)
+ result = npfunc(xm, axis=0, out=output)
+ # ... the result should be the given output
+ assert_(result is output)
+ assert_equal(result, xmmeth(axis=0, out=output))
+
+ output = empty((3, 4), dtype=int)
+ result = xmmeth(axis=0, out=output)
+ assert_(result is output)
+
+ def test_ptp(self):
+ # Tests ptp on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ (n, m) = X.shape
+ assert_equal(mx.ptp(), mx.compressed().ptp())
+ rows = np.zeros(n, float)
+ cols = np.zeros(m, float)
+ for k in range(m):
+ cols[k] = mX[:, k].compressed().ptp()
+ for k in range(n):
+ rows[k] = mX[k].compressed().ptp()
+ assert_equal(mX.ptp(0), cols)
+ assert_equal(mX.ptp(1), rows)
+
+ def test_add_object(self):
+ x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
+ y = x + 'x'
+ assert_equal(y[1], 'bx')
+ assert_(y.mask[0])
+
+ def test_sum_object(self):
+ # Test sum on object dtype
+ a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
+ assert_equal(a.sum(), 5)
+ a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
+ assert_equal(a.sum(axis=0), [5, 7, 9])
+
+ def test_prod_object(self):
+ # Test prod on object dtype
+ a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
+ assert_equal(a.prod(), 2 * 3)
+ a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
+ assert_equal(a.prod(axis=0), [4, 10, 18])
+
+ def test_meananom_object(self):
+ # Test mean/anom on object dtype
+ a = masked_array([1, 2, 3], dtype=object)
+ assert_equal(a.mean(), 2)
+ assert_equal(a.anom(), [-1, 0, 1])
+
+ def test_anom_shape(self):
+ a = masked_array([1, 2, 3])
+ assert_equal(a.anom().shape, a.shape)
+ a.mask = True
+ assert_equal(a.anom().shape, a.shape)
+ assert_(np.ma.is_masked(a.anom()))
+
+ def test_anom(self):
+ a = masked_array(np.arange(1, 7).reshape(2, 3))
+ assert_almost_equal(a.anom(),
+ [[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]])
+ assert_almost_equal(a.anom(axis=0),
+ [[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
+ assert_almost_equal(a.anom(axis=1),
+ [[-1., 0., 1.], [-1., 0., 1.]])
+ a.mask = [[0, 0, 1], [0, 1, 0]]
+ mval = -99
+ assert_almost_equal(a.anom().filled(mval),
+ [[-2.25, -1.25, mval], [0.75, mval, 2.75]])
+ assert_almost_equal(a.anom(axis=0).filled(mval),
+ [[-1.5, 0.0, mval], [1.5, mval, 0.0]])
+ assert_almost_equal(a.anom(axis=1).filled(mval),
+ [[-0.5, 0.5, mval], [-1.0, mval, 1.0]])
+
+ def test_trace(self):
+ # Tests trace on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ mXdiag = mX.diagonal()
+ assert_equal(mX.trace(), mX.diagonal().compressed().sum())
+ assert_almost_equal(mX.trace(),
+ X.trace() - sum(mXdiag.mask * X.diagonal(),
+ axis=0))
+ assert_equal(np.trace(mX), mX.trace())
+
+ # gh-5560
+ arr = np.arange(2*4*4).reshape(2,4,4)
+ m_arr = np.ma.masked_array(arr, False)
+ assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2))
+
+ def test_dot(self):
+ # Tests dot on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ fx = mx.filled(0)
+ r = mx.dot(mx)
+ assert_almost_equal(r.filled(0), fx.dot(fx))
+ assert_(r.mask is nomask)
+
+ fX = mX.filled(0)
+ r = mX.dot(mX)
+ assert_almost_equal(r.filled(0), fX.dot(fX))
+ assert_(r.mask[1,3])
+ r1 = empty_like(r)
+ mX.dot(mX, out=r1)
+ assert_almost_equal(r, r1)
+
+ mYY = mXX.swapaxes(-1, -2)
+ fXX, fYY = mXX.filled(0), mYY.filled(0)
+ r = mXX.dot(mYY)
+ assert_almost_equal(r.filled(0), fXX.dot(fYY))
+ r1 = empty_like(r)
+ mXX.dot(mYY, out=r1)
+ assert_almost_equal(r, r1)
+
+ def test_dot_shape_mismatch(self):
+ # regression test
+ x = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
+ y = masked_array([[1,2],[3,4]], mask=[[0,1],[0,0]])
+ z = masked_array([[0,1],[3,3]])
+ x.dot(y, out=z)
+ assert_almost_equal(z.filled(0), [[1, 0], [15, 16]])
+ assert_almost_equal(z.mask, [[0, 1], [0, 0]])
+
+ def test_varmean_nomask(self):
+ # gh-5769
+ foo = array([1,2,3,4], dtype='f8')
+ bar = array([1,2,3,4], dtype='f8')
+ assert_equal(type(foo.mean()), np.float64)
+ assert_equal(type(foo.var()), np.float64)
+ assert((foo.mean() == bar.mean()) is np.bool_(True))
+
+ # check array type is preserved and out works
+ foo = array(np.arange(16).reshape((4,4)), dtype='f8')
+ bar = empty(4, dtype='f4')
+ assert_equal(type(foo.mean(axis=1)), MaskedArray)
+ assert_equal(type(foo.var(axis=1)), MaskedArray)
+ assert_(foo.mean(axis=1, out=bar) is bar)
+ assert_(foo.var(axis=1, out=bar) is bar)
+
+ def test_varstd(self):
+ # Tests var & std on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ assert_almost_equal(mX.var(axis=None), mX.compressed().var())
+ assert_almost_equal(mX.std(axis=None), mX.compressed().std())
+ assert_almost_equal(mX.std(axis=None, ddof=1),
+ mX.compressed().std(ddof=1))
+ assert_almost_equal(mX.var(axis=None, ddof=1),
+ mX.compressed().var(ddof=1))
+ assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
+ assert_equal(mX.var().shape, X.var().shape)
+ (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
+ assert_almost_equal(mX.var(axis=None, ddof=2),
+ mX.compressed().var(ddof=2))
+ assert_almost_equal(mX.std(axis=None, ddof=2),
+ mX.compressed().std(ddof=2))
+ for k in range(6):
+ assert_almost_equal(mXvar1[k], mX[k].compressed().var())
+ assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
+ assert_almost_equal(np.sqrt(mXvar0[k]),
+ mX[:, k].compressed().std())
+
+ @suppress_copy_mask_on_assignment
+ def test_varstd_specialcases(self):
+ # Test a special case for var
+ nout = np.array(-1, dtype=float)
+ mout = array(-1, dtype=float)
+
+ x = array(arange(10), mask=True)
+ for methodname in ('var', 'std'):
+ method = getattr(x, methodname)
+ assert_(method() is masked)
+ assert_(method(0) is masked)
+ assert_(method(-1) is masked)
+ # Using a masked array as explicit output
+ method(out=mout)
+ assert_(mout is not masked)
+ assert_equal(mout.mask, True)
+ # Using a ndarray as explicit output
+ method(out=nout)
+ assert_(np.isnan(nout))
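+ # (a fully masked result written into a plain ndarray has no
+ # mask to set, so NaN is stored instead)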
+
+ x = array(arange(10), mask=True)
+ x[-1] = 9
+ for methodname in ('var', 'std'):
+ method = getattr(x, methodname)
+ assert_(method(ddof=1) is masked)
+ assert_(method(0, ddof=1) is masked)
+ assert_(method(-1, ddof=1) is masked)
+ # Using a masked array as explicit output
+ method(out=mout, ddof=1)
+ assert_(mout is not masked)
+ assert_equal(mout.mask, True)
+ # Using a ndarray as explicit output
+ method(out=nout, ddof=1)
+ assert_(np.isnan(nout))
+
+ def test_varstd_ddof(self):
+ a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
+ test = a.std(axis=0, ddof=0)
+ assert_equal(test.filled(0), [0, 0, 0])
+ assert_equal(test.mask, [0, 0, 1])
+ test = a.std(axis=0, ddof=1)
+ assert_equal(test.filled(0), [0, 0, 0])
+ assert_equal(test.mask, [0, 0, 1])
+ test = a.std(axis=0, ddof=2)
+ assert_equal(test.filled(0), [0, 0, 0])
+ assert_equal(test.mask, [1, 1, 1])
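+ # (each column has at most two unmasked entries, so ddof=2 leaves
+ # zero degrees of freedom and every column ends up masked)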
+
+ def test_diag(self):
+ # Test diag
+ x = arange(9).reshape((3, 3))
+ x[1, 1] = masked
+ out = np.diag(x)
+ assert_equal(out, [0, 4, 8])
+ out = diag(x)
+ assert_equal(out, [0, 4, 8])
+ assert_equal(out.mask, [0, 1, 0])
+ out = diag(out)
+ control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],
+ mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+ assert_equal(out, control)
+
+ def test_axis_methods_nomask(self):
+ # Test the combination nomask & methods w/ axis
+ a = array([[1, 2, 3], [4, 5, 6]])
+
+ assert_equal(a.sum(0), [5, 7, 9])
+ assert_equal(a.sum(-1), [6, 15])
+ assert_equal(a.sum(1), [6, 15])
+
+ assert_equal(a.prod(0), [4, 10, 18])
+ assert_equal(a.prod(-1), [6, 120])
+ assert_equal(a.prod(1), [6, 120])
+
+ assert_equal(a.min(0), [1, 2, 3])
+ assert_equal(a.min(-1), [1, 4])
+ assert_equal(a.min(1), [1, 4])
+
+ assert_equal(a.max(0), [4, 5, 6])
+ assert_equal(a.max(-1), [3, 6])
+ assert_equal(a.max(1), [3, 6])
+
+ @requires_memory(free_bytes=2 * 10000 * 1000 * 2)
+ def test_mean_overflow(self):
+ # Test overflow in masked arrays
+ # gh-20272
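+ # A sum of 10**8 values of 65535 is ~6.6e12 and would wrap in the
+ # uint16 input dtype, so mean() must accumulate in a wider type
+ # to return the exact 65535.0 asserted below.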
+ a = masked_array(np.full((10000, 10000), 65535, dtype=np.uint16),
+ mask=np.zeros((10000, 10000)))
+ assert_equal(a.mean(), 65535.0)
+
+
+class TestMaskedArrayMathMethodsComplex:
+ # Test class for miscellaneous MaskedArray methods.
+ def setup_method(self):
+ # Base data definition.
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
+ 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
+ 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
+ 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j,
+ 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,
+ 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])
+ X = x.reshape(6, 6)
+ XX = x.reshape(3, 2, 2, 3)
+
+ m = np.array([0, 1, 0, 1, 0, 0,
+ 1, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 0, 0,
+ 0, 0, 1, 0, 1, 0])
+ mx = array(data=x, mask=m)
+ mX = array(data=X, mask=m.reshape(X.shape))
+ mXX = array(data=XX, mask=m.reshape(XX.shape))
+
+ m2 = np.array([1, 1, 0, 1, 0, 0,
+ 1, 1, 1, 1, 0, 1,
+ 0, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 1, 0,
+ 0, 0, 1, 0, 1, 1])
+ m2x = array(data=x, mask=m2)
+ m2X = array(data=X, mask=m2.reshape(X.shape))
+ m2XX = array(data=XX, mask=m2.reshape(XX.shape))
+ self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)
+
+ def test_varstd(self):
+ # Tests var & std on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ assert_almost_equal(mX.var(axis=None), mX.compressed().var())
+ assert_almost_equal(mX.std(axis=None), mX.compressed().std())
+ assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)
+ assert_equal(mX.var().shape, X.var().shape)
+ (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
+ assert_almost_equal(mX.var(axis=None, ddof=2),
+ mX.compressed().var(ddof=2))
+ assert_almost_equal(mX.std(axis=None, ddof=2),
+ mX.compressed().std(ddof=2))
+ for k in range(6):
+ assert_almost_equal(mXvar1[k], mX[k].compressed().var())
+ assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())
+ assert_almost_equal(np.sqrt(mXvar0[k]),
+ mX[:, k].compressed().std())
+
+
+class TestMaskedArrayFunctions:
+ # Test class for miscellaneous functions.
+
+ def setup_method(self):
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = masked_array(x, mask=m1)
+ ym = masked_array(y, mask=m2)
+ xm.set_fill_value(1e+20)
+ self.info = (xm, ym)
+
+ def test_masked_where_bool(self):
+ x = [1, 2]
+ y = masked_where(False, x)
+ assert_equal(y, [1, 2])
+ assert_equal(y[1], 2)
+
+ def test_masked_equal_wlist(self):
+ x = [1, 2, 3]
+ mx = masked_equal(x, 3)
+ assert_equal(mx, x)
+ assert_equal(mx._mask, [0, 0, 1])
+ mx = masked_not_equal(x, 3)
+ assert_equal(mx, x)
+ assert_equal(mx._mask, [1, 1, 0])
+
+ def test_masked_equal_fill_value(self):
+ x = [1, 2, 3]
+ mx = masked_equal(x, 3)
+ assert_equal(mx._mask, [0, 0, 1])
+ assert_equal(mx.fill_value, 3)
+
+ def test_masked_where_condition(self):
+ # Tests masking functions.
+ x = array([1., 2., 3., 4., 5.])
+ x[2] = masked
+ assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))
+ assert_equal(masked_where(greater_equal(x, 2), x),
+ masked_greater_equal(x, 2))
+ assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))
+ assert_equal(masked_where(less_equal(x, 2), x),
+ masked_less_equal(x, 2))
+ assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
+ assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))
+ assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))
+ assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
+ [99, 99, 3, 4, 5])
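+ # (the 99 entries are placeholders at the masked positions; the
+ # comparison ignores data under the mask)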
+
+ def test_masked_where_oddities(self):
+ # Tests some generic features.
+ atest = ones((10, 10, 10), dtype=float)
+ btest = zeros(atest.shape, MaskType)
+ ctest = masked_where(btest, atest)
+ assert_equal(atest, ctest)
+
+ def test_masked_where_shape_constraint(self):
+ a = arange(10)
+ with assert_raises(IndexError):
+ masked_equal(1, a)
+ test = masked_equal(a, 1)
+ assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ def test_masked_where_structured(self):
+ # test that masked_where on a structured array sets a structured
+ # mask (see issue #2972)
+ a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+ with np.errstate(over="ignore"):
+ # NOTE: The float16 fill value of 1e20 overflows to inf and
+ # warns. That is unrelated to this test, but probably undesired;
+ # NumPy previously did not warn for this overflow.
+ am = np.ma.masked_where(a["A"] < 5, a)
+ assert_equal(am.mask.dtype.names, am.dtype.names)
+ assert_equal(am["A"],
+ np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+ def test_masked_where_mismatch(self):
+ # gh-4520
+ x = np.arange(10)
+ y = np.arange(5)
+ assert_raises(IndexError, np.ma.masked_where, y > 6, x)
+
+ def test_masked_otherfunctions(self):
+ assert_equal(masked_inside(list(range(5)), 1, 3),
+ [0, 199, 199, 199, 4])
+ assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+ assert_equal(masked_inside(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 1, 1, 0])
+ assert_equal(masked_outside(array(list(range(5)),
+ mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 0, 0, 1])
+ assert_equal(masked_equal(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 0])
+ assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 1])
+
+ def test_round(self):
+ a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+ mask=[0, 1, 0, 0, 0])
+ assert_equal(a.round(), [1., 2., 3., 5., 6.])
+ assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+ assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+ b = empty_like(a)
+ a.round(out=b)
+ assert_equal(b, [1., 2., 3., 5., 6.])
+
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+
+ def test_round_with_output(self):
+ # Testing round with an explicit output
+
+ xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+ xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+ # A ndarray as explicit input
+ output = np.empty((3, 4), dtype=float)
+ output.fill(-9999)
+ result = np.round(xm, decimals=2, out=output)
+ # ... the result should be the given output
+ assert_(result is output)
+ assert_equal(result, xm.round(decimals=2, out=output))
+
+ output = empty((3, 4), dtype=float)
+ result = xm.round(decimals=2, out=output)
+ assert_(result is output)
+
+ def test_round_with_scalar(self):
+ # Testing round with scalar/zero dimension input
+ # GH issue 2244
+ a = array(1.1, mask=[False])
+ assert_equal(a.round(), 1)
+
+ a = array(1.1, mask=[True])
+ assert_(a.round() is masked)
+
+ a = array(1.1, mask=[False])
+ output = np.empty(1, dtype=float)
+ output.fill(-9999)
+ a.round(out=output)
+ assert_equal(output, 1)
+
+ a = array(1.1, mask=[False])
+ output = array(-9999., mask=[True])
+ a.round(out=output)
+ assert_equal(output[()], 1)
+
+ a = array(1.1, mask=[True])
+ output = array(-9999., mask=[False])
+ a.round(out=output)
+ assert_(output[()] is masked)
+
+ def test_identity(self):
+ a = identity(5)
+ assert_(isinstance(a, MaskedArray))
+ assert_equal(a, np.identity(5))
+
+ def test_power(self):
+ x = -1.1
+ assert_almost_equal(power(x, 2.), 1.21)
+ assert_(power(x, masked) is masked)
+ x = array([-1.1, -1.1, 1.1, 1.1, 0.])
+ b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
+ y = power(x, b)
+ assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])
+ assert_equal(y._mask, [1, 0, 0, 0, 1])
+ b.mask = nomask
+ y = power(x, b)
+ assert_equal(y._mask, [1, 0, 0, 0, 1])
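+ # The first and last entries stay masked even with b unmasked:
+ # (-1.1) ** 0.5 is invalid for real floats and 0. ** -1. divides
+ # by zero, so power() masks the invalid results.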
+ z = x ** b
+ assert_equal(z._mask, y._mask)
+ assert_almost_equal(z, y)
+ assert_almost_equal(z._data, y._data)
+ x **= b
+ assert_equal(x._mask, y._mask)
+ assert_almost_equal(x, y)
+ assert_almost_equal(x._data, y._data)
+
+ def test_power_with_broadcasting(self):
+ # Test power w/ broadcasting
+ a2 = np.array([[1., 2., 3.], [4., 5., 6.]])
+ a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])
+ b1 = np.array([2, 4, 3])
+ b2 = np.array([b1, b1])
+ b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])
+
+ ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],
+ mask=[[1, 1, 0], [0, 1, 1]])
+ # No broadcasting, base & exp w/ mask
+ test = a2m ** b2m
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, ctrl.mask)
+ # No broadcasting, base w/ mask, exp w/o mask
+ test = a2m ** b2
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, a2m.mask)
+ # No broadcasting, base w/o mask, exp w/ mask
+ test = a2 ** b2m
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, b2m.mask)
+
+ ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],
+ mask=[[0, 1, 0], [0, 1, 0]])
+ test = b1 ** b2m
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, ctrl.mask)
+ test = b2m ** b1
+ assert_equal(test, ctrl)
+ assert_equal(test.mask, ctrl.mask)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ def test_where(self):
+ # Test the where function
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = masked_array(x, mask=m1)
+ ym = masked_array(y, mask=m2)
+ xm.set_fill_value(1e+20)
+
+ d = where(xm > 2, xm, -9)
+ assert_equal(d, [-9., -9., -9., -9., -9., 4.,
+ -9., -9., 10., -9., -9., 3.])
+ assert_equal(d._mask, xm._mask)
+ d = where(xm > 2, -9, ym)
+ assert_equal(d, [5., 0., 3., 2., -1., -9.,
+ -9., -10., -9., 1., 0., -9.])
+ assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
+ d = where(xm > 2, xm, masked)
+ assert_equal(d, [-9., -9., -9., -9., -9., 4.,
+ -9., -9., 10., -9., -9., 3.])
+ tmp = xm._mask.copy()
+ tmp[(xm <= 2).filled(True)] = True
+ assert_equal(d._mask, tmp)
+
+ with np.errstate(invalid="warn"):
+ # The fill value is 1e20, which cannot be converted to `int`:
+ with pytest.warns(RuntimeWarning, match="invalid value"):
+ ixm = xm.astype(int)
+ d = where(ixm > 2, ixm, masked)
+ assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])
+ assert_equal(d.dtype, ixm.dtype)
+
+ def test_where_object(self):
+ a = np.array(None)
+ b = masked_array(None)
+ r = b.copy()
+ assert_equal(np.ma.where(True, a, a), r)
+ assert_equal(np.ma.where(True, b, b), r)
+
+ def test_where_with_masked_choice(self):
+ x = arange(10)
+ x[3] = masked
+ c = x >= 8
+ # Set False to masked
+ z = where(c, x, masked)
+ assert_(z.dtype is x.dtype)
+ assert_(z[3] is masked)
+ assert_(z[4] is masked)
+ assert_(z[7] is masked)
+ assert_(z[8] is not masked)
+ assert_(z[9] is not masked)
+ assert_equal(x, z)
+ # Set True to masked
+ z = where(c, masked, x)
+ assert_(z.dtype is x.dtype)
+ assert_(z[3] is masked)
+ assert_(z[4] is not masked)
+ assert_(z[7] is not masked)
+ assert_(z[8] is masked)
+ assert_(z[9] is masked)
+
+ def test_where_with_masked_condition(self):
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_equal(z, [1., 2., 0., -4., -5])
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+
+ x = arange(1, 6)
+ x[-1] = masked
+ y = arange(1, 6) * 10
+ y[2] = masked
+ c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])
+ cm = c.filled(1)
+ z = where(c, x, y)
+ zm = where(cm, x, y)
+ assert_equal(z, zm)
+ assert_(getmask(zm) is nomask)
+ assert_equal(zm, [1, 2, 3, 40, 50])
+ z = where(c, masked, 1)
+ assert_equal(z, [99, 99, 99, 1, 1])
+ z = where(c, 1, masked)
+ assert_equal(z, [99, 1, 1, 99, 99])
+
+ def test_where_type(self):
+ # Test the type conservation with where
+ x = np.arange(4, dtype=np.int32)
+ y = np.arange(4, dtype=np.float32) * 2.2
+ test = where(x > 1.5, y, x).dtype
+ control = np.find_common_type([np.int32, np.float32], [])
+ assert_equal(test, control)
+
+ def test_where_broadcast(self):
+ # Issue 8599
+ x = np.arange(9).reshape(3, 3)
+ y = np.zeros(3)
+ core = np.where([1, 0, 1], x, y)
+ ma = where([1, 0, 1], x, y)
+
+ assert_equal(core, ma)
+ assert_equal(core.dtype, ma.dtype)
+
+ def test_where_structured(self):
+ # Issue 8600
+ dt = np.dtype([('a', int), ('b', int)])
+ x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
+ y = np.array((10, 20), dtype=dt)
+ core = np.where([0, 1, 1], x, y)
+ ma = where([0, 1, 1], x, y)
+
+ assert_equal(core, ma)
+ assert_equal(core.dtype, ma.dtype)
+
+ def test_where_structured_masked(self):
+ dt = np.dtype([('a', int), ('b', int)])
+ x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt)
+
+ ma = where([0, 1, 1], x, masked)
+ expected = masked_where([1, 0, 0], x)
+
+ assert_equal(ma.dtype, expected.dtype)
+ assert_equal(ma, expected)
+ assert_equal(ma.mask, expected.mask)
+
+ def test_masked_invalid_error(self):
+ a = np.arange(5, dtype=object)
+ a[3] = np.PINF
+ a[2] = np.NaN
+ with pytest.raises(TypeError,
+ match="not supported for the input types"):
+ np.ma.masked_invalid(a)
+
+ def test_masked_invalid_pandas(self):
+ # getdata() used to misbehave for pandas Series because of their
+ # _data attribute. This is mainly a regression test and may be
+ # removed if getdata() is adjusted.
+ class Series():
+ _data = "nonsense"
+
+ def __array__(self):
+ return np.array([5, np.nan, np.inf])
+
+ arr = np.ma.masked_invalid(Series())
+ assert_array_equal(arr._data, np.array(Series()))
+ assert_array_equal(arr._mask, [False, True, True])
+
+ @pytest.mark.parametrize("copy", [True, False])
+ def test_masked_invalid_full_mask(self, copy):
+ # Matplotlib relied on masked_invalid always returning a full mask
+ # (Also astropy projects, but were ok with it gh-22720 and gh-22842)
+ a = np.ma.array([1, 2, 3, 4])
+ assert a._mask is nomask
+ res = np.ma.masked_invalid(a, copy=copy)
+ assert res.mask is not nomask
+ # mask of a should not be mutated
+ assert a.mask is nomask
+ assert np.may_share_memory(a._data, res._data) != copy
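+ # i.e. copy=True must hand back an independent data buffer,
+ # while copy=False may reuse the input's buffer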
+
+ def test_choose(self):
+ # Test choose
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ chosen = choose([2, 3, 1, 0], choices)
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ chosen = choose([2, 4, 1, 0], choices, mode='clip')
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ chosen = choose([2, 4, 1, 0], choices, mode='wrap')
+ assert_equal(chosen, array([20, 1, 12, 3]))
+ # Check with some masked indices
+ indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
+ chosen = choose(indices_, choices, mode='wrap')
+ assert_equal(chosen, array([99, 1, 12, 99]))
+ assert_equal(chosen.mask, [1, 0, 0, 1])
+ # Check with some masked choices
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
+ [1, 0, 0, 0], [0, 0, 0, 0]])
+ indices_ = [2, 3, 1, 0]
+ chosen = choose(indices_, choices, mode='wrap')
+ assert_equal(chosen, array([20, 31, 12, 3]))
+ assert_equal(chosen.mask, [1, 0, 0, 1])
+
+ def test_choose_with_out(self):
+ # Test choose with an explicit out keyword
+ choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+ [20, 21, 22, 23], [30, 31, 32, 33]]
+ store = empty(4, dtype=int)
+ chosen = choose([2, 3, 1, 0], choices, out=store)
+ assert_equal(store, array([20, 31, 12, 3]))
+ assert_(store is chosen)
+ # Check with some masked indices + out
+ store = empty(4, dtype=int)
+ indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
+ chosen = choose(indices_, choices, mode='wrap', out=store)
+ assert_equal(store, array([99, 31, 12, 99]))
+ assert_equal(store.mask, [1, 0, 0, 1])
+ # Check with some masked choices + out in a plain ndarray
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
+ [1, 0, 0, 0], [0, 0, 0, 0]])
+ indices_ = [2, 3, 1, 0]
+ store = empty(4, dtype=int).view(ndarray)
+ chosen = choose(indices_, choices, mode='wrap', out=store)
+ assert_equal(store, array([999999, 31, 12, 999999]))
+
+ def test_reshape(self):
+ a = arange(10)
+ a[0] = masked
+ # Try the default
+ b = a.reshape((5, 2))
+ assert_equal(b.shape, (5, 2))
+ assert_(b.flags['C'])
+ # Try w/ arguments passed separately instead of as a tuple
+ b = a.reshape(5, 2)
+ assert_equal(b.shape, (5, 2))
+ assert_(b.flags['C'])
+ # Try w/ order
+ b = a.reshape((5, 2), order='F')
+ assert_equal(b.shape, (5, 2))
+ assert_(b.flags['F'])
+ # Try w/ order
+ b = a.reshape(5, 2, order='F')
+ assert_equal(b.shape, (5, 2))
+ assert_(b.flags['F'])
+
+ c = np.reshape(a, (2, 5))
+ assert_(isinstance(c, MaskedArray))
+ assert_equal(c.shape, (2, 5))
+ assert_(c[0, 0] is masked)
+ assert_(c.flags['C'])
+
+ def test_make_mask_descr(self):
+ # Flexible
+ ntype = [('a', float), ('b', float)]
+ test = make_mask_descr(ntype)
+ assert_equal(test, [('a', bool), ('b', bool)])
+ assert_(test is make_mask_descr(test))
+
+ # Standard w/ shape
+ ntype = (float, 2)
+ test = make_mask_descr(ntype)
+ assert_equal(test, (bool, 2))
+ assert_(test is make_mask_descr(test))
+
+ # Standard standard
+ ntype = float
+ test = make_mask_descr(ntype)
+ assert_equal(test, np.dtype(bool))
+ assert_(test is make_mask_descr(test))
+
+ # Nested
+ ntype = [('a', float), ('b', [('ba', float), ('bb', float)])]
+ test = make_mask_descr(ntype)
+ control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
+ assert_equal(test, control)
+ assert_(test is make_mask_descr(test))
+
+ # Named + shape
+ ntype = [('a', (float, 2))]
+ test = make_mask_descr(ntype)
+ assert_equal(test, np.dtype([('a', (bool, 2))]))
+ assert_(test is make_mask_descr(test))
+
+ # 2 names
+ ntype = [(('A', 'a'), float)]
+ test = make_mask_descr(ntype)
+ assert_equal(test, np.dtype([(('A', 'a'), bool)]))
+ assert_(test is make_mask_descr(test))
+
+ # nested boolean types should preserve identity
+ base_type = np.dtype([('a', int, 3)])
+ base_mtype = make_mask_descr(base_type)
+ sub_type = np.dtype([('a', int), ('b', base_mtype)])
+ test = make_mask_descr(sub_type)
+ assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])]))
+ assert_(test.fields['b'][0] is base_mtype)
+
+ def test_make_mask(self):
+ # Test make_mask
+ # w/ a list as an input
+ mask = [0, 1]
+ test = make_mask(mask)
+ assert_equal(test.dtype, MaskType)
+ assert_equal(test, [0, 1])
+ # w/ a ndarray as an input
+ mask = np.array([0, 1], dtype=bool)
+ test = make_mask(mask)
+ assert_equal(test.dtype, MaskType)
+ assert_equal(test, [0, 1])
+ # w/ a flexible-type ndarray as an input - use default
+ mdtype = [('a', bool), ('b', bool)]
+ mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
+ test = make_mask(mask)
+ assert_equal(test.dtype, MaskType)
+ assert_equal(test, [1, 1])
+ # w/ a flexible-type ndarray as an input - use input dtype
+ mdtype = [('a', bool), ('b', bool)]
+ mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
+ test = make_mask(mask, dtype=mask.dtype)
+ assert_equal(test.dtype, mdtype)
+ assert_equal(test, mask)
+ # w/ a flexible-type ndarray as an input - use input dtype
+ mdtype = [('a', float), ('b', float)]
+ bdtype = [('a', bool), ('b', bool)]
+ mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
+ test = make_mask(mask, dtype=mask.dtype)
+ assert_equal(test.dtype, bdtype)
+ assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
+ # Ensure this also works for void
+ mask = np.array((False, True), dtype='?,?')[()]
+ assert_(isinstance(mask, np.void))
+ test = make_mask(mask, dtype=mask.dtype)
+ assert_equal(test, mask)
+ assert_(test is not mask)
+ mask = np.array((0, 1), dtype='i4,i4')[()]
+ test2 = make_mask(mask, dtype=mask.dtype)
+ assert_equal(test2, test)
+ # test that nomask is returned when m is nomask.
+ bools = [True, False]
+ dtypes = [MaskType, float]
+ msgformat = 'copy=%s, shrink=%s, dtype=%s'
+ for cpy, shr, dt in itertools.product(bools, bools, dtypes):
+ res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
+ assert_(res is nomask, msgformat % (cpy, shr, dt))
+
+ def test_mask_or(self):
+ # Initialize
+ mtype = [('a', bool), ('b', bool)]
+ mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
+ # Test using nomask as input
+ test = mask_or(mask, nomask)
+ assert_equal(test, mask)
+ test = mask_or(nomask, mask)
+ assert_equal(test, mask)
+ # Using False as input
+ test = mask_or(mask, False)
+ assert_equal(test, mask)
+ # Using another array w/ the same dtype
+ other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)
+ test = mask_or(mask, other)
+ control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
+ assert_equal(test, control)
+ # Using another array w/ a different dtype
+ othertype = [('A', bool), ('B', bool)]
+ other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
+ try:
+ test = mask_or(mask, other)
+ except ValueError:
+ pass
+ # Using nested arrays
+ dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
+ amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
+ bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
+ cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
+ assert_equal(mask_or(amask, bmask), cntrl)
+
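+    # Illustrative sketch: mask_or is an elementwise logical OR of two masks,
+    # and OR-ing nomask with nomask yields nomask again.
+    def test_mask_or_basic_sketch(self):
+        assert_(mask_or(nomask, nomask) is nomask)
+        m1 = np.array([0, 1, 0], dtype=bool)
+        m2 = np.array([0, 0, 1], dtype=bool)
+        assert_equal(mask_or(m1, m2), [False, True, True])
+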
+ def test_flatten_mask(self):
+ # Tests flatten mask
+ # Standard dtype
+ mask = np.array([0, 0, 1], dtype=bool)
+ assert_equal(flatten_mask(mask), mask)
+ # Flexible dtype
+ mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
+ test = flatten_mask(mask)
+ control = np.array([0, 0, 0, 1], dtype=bool)
+ assert_equal(test, control)
+
+ mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
+ data = [(0, (0, 0)), (0, (0, 1))]
+ mask = np.array(data, dtype=mdtype)
+ test = flatten_mask(mask)
+ control = np.array([0, 0, 0, 0, 0, 1], dtype=bool)
+ assert_equal(test, control)
+
+ def test_on_ndarray(self):
+ # Test functions on ndarrays
+ a = np.array([1, 2, 3, 4])
+ m = array(a, mask=False)
+ test = anom(a)
+ assert_equal(test, m.anom())
+ test = reshape(a, (2, 2))
+ assert_equal(test, m.reshape(2, 2))
+
+ def test_compress(self):
+ # Test compress function on ndarray and masked array
+ # Address Github #2495.
+ arr = np.arange(8)
+ arr.shape = 4, 2
+ cond = np.array([True, False, True, True])
+ control = arr[[0, 2, 3]]
+ test = np.ma.compress(cond, arr, axis=0)
+ assert_equal(test, control)
+ marr = np.ma.array(arr)
+ test = np.ma.compress(cond, marr, axis=0)
+ assert_equal(test, control)
+
+ def test_compressed(self):
+ # Test ma.compressed function.
+ # Address gh-4026
+ a = np.ma.array([1, 2])
+ test = np.ma.compressed(a)
+ assert_(type(test) is np.ndarray)
+
+ # Test case when input data is ndarray subclass
+ class A(np.ndarray):
+ pass
+
+ a = np.ma.array(A(shape=0))
+ test = np.ma.compressed(a)
+ assert_(type(test) is A)
+
+    # Test that compressed flattens
+ test = np.ma.compressed([[1],[2]])
+ assert_equal(test.ndim, 1)
+ test = np.ma.compressed([[[[[1]]]]])
+ assert_equal(test.ndim, 1)
+
+ # Test case when input is MaskedArray subclass
+ class M(MaskedArray):
+ pass
+
+ test = np.ma.compressed(M([[[]], [[]]]))
+ assert_equal(test.ndim, 1)
+
+ # with .compressed() overridden
+ class M(MaskedArray):
+ def compressed(self):
+ return 42
+
+ test = np.ma.compressed(M([[[]], [[]]]))
+ assert_equal(test, 42)
+
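+    # Illustrative sketch: compressed keeps only the unmasked entries, in
+    # C order, as a 1-D result.
+    def test_compressed_values_sketch(self):
+        a = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
+        assert_equal(np.ma.compressed(a), [1, 4])
+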
+ def test_convolve(self):
+ a = masked_equal(np.arange(5), 2)
+ b = np.array([1, 1])
+ test = np.ma.convolve(a, b)
+ assert_equal(test, masked_equal([0, 1, -1, -1, 7, 4], -1))
+
+ test = np.ma.convolve(a, b, propagate_mask=False)
+ assert_equal(test, masked_equal([0, 1, 1, 3, 7, 4], -1))
+
+ test = np.ma.convolve([1, 1], [1, 1, 1])
+ assert_equal(test, masked_equal([1, 2, 2, 1], -1))
+
+ a = [1, 1]
+ b = masked_equal([1, -1, -1, 1], -1)
+ test = np.ma.convolve(a, b, propagate_mask=False)
+ assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1))
+ test = np.ma.convolve(a, b, propagate_mask=True)
+ assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
+
+
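+# A standalone illustrative sketch of the two convolve masking modes used
+# above: propagate_mask=True masks every output touched by a masked input,
+# while propagate_mask=False drops masked values from each window sum.
+def test_convolve_propagate_modes_sketch():
+    a = masked_equal(np.arange(5), 2)  # data [0 1 2 3 4], index 2 masked
+    full = np.ma.convolve(a, [1, 1])
+    assert_equal(full.mask, [0, 0, 1, 1, 0, 0])
+    partial = np.ma.convolve(a, [1, 1], propagate_mask=False)
+    assert_equal(partial, [0, 1, 1, 3, 7, 4])
+
+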
+class TestMaskedFields:
+
+ def setup_method(self):
+ ilist = [1, 2, 3, 4, 5]
+ flist = [1.1, 2.2, 3.3, 4.4, 5.5]
+ slist = ['one', 'two', 'three', 'four', 'five']
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mdtype = [('a', bool), ('b', bool), ('c', bool)]
+ mask = [0, 1, 0, 0, 1]
+ base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
+ self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)
+
+ def test_set_records_masks(self):
+ base = self.data['base']
+ mdtype = self.data['mdtype']
+ # Set w/ nomask or masked
+ base.mask = nomask
+ assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
+ base.mask = masked
+ assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
+ # Set w/ simple boolean
+ base.mask = False
+ assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))
+ base.mask = True
+ assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))
+ # Set w/ list
+ base.mask = [0, 0, 0, 1, 1]
+ assert_equal_records(base._mask,
+ np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],
+ dtype=mdtype))
+
+ def test_set_record_element(self):
+        # Check setting an element of a record
+ base = self.data['base']
+ (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
+ base[0] = (pi, pi, 'pi')
+
+ assert_equal(base_a.dtype, int)
+ assert_equal(base_a._data, [3, 2, 3, 4, 5])
+
+ assert_equal(base_b.dtype, float)
+ assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])
+
+ assert_equal(base_c.dtype, '|S8')
+ assert_equal(base_c._data,
+ [b'pi', b'two', b'three', b'four', b'five'])
+
+ def test_set_record_slice(self):
+ base = self.data['base']
+ (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])
+ base[:3] = (pi, pi, 'pi')
+
+ assert_equal(base_a.dtype, int)
+ assert_equal(base_a._data, [3, 3, 3, 4, 5])
+
+ assert_equal(base_b.dtype, float)
+ assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])
+
+ assert_equal(base_c.dtype, '|S8')
+ assert_equal(base_c._data,
+ [b'pi', b'pi', b'pi', b'four', b'five'])
+
+ def test_mask_element(self):
+ "Check record access"
+ base = self.data['base']
+ base[0] = masked
+
+ for n in ('a', 'b', 'c'):
+ assert_equal(base[n].mask, [1, 1, 0, 0, 1])
+ assert_equal(base[n]._data, base._data[n])
+
+ def test_getmaskarray(self):
+ # Test getmaskarray on flexible dtype
+ ndtype = [('a', int), ('b', float)]
+ test = empty(3, dtype=ndtype)
+ assert_equal(getmaskarray(test),
+ np.array([(0, 0), (0, 0), (0, 0)],
+ dtype=[('a', '|b1'), ('b', '|b1')]))
+ test[:] = masked
+ assert_equal(getmaskarray(test),
+ np.array([(1, 1), (1, 1), (1, 1)],
+ dtype=[('a', '|b1'), ('b', '|b1')]))
+
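+    # Illustrative sketch: np.ma.getmask may return the nomask singleton,
+    # whereas getmaskarray always materializes a full boolean mask array.
+    def test_getmaskarray_vs_getmask_sketch(self):
+        a = array([1, 2, 3])
+        assert_(np.ma.getmask(a) is nomask)
+        assert_equal(getmaskarray(a), [False, False, False])
+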
+ def test_view(self):
+ # Test view w/ flexible dtype
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
+ data = np.array(iterator)
+ a = array(iterator, dtype=[('a', float), ('b', float)])
+ a.mask[0] = (1, 0)
+ controlmask = np.array([1] + 19 * [0], dtype=bool)
+ # Transform globally to simple dtype
+ test = a.view(float)
+ assert_equal(test, data.ravel())
+ assert_equal(test.mask, controlmask)
+        # Transform globally to a compound (float, 2) dtype
+ test = a.view((float, 2))
+ assert_equal(test, data)
+ assert_equal(test.mask, controlmask.reshape(-1, 2))
+
+ def test_getitem(self):
+ ndtype = [('a', float), ('b', float)]
+ a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)
+ a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
+ [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),
+ dtype=[('a', bool), ('b', bool)])
+
+ def _test_index(i):
+ assert_equal(type(a[i]), mvoid)
+ assert_equal_records(a[i]._data, a._data[i])
+ assert_equal_records(a[i]._mask, a._mask[i])
+
+ assert_equal(type(a[i, ...]), MaskedArray)
+ assert_equal_records(a[i,...]._data, a._data[i,...])
+ assert_equal_records(a[i,...]._mask, a._mask[i,...])
+
+ _test_index(1) # No mask
+        _test_index(0)  # One field masked
+        _test_index(-2)  # All fields masked
+
+ def test_setitem(self):
+ # Issue 4866: check that one can set individual items in [record][col]
+ # and [col][record] order
+ ndtype = np.dtype([('a', float), ('b', int)])
+ ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype)
+ ma['a'][1] = 3.0
+ assert_equal(ma['a'], np.array([1.0, 3.0]))
+ ma[1]['a'] = 4.0
+ assert_equal(ma['a'], np.array([1.0, 4.0]))
+ # Issue 2403
+ mdtype = np.dtype([('a', bool), ('b', bool)])
+ # soft mask
+ control = np.array([(False, True), (True, True)], dtype=mdtype)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a['a'][0] = 2
+ assert_equal(a.mask, control)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a[0]['a'] = 2
+ assert_equal(a.mask, control)
+ # hard mask
+ control = np.array([(True, True), (True, True)], dtype=mdtype)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a.harden_mask()
+ a['a'][0] = 2
+ assert_equal(a.mask, control)
+ a = np.ma.masked_all((2,), dtype=ndtype)
+ a.harden_mask()
+ a[0]['a'] = 2
+ assert_equal(a.mask, control)
+
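+    # Illustrative sketch: once the mask is hardened, assigning to a masked
+    # entry is a no-op, which is why the hard-mask cases above stay masked.
+    def test_hard_mask_assignment_sketch(self):
+        a = np.ma.array([1., 2.], mask=[1, 0])
+        a.harden_mask()
+        a[0] = 9.
+        assert_equal(a.mask, [True, False])
+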
+ def test_setitem_scalar(self):
+        # gh-8510
+ mask_0d = np.ma.masked_array(1, mask=True)
+ arr = np.ma.arange(3)
+ arr[0] = mask_0d
+ assert_array_equal(arr.mask, [True, False, False])
+
+ def test_element_len(self):
+ # check that len() works for mvoid (Github issue #576)
+ for rec in self.data['base']:
+ assert_equal(len(rec), len(self.data['ddtype']))
+
+
+class TestMaskedObjectArray:
+
+ def test_getitem(self):
+ arr = np.ma.array([None, None])
+ for dt in [float, object]:
+ a0 = np.eye(2).astype(dt)
+ a1 = np.eye(3).astype(dt)
+ arr[0] = a0
+ arr[1] = a1
+
+ assert_(arr[0] is a0)
+ assert_(arr[1] is a1)
+ assert_(isinstance(arr[0,...], MaskedArray))
+ assert_(isinstance(arr[1,...], MaskedArray))
+ assert_(arr[0,...][()] is a0)
+ assert_(arr[1,...][()] is a1)
+
+ arr[0] = np.ma.masked
+
+ assert_(arr[1] is a1)
+ assert_(isinstance(arr[0,...], MaskedArray))
+ assert_(isinstance(arr[1,...], MaskedArray))
+ assert_equal(arr[0,...].mask, True)
+ assert_(arr[1,...][()] is a1)
+
+ # gh-5962 - object arrays of arrays do something special
+ assert_equal(arr[0].data, a0)
+ assert_equal(arr[0].mask, True)
+ assert_equal(arr[0,...][()].data, a0)
+ assert_equal(arr[0,...][()].mask, True)
+
+ def test_nested_ma(self):
+
+ arr = np.ma.array([None, None])
+ # set the first object to be an unmasked masked constant. A little fiddly
+ arr[0,...] = np.array([np.ma.masked], object)[0,...]
+
+ # check the above line did what we were aiming for
+ assert_(arr.data[0] is np.ma.masked)
+
+ # test that getitem returned the value by identity
+ assert_(arr[0] is np.ma.masked)
+
+ # now mask the masked value!
+ arr[0] = np.ma.masked
+ assert_(arr[0] is np.ma.masked)
+
+
+class TestMaskedView:
+
+ def setup_method(self):
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
+ data = np.array(iterator)
+ a = array(iterator, dtype=[('a', float), ('b', float)])
+ a.mask[0] = (1, 0)
+ controlmask = np.array([1] + 19 * [0], dtype=bool)
+ self.data = (data, a, controlmask)
+
+ def test_view_to_nothing(self):
+ (data, a, controlmask) = self.data
+ test = a.view()
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test._data, a._data)
+ assert_equal(test._mask, a._mask)
+
+ def test_view_to_type(self):
+ (data, a, controlmask) = self.data
+ test = a.view(np.ndarray)
+ assert_(not isinstance(test, MaskedArray))
+ assert_equal(test, a._data)
+ assert_equal_records(test, data.view(a.dtype).squeeze())
+
+ def test_view_to_simple_dtype(self):
+ (data, a, controlmask) = self.data
+ # View globally
+ test = a.view(float)
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test, data.ravel())
+ assert_equal(test.mask, controlmask)
+
+ def test_view_to_flexible_dtype(self):
+ (data, a, controlmask) = self.data
+
+ test = a.view([('A', float), ('B', float)])
+ assert_equal(test.mask.dtype.names, ('A', 'B'))
+ assert_equal(test['A'], a['a'])
+ assert_equal(test['B'], a['b'])
+
+ test = a[0].view([('A', float), ('B', float)])
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test.mask.dtype.names, ('A', 'B'))
+ assert_equal(test['A'], a['a'][0])
+ assert_equal(test['B'], a['b'][0])
+
+ test = a[-1].view([('A', float), ('B', float)])
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test.dtype.names, ('A', 'B'))
+ assert_equal(test['A'], a['a'][-1])
+ assert_equal(test['B'], a['b'][-1])
+
+ def test_view_to_subdtype(self):
+ (data, a, controlmask) = self.data
+ # View globally
+ test = a.view((float, 2))
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test, data)
+ assert_equal(test.mask, controlmask.reshape(-1, 2))
+ # View on 1 masked element
+ test = a[0].view((float, 2))
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test, data[0])
+ assert_equal(test.mask, (1, 0))
+ # View on 1 unmasked element
+ test = a[-1].view((float, 2))
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test, data[-1])
+
+ def test_view_to_dtype_and_type(self):
+ (data, a, controlmask) = self.data
+
+ test = a.view((float, 2), np.recarray)
+ assert_equal(test, data)
+ assert_(isinstance(test, np.recarray))
+ assert_(not isinstance(test, MaskedArray))
+
+
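+# Illustrative sketch: a plain .view() shares its data buffer with the
+# parent, so writing through the view is visible in the original array.
+def test_view_shares_data_sketch():
+    a = array([1., 2., 3.], mask=[0, 0, 1])
+    v = a.view()
+    v[0] = 99.
+    assert_equal(a[0], 99.)
+
+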
+class TestOptionalArgs:
+ def test_ndarrayfuncs(self):
+ # test axis arg behaves the same as ndarray (including multiple axes)
+
+ d = np.arange(24.0).reshape((2,3,4))
+ m = np.zeros(24, dtype=bool).reshape((2,3,4))
+ # mask out last element of last dimension
+ m[:,:,-1] = True
+ a = np.ma.array(d, mask=m)
+
+ def testaxis(f, a, d):
+ numpy_f = numpy.__getattribute__(f)
+ ma_f = np.ma.__getattribute__(f)
+
+ # test axis arg
+ assert_equal(ma_f(a, axis=1)[...,:-1], numpy_f(d[...,:-1], axis=1))
+ assert_equal(ma_f(a, axis=(0,1))[...,:-1],
+ numpy_f(d[...,:-1], axis=(0,1)))
+
+ def testkeepdims(f, a, d):
+ numpy_f = numpy.__getattribute__(f)
+ ma_f = np.ma.__getattribute__(f)
+
+ # test keepdims arg
+ assert_equal(ma_f(a, keepdims=True).shape,
+ numpy_f(d, keepdims=True).shape)
+ assert_equal(ma_f(a, keepdims=False).shape,
+ numpy_f(d, keepdims=False).shape)
+
+ # test both at once
+ assert_equal(ma_f(a, axis=1, keepdims=True)[...,:-1],
+ numpy_f(d[...,:-1], axis=1, keepdims=True))
+ assert_equal(ma_f(a, axis=(0,1), keepdims=True)[...,:-1],
+ numpy_f(d[...,:-1], axis=(0,1), keepdims=True))
+
+ for f in ['sum', 'prod', 'mean', 'var', 'std']:
+ testaxis(f, a, d)
+ testkeepdims(f, a, d)
+
+ for f in ['min', 'max']:
+ testaxis(f, a, d)
+
+ d = (np.arange(24).reshape((2,3,4))%2 == 0)
+ a = np.ma.array(d, mask=m)
+ for f in ['all', 'any']:
+ testaxis(f, a, d)
+ testkeepdims(f, a, d)
+
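+    # Illustrative sketch: the masked reductions simply skip masked entries,
+    # so a sum along the last axis ignores the masked column.
+    def test_masked_sum_skips_masked_sketch(self):
+        a = np.ma.array([[1., 2., 3.], [4., 5., 6.]],
+                        mask=[[0, 0, 1], [0, 0, 1]])
+        assert_equal(np.ma.sum(a, axis=1), [3., 9.])
+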
+ def test_count(self):
+ # test np.ma.count specially
+
+ d = np.arange(24.0).reshape((2,3,4))
+ m = np.zeros(24, dtype=bool).reshape((2,3,4))
+ m[:,0,:] = True
+ a = np.ma.array(d, mask=m)
+
+ assert_equal(count(a), 16)
+ assert_equal(count(a, axis=1), 2*ones((2,4)))
+ assert_equal(count(a, axis=(0,1)), 4*ones((4,)))
+ assert_equal(count(a, keepdims=True), 16*ones((1,1,1)))
+ assert_equal(count(a, axis=1, keepdims=True), 2*ones((2,1,4)))
+ assert_equal(count(a, axis=(0,1), keepdims=True), 4*ones((1,1,4)))
+ assert_equal(count(a, axis=-2), 2*ones((2,4)))
+ assert_raises(ValueError, count, a, axis=(1,1))
+ assert_raises(np.AxisError, count, a, axis=3)
+
+ # check the 'nomask' path
+ a = np.ma.array(d, mask=nomask)
+
+ assert_equal(count(a), 24)
+ assert_equal(count(a, axis=1), 3*ones((2,4)))
+ assert_equal(count(a, axis=(0,1)), 6*ones((4,)))
+ assert_equal(count(a, keepdims=True), 24*ones((1,1,1)))
+ assert_equal(np.ndim(count(a, keepdims=True)), 3)
+ assert_equal(count(a, axis=1, keepdims=True), 3*ones((2,1,4)))
+ assert_equal(count(a, axis=(0,1), keepdims=True), 6*ones((1,1,4)))
+ assert_equal(count(a, axis=-2), 3*ones((2,4)))
+ assert_raises(ValueError, count, a, axis=(1,1))
+ assert_raises(np.AxisError, count, a, axis=3)
+
+ # check the 'masked' singleton
+ assert_equal(count(np.ma.masked), 0)
+
+ # check 0-d arrays do not allow axis > 0
+ assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
+
+
+class TestMaskedConstant:
+ def _do_add_test(self, add):
+ # sanity check
+ assert_(add(np.ma.masked, 1) is np.ma.masked)
+
+ # now try with a vector
+ vector = np.array([1, 2, 3])
+ result = add(np.ma.masked, vector)
+
+ # lots of things could go wrong here
+ assert_(result is not np.ma.masked)
+ assert_(not isinstance(result, np.ma.core.MaskedConstant))
+ assert_equal(result.shape, vector.shape)
+ assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool))
+
+ def test_ufunc(self):
+ self._do_add_test(np.add)
+
+ def test_operator(self):
+ self._do_add_test(lambda a, b: a + b)
+
+ def test_ctor(self):
+ m = np.ma.array(np.ma.masked)
+
+ # most importantly, we do not want to create a new MaskedConstant
+ # instance
+ assert_(not isinstance(m, np.ma.core.MaskedConstant))
+ assert_(m is not np.ma.masked)
+
+ def test_repr(self):
+ # copies should not exist, but if they do, it should be obvious that
+ # something is wrong
+ assert_equal(repr(np.ma.masked), 'masked')
+
+ # create a new instance in a weird way
+ masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant)
+ assert_not_equal(repr(masked2), 'masked')
+
+ def test_pickle(self):
+ from io import BytesIO
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ with BytesIO() as f:
+ pickle.dump(np.ma.masked, f, protocol=proto)
+ f.seek(0)
+ res = pickle.load(f)
+ assert_(res is np.ma.masked)
+
+ def test_copy(self):
+ # gh-9328
+ # copy is a no-op, like it is with np.True_
+ assert_equal(
+ np.ma.masked.copy() is np.ma.masked,
+ np.True_.copy() is np.True_)
+
+ def test__copy(self):
+ import copy
+ assert_(
+ copy.copy(np.ma.masked) is np.ma.masked)
+
+ def test_deepcopy(self):
+ import copy
+ assert_(
+ copy.deepcopy(np.ma.masked) is np.ma.masked)
+
+ def test_immutable(self):
+ orig = np.ma.masked
+ assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
+        assert_raises(ValueError, operator.setitem, orig.data, (), 1)
+ assert_raises(ValueError, operator.setitem, orig.mask, (), False)
+
+ view = np.ma.masked.view(np.ma.MaskedArray)
+ assert_raises(ValueError, operator.setitem, view, (), 1)
+ assert_raises(ValueError, operator.setitem, view.data, (), 1)
+ assert_raises(ValueError, operator.setitem, view.mask, (), False)
+
+ def test_coercion_int(self):
+ a_i = np.zeros((), int)
+ assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked)
+ assert_raises(MaskError, int, np.ma.masked)
+
+ def test_coercion_float(self):
+ a_f = np.zeros((), float)
+ assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked)
+ assert_(np.isnan(a_f[()]))
+
+ @pytest.mark.xfail(reason="See gh-9750")
+ def test_coercion_unicode(self):
+ a_u = np.zeros((), 'U10')
+ a_u[()] = np.ma.masked
+ assert_equal(a_u[()], '--')
+
+ @pytest.mark.xfail(reason="See gh-9750")
+ def test_coercion_bytes(self):
+ a_b = np.zeros((), 'S10')
+ a_b[()] = np.ma.masked
+ assert_equal(a_b[()], b'--')
+
+ def test_subclass(self):
+ # https://github.com/astropy/astropy/issues/6645
+ class Sub(type(np.ma.masked)): pass
+
+ a = Sub()
+ assert_(a is Sub())
+ assert_(a is not np.ma.masked)
+ assert_not_equal(repr(a), 'masked')
+
+ def test_attributes_readonly(self):
+ assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,))
+ assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64)
+
+
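+# Illustrative sketch: scalar arithmetic with the masked singleton returns
+# the singleton itself, the scalar analogue of _do_add_test above.
+def test_masked_singleton_scalar_arithmetic_sketch():
+    assert_((np.ma.masked + 1) is np.ma.masked)
+    assert_((2 * np.ma.masked) is np.ma.masked)
+
+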
+class TestMaskedWhereAliases:
+
+ # TODO: Test masked_object, masked_equal, ...
+
+ def test_masked_values(self):
+ res = masked_values(np.array([-32768.0]), np.int16(-32768))
+ assert_equal(res.mask, [True])
+
+ res = masked_values(np.inf, np.inf)
+ assert_equal(res.mask, True)
+
+ res = np.ma.masked_values(np.inf, -np.inf)
+ assert_equal(res.mask, False)
+
+ res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True)
+ assert_(res.mask is np.ma.nomask)
+
+ res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False)
+ assert_equal(res.mask, [False] * 4)
+
+
+def test_masked_array():
+ a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
+ assert_equal(np.argwhere(a), [[1], [3]])
+
+def test_masked_array_no_copy():
+ # check nomask array is updated in place
+ a = np.ma.array([1, 2, 3, 4])
+ _ = np.ma.masked_where(a == 3, a, copy=False)
+ assert_array_equal(a.mask, [False, False, True, False])
+ # check masked array is updated in place
+ a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0])
+ _ = np.ma.masked_where(a == 3, a, copy=False)
+ assert_array_equal(a.mask, [True, False, True, False])
+ # check masked array with masked_invalid is updated in place
+ a = np.ma.array([np.inf, 1, 2, 3, 4])
+ _ = np.ma.masked_invalid(a, copy=False)
+ assert_array_equal(a.mask, [True, False, False, False, False])
+
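+# Illustrative sketch: with the default copy=True, masked_where leaves the
+# input untouched and only the returned array carries the new mask.
+def test_masked_where_copy_sketch():
+    a = np.ma.array([1, 2, 3, 4])
+    res = np.ma.masked_where(a == 3, a)
+    assert_array_equal(res.mask, [False, False, True, False])
+    assert_(a.mask is np.ma.nomask)
+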
+def test_append_masked_array():
+ a = np.ma.masked_equal([1,2,3], value=2)
+ b = np.ma.masked_equal([4,3,2], value=2)
+
+ result = np.ma.append(a, b)
+ expected_data = [1, 2, 3, 4, 3, 2]
+ expected_mask = [False, True, False, False, False, True]
+ assert_array_equal(result.data, expected_data)
+ assert_array_equal(result.mask, expected_mask)
+
+ a = np.ma.masked_all((2,2))
+ b = np.ma.ones((3,1))
+
+ result = np.ma.append(a, b)
+ expected_data = [1] * 3
+ expected_mask = [True] * 4 + [False] * 3
+ assert_array_equal(result.data[-3], expected_data)
+ assert_array_equal(result.mask, expected_mask)
+
+ result = np.ma.append(a, b, axis=None)
+ assert_array_equal(result.data[-3], expected_data)
+ assert_array_equal(result.mask, expected_mask)
+
+
+def test_append_masked_array_along_axis():
+ a = np.ma.masked_equal([1,2,3], value=2)
+ b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
+
+ # When `axis` is specified, `values` must have the correct shape.
+ assert_raises(ValueError, np.ma.append, a, b, axis=0)
+
+ result = np.ma.append(a[np.newaxis,:], b, axis=0)
+ expected = np.ma.arange(1, 10)
+ expected[[1, 6]] = np.ma.masked
+ expected = expected.reshape((3,3))
+ assert_array_equal(result.data, expected.data)
+ assert_array_equal(result.mask, expected.mask)
+
+def test_default_fill_value_complex():
+ # regression test for Python 3, where 'unicode' was not defined
+ assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
+
+
+def test_ufunc_with_output():
+ # check that giving an output argument always returns that output.
+ # Regression test for gh-8416.
+ x = array([1., 2., 3.], mask=[0, 0, 1])
+ y = np.add(x, 1., out=x)
+ assert_(y is x)
+
+
+def test_ufunc_with_out_varied():
+ """ Test that masked arrays are immune to gh-10459 """
+    # the mask of the output should not affect the result, however the
+    # out argument is passed
+ a = array([ 1, 2, 3], mask=[1, 0, 0])
+ b = array([10, 20, 30], mask=[1, 0, 0])
+ out = array([ 0, 0, 0], mask=[0, 0, 1])
+ expected = array([11, 22, 33], mask=[1, 0, 0])
+
+ out_pos = out.copy()
+ res_pos = np.add(a, b, out_pos)
+
+ out_kw = out.copy()
+ res_kw = np.add(a, b, out=out_kw)
+
+ out_tup = out.copy()
+ res_tup = np.add(a, b, out=(out_tup,))
+
+ assert_equal(res_kw.mask, expected.mask)
+ assert_equal(res_kw.data, expected.data)
+ assert_equal(res_tup.mask, expected.mask)
+ assert_equal(res_tup.data, expected.data)
+ assert_equal(res_pos.mask, expected.mask)
+ assert_equal(res_pos.data, expected.data)
+
+
+def test_astype_mask_ordering():
+ descr = np.dtype([('v', int, 3), ('x', [('y', float)])])
+ x = array([
+ [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))],
+ [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr)
+ x[0]['v'][0] = np.ma.masked
+
+ x_a = x.astype(descr)
+ assert x_a.dtype.names == np.dtype(descr).names
+ assert x_a.mask.dtype.names == np.dtype(descr).names
+ assert_equal(x, x_a)
+
+ assert_(x is x.astype(x.dtype, copy=False))
+ assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray)
+
+ x_f = x.astype(x.dtype, order='F')
+ assert_(x_f.flags.f_contiguous)
+ assert_(x_f.mask.flags.f_contiguous)
+
+ # Also test the same indirectly, via np.array
+ x_a2 = np.array(x, dtype=descr, subok=True)
+ assert x_a2.dtype.names == np.dtype(descr).names
+ assert x_a2.mask.dtype.names == np.dtype(descr).names
+ assert_equal(x, x_a2)
+
+ assert_(x is np.array(x, dtype=descr, copy=False, subok=True))
+
+ x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True)
+ assert_(x_f2.flags.f_contiguous)
+ assert_(x_f2.mask.flags.f_contiguous)
+
+
+@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning')
+def test_astype_basic(dt1, dt2):
+ # See gh-12070
+ src = np.ma.array(ones(3, dt1), fill_value=1)
+ dst = src.astype(dt2)
+
+ assert_(src.fill_value == 1)
+ assert_(src.dtype == dt1)
+ assert_(src.fill_value.dtype == dt1)
+
+ assert_(dst.fill_value == 1)
+ assert_(dst.dtype == dt2)
+ assert_(dst.fill_value.dtype == dt2)
+
+ assert_equal(src, dst)
+
+
+def test_fieldless_void():
+ dt = np.dtype([]) # a void dtype with no fields
+ x = np.empty(4, dt)
+
+ # these arrays contain no values, so there's little to test - but this
+ # shouldn't crash
+ mx = np.ma.array(x)
+ assert_equal(mx.dtype, x.dtype)
+ assert_equal(mx.shape, x.shape)
+
+ mx = np.ma.array(x, mask=x)
+ assert_equal(mx.dtype, x.dtype)
+ assert_equal(mx.shape, x.shape)
+
+
+def test_mask_shape_assignment_does_not_break_masked():
+ a = np.ma.masked
+ b = np.ma.array(1, mask=a.mask)
+ b.shape = (1,)
+ assert_equal(a.mask.shape, ())
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+ reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+def test_doc_note():
+ def method(self):
+ """This docstring
+
+ Has multiple lines
+
+ And notes
+
+ Notes
+ -----
+ original note
+ """
+ pass
+
+ expected_doc = """This docstring
+
+Has multiple lines
+
+And notes
+
+Notes
+-----
+note
+
+original note"""
+
+ assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py
new file mode 100644
index 00000000..40c8418f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_deprecations.py
@@ -0,0 +1,84 @@
+"""Test deprecation and future warnings.
+
+"""
+import pytest
+import numpy as np
+from numpy.testing import assert_warns
+from numpy.ma.testutils import assert_equal
+from numpy.ma.core import MaskedArrayFutureWarning
+import io
+import textwrap
+
+class TestArgsort:
+ """ gh-8701 """
+ def _test_base(self, argsort, cls):
+ arr_0d = np.array(1).view(cls)
+ argsort(arr_0d)
+
+ arr_1d = np.array([1, 2, 3]).view(cls)
+ argsort(arr_1d)
+
+ # argsort has a bad default for >1d arrays
+ arr_2d = np.array([[1, 2], [3, 4]]).view(cls)
+ result = assert_warns(
+ np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d)
+ assert_equal(result, argsort(arr_2d, axis=None))
+
+ # should be no warnings for explicitly specifying it
+ argsort(arr_2d, axis=None)
+ argsort(arr_2d, axis=-1)
+
+ def test_function_ndarray(self):
+ return self._test_base(np.ma.argsort, np.ndarray)
+
+ def test_function_maskedarray(self):
+ return self._test_base(np.ma.argsort, np.ma.MaskedArray)
+
+ def test_method(self):
+ return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
+
+
+class TestMinimumMaximum:
+
+ def test_axis_default(self):
+ # NumPy 1.13, 2017-05-06
+
+ data1d = np.ma.arange(6)
+ data2d = data1d.reshape(2, 3)
+
+ ma_min = np.ma.minimum.reduce
+ ma_max = np.ma.maximum.reduce
+
+ # check that the default axis is still None, but warns on 2d arrays
+ result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d)
+ assert_equal(result, ma_max(data2d, axis=None))
+
+ result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d)
+ assert_equal(result, ma_min(data2d, axis=None))
+
+ # no warnings on 1d, as both new and old defaults are equivalent
+ result = ma_min(data1d)
+ assert_equal(result, ma_min(data1d, axis=None))
+ assert_equal(result, ma_min(data1d, axis=0))
+
+ result = ma_max(data1d)
+ assert_equal(result, ma_max(data1d, axis=None))
+ assert_equal(result, ma_max(data1d, axis=0))
+
+
+class TestFromtextfile:
+ def test_fromtextfile_delimitor(self):
+ # NumPy 1.22.0, 2021-09-23
+
+ textfile = io.StringIO(textwrap.dedent(
+ """
+ A,B,C,D
+ 'string 1';1;1.0;'mixed column'
+ 'string 2';2;2.0;
+ 'string 3';3;3.0;123
+ 'string 4';4;4.0;3.14
+ """
+ ))
+
+ with pytest.warns(DeprecationWarning):
+ result = np.ma.mrecords.fromtextfile(textfile, delimitor=';')
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py
new file mode 100644
index 00000000..38603fb8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_extras.py
@@ -0,0 +1,1829 @@
+# pylint: disable-msg=W0611, W0612, W0511
+"""Tests suite for MaskedArray.
+Adapted from the original test_ma by Pierre Gerard-Marchant
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
+
+"""
+import warnings
+import itertools
+import pytest
+
+import numpy as np
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.testing import (
+ assert_warns, suppress_warnings
+ )
+from numpy.ma.testutils import (
+ assert_, assert_array_equal, assert_equal, assert_almost_equal
+ )
+from numpy.ma.core import (
+ array, arange, masked, MaskedArray, masked_array, getmaskarray, shape,
+ nomask, ones, zeros, count
+ )
+from numpy.ma.extras import (
+ atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef,
+ median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d,
+ ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
+ mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
+ notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin,
+ diagflat, ndenumerate, stack, vstack
+ )
+
+
+class TestGeneric:
+ #
+ def test_masked_all(self):
+ # Tests masked_all
+ # Standard dtype
+ test = masked_all((2,), dtype=float)
+ control = array([1, 1], mask=[1, 1], dtype=float)
+ assert_equal(test, control)
+ # Flexible dtype
+ dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
+ test = masked_all((2,), dtype=dt)
+ control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
+ assert_equal(test, control)
+ test = masked_all((2, 2), dtype=dt)
+ control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]],
+ mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]],
+ dtype=dt)
+ assert_equal(test, control)
+ # Nested dtype
+ dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+ test = masked_all((2,), dtype=dt)
+ control = array([(1, (1, 1)), (1, (1, 1))],
+ mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+ assert_equal(test, control)
+ test = masked_all((2,), dtype=dt)
+ control = array([(1, (1, 1)), (1, (1, 1))],
+ mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+ assert_equal(test, control)
+ test = masked_all((1, 1), dtype=dt)
+ control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt)
+ assert_equal(test, control)
+
+ def test_masked_all_with_object_nested(self):
+ # Test masked_all works with nested array with dtype of an 'object'
+ # refers to issue #15895
+ my_dtype = np.dtype([('b', ([('c', object)], (1,)))])
+ masked_arr = np.ma.masked_all((1,), my_dtype)
+
+ assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
+ assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray)
+ assert_equal(len(masked_arr['b']['c']), 1)
+ assert_equal(masked_arr['b']['c'].shape, (1, 1))
+ assert_equal(masked_arr['b']['c']._fill_value.shape, ())
+
+ def test_masked_all_with_object(self):
+ # same as above except that the array is not nested
+ my_dtype = np.dtype([('b', (object, (1,)))])
+ masked_arr = np.ma.masked_all((1,), my_dtype)
+
+ assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
+ assert_equal(len(masked_arr['b']), 1)
+ assert_equal(masked_arr['b'].shape, (1, 1))
+ assert_equal(masked_arr['b']._fill_value.shape, ())
+
+ def test_masked_all_like(self):
+ # Tests masked_all
+ # Standard dtype
+ base = array([1, 2], dtype=float)
+ test = masked_all_like(base)
+ control = array([1, 1], mask=[1, 1], dtype=float)
+ assert_equal(test, control)
+ # Flexible dtype
+ dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
+ base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
+ test = masked_all_like(base)
+ control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
+ assert_equal(test, control)
+ # Nested dtype
+ dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+ control = array([(1, (1, 1)), (1, (1, 1))],
+ mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+ test = masked_all_like(control)
+ assert_equal(test, control)
+
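+    # Illustrative sketch: masked_all fully masks the result and leaves the
+    # data buffer uninitialized, so only the mask and dtype are meaningful.
+    def test_masked_all_mask_sketch(self):
+        test = masked_all((3,), dtype=int)
+        assert_equal(test.mask, [True, True, True])
+        assert_equal(test.dtype, np.dtype(int))
+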
+ def check_clump(self, f):
+ for i in range(1, 7):
+ for j in range(2**i):
+ k = np.arange(i, dtype=int)
+ ja = np.full(i, j, dtype=int)
+ a = masked_array(2**k)
+ a.mask = (ja & (2**k)) != 0
+ s = 0
+ for sl in f(a):
+ s += a.data[sl].sum()
+ if f == clump_unmasked:
+ assert_equal(a.compressed().sum(), s)
+ else:
+ a.mask = ~a.mask
+ assert_equal(a.compressed().sum(), s)
+
+ def test_clump_masked(self):
+ # Test clump_masked
+ a = masked_array(np.arange(10))
+ a[[0, 1, 2, 6, 8, 9]] = masked
+ #
+ test = clump_masked(a)
+ control = [slice(0, 3), slice(6, 7), slice(8, 10)]
+ assert_equal(test, control)
+
+ self.check_clump(clump_masked)
+
+ def test_clump_unmasked(self):
+ # Test clump_unmasked
+ a = masked_array(np.arange(10))
+ a[[0, 1, 2, 6, 8, 9]] = masked
+ test = clump_unmasked(a)
+ control = [slice(3, 6), slice(7, 8), ]
+ assert_equal(test, control)
+
+ self.check_clump(clump_unmasked)
+
+ def test_flatnotmasked_contiguous(self):
+ # Test flatnotmasked_contiguous
+ a = arange(10)
+ # No mask
+ test = flatnotmasked_contiguous(a)
+ assert_equal(test, [slice(0, a.size)])
+ # mask of all false
+ a.mask = np.zeros(10, dtype=bool)
+ assert_equal(test, [slice(0, a.size)])
+ # Some mask
+ a[(a < 3) | (a > 8) | (a == 5)] = masked
+ test = flatnotmasked_contiguous(a)
+ assert_equal(test, [slice(3, 5), slice(6, 9)])
+ #
+ a[:] = masked
+ test = flatnotmasked_contiguous(a)
+ assert_equal(test, [])
+
+
+class TestAverage:
+    # Several tests of average. Why so many? Good point...
+ def test_testAverage1(self):
+ # Test of average.
+ ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+ assert_equal(2.0, average(ott, axis=0))
+ assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+ assert_equal(2.0, result)
+ assert_(wts == 4.0)
+ ott[:] = masked
+ assert_equal(average(ott, axis=0).mask, [True])
+ ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+ ott = ott.reshape(2, 2)
+ ott[:, 1] = masked
+ assert_equal(average(ott, axis=0), [2.0, 0.0])
+ assert_equal(average(ott, axis=1).mask[0], [True])
+ assert_equal([2., 0.], average(ott, axis=0))
+ result, wts = average(ott, axis=0, returned=True)
+ assert_equal(wts, [1., 0.])
+
+ def test_testAverage2(self):
+ # More tests of average.
+ w1 = [0, 1, 1, 1, 1, 0]
+ w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+ x = arange(6, dtype=np.float_)
+ assert_equal(average(x, axis=0), 2.5)
+ assert_equal(average(x, axis=0, weights=w1), 2.5)
+ y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
+ assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
+ assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
+ assert_equal(average(y, axis=1),
+ [average(x, axis=0), average(x, axis=0) * 2.0])
+ assert_equal(average(y, None, weights=w2), 20. / 6.)
+ assert_equal(average(y, axis=0, weights=w2),
+ [0., 1., 2., 3., 4., 10.])
+ assert_equal(average(y, axis=1),
+ [average(x, axis=0), average(x, axis=0) * 2.0])
+ m1 = zeros(6)
+ m2 = [0, 0, 1, 1, 0, 0]
+ m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
+ m4 = ones(6)
+ m5 = [0, 1, 1, 1, 1, 1]
+ assert_equal(average(masked_array(x, m1), axis=0), 2.5)
+ assert_equal(average(masked_array(x, m2), axis=0), 2.5)
+ assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
+ assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+ assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
+ z = masked_array(y, m3)
+ assert_equal(average(z, None), 20. / 6.)
+ assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
+ assert_equal(average(z, axis=1), [2.5, 5.0])
+ assert_equal(average(z, axis=0, weights=w2),
+ [0., 1., 99., 99., 4.0, 10.0])
+
+ def test_testAverage3(self):
+ # Yet more tests of average!
+ a = arange(6)
+ b = arange(6) * 3
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
+ assert_equal(shape(r1), shape(w1))
+ assert_equal(r1.shape, w1.shape)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
+ assert_equal(shape(w2), shape(r2))
+ r2, w2 = average(ones((2, 2, 3)), returned=True)
+ assert_equal(shape(w2), shape(r2))
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
+ assert_equal(shape(w2), shape(r2))
+ a2d = array([[1, 2], [0, 4]], float)
+ a2dm = masked_array(a2d, [[False, False], [True, False]])
+ a2da = average(a2d, axis=0)
+ assert_equal(a2da, [0.5, 3.0])
+ a2dma = average(a2dm, axis=0)
+ assert_equal(a2dma, [1.0, 3.0])
+ a2dma = average(a2dm, axis=None)
+ assert_equal(a2dma, 7. / 3.)
+ a2dma = average(a2dm, axis=1)
+ assert_equal(a2dma, [1.5, 4.0])
+
+ def test_testAverage4(self):
+ # Test that `keepdims` works with average
+ x = np.array([2, 3, 4]).reshape(3, 1)
+ b = np.ma.array(x, mask=[[False], [False], [True]])
+ w = np.array([4, 5, 6]).reshape(3, 1)
+ actual = average(b, weights=w, axis=1, keepdims=True)
+ desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]])
+ assert_equal(actual, desired)
+
+ def test_onintegers_with_mask(self):
+ # Test average on integers with mask
+ a = average(array([1, 2]))
+ assert_equal(a, 1.5)
+ a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
+ assert_equal(a, 1.5)
+
+ def test_complex(self):
+ # Test with complex data.
+ # (Regression test for https://github.com/numpy/numpy/issues/2684)
+ mask = np.array([[0, 0, 0, 1, 0],
+ [0, 1, 0, 0, 0]], dtype=bool)
+ a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j],
+ [9j, 0+1j, 2+3j, 4+5j, 7+7j]],
+ mask=mask)
+
+ av = average(a)
+ expected = np.average(a.compressed())
+ assert_almost_equal(av.real, expected.real)
+ assert_almost_equal(av.imag, expected.imag)
+
+ av0 = average(a, axis=0)
+ expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j
+ assert_almost_equal(av0.real, expected0.real)
+ assert_almost_equal(av0.imag, expected0.imag)
+
+ av1 = average(a, axis=1)
+ expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j
+ assert_almost_equal(av1.real, expected1.real)
+ assert_almost_equal(av1.imag, expected1.imag)
+
+ # Test with the 'weights' argument.
+ wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5],
+ [1.0, 1.0, 1.0, 1.0, 1.0]])
+ wav = average(a, weights=wts)
+ expected = np.average(a.compressed(), weights=wts[~mask])
+ assert_almost_equal(wav.real, expected.real)
+ assert_almost_equal(wav.imag, expected.imag)
+
+ wav0 = average(a, weights=wts, axis=0)
+ expected0 = (average(a.real, weights=wts, axis=0) +
+ average(a.imag, weights=wts, axis=0)*1j)
+ assert_almost_equal(wav0.real, expected0.real)
+ assert_almost_equal(wav0.imag, expected0.imag)
+
+ wav1 = average(a, weights=wts, axis=1)
+ expected1 = (average(a.real, weights=wts, axis=1) +
+ average(a.imag, weights=wts, axis=1)*1j)
+ assert_almost_equal(wav1.real, expected1.real)
+ assert_almost_equal(wav1.imag, expected1.imag)
+
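+    # Illustrative sketch: a masked weighted average reduces to
+    # sum(w * x) / sum(w) over the unmasked entries only.
+    def test_weighted_average_formula_sketch(self):
+        x = array([1., 2., 3.], mask=[0, 0, 1])
+        w = [1., 3., 5.]
+        expected = (1. * 1. + 3. * 2.) / (1. + 3.)
+        assert_almost_equal(average(x, weights=w), expected)
+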
+ @pytest.mark.parametrize(
+ 'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+ [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+ ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+ [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+ )
+ def test_basic_keepdims(self, x, axis, expected_avg,
+ weights, expected_wavg, expected_wsum):
+ avg = np.ma.average(x, axis=axis, keepdims=True)
+ assert avg.shape == np.shape(expected_avg)
+ assert_array_equal(avg, expected_avg)
+
+ wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+
+ wavg, wsum = np.ma.average(x, axis=axis, weights=weights,
+ returned=True, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+ assert wsum.shape == np.shape(expected_wsum)
+ assert_array_equal(wsum, expected_wsum)
+
+ def test_masked_weights(self):
+ # Test with masked weights.
+ # (Regression test for https://github.com/numpy/numpy/issues/10438)
+ a = np.ma.array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
+ weights_unmasked = masked_array([5, 28, 31], mask=False)
+ weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0])
+
+ avg_unmasked = average(a, axis=0,
+ weights=weights_unmasked, returned=False)
+ expected_unmasked = np.array([6.0, 5.21875, 6.21875])
+ assert_almost_equal(avg_unmasked, expected_unmasked)
+
+ avg_masked = average(a, axis=0, weights=weights_masked, returned=False)
+ expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678])
+ assert_almost_equal(avg_masked, expected_masked)
+
+        # weights should be masked wherever the array is masked, to avoid
+        # summing a masked NaN (or other value) that is not cancelled by a
+        # zero weight
+ a = np.ma.array([1.0, 2.0, 3.0, 4.0],
+ mask=[False, False, True, True])
+ avg_unmasked = average(a, weights=[1, 1, 1, np.nan])
+
+ assert_almost_equal(avg_unmasked, 1.5)
+
+ a = np.ma.array([
+ [1.0, 2.0, 3.0, 4.0],
+ [5.0, 6.0, 7.0, 8.0],
+ [9.0, 1.0, 2.0, 3.0],
+ ], mask=[
+ [False, True, True, False],
+ [True, False, True, True],
+ [True, False, True, False],
+ ])
+
+ avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0)
+ avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5],
+ mask=[False, True, True, False])
+
+ assert_almost_equal(avg_masked, avg_expected)
+ assert_equal(avg_masked.mask, avg_expected.mask)
+
+
+class TestConcatenator:
+ # Tests for mr_, the equivalent of r_ for masked arrays.
+
+ def test_1d(self):
+ # Tests mr_ on 1D arrays.
+ assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
+ b = ones(5)
+ m = [1, 0, 0, 0, 0]
+ d = masked_array(b, mask=m)
+ c = mr_[d, 0, 0, d]
+ assert_(isinstance(c, MaskedArray))
+ assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
+ assert_array_equal(c.mask, mr_[m, 0, 0, m])
+
+ def test_2d(self):
+ # Tests mr_ on 2D arrays.
+ a_1 = np.random.rand(5, 5)
+ a_2 = np.random.rand(5, 5)
+ m_1 = np.round_(np.random.rand(5, 5), 0)
+ m_2 = np.round_(np.random.rand(5, 5), 0)
+ b_1 = masked_array(a_1, mask=m_1)
+ b_2 = masked_array(a_2, mask=m_2)
+ # append columns
+ d = mr_['1', b_1, b_2]
+ assert_(d.shape == (5, 10))
+ assert_array_equal(d[:, :5], b_1)
+ assert_array_equal(d[:, 5:], b_2)
+ assert_array_equal(d.mask, np.r_['1', m_1, m_2])
+ d = mr_[b_1, b_2]
+ assert_(d.shape == (10, 5))
+ assert_array_equal(d[:5,:], b_1)
+ assert_array_equal(d[5:,:], b_2)
+ assert_array_equal(d.mask, np.r_[m_1, m_2])
+
+ def test_masked_constant(self):
+ actual = mr_[np.ma.masked, 1]
+ assert_equal(actual.mask, [True, False])
+ assert_equal(actual.data[1], 1)
+
+ actual = mr_[[1, 2], np.ma.masked]
+ assert_equal(actual.mask, [False, False, True])
+ assert_equal(actual.data[:2], [1, 2])
+
+
+class TestNotMasked:
+ # Tests notmasked_edges and notmasked_contiguous.
+
+ def test_edges(self):
+        # Tests notmasked_edges
+ data = masked_array(np.arange(25).reshape(5, 5),
+ mask=[[0, 0, 1, 0, 0],
+ [0, 0, 0, 1, 1],
+ [1, 1, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [1, 1, 1, 0, 0]],)
+ test = notmasked_edges(data, None)
+ assert_equal(test, [0, 24])
+ test = notmasked_edges(data, 0)
+ assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
+ assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)])
+ test = notmasked_edges(data, 1)
+ assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)])
+ assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)])
+ #
+ test = notmasked_edges(data.data, None)
+ assert_equal(test, [0, 24])
+ test = notmasked_edges(data.data, 0)
+ assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)])
+ assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)])
+ test = notmasked_edges(data.data, -1)
+ assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)])
+ assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)])
+ #
+ data[-2] = masked
+ test = notmasked_edges(data, 0)
+ assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
+ assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)])
+ test = notmasked_edges(data, -1)
+ assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)])
+ assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])
+
+ def test_contiguous(self):
+ # Tests notmasked_contiguous
+ a = masked_array(np.arange(24).reshape(3, 8),
+ mask=[[0, 0, 0, 0, 1, 1, 1, 1],
+ [1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 1, 0]])
+ tmp = notmasked_contiguous(a, None)
+ assert_equal(tmp, [
+ slice(0, 4, None),
+ slice(16, 22, None),
+ slice(23, 24, None)
+ ])
+
+ tmp = notmasked_contiguous(a, 0)
+ assert_equal(tmp, [
+ [slice(0, 1, None), slice(2, 3, None)],
+ [slice(0, 1, None), slice(2, 3, None)],
+ [slice(0, 1, None), slice(2, 3, None)],
+ [slice(0, 1, None), slice(2, 3, None)],
+ [slice(2, 3, None)],
+ [slice(2, 3, None)],
+ [],
+ [slice(2, 3, None)]
+ ])
+ #
+ tmp = notmasked_contiguous(a, 1)
+ assert_equal(tmp, [
+ [slice(0, 4, None)],
+ [],
+ [slice(0, 6, None), slice(7, 8, None)]
+ ])
+
+
+class TestCompressFunctions:
+
+ def test_compress_nd(self):
+ # Tests compress_nd
+ x = np.array(list(range(3*4*5))).reshape(3, 4, 5)
+ m = np.zeros((3,4,5)).astype(bool)
+ m[1,1,1] = True
+ x = array(x, mask=m)
+
+ # axis=None
+ a = compress_nd(x)
+ assert_equal(a, [[[ 0, 2, 3, 4],
+ [10, 12, 13, 14],
+ [15, 17, 18, 19]],
+ [[40, 42, 43, 44],
+ [50, 52, 53, 54],
+ [55, 57, 58, 59]]])
+
+ # axis=0
+ a = compress_nd(x, 0)
+ assert_equal(a, [[[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]],
+ [[40, 41, 42, 43, 44],
+ [45, 46, 47, 48, 49],
+ [50, 51, 52, 53, 54],
+ [55, 56, 57, 58, 59]]])
+
+ # axis=1
+ a = compress_nd(x, 1)
+ assert_equal(a, [[[ 0, 1, 2, 3, 4],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]],
+ [[20, 21, 22, 23, 24],
+ [30, 31, 32, 33, 34],
+ [35, 36, 37, 38, 39]],
+ [[40, 41, 42, 43, 44],
+ [50, 51, 52, 53, 54],
+ [55, 56, 57, 58, 59]]])
+
+ a2 = compress_nd(x, (1,))
+ a3 = compress_nd(x, -2)
+ a4 = compress_nd(x, (-2,))
+ assert_equal(a, a2)
+ assert_equal(a, a3)
+ assert_equal(a, a4)
+
+ # axis=2
+ a = compress_nd(x, 2)
+ assert_equal(a, [[[ 0, 2, 3, 4],
+ [ 5, 7, 8, 9],
+ [10, 12, 13, 14],
+ [15, 17, 18, 19]],
+ [[20, 22, 23, 24],
+ [25, 27, 28, 29],
+ [30, 32, 33, 34],
+ [35, 37, 38, 39]],
+ [[40, 42, 43, 44],
+ [45, 47, 48, 49],
+ [50, 52, 53, 54],
+ [55, 57, 58, 59]]])
+
+ a2 = compress_nd(x, (2,))
+ a3 = compress_nd(x, -1)
+ a4 = compress_nd(x, (-1,))
+ assert_equal(a, a2)
+ assert_equal(a, a3)
+ assert_equal(a, a4)
+
+ # axis=(0, 1)
+ a = compress_nd(x, (0, 1))
+ assert_equal(a, [[[ 0, 1, 2, 3, 4],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]],
+ [[40, 41, 42, 43, 44],
+ [50, 51, 52, 53, 54],
+ [55, 56, 57, 58, 59]]])
+ a2 = compress_nd(x, (0, -2))
+ assert_equal(a, a2)
+
+ # axis=(1, 2)
+ a = compress_nd(x, (1, 2))
+ assert_equal(a, [[[ 0, 2, 3, 4],
+ [10, 12, 13, 14],
+ [15, 17, 18, 19]],
+ [[20, 22, 23, 24],
+ [30, 32, 33, 34],
+ [35, 37, 38, 39]],
+ [[40, 42, 43, 44],
+ [50, 52, 53, 54],
+ [55, 57, 58, 59]]])
+
+ a2 = compress_nd(x, (-2, 2))
+ a3 = compress_nd(x, (1, -1))
+ a4 = compress_nd(x, (-2, -1))
+ assert_equal(a, a2)
+ assert_equal(a, a3)
+ assert_equal(a, a4)
+
+ # axis=(0, 2)
+ a = compress_nd(x, (0, 2))
+ assert_equal(a, [[[ 0, 2, 3, 4],
+ [ 5, 7, 8, 9],
+ [10, 12, 13, 14],
+ [15, 17, 18, 19]],
+ [[40, 42, 43, 44],
+ [45, 47, 48, 49],
+ [50, 52, 53, 54],
+ [55, 57, 58, 59]]])
+
+ a2 = compress_nd(x, (0, -1))
+ assert_equal(a, a2)
+
+ def test_compress_rowcols(self):
+ # Tests compress_rowcols
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+ assert_equal(compress_rowcols(x), [[4, 5], [7, 8]])
+ assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]])
+ assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]])
+ x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+ assert_equal(compress_rowcols(x), [[0, 2], [6, 8]])
+ assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]])
+ assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]])
+ x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
+ assert_equal(compress_rowcols(x), [[8]])
+ assert_equal(compress_rowcols(x, 0), [[6, 7, 8]])
+ assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]])
+ x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ assert_equal(compress_rowcols(x).size, 0)
+ assert_equal(compress_rowcols(x, 0).size, 0)
+ assert_equal(compress_rowcols(x, 1).size, 0)
+
+ def test_mask_rowcols(self):
+ # Tests mask_rowcols.
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+ assert_equal(mask_rowcols(x).mask,
+ [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
+ assert_equal(mask_rowcols(x, 0).mask,
+ [[1, 1, 1], [0, 0, 0], [0, 0, 0]])
+ assert_equal(mask_rowcols(x, 1).mask,
+ [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
+ x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+ assert_equal(mask_rowcols(x).mask,
+ [[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+ assert_equal(mask_rowcols(x, 0).mask,
+ [[0, 0, 0], [1, 1, 1], [0, 0, 0]])
+ assert_equal(mask_rowcols(x, 1).mask,
+ [[0, 1, 0], [0, 1, 0], [0, 1, 0]])
+ x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
+ assert_equal(mask_rowcols(x).mask,
+ [[1, 1, 1], [1, 1, 1], [1, 1, 0]])
+ assert_equal(mask_rowcols(x, 0).mask,
+ [[1, 1, 1], [1, 1, 1], [0, 0, 0]])
+ assert_equal(mask_rowcols(x, 1,).mask,
+ [[1, 1, 0], [1, 1, 0], [1, 1, 0]])
+ x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ assert_(mask_rowcols(x).all() is masked)
+ assert_(mask_rowcols(x, 0).all() is masked)
+ assert_(mask_rowcols(x, 1).all() is masked)
+ assert_(mask_rowcols(x).mask.all())
+ assert_(mask_rowcols(x, 0).mask.all())
+ assert_(mask_rowcols(x, 1).mask.all())
+
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize(["func", "rowcols_axis"],
+ [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)])
+ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
+ # Test deprecation of the axis argument to `mask_rows` and `mask_cols`
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+ with assert_warns(DeprecationWarning):
+ res = func(x, axis=axis)
+ assert_equal(res, mask_rowcols(x, rowcols_axis))
+
+ def test_dot(self):
+ # Tests dot product
+ n = np.arange(1, 7)
+ #
+ m = [1, 0, 0, 0, 0, 0]
+ a = masked_array(n, mask=m).reshape(2, 3)
+ b = masked_array(n, mask=m).reshape(3, 2)
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[1, 1], [1, 0]])
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
+ c = dot(a, b, strict=False)
+ assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+ c = dot(b, a, strict=False)
+ assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+ #
+ m = [0, 0, 0, 0, 0, 1]
+ a = masked_array(n, mask=m).reshape(2, 3)
+ b = masked_array(n, mask=m).reshape(3, 2)
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[0, 1], [1, 1]])
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]])
+ c = dot(a, b, strict=False)
+ assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+ assert_equal(c, dot(a, b))
+ c = dot(b, a, strict=False)
+ assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+ #
+ m = [0, 0, 0, 0, 0, 0]
+ a = masked_array(n, mask=m).reshape(2, 3)
+ b = masked_array(n, mask=m).reshape(3, 2)
+ c = dot(a, b)
+ assert_equal(c.mask, nomask)
+ c = dot(b, a)
+ assert_equal(c.mask, nomask)
+ #
+ a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3)
+ b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[1, 1], [0, 0]])
+ c = dot(a, b, strict=False)
+ assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
+ c = dot(b, a, strict=False)
+ assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+ #
+ a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
+ b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[0, 0], [1, 1]])
+ c = dot(a, b)
+ assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
+ c = dot(b, a, strict=False)
+ assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+ #
+ a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
+ b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2)
+ c = dot(a, b, strict=True)
+ assert_equal(c.mask, [[1, 0], [1, 1]])
+ c = dot(a, b, strict=False)
+ assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+ c = dot(b, a, strict=True)
+ assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
+ c = dot(b, a, strict=False)
+ assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+
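+    # Illustrative sketch: for 2-D inputs, strict=True masks every row or
+    # column that contains a masked term, while strict=False fills masked
+    # values with 0 before taking the product.
+    def test_dot_strict_vs_filled_sketch(self):
+        a = masked_array([[1., 2.], [3., 4.]], mask=[[0, 1], [0, 0]])
+        b = masked_array(np.eye(2))
+        res = dot(a, b, strict=True)
+        assert_equal(res.mask, [[1, 1], [0, 0]])
+        res = dot(a, b, strict=False)
+        assert_equal(res, [[1., 0.], [3., 4.]])
+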
+ def test_dot_returns_maskedarray(self):
+ # See gh-6611
+ a = np.eye(3)
+ b = array(a)
+ assert_(type(dot(a, a)) is MaskedArray)
+ assert_(type(dot(a, b)) is MaskedArray)
+ assert_(type(dot(b, a)) is MaskedArray)
+ assert_(type(dot(b, b)) is MaskedArray)
+
+ def test_dot_out(self):
+ a = array(np.eye(3))
+ out = array(np.zeros((3, 3)))
+ res = dot(a, a, out=out)
+ assert_(res is out)
+ assert_equal(a, res)
+
+
+class TestApplyAlongAxis:
+ # Tests 2D functions
+ def test_3d(self):
+ a = arange(12.).reshape(2, 2, 3)
+
+ def myfunc(b):
+ return b[1]
+
+ xa = apply_along_axis(myfunc, 2, a)
+ assert_equal(xa, [[1, 4], [7, 10]])
+
+ # Tests kwargs functions
+ def test_3d_kwargs(self):
+ a = arange(12).reshape(2, 2, 3)
+
+ def myfunc(b, offset=0):
+ return b[1+offset]
+
+ xa = apply_along_axis(myfunc, 2, a, offset=1)
+ assert_equal(xa, [[2, 5], [8, 11]])
+
+
+class TestApplyOverAxes:
+ # Tests apply_over_axes
+ def test_basic(self):
+ a = arange(24).reshape(2, 3, 4)
+ test = apply_over_axes(np.sum, a, [0, 2])
+ ctrl = np.array([[[60], [92], [124]]])
+ assert_equal(test, ctrl)
+ a[(a % 2).astype(bool)] = masked
+ test = apply_over_axes(np.sum, a, [0, 2])
+ ctrl = np.array([[[28], [44], [60]]])
+ assert_equal(test, ctrl)
+
+
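+# Illustrative sketch: apply_over_axes keeps each reduced axis as a size-1
+# dimension, so summing a (2, 3, 4) array over axes 0 and 2 yields (1, 3, 1).
+def test_apply_over_axes_shape_sketch():
+    a = arange(24).reshape(2, 3, 4)
+    res = apply_over_axes(np.sum, a, [0, 2])
+    assert_equal(res.shape, (1, 3, 1))
+
+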
+class TestMedian:
+ def test_pytype(self):
+ r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
+ assert_equal(r, np.inf)
+
+ def test_inf(self):
+        # test that the even-length branch (which averages the two middle
+        # values) handles inf, where a division like inf / x can be masked
+ r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+ [np.inf, np.inf]]), axis=-1)
+ assert_equal(r, np.inf)
+ r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+ [np.inf, np.inf]]), axis=None)
+ assert_equal(r, np.inf)
+ # all masked
+ r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+ [np.inf, np.inf]], mask=True),
+ axis=-1)
+ assert_equal(r.mask, True)
+ r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+ [np.inf, np.inf]], mask=True),
+ axis=None)
+ assert_equal(r.mask, True)
+
+ def test_non_masked(self):
+ x = np.arange(9)
+ assert_equal(np.ma.median(x), 4.)
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ x = range(8)
+ assert_equal(np.ma.median(x), 3.5)
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ x = 5
+ assert_equal(np.ma.median(x), 5.)
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ # integer
+ x = np.arange(9 * 8).reshape(9, 8)
+ assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+ assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+ assert_(np.ma.median(x, axis=1) is not MaskedArray)
+ # float
+ x = np.arange(9 * 8.).reshape(9, 8)
+ assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+ assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+ assert_(np.ma.median(x, axis=1) is not MaskedArray)
+
+ def test_docstring_examples(self):
+ "test the examples given in the docstring of ma.median"
+ x = array(np.arange(8), mask=[0]*4 + [1]*4)
+ assert_equal(np.ma.median(x), 1.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+ assert_equal(np.ma.median(x), 2.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
+ assert_equal(ma_x, [2., 5.])
+ assert_equal(ma_x.shape, (2,), "shape mismatch")
+ assert_(type(ma_x) is MaskedArray)
+
+ def test_axis_argument_errors(self):
+ msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
+ for ndmin in range(5):
+ for mask in [False, True]:
+ x = array(1, ndmin=ndmin, mask=mask)
+
+ # Valid axis values should not raise exception
+ args = itertools.product(range(-ndmin, ndmin), [False, True])
+ for axis, over in args:
+ try:
+ np.ma.median(x, axis=axis, overwrite_input=over)
+ except Exception:
+ raise AssertionError(msg % (mask, ndmin, axis, over))
+
+ # Invalid axis values should raise exception
+ args = itertools.product([-(ndmin + 1), ndmin], [False, True])
+ for axis, over in args:
+ try:
+ np.ma.median(x, axis=axis, overwrite_input=over)
+ except np.AxisError:
+ pass
+ else:
+ raise AssertionError(msg % (mask, ndmin, axis, over))
+
+ def test_masked_0d(self):
+ # Check values
+ x = array(1, mask=False)
+ assert_equal(np.ma.median(x), 1)
+ x = array(1, mask=True)
+ assert_equal(np.ma.median(x), np.ma.masked)
+
+ def test_masked_1d(self):
+ x = array(np.arange(5), mask=True)
+ assert_equal(np.ma.median(x), np.ma.masked)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
+ x = array(np.arange(5), mask=False)
+ assert_equal(np.ma.median(x), 2.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ x = array(np.arange(5), mask=[0,1,0,0,0])
+ assert_equal(np.ma.median(x), 2.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ x = array(np.arange(5), mask=[0,1,1,1,1])
+ assert_equal(np.ma.median(x), 0.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ # integer
+ x = array(np.arange(5), mask=[0,1,1,0,0])
+ assert_equal(np.ma.median(x), 3.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ # float
+ x = array(np.arange(5.), mask=[0,1,1,0,0])
+ assert_equal(np.ma.median(x), 3.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ # integer
+ x = array(np.arange(6), mask=[0,1,1,1,1,0])
+ assert_equal(np.ma.median(x), 2.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ # float
+ x = array(np.arange(6.), mask=[0,1,1,1,1,0])
+ assert_equal(np.ma.median(x), 2.5)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+
+ def test_1d_shape_consistency(self):
+        assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape,
+                     np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape)
+
+ def test_2d(self):
+ # Tests median w/ 2D
+ (n, p) = (101, 30)
+ x = masked_array(np.linspace(-1., 1., n),)
+ x[:10] = x[-10:] = masked
+ z = masked_array(np.empty((n, p), dtype=float))
+ z[:, 0] = x[:]
+ idx = np.arange(len(x))
+ for i in range(1, p):
+ np.random.shuffle(idx)
+ z[:, i] = x[idx]
+ assert_equal(median(z[:, 0]), 0)
+ assert_equal(median(z), 0)
+ assert_equal(median(z, axis=0), np.zeros(p))
+ assert_equal(median(z.T, axis=1), np.zeros(p))
+
+ def test_2d_waxis(self):
+ # Tests median w/ 2D arrays and different axis.
+ x = masked_array(np.arange(30).reshape(10, 3))
+ x[:3] = x[-3:] = masked
+ assert_equal(median(x), 14.5)
+ assert_(type(np.ma.median(x)) is not MaskedArray)
+ assert_equal(median(x, axis=0), [13.5, 14.5, 15.5])
+ assert_(type(np.ma.median(x, axis=0)) is MaskedArray)
+ assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0])
+ assert_(type(np.ma.median(x, axis=1)) is MaskedArray)
+ assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])
+
+ def test_3d(self):
+ # Tests median w/ 3D
+ x = np.ma.arange(24).reshape(3, 4, 2)
+ x[x % 3 == 0] = masked
+ assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]])
+ x.shape = (4, 3, 2)
+ assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]])
+ x = np.ma.arange(24).reshape(4, 3, 2)
+ x[x % 5 == 0] = masked
+ assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]])
+
+ def test_neg_axis(self):
+ x = masked_array(np.arange(30).reshape(10, 3))
+ x[:3] = x[-3:] = masked
+ assert_equal(median(x, axis=-1), median(x, axis=1))
+
+ def test_out_1d(self):
+        # integer and float inputs, even and odd numbers of unmasked values
+ for v in (30, 30., 31, 31.):
+ x = masked_array(np.arange(v))
+ x[:3] = x[-3:] = masked
+ out = masked_array(np.ones(()))
+ r = median(x, out=out)
+ if v == 30:
+ assert_equal(out, 14.5)
+ else:
+ assert_equal(out, 15.)
+ assert_(r is out)
+ assert_(type(r) is MaskedArray)
+
+ def test_out(self):
+        # integer and float inputs, even and odd numbers of unmasked values
+ for v in (40, 40., 30, 30.):
+ x = masked_array(np.arange(v).reshape(10, -1))
+ x[:3] = x[-3:] = masked
+ out = masked_array(np.ones(10))
+ r = median(x, axis=1, out=out)
+ if v == 30:
+ e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3,
+ mask=[True] * 3 + [False] * 4 + [True] * 3)
+ else:
+ e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3,
+ mask=[True]*3 + [False]*4 + [True]*3)
+ assert_equal(r, e)
+ assert_(r is out)
+ assert_(type(r) is MaskedArray)
+
+ @pytest.mark.parametrize(
+ argnames='axis',
+ argvalues=[
+ None,
+ 1,
+ (1, ),
+ (0, 1),
+ (-3, -1),
+ ]
+ )
+ def test_keepdims_out(self, axis):
+ mask = np.zeros((3, 5, 7, 11), dtype=bool)
+ # Randomly set some elements to True:
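+        # (uniform samples scaled by the shape and truncated to intp give
+        # 200 random index 4-tuples into the mask)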
+ w = np.random.random((4, 200)) * np.array(mask.shape)[:, None]
+ w = w.astype(np.intp)
+        mask[tuple(w)] = True
+ d = masked_array(np.ones(mask.shape), mask=mask)
+ if axis is None:
+ shape_out = (1,) * d.ndim
+ else:
+ axis_norm = normalize_axis_tuple(axis, d.ndim)
+ shape_out = tuple(
+ 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+ out = masked_array(np.empty(shape_out))
+ result = median(d, axis=axis, keepdims=True, out=out)
+ assert result is out
+ assert_equal(result.shape, shape_out)
+
+ def test_single_non_masked_value_on_axis(self):
+ data = [[1., 0.],
+ [0., 3.],
+ [0., 0.]]
+ masked_arr = np.ma.masked_equal(data, 0)
+ expected = [1., 3.]
+ assert_array_equal(np.ma.median(masked_arr, axis=0),
+ expected)
+
+ def test_nan(self):
+ for mask in (False, np.zeros(6, dtype=bool)):
+ dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
+ dm.mask = mask
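+            # with nothing actually masked, NaNs must propagate through
+            # ma.median exactly as they do through np.median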
+
+ # scalar result
+ r = np.ma.median(dm, axis=None)
+ assert_(np.isscalar(r))
+ assert_array_equal(r, np.nan)
+ r = np.ma.median(dm.ravel(), axis=0)
+ assert_(np.isscalar(r))
+ assert_array_equal(r, np.nan)
+
+ r = np.ma.median(dm, axis=0)
+ assert_equal(type(r), MaskedArray)
+ assert_array_equal(r, [1, np.nan, 3])
+ r = np.ma.median(dm, axis=1)
+ assert_equal(type(r), MaskedArray)
+ assert_array_equal(r, [np.nan, 2])
+ r = np.ma.median(dm, axis=-1)
+ assert_equal(type(r), MaskedArray)
+ assert_array_equal(r, [np.nan, 2])
+
+ dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
+ dm[:, 2] = np.ma.masked
+ assert_array_equal(np.ma.median(dm, axis=None), np.nan)
+ assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3])
+ assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5])
+
+ def test_out_nan(self):
+ o = np.ma.masked_array(np.zeros((4,)))
+ d = np.ma.masked_array(np.ones((3, 4)))
+ d[2, 1] = np.nan
+ d[2, 2] = np.ma.masked
+ assert_equal(np.ma.median(d, 0, out=o), o)
+ o = np.ma.masked_array(np.zeros((3,)))
+ assert_equal(np.ma.median(d, 1, out=o), o)
+ o = np.ma.masked_array(np.zeros(()))
+ assert_equal(np.ma.median(d, out=o), o)
+
+ def test_nan_behavior(self):
+ a = np.ma.masked_array(np.arange(24, dtype=float))
+ a[::3] = np.ma.masked
+ a[2] = np.nan
+ assert_array_equal(np.ma.median(a), np.nan)
+ assert_array_equal(np.ma.median(a, axis=0), np.nan)
+
+ a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4))
+ a.mask = np.arange(a.size) % 2 == 1
+ aorig = a.copy()
+ a[1, 2, 3] = np.nan
+ a[1, 1, 2] = np.nan
+
+ # no axis
+ assert_array_equal(np.ma.median(a), np.nan)
+ assert_(np.isscalar(np.ma.median(a)))
+
+ # axis0
+ b = np.ma.median(aorig, axis=0)
+ b[2, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.ma.median(a, 0), b)
+
+ # axis1
+ b = np.ma.median(aorig, axis=1)
+ b[1, 3] = np.nan
+ b[1, 2] = np.nan
+ assert_equal(np.ma.median(a, 1), b)
+
+ # axis02
+ b = np.ma.median(aorig, axis=(0, 2))
+ b[1] = np.nan
+ b[2] = np.nan
+ assert_equal(np.ma.median(a, (0, 2)), b)
+
+    def test_ambiguous_fill(self):
+ # 255 is max value, used as filler for sort
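+        # a data value equal to that fill value must still come out
+        # unmasked in the result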
+ a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8)
+ a = np.ma.masked_array(a, mask=a == 3)
+ assert_array_equal(np.ma.median(a, axis=1), 255)
+ assert_array_equal(np.ma.median(a, axis=1).mask, False)
+ assert_array_equal(np.ma.median(a, axis=0), a[0])
+ assert_array_equal(np.ma.median(a), 255)
+
+ def test_special(self):
+ for inf in [np.inf, -np.inf]:
+ a = np.array([[inf, np.nan], [np.nan, np.nan]])
+ a = np.ma.masked_array(a, mask=np.isnan(a))
+ assert_equal(np.ma.median(a, axis=0), [inf, np.nan])
+ assert_equal(np.ma.median(a, axis=1), [inf, np.nan])
+ assert_equal(np.ma.median(a), inf)
+
+ a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]])
+ a = np.ma.masked_array(a, mask=np.isnan(a))
+ assert_array_equal(np.ma.median(a, axis=1), inf)
+ assert_array_equal(np.ma.median(a, axis=1).mask, False)
+ assert_array_equal(np.ma.median(a, axis=0), a[0])
+ assert_array_equal(np.ma.median(a), inf)
+
+ # no mask
+ a = np.array([[inf, inf], [inf, inf]])
+ assert_equal(np.ma.median(a), inf)
+ assert_equal(np.ma.median(a, axis=0), inf)
+ assert_equal(np.ma.median(a, axis=1), inf)
+
+ a = np.array([[inf, 7, -inf, -9],
+ [-10, np.nan, np.nan, 5],
+ [4, np.nan, np.nan, inf]],
+ dtype=np.float32)
+ a = np.ma.masked_array(a, mask=np.isnan(a))
+ if inf > 0:
+ assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.])
+ assert_equal(np.ma.median(a), 4.5)
+ else:
+ assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.])
+ assert_equal(np.ma.median(a), -2.5)
+ assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf])
+
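+        # rows of masked NaNs followed by infs: the median must be inf no
+        # matter how many NaNs lead each row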
+ for i in range(0, 10):
+ for j in range(1, 10):
+ a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
+ a = np.ma.masked_array(a, mask=np.isnan(a))
+ assert_equal(np.ma.median(a), inf)
+ assert_equal(np.ma.median(a, axis=1), inf)
+ assert_equal(np.ma.median(a, axis=0),
+ ([np.nan] * i) + [inf] * j)
+
+ def test_empty(self):
+ # empty arrays
+ a = np.ma.masked_array(np.array([], dtype=float))
+ with suppress_warnings() as w:
+ w.record(RuntimeWarning)
+ assert_array_equal(np.ma.median(a), np.nan)
+ assert_(w.log[0].category is RuntimeWarning)
+
+ # multiple dimensions
+ a = np.ma.masked_array(np.array([], dtype=float, ndmin=3))
+ # no axis
+ with suppress_warnings() as w:
+ w.record(RuntimeWarning)
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_array_equal(np.ma.median(a), np.nan)
+ assert_(w.log[0].category is RuntimeWarning)
+
+ # axis 0 and 1
+ b = np.ma.masked_array(np.array([], dtype=float, ndmin=2))
+ assert_equal(np.ma.median(a, axis=0), b)
+ assert_equal(np.ma.median(a, axis=1), b)
+
+ # axis 2
+ b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2))
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ assert_equal(np.ma.median(a, axis=2), b)
+ assert_(w[0].category is RuntimeWarning)
+
+ def test_object(self):
+ o = np.ma.masked_array(np.arange(7.))
+        assert_(isinstance(np.ma.median(o.astype(object)), float))
+ o[2] = np.nan
+        assert_(isinstance(np.ma.median(o.astype(object)), float))
+
+
+class TestCov:
+
+ def setup_method(self):
+ self.data = array(np.random.rand(12))
+
+ def test_1d_without_missing(self):
+ # Test cov on 1D variable w/o missing values
+ x = self.data
+ assert_almost_equal(np.cov(x), cov(x))
+ assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
+ assert_almost_equal(np.cov(x, rowvar=False, bias=True),
+ cov(x, rowvar=False, bias=True))
+
+ def test_2d_without_missing(self):
+ # Test cov on 1 2D variable w/o missing values
+ x = self.data.reshape(3, 4)
+ assert_almost_equal(np.cov(x), cov(x))
+ assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
+ assert_almost_equal(np.cov(x, rowvar=False, bias=True),
+ cov(x, rowvar=False, bias=True))
+
+ def test_1d_with_missing(self):
+ # Test cov 1 1D variable w/missing values
+ x = self.data
+ x[-1] = masked
+ x -= x.mean()
+ nx = x.compressed()
+ assert_almost_equal(np.cov(nx), cov(x))
+ assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False))
+ assert_almost_equal(np.cov(nx, rowvar=False, bias=True),
+ cov(x, rowvar=False, bias=True))
+ #
+ try:
+ cov(x, allow_masked=False)
+ except ValueError:
+ pass
+ #
+ # 2 1D variables w/ missing values
+ nx = x[1:-1]
+ assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1]))
+ assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False),
+ cov(x, x[::-1], rowvar=False))
+ assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
+ cov(x, x[::-1], rowvar=False, bias=True))
+
+ def test_2d_with_missing(self):
+ # Test cov on 2D variable w/ missing value
+ x = self.data
+ x[-1] = masked
+ x = x.reshape(3, 4)
+ valid = np.logical_not(getmaskarray(x)).astype(int)
+ frac = np.dot(valid, valid.T)
+ xf = (x - x.mean(1)[:, None]).filled(0)
+ assert_almost_equal(cov(x),
+ np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
+ assert_almost_equal(cov(x, bias=True),
+ np.cov(xf, bias=True) * x.shape[1] / frac)
+ frac = np.dot(valid.T, valid)
+ xf = (x - x.mean(0)).filled(0)
+ assert_almost_equal(cov(x, rowvar=False),
+ (np.cov(xf, rowvar=False) *
+ (x.shape[0] - 1) / (frac - 1.)))
+ assert_almost_equal(cov(x, rowvar=False, bias=True),
+ (np.cov(xf, rowvar=False, bias=True) *
+ x.shape[0] / frac))
+
+
+class TestCorrcoef:
+
+ def setup_method(self):
+ self.data = array(np.random.rand(12))
+ self.data2 = array(np.random.rand(12))
+
+ def test_ddof(self):
+ # ddof raises DeprecationWarning
+ x, y = self.data, self.data2
+ expected = np.corrcoef(x)
+ expected2 = np.corrcoef(x, y)
+ with suppress_warnings() as sup:
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, x, ddof=-1)
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ # ddof has no or negligible effect on the function
+ assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
+ assert_almost_equal(corrcoef(x, ddof=-1), expected)
+ assert_almost_equal(corrcoef(x, y, ddof=-1), expected2)
+ assert_almost_equal(corrcoef(x, ddof=3), expected)
+ assert_almost_equal(corrcoef(x, y, ddof=3), expected2)
+
+ def test_bias(self):
+ x, y = self.data, self.data2
+ expected = np.corrcoef(x)
+ # bias raises DeprecationWarning
+ with suppress_warnings() as sup:
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, x, y, True, False)
+ assert_warns(DeprecationWarning, corrcoef, x, y, True, True)
+ assert_warns(DeprecationWarning, corrcoef, x, bias=False)
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ # bias has no or negligible effect on the function
+ assert_almost_equal(corrcoef(x, bias=1), expected)
+
+ def test_1d_without_missing(self):
+ # Test cov on 1D variable w/o missing values
+ x = self.data
+ assert_almost_equal(np.corrcoef(x), corrcoef(x))
+ assert_almost_equal(np.corrcoef(x, rowvar=False),
+ corrcoef(x, rowvar=False))
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
+ corrcoef(x, rowvar=False, bias=True))
+
+ def test_2d_without_missing(self):
+ # Test corrcoef on 1 2D variable w/o missing values
+ x = self.data.reshape(3, 4)
+ assert_almost_equal(np.corrcoef(x), corrcoef(x))
+ assert_almost_equal(np.corrcoef(x, rowvar=False),
+ corrcoef(x, rowvar=False))
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
+ corrcoef(x, rowvar=False, bias=True))
+
+ def test_1d_with_missing(self):
+ # Test corrcoef 1 1D variable w/missing values
+ x = self.data
+ x[-1] = masked
+ x -= x.mean()
+ nx = x.compressed()
+ assert_almost_equal(np.corrcoef(nx), corrcoef(x))
+ assert_almost_equal(np.corrcoef(nx, rowvar=False),
+ corrcoef(x, rowvar=False))
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
+ corrcoef(x, rowvar=False, bias=True))
+ try:
+ corrcoef(x, allow_masked=False)
+ except ValueError:
+ pass
+ # 2 1D variables w/ missing values
+ nx = x[1:-1]
+ assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1]))
+ assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False),
+ corrcoef(x, x[::-1], rowvar=False))
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ # ddof and bias have no or negligible effect on the function
+ assert_almost_equal(np.corrcoef(nx, nx[::-1]),
+ corrcoef(x, x[::-1], bias=1))
+ assert_almost_equal(np.corrcoef(nx, nx[::-1]),
+ corrcoef(x, x[::-1], ddof=2))
+
+ def test_2d_with_missing(self):
+ # Test corrcoef on 2D variable w/ missing value
+ x = self.data
+ x[-1] = masked
+ x = x.reshape(3, 4)
+
+ test = corrcoef(x)
+ control = np.corrcoef(x)
+ assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ # ddof and bias have no or negligible effect on the function
+ assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1],
+ control[:-1, :-1])
+ assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1],
+ control[:-1, :-1])
+ assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1],
+ control[:-1, :-1])
+
+
+class TestPolynomial:
+ #
+ def test_polyfit(self):
+ # Tests polyfit
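+        # Masked points should be ignored, so results must agree with
+        # np.polyfit run on the data with the masked entries dropped.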
+ # On ndarrays
+ x = np.random.rand(10)
+ y = np.random.rand(20).reshape(-1, 2)
+ assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3))
+ # ON 1D maskedarrays
+ x = x.view(MaskedArray)
+ x[0] = masked
+ y = y.view(MaskedArray)
+ y[0, 0] = y[-1, -1] = masked
+ #
+ (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True)
+ (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3,
+ full=True)
+ for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+ assert_almost_equal(a, a_)
+ #
+ (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
+ (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
+ for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+ assert_almost_equal(a, a_)
+ #
+ (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+ (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
+ for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+ assert_almost_equal(a, a_)
+ #
+ w = np.random.rand(10) + 1
+ wo = w.copy()
+ xs = x[1:-1]
+ ys = y[1:-1]
+ ws = w[1:-1]
+ (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w)
+ (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws)
+ assert_equal(w, wo)
+ for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+ assert_almost_equal(a, a_)
+
+ def test_polyfit_with_masked_NaNs(self):
+ x = np.random.rand(10)
+ y = np.random.rand(20).reshape(-1, 2)
+
+ x[0] = np.nan
+ y[-1,-1] = np.nan
+ x = x.view(MaskedArray)
+ y = y.view(MaskedArray)
+ x[0] = masked
+ y[-1,-1] = masked
+
+ (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+ (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
+ for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+ assert_almost_equal(a, a_)
+
+
+class TestArraySetOps:
+
+ def test_unique_onlist(self):
+ # Test unique on list
+ data = [1, 1, 1, 2, 2, 3]
+ test = unique(data, return_index=True, return_inverse=True)
+ assert_(isinstance(test[0], MaskedArray))
+ assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
+ assert_equal(test[1], [0, 3, 5])
+ assert_equal(test[2], [0, 0, 0, 1, 1, 2])
+
+ def test_unique_onmaskedarray(self):
+ # Test unique on masked data w/use_mask=True
+ data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
+ test = unique(data, return_index=True, return_inverse=True)
+ assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+ assert_equal(test[1], [0, 3, 5, 2])
+ assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+ #
+ data.fill_value = 3
+ data = masked_array(data=[1, 1, 1, 2, 2, 3],
+ mask=[0, 0, 1, 0, 1, 0], fill_value=3)
+ test = unique(data, return_index=True, return_inverse=True)
+ assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+ assert_equal(test[1], [0, 3, 5, 2])
+ assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+ def test_unique_allmasked(self):
+ # Test all masked
+ data = masked_array([1, 1, 1], mask=True)
+ test = unique(data, return_index=True, return_inverse=True)
+ assert_equal(test[0], masked_array([1, ], mask=[True]))
+ assert_equal(test[1], [0])
+ assert_equal(test[2], [0, 0, 0])
+ #
+ # Test masked
+ data = masked
+ test = unique(data, return_index=True, return_inverse=True)
+ assert_equal(test[0], masked_array(masked))
+ assert_equal(test[1], [0])
+ assert_equal(test[2], [0])
+
+ def test_ediff1d(self):
+        # Tests ediff1d
+ x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+ control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+ test = ediff1d(x)
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+
+ def test_ediff1d_tobegin(self):
+ # Test ediff1d w/ to_begin
+ x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+ test = ediff1d(x, to_begin=masked)
+ control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+ #
+ test = ediff1d(x, to_begin=[1, 2, 3])
+ control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+
+ def test_ediff1d_toend(self):
+ # Test ediff1d w/ to_end
+ x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+ test = ediff1d(x, to_end=masked)
+ control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+ #
+ test = ediff1d(x, to_end=[1, 2, 3])
+ control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+
+ def test_ediff1d_tobegin_toend(self):
+ # Test ediff1d w/ to_begin and to_end
+ x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+ test = ediff1d(x, to_end=masked, to_begin=masked)
+ control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+ #
+ test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
+ control = array([0, 1, 1, 1, 4, 1, 2, 3],
+ mask=[1, 1, 0, 0, 1, 0, 0, 0])
+ assert_equal(test, control)
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+
+ def test_ediff1d_ndarray(self):
+ # Test ediff1d w/ a ndarray
+ x = np.arange(5)
+ test = ediff1d(x)
+ control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
+ assert_equal(test, control)
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+ #
+ test = ediff1d(x, to_end=masked, to_begin=masked)
+ control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test.filled(0), control.filled(0))
+ assert_equal(test.mask, control.mask)
+
+ def test_intersect1d(self):
+ # Test intersect1d
+ x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+ y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+ test = intersect1d(x, y)
+ control = array([1, 3, -1], mask=[0, 0, 1])
+ assert_equal(test, control)
+
+ def test_setxor1d(self):
+ # Test setxor1d
+ a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ test = setxor1d(a, b)
+ assert_equal(test, array([3, 4, 7]))
+ #
+ a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+ b = [1, 2, 3, 4, 5]
+ test = setxor1d(a, b)
+ assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
+ #
+ a = array([1, 2, 3])
+ b = array([6, 5, 4])
+ test = setxor1d(a, b)
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test, [1, 2, 3, 4, 5, 6])
+ #
+ a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
+ b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
+ test = setxor1d(a, b)
+ assert_(isinstance(test, MaskedArray))
+ assert_equal(test, [1, 2, 3, 4, 5, 6])
+ #
+ assert_array_equal([], setxor1d([], []))
+
+ def test_isin(self):
+ # the tests for in1d cover most of isin's behavior
+ # if in1d is removed, would need to change those tests to test
+ # isin instead.
+ a = np.arange(24).reshape([2, 3, 4])
+ mask = np.zeros([2, 3, 4])
+ mask[1, 2, 0] = 1
+ a = array(a, mask=mask)
+ b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
+ mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
+ ec = zeros((2, 3, 4), dtype=bool)
+ ec[0, 0, 0] = True
+ ec[0, 0, 1] = True
+ ec[0, 2, 3] = True
+ c = isin(a, b)
+ assert_(isinstance(c, MaskedArray))
+ assert_array_equal(c, ec)
+        # compare results of np.isin to ma.isin
+ d = np.isin(a, b[~b.mask]) & ~a.mask
+ assert_array_equal(c, d)
+
+ def test_in1d(self):
+ # Test in1d
+ a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ test = in1d(a, b)
+ assert_equal(test, [True, True, True, False, True])
+ #
+ a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 5, -1], mask=[0, 0, 1])
+ test = in1d(a, b)
+ assert_equal(test, [True, True, False, True, True])
+ #
+ assert_array_equal([], in1d([], []))
+
+ def test_in1d_invert(self):
+ # Test in1d's invert parameter
+ a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+ a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
+ b = array([1, 5, -1], mask=[0, 0, 1])
+ assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+ assert_array_equal([], in1d([], [], invert=True))
+
+ def test_union1d(self):
+ # Test union1d
+ a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ test = union1d(a, b)
+ control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
+ assert_equal(test, control)
+
+ # Tests gh-10340, arguments to union1d should be
+ # flattened if they are not already 1D
+ x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]])
+ y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1])
+ ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1])
+ z = union1d(x, y)
+ assert_equal(z, ez)
+ #
+ assert_array_equal([], union1d([], []))
+
+ def test_setdiff1d(self):
+ # Test setdiff1d
+ a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
+ b = array([2, 4, 3, 3, 2, 1, 5])
+ test = setdiff1d(a, b)
+ assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
+ #
+ a = arange(10)
+ b = arange(8)
+ assert_equal(setdiff1d(a, b), array([8, 9]))
+ a = array([], np.uint32, mask=[])
+ assert_equal(setdiff1d(a, []).dtype, np.uint32)
+
+ def test_setdiff1d_char_array(self):
+ # Test setdiff1d_charray
+ a = np.array(['a', 'b', 'c'])
+ b = np.array(['a', 'b', 's'])
+ assert_array_equal(setdiff1d(a, b), np.array(['c']))
+
+
+class TestShapeBase:
+
+ def test_atleast_2d(self):
+ # Test atleast_2d
+ a = masked_array([0, 1, 2], mask=[0, 1, 0])
+ b = atleast_2d(a)
+ assert_equal(b.shape, (1, 3))
+ assert_equal(b.mask.shape, b.data.shape)
+ assert_equal(a.shape, (3,))
+ assert_equal(a.mask.shape, a.data.shape)
+ assert_equal(b.mask.shape, b.data.shape)
+
+ def test_shape_scalar(self):
+ # the atleast and diagflat function should work with scalars
+ # GitHub issue #3367
+ # Additionally, the atleast functions should accept multiple scalars
+ # correctly
+ b = atleast_1d(1.0)
+ assert_equal(b.shape, (1,))
+ assert_equal(b.mask.shape, b.shape)
+ assert_equal(b.data.shape, b.shape)
+
+ b = atleast_1d(1.0, 2.0)
+ for a in b:
+ assert_equal(a.shape, (1,))
+ assert_equal(a.mask.shape, a.shape)
+ assert_equal(a.data.shape, a.shape)
+
+ b = atleast_2d(1.0)
+ assert_equal(b.shape, (1, 1))
+ assert_equal(b.mask.shape, b.shape)
+ assert_equal(b.data.shape, b.shape)
+
+ b = atleast_2d(1.0, 2.0)
+ for a in b:
+ assert_equal(a.shape, (1, 1))
+ assert_equal(a.mask.shape, a.shape)
+ assert_equal(a.data.shape, a.shape)
+
+ b = atleast_3d(1.0)
+ assert_equal(b.shape, (1, 1, 1))
+ assert_equal(b.mask.shape, b.shape)
+ assert_equal(b.data.shape, b.shape)
+
+ b = atleast_3d(1.0, 2.0)
+ for a in b:
+ assert_equal(a.shape, (1, 1, 1))
+ assert_equal(a.mask.shape, a.shape)
+ assert_equal(a.data.shape, a.shape)
+
+ b = diagflat(1.0)
+ assert_equal(b.shape, (1, 1))
+ assert_equal(b.mask.shape, b.data.shape)
+
+
+class TestNDEnumerate:
+
+ def test_ndenumerate_nomasked(self):
+ ordinary = np.arange(6.).reshape((1, 3, 2))
+ empty_mask = np.zeros_like(ordinary, dtype=bool)
+ with_mask = masked_array(ordinary, mask=empty_mask)
+ assert_equal(list(np.ndenumerate(ordinary)),
+ list(ndenumerate(ordinary)))
+ assert_equal(list(ndenumerate(ordinary)),
+ list(ndenumerate(with_mask)))
+ assert_equal(list(ndenumerate(with_mask)),
+ list(ndenumerate(with_mask, compressed=False)))
+
+ def test_ndenumerate_allmasked(self):
+ a = masked_all(())
+ b = masked_all((100,))
+ c = masked_all((2, 3, 4))
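+        # Fully masked arrays yield no items by default; compressed=False
+        # pairs every index with the masked constant instead.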
+ assert_equal(list(ndenumerate(a)), [])
+ assert_equal(list(ndenumerate(b)), [])
+ assert_equal(list(ndenumerate(b, compressed=False)),
+ list(zip(np.ndindex((100,)), 100 * [masked])))
+ assert_equal(list(ndenumerate(c)), [])
+ assert_equal(list(ndenumerate(c, compressed=False)),
+ list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked])))
+
+ def test_ndenumerate_mixedmasked(self):
+ a = masked_array(np.arange(12).reshape((3, 4)),
+ mask=[[1, 1, 1, 1],
+ [1, 1, 0, 1],
+ [0, 0, 0, 0]])
+ items = [((1, 2), 6),
+ ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)]
+ assert_equal(list(ndenumerate(a)), items)
+ assert_equal(len(list(ndenumerate(a, compressed=False))), a.size)
+ for coordinate, value in ndenumerate(a, compressed=False):
+ assert_equal(a[coordinate], value)
+
+
+class TestStack:
+
+ def test_stack_1d(self):
+ a = masked_array([0, 1, 2], mask=[0, 1, 0])
+ b = masked_array([9, 8, 7], mask=[1, 0, 0])
+
+ c = stack([a, b], axis=0)
+ assert_equal(c.shape, (2, 3))
+ assert_array_equal(a.mask, c[0].mask)
+ assert_array_equal(b.mask, c[1].mask)
+
+ d = vstack([a, b])
+ assert_array_equal(c.data, d.data)
+ assert_array_equal(c.mask, d.mask)
+
+ c = stack([a, b], axis=1)
+ assert_equal(c.shape, (3, 2))
+ assert_array_equal(a.mask, c[:, 0].mask)
+ assert_array_equal(b.mask, c[:, 1].mask)
+
+ def test_stack_masks(self):
+ a = masked_array([0, 1, 2], mask=True)
+ b = masked_array([9, 8, 7], mask=False)
+
+ c = stack([a, b], axis=0)
+ assert_equal(c.shape, (2, 3))
+ assert_array_equal(a.mask, c[0].mask)
+ assert_array_equal(b.mask, c[1].mask)
+
+ d = vstack([a, b])
+ assert_array_equal(c.data, d.data)
+ assert_array_equal(c.mask, d.mask)
+
+ c = stack([a, b], axis=1)
+ assert_equal(c.shape, (3, 2))
+ assert_array_equal(a.mask, c[:, 0].mask)
+ assert_array_equal(b.mask, c[:, 1].mask)
+
+ def test_stack_nd(self):
+ # 2D
+ shp = (3, 2)
+ d1 = np.random.randint(0, 10, shp)
+ d2 = np.random.randint(0, 10, shp)
+ m1 = np.random.randint(0, 2, shp).astype(bool)
+ m2 = np.random.randint(0, 2, shp).astype(bool)
+ a1 = masked_array(d1, mask=m1)
+ a2 = masked_array(d2, mask=m2)
+
+ c = stack([a1, a2], axis=0)
+ c_shp = (2,) + shp
+ assert_equal(c.shape, c_shp)
+ assert_array_equal(a1.mask, c[0].mask)
+ assert_array_equal(a2.mask, c[1].mask)
+
+ c = stack([a1, a2], axis=-1)
+ c_shp = shp + (2,)
+ assert_equal(c.shape, c_shp)
+ assert_array_equal(a1.mask, c[..., 0].mask)
+ assert_array_equal(a2.mask, c[..., 1].mask)
+
+ # 4D
+ shp = (3, 2, 4, 5,)
+ d1 = np.random.randint(0, 10, shp)
+ d2 = np.random.randint(0, 10, shp)
+ m1 = np.random.randint(0, 2, shp).astype(bool)
+ m2 = np.random.randint(0, 2, shp).astype(bool)
+ a1 = masked_array(d1, mask=m1)
+ a2 = masked_array(d2, mask=m2)
+
+ c = stack([a1, a2], axis=0)
+ c_shp = (2,) + shp
+ assert_equal(c.shape, c_shp)
+ assert_array_equal(a1.mask, c[0].mask)
+ assert_array_equal(a2.mask, c[1].mask)
+
+ c = stack([a1, a2], axis=-1)
+ c_shp = shp + (2,)
+ assert_equal(c.shape, c_shp)
+ assert_array_equal(a1.mask, c[..., 0].mask)
+ assert_array_equal(a2.mask, c[..., 1].mask)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py
new file mode 100644
index 00000000..77123c3c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_mrecords.py
@@ -0,0 +1,493 @@
+# pylint: disable-msg=W0611, W0612, W0511,R0201
+"""Test suite for mrecords.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+
+"""
+import numpy as np
+import numpy.ma as ma
+from numpy import recarray
+from numpy.ma import masked, nomask
+from numpy.testing import temppath
+from numpy.core.records import (
+ fromrecords as recfromrecords, fromarrays as recfromarrays
+ )
+from numpy.ma.mrecords import (
+ MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords,
+ addfield
+ )
+from numpy.ma.testutils import (
+ assert_, assert_equal,
+ assert_equal_records,
+ )
+from numpy.compat import pickle
+
+
+class TestMRecords:
+
+ ilist = [1, 2, 3, 4, 5]
+ flist = [1.1, 2.2, 3.3, 4.4, 5.5]
+ slist = [b'one', b'two', b'three', b'four', b'five']
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mask = [0, 1, 0, 0, 1]
+ base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
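+    # Shared fixture: a masked record array that each test views as a
+    # mrecarray, copying it first whenever the test mutates data or mask.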
+
+ def test_byview(self):
+ # Test creation by view
+ base = self.base
+ mbase = base.view(mrecarray)
+ assert_equal(mbase.recordmask, base.recordmask)
+ assert_equal_records(mbase._mask, base._mask)
+ assert_(isinstance(mbase._data, recarray))
+ assert_equal_records(mbase._data, base._data.view(recarray))
+ for field in ('a', 'b', 'c'):
+ assert_equal(base[field], mbase[field])
+ assert_equal_records(mbase.view(mrecarray), mbase)
+
+ def test_get(self):
+ # Tests fields retrieval
+ base = self.base.copy()
+ mbase = base.view(mrecarray)
+ # As fields..........
+ for field in ('a', 'b', 'c'):
+ assert_equal(getattr(mbase, field), mbase[field])
+ assert_equal(base[field], mbase[field])
+ # as elements .......
+ mbase_first = mbase[0]
+ assert_(isinstance(mbase_first, mrecarray))
+ assert_equal(mbase_first.dtype, mbase.dtype)
+ assert_equal(mbase_first.tolist(), (1, 1.1, b'one'))
+ # Used to be mask, now it's recordmask
+ assert_equal(mbase_first.recordmask, nomask)
+ assert_equal(mbase_first._mask.item(), (False, False, False))
+ assert_equal(mbase_first['a'], mbase['a'][0])
+ mbase_last = mbase[-1]
+ assert_(isinstance(mbase_last, mrecarray))
+ assert_equal(mbase_last.dtype, mbase.dtype)
+ assert_equal(mbase_last.tolist(), (None, None, None))
+ # Used to be mask, now it's recordmask
+ assert_equal(mbase_last.recordmask, True)
+ assert_equal(mbase_last._mask.item(), (True, True, True))
+ assert_equal(mbase_last['a'], mbase['a'][-1])
+ assert_((mbase_last['a'] is masked))
+ # as slice ..........
+ mbase_sl = mbase[:2]
+ assert_(isinstance(mbase_sl, mrecarray))
+ assert_equal(mbase_sl.dtype, mbase.dtype)
+ # Used to be mask, now it's recordmask
+ assert_equal(mbase_sl.recordmask, [0, 1])
+ assert_equal_records(mbase_sl.mask,
+ np.array([(False, False, False),
+ (True, True, True)],
+ dtype=mbase._mask.dtype))
+ assert_equal_records(mbase_sl, base[:2].view(mrecarray))
+ for field in ('a', 'b', 'c'):
+ assert_equal(getattr(mbase_sl, field), base[:2][field])
+
+ def test_set_fields(self):
+ # Tests setting fields.
+ base = self.base.copy()
+ mbase = base.view(mrecarray)
+ mbase = mbase.copy()
+ mbase.fill_value = (999999, 1e20, 'N/A')
+ # Change the data, the mask should be conserved
+ mbase.a._data[:] = 5
+ assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
+ assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
+ # Change the elements, and the mask will follow
+ mbase.a = 1
+ assert_equal(mbase['a']._data, [1]*5)
+ assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
+        # Used to be _mask, now it's recordmask
+ assert_equal(mbase.recordmask, [False]*5)
+ assert_equal(mbase._mask.tolist(),
+ np.array([(0, 0, 0),
+ (0, 1, 1),
+ (0, 0, 0),
+ (0, 0, 0),
+ (0, 1, 1)],
+ dtype=bool))
+ # Set a field to mask ........................
+ mbase.c = masked
+        # Used to be mask, and now it's still mask!
+ assert_equal(mbase.c.mask, [1]*5)
+ assert_equal(mbase.c.recordmask, [1]*5)
+ assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
+ assert_equal(ma.getdata(mbase['c']), [b'N/A']*5)
+ assert_equal(mbase._mask.tolist(),
+ np.array([(0, 0, 1),
+ (0, 1, 1),
+ (0, 0, 1),
+ (0, 0, 1),
+ (0, 1, 1)],
+ dtype=bool))
+ # Set fields by slices .......................
+ mbase = base.view(mrecarray).copy()
+ mbase.a[3:] = 5
+ assert_equal(mbase.a, [1, 2, 3, 5, 5])
+ assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
+ mbase.b[3:] = masked
+ assert_equal(mbase.b, base['b'])
+ assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
+ # Set fields globally..........................
+ ndtype = [('alpha', '|S1'), ('num', int)]
+ data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
+ rdata = data.view(MaskedRecords)
+ val = ma.array([10, 20, 30], mask=[1, 0, 0])
+
+ rdata['num'] = val
+ assert_equal(rdata.num, val)
+ assert_equal(rdata.num.mask, [1, 0, 0])
+
+ def test_set_fields_mask(self):
+ # Tests setting the mask of a field.
+ base = self.base.copy()
+ # This one has already a mask....
+ mbase = base.view(mrecarray)
+ mbase['a'][-2] = masked
+ assert_equal(mbase.a, [1, 2, 3, 4, 5])
+ assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
+ # This one has not yet
+ mbase = fromarrays([np.arange(5), np.random.rand(5)],
+ dtype=[('a', int), ('b', float)])
+ mbase['a'][-2] = masked
+ assert_equal(mbase.a, [0, 1, 2, 3, 4])
+ assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
+
+ def test_set_mask(self):
+ base = self.base.copy()
+ mbase = base.view(mrecarray)
+ # Set the mask to True .......................
+ mbase.mask = masked
+ assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
+ assert_equal(mbase['a']._mask, mbase['b']._mask)
+ assert_equal(mbase['a']._mask, mbase['c']._mask)
+ assert_equal(mbase._mask.tolist(),
+ np.array([(1, 1, 1)]*5, dtype=bool))
+ # Delete the mask ............................
+ mbase.mask = nomask
+ assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
+ assert_equal(mbase._mask.tolist(),
+ np.array([(0, 0, 0)]*5, dtype=bool))
+
+ def test_set_mask_fromarray(self):
+ base = self.base.copy()
+ mbase = base.view(mrecarray)
+ # Sets the mask w/ an array
+ mbase.mask = [1, 0, 0, 0, 1]
+ assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
+ assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
+ assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
+        # Yay, once more!
+ mbase.mask = [0, 0, 0, 0, 1]
+ assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
+ assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
+ assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])
+
+ def test_set_mask_fromfields(self):
+ mbase = self.base.copy().view(mrecarray)
+
+ nmask = np.array(
+ [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
+ dtype=[('a', bool), ('b', bool), ('c', bool)])
+ mbase.mask = nmask
+ assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
+ assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
+ assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
+ # Reinitialize and redo
+ mbase.mask = False
+ mbase.fieldmask = nmask
+ assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
+ assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
+ assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
+
+ def test_set_elements(self):
+ base = self.base.copy()
+ # Set an element to mask .....................
+ mbase = base.view(mrecarray).copy()
+ mbase[-2] = masked
+ assert_equal(
+ mbase._mask.tolist(),
+ np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
+ dtype=bool))
+ # Used to be mask, now it's recordmask!
+ assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
+ # Set slices .................................
+ mbase = base.view(mrecarray).copy()
+ mbase[:2] = (5, 5, 5)
+ assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
+ assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
+ assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
+ assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
+ assert_equal(mbase.c._data,
+ [b'5', b'5', b'three', b'four', b'five'])
+ assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
+
+ mbase = base.view(mrecarray).copy()
+ mbase[:2] = masked
+ assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
+ assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
+ assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
+ assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
+ assert_equal(mbase.c._data,
+ [b'one', b'two', b'three', b'four', b'five'])
+ assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
+
+ def test_setslices_hardmask(self):
+ # Tests setting slices w/ hardmask.
+ base = self.base.copy()
+ mbase = base.view(mrecarray)
+ mbase.harden_mask()
+ try:
+ mbase[-2:] = (5, 5, 5)
+ assert_equal(mbase.a._data, [1, 2, 3, 5, 5])
+ assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])
+ assert_equal(mbase.c._data,
+ [b'one', b'two', b'three', b'5', b'five'])
+ assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])
+ assert_equal(mbase.b._mask, mbase.a._mask)
+ assert_equal(mbase.b._mask, mbase.c._mask)
+ except NotImplementedError:
+ # OK, not implemented yet...
+ pass
+ except AssertionError:
+ raise
+ else:
+            raise Exception("Flexible hard masks should be supported!")
+ # Not using a tuple should crash
+ try:
+ mbase[-2:] = 3
+ except (NotImplementedError, TypeError):
+ pass
+ else:
+ raise TypeError("Should have expected a readable buffer object!")
+
+ def test_hardmask(self):
+ # Test hardmask
+ base = self.base.copy()
+ mbase = base.view(mrecarray)
+ mbase.harden_mask()
+ assert_(mbase._hardmask)
+ mbase.mask = nomask
+ assert_equal_records(mbase._mask, base._mask)
+ mbase.soften_mask()
+ assert_(not mbase._hardmask)
+ mbase.mask = nomask
+ # So, the mask of a field is no longer set to nomask...
+ assert_equal_records(mbase._mask,
+ ma.make_mask_none(base.shape, base.dtype))
+ assert_(ma.make_mask(mbase['b']._mask) is nomask)
+ assert_equal(mbase['a']._mask, mbase['b']._mask)
+
+ def test_pickling(self):
+ # Test pickling
+ base = self.base.copy()
+ mrec = base.view(mrecarray)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ _ = pickle.dumps(mrec, protocol=proto)
+ mrec_ = pickle.loads(_)
+ assert_equal(mrec_.dtype, mrec.dtype)
+ assert_equal_records(mrec_._data, mrec._data)
+ assert_equal(mrec_._mask, mrec._mask)
+ assert_equal_records(mrec_._mask, mrec._mask)
+
+ def test_filled(self):
+ # Test filling the array
+ _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+ _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+ _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+ fill_value=(99999, 99999., 'N/A'))
+ mrecfilled = mrec.filled()
+ assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
+ assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
+ dtype=float))
+ assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
+ dtype='|S8'))
+
+ def test_tolist(self):
+ # Test tolist.
+ _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+ _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+ _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+ fill_value=(99999, 99999., 'N/A'))
+
+ assert_equal(mrec.tolist(),
+ [(1, 1.1, None), (2, 2.2, b'two'),
+ (None, None, b'three')])
+
+ def test_withnames(self):
+ # Test the creation w/ format and names
+ x = mrecarray(1, formats=float, names='base')
+ x[0]['base'] = 10
+ assert_equal(x['base'][0], 10)
+
+ def test_exotic_formats(self):
+ # Test that 'exotic' formats are processed properly
+ easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
+ easy[0] = masked
+ assert_equal(easy.filled(1).item(), (1, b'1', 1.))
+
+ solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
+ solo[0] = masked
+ assert_equal(solo.filled(1).item(),
+ np.array((1,), dtype=solo.dtype).item())
+
+ mult = mrecarray(2, dtype="i4, (2,3)float, float")
+ mult[0] = masked
+ mult[1] = (1, 1, 1)
+ mult.filled(0)
+ assert_equal_records(mult.filled(0),
+ np.array([(0, 0, 0), (1, 1, 1)],
+ dtype=mult.dtype))
+
+
+class TestView:
+
+ def setup_method(self):
+ (a, b) = (np.arange(10), np.random.rand(10))
+ ndtype = [('a', float), ('b', float)]
+ arr = np.array(list(zip(a, b)), dtype=ndtype)
+
+ mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
+ mrec.mask[3] = (False, True)
+ self.data = (mrec, a, b, arr)
+
+ def test_view_by_itself(self):
+ (mrec, a, b, arr) = self.data
+ test = mrec.view()
+ assert_(isinstance(test, MaskedRecords))
+ assert_equal_records(test, mrec)
+ assert_equal_records(test._mask, mrec._mask)
+
+ def test_view_simple_dtype(self):
+ (mrec, a, b, arr) = self.data
+ ntype = (float, 2)
+ test = mrec.view(ntype)
+ assert_(isinstance(test, ma.MaskedArray))
+ assert_equal(test, np.array(list(zip(a, b)), dtype=float))
+ assert_(test[3, 1] is ma.masked)
+
+ def test_view_flexible_type(self):
+ (mrec, a, b, arr) = self.data
+ alttype = [('A', float), ('B', float)]
+ test = mrec.view(alttype)
+ assert_(isinstance(test, MaskedRecords))
+ assert_equal_records(test, arr.view(alttype))
+ assert_(test['B'][3] is masked)
+ assert_equal(test.dtype, np.dtype(alttype))
+ assert_(test._fill_value is None)
+
+
+##############################################################################
+class TestMRecordsImport:
+
+ _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+ _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+ _c = ma.array([b'one', b'two', b'three'],
+ mask=[0, 0, 1], dtype='|S8')
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+ fill_value=(b'99999', b'99999.',
+ b'N/A'))
+ nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
+ data = (mrec, nrec, ddtype)
+
+ def test_fromarrays(self):
+ _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+ _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+ _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
+ (mrec, nrec, _) = self.data
+ for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
+ assert_equal(getattr(mrec, f)._mask, l._mask)
+ # One record only
+ _x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0], dtype=object)
+ assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
+
+ def test_fromrecords(self):
+ # Test construction from records.
+ (mrec, nrec, ddtype) = self.data
+ #......
+ palist = [(1, 'abc', 3.7000002861022949, 0),
+ (2, 'xy', 6.6999998092651367, 1),
+ (0, ' ', 0.40000000596046448, 0)]
+ pa = recfromrecords(palist, names='c1, c2, c3, c4')
+ mpa = fromrecords(palist, names='c1, c2, c3, c4')
+ assert_equal_records(pa, mpa)
+ #.....
+ _mrec = fromrecords(nrec)
+ assert_equal(_mrec.dtype, mrec.dtype)
+ for field in _mrec.dtype.names:
+ assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
+
+ _mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
+ assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])
+ for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):
+ assert_equal(getattr(_mrec, f), getattr(mrec._data, n))
+
+ _mrec = fromrecords(mrec)
+ assert_equal(_mrec.dtype, mrec.dtype)
+ assert_equal_records(_mrec._data, mrec.filled())
+ assert_equal_records(_mrec._mask, mrec._mask)
+
+ def test_fromrecords_wmask(self):
+ # Tests construction from records w/ mask.
+ (mrec, nrec, ddtype) = self.data
+
+ _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])
+ assert_equal_records(_mrec._data, mrec._data)
+ assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])
+
+ _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)
+ assert_equal_records(_mrec._data, mrec._data)
+ assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])
+
+ _mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)
+ assert_equal_records(_mrec._data, mrec._data)
+ assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
+
+ _mrec = fromrecords(nrec.tolist(), dtype=ddtype,
+ mask=mrec._mask.tolist())
+ assert_equal_records(_mrec._data, mrec._data)
+ assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
+
+ def test_fromtextfile(self):
+ # Tests reading from a text file.
+ fcontent = (
+"""#
+'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
+'strings',1,1.0,'mixed column',,1
+'with embedded "double quotes"',2,2.0,1.0,,1
+'strings',3,3.0E5,3,,1
+'strings',4,-1e-10,,,1
+""")
+ with temppath() as path:
+ with open(path, 'w') as f:
+ f.write(fcontent)
+ mrectxt = fromtextfile(path, delimiter=',', varnames='ABCDEFG')
+ assert_(isinstance(mrectxt, MaskedRecords))
+ assert_equal(mrectxt.F, [1, 1, 1, 1])
+ assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
+ assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
+
+ def test_addfield(self):
+ # Tests addfield
+ (mrec, nrec, ddtype) = self.data
+ (d, m) = ([100, 200, 300], [1, 0, 0])
+ mrec = addfield(mrec, ma.array(d, mask=m))
+ assert_equal(mrec.f3, d)
+ assert_equal(mrec.f3._mask, m)
+
+
+def test_record_array_with_object_field():
+ # Trac #1839
+ y = ma.masked_array(
+ [(1, '2'), (3, '4')],
+ mask=[(0, 0), (0, 1)],
+ dtype=[('a', int), ('b', object)])
+ # getting an item used to fail
+ y[1]
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_old_ma.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_old_ma.py
new file mode 100644
index 00000000..8465b115
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_old_ma.py
@@ -0,0 +1,874 @@
+from functools import reduce
+
+import pytest
+
+import numpy as np
+import numpy.core.umath as umath
+import numpy.core.fromnumeric as fromnumeric
+from numpy.testing import (
+ assert_, assert_raises, assert_equal,
+ )
+from numpy.ma import (
+ MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
+ arange, arccos, arcsin, arctan, arctan2, array, average, choose,
+ concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,
+ getmask, greater, greater_equal, inner, isMaskedArray, less,
+ less_equal, log, log10, make_mask, masked, masked_array, masked_equal,
+ masked_greater, masked_greater_equal, masked_inside, masked_less,
+ masked_less_equal, masked_not_equal, masked_outside,
+ masked_print_option, masked_values, masked_where, maximum, minimum,
+ multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,
+ repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
+ take, tan, tanh, transpose, where, zeros,
+ )
+from numpy.compat import pickle
+
+pi = np.pi
+
+
+def eq(v, w, msg=''):
+ result = allclose(v, w)
+ if not result:
+ print(f'Not eq:{msg}\n{v}\n----{w}')
+ return result
+
+
+class TestMa:
+
+ def setup_method(self):
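+        # Canonical fixture: plain arrays x/y, masked counterparts xm/ym,
+        # and xf, which is x with the masked slots replaced by 1e+20.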
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ a10 = 10.
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = array(x, mask=m1)
+ ym = array(y, mask=m2)
+ z = np.array([-.5, 0., .5, .8])
+ zm = array(z, mask=[0, 1, 0, 0])
+ xf = np.where(m1, 1e+20, x)
+ s = x.shape
+ xm.set_fill_value(1e+20)
+ self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
+
+ def test_testBasic1d(self):
+ # Test of basic array creation and properties in 1 dimension.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.dtype, x.dtype)
+        assert_equal(xm.size, reduce(lambda x, y: x * y, s))
+        assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1))
+ assert_(eq(xm, xf))
+ assert_(eq(filled(xm, 1.e20), xf))
+ assert_(eq(x, xm))
+
+ @pytest.mark.parametrize("s", [(4, 3), (6, 2)])
+ def test_testBasic2d(self, s):
+ # Test of basic array creation and properties in 2 dimensions.
+        (x, y, a10, m1, m2, xm, ym, z, zm, xf, _) = self.d  # keep parametrized s
+ x.shape = s
+ y.shape = s
+ xm.shape = s
+ ym.shape = s
+ xf.shape = s
+
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.size, reduce(lambda x, y: x * y, s))
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1))
+ assert_(eq(xm, xf))
+ assert_(eq(filled(xm, 1.e20), xf))
+ assert_(eq(x, xm))
+
+ def test_testArithmetic(self):
+ # Test of basic arithmetic.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ a2d = array([[1, 2], [0, 4]])
+ a2dm = masked_array(a2d, [[0, 0], [1, 0]])
+ assert_(eq(a2d * a2d, a2d * a2dm))
+ assert_(eq(a2d + a2d, a2d + a2dm))
+ assert_(eq(a2d - a2d, a2d - a2dm))
+ for s in [(12,), (4, 3), (2, 6)]:
+ x = x.reshape(s)
+ y = y.reshape(s)
+ xm = xm.reshape(s)
+ ym = ym.reshape(s)
+ xf = xf.reshape(s)
+ assert_(eq(-x, -xm))
+ assert_(eq(x + y, xm + ym))
+ assert_(eq(x - y, xm - ym))
+ assert_(eq(x * y, xm * ym))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_(eq(x / y, xm / ym))
+ assert_(eq(a10 + y, a10 + ym))
+ assert_(eq(a10 - y, a10 - ym))
+ assert_(eq(a10 * y, a10 * ym))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_(eq(a10 / y, a10 / ym))
+ assert_(eq(x + a10, xm + a10))
+ assert_(eq(x - a10, xm - a10))
+ assert_(eq(x * a10, xm * a10))
+ assert_(eq(x / a10, xm / a10))
+ assert_(eq(x ** 2, xm ** 2))
+ assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
+ assert_(eq(x ** y, xm ** ym))
+ assert_(eq(np.add(x, y), add(xm, ym)))
+ assert_(eq(np.subtract(x, y), subtract(xm, ym)))
+ assert_(eq(np.multiply(x, y), multiply(xm, ym)))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_(eq(np.divide(x, y), divide(xm, ym)))
+
+ def test_testMixedArithmetic(self):
+ na = np.array([1])
+ ma = array([1])
+ assert_(isinstance(na + ma, MaskedArray))
+ assert_(isinstance(ma + na, MaskedArray))
+
+ def test_testUfuncs1(self):
+ # Test various functions such as sin, cos.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ assert_(eq(np.cos(x), cos(xm)))
+ assert_(eq(np.cosh(x), cosh(xm)))
+ assert_(eq(np.sin(x), sin(xm)))
+ assert_(eq(np.sinh(x), sinh(xm)))
+ assert_(eq(np.tan(x), tan(xm)))
+ assert_(eq(np.tanh(x), tanh(xm)))
+ with np.errstate(divide='ignore', invalid='ignore'):
+ assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
+ assert_(eq(np.log(abs(x)), log(xm)))
+ assert_(eq(np.log10(abs(x)), log10(xm)))
+ assert_(eq(np.exp(x), exp(xm)))
+ assert_(eq(np.arcsin(z), arcsin(zm)))
+ assert_(eq(np.arccos(z), arccos(zm)))
+ assert_(eq(np.arctan(z), arctan(zm)))
+ assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
+ assert_(eq(np.absolute(x), absolute(xm)))
+ assert_(eq(np.equal(x, y), equal(xm, ym)))
+ assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
+ assert_(eq(np.less(x, y), less(xm, ym)))
+ assert_(eq(np.greater(x, y), greater(xm, ym)))
+ assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
+ assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
+ assert_(eq(np.conjugate(x), conjugate(xm)))
+ assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
+ assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
+ assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
+ assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
+
+ def test_xtestCount(self):
+ # Test count
+ ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+ assert_(count(ott).dtype.type is np.intp)
+ assert_equal(3, count(ott))
+ assert_equal(1, count(1))
+ assert_(eq(0, array(1, mask=[1])))
+ ott = ott.reshape((2, 2))
+ assert_(count(ott).dtype.type is np.intp)
+ assert_(isinstance(count(ott, 0), np.ndarray))
+ assert_(count(ott).dtype.type is np.intp)
+ assert_(eq(3, count(ott)))
+ assert_(getmask(count(ott, 0)) is nomask)
+ assert_(eq([1, 2], count(ott, 0)))
+
+ def test_testMinMax(self):
+ # Test minimum and maximum.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ xr = np.ravel(x) # max doesn't work if shaped
+ xmr = ravel(xm)
+
+ # true because of careful selection of data
+ assert_(eq(max(xr), maximum.reduce(xmr)))
+ assert_(eq(min(xr), minimum.reduce(xmr)))
+
+ def test_testAddSumProd(self):
+ # Test add, sum, product.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ assert_(eq(np.add.reduce(x), add.reduce(x)))
+ assert_(eq(np.add.accumulate(x), add.accumulate(x)))
+        assert_(eq(4, sum(array(4), axis=0)))
+ assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
+ assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
+ assert_(eq(np.sum(x, 0), sum(x, 0)))
+ assert_(eq(np.product(x, axis=0), product(x, axis=0)))
+ assert_(eq(np.product(x, 0), product(x, 0)))
+ assert_(eq(np.product(filled(xm, 1), axis=0),
+ product(xm, axis=0)))
+ if len(s) > 1:
+ assert_(eq(np.concatenate((x, y), 1),
+ concatenate((xm, ym), 1)))
+ assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+ assert_(eq(np.sum(x, 1), sum(x, 1)))
+ assert_(eq(np.product(x, 1), product(x, 1)))
+
+ def test_testCI(self):
+ # Test of conversions and indexing
+ x1 = np.array([1, 2, 4, 3])
+ x2 = array(x1, mask=[1, 0, 0, 0])
+ x3 = array(x1, mask=[0, 1, 0, 1])
+ x4 = array(x1)
+ # test conversion to strings
+        str(x2)  # must not raise
+        repr(x2)  # must not raise
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
+ # tests of indexing
+ assert_(type(x2[1]) is type(x1[1]))
+ assert_(x1[1] == x2[1])
+ assert_(x2[0] is masked)
+ assert_(eq(x1[2], x2[2]))
+ assert_(eq(x1[2:5], x2[2:5]))
+ assert_(eq(x1[:], x2[:]))
+ assert_(eq(x1[1:], x3[1:]))
+ x1[2] = 9
+ x2[2] = 9
+ assert_(eq(x1, x2))
+ x1[1:3] = 99
+ x2[1:3] = 99
+ assert_(eq(x1, x2))
+ x2[1] = masked
+ assert_(eq(x1, x2))
+ x2[1:3] = masked
+ assert_(eq(x1, x2))
+ x2[:] = x1
+ x2[1] = masked
+ assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
+ x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
+ x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
+ assert_(allequal(x4, array([1, 2, 3, 4])))
+ x1 = np.arange(5) * 1.0
+ x2 = masked_values(x1, 3.0)
+ assert_(eq(x1, x2))
+ assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
+ assert_(eq(3.0, x2.fill_value))
+ x1 = array([1, 'hello', 2, 3], object)
+ x2 = np.array([1, 'hello', 2, 3], object)
+ s1 = x1[1]
+ s2 = x2[1]
+ assert_equal(type(s2), str)
+ assert_equal(type(s1), str)
+ assert_equal(s1, s2)
+ assert_(x1[1:1].shape == (0,))
+
+ def test_testCopySize(self):
+ # Tests of some subtle points of copying and sizing.
+ n = [0, 0, 1, 0, 0]
+ m = make_mask(n)
+ m2 = make_mask(m)
+ assert_(m is m2)
+ m3 = make_mask(m, copy=True)
+ assert_(m is not m3)
+
+ x1 = np.arange(5)
+ y1 = array(x1, mask=m)
+ assert_(y1._data is not x1)
+ assert_(allequal(x1, y1._data))
+ assert_(y1._mask is m)
+
+ y1a = array(y1, copy=0)
+        # For copy=False, one might expect that the array would just be
+        # passed on, i.e., that it would be "is" instead of "==".
+ # See gh-4043 for discussion.
+ assert_(y1a._mask.__array_interface__ ==
+ y1._mask.__array_interface__)
+
+ y2 = array(x1, mask=m3, copy=0)
+ assert_(y2._mask is m3)
+ assert_(y2[2] is masked)
+ y2[2] = 9
+ assert_(y2[2] is not masked)
+ assert_(y2._mask is m3)
+ assert_(allequal(y2.mask, 0))
+
+ y2a = array(x1, mask=m, copy=1)
+ assert_(y2a._mask is not m)
+ assert_(y2a[2] is masked)
+ y2a[2] = 9
+ assert_(y2a[2] is not masked)
+ assert_(y2a._mask is not m)
+ assert_(allequal(y2a.mask, 0))
+
+ y3 = array(x1 * 1.0, mask=m)
+ assert_(filled(y3).dtype is (x1 * 1.0).dtype)
+
+ x4 = arange(4)
+ x4[2] = masked
+ y4 = resize(x4, (8,))
+ assert_(eq(concatenate([x4, x4]), y4))
+ assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
+ y5 = repeat(x4, (2, 2, 2, 2), axis=0)
+ assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
+ y6 = repeat(x4, 2, axis=0)
+ assert_(eq(y5, y6))
+
+ def test_testPut(self):
+ # Test of put
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ m2 = m.copy()
+ x = array(d, mask=m)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ x[[1, 4]] = [10, 40]
+ assert_(x._mask is m)
+ assert_(x[3] is masked)
+ assert_(x[4] is not masked)
+ assert_(eq(x, [0, 10, 2, -1, 40]))
+
+ x = array(d, mask=m2, copy=True)
+ x.put([0, 1, 2], [-1, 100, 200])
+ assert_(x._mask is not m2)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ assert_(eq(x, [-1, 100, 200, 0, 0]))
+
+ def test_testPut2(self):
+ # Test of put
+ d = arange(5)
+ x = array(d, mask=[0, 0, 0, 0, 0])
+ z = array([10, 40], mask=[1, 0])
+ assert_(x[2] is not masked)
+ assert_(x[3] is not masked)
+ x[2:4] = z
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_(eq(x, [0, 1, 10, 40, 4]))
+
+ d = arange(5)
+ x = array(d, mask=[0, 0, 0, 0, 0])
+ y = x[2:4]
+ z = array([10, 40], mask=[1, 0])
+ assert_(x[2] is not masked)
+ assert_(x[3] is not masked)
+ y[:] = z
+ assert_(y[0] is masked)
+ assert_(y[1] is not masked)
+ assert_(eq(y, [10, 40]))
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_(eq(x, [0, 1, 10, 40, 4]))
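+
+        # Added illustrative note (hedged): the second block above works
+        # because y is a view of x, so assigning the masked z through y
+        # updates both the data and the mask of the parent array x.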
+
+ def test_testMaPut(self):
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
+ m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
+ i = np.nonzero(m)[0]
+ put(ym, i, zm)
+ assert_(all(take(ym, i, axis=0) == zm))
+
+ def test_testOddFeatures(self):
+ # Test of other odd features
+ x = arange(20)
+ x = x.reshape(4, 5)
+ x.flat[5] = 12
+ assert_(x[1, 0] == 12)
+ z = x + 10j * x
+ assert_(eq(z.real, x))
+ assert_(eq(z.imag, 10 * x))
+ assert_(eq((z * conjugate(z)).real, 101 * x * x))
+ z.imag[...] = 0.0
+
+ x = arange(10)
+ x[3] = masked
+ assert_(str(x[3]) == str(masked))
+ c = x >= 8
+ assert_(count(where(c, masked, masked)) == 0)
+ assert_(shape(where(c, masked, masked)) == c.shape)
+ z = where(c, x, masked)
+ assert_(z.dtype is x.dtype)
+ assert_(z[3] is masked)
+ assert_(z[4] is masked)
+ assert_(z[7] is masked)
+ assert_(z[8] is not masked)
+ assert_(z[9] is not masked)
+ assert_(eq(x, z))
+ z = where(c, masked, x)
+ assert_(z.dtype is x.dtype)
+ assert_(z[3] is masked)
+ assert_(z[4] is not masked)
+ assert_(z[7] is not masked)
+ assert_(z[8] is masked)
+ assert_(z[9] is masked)
+ z = masked_where(c, x)
+ assert_(z.dtype is x.dtype)
+ assert_(z[3] is masked)
+ assert_(z[4] is not masked)
+ assert_(z[7] is not masked)
+ assert_(z[8] is masked)
+ assert_(z[9] is masked)
+ assert_(eq(x, z))
+ x = array([1., 2., 3., 4., 5.])
+ c = array([1, 1, 1, 0, 0])
+ x[2] = masked
+ z = where(c, x, -x)
+ assert_(eq(z, [1., 2., 0., -4., -5]))
+ c[0] = masked
+ z = where(c, x, -x)
+ assert_(eq(z, [1., 2., 0., -4., -5]))
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+ assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
+ assert_(eq(masked_where(greater_equal(x, 2), x),
+ masked_greater_equal(x, 2)))
+ assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
+ assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
+ assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
+ assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
+ assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
+ assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
+ assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
+ assert_(eq(masked_inside(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 1, 1, 0]))
+ assert_(eq(masked_outside(array(list(range(5)),
+ mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+ [1, 1, 0, 0, 1]))
+ assert_(eq(masked_equal(array(list(range(5)),
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 0]))
+ assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
+ mask=[1, 0, 0, 0, 0]), 2).mask,
+ [1, 0, 1, 0, 1]))
+ assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
+ [99, 99, 3, 4, 5]))
+ atest = ones((10, 10, 10), dtype=np.float32)
+ btest = zeros(atest.shape, MaskType)
+ ctest = masked_where(btest, atest)
+ assert_(eq(atest, ctest))
+ z = choose(c, (-x, x))
+ assert_(eq(z, [1., 2., 0., -4., -5]))
+ assert_(z[0] is masked)
+ assert_(z[1] is not masked)
+ assert_(z[2] is masked)
+ x = arange(6)
+ x[5] = masked
+ y = arange(6) * 10
+ y[2] = masked
+ c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
+ cm = c.filled(1)
+ z = where(c, x, y)
+ zm = where(cm, x, y)
+ assert_(eq(z, zm))
+ assert_(getmask(zm) is nomask)
+ assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
+ z = where(c, masked, 1)
+ assert_(eq(z, [99, 99, 99, 1, 1, 1]))
+ z = where(c, 1, masked)
+ assert_(eq(z, [99, 1, 1, 99, 99, 99]))
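+
+        # Added illustrative note (hedged): throughout the `where` checks
+        # above, a masked condition entry produces a masked result entry
+        # regardless of which branch would be selected, e.g.
+        # where(array([1, 0], mask=[1, 0]), 1, 2) -> [--, 2].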
+
+ def test_testMinMax2(self):
+ # Test of minimum, maximum.
+ assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
+ assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
+ x = arange(5)
+ y = arange(5) - 2
+ x[3] = masked
+ y[0] = masked
+ assert_(eq(minimum(x, y), where(less(x, y), x, y)))
+ assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
+ assert_(minimum.reduce(x) == 0)
+ assert_(maximum.reduce(x) == 4)
+
+ def test_testTakeTransposeInnerOuter(self):
+ # Test of take, transpose, inner, outer products
+ x = arange(24)
+ y = np.arange(24)
+ x[5:6] = masked
+ x = x.reshape(2, 3, 4)
+ y = y.reshape(2, 3, 4)
+ assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
+ assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
+ assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
+ inner(x, y)))
+ assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
+ outer(x, y)))
+ y = array(['abc', 1, 'def', 2, 3], object)
+ y[2] = masked
+ t = take(y, [0, 3, 4])
+ assert_(t[0] == 'abc')
+ assert_(t[1] == 2)
+ assert_(t[2] == 3)
+
+ def test_testInplace(self):
+ # Test of inplace operations and rich comparisons
+ y = arange(10)
+
+ x = arange(10)
+ xm = arange(10)
+ xm[2] = masked
+ x += 1
+ assert_(eq(x, y + 1))
+ xm += 1
+        assert_(eq(xm, y + 1))
+
+ x = arange(10)
+ xm = arange(10)
+ xm[2] = masked
+ x -= 1
+ assert_(eq(x, y - 1))
+ xm -= 1
+ assert_(eq(xm, y - 1))
+
+ x = arange(10) * 1.0
+ xm = arange(10) * 1.0
+ xm[2] = masked
+ x *= 2.0
+ assert_(eq(x, y * 2))
+ xm *= 2.0
+ assert_(eq(xm, y * 2))
+
+ x = arange(10) * 2
+        xm = arange(10) * 2
+ xm[2] = masked
+ x //= 2
+ assert_(eq(x, y))
+ xm //= 2
+        assert_(eq(xm, y))
+
+ x = arange(10) * 1.0
+ xm = arange(10) * 1.0
+ xm[2] = masked
+ x /= 2.0
+ assert_(eq(x, y / 2.0))
+ xm /= arange(10)
+ assert_(eq(xm, ones((10,))))
+
+ x = arange(10).astype(np.float32)
+ xm = arange(10)
+ xm[2] = masked
+ x += 1.
+ assert_(eq(x, y + 1.))
+
+ def test_testPickle(self):
+ # Test of pickling
+ x = arange(12)
+ x[4:10:2] = masked
+ x = x.reshape(4, 3)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ s = pickle.dumps(x, protocol=proto)
+ y = pickle.loads(s)
+ assert_(eq(x, y))
+
+ def test_testMasked(self):
+ # Test of masked element
+ xx = arange(6)
+ xx[1] = masked
+ assert_(str(masked) == '--')
+ assert_(xx[1] is masked)
+ assert_equal(filled(xx[1], 0), 0)
+
+ def test_testAverage1(self):
+ # Test of average.
+ ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+ assert_(eq(2.0, average(ott, axis=0)))
+ assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+ assert_(eq(2.0, result))
+ assert_(wts == 4.0)
+ ott[:] = masked
+ assert_(average(ott, axis=0) is masked)
+ ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+ ott = ott.reshape(2, 2)
+ ott[:, 1] = masked
+ assert_(eq(average(ott, axis=0), [2.0, 0.0]))
+ assert_(average(ott, axis=1)[0] is masked)
+ assert_(eq([2., 0.], average(ott, axis=0)))
+ result, wts = average(ott, axis=0, returned=True)
+ assert_(eq(wts, [1., 0.]))
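+
+        # Added illustrative note (hedged): with returned=True, average
+        # hands back (mean, sum_of_weights); the fully masked column gets
+        # zero total weight, which is why wts == [1., 0.] just above.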
+
+ def test_testAverage2(self):
+ # More tests of average.
+ w1 = [0, 1, 1, 1, 1, 0]
+ w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+ x = arange(6)
+ assert_(allclose(average(x, axis=0), 2.5))
+ assert_(allclose(average(x, axis=0, weights=w1), 2.5))
+ y = array([arange(6), 2.0 * arange(6)])
+ assert_(allclose(average(y, None),
+ np.add.reduce(np.arange(6)) * 3. / 12.))
+ assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
+ assert_(allclose(average(y, axis=1),
+ [average(x, axis=0), average(x, axis=0)*2.0]))
+ assert_(allclose(average(y, None, weights=w2), 20. / 6.))
+ assert_(allclose(average(y, axis=0, weights=w2),
+ [0., 1., 2., 3., 4., 10.]))
+ assert_(allclose(average(y, axis=1),
+ [average(x, axis=0), average(x, axis=0)*2.0]))
+ m1 = zeros(6)
+ m2 = [0, 0, 1, 1, 0, 0]
+ m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
+ m4 = ones(6)
+ m5 = [0, 1, 1, 1, 1, 1]
+ assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
+ assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
+ assert_(average(masked_array(x, m4), axis=0) is masked)
+ assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+ assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
+ z = masked_array(y, m3)
+ assert_(allclose(average(z, None), 20. / 6.))
+ assert_(allclose(average(z, axis=0),
+ [0., 1., 99., 99., 4.0, 7.5]))
+ assert_(allclose(average(z, axis=1), [2.5, 5.0]))
+ assert_(allclose(average(z, axis=0, weights=w2),
+ [0., 1., 99., 99., 4.0, 10.0]))
+
+ a = arange(6)
+ b = arange(6) * 3
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
+ assert_equal(shape(r1), shape(w1))
+ assert_equal(r1.shape, w1.shape)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
+ assert_equal(shape(w2), shape(r2))
+ r2, w2 = average(ones((2, 2, 3)), returned=True)
+ assert_equal(shape(w2), shape(r2))
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
+ assert_(shape(w2) == shape(r2))
+ a2d = array([[1, 2], [0, 4]], float)
+ a2dm = masked_array(a2d, [[0, 0], [1, 0]])
+ a2da = average(a2d, axis=0)
+ assert_(eq(a2da, [0.5, 3.0]))
+ a2dma = average(a2dm, axis=0)
+ assert_(eq(a2dma, [1.0, 3.0]))
+ a2dma = average(a2dm, axis=None)
+ assert_(eq(a2dma, 7. / 3.))
+ a2dma = average(a2dm, axis=1)
+ assert_(eq(a2dma, [1.5, 4.0]))
+
+ def test_testToPython(self):
+ assert_equal(1, int(array(1)))
+ assert_equal(1.0, float(array(1)))
+ assert_equal(1, int(array([[[1]]])))
+ assert_equal(1.0, float(array([[1]])))
+ assert_raises(TypeError, float, array([1, 1]))
+ assert_raises(ValueError, bool, array([0, 1]))
+ assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))
+
+ def test_testScalarArithmetic(self):
+ xm = array(0, mask=1)
+        # TODO/FIXME: Find out why the following raises a warning in r8247
+ with np.errstate(divide='ignore'):
+ assert_((1 / array(0)).mask)
+ assert_((1 + xm).mask)
+ assert_((-xm).mask)
+ assert_((-xm).mask)
+ assert_(maximum(xm, xm).mask)
+ assert_(minimum(xm, xm).mask)
+ assert_(xm.filled().dtype is xm._data.dtype)
+ x = array(0, mask=0)
+ assert_(x.filled() == x._data)
+ assert_equal(str(xm), str(masked_print_option))
+
+ def test_testArrayMethods(self):
+ a = array([1, 3, 2])
+ assert_(eq(a.any(), a._data.any()))
+ assert_(eq(a.all(), a._data.all()))
+ assert_(eq(a.argmax(), a._data.argmax()))
+ assert_(eq(a.argmin(), a._data.argmin()))
+ assert_(eq(a.choose(0, 1, 2, 3, 4),
+ a._data.choose(0, 1, 2, 3, 4)))
+ assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
+ assert_(eq(a.conj(), a._data.conj()))
+ assert_(eq(a.conjugate(), a._data.conjugate()))
+ m = array([[1, 2], [3, 4]])
+ assert_(eq(m.diagonal(), m._data.diagonal()))
+ assert_(eq(a.sum(), a._data.sum()))
+ assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
+ assert_(eq(m.transpose(), m._data.transpose()))
+
+ def test_testArrayAttributes(self):
+ a = array([1, 3, 2])
+ assert_equal(a.ndim, 1)
+
+ def test_testAPI(self):
+ assert_(not [m for m in dir(np.ndarray)
+ if m not in dir(MaskedArray) and
+ not m.startswith('_')])
+
+ def test_testSingleElementSubscript(self):
+ a = array([1, 3, 2])
+ b = array([1, 3, 2], mask=[1, 0, 1])
+ assert_equal(a[0].shape, ())
+ assert_equal(b[0].shape, ())
+ assert_equal(b[1].shape, ())
+
+ def test_assignment_by_condition(self):
+ # Test for gh-18951
+ a = array([1, 2, 3, 4], mask=[1, 0, 1, 0])
+ c = a >= 3
+ a[c] = 5
+ assert_(a[2] is masked)
+
+ def test_assignment_by_condition_2(self):
+ # gh-19721
+ a = masked_array([0, 1], mask=[False, False])
+ b = masked_array([0, 1], mask=[True, True])
+ mask = a < 1
+ b[mask] = a[mask]
+ expected_mask = [False, True]
+ assert_equal(b.mask, expected_mask)
+
+
+class TestUfuncs:
+ def setup_method(self):
+ self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
+ array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
+
+ def test_testUfuncRegression(self):
+ f_invalid_ignore = [
+ 'sqrt', 'arctanh', 'arcsin', 'arccos',
+ 'arccosh', 'arctanh', 'log', 'log10', 'divide',
+ 'true_divide', 'floor_divide', 'remainder', 'fmod']
+ for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
+ 'sin', 'cos', 'tan',
+ 'arcsin', 'arccos', 'arctan',
+ 'sinh', 'cosh', 'tanh',
+ 'arcsinh',
+ 'arccosh',
+ 'arctanh',
+ 'absolute', 'fabs', 'negative',
+ 'floor', 'ceil',
+ 'logical_not',
+ 'add', 'subtract', 'multiply',
+ 'divide', 'true_divide', 'floor_divide',
+ 'remainder', 'fmod', 'hypot', 'arctan2',
+ 'equal', 'not_equal', 'less_equal', 'greater_equal',
+ 'less', 'greater',
+ 'logical_and', 'logical_or', 'logical_xor']:
+ try:
+ uf = getattr(umath, f)
+ except AttributeError:
+ uf = getattr(fromnumeric, f)
+ mf = getattr(np.ma, f)
+ args = self.d[:uf.nin]
+ with np.errstate():
+ if f in f_invalid_ignore:
+ np.seterr(invalid='ignore')
+ if f in ['arctanh', 'log', 'log10']:
+ np.seterr(divide='ignore')
+ ur = uf(*args)
+ mr = mf(*args)
+ assert_(eq(ur.filled(0), mr.filled(0), f))
+ assert_(eqmask(ur.mask, mr.mask))
+
+ def test_reduce(self):
+ a = self.d[0]
+ assert_(not alltrue(a, axis=0))
+ assert_(sometrue(a, axis=0))
+ assert_equal(sum(a[:3], axis=0), 0)
+ assert_equal(product(a, axis=0), 0)
+
+ def test_minmax(self):
+ a = arange(1, 13).reshape(3, 4)
+ amask = masked_where(a < 5, a)
+ assert_equal(amask.max(), a.max())
+ assert_equal(amask.min(), 5)
+ assert_((amask.max(0) == a.max(0)).all())
+ assert_((amask.min(0) == [5, 6, 7, 8]).all())
+ assert_(amask.max(1)[0].mask)
+ assert_(amask.min(1)[0].mask)
+
+ def test_nonzero(self):
+ for t in "?bhilqpBHILQPfdgFDGO":
+ x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
+ assert_(eq(nonzero(x), [0]))
+
+
+class TestArrayMethods:
+
+ def setup_method(self):
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
+ 8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
+ 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
+ 6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
+ 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
+ 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
+ X = x.reshape(6, 6)
+ XX = x.reshape(3, 2, 2, 3)
+
+ m = np.array([0, 1, 0, 1, 0, 0,
+ 1, 0, 1, 1, 0, 1,
+ 0, 0, 0, 1, 0, 1,
+ 0, 0, 0, 1, 1, 1,
+ 1, 0, 0, 1, 0, 0,
+ 0, 0, 1, 0, 1, 0])
+ mx = array(data=x, mask=m)
+ mX = array(data=X, mask=m.reshape(X.shape))
+ mXX = array(data=XX, mask=m.reshape(XX.shape))
+
+ self.d = (x, X, XX, m, mx, mX, mXX)
+
+ def test_trace(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ mXdiag = mX.diagonal()
+ assert_equal(mX.trace(), mX.diagonal().compressed().sum())
+ assert_(eq(mX.trace(),
+ X.trace() - sum(mXdiag.mask * X.diagonal(),
+ axis=0)))
+
+ def test_clip(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ clipped = mx.clip(2, 8)
+ assert_(eq(clipped.mask, mx.mask))
+ assert_(eq(clipped._data, x.clip(2, 8)))
+ assert_(eq(clipped._data, mx._data.clip(2, 8)))
+
+ def test_ptp(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ (n, m) = X.shape
+ assert_equal(mx.ptp(), mx.compressed().ptp())
+ rows = np.zeros(n, np.float_)
+ cols = np.zeros(m, np.float_)
+ for k in range(m):
+ cols[k] = mX[:, k].compressed().ptp()
+ for k in range(n):
+ rows[k] = mX[k].compressed().ptp()
+ assert_(eq(mX.ptp(0), cols))
+ assert_(eq(mX.ptp(1), rows))
+
+ def test_swapaxes(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ mXswapped = mX.swapaxes(0, 1)
+ assert_(eq(mXswapped[-1], mX[:, -1]))
+ mXXswapped = mXX.swapaxes(0, 2)
+ assert_equal(mXXswapped.shape, (2, 2, 3, 3))
+
+ def test_cumprod(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ mXcp = mX.cumprod(0)
+ assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
+ mXcp = mX.cumprod(1)
+ assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))
+
+ def test_cumsum(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ mXcp = mX.cumsum(0)
+ assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
+ mXcp = mX.cumsum(1)
+ assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))
+
+ def test_varstd(self):
+ (x, X, XX, m, mx, mX, mXX,) = self.d
+ assert_(eq(mX.var(axis=None), mX.compressed().var()))
+ assert_(eq(mX.std(axis=None), mX.compressed().std()))
+ assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
+ assert_(eq(mX.var().shape, X.var().shape))
+ (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
+ for k in range(6):
+ assert_(eq(mXvar1[k], mX[k].compressed().var()))
+ assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
+ assert_(eq(np.sqrt(mXvar0[k]),
+ mX[:, k].compressed().std()))
+
+
+def eqmask(m1, m2):
+ if m1 is nomask:
+ return m2 is nomask
+ if m2 is nomask:
+ return m1 is nomask
+ return (m1 == m2).all()
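+
+
+# Added illustrative note (hedged): eqmask treats nomask as equal only to
+# nomask, so eqmask(nomask, np.zeros(3, bool)) is False even though both
+# describe "nothing masked".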
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py
new file mode 100644
index 00000000..cb3d0349
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_regression.py
@@ -0,0 +1,91 @@
+import numpy as np
+from numpy.testing import (
+ assert_, assert_array_equal, assert_allclose, suppress_warnings
+ )
+
+
+class TestRegression:
+ def test_masked_array_create(self):
+ # Ticket #17
+ x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
+ mask=[0, 0, 0, 1, 1, 1, 0, 0])
+ assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
+
+ def test_masked_array(self):
+ # Ticket #61
+ np.ma.array(1, mask=[1])
+
+ def test_mem_masked_where(self):
+ # Ticket #62
+ from numpy.ma import masked_where, MaskType
+ a = np.zeros((1, 1))
+ b = np.zeros(a.shape, MaskType)
+ c = masked_where(b, a)
+ a-c
+
+ def test_masked_array_multiply(self):
+ # Ticket #254
+ a = np.ma.zeros((4, 1))
+ a[2, 0] = np.ma.masked
+ b = np.zeros((4, 2))
+ a*b
+ b*a
+
+ def test_masked_array_repeat(self):
+ # Ticket #271
+ np.ma.array([1], mask=False).repeat(10)
+
+ def test_masked_array_repr_unicode(self):
+ # Ticket #1256
+ repr(np.ma.array("Unicode"))
+
+ def test_atleast_2d(self):
+ # Ticket #1559
+ a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
+ b = np.atleast_2d(a)
+ assert_(a.mask.ndim == 1)
+ assert_(b.mask.ndim == 2)
+
+ def test_set_fill_value_unicode_py3(self):
+ # Ticket #2733
+ a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
+ a.fill_value = 'X'
+ assert_(a.fill_value == 'X')
+
+ def test_var_sets_maskedarray_scalar(self):
+ # Issue gh-2757
+ a = np.ma.array(np.arange(5), mask=True)
+ mout = np.ma.array(-1, dtype=float)
+ a.var(out=mout)
+ assert_(mout._data == 0)
+
+ def test_ddof_corrcoef(self):
+ # See gh-3336
+ x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
+ y = np.array([2, 2.5, 3.1, 3, 5])
+ # this test can be removed after deprecation.
+ with suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "bias and ddof have no effect")
+ r0 = np.ma.corrcoef(x, y, ddof=0)
+ r1 = np.ma.corrcoef(x, y, ddof=1)
+ # ddof should not have an effect (it gets cancelled out)
+ assert_allclose(r0.data, r1.data)
+
+ def test_mask_not_backmangled(self):
+ # See gh-10314. Test case taken from gh-3140.
+ a = np.ma.MaskedArray([1., 2.], mask=[False, False])
+ assert_(a.mask.shape == (2,))
+ b = np.tile(a, (2, 1))
+ # Check that the above no longer changes a.shape to (1, 2)
+ assert_(a.mask.shape == (2,))
+ assert_(b.shape == (2, 2))
+ assert_(b.mask.shape == (2, 2))
+
+ def test_empty_list_on_structured(self):
+ # See gh-12464. Indexing with empty list should give empty result.
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
+ assert_array_equal(ma[[]], ma[:0])
+
+ def test_masked_array_tobytes_fortran(self):
+ ma = np.ma.arange(4).reshape((2,2))
+ assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py
new file mode 100644
index 00000000..64c66eeb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/tests/test_subclassing.py
@@ -0,0 +1,450 @@
+# pylint: disable-msg=W0611, W0612, W0511,R0201
+"""Tests suite for MaskedArray & subclassing.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
+
+"""
+import numpy as np
+from numpy.lib.mixins import NDArrayOperatorsMixin
+from numpy.testing import assert_, assert_raises
+from numpy.ma.testutils import assert_equal
+from numpy.ma.core import (
+ array, arange, masked, MaskedArray, masked_array, log, add, hypot,
+ divide, asarray, asanyarray, nomask
+ )
+
+def assert_startswith(a, b):
+ # produces a better error message than assert_(a.startswith(b))
+ assert_equal(a[:len(b)], b)
+
+class SubArray(np.ndarray):
+ # Defines a generic np.ndarray subclass, that stores some metadata
+ # in the dictionary `info`.
+    def __new__(cls, arr, info={}):
+ x = np.asanyarray(arr).view(cls)
+ x.info = info.copy()
+ return x
+
+ def __array_finalize__(self, obj):
+ super().__array_finalize__(obj)
+ self.info = getattr(obj, 'info', {}).copy()
+ return
+
+ def __add__(self, other):
+ result = super().__add__(other)
+ result.info['added'] = result.info.get('added', 0) + 1
+ return result
+
+ def __iadd__(self, other):
+ result = super().__iadd__(other)
+ result.info['iadded'] = result.info.get('iadded', 0) + 1
+ return result
+
+
+subarray = SubArray
+
+
+class SubMaskedArray(MaskedArray):
+ """Pure subclass of MaskedArray, keeping some info on subclass."""
+ def __new__(cls, info=None, **kwargs):
+ obj = super().__new__(cls, **kwargs)
+ obj._optinfo['info'] = info
+ return obj
+
+
+class MSubArray(SubArray, MaskedArray):
+
+ def __new__(cls, data, info={}, mask=nomask):
+ subarr = SubArray(data, info)
+ _data = MaskedArray.__new__(cls, data=subarr, mask=mask)
+ _data.info = subarr.info
+ return _data
+
+ @property
+ def _series(self):
+ _view = self.view(MaskedArray)
+ _view._sharedmask = False
+ return _view
+
+msubarray = MSubArray
+
+
+# Also define a subclass that overrides __str__, __repr__ and __setitem__,
+# disallowing setting to non-class values (and thus to
+# np.ma.core.masked_print_option), and that overrides __array_wrap__ to
+# update the info dict, checking that this doesn't get destroyed by
+# MaskedArray._update_from. But this one also needs its own iterator...
+class CSAIterator:
+ """
+ Flat iterator object that uses its own setter/getter
+    (works around ndarray.flat not propagating subclass setters/getters;
+    see https://github.com/numpy/numpy/issues/4564),
+    roughly following MaskedIterator
+ """
+ def __init__(self, a):
+ self._original = a
+ self._dataiter = a.view(np.ndarray).flat
+
+ def __iter__(self):
+ return self
+
+ def __getitem__(self, indx):
+ out = self._dataiter.__getitem__(indx)
+ if not isinstance(out, np.ndarray):
+ out = out.__array__()
+ out = out.view(type(self._original))
+ return out
+
+ def __setitem__(self, index, value):
+ self._dataiter[index] = self._original._validate_input(value)
+
+ def __next__(self):
+ return next(self._dataiter).__array__().view(type(self._original))
+
+
+class ComplicatedSubArray(SubArray):
+
+ def __str__(self):
+ return f'myprefix {self.view(SubArray)} mypostfix'
+
+ def __repr__(self):
+ # Return a repr that does not start with 'name('
+ return f'<{self.__class__.__name__} {self}>'
+
+ def _validate_input(self, value):
+ if not isinstance(value, ComplicatedSubArray):
+ raise ValueError("Can only set to MySubArray values")
+ return value
+
+ def __setitem__(self, item, value):
+ # validation ensures direct assignment with ndarray or
+ # masked_print_option will fail
+ super().__setitem__(item, self._validate_input(value))
+
+ def __getitem__(self, item):
+ # ensure getter returns our own class also for scalars
+ value = super().__getitem__(item)
+ if not isinstance(value, np.ndarray): # scalar
+ value = value.__array__().view(ComplicatedSubArray)
+ return value
+
+ @property
+ def flat(self):
+ return CSAIterator(self)
+
+ @flat.setter
+ def flat(self, value):
+ y = self.ravel()
+ y[:] = value
+
+ def __array_wrap__(self, obj, context=None):
+ obj = super().__array_wrap__(obj, context)
+ if context is not None and context[0] is np.multiply:
+ obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
+
+ return obj
+
+
+class WrappedArray(NDArrayOperatorsMixin):
+ """
+ Wrapping a MaskedArray rather than subclassing to test that
+ ufunc deferrals are commutative.
+    See: https://github.com/numpy/numpy/issues/15200
+ """
+ __array_priority__ = 20
+
+ def __init__(self, array, **attrs):
+ self._array = array
+ self.attrs = attrs
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)"
+
+ def __array__(self):
+ return np.asarray(self._array)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ if method == '__call__':
+ inputs = [arg._array if isinstance(arg, self.__class__) else arg
+ for arg in inputs]
+ return self.__class__(ufunc(*inputs, **kwargs), **self.attrs)
+ else:
+ return NotImplemented
+
+
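+# Added illustrative sketch (hedged, not part of the original suite):
+# WrappedArray advertises a high __array_priority__ and implements
+# __array_ufunc__, so ufunc calls defer to it from either operand side.
+def _wrapped_array_demo():
+    wm = WrappedArray(np.ma.masked_array([1., 2.], mask=[0, 1]), units='m')
+    out = np.add(np.ma.masked_array([1., 1.]), wm)
+    assert isinstance(out, WrappedArray)   # deferral is commutative
+    return out
+
+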
+class TestSubclassing:
+ # Test suite for masked subclasses of ndarray.
+
+ def setup_method(self):
+ x = np.arange(5, dtype='float')
+ mx = msubarray(x, mask=[0, 1, 0, 0, 0])
+ self.data = (x, mx)
+
+ def test_data_subclassing(self):
+ # Tests whether the subclass is kept.
+ x = np.arange(5)
+ m = [0, 0, 1, 0, 0]
+ xsub = SubArray(x)
+ xmsub = masked_array(xsub, mask=m)
+ assert_(isinstance(xmsub, MaskedArray))
+ assert_equal(xmsub._data, xsub)
+ assert_(isinstance(xmsub._data, SubArray))
+
+ def test_maskedarray_subclassing(self):
+ # Tests subclassing MaskedArray
+ (x, mx) = self.data
+ assert_(isinstance(mx._data, subarray))
+
+ def test_masked_unary_operations(self):
+ # Tests masked_unary_operation
+ (x, mx) = self.data
+ with np.errstate(divide='ignore'):
+ assert_(isinstance(log(mx), msubarray))
+ assert_equal(log(x), np.log(x))
+
+ def test_masked_binary_operations(self):
+ # Tests masked_binary_operation
+ (x, mx) = self.data
+ # Result should be a msubarray
+ assert_(isinstance(add(mx, mx), msubarray))
+ assert_(isinstance(add(mx, x), msubarray))
+ # Result should work
+ assert_equal(add(mx, x), mx+x)
+ assert_(isinstance(add(mx, mx)._data, subarray))
+ assert_(isinstance(add.outer(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, mx), msubarray))
+ assert_(isinstance(hypot(mx, x), msubarray))
+
+ def test_masked_binary_operations2(self):
+ # Tests domained_masked_binary_operation
+ (x, mx) = self.data
+ xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+ assert_(isinstance(divide(mx, mx), msubarray))
+ assert_(isinstance(divide(mx, x), msubarray))
+ assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+ def test_attributepropagation(self):
+ x = array(arange(5), mask=[0]+[1]*4)
+ my = masked_array(subarray(x))
+ ym = msubarray(x)
+ #
+ z = (my+1)
+ assert_(isinstance(z, MaskedArray))
+ assert_(not isinstance(z, MSubArray))
+ assert_(isinstance(z._data, SubArray))
+ assert_equal(z._data.info, {})
+ #
+ z = (ym+1)
+ assert_(isinstance(z, MaskedArray))
+ assert_(isinstance(z, MSubArray))
+ assert_(isinstance(z._data, SubArray))
+ assert_(z._data.info['added'] > 0)
+ # Test that inplace methods from data get used (gh-4617)
+ ym += 1
+ assert_(isinstance(ym, MaskedArray))
+ assert_(isinstance(ym, MSubArray))
+ assert_(isinstance(ym._data, SubArray))
+ assert_(ym._data.info['iadded'] > 0)
+ #
+ ym._set_mask([1, 0, 0, 0, 1])
+ assert_equal(ym._mask, [1, 0, 0, 0, 1])
+ ym._series._set_mask([0, 0, 0, 0, 1])
+ assert_equal(ym._mask, [0, 0, 0, 0, 1])
+ #
+ xsub = subarray(x, info={'name':'x'})
+ mxsub = masked_array(xsub)
+ assert_(hasattr(mxsub, 'info'))
+ assert_equal(mxsub.info, xsub.info)
+
+ def test_subclasspreservation(self):
+ # Checks that masked_array(...,subok=True) preserves the class.
+ x = np.arange(5)
+ m = [0, 0, 1, 0, 0]
+ xinfo = [(i, j) for (i, j) in zip(x, m)]
+ xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
+ #
+ mxsub = masked_array(xsub, subok=False)
+ assert_(not isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MaskedArray))
+ assert_equal(mxsub._mask, m)
+ #
+ mxsub = asarray(xsub)
+ assert_(not isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MaskedArray))
+ assert_equal(mxsub._mask, m)
+ #
+ mxsub = masked_array(xsub, subok=True)
+ assert_(isinstance(mxsub, MSubArray))
+ assert_equal(mxsub.info, xsub.info)
+ assert_equal(mxsub._mask, xsub._mask)
+ #
+ mxsub = asanyarray(xsub)
+ assert_(isinstance(mxsub, MSubArray))
+ assert_equal(mxsub.info, xsub.info)
+ assert_equal(mxsub._mask, m)
+
+ def test_subclass_items(self):
+ """test that getter and setter go via baseclass"""
+ x = np.arange(5)
+ xcsub = ComplicatedSubArray(x)
+ mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
+ # getter should return a ComplicatedSubArray, even for single item
+ # first check we wrote ComplicatedSubArray correctly
+ assert_(isinstance(xcsub[1], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1,...], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1:4], ComplicatedSubArray))
+
+ # now that it propagates inside the MaskedArray
+ assert_(isinstance(mxcsub[1], ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
+ assert_(mxcsub[0] is masked)
+ assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
+
+ # also for flattened version (which goes via MaskedIterator)
+ assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
+ assert_(mxcsub.flat[0] is masked)
+ assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
+
+ # setter should only work with ComplicatedSubArray input
+ # first check we wrote ComplicatedSubArray correctly
+ assert_raises(ValueError, xcsub.__setitem__, 1, x[4])
+ # now that it propagates inside the MaskedArray
+ assert_raises(ValueError, mxcsub.__setitem__, 1, x[4])
+ assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4])
+ mxcsub[1] = xcsub[4]
+ mxcsub[1:4] = xcsub[1:4]
+ # also for flattened version (which goes via MaskedIterator)
+ assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4])
+ assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4])
+ mxcsub.flat[1] = xcsub[4]
+ mxcsub.flat[1:4] = xcsub[1:4]
+
+ def test_subclass_nomask_items(self):
+ x = np.arange(5)
+ xcsub = ComplicatedSubArray(x)
+ mxcsub_nomask = masked_array(xcsub)
+
+ assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
+
+ assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
+
+ def test_subclass_repr(self):
+ """test that repr uses the name of the subclass
+ and 'array' for np.ndarray"""
+ x = np.arange(5)
+ mx = masked_array(x, mask=[True, False, True, False, False])
+ assert_startswith(repr(mx), 'masked_array')
+ xsub = SubArray(x)
+ mxsub = masked_array(xsub, mask=[True, False, True, False, False])
+ assert_startswith(repr(mxsub),
+ f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]')
+
+ def test_subclass_str(self):
+ """test str with subclass that has overridden str, setitem"""
+ # first without override
+ x = np.arange(5)
+ xsub = SubArray(x)
+ mxsub = masked_array(xsub, mask=[True, False, True, False, False])
+ assert_equal(str(mxsub), '[-- 1 -- 3 4]')
+
+ xcsub = ComplicatedSubArray(x)
+ assert_raises(ValueError, xcsub.__setitem__, 0,
+ np.ma.core.masked_print_option)
+ mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
+ assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix')
+
+ def test_pure_subclass_info_preservation(self):
+ # Test that ufuncs and methods conserve extra information consistently;
+ # see gh-7122.
+ arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
+ arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
+ diff1 = np.subtract(arr1, arr2)
+ assert_('info' in diff1._optinfo)
+ assert_(diff1._optinfo['info'] == 'test')
+ diff2 = arr1 - arr2
+ assert_('info' in diff2._optinfo)
+ assert_(diff2._optinfo['info'] == 'test')
+
+
+class ArrayNoInheritance:
+ """Quantity-like class that does not inherit from ndarray"""
+ def __init__(self, data, units):
+ self.magnitude = data
+ self.units = units
+
+ def __getattr__(self, attr):
+ return getattr(self.magnitude, attr)
+
+
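+# Added illustrative sketch (hedged, not part of the original suite):
+# attribute forwarding via __getattr__ lets np.ma.array() recover both
+# the data and the mask from the duck array without ndarray inheritance.
+def _duck_array_demo():
+    q = ArrayNoInheritance(np.ma.array([1, 2], mask=[True, False]), 'meters')
+    return np.ma.array(q).mask             # -> array([ True, False])
+
+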
+def test_array_no_inheritance():
+ data_masked = np.ma.array([1, 2, 3], mask=[True, False, True])
+ data_masked_units = ArrayNoInheritance(data_masked, 'meters')
+
+ # Get the masked representation of the Quantity-like class
+ new_array = np.ma.array(data_masked_units)
+ assert_equal(data_masked.data, new_array.data)
+ assert_equal(data_masked.mask, new_array.mask)
+ # Test sharing the mask
+ data_masked.mask = [True, False, False]
+ assert_equal(data_masked.mask, new_array.mask)
+ assert_(new_array.sharedmask)
+
+ # Get the masked representation of the Quantity-like class
+ new_array = np.ma.array(data_masked_units, copy=True)
+ assert_equal(data_masked.data, new_array.data)
+ assert_equal(data_masked.mask, new_array.mask)
+ # Test that the mask is not shared when copy=True
+ data_masked.mask = [True, False, True]
+ assert_equal([True, False, False], new_array.mask)
+ assert_(not new_array.sharedmask)
+
+ # Get the masked representation of the Quantity-like class
+ new_array = np.ma.array(data_masked_units, keep_mask=False)
+ assert_equal(data_masked.data, new_array.data)
+ # The change did not affect the original mask
+ assert_equal(data_masked.mask, [True, False, True])
+ # Test that the mask is False and not shared when keep_mask=False
+ assert_(not new_array.mask)
+ assert_(not new_array.sharedmask)
+
+
+class TestClassWrapping:
+ # Test suite for classes that wrap MaskedArrays
+
+ def setup_method(self):
+ m = np.ma.masked_array([1, 3, 5], mask=[False, True, False])
+ wm = WrappedArray(m)
+ self.data = (m, wm)
+
+ def test_masked_unary_operations(self):
+ # Tests masked_unary_operation
+ (m, wm) = self.data
+ with np.errstate(divide='ignore'):
+ assert_(isinstance(np.log(wm), WrappedArray))
+
+ def test_masked_binary_operations(self):
+ # Tests masked_binary_operation
+ (m, wm) = self.data
+ # Result should be a WrappedArray
+ assert_(isinstance(np.add(wm, wm), WrappedArray))
+ assert_(isinstance(np.add(m, wm), WrappedArray))
+ assert_(isinstance(np.add(wm, m), WrappedArray))
+ # add and '+' should call the same ufunc
+ assert_equal(np.add(m, wm), m + wm)
+ assert_(isinstance(np.hypot(m, wm), WrappedArray))
+ assert_(isinstance(np.hypot(wm, m), WrappedArray))
+ # Test domained binary operations
+ assert_(isinstance(np.divide(wm, m), WrappedArray))
+ assert_(isinstance(np.divide(m, wm), WrappedArray))
+ assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm)
+ # Test broadcasting
+ m2 = np.stack([m, m])
+ assert_(isinstance(np.divide(wm, m2), WrappedArray))
+ assert_(isinstance(np.divide(m2, wm), WrappedArray))
+ assert_equal(np.divide(m2, wm), np.divide(wm, m2))
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/testutils.py b/venv/lib/python3.9/site-packages/numpy/ma/testutils.py
new file mode 100644
index 00000000..2dd479ab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/testutils.py
@@ -0,0 +1,288 @@
+"""Miscellaneous functions for testing masked arrays and subclasses
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
+
+"""
+import operator
+
+import numpy as np
+from numpy import ndarray, float_
+import numpy.core.umath as umath
+import numpy.testing
+from numpy.testing import (
+ assert_, assert_allclose, assert_array_almost_equal_nulp,
+ assert_raises, build_err_msg
+ )
+from .core import mask_or, getmask, masked_array, nomask, masked, filled
+
+__all__masked = [
+ 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
+ 'assert_array_approx_equal', 'assert_array_compare',
+ 'assert_array_equal', 'assert_array_less', 'assert_close',
+ 'assert_equal', 'assert_equal_records', 'assert_mask_equal',
+ 'assert_not_equal', 'fail_if_array_equal',
+ ]
+
+# Include some normal test functions to avoid breaking other projects that
+# have mistakenly imported them from this file. SciPy is one. That is
+# unfortunate, as some of these functions are not intended to work with
+# masked arrays. But there was no way to tell before.
+from unittest import TestCase
+__some__from_testing = [
+ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
+ 'assert_raises'
+ ]
+
+__all__ = __all__masked + __some__from_testing
+
+
+def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
+ """
+ Returns true if all components of a and b are equal to given tolerances.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal. The relative error rtol should
+    be positive and << 1.0. The absolute error atol comes into play for
+    those elements of b that are very small or zero; it says how small a
+    must be as well.
+
+ """
+ m = mask_or(getmask(a), getmask(b))
+ d1 = filled(a)
+ d2 = filled(b)
+ if d1.dtype.char == "O" or d2.dtype.char == "O":
+ return np.equal(d1, d2).ravel()
+ x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
+ y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
+ d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
+ return d.ravel()
+
+
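+# Added illustrative sketch (hedged, not part of the original module):
+# `approx` returns a flat boolean array, so callers typically reduce it
+# with .all(); with the default fill_value=True, masked slots count as
+# equal.
+def _approx_demo():
+    a = masked_array([1.0, 2.0, 3.0], mask=[0, 0, 1])
+    b = masked_array([1.0, 2.0 + 1e-7, 999.0], mask=[0, 0, 1])
+    return approx(a, b).all()   # True under the default tolerances
+
+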
+def almost(a, b, decimal=6, fill_value=True):
+ """
+    Returns True if a and b are equal up to the given number of decimal
+    places.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal.
+
+ """
+ m = mask_or(getmask(a), getmask(b))
+ d1 = filled(a)
+ d2 = filled(b)
+ if d1.dtype.char == "O" or d2.dtype.char == "O":
+ return np.equal(d1, d2).ravel()
+ x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
+ y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
+ d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
+ return d.ravel()
+
+
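+# Added note (hedged): `almost` mirrors `approx` but applies a
+# decimal-places criterion (np.around) instead of the rtol/atol one.
+
+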
+def _assert_equal_on_sequences(actual, desired, err_msg=''):
+ """
+ Asserts the equality of two non-array sequences.
+
+ """
+ assert_equal(len(actual), len(desired), err_msg)
+ for k in range(len(desired)):
+ assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
+ return
+
+
+def assert_equal_records(a, b):
+ """
+ Asserts that two records are equal.
+
+ Pretty crude for now.
+
+ """
+ assert_equal(a.dtype, b.dtype)
+ for f in a.dtype.names:
+ (af, bf) = (operator.getitem(a, f), operator.getitem(b, f))
+ if not (af is masked) and not (bf is masked):
+ assert_equal(operator.getitem(a, f), operator.getitem(b, f))
+ return
+
+
+def assert_equal(actual, desired, err_msg=''):
+ """
+ Asserts that two items are equal.
+
+ """
+ # Case #1: dictionary .....
+ if isinstance(desired, dict):
+ if not isinstance(actual, dict):
+ raise AssertionError(repr(type(actual)))
+ assert_equal(len(actual), len(desired), err_msg)
+ for k, i in desired.items():
+ if k not in actual:
+ raise AssertionError(f"{k} not in {actual}")
+ assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
+ return
+ # Case #2: lists .....
+ if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+ return _assert_equal_on_sequences(actual, desired, err_msg='')
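+    # Case #3: scalars or other non-array objects .....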
+ if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)):
+ msg = build_err_msg([actual, desired], err_msg,)
+ if not desired == actual:
+ raise AssertionError(msg)
+ return
+    # Case #4: arrays or equivalent .....
+ if ((actual is masked) and not (desired is masked)) or \
+ ((desired is masked) and not (actual is masked)):
+ msg = build_err_msg([actual, desired],
+ err_msg, header='', names=('x', 'y'))
+ raise ValueError(msg)
+ actual = np.asanyarray(actual)
+ desired = np.asanyarray(desired)
+ (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype)
+ if actual_dtype.char == "S" and desired_dtype.char == "S":
+ return _assert_equal_on_sequences(actual.tolist(),
+ desired.tolist(),
+ err_msg='')
+ return assert_array_equal(actual, desired, err_msg)
+
+
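+# Added illustrative sketch (hedged, not part of the original module):
+# `assert_equal` dispatches on type -- dicts and sequences recurse, a
+# masked/unmasked mismatch raises ValueError, and anything array-like
+# falls through to assert_array_equal.
+def _assert_equal_demo():
+    assert_equal({'a': 1}, {'a': 1})            # dict branch
+    assert_equal([1, 2], (1, 2))                # sequence branch
+    assert_equal(masked_array([1, 2]), [1, 2])  # array branch
+
+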
+def fail_if_equal(actual, desired, err_msg='',):
+ """
+ Raises an assertion error if two items are equal.
+
+ """
+ if isinstance(desired, dict):
+ if not isinstance(actual, dict):
+ raise AssertionError(repr(type(actual)))
+ fail_if_equal(len(actual), len(desired), err_msg)
+ for k, i in desired.items():
+ if k not in actual:
+ raise AssertionError(repr(k))
+ fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
+ return
+ if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+ fail_if_equal(len(actual), len(desired), err_msg)
+ for k in range(len(desired)):
+ fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
+ return
+ if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+ return fail_if_array_equal(actual, desired, err_msg)
+ msg = build_err_msg([actual, desired], err_msg)
+ if not desired != actual:
+ raise AssertionError(msg)
+
+
+assert_not_equal = fail_if_equal
+
+
+def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
+ """
+ Asserts that two items are almost equal.
+
+ The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
+
+ """
+ if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+ return assert_array_almost_equal(actual, desired, decimal=decimal,
+ err_msg=err_msg, verbose=verbose)
+ msg = build_err_msg([actual, desired],
+ err_msg=err_msg, verbose=verbose)
+ if not round(abs(desired - actual), decimal) == 0:
+ raise AssertionError(msg)
+
+
+assert_close = assert_almost_equal
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+ fill_value=True):
+ """
+ Asserts that comparison between two masked arrays is satisfied.
+
+ The comparison is elementwise.
+
+ """
+ # Allocate a common mask and refill
+ m = mask_or(getmask(x), getmask(y))
+ x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+ y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
+ if ((x is masked) and not (y is masked)) or \
+ ((y is masked) and not (x is masked)):
+ msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
+ header=header, names=('x', 'y'))
+ raise ValueError(msg)
+ # OK, now run the basic tests on filled versions
+ return np.testing.assert_array_compare(comparison,
+ x.filled(fill_value),
+ y.filled(fill_value),
+ err_msg=err_msg,
+ verbose=verbose, header=header)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+ """
+ Checks the elementwise equality of two masked arrays.
+
+ """
+ assert_array_compare(operator.__eq__, x, y,
+ err_msg=err_msg, verbose=verbose,
+ header='Arrays are not equal')
+
+
+def fail_if_array_equal(x, y, err_msg='', verbose=True):
+ """
+ Raises an assertion error if two masked arrays are not equal elementwise.
+
+ """
+ def compare(x, y):
+ return (not np.alltrue(approx(x, y)))
+ assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+ header='Arrays are not equal')
+
+
+def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
+ """
+    Checks the equality of two masked arrays, up to a given number of
+    decimals.
+
+ The equality is checked elementwise.
+
+ """
+ def compare(x, y):
+ "Returns the result of the loose comparison between x and y)."
+ return approx(x, y, rtol=10. ** -decimal)
+ assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+ header='Arrays are not almost equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+ """
+    Checks the equality of two masked arrays, up to a given number of
+    decimals.
+
+ The equality is checked elementwise.
+
+ """
+ def compare(x, y):
+ "Returns the result of the loose comparison between x and y)."
+ return almost(x, y, decimal)
+ assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+ header='Arrays are not almost equal')
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+ """
+ Checks that x is smaller than y elementwise.
+
+ """
+ assert_array_compare(operator.__lt__, x, y,
+ err_msg=err_msg, verbose=verbose,
+ header='Arrays are not less-ordered')
+
+
+def assert_mask_equal(m1, m2, err_msg=''):
+ """
+ Asserts the equality of two masks.
+
+ """
+ if m1 is nomask:
+ assert_(m2 is nomask)
+ if m2 is nomask:
+ assert_(m1 is nomask)
+ assert_array_equal(m1, m2, err_msg=err_msg)
diff --git a/venv/lib/python3.9/site-packages/numpy/ma/timer_comparison.py b/venv/lib/python3.9/site-packages/numpy/ma/timer_comparison.py
new file mode 100644
index 00000000..9eb1a23c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/ma/timer_comparison.py
@@ -0,0 +1,443 @@
+import timeit
+from functools import reduce
+
+import numpy as np
+from numpy import float_
+import numpy.core.fromnumeric as fromnumeric
+
+from numpy.testing import build_err_msg
+
+
+pi = np.pi
+
+class ModuleTester:
+ def __init__(self, module):
+ self.module = module
+ self.allequal = module.allequal
+ self.arange = module.arange
+        self.array = module.array
+        # test_99 uses average, which numpy.ma.core does not re-export;
+        # fall back to numpy.ma so constructing the tester never fails.
+        self.average = getattr(module, 'average', np.ma.average)
+ self.concatenate = module.concatenate
+ self.count = module.count
+ self.equal = module.equal
+ self.filled = module.filled
+ self.getmask = module.getmask
+ self.getmaskarray = module.getmaskarray
+ self.id = id
+ self.inner = module.inner
+ self.make_mask = module.make_mask
+ self.masked = module.masked
+ self.masked_array = module.masked_array
+ self.masked_values = module.masked_values
+ self.mask_or = module.mask_or
+ self.nomask = module.nomask
+ self.ones = module.ones
+ self.outer = module.outer
+ self.repeat = module.repeat
+ self.resize = module.resize
+ self.sort = module.sort
+ self.take = module.take
+ self.transpose = module.transpose
+ self.zeros = module.zeros
+ self.MaskType = module.MaskType
+ try:
+ self.umath = module.umath
+ except AttributeError:
+ self.umath = module.core.umath
+ self.testnames = []
+
+ def assert_array_compare(self, comparison, x, y, err_msg='', header='',
+ fill_value=True):
+ """
+ Assert that a comparison of two masked arrays is satisfied elementwise.
+
+ """
+ xf = self.filled(x)
+ yf = self.filled(y)
+ m = self.mask_or(self.getmask(x), self.getmask(y))
+
+ x = self.filled(self.masked_array(xf, mask=m), fill_value)
+ y = self.filled(self.masked_array(yf, mask=m), fill_value)
+ if (x.dtype.char != "O"):
+ x = x.astype(float_)
+ if isinstance(x, np.ndarray) and x.size > 1:
+ x[np.isnan(x)] = 0
+ elif np.isnan(x):
+ x = 0
+ if (y.dtype.char != "O"):
+ y = y.astype(float_)
+ if isinstance(y, np.ndarray) and y.size > 1:
+ y[np.isnan(y)] = 0
+ elif np.isnan(y):
+ y = 0
+ try:
+ cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+ if not cond:
+ msg = build_err_msg([x, y],
+ err_msg
+ + f'\n(shapes {x.shape}, {y.shape} mismatch)',
+ header=header,
+ names=('x', 'y'))
+ assert cond, msg
+ val = comparison(x, y)
+ if m is not self.nomask and fill_value:
+ val = self.masked_array(val, mask=m)
+ if isinstance(val, bool):
+ cond = val
+ reduced = [0]
+ else:
+ reduced = val.ravel()
+ cond = reduced.all()
+ reduced = reduced.tolist()
+ if not cond:
+ match = 100-100.0*reduced.count(1)/len(reduced)
+ msg = build_err_msg([x, y],
+ err_msg
+ + '\n(mismatch %s%%)' % (match,),
+ header=header,
+ names=('x', 'y'))
+ assert cond, msg
+ except ValueError as e:
+ msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
+ raise ValueError(msg) from e
+
+ def assert_array_equal(self, x, y, err_msg=''):
+ """
+ Checks the elementwise equality of two masked arrays.
+
+ """
+ self.assert_array_compare(self.equal, x, y, err_msg=err_msg,
+ header='Arrays are not equal')
+
+ @np.errstate(all='ignore')
+ def test_0(self):
+ """
+ Tests creation
+
+ """
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ xm = self.masked_array(x, mask=m)
+ xm[0]
+
+ @np.errstate(all='ignore')
+ def test_1(self):
+ """
+ Tests creation
+
+ """
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = self.masked_array(x, mask=m1)
+ ym = self.masked_array(y, mask=m2)
+ xf = np.where(m1, 1.e+20, x)
+ xm.set_fill_value(1.e+20)
+
+ assert((xm-ym).filled(0).any())
+ s = x.shape
+ assert(xm.size == reduce(lambda x, y:x*y, s))
+ assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
+
+ for s in [(4, 3), (6, 2)]:
+ x.shape = s
+ y.shape = s
+ xm.shape = s
+ ym.shape = s
+ xf.shape = s
+ assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
+
+ @np.errstate(all='ignore')
+ def test_2(self):
+ """
+ Tests conversions and indexing.
+
+ """
+ x1 = np.array([1, 2, 4, 3])
+ x2 = self.array(x1, mask=[1, 0, 0, 0])
+ x3 = self.array(x1, mask=[0, 1, 0, 1])
+ x4 = self.array(x1)
+ # test conversion to strings, no errors
+ str(x2)
+ repr(x2)
+ # tests of indexing
+ assert type(x2[1]) is type(x1[1])
+ assert x1[1] == x2[1]
+ x1[2] = 9
+ x2[2] = 9
+ self.assert_array_equal(x1, x2)
+ x1[1:3] = 99
+ x2[1:3] = 99
+ x2[1] = self.masked
+ x2[1:3] = self.masked
+ x2[:] = x1
+ x2[1] = self.masked
+ x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ x1 = np.arange(5)*1.0
+ x2 = self.masked_values(x1, 3.0)
+ x1 = self.array([1, 'hello', 2, 3], object)
+ x2 = np.array([1, 'hello', 2, 3], object)
+ # check that no error occurs.
+ x1[1]
+ x2[1]
+ assert x1[1:1].shape == (0,)
+ # Tests copy-size
+ n = [0, 0, 1, 0, 0]
+ m = self.make_mask(n)
+ m2 = self.make_mask(m)
+ assert(m is m2)
+ m3 = self.make_mask(m, copy=1)
+ assert(m is not m3)
+
+ @np.errstate(all='ignore')
+ def test_3(self):
+ """
+ Tests resize/repeat
+
+ """
+ x4 = self.arange(4)
+ x4[2] = self.masked
+ y4 = self.resize(x4, (8,))
+ assert self.allequal(self.concatenate([x4, x4]), y4)
+ assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
+ y5 = self.repeat(x4, (2, 2, 2, 2), axis=0)
+ self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
+ y6 = self.repeat(x4, 2, axis=0)
+ assert self.allequal(y5, y6)
+ y7 = x4.repeat((2, 2, 2, 2), axis=0)
+ assert self.allequal(y5, y7)
+ y8 = x4.repeat(2, 0)
+ assert self.allequal(y5, y8)
+
+ @np.errstate(all='ignore')
+ def test_4(self):
+ """
+ Test of take, transpose, inner, outer products.
+
+ """
+ x = self.arange(24)
+ y = np.arange(24)
+ x[5:6] = self.masked
+ x = x.reshape(2, 3, 4)
+ y = y.reshape(2, 3, 4)
+ assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
+ assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
+ assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
+ self.inner(x, y))
+ assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
+ self.outer(x, y))
+ y = self.array(['abc', 1, 'def', 2, 3], object)
+ y[2] = self.masked
+ t = self.take(y, [0, 3, 4])
+ assert t[0] == 'abc'
+ assert t[1] == 2
+ assert t[2] == 3
+
+ @np.errstate(all='ignore')
+ def test_5(self):
+ """
+ Tests inplace w/ scalar
+
+ """
+ x = self.arange(10)
+ y = self.arange(10)
+ xm = self.arange(10)
+ xm[2] = self.masked
+ x += 1
+ assert self.allequal(x, y+1)
+ xm += 1
+ assert self.allequal(xm, y+1)
+
+ x = self.arange(10)
+ xm = self.arange(10)
+ xm[2] = self.masked
+ x -= 1
+ assert self.allequal(x, y-1)
+ xm -= 1
+ assert self.allequal(xm, y-1)
+
+ x = self.arange(10)*1.0
+ xm = self.arange(10)*1.0
+ xm[2] = self.masked
+ x *= 2.0
+ assert self.allequal(x, y*2)
+ xm *= 2.0
+ assert self.allequal(xm, y*2)
+
+ x = self.arange(10)*2
+ xm = self.arange(10)*2
+ xm[2] = self.masked
+        x //= 2
+ assert self.allequal(x, y)
+        xm //= 2
+ assert self.allequal(xm, y)
+
+ x = self.arange(10)*1.0
+ xm = self.arange(10)*1.0
+ xm[2] = self.masked
+ x /= 2.0
+ assert self.allequal(x, y/2.0)
+ xm /= self.arange(10)
+ self.assert_array_equal(xm, self.ones((10,)))
+
+ x = self.arange(10).astype(float_)
+ xm = self.arange(10)
+ xm[2] = self.masked
+ x += 1.
+ assert self.allequal(x, y + 1.)
+
+ @np.errstate(all='ignore')
+ def test_6(self):
+ """
+ Tests inplace w/ array
+
+ """
+ x = self.arange(10, dtype=float_)
+ y = self.arange(10)
+ xm = self.arange(10, dtype=float_)
+ xm[2] = self.masked
+ m = xm.mask
+ a = self.arange(10, dtype=float_)
+ a[-1] = self.masked
+ x += a
+ xm += a
+ assert self.allequal(x, y+a)
+ assert self.allequal(xm, y+a)
+ assert self.allequal(xm.mask, self.mask_or(m, a.mask))
+
+ x = self.arange(10, dtype=float_)
+ xm = self.arange(10, dtype=float_)
+ xm[2] = self.masked
+ m = xm.mask
+ a = self.arange(10, dtype=float_)
+ a[-1] = self.masked
+ x -= a
+ xm -= a
+ assert self.allequal(x, y-a)
+ assert self.allequal(xm, y-a)
+ assert self.allequal(xm.mask, self.mask_or(m, a.mask))
+
+ x = self.arange(10, dtype=float_)
+ xm = self.arange(10, dtype=float_)
+ xm[2] = self.masked
+ m = xm.mask
+ a = self.arange(10, dtype=float_)
+ a[-1] = self.masked
+ x *= a
+ xm *= a
+ assert self.allequal(x, y*a)
+ assert self.allequal(xm, y*a)
+ assert self.allequal(xm.mask, self.mask_or(m, a.mask))
+
+ x = self.arange(10, dtype=float_)
+ xm = self.arange(10, dtype=float_)
+ xm[2] = self.masked
+ m = xm.mask
+ a = self.arange(10, dtype=float_)
+ a[-1] = self.masked
+ x /= a
+ xm /= a
+
+ @np.errstate(all='ignore')
+ def test_7(self):
+ "Tests ufunc"
+ d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6),
+ self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),)
+ for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
+# 'sin', 'cos', 'tan',
+# 'arcsin', 'arccos', 'arctan',
+# 'sinh', 'cosh', 'tanh',
+# 'arcsinh',
+# 'arccosh',
+# 'arctanh',
+# 'absolute', 'fabs', 'negative',
+# # 'nonzero', 'around',
+# 'floor', 'ceil',
+# # 'sometrue', 'alltrue',
+# 'logical_not',
+# 'add', 'subtract', 'multiply',
+# 'divide', 'true_divide', 'floor_divide',
+# 'remainder', 'fmod', 'hypot', 'arctan2',
+# 'equal', 'not_equal', 'less_equal', 'greater_equal',
+# 'less', 'greater',
+# 'logical_and', 'logical_or', 'logical_xor',
+ ]:
+ try:
+ uf = getattr(self.umath, f)
+ except AttributeError:
+ uf = getattr(fromnumeric, f)
+ mf = getattr(self.module, f)
+ args = d[:uf.nin]
+ ur = uf(*args)
+ mr = mf(*args)
+ self.assert_array_equal(ur.filled(0), mr.filled(0), f)
+ self.assert_array_equal(ur._mask, mr._mask)
+
+ @np.errstate(all='ignore')
+ def test_99(self):
+ # test average
+ ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+ self.assert_array_equal(2.0, self.average(ott, axis=0))
+ self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.]))
+ result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1)
+ self.assert_array_equal(2.0, result)
+ assert(wts == 4.0)
+ ott[:] = self.masked
+ assert(self.average(ott, axis=0) is self.masked)
+ ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
+ ott = ott.reshape(2, 2)
+ ott[:, 1] = self.masked
+ self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0])
+ assert(self.average(ott, axis=1)[0] is self.masked)
+ self.assert_array_equal([2., 0.], self.average(ott, axis=0))
+ result, wts = self.average(ott, axis=0, returned=1)
+ self.assert_array_equal(wts, [1., 0.])
+ w1 = [0, 1, 1, 1, 1, 0]
+ w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+ x = self.arange(6)
+ self.assert_array_equal(self.average(x, axis=0), 2.5)
+ self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5)
+ y = self.array([self.arange(6), 2.0*self.arange(6)])
+ self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.)
+ self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.)
+ self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
+ self.assert_array_equal(self.average(y, None, weights=w2), 20./6.)
+ self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])
+ self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
+ m1 = self.zeros(6)
+ m2 = [0, 0, 1, 1, 0, 0]
+ m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
+ m4 = self.ones(6)
+ m5 = [0, 1, 1, 1, 1, 1]
+ self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5)
+ self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5)
+ self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0)
+ self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0)
+ z = self.masked_array(y, m3)
+ self.assert_array_equal(self.average(z, None), 20./6.)
+ self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
+ self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
+ self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])
+
+ @np.errstate(all='ignore')
+ def test_A(self):
+ x = self.arange(24)
+ x[5:6] = self.masked
+ x = x.reshape(2, 3, 4)
+
+
+if __name__ == '__main__':
+ setup_base = ("from __main__ import ModuleTester \n"
+ "import numpy\n"
+ "tester = ModuleTester(module)\n")
+ setup_cur = "import numpy.ma.core as module\n" + setup_base
+ (nrepeat, nloop) = (10, 10)
+
+ for i in range(1, 8):
+ func = 'tester.test_%i()' % i
+ cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
+ cur = np.sort(cur)
+ print("#%i" % i + 50*'.')
+ print(eval("ModuleTester.test_%i.__doc__" % i))
+ print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}')
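The timing loop above leans on `timeit.Timer.repeat`, which returns one total elapsed time per run; sorting and reading off the smallest entries filters out scheduler noise. A self-contained sketch of the same pattern, with a stand-in statement instead of the `ModuleTester` methods:

    import timeit
    import numpy as np

    # repeat=10 runs of 100 loops each; repeat() returns total seconds per
    # run, so the smallest values are the least noisy estimates.
    t = timeit.Timer('np.sort(a)',
                     'import numpy as np; a = np.random.rand(1000)')
    times = np.sort(t.repeat(repeat=10, number=100))
    print(f'best: {times[0]:.3f}s, runner-up: {times[1]:.3f}s')
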
diff --git a/venv/lib/python3.9/site-packages/numpy/matlib.py b/venv/lib/python3.9/site-packages/numpy/matlib.py
new file mode 100644
index 00000000..e929fd9b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matlib.py
@@ -0,0 +1,378 @@
+import warnings
+
+# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
+# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
+warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. "
+ "The matrix subclass is not the recommended way to represent "
+ "matrices or deal with linear algebra (see "
+ "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). "
+ "Please adjust your code to use regular ndarray. ",
+ PendingDeprecationWarning, stacklevel=2)
+
+import numpy as np
+from numpy.matrixlib.defmatrix import matrix, asmatrix
+# Matlib.py contains all functions in the numpy namespace with a few
+# replacements. See doc/source/reference/routines.matlib.rst for details.
+# Need * as we're copying the numpy namespace.
+from numpy import * # noqa: F403
+
+__version__ = np.__version__
+
+__all__ = np.__all__[:] # copy numpy namespace
+__all__ += ['rand', 'randn', 'repmat']
+
+def empty(shape, dtype=None, order='C'):
+ """Return a new matrix of given shape and type, without initializing entries.
+
+ Parameters
+ ----------
+ shape : int or tuple of int
+ Shape of the empty matrix.
+ dtype : data-type, optional
+ Desired output data-type.
+ order : {'C', 'F'}, optional
+ Whether to store multi-dimensional data in row-major
+ (C-style) or column-major (Fortran-style) order in
+ memory.
+
+ See Also
+ --------
+ empty_like, zeros
+
+ Notes
+ -----
+ `empty`, unlike `zeros`, does not set the matrix values to zero,
+ and may therefore be marginally faster. On the other hand, it requires
+ the user to manually set all the values in the array, and should be
+ used with caution.
+
+ Examples
+ --------
+ >>> import numpy.matlib
+ >>> np.matlib.empty((2, 2)) # filled with random data
+ matrix([[ 6.76425276e-320, 9.79033856e-307], # random
+ [ 7.39337286e-309, 3.22135945e-309]])
+ >>> np.matlib.empty((2, 2), dtype=int)
+ matrix([[ 6600475, 0], # random
+ [ 6586976, 22740995]])
+
+ """
+ return ndarray.__new__(matrix, shape, dtype, order=order)
+
+def ones(shape, dtype=None, order='C'):
+ """
+ Matrix of ones.
+
+ Return a matrix of given shape and type, filled with ones.
+
+ Parameters
+ ----------
+ shape : {sequence of ints, int}
+ Shape of the matrix
+ dtype : data-type, optional
+ The desired data-type for the matrix, default is np.float64.
+ order : {'C', 'F'}, optional
+ Whether to store matrix in C- or Fortran-contiguous order,
+ default is 'C'.
+
+ Returns
+ -------
+ out : matrix
+ Matrix of ones of given shape, dtype, and order.
+
+ See Also
+ --------
+ ones : Array of ones.
+ matlib.zeros : Zero matrix.
+
+ Notes
+ -----
+    If `shape` has length one, i.e. ``(N,)``, or is a scalar ``N``,
+    `out` becomes a single row matrix of shape ``(1, N)``.
+
+ Examples
+ --------
+ >>> np.matlib.ones((2,3))
+ matrix([[1., 1., 1.],
+ [1., 1., 1.]])
+
+ >>> np.matlib.ones(2)
+ matrix([[1., 1.]])
+
+ """
+ a = ndarray.__new__(matrix, shape, dtype, order=order)
+ a.fill(1)
+ return a
+
+def zeros(shape, dtype=None, order='C'):
+ """
+ Return a matrix of given shape and type, filled with zeros.
+
+ Parameters
+ ----------
+ shape : int or sequence of ints
+ Shape of the matrix
+ dtype : data-type, optional
+ The desired data-type for the matrix, default is float.
+ order : {'C', 'F'}, optional
+ Whether to store the result in C- or Fortran-contiguous order,
+ default is 'C'.
+
+ Returns
+ -------
+ out : matrix
+ Zero matrix of given shape, dtype, and order.
+
+ See Also
+ --------
+ numpy.zeros : Equivalent array function.
+ matlib.ones : Return a matrix of ones.
+
+ Notes
+ -----
+    If `shape` has length one, i.e. ``(N,)``, or is a scalar ``N``,
+    `out` becomes a single row matrix of shape ``(1, N)``.
+
+ Examples
+ --------
+ >>> import numpy.matlib
+ >>> np.matlib.zeros((2, 3))
+ matrix([[0., 0., 0.],
+ [0., 0., 0.]])
+
+ >>> np.matlib.zeros(2)
+ matrix([[0., 0.]])
+
+ """
+ a = ndarray.__new__(matrix, shape, dtype, order=order)
+ a.fill(0)
+ return a
+
+def identity(n, dtype=None):
+ """
+ Returns the square identity matrix of given size.
+
+ Parameters
+ ----------
+ n : int
+ Size of the returned identity matrix.
+ dtype : data-type, optional
+ Data-type of the output. Defaults to ``float``.
+
+ Returns
+ -------
+ out : matrix
+ `n` x `n` matrix with its main diagonal set to one,
+ and all other elements zero.
+
+ See Also
+ --------
+ numpy.identity : Equivalent array function.
+ matlib.eye : More general matrix identity function.
+
+ Examples
+ --------
+ >>> import numpy.matlib
+ >>> np.matlib.identity(3, dtype=int)
+ matrix([[1, 0, 0],
+ [0, 1, 0],
+ [0, 0, 1]])
+
+ """
+ a = array([1]+n*[0], dtype=dtype)
+ b = empty((n, n), dtype=dtype)
+ b.flat = a
+ return b
+
+def eye(n, M=None, k=0, dtype=float, order='C'):
+ """
+ Return a matrix with ones on the diagonal and zeros elsewhere.
+
+ Parameters
+ ----------
+ n : int
+ Number of rows in the output.
+ M : int, optional
+ Number of columns in the output, defaults to `n`.
+ k : int, optional
+ Index of the diagonal: 0 refers to the main diagonal,
+ a positive value refers to an upper diagonal,
+ and a negative value to a lower diagonal.
+ dtype : dtype, optional
+ Data-type of the returned matrix.
+ order : {'C', 'F'}, optional
+ Whether the output should be stored in row-major (C-style) or
+ column-major (Fortran-style) order in memory.
+
+ .. versionadded:: 1.14.0
+
+ Returns
+ -------
+ I : matrix
+        An `n` x `M` matrix where all elements are equal to zero,
+ except for the `k`-th diagonal, whose values are equal to one.
+
+ See Also
+ --------
+ numpy.eye : Equivalent array function.
+ identity : Square identity matrix.
+
+ Examples
+ --------
+ >>> import numpy.matlib
+ >>> np.matlib.eye(3, k=1, dtype=float)
+ matrix([[0., 1., 0.],
+ [0., 0., 1.],
+ [0., 0., 0.]])
+
+ """
+ return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
+
+def rand(*args):
+ """
+ Return a matrix of random values with given shape.
+
+    Create a matrix of the given shape and populate it with
+ random samples from a uniform distribution over ``[0, 1)``.
+
+ Parameters
+ ----------
+ \\*args : Arguments
+ Shape of the output.
+ If given as N integers, each integer specifies the size of one
+ dimension.
+ If given as a tuple, this tuple gives the complete shape.
+
+ Returns
+ -------
+    out : matrix
+ The matrix of random values with shape given by `\\*args`.
+
+ See Also
+ --------
+ randn, numpy.random.RandomState.rand
+
+ Examples
+ --------
+ >>> np.random.seed(123)
+ >>> import numpy.matlib
+ >>> np.matlib.rand(2, 3)
+ matrix([[0.69646919, 0.28613933, 0.22685145],
+ [0.55131477, 0.71946897, 0.42310646]])
+ >>> np.matlib.rand((2, 3))
+ matrix([[0.9807642 , 0.68482974, 0.4809319 ],
+ [0.39211752, 0.34317802, 0.72904971]])
+
+ If the first argument is a tuple, other arguments are ignored:
+
+ >>> np.matlib.rand((2, 3), 4)
+ matrix([[0.43857224, 0.0596779 , 0.39804426],
+ [0.73799541, 0.18249173, 0.17545176]])
+
+ """
+ if isinstance(args[0], tuple):
+ args = args[0]
+ return asmatrix(np.random.rand(*args))
+
+def randn(*args):
+ """
+ Return a random matrix with data from the "standard normal" distribution.
+
+ `randn` generates a matrix filled with random floats sampled from a
+ univariate "normal" (Gaussian) distribution of mean 0 and variance 1.
+
+ Parameters
+ ----------
+ \\*args : Arguments
+ Shape of the output.
+ If given as N integers, each integer specifies the size of one
+ dimension. If given as a tuple, this tuple gives the complete shape.
+
+ Returns
+ -------
+ Z : matrix of floats
+ A matrix of floating-point samples drawn from the standard normal
+ distribution.
+
+ See Also
+ --------
+ rand, numpy.random.RandomState.randn
+
+ Notes
+ -----
+ For random samples from the normal distribution with mean ``mu`` and
+ standard deviation ``sigma``, use::
+
+ sigma * np.matlib.randn(...) + mu
+
+ Examples
+ --------
+ >>> np.random.seed(123)
+ >>> import numpy.matlib
+ >>> np.matlib.randn(1)
+ matrix([[-1.0856306]])
+ >>> np.matlib.randn(1, 2, 3)
+ matrix([[ 0.99734545, 0.2829785 , -1.50629471],
+ [-0.57860025, 1.65143654, -2.42667924]])
+
+ Two-by-four matrix of samples from the normal distribution with
+ mean 3 and standard deviation 2.5:
+
+ >>> 2.5 * np.matlib.randn((2, 4)) + 3
+ matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462],
+ [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]])
+
+ """
+ if isinstance(args[0], tuple):
+ args = args[0]
+ return asmatrix(np.random.randn(*args))
+
+def repmat(a, m, n):
+ """
+ Repeat a 0-D to 2-D array or matrix MxN times.
+
+ Parameters
+ ----------
+ a : array_like
+ The array or matrix to be repeated.
+ m, n : int
+ The number of times `a` is repeated along the first and second axes.
+
+ Returns
+ -------
+ out : ndarray
+ The result of repeating `a`.
+
+ Examples
+ --------
+ >>> import numpy.matlib
+ >>> a0 = np.array(1)
+ >>> np.matlib.repmat(a0, 2, 3)
+ array([[1, 1, 1],
+ [1, 1, 1]])
+
+ >>> a1 = np.arange(4)
+ >>> np.matlib.repmat(a1, 2, 2)
+ array([[0, 1, 2, 3, 0, 1, 2, 3],
+ [0, 1, 2, 3, 0, 1, 2, 3]])
+
+ >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3))
+ >>> np.matlib.repmat(a2, 2, 3)
+ matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2],
+ [3, 4, 5, 3, 4, 5, 3, 4, 5],
+ [0, 1, 2, 0, 1, 2, 0, 1, 2],
+ [3, 4, 5, 3, 4, 5, 3, 4, 5]])
+
+ """
+ a = asanyarray(a)
+ ndim = a.ndim
+ if ndim == 0:
+ origrows, origcols = (1, 1)
+ elif ndim == 1:
+ origrows, origcols = (1, a.shape[0])
+ else:
+ origrows, origcols = a.shape
+ rows = origrows * m
+ cols = origcols * n
+ c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0)
+ return c.reshape(rows, cols)
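
Taken together, the constructors above differ from their `numpy` counterparts only in returning `np.matrix` (hence always 2-D). A small usage sketch under that assumption:

    import numpy as np
    import numpy.matlib as matlib

    z = matlib.zeros(3)                    # (1, 3) row matrix, not (3,)
    i = matlib.eye(3, k=1)                 # ones on the first superdiagonal
    r = matlib.repmat(np.arange(3), 2, 2)  # 1-D input tiled into a 2x6 block
    print(z.shape, i.shape, r.shape)       # (1, 3) (3, 3) (2, 6)
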
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.py
new file mode 100644
index 00000000..8a7597d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.py
@@ -0,0 +1,11 @@
+"""Sub-package containing the matrix class and related functions.
+
+"""
+from . import defmatrix
+from .defmatrix import *
+
+__all__ = defmatrix.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi
new file mode 100644
index 00000000..b0ca8c9c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/__init__.pyi
@@ -0,0 +1,15 @@
+from numpy._pytesttester import PytestTester
+
+from numpy import (
+ matrix as matrix,
+)
+
+from numpy.matrixlib.defmatrix import (
+ bmat as bmat,
+ mat as mat,
+ asmatrix as asmatrix,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py
new file mode 100644
index 00000000..a414ee9b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.py
@@ -0,0 +1,1113 @@
+__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
+
+import sys
+import warnings
+import ast
+import numpy.core.numeric as N
+from numpy.core.numeric import concatenate, isscalar
+from numpy.core.overrides import set_module
+# While not in __all__, matrix_power used to be defined here, so we import
+# it for backward compatibility.
+from numpy.linalg import matrix_power
+
+
+def _convert_from_string(data):
+ for char in '[]':
+ data = data.replace(char, '')
+
+ rows = data.split(';')
+ newdata = []
+ count = 0
+ for row in rows:
+ trow = row.split(',')
+ newrow = []
+ for col in trow:
+ temp = col.split()
+ newrow.extend(map(ast.literal_eval, temp))
+ if count == 0:
+ Ncols = len(newrow)
+ elif len(newrow) != Ncols:
+ raise ValueError("Rows not the same size.")
+ count += 1
+ newdata.append(newrow)
+ return newdata
+
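+# Illustrative behavior of the parser above (a sketch, not exhaustive):
+#     _convert_from_string('1 2; 3 4')  ->  [[1, 2], [3, 4]]
+#     _convert_from_string('[1, 2.5]')  ->  [[1, 2.5]]
+# Ragged input such as '1 2; 3' raises ValueError("Rows not the same size.").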
+
+@set_module('numpy')
+def asmatrix(data, dtype=None):
+ """
+ Interpret the input as a matrix.
+
+ Unlike `matrix`, `asmatrix` does not make a copy if the input is already
+ a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
+
+ Parameters
+ ----------
+ data : array_like
+ Input data.
+ dtype : data-type
+ Data-type of the output matrix.
+
+ Returns
+ -------
+ mat : matrix
+ `data` interpreted as a matrix.
+
+ Examples
+ --------
+ >>> x = np.array([[1, 2], [3, 4]])
+
+ >>> m = np.asmatrix(x)
+
+ >>> x[0,0] = 5
+
+ >>> m
+ matrix([[5, 2],
+ [3, 4]])
+
+ """
+ return matrix(data, dtype=dtype, copy=False)
+
+
+@set_module('numpy')
+class matrix(N.ndarray):
+ """
+ matrix(data, dtype=None, copy=True)
+
+ .. note:: It is no longer recommended to use this class, even for linear
+ algebra. Instead use regular arrays. The class may be removed
+ in the future.
+
+ Returns a matrix from an array-like object, or from a string of data.
+ A matrix is a specialized 2-D array that retains its 2-D nature
+ through operations. It has certain special operators, such as ``*``
+ (matrix multiplication) and ``**`` (matrix power).
+
+ Parameters
+ ----------
+ data : array_like or string
+ If `data` is a string, it is interpreted as a matrix with commas
+ or spaces separating columns, and semicolons separating rows.
+ dtype : data-type
+ Data-type of the output matrix.
+ copy : bool
+ If `data` is already an `ndarray`, then this flag determines
+ whether the data is copied (the default), or whether a view is
+ constructed.
+
+ See Also
+ --------
+ array
+
+ Examples
+ --------
+ >>> a = np.matrix('1 2; 3 4')
+ >>> a
+ matrix([[1, 2],
+ [3, 4]])
+
+ >>> np.matrix([[1, 2], [3, 4]])
+ matrix([[1, 2],
+ [3, 4]])
+
+ """
+ __array_priority__ = 10.0
+ def __new__(subtype, data, dtype=None, copy=True):
+ warnings.warn('the matrix subclass is not the recommended way to '
+ 'represent matrices or deal with linear algebra (see '
+ 'https://docs.scipy.org/doc/numpy/user/'
+ 'numpy-for-matlab-users.html). '
+ 'Please adjust your code to use regular ndarray.',
+ PendingDeprecationWarning, stacklevel=2)
+ if isinstance(data, matrix):
+ dtype2 = data.dtype
+ if (dtype is None):
+ dtype = dtype2
+ if (dtype2 == dtype) and (not copy):
+ return data
+ return data.astype(dtype)
+
+ if isinstance(data, N.ndarray):
+ if dtype is None:
+ intype = data.dtype
+ else:
+ intype = N.dtype(dtype)
+ new = data.view(subtype)
+ if intype != data.dtype:
+ return new.astype(intype)
+ if copy: return new.copy()
+ else: return new
+
+ if isinstance(data, str):
+ data = _convert_from_string(data)
+
+ # now convert data to an array
+ arr = N.array(data, dtype=dtype, copy=copy)
+ ndim = arr.ndim
+ shape = arr.shape
+ if (ndim > 2):
+ raise ValueError("matrix must be 2-dimensional")
+ elif ndim == 0:
+ shape = (1, 1)
+ elif ndim == 1:
+ shape = (1, shape[0])
+
+ order = 'C'
+ if (ndim == 2) and arr.flags.fortran:
+ order = 'F'
+
+ if not (order or arr.flags.contiguous):
+ arr = arr.copy()
+
+ ret = N.ndarray.__new__(subtype, shape, arr.dtype,
+ buffer=arr,
+ order=order)
+ return ret
+
+ def __array_finalize__(self, obj):
+ self._getitem = False
+ if (isinstance(obj, matrix) and obj._getitem): return
+ ndim = self.ndim
+ if (ndim == 2):
+ return
+ if (ndim > 2):
+ newshape = tuple([x for x in self.shape if x > 1])
+ ndim = len(newshape)
+ if ndim == 2:
+ self.shape = newshape
+ return
+ elif (ndim > 2):
+ raise ValueError("shape too large to be a matrix.")
+ else:
+ newshape = self.shape
+ if ndim == 0:
+ self.shape = (1, 1)
+ elif ndim == 1:
+ self.shape = (1, newshape[0])
+ return
+
+ def __getitem__(self, index):
+ self._getitem = True
+
+ try:
+ out = N.ndarray.__getitem__(self, index)
+ finally:
+ self._getitem = False
+
+ if not isinstance(out, N.ndarray):
+ return out
+
+ if out.ndim == 0:
+ return out[()]
+ if out.ndim == 1:
+ sh = out.shape[0]
+ # Determine when we should have a column array
+ try:
+ n = len(index)
+ except Exception:
+ n = 0
+ if n > 1 and isscalar(index[1]):
+ out.shape = (sh, 1)
+ else:
+ out.shape = (1, sh)
+ return out
+
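+    # Shape rules enforced by __getitem__ above, sketched on a 2x2 matrix:
+    #     m = matrix([[1, 2], [3, 4]])
+    #     m[0]     -> matrix([[1, 2]])    1-D results become (1, N) rows
+    #     m[:, 1]  -> matrix([[2], [4]])  scalar second index -> (N, 1) column
+    #     m[0, 0]  -> 1                   0-d results collapse to scalars
+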
+ def __mul__(self, other):
+ if isinstance(other, (N.ndarray, list, tuple)) :
+ # This promotes 1-D vectors to row vectors
+ return N.dot(self, asmatrix(other))
+ if isscalar(other) or not hasattr(other, '__rmul__') :
+ return N.dot(self, other)
+ return NotImplemented
+
+ def __rmul__(self, other):
+ return N.dot(other, self)
+
+ def __imul__(self, other):
+ self[:] = self * other
+ return self
+
+ def __pow__(self, other):
+ return matrix_power(self, other)
+
+ def __ipow__(self, other):
+ self[:] = self ** other
+ return self
+
+ def __rpow__(self, other):
+ return NotImplemented
+
+ def _align(self, axis):
+ """A convenience function for operations that need to preserve axis
+ orientation.
+ """
+ if axis is None:
+ return self[0, 0]
+ elif axis==0:
+ return self
+ elif axis==1:
+ return self.transpose()
+ else:
+ raise ValueError("unsupported axis")
+
+ def _collapse(self, axis):
+ """A convenience function for operations that want to collapse
+ to a scalar like _align, but are using keepdims=True
+ """
+ if axis is None:
+ return self[0, 0]
+ else:
+ return self
+
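+    # How _align and _collapse keep reductions 2-D (illustrative sketch):
+    #     m = matrix([[1, 2], [3, 4]])
+    #     m.sum(axis=0)  -> matrix([[4, 6]])    keepdims result passes through
+    #     m.sum(axis=1)  -> matrix([[3], [7]])
+    #     m.sum()        -> 10                  axis=None collapses to m[0, 0]
+    #     m.argmax(1)    -> matrix([[1], [1]])  _align transposes axis-1 output
+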
+ # Necessary because base-class tolist expects dimension
+ # reduction by x[0]
+ def tolist(self):
+ """
+ Return the matrix as a (possibly nested) list.
+
+ See `ndarray.tolist` for full documentation.
+
+ See Also
+ --------
+ ndarray.tolist
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.tolist()
+ [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
+
+ """
+ return self.__array__().tolist()
+
+ # To preserve orientation of result...
+ def sum(self, axis=None, dtype=None, out=None):
+ """
+ Returns the sum of the matrix elements, along the given axis.
+
+ Refer to `numpy.sum` for full documentation.
+
+ See Also
+ --------
+ numpy.sum
+
+ Notes
+ -----
+ This is the same as `ndarray.sum`, except that where an `ndarray` would
+ be returned, a `matrix` object is returned instead.
+
+ Examples
+ --------
+ >>> x = np.matrix([[1, 2], [4, 3]])
+ >>> x.sum()
+ 10
+ >>> x.sum(axis=1)
+ matrix([[3],
+ [7]])
+ >>> x.sum(axis=1, dtype='float')
+ matrix([[3.],
+ [7.]])
+ >>> out = np.zeros((2, 1), dtype='float')
+ >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out))
+ matrix([[3.],
+ [7.]])
+
+ """
+ return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
+
+
+ # To update docstring from array to matrix...
+ def squeeze(self, axis=None):
+ """
+ Return a possibly reshaped matrix.
+
+ Refer to `numpy.squeeze` for more documentation.
+
+ Parameters
+ ----------
+ axis : None or int or tuple of ints, optional
+ Selects a subset of the axes of length one in the shape.
+ If an axis is selected with shape entry greater than one,
+ an error is raised.
+
+ Returns
+ -------
+ squeezed : matrix
+ The matrix, but as a (1, N) matrix if it had shape (N, 1).
+
+ See Also
+ --------
+ numpy.squeeze : related function
+
+ Notes
+ -----
+ If `m` has a single column then that column is returned
+ as the single row of a matrix. Otherwise `m` is returned.
+ The returned matrix is always either `m` itself or a view into `m`.
+ Supplying an axis keyword argument will not affect the returned matrix
+ but it may cause an error to be raised.
+
+ Examples
+ --------
+ >>> c = np.matrix([[1], [2]])
+ >>> c
+ matrix([[1],
+ [2]])
+ >>> c.squeeze()
+ matrix([[1, 2]])
+ >>> r = c.T
+ >>> r
+ matrix([[1, 2]])
+ >>> r.squeeze()
+ matrix([[1, 2]])
+ >>> m = np.matrix([[1, 2], [3, 4]])
+ >>> m.squeeze()
+ matrix([[1, 2],
+ [3, 4]])
+
+ """
+ return N.ndarray.squeeze(self, axis=axis)
+
+
+ # To update docstring from array to matrix...
+ def flatten(self, order='C'):
+ """
+ Return a flattened copy of the matrix.
+
+ All `N` elements of the matrix are placed into a single row.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ 'C' means to flatten in row-major (C-style) order. 'F' means to
+ flatten in column-major (Fortran-style) order. 'A' means to
+ flatten in column-major order if `m` is Fortran *contiguous* in
+ memory, row-major order otherwise. 'K' means to flatten `m` in
+ the order the elements occur in memory. The default is 'C'.
+
+ Returns
+ -------
+ y : matrix
+ A copy of the matrix, flattened to a `(1, N)` matrix where `N`
+ is the number of elements in the original matrix.
+
+ See Also
+ --------
+ ravel : Return a flattened array.
+ flat : A 1-D flat iterator over the matrix.
+
+ Examples
+ --------
+ >>> m = np.matrix([[1,2], [3,4]])
+ >>> m.flatten()
+ matrix([[1, 2, 3, 4]])
+ >>> m.flatten('F')
+ matrix([[1, 3, 2, 4]])
+
+ """
+ return N.ndarray.flatten(self, order=order)
+
+ def mean(self, axis=None, dtype=None, out=None):
+ """
+ Returns the average of the matrix elements along the given axis.
+
+ Refer to `numpy.mean` for full documentation.
+
+ See Also
+ --------
+ numpy.mean
+
+ Notes
+ -----
+ Same as `ndarray.mean` except that, where that returns an `ndarray`,
+ this returns a `matrix` object.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3, 4)))
+ >>> x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.mean()
+ 5.5
+ >>> x.mean(0)
+ matrix([[4., 5., 6., 7.]])
+ >>> x.mean(1)
+ matrix([[ 1.5],
+ [ 5.5],
+ [ 9.5]])
+
+ """
+ return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
+
+ def std(self, axis=None, dtype=None, out=None, ddof=0):
+ """
+ Return the standard deviation of the array elements along the given axis.
+
+ Refer to `numpy.std` for full documentation.
+
+ See Also
+ --------
+ numpy.std
+
+ Notes
+ -----
+ This is the same as `ndarray.std`, except that where an `ndarray` would
+ be returned, a `matrix` object is returned instead.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3, 4)))
+ >>> x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.std()
+ 3.4520525295346629 # may vary
+ >>> x.std(0)
+ matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary
+ >>> x.std(1)
+ matrix([[ 1.11803399],
+ [ 1.11803399],
+ [ 1.11803399]])
+
+ """
+ return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
+
+ def var(self, axis=None, dtype=None, out=None, ddof=0):
+ """
+ Returns the variance of the matrix elements, along the given axis.
+
+ Refer to `numpy.var` for full documentation.
+
+ See Also
+ --------
+ numpy.var
+
+ Notes
+ -----
+ This is the same as `ndarray.var`, except that where an `ndarray` would
+ be returned, a `matrix` object is returned instead.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3, 4)))
+ >>> x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.var()
+ 11.916666666666666
+ >>> x.var(0)
+ matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary
+ >>> x.var(1)
+ matrix([[1.25],
+ [1.25],
+ [1.25]])
+
+ """
+ return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
+
+ def prod(self, axis=None, dtype=None, out=None):
+ """
+ Return the product of the array elements over the given axis.
+
+ Refer to `prod` for full documentation.
+
+ See Also
+ --------
+ prod, ndarray.prod
+
+ Notes
+ -----
+ Same as `ndarray.prod`, except, where that returns an `ndarray`, this
+ returns a `matrix` object instead.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.prod()
+ 0
+ >>> x.prod(0)
+ matrix([[ 0, 45, 120, 231]])
+ >>> x.prod(1)
+ matrix([[ 0],
+ [ 840],
+ [7920]])
+
+ """
+ return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
+
+ def any(self, axis=None, out=None):
+ """
+ Test whether any array element along a given axis evaluates to True.
+
+ Refer to `numpy.any` for full documentation.
+
+ Parameters
+ ----------
+ axis : int, optional
+ Axis along which logical OR is performed
+ out : ndarray, optional
+ Output to existing array instead of creating new one, must have
+ same shape as expected output
+
+ Returns
+ -------
+ any : bool, ndarray
+ Returns a single bool if `axis` is ``None``; otherwise,
+ returns `ndarray`
+
+ """
+ return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
+
+ def all(self, axis=None, out=None):
+ """
+ Test whether all matrix elements along a given axis evaluate to True.
+
+ Parameters
+ ----------
+ See `numpy.all` for complete descriptions
+
+ See Also
+ --------
+ numpy.all
+
+ Notes
+ -----
+ This is the same as `ndarray.all`, but it returns a `matrix` object.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> y = x[0]; y
+ matrix([[0, 1, 2, 3]])
+ >>> (x == y)
+ matrix([[ True, True, True, True],
+ [False, False, False, False],
+ [False, False, False, False]])
+ >>> (x == y).all()
+ False
+ >>> (x == y).all(0)
+ matrix([[False, False, False, False]])
+ >>> (x == y).all(1)
+ matrix([[ True],
+ [False],
+ [False]])
+
+ """
+ return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
+
+ def max(self, axis=None, out=None):
+ """
+ Return the maximum value along an axis.
+
+ Parameters
+ ----------
+ See `amax` for complete descriptions
+
+ See Also
+ --------
+ amax, ndarray.max
+
+ Notes
+ -----
+ This is the same as `ndarray.max`, but returns a `matrix` object
+ where `ndarray.max` would return an ndarray.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.max()
+ 11
+ >>> x.max(0)
+ matrix([[ 8, 9, 10, 11]])
+ >>> x.max(1)
+ matrix([[ 3],
+ [ 7],
+ [11]])
+
+ """
+ return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
+
+ def argmax(self, axis=None, out=None):
+ """
+ Indexes of the maximum values along an axis.
+
+ Return the indexes of the first occurrences of the maximum values
+ along the specified axis. If axis is None, the index is for the
+ flattened matrix.
+
+ Parameters
+ ----------
+ See `numpy.argmax` for complete descriptions
+
+ See Also
+ --------
+ numpy.argmax
+
+ Notes
+ -----
+ This is the same as `ndarray.argmax`, but returns a `matrix` object
+ where `ndarray.argmax` would return an `ndarray`.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.argmax()
+ 11
+ >>> x.argmax(0)
+ matrix([[2, 2, 2, 2]])
+ >>> x.argmax(1)
+ matrix([[3],
+ [3],
+ [3]])
+
+ """
+ return N.ndarray.argmax(self, axis, out)._align(axis)
+
+ def min(self, axis=None, out=None):
+ """
+ Return the minimum value along an axis.
+
+ Parameters
+ ----------
+ See `amin` for complete descriptions.
+
+ See Also
+ --------
+ amin, ndarray.min
+
+ Notes
+ -----
+ This is the same as `ndarray.min`, but returns a `matrix` object
+ where `ndarray.min` would return an ndarray.
+
+ Examples
+ --------
+ >>> x = -np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, -1, -2, -3],
+ [ -4, -5, -6, -7],
+ [ -8, -9, -10, -11]])
+ >>> x.min()
+ -11
+ >>> x.min(0)
+ matrix([[ -8, -9, -10, -11]])
+ >>> x.min(1)
+ matrix([[ -3],
+ [ -7],
+ [-11]])
+
+ """
+ return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
+
+ def argmin(self, axis=None, out=None):
+ """
+ Indexes of the minimum values along an axis.
+
+ Return the indexes of the first occurrences of the minimum values
+ along the specified axis. If axis is None, the index is for the
+ flattened matrix.
+
+ Parameters
+ ----------
+ See `numpy.argmin` for complete descriptions.
+
+ See Also
+ --------
+ numpy.argmin
+
+ Notes
+ -----
+ This is the same as `ndarray.argmin`, but returns a `matrix` object
+ where `ndarray.argmin` would return an `ndarray`.
+
+ Examples
+ --------
+ >>> x = -np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, -1, -2, -3],
+ [ -4, -5, -6, -7],
+ [ -8, -9, -10, -11]])
+ >>> x.argmin()
+ 11
+ >>> x.argmin(0)
+ matrix([[2, 2, 2, 2]])
+ >>> x.argmin(1)
+ matrix([[3],
+ [3],
+ [3]])
+
+ """
+ return N.ndarray.argmin(self, axis, out)._align(axis)
+
+ def ptp(self, axis=None, out=None):
+ """
+ Peak-to-peak (maximum - minimum) value along the given axis.
+
+ Refer to `numpy.ptp` for full documentation.
+
+ See Also
+ --------
+ numpy.ptp
+
+ Notes
+ -----
+ Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
+ this returns a `matrix` object.
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.ptp()
+ 11
+ >>> x.ptp(0)
+ matrix([[8, 8, 8, 8]])
+ >>> x.ptp(1)
+ matrix([[3],
+ [3],
+ [3]])
+
+ """
+ return N.ndarray.ptp(self, axis, out)._align(axis)
+
+ @property
+ def I(self):
+ """
+ Returns the (multiplicative) inverse of invertible `self`.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ ret : matrix object
+            If `self` is non-singular, `ret` is such that ``ret * self`` and
+            ``self * ret`` both equal the identity matrix
+            ``np.matrix(np.eye(self[0,:].size))``.
+
+ Raises
+ ------
+ numpy.linalg.LinAlgError: Singular matrix
+ If `self` is singular.
+
+ See Also
+ --------
+ linalg.inv
+
+ Examples
+ --------
+ >>> m = np.matrix('[1, 2; 3, 4]'); m
+ matrix([[1, 2],
+ [3, 4]])
+ >>> m.getI()
+ matrix([[-2. , 1. ],
+ [ 1.5, -0.5]])
+ >>> m.getI() * m
+ matrix([[ 1., 0.], # may vary
+ [ 0., 1.]])
+
+ """
+ M, N = self.shape
+ if M == N:
+ from numpy.linalg import inv as func
+ else:
+ from numpy.linalg import pinv as func
+ return asmatrix(func(self))
+
+ @property
+ def A(self):
+ """
+ Return `self` as an `ndarray` object.
+
+ Equivalent to ``np.asarray(self)``.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ ret : ndarray
+ `self` as an `ndarray`
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.getA()
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+
+ """
+ return self.__array__()
+
+ @property
+ def A1(self):
+ """
+ Return `self` as a flattened `ndarray`.
+
+ Equivalent to ``np.asarray(x).ravel()``
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ ret : ndarray
+ `self`, 1-D, as an `ndarray`
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+ matrix([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
+ >>> x.getA1()
+ array([ 0, 1, 2, ..., 9, 10, 11])
+
+
+ """
+ return self.__array__().ravel()
+
+
+ def ravel(self, order='C'):
+ """
+ Return a flattened matrix.
+
+ Refer to `numpy.ravel` for more documentation.
+
+ Parameters
+ ----------
+ order : {'C', 'F', 'A', 'K'}, optional
+ The elements of `m` are read using this index order. 'C' means to
+ index the elements in C-like order, with the last axis index
+ changing fastest, back to the first axis index changing slowest.
+ 'F' means to index the elements in Fortran-like index order, with
+ the first index changing fastest, and the last index changing
+ slowest. Note that the 'C' and 'F' options take no account of the
+ memory layout of the underlying array, and only refer to the order
+ of axis indexing. 'A' means to read the elements in Fortran-like
+ index order if `m` is Fortran *contiguous* in memory, C-like order
+ otherwise. 'K' means to read the elements in the order they occur
+ in memory, except for reversing the data when strides are negative.
+ By default, 'C' index order is used.
+
+ Returns
+ -------
+ ret : matrix
+ Return the matrix flattened to shape `(1, N)` where `N`
+ is the number of elements in the original matrix.
+ A copy is made only if necessary.
+
+ See Also
+ --------
+ matrix.flatten : returns a similar output matrix but always a copy
+ matrix.flat : a flat iterator on the array.
+ numpy.ravel : related function which returns an ndarray
+
+ """
+ return N.ndarray.ravel(self, order=order)
+
+ @property
+ def T(self):
+ """
+ Returns the transpose of the matrix.
+
+ Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ ret : matrix object
+ The (non-conjugated) transpose of the matrix.
+
+ See Also
+ --------
+ transpose, getH
+
+ Examples
+ --------
+ >>> m = np.matrix('[1, 2; 3, 4]')
+ >>> m
+ matrix([[1, 2],
+ [3, 4]])
+ >>> m.getT()
+ matrix([[1, 3],
+ [2, 4]])
+
+ """
+ return self.transpose()
+
+ @property
+ def H(self):
+ """
+ Returns the (complex) conjugate transpose of `self`.
+
+ Equivalent to ``np.transpose(self)`` if `self` is real-valued.
+
+ Parameters
+ ----------
+ None
+
+ Returns
+ -------
+ ret : matrix object
+ complex conjugate transpose of `self`
+
+ Examples
+ --------
+ >>> x = np.matrix(np.arange(12).reshape((3,4)))
+ >>> z = x - 1j*x; z
+ matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
+ [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
+ [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
+ >>> z.getH()
+ matrix([[ 0. -0.j, 4. +4.j, 8. +8.j],
+ [ 1. +1.j, 5. +5.j, 9. +9.j],
+ [ 2. +2.j, 6. +6.j, 10.+10.j],
+ [ 3. +3.j, 7. +7.j, 11.+11.j]])
+
+ """
+ if issubclass(self.dtype.type, N.complexfloating):
+ return self.transpose().conjugate()
+ else:
+ return self.transpose()
+
+ # kept for compatibility
+ getT = T.fget
+ getA = A.fget
+ getA1 = A1.fget
+ getH = H.fget
+ getI = I.fget
+
+def _from_string(s, gdict, ldict):
+    rows = s.split(';')
+ rowtup = []
+ for row in rows:
+ trow = row.split(',')
+ newrow = []
+ for x in trow:
+ newrow.extend(x.split())
+ trow = newrow
+ coltup = []
+ for col in trow:
+ col = col.strip()
+ try:
+ thismat = ldict[col]
+ except KeyError:
+ try:
+ thismat = gdict[col]
+ except KeyError as e:
+ raise NameError(f"name {col!r} is not defined") from None
+
+ coltup.append(thismat)
+ rowtup.append(concatenate(coltup, axis=-1))
+ return concatenate(rowtup, axis=0)
+
+
+@set_module('numpy')
+def bmat(obj, ldict=None, gdict=None):
+ """
+ Build a matrix object from a string, nested sequence, or array.
+
+ Parameters
+ ----------
+ obj : str or array_like
+ Input data. If a string, variables in the current scope may be
+ referenced by name.
+ ldict : dict, optional
+ A dictionary that replaces local operands in current frame.
+ Ignored if `obj` is not a string or `gdict` is None.
+ gdict : dict, optional
+ A dictionary that replaces global operands in current frame.
+ Ignored if `obj` is not a string.
+
+ Returns
+ -------
+ out : matrix
+ Returns a matrix object, which is a specialized 2-D array.
+
+ See Also
+ --------
+ block :
+ A generalization of this function for N-d arrays, that returns normal
+ ndarrays.
+
+ Examples
+ --------
+ >>> A = np.mat('1 1; 1 1')
+ >>> B = np.mat('2 2; 2 2')
+ >>> C = np.mat('3 4; 5 6')
+ >>> D = np.mat('7 8; 9 0')
+
+ All the following expressions construct the same block matrix:
+
+ >>> np.bmat([[A, B], [C, D]])
+ matrix([[1, 1, 2, 2],
+ [1, 1, 2, 2],
+ [3, 4, 7, 8],
+ [5, 6, 9, 0]])
+ >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
+ matrix([[1, 1, 2, 2],
+ [1, 1, 2, 2],
+ [3, 4, 7, 8],
+ [5, 6, 9, 0]])
+ >>> np.bmat('A,B; C,D')
+ matrix([[1, 1, 2, 2],
+ [1, 1, 2, 2],
+ [3, 4, 7, 8],
+ [5, 6, 9, 0]])
+
+ """
+ if isinstance(obj, str):
+ if gdict is None:
+ # get previous frame
+ frame = sys._getframe().f_back
+ glob_dict = frame.f_globals
+ loc_dict = frame.f_locals
+ else:
+ glob_dict = gdict
+ loc_dict = ldict
+
+ return matrix(_from_string(obj, glob_dict, loc_dict))
+
+ if isinstance(obj, (tuple, list)):
+ # [[A,B],[C,D]]
+ arr_rows = []
+ for row in obj:
+ if isinstance(row, N.ndarray): # not 2-d
+ return matrix(concatenate(obj, axis=-1))
+ else:
+ arr_rows.append(concatenate(row, axis=-1))
+ return matrix(concatenate(arr_rows, axis=0))
+ if isinstance(obj, N.ndarray):
+ return matrix(obj)
+
+mat = asmatrix
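
A sketch pulling together the construction paths defined above (string parsing via `_convert_from_string`, caller-scope lookup in `bmat`, and copy-free views via `asmatrix`); `A` and `B` are local names introduced only for the illustration:

    import numpy as np

    m = np.matrix('1 2; 3 4')         # parsed by _convert_from_string
    A = np.matrix('1 1; 1 1')
    B = np.matrix('2 2; 2 2')
    blk = np.bmat('A B; A B')         # names resolved from the caller's frame
    v = np.asmatrix(np.arange(4))     # no copy: a 2-D view with shape (1, 4)
    assert blk.shape == (4, 4) and v.shape == (1, 4)
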
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi b/venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi
new file mode 100644
index 00000000..9d0d1ee5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/defmatrix.pyi
@@ -0,0 +1,16 @@
+from collections.abc import Sequence, Mapping
+from typing import Any
+from numpy import matrix as matrix
+from numpy._typing import ArrayLike, DTypeLike, NDArray
+
+__all__: list[str]
+
+def bmat(
+ obj: str | Sequence[ArrayLike] | NDArray[Any],
+ ldict: None | Mapping[str, Any] = ...,
+ gdict: None | Mapping[str, Any] = ...,
+) -> matrix[Any, Any]: ...
+
+def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ...
+
+mat = asmatrix
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/setup.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/setup.py
new file mode 100644
index 00000000..4fed75de
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/setup.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('matrixlib', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_files('*.pyi')
+ return config
+
+if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ config = configuration(top_path='').todict()
+ setup(**config)
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py
new file mode 100644
index 00000000..4cb5f3a3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_defmatrix.py
@@ -0,0 +1,453 @@
+import collections.abc
+
+import numpy as np
+from numpy import matrix, asmatrix, bmat
+from numpy.testing import (
+ assert_, assert_equal, assert_almost_equal, assert_array_equal,
+ assert_array_almost_equal, assert_raises
+ )
+from numpy.linalg import matrix_power
+from numpy.matrixlib import mat
+
+class TestCtor:
+ def test_basic(self):
+ A = np.array([[1, 2], [3, 4]])
+ mA = matrix(A)
+ assert_(np.all(mA.A == A))
+
+ B = bmat("A,A;A,A")
+ C = bmat([[A, A], [A, A]])
+ D = np.array([[1, 2, 1, 2],
+ [3, 4, 3, 4],
+ [1, 2, 1, 2],
+ [3, 4, 3, 4]])
+ assert_(np.all(B.A == D))
+ assert_(np.all(C.A == D))
+
+ E = np.array([[5, 6], [7, 8]])
+ AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]])
+ assert_(np.all(bmat([A, E]) == AEresult))
+
+ vec = np.arange(5)
+ mvec = matrix(vec)
+ assert_(mvec.shape == (1, 5))
+
+ def test_exceptions(self):
+ # Check for ValueError when called with invalid string data.
+ assert_raises(ValueError, matrix, "invalid")
+
+ def test_bmat_nondefault_str(self):
+ A = np.array([[1, 2], [3, 4]])
+ B = np.array([[5, 6], [7, 8]])
+ Aresult = np.array([[1, 2, 1, 2],
+ [3, 4, 3, 4],
+ [1, 2, 1, 2],
+ [3, 4, 3, 4]])
+ mixresult = np.array([[1, 2, 5, 6],
+ [3, 4, 7, 8],
+ [5, 6, 1, 2],
+ [7, 8, 3, 4]])
+ assert_(np.all(bmat("A,A;A,A") == Aresult))
+ assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult))
+ assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B})
+ assert_(
+ np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult))
+ b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A})
+ assert_(np.all(b2 == mixresult))
+
+
+class TestProperties:
+ def test_sum(self):
+ """Test whether matrix.sum(axis=1) preserves orientation.
+ Fails in NumPy <= 0.9.6.2127.
+ """
+ M = matrix([[1, 2, 0, 0],
+ [3, 4, 0, 0],
+ [1, 2, 1, 2],
+ [3, 4, 3, 4]])
+ sum0 = matrix([8, 12, 4, 6])
+ sum1 = matrix([3, 7, 6, 14]).T
+ sumall = 30
+ assert_array_equal(sum0, M.sum(axis=0))
+ assert_array_equal(sum1, M.sum(axis=1))
+ assert_equal(sumall, M.sum())
+
+ assert_array_equal(sum0, np.sum(M, axis=0))
+ assert_array_equal(sum1, np.sum(M, axis=1))
+ assert_equal(sumall, np.sum(M))
+
+ def test_prod(self):
+ x = matrix([[1, 2, 3], [4, 5, 6]])
+ assert_equal(x.prod(), 720)
+ assert_equal(x.prod(0), matrix([[4, 10, 18]]))
+ assert_equal(x.prod(1), matrix([[6], [120]]))
+
+ assert_equal(np.prod(x), 720)
+ assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]]))
+ assert_equal(np.prod(x, axis=1), matrix([[6], [120]]))
+
+ y = matrix([0, 1, 3])
+ assert_(y.prod() == 0)
+
+ def test_max(self):
+ x = matrix([[1, 2, 3], [4, 5, 6]])
+ assert_equal(x.max(), 6)
+ assert_equal(x.max(0), matrix([[4, 5, 6]]))
+ assert_equal(x.max(1), matrix([[3], [6]]))
+
+ assert_equal(np.max(x), 6)
+ assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]]))
+ assert_equal(np.max(x, axis=1), matrix([[3], [6]]))
+
+ def test_min(self):
+ x = matrix([[1, 2, 3], [4, 5, 6]])
+ assert_equal(x.min(), 1)
+ assert_equal(x.min(0), matrix([[1, 2, 3]]))
+ assert_equal(x.min(1), matrix([[1], [4]]))
+
+ assert_equal(np.min(x), 1)
+ assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]]))
+ assert_equal(np.min(x, axis=1), matrix([[1], [4]]))
+
+ def test_ptp(self):
+ x = np.arange(4).reshape((2, 2))
+ assert_(x.ptp() == 3)
+ assert_(np.all(x.ptp(0) == np.array([2, 2])))
+ assert_(np.all(x.ptp(1) == np.array([1, 1])))
+
+ def test_var(self):
+ x = np.arange(9).reshape((3, 3))
+ mx = x.view(np.matrix)
+ assert_equal(x.var(ddof=0), mx.var(ddof=0))
+ assert_equal(x.var(ddof=1), mx.var(ddof=1))
+
+ def test_basic(self):
+ import numpy.linalg as linalg
+
+ A = np.array([[1., 2.],
+ [3., 4.]])
+ mA = matrix(A)
+ assert_(np.allclose(linalg.inv(A), mA.I))
+ assert_(np.all(np.array(np.transpose(A) == mA.T)))
+ assert_(np.all(np.array(np.transpose(A) == mA.H)))
+ assert_(np.all(A == mA.A))
+
+ B = A + 2j*A
+ mB = matrix(B)
+ assert_(np.allclose(linalg.inv(B), mB.I))
+ assert_(np.all(np.array(np.transpose(B) == mB.T)))
+ assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))
+
+ def test_pinv(self):
+ x = matrix(np.arange(6).reshape(2, 3))
+ xpinv = matrix([[-0.77777778, 0.27777778],
+ [-0.11111111, 0.11111111],
+ [ 0.55555556, -0.05555556]])
+ assert_almost_equal(x.I, xpinv)
+
+ def test_comparisons(self):
+ A = np.arange(100).reshape(10, 10)
+ mA = matrix(A)
+ mB = matrix(A) + 0.1
+ assert_(np.all(mB == A+0.1))
+ assert_(np.all(mB == matrix(A+0.1)))
+ assert_(not np.any(mB == matrix(A-0.1)))
+ assert_(np.all(mA < mB))
+ assert_(np.all(mA <= mB))
+ assert_(np.all(mA <= mA))
+ assert_(not np.any(mA < mA))
+
+ assert_(not np.any(mB < mA))
+ assert_(np.all(mB >= mA))
+ assert_(np.all(mB >= mB))
+ assert_(not np.any(mB > mB))
+
+ assert_(np.all(mA == mA))
+ assert_(not np.any(mA == mB))
+ assert_(np.all(mB != mA))
+
+ assert_(not np.all(abs(mA) > 0))
+ assert_(np.all(abs(mB > 0)))
+
+ def test_asmatrix(self):
+ A = np.arange(100).reshape(10, 10)
+ mA = asmatrix(A)
+ A[0, 0] = -10
+ assert_(A[0, 0] == mA[0, 0])
+
+ def test_noaxis(self):
+ A = matrix([[1, 0], [0, 1]])
+ assert_(A.sum() == matrix(2))
+ assert_(A.mean() == matrix(0.5))
+
+ def test_repr(self):
+ A = matrix([[1, 0], [0, 1]])
+ assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])")
+
+ def test_make_bool_matrix_from_str(self):
+ A = matrix('True; True; False')
+ B = matrix([[True], [True], [False]])
+ assert_array_equal(A, B)
+
+class TestCasting:
+ def test_basic(self):
+ A = np.arange(100).reshape(10, 10)
+ mA = matrix(A)
+
+ mB = mA.copy()
+ O = np.ones((10, 10), np.float64) * 0.1
+ mB = mB + O
+ assert_(mB.dtype.type == np.float64)
+ assert_(np.all(mA != mB))
+ assert_(np.all(mB == mA+0.1))
+
+ mC = mA.copy()
+ O = np.ones((10, 10), np.complex128)
+ mC = mC * O
+ assert_(mC.dtype.type == np.complex128)
+ assert_(np.all(mA != mB))
+
+
+class TestAlgebra:
+ def test_basic(self):
+ import numpy.linalg as linalg
+
+ A = np.array([[1., 2.], [3., 4.]])
+ mA = matrix(A)
+
+ B = np.identity(2)
+ for i in range(6):
+ assert_(np.allclose((mA ** i).A, B))
+ B = np.dot(B, A)
+
+ Ainv = linalg.inv(A)
+ B = np.identity(2)
+ for i in range(6):
+ assert_(np.allclose((mA ** -i).A, B))
+ B = np.dot(B, Ainv)
+
+ assert_(np.allclose((mA * mA).A, np.dot(A, A)))
+ assert_(np.allclose((mA + mA).A, (A + A)))
+ assert_(np.allclose((3*mA).A, (3*A)))
+
+ mA2 = matrix(A)
+ mA2 *= 3
+ assert_(np.allclose(mA2.A, 3*A))
+
+ def test_pow(self):
+ """Test raising a matrix to an integer power works as expected."""
+ m = matrix("1. 2.; 3. 4.")
+ m2 = m.copy()
+ m2 **= 2
+ mi = m.copy()
+ mi **= -1
+ m4 = m2.copy()
+ m4 **= 2
+ assert_array_almost_equal(m2, m**2)
+ assert_array_almost_equal(m4, np.dot(m2, m2))
+ assert_array_almost_equal(np.dot(mi, m), np.eye(2))
+
+ def test_scalar_type_pow(self):
+ m = matrix([[1, 2], [3, 4]])
+ for scalar_t in [np.int8, np.uint8]:
+ two = scalar_t(2)
+ assert_array_almost_equal(m ** 2, m ** two)
+
+ def test_notimplemented(self):
+ '''Check that 'not implemented' operations produce a failure.'''
+ A = matrix([[1., 2.],
+ [3., 4.]])
+
+ # __rpow__
+ with assert_raises(TypeError):
+ 1.0**A
+
+ # __mul__ with something not a list, ndarray, tuple, or scalar
+ with assert_raises(TypeError):
+ A*object()
+
+
+class TestMatrixReturn:
+ def test_instance_methods(self):
+ a = matrix([1.0], dtype='f8')
+ methodargs = {
+ 'astype': ('intc',),
+ 'clip': (0.0, 1.0),
+ 'compress': ([1],),
+ 'repeat': (1,),
+ 'reshape': (1,),
+ 'swapaxes': (0, 0),
+ 'dot': np.array([1.0]),
+ }
+ excluded_methods = [
+ 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
+ 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',
+ 'searchsorted', 'setflags', 'setfield', 'sort',
+ 'partition', 'argpartition',
+ 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any',
+ 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
+ 'prod', 'std', 'ctypes', 'itemset',
+ ]
+ for attrib in dir(a):
+ if attrib.startswith('_') or attrib in excluded_methods:
+ continue
+ f = getattr(a, attrib)
+ if isinstance(f, collections.abc.Callable):
+ # reset contents of a
+ a.astype('f8')
+ a.fill(1.0)
+ if attrib in methodargs:
+ args = methodargs[attrib]
+ else:
+ args = ()
+ b = f(*args)
+ assert_(type(b) is matrix, "%s" % attrib)
+ assert_(type(a.real) is matrix)
+ assert_(type(a.imag) is matrix)
+ c, d = matrix([0.0]).nonzero()
+ assert_(type(c) is np.ndarray)
+ assert_(type(d) is np.ndarray)
+
+
+class TestIndexing:
+ def test_basic(self):
+ x = asmatrix(np.zeros((3, 2), float))
+ y = np.zeros((3, 1), float)
+ y[:, 0] = [0.8, 0.2, 0.3]
+ x[:, 1] = y > 0.5
+ assert_equal(x, [[0, 1], [0, 0], [0, 0]])
+
+
+class TestNewScalarIndexing:
+ a = matrix([[1, 2], [3, 4]])
+
+    def test_dimensions(self):
+ a = self.a
+ x = a[0]
+ assert_equal(x.ndim, 2)
+
+ def test_array_from_matrix_list(self):
+ a = self.a
+ x = np.array([a, a])
+ assert_equal(x.shape, [2, 2, 2])
+
+ def test_array_to_list(self):
+ a = self.a
+ assert_equal(a.tolist(), [[1, 2], [3, 4]])
+
+ def test_fancy_indexing(self):
+ a = self.a
+ x = a[1, [0, 1, 0]]
+ assert_(isinstance(x, matrix))
+ assert_equal(x, matrix([[3, 4, 3]]))
+ x = a[[1, 0]]
+ assert_(isinstance(x, matrix))
+ assert_equal(x, matrix([[3, 4], [1, 2]]))
+ x = a[[[1], [0]], [[1, 0], [0, 1]]]
+ assert_(isinstance(x, matrix))
+ assert_equal(x, matrix([[4, 3], [1, 2]]))
+
+ def test_matrix_element(self):
+ x = matrix([[1, 2, 3], [4, 5, 6]])
+ assert_equal(x[0][0], matrix([[1, 2, 3]]))
+ assert_equal(x[0][0].shape, (1, 3))
+ assert_equal(x[0].shape, (1, 3))
+ assert_equal(x[:, 0].shape, (2, 1))
+
+ x = matrix(0)
+ assert_equal(x[0, 0], 0)
+ assert_equal(x[0], 0)
+ assert_equal(x[:, 0].shape, x.shape)
+
+ def test_scalar_indexing(self):
+ x = asmatrix(np.zeros((3, 2), float))
+ assert_equal(x[0, 0], x[0][0])
+
+ def test_row_column_indexing(self):
+ x = asmatrix(np.eye(2))
+ assert_array_equal(x[0,:], [[1, 0]])
+ assert_array_equal(x[1,:], [[0, 1]])
+ assert_array_equal(x[:, 0], [[1], [0]])
+ assert_array_equal(x[:, 1], [[0], [1]])
+
+ def test_boolean_indexing(self):
+ A = np.arange(6)
+ A.shape = (3, 2)
+ x = asmatrix(A)
+ assert_array_equal(x[:, np.array([True, False])], x[:, 0])
+ assert_array_equal(x[np.array([True, False, False]),:], x[0,:])
+
+ def test_list_indexing(self):
+ A = np.arange(6)
+ A.shape = (3, 2)
+ x = asmatrix(A)
+ assert_array_equal(x[:, [1, 0]], x[:, ::-1])
+ assert_array_equal(x[[2, 1, 0],:], x[::-1,:])
+
+
+class TestPower:
+ def test_returntype(self):
+ a = np.array([[0, 1], [0, 0]])
+ assert_(type(matrix_power(a, 2)) is np.ndarray)
+ a = mat(a)
+ assert_(type(matrix_power(a, 2)) is matrix)
+
+ def test_list(self):
+ assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
+
+
+class TestShape:
+
+ a = np.array([[1], [2]])
+ m = matrix([[1], [2]])
+
+ def test_shape(self):
+ assert_equal(self.a.shape, (2, 1))
+ assert_equal(self.m.shape, (2, 1))
+
+ def test_numpy_ravel(self):
+ assert_equal(np.ravel(self.a).shape, (2,))
+ assert_equal(np.ravel(self.m).shape, (2,))
+
+ def test_member_ravel(self):
+ assert_equal(self.a.ravel().shape, (2,))
+ assert_equal(self.m.ravel().shape, (1, 2))
+
+ def test_member_flatten(self):
+ assert_equal(self.a.flatten().shape, (2,))
+ assert_equal(self.m.flatten().shape, (1, 2))
+
+ def test_numpy_ravel_order(self):
+ x = np.array([[1, 2, 3], [4, 5, 6]])
+ assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6])
+ assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6])
+ assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6])
+ assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6])
+ x = matrix([[1, 2, 3], [4, 5, 6]])
+ assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6])
+ assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6])
+ assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6])
+ assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6])
+
+ def test_matrix_ravel_order(self):
+ x = matrix([[1, 2, 3], [4, 5, 6]])
+ assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]])
+ assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]])
+ assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]])
+ assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]])
+
+ def test_array_memory_sharing(self):
+ assert_(np.may_share_memory(self.a, self.a.ravel()))
+ assert_(not np.may_share_memory(self.a, self.a.flatten()))
+
+ def test_matrix_memory_sharing(self):
+ assert_(np.may_share_memory(self.m, self.m.ravel()))
+ assert_(not np.may_share_memory(self.m, self.m.flatten()))
+
+ def test_expand_dims_matrix(self):
+ # matrices are always 2d - so expand_dims only makes sense when the
+ # type is changed away from matrix.
+ a = np.arange(10).reshape((2, 5)).view(np.matrix)
+ expanded = np.expand_dims(a, axis=1)
+ assert_equal(expanded.ndim, 3)
+ assert_(not isinstance(expanded, np.matrix))
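
These tests are wired into the `PytestTester` hook installed by the package `__init__` shown earlier, so they can be run without invoking pytest by hand; a minimal sketch, assuming pytest is installed (keyword values follow the usual numpy conventions):

    import numpy.matrixlib

    # Collects and runs numpy/matrixlib/tests/ through pytest.
    numpy.matrixlib.test(label='fast', verbose=1)
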
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py
new file mode 100644
index 00000000..5154bd62
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_interaction.py
@@ -0,0 +1,354 @@
+"""Tests of interaction of matrix with other parts of numpy.
+
+Note that tests with MaskedArray and linalg are done in separate files.
+"""
+import pytest
+
+import textwrap
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_raises,
+ assert_raises_regex, assert_array_equal,
+ assert_almost_equal, assert_array_almost_equal)
+
+
+def test_fancy_indexing():
+    # The matrix class messes with the shape. While this is always
+    # weird (__getitem__ is not used, it has no __setitem__ and knows
+    # nothing about fancy indexing), this tests gh-3110.
+ # 2018-04-29: moved here from core.tests.test_index.
+ m = np.matrix([[1, 2], [3, 4]])
+
+ assert_(isinstance(m[[0, 1, 0], :], np.matrix))
+
+ # gh-3110. Note the transpose currently because matrices do *not*
+ # support dimension fixing for fancy indexing correctly.
+ x = np.asmatrix(np.arange(50).reshape(5, 10))
+ assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
+
+
+def test_polynomial_mapdomain():
+ # test that polynomial preserved matrix subtype.
+ # 2018-04-29: moved here from polynomial.tests.polyutils.
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ x = np.matrix([dom1, dom1])
+ res = np.polynomial.polyutils.mapdomain(x, dom1, dom2)
+ assert_(isinstance(res, np.matrix))
+
+
+def test_sort_matrix_none():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.sort(a, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+ assert_(type(expected) is np.matrix)
+
+
+def test_partition_matrix_none():
+ # gh-4301
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ a = np.matrix([[2, 1, 0]])
+ actual = np.partition(a, 1, axis=None)
+ expected = np.matrix([[0, 1, 2]])
+ assert_equal(actual, expected)
+ assert_(type(expected) is np.matrix)
+
+
+def test_dot_scalar_and_matrix_of_objects():
+ # Ticket #2469
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.dot(arr, 3), desired)
+ assert_equal(np.dot(3, arr), desired)
+
+
+def test_inner_scalar_and_matrix():
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+ sca = np.array(3, dtype=dt)[()]
+ arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
+ desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
+ assert_equal(np.inner(arr, sca), desired)
+ assert_equal(np.inner(sca, arr), desired)
+
+
+def test_inner_scalar_and_matrix_of_objects():
+ # Ticket #4482
+ # 2018-04-29: moved here from core.tests.test_multiarray
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.inner(arr, 3), desired)
+ assert_equal(np.inner(3, arr), desired)
+
+
+def test_iter_allocate_output_subtype():
+ # Make sure that the subtype with priority wins
+ # 2018-04-29: moved here from core.tests.test_nditer, given the
+ # matrix specific shape test.
+
+ # matrix vs ndarray
+ a = np.matrix([[1, 2], [3, 4]])
+ b = np.arange(4).reshape(2, 2).T
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ assert_(type(i.operands[2]) is np.matrix)
+ assert_(type(i.operands[2]) is not np.ndarray)
+ assert_equal(i.operands[2].shape, (2, 2))
+
+ # matrix always wants things to be 2D
+ b = np.arange(4).reshape(1, 2, 2)
+ assert_raises(RuntimeError, np.nditer, [a, b, None], [],
+ [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+ # but if subtypes are disabled, the result can still work
+ i = np.nditer([a, b, None], [],
+ [['readonly'], ['readonly'],
+ ['writeonly', 'allocate', 'no_subtype']])
+ assert_(type(i.operands[2]) is np.ndarray)
+ assert_(type(i.operands[2]) is not np.matrix)
+ assert_equal(i.operands[2].shape, (1, 2, 2))
+
+
+def test_like_function():
+ # 2018-04-29: moved here from core.tests.test_numeric
+ a = np.matrix([[1, 2], [3, 4]])
+ for like_function in np.zeros_like, np.ones_like, np.empty_like:
+ b = like_function(a)
+ assert_(type(b) is np.matrix)
+
+ c = like_function(a, subok=False)
+ assert_(type(c) is not np.matrix)
+
+
+def test_array_astype():
+ # 2018-04-29: copied here from core.tests.test_api
+ # subok=True passes through a matrix
+ a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+ b = a.astype('f4', subok=True, copy=False)
+ assert_(a is b)
+
+ # subok=True is default, and creates a subtype on a cast
+ b = a.astype('i4', copy=False)
+ assert_equal(a, b)
+ assert_equal(type(b), np.matrix)
+
+ # subok=False never returns a matrix
+ b = a.astype('f4', subok=False, copy=False)
+ assert_equal(a, b)
+ assert_(a is not b)
+ assert_(type(b) is not np.matrix)
+
+
+def test_stack():
+ # 2018-04-29: copied here from core.tests.test_shape_base
+ # check np.matrix cannot be stacked
+ m = np.matrix([[1, 2], [3, 4]])
+ assert_raises_regex(ValueError, 'shape too large to be a matrix',
+ np.stack, [m, m])
+
+
+def test_object_scalar_multiply():
+ # Tickets #2469 and #4482
+ # 2018-04-29: moved here from core.tests.test_ufunc
+ arr = np.matrix([1, 2], dtype=object)
+ desired = np.matrix([[3, 6]], dtype=object)
+ assert_equal(np.multiply(arr, 3), desired)
+ assert_equal(np.multiply(3, arr), desired)
+
+
+def test_nanfunctions_matrices():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in [np.nanmin, np.nanmax]:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+ # check that rows of nan are dealt with for subclasses (#4628)
+ mat[1] = np.nan
+ for f in [np.nanmin, np.nanmax]:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(not np.any(np.isnan(res)))
+ assert_(len(w) == 0)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
+ and not np.isnan(res[2, 0]))
+ assert_(len(w) == 1, 'no warning raised')
+ assert_(issubclass(w[0].category, RuntimeWarning))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always')
+ res = f(mat)
+ assert_(np.isscalar(res))
+ assert_(not np.isnan(res))
+ assert_(len(w) == 0)
+
+
+def test_nanfunctions_matrices_general():
+ # Check that it works and that type and
+ # shape are preserved
+ # 2018-04-29: moved here from core.tests.test_nanfunctions
+ mat = np.matrix(np.eye(3))
+ for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
+ np.nanmean, np.nanvar, np.nanstd):
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 1))
+ res = f(mat)
+ assert_(np.isscalar(res))
+
+ for f in np.nancumsum, np.nancumprod:
+ res = f(mat, axis=0)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat, axis=1)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (3, 3))
+ res = f(mat)
+ assert_(isinstance(res, np.matrix))
+ assert_(res.shape == (1, 3*3))
+
+
+def test_average_matrix():
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ y = np.matrix(np.random.rand(5, 5))
+ assert_array_equal(y.mean(0), np.average(y, 0))
+
+ a = np.matrix([[1, 2], [3, 4]])
+ w = np.matrix([[1, 2], [3, 4]])
+
+ r = np.average(a, axis=0, weights=w)
+ assert_equal(type(r), np.matrix)
+ assert_equal(r, [[2.5, 10.0/3]])
+
+
+def test_trapz_matrix():
+ # Test to make sure matrices give the same answer as ndarrays
+ # 2018-04-29: moved here from core.tests.test_function_base.
+ x = np.linspace(0, 5)
+ y = x * x
+ r = np.trapz(y, x)
+ mx = np.matrix(x)
+ my = np.matrix(y)
+ mr = np.trapz(my, mx)
+ assert_almost_equal(mr, r)
+
+
+def test_ediff1d_matrix():
+ # 2018-04-29: moved here from core.tests.test_arraysetops.
+ assert_(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
+ assert_(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
+
+
+def test_apply_along_axis_matrix():
+ # this test is particularly malicious because matrix
+ # refuses to become 1d
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ def double(row):
+ return row * 2
+
+ m = np.matrix([[0, 1], [2, 3]])
+ expected = np.matrix([[0, 2], [4, 6]])
+
+ result = np.apply_along_axis(double, 0, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+ result = np.apply_along_axis(double, 1, m)
+ assert_(isinstance(result, np.matrix))
+ assert_array_equal(result, expected)
+
+
+def test_kron_matrix():
+ # 2018-04-29: moved here from core.tests.test_shape_base.
+ a = np.ones([2, 2])
+ m = np.asmatrix(a)
+ assert_equal(type(np.kron(a, a)), np.ndarray)
+ assert_equal(type(np.kron(m, m)), np.matrix)
+ assert_equal(type(np.kron(a, m)), np.matrix)
+ assert_equal(type(np.kron(m, a)), np.matrix)
+
+
+class TestConcatenatorMatrix:
+ # 2018-04-29: moved here from core.tests.test_index_tricks.
+ def test_matrix(self):
+ a = [1, 2]
+ b = [3, 4]
+
+ ab_r = np.r_['r', a, b]
+ ab_c = np.r_['c', a, b]
+
+ assert_equal(type(ab_r), np.matrix)
+ assert_equal(type(ab_c), np.matrix)
+
+ assert_equal(np.array(ab_r), [[1, 2, 3, 4]])
+ assert_equal(np.array(ab_c), [[1], [2], [3], [4]])
+
+ assert_raises(ValueError, lambda: np.r_['rc', a, b])
+
+ def test_matrix_scalar(self):
+ r = np.r_['r', [1, 2], 3]
+ assert_equal(type(r), np.matrix)
+ assert_equal(np.array(r), [[1, 2, 3]])
+
+ def test_matrix_builder(self):
+ a = np.array([1])
+ b = np.array([2])
+ c = np.array([3])
+ d = np.array([4])
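+ # When indexed with a single string, np.r_ delegates to np.bmat,
+ # which resolves the names a, b, c and d from the caller's frame.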
+ actual = np.r_['a, b; c, d']
+ expected = np.bmat([[a, b], [c, d]])
+
+ assert_equal(actual, expected)
+ assert_equal(type(actual), type(expected))
+
+
+def test_array_equal_error_message_matrix():
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ with pytest.raises(AssertionError) as exc_info:
+ assert_equal(np.array([1, 2]), np.matrix([1, 2]))
+ msg = str(exc_info.value)
+ msg_reference = textwrap.dedent("""\
+
+ Arrays are not equal
+
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: matrix([[1, 2]])""")
+ assert_equal(msg, msg_reference)
+
+
+def test_array_almost_equal_matrix():
+ # Matrix slicing keeps things 2-D, while array does not necessarily.
+ # See gh-8452.
+ # 2018-04-29: moved here from testing.tests.test_utils.
+ m1 = np.matrix([[1., 2.]])
+ m2 = np.matrix([[1., np.nan]])
+ m3 = np.matrix([[1., -np.inf]])
+ m4 = np.matrix([[np.nan, np.inf]])
+ m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
+ for assert_func in assert_array_almost_equal, assert_almost_equal:
+ for m in m1, m2, m3, m4, m5:
+ assert_func(m, m)
+ a = np.array(m)
+ assert_func(a, m)
+ assert_func(m, a)
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py
new file mode 100644
index 00000000..d0ce357a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_masked_matrix.py
@@ -0,0 +1,231 @@
+import numpy as np
+from numpy.testing import assert_warns
+from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
+ assert_array_equal)
+from numpy.ma.core import (masked_array, masked_values, masked, allequal,
+ MaskType, getmask, MaskedArray, nomask,
+ log, add, hypot, divide)
+from numpy.ma.extras import mr_
+from numpy.compat import pickle
+
+
+class MMatrix(MaskedArray, np.matrix):
+
+ def __new__(cls, data, mask=nomask):
+ mat = np.matrix(data)
+ _data = MaskedArray.__new__(cls, data=mat, mask=mask)
+ return _data
+
+ def __array_finalize__(self, obj):
+ np.matrix.__array_finalize__(self, obj)
+ MaskedArray.__array_finalize__(self, obj)
+ return
+
+ @property
+ def _series(self):
+ _view = self.view(MaskedArray)
+ _view._sharedmask = False
+ return _view
+
+
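+# A minimal usage sketch (an assumed illustration, mirroring the tests
+# below): an MMatrix instance behaves as both parent types at once.
+#
+#     m = MMatrix([[1., 2.], [3., 4.]], mask=[[0, 1], [0, 0]])
+#     isinstance(m, np.matrix)    # True
+#     isinstance(m, MaskedArray)  # True
+
+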
+class TestMaskedMatrix:
+ def test_matrix_indexing(self):
+ # Tests conversions and indexing
+ x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
+ x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]])
+ x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]])
+ x4 = masked_array(x1)
+ # test conversion to strings
+ str(x2) # raises?
+ repr(x2) # raises?
+ # tests of indexing
+ assert_(type(x2[1, 0]) is type(x1[1, 0]))
+ assert_(x1[1, 0] == x2[1, 0])
+ assert_(x2[1, 1] is masked)
+ assert_equal(x1[0, 2], x2[0, 2])
+ assert_equal(x1[0, 1:], x2[0, 1:])
+ assert_equal(x1[:, 2], x2[:, 2])
+ assert_equal(x1[:], x2[:])
+ assert_equal(x1[1:], x3[1:])
+ x1[0, 2] = 9
+ x2[0, 2] = 9
+ assert_equal(x1, x2)
+ x1[0, 1:] = 99
+ x2[0, 1:] = 99
+ assert_equal(x1, x2)
+ x2[0, 1] = masked
+ assert_equal(x1, x2)
+ x2[0, 1:] = masked
+ assert_equal(x1, x2)
+ x2[0, :] = x1[0, :]
+ x2[0, 1] = masked
+ assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
+ x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0])))
+ assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0])))
+ x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+ assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0])))
+ assert_(allequal(x4[1], masked_array([1, 2, 3])))
+ x1 = np.matrix(np.arange(5) * 1.0)
+ x2 = masked_values(x1, 3.0)
+ assert_equal(x1, x2)
+ assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType),
+ x2.mask))
+ assert_equal(3.0, x2.fill_value)
+
+ def test_pickling_subbaseclass(self):
+ # Test pickling w/ a subclass of ndarray
+ a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.matrix))
+
+ def test_count_mean_with_matrix(self):
+ m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
+
+ assert_equal(m.count(axis=0).shape, (1, 2))
+ assert_equal(m.count(axis=1).shape, (2, 1))
+
+ # Make sure broadcasting inside mean and var work
+ assert_equal(m.mean(axis=0), [[2., 3.]])
+ assert_equal(m.mean(axis=1), [[1.5], [3.5]])
+
+ def test_flat(self):
+ # Test that flat can return items even for matrices [#4585, #4615]
+ # test simple access
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ assert_equal(test.flat[1], 2)
+ assert_equal(test.flat[2], masked)
+ assert_(np.all(test.flat[0:2] == test[0, 0:2]))
+ # Test flat on masked_matrices
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
+ control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
+ assert_equal(test, control)
+ # Test setting
+ test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+ testflat = test.flat
+ testflat[:] = testflat[[2, 1, 0]]
+ assert_equal(test, control)
+ testflat[0] = 9
+ # test that matrices keep the correct shape (#4615)
+ a = masked_array(np.matrix(np.eye(2)), mask=0)
+ b = a.flat
+ b01 = b[:2]
+ assert_equal(b01.data, np.array([[1., 0.]]))
+ assert_equal(b01.mask, np.array([[False, False]]))
+
+ def test_allany_onmatrices(self):
+ x = np.array([[0.13, 0.26, 0.90],
+ [0.28, 0.33, 0.63],
+ [0.31, 0.87, 0.70]])
+ X = np.matrix(x)
+ m = np.array([[True, False, False],
+ [False, False, False],
+ [True, True, False]], dtype=np.bool_)
+ mX = masked_array(X, mask=m)
+ mXbig = (mX > 0.5)
+ mXsmall = (mX < 0.5)
+
+ assert_(not mXbig.all())
+ assert_(mXbig.any())
+ assert_equal(mXbig.all(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
+ assert_equal(mXbig.any(0), np.matrix([False, False, True]))
+ assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
+
+ assert_(not mXsmall.all())
+ assert_(mXsmall.any())
+ assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+ assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+ assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+ def test_compressed(self):
+ a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+ b = a.compressed()
+ assert_equal(b, a)
+ assert_(isinstance(b, np.matrix))
+ a[0, 0] = masked
+ b = a.compressed()
+ assert_equal(b, [[2, 3, 4]])
+
+ def test_ravel(self):
+ a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+ aravel = a.ravel()
+ assert_equal(aravel.shape, (1, 5))
+ assert_equal(aravel._mask.shape, a.shape)
+
+ def test_view(self):
+ # Test view w/ flexible dtype
+ iterator = list(zip(np.arange(10), np.random.rand(10)))
+ data = np.array(iterator)
+ a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+ a.mask[0] = (1, 0)
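+ # view((float, 2), np.matrix) collapses the two float fields into a
+ # trailing axis of length 2, then views the result as np.matrix.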
+ test = a.view((float, 2), np.matrix)
+ assert_equal(test, data)
+ assert_(isinstance(test, np.matrix))
+ assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing:
+ # Test suite for masked subclasses of ndarray.
+
+ def setup_method(self):
+ x = np.arange(5, dtype='float')
+ mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+ self.data = (x, mx)
+
+ def test_maskedarray_subclassing(self):
+ # Tests subclassing MaskedArray
+ (x, mx) = self.data
+ assert_(isinstance(mx._data, np.matrix))
+
+ def test_masked_unary_operations(self):
+ # Tests masked_unary_operation
+ (x, mx) = self.data
+ with np.errstate(divide='ignore'):
+ assert_(isinstance(log(mx), MMatrix))
+ assert_equal(log(x), np.log(x))
+
+ def test_masked_binary_operations(self):
+ # Tests masked_binary_operation
+ (x, mx) = self.data
+ # Result should be a MMatrix
+ assert_(isinstance(add(mx, mx), MMatrix))
+ assert_(isinstance(add(mx, x), MMatrix))
+ # Result should work
+ assert_equal(add(mx, x), mx+x)
+ assert_(isinstance(add(mx, mx)._data, np.matrix))
+ with assert_warns(DeprecationWarning):
+ assert_(isinstance(add.outer(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, mx), MMatrix))
+ assert_(isinstance(hypot(mx, x), MMatrix))
+
+ def test_masked_binary_operations2(self):
+ # Tests domained_masked_binary_operation
+ (x, mx) = self.data
+ xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+ assert_(isinstance(divide(mx, mx), MMatrix))
+ assert_(isinstance(divide(mx, x), MMatrix))
+ assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+
+class TestConcatenator:
+ # Tests for mr_, the equivalent of r_ for masked arrays.
+
+ def test_matrix_builder(self):
+ assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+ def test_matrix(self):
+ # Test consistency with unmasked version. If we ever deprecate
+ # matrix, this test should either still pass, or both actual and
+ # expected should fail to build.
+ actual = mr_['r', 1, 2, 3]
+ expected = np.ma.array(np.r_['r', 1, 2, 3])
+ assert_array_equal(actual, expected)
+
+ # outer type is masked array, inner type is matrix
+ assert_equal(type(actual), type(expected))
+ assert_equal(type(actual.data), type(expected.data))
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py
new file mode 100644
index 00000000..106c2e38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -0,0 +1,93 @@
+""" Test functions for linalg module using the matrix class."""
+import numpy as np
+
+from numpy.linalg.tests.test_linalg import (
+ LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
+ _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
+ SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
+ PinvCases, DetCases, LstsqCases)
+
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+ LinalgCase("0x0_matrix",
+ np.empty((0, 0), dtype=np.double).view(np.matrix),
+ np.empty((0, 1), dtype=np.double).view(np.matrix),
+ tags={'size-0'}),
+ LinalgCase("matrix_b_only",
+ np.array([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+ LinalgCase("matrix_a_and_b",
+ np.matrix([[1., 2.], [3., 4.]]),
+ np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+ LinalgCase("hmatrix_a_and_b",
+ np.matrix([[1., 2.], [2., 1.]]),
+ None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+ TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+ pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+ pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+ pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+ pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+ pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+ pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+ pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+ pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+ pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+ array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+ pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+ pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+ pass
+
+
+class TestQRMatrix(_TestQR):
+ array = np.matrix
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py
new file mode 100644
index 00000000..638d0d15
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_multiarray.py
@@ -0,0 +1,16 @@
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_array_equal
+
+class TestView:
+ def test_type(self):
+ x = np.array([1, 2, 3])
+ assert_(isinstance(x.view(np.matrix), np.matrix))
+
+ def test_keywords(self):
+ x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+ # We must be specific about the endianness here:
+ y = x.view(dtype='<i2', type=np.matrix)
+ assert_array_equal(y, [[513]])
+
+ assert_(isinstance(y, np.matrix))
+ assert_equal(y.dtype, np.dtype('<i2'))
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_numeric.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_numeric.py
new file mode 100644
index 00000000..a772bb38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_numeric.py
@@ -0,0 +1,17 @@
+import numpy as np
+from numpy.testing import assert_equal
+
+class TestDot:
+ def test_matscalar(self):
+ b1 = np.matrix(np.ones((3, 3), dtype=complex))
+ assert_equal(b1*1.0, b1)
+
+
+def test_diagonal():
+ b1 = np.matrix([[1,2],[3,4]])
+ diag_b1 = np.matrix([[1, 4]])
+ array_b1 = np.array([1, 4])
+
+ assert_equal(b1.diagonal(), diag_b1)
+ assert_equal(np.diagonal(b1), array_b1)
+ assert_equal(np.diag(b1), array_b1)
diff --git a/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_regression.py
new file mode 100644
index 00000000..a54d4402
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/matrixlib/tests/test_regression.py
@@ -0,0 +1,31 @@
+import numpy as np
+from numpy.testing import assert_, assert_equal, assert_raises
+
+
+class TestRegression:
+ def test_kron_matrix(self):
+ # Ticket #71
+ x = np.matrix('[1 0; 1 0]')
+ assert_equal(type(np.kron(x, x)), type(x))
+
+ def test_matrix_properties(self):
+ # Ticket #125
+ a = np.matrix([1.0], dtype=float)
+ assert_(type(a.real) is np.matrix)
+ assert_(type(a.imag) is np.matrix)
+ c, d = np.matrix([0.0]).nonzero()
+ assert_(type(c) is np.ndarray)
+ assert_(type(d) is np.ndarray)
+
+ def test_matrix_multiply_by_1d_vector(self):
+ # Ticket #473
+ def mul():
+ np.mat(np.eye(2))*np.ones(2)
+
+ assert_raises(ValueError, mul)
+
+ def test_matrix_std_argmax(self):
+ # Ticket #83
+ x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
+ assert_equal(x.std().shape, ())
+ assert_equal(x.argmax().shape, ())
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/__init__.py b/venv/lib/python3.9/site-packages/numpy/polynomial/__init__.py
new file mode 100644
index 00000000..c4e7baf2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/__init__.py
@@ -0,0 +1,185 @@
+"""
+A sub-package for efficiently dealing with polynomials.
+
+Within the documentation for this sub-package, a "finite power series,"
+i.e., a polynomial (also referred to simply as a "series") is represented
+by a 1-D numpy array of the polynomial's coefficients, ordered from lowest
+order term to highest. For example, array([1,2,3]) represents
+``P_0 + 2*P_1 + 3*P_2``, where P_n is the n-th order basis polynomial
+applicable to the specific module in question, e.g., `polynomial` (which
+"wraps" the "standard" basis) or `chebyshev`. For optimal performance,
+all operations on polynomials, including evaluation at an argument, are
+implemented as operations on the coefficients. Additional (module-specific)
+information can be found in the docstring for the module of interest.
+
+This package provides *convenience classes* for each of six different kinds
+of polynomials:
+
+ ======================== ================
+ **Name** **Provides**
+ ======================== ================
+ `~polynomial.Polynomial` Power series
+ `~chebyshev.Chebyshev` Chebyshev series
+ `~legendre.Legendre` Legendre series
+ `~laguerre.Laguerre` Laguerre series
+ `~hermite.Hermite` Hermite series
+ `~hermite_e.HermiteE` HermiteE series
+ ======================== ================
+
+These *convenience classes* provide a consistent interface for creating,
+manipulating, and fitting data with polynomials of different bases.
+The convenience classes are the preferred interface for the `~numpy.polynomial`
+package, and are available from the ``numpy.polynomial`` namespace.
+This eliminates the need to navigate to the corresponding submodules, e.g.
+``np.polynomial.Polynomial`` or ``np.polynomial.Chebyshev`` instead of
+``np.polynomial.polynomial.Polynomial`` or
+``np.polynomial.chebyshev.Chebyshev``, respectively.
+The classes provide a more consistent and concise interface than the
+type-specific functions defined in the submodules for each type of polynomial.
+For example, to fit a Chebyshev polynomial with degree ``1`` to data given
+by arrays ``xdata`` and ``ydata``, the
+`~chebyshev.Chebyshev.fit` class method::
+
+ >>> from numpy.polynomial import Chebyshev
+ >>> c = Chebyshev.fit(xdata, ydata, deg=1)
+
+is preferred over the `chebyshev.chebfit` function from the
+``np.polynomial.chebyshev`` module::
+
+ >>> from numpy.polynomial.chebyshev import chebfit
+ >>> c = chebfit(xdata, ydata, deg=1)
+
+See :doc:`routines.polynomials.classes` for more details.
+
+Convenience Classes
+===================
+
+The following lists the various constants and methods common to all of
+the classes representing the various kinds of polynomials. In the following,
+the term ``Poly`` represents any one of the convenience classes (e.g.
+`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.)
+while the lowercase ``p`` represents an **instance** of a polynomial class.
+
+Constants
+---------
+
+- ``Poly.domain`` -- Default domain
+- ``Poly.window`` -- Default window
+- ``Poly.basis_name`` -- String used to represent the basis
+- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed
+- ``Poly.nickname`` -- String used in printing
+
+Creation
+--------
+
+Methods for creating polynomial instances.
+
+- ``Poly.basis(degree)`` -- Basis polynomial of given degree
+- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x``
+- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients
+ determined by the least-squares fit to the data ``x``, ``y``
+- ``Poly.fromroots(roots)`` -- ``p`` with specified roots
+- ``p.copy()`` -- Create a copy of ``p``
+
+Conversion
+----------
+
+Methods for converting a polynomial instance of one kind to another.
+
+- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly``
+- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map
+ between ``domain`` and ``window``
+
+Calculus
+--------
+- ``p.deriv()`` -- Take the derivative of ``p``
+- ``p.integ()`` -- Integrate ``p``
+
+Validation
+----------
+- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match
+- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match
+- ``Poly.has_sametype(p1, p2)`` -- Check if types match
+- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match
+
+Misc
+----
+- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain``
+- ``p.mapparms()`` -- Return the parameters for the linear mapping between
+ ``domain`` and ``window``.
+- ``p.roots()`` -- Return the roots of `p`.
+- ``p.trim()`` -- Remove trailing coefficients.
+- ``p.cutdeg(degree)`` -- Truncate p to given degree
+- ``p.truncate(size)`` -- Truncate p to given size
+
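+A short session tying these together (an illustrative sketch; any of the
+convenience classes can stand in for `~polynomial.Polynomial`)::
+
+ >>> import numpy as np
+ >>> from numpy.polynomial import Polynomial
+ >>> p = Polynomial.fromroots([-1, 1])  # p(x) = (x + 1)*(x - 1)
+ >>> p.roots()
+ array([-1.,  1.])
+ >>> p.deriv().coef  # derivative of x**2 - 1
+ array([0., 2.])
+ >>> p.integ().deriv() == p  # integ and deriv are inverses
+ True
+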
+"""
+from .polynomial import Polynomial
+from .chebyshev import Chebyshev
+from .legendre import Legendre
+from .hermite import Hermite
+from .hermite_e import HermiteE
+from .laguerre import Laguerre
+
+__all__ = [
+ "set_default_printstyle",
+ "polynomial", "Polynomial",
+ "chebyshev", "Chebyshev",
+ "legendre", "Legendre",
+ "hermite", "Hermite",
+ "hermite_e", "HermiteE",
+ "laguerre", "Laguerre",
+]
+
+
+def set_default_printstyle(style):
+ """
+ Set the default format for the string representation of polynomials.
+
+ Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii'
+ or 'unicode'.
+
+ Parameters
+ ----------
+ style : str
+ Format string for default printing style. Must be either 'ascii' or
+ 'unicode'.
+
+ Notes
+ -----
+ The default format depends on the platform: 'unicode' is used on
+ Unix-based systems and 'ascii' on Windows. This determination is based on
+ default font support for the unicode superscript and subscript ranges.
+
+ Examples
+ --------
+ >>> p = np.polynomial.Polynomial([1, 2, 3])
+ >>> c = np.polynomial.Chebyshev([1, 2, 3])
+ >>> np.polynomial.set_default_printstyle('unicode')
+ >>> print(p)
+ 1.0 + 2.0·x + 3.0·x²
+ >>> print(c)
+ 1.0 + 2.0·T₁(x) + 3.0·T₂(x)
+ >>> np.polynomial.set_default_printstyle('ascii')
+ >>> print(p)
+ 1.0 + 2.0 x + 3.0 x**2
+ >>> print(c)
+ 1.0 + 2.0 T_1(x) + 3.0 T_2(x)
+ >>> # Formatting supersedes all class/package-level defaults
+ >>> print(f"{p:unicode}")
+ 1.0 + 2.0·x + 3.0·x²
+ """
+ if style not in ('unicode', 'ascii'):
+ raise ValueError(
+ f"Unsupported format string '{style}'. Valid options are 'ascii' "
+ f"and 'unicode'"
+ )
+ _use_unicode = True
+ if style == 'ascii':
+ _use_unicode = False
+ from ._polybase import ABCPolyBase
+ ABCPolyBase._use_unicode = _use_unicode
+
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi
new file mode 100644
index 00000000..c9d1c27a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/__init__.pyi
@@ -0,0 +1,22 @@
+from numpy._pytesttester import PytestTester
+
+from numpy.polynomial import (
+ chebyshev as chebyshev,
+ hermite as hermite,
+ hermite_e as hermite_e,
+ laguerre as laguerre,
+ legendre as legendre,
+ polynomial as polynomial,
+)
+from numpy.polynomial.chebyshev import Chebyshev as Chebyshev
+from numpy.polynomial.hermite import Hermite as Hermite
+from numpy.polynomial.hermite_e import HermiteE as HermiteE
+from numpy.polynomial.laguerre import Laguerre as Laguerre
+from numpy.polynomial.legendre import Legendre as Legendre
+from numpy.polynomial.polynomial import Polynomial as Polynomial
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+def set_default_printstyle(style): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.py b/venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.py
new file mode 100644
index 00000000..3bea91dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.py
@@ -0,0 +1,1184 @@
+"""
+Abstract base class for the various polynomial Classes.
+
+The ABCPolyBase class provides the methods needed to implement the common API
+for the various polynomial classes. It operates as a mixin, using the
+abc module from the stdlib to declare the abstract interface.
+
+"""
+import os
+import abc
+import numbers
+
+import numpy as np
+from . import polyutils as pu
+
+__all__ = ['ABCPolyBase']
+
+class ABCPolyBase(abc.ABC):
+ """An abstract base class for immutable series classes.
+
+ ABCPolyBase provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
+ methods listed below.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ coef : array_like
+ Series coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
+ ``P_i`` is the basis polynomials of degree ``i``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is the derived class domain.
+ window : (2,) array_like, optional
+ Window, see domain for its use. The default value is the
+ derived class window.
+ symbol : str, optional
+ Symbol used to represent the independent variable in string
+ representations of the polynomial expression, e.g. for printing.
+ The symbol must be a valid Python identifier. Default value is 'x'.
+
+ .. versionadded:: 1.24
+
+ Attributes
+ ----------
+ coef : (N,) ndarray
+ Series coefficients in order of increasing degree.
+ domain : (2,) ndarray
+ Domain that is mapped to window.
+ window : (2,) ndarray
+ Window that domain is mapped to.
+ symbol : str
+ Symbol representing the independent variable.
+
+ Class Attributes
+ ----------------
+ maxpower : int
+ Maximum power allowed, i.e., the largest number ``n`` such that
+ ``p(x)**n`` is allowed. This is to limit runaway polynomial size.
+ domain : (2,) ndarray
+ Default domain of the class.
+ window : (2,) ndarray
+ Default window of the class.
+
+ """
+
+ # Not hashable
+ __hash__ = None
+
+ # Opt out of numpy ufuncs and Python ops with ndarray subclasses.
+ __array_ufunc__ = None
+
+ # Limit runaway size. T_n^m has degree n*m
+ maxpower = 100
+
+ # Unicode character mappings for improved __str__
+ _superscript_mapping = str.maketrans({
+ "0": "⁰",
+ "1": "¹",
+ "2": "²",
+ "3": "³",
+ "4": "⁴",
+ "5": "⁵",
+ "6": "⁶",
+ "7": "⁷",
+ "8": "⁸",
+ "9": "⁹"
+ })
+ _subscript_mapping = str.maketrans({
+ "0": "₀",
+ "1": "₁",
+ "2": "₂",
+ "3": "₃",
+ "4": "₄",
+ "5": "₅",
+ "6": "₆",
+ "7": "₇",
+ "8": "₈",
+ "9": "₉"
+ })
+ # Some fonts don't support full unicode character ranges necessary for
+ # the full set of superscripts and subscripts, including common/default
+ # fonts in Windows shells/terminals. Therefore, default to ascii-only
+ # printing on windows.
+ _use_unicode = not os.name == 'nt'
+
+ @property
+ def symbol(self):
+ return self._symbol
+
+ @property
+ @abc.abstractmethod
+ def domain(self):
+ pass
+
+ @property
+ @abc.abstractmethod
+ def window(self):
+ pass
+
+ @property
+ @abc.abstractmethod
+ def basis_name(self):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _add(c1, c2):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _sub(c1, c2):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _mul(c1, c2):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _div(c1, c2):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _pow(c, pow, maxpower=None):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _val(x, c):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _int(c, m, k, lbnd, scl):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _der(c, m, scl):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _fit(x, y, deg, rcond, full):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _line(off, scl):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _roots(c):
+ pass
+
+ @staticmethod
+ @abc.abstractmethod
+ def _fromroots(r):
+ pass
+
+ def has_samecoef(self, other):
+ """Check if coefficients match.
+
+ .. versionadded:: 1.6.0
+
+ Parameters
+ ----------
+ other : class instance
+ The other class must have the ``coef`` attribute.
+
+ Returns
+ -------
+ bool : boolean
+ True if the coefficients are the same, False otherwise.
+
+ """
+ if len(self.coef) != len(other.coef):
+ return False
+ elif not np.all(self.coef == other.coef):
+ return False
+ else:
+ return True
+
+ def has_samedomain(self, other):
+ """Check if domains match.
+
+ .. versionadded:: 1.6.0
+
+ Parameters
+ ----------
+ other : class instance
+ The other class must have the ``domain`` attribute.
+
+ Returns
+ -------
+ bool : boolean
+ True if the domains are the same, False otherwise.
+
+ """
+ return np.all(self.domain == other.domain)
+
+ def has_samewindow(self, other):
+ """Check if windows match.
+
+ .. versionadded:: 1.6.0
+
+ Parameters
+ ----------
+ other : class instance
+ The other class must have the ``window`` attribute.
+
+ Returns
+ -------
+ bool : boolean
+ True if the windows are the same, False otherwise.
+
+ """
+ return np.all(self.window == other.window)
+
+ def has_sametype(self, other):
+ """Check if types match.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ other : object
+ Class instance.
+
+ Returns
+ -------
+ bool : boolean
+ True if other is same class as self
+
+ """
+ return isinstance(other, self.__class__)
+
+ def _get_coefficients(self, other):
+ """Interpret other as polynomial coefficients.
+
+ The `other` argument is checked to see if it is of the same
+ class as self with identical domain and window. If so,
+ return its coefficients, otherwise return `other`.
+
+ .. versionadded:: 1.9.0
+
+ Parameters
+ ----------
+ other : anything
+ Object to be checked.
+
+ Returns
+ -------
+ coef
+ The coefficients of `other` if it is a compatible instance of
+ ABCPolyBase, otherwise `other`.
+
+ Raises
+ ------
+ TypeError
+ When `other` is an incompatible instance of ABCPolyBase.
+
+ """
+ if isinstance(other, ABCPolyBase):
+ if not isinstance(other, self.__class__):
+ raise TypeError("Polynomial types differ")
+ elif not np.all(self.domain == other.domain):
+ raise TypeError("Domains differ")
+ elif not np.all(self.window == other.window):
+ raise TypeError("Windows differ")
+ elif self.symbol != other.symbol:
+ raise ValueError("Polynomial symbols differ")
+ return other.coef
+ return other
+
+ def __init__(self, coef, domain=None, window=None, symbol='x'):
+ [coef] = pu.as_series([coef], trim=False)
+ self.coef = coef
+
+ if domain is not None:
+ [domain] = pu.as_series([domain], trim=False)
+ if len(domain) != 2:
+ raise ValueError("Domain has wrong number of elements.")
+ self.domain = domain
+
+ if window is not None:
+ [window] = pu.as_series([window], trim=False)
+ if len(window) != 2:
+ raise ValueError("Window has wrong number of elements.")
+ self.window = window
+
+ # Validation for symbol
+ try:
+ if not symbol.isidentifier():
+ raise ValueError(
+ "Symbol string must be a valid Python identifier"
+ )
+ # If a user passes in something other than a string, the above
+ # results in an AttributeError. Catch this and raise a more
+ # informative exception
+ except AttributeError:
+ raise TypeError("Symbol must be a non-empty string")
+
+ self._symbol = symbol
+
+ def __repr__(self):
+ coef = repr(self.coef)[6:-1]
+ domain = repr(self.domain)[6:-1]
+ window = repr(self.window)[6:-1]
+ name = self.__class__.__name__
+ return (f"{name}({coef}, domain={domain}, window={window}, "
+ f"symbol='{self.symbol}')")
+
+ def __format__(self, fmt_str):
+ if fmt_str == '':
+ return self.__str__()
+ if fmt_str not in ('ascii', 'unicode'):
+ raise ValueError(
+ f"Unsupported format string '{fmt_str}' passed to "
+ f"{self.__class__}.__format__. Valid options are "
+ f"'ascii' and 'unicode'"
+ )
+ if fmt_str == 'ascii':
+ return self._generate_string(self._str_term_ascii)
+ return self._generate_string(self._str_term_unicode)
+
+ def __str__(self):
+ if self._use_unicode:
+ return self._generate_string(self._str_term_unicode)
+ return self._generate_string(self._str_term_ascii)
+
+ def _generate_string(self, term_method):
+ """
+ Generate the full string representation of the polynomial, using
+ ``term_method`` to generate each polynomial term.
+ """
+ # Get configuration for line breaks
+ linewidth = np.get_printoptions().get('linewidth', 75)
+ if linewidth < 1:
+ linewidth = 1
+ out = pu.format_float(self.coef[0])
+ for i, coef in enumerate(self.coef[1:]):
+ out += " "
+ power = str(i + 1)
+ # Polynomial coefficient
+ # The coefficient array can be an object array with elements that
+ # will raise a TypeError with >= 0 (e.g. strings or Python
+ # complex). In this case, represent the coefficient as-is.
+ try:
+ if coef >= 0:
+ next_term = f"+ " + pu.format_float(coef, parens=True)
+ else:
+ next_term = f"- " + pu.format_float(-coef, parens=True)
+ except TypeError:
+ next_term = f"+ {coef}"
+ # Polynomial term
+ next_term += term_method(power, self.symbol)
+ # Length of the current line with next term added
+ line_len = len(out.split('\n')[-1]) + len(next_term)
+ # If not the last term in the polynomial, it will be two
+ # characters longer due to the +/- with the next term
+ if i < len(self.coef[1:]) - 1:
+ line_len += 2
+ # Handle linebreaking
+ if line_len >= linewidth:
+ next_term = next_term.replace(" ", "\n", 1)
+ out += next_term
+ return out
+
+ @classmethod
+ def _str_term_unicode(cls, i, arg_str):
+ """
+ String representation of single polynomial term using unicode
+ characters for superscripts and subscripts.
+ """
+ if cls.basis_name is None:
+ raise NotImplementedError(
+ "Subclasses must define either a basis_name, or override "
+ "_str_term_unicode(cls, i, arg_str)"
+ )
+ return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}"
+ f"({arg_str})")
+
+ @classmethod
+ def _str_term_ascii(cls, i, arg_str):
+ """
+ String representation of a single polynomial term using ** and _ to
+ represent superscripts and subscripts, respectively.
+ """
+ if cls.basis_name is None:
+ raise NotImplementedError(
+ "Subclasses must define either a basis_name, or override "
+ "_str_term_ascii(cls, i, arg_str)"
+ )
+ return f" {cls.basis_name}_{i}({arg_str})"
+
+ @classmethod
+ def _repr_latex_term(cls, i, arg_str, needs_parens):
+ if cls.basis_name is None:
+ raise NotImplementedError(
+ "Subclasses must define either a basis name, or override "
+ "_repr_latex_term(i, arg_str, needs_parens)")
+ # since we always add parens, we don't care if the expression needs them
+ return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})"
+
+ @staticmethod
+ def _repr_latex_scalar(x, parens=False):
+ # TODO: we're stuck with disabling math formatting until we handle
+ # exponents in this function
+ return r'\text{{{}}}'.format(pu.format_float(x, parens=parens))
+
+ def _repr_latex_(self):
+ # get the scaled argument string to the basis functions
+ off, scale = self.mapparms()
+ if off == 0 and scale == 1:
+ term = self.symbol
+ needs_parens = False
+ elif scale == 1:
+ term = f"{self._repr_latex_scalar(off)} + {self.symbol}"
+ needs_parens = True
+ elif off == 0:
+ term = f"{self._repr_latex_scalar(scale)}{self.symbol}"
+ needs_parens = True
+ else:
+ term = (
+ f"{self._repr_latex_scalar(off)} + "
+ f"{self._repr_latex_scalar(scale)}{self.symbol}"
+ )
+ needs_parens = True
+
+ mute = r"\color{{LightGray}}{{{}}}".format
+
+ parts = []
+ for i, c in enumerate(self.coef):
+ # prevent duplication of + and - signs
+ if i == 0:
+ coef_str = f"{self._repr_latex_scalar(c)}"
+ elif not isinstance(c, numbers.Real):
+ coef_str = f" + ({self._repr_latex_scalar(c)})"
+ elif not np.signbit(c):
+ coef_str = f" + {self._repr_latex_scalar(c, parens=True)}"
+ else:
+ coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}"
+
+ # produce the string for the term
+ term_str = self._repr_latex_term(i, term, needs_parens)
+ if term_str == '1':
+ part = coef_str
+ else:
+ part = rf"{coef_str}\,{term_str}"
+
+ if c == 0:
+ part = mute(part)
+
+ parts.append(part)
+
+ if parts:
+ body = ''.join(parts)
+ else:
+ # in case somehow there are no coefficients at all
+ body = '0'
+
+ return rf"${self.symbol} \mapsto {body}$"
+
+
+
+ # Pickle and copy
+
+ def __getstate__(self):
+ ret = self.__dict__.copy()
+ ret['coef'] = self.coef.copy()
+ ret['domain'] = self.domain.copy()
+ ret['window'] = self.window.copy()
+ ret['symbol'] = self.symbol
+ return ret
+
+ def __setstate__(self, dict):
+ self.__dict__ = dict
+
+ # Call
+
+ def __call__(self, arg):
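+ # The stored coefficients are relative to the window, so map the
+ # argument from the domain onto the window before evaluating.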
+ off, scl = pu.mapparms(self.domain, self.window)
+ arg = off + scl*arg
+ return self._val(arg, self.coef)
+
+ def __iter__(self):
+ return iter(self.coef)
+
+ def __len__(self):
+ return len(self.coef)
+
+ # Numeric properties.
+
+ def __neg__(self):
+ return self.__class__(
+ -self.coef, self.domain, self.window, self.symbol
+ )
+
+ def __pos__(self):
+ return self
+
+ def __add__(self, other):
+ othercoef = self._get_coefficients(other)
+ try:
+ coef = self._add(self.coef, othercoef)
+ except Exception:
+ return NotImplemented
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def __sub__(self, other):
+ othercoef = self._get_coefficients(other)
+ try:
+ coef = self._sub(self.coef, othercoef)
+ except Exception:
+ return NotImplemented
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def __mul__(self, other):
+ othercoef = self._get_coefficients(other)
+ try:
+ coef = self._mul(self.coef, othercoef)
+ except Exception:
+ return NotImplemented
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def __truediv__(self, other):
+ # there is no true divide if the rhs is not a Number, although it
+ # could return the first n elements of an infinite series.
+ # It is hard to see where n would come from, though.
+ if not isinstance(other, numbers.Number) or isinstance(other, bool):
+ raise TypeError(
+ f"unsupported types for true division: "
+ f"'{type(self)}', '{type(other)}'"
+ )
+ return self.__floordiv__(other)
+
+ def __floordiv__(self, other):
+ res = self.__divmod__(other)
+ if res is NotImplemented:
+ return res
+ return res[0]
+
+ def __mod__(self, other):
+ res = self.__divmod__(other)
+ if res is NotImplemented:
+ return res
+ return res[1]
+
+ def __divmod__(self, other):
+ othercoef = self._get_coefficients(other)
+ try:
+ quo, rem = self._div(self.coef, othercoef)
+ except ZeroDivisionError:
+ raise
+ except Exception:
+ return NotImplemented
+ quo = self.__class__(quo, self.domain, self.window, self.symbol)
+ rem = self.__class__(rem, self.domain, self.window, self.symbol)
+ return quo, rem
+
+ def __pow__(self, other):
+ coef = self._pow(self.coef, other, maxpower=self.maxpower)
+ res = self.__class__(coef, self.domain, self.window, self.symbol)
+ return res
+
+ def __radd__(self, other):
+ try:
+ coef = self._add(other, self.coef)
+ except Exception:
+ return NotImplemented
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def __rsub__(self, other):
+ try:
+ coef = self._sub(other, self.coef)
+ except Exception:
+ return NotImplemented
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def __rmul__(self, other):
+ try:
+ coef = self._mul(other, self.coef)
+ except Exception:
+ return NotImplemented
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def __rdiv__(self, other):
+ # __rdiv__ is the legacy Python 2 name; delegate to __rfloordiv__.
+ return self.__rfloordiv__(other)
+
+ def __rtruediv__(self, other):
+ # An instance of ABCPolyBase is not considered a
+ # Number.
+ return NotImplemented
+
+ def __rfloordiv__(self, other):
+ res = self.__rdivmod__(other)
+ if res is NotImplemented:
+ return res
+ return res[0]
+
+ def __rmod__(self, other):
+ res = self.__rdivmod__(other)
+ if res is NotImplemented:
+ return res
+ return res[1]
+
+ def __rdivmod__(self, other):
+ try:
+ quo, rem = self._div(other, self.coef)
+ except ZeroDivisionError:
+ raise
+ except Exception:
+ return NotImplemented
+ quo = self.__class__(quo, self.domain, self.window, self.symbol)
+ rem = self.__class__(rem, self.domain, self.window, self.symbol)
+ return quo, rem
+
+ def __eq__(self, other):
+ res = (isinstance(other, self.__class__) and
+ np.all(self.domain == other.domain) and
+ np.all(self.window == other.window) and
+ (self.coef.shape == other.coef.shape) and
+ np.all(self.coef == other.coef) and
+ (self.symbol == other.symbol))
+ return res
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ #
+ # Extra methods.
+ #
+
+ def copy(self):
+ """Return a copy.
+
+ Returns
+ -------
+ new_series : series
+ Copy of self.
+
+ """
+ return self.__class__(self.coef, self.domain, self.window, self.symbol)
+
+ def degree(self):
+ """The degree of the series.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ degree : int
+ Degree of the series, one less than the number of coefficients.
+
+ """
+ return len(self) - 1
+
+ def cutdeg(self, deg):
+ """Truncate series to the given degree.
+
+ Reduce the degree of the series to `deg` by discarding the
+ high order terms. If `deg` is greater than the current degree a
+ copy of the current series is returned. This can be useful in least
+ squares where the coefficients of the high degree terms may be very
+ small.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ deg : non-negative int
+ The series is reduced to degree `deg` by discarding the high
+ order terms. The value of `deg` must be a non-negative integer.
+
+ Returns
+ -------
+ new_series : series
+ New instance of series with reduced degree.
+
+ """
+ return self.truncate(deg + 1)
+
+ def trim(self, tol=0):
+ """Remove trailing coefficients
+
+ Remove trailing coefficients until a coefficient is reached whose
+ absolute value is greater than `tol` or the beginning of the series is
+ reached. If all the coefficients would be removed the series is set
+ to ``[0]``. A new series instance is returned with the new
+ coefficients. The current instance remains unchanged.
+
+ Parameters
+ ----------
+ tol : non-negative number.
+ All trailing coefficients less than `tol` will be removed.
+
+ Returns
+ -------
+ new_series : series
+ New instance of series with trimmed coefficients.
+
+ """
+ coef = pu.trimcoef(self.coef, tol)
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def truncate(self, size):
+ """Truncate series to length `size`.
+
+ Reduce the series to length `size` by discarding the high
+ degree terms. The value of `size` must be a positive integer. This
+ can be useful in least squares where the coefficients of the
+ high degree terms may be very small.
+
+ Parameters
+ ----------
+ size : positive int
+ The series is reduced to length `size` by discarding the high
+ degree terms. The value of `size` must be a positive integer.
+
+ Returns
+ -------
+ new_series : series
+ New instance of series with truncated coefficients.
+
+ """
+ isize = int(size)
+ if isize != size or isize < 1:
+ raise ValueError("size must be a positive integer")
+ if isize >= len(self.coef):
+ coef = self.coef
+ else:
+ coef = self.coef[:isize]
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def convert(self, domain=None, kind=None, window=None):
+ """Convert series to a different kind and/or domain and/or window.
+
+ Parameters
+ ----------
+ domain : array_like, optional
+ The domain of the converted series. If the value is None,
+ the default domain of `kind` is used.
+ kind : class, optional
+ The polynomial series type class to which the current instance
+ should be converted. If kind is None, then the class of the
+ current instance is used.
+ window : array_like, optional
+ The window of the converted series. If the value is None,
+ the default window of `kind` is used.
+
+ Returns
+ -------
+ new_series : series
+ The returned class can be of different type than the current
+ instance and/or have a different domain and/or different
+ window.
+
+ Notes
+ -----
+ Conversion between domains and class types can result in
+ numerically ill defined series.
+
+ """
+ if kind is None:
+ kind = self.__class__
+ if domain is None:
+ domain = kind.domain
+ if window is None:
+ window = kind.window
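+ # Conversion works by composition: evaluating self at the identity
+ # series of the target kind re-expands the coefficients in the new
+ # basis, domain and window.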
+ return self(kind.identity(domain, window=window, symbol=self.symbol))
+
+ def mapparms(self):
+ """Return the mapping parameters.
+
+ The returned values define a linear map ``off + scl*x`` that is
+ applied to the input arguments before the series is evaluated. The
+ map depends on the ``domain`` and ``window``; if the current
+ ``domain`` is equal to the ``window`` the resulting map is the
+ identity. If the coefficients of the series instance are to be
+ used by themselves outside this class, then the linear function
+ must be substituted for the ``x`` in the standard representation of
+ the base polynomials.
+
+ Returns
+ -------
+ off, scl : float or complex
+ The mapping function is defined by ``off + scl*x``.
+
+ Notes
+ -----
+ If the current domain is the interval ``[l1, r1]`` and the window
+ is ``[l2, r2]``, then the linear mapping function ``L`` is
+ defined by the equations::
+
+ L(l1) = l2
+ L(r1) = r2
+
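+ For example, mapping ``domain=[0, 4]`` onto ``window=[-1, 1]`` gives
+ ``off = -1`` and ``scl = 0.5``: the resulting map ``-1 + 0.5*x``
+ sends 0 to -1 and 4 to 1.
+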
+ """
+ return pu.mapparms(self.domain, self.window)
+
+ def integ(self, m=1, k=[], lbnd=None):
+ """Integrate.
+
+ Return a series instance that is the definite integral of the
+ current series.
+
+ Parameters
+ ----------
+ m : non-negative int
+ The number of integrations to perform.
+ k : array_like
+ Integration constants. The first constant is applied to the
+ first integration, the second to the second, and so on. The
+ list of values must be less than or equal to `m` in length and any
+ missing values are set to zero.
+ lbnd : Scalar
+ The lower bound of the definite integral.
+
+ Returns
+ -------
+ new_series : series
+ A new series representing the integral. The domain is the same
+ as the domain of the integrated series.
+
+ """
+ off, scl = self.mapparms()
+ if lbnd is None:
+ lbnd = 0
+ else:
+ lbnd = off + scl*lbnd
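+ # Integration is carried out in window coordinates; by the chain
+ # rule each integration pass picks up a factor of 1/scl.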
+ coef = self._int(self.coef, m, k, lbnd, 1./scl)
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def deriv(self, m=1):
+ """Differentiate.
+
+ Return a series instance that is the derivative of the current
+ series.
+
+ Parameters
+ ----------
+ m : non-negative int
+ Find the derivative of order `m`.
+
+ Returns
+ -------
+ new_series : series
+ A new series representing the derivative. The domain is the same
+ as the domain of the differentiated series.
+
+ """
+ off, scl = self.mapparms()
+ coef = self._der(self.coef, m, scl)
+ return self.__class__(coef, self.domain, self.window, self.symbol)
+
+ def roots(self):
+ """Return the roots of the series polynomial.
+
+ Compute the roots of the series. Note that the accuracy of the
+ roots decreases the further outside the domain they lie.
+
+ Returns
+ -------
+ roots : ndarray
+ Array containing the roots of the series.
+
+ """
+ roots = self._roots(self.coef)
+ return pu.mapdomain(roots, self.window, self.domain)
+
+ def linspace(self, n=100, domain=None):
+ """Return x, y values at equally spaced points in domain.
+
+ Returns the x, y values at `n` linearly spaced points across the
+ domain. Here y is the value of the polynomial at the points x. By
+ default the domain is the same as that of the series instance.
+ This method is intended mostly as a plotting aid.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ n : int, optional
+ Number of point pairs to return. The default value is 100.
+ domain : {None, array_like}, optional
+ If not None, the specified domain is used instead of that of
+ the calling instance. It should be of the form ``[beg,end]``.
+ The default is None, in which case the class domain is used.
+
+ Returns
+ -------
+ x, y : ndarray
+ x is equal to linspace(self.domain[0], self.domain[1], n) and
+ y is the series evaluated at each element of x.
+
+ """
+ if domain is None:
+ domain = self.domain
+ x = np.linspace(domain[0], domain[1], n)
+ y = self(x)
+ return x, y
+
+ @classmethod
+ def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
+ window=None, symbol='x'):
+ """Least squares fit to data.
+
+ Return a series instance that is the least squares fit to the data
+ `y` sampled at `x`. The domain of the returned instance can be
+ specified and this will often result in a superior fit with less
+ chance of ill conditioning.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,)
+ y-coordinates of the M sample points ``(x[i], y[i])``.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ domain : {None, [beg, end], []}, optional
+ Domain to use for the returned series. If ``None``,
+ then a minimal domain that covers the points `x` is chosen. If
+ ``[]`` the class domain is used. The default value was the
+ class domain in NumPy 1.4 and ``None`` in later versions.
+ The ``[]`` option was added in numpy 1.5.0.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller
+ than this relative to the largest singular value will be
+ ignored. The default value is len(x)*eps, where eps is the
+ relative precision of the float type, about 2e-16 in most
+ cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False
+ (the default) just the coefficients are returned, when True
+ diagnostic information from the singular value decomposition is
+ also returned.
+ w : array_like, shape (M,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have
+ the same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ .. versionadded:: 1.5.0
+ window : {[beg, end]}, optional
+ Window to use for the returned series. The default
+ value is the default class window.
+
+ .. versionadded:: 1.6.0
+ symbol : str, optional
+ Symbol representing the independent variable. Default is 'x'.
+
+ Returns
+ -------
+ new_series : series
+ A series that represents the least squares fit to the data and
+ has the domain and window specified in the call. If the
+ coefficients for the unscaled and unshifted basis polynomials are
+ of interest, do ``new_series.convert().coef``.
+
+ [resid, rank, sv, rcond] : list
+ These values are only returned if ``full == True``
+
+ - resid -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - sv -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `linalg.lstsq`.
+
+ """
+ if domain is None:
+ domain = pu.getdomain(x)
+ elif type(domain) is list and len(domain) == 0:
+ domain = cls.domain
+
+ if window is None:
+ window = cls.window
+
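+ # Fit in window coordinates, where the basis polynomials are well
+ # conditioned (e.g. Chebyshev on [-1, 1]).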
+ xnew = pu.mapdomain(x, domain, window)
+ res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
+ if full:
+ [coef, status] = res
+ return (
+ cls(coef, domain=domain, window=window, symbol=symbol), status
+ )
+ else:
+ coef = res
+ return cls(coef, domain=domain, window=window, symbol=symbol)
+
+ @classmethod
+ def fromroots(cls, roots, domain=[], window=None, symbol='x'):
+ """Return series instance that has the specified roots.
+
+ Returns a series representing the product
+ ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
+ list of roots.
+
+ Parameters
+ ----------
+ roots : array_like
+ List of roots.
+ domain : {[], None, array_like}, optional
+ Domain for the resulting series. If None the domain is the
+ interval from the smallest root to the largest. If [] the
+ domain is the class domain. The default is [].
+ window : {None, array_like}, optional
+ Window for the returned series. If None the class window is
+ used. The default is None.
+ symbol : str, optional
+ Symbol representing the independent variable. Default is 'x'.
+
+ Returns
+ -------
+ new_series : series
+ Series with the specified roots.
+
+ """
+ [roots] = pu.as_series([roots], trim=False)
+ if domain is None:
+ domain = pu.getdomain(roots)
+ elif type(domain) is list and len(domain) == 0:
+ domain = cls.domain
+
+ if window is None:
+ window = cls.window
+
+ deg = len(roots)
+ off, scl = pu.mapparms(domain, window)
+ rnew = off + scl*roots
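+ # Mapping into the window turns each factor (x - r) into
+ # (w - rnew)/scl, so the product over deg roots is rescaled by
+ # dividing out scl**deg.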
+ coef = cls._fromroots(rnew) / scl**deg
+ return cls(coef, domain=domain, window=window, symbol=symbol)
+
+ @classmethod
+ def identity(cls, domain=None, window=None, symbol='x'):
+ """Identity function.
+
+ If ``p`` is the returned series, then ``p(x) == x`` for all
+ values of x.
+
+ Parameters
+ ----------
+ domain : {None, array_like}, optional
+ If given, the array must be of the form ``[beg, end]``, where
+ ``beg`` and ``end`` are the endpoints of the domain. If None is
+ given then the class domain is used. The default is None.
+ window : {None, array_like}, optional
+ If given, the resulting array must be of the form
+ ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
+ the window. If None is given then the class window is used. The
+ default is None.
+ symbol : str, optional
+ Symbol representing the independent variable. Default is 'x'.
+
+ Returns
+ -------
+ new_series : series
+ Series representing the identity.
+
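+ Examples
+ --------
+ A short illustrative check:
+
+ >>> from numpy.polynomial import Chebyshev as T
+ >>> p = T.identity()
+ >>> p(5.0)   # p(x) == x for any x
+ 5.0
+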
+ """
+ if domain is None:
+ domain = cls.domain
+ if window is None:
+ window = cls.window
+ off, scl = pu.mapparms(window, domain)
+ coef = cls._line(off, scl)
+ return cls(coef, domain, window, symbol)
+
+ @classmethod
+ def basis(cls, deg, domain=None, window=None, symbol='x'):
+ """Series basis polynomial of degree `deg`.
+
+ Returns the series representing the basis polynomial of degree `deg`.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ deg : int
+ Degree of the basis polynomial for the series. Must be >= 0.
+ domain : {None, array_like}, optional
+ If given, the array must be of the form ``[beg, end]``, where
+ ``beg`` and ``end`` are the endpoints of the domain. If None is
+ given then the class domain is used. The default is None.
+ window : {None, array_like}, optional
+ If given, the resulting array must be of the form
+ ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
+ the window. If None is given then the class window is used. The
+ default is None.
+ symbol : str, optional
+ Symbol representing the independent variable. Default is 'x'.
+
+ Returns
+ -------
+ new_series : series
+ A series with the coefficient of the `deg` term set to one and
+ all others zero.
+
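+ Examples
+ --------
+ A brief illustrative check:
+
+ >>> from numpy.polynomial import Chebyshev as T
+ >>> T.basis(2).coef   # the T_2 coefficient is one, all others zero
+ array([0., 0., 1.])
+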
+ """
+ if domain is None:
+ domain = cls.domain
+ if window is None:
+ window = cls.window
+ ideg = int(deg)
+
+ if ideg != deg or ideg < 0:
+ raise ValueError("deg must be a non-negative integer")
+ return cls([0]*ideg + [1], domain, window, symbol)
+
+ @classmethod
+ def cast(cls, series, domain=None, window=None):
+ """Convert series to series of this class.
+
+ The `series` is expected to be an instance of some polynomial
+ series of one of the types supported by the numpy.polynomial
+ module, but could be some other class that supports the convert
+ method.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ series : series
+ The series instance to be converted.
+ domain : {None, array_like}, optional
+ If given, the array must be of the form ``[beg, end]``, where
+ ``beg`` and ``end`` are the endpoints of the domain. If None is
+ given then the class domain is used. The default is None.
+ window : {None, array_like}, optional
+ If given, the resulting array must be of the form
+ ``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
+ the window. If None is given then the class window is used. The
+ default is None.
+
+ Returns
+ -------
+ new_series : series
+ A series of the same kind as the calling class and equal to
+ `series` when evaluated.
+
+ See Also
+ --------
+ convert : similar instance method
+
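+ Examples
+ --------
+ A small illustrative sketch, casting a power-series polynomial to the
+ `Chebyshev` basis:
+
+ >>> from numpy.polynomial import Polynomial as P
+ >>> from numpy.polynomial import Chebyshev as T
+ >>> T.cast(P([0, 0, 1])).coef   # x**2 == (T_0 + T_2)/2
+ array([0.5, 0. , 0.5])
+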
+ """
+ if domain is None:
+ domain = cls.domain
+ if window is None:
+ window = cls.window
+ return series.convert(domain, cls, window)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi
new file mode 100644
index 00000000..25c740db
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/_polybase.pyi
@@ -0,0 +1,71 @@
+import abc
+from typing import Any, ClassVar
+
+__all__: list[str]
+
+class ABCPolyBase(abc.ABC):
+ __hash__: ClassVar[None] # type: ignore[assignment]
+ __array_ufunc__: ClassVar[None]
+ maxpower: ClassVar[int]
+ coef: Any
+ @property
+ def symbol(self) -> str: ...
+ @property
+ @abc.abstractmethod
+ def domain(self): ...
+ @property
+ @abc.abstractmethod
+ def window(self): ...
+ @property
+ @abc.abstractmethod
+ def basis_name(self): ...
+ def has_samecoef(self, other): ...
+ def has_samedomain(self, other): ...
+ def has_samewindow(self, other): ...
+ def has_sametype(self, other): ...
+ def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ...
+ def __format__(self, fmt_str): ...
+ def __call__(self, arg): ...
+ def __iter__(self): ...
+ def __len__(self): ...
+ def __neg__(self): ...
+ def __pos__(self): ...
+ def __add__(self, other): ...
+ def __sub__(self, other): ...
+ def __mul__(self, other): ...
+ def __truediv__(self, other): ...
+ def __floordiv__(self, other): ...
+ def __mod__(self, other): ...
+ def __divmod__(self, other): ...
+ def __pow__(self, other): ...
+ def __radd__(self, other): ...
+ def __rsub__(self, other): ...
+ def __rmul__(self, other): ...
+ def __rdiv__(self, other): ...
+ def __rtruediv__(self, other): ...
+ def __rfloordiv__(self, other): ...
+ def __rmod__(self, other): ...
+ def __rdivmod__(self, other): ...
+ def __eq__(self, other): ...
+ def __ne__(self, other): ...
+ def copy(self): ...
+ def degree(self): ...
+ def cutdeg(self, deg): ...
+ def trim(self, tol=...): ...
+ def truncate(self, size): ...
+ def convert(self, domain=..., kind=..., window=...): ...
+ def mapparms(self): ...
+ def integ(self, m=..., k=..., lbnd=...): ...
+ def deriv(self, m=...): ...
+ def roots(self): ...
+ def linspace(self, n=..., domain=...): ...
+ @classmethod
+ def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ...
+ @classmethod
+ def fromroots(cls, roots, domain=..., window=...): ...
+ @classmethod
+ def identity(cls, domain=..., window=...): ...
+ @classmethod
+ def basis(cls, deg, domain=..., window=...): ...
+ @classmethod
+ def cast(cls, series, domain=..., window=...): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py b/venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py
new file mode 100644
index 00000000..c663ffab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.py
@@ -0,0 +1,2076 @@
+"""
+====================================================
+Chebyshev Series (:mod:`numpy.polynomial.chebyshev`)
+====================================================
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Chebyshev series, including a `Chebyshev` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with such polynomials is in the
+docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Classes
+-------
+
+.. autosummary::
+ :toctree: generated/
+
+ Chebyshev
+
+
+Constants
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ chebdomain
+ chebzero
+ chebone
+ chebx
+
+Arithmetic
+----------
+
+.. autosummary::
+ :toctree: generated/
+
+ chebadd
+ chebsub
+ chebmulx
+ chebmul
+ chebdiv
+ chebpow
+ chebval
+ chebval2d
+ chebval3d
+ chebgrid2d
+ chebgrid3d
+
+Calculus
+--------
+
+.. autosummary::
+ :toctree: generated/
+
+ chebder
+ chebint
+
+Misc Functions
+--------------
+
+.. autosummary::
+ :toctree: generated/
+
+ chebfromroots
+ chebroots
+ chebvander
+ chebvander2d
+ chebvander3d
+ chebgauss
+ chebweight
+ chebcompanion
+ chebfit
+ chebpts1
+ chebpts2
+ chebtrim
+ chebline
+ cheb2poly
+ poly2cheb
+ chebinterpolate
+
+See also
+--------
+`numpy.polynomial`
+
+Notes
+-----
+The implementations of multiplication, division, integration, and
+differentiation use the algebraic identities [1]_:
+
+.. math::
+ T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
+ z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
+
+where
+
+.. math:: x = \\frac{z + z^{-1}}{2}.
+
+These identities allow a Chebyshev series to be expressed as a finite,
+symmetric Laurent series. In this module, this sort of Laurent series
+is referred to as a "z-series."
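+
+For example, since :math:`T_2(x) = (z^2 + z^{-2})/2`, the Chebyshev
+series ``[0, 0, 1]`` (i.e. ``T_2``) corresponds to the z-series
+``[1/2, 0, 0, 0, 1/2]``.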
+
+References
+----------
+.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
+ Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
+ (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
+
+"""
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+__all__ = [
+ 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
+ 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
+ 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
+ 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
+ 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
+ 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
+ 'chebgauss', 'chebweight', 'chebinterpolate']
+
+chebtrim = pu.trimcoef
+
+#
+# A collection of functions for manipulating z-series. These are private
+# functions and do minimal error checking.
+#
+
+def _cseries_to_zseries(c):
+ """Convert Chebyshev series to z-series.
+
+ Convert a Chebyshev series to the equivalent z-series. The result is
+ never an empty array. The dtype of the return is the same as that of
+ the input. No checks are run on the arguments as this routine is for
+ internal use.
+
+ Parameters
+ ----------
+ c : 1-D ndarray
+ Chebyshev coefficients, ordered from low to high
+
+ Returns
+ -------
+ zs : 1-D ndarray
+ Odd length symmetric z-series, ordered from low to high.
+
+ """
+ n = c.size
+ zs = np.zeros(2*n-1, dtype=c.dtype)
+ zs[n-1:] = c/2
+ return zs + zs[::-1]
+
+
+def _zseries_to_cseries(zs):
+ """Convert z-series to a Chebyshev series.
+
+ Convert a z series to the equivalent Chebyshev series. The result is
+ never an empty array. The dtype of the return is the same as that of
+ the input. No checks are run on the arguments as this routine is for
+ internal use.
+
+ Parameters
+ ----------
+ zs : 1-D ndarray
+ Odd length symmetric z-series, ordered from low to high.
+
+ Returns
+ -------
+ c : 1-D ndarray
+ Chebyshev coefficients, ordered from low to high.
+
+ """
+ n = (zs.size + 1)//2
+ c = zs[n-1:].copy()
+ c[1:n] *= 2
+ return c
+
+
+def _zseries_mul(z1, z2):
+ """Multiply two z-series.
+
+ Multiply two z-series to produce a z-series.
+
+ Parameters
+ ----------
+ z1, z2 : 1-D ndarray
+ The arrays must be 1-D but this is not checked.
+
+ Returns
+ -------
+ product : 1-D ndarray
+ The product z-series.
+
+ Notes
+ -----
+ This is simply convolution. If symmetric/anti-symmetric z-series are
+ denoted by S/A then the following rules apply:
+
+ S*S, A*A -> S
+ S*A, A*S -> A
+
+ """
+ return np.convolve(z1, z2)
+
+
+def _zseries_div(z1, z2):
+ """Divide the first z-series by the second.
+
+ Divide `z1` by `z2` and return the quotient and remainder as z-series.
+ Warning: this implementation only applies when both z1 and z2 have the
+ same symmetry, which is sufficient for present purposes.
+
+ Parameters
+ ----------
+ z1, z2 : 1-D ndarray
+ The arrays must be 1-D and have the same symmetry, but this is not
+ checked.
+
+ Returns
+ -------
+ (quotient, remainder) : 1-D ndarrays
+ Quotient and remainder as z-series.
+
+ Notes
+ -----
+ This is not the same as polynomial division on account of the desired form
+ of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
+ then the following rules apply:
+
+ S/S -> S,S
+ A/A -> S,A
+
+ The restriction to types of the same symmetry could be fixed but seems like
+ unneeded generality. There is no natural form for the remainder in the case
+ where there is no symmetry.
+
+ """
+ z1 = z1.copy()
+ z2 = z2.copy()
+ lc1 = len(z1)
+ lc2 = len(z2)
+ if lc2 == 1:
+ z1 /= z2
+ return z1, z1[:1]*0
+ elif lc1 < lc2:
+ return z1[:1]*0, z1
+ else:
+ dlen = lc1 - lc2
+ scl = z2[0]
+ z2 /= scl
+ quo = np.empty(dlen + 1, dtype=z1.dtype)
+ i = 0
+ j = dlen
+ while i < j:
+ r = z1[i]
+ quo[i] = z1[i]
+ quo[dlen - i] = r
+ tmp = r*z2
+ z1[i:i+lc2] -= tmp
+ z1[j:j+lc2] -= tmp
+ i += 1
+ j -= 1
+ r = z1[i]
+ quo[i] = r
+ tmp = r*z2
+ z1[i:i+lc2] -= tmp
+ quo /= scl
+ rem = z1[i+1:i-1+lc2].copy()
+ return quo, rem
+
+
+def _zseries_der(zs):
+ """Differentiate a z-series.
+
+ The derivative is with respect to x, not z. This is achieved using the
+ chain rule and the value of dx/dz given in the module notes.
+
+ Parameters
+ ----------
+ zs : z-series
+ The z-series to differentiate.
+
+ Returns
+ -------
+ derivative : z-series
+ The derivative
+
+ Notes
+ -----
+ The zseries for x (ns) has been multiplied by two in order to avoid
+ using floats that are incompatible with Decimal and likely other
+ specialized scalar types. This scaling has been compensated by
+ multiplying the value of zs by two also so that the two cancels in the
+ division.
+
+ """
+ n = len(zs)//2
+ ns = np.array([-1, 0, 1], dtype=zs.dtype)
+ zs *= np.arange(-n, n+1)*2
+ d, r = _zseries_div(zs, ns)
+ return d
+
+
+def _zseries_int(zs):
+ """Integrate a z-series.
+
+ The integral is with respect to x, not z. This is achieved by a change
+ of variable using dx/dz given in the module notes.
+
+ Parameters
+ ----------
+ zs : z-series
+ The z-series to integrate
+
+ Returns
+ -------
+ integral : z-series
+ The indefinite integral
+
+ Notes
+ -----
+ The zseries for x (ns) has been multiplied by two in order to avoid
+ using floats that are incompatible with Decimal and likely other
+ specialized scalar types. This scaling has been compensated by
+ dividing the resulting zs by two.
+
+ """
+ n = 1 + len(zs)//2
+ ns = np.array([-1, 0, 1], dtype=zs.dtype)
+ zs = _zseries_mul(zs, ns)
+ div = np.arange(-n, n+1)*2
+ zs[:n] /= div[:n]
+ zs[n+1:] /= div[n+1:]
+ zs[n] = 0
+ return zs
+
+#
+# Chebyshev series functions
+#
+
+
+def poly2cheb(pol):
+ """
+ Convert a polynomial to a Chebyshev series.
+
+ Convert an array representing the coefficients of a polynomial (relative
+ to the "standard" basis) ordered from lowest degree to highest, to an
+ array of the coefficients of the equivalent Chebyshev series, ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ pol : array_like
+ 1-D array containing the polynomial coefficients
+
+ Returns
+ -------
+ c : ndarray
+ 1-D array containing the coefficients of the equivalent Chebyshev
+ series.
+
+ See Also
+ --------
+ cheb2poly
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy import polynomial as P
+ >>> p = P.Polynomial(range(4))
+ >>> p
+ Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ >>> c = p.convert(kind=P.Chebyshev)
+ >>> c
+ Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.])
+ >>> P.chebyshev.poly2cheb(range(4))
+ array([1. , 3.25, 1. , 0.75])
+
+ """
+ [pol] = pu.as_series([pol])
+ deg = len(pol) - 1
+ res = 0
+ for i in range(deg, -1, -1):
+ res = chebadd(chebmulx(res), pol[i])
+ return res
+
+
+def cheb2poly(c):
+ """
+ Convert a Chebyshev series to a polynomial.
+
+ Convert an array representing the coefficients of a Chebyshev series,
+ ordered from lowest degree to highest, to an array of the coefficients
+ of the equivalent polynomial (relative to the "standard" basis) ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array containing the Chebyshev series coefficients, ordered
+ from lowest order term to highest.
+
+ Returns
+ -------
+ pol : ndarray
+ 1-D array containing the coefficients of the equivalent polynomial
+ (relative to the "standard" basis) ordered from lowest order term
+ to highest.
+
+ See Also
+ --------
+ poly2cheb
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy import polynomial as P
+ >>> c = P.Chebyshev(range(4))
+ >>> c
+ Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ >>> p = c.convert(kind=P.Polynomial)
+ >>> p
+ Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.])
+ >>> P.chebyshev.cheb2poly(range(4))
+ array([-2., -8., 4., 12.])
+
+ """
+ from .polynomial import polyadd, polysub, polymulx
+
+ [c] = pu.as_series([c])
+ n = len(c)
+ if n < 3:
+ return c
+ else:
+ c0 = c[-2]
+ c1 = c[-1]
+ # i is the current degree of c1
+ for i in range(n - 1, 1, -1):
+ tmp = c0
+ c0 = polysub(c[i - 2], c1)
+ c1 = polyadd(tmp, polymulx(c1)*2)
+ return polyadd(c0, polymulx(c1))
+
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Chebyshev default domain.
+chebdomain = np.array([-1, 1])
+
+# Chebyshev coefficients representing zero.
+chebzero = np.array([0])
+
+# Chebyshev coefficients representing one.
+chebone = np.array([1])
+
+# Chebyshev coefficients representing the identity x.
+chebx = np.array([0, 1])
+
+
+def chebline(off, scl):
+ """
+ Chebyshev series whose graph is a straight line.
+
+ Parameters
+ ----------
+ off, scl : scalars
+ The specified line is given by ``off + scl*x``.
+
+ Returns
+ -------
+ y : ndarray
+ This module's representation of the Chebyshev series for
+ ``off + scl*x``.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
+
+ Examples
+ --------
+ >>> import numpy.polynomial.chebyshev as C
+ >>> C.chebline(3,2)
+ array([3, 2])
+ >>> C.chebval(-3, C.chebline(3,2)) # should be -3
+ -3.0
+
+ """
+ if scl != 0:
+ return np.array([off, scl])
+ else:
+ return np.array([off])
+
+
+def chebfromroots(roots):
+ """
+ Generate a Chebyshev series with given roots.
+
+ The function returns the coefficients of the polynomial
+
+ .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+ in Chebyshev form, where the `r_n` are the roots specified in `roots`.
+ If a zero has multiplicity n, then it must appear in `roots` n times.
+ For instance, if 2 is a root of multiplicity three and 3 is a root of
+ multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+ roots can appear in any order.
+
+ If the returned coefficients are `c`, then
+
+ .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
+
+ The coefficient of the last term is not generally 1 for monic
+ polynomials in Chebyshev form.
+
+ Parameters
+ ----------
+ roots : array_like
+ Sequence containing the roots.
+
+ Returns
+ -------
+ out : ndarray
+ 1-D array of coefficients. If all roots are real then `out` is a
+ real array, if some of the roots are complex, then `out` is complex
+ even if all the coefficients in the result are real (see Examples
+ below).
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
+
+ Examples
+ --------
+ >>> import numpy.polynomial.chebyshev as C
+ >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
+ array([ 0. , -0.25, 0. , 0.25])
+ >>> j = complex(0,1)
+ >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
+ array([1.5+0.j, 0. +0.j, 0.5+0.j])
+
+ """
+ return pu._fromroots(chebline, chebmul, roots)
+
+
+def chebadd(c1, c2):
+ """
+ Add one Chebyshev series to another.
+
+ Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
+ are sequences of coefficients ordered from lowest order term to
+ highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Chebyshev series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the Chebyshev series of their sum.
+
+ See Also
+ --------
+ chebsub, chebmulx, chebmul, chebdiv, chebpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the sum of two Chebyshev series
+ is a Chebyshev series (without having to "reproject" the result onto
+ the basis set) so addition, just like that of "standard" polynomials,
+ is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> C.chebadd(c1,c2)
+ array([4., 4., 4.])
+
+ """
+ return pu._add(c1, c2)
+
+
+def chebsub(c1, c2):
+ """
+ Subtract one Chebyshev series from another.
+
+ Returns the difference of two Chebyshev series `c1` - `c2`. The
+ sequences of coefficients are from lowest order term to highest, i.e.,
+ [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Chebyshev series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Chebyshev series coefficients representing their difference.
+
+ See Also
+ --------
+ chebadd, chebmulx, chebmul, chebdiv, chebpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the difference of two Chebyshev
+ series is a Chebyshev series (without having to "reproject" the result
+ onto the basis set) so subtraction, just like that of "standard"
+ polynomials, is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> C.chebsub(c1,c2)
+ array([-2., 0., 2.])
+ >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
+ array([ 2., 0., -2.])
+
+ """
+ return pu._sub(c1, c2)
+
+
+def chebmulx(c):
+ """Multiply a Chebyshev series by x.
+
+ Multiply the polynomial `c` by x, where x is the independent
+ variable.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Chebyshev series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the result of the multiplication.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.5.0
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebmulx([1,2,3])
+ array([1. , 2.5, 1. , 1.5])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ # The zero series needs special treatment
+ if len(c) == 1 and c[0] == 0:
+ return c
+
+ prd = np.empty(len(c) + 1, dtype=c.dtype)
+ prd[0] = c[0]*0
+ prd[1] = c[0]
+ if len(c) > 1:
+ tmp = c[1:]/2
+ prd[2:] = tmp
+ prd[0:-2] += tmp
+ return prd
+
+
+def chebmul(c1, c2):
+ """
+ Multiply one Chebyshev series by another.
+
+ Returns the product of two Chebyshev series `c1` * `c2`. The arguments
+ are sequences of coefficients, from lowest order "term" to highest,
+ e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Chebyshev series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Chebyshev series coefficients representing their product.
+
+ See Also
+ --------
+ chebadd, chebsub, chebmulx, chebdiv, chebpow
+
+ Notes
+ -----
+ In general, the (polynomial) product of two C-series results in terms
+ that are not in the Chebyshev polynomial basis set. Thus, to express
+ the product as a C-series, it is typically necessary to "reproject"
+ the product onto said basis set, which typically produces
+ "unintuitive live" (but correct) results; see Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> C.chebmul(c1,c2) # multiplication requires "reprojection"
+ array([ 6.5, 12. , 12. , 4. , 1.5])
+
+ """
+ # c1, c2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+ z1 = _cseries_to_zseries(c1)
+ z2 = _cseries_to_zseries(c2)
+ prd = _zseries_mul(z1, z2)
+ ret = _zseries_to_cseries(prd)
+ return pu.trimseq(ret)
+
+
+def chebdiv(c1, c2):
+ """
+ Divide one Chebyshev series by another.
+
+ Returns the quotient-with-remainder of two Chebyshev series
+ `c1` / `c2`. The arguments are sequences of coefficients from lowest
+ order "term" to highest, e.g., [1,2,3] represents the series
+ ``T_0 + 2*T_1 + 3*T_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Chebyshev series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ [quo, rem] : ndarrays
+ Of Chebyshev series coefficients representing the quotient and
+ remainder.
+
+ See Also
+ --------
+ chebadd, chebsub, chebmulx, chebmul, chebpow
+
+ Notes
+ -----
+ In general, the (polynomial) division of one C-series by another
+ results in quotient and remainder terms that are not in the Chebyshev
+ polynomial basis set. Thus, to express these results as C-series, it
+ is typically necessary to "reproject" the results onto said basis
+ set, which typically produces "unintuitive" (but correct) results;
+ see Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
+ (array([3.]), array([-8., -4.]))
+ >>> c2 = (0,1,2,3)
+ >>> C.chebdiv(c2,c1) # neither "intuitive"
+ (array([0., 2.]), array([-2., -4.]))
+
+ """
+ # c1, c2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+ if c2[-1] == 0:
+ raise ZeroDivisionError()
+
+ # note: this is more efficient than `pu._div(chebmul, c1, c2)`
+ lc1 = len(c1)
+ lc2 = len(c2)
+ if lc1 < lc2:
+ return c1[:1]*0, c1
+ elif lc2 == 1:
+ return c1/c2[-1], c1[:1]*0
+ else:
+ z1 = _cseries_to_zseries(c1)
+ z2 = _cseries_to_zseries(c2)
+ quo, rem = _zseries_div(z1, z2)
+ quo = pu.trimseq(_zseries_to_cseries(quo))
+ rem = pu.trimseq(_zseries_to_cseries(rem))
+ return quo, rem
+
+
+def chebpow(c, pow, maxpower=16):
+ """Raise a Chebyshev series to a power.
+
+ Returns the Chebyshev series `c` raised to the power `pow`. The
+ argument `c` is a sequence of coefficients ordered from low to high.
+ i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Chebyshev series coefficients ordered from low to
+ high.
+ pow : integer
+ Power to which the series will be raised
+ maxpower : integer, optional
+ Maximum power allowed. This is mainly to limit growth of the series
+ to unmanageable size. Default is 16
+
+ Returns
+ -------
+ coef : ndarray
+ Chebyshev series of power.
+
+ See Also
+ --------
+ chebadd, chebsub, chebmulx, chebmul, chebdiv
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebpow([1, 2, 3, 4], 2)
+ array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ])
+
+ """
+ # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it
+ # avoids converting between z and c series repeatedly
+
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ power = int(pow)
+ if power != pow or power < 0:
+ raise ValueError("Power must be a non-negative integer.")
+ elif maxpower is not None and power > maxpower:
+ raise ValueError("Power is too large")
+ elif power == 0:
+ return np.array([1], dtype=c.dtype)
+ elif power == 1:
+ return c
+ else:
+ # This can be made more efficient by using powers of two
+ # in the usual way.
+ zs = _cseries_to_zseries(c)
+ prd = zs
+ for i in range(2, power + 1):
+ prd = np.convolve(prd, zs)
+ return _zseries_to_cseries(prd)
+
+
+def chebder(c, m=1, scl=1, axis=0):
+ """
+ Differentiate a Chebyshev series.
+
+ Returns the Chebyshev series coefficients `c` differentiated `m` times
+ along `axis`. At each iteration the result is multiplied by `scl` (the
+ scaling factor is for use in a linear change of variable). The argument
+ `c` is an array of coefficients from low to high degree along each
+ axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
+ while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
+ 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
+ ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Chebyshev series coefficients. If c is multidimensional
+ the different axis correspond to different variables with the
+ degree in each axis given by the corresponding index.
+ m : int, optional
+ Number of derivatives taken, must be non-negative. (Default: 1)
+ scl : scalar, optional
+ Each differentiation is multiplied by `scl`. The end result is
+ multiplication by ``scl**m``. This is for use in a linear change of
+ variable. (Default: 1)
+ axis : int, optional
+ Axis over which the derivative is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ der : ndarray
+ Chebyshev series of the derivative.
+
+ See Also
+ --------
+ chebint
+
+ Notes
+ -----
+ In general, the result of differentiating a C-series needs to be
+ "reprojected" onto the C-series basis set. Thus, typically, the
+ result of this function is "unintuitive," albeit correct; see Examples
+ section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c = (1,2,3,4)
+ >>> C.chebder(c)
+ array([14., 12., 24.])
+ >>> C.chebder(c,3)
+ array([96.])
+ >>> C.chebder(c,scl=-1)
+ array([-14., -12., -24.])
+ >>> C.chebder(c,2,-1)
+ array([12., 96.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of derivation must be non-negative")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ n = len(c)
+ if cnt >= n:
+ c = c[:1]*0
+ else:
+ for i in range(cnt):
+ n = n - 1
+ c *= scl
+ der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
+ for j in range(n, 2, -1):
+ der[j - 1] = (2*j)*c[j]
+ c[j - 2] += (j*c[j])/(j - 2)
+ if n > 1:
+ der[1] = 4*c[2]
+ der[0] = c[1]
+ c = der
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+ """
+ Integrate a Chebyshev series.
+
+ Returns the Chebyshev series coefficients `c` integrated `m` times from
+ `lbnd` along `axis`. At each iteration the resulting series is
+ **multiplied** by `scl` and an integration constant, `k`, is added.
+ The scaling factor is for use in a linear change of variable. ("Buyer
+ beware": note that, depending on what one is doing, one may want `scl`
+ to be the reciprocal of what one might expect; for more information,
+ see the Notes section below.) The argument `c` is an array of
+ coefficients from low to high degree along each axis, e.g., [1,2,3]
+ represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
+ represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
+ 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Chebyshev series coefficients. If c is multidimensional
+ the different axis correspond to different variables with the
+ degree in each axis given by the corresponding index.
+ m : int, optional
+ Order of integration, must be positive. (Default: 1)
+ k : {[], list, scalar}, optional
+ Integration constant(s). The value of the first integral at zero
+ is the first value in the list, the value of the second integral
+ at zero is the second value, etc. If ``k == []`` (the default),
+ all constants are set to zero. If ``m == 1``, a single scalar can
+ be given instead of a list.
+ lbnd : scalar, optional
+ The lower bound of the integral. (Default: 0)
+ scl : scalar, optional
+ Following each integration the result is *multiplied* by `scl`
+ before the integration constant is added. (Default: 1)
+ axis : int, optional
+ Axis over which the integral is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ S : ndarray
+ C-series coefficients of the integral.
+
+ Raises
+ ------
+ ValueError
+ If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+ ``np.ndim(scl) != 0``.
+
+ See Also
+ --------
+ chebder
+
+ Notes
+ -----
+ Note that the result of each integration is *multiplied* by `scl`.
+ Why is this important to note? Say one is making a linear change of
+ variable :math:`u = ax + b` in an integral relative to `x`. Then
+ :math:`dx = du/a`, so one will need to set `scl` equal to
+ :math:`1/a`, which is perhaps not what one would have first thought.
+
+ Also note that, in general, the result of integrating a C-series needs
+ to be "reprojected" onto the C-series basis set. Thus, typically,
+ the result of this function is "unintuitive," albeit correct; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c = (1,2,3)
+ >>> C.chebint(c)
+ array([ 0.5, -0.5, 0.5, 0.5])
+ >>> C.chebint(c,3)
+ array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary
+ 0.00625 ])
+ >>> C.chebint(c, k=3)
+ array([ 3.5, -0.5, 0.5, 0.5])
+ >>> C.chebint(c,lbnd=-2)
+ array([ 8.5, -0.5, 0.5, 0.5])
+ >>> C.chebint(c,scl=-2)
+ array([-1., 1., -1., -1.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if not np.iterable(k):
+ k = [k]
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of integration must be non-negative")
+ if len(k) > cnt:
+ raise ValueError("Too many integration constants")
+ if np.ndim(lbnd) != 0:
+ raise ValueError("lbnd must be a scalar.")
+ if np.ndim(scl) != 0:
+ raise ValueError("scl must be a scalar.")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ k = list(k) + [0]*(cnt - len(k))
+ for i in range(cnt):
+ n = len(c)
+ c *= scl
+ if n == 1 and np.all(c[0] == 0):
+ c[0] += k[i]
+ else:
+ tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+ tmp[0] = c[0]*0
+ tmp[1] = c[0]
+ if n > 1:
+ tmp[2] = c[1]/4
+ for j in range(2, n):
+ tmp[j + 1] = c[j]/(2*(j + 1))
+ tmp[j - 1] -= c[j]/(2*(j - 1))
+ tmp[0] += k[i] - chebval(lbnd, tmp)
+ c = tmp
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def chebval(x, c, tensor=True):
+ """
+ Evaluate a Chebyshev series at points x.
+
+ If `c` is of length `n + 1`, this function returns the value:
+
+ .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+ `c` is multidimensional, then the shape of the result depends on the
+ value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+ x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+ scalars have shape ().
+
+ Trailing zeros in the coefficients will be used in the evaluation, so
+ they should be avoided if efficiency is a concern.
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+ themselves and with the elements of `c`.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree n are contained in c[n]. If `c` is multidimensional the
+ remaining indices enumerate multiple polynomials. In the two
+ dimensional case the coefficients may be thought of as stored in
+ the columns of `c`.
+ tensor : boolean, optional
+ If True, the shape of the coefficient array is extended with ones
+ on the right, one for each dimension of `x`. Scalars have dimension 0
+ for this action. The result is that every column of coefficients in
+ `c` is evaluated for every element of `x`. If False, `x` is broadcast
+ over the columns of `c` for the evaluation. This keyword is useful
+ when `c` is multidimensional. The default value is True.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, algebra_like
+ The shape of the return value is described above.
+
+ See Also
+ --------
+ chebval2d, chebgrid2d, chebval3d, chebgrid3d
+
+ Notes
+ -----
+ The evaluation uses Clenshaw recursion, aka synthetic division.
+
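+ Examples
+ --------
+ A short illustrative check; ``[1, 2, 3]`` is ``T_0 + 2*T_1 + 3*T_2``
+ and every ``T_i(1) == 1``:
+
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebval(1, [1, 2, 3])
+ 6.0
+ >>> C.chebval([0, 1], [1, 2, 3])
+ array([-2.,  6.])
+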
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ if len(c) == 1:
+ c0 = c[0]
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]
+ c1 = c[1]
+ else:
+ x2 = 2*x
+ c0 = c[-2]
+ c1 = c[-1]
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ c0 = c[-i] - c1
+ c1 = tmp + c1*x2
+ return c0 + c1*x
+
+
+def chebval2d(x, y, c):
+ """
+ Evaluate a 2-D Chebyshev series at points (x, y).
+
+ This function returns the values:
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+ If `c` is a 1-D array a one is implicitly appended to its shape to make
+ it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and if it isn't an ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+ dimension greater than 2 the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional Chebyshev series at points formed
+ from pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ chebval, chebgrid2d, chebval3d, chebgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
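+ Examples
+ --------
+ A minimal illustrative check; with ``c = [[1, 0], [0, 1]]`` the series
+ is ``T_0(x)*T_0(y) + T_1(x)*T_1(y) = 1 + x*y``:
+
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebval2d(1, 2, [[1, 0], [0, 1]])
+ 3.0
+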
+ """
+ return pu._valnd(chebval, c, x, y)
+
+
+def chebgrid2d(x, y, c):
+ """
+ Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape + y.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points in the
+ Cartesian product of `x` and `y`. If `x` or `y` is a list or
+ tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+ greater than two the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional Chebyshev series at points in the
+ Cartesian product of `x` and `y`.
+
+ See Also
+ --------
+ chebval, chebval2d, chebval3d, chebgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
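+ Examples
+ --------
+ A small illustrative check, evaluating ``1 + x*y`` on a 2x2 grid:
+
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebgrid2d([0, 1], [0, 1], [[1, 0], [0, 1]])
+ array([[1., 1.],
+        [1., 2.]])
+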
+ """
+ return pu._gridnd(chebval, c, x, y)
+
+
+def chebval3d(x, y, z, c):
+ """
+ Evaluate a 3-D Chebyshev series at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+ they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ chebval, chebval2d, chebgrid2d, chebgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
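+ Examples
+ --------
+ A brief illustrative check; the only nonzero coefficient below is
+ ``c[1][1][1]``, so the series is ``T_1(x)*T_1(y)*T_1(z) = x*y*z``:
+
+ >>> from numpy.polynomial import chebyshev as C
+ >>> c = [[[0, 0], [0, 0]], [[0, 0], [0, 1]]]
+ >>> C.chebval3d(1, 2, 3, c)
+ 6.0
+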
+ """
+ return pu._valnd(chebval, c, x, y, z)
+
+
+def chebgrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+ are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional Chebyshev series at points in
+ the Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ chebval, chebval2d, chebgrid2d, chebval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(chebval, c, x, y, z)
+
+
+def chebvander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = T_i(x),
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the Chebyshev polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
+ ``chebval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of Chebyshev series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding Chebyshev polynomial. The dtype will be the same as
+ the converted `x`.
+
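+ Examples
+ --------
+ A small illustrative check; the columns are ``T_0``, ``T_1`` and
+ ``T_2`` evaluated at the sample points:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebvander(np.array([-1., 0., 1.]), 2)
+ array([[ 1., -1.,  1.],
+        [ 1.,  0., -1.],
+        [ 1.,  1.,  1.]])
+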
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ # Use forward recursion to generate the entries.
+ v[0] = x*0 + 1
+ if ideg > 0:
+ x2 = 2*x
+ v[1] = x
+ for i in range(2, ideg + 1):
+ v[i] = v[i-1]*x2 - v[i-2]
+ return np.moveaxis(v, 0, -1)
+
+
+def chebvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the Chebyshev polynomials.
+
+ If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D Chebyshev
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ chebvander, chebvander3d, chebval2d, chebval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
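+ Examples
+ --------
+ A short illustrative check of the equivalence with `chebval2d` noted
+ above:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial import chebyshev as C
+ >>> x = y = np.array([-1., 0., 1.])
+ >>> c = np.arange(4.).reshape(2, 2)
+ >>> V = C.chebvander2d(x, y, [1, 1])
+ >>> np.allclose(np.dot(V, c.flat), C.chebval2d(x, y, c))
+ True
+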
+ """
+ return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg)
+
+
+def chebvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the Chebyshev polynomials.
+
+ If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D Chebyshev
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ chebvander, chebvander2d, chebval2d, chebval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg)
+
+
+def chebfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Chebyshev series to data.
+
+ Return the coefficients of a Chebyshev series of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
+
+ where `n` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer,
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (`M`,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Chebyshev coefficients ordered from low to high. If `y` was 2-D,
+ the coefficients for the data in column k of `y` are in column
+ `k`.
+
+ [residuals, rank, singular_values, rcond] : list
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - singular_values -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full == False``. The
+ warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
+ chebval : Evaluates a Chebyshev series.
+ chebvander : Vandermonde matrix of Chebyshev series.
+ chebweight : Chebyshev weight function.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution is the coefficients of the Chebyshev series `p` that
+ minimizes the sum of the weighted squared errors
+
+ .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+ where :math:`w_j` are the weights. This problem is solved by setting up
+ as the (typically) overdetermined matrix equation
+
+ .. math:: V(x) * c = w * y,
+
+ where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
+ coefficients to be solved for, `w` are the weights, and `y` are the
+ observed values. This equation is then solved using the singular value
+ decomposition of `V`.
+
+ If some of the singular values of `V` are so small that they are
+ neglected, then a `RankWarning` will be issued. This means that the
+ coefficient values may be poorly determined. Using a lower order fit
+ will usually get rid of the warning. The `rcond` parameter can also be
+ set to a value smaller than its default, but the resulting fit may be
+ spurious and have large contributions from roundoff error.
+
+ Fits using Chebyshev series are usually better conditioned than fits
+ using power series, but much can depend on the distribution of the
+ sample points and the smoothness of the data. If the quality of the fit
+ is inadequate splines may be a good alternative.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
+
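+ A minimal illustrative sketch, recovering the coefficients of an exact
+ series from sampled values:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial import chebyshev as C
+ >>> x = np.linspace(-1, 1, 51)
+ >>> y = C.chebval(x, [1, 2, 3])
+ >>> C.chebfit(x, y, 2)
+ array([1., 2., 3.])  # may vary
+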
+ """
+ return pu._fit(chebvander, x, y, deg, rcond, full, w)
+
+
+def chebcompanion(c):
+ """Return the scaled companion matrix of c.
+
+ The basis polynomials are scaled so that the companion matrix is
+ symmetric when `c` is a Chebyshev basis polynomial. This provides
+ better eigenvalue estimates than the unscaled case and for basis
+ polynomials the eigenvalues are guaranteed to be real if
+ `numpy.linalg.eigvalsh` is used to obtain them.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Chebyshev series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Scaled companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
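+ Examples
+ --------
+ A brief illustrative check; for ``T_2`` the scaled companion matrix is
+ symmetric and its eigenvalues are the roots ``+-1/sqrt(2)``:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial import chebyshev as C
+ >>> np.linalg.eigvalsh(C.chebcompanion([0, 0, 1]))
+ array([-0.70710678,  0.70710678])
+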
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[-c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
+ top = mat.reshape(-1)[1::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[0] = np.sqrt(.5)
+ top[1:] = 1/2
+ bot[...] = top
+ mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
+ return mat
+
+
+def chebroots(c):
+ """
+ Compute the roots of a Chebyshev series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * T_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ The Chebyshev series basis polynomials aren't powers of `x` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> import numpy.polynomial.chebyshev as cheb
+ >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
+ array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([-c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = chebcompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+def chebinterpolate(func, deg, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the Chebyshev series that interpolates `func` at the Chebyshev
+ points of the first kind in the interval [-1, 1]. The interpolating
+ series tends to a minimax approximation to `func` with increasing `deg`
+ if the function is continuous in the interval.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be approximated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no extra
+ arguments.
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,)
+ Chebyshev coefficients of the interpolating series ordered from low to
+ high.
+
+ Examples
+ --------
+ >>> import numpy.polynomial.chebyshev as C
+ >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8)
+ array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17,
+ -5.42457905e-02, -2.71387850e-16, 4.51658839e-03,
+ 2.46716228e-17, -3.79694221e-04, -3.26899002e-16])
+
+ Notes
+ -----
+
+ The Chebyshev polynomials used in the interpolation are orthogonal when
+ sampled at the Chebyshev points of the first kind. If it is desired to
+ constrain some of the coefficients, they can simply be set to the desired
+ value after the interpolation; no new interpolation or fit is needed. This
+ is especially useful if it is known a priori that some of the coefficients
+ are zero. For instance, if the function is even then the coefficients of
+ the terms of odd degree in the result can be set to zero.
+
+ """
+ deg = np.asarray(deg)
+
+ # check arguments.
+ if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
+ raise TypeError("deg must be an int")
+ if deg < 0:
+ raise ValueError("expected deg >= 0")
+
+ order = deg + 1
+ xcheb = chebpts1(order)
+ yfunc = func(xcheb, *args)
+ m = chebvander(xcheb, deg)
+ c = np.dot(m.T, yfunc)
+ c[0] /= order
+ c[1:] /= 0.5*order
+
+ return c
+
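+# Editorial sketch, not part of the upstream source: the returned series
+# interpolates, so it reproduces the sampled function at the Chebyshev
+# points of the first kind up to roundoff:
+#
+# >>> x = chebpts1(5)
+# >>> np.allclose(chebval(x, chebinterpolate(np.exp, 4)), np.exp(x))
+# True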
+
+def chebgauss(deg):
+ """
+ Gauss-Chebyshev quadrature.
+
+ Computes the sample points and weights for Gauss-Chebyshev quadrature.
+ These sample points and weights will correctly integrate polynomials of
+ degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
+ the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
+
+ Parameters
+ ----------
+ deg : int
+ Number of sample points and weights. It must be >= 1.
+
+ Returns
+ -------
+ x : ndarray
+ 1-D ndarray containing the sample points.
+ y : ndarray
+ 1-D ndarray containing the weights.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ The results have only been tested up to degree 100; higher degrees may
+ be problematic. For Gauss-Chebyshev there are closed form solutions for
+ the sample points and weights. If n = `deg`, then
+
+ .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
+
+ .. math:: w_i = \\pi / n
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
+
+ x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
+ w = np.ones(ideg)*(np.pi/ideg)
+
+ return x, w
+
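+# Editorial sketch, not part of the upstream source: a 3-point rule is
+# exact for x**2/sqrt(1 - x**2) over [-1, 1], whose integral is pi/2:
+#
+# >>> x, w = chebgauss(3)
+# >>> np.sum(w * x**2)
+# 1.5707963267948966  # may vary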
+
+def chebweight(x):
+ """
+ The weight function of the Chebyshev polynomials.
+
+ The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
+ integration is :math:`[-1, 1]`. The Chebyshev polynomials are
+ orthogonal, but not normalized, with respect to this weight function.
+
+ Parameters
+ ----------
+ x : array_like
+ Values at which the weight function will be computed.
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
+ return w
+
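+# Editorial sketch, not part of the upstream source:
+#
+# >>> chebweight(np.array([0., 0.5]))  # 1/sqrt(1 - x**2) at 0 and 0.5
+# array([1.        , 1.15470054])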
+
+def chebpts1(npts):
+ """
+ Chebyshev points of the first kind.
+
+ The Chebyshev points of the first kind are the points ``cos(x)``,
+ where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
+
+ Parameters
+ ----------
+ npts : int
+ Number of sample points desired.
+
+ Returns
+ -------
+ pts : ndarray
+ The Chebyshev points of the first kind.
+
+ See Also
+ --------
+ chebpts2
+
+ Notes
+ -----
+
+ .. versionadded:: 1.5.0
+
+ """
+ _npts = int(npts)
+ if _npts != npts:
+ raise ValueError("npts must be integer")
+ if _npts < 1:
+ raise ValueError("npts must be >= 1")
+
+ x = 0.5 * np.pi / _npts * np.arange(-_npts+1, _npts+1, 2)
+ return np.sin(x)
+
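+# Editorial sketch, not part of the upstream source: the four points
+# cos(pi*(k + .5)/4), k = 3, 2, 1, 0, in ascending order:
+#
+# >>> chebpts1(4)
+# array([-0.92387953, -0.38268343,  0.38268343,  0.92387953])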
+
+def chebpts2(npts):
+ """
+ Chebyshev points of the second kind.
+
+ The Chebyshev points of the second kind are the points ``cos(x)``,
+ where ``x = [pi*k/(npts - 1) for k in range(npts)]`` sorted in ascending
+ order.
+
+ Parameters
+ ----------
+ npts : int
+ Number of sample points desired.
+
+ Returns
+ -------
+ pts : ndarray
+ The Chebyshev points of the second kind.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.5.0
+
+ """
+ _npts = int(npts)
+ if _npts != npts:
+ raise ValueError("npts must be integer")
+ if _npts < 2:
+ raise ValueError("npts must be >= 2")
+
+ x = np.linspace(-np.pi, 0, _npts)
+ return np.cos(x)
+
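+# Editorial sketch, not part of the upstream source: the extreme points
+# cos(pi*k/3), k = 3, 2, 1, 0, in ascending order:
+#
+# >>> chebpts2(4)
+# array([-1. , -0.5,  0.5,  1. ])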
+
+#
+# Chebyshev series class
+#
+
+class Chebyshev(ABCPolyBase):
+ """A Chebyshev series class.
+
+ The Chebyshev class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ methods listed below.
+
+ Parameters
+ ----------
+ coef : array_like
+ Chebyshev coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
+ """
+ # Virtual Functions
+ _add = staticmethod(chebadd)
+ _sub = staticmethod(chebsub)
+ _mul = staticmethod(chebmul)
+ _div = staticmethod(chebdiv)
+ _pow = staticmethod(chebpow)
+ _val = staticmethod(chebval)
+ _int = staticmethod(chebint)
+ _der = staticmethod(chebder)
+ _fit = staticmethod(chebfit)
+ _line = staticmethod(chebline)
+ _roots = staticmethod(chebroots)
+ _fromroots = staticmethod(chebfromroots)
+
+ @classmethod
+ def interpolate(cls, func, deg, domain=None, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the series that interpolates `func` at the Chebyshev points of
+ the first kind scaled and shifted to the `domain`. The resulting series
+ tends to a minmax approximation of `func` when the function is
+ continuous in the domain.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be interpolated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial.
+ domain : {None, [beg, end]}, optional
+ Domain over which `func` is interpolated. The default is None, in
+ which case the domain is [-1, 1].
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no
+ extra arguments.
+
+ Returns
+ -------
+ polynomial : Chebyshev instance
+ Interpolating Chebyshev instance.
+
+ Notes
+ -----
+ See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
+
+ """
+ if domain is None:
+ domain = cls.domain
+ xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
+ coef = chebinterpolate(xfunc, deg)
+ return cls(coef, domain=domain)
+
+ # Virtual properties
+ domain = np.array(chebdomain)
+ window = np.array(chebdomain)
+ basis_name = 'T'
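+
+
+# Editorial sketch, not part of the upstream source: the interpolation
+# error is roughly the size of the first neglected coefficient, so a
+# loose bound is safe to assert:
+#
+# >>> p = Chebyshev.interpolate(np.tanh, 8)
+# >>> abs(p(0.5) - np.tanh(0.5)) < 1e-3
+# True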
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi
new file mode 100644
index 00000000..e8113dba
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/chebyshev.pyi
@@ -0,0 +1,51 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+chebtrim = trimcoef
+
+def poly2cheb(pol): ...
+def cheb2poly(c): ...
+
+chebdomain: ndarray[Any, dtype[int_]]
+chebzero: ndarray[Any, dtype[int_]]
+chebone: ndarray[Any, dtype[int_]]
+chebx: ndarray[Any, dtype[int_]]
+
+def chebline(off, scl): ...
+def chebfromroots(roots): ...
+def chebadd(c1, c2): ...
+def chebsub(c1, c2): ...
+def chebmulx(c): ...
+def chebmul(c1, c2): ...
+def chebdiv(c1, c2): ...
+def chebpow(c, pow, maxpower=...): ...
+def chebder(c, m=..., scl=..., axis=...): ...
+def chebint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
+def chebval(x, c, tensor=...): ...
+def chebval2d(x, y, c): ...
+def chebgrid2d(x, y, c): ...
+def chebval3d(x, y, z, c): ...
+def chebgrid3d(x, y, z, c): ...
+def chebvander(x, deg): ...
+def chebvander2d(x, y, deg): ...
+def chebvander3d(x, y, z, deg): ...
+def chebfit(x, y, deg, rcond=..., full=..., w=...): ...
+def chebcompanion(c): ...
+def chebroots(c): ...
+def chebinterpolate(func, deg, args=...): ...
+def chebgauss(deg): ...
+def chebweight(x): ...
+def chebpts1(npts): ...
+def chebpts2(npts): ...
+
+class Chebyshev(ABCPolyBase):
+ @classmethod
+ def interpolate(cls, func, deg, domain=..., args=...): ...
+ domain: Any
+ window: Any
+ basis_name: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/hermite.py b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite.py
new file mode 100644
index 00000000..e2033912
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite.py
@@ -0,0 +1,1697 @@
+"""
+==============================================================
+Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`)
+==============================================================
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Hermite series, including a `Hermite` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with such polynomials is in the
+docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Hermite
+
+Constants
+---------
+.. autosummary::
+ :toctree: generated/
+
+ hermdomain
+ hermzero
+ hermone
+ hermx
+
+Arithmetic
+----------
+.. autosummary::
+ :toctree: generated/
+
+ hermadd
+ hermsub
+ hermmulx
+ hermmul
+ hermdiv
+ hermpow
+ hermval
+ hermval2d
+ hermval3d
+ hermgrid2d
+ hermgrid3d
+
+Calculus
+--------
+.. autosummary::
+ :toctree: generated/
+
+ hermder
+ hermint
+
+Misc Functions
+--------------
+.. autosummary::
+ :toctree: generated/
+
+ hermfromroots
+ hermroots
+ hermvander
+ hermvander2d
+ hermvander3d
+ hermgauss
+ hermweight
+ hermcompanion
+ hermfit
+ hermtrim
+ hermline
+ herm2poly
+ poly2herm
+
+See also
+--------
+`numpy.polynomial`
+
+"""
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+__all__ = [
+ 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
+ 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
+ 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
+ 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
+ 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
+ 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']
+
+hermtrim = pu.trimcoef
+
+
+def poly2herm(pol):
+ """
+ poly2herm(pol)
+
+ Convert a polynomial to a Hermite series.
+
+ Convert an array representing the coefficients of a polynomial (relative
+ to the "standard" basis) ordered from lowest degree to highest, to an
+ array of the coefficients of the equivalent Hermite series, ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ pol : array_like
+ 1-D array containing the polynomial coefficients
+
+ Returns
+ -------
+ c : ndarray
+ 1-D array containing the coefficients of the equivalent Hermite
+ series.
+
+ See Also
+ --------
+ herm2poly
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import poly2herm
+ >>> poly2herm(np.arange(4))
+ array([1. , 2.75 , 0.5 , 0.375])
+
+ """
+ [pol] = pu.as_series([pol])
+ deg = len(pol) - 1
+ res = 0
+ for i in range(deg, -1, -1):
+ res = hermadd(hermmulx(res), pol[i])
+ return res
+
+
+def herm2poly(c):
+ """
+ Convert a Hermite series to a polynomial.
+
+ Convert an array representing the coefficients of a Hermite series,
+ ordered from lowest degree to highest, to an array of the coefficients
+ of the equivalent polynomial (relative to the "standard" basis) ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array containing the Hermite series coefficients, ordered
+ from lowest order term to highest.
+
+ Returns
+ -------
+ pol : ndarray
+ 1-D array containing the coefficients of the equivalent polynomial
+ (relative to the "standard" basis) ordered from lowest order term
+ to highest.
+
+ See Also
+ --------
+ poly2herm
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import herm2poly
+ >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
+ array([0., 1., 2., 3.])
+
+ """
+ from .polynomial import polyadd, polysub, polymulx
+
+ [c] = pu.as_series([c])
+ n = len(c)
+ if n == 1:
+ return c
+ if n == 2:
+ c[1] *= 2
+ return c
+ else:
+ c0 = c[-2]
+ c1 = c[-1]
+ # i is the current degree of c1
+ for i in range(n - 1, 1, -1):
+ tmp = c0
+ c0 = polysub(c[i - 2], c1*(2*(i - 1)))
+ c1 = polyadd(tmp, polymulx(c1)*2)
+ return polyadd(c0, polymulx(c1)*2)
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal; `hermx` is the
+# exception, since representing x requires the float coefficient 1/2.
+#
+
+# Hermite
+hermdomain = np.array([-1, 1])
+
+# Hermite coefficients representing zero.
+hermzero = np.array([0])
+
+# Hermite coefficients representing one.
+hermone = np.array([1])
+
+# Hermite coefficients representing the identity x.
+hermx = np.array([0, 1/2])
+
+
+def hermline(off, scl):
+ """
+ Hermite series whose graph is a straight line.
+
+ Parameters
+ ----------
+ off, scl : scalars
+ The specified line is given by ``off + scl*x``.
+
+ Returns
+ -------
+ y : ndarray
+ This module's representation of the Hermite series for
+ ``off + scl*x``.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite_e.hermeline
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermline, hermval
+ >>> hermval(0,hermline(3, 2))
+ 3.0
+ >>> hermval(1,hermline(3, 2))
+ 5.0
+
+ """
+ if scl != 0:
+ return np.array([off, scl/2])
+ else:
+ return np.array([off])
+
+
+def hermfromroots(roots):
+ """
+ Generate a Hermite series with given roots.
+
+ The function returns the coefficients of the polynomial
+
+ .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+ in Hermite form, where the `r_n` are the roots specified in `roots`.
+ If a zero has multiplicity n, then it must appear in `roots` n times.
+ For instance, if 2 is a root of multiplicity three and 3 is a root of
+ multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+ roots can appear in any order.
+
+ If the returned coefficients are `c`, then
+
+ .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)
+
+ The coefficient of the last term is not generally 1 for monic
+ polynomials in Hermite form.
+
+ Parameters
+ ----------
+ roots : array_like
+ Sequence containing the roots.
+
+ Returns
+ -------
+ out : ndarray
+ 1-D array of coefficients. If all roots are real then `out` is a
+ real array, if some of the roots are complex, then `out` is complex
+ even if all the coefficients in the result are real (see Examples
+ below).
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.hermite_e.hermefromroots
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermfromroots, hermval
+ >>> coef = hermfromroots((-1, 0, 1))
+ >>> hermval((-1, 0, 1), coef)
+ array([0., 0., 0.])
+ >>> coef = hermfromroots((-1j, 1j))
+ >>> hermval((-1j, 1j), coef)
+ array([0.+0.j, 0.+0.j])
+
+ """
+ return pu._fromroots(hermline, hermmul, roots)
+
+
+def hermadd(c1, c2):
+ """
+ Add one Hermite series to another.
+
+ Returns the sum of two Hermite series `c1` + `c2`. The arguments
+ are sequences of coefficients ordered from lowest order term to
+ highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the Hermite series of their sum.
+
+ See Also
+ --------
+ hermsub, hermmulx, hermmul, hermdiv, hermpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the sum of two Hermite series
+ is a Hermite series (without having to "reproject" the result onto
+ the basis set) so addition, just like that of "standard" polynomials,
+ is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermadd
+ >>> hermadd([1, 2, 3], [1, 2, 3, 4])
+ array([2., 4., 6., 4.])
+
+ """
+ return pu._add(c1, c2)
+
+
+def hermsub(c1, c2):
+ """
+ Subtract one Hermite series from another.
+
+ Returns the difference of two Hermite series `c1` - `c2`. The
+ sequences of coefficients are from lowest order term to highest, i.e.,
+ [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Hermite series coefficients representing their difference.
+
+ See Also
+ --------
+ hermadd, hermmulx, hermmul, hermdiv, hermpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the difference of two Hermite
+ series is a Hermite series (without having to "reproject" the result
+ onto the basis set) so subtraction, just like that of "standard"
+ polynomials, is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermsub
+ >>> hermsub([1, 2, 3, 4], [1, 2, 3])
+ array([0., 0., 0., 4.])
+
+ """
+ return pu._sub(c1, c2)
+
+
+def hermmulx(c):
+ """Multiply a Hermite series by x.
+
+ Multiply the Hermite series `c` by x, where x is the independent
+ variable.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the result of the multiplication.
+
+ See Also
+ --------
+ hermadd, hermsub, hermmul, hermdiv, hermpow
+
+ Notes
+ -----
+ The multiplication uses the recursion relationship for Hermite
+ polynomials in the form
+
+ .. math::
+
+ xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermmulx
+ >>> hermmulx([1, 2, 3])
+ array([2. , 6.5, 1. , 1.5])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ # The zero series needs special treatment
+ if len(c) == 1 and c[0] == 0:
+ return c
+
+ prd = np.empty(len(c) + 1, dtype=c.dtype)
+ prd[0] = c[0]*0
+ prd[1] = c[0]/2
+ for i in range(1, len(c)):
+ prd[i + 1] = c[i]/2
+ prd[i - 1] += c[i]*i
+ return prd
+
+
+def hermmul(c1, c2):
+ """
+ Multiply one Hermite series by another.
+
+ Returns the product of two Hermite series `c1` * `c2`. The arguments
+ are sequences of coefficients, from lowest order "term" to highest,
+ e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Hermite series coefficients representing their product.
+
+ See Also
+ --------
+ hermadd, hermsub, hermmulx, hermdiv, hermpow
+
+ Notes
+ -----
+ In general, the (polynomial) product of two Hermite series results in terms
+ that are not in the Hermite polynomial basis set. Thus, to express
+ the product as a Hermite series, it is necessary to "reproject" the
+ product onto said basis set, which may produce "unintuitive" (but
+ correct) results; see Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermmul
+ >>> hermmul([1, 2, 3], [0, 1, 2])
+ array([52., 29., 52., 7., 6.])
+
+ """
+ # s1, s2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+
+ if len(c1) > len(c2):
+ c = c2
+ xs = c1
+ else:
+ c = c1
+ xs = c2
+
+ if len(c) == 1:
+ c0 = c[0]*xs
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]*xs
+ c1 = c[1]*xs
+ else:
+ nd = len(c)
+ c0 = c[-2]*xs
+ c1 = c[-1]*xs
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
+ c1 = hermadd(tmp, hermmulx(c1)*2)
+ return hermadd(c0, hermmulx(c1)*2)
+
+
+def hermdiv(c1, c2):
+ """
+ Divide one Hermite series by another.
+
+ Returns the quotient-with-remainder of two Hermite series
+ `c1` / `c2`. The arguments are sequences of coefficients from lowest
+ order "term" to highest, e.g., [1,2,3] represents the series
+ ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ [quo, rem] : ndarrays
+ Of Hermite series coefficients representing the quotient and
+ remainder.
+
+ See Also
+ --------
+ hermadd, hermsub, hermmulx, hermmul, hermpow
+
+ Notes
+ -----
+ In general, the (polynomial) division of one Hermite series by another
+ results in quotient and remainder terms that are not in the Hermite
+ polynomial basis set. Thus, to express these results as a Hermite
+ series, it is necessary to "reproject" the results onto the Hermite
+ basis set, which may produce "unintuitive" (but correct) results; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermdiv
+ >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2])
+ (array([1., 2., 3.]), array([0.]))
+ >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2])
+ (array([1., 2., 3.]), array([2., 2.]))
+ >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2])
+ (array([1., 2., 3.]), array([1., 1.]))
+
+ """
+ return pu._div(hermmul, c1, c2)
+
+
+def hermpow(c, pow, maxpower=16):
+ """Raise a Hermite series to a power.
+
+ Returns the Hermite series `c` raised to the power `pow`. The
+ argument `c` is a sequence of coefficients ordered from low to high,
+ i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Hermite series coefficients ordered from low to
+ high.
+ pow : integer
+ Power to which the series will be raised
+ maxpower : integer, optional
+ Maximum power allowed. This is mainly to limit growth of the series
+ to unmanageable size. Default is 16
+
+ Returns
+ -------
+ coef : ndarray
+ Hermite series of `c` raised to the power `pow`.
+
+ See Also
+ --------
+ hermadd, hermsub, hermmulx, hermmul, hermdiv
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermpow
+ >>> hermpow([1, 2, 3], 2)
+ array([81., 52., 82., 12., 9.])
+
+ """
+ return pu._pow(hermmul, c, pow, maxpower)
+
+
+def hermder(c, m=1, scl=1, axis=0):
+ """
+ Differentiate a Hermite series.
+
+ Returns the Hermite series coefficients `c` differentiated `m` times
+ along `axis`. At each iteration the result is multiplied by `scl` (the
+ scaling factor is for use in a linear change of variable). The argument
+ `c` is an array of coefficients from low to high degree along each
+ axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
+ while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
+ 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
+ ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Hermite series coefficients. If `c` is multidimensional the
+ different axes correspond to different variables with the degree in
+ each axis given by the corresponding index.
+ m : int, optional
+ Number of derivatives taken, must be non-negative. (Default: 1)
+ scl : scalar, optional
+ Each differentiation is multiplied by `scl`. The end result is
+ multiplication by ``scl**m``. This is for use in a linear change of
+ variable. (Default: 1)
+ axis : int, optional
+ Axis over which the derivative is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ der : ndarray
+ Hermite series of the derivative.
+
+ See Also
+ --------
+ hermint
+
+ Notes
+ -----
+ In general, the result of differentiating a Hermite series does not
+ resemble the same operation on a power series. Thus the result of this
+ function may be "unintuitive," albeit correct; see Examples section
+ below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermder
+ >>> hermder([ 1. , 0.5, 0.5, 0.5])
+ array([1., 2., 3.])
+ >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2)
+ array([1., 2., 3.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of derivation must be non-negative")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ n = len(c)
+ if cnt >= n:
+ c = c[:1]*0
+ else:
+ for i in range(cnt):
+ n = n - 1
+ c *= scl
+ der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
+ for j in range(n, 0, -1):
+ der[j - 1] = (2*j)*c[j]
+ c = der
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+ """
+ Integrate a Hermite series.
+
+ Returns the Hermite series coefficients `c` integrated `m` times from
+ `lbnd` along `axis`. At each iteration the resulting series is
+ **multiplied** by `scl` and an integration constant, `k`, is added.
+ The scaling factor is for use in a linear change of variable. ("Buyer
+ beware": note that, depending on what one is doing, one may want `scl`
+ to be the reciprocal of what one might expect; for more information,
+ see the Notes section below.) The argument `c` is an array of
+ coefficients from low to high degree along each axis, e.g., [1,2,3]
+ represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
+ represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
+ 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Hermite series coefficients. If c is multidimensional the
+ different axes correspond to different variables with the degree in
+ each axis given by the corresponding index.
+ m : int, optional
+ Order of integration, must be positive. (Default: 1)
+ k : {[], list, scalar}, optional
+ Integration constant(s). The value of the first integral at
+ ``lbnd`` is the first value in the list, the value of the second
+ integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
+ default), all constants are set to zero. If ``m == 1``, a single
+ scalar can be given instead of a list.
+ lbnd : scalar, optional
+ The lower bound of the integral. (Default: 0)
+ scl : scalar, optional
+ Following each integration the result is *multiplied* by `scl`
+ before the integration constant is added. (Default: 1)
+ axis : int, optional
+ Axis over which the integral is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ S : ndarray
+ Hermite series coefficients of the integral.
+
+ Raises
+ ------
+ ValueError
+ If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+ ``np.ndim(scl) != 0``.
+
+ See Also
+ --------
+ hermder
+
+ Notes
+ -----
+ Note that the result of each integration is *multiplied* by `scl`.
+ Why is this important to note? Say one is making a linear change of
+ variable :math:`u = ax + b` in an integral relative to `x`. Then
+ :math:`dx = du/a`, so one will need to set `scl` equal to
+ :math:`1/a` - perhaps not what one would have first thought.
+
+ Also note that, in general, the result of integrating a Hermite series
+ needs to be "reprojected" onto the Hermite basis set. Thus, typically,
+ the result of this function is "unintuitive," albeit correct; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermint
+ >>> hermint([1,2,3]) # integrate once, value 0 at 0.
+ array([1. , 0.5, 0.5, 0.5])
+ >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
+ array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
+ >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
+ array([2. , 0.5, 0.5, 0.5])
+ >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
+ array([-2. , 0.5, 0.5, 0.5])
+ >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
+ array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if not np.iterable(k):
+ k = [k]
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of integration must be non-negative")
+ if len(k) > cnt:
+ raise ValueError("Too many integration constants")
+ if np.ndim(lbnd) != 0:
+ raise ValueError("lbnd must be a scalar.")
+ if np.ndim(scl) != 0:
+ raise ValueError("scl must be a scalar.")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ k = list(k) + [0]*(cnt - len(k))
+ for i in range(cnt):
+ n = len(c)
+ c *= scl
+ if n == 1 and np.all(c[0] == 0):
+ c[0] += k[i]
+ else:
+ tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+ tmp[0] = c[0]*0
+ tmp[1] = c[0]/2
+ for j in range(1, n):
+ tmp[j + 1] = c[j]/(2*(j + 1))
+ tmp[0] += k[i] - hermval(lbnd, tmp)
+ c = tmp
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def hermval(x, c, tensor=True):
+ """
+ Evaluate an Hermite series at points x.
+
+ If `c` is of length `n + 1`, this function returns the value:
+
+ .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+ `c` is multidimensional, then the shape of the result depends on the
+ value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+ x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+ scalars have shape ().
+
+ Trailing zeros in the coefficients will be used in the evaluation, so
+ they should be avoided if efficiency is a concern.
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+ themselves and with the elements of `c`.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree n are contained in c[n]. If `c` is multidimensional the
+ remaining indices enumerate multiple polynomials. In the two
+ dimensional case the coefficients may be thought of as stored in
+ the columns of `c`.
+ tensor : boolean, optional
+ If True, the shape of the coefficient array is extended with ones
+ on the right, one for each dimension of `x`. Scalars have dimension 0
+ for this action. The result is that every column of coefficients in
+ `c` is evaluated for every element of `x`. If False, `x` is broadcast
+ over the columns of `c` for the evaluation. This keyword is useful
+ when `c` is multidimensional. The default value is True.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, algebra_like
+ The shape of the return value is described above.
+
+ See Also
+ --------
+ hermval2d, hermgrid2d, hermval3d, hermgrid3d
+
+ Notes
+ -----
+ The evaluation uses Clenshaw recursion, aka synthetic division.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermval
+ >>> coef = [1,2,3]
+ >>> hermval(1, coef)
+ 11.0
+ >>> hermval([[1,2],[3,4]], coef)
+ array([[ 11., 51.],
+ [115., 203.]])
+
+ """
+ c = np.array(c, ndmin=1, copy=False)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ x2 = x*2
+ if len(c) == 1:
+ c0 = c[0]
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]
+ c1 = c[1]
+ else:
+ nd = len(c)
+ c0 = c[-2]
+ c1 = c[-1]
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = c[-i] - c1*(2*(nd - 1))
+ c1 = tmp + c1*x2
+ return c0 + c1*x2
+
+
+def hermval2d(x, y, c):
+ """
+ Evaluate a 2-D Hermite series at points (x, y).
+
+ This function returns the values:
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+ If `c` is a 1-D array a one is implicitly appended to its shape to make
+ it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and if it isn't an ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+ dimension greater than two the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points formed with
+ pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ hermval, hermgrid2d, hermval3d, hermgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(hermval, c, x, y)
+
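+# Editorial sketch, not part of the upstream source: with c selecting the
+# single term H_1(x)*H_1(y), and H_1(t) = 2*t, the value at (0.5, 1) is
+# (2*0.5)*(2*1) = 2:
+#
+# >>> hermval2d(0.5, 1., [[0, 0], [0, 1]])
+# 2.0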
+
+def hermgrid2d(x, y, c):
+ """
+ Evaluate a 2-D Hermite series on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points in the
+ Cartesian product of `x` and `y`. If `x` or `y` is a list or
+ tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j are contained in ``c[i,j]``. If `c` has dimension
+ greater than two the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points in the Cartesian
+ product of `x` and `y`.
+
+ See Also
+ --------
+ hermval, hermval2d, hermval3d, hermgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(hermval, c, x, y)
+
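+# Editorial sketch, not part of the upstream source: with
+# c = [[0, 0], [0, 1]], i.e. the term H_1(a)*H_1(b) = 4*a*b, evaluated on
+# the grid {0, 0.5} x {0, 1}:
+#
+# >>> hermgrid2d([0., 0.5], [0., 1.], [[0, 0], [0, 1]])
+# array([[0., 0.],
+#        [0., 2.]])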
+
+def hermval3d(x, y, z, c):
+ """
+ Evaluate a 3-D Hermite series at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+ they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermval, hermval2d, hermgrid2d, hermgrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(hermval, c, x, y, z)
+
+
+def hermgrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+ are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional polynomial at points in the
+ Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermval, hermval2d, hermgrid2d, hermval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(hermval, c, x, y, z)
+
+
+def hermvander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = H_i(x),
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the Hermite polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
+ ``hermval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of Hermite series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo-Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding Hermite polynomial. The dtype will be the same as
+ the converted `x`.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermvander
+ >>> x = np.array([-1, 0, 1])
+ >>> hermvander(x, 3)
+ array([[ 1., -2., 2., 4.],
+ [ 1., 0., -2., -0.],
+ [ 1., 2., 2., -4.]])
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ v[0] = x*0 + 1
+ if ideg > 0:
+ x2 = x*2
+ v[1] = x2
+ for i in range(2, ideg + 1):
+ v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
+ return np.moveaxis(v, 0, -1)
+
+
+def hermvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the Hermite polynomials.
+
+ If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D Hermite
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ hermvander, hermvander3d, hermval2d, hermval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg)
+
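+# Editorial sketch, not part of the upstream source: the flattened column
+# order matches hermval2d, so ``np.dot(V, c.flat)`` reproduces the
+# evaluation:
+#
+# >>> x, y = np.array([0.5]), np.array([1.0])
+# >>> c = np.array([[0., 0.], [0., 1.]])
+# >>> np.dot(hermvander2d(x, y, [1, 1]), c.flat)
+# array([2.])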
+
+def hermvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the Hermite polynomials.
+
+ If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D Hermite
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermvander, hermvander2d, hermval2d, hermval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg)
+
+
+def hermfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Hermite series to data.
+
+ Return the coefficients of a Hermite series of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
+
+ where `n` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (`M`,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Hermite coefficients ordered from low to high. If `y` was 2-D,
+ the coefficients for the data in column k of `y` are in column
+ `k`.
+
+ [residuals, rank, singular_values, rcond] : list
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - singular_values -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full == False``. The
+ warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.hermite_e.hermefit
+ hermval : Evaluates a Hermite series.
+ hermvander : Vandermonde matrix of Hermite series.
+ hermweight : Hermite weight function
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution is the coefficients of the Hermite series `p` that
+ minimizes the sum of the weighted squared errors
+
+ .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+ where the :math:`w_j` are the weights. This problem is solved by
+ setting up the (typically) overdetermined matrix equation
+
+ .. math:: V(x) * c = w * y,
+
+ where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
+ coefficients to be solved for, `w` are the weights, `y` are the
+ observed values. This equation is then solved using the singular value
+ decomposition of `V`.
+
+ If some of the singular values of `V` are so small that they are
+ neglected, then a `RankWarning` will be issued. This means that the
+ coefficient values may be poorly determined. Using a lower order fit
+ will usually get rid of the warning. The `rcond` parameter can also be
+ set to a value smaller than its default, but the resulting fit may be
+ spurious and have large contributions from roundoff error.
+
+ Fits using Hermite series are probably most useful when the data can be
+ approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
+ weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+ together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+ available as `hermweight`.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermfit, hermval
+ >>> x = np.linspace(-10, 10)
+ >>> err = np.random.randn(len(x))/10
+ >>> y = hermval(x, [1, 2, 3]) + err
+ >>> hermfit(x, y, 2)
+ array([1.0218, 1.9986, 2.9999]) # may vary
+
+ """
+ return pu._fit(hermvander, x, y, deg, rcond, full, w)
+
+
+def hermcompanion(c):
+ """Return the scaled companion matrix of c.
+
+ The basis polynomials are scaled so that the companion matrix is
+ symmetric when `c` is an Hermite basis polynomial. This provides
+ better eigenvalue estimates than the unscaled case and for basis
+ polynomials the eigenvalues are guaranteed to be real if
+ `numpy.linalg.eigvalsh` is used to obtain them.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Hermite series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Scaled companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[-.5*c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
+ scl = np.multiply.accumulate(scl)[::-1]
+ top = mat.reshape(-1)[1::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[...] = np.sqrt(.5*np.arange(1, n))
+ bot[...] = top
+ mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
+ return mat
+
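+# Editorial sketch, not part of the upstream source: for the basis
+# polynomial H_2(x) = 4*x**2 - 2 the scaled companion matrix is symmetric
+# and its eigenvalues are the roots +/-1/sqrt(2):
+#
+# >>> hermcompanion([0, 0, 1])
+# array([[0.        , 0.70710678],
+#        [0.70710678, 0.        ]])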
+
+def hermroots(c):
+ """
+ Compute the roots of a Hermite series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * H_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ The Hermite series basis polynomials aren't powers of `x` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite import hermroots, hermfromroots
+ >>> coef = hermfromroots([-1, 0, 1])
+ >>> coef
+ array([0. , 0.25 , 0. , 0.125])
+ >>> hermroots(coef)
+ array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) <= 1:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([-.5*c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = hermcompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+def _normed_hermite_n(x, n):
+ """
+ Evaluate a normalized Hermite polynomial.
+
+ Compute the value of the normalized Hermite polynomial of degree ``n``
+ at the points ``x``.
+
+ Parameters
+ ----------
+ x : ndarray of double
+ Points at which to evaluate the function.
+ n : int
+ Degree of the normalized Hermite function to be evaluated.
+
+ Returns
+ -------
+ values : ndarray
+ The shape of the return value is described above.
+
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
+ This function is needed for finding the Gauss points and integration
+ weights for high degrees. The values of the standard Hermite functions
+ overflow when n >= 207.
+
+ """
+ if n == 0:
+ return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi)))
+
+ c0 = 0.
+ c1 = 1./np.sqrt(np.sqrt(np.pi))
+ nd = float(n)
+ for i in range(n - 1):
+ tmp = c0
+ c0 = -c1*np.sqrt((nd - 1.)/nd)
+ c1 = tmp + c1*x*np.sqrt(2./nd)
+ nd = nd - 1.0
+ return c0 + c1*x*np.sqrt(2)
+
+
+def hermgauss(deg):
+ """
+ Gauss-Hermite quadrature.
+
+ Computes the sample points and weights for Gauss-Hermite quadrature.
+ These sample points and weights will correctly integrate polynomials of
+ degree :math:`2*deg - 1` or less over the interval :math:`(-\\infty, \\infty)`
+ with the weight function :math:`f(x) = \\exp(-x^2)`.
+
+ Parameters
+ ----------
+ deg : int
+ Number of sample points and weights. It must be >= 1.
+
+ Returns
+ -------
+ x : ndarray
+ 1-D ndarray containing the sample points.
+ y : ndarray
+ 1-D ndarray containing the weights.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ The results have only been tested up to degree 100; higher degrees may
+ be problematic. The weights are determined by using the fact that
+
+ .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
+
+ where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+ is the k'th root of :math:`H_n`, and then scaling the results to get
+ the right value when integrating 1.
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
+
+ # first approximation of roots. We use the fact that the companion
+ # matrix is symmetric in this case in order to obtain better zeros.
+ c = np.array([0]*ideg + [1], dtype=np.float64)
+ m = hermcompanion(c)
+ x = la.eigvalsh(m)
+
+ # improve roots by one application of Newton
+ dy = _normed_hermite_n(x, ideg)
+ df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
+ x -= dy/df
+
+ # compute the weights. We scale the factor to avoid possible numerical
+ # overflow.
+ fm = _normed_hermite_n(x, ideg - 1)
+ fm /= np.abs(fm).max()
+ w = 1/(fm * fm)
+
+ # for Hermite we can also symmetrize
+ w = (w + w[::-1])/2
+ x = (x - x[::-1])/2
+
+ # scale w to get the right value
+ w *= np.sqrt(np.pi) / w.sum()
+
+ return x, w
+
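+# Editorial sketch, not part of the upstream source: a 2-point rule is
+# already exact for x**2 * exp(-x**2), whose integral is sqrt(pi)/2:
+#
+# >>> x, w = hermgauss(2)
+# >>> np.sum(w * x**2)
+# 0.8862269254527579  # may vary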
+
+def hermweight(x):
+ """
+ Weight function of the Hermite polynomials.
+
+ The weight function is :math:`\\exp(-x^2)` and the interval of
+ integration is :math:`(-\\infty, \\infty)`. The Hermite polynomials are
+ orthogonal, but not normalized, with respect to this weight function.
+
+ Parameters
+ ----------
+ x : array_like
+ Values at which the weight function will be computed.
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ w = np.exp(-x**2)
+ return w
+
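+# Editorial sketch, not part of the upstream source:
+#
+# >>> hermweight(np.array([0., 1.]))  # exp(-x**2) at 0 and 1
+# array([1.        , 0.36787944])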
+
+#
+# Hermite series class
+#
+
+class Hermite(ABCPolyBase):
+ """An Hermite series class.
+
+ The Hermite class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ Hermite coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
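+ Examples
+ --------
+ A minimal usage sketch (hand-computed; with the default domain and
+ window the argument is passed through unmapped):
+
+ >>> from numpy.polynomial import Hermite
+ >>> p = Hermite([1, 2]) # 1*H_0(x) + 2*H_1(x) == 1 + 4*x
+ >>> p(0.5)
+ 3.0
+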
+ """
+ # Virtual Functions
+ _add = staticmethod(hermadd)
+ _sub = staticmethod(hermsub)
+ _mul = staticmethod(hermmul)
+ _div = staticmethod(hermdiv)
+ _pow = staticmethod(hermpow)
+ _val = staticmethod(hermval)
+ _int = staticmethod(hermint)
+ _der = staticmethod(hermder)
+ _fit = staticmethod(hermfit)
+ _line = staticmethod(hermline)
+ _roots = staticmethod(hermroots)
+ _fromroots = staticmethod(hermfromroots)
+
+ # Virtual properties
+ domain = np.array(hermdomain)
+ window = np.array(hermdomain)
+ basis_name = 'H'
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi
new file mode 100644
index 00000000..0d3556d6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite.pyi
@@ -0,0 +1,46 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_, float_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+hermtrim = trimcoef
+
+def poly2herm(pol): ...
+def herm2poly(c): ...
+
+hermdomain: ndarray[Any, dtype[int_]]
+hermzero: ndarray[Any, dtype[int_]]
+hermone: ndarray[Any, dtype[int_]]
+hermx: ndarray[Any, dtype[float_]]
+
+def hermline(off, scl): ...
+def hermfromroots(roots): ...
+def hermadd(c1, c2): ...
+def hermsub(c1, c2): ...
+def hermmulx(c): ...
+def hermmul(c1, c2): ...
+def hermdiv(c1, c2): ...
+def hermpow(c, pow, maxpower=...): ...
+def hermder(c, m=..., scl=..., axis=...): ...
+def hermint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
+def hermval(x, c, tensor=...): ...
+def hermval2d(x, y, c): ...
+def hermgrid2d(x, y, c): ...
+def hermval3d(x, y, z, c): ...
+def hermgrid3d(x, y, z, c): ...
+def hermvander(x, deg): ...
+def hermvander2d(x, y, deg): ...
+def hermvander3d(x, y, z, deg): ...
+def hermfit(x, y, deg, rcond=..., full=..., w=...): ...
+def hermcompanion(c): ...
+def hermroots(c): ...
+def hermgauss(deg): ...
+def hermweight(x): ...
+
+class Hermite(ABCPolyBase):
+ domain: Any
+ window: Any
+ basis_name: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py
new file mode 100644
index 00000000..182c562c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.py
@@ -0,0 +1,1689 @@
+"""
+===================================================================
+HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`)
+===================================================================
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Hermite_e series, including a `HermiteE` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with such polynomials is in the
+docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ HermiteE
+
+Constants
+---------
+.. autosummary::
+ :toctree: generated/
+
+ hermedomain
+ hermezero
+ hermeone
+ hermex
+
+Arithmetic
+----------
+.. autosummary::
+ :toctree: generated/
+
+ hermeadd
+ hermesub
+ hermemulx
+ hermemul
+ hermediv
+ hermepow
+ hermeval
+ hermeval2d
+ hermeval3d
+ hermegrid2d
+ hermegrid3d
+
+Calculus
+--------
+.. autosummary::
+ :toctree: generated/
+
+ hermeder
+ hermeint
+
+Misc Functions
+--------------
+.. autosummary::
+ :toctree: generated/
+
+ hermefromroots
+ hermeroots
+ hermevander
+ hermevander2d
+ hermevander3d
+ hermegauss
+ hermeweight
+ hermecompanion
+ hermefit
+ hermetrim
+ hermeline
+ herme2poly
+ poly2herme
+
+See also
+--------
+`numpy.polynomial`
+
+"""
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+__all__ = [
+ 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
+ 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
+ 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
+ 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
+ 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
+ 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
+ 'hermegauss', 'hermeweight']
+
+hermetrim = pu.trimcoef
+
+
+def poly2herme(pol):
+ """
+ poly2herme(pol)
+
+ Convert a polynomial to a Hermite series.
+
+ Convert an array representing the coefficients of a polynomial (relative
+ to the "standard" basis) ordered from lowest degree to highest, to an
+ array of the coefficients of the equivalent Hermite series, ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ pol : array_like
+ 1-D array containing the polynomial coefficients
+
+ Returns
+ -------
+ c : ndarray
+ 1-D array containing the coefficients of the equivalent Hermite
+ series.
+
+ See Also
+ --------
+ herme2poly
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import poly2herme
+ >>> poly2herme(np.arange(4))
+ array([ 2., 10., 2., 3.])
+
+ """
+ [pol] = pu.as_series([pol])
+ deg = len(pol) - 1
+ res = 0
+ for i in range(deg, -1, -1):
+ res = hermeadd(hermemulx(res), pol[i])
+ return res
+
+
+def herme2poly(c):
+ """
+ Convert a Hermite series to a polynomial.
+
+ Convert an array representing the coefficients of a Hermite series,
+ ordered from lowest degree to highest, to an array of the coefficients
+ of the equivalent polynomial (relative to the "standard" basis) ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array containing the Hermite series coefficients, ordered
+ from lowest order term to highest.
+
+ Returns
+ -------
+ pol : ndarray
+ 1-D array containing the coefficients of the equivalent polynomial
+ (relative to the "standard" basis) ordered from lowest order term
+ to highest.
+
+ See Also
+ --------
+ poly2herme
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import herme2poly
+ >>> herme2poly([ 2., 10., 2., 3.])
+ array([0., 1., 2., 3.])
+
+ """
+ from .polynomial import polyadd, polysub, polymulx
+
+ [c] = pu.as_series([c])
+ n = len(c)
+ if n == 1:
+ return c
+ if n == 2:
+ return c
+ else:
+ c0 = c[-2]
+ c1 = c[-1]
+ # i is the current degree of c1
+ for i in range(n - 1, 1, -1):
+ tmp = c0
+ c0 = polysub(c[i - 2], c1*(i - 1))
+ c1 = polyadd(tmp, polymulx(c1))
+ return polyadd(c0, polymulx(c1))
+
+#
+# These are constant arrays of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Hermite
+hermedomain = np.array([-1, 1])
+
+# Hermite coefficients representing zero.
+hermezero = np.array([0])
+
+# Hermite coefficients representing one.
+hermeone = np.array([1])
+
+# Hermite coefficients representing the identity x.
+hermex = np.array([0, 1])
+
+
+def hermeline(off, scl):
+ """
+ Hermite series whose graph is a straight line.
+
+ Parameters
+ ----------
+ off, scl : scalars
+ The specified line is given by ``off + scl*x``.
+
+ Returns
+ -------
+ y : ndarray
+ This module's representation of the Hermite series for
+ ``off + scl*x``.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermeline, hermeval
+ >>> hermeval(0,hermeline(3, 2))
+ 3.0
+ >>> hermeval(1,hermeline(3, 2))
+ 5.0
+
+ """
+ if scl != 0:
+ return np.array([off, scl])
+ else:
+ return np.array([off])
+
+
+def hermefromroots(roots):
+ """
+ Generate a HermiteE series with given roots.
+
+ The function returns the coefficients of the polynomial
+
+ .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+ in HermiteE form, where the `r_n` are the roots specified in `roots`.
+ If a zero has multiplicity n, then it must appear in `roots` n times.
+ For instance, if 2 is a root of multiplicity three and 3 is a root of
+ multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+ roots can appear in any order.
+
+ If the returned coefficients are `c`, then
+
+ .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+ The coefficient of the last term is not generally 1 for monic
+ polynomials in HermiteE form.
+
+ Parameters
+ ----------
+ roots : array_like
+ Sequence containing the roots.
+
+ Returns
+ -------
+ out : ndarray
+ 1-D array of coefficients. If all roots are real then `out` is a
+ real array, if some of the roots are complex, then `out` is complex
+ even if all the coefficients in the result are real (see Examples
+ below).
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
+ >>> coef = hermefromroots((-1, 0, 1))
+ >>> hermeval((-1, 0, 1), coef)
+ array([0., 0., 0.])
+ >>> coef = hermefromroots((-1j, 1j))
+ >>> hermeval((-1j, 1j), coef)
+ array([0.+0.j, 0.+0.j])
+
+ """
+ return pu._fromroots(hermeline, hermemul, roots)
+
+
+def hermeadd(c1, c2):
+ """
+ Add one Hermite series to another.
+
+ Returns the sum of two Hermite series `c1` + `c2`. The arguments
+ are sequences of coefficients ordered from lowest order term to
+ highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the Hermite series of their sum.
+
+ See Also
+ --------
+ hermesub, hermemulx, hermemul, hermediv, hermepow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the sum of two Hermite series
+ is a Hermite series (without having to "reproject" the result onto
+ the basis set) so addition, just like that of "standard" polynomials,
+ is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermeadd
+ >>> hermeadd([1, 2, 3], [1, 2, 3, 4])
+ array([2., 4., 6., 4.])
+
+ """
+ return pu._add(c1, c2)
+
+
+def hermesub(c1, c2):
+ """
+ Subtract one Hermite series from another.
+
+ Returns the difference of two Hermite series `c1` - `c2`. The
+ sequences of coefficients are from lowest order term to highest, i.e.,
+ [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Hermite series coefficients representing their difference.
+
+ See Also
+ --------
+ hermeadd, hermemulx, hermemul, hermediv, hermepow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the difference of two Hermite
+ series is a Hermite series (without having to "reproject" the result
+ onto the basis set) so subtraction, just like that of "standard"
+ polynomials, is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermesub
+ >>> hermesub([1, 2, 3, 4], [1, 2, 3])
+ array([0., 0., 0., 4.])
+
+ """
+ return pu._sub(c1, c2)
+
+
+def hermemulx(c):
+ """Multiply a Hermite series by x.
+
+ Multiply the Hermite series `c` by x, where x is the independent
+ variable.
+
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the result of the multiplication.
+
+ Notes
+ -----
+ The multiplication uses the recursion relationship for Hermite
+ polynomials in the form
+
+ .. math::
+
+ xP_i(x) = P_{i + 1}(x) + iP_{i - 1}(x)
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermemulx
+ >>> hermemulx([1, 2, 3])
+ array([2., 7., 2., 3.])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ # The zero series needs special treatment
+ if len(c) == 1 and c[0] == 0:
+ return c
+
+ prd = np.empty(len(c) + 1, dtype=c.dtype)
+ prd[0] = c[0]*0
+ prd[1] = c[0]
+ for i in range(1, len(c)):
+ prd[i + 1] = c[i]
+ prd[i - 1] += c[i]*i
+ return prd
+
+
+def hermemul(c1, c2):
+ """
+ Multiply one Hermite series by another.
+
+ Returns the product of two Hermite series `c1` * `c2`. The arguments
+ are sequences of coefficients, from lowest order "term" to highest,
+ e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Hermite series coefficients representing their product.
+
+ See Also
+ --------
+ hermeadd, hermesub, hermemulx, hermediv, hermepow
+
+ Notes
+ -----
+ In general, the (polynomial) product of two Hermite series results in terms
+ that are not in the Hermite polynomial basis set. Thus, to express
+ the product as a Hermite series, it is necessary to "reproject" the
+ product onto said basis set, which may produce "unintuitive" (but
+ correct) results; see Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermemul
+ >>> hermemul([1, 2, 3], [0, 1, 2])
+ array([14., 15., 28., 7., 6.])
+
+ """
+ # s1, s2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+
+ if len(c1) > len(c2):
+ c = c2
+ xs = c1
+ else:
+ c = c1
+ xs = c2
+
+ if len(c) == 1:
+ c0 = c[0]*xs
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]*xs
+ c1 = c[1]*xs
+ else:
+ nd = len(c)
+ c0 = c[-2]*xs
+ c1 = c[-1]*xs
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = hermesub(c[-i]*xs, c1*(nd - 1))
+ c1 = hermeadd(tmp, hermemulx(c1))
+ return hermeadd(c0, hermemulx(c1))
+
+
+def hermediv(c1, c2):
+ """
+ Divide one Hermite series by another.
+
+ Returns the quotient-with-remainder of two Hermite series
+ `c1` / `c2`. The arguments are sequences of coefficients from lowest
+ order "term" to highest, e.g., [1,2,3] represents the series
+ ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Hermite series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ [quo, rem] : ndarrays
+ Of Hermite series coefficients representing the quotient and
+ remainder.
+
+ See Also
+ --------
+ hermeadd, hermesub, hermemulx, hermemul, hermepow
+
+ Notes
+ -----
+ In general, the (polynomial) division of one Hermite series by another
+ results in quotient and remainder terms that are not in the Hermite
+ polynomial basis set. Thus, to express these results as a Hermite
+ series, it is necessary to "reproject" the results onto the Hermite
+ basis set, which may produce "unintuitive" (but correct) results; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermediv
+ >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
+ (array([1., 2., 3.]), array([0.]))
+ >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
+ (array([1., 2., 3.]), array([1., 2.]))
+
+ """
+ return pu._div(hermemul, c1, c2)
+
+
+def hermepow(c, pow, maxpower=16):
+ """Raise a Hermite series to a power.
+
+ Returns the Hermite series `c` raised to the power `pow`. The
+ argument `c` is a sequence of coefficients ordered from low to high,
+ i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Hermite series coefficients ordered from low to
+ high.
+ pow : integer
+ Power to which the series will be raised
+ maxpower : integer, optional
+ Maximum power allowed. This is mainly to limit growth of the series
+ to unmanageable size. Default is 16.
+
+ Returns
+ -------
+ coef : ndarray
+ Hermite series of `c` raised to the power `pow`.
+
+ See Also
+ --------
+ hermeadd, hermesub, hermemulx, hermemul, hermediv
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermepow
+ >>> hermepow([1, 2, 3], 2)
+ array([23., 28., 46., 12., 9.])
+
+ """
+ return pu._pow(hermemul, c, pow, maxpower)
+
+
+def hermeder(c, m=1, scl=1, axis=0):
+ """
+ Differentiate a Hermite_e series.
+
+ Returns the series coefficients `c` differentiated `m` times along
+ `axis`. At each iteration the result is multiplied by `scl` (the
+ scaling factor is for use in a linear change of variable). The argument
+ `c` is an array of coefficients from low to high degree along each
+ axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
+ while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
+ + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
+ is ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Hermite_e series coefficients. If `c` is multidimensional
+ the different axis correspond to different variables with the
+ degree in each axis given by the corresponding index.
+ m : int, optional
+ Number of derivatives taken, must be non-negative. (Default: 1)
+ scl : scalar, optional
+ Each differentiation is multiplied by `scl`. The end result is
+ multiplication by ``scl**m``. This is for use in a linear change of
+ variable. (Default: 1)
+ axis : int, optional
+ Axis over which the derivative is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ der : ndarray
+ Hermite series of the derivative.
+
+ See Also
+ --------
+ hermeint
+
+ Notes
+ -----
+ In general, the result of differentiating a Hermite series does not
+ resemble the same operation on a power series. Thus the result of this
+ function may be "unintuitive," albeit correct; see Examples section
+ below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermeder
+ >>> hermeder([ 1., 1., 1., 1.])
+ array([1., 2., 3.])
+ >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
+ array([1., 2., 3.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of derivation must be non-negative")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ n = len(c)
+ if cnt >= n:
+ return c[:1]*0
+ else:
+ for i in range(cnt):
+ n = n - 1
+ c *= scl
+ der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
+ for j in range(n, 0, -1):
+ der[j - 1] = j*c[j]
+ c = der
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+ """
+ Integrate a Hermite_e series.
+
+ Returns the Hermite_e series coefficients `c` integrated `m` times from
+ `lbnd` along `axis`. At each iteration the resulting series is
+ **multiplied** by `scl` and an integration constant, `k`, is added.
+ The scaling factor is for use in a linear change of variable. ("Buyer
+ beware": note that, depending on what one is doing, one may want `scl`
+ to be the reciprocal of what one might expect; for more information,
+ see the Notes section below.) The argument `c` is an array of
+ coefficients from low to high degree along each axis, e.g., [1,2,3]
+ represents the series ``He_0 + 2*He_1 + 3*He_2`` while [[1,2],[1,2]]
+ represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + 2*He_0(x)*He_1(y) +
+ 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Hermite_e series coefficients. If c is multidimensional
+ the different axis correspond to different variables with the
+ degree in each axis given by the corresponding index.
+ m : int, optional
+ Order of integration, must be positive. (Default: 1)
+ k : {[], list, scalar}, optional
+ Integration constant(s). The value of the first integral at
+ ``lbnd`` is the first value in the list, the value of the second
+ integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
+ default), all constants are set to zero. If ``m == 1``, a single
+ scalar can be given instead of a list.
+ lbnd : scalar, optional
+ The lower bound of the integral. (Default: 0)
+ scl : scalar, optional
+ Following each integration the result is *multiplied* by `scl`
+ before the integration constant is added. (Default: 1)
+ axis : int, optional
+ Axis over which the integral is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ S : ndarray
+ Hermite_e series coefficients of the integral.
+
+ Raises
+ ------
+ ValueError
+ If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+ ``np.ndim(scl) != 0``.
+
+ See Also
+ --------
+ hermeder
+
+ Notes
+ -----
+ Note that the result of each integration is *multiplied* by `scl`.
+ Why is this important to note? Say one is making a linear change of
+ variable :math:`u = ax + b` in an integral relative to `x`. Then
+ :math:`dx = du/a`, so one will need to set `scl` equal to
+ :math:`1/a` - perhaps not what one would have first thought.
+
+ Also note that, in general, the result of integrating a HermiteE series
+ needs to be "reprojected" onto the HermiteE basis set. Thus, typically,
+ the result of this function is "unintuitive," albeit correct; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermeint
+ >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
+ array([1., 1., 1., 1.])
+ >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
+ array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary
+ >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
+ array([2., 1., 1., 1.])
+ >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
+ array([-1., 1., 1., 1.])
+ >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
+ array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if not np.iterable(k):
+ k = [k]
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of integration must be non-negative")
+ if len(k) > cnt:
+ raise ValueError("Too many integration constants")
+ if np.ndim(lbnd) != 0:
+ raise ValueError("lbnd must be a scalar.")
+ if np.ndim(scl) != 0:
+ raise ValueError("scl must be a scalar.")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ k = list(k) + [0]*(cnt - len(k))
+ for i in range(cnt):
+ n = len(c)
+ c *= scl
+ if n == 1 and np.all(c[0] == 0):
+ c[0] += k[i]
+ else:
+ tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+ tmp[0] = c[0]*0
+ tmp[1] = c[0]
+ for j in range(1, n):
+ tmp[j + 1] = c[j]/(j + 1)
+ tmp[0] += k[i] - hermeval(lbnd, tmp)
+ c = tmp
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def hermeval(x, c, tensor=True):
+ """
+ Evaluate an HermiteE series at points x.
+
+ If `c` is of length `n + 1`, this function returns the value:
+
+ .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+ `c` is multidimensional, then the shape of the result depends on the
+ value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+ x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+ scalars have shape ``()``.
+
+ Trailing zeros in the coefficients will be used in the evaluation, so
+ they should be avoided if efficiency is a concern.
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+ themselves and with the elements of `c`.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree n are contained in c[n]. If `c` is multidimensional the
+ remaining indices enumerate multiple polynomials. In the two
+ dimensional case the coefficients may be thought of as stored in
+ the columns of `c`.
+ tensor : boolean, optional
+ If True, the shape of the coefficient array is extended with ones
+ on the right, one for each dimension of `x`. Scalars have dimension 0
+ for this action. The result is that every column of coefficients in
+ `c` is evaluated for every element of `x`. If False, `x` is broadcast
+ over the columns of `c` for the evaluation. This keyword is useful
+ when `c` is multidimensional. The default value is True.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, algebra_like
+ The shape of the return value is described above.
+
+ See Also
+ --------
+ hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
+
+ Notes
+ -----
+ The evaluation uses Clenshaw recursion, aka synthetic division.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermeval
+ >>> coef = [1,2,3]
+ >>> hermeval(1, coef)
+ 3.0
+ >>> hermeval([[1,2],[3,4]], coef)
+ array([[ 3., 14.],
+ [31., 54.]])
+
+ """
+ c = np.array(c, ndmin=1, copy=False)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ if len(c) == 1:
+ c0 = c[0]
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]
+ c1 = c[1]
+ else:
+ nd = len(c)
+ c0 = c[-2]
+ c1 = c[-1]
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = c[-i] - c1*(nd - 1)
+ c1 = tmp + c1*x
+ return c0 + c1*x
+
+
+def hermeval2d(x, y, c):
+ """
+ Evaluate a 2-D HermiteE series at points (x, y).
+
+ This function returns the values:
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+ If `c` is a 1-D array a one is implicitly appended to its shape to make
+ it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and if it isn't an ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+ dimension greater than two the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points formed with
+ pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ hermeval, hermegrid2d, hermeval3d, hermegrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
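+ Examples
+ --------
+ A hand-worked case: with ``c = [[1, 0], [0, 1]]`` the series is
+ :math:`1 + x y`, since :math:`He_1(t) = t`:
+
+ >>> from numpy.polynomial.hermite_e import hermeval2d
+ >>> hermeval2d([1], [2], [[1, 0], [0, 1]])
+ array([3.])
+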
+ """
+ return pu._valnd(hermeval, c, x, y)
+
+
+def hermegrid2d(x, y, c):
+ """
+ Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * He_i(a) * He_j(b)
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape + y.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points in the
+ Cartesian product of `x` and `y`. If `x` or `y` is a list or
+ tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j are contained in ``c[i,j]``. If `c` has dimension
+ greater than two the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points in the Cartesian
+ product of `x` and `y`.
+
+ See Also
+ --------
+ hermeval, hermeval2d, hermeval3d, hermegrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
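+ Examples
+ --------
+ A hand-worked case evaluating the series :math:`1 + a b` on a 2x2 grid:
+
+ >>> from numpy.polynomial.hermite_e import hermegrid2d
+ >>> hermegrid2d([0, 1], [1, 2], [[1, 0], [0, 1]])
+ array([[1., 1.],
+ [2., 3.]])
+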
+ """
+ return pu._gridnd(hermeval, c, x, y)
+
+
+def hermeval3d(x, y, z, c):
+ """
+ Evaluate a 3-D Hermite_e series at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+ they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermeval, hermeval2d, hermegrid2d, hermegrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
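+ Examples
+ --------
+ A hand-worked case: ``c = [[[1, 1]]]`` represents
+ :math:`1 + He_1(z) = 1 + z`:
+
+ >>> from numpy.polynomial.hermite_e import hermeval3d
+ >>> hermeval3d([1], [2], [3], [[[1, 1]]])
+ array([4.])
+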
+ """
+ return pu._valnd(hermeval, c, x, y, z)
+
+
+def hermegrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+ are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional polynomial at points in the
+ Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermeval, hermeval2d, hermegrid2d, hermeval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
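+ Examples
+ --------
+ A hand-worked check with the series :math:`p(a,b,c) = 1 + c` evaluated
+ at scalar points:
+
+ >>> from numpy.polynomial.hermite_e import hermegrid3d
+ >>> hermegrid3d(1, 2, 3, [[[1, 1]]])
+ 4.0
+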
+ """
+ return pu._gridnd(hermeval, c, x, y, z)
+
+
+def hermevander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = He_i(x),
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the HermiteE polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
+ ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of HermiteE series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo-Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding HermiteE polynomial. The dtype will be the same as
+ the converted `x`.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermevander
+ >>> x = np.array([-1, 0, 1])
+ >>> hermevander(x, 3)
+ array([[ 1., -1., 0., 2.],
+ [ 1., 0., -1., -0.],
+ [ 1., 1., 0., -2.]])
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ v[0] = x*0 + 1
+ if ideg > 0:
+ v[1] = x
+ for i in range(2, ideg + 1):
+ v[i] = (v[i-1]*x - v[i-2]*(i - 1))
+ return np.moveaxis(v, 0, -1)
+
+
+def hermevander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the HermiteE polynomials.
+
+ If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D HermiteE
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ hermevander, hermevander3d, hermeval2d, hermeval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
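+ Examples
+ --------
+ A hand-worked single-point case; the row holds :math:`He_i(x) He_j(y)`
+ in the documented column order:
+
+ >>> from numpy.polynomial.hermite_e import hermevander2d
+ >>> hermevander2d([0], [1], [1, 1])
+ array([[1., 1., 0., 0.]])
+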
+ """
+ return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
+
+
+def hermevander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the HermiteE polynomials.
+
+ If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D HermiteE
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ hermevander, hermevander2d, hermeval2d, hermeval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
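+ Examples
+ --------
+ A minimal hand-worked case with degrees ``[1, 0, 0]`` at a single point:
+
+ >>> from numpy.polynomial.hermite_e import hermevander3d
+ >>> hermevander3d([0], [1], [2], [1, 0, 0])
+ array([[1., 0.]])
+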
+ """
+ return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
+
+
+def hermefit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Hermite series to data.
+
+ Return the coefficients of a HermiteE series of degree `deg` that is
+ the least squares fit to the data values `y` given at points `x`. If
+ `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
+ multiple fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
+
+ where `n` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (`M`,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Hermite coefficients ordered from low to high. If `y` was 2-D,
+ the coefficients for the data in column k of `y` are in column
+ `k`.
+
+ [residuals, rank, singular_values, rcond] : list
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - singular_values -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full = False``. The
+ warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.laguerre.lagfit
+ hermeval : Evaluates a Hermite series.
+ hermevander : pseudo Vandermonde matrix of Hermite series.
+ hermeweight : HermiteE weight function.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution is the coefficients of the HermiteE series `p` that
+ minimizes the sum of the weighted squared errors
+
+ .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+ where the :math:`w_j` are the weights. This problem is solved by
+ setting up the (typically) overdetermined matrix equation
+
+ .. math:: V(x) * c = w * y,
+
+ where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
+ are the coefficients to be solved for, and the elements of `y` are the
+ observed values. This equation is then solved using the singular value
+ decomposition of `V`.
+
+ If some of the singular values of `V` are so small that they are
+ neglected, then a `RankWarning` will be issued. This means that the
+ coefficient values may be poorly determined. Using a lower order fit
+ will usually get rid of the warning. The `rcond` parameter can also be
+ set to a value smaller than its default, but the resulting fit may be
+ spurious and have large contributions from roundoff error.
+
+ Fits using HermiteE series are probably most useful when the data can
+ be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
+ weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+ together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+ available as `hermeweight`.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermefit, hermeval
+ >>> x = np.linspace(-10, 10)
+ >>> np.random.seed(123)
+ >>> err = np.random.randn(len(x))/10
+ >>> y = hermeval(x, [1, 2, 3]) + err
+ >>> hermefit(x, y, 2)
+ array([ 1.01690445, 1.99951418, 2.99948696]) # may vary
+
+ """
+ return pu._fit(hermevander, x, y, deg, rcond, full, w)
+
+
+def hermecompanion(c):
+ """
+ Return the scaled companion matrix of c.
+
+ The basis polynomials are scaled so that the companion matrix is
+ symmetric when `c` is an HermiteE basis polynomial. This provides
+ better eigenvalue estimates than the unscaled case and for basis
+ polynomials the eigenvalues are guaranteed to be real if
+ `numpy.linalg.eigvalsh` is used to obtain them.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of HermiteE series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Scaled companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
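+ Examples
+ --------
+ A hand-worked case: ``[1, 0, 1]`` is :math:`He_0 + He_2 = x^2`, whose
+ scaled companion matrix has both eigenvalues at the double root 0:
+
+ >>> from numpy.polynomial.hermite_e import hermecompanion
+ >>> hermecompanion([1, 0, 1])
+ array([[0., 0.],
+ [1., 0.]])
+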
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[-c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
+ scl = np.multiply.accumulate(scl)[::-1]
+ top = mat.reshape(-1)[1::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[...] = np.sqrt(np.arange(1, n))
+ bot[...] = top
+ mat[:, -1] -= scl*c[:-1]/c[-1]
+ return mat
+
+
+def hermeroots(c):
+ """
+ Compute the roots of a HermiteE series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * He_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.chebyshev.chebroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ The HermiteE series basis polynomials aren't powers of `x` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
+ >>> coef = hermefromroots([-1, 0, 1])
+ >>> coef
+ array([0., 2., 0., 1.])
+ >>> hermeroots(coef)
+ array([-1., 0., 1.]) # may vary
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) <= 1:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([-c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = hermecompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+def _normed_hermite_e_n(x, n):
+ """
+ Evaluate a normalized HermiteE polynomial.
+
+ Compute the value of the normalized HermiteE polynomial of degree ``n``
+ at the points ``x``.
+
+
+ Parameters
+ ----------
+ x : ndarray of double.
+ Points at which to evaluate the function
+ n : int
+ Degree of the normalized HermiteE function to be evaluated.
+
+ Returns
+ -------
+ values : ndarray
+ The shape of the return value is described above.
+
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
+ This function is needed for finding the Gauss points and integration
+ weights for high degrees. The values of the standard HermiteE functions
+ overflow when n >= 207.
+
+ """
+ if n == 0:
+ return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi)))
+
+ c0 = 0.
+ c1 = 1./np.sqrt(np.sqrt(2*np.pi))
+ nd = float(n)
+ for i in range(n - 1):
+ tmp = c0
+ c0 = -c1*np.sqrt((nd - 1.)/nd)
+ c1 = tmp + c1*x*np.sqrt(1./nd)
+ nd = nd - 1.0
+ return c0 + c1*x
+
+
+def hermegauss(deg):
+ """
+ Gauss-HermiteE quadrature.
+
+ Computes the sample points and weights for Gauss-HermiteE quadrature.
+ These sample points and weights will correctly integrate polynomials of
+ degree :math:`2*deg - 1` or less over the interval :math:`[-\\infty, \\infty]`
+ with the weight function :math:`f(x) = \\exp(-x^2/2)`.
+
+ Parameters
+ ----------
+ deg : int
+ Number of sample points and weights. It must be >= 1.
+
+ Returns
+ -------
+ x : ndarray
+ 1-D ndarray containing the sample points.
+ y : ndarray
+ 1-D ndarray containing the weights.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ The results have only been tested up to degree 100; higher degrees may
+ be problematic. The weights are determined by using the fact that
+
+ .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
+
+ where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+ is the k'th root of :math:`He_n`, and then scaling the results to get
+ the right value when integrating 1.
+
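+ Examples
+ --------
+ For ``deg = 2`` the points are the roots :math:`\\pm 1` of :math:`He_2`
+ and the equal weights sum to :math:`\\sqrt{2 \\pi}`. A hand-worked
+ illustrative check:
+
+ >>> from numpy.polynomial.hermite_e import hermegauss
+ >>> hermegauss(2)
+ (array([-1., 1.]), array([1.25331414, 1.25331414])) # may vary
+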
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
+
+ # first approximation of roots. We use the fact that the companion
+ # matrix is symmetric in this case in order to obtain better zeros.
+ c = np.array([0]*deg + [1])
+ m = hermecompanion(c)
+ x = la.eigvalsh(m)
+
+ # improve roots by one application of Newton
+ dy = _normed_hermite_e_n(x, ideg)
+ df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
+ x -= dy/df
+
+ # compute the weights. We scale the factor to avoid possible numerical
+ # overflow.
+ fm = _normed_hermite_e_n(x, ideg - 1)
+ fm /= np.abs(fm).max()
+ w = 1/(fm * fm)
+
+ # for Hermite_e we can also symmetrize
+ w = (w + w[::-1])/2
+ x = (x - x[::-1])/2
+
+ # scale w to get the right value
+ w *= np.sqrt(2*np.pi) / w.sum()
+
+ return x, w
+
+
+def hermeweight(x):
+ """Weight function of the Hermite_e polynomials.
+
+ The weight function is :math:`\\exp(-x^2/2)` and the interval of
+ integration is :math:`[-\\infty, \\infty]`. The HermiteE polynomials are
+ orthogonal, but not normalized, with respect to this weight function.
+
+ Parameters
+ ----------
+ x : array_like
+ Values at which the weight function will be computed.
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
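+ Examples
+ --------
+ A hand-computed spot check, using symmetric points so both values are
+ :math:`\\exp(-1/2)`:
+
+ >>> from numpy.polynomial.hermite_e import hermeweight
+ >>> hermeweight(np.array([-1., 1.]))
+ array([0.60653066, 0.60653066]) # may vary
+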
+ """
+ w = np.exp(-.5*x**2)
+ return w
+
+
+#
+# HermiteE series class
+#
+
+class HermiteE(ABCPolyBase):
+ """An HermiteE series class.
+
+ The HermiteE class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ HermiteE coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(x) + 3*He_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
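+ Examples
+ --------
+ A minimal illustrative check (hand-computed):
+
+ >>> from numpy.polynomial import HermiteE
+ >>> p = HermiteE([1, 2]) # 1*He_0(x) + 2*He_1(x) == 1 + 2*x
+ >>> p(0.5)
+ 2.0
+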
+ """
+ # Virtual Functions
+ _add = staticmethod(hermeadd)
+ _sub = staticmethod(hermesub)
+ _mul = staticmethod(hermemul)
+ _div = staticmethod(hermediv)
+ _pow = staticmethod(hermepow)
+ _val = staticmethod(hermeval)
+ _int = staticmethod(hermeint)
+ _der = staticmethod(hermeder)
+ _fit = staticmethod(hermefit)
+ _line = staticmethod(hermeline)
+ _roots = staticmethod(hermeroots)
+ _fromroots = staticmethod(hermefromroots)
+
+ # Virtual properties
+ domain = np.array(hermedomain)
+ window = np.array(hermedomain)
+ basis_name = 'He'
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi
new file mode 100644
index 00000000..0b7152a2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/hermite_e.pyi
@@ -0,0 +1,46 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+hermetrim = trimcoef
+
+def poly2herme(pol): ...
+def herme2poly(c): ...
+
+hermedomain: ndarray[Any, dtype[int_]]
+hermezero: ndarray[Any, dtype[int_]]
+hermeone: ndarray[Any, dtype[int_]]
+hermex: ndarray[Any, dtype[int_]]
+
+def hermeline(off, scl): ...
+def hermefromroots(roots): ...
+def hermeadd(c1, c2): ...
+def hermesub(c1, c2): ...
+def hermemulx(c): ...
+def hermemul(c1, c2): ...
+def hermediv(c1, c2): ...
+def hermepow(c, pow, maxpower=...): ...
+def hermeder(c, m=..., scl=..., axis=...): ...
+def hermeint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
+def hermeval(x, c, tensor=...): ...
+def hermeval2d(x, y, c): ...
+def hermegrid2d(x, y, c): ...
+def hermeval3d(x, y, z, c): ...
+def hermegrid3d(x, y, z, c): ...
+def hermevander(x, deg): ...
+def hermevander2d(x, y, deg): ...
+def hermevander3d(x, y, z, deg): ...
+def hermefit(x, y, deg, rcond=..., full=..., w=...): ...
+def hermecompanion(c): ...
+def hermeroots(c): ...
+def hermegauss(deg): ...
+def hermeweight(x): ...
+
+class HermiteE(ABCPolyBase):
+ domain: Any
+ window: Any
+ basis_name: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.py b/venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.py
new file mode 100644
index 00000000..2eaccece
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.py
@@ -0,0 +1,1645 @@
+"""
+==================================================
+Laguerre Series (:mod:`numpy.polynomial.laguerre`)
+==================================================
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Laguerre series, including a `Laguerre` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with such polynomials is in the
+docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Laguerre
+
+Constants
+---------
+.. autosummary::
+ :toctree: generated/
+
+ lagdomain
+ lagzero
+ lagone
+ lagx
+
+Arithmetic
+----------
+.. autosummary::
+ :toctree: generated/
+
+ lagadd
+ lagsub
+ lagmulx
+ lagmul
+ lagdiv
+ lagpow
+ lagval
+ lagval2d
+ lagval3d
+ laggrid2d
+ laggrid3d
+
+Calculus
+--------
+.. autosummary::
+ :toctree: generated/
+
+ lagder
+ lagint
+
+Misc Functions
+--------------
+.. autosummary::
+ :toctree: generated/
+
+ lagfromroots
+ lagroots
+ lagvander
+ lagvander2d
+ lagvander3d
+ laggauss
+ lagweight
+ lagcompanion
+ lagfit
+ lagtrim
+ lagline
+ lag2poly
+ poly2lag
+
+See also
+--------
+`numpy.polynomial`
+
+"""
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+__all__ = [
+ 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
+ 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
+ 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
+ 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
+ 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
+ 'laggauss', 'lagweight']
+
+lagtrim = pu.trimcoef
+
+
+def poly2lag(pol):
+ """
+ poly2lag(pol)
+
+ Convert a polynomial to a Laguerre series.
+
+ Convert an array representing the coefficients of a polynomial (relative
+ to the "standard" basis) ordered from lowest degree to highest, to an
+ array of the coefficients of the equivalent Laguerre series, ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ pol : array_like
+ 1-D array containing the polynomial coefficients
+
+ Returns
+ -------
+ c : ndarray
+ 1-D array containing the coefficients of the equivalent Laguerre
+ series.
+
+ See Also
+ --------
+ lag2poly
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import poly2lag
+ >>> poly2lag(np.arange(4))
+ array([ 23., -63., 58., -18.])
+
+ """
+ [pol] = pu.as_series([pol])
+ res = 0
+ for p in pol[::-1]:
+ res = lagadd(lagmulx(res), p)
+ return res
+
+
+def lag2poly(c):
+ """
+ Convert a Laguerre series to a polynomial.
+
+ Convert an array representing the coefficients of a Laguerre series,
+ ordered from lowest degree to highest, to an array of the coefficients
+ of the equivalent polynomial (relative to the "standard" basis) ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array containing the Laguerre series coefficients, ordered
+ from lowest order term to highest.
+
+ Returns
+ -------
+ pol : ndarray
+ 1-D array containing the coefficients of the equivalent polynomial
+ (relative to the "standard" basis) ordered from lowest order term
+ to highest.
+
+ See Also
+ --------
+ poly2lag
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lag2poly
+ >>> lag2poly([ 23., -63., 58., -18.])
+ array([0., 1., 2., 3.])
+
+ """
+ from .polynomial import polyadd, polysub, polymulx
+
+ [c] = pu.as_series([c])
+ n = len(c)
+ if n == 1:
+ return c
+ else:
+ c0 = c[-2]
+ c1 = c[-1]
+ # i is the current degree of c1
+ for i in range(n - 1, 1, -1):
+ tmp = c0
+ c0 = polysub(c[i - 2], (c1*(i - 1))/i)
+ c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
+ return polyadd(c0, polysub(c1, polymulx(c1)))
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Laguerre
+lagdomain = np.array([0, 1])
+
+# Laguerre coefficients representing zero.
+lagzero = np.array([0])
+
+# Laguerre coefficients representing one.
+lagone = np.array([1])
+
+# Laguerre coefficients representing the identity x.
+lagx = np.array([1, -1])
+
+
+def lagline(off, scl):
+ """
+ Laguerre series whose graph is a straight line.
+
+ Parameters
+ ----------
+ off, scl : scalars
+ The specified line is given by ``off + scl*x``.
+
+ Returns
+ -------
+ y : ndarray
+ This module's representation of the Laguerre series for
+ ``off + scl*x``.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagline, lagval
+ >>> lagval(0,lagline(3, 2))
+ 3.0
+ >>> lagval(1,lagline(3, 2))
+ 5.0
+
+ """
+ if scl != 0:
+ return np.array([off + scl, -scl])
+ else:
+ return np.array([off])
+
+
+def lagfromroots(roots):
+ """
+ Generate a Laguerre series with given roots.
+
+ The function returns the coefficients of the polynomial
+
+ .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+ in Laguerre form, where the `r_n` are the roots specified in `roots`.
+ If a zero has multiplicity n, then it must appear in `roots` n times.
+ For instance, if 2 is a root of multiplicity three and 3 is a root of
+ multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+ roots can appear in any order.
+
+ If the returned coefficients are `c`, then
+
+ .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+ The coefficient of the last term is not generally 1 for monic
+ polynomials in Laguerre form.
+
+ Parameters
+ ----------
+ roots : array_like
+ Sequence containing the roots.
+
+ Returns
+ -------
+ out : ndarray
+ 1-D array of coefficients. If all roots are real then `out` is a
+ real array, if some of the roots are complex, then `out` is complex
+ even if all the coefficients in the result are real (see Examples
+ below).
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagfromroots, lagval
+ >>> coef = lagfromroots((-1, 0, 1))
+ >>> lagval((-1, 0, 1), coef)
+ array([0., 0., 0.])
+ >>> coef = lagfromroots((-1j, 1j))
+ >>> lagval((-1j, 1j), coef)
+ array([0.+0.j, 0.+0.j])
+
+ """
+ return pu._fromroots(lagline, lagmul, roots)
+
+
+def lagadd(c1, c2):
+ """
+ Add one Laguerre series to another.
+
+ Returns the sum of two Laguerre series `c1` + `c2`. The arguments
+ are sequences of coefficients ordered from lowest order term to
+ highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Laguerre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the Laguerre series of their sum.
+
+ See Also
+ --------
+ lagsub, lagmulx, lagmul, lagdiv, lagpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the sum of two Laguerre series
+ is a Laguerre series (without having to "reproject" the result onto
+ the basis set) so addition, just like that of "standard" polynomials,
+ is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagadd
+ >>> lagadd([1, 2, 3], [1, 2, 3, 4])
+ array([2., 4., 6., 4.])
+
+
+ """
+ return pu._add(c1, c2)
+
+
+def lagsub(c1, c2):
+ """
+ Subtract one Laguerre series from another.
+
+ Returns the difference of two Laguerre series `c1` - `c2`. The
+ sequences of coefficients are from lowest order term to highest, i.e.,
+ [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Laguerre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Laguerre series coefficients representing their difference.
+
+ See Also
+ --------
+ lagadd, lagmulx, lagmul, lagdiv, lagpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the difference of two Laguerre
+ series is a Laguerre series (without having to "reproject" the result
+ onto the basis set) so subtraction, just like that of "standard"
+ polynomials, is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagsub
+ >>> lagsub([1, 2, 3, 4], [1, 2, 3])
+ array([0., 0., 0., 4.])
+
+ """
+ return pu._sub(c1, c2)
+
+
+def lagmulx(c):
+ """Multiply a Laguerre series by x.
+
+ Multiply the Laguerre series `c` by x, where x is the independent
+ variable.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Laguerre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the result of the multiplication.
+
+ See Also
+ --------
+ lagadd, lagsub, lagmul, lagdiv, lagpow
+
+ Notes
+ -----
+ The multiplication uses the recursion relationship for Laguerre
+ polynomials in the form
+
+ .. math::
+
+ xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagmulx
+ >>> lagmulx([1, 2, 3])
+ array([-1., -1., 11., -9.])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ # The zero series needs special treatment
+ if len(c) == 1 and c[0] == 0:
+ return c
+
+ prd = np.empty(len(c) + 1, dtype=c.dtype)
+ prd[0] = c[0]
+ prd[1] = -c[0]
+ for i in range(1, len(c)):
+ prd[i + 1] = -c[i]*(i + 1)
+ prd[i] += c[i]*(2*i + 1)
+ prd[i - 1] -= c[i]*i
+ return prd
+
+
+def lagmul(c1, c2):
+ """
+ Multiply one Laguerre series by another.
+
+ Returns the product of two Laguerre series `c1` * `c2`. The arguments
+ are sequences of coefficients, from lowest order "term" to highest,
+ e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Laguerre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Laguerre series coefficients representing their product.
+
+ See Also
+ --------
+ lagadd, lagsub, lagmulx, lagdiv, lagpow
+
+ Notes
+ -----
+ In general, the (polynomial) product of two C-series results in terms
+ that are not in the Laguerre polynomial basis set. Thus, to express
+ the product as a Laguerre series, it is necessary to "reproject" the
+ product onto said basis set, which may produce "unintuitive" (but
+ correct) results; see Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagmul
+ >>> lagmul([1, 2, 3], [0, 1, 2])
+ array([ 8., -13., 38., -51., 36.])
+
+ """
+ # s1, s2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+
+ if len(c1) > len(c2):
+ c = c2
+ xs = c1
+ else:
+ c = c1
+ xs = c2
+
+ if len(c) == 1:
+ c0 = c[0]*xs
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]*xs
+ c1 = c[1]*xs
+ else:
+ nd = len(c)
+ c0 = c[-2]*xs
+ c1 = c[-1]*xs
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
+ c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
+ return lagadd(c0, lagsub(c1, lagmulx(c1)))
+
+
+def lagdiv(c1, c2):
+ """
+ Divide one Laguerre series by another.
+
+ Returns the quotient-with-remainder of two Laguerre series
+ `c1` / `c2`. The arguments are sequences of coefficients from lowest
+ order "term" to highest, e.g., [1,2,3] represents the series
+ ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Laguerre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ [quo, rem] : ndarrays
+ Of Laguerre series coefficients representing the quotient and
+ remainder.
+
+ See Also
+ --------
+ lagadd, lagsub, lagmulx, lagmul, lagpow
+
+ Notes
+ -----
+ In general, the (polynomial) division of one Laguerre series by another
+ results in quotient and remainder terms that are not in the Laguerre
+ polynomial basis set. Thus, to express these results as a Laguerre
+ series, it is necessary to "reproject" the results onto the Laguerre
+ basis set, which may produce "unintuitive" (but correct) results; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagdiv
+ >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
+ (array([1., 2., 3.]), array([0.]))
+ >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
+ (array([1., 2., 3.]), array([1., 1.]))
+
+ """
+ return pu._div(lagmul, c1, c2)
+
+
+def lagpow(c, pow, maxpower=16):
+ """Raise a Laguerre series to a power.
+
+ Returns the Laguerre series `c` raised to the power `pow`. The
+ argument `c` is a sequence of coefficients ordered from low to high,
+ i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Laguerre series coefficients ordered from low to
+ high.
+ pow : integer
+ Power to which the series will be raised
+ maxpower : integer, optional
+ Maximum power allowed. This is mainly to limit growth of the series
+ to unmanageable size. Default is 16
+
+ Returns
+ -------
+ coef : ndarray
+ Laguerre series of power.
+
+ See Also
+ --------
+ lagadd, lagsub, lagmulx, lagmul, lagdiv
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagpow
+ >>> lagpow([1, 2, 3], 2)
+ array([ 14., -16., 56., -72., 54.])
+
+ """
+ return pu._pow(lagmul, c, pow, maxpower)
+
+
+def lagder(c, m=1, scl=1, axis=0):
+ """
+ Differentiate a Laguerre series.
+
+ Returns the Laguerre series coefficients `c` differentiated `m` times
+ along `axis`. At each iteration the result is multiplied by `scl` (the
+ scaling factor is for use in a linear change of variable). The argument
+ `c` is an array of coefficients from low to high degree along each
+ axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
+ while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
+ 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
+ ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Laguerre series coefficients. If `c` is multidimensional
+ the different axes correspond to different variables with the
+ degree in each axis given by the corresponding index.
+ m : int, optional
+ Number of derivatives taken, must be non-negative. (Default: 1)
+ scl : scalar, optional
+ Each differentiation is multiplied by `scl`. The end result is
+ multiplication by ``scl**m``. This is for use in a linear change of
+ variable. (Default: 1)
+ axis : int, optional
+ Axis over which the derivative is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ der : ndarray
+ Laguerre series of the derivative.
+
+ See Also
+ --------
+ lagint
+
+ Notes
+ -----
+ In general, the result of differentiating a Laguerre series does not
+ resemble the same operation on a power series. Thus the result of this
+ function may be "unintuitive," albeit correct; see Examples section
+ below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagder
+ >>> lagder([ 1., 1., 1., -3.])
+ array([1., 2., 3.])
+ >>> lagder([ 1., 0., 0., -4., 3.], m=2)
+ array([1., 2., 3.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of derivation must be non-negative")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ n = len(c)
+ if cnt >= n:
+ c = c[:1]*0
+ else:
+ for i in range(cnt):
+ n = n - 1
+ c *= scl
+ der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
+ for j in range(n, 1, -1):
+ der[j - 1] = -c[j]
+ c[j - 1] += c[j]
+ der[0] = -c[1]
+ c = der
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+ """
+ Integrate a Laguerre series.
+
+ Returns the Laguerre series coefficients `c` integrated `m` times from
+ `lbnd` along `axis`. At each iteration the resulting series is
+ **multiplied** by `scl` and an integration constant, `k`, is added.
+ The scaling factor is for use in a linear change of variable. ("Buyer
+ beware": note that, depending on what one is doing, one may want `scl`
+ to be the reciprocal of what one might expect; for more information,
+ see the Notes section below.) The argument `c` is an array of
+ coefficients from low to high degree along each axis, e.g., [1,2,3]
+ represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
+ represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
+ 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
+
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Laguerre series coefficients. If `c` is multidimensional
+ the different axes correspond to different variables with the
+ degree in each axis given by the corresponding index.
+ m : int, optional
+ Order of integration, must be positive. (Default: 1)
+ k : {[], list, scalar}, optional
+ Integration constant(s). The value of the first integral at
+ ``lbnd`` is the first value in the list, the value of the second
+ integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
+ default), all constants are set to zero. If ``m == 1``, a single
+ scalar can be given instead of a list.
+ lbnd : scalar, optional
+ The lower bound of the integral. (Default: 0)
+ scl : scalar, optional
+ Following each integration the result is *multiplied* by `scl`
+ before the integration constant is added. (Default: 1)
+ axis : int, optional
+ Axis over which the integral is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ S : ndarray
+ Laguerre series coefficients of the integral.
+
+ Raises
+ ------
+ ValueError
+ If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+ ``np.ndim(scl) != 0``.
+
+ See Also
+ --------
+ lagder
+
+ Notes
+ -----
+ Note that the result of each integration is *multiplied* by `scl`.
+ Why is this important to note? Say one is making a linear change of
+ variable :math:`u = ax + b` in an integral relative to `x`. Then
+ :math:`dx = du/a`, so one will need to set `scl` equal to
+ :math:`1/a` - perhaps not what one would have first thought.
+
+ Also note that, in general, the result of integrating a C-series needs
+ to be "reprojected" onto the C-series basis set. Thus, typically,
+ the result of this function is "unintuitive," albeit correct; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagint
+ >>> lagint([1,2,3])
+ array([ 1., 1., 1., -3.])
+ >>> lagint([1,2,3], m=2)
+ array([ 1., 0., 0., -4., 3.])
+ >>> lagint([1,2,3], k=1)
+ array([ 2., 1., 1., -3.])
+ >>> lagint([1,2,3], lbnd=-1)
+ array([11.5, 1. , 1. , -3. ])
+ >>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
+ array([ 11.16666667, -5. , -3. , 2. ]) # may vary
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if not np.iterable(k):
+ k = [k]
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of integration must be non-negative")
+ if len(k) > cnt:
+ raise ValueError("Too many integration constants")
+ if np.ndim(lbnd) != 0:
+ raise ValueError("lbnd must be a scalar.")
+ if np.ndim(scl) != 0:
+ raise ValueError("scl must be a scalar.")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ k = list(k) + [0]*(cnt - len(k))
+ for i in range(cnt):
+ n = len(c)
+ c *= scl
+ if n == 1 and np.all(c[0] == 0):
+ c[0] += k[i]
+ else:
+ tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+ tmp[0] = c[0]
+ tmp[1] = -c[0]
+ for j in range(1, n):
+ tmp[j] += c[j]
+ tmp[j + 1] = -c[j]
+ tmp[0] += k[i] - lagval(lbnd, tmp)
+ c = tmp
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def lagval(x, c, tensor=True):
+ """
+ Evaluate a Laguerre series at points x.
+
+ If `c` is of length `n + 1`, this function returns the value:
+
+ .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+ `c` is multidimensional, then the shape of the result depends on the
+ value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+ x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+ scalars have shape ().
+
+ Trailing zeros in the coefficients will be used in the evaluation, so
+ they should be avoided if efficiency is a concern.
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+ themselves and with the elements of `c`.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree n are contained in c[n]. If `c` is multidimensional the
+ remaining indices enumerate multiple polynomials. In the two
+ dimensional case the coefficients may be thought of as stored in
+ the columns of `c`.
+ tensor : boolean, optional
+ If True, the shape of the coefficient array is extended with ones
+ on the right, one for each dimension of `x`. Scalars have dimension 0
+ for this action. The result is that every column of coefficients in
+ `c` is evaluated for every element of `x`. If False, `x` is broadcast
+ over the columns of `c` for the evaluation. This keyword is useful
+ when `c` is multidimensional. The default value is True.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, algebra_like
+ The shape of the return value is described above.
+
+ See Also
+ --------
+ lagval2d, laggrid2d, lagval3d, laggrid3d
+
+ Notes
+ -----
+ The evaluation uses Clenshaw recursion, aka synthetic division.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagval
+ >>> coef = [1,2,3]
+ >>> lagval(1, coef)
+ -0.5
+ >>> lagval([[1,2],[3,4]], coef)
+ array([[-0.5, -4. ],
+ [-4.5, -2. ]])
+
+ """
+ c = np.array(c, ndmin=1, copy=False)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ if len(c) == 1:
+ c0 = c[0]
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]
+ c1 = c[1]
+ else:
+ nd = len(c)
+ c0 = c[-2]
+ c1 = c[-1]
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = c[-i] - (c1*(nd - 1))/nd
+ c1 = tmp + (c1*((2*nd - 1) - x))/nd
+ return c0 + c1*(1 - x)
+
+
+def lagval2d(x, y, c):
+ """
+ Evaluate a 2-D Laguerre series at points (x, y).
+
+ This function returns the values:
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, a 1 is implicitly appended to its shape to make
+ it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and if it isn't an ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+ dimension greater than two the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points formed with
+ pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ lagval, laggrid2d, lagval3d, laggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
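+ Examples
+ --------
+ A minimal sketch; the coefficients below are chosen by hand so that
+ the series equals ``2 - x - y``:
+
+ >>> from numpy.polynomial.laguerre import lagval2d
+ >>> c = [[0, 1], [1, 0]]  # L_0(x)*L_1(y) + L_1(x)*L_0(y) = 2 - x - y
+ >>> lagval2d([0., 1.], [0., 1.], c)
+ array([2., 0.])
+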
+ """
+ return pu._valnd(lagval, c, x, y)
+
+
+def laggrid2d(x, y, c):
+ """
+ Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape + y.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points in the
+ Cartesian product of `x` and `y`. If `x` or `y` is a list or
+ tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+ greater than two the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional Laguerre series at points in the
+ Cartesian product of `x` and `y`.
+
+ See Also
+ --------
+ lagval, lagval2d, lagval3d, laggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
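+ Examples
+ --------
+ A minimal sketch of the grid shape, using an arbitrary constant series:
+
+ >>> from numpy.polynomial.laguerre import laggrid2d
+ >>> laggrid2d([0., 1.], [0., 1., 2.], [[1.]]).shape
+ (2, 3)
+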
+ """
+ return pu._gridnd(lagval, c, x, y)
+
+
+def lagval3d(x, y, z, c):
+ """
+ Evaluate a 3-D Laguerre series at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+ they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ lagval, lagval2d, laggrid2d, laggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
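+ Examples
+ --------
+ A minimal sketch; every ``L_i(0)`` is 1, so evaluating at the origin
+ sums all the coefficients, while ``L_1(1) = 0`` leaves only ``c[0,0,0]``:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial.laguerre import lagval3d
+ >>> c = np.arange(8.).reshape(2, 2, 2)
+ >>> lagval3d([0., 1.], [0., 1.], [0., 1.], c)
+ array([28., 0.])
+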
+ """
+ return pu._valnd(lagval, c, x, y, z)
+
+
+def laggrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+ are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional polynomial at points in the
+ Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ lagval, lagval2d, laggrid2d, lagval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
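+ Examples
+ --------
+ A minimal sketch of the grid shape, using an arbitrary constant series:
+
+ >>> from numpy.polynomial.laguerre import laggrid3d
+ >>> laggrid3d([0., 1.], [0., 1.], [0., 1., 2.], [[[1.]]]).shape
+ (2, 2, 3)
+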
+ """
+ return pu._gridnd(lagval, c, x, y, z)
+
+
+def lagvander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = L_i(x)
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the Laguerre polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
+ ``lagval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of Laguerre series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo-Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding Laguerre polynomial. The dtype will be the same as
+ the converted `x`.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagvander
+ >>> x = np.array([0, 1, 2])
+ >>> lagvander(x, 3)
+ array([[ 1. , 1. , 1. , 1. ],
+ [ 1. , 0. , -0.5 , -0.66666667],
+ [ 1. , -1. , -1. , -0.33333333]])
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ v[0] = x*0 + 1
+ if ideg > 0:
+ v[1] = 1 - x
+ for i in range(2, ideg + 1):
+ v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
+ return np.moveaxis(v, 0, -1)
+
+
+def lagvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the Laguerre polynomials.
+
+ If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D Laguerre
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ lagvander, lagvander3d, lagval2d, lagval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
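+ Examples
+ --------
+ A quick sketch of the documented identity, with arbitrary sample
+ points and coefficients:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial.laguerre import lagvander2d, lagval2d
+ >>> x = y = np.array([0.5, 2.0])
+ >>> c = np.array([[1., 2.], [3., 4.]])
+ >>> V = lagvander2d(x, y, [1, 1])
+ >>> np.allclose(np.dot(V, c.flat), lagval2d(x, y, c))
+ True
+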
+ """
+ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
+
+
+def lagvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the Laguerre polynomials.
+
+ If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D Laguerre
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ lagvander, lagvander2d, lagval2d, lagval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
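+ Examples
+ --------
+ The analogous sketch in three dimensions, again with arbitrary inputs:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial.laguerre import lagvander3d, lagval3d
+ >>> x = y = z = np.array([0.5, 2.0])
+ >>> c = np.arange(8.).reshape(2, 2, 2)
+ >>> V = lagvander3d(x, y, z, [1, 1, 1])
+ >>> np.allclose(np.dot(V, c.flat), lagval3d(x, y, z, c))
+ True
+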
+ """
+ return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
+
+
+def lagfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Laguerre series to data.
+
+ Return the coefficients of a Laguerre series of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+ where ``n`` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (`M`,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Laguerre coefficients ordered from low to high. If `y` was 2-D,
+ the coefficients for the data in column *k* of `y` are in column
+ *k*.
+
+ [residuals, rank, singular_values, rcond] : list
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - singular_values -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full == False``. The
+ warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
+ lagval : Evaluates a Laguerre series.
+ lagvander : pseudo Vandermonde matrix of Laguerre series.
+ lagweight : Laguerre weight function.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution is the coefficients of the Laguerre series ``p`` that
+ minimizes the sum of the weighted squared errors
+
+ .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+ where the :math:`w_j` are the weights. This problem is solved by
+ setting up the (typically) overdetermined matrix equation
+
+ .. math:: V(x) * c = w * y,
+
+ where ``V`` is the weighted pseudo Vandermonde matrix of `x`, ``c`` are the
+ coefficients to be solved for, `w` are the weights, and `y` are the
+ observed values. This equation is then solved using the singular value
+ decomposition of ``V``.
+
+ If some of the singular values of `V` are so small that they are
+ neglected, then a `RankWarning` will be issued. This means that the
+ coefficient values may be poorly determined. Using a lower order fit
+ will usually get rid of the warning. The `rcond` parameter can also be
+ set to a value smaller than its default, but the resulting fit may be
+ spurious and have large contributions from roundoff error.
+
+ Fits using Laguerre series are probably most useful when the data can
+ be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Laguerre
+ weight. In that case the weight ``sqrt(w(x[i]))`` should be used
+ together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
+ available as `lagweight`.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagfit, lagval
+ >>> x = np.linspace(0, 10)
+ >>> err = np.random.randn(len(x))/10
+ >>> y = lagval(x, [1, 2, 3]) + err
+ >>> lagfit(x, y, 2)
+ array([ 0.96971004, 2.00193749, 3.00288744]) # may vary
+
+ """
+ return pu._fit(lagvander, x, y, deg, rcond, full, w)
+
+
+def lagcompanion(c):
+ """
+ Return the companion matrix of c.
+
+ The usual companion matrix of the Laguerre polynomials is already
+ symmetric when `c` is a basis Laguerre polynomial, so no scaling is
+ applied.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Laguerre series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
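+ Examples
+ --------
+ For the basis polynomial ``L_2`` the matrix is symmetric tridiagonal,
+ and its eigenvalues, ``2 - sqrt(2)`` and ``2 + sqrt(2)``, are the
+ roots of ``L_2``:
+
+ >>> from numpy.polynomial.laguerre import lagcompanion
+ >>> lagcompanion([0, 0, 1])
+ array([[ 1., -1.],
+ [-1., 3.]])
+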
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[1 + c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ top = mat.reshape(-1)[1::n+1]
+ mid = mat.reshape(-1)[0::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[...] = -np.arange(1, n)
+ mid[...] = 2.*np.arange(n) + 1.
+ bot[...] = top
+ mat[:, -1] += (c[:-1]/c[-1])*n
+ return mat
+
+
+def lagroots(c):
+ """
+ Compute the roots of a Laguerre series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ The Laguerre series basis polynomials aren't powers of `x` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
+ >>> coef = lagfromroots([0, 1, 2])
+ >>> coef
+ array([ 2., -8., 12., -6.])
+ >>> lagroots(coef)
+ array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) <= 1:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([1 + c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = lagcompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+def laggauss(deg):
+ """
+ Gauss-Laguerre quadrature.
+
+ Computes the sample points and weights for Gauss-Laguerre quadrature.
+ These sample points and weights will correctly integrate polynomials of
+ degree :math:`2*deg - 1` or less over the interval :math:`[0, \\infty]`
+ with the weight function :math:`f(x) = \\exp(-x)`.
+
+ Parameters
+ ----------
+ deg : int
+ Number of sample points and weights. It must be >= 1.
+
+ Returns
+ -------
+ x : ndarray
+ 1-D ndarray containing the sample points.
+ w : ndarray
+ 1-D ndarray containing the weights.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ The results have only been tested up to degree 100; higher degrees may
+ be problematic. The weights are determined by using the fact that
+
+ .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+ where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+ is the k'th root of :math:`L_n`, and then scaling the results to get
+ the right value when integrating 1.
+
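+ Examples
+ --------
+ A small sketch of a two-point rule: the nodes are the roots
+ ``2 - sqrt(2)`` and ``2 + sqrt(2)`` of ``L_2``, and the rule integrates
+ cubics such as ``x**3 * exp(-x)`` (whose integral over the interval
+ is 6) exactly:
+
+ >>> from numpy.polynomial.laguerre import laggauss
+ >>> x, w = laggauss(2)
+ >>> x
+ array([0.58578644, 3.41421356]) # may vary
+ >>> w
+ array([0.85355339, 0.14644661]) # may vary
+ >>> round((w * x**3).sum(), 10)
+ 6.0
+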
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
+
+ # first approximation of roots. We use the fact that the companion
+ # matrix is symmetric in this case in order to obtain better zeros.
+ c = np.array([0]*deg + [1])
+ m = lagcompanion(c)
+ x = la.eigvalsh(m)
+
+ # improve roots by one application of Newton
+ dy = lagval(x, c)
+ df = lagval(x, lagder(c))
+ x -= dy/df
+
+ # compute the weights. We scale the factor to avoid possible numerical
+ # overflow.
+ fm = lagval(x, c[1:])
+ fm /= np.abs(fm).max()
+ df /= np.abs(df).max()
+ w = 1/(fm * df)
+
+ # scale w to get the right value, 1 in this case
+ w /= w.sum()
+
+ return x, w
+
+
+def lagweight(x):
+ """Weight function of the Laguerre polynomials.
+
+ The weight function is :math:`\\exp(-x)` and the interval of integration
+ is :math:`[0, \\infty]`. The Laguerre polynomials are orthogonal, but not
+ normalized, with respect to this weight function.
+
+ Parameters
+ ----------
+ x : array_like
+ Values at which the weight function will be computed.
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
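+ Examples
+ --------
+ The weight decays exponentially on the domain:
+
+ >>> import numpy as np
+ >>> from numpy.polynomial.laguerre import lagweight
+ >>> lagweight(np.array([0., 1., 2.]))
+ array([1. , 0.36787944, 0.13533528])
+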
+ """
+ w = np.exp(-x)
+ return w
+
+#
+# Laguerre series class
+#
+
+class Laguerre(ABCPolyBase):
+ """A Laguerre series class.
+
+ The Laguerre class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ Laguerre coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [0, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [0, 1].
+
+ .. versionadded:: 1.6.0
+
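+ Examples
+ --------
+ A short usage sketch, with arbitrary coefficients:
+
+ >>> from numpy.polynomial import Laguerre
+ >>> p = Laguerre([1, 2, 3])
+ >>> p(0)   # every L_i(0) is 1, so this sums the coefficients
+ 6.0
+ >>> (p + p).coef
+ array([2., 4., 6.])
+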
+ """
+ # Virtual Functions
+ _add = staticmethod(lagadd)
+ _sub = staticmethod(lagsub)
+ _mul = staticmethod(lagmul)
+ _div = staticmethod(lagdiv)
+ _pow = staticmethod(lagpow)
+ _val = staticmethod(lagval)
+ _int = staticmethod(lagint)
+ _der = staticmethod(lagder)
+ _fit = staticmethod(lagfit)
+ _line = staticmethod(lagline)
+ _roots = staticmethod(lagroots)
+ _fromroots = staticmethod(lagfromroots)
+
+ # Virtual properties
+ domain = np.array(lagdomain)
+ window = np.array(lagdomain)
+ basis_name = 'L'
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi
new file mode 100644
index 00000000..e546bc20
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/laguerre.pyi
@@ -0,0 +1,46 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+lagtrim = trimcoef
+
+def poly2lag(pol): ...
+def lag2poly(c): ...
+
+lagdomain: ndarray[Any, dtype[int_]]
+lagzero: ndarray[Any, dtype[int_]]
+lagone: ndarray[Any, dtype[int_]]
+lagx: ndarray[Any, dtype[int_]]
+
+def lagline(off, scl): ...
+def lagfromroots(roots): ...
+def lagadd(c1, c2): ...
+def lagsub(c1, c2): ...
+def lagmulx(c): ...
+def lagmul(c1, c2): ...
+def lagdiv(c1, c2): ...
+def lagpow(c, pow, maxpower=...): ...
+def lagder(c, m=..., scl=..., axis=...): ...
+def lagint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
+def lagval(x, c, tensor=...): ...
+def lagval2d(x, y, c): ...
+def laggrid2d(x, y, c): ...
+def lagval3d(x, y, z, c): ...
+def laggrid3d(x, y, z, c): ...
+def lagvander(x, deg): ...
+def lagvander2d(x, y, deg): ...
+def lagvander3d(x, y, z, deg): ...
+def lagfit(x, y, deg, rcond=..., full=..., w=...): ...
+def lagcompanion(c): ...
+def lagroots(c): ...
+def laggauss(deg): ...
+def lagweight(x): ...
+
+class Laguerre(ABCPolyBase):
+ domain: Any
+ window: Any
+ basis_name: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/legendre.py b/venv/lib/python3.9/site-packages/numpy/polynomial/legendre.py
new file mode 100644
index 00000000..028e2fe7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/legendre.py
@@ -0,0 +1,1658 @@
+"""
+==================================================
+Legendre Series (:mod:`numpy.polynomial.legendre`)
+==================================================
+
+This module provides a number of objects (mostly functions) useful for
+dealing with Legendre series, including a `Legendre` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with such polynomials is in the
+docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Legendre
+
+Constants
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ legdomain
+ legzero
+ legone
+ legx
+
+Arithmetic
+----------
+
+.. autosummary::
+ :toctree: generated/
+
+ legadd
+ legsub
+ legmulx
+ legmul
+ legdiv
+ legpow
+ legval
+ legval2d
+ legval3d
+ leggrid2d
+ leggrid3d
+
+Calculus
+--------
+
+.. autosummary::
+ :toctree: generated/
+
+ legder
+ legint
+
+Misc Functions
+--------------
+
+.. autosummary::
+ :toctree: generated/
+
+ legfromroots
+ legroots
+ legvander
+ legvander2d
+ legvander3d
+ leggauss
+ legweight
+ legcompanion
+ legfit
+ legtrim
+ legline
+ leg2poly
+ poly2leg
+
+See also
+--------
+numpy.polynomial
+
+"""
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+__all__ = [
+ 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
+ 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
+ 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
+ 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
+ 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
+ 'leggauss', 'legweight']
+
+legtrim = pu.trimcoef
+
+
+def poly2leg(pol):
+ """
+ Convert a polynomial to a Legendre series.
+
+ Convert an array representing the coefficients of a polynomial (relative
+ to the "standard" basis) ordered from lowest degree to highest, to an
+ array of the coefficients of the equivalent Legendre series, ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ pol : array_like
+ 1-D array containing the polynomial coefficients
+
+ Returns
+ -------
+ c : ndarray
+ 1-D array containing the coefficients of the equivalent Legendre
+ series.
+
+ See Also
+ --------
+ leg2poly
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy import polynomial as P
+ >>> p = P.Polynomial(np.arange(4))
+ >>> p
+ Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ >>> c = P.Legendre(P.legendre.poly2leg(p.coef))
+ >>> c
+ Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary
+
+ """
+ [pol] = pu.as_series([pol])
+ deg = len(pol) - 1
+ res = 0
+ for i in range(deg, -1, -1):
+ res = legadd(legmulx(res), pol[i])
+ return res
+
+
+def leg2poly(c):
+ """
+ Convert a Legendre series to a polynomial.
+
+ Convert an array representing the coefficients of a Legendre series,
+ ordered from lowest degree to highest, to an array of the coefficients
+ of the equivalent polynomial (relative to the "standard" basis) ordered
+ from lowest to highest degree.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array containing the Legendre series coefficients, ordered
+ from lowest order term to highest.
+
+ Returns
+ -------
+ pol : ndarray
+ 1-D array containing the coefficients of the equivalent polynomial
+ (relative to the "standard" basis) ordered from lowest order term
+ to highest.
+
+ See Also
+ --------
+ poly2leg
+
+ Notes
+ -----
+ The easy way to do conversions between polynomial basis sets
+ is to use the convert method of a class instance.
+
+ Examples
+ --------
+ >>> from numpy import polynomial as P
+ >>> c = P.Legendre(range(4))
+ >>> c
+ Legendre([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ >>> p = c.convert(kind=P.Polynomial)
+ >>> p
+ Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., 1.])
+ >>> P.legendre.leg2poly(range(4))
+ array([-1. , -3.5, 3. , 7.5])
+
+
+ """
+ from .polynomial import polyadd, polysub, polymulx
+
+ [c] = pu.as_series([c])
+ n = len(c)
+ if n < 3:
+ return c
+ else:
+ c0 = c[-2]
+ c1 = c[-1]
+ # i is the current degree of c1
+ for i in range(n - 1, 1, -1):
+ tmp = c0
+ c0 = polysub(c[i - 2], (c1*(i - 1))/i)
+ c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
+ return polyadd(c0, polymulx(c1))
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Legendre
+legdomain = np.array([-1, 1])
+
+# Legendre coefficients representing zero.
+legzero = np.array([0])
+
+# Legendre coefficients representing one.
+legone = np.array([1])
+
+# Legendre coefficients representing the identity x.
+legx = np.array([0, 1])
+
+
+def legline(off, scl):
+ """
+ Legendre series whose graph is a straight line.
+
+ Parameters
+ ----------
+ off, scl : scalars
+ The specified line is given by ``off + scl*x``.
+
+ Returns
+ -------
+ y : ndarray
+ This module's representation of the Legendre series for
+ ``off + scl*x``.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyline
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
+
+ Examples
+ --------
+ >>> import numpy.polynomial.legendre as L
+ >>> L.legline(3,2)
+ array([3, 2])
+ >>> L.legval(-3, L.legline(3,2)) # should be -3
+ -3.0
+
+ """
+ if scl != 0:
+ return np.array([off, scl])
+ else:
+ return np.array([off])
+
+
+def legfromroots(roots):
+ """
+ Generate a Legendre series with given roots.
+
+ The function returns the coefficients of the polynomial
+
+ .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+ in Legendre form, where the `r_n` are the roots specified in `roots`.
+ If a zero has multiplicity n, then it must appear in `roots` n times.
+ For instance, if 2 is a root of multiplicity three and 3 is a root of
+ multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+ roots can appear in any order.
+
+ If the returned coefficients are `c`, then
+
+ .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+ The coefficient of the last term is not generally 1 for monic
+ polynomials in Legendre form.
+
+ Parameters
+ ----------
+ roots : array_like
+ Sequence containing the roots.
+
+ Returns
+ -------
+ out : ndarray
+ 1-D array of coefficients. If all roots are real then `out` is a
+ real array, if some of the roots are complex, then `out` is complex
+ even if all the coefficients in the result are real (see Examples
+ below).
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfromroots
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
+
+ Examples
+ --------
+ >>> import numpy.polynomial.legendre as L
+ >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
+ array([ 0. , -0.4, 0. , 0.4])
+ >>> j = complex(0,1)
+ >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
+ array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary
+
+ """
+ return pu._fromroots(legline, legmul, roots)
+
+
+def legadd(c1, c2):
+ """
+ Add one Legendre series to another.
+
+ Returns the sum of two Legendre series `c1` + `c2`. The arguments
+ are sequences of coefficients ordered from lowest order term to
+ highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Legendre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the Legendre series of their sum.
+
+ See Also
+ --------
+ legsub, legmulx, legmul, legdiv, legpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the sum of two Legendre series
+ is a Legendre series (without having to "reproject" the result onto
+ the basis set) so addition, just like that of "standard" polynomials,
+ is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> L.legadd(c1,c2)
+ array([4., 4., 4.])
+
+ """
+ return pu._add(c1, c2)
+
+
+def legsub(c1, c2):
+ """
+ Subtract one Legendre series from another.
+
+ Returns the difference of two Legendre series `c1` - `c2`. The
+ sequences of coefficients are from lowest order term to highest, i.e.,
+ [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Legendre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Legendre series coefficients representing their difference.
+
+ See Also
+ --------
+ legadd, legmulx, legmul, legdiv, legpow
+
+ Notes
+ -----
+ Unlike multiplication, division, etc., the difference of two Legendre
+ series is a Legendre series (without having to "reproject" the result
+ onto the basis set) so subtraction, just like that of "standard"
+ polynomials, is simply "component-wise."
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> L.legsub(c1,c2)
+ array([-2., 0., 2.])
+ >>> L.legsub(c2,c1) # -C.legsub(c1,c2)
+ array([ 2., 0., -2.])
+
+ """
+ return pu._sub(c1, c2)
+
+
+def legmulx(c):
+ """Multiply a Legendre series by x.
+
+ Multiply the Legendre series `c` by x, where x is the independent
+ variable.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Legendre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the result of the multiplication.
+
+ See Also
+ --------
+ legadd, legsub, legmul, legdiv, legpow
+
+ Notes
+ -----
+ The multiplication uses the recursion relationship for Legendre
+ polynomials in the form
+
+ .. math::
+
+ xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> L.legmulx([1,2,3])
+ array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ # The zero series needs special treatment
+ if len(c) == 1 and c[0] == 0:
+ return c
+
+ prd = np.empty(len(c) + 1, dtype=c.dtype)
+ prd[0] = c[0]*0
+ prd[1] = c[0]
+ for i in range(1, len(c)):
+ j = i + 1
+ k = i - 1
+ s = i + j
+ prd[j] = (c[i]*j)/s
+ prd[k] += (c[i]*i)/s
+ return prd
+
+
+def legmul(c1, c2):
+ """
+ Multiply one Legendre series by another.
+
+ Returns the product of two Legendre series `c1` * `c2`. The arguments
+ are sequences of coefficients, from lowest order "term" to highest,
+ e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Legendre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of Legendre series coefficients representing their product.
+
+ See Also
+ --------
+ legadd, legsub, legmulx, legdiv, legpow
+
+ Notes
+ -----
+ In general, the (polynomial) product of two C-series results in terms
+ that are not in the Legendre polynomial basis set. Thus, to express
+ the product as a Legendre series, it is necessary to "reproject" the
+ product onto said basis set, which may produce "unintuitive" (but
+ correct) results; see Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2)
+ >>> L.legmul(c1,c2) # multiplication requires "reprojection"
+ array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary
+
+ """
+ # s1, s2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+
+ if len(c1) > len(c2):
+ c = c2
+ xs = c1
+ else:
+ c = c1
+ xs = c2
+
+ if len(c) == 1:
+ c0 = c[0]*xs
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]*xs
+ c1 = c[1]*xs
+ else:
+ nd = len(c)
+ c0 = c[-2]*xs
+ c1 = c[-1]*xs
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
+ c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
+ return legadd(c0, legmulx(c1))
+
+
+def legdiv(c1, c2):
+ """
+ Divide one Legendre series by another.
+
+ Returns the quotient-with-remainder of two Legendre series
+ `c1` / `c2`. The arguments are sequences of coefficients from lowest
+ order "term" to highest, e.g., [1,2,3] represents the series
+ ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of Legendre series coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ quo, rem : ndarrays
+ Of Legendre series coefficients representing the quotient and
+ remainder.
+
+ See Also
+ --------
+ legadd, legsub, legmulx, legmul, legpow
+
+ Notes
+ -----
+ In general, the (polynomial) division of one Legendre series by another
+ results in quotient and remainder terms that are not in the Legendre
+ polynomial basis set. Thus, to express these results as a Legendre
+ series, it is necessary to "reproject" the results onto the Legendre
+ basis set, which may produce "unintuitive" (but correct) results; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
+ (array([3.]), array([-8., -4.]))
+ >>> c2 = (0,1,2,3)
+ >>> L.legdiv(c2,c1) # neither "intuitive"
+ (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary
+
+ """
+ return pu._div(legmul, c1, c2)
+
+
+def legpow(c, pow, maxpower=16):
+ """Raise a Legendre series to a power.
+
+ Returns the Legendre series `c` raised to the power `pow`. The
+    argument `c` is a sequence of coefficients ordered from low to high,
+    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Legendre series coefficients ordered from low to
+ high.
+ pow : integer
+        Power to which the series will be raised.
+    maxpower : integer, optional
+        Maximum power allowed. This is mainly to limit growth of the series
+        to unmanageable size. Default is 16.
+
+ Returns
+ -------
+ coef : ndarray
+ Legendre series of power.
+
+ See Also
+ --------
+ legadd, legsub, legmulx, legmul, legdiv
+
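+    Examples
+    --------
+    A minimal sketch; printed floating point values may vary:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legpow([1, 2, 3], 2)   # same as L.legmul([1, 2, 3], [1, 2, 3])
+    array([ 4.13333333,  8.8       , 11.23809524,  7.2       ,  4.62857143])  # may vary
+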
+ """
+ return pu._pow(legmul, c, pow, maxpower)
+
+
+def legder(c, m=1, scl=1, axis=0):
+ """
+ Differentiate a Legendre series.
+
+ Returns the Legendre series coefficients `c` differentiated `m` times
+ along `axis`. At each iteration the result is multiplied by `scl` (the
+ scaling factor is for use in a linear change of variable). The argument
+ `c` is an array of coefficients from low to high degree along each
+ axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
+ while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
+ 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
+ ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Legendre series coefficients. If c is multidimensional the
+ different axis correspond to different variables with the degree in
+ each axis given by the corresponding index.
+ m : int, optional
+ Number of derivatives taken, must be non-negative. (Default: 1)
+ scl : scalar, optional
+ Each differentiation is multiplied by `scl`. The end result is
+ multiplication by ``scl**m``. This is for use in a linear change of
+ variable. (Default: 1)
+ axis : int, optional
+ Axis over which the derivative is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ der : ndarray
+ Legendre series of the derivative.
+
+ See Also
+ --------
+ legint
+
+ Notes
+ -----
+ In general, the result of differentiating a Legendre series does not
+ resemble the same operation on a power series. Thus the result of this
+ function may be "unintuitive," albeit correct; see Examples section
+ below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> c = (1,2,3,4)
+ >>> L.legder(c)
+ array([ 6., 9., 20.])
+ >>> L.legder(c, 3)
+ array([60.])
+ >>> L.legder(c, scl=-1)
+ array([ -6., -9., -20.])
+ >>> L.legder(c, 2,-1)
+ array([ 9., 60.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of derivation must be non-negative")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ n = len(c)
+ if cnt >= n:
+ c = c[:1]*0
+ else:
+ for i in range(cnt):
+ n = n - 1
+ c *= scl
+ der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
+ for j in range(n, 2, -1):
+ der[j - 1] = (2*j - 1)*c[j]
+ c[j - 2] += c[j]
+ if n > 1:
+ der[1] = 3*c[2]
+ der[0] = c[1]
+ c = der
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+ """
+ Integrate a Legendre series.
+
+ Returns the Legendre series coefficients `c` integrated `m` times from
+ `lbnd` along `axis`. At each iteration the resulting series is
+ **multiplied** by `scl` and an integration constant, `k`, is added.
+ The scaling factor is for use in a linear change of variable. ("Buyer
+ beware": note that, depending on what one is doing, one may want `scl`
+ to be the reciprocal of what one might expect; for more information,
+ see the Notes section below.) The argument `c` is an array of
+ coefficients from low to high degree along each axis, e.g., [1,2,3]
+ represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
+ represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
+ 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of Legendre series coefficients. If c is multidimensional the
+ different axis correspond to different variables with the degree in
+ each axis given by the corresponding index.
+ m : int, optional
+        Order of integration, must be non-negative. (Default: 1)
+ k : {[], list, scalar}, optional
+ Integration constant(s). The value of the first integral at
+ ``lbnd`` is the first value in the list, the value of the second
+ integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
+ default), all constants are set to zero. If ``m == 1``, a single
+ scalar can be given instead of a list.
+ lbnd : scalar, optional
+ The lower bound of the integral. (Default: 0)
+ scl : scalar, optional
+ Following each integration the result is *multiplied* by `scl`
+ before the integration constant is added. (Default: 1)
+ axis : int, optional
+ Axis over which the integral is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ S : ndarray
+ Legendre series coefficient array of the integral.
+
+ Raises
+ ------
+ ValueError
+ If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+ ``np.ndim(scl) != 0``.
+
+ See Also
+ --------
+ legder
+
+ Notes
+ -----
+ Note that the result of each integration is *multiplied* by `scl`.
+ Why is this important to note? Say one is making a linear change of
+ variable :math:`u = ax + b` in an integral relative to `x`. Then
+ :math:`dx = du/a`, so one will need to set `scl` equal to
+ :math:`1/a` - perhaps not what one would have first thought.
+
+ Also note that, in general, the result of integrating a C-series needs
+ to be "reprojected" onto the C-series basis set. Thus, typically,
+ the result of this function is "unintuitive," albeit correct; see
+ Examples section below.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> c = (1,2,3)
+ >>> L.legint(c)
+ array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary
+ >>> L.legint(c, 3)
+ array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary
+ -1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
+ >>> L.legint(c, k=3)
+ array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary
+ >>> L.legint(c, lbnd=-2)
+ array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary
+ >>> L.legint(c, scl=2)
+ array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if not np.iterable(k):
+ k = [k]
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of integration must be non-negative")
+ if len(k) > cnt:
+ raise ValueError("Too many integration constants")
+ if np.ndim(lbnd) != 0:
+ raise ValueError("lbnd must be a scalar.")
+ if np.ndim(scl) != 0:
+ raise ValueError("scl must be a scalar.")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ k = list(k) + [0]*(cnt - len(k))
+ for i in range(cnt):
+ n = len(c)
+ c *= scl
+ if n == 1 and np.all(c[0] == 0):
+ c[0] += k[i]
+ else:
+ tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
+ tmp[0] = c[0]*0
+ tmp[1] = c[0]
+ if n > 1:
+ tmp[2] = c[1]/3
+ for j in range(2, n):
+ t = c[j]/(2*j + 1)
+ tmp[j + 1] = t
+ tmp[j - 1] -= t
+ tmp[0] += k[i] - legval(lbnd, tmp)
+ c = tmp
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def legval(x, c, tensor=True):
+ """
+ Evaluate a Legendre series at points x.
+
+ If `c` is of length `n + 1`, this function returns the value:
+
+ .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+ `c` is multidimensional, then the shape of the result depends on the
+ value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+ x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+    scalars have shape ``()``.
+
+ Trailing zeros in the coefficients will be used in the evaluation, so
+ they should be avoided if efficiency is a concern.
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+ themselves and with the elements of `c`.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree n are contained in c[n]. If `c` is multidimensional the
+ remaining indices enumerate multiple polynomials. In the two
+ dimensional case the coefficients may be thought of as stored in
+ the columns of `c`.
+ tensor : boolean, optional
+ If True, the shape of the coefficient array is extended with ones
+ on the right, one for each dimension of `x`. Scalars have dimension 0
+ for this action. The result is that every column of coefficients in
+ `c` is evaluated for every element of `x`. If False, `x` is broadcast
+ over the columns of `c` for the evaluation. This keyword is useful
+ when `c` is multidimensional. The default value is True.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, algebra_like
+ The shape of the return value is described above.
+
+ See Also
+ --------
+ legval2d, leggrid2d, legval3d, leggrid3d
+
+ Notes
+ -----
+ The evaluation uses Clenshaw recursion, aka synthetic division.
+
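+    Examples
+    --------
+    A small sketch; note that ``P_n(1) == 1`` for all ``n``:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legval(1, (1, 2, 3))          # 1 + 2 + 3
+    6.0
+    >>> L.legval([-1, 0, 1], (1, 2, 3))
+    array([ 2. , -0.5,  6. ])
+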
+ """
+ c = np.array(c, ndmin=1, copy=False)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ c = c.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ if len(c) == 1:
+ c0 = c[0]
+ c1 = 0
+ elif len(c) == 2:
+ c0 = c[0]
+ c1 = c[1]
+ else:
+ nd = len(c)
+ c0 = c[-2]
+ c1 = c[-1]
+ for i in range(3, len(c) + 1):
+ tmp = c0
+ nd = nd - 1
+ c0 = c[-i] - (c1*(nd - 1))/nd
+ c1 = tmp + (c1*x*(2*nd - 1))/nd
+ return c0 + c1*x
+
+
+def legval2d(x, y, c):
+ """
+ Evaluate a 2-D Legendre series at points (x, y).
+
+ This function returns the values:
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
+
+ The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+    If `c` is a 1-D array, a one is implicitly appended to its shape to make
+ it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and if it isn't an ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+ dimension greater than two the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional Legendre series at points formed
+ from pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ legval, leggrid2d, legval3d, leggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
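+    Examples
+    --------
+    A minimal sketch; the coefficients below encode ``1 + x*y``:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legval2d([1, 2], [1, 2], [[1, 0], [0, 1]])   # 1 + x*y at (1,1), (2,2)
+    array([2., 5.])
+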
+ """
+ return pu._valnd(legval, c, x, y)
+
+
+def leggrid2d(x, y, c):
+ """
+ Evaluate a 2-D Legendre series on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape + y.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points in the
+ Cartesian product of `x` and `y`. If `x` or `y` is a list or
+ tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
+ greater than two the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points in the
+ Cartesian product of `x` and `y`.
+
+ See Also
+ --------
+ legval, legval2d, legval3d, leggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
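+    Examples
+    --------
+    A small sketch; the coefficients below encode ``1 + x*y``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> x, y = np.array([-1., 1.]), np.array([-1., 0., 1.])
+    >>> L.leggrid2d(x, y, [[1, 0], [0, 1]])   # shape is x.shape + y.shape
+    array([[2., 1., 0.],
+           [0., 1., 2.]])
+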
+ """
+ return pu._gridnd(legval, c, x, y)
+
+
+def legval3d(x, y, z, c):
+ """
+ Evaluate a 3-D Legendre series at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ legval, legval2d, leggrid2d, leggrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(legval, c, x, y, z)
+
+
+def leggrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ legval, legval2d, leggrid2d, legval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(legval, c, x, y, z)
+
+
+def legvander(x, deg):
+ """Pseudo-Vandermonde matrix of given degree.
+
+ Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+ `x`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = L_i(x)
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the degree of the Legendre polynomial.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
+ ``legval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of Legendre series of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+ corresponding Legendre polynomial. The dtype will be the same as
+ the converted `x`.
+
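+    Examples
+    --------
+    A minimal sketch; the columns are ``P_0(x)``, ``P_1(x)``, ``P_2(x)``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legvander(np.array([-1., 0., 1.]), 2)
+    array([[ 1. , -1. ,  1. ],
+           [ 1. ,  0. , -0.5],
+           [ 1. ,  1. ,  1. ]])
+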
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ # Use forward recursion to generate the entries. This is not as accurate
+ # as reverse recursion in this application but it is more efficient.
+ v[0] = x*0 + 1
+ if ideg > 0:
+ v[1] = x
+ for i in range(2, ideg + 1):
+ v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
+ return np.moveaxis(v, 0, -1)
+
+
+def legvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the degrees of
+ the Legendre polynomials.
+
+ If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D Legendre
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ legvander, legvander3d, legval2d, legval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
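+    Examples
+    --------
+    A shape-only sketch; ``order == (deg[0] + 1)*(deg[1] + 1)``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> x = y = np.linspace(-1, 1, 3)
+    >>> L.legvander2d(x, y, [1, 1]).shape
+    (3, 4)
+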
+ """
+ return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
+
+
+def legvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the degrees of the Legendre polynomials.
+
+ If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D Legendre
+ series of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+    legvander, legvander2d, legval2d, legval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg)
+
+
+def legfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least squares fit of Legendre series to data.
+
+ Return the coefficients of a Legendre series of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+ where `n` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (M,)
+ x-coordinates of the M sample points ``(x[i], y[i])``.
+ y : array_like, shape (M,) or (M, K)
+ y-coordinates of the sample points. Several data sets of sample
+ points sharing the same x-coordinates can be fitted at once by
+ passing in a 2D-array that contains one dataset per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller than
+ this relative to the largest singular value will be ignored. The
+ default value is len(x)*eps, where eps is the relative precision of
+ the float type, about 2e-16 in most cases.
+ full : bool, optional
+ Switch determining nature of return value. When it is False (the
+ default) just the coefficients are returned, when True diagnostic
+ information from the singular value decomposition is also returned.
+ w : array_like, shape (`M`,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
+ Legendre coefficients ordered from low to high. If `y` was
+ 2-D, the coefficients for the data in column k of `y` are in
+ column `k`. If `deg` is specified as a list, coefficients for
+ terms not included in the fit are set equal to zero in the
+ returned `coef`.
+
+ [residuals, rank, singular_values, rcond] : list
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - singular_values -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ Warns
+ -----
+ RankWarning
+ The rank of the coefficient matrix in the least-squares fit is
+ deficient. The warning is only raised if ``full == False``. The
+ warnings can be turned off by
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyfit
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
+ legval : Evaluates a Legendre series.
+ legvander : Vandermonde matrix of Legendre series.
+ legweight : Legendre weight function (= 1).
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution is the coefficients of the Legendre series `p` that
+ minimizes the sum of the weighted squared errors
+
+ .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+ where :math:`w_j` are the weights. This problem is solved by setting up
+ as the (typically) overdetermined matrix equation
+
+ .. math:: V(x) * c = w * y,
+
+ where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
+ coefficients to be solved for, `w` are the weights, and `y` are the
+ observed values. This equation is then solved using the singular value
+ decomposition of `V`.
+
+ If some of the singular values of `V` are so small that they are
+ neglected, then a `RankWarning` will be issued. This means that the
+ coefficient values may be poorly determined. Using a lower order fit
+ will usually get rid of the warning. The `rcond` parameter can also be
+ set to a value smaller than its default, but the resulting fit may be
+ spurious and have large contributions from roundoff error.
+
+ Fits using Legendre series are usually better conditioned than fits
+ using power series, but much can depend on the distribution of the
+ sample points and the smoothness of the data. If the quality of the fit
+ is inadequate splines may be a good alternative.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Curve fitting",
+ https://en.wikipedia.org/wiki/Curve_fitting
+
+ Examples
+ --------
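+    A minimal noiseless sketch; ``x**2 == (P_0 + 2*P_2)/3``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> x = np.linspace(-1, 1, 51)
+    >>> np.round(L.legfit(x, x**2, 2), 4)
+    array([0.3333, 0.    , 0.6667])  # may vary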
+
+ """
+ return pu._fit(legvander, x, y, deg, rcond, full, w)
+
+
+def legcompanion(c):
+ """Return the scaled companion matrix of c.
+
+ The basis polynomials are scaled so that the companion matrix is
+    symmetric when `c` is a Legendre basis polynomial. This provides
+ better eigenvalue estimates than the unscaled case and for basis
+ polynomials the eigenvalues are guaranteed to be real if
+ `numpy.linalg.eigvalsh` is used to obtain them.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of Legendre series coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Scaled companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
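+    Examples
+    --------
+    A minimal sketch; the eigenvalues of the companion matrix are the
+    roots of the series (compare `legroots`):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> m = L.legcompanion((1, 2, 3))
+    >>> np.sort(np.linalg.eigvals(m))   # same as L.legroots((1, 2, 3))
+    array([-0.62283903,  0.17839459])  # may vary
+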
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[-c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ scl = 1./np.sqrt(2*np.arange(n) + 1)
+ top = mat.reshape(-1)[1::n+1]
+ bot = mat.reshape(-1)[n::n+1]
+ top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
+ bot[...] = top
+ mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
+ return mat
+
+
+def legroots(c):
+ """
+ Compute the roots of a Legendre series.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the series. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.polynomial.polyroots
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the series for such values.
+ Roots with multiplicity greater than 1 will also show larger errors as
+ the value of the series near such points is relatively insensitive to
+ errors in the roots. Isolated roots near the origin can be improved by
+ a few iterations of Newton's method.
+
+ The Legendre series basis polynomials aren't powers of ``x`` so the
+ results of this function may seem unintuitive.
+
+ Examples
+ --------
+ >>> import numpy.polynomial.legendre as leg
+ >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
+ array([-0.85099543, -0.11407192, 0.51506735]) # may vary
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([-c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = legcompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+def leggauss(deg):
+ """
+ Gauss-Legendre quadrature.
+
+ Computes the sample points and weights for Gauss-Legendre quadrature.
+ These sample points and weights will correctly integrate polynomials of
+ degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
+ the weight function :math:`f(x) = 1`.
+
+ Parameters
+ ----------
+ deg : int
+ Number of sample points and weights. It must be >= 1.
+
+ Returns
+ -------
+ x : ndarray
+ 1-D ndarray containing the sample points.
+    w : ndarray
+ 1-D ndarray containing the weights.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+    The results have only been tested up to degree 100; higher degrees may
+ be problematic. The weights are determined by using the fact that
+
+ .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+ where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+ is the k'th root of :math:`L_n`, and then scaling the results to get
+ the right value when integrating 1.
+
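+    Examples
+    --------
+    A minimal sketch; the two-point rule is exact for polynomials of
+    degree three or less:
+
+    >>> from numpy.polynomial import legendre as L
+    >>> x, w = L.leggauss(2)
+    >>> x
+    array([-0.57735027,  0.57735027])  # may vary
+    >>> w
+    array([1., 1.])
+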
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
+
+ # first approximation of roots. We use the fact that the companion
+ # matrix is symmetric in this case in order to obtain better zeros.
+ c = np.array([0]*deg + [1])
+ m = legcompanion(c)
+ x = la.eigvalsh(m)
+
+ # improve roots by one application of Newton
+ dy = legval(x, c)
+ df = legval(x, legder(c))
+ x -= dy/df
+
+ # compute the weights. We scale the factor to avoid possible numerical
+ # overflow.
+ fm = legval(x, c[1:])
+ fm /= np.abs(fm).max()
+ df /= np.abs(df).max()
+ w = 1/(fm * df)
+
+ # for Legendre we can also symmetrize
+ w = (w + w[::-1])/2
+ x = (x - x[::-1])/2
+
+ # scale w to get the right value
+ w *= 2. / w.sum()
+
+ return x, w
+
+
+def legweight(x):
+ """
+ Weight function of the Legendre polynomials.
+
+ The weight function is :math:`1` and the interval of integration is
+ :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
+ normalized, with respect to this weight function.
+
+ Parameters
+ ----------
+ x : array_like
+ Values at which the weight function will be computed.
+
+ Returns
+ -------
+ w : ndarray
+ The weight function at `x`.
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ w = x*0.0 + 1.0
+ return w
+
+#
+# Legendre series class
+#
+
+class Legendre(ABCPolyBase):
+ """A Legendre series class.
+
+ The Legendre class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ Legendre coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
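+    Examples
+    --------
+    A small usage sketch with the default domain and window:
+
+    >>> from numpy.polynomial import Legendre
+    >>> p = Legendre([1, 2, 3])   # P_0 + 2*P_1 + 3*P_2
+    >>> p(1.0)                    # P_n(1) == 1 for all n
+    6.0
+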
+ """
+ # Virtual Functions
+ _add = staticmethod(legadd)
+ _sub = staticmethod(legsub)
+ _mul = staticmethod(legmul)
+ _div = staticmethod(legdiv)
+ _pow = staticmethod(legpow)
+ _val = staticmethod(legval)
+ _int = staticmethod(legint)
+ _der = staticmethod(legder)
+ _fit = staticmethod(legfit)
+ _line = staticmethod(legline)
+ _roots = staticmethod(legroots)
+ _fromroots = staticmethod(legfromroots)
+
+ # Virtual properties
+ domain = np.array(legdomain)
+ window = np.array(legdomain)
+ basis_name = 'P'
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi
new file mode 100644
index 00000000..63a1c3f3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/legendre.pyi
@@ -0,0 +1,46 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+legtrim = trimcoef
+
+def poly2leg(pol): ...
+def leg2poly(c): ...
+
+legdomain: ndarray[Any, dtype[int_]]
+legzero: ndarray[Any, dtype[int_]]
+legone: ndarray[Any, dtype[int_]]
+legx: ndarray[Any, dtype[int_]]
+
+def legline(off, scl): ...
+def legfromroots(roots): ...
+def legadd(c1, c2): ...
+def legsub(c1, c2): ...
+def legmulx(c): ...
+def legmul(c1, c2): ...
+def legdiv(c1, c2): ...
+def legpow(c, pow, maxpower=...): ...
+def legder(c, m=..., scl=..., axis=...): ...
+def legint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
+def legval(x, c, tensor=...): ...
+def legval2d(x, y, c): ...
+def leggrid2d(x, y, c): ...
+def legval3d(x, y, z, c): ...
+def leggrid3d(x, y, z, c): ...
+def legvander(x, deg): ...
+def legvander2d(x, y, deg): ...
+def legvander3d(x, y, z, deg): ...
+def legfit(x, y, deg, rcond=..., full=..., w=...): ...
+def legcompanion(c): ...
+def legroots(c): ...
+def leggauss(deg): ...
+def legweight(x): ...
+
+class Legendre(ABCPolyBase):
+ domain: Any
+ window: Any
+ basis_name: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.py b/venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.py
new file mode 100644
index 00000000..d102f5a3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.py
@@ -0,0 +1,1536 @@
+"""
+=================================================
+Power Series (:mod:`numpy.polynomial.polynomial`)
+=================================================
+
+This module provides a number of objects (mostly functions) useful for
+dealing with polynomials, including a `Polynomial` class that
+encapsulates the usual arithmetic operations. (General information
+on how this module represents and works with polynomial objects is in
+the docstring for its "parent" sub-package, `numpy.polynomial`).
+
+Classes
+-------
+.. autosummary::
+ :toctree: generated/
+
+ Polynomial
+
+Constants
+---------
+.. autosummary::
+ :toctree: generated/
+
+ polydomain
+ polyzero
+ polyone
+ polyx
+
+Arithmetic
+----------
+.. autosummary::
+ :toctree: generated/
+
+ polyadd
+ polysub
+ polymulx
+ polymul
+ polydiv
+ polypow
+ polyval
+ polyval2d
+ polyval3d
+ polygrid2d
+ polygrid3d
+
+Calculus
+--------
+.. autosummary::
+ :toctree: generated/
+
+ polyder
+ polyint
+
+Misc Functions
+--------------
+.. autosummary::
+ :toctree: generated/
+
+ polyfromroots
+ polyroots
+ polyvalfromroots
+ polyvander
+ polyvander2d
+ polyvander3d
+ polycompanion
+ polyfit
+ polytrim
+ polyline
+
+See Also
+--------
+`numpy.polynomial`
+
+"""
+__all__ = [
+ 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd',
+ 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval',
+ 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander',
+ 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d',
+ 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d']
+
+import numpy as np
+import numpy.linalg as la
+from numpy.core.multiarray import normalize_axis_index
+
+from . import polyutils as pu
+from ._polybase import ABCPolyBase
+
+polytrim = pu.trimcoef
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Polynomial default domain.
+polydomain = np.array([-1, 1])
+
+# Polynomial coefficients representing zero.
+polyzero = np.array([0])
+
+# Polynomial coefficients representing one.
+polyone = np.array([1])
+
+# Polynomial coefficients representing the identity x.
+polyx = np.array([0, 1])
+
+#
+# Polynomial series functions
+#
+
+
+def polyline(off, scl):
+ """
+ Returns an array representing a linear polynomial.
+
+ Parameters
+ ----------
+ off, scl : scalars
+ The "y-intercept" and "slope" of the line, respectively.
+
+ Returns
+ -------
+ y : ndarray
+ This module's representation of the linear polynomial ``off +
+ scl*x``.
+
+ See Also
+ --------
+ numpy.polynomial.chebyshev.chebline
+ numpy.polynomial.legendre.legline
+ numpy.polynomial.laguerre.lagline
+ numpy.polynomial.hermite.hermline
+ numpy.polynomial.hermite_e.hermeline
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> P.polyline(1,-1)
+ array([ 1, -1])
+ >>> P.polyval(1, P.polyline(1,-1)) # should be 0
+ 0.0
+
+ """
+ if scl != 0:
+ return np.array([off, scl])
+ else:
+ return np.array([off])
+
+
+def polyfromroots(roots):
+ """
+ Generate a monic polynomial with given roots.
+
+ Return the coefficients of the polynomial
+
+ .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+ where the ``r_n`` are the roots specified in `roots`. If a zero has
+ multiplicity n, then it must appear in `roots` n times. For instance,
+ if 2 is a root of multiplicity three and 3 is a root of multiplicity 2,
+ then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear
+ in any order.
+
+ If the returned coefficients are `c`, then
+
+ .. math:: p(x) = c_0 + c_1 * x + ... + x^n
+
+ The coefficient of the last term is 1 for monic polynomials in this
+ form.
+
+ Parameters
+ ----------
+ roots : array_like
+ Sequence containing the roots.
+
+ Returns
+ -------
+ out : ndarray
+        1-D array of the polynomial's coefficients. If all the roots are
+ real, then `out` is also real, otherwise it is complex. (see
+ Examples below).
+
+ See Also
+ --------
+ numpy.polynomial.chebyshev.chebfromroots
+ numpy.polynomial.legendre.legfromroots
+ numpy.polynomial.laguerre.lagfromroots
+ numpy.polynomial.hermite.hermfromroots
+ numpy.polynomial.hermite_e.hermefromroots
+
+ Notes
+ -----
+ The coefficients are determined by multiplying together linear factors
+ of the form ``(x - r_i)``, i.e.
+
+ .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n)
+
+ where ``n == len(roots) - 1``; note that this implies that ``1`` is always
+ returned for :math:`a_n`.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x
+ array([ 0., -1., 0., 1.])
+ >>> j = complex(0,1)
+ >>> P.polyfromroots((-j,j)) # complex returned, though values are real
+ array([1.+0.j, 0.+0.j, 1.+0.j])
+
+ """
+ return pu._fromroots(polyline, polymul, roots)
+
+
+def polyadd(c1, c2):
+ """
+ Add one polynomial to another.
+
+ Returns the sum of two polynomials `c1` + `c2`. The arguments are
+ sequences of coefficients from lowest order term to highest, i.e.,
+ [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of polynomial coefficients ordered from low to high.
+
+ Returns
+ -------
+ out : ndarray
+ The coefficient array representing their sum.
+
+ See Also
+ --------
+ polysub, polymulx, polymul, polydiv, polypow
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> sum = P.polyadd(c1,c2); sum
+ array([4., 4., 4.])
+ >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
+ 28.0
+
+ """
+ return pu._add(c1, c2)
+
+
+def polysub(c1, c2):
+ """
+ Subtract one polynomial from another.
+
+ Returns the difference of two polynomials `c1` - `c2`. The arguments
+ are sequences of coefficients from lowest order term to highest, i.e.,
+ [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of polynomial coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Of coefficients representing their difference.
+
+ See Also
+ --------
+ polyadd, polymulx, polymul, polydiv, polypow
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> P.polysub(c1,c2)
+ array([-2., 0., 2.])
+ >>> P.polysub(c2,c1) # -P.polysub(c1,c2)
+ array([ 2., 0., -2.])
+
+ """
+ return pu._sub(c1, c2)
+
+
+def polymulx(c):
+ """Multiply a polynomial by x.
+
+ Multiply the polynomial `c` by x, where x is the independent
+ variable.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of polynomial coefficients ordered from low to
+ high.
+
+ Returns
+ -------
+ out : ndarray
+ Array representing the result of the multiplication.
+
+ See Also
+ --------
+ polyadd, polysub, polymul, polydiv, polypow
+
+ Notes
+ -----
+
+ .. versionadded:: 1.5.0
+
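+    Examples
+    --------
+    A minimal sketch; multiplying by x shifts the coefficients up:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polymulx((3, 2, 1))   # x*(3 + 2*x + x**2)
+    array([0., 3., 2., 1.])
+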
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ # The zero series needs special treatment
+ if len(c) == 1 and c[0] == 0:
+ return c
+
+ prd = np.empty(len(c) + 1, dtype=c.dtype)
+ prd[0] = c[0]*0
+ prd[1:] = c
+ return prd
+
+
+def polymul(c1, c2):
+ """
+ Multiply one polynomial by another.
+
+ Returns the product of two polynomials `c1` * `c2`. The arguments are
+ sequences of coefficients, from lowest order term to highest, e.g.,
+ [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.``
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of coefficients representing a polynomial, relative to the
+ "standard" basis, and ordered from lowest order term to highest.
+
+ Returns
+ -------
+ out : ndarray
+ Of the coefficients of their product.
+
+ See Also
+ --------
+ polyadd, polysub, polymulx, polydiv, polypow
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> P.polymul(c1,c2)
+ array([ 3., 8., 14., 8., 3.])
+
+ """
+ # c1, c2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+ ret = np.convolve(c1, c2)
+ return pu.trimseq(ret)
+
+
+def polydiv(c1, c2):
+ """
+ Divide one polynomial by another.
+
+ Returns the quotient-with-remainder of two polynomials `c1` / `c2`.
+ The arguments are sequences of coefficients, from lowest order term
+ to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``.
+
+ Parameters
+ ----------
+ c1, c2 : array_like
+ 1-D arrays of polynomial coefficients ordered from low to high.
+
+ Returns
+ -------
+ [quo, rem] : ndarrays
+ Of coefficient series representing the quotient and remainder.
+
+ See Also
+ --------
+ polyadd, polysub, polymulx, polymul, polypow
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> c1 = (1,2,3)
+ >>> c2 = (3,2,1)
+ >>> P.polydiv(c1,c2)
+ (array([3.]), array([-8., -4.]))
+ >>> P.polydiv(c2,c1)
+ (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary
+
+ """
+ # c1, c2 are trimmed copies
+ [c1, c2] = pu.as_series([c1, c2])
+ if c2[-1] == 0:
+ raise ZeroDivisionError()
+
+ # note: this is more efficient than `pu._div(polymul, c1, c2)`
+ lc1 = len(c1)
+ lc2 = len(c2)
+ if lc1 < lc2:
+ return c1[:1]*0, c1
+ elif lc2 == 1:
+ return c1/c2[-1], c1[:1]*0
+ else:
+ dlen = lc1 - lc2
+ scl = c2[-1]
+ c2 = c2[:-1]/scl
+ i = dlen
+ j = lc1 - 1
+ while i >= 0:
+ c1[i:j] -= c2*c1[j]
+ i -= 1
+ j -= 1
+ return c1[j+1:]/scl, pu.trimseq(c1[:j+1])
+
+
+def polypow(c, pow, maxpower=None):
+ """Raise a polynomial to a power.
+
+ Returns the polynomial `c` raised to the power `pow`. The argument
+ `c` is a sequence of coefficients ordered from low to high. i.e.,
+ [1,2,3] is the series ``1 + 2*x + 3*x**2.``
+
+ Parameters
+ ----------
+ c : array_like
+        1-D array of series coefficients ordered from low to
+ high degree.
+ pow : integer
+        Power to which the series will be raised.
+    maxpower : integer, optional
+        Maximum power allowed. This is mainly to limit growth of the series
+        to unmanageable size. Default is ``None``.
+
+ Returns
+ -------
+ coef : ndarray
+ Power series of power.
+
+ See Also
+ --------
+ polyadd, polysub, polymulx, polymul, polydiv
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> P.polypow([1,2,3], 2)
+ array([ 1., 4., 10., 12., 9.])
+
+ """
+ # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it
+ # avoids calling `as_series` repeatedly
+ return pu._pow(np.convolve, c, pow, maxpower)
+
+
+def polyder(c, m=1, scl=1, axis=0):
+ """
+ Differentiate a polynomial.
+
+ Returns the polynomial coefficients `c` differentiated `m` times along
+ `axis`. At each iteration the result is multiplied by `scl` (the
+ scaling factor is for use in a linear change of variable). The
+ argument `c` is an array of coefficients from low to high degree along
+ each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``
+ while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is
+ ``x`` and axis=1 is ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ Array of polynomial coefficients. If c is multidimensional the
+ different axis correspond to different variables with the degree
+ in each axis given by the corresponding index.
+ m : int, optional
+ Number of derivatives taken, must be non-negative. (Default: 1)
+ scl : scalar, optional
+ Each differentiation is multiplied by `scl`. The end result is
+ multiplication by ``scl**m``. This is for use in a linear change
+ of variable. (Default: 1)
+ axis : int, optional
+ Axis over which the derivative is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ der : ndarray
+ Polynomial coefficients of the derivative.
+
+ See Also
+ --------
+ polyint
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> c = (1,2,3,4) # 1 + 2x + 3x**2 + 4x**3
+ >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
+ array([ 2., 6., 12.])
+ >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
+ array([24.])
+ >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
+ array([ -2., -6., -12.])
+ >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
+ array([ 6., 24.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ # astype fails with NA
+ c = c + 0.0
+ cdt = c.dtype
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of derivation must be non-negative")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ c = np.moveaxis(c, iaxis, 0)
+ n = len(c)
+ if cnt >= n:
+ c = c[:1]*0
+ else:
+ for i in range(cnt):
+ n = n - 1
+ c *= scl
+ der = np.empty((n,) + c.shape[1:], dtype=cdt)
+ for j in range(n, 0, -1):
+ der[j - 1] = j*c[j]
+ c = der
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
+ """
+ Integrate a polynomial.
+
+ Returns the polynomial coefficients `c` integrated `m` times from
+ `lbnd` along `axis`. At each iteration the resulting series is
+ **multiplied** by `scl` and an integration constant, `k`, is added.
+ The scaling factor is for use in a linear change of variable. ("Buyer
+ beware": note that, depending on what one is doing, one may want `scl`
+ to be the reciprocal of what one might expect; for more information,
+ see the Notes section below.) The argument `c` is an array of
+ coefficients, from low to high degree along each axis, e.g., [1,2,3]
+ represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]]
+ represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is
+ ``y``.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of polynomial coefficients, ordered from low to high.
+ m : int, optional
+        Order of integration, must be non-negative. (Default: 1)
+ k : {[], list, scalar}, optional
+        Integration constant(s). The value of the first integral at ``lbnd``
+        is the first value in the list, the value of the second integral
+        at ``lbnd`` is the second value, etc. If ``k == []`` (the default),
+ all constants are set to zero. If ``m == 1``, a single scalar can
+ be given instead of a list.
+ lbnd : scalar, optional
+ The lower bound of the integral. (Default: 0)
+ scl : scalar, optional
+ Following each integration the result is *multiplied* by `scl`
+ before the integration constant is added. (Default: 1)
+ axis : int, optional
+ Axis over which the integral is taken. (Default: 0).
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ S : ndarray
+ Coefficient array of the integral.
+
+ Raises
+ ------
+ ValueError
+        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
+ ``np.ndim(scl) != 0``.
+
+ See Also
+ --------
+ polyder
+
+ Notes
+ -----
+ Note that the result of each integration is *multiplied* by `scl`. Why
+ is this important to note? Say one is making a linear change of
+ variable :math:`u = ax + b` in an integral relative to `x`. Then
+ :math:`dx = du/a`, so one will need to set `scl` equal to
+ :math:`1/a` - perhaps not what one would have first thought.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polynomial as P
+ >>> c = (1,2,3)
+ >>> P.polyint(c) # should return array([0, 1, 1, 1])
+ array([0., 1., 1., 1.])
+ >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
+ array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary
+ 0.05 ])
+ >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
+ array([3., 1., 1., 1.])
+ >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
+ array([6., 1., 1., 1.])
+ >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
+ array([ 0., -2., -2., -2.])
+
+ """
+ c = np.array(c, ndmin=1, copy=True)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ # astype doesn't preserve mask attribute.
+ c = c + 0.0
+ cdt = c.dtype
+ if not np.iterable(k):
+ k = [k]
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
+ if cnt < 0:
+ raise ValueError("The order of integration must be non-negative")
+ if len(k) > cnt:
+ raise ValueError("Too many integration constants")
+ if np.ndim(lbnd) != 0:
+ raise ValueError("lbnd must be a scalar.")
+ if np.ndim(scl) != 0:
+ raise ValueError("scl must be a scalar.")
+ iaxis = normalize_axis_index(iaxis, c.ndim)
+
+ if cnt == 0:
+ return c
+
+ k = list(k) + [0]*(cnt - len(k))
+ c = np.moveaxis(c, iaxis, 0)
+ for i in range(cnt):
+ n = len(c)
+ c *= scl
+ if n == 1 and np.all(c[0] == 0):
+ c[0] += k[i]
+ else:
+ tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt)
+ tmp[0] = c[0]*0
+ tmp[1] = c[0]
+ for j in range(1, n):
+ tmp[j + 1] = c[j]/(j + 1)
+ tmp[0] += k[i] - polyval(lbnd, tmp)
+ c = tmp
+ c = np.moveaxis(c, 0, iaxis)
+ return c
+
+
+def polyval(x, c, tensor=True):
+ """
+ Evaluate a polynomial at points x.
+
+ If `c` is of length `n + 1`, this function returns the value
+
+ .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `c`.
+
+ If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
+ `c` is multidimensional, then the shape of the result depends on the
+ value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
+ x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
+    scalars have shape ``()``.
+
+ Trailing zeros in the coefficients will be used in the evaluation, so
+ they should be avoided if efficiency is a concern.
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+        themselves and with the elements of `c`.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree n are contained in c[n]. If `c` is multidimensional the
+ remaining indices enumerate multiple polynomials. In the two
+ dimensional case the coefficients may be thought of as stored in
+ the columns of `c`.
+ tensor : boolean, optional
+ If True, the shape of the coefficient array is extended with ones
+ on the right, one for each dimension of `x`. Scalars have dimension 0
+ for this action. The result is that every column of coefficients in
+ `c` is evaluated for every element of `x`. If False, `x` is broadcast
+ over the columns of `c` for the evaluation. This keyword is useful
+ when `c` is multidimensional. The default value is True.
+
+ .. versionadded:: 1.7.0
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The shape of the returned array is described above.
+
+ See Also
+ --------
+ polyval2d, polygrid2d, polyval3d, polygrid3d
+
+ Notes
+ -----
+ The evaluation uses Horner's method.
+
+ Examples
+ --------
+ >>> from numpy.polynomial.polynomial import polyval
+ >>> polyval(1, [1,2,3])
+ 6.0
+ >>> a = np.arange(4).reshape(2,2)
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> polyval(a, [1,2,3])
+ array([[ 1., 6.],
+ [17., 34.]])
+ >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
+ >>> coef
+ array([[0, 1],
+ [2, 3]])
+ >>> polyval([1,2], coef, tensor=True)
+ array([[2., 4.],
+ [4., 7.]])
+ >>> polyval([1,2], coef, tensor=False)
+ array([2., 7.])
+
+ """
+ c = np.array(c, ndmin=1, copy=False)
+ if c.dtype.char in '?bBhHiIlLqQpP':
+ # astype fails with NA
+ c = c + 0.0
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray) and tensor:
+ c = c.reshape(c.shape + (1,)*x.ndim)
+
+ c0 = c[-1] + x*0
+ for i in range(2, len(c) + 1):
+ c0 = c[-i] + c0*x
+ return c0
+
+
+def polyvalfromroots(x, r, tensor=True):
+ """
+ Evaluate a polynomial specified by its roots at points x.
+
+ If `r` is of length `N`, this function returns the value
+
+ .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n)
+
+ The parameter `x` is converted to an array only if it is a tuple or a
+ list, otherwise it is treated as a scalar. In either case, either `x`
+ or its elements must support multiplication and addition both with
+ themselves and with the elements of `r`.
+
+ If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If `r`
+ is multidimensional, then the shape of the result depends on the value of
+ `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape;
+ that is, each polynomial is evaluated at every value of `x`. If `tensor` is
+ ``False``, the shape will be r.shape[1:]; that is, each polynomial is
+ evaluated only for the corresponding broadcast value of `x`. Note that
+ scalars have shape ().
+
+ .. versionadded:: 1.12
+
+ Parameters
+ ----------
+ x : array_like, compatible object
+ If `x` is a list or tuple, it is converted to an ndarray, otherwise
+ it is left unchanged and treated as a scalar. In either case, `x`
+ or its elements must support addition and multiplication with
+ themselves and with the elements of `r`.
+ r : array_like
+ Array of roots. If `r` is multidimensional the first index is the
+ root index, while the remaining indices enumerate multiple
+ polynomials. For instance, in the two dimensional case the roots
+ of each polynomial may be thought of as stored in the columns of `r`.
+ tensor : boolean, optional
+ If True, the shape of the roots array is extended with ones on the
+ right, one for each dimension of `x`. Scalars have dimension 0 for this
+ action. The result is that every column of coefficients in `r` is
+ evaluated for every element of `x`. If False, `x` is broadcast over the
+ columns of `r` for the evaluation. This keyword is useful when `r` is
+ multidimensional. The default value is True.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The shape of the returned array is described above.
+
+ See Also
+ --------
+ polyroots, polyfromroots, polyval
+
+ Examples
+ --------
+ >>> from numpy.polynomial.polynomial import polyvalfromroots
+ >>> polyvalfromroots(1, [1,2,3])
+ 0.0
+ >>> a = np.arange(4).reshape(2,2)
+ >>> a
+ array([[0, 1],
+ [2, 3]])
+ >>> polyvalfromroots(a, [-1, 0, 1])
+ array([[-0., 0.],
+ [ 6., 24.]])
+ >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
+ >>> r # each column of r defines one polynomial
+ array([[-2, -1],
+ [ 0, 1]])
+ >>> b = [-2, 1]
+ >>> polyvalfromroots(b, r, tensor=True)
+ array([[-0., 3.],
+ [ 3., 0.]])
+ >>> polyvalfromroots(b, r, tensor=False)
+ array([-0., 0.])
+ """
+ r = np.array(r, ndmin=1, copy=False)
+ if r.dtype.char in '?bBhHiIlLqQpP':
+ r = r.astype(np.double)
+ if isinstance(x, (tuple, list)):
+ x = np.asarray(x)
+ if isinstance(x, np.ndarray):
+ if tensor:
+ r = r.reshape(r.shape + (1,)*x.ndim)
+ elif x.ndim >= r.ndim:
+ raise ValueError("x.ndim must be < r.ndim when tensor == False")
+ return np.prod(x - r, axis=0)
+
+
+def polyval2d(x, y, c):
+ """
+ Evaluate a 2-D polynomial at points (x, y).
+
+ This function returns the value
+
+ .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars and they
+ must have the same shape after conversion. In either case, either `x`
+ and `y` or their elements must support multiplication and addition both
+ with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points `(x, y)`,
+ where `x` and `y` must have the same shape. If `x` or `y` is a list
+ or tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term
+ of multi-degree i,j is contained in `c[i,j]`. If `c` has
+ dimension greater than two the remaining indices enumerate multiple
+ sets of coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points formed with
+ pairs of corresponding values from `x` and `y`.
+
+ See Also
+ --------
+ polyval, polygrid2d, polyval3d, polygrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(polyval, c, x, y)
+
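+ # A quick doctest-style sketch of the evaluation described above: with
+ # c = [[1, 2], [3, 4]] the series is p(x, y) = 1 + 2*y + 3*x + 4*x*y,
+ # evaluated pointwise over pairs from `x` and `y`.
+ #
+ # >>> from numpy.polynomial.polynomial import polyval2d
+ # >>> polyval2d([1, 2], [2, 3], [[1, 2], [3, 4]])
+ # array([16., 37.])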
+
+def polygrid2d(x, y, c):
+ """
+ Evaluate a 2-D polynomial on the Cartesian product of x and y.
+
+ This function returns the values:
+
+ .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
+
+ where the points `(a, b)` consist of all pairs formed by taking
+ `a` from `x` and `b` from `y`. The resulting points form a grid with
+ `x` in the first dimension and `y` in the second.
+
+ The parameters `x` and `y` are converted to arrays only if they are
+ tuples or lists, otherwise they are treated as scalars. In either
+ case, either `x` and `y` or their elements must support multiplication
+ and addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than two dimensions, ones are implicitly appended to
+ its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+ x.shape + y.shape.
+
+ Parameters
+ ----------
+ x, y : array_like, compatible objects
+ The two dimensional series is evaluated at the points in the
+ Cartesian product of `x` and `y`. If `x` or `y` is a list or
+ tuple, it is first converted to an ndarray, otherwise it is left
+ unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j are contained in ``c[i,j]``. If `c` has dimension
+ greater than two the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the two dimensional polynomial at points in the Cartesian
+ product of `x` and `y`.
+
+ See Also
+ --------
+ polyval, polyval2d, polyval3d, polygrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(polyval, c, x, y)
+
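+ # A quick doctest-style sketch: the same c = [[1, 2], [3, 4]] as in the
+ # polyval2d sketch above, now evaluated on the 2x2 grid built from
+ # x = [0, 1] and y = [2, 3]; rows index `x`, columns index `y`.
+ #
+ # >>> from numpy.polynomial.polynomial import polygrid2d
+ # >>> polygrid2d([0, 1], [2, 3], [[1, 2], [3, 4]])
+ # array([[ 5., 7.],
+ # [16., 22.]])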
+
+def polyval3d(x, y, z, c):
+ """
+ Evaluate a 3-D polynomial at points (x, y, z).
+
+ This function returns the values:
+
+ .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if
+ they are tuples or lists, otherwise they are treated as scalars and
+ they must have the same shape after conversion. In either case, either
+ `x`, `y`, and `z` or their elements must support multiplication and
+ addition both with themselves and with the elements of `c`.
+
+ If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+ shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible object
+ The three dimensional series is evaluated at the points
+ `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
+ any of `x`, `y`, or `z` is a list or tuple, it is first converted
+ to an ndarray, otherwise it is left unchanged and if it isn't an
+ ndarray it is treated as a scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficient of the term of
+ multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+ greater than 3 the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the multidimensional polynomial on points formed with
+ triples of corresponding values from `x`, `y`, and `z`.
+
+ See Also
+ --------
+ polyval, polyval2d, polygrid2d, polygrid3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._valnd(polyval, c, x, y, z)
+
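+ # A quick doctest-style sketch: p(x, y, z) = x + 2*z, encoded via
+ # c[1,0,0] = 1 and c[0,0,1] = 2, evaluated pointwise over triples.
+ #
+ # >>> import numpy as np
+ # >>> from numpy.polynomial.polynomial import polyval3d
+ # >>> c = np.zeros((2, 1, 2)); c[1, 0, 0] = 1; c[0, 0, 1] = 2
+ # >>> polyval3d([1, 2], [0, 0], [3, 4], c)
+ # array([ 7., 10.])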
+
+def polygrid3d(x, y, z, c):
+ """
+ Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
+
+ This function returns the values:
+
+ .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
+
+ where the points `(a, b, c)` consist of all triples formed by taking
+ `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+ a grid with `x` in the first dimension, `y` in the second, and `z` in
+ the third.
+
+ The parameters `x`, `y`, and `z` are converted to arrays only if they
+ are tuples or lists, otherwise they are treated as scalars. In
+ either case, either `x`, `y`, and `z` or their elements must support
+ multiplication and addition both with themselves and with the elements
+ of `c`.
+
+ If `c` has fewer than three dimensions, ones are implicitly appended to
+ its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+ x.shape + y.shape + z.shape.
+
+ Parameters
+ ----------
+ x, y, z : array_like, compatible objects
+ The three dimensional series is evaluated at the points in the
+ Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+ list or tuple, it is first converted to an ndarray, otherwise it is
+ left unchanged and, if it isn't an ndarray, it is treated as a
+ scalar.
+ c : array_like
+ Array of coefficients ordered so that the coefficients for terms of
+ degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+ greater than three the remaining indices enumerate multiple sets of
+ coefficients.
+
+ Returns
+ -------
+ values : ndarray, compatible object
+ The values of the three dimensional polynomial at points in the
+ Cartesian product of `x`, `y`, and `z`.
+
+ See Also
+ --------
+ polyval, polyval2d, polygrid2d, polyval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._gridnd(polyval, c, x, y, z)
+
+
+def polyvander(x, deg):
+ """Vandermonde matrix of given degree.
+
+ Returns the Vandermonde matrix of degree `deg` and sample points
+ `x`. The Vandermonde matrix is defined by
+
+ .. math:: V[..., i] = x^i,
+
+ where `0 <= i <= deg`. The leading indices of `V` index the elements of
+ `x` and the last index is the power of `x`.
+
+ If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
+ matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
+ ``polyval(x, c)`` are the same up to roundoff. This equivalence is
+ useful both for least squares fitting and for the evaluation of a large
+ number of polynomials of the same degree and sample points.
+
+ Parameters
+ ----------
+ x : array_like
+ Array of points. The dtype is converted to float64 or complex128
+ depending on whether any of the elements are complex. If `x` is
+ scalar it is converted to a 1-D array.
+ deg : int
+ Degree of the resulting matrix.
+
+ Returns
+ -------
+ vander : ndarray
+ The Vandermonde matrix. The shape of the returned matrix is
+ ``x.shape + (deg + 1,)``, where the last index is the power of `x`.
+ The dtype will be the same as the converted `x`.
+
+ See Also
+ --------
+ polyvander2d, polyvander3d
+
+ """
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg < 0:
+ raise ValueError("deg must be non-negative")
+
+ x = np.array(x, copy=False, ndmin=1) + 0.0
+ dims = (ideg + 1,) + x.shape
+ dtyp = x.dtype
+ v = np.empty(dims, dtype=dtyp)
+ v[0] = x*0 + 1
+ if ideg > 0:
+ v[1] = x
+ for i in range(2, ideg + 1):
+ v[i] = v[i-1]*x
+ return np.moveaxis(v, 0, -1)
+
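+ # A quick doctest-style sketch of the documented equivalence between
+ # ``np.dot(V, c)`` and ``polyval(x, c)``.
+ #
+ # >>> import numpy as np
+ # >>> from numpy.polynomial.polynomial import polyvander, polyval
+ # >>> x = np.array([1., 2., 3.])
+ # >>> polyvander(x, 2)
+ # array([[1., 1., 1.],
+ # [1., 2., 4.],
+ # [1., 3., 9.]])
+ # >>> np.allclose(polyvander(x, 2) @ [1., 2., 3.], polyval(x, [1., 2., 3.]))
+ # True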
+
+def polyvander2d(x, y, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y)`. The pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
+
+ where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
+ `V` index the points `(x, y)` and the last index encodes the powers of
+ `x` and `y`.
+
+ If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+ correspond to the elements of a 2-D coefficient array `c` of shape
+ (xdeg + 1, ydeg + 1) in the order
+
+ .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+ and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
+ up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 2-D polynomials
+ of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg].
+
+ Returns
+ -------
+ vander2d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+ as the converted `x` and `y`.
+
+ See Also
+ --------
+ polyvander, polyvander3d, polyval2d, polyval3d
+
+ """
+ return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
+
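+ # A quick doctest-style sketch of the documented column ordering: the
+ # flattened columns follow ``c.flat``, so ``V @ c.flat`` reproduces
+ # ``polyval2d(x, y, c)``.
+ #
+ # >>> import numpy as np
+ # >>> from numpy.polynomial.polynomial import polyvander2d, polyval2d
+ # >>> x, y = np.array([0.5, 2.0]), np.array([1.0, 3.0])
+ # >>> c = np.arange(6.0).reshape(2, 3) # xdeg = 1, ydeg = 2
+ # >>> V = polyvander2d(x, y, [1, 2])
+ # >>> np.allclose(V @ c.ravel(), polyval2d(x, y, c))
+ # True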
+
+def polyvander3d(x, y, z, deg):
+ """Pseudo-Vandermonde matrix of given degrees.
+
+ Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+ points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
+ then the pseudo-Vandermonde matrix is defined by
+
+ .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+ where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
+ indices of `V` index the points `(x, y, z)` and the last index encodes
+ the powers of `x`, `y`, and `z`.
+
+ If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+ of `V` correspond to the elements of a 3-D coefficient array `c` of
+ shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+ .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+ and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+ same up to roundoff. This equivalence is useful both for least squares
+ fitting and for the evaluation of a large number of 3-D polynomials
+ of the same degrees and sample points.
+
+ Parameters
+ ----------
+ x, y, z : array_like
+ Arrays of point coordinates, all of the same shape. The dtypes will
+ be converted to either float64 or complex128 depending on whether
+ any of the elements are complex. Scalars are converted to 1-D
+ arrays.
+ deg : list of ints
+ List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+ Returns
+ -------
+ vander3d : ndarray
+ The shape of the returned matrix is ``x.shape + (order,)``, where
+ :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+ be the same as the converted `x`, `y`, and `z`.
+
+ See Also
+ --------
+ polyvander, polyvander2d, polyval2d, polyval3d
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+ """
+ Least-squares fit of a polynomial to data.
+
+ Return the coefficients of a polynomial of degree `deg` that is the
+ least squares fit to the data values `y` given at points `x`. If `y` is
+ 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+ fits are done, one for each column of `y`, and the resulting
+ coefficients are stored in the corresponding columns of a 2-D return.
+ The fitted polynomial(s) are in the form
+
+ .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+ where `n` is `deg`.
+
+ Parameters
+ ----------
+ x : array_like, shape (`M`,)
+ x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+ y : array_like, shape (`M`,) or (`M`, `K`)
+ y-coordinates of the sample points. Several sets of sample points
+ sharing the same x-coordinates can be (independently) fit with one
+ call to `polyfit` by passing in for `y` a 2-D array that contains
+ one data set per column.
+ deg : int or 1-D array_like
+ Degree(s) of the fitting polynomials. If `deg` is a single integer
+ all terms up to and including the `deg`'th term are included in the
+ fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+ degrees of the terms to include may be used instead.
+ rcond : float, optional
+ Relative condition number of the fit. Singular values smaller
+ than `rcond`, relative to the largest singular value, will be
+ ignored. The default value is ``len(x)*eps``, where `eps` is the
+ relative precision of the platform's float type, about 2e-16 in
+ most cases.
+ full : bool, optional
+ Switch determining the nature of the return value. When ``False``
+ (the default) just the coefficients are returned; when ``True``,
+ diagnostic information from the singular value decomposition (used
+ to solve the fit's matrix equation) is also returned.
+ w : array_like, shape (`M`,), optional
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
+ same variance. When using inverse-variance weighting, use
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+ .. versionadded:: 1.5.0
+
+ Returns
+ -------
+ coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
+ Polynomial coefficients ordered from low to high. If `y` was 2-D,
+ the coefficients in column `k` of `coef` represent the polynomial
+ fit to the data in `y`'s `k`-th column.
+
+ [residuals, rank, singular_values, rcond] : list
+ These values are only returned if ``full == True``
+
+ - residuals -- sum of squared residuals of the least squares fit
+ - rank -- the numerical rank of the scaled Vandermonde matrix
+ - singular_values -- singular values of the scaled Vandermonde matrix
+ - rcond -- value of `rcond`.
+
+ For more details, see `numpy.linalg.lstsq`.
+
+ Raises
+ ------
+ RankWarning
+ Raised if the matrix in the least-squares fit is rank deficient.
+ The warning is only raised if ``full == False``. The warnings can
+ be turned off by:
+
+ >>> import warnings
+ >>> warnings.simplefilter('ignore', np.RankWarning)
+
+ See Also
+ --------
+ numpy.polynomial.chebyshev.chebfit
+ numpy.polynomial.legendre.legfit
+ numpy.polynomial.laguerre.lagfit
+ numpy.polynomial.hermite.hermfit
+ numpy.polynomial.hermite_e.hermefit
+ polyval : Evaluates a polynomial.
+ polyvander : Vandermonde matrix for powers.
+ numpy.linalg.lstsq : Computes a least-squares fit from the matrix.
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
+
+ Notes
+ -----
+ The solution is the coefficients of the polynomial `p` that minimizes
+ the sum of the weighted squared errors
+
+ .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
+
+ where the :math:`w_j` are the weights. This problem is solved by
+ setting up the (typically) over-determined matrix equation:
+
+ .. math:: V(x) * c = w * y,
+
+ where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
+ coefficients to be solved for, `w` are the weights, and `y` are the
+ observed values. This equation is then solved using the singular value
+ decomposition of `V`.
+
+ If some of the singular values of `V` are so small that they are
+ neglected (and `full` == ``False``), a `RankWarning` will be raised.
+ This means that the coefficient values may be poorly determined.
+ Fitting to a lower order polynomial will usually get rid of the warning
+ (but may not be what you want, of course; if you have independent
+ reason(s) for choosing the degree which isn't working, you may have to:
+ a) reconsider those reasons, and/or b) reconsider the quality of your
+ data). The `rcond` parameter can also be set to a value smaller than
+ its default, but the resulting fit may be spurious and have large
+ contributions from roundoff error.
+
+ Polynomial fits using double precision tend to "fail" at about
+ (polynomial) degree 20. Fits using Chebyshev or Legendre series are
+ generally better conditioned, but much can still depend on the
+ distribution of the sample points and the smoothness of the data. If
+ the quality of the fit is inadequate, splines may be a good
+ alternative.
+
+ Examples
+ --------
+ >>> np.random.seed(123)
+ >>> from numpy.polynomial import polynomial as P
+ >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
+ >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise
+ >>> c, stats = P.polyfit(x,y,3,full=True)
+ >>> np.random.seed(123)
+ >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
+ array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary
+ >>> stats # note the large SSR, explaining the rather poor results
+ [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary
+ 0.28853036]), 1.1324274851176597e-014]
+
+ Same thing without the added noise
+
+ >>> y = x**3 - x
+ >>> c, stats = P.polyfit(x,y,3,full=True)
+ >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
+ array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00])
+ >>> stats # note the minuscule SSR
+ [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary
+ 0.50443316, 0.28853036]), 1.1324274851176597e-014]
+
+ """
+ return pu._fit(polyvander, x, y, deg, rcond, full, w)
+
+
+def polycompanion(c):
+ """
+ Return the companion matrix of c.
+
+ The companion matrix for power series cannot be made symmetric by
+ scaling the basis, so this function differs from those for the
+ orthogonal polynomials.
+
+ Parameters
+ ----------
+ c : array_like
+ 1-D array of polynomial coefficients ordered from low to high
+ degree.
+
+ Returns
+ -------
+ mat : ndarray
+ Companion matrix of dimensions (deg, deg).
+
+ Notes
+ -----
+
+ .. versionadded:: 1.7.0
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ raise ValueError('Series must have maximum degree of at least 1.')
+ if len(c) == 2:
+ return np.array([[-c[0]/c[1]]])
+
+ n = len(c) - 1
+ mat = np.zeros((n, n), dtype=c.dtype)
+ bot = mat.reshape(-1)[n::n+1]
+ bot[...] = 1
+ mat[:, -1] -= c[:-1]/c[-1]
+ return mat
+
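+ # A quick doctest-style sketch: for c = [1, 2, 3] (i.e. 1 + 2*x + 3*x**2)
+ # the companion matrix has ones on the subdiagonal and -c[:-1]/c[-1] in
+ # the last column; its eigenvalues are the roots of the series.
+ #
+ # >>> from numpy.polynomial.polynomial import polycompanion
+ # >>> polycompanion([1, 2, 3])
+ # array([[ 0. , -0.33333333],
+ # [ 1. , -0.66666667]])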
+
+def polyroots(c):
+ """
+ Compute the roots of a polynomial.
+
+ Return the roots (a.k.a. "zeros") of the polynomial
+
+ .. math:: p(x) = \\sum_i c[i] * x^i.
+
+ Parameters
+ ----------
+ c : 1-D array_like
+ 1-D array of polynomial coefficients.
+
+ Returns
+ -------
+ out : ndarray
+ Array of the roots of the polynomial. If all the roots are real,
+ then `out` is also real, otherwise it is complex.
+
+ See Also
+ --------
+ numpy.polynomial.chebyshev.chebroots
+ numpy.polynomial.legendre.legroots
+ numpy.polynomial.laguerre.lagroots
+ numpy.polynomial.hermite.hermroots
+ numpy.polynomial.hermite_e.hermeroots
+
+ Notes
+ -----
+ The root estimates are obtained as the eigenvalues of the companion
+ matrix. Roots far from the origin of the complex plane may have large
+ errors due to the numerical instability of the power series for such
+ values. Roots with multiplicity greater than 1 will also show larger
+ errors as the value of the series near such points is relatively
+ insensitive to errors in the roots. Isolated roots near the origin can
+ be improved by a few iterations of Newton's method.
+
+ Examples
+ --------
+ >>> import numpy.polynomial.polynomial as poly
+ >>> poly.polyroots(poly.polyfromroots((-1,0,1)))
+ array([-1., 0., 1.])
+ >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
+ dtype('float64')
+ >>> j = complex(0,1)
+ >>> poly.polyroots(poly.polyfromroots((-j,0,j)))
+ array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary
+
+ """
+ # c is a trimmed copy
+ [c] = pu.as_series([c])
+ if len(c) < 2:
+ return np.array([], dtype=c.dtype)
+ if len(c) == 2:
+ return np.array([-c[0]/c[1]])
+
+ # rotated companion matrix reduces error
+ m = polycompanion(c)[::-1,::-1]
+ r = la.eigvals(m)
+ r.sort()
+ return r
+
+
+#
+# polynomial class
+#
+
+class Polynomial(ABCPolyBase):
+ """A power series class.
+
+ The Polynomial class provides the standard Python numerical methods
+ '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+ attributes and methods listed in the `ABCPolyBase` documentation.
+
+ Parameters
+ ----------
+ coef : array_like
+ Polynomial coefficients in order of increasing degree, i.e.,
+ ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
+ domain : (2,) array_like, optional
+ Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+ to the interval ``[window[0], window[1]]`` by shifting and scaling.
+ The default value is [-1, 1].
+ window : (2,) array_like, optional
+ Window, see `domain` for its use. The default value is [-1, 1].
+
+ .. versionadded:: 1.6.0
+
+ """
+ # Virtual Functions
+ _add = staticmethod(polyadd)
+ _sub = staticmethod(polysub)
+ _mul = staticmethod(polymul)
+ _div = staticmethod(polydiv)
+ _pow = staticmethod(polypow)
+ _val = staticmethod(polyval)
+ _int = staticmethod(polyint)
+ _der = staticmethod(polyder)
+ _fit = staticmethod(polyfit)
+ _line = staticmethod(polyline)
+ _roots = staticmethod(polyroots)
+ _fromroots = staticmethod(polyfromroots)
+
+ # Virtual properties
+ domain = np.array(polydomain)
+ window = np.array(polydomain)
+ basis_name = None
+
+ @classmethod
+ def _str_term_unicode(cls, i, arg_str):
+ if i == '1':
+ return f"·{arg_str}"
+ else:
+ return f"·{arg_str}{i.translate(cls._superscript_mapping)}"
+
+ @staticmethod
+ def _str_term_ascii(i, arg_str):
+ if i == '1':
+ return f" {arg_str}"
+ else:
+ return f" {arg_str}**{i}"
+
+ @staticmethod
+ def _repr_latex_term(i, arg_str, needs_parens):
+ if needs_parens:
+ arg_str = rf"\left({arg_str}\right)"
+ if i == 0:
+ return '1'
+ elif i == 1:
+ return arg_str
+ else:
+ return f"{arg_str}^{{{i}}}"
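+
+
+ # A quick doctest-style sketch of basic use; with the default domain and
+ # window no mapping of `x` takes place.
+ #
+ # >>> from numpy.polynomial import Polynomial
+ # >>> p = Polynomial([1, 2, 3]) # 1 + 2*x + 3*x**2
+ # >>> p(2)
+ # 17.0
+ # >>> (p + p).coef
+ # array([2., 4., 6.])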
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi
new file mode 100644
index 00000000..3c87f9d2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/polynomial.pyi
@@ -0,0 +1,41 @@
+from typing import Any
+
+from numpy import ndarray, dtype, int_
+from numpy.polynomial._polybase import ABCPolyBase
+from numpy.polynomial.polyutils import trimcoef
+
+__all__: list[str]
+
+polytrim = trimcoef
+
+polydomain: ndarray[Any, dtype[int_]]
+polyzero: ndarray[Any, dtype[int_]]
+polyone: ndarray[Any, dtype[int_]]
+polyx: ndarray[Any, dtype[int_]]
+
+def polyline(off, scl): ...
+def polyfromroots(roots): ...
+def polyadd(c1, c2): ...
+def polysub(c1, c2): ...
+def polymulx(c): ...
+def polymul(c1, c2): ...
+def polydiv(c1, c2): ...
+def polypow(c, pow, maxpower=...): ...
+def polyder(c, m=..., scl=..., axis=...): ...
+def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ...
+def polyval(x, c, tensor=...): ...
+def polyvalfromroots(x, r, tensor=...): ...
+def polyval2d(x, y, c): ...
+def polygrid2d(x, y, c): ...
+def polyval3d(x, y, z, c): ...
+def polygrid3d(x, y, z, c): ...
+def polyvander(x, deg): ...
+def polyvander2d(x, y, deg): ...
+def polyvander3d(x, y, z, deg): ...
+def polyfit(x, y, deg, rcond=..., full=..., w=...): ...
+def polyroots(c): ...
+
+class Polynomial(ABCPolyBase):
+ domain: Any
+ window: Any
+ basis_name: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.py b/venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.py
new file mode 100644
index 00000000..48291389
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.py
@@ -0,0 +1,789 @@
+"""
+Utility classes and functions for the polynomial modules.
+
+This module provides: error and warning objects; a polynomial base class;
+and some routines used in both the `polynomial` and `chebyshev` modules.
+
+Warning objects
+---------------
+
+.. autosummary::
+ :toctree: generated/
+
+ RankWarning raised in least-squares fit for rank-deficient matrix.
+
+Functions
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ as_series convert list of array_likes into 1-D arrays of common type.
+ trimseq remove trailing zeros.
+ trimcoef remove small trailing coefficients.
+ getdomain return the domain appropriate for a given set of abscissae.
+ mapdomain maps points between domains.
+ mapparms parameters of the linear map between domains.
+
+"""
+import operator
+import functools
+import warnings
+
+import numpy as np
+
+from numpy.core.multiarray import dragon4_positional, dragon4_scientific
+from numpy.core.umath import absolute
+
+__all__ = [
+ 'RankWarning', 'as_series', 'trimseq',
+ 'trimcoef', 'getdomain', 'mapdomain', 'mapparms',
+ 'format_float']
+
+#
+# Warnings and Exceptions
+#
+
+class RankWarning(UserWarning):
+ """Issued by chebfit when the design matrix is rank deficient."""
+ pass
+
+#
+# Helper functions to convert inputs to 1-D arrays
+#
+def trimseq(seq):
+ """Remove small Poly series coefficients.
+
+ Parameters
+ ----------
+ seq : sequence
+ Sequence of Poly series coefficients. An empty sequence is
+ returned unchanged.
+
+ Returns
+ -------
+ series : sequence
+ Subsequence with trailing zeros removed. If the resulting sequence
+ would be empty, return the first element. The returned sequence may
+ or may not be a view.
+
+ Notes
+ -----
+ Do not lose the type info if the sequence contains unknown objects.
+
+ """
+ if len(seq) == 0:
+ return seq
+ else:
+ for i in range(len(seq) - 1, -1, -1):
+ if seq[i] != 0:
+ break
+ return seq[:i+1]
+
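+ # A quick doctest-style sketch of the trimming behavior.
+ #
+ # >>> trimseq([1, 2, 0, 0])
+ # [1, 2]
+ # >>> trimseq(np.array([0., 0.]))
+ # array([0.])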
+
+def as_series(alist, trim=True):
+ """
+ Return argument as a list of 1-d arrays.
+
+ The returned list contains array(s) of dtype double, complex double, or
+ object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
+ size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
+ of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
+ raises a ValueError if it is not first reshaped into either a 1-d or 2-d
+ array.
+
+ Parameters
+ ----------
+ alist : array_like
+ A 1- or 2-d array_like
+ trim : boolean, optional
+ When True, trailing zeros are removed from the inputs.
+ When False, the inputs are passed through intact.
+
+ Returns
+ -------
+ [a1, a2,...] : list of 1-D arrays
+ A copy of the input data as a list of 1-d arrays.
+
+ Raises
+ ------
+ ValueError
+ Raised when `as_series` cannot convert its input to 1-d arrays, or at
+ least one of the resulting arrays is empty.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polyutils as pu
+ >>> a = np.arange(4)
+ >>> pu.as_series(a)
+ [array([0.]), array([1.]), array([2.]), array([3.])]
+ >>> b = np.arange(6).reshape((2,3))
+ >>> pu.as_series(b)
+ [array([0., 1., 2.]), array([3., 4., 5.])]
+
+ >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
+ [array([1.]), array([0., 1., 2.]), array([0., 1.])]
+
+ >>> pu.as_series([2, [1.1, 0.]])
+ [array([2.]), array([1.1])]
+
+ >>> pu.as_series([2, [1.1, 0.]], trim=False)
+ [array([2.]), array([1.1, 0. ])]
+
+ """
+ arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
+ if min([a.size for a in arrays]) == 0:
+ raise ValueError("Coefficient array is empty")
+ if any(a.ndim != 1 for a in arrays):
+ raise ValueError("Coefficient array is not 1-d")
+ if trim:
+ arrays = [trimseq(a) for a in arrays]
+
+ if any(a.dtype == np.dtype(object) for a in arrays):
+ ret = []
+ for a in arrays:
+ if a.dtype != np.dtype(object):
+ tmp = np.empty(len(a), dtype=np.dtype(object))
+ tmp[:] = a[:]
+ ret.append(tmp)
+ else:
+ ret.append(a.copy())
+ else:
+ try:
+ dtype = np.common_type(*arrays)
+ except Exception as e:
+ raise ValueError("Coefficient arrays have no common type") from e
+ ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
+ return ret
+
+
+def trimcoef(c, tol=0):
+ """
+ Remove "small" "trailing" coefficients from a polynomial.
+
+ "Small" means "small in absolute value" and is controlled by the
+ parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
+ ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
+ both the 3-rd and 4-th order coefficients would be "trimmed."
+
+ Parameters
+ ----------
+ c : array_like
+ 1-d array of coefficients, ordered from lowest order to highest.
+ tol : number, optional
+ Trailing (i.e., highest order) elements with absolute value less
+ than or equal to `tol` (default value is zero) are removed.
+
+ Returns
+ -------
+ trimmed : ndarray
+ 1-d array with trailing zeros removed. If the resulting series
+ would be empty, a series containing a single zero is returned.
+
+ Raises
+ ------
+ ValueError
+ If `tol` < 0
+
+ See Also
+ --------
+ trimseq
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polyutils as pu
+ >>> pu.trimcoef((0,0,3,0,5,0,0))
+ array([0., 0., 3., 0., 5.])
+ >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
+ array([0.])
+ >>> i = complex(0,1) # works for complex
+ >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
+ array([0.0003+0.j , 0.001 -0.001j])
+
+ """
+ if tol < 0:
+ raise ValueError("tol must be non-negative")
+
+ [c] = as_series([c])
+ [ind] = np.nonzero(np.abs(c) > tol)
+ if len(ind) == 0:
+ return c[:1]*0
+ else:
+ return c[:ind[-1] + 1].copy()
+
+def getdomain(x):
+ """
+ Return a domain suitable for given abscissae.
+
+ Find a domain suitable for a polynomial or Chebyshev series
+ defined at the values supplied.
+
+ Parameters
+ ----------
+ x : array_like
+ 1-d array of abscissae whose domain will be determined.
+
+ Returns
+ -------
+ domain : ndarray
+ 1-d array containing two values. If the inputs are complex, then
+ the two returned points are the lower left and upper right corners
+ of the smallest rectangle (aligned with the axes) in the complex
+ plane containing the points `x`. If the inputs are real, then the
+ two points are the ends of the smallest interval containing the
+ points `x`.
+
+ See Also
+ --------
+ mapparms, mapdomain
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polyutils as pu
+ >>> points = np.arange(4)**2 - 5; points
+ array([-5, -4, -1, 4])
+ >>> pu.getdomain(points)
+ array([-5., 4.])
+ >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle
+ >>> pu.getdomain(c)
+ array([-1.-1.j, 1.+1.j])
+
+ """
+ [x] = as_series([x], trim=False)
+ if x.dtype.char in np.typecodes['Complex']:
+ rmin, rmax = x.real.min(), x.real.max()
+ imin, imax = x.imag.min(), x.imag.max()
+ return np.array((complex(rmin, imin), complex(rmax, imax)))
+ else:
+ return np.array((x.min(), x.max()))
+
+def mapparms(old, new):
+ """
+ Linear map parameters between domains.
+
+ Return the parameters of the linear map ``offset + scale*x`` that maps
+ `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.
+
+ Parameters
+ ----------
+ old, new : array_like
+ Domains. Each domain must (successfully) convert to a 1-d array
+ containing precisely two values.
+
+ Returns
+ -------
+ offset, scale : scalars
+ The map ``L(x) = offset + scale*x`` maps the first domain to the
+ second.
+
+ See Also
+ --------
+ getdomain, mapdomain
+
+ Notes
+ -----
+ Also works for complex numbers, and thus can be used to calculate the
+ parameters required to map any line in the complex plane to any other
+ line therein.
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polyutils as pu
+ >>> pu.mapparms((-1,1),(-1,1))
+ (0.0, 1.0)
+ >>> pu.mapparms((1,-1),(-1,1))
+ (-0.0, -1.0)
+ >>> i = complex(0,1)
+ >>> pu.mapparms((-i,-1),(1,i))
+ ((1+1j), (1-0j))
+
+ """
+ oldlen = old[1] - old[0]
+ newlen = new[1] - new[0]
+ off = (old[1]*new[0] - old[0]*new[1])/oldlen
+ scl = newlen/oldlen
+ return off, scl
+
+def mapdomain(x, old, new):
+ """
+ Apply linear map to input points.
+
+ The linear map ``offset + scale*x`` that maps the domain `old` to
+ the domain `new` is applied to the points `x`.
+
+ Parameters
+ ----------
+ x : array_like
+ Points to be mapped. If `x` is a subtype of ndarray the subtype
+ will be preserved.
+ old, new : array_like
+ The two domains that determine the map. Each must (successfully)
+ convert to 1-d arrays containing precisely two values.
+
+ Returns
+ -------
+ x_out : ndarray
+ Array of points of the same shape as `x`, after application of the
+ linear map between the two domains.
+
+ See Also
+ --------
+ getdomain, mapparms
+
+ Notes
+ -----
+ Effectively, this implements:
+
+ .. math::
+ x\\_out = new[0] + m(x - old[0])
+
+ where
+
+ .. math::
+ m = \\frac{new[1]-new[0]}{old[1]-old[0]}
+
+ Examples
+ --------
+ >>> from numpy.polynomial import polyutils as pu
+ >>> old_domain = (-1,1)
+ >>> new_domain = (0,2*np.pi)
+ >>> x = np.linspace(-1,1,6); x
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
+ >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out
+ array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary
+ 6.28318531])
+ >>> x - pu.mapdomain(x_out, new_domain, old_domain)
+ array([0., 0., 0., 0., 0., 0.])
+
+ Also works for complex numbers (and thus can be used to map any line in
+ the complex plane to any other line therein).
+
+ >>> i = complex(0,1)
+ >>> old = (-1 - i, 1 + i)
+ >>> new = (-1 + i, 1 - i)
+ >>> z = np.linspace(old[0], old[1], 6); z
+ array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ])
+ >>> new_z = pu.mapdomain(z, old, new); new_z
+ array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary
+
+ """
+ x = np.asanyarray(x)
+ off, scl = mapparms(old, new)
+ return off + scl*x
+
+
+def _nth_slice(i, ndim):
+ sl = [np.newaxis] * ndim
+ sl[i] = slice(None)
+ return tuple(sl)
+
+
+def _vander_nd(vander_fs, points, degrees):
+ r"""
+ A generalization of the Vandermonde matrix for N dimensions
+
+ The result is built by combining the results of 1d Vandermonde matrices,
+
+ .. math::
+ W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]}
+
+ where
+
+ .. math::
+ N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\
+ M &= \texttt{points[k].ndim} \\
+ V_k &= \texttt{vander\_fs[k]} \\
+ x_k &= \texttt{points[k]} \\
+ 0 \le j_k &\le \texttt{degrees[k]}
+
+ Expanding the one-dimensional :math:`V_k` functions gives:
+
+ .. math::
+ W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])}
+
+ where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along
+ dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`.
+
+ Parameters
+ ----------
+ vander_fs : Sequence[function(array_like, int) -> ndarray]
+ The 1d vander function to use for each axis, such as ``polyvander``
+ points : Sequence[array_like]
+ Arrays of point coordinates, all of the same shape. The dtypes
+ will be converted to either float64 or complex128 depending on
+ whether any of the elements are complex. Scalars are converted to
+ 1-D arrays.
+ This must be the same length as `vander_fs`.
+ degrees : Sequence[int]
+ The maximum degree (inclusive) to use for each axis.
+ This must be the same length as `vander_fs`.
+
+ Returns
+ -------
+ vander_nd : ndarray
+ An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
+ """
+ n_dims = len(vander_fs)
+ if n_dims != len(points):
+ raise ValueError(
+ f"Expected {n_dims} dimensions of sample points, got {len(points)}")
+ if n_dims != len(degrees):
+ raise ValueError(
+ f"Expected {n_dims} dimensions of degrees, got {len(degrees)}")
+ if n_dims == 0:
+ raise ValueError("Unable to guess a dtype or shape when no points are given")
+
+ # convert to the same shape and type
+ points = tuple(np.array(tuple(points), copy=False) + 0.0)
+
+ # produce the vandermonde matrix for each dimension, placing the last
+ # axis of each in an independent trailing axis of the output
+ vander_arrays = (
+ vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]
+ for i in range(n_dims)
+ )
+
+ # we checked this wasn't empty already, so no `initial` needed
+ return functools.reduce(operator.mul, vander_arrays)
+
+
+def _vander_nd_flat(vander_fs, points, degrees):
+ """
+ Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis
+
+ Used to implement the public ``<type>vander<n>d`` functions.
+ """
+ v = _vander_nd(vander_fs, points, degrees)
+ return v.reshape(v.shape[:-len(degrees)] + (-1,))
+
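+ # A quick doctest-style sketch: combining two 1d ``polyvander`` calls this
+ # way is exactly how ``polyvander2d`` is implemented.
+ #
+ # >>> import numpy as np
+ # >>> from numpy.polynomial import polynomial as P, polyutils as pu
+ # >>> x = y = np.array([1., 2.])
+ # >>> V = pu._vander_nd_flat((P.polyvander, P.polyvander), (x, y), (1, 1))
+ # >>> np.allclose(V, P.polyvander2d(x, y, [1, 1]))
+ # True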
+
+def _fromroots(line_f, mul_f, roots):
+ """
+ Helper function used to implement the ``<type>fromroots`` functions.
+
+ Parameters
+ ----------
+ line_f : function(float, float) -> ndarray
+ The ``<type>line`` function, such as ``polyline``
+ mul_f : function(array_like, array_like) -> ndarray
+ The ``<type>mul`` function, such as ``polymul``
+ roots
+ See the ``<type>fromroots`` functions for more detail
+ """
+ if len(roots) == 0:
+ return np.ones(1)
+ else:
+ [roots] = as_series([roots], trim=False)
+ roots.sort()
+ p = [line_f(-r, 1) for r in roots]
+ n = len(p)
+ while n > 1:
+ m, r = divmod(n, 2)
+ tmp = [mul_f(p[i], p[i+m]) for i in range(m)]
+ if r:
+ tmp[0] = mul_f(tmp[0], p[-1])
+ p = tmp
+ n = m
+ return p[0]
+
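+ # A quick doctest-style sketch: pairing ``polyline`` with ``polymul``
+ # reproduces ``polyfromroots``; (x - 1)*(x - 2) = 2 - 3*x + x**2.
+ #
+ # >>> from numpy.polynomial import polynomial as P, polyutils as pu
+ # >>> pu._fromroots(P.polyline, P.polymul, [1, 2])
+ # array([ 2., -3., 1.])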
+
+def _valnd(val_f, c, *args):
+ """
+ Helper function used to implement the ``<type>val<n>d`` functions.
+
+ Parameters
+ ----------
+ val_f : function(array_like, array_like, tensor: bool) -> array_like
+ The ``<type>val`` function, such as ``polyval``
+ c, args
+ See the ``<type>val<n>d`` functions for more detail
+ """
+ args = [np.asanyarray(a) for a in args]
+ shape0 = args[0].shape
+ if not all((a.shape == shape0 for a in args[1:])):
+ if len(args) == 3:
+ raise ValueError('x, y, z are incompatible')
+ elif len(args) == 2:
+ raise ValueError('x, y are incompatible')
+ else:
+ raise ValueError('ordinates are incompatible')
+ it = iter(args)
+ x0 = next(it)
+
+ # use tensor on only the first
+ c = val_f(x0, c)
+ for xi in it:
+ c = val_f(xi, c, tensor=False)
+ return c
+
+
+def _gridnd(val_f, c, *args):
+ """
+ Helper function used to implement the ``<type>grid<n>d`` functions.
+
+ Parameters
+ ----------
+ val_f : function(array_like, array_like, tensor: bool) -> array_like
+ The ``<type>val`` function, such as ``polyval``
+ c, args
+ See the ``<type>grid<n>d`` functions for more detail
+ """
+ for xi in args:
+ c = val_f(xi, c)
+ return c
+
+
+def _div(mul_f, c1, c2):
+ """
+ Helper function used to implement the ``<type>div`` functions.
+
+ Implementation uses repeated subtraction of c2 multiplied by the nth basis.
+ For some polynomial types, a more efficient approach may be possible.
+
+ Parameters
+ ----------
+ mul_f : function(array_like, array_like) -> array_like
+ The ``<type>mul`` function, such as ``polymul``
+ c1, c2
+ See the ``<type>div`` functions for more detail
+ """
+ # c1, c2 are trimmed copies
+ [c1, c2] = as_series([c1, c2])
+ if c2[-1] == 0:
+ raise ZeroDivisionError()
+
+ lc1 = len(c1)
+ lc2 = len(c2)
+ if lc1 < lc2:
+ return c1[:1]*0, c1
+ elif lc2 == 1:
+ return c1/c2[-1], c1[:1]*0
+ else:
+ quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
+ rem = c1
+ for i in range(lc1 - lc2, - 1, -1):
+ p = mul_f([0]*i + [1], c2)
+ q = rem[-1]/p[-1]
+ rem = rem[:-1] - q*p[:-1]
+ quo[i] = q
+ return quo, trimseq(rem)
+
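+ # A quick doctest-style sketch: dividing 3*x**2 + 5*x + 2 by x + 1
+ # (coefficients ordered low to high) gives quotient 3*x + 2 and zero
+ # remainder.
+ #
+ # >>> from numpy.polynomial import polynomial as P, polyutils as pu
+ # >>> pu._div(P.polymul, [2, 5, 3], [1, 1])
+ # (array([2., 3.]), array([0.]))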
+
+def _add(c1, c2):
+ """ Helper function used to implement the ``<type>add`` functions. """
+ # c1, c2 are trimmed copies
+ [c1, c2] = as_series([c1, c2])
+ if len(c1) > len(c2):
+ c1[:c2.size] += c2
+ ret = c1
+ else:
+ c2[:c1.size] += c1
+ ret = c2
+ return trimseq(ret)
+
+
+def _sub(c1, c2):
+ """ Helper function used to implement the ``<type>sub`` functions. """
+ # c1, c2 are trimmed copies
+ [c1, c2] = as_series([c1, c2])
+ if len(c1) > len(c2):
+ c1[:c2.size] -= c2
+ ret = c1
+ else:
+ c2 = -c2
+ c2[:c1.size] += c1
+ ret = c2
+ return trimseq(ret)
+
+
+def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
+ """
+ Helper function used to implement the ``<type>fit`` functions.
+
+ Parameters
+ ----------
+ vander_f : function(array_like, int) -> ndarray
+ The 1d vander function, such as ``polyvander``
+ x, y, deg, rcond, full, w
+ See the ``<type>fit`` functions for more detail
+ """
+ x = np.asarray(x) + 0.0
+ y = np.asarray(y) + 0.0
+ deg = np.asarray(deg)
+
+ # check arguments.
+ if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+ raise TypeError("deg must be an int or non-empty 1-D array of int")
+ if deg.min() < 0:
+ raise ValueError("expected deg >= 0")
+ if x.ndim != 1:
+ raise TypeError("expected 1D vector for x")
+ if x.size == 0:
+ raise TypeError("expected non-empty vector for x")
+ if y.ndim < 1 or y.ndim > 2:
+ raise TypeError("expected 1D or 2D array for y")
+ if len(x) != len(y):
+ raise TypeError("expected x and y to have same length")
+
+ if deg.ndim == 0:
+ lmax = deg
+ order = lmax + 1
+ van = vander_f(x, lmax)
+ else:
+ deg = np.sort(deg)
+ lmax = deg[-1]
+ order = len(deg)
+ van = vander_f(x, lmax)[:, deg]
+
+ # set up the least squares matrices in transposed form
+ lhs = van.T
+ rhs = y.T
+ if w is not None:
+ w = np.asarray(w) + 0.0
+ if w.ndim != 1:
+ raise TypeError("expected 1D vector for w")
+ if len(x) != len(w):
+ raise TypeError("expected x and w to have same length")
+ # apply weights. Don't use inplace operations as they
+ # can cause problems with NA.
+ lhs = lhs * w
+ rhs = rhs * w
+
+ # set rcond
+ if rcond is None:
+ rcond = len(x)*np.finfo(x.dtype).eps
+
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
+ c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
+ c = (c.T/scl).T
+
+ # Expand c to include non-fitted coefficients which are set to zero
+ if deg.ndim > 0:
+ if c.ndim == 2:
+ cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
+ else:
+ cc = np.zeros(lmax+1, dtype=c.dtype)
+ cc[deg] = c
+ c = cc
+
+ # warn on rank reduction
+ if rank != order and not full:
+ msg = "The fit may be poorly conditioned"
+ warnings.warn(msg, RankWarning, stacklevel=2)
+
+ if full:
+ return c, [resids, rank, s, rcond]
+ else:
+ return c
+
+
+def _pow(mul_f, c, pow, maxpower):
+ """
+ Helper function used to implement the ``<type>pow`` functions.
+
+ Parameters
+ ----------
+ mul_f : function(array_like, array_like) -> ndarray
+ The ``<type>mul`` function, such as ``polymul``
+ c : array_like
+ 1-D array of series coefficients
+ pow, maxpower
+ See the ``<type>pow`` functions for more detail
+ """
+ # c is a trimmed copy
+ [c] = as_series([c])
+ power = int(pow)
+ if power != pow or power < 0:
+ raise ValueError("Power must be a non-negative integer.")
+ elif maxpower is not None and power > maxpower:
+ raise ValueError("Power is too large")
+ elif power == 0:
+ return np.array([1], dtype=c.dtype)
+ elif power == 1:
+ return c
+ else:
+ # This can be made more efficient by using powers of two
+ # in the usual way.
+ prd = c
+ for i in range(2, power + 1):
+ prd = mul_f(prd, c)
+ return prd
+
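+ # A quick doctest-style sketch: (1 + x)**3 = 1 + 3*x + 3*x**2 + x**3.
+ #
+ # >>> from numpy.polynomial import polynomial as P, polyutils as pu
+ # >>> pu._pow(P.polymul, [1, 1], 3, None)
+ # array([1., 3., 3., 1.])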
+
+def _deprecate_as_int(x, desc):
+ """
+ Like `operator.index`, but emits a deprecation warning when passed a float
+
+ Parameters
+ ----------
+ x : int-like, or float with integral value
+ Value to interpret as an integer
+ desc : str
+ description to include in any error message
+
+ Raises
+ ------
+ TypeError : if x is a non-integral float or non-numeric
+ DeprecationWarning : if x is an integral float
+ """
+ try:
+ return operator.index(x)
+ except TypeError as e:
+ # Numpy 1.17.0, 2019-03-11
+ try:
+ ix = int(x)
+ except TypeError:
+ pass
+ else:
+ if ix == x:
+ warnings.warn(
+ f"In future, this will raise TypeError, as {desc} will "
+ "need to be an integer not just an integral float.",
+ DeprecationWarning,
+ stacklevel=3
+ )
+ return ix
+
+ raise TypeError(f"{desc} must be an integer") from e
+
+
+def format_float(x, parens=False):
+ if not np.issubdtype(type(x), np.floating):
+ return str(x)
+
+ opts = np.get_printoptions()
+
+ if np.isnan(x):
+ return opts['nanstr']
+ elif np.isinf(x):
+ return opts['infstr']
+
+ exp_format = False
+ if x != 0:
+ a = absolute(x)
+ if a >= 1.e8 or a < 10**min(0, -(opts['precision']-1)//2):
+ exp_format = True
+
+ trim, unique = '0', True
+ if opts['floatmode'] == 'fixed':
+ trim, unique = 'k', False
+
+ if exp_format:
+ s = dragon4_scientific(x, precision=opts['precision'],
+ unique=unique, trim=trim,
+ sign=opts['sign'] == '+')
+ if parens:
+ s = '(' + s + ')'
+ else:
+ s = dragon4_positional(x, precision=opts['precision'],
+ fractional=True,
+ unique=unique, trim=trim,
+ sign=opts['sign'] == '+')
+ return s
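+
+
+ # A quick doctest-style sketch: output follows the global print options;
+ # non-finite values use the configured strings.
+ #
+ # >>> format_float(0.5)
+ # '0.5'
+ # >>> format_float(float('nan'))
+ # 'nan'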
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi b/venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi
new file mode 100644
index 00000000..c0bcc678
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/polyutils.pyi
@@ -0,0 +1,11 @@
+__all__: list[str]
+
+class RankWarning(UserWarning): ...
+
+def trimseq(seq): ...
+def as_series(alist, trim=...): ...
+def trimcoef(c, tol=...): ...
+def getdomain(x): ...
+def mapparms(old, new): ...
+def mapdomain(x, old, new): ...
+def format_float(x, parens=...): ...
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/setup.py b/venv/lib/python3.9/site-packages/numpy/polynomial/setup.py
new file mode 100644
index 00000000..b58e867a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('polynomial', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_files('*.pyi')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py
new file mode 100644
index 00000000..2f54bebf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_chebyshev.py
@@ -0,0 +1,619 @@
+"""Tests for chebyshev module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.chebyshev as cheb
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+
+
+def trim(x):
+ return cheb.chebtrim(x, tol=1e-6)
+
+T0 = [1]
+T1 = [0, 1]
+T2 = [-1, 0, 2]
+T3 = [0, -3, 0, 4]
+T4 = [1, 0, -8, 0, 8]
+T5 = [0, 5, 0, -20, 0, 16]
+T6 = [-1, 0, 18, 0, -48, 0, 32]
+T7 = [0, -7, 0, 56, 0, -112, 0, 64]
+T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
+T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
+
+Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
+
+
+class TestPrivate:
+
+ def test__cseries_to_zseries(self):
+ for i in range(5):
+ inp = np.array([2] + [1]*i, np.double)
+ tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
+ res = cheb._cseries_to_zseries(inp)
+ assert_equal(res, tgt)
+
+ def test__zseries_to_cseries(self):
+ for i in range(5):
+ inp = np.array([.5]*i + [2] + [.5]*i, np.double)
+ tgt = np.array([2] + [1]*i, np.double)
+ res = cheb._zseries_to_cseries(inp)
+ assert_equal(res, tgt)
+
+
+class TestConstants:
+
+ def test_chebdomain(self):
+ assert_equal(cheb.chebdomain, [-1, 1])
+
+ def test_chebzero(self):
+ assert_equal(cheb.chebzero, [0])
+
+ def test_chebone(self):
+ assert_equal(cheb.chebone, [1])
+
+ def test_chebx(self):
+ assert_equal(cheb.chebx, [0, 1])
+
+
+class TestArithmetic:
+
+ def test_chebadd(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] += 1
+ res = cheb.chebadd([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_chebsub(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] -= 1
+ res = cheb.chebsub([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_chebmulx(self):
+ assert_equal(cheb.chebmulx([0]), [0])
+ assert_equal(cheb.chebmulx([1]), [0, 1])
+ for i in range(1, 5):
+ ser = [0]*i + [1]
+ tgt = [0]*(i - 1) + [.5, 0, .5]
+ assert_equal(cheb.chebmulx(ser), tgt)
+
+ def test_chebmul(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(i + j + 1)
+ tgt[i + j] += .5
+ tgt[abs(i - j)] += .5
+ res = cheb.chebmul([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_chebdiv(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ ci = [0]*i + [1]
+ cj = [0]*j + [1]
+ tgt = cheb.chebadd(ci, cj)
+ quo, rem = cheb.chebdiv(tgt, ci)
+ res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_chebpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ c = np.arange(i + 1)
+ tgt = reduce(cheb.chebmul, [c]*j, np.array([1]))
+ res = cheb.chebpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+ # coefficients of 1 + 2*x + 3*x**2
+ c1d = np.array([2.5, 2., 1.5])
+ c2d = np.einsum('i,j->ij', c1d, c1d)
+ c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+ y = polyval(x, [1., 2., 3.])
+
+ def test_chebval(self):
+ #check empty input
+ assert_equal(cheb.chebval([], [1]).size, 0)
+
+ #check normal input
+ x = np.linspace(-1, 1)
+ y = [polyval(x, c) for c in Tlist]
+ for i in range(10):
+ msg = f"At i={i}"
+ tgt = y[i]
+ res = cheb.chebval(x, [0]*i + [1])
+ assert_almost_equal(res, tgt, err_msg=msg)
+
+ #check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(cheb.chebval(x, [1]).shape, dims)
+ assert_equal(cheb.chebval(x, [1, 0]).shape, dims)
+ assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims)
+
+ def test_chebval2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
+
+ #test values
+ tgt = y1*y2
+ res = cheb.chebval2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = cheb.chebval2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3))
+
+ def test_chebval3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
+
+ #test values
+ tgt = y1*y2*y3
+ res = cheb.chebval3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = cheb.chebval3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3))
+
+ def test_chebgrid2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j->ij', y1, y2)
+ res = cheb.chebgrid2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = cheb.chebgrid2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3)*2)
+
+ def test_chebgrid3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+ res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = cheb.chebgrid3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3)*3)
+
+
+class TestIntegral:
+
+ def test_chebint(self):
+ # check exceptions
+ assert_raises(TypeError, cheb.chebint, [0], .5)
+ assert_raises(ValueError, cheb.chebint, [0], -1)
+ assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
+ assert_raises(ValueError, cheb.chebint, [0], lbnd=[0])
+ assert_raises(ValueError, cheb.chebint, [0], scl=[0])
+ assert_raises(TypeError, cheb.chebint, [0], axis=.5)
+
+ # test integration of zero polynomial
+ for i in range(2, 5):
+ k = [0]*(i - 2) + [1]
+ res = cheb.chebint([0], m=i, k=k)
+ assert_almost_equal(res, [0, 1])
+
+ # check single integration with integration constant
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [1/scl]
+ chebpol = cheb.poly2cheb(pol)
+ chebint = cheb.chebint(chebpol, m=1, k=[i])
+ res = cheb.cheb2poly(chebint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check single integration with integration constant and lbnd
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ chebpol = cheb.poly2cheb(pol)
+ chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
+ assert_almost_equal(cheb.chebval(-1, chebint), i)
+
+ # check single integration with integration constant and scaling
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [2/scl]
+ chebpol = cheb.poly2cheb(pol)
+ chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
+ res = cheb.cheb2poly(chebint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with default k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = cheb.chebint(tgt, m=1)
+ res = cheb.chebint(pol, m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with defined k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = cheb.chebint(tgt, m=1, k=[k])
+ res = cheb.chebint(pol, m=j, k=list(range(j)))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with lbnd
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
+ res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
+ res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_chebint_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T
+ res = cheb.chebint(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([cheb.chebint(c) for c in c2d])
+ res = cheb.chebint(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d])
+ res = cheb.chebint(c2d, k=3, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestDerivative:
+
+ def test_chebder(self):
+ # check exceptions
+ assert_raises(TypeError, cheb.chebder, [0], .5)
+ assert_raises(ValueError, cheb.chebder, [0], -1)
+
+ # check that zeroth derivative does nothing
+ for i in range(5):
+ tgt = [0]*i + [1]
+ res = cheb.chebder(tgt, m=0)
+ assert_equal(trim(res), trim(tgt))
+
+ # check that derivation is the inverse of integration
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = cheb.chebder(cheb.chebint(tgt, m=j), m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check derivation with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_chebder_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T
+ res = cheb.chebder(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([cheb.chebder(c) for c in c2d])
+ res = cheb.chebder(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestVander:
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+
+ def test_chebvander(self):
+ # check for 1d x
+ x = np.arange(3)
+ v = cheb.chebvander(x, 3)
+ assert_(v.shape == (3, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], cheb.chebval(x, coef))
+
+ # check for 2d x
+ x = np.array([[1, 2], [3, 4], [5, 6]])
+ v = cheb.chebvander(x, 3)
+ assert_(v.shape == (3, 2, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], cheb.chebval(x, coef))
+
+ def test_chebvander2d(self):
+ # also tests chebval2d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3))
+ van = cheb.chebvander2d(x1, x2, [1, 2])
+ tgt = cheb.chebval2d(x1, x2, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = cheb.chebvander2d([x1], [x2], [1, 2])
+ assert_(van.shape == (1, 5, 6))
+
+ def test_chebvander3d(self):
+ # also tests chebval3d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3, 4))
+ van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3])
+ tgt = cheb.chebval3d(x1, x2, x3, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3])
+ assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting:
+
+ def test_chebfit(self):
+ def f(x):
+ return x*(x - 1)*(x - 2)
+
+ def f2(x):
+ return x**4 + x**2 + 1
+
+ # Test exceptions
+ assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
+ assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
+ assert_raises(TypeError, cheb.chebfit, [], [1], 0)
+ assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
+ assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
+ assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
+ assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
+ assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
+ assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
+ assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
+ assert_raises(TypeError, cheb.chebfit, [1], [1], [])
+
+ # Test fit
+ x = np.linspace(0, 2)
+ y = f(x)
+ #
+ coef3 = cheb.chebfit(x, y, 3)
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(cheb.chebval(x, coef3), y)
+ coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(cheb.chebval(x, coef3), y)
+ #
+ coef4 = cheb.chebfit(x, y, 4)
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(cheb.chebval(x, coef4), y)
+ coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(cheb.chebval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+ coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(cheb.chebval(x, coef4), y)
+ #
+ coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ # test weighting
+ w = np.zeros_like(x)
+ yw = y.copy()
+ w[1::2] = 1
+ y[0::2] = 0
+ wcoef3 = cheb.chebfit(x, yw, 3, w=w)
+ assert_almost_equal(wcoef3, coef3)
+ wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef3, coef3)
+ #
+ wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
+ assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
+ # test fitting only even polynomials
+ x = np.linspace(-1, 1)
+ y = f2(x)
+ coef1 = cheb.chebfit(x, y, 4)
+ assert_almost_equal(cheb.chebval(x, coef1), y)
+ coef2 = cheb.chebfit(x, y, [0, 2, 4])
+ assert_almost_equal(cheb.chebval(x, coef2), y)
+ assert_almost_equal(coef1, coef2)
+
+
+class TestInterpolate:
+
+ def f(self, x):
+ return x * (x - 1) * (x - 2)
+
+ def test_raises(self):
+ assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
+ assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
+
+ def test_dimensions(self):
+ for deg in range(1, 5):
+ assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
+
+ def test_approximation(self):
+
+ def powx(x, p):
+ return x**p
+
+ x = np.linspace(-1, 1, 10)
+ for deg in range(0, 10):
+ for p in range(0, deg + 1):
+ c = cheb.chebinterpolate(powx, deg, (p,))
+ assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
+
+
+class TestCompanion:
+
+ def test_raises(self):
+ assert_raises(ValueError, cheb.chebcompanion, [])
+ assert_raises(ValueError, cheb.chebcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(cheb.chebcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss:
+
+ def test_100(self):
+ x, w = cheb.chebgauss(100)
+
+ # test orthogonality. Note that the results need to be normalized,
+ # otherwise the huge values that can arise from fast growing
+ # functions like Laguerre can be very confusing.
+ v = cheb.chebvander(x, 99)
+ vv = np.dot(v.T * w, v)
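+        # scale rows and columns by 1/sqrt(diagonal) so the weighted Gram
+        # matrix has a unit diagonal; orthogonality then shows up as the
+        # identity matrix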
+ vd = 1/np.sqrt(vv.diagonal())
+ vv = vd[:, None] * vv * vd
+ assert_almost_equal(vv, np.eye(100))
+
+ # check that the integral of 1 is correct
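+        # (the Gauss-Chebyshev weights sum to the integral of the weight
+        # function 1/sqrt(1 - x**2) over [-1, 1], which is pi)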
+ tgt = np.pi
+ assert_almost_equal(w.sum(), tgt)
+
+
+class TestMisc:
+
+ def test_chebfromroots(self):
+ res = cheb.chebfromroots([])
+ assert_almost_equal(trim(res), [1])
+ for i in range(1, 5):
+ roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+ tgt = [0]*i + [1]
+ res = cheb.chebfromroots(roots)*2**(i-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_chebroots(self):
+ assert_almost_equal(cheb.chebroots([1]), [])
+ assert_almost_equal(cheb.chebroots([1, 2]), [-.5])
+ for i in range(2, 5):
+ tgt = np.linspace(-1, 1, i)
+ res = cheb.chebroots(cheb.chebfromroots(tgt))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_chebtrim(self):
+ coef = [2, -1, 1, 0]
+
+ # Test exceptions
+ assert_raises(ValueError, cheb.chebtrim, coef, -1)
+
+ # Test results
+ assert_equal(cheb.chebtrim(coef), coef[:-1])
+ assert_equal(cheb.chebtrim(coef, 1), coef[:-3])
+ assert_equal(cheb.chebtrim(coef, 2), [0])
+
+ def test_chebline(self):
+ assert_equal(cheb.chebline(3, 4), [3, 4])
+
+ def test_cheb2poly(self):
+ for i in range(10):
+ assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i])
+
+ def test_poly2cheb(self):
+ for i in range(10):
+ assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1])
+
+ def test_weight(self):
+ x = np.linspace(-1, 1, 11)[1:-1]
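+        # the Chebyshev weight function 1/sqrt(1 - x**2), written as a
+        # product of square roots; the endpoints are excluded above since
+        # the weight diverges at x = +-1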
+ tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x))
+ res = cheb.chebweight(x)
+ assert_almost_equal(res, tgt)
+
+ def test_chebpts1(self):
+ #test exceptions
+ assert_raises(ValueError, cheb.chebpts1, 1.5)
+ assert_raises(ValueError, cheb.chebpts1, 0)
+
+ #test points
+ tgt = [0]
+ assert_almost_equal(cheb.chebpts1(1), tgt)
+ tgt = [-0.70710678118654746, 0.70710678118654746]
+ assert_almost_equal(cheb.chebpts1(2), tgt)
+ tgt = [-0.86602540378443871, 0, 0.86602540378443871]
+ assert_almost_equal(cheb.chebpts1(3), tgt)
+ tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
+ assert_almost_equal(cheb.chebpts1(4), tgt)
+
+ def test_chebpts2(self):
+ #test exceptions
+ assert_raises(ValueError, cheb.chebpts2, 1.5)
+ assert_raises(ValueError, cheb.chebpts2, 1)
+
+ #test points
+ tgt = [-1, 1]
+ assert_almost_equal(cheb.chebpts2(2), tgt)
+ tgt = [-1, 0, 1]
+ assert_almost_equal(cheb.chebpts2(3), tgt)
+ tgt = [-1, -0.5, .5, 1]
+ assert_almost_equal(cheb.chebpts2(4), tgt)
+ tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
+ assert_almost_equal(cheb.chebpts2(5), tgt)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py
new file mode 100644
index 00000000..6322062f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_classes.py
@@ -0,0 +1,600 @@
+"""Test inter-conversion of different polynomial classes.
+
+This tests the convert and cast methods of all the polynomial classes.
+
+"""
+import operator as op
+from numbers import Number
+
+import pytest
+import numpy as np
+from numpy.polynomial import (
+ Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+from numpy.polynomial.polyutils import RankWarning
+
+#
+# fixtures
+#
+
+classes = (
+ Polynomial, Legendre, Chebyshev, Laguerre,
+ Hermite, HermiteE
+ )
+classids = tuple(cls.__name__ for cls in classes)
+
+@pytest.fixture(params=classes, ids=classids)
+def Poly(request):
+ return request.param
+
+#
+# helper functions
+#
+random = np.random.random
+
+
+def assert_poly_almost_equal(p1, p2, msg=""):
+    try:
+        assert_(np.all(p1.domain == p2.domain))
+        assert_(np.all(p1.window == p2.window))
+        assert_almost_equal(p1.coef, p2.coef)
+    except AssertionError:
+        # keep the caller's message rather than silently discarding it
+        msg = f"{msg}\nResult: {p1}\nTarget: {p2}"
+        raise AssertionError(msg)
+
+
+#
+# Test conversion methods that depend on combinations of two classes.
+#
+
+Poly1 = Poly
+Poly2 = Poly
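+# Poly1 and Poly2 alias the same fixture; pytest parametrizes each
+# argument name independently, so tests taking both run over all ordered
+# pairs of classes.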
+
+
+def test_conversion(Poly1, Poly2):
+ x = np.linspace(0, 1, 10)
+ coef = random((3,))
+
+ d1 = Poly1.domain + random((2,))*.25
+ w1 = Poly1.window + random((2,))*.25
+ p1 = Poly1(coef, domain=d1, window=w1)
+
+ d2 = Poly2.domain + random((2,))*.25
+ w2 = Poly2.window + random((2,))*.25
+ p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
+
+ assert_almost_equal(p2.domain, d2)
+ assert_almost_equal(p2.window, w2)
+ assert_almost_equal(p2(x), p1(x))
+
+
+def test_cast(Poly1, Poly2):
+ x = np.linspace(0, 1, 10)
+ coef = random((3,))
+
+ d1 = Poly1.domain + random((2,))*.25
+ w1 = Poly1.window + random((2,))*.25
+ p1 = Poly1(coef, domain=d1, window=w1)
+
+ d2 = Poly2.domain + random((2,))*.25
+ w2 = Poly2.window + random((2,))*.25
+ p2 = Poly2.cast(p1, domain=d2, window=w2)
+
+ assert_almost_equal(p2.domain, d2)
+ assert_almost_equal(p2.window, w2)
+ assert_almost_equal(p2(x), p1(x))
+
+
+#
+# test methods that depend on one class
+#
+
+
+def test_identity(Poly):
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ x = np.linspace(d[0], d[1], 11)
+ p = Poly.identity(domain=d, window=w)
+ assert_equal(p.domain, d)
+ assert_equal(p.window, w)
+ assert_almost_equal(p(x), x)
+
+
+def test_basis(Poly):
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ p = Poly.basis(5, domain=d, window=w)
+ assert_equal(p.domain, d)
+ assert_equal(p.window, w)
+ assert_equal(p.coef, [0]*5 + [1])
+
+
+def test_fromroots(Poly):
+ # check that requested roots are zeros of a polynomial
+ # of correct degree, domain, and window.
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ r = random((5,))
+ p1 = Poly.fromroots(r, domain=d, window=w)
+ assert_equal(p1.degree(), len(r))
+ assert_equal(p1.domain, d)
+ assert_equal(p1.window, w)
+ assert_almost_equal(p1(r), 0)
+
+ # check that polynomial is monic
+ pdom = Polynomial.domain
+ pwin = Polynomial.window
+ p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
+ assert_almost_equal(p2.coef[-1], 1)
+
+
+def test_bad_conditioned_fit(Poly):
+
+ x = [0., 0., 1.]
+ y = [1., 2., 3.]
+
+ # check RankWarning is raised
+ with pytest.warns(RankWarning) as record:
+ Poly.fit(x, y, 2)
+ assert record[0].message.args[0] == "The fit may be poorly conditioned"
+
+
+def test_fit(Poly):
+
+ def f(x):
+ return x*(x - 1)*(x - 2)
+ x = np.linspace(0, 3)
+ y = f(x)
+
+ # check default value of domain and window
+ p = Poly.fit(x, y, 3)
+ assert_almost_equal(p.domain, [0, 3])
+ assert_almost_equal(p(x), y)
+ assert_equal(p.degree(), 3)
+
+    # check with given domain and window
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ p = Poly.fit(x, y, 3, domain=d, window=w)
+ assert_almost_equal(p(x), y)
+ assert_almost_equal(p.domain, d)
+ assert_almost_equal(p.window, w)
+ p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
+ assert_almost_equal(p(x), y)
+ assert_almost_equal(p.domain, d)
+ assert_almost_equal(p.window, w)
+
+ # check with class domain default
+ p = Poly.fit(x, y, 3, [])
+ assert_equal(p.domain, Poly.domain)
+ assert_equal(p.window, Poly.window)
+ p = Poly.fit(x, y, [0, 1, 2, 3], [])
+ assert_equal(p.domain, Poly.domain)
+ assert_equal(p.window, Poly.window)
+
+ # check that fit accepts weights.
+ w = np.zeros_like(x)
+ z = y + random(y.shape)*.25
+ w[::2] = 1
+ p1 = Poly.fit(x[::2], z[::2], 3)
+ p2 = Poly.fit(x, z, 3, w=w)
+ p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
+ assert_almost_equal(p1(x), p2(x))
+ assert_almost_equal(p2(x), p3(x))
+
+
+def test_equal(Poly):
+ p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
+ p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
+ p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
+ p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
+ assert_(p1 == p1)
+ assert_(not p1 == p2)
+ assert_(not p1 == p3)
+ assert_(not p1 == p4)
+
+
+def test_not_equal(Poly):
+ p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
+ p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
+ p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
+ p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
+ assert_(not p1 != p1)
+ assert_(p1 != p2)
+ assert_(p1 != p3)
+ assert_(p1 != p4)
+
+
+def test_add(Poly):
+ # This checks commutation, not numerical correctness
+ c1 = list(random((4,)) + .5)
+ c2 = list(random((3,)) + .5)
+ p1 = Poly(c1)
+ p2 = Poly(c2)
+ p3 = p1 + p2
+ assert_poly_almost_equal(p2 + p1, p3)
+ assert_poly_almost_equal(p1 + c2, p3)
+ assert_poly_almost_equal(c2 + p1, p3)
+ assert_poly_almost_equal(p1 + tuple(c2), p3)
+ assert_poly_almost_equal(tuple(c2) + p1, p3)
+ assert_poly_almost_equal(p1 + np.array(c2), p3)
+ assert_poly_almost_equal(np.array(c2) + p1, p3)
+ assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
+ assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
+ if Poly is Polynomial:
+ assert_raises(TypeError, op.add, p1, Chebyshev([0]))
+ else:
+ assert_raises(TypeError, op.add, p1, Polynomial([0]))
+
+
+def test_sub(Poly):
+ # This checks commutation, not numerical correctness
+ c1 = list(random((4,)) + .5)
+ c2 = list(random((3,)) + .5)
+ p1 = Poly(c1)
+ p2 = Poly(c2)
+ p3 = p1 - p2
+ assert_poly_almost_equal(p2 - p1, -p3)
+ assert_poly_almost_equal(p1 - c2, p3)
+ assert_poly_almost_equal(c2 - p1, -p3)
+ assert_poly_almost_equal(p1 - tuple(c2), p3)
+ assert_poly_almost_equal(tuple(c2) - p1, -p3)
+ assert_poly_almost_equal(p1 - np.array(c2), p3)
+ assert_poly_almost_equal(np.array(c2) - p1, -p3)
+ assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
+ assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
+ if Poly is Polynomial:
+ assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
+ else:
+ assert_raises(TypeError, op.sub, p1, Polynomial([0]))
+
+
+def test_mul(Poly):
+ c1 = list(random((4,)) + .5)
+ c2 = list(random((3,)) + .5)
+ p1 = Poly(c1)
+ p2 = Poly(c2)
+ p3 = p1 * p2
+ assert_poly_almost_equal(p2 * p1, p3)
+ assert_poly_almost_equal(p1 * c2, p3)
+ assert_poly_almost_equal(c2 * p1, p3)
+ assert_poly_almost_equal(p1 * tuple(c2), p3)
+ assert_poly_almost_equal(tuple(c2) * p1, p3)
+ assert_poly_almost_equal(p1 * np.array(c2), p3)
+ assert_poly_almost_equal(np.array(c2) * p1, p3)
+ assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
+ assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
+ assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
+ assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
+ if Poly is Polynomial:
+ assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
+ else:
+ assert_raises(TypeError, op.mul, p1, Polynomial([0]))
+
+
+def test_floordiv(Poly):
+ c1 = list(random((4,)) + .5)
+ c2 = list(random((3,)) + .5)
+ c3 = list(random((2,)) + .5)
+ p1 = Poly(c1)
+ p2 = Poly(c2)
+ p3 = Poly(c3)
+ p4 = p1 * p2 + p3
+ c4 = list(p4.coef)
+ assert_poly_almost_equal(p4 // p2, p1)
+ assert_poly_almost_equal(p4 // c2, p1)
+ assert_poly_almost_equal(c4 // p2, p1)
+ assert_poly_almost_equal(p4 // tuple(c2), p1)
+ assert_poly_almost_equal(tuple(c4) // p2, p1)
+ assert_poly_almost_equal(p4 // np.array(c2), p1)
+ assert_poly_almost_equal(np.array(c4) // p2, p1)
+ assert_poly_almost_equal(2 // p2, Poly([0]))
+ assert_poly_almost_equal(p2 // 2, 0.5*p2)
+ assert_raises(
+ TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
+ assert_raises(
+ TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
+ if Poly is Polynomial:
+ assert_raises(TypeError, op.floordiv, p1, Chebyshev([0]))
+ else:
+ assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
+
+
+def test_truediv(Poly):
+    # true division is valid only if the denominator is a Number and
+    # not a Python bool.
+    p1 = Poly([1, 2, 3])
+ p2 = p1 * 5
+
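+    # np.ScalarType is a tuple of the built-in Python scalar types plus
+    # all of NumPy's scalar types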
+ for stype in np.ScalarType:
+ if not issubclass(stype, Number) or issubclass(stype, bool):
+ continue
+ s = stype(5)
+ assert_poly_almost_equal(op.truediv(p2, s), p1)
+ assert_raises(TypeError, op.truediv, s, p2)
+ for stype in (int, float):
+ s = stype(5)
+ assert_poly_almost_equal(op.truediv(p2, s), p1)
+ assert_raises(TypeError, op.truediv, s, p2)
+ for stype in [complex]:
+ s = stype(5, 0)
+ assert_poly_almost_equal(op.truediv(p2, s), p1)
+ assert_raises(TypeError, op.truediv, s, p2)
+ for s in [tuple(), list(), dict(), bool(), np.array([1])]:
+ assert_raises(TypeError, op.truediv, p2, s)
+ assert_raises(TypeError, op.truediv, s, p2)
+ for ptype in classes:
+ assert_raises(TypeError, op.truediv, p2, ptype(1))
+
+
+def test_mod(Poly):
+ # This checks commutation, not numerical correctness
+ c1 = list(random((4,)) + .5)
+ c2 = list(random((3,)) + .5)
+ c3 = list(random((2,)) + .5)
+ p1 = Poly(c1)
+ p2 = Poly(c2)
+ p3 = Poly(c3)
+ p4 = p1 * p2 + p3
+ c4 = list(p4.coef)
+ assert_poly_almost_equal(p4 % p2, p3)
+ assert_poly_almost_equal(p4 % c2, p3)
+ assert_poly_almost_equal(c4 % p2, p3)
+ assert_poly_almost_equal(p4 % tuple(c2), p3)
+ assert_poly_almost_equal(tuple(c4) % p2, p3)
+ assert_poly_almost_equal(p4 % np.array(c2), p3)
+ assert_poly_almost_equal(np.array(c4) % p2, p3)
+ assert_poly_almost_equal(2 % p2, Poly([2]))
+ assert_poly_almost_equal(p2 % 2, Poly([0]))
+ assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
+ assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
+ if Poly is Polynomial:
+ assert_raises(TypeError, op.mod, p1, Chebyshev([0]))
+ else:
+ assert_raises(TypeError, op.mod, p1, Polynomial([0]))
+
+
+def test_divmod(Poly):
+ # This checks commutation, not numerical correctness
+ c1 = list(random((4,)) + .5)
+ c2 = list(random((3,)) + .5)
+ c3 = list(random((2,)) + .5)
+ p1 = Poly(c1)
+ p2 = Poly(c2)
+ p3 = Poly(c3)
+ p4 = p1 * p2 + p3
+ c4 = list(p4.coef)
+ quo, rem = divmod(p4, p2)
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(p4, c2)
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(c4, p2)
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(p4, tuple(c2))
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(tuple(c4), p2)
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(p4, np.array(c2))
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(np.array(c4), p2)
+ assert_poly_almost_equal(quo, p1)
+ assert_poly_almost_equal(rem, p3)
+ quo, rem = divmod(p2, 2)
+ assert_poly_almost_equal(quo, 0.5*p2)
+ assert_poly_almost_equal(rem, Poly([0]))
+ quo, rem = divmod(2, p2)
+ assert_poly_almost_equal(quo, Poly([0]))
+ assert_poly_almost_equal(rem, Poly([2]))
+ assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
+ assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
+ if Poly is Polynomial:
+ assert_raises(TypeError, divmod, p1, Chebyshev([0]))
+ else:
+ assert_raises(TypeError, divmod, p1, Polynomial([0]))
+
+
+def test_roots(Poly):
+ d = Poly.domain * 1.25 + .25
+ w = Poly.window
+ tgt = np.linspace(d[0], d[1], 5)
+ res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
+ assert_almost_equal(res, tgt)
+ # default domain and window
+ res = np.sort(Poly.fromroots(tgt).roots())
+ assert_almost_equal(res, tgt)
+
+
+def test_degree(Poly):
+ p = Poly.basis(5)
+ assert_equal(p.degree(), 5)
+
+
+def test_copy(Poly):
+ p1 = Poly.basis(5)
+ p2 = p1.copy()
+ assert_(p1 == p2)
+ assert_(p1 is not p2)
+ assert_(p1.coef is not p2.coef)
+ assert_(p1.domain is not p2.domain)
+ assert_(p1.window is not p2.window)
+
+
+def test_integ(Poly):
+ P = Polynomial
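+    # the coefficients 2, 6 and 12 are written as products so that the
+    # antiderivatives computed below have small integer coefficients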
+ # Check defaults
+ p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+ p1 = P.cast(p0.integ())
+ p2 = P.cast(p0.integ(2))
+ assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+ assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+ # Check with k
+ p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+ p1 = P.cast(p0.integ(k=1))
+ p2 = P.cast(p0.integ(2, k=[1, 1]))
+ assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
+ assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
+ # Check with lbnd
+ p0 = Poly.cast(P([1*2, 2*3, 3*4]))
+ p1 = P.cast(p0.integ(lbnd=1))
+ p2 = P.cast(p0.integ(2, lbnd=1))
+ assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
+ assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
+ # Check scaling
+ d = 2*Poly.domain
+ p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
+ p1 = P.cast(p0.integ())
+ p2 = P.cast(p0.integ(2))
+ assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+ assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+
+
+def test_deriv(Poly):
+    # Check that the derivative is the inverse of integration. It is
+    # assumed that integration has been checked elsewhere.
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ p1 = Poly([1, 2, 3], domain=d, window=w)
+ p2 = p1.integ(2, k=[1, 2])
+ p3 = p1.integ(1, k=[1])
+ assert_almost_equal(p2.deriv(1).coef, p3.coef)
+ assert_almost_equal(p2.deriv(2).coef, p1.coef)
+ # default domain and window
+ p1 = Poly([1, 2, 3])
+ p2 = p1.integ(2, k=[1, 2])
+ p3 = p1.integ(1, k=[1])
+ assert_almost_equal(p2.deriv(1).coef, p3.coef)
+ assert_almost_equal(p2.deriv(2).coef, p1.coef)
+
+
+def test_linspace(Poly):
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ p = Poly([1, 2, 3], domain=d, window=w)
+ # check default domain
+ xtgt = np.linspace(d[0], d[1], 20)
+ ytgt = p(xtgt)
+ xres, yres = p.linspace(20)
+ assert_almost_equal(xres, xtgt)
+ assert_almost_equal(yres, ytgt)
+ # check specified domain
+ xtgt = np.linspace(0, 2, 20)
+ ytgt = p(xtgt)
+ xres, yres = p.linspace(20, domain=[0, 2])
+ assert_almost_equal(xres, xtgt)
+ assert_almost_equal(yres, ytgt)
+
+
+def test_pow(Poly):
+ d = Poly.domain + random((2,))*.25
+ w = Poly.window + random((2,))*.25
+ tgt = Poly([1], domain=d, window=w)
+ tst = Poly([1, 2, 3], domain=d, window=w)
+ for i in range(5):
+ assert_poly_almost_equal(tst**i, tgt)
+ tgt = tgt * tst
+ # default domain and window
+ tgt = Poly([1])
+ tst = Poly([1, 2, 3])
+ for i in range(5):
+ assert_poly_almost_equal(tst**i, tgt)
+ tgt = tgt * tst
+ # check error for invalid powers
+ assert_raises(ValueError, op.pow, tgt, 1.5)
+ assert_raises(ValueError, op.pow, tgt, -1)
+
+
+def test_call(Poly):
+ P = Polynomial
+ d = Poly.domain
+ x = np.linspace(d[0], d[1], 11)
+
+ # Check defaults
+ p = Poly.cast(P([1, 2, 3]))
+ tgt = 1 + x*(2 + 3*x)
+ res = p(x)
+ assert_almost_equal(res, tgt)
+
+
+def test_cutdeg(Poly):
+ p = Poly([1, 2, 3])
+ assert_raises(ValueError, p.cutdeg, .5)
+ assert_raises(ValueError, p.cutdeg, -1)
+ assert_equal(len(p.cutdeg(3)), 3)
+ assert_equal(len(p.cutdeg(2)), 3)
+ assert_equal(len(p.cutdeg(1)), 2)
+ assert_equal(len(p.cutdeg(0)), 1)
+
+
+def test_truncate(Poly):
+ p = Poly([1, 2, 3])
+ assert_raises(ValueError, p.truncate, .5)
+ assert_raises(ValueError, p.truncate, 0)
+ assert_equal(len(p.truncate(4)), 3)
+ assert_equal(len(p.truncate(3)), 3)
+ assert_equal(len(p.truncate(2)), 2)
+ assert_equal(len(p.truncate(1)), 1)
+
+
+def test_trim(Poly):
+ c = [1, 1e-6, 1e-12, 0]
+ p = Poly(c)
+ assert_equal(p.trim().coef, c[:3])
+ assert_equal(p.trim(1e-10).coef, c[:2])
+ assert_equal(p.trim(1e-5).coef, c[:1])
+
+
+def test_mapparms(Poly):
+ # check with defaults. Should be identity.
+ d = Poly.domain
+ w = Poly.window
+ p = Poly([1], domain=d, window=w)
+ assert_almost_equal([0, 1], p.mapparms())
+ #
+ w = 2*d + 1
+ p = Poly([1], domain=d, window=w)
+ assert_almost_equal([1, 2], p.mapparms())
+
+
+def test_ufunc_override(Poly):
+ p = Poly([1, 2, 3])
+ x = np.ones(3)
+ assert_raises(TypeError, np.add, p, x)
+ assert_raises(TypeError, np.add, x, p)
+
+
+#
+# Test class method that only exists for some classes
+#
+
+
+class TestInterpolate:
+
+ def f(self, x):
+ return x * (x - 1) * (x - 2)
+
+ def test_raises(self):
+ assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
+ assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)
+
+ def test_dimensions(self):
+ for deg in range(1, 5):
+ assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)
+
+ def test_approximation(self):
+
+ def powx(x, p):
+ return x**p
+
+ x = np.linspace(0, 2, 10)
+ for deg in range(0, 10):
+ for t in range(0, deg + 1):
+ p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
+ assert_almost_equal(p(x), powx(x, t), decimal=11)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py
new file mode 100644
index 00000000..53ee0844
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite.py
@@ -0,0 +1,555 @@
+"""Tests for hermite module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.hermite as herm
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+
+H0 = np.array([1])
+H1 = np.array([0, 2])
+H2 = np.array([-2, 0, 4])
+H3 = np.array([0, -12, 0, 8])
+H4 = np.array([12, 0, -48, 0, 16])
+H5 = np.array([0, 120, 0, -160, 0, 32])
+H6 = np.array([-120, 0, 720, 0, -480, 0, 64])
+H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])
+H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])
+H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])
+
+Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]
+
+
+def trim(x):
+ return herm.hermtrim(x, tol=1e-6)
+
+
+class TestConstants:
+
+ def test_hermdomain(self):
+ assert_equal(herm.hermdomain, [-1, 1])
+
+ def test_hermzero(self):
+ assert_equal(herm.hermzero, [0])
+
+ def test_hermone(self):
+ assert_equal(herm.hermone, [1])
+
+ def test_hermx(self):
+ assert_equal(herm.hermx, [0, .5])
+
+
+class TestArithmetic:
+ x = np.linspace(-3, 3, 100)
+
+ def test_hermadd(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] += 1
+ res = herm.hermadd([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_hermsub(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] -= 1
+ res = herm.hermsub([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_hermmulx(self):
+ assert_equal(herm.hermmulx([0]), [0])
+ assert_equal(herm.hermmulx([1]), [0, .5])
+ for i in range(1, 5):
+ ser = [0]*i + [1]
+ tgt = [0]*(i - 1) + [i, 0, .5]
+ assert_equal(herm.hermmulx(ser), tgt)
+
+ def test_hermmul(self):
+ # check values of result
+ for i in range(5):
+ pol1 = [0]*i + [1]
+ val1 = herm.hermval(self.x, pol1)
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ pol2 = [0]*j + [1]
+ val2 = herm.hermval(self.x, pol2)
+ pol3 = herm.hermmul(pol1, pol2)
+ val3 = herm.hermval(self.x, pol3)
+ assert_(len(pol3) == i + j + 1, msg)
+ assert_almost_equal(val3, val1*val2, err_msg=msg)
+
+ def test_hermdiv(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ ci = [0]*i + [1]
+ cj = [0]*j + [1]
+ tgt = herm.hermadd(ci, cj)
+ quo, rem = herm.hermdiv(tgt, ci)
+ res = herm.hermadd(herm.hermmul(quo, ci), rem)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_hermpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ c = np.arange(i + 1)
+ tgt = reduce(herm.hermmul, [c]*j, np.array([1]))
+ res = herm.hermpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+ # coefficients of 1 + 2*x + 3*x**2
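+    # (in the Hermite basis: 2.5*H0 + 1*H1 + 0.75*H2 == 1 + 2*x + 3*x**2,
+    # since H0 = 1, H1 = 2*x and H2 = 4*x**2 - 2)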
+ c1d = np.array([2.5, 1., .75])
+ c2d = np.einsum('i,j->ij', c1d, c1d)
+ c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+ y = polyval(x, [1., 2., 3.])
+
+ def test_hermval(self):
+ #check empty input
+ assert_equal(herm.hermval([], [1]).size, 0)
+
+        #check normal input
+ x = np.linspace(-1, 1)
+ y = [polyval(x, c) for c in Hlist]
+ for i in range(10):
+ msg = f"At i={i}"
+ tgt = y[i]
+ res = herm.hermval(x, [0]*i + [1])
+ assert_almost_equal(res, tgt, err_msg=msg)
+
+ #check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(herm.hermval(x, [1]).shape, dims)
+ assert_equal(herm.hermval(x, [1, 0]).shape, dims)
+ assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)
+
+ def test_hermval2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)
+
+ #test values
+ tgt = y1*y2
+ res = herm.hermval2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herm.hermval2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3))
+
+ def test_hermval3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)
+
+ #test values
+ tgt = y1*y2*y3
+ res = herm.hermval3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herm.hermval3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3))
+
+ def test_hermgrid2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j->ij', y1, y2)
+ res = herm.hermgrid2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herm.hermgrid2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3)*2)
+
+ def test_hermgrid3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+ res = herm.hermgrid3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herm.hermgrid3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3)*3)
+
+
+class TestIntegral:
+
+ def test_hermint(self):
+ # check exceptions
+ assert_raises(TypeError, herm.hermint, [0], .5)
+ assert_raises(ValueError, herm.hermint, [0], -1)
+ assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
+ assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
+ assert_raises(ValueError, herm.hermint, [0], scl=[0])
+ assert_raises(TypeError, herm.hermint, [0], axis=.5)
+
+ # test integration of zero polynomial
+ for i in range(2, 5):
+ k = [0]*(i - 2) + [1]
+ res = herm.hermint([0], m=i, k=k)
+ assert_almost_equal(res, [0, .5])
+
+ # check single integration with integration constant
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [1/scl]
+ hermpol = herm.poly2herm(pol)
+ hermint = herm.hermint(hermpol, m=1, k=[i])
+ res = herm.herm2poly(hermint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check single integration with integration constant and lbnd
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ hermpol = herm.poly2herm(pol)
+ hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
+ assert_almost_equal(herm.hermval(-1, hermint), i)
+
+ # check single integration with integration constant and scaling
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [2/scl]
+ hermpol = herm.poly2herm(pol)
+ hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
+ res = herm.herm2poly(hermint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with default k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herm.hermint(tgt, m=1)
+ res = herm.hermint(pol, m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with defined k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herm.hermint(tgt, m=1, k=[k])
+ res = herm.hermint(pol, m=j, k=list(range(j)))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with lbnd
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
+ res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
+ res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_hermint_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T
+ res = herm.hermint(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([herm.hermint(c) for c in c2d])
+ res = herm.hermint(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])
+ res = herm.hermint(c2d, k=3, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestDerivative:
+
+ def test_hermder(self):
+ # check exceptions
+ assert_raises(TypeError, herm.hermder, [0], .5)
+ assert_raises(ValueError, herm.hermder, [0], -1)
+
+ # check that zeroth derivative does nothing
+ for i in range(5):
+ tgt = [0]*i + [1]
+ res = herm.hermder(tgt, m=0)
+ assert_equal(trim(res), trim(tgt))
+
+ # check that derivation is the inverse of integration
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = herm.hermder(herm.hermint(tgt, m=j), m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check derivation with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_hermder_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T
+ res = herm.hermder(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([herm.hermder(c) for c in c2d])
+ res = herm.hermder(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestVander:
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+
+ def test_hermvander(self):
+ # check for 1d x
+ x = np.arange(3)
+ v = herm.hermvander(x, 3)
+ assert_(v.shape == (3, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], herm.hermval(x, coef))
+
+ # check for 2d x
+ x = np.array([[1, 2], [3, 4], [5, 6]])
+ v = herm.hermvander(x, 3)
+ assert_(v.shape == (3, 2, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], herm.hermval(x, coef))
+
+ def test_hermvander2d(self):
+ # also tests hermval2d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3))
+ van = herm.hermvander2d(x1, x2, [1, 2])
+ tgt = herm.hermval2d(x1, x2, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = herm.hermvander2d([x1], [x2], [1, 2])
+ assert_(van.shape == (1, 5, 6))
+
+ def test_hermvander3d(self):
+ # also tests hermval3d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3, 4))
+ van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
+ tgt = herm.hermval3d(x1, x2, x3, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
+ assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting:
+
+ def test_hermfit(self):
+ def f(x):
+ return x*(x - 1)*(x - 2)
+
+ def f2(x):
+ return x**4 + x**2 + 1
+
+ # Test exceptions
+ assert_raises(ValueError, herm.hermfit, [1], [1], -1)
+ assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
+ assert_raises(TypeError, herm.hermfit, [], [1], 0)
+ assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
+ assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
+ assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
+ assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
+ assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
+ assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
+ assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
+ assert_raises(TypeError, herm.hermfit, [1], [1], [])
+
+ # Test fit
+ x = np.linspace(0, 2)
+ y = f(x)
+ #
+ coef3 = herm.hermfit(x, y, 3)
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(herm.hermval(x, coef3), y)
+ coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(herm.hermval(x, coef3), y)
+ #
+ coef4 = herm.hermfit(x, y, 4)
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(herm.hermval(x, coef4), y)
+ coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(herm.hermval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+ coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(herm.hermval(x, coef4), y)
+ #
+ coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ # test weighting
+ w = np.zeros_like(x)
+ yw = y.copy()
+ w[1::2] = 1
+ y[0::2] = 0
+ wcoef3 = herm.hermfit(x, yw, 3, w=w)
+ assert_almost_equal(wcoef3, coef3)
+ wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef3, coef3)
+ #
+ wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
+ assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
+        # test fitting only even Hermite polynomials
+ x = np.linspace(-1, 1)
+ y = f2(x)
+ coef1 = herm.hermfit(x, y, 4)
+ assert_almost_equal(herm.hermval(x, coef1), y)
+ coef2 = herm.hermfit(x, y, [0, 2, 4])
+ assert_almost_equal(herm.hermval(x, coef2), y)
+ assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+ def test_raises(self):
+ assert_raises(ValueError, herm.hermcompanion, [])
+ assert_raises(ValueError, herm.hermcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(herm.hermcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
+
+
+class TestGauss:
+
+ def test_100(self):
+ x, w = herm.hermgauss(100)
+
+ # test orthogonality. Note that the results need to be normalized,
+ # otherwise the huge values that can arise from fast growing
+ # functions like Laguerre can be very confusing.
+ v = herm.hermvander(x, 99)
+ vv = np.dot(v.T * w, v)
+ vd = 1/np.sqrt(vv.diagonal())
+ vv = vd[:, None] * vv * vd
+ assert_almost_equal(vv, np.eye(100))
+
+ # check that the integral of 1 is correct
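+        # (the Gauss-Hermite weights sum to the integral of exp(-x**2)
+        # over the real line, which is sqrt(pi))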
+ tgt = np.sqrt(np.pi)
+ assert_almost_equal(w.sum(), tgt)
+
+
+class TestMisc:
+
+ def test_hermfromroots(self):
+ res = herm.hermfromroots([])
+ assert_almost_equal(trim(res), [1])
+ for i in range(1, 5):
+ roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+ pol = herm.hermfromroots(roots)
+ res = herm.hermval(roots, pol)
+ tgt = 0
+ assert_(len(pol) == i + 1)
+ assert_almost_equal(herm.herm2poly(pol)[-1], 1)
+ assert_almost_equal(res, tgt)
+
+ def test_hermroots(self):
+ assert_almost_equal(herm.hermroots([1]), [])
+ assert_almost_equal(herm.hermroots([1, 1]), [-.5])
+ for i in range(2, 5):
+ tgt = np.linspace(-1, 1, i)
+ res = herm.hermroots(herm.hermfromroots(tgt))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_hermtrim(self):
+ coef = [2, -1, 1, 0]
+
+ # Test exceptions
+ assert_raises(ValueError, herm.hermtrim, coef, -1)
+
+ # Test results
+ assert_equal(herm.hermtrim(coef), coef[:-1])
+ assert_equal(herm.hermtrim(coef, 1), coef[:-3])
+ assert_equal(herm.hermtrim(coef, 2), [0])
+
+ def test_hermline(self):
+ assert_equal(herm.hermline(3, 4), [3, 2])
+
+ def test_herm2poly(self):
+ for i in range(10):
+ assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
+
+ def test_poly2herm(self):
+ for i in range(10):
+ assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
+
+ def test_weight(self):
+ x = np.linspace(-5, 5, 11)
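+        # the physicists' Hermite weight function exp(-x**2)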
+ tgt = np.exp(-x**2)
+ res = herm.hermweight(x)
+ assert_almost_equal(res, tgt)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py
new file mode 100644
index 00000000..2d262a33
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_hermite_e.py
@@ -0,0 +1,556 @@
+"""Tests for hermite_e module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.hermite_e as herme
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+
+He0 = np.array([1])
+He1 = np.array([0, 1])
+He2 = np.array([-1, 0, 1])
+He3 = np.array([0, -3, 0, 1])
+He4 = np.array([3, 0, -6, 0, 1])
+He5 = np.array([0, 15, 0, -10, 0, 1])
+He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
+He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
+He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
+He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
+
+Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
+
+
+def trim(x):
+ return herme.hermetrim(x, tol=1e-6)
+
+
+class TestConstants:
+
+ def test_hermedomain(self):
+ assert_equal(herme.hermedomain, [-1, 1])
+
+ def test_hermezero(self):
+ assert_equal(herme.hermezero, [0])
+
+ def test_hermeone(self):
+ assert_equal(herme.hermeone, [1])
+
+ def test_hermex(self):
+ assert_equal(herme.hermex, [0, 1])
+
+
+class TestArithmetic:
+ x = np.linspace(-3, 3, 100)
+
+ def test_hermeadd(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] += 1
+ res = herme.hermeadd([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_hermesub(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] -= 1
+ res = herme.hermesub([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_hermemulx(self):
+ assert_equal(herme.hermemulx([0]), [0])
+ assert_equal(herme.hermemulx([1]), [0, 1])
+ for i in range(1, 5):
+ ser = [0]*i + [1]
+ tgt = [0]*(i - 1) + [i, 0, 1]
+ assert_equal(herme.hermemulx(ser), tgt)
+
+ def test_hermemul(self):
+ # check values of result
+ for i in range(5):
+ pol1 = [0]*i + [1]
+ val1 = herme.hermeval(self.x, pol1)
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ pol2 = [0]*j + [1]
+ val2 = herme.hermeval(self.x, pol2)
+ pol3 = herme.hermemul(pol1, pol2)
+ val3 = herme.hermeval(self.x, pol3)
+ assert_(len(pol3) == i + j + 1, msg)
+ assert_almost_equal(val3, val1*val2, err_msg=msg)
+
+ def test_hermediv(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ ci = [0]*i + [1]
+ cj = [0]*j + [1]
+ tgt = herme.hermeadd(ci, cj)
+ quo, rem = herme.hermediv(tgt, ci)
+ res = herme.hermeadd(herme.hermemul(quo, ci), rem)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_hermepow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ c = np.arange(i + 1)
+ tgt = reduce(herme.hermemul, [c]*j, np.array([1]))
+ res = herme.hermepow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+ # coefficients of 1 + 2*x + 3*x**2
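+    # (in the HermiteE basis: 4*He0 + 2*He1 + 3*He2 == 1 + 2*x + 3*x**2,
+    # since He0 = 1, He1 = x and He2 = x**2 - 1)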
+ c1d = np.array([4., 2., 3.])
+ c2d = np.einsum('i,j->ij', c1d, c1d)
+ c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+ y = polyval(x, [1., 2., 3.])
+
+ def test_hermeval(self):
+ #check empty input
+ assert_equal(herme.hermeval([], [1]).size, 0)
+
+        #check normal input
+ x = np.linspace(-1, 1)
+ y = [polyval(x, c) for c in Helist]
+ for i in range(10):
+ msg = f"At i={i}"
+ tgt = y[i]
+ res = herme.hermeval(x, [0]*i + [1])
+ assert_almost_equal(res, tgt, err_msg=msg)
+
+ #check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(herme.hermeval(x, [1]).shape, dims)
+ assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
+ assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
+
+ def test_hermeval2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
+
+ #test values
+ tgt = y1*y2
+ res = herme.hermeval2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herme.hermeval2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3))
+
+ def test_hermeval3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
+
+ #test values
+ tgt = y1*y2*y3
+ res = herme.hermeval3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herme.hermeval3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3))
+
+ def test_hermegrid2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j->ij', y1, y2)
+ res = herme.hermegrid2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herme.hermegrid2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3)*2)
+
+ def test_hermegrid3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+ res = herme.hermegrid3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = herme.hermegrid3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3)*3)
+
+
+class TestIntegral:
+
+ def test_hermeint(self):
+ # check exceptions
+ assert_raises(TypeError, herme.hermeint, [0], .5)
+ assert_raises(ValueError, herme.hermeint, [0], -1)
+ assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
+ assert_raises(ValueError, herme.hermeint, [0], lbnd=[0])
+ assert_raises(ValueError, herme.hermeint, [0], scl=[0])
+ assert_raises(TypeError, herme.hermeint, [0], axis=.5)
+
+ # test integration of zero polynomial
+ for i in range(2, 5):
+ k = [0]*(i - 2) + [1]
+ res = herme.hermeint([0], m=i, k=k)
+ assert_almost_equal(res, [0, 1])
+
+ # check single integration with integration constant
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [1/scl]
+ hermepol = herme.poly2herme(pol)
+ hermeint = herme.hermeint(hermepol, m=1, k=[i])
+ res = herme.herme2poly(hermeint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check single integration with integration constant and lbnd
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ hermepol = herme.poly2herme(pol)
+ hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
+ assert_almost_equal(herme.hermeval(-1, hermeint), i)
+
+ # check single integration with integration constant and scaling
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [2/scl]
+ hermepol = herme.poly2herme(pol)
+ hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
+ res = herme.herme2poly(hermeint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with default k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herme.hermeint(tgt, m=1)
+ res = herme.hermeint(pol, m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with defined k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herme.hermeint(tgt, m=1, k=[k])
+ res = herme.hermeint(pol, m=j, k=list(range(j)))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with lbnd
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
+ res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
+ res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_hermeint_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
+ res = herme.hermeint(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([herme.hermeint(c) for c in c2d])
+ res = herme.hermeint(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
+ res = herme.hermeint(c2d, k=3, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestDerivative:
+
+ def test_hermeder(self):
+ # check exceptions
+ assert_raises(TypeError, herme.hermeder, [0], .5)
+ assert_raises(ValueError, herme.hermeder, [0], -1)
+
+ # check that zeroth derivative does nothing
+ for i in range(5):
+ tgt = [0]*i + [1]
+ res = herme.hermeder(tgt, m=0)
+ assert_equal(trim(res), trim(tgt))
+
+ # check that derivation is the inverse of integration
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check derivation with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = herme.hermeder(
+ herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_hermeder_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
+ res = herme.hermeder(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([herme.hermeder(c) for c in c2d])
+ res = herme.hermeder(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestVander:
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+
+ def test_hermevander(self):
+ # check for 1d x
+ x = np.arange(3)
+ v = herme.hermevander(x, 3)
+ assert_(v.shape == (3, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], herme.hermeval(x, coef))
+
+ # check for 2d x
+ x = np.array([[1, 2], [3, 4], [5, 6]])
+ v = herme.hermevander(x, 3)
+ assert_(v.shape == (3, 2, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], herme.hermeval(x, coef))
+
+ def test_hermevander2d(self):
+ # also tests hermeval2d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3))
+ van = herme.hermevander2d(x1, x2, [1, 2])
+ tgt = herme.hermeval2d(x1, x2, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = herme.hermevander2d([x1], [x2], [1, 2])
+ assert_(van.shape == (1, 5, 6))
+
+ def test_hermevander3d(self):
+ # also tests hermeval3d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3, 4))
+ van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
+ tgt = herme.hermeval3d(x1, x2, x3, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
+ assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting:
+
+ def test_hermefit(self):
+ def f(x):
+ return x*(x - 1)*(x - 2)
+
+ def f2(x):
+ return x**4 + x**2 + 1
+
+ # Test exceptions
+ assert_raises(ValueError, herme.hermefit, [1], [1], -1)
+ assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
+ assert_raises(TypeError, herme.hermefit, [], [1], 0)
+ assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
+ assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
+ assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
+ assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
+ assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
+ assert_raises(ValueError, herme.hermefit, [1], [1], [-1,])
+ assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6])
+ assert_raises(TypeError, herme.hermefit, [1], [1], [])
+
+ # Test fit
+ x = np.linspace(0, 2)
+ y = f(x)
+ #
+ coef3 = herme.hermefit(x, y, 3)
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(herme.hermeval(x, coef3), y)
+ coef3 = herme.hermefit(x, y, [0, 1, 2, 3])
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(herme.hermeval(x, coef3), y)
+ #
+ coef4 = herme.hermefit(x, y, 4)
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(herme.hermeval(x, coef4), y)
+ coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(herme.hermeval(x, coef4), y)
+        # check things still work if deg is not in strictly increasing order
+ coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(herme.hermeval(x, coef4), y)
+ #
+ coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3])
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ # test weighting
+ w = np.zeros_like(x)
+ yw = y.copy()
+ w[1::2] = 1
+ yw[0::2] = 0  # corrupt the zero-weight samples, not the reference y
+ wcoef3 = herme.hermefit(x, yw, 3, w=w)
+ assert_almost_equal(wcoef3, coef3)
+ wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef3, coef3)
+ #
+ wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ # test scaling with complex x values whose squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
+ assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1])
+ # test fitting only even HermiteE polynomials
+ x = np.linspace(-1, 1)
+ y = f2(x)
+ coef1 = herme.hermefit(x, y, 4)
+ assert_almost_equal(herme.hermeval(x, coef1), y)
+ coef2 = herme.hermefit(x, y, [0, 2, 4])
+ assert_almost_equal(herme.hermeval(x, coef2), y)
+ assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+ def test_raises(self):
+ assert_raises(ValueError, herme.hermecompanion, [])
+ assert_raises(ValueError, herme.hermecompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(herme.hermecompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
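+ # the series 1 + 2*He_1(x) = 1 + 2*x has the single root x = -1/2,
+ # which is the sole entry of the 1x1 companion matrix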
+
+
+class TestGauss:
+
+ def test_100(self):
+ x, w = herme.hermegauss(100)
+
+ # test orthogonality. Note that the results need to be normalized,
+ # otherwise the huge values that can arise from fast growing
+ # functions like Laguerre can be very confusing.
+ v = herme.hermevander(x, 99)
+ vv = np.dot(v.T * w, v)
+ vd = 1/np.sqrt(vv.diagonal())
+ vv = vd[:, None] * vv * vd
+ assert_almost_equal(vv, np.eye(100))
+
+ # check that the integral of 1 is correct
+ tgt = np.sqrt(2*np.pi)
+ assert_almost_equal(w.sum(), tgt)
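+ # the weights integrate against exp(-x**2/2) over the whole real
+ # line, and the integral of exp(-x**2/2) over (-inf, inf) is sqrt(2*pi)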
+
+
+class TestMisc:
+
+ def test_hermefromroots(self):
+ res = herme.hermefromroots([])
+ assert_almost_equal(trim(res), [1])
+ for i in range(1, 5):
+ roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+ pol = herme.hermefromroots(roots)
+ res = herme.hermeval(roots, pol)
+ tgt = 0
+ assert_(len(pol) == i + 1)
+ assert_almost_equal(herme.herme2poly(pol)[-1], 1)
+ assert_almost_equal(res, tgt)
+
+ def test_hermeroots(self):
+ assert_almost_equal(herme.hermeroots([1]), [])
+ assert_almost_equal(herme.hermeroots([1, 1]), [-1])
+ for i in range(2, 5):
+ tgt = np.linspace(-1, 1, i)
+ res = herme.hermeroots(herme.hermefromroots(tgt))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_hermetrim(self):
+ coef = [2, -1, 1, 0]
+
+ # Test exceptions
+ assert_raises(ValueError, herme.hermetrim, coef, -1)
+
+ # Test results
+ assert_equal(herme.hermetrim(coef), coef[:-1])
+ assert_equal(herme.hermetrim(coef, 1), coef[:-3])
+ assert_equal(herme.hermetrim(coef, 2), [0])
+
+ def test_hermeline(self):
+ assert_equal(herme.hermeline(3, 4), [3, 4])
+
+ def test_herme2poly(self):
+ for i in range(10):
+ assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
+
+ def test_poly2herme(self):
+ for i in range(10):
+ assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
+
+ def test_weight(self):
+ x = np.linspace(-5, 5, 11)
+ tgt = np.exp(-.5*x**2)
+ res = herme.hermeweight(x)
+ assert_almost_equal(res, tgt)
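+ # exp(-x**2/2) is the weight function of the probabilists' Hermite
+ # polynomials He_n, the same weight that makes hermegauss exact above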
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py
new file mode 100644
index 00000000..227ef3c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_laguerre.py
@@ -0,0 +1,537 @@
+"""Tests for laguerre module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.laguerre as lag
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+
+L0 = np.array([1])/1
+L1 = np.array([1, -1])/1
+L2 = np.array([2, -4, 1])/2
+L3 = np.array([6, -18, 9, -1])/6
+L4 = np.array([24, -96, 72, -16, 1])/24
+L5 = np.array([120, -600, 600, -200, 25, -1])/120
+L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720
+
+Llist = [L0, L1, L2, L3, L4, L5, L6]
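+# The rows above are the power-series coefficients, lowest degree first,
+# of the Laguerre polynomials L_0..L_6.  They follow from the three-term
+# recurrence (n + 1)*L_{n+1}(x) = (2*n + 1 - x)*L_n(x) - n*L_{n-1}(x),
+# e.g. 2*L_2 = (3 - x)*L_1 - L_0 = (3 - x)*(1 - x) - 1 = 2 - 4*x + x**2.
+# A quick illustrative check: L_n(0) == 1 for every n, so for example
+# polyval(0, L3) evaluates to 1.0.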
+
+
+def trim(x):
+ return lag.lagtrim(x, tol=1e-6)
+
+
+class TestConstants:
+
+ def test_lagdomain(self):
+ assert_equal(lag.lagdomain, [0, 1])
+
+ def test_lagzero(self):
+ assert_equal(lag.lagzero, [0])
+
+ def test_lagone(self):
+ assert_equal(lag.lagone, [1])
+
+ def test_lagx(self):
+ assert_equal(lag.lagx, [1, -1])
+
+
+class TestArithmetic:
+ x = np.linspace(-3, 3, 100)
+
+ def test_lagadd(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] += 1
+ res = lag.lagadd([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_lagsub(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] -= 1
+ res = lag.lagsub([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_lagmulx(self):
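+ # x*L_n(x) = -n*L_{n-1}(x) + (2*n + 1)*L_n(x) - (n + 1)*L_{n+1}(x)
+ # (the three-term recurrence solved for x*L_n); the targets below
+ # encode exactly these coefficients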
+ assert_equal(lag.lagmulx([0]), [0])
+ assert_equal(lag.lagmulx([1]), [1, -1])
+ for i in range(1, 5):
+ ser = [0]*i + [1]
+ tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)]
+ assert_almost_equal(lag.lagmulx(ser), tgt)
+
+ def test_lagmul(self):
+ # check values of result
+ for i in range(5):
+ pol1 = [0]*i + [1]
+ val1 = lag.lagval(self.x, pol1)
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ pol2 = [0]*j + [1]
+ val2 = lag.lagval(self.x, pol2)
+ pol3 = lag.lagmul(pol1, pol2)
+ val3 = lag.lagval(self.x, pol3)
+ assert_(len(pol3) == i + j + 1, msg)
+ assert_almost_equal(val3, val1*val2, err_msg=msg)
+
+ def test_lagdiv(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ ci = [0]*i + [1]
+ cj = [0]*j + [1]
+ tgt = lag.lagadd(ci, cj)
+ quo, rem = lag.lagdiv(tgt, ci)
+ res = lag.lagadd(lag.lagmul(quo, ci), rem)
+ assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_lagpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ c = np.arange(i + 1)
+ tgt = reduce(lag.lagmul, [c]*j, np.array([1]))
+ res = lag.lagpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+ # Laguerre series coefficients of 1 + 2*x + 3*x**2
+ c1d = np.array([9., -14., 6.])
+ c2d = np.einsum('i,j->ij', c1d, c1d)
+ c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+ y = polyval(x, [1., 2., 3.])
+
+ def test_lagval(self):
+ #check empty input
+ assert_equal(lag.lagval([], [1]).size, 0)
+
+ #check normal input
+ x = np.linspace(-1, 1)
+ y = [polyval(x, c) for c in Llist]
+ for i in range(7):
+ msg = f"At i={i}"
+ tgt = y[i]
+ res = lag.lagval(x, [0]*i + [1])
+ assert_almost_equal(res, tgt, err_msg=msg)
+
+ #check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(lag.lagval(x, [1]).shape, dims)
+ assert_equal(lag.lagval(x, [1, 0]).shape, dims)
+ assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims)
+
+ def test_lagval2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d)
+
+ #test values
+ tgt = y1*y2
+ res = lag.lagval2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = lag.lagval2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3))
+
+ def test_lagval3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d)
+
+ #test values
+ tgt = y1*y2*y3
+ res = lag.lagval3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = lag.lagval3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3))
+
+ def test_laggrid2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j->ij', y1, y2)
+ res = lag.laggrid2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = lag.laggrid2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3)*2)
+
+ def test_laggrid3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+ res = lag.laggrid3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = lag.laggrid3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3)*3)
+
+
+class TestIntegral:
+
+ def test_lagint(self):
+ # check exceptions
+ assert_raises(TypeError, lag.lagint, [0], .5)
+ assert_raises(ValueError, lag.lagint, [0], -1)
+ assert_raises(ValueError, lag.lagint, [0], 1, [0, 0])
+ assert_raises(ValueError, lag.lagint, [0], lbnd=[0])
+ assert_raises(ValueError, lag.lagint, [0], scl=[0])
+ assert_raises(TypeError, lag.lagint, [0], axis=.5)
+
+ # test integration of zero polynomial
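+ # (the only nonzero constant enters on the next-to-last pass, leaving
+ # the constant series [1]; the last pass integrates 1 to x, and
+ # x = L_0 - L_1 in the Laguerre basis, hence the target [1, -1])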
+ for i in range(2, 5):
+ k = [0]*(i - 2) + [1]
+ res = lag.lagint([0], m=i, k=k)
+ assert_almost_equal(res, [1, -1])
+
+ # check single integration with integration constant
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [1/scl]
+ lagpol = lag.poly2lag(pol)
+ lagint = lag.lagint(lagpol, m=1, k=[i])
+ res = lag.lag2poly(lagint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check single integration with integration constant and lbnd
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ lagpol = lag.poly2lag(pol)
+ lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1)
+ assert_almost_equal(lag.lagval(-1, lagint), i)
+
+ # check single integration with integration constant and scaling
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [2/scl]
+ lagpol = lag.poly2lag(pol)
+ lagint = lag.lagint(lagpol, m=1, k=[i], scl=2)
+ res = lag.lag2poly(lagint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with default k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = lag.lagint(tgt, m=1)
+ res = lag.lagint(pol, m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with defined k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = lag.lagint(tgt, m=1, k=[k])
+ res = lag.lagint(pol, m=j, k=list(range(j)))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with lbnd
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1)
+ res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = lag.lagint(tgt, m=1, k=[k], scl=2)
+ res = lag.lagint(pol, m=j, k=list(range(j)), scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_lagint_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T
+ res = lag.lagint(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([lag.lagint(c) for c in c2d])
+ res = lag.lagint(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([lag.lagint(c, k=3) for c in c2d])
+ res = lag.lagint(c2d, k=3, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestDerivative:
+
+ def test_lagder(self):
+ # check exceptions
+ assert_raises(TypeError, lag.lagder, [0], .5)
+ assert_raises(ValueError, lag.lagder, [0], -1)
+
+ # check that zeroth derivative does nothing
+ for i in range(5):
+ tgt = [0]*i + [1]
+ res = lag.lagder(tgt, m=0)
+ assert_equal(trim(res), trim(tgt))
+
+ # check that differentiation is the inverse of integration
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = lag.lagder(lag.lagint(tgt, m=j), m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check differentiation with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_lagder_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T
+ res = lag.lagder(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([lag.lagder(c) for c in c2d])
+ res = lag.lagder(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestVander:
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+
+ def test_lagvander(self):
+ # check for 1d x
+ x = np.arange(3)
+ v = lag.lagvander(x, 3)
+ assert_(v.shape == (3, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], lag.lagval(x, coef))
+
+ # check for 2d x
+ x = np.array([[1, 2], [3, 4], [5, 6]])
+ v = lag.lagvander(x, 3)
+ assert_(v.shape == (3, 2, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], lag.lagval(x, coef))
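+ # because the columns of lagvander(x, n) are L_0(x)..L_n(x), a
+ # least-squares Laguerre fit can be sketched by hand (illustrative,
+ # assuming a 1-d sample vector y with the same length as x):
+ #     A = lag.lagvander(x, 3)
+ #     coef, *_ = np.linalg.lstsq(A, y, rcond=None)
+ # which should agree with lag.lagfit(x, y, 3) up to rounding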
+
+ def test_lagvander2d(self):
+ # also tests lagval2d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3))
+ van = lag.lagvander2d(x1, x2, [1, 2])
+ tgt = lag.lagval2d(x1, x2, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = lag.lagvander2d([x1], [x2], [1, 2])
+ assert_(van.shape == (1, 5, 6))
+
+ def test_lagvander3d(self):
+ # also tests lagval3d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3, 4))
+ van = lag.lagvander3d(x1, x2, x3, [1, 2, 3])
+ tgt = lag.lagval3d(x1, x2, x3, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3])
+ assert_(van.shape == (1, 5, 24))
+
+
+class TestFitting:
+
+ def test_lagfit(self):
+ def f(x):
+ return x*(x - 1)*(x - 2)
+
+ # Test exceptions
+ assert_raises(ValueError, lag.lagfit, [1], [1], -1)
+ assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
+ assert_raises(TypeError, lag.lagfit, [], [1], 0)
+ assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
+ assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
+ assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
+ assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
+ assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])
+ assert_raises(ValueError, lag.lagfit, [1], [1], [-1,])
+ assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6])
+ assert_raises(TypeError, lag.lagfit, [1], [1], [])
+
+ # Test fit
+ x = np.linspace(0, 2)
+ y = f(x)
+ #
+ coef3 = lag.lagfit(x, y, 3)
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(lag.lagval(x, coef3), y)
+ coef3 = lag.lagfit(x, y, [0, 1, 2, 3])
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(lag.lagval(x, coef3), y)
+ #
+ coef4 = lag.lagfit(x, y, 4)
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(lag.lagval(x, coef4), y)
+ coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(lag.lagval(x, coef4), y)
+ #
+ coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ # test weighting
+ w = np.zeros_like(x)
+ yw = y.copy()
+ w[1::2] = 1
+ yw[0::2] = 0  # corrupt the zero-weight samples, not the reference y
+ wcoef3 = lag.lagfit(x, yw, 3, w=w)
+ assert_almost_equal(wcoef3, coef3)
+ wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef3, coef3)
+ #
+ wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ # test scaling with complex x values whose squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
+ assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
+
+
+class TestCompanion:
+
+ def test_raises(self):
+ assert_raises(ValueError, lag.lagcompanion, [])
+ assert_raises(ValueError, lag.lagcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(lag.lagcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
+
+
+class TestGauss:
+
+ def test_100(self):
+ x, w = lag.laggauss(100)
+
+ # test orthogonality. Note that the results need to be normalized,
+ # otherwise the huge values that can arise from fast growing
+ # functions like Laguerre can be very confusing.
+ v = lag.lagvander(x, 99)
+ vv = np.dot(v.T * w, v)
+ vd = 1/np.sqrt(vv.diagonal())
+ vv = vd[:, None] * vv * vd
+ assert_almost_equal(vv, np.eye(100))
+
+ # check that the integral of 1 is correct
+ tgt = 1.0
+ assert_almost_equal(w.sum(), tgt)
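+ # Gauss-Laguerre weights integrate against exp(-x) on [0, inf), and
+ # the integral of exp(-x) over [0, inf) is exactly 1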
+
+
+class TestMisc:
+
+ def test_lagfromroots(self):
+ res = lag.lagfromroots([])
+ assert_almost_equal(trim(res), [1])
+ for i in range(1, 5):
+ roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+ pol = lag.lagfromroots(roots)
+ res = lag.lagval(roots, pol)
+ tgt = 0
+ assert_(len(pol) == i + 1)
+ assert_almost_equal(lag.lag2poly(pol)[-1], 1)
+ assert_almost_equal(res, tgt)
+
+ def test_lagroots(self):
+ assert_almost_equal(lag.lagroots([1]), [])
+ assert_almost_equal(lag.lagroots([0, 1]), [1])
+ for i in range(2, 5):
+ tgt = np.linspace(0, 3, i)
+ res = lag.lagroots(lag.lagfromroots(tgt))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_lagtrim(self):
+ coef = [2, -1, 1, 0]
+
+ # Test exceptions
+ assert_raises(ValueError, lag.lagtrim, coef, -1)
+
+ # Test results
+ assert_equal(lag.lagtrim(coef), coef[:-1])
+ assert_equal(lag.lagtrim(coef, 1), coef[:-3])
+ assert_equal(lag.lagtrim(coef, 2), [0])
+
+ def test_lagline(self):
+ assert_equal(lag.lagline(3, 4), [7, -4])
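+ # lagline(3, 4) represents 3 + 4*x; since x = L_0 - L_1, this equals
+ # 7*L_0 - 4*L_1, i.e. the coefficients [7, -4]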
+
+ def test_lag2poly(self):
+ for i in range(7):
+ assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i])
+
+ def test_poly2lag(self):
+ for i in range(7):
+ assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1])
+
+ def test_weight(self):
+ x = np.linspace(0, 10, 11)
+ tgt = np.exp(-x)
+ res = lag.lagweight(x)
+ assert_almost_equal(res, tgt)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py
new file mode 100644
index 00000000..92399c16
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_legendre.py
@@ -0,0 +1,568 @@
+"""Tests for legendre module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.legendre as leg
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+
+L0 = np.array([1])
+L1 = np.array([0, 1])
+L2 = np.array([-1, 0, 3])/2
+L3 = np.array([0, -3, 0, 5])/2
+L4 = np.array([3, 0, -30, 0, 35])/8
+L5 = np.array([0, 15, 0, -70, 0, 63])/8
+L6 = np.array([-5, 0, 105, 0, -315, 0, 231])/16
+L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429])/16
+L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435])/128
+L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155])/128
+
+Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9]
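+# The rows above are the power-series coefficients, lowest degree first,
+# of the Legendre polynomials P_0..P_9, obtainable from Bonnet's
+# recurrence (n + 1)*P_{n+1}(x) = (2*n + 1)*x*P_n(x) - n*P_{n-1}(x),
+# e.g. 2*P_2 = 3*x*P_1 - P_0 = 3*x**2 - 1.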
+
+
+def trim(x):
+ return leg.legtrim(x, tol=1e-6)
+
+
+class TestConstants:
+
+ def test_legdomain(self):
+ assert_equal(leg.legdomain, [-1, 1])
+
+ def test_legzero(self):
+ assert_equal(leg.legzero, [0])
+
+ def test_legone(self):
+ assert_equal(leg.legone, [1])
+
+ def test_legx(self):
+ assert_equal(leg.legx, [0, 1])
+
+
+class TestArithmetic:
+ x = np.linspace(-1, 1, 100)
+
+ def test_legadd(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] += 1
+ res = leg.legadd([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_legsub(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] -= 1
+ res = leg.legsub([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_legmulx(self):
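+ # x*P_n(x) = (n*P_{n-1}(x) + (n + 1)*P_{n+1}(x)) / (2*n + 1)
+ # (Bonnet's recurrence solved for x*P_n); the targets below encode
+ # these coefficients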
+ assert_equal(leg.legmulx([0]), [0])
+ assert_equal(leg.legmulx([1]), [0, 1])
+ for i in range(1, 5):
+ tmp = 2*i + 1
+ ser = [0]*i + [1]
+ tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
+ assert_equal(leg.legmulx(ser), tgt)
+
+ def test_legmul(self):
+ # check values of result
+ for i in range(5):
+ pol1 = [0]*i + [1]
+ val1 = leg.legval(self.x, pol1)
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ pol2 = [0]*j + [1]
+ val2 = leg.legval(self.x, pol2)
+ pol3 = leg.legmul(pol1, pol2)
+ val3 = leg.legval(self.x, pol3)
+ assert_(len(pol3) == i + j + 1, msg)
+ assert_almost_equal(val3, val1*val2, err_msg=msg)
+
+ def test_legdiv(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ ci = [0]*i + [1]
+ cj = [0]*j + [1]
+ tgt = leg.legadd(ci, cj)
+ quo, rem = leg.legdiv(tgt, ci)
+ res = leg.legadd(leg.legmul(quo, ci), rem)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_legpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ c = np.arange(i + 1)
+ tgt = reduce(leg.legmul, [c]*j, np.array([1]))
+ res = leg.legpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+ # Legendre series coefficients of 1 + 2*x + 3*x**2
+ c1d = np.array([2., 2., 2.])
+ c2d = np.einsum('i,j->ij', c1d, c1d)
+ c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+ y = polyval(x, [1., 2., 3.])
+
+ def test_legval(self):
+ #check empty input
+ assert_equal(leg.legval([], [1]).size, 0)
+
+ #check normal input
+ x = np.linspace(-1, 1)
+ y = [polyval(x, c) for c in Llist]
+ for i in range(10):
+ msg = f"At i={i}"
+ tgt = y[i]
+ res = leg.legval(x, [0]*i + [1])
+ assert_almost_equal(res, tgt, err_msg=msg)
+
+ #check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(leg.legval(x, [1]).shape, dims)
+ assert_equal(leg.legval(x, [1, 0]).shape, dims)
+ assert_equal(leg.legval(x, [1, 0, 0]).shape, dims)
+
+ def test_legval2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d)
+
+ #test values
+ tgt = y1*y2
+ res = leg.legval2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = leg.legval2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3))
+
+ def test_legval3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d)
+
+ #test values
+ tgt = y1*y2*y3
+ res = leg.legval3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = leg.legval3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3))
+
+ def test_leggrid2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j->ij', y1, y2)
+ res = leg.leggrid2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = leg.leggrid2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3)*2)
+
+ def test_leggrid3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+ res = leg.leggrid3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = leg.leggrid3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3)*3)
+
+
+class TestIntegral:
+
+ def test_legint(self):
+ # check exceptions
+ assert_raises(TypeError, leg.legint, [0], .5)
+ assert_raises(ValueError, leg.legint, [0], -1)
+ assert_raises(ValueError, leg.legint, [0], 1, [0, 0])
+ assert_raises(ValueError, leg.legint, [0], lbnd=[0])
+ assert_raises(ValueError, leg.legint, [0], scl=[0])
+ assert_raises(TypeError, leg.legint, [0], axis=.5)
+
+ # test integration of zero polynomial
+ for i in range(2, 5):
+ k = [0]*(i - 2) + [1]
+ res = leg.legint([0], m=i, k=k)
+ assert_almost_equal(res, [0, 1])
+
+ # check single integration with integration constant
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [1/scl]
+ legpol = leg.poly2leg(pol)
+ legint = leg.legint(legpol, m=1, k=[i])
+ res = leg.leg2poly(legint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check single integration with integration constant and lbnd
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ legpol = leg.poly2leg(pol)
+ legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
+ assert_almost_equal(leg.legval(-1, legint), i)
+
+ # check single integration with integration constant and scaling
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [2/scl]
+ legpol = leg.poly2leg(pol)
+ legint = leg.legint(legpol, m=1, k=[i], scl=2)
+ res = leg.leg2poly(legint)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with default k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = leg.legint(tgt, m=1)
+ res = leg.legint(pol, m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with defined k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = leg.legint(tgt, m=1, k=[k])
+ res = leg.legint(pol, m=j, k=list(range(j)))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with lbnd
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
+ res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = leg.legint(tgt, m=1, k=[k], scl=2)
+ res = leg.legint(pol, m=j, k=list(range(j)), scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_legint_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([leg.legint(c) for c in c2d.T]).T
+ res = leg.legint(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([leg.legint(c) for c in c2d])
+ res = leg.legint(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([leg.legint(c, k=3) for c in c2d])
+ res = leg.legint(c2d, k=3, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_legint_zerointord(self):
+ assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3))
+
+
+class TestDerivative:
+
+ def test_legder(self):
+ # check exceptions
+ assert_raises(TypeError, leg.legder, [0], .5)
+ assert_raises(ValueError, leg.legder, [0], -1)
+
+ # check that zeroth derivative does nothing
+ for i in range(5):
+ tgt = [0]*i + [1]
+ res = leg.legder(tgt, m=0)
+ assert_equal(trim(res), trim(tgt))
+
+ # check that differentiation is the inverse of integration
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = leg.legder(leg.legint(tgt, m=j), m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check differentiation with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_legder_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([leg.legder(c) for c in c2d.T]).T
+ res = leg.legder(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([leg.legder(c) for c in c2d])
+ res = leg.legder(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ def test_legder_orderhigherthancoeff(self):
+ c = (1, 2, 3, 4)
+ assert_equal(leg.legder(c, 4), [0])
+
+
+class TestVander:
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+
+ def test_legvander(self):
+ # check for 1d x
+ x = np.arange(3)
+ v = leg.legvander(x, 3)
+ assert_(v.shape == (3, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], leg.legval(x, coef))
+
+ # check for 2d x
+ x = np.array([[1, 2], [3, 4], [5, 6]])
+ v = leg.legvander(x, 3)
+ assert_(v.shape == (3, 2, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], leg.legval(x, coef))
+
+ def test_legvander2d(self):
+ # also tests legval2d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3))
+ van = leg.legvander2d(x1, x2, [1, 2])
+ tgt = leg.legval2d(x1, x2, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = leg.legvander2d([x1], [x2], [1, 2])
+ assert_(van.shape == (1, 5, 6))
+
+ def test_legvander3d(self):
+ # also tests legval3d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3, 4))
+ van = leg.legvander3d(x1, x2, x3, [1, 2, 3])
+ tgt = leg.legval3d(x1, x2, x3, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3])
+ assert_(van.shape == (1, 5, 24))
+
+ def test_legvander_negdeg(self):
+ assert_raises(ValueError, leg.legvander, (1, 2, 3), -1)
+
+
+class TestFitting:
+
+ def test_legfit(self):
+ def f(x):
+ return x*(x - 1)*(x - 2)
+
+ def f2(x):
+ return x**4 + x**2 + 1
+
+ # Test exceptions
+ assert_raises(ValueError, leg.legfit, [1], [1], -1)
+ assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
+ assert_raises(TypeError, leg.legfit, [], [1], 0)
+ assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
+ assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
+ assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
+ assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
+ assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1])
+ assert_raises(ValueError, leg.legfit, [1], [1], [-1,])
+ assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6])
+ assert_raises(TypeError, leg.legfit, [1], [1], [])
+
+ # Test fit
+ x = np.linspace(0, 2)
+ y = f(x)
+ #
+ coef3 = leg.legfit(x, y, 3)
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(leg.legval(x, coef3), y)
+ coef3 = leg.legfit(x, y, [0, 1, 2, 3])
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(leg.legval(x, coef3), y)
+ #
+ coef4 = leg.legfit(x, y, 4)
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(leg.legval(x, coef4), y)
+ coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(leg.legval(x, coef4), y)
+ # check things still work if deg is not in strictly increasing order
+ coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(leg.legval(x, coef4), y)
+ #
+ coef2d = leg.legfit(x, np.array([y, y]).T, 3)
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ # test weighting
+ w = np.zeros_like(x)
+ yw = y.copy()
+ w[1::2] = 1
+ yw[0::2] = 0  # corrupt the zero-weight samples, not the reference y
+ wcoef3 = leg.legfit(x, yw, 3, w=w)
+ assert_almost_equal(wcoef3, coef3)
+ wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef3, coef3)
+ #
+ wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ # test scaling with complex x values whose squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
+ assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1])
+ # test fitting only even Legendre polynomials
+ x = np.linspace(-1, 1)
+ y = f2(x)
+ coef1 = leg.legfit(x, y, 4)
+ assert_almost_equal(leg.legval(x, coef1), y)
+ coef2 = leg.legfit(x, y, [0, 2, 4])
+ assert_almost_equal(leg.legval(x, coef2), y)
+ assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+ def test_raises(self):
+ assert_raises(ValueError, leg.legcompanion, [])
+ assert_raises(ValueError, leg.legcompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(leg.legcompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss:
+
+ def test_100(self):
+ x, w = leg.leggauss(100)
+
+ # test orthogonality. Note that the results need to be normalized,
+ # otherwise the huge values that can arise from fast growing
+ # functions like Laguerre can be very confusing.
+ v = leg.legvander(x, 99)
+ vv = np.dot(v.T * w, v)
+ vd = 1/np.sqrt(vv.diagonal())
+ vv = vd[:, None] * vv * vd
+ assert_almost_equal(vv, np.eye(100))
+
+ # check that the integral of 1 is correct
+ tgt = 2.0
+ assert_almost_equal(w.sum(), tgt)
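+ # Gauss-Legendre quadrature uses the constant weight 1 on [-1, 1],
+ # so the weights must sum to the integral of 1 over [-1, 1], i.e. 2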
+
+
+class TestMisc:
+
+ def test_legfromroots(self):
+ res = leg.legfromroots([])
+ assert_almost_equal(trim(res), [1])
+ for i in range(1, 5):
+ roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+ pol = leg.legfromroots(roots)
+ res = leg.legval(roots, pol)
+ tgt = 0
+ assert_(len(pol) == i + 1)
+ assert_almost_equal(leg.leg2poly(pol)[-1], 1)
+ assert_almost_equal(res, tgt)
+
+ def test_legroots(self):
+ assert_almost_equal(leg.legroots([1]), [])
+ assert_almost_equal(leg.legroots([1, 2]), [-.5])
+ for i in range(2, 5):
+ tgt = np.linspace(-1, 1, i)
+ res = leg.legroots(leg.legfromroots(tgt))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_legtrim(self):
+ coef = [2, -1, 1, 0]
+
+ # Test exceptions
+ assert_raises(ValueError, leg.legtrim, coef, -1)
+
+ # Test results
+ assert_equal(leg.legtrim(coef), coef[:-1])
+ assert_equal(leg.legtrim(coef, 1), coef[:-3])
+ assert_equal(leg.legtrim(coef, 2), [0])
+
+ def test_legline(self):
+ assert_equal(leg.legline(3, 4), [3, 4])
+
+ def test_legline_zeroscl(self):
+ assert_equal(leg.legline(3, 0), [3])
+
+ def test_leg2poly(self):
+ for i in range(10):
+ assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
+
+ def test_poly2leg(self):
+ for i in range(10):
+ assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
+
+ def test_weight(self):
+ x = np.linspace(-1, 1, 11)
+ tgt = 1.
+ res = leg.legweight(x)
+ assert_almost_equal(res, tgt)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py
new file mode 100644
index 00000000..6b3ef238
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polynomial.py
@@ -0,0 +1,611 @@
+"""Tests for polynomial module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.polynomial as poly
+import pickle
+from copy import deepcopy
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ assert_warns, assert_array_equal, assert_raises_regex)
+
+
+def trim(x):
+ return poly.polytrim(x, tol=1e-6)
+
+T0 = [1]
+T1 = [0, 1]
+T2 = [-1, 0, 2]
+T3 = [0, -3, 0, 4]
+T4 = [1, 0, -8, 0, 8]
+T5 = [0, 5, 0, -20, 0, 16]
+T6 = [-1, 0, 18, 0, -48, 0, 32]
+T7 = [0, -7, 0, 56, 0, -112, 0, 64]
+T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
+T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
+
+Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
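+# Tlist holds the power-series coefficients, lowest degree first, of the
+# Chebyshev polynomials T_0..T_9.  They are used below because T_n has
+# leading coefficient 2**(n - 1) for n >= 1 and cosine-spaced roots,
+# which test_polyfromroots exploits.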
+
+
+class TestConstants:
+
+ def test_polydomain(self):
+ assert_equal(poly.polydomain, [-1, 1])
+
+ def test_polyzero(self):
+ assert_equal(poly.polyzero, [0])
+
+ def test_polyone(self):
+ assert_equal(poly.polyone, [1])
+
+ def test_polyx(self):
+ assert_equal(poly.polyx, [0, 1])
+
+ def test_copy(self):
+ x = poly.Polynomial([1, 2, 3])
+ y = deepcopy(x)
+ assert_equal(x, y)
+
+ def test_pickle(self):
+ x = poly.Polynomial([1, 2, 3])
+ y = pickle.loads(pickle.dumps(x))
+ assert_equal(x, y)
+
+
+class TestArithmetic:
+
+ def test_polyadd(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] += 1
+ res = poly.polyadd([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_polysub(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(max(i, j) + 1)
+ tgt[i] += 1
+ tgt[j] -= 1
+ res = poly.polysub([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_polymulx(self):
+ assert_equal(poly.polymulx([0]), [0])
+ assert_equal(poly.polymulx([1]), [0, 1])
+ for i in range(1, 5):
+ ser = [0]*i + [1]
+ tgt = [0]*(i + 1) + [1]
+ assert_equal(poly.polymulx(ser), tgt)
+
+ def test_polymul(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ tgt = np.zeros(i + j + 1)
+ tgt[i + j] += 1
+ res = poly.polymul([0]*i + [1], [0]*j + [1])
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+ def test_polydiv(self):
+ # check zero division
+ assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])
+
+ # check scalar division
+ quo, rem = poly.polydiv([2], [2])
+ assert_equal((quo, rem), (1, 0))
+ quo, rem = poly.polydiv([2, 2], [2])
+ assert_equal((quo, rem), ((1, 1), 0))
+
+ # check the remaining cases.
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ ci = [0]*i + [1, 2]
+ cj = [0]*j + [1, 2]
+ tgt = poly.polyadd(ci, cj)
+ quo, rem = poly.polydiv(tgt, ci)
+ res = poly.polyadd(poly.polymul(quo, ci), rem)
+ assert_equal(res, tgt, err_msg=msg)
+
+ def test_polypow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = f"At i={i}, j={j}"
+ c = np.arange(i + 1)
+ tgt = reduce(poly.polymul, [c]*j, np.array([1]))
+ res = poly.polypow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+
+class TestEvaluation:
+ # coefficients of 1 + 2*x + 3*x**2
+ c1d = np.array([1., 2., 3.])
+ c2d = np.einsum('i,j->ij', c1d, c1d)
+ c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
+
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+ y = poly.polyval(x, [1., 2., 3.])
+
+ def test_polyval(self):
+ #check empty input
+ assert_equal(poly.polyval([], [1]).size, 0)
+
+ #check normal input
+ x = np.linspace(-1, 1)
+ y = [x**i for i in range(5)]
+ for i in range(5):
+ tgt = y[i]
+ res = poly.polyval(x, [0]*i + [1])
+ assert_almost_equal(res, tgt)
+ tgt = x*(x**2 - 1)
+ res = poly.polyval(x, [0, -1, 0, 1])
+ assert_almost_equal(res, tgt)
+
+ #check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(poly.polyval(x, [1]).shape, dims)
+ assert_equal(poly.polyval(x, [1, 0]).shape, dims)
+ assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)
+
+ #check masked arrays are processed correctly
+ mask = [False, True, False]
+ mx = np.ma.array([1, 2, 3], mask=mask)
+ res = np.polyval([7, 5, 3], mx)
+ assert_array_equal(res.mask, mask)
+
+ #check subtypes of ndarray are preserved
+ class C(np.ndarray):
+ pass
+
+ cx = np.array([1, 2, 3]).view(C)
+ assert_equal(type(np.polyval([2, 3, 4], cx)), C)
+
+ def test_polyvalfromroots(self):
+ # check exception for broadcasting x values over root array with
+ # too few dimensions
+ assert_raises(ValueError, poly.polyvalfromroots,
+ [1], [1], tensor=False)
+
+ # check empty input
+ assert_equal(poly.polyvalfromroots([], [1]).size, 0)
+ assert_(poly.polyvalfromroots([], [1]).shape == (0,))
+
+ # check empty input + multidimensional roots
+ assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)
+ assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))
+
+ # check scalar input
+ assert_equal(poly.polyvalfromroots(1, 1), 0)
+ assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,))
+
+ # check normal input
+ x = np.linspace(-1, 1)
+ y = [x**i for i in range(5)]
+ for i in range(1, 5):
+ tgt = y[i]
+ res = poly.polyvalfromroots(x, [0]*i)
+ assert_almost_equal(res, tgt)
+ tgt = x*(x - 1)*(x + 1)
+ res = poly.polyvalfromroots(x, [-1, 0, 1])
+ assert_almost_equal(res, tgt)
+
+ # check that shape is preserved
+ for i in range(3):
+ dims = [2]*i
+ x = np.zeros(dims)
+ assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)
+ assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)
+ assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)
+
+ # check compatibility with factorization
+ ptest = [15, 2, -16, -2, 1]
+ r = poly.polyroots(ptest)
+ x = np.linspace(-1, 1)
+ assert_almost_equal(poly.polyval(x, ptest),
+ poly.polyvalfromroots(x, r))
+
+ # check multidimensional arrays of roots and values
+ # check tensor=False
+ rshape = (3, 5)
+ x = np.arange(-3, 2)
+ r = np.random.randint(-5, 5, size=rshape)
+ res = poly.polyvalfromroots(x, r, tensor=False)
+ tgt = np.empty(r.shape[1:])
+ for ii in range(tgt.size):
+ tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])
+ assert_equal(res, tgt)
+
+ # check tensor=True
+ x = np.vstack([x, 2*x])
+ res = poly.polyvalfromroots(x, r, tensor=True)
+ tgt = np.empty(r.shape[1:] + x.shape)
+ for ii in range(r.shape[1]):
+ for jj in range(x.shape[0]):
+ tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])
+ assert_equal(res, tgt)
+
+ def test_polyval2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises_regex(ValueError, 'incompatible',
+ poly.polyval2d, x1, x2[:2], self.c2d)
+
+ #test values
+ tgt = y1*y2
+ res = poly.polyval2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = poly.polyval2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3))
+
+ def test_polyval3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test exceptions
+ assert_raises_regex(ValueError, 'incompatible',
+ poly.polyval3d, x1, x2, x3[:2], self.c3d)
+
+ #test values
+ tgt = y1*y2*y3
+ res = poly.polyval3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = poly.polyval3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3))
+
+ def test_polygrid2d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j->ij', y1, y2)
+ res = poly.polygrid2d(x1, x2, self.c2d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = poly.polygrid2d(z, z, self.c2d)
+ assert_(res.shape == (2, 3)*2)
+
+ def test_polygrid3d(self):
+ x1, x2, x3 = self.x
+ y1, y2, y3 = self.y
+
+ #test values
+ tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
+ res = poly.polygrid3d(x1, x2, x3, self.c3d)
+ assert_almost_equal(res, tgt)
+
+ #test shape
+ z = np.ones((2, 3))
+ res = poly.polygrid3d(z, z, z, self.c3d)
+ assert_(res.shape == (2, 3)*3)
+
+
+class TestIntegral:
+
+ def test_polyint(self):
+ # check exceptions
+ assert_raises(TypeError, poly.polyint, [0], .5)
+ assert_raises(ValueError, poly.polyint, [0], -1)
+ assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])
+ assert_raises(ValueError, poly.polyint, [0], lbnd=[0])
+ assert_raises(ValueError, poly.polyint, [0], scl=[0])
+ assert_raises(TypeError, poly.polyint, [0], axis=.5)
+ with assert_warns(DeprecationWarning):
+ poly.polyint([1, 1], 1.)
+
+ # test integration of zero polynomial
+ for i in range(2, 5):
+ k = [0]*(i - 2) + [1]
+ res = poly.polyint([0], m=i, k=k)
+ assert_almost_equal(res, [0, 1])
+
+ # check single integration with integration constant
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [1/scl]
+ res = poly.polyint(pol, m=1, k=[i])
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check single integration with integration constant and lbnd
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
+ assert_almost_equal(poly.polyval(-1, res), i)
+
+ # check single integration with integration constant and scaling
+ for i in range(5):
+ scl = i + 1
+ pol = [0]*i + [1]
+ tgt = [i] + [0]*i + [2/scl]
+ res = poly.polyint(pol, m=1, k=[i], scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with default k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = poly.polyint(tgt, m=1)
+ res = poly.polyint(pol, m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with defined k
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = poly.polyint(tgt, m=1, k=[k])
+ res = poly.polyint(pol, m=j, k=list(range(j)))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with lbnd
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
+ res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check multiple integrations with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ pol = [0]*i + [1]
+ tgt = pol[:]
+ for k in range(j):
+ tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
+ res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_polyint_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T
+ res = poly.polyint(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([poly.polyint(c) for c in c2d])
+ res = poly.polyint(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])
+ res = poly.polyint(c2d, k=3, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestDerivative:
+
+ def test_polyder(self):
+ # check exceptions
+ assert_raises(TypeError, poly.polyder, [0], .5)
+ assert_raises(ValueError, poly.polyder, [0], -1)
+
+ # check that zeroth derivative does nothing
+ for i in range(5):
+ tgt = [0]*i + [1]
+ res = poly.polyder(tgt, m=0)
+ assert_equal(trim(res), trim(tgt))
+
+ # check that differentiation is the inverse of integration
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = poly.polyder(poly.polyint(tgt, m=j), m=j)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ # check differentiation with scaling
+ for i in range(5):
+ for j in range(2, 5):
+ tgt = [0]*i + [1]
+ res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_polyder_axis(self):
+ # check that axis keyword works
+ c2d = np.random.random((3, 4))
+
+ tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T
+ res = poly.polyder(c2d, axis=0)
+ assert_almost_equal(res, tgt)
+
+ tgt = np.vstack([poly.polyder(c) for c in c2d])
+ res = poly.polyder(c2d, axis=1)
+ assert_almost_equal(res, tgt)
+
+
+class TestVander:
+ # some random values in [-1, 1)
+ x = np.random.random((3, 5))*2 - 1
+
+ def test_polyvander(self):
+ # check for 1d x
+ x = np.arange(3)
+ v = poly.polyvander(x, 3)
+ assert_(v.shape == (3, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], poly.polyval(x, coef))
+
+ # check for 2d x
+ x = np.array([[1, 2], [3, 4], [5, 6]])
+ v = poly.polyvander(x, 3)
+ assert_(v.shape == (3, 2, 4))
+ for i in range(4):
+ coef = [0]*i + [1]
+ assert_almost_equal(v[..., i], poly.polyval(x, coef))
+
+ def test_polyvander2d(self):
+ # also tests polyval2d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3))
+ van = poly.polyvander2d(x1, x2, [1, 2])
+ tgt = poly.polyval2d(x1, x2, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = poly.polyvander2d([x1], [x2], [1, 2])
+ assert_(van.shape == (1, 5, 6))
+
+ def test_polyvander3d(self):
+ # also tests polyval3d for non-square coefficient array
+ x1, x2, x3 = self.x
+ c = np.random.random((2, 3, 4))
+ van = poly.polyvander3d(x1, x2, x3, [1, 2, 3])
+ tgt = poly.polyval3d(x1, x2, x3, c)
+ res = np.dot(van, c.flat)
+ assert_almost_equal(res, tgt)
+
+ # check shape
+ van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3])
+ assert_(van.shape == (1, 5, 24))
+
+ def test_polyvandernegdeg(self):
+ x = np.arange(3)
+ assert_raises(ValueError, poly.polyvander, x, -1)
+
+
+class TestCompanion:
+
+ def test_raises(self):
+ assert_raises(ValueError, poly.polycompanion, [])
+ assert_raises(ValueError, poly.polycompanion, [1])
+
+ def test_dimensions(self):
+ for i in range(1, 5):
+ coef = [0]*i + [1]
+ assert_(poly.polycompanion(coef).shape == (i, i))
+
+ def test_linear_root(self):
+ assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
+
+
+class TestMisc:
+
+ def test_polyfromroots(self):
+ res = poly.polyfromroots([])
+ assert_almost_equal(trim(res), [1])
+ for i in range(1, 5):
+ roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
+ tgt = Tlist[i]
+ res = poly.polyfromroots(roots)*2**(i-1)
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_polyroots(self):
+ assert_almost_equal(poly.polyroots([1]), [])
+ assert_almost_equal(poly.polyroots([1, 2]), [-.5])
+ for i in range(2, 5):
+ tgt = np.linspace(-1, 1, i)
+ res = poly.polyroots(poly.polyfromroots(tgt))
+ assert_almost_equal(trim(res), trim(tgt))
+
+ def test_polyfit(self):
+ def f(x):
+ return x*(x - 1)*(x - 2)
+
+ def f2(x):
+ return x**4 + x**2 + 1
+
+ # Test exceptions
+ assert_raises(ValueError, poly.polyfit, [1], [1], -1)
+ assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
+ assert_raises(TypeError, poly.polyfit, [], [1], 0)
+ assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
+ assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
+ assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
+ assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
+ assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
+ assert_raises(ValueError, poly.polyfit, [1], [1], [-1,])
+ assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])
+ assert_raises(TypeError, poly.polyfit, [1], [1], [])
+
+ # Test fit
+ x = np.linspace(0, 2)
+ y = f(x)
+ #
+ coef3 = poly.polyfit(x, y, 3)
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(poly.polyval(x, coef3), y)
+ coef3 = poly.polyfit(x, y, [0, 1, 2, 3])
+ assert_equal(len(coef3), 4)
+ assert_almost_equal(poly.polyval(x, coef3), y)
+ #
+ coef4 = poly.polyfit(x, y, 4)
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(poly.polyval(x, coef4), y)
+ coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])
+ assert_equal(len(coef4), 5)
+ assert_almost_equal(poly.polyval(x, coef4), y)
+ #
+ coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+ assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+ # test weighting
+ w = np.zeros_like(x)
+ yw = y.copy()
+ w[1::2] = 1
+ yw[0::2] = 0
+ wcoef3 = poly.polyfit(x, yw, 3, w=w)
+ assert_almost_equal(wcoef3, coef3)
+ wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef3, coef3)
+ #
+ wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+ assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+ # test scaling with complex x values whose squares sum to zero.
+ x = [1, 1j, -1, -1j]
+ assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
+ assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
+ # test fitting only even polynomials
+ x = np.linspace(-1, 1)
+ y = f2(x)
+ coef1 = poly.polyfit(x, y, 4)
+ assert_almost_equal(poly.polyval(x, coef1), y)
+ coef2 = poly.polyfit(x, y, [0, 2, 4])
+ assert_almost_equal(poly.polyval(x, coef2), y)
+ assert_almost_equal(coef1, coef2)
+
+ def test_polytrim(self):
+ coef = [2, -1, 1, 0]
+
+ # Test exceptions
+ assert_raises(ValueError, poly.polytrim, coef, -1)
+
+ # Test results
+ assert_equal(poly.polytrim(coef), coef[:-1])
+ assert_equal(poly.polytrim(coef, 1), coef[:-3])
+ assert_equal(poly.polytrim(coef, 2), [0])
+
+ def test_polyline(self):
+ assert_equal(poly.polyline(3, 4), [3, 4])
+
+ def test_polyline_zero(self):
+ assert_equal(poly.polyline(3, 0), [3])
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py
new file mode 100644
index 00000000..cc630790
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_polyutils.py
@@ -0,0 +1,121 @@
+"""Tests for polyutils module.
+
+"""
+import numpy as np
+import numpy.polynomial.polyutils as pu
+from numpy.testing import (
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ )
+
+
+class TestMisc:
+
+ def test_trimseq(self):
+ for i in range(5):
+ tgt = [1]
+ res = pu.trimseq([1] + [0]*i)
+ assert_equal(res, tgt)
+
+ def test_as_series(self):
+ # check exceptions
+ assert_raises(ValueError, pu.as_series, [[]])
+ assert_raises(ValueError, pu.as_series, [[[1, 2]]])
+ assert_raises(ValueError, pu.as_series, [[1], ['a']])
+ # check common types
+ types = ['i', 'd', 'O']
+ for i in range(len(types)):
+ for j in range(i):
+ ci = np.ones(1, types[i])
+ cj = np.ones(1, types[j])
+ [resi, resj] = pu.as_series([ci, cj])
+ assert_(resi.dtype.char == resj.dtype.char)
+ assert_(resj.dtype.char == types[i])
+
+ def test_trimcoef(self):
+ coef = [2, -1, 1, 0]
+ # Test exceptions
+ assert_raises(ValueError, pu.trimcoef, coef, -1)
+ # Test results
+ assert_equal(pu.trimcoef(coef), coef[:-1])
+ assert_equal(pu.trimcoef(coef, 1), coef[:-3])
+ assert_equal(pu.trimcoef(coef, 2), [0])
+
+ def test_vander_nd_exception(self):
+ # n_dims != len(points)
+ assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90])
+ # n_dims != len(degrees)
+ assert_raises(ValueError, pu._vander_nd, (), (), [90.65])
+ # n_dims == 0
+ assert_raises(ValueError, pu._vander_nd, (), (), [])
+
+ def test_div_zerodiv(self):
+ # c2[-1] == 0
+ assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0])
+
+ def test_pow_too_large(self):
+ # power > maxpower
+ assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4)
+
+
+class TestDomain:
+
+ def test_getdomain(self):
+ # test for real values
+ x = [1, 10, 3, -1]
+ tgt = [-1, 10]
+ res = pu.getdomain(x)
+ assert_almost_equal(res, tgt)
+
+ # test for complex values
+ x = [1 + 1j, 1 - 1j, 0, 2]
+ tgt = [-1j, 2 + 1j]
+ res = pu.getdomain(x)
+ assert_almost_equal(res, tgt)
+
+ def test_mapdomain(self):
+ # test for real values
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ tgt = dom2
+ res = pu.mapdomain(dom1, dom1, dom2)
+ assert_almost_equal(res, tgt)
+
+ # test for complex values
+ dom1 = [0 - 1j, 2 + 1j]
+ dom2 = [-2, 2]
+ tgt = dom2
+ x = dom1
+ res = pu.mapdomain(x, dom1, dom2)
+ assert_almost_equal(res, tgt)
+
+ # test for multidimensional arrays
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ tgt = np.array([dom2, dom2])
+ x = np.array([dom1, dom1])
+ res = pu.mapdomain(x, dom1, dom2)
+ assert_almost_equal(res, tgt)
+
+ # test that subtypes are preserved.
+ class MyNDArray(np.ndarray):
+ pass
+
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ x = np.array([dom1, dom1]).view(MyNDArray)
+ res = pu.mapdomain(x, dom1, dom2)
+ assert_(isinstance(res, MyNDArray))
+
+ def test_mapparms(self):
+ # test for real values
+ dom1 = [0, 4]
+ dom2 = [1, 3]
+ tgt = [1, .5]
+ res = pu.mapparms(dom1, dom2)
+ assert_almost_equal(res, tgt)
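+ # mapparms returns the (offset, scale) of the affine map
+ # x -> offset + scale*x; offset=1, scale=.5 sends [0, 4] onto [1, 3]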
+
+ # test for complex values
+ dom1 = [0 - 1j, 2 + 1j]
+ dom2 = [-2, 2]
+ tgt = [-1 + 1j, 1 - 1j]
+ res = pu.mapparms(dom1, dom2)
+ assert_almost_equal(res, tgt)
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py
new file mode 100644
index 00000000..6f2a5092
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_printing.py
@@ -0,0 +1,530 @@
+from math import nan, inf
+import pytest
+from numpy.core import array, arange, printoptions
+import numpy.polynomial as poly
+from numpy.testing import assert_equal, assert_
+
+# For testing polynomial printing with object arrays
+from fractions import Fraction
+from decimal import Decimal
+
+
+class TestStrUnicodeSuperSubscripts:
+
+ @pytest.fixture(scope='class', autouse=True)
+ def use_unicode(self):
+ poly.set_default_printstyle('unicode')
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"),
+ (arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + "
+ "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + "
+ "11.0·x¹¹")),
+ ))
+ def test_polynomial_str(self, inp, tgt):
+ res = str(poly.Polynomial(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"),
+ (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + "
+ "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + "
+ "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")),
+ ))
+ def test_chebyshev_str(self, inp, tgt):
+ res = str(poly.Chebyshev(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"),
+ (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + "
+ "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + "
+ "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")),
+ ))
+ def test_legendre_str(self, inp, tgt):
+ res = str(poly.Legendre(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"),
+ (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + "
+ "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + "
+ "9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")),
+ ))
+ def test_hermite_str(self, inp, tgt):
+ res = str(poly.Hermite(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"),
+ (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + "
+ "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + "
+ "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n"
+ "11.0·He₁₁(x)")),
+ ))
+ def test_hermiteE_str(self, inp, tgt):
+ res = str(poly.HermiteE(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"),
+ (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + "
+ "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + "
+ "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")),
+ ))
+ def test_laguerre_str(self, inp, tgt):
+ res = str(poly.Laguerre(inp))
+ assert_equal(res, tgt)
+
+
+class TestStrAscii:
+
+ @pytest.fixture(scope='class', autouse=True)
+ def use_ascii(self):
+ poly.set_default_printstyle('ascii')
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0 x + 3.0 x**2"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0 x + 3.0 x**2 - 1.0 x**3"),
+ (arange(12), ("0.0 + 1.0 x + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + "
+ "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + "
+ "9.0 x**9 + 10.0 x**10 + 11.0 x**11")),
+ ))
+ def test_polynomial_str(self, inp, tgt):
+ res = str(poly.Polynomial(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"),
+ (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + "
+ "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + "
+ "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n"
+ "11.0 T_11(x)")),
+ ))
+ def test_chebyshev_str(self, inp, tgt):
+ res = str(poly.Chebyshev(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"),
+ (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + "
+ "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + "
+ "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n"
+ "11.0 P_11(x)")),
+ ))
+ def test_legendre_str(self, inp, tgt):
+ res = str(poly.Legendre(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"),
+ (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + "
+ "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + "
+ "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n"
+ "11.0 H_11(x)")),
+ ))
+ def test_hermite_str(self, inp, tgt):
+ res = str(poly.Hermite(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"),
+ (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 He_3(x) + "
+ "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + "
+ "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n"
+ "10.0 He_10(x) + 11.0 He_11(x)")),
+ ))
+ def test_hermiteE_str(self, inp, tgt):
+ res = str(poly.HermiteE(inp))
+ assert_equal(res, tgt)
+
+ @pytest.mark.parametrize(('inp', 'tgt'), (
+ ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"),
+ ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"),
+ (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + "
+ "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + "
+ "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n"
+ "11.0 L_11(x)")),
+ ))
+ def test_laguerre_str(self, inp, tgt):
+ res = str(poly.Laguerre(inp))
+ assert_equal(res, tgt)
+
+
+class TestLinebreaking:
+
+ @pytest.fixture(scope='class', autouse=True)
+ def use_ascii(self):
+ poly.set_default_printstyle('ascii')
+
+ def test_single_line_one_less(self):
+ # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 74)
+ p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123])
+ assert_equal(len(str(p)), 74)
+ assert_equal(str(p), (
+ '12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
+ '12345678.0 x**3 + 123.0 x**4'
+ ))
+
+ def test_num_chars_is_linewidth(self):
+ # len(str(p)) == default linewidth == 75
+ p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234])
+ assert_equal(len(str(p)), 75)
+ assert_equal(str(p), (
+ '12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
+ '12345678.0 x**3 +\n1234.0 x**4'
+ ))
+
+ def test_first_linebreak_multiline_one_less_than_linewidth(self):
+ # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74
+ p = poly.Polynomial(
+ [12345678, 12345678, 12345678, 12345678, 1, 12345678]
+ )
+ assert_equal(len(str(p).split('\n')[0]), 74)
+ assert_equal(str(p), (
+ '12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
+ '12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5'
+ ))
+
+ def test_first_linebreak_multiline_on_linewidth(self):
+ # First line is one character longer than previous test
+ p = poly.Polynomial(
+ [12345678, 12345678, 12345678, 12345678.12, 1, 12345678]
+ )
+ assert_equal(str(p), (
+ '12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
+ '12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5'
+ ))
+
+ @pytest.mark.parametrize(('lw', 'tgt'), (
+ (75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + '
+ '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + '
+ '900.0 x**9')),
+ (45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + '
+ '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n'
+ '900.0 x**9')),
+ (132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + '
+ '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + '
+ '900.0 x**9')),
+ ))
+ def test_linewidth_printoption(self, lw, tgt):
+ p = poly.Polynomial(
+ [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900]
+ )
+ with printoptions(linewidth=lw):
+ assert_equal(str(p), tgt)
+ for line in str(p).split('\n'):
+ assert_(len(line) < lw)
+
+
+def test_set_default_printoptions():
+ p = poly.Polynomial([1, 2, 3])
+ c = poly.Chebyshev([1, 2, 3])
+ poly.set_default_printstyle('ascii')
+ assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2")
+ assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)")
+ poly.set_default_printstyle('unicode')
+ assert_equal(str(p), "1.0 + 2.0·x + 3.0·x²")
+ assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)")
+ with pytest.raises(ValueError):
+ poly.set_default_printstyle('invalid_input')
+
+
+def test_complex_coefficients():
+ """Test both numpy and built-in complex."""
+ coefs = [0+1j, 1+1j, -2+2j, 3+0j]
+ # numpy complex
+ p1 = poly.Polynomial(coefs)
+ # Python complex
+ p2 = poly.Polynomial(array(coefs, dtype=object))
+ poly.set_default_printstyle('unicode')
+ assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³")
+ assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³")
+ poly.set_default_printstyle('ascii')
+ assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3")
+ assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3")
+
+
+@pytest.mark.parametrize(('coefs', 'tgt'), (
+ (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), (
+ "1/2 + 3/4·x"
+ )),
+ (array([1, 2, Fraction(5, 7)], dtype=object), (
+ "1 + 2·x + 5/7·x²"
+ )),
+ (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), (
+ "1.00 + 2.2·x + 3·x²"
+ )),
+))
+def test_numeric_object_coefficients(coefs, tgt):
+ p = poly.Polynomial(coefs)
+ poly.set_default_printstyle('unicode')
+ assert_equal(str(p), tgt)
+
+
+@pytest.mark.parametrize(('coefs', 'tgt'), (
+ (array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'),
+ (array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'),
+))
+def test_nonnumeric_object_coefficients(coefs, tgt):
+ """
+ Test coef fallback for object arrays of non-numeric coefficients.
+ """
+ p = poly.Polynomial(coefs)
+ poly.set_default_printstyle('unicode')
+ assert_equal(str(p), tgt)
+
+
+class TestFormat:
+ def test_format_unicode(self):
+ poly.set_default_printstyle('ascii')
+ p = poly.Polynomial([1, 2, 0, -1])
+ assert_equal(format(p, 'unicode'), "1.0 + 2.0·x + 0.0·x² - 1.0·x³")
+
+ def test_format_ascii(self):
+ poly.set_default_printstyle('unicode')
+ p = poly.Polynomial([1, 2, 0, -1])
+ assert_equal(
+ format(p, 'ascii'), "1.0 + 2.0 x + 0.0 x**2 - 1.0 x**3"
+ )
+
+ def test_empty_formatstr(self):
+ poly.set_default_printstyle('ascii')
+ p = poly.Polynomial([1, 2, 3])
+ assert_equal(format(p), "1.0 + 2.0 x + 3.0 x**2")
+ assert_equal(f"{p}", "1.0 + 2.0 x + 3.0 x**2")
+
+ def test_bad_formatstr(self):
+ p = poly.Polynomial([1, 2, 0, -1])
+ with pytest.raises(ValueError):
+ format(p, '.2f')
+
+
+@pytest.mark.parametrize(('poly', 'tgt'), (
+ (poly.Polynomial, '1.0 + 2.0·z + 3.0·z²'),
+ (poly.Chebyshev, '1.0 + 2.0·T₁(z) + 3.0·T₂(z)'),
+ (poly.Hermite, '1.0 + 2.0·H₁(z) + 3.0·H₂(z)'),
+ (poly.HermiteE, '1.0 + 2.0·He₁(z) + 3.0·He₂(z)'),
+ (poly.Laguerre, '1.0 + 2.0·L₁(z) + 3.0·L₂(z)'),
+ (poly.Legendre, '1.0 + 2.0·P₁(z) + 3.0·P₂(z)'),
+))
+def test_symbol(poly, tgt):
+ p = poly([1, 2, 3], symbol='z')
+ assert_equal(f"{p:unicode}", tgt)
+
+
+class TestRepr:
+    def test_polynomial_repr(self):
+ res = repr(poly.Polynomial([0, 1]))
+ tgt = (
+ "Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1], "
+ "symbol='x')"
+ )
+ assert_equal(res, tgt)
+
+    def test_chebyshev_repr(self):
+ res = repr(poly.Chebyshev([0, 1]))
+ tgt = (
+ "Chebyshev([0., 1.], domain=[-1, 1], window=[-1, 1], "
+ "symbol='x')"
+ )
+ assert_equal(res, tgt)
+
+ def test_legendre_repr(self):
+ res = repr(poly.Legendre([0, 1]))
+ tgt = (
+ "Legendre([0., 1.], domain=[-1, 1], window=[-1, 1], "
+ "symbol='x')"
+ )
+ assert_equal(res, tgt)
+
+ def test_hermite_repr(self):
+ res = repr(poly.Hermite([0, 1]))
+ tgt = (
+ "Hermite([0., 1.], domain=[-1, 1], window=[-1, 1], "
+ "symbol='x')"
+ )
+ assert_equal(res, tgt)
+
+ def test_hermiteE_repr(self):
+ res = repr(poly.HermiteE([0, 1]))
+ tgt = (
+ "HermiteE([0., 1.], domain=[-1, 1], window=[-1, 1], "
+ "symbol='x')"
+ )
+ assert_equal(res, tgt)
+
+ def test_laguerre_repr(self):
+ res = repr(poly.Laguerre([0, 1]))
+ tgt = (
+ "Laguerre([0., 1.], domain=[0, 1], window=[0, 1], "
+ "symbol='x')"
+ )
+ assert_equal(res, tgt)
+
+
+class TestLatexRepr:
+ """Test the latex repr used by Jupyter"""
+
+ def as_latex(self, obj):
+        # Right now we ignore the formatting of scalars in our tests, since
+        # it makes them too verbose. Ideally, the formatting of scalars will
+        # be fixed such that the tests below continue to pass.
+ obj._repr_latex_scalar = lambda x, parens=False: str(x)
+ try:
+ return obj._repr_latex_()
+ finally:
+ del obj._repr_latex_scalar
+
+ def test_simple_polynomial(self):
+ # default input
+ p = poly.Polynomial([1, 2, 3])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$')
+
+ # translated input
+ p = poly.Polynomial([1, 2, 3], domain=[-2, 0])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$')
+
+ # scaled input
+ p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$')
+
+ # affine input
+ p = poly.Polynomial([1, 2, 3], domain=[-1, 0])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$')
+
+ def test_basis_func(self):
+ p = poly.Chebyshev([1, 2, 3])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$')
+ # affine input - check no surplus parens are added
+ p = poly.Chebyshev([1, 2, 3], domain=[-1, 0])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$')
+
+ def test_multichar_basis_func(self):
+ p = poly.HermiteE([1, 2, 3])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$')
+
+ def test_symbol_basic(self):
+ # default input
+ p = poly.Polynomial([1, 2, 3], symbol='z')
+ assert_equal(self.as_latex(p),
+ r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$')
+
+ # translated input
+ p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z')
+ assert_equal(
+ self.as_latex(p),
+ (
+ r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,'
+ r'\left(1.0 + z\right)^{2}$'
+ ),
+ )
+
+ # scaled input
+ p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z')
+ assert_equal(
+ self.as_latex(p),
+ (
+ r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,'
+ r'\left(2.0z\right)^{2}$'
+ ),
+ )
+
+ # affine input
+ p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z')
+ assert_equal(
+ self.as_latex(p),
+ (
+ r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,'
+ r'\left(1.0 + 2.0z\right)^{2}$'
+ ),
+ )
+
+
+SWITCH_TO_EXP = (
+ '1.0 + (1.0e-01) x + (1.0e-02) x**2',
+ '1.2 + (1.2e-01) x + (1.2e-02) x**2',
+ '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3',
+ '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3',
+ '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4',
+ '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + '
+ '(1.23457e-04) x**4',
+ '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + '
+ '(1.234568e-04) x**4 + (1.234568e-05) x**5',
+ '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + '
+ '(1.2345679e-04) x**4 + (1.2345679e-05) x**5')
+
+class TestPrintOptions:
+ """
+ Test the output is properly configured via printoptions.
+ The exponential notation is enabled automatically when the values
+ are too small or too large.
+ """
+
+ @pytest.fixture(scope='class', autouse=True)
+ def use_ascii(self):
+ poly.set_default_printstyle('ascii')
+
+ def test_str(self):
+ p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9])
+ assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 '
+ '+ (1.42857143e+08) x**3')
+
+ with printoptions(precision=3):
+ assert_equal(str(p), '0.5 + 0.143 x + 14285714.286 x**2 '
+ '+ (1.429e+08) x**3')
+
+ def test_latex(self):
+ p = poly.Polynomial([1/2, 1/7, 1/7*10**8, 1/7*10**9])
+ assert_equal(p._repr_latex_(),
+ r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + '
+ r'\text{14285714.28571429}\,x^{2} + '
+ r'\text{(1.42857143e+08)}\,x^{3}$')
+
+ with printoptions(precision=3):
+ assert_equal(p._repr_latex_(),
+ r'$x \mapsto \text{0.5} + \text{0.143}\,x + '
+ r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$')
+
+ def test_fixed(self):
+ p = poly.Polynomial([1/2])
+ assert_equal(str(p), '0.5')
+
+ with printoptions(floatmode='fixed'):
+ assert_equal(str(p), '0.50000000')
+
+ with printoptions(floatmode='fixed', precision=4):
+ assert_equal(str(p), '0.5000')
+
+ def test_switch_to_exp(self):
+ for i, s in enumerate(SWITCH_TO_EXP):
+ with printoptions(precision=i):
+                p = poly.Polynomial([1.23456789*10**-k
+                                     for k in range(i//2 + 3)])
+ assert str(p).replace('\n', ' ') == s
+
+ def test_non_finite(self):
+ p = poly.Polynomial([nan, inf])
+ assert str(p) == 'nan + inf x'
+ assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$'
+ with printoptions(nanstr='NAN', infstr='INF'):
+ assert str(p) == 'NAN + INF x'
+ assert p._repr_latex_() == \
+ r'$x \mapsto \text{NAN} + \text{INF}\,x$'
diff --git a/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py
new file mode 100644
index 00000000..4ea6035e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/polynomial/tests/test_symbol.py
@@ -0,0 +1,216 @@
+"""
+Tests related to the ``symbol`` attribute of the ABCPolyBase class.
+"""
+
+import pytest
+import numpy.polynomial as poly
+from numpy.core import array
+from numpy.testing import assert_equal, assert_raises, assert_
+
+
+class TestInit:
+ """
+ Test polynomial creation with symbol kwarg.
+ """
+ c = [1, 2, 3]
+
+ def test_default_symbol(self):
+ p = poly.Polynomial(self.c)
+ assert_equal(p.symbol, 'x')
+
+ @pytest.mark.parametrize(('bad_input', 'exception'), (
+ ('', ValueError),
+ ('3', ValueError),
+ (None, TypeError),
+ (1, TypeError),
+ ))
+ def test_symbol_bad_input(self, bad_input, exception):
+ with pytest.raises(exception):
+ p = poly.Polynomial(self.c, symbol=bad_input)
+
+ @pytest.mark.parametrize('symbol', (
+ 'x',
+ 'x_1',
+ 'A',
+ 'xyz',
+ 'β',
+ ))
+ def test_valid_symbols(self, symbol):
+ """
+ Values for symbol that should pass input validation.
+ """
+ p = poly.Polynomial(self.c, symbol=symbol)
+ assert_equal(p.symbol, symbol)
+
+ def test_property(self):
+ """
+ 'symbol' attribute is read only.
+ """
+ p = poly.Polynomial(self.c, symbol='x')
+ with pytest.raises(AttributeError):
+ p.symbol = 'z'
+
+ def test_change_symbol(self):
+ p = poly.Polynomial(self.c, symbol='y')
+ # Create new polynomial from p with different symbol
+ pt = poly.Polynomial(p.coef, symbol='t')
+ assert_equal(pt.symbol, 't')
+
+
+class TestUnaryOperators:
+ p = poly.Polynomial([1, 2, 3], symbol='z')
+
+ def test_neg(self):
+ n = -self.p
+ assert_equal(n.symbol, 'z')
+
+ def test_scalarmul(self):
+ out = self.p * 10
+ assert_equal(out.symbol, 'z')
+
+ def test_rscalarmul(self):
+ out = 10 * self.p
+ assert_equal(out.symbol, 'z')
+
+ def test_pow(self):
+ out = self.p ** 3
+ assert_equal(out.symbol, 'z')
+
+
+@pytest.mark.parametrize(
+ 'rhs',
+ (
+ poly.Polynomial([4, 5, 6], symbol='z'),
+ array([4, 5, 6]),
+ ),
+)
+class TestBinaryOperatorsSameSymbol:
+ """
+ Ensure symbol is preserved for numeric operations on polynomials with
+ the same symbol
+ """
+ p = poly.Polynomial([1, 2, 3], symbol='z')
+
+ def test_add(self, rhs):
+ out = self.p + rhs
+ assert_equal(out.symbol, 'z')
+
+ def test_sub(self, rhs):
+ out = self.p - rhs
+ assert_equal(out.symbol, 'z')
+
+ def test_polymul(self, rhs):
+ out = self.p * rhs
+ assert_equal(out.symbol, 'z')
+
+ def test_divmod(self, rhs):
+ for out in divmod(self.p, rhs):
+ assert_equal(out.symbol, 'z')
+
+ def test_radd(self, rhs):
+ out = rhs + self.p
+ assert_equal(out.symbol, 'z')
+
+ def test_rsub(self, rhs):
+ out = rhs - self.p
+ assert_equal(out.symbol, 'z')
+
+ def test_rmul(self, rhs):
+ out = rhs * self.p
+ assert_equal(out.symbol, 'z')
+
+ def test_rdivmod(self, rhs):
+ for out in divmod(rhs, self.p):
+ assert_equal(out.symbol, 'z')
+
+
+class TestBinaryOperatorsDifferentSymbol:
+ p = poly.Polynomial([1, 2, 3], symbol='x')
+ other = poly.Polynomial([4, 5, 6], symbol='y')
+ ops = (p.__add__, p.__sub__, p.__mul__, p.__floordiv__, p.__mod__)
+
+ @pytest.mark.parametrize('f', ops)
+ def test_binops_fails(self, f):
+ assert_raises(ValueError, f, self.other)
+
+
+class TestEquality:
+ p = poly.Polynomial([1, 2, 3], symbol='x')
+
+ def test_eq(self):
+ other = poly.Polynomial([1, 2, 3], symbol='x')
+ assert_(self.p == other)
+
+ def test_neq(self):
+ other = poly.Polynomial([1, 2, 3], symbol='y')
+ assert_(not self.p == other)
+
+
+class TestExtraMethods:
+ """
+ Test other methods for manipulating/creating polynomial objects.
+ """
+ p = poly.Polynomial([1, 2, 3, 0], symbol='z')
+
+ def test_copy(self):
+ other = self.p.copy()
+ assert_equal(other.symbol, 'z')
+
+ def test_trim(self):
+ other = self.p.trim()
+ assert_equal(other.symbol, 'z')
+
+ def test_truncate(self):
+ other = self.p.truncate(2)
+ assert_equal(other.symbol, 'z')
+
+ @pytest.mark.parametrize('kwarg', (
+ {'domain': [-10, 10]},
+ {'window': [-10, 10]},
+ {'kind': poly.Chebyshev},
+ ))
+ def test_convert(self, kwarg):
+ other = self.p.convert(**kwarg)
+ assert_equal(other.symbol, 'z')
+
+ def test_integ(self):
+ other = self.p.integ()
+ assert_equal(other.symbol, 'z')
+
+ def test_deriv(self):
+ other = self.p.deriv()
+ assert_equal(other.symbol, 'z')
+
+
+def test_composition():
+ p = poly.Polynomial([3, 2, 1], symbol="t")
+ q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1")
+ r = p(q)
+ assert r.symbol == "λ_1"
+
+
+#
+# Class methods that result in new polynomial class instances
+#
+
+
+def test_fit():
+ x, y = (range(10),)*2
+ p = poly.Polynomial.fit(x, y, deg=1, symbol='z')
+ assert_equal(p.symbol, 'z')
+
+
+def test_fromroots():
+ roots = [-2, 2]
+ p = poly.Polynomial.fromroots(roots, symbol='z')
+ assert_equal(p.symbol, 'z')
+
+
+def test_identity():
+ p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z')
+ assert_equal(p.symbol, 'z')
+
+
+def test_basis():
+ p = poly.Polynomial.basis(3, symbol='z')
+ assert_equal(p.symbol, 'z')
diff --git a/venv/lib/python3.9/site-packages/numpy/py.typed b/venv/lib/python3.9/site-packages/numpy/py.typed
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/py.typed
diff --git a/venv/lib/python3.9/site-packages/numpy/random/__init__.pxd b/venv/lib/python3.9/site-packages/numpy/random/__init__.pxd
new file mode 100644
index 00000000..1f905729
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/__init__.pxd
@@ -0,0 +1,14 @@
+cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+from numpy.random.bit_generator cimport BitGenerator, SeedSequence
diff --git a/venv/lib/python3.9/site-packages/numpy/random/__init__.py b/venv/lib/python3.9/site-packages/numpy/random/__init__.py
new file mode 100644
index 00000000..2e8f99fe
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/__init__.py
@@ -0,0 +1,215 @@
+"""
+========================
+Random Number Generation
+========================
+
+Use ``default_rng()`` to create a `Generator` and call its methods.
+
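+For example, a minimal sketch of typical use::
+
+    from numpy.random import default_rng
+    rng = default_rng(12345)        # seeding is optional
+    vals = rng.standard_normal(3)   # draw three standard-normal values
+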
+=============== =========================================================
+Generator
+--------------- ---------------------------------------------------------
+Generator Class implementing all of the random number distributions
+default_rng Default constructor for ``Generator``
+=============== =========================================================
+
+============================================= ===
+BitGenerator Streams that work with Generator
+--------------------------------------------- ---
+MT19937
+PCG64
+PCG64DXSM
+Philox
+SFC64
+============================================= ===
+
+============================================= ===
+Getting entropy to initialize a BitGenerator
+--------------------------------------------- ---
+SeedSequence
+============================================= ===
+
+
+Legacy
+------
+
+For backwards compatibility with versions of NumPy before 1.17, the various
+aliases to the global `RandomState` methods are left alone and do not use the
+new `Generator` API.
+
+==================== =========================================================
+Utility functions
+-------------------- ---------------------------------------------------------
+random Uniformly distributed floats over ``[0, 1)``
+bytes Uniformly distributed random bytes.
+permutation Randomly permute a sequence / generate a random sequence.
+shuffle Randomly permute a sequence in place.
+choice Random sample from 1-D array.
+==================== =========================================================
+
+==================== =========================================================
+Compatibility
+functions - removed
+in the new API
+-------------------- ---------------------------------------------------------
+rand Uniformly distributed values.
+randn Normally distributed values.
+ranf Uniformly distributed floating point numbers.
+random_integers Uniformly distributed integers in a given range.
+                     (deprecated, use ``integers(..., endpoint=True)`` instead)
+random_sample        Uniformly distributed floats over ``[0, 1)``.
+randint Uniformly distributed integers in a given range
+seed Seed the legacy random number generator.
+==================== =========================================================
+
+==================== =========================================================
+Univariate
+distributions
+-------------------- ---------------------------------------------------------
+beta Beta distribution over ``[0, 1]``.
+binomial Binomial distribution.
+chisquare :math:`\\chi^2` distribution.
+exponential Exponential distribution.
+f F (Fisher-Snedecor) distribution.
+gamma Gamma distribution.
+geometric Geometric distribution.
+gumbel Gumbel distribution.
+hypergeometric Hypergeometric distribution.
+laplace Laplace distribution.
+logistic Logistic distribution.
+lognormal Log-normal distribution.
+logseries Logarithmic series distribution.
+negative_binomial Negative binomial distribution.
+noncentral_chisquare Non-central chi-square distribution.
+noncentral_f Non-central F distribution.
+normal Normal / Gaussian distribution.
+pareto Pareto distribution.
+poisson Poisson distribution.
+power Power distribution.
+rayleigh Rayleigh distribution.
+triangular Triangular distribution.
+uniform Uniform distribution.
+vonmises Von Mises circular distribution.
+wald Wald (inverse Gaussian) distribution.
+weibull Weibull distribution.
+zipf Zipf's distribution over ranked data.
+==================== =========================================================
+
+==================== ==========================================================
+Multivariate
+distributions
+-------------------- ----------------------------------------------------------
+dirichlet Multivariate generalization of Beta distribution.
+multinomial Multivariate generalization of the binomial distribution.
+multivariate_normal Multivariate generalization of the normal distribution.
+==================== ==========================================================
+
+==================== =========================================================
+Standard
+distributions
+-------------------- ---------------------------------------------------------
+standard_cauchy Standard Cauchy-Lorentz distribution.
+standard_exponential Standard exponential distribution.
+standard_gamma Standard Gamma distribution.
+standard_normal Standard normal distribution.
+standard_t Standard Student's t-distribution.
+==================== =========================================================
+
+==================== =========================================================
+Internal functions
+-------------------- ---------------------------------------------------------
+get_state Get tuple representing internal state of generator.
+set_state Set state of generator.
+==================== =========================================================
+
+
+"""
+__all__ = [
+ 'beta',
+ 'binomial',
+ 'bytes',
+ 'chisquare',
+ 'choice',
+ 'dirichlet',
+ 'exponential',
+ 'f',
+ 'gamma',
+ 'geometric',
+ 'get_state',
+ 'gumbel',
+ 'hypergeometric',
+ 'laplace',
+ 'logistic',
+ 'lognormal',
+ 'logseries',
+ 'multinomial',
+ 'multivariate_normal',
+ 'negative_binomial',
+ 'noncentral_chisquare',
+ 'noncentral_f',
+ 'normal',
+ 'pareto',
+ 'permutation',
+ 'poisson',
+ 'power',
+ 'rand',
+ 'randint',
+ 'randn',
+ 'random',
+ 'random_integers',
+ 'random_sample',
+ 'ranf',
+ 'rayleigh',
+ 'sample',
+ 'seed',
+ 'set_state',
+ 'shuffle',
+ 'standard_cauchy',
+ 'standard_exponential',
+ 'standard_gamma',
+ 'standard_normal',
+ 'standard_t',
+ 'triangular',
+ 'uniform',
+ 'vonmises',
+ 'wald',
+ 'weibull',
+ 'zipf',
+]
+
+# add these for module-freeze analysis (like PyInstaller)
+from . import _pickle
+from . import _common
+from . import _bounded_integers
+
+from ._generator import Generator, default_rng
+from .bit_generator import SeedSequence, BitGenerator
+from ._mt19937 import MT19937
+from ._pcg64 import PCG64, PCG64DXSM
+from ._philox import Philox
+from ._sfc64 import SFC64
+from .mtrand import *
+
+__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
+ 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
+ 'BitGenerator']
+
+
+def __RandomState_ctor():
+ """Return a RandomState instance.
+
+ This function exists solely to assist (un)pickling.
+
+    Note that the state of the RandomState returned here is irrelevant, as this
+    function's entire purpose is to return a newly allocated RandomState whose
+    state the pickle machinery can then set. Consequently the RandomState
+    returned by this function is freshly allocated with seed=0.
+
+ See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
+
+ """
+ return RandomState(seed=0)
+
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/random/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/random/__init__.pyi
new file mode 100644
index 00000000..99ef6f3e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/__init__.pyi
@@ -0,0 +1,72 @@
+from numpy._pytesttester import PytestTester
+
+from numpy.random._generator import Generator as Generator
+from numpy.random._generator import default_rng as default_rng
+from numpy.random._mt19937 import MT19937 as MT19937
+from numpy.random._pcg64 import (
+ PCG64 as PCG64,
+ PCG64DXSM as PCG64DXSM,
+)
+from numpy.random._philox import Philox as Philox
+from numpy.random._sfc64 import SFC64 as SFC64
+from numpy.random.bit_generator import BitGenerator as BitGenerator
+from numpy.random.bit_generator import SeedSequence as SeedSequence
+from numpy.random.mtrand import (
+ RandomState as RandomState,
+ beta as beta,
+ binomial as binomial,
+ bytes as bytes,
+ chisquare as chisquare,
+ choice as choice,
+ dirichlet as dirichlet,
+ exponential as exponential,
+ f as f,
+ gamma as gamma,
+ geometric as geometric,
+ get_bit_generator as get_bit_generator,
+ get_state as get_state,
+ gumbel as gumbel,
+ hypergeometric as hypergeometric,
+ laplace as laplace,
+ logistic as logistic,
+ lognormal as lognormal,
+ logseries as logseries,
+ multinomial as multinomial,
+ multivariate_normal as multivariate_normal,
+ negative_binomial as negative_binomial,
+ noncentral_chisquare as noncentral_chisquare,
+ noncentral_f as noncentral_f,
+ normal as normal,
+ pareto as pareto,
+ permutation as permutation,
+ poisson as poisson,
+ power as power,
+ rand as rand,
+ randint as randint,
+ randn as randn,
+ random as random,
+ random_integers as random_integers,
+ random_sample as random_sample,
+ ranf as ranf,
+ rayleigh as rayleigh,
+ sample as sample,
+ seed as seed,
+ set_bit_generator as set_bit_generator,
+ set_state as set_state,
+ shuffle as shuffle,
+ standard_cauchy as standard_cauchy,
+ standard_exponential as standard_exponential,
+ standard_gamma as standard_gamma,
+ standard_normal as standard_normal,
+ standard_t as standard_t,
+ triangular as triangular,
+ uniform as uniform,
+ vonmises as vonmises,
+ wald as wald,
+ weibull as weibull,
+ zipf as zipf,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so
new file mode 100755
index 00000000..66f3ed34
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd b/venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd
new file mode 100644
index 00000000..7e41463a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_bounded_integers.pxd
@@ -0,0 +1,29 @@
+from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int8_t, int16_t, int32_t, int64_t, intptr_t)
+import numpy as np
+cimport numpy as np
+ctypedef np.npy_bool bool_t
+
+from numpy.random cimport bitgen_t
+
+cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
+ """Mask generator for use in bounded random numbers"""
+ # Smallest bit mask >= max
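+    # e.g. max_val = 100 = 0b1100100  ->  mask = 0b1111111 = 127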
+ cdef uint64_t mask = max_val
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+ mask |= mask >> 32
+ return mask
+
+cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so
new file mode 100755
index 00000000..a5f0bfe6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_common.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_common.pxd b/venv/lib/python3.9/site-packages/numpy/random/_common.pxd
new file mode 100644
index 00000000..3eaf39dd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_common.pxd
@@ -0,0 +1,106 @@
+#cython: language_level=3
+
+from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
+
+import numpy as np
+cimport numpy as np
+
+from numpy.random cimport bitgen_t
+
+cdef double POISSON_LAM_MAX
+cdef double LEGACY_POISSON_LAM_MAX
+cdef uint64_t MAXSIZE
+
+cdef enum ConstraintType:
+ CONS_NONE
+ CONS_NON_NEGATIVE
+ CONS_POSITIVE
+ CONS_POSITIVE_NOT_NAN
+ CONS_BOUNDED_0_1
+ CONS_BOUNDED_GT_0_1
+ CONS_BOUNDED_LT_0_1
+ CONS_GT_1
+ CONS_GTE_1
+ CONS_POISSON
+ LEGACY_CONS_POISSON
+
+ctypedef ConstraintType constraint_type
+
+cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
+cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
+cdef object prepare_cffi(bitgen_t *bitgen)
+cdef object prepare_ctypes(bitgen_t *bitgen)
+cdef int check_constraint(double val, object name, constraint_type cons) except -1
+cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
+
+cdef extern from "include/aligned_malloc.h":
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
+ cdef void *PyArray_malloc_aligned(size_t n)
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
+ cdef void PyArray_free_aligned(void *p)
+
+ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
+ctypedef double (*random_double_0)(void *state) nogil
+ctypedef double (*random_double_1)(void *state, double a) nogil
+ctypedef double (*random_double_2)(void *state, double a, double b) nogil
+ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
+
+ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil
+ctypedef float (*random_float_0)(bitgen_t *state) nogil
+ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
+
+ctypedef int64_t (*random_uint_0)(void *state) nogil
+ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
+ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
+ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
+ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
+ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
+
+ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
+ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
+
+ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
+ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
+
+cdef double kahan_sum(double *darr, np.npy_intp n)
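+# kahan_sum: compensated (Kahan) summation, carrying a running error term so
+# that adding many doubles loses far less precision than naive accumulation.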
+
+cdef inline double uint64_to_double(uint64_t rnd) nogil:
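+    # Keep the top 53 bits (a double's significand width) and scale by
+    # 2**-53 (1/9007199254740992) to get a uniform double in [0, 1).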
+ return (rnd >> 11) * (1.0 / 9007199254740992.0)
+
+cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object wrap_int(object val, object bits)
+
+cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
+
+cdef validate_output_shape(iter_shape, np.ndarray output)
+
+cdef object cont(void *func, void *state, object size, object lock, int narg,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint,
+ object out)
+
+cdef object disc(void *func, void *state, object size, object lock,
+ int narg_double, int narg_int64,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint)
+
+cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
+ object a, object a_name, constraint_type a_constraint,
+ object out)
+
+cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
+
+cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py b/venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py
new file mode 100644
index 00000000..8440d400
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/extending.py
@@ -0,0 +1,40 @@
+"""
+Use cffi to access any of the underlying C functions from distributions.h
+"""
+import os
+import numpy as np
+import cffi
+from .parse import parse_distributions_h
+ffi = cffi.FFI()
+
+inc_dir = os.path.join(np.get_include(), 'numpy')
+
+# Basic numpy types
+ffi.cdef('''
+ typedef intptr_t npy_intp;
+ typedef unsigned char npy_bool;
+
+''')
+
+parse_distributions_h(ffi, inc_dir)
+
+lib = ffi.dlopen(np.random._generator.__file__)
+
+# Compare the distributions.h random_standard_normal_fill to
+# Generator.standard_normal
+bit_gen = np.random.PCG64()
+rng = np.random.Generator(bit_gen)
+state = bit_gen.state
+
+interface = rng.bit_generator.cffi
+n = 100
+vals_cffi = ffi.new('double[%d]' % n)
+lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
+
+# reset the state
+bit_gen.state = state
+
+vals = rng.standard_normal(n)
+
+for i in range(n):
+ assert vals[i] == vals_cffi[i]
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py b/venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py
new file mode 100644
index 00000000..daff6bde
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/cffi/parse.py
@@ -0,0 +1,55 @@
+import os
+
+
+def parse_distributions_h(ffi, inc_dir):
+ """
+ Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
+
+ Read the function declarations without the "#define ..." macros that will
+ be filled in when loading the library.
+ """
+
+ with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
+ s = []
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
+ with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
+ s = []
+ in_skip = 0
+ ignoring = False
+ for line in fid:
+ # check for and remove extern "C" guards
+ if ignoring:
+ if line.strip().startswith('#endif'):
+ ignoring = False
+ continue
+ if line.strip().startswith('#ifdef __cplusplus'):
+ ignoring = True
+
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+
+ # skip any inlined function definition
+ # which starts with 'static NPY_INLINE xxx(...) {'
+ # and ends with a closing '}'
+ if line.strip().startswith('static NPY_INLINE'):
+ in_skip += line.count('{')
+ continue
+ elif in_skip > 0:
+ in_skip += line.count('{')
+ in_skip -= line.count('}')
+ continue
+
+ # replace defines with their value or remove them
+ line = line.replace('DECLDIR', '')
+ line = line.replace('NPY_INLINE', '')
+ line = line.replace('RAND_INT_TYPE', 'int64_t')
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx b/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx
new file mode 100644
index 00000000..30efd744
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending.pyx
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#cython: language_level=3
+
+from libc.stdint cimport uint32_t
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from numpy.random cimport bitgen_t
+from numpy.random import PCG64
+
+np.import_array()
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniform_mean(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+ cdef np.ndarray randoms
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ # Best practice is to acquire the lock whenever generating random values.
+ # This prevents other threads from modifying the state. Acquiring the lock
+ # is only necessary if the GIL is also released, as in this example.
+ with x.lock, nogil:
+ for i in range(n):
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+ return randoms.mean()
+
+
+# This function is declared nogil so it can be used without the GIL below
+cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
+ cdef uint32_t mask, delta, val
+ mask = delta = ub - lb
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
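+    # mask is now the smallest all-ones bit pattern >= delta; masked draws
+    # fall in [0, mask], and values above delta are rejected so the result
+    # stays uniform on [lb, ub].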
+
+ val = rng.next_uint32(rng.state) & mask
+ while val > delta:
+ val = rng.next_uint32(rng.state) & mask
+
+ return lb + val
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef uint32_t[::1] out
+ cdef const char *capsule_name = "BitGenerator"
+
+ x = PCG64()
+ out = np.empty(n, dtype=np.uint32)
+ capsule = x.capsule
+
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
+
+ with x.lock, nogil:
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, rng)
+ return np.asarray(out)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx b/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx
new file mode 100644
index 00000000..d908e92d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/extending_distributions.pyx
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+#cython: language_level=3
+"""
+This file shows how to use a BitGenerator to create a distribution.
+"""
+import numpy as np
+cimport numpy as np
+cimport cython
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from libc.stdint cimport uint16_t, uint64_t
+from numpy.random cimport bitgen_t
+from numpy.random import PCG64
+from numpy.random.c_distributions cimport (
+ random_standard_uniform_fill, random_standard_uniform_fill_f)
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniforms(Py_ssize_t n):
+ """
+ Create an array of `n` uniformly distributed doubles.
+ A 'real' distribution would want to process the values into
+    some non-uniform distribution.
+ """
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+    # Optional check that the capsule is from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n, dtype='float64')
+ with x.lock, nogil:
+ for i in range(n):
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+
+ return randoms
+
+# cython example 2
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uint10_uniforms(Py_ssize_t n):
+ """Uniform 10 bit integers stored as 16-bit unsigned integers"""
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef uint16_t[::1] random_values
+ cdef int bits_remaining
+ cdef int width = 10
+ cdef uint64_t buff, mask = 0x3FF
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n, dtype='uint16')
+ # Best practice is to release GIL and acquire the lock
+ bits_remaining = 0
+ with x.lock, nogil:
+ for i in range(n):
+            if bits_remaining < width:
+                buff = rng.next_uint64(rng.state)
+                bits_remaining = 64
+            random_values[i] = buff & mask
+            buff >>= width
+            bits_remaining -= width
+
+ randoms = np.asarray(random_values)
+ return randoms
+
+# cython example 3
+def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
+ """
+ Create an array of `n` uniformly distributed doubles via a "fill" function.
+
+ A 'real' distribution would want to process the values into
+    some non-uniform distribution.
+
+ Parameters
+ ----------
+ bit_generator: BitGenerator instance
+ n: int
+ Output vector length
+ dtype: {str, dtype}, optional
+ Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
+ default dtype value is 'd'
+ """
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef np.ndarray randoms
+
+ capsule = bit_generator.capsule
+    # Optional check that the capsule is from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+
+ _dtype = np.dtype(dtype)
+ randoms = np.empty(n, dtype=_dtype)
+ if _dtype == np.float32:
+ with bit_generator.lock:
+ random_standard_uniform_fill_f(rng, n, <float*>np.PyArray_DATA(randoms))
+ elif _dtype == np.float64:
+ with bit_generator.lock:
+ random_standard_uniform_fill(rng, n, <double*>np.PyArray_DATA(randoms))
+ else:
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
+ return randoms
+
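+# A usage sketch, assuming the extension has been built in place (see the
+# accompanying setup.py); per the docstring, any BitGenerator instance works:
+#
+#     from numpy.random import PCG64
+#     from extending_distributions import uniforms_ex
+#     vals = uniforms_ex(PCG64(), 1000, dtype='f')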
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/setup.py b/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/setup.py
new file mode 100644
index 00000000..e70a1fdd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/cython/setup.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+"""
+Build the Cython demonstrations of low-level access to NumPy random
+
+Usage: python setup.py build_ext -i
+"""
+from os.path import dirname, join, abspath
+
+from setuptools import setup
+from setuptools.extension import Extension
+
+import numpy as np
+from Cython.Build import cythonize
+
+
+path = dirname(__file__)
+src_dir = join(dirname(path), '..', 'src')
+defs = [('NPY_NO_DEPRECATED_API', 0)]
+inc_path = np.get_include()
+# Add paths for npyrandom and npymath libraries:
+lib_path = [
+ abspath(join(np.get_include(), '..', '..', 'random', 'lib')),
+ abspath(join(np.get_include(), '..', 'lib'))
+]
+
+extending = Extension("extending",
+ sources=[join('.', 'extending.pyx')],
+ include_dirs=[
+ np.get_include(),
+ join(path, '..', '..')
+ ],
+ define_macros=defs,
+ )
+distributions = Extension("extending_distributions",
+ sources=[join('.', 'extending_distributions.pyx')],
+ include_dirs=[inc_path],
+ library_dirs=lib_path,
+ libraries=['npyrandom', 'npymath'],
+ define_macros=defs,
+ )
+
+extensions = [extending, distributions]
+
+setup(
+ ext_modules=cythonize(extensions)
+)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py b/venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py
new file mode 100644
index 00000000..f387db69
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending.py
@@ -0,0 +1,84 @@
+import numpy as np
+import numba as nb
+
+from numpy.random import PCG64
+from timeit import timeit
+
+bit_gen = PCG64()
+next_d = bit_gen.cffi.next_double
+state_addr = bit_gen.cffi.state_address
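+# bit_gen.cffi exposes the underlying C interface: next_d(state) returns one
+# double in [0, 1), and state_addr is the integer address that Numba passes
+# as the void* state pointer.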
+
+def normals(n, state):
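+    # Marsaglia's polar method: sample (x1, x2) uniformly in the square,
+    # reject pairs outside the unit circle, then scale each accepted pair
+    # into two independent standard normals.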
+ out = np.empty(n)
+ for i in range((n + 1) // 2):
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ out[2 * i] = f * x1
+ if 2 * i + 1 < n:
+ out[2 * i + 1] = f * x2
+ return out
+
+# Compile using Numba
+normalsj = nb.jit(normals, nopython=True)
+# Must use the state address, not the state object, with Numba
+n = 10000
+
+def numbacall():
+ return normalsj(n, state_addr)
+
+rg = np.random.Generator(PCG64())
+
+def numpycall():
+ return rg.normal(size=n)
+
+# Check that the functions work
+r1 = numbacall()
+r2 = numpycall()
+assert r1.shape == (n,)
+assert r1.shape == r2.shape
+
+t1 = timeit(numbacall, number=1000)
+print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms')
+t2 = timeit(numpycall, number=1000)
+print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms')
+
+# example 2
+
+next_u32 = bit_gen.ctypes.next_uint32
+ctypes_state = bit_gen.ctypes.state
+
+@nb.jit(nopython=True)
+def bounded_uint(lb, ub, state):
+ mask = delta = ub - lb
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = next_u32(state) & mask
+ while val > delta:
+ val = next_u32(state) & mask
+
+ return lb + val
+
+
+print(bounded_uint(323, 2394691, ctypes_state.value))
+
+
+@nb.jit(nopython=True)
+def bounded_uints(lb, ub, n, state):
+    out = np.empty(n, dtype=np.uint32)
+    for i in range(n):
+        out[i] = bounded_uint(lb, ub, state)
+    return out
+
+
+bounded_uints(323, 2394691, 10000000, ctypes_state.value)
+
+
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py b/venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py
new file mode 100644
index 00000000..7cf8bf0b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_examples/numba/extending_distributions.py
@@ -0,0 +1,67 @@
+r"""
+Building the required library in this example requires a source distribution
+of NumPy or a clone of the NumPy git repository, since distributions.c is not
+included in binary distributions.
+
+On *nix, execute in numpy/random/src/distributions
+
+export PYTHON_VERSION=3.8  # Python version
+# Path to Python's include folder, usually
+# ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
+export PYTHON_INCLUDE=${PYTHON_HOME}/include/python${PYTHON_VERSION}m
+# Path to numpy's include folder, usually
+# ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
+export NUMPY_INCLUDE=${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
+gcc -shared -o libdistributions.so -fPIC distributions.c \
+ -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
+mv libdistributions.so ../../_examples/numba/
+
+On Windows
+
+rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
+set PYTHON_HOME=c:\Anaconda
+set PYTHON_VERSION=38
+cl.exe /LD .\distributions.c -DDLL_EXPORT \
+ -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
+move distributions.dll ../../_examples/numba/
+"""
+import os
+
+import numba as nb
+import numpy as np
+from cffi import FFI
+
+from numpy.random import PCG64
+
+ffi = FFI()
+if os.path.exists('./distributions.dll'):
+ lib = ffi.dlopen('./distributions.dll')
+elif os.path.exists('./libdistributions.so'):
+ lib = ffi.dlopen('./libdistributions.so')
+else:
+ raise RuntimeError('Required DLL/so file was not found.')
+
+ffi.cdef("""
+double random_standard_normal(void *bitgen_state);
+""")
+x = PCG64()
+xffi = x.cffi
+bit_generator = xffi.bit_generator
+
+random_standard_normal = lib.random_standard_normal
+
+
+def normals(n, bit_generator):
+ out = np.empty(n)
+ for i in range(n):
+ out[i] = random_standard_normal(bit_generator)
+ return out
+
+
+normalsj = nb.jit(normals, nopython=True)
+
+# Numba requires a memory address for void *
+# Can also get address from x.ctypes.bit_generator.value
+bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
+
+norm = normalsj(1000, bit_generator_address)
+print(norm[:12])
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so
new file mode 100755
index 00000000..52eae9e3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_generator.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_generator.pyi b/venv/lib/python3.9/site-packages/numpy/random/_generator.pyi
new file mode 100644
index 00000000..f0d814fe
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_generator.pyi
@@ -0,0 +1,638 @@
+from collections.abc import Callable
+from typing import Any, Union, overload, TypeVar, Literal
+
+from numpy import (
+ bool_,
+ dtype,
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ int_,
+ ndarray,
+ uint,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+from numpy.random import BitGenerator, SeedSequence
+from numpy._typing import (
+ ArrayLike,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _DoubleCodes,
+ _DTypeLikeBool,
+ _DTypeLikeInt,
+ _DTypeLikeUInt,
+ _Float32Codes,
+ _Float64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _IntCodes,
+ _ShapeLike,
+ _SingleCodes,
+ _SupportsDType,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _UIntCodes,
+)
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+_DTypeLikeFloat32 = Union[
+ dtype[float32],
+ _SupportsDType[dtype[float32]],
+ type[float32],
+ _Float32Codes,
+ _SingleCodes,
+]
+
+_DTypeLikeFloat64 = Union[
+ dtype[float64],
+ _SupportsDType[dtype[float64]],
+ type[float],
+ type[float64],
+ _Float64Codes,
+ _DoubleCodes,
+]
+
+class Generator:
+ def __init__(self, bit_generator: BitGenerator) -> None: ...
+ def __repr__(self) -> str: ...
+ def __str__(self) -> str: ...
+ def __getstate__(self) -> dict[str, Any]: ...
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
+ def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ...
+ @property
+ def bit_generator(self) -> BitGenerator: ...
+ def bytes(self, length: int) -> bytes: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ size: None = ...,
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ size: _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ *,
+ out: ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ size: _ShapeLike = ...,
+ dtype: _DTypeLikeFloat32 = ...,
+ out: None | ndarray[Any, dtype[float32]] = ...,
+ ) -> ndarray[Any, dtype[float32]]: ...
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self,
+ size: _ShapeLike = ...,
+ dtype: _DTypeLikeFloat64 = ...,
+ out: None | ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ...
+ @overload
+ def standard_exponential( # type: ignore[misc]
+ self,
+ size: None = ...,
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
+ method: Literal["zig", "inv"] = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_exponential(
+ self,
+ size: _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_exponential(
+ self,
+ *,
+ out: ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_exponential(
+ self,
+ size: _ShapeLike = ...,
+ *,
+ method: Literal["zig", "inv"] = ...,
+ out: None | ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_exponential(
+ self,
+ size: _ShapeLike = ...,
+ dtype: _DTypeLikeFloat32 = ...,
+ method: Literal["zig", "inv"] = ...,
+ out: None | ndarray[Any, dtype[float32]] = ...,
+ ) -> ndarray[Any, dtype[float32]]: ...
+ @overload
+ def standard_exponential(
+ self,
+ size: _ShapeLike = ...,
+ dtype: _DTypeLikeFloat64 = ...,
+ method: Literal["zig", "inv"] = ...,
+ out: None | ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def random( # type: ignore[misc]
+ self,
+ size: None = ...,
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def random(
+ self,
+ *,
+ out: ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def random(
+ self,
+ size: _ShapeLike = ...,
+ *,
+ out: None | ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def random(
+ self,
+ size: _ShapeLike = ...,
+ dtype: _DTypeLikeFloat32 = ...,
+ out: None | ndarray[Any, dtype[float32]] = ...,
+ ) -> ndarray[Any, dtype[float32]]: ...
+ @overload
+ def random(
+ self,
+ size: _ShapeLike = ...,
+ dtype: _DTypeLikeFloat64 = ...,
+ out: None | ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def beta(
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def exponential(
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: int,
+ high: None | int = ...,
+ ) -> int: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: int,
+ high: None | int = ...,
+ size: None = ...,
+ dtype: _DTypeLikeBool = ...,
+ endpoint: bool = ...,
+ ) -> bool: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: int,
+ high: None | int = ...,
+ size: None = ...,
+ dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
+ endpoint: bool = ...,
+ ) -> int: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: _DTypeLikeBool = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[bool_]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int8]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int16]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int32]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint8]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint16]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint32]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint64]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def integers( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
+ endpoint: bool = ...,
+ ) -> ndarray[Any, dtype[uint]]: ...
+ # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any]
+ @overload
+ def choice(
+ self,
+ a: int,
+ size: None = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ axis: int = ...,
+ shuffle: bool = ...,
+ ) -> int: ...
+ @overload
+ def choice(
+ self,
+ a: int,
+ size: _ShapeLike = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ axis: int = ...,
+ shuffle: bool = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def choice(
+ self,
+ a: ArrayLike,
+ size: None = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ axis: int = ...,
+ shuffle: bool = ...,
+ ) -> Any: ...
+ @overload
+ def choice(
+ self,
+ a: ArrayLike,
+ size: _ShapeLike = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ axis: int = ...,
+ shuffle: bool = ...,
+ ) -> ndarray[Any, Any]: ...
+ @overload
+ def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def uniform(
+ self,
+ low: _ArrayLikeFloat_co = ...,
+ high: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def normal(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_gamma( # type: ignore[misc]
+ self,
+ shape: float,
+ size: None = ...,
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
+ out: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ *,
+ out: ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ dtype: _DTypeLikeFloat32 = ...,
+ out: None | ndarray[Any, dtype[float32]] = ...,
+ ) -> ndarray[Any, dtype[float32]]: ...
+ @overload
+ def standard_gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ dtype: _DTypeLikeFloat64 = ...,
+ out: None | ndarray[Any, dtype[float64]] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def f(
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def noncentral_f(
+ self,
+ dfnum: _ArrayLikeFloat_co,
+ dfden: _ArrayLikeFloat_co,
+ nonc: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def chisquare(
+ self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def noncentral_chisquare(
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_t(
+ self, df: _ArrayLikeFloat_co, size: None = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_t(
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def vonmises(
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def pareto(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def weibull(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def power(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def laplace(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def gumbel(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def logistic(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def lognormal(
+ self,
+ mean: _ArrayLikeFloat_co = ...,
+ sigma: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def rayleigh(
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def wald(
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def triangular(
+ self,
+ left: _ArrayLikeFloat_co,
+ mode: _ArrayLikeFloat_co,
+ right: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def binomial(
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def negative_binomial(
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def poisson(
+ self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def zipf(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def geometric(
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def hypergeometric(
+ self,
+ ngood: _ArrayLikeInt_co,
+ nbad: _ArrayLikeInt_co,
+ nsample: _ArrayLikeInt_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def logseries(
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ def multivariate_normal(
+ self,
+ mean: _ArrayLikeFloat_co,
+ cov: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
+ tol: float = ...,
+ *,
+ method: Literal["svd", "eigh", "cholesky"] = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def multinomial(
+ self, n: _ArrayLikeInt_co,
+ pvals: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int64]]: ...
+ def multivariate_hypergeometric(
+ self,
+ colors: _ArrayLikeInt_co,
+ nsample: int,
+ size: None | _ShapeLike = ...,
+ method: Literal["marginals", "count"] = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ def dirichlet(
+ self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def permuted(
+ self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ...
+ ) -> ndarray[Any, Any]: ...
+ def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
+
+def default_rng(
+ seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...
+) -> Generator: ...
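The overload ladders in this stub exist so a type checker can map each call pattern to a precise return type. A hedged sketch (mine, not part of the stub) of how a few common calls resolve:

```python
import numpy as np

rng = np.random.default_rng(0)

x = rng.standard_normal()                        # float: the size=None overload
a = rng.standard_normal(5)                       # ndarray[Any, dtype[float64]]
b = rng.standard_normal(5, dtype=np.float32)     # ndarray[Any, dtype[float32]]
i = rng.integers(0, 10)                          # int: scalar low/high, no size
u = rng.integers(0, 10, size=3, dtype=np.uint8)  # ndarray[Any, dtype[uint8]]
c = rng.choice(["a", "b", "c"], size=2)          # ndarray[Any, Any]
```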
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so
new file mode 100755
index 00000000..56ffbfec
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_mt19937.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_mt19937.pyi b/venv/lib/python3.9/site-packages/numpy/random/_mt19937.pyi
new file mode 100644
index 00000000..55cfb2db
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_mt19937.pyi
@@ -0,0 +1,22 @@
+from typing import Any, TypedDict
+
+from numpy import dtype, ndarray, uint32
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy._typing import _ArrayLikeInt_co
+
+class _MT19937Internal(TypedDict):
+ key: ndarray[Any, dtype[uint32]]
+ pos: int
+
+class _MT19937State(TypedDict):
+ bit_generator: str
+ state: _MT19937Internal
+
+class MT19937(BitGenerator):
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+ def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
+ def jumped(self, jumps: int = ...) -> MT19937: ...
+ @property
+ def state(self) -> _MT19937State: ...
+ @state.setter
+ def state(self, value: _MT19937State) -> None: ...
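The TypedDicts above pin down the shape of the `state` property. As a hedged illustration (not from the diff): `state` supports a save/restore round-trip, and `jumped()` returns a copy advanced as if 2**128 draws had been made, giving a non-overlapping stream.

```python
from numpy.random import MT19937, Generator

bg = MT19937(1234)
saved = bg.state                      # {'bit_generator': 'MT19937', 'state': {'key': ..., 'pos': ...}}
g = Generator(bg)
first = g.standard_normal(3)
bg.state = saved                      # rewind to the snapshot
assert (g.standard_normal(3) == first).all()
far_away = Generator(bg.jumped())     # independent, non-overlapping stream
```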
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so
new file mode 100755
index 00000000..b5e277c5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_pcg64.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_pcg64.pyi b/venv/lib/python3.9/site-packages/numpy/random/_pcg64.pyi
new file mode 100644
index 00000000..470aee86
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_pcg64.pyi
@@ -0,0 +1,42 @@
+from typing import TypedDict
+
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy._typing import _ArrayLikeInt_co
+
+class _PCG64Internal(TypedDict):
+ state: int
+ inc: int
+
+class _PCG64State(TypedDict):
+ bit_generator: str
+ state: _PCG64Internal
+ has_uint32: int
+ uinteger: int
+
+class PCG64(BitGenerator):
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+ def jumped(self, jumps: int = ...) -> PCG64: ...
+ @property
+ def state(
+ self,
+ ) -> _PCG64State: ...
+ @state.setter
+ def state(
+ self,
+ value: _PCG64State,
+ ) -> None: ...
+ def advance(self, delta: int) -> PCG64: ...
+
+class PCG64DXSM(BitGenerator):
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+ def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
+ @property
+ def state(
+ self,
+ ) -> _PCG64State: ...
+ @state.setter
+ def state(
+ self,
+ value: _PCG64State,
+ ) -> None: ...
+ def advance(self, delta: int) -> PCG64DXSM: ...
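Both PCG64 variants expose `advance` in addition to `jumped`. A small sketch of the difference, mine rather than upstream's: `advance` moves the stream an exact number of steps (in O(log delta) time), while `jumped` hops a fixed, far distance.

```python
from numpy.random import PCG64, Generator

bg = PCG64(42)
bg.advance(1_000_000)                 # skip ahead exactly 1e6 steps
g = Generator(bg)

# jumped() instead returns a fresh bit generator a fixed, far distance away:
g2 = Generator(PCG64(42).jumped())
```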
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so
new file mode 100755
index 00000000..08f32d45
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_philox.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_philox.pyi b/venv/lib/python3.9/site-packages/numpy/random/_philox.pyi
new file mode 100644
index 00000000..26ce726e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_philox.pyi
@@ -0,0 +1,36 @@
+from typing import Any, TypedDict
+
+from numpy import dtype, ndarray, uint64
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy._typing import _ArrayLikeInt_co
+
+class _PhiloxInternal(TypedDict):
+ counter: ndarray[Any, dtype[uint64]]
+ key: ndarray[Any, dtype[uint64]]
+
+class _PhiloxState(TypedDict):
+ bit_generator: str
+ state: _PhiloxInternal
+ buffer: ndarray[Any, dtype[uint64]]
+ buffer_pos: int
+ has_uint32: int
+ uinteger: int
+
+class Philox(BitGenerator):
+ def __init__(
+ self,
+ seed: None | _ArrayLikeInt_co | SeedSequence = ...,
+ counter: None | _ArrayLikeInt_co = ...,
+ key: None | _ArrayLikeInt_co = ...,
+ ) -> None: ...
+ @property
+ def state(
+ self,
+ ) -> _PhiloxState: ...
+ @state.setter
+ def state(
+ self,
+ value: _PhiloxState,
+ ) -> None: ...
+ def jumped(self, jumps: int = ...) -> Philox: ...
+ def advance(self, delta: int) -> Philox: ...
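Philox accepts explicit `key`/`counter` arguments because it is a counter-based generator; the `seed` path is a convenience that derives a key. A hedged sketch of what that enables:

```python
from numpy.random import Philox, Generator

g = Generator(Philox(key=0xDEADBEEF))                     # stream fixed entirely by the key
same = Generator(Philox(key=0xDEADBEEF))                  # bit-identical stream
shifted = Generator(Philox(key=0xDEADBEEF, counter=10))   # same key, offset counter
```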
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_pickle.py b/venv/lib/python3.9/site-packages/numpy/random/_pickle.py
new file mode 100644
index 00000000..07399372
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_pickle.py
@@ -0,0 +1,80 @@
+from .mtrand import RandomState
+from ._philox import Philox
+from ._pcg64 import PCG64, PCG64DXSM
+from ._sfc64 import SFC64
+
+from ._generator import Generator
+from ._mt19937 import MT19937
+
+BitGenerators = {'MT19937': MT19937,
+ 'PCG64': PCG64,
+ 'PCG64DXSM': PCG64DXSM,
+ 'Philox': Philox,
+ 'SFC64': SFC64,
+ }
+
+
+def __bit_generator_ctor(bit_generator_name='MT19937'):
+ """
+ Pickling helper function that returns a bit generator object
+
+ Parameters
+ ----------
+ bit_generator_name : str
+ String containing the name of the BitGenerator
+
+ Returns
+ -------
+ bit_generator : BitGenerator
+ BitGenerator instance
+ """
+ if bit_generator_name in BitGenerators:
+ bit_generator = BitGenerators[bit_generator_name]
+ else:
+ raise ValueError(str(bit_generator_name) + ' is not a known '
+ 'BitGenerator.')
+
+ return bit_generator()
+
+
+def __generator_ctor(bit_generator_name="MT19937",
+ bit_generator_ctor=__bit_generator_ctor):
+ """
+ Pickling helper function that returns a Generator object
+
+ Parameters
+ ----------
+ bit_generator_name : str
+ String containing the core BitGenerator's name
+ bit_generator_ctor : callable, optional
+ Callable function that takes bit_generator_name as its only argument
+ and returns an instantiated bit generator.
+
+ Returns
+ -------
+ rg : Generator
+ Generator using the named core BitGenerator
+ """
+ return Generator(bit_generator_ctor(bit_generator_name))
+
+
+def __randomstate_ctor(bit_generator_name="MT19937",
+ bit_generator_ctor=__bit_generator_ctor):
+ """
+ Pickling helper function that returns a legacy RandomState-like object
+
+ Parameters
+ ----------
+ bit_generator_name : str
+ String containing the core BitGenerator's name
+ bit_generator_ctor : callable, optional
+ Callable function that takes bit_generator_name as its only argument
+ and returns an instantiated bit generator.
+
+ Returns
+ -------
+ rs : RandomState
+ Legacy RandomState using the named core BitGenerator
+ """
+
+ return RandomState(bit_generator_ctor(bit_generator_name))
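These three helpers are what `__reduce__` points at, so a pickle round-trip only has to store the bit generator's name plus its state. A minimal sketch (mine) of the effect:

```python
import pickle
from numpy.random import Generator, PCG64

g = Generator(PCG64(1))
g.standard_normal(3)                      # consume some state
clone = pickle.loads(pickle.dumps(g))     # rebuilt via __generator_ctor('PCG64')
assert clone.standard_normal() == g.standard_normal()
```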
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so
new file mode 100755
index 00000000..488f9c53
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_sfc64.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/_sfc64.pyi b/venv/lib/python3.9/site-packages/numpy/random/_sfc64.pyi
new file mode 100644
index 00000000..e1810e7d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/_sfc64.pyi
@@ -0,0 +1,28 @@
+from typing import Any, TypedDict
+
+from numpy import dtype as dtype
+from numpy import ndarray as ndarray
+from numpy import uint64
+from numpy.random.bit_generator import BitGenerator, SeedSequence
+from numpy._typing import _ArrayLikeInt_co
+
+class _SFC64Internal(TypedDict):
+ state: ndarray[Any, dtype[uint64]]
+
+class _SFC64State(TypedDict):
+ bit_generator: str
+ state: _SFC64Internal
+ has_uint32: int
+ uinteger: int
+
+class SFC64(BitGenerator):
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+ @property
+ def state(
+ self,
+ ) -> _SFC64State: ...
+ @state.setter
+ def state(
+ self,
+ value: _SFC64State,
+ ) -> None: ...
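SFC64 defines no `jumped` or `advance`, so independent parallel streams are normally derived by spawning children of a single `SeedSequence`. A hedged sketch:

```python
from numpy.random import SFC64, SeedSequence, Generator

ss = SeedSequence(20230428)
streams = [Generator(SFC64(child)) for child in ss.spawn(4)]
draws = [g.random() for g in streams]     # four independently seeded streams
```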
diff --git a/venv/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so
new file mode 100755
index 00000000..692a8d17
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/bit_generator.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/bit_generator.pxd b/venv/lib/python3.9/site-packages/numpy/random/bit_generator.pxd
new file mode 100644
index 00000000..dfa7d0a7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/bit_generator.pxd
@@ -0,0 +1,35 @@
+cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+cdef class BitGenerator():
+ cdef readonly object _seed_seq
+ cdef readonly object lock
+ cdef bitgen_t _bitgen
+ cdef readonly object _ctypes
+ cdef readonly object _cffi
+ cdef readonly object capsule
+
+
+cdef class SeedSequence():
+ cdef readonly object entropy
+ cdef readonly tuple spawn_key
+ cdef readonly Py_ssize_t pool_size
+ cdef readonly object pool
+ cdef readonly uint32_t n_children_spawned
+
+ cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
+ np.ndarray[np.npy_uint32, ndim=1] entropy_array)
+ cdef get_assembled_entropy(self)
+
+cdef class SeedlessSequence():
+ pass
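The `bitgen` struct declared here is the same function table that `BitGenerator.ctypes` and `.cffi` expose to Python. A hedged Python-side sketch of calling into it directly:

```python
from numpy.random import PCG64

bg = PCG64(7)
iface = bg.ctypes                   # _Interface: state, next_uint64, next_double, ...
with bg.lock:                       # the raw function pointers are not thread-safe
    raw = iface.next_uint64(iface.state)
    d = iface.next_double(iface.state)
print(raw, d)
```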
diff --git a/venv/lib/python3.9/site-packages/numpy/random/bit_generator.pyi b/venv/lib/python3.9/site-packages/numpy/random/bit_generator.pyi
new file mode 100644
index 00000000..e6e3b10c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/bit_generator.pyi
@@ -0,0 +1,109 @@
+import abc
+from threading import Lock
+from collections.abc import Callable, Mapping, Sequence
+from typing import (
+ Any,
+ NamedTuple,
+ TypedDict,
+ TypeVar,
+ Union,
+ overload,
+ Literal,
+)
+
+from numpy import dtype, ndarray, uint32, uint64
+from numpy._typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
+
+_T = TypeVar("_T")
+
+_DTypeLikeUint32 = Union[
+ dtype[uint32],
+ _SupportsDType[dtype[uint32]],
+ type[uint32],
+ _UInt32Codes,
+]
+_DTypeLikeUint64 = Union[
+ dtype[uint64],
+ _SupportsDType[dtype[uint64]],
+ type[uint64],
+ _UInt64Codes,
+]
+
+class _SeedSeqState(TypedDict):
+ entropy: None | int | Sequence[int]
+ spawn_key: tuple[int, ...]
+ pool_size: int
+ n_children_spawned: int
+
+class _Interface(NamedTuple):
+ state_address: Any
+ state: Any
+ next_uint64: Any
+ next_uint32: Any
+ next_double: Any
+ bit_generator: Any
+
+class ISeedSequence(abc.ABC):
+ @abc.abstractmethod
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
+
+class ISpawnableSeedSequence(ISeedSequence):
+ @abc.abstractmethod
+ def spawn(self: _T, n_children: int) -> list[_T]: ...
+
+class SeedlessSeedSequence(ISpawnableSeedSequence):
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
+ def spawn(self: _T, n_children: int) -> list[_T]: ...
+
+class SeedSequence(ISpawnableSeedSequence):
+ entropy: None | int | Sequence[int]
+ spawn_key: tuple[int, ...]
+ pool_size: int
+ n_children_spawned: int
+ pool: ndarray[Any, dtype[uint32]]
+ def __init__(
+ self,
+ entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ...,
+ *,
+ spawn_key: Sequence[int] = ...,
+ pool_size: int = ...,
+ n_children_spawned: int = ...,
+ ) -> None: ...
+ def __repr__(self) -> str: ...
+ @property
+ def state(
+ self,
+ ) -> _SeedSeqState: ...
+ def generate_state(
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
+ def spawn(self, n_children: int) -> list[SeedSequence]: ...
+
+class BitGenerator(abc.ABC):
+ lock: Lock
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+ def __getstate__(self) -> dict[str, Any]: ...
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
+ def __reduce__(
+ self,
+ ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ...
+ @property
+ @abc.abstractmethod
+ def state(self) -> Mapping[str, Any]: ...
+ @state.setter
+ def state(self, value: Mapping[str, Any]) -> None: ...
+ @overload
+ def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc]
+ @overload
+ def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc]
+ def _benchmark(self, cnt: int, method: str = ...) -> None: ...
+ @property
+ def ctypes(self) -> _Interface: ...
+ @property
+ def cffi(self) -> _Interface: ...
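The abstract classes above describe the seeding protocol end to end. A hedged sketch of both halves, `generate_state` for seed words and `random_raw` for untouched bit generator output:

```python
import numpy as np
from numpy.random import SeedSequence, PCG64

ss = SeedSequence(entropy=98765)
words = ss.generate_state(4, dtype=np.uint64)   # 4 uint64 words of seed material
children = ss.spawn(2)                          # independent child sequences

bg = PCG64(ss)
print(bg.random_raw())        # a single Python int
print(bg.random_raw(3))       # ndarray of 3 raw uint64 draws
```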
diff --git a/venv/lib/python3.9/site-packages/numpy/random/c_distributions.pxd b/venv/lib/python3.9/site-packages/numpy/random/c_distributions.pxd
new file mode 100644
index 00000000..6f905edc
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/c_distributions.pxd
@@ -0,0 +1,114 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+from numpy cimport npy_intp
+
+from libc.stdint cimport (uint64_t, int32_t, int64_t)
+from numpy.random cimport bitgen_t
+
+cdef extern from "numpy/random/distributions.h":
+
+ struct s_binomial_t:
+ int has_binomial
+ double psave
+ int64_t nsave
+ double r
+ double q
+ double fm
+ int64_t m
+ double p1
+ double xm
+ double xl
+ double xr
+ double c
+ double laml
+ double lamr
+ double p2
+ double p3
+ double p4
+
+ ctypedef s_binomial_t binomial_t
+
+ double random_standard_uniform(bitgen_t *bitgen_state) nogil
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
+ double random_standard_exponential(bitgen_t *bitgen_state) nogil
+ double random_standard_exponential_f(bitgen_t *bitgen_state) nogil
+ void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
+ double random_standard_normal(bitgen_t* bitgen_state) nogil
+ void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
+ void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
+ double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
+
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
+ void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
+ float random_standard_normal_f(bitgen_t* bitgen_state) nogil
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
+
+ int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
+ int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+ uint64_t random_uint(bitgen_t *bitgen_state) nogil
+
+ double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
+
+ double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
+ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
+
+ double random_exponential(bitgen_t *bitgen_state, double scale) nogil
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+ double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
+ double random_chisquare(bitgen_t *bitgen_state, double df) nogil
+ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
+ double random_standard_cauchy(bitgen_t *bitgen_state) nogil
+ double random_pareto(bitgen_t *bitgen_state, double a) nogil
+ double random_weibull(bitgen_t *bitgen_state, double a) nogil
+ double random_power(bitgen_t *bitgen_state, double a) nogil
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+ double random_standard_t(bitgen_t *bitgen_state, double df) nogil
+ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc) nogil
+ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc) nogil
+ double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) nogil
+
+ int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
+ int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
+ int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
+ int64_t sample) nogil
+
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+ # Generate random uint64 numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng,
+ uint64_t mask, bint use_masked) nogil
+
+ void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
+ double *pix, npy_intp d, binomial_t *binomial) nogil
+
+ int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates) nogil
+ void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
+ int64_t total,
+ size_t num_colors, int64_t *colors,
+ int64_t nsample,
+ size_t num_variates, int64_t *variates) nogil
+
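These declarations target Cython `cimport`s, but the same C symbols are what the cffi example earlier in this diff loads from the compiled shared library. As an illustrative, hedged extension of that example (assuming `libdistributions.so`/`distributions.dll` was built as described there):

```python
from cffi import FFI

ffi = FFI()
# A few more of the signatures above, declared for cffi (bitgen_t* becomes void*):
ffi.cdef("""
double random_standard_uniform(void *bitgen_state);
double random_gamma(void *bitgen_state, double shape, double scale);
int64_t random_poisson(void *bitgen_state, double lam);
""")
# lib = ffi.dlopen('./libdistributions.so')   # then call as in the numba example
```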
diff --git a/venv/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a b/venv/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a
new file mode 100644
index 00000000..7a8321a0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/lib/libnpyrandom.a
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so b/venv/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so
new file mode 100755
index 00000000..40fd593b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/mtrand.cpython-39-darwin.so
Binary files differ
diff --git a/venv/lib/python3.9/site-packages/numpy/random/mtrand.pyi b/venv/lib/python3.9/site-packages/numpy/random/mtrand.pyi
new file mode 100644
index 00000000..271cb978
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/mtrand.pyi
@@ -0,0 +1,570 @@
+from collections.abc import Callable
+from typing import Any, Union, overload, Literal
+
+from numpy import (
+ bool_,
+ dtype,
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ int_,
+ ndarray,
+ uint,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+from numpy.random.bit_generator import BitGenerator
+from numpy._typing import (
+ ArrayLike,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _DoubleCodes,
+ _DTypeLikeBool,
+ _DTypeLikeInt,
+ _DTypeLikeUInt,
+ _Float32Codes,
+ _Float64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _IntCodes,
+ _ShapeLike,
+ _SingleCodes,
+ _SupportsDType,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _UIntCodes,
+)
+
+_DTypeLikeFloat32 = Union[
+ dtype[float32],
+ _SupportsDType[dtype[float32]],
+ type[float32],
+ _Float32Codes,
+ _SingleCodes,
+]
+
+_DTypeLikeFloat64 = Union[
+ dtype[float64],
+ _SupportsDType[dtype[float64]],
+ type[float],
+ type[float64],
+ _Float64Codes,
+ _DoubleCodes,
+]
+
+class RandomState:
+ _bit_generator: BitGenerator
+ def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ...
+ def __repr__(self) -> str: ...
+ def __str__(self) -> str: ...
+ def __getstate__(self) -> dict[str, Any]: ...
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
+ def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ...
+ def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ...
+ @overload
+ def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
+ @overload
+ def get_state(
+ self, legacy: Literal[True] = ...
+ ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ...
+ def set_state(
+ self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]
+ ) -> None: ...
+ @overload
+ def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def random(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def beta(
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def exponential(
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: int,
+ high: None | int = ...,
+ ) -> int: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: int,
+ high: None | int = ...,
+ size: None = ...,
+ dtype: _DTypeLikeBool = ...,
+ ) -> bool: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: int,
+ high: None | int = ...,
+ size: None = ...,
+ dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
+ ) -> int: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: _DTypeLikeBool = ...,
+ ) -> ndarray[Any, dtype[bool_]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
+ ) -> ndarray[Any, dtype[int8]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
+ ) -> ndarray[Any, dtype[int16]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
+ ) -> ndarray[Any, dtype[int32]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
+ ) -> ndarray[Any, dtype[int64]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
+ ) -> ndarray[Any, dtype[uint8]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
+ ) -> ndarray[Any, dtype[uint16]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
+ ) -> ndarray[Any, dtype[uint32]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
+ ) -> ndarray[Any, dtype[uint64]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def randint( # type: ignore[misc]
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
+ ) -> ndarray[Any, dtype[uint]]: ...
+ def bytes(self, length: int) -> bytes: ...
+ @overload
+ def choice(
+ self,
+ a: int,
+ size: None = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ ) -> int: ...
+ @overload
+ def choice(
+ self,
+ a: int,
+ size: _ShapeLike = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def choice(
+ self,
+ a: ArrayLike,
+ size: None = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ ) -> Any: ...
+ @overload
+ def choice(
+ self,
+ a: ArrayLike,
+ size: _ShapeLike = ...,
+ replace: bool = ...,
+ p: None | _ArrayLikeFloat_co = ...,
+ ) -> ndarray[Any, Any]: ...
+ @overload
+ def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def uniform(
+ self,
+ low: _ArrayLikeFloat_co = ...,
+ high: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def rand(self) -> float: ...
+ @overload
+ def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def randn(self) -> float: ...
+ @overload
+ def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def random_integers(
+ self,
+ low: _ArrayLikeInt_co,
+ high: None | _ArrayLikeInt_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_normal( # type: ignore[misc]
+ self, size: _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def normal(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_gamma( # type: ignore[misc]
+ self,
+ shape: float,
+ size: None = ...,
+ ) -> float: ...
+ @overload
+ def standard_gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def gamma(
+ self,
+ shape: _ArrayLikeFloat_co,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def f(
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def noncentral_f(
+ self,
+ dfnum: _ArrayLikeFloat_co,
+ dfden: _ArrayLikeFloat_co,
+ nonc: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def chisquare(
+ self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def noncentral_chisquare(
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_t(
+ self, df: _ArrayLikeFloat_co, size: None = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_t(
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def vonmises(
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def pareto(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def weibull(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def power(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def laplace(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def gumbel(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def logistic(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def lognormal(
+ self,
+ mean: _ArrayLikeFloat_co = ...,
+ sigma: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def rayleigh(
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def wald(
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def triangular(
+ self,
+ left: _ArrayLikeFloat_co,
+ mode: _ArrayLikeFloat_co,
+ right: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def binomial(
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def negative_binomial(
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def poisson(
+ self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def zipf(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def geometric(
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def hypergeometric(
+ self,
+ ngood: _ArrayLikeInt_co,
+ nbad: _ArrayLikeInt_co,
+ nsample: _ArrayLikeInt_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def logseries(
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ def multivariate_normal(
+ self,
+ mean: _ArrayLikeFloat_co,
+ cov: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
+ tol: float = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def multinomial(
+ self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ def dirichlet(
+ self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def shuffle(self, x: ArrayLike) -> None: ...
+ @overload
+ def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ...
+
+_rand: RandomState
+
+beta = _rand.beta
+binomial = _rand.binomial
+bytes = _rand.bytes
+chisquare = _rand.chisquare
+choice = _rand.choice
+dirichlet = _rand.dirichlet
+exponential = _rand.exponential
+f = _rand.f
+gamma = _rand.gamma
+get_state = _rand.get_state
+geometric = _rand.geometric
+gumbel = _rand.gumbel
+hypergeometric = _rand.hypergeometric
+laplace = _rand.laplace
+logistic = _rand.logistic
+lognormal = _rand.lognormal
+logseries = _rand.logseries
+multinomial = _rand.multinomial
+multivariate_normal = _rand.multivariate_normal
+negative_binomial = _rand.negative_binomial
+noncentral_chisquare = _rand.noncentral_chisquare
+noncentral_f = _rand.noncentral_f
+normal = _rand.normal
+pareto = _rand.pareto
+permutation = _rand.permutation
+poisson = _rand.poisson
+power = _rand.power
+rand = _rand.rand
+randint = _rand.randint
+randn = _rand.randn
+random = _rand.random
+random_integers = _rand.random_integers
+random_sample = _rand.random_sample
+rayleigh = _rand.rayleigh
+seed = _rand.seed
+set_state = _rand.set_state
+shuffle = _rand.shuffle
+standard_cauchy = _rand.standard_cauchy
+standard_exponential = _rand.standard_exponential
+standard_gamma = _rand.standard_gamma
+standard_normal = _rand.standard_normal
+standard_t = _rand.standard_t
+triangular = _rand.triangular
+uniform = _rand.uniform
+vonmises = _rand.vonmises
+wald = _rand.wald
+weibull = _rand.weibull
+zipf = _rand.zipf
+# Two legacy aliases that are trivial wrappers around random_sample
+sample = _rand.random_sample
+ranf = _rand.random_sample
+
+def set_bit_generator(bitgen: BitGenerator) -> None:
+ ...
+
+def get_bit_generator() -> BitGenerator:
+ ...
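The long block of module-level assignments exists because the legacy `numpy.random.*` functions are all bound methods of one hidden `RandomState`. A hedged sketch of the consequence:

```python
import numpy as np

np.random.seed(0)                     # reseeds the shared _rand instance
a = np.random.normal(size=3)          # ...which every legacy alias draws from
np.random.seed(0)
assert np.allclose(a, np.random.normal(size=3))

# default_rng() is unaffected: it owns its own Generator/BitGenerator pair.
rng = np.random.default_rng(0)
```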
diff --git a/venv/lib/python3.9/site-packages/numpy/random/setup.py b/venv/lib/python3.9/site-packages/numpy/random/setup.py
new file mode 100644
index 00000000..cd9ad976
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/setup.py
@@ -0,0 +1,159 @@
+import os
+import sys
+from os.path import join
+
+from numpy.distutils.system_info import platform_bits
+from numpy.distutils.msvccompiler import lib_opts_if_msvc
+
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration, get_mathlibs
+ config = Configuration('random', parent_package, top_path)
+
+ def generate_libraries(ext, build_dir):
+ config_cmd = config.get_config_cmd()
+ libs = get_mathlibs()
+ if sys.platform == 'win32':
+ libs.extend(['Advapi32', 'Kernel32'])
+ ext.libraries.extend(libs)
+ return None
+
+ # enable unix large file support on 32 bit systems
+ # (64 bit off_t, lseek -> lseek64 etc.)
+ if sys.platform[:3] == 'aix':
+ defs = [('_LARGE_FILES', None)]
+ else:
+ defs = [('_FILE_OFFSET_BITS', '64'),
+ ('_LARGEFILE_SOURCE', '1'),
+ ('_LARGEFILE64_SOURCE', '1')]
+
+ defs.append(('NPY_NO_DEPRECATED_API', 0))
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
+ config.add_data_dir('_examples')
+
+ EXTRA_LINK_ARGS = []
+ EXTRA_LIBRARIES = ['npyrandom']
+ if os.name != 'nt':
+ # Math lib
+ EXTRA_LIBRARIES.append('m')
+ # Some bit generators exclude GCC inlining
+ EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
+
+ if sys.platform == 'cygwin':
+ # Export symbols without __declspec(dllexport), for use by Cython.
+ # Using __declspec(dllexport) does not export the other necessary
+ # symbols in the Cygwin package's Cython environment, making it
+ # impossible to import modules.
+ EXTRA_LINK_ARGS += ['-Wl,--export-all-symbols']
+
+ # Use legacy integer variable sizes
+ LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
+ PCG64_DEFS = []
+    # Uncomment to force emulated 128-bit arithmetic:
+    # PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
+ depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd']
+
+ # npyrandom - a library like npymath
+ npyrandom_sources = [
+ 'src/distributions/logfactorial.c',
+ 'src/distributions/distributions.c',
+ 'src/distributions/random_mvhg_count.c',
+ 'src/distributions/random_mvhg_marginals.c',
+ 'src/distributions/random_hypergeometric.c',
+ ]
+
+ def lib_opts(build_cmd):
+ """ Add flags that depend on the compiler.
+
+        The compiler is not known at this scope because the distutils build
+        command has not been initialized yet, so this deferred calculation
+        runs when the library is actually being built.
+ """
+ opts = lib_opts_if_msvc(build_cmd)
+ if build_cmd.compiler.compiler_type != 'msvc':
+            # Some bit generators require C99
+ opts.append('-std=c99')
+ return opts
+
+ config.add_installed_library('npyrandom',
+ sources=npyrandom_sources,
+ install_dir='lib',
+ build_info={
+ 'include_dirs' : [], # empty list required for creating npyrandom.h
+ 'extra_compiler_args': [lib_opts],
+ })
+
+ for gen in ['mt19937']:
+ # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
+ config.add_extension(f'_{gen}',
+ sources=[f'_{gen}.c',
+ f'src/{gen}/{gen}.c',
+ f'src/{gen}/{gen}-jump.c'],
+ include_dirs=['.', 'src', join('src', gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=depends + [f'_{gen}.pyx'],
+ define_macros=defs,
+ )
+ for gen in ['philox', 'pcg64', 'sfc64']:
+ # gen.pyx, src/gen/gen.c
+ _defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
+ config.add_extension(f'_{gen}',
+ sources=[f'_{gen}.c',
+ f'src/{gen}/{gen}.c'],
+ include_dirs=['.', 'src', join('src', gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=depends + [f'_{gen}.pyx',
+ 'bit_generator.pyx', 'bit_generator.pxd'],
+ define_macros=_defs,
+ )
+ for gen in ['_common', 'bit_generator']:
+ # gen.pyx
+ config.add_extension(gen,
+ sources=[f'{gen}.c'],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ include_dirs=['.', 'src'],
+ depends=depends + [f'{gen}.pyx', f'{gen}.pxd',],
+ define_macros=defs,
+ )
+ config.add_data_files(f'{gen}.pxd')
+ for gen in ['_generator', '_bounded_integers']:
+ # gen.pyx, src/distributions/distributions.c
+ config.add_extension(gen,
+ sources=[f'{gen}.c'],
+ libraries=EXTRA_LIBRARIES + ['npymath'],
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ include_dirs=['.', 'src'],
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=depends + [f'{gen}.pyx'],
+ define_macros=defs,
+ )
+ config.add_data_files('_bounded_integers.pxd')
+ mtrand_libs = ['m', 'npymath'] if os.name != 'nt' else ['npymath']
+ config.add_extension('mtrand',
+ sources=['mtrand.c',
+ 'src/legacy/legacy-distributions.c',
+ 'src/distributions/distributions.c',
+ ],
+ include_dirs=['.', 'src', 'src/legacy'],
+ libraries=mtrand_libs,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=depends + ['mtrand.pyx'],
+ define_macros=defs + LEGACY_DEFS,
+ )
+ config.add_data_files(*depends)
+ config.add_data_files('*.pyi')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+
+ setup(configuration=configuration)
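
The `lib_opts` helper in this setup.py is a deferral pattern worth noting: compiler-specific flags cannot be chosen at configuration time, so a callable is stored in `extra_compiler_args` and invoked with the initialized build command once the compiler is known. A standalone sketch of the same idea (the Fake* classes are hypothetical stand-ins for the distutils build command, for illustration only):

    def lib_opts(build_cmd):
        # Runs at build time, once build_cmd.compiler exists.
        if build_cmd.compiler.compiler_type != 'msvc':
            return ['-std=c99']  # some bit generators require C99
        return []

    class FakeCompiler:
        compiler_type = 'unix'

    class FakeBuildCmd:
        compiler = FakeCompiler()

    print(lib_opts(FakeBuildCmd()))  # -> ['-std=c99']
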
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/random/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/__init__.py b/venv/lib/python3.9/site-packages/numpy/random/tests/data/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-1.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-1.csv
new file mode 100644
index 00000000..b97bfa66
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xc816921f
+1, 0xb3623c6d
+2, 0x5fa391bb
+3, 0x40178d9
+4, 0x7dcc9811
+5, 0x548eb8e6
+6, 0x92ba3125
+7, 0x65fde68d
+8, 0x2f81ec95
+9, 0xbd94f7a2
+10, 0xdc4d9bcc
+11, 0xa672bf13
+12, 0xb41113e
+13, 0xec7e0066
+14, 0x50239372
+15, 0xd9d66b1d
+16, 0xab72a161
+17, 0xddc2e29f
+18, 0x7ea29ab4
+19, 0x80d141ba
+20, 0xb1c7edf1
+21, 0x44d29203
+22, 0xe224d98
+23, 0x5b3e9d26
+24, 0x14fd567c
+25, 0x27d98c96
+26, 0x838779fc
+27, 0x92a138a
+28, 0x5d08965b
+29, 0x531e0ad6
+30, 0x984ee8f4
+31, 0x1ed78539
+32, 0x32bd6d8d
+33, 0xc37c8516
+34, 0x9aef5c6b
+35, 0x3aacd139
+36, 0xd96ed154
+37, 0x489cd1ed
+38, 0x2cba4b3b
+39, 0x76c6ae72
+40, 0x2dae02b9
+41, 0x52ac5fd6
+42, 0xc2b5e265
+43, 0x630e6a28
+44, 0x3f560d5d
+45, 0x9315bdf3
+46, 0xf1055aba
+47, 0x840e42c6
+48, 0xf2099c6b
+49, 0x15ff7696
+50, 0x7948d146
+51, 0x97342961
+52, 0x7a7a21c
+53, 0xc66f4fb1
+54, 0x23c4103e
+55, 0xd7321f98
+56, 0xeb7efb75
+57, 0xe02490b5
+58, 0x2aa02de
+59, 0x8bee0bf7
+60, 0xfc2da059
+61, 0xae835034
+62, 0x678f2075
+63, 0x6d03094b
+64, 0x56455e05
+65, 0x18b32373
+66, 0x8ff0356b
+67, 0x1fe442fb
+68, 0x3f1ab6c3
+69, 0xb6fd21b
+70, 0xfc310eb2
+71, 0xb19e9a4d
+72, 0x17ddee72
+73, 0xfd534251
+74, 0x9e500564
+75, 0x9013a036
+76, 0xcf08f118
+77, 0x6b6d5969
+78, 0x3ccf1977
+79, 0x7cc11497
+80, 0x651c6ac9
+81, 0x4d6b104b
+82, 0x9a28314e
+83, 0x14c237be
+84, 0x9cfc8d52
+85, 0x2947fad5
+86, 0xd71eff49
+87, 0x5188730e
+88, 0x4b894614
+89, 0xf4fa2a34
+90, 0x42f7cc69
+91, 0x4089c9e8
+92, 0xbf0bbfe4
+93, 0x3cea65c
+94, 0xc6221207
+95, 0x1bb71a8f
+96, 0x54843fe7
+97, 0xbc59de4c
+98, 0x79c6ee64
+99, 0x14e57a26
+100, 0x68d88fe
+101, 0x2b86ef64
+102, 0x8ffff3c1
+103, 0x5bdd573f
+104, 0x85671813
+105, 0xefe32ca2
+106, 0x105ded1e
+107, 0x90ca2769
+108, 0xb33963ac
+109, 0x363fbbc3
+110, 0x3b3763ae
+111, 0x1d50ab88
+112, 0xc9ec01eb
+113, 0xc8bbeada
+114, 0x5d704692
+115, 0x5fd9e40
+116, 0xe61c125
+117, 0x2fe05792
+118, 0xda8afb72
+119, 0x4cbaa653
+120, 0xdd2243df
+121, 0x896fd3f5
+122, 0x5bc23db
+123, 0xa1c4e807
+124, 0x57d1a24d
+125, 0x66503ddc
+126, 0xcf7c0838
+127, 0x19e034fc
+128, 0x66807450
+129, 0xfc219b3b
+130, 0xe8a843e7
+131, 0x9ce61f08
+132, 0x92b950d6
+133, 0xce955ec4
+134, 0xda0d1f0d
+135, 0x960c6250
+136, 0x39552432
+137, 0xde845e84
+138, 0xff3b4b11
+139, 0x5d918e6f
+140, 0xbb930df2
+141, 0x7cfb0993
+142, 0x5400e1e9
+143, 0x3bfa0954
+144, 0x7e2605fb
+145, 0x11941591
+146, 0x887e6994
+147, 0xdc8bed45
+148, 0x45b3fb50
+149, 0xfbdf8358
+150, 0x41507468
+151, 0x34c87166
+152, 0x17f64d77
+153, 0x3bbaf4f8
+154, 0x4f26f37e
+155, 0x4a56ebf2
+156, 0x81100f1
+157, 0x96d94eae
+158, 0xca88fda5
+159, 0x2eef3a60
+160, 0x952afbd3
+161, 0x2bec88c7
+162, 0x52335c4b
+163, 0x8296db8e
+164, 0x4da7d00a
+165, 0xc00ac899
+166, 0xadff8c72
+167, 0xbecf26cf
+168, 0x8835c83c
+169, 0x1d13c804
+170, 0xaa940ddc
+171, 0x68222cfe
+172, 0x4569c0e1
+173, 0x29077976
+174, 0x32d4a5af
+175, 0xd31fcdef
+176, 0xdc60682b
+177, 0x7c95c368
+178, 0x75a70213
+179, 0x43021751
+180, 0x5e52e0a6
+181, 0xf7e190b5
+182, 0xee3e4bb
+183, 0x2fe3b150
+184, 0xcf419c07
+185, 0x478a4570
+186, 0xe5c3ea50
+187, 0x417f30a8
+188, 0xf0cfdaa0
+189, 0xd1f7f738
+190, 0x2c70fc23
+191, 0x54fc89f9
+192, 0x444dcf01
+193, 0xec2a002d
+194, 0xef0c3a88
+195, 0xde21be9
+196, 0x88ab3296
+197, 0x3028897c
+198, 0x264b200b
+199, 0xd8ae0706
+200, 0x9eef901a
+201, 0xbd1b96e0
+202, 0xea71366c
+203, 0x1465b694
+204, 0x5a794650
+205, 0x83df52d4
+206, 0x8262413d
+207, 0x5bc148c0
+208, 0xe0ecd80c
+209, 0x40649571
+210, 0xb4d2ee5f
+211, 0xedfd7d09
+212, 0xa082e25f
+213, 0xc62992d1
+214, 0xbc7e65ee
+215, 0x5499cf8a
+216, 0xac28f775
+217, 0x649840fb
+218, 0xd4c54805
+219, 0x1d166ba6
+220, 0xbeb1171f
+221, 0x45b66703
+222, 0x78c03349
+223, 0x38d2a6ff
+224, 0x935cae8b
+225, 0x1d07dc3f
+226, 0x6c1ed365
+227, 0x579fc585
+228, 0x1320c0ec
+229, 0x632757eb
+230, 0xd265a397
+231, 0x70e9b6c2
+232, 0xc81e322c
+233, 0xa27153cf
+234, 0x2118ba19
+235, 0x514ec400
+236, 0x2bd0ecd6
+237, 0xc3e7dae3
+238, 0xfa39355e
+239, 0x48f23cc1
+240, 0xbcf75948
+241, 0x53ccc70c
+242, 0x75346423
+243, 0x951181e0
+244, 0x348e90df
+245, 0x14365d7f
+246, 0xfbc95d7a
+247, 0xdc98a9e6
+248, 0xed202df7
+249, 0xa59ec913
+250, 0x6b6e9ae2
+251, 0x1697f265
+252, 0x15d322d0
+253, 0xa2e7ee0a
+254, 0x88860b7e
+255, 0x455d8b9d
+256, 0x2f5c59cb
+257, 0xac49c9f1
+258, 0xa6a6a039
+259, 0xc057f56b
+260, 0xf1ff1208
+261, 0x5eb8dc9d
+262, 0xe6702509
+263, 0xe238b0ed
+264, 0x5ae32e3d
+265, 0xa88ebbdf
+266, 0xef885ae7
+267, 0xafa6d49b
+268, 0xc94499e0
+269, 0x1a196325
+270, 0x88938da3
+271, 0x14f4345
+272, 0xd8e33637
+273, 0xa3551bd5
+274, 0x73fe35c7
+275, 0x9561e94b
+276, 0xd673bf68
+277, 0x16134872
+278, 0x68c42f9f
+279, 0xdf7574c8
+280, 0x8809bab9
+281, 0x1432cf69
+282, 0xafb66bf1
+283, 0xc184aa7b
+284, 0xedbf2007
+285, 0xbd420ce1
+286, 0x761033a0
+287, 0xff7e351f
+288, 0xd6c3780e
+289, 0x5844416f
+290, 0xc6c0ee1c
+291, 0xd2e147db
+292, 0x92ac601a
+293, 0x393e846b
+294, 0x18196cca
+295, 0x54a22be
+296, 0x32bab1c4
+297, 0x60365183
+298, 0x64fa342
+299, 0xca24a493
+300, 0xd8cc8b83
+301, 0x3faf102b
+302, 0x6e09bb58
+303, 0x812f0ea
+304, 0x592c95d8
+305, 0xe45ea4c5
+306, 0x23aebf83
+307, 0xbd9691d4
+308, 0xf47b4baa
+309, 0x4ac7b487
+310, 0xcce18803
+311, 0x3377556e
+312, 0x3ff8e6b6
+313, 0x99d22063
+314, 0x23250bec
+315, 0x4e1f9861
+316, 0x8554249b
+317, 0x8635c2fc
+318, 0xe8426e8a
+319, 0x966c29d8
+320, 0x270b6082
+321, 0x3180a8a1
+322, 0xe7e1668b
+323, 0x7f868dc
+324, 0xcf4c17cf
+325, 0xe31de4d1
+326, 0xc8c8aff4
+327, 0xae8db704
+328, 0x3c928cc2
+329, 0xe12cd48
+330, 0xb33ecd04
+331, 0xb93d7cbe
+332, 0x49c69d6a
+333, 0x7d3bce64
+334, 0x86bc219
+335, 0x8408233b
+336, 0x44dc7479
+337, 0xdf80d538
+338, 0xf3db02c3
+339, 0xbbbd31d7
+340, 0x121281f
+341, 0x7521e9a3
+342, 0x8859675a
+343, 0x75aa6502
+344, 0x430ed15b
+345, 0xecf0a28d
+346, 0x659774fd
+347, 0xd58a2311
+348, 0x512389a9
+349, 0xff65e1ff
+350, 0xb6ddf222
+351, 0xe3458895
+352, 0x8b13cd6e
+353, 0xd4a22870
+354, 0xe604c50c
+355, 0x27f54f26
+356, 0x8f7f422f
+357, 0x9735b4cf
+358, 0x414072b0
+359, 0x76a1c6d5
+360, 0xa2208c06
+361, 0x83cd0f61
+362, 0x6c4f7ead
+363, 0x6553cf76
+364, 0xeffcf44
+365, 0x7f434a3f
+366, 0x9dc364bd
+367, 0x3cdf52ed
+368, 0xad597594
+369, 0x9c3e211b
+370, 0x6c04a33f
+371, 0x885dafa6
+372, 0xbbdaca71
+373, 0x7ae5dd5c
+374, 0x37675644
+375, 0x251853c6
+376, 0x130b086b
+377, 0x143fa54b
+378, 0x54cdc282
+379, 0x9faff5b3
+380, 0x502a5c8b
+381, 0xd9524550
+382, 0xae221aa6
+383, 0x55cf759b
+384, 0x24782da4
+385, 0xd715d815
+386, 0x250ea09a
+387, 0x4e0744ac
+388, 0x11e15814
+389, 0xabe5f9df
+390, 0xc8146350
+391, 0xfba67d9b
+392, 0x2b82e42f
+393, 0xd4ea96fc
+394, 0x5ffc179e
+395, 0x1598bafe
+396, 0x7fb6d662
+397, 0x1a12a0db
+398, 0x450cee4a
+399, 0x85f8e12
+400, 0xce71b594
+401, 0xd4bb1d19
+402, 0x968f379d
+403, 0x54cc1d52
+404, 0x467e6066
+405, 0x7da5f9a9
+406, 0x70977034
+407, 0x49e65c4b
+408, 0xd08570d1
+409, 0x7acdf60b
+410, 0xdffa038b
+411, 0x9ce14e4c
+412, 0x107cbbf8
+413, 0xdd746ca0
+414, 0xc6370a46
+415, 0xe7f83312
+416, 0x373fa9ce
+417, 0xd822a2c6
+418, 0x1d4efea6
+419, 0xc53dcadb
+420, 0x9b4e898f
+421, 0x71daa6bf
+422, 0x7a0bc78b
+423, 0xd7b86f50
+424, 0x1b8b3286
+425, 0xcf9425dd
+426, 0xd5263220
+427, 0x4ea0b647
+428, 0xc767fe64
+429, 0xcfc5e67
+430, 0xcc6a2942
+431, 0xa51eff00
+432, 0x76092e1b
+433, 0xf606e80f
+434, 0x824b5e20
+435, 0xebb55e14
+436, 0x783d96a6
+437, 0x10696512
+438, 0x17ee510a
+439, 0x3ab70a1f
+440, 0xcce6b210
+441, 0x8f72f0fb
+442, 0xf0610b41
+443, 0x83d01fb5
+444, 0x6b3de36
+445, 0xe4c2e84f
+446, 0x9c43bb15
+447, 0xddf2905
+448, 0x7dd63556
+449, 0x3662ca09
+450, 0xfb81f35b
+451, 0xc2c8a72a
+452, 0x8e93c37
+453, 0xa93da2d4
+454, 0xa03af8f1
+455, 0x8d75159a
+456, 0x15f010b0
+457, 0xa296ab06
+458, 0xe55962ba
+459, 0xeae700a9
+460, 0xe388964a
+461, 0x917f2bec
+462, 0x1c203fea
+463, 0x792a01ba
+464, 0xa93a80ac
+465, 0x9eb8a197
+466, 0x56c0bc73
+467, 0xb8f05799
+468, 0xf429a8c8
+469, 0xb92cee42
+470, 0xf8864ec
+471, 0x62f2518a
+472, 0x3a7bfa3e
+473, 0x12e56e6d
+474, 0xd7a18313
+475, 0x41fa3899
+476, 0xa09c4956
+477, 0xebcfd94a
+478, 0xc485f90b
+479, 0x4391ce40
+480, 0x742a3333
+481, 0xc932f9e5
+482, 0x75c6c263
+483, 0x80937f0
+484, 0xcf21833c
+485, 0x16027520
+486, 0xd42e669f
+487, 0xb0f01fb7
+488, 0xb35896f1
+489, 0x763737a9
+490, 0x1bb20209
+491, 0x3551f189
+492, 0x56bc2602
+493, 0xb6eacf4
+494, 0x42ec4d11
+495, 0x245cc68
+496, 0xc27ac43b
+497, 0x9d903466
+498, 0xce3f0c05
+499, 0xb708c31c
+500, 0xc0fd37eb
+501, 0x95938b2c
+502, 0xf20175a7
+503, 0x4a86ee9b
+504, 0xbe039a58
+505, 0xd41cabe7
+506, 0x83bc99ba
+507, 0x761d60e1
+508, 0x7737cc2e
+509, 0x2b82fc4b
+510, 0x375aa401
+511, 0xfe9597a0
+512, 0x5543806a
+513, 0x44f31238
+514, 0x7df31538
+515, 0x74cfa770
+516, 0x8755d881
+517, 0x1fde665a
+518, 0xda8bf315
+519, 0x973d8e95
+520, 0x72205228
+521, 0x8fe59717
+522, 0x7bb90b34
+523, 0xef6ed945
+524, 0x16fd4a38
+525, 0x5db44de1
+526, 0xf09f93b3
+527, 0xe84824cc
+528, 0x945bb50e
+529, 0xd0be4aa5
+530, 0x47c277c2
+531, 0xd3800c28
+532, 0xac1c33ec
+533, 0xd3dacce
+534, 0x811c8387
+535, 0x6761b36
+536, 0x70d3882f
+537, 0xd6e62e3a
+538, 0xea25daa2
+539, 0xb07f39d1
+540, 0x391d89d7
+541, 0x84b6fb5e
+542, 0x3dda3fca
+543, 0x229e80a4
+544, 0x3d94a4b7
+545, 0x5d3d576a
+546, 0xad7818a0
+547, 0xce23b03a
+548, 0x7aa2079c
+549, 0x9a6be555
+550, 0x83f3b34a
+551, 0x1848f9d9
+552, 0xd8fefc1c
+553, 0x48e6ce48
+554, 0x52e55750
+555, 0xf41a71cf
+556, 0xba08e259
+557, 0xfaf06a15
+558, 0xeaaac0fb
+559, 0x34f90098
+560, 0xb1dfffbb
+561, 0x718daec2
+562, 0xab4dda21
+563, 0xd27cc1ee
+564, 0x4aafbc4c
+565, 0x356dfb4f
+566, 0x83fcdfd6
+567, 0x8f0bcde0
+568, 0x4363f844
+569, 0xadc0f4d5
+570, 0x3bde994e
+571, 0x3884d452
+572, 0x21876b4a
+573, 0x9c985398
+574, 0xca55a226
+575, 0x3a88c583
+576, 0x916dc33c
+577, 0x8f67d1d7
+578, 0x3b26a667
+579, 0xe4ddeb4b
+580, 0x1a9d8c33
+581, 0x81c9b74f
+582, 0x9ed1e9df
+583, 0x6e61aecf
+584, 0x95e95a5d
+585, 0x68864ff5
+586, 0xb8fa5b9
+587, 0x72b1b3de
+588, 0x5e18a86b
+589, 0xd7f2337d
+590, 0xd70e0925
+591, 0xb573a4c1
+592, 0xc77b3f8a
+593, 0x389b20de
+594, 0x16cf6afb
+595, 0xa39bd275
+596, 0xf491cf01
+597, 0x6f88a802
+598, 0x8510af05
+599, 0xe7cd549a
+600, 0x8603179a
+601, 0xef43f191
+602, 0xf9b64c60
+603, 0xb00254a7
+604, 0xd7c06a2d
+605, 0x17e9380b
+606, 0x529e727b
+607, 0xaaa8fe0a
+608, 0xfb64ff4c
+609, 0xcd75af26
+610, 0xfb717c87
+611, 0xa0789899
+612, 0x10391ec9
+613, 0x7e9b40b3
+614, 0x18536554
+615, 0x728c05f7
+616, 0x787dca98
+617, 0xad948d1
+618, 0x44c18def
+619, 0x3303f2ec
+620, 0xa15acb5
+621, 0xb58d38f4
+622, 0xfe041ef8
+623, 0xd151a956
+624, 0x7b9168e8
+625, 0x5ebeca06
+626, 0x90fe95df
+627, 0xf76875aa
+628, 0xb2e0d664
+629, 0x2e3253b7
+630, 0x68e34469
+631, 0x1f0c2d89
+632, 0x13a34ac2
+633, 0x5ffeb841
+634, 0xe381e91c
+635, 0xb8549a92
+636, 0x3f35cf1
+637, 0xda0f9dcb
+638, 0xdd9828a6
+639, 0xe1428f29
+640, 0xf4db80b5
+641, 0xdac30af5
+642, 0x1af1dd17
+643, 0x9a540254
+644, 0xcab68a38
+645, 0x33560361
+646, 0x2fbf3886
+647, 0xbc785923
+648, 0xe081cd10
+649, 0x8e473356
+650, 0xd102c357
+651, 0xeea4fe48
+652, 0x248d3453
+653, 0x1da79ac
+654, 0x815a65ff
+655, 0x27693e76
+656, 0xb7d5af40
+657, 0x6d245d30
+658, 0x9e06fa8f
+659, 0xb0570dcb
+660, 0x469f0005
+661, 0x3e0ca132
+662, 0xd89bbf3
+663, 0xd61ccd47
+664, 0x6383878
+665, 0x62b5956
+666, 0x4dc83675
+667, 0x93fd8492
+668, 0x5a0091f5
+669, 0xc9f9bc3
+670, 0xa26e7778
+671, 0xeabf2d01
+672, 0xe612dc06
+673, 0x85d89ff9
+674, 0xd1763179
+675, 0xcb88947b
+676, 0x9e8757a5
+677, 0xe100e85c
+678, 0x904166eb
+679, 0x4996243d
+680, 0x4038e1cb
+681, 0x2be2c63d
+682, 0x77017e81
+683, 0x3b1f556b
+684, 0x1c785c77
+685, 0x6869b8bd
+686, 0xe1217ed4
+687, 0x4012ab2f
+688, 0xc06c0d8e
+689, 0x2122eb68
+690, 0xad1783fd
+691, 0x5f0c80e3
+692, 0x828f7efa
+693, 0x29328399
+694, 0xeadf1087
+695, 0x85dc0037
+696, 0x9691ef26
+697, 0xc0947a53
+698, 0x2a178d2a
+699, 0x2a2c7e8f
+700, 0x90378380
+701, 0xaad8d326
+702, 0x9cf1c3c8
+703, 0x84eccd44
+704, 0x79e61808
+705, 0x8b3f454e
+706, 0x209e6e1
+707, 0x51f88378
+708, 0xc210226f
+709, 0xd982adb5
+710, 0x55d44a31
+711, 0x9817d443
+712, 0xa328c626
+713, 0x13455966
+714, 0xb8f681d3
+715, 0x2a3c713b
+716, 0xc186959b
+717, 0x814a74b0
+718, 0xed7bc90
+719, 0xa88d3d6d
+720, 0x88a9f561
+721, 0x73aa1c0a
+722, 0xdfeff404
+723, 0xec037e4b
+724, 0xa5c209f0
+725, 0xb3a223b4
+726, 0x24ce3709
+727, 0x3184c790
+728, 0xa1398c62
+729, 0x2f92034e
+730, 0xbb37a79a
+731, 0x605287b4
+732, 0x8faa772c
+733, 0x6ce56c1d
+734, 0xc035fb4c
+735, 0x7cf5b316
+736, 0x6502645
+737, 0xa283d810
+738, 0x778bc2f1
+739, 0xfdf99313
+740, 0x1f513265
+741, 0xbd3837e2
+742, 0x9b84a9a
+743, 0x2139ce91
+744, 0x61a8e890
+745, 0xf9ff12db
+746, 0xb43d2ea7
+747, 0x88532e61
+748, 0x175a6655
+749, 0x7a6c4f72
+750, 0x6dafc1b7
+751, 0x449b1459
+752, 0x514f654f
+753, 0x9a6731e2
+754, 0x8632da43
+755, 0xc81b0422
+756, 0x81fe9005
+757, 0x15b79618
+758, 0xb5fa629f
+759, 0x987a474f
+760, 0x1c74f54e
+761, 0xf9743232
+762, 0xec4b55f
+763, 0x87d761e5
+764, 0xd1ad78b7
+765, 0x453d9350
+766, 0xc7a7d85
+767, 0xb2576ff5
+768, 0xcdde49b7
+769, 0x8e1f763e
+770, 0x1338583e
+771, 0xfd65b9dc
+772, 0x4f19c4f4
+773, 0x3a52d73d
+774, 0xd3509c4c
+775, 0xda24fe31
+776, 0xe2de56ba
+777, 0x2db5e540
+778, 0x23172734
+779, 0x4db572f
+780, 0xeb941718
+781, 0x84c2649a
+782, 0x3b1e5b6a
+783, 0x4c9c61b9
+784, 0x3bccd11
+785, 0xb4d7b78e
+786, 0x48580ae5
+787, 0xd273ab68
+788, 0x25c11615
+789, 0x470b53f6
+790, 0x329c2068
+791, 0x1693721b
+792, 0xf8c9aacf
+793, 0x4c3d5693
+794, 0xd778284e
+795, 0xae1cb24f
+796, 0x3c11d1b3
+797, 0xddd2b0c0
+798, 0x90269fa7
+799, 0x5666e0a2
+800, 0xf9f195a4
+801, 0x61d78eb2
+802, 0xada5a7c0
+803, 0xaa272fbe
+804, 0xba3bae2f
+805, 0xd0b70fc2
+806, 0x529f32b
+807, 0xda7a3e21
+808, 0x9a776a20
+809, 0xb21f9635
+810, 0xb3acc14e
+811, 0xac55f56
+812, 0x29dccf41
+813, 0x32dabdb3
+814, 0xaa032f58
+815, 0xfa406af4
+816, 0xce3c415d
+817, 0xb44fb4d9
+818, 0x32248d1c
+819, 0x680c6440
+820, 0xae2337b
+821, 0x294cb597
+822, 0x5bca48fe
+823, 0xaef19f40
+824, 0xad60406
+825, 0x4781f090
+826, 0xfd691ffc
+827, 0xb6568268
+828, 0xa56c72cb
+829, 0xf8a9e0fc
+830, 0x9af4fd02
+831, 0x2cd30932
+832, 0x776cefd7
+833, 0xe31f476e
+834, 0x6d94a437
+835, 0xb3cab598
+836, 0xf582d13f
+837, 0x3bf8759d
+838, 0xc3777dc
+839, 0x5e425ea8
+840, 0x1c7ff4ed
+841, 0x1c2e97d1
+842, 0xc062d2b4
+843, 0x46dc80e0
+844, 0xbcdb47e6
+845, 0x32282fe0
+846, 0xaba89063
+847, 0x5e94e9bb
+848, 0x3e667f78
+849, 0xea6eb21a
+850, 0xe56e54e8
+851, 0xa0383510
+852, 0x6768fe2b
+853, 0xb53ac3e0
+854, 0x779569a0
+855, 0xeca83c6a
+856, 0x24db4d2d
+857, 0x4585f696
+858, 0xf84748b2
+859, 0xf6a4dd5b
+860, 0x31fb524d
+861, 0x67ab39fe
+862, 0x5882a899
+863, 0x9a05fcf6
+864, 0x712b5674
+865, 0xe8c6958f
+866, 0x4b448bb3
+867, 0x530b9abf
+868, 0xb491f491
+869, 0x98352c62
+870, 0x2d0a50e3
+871, 0xeb4384da
+872, 0x36246f07
+873, 0xcbc5c1a
+874, 0xae24031d
+875, 0x44d11ed6
+876, 0xf07f1608
+877, 0xf296aadd
+878, 0x3bcfe3be
+879, 0x8fa1e7df
+880, 0xfd317a6e
+881, 0xe4975c44
+882, 0x15205892
+883, 0xa762d4df
+884, 0xf1167365
+885, 0x6811cc00
+886, 0x8315f23
+887, 0xe045b4b1
+888, 0xa8496414
+889, 0xbed313ae
+890, 0xcdae3ddb
+891, 0xa9c22c9
+892, 0x275fab1a
+893, 0xedd65fa
+894, 0x4c188229
+895, 0x63a83e58
+896, 0x18aa9207
+897, 0xa41f2e78
+898, 0xd9f63653
+899, 0xbe2be73b
+900, 0xa3364d39
+901, 0x896d5428
+902, 0xc737539e
+903, 0x745a78c6
+904, 0xf0b2b042
+905, 0x510773b4
+906, 0x92ad8e37
+907, 0x27f2f8c4
+908, 0x23704cc8
+909, 0x3d95a77f
+910, 0xf08587a4
+911, 0xbd696a25
+912, 0x948924f3
+913, 0x8cddb634
+914, 0xcd2a4910
+915, 0x8e0e300e
+916, 0x83815a9b
+917, 0x67383510
+918, 0x3c18f0d0
+919, 0xc7a7bccc
+920, 0x7cc2d3a2
+921, 0x52eb2eeb
+922, 0xe4a257e5
+923, 0xec76160e
+924, 0x63f9ad68
+925, 0x36d0bbbf
+926, 0x957bc4e4
+927, 0xc9ed90ff
+928, 0x4cb6059d
+929, 0x2f86eca1
+930, 0x3e3665a3
+931, 0x9b7eb6f4
+932, 0x492e7e18
+933, 0xa098aa51
+934, 0x7eb568b2
+935, 0x3fd639ba
+936, 0x7bebcf1
+937, 0x99c844ad
+938, 0x43cb5ec7
+939, 0x8dfbbef5
+940, 0x5be413ff
+941, 0xd93b976d
+942, 0xc1c7a86d
+943, 0x1f0e93d0
+944, 0x498204a2
+945, 0xe8fe832a
+946, 0x2236bd7
+947, 0x89953769
+948, 0x2acc3491
+949, 0x2c4f22c6
+950, 0xd7996277
+951, 0x3bcdc349
+952, 0xfc286630
+953, 0x5f8909fd
+954, 0x242677c0
+955, 0x4cb34104
+956, 0xa6ff8100
+957, 0x39ea47ec
+958, 0x9bd54140
+959, 0x7502ffe8
+960, 0x7ebef8ae
+961, 0x1ed8abe4
+962, 0xfaba8450
+963, 0xc197b65f
+964, 0x19431455
+965, 0xe229c176
+966, 0xeb2967da
+967, 0xe0c5dc05
+968, 0xa84e3227
+969, 0x10dd9e0f
+970, 0xbdb70b02
+971, 0xce24808a
+972, 0x423edab8
+973, 0x194caf71
+974, 0x144f150d
+975, 0xf811c2d2
+976, 0xc224ee85
+977, 0x2b217a5b
+978, 0xf78a5a79
+979, 0x6554a4b1
+980, 0x769582df
+981, 0xf4b2cf93
+982, 0x89648483
+983, 0xb3283a3e
+984, 0x82b895db
+985, 0x79388ef0
+986, 0x54bc42a6
+987, 0xc4dd39d9
+988, 0x45b33b7d
+989, 0x8703b2c1
+990, 0x1cc94806
+991, 0xe0f43e49
+992, 0xcaa7b6bc
+993, 0x4f88e9af
+994, 0x1477cce5
+995, 0x347dd115
+996, 0x36e335fa
+997, 0xb93c9a31
+998, 0xaac3a175
+999, 0x68a19647
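
The testset files here pin down the first 1000 raw outputs of the MT19937 bit generator for a fixed seed (0xdeadbeaf above, 0x0 in the next file). A plausible sketch of how such a file can be consumed — parse the seed from the header row, then compare against `random_raw`; the actual test harness may differ in details:

    import numpy as np
    from numpy.random import MT19937

    def read_testset(path):
        # First row is 'seed, 0x...'; remaining rows are 'index, 0x...'.
        with open(path) as f:
            seed = int(f.readline().split(',')[1], 0)
            data = [int(line.split(',')[1], 0) for line in f]
        return seed, np.array(data, dtype=np.uint64)

    seed, expected = read_testset('mt19937-testset-1.csv')
    got = MT19937(seed).random_raw(len(expected))
    assert np.array_equal(got, expected)
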
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-2.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-2.csv
new file mode 100644
index 00000000..cdb8e479
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/mt19937-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x7ab4ea94
+1, 0x9b561119
+2, 0x4957d02e
+3, 0x7dd3fdc2
+4, 0x5affe54
+5, 0x5a01741c
+6, 0x8b9e8c1f
+7, 0xda5bf11a
+8, 0x509226
+9, 0x64e2ea17
+10, 0x82c6dab5
+11, 0xe4302515
+12, 0x8198b873
+13, 0xc3ec9a82
+14, 0x829dff28
+15, 0x5278e44f
+16, 0x994a7d2c
+17, 0xf1c89398
+18, 0xaf2fddec
+19, 0x22abc6ee
+20, 0x963dbd43
+21, 0xc29edffb
+22, 0x41c1ce07
+23, 0x9c90034d
+24, 0x1f17a796
+25, 0x3833caa8
+26, 0xb8795528
+27, 0xebc595a2
+28, 0xf8f5b5dd
+29, 0xc2881f72
+30, 0x18e5d3f0
+31, 0x9b19ac7a
+32, 0xb9992436
+33, 0xc00052b3
+34, 0xb63f4475
+35, 0x962642d9
+36, 0x63506c10
+37, 0x2be6b127
+38, 0x569bdbc6
+39, 0x7f185e01
+40, 0xebb55f53
+41, 0x1c30198c
+42, 0x7c8d75c6
+43, 0xd3f2186b
+44, 0xaca5b9b1
+45, 0xbc49ff45
+46, 0xc4a802af
+47, 0x2cecd86f
+48, 0x8e0da529
+49, 0x1f22b00e
+50, 0x4559ea80
+51, 0x60f587d8
+52, 0x7c7460e9
+53, 0x67be0a4a
+54, 0x987a0183
+55, 0x7bd30f1
+56, 0xab18c4ac
+57, 0xffdbfb64
+58, 0x9ea917f9
+59, 0x1239dab7
+60, 0x38efabeb
+61, 0x5da91888
+62, 0x8f49ed62
+63, 0x83f60b1e
+64, 0x5950a3fc
+65, 0xd8911104
+66, 0x19e8859e
+67, 0x1a4d89ec
+68, 0x968ca180
+69, 0x9e1b6da3
+70, 0x3d99c2c
+71, 0x55f76289
+72, 0x8fa28b9e
+73, 0x9fe01d33
+74, 0xdade4e38
+75, 0x1ea04290
+76, 0xa7263313
+77, 0xaafc762e
+78, 0x460476d6
+79, 0x31226e12
+80, 0x451d3f05
+81, 0xd0d2764b
+82, 0xd06e1ab3
+83, 0x1394e3f4
+84, 0x2fc04ea3
+85, 0x5b8401c
+86, 0xebd6c929
+87, 0xe881687c
+88, 0x94bdd66a
+89, 0xabf85983
+90, 0x223ad12d
+91, 0x2aaeeaa3
+92, 0x1f704934
+93, 0x2db2efb6
+94, 0xf49b8dfb
+95, 0x5bdbbb9d
+96, 0xba0cd0db
+97, 0x4ec4674e
+98, 0xad0129e
+99, 0x7a66129b
+100, 0x50d12c5e
+101, 0x85b1d335
+102, 0x3efda58a
+103, 0xecd886fb
+104, 0x8ecadd3d
+105, 0x60ebac0f
+106, 0x5e10fe79
+107, 0xa84f7e5d
+108, 0x43931288
+109, 0xfacf448
+110, 0x4ee01997
+111, 0xcdc0a651
+112, 0x33c87037
+113, 0x8b50fc03
+114, 0xf52aad34
+115, 0xda6cd856
+116, 0x7585bea0
+117, 0xe947c762
+118, 0x4ddff5d8
+119, 0xe0e79b3b
+120, 0xb804cf09
+121, 0x84765c44
+122, 0x3ff666b4
+123, 0xe31621ad
+124, 0x816f2236
+125, 0x228176bc
+126, 0xfdc14904
+127, 0x635f5077
+128, 0x6981a817
+129, 0xfd9a0300
+130, 0xd3fa8a24
+131, 0xd67c1a77
+132, 0x903fe97a
+133, 0xf7c4a4d5
+134, 0x109f2058
+135, 0x48ab87fe
+136, 0xfd6f1928
+137, 0x707e9452
+138, 0xf327db9e
+139, 0x7b80d76d
+140, 0xfb6ba193
+141, 0x454a1ad0
+142, 0xe20b51e
+143, 0xb774d085
+144, 0x6b1ed574
+145, 0xb1e77de4
+146, 0xe2a83b37
+147, 0x33d3176f
+148, 0x2f0ca0fc
+149, 0x17f51e2
+150, 0x7c1fbf55
+151, 0xf09e9cd0
+152, 0xe3d9bacd
+153, 0x4244db0a
+154, 0x876c09fc
+155, 0x9db4fc2f
+156, 0xd3771d60
+157, 0x25fc6a75
+158, 0xb309915c
+159, 0xc50ee027
+160, 0xaa5b7b38
+161, 0x4c650ded
+162, 0x1acb2879
+163, 0x50db5887
+164, 0x90054847
+165, 0xfef23e5b
+166, 0x2dd7b7d5
+167, 0x990b8c2e
+168, 0x6001a601
+169, 0xb5d314c4
+170, 0xfbfb7bf9
+171, 0x1aba997d
+172, 0x814e7304
+173, 0x989d956a
+174, 0x86d5a29c
+175, 0x70a9fa08
+176, 0xc4ccba87
+177, 0x7e9cb366
+178, 0xee18eb0a
+179, 0x44f5be58
+180, 0x91d4af2d
+181, 0x5ab6e593
+182, 0x9fd6bb4d
+183, 0x85894ce
+184, 0x728a2401
+185, 0xf006f6d4
+186, 0xd782741e
+187, 0x842cd5bd
+188, 0xfb5883aa
+189, 0x7e5a471
+190, 0x83ff6965
+191, 0xc9675c6b
+192, 0xb6ced3c7
+193, 0x3de6425b
+194, 0x25e14db4
+195, 0x69ca3dec
+196, 0x81342d13
+197, 0xd7cd8417
+198, 0x88d15e69
+199, 0xefba17c9
+200, 0x43d595e6
+201, 0x89d4cf25
+202, 0x7cae9b9b
+203, 0x2242c621
+204, 0x27fc3598
+205, 0x467b1d84
+206, 0xe84d4622
+207, 0xa26bf980
+208, 0x80411010
+209, 0xe2c2bfea
+210, 0xbc6ca25a
+211, 0x3ddb592a
+212, 0xdd46eb9e
+213, 0xdfe8f657
+214, 0x2cedc974
+215, 0xf0dc546b
+216, 0xd46be68f
+217, 0x26d8a5aa
+218, 0x76e96ba3
+219, 0x7d5b5353
+220, 0xf532237c
+221, 0x6478b79
+222, 0x9b81a5e5
+223, 0x5fc68e5c
+224, 0x68436e70
+225, 0x2a0043f9
+226, 0x108d523c
+227, 0x7a4c32a3
+228, 0x9c84c742
+229, 0x6f813dae
+230, 0xfcc5bbcc
+231, 0x215b6f3a
+232, 0x84cb321d
+233, 0x7913a248
+234, 0xb1e6b585
+235, 0x49376b31
+236, 0x1dc896b0
+237, 0x347051ad
+238, 0x5524c042
+239, 0xda0eef9d
+240, 0xf2e73342
+241, 0xbeee2f9d
+242, 0x7c702874
+243, 0x9eb3bd34
+244, 0x97b09700
+245, 0xcdbab1d4
+246, 0x4a2f6ed1
+247, 0x2047bda5
+248, 0x3ecc7005
+249, 0x8d0d5e67
+250, 0x40876fb5
+251, 0xb5fd2187
+252, 0xe915d8af
+253, 0x9a2351c7
+254, 0xccc658ae
+255, 0xebb1eddc
+256, 0xc4a83671
+257, 0xffb2548f
+258, 0xe4fe387a
+259, 0x477aaab4
+260, 0x8475a4e4
+261, 0xf8823e46
+262, 0xe4130f71
+263, 0xbdb54482
+264, 0x98fe0462
+265, 0xf36b27b8
+266, 0xed7733da
+267, 0x5f428afc
+268, 0x43a3a21a
+269, 0xf8370b55
+270, 0xfade1de1
+271, 0xd9a038ea
+272, 0x3c69af23
+273, 0x24df7dd0
+274, 0xf66d9353
+275, 0x71d811be
+276, 0xcc4d024b
+277, 0xb8c30bf0
+278, 0x4198509d
+279, 0x8b37ba36
+280, 0xa41ae29a
+281, 0x8cf7799e
+282, 0x5cd0136a
+283, 0xa11324ef
+284, 0x2f8b6d4b
+285, 0x3657cf17
+286, 0x35b6873f
+287, 0xee6e5bd7
+288, 0xbeeaa98
+289, 0x9ad3c581
+290, 0xe2376c3f
+291, 0x738027cc
+292, 0x536ac839
+293, 0xf066227
+294, 0x6c9cb0f9
+295, 0x84082ae6
+296, 0xab38ae9d
+297, 0x493eade9
+298, 0xcb630b3a
+299, 0x64d44250
+300, 0xe5efb557
+301, 0xea2424d9
+302, 0x11a690ba
+303, 0x30a48ae4
+304, 0x58987e53
+305, 0x94ec6076
+306, 0x5d3308fa
+307, 0xf1635ebb
+308, 0x56a5ab90
+309, 0x2b2f2ee4
+310, 0x6f9e6483
+311, 0x8b93e327
+312, 0xa7ce140b
+313, 0x4c8aa42
+314, 0x7657bb3f
+315, 0xf250fd75
+316, 0x1edfcb0f
+317, 0xdb42ace3
+318, 0xf8147e16
+319, 0xd1992bd
+320, 0x64bb14d1
+321, 0x423e724d
+322, 0x7b172f7c
+323, 0x17171696
+324, 0x4acaf83b
+325, 0x7a83527e
+326, 0xfc980c60
+327, 0xc8b56bb
+328, 0x2453f77f
+329, 0x85ad1bf9
+330, 0x62a85dfe
+331, 0x48238c4d
+332, 0xbb3ec1eb
+333, 0x4c1c039c
+334, 0x1f37f571
+335, 0x98aecb63
+336, 0xc3b3ddd6
+337, 0xd22dad4
+338, 0xe49671a3
+339, 0xe3baf945
+340, 0xb9e21680
+341, 0xda562856
+342, 0xe8b88ce4
+343, 0x86f88de2
+344, 0x986faf76
+345, 0x6f0025c3
+346, 0x3fe21234
+347, 0xd8d3f729
+348, 0xc2d11c6f
+349, 0xd4f9e8f
+350, 0xf61a0aa
+351, 0xc48bb313
+352, 0xe944e940
+353, 0xf1801b2e
+354, 0x253590be
+355, 0x981f069d
+356, 0x891454d8
+357, 0xa4f824ad
+358, 0x6dd2cc48
+359, 0x3018827e
+360, 0x3fb329e6
+361, 0x65276517
+362, 0x8d2c0dd2
+363, 0xc965b48e
+364, 0x85d14d90
+365, 0x5a51623c
+366, 0xa9573d6a
+367, 0x82d00edf
+368, 0x5ed7ce07
+369, 0x1d946abc
+370, 0x24fa567b
+371, 0x83ef5ecc
+372, 0x9001724a
+373, 0xc4fe48f3
+374, 0x1e07c25c
+375, 0xf4d5e65e
+376, 0xb734f6e9
+377, 0x327a2df8
+378, 0x766d59b7
+379, 0x625e6b61
+380, 0xe82f32d7
+381, 0x1566c638
+382, 0x2e815871
+383, 0x606514aa
+384, 0x36b7386e
+385, 0xcaa8ce08
+386, 0xb453fe9c
+387, 0x48574e23
+388, 0x71f0da06
+389, 0xa8a79463
+390, 0x6b590210
+391, 0x86e989db
+392, 0x42899f4f
+393, 0x7a654ef9
+394, 0x4c4fe932
+395, 0x77b2fd10
+396, 0xb6b4565c
+397, 0xa2e537a3
+398, 0xef5a3dca
+399, 0x41235ea8
+400, 0x95c90541
+401, 0x50ad32c4
+402, 0xc1b8e0a4
+403, 0x498e9aab
+404, 0xffc965f1
+405, 0x72633485
+406, 0x3a731aef
+407, 0x7cfddd0b
+408, 0xb04d4129
+409, 0x184fc28e
+410, 0x424369b0
+411, 0xf9ae13a1
+412, 0xaf357c8d
+413, 0x7a19228e
+414, 0xb46de2a8
+415, 0xeff2ac76
+416, 0xa6c9357b
+417, 0x614f19c1
+418, 0x8ee1a53f
+419, 0xbe1257b1
+420, 0xf72651fe
+421, 0xd347c298
+422, 0x96dd2f23
+423, 0x5bb1d63e
+424, 0x32e10887
+425, 0x36a144da
+426, 0x9d70e791
+427, 0x5e535a25
+428, 0x214253da
+429, 0x2e43dd40
+430, 0xfc0413f4
+431, 0x1f5ea409
+432, 0x1754c126
+433, 0xcdbeebbe
+434, 0x1fb44a14
+435, 0xaec7926
+436, 0xb9d9a1e
+437, 0x9e4a6577
+438, 0x8b1f04c5
+439, 0x19854e8a
+440, 0x531080cd
+441, 0xc0cbd73
+442, 0x20399d77
+443, 0x7d8e9ed5
+444, 0x66177598
+445, 0x4d18a5c2
+446, 0xe08ebf58
+447, 0xb1f9c87b
+448, 0x66bedb10
+449, 0x26670d21
+450, 0x7a7892da
+451, 0x69b69d86
+452, 0xd04f1d1c
+453, 0xaf469625
+454, 0x7946b813
+455, 0x1ee596bd
+456, 0x7f365d85
+457, 0x795b662b
+458, 0x194ad02d
+459, 0x5a9649b5
+460, 0x6085e278
+461, 0x2cf54550
+462, 0x9c77ea0b
+463, 0x3c6ff8b
+464, 0x2141cd34
+465, 0xb90bc671
+466, 0x35037c4b
+467, 0xd04c0d76
+468, 0xc75bff8
+469, 0x8f52003b
+470, 0xfad3d031
+471, 0x667024bc
+472, 0xcb04ea36
+473, 0x3e03d587
+474, 0x2644d3a0
+475, 0xa8fe99ba
+476, 0x2b9a55fc
+477, 0x45c4d44a
+478, 0xd059881
+479, 0xe07fcd20
+480, 0x4e22046c
+481, 0x7c2cbf81
+482, 0xbf7f23de
+483, 0x69d924c3
+484, 0xe53cd01
+485, 0x3879017c
+486, 0xa590e558
+487, 0x263bc076
+488, 0x245465b1
+489, 0x449212c6
+490, 0x249dcb29
+491, 0x703d42d7
+492, 0x140eb9ec
+493, 0xc86c5741
+494, 0x7992aa5b
+495, 0xb8b76a91
+496, 0x771dac3d
+497, 0x4ecd81e3
+498, 0xe5ac30b3
+499, 0xf4d7a5a6
+500, 0xac24b97
+501, 0x63494d78
+502, 0x627ffa89
+503, 0xfa4f330
+504, 0x8098a1aa
+505, 0xcc0c61dc
+506, 0x34749fa0
+507, 0x7f217822
+508, 0x418d6f15
+509, 0xa4b6e51e
+510, 0x1036de68
+511, 0x1436986e
+512, 0x44df961d
+513, 0x368e4651
+514, 0x6a9e5d8c
+515, 0x27d1597e
+516, 0xa1926c62
+517, 0x8d1f2b55
+518, 0x5797eb42
+519, 0xa90f9e81
+520, 0x57547b10
+521, 0xdbbcca8e
+522, 0x9edd2d86
+523, 0xbb0a7527
+524, 0x7662380c
+525, 0xe7c98590
+526, 0x950fbf3f
+527, 0xdc2b76b3
+528, 0x8a945102
+529, 0x3f0a1a85
+530, 0xeb215834
+531, 0xc59f2802
+532, 0xe2a4610
+533, 0x8b5a8665
+534, 0x8b2d9933
+535, 0x40a4f0bc
+536, 0xaab5bc67
+537, 0x1442a69e
+538, 0xdf531193
+539, 0x698d3db4
+540, 0x2d40324e
+541, 0x1a25feb2
+542, 0xe8cc898f
+543, 0xf12e98f5
+544, 0xc03ad34c
+545, 0xf62fceff
+546, 0xdd827e1e
+547, 0x7d8ccb3b
+548, 0xab2d6bc1
+549, 0xc323a124
+550, 0x8184a19a
+551, 0xc3c4e934
+552, 0x5487424d
+553, 0xd6a81a44
+554, 0x90a8689d
+555, 0xe69c4c67
+556, 0xbdae02dd
+557, 0x72a18a79
+558, 0x2a88e907
+559, 0x31cf4b5d
+560, 0xb157772f
+561, 0x206ba601
+562, 0x18529232
+563, 0x7dac90d8
+564, 0x3a5f8a09
+565, 0x9f4b64a3
+566, 0xae373af9
+567, 0x1d79447c
+568, 0x2a23684b
+569, 0x41fb7ba4
+570, 0x55e4bb9e
+571, 0xd7619d3e
+572, 0xc04e4dd8
+573, 0x8418d516
+574, 0x2b2ca585
+575, 0xfa8eedf
+576, 0x5bafd977
+577, 0x31974fb0
+578, 0x9eb6697b
+579, 0xc8be22f5
+580, 0x173b126a
+581, 0x8809becf
+582, 0x3e41efe1
+583, 0x3d6cbbb8
+584, 0x278c81d8
+585, 0xa6f08434
+586, 0xa0e6601d
+587, 0x2fccd88d
+588, 0x3cbc8beb
+589, 0x5f65d864
+590, 0xa1ff8ddf
+591, 0x609dcb7c
+592, 0x4a4e1663
+593, 0xeae5531
+594, 0x962a7c85
+595, 0x1e110607
+596, 0x8c5db5d0
+597, 0xc7f2337e
+598, 0xc94fcc9c
+599, 0xe7f62629
+600, 0x6c9aa9f8
+601, 0x2e27fe0e
+602, 0x4d0dae12
+603, 0x9eecf588
+604, 0x977ba3f2
+605, 0xed0a51af
+606, 0x3f3ec633
+607, 0xc174b2ec
+608, 0x590be8a9
+609, 0x4f630d18
+610, 0xf579e989
+611, 0xe2a55584
+612, 0xee11edcd
+613, 0x150a4833
+614, 0xc0a0535c
+615, 0xb5e00993
+616, 0xb6435700
+617, 0xa98dbff
+618, 0x315716af
+619, 0x94395776
+620, 0x6cbd48d9
+621, 0xab17f8fc
+622, 0xa794ffb7
+623, 0x6b55e231
+624, 0x89ff5783
+625, 0x431dcb26
+626, 0x270f9bf8
+627, 0x2af1b8d0
+628, 0x881745ed
+629, 0x17e1be4e
+630, 0x132a0ec4
+631, 0x5712df17
+632, 0x2dfb3334
+633, 0xf5a35519
+634, 0xcafbdac6
+635, 0x73b6189d
+636, 0x10107cac
+637, 0x18c1045e
+638, 0xbc19bbad
+639, 0x8b4f05ac
+640, 0x5830d038
+641, 0x468cd98a
+642, 0x5b83a201
+643, 0xf0ccdd9c
+644, 0xcb20c4bd
+645, 0x1ff186c9
+646, 0xcdddb47f
+647, 0x5c65ce6
+648, 0xb748c580
+649, 0x23b6f262
+650, 0xe2ba8e5c
+651, 0x9a164a03
+652, 0x62d3322e
+653, 0x918d8b43
+654, 0x45c8b49d
+655, 0xce172c6e
+656, 0x23febc6
+657, 0x84fdc5b7
+658, 0xe7d1fd82
+659, 0xf0ddf3a6
+660, 0x87050436
+661, 0x13d46375
+662, 0x5b191c78
+663, 0x2cbd99c0
+664, 0x7686c7f
+665, 0xcff56c84
+666, 0x7f9b4486
+667, 0xefc997fe
+668, 0x984d4588
+669, 0xfa44f36a
+670, 0x7a5276c1
+671, 0xcfde6176
+672, 0xcacf7b1d
+673, 0xcffae9a7
+674, 0xe98848d5
+675, 0xd4346001
+676, 0xa2196cac
+677, 0x217f07dc
+678, 0x42d5bef
+679, 0x6f2e8838
+680, 0x4677a24
+681, 0x4ad9cd54
+682, 0x43df42af
+683, 0x2dde417
+684, 0xaef5acb1
+685, 0xf377f4b3
+686, 0x7d870d40
+687, 0xe53df1c2
+688, 0xaeb5be50
+689, 0x7c92eac0
+690, 0x4f00838c
+691, 0x91e05e84
+692, 0x23856c80
+693, 0xc4266fa6
+694, 0x912fddb
+695, 0x34d42d22
+696, 0x6c02ffa
+697, 0xe47d093
+698, 0x183c55b3
+699, 0xc161d142
+700, 0x3d43ff5f
+701, 0xc944a36
+702, 0x27bb9fc6
+703, 0x75c91080
+704, 0x2460d0dc
+705, 0xd2174558
+706, 0x68062dbf
+707, 0x778e5c6e
+708, 0xa4dc9a
+709, 0x7a191e69
+710, 0xc084b2ba
+711, 0xbb391d2
+712, 0x88849be
+713, 0x69c02714
+714, 0x69d4a389
+715, 0x8f51854d
+716, 0xaf10bb82
+717, 0x4d5d1c77
+718, 0x53b53109
+719, 0xa0a92aa0
+720, 0x83ecb757
+721, 0x5325752a
+722, 0x114e466e
+723, 0x4b3f2780
+724, 0xa7a6a39c
+725, 0x5e723357
+726, 0xa6b8be9b
+727, 0x157c32ff
+728, 0x8b898012
+729, 0xd7ff2b1e
+730, 0x69cd8444
+731, 0x6ad8030c
+732, 0xa08a49ec
+733, 0xfbc055d3
+734, 0xedf17e46
+735, 0xc9526200
+736, 0x3849b88a
+737, 0x2746860b
+738, 0xae13d0c1
+739, 0x4f15154f
+740, 0xd65c3975
+741, 0x6a377278
+742, 0x54d501f7
+743, 0x81a054ea
+744, 0x143592ba
+745, 0x97714ad6
+746, 0x4f9926d9
+747, 0x4f7ac56d
+748, 0xe87ca939
+749, 0x58b76f6f
+750, 0x60901ad8
+751, 0x3e401bb6
+752, 0xa058468e
+753, 0xc0bb14f6
+754, 0x2cb8f02a
+755, 0x7c2cf756
+756, 0x34c31de5
+757, 0x9b243e83
+758, 0xa5c85ab4
+759, 0x2741e3b3
+760, 0x1249000e
+761, 0x3fc4e72b
+762, 0xa3e038a2
+763, 0x952dd92c
+764, 0x2b821966
+765, 0xfa81b365
+766, 0x530919b9
+767, 0x4486d66f
+768, 0xccf4f3c1
+769, 0xa8bddd1d
+770, 0xcc295eb9
+771, 0xfccbe42f
+772, 0x38bacd8d
+773, 0x2261854f
+774, 0x56068c62
+775, 0x9bdaeb8
+776, 0x555fa5b6
+777, 0x20fe615e
+778, 0x49fb23d3
+779, 0xd093bad6
+780, 0x54919e86
+781, 0x7373eb24
+782, 0xfbaa7a98
+783, 0x5f62fb39
+784, 0xe03bc9ec
+785, 0xa5074d41
+786, 0xa1cefb1
+787, 0x13912d74
+788, 0xf6421b8
+789, 0xfcb48812
+790, 0x8f1db50b
+791, 0xc1654b87
+792, 0x948b43c2
+793, 0xf503ef77
+794, 0x117d891d
+795, 0x5493ffa
+796, 0x171313b1
+797, 0xa4b62e1e
+798, 0x77454ea6
+799, 0xbea0aff0
+800, 0x13c36389
+801, 0xe3b60bac
+802, 0xa176bed3
+803, 0x2863d428
+804, 0xe2314f46
+805, 0xa85cd3d4
+806, 0x7866e57
+807, 0x8f03f5bc
+808, 0x239ae
+809, 0x46f279fb
+810, 0xcca00559
+811, 0xaa07a104
+812, 0x89123d08
+813, 0x2e6856ba
+814, 0x43a9780d
+815, 0x676cff25
+816, 0x6744b87d
+817, 0xee260d4f
+818, 0xb98d8b77
+819, 0x9b0ca455
+820, 0x659f6fe
+821, 0x28d20d1c
+822, 0x601f2657
+823, 0xdec3073e
+824, 0x61263863
+825, 0x1a13435a
+826, 0x27497d1e
+827, 0x17a8458e
+828, 0xdddc407d
+829, 0x4bb2e8ac
+830, 0x16b2aedb
+831, 0x77ccd696
+832, 0x9d108fcd
+833, 0x25ad233e
+834, 0xaa9bc370
+835, 0xa873ab50
+836, 0xaf19c9d9
+837, 0x696e1e6b
+838, 0x1fdc4bf4
+839, 0x4c2ebc81
+840, 0xde4929ed
+841, 0xf4d0c10c
+842, 0xb6595b76
+843, 0x75cbb1b3
+844, 0xbcb6de49
+845, 0xe23157fd
+846, 0x5e596078
+847, 0xa69b0d29
+848, 0x2118a41
+849, 0x7088c16
+850, 0xc75e1e1
+851, 0x6a4af2d6
+852, 0xf19c6521
+853, 0xaff7b3b1
+854, 0x615295c7
+855, 0xbda3a8d7
+856, 0x5b5ca72e
+857, 0xdad9d80f
+858, 0xfa81c084
+859, 0xf4703fa
+860, 0x3ca54540
+861, 0xa8961d51
+862, 0x53d1ecc2
+863, 0x808d83b6
+864, 0x68e8c48e
+865, 0x89be2039
+866, 0x9088ea11
+867, 0xb8665d12
+868, 0x91272f9
+869, 0x53dddff2
+870, 0xb7a54ab
+871, 0xd2b645ca
+872, 0x99fb8590
+873, 0x5315c8e
+874, 0x2a913806
+875, 0x7f15eb2b
+876, 0xa7f1cc5d
+877, 0xbb2ee836
+878, 0xd9fafd60
+879, 0x17448d6f
+880, 0x999ec436
+881, 0x482ec606
+882, 0x9b403c0e
+883, 0x569eb51b
+884, 0xb275d1a6
+885, 0xadd29c31
+886, 0xb7ebdb15
+887, 0xdfef3662
+888, 0x51aba6db
+889, 0x6d41946d
+890, 0x77bf8896
+891, 0xcafa6fab
+892, 0x976ab40f
+893, 0x49a6d86b
+894, 0x56639e55
+895, 0x9945b996
+896, 0x81459b50
+897, 0xbce97542
+898, 0xe397c9c9
+899, 0x247a5955
+900, 0xb72b1573
+901, 0x86306f86
+902, 0x34f65dc5
+903, 0x909360c0
+904, 0xf3f696ef
+905, 0xcb9faae5
+906, 0x93daecd9
+907, 0xde1af7af
+908, 0x43a1f2d
+909, 0x6d75cde5
+910, 0x9e412b6
+911, 0x5673fed
+912, 0x16bb511a
+913, 0x35ef4cca
+914, 0x4e615aca
+915, 0x5cdaf47a
+916, 0x26676047
+917, 0x8c199325
+918, 0x2adf0cb9
+919, 0x84f2e6fd
+920, 0x5e627f64
+921, 0xb7cee354
+922, 0x542ab4a6
+923, 0xe59cd83b
+924, 0x89cc3f10
+925, 0x92b0f5f
+926, 0xc1328370
+927, 0x8208d9f7
+928, 0x68eb00cf
+929, 0xfadd4ac4
+930, 0x2517784f
+931, 0x4042b99
+932, 0x75ce0230
+933, 0x97c5a1b4
+934, 0x1a97f709
+935, 0x4c62781e
+936, 0xf530a83
+937, 0x75776413
+938, 0x321c7240
+939, 0x6afe4e36
+940, 0xad00a2b4
+941, 0xbc05477d
+942, 0xb0911e80
+943, 0x9935b87d
+944, 0xd535eec5
+945, 0x149af45e
+946, 0x786934b0
+947, 0xbc13cdac
+948, 0x208bfa2e
+949, 0xcf4b39cc
+950, 0x6ac6c172
+951, 0xbfa9a37
+952, 0x42d28db6
+953, 0x2bf1ea63
+954, 0xbed6e677
+955, 0x50325d27
+956, 0xa79d3b8b
+957, 0x52448bb1
+958, 0xefaad1bd
+959, 0x833a2e54
+960, 0xd9de549a
+961, 0x9f59672f
+962, 0x9d5f5f16
+963, 0x1c914489
+964, 0xc08fa058
+965, 0xb188698b
+966, 0xdc4672b5
+967, 0x594f720e
+968, 0x56ed428f
+969, 0x9b0898af
+970, 0x8a64d3d5
+971, 0x773308d6
+972, 0x84d62098
+973, 0x46da7cf9
+974, 0x1114eae7
+975, 0xf9f2a092
+976, 0x5363a28
+977, 0xf2db7b3a
+978, 0x102c71a9
+979, 0xe8e76aaf
+980, 0x77a97b3b
+981, 0x77b090d
+982, 0x1099620e
+983, 0xa6daaae6
+984, 0x86ff4713
+985, 0xc0ef85b8
+986, 0xf621d409
+987, 0xfd1561e2
+988, 0x4bcc687d
+989, 0x596f760
+990, 0x7c8819f9
+991, 0x8cb865b8
+992, 0xadea115a
+993, 0x56609348
+994, 0xb321ac14
+995, 0x1bac7db2
+996, 0x5fe6ee2
+997, 0xe9bfe072
+998, 0x15549e74
+999, 0xad8c191b
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-1.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-1.csv
new file mode 100644
index 00000000..0c8271fa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0x60d24054e17a0698
+1, 0xd5e79d89856e4f12
+2, 0xd254972fe64bd782
+3, 0xf1e3072a53c72571
+4, 0xd7c1d7393d4115c9
+5, 0x77b75928b763e1e2
+6, 0xee6dee05190f7909
+7, 0x15f7b1c51d7fa319
+8, 0x27e44105f26ac2d7
+9, 0xcc0d88b29e5b415
+10, 0xe07b1a90c685e361
+11, 0xd2e430240de95e38
+12, 0x3260bca9a24ca9da
+13, 0x9b3cf2e92385adb7
+14, 0x30b5514548271976
+15, 0xa3a1fa16c124faf9
+16, 0xf53e17e918e45bb6
+17, 0x26f19faaeb833bfc
+18, 0x95e1d605730cce1b
+19, 0xa7b520c5c093c1aa
+20, 0x4b68c010c9b106a3
+21, 0x25e19fe91df703f0
+22, 0x898364bb0bf593cb
+23, 0x5bd6ab7dbaa125db
+24, 0xd1fe47f25152045c
+25, 0x3bb11919addf2409
+26, 0x26a8cb7b3f54af8
+27, 0xe6a27ee11200aa24
+28, 0x7cb585ab01e22000
+29, 0x78e60028676d2ef3
+30, 0x5c32535e5a899528
+31, 0x83e8b6f8c4a46fb3
+32, 0xe56ef7668a161246
+33, 0x36dcbc15aeb73055
+34, 0x5ea247f0bd188acb
+35, 0x438b547b84601a80
+36, 0x8acda2a1273e9e3d
+37, 0x2b05e30a4b40c24c
+38, 0xfd87236bd13af032
+39, 0x471df211d8d985ef
+40, 0x18e8a5609a793292
+41, 0x46f0951fab6dc4e3
+42, 0x6c199c4e700f6795
+43, 0xf04aa16bfb7d22cb
+44, 0xd763d269fbaffc89
+45, 0x9991930cefbe5c2b
+46, 0xb2a11b953f824c96
+47, 0x63fd9f52172c44b0
+48, 0x183bdad907b1d848
+49, 0xe17953cddb931c52
+50, 0x515cf16726ec205a
+51, 0x88c327605150711a
+52, 0xc7090dd79cbc8dc3
+53, 0xcb487cedeb00a350
+54, 0xc8abf254d87b657
+55, 0xd43cc4cbfb493d1a
+56, 0x8705452e5d9ed1e
+57, 0xcecd11446769cf43
+58, 0xde72156c8d65bc69
+59, 0x796a8f0f47d52ee8
+60, 0xb4c0da443917d6c3
+61, 0xe07ad7568a8e3dc3
+62, 0xc24a8da39ce6dc21
+63, 0x92b21ea80a8556eb
+64, 0x572f21e531edf3af
+65, 0x9b917ed56bbed198
+66, 0xe65fd8ddc5ab3d7d
+67, 0xf55a80a8ec84fa18
+68, 0x18fc22e1a5227b61
+69, 0x72305dc7eeaa79d3
+70, 0x47ce58a36e7592cf
+71, 0x14c6374340c0f7cc
+72, 0x6f98273d4eb5a2c
+73, 0x59a8702c46fe8f8a
+74, 0xb67cbd8113cfe57f
+75, 0xaa03c5db5f5b7690
+76, 0x3fb0f77ea4568013
+77, 0x756530990398b26e
+78, 0x4c1952b2a3a6a343
+79, 0x1da15c5383074582
+80, 0xb405b21c81c274f7
+81, 0xbe664677a16788b
+82, 0x9d2e37550bcee656
+83, 0x8b4589f0d9defe02
+84, 0x2935f018ee06a59
+85, 0x3834bf88be97ed11
+86, 0xa610d049cea79b6d
+87, 0xd49ffc0d09a59ea9
+88, 0x4073365b76567adf
+89, 0x499eefb9bb7513e2
+90, 0x74a743ee6b0138a9
+91, 0x3bf0880f2d947594
+92, 0x555d1c0498600a99
+93, 0x923b32a88ef2ffa4
+94, 0x7325411065fbedea
+95, 0x9f4129ff8b79d300
+96, 0xab2b0a9b8a3785dc
+97, 0x11734bdfba3a1713
+98, 0xc8333398841ba585
+99, 0xee2409cc234e6742
+100, 0xf6638e700872ecd2
+101, 0x10875300c13cd284
+102, 0x27a9bbed7c15b2d3
+103, 0x3c87f8fef31ce9bd
+104, 0x92be263cd0914a95
+105, 0xa7b0f11bc742307e
+106, 0x4a56f788cc1c1a3c
+107, 0x4a130fa32257a48b
+108, 0x5d4d9eda16e90286
+109, 0x7cc2af564844bedc
+110, 0x2532867bfe7cda1a
+111, 0xb1c504676611fd17
+112, 0xce8e86cfb4189aee
+113, 0x99685898980d1970
+114, 0x8c3b67db23bcf1e
+115, 0x73e14c93905b135f
+116, 0xf0271b64ac2bd4d3
+117, 0xf4beba82f3ec1b2d
+118, 0x1cdbf3ee9f210af
+119, 0x2e938557c09c3ea6
+120, 0x2d314ccfa6ffd81d
+121, 0x31ad47079950ade4
+122, 0x342b27547b900872
+123, 0x171b0e20b9ef1a76
+124, 0xdf10ce6318b03654
+125, 0x1d625df4aa718897
+126, 0x8712715a9f6e02ec
+127, 0xb4a072da725bca3b
+128, 0x19d346cb7734bd42
+129, 0xfd4281d311cb2958
+130, 0x58274c9519fc8789
+131, 0x4cacf29d885fd544
+132, 0x784b14d1c2523b80
+133, 0x2d25242131bb2373
+134, 0xcd2a5e43a7d9abf9
+135, 0x15eda3806e650ecb
+136, 0xdaac5e277d764d96
+137, 0xdc5a5dd59aaa94e0
+138, 0x40d00237a46d5999
+139, 0x6205dd35a692743f
+140, 0xbbd8236740361f09
+141, 0x1625c9f4e7288bf9
+142, 0xb74f12df1479e3ce
+143, 0xb2d72a51b43d7131
+144, 0xf006a324b3707c83
+145, 0x28e8ab4abe7655b8
+146, 0xfb480093ad7ab55
+147, 0x3f8abd0d6ff8d272
+148, 0xc81a94177ac26bb7
+149, 0x3cdc178307751b14
+150, 0x9de84cc2b10ba025
+151, 0x3f8ab5aefcd046e2
+152, 0x43bdb894e1ee83b2
+153, 0xe288a40f3f06ac9d
+154, 0xdab62a7d04b4f30f
+155, 0x49f4e20295e1a805
+156, 0x3643764805e0edef
+157, 0x9449954618b6b
+158, 0x6c87e0d4508e0ce0
+159, 0x3a334be688a9dd7b
+160, 0xb35c39228776e499
+161, 0xc4118bfff938490e
+162, 0x88cbde3dcbb034b2
+163, 0xf91b287793c417c3
+164, 0x42b15f731a59f5b3
+165, 0xffa27104bbe4814d
+166, 0x1b6789d138beccde
+167, 0x542c2c1440d0ceb9
+168, 0x367294504d18fa0d
+169, 0xf918b60e804a1b58
+170, 0xd390964e33a9d0e3
+171, 0x23bb1be7c4030fe8
+172, 0x9731054d039a8afb
+173, 0x1a6205026b9d139b
+174, 0x2fa13b318254a07e
+175, 0x69571de7d8520626
+176, 0x641a13d7c03332b7
+177, 0x76a6237818f7a441
+178, 0x4e77860d0c660d81
+179, 0x4441448a1c1cbdb2
+180, 0xccd7783a042046e5
+181, 0xf620d8e0805e3200
+182, 0x7de02971367fdd0c
+183, 0x539c263c5914cab1
+184, 0x9c3b9ba1a87bbf08
+185, 0x6d95baa34cda215f
+186, 0x2db3f83ace0bac5f
+187, 0x7f5af1da2dc670a4
+188, 0xfcc098d16c891bfb
+189, 0x81a33df1d7a5ab12
+190, 0x767b0f863c8e9882
+191, 0x7a92983830de483d
+192, 0xfa7598c37a79ac25
+193, 0xb89b3ca42ce03053
+194, 0x457a542b8efed4f7
+195, 0x571b7737fd0eeda7
+196, 0xa0f59e524485c0a
+197, 0x82dca766b7901efd
+198, 0xa68243caf6a3bd5d
+199, 0x1bac981c6c740e5e
+200, 0xbcd51bedf9103e44
+201, 0x4e197efd3ae5a7bf
+202, 0x523568efd782268b
+203, 0x5ec4ef1191fef09
+204, 0xed751ed5e31c9ab
+205, 0x44eac24de03e1b29
+206, 0x9237d57c011d3fb3
+207, 0xa8c6da0f7692f235
+208, 0x9f9eb6bc15d6cac7
+209, 0x34bb8e0c93427aad
+210, 0x115febd738eaac4a
+211, 0xa439991ed139d27a
+212, 0x45c7c2633d8710a2
+213, 0x48b7475f3405a3ce
+214, 0x80158497c77bd00b
+215, 0x935c316a5b1657cb
+216, 0x59c5d54440e9695e
+217, 0x337c78c5b3d0ede2
+218, 0x8c46bb956b93790d
+219, 0xbf1dd03e471d71c5
+220, 0x2d375e90a4bef583
+221, 0xd0365428331b3790
+222, 0xfcd3969ac827ecd4
+223, 0x392fb6c580498410
+224, 0x6d6db4ceab5ea6c0
+225, 0x9bf84f1972e24786
+226, 0x798dfd820959dcc5
+227, 0x2e425095e65e8bfb
+228, 0x8c1aa11536b1c9c3
+229, 0xd28e2ef9b12f6f74
+230, 0x86583bc98c8f78d2
+231, 0x489877530e3f93e7
+232, 0xb1d9430631104a15
+233, 0x1814f6098e6263bd
+234, 0x8e2658a4e0d4cd53
+235, 0x5afe20e2531cdb2a
+236, 0x30d02f7c4755c9bf
+237, 0xe1e217cda16ed2d2
+238, 0xccb4913a42e3b791
+239, 0xfff21363ac183226
+240, 0xe788690bbda147a7
+241, 0x76905cf5917bfc6a
+242, 0x2a8fa58f7916f52c
+243, 0xf903c0cc0357815a
+244, 0x15d20f243a4998d2
+245, 0x5b7decee5a86ea44
+246, 0x114f7fc421211185
+247, 0x328eb21715764c50
+248, 0xaffaa3f45c0678fd
+249, 0x2579e6ef50378393
+250, 0x7610ab7743c19795
+251, 0xf9923d2bd101b197
+252, 0x57e42e7a62ba7e53
+253, 0x9f1dc217b4f02901
+254, 0x88a9ebd86509b234
+255, 0x867fc926aecc8591
+256, 0xaf22c1bfef04c718
+257, 0x39f701f0313f4288
+258, 0x6171ad397e6faab2
+259, 0x239bb5b9abdec4fc
+260, 0xd9a591e25dd01c6e
+261, 0x826dc4a75b628e49
+262, 0xf112b152c408f47
+263, 0x6843a06110f86c0
+264, 0x965e56a7185c1332
+265, 0x8d84492edbc71710
+266, 0xeee8ec111cfd1319
+267, 0xf2858e94ad98e458
+268, 0xbc9589fdf5f3a97e
+269, 0xaf0ceef3bc375130
+270, 0x48f4aaf13fa75c1e
+271, 0x111e9db47bee758f
+272, 0xea3171df130164ba
+273, 0x2a7bbe30bf827ab6
+274, 0xc516c3fdbf758c35
+275, 0xec55097754b04be5
+276, 0x374a997d52b6d3e6
+277, 0x487df5456085ffbc
+278, 0x528883b84df8eafe
+279, 0x805f77ab5ba26f86
+280, 0x8eb81477dc04f213
+281, 0x471ea08ec6794d72
+282, 0x69d3667ecc4d2176
+283, 0x98b7b6e295548a66
+284, 0x3877713c173f8f2
+285, 0xa00542570d0e8de3
+286, 0xf534b1bfa4033e50
+287, 0x7e1fedeac8bf6b26
+288, 0x8043f37c89628af4
+289, 0x1dd7039ec295e86d
+290, 0xce9c05b763a40cc4
+291, 0x246926481e61028f
+292, 0xb7cb0f1babf5893b
+293, 0xefe6b777f37fc63e
+294, 0xebbcabb4cb35cdcb
+295, 0x39fa63cd711eeea9
+296, 0xad5d3ba7aaf30c8d
+297, 0x8e9e78fe46021990
+298, 0xc7eaef6e7d5a3c62
+299, 0xefccdd5495d3f386
+300, 0x2179557ee8cfc76a
+301, 0x88a77f621f0885ce
+302, 0xafda62674543d90c
+303, 0xb8e6fbe2e13e56c0
+304, 0x8bfbbe26a14f9b1a
+305, 0x1404f59f5851f8c3
+306, 0x1140c53a0489566d
+307, 0x3edf2d138b5c3f1d
+308, 0x75d6bb275d817dc
+309, 0x8e660ae27107664e
+310, 0x7a8021038ee303e1
+311, 0x2042ef5eefa9079f
+312, 0xe3e7b90bbf6d457a
+313, 0xf3f819d2bb9405b
+314, 0x522e42155cae0c10
+315, 0xf5bfbb975b40e233
+316, 0x2cf82b614dd95cfa
+317, 0x183ef4a96bc40e55
+318, 0x9f6e351c5ba4e752
+319, 0x37c1110683c90846
+320, 0x1d89b7a996d8a977
+321, 0x18a444f77c7cb4d9
+322, 0xd0a8a971b78dc893
+323, 0x860232fb9e6543f1
+324, 0x60b6097f51002555
+325, 0xca1e5214123e3894
+326, 0xe03fe695c95f99bb
+327, 0x2c7c6779d5f03622
+328, 0xafeeee42f63055d1
+329, 0x670dde905515936a
+330, 0x9a922f42b59fb094
+331, 0xddb5ff49af5a651a
+332, 0xe61b04c9e58ebbf8
+333, 0x4e459dcf272e7fc4
+334, 0xd549e92c16adceeb
+335, 0x7a17dba1299d4a9c
+336, 0x825d756109f2b585
+337, 0xba142e61a9cb203e
+338, 0xc2a19f00e9c04a30
+339, 0x2d0f8140d23d0652
+340, 0x8b866d4d4d6caaf4
+341, 0x4f11d90dd91f8217
+342, 0xf6efc37373b9e0d
+343, 0x248493d6cd6a4736
+344, 0xd12b6ae74a951a3e
+345, 0x56e34722070b70a7
+346, 0x22d3f201cc9fa0eb
+347, 0xbfdcc320008291b7
+348, 0x1a7a6922e9204fbd
+349, 0x831421e0c4945ae4
+350, 0x66316feddddf0e11
+351, 0xa8c86a1517456554
+352, 0x14a9049ad989e335
+353, 0x837022259f141ecd
+354, 0xcb71793a06c261f7
+355, 0x4aeefc07ebe09a79
+356, 0x8982f15aa3b6594b
+357, 0x67bccfa7ed9b0d5b
+358, 0xb377463b523e9dec
+359, 0x53d3d594870fecb7
+360, 0xa5274b1caec5a60a
+361, 0xd6316d0cb643db39
+362, 0xabc1a9b536de88ce
+363, 0xed2fdb1383d2a077
+364, 0x12319c6feb97221b
+365, 0x7e0f6cd40ef47403
+366, 0x86135c84fe26dbf8
+367, 0xc96622d3fbbee19b
+368, 0xe3989d8d8511573f
+369, 0x42cc365554d1fdc7
+370, 0x4c1a1eb8bbce8b4f
+371, 0xfc4e30e7ef2034c1
+372, 0xc490444317a91e76
+373, 0x7ccdf469ff5dc81c
+374, 0xf5a0da4110cc09d7
+375, 0x505227baf34c0fb5
+376, 0xbe58737e8a35cc88
+377, 0xd449bee91b3e8c41
+378, 0x3e590e23299d0e6
+379, 0x291a7d9e0a64caf7
+380, 0xdc6fafbdfebd2293
+381, 0x8223f1e259fe8a65
+382, 0x6186fbc9efd9e3df
+383, 0xfda39b07e4007ffb
+384, 0xfc19aea98574dc02
+385, 0xd0e10d354fcacd8c
+386, 0xc9619916544a55a5
+387, 0xd454d50a8c8558cd
+388, 0xcd94a246712d91e
+389, 0x76a771f5d1231cce
+390, 0xdd20cb2b7b370ee5
+391, 0xa6f4f50feca57c49
+392, 0x78c8fb431f17ab9c
+393, 0x1b692b79a59b43cc
+394, 0x4c45045d287da7e6
+395, 0x522132e18bf43928
+396, 0x25c458983138b41c
+397, 0x2a1fb426ef229796
+398, 0x74dc324c74e5dd3d
+399, 0x6df75e3eb6eb5374
+400, 0xb63f2f4f9ca25b61
+401, 0xac72286112ee54d6
+402, 0x5a966f3d0a6863c4
+403, 0x8d7046bc64a46fc2
+404, 0xa7b740fd6e3087eb
+405, 0xcdbcbe0340cfcdf5
+406, 0xcb632613bf312b65
+407, 0xa91b3f2c2aac238b
+408, 0xa06deb3f5ae555a3
+409, 0x29d72e1f8db69
+410, 0x2d004bae09728ea6
+411, 0xc6eee5dce0736cc1
+412, 0xa7493145500ff60f
+413, 0xc4d68c4aa18ab93c
+414, 0x8210c29e79d48d7f
+415, 0xd0999d7889ecbef6
+416, 0x6e3bd61e66e93566
+417, 0xe6cc13d47d7d7b1f
+418, 0x3d6f181f42e03979
+419, 0xbed4e14fd867604a
+420, 0xbe511c84067bd86d
+421, 0x49a876d89e697d38
+422, 0xc04c3dde8f889c98
+423, 0xaf293eeab0f53e3f
+424, 0x9f6291dd65732cd6
+425, 0xd7811ac01de78c01
+426, 0xe385cf0261d50ec2
+427, 0x5a64134b3542bbf
+428, 0xf9d1302bc6f13a68
+429, 0x5d2aabbea37d8c31
+430, 0xd9842e99a5192970
+431, 0x713eadc4cd30e837
+432, 0xb7b002fc72abb413
+433, 0x276cfeea526af1cf
+434, 0x8519fe79b633a0ce
+435, 0x2f0e87363705a3e2
+436, 0x9adbac0be3c371e7
+437, 0xf3f44ba899a6173c
+438, 0x782d6c29618fde2b
+439, 0x7f61062acec408f
+440, 0x6e79cd836359258f
+441, 0x5c8e9b138df5785a
+442, 0xa54359c9f39a9a84
+443, 0xeec3f033135084b0
+444, 0x883ee717787a535c
+445, 0x9a2422b513a73b00
+446, 0x2dd4beddcdd64a58
+447, 0x90c8a13202239c7b
+448, 0x85b352ab759646d9
+449, 0x139f5cb2e46c53aa
+450, 0xe1d3ba6c721c66d1
+451, 0xaa66e0edc4b60a98
+452, 0x3521275c75be29b6
+453, 0x490a5190b3edfa5d
+454, 0xd2abcdd2ccb2f14e
+455, 0x9d9be8bef4a5857d
+456, 0xde19676f13ef7755
+457, 0xdac2fee2e42615f3
+458, 0xf4239801cb02f2ab
+459, 0xaa8bf923ed91875c
+460, 0x61d18a1940e4c7c0
+461, 0x1eb6aa3d5f077a6d
+462, 0xee7374c063bf29d8
+463, 0x2f0a59e34d76268d
+464, 0xc92e80e17d1eb3e9
+465, 0xafd05b3ec3d2ca72
+466, 0x28a61ad8d6c497b8
+467, 0xa7094d6834ad7d47
+468, 0x57d80ea9eccbb4f
+469, 0xb047e0fee6cdaf16
+470, 0x44f41b5eb48c00bb
+471, 0xd6dc8e1eb9c8c9ba
+472, 0x47adfd2c638c7849
+473, 0x365d63db7d526c68
+474, 0xc21cda439016135d
+475, 0x14d10c3f0f98863c
+476, 0xa93e56f74e037602
+477, 0x3b4e9c8915bdc9
+478, 0xb46f5ae155e54aa2
+479, 0x8e470d21ce1943e1
+480, 0x60b96301b5ba2e8d
+481, 0x1b473a41d381f9ff
+482, 0xabcf5a8e3269e73f
+483, 0xd410f6e94fb21fa1
+484, 0x65d1a47eebf87e5e
+485, 0x48eaa201c61cb843
+486, 0x212c1abc2499bfc5
+487, 0x4255ad8377d2d8d
+488, 0x44caeef472010612
+489, 0xffae764524f572f2
+490, 0x78d374d20c9ee550
+491, 0x6e003206c0511cee
+492, 0x7998a159145bfb82
+493, 0x921239650bda1d4d
+494, 0xae05025509bcfdc5
+495, 0xc6430c980be407b4
+496, 0x78524f1744b153f1
+497, 0x84089e6f468181fe
+498, 0x8d0d21d7dfb6c254
+499, 0x90bad90502a33603
+500, 0x3072a403cbd16315
+501, 0xdfadddf3f1c040c2
+502, 0x22f0b0639d9ff975
+503, 0xb49e48a4cad0765b
+504, 0x95a0a04f8239709d
+505, 0x56e147a24a4c481f
+506, 0xacf16ef61dea4c7e
+507, 0x424040afd2700de6
+508, 0xc67e8096a3c717a9
+509, 0x39f164181dd0a399
+510, 0x2449cedc1d62198c
+511, 0x7a53df11a1f1a61c
+512, 0x5596f1d4a3badae3
+513, 0x38ed4c822072b3d0
+514, 0xf07ef346b3fd730a
+515, 0xfd349c35c3ed51fd
+516, 0x2f15c9c7890f8f32
+517, 0x3b470df52b173c29
+518, 0xd31bfc8981281af7
+519, 0xbbcc9bdf561215bb
+520, 0x5782fffea326574f
+521, 0xb0ebdcfcc5e03290
+522, 0x7fd89d93d2b3fbef
+523, 0x280ea1865d9ba2
+524, 0xe726959845b2c100
+525, 0xd0361f032cd7dbb1
+526, 0x3c65ec2028b81a22
+527, 0x5221e9b2188920bf
+528, 0xeb5ab27c4125ec20
+529, 0x80a32dd48b54f0a4
+530, 0x369b5ced1012bebb
+531, 0x582d35d76530bc6f
+532, 0x7b50dc9b48e1e37d
+533, 0x37fdfe8bbacf8dad
+534, 0x7a0cb7e6e93840ea
+535, 0xa1132c870be0b2ce
+536, 0x9d8ac2c68267cd1a
+537, 0x470969b647fa7df4
+538, 0xabcb7d8adf7e2d24
+539, 0xacdebec9bdf9eb1c
+540, 0xe30f4cbf7eb6a59
+541, 0x746673836c4df41d
+542, 0x75120a6b647bb326
+543, 0x2f4eab556c3f6878
+544, 0xd84651ab05405b7a
+545, 0x9e695808b9622284
+546, 0xc93b71e56aa6e1a5
+547, 0x2be7f3be4a7b7050
+548, 0x6497e910b6733241
+549, 0xcf7050dfd08076fc
+550, 0x4e3cc156eca183f7
+551, 0xf801a33d9326c265
+552, 0x6aa293c8a47d40e6
+553, 0x28c429755faa6230
+554, 0x82b818651f54e7bb
+555, 0xa84d726d7acdbead
+556, 0x5cfa535d5774965d
+557, 0x4a34b7b1cb48d53
+558, 0x86a7b5bce426de84
+559, 0xfcd2307cecdb7318
+560, 0x16dbaaa71181a038
+561, 0x88e7e8cd261c2547
+562, 0x3c09ba6d1d5ea913
+563, 0x5dd3d643734ee5b6
+564, 0x326d725fe8cbb33
+565, 0x7bcca9ca2da8e784
+566, 0x482dcf6b11d7f9a4
+567, 0x1291b605b4cd3e04
+568, 0x6988181b50e2f4a8
+569, 0x649e3c37131fc292
+570, 0x4eeb67b9e21eba54
+571, 0xc051d39073dec45f
+572, 0xc99c52e110270d67
+573, 0xcb813d5d77868add
+574, 0x423a5f13573e7ac0
+575, 0x231ac4cc4fe73616
+576, 0x4c22b888a6e600ea
+577, 0x8059a6dc7c9e25c6
+578, 0x49f498a5b8ad22de
+579, 0xf1e812cc6d1826c8
+580, 0xbbaf60abe8b11e00
+581, 0x1d31d7f4d8be9a6a
+582, 0xfeadce70a9a10c14
+583, 0xb47c635bc136996a
+584, 0xd88e694c8da030cb
+585, 0xc41bbe132aff1364
+586, 0x34249ab18a4b0800
+587, 0xf14b5c825aa736cc
+588, 0x2710be6b08df78e
+589, 0x2ab56bcc9bf9e740
+590, 0x9b7f6e591b5f648
+591, 0xfb665c3772f34135
+592, 0x628a0a5d2db5d8d5
+593, 0xb3e3f251e61b5259
+594, 0x82310ae33faf1b23
+595, 0x24af8723a65cbd0b
+596, 0x671c93282fc4ad97
+597, 0x6cabeaac77270cad
+598, 0xef4643fe38b02b7f
+599, 0x7b011549d1ac6653
+600, 0xe2af87b9fccfe89
+601, 0x36b71ad67197ac8a
+602, 0xdbba55d06f2fd93b
+603, 0xf571dbd764b7f7e5
+604, 0x38ea402501cdbd45
+605, 0xb8ab5b5b1bab2913
+606, 0xfab973c4d45f32bd
+607, 0x9364f1717c2636b9
+608, 0xfad00f4d983e00fe
+609, 0xc90c532a11aef75a
+610, 0x64a6eda96e44783c
+611, 0x35891f2eb84520be
+612, 0x28d216080caed43
+613, 0x129629cc5bd206f6
+614, 0x22c3d39822cbb4b3
+615, 0xf1efbf4cce1eaa2b
+616, 0x7070cba12524ed08
+617, 0xa7ed0be9deabf20d
+618, 0x8ddb4cd6b454f76b
+619, 0xb82814b1db37b63
+620, 0x418e83b36de01876
+621, 0x9a538c7f39c6413
+622, 0xee0cd7abf8a2ecb9
+623, 0xa9222b07e95590f3
+624, 0x6296a415d68341e6
+625, 0x981e0a5a8f811929
+626, 0x4bb372d3b0de283d
+627, 0xa9805b5971866e16
+628, 0xaf3b5f5183497657
+629, 0x2152b0fd23c3d9f
+630, 0xb730c325b7173180
+631, 0x1e3439d231608c19
+632, 0x1c5ba6031379823c
+633, 0x87f5d12d6d365cbc
+634, 0xd3bc7f29614bc594
+635, 0x63102214bb391268
+636, 0x482bbd5bba648a44
+637, 0x6a23604690759dc4
+638, 0x4091d41408d3a39e
+639, 0x7cd017f922101b15
+640, 0x7ce9004ac5f9231
+641, 0x978bc3d8ec7f7fdf
+642, 0x5bd0c4d780580c11
+643, 0x4313c068bb040153
+644, 0x3ab7dab7bc38bf80
+645, 0x3aaf9c187728deea
+646, 0x6633a4ce8efb88d9
+647, 0x7263b089878f00fc
+648, 0xd0d767e96fe00eb8
+649, 0x184a7c0c01908028
+650, 0x1ebdf41e6f76e186
+651, 0xeb740ee1d0402083
+652, 0xfccf4974edb1c339
+653, 0x16e2707aa28306d
+654, 0x1684f0bdb018c3a5
+655, 0x887b6b67b88aa862
+656, 0x923d7810a2bea33a
+657, 0x56b3560babef5d6b
+658, 0xb39a14614c54b8c6
+659, 0x33e4dc545a509fc8
+660, 0x26e21f84142da9b
+661, 0xdd07598125756855
+662, 0x572d49a071d7ae0a
+663, 0xba3c7e3baea28760
+664, 0x7ecdb2d714db4b61
+665, 0x1c62b4920e1b2fe2
+666, 0x71bfafb70092834a
+667, 0xd710a4228f60d56a
+668, 0xeb16277d4ce4e95b
+669, 0x968168c90b16d3a1
+670, 0xac3439dfe8ad0062
+671, 0x5a8226f9dd5876ad
+672, 0xb843affe917291b0
+673, 0xd76d1e67051f8259
+674, 0xb73a6638cce8ccde
+675, 0xa0e6afd3c7295f9
+676, 0xff8857b4bbb5f4c6
+677, 0x99becf78938f0426
+678, 0xfcd17edc1e70f004
+679, 0x6223b8b23f2f50
+680, 0xca875f3e84587b4c
+681, 0x7d1e81e589f87fb9
+682, 0x9eb621586aa826fc
+683, 0xf46fb9ef5b9c2086
+684, 0x2882c9b7092725f3
+685, 0x5493f099bbedcd02
+686, 0x90c1ec979ffa811d
+687, 0x963f765025bcc53
+688, 0x56194e3ec3d9d4e9
+689, 0x7ec4720954cac1f0
+690, 0xfab3145171af7f90
+691, 0x52a0b4e41a13b593
+692, 0x740e2d4d5909d126
+693, 0x98f5339c09c94a28
+694, 0x1700e462fe8dec76
+695, 0x3dbffc2aa4695ac3
+696, 0x5763edacabdfe2a1
+697, 0x7b5b623ce49ef21d
+698, 0x30addc66f49860df
+699, 0xcc7511a6c31bceda
+700, 0x1b25b61ca75db43b
+701, 0x416bc4c298e59046
+702, 0x4cd11fe2d74e4649
+703, 0xb54458a9229fc978
+704, 0x8c21a27882b6ca35
+705, 0x57887c8b5e01639b
+706, 0xf4e893da996680bb
+707, 0x8d601297702c9c0d
+708, 0x2a27904a30aa53af
+709, 0x497800f6917ea8d0
+710, 0xe96db3340ada9c00
+711, 0xcc23166f14c010ee
+712, 0x782690d78fa65ec9
+713, 0xf3e00d74a0878eda
+714, 0xa7cbb683decca0a3
+715, 0xdd2e038e683a94aa
+716, 0xe2096ff8da896ca5
+717, 0xf7c83400afdabe11
+718, 0x395b8c6f6a4086a4
+719, 0x4a164ec05bee71d4
+720, 0xe87aa5d1ca0462fe
+721, 0x8dbc5aed6dff9ceb
+722, 0x12120d1e9552707b
+723, 0x877dca6889b3e6cd
+724, 0xbd65605c01e900fb
+725, 0xbd6b82c4157c3115
+726, 0x8b60282732caf78a
+727, 0x279fcf5e5de9e57f
+728, 0x34b34ebfb6a37eae
+729, 0xd258cc1a14e03b7b
+730, 0x9a528ba3db4a13fb
+731, 0xffa0aea59d057746
+732, 0x27fa7f456cd37c4e
+733, 0xe1117a57a6fdce63
+734, 0xdc8fc903970a1551
+735, 0x492dd104f30faf29
+736, 0x110def0959e5652b
+737, 0x7f8d1997636fdd15
+738, 0xfb77b05e538a9b59
+739, 0x2e41fa35b4b01fc6
+740, 0xbc35ae69a3374085
+741, 0x192c2a681c2d9b4b
+742, 0x12566b8866c189d6
+743, 0x9d88ea785c5185c8
+744, 0x30a621ad5f983c4
+745, 0x8b875efe1206f587
+746, 0x224d25c3af6e3423
+747, 0x7503e976a1ac7bcc
+748, 0x3c98aa869e823859
+749, 0x3d8835304b646892
+750, 0xf6353330ff970bc2
+751, 0x8a673f5e2edb8acb
+752, 0xf2fdcc53493838b9
+753, 0x85ddcd526236af16
+754, 0x60afb99814c676c5
+755, 0x32a1c2749e281ca8
+756, 0x2367a92ae3bee9ca
+757, 0x219fe082703743cc
+758, 0x34d8b74dc85182a9
+759, 0xdd04164c72db23f
+760, 0xe293ac28fe2671a9
+761, 0x9ca7d169cbda6f45
+762, 0x705c47972b4240ed
+763, 0xc10eda9eeb536209
+764, 0xc36ddacd0c94e85d
+765, 0x8eb592c27e8cd0d2
+766, 0x3e815991c76e7cc4
+767, 0xac9cfce31acf7580
+768, 0xbf7a4cb31c7aee94
+769, 0x663077444aceecf6
+770, 0xe7f614ff386eb568
+771, 0x79d7a229c66912c0
+772, 0x161ed4311f63e1f3
+773, 0x308a5faeb9982ede
+774, 0x7b38ddb9b7efd10
+775, 0x1e103a2589b27ecf
+776, 0x67b02baf4259f27e
+777, 0x868921c115ea2eee
+778, 0x959791912200f71e
+779, 0x4dd55f36dec10557
+780, 0xe3464d90080cb99d
+781, 0xfb2d4f6accce652f
+782, 0x109900a9257d77ba
+783, 0x3c4bda8e2c83684c
+784, 0xc9ae040fb7f868c6
+785, 0x78098ffe994f4905
+786, 0x7a94c33eca77f0b4
+787, 0xbe6a2a95e9b5c0e8
+788, 0x797d39cf963f4837
+789, 0x8d2e249e4425d06d
+790, 0x6ae2c30cd5da06f4
+791, 0x904489de762b179f
+792, 0x84713e2dfb591e3b
+793, 0x6405a40da3f6f51b
+794, 0x976b560d663a2df1
+795, 0xed1c544784ba1e22
+796, 0xca658e995ed9344c
+797, 0x2b1c6b8e4db49025
+798, 0x52b1513da528bad
+799, 0x3c63406d256d9968
+800, 0x63a31ca3d423f85e
+801, 0xb05a81f55789a720
+802, 0xd04412992c476c8e
+803, 0x828ec2f77a150a3d
+804, 0xee50926671bb60c6
+805, 0x5aa70f93e2df61b4
+806, 0x94d60fa2e8655858
+807, 0x3f5e5b770703cc7d
+808, 0xc62dfb2688ca7784
+809, 0xaaf02e1e8ba89fe4
+810, 0x4ab74e0d8c047405
+811, 0x31ee04fbac6fcead
+812, 0x1203b78b8228f5af
+813, 0x412a70836f9aa71a
+814, 0xab51cf98c03f1819
+815, 0x783a3ce9ce137f65
+816, 0x8897085b0a072cf2
+817, 0x685dd9bde8798cb
+818, 0x9a1fac7b1705e2c1
+819, 0xf3e9ff98de48e9cb
+820, 0x5c2d3eb1a1fbe917
+821, 0x3bda718b6b54d82e
+822, 0x29f2dd18f22f0821
+823, 0xb992da1572ac3597
+824, 0xacb69e7aa14b34f7
+825, 0xcd36e3ad14f088d1
+826, 0x6aaacc96a1ec55e8
+827, 0xf8ac593f154fe68f
+828, 0x18fc9cbff012339f
+829, 0x2f3368ccbbb99899
+830, 0x7cec7d17f37031f7
+831, 0x96e86bfaadcb8fc2
+832, 0x74f9e7ee3d42a752
+833, 0xbd52f6c7d9b0733
+834, 0xa48e6d96bb6ce1c9
+835, 0xaefa058254b82133
+836, 0xb7a19edfd0929107
+837, 0x6160ce9125b26e26
+838, 0x6537dbbde1d2aed
+839, 0xc567f9a6bec52dde
+840, 0xca29fd3f22443342
+841, 0x7732aa6db6a1c476
+842, 0x8f5a4d7df6b11b3
+843, 0x76649262aa7e31e1
+844, 0x60a13eb125fbc829
+845, 0xc81e4d123dd21ac1
+846, 0x643cbb09bb72f86b
+847, 0xf971a98fb25555a6
+848, 0xffa2774c66692d56
+849, 0xcb33c16c50b13ea9
+850, 0xfabf388dffda0e9b
+851, 0x55d41ec12ca24b9f
+852, 0x91cf693a3467e807
+853, 0x6be2c00b2c31d6dd
+854, 0xc5cf513b5251ae28
+855, 0xffc4384212403dec
+856, 0x45d4e1865255a69d
+857, 0xfb1dcf956972086a
+858, 0xcae946a55c4c55b8
+859, 0x7351ac7720e385c1
+860, 0x19aa8ffd86240254
+861, 0x8f515ae78f4040da
+862, 0x1e1ed2058de50fce
+863, 0x22d006dcdb374243
+864, 0x6e0f0ede7c95b441
+865, 0x70e8aa81b53b4d25
+866, 0x998f309ea41e3814
+867, 0x89ed6598fb66f390
+868, 0xb5997dc3278060df
+869, 0xb2a021eac4f7e046
+870, 0x3705b60aa2fd0768
+871, 0xfc415079ab9200e
+872, 0xf2871ac4cf45ecc9
+873, 0x24bf758d2246175f
+874, 0xac503dd6f8141b3
+875, 0x4e879d12d9f03b3
+876, 0x82034af8cf93b644
+877, 0x59899dd7e478a6c7
+878, 0xae90addb6eb11507
+879, 0x1524ddf76730cdef
+880, 0x6fd4afd5456b1c9d
+881, 0xcddb9221ea001cbc
+882, 0x64ff400bbf2e8604
+883, 0x6dda10549b06ed9b
+884, 0xed2c85104c261527
+885, 0xc7e09217d29929a8
+886, 0x56284df611a428b1
+887, 0x1a7608289c0a61
+888, 0x7cb63db15166ff66
+889, 0xc6013c76fcdcdc72
+890, 0x8e5dd566c7a5a676
+891, 0x5a8e8565f40d133b
+892, 0xe465973455848c44
+893, 0xf92eecbfe0f3c2c0
+894, 0x7d64155d4dcc5cac
+895, 0xf17595706f988dad
+896, 0xd590a001a6a19c5c
+897, 0x82a164475758db3d
+898, 0x6b144993ea1bbe32
+899, 0x22a81a7a6e453779
+900, 0x8e8c298df1a68a73
+901, 0x78056afd6d936b4c
+902, 0xaaceef0325faaf62
+903, 0xe78bb7699f82266f
+904, 0x523a2d283c5a5166
+905, 0x7076d87088f6c6db
+906, 0x6087dd54cff5aeb2
+907, 0x7ef82e62cb851680
+908, 0x4e8bcc8ed84d03d8
+909, 0xd12fa0361df3cfd3
+910, 0xefb89c79f8127297
+911, 0xa9af4e2fbce0b1f8
+912, 0x462136685b70331e
+913, 0xe9e74c93da699b77
+914, 0x9ec69215fb11d0c3
+915, 0xc10f229939e3e111
+916, 0x3f67fa79e41d2374
+917, 0xd5e7c1a9a7185162
+918, 0xa1dcce9ec91492fe
+919, 0xd4e61f0727b5d21b
+920, 0xdf6cdce46551800a
+921, 0xa3f256ce906982d3
+922, 0x209742a6b9ffc27
+923, 0x4006c96958526a57
+924, 0x9606aebc75a1967e
+925, 0x91b9f42fb64189df
+926, 0xb27119defcb938bc
+927, 0x128cc7a84ba05597
+928, 0x6c3df613c62d0d30
+929, 0x3adf69d48b629ec7
+930, 0xda42ee493837b128
+931, 0xb8e770480e760bb5
+932, 0x9feb55d57c99c626
+933, 0x29812d80afdae3ed
+934, 0xae4222a64276a8c7
+935, 0xe3897212a5b4ed53
+936, 0x98bedfd13886e669
+937, 0xca858675d7fc0d0e
+938, 0x28a359f665354234
+939, 0xfac2ccabe4128b35
+940, 0x61373cc5d11ca180
+941, 0x7007605a4512a87a
+942, 0xe71f8eade7b30b3d
+943, 0x3a9e77f9b99bd04d
+944, 0x70d3e42488098866
+945, 0xd30fc159c7cd4d99
+946, 0xe4d3f6600d2e2d6f
+947, 0x1088324dfa955c25
+948, 0x516437acd4764623
+949, 0x38a31abe50d0aa03
+950, 0x72e1054e9dc02ba
+951, 0xe6971dd664d1a2e2
+952, 0xf6698cb095d3b702
+953, 0xad995a5a8c19bd92
+954, 0x34e53c6936f656e6
+955, 0x10de240bc07c757a
+956, 0x3e3b9a6861c2bd1c
+957, 0x9c0b0b97d3712ec9
+958, 0xabf1505a75043aed
+959, 0xbdf93d3de3274179
+960, 0x28fa5904d3f62c28
+961, 0xc3b97b39ef6c5133
+962, 0xf2b2219225b8679d
+963, 0x8be4ec0f930c0aaa
+964, 0x47de5a56aa590643
+965, 0xb6f871b304129856
+966, 0x80a61c06233ab0f9
+967, 0x3ce6c3af8101b055
+968, 0x85b911708274e7d1
+969, 0x4cab65d093a488b7
+970, 0xaabc4b10661fe28e
+971, 0x35b16dea64474a68
+972, 0x1d6eb5b093361223
+973, 0xc39107b92f0fe1fb
+974, 0x1d09e048073c4841
+975, 0xc6a02f43aca8cb2f
+976, 0xaf6613dbc7da909c
+977, 0x5ac2a40c230aa756
+978, 0x33afb5e7c01c39a5
+979, 0xc7b0b20ea8b7d0ef
+980, 0xdf7306c8ccb1bbea
+981, 0x9710efc0c188b2a0
+982, 0xd6303eadb72c873e
+983, 0xa38ca609b118f35a
+984, 0x8390613065c6e535
+985, 0xdf9a0106757e431f
+986, 0x8bcf77039788e143
+987, 0x6026806a986b378e
+988, 0x482ff3b1394cb1dc
+989, 0x2a27d0ccac9ede9c
+990, 0x53c77f26e271b3ab
+991, 0x1ba004cf276cf3f
+992, 0xc135b0517dc81f7c
+993, 0x5d137838db75e442
+994, 0x3fe505f93d1dbdd7
+995, 0x351654ae7d598294
+996, 0x173f8d182af9d84d
+997, 0xf97dfcd164fe11c5
+998, 0xcda423e5ad43b290
+999, 0xa5cb380b8de10d10
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-2.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-2.csv
new file mode 100644
index 00000000..7c13e317
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0xa30febcfd9c2825f
+1, 0x4510bdf882d9d721
+2, 0xa7d3da94ecde8b8
+3, 0x43b27b61342f01d
+4, 0xd0327a782cde513b
+5, 0xe9aa5979a6401c4e
+6, 0x9b4c7b7180edb27f
+7, 0xbac0495ff8829a45
+8, 0x8b2b01e7a1dc7fbf
+9, 0xef60e8078f56bfed
+10, 0xd0dbc74d4700374c
+11, 0xb37868abbe90b0
+12, 0xdb7ed8bf64e6f5f0
+13, 0x89910738de7951f
+14, 0xbacab307c3cfd379
+15, 0x2cf7c449d8b927a6
+16, 0xdcf94b3a16db7f0e
+17, 0x8a9d33d905a8792e
+18, 0x4cb9eb2014951238
+19, 0x6c353acf7b26d6f1
+20, 0x73ff53d673aa30c
+21, 0x1fd10760015eca68
+22, 0xabae0aa9021eeba8
+23, 0xa5ae363a868ee2bb
+24, 0x9d89e0f041de6631
+25, 0x6238b133c3991a65
+26, 0xff49267d75fef51a
+27, 0xfb180656ce13c53f
+28, 0xaf7fadf36128712d
+29, 0xa6847fc6f339c63e
+30, 0xb03e0b80d71ea5bc
+31, 0x63905abcb43969af
+32, 0x2295af3ee00a3bba
+33, 0xb8b375b994330415
+34, 0x867d9ef1d8716a3b
+35, 0x4f6c02f5601b4e18
+36, 0x7c5fb4c16c470d18
+37, 0xe3b57986b804b343
+38, 0xef1d79d212aca692
+39, 0x5b98774c8806209c
+40, 0x924fc76bac38a5d1
+41, 0x5266084c412ddeed
+42, 0x98240bf9b831d6a3
+43, 0x5681599e81219442
+44, 0x6441248fc2ba92bc
+45, 0xe3e9051a540349ea
+46, 0x3a2700034390baa3
+47, 0x9f893155b6d402bc
+48, 0x158207910c6d8aef
+49, 0xd5282ab7608c2cbc
+50, 0xc97f4651669dee4f
+51, 0x3d4750d95103ed60
+52, 0xe0614542caac1f04
+53, 0xefe5092144cfc6c
+54, 0x560bc486abd7e9ae
+55, 0x2678b71392daa4b8
+56, 0x734970d3dc2ba416
+57, 0xcbdbe849e51e4aaf
+58, 0x3b0b5e28b491556c
+59, 0xd51449ac45abd88
+60, 0x6790b59991f1b7ab
+61, 0x32d1c039ff2415bc
+62, 0x173b9772f24f72e0
+63, 0x9490a9ca9f883b1b
+64, 0x4c775989e6214222
+65, 0xac07db37e6ee6114
+66, 0x331371b2e3f10aee
+67, 0xf12e5326c21c28e4
+68, 0x5d77dc280c70d614
+69, 0x1b01bd17a2f281ec
+70, 0xa10d3b5882938487
+71, 0xed5a0033c394ae8f
+72, 0x70bc8ea568ea44b4
+73, 0xf4600ae77965e730
+74, 0x7ff92c0b321ce233
+75, 0x6cdbc87d0cc1d670
+76, 0x9ec64f0cf2000eb1
+77, 0xfebea50259800f68
+78, 0xf2edf9019a8fd343
+79, 0x75c584ac042e5468
+80, 0xc1fa8481d5bf9a1d
+81, 0x7f57180168514ac2
+82, 0x878100716b94f81e
+83, 0xc929406e3af17fd2
+84, 0x6a26e2c013e4bf4d
+85, 0xbc071d8848280955
+86, 0xb60d75abbfd1bdac
+87, 0xee9b76afeca9fa69
+88, 0x1d6c399d2f452810
+89, 0xbaa0bc1621e25c83
+90, 0xed6ba792f8671ba5
+91, 0xf7ca02c2ab11d8d7
+92, 0x3c3cadadf0b21e3
+93, 0xdd1784571e864e9c
+94, 0xfb2f992015157509
+95, 0xf50bb9f0d3ced743
+96, 0x261565f75c3e185f
+97, 0xf8fe33b284513e60
+98, 0xe3d2d10b5e024664
+99, 0xd28717566242cf35
+100, 0x7ae07d133ac5b789
+101, 0x3b7ccaaa53ac338e
+102, 0xcd480bace4871650
+103, 0xec6c78f923c080e9
+104, 0x44211d0ff8919d59
+105, 0x89f79af76d2a45fe
+106, 0x71583fd8a837548b
+107, 0xee57269c261511f5
+108, 0xa5ee8f3b128c5d1
+109, 0xbb64c20ed0765a17
+110, 0x9d4790ab2eeaf7e4
+111, 0x742f3db806d9e98
+112, 0xb81ec97aed6a0d1b
+113, 0x41808b34f6a8a23
+114, 0xc20913af175dfd4d
+115, 0x834427db263b22bb
+116, 0xedd9c632e611828a
+117, 0x10eac8524496f571
+118, 0xd76091b97eb00ab7
+119, 0x111298ae9fe95666
+120, 0x5824b2e2a6719c43
+121, 0x6e280ec539e934ed
+122, 0xf74fd832df90083e
+123, 0x8fee6d0f241c2e97
+124, 0x4244f331c2f19c3c
+125, 0x3dde75a845cce97f
+126, 0xe35bb8e635a9915b
+127, 0x39d2943037f7932e
+128, 0x1fe2d134201d0970
+129, 0x49d00b63c749b804
+130, 0x960c2942cd4e4e04
+131, 0x8dd8e009dbc0435f
+132, 0xcf493495c3a055cd
+133, 0x8f7b5a1c0f9fe9cd
+134, 0x49d5f90374641a25
+135, 0x69b3932073d3524c
+136, 0xd170603e7de84ee2
+137, 0xa062ba3ed3539948
+138, 0xf5861cc5b5d56c82
+139, 0x5e914998a30c7e76
+140, 0x8d77f2ad1503c0f1
+141, 0x980b6a9e3b4181fb
+142, 0xd9299cd50694c084
+143, 0x253dc0f8f1cec4c5
+144, 0x68110fb9d1b3e695
+145, 0xe8f3120d0aabc461
+146, 0xb066e7df0dfb042
+147, 0xd29ce0f797e6b60b
+148, 0x6a569bb7ca33bd42
+149, 0xd46e08b2dc2385f8
+150, 0x28c61d11d055767
+151, 0x5d73aa3d1a2bb725
+152, 0x1421191e1c14829a
+153, 0xa711bfb6423df35e
+154, 0x461af97a86308006
+155, 0xb3e1018ff3519367
+156, 0xf19cf866a268ef2b
+157, 0x207715eac9199d1d
+158, 0xdd621c410975b78c
+159, 0xf390aea68683610
+160, 0x617a2d107a0047d9
+161, 0x6e05ac416e5bebf0
+162, 0x7d253e70506c1bed
+163, 0xf9f96f4a7dd53810
+164, 0xc693b29cb1573f73
+165, 0x4f1146b0020ea544
+166, 0x45140608fbd40579
+167, 0xdcf57219828ce6be
+168, 0xe19d58cca37b5b32
+169, 0x82bda95b2a161235
+170, 0x5823c3d8a2b6c9ba
+171, 0xfeb2e74092fdf89a
+172, 0x50e1ad1abc8f869d
+173, 0x2ec63d0c105eb8da
+174, 0xe14e1c4845a3264a
+175, 0xcff53670455eb6aa
+176, 0xaafaccd24619fa3e
+177, 0xf55a988486e2422a
+178, 0xecfba16a90ff4d04
+179, 0xbf8d36c2f644757a
+180, 0xdc56ed75a0dd6249
+181, 0x3f45023eff17c3bb
+182, 0x2428bbfe90023fab
+183, 0xab892c611adcb70c
+184, 0xb6f13d8c0c2b9d74
+185, 0x2ac3fb11d224f2a8
+186, 0x65433dcfae2d9351
+187, 0xe906859ae4b45f82
+188, 0x8fb7f5f093d76a3b
+189, 0x940dd290b5e88d1a
+190, 0x31b27d21bef116e7
+191, 0x86a964e2c83b5296
+192, 0x85ffd17bc079a9e8
+193, 0x16c47c724e7ab7f1
+194, 0xfb6098a9867e7d7f
+195, 0x9246fb69092c6cb2
+196, 0x1a4033572760f32
+197, 0xc5cc568a8b273b84
+198, 0xfa6f9f2fbdd44abc
+199, 0x9701b8e087718ba3
+200, 0x51d6a7dcf73f8f3a
+201, 0x30008172cc6a972d
+202, 0xac2ab49a5ca6ac81
+203, 0x31f28ef79461e54c
+204, 0x93e35a8da8cc6132
+205, 0x9a2c58beeba3d5b9
+206, 0xf6615c1de266ac39
+207, 0x127ff9f8166b766b
+208, 0x7ffe380e80a69556
+209, 0xbe7d2c228e1542f7
+210, 0x2d5ebb4e50ba1746
+211, 0x63585761ae1bf684
+212, 0x1019eb5cee022fea
+213, 0xb9d3540ab58da30d
+214, 0x1677f4cb45620eb9
+215, 0x6524baee51783822
+216, 0xdf9f2ddcfabb0adc
+217, 0x78e8acc43b287935
+218, 0xe9a1974e999222b5
+219, 0xc41324ec2291e780
+220, 0xea52abc9ecdcbc9f
+221, 0x209d7bcd46ec6b04
+222, 0x12d504c09803db2e
+223, 0x1200e6bf21475d81
+224, 0xde6d3c2b35fd2cfc
+225, 0xa2526900ac33bd3c
+226, 0x7f1f5290fc432bc5
+227, 0x29ddfb380a3d69c8
+228, 0xac79cb6942a2909d
+229, 0x516996685b67a92a
+230, 0xb5fc39041cb828bb
+231, 0x75d9d8ca0644a276
+232, 0x81e98b76be92a3e9
+233, 0xca27888fafe12179
+234, 0x17be2ae039925765
+235, 0x9429846c0e6d0342
+236, 0x327dfd50439815e9
+237, 0xcee20cd7bc254aeb
+238, 0x7d250389f453f29e
+239, 0xfd1b232a85c95569
+240, 0x2ed55fac80f3e9e9
+241, 0xf6886c20417a1be7
+242, 0xcd08e61f0b0fdfde
+243, 0x7b33e34da5c27bff
+244, 0xd043c4b7d5603dd5
+245, 0x9a544e4c70a3b686
+246, 0xa7b60398c381f771
+247, 0xe9e7a3487c4bd4f2
+248, 0x10b58fdfe1ff112c
+249, 0xd5c1c9748c0f4ceb
+250, 0x61be9d09159d54ff
+251, 0x5356f51e8239f510
+252, 0xfe7889d9b202ecef
+253, 0xc7fc19ca5d263d5d
+254, 0x7c4c07e61dfd9f69
+255, 0x6c315fe5015f300a
+256, 0xe0a5bc00039747b4
+257, 0x16397fdcf829ee80
+258, 0xb55aee80d16a5169
+259, 0xca0609944d007eea
+260, 0xcc982249f65a02ce
+261, 0x528161feb149c148
+262, 0xcbf08ba49b41c006
+263, 0x39af1ff0b6f14138
+264, 0x5cc036be69799aec
+265, 0x6adde125b1db21c5
+266, 0x8a99d83d6b613b67
+267, 0x1cd43fca9451f74c
+268, 0x682dbb26ecc96365
+269, 0x13b4be2ceb43e3
+270, 0xbe8fbc3b6f4f581e
+271, 0xda148a2f4bda5719
+272, 0x239106ca3319f393
+273, 0xb42b4dde641f0dd5
+274, 0xd233cfdf4cb0af74
+275, 0xfb5919d905589afc
+276, 0xd802a8860c10b66a
+277, 0x6c923e1d00e7b5bc
+278, 0xfacce1134f383b89
+279, 0xf9570abda7a6d553
+280, 0x80f0f9796a208f18
+281, 0xc0e1df5280951c57
+282, 0xe9f143f08257bbe0
+283, 0x79e4c6463123d588
+284, 0xdd2118583f2b1684
+285, 0xb399ff5f2329fa18
+286, 0x4b3e9ebae96f813c
+287, 0xc484dbf247787384
+288, 0x921865eb97603f2c
+289, 0x18063c68e257d300
+290, 0x643181f345e7fc26
+291, 0x12e0b0e8eadf9fa7
+292, 0x79e613fe73dfa354
+293, 0x6db4c59203b7217a
+294, 0x6c7a0e9ba6139eaf
+295, 0x9617c7ac4e3f6d97
+296, 0x1f68a7b4fb1b4b75
+297, 0xef0b7ab24944f466
+298, 0xaf1dee1f4be1bc89
+299, 0xd2e355c959f5fd8d
+300, 0xe594c3fb95d96efc
+301, 0x9554766ca3342906
+302, 0xa4bbdc77d12842c
+303, 0xb62400211ee489a8
+304, 0x91abadaaa3bbe67c
+305, 0xd371eeb91deb42bb
+306, 0x883bab35cbd2b6e5
+307, 0xd030c3d9411a9041
+308, 0xff3c110a858ff000
+309, 0x59bdf5ca47d0bde7
+310, 0x2bc80fa3cdba1853
+311, 0x6444ccb652662cb8
+312, 0xc0c7e256b9e90339
+313, 0x70714ea9c9d72302
+314, 0x96a0142f9d897d27
+315, 0x209a9097c5a91ef7
+316, 0xb9e33afc5171e009
+317, 0x47b37af433a58d40
+318, 0x30cc4ffbfa831d26
+319, 0xdcea4a85ff815466
+320, 0x907d5bd027f2e5cc
+321, 0x7c081f6852e04a4b
+322, 0xe61950749c1d502b
+323, 0x1604e937ee69834a
+324, 0xb2372d952dd25309
+325, 0x53f6a5b834c72577
+326, 0x2ce7a74395e0b694
+327, 0xacbf9ab4fe91f225
+328, 0x5ce1e63d3a2bb90f
+329, 0x54740da3a5ed139b
+330, 0xf194ddb39f29880b
+331, 0x3305374f5d8ec08b
+332, 0x831dd0164927ff4a
+333, 0x625baa78e4458cf
+334, 0x29d27dc0a4a71152
+335, 0xe227bae9a1401034
+336, 0xca0c209831846b2b
+337, 0x8e8cc54b08b5a411
+338, 0x38f2b4acaac27db6
+339, 0x8ec88baac814e86b
+340, 0x31c08e46b007bde
+341, 0xb686c02722794c09
+342, 0xb77cf8fc682e3907
+343, 0xa56334e7f606f4b2
+344, 0x9c80b127bddd5f4f
+345, 0x12df14834cd858bf
+346, 0x3f14762a9cf5fb9f
+347, 0x930a70941ef5779e
+348, 0x64e96c849c30c080
+349, 0xfdf53bfba1300484
+350, 0xec7a9363c21bc616
+351, 0x26e9fd6a115ecb47
+352, 0x9707a84b5bc77fbb
+353, 0xb23b2737b20d5903
+354, 0x22f4825ae80f6501
+355, 0x500644b12be6a01b
+356, 0xb746645b2af082db
+357, 0xe6af051f697892f8
+358, 0x577c724248a1cfc6
+359, 0x3d2b6a434c84eed3
+360, 0xd260f5efd7328314
+361, 0x95c16cc84bb3f55c
+362, 0x7a01b2e4e0e80ca7
+363, 0x41930c3ce70a0935
+364, 0x1299bccf39d4e110
+365, 0x494883ba1a8a87f
+366, 0x9478ecfe2d918e60
+367, 0x30ec9a5670cda8af
+368, 0xf9bc877e833e2b99
+369, 0x1b83a0acfbb4a8db
+370, 0x73bc1740c0d18880
+371, 0x65086ca9773cb3e1
+372, 0x3b78c3ccd63cff2e
+373, 0xbfae748795acfb31
+374, 0xa4c9d5d56a15ba20
+375, 0xb9cb41721e52b71e
+376, 0x1532f15d4dc47748
+377, 0x5a4d647a4b9ee632
+378, 0x8513c7c5a50898d9
+379, 0x6d3d98ccd5461b2e
+380, 0xa65e99be2fe98d6
+381, 0x31abc8855334a0e5
+382, 0xf1ed22a661dca5b8
+383, 0x299e2b63229e03be
+384, 0xda201a06687bce48
+385, 0xd27794b302142c55
+386, 0x642bd3e1c7898a9d
+387, 0x777f1ff00afa1a87
+388, 0xd2f1c84fb3877baa
+389, 0xae417583289191fd
+390, 0xd641f1d88e0e2d55
+391, 0xc1f1d98fb5d18ebf
+392, 0xb0f72aecdadce97b
+393, 0xe9b8abc764f6018a
+394, 0xd2a37cff8e890594
+395, 0x2dd70d631a528771
+396, 0xbf8ba0478c18e336
+397, 0x1630bf47f372ce0a
+398, 0x6d04ea20dc3f46b8
+399, 0x6591881bf34337f2
+400, 0x33c149c7eb5b4103
+401, 0xf01a8c9857c86748
+402, 0x184348cdfc16d215
+403, 0x141168b253d2ed7
+404, 0x52aaf012ef50a6f1
+405, 0xfda1722387e16f4c
+406, 0x43c30f57d6c038fa
+407, 0xd4a8611f5f96d214
+408, 0x2c512ce17e987f2c
+409, 0x961ce450f0fa2822
+410, 0xf55a506ec6cea9cd
+411, 0xb76d694d9c7f5ef6
+412, 0xfb029216dbd8e988
+413, 0x93162501896a0081
+414, 0xfbbbd2c5ab300f5c
+415, 0xd648b6da7387d491
+416, 0xc73b4697471d9d98
+417, 0xe37412bf1c93ee76
+418, 0xa1a96d96570e6637
+419, 0x5b3ab4f82428f65c
+420, 0x873d849b188aa36f
+421, 0x39fbee0ffc9fa9ff
+422, 0xc70d21b744d677fe
+423, 0x2b8a43c23043d209
+424, 0x93c33eaa37370d16
+425, 0x8930ac1880f2b0ef
+426, 0xac01d27707036af0
+427, 0xc2af3fee504343a0
+428, 0x1c1dae2ad5535d97
+429, 0x9ffc21804b76a480
+430, 0x69f903412cc13563
+431, 0x9d3c4e2759a0c47d
+432, 0xb1a8f894be6302b9
+433, 0x95e1fd7951479506
+434, 0xbb9e6c03cd4ae8e3
+435, 0x85206010c9b737cf
+436, 0x767e813694d6238c
+437, 0x4969af329ccbb30a
+438, 0x3aa9af1075aaea5c
+439, 0xb1ff519e8118a993
+440, 0xb21a23a3c91180fe
+441, 0x320b24582ca3fd88
+442, 0xf8ca56415fb4e453
+443, 0xabd0899c07205e77
+444, 0x87fdc7a44b4ad50f
+445, 0xd75744911641a278
+446, 0x7c8c9a65df6fcb95
+447, 0x79d785e3c7a5b695
+448, 0x421e4565ba1f592f
+449, 0x27f87eb2517835cf
+450, 0xb62cc4297441c83e
+451, 0xd817a80ac815ca6d
+452, 0xad84388130df2aa8
+453, 0x5e6b1640452d6ac8
+454, 0x936285e15edce2a3
+455, 0x903bccc4969768e8
+456, 0xefc2cb7b109d3140
+457, 0x633e9dfdda2d903a
+458, 0x2a2f3225925678a1
+459, 0xe07eac91a27f8547
+460, 0xe50ced40eda78cb3
+461, 0xc5b22500e1c7441
+462, 0x32becf61bca3aa72
+463, 0xa2e37c4b30671344
+464, 0xc9f1c1910f45d544
+465, 0x9b50333b2dcdf730
+466, 0x310bfd53a1684b94
+467, 0x1e1dc21e66ac6455
+468, 0x81876c2bfb1ed5a1
+469, 0xd0c54a3e25eadc7b
+470, 0x3791b6fbbd5c7ba0
+471, 0x133be57356c599fc
+472, 0x8d1148eb8e83fdea
+473, 0x311aedba0d8b42cc
+474, 0x1142ae52745f94bb
+475, 0xc5f4ab2fbde8c4a3
+476, 0xd23be827b5b24f6d
+477, 0x65f95194cd122715
+478, 0x4b48969d73125922
+479, 0x46f165052b8ff988
+480, 0x5c689f94b9275ff4
+481, 0x93b03823ff2d536b
+482, 0x871f3775aa4e3523
+483, 0x5af829f7cc0f66a5
+484, 0xa32e05739cbeac8c
+485, 0xacff1856ddace0fe
+486, 0x8eeb5e7f991a5322
+487, 0x6325c2720e0dbdea
+488, 0x9fb817bc4fdf5200
+489, 0x9786f0d850e43d78
+490, 0x571f76dd7f9fb77a
+491, 0x4d9e94e181cbc63f
+492, 0x8bb632d3376c547a
+493, 0x9cc26d9efd1c88b9
+494, 0x9c5d49579df52b0b
+495, 0x6201abf7e1cda07b
+496, 0x90d68f0c6c884963
+497, 0xfc5b66188ef7f561
+498, 0x6d9303cf2e0e0f95
+499, 0xd7cfcff535f5ed07
+500, 0x14d1a1228daa4ac6
+501, 0xe00ef5762f66ae50
+502, 0xf113a79471582978
+503, 0x430985281785dc7a
+504, 0x31914108c206ed5
+505, 0x7ba6707b6419971c
+506, 0x2ec63b033ce112e5
+507, 0xf8bcd36ced3b41e3
+508, 0xe5cf908c8010414b
+509, 0xf5ee224b7c703e30
+510, 0x9a9733af0b12338b
+511, 0x83e18cc00ace34f8
+512, 0xd52cff39e23008b8
+513, 0xa700578136b9c0c5
+514, 0x3fa179d32ac51f99
+515, 0xef2d5eab6d4ad380
+516, 0x709024a5abd032df
+517, 0xc607c7ee349ede87
+518, 0x803d784e9731eb5f
+519, 0x2ef06f4ba769282d
+520, 0x4bc1dca1e9f07eb9
+521, 0x930c958a7a72f94d
+522, 0x249bc8db2cc7a3bf
+523, 0x3845305798f9a5d
+524, 0x6f137eca9ab6f948
+525, 0xc31f5a963d31bd67
+526, 0x9d39693d5383626f
+527, 0x52fb41c335a8b98e
+528, 0xb79d1a29a06006ec
+529, 0x7c0926a7a3eda2cc
+530, 0xffdf5214406fd53e
+531, 0xc6aa02a7e94282b9
+532, 0xd4a4431b4aa301ee
+533, 0x4271cc0f9420d3ab
+534, 0x26fccd7cc7fc2485
+535, 0x330594bb945b8d5a
+536, 0x6ea8eaad12e5cb8c
+537, 0x831c3467726bede3
+538, 0x31d1eb10017eaa61
+539, 0xc7aa75e41508f5cb
+540, 0xde51810f0cadd0b5
+541, 0x50e5b3e73692f80b
+542, 0x82107ec55636e188
+543, 0x9828ef175d843ab4
+544, 0xb8edc6a860dd421e
+545, 0x25c0c138fd537ac3
+546, 0x47e72a771e8eb563
+547, 0xbb0f8c5333f4a2cc
+548, 0x91750d2fb9b2d479
+549, 0xe662d8f6fe38df36
+550, 0x72a6d879fb5619f0
+551, 0x6817c7878dcbf077
+552, 0x4e7741cb484661e8
+553, 0x3b3b3ba0be5711bf
+554, 0xa6989f5d25868765
+555, 0x43c276398997e4e0
+556, 0xdcbe16a94da28870
+557, 0x454936980a699c99
+558, 0xac614bfa8f0266c6
+559, 0x9174841392e213d5
+560, 0xa0e2acffc5fc9d1f
+561, 0xe53a08a7a0e6521a
+562, 0x2b845cf7c24172e0
+563, 0x265a4fc5f7adec0d
+564, 0x1f34fbe5f1e49420
+565, 0x139181f6fb647f20
+566, 0x88c35d46e2fcd05e
+567, 0x2a6d5b55903c0459
+568, 0xcea28eb621ad7bf1
+569, 0x5c9cdc13e7aaa30
+570, 0x5fe63e14746e7103
+571, 0x7923e53d73835db9
+572, 0x376e661210bf1b06
+573, 0x5b1cab85450efdd5
+574, 0x3908dc096c70b452
+575, 0x4825e303cd1f396f
+576, 0xed476bfd702957c3
+577, 0x6acc013aff5db743
+578, 0x62c80b776343d488
+579, 0x9c75edcd5b012697
+580, 0xaa053362a3b9770a
+581, 0xa907e236c7c07e94
+582, 0x15b2c380451692c0
+583, 0x94f79142697bd61f
+584, 0xbc657d31ea98d44f
+585, 0xcbaa5e52517a1f5e
+586, 0x96aa2e44a7c4a03f
+587, 0x216d3c66db2b515d
+588, 0x157001807e3ca88a
+589, 0x52b3a596bdd3859a
+590, 0xed747e7fc5e3adac
+591, 0x78fd765ddb2c448d
+592, 0xe53dc7299ed8614e
+593, 0x75ad41fb1d7a790a
+594, 0xc14f6b944b0e6cb1
+595, 0x7c314b69fce3df1c
+596, 0xb56d82eb740d7abc
+597, 0x5132a93c41251fdb
+598, 0xe3ce35bd2a82f958
+599, 0x440571a981c722f2
+600, 0x194cdfd9f186bc9
+601, 0xb89e522a5db00939
+602, 0xad35f339f68df3c8
+603, 0xa82ab18420322293
+604, 0xaffa6df9b72b27c4
+605, 0x9615694d23beaa2c
+606, 0x1d82ebe563abad91
+607, 0xab50ef65fbd94385
+608, 0x1b070dbd70a9a14
+609, 0x2ececa796abbadf0
+610, 0x6bbeafe9e81ab2a2
+611, 0x60dcd0d2a9b76914
+612, 0x1e748039ef05c33f
+613, 0x6d4d17f2213ccdff
+614, 0x9fa56132957bc987
+615, 0x60a17185de2428eb
+616, 0xb56038ddf306479c
+617, 0x3b1db5df92d06d8b
+618, 0x24d1bba8bdedf580
+619, 0xbfb7e6740ebaa4d9
+620, 0xab31c4473e46f61d
+621, 0x6deb3cdd8fd5869f
+622, 0x23032e47746d72d6
+623, 0xa9e72d734e10f2e8
+624, 0xbffd199b6157bc23
+625, 0x29f8254df273fb62
+626, 0xb076142130ee55ec
+627, 0x5b0b08374126c309
+628, 0xea4536aae979521f
+629, 0xc064e7abec91a174
+630, 0x46133ef80c59d935
+631, 0xf0227e2da1b14160
+632, 0x675a76641e1af5a
+633, 0x2f50a069b33d198c
+634, 0x3ded5a65e1d657eb
+635, 0xbb6999b020694f6b
+636, 0x86b2f2b33487aed7
+637, 0x76e14e85f8bfb4cf
+638, 0x38f7f1e44bd4e0db
+639, 0xc1a7d41b7e80d4ae
+640, 0x1dfaaf80bbceb42e
+641, 0x3f51c11497720c2b
+642, 0xce6da1415ddb8b80
+643, 0x7377d8bcd359b5f3
+644, 0xe077208f3f810aca
+645, 0x9a06a8a2dacbffce
+646, 0xca1f99156b09b735
+647, 0x2ff9a93064d91451
+648, 0x50f3ea93f351a7ef
+649, 0x606fceccb07054de
+650, 0x7e83d6d2f8f6685d
+651, 0x78f3995291c5d407
+652, 0xd28d2460e22d0228
+653, 0x2c5636f68a0054dd
+654, 0xd9fafb1c56c8f6cb
+655, 0xe39889b5f9d74464
+656, 0x1355372bf5db2cc1
+657, 0x26768426b9ac323
+658, 0x4af1dbdc1111fd89
+659, 0x66973587943b927f
+660, 0xf86f5f50684dfb1d
+661, 0x1247d574ff79b534
+662, 0xc8039f3259210fe2
+663, 0x79b573235c92a9f5
+664, 0x213f642d8450e2f0
+665, 0x5db7706973376566
+666, 0x6182c12e69b373d7
+667, 0x3e5ac47300aec07f
+668, 0x4b5b6c57b1574376
+669, 0x6b7fcceefd56b17c
+670, 0xf656c3455cb9d4b8
+671, 0x7577e2e13329721f
+672, 0xf33c0c53ce956e8d
+673, 0x7d0f328ee356174
+674, 0x10ec9a168088686e
+675, 0x71ef1776d062dfa
+676, 0xaa7b590a488a6bc4
+677, 0x38612b6dd8049a1c
+678, 0x939045e36874f731
+679, 0xcb9d1d74c56d5ac9
+680, 0x54f1c1c8fef1d8ff
+681, 0x3ee4b85c8c7e939e
+682, 0xb9b4608e019f352c
+683, 0x79d4701275d12e6a
+684, 0x2632a2d9835c7f19
+685, 0x1662cd9fba293692
+686, 0xbcb70265115ee944
+687, 0xdc43fb9761468604
+688, 0xe3eec4e7d3871352
+689, 0x829531753226989d
+690, 0x2748cc67f540e074
+691, 0x39c4af25d607837d
+692, 0x741a243f4cb5df99
+693, 0xda1353287e18b49a
+694, 0xa6735689d751ea74
+695, 0x46326d587340ce0b
+696, 0xc18531df4550012b
+697, 0x6f7901e05dd4b818
+698, 0xfb966afc4c001d63
+699, 0x6dc10fca67a9cfdb
+700, 0xd6527ffadf0feaae
+701, 0x3b900172045e25d
+702, 0xb7dd594cdded6a46
+703, 0x6602aee7ec1599fc
+704, 0x7fbf12f23747546a
+705, 0x32e63f662bd2de0d
+706, 0xedf47770b67ed641
+707, 0x331bef83481c5c2a
+708, 0x8fc4256fdf05158c
+709, 0x98eba48dabccf5e0
+710, 0xdbc2f2cdb7b1c154
+711, 0x7777755616517ad3
+712, 0xd473c147d2628ac1
+713, 0x861e15d1d760b5a7
+714, 0xf4d25926405ecb07
+715, 0xb7739c69effff86e
+716, 0xe97fbafa6f96830c
+717, 0xf13e8a334e8bede1
+718, 0xcd60010cba4ee4f9
+719, 0x1f537ac2b82e6008
+720, 0x1fda8d781a89140a
+721, 0x9dc204f3f4a463f0
+722, 0x456dcd18eb56a1ab
+723, 0x629957bc87bd16a1
+724, 0x2c8000ddb8c75253
+725, 0xc31dae9ec8449284
+726, 0xdac05c8baa2b691a
+727, 0x21ff7be9ffa3e7ac
+728, 0x844f4b5ed4ee08d0
+729, 0x651f913fd636c994
+730, 0xca3e71a2110b2d49
+731, 0x7709bc42253ed09d
+732, 0xbb164d45b6569d43
+733, 0x90ec2f040c20a112
+734, 0xfa6e77e9166f5be4
+735, 0x6b6d12c1842d587d
+736, 0xfcd7ff8466e25e2a
+737, 0x6a5a2ed8bd971297
+738, 0x2ec35f6bba5adcbc
+739, 0xc83676e16651249a
+740, 0x458f6064cefe10ba
+741, 0x90d54d527e6cd028
+742, 0xa5613e88db27c388
+743, 0x331e0c7d85aa1abc
+744, 0x8cee4977e210358
+745, 0xfcae379aa6cbff8e
+746, 0xd1407afc97a57e86
+747, 0x1fab25c864f094ae
+748, 0xd914864a63004552
+749, 0x4214d226a20f1384
+750, 0x3f4e0d80c488b715
+751, 0xc5ca2f654024b7c8
+752, 0xc1e27a124e7c821c
+753, 0xd890a915ffc7918c
+754, 0x22fba040ce51a9f8
+755, 0xbf61cebd8891617a
+756, 0x7846609ee228e319
+757, 0x536d1854375509b8
+758, 0xbbfb45fc6e666f50
+759, 0xd85b4c0527f9d7d6
+760, 0x528cc9c7fa2a84c8
+761, 0x27a1baece647f2cb
+762, 0xfddf0cb92fe09dc3
+763, 0xeb5008fe965d8d96
+764, 0x4a3307937eb2e5c8
+765, 0xd07d74c240c6c363
+766, 0x16f62290179d1bbf
+767, 0xe99c9bcc9cb1ece7
+768, 0xc64f9be03c8a93be
+769, 0x32659effaf666c1f
+770, 0x4bb228cfb30b6672
+771, 0x98764870842068a5
+772, 0x5b12ef2d2cd8bdcc
+773, 0xbc79d1c1b41f28b8
+774, 0x97a517cf3279fc9a
+775, 0x34ffd46c1d4d6025
+776, 0x9c302307ee25c8f0
+777, 0x399604eed1f18a8
+778, 0x1c9b813c2043142a
+779, 0x2944ea5e55267fe9
+780, 0x5a8a9f5e728ea667
+781, 0x30c8440adb804a0
+782, 0xee0e6b627099a937
+783, 0x3d50757ada3c52da
+784, 0x4548916b32c813ab
+785, 0x602a186fe5bf109b
+786, 0xf0d440a2227ba304
+787, 0x5a10d4e0ca9ea32b
+788, 0x6e5eb90da13ba64c
+789, 0x4c6af8fd04241ab2
+790, 0xf9eb31d26e093006
+791, 0x5d674878839fe3ea
+792, 0x1562b55b2484e47c
+793, 0xa87188c099c1cb61
+794, 0xb7736b8aa02a3392
+795, 0x5f4b301125abb20f
+796, 0x361d566984637f44
+797, 0x68c4b3feac8bd0c3
+798, 0x7066c634dd2503c1
+799, 0xfecbf7c9441eb6ea
+800, 0xdbc26ae0fc81436b
+801, 0x9ef3e2b48252e7a4
+802, 0x31a49b4c339b37c7
+803, 0xb01b2a83cf346cf4
+804, 0xc24dc2347f82fbe3
+805, 0x134cad272dcd410f
+806, 0x61260742823ba59c
+807, 0x53ac4c193a97c730
+808, 0x9207c9833af34b52
+809, 0xa72e7ee77078d1f5
+810, 0x2e6f6e1b05936885
+811, 0x783b99ce5dbf9464
+812, 0xfdfeb6f0d027bb44
+813, 0x40eeb27096f92b0
+814, 0x5ef96ff5d4a4521f
+815, 0x5595806ae873718a
+816, 0x67d449eecf4ca1c3
+817, 0xde837ab611364f3f
+818, 0x7034c24d2b139be9
+819, 0xe21166603e0a9c86
+820, 0x935694435c1f0d51
+821, 0x6cb3bec90c126088
+822, 0x4096ef662b7a9f89
+823, 0xd2d85b8d238d8c15
+824, 0xa4ea533ce3ec59b2
+825, 0x3654729d80a2db29
+826, 0x214c4cc3906d29d4
+827, 0x201c447e7588e373
+828, 0xe8b8f0ae25f683eb
+829, 0x6744aaf5754e38af
+830, 0xd1ffb10d6f27a061
+831, 0xe536733a7b3a6c30
+832, 0x39f0f66e47cbf2c9
+833, 0x856a9593526fde2
+834, 0x2e2a817a0098ea4b
+835, 0xc5e1eeb551a0e3d3
+836, 0x3f21e2f5e2d50b2
+837, 0x906af56c66dd9f8c
+838, 0x30f6dbd70329fac8
+839, 0xc443dfddf3c01a60
+840, 0x7ab85d9aa9675470
+841, 0x8c9080bd39717bfc
+842, 0x4b1ccdb3c3597f6f
+843, 0x74e2542d70ab5d67
+844, 0xbb3d236aad00f74
+845, 0xcf3cadf9a2804774
+846, 0xe851d9750e42bd07
+847, 0xc0ad82029b1c371f
+848, 0x7ee119eb552d6c07
+849, 0xd8024049bd1d784a
+850, 0xfa67a899760363
+851, 0xaa7c2f438b178197
+852, 0xc473674a47ffe064
+853, 0x539fbe3fc674c270
+854, 0xdb48484748a76f3b
+855, 0xc73b2b092060d
+856, 0xa1d2a15345016f5d
+857, 0x4d0fe8599f9bba47
+858, 0xa0edc275e6f8f1d1
+859, 0x40590a8655bc8d72
+860, 0x35b4223161f05f75
+861, 0xa04c0c0f616752dc
+862, 0x7f371ed2ca45432d
+863, 0x2ff1a08f75ac6438
+864, 0xe2dc5c3682282f48
+865, 0xe1e4179fa98d9013
+866, 0x8cb083d6843a73d5
+867, 0xb4c2b5921b706854
+868, 0x738e14c0e7352445
+869, 0xcd2b646f91afd8c7
+870, 0xd5779a5b57a264fd
+871, 0xc39ff855586c7d07
+872, 0x3e3f0098c631a859
+873, 0x644e02fae032110
+874, 0xa8834613c0a45278
+875, 0x69482f2c08e10657
+876, 0xe4ee475bdb87e69a
+877, 0xdc1ef7b25c0d0019
+878, 0x88a3fa2be18d8744
+879, 0x60a02e0b21c5bec7
+880, 0xb6867b88aa19bc1a
+881, 0xb599409affcf10eb
+882, 0xaeaa1778a5e59daa
+883, 0xd7a91a52c16663e3
+884, 0x93cb269affe07b1c
+885, 0x841b6ced3a4ba815
+886, 0x84541768e1540a5c
+887, 0xe3943c84f83b3020
+888, 0x5de366fbd7b45258
+889, 0xd787cc3bde91a661
+890, 0x814071446edecb57
+891, 0x15d8c602a1141514
+892, 0x72f07bc8002d1d0d
+893, 0x4a8bd8dc9a1f0f3e
+894, 0x8723796ae0f20d35
+895, 0xda7283c2051f73b2
+896, 0x2df0cc247f90bd3b
+897, 0x79a8522b968f990a
+898, 0x951ede190c8b9d02
+899, 0xc512f1a5b14b018a
+900, 0xf0e3ddc03b9a4259
+901, 0x8cf4a35ad312e15f
+902, 0xebef28926b11094b
+903, 0x5628ba687325921c
+904, 0xc3aa75e57edc49c3
+905, 0xc38382fa98e762ba
+906, 0x8d209e896285848e
+907, 0x2c7d6adf592b4a3e
+908, 0x62de48e36f8338f3
+909, 0x4a752741e00de30e
+910, 0xf7855b70f1f6ec2b
+911, 0xa505fa4428199e43
+912, 0xe8b6b423b826bbac
+913, 0x4bd1206cf8786d05
+914, 0x6dcf040391fe3bf4
+915, 0x913f500f87e1bba3
+916, 0x5acf775aa180a5d5
+917, 0x74dd28d9432ce739
+918, 0x996c2ff2f0dc2495
+919, 0x73dbfe6c56effe4
+920, 0x56fddd25196f5e40
+921, 0xe87810158f5b7
+922, 0x7b8795e996383f1f
+923, 0x9ba5ee7c777c4c82
+924, 0x17ce3908d270fe1c
+925, 0x3df9e613c1aedfae
+926, 0xcdd26871b32fc8e1
+927, 0xd71cb13afc633979
+928, 0x63427c8ea9b1c79e
+929, 0xd070f7664d3b405d
+930, 0x46f2a9e32d9fb769
+931, 0xb4c3822a45e9fe9b
+932, 0x8ba30b97fe6f5ec7
+933, 0x70aa554ee2fc11f9
+934, 0xa80c99dbe0cfcfaf
+935, 0x36d9250cb2d68ed
+936, 0x2995e4b9e1cd1db4
+937, 0x4b3803ba57fc570f
+938, 0xae3959e7d740eaa5
+939, 0xb4cbd6662adbae08
+940, 0xae46576446e8dbc4
+941, 0xc4828e008a9a8a54
+942, 0x145d7db8e6554b2f
+943, 0x1b1b8916a730c371
+944, 0xdaf84b2bebe31963
+945, 0x5b59b80ef23a2403
+946, 0x9180c7e89cab6fd3
+947, 0x80e58f5411babf34
+948, 0xa06cf55185b9b005
+949, 0x13b2c798424173ad
+950, 0xc510f8e706311d49
+951, 0x1f974b83b6046d3a
+952, 0xae6e8e85e822d1c3
+953, 0x66f2c8dc3274a31a
+954, 0x7e04dbcbf65bd377
+955, 0xabf41ede01ec20a4
+956, 0x5efa0948f6bbb2ea
+957, 0xbc91c99d8592255
+958, 0xf6d6917911d86d75
+959, 0x85ce273d54e9097a
+960, 0xbdfd30f2420fff92
+961, 0x8802f02f610b537c
+962, 0xd1d70037ed543229
+963, 0x908aaf97f9693a46
+964, 0x1f6cfeaa0834d53a
+965, 0xa453fd1648ce04d2
+966, 0x2c38bb85ebc64af9
+967, 0xd2daff551c90c4f8
+968, 0xae5a0d949797d784
+969, 0xf0974c8552ac9593
+970, 0xa10b70499f65c693
+971, 0x39a449ebd594ddff
+972, 0x8ea090f2b17b9b49
+973, 0xc592de318090fd83
+974, 0xb63e4fbc467b6912
+975, 0x57a0c1c5ce0e4dcc
+976, 0xa7c517cf3d436b35
+977, 0xef6dcb0f3fad038b
+978, 0xaf4fb60315b91287
+979, 0x5e0776f67304f331
+980, 0xe927753b8e6f7932
+981, 0xd3df2dd92559e304
+982, 0xdaed52aa6af44413
+983, 0x1b59f4dac1e181f8
+984, 0x4a73c2293877ef39
+985, 0xca45d0d015fe44de
+986, 0x4659c8b7853735a8
+987, 0x12de6466bdf8adeb
+988, 0xaeea857a09bfec15
+989, 0xcc9cf4b3c0b88a23
+990, 0xa44ae52396a5e1bf
+991, 0x5847a724305d137f
+992, 0x8f4d4de223956182
+993, 0x58254dfada867a8
+994, 0x900a98222c2f339e
+995, 0xdb575260935d51d5
+996, 0x13fb4bfbbc0d7b53
+997, 0x62213850186bb92b
+998, 0x2a34823312c00388
+999, 0x6148329042f743b0
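(Editorial note, not part of the diff.) The two CSV files above pin down the raw 64-bit stream of NumPy's PCG64 bit generator: the header row holds the seed, and each following row is `draw index, draw in hex`. A minimal sketch of how such a testset can be checked, assuming NumPy's test convention of passing the CSV seed straight to the bit generator constructor; the helper name `check_testset` is illustrative and not taken from the NumPy test suite:

```python
import numpy as np

def check_testset(seed, expected_hex):
    """Compare PCG64's raw 64-bit output against reference values (illustrative helper)."""
    bg = np.random.PCG64(seed)               # the integer seed is routed through SeedSequence
    raw = bg.random_raw(len(expected_hex))   # raw uint64 draws, as stored in the CSV
    return all(int(r) == int(e, 16) for r, e in zip(raw, expected_hex))

# First two rows of pcg64-testset-2.csv above (seed 0x0):
print(check_testset(0x0, ["0xa30febcfd9c2825f", "0x4510bdf882d9d721"]))
```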
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv
new file mode 100644
index 00000000..39cef057
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xdf1ddcf1e22521fe
+1, 0xc71b2f9c706cf151
+2, 0x6922a8cc24ad96b2
+3, 0x82738c549beccc30
+4, 0x5e8415cdb1f17580
+5, 0x64c54ad0c09cb43
+6, 0x361a17a607dce278
+7, 0x4346f6afb7acad68
+8, 0x6e9f14d4f6398d6b
+9, 0xf818d4343f8ed822
+10, 0x6327647daf508ed6
+11, 0xe1d1dbe5496a262a
+12, 0xfc081e619076b2e0
+13, 0x37126563a956ab1
+14, 0x8bb46e155db16b9
+15, 0x56449f006c9f3fb4
+16, 0x34a9273550941803
+17, 0x5b4df62660f99462
+18, 0xb8665cad532e3018
+19, 0x72fc3e5f7f84216a
+20, 0x71d3c47f6fd59939
+21, 0xfd4218afa1de463b
+22, 0xc84054c78e0a9a71
+23, 0xae59034726be61a8
+24, 0xa6a5f21de983654d
+25, 0x3b633acf572009da
+26, 0x6a0884f347ab54c8
+27, 0x7a907ebe9adcab50
+28, 0xbe779be53d7b8d4a
+29, 0xf5976e8c69b9dcd1
+30, 0x1d8302f114699e11
+31, 0x7d37e43042c038a0
+32, 0x2cc1d4edc2a40f35
+33, 0x83e3347bb2d581f1
+34, 0x253f8698651a844d
+35, 0x4312dea0dd4e32f6
+36, 0x10f106439964ea3a
+37, 0x810eb374844868cc
+38, 0x366342a54b1978cc
+39, 0x9fb39b13aaddfb5e
+40, 0xdb91fd0d9482bed7
+41, 0x89f6ea4ca9c68204
+42, 0x146b31ccca461792
+43, 0x203fd9724deb2486
+44, 0x58a84f23748e25cb
+45, 0x2f20eb6aeb94e88
+46, 0x14d3581460e473c
+47, 0xad5bd0d25f37d047
+48, 0x1cf88fa16de258b2
+49, 0x3bcab6485b7a341
+50, 0xb2433b37f227d90c
+51, 0x2cffd7e0a8360cc8
+52, 0x5d2eeff7c9ebc847
+53, 0x6fd7c7ae23f9f64b
+54, 0x381650b2d00f175d
+55, 0x9d93edcedc873cae
+56, 0x56e369a033d4cb49
+57, 0x7547997116a3bac
+58, 0x11debaa897fd4665
+59, 0xdf799d2b73bd6fb8
+60, 0x3747d299c66624d
+61, 0xac9346701afd0cfa
+62, 0xac90e150fa13c7bf
+63, 0x85c56ad2248c2871
+64, 0xdea66bf35c45f195
+65, 0x59cf910ea079fb74
+66, 0x2f841bb782274586
+67, 0x9814df4384d92bd9
+68, 0x15bc70824be09925
+69, 0x16d4d0524c0503a3
+70, 0xf04ea249135c0cc7
+71, 0xa707ab509b7e3032
+72, 0x465459efa869e372
+73, 0x64cbf70a783fab67
+74, 0x36b3541a14ca8ed7
+75, 0x9a4dfae8f4c596bf
+76, 0x11d9a04224281be3
+77, 0xe09bbe6d5e98ec32
+78, 0xa6c60d908973aa0d
+79, 0x7c524c57dd5915c8
+80, 0xa810c170b27f1fdc
+81, 0xce5d409819621583
+82, 0xfe2ee3d5332a3525
+83, 0x162fb7c8b32045eb
+84, 0x4a3327156b0b2d83
+85, 0x808d0282f971064
+86, 0x2e6f04cf5ed27e60
+87, 0xaf6800699cca67a9
+88, 0xc7590aae7244c3bf
+89, 0x7824345f4713f5f9
+90, 0x8f713505f8fd059b
+91, 0x3d5b5b9bb6b1e80e
+92, 0x8674f45e5dc40d79
+93, 0xcb1e36846aa14773
+94, 0xe0ae45b2b9b778c1
+95, 0xd7254ce931eefcfb
+96, 0xef34e15e4f55ac0a
+97, 0xf17cc0ba15a99bc4
+98, 0x77bb0f7ffe7b31f1
+99, 0x6ee86438d2e71d38
+100, 0x584890f86829a455
+101, 0x7baf0d8d30ba70fe
+102, 0xb1ac8f326b8403ae
+103, 0xcc1963435c874ba7
+104, 0x9c483b953d1334ce
+105, 0xc0924bcbf3e10941
+106, 0x21bcc581558717b1
+107, 0x2c5ad1623f8d292b
+108, 0xa8ea110f6124557e
+109, 0x15f24a6c5c4c591
+110, 0x40fe0d9cd7629126
+111, 0xcfe8f2b3b081484d
+112, 0x891383f4b4cac284
+113, 0x76f2fcdef7fa845
+114, 0x4edd12133aed0584
+115, 0xd53c06d12308873d
+116, 0xf7f22882c17f86bf
+117, 0xfbaa4aad72f35e10
+118, 0x627610da2e3c0cc3
+119, 0x582b16a143634d9a
+120, 0x9b4a7f69ed38f4a0
+121, 0x2df694974d1e1cbe
+122, 0xe5be6eaafed5d4b
+123, 0xc48e2a288ad6605e
+124, 0xbcb088149ce27c2b
+125, 0x3cb6a7fb06ceecbe
+126, 0x516735fff3b9e3ac
+127, 0x5cbafc551ee5008d
+128, 0xee27d1ab855c5fd5
+129, 0xc99fb341f6baf846
+130, 0x7ad8891b92058e6d
+131, 0xf50310d03c1ac6c7
+132, 0x947e281d998cbd3e
+133, 0x1d4d94a93824fe80
+134, 0x5568b77289e7ee73
+135, 0x7d82d1b2b41e3c8b
+136, 0x1af462c7abc787b
+137, 0xcfd8dfe80bfae1ef
+138, 0xd314caeb723a63ea
+139, 0x1c63ddcfc1145429
+140, 0x3801b7cc6cbf2437
+141, 0xc327d5b9fdafddd3
+142, 0xe140278430ca3c78
+143, 0x4d0345a685cb6ef8
+144, 0x47640dc86e261ff9
+145, 0xab817f158523ebf4
+146, 0x37c51e35fbe65a6b
+147, 0xab090f475d30a178
+148, 0x4d3ec225bf599fc1
+149, 0xefd517b0041679b1
+150, 0x20ad50bca4da32c5
+151, 0x75e1f7cd07fad86d
+152, 0x348cf781ee655f4b
+153, 0x9375f0e5ffc2d2ec
+154, 0x7689082fd5f7279c
+155, 0x633e56f763561e77
+156, 0x9d1752d70861f9fd
+157, 0xa3c994b4e70b0b0f
+158, 0xabf7276a58701b88
+159, 0xbfa18d1a0540d000
+160, 0xc6a28a2475646d26
+161, 0x7cdf108583f65085
+162, 0x82dcefb9f32104be
+163, 0xc6baadd0adc6b446
+164, 0x7a63cff01075b1b4
+165, 0x67ac62e575c89919
+166, 0x96fa4320a0942035
+167, 0xc4658859385b325f
+168, 0xde22c17ff47808f6
+169, 0xbb952c4d89e2f2ec
+170, 0x638251fbc55bdc37
+171, 0x38918b307a03b3ea
+172, 0xccb60f2cedbb570b
+173, 0x3c06f4086a28f012
+174, 0x4e8d238388986e33
+175, 0x1760b7793514a143
+176, 0xa3f924efe49ee7d6
+177, 0xaf6be2dbaebc0bdf
+178, 0x6782682090dffe09
+179, 0xb63a4d90d848e8ef
+180, 0x5f649c7eaf4c54c5
+181, 0xbe57582426a085ba
+182, 0xb5dd825aa52fb76d
+183, 0x74cb4e6ca4039617
+184, 0x382e578bf0a49588
+185, 0xc043e8ea6e1dcdae
+186, 0xf902addd5c04fa7c
+187, 0xf3337994612528db
+188, 0x4e8fd48d6d15b4e6
+189, 0x7190a509927c07ab
+190, 0x864c2dee5b7108ae
+191, 0xbb9972ddc196f467
+192, 0x1ea02ab3ca10a448
+193, 0xe50a8ffde35ddef9
+194, 0x7bd2f59a67183541
+195, 0x5a940b30d8fcd27a
+196, 0x82b4cea62623d4d3
+197, 0x6fbda76d4afef445
+198, 0x8b1f6880f418328e
+199, 0x8b69a025c72c54b7
+200, 0xb71e0f3986a3835f
+201, 0xa4a7ddb8b9816825
+202, 0x945dcda28228b1d8
+203, 0xb471abf2f8044d72
+204, 0xf07d4af64742b1ba
+205, 0xfca5190bc4dd6a2a
+206, 0xd681497262e11bc5
+207, 0xbe95d5f00c577028
+208, 0x56313439fd8bde19
+209, 0x3f3d9ac9b5ee6522
+210, 0x7b8d457dd2b49bbe
+211, 0xe76b5747885d214b
+212, 0xa8a695b3deb493ea
+213, 0x5292446548c95d71
+214, 0xbf5cdf0d436412df
+215, 0x7936abaed779d28d
+216, 0x659c6e8073b3a06d
+217, 0x86c9ff28f5543b71
+218, 0x6faa748445a99146
+219, 0xdcc1e6ab57904fd7
+220, 0x770bd61233addc5f
+221, 0x16963e041e46d94f
+222, 0x158e6cb2934157ac
+223, 0xb65088a8fd246441
+224, 0x2b12ced6ce8a68c3
+225, 0x59a18d02cd6082b3
+226, 0x4ddbc318cb5488ee
+227, 0x3d4cf520b3ed20a1
+228, 0x7028b3a92e2b292d
+229, 0xf141da264a250e4d
+230, 0x9788d53e86041c37
+231, 0x1bb91238a7c97dbf
+232, 0x81953d0ddb634309
+233, 0xfa39ccfe14d2d46
+234, 0xf7c7861c9b7e8399
+235, 0x18d27ca50d9dc249
+236, 0x258dfdf38510d0d9
+237, 0x9e72d8af910ea76f
+238, 0x4f8ef24b96de50ad
+239, 0xb9d9c12297e03dc9
+240, 0x91994e41b4a1929c
+241, 0x8defa79b2ccc83b9
+242, 0x948566748706dac5
+243, 0x7b0454946e70e4cf
+244, 0x340b7cb298c70ed7
+245, 0x6602005330cebd95
+246, 0xf71cb803aa61f722
+247, 0x4683fb07fc70ae8a
+248, 0xc6db9f0c4de3ed88
+249, 0x3e8dfae2a593cef9
+250, 0x615f7c38e3862b33
+251, 0x676c7996550d857
+252, 0xc6d520d54a5c266a
+253, 0x202b1e8eef14aa2e
+254, 0xa3a84891a27a582
+255, 0x84dbee451658d47f
+256, 0x254c7cd97e777e3a
+257, 0xf50b6e977f0eba50
+258, 0x2898b1d3062a4798
+259, 0x4096f7cbbb019773
+260, 0x9fb8e75548062c50
+261, 0x4647071e5ca318ec
+262, 0x2b4750bdb3b3b01
+263, 0x88ac41cc69a39786
+264, 0x705e25476ef46fa3
+265, 0xc0c1db19884a48a6
+266, 0x1364c0afdbb465e5
+267, 0x58e98534701272a6
+268, 0x746a5ea9701517c0
+269, 0x523a70bc6b300b67
+270, 0x9b1c098eda8564ad
+271, 0xfbaeb28d3637067f
+272, 0xddd9a13551fdba65
+273, 0x56461a670559e832
+274, 0xab4fd79be85570ad
+275, 0xd4b691ecaff8ca55
+276, 0x11a4495939e7f004
+277, 0x40d069d19477eb47
+278, 0xe790783d285cd81e
+279, 0xde8218b16d935bc7
+280, 0x2635e8c65cd4182d
+281, 0xeae402623e3454
+282, 0x9f99c833184e0279
+283, 0x3d0f79a0d52d84e7
+284, 0xc1f8edb10c625b90
+285, 0x9b4546363d1f0489
+286, 0x98d86d0b1212a282
+287, 0x386b53863161200d
+288, 0xbe1165c7fe48a135
+289, 0xb9658b04dbbfdc8c
+290, 0xcea14eddfe84d71a
+291, 0x55d03298be74abe7
+292, 0x5be3b50d961ffd7e
+293, 0xc76b1045dc4b78e1
+294, 0x7830e3ff3f6c3d4c
+295, 0xb617adb36ca3729
+296, 0x4a51bdb194f14aa9
+297, 0x246024e54e6b682a
+298, 0x33d42fc9c6d33083
+299, 0xadccba149f31e1d
+300, 0x5183e66b9002f8b
+301, 0x70eb2416404d51b7
+302, 0x26c25eb225535351
+303, 0xbc2d5b0d23076561
+304, 0x5823019ddead1da
+305, 0x85cfa109fca69f62
+306, 0x26017933e7e1efd9
+307, 0x3ec7be9a32212753
+308, 0x697e8a0697cd6f60
+309, 0x44735f6cca03920f
+310, 0x8cc655eb94ee212e
+311, 0x8b8b74eba84929a0
+312, 0x7708ccedd0c98c80
+313, 0x1b6f21f19777cbe1
+314, 0x363e564bd5fadedb
+315, 0x5921543a641591fe
+316, 0xc390786d68ea8a1b
+317, 0x9b293138dc033fca
+318, 0x45447ca8dc843345
+319, 0xee6ef6755bc49c5e
+320, 0x70a3a1f5163c3be5
+321, 0xf05e25448b6343b0
+322, 0x4739f4f8717b7e69
+323, 0xb006141975bf957
+324, 0x31874a91b707f452
+325, 0x3a07f2c90bae2869
+326, 0xb73dae5499a55c5e
+327, 0x489070893bb51575
+328, 0x7129acf423940575
+329, 0x38c41f4b90130972
+330, 0xc5260ca65f5a84a1
+331, 0x6e76194f39563932
+332, 0x62ca1f9ca3de3ca6
+333, 0xb4a97874e640853f
+334, 0x38ed0f71e311cc02
+335, 0xde183b81099e8f47
+336, 0x9bb8bf8e6694346
+337, 0xd15497b6bf81e0f2
+338, 0xaaae52536c00111
+339, 0x4e4e60d1435aaafd
+340, 0x5a15512e5d6ea721
+341, 0xff0f1ffabfc6664f
+342, 0xba3ffcedc5f97fec
+343, 0xef87f391c0c6bfb6
+344, 0x4a888c5d31eb0f98
+345, 0x559a3fbfd7946e95
+346, 0xe45b44a0db5a9bad
+347, 0x9457898964190af1
+348, 0xd9357dfaab76cd9e
+349, 0xa60e907178d965a1
+350, 0x76b2dc3032dc2f4a
+351, 0x13549b9c2802120
+352, 0x8656b965a66a1800
+353, 0x16802e6e22456a23
+354, 0x23b62edc60efaa9
+355, 0x6832a366e1e4ea3b
+356, 0x46b1b41093ff2b1e
+357, 0x55c857128143f219
+358, 0x7fc35ddf5e138200
+359, 0x790abe78be67467e
+360, 0xa4446fc08babd466
+361, 0xc23d70327999b855
+362, 0x2e019d1597148196
+363, 0xfefd98e560403ab8
+364, 0xbe5f0a33da330d58
+365, 0x3078a4e9d43ca395
+366, 0x511bfedd6f12f2b3
+367, 0x8bc138e335be987c
+368, 0x24640f803465716d
+369, 0xf6530b04d0bd618f
+370, 0x9b7833e5aa782716
+371, 0x778cd35aea5841b1
+372, 0xecea3c458cefbc60
+373, 0x5107ae83fc527f46
+374, 0x278ad83d44bd2d1a
+375, 0x7014a382295aeb16
+376, 0xf326dd762048743f
+377, 0x858633d56279e553
+378, 0x76408154085f01bc
+379, 0x3e77d3364d02e746
+380, 0x2f26cea26cadd50b
+381, 0x6d6846a4ecb84273
+382, 0x4847e96f2df5f76
+383, 0x5a8610f46e13ff61
+384, 0x4e7a7cac403e10dd
+385, 0x754bdf2e20c7bc90
+386, 0x8bdd80e6c51bd0be
+387, 0x61c655fae2b4bc52
+388, 0x60873ef48e3d2f03
+389, 0x9d7d8d3698a0b4a4
+390, 0xdf48e9c355cd5d4b
+391, 0x69ecf03e20be99ac
+392, 0xc1a0c5a339bd1815
+393, 0x2e3263a6a3adccb
+394, 0x23557459719adbdc
+395, 0xd1b709a3b330e5a
+396, 0xade5ab00a5d88b9d
+397, 0x69a6bd644120cfad
+398, 0x40187ecceee92342
+399, 0x1c41964ba1ac78da
+400, 0x9ac5c51cbecabe67
+401, 0xbdc075781cf36d55
+402, 0xeaf5a32246ded56
+403, 0xcda0b67e39c0fb71
+404, 0x4839ee456ef7cc95
+405, 0xf17092fdd41d5658
+406, 0x2b5d422e60ae3253
+407, 0x3effe71102008551
+408, 0x20a47108e83934b7
+409, 0xd02da65fe768a88f
+410, 0xeb046bd56afa4026
+411, 0x70c0509c08e0fbe0
+412, 0x1d35c38d4f8bac6c
+413, 0x9aa8eb6466f392e0
+414, 0x587bd4a430740f30
+415, 0x82978fe4bad4195
+416, 0xdc4ebc4c0feb50ab
+417, 0xd3b7164d0240c06f
+418, 0x6e2ad6e5a5003a63
+419, 0xa24b430e2ee6b59c
+420, 0x2905f49fd5073094
+421, 0x5f209e4de03aa941
+422, 0x57b7da3e0bedb1dc
+423, 0x5e054018875b01f5
+424, 0xb2f2da6145658db3
+425, 0xbd9c94a69a8eb651
+426, 0x9c5f9a07cd6ac749
+427, 0x2296c4af4d529c38
+428, 0x522ed800fafdefab
+429, 0xe2a447ced0c66791
+430, 0x937f10d45e455fef
+431, 0xc882987d9e29a24
+432, 0x4610bfd6a247ee1a
+433, 0x562ba3e50870059
+434, 0x59d8d58793602189
+435, 0xfe9a606e3e34abe
+436, 0x6825f7932a5e9282
+437, 0xe77f7061bab476ad
+438, 0xbf42001da340ace3
+439, 0x9c3e9230f5e47960
+440, 0x2c0f700d96d5ad58
+441, 0x330048b7cd18f1f9
+442, 0xffc08785eca5cca9
+443, 0xb5879046915f07a5
+444, 0xef51fe26f83c988e
+445, 0xfa4c2968e7881a9a
+446, 0xc0a9744455a4aad
+447, 0xbd2ad686d6313928
+448, 0x6b9f0984c127682a
+449, 0xc9aaa00a5da59ed8
+450, 0x762a0c4b98980dbf
+451, 0x52d1a2393d3ca2d1
+452, 0x1e9308f2861db15c
+453, 0xe7b3c74fe4b4a844
+454, 0x485e15704a7fc594
+455, 0x9e7f67ea44c221f6
+456, 0xbab9ad47fde916e0
+457, 0x50e383912b7fc1f4
+458, 0xaad63db8abcef62d
+459, 0xc2f0c5699f47f013
+460, 0xee15b36ada826812
+461, 0x2a1b1cf1e1777142
+462, 0x8adb03ede79e937d
+463, 0xf14105ef65643bf3
+464, 0x752bbaefc374a3c7
+465, 0xa4980a08a5a21d23
+466, 0x418a1c05194b2db7
+467, 0xdd6ff32efe1c3cd6
+468, 0x272473ed1f0d3aa2
+469, 0x1e7fdebadabe6c06
+470, 0xd1baa90c17b3842f
+471, 0xd3d3a778e9c8404a
+472, 0x781ae7fda49fa1a0
+473, 0x61c44fdbdacc672d
+474, 0x6d447d0a1404f257
+475, 0x9303e8bdfbfb894d
+476, 0x3b3482cdec016244
+477, 0xb149bf245d062e7b
+478, 0x96f8d54b14cf992d
+479, 0x4741549a01f8c3d0
+480, 0x48270811b2992af
+481, 0x7b58f175cd25d147
+482, 0x8f19a840b56f4be9
+483, 0x84a77f43c0951a93
+484, 0x34e1a69381f0c374
+485, 0xb158383c9b4040f
+486, 0x372f1abc7cf3a9fa
+487, 0x5439819a84571763
+488, 0xabf8515e9084e2fa
+489, 0xb02312b9387ff99
+490, 0x238a85bb47a68b12
+491, 0x2068cb83857c49bb
+492, 0xc6170e743083664c
+493, 0x745cf8470bcb8467
+494, 0xe3a759a301670300
+495, 0x292c7686ad3e67da
+496, 0x359efedaff192a45
+497, 0x511f2c31a2d8c475
+498, 0x97fd041bf21c20b3
+499, 0x25ef1fe841b7b3f6
+500, 0xbb71739e656f262d
+501, 0x2729b0e989b6b7b8
+502, 0xd2142702ec7dbabf
+503, 0x7008decd2488ee3f
+504, 0x69daa95e303298d7
+505, 0xc35eca4efb8baa5a
+506, 0xf3f16d261cec3b6c
+507, 0x22371c1d75396bd3
+508, 0x7aefa08eccae857e
+509, 0x255b493c5e3c2a2f
+510, 0x779474a077d34241
+511, 0x5199c42686bea241
+512, 0x16c83931e293b8d3
+513, 0xa57fe8db8c0302c7
+514, 0xd7ace619e5312eb1
+515, 0x8740f013306d217c
+516, 0xb6a1ad5e29f4d453
+517, 0x31abf7c964688597
+518, 0xbc3d791daed71e7
+519, 0x31ee4ca67b7056ed
+520, 0x1ab5416bfe290ea3
+521, 0x93db416f6d3b843a
+522, 0xed83bbe5b1dd2fed
+523, 0xece38271470d9b6d
+524, 0x3a620f42663cd8ae
+525, 0x50c87e02acafee5d
+526, 0xcabeb8bedbc6dab5
+527, 0x2880a6d09970c729
+528, 0x4aba5dd3bfc81bc
+529, 0xaba54edf41080cec
+530, 0xb86bb916fc85a169
+531, 0x4c41de87bc79d8ca
+532, 0xcce2a202622945fe
+533, 0x513f086fad94c107
+534, 0x18b3960c11f8cc96
+535, 0x2f0d1cfd1896e236
+536, 0x1702ae3880d79b15
+537, 0x88923749029ae81
+538, 0x84810d4bdec668eb
+539, 0xf85b0a123f4fc68d
+540, 0x93efd68974b6e4d1
+541, 0x5d16d6d993a071c9
+542, 0x94436858f94ca43b
+543, 0xb3dbb9ed0cb180b6
+544, 0x6447030a010b8c99
+545, 0xd7224897c62925d8
+546, 0xb0c13c1d50605d3a
+547, 0xdff02c7cb9d45f30
+548, 0xe8103179f983570d
+549, 0xbc552037d6d0a24e
+550, 0x775e500b01486b0d
+551, 0x2050ac632c694dd6
+552, 0x218910387c4d7ae7
+553, 0xf83e8b68ff885d5d
+554, 0xe3374ec25fca51a3
+555, 0xfa750ffa3a60f3af
+556, 0x29ee40ba6df5592e
+557, 0x70e21a68f48260d2
+558, 0x3805ca72cd40886e
+559, 0x2f23e73f8eabf062
+560, 0x2296f80cdf6531ae
+561, 0x903099ed968db43a
+562, 0xf044445cf9f2929f
+563, 0xcd47fdc2de1b7a1
+564, 0xaab1cbd4f849da99
+565, 0x5fc990688da01acb
+566, 0xa9cee52ea7dab392
+567, 0xecefc3a4349283a8
+568, 0xdd6b572972e3fafc
+569, 0xc1f0b1a2ffb155da
+570, 0xc30d53fc17bd25c8
+571, 0x8afa89c77834db28
+572, 0x5569a596fb32896c
+573, 0x36f207fc8df3e3d4
+574, 0x57c2bd58517d81db
+575, 0xb524693e73d0061c
+576, 0xb69f6eb233f5c48b
+577, 0x4f0fb23cab8dc695
+578, 0x492c1ad0a48df8df
+579, 0xf6dcc348ec8dec1f
+580, 0xa4d8708d6eb2e262
+581, 0x4c2072c2c9766ff1
+582, 0xa9bf27c4304875f0
+583, 0xfc8fb8066d4f9ae2
+584, 0x188095f6235fec3c
+585, 0x1d8227a2938c2864
+586, 0x89ea50c599010378
+587, 0xcac86df0a7c6d56d
+588, 0x47a8c5df84c7d78
+589, 0xe607ae24ea228bfa
+590, 0x36624a7996efe104
+591, 0x5d72881c1227d810
+592, 0x78694a6750374c8
+593, 0x7b9a217d4ab5ff45
+594, 0xd53e5d6f7504becc
+595, 0x197a72d3f4889a0e
+596, 0xfdc70c4755a8df36
+597, 0xd0fda83748c77f74
+598, 0x7ddc919ac9d6dcc9
+599, 0x785c810a6a2dc08b
+600, 0xba4be83e7e36896c
+601, 0x379d6fe80cf2bffe
+602, 0x74cae2dabc429206
+603, 0x1efac32d5d34c917
+604, 0x3cb64e2f98d36e70
+605, 0xc0a7c3cdc3c60aa7
+606, 0x699dfadd38790ebe
+607, 0x4861e61b3ecfbeac
+608, 0x531744826c345baa
+609, 0x5ec26427ad450cba
+610, 0xf2c1741479abdcae
+611, 0xe9328a78b2595458
+612, 0x30cd1bdf087acd7f
+613, 0x7491ced4e009adbe
+614, 0xdcd942df1e2e7023
+615, 0xfe63f01689fee35
+616, 0x80282dfe5eaedc42
+617, 0x6ecdea86495f8427
+618, 0xe0adfdd5e9ed31c3
+619, 0xf32bd2a7418127e
+620, 0x8aabba078db6ee2
+621, 0xa8a8e60499145aca
+622, 0xf76b086ac4e8a0f2
+623, 0x6e55b3c452ff27f8
+624, 0xe18fa7cd025a71bf
+625, 0xeed7b685fde0fa25
+626, 0xba9b6c95867fa721
+627, 0x4c2603bc69de2df2
+628, 0xaac87eee1b58cd66
+629, 0x3c9af6656e01282c
+630, 0x2dfa05ce8ff476b6
+631, 0xeae9143fcf92f23d
+632, 0x3f0699f631be3bc8
+633, 0xa0f5f79f2492bd67
+634, 0x59c47722388131ed
+635, 0x5f6e9d2941cef1de
+636, 0xe9ad915c09788b7b
+637, 0x92c6d37e4f9482f5
+638, 0x57d301b7fdadd911
+639, 0x7e952d23d2a8443
+640, 0xbb2fa5e0704b3871
+641, 0xe5642199be36e2d5
+642, 0x5020b60d54358291
+643, 0xa0b6317ec3f60343
+644, 0xb57b08b99540bc5c
+645, 0x21f1890adc997a88
+646, 0xfcf824200dd9da2d
+647, 0x8146293d83d425d1
+648, 0xdadfbf5fbb99d420
+649, 0x1eb9bbc5e6482b7d
+650, 0xd40ff44f1bbd0f1c
+651, 0xa9f948ba2d08afa5
+652, 0x638cc07c5301e601
+653, 0x1f984baa606e14e8
+654, 0x44e153671081f398
+655, 0xb17882eeb1d77a5d
+656, 0x5fd8dbee995f14c
+657, 0xff3533e87f81b7fe
+658, 0x2f44124293c49795
+659, 0x3bf6b51e9360248
+660, 0x72d615edf1436371
+661, 0x8fc5cf4a38adab9d
+662, 0xfa517e9022078374
+663, 0xf356733f3e26f4d8
+664, 0x20ea099cdc6aad40
+665, 0xe15b977deb37637d
+666, 0xcc85601b89dae88d
+667, 0x5768c62f8dd4905c
+668, 0xa43cc632b4e56ea
+669, 0xc4240cf980e82458
+670, 0xb194e8ffb4b3eeb6
+671, 0xee753cf2219c5fa1
+672, 0xfe2500192181d44d
+673, 0x2d03d7d6493dd821
+674, 0xff0e787bb98e7f9b
+675, 0xa05cf8d3bd810ce7
+676, 0x718d5d6dcbbdcd65
+677, 0x8d0b5343a06931c
+678, 0xae3a00a932e7eaf9
+679, 0x7ed3d8f18f983e18
+680, 0x3bb778ee466dc143
+681, 0x711c685c4e9062c0
+682, 0x104c3af5d7ac9834
+683, 0x17bdbb671fb5d5cf
+684, 0xabf26caead4d2292
+685, 0xa45f02866467c005
+686, 0xf3769a32dc945d2d
+687, 0xe78d0007f6aabb66
+688, 0x34b60be4acbd8d4b
+689, 0x58c0b04b69359084
+690, 0x3a8bb354c212b1
+691, 0x6b82a8f3d70058d5
+692, 0x405bdef80a276a4a
+693, 0xe20ca40ee9195cad
+694, 0xf5dd96ba2446fefd
+695, 0xc1e180c55fe55e3c
+696, 0xa329caf6daa952b3
+697, 0xb4809dd0c84a6b0a
+698, 0xd27f82661070cee7
+699, 0xa7121f15ee2b0d8a
+700, 0x4bdaea70d6b34583
+701, 0xe821dc2f310f7a49
+702, 0x4c00a5a68e76f647
+703, 0x331065b064a2d5ea
+704, 0xac0c2ce3dc04fa37
+705, 0x56b32b37b8229008
+706, 0xe757cdb51534fcfa
+707, 0xd3ff183576b2fad7
+708, 0x179e1f4190f197a7
+709, 0xf874c626a7c9aae5
+710, 0xd58514ffc37c80e4
+711, 0xc65de31d33fa7fd3
+712, 0x6f6637052025769b
+713, 0xca1c6bdadb519cc0
+714, 0xd1f3534cde37828a
+715, 0xc858c339eee4830a
+716, 0x2371eacc215e02f4
+717, 0x84e5022db85bbbe9
+718, 0x5f71c50bba48610e
+719, 0xe420192dad9c323f
+720, 0x2889342721fca003
+721, 0x83e64f63334f501d
+722, 0xac2617172953f2c
+723, 0xfa1f78d8433938ff
+724, 0x5578382760051462
+725, 0x375d7a2e3b90af16
+726, 0xb93ff44e6c07552d
+727, 0xded1d5ad811e818c
+728, 0x7cf256b3b29e3a8c
+729, 0x78d581b8e7bf95e8
+730, 0x5b69192f2caa6ad3
+731, 0xa9e25855a52de3ce
+732, 0x69d8e8fc45cc188d
+733, 0x5dd012c139ad347d
+734, 0xfcb01c07b77db606
+735, 0x56253e36ab3d1cce
+736, 0x1181edbb3ea2192
+737, 0x325bef47ff19a08d
+738, 0xd3e231ceb27e5f7
+739, 0x8e819dd2de7956d2
+740, 0x34a9689fe6f84a51
+741, 0x3e4eeb719a9c2927
+742, 0x5c3b3440581d0aaf
+743, 0x57caf51897d7c920
+744, 0xec6a458130464b40
+745, 0xe98f044e0da40e9b
+746, 0xbe38662020eeb8e7
+747, 0x7b8c407c632724ae
+748, 0x16c7cfa97b33a544
+749, 0xd23359e2e978ae5a
+750, 0x4fdba458250933dd
+751, 0x3c9e0713cfe616ba
+752, 0x6f0df87b13163b42
+753, 0xc460902cb852cc97
+754, 0x289df8fefd6b0bce
+755, 0x4ac2a2a1c3fb8029
+756, 0x2fc3e24d8b68eef7
+757, 0x34564386a59aab9a
+758, 0x31047391ebd67ce4
+759, 0x6c23d070a0564d41
+760, 0xba6387b2b72545f7
+761, 0xcdcf1008058387af
+762, 0xc9308fa98db05192
+763, 0xdbdbb5abd01a9d84
+764, 0x937088275c7804ab
+765, 0x6f6accfefe34ee81
+766, 0x5c33c74c49cfdb2c
+767, 0x5e1a771edfb92bd3
+768, 0x6e89b009069ecae7
+769, 0x34d64e17ec0e8968
+770, 0x841203d0cde0c330
+771, 0x7642cc9d7eb9e9cb
+772, 0xca01d2e8c128b97e
+773, 0x5b8390617b3304ab
+774, 0x52ec4ed10de1eb2d
+775, 0xb90f288b9616f237
+776, 0x5bd43cd49617b2e2
+777, 0x1a53e21d25230596
+778, 0x36ccd15207a21cd6
+779, 0xc8263d780618fd3c
+780, 0x6eb520598c6ce1cb
+781, 0x493c99a3b341564f
+782, 0xab999e9c5aa8764f
+783, 0xab2fa4ceaba84b
+784, 0xbbd2f17e5cb2331b
+785, 0xc8b4d377c0cc4e81
+786, 0x31f71a6e165c4b1e
+787, 0xd1011e55fb3addaa
+788, 0x5f7ec34728dfa59
+789, 0x2aef59e60a84eb0f
+790, 0x5dde6f09aec9ad5f
+791, 0x968c6cdbc0ef0438
+792, 0x1957133afa15b13a
+793, 0xbaf28f27573a64c2
+794, 0xc6f6ddd543ebf862
+795, 0xdd7534315ec9ae1e
+796, 0xd2b80cd2758dd3b
+797, 0xa38c3da00cc81538
+798, 0x15c95b82d3f9b0f9
+799, 0x6704930287ce2571
+800, 0x9c40cc2f6f4ecb0c
+801, 0xc8de91f50b22e94e
+802, 0x39272e8fddbfdf0a
+803, 0x879e0aa810a117d
+804, 0xa312fff4e9e5f3bd
+805, 0x10dd747f2835dfec
+806, 0xeb8466db7171cdae
+807, 0xaa808d87b9ad040a
+808, 0xab4d2229a329243a
+809, 0x7c622f70d46f789c
+810, 0x5d41cef5965b2a8e
+811, 0xce97ec4702410d99
+812, 0x5beba2812c91211b
+813, 0xf134b46c93a3fec7
+814, 0x76401d5630127226
+815, 0xc55fc9d9eacd4ec1
+816, 0xaec8cefaa12f813f
+817, 0x2f845dcfd7b00722
+818, 0x3380ab4c20885921
+819, 0xdb68ad2597691b74
+820, 0x8a7e4951455f563f
+821, 0x2372d007ed761c53
+822, 0xcab691907714c4f1
+823, 0x16bc31d6f3abec1a
+824, 0x7dff639fbcf1824
+825, 0x6666985fbcff543d
+826, 0xb618948e3d8e6d0c
+827, 0x77b87837c794e068
+828, 0xcd48288d54fcb5a8
+829, 0x47a773ed6ae30dc3
+830, 0xba85ae44e203c942
+831, 0xa7a7b21791a25b2d
+832, 0x4029dd92e63f19e0
+833, 0xc2ad66ab85e7d5aa
+834, 0xa0f237c96fdab0db
+835, 0xffefb0ab1ca18ed
+836, 0x90cb4500785fd7d5
+837, 0xa7dd3120f4876435
+838, 0x53f7872624694300
+839, 0xea111326ff0040d9
+840, 0x5f83cb4cce40c83b
+841, 0x918e04936c3b504d
+842, 0x87a8db4c0e15e87c
+843, 0x7cff39da6a0dedd0
+844, 0x36f7de2037f85381
+845, 0xd1d8d94022a1e9a7
+846, 0x2c9930127dc33ec9
+847, 0x6cb4719dcd0101c6
+848, 0xc01868cde76935f7
+849, 0x6b86f2ec1ab50143
+850, 0x68af607d8d94ae61
+851, 0xe216c5b95feedf34
+852, 0x4b866bd91efe2e4b
+853, 0x4bff79df08f92c99
+854, 0x6ff664ea806acfd1
+855, 0x7fce0b3f9ece39bc
+856, 0x29bc90b59cb3db97
+857, 0x833c4b419198607d
+858, 0xf3573e36ca4d4768
+859, 0x50d71c0a3c2a3fa8
+860, 0xd754591aea2017e7
+861, 0x3f9126f1ee1ebf3
+862, 0xe775d7f4b1e43de8
+863, 0xe93d51628c263060
+864, 0x83e77f6fb32d6d82
+865, 0x43dd7eef823408e4
+866, 0x1c843c2c90180662
+867, 0xe924dafb9a16066b
+868, 0x6af3ee96e7b7fbd9
+869, 0x94d5c4f37befcd1f
+870, 0x40ffb04bedef4236
+871, 0x71c17bbc20e553e
+872, 0x101f7a0a6208729f
+873, 0x5ca34570cf923548
+874, 0x8e3139db2e96e814
+875, 0x3ab96d96263d048d
+876, 0x97f3c0bbc6755c3c
+877, 0x31fc72daedaef3dc
+878, 0x71f8d7855d10789b
+879, 0xce6dc97b4662333b
+880, 0xfddc2aabd342bc61
+881, 0xefbd4007ff8c7d2e
+882, 0xf72cd6c689ef8758
+883, 0x932c8b0c0e755137
+884, 0x94cc4dedd58ff69
+885, 0xde4dfd6890535979
+886, 0xdb00dcd2dcb4a50a
+887, 0xb0466240b4548107
+888, 0x9cb9264c7b90d1a3
+889, 0x357e378e9be5766b
+890, 0x6e0316ef03367bbf
+891, 0x201ea18839544ca
+892, 0x803ff3406be5f338
+893, 0xf9d5e82fd4144bb2
+894, 0x1b6b88ca701e9f47
+895, 0xd1fe5ab8e1f89cc0
+896, 0x14171fe176c4bece
+897, 0x887948bdef78beaa
+898, 0x80449ddc3eb9b977
+899, 0x5f4e1f900fb4bcf3
+900, 0xbe30f8701909f8e2
+901, 0xd1f2a2fb5503306d
+902, 0x6b1c77238dc23803
+903, 0x102156a6c9860f66
+904, 0x4cd446e099edf4c1
+905, 0xc79ac6cbc911f33b
+906, 0x3ee096ffe3384f1c
+907, 0xb58f83b18a306dc7
+908, 0x9f76582141de56b2
+909, 0x9ddfa85e02c13866
+910, 0x4d9a19d4ce90a543
+911, 0xbf81ab39fd17d376
+912, 0x5327e5054c6a74f1
+913, 0xd5062dd31db1a9b7
+914, 0x645853735527edc
+915, 0x485393967f91af08
+916, 0xeff9667dcf77ca68
+917, 0xd012313f5fbec464
+918, 0xbeae35bdfae55144
+919, 0x302c41ebac8444a0
+920, 0x9ccdb6c2fe58fba8
+921, 0x567753af68ed23f8
+922, 0xff90f790e43efec3
+923, 0x970cc756fb799696
+924, 0xe59239d1c44915
+925, 0x4d2d189fb3941f05
+926, 0x96f23085db165a9c
+927, 0xa1202dec7a37b1a5
+928, 0xc0c1ee74bcd7dc1a
+929, 0x9edcf2048b30333a
+930, 0xd848588ba7e865fb
+931, 0x8d9f0897317cab40
+932, 0x67b96f15e25924fb
+933, 0xefc8d8536619ee42
+934, 0xf3f621d22bdde0c2
+935, 0x68610a0de862ae32
+936, 0xa22ca5142de24cbd
+937, 0x8815452f4e6b4801
+938, 0x4e9c1b607b2750e5
+939, 0x19b3c09ba6fc9b25
+940, 0x9b2543c8836780ac
+941, 0xe702b8f950e56431
+942, 0xb357cc329cac3917
+943, 0x387bf86a17a31e08
+944, 0x9940b983d331b163
+945, 0xf5d89d7fe9095e18
+946, 0x4362682329e5c4d1
+947, 0xd2132573f6ae7b42
+948, 0xc0a5849e23a61606
+949, 0xdadbddf47265bc02
+950, 0x1b96f00339a705f7
+951, 0x94e6642329288913
+952, 0x825ab3f10e6d330b
+953, 0x1a1c31ac9d883ea0
+954, 0xb49076b7155c6f47
+955, 0x920cf3085dfe3ccb
+956, 0x9743407c9f28e825
+957, 0x6ce8a28622402719
+958, 0xce2fe67e06baf8a6
+959, 0x3a16b34784ecf5e6
+960, 0x140467cc1d162a0c
+961, 0x32d4772692ab625
+962, 0xa4f4b28562f43336
+963, 0x885b4335457bd84a
+964, 0x499d3ed26c87ad8a
+965, 0xc7328bcedb9a545e
+966, 0xc6dd76a6cbf5d2b2
+967, 0xba9c22be404ee1aa
+968, 0x70e6aee45f23521d
+969, 0x61e03a798593c177
+970, 0x171671f809c68213
+971, 0x28d54872fc1d914c
+972, 0x43c2fcd9bd098b53
+973, 0x172ad4c4a98b9d37
+974, 0x330860c9460f2516
+975, 0x49547f472df984f4
+976, 0x873b2436d3f0e114
+977, 0x6f99accf4ea050b6
+978, 0x5968ac874ed51613
+979, 0x4939d70d29a3c611
+980, 0x11f381ed28738d3d
+981, 0xa97430d36ab3a869
+982, 0xe6fa880801129e22
+983, 0xf84decbd8f48c913
+984, 0x4425c0ed1e9a82a5
+985, 0x7a1f9485e9929d5a
+986, 0xc7c51f155dfce1c6
+987, 0x9619a39501d74f2b
+988, 0x7c7035955dbf4c1b
+989, 0xc61ee569cf57c2c9
+990, 0x3eaf7c5b0df734e1
+991, 0xe71cb4064d1ede05
+992, 0x356e3cec80e418b2
+993, 0xca04306243a15be6
+994, 0x941cf3881fa18896
+995, 0x30dbb0e819d644e0
+996, 0xaae22c0bef02859a
+997, 0x7bd30917bbaa8a94
+998, 0x2672547bc8d7d329
+999, 0x4955c92aaa231578
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv
new file mode 100644
index 00000000..878c5ea7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/pcg64dxsm-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0xd97e4a147f788a70
+1, 0x8dfa7bce56e3a253
+2, 0x13556ed9f53d3c10
+3, 0x55dbf1c241341e98
+4, 0xa2cd98f722eb0e0a
+5, 0x83dfc407203ade8
+6, 0xeaa083df518f030d
+7, 0x44968c87e432852b
+8, 0x573107b9cb8d9ecc
+9, 0x9eedd1da50b9daca
+10, 0xb33a6735ca451e3c
+11, 0x72830d2b39677262
+12, 0x9da8c512fd0207e8
+13, 0x1fc5c91954a2672b
+14, 0xd33479437116e08
+15, 0x9ccdd9390cee46f3
+16, 0x1fd39bb01acd9e76
+17, 0xedc1869a42ff7fe5
+18, 0xbd68ca0b42a6e7e9
+19, 0x620b67df09621b1f
+20, 0xfa11d51bd6950221
+21, 0xc8c45b36e7d28d08
+22, 0xe9c91272fbaad777
+23, 0x2dc87a143f220e90
+24, 0x6376a7c82361f49d
+25, 0x552c5e434232fe75
+26, 0x468f7f872ac195bc
+27, 0x32bed6858125cf89
+28, 0xe4f06111494d09d3
+29, 0xa5c166ffea248b80
+30, 0x4e26605b97064a3f
+31, 0xceafd9f6fc5569d
+32, 0xb772f2f9eed9e106
+33, 0x672c65e6a93534e2
+34, 0xcdc5e1a28d1bd6a0
+35, 0x1ed9c96daeebd3e3
+36, 0x4d189dcfc0c93c3f
+37, 0x50df5a95c62f4b43
+38, 0xcccf4949fa65bbb8
+39, 0x19b8073d53cdc984
+40, 0x6fb40bba35483703
+41, 0xb02de4aef86b515a
+42, 0x4d90c63655350310
+43, 0xea44e4089825b16c
+44, 0x8d676958b1f9da2b
+45, 0x6d313940917ae195
+46, 0x1b1d35a4c1dd19f4
+47, 0x117720f8397337ef
+48, 0xcc073cf3ac11eeaa
+49, 0x8331ec58a9ff8acb
+50, 0xf3dc2a308b6b866f
+51, 0x7eba1202663382b6
+52, 0x8269839debeb4e5a
+53, 0x87fd3dc0f9181a8e
+54, 0xabe62ddd3c925f03
+55, 0x7f56f146944fe8d4
+56, 0xc535972150852068
+57, 0x60b252d453bd3a68
+58, 0x4251f0134634490a
+59, 0x338950da210dfeb2
+60, 0xcadfe932971c9471
+61, 0xfb7049457fab470e
+62, 0x9bfb8145a4459dff
+63, 0x4a89dda3898f9d8a
+64, 0x88cc560151483929
+65, 0x277dc820f4b6796e
+66, 0x3524bd07ea0afb88
+67, 0x92eb6ffb2bf14311
+68, 0xf6559be0783f3fe9
+69, 0xf0844f9af54af00d
+70, 0xdd5e0b59adcef8a
+71, 0x4ff7e4f2ab18554c
+72, 0x3fa22c8a02634587
+73, 0x1db8e1a9442fe300
+74, 0x40cf15953ad3d3e7
+75, 0x92af15fe1a9f6f0a
+76, 0xab4a0e466fb0cfd
+77, 0x944f1555a06cca82
+78, 0x10cf48412f1f6066
+79, 0x7f51f9a455f9e8e1
+80, 0x47ee93530f024c7e
+81, 0x36cf2f0413e0f6f2
+82, 0xa315e23731969407
+83, 0xd8e2796327cf5f87
+84, 0xa86072696a555c34
+85, 0xee3f0b8804feaab7
+86, 0x41e80dc858f8360b
+87, 0x31ec2e9b78f5b29
+88, 0xd397fb9b8561344c
+89, 0x28081e724e649b74
+90, 0x5c135fc3fc672348
+91, 0x9a276ca70ce9caa0
+92, 0x9216da059229050a
+93, 0xcf7d375ed68007b0
+94, 0xa68ad1963724a770
+95, 0xd4350de8d3b6787c
+96, 0xee7d2c2cc275b6d2
+97, 0x71645ec738749735
+98, 0x45abdf8c68d33dbb
+99, 0xe71cadb692c705ea
+100, 0x60af6f061fd90622
+101, 0x1eabe2072632c99d
+102, 0x947dda995a402cb6
+103, 0xbb19f49a3454f3b
+104, 0xe6e43e907407758c
+105, 0xfe2b67016bd6873a
+106, 0x7fdb4dd8ab30a722
+107, 0x39d3265b0ff1a45b
+108, 0xed24c0e4fce8d0c2
+109, 0xf6e074f86faf669d
+110, 0x9142040df8dc2a79
+111, 0x9682ab16bc939a9c
+112, 0x6a4e80c378d971c8
+113, 0x31309c2c7fc2d3d6
+114, 0xb7237ec682993339
+115, 0x6a30c06bb83dccd9
+116, 0x21c8e9b6d8e7c382
+117, 0x258a24ae6f086a19
+118, 0xb76edb5be7df5c35
+119, 0x3c11d7d5c16e7175
+120, 0xbdfc34c31eff66e1
+121, 0x8af66e44be8bf3a2
+122, 0x3053292e193dec28
+123, 0xd0cc44545b454995
+124, 0x408ac01a9289d56
+125, 0x4e02d34318ec2e85
+126, 0x9413ff3777c6eb6b
+127, 0xa3a301f8e37eb3df
+128, 0x14e6306bd8d8f9f9
+129, 0xd3ea06ce16c4a653
+130, 0x170abe5429122982
+131, 0x7f9e6fddc6cacb85
+132, 0xa41b93e10a10a4c8
+133, 0x239216f9d5b6d0b5
+134, 0x985fcb6cb4190d98
+135, 0xb45e3e7c68f480c6
+136, 0xc1b2fc2e0446211c
+137, 0x4596adb28858c498
+138, 0x2dd706f3458ddc75
+139, 0x29c988c86f75464
+140, 0xac33a65aa679a60
+141, 0xa28fef762d39d938
+142, 0x541e6fa48647f53
+143, 0x27838d56b2649735
+144, 0x8e143d318a796212
+145, 0xaea6097745f586b8
+146, 0x636143330f8ee2e6
+147, 0xc2d05fd8b945b172
+148, 0x6e355f9eb4353055
+149, 0xeb64ca42e8bf282e
+150, 0xe8202dfd9da0fe5
+151, 0x7305689c9d790cba
+152, 0xf122f8b1bef32970
+153, 0x9562887e38c32ba5
+154, 0xf9cd9be121b738d
+155, 0x6238e0c398307913
+156, 0x5f2e79bb07c30f47
+157, 0x8ce8e45c465006e
+158, 0x39281fe1e99e2441
+159, 0xafb10c2ca2874fea
+160, 0x6e52f91633f83cf
+161, 0x8ff12c1ac73c4494
+162, 0xe48608a09365af59
+163, 0xefd9bbc7e76e6a33
+164, 0xbe16a39d5c38ec92
+165, 0x6a6ffbcaf5a2330f
+166, 0xdd5d6ac7d998d43d
+167, 0x207bf978226d4f11
+168, 0xf8eec56bd2a0f62e
+169, 0xa5bccf05dce0d975
+170, 0x93cf3ec1afe457a6
+171, 0x38651466d201f736
+172, 0x3ad21473985c9184
+173, 0xc6407a3bd38c92a6
+174, 0xb1ec42c7afa90a25
+175, 0xbdeca984df8b7dd3
+176, 0xb6926b1d00aa6c55
+177, 0x86141d0022352d49
+178, 0x169316256135ee09
+179, 0xffb1c7767af02a5c
+180, 0x502af38ad19f5c91
+181, 0xfbf6cbc080086658
+182, 0x33cf9b219edae501
+183, 0x46e69bebd77b8862
+184, 0xf11e0cc91125d041
+185, 0xb4cd1649f85e078f
+186, 0xb49be408db4e952
+187, 0xb0b8db46140cce3c
+188, 0xba647f2174012be7
+189, 0x4f0a09e406970ac9
+190, 0xf868c7aec9890a5c
+191, 0xde4c8fa7498ea090
+192, 0x872ceb197978c1d4
+193, 0x1eb5cd9c3269b258
+194, 0x3ea189f91724f014
+195, 0x41379656f7746f2c
+196, 0x7bd18493aca60e51
+197, 0x5380c23b0cbbf15e
+198, 0x920b72835f88246b
+199, 0x24d7f734a4548b8e
+200, 0x9944edb57e5aa145
+201, 0x4628e136ebb8afe1
+202, 0xb4ee6a776356e2a7
+203, 0x481cbe9744ccf7d7
+204, 0x7e8d67e8b0b995d9
+205, 0xeeacde100af7b47e
+206, 0x103da08f2487dab7
+207, 0x6b9890a91d831459
+208, 0xd0c5beae37b572c7
+209, 0xfdccc371ee73fcc
+210, 0x65438f0a367a2003
+211, 0x5d23b2c818a7e943
+212, 0x9a8ed45ac04b58b3
+213, 0xdaf3c3f1695dce10
+214, 0x5960eec706fa2bc0
+215, 0x98ca652facb80d40
+216, 0x72970ae5e2194143
+217, 0x18c6374d878c5c94
+218, 0x20fa51f997381900
+219, 0x3af253dba26d6e1d
+220, 0x1b23d65db15c7f78
+221, 0x9f53ae976259b0e3
+222, 0x9a6addb28dc92d49
+223, 0x1e085c4accd0a7d7
+224, 0xe9d3f4cc9bad6ce5
+225, 0xe018fad78b5b1059
+226, 0x5ef7682232b4b95
+227, 0xb2242aa649f5de80
+228, 0x8f3e6d8dd99b9e4e
+229, 0xb9be6cc22949d62a
+230, 0xecbdc7beaa5ff1fe
+231, 0xd388db43a855bdf0
+232, 0xd71ee3238852568d
+233, 0x85ab3056304c04b5
+234, 0x2ed7ae7ad3cfc3cb
+235, 0x781d1b03d40b6c48
+236, 0x7d3c740886657e6d
+237, 0x982cfa6828daa6b0
+238, 0x278579599c529464
+239, 0x773adecfae9f0e08
+240, 0x63a243ea4b85c5d7
+241, 0x59940074fc3709e1
+242, 0xc914a2eed58a6363
+243, 0x2602b04274dd724c
+244, 0xdf636eb7636c2c42
+245, 0x891a334d0d26c547
+246, 0xde8cd586d499e22d
+247, 0x3ea1aa4d9b7035b6
+248, 0xd085cff6f9501523
+249, 0xe82a872f374959e
+250, 0x55cb495bbd42cc53
+251, 0x5f42b3226e56ca97
+252, 0xea463f6f203493a3
+253, 0xeef3718e57731737
+254, 0x1bd4f9d62b7f9f3c
+255, 0x19284f5e74817511
+256, 0xaf6e842c7450ca87
+257, 0x1d27d2b08a6b3600
+258, 0xfb4b912b396a52e3
+259, 0x30804d4c5c710121
+260, 0x4907e82564e36338
+261, 0x6441cf3b2900ddb7
+262, 0xd76de6f51988dc66
+263, 0x4f298ef96fd5e6d2
+264, 0x65432960c009f83d
+265, 0x65ebed07e1d2e3df
+266, 0xf83ee8078febca20
+267, 0x7bb18e9d74fc5b29
+268, 0x597b5fbc2261d91
+269, 0xea4f8ed0732b15b2
+270, 0xba2267f74f458268
+271, 0x3f304acabd746bbb
+272, 0x7bd187af85659a82
+273, 0x88e20dbdb7a08ea3
+274, 0x2a2dc948c772fcb4
+275, 0x87784fec2993c867
+276, 0x89163933cd362d4e
+277, 0xfd7b24f04302f957
+278, 0x9bdd544405dfb153
+279, 0xddee0fac58ffc611
+280, 0xa8e8993417e71ec1
+281, 0x55e0ab46ff7757af
+282, 0x53e7645f08d3d7df
+283, 0xbf78e563bc656ba2
+284, 0x1d162253b45ee2de
+285, 0x15e2bfefedf29eb4
+286, 0x4e2a4584aa394702
+287, 0xa89fb12b01525897
+288, 0x825bd98f0544e4df
+289, 0xfc6c50da6750700
+290, 0xc24aaabde7d28423
+291, 0x79d6f4660fcb19e5
+292, 0xee7d4fb40c8d659f
+293, 0x70bc281b462e811d
+294, 0x23ed4dc9636519a7
+295, 0xcb7c3f5a5711b935
+296, 0xe73090e0508c5d9d
+297, 0xb25a331f375952a6
+298, 0xa64c86e0c04740f6
+299, 0xb8f3ffc8d56ac124
+300, 0x2479266fc5ee6b15
+301, 0x8d5792d27f5ffbcb
+302, 0xb064298be946cd52
+303, 0xf0934a98912ffe26
+304, 0xbe805682c6634d98
+305, 0xe0e6e2c010012b4f
+306, 0x58c47d475f75976
+307, 0x358c9a6e646b2b4a
+308, 0x7e7c4ffca5b17ba7
+309, 0x43585c8c9a24a04c
+310, 0x5154ddbcd68d5c2c
+311, 0x4a2b062d3742a5e
+312, 0xca5691191da2b946
+313, 0x696a542109457466
+314, 0x9eb5d658a5022ba5
+315, 0x8158cf6b599ab8dc
+316, 0x1b95391eaa4af4a6
+317, 0x9953e79bd0fc3107
+318, 0x8639690086748123
+319, 0x2d35781c287c6842
+320, 0x393ef0001cd7bc8f
+321, 0xe3a61be8c5f2c22a
+322, 0x5e4ff21b847cc29b
+323, 0x4c9c9389a370eb84
+324, 0xd43a25a8fc3635fa
+325, 0xf6790e4a85385508
+326, 0x37edf0c81cb95e1d
+327, 0x52db00d6e6e79af8
+328, 0x3b202bceeb7f096
+329, 0x2a164a1c776136bb
+330, 0x73e03ee3fd80fd1b
+331, 0xd2c58c0746b8d858
+332, 0x2ed2cb0038153d22
+333, 0x98996d0fc8ceeacc
+334, 0xa4ed0589936b37f
+335, 0x5f61cf41a6d2c172
+336, 0xa6d4afb538c110d7
+337, 0xe85834541baadf1a
+338, 0x4c8967107fd49212
+339, 0x49bafb762ab1a8c1
+340, 0x45d540e2a834bf17
+341, 0x1c0ec8b4ed671dac
+342, 0x3d503ce2c83fe883
+343, 0x437bfffd95f42022
+344, 0xc82d1e3d5c2bc8d2
+345, 0x7a0a9cbfcb0d3f24
+346, 0xc0a4f00251b7a3be
+347, 0xb5be24e74bb6a1c6
+348, 0xa3104b94b57545b1
+349, 0x86de7d0c4b97b361
+350, 0x879c1483f26538a6
+351, 0xd74c87557f6accfb
+352, 0x2f9be40dbf0fe8a1
+353, 0x445a93398f608d89
+354, 0x7b3cb8a7211d7fdc
+355, 0xe86cc51290d031e7
+356, 0x33ef3594052ad79f
+357, 0xc61911d241dbb590
+358, 0x37cccb0c0e3de461
+359, 0xb75259124080b48b
+360, 0xd81e8961beb4abe5
+361, 0xf4542deb84a754e
+362, 0x6ea036d00385f02e
+363, 0xa7b60b0ac3b88681
+364, 0x108a6c36ca30baf5
+365, 0x4a2adc5bbfe2bf07
+366, 0x4079501f892a5342
+367, 0x55e113963c5448f0
+368, 0x8019ff4903b37242
+369, 0x109c6dcdb7ec6618
+370, 0x1239ac50944da450
+371, 0xe1399c7f94c651c1
+372, 0x5a6bbbae388d365a
+373, 0x4d72be57b8810929
+374, 0x3f067df24384e1fb
+375, 0x4f8b9e0f7f6c7be
+376, 0x202492c342a3b08
+377, 0x250753192af93a3
+378, 0xfba1159d9de2cb8e
+379, 0xba964497ab05505c
+380, 0x1329ec5d8a709dca
+381, 0x32927cacb6cd22bb
+382, 0x6b4d7db904187d56
+383, 0xe76adccf8e841e02
+384, 0x8c4bf4b6a788202
+385, 0x3013a3b409831651
+386, 0x7427d125c475412f
+387, 0x84dcc4bb2bf43202
+388, 0x117526f1101372a5
+389, 0xfe95d64b8984bd72
+390, 0x524e129934cc55c1
+391, 0xc3db4b0418c36d30
+392, 0xe1cb2047e9c19f7a
+393, 0xea43d6c8d8982795
+394, 0xe80ac8a37df89ed
+395, 0xfecc2104329ed306
+396, 0xa5c38aac9c1d51ea
+397, 0x3abe5d1c01e4fe17
+398, 0x717a805d97fcc7ac
+399, 0x94441f8207a1fb78
+400, 0x22d7869c5f002607
+401, 0x349e899f28c3a1b9
+402, 0x5639950cdea92b75
+403, 0x7e08450497c375b
+404, 0x94bf898b475d211d
+405, 0x75c761a402375104
+406, 0x1930920ec9d2a1e7
+407, 0xb774ba1bc6f6e4e2
+408, 0xf715602412e5d900
+409, 0x87bb995f4a13f0ba
+410, 0xa3c787868dfa9c8d
+411, 0xa17fd42a5a4f0987
+412, 0x4a9f7d435242b86
+413, 0x240364aff88f8aef
+414, 0xe7cd4cf4bf39f144
+415, 0xd030f313ca4c2692
+416, 0xc46696f4e03ec1e9
+417, 0x22c60f1ec21060b3
+418, 0x16c88058fd68986f
+419, 0x69ca448e8e6bde3f
+420, 0x3466c2cdec218abd
+421, 0x837ac4d05e6b117d
+422, 0x911210e154690191
+423, 0x9ece851d6fa358b7
+424, 0x42f79cb0c45e7897
+425, 0xbf7583babd7c499b
+426, 0x2059fe8031c6e0b9
+427, 0xabbec8fc00f7e51d
+428, 0x88809d86a3a256e1
+429, 0xd36056df829fdcb5
+430, 0x515632b6cb914c64
+431, 0xba76d06c2558874
+432, 0x632c54ca4214d253
+433, 0xadec487adf2cb215
+434, 0x521e663e1940513d
+435, 0xb1b638b548806694
+436, 0xbe2d5bfbe57d2c72
+437, 0x8b89e7719db02f7
+438, 0x90ba5281c1d56e63
+439, 0x899e1b92fceea102
+440, 0xf90d918e15182fa6
+441, 0x94a489ce96c948c4
+442, 0xad34db453517fcd4
+443, 0xc5264eb2de15930f
+444, 0x101b4e6603a21cee
+445, 0xef9b6258d6e85fff
+446, 0x6075c7d6c048bd7a
+447, 0x6f03232c64e438aa
+448, 0x18c983d7105ee469
+449, 0x3ffc23f5c1375879
+450, 0xbc1b4a00afb1f9f
+451, 0x5afa6b2bb8c6b46e
+452, 0xe7fce4af2f2c152a
+453, 0x5b00ab5c4b3982c7
+454, 0x2d4b0c9c0eb4bd0c
+455, 0x61d926270642f1f2
+456, 0x7219c485c23a2377
+457, 0x7e471c752fecd895
+458, 0x23c4d30a4d17ba1f
+459, 0x65cb277fe565ca22
+460, 0xcbb56ed9c701363b
+461, 0xfd04ab3a6eba8282
+462, 0x19c9e5c8bab38500
+463, 0xea4c15227676b65b
+464, 0x20f3412606c8da6f
+465, 0xb06782d3bf61a239
+466, 0xf96e02d5276a9a31
+467, 0x835d256b42aa52a6
+468, 0x25b09151747f39c1
+469, 0x64507386e1103eda
+470, 0x51cbc05716ef88e4
+471, 0x998cd9b7989e81cc
+472, 0x9d7115416bec28d1
+473, 0xc992ca39de97906b
+474, 0xd571e6f7ca598214
+475, 0xafc7fb6ccd9abbf8
+476, 0x88ef456febff7bf4
+477, 0xdbe87ccc55b157d2
+478, 0xaab95e405f8a4f6d
+479, 0xad586a385e74af4f
+480, 0x23cd15225c8485aa
+481, 0x370940bf47900ac7
+482, 0xefd6afda1a4b0ead
+483, 0x9cb1a4c90993dd7a
+484, 0xff7893e8b2f70b11
+485, 0xb09e1807c0638e8e
+486, 0xb10915dcb4978f74
+487, 0x88212ab0051a85eb
+488, 0x7af41b76e1ec793f
+489, 0x2e5c486406d3fefd
+490, 0xebe54eff67f513cc
+491, 0xab6c90d0876a79b8
+492, 0x224df82f93fe9089
+493, 0xc51c1ce053dc9cd2
+494, 0x5ef35a4d8a633ee7
+495, 0x4aca033459c2585f
+496, 0xd066932c6eefb23d
+497, 0x5309768aab9a7591
+498, 0xa2a3e33823df37f9
+499, 0xcec77ff6a359ee9
+500, 0x784dc62d999d3483
+501, 0x84e789fb8acc985d
+502, 0xd590237e86aa60f
+503, 0x737e2ffe1c8ad600
+504, 0xc019c3a39a99eab8
+505, 0x6a39e9836964c516
+506, 0xe0fe43129535d9da
+507, 0xdfc5f603d639d4de
+508, 0x7b9a7d048a9c03b6
+509, 0xbb5aa520faa27fdd
+510, 0x2a09b4200f398fa2
+511, 0x38cc88107904064e
+512, 0xa9a90d0b2d92bb25
+513, 0x9419762f87e987e3
+514, 0x1a52c525153dedcd
+515, 0xc26d9973dd65ae99
+516, 0x8e89bd9d0dc6e6a1
+517, 0x2f30868dc01bfb53
+518, 0x20f09d99b46501c4
+519, 0x78b468a563b8f1e9
+520, 0xcccf34b0b6c380c7
+521, 0xf554e7dc815297e6
+522, 0x332a585cfb4a50ef
+523, 0xa9fb64a2b6da41d7
+524, 0xdcd2a5a337391ce0
+525, 0x8a9bd3e324c6463d
+526, 0x9f4487d725503bdd
+527, 0xf72282d82f1d0ff
+528, 0x308f4160abb72d42
+529, 0x648de1db3a601b08
+530, 0x36cab5192e7ebd39
+531, 0x7975fbe4ab6a1c66
+532, 0xd515b4d72243864e
+533, 0x43a568f8b915e895
+534, 0x15fa9f2057bdb91d
+535, 0x7a43858ef7a222dc
+536, 0x17b4a9175ac074fe
+537, 0xa932c833b8d0f8f8
+538, 0x1d2db93a9a587678
+539, 0x98abd1d146124d27
+540, 0xf0ab0431671740aa
+541, 0xa9d182467540ad33
+542, 0x41c8a6cfc331b7fc
+543, 0xa52c6bd0fcd1d228
+544, 0x2773c29a34dc6fa3
+545, 0x3098230746fc1f37
+546, 0xd63311bb4f23fabe
+547, 0x6712bf530cd2faec
+548, 0x342e8f342e42c4dd
+549, 0xfbd83331851cdcad
+550, 0xe903be1361bbc34d
+551, 0xd94372e5077e3ef9
+552, 0x95aaa234f194bd8
+553, 0x20c0c8fb11e27538
+554, 0xfaf47dc90462b30b
+555, 0x8ddc6d144147682a
+556, 0xf626833fd926af55
+557, 0x5df93c34290d1793
+558, 0xb06a903e6e9fca5e
+559, 0x10c792dc851d77ca
+560, 0xd9b1b817b18e56cb
+561, 0x3a81730c408eb408
+562, 0x65052c04a8d4b63c
+563, 0x3328546598e33742
+564, 0xeca44a13f62d156d
+565, 0x69f83d1d86b20170
+566, 0x937764200412027d
+567, 0xc57eb1b58df0f191
+568, 0xa1c7d67dce81bc41
+569, 0x8e709c59a6a579ce
+570, 0x776a2f5155d46c70
+571, 0xd92906fbbc373aa5
+572, 0xe97ad478a2a98bf6
+573, 0xc296c8819ac815f
+574, 0x613ede67ba70e93e
+575, 0xe145222498f99cde
+576, 0xafcdfa7a3c1cf9bf
+577, 0x1c89252176db670d
+578, 0xad245eda5c0865ff
+579, 0x249463d3053eb917
+580, 0xc9be16d337517c0b
+581, 0xefcc82bf67b8f731
+582, 0x1e01577d029e0d00
+583, 0xad9c24b2a4f3d418
+584, 0xed2cceb510db4d0f
+585, 0xbddadcdb92400c70
+586, 0x67d6b0476ef82186
+587, 0xbc7662ff7bf19f73
+588, 0x9d94452a729e6e92
+589, 0x6b278d8594f55428
+590, 0x6c4b31cceb1b2109
+591, 0xccc6c3a726701e9
+592, 0x6bc28ece07df8925
+593, 0xc0422b7bf150ccc4
+594, 0xab7158f044e73479
+595, 0xdf3347546d9ed83f
+596, 0x3b3235a02c70dff4
+597, 0x2551c49c14ea8d77
+598, 0xee2f7f5bb3cc228e
+599, 0x39b87bfe8c882d39
+600, 0x7dd420fad380b51c
+601, 0xffe64976af093f96
+602, 0x4a4f48dc6e7eaa5f
+603, 0x85f2514d32fdc8cc
+604, 0x1ab1215fd7f94801
+605, 0x4cd1200fc795b774
+606, 0xcf8af463a38942ee
+607, 0x319caa7ce3022721
+608, 0x8cd9798a76d1aea4
+609, 0x2bd3933ac7afd34e
+610, 0x85d4c323403cf811
+611, 0xd7b956d3064efa30
+612, 0x67a078dbf1f13068
+613, 0x665fa6c83e87c290
+614, 0x9333ac2416d2469b
+615, 0xdfb1fd21a0094977
+616, 0xa1962a6e2c25f8ff
+617, 0x1f3b10a7ed5287cf
+618, 0x70641efb3d362713
+619, 0xe527a2cf85d00918
+620, 0x9741e45d3f9890a3
+621, 0x6cb74b5d4d36db4b
+622, 0xf24734d622bd2209
+623, 0xadd6d94f78e9d378
+624, 0xc3bbdb59225cca7f
+625, 0x5ad36614275b30cd
+626, 0x495568dd74eea434
+627, 0xf35de47e0ffe1f2d
+628, 0xefa209dca719ab18
+629, 0x844ddcaeb5b99ae8
+630, 0x37449670a1dc7b19
+631, 0x5a4612c166f845c1
+632, 0xe70f7782f2087947
+633, 0x98d484deac365721
+634, 0x705302198cf52457
+635, 0x7135ae0f5b77df41
+636, 0x342ac6e44a9b6fc3
+637, 0x2713fd2a59af5826
+638, 0x6e1a3f90f84efa75
+639, 0x9fb3b4dd446ca040
+640, 0x530044ae91e6bd49
+641, 0xe984c4183974dc3e
+642, 0x40c1fa961997d066
+643, 0xb7868250d8c21559
+644, 0x8bc929fa085fd1de
+645, 0x7bdb63288dc8733e
+646, 0xac4faad24326a468
+647, 0x1c6e799833aea0b1
+648, 0xcc8a749e94f20f36
+649, 0x4e7abfd0443547c5
+650, 0xb661c73bb8caa358
+651, 0x4a800f5728ff2351
+652, 0x8c15e15189b9f7ed
+653, 0xab367846b811362c
+654, 0x4ba7508f0851ca2a
+655, 0xe9af891acbafc356
+656, 0xbdebe183989601f8
+657, 0x4c665ea496afc061
+658, 0x3ca1d14a5f2ed7c
+659, 0xfbdff10a1027dd21
+660, 0xdfd28f77c8cff968
+661, 0xc4fbaadf8a3e9c77
+662, 0xdac7e448b218c589
+663, 0xb26390b5befd19e2
+664, 0xd2ef14916c66dba9
+665, 0xfab600284b0ff86b
+666, 0xf04a1c229b58dabb
+667, 0xc21c45637e452476
+668, 0xd1435966f75e0791
+669, 0xc1f28522eda4a2d0
+670, 0x52332ae8f1222185
+671, 0x81c6c0790c0bf47e
+672, 0xfebd215e7d8ffb86
+673, 0x68c5dce55dbe962b
+674, 0x231d09cb0d2531d1
+675, 0x3218fba199dbbc6b
+676, 0x8f23c535f8ea0bf6
+677, 0x6c228963e1df8bd9
+678, 0x9843c7722ed153e3
+679, 0xd032d99e419bddec
+680, 0xe2dca88aa7814cab
+681, 0x4d53fb8c6a59cdc2
+682, 0x8fb3abc46157b68b
+683, 0xa3e733087e09b8e
+684, 0x6bdc1aee029d6b96
+685, 0x4089667a8906d65b
+686, 0x8f3026a52d39dd03
+687, 0x6d2e0ccb567bae84
+688, 0x74bad450199e464
+689, 0xf114fb68a8f300d5
+690, 0xc7a5cc7b374c7d10
+691, 0xf0e93da639b279d1
+692, 0xb9943841ad493166
+693, 0x77a69290455a3664
+694, 0x41530da2ebea054b
+695, 0xe8f9fab03ea24abf
+696, 0xaa931f0c9f55a57a
+697, 0xb4d68a75d56f97ae
+698, 0x3d58ff898b6ba297
+699, 0x49d81e08faf5a3f5
+700, 0xfc5207b9f3697f3b
+701, 0xa25911abb3cf19b7
+702, 0x6b8908eb67c3a41
+703, 0xd63ef402e2e3fa33
+704, 0x728e75d3f33b14c5
+705, 0x248cb1b8bc6f379a
+706, 0x3aa3d6d2b8c72996
+707, 0x49cc50bd2d3d2860
+708, 0xb4e1387647c72075
+709, 0x435a1630a4a81ed3
+710, 0xa5ea13005d2460cf
+711, 0xc7a613df37d159ec
+712, 0x95721ccc218b857e
+713, 0xd4b70d8c86b124d3
+714, 0x2b82bcc4b612d494
+715, 0xaf13062885276050
+716, 0xcbd8fcf571a33d9c
+717, 0x3f7f67ca1125fc15
+718, 0xddf4bb45aac81b4c
+719, 0x23606da62de9c040
+720, 0xa3a172375666b636
+721, 0x292f87387a6c6c3c
+722, 0xd1d10d00c5496fe1
+723, 0x86b0411ce8a25550
+724, 0x38e0487872e33976
+725, 0x363e49f88ddfd42c
+726, 0x45bdf1e9f6b66b0a
+727, 0x8a6fff3de394f9b5
+728, 0x8502158bb03f6209
+729, 0x22e24d16dba42907
+730, 0x3fe3ba427cc2b779
+731, 0x77144793f66b3d7e
+732, 0xcf8912ccb29b8af9
+733, 0xdc856caff2abd670
+734, 0xe6d3ae0b0d9d4c8b
+735, 0xb8f5d40e454c539f
+736, 0x79ca953114fbc6b7
+737, 0x478d6f4bbfa38837
+738, 0x9babae1a3ffdc340
+739, 0x40edd56802bae613
+740, 0x97a56c2dcccf0641
+741, 0xafc250257f027f8e
+742, 0x8da41ef1edf69125
+743, 0x6574b0280ff9d309
+744, 0x197c776151b8f820
+745, 0x6b03e077c9dac3b6
+746, 0x24a40ebbc5c341c5
+747, 0x50e585169a6a1c4b
+748, 0x37783a5a6a3e4e02
+749, 0xb3de81ee6fbad647
+750, 0xf4f292f57ca4591e
+751, 0x6214e9e7d44d30a
+752, 0x5920190c56d21c12
+753, 0x9ac163419b5e0c9b
+754, 0xfc2328761ae8ed93
+755, 0xc68f945b545508c6
+756, 0x687c49a17ce0a5e2
+757, 0x276d8f53d30d4ab4
+758, 0x8201804970343ce1
+759, 0x1b5d323cc2e7fb7e
+760, 0x6f351ef04fd904b
+761, 0x6c793a7d455d5198
+762, 0x46f5d108430ae91f
+763, 0xac16a15b2a0cf77f
+764, 0xa0d479d9e4122b9d
+765, 0x3afd94604307f19
+766, 0x2573ed6d39d38dbf
+767, 0xa58e14ba60b4294b
+768, 0xe69c1aed5840d156
+769, 0x4cf6fda7f04855c2
+770, 0x2fb65a56ef5f22da
+771, 0xf95819434d5dc220
+772, 0x29c65133623dafba
+773, 0x8e997bd018467523
+774, 0xfd08ba9d498461a7
+775, 0xdd52243bc78a5592
+776, 0x39c30108f6db88b3
+777, 0x38af8e1894f259b9
+778, 0x97eedf3b4ae5f6de
+779, 0x757825add80c5ece
+780, 0xf0fdd90ac14edb14
+781, 0xbbb19d4cc8cac6d4
+782, 0x9a82234edfae05e3
+783, 0x704401c61d1edf1c
+784, 0x8b0eb481fb3a1fb2
+785, 0xef6f36e7cc06c002
+786, 0x7a208b17e04b8cd7
+787, 0xf20e33d498838fe9
+788, 0xc2bdb22117058326
+789, 0x6ec31939eb4ca543
+790, 0x6f1654838f507a21
+791, 0xc65ab81a955d2b93
+792, 0x40b1420fdd9531b8
+793, 0xe31f221cab9f4f40
+794, 0x798cdd414c1deb7a
+795, 0x9c84e9c7d41cd983
+796, 0x63d6b1ae3b60b7fa
+797, 0xb42bfdd1a2f78ffa
+798, 0x37e431eaccaaa8e9
+799, 0x7508142a0f73eac9
+800, 0x91662a023df5893a
+801, 0x59782070e2fe3031
+802, 0xb2acd589a8ce7961
+803, 0xa224743fa877b292
+804, 0xaa5362aa27e6ed9e
+805, 0xa394a4e520c0c1c7
+806, 0xe49b16d2018ffb6f
+807, 0xb8074b9f2f1e762b
+808, 0xcf5f86143d5c23a7
+809, 0xfd838785db987087
+810, 0x31b1889df389aff8
+811, 0x30aaca876a4383b
+812, 0x1731bb71c4c38d4f
+813, 0x9a83a65395e05458
+814, 0x99cd0c8d67c8f4fc
+815, 0xfbd9fdc849b761a5
+816, 0x82c04834fc466889
+817, 0xdeef9d6e715e8c97
+818, 0x549c281c16da6078
+819, 0x2d70661254ad599d
+820, 0x57995793a72acac
+821, 0xf1727005116183ba
+822, 0xa22bb38945285de3
+823, 0x4f2d687fe45131ff
+824, 0x5666c87ddbbc981f
+825, 0xbcb4b2d4e7a517d0
+826, 0x5e794dd2e20b785d
+827, 0x449ad020149e093c
+828, 0x7704ee0412d106f5
+829, 0x83cbdf257b072ac1
+830, 0xae5c4fc9f638b0da
+831, 0x7b9e5a64e372ed47
+832, 0x7eddbbb22c2cdf57
+833, 0x3f19ebfa155b08e
+834, 0x91d991154dfd7177
+835, 0x611ae74b952d387f
+836, 0x3fdf7a335bda36ee
+837, 0xdf182433fc7a7c05
+838, 0x62c78598d1f8db0a
+839, 0xc3750c69d2c5c1f0
+840, 0xf1318024709efdee
+841, 0xaa3fd360d224dc29
+842, 0x62af53b2f307c19
+843, 0xdf527683c58120c2
+844, 0x3281deecc496f93d
+845, 0x4f704ad31527ef08
+846, 0x127a14a5e07cfdfc
+847, 0x90d0b1f549255c92
+848, 0xbc3406b212c5e1fc
+849, 0x4e89f39379dba91d
+850, 0x1290ef43c4998e6e
+851, 0xecfeb1a1cb1c6e1b
+852, 0x2067e90403003bf1
+853, 0x38ae04be30bdbeba
+854, 0x8a3537f298baedda
+855, 0xd07f3b825cdb2936
+856, 0xea020b5aebae8b45
+857, 0xfcd614ab031132b0
+858, 0x5fb682a4ff2268f5
+859, 0xd1c4662ce65596f4
+860, 0x7026b8270dd0b8dc
+861, 0x8101ec4b4beae45a
+862, 0xa0e9dc87940610a6
+863, 0x83ec33679d83165b
+864, 0x981847ca82e86d41
+865, 0xda84c188a304a0b7
+866, 0x3c37529c5a5bbbb8
+867, 0x34a8491ce3e19a5a
+868, 0xd36ad716a2fa6cb8
+869, 0xfd1d1d6a5189a15c
+870, 0x9716eb47851e8d8d
+871, 0x7dfb13ea3b15c5aa
+872, 0xbdf6e707f45113a5
+873, 0xb8118261b04bd097
+874, 0x6191f9895881bec6
+875, 0x7aac257ae11acf9b
+876, 0x35a491e1537ff120
+877, 0xe078943432efa71c
+878, 0xb3338485dd3dc2b9
+879, 0x456060975d2bb3b5
+880, 0xaddc4c451bdfc44c
+881, 0x18bfa7beacf96430
+882, 0x8802ebcaf0f67498
+883, 0xad922a5a825bd780
+884, 0x9fb4587d748f4efa
+885, 0xdb2a445136cd5e7
+886, 0xb98b3676ea8e96ac
+887, 0xb02d8d244d784878
+888, 0xa1a8442b18860abb
+889, 0x6a3029ba1361e5d1
+890, 0xf426d5fac161eb1
+891, 0xfa5ac2b87acecb23
+892, 0xaa659896e50535df
+893, 0xf40dd7a3d3c5c8ed
+894, 0x3f8367abecb705bc
+895, 0x2d60e7525873358f
+896, 0xc4a9d3948a0c3937
+897, 0x5ecc04fef6003909
+898, 0x7a865004918cba2
+899, 0x47ae110a678ec10b
+900, 0xa0f02f629d91aa67
+901, 0x4848b99e7fac9347
+902, 0xaa858346d63b80ac
+903, 0xeb5bf42ee161eeef
+904, 0x4d35d723d3c6ba37
+905, 0xdf22ca6ca93b64a7
+906, 0x9d198520f97b25b1
+907, 0x3068415350778efe
+908, 0xf3709f2e8793c2fe
+909, 0xd1517bac8dd9f16f
+910, 0xfb99bccaa15861dc
+911, 0xa9ad607d796a2521
+912, 0x55d3793d36bd22e4
+913, 0xf99270d891ff7401
+914, 0x401750a5c4aa8238
+915, 0xd84b3003e6f28309
+916, 0x8a23798b5fa7c98b
+917, 0xadd58bbc8f43e399
+918, 0xbd8c741ada62c6a8
+919, 0xbdc6937bc55b49fa
+920, 0x4aefa82201b8502
+921, 0x17adf29a717b303
+922, 0xa6ed2197be168f6c
+923, 0x1ba47543f4359a95
+924, 0xe34299949ac01ae9
+925, 0x711c76cffc9b62f3
+926, 0xbac259895508a4b7
+927, 0x3c8b3b3626b0d900
+928, 0x1a8d23fbe2ae71bf
+929, 0xca984fa3b5a5c3a1
+930, 0xb1986ab7521a9c93
+931, 0xd6b5b2c8d47a75b5
+932, 0xc7f1c4a88afb4957
+933, 0xdeb58033a3acd6cc
+934, 0xabe49ddfe1167e67
+935, 0x8d559c10205c06e3
+936, 0xea07a1a7de67a651
+937, 0xcbef60db15b6fef8
+938, 0xbfca142cff280e7
+939, 0x362693eba0732221
+940, 0x7463237e134db103
+941, 0x45574ddb5035e17a
+942, 0xfc65e0cb9b94a1aa
+943, 0x3154c55f1d86b36d
+944, 0x2d93a96dd6ab2d8b
+945, 0xbe3bc1d1f2542a25
+946, 0xdd4b541f7385bdaa
+947, 0x3b56b919d914e3f8
+948, 0x82fd51468a21895f
+949, 0x8988cf120731b916
+950, 0xa06a61db5fb93e32
+951, 0x6ed66c1b36f68623
+952, 0x875ae844d2f01c59
+953, 0x17ccd7ac912e5925
+954, 0x12fe2a66b8e40cb1
+955, 0xf843e5e3923ad791
+956, 0xa17560f2fd4ef48
+957, 0x27a2968191a8ee07
+958, 0xa9aab4d22ff44a3c
+959, 0x63cd0dcc3bb083ae
+960, 0x7a30b48c6160bf85
+961, 0x956160fb572503b3
+962, 0xc47f6b7546640257
+963, 0xaf4b625f7f49153
+964, 0x2f5c86a790e0c7e8
+965, 0xb52e0610ae07f0b8
+966, 0x38a589292c3d849e
+967, 0xc3e9ef655d30b4ef
+968, 0xb5695f765cda998a
+969, 0xde5d5e692a028e91
+970, 0x839476721555f72e
+971, 0x48b20679b17d9ebf
+972, 0xe3d4c6b2c26fb0df
+973, 0xce5a9834f0b4e71f
+974, 0x533abb253d5d420e
+975, 0x9eac5ad9aed34627
+976, 0xc0f2a01ab3c90dbb
+977, 0x6528eda93f6a066c
+978, 0xc16a1b625e467ade
+979, 0x1a4a320fb5e8b098
+980, 0x8819cccd8b4ab32f
+981, 0x42daa88531fd0bfd
+982, 0xcf732226409be17c
+983, 0xfddcdb25ccbf378c
+984, 0x9b15b603bf589fc1
+985, 0x2436066b95d366fe
+986, 0x8d42eff2e9cbda90
+987, 0x694b2fc8a4e8303c
+988, 0x8e207f98aaea3ccd
+989, 0x4730d7a620f822d9
+990, 0x468dc9ca30fe2fd4
+991, 0x74b36d8a1c0f031b
+992, 0x3c1aac1c488c1a94
+993, 0x19d0101042444585
+994, 0x8ec50c56d0c8adf4
+995, 0x721ec629e4d66394
+996, 0x3ca5ad93abeac4a4
+997, 0xaaebc76e71592623
+998, 0x969cc319e3ed6058
+999, 0xc0a277e3b2bfc3de
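(Editorial note, not part of the diff: the testset above pairs a seed with the first 1000 raw 64-bit outputs of the bit generator. A minimal sketch of regenerating it follows; it assumes numpy >= 1.21 for `PCG64DXSM` and assumes the data was produced via `BitGenerator.random_raw` seeded directly with the CSV's seed, as numpy's `test_direct.py` does when consuming these files.)

```python
import numpy as np
from numpy.random import PCG64DXSM

bg = PCG64DXSM(0)           # matches the "seed, 0x0" header row above
raw = bg.random_raw(1000)   # one raw 64-bit output per CSV data row
# Under the assumptions stated above, row 0 of the testset should match:
print(hex(raw[0]))          # expected: 0xd97e4a147f788a70
```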
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-1.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-1.csv
new file mode 100644
index 00000000..e448cbf7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xedc95200e2bd66a5
+1, 0x581d4e43b7682352
+2, 0x4be7278f5e373eab
+3, 0xee47f17991a9e7ea
+4, 0x38a7d2ae422f2e2c
+5, 0xe2a6730a3b4a8a15
+6, 0x1588b7a841486442
+7, 0x13ad777246700504
+8, 0x14d157e0f5e18204
+9, 0xd87c22a7ee8c13f1
+10, 0x30cc389ce3542ba1
+11, 0xb8a53348955bb2e9
+12, 0xc08802e3c454f74f
+13, 0xb444f627671a5780
+14, 0x4b6dd42b29cbf567
+15, 0x6109c7dc0bc5f7d5
+16, 0x85c954715d6b5b1e
+17, 0x646178d3d9a3a5d5
+18, 0xebbde42b1cd83465
+19, 0x3d015102f6bc9c1a
+20, 0x720fe2ec3798d5fd
+21, 0x93120961289ceb2e
+22, 0xc9207e960a56fae2
+23, 0xa7f042f31d991b98
+24, 0x5fac117415fae74b
+25, 0xd0a970ba8dddc287
+26, 0x84b4e7e51b43106
+27, 0x6ad02bf525ea265f
+28, 0xcdc7e5992b36ef8f
+29, 0x44d4985209261d60
+30, 0x628c02d50f4b902e
+31, 0xc7b1914922d1e76d
+32, 0xfde99ff895cba51d
+33, 0x175a0be050fa985f
+34, 0x47297d3699e03228
+35, 0xccf1e9aeaa3339cd
+36, 0x9fdd18ebeeaf15b1
+37, 0x7c94c9ab68747011
+38, 0x612d8ef22c1fa80f
+39, 0x13f52b860de89ab5
+40, 0x81f264b8c139c43b
+41, 0x8d017ba4ef1e85ba
+42, 0x6d0556f46219951e
+43, 0x8ee7b85663cf67b6
+44, 0x2432fc707645fe67
+45, 0xaf814046051e5941
+46, 0x4d432a83739ac76f
+47, 0x59e5060d0983ccdd
+48, 0xdd20e828b83d9b53
+49, 0x1b891800d7385f4c
+50, 0x10e86a026c52ff5e
+51, 0xb932f11723f7b90c
+52, 0xb2413d0a1f3582d0
+53, 0xe7cd4edda65fc6b5
+54, 0x6d3808848d56593b
+55, 0x192a727c3c7f47d9
+56, 0x9659d8aea5db8c16
+57, 0x4242c79fe2c77c16
+58, 0x605f90c913827cea
+59, 0x53e153c8bfc2138a
+60, 0xed2158fbdef5910e
+61, 0xae9e6e29d4cb5060
+62, 0x7dd51afaad3b11ce
+63, 0x2b9ba533d01a5453
+64, 0x7e0e9cf2b6c72c8
+65, 0x1cc8b3c7747ed147
+66, 0x9b102651e2e11b48
+67, 0x30b0b53cbaac33ea
+68, 0x70c28aec39b99b85
+69, 0x5f1417ff536fdb75
+70, 0x3a1d91abd53acf58
+71, 0xba116a1772168259
+72, 0xf5369bc9bd284151
+73, 0x67bf11373bf183ca
+74, 0xef0b2d44dbd33dc7
+75, 0xbfd567ee1a2953ed
+76, 0x7d373f2579b5e5c6
+77, 0x756eeae7bcdd99be
+78, 0x75f16eb9faa56f3b
+79, 0x96d55ded2b54b9a5
+80, 0x94495191db692c24
+81, 0x32358bdd56bab38c
+82, 0x3f6b64078576579
+83, 0x7177e7948bc064c9
+84, 0x2cbf23f09ba9bc91
+85, 0x9b97cc31c26645f5
+86, 0x5af2d239ff9028b1
+87, 0x316fa920e0332abe
+88, 0x46535b7d1cae10a0
+89, 0x21f0a6869298022c
+90, 0xf395c623b12deb14
+91, 0x8573995180675aa7
+92, 0xc3076509f4dc42d5
+93, 0x15e11e49760c6066
+94, 0xe8a6d311e67a021d
+95, 0x7482f389c883339b
+96, 0xda6f881573cba403
+97, 0xb110ffb847e42f07
+98, 0x2c3393140605ccf9
+99, 0xba1c8ba37d8bdc33
+100, 0x59adf43db7a86fe0
+101, 0xb4fcbf6aa585ca85
+102, 0xd794a93c18033fa6
+103, 0x6e839c01985f9d4
+104, 0x64065bf28222b2c7
+105, 0x6a6359b293fa0640
+106, 0x5ff610969e383e44
+107, 0xa8172c263f05c7f7
+108, 0x62a0172e8bd75d07
+109, 0x7be66e3c453b65ac
+110, 0x6a3b8d5a14014292
+111, 0xa2583e6087450020
+112, 0xd5d3ecc480c627d2
+113, 0xa24e83f1eec8a27c
+114, 0xa23febd2a99ee75a
+115, 0x9a5fbf91c7310366
+116, 0x5b63156932e039b
+117, 0x942af3c569908505
+118, 0x89a850f71ab6a912
+119, 0xfeadc803ac132fe9
+120, 0x67bf60e758250f3
+121, 0x533c25103466a697
+122, 0xb7deede3482f9769
+123, 0x325e043b53bba915
+124, 0x9e8d9e7fde132006
+125, 0x6bacc6860bbc436e
+126, 0xb3ea0534c42b1c53
+127, 0xb2389334db583172
+128, 0xa74b1bfbf5242ee4
+129, 0x53a487e2dc51d15c
+130, 0xe5a3b538d2c7a82e
+131, 0x7b6c70bb0c4cadaf
+132, 0xae20791b2081df1
+133, 0xc685c12e3c61d32c
+134, 0x60110e6b0286e882
+135, 0x49682119c774045c
+136, 0x53dc11a3bbd072e
+137, 0xbdc87c6e732d9c2d
+138, 0xcc4620861ebac8fd
+139, 0x7e9c3558759350cc
+140, 0x157408dee34891ba
+141, 0x9bcad1855b80651b
+142, 0xd81b29141d636908
+143, 0x1ed041a9f319c69d
+144, 0x805b2f541208b490
+145, 0x484ef3bba2eb7c66
+146, 0xb6b5e37d50a99691
+147, 0xabc26a7d9e97e85f
+148, 0xcba2a3cce0417c2f
+149, 0xa030dfffd701993c
+150, 0x2bf2dc50582ebf33
+151, 0xd9df13dd3eb9993e
+152, 0x31ca28b757232ae5
+153, 0x614562a0ccf37263
+154, 0x44d635b01725afbb
+155, 0x5ae230bc9ca9cd
+156, 0xb23a124eb98705c6
+157, 0x6395675444981b11
+158, 0xd97314c34119f9ca
+159, 0x9de61048327dd980
+160, 0x16bac6bded819707
+161, 0xcea3700e3e84b8c7
+162, 0xaa96955e2ee9c408
+163, 0x95361dcc93b5bc99
+164, 0x306921aed3713287
+165, 0x4df87f3130cd302a
+166, 0x37c451daeb6a4af5
+167, 0x8dbbe35f911d5cc1
+168, 0x518157ce61cb10f9
+169, 0x669f577aebc7b35b
+170, 0x4b0a5824a8786040
+171, 0x519bc3528de379f5
+172, 0x6128012516b54e02
+173, 0x98e4f165e5e6a6dd
+174, 0x6404d03618a9b882
+175, 0x15b6aeb3d9cd8dc5
+176, 0x87ed2c1bae83c35b
+177, 0x8377fc0252d41278
+178, 0x843f89d257a9ba02
+179, 0xcdda696ea95d0180
+180, 0xcfc4b23a50a89def
+181, 0xf37fd270d5e29902
+182, 0xafe14418f76b7efa
+183, 0xf984b81577076842
+184, 0xe8c60649ccb5458d
+185, 0x3b7be8e50f8ff27b
+186, 0xaa7506f25cef1464
+187, 0x5e513da59f106688
+188, 0x3c585e1f21a90d91
+189, 0x1df0e2075af292a
+190, 0x29fdd36d4f72795f
+191, 0xb162fe6c24cb4741
+192, 0x45073a8c02bd12c4
+193, 0xcbaaa395c2106f34
+194, 0x5db3c4c6011bc21c
+195, 0x1b02aac4f752e377
+196, 0xa2dfb583eb7bec5
+197, 0xfe1d728805d34bb1
+198, 0xf647fb78bb4601ec
+199, 0xd17be06f0d1f51ef
+200, 0x39ec97c26e3d18a0
+201, 0xb7117c6037e142c8
+202, 0xe3a6ce6e6c71a028
+203, 0xe70a265e5db90bb2
+204, 0x24da4480530def1e
+205, 0xfd82b28ce11d9a90
+206, 0x5bf61ead55074a1d
+207, 0xbe9899c61dec480d
+208, 0xae7d66d21e51ec9e
+209, 0x384ee62c26a08419
+210, 0x6648dccb7c2f4abf
+211, 0xc72aa0c2c708bdc9
+212, 0x205c5946b2b5ba71
+213, 0xd4d8d0b01890a812
+214, 0x56f185493625378d
+215, 0x92f8072c81d39bd0
+216, 0xa60b3ceecb3e4979
+217, 0xfcf41d88b63b5896
+218, 0xf5a49aa845c14003
+219, 0xffcc7e99eee1e705
+220, 0xdd98312a7a43b32d
+221, 0xa6339bd7730b004
+222, 0xdac7874ba7e30386
+223, 0xadf6f0b0d321c8
+224, 0x126a173ae4ffa39f
+225, 0x5c854b137385c1e7
+226, 0x8173d471b1e69c00
+227, 0x23fa34de43581e27
+228, 0x343b373aef4507b1
+229, 0xa482d262b4ea919c
+230, 0xf7fbef1b6f7fbba
+231, 0xd8ce559487976613
+232, 0xbf3c8dd1e6ebc654
+233, 0xda41ed375451e988
+234, 0xf54906371fd4b9b3
+235, 0x5b6bb41231a04230
+236, 0x866d816482b29c17
+237, 0x11315b96941f27dc
+238, 0xff95c79205c47d50
+239, 0x19c4fff96fbdac98
+240, 0xbfb1ae6e4131d0f4
+241, 0x9d20923f3cdb82c9
+242, 0x282175507c865dff
+243, 0xdfd5e58a40fe29be
+244, 0xedbd906ff40c8e4f
+245, 0x11b04fc82614ccb3
+246, 0xeceb8afda76ae49f
+247, 0xa4856913847c2cdf
+248, 0x6f1425f15a627f2a
+249, 0xdf144ffedf60349e
+250, 0x392d7ecfd77cc65f
+251, 0x72b8e2531049b2c6
+252, 0x5a7eb2bdb0ec9529
+253, 0xdcfd4306443e78c1
+254, 0x89ad67ed86cd7583
+255, 0x276b06c0779a6c8f
+256, 0xb2dbb723196a0ac3
+257, 0x66c86a3b65906016
+258, 0x938348768a730b47
+259, 0x5f5282de938d1a96
+260, 0xa4d4588c4b473b1f
+261, 0x8daed5962be4796f
+262, 0x9dde8d796985a56e
+263, 0x46be06dbd9ed9543
+264, 0xdf98286ceb9c5955
+265, 0xa1da1f52d7a7ca2b
+266, 0x5a7f1449f24bbd62
+267, 0x3aedc4e324e525fd
+268, 0xced62464cd0154e1
+269, 0x148fc035e7d88ce3
+270, 0x82f8878948f40d4c
+271, 0x4c04d9cdd6135c17
+272, 0xdf046948d86b3b93
+273, 0x2f0dec84f403fe40
+274, 0xa61954fb71e63c0d
+275, 0x616d8496f00382e8
+276, 0x162c622472746e27
+277, 0x43bcfe48731d2ceb
+278, 0xff22432f9ff16d85
+279, 0xc033ed32bb0ad5a4
+280, 0x5d3717cc91c0ce09
+281, 0x7a39a4852d251075
+282, 0x61cd73d71d6e6a6
+283, 0xe37e2ea4783ab1a5
+284, 0x60e1882162579ea8
+285, 0x9258ec33f1a88e00
+286, 0x24b32acf029f0407
+287, 0x1410fc9aea6d3fac
+288, 0x6054cf2a3c71d8f7
+289, 0x82f7605157a66183
+290, 0x3b34c1c0dff9eac5
+291, 0xfebe01b6d5c61819
+292, 0x7372187c68b777f2
+293, 0xc6923812cda479f0
+294, 0x386613be41b45156
+295, 0x92cfebe8cc4014b
+296, 0x8e13c4595849828b
+297, 0x90e47390d412291f
+298, 0x6b21a1d93d285138
+299, 0xbf5b1f5922f04b12
+300, 0x21e65d1643b3cb69
+301, 0xf7683b131948ac3c
+302, 0xe5d99fc926196ed2
+303, 0x7b138debbec90116
+304, 0x8a2650a75c2c2a5c
+305, 0x20689a768f9b347b
+306, 0xdfa2900cfb72dc6e
+307, 0x98959c3855611cc2
+308, 0x5fdb71b89596cc7c
+309, 0x1c14ac5c49568c7b
+310, 0x958c4293016091fe
+311, 0x7484522eb0087243
+312, 0xc4018dfb34fc190f
+313, 0xca638567e9888860
+314, 0x102cd4805f0c0e89
+315, 0xcc3bc438e04548f8
+316, 0xb808944bb56ea5be
+317, 0xffd4778dbf945c57
+318, 0xfe42617784c0233b
+319, 0x3eccbfeae9b42d3c
+320, 0xd9f1b585fd0bfa60
+321, 0x5c063d1b2705d5dd
+322, 0x8e8bec3519941b64
+323, 0x9e94c36cbec2a42
+324, 0x1cd19f5b64ffd3ad
+325, 0x9632e3aebfc68e66
+326, 0x98960c2d9da4ae45
+327, 0xb76994b1f2bbfc1f
+328, 0xca184a737d3971cc
+329, 0x964d31b07183adfb
+330, 0xe9e0ff351cd276d4
+331, 0xb5747c860b05bbe4
+332, 0x5549ddc3bd3862e2
+333, 0x495496677b27873b
+334, 0x53910baa26e3ea18
+335, 0xaa07a07ad0a688d3
+336, 0xbb43bd1f09ecdb1e
+337, 0xe2ebc105699dd84
+338, 0x6e815a2729584035
+339, 0x2caab1713b17948a
+340, 0x43d39d209fa41c90
+341, 0xfe3e71089d5d1c3a
+342, 0xa778646c32f81177
+343, 0x8d42bfb86e6e92d5
+344, 0x175571f70b4fcfbe
+345, 0x2a66a6fe10dc3b5b
+346, 0xd9545e85235ca709
+347, 0x5642781c77ced48a
+348, 0x24facc40b72ccd09
+349, 0xa800fbacce33f6f8
+350, 0x675f58a0ff19fba
+351, 0x35aedf57bb5cde1b
+352, 0xe5535a6b63f6d068
+353, 0x84dffd0102aaa85d
+354, 0x621faad65467aaa7
+355, 0x596ad85b556b112f
+356, 0x837545fff8894c7a
+357, 0x3d9a4ae1356bc6a6
+358, 0xcd8b7153205d4ad0
+359, 0x98afdd40f1ed09a6
+360, 0xa38b2dc55a5cf87f
+361, 0x484aecce2b6838bc
+362, 0x6af05c26bdab18d9
+363, 0xf418b7399dcf2e4b
+364, 0x1cfa38789b0d2445
+365, 0xfbed23c34166ee67
+366, 0x38e6820039e4912a
+367, 0x1fe94911e963591e
+368, 0x1291c79aee29ad70
+369, 0x65eccfc89506f963
+370, 0x7d14de3b2f55b1f6
+371, 0x82eb79c36cd2a739
+372, 0x41ffe3b75ea0def5
+373, 0x9eba9156470a51d9
+374, 0xd17c00b981db37d1
+375, 0xf688769a75601aa7
+376, 0xbcf738e9e03d571e
+377, 0x14712e56df8f919b
+378, 0xab14e227d156e310
+379, 0xf53d193e993e351e
+380, 0x857fae46bd312141
+381, 0xc2dd71e41b639966
+382, 0x74f8b987a3d00ad1
+383, 0x5bce8526dc527981
+384, 0x94910926c172a379
+385, 0x503c45557688a9d5
+386, 0x244d03834e05807f
+387, 0x6e014cbab9c7a31f
+388, 0xae544c638530facf
+389, 0x9b853aaaf9cbc22d
+390, 0xfb42ab7024d060ed
+391, 0x74cc3fba0dfd7ff2
+392, 0x24ec9e8f62144ad5
+393, 0x72f082954307bbe7
+394, 0x36feda21bbf67577
+395, 0x3222191611b832f1
+396, 0xd0584e81bcac8b0b
+397, 0xdce8d793ef75e771
+398, 0x978824c6c2578fc
+399, 0x6e8f77503b3c2ee4
+400, 0xc85d2d86fecf5d03
+401, 0x3d35b4a5d4d723c4
+402, 0xd3987dfd4727fff3
+403, 0xd3cde63fb6a31add
+404, 0xf6699e86165bdaeb
+405, 0x9d60ba158ec364c4
+406, 0x920c3c18b346bfc9
+407, 0x770fd1fdfbc236ca
+408, 0x45998cfc5fc12ddd
+409, 0xd74a3454e888834b
+410, 0xbf2aa68081a4a28f
+411, 0xea41b26a6f1da1b3
+412, 0x5560a2d24b9d5903
+413, 0xe3791f652a228d8b
+414, 0x365116d3b5a8520c
+415, 0xb1b2bd46528f8969
+416, 0xfcfe14943ef16ae7
+417, 0xf4d43425e8a535dc
+418, 0xe6cf10a78782a7e0
+419, 0x9c7ac0de46556e3e
+420, 0xc667ae0856eed9ef
+421, 0x47dbb532e16f9c7e
+422, 0xdf4785a5d89ee82e
+423, 0xbd014925ce79dbcf
+424, 0xea0d663fb58fa5be
+425, 0x51af07d5cc3821fb
+426, 0x27a1bdcdc4159a9d
+427, 0x520c986c59b1e140
+428, 0x50b73fd9bacd5b39
+429, 0xae5240641f51e4f3
+430, 0x71faecc164ed9681
+431, 0xda95aa35529a7ee
+432, 0xe25ba29b853c1c6d
+433, 0x9871a925cda53735
+434, 0xde481ad8540e114d
+435, 0xa2997f540e8abca0
+436, 0xc9683c5035e28185
+437, 0x1082471b57182bac
+438, 0xbd3ecf0f0b788988
+439, 0xf479760776fbb342
+440, 0x3730929200d91f44
+441, 0xc1762d79ae72809c
+442, 0xfaa0a4c7b1686cb3
+443, 0xd581e6d55afdafcd
+444, 0x6cf57bdfba2dcf6d
+445, 0xdef79d9fe6a5bcef
+446, 0x13ed376e18132bd3
+447, 0xbe67efd72defa2a
+448, 0x5acc176c468966ea
+449, 0x8b35b626af139187
+450, 0x446de3fac0d973ac
+451, 0xe1d49e06dc890317
+452, 0x817bc3fd21fc09b7
+453, 0xb71c3958a13d5579
+454, 0x8746e010f73d7148
+455, 0x1b61c06009922e83
+456, 0xba17e62e6b092316
+457, 0x1375fa23c4db8290
+458, 0x3f071230f51245a6
+459, 0x51c99a086a61cd13
+460, 0x5f0f2ae78589e1fd
+461, 0x604834e114bbbc27
+462, 0x5eb2a7a34814e9a9
+463, 0x77a6907f386bf11e
+464, 0x99525de2bd407eeb
+465, 0xb818348c57b3b98f
+466, 0x25f5f9e702fbe78d
+467, 0x8f66669e6f884473
+468, 0x1e47d46e2af4f919
+469, 0xf6a19df846476833
+470, 0xff00c67bcd06621f
+471, 0xe3dfe069795d72d8
+472, 0x8affc88b2fea4d73
+473, 0x66df747e5f827168
+474, 0xf368ec338d898a0e
+475, 0x9e1f1a739c5984a2
+476, 0x46a1c90e1ca32cbc
+477, 0xc261bc305ed8d762
+478, 0x754d7949f7da9e72
+479, 0x4c8fbbb14ef47b17
+480, 0xccbdc67a3848d80d
+481, 0x3c25e6f58bae751d
+482, 0x7078b163b936d9b6
+483, 0x440e27463c134ecf
+484, 0x6c83ee39f324db0f
+485, 0x27cf901b22aea535
+486, 0x57262dec79a3f366
+487, 0x91db09f1dbb524fb
+488, 0xd7436eefba865df2
+489, 0x16c86b0a275a3f43
+490, 0x689493e6681deaa9
+491, 0x7e1dc536c1a9ac42
+492, 0x1145beac3ac7f5cc
+493, 0x3d05e211a104b2b0
+494, 0x4f9e77ced3c52f44
+495, 0x53de1369354add72
+496, 0x1fb60f835f47cdeb
+497, 0x6ab36f089e40c106
+498, 0xaabffcb0d3d04c7
+499, 0xaa399686d921bd25
+500, 0x2bf8dd8b6d6fa7f0
+501, 0x1ddbf4e124329613
+502, 0x466a740241466a72
+503, 0x98d7381eb68a761
+504, 0x817691510bc4857a
+505, 0x8837622c0171fe33
+506, 0xcba078873179ee16
+507, 0x13adad1ab7b75af4
+508, 0x3bac3f502428840c
+509, 0xbeb3cce138de9a91
+510, 0x30ef556e40b5f0b4
+511, 0x19c22abdf3bbb108
+512, 0x977e66ea4ddc7cf
+513, 0x9f4a505f223d3bf3
+514, 0x6bc3f42ac79ec87b
+515, 0x31e77712158d6c23
+516, 0x6d8de4295a28af0d
+517, 0xee1807dbda72adb7
+518, 0xda54140179cd038f
+519, 0x715aa5cdac38e062
+520, 0x5a7e55e99a22fa16
+521, 0xf190c36aa8edbe4f
+522, 0xccadd93a82c1d044
+523, 0x7070e6d5012c3f15
+524, 0x50a83341a26c1ba5
+525, 0x11bca7cc634142e5
+526, 0x623a0d27867d8b04
+527, 0x75c18acff54fbf6e
+528, 0x455ae7d933497a6f
+529, 0xf624cf27d030c3d3
+530, 0x7a852716f8758bac
+531, 0xe7a497ac1fa2b5b4
+532, 0xf84f097498f57562
+533, 0xc4bb392f87f65943
+534, 0x618e79a5d499fbfb
+535, 0xb3c0b61d82b48b8
+536, 0x4750a10815c78ea7
+537, 0x9cf09cca3ddece69
+538, 0x2a69f1c94cc901a2
+539, 0x347a0e446e1ce86d
+540, 0xb06f3a5a5ab37bb1
+541, 0x8035bd0713d591db
+542, 0x539c9637042c3a1f
+543, 0xd7ba4dc6b273cbd7
+544, 0x12f3f99933444f85
+545, 0x4a9517b9783fb9a4
+546, 0x6422b2ea95093bc5
+547, 0x3a5ecff0f996c2a6
+548, 0x31de504efc76a723
+549, 0x7ccb7c5233c21a9f
+550, 0xc687d9e6ce4186e8
+551, 0x6e40769d6940376a
+552, 0xf51207314f1f7528
+553, 0x67ee3acb190865e3
+554, 0xe08d586270588761
+555, 0xe387fa489af1a75c
+556, 0x73414a52d29d8375
+557, 0x671a38191cf2a357
+558, 0xe00fb25b1aa54008
+559, 0x11a0610e22cf549b
+560, 0xc90cc865d57c75be
+561, 0x90d0863cc15f2b79
+562, 0x8b3e60d32ebcb856
+563, 0xb28cc55af621e04a
+564, 0xcf60bd3cb2a5ab1d
+565, 0x212cb5d421948f86
+566, 0xee297b96e0a3363f
+567, 0x4e9392ff998760d1
+568, 0x61940c8d0105ba3e
+569, 0x14ebcbae72a59a16
+570, 0xdf0f39a3d10c02af
+571, 0xfc047b2b3c1c549d
+572, 0x91718b5b98e3b286
+573, 0x9ea9539b1547d326
+574, 0x7a5a624a89a165e6
+575, 0x145b37dcaa8c4166
+576, 0x63814bbb90e5616c
+577, 0xc4bc3ca6c38bb739
+578, 0x853c3a61ddc6626c
+579, 0xa7ce8481c433829a
+580, 0x8aff426941cc07b
+581, 0x2dc3347ca68d8b95
+582, 0xce69f44f349e9917
+583, 0x2fa5cb8aca009b11
+584, 0xf26bb012115d9aca
+585, 0xafa01c2f2d27235a
+586, 0xabcba21f1b40305e
+587, 0xfec20c896c0c1128
+588, 0xc5f7a71ebacadfa0
+589, 0xc8479ad14bab4eef
+590, 0xad86ec9a3e7d3dc
+591, 0xbbecd65292b915c5
+592, 0xb1f9e28149e67446
+593, 0x708d081c03dad352
+594, 0xaa8a84dbd1de916c
+595, 0x9aa3efb29ba9480b
+596, 0xd3c63969ff11443e
+597, 0x1e9e9ac861315919
+598, 0x4fe227f91e66b41d
+599, 0xefc0212d43d253ab
+600, 0x98341437727c42d1
+601, 0x5ea85c0fe9008adc
+602, 0x7891b15faa808613
+603, 0x32db2d63989aacfd
+604, 0xc92f7f28e88fd7bc
+605, 0x3513545eb6549475
+606, 0x49abe0082906fbf8
+607, 0xcee1e1a6551e729c
+608, 0x38556672b592a28e
+609, 0xc3e61409c4ec2d45
+610, 0x96c67ce2995a0fd4
+611, 0x9b9b0cada870293
+612, 0x82d6dd5dada48037
+613, 0xeea4f415299f1706
+614, 0x371107895f152ab3
+615, 0x2f6686159f4396bb
+616, 0x61005a2ff3680089
+617, 0x9d2f2cafb595e6b6
+618, 0x4a812a920f011672
+619, 0x317554d3a77385d7
+620, 0x24c01086727eb74b
+621, 0xa15ff76d618a3a9e
+622, 0x2121bfd983859940
+623, 0x384d11577eea8114
+624, 0xab0f4299f3c44d88
+625, 0x136fd4b07cfa14d9
+626, 0x665fe45cbfaa972a
+627, 0x76c5a23398a314e9
+628, 0x5507036357ccda98
+629, 0xd9b8c5ac9dce632b
+630, 0x366bc71781da6e27
+631, 0xdd2b2ba1d6be6d15
+632, 0xf33ed0d50ea6f1a6
+633, 0xf05a9b1900174c18
+634, 0x3947e1419e2787cf
+635, 0x6c742b1e029637d0
+636, 0x32aba12196a0d2e8
+637, 0x1b94aab2e82e7df
+638, 0x68b617db19229d6
+639, 0x6c88a95ac0a33f98
+640, 0xdc9b95fd60c2d23e
+641, 0x999e6971d3afc8b3
+642, 0x7071fc6ad8b60129
+643, 0x41a8184ef62485f6
+644, 0xb68e0605c7d5e713
+645, 0x272b961a1d1bbee
+646, 0x23f04e76446187b0
+647, 0x999a7a8f6d33f260
+648, 0xdbd6318df4f168d
+649, 0x8f5e74c84c40711e
+650, 0x8ccc6b04393a19d6
+651, 0xadcd24b782dd8d3d
+652, 0x1a966b4f80ef9499
+653, 0xcb6d4f9ff5a280f0
+654, 0x8095ff2b8484018a
+655, 0xbfd3389611b8e771
+656, 0x278eb670b7d12d51
+657, 0x31df54ca8d65c20f
+658, 0x121c7fb38af6985e
+659, 0x84fb94f38fe1d0a
+660, 0x15ae8af1a6d48f02
+661, 0x8d51e4a62cba1a28
+662, 0x58e6b6b3ae0f9e42
+663, 0x9365a0a85669cc99
+664, 0xe56e92f65a2106df
+665, 0x68fa299c66b428fc
+666, 0x55e51bb0b0a832c6
+667, 0x48b565293f9bc494
+668, 0x73d8132b1cbabb57
+669, 0x9178ac3926c36cbc
+670, 0xe2f22c7b28ea5e0f
+671, 0x6af45322a99afb12
+672, 0x59072fcb486a46f4
+673, 0x166b717b08d3d8e
+674, 0xd4e627a2dfacc4ab
+675, 0x33dad6f2921dedaa
+676, 0x4b13b806834a6704
+677, 0xe5f7971b398ed54d
+678, 0x20bfae65e3e6899b
+679, 0x881dab45d2b4fc98
+680, 0x6f248126b5b885be
+681, 0x7aeb39e986f9deee
+682, 0xf819f9574b8c3a03
+683, 0xff3d93ed6bd9781a
+684, 0x3a31e2e24a2f6385
+685, 0x7888a88f8944a5e
+686, 0x4faee12f5de95537
+687, 0x7f3e4efccdb2ed67
+688, 0x91e0f2fc12593af5
+689, 0xb5be8a4b886a40d3
+690, 0x998e8288ac3a9b1b
+691, 0x85c48fc8b1349e7b
+692, 0xf03af25222d8fae5
+693, 0x45467e805b242c2e
+694, 0xa2350db793dbebdc
+695, 0xfebe5b61d2174553
+696, 0xa9a331f02c54ad0b
+697, 0xe94e49a0f905aef3
+698, 0xe54b4c812b55e3da
+699, 0xdc454114c6bc0278
+700, 0x99c7765ab476baa2
+701, 0xccd9590e47fdff7c
+702, 0xfa2bcae7afd6cb71
+703, 0x2c1bf1a433a6f0f7
+704, 0x53882c62ff0aab28
+705, 0x80ac900f844dacc
+706, 0x27ba8eb5c4a44d54
+707, 0x78f3dfb072a46004
+708, 0x34e00e6ec629edce
+709, 0x5b88d19b552d1fbd
+710, 0xe4df375dc79df432
+711, 0x37446312ff79c3b4
+712, 0xb72256900a95fa6d
+713, 0x89f3171fbdff0bfc
+714, 0xd37885b048687eba
+715, 0xbb033213b283b60e
+716, 0xcf10b523ee769030
+717, 0xbf8070b6cfd7bafb
+718, 0xb7194da81fd1763b
+719, 0xbfc303de88e68d24
+720, 0xb949c7a5aea8a072
+721, 0x844216e7bae90455
+722, 0xf1e7f20840049a33
+723, 0x96e3263ad0cae794
+724, 0x10772d51f6e9ba49
+725, 0xcea24fccae9d23b3
+726, 0xefd378add9dde040
+727, 0xba0c7c5275805976
+728, 0x2e2a04608f64fa8c
+729, 0xafb42ec43aa0fa7
+730, 0x30444b84241ac465
+731, 0x19ef384bac4493ab
+732, 0xfd1ac615d3ba5ab9
+733, 0x6cc781ba38643aff
+734, 0x30ff27ebed875cfd
+735, 0xee1a261aca97ae62
+736, 0xc5a92715202bc940
+737, 0x9e6ec76f93c657ff
+738, 0x9b9fd55f55191ca5
+739, 0x654b13af008d8f03
+740, 0x1b7f030d9bd0719f
+741, 0x6d622e277550cb7f
+742, 0x3f8ee6b8830d0538
+743, 0x475462bcd0de190f
+744, 0x21380e8a513bdbcd
+745, 0x629bf3771b1bd7a4
+746, 0x3b5fd0b62c353709
+747, 0xf95634006ec3867e
+748, 0x1be8bb584a6653c2
+749, 0x2e2d3cfa85320ce8
+750, 0x5b904b692252d11d
+751, 0x4bfd76631d527990
+752, 0xc019571ca2bec4a0
+753, 0xf2eb730cea4cd751
+754, 0xd4571d709530191a
+755, 0x3b5bd947061f5a7d
+756, 0x56e2322cd2d1d1c0
+757, 0xa8830a5f62019f83
+758, 0x901d130c1b873cf3
+759, 0xb5dd29b363c61299
+760, 0xbb710bec3a17b26d
+761, 0xc0c464daca0f2328
+762, 0x4dc8055df02650f5
+763, 0x3d3cd9bbe8b957af
+764, 0xdb79612c2635b828
+765, 0xe25b3a8ad8fa3040
+766, 0xd5875c563cbf236b
+767, 0x46861c1c3849c9bc
+768, 0xf84bf1a2814dff43
+769, 0x6d8103902e0ad5e6
+770, 0x99f51c9be8af79e5
+771, 0xb0bfa8540ff94a96
+772, 0xaf45109a4e06f7d0
+773, 0x281df3e55aea9bfc
+774, 0x6a1155ca8aa40e60
+775, 0x754d32c5de1f5da
+776, 0xce1eafb1c6ca916f
+777, 0xc4f2185fa8577bd1
+778, 0x4a188e9bdb5501d9
+779, 0xbb14107e99bd5550
+780, 0xf0381d8425ec2962
+781, 0x213dbfffc16ec4f6
+782, 0x7a999c5a28ea65bc
+783, 0x23758c2aba7709ff
+784, 0xea7e4bb205e93b44
+785, 0x9c5a31e53911c658
+786, 0x7f04d0bbdc689ddc
+787, 0xe3ed89ab8d78dcb3
+788, 0x73c38bfb43986210
+789, 0x740c7d787eb8e158
+790, 0x5284fafdfb3fb9ec
+791, 0x2e91a58ac1fb1409
+792, 0xb94a600bf0a09af3
+793, 0x533ea4dbe07d81dd
+794, 0x48c3f1a736b3c5fd
+795, 0x56ae3499fa8720ce
+796, 0x526f2def663ca818
+797, 0x2f085759c65665c4
+798, 0xf715f042c69e0db4
+799, 0x110889c399231e60
+800, 0x64584a244866f3a0
+801, 0xf02ec101a39405d3
+802, 0xe73cd5e9a7f17283
+803, 0xfea64869e7028234
+804, 0x97559974ad877891
+805, 0xc8695aba1dc9f2e5
+806, 0x7b62b76ffc2264ec
+807, 0xf5e1df172ec5ccd
+808, 0xafaeb68765e443bd
+809, 0xd3870eb2e8337623
+810, 0x4f944d684138fb39
+811, 0x6977c575038916ad
+812, 0x8ada1a225df95a56
+813, 0xe4044c6c58d15e54
+814, 0x4e5121366681cf2
+815, 0xcf8640b079357b0d
+816, 0xcd5b157d44106fa3
+817, 0x9d7a5481279e25a1
+818, 0xe10e9db41fb4b34f
+819, 0x1052607be1eadff9
+820, 0x3403d67232fe2265
+821, 0xac9358f498c34afc
+822, 0x820172da0dc39c9
+823, 0xe186e91a3b826b6a
+824, 0x1a838e2a40284445
+825, 0x1870b617ebd7bce6
+826, 0xcb7cba4424be1ed7
+827, 0x6a2e56e40fdf9041
+828, 0xace93bbe108f97ee
+829, 0xfeb9bc74ac41ca08
+830, 0x8cb2d05b0f6a1f51
+831, 0x73792309f3fac0a9
+832, 0x2507343d431308ca
+833, 0xd0ea1197be615412
+834, 0xb1870812f1d2fa94
+835, 0x6d067b6935dcd23e
+836, 0xaf161014e5492c31
+837, 0xd4be0dce97064be4
+838, 0xf8edfe3fc75c20f1
+839, 0x894751dc442d2d9c
+840, 0xb4a95f6a6663456c
+841, 0x74e93162e2d805db
+842, 0x784bc5f3a7a2f645
+843, 0xd234d7c5b0582ea9
+844, 0x491f28d0ab6cb97c
+845, 0xa79419e5cf4336c3
+846, 0x66b00141978c849
+847, 0xa7ddbd64698d563f
+848, 0xefc33a4a5d97d4b2
+849, 0x95075514a65aebdc
+850, 0x40eca5b3e28cd25e
+851, 0x90ec7d00e9c9e35d
+852, 0x63e84104d5af417a
+853, 0xdaca0ea32df5744
+854, 0x7ed54f2587795881
+855, 0x5a73931760af4ee0
+856, 0x857d1a185a3081ec
+857, 0x6eac2aabe67fb463
+858, 0xd1f86155d8bfc55f
+859, 0x6d56398f3e7877ef
+860, 0x7642f61dfc62bc17
+861, 0x1d76b12843246ffa
+862, 0xde7817809b8a31d0
+863, 0xbcca9cd091198f9d
+864, 0xf71ca566dddcdfd4
+865, 0xea4386ee8b61d082
+866, 0xe351729d6010bac4
+867, 0xfd685d8a49910dd6
+868, 0xa7a20ea6c686bd3
+869, 0x1cdaf82f4dbd5536
+870, 0xa3da1d1e77dda3e0
+871, 0x4f723b3818ff8b2a
+872, 0x1290669eca152469
+873, 0xb54158b52d30651b
+874, 0xc06b74f2c7f0fee
+875, 0x7d5840bcbf702379
+876, 0x19fa4c1254a82ed
+877, 0xcf5ce090ad0b38ea
+878, 0xd4edd6ac9437e16d
+879, 0xc6ebf25eb623b426
+880, 0xd2b6dbdf00d8fea2
+881, 0x949cf98391cc59e1
+882, 0x380a0c7d0356f7b3
+883, 0x8ffefe32465473bf
+884, 0x637b6542d27c861e
+885, 0x347d12ffc664ecd9
+886, 0xea66e3a0c75a6b37
+887, 0xc3aff6f34fb537a1
+888, 0x67bdf3579959bf49
+889, 0xa17a348e3a74b723
+890, 0x93c9ef26ddadd569
+891, 0x483909059a5ac0b2
+892, 0x26ec9074b56d5a0d
+893, 0x6216000d9a48403a
+894, 0x79b43909eab1ec05
+895, 0xe4a8e8d03649e0de
+896, 0x1435d666f3ccdc08
+897, 0xb9e22ba902650a0e
+898, 0x44dffcccc68b41f8
+899, 0x23e60dcc7a559a17
+900, 0x6fd1735eacd81266
+901, 0xf6bda0745ea20c8e
+902, 0x85efcaefe271e07c
+903, 0x9be996ee931cef42
+904, 0xe78b41c158611d64
+905, 0xd6201df605839830
+906, 0x702e8e47d2769fd3
+907, 0xb8dcf70e18cf14c
+908, 0xac2690bab1bf5c17
+909, 0x92b166b71205d696
+910, 0xb0e73c795fc6df28
+911, 0x4bf2322c8b6b6f0d
+912, 0xa842fbe67918cea0
+913, 0xb01a8675d9294e54
+914, 0xfbe3c94f03ca5af2
+915, 0x51a5c089600c441f
+916, 0x60f0fd7512d85ded
+917, 0xef3113d3bc2cadb0
+918, 0xe1ea128ade300d60
+919, 0xde413b7f8d92d746
+920, 0xfc32c6d43f47c5d8
+921, 0x69d551d8c2b54c68
+922, 0xb9bc68c175777943
+923, 0xb9c79c687f0dae90
+924, 0xd799421ef883c06e
+925, 0xbff553ca95a29a3e
+926, 0xfc9ffac46bd0aca1
+927, 0x4f6c3a30c80c3e5a
+928, 0x8b7245bc6dc4a0a
+929, 0xaf4e191a4575ff60
+930, 0x41218c4a76b90f0b
+931, 0x986052aa51b8e89b
+932, 0x284b464ed5622f9
+933, 0xba6bded912626b40
+934, 0x43cad3ed7443cb5c
+935, 0x21641fa95725f328
+936, 0x6d99d6d09d755822
+937, 0x8246dfa2d4838492
+938, 0xd2ee70b9056f4726
+939, 0x87db515a786fbb8b
+940, 0x7c63e4c1d7786e7d
+941, 0xd1a9d548f10b3e88
+942, 0xa00856475f3b74c9
+943, 0x7f1964ce67148bf4
+944, 0x446650ec71e6018c
+945, 0xb1805ca07d1b6345
+946, 0x869c0a1625b7271b
+947, 0x79d6da06ce2ecfe2
+948, 0xec7b3cafc5e3c85f
+949, 0x1745ce21e39f2c3d
+950, 0xd9a0a7af6ee97825
+951, 0x680e0e52a6e11d5c
+952, 0xd86b3f344ff7f4cd
+953, 0xab56af117c840b9c
+954, 0x5c5404c7e333a10e
+955, 0x4f1eb462f35d990d
+956, 0xf857605a5644458e
+957, 0x3bb87cdf09262f86
+958, 0xd57295baf6da64b
+959, 0xb5993f48472f2894
+960, 0x7d1a501608c060b2
+961, 0x45fabe2d0e54adf0
+962, 0xbb41c3806afb4efe
+963, 0xbfbc506049424c8
+964, 0xb7dd6b67f2203344
+965, 0x389ce52eff883b81
+966, 0xe259c55c0cf6d000
+967, 0x70fb3e3824f7d213
+968, 0x9f36d5599ed55f4b
+969, 0xd14cf6f12f83c4f7
+970, 0x570a09d56aaa0b66
+971, 0x8accafd527f4598
+972, 0xa42d64c62175adfd
+973, 0xddb9c6a87b6e1558
+974, 0xd80b6c69fa1cde2a
+975, 0x44ebaac10082207b
+976, 0xf99be8889552fa1a
+977, 0x38253cd4b38b5dc5
+978, 0x85356c8b02675791
+979, 0xbf91677b2ecdcf55
+980, 0x2316cb85e93f366e
+981, 0x9abf35954db6b053
+982, 0xf49f7425e086b45a
+983, 0x8f5b625e074afde2
+984, 0xe0d614559791b080
+985, 0xbf7b866afab2a525
+986, 0xde89d7e1641a6412
+987, 0x1d10687d8ae5b86f
+988, 0x1f034caa0e904cbd
+989, 0x2086357aec8a7a2c
+990, 0x22dc476b80c56e1e
+991, 0xbef5a73cc0e3a493
+992, 0xddfa3829b26ed797
+993, 0x8917a87ec3d4dc78
+994, 0xfeabe390628c365e
+995, 0x581b0c4f6fb2d642
+996, 0x1ef8c590adbf5b9a
+997, 0x4d8e13aac0cce879
+998, 0xfe38f71e5977fad0
+999, 0x1f83a32d4adfd2ed
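(Editorial note, not part of the diff: the CSV layout is uniform across these testsets — first row `seed, <hex>`, then `index, <hex>` pairs. Below is a minimal, hypothetical loader for that layout; numpy's own tests use their own reader in `test_direct.py`, so this is only an illustration of the format.)

```python
from pathlib import Path

def load_testset(path):
    """Parse a testset CSV into (seed, values). Hex is parsed via base 0."""
    lines = Path(path).read_text().splitlines()
    seed = int(lines[0].split(",")[1], 0)                   # e.g. 0xdeadbeaf
    values = [int(line.split(",")[1], 0) for line in lines[1:]]
    return seed, values
```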
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-2.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-2.csv
new file mode 100644
index 00000000..69d24c38
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/philox-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x399e5b222b82fa9
+1, 0x41fd08c1f00f3bc5
+2, 0x78b8824162ee4d04
+3, 0x176747919e02739d
+4, 0xfaa88f002a8d3596
+5, 0x418eb6f592e6c227
+6, 0xef83020b8344dd45
+7, 0x30a74a1a6eaa064b
+8, 0x93d43bf97a490c3
+9, 0xe4ba28b442194cc
+10, 0xc829083a168a8656
+11, 0x73f45d50f8e22849
+12, 0xf912db57352824cc
+13, 0xf524216927b12ada
+14, 0x22b7697473b1dfda
+15, 0x311e2a936414b39f
+16, 0xb905abfdcc425be6
+17, 0x4b14630d031eac9c
+18, 0x1cf0c4ae01222bc8
+19, 0xa6c33efc6e82ef3
+20, 0x43b3576937ba0948
+21, 0x1e483d17cdde108a
+22, 0x6722784cac11ac88
+23, 0xee87569a48fc45d7
+24, 0xb821dcbe74d18661
+25, 0xa5d1876ef3da1a81
+26, 0xe4121c2af72a483
+27, 0x2d747e355a52cf43
+28, 0x609059957bd03725
+29, 0xc3327244b49e16c5
+30, 0xb5ae6cb000dde769
+31, 0x774315003209017
+32, 0xa2013397ba8db605
+33, 0x73b228945dbcd957
+34, 0x801af7190375d3c0
+35, 0xae6dca29f24c9c67
+36, 0xd1cc0bcb1ca26249
+37, 0x1defa62a5bd853be
+38, 0x67c2f5557fa89462
+39, 0xf1729b58122fab02
+40, 0xb67eb71949ec6c42
+41, 0x5456366ec1f8f7d7
+42, 0x44492b32eb7966f5
+43, 0xa801804159f175f1
+44, 0x5a416f23cac70d84
+45, 0x186f55293302303d
+46, 0x7339d5d7b6a43639
+47, 0xfc6df38d6a566121
+48, 0xed2fe018f150b39e
+49, 0x508e0b04a781fa1b
+50, 0x8bee9d50f32eaf50
+51, 0x9870015d37e63cc
+52, 0x93c6b12309c14f2d
+53, 0xb571cf798abe93ff
+54, 0x85c35a297a88ae6e
+55, 0x9b1b79afe497a2ae
+56, 0x1ca02e5b95d96b8d
+57, 0x5bb695a666c0a94a
+58, 0x4e3caf9bbab0b208
+59, 0x44a44be1a89f2dc1
+60, 0x4ff37c33445758d1
+61, 0xd0e02875322f35da
+62, 0xfd449a91fb92646b
+63, 0xbe0b49096b95db4d
+64, 0xffa3647cad13ef5d
+65, 0x75c127a61acd10c8
+66, 0xd65f697756f5f98e
+67, 0x3ced84be93d94434
+68, 0x4da3095c2fc46d68
+69, 0x67564e2a771ee9ac
+70, 0x36944775180644a9
+71, 0xf458db1c177cdb60
+72, 0x5b58406dcd034c8
+73, 0x793301a3fdab2a73
+74, 0x1c2a1a16d6db6128
+75, 0xc2dacd4ddddbe56c
+76, 0x2e7d15be2301a111
+77, 0xd4f4a6341b3bcd18
+78, 0x3622996bbe6a9e3b
+79, 0xaf29aa9a7d6d47da
+80, 0x6d7dbb74a4cd68ae
+81, 0xc260a17e0f39f841
+82, 0xdee0170f2af66f0d
+83, 0xf84ae780d7b5a06e
+84, 0x8326247b73f43c3a
+85, 0xd44eef44b4f98b84
+86, 0x3d10aee62ec895e3
+87, 0x4f23fef01bf703b3
+88, 0xf8e50aa57d888df6
+89, 0x7da67411e3bef261
+90, 0x1d00f2769b2f96d7
+91, 0x7ef9a15b7444b84e
+92, 0xcfa16436cc2b7e21
+93, 0x29ab8cfac00460ff
+94, 0x23613de8608b0e70
+95, 0xb1aa0980625798a8
+96, 0xb9256fd29db7df99
+97, 0xdacf311bf3e7fa18
+98, 0xa013c8f9fada20d8
+99, 0xaf5fd4fe8230fe3e
+100, 0xd3d59ca55102bc5c
+101, 0x9d08e2aa5242767f
+102, 0x40278fe131e83b53
+103, 0x56397d03c7c14c98
+104, 0xe874b77b119359b3
+105, 0x926a1ba4304ab19f
+106, 0x1e115d5aa695a91d
+107, 0xc6a459df441f2fe3
+108, 0x2ca842bc1b0b3c6a
+109, 0x24c804cf8e5eed16
+110, 0x7ca00fc4a4c3ebd3
+111, 0x546af7cecc4a4ba6
+112, 0x8faae1fa18fd6e3
+113, 0x40420b0089641a6a
+114, 0x88175a35d9abcb83
+115, 0xf7d746d1b8b1357c
+116, 0x7dae771a651be970
+117, 0x2f6485247ee4df84
+118, 0x6883702fab2d8ec5
+119, 0xeb7eea829a67f9a6
+120, 0x60d5880b485562ed
+121, 0x7d4ca3d7e41a4e7e
+122, 0xbb7fef961ab8de18
+123, 0x3b92452fb810c164
+124, 0x5f4b4755348b338
+125, 0xca45a715a7539806
+126, 0xc33efd9da5399dd
+127, 0x593d665a51d4aedd
+128, 0x75d6b8636563036b
+129, 0x7b57caa55e262082
+130, 0x4ede7427969e0dd5
+131, 0xc3f19b6f78ea00b
+132, 0xeea7bab9be2181ea
+133, 0x652c45fe9c420c04
+134, 0x14ba9e3d175670ee
+135, 0xd2ad156ba6490474
+136, 0x4d65ae41065f614
+137, 0x6ff911c8afa28eb1
+138, 0xedc2b33588f3cb68
+139, 0x437c8bc324666a2f
+140, 0x828cee25457a3f0
+141, 0x530c986091f31b9b
+142, 0x2f34671e8326ade7
+143, 0x4f686a8f4d77f6da
+144, 0xa4c1987083498895
+145, 0xbce5a88b672b0fb1
+146, 0x8476115a9e6a00cc
+147, 0x16de18a55dd2c238
+148, 0xdf38cf4c416232bc
+149, 0x2cb837924e7559f3
+150, 0xfad4727484e982ed
+151, 0x32a55d4b7801e4f
+152, 0x8b9ef96804bd10a5
+153, 0xa1fd422c9b5cf2a9
+154, 0xf46ddb122eb7e442
+155, 0x6e3842547afa3b33
+156, 0x863dee1c34afe5c4
+157, 0x6a43a1935b6db171
+158, 0x1060a5c2f8145821
+159, 0xf783ec9ed34c4607
+160, 0x1da4a86bf5f8c0b0
+161, 0x4c7714041ba12af8
+162, 0x580da7010be2f192
+163, 0xad682fe795a7ea7a
+164, 0x6687b6cb88a9ed2c
+165, 0x3c8d4b175517cd18
+166, 0xe9247c3a524a6b6b
+167, 0x337ca9cfaa02658
+168, 0xed95399481c6feec
+169, 0x58726a088e606062
+170, 0xfe7588a5b4ee342a
+171, 0xee434c7ed146fdee
+172, 0xe2ade8b60fdc4ba5
+173, 0xd57e4c155de4eaab
+174, 0xdefeae12de1137cb
+175, 0xb7a276a241316ac1
+176, 0xeb838b1b1df4ca15
+177, 0x6f78965edea32f6f
+178, 0x18bebd264d7a5d53
+179, 0x3641c691d77005ec
+180, 0xbe70ed7efea8c24c
+181, 0x33047fa8d03ca560
+182, 0x3bed0d2221ff0f87
+183, 0x23083a6ffbcf38a2
+184, 0xc23eb827073d3fa5
+185, 0xc873bb3415e9fb9b
+186, 0xa4645179e54147fe
+187, 0x2c72fb443f66e207
+188, 0x98084915dd89d8f4
+189, 0x88baa2de12c99037
+190, 0x85c74ab238cb795f
+191, 0xe122186469ea3a26
+192, 0x4c3bba99b3249292
+193, 0x85d6845d9a015234
+194, 0x147ddd69c13e6a31
+195, 0x255f4d678c9a570b
+196, 0x2d7c0c410bf962b4
+197, 0x58eb7649e0aa16ca
+198, 0x9d240bf662fe0783
+199, 0x5f74f6fa32d293cc
+200, 0x4928e52f0f79d9b9
+201, 0xe61c2b87146b706d
+202, 0xcfcd90d100cf5431
+203, 0xf15ea8138e6aa178
+204, 0x6ab8287024f9a819
+205, 0xed8942593db74e01
+206, 0xefc00e4ec2ae36dd
+207, 0xc21429fb9387f334
+208, 0xf9a3389e285a9bce
+209, 0xacdee8c43aae49b3
+210, 0xefc382f02ad55c25
+211, 0x1153b50e8d406b72
+212, 0xb00d39ebcc2f89d8
+213, 0xde62f0b9831c8850
+214, 0xc076994662eef6c7
+215, 0x66f08f4752f1e3ef
+216, 0x283b90619796249a
+217, 0x4e4869bc4227499e
+218, 0xb45ad78a49efd7ed
+219, 0xffe19aa77abf5f4b
+220, 0xfce11a0daf913aef
+221, 0x7e4e64450d5cdceb
+222, 0xe9621997cfd62762
+223, 0x4d2c9e156868081
+224, 0x4e2d96eb7cc9a08
+225, 0xda74849bba6e3bd3
+226, 0x6f4621da935e7fde
+227, 0xb94b914aa0497259
+228, 0xd50d03e8b8db1563
+229, 0x1a45c1ce5dca422e
+230, 0xc8d30d33276f843f
+231, 0xb57245774e4176b4
+232, 0x8d36342c05abbbb1
+233, 0x3591ad893ecf9e78
+234, 0x62f4717239ee0ac8
+235, 0x9b71148a1a1d4200
+236, 0x65f8e0f56dd94463
+237, 0x453b1fcfd4fac8c2
+238, 0x4c25e48e54a55865
+239, 0xa866baa05112ace2
+240, 0x7741d3c69c6e79c5
+241, 0x7deb375e8f4f7a8a
+242, 0xc242087ede42abd8
+243, 0x2fa9d1d488750c4b
+244, 0xe8940137a935d3d3
+245, 0x1dab4918ca24b2f2
+246, 0xe2368c782168fe3e
+247, 0x6e8b2d1d73695909
+248, 0x70455ebea268b33e
+249, 0x656a919202e28da1
+250, 0x5a5a8935647da999
+251, 0x428c6f77e118c13c
+252, 0xa87aee2b675bb083
+253, 0x3873a6412b239969
+254, 0x5f72c1e91cb8a2ee
+255, 0xa25af80a1beb5679
+256, 0x1af65d27c7b4abc3
+257, 0x133437060670e067
+258, 0xb1990fa39a97d32e
+259, 0x724adc89ae10ed17
+260, 0x3f682a3f2363a240
+261, 0x29198f8dbd343499
+262, 0xdfaeeaa42bc51105
+263, 0x5baff3901b9480c2
+264, 0x3f760a67043e77f5
+265, 0x610fa7aa355a43ba
+266, 0x394856ac09c4f7a7
+267, 0x1d9229d058aee82e
+268, 0x19c674804c41aeec
+269, 0x74cf12372012f4aa
+270, 0xa5d89b353fa2f6ca
+271, 0x697e4f672ac363dd
+272, 0xde6f55ba73df5af9
+273, 0x679cf537510bd68f
+274, 0x3dc916114ae9ef7e
+275, 0xd7e31a66ec2ee7ba
+276, 0xc21bebb968728495
+277, 0xc5e0781414e2adfd
+278, 0x71147b5412ddd4bd
+279, 0x3b864b410625cca9
+280, 0x433d67c0036cdc6
+281, 0x48083afa0ae20b1b
+282, 0x2d80beecd64ac4e8
+283, 0x2a753c27c3a3ee3e
+284, 0xb2c5e6afd1fe051a
+285, 0xea677930cd66c46b
+286, 0x4c3960932f92810a
+287, 0xf1b367a9e527eaba
+288, 0xb7d92a8a9a69a98e
+289, 0x9f9ad3210bd6b453
+290, 0x817f2889db2dcbd8
+291, 0x4270a665ac15813c
+292, 0x90b85353bd2be4dd
+293, 0x10c0460f7b2d68d
+294, 0x11cef32b94f947f5
+295, 0x3cf29ed8e7d477e8
+296, 0x793aaa9bd50599ef
+297, 0xbac15d1190014aad
+298, 0x987944ae80b5cb13
+299, 0x460aa51f8d57c484
+300, 0xc77df0385f97c2d3
+301, 0x92e743b7293a3822
+302, 0xbc3458bcfbcbb8c0
+303, 0xe277bcf3d04b4ed7
+304, 0xa537ae5cf1c9a31c
+305, 0x95eb00d30bd8cfb2
+306, 0x6376361c24e4f2dd
+307, 0x374477fe87b9ea8e
+308, 0x8210f1a9a039902e
+309, 0xe7628f7031321f68
+310, 0x8b8e9c0888fc1d3d
+311, 0x306be461fdc9e0ed
+312, 0x510009372f9b56f5
+313, 0xa6e6fa486b7a027a
+314, 0x9d3f002025203b5a
+315, 0x7a46e0e81ecbef86
+316, 0x41e280c611d04df0
+317, 0xedcec10418a99e8a
+318, 0x5c27b6327e0b9dbd
+319, 0xa81ed2035b509f07
+320, 0x3581e855983a4cc4
+321, 0x4744594b25e9809d
+322, 0xc737ac7c27fbd0ed
+323, 0x1b523a307045433a
+324, 0x8b4ce9171076f1d9
+325, 0x2db02d817cd5eec0
+326, 0x24a1f1229af50288
+327, 0x5550c0dcf583ff16
+328, 0x3587baaa122ec422
+329, 0xf9d3dc894229e510
+330, 0xf3100430d5cf8e87
+331, 0xc31af79862f8e2fb
+332, 0xd20582063b9f3537
+333, 0xac5e90ac95fcc7ad
+334, 0x107c4c704d5109d4
+335, 0xebc8628906dbfd70
+336, 0x215242776da8c531
+337, 0xa98002f1dcf08b51
+338, 0xbc3bdc07f3b09718
+339, 0x238677062495b512
+340, 0x53b4796f2a3c49e8
+341, 0x6424286467e22f0e
+342, 0x14d0952a11a71bac
+343, 0x2f97098149b82514
+344, 0x3777f2fdc425ad2
+345, 0xa32f2382938876d4
+346, 0xda8a39a021f20ae3
+347, 0x364361ef0a6ac32c
+348, 0x4413eede008ff05a
+349, 0x8dda8ace851aa327
+350, 0x4303cabbdcecd1ee
+351, 0x2e69f06d74aa549f
+352, 0x4797079cd4d9275c
+353, 0xc7b1890917e98307
+354, 0x34031b0e822a4b4c
+355, 0xfc79f76b566303ea
+356, 0x77014adbe255a930
+357, 0xab6c43dd162f3be5
+358, 0xa430041f3463f6b9
+359, 0x5c191a32ada3f84a
+360, 0xe8674a0781645a31
+361, 0x3a11cb667b8d0916
+362, 0xaedc73e80c39fd8a
+363, 0xfde12c1b42328765
+364, 0x97abb7dcccdc1a0b
+365, 0x52475c14d2167bc8
+366, 0x540e8811196d5aff
+367, 0xa867e4ccdb2b4b77
+368, 0x2be04af61e5bcfb9
+369, 0x81b645102bfc5dfd
+370, 0x96a52c9a66c6450f
+371, 0x632ec2d136889234
+372, 0x4ed530c0b36a6c25
+373, 0x6f4851225546b75
+374, 0x2c065d6ba46a1144
+375, 0xf8a3613ff416551d
+376, 0xb5f0fd60e9c971a9
+377, 0x339011a03bb4be65
+378, 0x9439f72b6995ded6
+379, 0xc1b03f3ef3b2292d
+380, 0xad12fd221daab3ae
+381, 0xf615b770f2cf996f
+382, 0x269d0fdcb764172
+383, 0x67837025e8039256
+384, 0x6402831fc823fafa
+385, 0x22854146a4abb964
+386, 0x7b5ad9b5a1bad7a8
+387, 0x67170e7beb6ac935
+388, 0xfc2d1e8e24adfaaa
+389, 0x7ded4395345ff40d
+390, 0x418981760a80dd07
+391, 0xc03bef38022c1d2
+392, 0x3a11850b26eade29
+393, 0xaa56d02c7175c5f4
+394, 0xd83b7917b9bfbff5
+395, 0x3c1df2f8fa6fced3
+396, 0xf3d6e2999c0bb760
+397, 0xc66d683a59a950e3
+398, 0x8e3972a9d73ffabf
+399, 0x97720a0443edffd9
+400, 0xa85f5d2fe198444a
+401, 0xfc5f0458e1b0de5e
+402, 0xe3973f03df632b87
+403, 0xe151073c84c594b3
+404, 0x68eb4e22e7ff8ecf
+405, 0x274f36eaed7cae27
+406, 0x3b87b1eb60896b13
+407, 0xbe0b2f831442d70a
+408, 0x2782ed7a48a1b328
+409, 0xb3619d890310f704
+410, 0xb03926b11b55921a
+411, 0xdb46fc44aa6a0ce4
+412, 0x4b063e2ef2e9453a
+413, 0xe1584f1aeec60fb5
+414, 0x7092bd6a879c5a49
+415, 0xb84e1e7c7d52b0e6
+416, 0x29d09ca48db64dfb
+417, 0x8f6c4a402066e905
+418, 0x77390795eabc36b
+419, 0xcc2dc2e4141cc69f
+420, 0x2727f83beb9e3c7c
+421, 0x1b29868619331de0
+422, 0xd38c571e192c246f
+423, 0x535327479fe37b6f
+424, 0xaff9ce5758617eb3
+425, 0x5658539e9288a4e4
+426, 0x8df91d87126c4c6d
+427, 0xe931cf8fdba6e255
+428, 0x815dfdf25fbee9e8
+429, 0x5c61f4c7cba91697
+430, 0xdd5f5512fe2313a1
+431, 0x499dd918a92a53cd
+432, 0xa7e969d007c97dfd
+433, 0xb8d39c6fc81ac0bb
+434, 0x1d646983def5746c
+435, 0x44d4b3b17432a60c
+436, 0x65664232a14db1e3
+437, 0xda8fae6433e7500b
+438, 0xbe51b94ff2a3fe94
+439, 0xe9b1bd9a9098ef9f
+440, 0xfe47d54176297ef5
+441, 0xb8ab99bc03bb7135
+442, 0xcfad97f608565b38
+443, 0xf05da71f6760d9c1
+444, 0xef8da40a7c70e7b
+445, 0xe0465d58dbd5d138
+446, 0xb54a2d70eb1a938
+447, 0xfdd50c905958f2d8
+448, 0x3c41933c90a57d43
+449, 0x678f6d894c6ad0bb
+450, 0x403e8f4582274e8
+451, 0x5cbbe975668df6b0
+452, 0x297e6520a7902f03
+453, 0x8f6dded33cd1efd7
+454, 0x8e903c97be8d783b
+455, 0x10bd015577e30f77
+456, 0x3fcd69d1c36eab0c
+457, 0xb45989f3ca198d3
+458, 0x507655ce02b491a9
+459, 0xa92cf99bb78602ce
+460, 0xebfb82055fbc2f0f
+461, 0x3334256279289b7a
+462, 0xc19d2a0f740ee0ac
+463, 0x8bb070dea3934905
+464, 0xa4ab57d3a8d1b3eb
+465, 0xfee1b09bcacf7ff4
+466, 0xccc7fb41ceec41fa
+467, 0xd4da49094eb5a74d
+468, 0xed5c693770af02ed
+469, 0x369dabc9bbfaa8e4
+470, 0x7eab9f360d054199
+471, 0xe36dbebf5ee94076
+472, 0xd30840e499b23d7
+473, 0x8678e6cb545015ff
+474, 0x3a47932ca0b336e
+475, 0xeb7c742b6e93d6fe
+476, 0x1404ea51fe5a62a9
+477, 0xa72cd49db978e288
+478, 0xfd7bada020173dcf
+479, 0xc9e74fc7abe50054
+480, 0x93197847bb66808d
+481, 0x25fd5f053dce5698
+482, 0xe198a9b18cc21f4
+483, 0x5cc27b1689452d5d
+484, 0x8b3657af955a98dc
+485, 0xc17f7584f54aa1c0
+486, 0xe821b088246b1427
+487, 0x32b5a9f6b45b6fa0
+488, 0x2aef7c315c2bae0c
+489, 0xe1af8129846b705a
+490, 0x4123b4c091b34614
+491, 0x6999d61ec341c073
+492, 0x14b9a8fcf86831ea
+493, 0xfd4cff6548f46c9f
+494, 0x350c3b7e6cc8d7d6
+495, 0x202a5047fecafcd5
+496, 0xa82509fe496bb57d
+497, 0x835e4b2608b575fe
+498, 0xf3abe3da919f54ec
+499, 0x8705a21e2c9b8796
+500, 0xfd02d1427005c314
+501, 0xa38458faa637f49b
+502, 0x61622f2360e7622a
+503, 0xe89335a773c2963b
+504, 0x481264b659b0e0d0
+505, 0x1e82ae94ebf62f15
+506, 0x8ea7812de49209d4
+507, 0xff963d764680584
+508, 0x418a68bef717f4af
+509, 0x581f0e7621a8ab91
+510, 0x840337e9a0ec4150
+511, 0x951ef61b344be505
+512, 0xc8b1b899feb61ec2
+513, 0x8b78ca13c56f6ed9
+514, 0x3d2fd793715a946f
+515, 0xf1c04fabcd0f4084
+516, 0x92b602614a9a9fcc
+517, 0x7991bd7a94a65be7
+518, 0x5dead10b06cad2d7
+519, 0xda7719b33f722f06
+520, 0x9d87a722b7bff71e
+521, 0xb038e479071409e9
+522, 0xf4e8bbec48054775
+523, 0x4fec2cd7a28a88ea
+524, 0x839e28526aad3e56
+525, 0xd37ec57852a98bf0
+526, 0xdef2cbbe00f3a02d
+527, 0x1aecfe01a9e4d801
+528, 0x59018d3c8beaf067
+529, 0x892753e6ac8bf3cd
+530, 0xefdd3437023d2d1c
+531, 0x447bfbd148c8cb88
+532, 0x282380221bd442b8
+533, 0xfce8658d1347384a
+534, 0x60b211a7ec6bfa8
+535, 0xd21729cfcc692974
+536, 0x162087ecd5038a47
+537, 0x2b17000c4bce39d2
+538, 0x3a1f75ff6adcdce0
+539, 0x721a411d312f1a2c
+540, 0x9c13b6133f66934d
+541, 0xaa975d14978980e5
+542, 0x9403dbd4754203fa
+543, 0x588c15762fdd643
+544, 0xdd1290f8d0ada73a
+545, 0xd9b77380936103f4
+546, 0xb2e2047a356eb829
+547, 0x7019e5e7f76f7a47
+548, 0x3c29a461f62b001d
+549, 0xa07dc6cfab59c116
+550, 0x9b97e278433f8eb
+551, 0x6affc714e7236588
+552, 0x36170aeb32911a73
+553, 0x4a665104d364a789
+554, 0x4be01464ec276c9c
+555, 0x71bb10271a8b4ecf
+556, 0xbf62e1d068bc018
+557, 0xc9ada5db2cbbb413
+558, 0x2bded75e726650e5
+559, 0x33d5a7af2f34385d
+560, 0x8179c46661d85657
+561, 0x324ebcfd29267359
+562, 0xac4c9311dc9f9110
+563, 0xc14bb6a52f9f9c0
+564, 0xc430abe15e7fb9db
+565, 0xf1cce5c14df91c38
+566, 0x651e3efa2c0750d3
+567, 0x38a33604a8be5c75
+568, 0x7aaf77fe7ff56a49
+569, 0xc0d1cc56bbf27706
+570, 0x887aa47324e156c6
+571, 0x12547c004b085e8d
+572, 0xd86a8d6fbbbfd011
+573, 0x57c860188c92d7b4
+574, 0xcd5d3843d361b8ca
+575, 0x8f586ef05a9cb3ef
+576, 0x174456e1ba6267d5
+577, 0xf5dc302c62fe583c
+578, 0xa349442fabcdb71
+579, 0xe5123c1a8b6fd08e
+580, 0x80681552aa318593
+581, 0xb295396deaef1e31
+582, 0xabb626e0b900e32b
+583, 0xf024db8d3f19c15e
+584, 0x1d04bb9548e2fb6c
+585, 0xd8ed2b2214936c2b
+586, 0x618ca1e430a52bc9
+587, 0xccbca44a6088136b
+588, 0xd0481855c8b9ccbe
+589, 0x3c92a2fade28bdf7
+590, 0x855e9fefc38c0816
+591, 0x1269bbfe55a7b27c
+592, 0x1d6c853d83726d43
+593, 0xc8655511cc7fcafc
+594, 0x301503eb125a9b0e
+595, 0xb3108e4532016b11
+596, 0xbb7ab6245da9cb3d
+597, 0x18004c49116d85eb
+598, 0x3480849c20f61129
+599, 0xe28f45157463937b
+600, 0x8e85e61060f2ce1
+601, 0x1673da4ec589ba5e
+602, 0x74b9a6bd1b194712
+603, 0xed39e147fa8b7601
+604, 0x28ce54019102ca77
+605, 0x42e0347f6d7a2f30
+606, 0xb6a908d1c4814731
+607, 0x16c3435e4e9a126d
+608, 0x8880190514c1ad54
+609, 0xfffd86229a6f773c
+610, 0x4f2420cdb0aa1a93
+611, 0xf8e1acb4120fc1fa
+612, 0x63a8c553ab36a2f2
+613, 0x86b88cf3c0a6a190
+614, 0x44d8b2801622c792
+615, 0xf6eae14e93082ff1
+616, 0xd9ed4f5d1b8fac61
+617, 0x1808ce17f4e1f70
+618, 0x446e83ea336f262f
+619, 0xc7c802b04c0917b7
+620, 0x626f45fd64968b73
+621, 0x9ffa540edc9b2c5c
+622, 0xa96a1e219e486af8
+623, 0x2bb8963884e887a1
+624, 0xba7f68a5d029e3c4
+625, 0xefc45f44392d9ca0
+626, 0x98d77762503c5eab
+627, 0xd89bcf62f2da627c
+628, 0xa3cab8347f833151
+629, 0xa095b7595907d5c7
+630, 0x3b3041274286181
+631, 0xb518db8919eb71fa
+632, 0x187036c14fdc9a36
+633, 0xd06e28301e696f5d
+634, 0xdbc71184e0c56492
+635, 0xfe51e9cae6125bfd
+636, 0x3b12d17cd014df24
+637, 0x3b95e4e2c986ac1a
+638, 0x29c1cce59fb2dea2
+639, 0x58c05793182a49d6
+640, 0xc016477e330d8c00
+641, 0x79ef335133ada5d
+642, 0x168e2cad941203f3
+643, 0xf99d0f219d702ef0
+644, 0x655628068f8f135b
+645, 0xdcdea51910ae3f92
+646, 0x8e4505039c567892
+647, 0x91a9ec7e947c89ae
+648, 0x8717172530f93949
+649, 0x1c80aba9a440171a
+650, 0x9c8f83f6ebe7441e
+651, 0x6c05e1efea4aa7f9
+652, 0x10af696b777c01b
+653, 0x5892e9d9a92fc309
+654, 0xd2ba7da71e709432
+655, 0x46378c7c3269a466
+656, 0x942c63dfe18e772c
+657, 0x6245cf02ef2476f
+658, 0x6f265b2759ea2aea
+659, 0x5aa757f17d17f4a6
+660, 0x1ad6a3c44fa09be6
+661, 0xe861af14e7015fb8
+662, 0x86be2e7db388c77
+663, 0x5c7bba32b519e9a0
+664, 0x3feb314850c4437b
+665, 0x97955add60cfb45b
+666, 0xfdb536230a540bdc
+667, 0xdac9d7bf6e58512e
+668, 0x4894c00e474e8120
+669, 0xa1918a37739da366
+670, 0xa8097f2096532807
+671, 0x592afe50e6c5e643
+672, 0xd69050ee6dcb33dc
+673, 0xa6956b262dd3c561
+674, 0x1a55c815555e63f7
+675, 0x2ec7fd37516de2bb
+676, 0x8ec251d9c70e76ba
+677, 0x9b76e4abafd2689
+678, 0x9ce3f5c751a57df1
+679, 0x915c4818bf287bc7
+680, 0x2293a0d1fe07c735
+681, 0x7627dcd5d5a66d3d
+682, 0xb5e4f92cc49c7138
+683, 0x6fc51298731d268c
+684, 0xd19800aa95441f87
+685, 0x14f70f31162fa115
+686, 0x41a3da3752936f59
+687, 0xbec0652be95652ee
+688, 0x7aa4bdb1020a290f
+689, 0x4382d0d9bee899ef
+690, 0xe6d988ae4277d6ff
+691, 0xe618088ccb2a32d1
+692, 0x411669dfaa899e90
+693, 0x234e2bf4ba76d9f
+694, 0xe109fe4cb7828687
+695, 0x1fb96b5022b0b360
+696, 0x6b24ad76c061a716
+697, 0x7e1781d4d7ecee15
+698, 0xf20c2dbe82ba38ba
+699, 0xeda8e8ae1d943655
+700, 0xa58d196e2a77eaec
+701, 0x44564765a5995a0b
+702, 0x11902fe871ecae21
+703, 0x2ea60279900e675d
+704, 0x38427227c18a9a96
+705, 0xe0af01490a1b1b48
+706, 0x826f91997e057824
+707, 0x1e57308e6e50451
+708, 0xb42d469bbbfdc350
+709, 0xb9734cff1109c49b
+710, 0x98967559bb9d364f
+711, 0xd6be360041907c12
+712, 0xa86a1279122a1e21
+713, 0x26f99a8527bfc698
+714, 0xfa8b85758f28f5d6
+715, 0xe3057429940806ae
+716, 0x4bee2d7e84f93b2b
+717, 0x948350a76ea506f4
+718, 0xa139154488045e74
+719, 0x8893579ba5e78085
+720, 0x5f21c215c6a9e397
+721, 0x456134f3a59641dc
+722, 0x92c0273f8e97a9c6
+723, 0xd2936c9c3f0c6936
+724, 0xcfa4221e752c4735
+725, 0x28cd5a7457355dca
+726, 0xecdfdde23d90999f
+727, 0x60631b2d494d032b
+728, 0xf67289df269a827f
+729, 0xcbe8011ef0f5b7ef
+730, 0x20eea973c70a84f5
+731, 0xbe1fd200398557ce
+732, 0xd2279ee030191bba
+733, 0xf2bd4291dedaf819
+734, 0xfc6d167dbe8c402
+735, 0x39ac298da5d0044b
+736, 0xceac026f5f561ce
+737, 0x10a5b0bdd8ad60e6
+738, 0xdeb3c626df6d4bcb
+739, 0x3c128962e77ff6ca
+740, 0xc786262e9c67a0e5
+741, 0x4332855b3febcdc0
+742, 0x7bda9724d1c0e020
+743, 0x6a8c93399bc4df22
+744, 0xa9b20100ac707396
+745, 0xa11a3458502c4eb5
+746, 0xb185461c60478941
+747, 0x13131d56195b7ff6
+748, 0x8d55875ddbd4aa1c
+749, 0xc09b67425f469aa5
+750, 0x39e33786cc7594c4
+751, 0x75e96db8e4b08b93
+752, 0xda01cd12a3275d1e
+753, 0x2c49e7822344fab5
+754, 0x9bd5f10612514ca7
+755, 0x1c801a5c828e7332
+756, 0x29797d3f4f6c7b4c
+757, 0xac992715e21e4e53
+758, 0xe40e89ee887ddb37
+759, 0x15189a2b265a783b
+760, 0xa854159a52af5c5
+761, 0xb9d8a5a81c12bead
+762, 0x3240cdc9d59e2a58
+763, 0x1d0b872234cf8e23
+764, 0xc01224cf6ce12cff
+765, 0x2601e9f3905c8663
+766, 0xd4ecf9890168d6b4
+767, 0xa45db796d89bfdd5
+768, 0x9f389406dad64ab4
+769, 0xa5a851adce43ffe3
+770, 0xd0962c41c26e5aa9
+771, 0x8a671679e48510a4
+772, 0xc196dc0924a6bfeb
+773, 0x3ead661043b549cb
+774, 0x51af4ca737d405ac
+775, 0xf4425b5c62275fb6
+776, 0x71e69d1f818c10f5
+777, 0xacaf4af2d3c70162
+778, 0x2e1f1d4fd7524244
+779, 0xe54fdd8f388890e8
+780, 0xfda0d33e84eb2b83
+781, 0x53965c5e392b81da
+782, 0x5c92288267263097
+783, 0xcac1b431c878c66c
+784, 0x36c0e1cf417241c6
+785, 0x5cc4d9cd1a36bf2c
+786, 0x32e4257bb5d3e470
+787, 0x4aecff904adb44fb
+788, 0x4d91a8e0d1d60cac
+789, 0xa3b478388385b038
+790, 0x48d955f24eba70be
+791, 0x310e4deb07f24f68
+792, 0x8853e73b1f30a5a
+793, 0x278aee45c2a65c5
+794, 0xf6932eedbd62fb0b
+795, 0xafb95958c82fafad
+796, 0x78e807c18616c16c
+797, 0xd7abadda7488ed9f
+798, 0x2dd72e2572aa2ae6
+799, 0x6ec3791982c2be09
+800, 0x6865bb314fac478f
+801, 0xa14dc0ce09000d1a
+802, 0xb8081ad134da10f2
+803, 0xc4ac1534aa825ef5
+804, 0xd83aeb48ae2d538f
+805, 0x38052027e3074be4
+806, 0xa9833e06ef136582
+807, 0x4f02d790ec9fd78
+808, 0xec2f60bc711c5bdc
+809, 0x9253b0d12268e561
+810, 0xa8ac607fdd62c206
+811, 0x895e28ebc920289f
+812, 0xe2fd42b154243ac7
+813, 0xc69cac2f776eee19
+814, 0xf4d4ac11db56d0dc
+815, 0xa8d37049b9f39833
+816, 0x75abbf8a196c337c
+817, 0xb115bb76750d27b8
+818, 0x39426d187839154
+819, 0xd488423e7f38bf83
+820, 0xbb92e0c76ecb6a62
+821, 0x3055a018ce39f4e3
+822, 0xc93fe0e907729bfb
+823, 0x65985d17c5863340
+824, 0x2088ae081b2028e1
+825, 0x6e628de873314057
+826, 0x864377cccf573f0e
+827, 0xae03f4c9aa63d132
+828, 0xb1db766d6404c66d
+829, 0xdce5a22414a374b
+830, 0x622155b777819997
+831, 0x69fe96e620371f3c
+832, 0xa9c67dbc326d94fc
+833, 0x932a84ae5dd43bab
+834, 0xe2301a20f6c48c3f
+835, 0x795d2e79c6477300
+836, 0xd8e3e631289521e7
+837, 0xae2684979002dfd6
+838, 0xc9c2392377550f89
+839, 0xa1b0c99d508ef7ec
+840, 0x593aef3c5a5272ec
+841, 0xe32e511a4b7162cd
+842, 0xab3b81655f5a2857
+843, 0x1b535e1a0aaf053e
+844, 0x5b33f56c1b6a07e2
+845, 0x782dc8cfcac4ef36
+846, 0xb3d4f256eecfd202
+847, 0xf73a6598f58c4f7e
+848, 0xd5722189524870ae
+849, 0x707878de6b995fc0
+850, 0xc3eb6ba73e3d7e8a
+851, 0xca75c017655b75a7
+852, 0x1b29369ea3541e5f
+853, 0x352e98858bdb58a3
+854, 0x1e4412d184b6b27d
+855, 0x2d375ba0304b2d17
+856, 0x56c30fce69a5d08e
+857, 0x6b8c2b0c06584bda
+858, 0xde4dfff228c8c91f
+859, 0xb7c9edd574e6287f
+860, 0xf6078281c9fca2b2
+861, 0xb9b9a51de02a2f1e
+862, 0xa411bef31c0103b0
+863, 0xc5facd8fc5e1d7a3
+864, 0x54e631c05ddf7359
+865, 0x815b42b3fd06c474
+866, 0xc9ac07566fda18ec
+867, 0xd84ea62957bd8e15
+868, 0x5575f74b5cfd8803
+869, 0x5779a8d460c2e304
+870, 0xfd6e87e264a85587
+871, 0xa1d674daa320b26d
+872, 0x2c3c3ec64b35afc4
+873, 0x393a274ff03e6935
+874, 0x1f40ecbac52c50ea
+875, 0xc3de64fa324ffc0c
+876, 0x56ae828b7f9deb04
+877, 0xe7c1a77b5c1f2cb3
+878, 0xa4c4aab19ea921cc
+879, 0xec164c238825822c
+880, 0xa6a3304770c03b03
+881, 0x3a63641d5b1e8123
+882, 0x42677be3a54617ef
+883, 0xa2680423e3a200c0
+884, 0x8b17cf75f3f37277
+885, 0xe7ce65a49242be3d
+886, 0x7f85934271323e4b
+887, 0xcfb0f431f79a4fab
+888, 0x392e4041a8505b65
+889, 0xd3e5daf0d8b25ea6
+890, 0x9447eff675d80f53
+891, 0xea27a9d53cfaeea8
+892, 0xe3f2335945a83ba
+893, 0x8875a43ce216413b
+894, 0xe49941f9eabce33e
+895, 0x9357c1296683a5b1
+896, 0xf0f16439e81ee701
+897, 0x3181515295ffd79a
+898, 0x9d7150fffd169ed8
+899, 0x2d6a1d281e255a72
+900, 0x81bf1286fb3a92b6
+901, 0x566d3079b499e279
+902, 0xc7939ca8f047341
+903, 0xb1f8050e7c2d59f6
+904, 0x605701045e7be192
+905, 0x51b73360e8e31a1c
+906, 0x9f4ad54483ba9fe0
+907, 0xd3085b8fcf69d1c8
+908, 0xc3e7475026dc5f0b
+909, 0x5800f8554b157354
+910, 0x37dfdf858cfcd963
+911, 0x3a1fce05ce385072
+912, 0xf495c062645c20c3
+913, 0xdcbeec2c3492c773
+914, 0xc38f427589d1d0b4
+915, 0x681ead60216a8184
+916, 0x4bd569c40cc88c41
+917, 0x49b0d442e130b7a2
+918, 0xee349156b7d1fa3f
+919, 0x2bde2d2db055135b
+920, 0xc6a460d2fbcb2378
+921, 0xd0f170494ff3dbb
+922, 0xb294422492528a23
+923, 0xfc95873c854e7b86
+924, 0x6c9c3ad1797bb19c
+925, 0xe0c06f2aab65062d
+926, 0x58e32ce0f11e3a81
+927, 0xa745fcd729ff5036
+928, 0x599b249b2fc2cdb2
+929, 0x78f23b5b0dd5b082
+930, 0x6de3e957f549ecfc
+931, 0x9d0712fa6d878756
+932, 0x9076e8554e4a413a
+933, 0xf3185818c0294de8
+934, 0x5de7cdf4b455b9b6
+935, 0xb15f6908ed703f7d
+936, 0x98c654dfedc6818
+937, 0x120502ab0e93ae42
+938, 0x67966a98a58dc120
+939, 0x1caa0fc628989482
+940, 0xd8b2c3cd480a8625
+941, 0x85c70071b3aed671
+942, 0xff385f8473714662
+943, 0xe2868e4bf3773b63
+944, 0x96cf8019b279298e
+945, 0x8511cc930bd74800
+946, 0x5312e48fdd55f5ab
+947, 0xfcdae564b52df78d
+948, 0x9eee48373e652176
+949, 0x953788f6bcbc56b0
+950, 0xd1a3855dbd2f6b37
+951, 0x3ad32acf77f4d1e9
+952, 0x917c7be81b003e30
+953, 0x9ce817da1e2e9dfb
+954, 0x2968983db162d44d
+955, 0x1e005decef5828ad
+956, 0xc38fe59d1aa4f3d5
+957, 0xf357f1710dc02f1d
+958, 0x2613912a4c83ec67
+959, 0x832a11470b9a17cb
+960, 0x5e85508a611f0dad
+961, 0x2781131677f59d56
+962, 0xa82358d7d4b0237f
+963, 0xfbf8b3cc030c3af6
+964, 0x68b2f68ac8a55adb
+965, 0x3b6fcf353add0ada
+966, 0xd1956049bcd15bd5
+967, 0x95b76f31c7f98b6d
+968, 0x814b6690df971a84
+969, 0xdcf7959cddd819e4
+970, 0xcf8c72c5d804fc88
+971, 0x56883769c8945a22
+972, 0x1f034652f658cf46
+973, 0x41df1324cda235a1
+974, 0xeccd32524504a054
+975, 0x974e0910a04ec02c
+976, 0x72104507b821f6db
+977, 0x791f8d089f273044
+978, 0xe0f79a4f567f73c3
+979, 0x52fe5bea3997f024
+980, 0x5f8b9b446494f78
+981, 0xfd9f511947059190
+982, 0x3aea9dac6063bce3
+983, 0xbfdae4dfc24aee60
+984, 0xa82cdbbf0a280318
+985, 0xf460aae18d70aa9d
+986, 0x997367cb204a57c4
+987, 0x616e21ab95ba05ef
+988, 0x9bfc93bec116769f
+989, 0x2b2ee27c37a3fa5b
+990, 0xb25c6ed54006ee38
+991, 0xab04d4a5c69e69a5
+992, 0x6d2f6b45f2d8438f
+993, 0x4ad2f32afc82f092
+994, 0x513d718908f709c0
+995, 0x5272aadc4fffca51
+996, 0xeb3f87e66156ef5d
+997, 0xf8a3d5a46a86ba85
+998, 0xdb4548a86f27abfd
+999, 0x57c05f47ff62380d
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-1.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-1.csv
new file mode 100644
index 00000000..4fffe695
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xa475f55fbb6bc638
+1, 0xb2d594b6c29d971c
+2, 0x275bc4ece4484fb1
+3, 0x569be72d9b3492fb
+4, 0x89a5bb9b206a670c
+5, 0xd951bfa06afdc3f9
+6, 0x7ee2e1029d52a265
+7, 0x12ef1d4de0cb4d4c
+8, 0x41658ba8f0ef0280
+9, 0x5b650c82e4fe09c5
+10, 0x638a9f3e30ec4e94
+11, 0x147487fb2ba9233e
+12, 0x89ef035603d2d1fb
+13, 0xe66ca57a190e6cbe
+14, 0x330f673740dd61fc
+15, 0xc71d3dce2f8bb34e
+16, 0x3c07c39ff150b185
+17, 0x5df952b6cae8f099
+18, 0x9f09f2b1f0ceac80
+19, 0x19598eee2d0c4c67
+20, 0x64e06483702e0ebd
+21, 0xda04d1fdb545f7fa
+22, 0xf2cf53b61a0c4f9b
+23, 0xf0bb724ce196f66e
+24, 0x71cefde55d9cf0f
+25, 0x6323f62824a20048
+26, 0x1e93604680f14b4e
+27, 0xd9d8fad1d4654025
+28, 0xf4ee25af2e76ca08
+29, 0x6af3325896befa98
+30, 0xad9e43abf5e04053
+31, 0xbf930e318ce09de3
+32, 0x61f9583b4f9ffe76
+33, 0x9b69d0b3d5ec8958
+34, 0xa608f250f9b2ca41
+35, 0x6fdba7073dc2bb5d
+36, 0xa9d57601efea6d26
+37, 0xc24a88a994954105
+38, 0xc728b1f78d88fe5b
+39, 0x88da88c2b083b3b2
+40, 0xa9e27f7303c76cfd
+41, 0xc4c24608c29176eb
+42, 0x5420b58466b972fd
+43, 0xd2018a661b6756c8
+44, 0x7caed83d9573fc7
+45, 0x562a3d81b849a06a
+46, 0x16588af120c21f2c
+47, 0x658109a7e0eb4837
+48, 0x877aabb14d3822e1
+49, 0x95704c342c3745fe
+50, 0xeeb8a0dc81603616
+51, 0x431bf94889290419
+52, 0xe4a9410ab92a5863
+53, 0xbc6be64ea60f12ba
+54, 0x328a2da920015063
+55, 0x40f6b3bf8271ae07
+56, 0x4068ff00a0e854f8
+57, 0x1b287572ca13fa78
+58, 0xa11624a600490b99
+59, 0x4a04ef29eb7150fa
+60, 0xcc9469ab5ffb739
+61, 0x99a6a9f8d95e782
+62, 0x8e90356573e7a070
+63, 0xa740b8fb415c81c4
+64, 0x47eccef67447f3da
+65, 0x2c720afe3a62a49b
+66, 0xe2a747f0a43eacf4
+67, 0xba063a87ab165576
+68, 0xbc1c78ed27feb5a3
+69, 0x285a19fa3974f9d
+70, 0x489c61e704f5f0e3
+71, 0xf5ab04f6b03f238b
+72, 0x7e25f88138a110dd
+73, 0xc3d1cef3d7c1f1d1
+74, 0xc3de6ec64d0d8e00
+75, 0x73682a15b6cc5088
+76, 0x6fecbeb319163dc5
+77, 0x7e100d5defe570a1
+78, 0xad2af9af076dce57
+79, 0x3c65100e23cd3a9a
+80, 0x4b442cc6cfe521bb
+81, 0xe89dc50f8ab1ef75
+82, 0x8b3c6fdc2496566
+83, 0xdfc50042bc2c308c
+84, 0xe39c5f158b33d2b2
+85, 0x92f6adefdfeb0ac
+86, 0xdf5808a949c85b3e
+87, 0x437384021c9dace9
+88, 0xa7b5ed0d3d67d8f
+89, 0xe1408f8b21da3c34
+90, 0xa1bba125c1e80522
+91, 0x7611dc4710385264
+92, 0xb00a46ea84082917
+93, 0x51bf8002ffa87cef
+94, 0x9bb81013e9810adc
+95, 0xd28f6600013541cd
+96, 0xc2ca3b1fa7791c1f
+97, 0x47f9ad58f099c82c
+98, 0x4d1bb9458469caf9
+99, 0xca0b165b2844257
+100, 0xc3b2e667d075dc66
+101, 0xde22f71136a3dbb1
+102, 0x23b4e3b6f219e4c3
+103, 0x327e0db4c9782f66
+104, 0x9365506a6c7a1807
+105, 0x3e868382dedd3be7
+106, 0xff04fa6534bcaa99
+107, 0x96621a8862995305
+108, 0x81bf39cb5f8e1df7
+109, 0x79b684bb8c37af7a
+110, 0xae3bc073c3cde33c
+111, 0x7805674112c899ac
+112, 0xd95a27995abb20f2
+113, 0x71a503c57b105c40
+114, 0x5ff00d6a73ec8acc
+115, 0x12f96391d91e47c2
+116, 0xd55ca097b3bd4947
+117, 0x794d79d20468b04
+118, 0x35d814efb0d7a07d
+119, 0xfa9ac9bd0aae76d3
+120, 0xa77b8a3711e175cd
+121, 0xe6694fbf421f9489
+122, 0xd8f1756525a1a0aa
+123, 0xe38dfa8426277433
+124, 0x16b640c269bbcd44
+125, 0x2a7a5a67ca24cfeb
+126, 0x669039c28d5344b4
+127, 0x2a445ee81fd596bb
+128, 0x600df94cf25607e0
+129, 0x9358561a7579abff
+130, 0xee1d52ea179fc274
+131, 0x21a8b325e89d31be
+132, 0x36fc0917486eec0a
+133, 0x3d99f40717a6be9f
+134, 0x39ac140051ca55ff
+135, 0xcef7447c26711575
+136, 0xf22666870eff441d
+137, 0x4a53c6134e1c7268
+138, 0xd26de518ad6bdb1b
+139, 0x1a736bf75b8b0e55
+140, 0xef1523f4e6bd0219
+141, 0xb287b32fd615ad92
+142, 0x2583d6af5e841dd5
+143, 0x4b9294aae7ca670c
+144, 0xf5aa4a84174f3ca9
+145, 0x886300f9e0dc6376
+146, 0x3611401e475ef130
+147, 0x69b56432b367e1ac
+148, 0x30c330e9ab36b7c4
+149, 0x1e0e73079a85b8d5
+150, 0x40fdfc7a5bfaecf
+151, 0xd7760f3e8e75a085
+152, 0x1cc1891f7f625313
+153, 0xeece1fe6165b4272
+154, 0xe61111b0c166a3c1
+155, 0x2f1201563312f185
+156, 0xfd10e8ecdd2a57cb
+157, 0x51cdc8c9dd3a89bf
+158, 0xed13cc93938b5496
+159, 0x843816129750526b
+160, 0xd09995cd6819ada
+161, 0x4601e778d40607df
+162, 0xef9df06bd66c2ea0
+163, 0xae0bdecd3db65d69
+164, 0xbb921a3c65a4ae9a
+165, 0xd66698ce8e9361be
+166, 0xacdc91647b6068f4
+167, 0xe505ef68f2a5c1c0
+168, 0xd6e62fd27c6ab137
+169, 0x6a2ba2c6a4641d86
+170, 0x9c89143715c3b81
+171, 0xe408c4e00362601a
+172, 0x986155cbf5d4bd9d
+173, 0xb9e6831728c893a7
+174, 0xb985497c3bf88d8c
+175, 0xd0d729214b727bec
+176, 0x4e557f75fece38a
+177, 0x6572067fdfd623ca
+178, 0x178d49bb4d5cd794
+179, 0xe6baf59f60445d82
+180, 0x5607d53518e3a8d2
+181, 0xba7931adb6ebbd61
+182, 0xe853576172611329
+183, 0xe945daff96000c44
+184, 0x565b9ba3d952a176
+185, 0xcdb54d4f88c584c8
+186, 0x482a7499bee9b5e5
+187, 0x76560dd0affe825b
+188, 0x2a56221faa5ca22c
+189, 0x7729be5b361f5a25
+190, 0xd6f2195795764876
+191, 0x59ef7f8f423f18c5
+192, 0x7ebefed6d02adde1
+193, 0xcfec7265329c73e5
+194, 0x4fd8606a5e59881c
+195, 0x95860982ae370b73
+196, 0xdecfa33b1f902acc
+197, 0xf9b8a57400b7c0a6
+198, 0xd20b822672ec857b
+199, 0x4eb81084096c7364
+200, 0xe535c29a44d9b6ad
+201, 0xdef8b48ebacb2e29
+202, 0x1063bc2b8ba0e915
+203, 0xe4e837fb53d76d02
+204, 0x4df935db53579fb8
+205, 0xa30a0c8053869a89
+206, 0xe891ee58a388a7b5
+207, 0x17931a0c64b8a985
+208, 0xaf2d350b494ce1b3
+209, 0x2ab9345ffbcfed82
+210, 0x7de3fe628a2592f0
+211, 0x85cf54fab8b7e79d
+212, 0x42d221520edab71b
+213, 0x17b695b3af36c233
+214, 0xa4ffe50fe53eb485
+215, 0x1102d242db800e4d
+216, 0xc8dc01f0233b3b6
+217, 0x984a030321053d36
+218, 0x27fa8dc7b7112c0e
+219, 0xba634dd8294e177f
+220, 0xe67ce34b36332eb
+221, 0x8f1351e1894fb41a
+222, 0xb522a3048761fd30
+223, 0xc350ad9bc6729edc
+224, 0xe0ed105bd3c805e1
+225, 0xa14043d2b0825aa7
+226, 0xee7779ce7fc11fdf
+227, 0xc0fa8ba23a60ab25
+228, 0xb596d1ce259afbad
+229, 0xaa9b8445537fdf62
+230, 0x770ab2c700762e13
+231, 0xe812f1183e40cc1
+232, 0x44bc898e57aefbbd
+233, 0xdd8a871df785c996
+234, 0x88836a5e371eb36b
+235, 0xb6081c9152623f27
+236, 0x895acbcd6528ca96
+237, 0xfb67e33ddfbed435
+238, 0xaf7af47d323ce26
+239, 0xe354a510c3c39b2d
+240, 0x5cacdedda0672ba3
+241, 0xa440d9a2c6c22b09
+242, 0x6395099f48d64304
+243, 0xc11cf04c75f655b5
+244, 0x1c4e054d144ddb30
+245, 0x3e0c2db89d336636
+246, 0x127ecf18a5b0b9a7
+247, 0x3b50551a88ea7a73
+248, 0xbd27003e47f1f684
+249, 0xf32d657782baac9b
+250, 0x727f5cabf020bc9
+251, 0x39c1c1c226197dc7
+252, 0x5552c87b35deeb69
+253, 0x64d54067b5ce493f
+254, 0x3494b091fe28dda0
+255, 0xdf0278bc85ee2965
+256, 0xdef16fec25efbd66
+257, 0xe2be09f578c4ce28
+258, 0xd27a9271979d3019
+259, 0x427f6fcd71845e3
+260, 0x26b52c5f81ec142b
+261, 0x98267efc3986ad46
+262, 0x7bf4165ddb7e4374
+263, 0xd05f7996d7941010
+264, 0x3b3991de97b45f14
+265, 0x9068217fb4f27a30
+266, 0xd8fe295160afc7f3
+267, 0x8a159fab4c3bc06f
+268, 0x57855506d19080b6
+269, 0x7636df6b3f2367a4
+270, 0x2844ee3abd1d5ec9
+271, 0xe5788de061f51c16
+272, 0x69e78cc9132a164
+273, 0xacd53cde6d8cd421
+274, 0xb23f3100068e91da
+275, 0x4140070a47f53891
+276, 0xe4a422225a96e53a
+277, 0xb82a8925a272a2ac
+278, 0x7c2f9573590fe3b7
+279, 0xbaf80764db170575
+280, 0x955abffa54358368
+281, 0x355ce7460614a869
+282, 0x3700ede779a4afbf
+283, 0x10a6ec01d92d68cd
+284, 0x3308f5a0a4c0afef
+285, 0x97b892d7601136c9
+286, 0x4955c3b941b8552e
+287, 0xca85aa67e941961d
+288, 0xb1859ae5db28e9d2
+289, 0x305d072ac1521fbd
+290, 0xed52a868996085bb
+291, 0x723bfa6a76358852
+292, 0x78d946ecd97c5fb3
+293, 0x39205b30a8e23e79
+294, 0xb927e3d086baadbe
+295, 0xa18d6946136e1ff5
+296, 0xdab6f0b51c1eb5ff
+297, 0xf0a640bf7a1af60c
+298, 0xf0e81db09004d0d4
+299, 0xfe76cebdbe5a4dde
+300, 0x2dafe9cc3decc376
+301, 0x4c871fdf1af34205
+302, 0xe79617d0c8fa893b
+303, 0xee658aaad3a141f7
+304, 0xfd91aa74863e19f1
+305, 0x841b8f55c103cc22
+306, 0x22766ed65444ad5d
+307, 0x56d03d1beca6c17a
+308, 0x5fd4c112c92036ae
+309, 0x75466ae58a5616dc
+310, 0xfbf98b1081e802a9
+311, 0xdc325e957bf6d8f5
+312, 0xb08da7015ebd19b7
+313, 0xf25a9c0944f0c073
+314, 0xf4625bafa0ced718
+315, 0x4349c9e093a9e692
+316, 0x75a9ccd4dd8935cb
+317, 0x7e6cf9e539361e91
+318, 0x20fdd22fb6edd475
+319, 0x5973021b57c2311f
+320, 0x75392403667edc15
+321, 0xed9b2156ea70d9f1
+322, 0xf40c114db50b64a0
+323, 0xe26bb2c9eef20c62
+324, 0x409c1e3037869f03
+325, 0xcdfd71fdda3b7f91
+326, 0xa0dfae46816777d6
+327, 0xde060a8f61a8deb8
+328, 0x890e082a8b0ca4fc
+329, 0xb9f2958eddf2d0db
+330, 0xd17c148020d20e30
+331, 0xffdc9cc176fe7201
+332, 0xffb83d925b764c1
+333, 0x817ea639e313da8d
+334, 0xa4dd335dd891ca91
+335, 0x1342d25a5e81f488
+336, 0xfa7eb9c3cf466b03
+337, 0xfe0a423d44b185d0
+338, 0x101cfd430ab96049
+339, 0x7b5d3eda9c4504b
+340, 0xe20ccc006e0193f1
+341, 0xf54ccddedebc5df0
+342, 0xc0edd142bd58f1db
+343, 0x3831f40d378d2430
+344, 0x80132353f0a88289
+345, 0x688f23c419d03ef8
+346, 0x4c6837e697884066
+347, 0x699387bb2e9a3a8f
+348, 0x8996f860342448d8
+349, 0xb0f80dff99bfa5cc
+350, 0x3e927a7f9ea12c8e
+351, 0xd7e498d1e5f9dff3
+352, 0x78ecb97bb3f864cc
+353, 0x3c4ffd069a014d38
+354, 0xf8d5073a1e09b4d4
+355, 0x8717e854f9faef23
+356, 0xfbcc5478d8d0ad7
+357, 0xd3cd8b233ca274ff
+358, 0x8bd8f11f79beb265
+359, 0xf64498a832d8fd0e
+360, 0xb01bba75112131ec
+361, 0x55572445a7869781
+362, 0x7b56622f18cb3d7a
+363, 0x7f192c9e075bdb83
+364, 0xd9a112f836b83ff3
+365, 0x68673b37269653dc
+366, 0xe46a9433fb6a0879
+367, 0x127d756ca4779001
+368, 0xc1378e8b1e8eab94
+369, 0x1006edb0f51d078c
+370, 0xc6dd53961232d926
+371, 0x9a4aeef44038256d
+372, 0xd357f4fa652d4f5f
+373, 0x59f3d2cc3378598
+374, 0xe76e6207a824a7fc
+375, 0x5fc5e33712ceffef
+376, 0x77d24aeb0ccb1adc
+377, 0x5be4b2826805659e
+378, 0x257c69d787e64634
+379, 0x58dd52ca6bc727b1
+380, 0x3ab997767235ea33
+381, 0x986a2a7a966fad14
+382, 0xc900f8b27761dcc4
+383, 0x44991bdb13795700
+384, 0xe5c145a4fe733b2
+385, 0x56f041b56bffe0d3
+386, 0x5779c4fef8067996
+387, 0xa0fe8748e829532d
+388, 0x840c1277d78d9dd4
+389, 0x37ebcb315432acbc
+390, 0xf4bc8738433ba3be
+391, 0x8b122993f2e10062
+392, 0xe1fe8481f2681ed5
+393, 0x8e23f1630d9f494a
+394, 0xda24661a01b7d0b3
+395, 0x7a02942a179cee36
+396, 0xf1e08a3c09b71ac
+397, 0x3dec2cc7ee0bd8fd
+398, 0x1f3e480113d805d4
+399, 0xc061b973ad4e3f2c
+400, 0x6bea750f17a66836
+401, 0xbc2add72eac84c25
+402, 0xcff058d3f97934ca
+403, 0x54ccc30987778ec2
+404, 0x93449ec1e1469558
+405, 0xe2ff369eb0c6836
+406, 0x41c2df2d63bf8e55
+407, 0xf9302629b6c71be2
+408, 0xdd30376b8e5ab29a
+409, 0x12db9e04f911d754
+410, 0x8d03d6cd359f1b97
+411, 0xe15956511abf1cee
+412, 0x9b68e10e2c2fd940
+413, 0x2e28de6491c1ce53
+414, 0x52b329b72d0c109d
+415, 0xc2c0b115f9da2a60
+416, 0x6ca084105271bbff
+417, 0x49b92b8676058c1e
+418, 0x767fc92a70f7e5a3
+419, 0x87ba4ed4b65a6aa0
+420, 0xf70b052e0a3975e9
+421, 0x3e925c3306db9eec
+422, 0x43253f1d96ac9513
+423, 0xe3e04f1a1ea454c4
+424, 0x763e3f4cc81ba0c8
+425, 0x2a2721ac69265705
+426, 0xdf3b0ac6416ea214
+427, 0xa6a6b57450f3e000
+428, 0xc3d3b1ac7dbfe6ac
+429, 0xb66e5e6f7d2e4ec0
+430, 0x43c65296f98f0f04
+431, 0xdb0f6e3ff974d842
+432, 0x3d6b48e02ebb203b
+433, 0xd74674ebf09d8f27
+434, 0xbe65243c58fc1200
+435, 0x55eb210a68d42625
+436, 0x87badab097dbe883
+437, 0xada3fda85a53824f
+438, 0xef2791e8f48cd37a
+439, 0x3fe7fceb927a641a
+440, 0xd3bffd3ff031ac78
+441, 0xb94efe03da4d18fb
+442, 0x162a0ad8da65ea68
+443, 0x300f234ef5b7e4a6
+444, 0xa2a8b4c77024e4fb
+445, 0x5950f095ddd7b109
+446, 0xded66dd2b1bb02ba
+447, 0x8ec24b7fa509bcb6
+448, 0x9bede53d924bdad6
+449, 0xa9c3f46423be1930
+450, 0x6dfc90597f8de8b4
+451, 0xb7419ebc65b434f0
+452, 0xa6596949238f58b9
+453, 0x966cbade640829b8
+454, 0x58c74877bdcbf65e
+455, 0xaa103b8f89b0c453
+456, 0x219f0a86e41179a4
+457, 0x90f534fc06ddc57f
+458, 0x8db7cdd644f1affa
+459, 0x38f91de0167127ac
+460, 0xdcd2a65e4df43daa
+461, 0x3e04f34a7e01f834
+462, 0x5b237eea68007768
+463, 0x7ff4d2b015921768
+464, 0xf786b286549d3d51
+465, 0xaefa053fc2c3884c
+466, 0x8e6a8ff381515d36
+467, 0x35b94f3d0a1fce3c
+468, 0x165266d19e9abb64
+469, 0x1deb5caa5f9d8076
+470, 0x13ab91290c7cfe9d
+471, 0x3651ca9856be3e05
+472, 0xe7b705f6e9cccc19
+473, 0xd6e7f79668c127ed
+474, 0xa9faf37154896f92
+475, 0x89fbf190603e0ab1
+476, 0xb34d155a86f942d0
+477, 0xb2d4400a78bfdd76
+478, 0x7c0946aca8cfb3f0
+479, 0x7492771591c9d0e8
+480, 0xd084d95c5ca2eb28
+481, 0xb18d12bd3a6023e
+482, 0xea217ed7b864d80b
+483, 0xe52f69a755dd5c6f
+484, 0x127133993d81c4aa
+485, 0xe07188fcf1670bfb
+486, 0x178fbfe668e4661d
+487, 0x1c9ee14bb0cda154
+488, 0x8d043b96b6668f98
+489, 0xbc858986ec96ca2b
+490, 0x7660f779d528b6b7
+491, 0xd448c6a1f74ae1d3
+492, 0x178e122cfc2a6862
+493, 0x236f000abaf2d23b
+494, 0x171b27f3f0921915
+495, 0x4c3ff07652f50a70
+496, 0x18663e5e7d3a66ca
+497, 0xb38c97946c750cc9
+498, 0xc5031aae6f78f909
+499, 0x4d1514e2925e95c1
+500, 0x4c2184a741dabfbb
+501, 0xfd410364edf77182
+502, 0xc228157f863ee873
+503, 0x9856fdc735cc09fc
+504, 0x660496cd1e41d60e
+505, 0x2edf1d7e01954c32
+506, 0xd32e94639bdd98cf
+507, 0x8e153f48709a77d
+508, 0x89357f332d2d6561
+509, 0x1840d512c97085e6
+510, 0x2f18d035c9e26a85
+511, 0x77b88b1448b26d5b
+512, 0xc1ca6ef4cdae0799
+513, 0xcc203f9e4508165f
+514, 0xeaf762fbc9e0cbbe
+515, 0xc070c687f3c4a290
+516, 0xd49ed321068d5c15
+517, 0x84a55eec17ee64ee
+518, 0x4d8ee685298a8871
+519, 0x9ff5f17d7e029793
+520, 0x791d7d0d62e46302
+521, 0xab218b9114e22bc6
+522, 0x4902b7ab3f7119a7
+523, 0x694930f2e29b049e
+524, 0x1a3c90650848999f
+525, 0x79f1b9d8499c932b
+526, 0xfacb6d3d55e3c92f
+527, 0x8fd8b4f25a5da9f5
+528, 0xd037dcc3a7e62ae7
+529, 0xfecf57300d8f84f4
+530, 0x32079b1e1dc12d48
+531, 0xe5f8f1e62b288f54
+532, 0x97feba3a9c108894
+533, 0xd279a51e1899a9a0
+534, 0xd68eea8e8e363fa8
+535, 0x7394cf2deeca9386
+536, 0x5f70b0c80f1dbf10
+537, 0x8d646916ed40462
+538, 0xd253bb1c8a12bbb6
+539, 0x38f399a821fbd73e
+540, 0x947523a26333ac90
+541, 0xb52e90affbc52a37
+542, 0xcf899cd964654da4
+543, 0xdf66ae9cca8d99e7
+544, 0x6051478e57c21b6a
+545, 0xffa7dc975af3c1da
+546, 0x195c7bff2d1a8f5
+547, 0x64f12b6575cf984d
+548, 0x536034cb842cf9e1
+549, 0x180f247ce5bbfad
+550, 0x8ced45081b134867
+551, 0x532bbfdf426710f3
+552, 0x4747933e74c4f54d
+553, 0x197a890dc4793401
+554, 0x76c7cc2bd42fae2
+555, 0xdabfd67f69675dd0
+556, 0x85c690a68cdb3197
+557, 0xe482cec89ce8f92
+558, 0x20bc9fb7797011b1
+559, 0x76dc85a2185782ad
+560, 0x3df37c164422117a
+561, 0x99211f5d231e0ab0
+562, 0xef7fd794a0a91f4
+563, 0x419577151915f5fe
+564, 0x3ce14a0a7135dae3
+565, 0x389b57598a075d6a
+566, 0x8cc2a9d51b5af9aa
+567, 0xe80a9beffbd13f13
+568, 0x65e96b22ea8a54d8
+569, 0x79f38c4164138ede
+570, 0xd1955846cba03d81
+571, 0x60359fe58e4f26d6
+572, 0x4ea724f585f8d13e
+573, 0x316dfdbadc801a3c
+574, 0x20aa29b7c6dd66fe
+575, 0x65eaf83a6a008caa
+576, 0x407000aff1b9e8cb
+577, 0xb4d49bfb2b268c40
+578, 0xd4e6fe8a7a0f14a9
+579, 0xe34afef924e8f58e
+580, 0xe377b0c891844824
+581, 0x29c2e20c112d30c8
+582, 0x906aad1fe0c18a95
+583, 0x308385f0efbb6474
+584, 0xf23900481bf70445
+585, 0xfdfe3ade7f937a55
+586, 0xf37aae71c33c4f97
+587, 0x1c81e3775a8bed85
+588, 0x7eb5013882ce35ea
+589, 0x37a1c1692495818d
+590, 0x3f90ae118622a0ba
+591, 0x58e4fe6fea29b037
+592, 0xd10ff1d269808825
+593, 0xbce30edb60c21bba
+594, 0x123732329afd6fee
+595, 0x429b4059f797d840
+596, 0x421166568a8c4be1
+597, 0x88f895c424c1bd7f
+598, 0x2adaf7a7b9f781cb
+599, 0xa425644b26cb698
+600, 0x8cc44d2486cc5743
+601, 0xdb9f357a33abf6ba
+602, 0x1a57c4ea77a4d70c
+603, 0x1dea29be75239e44
+604, 0x463141a137121a06
+605, 0x8fecfbbe0b8a9517
+606, 0x92c83984b3566123
+607, 0x3b1c69180ed28665
+608, 0x14a6073425ea8717
+609, 0x71f4c2b3283238d7
+610, 0xb3d491e3152f19f
+611, 0x3a0ba3a11ebac5d2
+612, 0xddb4d1dd4c0f54ac
+613, 0xdb8f36fe02414035
+614, 0x1cf5df5031b1902c
+615, 0x23a20ed12ef95870
+616, 0xf113e573b2dedcbb
+617, 0x308e2395cde0a9fa
+618, 0xd377a22581c3a7da
+619, 0xe0ced97a947a66fb
+620, 0xe44f4de9cd754b00
+621, 0x2344943337d9d1bf
+622, 0x4b5ae5e2ea6e749c
+623, 0x9b8d2e3ef41d1c01
+624, 0x59a5a53ebbd24c6b
+625, 0x4f7611bf9e8a06fb
+626, 0xea38c7b61361cd06
+627, 0xf125a2bfdd2c0c7
+628, 0x2df8dcb5926b9ebb
+629, 0x233e18720cc56988
+630, 0x974c61379b4aa95e
+631, 0xc7fe24c1c868910b
+632, 0x818fd1affc82a842
+633, 0xcee92a952a26d38e
+634, 0x8962f575ebcbf43
+635, 0x7770687e3678c460
+636, 0xdfb1db4ed1298117
+637, 0xb9db54cb03d434d3
+638, 0x34aebbf2244257ad
+639, 0xd836db0cb210c490
+640, 0x935daed7138957cd
+641, 0x3cd914b14e7948fd
+642, 0xd0472e9ed0a0f7f0
+643, 0xa9df33dca697f75e
+644, 0x15e9ea259398721a
+645, 0x23eeba0f970abd60
+646, 0x2217fdf8bbe99a12
+647, 0x5ea490a95717b198
+648, 0xf4e2bfc28280b639
+649, 0x9d19916072d6f05c
+650, 0x5e0387cab1734c6a
+651, 0x93c2c8ac26e5f01e
+652, 0xb0d934354d957eb1
+653, 0xee5099a1eef3188c
+654, 0x8be0abca8edc1115
+655, 0x989a60845dbf5aa3
+656, 0x181c7ed964eee892
+657, 0x49838ea07481288d
+658, 0x17dbc75d66116b2e
+659, 0xa4cafb7a87c0117e
+660, 0xab2d0ae44cdc2e6e
+661, 0xdf802f2457e7da6
+662, 0x4b966c4b9187e124
+663, 0x62de9db6f4811e1a
+664, 0x1e20485968bc62
+665, 0xe9ac288265caca94
+666, 0xc5c694d349aa8c1a
+667, 0x3d67f2083d9bdf10
+668, 0x9a2468e503085486
+669, 0x9d6acd3dc152d1a3
+670, 0xca951e2aeee8df77
+671, 0x2707371af9cdd7b0
+672, 0x2347ae6a4eb5ecbd
+673, 0x16abe5582cb426f
+674, 0x523af4ff980bbccb
+675, 0xb07a0f043e3694aa
+676, 0x14d7c3da81b2de7
+677, 0xf471f1b8ac22305b
+678, 0xdb087ffff9e18520
+679, 0x1a352db3574359e8
+680, 0x48d5431502cc7476
+681, 0x7c9b7e7003dfd1bf
+682, 0x4f43a48aae987169
+683, 0x9a5d3eb66dedb3e9
+684, 0xa7b331af76a9f817
+685, 0xba440154b118ab2d
+686, 0x64d22344ce24c9c6
+687, 0xa22377bd52bd043
+688, 0x9dfa1bb18ca6c5f7
+689, 0xdccf44a92f644c8b
+690, 0xf623d0a49fd18145
+691, 0x556d5c37978e28b3
+692, 0xad96e32ce9d2bb8b
+693, 0x2e479c120be52798
+694, 0x7501cf871af7b2f7
+695, 0xd02536a5d026a5b8
+696, 0x4b37ff53e76ab5a4
+697, 0xdb3a4039caaeab13
+698, 0x6cbd65e3b700c7be
+699, 0x7367abd98761a147
+700, 0xf4f9ba216a35aa77
+701, 0xf88ca25ce921eb86
+702, 0xb211de082ec2cbf2
+703, 0xdd94aa46ec57e12e
+704, 0xa967d74ad8210240
+705, 0xdaa1fada8cfa887
+706, 0x85901d081c4488ee
+707, 0xcf67f79a699ef06
+708, 0x7f2f1f0de921ee14
+709, 0x28bc61e9d3f2328b
+710, 0x3332f2963faf18e5
+711, 0x4167ac71fcf43a6
+712, 0x843c1746b0160b74
+713, 0xd9be80070c578a5e
+714, 0xbd7250c9af1473e7
+715, 0x43f78afaa3647899
+716, 0x91c6b5dd715a75a5
+717, 0x29cc66c8a07bfef3
+718, 0x3f5c667311dc22be
+719, 0x4f49cd47958260cd
+720, 0xbef8be43d920b64e
+721, 0x7a892a5f13061d8b
+722, 0x9532f40125c819b1
+723, 0x924fca3045f8a564
+724, 0x9b2c6442453b0c20
+725, 0x7e21009085b8e793
+726, 0x9b98c17e17af59d2
+727, 0xba61acb73e3ae89a
+728, 0xb9d61a710555c138
+729, 0xc2a425d80978974b
+730, 0xa275e13592da7d67
+731, 0xe962103202d9ad0f
+732, 0xbdf8367a4d6f33fd
+733, 0xe59beb2f8648bdc8
+734, 0xb4c387d8fbc4ac1c
+735, 0x5e3f276b63054b75
+736, 0xf27e616aa54d8464
+737, 0x3f271661d1cd7426
+738, 0x43a69dbee7502c78
+739, 0x8066fcea6df059a1
+740, 0x3c10f19409bdc993
+741, 0x6ba6f43fb21f23e0
+742, 0x9e182d70a5bccf09
+743, 0x1520783d2a63a199
+744, 0xba1dcc0c70b9cace
+745, 0x1009e1e9b1032d8
+746, 0xf632f6a95fb0315
+747, 0x48e711c7114cbfff
+748, 0xef281dcec67debf7
+749, 0x33789894d6abf59b
+750, 0x6c8e541fffbe7f9c
+751, 0x85417f13b08e0a88
+752, 0x9a581e36d589608f
+753, 0x461dca50b1befd35
+754, 0x5a3231680dde6462
+755, 0xcc57acf729780b97
+756, 0x50301efef62e1054
+757, 0x675d042cd4f6bbc9
+758, 0x1652fdd3794384c9
+759, 0x1c93bbeeb763cd4d
+760, 0x44b7240c4b105242
+761, 0x4c6af2a1b606ccfb
+762, 0x18fc43ece2ec1a40
+763, 0x859a5511aeae8acb
+764, 0x2f56826f1996ad2f
+765, 0xa8e95ce8bb363bdf
+766, 0xf4da396054e50e4b
+767, 0x5493865e9895883c
+768, 0x768e4c8b332ac0e3
+769, 0x32195d2aa583fca5
+770, 0xf2f353f21266bc15
+771, 0x43cddf1d021307d
+772, 0x6031e3aa30300e4a
+773, 0x4f1298469ac6088f
+774, 0x4b4d450bafac574e
+775, 0x23e1cf9c0582a22b
+776, 0x2e9036980db49cd0
+777, 0xe4e228b113c411b2
+778, 0x8bddcdb82b51706
+779, 0xd2a7ea8288593629
+780, 0x67fe90e98fdda61
+781, 0x7b63494dba95717b
+782, 0x105625904510d782
+783, 0xdf4aa2242454e50a
+784, 0x32541d6cd7d6c7e3
+785, 0x5661fb432591cf3b
+786, 0xce920a5ed047bce7
+787, 0xed4178a3c96eea8f
+788, 0xe378cd996e39863b
+789, 0x169e1fdc8e2b05e1
+790, 0xaee1812ef7149a96
+791, 0x648571c7453d12c5
+792, 0xb7b6bc9328573c43
+793, 0xe7fb969078e270d7
+794, 0xdfc2b1b8985f6e6f
+795, 0x862b6527ee39a1aa
+796, 0x1ee329aea91d7882
+797, 0x20d25324f2fe704
+798, 0xbfcc47401fc3bbfd
+799, 0x1515cdc8d48b2904
+800, 0xbd6eefe86284261c
+801, 0x9b1f28e3b35f22ee
+802, 0x842a29d35e5aecda
+803, 0xf2346109ad370765
+804, 0x24d68add5a71afd9
+805, 0x4a691421613d91e2
+806, 0x60e3058b3c244051
+807, 0x79194905cdaa5de8
+808, 0xe0e2df35c01e8987
+809, 0xe29b78beffbb5e4a
+810, 0xcdcdbc020218c19e
+811, 0x5ae0af8c16feae43
+812, 0x8109292feeaf14fa
+813, 0x34113f7508dfa521
+814, 0xc062ac163f56730a
+815, 0xf1660e66ec6d4c4c
+816, 0x5966c55f60151c80
+817, 0x3865ae8ec934b17
+818, 0x472a7314afb055ec
+819, 0x7a24277309a44a44
+820, 0x556e02dd35d38baa
+821, 0x9849611a1bc96ec1
+822, 0xd176f5d5a8eb0843
+823, 0x44db12ec60510030
+824, 0x272e3a06a0030078
+825, 0x7c4764dbefc075ea
+826, 0x910712f3735c1183
+827, 0xd49a2da74ae7aff6
+828, 0xcf9b3e6e8f776d71
+829, 0x27789fe3ec481a02
+830, 0x86659f82c6b5912b
+831, 0xe044b3dbf339158c
+832, 0x99d81f6bb62a37b0
+833, 0x5f5830c246fada9a
+834, 0xe68abab1eeb432cb
+835, 0x49c5c5ace04e104
+836, 0x1ac3871b3fc6771b
+837, 0x773b39f32d070652
+838, 0x9c4138c2ae58b1f3
+839, 0xac41c63d7452ac60
+840, 0x9248826b245359e1
+841, 0x99bba1c7a64f1670
+842, 0xe0dc99ff4ebb92f2
+843, 0x113638652740f87c
+844, 0xebf51e94da88cfc
+845, 0x5441c344b81b2585
+846, 0xe1e69e0bc2de652a
+847, 0xe9ab6d64ae42ed1e
+848, 0x879af8730e305f31
+849, 0x36b9ad912c7e00d6
+850, 0x83ef5e9fca853886
+851, 0xda54d48bb20ea974
+852, 0x32c6d93aefa92aa2
+853, 0x4e887b2c3391847d
+854, 0x50966e815f42b1b8
+855, 0x53411ac087832837
+856, 0x46f64fef79df4f29
+857, 0xb34aae3924cd272c
+858, 0xf5ad455869a0adbe
+859, 0x8351ded7144edac8
+860, 0xeb558af089677494
+861, 0x36ed71d69293a8d6
+862, 0x659f90bf5431b254
+863, 0x53349102b7519949
+864, 0x3db83e20b1713610
+865, 0x6d63f96090556254
+866, 0x4cc0467e8f45c645
+867, 0xb8840c4bd5cd4091
+868, 0xbd381463cc93d584
+869, 0x203410d878c2066d
+870, 0x2ebea06213cf71c8
+871, 0x598e8fb75e3fceb4
+872, 0xdcca41ceba0fce02
+873, 0x61bf69212b56aae5
+874, 0x97eed7f70c9114fa
+875, 0xf46f37a8b7a063f9
+876, 0x66c8f4ffe5bd6efa
+877, 0xe43fd6efda2d4e32
+878, 0x12d6c799e5ad01de
+879, 0x9ac83e7f8b709360
+880, 0xbbb7bb3c1957513d
+881, 0x7f87c08d4b3796b0
+882, 0x9a7d1d74b6aa4a5c
+883, 0xa4314530ff741b6f
+884, 0x99a80c6b6f15fca8
+885, 0xd2fec81d6d5fc3ce
+886, 0x15a98be1cc40cea
+887, 0x98693eb7719366f3
+888, 0x36ccdc2a9e9d4de8
+889, 0x3c8208f63d77df25
+890, 0xca2e376e2343df6
+891, 0xcc9b17cbb54420c6
+892, 0x8724c44a64d7dcb8
+893, 0x9d00c6949ff33869
+894, 0xf4f8e584d2699372
+895, 0x88f4748cdd5a2d53
+896, 0xe215072a1205bc6d
+897, 0x190934fe6d740442
+898, 0x7fac5c0ab2af106d
+899, 0x1b86633a0bd84fa1
+900, 0x1293e54318492dfb
+901, 0x433324fd390f34b9
+902, 0x4c5eb2c67a44643b
+903, 0x59a6e281c388b0dd
+904, 0xe78e03f9c44623b7
+905, 0x91307a93c768fc3d
+906, 0xde8867b004d8e3ff
+907, 0xdf52c3f57b7c5862
+908, 0x993f3e1d10358a92
+909, 0x9ccb10bc3e18662d
+910, 0x45093ce48a114c73
+911, 0xd59d05979d26330a
+912, 0x417c0e03300119a9
+913, 0x1c336500f90cde81
+914, 0x1c8ccd29ead9b85b
+915, 0xb76baf3e55d4d950
+916, 0x133ad6196c75fd7e
+917, 0x34200b0cde7ed560
+918, 0x9c7c3dacb213c8d9
+919, 0xd97563c4fd9bf1b6
+920, 0x5d910e871835b6cb
+921, 0x7d46c4733a16bdf9
+922, 0xe41d73194ddc87b2
+923, 0x7d3d8a0855a465a9
+924, 0x70c2a8b5d3f90c0f
+925, 0x9e7565ca5dccfe12
+926, 0x2c0acb4577aa51b1
+927, 0x3d2cd211145b79c7
+928, 0x15a7b17aa6da7732
+929, 0xab44a3730c27d780
+930, 0xf008bd6c802bde3a
+931, 0x82ed86ddf3619f77
+932, 0xaabe982ab15c49f9
+933, 0x9bcad8fa6d8e58a4
+934, 0x8f39ed8243718aa1
+935, 0xe9489340e03e3cb6
+936, 0xc722314f5eefb8d0
+937, 0x870e8869a436df59
+938, 0x4dae75b8087a8204
+939, 0xe1d790f6ec6e425b
+940, 0xafd39ea1b1d0ed09
+941, 0xdf2c99e464ddf08f
+942, 0x74936d859ab9644d
+943, 0x3871302164250e73
+944, 0x764b68921e911886
+945, 0x2a1d024b26bb9d66
+946, 0x797fba43918e75b4
+947, 0x62ec6d24ccca335b
+948, 0xf4bd8b951762b520
+949, 0x9d450dede9119397
+950, 0x5393a26d10f8c124
+951, 0x6b74769392896b57
+952, 0x7f61dbcc0e328581
+953, 0x64e1df3884d0d94
+954, 0xba77dcdf23738c37
+955, 0xf8e288bc0a177475
+956, 0x4a8abfd1702ecb7d
+957, 0x53f22886694736a7
+958, 0x8fc982597ced3e3
+959, 0x1bc46090f820fff7
+960, 0x8bd31f965d02229f
+961, 0x65cd0cb29996ee53
+962, 0x702e0f4fcf8c2e9f
+963, 0x293b77bff307a9a0
+964, 0x125a986b8b305788
+965, 0x416b0eea428ebf3c
+966, 0xeac85421ab0e8469
+967, 0x7f5496095019aa68
+968, 0x1a96d7afbc708e0
+969, 0xb91262e6766e01e1
+970, 0xd0a549cc4ccc6954
+971, 0x75a9a073f50c8a0d
+972, 0xae275d2c1c6cd23c
+973, 0xcf159b5ec5d28fd4
+974, 0x75d0838ce9b92b
+975, 0xd4eddcee6dc4677f
+976, 0x6a0a8ad5df6b75b8
+977, 0x6f3fd0ef0f13ecc4
+978, 0xb75a5826c1a8f8a8
+979, 0xd47098bbc7943766
+980, 0x3d4ddd62d5f23dd1
+981, 0x760a904e4583841c
+982, 0x2afeb5022b4cf1f
+983, 0x66d5f653729f0a13
+984, 0x9a6a5ab62980d30f
+985, 0xc332f5643bbf8d5b
+986, 0x848fb702e4056a90
+987, 0xa057beaf3f9e8c5f
+988, 0x6cc603e4560a6c6a
+989, 0xec761811a7b23211
+990, 0xb14aa4090a82aaa5
+991, 0xe29d9d028a5b2dbb
+992, 0x5564e53738d68f97
+993, 0xfabca36542eaaf3b
+994, 0xb9912fcb782020a2
+995, 0xe865e01b349284fd
+996, 0x540b5ff11c5f9274
+997, 0x3463f64e1e7451dc
+998, 0xe15d3e2f33b735f8
+999, 0xf5433336eadef6e
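(Note: the two sfc64-testset CSVs above and below are known-answer vectors: a `seed` line followed by the first 1000 raw 64-bit outputs of the SFC64 bit generator. A minimal sketch of how such a file could be verified against numpy is shown here; it assumes, as numpy's own test_direct.py does, that the seed is passed straight to the SFC64 constructor, and `check_sfc64_testset` is a hypothetical helper name, not part of the test suite.)

    import csv
    import numpy as np

    def check_sfc64_testset(path):
        # First row is "seed, 0x...", remaining rows are "index, 0x...".
        with open(path) as f:
            rows = list(csv.reader(f))
        seed = int(rows[0][1], 0)
        expected = np.array([int(r[1], 0) for r in rows[1:]], dtype=np.uint64)
        # Draw the same number of raw 64-bit words from a freshly seeded
        # generator; random_raw() is the documented BitGenerator method.
        raw = np.random.SFC64(seed).random_raw(len(expected))
        return np.array_equal(raw, expected)

    # e.g. check_sfc64_testset("sfc64-testset-1.csv") should return True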
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-2.csv b/venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-2.csv
new file mode 100644
index 00000000..70aebd5d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/data/sfc64-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x91959e5fb96a6332
+1, 0x3c1dd8a25a7e9f21
+2, 0x657bdffc99798d9e
+3, 0x1a04de320b19e022
+4, 0x65b92af0e5f3c61c
+5, 0x9c84070ce8f743c0
+6, 0xbb10e573693cdb25
+7, 0xd65ea9e76b37fb6b
+8, 0x503efd0e76c8ae66
+9, 0xd711dcd04c26d0f
+10, 0x12f53f435814ac8c
+11, 0xb392cd402cfc82bd
+12, 0x461764550e06c889
+13, 0x716a48b3514e6979
+14, 0xdd0a322213c18ad7
+15, 0x6673a8ca0a05c4d7
+16, 0x2992ef333437f844
+17, 0xc4aaf7e8240b2aad
+18, 0x6ab0a1af1f41474f
+19, 0xb0bae400c226941d
+20, 0xe5f80c2eeeab48c6
+21, 0x3832c6a93a4024bf
+22, 0x280bd824fabe8368
+23, 0x66b626228321e5ff
+24, 0xe0bdfba5325a307e
+25, 0x3a5f65c6ef254e05
+26, 0x99ea12503cb02f94
+27, 0x5d01fd2db77d420b
+28, 0x6959bf5f36b2368d
+29, 0xd856e30c62b5f5be
+30, 0xe33233e1d8140e66
+31, 0xb78be619d415fa8d
+32, 0x4f943bb2cc63d3b
+33, 0x9b1460b290952d81
+34, 0x19205d794826740e
+35, 0x64617bd9d7a6a1ff
+36, 0x30442124b55ea76a
+37, 0xebbbc3b29d0333fc
+38, 0x39235a0fe359751c
+39, 0xf9629768891121aa
+40, 0x32052f53f366e05a
+41, 0x60cc5b412c925bc8
+42, 0xf8b7ecda1c0e5a9
+43, 0x195f036e170a2568
+44, 0xfe06d0381a9ca782
+45, 0x919d89e8b88eebbf
+46, 0xa47fb30148cf0d43
+47, 0x5c983e99d5f9fd56
+48, 0xe7492cdb6a1d42cd
+49, 0xf9cfe5c865b0cfd8
+50, 0x35b653367bbc3b99
+51, 0xb1d92f6f4d4e440b
+52, 0x737e1d5bd87ed9c0
+53, 0x7a880ca1498f8e17
+54, 0x687dae8494f9a3f7
+55, 0x6bae1989f441d5d7
+56, 0x71ad3fa5a9195c2e
+57, 0x16b3969779f5d03
+58, 0xd1bce2ac973f15b3
+59, 0xa114b1ee2ce0dcdd
+60, 0x270d75c11eb1b8d5
+61, 0xc48ffa087c0a7bc
+62, 0xaaf9dc48cda9848d
+63, 0x8111cf10ef6e584d
+64, 0x6736df6af40ee6f4
+65, 0x1a1a111682fbf98d
+66, 0xeb217658e1cb3b5d
+67, 0xcaf58a8b79de9dec
+68, 0x25d0ffd63c88d7a1
+69, 0x4c498cd871b7f176
+70, 0x4069a6156eb0cf3c
+71, 0xdf012f12edcdd867
+72, 0x7734c0ac8edb1689
+73, 0xed6960ac53dbc245
+74, 0x305e20da8868c661
+75, 0x5f0c7a3719956f95
+76, 0x66842bbe3b28895
+77, 0xb608bc9a31eac410
+78, 0xfcb17d5529503abd
+79, 0x829ae5cbc29b92ee
+80, 0x17f2f0027bc24f3a
+81, 0x435926c33d8f44cc
+82, 0x3ab899327098dbec
+83, 0xaf78573b27f8ead8
+84, 0xa8b334fabcf8dc60
+85, 0xcdf3b366a6a303db
+86, 0x8da9379dd62b34c8
+87, 0xb0ba511955f264a7
+88, 0x9d72e21a644f961d
+89, 0xfac28382e2e7e710
+90, 0xd457065f048410aa
+91, 0x1cae57d952563969
+92, 0x5a160a6223253e03
+93, 0x2c45df736d73c8bd
+94, 0x7f651ebc6ad9cec5
+95, 0x77a6be96c7d2e7e7
+96, 0x1721fb1dbfd6546a
+97, 0xf73f433ecff3c997
+98, 0xed1e80f680965bfe
+99, 0x6705ad67a3003b30
+100, 0xac21134efcadb9f7
+101, 0x4d2ba0a91d456ac
+102, 0x59da7b59434eb52b
+103, 0x26c1d070fd414b5f
+104, 0xed7079ddfce83d9a
+105, 0x9277d21f88e0fb7a
+106, 0xfae16b9a8d53d282
+107, 0xb08a0e2e405fdf7d
+108, 0x2ea20df44229d6ec
+109, 0x80e4634cd3612825
+110, 0xbe62e8aeba8f8a1a
+111, 0x4981209769c190fb
+112, 0xcec96ef14c7e1f65
+113, 0x73fe4457b47e7b53
+114, 0x1d66300677315c31
+115, 0xe26821290498c4cc
+116, 0xf6110248fd8fb1c5
+117, 0x30fd7fe32dbd8be3
+118, 0x534ec9b910a2bd72
+119, 0x8f9bfe878bbf7382
+120, 0x4f4eb5295c0c2193
+121, 0xdeb22f03a913be9e
+122, 0x40f716f8e2a8886c
+123, 0xc65007d0e386cdb1
+124, 0x9bdd26d92b143a14
+125, 0xf644b0b77ea44625
+126, 0x75f5a53f6b01993a
+127, 0xfe803e347bf41010
+128, 0x594bff5fa17bc360
+129, 0x3551edfb450373c7
+130, 0x898f9dad433615db
+131, 0x923d2406daa26d49
+132, 0x99e07faccbc33426
+133, 0x7389f9ff4470f807
+134, 0xdc2a25957c6df90b
+135, 0x33c6d8965ef3053f
+136, 0x51a8f07e838f1ab
+137, 0x91c5db369380274f
+138, 0xc37de65ac56b207e
+139, 0xfcc6d2375dde7f14
+140, 0xa4e6418bff505958
+141, 0x4b8b9f78e46953c4
+142, 0x255ab2e0f93cf278
+143, 0xdf650717af3d96ef
+144, 0x2caa21cba3aae2b2
+145, 0xce7e46c6f393daa4
+146, 0x1d5b3573f9997ac7
+147, 0x5280c556e850847d
+148, 0x32edc31bef920ad7
+149, 0xefaa6b0b08cf2c6
+150, 0x5151c99d97b111c5
+151, 0x35ccf4bf53d17590
+152, 0xa210d7bd8697b385
+153, 0xa9419f95738fbe61
+154, 0xdeccf93a1a4fdc90
+155, 0xd0ea3365b18e7a05
+156, 0x84122df6dcd31b9a
+157, 0x33040a2125cea5f5
+158, 0xfe18306a862f6d86
+159, 0xdb97c8392e5c4457
+160, 0xc3e0fa735e80e422
+161, 0x7d106ff36467a0c1
+162, 0xb9825eecc720a76d
+163, 0x7fefc6f771647081
+164, 0xf5df3f5b3977bf13
+165, 0x18fb22736d36f1e0
+166, 0xadc4637b4953abfc
+167, 0x174e66d3e17974bd
+168, 0xf1614c51df4db5db
+169, 0x6664ecde5717b293
+170, 0xd5bc5b6839265c26
+171, 0xf6ca9ce1af3f1832
+172, 0xca696789a9d506ea
+173, 0x7399c246c8f9d53
+174, 0xadf49049626417e2
+175, 0xbcd84af37d09ab91
+176, 0xbb41c177f3a3fa45
+177, 0x592becc814d55302
+178, 0xa88b4e65f6cfe5f7
+179, 0xa0a55e34ff879426
+180, 0x3c2ea6aa725b42b7
+181, 0x65ac4a407b1f9521
+182, 0xde63d53f7e88b556
+183, 0x18bc76696d015f40
+184, 0xd1363f2cd4c116a8
+185, 0x2fe859be19a48e4a
+186, 0x83d6099b1415e656
+187, 0x43f2cbc1a4ee6410
+188, 0xb2eca3d3421c533d
+189, 0xc52b98ea3f031f5d
+190, 0xfe57eb01da07e9d1
+191, 0xf9377883537a6031
+192, 0x364030c05dac7add
+193, 0x6815cb06b35d4404
+194, 0xceae2d4ce31894be
+195, 0xc602bcdf6062bf6a
+196, 0xc8e4bd8dcc6062e3
+197, 0x9c29e87b92a1a791
+198, 0x41e626b871ca9651
+199, 0x325c3d1fb8efbcd8
+200, 0x7dbbacf8e3419fb3
+201, 0x3602e72516bb7319
+202, 0x537a008ebd94d24b
+203, 0xda7714fc9d4d161d
+204, 0x1c8c73700e1b621b
+205, 0x2749b80937d6c939
+206, 0x76ee6abac5b14d33
+207, 0xf18d1e92cb6a8b5c
+208, 0x6ce9579d9291c721
+209, 0x60523c745a40e58
+210, 0x637f837fcc901757
+211, 0x2ff71b19661dc5b3
+212, 0x393ab586326ad16f
+213, 0xa0970ea30fe742b7
+214, 0x570222d7f27fe5ae
+215, 0x3b5806d43fd38629
+216, 0x129a0ad7420180c5
+217, 0x1c4726355778d52c
+218, 0x7c1459cf77656499
+219, 0xfe038a0932132069
+220, 0x4c4cc317a937483a
+221, 0xa333d24067e926ba
+222, 0x401d9b6ab37f6ef2
+223, 0x87ad0e491ebe4a2a
+224, 0xfc02f312e72d121d
+225, 0xfde715b3b99767b2
+226, 0xd111c342ba521c92
+227, 0x83b221b10879c617
+228, 0x6a1bf5c01fdf4277
+229, 0x166bfc0c3f5892ee
+230, 0x4608d556d7c57856
+231, 0x8d786857c95ece49
+232, 0x2d357445a1aca4ac
+233, 0x79620dae28ecd796
+234, 0x90e715dc0f2201c4
+235, 0x173b68b4c9f4b665
+236, 0x4e14d040ebac4eef
+237, 0xbd25960b4b892e
+238, 0x911a199db6f1989d
+239, 0xfe822d7c601fd2e0
+240, 0x9b4c1d58d8223a69
+241, 0x907c1891283843b0
+242, 0xf4868bf54061c4b2
+243, 0x17f8cd1fc24efd85
+244, 0xd44253f9af14c3aa
+245, 0x16d0da0cb911d43c
+246, 0x3c6a46615828e79a
+247, 0x498591c1138e11a5
+248, 0xcc0f26336d0d6141
+249, 0x4d3ebc873212309a
+250, 0x16bad7792d5c2c6a
+251, 0x474215a80b2bbd11
+252, 0x7159848abd8492fc
+253, 0x359341c50973685f
+254, 0x27512ee7bf784a4a
+255, 0x45228ea080f70447
+256, 0x880cab616500d50e
+257, 0x12fae93f9830d56e
+258, 0x6744ee64348d9acd
+259, 0x484dada28cd2a828
+260, 0x98491d0729e41863
+261, 0x2f15aac43c2863b0
+262, 0x5727a34d77a1da0f
+263, 0xa435cebef6a62eed
+264, 0xd211697d57b053b0
+265, 0x65aa757b68bd557
+266, 0xe3a1b7a2d8a3e06a
+267, 0x2adf64e67252a7a9
+268, 0xadadcb75cadee276
+269, 0x7934bc57ac8d97bf
+270, 0xccff0d0f412e0606
+271, 0x101a82aa3e8f3db9
+272, 0xb0f2498094b4575c
+273, 0xba2561d9ef26ed8a
+274, 0xfbcd1268fc3febe1
+275, 0x9aa10bb19eb152e0
+276, 0xf496217a601a6d72
+277, 0xe4be1e4f2fa91363
+278, 0x473a602bf3dd68eb
+279, 0xfe8ed2a48c26f4b5
+280, 0x20e94b1a00159476
+281, 0x93e1cb1c6af86ec7
+282, 0x4fcba3898f7442ba
+283, 0x5150c3a3d94891df
+284, 0x91cfce6c85b033ea
+285, 0x625e8a832a806491
+286, 0x28c97ba72e3ec0b2
+287, 0x8e172de217c71ea1
+288, 0x926b80216c732639
+289, 0x28b19431a649ae3d
+290, 0x57c039a6e95a3795
+291, 0xfbc354182fe52718
+292, 0x819dfd7c7d534cef
+293, 0xabb4093a619ed44f
+294, 0xe785b7ac6f656745
+295, 0xb647b4588b2f942f
+296, 0x64cf870a14c72d27
+297, 0x6d4a4a2a0ba9b37e
+298, 0x78bfb0427d7ce6b0
+299, 0x8dcc72b8bfc79ac6
+300, 0x1c14d915d5e76c99
+301, 0xaf48ddea6f096d79
+302, 0x51b39b67aa130d8
+303, 0x1aeeb39d4def06de
+304, 0xd678092ffedfdd27
+305, 0x8f54787f325111d3
+306, 0xf2ca2e827beaa6bc
+307, 0x339d134099e98545
+308, 0x1f6a8a7b33942e43
+309, 0x952c8065dbef669a
+310, 0xe066aeb6690147f7
+311, 0xed25aa92cf58ebb6
+312, 0x7601edce215ef521
+313, 0xed1c5b396abd9434
+314, 0x4fd1e407535de9d5
+315, 0xccc8315a0d4d1441
+316, 0x85753e250bb86976
+317, 0xf232e469378761c3
+318, 0x81d691b8e9aef3c6
+319, 0x224a2f9cab0ad0e
+320, 0x978f3d3e50007f4e
+321, 0xd3713e6a6c0cbe60
+322, 0xcce8f1eadd41f80d
+323, 0x34bda028a97d469
+324, 0x90e242fdf0f59183
+325, 0x4d749754fbc5f092
+326, 0x4399f5b7851cc87b
+327, 0xcb921a5f25f6c5d7
+328, 0x120bf5d0162101
+329, 0x1304cc2aa352735a
+330, 0xf7236c5d0d5d417b
+331, 0xc31b320fc1654306
+332, 0xb468c6b23f3fb4e7
+333, 0xb5985b5bfaca4166
+334, 0x898285a1cd2f8375
+335, 0xa13493da372aa7c9
+336, 0x15c80c09c12634e7
+337, 0x9b765c5cc9d438bd
+338, 0xee7da816a9201dcb
+339, 0x92e269f73b5a248e
+340, 0xa8086c5de81400ce
+341, 0xe0053901853d42be
+342, 0x821df32c012f433e
+343, 0x17a6d69ca37387c7
+344, 0x2b10044bfba3501f
+345, 0x8dfd262afc2e8515
+346, 0xd68c2c7b60226371
+347, 0xe81ac114e4416774
+348, 0x5896d60061ebc471
+349, 0xa996e3147811dbd1
+350, 0xa819c7b80ecb3661
+351, 0x982ad71b38afbc01
+352, 0xab152b65aa17b7fe
+353, 0x4582bc282ef187ef
+354, 0xab5a17fe8d9bc669
+355, 0x83664fa9cb0284b7
+356, 0x234c4b0091968f52
+357, 0x8ab5f51805688d37
+358, 0xe9e11186e0c53eda
+359, 0x10df37ef1de2eccf
+360, 0x780f1b0d52db968f
+361, 0x50bd4ff292872cd5
+362, 0x51e681c265f5ad0
+363, 0x842c49660a527566
+364, 0x6e56ee026e9eda87
+365, 0x4cf39e40d8c80393
+366, 0x13e466df371f7e1f
+367, 0xf2ce1799f38e028e
+368, 0x833c8db7adc6ff0e
+369, 0xc6e189abc2ec98f
+370, 0xafebb3721283fec5
+371, 0xb49bc1eb5cc17bdc
+372, 0xf1d02e818f5e4488
+373, 0xe5e9d5b41a1dd815
+374, 0xce8aca6573b1bfe5
+375, 0x9b0a5d70e268b1d5
+376, 0xf3c0503a8358f4de
+377, 0x2681605dd755669d
+378, 0xea265ca7601efc70
+379, 0xa93747f0a159439f
+380, 0x62a86ede78a23e50
+381, 0xac8a18935c3d063c
+382, 0x729c0a298f5059f5
+383, 0xbbf195e5b54399f4
+384, 0x38aa9d551f968900
+385, 0x3b3e700c58778caa
+386, 0x68e6e33c4443957a
+387, 0x7c56fc13eb269815
+388, 0xaf7daca39711804a
+389, 0x50fde6d10f9544b3
+390, 0xf3d37159f6f6c03d
+391, 0x82d298f5c1a71685
+392, 0x478661ac54c5002c
+393, 0x6053768e1a324ae0
+394, 0xde8fb4a7e56707ea
+395, 0xaa2809301faa8cf4
+396, 0x690a8d49fedd0722
+397, 0xe17c481b9c217de9
+398, 0x60d1d8a2b57288e3
+399, 0x149adfaadc6b0886
+400, 0xa3c18b6eb79cd5fa
+401, 0x5774e3a091af5f58
+402, 0x2acca57ff30e5712
+403, 0x94454d67367c4b0c
+404, 0x581b2985ac2df5ca
+405, 0x71618e50744f3e70
+406, 0x270a7f3bd9a94ae6
+407, 0x3ef81af9bb36cd7b
+408, 0x8a4a2592875254aa
+409, 0x704ac6086fbb414a
+410, 0xda774d5d3f57414d
+411, 0xe20d3358b918ae9e
+412, 0x934a6b9f7b91e247
+413, 0xf91649cde87ec42c
+414, 0x248cec5f9b6ced30
+415, 0x56791809fd8d64ba
+416, 0xf502b2765c1395f
+417, 0x6b04ec973d75aa7f
+418, 0xb0339f2794bb26f
+419, 0x4c524636efbaea49
+420, 0x6bbf3876e9738748
+421, 0xf686524e754e9e24
+422, 0x8dafa05a42d19cd3
+423, 0xc5f069ab2434008e
+424, 0x4fd64cc713cba76
+425, 0xdbf93450c881ed5f
+426, 0x492e278ebabb59a2
+427, 0x993fddfde4542642
+428, 0xecde68a72c8d4e52
+429, 0xe0760b3074c311fd
+430, 0x68dc0e7e06528707
+431, 0x52b50edf49c0fdc7
+432, 0xb2bd4185c138f412
+433, 0x431496d7e1d86f3
+434, 0xa4e605b037e26c44
+435, 0x58236ae1f0aca2b5
+436, 0x26c72c420fc314d8
+437, 0x20134e982ab99a2b
+438, 0x544b59b8b211374b
+439, 0x1301c42f3a14d993
+440, 0x52a6ea740f763b0f
+441, 0xf209d70c2bebf119
+442, 0xac66a4ebc2aa1be
+443, 0x683713ed35878788
+444, 0x2b5578acec06b80c
+445, 0x86428efa11c45b36
+446, 0xb49010adb17d291e
+447, 0x73b686bd8664b6be
+448, 0x6d28ebf57b6884cc
+449, 0x9712091230ff58d9
+450, 0xc9c91f74c38b286
+451, 0x776310ac41dc008e
+452, 0x2f3739df0bf6a88e
+453, 0x5792dc62b94db675
+454, 0x5715910d024b06af
+455, 0xeb1dd745458da08
+456, 0xfce7b07ccfa851a7
+457, 0xc305f1e983ac368
+458, 0x485aa9519ac00bb0
+459, 0xa5354f6589fb0ea0
+460, 0x32fee02dfdbf4454
+461, 0x4d1ddc304bbefaaa
+462, 0x789a270a1737e57e
+463, 0x9f3072f4b1ed8156
+464, 0x4de3c00e89058120
+465, 0xb00a02529e0a86fa
+466, 0x539f6f0edd845d9a
+467, 0x85e578fe15a8c001
+468, 0xa12c8e1a72cce7d8
+469, 0xc6908abbc2b1828
+470, 0xcf70090774cbb38c
+471, 0x3b636a6977b45d4a
+472, 0xf0a731b220680b57
+473, 0x18973929f51443a8
+474, 0xe93e1fbe7eadabe
+475, 0x8233730f0a6dfa02
+476, 0x66e50b6919b0ab74
+477, 0xb1aba87c97fd08a2
+478, 0xd4dffc1fbc117ad6
+479, 0x6f7fa65724b96e6a
+480, 0x4bd5800dee92e0fa
+481, 0xe18a959db6256da
+482, 0xe53a291bc66df487
+483, 0xb7ec306a08651806
+484, 0x1847a6b80d2821e1
+485, 0xda50391283b14d39
+486, 0xacc4d3cd7cceb97a
+487, 0x57f70185165b7bc6
+488, 0x302b6d597c3aaba7
+489, 0xa47f32d037eab51e
+490, 0xe1509b4408abc559
+491, 0x4f30a1d7c2934157
+492, 0x2ad03e6c60b650b2
+493, 0x334d9c337b0a9064
+494, 0xc7f442821e7aac12
+495, 0xbcdeb09298694cdd
+496, 0xe42402389f8f0fb4
+497, 0xe5de56af539df727
+498, 0x7017f9b2101ee240
+499, 0x1ee5e68d5b10001d
+500, 0x436229051836387a
+501, 0xcd532d6d6ec38fb7
+502, 0x30a66606fdf38272
+503, 0xfdaa2ab9cf798496
+504, 0x4277b4adec70e7df
+505, 0x72cfc30256e0eaef
+506, 0x3c3359fd9bd34917
+507, 0xb7aa89598856efb0
+508, 0xf72226f8bf299ef5
+509, 0x258c499275a4356f
+510, 0x999a56bfc7f20d76
+511, 0x2b3e7432e20c18b
+512, 0x2d1251332f760cb5
+513, 0x7420e0eea62157c5
+514, 0xe85c895aa27cec3d
+515, 0x27a0545c7020d57c
+516, 0xc68638a65b4fff0d
+517, 0xfda473983a4ea747
+518, 0xd19fe65fb4c06062
+519, 0x6b1374e050ee15e4
+520, 0x80065ecd49bc4bef
+521, 0x4ee655954bc838de
+522, 0xe8fb777504a72299
+523, 0x86b652ea70f4bdde
+524, 0xcdc9e0fbde7e4f33
+525, 0x352c0a50cd3ac56
+526, 0x4b8605d368be75dc
+527, 0x1ac9ea8129efbc37
+528, 0x470325faa99f39c5
+529, 0x25dd7ef9adccf7a1
+530, 0x5ae2c7a03e965816
+531, 0xf733d2df59dacc7d
+532, 0xa05bbf0a8a1a7a70
+533, 0xe8aa3f102846ef5f
+534, 0xc9b85ec49ae71789
+535, 0xb904c14ed1cb1936
+536, 0x5ae618230b5f0444
+537, 0x97987fe47b5d7467
+538, 0xabb3aca8865ca761
+539, 0x38bfdf29d4508228
+540, 0x353654f408353330
+541, 0xeb7e92930ae4ef0d
+542, 0xec50f1a7ca526b96
+543, 0xd5e2dc08b5697544
+544, 0x24c7fd69d5ec32df
+545, 0x6f7e1095568b8620
+546, 0x6ed9c16ca13b3c8
+547, 0xe676ef460002130f
+548, 0xa3a01a3992c4b430
+549, 0xe2130406c3b1f202
+550, 0xa8f7263e2aedcd20
+551, 0xc45d71ef2e35f507
+552, 0x37155594021da7ba
+553, 0x22dc94f19de73159
+554, 0x7969fc6bffc5443f
+555, 0x97def7e44faa6bfe
+556, 0x8b940f5e8931d71f
+557, 0xd95b1dd3f1a3fdd5
+558, 0x1c83bfdca615701a
+559, 0xb7fcb56279ceca6b
+560, 0xd84f8950f20dcd0
+561, 0xb03343698de3cbe0
+562, 0xf64565d448d71f71
+563, 0xda52b4676e0ae662
+564, 0xda39c2c05b4ffb91
+565, 0xb35e2560421f6a85
+566, 0x1a7b108d48ac3646
+567, 0xc4e264dc390d79ed
+568, 0xa10727dfd9813256
+569, 0x40d23154e720e4f7
+570, 0xd9fa7cd7e313e119
+571, 0xcbf29107859e6013
+572, 0xc357338553d940b7
+573, 0x2641b7ab0bdfcbaa
+574, 0xd12f2b6060533ae7
+575, 0xd0435aa626411c56
+576, 0x44af4a488a9cec72
+577, 0xb934232ea8fa5696
+578, 0x760a8b12072b572d
+579, 0xfab18f9942cfa9b3
+580, 0x5676834c1fe84d16
+581, 0x9c54e4fddb353236
+582, 0xab49edfc9551f293
+583, 0x567f1fb45a871d
+584, 0x32a967c873998834
+585, 0x99240aad380ef8d1
+586, 0x7f66cbd432859a64
+587, 0x4cdc8a4658166822
+588, 0x984e3984a5766492
+589, 0xa3b2d0a3d64d3d94
+590, 0x177f667172f2affc
+591, 0xb1a90607a73a303f
+592, 0xe600b6c36427f878
+593, 0xf758f9834cb7f466
+594, 0x8ee9fce4a3f36449
+595, 0xcb8f11533e7da347
+596, 0xe7cf647794dabd7c
+597, 0xc9d92cfe6110806
+598, 0xea1335fa9145a1ec
+599, 0xbc6c29821d094552
+600, 0x37b9d6a858cc8bc3
+601, 0xf24e4c694929893e
+602, 0x55d025ce2d7d0004
+603, 0xccdc69acccf4267b
+604, 0xc491c04340c222eb
+605, 0xba50f75ecec9befb
+606, 0x1ec7bd85b8fe3bb9
+607, 0xe4de66498c59ae8a
+608, 0x38aa9e912712c889
+609, 0xcee0e43c5cc31566
+610, 0x72b69aa708fc7ed
+611, 0xdff70b7f6fa96679
+612, 0xd6d71d82112aadc3
+613, 0x365177892cb78531
+614, 0xa54852b39de4f72c
+615, 0x11dd5832bf16dd59
+616, 0x248a0f3369c97097
+617, 0xa14cec0260e26792
+618, 0x3517616ff142bed1
+619, 0x9b693ad39dab7636
+620, 0x739dff825e994434
+621, 0x67711e7356098c9
+622, 0xa81f8515d2fdf458
+623, 0xdac2908113fe568e
+624, 0xe99944ebc6e2806a
+625, 0x671728ca5b030975
+626, 0xfdad20edb2b4a789
+627, 0xedc6e466bd0369d2
+628, 0x88b5d469821f7e1b
+629, 0x2eabf94049a522a5
+630, 0x247794b7a2f5a8e3
+631, 0x278942bdbe02c649
+632, 0xbe5a9a9196ab99c1
+633, 0x75955060866da1b5
+634, 0xdedcfa149273c0b5
+635, 0xdbeb7a57758f3867
+636, 0x7b9053347a2c8d5a
+637, 0xa059b3f2eed338a5
+638, 0x59401a46ded3b79f
+639, 0x38044ba56a6d19fb
+640, 0x72c7221b4e77e779
+641, 0x526df3491a3a34da
+642, 0xc3b31184ba16c0c2
+643, 0xd94c7144488624af
+644, 0xcf966ee4dc373f91
+645, 0x62049e65dd416266
+646, 0x7c2adccb925bf8f
+647, 0xd5fa5c22ed4ef8e1
+648, 0xd00134ebd11f2cd1
+649, 0xfbdf81767bed3634
+650, 0x62e8cc8ff66b6e26
+651, 0x3a72d6bcd4f2dcf7
+652, 0xf1cd45b1b46a86ed
+653, 0x1271f98e0938bb9a
+654, 0x82e6927e83dc31fa
+655, 0x7b9b0e0acb67b92d
+656, 0x6df503e397b2e701
+657, 0x93888f6fb561e0c3
+658, 0x393fb6069a40291
+659, 0x967a7d894cc0754d
+660, 0x6e298996ad866333
+661, 0x5ff3cf5559d6ab46
+662, 0xd0d70508c40349f5
+663, 0xc64c66c0dd426b33
+664, 0x8fea340ee35c64dd
+665, 0xf9cd381eb3060005
+666, 0xfcc37c2799fc0b11
+667, 0x6a37c91d65b489fa
+668, 0x57231000fa0a0c9d
+669, 0x55f6e292c6703f9a
+670, 0xd0508ffbfa55a7a6
+671, 0x885db543276bdac8
+672, 0xc26dbe6a26b0e704
+673, 0x21f884874ebd709e
+674, 0x711f0b6c8f732220
+675, 0x354d0a361eaee195
+676, 0x721344d8d30b006a
+677, 0xa0e090a0d3a56f07
+678, 0x16b3d5d823a4952b
+679, 0x59d7874bc9eae7b6
+680, 0x9bbb32710076455f
+681, 0xd4fb22242ffabafd
+682, 0xe1d4ac6770be1d89
+683, 0xb259cedebc73dc8a
+684, 0x35faaa3b4246ab69
+685, 0x5d26addefdaee89
+686, 0x8e7ec350da0f3545
+687, 0xd0f316eed9f8fc79
+688, 0x98b2a52c9bf291b2
+689, 0xe4d294a8aca6a314
+690, 0x25bd554e6aa7673c
+691, 0xcfde5dcba5be2a6c
+692, 0xb5e01fb48d2d2107
+693, 0xe1caf28948028536
+694, 0xd434aa0a26f3ee9b
+695, 0xd17723381641b8f6
+696, 0xfe73bd1f3f3768a2
+697, 0x1cc6b1abd08d67e9
+698, 0x247e328371a28de0
+699, 0x502e7942e5a9104a
+700, 0x6a030fd242eb4502
+701, 0xa2ffe02744014ce8
+702, 0x59290763b18fe04e
+703, 0xcf14241564271436
+704, 0xb0fb73c3c1503aff
+705, 0x94e27c622f82137a
+706, 0x747a5b406ac3e1f0
+707, 0x9a914e96a732031d
+708, 0x59f68c6c8f078835
+709, 0x809d012c73eb4724
+710, 0x5b3c3b73e1b37d74
+711, 0xdde60ef3ba49cdf7
+712, 0x87a14e1f9c761986
+713, 0x4109b960604522af
+714, 0x122d0e1ed0eb6bb9
+715, 0xadc0d29e80bfe33
+716, 0xa25b1b44f5fc8e4e
+717, 0xbab85d8a9b793f20
+718, 0x825f4cbced0e7d1e
+719, 0x2d6ae8807acb37ea
+720, 0x8234420adce2e39
+721, 0x4a8ad4da6b804807
+722, 0x1e19f9bc215e5245
+723, 0x1d6f4848a916dd5e
+724, 0x9ac40dfcdc2d39cc
+725, 0x9f3524e3086155ec
+726, 0x861fffc43124b2ef
+727, 0xe640e3b756396372
+728, 0x41cb0f0c5e149669
+729, 0xe0bd37e1192e4205
+730, 0x62917d3858f4ce47
+731, 0xa36e7eb4d855820a
+732, 0x204b90255a3bf724
+733, 0x66ee83a0175535bc
+734, 0x2c14ce7c6b0c1423
+735, 0x85d9495fa514f70d
+736, 0x5a4fe45ead874dbc
+737, 0xe72248dcb8cfc863
+738, 0xfc21ff2932ed98cd
+739, 0xcbba1edd735b5cad
+740, 0x91ddc32809679bf5
+741, 0x192cdf2c7631ea1f
+742, 0xbbc451ddf2ea286f
+743, 0xad9e80cae2397a64
+744, 0x6918f0119b95d0e5
+745, 0xa40379017a27d70a
+746, 0x1aaeddb600e61e1
+747, 0x15afd93cbd7adda9
+748, 0x156719bc2b757ff4
+749, 0x13d9a59e2b2df49d
+750, 0x9a490986eaddf0a
+751, 0xef9a350f0b3eb6b4
+752, 0x5de7f6295ba4fa4d
+753, 0x7f37fd087c3fdb49
+754, 0xa9fe3749d6f3f209
+755, 0x50912ac036d9bfb
+756, 0x982cb4d726a441f8
+757, 0x8ca8d8af59b872d0
+758, 0x7f8adfb0ceeade8a
+759, 0xdad390ec742be44
+760, 0xa637944d0045be5b
+761, 0x3569a3b3af807061
+762, 0x9599da8eae14511d
+763, 0xc333e8d19589b01a
+764, 0xfb9b524a20b571e1
+765, 0xbd9dc8b37ce5c3e1
+766, 0x142333005fa389ac
+767, 0x1368bc37cd5bcce1
+768, 0x16094907ad6ecf73
+769, 0xb32c90dbba4c1130
+770, 0x82761d97c1747dd0
+771, 0x599f9f267ae3444d
+772, 0x79ad3382994852e1
+773, 0x2511f06d9ef06e54
+774, 0xb35e6ab7d5bbddae
+775, 0xfca9fa83a2988732
+776, 0x7d4350f0394ac3ba
+777, 0xa52a9527bb176ea3
+778, 0xb49fa0ceb2aa8353
+779, 0x1f62e504d1468cc0
+780, 0xe1a77bfccce6efc3
+781, 0x776cdff4dc0d6797
+782, 0x56612e39b652c1f2
+783, 0x5f096a29294eda04
+784, 0x7978abc3aabd8b23
+785, 0x79dd875e0485b979
+786, 0x8a98aa4d5735d778
+787, 0xcca43940f69d2388
+788, 0xb2d4b156f144f93a
+789, 0xbd528a676e9a862
+790, 0x2a394939c8e7ec5e
+791, 0xb1da900c6efe4abc
+792, 0x9869af479de4c034
+793, 0x78dbdfb88ac7c1db
+794, 0x18cb169143088041
+795, 0xe69e5461c51a3e13
+796, 0x5389fa16ea98183c
+797, 0xed7c80d1be1ea520
+798, 0x87246fc359758ced
+799, 0xab323eba95fae4ed
+800, 0xbc4c0dde7f8a1828
+801, 0xdb739f7955610b1a
+802, 0xecd8c68c3434cc
+803, 0x138c2eb88c477f44
+804, 0x28a65f96727aae41
+805, 0xdee879f2cf5629d
+806, 0x684f0c90ef20070f
+807, 0xa24a819ef5621800
+808, 0x8d0054f870e4fdcb
+809, 0x99e8c6e695b600b
+810, 0x50b705245891f7c3
+811, 0xc02eed3a6e58e51a
+812, 0x443d64e95443606c
+813, 0xca24959cfbd2d120
+814, 0xe072609ea48815bc
+815, 0xbcc715026590315b
+816, 0x3e76df24d7aa5938
+817, 0xd8ff04940d9b79ae
+818, 0x54474ce790059bcd
+819, 0x278390dd6aa70e81
+820, 0xf4df619fe35414e4
+821, 0x757d71270264e615
+822, 0x1e8a373699c11b23
+823, 0xef68c82046e67dd6
+824, 0xe280006599972620
+825, 0x234e095183b0f4d6
+826, 0xe3b7560ed9839749
+827, 0xcd5ec4086572332e
+828, 0xc41c0d4aaa279108
+829, 0x4b9cd6126bc16a6d
+830, 0x4a7252734f3e3dd0
+831, 0xb3132df156cc103a
+832, 0xf9e4abbf7b64464a
+833, 0xf936df27fb3c47b7
+834, 0x9142960873f6d71a
+835, 0x4ba6aa3235cdb10d
+836, 0x3237a2e765ba7766
+837, 0xd62f0b94c8e99e54
+838, 0x26b682f90a3ae41b
+839, 0x40ad5e82072b6f81
+840, 0xd0198101f5484000
+841, 0xe4fac60ba11c332
+842, 0x472d0b0a95ef9d38
+843, 0x8512557aec5a3d8f
+844, 0xef83169d3efd4de9
+845, 0x53fe89283e7a7676
+846, 0x2f50933053d69fc4
+847, 0x76f5e4362e2e53a2
+848, 0x8676fdccce28874a
+849, 0x2737764c1fb1f821
+850, 0x4a6f70afc066ab55
+851, 0x27f8e151e310fca4
+852, 0xd606960ccbe85161
+853, 0xcce51d7ddd270a32
+854, 0xb4235999794875c2
+855, 0x580084e358e884
+856, 0x2159d5e6dc8586d7
+857, 0x87bd54d8599b3ba4
+858, 0x3e9ade6a2181664
+859, 0x5e6e140406d97623
+860, 0x511545d5aa0080a2
+861, 0xf49d78ed219aac57
+862, 0xbece1f9c90b8ea87
+863, 0x1c741cac36a2c514
+864, 0x7453c141047db967
+865, 0xd751832a5037eba2
+866, 0x71370a3f30ada1f7
+867, 0x7c01cf2dcb408631
+868, 0x1052a4fbdccc0fa1
+869, 0x13d525c9df3fb6c
+870, 0xa3aa8dbfee760c55
+871, 0xc0288d200f5155cf
+872, 0x79f4bcd12af567c3
+873, 0x8160d163bb548755
+874, 0x5cf2995fb69fd2df
+875, 0xcc98ed01396639df
+876, 0xad95f1d9cfc8256e
+877, 0xa3df27d9fbdbfb9d
+878, 0x83e5f5dda4d52929
+879, 0x9adc05043009f55b
+880, 0xdfe8329dfde1c001
+881, 0x9980ccdd5298e6a2
+882, 0x636a7bd134f6ef56
+883, 0xef5ff780c4be6ba4
+884, 0x290d71dc77a56d16
+885, 0x6d65db9ff58de1e6
+886, 0x944b063b3805a696
+887, 0xce468ca2cce33008
+888, 0x5ba1ccb840f80f48
+889, 0x28ddce36fc9ad268
+890, 0x4f77ef254d507a21
+891, 0xce9b4057fadf3ab
+892, 0xb518bc68298730e6
+893, 0xd2eb5b8e2ec665b0
+894, 0xe1583303a4f87344
+895, 0x9d5a0df4fbe1bed5
+896, 0x2ba9bc03ec8cfd07
+897, 0x479ed880a96ca669
+898, 0xcedf96338324771a
+899, 0x312f4fc2da41ffaa
+900, 0xa0eb9cf23b5e1ed8
+901, 0xf8f88f975dc3f539
+902, 0x4a37e185d0e96e0f
+903, 0xf829654a5c0b46f9
+904, 0x3909cca7a7f8c7fb
+905, 0x4c2e1d66ceb45105
+906, 0xaffaa19e1db8af87
+907, 0x9ec498246bd18c76
+908, 0x21d51558edc089da
+909, 0xe8984112cd1b1561
+910, 0x7de1d2cf54b0c0e1
+911, 0xa06729aed50bfb9d
+912, 0xcf19f733e5db19e1
+913, 0x70edf2624ab777cd
+914, 0x46685becad10e078
+915, 0x825e0f6add46785
+916, 0x66d4af3b15f70de4
+917, 0xc676614b0666b21
+918, 0x282a916c864f5cb7
+919, 0x2707283a3f512167
+920, 0x37ff3afda7461623
+921, 0xc767eb1205e4ca86
+922, 0x46b359aecc4ea25b
+923, 0x67fbbb797a16dbb1
+924, 0x64fd4ba57122290e
+925, 0x8acc2a8ae59d8fac
+926, 0x64a49298599acc67
+927, 0xedf00de67177ce30
+928, 0x1ea9d8d7e76d2d2c
+929, 0x363fcac323f70eb2
+930, 0x19e6e3ec8a9712eb
+931, 0xca541e96b0961f09
+932, 0x4d8fd34c2822ec46
+933, 0x2fdd56a50b32f705
+934, 0xaac2fcf251e3fd3
+935, 0xb0c600299e57045c
+936, 0xd951ec589e909e38
+937, 0x4dc8414390cae508
+938, 0x537ef9d5e2321344
+939, 0xa57bc21fd31aa2dc
+940, 0xa3a60df564183750
+941, 0xbe69a5ce2e369fb6
+942, 0x7744601f4c053ec8
+943, 0x3838452af42f2612
+944, 0xd4f0dad7115a54e9
+945, 0x629cf68d8009a624
+946, 0x2211c8fa34cb98cb
+947, 0x8040b19e2213db83
+948, 0xb2a86d3ba2384fd
+949, 0x4b85cec4f93f0dab
+950, 0xc8d212d21ea6845d
+951, 0x5b271a03a4fe2be0
+952, 0xff4f671319ad8434
+953, 0x8e615a919d5afa96
+954, 0xea7f47c53161160a
+955, 0x33273930b13c6efc
+956, 0x98eedda27fb59c3c
+957, 0x188dc5e92e939677
+958, 0x9dbd0fa0911430f1
+959, 0x5b3dcf3fa75dfd2b
+960, 0x3f03846febdb275d
+961, 0x20cc24faea9e9cf6
+962, 0x854f3ac66199ff5d
+963, 0x31169ac99d341e6f
+964, 0xa85daed3c0bc1bbe
+965, 0x64633711e71ba5dd
+966, 0x530e79978dc73334
+967, 0x636f2ee6e20aef13
+968, 0xf6220f8b6d9a58fb
+969, 0x425db8fa32141a7b
+970, 0xac7c210f4b02be95
+971, 0x5fe8cfbe197a7754
+972, 0xfff7d40c79420ea
+973, 0x5f8bab9ef4697b77
+974, 0xaf6fe54e45b23fe8
+975, 0xce79456ccc70bbce
+976, 0x645ef680f48f1c00
+977, 0xa4dfac46e2028595
+978, 0x6bece4c41effc5df
+979, 0xd316df886442641f
+980, 0xa4f6ff994edd2a6
+981, 0x30281ae3cc49abe4
+982, 0x39acb7b663dea974
+983, 0x5e8829b01a7c06fb
+984, 0x87bdb08cf027f13e
+985, 0xdfa5ede784e802f6
+986, 0x46d03d55711c38cc
+987, 0xa55a961fc9788306
+988, 0xbf09ded495a2e57a
+989, 0xcd601b29a639cc16
+990, 0x2193ce026bfd1085
+991, 0x25ba27f3f225be13
+992, 0x6f685be82f64f2fe
+993, 0xec8454108229c450
+994, 0x6e79d8d205447a44
+995, 0x9ed7b6a96b9ccd68
+996, 0xae7134b3b7f8ee37
+997, 0x66963de0e5ebcc02
+998, 0x29c8dcd0d17c423f
+999, 0xfb8482c827eb90bc
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_direct.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_direct.py
new file mode 100644
index 00000000..58d966ad
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_direct.py
@@ -0,0 +1,478 @@
+import os
+from os.path import join
+import sys
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
+ assert_raises)
+import pytest
+
+from numpy.random import (
+ Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
+ SFC64, default_rng
+)
+from numpy.random._common import interface
+
+try:
+ import cffi # noqa: F401
+
+ MISSING_CFFI = False
+except ImportError:
+ MISSING_CFFI = True
+
+try:
+ import ctypes # noqa: F401
+
+ MISSING_CTYPES = False
+except ImportError:
+    MISSING_CTYPES = True
+
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ MISSING_CFFI = True
+
+
+pwd = os.path.dirname(os.path.abspath(__file__))
+
+
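+# Recursively compare BitGenerator state dicts: nested dicts recurse,
+# ndarrays use assert_array_equal, and everything else compares with ==.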
+def assert_state_equal(actual, target):
+ for key in actual:
+ if isinstance(actual[key], dict):
+ assert_state_equal(actual[key], target[key])
+ elif isinstance(actual[key], np.ndarray):
+ assert_array_equal(actual[key], target[key])
+ else:
+ assert actual[key] == target[key]
+
+
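+# Keep the top 24 bits of each uint32 and scale by 2**-24; float32 has a
+# 24-bit significand, so this is the mapping the tests below expect from
+# random(dtype=np.float32). E.g. 2**31 maps to 0.5.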
+def uint32_to_float32(u):
+ return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32)
+
+
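+# Split each uint64 into 32-bit halves, interleave them low-half first,
+# and convert every half to a float32; e.g. x = 2**63 yields [0.0, 0.5].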
+def uniform32_from_uint64(x):
+ x = np.uint64(x)
+ upper = np.array(x >> np.uint64(32), dtype=np.uint32)
+ lower = np.uint64(0xffffffff)
+ lower = np.array(x & lower, dtype=np.uint32)
+ joined = np.column_stack([lower, upper]).ravel()
+ return uint32_to_float32(joined)
+
+
+def uniform32_from_uint53(x):
+ x = np.uint64(x) >> np.uint64(16)
+ x = np.uint32(x & np.uint64(0xffffffff))
+ return uint32_to_float32(x)
+
+
+def uniform32_from_uint32(x):
+ return uint32_to_float32(x)
+
+
+def uniform32_from_uint(x, bits):
+ if bits == 64:
+ return uniform32_from_uint64(x)
+ elif bits == 53:
+ return uniform32_from_uint53(x)
+ elif bits == 32:
+ return uniform32_from_uint32(x)
+ else:
+ raise NotImplementedError
+
+
+def uniform_from_uint(x, bits):
+ if bits in (64, 63, 53):
+ return uniform_from_uint64(x)
+ elif bits == 32:
+ return uniform_from_uint32(x)
+
+
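+# Standard 53-bit conversion: keep the top 53 bits of each uint64 and
+# divide by 2**53 (9007199254740992); e.g. 2**63 maps to 0.5.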
+def uniform_from_uint64(x):
+ return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
+
+
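+# Combine consecutive uint32 pairs into one double: 27 bits from the first
+# word (x[i] >> 5) and 26 bits from the second (x[i + 1] >> 6), giving
+# (a * 2**26 + b) / 2**53; the literals below are 2**26 and 2**53.
+# E.g. the pair (2**31, 0) maps to 0.5.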
+def uniform_from_uint32(x):
+ out = np.empty(len(x) // 2)
+ for i in range(0, len(x), 2):
+ a = x[i] >> 5
+ b = x[i + 1] >> 6
+ out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
+ return out
+
+
+def uniform_from_dsfmt(x):
+ return x.view(np.double) - 1.0
+
+
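+# Reconstruct the Gaussians the legacy RandomState derives from raw
+# uniforms via the Marsaglia polar method: draw pairs (x1, x2) in
+# (-1, 1)^2, accept when 0 < r2 = x1*x1 + x2*x2 < 1, then emit f*x2 and
+# f*x1 with f = sqrt(-2*log(r2)/r2).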
+def gauss_from_uint(x, n, bits):
+ if bits in (64, 63):
+ doubles = uniform_from_uint64(x)
+ elif bits == 32:
+ doubles = uniform_from_uint32(x)
+ else: # bits == 'dsfmt'
+ doubles = uniform_from_dsfmt(x)
+ gauss = []
+ loc = 0
+ x1 = x2 = 0.0
+ while len(gauss) < n:
+ r2 = 2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * doubles[loc] - 1.0
+ x2 = 2.0 * doubles[loc + 1] - 1.0
+ r2 = x1 * x1 + x2 * x2
+ loc += 2
+
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ gauss.append(f * x2)
+ gauss.append(f * x1)
+
+ return gauss[:n]
+
+
+def test_seedsequence():
+ from numpy.random.bit_generator import (ISeedSequence,
+ ISpawnableSeedSequence,
+ SeedlessSeedSequence)
+
+ s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
+ s1.spawn(10)
+ s2 = SeedSequence(**s1.state)
+ assert_equal(s1.state, s2.state)
+ assert_equal(s1.n_children_spawned, s2.n_children_spawned)
+
+ # The interfaces cannot be instantiated themselves.
+ assert_raises(TypeError, ISeedSequence)
+ assert_raises(TypeError, ISpawnableSeedSequence)
+ dummy = SeedlessSeedSequence()
+ assert_raises(NotImplementedError, dummy.generate_state, 10)
+ assert len(dummy.spawn(10)) == 10
+
+
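+# Shared harness for the BitGenerator tests: each subclass supplies a
+# concrete bit generator and two CSV test sets of reference outputs, and
+# the tests check freshly generated streams against those golden values.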
+class Base:
+ dtype = np.uint64
+    data2 = data1 = {}  # placeholders; replaced per subclass in setup_class
+
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.seed_error_type = TypeError
+ cls.invalid_init_types = []
+ cls.invalid_init_values = []
+
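+    # Parse a reference CSV: the first line is `seed, s0, s1, ...`, and
+    # each following line is `index, value`; values are parsed with
+    # int(s, 0), so hex entries such as 0x10df37ef1de2eccf work.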
+ @classmethod
+ def _read_csv(cls, filename):
+ with open(filename) as csv:
+ seed = csv.readline()
+ seed = seed.split(',')
+ seed = [int(s.strip(), 0) for s in seed[1:]]
+ data = []
+ for line in csv:
+ data.append(int(line.split(',')[-1].strip(), 0))
+ return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
+
+ def test_raw(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ uints = bit_generator.random_raw(1000)
+ assert_equal(uints, self.data1['data'])
+
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ uints = bit_generator.random_raw()
+ assert_equal(uints, self.data1['data'][0])
+
+ bit_generator = self.bit_generator(*self.data2['seed'])
+ uints = bit_generator.random_raw(1000)
+ assert_equal(uints, self.data2['data'])
+
+ def test_random_raw(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ uints = bit_generator.random_raw(output=False)
+ assert uints is None
+ uints = bit_generator.random_raw(1000, output=False)
+ assert uints is None
+
+ def test_gauss_inv(self):
+ n = 25
+ rs = RandomState(self.bit_generator(*self.data1['seed']))
+ gauss = rs.standard_normal(n)
+ assert_allclose(gauss,
+ gauss_from_uint(self.data1['data'], n, self.bits))
+
+ rs = RandomState(self.bit_generator(*self.data2['seed']))
+ gauss = rs.standard_normal(25)
+ assert_allclose(gauss,
+ gauss_from_uint(self.data2['data'], n, self.bits))
+
+ def test_uniform_double(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ vals = uniform_from_uint(self.data1['data'], self.bits)
+ uniforms = rs.random(len(vals))
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float64)
+
+ rs = Generator(self.bit_generator(*self.data2['seed']))
+ vals = uniform_from_uint(self.data2['data'], self.bits)
+ uniforms = rs.random(len(vals))
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float64)
+
+ def test_uniform_float(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ vals = uniform32_from_uint(self.data1['data'], self.bits)
+ uniforms = rs.random(len(vals), dtype=np.float32)
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float32)
+
+ rs = Generator(self.bit_generator(*self.data2['seed']))
+ vals = uniform32_from_uint(self.data2['data'], self.bits)
+ uniforms = rs.random(len(vals), dtype=np.float32)
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float32)
+
+ def test_repr(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ assert 'Generator' in repr(rs)
+ assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs)
+
+ def test_str(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ assert 'Generator' in str(rs)
+ assert str(self.bit_generator.__name__) in str(rs)
+ assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs)
+
+ def test_pickle(self):
+ import pickle
+
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ bitgen_pkl = pickle.dumps(bit_generator)
+ reloaded = pickle.loads(bitgen_pkl)
+ reloaded_state = reloaded.state
+ assert_array_equal(Generator(bit_generator).standard_normal(1000),
+ Generator(reloaded).standard_normal(1000))
+ assert bit_generator is not reloaded
+ assert_state_equal(reloaded_state, state)
+
+ ss = SeedSequence(100)
+ aa = pickle.loads(pickle.dumps(ss))
+ assert_equal(ss.state, aa.state)
+
+ def test_invalid_state_type(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ with pytest.raises(TypeError):
+ bit_generator.state = {'1'}
+
+ def test_invalid_state_value(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ state['bit_generator'] = 'otherBitGenerator'
+ with pytest.raises(ValueError):
+ bit_generator.state = state
+
+ def test_invalid_init_type(self):
+ bit_generator = self.bit_generator
+ for st in self.invalid_init_types:
+ with pytest.raises(TypeError):
+ bit_generator(*st)
+
+ def test_invalid_init_values(self):
+ bit_generator = self.bit_generator
+ for st in self.invalid_init_values:
+ with pytest.raises((ValueError, OverflowError)):
+ bit_generator(*st)
+
+ def test_benchmark(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ bit_generator._benchmark(1)
+ bit_generator._benchmark(1, 'double')
+ with pytest.raises(ValueError):
+ bit_generator._benchmark(1, 'int32')
+
+ @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
+ def test_cffi(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ cffi_interface = bit_generator.cffi
+ assert isinstance(cffi_interface, interface)
+ other_cffi_interface = bit_generator.cffi
+ assert other_cffi_interface is cffi_interface
+
+ @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
+ def test_ctypes(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ ctypes_interface = bit_generator.ctypes
+ assert isinstance(ctypes_interface, interface)
+ other_ctypes_interface = bit_generator.ctypes
+ assert other_ctypes_interface is ctypes_interface
+
+ def test_getstate(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ alt_state = bit_generator.__getstate__()
+ assert_state_equal(state, alt_state)
+
+
+class TestPhilox(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = Philox
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(
+ join(pwd, './data/philox-testset-1.csv'))
+ cls.data2 = cls._read_csv(
+ join(pwd, './data/philox-testset-2.csv'))
+ cls.seed_error_type = TypeError
+ cls.invalid_init_types = []
+ cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
+
+ def test_set_key(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ keyed = self.bit_generator(counter=state['state']['counter'],
+ key=state['state']['key'])
+ assert_state_equal(bit_generator.state, keyed.state)
+
+
+class TestPCG64(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
+ cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
+ cls.seed_error_type = (ValueError, TypeError)
+ cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
+ cls.invalid_init_values = [(-1,)]
+
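+    # advance() arithmetic is modulo 2**128, so a step and that step
+    # offset by any multiple of 2**128 must land on identical states.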
+ def test_advance_symmetry(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ state = rs.bit_generator.state
+ step = -0x9e3779b97f4a7c150000000000000000
+ rs.bit_generator.advance(step)
+ val_neg = rs.integers(10)
+ rs.bit_generator.state = state
+ rs.bit_generator.advance(2**128 + step)
+ val_pos = rs.integers(10)
+ rs.bit_generator.state = state
+ rs.bit_generator.advance(10 * 2**128 + step)
+ val_big = rs.integers(10)
+ assert val_neg == val_pos
+ assert val_big == val_pos
+
+    def test_advance_large(self):
+ rs = Generator(self.bit_generator(38219308213743))
+ pcg = rs.bit_generator
+ state = pcg.state["state"]
+ initial_state = 287608843259529770491897792873167516365
+ assert state["state"] == initial_state
+ pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
+ state = pcg.state["state"]
+ advanced_state = 135275564607035429730177404003164635391
+ assert state["state"] == advanced_state
+
+
+class TestPCG64DXSM(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64DXSM
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv'))
+ cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv'))
+ cls.seed_error_type = (ValueError, TypeError)
+ cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
+ cls.invalid_init_values = [(-1,)]
+
+ def test_advance_symmetry(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ state = rs.bit_generator.state
+ step = -0x9e3779b97f4a7c150000000000000000
+ rs.bit_generator.advance(step)
+ val_neg = rs.integers(10)
+ rs.bit_generator.state = state
+ rs.bit_generator.advance(2**128 + step)
+ val_pos = rs.integers(10)
+ rs.bit_generator.state = state
+ rs.bit_generator.advance(10 * 2**128 + step)
+ val_big = rs.integers(10)
+ assert val_neg == val_pos
+ assert val_big == val_pos
+
+    def test_advance_large(self):
+ rs = Generator(self.bit_generator(38219308213743))
+ pcg = rs.bit_generator
+ state = pcg.state
+ initial_state = 287608843259529770491897792873167516365
+ assert state["state"]["state"] == initial_state
+ pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
+ state = pcg.state["state"]
+ advanced_state = 277778083536782149546677086420637664879
+ assert state["state"] == advanced_state
+
+
+class TestMT19937(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = MT19937
+ cls.bits = 32
+ cls.dtype = np.uint32
+ cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
+ cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
+ cls.seed_error_type = ValueError
+ cls.invalid_init_types = []
+ cls.invalid_init_values = [(-1,)]
+
+ def test_seed_float_array(self):
+ assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
+ assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
+ assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
+ assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
+ assert_raises(TypeError, self.bit_generator, [np.pi])
+ assert_raises(TypeError, self.bit_generator, [0, np.pi])
+
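+    # MT19937 also accepts the legacy RandomState state tuple
+    # (name, key, pos); the two trailing cached-Gaussian fields appended
+    # below are tolerated and do not change the generated stream.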
+ def test_state_tuple(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ bit_generator = rs.bit_generator
+ state = bit_generator.state
+ desired = rs.integers(2 ** 16)
+ tup = (state['bit_generator'], state['state']['key'],
+ state['state']['pos'])
+ bit_generator.state = tup
+ actual = rs.integers(2 ** 16)
+ assert_equal(actual, desired)
+ tup = tup + (0, 0.0)
+ bit_generator.state = tup
+ actual = rs.integers(2 ** 16)
+ assert_equal(actual, desired)
+
+
+class TestSFC64(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = SFC64
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(
+ join(pwd, './data/sfc64-testset-1.csv'))
+ cls.data2 = cls._read_csv(
+ join(pwd, './data/sfc64-testset-2.csv'))
+ cls.seed_error_type = (ValueError, TypeError)
+ cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
+ cls.invalid_init_values = [(-1,)]
+
+
+class TestDefaultRNG:
+ def test_seed(self):
+ for args in [(), (None,), (1234,), ([1234, 5678],)]:
+ rg = default_rng(*args)
+ assert isinstance(rg.bit_generator, PCG64)
+
+ def test_passthrough(self):
+ bg = Philox()
+ rg = default_rng(bg)
+ assert rg.bit_generator is bg
+ rg2 = default_rng(rg)
+ assert rg2 is rg
+ assert rg2.bit_generator is bg
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_extending.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_extending.py
new file mode 100644
index 00000000..bc24bd25
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_extending.py
@@ -0,0 +1,99 @@
+import os
+import pytest
+import shutil
+import subprocess
+import sys
+import warnings
+import numpy as np
+from numpy.distutils.misc_util import exec_mod_from_location
+from numpy.testing import IS_WASM
+
+try:
+ import cffi
+except ImportError:
+ cffi = None
+
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ cffi = None
+
+try:
+ with warnings.catch_warnings(record=True) as w:
+ # numba issue gh-4733
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ import numba
+except (ImportError, SystemError):
+ # Certain numpy/numba versions trigger a SystemError due to a numba bug
+ numba = None
+
+try:
+ import cython
+ from Cython.Compiler.Version import version as cython_version
+except ImportError:
+ cython = None
+else:
+ from numpy.compat import _pep440
+ # Cython 0.29.30 is required for Python 3.11 and there are
+ # other fixes in the 0.29 series that are needed even for earlier
+ # Python versions.
+ # Note: keep in sync with the one in pyproject.toml
+ required_version = '0.29.30'
+ if _pep440.parse(cython_version) < _pep440.Version(required_version):
+ # too old or wrong cython, skip the test
+ cython = None
+
+
+@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
+@pytest.mark.skipif(cython is None, reason="requires cython")
+@pytest.mark.slow
+def test_cython(tmp_path):
+ srcdir = os.path.join(os.path.dirname(__file__), '..')
+ shutil.copytree(srcdir, tmp_path / 'random')
+ # build the examples and "install" them into a temporary directory
+ build_dir = tmp_path / 'random' / '_examples' / 'cython'
+ subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
+ '--prefix', str(tmp_path / 'installdir'),
+ '--single-version-externally-managed',
+                           '--record', str(tmp_path / 'tmp_install_log.txt'),
+ ],
+ cwd=str(build_dir),
+ )
+ # gh-16162: make sure numpy's __init__.pxd was used for cython
+ # not really part of this test, but it is a convenient place to check
+ with open(build_dir / 'extending.c') as fid:
+ txt_to_find = 'NumPy API declarations from "numpy/__init__.pxd"'
+ for i, line in enumerate(fid):
+ if txt_to_find in line:
+ break
+ else:
+ assert False, ("Could not find '{}' in C file, "
+ "wrong pxd used".format(txt_to_find))
+ # get the path to the so's
+ so1 = so2 = None
+    with open(tmp_path / 'tmp_install_log.txt') as fid:
+ for line in fid:
+ if 'extending.' in line:
+ so1 = line.strip()
+ if 'extending_distributions' in line:
+ so2 = line.strip()
+ assert so1 is not None
+ assert so2 is not None
+ # import the so's without adding the directory to sys.path
+ exec_mod_from_location('extending', so1)
+ extending_distributions = exec_mod_from_location(
+ 'extending_distributions', so2)
+ # actually test the cython c-extension
+ from numpy.random import PCG64
+ values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
+ assert values.shape == (10,)
+ assert values.dtype == np.float64
+
+
+@pytest.mark.skipif(numba is None or cffi is None,
+ reason="requires numba and cffi")
+def test_numba():
+ from numpy.random._examples.numba import extending # noqa: F401
+
+
+@pytest.mark.skipif(cffi is None, reason="requires cffi")
+def test_cffi():
+ from numpy.random._examples.cffi import extending # noqa: F401
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py
new file mode 100644
index 00000000..54a5b73a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937.py
@@ -0,0 +1,2724 @@
+import sys
+import hashlib
+
+import pytest
+
+import numpy as np
+from numpy.linalg import LinAlgError
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_allclose,
+ assert_warns, assert_no_warnings, assert_array_equal,
+ assert_array_almost_equal, suppress_warnings, IS_WASM)
+
+from numpy.random import Generator, MT19937, SeedSequence, RandomState
+
+random = Generator(MT19937())
+
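+# Reference data for the MT19937 jump tests: sha256 digests of the
+# 624-word state key and the expected `pos`, recorded after `steps` draws,
+# both before and after jumping.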
+JUMP_TEST_DATA = [
+ {
+ "seed": 0,
+ "steps": 10,
+ "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9},
+ "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598},
+ },
+ {
+ "seed":384908324,
+ "steps":312,
+ "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311},
+ "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276},
+ },
+ {
+ "seed": [839438204, 980239840, 859048019, 821],
+ "steps": 511,
+ "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510},
+ "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475},
+ },
+]
+
+@pytest.fixture(scope='module', params=[True, False])
+def endpoint(request):
+ return request.param
+
+
+class TestSeed:
+ def test_scalar(self):
+ s = Generator(MT19937(0))
+ assert_equal(s.integers(1000), 479)
+ s = Generator(MT19937(4294967295))
+ assert_equal(s.integers(1000), 324)
+
+ def test_array(self):
+ s = Generator(MT19937(range(10)))
+ assert_equal(s.integers(1000), 465)
+ s = Generator(MT19937(np.arange(10)))
+ assert_equal(s.integers(1000), 465)
+ s = Generator(MT19937([0]))
+ assert_equal(s.integers(1000), 479)
+ s = Generator(MT19937([4294967295]))
+ assert_equal(s.integers(1000), 324)
+
+ def test_seedsequence(self):
+ s = MT19937(SeedSequence(0))
+ assert_equal(s.random_raw(1), 2058676884)
+
+ def test_invalid_scalar(self):
+        # seed must be an unsigned 32-bit integer
+ assert_raises(TypeError, MT19937, -0.5)
+ assert_raises(ValueError, MT19937, -1)
+
+ def test_invalid_array(self):
+ # seed must be an unsigned integer
+ assert_raises(TypeError, MT19937, [-0.5])
+ assert_raises(ValueError, MT19937, [-1])
+ assert_raises(ValueError, MT19937, [1, -2, 4294967296])
+
+ def test_noninstantized_bitgen(self):
+ assert_raises(ValueError, Generator, MT19937)
+
+
+class TestBinomial:
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ assert_array_equal(random.binomial(zeros, p), zeros)
+
+ def test_p_is_nan(self):
+ # Issue #4571.
+ assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial:
+ def test_basic(self):
+ random.multinomial(100, [0.2, 0.8])
+
+ def test_zero_probability(self):
+ random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+ def test_int_negative_interval(self):
+ assert_(-5 <= random.integers(-5, -1) < -1)
+ x = random.integers(-5, -1, 5)
+ assert_(np.all(-5 <= x))
+ assert_(np.all(x < -1))
+
+ def test_size(self):
+ # gh-3173
+ p = [0.5, 0.5]
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
+ (2, 2, 2))
+
+ assert_raises(TypeError, random.multinomial, 1, p,
+ float(1))
+
+ def test_invalid_prob(self):
+ assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
+ assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
+
+ def test_invalid_n(self):
+ assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
+ assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
+
+ def test_p_non_contiguous(self):
+ p = np.arange(15.)
+ p /= np.sum(p[1::3])
+ pvals = p[1::3]
+ random = Generator(MT19937(1432985819))
+ non_contig = random.multinomial(100, pvals=pvals)
+ random = Generator(MT19937(1432985819))
+ contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
+ assert_array_equal(non_contig, contig)
+
+ def test_multinomial_pvals_float32(self):
+ x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
+ 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
+ pvals = x / x.sum()
+ random = Generator(MT19937(1432985819))
+ match = r"[\w\s]*pvals array is cast to 64-bit floating"
+ with pytest.raises(ValueError, match=match):
+ random.multinomial(1, pvals)
+
+class TestMultivariateHypergeometric:
+
+ def setup_method(self):
+ self.seed = 8675309
+
+ def test_argument_validation(self):
+ # Error cases...
+
+ # `colors` must be a 1-d sequence
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ 10, 4)
+
+ # Negative nsample
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [2, 3, 4], -1)
+
+ # Negative color
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [-1, 2, 3], 2)
+
+ # nsample exceeds sum(colors)
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [2, 3, 4], 10)
+
+ # nsample exceeds sum(colors) (edge case of empty colors)
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [], 1)
+
+ # Validation errors associated with very large values in colors.
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [999999999, 101], 5, 1, 'marginals')
+
+ int64_info = np.iinfo(np.int64)
+ max_int64 = int64_info.max
+ max_int64_index = max_int64 // int64_info.dtype.itemsize
+ assert_raises(ValueError, random.multivariate_hypergeometric,
+ [max_int64_index - 100, 101], 5, 1, 'count')
+
+ @pytest.mark.parametrize('method', ['count', 'marginals'])
+ def test_edge_cases(self, method):
+ # Set the seed, but in fact, all the results in this test are
+ # deterministic, so we don't really need this.
+ random = Generator(MT19937(self.seed))
+
+ x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
+ assert_array_equal(x, [0, 0, 0])
+
+ x = random.multivariate_hypergeometric([], 0, method=method)
+ assert_array_equal(x, [])
+
+ x = random.multivariate_hypergeometric([], 0, size=1, method=method)
+ assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
+
+ x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
+ assert_array_equal(x, [0, 0, 0])
+
+ x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
+ assert_array_equal(x, [3, 0, 0])
+
+ colors = [1, 1, 0, 1, 1]
+ x = random.multivariate_hypergeometric(colors, sum(colors),
+ method=method)
+ assert_array_equal(x, colors)
+
+ x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
+ method=method)
+ assert_array_equal(x, [[3, 4, 5]]*3)
+
+ # Cases for nsample:
+ # nsample < 10
+ # 10 <= nsample < colors.sum()/2
+ # colors.sum()/2 < nsample < colors.sum() - 10
+ # colors.sum() - 10 < nsample < colors.sum()
+ @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
+ @pytest.mark.parametrize('method', ['count', 'marginals'])
+ @pytest.mark.parametrize('size', [5, (2, 3), 150000])
+ def test_typical_cases(self, nsample, method, size):
+ random = Generator(MT19937(self.seed))
+
+ colors = np.array([10, 5, 20, 25])
+ sample = random.multivariate_hypergeometric(colors, nsample, size,
+ method=method)
+ if isinstance(size, int):
+ expected_shape = (size,) + colors.shape
+ else:
+ expected_shape = size + colors.shape
+ assert_equal(sample.shape, expected_shape)
+ assert_((sample >= 0).all())
+ assert_((sample <= colors).all())
+ assert_array_equal(sample.sum(axis=-1),
+ np.full(size, fill_value=nsample, dtype=int))
+ if isinstance(size, int) and size >= 100000:
+ # This sample is large enough to compare its mean to
+ # the expected values.
+ assert_allclose(sample.mean(axis=0),
+ nsample * colors / colors.sum(),
+ rtol=1e-3, atol=0.005)
+
+ def test_repeatability1(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
+ method='count')
+ expected = np.array([[2, 1, 2],
+ [2, 1, 2],
+ [1, 1, 3],
+ [2, 0, 3],
+ [2, 1, 2]])
+ assert_array_equal(sample, expected)
+
+ def test_repeatability2(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([20, 30, 50], 50,
+ size=5,
+ method='marginals')
+ expected = np.array([[ 9, 17, 24],
+ [ 7, 13, 30],
+ [ 9, 15, 26],
+ [ 9, 17, 24],
+ [12, 14, 24]])
+ assert_array_equal(sample, expected)
+
+ def test_repeatability3(self):
+ random = Generator(MT19937(self.seed))
+ sample = random.multivariate_hypergeometric([20, 30, 50], 12,
+ size=5,
+ method='marginals')
+ expected = np.array([[2, 3, 7],
+ [5, 3, 4],
+ [2, 5, 5],
+ [5, 3, 4],
+ [1, 5, 6]])
+ assert_array_equal(sample, expected)
+
+
+class TestSetState:
+ def setup_method(self):
+ self.seed = 1234567890
+ self.rg = Generator(MT19937(self.seed))
+ self.bit_generator = self.rg.bit_generator
+ self.state = self.bit_generator.state
+ self.legacy_state = (self.state['bit_generator'],
+ self.state['state']['key'],
+ self.state['state']['pos'])
+
+ def test_gaussian_reset(self):
+ # Make sure the cached every-other-Gaussian is reset.
+ old = self.rg.standard_normal(size=3)
+ self.bit_generator.state = self.state
+ new = self.rg.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+    def test_gaussian_reset_in_medias_res(self):
+ # When the state is saved with a cached Gaussian, make sure the
+ # cached Gaussian is restored.
+
+ self.rg.standard_normal()
+ state = self.bit_generator.state
+ old = self.rg.standard_normal(size=3)
+ self.bit_generator.state = state
+ new = self.rg.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_negative_binomial(self):
+        # Ensure that negative_binomial accepts floating point
+        # arguments without truncating them.
+ self.rg.negative_binomial(0.5, 0.5)
+
+
+class TestIntegers:
+ rfunc = random.integers
+
+ # valid integer/boolean types
+ itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self, endpoint):
+ assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
+
+ def test_bounds_checking(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, lbnd,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
+ dtype=dt)
+
+ assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, [0],
+ endpoint=endpoint, dtype=dt)
+
+ def test_bounds_checking_array(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
+
+ assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [lbnd] * 2,
+ [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [1] * 2, 0,
+ endpoint=endpoint, dtype=dt)
+
+ def test_rng_zero_and_extremes(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ is_open = not endpoint
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+ assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+ assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd) // 2
+ assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+ assert_equal(self.rfunc([tgt], [tgt + is_open],
+ size=1000, endpoint=endpoint, dtype=dt),
+ tgt)
+
+ def test_rng_zero_and_extremes_array(self, endpoint):
+ size = 1000
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc([tgt], [tgt + 1],
+ size=size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc([tgt], [tgt + 1],
+ size=size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd) // 2
+ assert_equal(self.rfunc([tgt], [tgt + 1],
+ size=size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
+
+ def test_full_range(self, endpoint):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ try:
+ self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_full_range_array(self, endpoint):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ try:
+ self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_in_bounds_fuzz(self, endpoint):
+ # Don't use fixed seed
+ random = Generator(MT19937())
+
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
+ endpoint=endpoint, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+
+ vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
+ dtype=bool)
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_scalar_array_equiv(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ size = 1000
+ random = Generator(MT19937(1234))
+ scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
+ dtype=dt)
+
+ random = Generator(MT19937(1234))
+ scalar_array = random.integers([lbnd], [ubnd], size=size,
+ endpoint=endpoint, dtype=dt)
+
+ random = Generator(MT19937(1234))
+ array = random.integers([lbnd] * size, [ubnd] *
+ size, size=size, endpoint=endpoint, dtype=dt)
+ assert_array_equal(scalar, scalar_array)
+ assert_array_equal(scalar, array)
+
+ def test_repeatability(self, endpoint):
+        # We use a sha256 hash of generated sequences of 1000 samples
+        # in the range [0, 6) for all but bool, where the range
+        # is [0, 2). Hashes are computed over little-endian bytes.
+ tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3',
+ 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
+ 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
+ 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
+ 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1',
+ 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
+ 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
+ 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
+ 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'}
+
+ for dt in self.itype[1:]:
+ random = Generator(MT19937(1234))
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
+ dtype=dt)
+ else:
+ val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
+ dtype=dt).byteswap()
+
+ res = hashlib.sha256(val).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ random = Generator(MT19937(1234))
+ val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
+ dtype=bool).view(np.int8)
+ res = hashlib.sha256(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
+
+ def test_repeatability_broadcasting(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
+ ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ # view as little endian for hash
+ random = Generator(MT19937(1234))
+ val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
+ dtype=dt)
+
+ random = Generator(MT19937(1234))
+ val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
+ dtype=dt)
+
+ assert_array_equal(val, val_bc)
+
+ random = Generator(MT19937(1234))
+ val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
+ endpoint=endpoint, dtype=dt)
+
+ assert_array_equal(val, val_bc)
+
+ @pytest.mark.parametrize(
+ 'bound, expected',
+ [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
+ 3769704066, 1170797179, 4108474671])),
+ (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
+ 3769704067, 1170797180, 4108474672])),
+ (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
+ 1831631863, 1215661561, 3869512430]))]
+ )
+ def test_repeatability_32bit_boundary(self, bound, expected):
+ for size in [None, len(expected)]:
+ random = Generator(MT19937(1234))
+ x = random.integers(bound, size=size)
+ assert_equal(x, expected if size is not None else expected[0])
+
+ def test_repeatability_32bit_boundary_broadcasting(self):
+ desired = np.array([[[1622936284, 3620788691, 1659384060],
+ [1417365545, 760222891, 1909653332],
+ [3788118662, 660249498, 4092002593]],
+ [[3625610153, 2979601262, 3844162757],
+ [ 685800658, 120261497, 2694012896],
+ [1207779440, 1586594375, 3854335050]],
+ [[3004074748, 2310761796, 3012642217],
+ [2067714190, 2786677879, 1363865881],
+ [ 791663441, 1867303284, 2169727960]],
+ [[1939603804, 1250951100, 298950036],
+ [1040128489, 3791912209, 3317053765],
+ [3155528714, 61360675, 2305155588]],
+ [[ 817688762, 1335621943, 3288952434],
+ [1770890872, 1102951817, 1957607470],
+ [3099996017, 798043451, 48334215]]])
+ for size in [None, (5, 3, 3)]:
+ random = Generator(MT19937(12345))
+ x = random.integers([[-1], [0], [1]],
+ [2**32 - 1, 2**32, 2**32 + 1],
+ size=size)
+ assert_array_equal(x, desired if size is not None else desired[0])
+
+ def test_int64_uint64_broadcast_exceptions(self, endpoint):
+ configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
+ np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
+ (-2**63-1, -2**63-1))}
+ for dtype in configs:
+ for config in configs[dtype]:
+ low, high = config
+ high = high - endpoint
+ low_a = np.array([[low]*10])
+ high_a = np.array([high] * 10)
+ assert_raises(ValueError, random.integers, low, high,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low_a, high,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low, high_a,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low_a, high_a,
+ endpoint=endpoint, dtype=dtype)
+
+ low_o = np.array([[low]*10], dtype=object)
+ high_o = np.array([high] * 10, dtype=object)
+ assert_raises(ValueError, random.integers, low_o, high,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low, high_o,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low_o, high_o,
+ endpoint=endpoint, dtype=dtype)
+
+ def test_int64_uint64_corner_case(self, endpoint):
+        # When stored in NumPy arrays, `lbnd` is cast
+        # to np.int64, and `ubnd` is cast to np.uint64.
+        # Checking whether `lbnd` >= `ubnd` used to be
+        # done solely via direct comparison, which is incorrect
+        # because when NumPy tries to compare both numbers,
+        # it casts both to np.float64 because there is
+        # no integer superset of np.int64 and np.uint64. However,
+        # `ubnd` is too large to be represented in np.float64,
+        # causing it to be rounded down to np.iinfo(np.int64).max,
+        # leading to a ValueError because `lbnd` now equals
+        # the new `ubnd`.
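+        # For example, float(2**63 - 1) == float(2**63) is True: int64's
+        # max needs 63 significand bits, but float64 carries only 53.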
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ assert_equal(actual, tgt)
+
+ def test_respect_dtype_singleton(self, endpoint):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ dt = np.bool_ if dt is bool else dt
+
+ sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ assert_equal(sample.dtype, dt)
+
+ for dt in (bool, int, np.compat.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ assert not hasattr(sample, 'dtype')
+ assert_equal(type(sample), dt)
+
+ def test_respect_dtype_array(self, endpoint):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ dt = np.bool_ if dt is bool else dt
+
+ sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
+ assert_equal(sample.dtype, dt)
+ sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
+ dtype=dt)
+ assert_equal(sample.dtype, dt)
+
+ def test_zero_size(self, endpoint):
+ # See gh-7203
+ for dt in self.itype:
+ sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
+ assert sample.shape == (3, 0, 4)
+ assert sample.dtype == dt
+ assert self.rfunc(0, -10, 0, endpoint=endpoint,
+ dtype=dt).shape == (0,)
+ assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_equal(random.integers(0, -10, size=0).shape, (0,))
+ assert_equal(random.integers(10, 10, size=0).shape, (0,))
+
+ def test_error_byteorder(self):
+ other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+ with pytest.raises(ValueError):
+ random.integers(0, 200, size=10, dtype=other_byteord_dt)
+
+ # chi2max is the maximum acceptable chi-squared value.
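+    # Each case draws sample_size variates from [0, high) and compares the
+    # per-value counts against the uniform expectation with a chi-squared
+    # statistic on high - 1 degrees of freedom, bounded by chi2max.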
+ @pytest.mark.slow
+ @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
+ [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
+ (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
+ (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
+ (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
+ ])
+ def test_integers_small_dtype_chisquared(self, sample_size, high,
+ dtype, chi2max):
+ # Regression test for gh-14774.
+ samples = random.integers(high, size=sample_size, dtype=dtype)
+
+ values, counts = np.unique(samples, return_counts=True)
+ expected = sample_size / high
+ chi2 = ((counts - expected)**2 / expected).sum()
+ assert chi2 < chi2max
+
+
+class TestRandomDist:
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup_method(self):
+ self.seed = 1234567890
+
+ def test_integers(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.integers(-99, 99, size=(3, 2))
+ desired = np.array([[-80, -56], [41, 37], [-83, -16]])
+ assert_array_equal(actual, desired)
+
+ def test_integers_masked(self):
+        # Test the masked rejection sampling algorithm used to generate
+        # arrays of uint32 values within an interval.
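+        # (Masked rejection draws raw bits, masks them down to the
+        # smallest power-of-two range covering the interval, and redraws
+        # any value that still falls outside it.)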
+ random = Generator(MT19937(self.seed))
+ actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
+ desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
+ assert_array_equal(actual, desired)
+
+ def test_integers_closed(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
+ desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
+ assert_array_equal(actual, desired)
+
+ def test_integers_max_int(self):
+        # Tests whether integers with endpoint=True can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method have thrown an OverflowError when attempting
+ # to generate this integer.
+ actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
+ endpoint=True)
+
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
+
+ def test_random(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.random((3, 2))
+ desired = np.array([[0.096999199829214, 0.707517457682192],
+ [0.084364834598269, 0.767731206553125],
+ [0.665069021359413, 0.715487190596693]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.random()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_random_float(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.random((3, 2))
+ desired = np.array([[0.0969992 , 0.70751746],
+ [0.08436483, 0.76773121],
+ [0.66506902, 0.71548719]])
+ assert_array_almost_equal(actual, desired, decimal=7)
+
+ def test_random_float_scalar(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.random(dtype=np.float32)
+ desired = 0.0969992
+ assert_array_almost_equal(actual, desired, decimal=7)
+
+ @pytest.mark.parametrize('dtype, uint_view_type',
+ [(np.float32, np.uint32),
+ (np.float64, np.uint64)])
+ def test_random_distribution_of_lsb(self, dtype, uint_view_type):
+ random = Generator(MT19937(self.seed))
+ sample = random.random(100000, dtype=dtype)
+ num_ones_in_lsb = np.count_nonzero(sample.view(uint_view_type) & 1)
+ # The probability of a 1 in the least significant bit is 0.25.
+ # With a sample size of 100000, the probability that num_ones_in_lsb
+ # is outside the following range is less than 5e-11.
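+ # (Roughly: Binomial(100000, 0.25) has mean 25000 and standard
+ # deviation sqrt(100000 * 0.25 * 0.75) ~= 137, so these limits sit
+ # about 6.6 sigma out, consistent with the 5e-11 tail bound.)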
+ assert 24100 < num_ones_in_lsb < 25900
+
+ def test_random_unsupported_type(self):
+ assert_raises(TypeError, random.random, dtype='int32')
+
+ def test_choice_uniform_replace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 4)
+ desired = np.array([0, 0, 2, 2], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_replace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ desired = np.array([0, 1, 0, 1], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_uniform_noreplace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 3, replace=False)
+ desired = np.array([2, 0, 3], dtype=np.int64)
+ assert_array_equal(actual, desired)
+ actual = random.choice(4, 4, replace=False, shuffle=False)
+ desired = np.arange(4, dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_noreplace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+ desired = np.array([0, 2, 3], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_noninteger(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(['a', 'b', 'c', 'd'], 4)
+ desired = np.array(['a', 'a', 'c', 'c'])
+ assert_array_equal(actual, desired)
+
+ def test_choice_multidimensional_default_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
+ desired = np.array([[0, 1], [0, 1], [4, 5]])
+ assert_array_equal(actual, desired)
+
+ def test_choice_multidimensional_custom_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
+ desired = np.array([[0], [2], [4], [6]])
+ assert_array_equal(actual, desired)
+
+ def test_choice_exceptions(self):
+ sample = random.choice
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
+ assert_raises(ValueError, sample, [], 3)
+ assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+ p=[[0.25, 0.25], [0.25, 0.25]])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+ assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], 2,
+ replace=False, p=[1, 0, 0])
+
+ def test_choice_return_shape(self):
+ p = [0.1, 0.9]
+ # Check scalar
+ assert_(np.isscalar(random.choice(2, replace=True)))
+ assert_(np.isscalar(random.choice(2, replace=False)))
+ assert_(np.isscalar(random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(random.choice([1, 2], replace=True)))
+ assert_(random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
+ assert_(random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2, 3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_equal(random.choice(6, s, replace=True).shape, s)
+ assert_equal(random.choice(6, s, replace=False).shape, s)
+ assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
+ assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
+ assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
+
+ # Check zero-size
+ assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(random.integers(0, -10, size=0).shape, (0,))
+ assert_equal(random.integers(10, 10, size=0).shape, (0,))
+ assert_equal(random.choice(0, size=0).shape, (0,))
+ assert_equal(random.choice([], size=(0,)).shape, (0,))
+ assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_raises(ValueError, random.choice, [], 10)
+
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, random.choice, a, p=p)
+
+ def test_choice_p_non_contiguous(self):
+ p = np.ones(10) / 5
+ p[1::2] = 3.0
+ random = Generator(MT19937(self.seed))
+ non_contig = random.choice(5, 3, p=p[::2])
+ random = Generator(MT19937(self.seed))
+ contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
+ assert_array_equal(non_contig, contig)
+
+ def test_choice_return_type(self):
+ # gh 9867
+ p = np.ones(4) / 4.
+ actual = random.choice(4, 2)
+ assert actual.dtype == np.int64
+ actual = random.choice(4, 2, replace=False)
+ assert actual.dtype == np.int64
+ actual = random.choice(4, 2, p=p)
+ assert actual.dtype == np.int64
+ actual = random.choice(4, 2, p=p, replace=False)
+ assert actual.dtype == np.int64
+
+ def test_choice_large_sample(self):
+ choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222'
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(10000, 5000, replace=False)
+ if sys.byteorder != 'little':
+ actual = actual.byteswap()
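+ # Hash the little-endian byte layout so the digest matches the
+ # reference value on big-endian platforms as well.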
+ res = hashlib.sha256(actual.view(np.int8)).hexdigest()
+ assert_(choice_hash == res)
+
+ def test_bytes(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.bytes(10)
+ desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
+ assert_equal(actual, desired)
+
+ def test_shuffle(self):
+ # Test lists, arrays (of various dtypes), and multidimensional versions
+ # of both, C-contiguous or not:
+ for conv in [lambda x: np.array([]),
+ lambda x: x,
+ lambda x: np.asarray(x).astype(np.int8),
+ lambda x: np.asarray(x).astype(np.float32),
+ lambda x: np.asarray(x).astype(np.complex64),
+ lambda x: np.asarray(x).astype(object),
+ lambda x: [(i, i) for i in x],
+ lambda x: np.asarray([[i, i] for i in x]),
+ lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
+ # gh-4270
+ lambda x: np.asarray([(i, i) for i in x],
+ [("a", object, (1,)),
+ ("b", np.int32, (1,))])]:
+ random = Generator(MT19937(self.seed))
+ alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+ random.shuffle(alist)
+ actual = alist
+ desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_custom_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=1)
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_custom_axis_empty(self):
+ random = Generator(MT19937(self.seed))
+ desired = np.array([]).reshape((0, 6))
+ for axis in (0, 1):
+ actual = np.array([]).reshape((0, 6))
+ random.shuffle(actual, axis=axis)
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_axis_nonsquare(self):
+ y1 = np.arange(20).reshape(2, 10)
+ y2 = y1.copy()
+ random = Generator(MT19937(self.seed))
+ random.shuffle(y1, axis=1)
+ random = Generator(MT19937(self.seed))
+ random.shuffle(y2.T)
+ assert_array_equal(y1, y2)
+
+ def test_shuffle_masked(self):
+ # gh-3263
+ a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+ b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+ a_orig = a.copy()
+ b_orig = b.copy()
+ for i in range(50):
+ random.shuffle(a)
+ assert_equal(
+ sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+ random.shuffle(b)
+ assert_equal(
+ sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+ def test_shuffle_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.shuffle, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.shuffle, arr, 3)
+ assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
+ arr = [[1, 2, 3], [4, 5, 6]]
+ assert_raises(NotImplementedError, random.shuffle, arr, 1)
+
+ arr = np.array(3)
+ assert_raises(TypeError, random.shuffle, arr)
+ arr = np.ones((3, 2))
+ assert_raises(np.AxisError, random.shuffle, arr, 2)
+
+ def test_shuffle_not_writeable(self):
+ random = Generator(MT19937(self.seed))
+ a = np.zeros(5)
+ a.flags.writeable = False
+ with pytest.raises(ValueError, match='read-only'):
+ random.shuffle(a)
+
+ def test_permutation(self):
+ random = Generator(MT19937(self.seed))
+ alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
+ actual = random.permutation(alist)
+ desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
+ actual = random.permutation(arr_2d)
+ assert_array_equal(actual, np.atleast_2d(desired).T)
+
+ bad_x_str = "abcd"
+ assert_raises(np.AxisError, random.permutation, bad_x_str)
+
+ bad_x_float = 1.2
+ assert_raises(np.AxisError, random.permutation, bad_x_float)
+
+ random = Generator(MT19937(self.seed))
+ integer_val = 10
+ desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
+
+ actual = random.permutation(integer_val)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_custom_axis(self):
+ a = np.arange(16).reshape((4, 4))
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=1)
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.permutation, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.permutation, arr, 3)
+ assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
+
+ @pytest.mark.parametrize("dtype", [int, object])
+ @pytest.mark.parametrize("axis, expected",
+ [(None, np.array([[3, 7, 0, 9, 10, 11],
+ [8, 4, 2, 5, 1, 6]])),
+ (0, np.array([[6, 1, 2, 9, 10, 11],
+ [0, 7, 8, 3, 4, 5]])),
+ (1, np.array([[ 5, 3, 4, 0, 2, 1],
+ [11, 9, 10, 6, 8, 7]]))])
+ def test_permuted(self, dtype, axis, expected):
+ random = Generator(MT19937(self.seed))
+ x = np.arange(12).reshape(2, 6).astype(dtype)
+ random.permuted(x, axis=axis, out=x)
+ assert_array_equal(x, expected)
+
+ random = Generator(MT19937(self.seed))
+ x = np.arange(12).reshape(2, 6).astype(dtype)
+ y = random.permuted(x, axis=axis)
+ assert y.dtype == dtype
+ assert_array_equal(y, expected)
+
+ def test_permuted_with_strides(self):
+ random = Generator(MT19937(self.seed))
+ x0 = np.arange(22).reshape(2, 11)
+ x1 = x0.copy()
+ x = x0[:, ::3]
+ y = random.permuted(x, axis=1, out=x)
+ expected = np.array([[0, 9, 3, 6],
+ [14, 20, 11, 17]])
+ assert_array_equal(y, expected)
+ x1[:, ::3] = expected
+ # Verify that the original x0 was modified in-place as expected.
+ assert_array_equal(x1, x0)
+
+ def test_permuted_empty(self):
+ y = random.permuted([])
+ assert_array_equal(y, [])
+
+ @pytest.mark.parametrize('outshape', [(2, 3), 5])
+ def test_permuted_out_with_wrong_shape(self, outshape):
+ a = np.array([1, 2, 3])
+ out = np.zeros(outshape, dtype=a.dtype)
+ with pytest.raises(ValueError, match='same shape'):
+ random.permuted(a, out=out)
+
+ def test_permuted_out_with_wrong_type(self):
+ out = np.zeros((3, 5), dtype=np.int32)
+ x = np.ones((3, 5))
+ with pytest.raises(TypeError, match='Cannot cast'):
+ random.permuted(x, axis=1, out=out)
+
+ def test_permuted_not_writeable(self):
+ x = np.zeros((2, 5))
+ x.flags.writeable = False
+ with pytest.raises(ValueError, match='read-only'):
+ random.permuted(x, axis=1, out=x)
+
+ def test_beta(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.beta(.1, .9, size=(3, 2))
+ desired = np.array(
+ [[1.083029353267698e-10, 2.449965303168024e-11],
+ [2.397085162969853e-02, 3.590779671820755e-08],
+ [2.830254190078299e-04, 1.744709918330393e-01]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_binomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.binomial(100.123, .456, size=(3, 2))
+ desired = np.array([[42, 41],
+ [42, 48],
+ [44, 50]])
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.binomial(100.123, .456)
+ desired = 42
+ assert_array_equal(actual, desired)
+
+ def test_chisquare(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.chisquare(50, size=(3, 2))
+ desired = np.array([[32.9850547060149, 39.0219480493301],
+ [56.2006134779419, 57.3474165711485],
+ [55.4243733880198, 55.4209797925213]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_dirichlet(self):
+ random = Generator(MT19937(self.seed))
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.5439892869558927, 0.45601071304410745],
+ [0.5588917345860708, 0.4411082654139292 ]],
+ [[0.5632074165063435, 0.43679258349365657],
+ [0.54862581112627, 0.45137418887373015]],
+ [[0.49961831357047226, 0.5003816864295278 ],
+ [0.52374806183482, 0.47625193816517997]]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+ bad_alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, bad_alpha)
+
+ random = Generator(MT19937(self.seed))
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha)
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_dirichlet_size(self):
+ # gh-3173
+ p = np.array([51.72840233779265162, 39.74494232180943953])
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+ assert_raises(TypeError, random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, alpha)
+
+ # gh-15876
+ assert_raises(ValueError, random.dirichlet, [[5, 1]])
+ assert_raises(ValueError, random.dirichlet, [[5], [1]])
+ assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
+ assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
+
+ def test_dirichlet_alpha_non_contiguous(self):
+ a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
+ alpha = a[::2]
+ random = Generator(MT19937(self.seed))
+ non_contig = random.dirichlet(alpha, size=(3, 2))
+ random = Generator(MT19937(self.seed))
+ contig = random.dirichlet(np.ascontiguousarray(alpha),
+ size=(3, 2))
+ assert_array_almost_equal(non_contig, contig)
+
+ def test_dirichlet_small_alpha(self):
+ eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
+ alpha = eps * np.array([1., 1.0e-3])
+ random = Generator(MT19937(self.seed))
+ actual = random.dirichlet(alpha, size=(3, 2))
+ expected = np.array([
+ [[1., 0.],
+ [1., 0.]],
+ [[1., 0.],
+ [1., 0.]],
+ [[1., 0.],
+ [1., 0.]]
+ ])
+ assert_array_almost_equal(actual, expected, decimal=15)
+
+ @pytest.mark.slow
+ def test_dirichlet_moderately_small_alpha(self):
+ # Use alpha.max() < 0.1 to trigger stick breaking code path
+ alpha = np.array([0.02, 0.04, 0.03])
+ exact_mean = alpha / alpha.sum()
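+ # For alpha = [0.02, 0.04, 0.03], exact_mean is [2/9, 4/9, 1/3].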
+ random = Generator(MT19937(self.seed))
+ sample = random.dirichlet(alpha, size=20000000)
+ sample_mean = sample.mean(axis=0)
+ assert_allclose(sample_mean, exact_mean, rtol=1e-3)
+
+ def test_exponential(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[0.098845481066258, 1.560752510746964],
+ [0.075730916041636, 1.769098974710777],
+ [1.488602544592235, 2.49684815275751 ]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_exponential_0(self):
+ assert_equal(random.exponential(scale=0), 0)
+ assert_raises(ValueError, random.exponential, scale=-0.)
+
+ def test_f(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.f(12, 77, size=(3, 2))
+ desired = np.array([[0.461720027077085, 1.100441958872451],
+ [1.100337455217484, 0.91421736740018 ],
+ [0.500811891303113, 0.826802454552058]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gamma(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
+ [18.73983605132985, 19.57961681699238],
+ [18.17897755150825, 18.17653912505234]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_gamma_0(self):
+ assert_equal(random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
+
+ def test_geometric(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.geometric(.123456789, size=(3, 2))
+ desired = np.array([[1, 11],
+ [1, 12],
+ [11, 17]])
+ assert_array_equal(actual, desired)
+
+ def test_geometric_exceptions(self):
+ assert_raises(ValueError, random.geometric, 1.1)
+ assert_raises(ValueError, random.geometric, [1.1] * 10)
+ assert_raises(ValueError, random.geometric, -0.1)
+ assert_raises(ValueError, random.geometric, [-0.1] * 10)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.geometric, np.nan)
+ assert_raises(ValueError, random.geometric, [np.nan] * 10)
+
+ def test_gumbel(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[ 4.688397515056245, -0.289514845417841],
+ [ 4.981176042584683, -0.633224272589149],
+ [-0.055915275687488, -0.333962478257953]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gumbel_0(self):
+ assert_equal(random.gumbel(scale=0), 0)
+ assert_raises(ValueError, random.gumbel, scale=-0.)
+
+ def test_hypergeometric(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ desired = np.array([[ 9, 9],
+ [ 9, 9],
+ [10, 9]])
+ assert_array_equal(actual, desired)
+
+ # Test nbad = 0
+ actual = random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ def test_laplace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-3.156353949272393, 1.195863024830054],
+ [-3.435458081645966, 1.656882398925444],
+ [ 0.924824032467446, 1.251116432209336]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_laplace_0(self):
+ assert_equal(random.laplace(scale=0), 0)
+ assert_raises(ValueError, random.laplace, scale=-0.)
+
+ def test_logistic(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-4.338584631510999, 1.890171436749954],
+ [-4.64547787337966 , 2.514545562919217],
+ [ 1.495389489198666, 1.967827627577474]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_lognormal(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[ 0.0268252166335, 13.9534486483053],
+ [ 0.1204014788936, 2.2422077497792],
+ [ 4.2484199496128, 12.0093343977523]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_lognormal_0(self):
+ assert_equal(random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, random.lognormal, sigma=-0.)
+
+ def test_logseries(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.logseries(p=.923456789, size=(3, 2))
+ desired = np.array([[14, 17],
+ [3, 18],
+ [5, 1]])
+ assert_array_equal(actual, desired)
+
+ def test_logseries_zero(self):
+ random = Generator(MT19937(self.seed))
+ assert random.logseries(0) == 1
+
+ @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.])
+ def test_logseries_exceptions(self, value):
+ random = Generator(MT19937(self.seed))
+ with np.errstate(invalid="ignore"):
+ with pytest.raises(ValueError):
+ random.logseries(value)
+ with pytest.raises(ValueError):
+ # contiguous path:
+ random.logseries(np.array([value] * 10))
+ with pytest.raises(ValueError):
+ # non-contiguous path:
+ random.logseries(np.array([value] * 10)[::2])
+
+ def test_multinomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+ desired = np.array([[[1, 5, 1, 6, 4, 3],
+ [4, 2, 6, 2, 4, 2]],
+ [[5, 3, 2, 6, 3, 1],
+ [4, 4, 0, 2, 3, 7]],
+ [[6, 3, 1, 5, 3, 2],
+ [5, 5, 3, 1, 2, 4]]])
+ assert_array_equal(actual, desired)
+
+ @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+ @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+ def test_multivariate_normal(self, method):
+ random = Generator(MT19937(self.seed))
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = random.multivariate_normal(mean, cov, size, method=method)
+ desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
+ [-0.9967333370066214, 10.342002097029821 ]],
+ [[ 0.7850019631242964, 11.181113712443013 ],
+ [ 0.8901349653255224, 8.873825399642492 ]],
+ [[ 0.7130260107430003, 9.551628690083056 ],
+ [ 0.7127098726541128, 11.991709234143173 ]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check the default size; this used to raise a deprecation warning
+ actual = random.multivariate_normal(mean, cov, method=method)
+ desired = np.array([0.233278563284287, 9.424140804347195])
+ assert_array_almost_equal(actual, desired, decimal=15)
+ # Check that a non-symmetric covariance input raises an exception
+ # when check_valid='raise' with the default svd method.
+ mean = [0, 0]
+ cov = [[1, 2], [1, 2]]
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ # Check that non positive-semidefinite covariance warns with
+ # RuntimeWarning
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+ method='eigh')
+ assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+ method='cholesky')
+
+ # and that it doesn't emit a RuntimeWarning when check_valid='ignore'
+ assert_no_warnings(random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises a ValueError when check_valid='raise'
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise', method='eigh')
+
+ # check degenerate samples from singular covariance matrix
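+ # ([[1, 1], [1, 1]] is rank one: the two components are perfectly
+ # correlated, so svd/eigh yield equal coordinates while cholesky
+ # must fail on a matrix that is not positive definite.)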
+ cov = [[1, 1], [1, 1]]
+ if method in ('svd', 'eigh'):
+ samples = random.multivariate_normal(mean, cov, size=(3, 2),
+ method=method)
+ assert_array_almost_equal(samples[..., 0], samples[..., 1],
+ decimal=6)
+ else:
+ assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+ method='cholesky')
+
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov, method=method)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
+ @pytest.mark.parametrize('mean, cov', [([0], [[1+1j]]), ([0j], [[1]])])
+ def test_multivariate_normal_disallow_complex(self, mean, cov):
+ random = Generator(MT19937(self.seed))
+ with pytest.raises(TypeError, match="must not be complex"):
+ random.multivariate_normal(mean, cov)
+
+ @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+ def test_multivariate_normal_basic_stats(self, method):
+ random = Generator(MT19937(self.seed))
+ n_s = 1000
+ mean = np.array([1, 2])
+ cov = np.array([[2, 1], [1, 2]])
+ s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
+ s_center = s - mean
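+ # Unbiased sample covariance: (X - mean).T @ (X - mean) / (n - 1).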
+ cov_emp = (s_center.T @ s_center) / (n_s - 1)
+ # these are pretty loose and are only designed to detect major errors
+ assert np.all(np.abs(s_center.mean(-2)) < 0.1)
+ assert np.all(np.abs(cov_emp - cov) < 0.2)
+
+ def test_negative_binomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[543, 727],
+ [775, 760],
+ [600, 674]])
+ assert_array_equal(actual, desired)
+
+ def test_negative_binomial_exceptions(self):
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.negative_binomial, 100, np.nan)
+ assert_raises(ValueError, random.negative_binomial, 100,
+ [np.nan] * 10)
+
+ def test_negative_binomial_p0_exception(self):
+ # Verify that p=0 raises an exception.
+ with assert_raises(ValueError):
+ random.negative_binomial(1, 0)
+
+ def test_negative_binomial_invalid_p_n_combination(self):
+ # Verify that values of p and n that would result in an overflow
+ # or infinite loop raise an exception.
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.negative_binomial, 2**62, 0.1)
+ assert_raises(ValueError, random.negative_binomial, [2**62], [0.1])
+
+ def test_noncentral_chisquare(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[ 1.70561552362133, 15.97378184942111],
+ [13.71483425173724, 20.17859633310629],
+ [11.3615477156643 , 3.67891108738029]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
+ [1.14554372041263e+00, 1.38187755933435e-03],
+ [1.90659181905387e+00, 1.21772577941822e+00]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[0.82947954590419, 1.80139670767078],
+ [6.58720057417794, 7.00491463609814],
+ [6.31101879073157, 6.30982307753005]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ size=(3, 2))
+ desired = np.array([[0.060310671139 , 0.23866058175939],
+ [0.86860246709073, 0.2668510459738 ],
+ [0.23375780078364, 1.88922102885943]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f_nan(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
+ assert np.isnan(actual)
+
+ def test_normal(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-3.618412914693162, 2.635726692647081],
+ [-2.116923463013243, 0.807460983059643],
+ [ 1.446547137248593, 2.485684213886024]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_normal_0(self):
+ assert_equal(random.normal(scale=0), 0)
+ assert_raises(ValueError, random.normal, scale=-0.)
+
+ def test_pareto(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
+ [7.2640150889064703e-01, 3.4650454783825594e+05],
+ [4.5852344481994740e+04, 6.5851383009539105e+07]])
+ # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+ # matrix differs by 24 nulps. Discussion:
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # Consensus is that this is probably some gcc quirk that affects
+ # rounding but not in any important way, so we just use a looser
+ # tolerance on this test:
+ np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
+
+ def test_poisson(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.poisson(lam=.123456789, size=(3, 2))
+ desired = np.array([[0, 0],
+ [0, 0],
+ [0, 0]])
+ assert_array_equal(actual, desired)
+
+ def test_poisson_exceptions(self):
+ lambig = np.iinfo('int64').max
+ lamneg = -1
+ assert_raises(ValueError, random.poisson, lamneg)
+ assert_raises(ValueError, random.poisson, [lamneg] * 10)
+ assert_raises(ValueError, random.poisson, lambig)
+ assert_raises(ValueError, random.poisson, [lambig] * 10)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.poisson, np.nan)
+ assert_raises(ValueError, random.poisson, [np.nan] * 10)
+
+ def test_power(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
+ [2.482442984543471e-10, 1.527108843266079e-01],
+ [8.188283434244285e-02, 3.950547209346948e-01]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rayleigh(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.rayleigh(scale=10, size=(3, 2))
+ desired = np.array([[4.19494429102666, 16.66920198906598],
+ [3.67184544902662, 17.74695521962917],
+ [16.27935397855501, 21.08355560691792]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_rayleigh_0(self):
+ assert_equal(random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, random.rayleigh, scale=-0.)
+
+ def test_standard_cauchy(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[-1.489437778266206, -3.275389641569784],
+ [ 0.560102864910406, -0.680780916282552],
+ [-1.314912905226277, 0.295852965660225]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_exponential(size=(3, 2), method='inv')
+ desired = np.array([[0.102031839440643, 1.229350298474972],
+ [0.088137284693098, 1.459859985522667],
+ [1.093830802293668, 1.256977002164613]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential_type_error(self):
+ assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
+
+ def test_standard_gamma(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[0.62970724056362, 1.22379851271008],
+ [3.899412530884 , 4.12479964250139],
+ [3.74994102464584, 3.74929307690815]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gamma_scalar_float(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_gamma(3, dtype=np.float32)
+ desired = 2.9242148399353027
+ assert_array_almost_equal(actual, desired, decimal=6)
+
+ def test_standard_gamma_float(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[0.62971, 1.2238 ],
+ [3.89941, 4.1248 ],
+ [3.74994, 3.74929]])
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ def test_standard_gamma_float_out(self):
+ actual = np.zeros((3, 2), dtype=np.float32)
+ random = Generator(MT19937(self.seed))
+ random.standard_gamma(10.0, out=actual, dtype=np.float32)
+ desired = np.array([[10.14987, 7.87012],
+ [ 9.46284, 12.56832],
+ [13.82495, 7.81533]], dtype=np.float32)
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ random = Generator(MT19937(self.seed))
+ random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ def test_standard_gamma_unknown_type(self):
+ assert_raises(TypeError, random.standard_gamma, 1.,
+ dtype='int32')
+
+ def test_out_size_mismatch(self):
+ out = np.zeros(10)
+ assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
+ out=out)
+ assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
+ out=out)
+
+ def test_standard_gamma_0(self):
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[-1.870934851846581, 1.25613495182354 ],
+ [-1.120190126006621, 0.342002097029821],
+ [ 0.661545174124296, 1.181113712443012]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_normal_unsupported_type(self):
+ assert_raises(TypeError, random.standard_normal, dtype=np.int32)
+
+ def test_standard_t(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[-1.484666193042647, 0.30597891831161 ],
+ [ 1.056684299648085, -0.407312602088507],
+ [ 0.130704414281157, -2.038053410490321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
+ [ 7.68152445215983, 14.36169131136546],
+ [13.16105603911429, 13.72341621856971]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[2.13306255040998 , 7.816987531021207],
+ [2.015436610109887, 8.377577533009589],
+ [7.421792588856135, 7.891185744455209]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw
+ # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
+ # DBL_MAX by increasing fmin a bit
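+ # (The draw is effectively low + (high - low) * U, so high - low
+ # must itself be a finite double; wider ranges overflow above.)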
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_uniform_zero_range(self):
+ func = random.uniform
+ result = func(1.5, 1.5)
+ assert_allclose(result, 1.5)
+ result = func([0.0, np.pi], [0.0, np.pi])
+ assert_allclose(result, [0.0, np.pi])
+ result = func([[2145.12], [2145.12]], [2145.12, 2145.12])
+ assert_allclose(result, 2145.12 + np.zeros((2, 2)))
+
+ def test_uniform_neg_range(self):
+ func = random.uniform
+ assert_raises(ValueError, func, 2, 1)
+ assert_raises(ValueError, func, [1, 2], [1, 1])
+ assert_raises(ValueError, func, [[0, 1],[2, 3]], 2)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ #
+ # Regression test for gh: 8865
+
+ class ThrowingFloat(np.ndarray):
+ def __float__(self):
+ raise TypeError
+
+ throwing_float = np.array(1.0).view(ThrowingFloat)
+ assert_raises(TypeError, random.uniform, throwing_float,
+ throwing_float)
+
+ class ThrowingInteger(np.ndarray):
+ def __int__(self):
+ raise TypeError
+
+ throwing_int = np.array(1).view(ThrowingInteger)
+ assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
+
+ def test_vonmises(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ desired = np.array([[ 1.107972248690106, 2.841536476232361],
+ [ 1.832602376042457, 1.945511926976032],
+ [-0.260147475776542, 2.058047492231698]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_vonmises_small(self):
+ # check infinite loop, gh-4720
+ random = Generator(MT19937(self.seed))
+ r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+ assert_(np.isfinite(r).all())
+
+ def test_vonmises_nan(self):
+ random = Generator(MT19937(self.seed))
+ r = random.vonmises(mu=0., kappa=np.nan)
+ assert_(np.isnan(r))
+
+ @pytest.mark.parametrize("kappa", [1e4, 1e15])
+ def test_vonmises_large_kappa(self, kappa):
+ random = Generator(MT19937(self.seed))
+ rs = RandomState(random.bit_generator)
+ state = random.bit_generator.state
+
+ random_state_vals = rs.vonmises(0, kappa, size=10)
+ random.bit_generator.state = state
+ gen_vals = random.vonmises(0, kappa, size=10)
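+ # Below kappa ~ 1e6 the two implementations share a stream; above
+ # it Generator evidently takes a different large-kappa path, so
+ # every draw is expected to differ.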
+ if kappa < 1e6:
+ assert_allclose(random_state_vals, gen_vals)
+ else:
+ assert np.all(random_state_vals != gen_vals)
+
+ @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2])
+ @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15])
+ def test_vonmises_large_kappa_range(self, mu, kappa):
+ random = Generator(MT19937(self.seed))
+ r = random.vonmises(mu, kappa, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_wald(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[0.26871721804551, 3.2233942732115 ],
+ [2.20328374987066, 2.40958405189353],
+ [2.07093587449261, 0.73073890064369]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_weibull(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[0.138613914769468, 1.306463419753191],
+ [0.111623365934763, 1.446570494646721],
+ [1.257145775276011, 1.914247725027957]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_weibull_0(self):
+ random = Generator(MT19937(self.seed))
+ assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, random.weibull, a=-0.)
+
+ def test_zipf(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.zipf(a=1.23, size=(3, 2))
+ desired = np.array([[ 1, 1],
+ [ 10, 867],
+ [354, 2]])
+ assert_array_equal(actual, desired)
+
+
+class TestBroadcast:
+ # Tests that functions that broadcast behave
+ # correctly when presented with non-scalar arguments
+ def setup_method(self):
+ self.seed = 123456789
+
+ def test_uniform(self):
+ random = Generator(MT19937(self.seed))
+ low = [0]
+ high = [1]
+ uniform = random.uniform
+ desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.uniform(low * 3, high)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.uniform(low, high * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ random = Generator(MT19937(self.seed))
+ desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.normal(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.normal, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ normal = random.normal
+ actual = normal(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc, bad_scale * 3)
+
+ def test_beta(self):
+ a = [1]
+ b = [2]
+ bad_a = [-1]
+ bad_b = [-2]
+ desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
+
+ random = Generator(MT19937(self.seed))
+ beta = random.beta
+ actual = beta(a * 3, b)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a * 3, b)
+ assert_raises(ValueError, beta, a * 3, bad_b)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.beta(a, b * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_exponential(self):
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.exponential(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.exponential, bad_scale * 3)
+
+ def test_standard_gamma(self):
+ shape = [1]
+ bad_shape = [-1]
+ desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
+
+ random = Generator(MT19937(self.seed))
+ std_gamma = random.standard_gamma
+ actual = std_gamma(shape * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, std_gamma, bad_shape * 3)
+
+ def test_gamma(self):
+ shape = [1]
+ scale = [2]
+ bad_shape = [-1]
+ bad_scale = [-2]
+ desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
+
+ random = Generator(MT19937(self.seed))
+ gamma = random.gamma
+ actual = gamma(shape * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape * 3, scale)
+ assert_raises(ValueError, gamma, shape * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ gamma = random.gamma
+ actual = gamma(shape, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape, scale * 3)
+ assert_raises(ValueError, gamma, shape, bad_scale * 3)
+
+ def test_f(self):
+ dfnum = [1]
+ dfden = [2]
+ bad_dfnum = [-1]
+ bad_dfden = [-2]
+ desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
+
+ random = Generator(MT19937(self.seed))
+ f = random.f
+ actual = f(dfnum * 3, dfden)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum * 3, dfden)
+ assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+
+ random = Generator(MT19937(self.seed))
+ f = random.f
+ actual = f(dfnum, dfden * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum, dfden * 3)
+ assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+
+ def test_noncentral_f(self):
+ dfnum = [2]
+ dfden = [3]
+ nonc = [4]
+ bad_dfnum = [0]
+ bad_dfden = [-1]
+ bad_nonc = [-2]
+ desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
+
+ random = Generator(MT19937(self.seed))
+ nonc_f = random.noncentral_f
+ actual = nonc_f(dfnum * 3, dfden, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
+
+ assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+
+ random = Generator(MT19937(self.seed))
+ nonc_f = random.noncentral_f
+ actual = nonc_f(dfnum, dfden * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+
+ random = Generator(MT19937(self.seed))
+ nonc_f = random.noncentral_f
+ actual = nonc_f(dfnum, dfden, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+
+ def test_noncentral_f_small_df(self):
+ random = Generator(MT19937(self.seed))
+ desired = np.array([0.04714867120827, 0.1239390327694])
+ actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_chisquare(self):
+ df = [1]
+ bad_df = [-1]
+ desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.chisquare(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.chisquare, bad_df * 3)
+
+ def test_noncentral_chisquare(self):
+ df = [1]
+ nonc = [2]
+ bad_df = [-1]
+ bad_nonc = [-2]
+ desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
+
+ random = Generator(MT19937(self.seed))
+ nonc_chi = random.noncentral_chisquare
+ actual = nonc_chi(df * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
+ assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+
+ random = Generator(MT19937(self.seed))
+ nonc_chi = random.noncentral_chisquare
+ actual = nonc_chi(df, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
+ assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+
+ def test_standard_t(self):
+ df = [1]
+ bad_df = [-1]
+ desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_t(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.standard_t, bad_df * 3)
+
+ def test_vonmises(self):
+ mu = [2]
+ kappa = [1]
+ bad_kappa = [-1]
+ desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.vonmises(mu * 3, kappa)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.vonmises(mu, kappa * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
+
+ def test_pareto(self):
+ a = [1]
+ bad_a = [-1]
+ desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.pareto(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.pareto, bad_a * 3)
+
+ def test_weibull(self):
+ a = [1]
+ bad_a = [-1]
+ desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.weibull(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.weibull, bad_a * 3)
+
+ def test_power(self):
+ a = [1]
+ bad_a = [-1]
+ desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.power(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.power, bad_a * 3)
+
+ def test_laplace(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
+
+ random = Generator(MT19937(self.seed))
+ laplace = random.laplace
+ actual = laplace(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ laplace = random.laplace
+ actual = laplace(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc, bad_scale * 3)
+
+ def test_gumbel(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
+
+ random = Generator(MT19937(self.seed))
+ gumbel = random.gumbel
+ actual = gumbel(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ gumbel = random.gumbel
+ actual = gumbel(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+
+ def test_logistic(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.logistic(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.logistic(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
+ assert_equal(random.logistic(1.0, 0.0), 1.0)
+
+ def test_lognormal(self):
+ mean = [0]
+ sigma = [1]
+ bad_sigma = [-1]
+ desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
+
+ random = Generator(MT19937(self.seed))
+ lognormal = random.lognormal
+ actual = lognormal(mean * 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.lognormal(mean, sigma * 3)
+ assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array(
+ [1.1597068009872629,
+ 0.6539188836253857,
+ 1.1981526554349398]
+ )
+
+ random = Generator(MT19937(self.seed))
+ actual = random.rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+ assert_raises(ValueError, random.wald, mean * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.wald, bad_mean, scale * 3)
+ assert_raises(ValueError, random.wald, mean, bad_scale * 3)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+ bad_left_two, bad_mode_two = right * 2
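+ # right * 2 == [3, 3], so both unpack to the scalar 3 (== right[0]).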
+ desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
+
+ def test_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ desired = np.array([0, 0, 1])
+
+ random = Generator(MT19937(self.seed))
+ binom = random.binomial
+ actual = binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n * 3, p)
+ assert_raises(ValueError, binom, n * 3, bad_p_one)
+ assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.binomial(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n, p * 3)
+ assert_raises(ValueError, binom, n, bad_p_one * 3)
+ assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+ def test_negative_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ desired = np.array([0, 2, 1], dtype=np.int64)
+
+ random = Generator(MT19937(self.seed))
+ neg_binom = random.negative_binomial
+ actual = neg_binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n * 3, p)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+ random = Generator(MT19937(self.seed))
+ neg_binom = random.negative_binomial
+ actual = neg_binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n, p * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+ def test_poisson(self):
+ lam = [1]
+ bad_lam_one = [-1]
+ desired = np.array([0, 0, 3])
+
+ random = Generator(MT19937(self.seed))
+ max_lam = random._poisson_lam_max
+ bad_lam_two = [max_lam * 2]
+ poisson = random.poisson
+ actual = poisson(lam * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, poisson, bad_lam_one * 3)
+ assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+ def test_zipf(self):
+ a = [2]
+ bad_a = [0]
+ desired = np.array([1, 8, 1])
+
+ random = Generator(MT19937(self.seed))
+ zipf = random.zipf
+ actual = zipf(a * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, zipf, bad_a * 3)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, zipf, np.nan)
+ assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+ def test_geometric(self):
+ p = [0.5]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ desired = np.array([1, 1, 3])
+
+ random = Generator(MT19937(self.seed))
+ geometric = random.geometric
+ actual = geometric(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, geometric, bad_p_one * 3)
+ assert_raises(ValueError, geometric, bad_p_two * 3)
+
+ def test_hypergeometric(self):
+ ngood = [1]
+ nbad = [2]
+ nsample = [2]
+ bad_ngood = [-1]
+ bad_nbad = [-2]
+ bad_nsample_one = [-1]
+ bad_nsample_two = [4]
+ desired = np.array([0, 0, 1])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.hypergeometric(ngood * 3, nbad, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
+ assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.hypergeometric(ngood, nbad * 3, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
+ assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
+
+ random = Generator(MT19937(self.seed))
+ hypergeom = random.hypergeometric
+ actual = hypergeom(ngood, nbad, nsample * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
+ assert_raises(ValueError, hypergeom, -1, 10, 20)
+ assert_raises(ValueError, hypergeom, 10, -1, 20)
+ assert_raises(ValueError, hypergeom, 10, 10, -1)
+ assert_raises(ValueError, hypergeom, 10, 10, 25)
+
+ # ValueError for arguments that are too big.
+ assert_raises(ValueError, hypergeom, 2**30, 10, 20)
+ assert_raises(ValueError, hypergeom, 999, 2**31, 50)
+ assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
+
+ def test_logseries(self):
+ p = [0.5]
+ bad_p_one = [2]
+ bad_p_two = [-1]
+ desired = np.array([1, 1, 1])
+
+ random = Generator(MT19937(self.seed))
+ logseries = random.logseries
+ actual = logseries(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, logseries, bad_p_one * 3)
+ assert_raises(ValueError, logseries, bad_p_two * 3)
+
+ def test_multinomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
+ desired = np.array([[[0, 0, 2, 1, 2, 0],
+ [2, 3, 6, 4, 2, 3]],
+ [[1, 0, 1, 0, 2, 1],
+ [7, 2, 2, 1, 4, 4]],
+ [[0, 2, 0, 1, 2, 0],
+ [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial([5, 20], [1 / 6.] * 6)
+ desired = np.array([[0, 0, 2, 1, 2, 0],
+ [2, 3, 6, 4, 2, 3]], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2)
+ desired = np.array([[0, 0, 2, 1, 2, 0],
+ [2, 3, 6, 4, 2, 3]], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial([[5], [20]], [[1 / 6.] * 6] * 2)
+ desired = np.array([[[0, 0, 2, 1, 2, 0],
+ [0, 0, 2, 1, 1, 1]],
+ [[4, 2, 3, 3, 5, 3],
+ [7, 2, 2, 1, 4, 4]]], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ @pytest.mark.parametrize("n", [10,
+ np.array([10, 10]),
+ np.array([[[10]], [[10]]])
+ ]
+ )
+ def test_multinomial_pval_broadcast(self, n):
+ random = Generator(MT19937(self.seed))
+ pvals = np.array([1 / 4] * 4)
+ actual = random.multinomial(n, pvals)
+ n_shape = tuple() if isinstance(n, int) else n.shape
+ expected_shape = n_shape + (4,)
+ assert actual.shape == expected_shape
+ pvals = np.vstack([pvals, pvals])
+ actual = random.multinomial(n, pvals)
+ expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,)
+ assert actual.shape == expected_shape
+
+ pvals = np.vstack([[pvals], [pvals]])
+ actual = random.multinomial(n, pvals)
+ expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1])
+ assert actual.shape == expected_shape + (4,)
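+ # e.g. n.shape == (2,) with pvals.shape == (2, 2, 4) broadcasts
+ # the leading dims to (2, 2), then appends the category axis,
+ # giving draws of shape (2, 2, 4).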
+ actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape)
+ assert actual.shape == (3, 2) + expected_shape + (4,)
+
+ with pytest.raises(ValueError):
+ # Ensure that size is not broadcast
+ actual = random.multinomial(n, pvals, size=(1,) * 6)
+
+ def test_invalid_pvals_broadcast(self):
+ random = Generator(MT19937(self.seed))
+ pvals = [[1 / 6] * 6, [1 / 4] * 6]
+ assert_raises(ValueError, random.multinomial, 1, pvals)
+ assert_raises(ValueError, random.multinomial, 6, 0.5)
+
+ def test_empty_outputs(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6)
+ assert actual.shape == (10, 0, 6, 6)
+ actual = random.multinomial(12, np.empty((10, 0, 10)))
+ assert actual.shape == (10, 0, 10)
+ actual = random.multinomial(np.empty((3, 0, 7), "i8"),
+ np.empty((3, 0, 7, 4)))
+ assert actual.shape == (3, 0, 7, 4)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
+class TestThread:
+ # make sure each state produces the same sequence even in threads
+ def setup_method(self):
+ self.seeds = range(4)
+
+ def check_function(self, function, sz):
+ from threading import Thread
+
+ out1 = np.empty((len(self.seeds),) + sz)
+ out2 = np.empty((len(self.seeds),) + sz)
+
+ # threaded generation
+ t = [Thread(target=function, args=(Generator(MT19937(s)), o))
+ for s, o in zip(self.seeds, out1)]
+ [x.start() for x in t]
+ [x.join() for x in t]
+
+ # the same generation, run serially
+ for s, o in zip(self.seeds, out2):
+ function(Generator(MT19937(s)), o)
+
+ # these platforms change the x87 FPU precision mode in threads
+ if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+ assert_array_almost_equal(out1, out2)
+ else:
+ assert_array_equal(out1, out2)
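+ # (In the branch above, near-equality is accepted because x87
+ # threads may run at a different FPU precision than the main
+ # thread, so bitwise-identical output cannot be guaranteed.)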
+
+ def test_normal(self):
+ def gen_random(state, out):
+ out[...] = state.normal(size=10000)
+
+ self.check_function(gen_random, sz=(10000,))
+
+ def test_exp(self):
+ def gen_random(state, out):
+ out[...] = state.exponential(scale=np.ones((100, 1000)))
+
+ self.check_function(gen_random, sz=(100, 1000))
+
+ def test_multinomial(self):
+ def gen_random(state, out):
+ out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
+
+ self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput:
+ def setup_method(self):
+ self.argOne = np.array([2])
+ self.argTwo = np.array([3])
+ self.argThree = np.array([4])
+ self.tgtShape = (1,)
+
+ def test_one_arg_funcs(self):
+ funcs = (random.exponential, random.standard_gamma,
+ random.chisquare, random.standard_t,
+ random.pareto, random.weibull,
+ random.power, random.rayleigh,
+ random.poisson, random.zipf,
+ random.geometric, random.logseries)
+
+ probfuncs = (random.geometric, random.logseries)
+
+ for func in funcs:
+ if func in probfuncs: # p < 1.0
+ out = func(np.array([0.5]))
+
+ else:
+ out = func(self.argOne)
+
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_two_arg_funcs(self):
+ funcs = (random.uniform, random.normal,
+ random.beta, random.gamma,
+ random.f, random.noncentral_chisquare,
+ random.vonmises, random.laplace,
+ random.gumbel, random.logistic,
+ random.lognormal, random.wald,
+ random.binomial, random.negative_binomial)
+
+ probfuncs = (random.binomial, random.negative_binomial)
+
+ for func in funcs:
+ if func in probfuncs: # p <= 1
+ argTwo = np.array([0.5])
+
+ else:
+ argTwo = self.argTwo
+
+ out = func(self.argOne, argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, argTwo[0])
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_integers(self, endpoint):
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+ func = random.integers
+ high = np.array([1])
+ low = np.array([0])
+
+ for dt in itype:
+ out = func(low, high, endpoint=endpoint, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(low[0], high, endpoint=endpoint, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(low, high[0], endpoint=endpoint, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_three_arg_funcs(self):
+ funcs = [random.noncentral_f, random.triangular,
+ random.hypergeometric]
+
+ for func in funcs:
+ out = func(self.argOne, self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, self.argTwo[0], self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+
+@pytest.mark.parametrize("config", JUMP_TEST_DATA)
+def test_jumped(config):
+ # Each config contains the initial seed, a number of raw steps,
+ # the sha256 hashes of the initial and final states' keys, and
+ # the positions of the initial and final states.
+ # These were produced using the original C implementation.
+ seed = config["seed"]
+ steps = config["steps"]
+
+ mt19937 = MT19937(seed)
+ # Burn step
+ mt19937.random_raw(steps)
+ key = mt19937.state["state"]["key"]
+ if sys.byteorder == 'big':
+ key = key.byteswap()
+ sha256 = hashlib.sha256(key)
+ assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
+ assert sha256.hexdigest() == config["initial"]["key_sha256"]
+
+ jumped = mt19937.jumped()
+ key = jumped.state["state"]["key"]
+ if sys.byteorder == 'big':
+ key = key.byteswap()
+ sha256 = hashlib.sha256(key)
+ assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
+ assert sha256.hexdigest() == config["jumped"]["key_sha256"]
+
+
+def test_broadcast_size_error():
+ mu = np.ones(3)
+ sigma = np.ones((4, 3))
+ size = (10, 4, 2)
+ assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
+ with pytest.raises(ValueError):
+ random.normal(mu, sigma, size=size)
+ with pytest.raises(ValueError):
+ random.normal(mu, sigma, size=(1, 3))
+ with pytest.raises(ValueError):
+ random.normal(mu, sigma, size=(4, 1, 1))
+ # 1 arg
+ shape = np.ones((4, 3))
+ with pytest.raises(ValueError):
+ random.standard_gamma(shape, size=size)
+ with pytest.raises(ValueError):
+ random.standard_gamma(shape, size=(3,))
+ with pytest.raises(ValueError):
+ random.standard_gamma(shape, size=3)
+ # Check out
+ out = np.empty(size)
+ with pytest.raises(ValueError):
+ random.standard_gamma(shape, out=out)
+
+ # 2 arg
+ with pytest.raises(ValueError):
+ random.binomial(1, [0.3, 0.7], size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], 0.3, size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
+ with pytest.raises(ValueError):
+ random.multinomial([2, 2], [.3, .7], size=(2, 1))
+
+ # 3 arg
+ a = random.chisquare(5, size=3)
+ b = random.chisquare(5, size=(4, 3))
+ c = random.chisquare(5, size=(5, 4, 3))
+ assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
+ with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
+ random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
+
+
+def test_broadcast_size_scalar():
+ mu = np.ones(3)
+ sigma = np.ones(3)
+ random.normal(mu, sigma, size=3)
+ with pytest.raises(ValueError):
+ random.normal(mu, sigma, size=2)
+
+
+def test_ragged_shuffle():
+ # GH 18142
+ seq = [[], [], 1]
+ gen = Generator(MT19937(0))
+ assert_no_warnings(gen.shuffle, seq)
+ assert seq == [1, [], []]
+
+
+@pytest.mark.parametrize("high", [-2, [-2]])
+@pytest.mark.parametrize("endpoint", [True, False])
+def test_single_arg_integer_exception(high, endpoint):
+ # GH 14333
+ gen = Generator(MT19937(0))
+ msg = 'high < 0' if endpoint else 'high <= 0'
+ with pytest.raises(ValueError, match=msg):
+ gen.integers(high, endpoint=endpoint)
+ msg = 'low > high' if endpoint else 'low >= high'
+ with pytest.raises(ValueError, match=msg):
+ gen.integers(-1, high, endpoint=endpoint)
+ with pytest.raises(ValueError, match=msg):
+ gen.integers([-1], high, endpoint=endpoint)
+
+
+@pytest.mark.parametrize("dtype", ["f4", "f8"])
+def test_c_contig_req_out(dtype):
+ # GH 18704
+ out = np.empty((2, 3), order="F", dtype=dtype)
+ shape = [1, 2, 3]
+ with pytest.raises(ValueError, match="Supplied output array"):
+ random.standard_gamma(shape, out=out, dtype=dtype)
+ with pytest.raises(ValueError, match="Supplied output array"):
+ random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype)
+
+
+@pytest.mark.parametrize("dtype", ["f4", "f8"])
+@pytest.mark.parametrize("order", ["F", "C"])
+@pytest.mark.parametrize("dist", [random.standard_normal, random.random])
+def test_contig_req_out(dist, order, dtype):
+ # GH 18704
+ out = np.empty((2, 3), dtype=dtype, order=order)
+ variates = dist(out=out, dtype=dtype)
+ assert variates is out
+ variates = dist(out=out, dtype=dtype, size=out.shape)
+ assert variates is out
+
+
+def test_generator_ctor_old_style_pickle():
+ rg = np.random.Generator(np.random.PCG64DXSM(0))
+ rg.standard_normal(1)
+ # Directly call reduce which is used in pickling
+ ctor, args, state_a = rg.__reduce__()
+ # Simulate unpickling an old pickle that only has the name
+ assert args[:1] == ("PCG64DXSM",)
+ b = ctor(*args[:1])
+ b.bit_generator.state = state_a
+ state_b = b.bit_generator.state
+ assert state_a == state_b
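+ # (The second element of __reduce__ starts with the bit generator's
+ # name; old-style pickles stored only that name, so rebuilding from
+ # it and restoring the saved state must round-trip the generator.)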
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py
new file mode 100644
index 00000000..0227d650
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -0,0 +1,150 @@
+from numpy.testing import (assert_, assert_array_equal)
+import numpy as np
+import pytest
+from numpy.random import Generator, MT19937
+
+mt19937 = Generator(MT19937())
+
+
+class TestRegression:
+
+ def test_vonmises_range(self):
+ # Make sure generated random variables are in (-pi, pi].
+ # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5):
+ r = mt19937.vonmises(mu, 1, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
+ assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
+
+ # Test for ticket #5623
+ args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
+ assert_(mt19937.hypergeometric(*args) > 0)
+
+ def test_logseries_convergence(self):
+ # Test for ticket #923
+ N = 1000
+ mt19937 = Generator(MT19937(0))
+ rvsn = mt19937.logseries(0.8, size=N)
+ # these two frequency counts should be close to theoretical
+ # numbers with this large sample
+ # theoretical large N result is 0.49706795
+ freq = np.sum(rvsn == 1) / N
+ msg = f'Frequency was {freq:f}, should be > 0.45'
+ assert_(freq > 0.45, msg)
+ # theoretical large N result is 0.19882718
+ freq = np.sum(rvsn == 2) / N
+ msg = f'Frequency was {freq:f}, should be < 0.23'
+ assert_(freq < 0.23, msg)
+
+ def test_shuffle_mixed_dimension(self):
+ # Test for trac ticket #2074
+ for t in [[1, 2, 3, None],
+ [(1, 1), (2, 2), (3, 3), None],
+ [1, (2, 2), (3, 3), None],
+ [(1, 1), 2, 3, None]]:
+ mt19937 = Generator(MT19937(12345))
+ shuffled = np.array(t, dtype=object)
+ mt19937.shuffle(shuffled)
+ expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
+ assert_array_equal(np.array(shuffled, dtype=object), expected)
+
+ def test_call_within_randomstate(self):
+ # Check that custom BitGenerator does not call into global state
+ res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
+ for i in range(3):
+ mt19937 = Generator(MT19937(i))
+ m = Generator(MT19937(4321))
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ mt19937.multivariate_normal([0], [[0]], size=1)
+ mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
+ mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
+
+ def test_beta_small_parameters(self):
+ # Test that beta with small a and b parameters does not produce
+ # NaNs due to roundoff errors causing 0 / 0, gh-5851
+ mt19937 = Generator(MT19937(1234567890))
+ x = mt19937.beta(0.0001, 0.0001, size=100)
+ assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
+
+ def test_choice_sum_of_probs_tolerance(self):
+ # The sum of probs should be 1.0 with some tolerance.
+ # For low precision dtypes the tolerance was too tight.
+ # See numpy github issue 6123.
+ mt19937 = Generator(MT19937(1234))
+ a = [1, 2, 3]
+ counts = [4, 4, 2]
+ for dt in np.float16, np.float32, np.float64:
+ probs = np.array(counts, dtype=dt) / sum(counts)
+ c = mt19937.choice(a, p=probs)
+ assert_(c in a)
+ with pytest.raises(ValueError):
+ mt19937.choice(a, p=probs*0.9)
+
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ mt19937 = Generator(MT19937(1234))
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ mt19937.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ mt19937 = Generator(MT19937(1234))
+ a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+ for _ in range(1000):
+ mt19937.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_permutation_subclass(self):
+ class N(np.ndarray):
+ pass
+
+ mt19937 = Generator(MT19937(1))
+ orig = np.arange(3).view(N)
+ perm = mt19937.permutation(orig)
+ assert_array_equal(perm, np.array([2, 0, 1]))
+ assert_array_equal(orig, np.arange(3).view(N))
+
+ class M:
+ a = np.arange(5)
+
+ def __array__(self):
+ return self.a
+
+ mt19937 = Generator(MT19937(1))
+ m = M()
+ perm = mt19937.permutation(m)
+ assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
+ assert_array_equal(m.__array__(), np.arange(5))
+
+ def test_gamma_0(self):
+ assert mt19937.standard_gamma(0.0) == 0.0
+ assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
+
+ actual = mt19937.standard_gamma([0.0], dtype='float')
+ expected = np.array([0.], dtype=np.float32)
+ assert_array_equal(actual, expected)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_random.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_random.py
new file mode 100644
index 00000000..0f4e7925
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_random.py
@@ -0,0 +1,1745 @@
+import warnings
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_warns,
+ assert_no_warnings, assert_array_equal, assert_array_almost_equal,
+ suppress_warnings, IS_WASM
+ )
+from numpy import random
+import sys
+
+
+class TestSeed:
+ def test_scalar(self):
+ s = np.random.RandomState(0)
+ assert_equal(s.randint(1000), 684)
+ s = np.random.RandomState(4294967295)
+ assert_equal(s.randint(1000), 419)
+
+ def test_array(self):
+ s = np.random.RandomState(range(10))
+ assert_equal(s.randint(1000), 468)
+ s = np.random.RandomState(np.arange(10))
+ assert_equal(s.randint(1000), 468)
+ s = np.random.RandomState([0])
+ assert_equal(s.randint(1000), 973)
+ s = np.random.RandomState([4294967295])
+ assert_equal(s.randint(1000), 265)
+
+ def test_invalid_scalar(self):
+ # seed must be an unsigned 32-bit integer
+ assert_raises(TypeError, np.random.RandomState, -0.5)
+ assert_raises(ValueError, np.random.RandomState, -1)
+
+ def test_invalid_array(self):
+ # seed must be an unsigned 32-bit integer
+ assert_raises(TypeError, np.random.RandomState, [-0.5])
+ assert_raises(ValueError, np.random.RandomState, [-1])
+ assert_raises(ValueError, np.random.RandomState, [4294967296])
+ assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
+ assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
+
+ def test_invalid_array_shape(self):
+ # gh-9832
+ assert_raises(ValueError, np.random.RandomState,
+ np.array([], dtype=np.int64))
+ assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
+ assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
+ [4, 5, 6]])
+
+
+class TestBinomial:
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ assert_array_equal(random.binomial(zeros, p), zeros)
+
+ def test_p_is_nan(self):
+ # Issue #4571.
+ assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial:
+ def test_basic(self):
+ random.multinomial(100, [0.2, 0.8])
+
+ def test_zero_probability(self):
+ random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+ def test_int_negative_interval(self):
+ assert_(-5 <= random.randint(-5, -1) < -1)
+ x = random.randint(-5, -1, 5)
+ assert_(np.all(-5 <= x))
+ assert_(np.all(x < -1))
+
+ def test_size(self):
+ # gh-3173
+ p = [0.5, 0.5]
+ assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
+ (2, 2, 2))
+
+ assert_raises(TypeError, np.random.multinomial, 1, p,
+ float(1))
+
+ def test_multidimensional_pvals(self):
+ assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]])
+ assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]])
+ assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]])
+ assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]]))
+
+
+class TestSetState:
+ def setup_method(self):
+ self.seed = 1234567890
+ self.prng = random.RandomState(self.seed)
+ self.state = self.prng.get_state()
+
+ def test_basic(self):
+ old = self.prng.tomaxint(16)
+ self.prng.set_state(self.state)
+ new = self.prng.tomaxint(16)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset(self):
+ # Make sure the cached every-other-Gaussian is reset.
+ old = self.prng.standard_normal(size=3)
+ self.prng.set_state(self.state)
+ new = self.prng.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset_in_media_res(self):
+ # When the state is saved with a cached Gaussian, make sure the
+ # cached Gaussian is restored.
+
+ self.prng.standard_normal()
+ state = self.prng.get_state()
+ old = self.prng.standard_normal(size=3)
+ self.prng.set_state(state)
+ new = self.prng.standard_normal(size=3)
+ assert_(np.all(old == new))
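+ # Note: the legacy generator draws normals with the polar method,
+ # which yields them in pairs; the spare variate is cached in the
+ # state tuple (its last two elements), which is why it must be
+ # saved and restored explicitly here.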
+
+ def test_backwards_compatibility(self):
+ # Make sure we can accept old state tuples that do not have the
+ # cached Gaussian value.
+ old_state = self.state[:-2]
+ x1 = self.prng.standard_normal(size=16)
+ self.prng.set_state(old_state)
+ x2 = self.prng.standard_normal(size=16)
+ self.prng.set_state(self.state)
+ x3 = self.prng.standard_normal(size=16)
+ assert_(np.all(x1 == x2))
+ assert_(np.all(x1 == x3))
+
+ def test_negative_binomial(self):
+ # Ensure that negative_binomial accepts floating point
+ # arguments without truncating them to integers.
+ self.prng.negative_binomial(0.5, 0.5)
+
+
+class TestRandint:
+
+ rfunc = np.random.randint
+
+ # valid integer/boolean types
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self):
+ assert_raises(TypeError, self.rfunc, 1, dtype=float)
+
+ def test_bounds_checking(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+ assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+
+ def test_rng_zero_and_extremes(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd)//2
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ def test_full_range(self):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ try:
+ self.rfunc(lbnd, ubnd, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_in_bounds_fuzz(self):
+ # Don't use fixed seed
+ np.random.seed()
+
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+
+ vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
+
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_repeatability(self):
+ import hashlib
+ # We use a sha256 hash of generated sequences of 1000 samples
+ # in the range [0, 6) for all but bool, where the range
+ # is [0, 2). Hashes are for little-endian numbers.
+ tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
+ 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+ 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+ 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+ 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
+ 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+ 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+ 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+ 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
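+ # Equal-width signed/unsigned dtypes share a hash: samples in
+ # [0, 6) have identical little-endian byte patterns either way.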
+
+ for dt in self.itype[1:]:
+ np.random.seed(1234)
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = self.rfunc(0, 6, size=1000, dtype=dt)
+ else:
+ val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+ res = hashlib.sha256(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ np.random.seed(1234)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+ res = hashlib.sha256(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
+
+ def test_int64_uint64_corner_case(self):
+ # When stored in Numpy arrays, `lbnd` is cast
+ # as np.int64, and `ubnd` is cast as np.uint64.
+ # Checking whether `lbnd` >= `ubnd` used to be
+ # done solely via direct comparison, which is incorrect
+ # because when Numpy tries to compare both numbers,
+ # it casts both to np.float64, as there is
+ # no integer superset of np.int64 and np.uint64. However,
+ # `ubnd` is too large to be represented as np.float64,
+ # causing it to be rounded down to np.iinfo(np.int64).max,
+ # leading to a ValueError because `lbnd` now equals
+ # the new `ubnd`.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = np.random.randint(lbnd, ubnd, dtype=dt)
+ assert_equal(actual, tgt)
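+ # The promotion described above is easy to see directly: comparing
+ # lbnd with ubnd goes through np.float64, where 2**63 - 1 is not
+ # representable and rounds up to 2**63, so float(lbnd) == float(ubnd)
+ # even though the integers differ by 1.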
+
+ def test_respect_dtype_singleton(self):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_equal(sample.dtype, np.dtype(dt))
+
+ for dt in (bool, int, np.compat.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
+
+
+class TestRandomDist:
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup_method(self):
+ self.seed = 1234567890
+
+ def test_rand(self):
+ np.random.seed(self.seed)
+ actual = np.random.rand(3, 2)
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn(self):
+ np.random.seed(self.seed)
+ actual = np.random.randn(3, 2)
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randint(self):
+ np.random.seed(self.seed)
+ actual = np.random.randint(-99, 99, size=(3, 2))
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ def test_random_integers(self):
+ np.random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = np.random.random_integers(-99, 99, size=(3, 2))
+ assert_(len(w) == 1)
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ def test_random_integers_max_int(self):
+ # Tests whether random_integers can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method raised an OverflowError when attempting
+ # to generate this integer.
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = np.random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
+ assert_(len(w) == 1)
+
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
+
+ def test_random_integers_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # DeprecationWarning raised with high == None
+ assert_raises(DeprecationWarning,
+ np.random.random_integers,
+ np.iinfo('l').max)
+
+ # DeprecationWarning raised with high != None
+ assert_raises(DeprecationWarning,
+ np.random.random_integers,
+ np.iinfo('l').max, np.iinfo('l').max)
+
+ def test_random(self):
+ np.random.seed(self.seed)
+ actual = np.random.random((3, 2))
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_choice_uniform_replace(self):
+ np.random.seed(self.seed)
+ actual = np.random.choice(4, 4)
+ desired = np.array([2, 3, 2, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_replace(self):
+ np.random.seed(self.seed)
+ actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ desired = np.array([1, 1, 2, 2])
+ assert_array_equal(actual, desired)
+
+ def test_choice_uniform_noreplace(self):
+ np.random.seed(self.seed)
+ actual = np.random.choice(4, 3, replace=False)
+ desired = np.array([0, 1, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_noreplace(self):
+ np.random.seed(self.seed)
+ actual = np.random.choice(4, 3, replace=False,
+ p=[0.1, 0.3, 0.5, 0.1])
+ desired = np.array([2, 3, 1])
+ assert_array_equal(actual, desired)
+
+ def test_choice_noninteger(self):
+ np.random.seed(self.seed)
+ actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
+ desired = np.array(['c', 'd', 'c', 'd'])
+ assert_array_equal(actual, desired)
+
+ def test_choice_exceptions(self):
+ sample = np.random.choice
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
+ assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
+ assert_raises(ValueError, sample, [], 3)
+ assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+ p=[[0.25, 0.25], [0.25, 0.25]])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+ assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], 2,
+ replace=False, p=[1, 0, 0])
+
+ def test_choice_return_shape(self):
+ p = [0.1, 0.9]
+ # Check scalar
+ assert_(np.isscalar(np.random.choice(2, replace=True)))
+ assert_(np.isscalar(np.random.choice(2, replace=False)))
+ assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
+ assert_(np.random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(np.random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
+ assert_(np.random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(np.random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2, 3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_equal(np.random.choice(6, s, replace=True).shape, s)
+ assert_equal(np.random.choice(6, s, replace=False).shape, s)
+ assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
+ assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
+ assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
+
+ # Check zero-size
+ assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
+ assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
+ assert_equal(np.random.choice(0, size=0).shape, (0,))
+ assert_equal(np.random.choice([], size=(0,)).shape, (0,))
+ assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_raises(ValueError, np.random.choice, [], 10)
+
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, np.random.choice, a, p=p)
+
+ def test_bytes(self):
+ np.random.seed(self.seed)
+ actual = np.random.bytes(10)
+ desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
+ assert_equal(actual, desired)
+
+ def test_shuffle(self):
+ # Test lists, arrays (of various dtypes), and multidimensional versions
+ # of both, C-contiguous or not:
+ for conv in [lambda x: np.array([]),
+ lambda x: x,
+ lambda x: np.asarray(x).astype(np.int8),
+ lambda x: np.asarray(x).astype(np.float32),
+ lambda x: np.asarray(x).astype(np.complex64),
+ lambda x: np.asarray(x).astype(object),
+ lambda x: [(i, i) for i in x],
+ lambda x: np.asarray([[i, i] for i in x]),
+ lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
+ # gh-4270
+ lambda x: np.asarray([(i, i) for i in x],
+ [("a", object), ("b", np.int32)])]:
+ np.random.seed(self.seed)
+ alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+ np.random.shuffle(alist)
+ actual = alist
+ desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_masked(self):
+ # gh-3263
+ a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+ b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+ a_orig = a.copy()
+ b_orig = b.copy()
+ for i in range(50):
+ np.random.shuffle(a)
+ assert_equal(
+ sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+ np.random.shuffle(b)
+ assert_equal(
+ sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+ @pytest.mark.parametrize("random",
+ [np.random, np.random.RandomState(), np.random.default_rng()])
+ def test_shuffle_untyped_warning(self, random):
+ # Create a dict; it works like a sequence but isn't one
+ values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
+ with pytest.warns(UserWarning,
+ match="you are shuffling a 'dict' object") as rec:
+ random.shuffle(values)
+ assert "test_random" in rec[0].filename
+
+ @pytest.mark.parametrize("random",
+ [np.random, np.random.RandomState(), np.random.default_rng()])
+ @pytest.mark.parametrize("use_array_like", [True, False])
+ def test_shuffle_no_object_unpacking(self, random, use_array_like):
+ class MyArr(np.ndarray):
+ pass
+
+ items = [
+ None, np.array([3]), np.float64(3), np.array(10), np.float64(7)
+ ]
+ arr = np.array(items, dtype=object)
+ item_ids = {id(i) for i in items}
+ if use_array_like:
+ arr = arr.view(MyArr)
+
+ # The array was created fine, and did not modify any objects:
+ assert all(id(i) in item_ids for i in arr)
+
+ if use_array_like and not isinstance(random, np.random.Generator):
+ # The old API gives incorrect results, but warns about it.
+ with pytest.warns(UserWarning,
+ match="Shuffling a one dimensional array.*"):
+ random.shuffle(arr)
+ else:
+ random.shuffle(arr)
+ assert all(id(i) in item_ids for i in arr)
+
+ def test_shuffle_memoryview(self):
+ # gh-18273
+ # allow graceful handling of memoryviews
+ # (treat the same as arrays)
+ np.random.seed(self.seed)
+ a = np.arange(5).data
+ np.random.shuffle(a)
+ assert_equal(np.asarray(a), [0, 1, 4, 3, 2])
+ rng = np.random.RandomState(self.seed)
+ rng.shuffle(a)
+ assert_equal(np.asarray(a), [0, 1, 2, 3, 4])
+ rng = np.random.default_rng(self.seed)
+ rng.shuffle(a)
+ assert_equal(np.asarray(a), [4, 1, 0, 3, 2])
+
+ def test_shuffle_not_writeable(self):
+ a = np.zeros(3)
+ a.flags.writeable = False
+ with pytest.raises(ValueError, match='read-only'):
+ np.random.shuffle(a)
+
+ def test_beta(self):
+ np.random.seed(self.seed)
+ actual = np.random.beta(.1, .9, size=(3, 2))
+ desired = np.array(
+ [[1.45341850513746058e-02, 5.31297615662868145e-04],
+ [1.85366619058432324e-06, 4.19214516800110563e-03],
+ [1.58405155108498093e-04, 1.26252891949397652e-04]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_binomial(self):
+ np.random.seed(self.seed)
+ actual = np.random.binomial(100, .456, size=(3, 2))
+ desired = np.array([[37, 43],
+ [42, 48],
+ [46, 45]])
+ assert_array_equal(actual, desired)
+
+ def test_chisquare(self):
+ np.random.seed(self.seed)
+ actual = np.random.chisquare(50, size=(3, 2))
+ desired = np.array([[63.87858175501090585, 68.68407748911370447],
+ [65.77116116901505904, 47.09686762438974483],
+ [72.3828403199695174, 74.18408615260374006]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_dirichlet(self):
+ np.random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.54539444573611562, 0.45460555426388438],
+ [0.62345816822039413, 0.37654183177960598]],
+ [[0.55206000085785778, 0.44793999914214233],
+ [0.58964023305154301, 0.41035976694845688]],
+ [[0.59266909280647828, 0.40733090719352177],
+ [0.56974431743975207, 0.43025568256024799]]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_dirichlet_size(self):
+ # gh-3173
+ p = np.array([51.72840233779265162, 39.74494232180943953])
+ assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+ assert_raises(TypeError, np.random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
+
+ # gh-15876
+ assert_raises(ValueError, random.dirichlet, [[5, 1]])
+ assert_raises(ValueError, random.dirichlet, [[5], [1]])
+ assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
+ assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
+
+ def test_exponential(self):
+ np.random.seed(self.seed)
+ actual = np.random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[1.08342649775011624, 1.00607889924557314],
+ [2.46628830085216721, 2.49668106809923884],
+ [0.68717433461363442, 1.69175666993575979]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_exponential_0(self):
+ assert_equal(np.random.exponential(scale=0), 0)
+ assert_raises(ValueError, np.random.exponential, scale=-0.)
+
+ def test_f(self):
+ np.random.seed(self.seed)
+ actual = np.random.f(12, 77, size=(3, 2))
+ desired = np.array([[1.21975394418575878, 1.75135759791559775],
+ [1.44803115017146489, 1.22108959480396262],
+ [1.02176975757740629, 1.34431827623300415]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gamma(self):
+ np.random.seed(self.seed)
+ actual = np.random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[24.60509188649287182, 28.54993563207210627],
+ [26.13476110204064184, 12.56988482927716078],
+ [31.71863275789960568, 33.30143302795922011]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_gamma_0(self):
+ assert_equal(np.random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
+
+ def test_geometric(self):
+ np.random.seed(self.seed)
+ actual = np.random.geometric(.123456789, size=(3, 2))
+ desired = np.array([[8, 7],
+ [17, 17],
+ [5, 12]])
+ assert_array_equal(actual, desired)
+
+ def test_gumbel(self):
+ np.random.seed(self.seed)
+ actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.19591898743416816, 0.34405539668096674],
+ [-1.4492522252274278, -1.47374816298446865],
+ [1.10651090478803416, -0.69535848626236174]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gumbel_0(self):
+ assert_equal(np.random.gumbel(scale=0), 0)
+ assert_raises(ValueError, np.random.gumbel, scale=-0.)
+
+ def test_hypergeometric(self):
+ np.random.seed(self.seed)
+ actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
+ desired = np.array([[10, 10],
+ [10, 10],
+ [9, 9]])
+ assert_array_equal(actual, desired)
+
+ # Test nbad = 0
+ actual = np.random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ assert_array_equal(actual, desired)
+
+ actual = np.random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = np.random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ actual = np.random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ def test_laplace(self):
+ np.random.seed(self.seed)
+ actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.66599721112760157, 0.52829452552221945],
+ [3.12791959514407125, 3.18202813572992005],
+ [-0.05391065675859356, 1.74901336242837324]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_laplace_0(self):
+ assert_equal(np.random.laplace(scale=0), 0)
+ assert_raises(ValueError, np.random.laplace, scale=-0.)
+
+ def test_logistic(self):
+ np.random.seed(self.seed)
+ actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[1.09232835305011444, 0.8648196662399954],
+ [4.27818590694950185, 4.33897006346929714],
+ [-0.21682183359214885, 2.63373365386060332]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_lognormal(self):
+ np.random.seed(self.seed)
+ actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[16.50698631688883822, 36.54846706092654784],
+ [22.67886599981281748, 0.71617561058995771],
+ [65.72798501792723869, 86.84341601437161273]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_lognormal_0(self):
+ assert_equal(np.random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, np.random.lognormal, sigma=-0.)
+
+ def test_logseries(self):
+ np.random.seed(self.seed)
+ actual = np.random.logseries(p=.923456789, size=(3, 2))
+ desired = np.array([[2, 2],
+ [6, 17],
+ [3, 6]])
+ assert_array_equal(actual, desired)
+
+ def test_multinomial(self):
+ np.random.seed(self.seed)
+ actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
+ desired = np.array([[[4, 3, 5, 4, 2, 2],
+ [5, 2, 8, 2, 2, 1]],
+ [[3, 4, 3, 6, 0, 4],
+ [2, 1, 4, 3, 6, 4]],
+ [[4, 4, 2, 5, 2, 3],
+ [4, 3, 4, 2, 3, 4]]])
+ assert_array_equal(actual, desired)
+
+ def test_multivariate_normal(self):
+ np.random.seed(self.seed)
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = np.random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[1.463620246718631, 11.73759122771936],
+ [1.622445133300628, 9.771356667546383]],
+ [[2.154490787682787, 12.170324946056553],
+ [1.719909438201865, 9.230548443648306]],
+ [[0.689515026297799, 9.880729819607714],
+ [-0.023054015651998, 9.201096623542879]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check for default size, which used to raise a deprecation warning
+ actual = np.random.multivariate_normal(mean, cov)
+ desired = np.array([0.895289569463708, 9.17180864067987])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check that a non-positive-semidefinite covariance matrix warns
+ # with RuntimeWarning
+ mean = [0, 0]
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
+
+ # and that it doesn't warn with check_valid='ignore'
+ assert_no_warnings(np.random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises a ValueError with check_valid='raise'
+ assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ np.random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ def test_negative_binomial(self):
+ np.random.seed(self.seed)
+ actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[848, 841],
+ [892, 611],
+ [779, 647]])
+ assert_array_equal(actual, desired)
+
+ def test_noncentral_chisquare(self):
+ np.random.seed(self.seed)
+ actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[23.91905354498517511, 13.35324692733826346],
+ [31.22452661329736401, 16.60047399466177254],
+ [5.03461598262724586, 17.94973089023519464]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[1.47145377828516666, 0.15052899268012659],
+ [0.00943803056963588, 1.02647251615666169],
+ [0.332334982684171, 0.15451287602753125]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ np.random.seed(self.seed)
+ actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[9.597154162763948, 11.725484450296079],
+ [10.413711048138335, 3.694475922923986],
+ [13.484222138963087, 14.377255424602957]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ np.random.seed(self.seed)
+ actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ size=(3, 2))
+ desired = np.array([[1.40598099674926669, 0.34207973179285761],
+ [3.57715069265772545, 7.92632662577829805],
+ [0.43741599463544162, 1.1774208752428319]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ np.random.seed(self.seed)
+ actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[2.80378370443726244, 3.59863924443872163],
+ [3.121433477601256, -0.33382987590723379],
+ [4.18552478636557357, 4.46410668111310471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_normal_0(self):
+ assert_equal(np.random.normal(scale=0), 0)
+ assert_raises(ValueError, np.random.normal, scale=-0.)
+
+ def test_pareto(self):
+ np.random.seed(self.seed)
+ actual = np.random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array(
+ [[2.46852460439034849e+03, 1.41286880810518346e+03],
+ [5.28287797029485181e+07, 6.57720981047328785e+07],
+ [1.40840323350391515e+02, 1.98390255135251704e+05]])
+ # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+ # matrix differs by 24 nulps. Discussion:
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # Consensus is that this is probably some gcc quirk that affects
+ # rounding but not in any important way, so we just use a looser
+ # tolerance on this test:
+ np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
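+ # A "nulp" is one unit in the last place: the check passes when
+ # |actual - desired| <= nulp * spacing(max(|actual|, |desired|)).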
+
+ def test_poisson(self):
+ np.random.seed(self.seed)
+ actual = np.random.poisson(lam=.123456789, size=(3, 2))
+ desired = np.array([[0, 0],
+ [1, 0],
+ [0, 0]])
+ assert_array_equal(actual, desired)
+
+ def test_poisson_exceptions(self):
+ lambig = np.iinfo('l').max
+ lamneg = -1
+ assert_raises(ValueError, np.random.poisson, lamneg)
+ assert_raises(ValueError, np.random.poisson, [lamneg]*10)
+ assert_raises(ValueError, np.random.poisson, lambig)
+ assert_raises(ValueError, np.random.poisson, [lambig]*10)
+
+ def test_power(self):
+ np.random.seed(self.seed)
+ actual = np.random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[0.02048932883240791, 0.01424192241128213],
+ [0.38446073748535298, 0.39499689943484395],
+ [0.00177699707563439, 0.13115505880863756]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rayleigh(self):
+ np.random.seed(self.seed)
+ actual = np.random.rayleigh(scale=10, size=(3, 2))
+ desired = np.array([[13.8882496494248393, 13.383318339044731],
+ [20.95413364294492098, 21.08285015800712614],
+ [11.06066537006854311, 17.35468505778271009]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_rayleigh_0(self):
+ assert_equal(np.random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, np.random.rayleigh, scale=-0.)
+
+ def test_standard_cauchy(self):
+ np.random.seed(self.seed)
+ actual = np.random.standard_cauchy(size=(3, 2))
+ desired = np.array([[0.77127660196445336, -6.55601161955910605],
+ [0.93582023391158309, -2.07479293013759447],
+ [-4.74601644297011926, 0.18338989290760804]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ np.random.seed(self.seed)
+ actual = np.random.standard_exponential(size=(3, 2))
+ desired = np.array([[0.96441739162374596, 0.89556604882105506],
+ [2.1953785836319808, 2.22243285392490542],
+ [0.6116915921431676, 1.50592546727413201]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_gamma(self):
+ np.random.seed(self.seed)
+ actual = np.random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[5.50841531318455058, 6.62953470301903103],
+ [5.93988484943779227, 2.31044849402133989],
+ [7.54838614231317084, 8.012756093271868]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gamma_0(self):
+ assert_equal(np.random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ np.random.seed(self.seed)
+ actual = np.random.standard_normal(size=(3, 2))
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_t(self):
+ np.random.seed(self.seed)
+ actual = np.random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[0.97140611862659965, -0.08830486548450577],
+ [1.36311143689505321, -0.55317463909867071],
+ [-0.18473749069684214, 0.61181537341755321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ np.random.seed(self.seed)
+ actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[12.68117178949215784, 12.4129206149193152],
+ [16.20131377335158263, 16.25692138747600524],
+ [11.20400690911820263, 14.4978144835829923]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ np.random.seed(self.seed)
+ actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[6.99097932346268003, 6.73801597444323974],
+ [9.50364421400426274, 9.53130618907631089],
+ [5.48995325769805476, 8.47493103280052118]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = np.random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw.
+ # Account for i386 extended precision, where DBL_MAX / 1e17 + DBL_MAX
+ # can exceed DBL_MAX, by increasing fmin a bit.
+ np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ #
+ # Regression test for gh: 8865
+
+ class ThrowingFloat(np.ndarray):
+ def __float__(self):
+ raise TypeError
+
+ throwing_float = np.array(1.0).view(ThrowingFloat)
+ assert_raises(TypeError, np.random.uniform, throwing_float,
+ throwing_float)
+
+ class ThrowingInteger(np.ndarray):
+ def __int__(self):
+ raise TypeError
+
+ __index__ = __int__
+
+ throwing_int = np.array(1).view(ThrowingInteger)
+ assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
+
+ def test_vonmises(self):
+ np.random.seed(self.seed)
+ actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ desired = np.array([[2.28567572673902042, 2.89163838442285037],
+ [0.38198375564286025, 2.57638023113890746],
+ [1.19153771588353052, 1.83509849681825354]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_vonmises_small(self):
+ # check infinite loop, gh-4720
+ np.random.seed(self.seed)
+ r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+ np.testing.assert_(np.isfinite(r).all())
+
+ def test_wald(self):
+ np.random.seed(self.seed)
+ actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[3.82935265715889983, 5.13125249184285526],
+ [0.35045403618358717, 1.50832396872003538],
+ [0.24124319895843183, 0.22031101461955038]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_weibull(self):
+ np.random.seed(self.seed)
+ actual = np.random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[0.97097342648766727, 0.91422896443565516],
+ [1.89517770034962929, 1.91414357960479564],
+ [0.67057783752390987, 1.39494046635066793]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_weibull_0(self):
+ np.random.seed(self.seed)
+ assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, np.random.weibull, a=-0.)
+
+ def test_zipf(self):
+ np.random.seed(self.seed)
+ actual = np.random.zipf(a=1.23, size=(3, 2))
+ desired = np.array([[66, 29],
+ [1, 1],
+ [3, 13]])
+ assert_array_equal(actual, desired)
+
+
+class TestBroadcast:
+ # tests that functions that broadcast behave
+ # correctly when presented with non-scalar arguments
+ def setup_method(self):
+ self.seed = 123456789
+
+ def setSeed(self):
+ np.random.seed(self.seed)
+
+ # TODO: Include test for randint once it can broadcast
+ # Can steal the test written in PR #6938
+
+ def test_uniform(self):
+ low = [0]
+ high = [1]
+ uniform = np.random.uniform
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.setSeed()
+ actual = uniform(low * 3, high)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ self.setSeed()
+ actual = uniform(low, high * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ normal = np.random.normal
+ desired = np.array([2.2129019979039612,
+ 2.1283977976520019,
+ 1.8417114045748335])
+
+ self.setSeed()
+ actual = normal(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = normal(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc, bad_scale * 3)
+
+ def test_beta(self):
+ a = [1]
+ b = [2]
+ bad_a = [-1]
+ bad_b = [-2]
+ beta = np.random.beta
+ desired = np.array([0.19843558305989056,
+ 0.075230336409423643,
+ 0.24976865978980844])
+
+ self.setSeed()
+ actual = beta(a * 3, b)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a * 3, b)
+ assert_raises(ValueError, beta, a * 3, bad_b)
+
+ self.setSeed()
+ actual = beta(a, b * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a, b * 3)
+ assert_raises(ValueError, beta, a, bad_b * 3)
+
+ def test_exponential(self):
+ scale = [1]
+ bad_scale = [-1]
+ exponential = np.random.exponential
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.setSeed()
+ actual = exponential(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, exponential, bad_scale * 3)
+
+ def test_standard_gamma(self):
+ shape = [1]
+ bad_shape = [-1]
+ std_gamma = np.random.standard_gamma
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.setSeed()
+ actual = std_gamma(shape * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, std_gamma, bad_shape * 3)
+
+ def test_gamma(self):
+ shape = [1]
+ scale = [2]
+ bad_shape = [-1]
+ bad_scale = [-2]
+ gamma = np.random.gamma
+ desired = np.array([1.5221370731769048,
+ 1.5277256455738331,
+ 1.4248762625178359])
+
+ self.setSeed()
+ actual = gamma(shape * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape * 3, scale)
+ assert_raises(ValueError, gamma, shape * 3, bad_scale)
+
+ self.setSeed()
+ actual = gamma(shape, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape, scale * 3)
+ assert_raises(ValueError, gamma, shape, bad_scale * 3)
+
+ def test_f(self):
+ dfnum = [1]
+ dfden = [2]
+ bad_dfnum = [-1]
+ bad_dfden = [-2]
+ f = np.random.f
+ desired = np.array([0.80038951638264799,
+ 0.86768719635363512,
+ 2.7251095168386801])
+
+ self.setSeed()
+ actual = f(dfnum * 3, dfden)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum * 3, dfden)
+ assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+
+ self.setSeed()
+ actual = f(dfnum, dfden * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum, dfden * 3)
+ assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+
+ def test_noncentral_f(self):
+ dfnum = [2]
+ dfden = [3]
+ nonc = [4]
+ bad_dfnum = [0]
+ bad_dfden = [-1]
+ bad_nonc = [-2]
+ nonc_f = np.random.noncentral_f
+ desired = np.array([9.1393943263705211,
+ 13.025456344595602,
+ 8.8018098359100545])
+
+ self.setSeed()
+ actual = nonc_f(dfnum * 3, dfden, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+
+ self.setSeed()
+ actual = nonc_f(dfnum, dfden * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+
+ self.setSeed()
+ actual = nonc_f(dfnum, dfden, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+
+ def test_noncentral_f_small_df(self):
+ self.setSeed()
+ desired = np.array([6.869638627492048, 0.785880199263955])
+ actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_chisquare(self):
+ df = [1]
+ bad_df = [-1]
+ chisquare = np.random.chisquare
+ desired = np.array([0.57022801133088286,
+ 0.51947702108840776,
+ 0.1320969254923558])
+
+ self.setSeed()
+ actual = chisquare(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, chisquare, bad_df * 3)
+
+ def test_noncentral_chisquare(self):
+ df = [1]
+ nonc = [2]
+ bad_df = [-1]
+ bad_nonc = [-2]
+ nonc_chi = np.random.noncentral_chisquare
+ desired = np.array([9.0015599467913763,
+ 4.5804135049718742,
+ 6.0872302432834564])
+
+ self.setSeed()
+ actual = nonc_chi(df * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
+ assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+
+ self.setSeed()
+ actual = nonc_chi(df, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
+ assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+
+ def test_standard_t(self):
+ df = [1]
+ bad_df = [-1]
+ t = np.random.standard_t
+ desired = np.array([3.0702872575217643,
+ 5.8560725167361607,
+ 1.0274791436474273])
+
+ self.setSeed()
+ actual = t(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, t, bad_df * 3)
+
+ def test_vonmises(self):
+ mu = [2]
+ kappa = [1]
+ bad_kappa = [-1]
+ vonmises = np.random.vonmises
+ desired = np.array([2.9883443664201312,
+ -2.7064099483995943,
+ -1.8672476700665914])
+
+ self.setSeed()
+ actual = vonmises(mu * 3, kappa)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+
+ self.setSeed()
+ actual = vonmises(mu, kappa * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+
+ def test_pareto(self):
+ a = [1]
+ bad_a = [-1]
+ pareto = np.random.pareto
+ desired = np.array([1.1405622680198362,
+ 1.1465519762044529,
+ 1.0389564467453547])
+
+ self.setSeed()
+ actual = pareto(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, pareto, bad_a * 3)
+
+ def test_weibull(self):
+ a = [1]
+ bad_a = [-1]
+ weibull = np.random.weibull
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.setSeed()
+ actual = weibull(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, weibull, bad_a * 3)
+
+ def test_power(self):
+ a = [1]
+ bad_a = [-1]
+ power = np.random.power
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.setSeed()
+ actual = power(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, power, bad_a * 3)
+
+ def test_laplace(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ laplace = np.random.laplace
+ desired = np.array([0.067921356028507157,
+ 0.070715642226971326,
+ 0.019290950698972624])
+
+ self.setSeed()
+ actual = laplace(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = laplace(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc, bad_scale * 3)
+
+ def test_gumbel(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ gumbel = np.random.gumbel
+ desired = np.array([0.2730318639556768,
+ 0.26936705726291116,
+ 0.33906220393037939])
+
+ self.setSeed()
+ actual = gumbel(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = gumbel(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+
+ def test_logistic(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ logistic = np.random.logistic
+ desired = np.array([0.13152135837586171,
+ 0.13675915696285773,
+ 0.038216792802833396])
+
+ self.setSeed()
+ actual = logistic(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = logistic(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc, bad_scale * 3)
+
+ def test_lognormal(self):
+ mean = [0]
+ sigma = [1]
+ bad_sigma = [-1]
+ lognormal = np.random.lognormal
+ desired = np.array([9.1422086044848427,
+ 8.4013952870126261,
+ 6.3073234116578671])
+
+ self.setSeed()
+ actual = lognormal(mean * 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+
+ self.setSeed()
+ actual = lognormal(mean, sigma * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ rayleigh = np.random.rayleigh
+ desired = np.array([1.2337491937897689,
+ 1.2360119924878694,
+ 1.1936818095781789])
+
+ self.setSeed()
+ actual = rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ wald = np.random.wald
+ desired = np.array([0.11873681120271318,
+ 0.12450084820795027,
+ 0.9096122728408238])
+
+ self.setSeed()
+ actual = wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean * 3, scale)
+ assert_raises(ValueError, wald, mean * 3, bad_scale)
+
+ self.setSeed()
+ actual = wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean, scale * 3)
+ assert_raises(ValueError, wald, mean, bad_scale * 3)
+ assert_raises(ValueError, wald, 0.0, 1)
+ assert_raises(ValueError, wald, 0.5, 0.0)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+        bad_left_two, bad_mode_two = right * 2  # both unpack to 3 (== right[0])
+ triangular = np.random.triangular
+ desired = np.array([2.03339048710429,
+ 2.0347400359389356,
+ 2.0095991069536208])
+
+ self.setSeed()
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ self.setSeed()
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ self.setSeed()
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
+ def test_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ binom = np.random.binomial
+ desired = np.array([1, 1, 1])
+
+ self.setSeed()
+ actual = binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n * 3, p)
+ assert_raises(ValueError, binom, n * 3, bad_p_one)
+ assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+ self.setSeed()
+ actual = binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n, p * 3)
+ assert_raises(ValueError, binom, n, bad_p_one * 3)
+ assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+ def test_negative_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ neg_binom = np.random.negative_binomial
+ desired = np.array([1, 0, 1])
+
+ self.setSeed()
+ actual = neg_binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n * 3, p)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+ self.setSeed()
+ actual = neg_binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n, p * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+ def test_poisson(self):
+        max_lam = np.random.RandomState()._poisson_lam_max  # largest lam poisson() accepts
+
+ lam = [1]
+ bad_lam_one = [-1]
+ bad_lam_two = [max_lam * 2]
+ poisson = np.random.poisson
+ desired = np.array([1, 1, 0])
+
+ self.setSeed()
+ actual = poisson(lam * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, poisson, bad_lam_one * 3)
+ assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+ def test_zipf(self):
+ a = [2]
+ bad_a = [0]
+ zipf = np.random.zipf
+ desired = np.array([2, 2, 1])
+
+ self.setSeed()
+ actual = zipf(a * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, zipf, bad_a * 3)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, zipf, np.nan)
+ assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+ def test_geometric(self):
+ p = [0.5]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ geom = np.random.geometric
+ desired = np.array([2, 2, 2])
+
+ self.setSeed()
+ actual = geom(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, geom, bad_p_one * 3)
+ assert_raises(ValueError, geom, bad_p_two * 3)
+
+ def test_hypergeometric(self):
+ ngood = [1]
+ nbad = [2]
+ nsample = [2]
+ bad_ngood = [-1]
+ bad_nbad = [-2]
+        bad_nsample_one = [0]  # nsample must be at least 1
+        bad_nsample_two = [4]  # nsample cannot exceed ngood + nbad == 3
+ hypergeom = np.random.hypergeometric
+ desired = np.array([1, 1, 1])
+
+ self.setSeed()
+ actual = hypergeom(ngood * 3, nbad, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
+
+ self.setSeed()
+ actual = hypergeom(ngood, nbad * 3, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
+
+ self.setSeed()
+ actual = hypergeom(ngood, nbad, nsample * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
+ def test_logseries(self):
+ p = [0.5]
+ bad_p_one = [2]
+ bad_p_two = [-1]
+ logseries = np.random.logseries
+ desired = np.array([1, 1, 1])
+
+ self.setSeed()
+ actual = logseries(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, logseries, bad_p_one * 3)
+ assert_raises(ValueError, logseries, bad_p_two * 3)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
+class TestThread:
+ # make sure each state produces the same sequence even in threads
+ def setup_method(self):
+ self.seeds = range(4)
+
+ def check_function(self, function, sz):
+ from threading import Thread
+
+ out1 = np.empty((len(self.seeds),) + sz)
+ out2 = np.empty((len(self.seeds),) + sz)
+
+ # threaded generation
+ t = [Thread(target=function, args=(np.random.RandomState(s), o))
+ for s, o in zip(self.seeds, out1)]
+ [x.start() for x in t]
+ [x.join() for x in t]
+
+        # the same generation, done serially
+ for s, o in zip(self.seeds, out2):
+ function(np.random.RandomState(s), o)
+
+        # these platforms change the x87 FPU precision mode in threads,
+        # so only require approximate equality there
+ if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+ assert_array_almost_equal(out1, out2)
+ else:
+ assert_array_equal(out1, out2)
+
+ def test_normal(self):
+ def gen_random(state, out):
+ out[...] = state.normal(size=10000)
+ self.check_function(gen_random, sz=(10000,))
+
+ def test_exp(self):
+ def gen_random(state, out):
+ out[...] = state.exponential(scale=np.ones((100, 1000)))
+ self.check_function(gen_random, sz=(100, 1000))
+
+ def test_multinomial(self):
+ def gen_random(state, out):
+ out[...] = state.multinomial(10, [1/6.]*6, size=10000)
+ self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput:
+ def setup_method(self):
+ self.argOne = np.array([2])
+ self.argTwo = np.array([3])
+ self.argThree = np.array([4])
+ self.tgtShape = (1,)
+
+ def test_one_arg_funcs(self):
+ funcs = (np.random.exponential, np.random.standard_gamma,
+ np.random.chisquare, np.random.standard_t,
+ np.random.pareto, np.random.weibull,
+ np.random.power, np.random.rayleigh,
+ np.random.poisson, np.random.zipf,
+ np.random.geometric, np.random.logseries)
+
+ probfuncs = (np.random.geometric, np.random.logseries)
+
+ for func in funcs:
+ if func in probfuncs: # p < 1.0
+ out = func(np.array([0.5]))
+
+ else:
+ out = func(self.argOne)
+
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_two_arg_funcs(self):
+ funcs = (np.random.uniform, np.random.normal,
+ np.random.beta, np.random.gamma,
+ np.random.f, np.random.noncentral_chisquare,
+ np.random.vonmises, np.random.laplace,
+ np.random.gumbel, np.random.logistic,
+ np.random.lognormal, np.random.wald,
+ np.random.binomial, np.random.negative_binomial)
+
+ probfuncs = (np.random.binomial, np.random.negative_binomial)
+
+ for func in funcs:
+ if func in probfuncs: # p <= 1
+ argTwo = np.array([0.5])
+
+ else:
+ argTwo = self.argTwo
+
+ out = func(self.argOne, argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, argTwo[0])
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_randint(self):
+ itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+ func = np.random.randint
+ high = np.array([1])
+ low = np.array([0])
+
+ for dt in itype:
+ out = func(low, high, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(low[0], high, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(low, high[0], dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_three_arg_funcs(self):
+ funcs = [np.random.noncentral_f, np.random.triangular,
+ np.random.hypergeometric]
+
+ for func in funcs:
+ out = func(self.argOne, self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, self.argTwo[0], self.argThree)
+ assert_equal(out.shape, self.tgtShape)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py
new file mode 100644
index 00000000..8b911cb3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate.py
@@ -0,0 +1,2117 @@
+import hashlib
+import pickle
+import sys
+import warnings
+
+import numpy as np
+import pytest
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_warns,
+ assert_no_warnings, assert_array_equal, assert_array_almost_equal,
+ suppress_warnings, IS_WASM
+ )
+
+from numpy.random import MT19937, PCG64
+from numpy import random
+
+INT_FUNCS = {'binomial': (100.0, 0.6),
+ 'geometric': (.5,),
+ 'hypergeometric': (20, 20, 10),
+ 'logseries': (.5,),
+ 'multinomial': (20, np.ones(6) / 6.0),
+ 'negative_binomial': (100, .5),
+ 'poisson': (10.0,),
+ 'zipf': (2,),
+ }
+
+if np.iinfo(int).max < 2**32:
+ # Windows and some 32-bit platforms, e.g., ARM
+ INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263',
+ 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb',
+ 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf',
+ 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67',
+ 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3',
+ 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824',
+ 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7',
+ 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f',
+ }
+else:
+ INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112',
+ 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9',
+ 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657',
+ 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db',
+ 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605',
+ 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61',
+ 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4',
+ 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45',
+ }
+
+
+@pytest.fixture(scope='module', params=INT_FUNCS)
+def int_func(request):
+ return (request.param, INT_FUNCS[request.param],
+ INT_FUNC_HASHES[request.param])
+
+
+@pytest.fixture
+def restore_singleton_bitgen():
+ """Ensures that the singleton bitgen is restored after a test"""
+ orig_bitgen = np.random.get_bit_generator()
+ yield
+ np.random.set_bit_generator(orig_bitgen)
+
+
+def assert_mt19937_state_equal(a, b):
+ assert_equal(a['bit_generator'], b['bit_generator'])
+ assert_array_equal(a['state']['key'], b['state']['key'])
+ assert_array_equal(a['state']['pos'], b['state']['pos'])
+ assert_equal(a['has_gauss'], b['has_gauss'])
+ assert_equal(a['gauss'], b['gauss'])
+
+
+class TestSeed:
+ def test_scalar(self):
+ s = random.RandomState(0)
+ assert_equal(s.randint(1000), 684)
+ s = random.RandomState(4294967295)
+ assert_equal(s.randint(1000), 419)
+
+ def test_array(self):
+ s = random.RandomState(range(10))
+ assert_equal(s.randint(1000), 468)
+ s = random.RandomState(np.arange(10))
+ assert_equal(s.randint(1000), 468)
+ s = random.RandomState([0])
+ assert_equal(s.randint(1000), 973)
+ s = random.RandomState([4294967295])
+ assert_equal(s.randint(1000), 265)
+
+ def test_invalid_scalar(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, random.RandomState, -0.5)
+ assert_raises(ValueError, random.RandomState, -1)
+
+ def test_invalid_array(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, random.RandomState, [-0.5])
+ assert_raises(ValueError, random.RandomState, [-1])
+ assert_raises(ValueError, random.RandomState, [4294967296])
+ assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
+ assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
+
+ def test_invalid_array_shape(self):
+ # gh-9832
+ assert_raises(ValueError, random.RandomState, np.array([],
+ dtype=np.int64))
+ assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
+ assert_raises(ValueError, random.RandomState, [[1, 2, 3],
+ [4, 5, 6]])
+
+ def test_cannot_seed(self):
+ rs = random.RandomState(PCG64(0))
+ with assert_raises(TypeError):
+ rs.seed(1234)
+
+ def test_invalid_initialization(self):
+ assert_raises(ValueError, random.RandomState, MT19937)
+
+
+class TestBinomial:
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ assert_array_equal(random.binomial(zeros, p), zeros)
+
+ def test_p_is_nan(self):
+ # Issue #4571.
+ assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial:
+ def test_basic(self):
+ random.multinomial(100, [0.2, 0.8])
+
+ def test_zero_probability(self):
+ random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+ def test_int_negative_interval(self):
+ assert_(-5 <= random.randint(-5, -1) < -1)
+ x = random.randint(-5, -1, 5)
+ assert_(np.all(-5 <= x))
+ assert_(np.all(x < -1))
+
+ def test_size(self):
+ # gh-3173
+ p = [0.5, 0.5]
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
+ (2, 2, 2))
+
+ assert_raises(TypeError, random.multinomial, 1, p,
+ float(1))
+
+ def test_invalid_prob(self):
+ assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
+ assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
+
+ def test_invalid_n(self):
+ assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
+
+ def test_p_non_contiguous(self):
+ p = np.arange(15.)
+ p /= np.sum(p[1::3])
+ pvals = p[1::3]
+ random.seed(1432985819)
+ non_contig = random.multinomial(100, pvals=pvals)
+ random.seed(1432985819)
+ contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
+ assert_array_equal(non_contig, contig)
+
+ def test_multinomial_pvals_float32(self):
+ x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
+ 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
+ pvals = x / x.sum()
+ match = r"[\w\s]*pvals array is cast to 64-bit floating"
+ with pytest.raises(ValueError, match=match):
+ random.multinomial(1, pvals)
+
+
+class TestSetState:
+ def setup_method(self):
+ self.seed = 1234567890
+ self.random_state = random.RandomState(self.seed)
+ self.state = self.random_state.get_state()
+
+ def test_basic(self):
+ old = self.random_state.tomaxint(16)
+ self.random_state.set_state(self.state)
+ new = self.random_state.tomaxint(16)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset(self):
+        # Make sure the cached second Gaussian of a pair is reset.
+ old = self.random_state.standard_normal(size=3)
+ self.random_state.set_state(self.state)
+ new = self.random_state.standard_normal(size=3)
+ assert_(np.all(old == new))
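+        # Editorial note: the cache is the `has_gauss`/`gauss` pair in
+        # the legacy state (see assert_mt19937_state_equal above);
+        # set_state() must restore it, otherwise the next
+        # standard_normal() call could return a stale cached value.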
+
+ def test_gaussian_reset_in_media_res(self):
+ # When the state is saved with a cached Gaussian, make sure the
+ # cached Gaussian is restored.
+
+ self.random_state.standard_normal()
+ state = self.random_state.get_state()
+ old = self.random_state.standard_normal(size=3)
+ self.random_state.set_state(state)
+ new = self.random_state.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_backwards_compatibility(self):
+ # Make sure we can accept old state tuples that do not have the
+ # cached Gaussian value.
+ old_state = self.state[:-2]
+ x1 = self.random_state.standard_normal(size=16)
+ self.random_state.set_state(old_state)
+ x2 = self.random_state.standard_normal(size=16)
+ self.random_state.set_state(self.state)
+ x3 = self.random_state.standard_normal(size=16)
+ assert_(np.all(x1 == x2))
+ assert_(np.all(x1 == x3))
+
+ def test_negative_binomial(self):
+ # Ensure that the negative binomial results take floating point
+ # arguments without truncation.
+ self.random_state.negative_binomial(0.5, 0.5)
+
+ def test_get_state_warning(self):
+ rs = random.RandomState(PCG64())
+ with suppress_warnings() as sup:
+ w = sup.record(RuntimeWarning)
+ state = rs.get_state()
+ assert_(len(w) == 1)
+ assert isinstance(state, dict)
+ assert state['bit_generator'] == 'PCG64'
+
+ def test_invalid_legacy_state_setting(self):
+ state = self.random_state.get_state()
+ new_state = ('Unknown', ) + state[1:]
+ assert_raises(ValueError, self.random_state.set_state, new_state)
+ assert_raises(TypeError, self.random_state.set_state,
+ np.array(new_state, dtype=object))
+ state = self.random_state.get_state(legacy=False)
+ del state['bit_generator']
+ assert_raises(ValueError, self.random_state.set_state, state)
+
+ def test_pickle(self):
+ self.random_state.seed(0)
+ self.random_state.random_sample(100)
+ self.random_state.standard_normal()
+ pickled = self.random_state.get_state(legacy=False)
+ assert_equal(pickled['has_gauss'], 1)
+ rs_unpick = pickle.loads(pickle.dumps(self.random_state))
+ unpickled = rs_unpick.get_state(legacy=False)
+ assert_mt19937_state_equal(pickled, unpickled)
+
+ def test_state_setting(self):
+ attr_state = self.random_state.__getstate__()
+ self.random_state.standard_normal()
+ self.random_state.__setstate__(attr_state)
+ state = self.random_state.get_state(legacy=False)
+ assert_mt19937_state_equal(attr_state, state)
+
+ def test_repr(self):
+ assert repr(self.random_state).startswith('RandomState(MT19937)')
+
+
+class TestRandint:
+
+ rfunc = random.randint
+
+ # valid integer/boolean types
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self):
+ assert_raises(TypeError, self.rfunc, 1, dtype=float)
+
+ def test_bounds_checking(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+ assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+
+ def test_rng_zero_and_extremes(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd)//2
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ def test_full_range(self):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ try:
+ self.rfunc(lbnd, ubnd, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_in_bounds_fuzz(self):
+ # Don't use fixed seed
+ random.seed()
+
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+
+ vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
+
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_repeatability(self):
+ # We use a sha256 hash of generated sequences of 1000 samples
+ # in the range [0, 6) for all but bool, where the range
+ # is [0, 2). Hashes are for little endian numbers.
+ tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
+ 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+ 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+ 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+ 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
+ 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
+ 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
+ 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
+ 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
+
+ for dt in self.itype[1:]:
+ random.seed(1234)
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = self.rfunc(0, 6, size=1000, dtype=dt)
+ else:
+ val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+ res = hashlib.sha256(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ random.seed(1234)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+ res = hashlib.sha256(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
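+        # Editorial sketch of how a digest above can be reproduced
+        # (assuming little-endian layout), e.g. for int32:
+        #   random.seed(1234)
+        #   val = random.randint(0, 6, size=1000, dtype=np.int32)
+        #   hashlib.sha256(val.view(np.int8)).hexdigest()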
+
+ @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+ reason='Cannot test with 32-bit C long')
+ def test_repeatability_32bit_boundary_broadcasting(self):
+ desired = np.array([[[3992670689, 2438360420, 2557845020],
+ [4107320065, 4142558326, 3216529513],
+ [1605979228, 2807061240, 665605495]],
+ [[3211410639, 4128781000, 457175120],
+ [1712592594, 1282922662, 3081439808],
+ [3997822960, 2008322436, 1563495165]],
+ [[1398375547, 4269260146, 115316740],
+ [3414372578, 3437564012, 2112038651],
+ [3572980305, 2260248732, 3908238631]],
+ [[2561372503, 223155946, 3127879445],
+ [ 441282060, 3514786552, 2148440361],
+ [1629275283, 3479737011, 3003195987]],
+ [[ 412181688, 940383289, 3047321305],
+ [2978368172, 764731833, 2282559898],
+ [ 105711276, 720447391, 3596512484]]])
+ for size in [None, (5, 3, 3)]:
+ random.seed(12345)
+ x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
+ size=size)
+ assert_array_equal(x, desired if size is not None else desired[0])
+
+ def test_int64_uint64_corner_case(self):
+        # When stored in NumPy arrays, `lbnd` is cast to np.int64 and
+        # `ubnd` to np.uint64. Checking whether `lbnd` >= `ubnd` used
+        # to be done solely via direct comparison, which is incorrect:
+        # when NumPy compares the two, it casts both to np.float64,
+        # since there is no integer type that can hold both np.int64
+        # and np.uint64. `lbnd` (2**63 - 1) is not exactly representable
+        # in np.float64 and rounds up to 2**63, so the values compared
+        # as equal and a spurious ValueError was raised.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = random.randint(lbnd, ubnd, dtype=dt)
+ assert_equal(actual, tgt)
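+        # Editorial sketch of the rounding (float64 has a 53-bit
+        # mantissa, so 2**63 - 1 rounds up to 2**63):
+        #   float(np.int64(2**63 - 1)) == float(np.uint64(2**63))  # True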
+
+ def test_respect_dtype_singleton(self):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_equal(sample.dtype, np.dtype(dt))
+
+ for dt in (bool, int, np.compat.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
+
+
+class TestRandomDist:
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup_method(self):
+ self.seed = 1234567890
+
+ def test_rand(self):
+ random.seed(self.seed)
+ actual = random.rand(3, 2)
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rand_singleton(self):
+ random.seed(self.seed)
+ actual = random.rand()
+ desired = 0.61879477158567997
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn(self):
+ random.seed(self.seed)
+ actual = random.randn(3, 2)
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random.seed(self.seed)
+ actual = random.randn()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_randint(self):
+ random.seed(self.seed)
+ actual = random.randint(-99, 99, size=(3, 2))
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ def test_random_integers(self):
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(-99, 99, size=(3, 2))
+ assert_(len(w) == 1)
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(198, size=(3, 2))
+ assert_(len(w) == 1)
+ assert_array_equal(actual, desired + 100)
+
+ def test_tomaxint(self):
+ random.seed(self.seed)
+ rs = random.RandomState(self.seed)
+ actual = rs.tomaxint(size=(3, 2))
+ if np.iinfo(int).max == 2147483647:
+ desired = np.array([[1328851649, 731237375],
+ [1270502067, 320041495],
+ [1908433478, 499156889]], dtype=np.int64)
+ else:
+ desired = np.array([[5707374374421908479, 5456764827585442327],
+ [8196659375100692377, 8224063923314595285],
+ [4220315081820346526, 7177518203184491332]],
+ dtype=np.int64)
+
+ assert_equal(actual, desired)
+
+ rs.seed(self.seed)
+ actual = rs.tomaxint()
+ assert_equal(actual, desired[0, 0])
+
+ def test_random_integers_max_int(self):
+ # Tests whether random_integers can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method have thrown an OverflowError when attempting
+ # to generate this integer.
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
+ assert_(len(w) == 1)
+
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ typer = np.dtype('l').type
+ actual = random.random_integers(typer(np.iinfo('l').max),
+ typer(np.iinfo('l').max))
+ assert_(len(w) == 1)
+ assert_equal(actual, desired)
+
+ def test_random_integers_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # DeprecationWarning raised with high == None
+ assert_raises(DeprecationWarning,
+ random.random_integers,
+ np.iinfo('l').max)
+
+ # DeprecationWarning raised with high != None
+ assert_raises(DeprecationWarning,
+ random.random_integers,
+ np.iinfo('l').max, np.iinfo('l').max)
+
+ def test_random_sample(self):
+ random.seed(self.seed)
+ actual = random.random_sample((3, 2))
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random.seed(self.seed)
+ actual = random.random_sample()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_choice_uniform_replace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 4)
+ desired = np.array([2, 3, 2, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_replace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ desired = np.array([1, 1, 2, 2])
+ assert_array_equal(actual, desired)
+
+ def test_choice_uniform_noreplace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False)
+ desired = np.array([0, 1, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_noreplace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+ desired = np.array([2, 3, 1])
+ assert_array_equal(actual, desired)
+
+ def test_choice_noninteger(self):
+ random.seed(self.seed)
+ actual = random.choice(['a', 'b', 'c', 'd'], 4)
+ desired = np.array(['c', 'd', 'c', 'd'])
+ assert_array_equal(actual, desired)
+
+ def test_choice_exceptions(self):
+ sample = random.choice
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
+ assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
+ assert_raises(ValueError, sample, [], 3)
+ assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+ p=[[0.25, 0.25], [0.25, 0.25]])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+ assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], 2,
+ replace=False, p=[1, 0, 0])
+
+ def test_choice_return_shape(self):
+ p = [0.1, 0.9]
+ # Check scalar
+ assert_(np.isscalar(random.choice(2, replace=True)))
+ assert_(np.isscalar(random.choice(2, replace=False)))
+ assert_(np.isscalar(random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(random.choice([1, 2], replace=True)))
+ assert_(random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
+ assert_(random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2, 3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_equal(random.choice(6, s, replace=True).shape, s)
+ assert_equal(random.choice(6, s, replace=False).shape, s)
+ assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
+ assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
+ assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
+
+ # Check zero-size
+ assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(random.randint(0, -10, size=0).shape, (0,))
+ assert_equal(random.randint(10, 10, size=0).shape, (0,))
+ assert_equal(random.choice(0, size=0).shape, (0,))
+ assert_equal(random.choice([], size=(0,)).shape, (0,))
+ assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_raises(ValueError, random.choice, [], 10)
+
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+        p = [None, None, None]  # cast to float, None becomes nan
+ assert_raises(ValueError, random.choice, a, p=p)
+
+ def test_choice_p_non_contiguous(self):
+ p = np.ones(10) / 5
+ p[1::2] = 3.0
+ random.seed(self.seed)
+ non_contig = random.choice(5, 3, p=p[::2])
+ random.seed(self.seed)
+ contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
+ assert_array_equal(non_contig, contig)
+
+ def test_bytes(self):
+ random.seed(self.seed)
+ actual = random.bytes(10)
+ desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
+ assert_equal(actual, desired)
+
+ def test_shuffle(self):
+ # Test lists, arrays (of various dtypes), and multidimensional versions
+ # of both, c-contiguous or not:
+ for conv in [lambda x: np.array([]),
+ lambda x: x,
+ lambda x: np.asarray(x).astype(np.int8),
+ lambda x: np.asarray(x).astype(np.float32),
+ lambda x: np.asarray(x).astype(np.complex64),
+ lambda x: np.asarray(x).astype(object),
+ lambda x: [(i, i) for i in x],
+ lambda x: np.asarray([[i, i] for i in x]),
+ lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
+ # gh-4270
+ lambda x: np.asarray([(i, i) for i in x],
+ [("a", object, (1,)),
+ ("b", np.int32, (1,))])]:
+ random.seed(self.seed)
+ alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+ random.shuffle(alist)
+ actual = alist
+ desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_masked(self):
+ # gh-3263
+ a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+ b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+ a_orig = a.copy()
+ b_orig = b.copy()
+ for i in range(50):
+ random.shuffle(a)
+ assert_equal(
+ sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+ random.shuffle(b)
+ assert_equal(
+ sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+ def test_shuffle_invalid_objects(self):
+ x = np.array(3)
+ assert_raises(TypeError, random.shuffle, x)
+
+ def test_permutation(self):
+ random.seed(self.seed)
+ alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
+ actual = random.permutation(alist)
+ desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
+ actual = random.permutation(arr_2d)
+ assert_array_equal(actual, np.atleast_2d(desired).T)
+
+ random.seed(self.seed)
+ bad_x_str = "abcd"
+ assert_raises(IndexError, random.permutation, bad_x_str)
+
+ random.seed(self.seed)
+ bad_x_float = 1.2
+ assert_raises(IndexError, random.permutation, bad_x_float)
+
+ integer_val = 10
+ desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
+
+ random.seed(self.seed)
+ actual = random.permutation(integer_val)
+ assert_array_equal(actual, desired)
+
+ def test_beta(self):
+ random.seed(self.seed)
+ actual = random.beta(.1, .9, size=(3, 2))
+ desired = np.array(
+ [[1.45341850513746058e-02, 5.31297615662868145e-04],
+ [1.85366619058432324e-06, 4.19214516800110563e-03],
+ [1.58405155108498093e-04, 1.26252891949397652e-04]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_binomial(self):
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456, size=(3, 2))
+ desired = np.array([[37, 43],
+ [42, 48],
+ [46, 45]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456)
+ desired = 37
+ assert_array_equal(actual, desired)
+
+ def test_chisquare(self):
+ random.seed(self.seed)
+ actual = random.chisquare(50, size=(3, 2))
+ desired = np.array([[63.87858175501090585, 68.68407748911370447],
+ [65.77116116901505904, 47.09686762438974483],
+ [72.3828403199695174, 74.18408615260374006]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_dirichlet(self):
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.54539444573611562, 0.45460555426388438],
+ [0.62345816822039413, 0.37654183177960598]],
+ [[0.55206000085785778, 0.44793999914214233],
+ [0.58964023305154301, 0.41035976694845688]],
+ [[0.59266909280647828, 0.40733090719352177],
+ [0.56974431743975207, 0.43025568256024799]]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+ bad_alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, bad_alpha)
+
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha)
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_dirichlet_size(self):
+ # gh-3173
+ p = np.array([51.72840233779265162, 39.74494232180943953])
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+ assert_raises(TypeError, random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, alpha)
+
+ def test_dirichlet_alpha_non_contiguous(self):
+ a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
+ alpha = a[::2]
+ random.seed(self.seed)
+ non_contig = random.dirichlet(alpha, size=(3, 2))
+ random.seed(self.seed)
+ contig = random.dirichlet(np.ascontiguousarray(alpha),
+ size=(3, 2))
+ assert_array_almost_equal(non_contig, contig)
+
+ def test_exponential(self):
+ random.seed(self.seed)
+ actual = random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[1.08342649775011624, 1.00607889924557314],
+ [2.46628830085216721, 2.49668106809923884],
+ [0.68717433461363442, 1.69175666993575979]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_exponential_0(self):
+ assert_equal(random.exponential(scale=0), 0)
+ assert_raises(ValueError, random.exponential, scale=-0.)
+
+ def test_f(self):
+ random.seed(self.seed)
+ actual = random.f(12, 77, size=(3, 2))
+ desired = np.array([[1.21975394418575878, 1.75135759791559775],
+ [1.44803115017146489, 1.22108959480396262],
+ [1.02176975757740629, 1.34431827623300415]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gamma(self):
+ random.seed(self.seed)
+ actual = random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[24.60509188649287182, 28.54993563207210627],
+ [26.13476110204064184, 12.56988482927716078],
+ [31.71863275789960568, 33.30143302795922011]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_gamma_0(self):
+ assert_equal(random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
+
+ def test_geometric(self):
+ random.seed(self.seed)
+ actual = random.geometric(.123456789, size=(3, 2))
+ desired = np.array([[8, 7],
+ [17, 17],
+ [5, 12]])
+ assert_array_equal(actual, desired)
+
+ def test_geometric_exceptions(self):
+ assert_raises(ValueError, random.geometric, 1.1)
+ assert_raises(ValueError, random.geometric, [1.1] * 10)
+ assert_raises(ValueError, random.geometric, -0.1)
+ assert_raises(ValueError, random.geometric, [-0.1] * 10)
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.geometric, np.nan)
+ assert_raises(ValueError, random.geometric, [np.nan] * 10)
+
+ def test_gumbel(self):
+ random.seed(self.seed)
+ actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.19591898743416816, 0.34405539668096674],
+ [-1.4492522252274278, -1.47374816298446865],
+ [1.10651090478803416, -0.69535848626236174]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gumbel_0(self):
+ assert_equal(random.gumbel(scale=0), 0)
+ assert_raises(ValueError, random.gumbel, scale=-0.)
+
+ def test_hypergeometric(self):
+ random.seed(self.seed)
+ actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ desired = np.array([[10, 10],
+ [10, 10],
+ [9, 9]])
+ assert_array_equal(actual, desired)
+
+ # Test nbad = 0
+ actual = random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ def test_laplace(self):
+ random.seed(self.seed)
+ actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.66599721112760157, 0.52829452552221945],
+ [3.12791959514407125, 3.18202813572992005],
+ [-0.05391065675859356, 1.74901336242837324]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_laplace_0(self):
+ assert_equal(random.laplace(scale=0), 0)
+ assert_raises(ValueError, random.laplace, scale=-0.)
+
+ def test_logistic(self):
+ random.seed(self.seed)
+ actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[1.09232835305011444, 0.8648196662399954],
+ [4.27818590694950185, 4.33897006346929714],
+ [-0.21682183359214885, 2.63373365386060332]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_lognormal(self):
+ random.seed(self.seed)
+ actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[16.50698631688883822, 36.54846706092654784],
+ [22.67886599981281748, 0.71617561058995771],
+ [65.72798501792723869, 86.84341601437161273]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_lognormal_0(self):
+ assert_equal(random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, random.lognormal, sigma=-0.)
+
+ def test_logseries(self):
+ random.seed(self.seed)
+ actual = random.logseries(p=.923456789, size=(3, 2))
+ desired = np.array([[2, 2],
+ [6, 17],
+ [3, 6]])
+ assert_array_equal(actual, desired)
+
+ def test_logseries_zero(self):
+ assert random.logseries(0) == 1
+
+ @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.])
+ def test_logseries_exceptions(self, value):
+ with np.errstate(invalid="ignore"):
+ with pytest.raises(ValueError):
+ random.logseries(value)
+ with pytest.raises(ValueError):
+ # contiguous path:
+ random.logseries(np.array([value] * 10))
+ with pytest.raises(ValueError):
+ # non-contiguous path:
+ random.logseries(np.array([value] * 10)[::2])
+
+ def test_multinomial(self):
+ random.seed(self.seed)
+ actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+ desired = np.array([[[4, 3, 5, 4, 2, 2],
+ [5, 2, 8, 2, 2, 1]],
+ [[3, 4, 3, 6, 0, 4],
+ [2, 1, 4, 3, 6, 4]],
+ [[4, 4, 2, 5, 2, 3],
+ [4, 3, 4, 2, 3, 4]]])
+ assert_array_equal(actual, desired)
+
+ def test_multivariate_normal(self):
+ random.seed(self.seed)
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[1.463620246718631, 11.73759122771936],
+ [1.622445133300628, 9.771356667546383]],
+ [[2.154490787682787, 12.170324946056553],
+ [1.719909438201865, 9.230548443648306]],
+ [[0.689515026297799, 9.880729819607714],
+ [-0.023054015651998, 9.201096623542879]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check for default size, was raising deprecation warning
+ actual = random.multivariate_normal(mean, cov)
+ desired = np.array([0.895289569463708, 9.17180864067987])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check that non positive-semidefinite covariance warns with
+ # RuntimeWarning
+ mean = [0, 0]
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+
+ # and that it doesn't warn with RuntimeWarning check_valid='ignore'
+ assert_no_warnings(random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises with RuntimeWarning check_valid='raises'
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
+ def test_negative_binomial(self):
+ random.seed(self.seed)
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[848, 841],
+ [892, 611],
+ [779, 647]])
+ assert_array_equal(actual, desired)
+
+ def test_negative_binomial_exceptions(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.negative_binomial, 100, np.nan)
+ assert_raises(ValueError, random.negative_binomial, 100,
+ [np.nan] * 10)
+
+ def test_noncentral_chisquare(self):
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[23.91905354498517511, 13.35324692733826346],
+ [31.22452661329736401, 16.60047399466177254],
+ [5.03461598262724586, 17.94973089023519464]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[1.47145377828516666, 0.15052899268012659],
+ [0.00943803056963588, 1.02647251615666169],
+ [0.332334982684171, 0.15451287602753125]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[9.597154162763948, 11.725484450296079],
+ [10.413711048138335, 3.694475922923986],
+ [13.484222138963087, 14.377255424602957]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ size=(3, 2))
+ desired = np.array([[1.40598099674926669, 0.34207973179285761],
+ [3.57715069265772545, 7.92632662577829805],
+ [0.43741599463544162, 1.1774208752428319]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f_nan(self):
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
+ assert np.isnan(actual)
+
+ def test_normal(self):
+ random.seed(self.seed)
+ actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[2.80378370443726244, 3.59863924443872163],
+ [3.121433477601256, -0.33382987590723379],
+ [4.18552478636557357, 4.46410668111310471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_normal_0(self):
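+ # scale == 0 is valid and degenerate at loc; numpy rejects negative
+ # zero via a sign-bit check, so scale=-0. raises even though -0. == 0.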
+ assert_equal(random.normal(scale=0), 0)
+ assert_raises(ValueError, random.normal, scale=-0.)
+
+ def test_pareto(self):
+ random.seed(self.seed)
+ actual = random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array(
+ [[2.46852460439034849e+03, 1.41286880810518346e+03],
+ [5.28287797029485181e+07, 6.57720981047328785e+07],
+ [1.40840323350391515e+02, 1.98390255135251704e+05]])
+ # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+ # matrix differs by 24 nulps. Discussion:
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # Consensus is that this is probably some gcc quirk that affects
+ # rounding but not in any important way, so we just use a looser
+ # tolerance on this test:
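+ # (one "nulp" is one unit-in-the-last-place spacing between adjacent
+ # floating-point values)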
+ np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
+
+ def test_poisson(self):
+ random.seed(self.seed)
+ actual = random.poisson(lam=.123456789, size=(3, 2))
+ desired = np.array([[0, 0],
+ [1, 0],
+ [0, 0]])
+ assert_array_equal(actual, desired)
+
+ def test_poisson_exceptions(self):
+ lambig = np.iinfo('l').max
+ lamneg = -1
+ assert_raises(ValueError, random.poisson, lamneg)
+ assert_raises(ValueError, random.poisson, [lamneg] * 10)
+ assert_raises(ValueError, random.poisson, lambig)
+ assert_raises(ValueError, random.poisson, [lambig] * 10)
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.poisson, np.nan)
+ assert_raises(ValueError, random.poisson, [np.nan] * 10)
+
+ def test_power(self):
+ random.seed(self.seed)
+ actual = random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[0.02048932883240791, 0.01424192241128213],
+ [0.38446073748535298, 0.39499689943484395],
+ [0.00177699707563439, 0.13115505880863756]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rayleigh(self):
+ random.seed(self.seed)
+ actual = random.rayleigh(scale=10, size=(3, 2))
+ desired = np.array([[13.8882496494248393, 13.383318339044731],
+ [20.95413364294492098, 21.08285015800712614],
+ [11.06066537006854311, 17.35468505778271009]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_rayleigh_0(self):
+ assert_equal(random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, random.rayleigh, scale=-0.)
+
+ def test_standard_cauchy(self):
+ random.seed(self.seed)
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[0.77127660196445336, -6.55601161955910605],
+ [0.93582023391158309, -2.07479293013759447],
+ [-4.74601644297011926, 0.18338989290760804]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ random.seed(self.seed)
+ actual = random.standard_exponential(size=(3, 2))
+ desired = np.array([[0.96441739162374596, 0.89556604882105506],
+ [2.1953785836319808, 2.22243285392490542],
+ [0.6116915921431676, 1.50592546727413201]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_gamma(self):
+ random.seed(self.seed)
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[5.50841531318455058, 6.62953470301903103],
+ [5.93988484943779227, 2.31044849402133989],
+ [7.54838614231317084, 8.012756093271868]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gamma_0(self):
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ random.seed(self.seed)
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn_singleton(self):
+ random.seed(self.seed)
+ actual = random.randn()
+ desired = np.array(1.34016345771863121)
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_t(self):
+ random.seed(self.seed)
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[0.97140611862659965, -0.08830486548450577],
+ [1.36311143689505321, -0.55317463909867071],
+ [-0.18473749069684214, 0.61181537341755321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ random.seed(self.seed)
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[12.68117178949215784, 12.4129206149193152],
+ [16.20131377335158263, 16.25692138747600524],
+ [11.20400690911820263, 14.4978144835829923]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ random.seed(self.seed)
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[6.99097932346268003, 6.73801597444323974],
+ [9.50364421400426274, 9.53130618907631089],
+ [5.48995325769805476, 8.47493103280052118]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw.
+ # On i386 extended precision, DBL_MAX / 1e17 + DBL_MAX > DBL_MAX,
+ # so increase fmin a bit to account for that.
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ #
+ # Regression test for gh-8865
+
+ class ThrowingFloat(np.ndarray):
+ def __float__(self):
+ raise TypeError
+
+ throwing_float = np.array(1.0).view(ThrowingFloat)
+ assert_raises(TypeError, random.uniform, throwing_float,
+ throwing_float)
+
+ class ThrowingInteger(np.ndarray):
+ def __int__(self):
+ raise TypeError
+
+ throwing_int = np.array(1).view(ThrowingInteger)
+ assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
+
+ def test_vonmises(self):
+ random.seed(self.seed)
+ actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ desired = np.array([[2.28567572673902042, 2.89163838442285037],
+ [0.38198375564286025, 2.57638023113890746],
+ [1.19153771588353052, 1.83509849681825354]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_vonmises_small(self):
+ # check that generation does not enter an infinite loop, gh-4720
+ random.seed(self.seed)
+ r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+ assert_(np.isfinite(r).all())
+
+ def test_vonmises_large(self):
+ # guard against changes in RandomState when Generator is fixed
+ random.seed(self.seed)
+ actual = random.vonmises(mu=0., kappa=1e7, size=3)
+ desired = np.array([4.634253748521111e-04,
+ 3.558873596114509e-04,
+ -2.337119622577433e-04])
+ assert_array_almost_equal(actual, desired, decimal=8)
+
+ def test_vonmises_nan(self):
+ random.seed(self.seed)
+ r = random.vonmises(mu=0., kappa=np.nan)
+ assert_(np.isnan(r))
+
+ def test_wald(self):
+ random.seed(self.seed)
+ actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[3.82935265715889983, 5.13125249184285526],
+ [0.35045403618358717, 1.50832396872003538],
+ [0.24124319895843183, 0.22031101461955038]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_weibull(self):
+ random.seed(self.seed)
+ actual = random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[0.97097342648766727, 0.91422896443565516],
+ [1.89517770034962929, 1.91414357960479564],
+ [0.67057783752390987, 1.39494046635066793]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_weibull_0(self):
+ random.seed(self.seed)
+ assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, random.weibull, a=-0.)
+
+ def test_zipf(self):
+ random.seed(self.seed)
+ actual = random.zipf(a=1.23, size=(3, 2))
+ desired = np.array([[66, 29],
+ [1, 1],
+ [3, 13]])
+ assert_array_equal(actual, desired)
+
+
+class TestBroadcast:
+ # Tests that parameter-broadcasting functions behave correctly
+ # when given non-scalar arguments.
+ def setup_method(self):
+ self.seed = 123456789
+
+ def set_seed(self):
+ random.seed(self.seed)
+
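+ # Broadcasting example: uniform(low=[0, 0, 0], high=1) broadcasts the
+ # scalar high against the length-3 low and returns three draws.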
+ def test_uniform(self):
+ low = [0]
+ high = [1]
+ uniform = random.uniform
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.set_seed()
+ actual = uniform(low * 3, high)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ self.set_seed()
+ actual = uniform(low, high * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ normal = random.normal
+ desired = np.array([2.2129019979039612,
+ 2.1283977976520019,
+ 1.8417114045748335])
+
+ self.set_seed()
+ actual = normal(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = normal(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc, bad_scale * 3)
+
+ def test_beta(self):
+ a = [1]
+ b = [2]
+ bad_a = [-1]
+ bad_b = [-2]
+ beta = random.beta
+ desired = np.array([0.19843558305989056,
+ 0.075230336409423643,
+ 0.24976865978980844])
+
+ self.set_seed()
+ actual = beta(a * 3, b)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a * 3, b)
+ assert_raises(ValueError, beta, a * 3, bad_b)
+
+ self.set_seed()
+ actual = beta(a, b * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a, b * 3)
+ assert_raises(ValueError, beta, a, bad_b * 3)
+
+ def test_exponential(self):
+ scale = [1]
+ bad_scale = [-1]
+ exponential = random.exponential
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.set_seed()
+ actual = exponential(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, exponential, bad_scale * 3)
+
+ def test_standard_gamma(self):
+ shape = [1]
+ bad_shape = [-1]
+ std_gamma = random.standard_gamma
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.set_seed()
+ actual = std_gamma(shape * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, std_gamma, bad_shape * 3)
+
+ def test_gamma(self):
+ shape = [1]
+ scale = [2]
+ bad_shape = [-1]
+ bad_scale = [-2]
+ gamma = random.gamma
+ desired = np.array([1.5221370731769048,
+ 1.5277256455738331,
+ 1.4248762625178359])
+
+ self.set_seed()
+ actual = gamma(shape * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape * 3, scale)
+ assert_raises(ValueError, gamma, shape * 3, bad_scale)
+
+ self.set_seed()
+ actual = gamma(shape, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape, scale * 3)
+ assert_raises(ValueError, gamma, shape, bad_scale * 3)
+
+ def test_f(self):
+ dfnum = [1]
+ dfden = [2]
+ bad_dfnum = [-1]
+ bad_dfden = [-2]
+ f = random.f
+ desired = np.array([0.80038951638264799,
+ 0.86768719635363512,
+ 2.7251095168386801])
+
+ self.set_seed()
+ actual = f(dfnum * 3, dfden)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum * 3, dfden)
+ assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+
+ self.set_seed()
+ actual = f(dfnum, dfden * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum, dfden * 3)
+ assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+
+ def test_noncentral_f(self):
+ dfnum = [2]
+ dfden = [3]
+ nonc = [4]
+ bad_dfnum = [0]
+ bad_dfden = [-1]
+ bad_nonc = [-2]
+ nonc_f = random.noncentral_f
+ desired = np.array([9.1393943263705211,
+ 13.025456344595602,
+ 8.8018098359100545])
+
+ self.set_seed()
+ actual = nonc_f(dfnum * 3, dfden, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
+
+ assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+
+ self.set_seed()
+ actual = nonc_f(dfnum, dfden * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+
+ self.set_seed()
+ actual = nonc_f(dfnum, dfden, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+
+ def test_noncentral_f_small_df(self):
+ self.set_seed()
+ desired = np.array([6.869638627492048, 0.785880199263955])
+ actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_chisquare(self):
+ df = [1]
+ bad_df = [-1]
+ chisquare = random.chisquare
+ desired = np.array([0.57022801133088286,
+ 0.51947702108840776,
+ 0.1320969254923558])
+
+ self.set_seed()
+ actual = chisquare(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, chisquare, bad_df * 3)
+
+ def test_noncentral_chisquare(self):
+ df = [1]
+ nonc = [2]
+ bad_df = [-1]
+ bad_nonc = [-2]
+ nonc_chi = random.noncentral_chisquare
+ desired = np.array([9.0015599467913763,
+ 4.5804135049718742,
+ 6.0872302432834564])
+
+ self.set_seed()
+ actual = nonc_chi(df * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
+ assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+
+ self.set_seed()
+ actual = nonc_chi(df, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
+ assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+
+ def test_standard_t(self):
+ df = [1]
+ bad_df = [-1]
+ t = random.standard_t
+ desired = np.array([3.0702872575217643,
+ 5.8560725167361607,
+ 1.0274791436474273])
+
+ self.set_seed()
+ actual = t(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, t, bad_df * 3)
+ assert_raises(ValueError, random.standard_t, bad_df * 3)
+
+ def test_vonmises(self):
+ mu = [2]
+ kappa = [1]
+ bad_kappa = [-1]
+ vonmises = random.vonmises
+ desired = np.array([2.9883443664201312,
+ -2.7064099483995943,
+ -1.8672476700665914])
+
+ self.set_seed()
+ actual = vonmises(mu * 3, kappa)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+
+ self.set_seed()
+ actual = vonmises(mu, kappa * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+
+ def test_pareto(self):
+ a = [1]
+ bad_a = [-1]
+ pareto = random.pareto
+ desired = np.array([1.1405622680198362,
+ 1.1465519762044529,
+ 1.0389564467453547])
+
+ self.set_seed()
+ actual = pareto(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, pareto, bad_a * 3)
+ assert_raises(ValueError, random.pareto, bad_a * 3)
+
+ def test_weibull(self):
+ a = [1]
+ bad_a = [-1]
+ weibull = random.weibull
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.set_seed()
+ actual = weibull(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, weibull, bad_a * 3)
+ assert_raises(ValueError, random.weibull, bad_a * 3)
+
+ def test_power(self):
+ a = [1]
+ bad_a = [-1]
+ power = random.power
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.set_seed()
+ actual = power(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, power, bad_a * 3)
+ assert_raises(ValueError, random.power, bad_a * 3)
+
+ def test_laplace(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ laplace = random.laplace
+ desired = np.array([0.067921356028507157,
+ 0.070715642226971326,
+ 0.019290950698972624])
+
+ self.set_seed()
+ actual = laplace(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = laplace(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc, bad_scale * 3)
+
+ def test_gumbel(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ gumbel = random.gumbel
+ desired = np.array([0.2730318639556768,
+ 0.26936705726291116,
+ 0.33906220393037939])
+
+ self.set_seed()
+ actual = gumbel(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = gumbel(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+
+ def test_logistic(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ logistic = random.logistic
+ desired = np.array([0.13152135837586171,
+ 0.13675915696285773,
+ 0.038216792802833396])
+
+ self.set_seed()
+ actual = logistic(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = logistic(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc, bad_scale * 3)
+ assert_equal(random.logistic(1.0, 0.0), 1.0)
+
+ def test_lognormal(self):
+ mean = [0]
+ sigma = [1]
+ bad_sigma = [-1]
+ lognormal = random.lognormal
+ desired = np.array([9.1422086044848427,
+ 8.4013952870126261,
+ 6.3073234116578671])
+
+ self.set_seed()
+ actual = lognormal(mean * 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+ assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
+
+ self.set_seed()
+ actual = lognormal(mean, sigma * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
+ assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ rayleigh = random.rayleigh
+ desired = np.array([1.2337491937897689,
+ 1.2360119924878694,
+ 1.1936818095781789])
+
+ self.set_seed()
+ actual = rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ wald = random.wald
+ desired = np.array([0.11873681120271318,
+ 0.12450084820795027,
+ 0.9096122728408238])
+
+ self.set_seed()
+ actual = wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean * 3, scale)
+ assert_raises(ValueError, wald, mean * 3, bad_scale)
+ assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+ assert_raises(ValueError, random.wald, mean * 3, bad_scale)
+
+ self.set_seed()
+ actual = wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean, scale * 3)
+ assert_raises(ValueError, wald, mean, bad_scale * 3)
+ assert_raises(ValueError, wald, 0.0, 1)
+ assert_raises(ValueError, wald, 0.5, 0.0)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+ bad_left_two, bad_mode_two = right * 2
+ triangular = random.triangular
+ desired = np.array([2.03339048710429,
+ 2.0347400359389356,
+ 2.0095991069536208])
+
+ self.set_seed()
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ self.set_seed()
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ self.set_seed()
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
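+ # Scalar checks: mode must lie within [left, right], and left == right
+ # is rejected.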
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
+
+ def test_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ binom = random.binomial
+ desired = np.array([1, 1, 1])
+
+ self.set_seed()
+ actual = binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n * 3, p)
+ assert_raises(ValueError, binom, n * 3, bad_p_one)
+ assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+ self.set_seed()
+ actual = binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n, p * 3)
+ assert_raises(ValueError, binom, n, bad_p_one * 3)
+ assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+ def test_negative_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ neg_binom = random.negative_binomial
+ desired = np.array([1, 0, 1])
+
+ self.set_seed()
+ actual = neg_binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n * 3, p)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+ self.set_seed()
+ actual = neg_binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n, p * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+ def test_poisson(self):
+ max_lam = random.RandomState()._poisson_lam_max
+
+ lam = [1]
+ bad_lam_one = [-1]
+ bad_lam_two = [max_lam * 2]
+ poisson = random.poisson
+ desired = np.array([1, 1, 0])
+
+ self.set_seed()
+ actual = poisson(lam * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, poisson, bad_lam_one * 3)
+ assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+ def test_zipf(self):
+ a = [2]
+ bad_a = [0]
+ zipf = random.zipf
+ desired = np.array([2, 2, 1])
+
+ self.set_seed()
+ actual = zipf(a * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, zipf, bad_a * 3)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, zipf, np.nan)
+ assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+ def test_geometric(self):
+ p = [0.5]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ geom = random.geometric
+ desired = np.array([2, 2, 2])
+
+ self.set_seed()
+ actual = geom(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, geom, bad_p_one * 3)
+ assert_raises(ValueError, geom, bad_p_two * 3)
+
+ def test_hypergeometric(self):
+ ngood = [1]
+ nbad = [2]
+ nsample = [2]
+ bad_ngood = [-1]
+ bad_nbad = [-2]
+ bad_nsample_one = [0]
+ bad_nsample_two = [4]
+ hypergeom = random.hypergeometric
+ desired = np.array([1, 1, 1])
+
+ self.set_seed()
+ actual = hypergeom(ngood * 3, nbad, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
+
+ self.set_seed()
+ actual = hypergeom(ngood, nbad * 3, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
+
+ self.set_seed()
+ actual = hypergeom(ngood, nbad, nsample * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
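+ # Scalar checks: negative counts raise, and nsample must lie in
+ # [1, ngood + nbad] (here 20), so both 0 and 25 raise.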
+ assert_raises(ValueError, hypergeom, -1, 10, 20)
+ assert_raises(ValueError, hypergeom, 10, -1, 20)
+ assert_raises(ValueError, hypergeom, 10, 10, 0)
+ assert_raises(ValueError, hypergeom, 10, 10, 25)
+
+ def test_logseries(self):
+ p = [0.5]
+ bad_p_one = [2]
+ bad_p_two = [-1]
+ logseries = random.logseries
+ desired = np.array([1, 1, 1])
+
+ self.set_seed()
+ actual = logseries(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, logseries, bad_p_one * 3)
+ assert_raises(ValueError, logseries, bad_p_two * 3)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
+class TestThread:
+ # make sure each state produces the same sequence even in threads
+ def setup_method(self):
+ self.seeds = range(4)
+
+ def check_function(self, function, sz):
+ from threading import Thread
+
+ out1 = np.empty((len(self.seeds),) + sz)
+ out2 = np.empty((len(self.seeds),) + sz)
+
+ # threaded generation
+ t = [Thread(target=function, args=(random.RandomState(s), o))
+ for s, o in zip(self.seeds, out1)]
+ [x.start() for x in t]
+ [x.join() for x in t]
+
+ # the same serial
+ for s, o in zip(self.seeds, out2):
+ function(random.RandomState(s), o)
+
+ # these platforms change the x87 FPU precision mode in threads, so
+ # the results are only compared approximately
+ if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+ assert_array_almost_equal(out1, out2)
+ else:
+ assert_array_equal(out1, out2)
+
+ def test_normal(self):
+ def gen_random(state, out):
+ out[...] = state.normal(size=10000)
+
+ self.check_function(gen_random, sz=(10000,))
+
+ def test_exp(self):
+ def gen_random(state, out):
+ out[...] = state.exponential(scale=np.ones((100, 1000)))
+
+ self.check_function(gen_random, sz=(100, 1000))
+
+ def test_multinomial(self):
+ def gen_random(state, out):
+ out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
+
+ self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput:
+ def setup_method(self):
+ self.argOne = np.array([2])
+ self.argTwo = np.array([3])
+ self.argThree = np.array([4])
+ self.tgtShape = (1,)
+
+ def test_one_arg_funcs(self):
+ funcs = (random.exponential, random.standard_gamma,
+ random.chisquare, random.standard_t,
+ random.pareto, random.weibull,
+ random.power, random.rayleigh,
+ random.poisson, random.zipf,
+ random.geometric, random.logseries)
+
+ probfuncs = (random.geometric, random.logseries)
+
+ for func in funcs:
+ if func in probfuncs: # p < 1.0
+ out = func(np.array([0.5]))
+
+ else:
+ out = func(self.argOne)
+
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_two_arg_funcs(self):
+ funcs = (random.uniform, random.normal,
+ random.beta, random.gamma,
+ random.f, random.noncentral_chisquare,
+ random.vonmises, random.laplace,
+ random.gumbel, random.logistic,
+ random.lognormal, random.wald,
+ random.binomial, random.negative_binomial)
+
+ probfuncs = (random.binomial, random.negative_binomial)
+
+ for func in funcs:
+ if func in probfuncs: # p <= 1
+ argTwo = np.array([0.5])
+
+ else:
+ argTwo = self.argTwo
+
+ out = func(self.argOne, argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, argTwo[0])
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_three_arg_funcs(self):
+ funcs = [random.noncentral_f, random.triangular,
+ random.hypergeometric]
+
+ for func in funcs:
+ out = func(self.argOne, self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, self.argTwo[0], self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+
+# Ensure returned array dtype is correct for platform
+def test_integer_dtype(int_func):
+ random.seed(123456789)
+ fname, args, sha256 = int_func
+ f = getattr(random, fname)
+ actual = f(*args, size=2)
+ assert_(actual.dtype == np.dtype('l'))
+
+
+def test_integer_repeat(int_func):
+ random.seed(123456789)
+ fname, args, sha256 = int_func
+ f = getattr(random, fname)
+ val = f(*args, size=1000000)
+ if sys.byteorder != 'little':
+ val = val.byteswap()
+ res = hashlib.sha256(val.view(np.int8)).hexdigest()
+ assert_(res == sha256)
+
+
+def test_broadcast_size_error():
+ # GH-16833
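+ # The arguments broadcast to shape (2,), which size=(2, 1) cannot hold.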
+ with pytest.raises(ValueError):
+ random.binomial(1, [0.3, 0.7], size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], 0.3, size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
+
+
+def test_randomstate_ctor_old_style_pickle():
+ rs = np.random.RandomState(MT19937(0))
+ rs.standard_normal(1)
+ # Directly call reduce which is used in pickling
+ ctor, args, state_a = rs.__reduce__()
+ # Simulate unpickling an old pickle that only has the name
+ assert args[:1] == ("MT19937",)
+ b = ctor(*args[:1])
+ b.set_state(state_a)
+ state_b = b.get_state(legacy=False)
+
+ assert_equal(state_a['bit_generator'], state_b['bit_generator'])
+ assert_array_equal(state_a['state']['key'], state_b['state']['key'])
+ assert_array_equal(state_a['state']['pos'], state_b['state']['pos'])
+ assert_equal(state_a['has_gauss'], state_b['has_gauss'])
+ assert_equal(state_a['gauss'], state_b['gauss'])
+
+def test_hot_swap(restore_singleton_bitgen):
+ # GH 21808
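+ # Swapping in a new bit generator must be visible on the module-level
+ # singleton and via get_bit_generator().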
+ def_bg = np.random.default_rng(0)
+ bg = def_bg.bit_generator
+ np.random.set_bit_generator(bg)
+ assert isinstance(np.random.mtrand._rand._bit_generator, type(bg))
+
+ second_bg = np.random.get_bit_generator()
+ assert bg is second_bg
+
+
+def test_seed_alt_bit_gen(restore_singleton_bitgen):
+ # GH 21808
+ bg = PCG64(0)
+ np.random.set_bit_generator(bg)
+ state = np.random.get_state(legacy=False)
+ np.random.seed(1)
+ new_state = np.random.get_state(legacy=False)
+ assert state["bit_generator"] == "PCG64"
+ assert state["state"]["state"] != new_state["state"]["state"]
+ assert state["state"]["inc"] != new_state["state"]["inc"]
+
+
+def test_state_error_alt_bit_gen(restore_singleton_bitgen):
+ # GH 21808
+ state = np.random.get_state()
+ bg = PCG64(0)
+ np.random.set_bit_generator(bg)
+ with pytest.raises(ValueError, match="state must be for a PCG64"):
+ np.random.set_state(state)
+
+
+def test_swap_worked(restore_singleton_bitgen):
+ # GH 21808
+ np.random.seed(98765)
+ vals = np.random.randint(0, 2 ** 30, 10)
+ bg = PCG64(0)
+ state = bg.state
+ np.random.set_bit_generator(bg)
+ state_direct = np.random.get_state(legacy=False)
+ for field in state:
+ assert state[field] == state_direct[field]
+ np.random.seed(98765)
+ pcg_vals = np.random.randint(0, 2 ** 30, 10)
+ assert not np.all(vals == pcg_vals)
+ new_state = bg.state
+ assert new_state["state"]["state"] != state["state"]["state"]
+ assert new_state["state"]["inc"] == state["state"]["inc"]
+
+
+def test_swapped_singleton_against_direct(restore_singleton_bitgen):
+ np.random.set_bit_generator(PCG64(98765))
+ singleton_vals = np.random.randint(0, 2 ** 30, 10)
+ rg = np.random.RandomState(PCG64(98765))
+ non_singleton_vals = rg.randint(0, 2 ** 30, 10)
+ assert_equal(non_singleton_vals, singleton_vals)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py
new file mode 100644
index 00000000..7ad19ab5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_randomstate_regression.py
@@ -0,0 +1,216 @@
+import sys
+
+import pytest
+
+from numpy.testing import (
+ assert_, assert_array_equal, assert_raises,
+ )
+import numpy as np
+
+from numpy import random
+
+
+class TestRegression:
+
+ def test_VonMises_range(self):
+ # Make sure generated random variables are in (-pi, pi].
+ # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5):
+ r = random.vonmises(mu, 1, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
+ assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
+
+ # Test for ticket #5623
+ args = [
+ (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
+ ]
+ is_64bits = sys.maxsize > 2**32
+ if is_64bits and sys.platform != 'win32':
+ # Check for 64-bit systems
+ args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
+ for arg in args:
+ assert_(random.hypergeometric(*arg) > 0)
+
+ def test_logseries_convergence(self):
+ # Test for ticket #923
+ N = 1000
+ random.seed(0)
+ rvsn = random.logseries(0.8, size=N)
+ # these two frequency counts should be close to theoretical
+ # numbers with this large sample
+ # theoretical large N result is 0.49706795
+ freq = np.sum(rvsn == 1) / N
+ msg = f'Frequency was {freq:f}, should be > 0.45'
+ assert_(freq > 0.45, msg)
+ # theoretical large N result is 0.19882718
+ freq = np.sum(rvsn == 2) / N
+ msg = f'Frequency was {freq:f}, should be < 0.23'
+ assert_(freq < 0.23, msg)
+
+ def test_shuffle_mixed_dimension(self):
+ # Test for trac ticket #2074
+ for t in [[1, 2, 3, None],
+ [(1, 1), (2, 2), (3, 3), None],
+ [1, (2, 2), (3, 3), None],
+ [(1, 1), 2, 3, None]]:
+ random.seed(12345)
+ shuffled = list(t)
+ random.shuffle(shuffled)
+ expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
+ assert_array_equal(np.array(shuffled, dtype=object), expected)
+
+ def test_call_within_randomstate(self):
+ # Check that custom RandomState does not call into global state
+ m = random.RandomState()
+ res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
+ for i in range(3):
+ random.seed(i)
+ m.seed(4321)
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ random.multivariate_normal([0], [[0]], size=1)
+ random.multivariate_normal([0], [[0]], size=np.int_(1))
+ random.multivariate_normal([0], [[0]], size=np.int64(1))
+
+ def test_beta_small_parameters(self):
+ # Test that beta with small a and b parameters does not produce
+ # NaNs due to roundoff errors causing 0 / 0, gh-5851
+ random.seed(1234567890)
+ x = random.beta(0.0001, 0.0001, size=100)
+ assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
+
+ def test_choice_sum_of_probs_tolerance(self):
+ # The sum of probs should be 1.0 with some tolerance.
+ # For low precision dtypes the tolerance was too tight.
+ # See numpy github issue 6123.
+ random.seed(1234)
+ a = [1, 2, 3]
+ counts = [4, 4, 2]
+ for dt in np.float16, np.float32, np.float64:
+ probs = np.array(counts, dtype=dt) / sum(counts)
+ c = random.choice(a, p=probs)
+ assert_(c in a)
+ assert_raises(ValueError, random.choice, a, p=probs*0.9)
+
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ random.seed(1234)
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ random.seed(1234)
+ a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+ for _ in range(1000):
+ random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_permutation_subclass(self):
+ class N(np.ndarray):
+ pass
+
+ random.seed(1)
+ orig = np.arange(3).view(N)
+ perm = random.permutation(orig)
+ assert_array_equal(perm, np.array([0, 2, 1]))
+ assert_array_equal(orig, np.arange(3).view(N))
+
+ class M:
+ a = np.arange(5)
+
+ def __array__(self):
+ return self.a
+
+ random.seed(1)
+ m = M()
+ perm = random.permutation(m)
+ assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+ assert_array_equal(m.__array__(), np.arange(5))
+
+ def test_warns_byteorder(self):
+ # GH 13159
+ other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+ with pytest.deprecated_call(match='non-native byteorder is not'):
+ random.randint(0, 200, size=10, dtype=other_byteord_dt)
+
+ def test_named_argument_initialization(self):
+ # GH 13669
+ rs1 = np.random.RandomState(123456789)
+ rs2 = np.random.RandomState(seed=123456789)
+ assert rs1.randint(0, 100) == rs2.randint(0, 100)
+
+ def test_choice_return_dtype(self):
+ # GH 9867
+ c = np.random.choice(10, p=[.1]*10, size=2)
+ assert c.dtype == np.dtype(int)
+ c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
+ assert c.dtype == np.dtype(int)
+ c = np.random.choice(10, size=2)
+ assert c.dtype == np.dtype(int)
+ c = np.random.choice(10, replace=False, size=2)
+ assert c.dtype == np.dtype(int)
+
+ @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+ reason='Cannot test with 32-bit C long')
+ def test_randint_117(self):
+ # GH 14189
+ random.seed(0)
+ expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
+ 2588848963, 3684848379, 2340255427, 3638918503,
+ 1819583497, 2678185683], dtype='int64')
+ actual = random.randint(2**32, size=10)
+ assert_array_equal(actual, expected)
+
+ def test_p_zero_stream(self):
+ # Regression test for gh-14522. Ensure that future versions
+ # generate the same variates as version 1.16.
+ np.random.seed(12345)
+ assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+ [0, 0, 0, 1, 1])
+
+ def test_n_zero_stream(self):
+ # Regression test for gh-14522. Ensure that future versions
+ # generate the same variates as version 1.16.
+ np.random.seed(8675309)
+ expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
+ assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
+ expected)
+
+
+def test_multinomial_empty():
+ # gh-20483
+ # Ensure that empty p-vals are correctly handled
+ assert random.multinomial(10, []).shape == (0,)
+ assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0)
+
+
+def test_multinomial_1d_pval():
+ # gh-20483
+ with pytest.raises(TypeError, match="pvals must be a 1-d"):
+ random.multinomial(10, 0.3)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_regression.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_regression.py
new file mode 100644
index 00000000..8bf41987
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_regression.py
@@ -0,0 +1,149 @@
+import sys
+from numpy.testing import (
+ assert_, assert_array_equal, assert_raises,
+ )
+from numpy import random
+import numpy as np
+
+
+class TestRegression:
+
+ def test_VonMises_range(self):
+ # Make sure generated random variables are in (-pi, pi].
+ # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5):
+ r = random.mtrand.vonmises(mu, 1, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
+ assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
+
+ # Test for ticket #5623
+ args = [
+ (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
+ ]
+ is_64bits = sys.maxsize > 2**32
+ if is_64bits and sys.platform != 'win32':
+ # Check for 64-bit systems
+ args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
+ for arg in args:
+ assert_(np.random.hypergeometric(*arg) > 0)
+
+ def test_logseries_convergence(self):
+ # Test for ticket #923
+ N = 1000
+ np.random.seed(0)
+ rvsn = np.random.logseries(0.8, size=N)
+ # these two frequency counts should be close to theoretical
+ # numbers with this large sample
+ # theoretical large N result is 0.49706795
+ freq = np.sum(rvsn == 1) / N
+ msg = f'Frequency was {freq:f}, should be > 0.45'
+ assert_(freq > 0.45, msg)
+ # theoretical large N result is 0.19882718
+ freq = np.sum(rvsn == 2) / N
+ msg = f'Frequency was {freq:f}, should be < 0.23'
+ assert_(freq < 0.23, msg)
+
+ def test_shuffle_mixed_dimension(self):
+ # Test for trac ticket #2074
+ for t in [[1, 2, 3, None],
+ [(1, 1), (2, 2), (3, 3), None],
+ [1, (2, 2), (3, 3), None],
+ [(1, 1), 2, 3, None]]:
+ np.random.seed(12345)
+ shuffled = list(t)
+ random.shuffle(shuffled)
+ expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
+ assert_array_equal(np.array(shuffled, dtype=object), expected)
+
+ def test_call_within_randomstate(self):
+ # Check that custom RandomState does not call into global state
+ m = np.random.RandomState()
+ res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
+ for i in range(3):
+ np.random.seed(i)
+ m.seed(4321)
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ np.random.multivariate_normal([0], [[0]], size=1)
+ np.random.multivariate_normal([0], [[0]], size=np.int_(1))
+ np.random.multivariate_normal([0], [[0]], size=np.int64(1))
+
+ def test_beta_small_parameters(self):
+ # Test that beta with small a and b parameters does not produce
+ # NaNs due to roundoff errors causing 0 / 0, gh-5851
+ np.random.seed(1234567890)
+ x = np.random.beta(0.0001, 0.0001, size=100)
+ assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')
+
+ def test_choice_sum_of_probs_tolerance(self):
+ # The sum of probs should be 1.0 with some tolerance.
+ # For low precision dtypes the tolerance was too tight.
+ # See numpy github issue 6123.
+ np.random.seed(1234)
+ a = [1, 2, 3]
+ counts = [4, 4, 2]
+ for dt in np.float16, np.float32, np.float64:
+ probs = np.array(counts, dtype=dt) / sum(counts)
+ c = np.random.choice(a, p=probs)
+ assert_(c in a)
+ assert_raises(ValueError, np.random.choice, a, p=probs*0.9)
+
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ np.random.seed(1234)
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ np.random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ np.random.seed(1234)
+ a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+ for _ in range(1000):
+ np.random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_permutation_subclass(self):
+ class N(np.ndarray):
+ pass
+
+ np.random.seed(1)
+ orig = np.arange(3).view(N)
+ perm = np.random.permutation(orig)
+ assert_array_equal(perm, np.array([0, 2, 1]))
+ assert_array_equal(orig, np.arange(3).view(N))
+
+ class M:
+ a = np.arange(5)
+
+ def __array__(self):
+ return self.a
+
+ np.random.seed(1)
+ m = M()
+ perm = np.random.permutation(m)
+ assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+ assert_array_equal(m.__array__(), np.arange(5))
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py
new file mode 100644
index 00000000..f08cf80f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py
@@ -0,0 +1,80 @@
+import numpy as np
+from numpy.testing import assert_array_equal, assert_array_compare
+
+from numpy.random import SeedSequence
+
+
+def test_reference_data():
+ """ Check that SeedSequence generates data the same as the C++ reference.
+
+ https://gist.github.com/imneme/540829265469e673d045
+ """
+ inputs = [
+ [3735928559, 195939070, 229505742, 305419896],
+ [3668361503, 4165561550, 1661411377, 3634257570],
+ [164546577, 4166754639, 1765190214, 1303880213],
+ [446610472, 3941463886, 522937693, 1882353782],
+ [1864922766, 1719732118, 3882010307, 1776744564],
+ [4141682960, 3310988675, 553637289, 902896340],
+ [1134851934, 2352871630, 3699409824, 2648159817],
+ [1240956131, 3107113773, 1283198141, 1924506131],
+ [2669565031, 579818610, 3042504477, 2774880435],
+ [2766103236, 2883057919, 4029656435, 862374500],
+ ]
+ outputs = [
+ [3914649087, 576849849, 3593928901, 2229911004],
+ [2240804226, 3691353228, 1365957195, 2654016646],
+ [3562296087, 3191708229, 1147942216, 3726991905],
+ [1403443605, 3591372999, 1291086759, 441919183],
+ [1086200464, 2191331643, 560336446, 3658716651],
+ [3249937430, 2346751812, 847844327, 2996632307],
+ [2584285912, 4034195531, 3523502488, 169742686],
+ [959045797, 3875435559, 1886309314, 359682705],
+ [3978441347, 432478529, 3223635119, 138903045],
+ [296367413, 4262059219, 13109864, 3283683422],
+ ]
+ outputs64 = [
+ [2477551240072187391, 9577394838764454085],
+ [15854241394484835714, 11398914698975566411],
+ [13708282465491374871, 16007308345579681096],
+ [15424829579845884309, 1898028439751125927],
+ [9411697742461147792, 15714068361935982142],
+ [10079222287618677782, 12870437757549876199],
+ [17326737873898640088, 729039288628699544],
+ [16644868984619524261, 1544825456798124994],
+ [1857481142255628931, 596584038813451439],
+ [18305404959516669237, 14103312907920476776],
+ ]
+ for seed, expected, expected64 in zip(inputs, outputs, outputs64):
+ expected = np.array(expected, dtype=np.uint32)
+ ss = SeedSequence(seed)
+ state = ss.generate_state(len(expected))
+ assert_array_equal(state, expected)
+ state64 = ss.generate_state(len(expected64), dtype=np.uint64)
+ assert_array_equal(state64, expected64)
+
+
+def test_zero_padding():
+ """ Ensure that the implicit zero-padding does not cause problems.
+ """
+ # Ensure that large integers are inserted in little-endian fashion to avoid
+ # trailing 0s.
+ ss0 = SeedSequence(42)
+ ss1 = SeedSequence(42 << 32)
+ assert_array_compare(
+ np.not_equal,
+ ss0.generate_state(4),
+ ss1.generate_state(4))
+
+ # Ensure backwards compatibility with the original 1.17 release for small
+ # integers and no spawn key.
+ expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988],
+ dtype=np.uint32)
+ assert_array_equal(SeedSequence(42).generate_state(4), expected42)
+
+ # Regression test for gh-16539 to ensure that the implicit 0s don't
+ # conflict with spawn keys.
+ assert_array_compare(
+ np.not_equal,
+ SeedSequence(42, spawn_key=(0,)).generate_state(4),
+ expected42)
diff --git a/venv/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py b/venv/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py
new file mode 100644
index 00000000..9becc434
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/random/tests/test_smoke.py
@@ -0,0 +1,818 @@
+import pickle
+from functools import partial
+
+import numpy as np
+import pytest
+from numpy.testing import assert_equal, assert_, assert_array_equal
+from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64)
+
+@pytest.fixture(scope='module',
+ params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64))
+def dtype(request):
+ return request.param
+
+
+def params_0(f):
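+ # Smoke-check a zero-parameter sampler: scalar return with no arguments,
+ # and correctly shaped arrays for int, tuple, and keyword size arguments.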
+ val = f()
+ assert_(np.isscalar(val))
+ val = f(10)
+ assert_(val.shape == (10,))
+ val = f((10, 10))
+ assert_(val.shape == (10, 10))
+ val = f((10, 10, 10))
+ assert_(val.shape == (10, 10, 10))
+ val = f(size=(5, 5))
+ assert_(val.shape == (5, 5))
+
+
+def params_1(f, bounded=False):
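+ # Smoke-check a one-parameter sampler with scalar, 1d, 2d, and 3d
+ # arguments, with and without an explicit size; `bounded` rescales the
+ # arguments to lie in (0, 1) for probability-like parameters.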
+ a = 5.0
+ b = np.arange(2.0, 12.0)
+ c = np.arange(2.0, 102.0).reshape((10, 10))
+ d = np.arange(2.0, 1002.0).reshape((10, 10, 10))
+ e = np.array([2.0, 3.0])
+ g = np.arange(2.0, 12.0).reshape((1, 10, 1))
+ if bounded:
+ a = 0.5
+ b = b / (1.5 * b.max())
+ c = c / (1.5 * c.max())
+ d = d / (1.5 * d.max())
+ e = e / (1.5 * e.max())
+ g = g / (1.5 * g.max())
+
+ # Scalar
+ f(a)
+ # Scalar - size
+ f(a, size=(10, 10))
+ # 1d
+ f(b)
+ # 2d
+ f(c)
+ # 3d
+ f(d)
+ # 1d size
+ f(b, size=10)
+ # 2d - size - broadcast
+ f(e, size=(10, 2))
+ # 3d - size
+ f(g, size=(10, 10, 10))
+
+
+def comp_state(state1, state2):
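+ # Recursively compare two bit-generator states; returns True only when
+ # every nested field matches in type and value.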
+ identical = True
+ if isinstance(state1, dict):
+ for key in state1:
+ identical &= comp_state(state1[key], state2[key])
+ elif type(state1) != type(state2):
+ identical &= type(state1) == type(state2)
+ else:
+ if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance(
+ state2, (list, tuple, np.ndarray))):
+ for s1, s2 in zip(state1, state2):
+ identical &= comp_state(s1, s2)
+ else:
+ identical &= state1 == state2
+ return identical
+
+
+def warmup(rg, n=None):
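+ # Advance the generator through several distinct generation paths
+ # (normals, integers, gammas, uniforms) in both precisions.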
+ if n is None:
+ n = 11 + np.random.randint(0, 20)
+ rg.standard_normal(n)
+ rg.standard_normal(n)
+ rg.standard_normal(n, dtype=np.float32)
+ rg.standard_normal(n, dtype=np.float32)
+ rg.integers(0, 2 ** 24, n, dtype=np.uint64)
+ rg.integers(0, 2 ** 48, n, dtype=np.uint64)
+ rg.standard_gamma(11.0, n)
+ rg.standard_gamma(11.0, n, dtype=np.float32)
+ rg.random(n, dtype=np.float64)
+ rg.random(n, dtype=np.float32)
+
+
+class RNG:
+ @classmethod
+ def setup_class(cls):
+ # Overridden in test classes. Placeholder to silence IDE noise.
+ cls.bit_generator = PCG64
+ cls.advance = None
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+ @classmethod
+ def _extra_setup(cls):
+ cls.vec_1d = np.arange(2.0, 102.0)
+ cls.vec_2d = np.arange(2.0, 102.0)[None, :]
+ cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
+ cls.seed_error = TypeError
+
+ def _reset_state(self):
+ self.rg.bit_generator.state = self.initial_state
+
+ def test_init(self):
+ rg = Generator(self.bit_generator())
+ state = rg.bit_generator.state
+ rg.standard_normal(1)
+ rg.standard_normal(1)
+ rg.bit_generator.state = state
+ new_state = rg.bit_generator.state
+ assert_(comp_state(state, new_state))
+
+ def test_advance(self):
+ state = self.rg.bit_generator.state
+ if hasattr(self.rg.bit_generator, 'advance'):
+ self.rg.bit_generator.advance(self.advance)
+ assert_(not comp_state(state, self.rg.bit_generator.state))
+ else:
+ bitgen_name = self.rg.bit_generator.__class__.__name__
+ pytest.skip(f'Advance is not supported by {bitgen_name}')
+
+ def test_jump(self):
+ state = self.rg.bit_generator.state
+ if hasattr(self.rg.bit_generator, 'jumped'):
+ bit_gen2 = self.rg.bit_generator.jumped()
+ jumped_state = bit_gen2.state
+ assert_(not comp_state(state, jumped_state))
+ self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
+ self.rg.bit_generator.state = state
+ bit_gen3 = self.rg.bit_generator.jumped()
+ rejumped_state = bit_gen3.state
+ assert_(comp_state(jumped_state, rejumped_state))
+ else:
+ bitgen_name = self.rg.bit_generator.__class__.__name__
+ if bitgen_name not in ('SFC64',):
+ raise AttributeError(f'no "jumped" in {bitgen_name}')
+ pytest.skip(f'Jump is not supported by {bitgen_name}')
+
+ def test_uniform(self):
+ r = self.rg.uniform(-1.0, 0.0, size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+
+ def test_uniform_array(self):
+ r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+ r = self.rg.uniform(np.array([-1.0] * 10),
+ np.array([0.0] * 10), size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+ r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+
+ def test_random(self):
+ assert_(len(self.rg.random(10)) == 10)
+ params_0(self.rg.random)
+
+ def test_standard_normal_zig(self):
+ assert_(len(self.rg.standard_normal(10)) == 10)
+
+ def test_standard_normal(self):
+ assert_(len(self.rg.standard_normal(10)) == 10)
+ params_0(self.rg.standard_normal)
+
+ def test_standard_gamma(self):
+ assert_(len(self.rg.standard_gamma(10, 10)) == 10)
+ assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
+ params_1(self.rg.standard_gamma)
+
+ def test_standard_exponential(self):
+ assert_(len(self.rg.standard_exponential(10)) == 10)
+ params_0(self.rg.standard_exponential)
+
+ def test_standard_exponential_float(self):
+ randoms = self.rg.standard_exponential(10, dtype='float32')
+ assert_(len(randoms) == 10)
+ assert randoms.dtype == np.float32
+ params_0(partial(self.rg.standard_exponential, dtype='float32'))
+
+ def test_standard_exponential_float_log(self):
+ randoms = self.rg.standard_exponential(10, dtype='float32',
+ method='inv')
+ assert_(len(randoms) == 10)
+ assert randoms.dtype == np.float32
+ params_0(partial(self.rg.standard_exponential, dtype='float32',
+ method='inv'))
+
+ def test_standard_cauchy(self):
+ assert_(len(self.rg.standard_cauchy(10)) == 10)
+ params_0(self.rg.standard_cauchy)
+
+ def test_standard_t(self):
+ assert_(len(self.rg.standard_t(10, 10)) == 10)
+ params_1(self.rg.standard_t)
+
+ def test_binomial(self):
+ assert_(self.rg.binomial(10, .5) >= 0)
+ assert_(self.rg.binomial(1000, .5) >= 0)
+
+ def test_reset_state(self):
+ state = self.rg.bit_generator.state
+ int_1 = self.rg.integers(2**31)
+ self.rg.bit_generator.state = state
+ int_2 = self.rg.integers(2**31)
+ assert_(int_1 == int_2)
+
+ def test_entropy_init(self):
+ rg = Generator(self.bit_generator())
+ rg2 = Generator(self.bit_generator())
+ assert_(not comp_state(rg.bit_generator.state,
+ rg2.bit_generator.state))
+
+ def test_seed(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg2 = Generator(self.bit_generator(*self.seed))
+ rg.random()
+ rg2.random()
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_reset_state_gauss(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg.standard_normal()
+ state = rg.bit_generator.state
+ n1 = rg.standard_normal(size=10)
+ rg2 = Generator(self.bit_generator())
+ rg2.bit_generator.state = state
+ n2 = rg2.standard_normal(size=10)
+ assert_array_equal(n1, n2)
+
+ def test_reset_state_uint32(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
+ state = rg.bit_generator.state
+ n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
+ rg2 = Generator(self.bit_generator())
+ rg2.bit_generator.state = state
+ n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
+ assert_array_equal(n1, n2)
+
+ def test_reset_state_float(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg.random(dtype='float32')
+ state = rg.bit_generator.state
+ n1 = rg.random(size=10, dtype='float32')
+ rg2 = Generator(self.bit_generator())
+ rg2.bit_generator.state = state
+ n2 = rg2.random(size=10, dtype='float32')
+ assert_((n1 == n2).all())
+
+ def test_shuffle(self):
+ original = np.arange(200, 0, -1)
+ shuffled = original.copy()
+ self.rg.shuffle(shuffled)
+ assert_((original != shuffled).any())
+
+ def test_permutation(self):
+ original = np.arange(200, 0, -1)
+ permuted = self.rg.permutation(original)
+ assert_((original != permuted).any())
+
+ def test_beta(self):
+ vals = self.rg.beta(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(np.array([2.0] * 10), 2.0)
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(2.0, np.array([2.0] * 10))
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
+ assert_(vals.shape == (10, 10))
+
+ def test_bytes(self):
+ vals = self.rg.bytes(10)
+ assert_(len(vals) == 10)
+
+ def test_chisquare(self):
+ vals = self.rg.chisquare(2.0, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.chisquare)
+
+ def test_exponential(self):
+ vals = self.rg.exponential(2.0, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.exponential)
+
+ def test_f(self):
+ vals = self.rg.f(3, 1000, 10)
+ assert_(len(vals) == 10)
+
+ def test_gamma(self):
+ vals = self.rg.gamma(3, 2, 10)
+ assert_(len(vals) == 10)
+
+ def test_geometric(self):
+ vals = self.rg.geometric(0.5, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.geometric, bounded=True)
+
+ def test_gumbel(self):
+ vals = self.rg.gumbel(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_laplace(self):
+ vals = self.rg.laplace(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_logistic(self):
+ vals = self.rg.logistic(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_logseries(self):
+ vals = self.rg.logseries(0.5, 10)
+ assert_(len(vals) == 10)
+
+ def test_negative_binomial(self):
+ vals = self.rg.negative_binomial(10, 0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_noncentral_chisquare(self):
+ vals = self.rg.noncentral_chisquare(10, 2, 10)
+ assert_(len(vals) == 10)
+
+ def test_noncentral_f(self):
+ vals = self.rg.noncentral_f(3, 1000, 2, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
+ assert_(len(vals) == 10)
+ vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
+ assert_(len(vals) == 10)
+ vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
+ assert_(len(vals) == 10)
+
+ def test_normal(self):
+ vals = self.rg.normal(10, 0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_pareto(self):
+ vals = self.rg.pareto(3.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_poisson(self):
+ vals = self.rg.poisson(10, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.poisson(np.array([10] * 10))
+ assert_(len(vals) == 10)
+ params_1(self.rg.poisson)
+
+ def test_power(self):
+ vals = self.rg.power(0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_integers(self):
+ vals = self.rg.integers(10, 20, 10)
+ assert_(len(vals) == 10)
+
+ def test_rayleigh(self):
+ vals = self.rg.rayleigh(0.2, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.rayleigh, bounded=True)
+
+ def test_vonmises(self):
+ vals = self.rg.vonmises(10, 0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_wald(self):
+ vals = self.rg.wald(1.0, 1.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_weibull(self):
+ vals = self.rg.weibull(1.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_zipf(self):
+ vals = self.rg.zipf(10, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.zipf(self.vec_1d)
+ assert_(len(vals) == 100)
+ vals = self.rg.zipf(self.vec_2d)
+ assert_(vals.shape == (1, 100))
+ vals = self.rg.zipf(self.mat)
+ assert_(vals.shape == (100, 100))
+
+ def test_hypergeometric(self):
+ vals = self.rg.hypergeometric(25, 25, 20)
+ assert_(np.isscalar(vals))
+ vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
+ assert_(vals.shape == (10,))
+
+ def test_triangular(self):
+ vals = self.rg.triangular(-5, 0, 5)
+ assert_(np.isscalar(vals))
+ vals = self.rg.triangular(-5, np.array([0] * 10), 5)
+ assert_(vals.shape == (10,))
+
+ def test_multivariate_normal(self):
+ mean = [0, 0]
+ cov = [[1, 0], [0, 100]] # diagonal covariance
+ x = self.rg.multivariate_normal(mean, cov, 5000)
+ assert_(x.shape == (5000, 2))
+ x_zig = self.rg.multivariate_normal(mean, cov, 5000)
+ assert_(x_zig.shape == (5000, 2))
+ x_inv = self.rg.multivariate_normal(mean, cov, 5000)
+ assert_(x_inv.shape == (5000, 2))
+ assert_((x_zig != x_inv).any())
+
+ def test_multinomial(self):
+ vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
+ assert_(vals.shape == (2,))
+ vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
+ assert_(vals.shape == (10, 2))
+
+ def test_dirichlet(self):
+ s = self.rg.dirichlet((10, 5, 3), 20)
+ assert_(s.shape == (20, 3))
+
+ def test_pickle(self):
+ pick = pickle.dumps(self.rg)
+ unpick = pickle.loads(pick)
+ assert_((type(self.rg) == type(unpick)))
+ assert_(comp_state(self.rg.bit_generator.state,
+ unpick.bit_generator.state))
+
+ pick = pickle.dumps(self.rg)
+ unpick = pickle.loads(pick)
+ assert_((type(self.rg) == type(unpick)))
+ assert_(comp_state(self.rg.bit_generator.state,
+ unpick.bit_generator.state))
+
+ def test_seed_array(self):
+ if self.seed_vector_bits is None:
+ bitgen_name = self.bit_generator.__name__
+ pytest.skip(f'Vector seeding is not supported by {bitgen_name}')
+
+ if self.seed_vector_bits == 32:
+ dtype = np.uint32
+ else:
+ dtype = np.uint64
+ seed = np.array([1], dtype=dtype)
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(1)
+ state2 = bg.state
+ assert_(comp_state(state1, state2))
+
+ seed = np.arange(4, dtype=dtype)
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(seed[0])
+ state2 = bg.state
+ assert_(not comp_state(state1, state2))
+
+ seed = np.arange(1500, dtype=dtype)
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(seed[0])
+ state2 = bg.state
+ assert_(not comp_state(state1, state2))
+
+ seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
+ self.seed_vector_bits - 1) + 1
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(seed[0])
+ state2 = bg.state
+ assert_(not comp_state(state1, state2))
+
+ def test_uniform_float(self):
+ rg = Generator(self.bit_generator(12345))
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.random(11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.random(11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_gamma_floats(self):
+ rg = Generator(self.bit_generator())
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_normal_floats(self):
+ rg = Generator(self.bit_generator())
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.standard_normal(11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.standard_normal(11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_normal_zig_floats(self):
+ rg = Generator(self.bit_generator())
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.standard_normal(11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.standard_normal(11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_output_fill(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ rg.bit_generator.state = state
+ rg.standard_normal(out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_normal(size=size)
+ assert_equal(direct, existing)
+
+ sized = np.empty(size)
+ rg.bit_generator.state = state
+ rg.standard_normal(out=sized, size=sized.shape)
+
+ existing = np.empty(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_normal(out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_normal(size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_uniform(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ rg.bit_generator.state = state
+ rg.random(out=existing)
+ rg.bit_generator.state = state
+ direct = rg.random(size=size)
+ assert_equal(direct, existing)
+
+ existing = np.empty(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.random(out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.random(size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_exponential(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ rg.bit_generator.state = state
+ rg.standard_exponential(out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_exponential(size=size)
+ assert_equal(direct, existing)
+
+ existing = np.empty(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_exponential(out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_exponential(size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_gamma(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.zeros(size)
+ rg.bit_generator.state = state
+ rg.standard_gamma(1.0, out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(1.0, size=size)
+ assert_equal(direct, existing)
+
+ existing = np.zeros(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_gamma(1.0, out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_gamma_broadcast(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ mu = np.arange(97.0) + 1.0
+ existing = np.zeros(size)
+ rg.bit_generator.state = state
+ rg.standard_gamma(mu, out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(mu, size=size)
+ assert_equal(direct, existing)
+
+ existing = np.zeros(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_gamma(mu, out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(mu, size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_fill_error(self):
+ rg = self.rg
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ with pytest.raises(TypeError):
+ rg.standard_normal(out=existing, dtype=np.float32)
+ with pytest.raises(ValueError):
+ rg.standard_normal(out=existing[::3])
+ existing = np.empty(size, dtype=np.float32)
+ with pytest.raises(TypeError):
+ rg.standard_normal(out=existing, dtype=np.float64)
+
+ existing = np.zeros(size, dtype=np.float32)
+ with pytest.raises(TypeError):
+ rg.standard_gamma(1.0, out=existing, dtype=np.float64)
+ with pytest.raises(ValueError):
+ rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)
+ existing = np.zeros(size, dtype=np.float64)
+ with pytest.raises(TypeError):
+ rg.standard_gamma(1.0, out=existing, dtype=np.float32)
+ with pytest.raises(ValueError):
+ rg.standard_gamma(1.0, out=existing[::3])
+
+ def test_integers_broadcast(self, dtype):
+ if dtype == np.bool_:
+ upper = 2
+ lower = 0
+ else:
+ info = np.iinfo(dtype)
+ upper = int(info.max) + 1
+ lower = info.min
+ self._reset_state()
+ a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
+ self._reset_state()
+ b = self.rg.integers([lower] * 10, upper, dtype=dtype)
+ assert_equal(a, b)
+ self._reset_state()
+ c = self.rg.integers(lower, upper, size=10, dtype=dtype)
+ assert_equal(a, c)
+ self._reset_state()
+ d = self.rg.integers(np.array(
+ [lower] * 10), np.array([upper], dtype=object), size=10,
+ dtype=dtype)
+ assert_equal(a, d)
+ self._reset_state()
+ e = self.rg.integers(
+ np.array([lower] * 10), np.array([upper] * 10), size=10,
+ dtype=dtype)
+ assert_equal(a, e)
+
+ self._reset_state()
+ a = self.rg.integers(0, upper, size=10, dtype=dtype)
+ self._reset_state()
+ b = self.rg.integers([upper] * 10, dtype=dtype)
+ assert_equal(a, b)
+
+ def test_integers_numpy(self, dtype):
+ high = np.array([1])
+ low = np.array([0])
+
+ out = self.rg.integers(low, high, dtype=dtype)
+ assert out.shape == (1,)
+
+ out = self.rg.integers(low[0], high, dtype=dtype)
+ assert out.shape == (1,)
+
+ out = self.rg.integers(low, high[0], dtype=dtype)
+ assert out.shape == (1,)
+
+ def test_integers_broadcast_errors(self, dtype):
+ if dtype == np.bool_:
+ upper = 2
+ lower = 0
+ else:
+ info = np.iinfo(dtype)
+ upper = int(info.max) + 1
+ lower = info.min
+ with pytest.raises(ValueError):
+ self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
+ with pytest.raises(ValueError):
+ self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
+ with pytest.raises(ValueError):
+ self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
+ with pytest.raises(ValueError):
+ self.rg.integers([0], [0], dtype=dtype)
+
+
+class TestMT19937(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = MT19937
+ cls.advance = None
+ cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 32
+ cls._extra_setup()
+ cls.seed_error = ValueError
+
+ def test_numpy_state(self):
+ nprg = np.random.RandomState()
+ nprg.standard_normal(99)
+ state = nprg.get_state()
+ self.rg.bit_generator.state = state
+ state2 = self.rg.bit_generator.state
+ assert_((state[1] == state2['state']['key']).all())
+ assert_((state[2] == state2['state']['pos']))
+
+
+class TestPhilox(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = Philox
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+
+class TestSFC64(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = SFC64
+ cls.advance = None
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 192
+ cls._extra_setup()
+
+
+class TestPCG64(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+
+class TestPCG64DXSM(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64DXSM
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+
+class TestDefaultRNG(RNG):
+ @classmethod
+ def setup_class(cls):
+ # This will duplicate some tests that directly instantiate a fresh
+ # Generator(), but that's okay.
+ cls.bit_generator = PCG64
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = np.random.default_rng(*cls.seed)
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+ def test_default_is_pcg64(self):
+ # In order to change the default BitGenerator, we'll go through
+ # a deprecation cycle to move to a different function.
+ assert_(isinstance(self.rg.bit_generator, PCG64))
+
+ def test_seed(self):
+ np.random.default_rng()
+ np.random.default_rng(None)
+ np.random.default_rng(12345)
+ np.random.default_rng(0)
+ np.random.default_rng(43660444402423911716352051725018508569)
+ np.random.default_rng([43660444402423911716352051725018508569,
+ 279705150948142787361475340226491943209])
+ with pytest.raises(ValueError):
+ np.random.default_rng(-1)
+ with pytest.raises(ValueError):
+ np.random.default_rng([12345, -1])
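+
+# Editorial note (added, hedged): default_rng feeds its seed argument
+# through SeedSequence, which is why the arbitrarily large integers and
+# integer sequences above are accepted while negative values raise
+# ValueError. A minimal equivalent-seeding sketch:
+#
+# from numpy.random import Generator, PCG64, SeedSequence
+# rg = Generator(PCG64(SeedSequence(12345)))  # same stream as default_rng(12345)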
diff --git a/venv/lib/python3.9/site-packages/numpy/setup.py b/venv/lib/python3.9/site-packages/numpy/setup.py
new file mode 100644
index 00000000..28c28d1a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/setup.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('numpy', parent_package, top_path)
+
+ config.add_subpackage('array_api')
+ config.add_subpackage('compat')
+ config.add_subpackage('core')
+ config.add_subpackage('distutils')
+ config.add_subpackage('doc')
+ config.add_subpackage('f2py')
+ config.add_subpackage('fft')
+ config.add_subpackage('lib')
+ config.add_subpackage('linalg')
+ config.add_subpackage('ma')
+ config.add_subpackage('matrixlib')
+ config.add_subpackage('polynomial')
+ config.add_subpackage('random')
+ config.add_subpackage('testing')
+ config.add_subpackage('typing')
+ config.add_subpackage('_typing')
+ config.add_data_dir('doc')
+ config.add_data_files('py.typed')
+ config.add_data_files('*.pyi')
+ config.add_subpackage('tests')
+ config.add_subpackage('_pyinstaller')
+ config.make_config_py() # installs __config__.py
+ return config
+
+if __name__ == '__main__':
+ print('This is the wrong setup.py file to run')
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/__init__.py b/venv/lib/python3.9/site-packages/numpy/testing/__init__.py
new file mode 100644
index 00000000..087527e4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/__init__.py
@@ -0,0 +1,22 @@
+"""Common test support for all numpy test scripts.
+
+This single module should provide all the common functionality for numpy tests
+in a single location, so that test scripts can just import it and work right
+away.
+
+"""
+from unittest import TestCase
+
+from . import _private
+from ._private.utils import *
+from ._private.utils import (_assert_valid_refcount, _gen_alignment_data)
+from ._private import extbuild, decorators as dec
+from ._private.nosetester import (
+ run_module_suite, NoseTester as Tester
+ )
+
+__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite']
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/__init__.pyi b/venv/lib/python3.9/site-packages/numpy/testing/__init__.pyi
new file mode 100644
index 00000000..a981d611
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/__init__.pyi
@@ -0,0 +1,56 @@
+from numpy._pytesttester import PytestTester
+
+from unittest import (
+ TestCase as TestCase,
+)
+
+from numpy.testing._private.utils import (
+ assert_equal as assert_equal,
+ assert_almost_equal as assert_almost_equal,
+ assert_approx_equal as assert_approx_equal,
+ assert_array_equal as assert_array_equal,
+ assert_array_less as assert_array_less,
+ assert_string_equal as assert_string_equal,
+ assert_array_almost_equal as assert_array_almost_equal,
+ assert_raises as assert_raises,
+ build_err_msg as build_err_msg,
+ decorate_methods as decorate_methods,
+ jiffies as jiffies,
+ memusage as memusage,
+ print_assert_equal as print_assert_equal,
+ raises as raises,
+ rundocs as rundocs,
+ runstring as runstring,
+ verbose as verbose,
+ measure as measure,
+ assert_ as assert_,
+ assert_array_almost_equal_nulp as assert_array_almost_equal_nulp,
+ assert_raises_regex as assert_raises_regex,
+ assert_array_max_ulp as assert_array_max_ulp,
+ assert_warns as assert_warns,
+ assert_no_warnings as assert_no_warnings,
+ assert_allclose as assert_allclose,
+ IgnoreException as IgnoreException,
+ clear_and_catch_warnings as clear_and_catch_warnings,
+ SkipTest as SkipTest,
+ KnownFailureException as KnownFailureException,
+ temppath as temppath,
+ tempdir as tempdir,
+ IS_PYPY as IS_PYPY,
+ IS_PYSTON as IS_PYSTON,
+ HAS_REFCOUNT as HAS_REFCOUNT,
+ suppress_warnings as suppress_warnings,
+ assert_array_compare as assert_array_compare,
+ assert_no_gc_cycles as assert_no_gc_cycles,
+ break_cycles as break_cycles,
+ HAS_LAPACK64 as HAS_LAPACK64,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+def run_module_suite(
+ file_to_run: None | str = ...,
+ argv: None | list[str] = ...,
+) -> None: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/__init__.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/decorators.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/decorators.py
new file mode 100644
index 00000000..cb49d9a7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/decorators.py
@@ -0,0 +1,331 @@
+"""
+Decorators for labeling and modifying behavior of test objects.
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+::
+
+ nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.
+
+"""
+import collections.abc
+import warnings
+
+from .utils import SkipTest, assert_warns, HAS_REFCOUNT
+
+__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated',
+ 'parametrize', '_needs_refcount',]
+
+
+def slow(t):
+ """
+ .. deprecated:: 1.21
+ This decorator is retained for compatibility with the nose testing framework, which is being phased out.
+ Please use the nose2 or pytest frameworks instead.
+
+ Label a test as 'slow'.
+
+ The exact definition of a slow test is obviously both subjective and
+ hardware-dependent, but in general any individual test that requires more
+ than a second or two should be labeled as slow (the whole suite consists of
+ thousands of tests, so even a second is significant).
+
+ Parameters
+ ----------
+ t : callable
+ The test to label as slow.
+
+ Returns
+ -------
+ t : callable
+ The decorated test `t`.
+
+ Examples
+ --------
+ The `numpy.testing` module includes ``import decorators as dec``.
+ A test can be decorated as slow like this::
+
+ from numpy.testing import *
+
+ @dec.slow
+ def test_big(self):
+ print('Big, slow test')
+
+ """
+ # Numpy 1.21, 2020-12-20
+ warnings.warn('the np.testing.dec decorators are included for nose support, and are '
+ 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
+
+ t.slow = True
+ return t
+
+def setastest(tf=True):
+ """
+ .. deprecated:: 1.21
+ This decorator is retained for compatibility with the nose testing framework, which is being phased out.
+ Please use the nose2 or pytest frameworks instead.
+
+ Signals to nose that this function is or is not a test.
+
+ Parameters
+ ----------
+ tf : bool
+ If True, specifies that the decorated callable is a test.
+ If False, specifies that the decorated callable is not a test.
+ Default is True.
+
+ Notes
+ -----
+ This decorator can't use the nose namespace, because it can be
+ called from a non-test module. See also ``istest`` and ``nottest`` in
+ ``nose.tools``.
+
+ Examples
+ --------
+ `setastest` can be used in the following way::
+
+ from numpy.testing import dec
+
+ @dec.setastest(False)
+ def func_with_test_in_name(arg1, arg2):
+ pass
+
+ """
+ # Numpy 1.21, 2020-12-20
+ warnings.warn('the np.testing.dec decorators are included for nose support, and are '
+ 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
+ def set_test(t):
+ t.__test__ = tf
+ return t
+ return set_test
+
+def skipif(skip_condition, msg=None):
+ """
+ .. deprecated:: 1.21
+ This decorator is retained for compatibility with the nose testing framework, which is being phased out.
+ Please use the nose2 or pytest frameworks instead.
+
+ Make function raise SkipTest exception if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ skip_condition : bool or callable
+ Flag to determine whether to skip the decorated test.
+ msg : str, optional
+ Message to give on raising a SkipTest exception. Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator which, when applied to a function, causes SkipTest
+ to be raised when `skip_condition` is True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+
+ def skip_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Numpy 1.21, 2020-12-20
+ warnings.warn('the np.testing.dec decorators are included for nose support, and are '
+ 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
+
+ # Allow for both boolean or callable skip conditions.
+ if isinstance(skip_condition, collections.abc.Callable):
+ skip_val = lambda: skip_condition()
+ else:
+ skip_val = lambda: skip_condition
+
+ def get_msg(func,msg=None):
+ """Skip message with information about function being skipped."""
+ if msg is None:
+ out = 'Test skipped due to test condition'
+ else:
+ out = msg
+
+ return f'Skipping test: {func.__name__}: {out}'
+
+ # We need to define *two* skippers because Python doesn't allow both
+ # return with value and yield inside the same function.
+ def skipper_func(*args, **kwargs):
+ """Skipper for normal test functions."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ return f(*args, **kwargs)
+
+ def skipper_gen(*args, **kwargs):
+ """Skipper for test generators."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ yield from f(*args, **kwargs)
+
+ # Choose the right skipper to use when building the actual decorator.
+ if nose.util.isgenerator(f):
+ skipper = skipper_gen
+ else:
+ skipper = skipper_func
+
+ return nose.tools.make_decorator(f)(skipper)
+
+ return skip_decorator
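+
+# Editorial usage sketch (added, not part of the original module): skip a
+# test unless a condition holds; the callable form defers evaluation until
+# the suite actually runs.
+#
+# import sys
+# from numpy.testing import dec
+#
+# @dec.skipif(sys.platform == 'win32', 'POSIX-only behaviour')
+# def test_posix_only():
+#     ...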
+
+
+def knownfailureif(fail_condition, msg=None):
+ """
+ .. deprecated:: 1.21
+ This decorator is retained for compatibility with the nose testing framework, which is being phased out.
+ Please use the nose2 or pytest frameworks instead.
+
+ Make a function raise a KnownFailureException if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ fail_condition : bool or callable
+ Flag to determine whether to mark the decorated test as a known
+ failure (if True) or not (if False).
+ msg : str, optional
+ Message to give on raising a KnownFailureException exception.
+ Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes
+ KnownFailureException to be raised when `fail_condition` is True,
+ and the function to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+ # Numpy 1.21, 2020-12-20
+ warnings.warn('the np.testing.dec decorators are included for nose support, and are '
+ 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
+
+ if msg is None:
+ msg = 'Test marked as a known failure'
+
+ # Allow for both boolean or callable known failure conditions.
+ if isinstance(fail_condition, collections.abc.Callable):
+ fail_val = lambda: fail_condition()
+ else:
+ fail_val = lambda: fail_condition
+
+ def knownfail_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+ from .noseclasses import KnownFailureException
+
+ def knownfailer(*args, **kwargs):
+ if fail_val():
+ raise KnownFailureException(msg)
+ else:
+ return f(*args, **kwargs)
+ return nose.tools.make_decorator(f)(knownfailer)
+
+ return knownfail_decorator
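+
+# Editorial usage sketch (added, hedged): mark a test that is expected to
+# fail under a condition; KnownFailurePlugin in noseclasses.py reports the
+# resulting KnownFailureException as KNOWNFAIL rather than an error.
+#
+# import sys
+#
+# @knownfailureif(sys.platform == 'win32', 'known to fail on Windows')
+# def test_unix_behaviour():
+#     ...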
+
+def deprecated(conditional=True):
+ """
+ .. deprecated:: 1.21
+ This decorator is retained for compatibility with the nose testing framework, which is being phased out.
+ Please use the nose2 or pytest frameworks instead.
+
+ Filter deprecation warnings while running the test suite.
+
+ This decorator can be used to filter DeprecationWarning's, to avoid
+ printing them during the test suite run, while checking that the test
+ actually raises a DeprecationWarning.
+
+ Parameters
+ ----------
+ conditional : bool or callable, optional
+ Flag to determine whether to mark test as deprecated or not. If the
+ condition is a callable, it is used at runtime to dynamically make the
+ decision. Default is True.
+
+ Returns
+ -------
+ decorator : function
+ The `deprecated` decorator itself.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+ def deprecate_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Numpy 1.21, 2020-12-20
+ warnings.warn('the np.testing.dec decorators are included for nose support, and are '
+ 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
+
+ def _deprecated_imp(*args, **kwargs):
+ # Check that the test being run actually emits a DeprecationWarning
+ with assert_warns(DeprecationWarning):
+ f(*args, **kwargs)
+
+ if isinstance(conditional, collections.abc.Callable):
+ cond = conditional()
+ else:
+ cond = conditional
+ if cond:
+ return nose.tools.make_decorator(f)(_deprecated_imp)
+ else:
+ return f
+ return deprecate_decorator
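+
+# Editorial usage sketch (added, hedged): the decorated test passes only
+# if its body actually emits a DeprecationWarning.
+#
+# @deprecated()
+# def test_old_api():
+#     warnings.warn('old_api is gone', DeprecationWarning)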
+
+
+def parametrize(vars, input):
+ """
+ .. deprecated:: 1.21
+ This decorator is retained for compatibility with the nose testing framework, which is being phased out.
+ Please use the nose2 or pytest frameworks instead.
+
+ Pytest compatibility function. This implements the simplest level of
+ pytest.mark.parametrize for use in nose as an aid in making the transition
+ to pytest. It achieves that by adding a dummy var parameter and ignoring
+ the doc_func parameter of the base class. It does not support variable
+ substitution by name, nor does it support nesting or classes. See the
+ pytest documentation for usage.
+
+ .. versionadded:: 1.14.0
+
+ """
+ from .parameterized import parameterized
+
+ # Numpy 1.21, 2020-12-20
+ warnings.warn('the np.testing.dec decorators are included for nose support, and are '
+ 'deprecated since NumPy v1.21. Use the nose2 or pytest frameworks instead.', DeprecationWarning, stacklevel=2)
+
+ return parameterized(input)
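+
+# Editorial usage sketch (added, hedged): `vars` is accepted purely for
+# source compatibility with pytest.mark.parametrize; each tuple in `input`
+# is unpacked as positional arguments when nose collects the tests.
+#
+# @parametrize('base, power', [(2, 3), (10, 2)])
+# def test_pow(base, power):
+#     assert base ** power > 0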
+
+_needs_refcount = skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py
new file mode 100644
index 00000000..9b4e9536
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/extbuild.py
@@ -0,0 +1,251 @@
+"""
+Build a c-extension module on-the-fly in tests.
+See build_and_import_extensions for usage hints
+
+"""
+
+import os
+import pathlib
+import sys
+import sysconfig
+
+__all__ = ['build_and_import_extension', 'compile_extension_module']
+
+
+def build_and_import_extension(
+ modname, functions, *, prologue="", build_dir=None,
+ include_dirs=[], more_init=""):
+ """
+ Build and import a C extension module `modname` from a list of function
+ fragments `functions`.
+
+
+ Parameters
+ ----------
+ functions : list of fragments
+ Each fragment is a sequence of func_name, calling convention, snippet.
+ prologue : string
+ Code to precede the rest, usually extra ``#include`` or ``#define``
+ macros.
+ build_dir : pathlib.Path
+ Where to build the module, usually a temporary directory
+ include_dirs : list
+ Extra directories to find include files when compiling
+ more_init : string
+ Code to appear in the module PyMODINIT_FUNC
+
+ Returns
+ -------
+ out: module
+ The module will have been loaded and is ready for use
+
+ Examples
+ --------
+ >>> functions = [("test_bytes", "METH_O", \"\"\"
+ if ( !PyBytesCheck(args)) {
+ Py_RETURN_FALSE;
+ }
+ Py_RETURN_TRUE;
+ \"\"\")]
+ >>> mod = build_and_import_extension("testme", functions)
+ >>> assert not mod.test_bytes(u'abc')
+ >>> assert mod.test_bytes(b'abc')
+ """
+ from distutils.errors import CompileError
+
+ body = prologue + _make_methods(functions, modname)
+ init = """PyObject *mod = PyModule_Create(&moduledef);
+ """
+ if not build_dir:
+ build_dir = pathlib.Path('.')
+ if more_init:
+ init += """#define INITERROR return NULL
+ """
+ init += more_init
+ init += "\nreturn mod;"
+ source_string = _make_source(modname, init, body)
+ try:
+ mod_so = compile_extension_module(
+ modname, build_dir, include_dirs, source_string)
+ except CompileError as e:
+ # shorten the exception chain
+ raise RuntimeError(f"could not compile in {build_dir}:") from e
+ import importlib.util
+ spec = importlib.util.spec_from_file_location(modname, mod_so)
+ foo = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(foo)
+ return foo
+
+
+def compile_extension_module(
+ name, builddir, include_dirs,
+ source_string, libraries=[], library_dirs=[]):
+ """
+ Build an extension module and return the filename of the resulting
+ native code file.
+
+ Parameters
+ ----------
+ name : string
+ name of the module, possibly including dots if it is a module inside a
+ package.
+ builddir : pathlib.Path
+ Where to build the module, usually a temporary directory
+ include_dirs : list
+ Extra directories to find include files when compiling
+ libraries : list
+ Libraries to link into the extension module
+ library_dirs : list
+ Where to find the libraries, ``-L`` passed to the linker
+ """
+ modname = name.split('.')[-1]
+ dirname = builddir / name
+ dirname.mkdir(exist_ok=True)
+ cfile = _convert_str_to_file(source_string, dirname)
+ include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]
+
+ return _c_compile(
+ cfile, outputfilename=dirname / modname,
+ include_dirs=include_dirs, libraries=libraries, library_dirs=library_dirs,
+ )
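+
+# Editorial usage sketch (added, hedged; 'mymod' and `full_c_source` are
+# illustrative placeholders): `source_string` must be a complete module
+# source, e.g. as produced by _make_source below.
+#
+# import pathlib, tempfile
+# so_path = compile_extension_module(
+#     'mymod', pathlib.Path(tempfile.mkdtemp()), [], full_c_source)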
+
+
+def _convert_str_to_file(source, dirname):
+ """Helper function to create a file ``source.c`` in `dirname` that contains
+ the string in `source`. Returns the file name
+ """
+ filename = dirname / 'source.c'
+ with filename.open('w') as f:
+ f.write(str(source))
+ return filename
+
+
+def _make_methods(functions, modname):
+ """ Turns the name, signature, code in functions into complete functions
+ and lists them in a methods_table. Then turns the methods_table into a
+ ``PyMethodDef`` structure and returns the resulting code fragment ready
+ for compilation
+ """
+ methods_table = []
+ codes = []
+ for funcname, flags, code in functions:
+ cfuncname = "%s_%s" % (modname, funcname)
+ if 'METH_KEYWORDS' in flags:
+ signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
+ else:
+ signature = '(PyObject *self, PyObject *args)'
+ methods_table.append(
+ "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
+ func_code = """
+ static PyObject* {cfuncname}{signature}
+ {{
+ {code}
+ }}
+ """.format(cfuncname=cfuncname, signature=signature, code=code)
+ codes.append(func_code)
+
+ body = "\n".join(codes) + """
+ static PyMethodDef methods[] = {
+ %(methods)s
+ { NULL }
+ };
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "%(modname)s", /* m_name */
+ NULL, /* m_doc */
+ -1, /* m_size */
+ methods, /* m_methods */
+ };
+ """ % dict(methods='\n'.join(methods_table), modname=modname)
+ return body
+
+
+def _make_source(name, init, body):
+ """ Combines the code fragments into source code ready to be compiled
+ """
+ code = """
+ #include <Python.h>
+
+ %(body)s
+
+ PyMODINIT_FUNC
+ PyInit_%(name)s(void) {
+ %(init)s
+ }
+ """ % dict(
+ name=name, init=init, body=body,
+ )
+ return code
+
+
+def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[],
+ library_dirs=[]):
+ if sys.platform == 'win32':
+ compile_extra = ["/we4013"]
+ link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')]
+ elif sys.platform.startswith('linux'):
+ compile_extra = [
+ "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
+ link_extra = None
+ else:
+ compile_extra = link_extra = None
+ if sys.platform == 'win32':
+ link_extra = link_extra + ['/DEBUG'] # generate .pdb file
+ if sys.platform == 'darwin':
+ # support Fink & Darwinports
+ for s in ('/sw/', '/opt/local/'):
+ if (s + 'include' not in include_dirs
+ and os.path.exists(s + 'include')):
+ include_dirs.append(s + 'include')
+ if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
+ library_dirs.append(s + 'lib')
+
+ outputfilename = outputfilename.with_suffix(get_so_suffix())
+ saved_environ = os.environ.copy()
+ try:
+ build(
+ cfile, outputfilename,
+ compile_extra, link_extra,
+ include_dirs, libraries, library_dirs)
+ finally:
+ # workaround for a distutils bug where some env vars can grow
+ # longer and longer each time distutils is used
+ for key, value in saved_environ.items():
+ if os.environ.get(key) != value:
+ os.environ[key] = value
+ return outputfilename
+
+
+def build(cfile, outputfilename, compile_extra, link_extra,
+ include_dirs, libraries, library_dirs):
+ "cd into the directory where the cfile is, use distutils to build"
+ from numpy.distutils.ccompiler import new_compiler
+
+ compiler = new_compiler(force=1, verbose=2)
+ compiler.customize('')
+ objects = []
+
+ old = os.getcwd()
+ os.chdir(cfile.parent)
+ try:
+ res = compiler.compile(
+ [str(cfile.name)],
+ include_dirs=include_dirs,
+ extra_preargs=compile_extra
+ )
+ objects += [str(cfile.parent / r) for r in res]
+ finally:
+ os.chdir(old)
+
+ compiler.link_shared_object(
+ objects, str(outputfilename),
+ libraries=libraries,
+ extra_preargs=link_extra,
+ library_dirs=library_dirs)
+
+
+def get_so_suffix():
+ ret = sysconfig.get_config_var('EXT_SUFFIX')
+ assert ret
+ return ret
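+
+# Editorial note (added): EXT_SUFFIX is platform-dependent, e.g.
+# '.cpython-39-x86_64-linux-gnu.so' on CPython 3.9 / Linux x86-64.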
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/noseclasses.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/noseclasses.py
new file mode 100644
index 00000000..48fa4dc1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/noseclasses.py
@@ -0,0 +1,364 @@
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+
+# Because this module imports nose directly, it should not
+# be used except by nosetester.py to avoid a general NumPy
+# dependency on nose.
+import os
+import sys
+import doctest
+import inspect
+
+import numpy
+import nose
+from nose.plugins import doctests as npd
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+from nose.plugins.base import Plugin
+from nose.util import src
+from .nosetester import get_package_name
+from .utils import KnownFailureException, KnownFailureTest
+
+
+# Some of the classes in this module begin with 'Numpy' to clearly distinguish
+# them from the plethora of very similar names from nose/unittest/doctest
+
+#-----------------------------------------------------------------------------
+# Modified version of the one in the stdlib, that fixes a python bug (doctests
+# not found in extension modules, https://bugs.python.org/issue3158)
+class NumpyDocTestFinder(doctest.DocTestFinder):
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.__globals__
+ elif inspect.isbuiltin(object):
+ return module.__name__ == object.__module__
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.ismethod(object):
+ # This one may be a bug in cython that fails to correctly set the
+ # __module__ attribute of methods, but since the same error is easy
+ # to make by extension code writers, having this safety in place
+ # isn't such a bad idea
+ return module.__name__ == object.__self__.__class__.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+ return True # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+
+ doctest.DocTestFinder._find(self, tests, obj, name, module,
+ source_lines, globs, seen)
+
+ # Below we re-run pieces of the above method with manual modifications,
+ # because the original code is buggy and fails to correctly identify
+ # doctests in extension modules.
+
+ # Local shorthands
+ from inspect import (
+ isroutine, isclass, ismodule, isfunction, ismethod
+ )
+
+ # Look for tests in a module's contained objects.
+ if ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ valname1 = f'{name}.{valname}'
+ if ( (isroutine(val) or isclass(val))
+ and self._from_module(module, val)):
+
+ self._find(tests, val, valname1, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).__func__
+
+ # Recurse to methods, properties, and nested classes.
+ if ((isfunction(val) or isclass(val) or
+ ismethod(val) or isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = f'{name}.{valname}'
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+
+# second-chance checker; if the default comparison doesn't
+# pass, then see if the expected output string contains flags that
+# tell us to ignore the output
+class NumpyOutputChecker(doctest.OutputChecker):
+ def check_output(self, want, got, optionflags):
+ ret = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if not ret:
+ if "#random" in want:
+ return True
+
+ # it would be useful to normalize endianness so that
+ # bigendian machines don't fail all the tests (and there are
+ # actually some bigendian examples in the doctests). Let's try
+ # making them all little endian
+ got = got.replace("'>", "'<")
+ want = want.replace("'>", "'<")
+
+ # try to normalize out 32 and 64 bit default int sizes
+ for sz in [4, 8]:
+ got = got.replace("'<i%d'" % sz, "int")
+ want = want.replace("'<i%d'" % sz, "int")
+
+ ret = doctest.OutputChecker.check_output(self, want,
+ got, optionflags)
+
+ return ret
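+
+ # Editorial illustration (added): with the substitutions above, an
+ # expected "dtype='<i8'" and an actual "dtype='<i4'" both normalize to
+ # "dtype=int", so the same doctest passes on 32- and 64-bit
+ # default-int platforms.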
+
+
+# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
+# its constructor that blocks non-default arguments from being passed
+# down into doctest.DocTestCase
+class NumpyDocTestCase(npd.DocTestCase):
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ doctest.DocTestCase.__init__(self, test,
+ optionflags=optionflags,
+ setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+
+print_state = numpy.get_printoptions()
+
+class NumpyDoctest(npd.Doctest):
+ name = 'numpydoctest' # call nosetests with --with-numpydoctest
+ score = 1000 # load late, after doctest builtin
+
+ # always use whitespace and ellipsis options for doctests
+ doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+ # files that should be ignored for doctests
+ doctest_ignore = ['generate_numpy_api.py',
+ 'setup.py']
+
+ # Custom classes; class variables to allow subclassing
+ doctest_case_class = NumpyDocTestCase
+ out_check_class = NumpyOutputChecker
+ test_finder_class = NumpyDocTestFinder
+
+ # Don't use the standard doctest option handler; hard-code the option values
+ def options(self, parser, env=os.environ):
+ Plugin.options(self, parser, env)
+ # Test doctests in 'test' files / directories. Standard plugin default
+ # is False
+ self.doctest_tests = True
+ # Variable name; if defined, doctest results stored in this variable in
+ # the top-level namespace. None is the standard default
+ self.doctest_result_var = None
+
+ def configure(self, options, config):
+ # parent method sets enabled flag from command line --with-numpydoctest
+ Plugin.configure(self, options, config)
+ self.finder = self.test_finder_class()
+ self.parser = doctest.DocTestParser()
+ if self.enabled:
+ # Pull standard doctest out of plugin list; there's no reason to run
+ # both. In practice the Unplugger plugin above would cover us when
+ # run from a standard numpy.test() call; this is just in case
+ # someone wants to run our plugin outside the numpy.test() machinery
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != 'doctest']
+
+ def set_test_context(self, test):
+ """ Configure `test` object to set test context
+
+ We set the numpy / scipy standard doctest namespace
+
+ Parameters
+ ----------
+ test : test object
+ with ``globs`` dictionary defining namespace
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ `test` object modified in place
+ """
+ # set the namespace for tests
+ pkg_name = get_package_name(os.path.dirname(test.filename))
+
+ # Each doctest should execute in an environment equivalent to
+ # starting Python and executing "import numpy as np", and,
+ # for SciPy packages, an additional import of the local
+ # package (so that scipy.linalg.basic.py's doctests have an
+ # implicit "from scipy import linalg" as well).
+ #
+ # Note: __file__ allows the doctest in NoseTester to run
+ # without producing an error
+ test.globs = {'__builtins__':__builtins__,
+ '__file__':'__main__',
+ '__name__':'__main__',
+ 'np':numpy}
+ # add appropriate scipy import for SciPy tests
+ if 'scipy' in pkg_name:
+ p = pkg_name.split('.')
+ p2 = p[-1]
+ test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
+
+ # Override test loading to customize test context (with set_test_context
+ # method), set standard docstring options, and install our own test output
+ # checker
+ def loadTestsFromModule(self, module):
+ if not self.matches(module.__name__):
+ npd.log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ # nose allows module.__test__ = False; doctest does not and
+ # throws AttributeError
+ return
+ if not tests:
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ # Set test namespace; test altered in place
+ self.set_test_context(test)
+ yield self.doctest_case_class(test,
+ optionflags=self.doctest_optflags,
+ checker=self.out_check_class(),
+ result_var=self.doctest_result_var)
+
+ # Add an afterContext method to nose.plugins.doctests.Doctest in order
+ # to restore print options to the original state after each doctest
+ def afterContext(self):
+ numpy.set_printoptions(**print_state)
+
+ # Ignore NumPy-specific build files that shouldn't be searched for tests
+ def wantFile(self, file):
+ bn = os.path.basename(file)
+ if bn in self.doctest_ignore:
+ return False
+ return npd.Doctest.wantFile(self, file)
+
+
+class Unplugger:
+ """ Nose plugin to remove named plugin late in loading
+
+ By default it removes the "doctest" plugin.
+ """
+ name = 'unplugger'
+ enabled = True # always enabled
+ score = 4000 # load late in order to be after builtins
+
+ def __init__(self, to_unplug='doctest'):
+ self.to_unplug = to_unplug
+
+ def options(self, parser, env):
+ pass
+
+ def configure(self, options, config):
+ # Pull named plugin out of plugins list
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != self.to_unplug]
+
+
+class KnownFailurePlugin(ErrorClassPlugin):
+ '''Plugin that installs a KNOWNFAIL error class for the
+ KnownFailureClass exception. When KnownFailure is raised,
+ the exception will be logged in the knownfail attribute of the
+ result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
+ exception will not be counted as an error or failure.'''
+ enabled = True
+ knownfail = ErrorClass(KnownFailureException,
+ label='KNOWNFAIL',
+ isfailure=False)
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
+ parser.add_option('--no-knownfail', action='store_true',
+ dest='noKnownFail', default=env.get(env_opt, False),
+ help='Disable special handling of KnownFailure '
+ 'exceptions')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noKnownFail', False)
+ if disable:
+ self.enabled = False
+
+KnownFailure = KnownFailurePlugin # backwards compat
+
+
+class FPUModeCheckPlugin(Plugin):
+ """
+ Plugin that checks the FPU mode before and after each test,
+ raising failures if the test changed the mode.
+ """
+
+ def prepareTestCase(self, test):
+ from numpy.core._multiarray_tests import get_fpu_mode
+
+ def run(result):
+ old_mode = get_fpu_mode()
+ test.test(result)
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ try:
+ raise AssertionError(
+ "FPU mode changed from {0:#x} to {1:#x} during the "
+ "test".format(old_mode, new_mode))
+ except AssertionError:
+ result.addFailure(test, sys.exc_info())
+
+ return run
+
+
+# Class allows us to save the results of the tests in runTests - see runTests
+# method docstring for details
+class NumpyTestProgram(nose.core.TestProgram):
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and
+ sets self.success to the same value.
+
+ Because nose currently discards the test result object, but we need
+ to return it to the user, override TestProgram.runTests to retain
+ the result
+ """
+ if self.testRunner is None:
+ self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ self.result = self.testRunner.run(self.test)
+ self.success = self.result.wasSuccessful()
+ return self.success
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/nosetester.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/nosetester.py
new file mode 100644
index 00000000..bccec823
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/nosetester.py
@@ -0,0 +1,545 @@
+"""
+Nose test running.
+
+This module implements ``test()`` and ``bench()`` functions for NumPy modules.
+
+"""
+import os
+import sys
+import warnings
+import numpy as np
+
+from .utils import import_nose, suppress_warnings
+
+
+__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
+ '_numpy_tester', 'import_nose', 'suppress_warnings']
+
+
+def get_package_name(filepath):
+ """
+ Given a path where a package is installed, determine its name.
+
+ Parameters
+ ----------
+ filepath : str
+ Path to a file. If the determination fails, "numpy" is returned.
+
+ Examples
+ --------
+ >>> np.testing.nosetester.get_package_name('nonsense')
+ 'numpy'
+
+ """
+
+ fullpath = filepath[:]
+ pkg_name = []
+ while 'site-packages' in filepath or 'dist-packages' in filepath:
+ filepath, p2 = os.path.split(filepath)
+ if p2 in ('site-packages', 'dist-packages'):
+ break
+ pkg_name.append(p2)
+
+ # if package name determination failed, just default to numpy/scipy
+ if not pkg_name:
+ if 'scipy' in fullpath:
+ return 'scipy'
+ else:
+ return 'numpy'
+
+ # otherwise, reverse to get correct order and return
+ pkg_name.reverse()
+
+ # don't include the outer egg directory
+ if pkg_name[0].endswith('.egg'):
+ pkg_name.pop(0)
+
+ return '.'.join(pkg_name)
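+
+# Editorial worked example (added): the loop strips path components until
+# it reaches site-packages/dist-packages, then reverses them, so
+# get_package_name('/usr/lib/python3.9/site-packages/scipy/linalg')
+# returns 'scipy.linalg'.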
+
+
+def run_module_suite(file_to_run=None, argv=None):
+ """
+ Run a test module.
+
+ Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
+ the command line
+
+ Parameters
+ ----------
+ file_to_run : str, optional
+ Path to test module, or None.
+ By default, run the module from which this function is called.
+ argv : list of strings
+ Arguments to be passed to the nose test runner. ``argv[0]`` is
+ ignored. All command line arguments accepted by ``nosetests``
+ will work. If it is the default value None, sys.argv is used.
+
+ .. versionadded:: 1.9.0
+
+ Examples
+ --------
+ Adding the following::
+
+ if __name__ == "__main__" :
+ run_module_suite(argv=sys.argv)
+
+ at the end of a test module will run the tests when that module is
+ called in the python interpreter.
+
+ Alternatively, calling::
+
+ >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP
+
+ from an interpreter will run all the test routines in 'test_matlib.py'.
+ """
+ if file_to_run is None:
+ f = sys._getframe(1)
+ file_to_run = f.f_locals.get('__file__', None)
+ if file_to_run is None:
+ raise AssertionError
+
+ if argv is None:
+ argv = sys.argv + [file_to_run]
+ else:
+ argv = argv + [file_to_run]
+
+ nose = import_nose()
+ from .noseclasses import KnownFailurePlugin
+ nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
+
+
+class NoseTester:
+ """
+ Nose test runner.
+
+ This class is made available as numpy.testing.Tester, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import Tester
+ test = Tester().test
+
+ Calling this test function finds and runs all tests associated with the
+ package and all its sub-packages.
+
+ Attributes
+ ----------
+ package_path : str
+ Full path to the package to test.
+ package_name : str
+ Name of the package to test.
+
+ Parameters
+ ----------
+ package : module, str or None, optional
+ The package to test. If a string, this should be the full path to
+ the package. If None (default), `package` is set to the module from
+ which `NoseTester` is initialized.
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ - "develop" : equals ``(Warning,)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ Default is "release".
+ depth : int, optional
+ If `package` is None, then this can be used to initialize from the
+ module of the caller of (the caller of (...)) the code that
+ initializes `NoseTester`. Default of 0 means the module of the
+ immediate caller; higher values are useful for utility routines that
+ want to initialize `NoseTester` objects on behalf of other code.
+
+ """
+ def __init__(self, package=None, raise_warnings="release", depth=0,
+ check_fpu_mode=False):
+ # Back-compat: 'None' used to mean either "release" or "develop"
+ # depending on whether this was a release or develop version of
+ # numpy. Those semantics were fine for testing numpy, but not so
+ # helpful for downstream projects like scipy that use
+ # numpy.testing. (They want to set this based on whether *they* are a
+ # release or develop version, not whether numpy is.) So we continue to
+ # accept 'None' for back-compat, but it's now just an alias for the
+ # default "release".
+ if raise_warnings is None:
+ raise_warnings = "release"
+
+ package_name = None
+ if package is None:
+ f = sys._getframe(1 + depth)
+ package_path = f.f_locals.get('__file__', None)
+ if package_path is None:
+ raise AssertionError
+ package_path = os.path.dirname(package_path)
+ package_name = f.f_locals.get('__name__', None)
+ elif isinstance(package, type(os)):
+ package_path = os.path.dirname(package.__file__)
+ package_name = getattr(package, '__name__', None)
+ else:
+ package_path = str(package)
+
+ self.package_path = package_path
+
+ # Find the package name under test; this name is used to limit coverage
+ # reporting (if enabled).
+ if package_name is None:
+ package_name = get_package_name(package_path)
+ self.package_name = package_name
+
+ # Set to "release" in constructor in maintenance branches.
+ self.raise_warnings = raise_warnings
+
+ # Whether to check for FPU mode changes
+ self.check_fpu_mode = check_fpu_mode
+
+ def _test_argv(self, label, verbose, extra_argv):
+ ''' Generate argv for nosetest command
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ see ``test`` docstring
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+
+ Returns
+ -------
+ argv : list
+ command line arguments that will be passed to nose
+ '''
+ argv = [__file__, self.package_path, '-s']
+ if label and label != 'full':
+ if not isinstance(label, str):
+ raise TypeError('Selection label should be a string')
+ if label == 'fast':
+ label = 'not slow'
+ argv += ['-A', label]
+ argv += ['--verbosity', str(verbose)]
+
+ # When installing with setuptools, and also in some other cases, the
+ # test_*.py files end up marked +x executable. Nose, by default, does
+ # not run files marked with +x as they might be scripts. However, in
+ # our case nose only looks for test_*.py files under the package
+ # directory, which should be safe.
+ argv += ['--exe']
+
+ if extra_argv:
+ argv += extra_argv
+ return argv
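+
+ # Illustrative sketch (added comment, not upstream code): with the defaults
+ # label='fast', verbose=1 and no extra_argv, the argv assembled above is
+ # roughly
+ #
+ # [__file__, self.package_path, '-s', '-A', 'not slow',
+ # '--verbosity', '1', '--exe']
+ #
+ # which nose then parses exactly like a ``nosetests`` command line.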
+
+ def _show_system_info(self):
+ nose = import_nose()
+
+ import numpy
+ print(f'NumPy version {numpy.__version__}')
+ relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+ npdir = os.path.dirname(numpy.__file__)
+ print(f'NumPy is installed in {npdir}')
+
+ if 'scipy' in self.package_name:
+ import scipy
+ print(f'SciPy version {scipy.__version__}')
+ spdir = os.path.dirname(scipy.__file__)
+ print(f'SciPy is installed in {spdir}')
+
+ pyversion = sys.version.replace('\n', '')
+ print(f'Python version {pyversion}')
+ print("nose version %d.%d.%d" % nose.__versioninfo__)
+
+ def _get_custom_doctester(self):
+ """ Return instantiated plugin for doctests
+
+ Allows subclassing of this class to override doctester
+
+ A return value of None means use the nose builtin doctest plugin
+ """
+ from .noseclasses import NumpyDoctest
+ return NumpyDoctest()
+
+ def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, timer=False):
+ """
+ Run tests for module using nose.
+
+ This method does the heavy lifting for the `test` method. It takes all
+ the same arguments, for details see `test`.
+
+ See Also
+ --------
+ test
+
+ """
+ # fail with nice error message if nose is not present
+ import_nose()
+ # compile argv
+ argv = self._test_argv(label, verbose, extra_argv)
+ # our way of doing coverage
+ if coverage:
+ argv += [f'--cover-package={self.package_name}', '--with-coverage',
+ '--cover-tests', '--cover-erase']
+
+ if timer:
+ if timer is True:
+ argv += ['--with-timer']
+ elif isinstance(timer, int):
+ argv += ['--with-timer', '--timer-top-n', str(timer)]
+
+ # construct list of plugins
+ import nose.plugins.builtin
+ from nose.plugins import EntryPointPluginManager
+ from .noseclasses import (KnownFailurePlugin, Unplugger,
+ FPUModeCheckPlugin)
+ plugins = [KnownFailurePlugin()]
+ plugins += [p() for p in nose.plugins.builtin.plugins]
+ if self.check_fpu_mode:
+ plugins += [FPUModeCheckPlugin()]
+ argv += ["--with-fpumodecheckplugin"]
+ try:
+ # External plugins (like nose-timer)
+ entrypoint_manager = EntryPointPluginManager()
+ entrypoint_manager.loadPlugins()
+ plugins += [p for p in entrypoint_manager.plugins]
+ except ImportError:
+ # Relies on pkg_resources, not a hard dependency
+ pass
+
+ # add doctesting if required
+ doctest_argv = '--with-doctest' in argv
+ if not doctests and doctest_argv:
+ doctests = True
+ plug = self._get_custom_doctester()
+ if plug is None:
+ # use standard doctesting
+ if doctests and not doctest_argv:
+ argv += ['--with-doctest']
+ else: # custom doctesting
+ if doctest_argv: # in fact the unplugger would take care of this
+ argv.remove('--with-doctest')
+ plugins += [Unplugger('doctest'), plug]
+ if doctests:
+ argv += ['--with-' + plug.name]
+ return argv, plugins
+
+ def test(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, raise_warnings=None,
+ timer=False):
+ """
+ Run tests for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the tests to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow tests as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ * attribute_identifier - string passed directly to nosetests as '-A'.
+
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+ doctests : bool, optional
+ If True, run doctests in module. Default is False.
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ (This requires the
+ `coverage module <https://pypi.org/project/coverage/>`_).
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ * "develop" : equals ``(Warning,)``
+ * "release" : equals ``()``, do not raise on any warnings.
+ timer : bool or int, optional
+ Timing of individual tests with ``nose-timer`` (which needs to be
+ installed). If True, time tests and report on all of them.
+ If an integer (say ``N``), report timing results for ``N`` slowest
+ tests.
+
+ Returns
+ -------
+ result : object
+ Returns the result of running the tests as a
+ ``nose.result.TextTestResult`` object.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for it.
+ For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+ Running unit tests for numpy.lib
+ ...
+ Ran 976 tests in 3.933s
+
+ OK
+
+ >>> result.errors #doctest: +SKIP
+ []
+ >>> result.knownfail #doctest: +SKIP
+ []
+ """
+
+ # cap verbosity at 3 because nose becomes *very* verbose beyond that
+ verbose = min(verbose, 3)
+
+ from . import utils
+ utils.verbose = verbose
+
+ argv, plugins = self.prepare_test_args(
+ label, verbose, extra_argv, doctests, coverage, timer)
+
+ if doctests:
+ print(f'Running unit tests and doctests for {self.package_name}')
+ else:
+ print(f'Running unit tests for {self.package_name}')
+
+ self._show_system_info()
+
+ # reset doctest state on every run
+ import doctest
+ doctest.master = None
+
+ if raise_warnings is None:
+ raise_warnings = self.raise_warnings
+
+ _warn_opts = dict(develop=(Warning,),
+ release=())
+ if isinstance(raise_warnings, str):
+ raise_warnings = _warn_opts[raise_warnings]
+
+ with suppress_warnings("location") as sup:
+ # Reset the warning filters to the default state,
+ # so that running the tests is more repeatable.
+ warnings.resetwarnings()
+ # Set all warnings to 'always'; the default 'once' has the bad
+ # property of possibly shadowing later warnings.
+ warnings.filterwarnings('always')
+ # Force the requested warnings to raise
+ for warningtype in raise_warnings:
+ warnings.filterwarnings('error', category=warningtype)
+ # Filter out annoying import messages.
+ sup.filter(message='Not importing directory')
+ sup.filter(message="numpy.dtype size changed")
+ sup.filter(message="numpy.ufunc size changed")
+ sup.filter(category=np.ModuleDeprecationWarning)
+ # Filter out boolean '-' deprecation messages. This allows
+ # older versions of scipy to test without a flood of messages.
+ sup.filter(message=".*boolean negative.*")
+ sup.filter(message=".*boolean subtract.*")
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ from ...distutils import cpuinfo
+ sup.filter(category=UserWarning, module=cpuinfo)
+ # Filter out some deprecation warnings inside nose 1.3.7 when run
+ # on python 3.5b2. See
+ # https://github.com/nose-devs/nose/issues/929
+ # Note: it is hard to filter based on module for sup (lineno could
+ # be implemented).
+ warnings.filterwarnings("ignore", message=".*getargspec.*",
+ category=DeprecationWarning,
+ module=r"nose\.")
+
+ from .noseclasses import NumpyTestProgram
+
+ t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
+
+ return t.result
+
+ def bench(self, label='fast', verbose=1, extra_argv=None):
+ """
+ Run benchmarks for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the benchmarks to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow benchmarks as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ * attribute_identifier - string passed directly to nosetests as '-A'.
+
+ verbose : int, optional
+ Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+
+ Returns
+ -------
+ success : bool
+ Returns True if running the benchmarks works, False if an error
+ occurred.
+
+ Notes
+ -----
+ Benchmarks are like tests, but have names starting with "bench" instead
+ of "test", and can be found under the "benchmarks" sub-directory of the
+ module.
+
+ Each NumPy module exposes `bench` in its namespace to run all benchmarks
+ for it.
+
+ Examples
+ --------
+ >>> success = np.lib.bench() #doctest: +SKIP
+ Running benchmarks for numpy.lib
+ ...
+ using 562341 items:
+ unique:
+ 0.11
+ unique1d:
+ 0.11
+ ratio: 1.0
+ nUnique: 56230 == 56230
+ ...
+ OK
+
+ >>> success #doctest: +SKIP
+ True
+
+ """
+
+ print(f'Running benchmarks for {self.package_name}')
+ self._show_system_info()
+
+ argv = self._test_argv(label, verbose, extra_argv)
+ argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+ # import nose or make informative error
+ nose = import_nose()
+
+ # get plugin to disable doctests
+ from .noseclasses import Unplugger
+ add_plugins = [Unplugger('doctest')]
+
+ return nose.run(argv=argv, addplugins=add_plugins)
+
+
+def _numpy_tester():
+ if hasattr(np, "__version__") and ".dev0" in np.__version__:
+ mode = "develop"
+ else:
+ mode = "release"
+ return NoseTester(raise_warnings=mode, depth=1,
+ check_fpu_mode=True)
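+
+
+# Illustrative sketch (an assumption based on older NumPy releases, not part
+# of this file): a package __init__.py would typically wire the tester up
+# along the lines of
+#
+# from numpy.testing.nosetester import _numpy_tester
+# test = _numpy_tester().test
+# bench = _numpy_tester().bench
+#
+# so that ``numpy.test()`` and ``numpy.bench()`` run the nose suite with the
+# plugins configured above.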
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/parameterized.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/parameterized.py
new file mode 100644
index 00000000..3a29a181
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/parameterized.py
@@ -0,0 +1,432 @@
+"""
+tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
+
+Unless stated otherwise in the source files, all code is copyright 2010 David
+Wolever <david@wolever.net>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of David Wolever.
+
+"""
+import re
+import inspect
+import warnings
+from functools import wraps
+from types import MethodType
+from collections import namedtuple
+
+from unittest import TestCase
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+ """ Represents a single parameter to a test case.
+
+ For example::
+
+ >>> p = param("foo", bar=16)
+ >>> p
+ param(*('foo',), **{'bar': 16})
+ >>> p.args
+ ('foo',)
+ >>> p.kwargs
+ {'bar': 16}
+
+ Intended to be used as an argument to ``@parameterized``::
+
+ @parameterized([
+ param("foo", bar=16),
+ ])
+ def test_stuff(foo, bar=16):
+ pass
+ """
+
+ def __new__(cls, *args, **kwargs):
+ return _param.__new__(cls, args, kwargs)
+
+ @classmethod
+ def explicit(cls, args=None, kwargs=None):
+ """ Creates a ``param`` by explicitly specifying ``args`` and
+ ``kwargs``::
+
+ >>> param.explicit([1,2,3])
+ param(*(1, 2, 3), **{})
+ >>> param.explicit(kwargs={"foo": 42})
+ param(*(), **{'foo': 42})
+ """
+ args = args or ()
+ kwargs = kwargs or {}
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_decorator(cls, args):
+ """ Returns an instance of ``param()`` for ``@parameterized`` argument
+ ``args``::
+
+ >>> param.from_decorator((42, ))
+ param(*(42,), **{})
+ >>> param.from_decorator("foo")
+ param(*('foo',), **{})
+ """
+ if isinstance(args, param):
+ return args
+ elif isinstance(args, str):
+ args = (args, )
+ try:
+ return cls(*args)
+ except TypeError as e:
+ if "after * must be" not in str(e):
+ raise
+ raise TypeError(
+ "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
+ %(args, args),
+ )
+
+ def __repr__(self):
+ return "param(*%r, **%r)" %self
+
+
+def parameterized_argument_value_pairs(func, p):
+ """Return tuples of parameterized arguments and their values.
+
+ This is useful if you are writing your own doc_func
+ function and need to know the values for each parameter name::
+
+ >>> def func(a, foo=None, bar=42, **kwargs): pass
+ >>> p = param(1, foo=7, extra=99)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+ If the function's first argument is named ``self`` then it will be
+ ignored::
+
+ >>> def func(self, a): pass
+ >>> p = param(1)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("a", 1)]
+
+ Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+ >>> def func(foo, *args): pass
+ >>> p = param(1)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("foo", 1)]
+ >>> p = param(1, 16)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("foo", 1), ("*args", (16, ))]
+ """
+ argspec = inspect.getargspec(func)
+ arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+ named_args = argspec.args[arg_offset:]
+
+ result = list(zip(named_args, p.args))
+ named_args = argspec.args[len(result) + arg_offset:]
+ varargs = p.args[len(result):]
+
+ result.extend([
+ (name, p.kwargs.get(name, default))
+ for (name, default)
+ in zip(named_args, argspec.defaults or [])
+ ])
+
+ seen_arg_names = {n for (n, _) in result}
+ keywords = dict(sorted([
+ (name, p.kwargs[name])
+ for name in p.kwargs
+ if name not in seen_arg_names
+ ]))
+
+ if varargs:
+ result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+ if keywords:
+ result.append(("**%s" %(argspec.keywords, ), keywords))
+
+ return result
+
+def short_repr(x, n=64):
+ """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+ >>> short_repr("foo")
+ u"foo"
+ >>> short_repr("123456789", n=4)
+ u"12...89"
+ """
+
+ x_repr = repr(x)
+ if isinstance(x_repr, bytes):
+ try:
+ x_repr = str(x_repr, "utf-8")
+ except UnicodeDecodeError:
+ x_repr = str(x_repr, "latin1")
+ if len(x_repr) > n:
+ x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+ return x_repr
+
+def default_doc_func(func, num, p):
+ if func.__doc__ is None:
+ return None
+
+ all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+ # Assumes that the function passed is a bound method.
+ descs = [f'{n}={short_repr(v)}' for n, v in all_args_with_values]
+
+ # The documentation might be a multiline string, so split it
+ # and just work with the first string, ignoring the period
+ # at the end if there is one.
+ first, nl, rest = func.__doc__.lstrip().partition("\n")
+ suffix = ""
+ if first.endswith("."):
+ suffix = "."
+ first = first[:-1]
+ args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+ return "".join([first.rstrip(), args, suffix, nl, rest])
+
+def default_name_func(func, num, p):
+ base_name = func.__name__
+ name_suffix = "_%s" %(num, )
+ if len(p.args) > 0 and isinstance(p.args[0], str):
+ name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+ return base_name + name_suffix
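+
+# Illustrative example (added comment, not upstream code): for a test
+# function named ``test_int`` and ``param("A", 10)`` as parameter set
+# number 0, the generated name is
+#
+# >>> default_name_func(test_int, 0, param("A", 10))
+# 'test_int_0_A'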
+
+
+# force nose for numpy purposes.
+_test_runner_override = 'nose'
+_test_runner_guess = False
+_test_runners = {"unittest", "unittest2", "nose", "nose2", "pytest"}
+_test_runner_aliases = {
+ "_pytest": "pytest",
+}
+
+def set_test_runner(name):
+ global _test_runner_override
+ if name not in _test_runners:
+ raise TypeError(
+ "Invalid test runner: %r (must be one of: %s)"
+ %(name, ", ".join(_test_runners)),
+ )
+ _test_runner_override = name
+
+def detect_runner():
+ """ Guess which test runner we're using by traversing the stack and looking
+ for the first matching module. This *should* be reasonably safe, as
+ it's done during test discovery where the test runner should be the
+ stack frame immediately outside. """
+ if _test_runner_override is not None:
+ return _test_runner_override
+ global _test_runner_guess
+ if _test_runner_guess is False:
+ stack = inspect.stack()
+ for record in reversed(stack):
+ frame = record[0]
+ module = frame.f_globals.get("__name__").partition(".")[0]
+ if module in _test_runner_aliases:
+ module = _test_runner_aliases[module]
+ if module in _test_runners:
+ _test_runner_guess = module
+ break
+ else:
+ _test_runner_guess = None
+ return _test_runner_guess
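+
+# Illustrative note (added comment, not upstream): because this module forces
+# ``_test_runner_override = 'nose'`` above, ``detect_runner()`` returns
+# 'nose' immediately and never needs to walk the stack:
+#
+# >>> detect_runner()
+# 'nose'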
+
+class parameterized:
+ """ Parameterize a test case::
+
+ class TestInt:
+ @parameterized([
+ ("A", 10),
+ ("F", 15),
+ param("10", 42, base=42)
+ ])
+ def test_int(self, input, expected, base=16):
+ actual = int(input, base=base)
+ assert_equal(actual, expected)
+
+ @parameterized([
+ (2, 3, 5),
+ (3, 5, 8),
+ ])
+ def test_add(a, b, expected):
+ assert_equal(a + b, expected)
+ """
+
+ def __init__(self, input, doc_func=None):
+ self.get_input = self.input_as_callable(input)
+ self.doc_func = doc_func or default_doc_func
+
+ def __call__(self, test_func):
+ self.assert_not_in_testcase_subclass()
+
+ @wraps(test_func)
+ def wrapper(test_self=None):
+ test_cls = test_self and type(test_self)
+
+ original_doc = wrapper.__doc__
+ for num, args in enumerate(wrapper.parameterized_input):
+ p = param.from_decorator(args)
+ unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
+ try:
+ wrapper.__doc__ = nose_tuple[0].__doc__
+ # Nose uses `getattr(instance, test_func.__name__)` to get
+ # a method bound to the test instance (as opposed to a
+ # method bound to the instance of the class created when
+ # tests were being enumerated). Set a value here to make
+ # sure nose can get the correct test method.
+ if test_self is not None:
+ setattr(test_cls, test_func.__name__, unbound_func)
+ yield nose_tuple
+ finally:
+ if test_self is not None:
+ delattr(test_cls, test_func.__name__)
+ wrapper.__doc__ = original_doc
+ wrapper.parameterized_input = self.get_input()
+ wrapper.parameterized_func = test_func
+ test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
+ return wrapper
+
+ def param_as_nose_tuple(self, test_self, func, num, p):
+ nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
+ nose_func.__doc__ = self.doc_func(func, num, p)
+ # Track the unbound function because we need to setattr the unbound
+ # function onto the class for nose to work (see comments above), and
+ # Python 3 doesn't let us pull the function out of a bound method.
+ unbound_func = nose_func
+ if test_self is not None:
+ nose_func = MethodType(nose_func, test_self)
+ return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
+
+ def assert_not_in_testcase_subclass(self):
+ parent_classes = self._terrible_magic_get_defining_classes()
+ if any(issubclass(cls, TestCase) for cls in parent_classes):
+ raise Exception("Warning: '@parameterized' tests won't work "
+ "inside subclasses of 'TestCase' - use "
+ "'@parameterized.expand' instead.")
+
+ def _terrible_magic_get_defining_classes(self):
+ """ Returns the list of parent classes of the class currently being defined.
+ Will likely only work if called from the ``parameterized`` decorator.
+ This function is entirely @brandon_rhodes's fault, as he suggested
+ the implementation: http://stackoverflow.com/a/8793684/71522
+ """
+ stack = inspect.stack()
+ if len(stack) <= 4:
+ return []
+ frame = stack[4]
+ code_context = frame[4] and frame[4][0].strip()
+ if not (code_context and code_context.startswith("class ")):
+ return []
+ _, _, parents = code_context.partition("(")
+ parents, _, _ = parents.partition(")")
+ return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+ @classmethod
+ def input_as_callable(cls, input):
+ if callable(input):
+ return lambda: cls.check_input_values(input())
+ input_values = cls.check_input_values(input)
+ return lambda: input_values
+
+ @classmethod
+ def check_input_values(cls, input_values):
+ # Explicitly convert non-list inputs to a list so that:
+ # 1. A helpful exception will be raised if they aren't iterable, and
+ # 2. Generators are unwrapped exactly once (otherwise `nosetests
+ # --processes=n` has issues; see:
+ # https://github.com/wolever/nose-parameterized/pull/31)
+ if not isinstance(input_values, list):
+ input_values = list(input_values)
+ return [param.from_decorator(p) for p in input_values]
+
+ @classmethod
+ def expand(cls, input, name_func=None, doc_func=None, **legacy):
+ """ A "brute force" method of parameterizing test cases. Creates new
+ test cases and injects them into the namespace that the wrapped
+ function is being defined in. Useful for parameterizing tests in
+ subclasses of 'TestCase', where Nose test generators don't work.
+
+ >>> @parameterized.expand([("foo", 1, 2)])
+ ... def test_add1(name, input, expected):
+ ... actual = add1(input)
+ ... assert_equal(actual, expected)
+ ...
+ >>> locals()
+ ... 'test_add1_foo_0': <function ...> ...
+ >>>
+ """
+
+ if "testcase_func_name" in legacy:
+ warnings.warn("testcase_func_name= is deprecated; use name_func=",
+ DeprecationWarning, stacklevel=2)
+ if not name_func:
+ name_func = legacy["testcase_func_name"]
+
+ if "testcase_func_doc" in legacy:
+ warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+ DeprecationWarning, stacklevel=2)
+ if not doc_func:
+ doc_func = legacy["testcase_func_doc"]
+
+ doc_func = doc_func or default_doc_func
+ name_func = name_func or default_name_func
+
+ def parameterized_expand_wrapper(f, instance=None):
+ stack = inspect.stack()
+ frame = stack[1]
+ frame_locals = frame[0].f_locals
+
+ parameters = cls.input_as_callable(input)()
+ for num, p in enumerate(parameters):
+ name = name_func(f, num, p)
+ frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+ frame_locals[name].__doc__ = doc_func(f, num, p)
+
+ f.__test__ = False
+ return parameterized_expand_wrapper
+
+ @classmethod
+ def param_as_standalone_func(cls, p, func, name):
+ @wraps(func)
+ def standalone_func(*a):
+ return func(*(a + p.args), **p.kwargs)
+ standalone_func.__name__ = name
+
+ # place_as is used by py.test to determine what source file should be
+ # used for this test.
+ standalone_func.place_as = func
+
+ # Remove __wrapped__ because py.test will try to look at __wrapped__
+ # to determine which parameters should be used with this test case,
+ # and obviously we don't need it to do any parameterization.
+ try:
+ del standalone_func.__wrapped__
+ except AttributeError:
+ pass
+ return standalone_func
+
+ @classmethod
+ def to_safe_name(cls, s):
+ return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
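+
+
+# Illustrative example (added comment, not upstream code): ``to_safe_name``
+# collapses every run of non-identifier characters into a single underscore,
+# which is how ``default_name_func`` derives test-name suffixes:
+#
+# >>> parameterized.to_safe_name("foo bar!")
+# 'foo_bar_'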
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py b/venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py
new file mode 100644
index 00000000..2a212650
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py
@@ -0,0 +1,2595 @@
+"""
+Utility function to facilitate testing.
+
+"""
+import os
+import sys
+import platform
+import re
+import gc
+import operator
+import warnings
+from functools import partial, wraps
+import shutil
+import contextlib
+from tempfile import mkdtemp, mkstemp
+from unittest.case import SkipTest
+from warnings import WarningMessage
+import pprint
+
+import numpy as np
+from numpy.core import (
+ intp, float32, empty, arange, array_repr, ndarray, isnat, array)
+import numpy.linalg.lapack_lite
+
+from io import StringIO
+
+__all__ = [
+ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+ 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+ 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+ 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+ 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+ 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+ 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare',
+ 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
+ '_OLD_PROMOTION'
+ ]
+
+
+class KnownFailureException(Exception):
+ '''Raise this exception to mark a test as a known failing test.'''
+ pass
+
+
+KnownFailureTest = KnownFailureException # backwards compat
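+
+# Illustrative usage (an assumption, not upstream code): raising this inside
+# a test lets the nose KnownFailurePlugin report the test as a known failure
+# instead of an error, e.g.
+#
+# def test_known_regression():
+# raise KnownFailureException("known to fail; see issue tracker")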
+verbose = 0
+
+IS_WASM = platform.machine() in ["wasm32", "wasm64"]
+IS_PYPY = sys.implementation.name == 'pypy'
+IS_PYSTON = hasattr(sys, "pyston_version_info")
+HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
+HAS_LAPACK64 = numpy.linalg.lapack_lite._ilp64
+
+_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy'
+
+
+def import_nose():
+ """ Import nose only when needed.
+ """
+ nose_is_good = True
+ minimum_nose_version = (1, 0, 0)
+ try:
+ import nose
+ except ImportError:
+ nose_is_good = False
+ else:
+ if nose.__versioninfo__ < minimum_nose_version:
+ nose_is_good = False
+
+ if not nose_is_good:
+ msg = ('Need nose >= %d.%d.%d for tests - see '
+ 'https://nose.readthedocs.io' %
+ minimum_nose_version)
+ raise ImportError(msg)
+
+ return nose
+
+
+def assert_(val, msg=''):
+ """
+ Assert that works in release mode.
+ Accepts callable msg to allow deferring evaluation until failure.
+
+ The Python built-in ``assert`` does not work when executing code in
+ optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+ For documentation on usage, refer to the Python documentation.
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ if not val:
+ try:
+ smsg = msg()
+ except TypeError:
+ smsg = msg
+ raise AssertionError(smsg)
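+
+# Illustrative usage (added comment, not upstream code): ``msg`` may be a
+# callable so an expensive message is only built when the assertion fails;
+# here ``x`` stands for whatever object is under test:
+#
+# assert_(x.shape == (3,), lambda: f"unexpected shape: {x.shape}")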
+
+
+def gisnan(x):
+ """like isnan, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isnan and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isnan
+ st = isnan(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isnan not supported for this type")
+ return st
+
+
+def gisfinite(x):
+ """like isfinite, but always raise an error if type not supported instead
+ of returning a TypeError object.
+
+ Notes
+ -----
+ isfinite and other ufunc sometimes return a NotImplementedType object
+ instead of raising any exception. This function is a wrapper to make sure
+ an exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isfinite, errstate
+ with errstate(invalid='ignore'):
+ st = isfinite(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isfinite not supported for this type")
+ return st
+
+
+def gisinf(x):
+ """like isinf, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isinf and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isinf, errstate
+ with errstate(invalid='ignore'):
+ st = isinf(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isinf not supported for this type")
+ return st
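+
+# Illustrative usage of the g* wrappers (added comment, not upstream code):
+#
+# >>> gisnan(np.float64('nan'))
+# True
+# >>> gisnan('not a number type') # raises TypeError rather than silently
+# # returning NotImplemented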
+
+
+if os.name == 'nt':
+ # Code "stolen" from enthought/debug/memusage.py
+ def GetPerformanceAttributes(object, counter, instance=None,
+ inum=-1, format=None, machine=None):
+ # NOTE: Many counters require 2 samples to give accurate results,
+ # including "% Processor Time" (as by definition, at any instant, a
+ # thread's CPU usage is either 0 or 100). To read counters like this,
+ # you should copy this function, but keep the counter open, and call
+ # CollectQueryData() each time you need to know.
+ # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
+ # My older explanation for this was that the "AddCounter" process
+ # forced the CPU to 100%, but the above makes more sense :)
+ import win32pdh
+ if format is None:
+ format = win32pdh.PDH_FMT_LONG
+ path = win32pdh.MakeCounterPath((machine, object, instance, None,
+ inum, counter))
+ hq = win32pdh.OpenQuery()
+ try:
+ hc = win32pdh.AddCounter(hq, path)
+ try:
+ win32pdh.CollectQueryData(hq)
+ type, val = win32pdh.GetFormattedCounterValue(hc, format)
+ return val
+ finally:
+ win32pdh.RemoveCounter(hc)
+ finally:
+ win32pdh.CloseQuery(hq)
+
+ def memusage(processName="python", instance=0):
+ # from win32pdhutil, part of the win32all package
+ import win32pdh
+ return GetPerformanceAttributes("Process", "Virtual Bytes",
+ processName, instance,
+ win32pdh.PDH_FMT_LONG, None)
+elif sys.platform[:5] == 'linux':
+
+ def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
+ """
+ Return virtual memory size in bytes of the running python.
+
+ """
+ try:
+ with open(_proc_pid_stat, 'r') as f:
+ l = f.readline().split(' ')
+ return int(l[22])
+ except Exception:
+ return
+else:
+ def memusage():
+ """
+ Return memory usage of running python. [Not implemented]
+
+ """
+ raise NotImplementedError
+
+
+if sys.platform[:5] == 'linux':
+ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
+ import time
+ if not _load_time:
+ _load_time.append(time.time())
+ try:
+ with open(_proc_pid_stat, 'r') as f:
+ l = f.readline().split(' ')
+ return int(l[13])
+ except Exception:
+ return int(100*(time.time()-_load_time[0]))
+else:
+ # os.getpid is not available on all platforms.
+ # Using time is safe but inaccurate, especially when the process
+ # was suspended or sleeping.
+ def jiffies(_load_time=[]):
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
+ import time
+ if not _load_time:
+ _load_time.append(time.time())
+ return int(100*(time.time()-_load_time[0]))
+
+
+def build_err_msg(arrays, err_msg, header='Items are not equal:',
+ verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+ msg = ['\n' + header]
+ if err_msg:
+ if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+ msg = [msg[0] + ' ' + err_msg]
+ else:
+ msg.append(err_msg)
+ if verbose:
+ for i, a in enumerate(arrays):
+
+ if isinstance(a, ndarray):
+ # precision argument is only needed if the objects are ndarrays
+ r_func = partial(array_repr, precision=precision)
+ else:
+ r_func = repr
+
+ try:
+ r = r_func(a)
+ except Exception as exc:
+ r = f'[repr failed for <{type(a).__name__}>: {exc}]'
+ if r.count('\n') > 3:
+ r = '\n'.join(r.splitlines()[:3])
+ r += '...'
+ msg.append(f' {names[i]}: {r}')
+ return '\n'.join(msg)
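+
+# Illustrative example (added comment, not upstream code): with the default
+# header and names, the helper yields one line per object, e.g.
+#
+# >>> build_err_msg([1, 2], err_msg='')
+# '\nItems are not equal:\n ACTUAL: 1\n DESIRED: 2'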
+
+
+def assert_equal(actual, desired, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two objects are not equal.
+
+ Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
+ check that all elements of these objects are equal. An exception is raised
+ at the first pair of conflicting values.
+
+ When one of `actual` and `desired` is a scalar and the other is array_like,
+ the function checks that each element of the array_like object is equal to
+ the scalar.
+
+ This function handles NaN comparisons as if NaN was a "normal" number.
+ That is, AssertionError is not raised if both objects have NaNs in the same
+ positions. This is in contrast to the IEEE standard on NaNs, which says
+ that NaN compared to anything must return False.
+
+ Parameters
+ ----------
+ actual : array_like
+ The object to check.
+ desired : array_like
+ The expected object.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal.
+
+ Examples
+ --------
+ >>> np.testing.assert_equal([4,5], [4,6])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Items are not equal:
+ item=1
+ ACTUAL: 5
+ DESIRED: 6
+
+ The following comparison does not raise an exception. There are NaNs
+ in the inputs, but they are in the same positions.
+
+ >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ if isinstance(desired, dict):
+ if not isinstance(actual, dict):
+ raise AssertionError(repr(type(actual)))
+ assert_equal(len(actual), len(desired), err_msg, verbose)
+ for k in desired:
+ if k not in actual:
+ raise AssertionError(repr(k))
+ assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
+ verbose)
+ return
+ if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+ assert_equal(len(actual), len(desired), err_msg, verbose)
+ for k in range(len(desired)):
+ assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
+ verbose)
+ return
+ from numpy.core import ndarray, isscalar, signbit
+ from numpy.lib import iscomplexobj, real, imag
+ if isinstance(actual, ndarray) or isinstance(desired, ndarray):
+ return assert_array_equal(actual, desired, err_msg, verbose)
+ msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+
+ # Handle complex numbers: separate into real/imag to handle
+ # nan/inf/negative zero correctly
+ # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+ try:
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+ except (ValueError, TypeError):
+ usecomplex = False
+
+ if usecomplex:
+ if iscomplexobj(actual):
+ actualr = real(actual)
+ actuali = imag(actual)
+ else:
+ actualr = actual
+ actuali = 0
+ if iscomplexobj(desired):
+ desiredr = real(desired)
+ desiredi = imag(desired)
+ else:
+ desiredr = desired
+ desiredi = 0
+ try:
+ assert_equal(actualr, desiredr)
+ assert_equal(actuali, desiredi)
+ except AssertionError:
+ raise AssertionError(msg)
+
+ # isscalar test to check cases such as [np.nan] != np.nan
+ if isscalar(desired) != isscalar(actual):
+ raise AssertionError(msg)
+
+ try:
+ isdesnat = isnat(desired)
+ isactnat = isnat(actual)
+ dtypes_match = (np.asarray(desired).dtype.type ==
+ np.asarray(actual).dtype.type)
+ if isdesnat and isactnat:
+ # If both are NaT (and have the same dtype -- datetime or
+ # timedelta) they are considered equal.
+ if dtypes_match:
+ return
+ else:
+ raise AssertionError(msg)
+
+ except (TypeError, ValueError, NotImplementedError):
+ pass
+
+ # Inf/nan/negative zero handling
+ try:
+ isdesnan = gisnan(desired)
+ isactnan = gisnan(actual)
+ if isdesnan and isactnan:
+ return # both nan, so equal
+
+ # handle signed zero specially for floats
+ array_actual = np.asarray(actual)
+ array_desired = np.asarray(desired)
+ if (array_actual.dtype.char in 'Mm' or
+ array_desired.dtype.char in 'Mm'):
+ # version 1.18
+ # until this version, gisnan failed for datetime64 and timedelta64.
+ # Now it succeeds but comparison to scalar with a different type
+ # emits a DeprecationWarning.
+ # Avoid that by skipping the next check
+ raise NotImplementedError('cannot compare to a scalar '
+ 'with a different type')
+
+ if desired == 0 and actual == 0:
+ if not signbit(desired) == signbit(actual):
+ raise AssertionError(msg)
+
+ except (TypeError, ValueError, NotImplementedError):
+ pass
+
+ try:
+ # Explicitly use __eq__ for comparison, gh-2552
+ if not (desired == actual):
+ raise AssertionError(msg)
+
+ except (DeprecationWarning, FutureWarning) as e:
+ # this handles the case when the two types are not even comparable
+ if 'elementwise == comparison' in e.args[0]:
+ raise AssertionError(msg)
+ else:
+ raise
+
+
+def print_assert_equal(test_string, actual, desired):
+ """
+ Test if two objects are equal, and print an error message if test fails.
+
+ The test is performed with ``actual == desired``.
+
+ Parameters
+ ----------
+ test_string : str
+ The message supplied to AssertionError.
+ actual : object
+ The object to test for equality against `desired`.
+ desired : object
+ The expected result.
+
+ Examples
+ --------
+ >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
+ >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
+ Traceback (most recent call last):
+ ...
+ AssertionError: Test XYZ of func xyz failed
+ ACTUAL:
+ [0, 1]
+ DESIRED:
+ [0, 2]
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ if not (actual == desired):
+ msg = StringIO()
+ msg.write(test_string)
+ msg.write(' failed\nACTUAL: \n')
+ pprint.pprint(actual, msg)
+ msg.write('DESIRED: \n')
+ pprint.pprint(desired, msg)
+ raise AssertionError(msg.getvalue())
+
+
+@np._no_nep50_warning()
+def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two items are not equal up to desired
+ precision.
+
+ .. note:: It is recommended to use one of `assert_allclose`,
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+ instead of this function for more consistent floating point
+ comparisons.
+
+ The test verifies that the elements of `actual` and `desired` satisfy:
+
+ ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``
+
+ That is a looser test than originally documented, but agrees with what the
+ actual implementation in `assert_array_almost_equal` did up to rounding
+ vagaries. An exception is raised at conflicting values. For ndarrays this
+ delegates to assert_array_almost_equal.
+
+ Parameters
+ ----------
+ actual : array_like
+ The object to check.
+ desired : array_like
+ The expected object.
+ decimal : int, optional
+ Desired precision, default is 7.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+ >>> from numpy.testing import assert_almost_equal
+ >>> assert_almost_equal(2.3333333333333, 2.33333334)
+ >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 10 decimals
+ ACTUAL: 2.3333333333333
+ DESIRED: 2.33333334
+
+ >>> assert_almost_equal(np.array([1.0,2.3333333333333]),
+ ... np.array([1.0,2.33333334]), decimal=9)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 9 decimals
+ <BLANKLINE>
+ Mismatched elements: 1 / 2 (50%)
+ Max absolute difference: 6.66669964e-09
+ Max relative difference: 2.85715698e-09
+ x: array([1. , 2.333333333])
+ y: array([1. , 2.33333334])
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ from numpy.core import ndarray
+ from numpy.lib import iscomplexobj, real, imag
+
+ # Handle complex numbers: separate into real/imag to handle
+ # nan/inf/negative zero correctly
+ # XXX: catch ValueError for subclasses of ndarray where iscomplex fails
+ try:
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+ except ValueError:
+ usecomplex = False
+
+ def _build_err_msg():
+ header = ('Arrays are not almost equal to %d decimals' % decimal)
+ return build_err_msg([actual, desired], err_msg, verbose=verbose,
+ header=header)
+
+ if usecomplex:
+ if iscomplexobj(actual):
+ actualr = real(actual)
+ actuali = imag(actual)
+ else:
+ actualr = actual
+ actuali = 0
+ if iscomplexobj(desired):
+ desiredr = real(desired)
+ desiredi = imag(desired)
+ else:
+ desiredr = desired
+ desiredi = 0
+ try:
+ assert_almost_equal(actualr, desiredr, decimal=decimal)
+ assert_almost_equal(actuali, desiredi, decimal=decimal)
+ except AssertionError:
+ raise AssertionError(_build_err_msg())
+
+ if isinstance(actual, (ndarray, tuple, list)) \
+ or isinstance(desired, (ndarray, tuple, list)):
+ return assert_array_almost_equal(actual, desired, decimal, err_msg)
+ try:
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ if gisnan(desired) or gisnan(actual):
+ if not (gisnan(desired) and gisnan(actual)):
+ raise AssertionError(_build_err_msg())
+ else:
+ if not desired == actual:
+ raise AssertionError(_build_err_msg())
+ return
+ except (NotImplementedError, TypeError):
+ pass
+ if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)):
+ raise AssertionError(_build_err_msg())
+
+
+@np._no_nep50_warning()
+def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two items are not equal up to significant
+ digits.
+
+ .. note:: It is recommended to use one of `assert_allclose`,
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+ instead of this function for more consistent floating point
+ comparisons.
+
+ Given two numbers, check that they are approximately equal.
+ Approximately equal is defined as the number of significant digits
+ that agree.
+
+ Parameters
+ ----------
+ actual : scalar
+ The object to check.
+ desired : scalar
+ The expected object.
+ significant : int, optional
+ Desired precision, default is 7.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+ >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
+ >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
+ ... significant=8)
+ >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
+ ... significant=8)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Items are not equal to 8 significant digits:
+ ACTUAL: 1.234567e-21
+ DESIRED: 1.2345672e-21
+
+ The evaluated condition that raises the exception is
+
+ >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+ True
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+
+ (actual, desired) = map(float, (actual, desired))
+ if desired == actual:
+ return
+ # Normalize the numbers to be in the range (-10.0, 10.0)
+ # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+ with np.errstate(invalid='ignore'):
+ scale = 0.5*(np.abs(desired) + np.abs(actual))
+ scale = np.power(10, np.floor(np.log10(scale)))
+ try:
+ sc_desired = desired/scale
+ except ZeroDivisionError:
+ sc_desired = 0.0
+ try:
+ sc_actual = actual/scale
+ except ZeroDivisionError:
+ sc_actual = 0.0
+ msg = build_err_msg(
+ [actual, desired], err_msg,
+ header='Items are not equal to %d significant digits:' % significant,
+ verbose=verbose)
+ try:
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ if gisnan(desired) or gisnan(actual):
+ if not (gisnan(desired) and gisnan(actual)):
+ raise AssertionError(msg)
+ else:
+ if not desired == actual:
+ raise AssertionError(msg)
+ return
+ except (TypeError, NotImplementedError):
+ pass
+ if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
+ raise AssertionError(msg)
+
+
+@np._no_nep50_warning()
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+ precision=6, equal_nan=True, equal_inf=True,
+ *, strict=False):
+ __tracebackhide__ = True # Hide traceback for py.test
+ from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
+
+ x = np.asanyarray(x)
+ y = np.asanyarray(y)
+
+ # original array for output formatting
+ ox, oy = x, y
+
+ def isnumber(x):
+ return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+ def istime(x):
+ return x.dtype.char in "Mm"
+
+ def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
+ """Handling nan/inf.
+
+ Combine results of running func on x and y, checking that they are True
+ at the same locations.
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ x_id = func(x)
+ y_id = func(y)
+ # We include work-arounds here to handle three types of slightly
+ # pathological ndarray subclasses:
+ # (1) all() on `masked` array scalars can return masked arrays, so we
+ # use != True
+ # (2) __eq__ on some ndarray subclasses returns Python booleans
+ # instead of element-wise comparisons, so we cast to bool_() and
+ # use isinstance(..., bool) checks
+ # (3) subclasses with bare-bones __array_function__ implementations may
+ # not implement np.all(), so favor using the .all() method
+ # We are not committed to supporting such subclasses, but it's nice to
+ # support them if possible.
+ if bool_(x_id == y_id).all() != True:
+ msg = build_err_msg([x, y],
+ err_msg + '\nx and y %s location mismatch:'
+ % (hasval), verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise AssertionError(msg)
+ # If one of x_id/y_id is a scalar, we know the flag is the same
+ # everywhere in the other array, so return the scalar flag.
+ if isinstance(x_id, bool) or x_id.ndim == 0:
+ return bool_(x_id)
+ elif isinstance(y_id, bool) or y_id.ndim == 0:
+ return bool_(y_id)
+ else:
+ return y_id
+
+ try:
+ if strict:
+ cond = x.shape == y.shape and x.dtype == y.dtype
+ else:
+ cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+ if not cond:
+ if x.shape != y.shape:
+ reason = f'\n(shapes {x.shape}, {y.shape} mismatch)'
+ else:
+ reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)'
+ msg = build_err_msg([x, y],
+ err_msg
+ + reason,
+ verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise AssertionError(msg)
+
+ flagged = bool_(False)
+ if isnumber(x) and isnumber(y):
+ if equal_nan:
+ flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
+
+ if equal_inf:
+ flagged |= func_assert_same_pos(x, y,
+ func=lambda xy: xy == +inf,
+ hasval='+inf')
+ flagged |= func_assert_same_pos(x, y,
+ func=lambda xy: xy == -inf,
+ hasval='-inf')
+
+ elif istime(x) and istime(y):
+ # If one is datetime64 and the other timedelta64 there is no point
+ if equal_nan and x.dtype.type == y.dtype.type:
+ flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
+
+ if flagged.ndim > 0:
+ x, y = x[~flagged], y[~flagged]
+ # Only do the comparison if actual values are left
+ if x.size == 0:
+ return
+ elif flagged:
+ # no sense doing comparison if everything is flagged.
+ return
+
+ val = comparison(x, y)
+
+ if isinstance(val, bool):
+ cond = val
+ reduced = array([val])
+ else:
+ reduced = val.ravel()
+ cond = reduced.all()
+
+ # The below comparison is a hack to ensure that fully masked
+ # results, for which val.ravel().all() returns np.ma.masked,
+ # do not trigger a failure (np.ma.masked != True evaluates as
+ # np.ma.masked, which is falsy).
+ if cond != True:
+ n_mismatch = reduced.size - reduced.sum(dtype=intp)
+ n_elements = flagged.size if flagged.ndim != 0 else reduced.size
+ percent_mismatch = 100 * n_mismatch / n_elements
+ remarks = [
+ 'Mismatched elements: {} / {} ({:.3g}%)'.format(
+ n_mismatch, n_elements, percent_mismatch)]
+
+ with errstate(all='ignore'):
+ # ignore errors for non-numeric types
+ with contextlib.suppress(TypeError):
+ error = abs(x - y)
+ if np.issubdtype(x.dtype, np.unsignedinteger):
+ error2 = abs(y - x)
+ np.minimum(error, error2, out=error)
+ max_abs_error = max(error)
+ if getattr(error, 'dtype', object_) == object_:
+ remarks.append('Max absolute difference: '
+ + str(max_abs_error))
+ else:
+ remarks.append('Max absolute difference: '
+ + array2string(max_abs_error))
+
+ # note: this definition of relative error matches that one
+ # used by assert_allclose (found in np.isclose)
+ # Filter values where the divisor would be zero
+ nonzero = bool_(y != 0)
+ if all(~nonzero):
+ max_rel_error = array(inf)
+ else:
+ max_rel_error = max(error[nonzero] / abs(y[nonzero]))
+ if getattr(error, 'dtype', object_) == object_:
+ remarks.append('Max relative difference: '
+ + str(max_rel_error))
+ else:
+ remarks.append('Max relative difference: '
+ + array2string(max_rel_error))
+
+ err_msg += '\n' + '\n'.join(remarks)
+ msg = build_err_msg([ox, oy], err_msg,
+ verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise AssertionError(msg)
+ except ValueError:
+ import traceback
+ efmt = traceback.format_exc()
+ header = f'error during assertion:\n\n{efmt}\n\n{header}'
+
+ msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+ names=('x', 'y'), precision=precision)
+ raise ValueError(msg)
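+
+
+# Illustrative sketch (added comment, not upstream code):
+# ``assert_array_compare`` is the engine behind the public assertions; any
+# elementwise predicate works, e.g. a non-strict ordering check:
+#
+# assert_array_compare(operator.__le__, [1, 2], [1, 3],
+# header='Arrays are not ordered by <=')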
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):
+ """
+ Raises an AssertionError if two array_like objects are not equal.
+
+ Given two array_like objects, check that the shape is equal and all
+ elements of these objects are equal (but see the Notes for the special
+ handling of a scalar). An exception is raised at shape mismatch or
+ conflicting values. In contrast to the standard usage in numpy, NaNs
+ are compared like numbers; no assertion is raised if both objects have
+ NaNs in the same positions.
+
+ The usual caution for verifying equality with floating point numbers is
+ advised.
+
+ Parameters
+ ----------
+ x : array_like
+ The actual object to check.
+ y : array_like
+ The desired, expected object.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+ strict : bool, optional
+ If True, raise an AssertionError when either the shape or the data
+ type of the array_like objects does not match. The special
+ handling for scalars mentioned in the Notes section is disabled.
+
+ .. versionadded:: 1.24.0
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired objects are not equal.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Notes
+ -----
+ When one of `x` and `y` is a scalar and the other is array_like, the
+ function checks that each element of the array_like object is equal to
+ the scalar. This behaviour can be disabled with the `strict` parameter.
+
+ Examples
+ --------
+ The first assert does not raise an exception:
+
+ >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+ ... [np.exp(0),2.33333, np.nan])
+
+ Assert fails with numerical imprecision with floats:
+
+ >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+ ... [1, np.sqrt(np.pi)**2, np.nan])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not equal
+ <BLANKLINE>
+ Mismatched elements: 1 / 3 (33.3%)
+ Max absolute difference: 4.4408921e-16
+ Max relative difference: 1.41357986e-16
+ x: array([1. , 3.141593, nan])
+ y: array([1. , 3.141593, nan])
+
+ Use `assert_allclose` or one of the nulp (number of floating point values)
+ functions for these cases instead:
+
+ >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+ ... [1, np.sqrt(np.pi)**2, np.nan],
+ ... rtol=1e-10, atol=0)
+
+ As mentioned in the Notes section, `assert_array_equal` has special
+ handling for scalars. Here the test checks that each value in `x` is 3:
+
+ >>> x = np.full((2, 5), fill_value=3)
+ >>> np.testing.assert_array_equal(x, 3)
+
+ Use `strict` to raise an AssertionError when comparing a scalar with an
+ array:
+
+ >>> np.testing.assert_array_equal(x, 3, strict=True)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not equal
+ <BLANKLINE>
+ (shapes (2, 5), () mismatch)
+ x: array([[3, 3, 3, 3, 3],
+ [3, 3, 3, 3, 3]])
+ y: array(3)
+
+ The `strict` parameter also ensures that the array data types match:
+
+ >>> x = np.array([2, 2, 2])
+ >>> y = np.array([2., 2., 2.], dtype=np.float32)
+ >>> np.testing.assert_array_equal(x, y, strict=True)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not equal
+ <BLANKLINE>
+ (dtypes int64, float32 mismatch)
+ x: array([2, 2, 2])
+ y: array([2., 2., 2.], dtype=float32)
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+ verbose=verbose, header='Arrays are not equal',
+ strict=strict)
+
+
+@np._no_nep50_warning()
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two objects are not equal up to desired
+ precision.
+
+ .. note:: It is recommended to use one of `assert_allclose`,
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+ instead of this function for more consistent floating point
+ comparisons.
+
+ The test verifies identical shapes and that the elements of ``actual`` and
+ ``desired`` satisfy:
+
+ ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+ That is a looser test than originally documented, but agrees with what the
+ actual implementation did up to rounding vagaries. An exception is raised
+ at shape mismatch or conflicting values. In contrast to the standard usage
+ in numpy, NaNs are compared like numbers; no assertion is raised if both
+ objects have NaNs in the same positions.
+
+ Parameters
+ ----------
+ x : array_like
+ The actual object to check.
+ y : array_like
+ The desired, expected object.
+ decimal : int, optional
+ Desired precision, default is 6.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_allclose: Compare two array_like objects for equality with desired
+ relative and/or absolute precision.
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+ Examples
+ --------
+    The first assert does not raise an exception:
+
+ >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
+ ... [1.0,2.333,np.nan])
+
+ >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+ ... [1.0,2.33339,np.nan], decimal=5)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 5 decimals
+ <BLANKLINE>
+ Mismatched elements: 1 / 3 (33.3%)
+ Max absolute difference: 6.e-05
+ Max relative difference: 2.57136612e-05
+ x: array([1. , 2.33333, nan])
+ y: array([1. , 2.33339, nan])
+
+ >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+ ... [1.0,2.33333, 5], decimal=5)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 5 decimals
+ <BLANKLINE>
+ x and y nan location mismatch:
+ x: array([1. , 2.33333, nan])
+ y: array([1. , 2.33333, 5. ])
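+
+    A difference just below the ``1.5 * 10**(-decimal)`` bound documented
+    above still passes:
+
+    >>> np.testing.assert_array_almost_equal([1.0], [1.00000014], decimal=7)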
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ from numpy.core import number, float_, result_type, array
+ from numpy.core.numerictypes import issubdtype
+ from numpy.core.fromnumeric import any as npany
+
+ def compare(x, y):
+ try:
+            if npany(gisinf(x)) or npany(gisinf(y)):
+ xinfid = gisinf(x)
+ yinfid = gisinf(y)
+ if not (xinfid == yinfid).all():
+ return False
+                # if there is only one item, x and y are each +/- inf
+ if x.size == y.size == 1:
+ return x == y
+ x = x[~xinfid]
+ y = y[~yinfid]
+ except (TypeError, NotImplementedError):
+ pass
+
+ # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+ # casting of x later.
+ dtype = result_type(y, 1.)
+ y = np.asanyarray(y, dtype)
+ z = abs(x - y)
+
+ if not issubdtype(z.dtype, number):
+ z = z.astype(float_) # handle object arrays
+
+ return z < 1.5 * 10.0**(-decimal)
+
+ assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+ header=('Arrays are not almost equal to %d decimals' % decimal),
+ precision=decimal)
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two array_like objects are not ordered by less
+ than.
+
+ Given two array_like objects, check that the shape is equal and all
+ elements of the first object are strictly smaller than those of the
+ second object. An exception is raised at shape mismatch or incorrectly
+    ordered values. A shape mismatch does not raise if either object is
+    zero-dimensional. In contrast to the standard usage in numpy, NaNs are
+    compared; no assertion is raised if both objects have NaNs in the same
+    positions.
+
+ Parameters
+ ----------
+ x : array_like
+ The smaller object to check.
+ y : array_like
+ The larger object to compare.
+ err_msg : string
+ The error message to be printed in case of failure.
+ verbose : bool
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired objects are not equal.
+
+ See Also
+ --------
+ assert_array_equal: tests objects for equality
+ assert_array_almost_equal: test objects for equality up to precision
+
+ Examples
+ --------
+ >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+ >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not less-ordered
+ <BLANKLINE>
+ Mismatched elements: 1 / 3 (33.3%)
+ Max absolute difference: 1.
+ Max relative difference: 0.5
+ x: array([ 1., 1., nan])
+ y: array([ 1., 2., nan])
+
+ >>> np.testing.assert_array_less([1.0, 4.0], 3)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not less-ordered
+ <BLANKLINE>
+ Mismatched elements: 1 / 2 (50%)
+ Max absolute difference: 2.
+ Max relative difference: 0.66666667
+ x: array([1., 4.])
+ y: array(3)
+
+ >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not less-ordered
+ <BLANKLINE>
+ (shapes (3,), (1,) mismatch)
+ x: array([1., 2., 3.])
+ y: array([4])
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+ verbose=verbose,
+ header='Arrays are not less-ordered',
+ equal_inf=False)
+
+
+def runstring(astr, dict):
+ exec(astr, dict)
+
+
+def assert_string_equal(actual, desired):
+ """
+ Test if two strings are equal.
+
+ If the given strings are equal, `assert_string_equal` does nothing.
+ If they are not equal, an AssertionError is raised, and the diff
+ between the strings is shown.
+
+ Parameters
+ ----------
+ actual : str
+ The string to test for equality against the expected string.
+ desired : str
+ The expected string.
+
+ Examples
+ --------
+ >>> np.testing.assert_string_equal('abc', 'abc')
+ >>> np.testing.assert_string_equal('abc', 'abcd')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ...
+ AssertionError: Differences in strings:
+ - abc+ abcd? +
+
+ """
+ # delay import of difflib to reduce startup time
+ __tracebackhide__ = True # Hide traceback for py.test
+ import difflib
+
+ if not isinstance(actual, str):
+ raise AssertionError(repr(type(actual)))
+ if not isinstance(desired, str):
+ raise AssertionError(repr(type(desired)))
+ if desired == actual:
+ return
+
+ diff = list(difflib.Differ().compare(actual.splitlines(True),
+ desired.splitlines(True)))
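+    # difflib.Differ prefixes each line with '- ' (only in `actual`),
+    # '+ ' (only in `desired`), '? ' (intraline hints) or '  ' (common);
+    # walk the delta and collect only the real differences.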
+ diff_list = []
+ while diff:
+ d1 = diff.pop(0)
+ if d1.startswith(' '):
+ continue
+ if d1.startswith('- '):
+ l = [d1]
+ d2 = diff.pop(0)
+ if d2.startswith('? '):
+ l.append(d2)
+ d2 = diff.pop(0)
+ if not d2.startswith('+ '):
+ raise AssertionError(repr(d2))
+ l.append(d2)
+ if diff:
+ d3 = diff.pop(0)
+ if d3.startswith('? '):
+ l.append(d3)
+ else:
+ diff.insert(0, d3)
+ if d2[2:] == d1[2:]:
+ continue
+ diff_list.extend(l)
+ continue
+ raise AssertionError(repr(d1))
+ if not diff_list:
+ return
+ msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
+ if actual != desired:
+ raise AssertionError(msg)
+
+
+def rundocs(filename=None, raise_on_error=True):
+ """
+ Run doctests found in the given file.
+
+ By default `rundocs` raises an AssertionError on failure.
+
+ Parameters
+ ----------
+ filename : str
+ The path to the file for which the doctests are run.
+ raise_on_error : bool
+ Whether to raise an AssertionError when a doctest fails. Default is
+ True.
+
+ Notes
+ -----
+ The doctests can be run by the user/developer by adding the ``doctests``
+ argument to the ``test()`` call. For example, to run all tests (including
+ doctests) for `numpy.lib`:
+
+ >>> np.lib.test(doctests=True) # doctest: +SKIP
+ """
+ from numpy.distutils.misc_util import exec_mod_from_location
+ import doctest
+ if filename is None:
+ f = sys._getframe(1)
+ filename = f.f_globals['__file__']
+ name = os.path.splitext(os.path.basename(filename))[0]
+ m = exec_mod_from_location(name, filename)
+
+ tests = doctest.DocTestFinder().find(m)
+ runner = doctest.DocTestRunner(verbose=False)
+
+ msg = []
+ if raise_on_error:
+ out = lambda s: msg.append(s)
+ else:
+ out = None
+
+ for test in tests:
+ runner.run(test, out=out)
+
+ if runner.failures > 0 and raise_on_error:
+ raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
+def raises(*args):
+ """Decorator to check for raised exceptions.
+
+ The decorated test function must raise one of the passed exceptions to
+ pass. If you want to test many assertions about exceptions in a single
+ test, you may want to use `assert_raises` instead.
+
+ .. warning::
+ This decorator is nose specific, do not use it if you are using a
+ different test framework.
+
+ Parameters
+ ----------
+ args : exceptions
+ The test passes if any of the passed exceptions is raised.
+
+ Raises
+ ------
+ AssertionError
+
+ Examples
+ --------
+
+ Usage::
+
+ @raises(TypeError, ValueError)
+ def test_raises_type_error():
+ raise TypeError("This test passes")
+
+ @raises(Exception)
+ def test_that_fails_by_passing():
+ pass
+
+ """
+ nose = import_nose()
+ return nose.tools.raises(*args)
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
+import unittest
+
+
+class _Dummy(unittest.TestCase):
+ def nop(self):
+ pass
+
+_d = _Dummy('nop')
+
+def assert_raises(*args, **kwargs):
+ """
+ assert_raises(exception_class, callable, *args, **kwargs)
+ assert_raises(exception_class)
+
+ Fail unless an exception of class exception_class is thrown
+ by callable when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+
+ Alternatively, `assert_raises` can be used as a context manager:
+
+ >>> from numpy.testing import assert_raises
+ >>> with assert_raises(ZeroDivisionError):
+ ... 1 / 0
+
+ is equivalent to
+
+ >>> def div(x, y):
+ ... return x / y
+ >>> assert_raises(ZeroDivisionError, div, 1, 0)
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+    return _d.assertRaises(*args, **kwargs)
+
+
+def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
+ """
+ assert_raises_regex(exception_class, expected_regexp, callable, *args,
+ **kwargs)
+ assert_raises_regex(exception_class, expected_regexp)
+
+ Fail unless an exception of class exception_class and with message that
+ matches expected_regexp is thrown by callable when invoked with arguments
+ args and keyword arguments kwargs.
+
+ Alternatively, can be used as a context manager like `assert_raises`.
+
+ Notes
+ -----
+ .. versionadded:: 1.9.0
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+ """
+ Apply a decorator to all methods in a class matching a regular expression.
+
+ The given decorator is applied to all public methods of `cls` that are
+ matched by the regular expression `testmatch`
+ (``testmatch.search(methodname)``). Methods that are private, i.e. start
+ with an underscore, are ignored.
+
+ Parameters
+ ----------
+ cls : class
+ Class whose methods to decorate.
+ decorator : function
+ Decorator to apply to methods
+ testmatch : compiled regexp or str, optional
+ The regular expression. Default value is None, in which case the
+ nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+ is used.
+ If `testmatch` is a string, it is compiled to a regular expression
+ first.
+
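+    Examples
+    --------
+    A sketch, where ``MyTestClass`` and the ``slow`` decorator are
+    hypothetical placeholders::
+
+        decorate_methods(MyTestClass, slow, testmatch='test_big')
+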
+ """
+ if testmatch is None:
+ testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ else:
+ testmatch = re.compile(testmatch)
+ cls_attr = cls.__dict__
+
+ # delayed import to reduce startup time
+ from inspect import isfunction
+
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+ for function in methods:
+ try:
+ if hasattr(function, 'compat_func_name'):
+ funcname = function.compat_func_name
+ else:
+ funcname = function.__name__
+ except AttributeError:
+ # not a function
+ continue
+ if testmatch.search(funcname) and not funcname.startswith('_'):
+ setattr(cls, funcname, decorator(function))
+ return
+
+
+def measure(code_str, times=1, label=None):
+ """
+ Return elapsed time for executing code in the namespace of the caller.
+
+ The supplied code string is compiled with the Python builtin ``compile``.
+    The precision of the timing is 10 milliseconds. If the code executes
+    quickly on this timescale, it can be executed many times to get
+    reasonable timing accuracy.
+
+ Parameters
+ ----------
+ code_str : str
+ The code to be timed.
+ times : int, optional
+ The number of times the code is executed. Default is 1. The code is
+ only compiled once.
+ label : str, optional
+ A label to identify `code_str` with. This is passed into ``compile``
+ as the second argument (for run-time error messages).
+
+ Returns
+ -------
+ elapsed : float
+ Total elapsed time in seconds for executing `code_str` `times` times.
+
+ Examples
+ --------
+ >>> times = 10
+ >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
+ >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP
+ Time for a single execution : 0.005 s
+
+ """
+ frame = sys._getframe(1)
+ locs, globs = frame.f_locals, frame.f_globals
+
+ code = compile(code_str, f'Test name: {label} ', 'exec')
+ i = 0
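+    # `jiffies` counts elapsed time in units of 1/100th of a second, hence
+    # the 10 ms precision noted above and the 0.01 scale factor at the end.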
+ elapsed = jiffies()
+ while i < times:
+ i += 1
+ exec(code, globs, locs)
+ elapsed = jiffies() - elapsed
+ return 0.01*elapsed
+
+
+def _assert_valid_refcount(op):
+ """
+ Check that ufuncs don't mishandle refcount of object `1`.
+ Used in a few regression tests.
+ """
+ if not HAS_REFCOUNT:
+ return True
+
+ import gc
+ import numpy as np
+
+ b = np.arange(100*100).reshape(100, 100)
+ c = b
+ i = 1
+
+ gc.disable()
+ try:
+ rc = sys.getrefcount(i)
+ for j in range(15):
+ d = op(b, c)
+ assert_(sys.getrefcount(i) >= rc)
+ finally:
+ gc.enable()
+ del d # for pyflakes
+
+
+def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
+ err_msg='', verbose=True):
+ """
+ Raises an AssertionError if two objects are not equal up to desired
+ tolerance.
+
+ Given two array_like objects, check that their shapes and all elements
+ are equal (but see the Notes for the special handling of a scalar). An
+ exception is raised if the shapes mismatch or any values conflict. In
+    contrast to the standard usage in numpy, NaNs are compared like numbers;
+    no assertion is raised if both objects have NaNs in the same positions.
+
+ The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
+ that ``allclose`` has different default values). It compares the difference
+ between `actual` and `desired` to ``atol + rtol * abs(desired)``.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ actual : array_like
+ Array obtained.
+ desired : array_like
+ Array desired.
+ rtol : float, optional
+ Relative tolerance.
+ atol : float, optional
+ Absolute tolerance.
+    equal_nan : bool, optional
+ If True, NaNs will compare equal.
+ err_msg : str, optional
+ The error message to be printed in case of failure.
+ verbose : bool, optional
+ If True, the conflicting values are appended to the error message.
+
+ Raises
+ ------
+ AssertionError
+ If actual and desired are not equal up to specified precision.
+
+ See Also
+ --------
+ assert_array_almost_equal_nulp, assert_array_max_ulp
+
+ Notes
+ -----
+ When one of `actual` and `desired` is a scalar and the other is
+ array_like, the function checks that each element of the array_like
+ object is equal to the scalar.
+
+ Examples
+ --------
+ >>> x = [1e-5, 1e-3, 1e-1]
+ >>> y = np.arccos(np.cos(x))
+ >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
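+
+    As described in the Notes, a scalar is compared against each element of
+    an array_like:
+
+    >>> np.testing.assert_allclose([0.1 + 0.2, 0.3], 0.3, rtol=1e-9)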
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+
+ def compare(x, y):
+ return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
+ equal_nan=equal_nan)
+
+ actual, desired = np.asanyarray(actual), np.asanyarray(desired)
+ header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
+ assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
+ verbose=verbose, header=header, equal_nan=equal_nan)
+
+
+def assert_array_almost_equal_nulp(x, y, nulp=1):
+ """
+ Compare two arrays relatively to their spacing.
+
+ This is a relatively robust method to compare two arrays whose amplitude
+ is variable.
+
+ Parameters
+ ----------
+ x, y : array_like
+ Input arrays.
+ nulp : int, optional
+        The maximum number of units in the last place for tolerance (see Notes).
+ Default is 1.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ AssertionError
+ If the spacing between `x` and `y` for one or more elements is larger
+ than `nulp`.
+
+ See Also
+ --------
+ assert_array_max_ulp : Check that all items of arrays differ in at most
+ N Units in the Last Place.
+ spacing : Return the distance between x and the nearest adjacent number.
+
+ Notes
+ -----
+ An assertion is raised if the following condition is not met::
+
+ abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
+
+ Examples
+ --------
+ >>> x = np.array([1., 1e-10, 1e-20])
+ >>> eps = np.finfo(x.dtype).eps
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
+
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
+ Traceback (most recent call last):
+ ...
+ AssertionError: X and Y are not equal to 1 ULP (max is 2)
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+ ax = np.abs(x)
+ ay = np.abs(y)
+ ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
+ if not np.all(np.abs(x-y) <= ref):
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
+ msg = "X and Y are not equal to %d ULP" % nulp
+ else:
+ max_nulp = np.max(nulp_diff(x, y))
+ msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
+ raise AssertionError(msg)
+
+
+def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
+ """
+ Check that all items of arrays differ in at most N Units in the Last Place.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Input arrays to be compared.
+ maxulp : int, optional
+ The maximum number of units in the last place that elements of `a` and
+ `b` can differ. Default is 1.
+ dtype : dtype, optional
+ Data-type to convert `a` and `b` to if given. Default is None.
+
+ Returns
+ -------
+ ret : ndarray
+ Array containing number of representable floating point numbers between
+ items in `a` and `b`.
+
+ Raises
+ ------
+ AssertionError
+ If one or more elements differ by more than `maxulp`.
+
+ Notes
+ -----
+ For computing the ULP difference, this API does not differentiate between
+    various representations of NaN (the ULP difference between 0x7fc00000
+    and 0xffc00000 is zero).
+
+ See Also
+ --------
+ assert_array_almost_equal_nulp : Compare two arrays relatively to their
+ spacing.
+
+ Examples
+ --------
+ >>> a = np.linspace(0., 1., 100)
+ >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ import numpy as np
+ ret = nulp_diff(a, b, dtype)
+ if not np.all(ret <= maxulp):
+ raise AssertionError("Arrays are not almost equal up to %g "
+ "ULP (max difference is %g ULP)" %
+ (maxulp, np.max(ret)))
+ return ret
+
+
+def nulp_diff(x, y, dtype=None):
+ """For each item in x and y, return the number of representable floating
+ points between them.
+
+ Parameters
+ ----------
+ x : array_like
+ first input array
+ y : array_like
+ second input array
+ dtype : dtype, optional
+ Data-type to convert `x` and `y` to if given. Default is None.
+
+ Returns
+ -------
+ nulp : array_like
+ number of representable floating point numbers between each item in x
+ and y.
+
+ Notes
+ -----
+ For computing the ULP difference, this API does not differentiate between
+    various representations of NaN (the ULP difference between 0x7fc00000
+    and 0xffc00000 is zero).
+
+ Examples
+ --------
+    By definition, epsilon is the smallest number such that 1 + eps != 1, so
+    there should be exactly one ULP between 1 and 1 + eps:
+
+    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
+    array([1.])
+ """
+ import numpy as np
+ if dtype:
+ x = np.asarray(x, dtype=dtype)
+ y = np.asarray(y, dtype=dtype)
+ else:
+ x = np.asarray(x)
+ y = np.asarray(y)
+
+ t = np.common_type(x, y)
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
+ raise NotImplementedError("_nulp not implemented for complex array")
+
+ x = np.array([x], dtype=t)
+ y = np.array([y], dtype=t)
+
+ x[np.isnan(x)] = np.nan
+ y[np.isnan(y)] = np.nan
+
+ if not x.shape == y.shape:
+ raise ValueError("x and y do not have the same shape: %s - %s" %
+ (x.shape, y.shape))
+
+ def _diff(rx, ry, vdt):
+ diff = np.asarray(rx-ry, dtype=vdt)
+ return np.abs(diff)
+
+ rx = integer_repr(x)
+ ry = integer_repr(y)
+ return _diff(rx, ry, t)
+
+
+def _integer_repr(x, vdt, comp):
+    # Reinterpret the binary representation of the float as sign-magnitude:
+    # take into account the two's-complement representation
+ # See also
+ # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
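+    # After the transform, adjacent floats map to adjacent integers and
+    # -0.0 coincides with +0.0, so the ULP distance between two floats is
+    # simply the difference of their transformed representations.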
+ rx = x.view(vdt)
+ if not (rx.size == 1):
+ rx[rx < 0] = comp - rx[rx < 0]
+ else:
+ if rx < 0:
+ rx = comp - rx
+
+ return rx
+
+
+def integer_repr(x):
+ """Return the signed-magnitude interpretation of the binary representation
+ of x."""
+ import numpy as np
+ if x.dtype == np.float16:
+ return _integer_repr(x, np.int16, np.int16(-2**15))
+ elif x.dtype == np.float32:
+ return _integer_repr(x, np.int32, np.int32(-2**31))
+ elif x.dtype == np.float64:
+ return _integer_repr(x, np.int64, np.int64(-2**63))
+ else:
+ raise ValueError(f'Unsupported dtype {x.dtype}')
+
+
+@contextlib.contextmanager
+def _assert_warns_context(warning_class, name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+ with suppress_warnings() as sup:
+ l = sup.record(warning_class)
+ yield
+ if not len(l) > 0:
+ name_str = f' when calling {name}' if name is not None else ''
+ raise AssertionError("No warning raised" + name_str)
+
+
+def assert_warns(warning_class, *args, **kwargs):
+ """
+ Fail unless the given callable throws the specified warning.
+
+ A warning of class warning_class should be thrown by the callable when
+ invoked with arguments args and keyword arguments kwargs.
+ If a different type of warning is thrown, it will not be caught.
+
+ If called with all arguments other than the warning class omitted, may be
+ used as a context manager:
+
+ with assert_warns(SomeWarning):
+ do_something()
+
+ The ability to be used as a context manager is new in NumPy v1.11.0.
+
+ .. versionadded:: 1.4.0
+
+ Parameters
+ ----------
+ warning_class : class
+ The class defining the warning that `func` is expected to throw.
+ func : callable, optional
+ Callable to test
+ *args : Arguments
+ Arguments for `func`.
+ **kwargs : Kwargs
+ Keyword arguments for `func`.
+
+ Returns
+ -------
+ The value returned by `func`.
+
+ Examples
+ --------
+ >>> import warnings
+ >>> def deprecated_func(num):
+ ... warnings.warn("Please upgrade", DeprecationWarning)
+ ... return num*num
+ >>> with np.testing.assert_warns(DeprecationWarning):
+ ... assert deprecated_func(4) == 16
+ >>> # or passing a func
+ >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
+ >>> assert ret == 16
+ """
+ if not args:
+ return _assert_warns_context(warning_class)
+
+ func = args[0]
+ args = args[1:]
+ with _assert_warns_context(warning_class, name=func.__name__):
+ return func(*args, **kwargs)
+
+
+@contextlib.contextmanager
+def _assert_no_warnings_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
+ yield
+ if len(l) > 0:
+ name_str = f' when calling {name}' if name is not None else ''
+ raise AssertionError(f'Got warnings{name_str}: {l}')
+
+
+def assert_no_warnings(*args, **kwargs):
+ """
+ Fail if the given callable produces any warnings.
+
+ If called with all arguments omitted, may be used as a context manager:
+
+ with assert_no_warnings():
+ do_something()
+
+ The ability to be used as a context manager is new in NumPy v1.11.0.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ The value returned by `func`.
+
+ """
+ if not args:
+ return _assert_no_warnings_context()
+
+ func = args[0]
+ args = args[1:]
+ with _assert_no_warnings_context(name=func.__name__):
+ return func(*args, **kwargs)
+
+
+def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
+ """
+    Generator producing data with different alignments and offsets
+    to test SIMD vectorization.
+
+    Parameters
+    ----------
+    dtype : dtype
+        data type to produce
+    type : string
+        'unary': create data for unary operations, creates one input
+        and output array
+        'binary': create data for binary operations, creates two input
+        arrays and one output array
+    max_size : integer
+        maximum size of data to produce
+
+    Returns
+    -------
+    if type is 'unary' yields one output array, one input array and a message
+    containing information on the data
+    if type is 'binary' yields one output array, two input arrays and a
+    message containing information on the data
+
+ """
+ ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
+ bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
+ for o in range(3):
+ for s in range(o + 2, max(o + 3, max_size)):
+ if type == 'unary':
+ inp = lambda: arange(s, dtype=dtype)[o:]
+ out = empty((s,), dtype=dtype)[o:]
+ yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
+ d = inp()
+ yield d, d, ufmt % (o, o, s, dtype, 'in place')
+ yield out[1:], inp()[:-1], ufmt % \
+ (o + 1, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp()[1:], ufmt % \
+ (o, o + 1, s - 1, dtype, 'out of place')
+ yield inp()[:-1], inp()[1:], ufmt % \
+ (o, o + 1, s - 1, dtype, 'aliased')
+ yield inp()[1:], inp()[:-1], ufmt % \
+ (o + 1, o, s - 1, dtype, 'aliased')
+ if type == 'binary':
+ inp1 = lambda: arange(s, dtype=dtype)[o:]
+ inp2 = lambda: arange(s, dtype=dtype)[o:]
+ out = empty((s,), dtype=dtype)[o:]
+ yield out, inp1(), inp2(), bfmt % \
+ (o, o, o, s, dtype, 'out of place')
+ d = inp1()
+ yield d, d, inp2(), bfmt % \
+ (o, o, o, s, dtype, 'in place1')
+ d = inp2()
+ yield d, inp1(), d, bfmt % \
+ (o, o, o, s, dtype, 'in place2')
+ yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+ (o + 1, o, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+ (o, o + 1, o, s - 1, dtype, 'out of place')
+ yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+ (o, o, o + 1, s - 1, dtype, 'out of place')
+ yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+ (o + 1, o, o, s - 1, dtype, 'aliased')
+ yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+ (o, o + 1, o, s - 1, dtype, 'aliased')
+ yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+ (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
+class IgnoreException(Exception):
+ "Ignoring this exception due to disabled feature"
+ pass
+
+
+@contextlib.contextmanager
+def tempdir(*args, **kwargs):
+ """Context manager to provide a temporary test folder.
+
+    All arguments are passed as-is to the underlying tempfile.mkdtemp
+    function.
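+
+    Examples
+    --------
+    >>> import os
+    >>> with np.testing.tempdir() as folder:
+    ...     assert os.path.isdir(folder)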
+
+ """
+ tmpdir = mkdtemp(*args, **kwargs)
+ try:
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir)
+
+
+@contextlib.contextmanager
+def temppath(*args, **kwargs):
+ """Context manager for temporary files.
+
+ Context manager that returns the path to a closed temporary file. Its
+ parameters are the same as for tempfile.mkstemp and are passed directly
+ to that function. The underlying file is removed when the context is
+ exited, so it should be closed at that time.
+
+    Windows does not allow a temporary file to be opened while it is already
+    open, so the underlying file descriptor is closed here right after
+    creation; the caller may then reopen the file by path.
+
+ """
+ fd, path = mkstemp(*args, **kwargs)
+ os.close(fd)
+ try:
+ yield path
+ finally:
+ os.remove(path)
+
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+ """ Context manager that resets warning registry for catching warnings
+
+ Warnings can be slippery, because, whenever a warning is triggered, Python
+ adds a ``__warningregistry__`` member to the *calling* module. This makes
+ it impossible to retrigger the warning in this module, whatever you put in
+ the warnings filters. This context manager accepts a sequence of `modules`
+ as a keyword argument to its constructor and:
+
+ * stores and removes any ``__warningregistry__`` entries in given `modules`
+ on entry;
+ * resets ``__warningregistry__`` to its previous state on exit.
+
+ This makes it possible to trigger any warning afresh inside the context
+ manager without disturbing the state of warnings outside.
+
+ For compatibility with Python 3.0, please consider all arguments to be
+ keyword-only.
+
+ Parameters
+ ----------
+ record : bool, optional
+ Specifies whether warnings should be captured by a custom
+ implementation of ``warnings.showwarning()`` and be appended to a list
+ returned by the context manager. Otherwise None is returned by the
+ context manager. The objects appended to the list are arguments whose
+ attributes mirror the arguments to ``showwarning()``.
+ modules : sequence, optional
+ Sequence of modules for which to reset warnings registry on entry and
+ restore on exit. To work correctly, all 'ignore' filters should
+ filter by one of these modules.
+
+ Examples
+ --------
+ >>> import warnings
+ >>> with np.testing.clear_and_catch_warnings(
+ ... modules=[np.core.fromnumeric]):
+ ... warnings.simplefilter('always')
+ ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
+ ... # do something that raises a warning but ignore those in
+ ... # np.core.fromnumeric
+ """
+ class_modules = ()
+
+ def __init__(self, record=False, modules=()):
+ self.modules = set(modules).union(self.class_modules)
+ self._warnreg_copies = {}
+ super().__init__(record=record)
+
+ def __enter__(self):
+ for mod in self.modules:
+ if hasattr(mod, '__warningregistry__'):
+ mod_reg = mod.__warningregistry__
+ self._warnreg_copies[mod] = mod_reg.copy()
+ mod_reg.clear()
+ return super().__enter__()
+
+ def __exit__(self, *exc_info):
+ super().__exit__(*exc_info)
+ for mod in self.modules:
+ if hasattr(mod, '__warningregistry__'):
+ mod.__warningregistry__.clear()
+ if mod in self._warnreg_copies:
+ mod.__warningregistry__.update(self._warnreg_copies[mod])
+
+
+class suppress_warnings:
+ """
+ Context manager and decorator doing much the same as
+ ``warnings.catch_warnings``.
+
+ However, it also provides a filter mechanism to work around
+ https://bugs.python.org/issue4180.
+
+ This bug causes Python before 3.4 to not reliably show warnings again
+ after they have been ignored once (even within catch_warnings). It
+ means that no "ignore" filter can be used easily, since following
+ tests might need to see the warning. Additionally it allows easier
+ specificity for testing warnings and can be nested.
+
+ Parameters
+ ----------
+ forwarding_rule : str, optional
+ One of "always", "once", "module", or "location". Analogous to
+ the usual warnings module filter mode, it is useful to reduce
+        noise mostly on the outermost level. Unsuppressed and unrecorded
+        warnings will be forwarded based on this rule. Defaults to "always".
+        "location" is equivalent to the warnings "default", matching by the
+        exact location the warning originated from.
+
+ Notes
+ -----
+ Filters added inside the context manager will be discarded again
+ when leaving it. Upon entering all filters defined outside a
+ context will be applied automatically.
+
+ When a recording filter is added, matching warnings are stored in the
+ ``log`` attribute as well as in the list returned by ``record``.
+
+ If filters are added and the ``module`` keyword is given, the
+ warning registry of this module will additionally be cleared when
+ applying it, entering the context, or exiting it. This could cause
+ warnings to appear a second time after leaving the context if they
+ were configured to be printed once (default) and were already
+ printed before the context was entered.
+
+ Nesting this context manager will work as expected when the
+ forwarding rule is "always" (default). Unfiltered and unrecorded
+ warnings will be passed out and be matched by the outer level.
+    On the outermost level they will be printed (or caught by another
+ warnings context). The forwarding rule argument can modify this
+ behaviour.
+
+ Like ``catch_warnings`` this context manager is not threadsafe.
+
+ Examples
+ --------
+
+ With a context manager::
+
+ with np.testing.suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "Some text")
+ sup.filter(module=np.ma.core)
+ log = sup.record(FutureWarning, "Does this occur?")
+ command_giving_warnings()
+ # The FutureWarning was given once, the filtered warnings were
+ # ignored. All other warnings abide outside settings (may be
+ # printed/error)
+ assert_(len(log) == 1)
+ assert_(len(sup.log) == 1) # also stored in log attribute
+
+ Or as a decorator::
+
+ sup = np.testing.suppress_warnings()
+ sup.filter(module=np.ma.core) # module must match exactly
+ @sup
+ def some_function():
+ # do something which causes a warning in np.ma.core
+ pass
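+
+    Nested contexts compose: with the default "always" forwarding rule,
+    warnings neither suppressed nor recorded by the inner context are
+    forwarded to, and can be recorded by, the outer one (a sketch)::
+
+        with np.testing.suppress_warnings() as outer:
+            log = outer.record(UserWarning)
+            with np.testing.suppress_warnings() as inner:
+                inner.filter(DeprecationWarning)
+                warnings.warn("passed through", UserWarning)
+        assert_(len(log) == 1)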
+ """
+ def __init__(self, forwarding_rule="always"):
+ self._entered = False
+
+        # Suppressions are either instance-level or defined inside one with block:
+ self._suppressions = []
+
+ if forwarding_rule not in {"always", "module", "once", "location"}:
+ raise ValueError("unsupported forwarding rule.")
+ self._forwarding_rule = forwarding_rule
+
+ def _clear_registries(self):
+ if hasattr(warnings, "_filters_mutated"):
+ # clearing the registry should not be necessary on new pythons,
+ # instead the filters should be mutated.
+ warnings._filters_mutated()
+ return
+ # Simply clear the registry, this should normally be harmless,
+ # note that on new pythons it would be invalidated anyway.
+ for module in self._tmp_modules:
+ if hasattr(module, "__warningregistry__"):
+ module.__warningregistry__.clear()
+
+ def _filter(self, category=Warning, message="", module=None, record=False):
+ if record:
+ record = [] # The log where to store warnings
+ else:
+ record = None
+ if self._entered:
+ if module is None:
+ warnings.filterwarnings(
+ "always", category=category, message=message)
+ else:
+ module_regex = module.__name__.replace('.', r'\.') + '$'
+ warnings.filterwarnings(
+ "always", category=category, message=message,
+ module=module_regex)
+ self._tmp_modules.add(module)
+ self._clear_registries()
+
+ self._tmp_suppressions.append(
+ (category, message, re.compile(message, re.I), module, record))
+ else:
+ self._suppressions.append(
+ (category, message, re.compile(message, re.I), module, record))
+
+ return record
+
+ def filter(self, category=Warning, message="", module=None):
+ """
+ Add a new suppressing filter or apply it if the state is entered.
+
+ Parameters
+ ----------
+ category : class, optional
+ Warning class to filter
+ message : string, optional
+ Regular expression matching the warning message.
+ module : module, optional
+ Module to filter for. Note that the module (and its file)
+ must match exactly and cannot be a submodule. This may make
+ it unreliable for external modules.
+
+ Notes
+ -----
+ When added within a context, filters are only added inside
+ the context and will be forgotten when the context is exited.
+ """
+ self._filter(category=category, message=message, module=module,
+ record=False)
+
+ def record(self, category=Warning, message="", module=None):
+ """
+ Append a new recording filter or apply it if the state is entered.
+
+ All warnings matching will be appended to the ``log`` attribute.
+
+ Parameters
+ ----------
+ category : class, optional
+ Warning class to filter
+ message : string, optional
+ Regular expression matching the warning message.
+ module : module, optional
+ Module to filter for. Note that the module (and its file)
+ must match exactly and cannot be a submodule. This may make
+ it unreliable for external modules.
+
+ Returns
+ -------
+ log : list
+ A list which will be filled with all matched warnings.
+
+ Notes
+ -----
+ When added within a context, filters are only added inside
+ the context and will be forgotten when the context is exited.
+ """
+ return self._filter(category=category, message=message, module=module,
+ record=True)
+
+ def __enter__(self):
+ if self._entered:
+ raise RuntimeError("cannot enter suppress_warnings twice.")
+
+ self._orig_show = warnings.showwarning
+ self._filters = warnings.filters
+ warnings.filters = self._filters[:]
+
+ self._entered = True
+ self._tmp_suppressions = []
+ self._tmp_modules = set()
+ self._forwarded = set()
+
+ self.log = [] # reset global log (no need to keep same list)
+
+ for cat, mess, _, mod, log in self._suppressions:
+ if log is not None:
+ del log[:] # clear the log
+ if mod is None:
+ warnings.filterwarnings(
+ "always", category=cat, message=mess)
+ else:
+ module_regex = mod.__name__.replace('.', r'\.') + '$'
+ warnings.filterwarnings(
+ "always", category=cat, message=mess,
+ module=module_regex)
+ self._tmp_modules.add(mod)
+ warnings.showwarning = self._showwarning
+ self._clear_registries()
+
+ return self
+
+ def __exit__(self, *exc_info):
+ warnings.showwarning = self._orig_show
+ warnings.filters = self._filters
+ self._clear_registries()
+ self._entered = False
+ del self._orig_show
+ del self._filters
+
+ def _showwarning(self, message, category, filename, lineno,
+ *args, use_warnmsg=None, **kwargs):
+ for cat, _, pattern, mod, rec in (
+ self._suppressions + self._tmp_suppressions)[::-1]:
+ if (issubclass(category, cat) and
+ pattern.match(message.args[0]) is not None):
+ if mod is None:
+ # Message and category match, either recorded or ignored
+ if rec is not None:
+ msg = WarningMessage(message, category, filename,
+ lineno, **kwargs)
+ self.log.append(msg)
+ rec.append(msg)
+ return
+ # Use startswith, because warnings strips the c or o from
+ # .pyc/.pyo files.
+ elif mod.__file__.startswith(filename):
+ # The message and module (filename) match
+ if rec is not None:
+ msg = WarningMessage(message, category, filename,
+ lineno, **kwargs)
+ self.log.append(msg)
+ rec.append(msg)
+ return
+
+ # There is no filter in place, so pass to the outside handler
+ # unless we should only pass it once
+ if self._forwarding_rule == "always":
+ if use_warnmsg is None:
+ self._orig_show(message, category, filename, lineno,
+ *args, **kwargs)
+ else:
+ self._orig_showmsg(use_warnmsg)
+ return
+
+ if self._forwarding_rule == "once":
+ signature = (message.args, category)
+ elif self._forwarding_rule == "module":
+ signature = (message.args, category, filename)
+ elif self._forwarding_rule == "location":
+ signature = (message.args, category, filename, lineno)
+
+ if signature in self._forwarded:
+ return
+ self._forwarded.add(signature)
+ if use_warnmsg is None:
+ self._orig_show(message, category, filename, lineno, *args,
+ **kwargs)
+ else:
+ self._orig_showmsg(use_warnmsg)
+
+ def __call__(self, func):
+ """
+ Function decorator to apply certain suppressions to a whole
+ function.
+ """
+ @wraps(func)
+ def new_func(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+
+ return new_func
+
+
+@contextlib.contextmanager
+def _assert_no_gc_cycles_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+
+ # not meaningful to test if there is no refcounting
+ if not HAS_REFCOUNT:
+ yield
+ return
+
+ assert_(gc.isenabled())
+ gc.disable()
+ gc_debug = gc.get_debug()
+ try:
+ for i in range(100):
+ if gc.collect() == 0:
+ break
+ else:
+ raise RuntimeError(
+ "Unable to fully collect garbage - perhaps a __del__ method "
+ "is creating more reference cycles?")
+
+ gc.set_debug(gc.DEBUG_SAVEALL)
+ yield
+ # gc.collect returns the number of unreachable objects in cycles that
+ # were found -- we are checking that no cycles were created in the context
+ n_objects_in_cycles = gc.collect()
+ objects_in_cycles = gc.garbage[:]
+ finally:
+ del gc.garbage[:]
+ gc.set_debug(gc_debug)
+ gc.enable()
+
+ if n_objects_in_cycles:
+ name_str = f' when calling {name}' if name is not None else ''
+ raise AssertionError(
+ "Reference cycles were found{}: {} objects were collected, "
+ "of which {} are shown below:{}"
+ .format(
+ name_str,
+ n_objects_in_cycles,
+ len(objects_in_cycles),
+ ''.join(
+ "\n {} object with id={}:\n {}".format(
+ type(o).__name__,
+ id(o),
+ pprint.pformat(o).replace('\n', '\n ')
+ ) for o in objects_in_cycles
+ )
+ )
+ )
+
+
+def assert_no_gc_cycles(*args, **kwargs):
+ """
+ Fail if the given callable produces any reference cycles.
+
+ If called with all arguments omitted, may be used as a context manager:
+
+ with assert_no_gc_cycles():
+ do_something()
+
+ .. versionadded:: 1.15.0
+
+ Parameters
+ ----------
+ func : callable
+ The callable to test.
+ \\*args : Arguments
+ Arguments passed to `func`.
+ \\*\\*kwargs : Kwargs
+ Keyword arguments passed to `func`.
+
+ Returns
+ -------
+ Nothing. The result is deliberately discarded to ensure that all cycles
+ are found.
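+
+    Examples
+    --------
+    A callable that creates no reference cycles passes silently:
+
+    >>> np.testing.assert_no_gc_cycles(lambda: [1, 2, 3])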
+
+ """
+ if not args:
+ return _assert_no_gc_cycles_context()
+
+ func = args[0]
+ args = args[1:]
+ with _assert_no_gc_cycles_context(name=func.__name__):
+ func(*args, **kwargs)
+
+def break_cycles():
+ """
+    Break reference cycles by calling gc.collect.
+ Objects can call other objects' methods (for instance, another object's
+ __del__) inside their own __del__. On PyPy, the interpreter only runs
+ between calls to gc.collect, so multiple calls are needed to completely
+ release all cycles.
+ """
+
+ gc.collect()
+ if IS_PYPY:
+ # a few more, just to make sure all the finalizers are called
+ gc.collect()
+ gc.collect()
+ gc.collect()
+ gc.collect()
+
+
+def requires_memory(free_bytes):
+ """Decorator to skip a test if not enough memory is available"""
+ import pytest
+
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*a, **kw):
+ msg = check_free_memory(free_bytes)
+ if msg is not None:
+ pytest.skip(msg)
+
+ try:
+ return func(*a, **kw)
+ except MemoryError:
+ # Probably ran out of memory regardless: don't regard as failure
+ pytest.xfail("MemoryError raised")
+
+ return wrapper
+
+ return decorator
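+
+# Typical use of `requires_memory` (a sketch; the test below is hypothetical):
+#
+#     @requires_memory(free_bytes=8_000_000_000)
+#     def test_large_allocation():
+#         np.zeros(10**9)  # one billion float64s, ~8 GB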
+
+
+def check_free_memory(free_bytes):
+ """
+ Check whether `free_bytes` amount of memory is currently free.
+ Returns: None if enough memory available, otherwise error message
+ """
+ env_var = 'NPY_AVAILABLE_MEM'
+ env_value = os.environ.get(env_var)
+ if env_value is not None:
+ try:
+ mem_free = _parse_size(env_value)
+ except ValueError as exc:
+            raise ValueError(
+                f'Invalid environment variable {env_var}: {exc}') from exc
+
+ msg = (f'{free_bytes/1e9} GB memory required, but environment variable '
+ f'NPY_AVAILABLE_MEM={env_value} set')
+ else:
+ mem_free = _get_mem_available()
+
+ if mem_free is None:
+ msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
+ "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
+ "the test.")
+ mem_free = -1
+ else:
+ msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available'
+
+ return msg if mem_free < free_bytes else None
+
+
+def _parse_size(size_str):
+ """Convert memory size strings ('12 GB' etc.) to float"""
+ suffixes = {'': 1, 'b': 1,
+ 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
+ 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
+ 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
+
+ size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
+ '|'.join(suffixes.keys())), re.I)
+
+ m = size_re.match(size_str.lower())
+ if not m or m.group(2) not in suffixes:
+ raise ValueError(f'value {size_str!r} not a valid size')
+ return int(float(m.group(1)) * suffixes[m.group(2)])
+
+
+def _get_mem_available():
+ """Return available memory in bytes, or None if unknown."""
+ try:
+ import psutil
+ return psutil.virtual_memory().available
+ except (ImportError, AttributeError):
+ pass
+
+ if sys.platform.startswith('linux'):
+ info = {}
+ with open('/proc/meminfo', 'r') as f:
+ for line in f:
+ p = line.split()
+ info[p[0].strip(':').lower()] = int(p[1]) * 1024
+
+ if 'memavailable' in info:
+ # Linux >= 3.14
+ return info['memavailable']
+ else:
+ return info['memfree'] + info['cached']
+
+ return None
+
+
+def _no_tracing(func):
+ """
+ Decorator to temporarily turn off tracing for the duration of a test.
+ Needed in tests that check refcounting, otherwise the tracing itself
+ influences the refcounts
+ """
+ if not hasattr(sys, 'gettrace'):
+ return func
+ else:
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ original_trace = sys.gettrace()
+ try:
+ sys.settrace(None)
+ return func(*args, **kwargs)
+ finally:
+ sys.settrace(original_trace)
+ return wrapper
+
+
+def _get_glibc_version():
+ try:
+ ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
+ except Exception as inst:
+ ver = '0.0'
+
+ return ver
+
+
+_glibcver = _get_glibc_version()
+_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x)
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi b/venv/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi
new file mode 100644
index 00000000..6e051e91
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/_private/utils.pyi
@@ -0,0 +1,399 @@
+import os
+import sys
+import ast
+import types
+import warnings
+import unittest
+import contextlib
+from re import Pattern
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+ Literal as L,
+ Any,
+ AnyStr,
+ ClassVar,
+ NoReturn,
+ overload,
+ type_check_only,
+ TypeVar,
+ Union,
+ Final,
+ SupportsIndex,
+)
+from typing_extensions import ParamSpec
+
+from numpy import generic, dtype, number, object_, bool_, _FloatValue
+from numpy._typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _ArrayLikeNumber_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeDT64_co,
+)
+
+from unittest.case import (
+ SkipTest as SkipTest,
+)
+
+_P = ParamSpec("_P")
+_T = TypeVar("_T")
+_ET = TypeVar("_ET", bound=BaseException)
+_FT = TypeVar("_FT", bound=Callable[..., Any])
+
+# Must return a bool or an ndarray/generic type
+# that is supported by `np.logical_and.reduce`
+_ComparisonFunc = Callable[
+ [NDArray[Any], NDArray[Any]],
+ Union[
+ bool,
+ bool_,
+ number[Any],
+ NDArray[Union[bool_, number[Any], object_]],
+ ],
+]
+
+__all__: list[str]
+
+class KnownFailureException(Exception): ...
+class IgnoreException(Exception): ...
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+ class_modules: ClassVar[tuple[types.ModuleType, ...]]
+ modules: set[types.ModuleType]
+ @overload
+ def __new__(
+ cls,
+ record: L[False] = ...,
+ modules: Iterable[types.ModuleType] = ...,
+ ) -> _clear_and_catch_warnings_without_records: ...
+ @overload
+ def __new__(
+ cls,
+ record: L[True],
+ modules: Iterable[types.ModuleType] = ...,
+ ) -> _clear_and_catch_warnings_with_records: ...
+ @overload
+ def __new__(
+ cls,
+ record: bool,
+ modules: Iterable[types.ModuleType] = ...,
+ ) -> clear_and_catch_warnings: ...
+ def __enter__(self) -> None | list[warnings.WarningMessage]: ...
+ def __exit__(
+ self,
+ __exc_type: None | type[BaseException] = ...,
+ __exc_val: None | BaseException = ...,
+ __exc_tb: None | types.TracebackType = ...,
+ ) -> None: ...
+
+# Type-check only `clear_and_catch_warnings` subclasses for both values of the
+# `record` parameter. Copied from the stdlib `warnings` stubs.
+
+@type_check_only
+class _clear_and_catch_warnings_with_records(clear_and_catch_warnings):
+ def __enter__(self) -> list[warnings.WarningMessage]: ...
+
+@type_check_only
+class _clear_and_catch_warnings_without_records(clear_and_catch_warnings):
+ def __enter__(self) -> None: ...
+
+class suppress_warnings:
+ log: list[warnings.WarningMessage]
+ def __init__(
+ self,
+ forwarding_rule: L["always", "module", "once", "location"] = ...,
+ ) -> None: ...
+ def filter(
+ self,
+ category: type[Warning] = ...,
+ message: str = ...,
+ module: None | types.ModuleType = ...,
+ ) -> None: ...
+ def record(
+ self,
+ category: type[Warning] = ...,
+ message: str = ...,
+ module: None | types.ModuleType = ...,
+ ) -> list[warnings.WarningMessage]: ...
+ def __enter__(self: _T) -> _T: ...
+ def __exit__(
+ self,
+ __exc_type: None | type[BaseException] = ...,
+ __exc_val: None | BaseException = ...,
+ __exc_tb: None | types.TracebackType = ...,
+ ) -> None: ...
+ def __call__(self, func: _FT) -> _FT: ...
+
+verbose: int
+IS_PYPY: Final[bool]
+IS_PYSTON: Final[bool]
+HAS_REFCOUNT: Final[bool]
+HAS_LAPACK64: Final[bool]
+
+def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ...
+
+# Contrary to runtime we can't do `os.name` checks while type checking,
+# only `sys.platform` checks
+if sys.platform == "win32" or sys.platform == "cygwin":
+ def memusage(processName: str = ..., instance: int = ...) -> int: ...
+elif sys.platform == "linux":
+ def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ...
+else:
+ def memusage() -> NoReturn: ...
+
+if sys.platform == "linux":
+ def jiffies(
+ _proc_pid_stat: str | bytes | os.PathLike[Any] = ...,
+ _load_time: list[float] = ...,
+ ) -> int: ...
+else:
+ def jiffies(_load_time: list[float] = ...) -> int: ...
+
+def build_err_msg(
+ arrays: Iterable[object],
+ err_msg: str,
+ header: str = ...,
+ verbose: bool = ...,
+ names: Sequence[str] = ...,
+ precision: None | SupportsIndex = ...,
+) -> str: ...
+
+def assert_equal(
+ actual: object,
+ desired: object,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def print_assert_equal(
+ test_string: str,
+ actual: object,
+ desired: object,
+) -> None: ...
+
+def assert_almost_equal(
+ actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ decimal: int = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+# Anything that can be coerced into `builtins.float`
+def assert_approx_equal(
+ actual: _FloatValue,
+ desired: _FloatValue,
+ significant: int = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def assert_array_compare(
+ comparison: _ComparisonFunc,
+ x: ArrayLike,
+ y: ArrayLike,
+ err_msg: str = ...,
+ verbose: bool = ...,
+ header: str = ...,
+ precision: SupportsIndex = ...,
+ equal_nan: bool = ...,
+ equal_inf: bool = ...,
+ *,
+ strict: bool = ...
+) -> None: ...
+
+def assert_array_equal(
+ x: ArrayLike,
+ y: ArrayLike,
+ err_msg: str = ...,
+ verbose: bool = ...,
+ *,
+ strict: bool = ...
+) -> None: ...
+
+def assert_array_almost_equal(
+ x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ decimal: float = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+@overload
+def assert_array_less(
+ x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+ x: _ArrayLikeTD64_co,
+ y: _ArrayLikeTD64_co,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+ x: _ArrayLikeDT64_co,
+ y: _ArrayLikeDT64_co,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def runstring(
+ astr: str | bytes | types.CodeType,
+ dict: None | dict[str, Any],
+) -> Any: ...
+
+def assert_string_equal(actual: str, desired: str) -> None: ...
+
+def rundocs(
+ filename: None | str | os.PathLike[str] = ...,
+ raise_on_error: bool = ...,
+) -> None: ...
+
+def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ...
+
+@overload
+def assert_raises( # type: ignore
+ expected_exception: type[BaseException] | tuple[type[BaseException], ...],
+ callable: Callable[_P, Any],
+ /,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> None: ...
+@overload
+def assert_raises(
+ expected_exception: type[_ET] | tuple[type[_ET], ...],
+ *,
+ msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+@overload
+def assert_raises_regex(
+ expected_exception: type[BaseException] | tuple[type[BaseException], ...],
+ expected_regex: str | bytes | Pattern[Any],
+ callable: Callable[_P, Any],
+ /,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> None: ...
+@overload
+def assert_raises_regex(
+ expected_exception: type[_ET] | tuple[type[_ET], ...],
+ expected_regex: str | bytes | Pattern[Any],
+ *,
+ msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+def decorate_methods(
+ cls: type[Any],
+ decorator: Callable[[Callable[..., Any]], Any],
+ testmatch: None | str | bytes | Pattern[Any] = ...,
+) -> None: ...
+
+def measure(
+ code_str: str | bytes | ast.mod | ast.AST,
+ times: int = ...,
+ label: None | str = ...,
+) -> float: ...
+
+@overload
+def assert_allclose(
+ actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_allclose(
+ actual: _ArrayLikeTD64_co,
+ desired: _ArrayLikeTD64_co,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+ err_msg: str = ...,
+ verbose: bool = ...,
+) -> None: ...
+
+def assert_array_almost_equal_nulp(
+ x: _ArrayLikeNumber_co,
+ y: _ArrayLikeNumber_co,
+ nulp: float = ...,
+) -> None: ...
+
+def assert_array_max_ulp(
+ a: _ArrayLikeNumber_co,
+ b: _ArrayLikeNumber_co,
+ maxulp: float = ...,
+ dtype: DTypeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def assert_warns(
+ warning_class: type[Warning],
+) -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_warns(
+ warning_class: type[Warning],
+ func: Callable[_P, _T],
+ /,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> _T: ...
+
+@overload
+def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_warnings(
+ func: Callable[_P, _T],
+ /,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> _T: ...
+
+@overload
+def tempdir(
+ suffix: None = ...,
+ prefix: None = ...,
+ dir: None = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def tempdir(
+ suffix: None | AnyStr = ...,
+ prefix: None | AnyStr = ...,
+ dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def temppath(
+ suffix: None = ...,
+ prefix: None = ...,
+ dir: None = ...,
+ text: bool = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def temppath(
+ suffix: None | AnyStr = ...,
+ prefix: None | AnyStr = ...,
+ dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+ text: bool = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_gc_cycles(
+ func: Callable[_P, Any],
+ /,
+ *args: _P.args,
+ **kwargs: _P.kwargs,
+) -> None: ...
+
+def break_cycles() -> None: ...
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py b/venv/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py
new file mode 100644
index 00000000..c1d4cdff
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/print_coercion_tables.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+"""Prints type-coercion tables for the built-in NumPy types
+
+"""
+import numpy as np
+from collections import namedtuple
+
+# Generic object that can be added, but doesn't do anything else
+class GenericObject:
+ def __init__(self, v):
+ self.v = v
+
+ def __add__(self, other):
+ return self
+
+ def __radd__(self, other):
+ return self
+
+ dtype = np.dtype('O')
+
+def print_cancast_table(ntypes):
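+    # Legend: '#' equivalent cast, '=' safe, '~' same-kind, '.' unsafe,
+    # ' ' no cast possible (mirrors the if/elif chain below).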
+ print('X', end=' ')
+ for char in ntypes:
+ print(char, end=' ')
+ print()
+ for row in ntypes:
+ print(row, end=' ')
+ for col in ntypes:
+ if np.can_cast(row, col, "equiv"):
+ cast = "#"
+ elif np.can_cast(row, col, "safe"):
+ cast = "="
+ elif np.can_cast(row, col, "same_kind"):
+ cast = "~"
+ elif np.can_cast(row, col, "unsafe"):
+ cast = "."
+ else:
+ cast = " "
+ print(cast, end=' ')
+ print()
+
+def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
+ print('+', end=' ')
+ for char in ntypes:
+ print(char, end=' ')
+ print()
+ for row in ntypes:
+ if row == 'O':
+ rowtype = GenericObject
+ else:
+ rowtype = np.obj2sctype(row)
+
+ print(row, end=' ')
+ for col in ntypes:
+ if col == 'O':
+ coltype = GenericObject
+ else:
+ coltype = np.obj2sctype(col)
+ try:
+ if firstarray:
+ rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
+ else:
+ rowvalue = rowtype(inputfirstvalue)
+ colvalue = coltype(inputsecondvalue)
+ if use_promote_types:
+ char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
+ else:
+ value = np.add(rowvalue, colvalue)
+ if isinstance(value, np.ndarray):
+ char = value.dtype.char
+ else:
+ char = np.dtype(type(value)).char
+ except ValueError:
+ char = '!'
+ except OverflowError:
+ char = '@'
+ except TypeError:
+ char = '#'
+ print(char, end=' ')
+ print()
+
+
+def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
+    """Prints the new casts; the values given are the default "can-cast"
+    values, not the actual ones.
+    """
+ from numpy.core._multiarray_tests import get_all_cast_information
+
+ cast_table = {
+ -1: " ",
+ 0: "#", # No cast (classify as equivalent here)
+ 1: "#", # equivalent casting
+ 2: "=", # safe casting
+ 3: "~", # same-kind casting
+ 4: ".", # unsafe casting
+ }
+ flags_table = {
+        0: "▗", 1: "▚", 2: "▐", 3: "▜",
+        4: "▄", 5: "▙", 6: "▟", 7: "█",
+ }
+
+ cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
+ no_cast_info = cast_info(" ", " ", " ")
+
+ casts = get_all_cast_information()
+ table = {}
+ dtypes = set()
+ for cast in casts:
+ dtypes.add(cast["from"])
+ dtypes.add(cast["to"])
+
+ if cast["from"] not in table:
+ table[cast["from"]] = {}
+ to_dict = table[cast["from"]]
+
+        # Use local names distinct from the keyword-only parameters, so that
+        # the `if can_cast:` / `if legacy:` / `if flags:` checks below still
+        # see the caller's choices.
+        cast_char = cast_table[cast["casting"]]
+        legacy_char = "L" if cast["legacy"] else "."
+        flag_bits = 0
+        if cast["requires_pyapi"]:
+            flag_bits |= 1
+        if cast["supports_unaligned"]:
+            flag_bits |= 2
+        if cast["no_floatingpoint_errors"]:
+            flag_bits |= 4
+
+        flag_char = flags_table[flag_bits]
+        to_dict[cast["to"]] = cast_info(can_cast=cast_char, legacy=legacy_char,
+                                        flags=flag_char)
+
+    # The np.dtype(x.type) used below is a bit odd, because dtype classes do
+    # not expose much yet.
+ types = np.typecodes["All"]
+ def sorter(x):
+        # This is a bit of a hack to get a table as close as possible to the
+        # one printed for all typecodes (while still allowing user dtypes).
+ dtype = np.dtype(x.type)
+ try:
+ indx = types.index(dtype.char)
+ except ValueError:
+ indx = np.inf
+ return (indx, dtype.char)
+
+ dtypes = sorted(dtypes, key=sorter)
+
+ def print_table(field="can_cast"):
+ print('X', end=' ')
+ for dt in dtypes:
+ print(np.dtype(dt.type).char, end=' ')
+ print()
+ for from_dt in dtypes:
+ print(np.dtype(from_dt.type).char, end=' ')
+ row = table.get(from_dt, {})
+ for to_dt in dtypes:
+ print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
+ print()
+
+ if can_cast:
+ # Print the actual table:
+ print()
+ print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
+ print()
+ print_table("can_cast")
+
+ if legacy:
+ print()
+        print("L denotes a legacy cast, '.' a non-legacy one.")
+ print()
+ print_table("legacy")
+
+ if flags:
+ print()
+ print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
+ f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
+ print()
+ print_table("flags")
+
+
+if __name__ == '__main__':
+ print("can cast")
+ print_cancast_table(np.typecodes['All'])
+ print()
+ print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+ print()
+ print("scalar + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, False)
+ print()
+ print("scalar + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, False)
+ print()
+ print("array + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, True)
+ print()
+ print("array + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, True)
+ print()
+ print("promote_types")
+ print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+    print()
+    print("New casting type promotion:")
+ print_new_cast_table(can_cast=True, legacy=True, flags=True)
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/setup.py b/venv/lib/python3.9/site-packages/numpy/testing/setup.py
new file mode 100644
index 00000000..6f203e87
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/setup.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('testing', parent_package, top_path)
+
+ config.add_subpackage('_private')
+ config.add_subpackage('tests')
+ config.add_data_files('*.pyi')
+ config.add_data_files('_private/*.pyi')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(maintainer="NumPy Developers",
+ maintainer_email="numpy-dev@numpy.org",
+ description="NumPy test module",
+ url="https://www.numpy.org",
+ license="NumPy License (BSD Style)",
+ configuration=configuration,
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/testing/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/tests/test_doctesting.py b/venv/lib/python3.9/site-packages/numpy/testing/tests/test_doctesting.py
new file mode 100644
index 00000000..92c2156d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/tests/test_doctesting.py
@@ -0,0 +1,57 @@
+""" Doctests for NumPy-specific nose/doctest modifications
+
+"""
+# FIXME: None of these tests is run, because 'check' is not a recognized
+# testing prefix.
+
+# try the #random directive on the output line
+def check_random_directive():
+ '''
+ >>> 2+2
+ <BadExample object at 0x084D05AC> #random: may vary on your system
+ '''
+
+# check the implicit "import numpy as np"
+def check_implicit_np():
+ '''
+ >>> np.array([1,2,3])
+ array([1, 2, 3])
+ '''
+
+# there's some extraneous whitespace around the correct responses
+def check_whitespace_enabled():
+ '''
+ # whitespace after the 3
+ >>> 1+2
+ 3
+
+ # whitespace before the 7
+ >>> 3+4
+ 7
+ '''
+
+def check_empty_output():
+    """ Check that the absence of output does not cause an error.
+
+ This is related to nose bug 445; the numpy plugin changed the
+ doctest-result-variable default and therefore hit this bug:
+ http://code.google.com/p/python-nose/issues/detail?id=445
+
+ >>> a = 10
+ """
+
+def check_skip():
+ """ Check skip directive
+
+ The test below should not run
+
+ >>> 1/0 #doctest: +SKIP
+ """
+
+
+if __name__ == '__main__':
+ # Run tests outside numpy test rig
+ import nose
+ from numpy.testing.noseclasses import NumpyDoctest
+ argv = ['', __file__, '--with-numpydoctest']
+ nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()])
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py b/venv/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py
new file mode 100644
index 00000000..052cc3aa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/tests/test_utils.py
@@ -0,0 +1,1664 @@
+import warnings
+import sys
+import os
+import itertools
+import pytest
+import weakref
+
+import numpy as np
+from numpy.testing import (
+ assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, assert_array_less, build_err_msg, raises,
+ assert_raises, assert_warns, assert_no_warnings, assert_allclose,
+ assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
+ clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
+ tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
+ )
+from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
+
+
+class _GenericTest:
+
+ def _test_equal(self, a, b):
+ self._assert_func(a, b)
+
+ def _test_not_equal(self, a, b):
+ with assert_raises(AssertionError):
+ self._assert_func(a, b)
+
+ def test_array_rank1_eq(self):
+        """Test that two equal rank-1 arrays are found equal."""
+ a = np.array([1, 2])
+ b = np.array([1, 2])
+
+ self._test_equal(a, b)
+
+ def test_array_rank1_noteq(self):
+        """Test that two different rank-1 arrays are found not equal."""
+ a = np.array([1, 2])
+ b = np.array([2, 2])
+
+ self._test_not_equal(a, b)
+
+ def test_array_rank2_eq(self):
+        """Test that two equal rank-2 arrays are found equal."""
+ a = np.array([[1, 2], [3, 4]])
+ b = np.array([[1, 2], [3, 4]])
+
+ self._test_equal(a, b)
+
+ def test_array_diffshape(self):
+        """Test that two arrays with different shapes are found not equal."""
+ a = np.array([1, 2])
+ b = np.array([[1, 2], [1, 2]])
+
+ self._test_not_equal(a, b)
+
+ def test_objarray(self):
+ """Test object arrays."""
+ a = np.array([1, 1], dtype=object)
+ self._test_equal(a, 1)
+
+ def test_array_likes(self):
+ self._test_equal([1, 2, 3], (1, 2, 3))
+
+
+class TestArrayEqual(_GenericTest):
+
+ def setup_method(self):
+ self._assert_func = assert_array_equal
+
+ def test_generic_rank1(self):
+        """Test rank-1 arrays for all dtypes."""
+ def foo(t):
+ a = np.empty(2, t)
+ a.fill(1)
+ b = a.copy()
+ c = a.copy()
+ c.fill(0)
+ self._test_equal(a, b)
+ self._test_not_equal(c, b)
+
+        # Test bool and numeric types
+ for t in '?bhilqpBHILQPfdgFDG':
+ foo(t)
+
+ # Test strings
+ for t in ['S1', 'U1']:
+ foo(t)
+
+ def test_0_ndim_array(self):
+ x = np.array(473963742225900817127911193656584771)
+ y = np.array(18535119325151578301457182298393896)
+ assert_raises(AssertionError, self._assert_func, x, y)
+
+ y = x
+ self._assert_func(x, y)
+
+ x = np.array(43)
+ y = np.array(10)
+ assert_raises(AssertionError, self._assert_func, x, y)
+
+ y = x
+ self._assert_func(x, y)
+
+ def test_generic_rank3(self):
+        """Test rank-3 arrays for all dtypes."""
+ def foo(t):
+ a = np.empty((4, 2, 3), t)
+ a.fill(1)
+ b = a.copy()
+ c = a.copy()
+ c.fill(0)
+ self._test_equal(a, b)
+ self._test_not_equal(c, b)
+
+        # Test bool and numeric types
+ for t in '?bhilqpBHILQPfdgFDG':
+ foo(t)
+
+ # Test strings
+ for t in ['S1', 'U1']:
+ foo(t)
+
+ def test_nan_array(self):
+ """Test arrays with nan values in them."""
+ a = np.array([1, 2, np.nan])
+ b = np.array([1, 2, np.nan])
+
+ self._test_equal(a, b)
+
+ c = np.array([1, 2, 3])
+ self._test_not_equal(c, b)
+
+ def test_string_arrays(self):
+        """Test equality and inequality of string arrays."""
+ a = np.array(['floupi', 'floupa'])
+ b = np.array(['floupi', 'floupa'])
+
+ self._test_equal(a, b)
+
+ c = np.array(['floupipi', 'floupa'])
+
+ self._test_not_equal(c, b)
+
+ def test_recarrays(self):
+ """Test record arrays."""
+ a = np.empty(2, [('floupi', float), ('floupa', float)])
+ a['floupi'] = [1, 2]
+ a['floupa'] = [1, 2]
+ b = a.copy()
+
+ self._test_equal(a, b)
+
+ c = np.empty(2, [('floupipi', float),
+ ('floupi', float), ('floupa', float)])
+ c['floupipi'] = a['floupi'].copy()
+ c['floupa'] = a['floupa'].copy()
+
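+        # c has an extra field, so the comparison itself raises TypeError
+        # rather than reporting an ordinary assertion mismatch.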
+ with pytest.raises(TypeError):
+ self._test_not_equal(c, b)
+
+ def test_masked_nan_inf(self):
+ # Regression test for gh-11121
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
+ b = np.array([3., np.nan, 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
+ b = np.array([np.inf, 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+
+ def test_subclass_that_overrides_eq(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return bool(np.equal(self, other).all())
+
+ def __ne__(self, other):
+ return not self == other
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+        assert_(isinstance(a == a, bool))
+ assert_(a == a)
+ assert_(a != b)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
+ @pytest.mark.skipif(
+ not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
+ def test_subclass_that_does_not_implement_npall(self):
+ class MyArray(np.ndarray):
+ def __array_function__(self, *args, **kwargs):
+ return NotImplemented
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ with assert_raises(TypeError):
+ np.all(a)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
+ def test_suppress_overflow_warnings(self):
+ # Based on issue #18992
+ with pytest.raises(AssertionError):
+ with np.errstate(all="raise"):
+ np.testing.assert_array_equal(
+ np.array([1, 2, 3], np.float32),
+ np.array([1, 1e-40, 3], np.float32))
+
+ def test_array_vs_scalar_is_equal(self):
+ """Test comparing an array with a scalar when all values are equal."""
+ a = np.array([1., 1., 1.])
+ b = 1.
+
+ self._test_equal(a, b)
+
+ def test_array_vs_scalar_not_equal(self):
+        """Test comparing an array with a scalar when not all values are equal."""
+ a = np.array([1., 2., 3.])
+ b = 1.
+
+ self._test_not_equal(a, b)
+
+ def test_array_vs_scalar_strict(self):
+        """Test comparing an array with a scalar with the strict option."""
+ a = np.array([1., 1., 1.])
+ b = 1.
+
+ with pytest.raises(AssertionError):
+ assert_array_equal(a, b, strict=True)
+
+ def test_array_vs_array_strict(self):
+        """Test comparing two equal arrays with the strict option."""
+ a = np.array([1., 1., 1.])
+ b = np.array([1., 1., 1.])
+
+ assert_array_equal(a, b, strict=True)
+
+ def test_array_vs_float_array_strict(self):
+        """Test comparing integer and float arrays with the strict option."""
+ a = np.array([1, 1, 1])
+ b = np.array([1., 1., 1.])
+
+ with pytest.raises(AssertionError):
+ assert_array_equal(a, b, strict=True)
+
+
+class TestBuildErrorMessage:
+
+ def test_build_err_msg_defaults(self):
+ x = np.array([1.00001, 2.00002, 3.00003])
+ y = np.array([1.00002, 2.00003, 3.00004])
+ err_msg = 'There is a mismatch'
+
+ a = build_err_msg([x, y], err_msg)
+ b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
+ '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
+ '2.00003, 3.00004])')
+ assert_equal(a, b)
+
+ def test_build_err_msg_no_verbose(self):
+ x = np.array([1.00001, 2.00002, 3.00003])
+ y = np.array([1.00002, 2.00003, 3.00004])
+ err_msg = 'There is a mismatch'
+
+ a = build_err_msg([x, y], err_msg, verbose=False)
+ b = '\nItems are not equal: There is a mismatch'
+ assert_equal(a, b)
+
+ def test_build_err_msg_custom_names(self):
+ x = np.array([1.00001, 2.00002, 3.00003])
+ y = np.array([1.00002, 2.00003, 3.00004])
+ err_msg = 'There is a mismatch'
+
+ a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
+ b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
+ '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
+ '3.00004])')
+ assert_equal(a, b)
+
+ def test_build_err_msg_custom_precision(self):
+ x = np.array([1.000000001, 2.00002, 3.00003])
+ y = np.array([1.000000002, 2.00003, 3.00004])
+ err_msg = 'There is a mismatch'
+
+ a = build_err_msg([x, y], err_msg, precision=10)
+ b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
+ '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
+ '1.000000002, 2.00003 , 3.00004 ])')
+ assert_equal(a, b)
+
+
+class TestEqual(TestArrayEqual):
+
+ def setup_method(self):
+ self._assert_func = assert_equal
+
+ def test_nan_items(self):
+ self._assert_func(np.nan, np.nan)
+ self._assert_func([np.nan], [np.nan])
+ self._test_not_equal(np.nan, [np.nan])
+ self._test_not_equal(np.nan, 1)
+
+ def test_inf_items(self):
+ self._assert_func(np.inf, np.inf)
+ self._assert_func([np.inf], [np.inf])
+ self._test_not_equal(np.inf, [np.inf])
+
+ def test_datetime(self):
+ self._test_equal(
+ np.datetime64("2017-01-01", "s"),
+ np.datetime64("2017-01-01", "s")
+ )
+ self._test_equal(
+ np.datetime64("2017-01-01", "s"),
+ np.datetime64("2017-01-01", "m")
+ )
+
+ # gh-10081
+ self._test_not_equal(
+ np.datetime64("2017-01-01", "s"),
+ np.datetime64("2017-01-02", "s")
+ )
+ self._test_not_equal(
+ np.datetime64("2017-01-01", "s"),
+ np.datetime64("2017-01-02", "m")
+ )
+
+ def test_nat_items(self):
+ # not a datetime
+ nadt_no_unit = np.datetime64("NaT")
+ nadt_s = np.datetime64("NaT", "s")
+ nadt_d = np.datetime64("NaT", "ns")
+ # not a timedelta
+ natd_no_unit = np.timedelta64("NaT")
+ natd_s = np.timedelta64("NaT", "s")
+ natd_d = np.timedelta64("NaT", "ns")
+
+ dts = [nadt_no_unit, nadt_s, nadt_d]
+ tds = [natd_no_unit, natd_s, natd_d]
+ for a, b in itertools.product(dts, dts):
+ self._assert_func(a, b)
+ self._assert_func([a], [b])
+ self._test_not_equal([a], b)
+
+ for a, b in itertools.product(tds, tds):
+ self._assert_func(a, b)
+ self._assert_func([a], [b])
+ self._test_not_equal([a], b)
+
+ for a, b in itertools.product(tds, dts):
+ self._test_not_equal(a, b)
+ self._test_not_equal(a, [b])
+ self._test_not_equal([a], [b])
+ self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
+ self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
+ self._test_not_equal([a], np.timedelta64(123, "s"))
+ self._test_not_equal([b], np.timedelta64(123, "s"))
+
+ def test_non_numeric(self):
+ self._assert_func('ab', 'ab')
+ self._test_not_equal('ab', 'abb')
+
+ def test_complex_item(self):
+ self._assert_func(complex(1, 2), complex(1, 2))
+ self._assert_func(complex(1, np.nan), complex(1, np.nan))
+ self._test_not_equal(complex(1, np.nan), complex(1, 2))
+ self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
+ self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
+
+ def test_negative_zero(self):
+ self._test_not_equal(np.PZERO, np.NZERO)
+
+ def test_complex(self):
+ x = np.array([complex(1, 2), complex(1, np.nan)])
+ y = np.array([complex(1, 2), complex(1, 2)])
+ self._assert_func(x, x)
+ self._test_not_equal(x, y)
+
+ def test_object(self):
+        # gh-12942
+ import datetime
+ a = np.array([datetime.datetime(2000, 1, 1),
+ datetime.datetime(2000, 1, 2)])
+ self._test_not_equal(a, a[::-1])
+
+
+class TestArrayAlmostEqual(_GenericTest):
+
+ def setup_method(self):
+ self._assert_func = assert_array_almost_equal
+
+ def test_closeness(self):
+ # Note that in the course of time we ended up with
+ # `abs(x - y) < 1.5 * 10**(-decimal)`
+ # instead of the previously documented
+ # `abs(x - y) < 0.5 * 10**(-decimal)`
+ # so this check serves to preserve the wrongness.
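+        # Worked instance: with decimal=0 the threshold is 1.5 * 10**0 = 1.5,
+        # so |1.499999 - 0.0| passes while |1.5 - 0.0| fails, as asserted below.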
+
+ # test scalars
+ self._assert_func(1.499999, 0.0, decimal=0)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
+
+ # test arrays
+ self._assert_func([1.499999], [0.0], decimal=0)
+ assert_raises(AssertionError,
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
+
+ def test_simple(self):
+ x = np.array([1234.2222])
+ y = np.array([1234.2223])
+
+ self._assert_func(x, y, decimal=3)
+ self._assert_func(x, y, decimal=4)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(x, y, decimal=5))
+
+ def test_nan(self):
+ anan = np.array([np.nan])
+ aone = np.array([1])
+ ainf = np.array([np.inf])
+ self._assert_func(anan, anan)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(ainf, anan))
+
+ def test_inf(self):
+ a = np.array([[1., 2.], [3., 4.]])
+ b = a.copy()
+ a[0, 0] = np.inf
+ assert_raises(AssertionError,
+ lambda: self._assert_func(a, b))
+ b[0, 0] = -np.inf
+ assert_raises(AssertionError,
+ lambda: self._assert_func(a, b))
+
+ def test_subclass(self):
+ a = np.array([[1., 2.], [3., 4.]])
+ b = np.ma.masked_array([[1., 2.], [0., 4.]],
+ [[False, False], [True, False]])
+ self._assert_func(a, b)
+ self._assert_func(b, a)
+ self._assert_func(b, b)
+
+ # Test fully masked as well (see gh-11123).
+ a = np.ma.MaskedArray(3.5, mask=True)
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.masked
+ b = np.array([3., 4., 6.5])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array([1., 2., 3.])
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+ b = np.array(1.)
+ self._test_equal(a, b)
+ self._test_equal(b, a)
+
+ def test_subclass_that_cannot_be_bool(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return super().__eq__(other).view(np.ndarray)
+
+ def __lt__(self, other):
+ return super().__lt__(other).view(np.ndarray)
+
+ def all(self, *args, **kwargs):
+ raise NotImplementedError
+
+ a = np.array([1., 2.]).view(MyArray)
+ self._assert_func(a, a)
+
+
+class TestAlmostEqual(_GenericTest):
+
+ def setup_method(self):
+ self._assert_func = assert_almost_equal
+
+ def test_closeness(self):
+ # Note that in the course of time we ended up with
+ # `abs(x - y) < 1.5 * 10**(-decimal)`
+ # instead of the previously documented
+ # `abs(x - y) < 0.5 * 10**(-decimal)`
+ # so this check serves to preserve the wrongness.
+
+ # test scalars
+ self._assert_func(1.499999, 0.0, decimal=0)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
+
+ # test arrays
+ self._assert_func([1.499999], [0.0], decimal=0)
+ assert_raises(AssertionError,
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
+
+ def test_nan_item(self):
+ self._assert_func(np.nan, np.nan)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.nan, 1))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.nan, np.inf))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.inf, np.nan))
+
+ def test_inf_item(self):
+ self._assert_func(np.inf, np.inf)
+ self._assert_func(-np.inf, -np.inf)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(np.inf, 1))
+ assert_raises(AssertionError,
+ lambda: self._assert_func(-np.inf, np.inf))
+
+ def test_simple_item(self):
+ self._test_not_equal(1, 2)
+
+ def test_complex_item(self):
+ self._assert_func(complex(1, 2), complex(1, 2))
+ self._assert_func(complex(1, np.nan), complex(1, np.nan))
+ self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
+ self._test_not_equal(complex(1, np.nan), complex(1, 2))
+ self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
+ self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
+
+ def test_complex(self):
+ x = np.array([complex(1, 2), complex(1, np.nan)])
+ z = np.array([complex(1, 2), complex(np.nan, 1)])
+ y = np.array([complex(1, 2), complex(1, 2)])
+ self._assert_func(x, x)
+ self._test_not_equal(x, y)
+ self._test_not_equal(x, z)
+
+ def test_error_message(self):
+        """Check that the message is formatted correctly for the decimal value.
+ Also check the message when input includes inf or nan (gh12200)"""
+ x = np.array([1.00000000001, 2.00000000002, 3.00003])
+ y = np.array([1.00000000002, 2.00000000003, 3.00004])
+
+ # Test with a different amount of decimal digits
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y, decimal=12)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+ assert_equal(
+ msgs[6],
+ ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
+ assert_equal(
+ msgs[7],
+ ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')
+
+ # With the default value of decimal digits, only the 3rd element
+ # differs. Note that we only check for the formatting of the arrays
+ # themselves.
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+ assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
+ assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')
+
+ # Check the error message when input includes inf
+ x = np.array([np.inf, 0])
+ y = np.array([np.inf, 1])
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 1.')
+ assert_equal(msgs[6], ' x: array([inf, 0.])')
+ assert_equal(msgs[7], ' y: array([inf, 1.])')
+
+ # Check the error message when dividing by zero
+ x = np.array([1, 2])
+ y = np.array([0, 0])
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 2')
+ assert_equal(msgs[5], 'Max relative difference: inf')
+
+ def test_error_message_2(self):
+        """Check that the message is formatted correctly when either x or y
+        is a scalar."""
+ x = 2
+ y = np.ones(20)
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 1.')
+
+ y = 2
+ x = np.ones(20)
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 0.5')
+
+ def test_subclass_that_cannot_be_bool(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return super().__eq__(other).view(np.ndarray)
+
+ def __lt__(self, other):
+ return super().__lt__(other).view(np.ndarray)
+
+ def all(self, *args, **kwargs):
+ raise NotImplementedError
+
+ a = np.array([1., 2.]).view(MyArray)
+ self._assert_func(a, a)
+
+
+class TestApproxEqual:
+
+ def setup_method(self):
+ self._assert_func = assert_approx_equal
+
+ def test_simple_0d_arrays(self):
+ x = np.array(1234.22)
+ y = np.array(1234.23)
+
+ self._assert_func(x, y, significant=5)
+ self._assert_func(x, y, significant=6)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(x, y, significant=7))
+
+ def test_simple_items(self):
+ x = 1234.22
+ y = 1234.23
+
+ self._assert_func(x, y, significant=4)
+ self._assert_func(x, y, significant=5)
+ self._assert_func(x, y, significant=6)
+ assert_raises(AssertionError,
+ lambda: self._assert_func(x, y, significant=7))
+
+ def test_nan_array(self):
+ anan = np.array(np.nan)
+ aone = np.array(1)
+ ainf = np.array(np.inf)
+ self._assert_func(anan, anan)
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
+
+ def test_nan_items(self):
+ anan = np.array(np.nan)
+ aone = np.array(1)
+ ainf = np.array(np.inf)
+ self._assert_func(anan, anan)
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
+
+
+class TestArrayAssertLess:
+
+ def setup_method(self):
+ self._assert_func = assert_array_less
+
+ def test_simple_arrays(self):
+ x = np.array([1.1, 2.2])
+ y = np.array([1.2, 2.3])
+
+ self._assert_func(x, y)
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ y = np.array([1.0, 2.3])
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ def test_rank2(self):
+ x = np.array([[1.1, 2.2], [3.3, 4.4]])
+ y = np.array([[1.2, 2.3], [3.4, 4.5]])
+
+ self._assert_func(x, y)
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ y = np.array([[1.0, 2.3], [3.4, 4.5]])
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ def test_rank3(self):
+ x = np.ones(shape=(2, 2, 2))
+ y = np.ones(shape=(2, 2, 2))+1
+
+ self._assert_func(x, y)
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ y[0, 0, 0] = 0
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ def test_simple_items(self):
+ x = 1.1
+ y = 2.2
+
+ self._assert_func(x, y)
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ y = np.array([2.2, 3.3])
+
+ self._assert_func(x, y)
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+ y = np.array([1.0, 3.3])
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+
+ def test_nan_noncompare(self):
+ anan = np.array(np.nan)
+ aone = np.array(1)
+ ainf = np.array(np.inf)
+ self._assert_func(anan, anan)
+ assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
+
+ def test_nan_noncompare_array(self):
+ x = np.array([1.1, 2.2, 3.3])
+ anan = np.array(np.nan)
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, x))
+
+ x = np.array([1.1, 2.2, np.nan])
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, anan))
+ assert_raises(AssertionError, lambda: self._assert_func(anan, x))
+
+ y = np.array([1.0, 2.0, np.nan])
+
+ self._assert_func(y, x)
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
+
+ def test_inf_compare(self):
+ aone = np.array(1)
+ ainf = np.array(np.inf)
+
+ self._assert_func(aone, ainf)
+ self._assert_func(-ainf, aone)
+ self._assert_func(-ainf, ainf)
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
+ assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
+
+ def test_inf_compare_array(self):
+ x = np.array([1.1, 2.2, np.inf])
+ ainf = np.array(np.inf)
+
+ assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
+ assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
+ assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
+ self._assert_func(-ainf, x)
+
+
+@pytest.mark.skip(reason="The raises decorator depends on Nose")
+class TestRaises:
+
+ def setup_method(self):
+ class MyException(Exception):
+ pass
+
+ self.e = MyException
+
+ def raises_exception(self, e):
+ raise e
+
+ def does_not_raise_exception(self):
+ pass
+
+ def test_correct_catch(self):
+ raises(self.e)(self.raises_exception)(self.e) # raises?
+
+ def test_wrong_exception(self):
+ try:
+ raises(self.e)(self.raises_exception)(RuntimeError) # raises?
+ except RuntimeError:
+ return
+ else:
+ raise AssertionError("should have caught RuntimeError")
+
+ def test_catch_no_raise(self):
+ try:
+ raises(self.e)(self.does_not_raise_exception)() # raises?
+ except AssertionError:
+ return
+ else:
+ raise AssertionError("should have raised an AssertionError")
+
+
+class TestWarns:
+
+ def test_warn(self):
+ def f():
+ warnings.warn("yo")
+ return 3
+
+ before_filters = sys.modules['warnings'].filters[:]
+ assert_equal(assert_warns(UserWarning, f), 3)
+ after_filters = sys.modules['warnings'].filters
+
+ assert_raises(AssertionError, assert_no_warnings, f)
+ assert_equal(assert_no_warnings(lambda x: x, 1), 1)
+
+ # Check that the warnings state is unchanged
+ assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve the warnings state")
+
+ def test_context_manager(self):
+
+ before_filters = sys.modules['warnings'].filters[:]
+ with assert_warns(UserWarning):
+ warnings.warn("yo")
+ after_filters = sys.modules['warnings'].filters
+
+ def no_warnings():
+ with assert_no_warnings():
+ warnings.warn("yo")
+
+ assert_raises(AssertionError, no_warnings)
+ assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve the warnings state")
+
+ def test_warn_wrong_warning(self):
+ def f():
+ warnings.warn("yo", DeprecationWarning)
+
+ failed = False
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ try:
+ # Should raise a DeprecationWarning
+ assert_warns(UserWarning, f)
+ failed = True
+ except DeprecationWarning:
+ pass
+
+ if failed:
+ raise AssertionError("wrong warning caught by assert_warn")
+
+
+class TestAssertAllclose:
+
+ def test_simple(self):
+ x = 1e-3
+ y = 1e-9
+
+ assert_allclose(x, y, atol=1)
+ assert_raises(AssertionError, assert_allclose, x, y)
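+        # assert_allclose checks |actual - desired| <= atol + rtol * |desired|
+        # (rtol defaults to 1e-7, atol to 0), hence the failure without atol.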
+
+ a = np.array([x, y, x, y])
+ b = np.array([x, y, x, x])
+
+ assert_allclose(a, b, atol=1)
+ assert_raises(AssertionError, assert_allclose, a, b)
+
+ b[-1] = y * (1 + 1e-8)
+ assert_allclose(a, b)
+ assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
+
+ assert_allclose(6, 10, rtol=0.5)
+ assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
+
+ def test_min_int(self):
+ a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
+ # Should not raise:
+ assert_allclose(a, a)
+
+ def test_report_fail_percentage(self):
+ a = np.array([1, 1, 1, 1])
+ b = np.array([1, 1, 1, 2])
+
+ with pytest.raises(AssertionError) as exc_info:
+ assert_allclose(a, b)
+ msg = str(exc_info.value)
+ assert_('Mismatched elements: 1 / 4 (25%)\n'
+ 'Max absolute difference: 1\n'
+ 'Max relative difference: 0.5' in msg)
+
+ def test_equal_nan(self):
+ a = np.array([np.nan])
+ b = np.array([np.nan])
+ # Should not raise:
+ assert_allclose(a, b, equal_nan=True)
+
+ def test_not_equal_nan(self):
+ a = np.array([np.nan])
+ b = np.array([np.nan])
+ assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
+
+ def test_equal_nan_default(self):
+ # Make sure equal_nan default behavior remains unchanged. (All
+ # of these functions use assert_array_compare under the hood.)
+ # None of these should raise.
+ a = np.array([np.nan])
+ b = np.array([np.nan])
+ assert_array_equal(a, b)
+ assert_array_almost_equal(a, b)
+ assert_array_less(a, b)
+ assert_allclose(a, b)
+
+ def test_report_max_relative_error(self):
+ a = np.array([0, 1])
+ b = np.array([0, 2])
+
+ with pytest.raises(AssertionError) as exc_info:
+ assert_allclose(a, b)
+ msg = str(exc_info.value)
+ assert_('Max relative difference: 0.5' in msg)
+
+ def test_timedelta(self):
+ # see gh-18286
+ a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
+ assert_allclose(a, a)
+
+ def test_error_message_unsigned(self):
+        """Check that the message is formatted correctly when overflow can
+        occur (gh21768)."""
+ # Ensure to test for potential overflow in the case of:
+ # x - y
+ # and
+ # y - x
+ x = np.asarray([0, 1, 8], dtype='uint8')
+ y = np.asarray([4, 4, 4], dtype='uint8')
+ with pytest.raises(AssertionError) as exc_info:
+ assert_allclose(x, y, atol=3)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[4], 'Max absolute difference: 4')
+
+
+class TestArrayAlmostEqualNulp:
+
+ def test_float64_pass(self):
+        # nulp = number of units in the last place; use a few steps above the
+        # lowest level (i.e. nulp=1).
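+        # Sketch of the criterion: values are accepted while
+        # |x - y| <= nulp * spacing(max(|x|, |y|)), so a perturbation of
+        # x*eps*nulp/2 stays comfortably inside the bound.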
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ # Addition
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ # Subtraction
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ def test_float64_fail(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ def test_float64_ignore_nan(self):
+ # Ignore ULP differences between various NAN's
+ # Note that MIPS may reverse quiet and signaling nans
+ # so we use the builtin version as a base.
+ offset = np.uint64(0xffffffff)
+ nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
+ nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones.
+ nan1_f64 = nan1_i64.view(np.float64)
+ nan2_f64 = nan2_i64.view(np.float64)
+ assert_array_max_ulp(nan1_f64, nan2_f64, 0)
+
+ def test_float32_pass(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ def test_float32_fail(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ def test_float32_ignore_nan(self):
+ # Ignore ULP differences between various NAN's
+ # Note that MIPS may reverse quiet and signaling nans
+ # so we use the builtin version as a base.
+ offset = np.uint32(0xffff)
+ nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
+ nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones.
+ nan1_f32 = nan1_i32.view(np.float32)
+ nan2_f32 = nan2_i32.view(np.float32)
+ assert_array_max_ulp(nan1_f32, nan2_f32, 0)
+
+ def test_float16_pass(self):
+ nulp = 5
+ x = np.linspace(-4, 4, 10, dtype=np.float16)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp/2.
+ assert_array_almost_equal_nulp(x, y, nulp)
+
+ def test_float16_fail(self):
+ nulp = 5
+ x = np.linspace(-4, 4, 10, dtype=np.float16)
+ x = 10**x
+ x = np.r_[-x, x]
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ x, y, nulp)
+
+ def test_float16_ignore_nan(self):
+ # Ignore ULP differences between various NAN's
+ # Note that MIPS may reverse quiet and signaling nans
+ # so we use the builtin version as a base.
+ offset = np.uint16(0xff)
+ nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
+ nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones.
+ nan1_f16 = nan1_i16.view(np.float16)
+ nan2_f16 = nan2_i16.view(np.float16)
+ assert_array_max_ulp(nan1_f16, nan2_f16, 0)
+
+ def test_complex128_pass(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
+ x = 10**x
+ x = np.r_[-x, x]
+ xi = x + x*1j
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp/2.
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+ # The test condition needs to be at least a factor of sqrt(2) smaller
+ # because the real and imaginary parts both change
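+        # (|dz| = sqrt(dre**2 + dim**2) ~= sqrt(2)*d when both parts move by
+        # d, so nulp/4 per part keeps the combined error within the bound.)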
+ y = x + x*eps*nulp/4.
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp/2.
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+ y = x - x*epsneg*nulp/4.
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+ def test_complex128_fail(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
+ x = 10**x
+ x = np.r_[-x, x]
+ xi = x + x*1j
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
+ # The test condition needs to be at least a factor of sqrt(2) smaller
+ # because the real and imaginary parts both change
+ y = x + x*eps*nulp
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
+ y = x - x*epsneg*nulp
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
+
+ def test_complex64_pass(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
+ x = 10**x
+ x = np.r_[-x, x]
+ xi = x + x*1j
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp/2.
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+ y = x + x*eps*nulp/4.
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp/2.
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+ y = x - x*epsneg*nulp/4.
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+ def test_complex64_fail(self):
+ nulp = 5
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
+ x = 10**x
+ x = np.r_[-x, x]
+ xi = x + x*1j
+
+ eps = np.finfo(x.dtype).eps
+ y = x + x*eps*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
+ y = x + x*eps*nulp
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
+
+ epsneg = np.finfo(x.dtype).epsneg
+ y = x - x*epsneg*nulp*2.
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, x + y*1j, nulp)
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + x*1j, nulp)
+ y = x - x*epsneg*nulp
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
+ xi, y + y*1j, nulp)
+
+
+class TestULP:
+
+ def test_equal(self):
+ x = np.random.randn(10)
+ assert_array_max_ulp(x, x, maxulp=0)
+
+ def test_single(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+ x = np.ones(10).astype(np.float32)
+ x += 0.01 * np.random.randn(10).astype(np.float32)
+ eps = np.finfo(np.float32).eps
+ assert_array_max_ulp(x, x+eps, maxulp=20)
+
+ def test_double(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+ x = np.ones(10).astype(np.float64)
+ x += 0.01 * np.random.randn(10).astype(np.float64)
+ eps = np.finfo(np.float64).eps
+ assert_array_max_ulp(x, x+eps, maxulp=200)
+
+ def test_inf(self):
+ for dt in [np.float32, np.float64]:
+ inf = np.array([np.inf]).astype(dt)
+ big = np.array([np.finfo(dt).max])
+ assert_array_max_ulp(inf, big, maxulp=200)
+
+ def test_nan(self):
+ # Test that nan is 'far' from small, tiny, inf, max and min
+ for dt in [np.float32, np.float64]:
+ if dt == np.float32:
+ maxulp = 1e6
+ else:
+ maxulp = 1e12
+ inf = np.array([np.inf]).astype(dt)
+ nan = np.array([np.nan]).astype(dt)
+ big = np.array([np.finfo(dt).max])
+ tiny = np.array([np.finfo(dt).tiny])
+ zero = np.array([np.PZERO]).astype(dt)
+ nzero = np.array([np.NZERO]).astype(dt)
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, inf,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, big,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, tiny,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, zero,
+ maxulp=maxulp))
+ assert_raises(AssertionError,
+ lambda: assert_array_max_ulp(nan, nzero,
+ maxulp=maxulp))
+
+
+class TestStringEqual:
+ def test_simple(self):
+ assert_string_equal("hello", "hello")
+ assert_string_equal("hello\nmultiline", "hello\nmultiline")
+
+ with pytest.raises(AssertionError) as exc_info:
+ assert_string_equal("foo\nbar", "hello\nbar")
+ msg = str(exc_info.value)
+ assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
+
+ assert_raises(AssertionError,
+ lambda: assert_string_equal("foo", "hello"))
+
+ def test_regex(self):
+ assert_string_equal("a+*b", "a+*b")
+
+ assert_raises(AssertionError,
+ lambda: assert_string_equal("aaa", "a+b"))
+
+
+def assert_warn_len_equal(mod, n_in_context):
+ try:
+ mod_warns = mod.__warningregistry__
+ except AttributeError:
+        # The lack of a __warningregistry__ attribute means that no warning
+        # has occurred; this can be triggered in a parallel test scenario,
+        # while in a serial test scenario an initial warning (and therefore
+        # the attribute) is always created first.
+ mod_warns = {}
+
+ num_warns = len(mod_warns)
+
+ if 'version' in mod_warns:
+ # Python 3 adds a 'version' entry to the registry,
+ # do not count it.
+ num_warns -= 1
+
+ assert_equal(num_warns, n_in_context)
+
+
+def test_warn_len_equal_call_scenarios():
+ # assert_warn_len_equal is called under
+ # varying circumstances depending on serial
+ # vs. parallel test scenarios; this test
+ # simply aims to probe both code paths and
+ # check that no assertion is uncaught
+
+ # parallel scenario -- no warning issued yet
+ class mod:
+ pass
+
+ mod_inst = mod()
+
+ assert_warn_len_equal(mod=mod_inst,
+ n_in_context=0)
+
+ # serial test scenario -- the __warningregistry__
+ # attribute should be present
+ class mod:
+ def __init__(self):
+            self.__warningregistry__ = {'warning1': 1,
+                                        'warning2': 2}
+
+ mod_inst = mod()
+ assert_warn_len_equal(mod=mod_inst,
+ n_in_context=2)
+
+
+def _get_fresh_mod():
+ # Get this module, with warning registry empty
+ my_mod = sys.modules[__name__]
+ try:
+ my_mod.__warningregistry__.clear()
+ except AttributeError:
+ # will not have a __warningregistry__ unless warning has been
+ # raised in the module at some point
+ pass
+ return my_mod
+
+
+def test_clear_and_catch_warnings():
+ # Initial state of module, no warnings
+ my_mod = _get_fresh_mod()
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+ with clear_and_catch_warnings(modules=[my_mod]):
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_equal(my_mod.__warningregistry__, {})
+ # Without specified modules, don't clear warnings during context.
+ # catch_warnings doesn't make an entry for 'ignore'.
+ with clear_and_catch_warnings():
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+
+ # Manually adding two warnings to the registry:
+ my_mod.__warningregistry__ = {'warning1': 1,
+ 'warning2': 2}
+
+ # Confirm that specifying module keeps old warning, does not add new
+ with clear_and_catch_warnings(modules=[my_mod]):
+ warnings.simplefilter('ignore')
+ warnings.warn('Another warning')
+ assert_warn_len_equal(my_mod, 2)
+
+    # Another warning, with no module spec: this clears the registry
+ with clear_and_catch_warnings():
+ warnings.simplefilter('ignore')
+ warnings.warn('Another warning')
+ assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_module():
+ # Initial state of module, no warnings
+ my_mod = _get_fresh_mod()
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+ def warn_other_module():
+ # Apply along axis is implemented in python; stacklevel=2 means
+ # we end up inside its module, not ours.
+ def warn(arr):
+ warnings.warn("Some warning 2", stacklevel=2)
+ return arr
+ np.apply_along_axis(warn, 0, [0])
+
+ # Test module based warning suppression:
+ assert_warn_len_equal(my_mod, 0)
+ with suppress_warnings() as sup:
+ sup.record(UserWarning)
+        # Suppress the warning from the other module (the filename may have a
+        # .pyc ending); this has to be changed if apply_along_axis is moved.
+ sup.filter(module=np.lib.shape_base)
+ warnings.warn("Some warning")
+ warn_other_module()
+ # Check that the suppression did test the file correctly (this module
+ # got filtered)
+ assert_equal(len(sup.log), 1)
+ assert_equal(sup.log[0].message.args[0], "Some warning")
+ assert_warn_len_equal(my_mod, 0)
+ sup = suppress_warnings()
+ # Will have to be changed if apply_along_axis is moved:
+ sup.filter(module=my_mod)
+ with sup:
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+ # And test repeat works:
+ sup.filter(module=my_mod)
+ with sup:
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+
+ # Without specified modules
+ with suppress_warnings():
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_type():
+ # Initial state of module, no warnings
+ my_mod = _get_fresh_mod()
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+ # Test module based warning suppression:
+ with suppress_warnings() as sup:
+ sup.filter(UserWarning)
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+ sup = suppress_warnings()
+ sup.filter(UserWarning)
+ with sup:
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+ # And test repeat works:
+ sup.filter(module=my_mod)
+ with sup:
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+
+ # Without specified modules
+ with suppress_warnings():
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_decorate_no_record():
+ sup = suppress_warnings()
+ sup.filter(UserWarning)
+
+ @sup
+ def warn(category):
+ warnings.warn('Some warning', category)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+        warn(UserWarning)  # should be suppressed
+ warn(RuntimeWarning)
+ assert_equal(len(w), 1)
+
+
+def test_suppress_warnings_record():
+ sup = suppress_warnings()
+ log1 = sup.record()
+
+ with sup:
+ log2 = sup.record(message='Some other warning 2')
+ sup.filter(message='Some warning')
+ warnings.warn('Some warning')
+ warnings.warn('Some other warning')
+ warnings.warn('Some other warning 2')
+
+ assert_equal(len(sup.log), 2)
+ assert_equal(len(log1), 1)
+    assert_equal(len(log2), 1)
+ assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+ # Do it again, with the same context to see if some warnings survived:
+ with sup:
+ log2 = sup.record(message='Some other warning 2')
+ sup.filter(message='Some warning')
+ warnings.warn('Some warning')
+ warnings.warn('Some other warning')
+ warnings.warn('Some other warning 2')
+
+ assert_equal(len(sup.log), 2)
+ assert_equal(len(log1), 1)
+ assert_equal(len(log2), 1)
+ assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+ # Test nested:
+ with suppress_warnings() as sup:
+ sup.record()
+ with suppress_warnings() as sup2:
+ sup2.record(message='Some warning')
+ warnings.warn('Some warning')
+ warnings.warn('Some other warning')
+ assert_equal(len(sup2.log), 1)
+ assert_equal(len(sup.log), 1)
+
+
+def test_suppress_warnings_forwarding():
+ def warn_other_module():
+ # Apply along axis is implemented in python; stacklevel=2 means
+ # we end up inside its module, not ours.
+ def warn(arr):
+ warnings.warn("Some warning", stacklevel=2)
+ return arr
+ np.apply_along_axis(warn, 0, [0])
+
+ with suppress_warnings() as sup:
+ sup.record()
+ with suppress_warnings("always"):
+ for i in range(2):
+ warnings.warn("Some warning")
+
+ assert_equal(len(sup.log), 2)
+
+ with suppress_warnings() as sup:
+ sup.record()
+ with suppress_warnings("location"):
+ for i in range(2):
+ warnings.warn("Some warning")
+ warnings.warn("Some warning")
+
+ assert_equal(len(sup.log), 2)
+
+ with suppress_warnings() as sup:
+ sup.record()
+ with suppress_warnings("module"):
+ for i in range(2):
+ warnings.warn("Some warning")
+ warnings.warn("Some warning")
+ warn_other_module()
+
+ assert_equal(len(sup.log), 2)
+
+ with suppress_warnings() as sup:
+ sup.record()
+ with suppress_warnings("once"):
+ for i in range(2):
+ warnings.warn("Some warning")
+ warnings.warn("Some other warning")
+ warn_other_module()
+
+ assert_equal(len(sup.log), 2)
+
+
+def test_tempdir():
+ with tempdir() as tdir:
+ fpath = os.path.join(tdir, 'tmp')
+ with open(fpath, 'w'):
+ pass
+ assert_(not os.path.isdir(tdir))
+
+ raised = False
+ try:
+ with tempdir() as tdir:
+ raise ValueError()
+ except ValueError:
+ raised = True
+ assert_(raised)
+ assert_(not os.path.isdir(tdir))
+
+
+def test_temppath():
+ with temppath() as fpath:
+ with open(fpath, 'w'):
+ pass
+ assert_(not os.path.isfile(fpath))
+
+ raised = False
+ try:
+ with temppath() as fpath:
+ raise ValueError()
+ except ValueError:
+ raised = True
+ assert_(raised)
+ assert_(not os.path.isfile(fpath))
+
+
+class my_cacw(clear_and_catch_warnings):
+
+ class_modules = (sys.modules[__name__],)
+
+
+def test_clear_and_catch_warnings_inherit():
+ # Test can subclass and add default modules
+ my_mod = _get_fresh_mod()
+ with my_cacw():
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_equal(my_mod.__warningregistry__, {})
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestAssertNoGcCycles:
+ """ Test assert_no_gc_cycles """
+ def test_passes(self):
+ def no_cycle():
+ b = []
+ b.append([])
+ return b
+
+ with assert_no_gc_cycles():
+ no_cycle()
+
+ assert_no_gc_cycles(no_cycle)
+
+ def test_asserts(self):
+ def make_cycle():
+ a = []
+ a.append(a)
+ a.append(a)
+ return a
+
+ with assert_raises(AssertionError):
+ with assert_no_gc_cycles():
+ make_cycle()
+
+ with assert_raises(AssertionError):
+ assert_no_gc_cycles(make_cycle)
+
+ @pytest.mark.slow
+ def test_fails(self):
+ """
+ Test that in cases where the garbage cannot be collected, we raise an
+ error, instead of hanging forever trying to clear it.
+ """
+
+ class ReferenceCycleInDel:
+ """
+ An object that not only contains a reference cycle, but creates new
+ cycles whenever it's garbage-collected and its __del__ runs
+ """
+ make_cycle = True
+
+ def __init__(self):
+ self.cycle = self
+
+ def __del__(self):
+ # break the current cycle so that `self` can be freed
+ self.cycle = None
+
+ if ReferenceCycleInDel.make_cycle:
+ # but create a new one so that the garbage collector has more
+ # work to do.
+ ReferenceCycleInDel()
+
+ try:
+ w = weakref.ref(ReferenceCycleInDel())
+ try:
+ with assert_raises(RuntimeError):
+ # this will be unable to get a baseline empty garbage
+ assert_no_gc_cycles(lambda: None)
+ except AssertionError:
+                # The above test is only necessary if the GC actually tried to
+                # free our object, which Python 2.7 did not.
+ if w() is not None:
+ pytest.skip("GC does not call __del__ on cyclic objects")
+ raise
+
+ finally:
+ # make sure that we stop creating reference cycles
+ ReferenceCycleInDel.make_cycle = False
diff --git a/venv/lib/python3.9/site-packages/numpy/testing/utils.py b/venv/lib/python3.9/site-packages/numpy/testing/utils.py
new file mode 100644
index 00000000..20a88330
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/testing/utils.py
@@ -0,0 +1,29 @@
+"""
+Back-compatibility utils module. It imports the appropriate set of tools
+from ``numpy.testing._private.utils``.
+
+"""
+import warnings
+
+# 2018-04-04, numpy 1.15.0 ImportWarning
+# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed)
+warnings.warn("Importing from numpy.testing.utils is deprecated "
+ "since 1.15.0, import from numpy.testing instead.",
+ DeprecationWarning, stacklevel=2)
+
+from ._private.utils import *
+from ._private.utils import _assert_valid_refcount, _gen_alignment_data
+
+__all__ = [
+ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+ 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+ 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+ 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+ 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+ 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+ 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
+ 'assert_no_gc_cycles'
+ ]
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test__all__.py b/venv/lib/python3.9/site-packages/numpy/tests/test__all__.py
new file mode 100644
index 00000000..e44bda3d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test__all__.py
@@ -0,0 +1,9 @@
+
+import collections
+import numpy as np
+
+
+def test_no_duplicates_in_np__all__():
+ # Regression test for gh-10198.
+ dups = {k: v for k, v in collections.Counter(np.__all__).items() if v > 1}
+ assert len(dups) == 0
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py b/venv/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py
new file mode 100644
index 00000000..1ea08370
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_ctypeslib.py
@@ -0,0 +1,368 @@
+import sys
+import pytest
+import weakref
+from pathlib import Path
+
+import numpy as np
+from numpy.ctypeslib import ndpointer, load_library, as_array
+from numpy.distutils.misc_util import get_shared_lib_extension
+from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+else:
+ cdll = None
+ test_cdll = None
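+ # `sys.gettotalrefcount` only exists on debug builds of CPython; on such
+ # builds, try the debug variants of the extension modules first, falling
+ # back to the regular names below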
+ if hasattr(sys, 'gettotalrefcount'):
+ try:
+ cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
+ except OSError:
+ pass
+ try:
+ test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+ except OSError:
+ pass
+ if cdll is None:
+ cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
+ if test_cdll is None:
+ test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+
+ c_forward_pointer = test_cdll.forward_pointer
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available in this python")
+@pytest.mark.skipif(sys.platform == 'cygwin',
+ reason="Known to fail on cygwin")
+class TestLoadLibrary:
+ def test_basic(self):
+ loader_path = np.core._multiarray_umath.__file__
+
+ out1 = load_library('_multiarray_umath', loader_path)
+ out2 = load_library(Path('_multiarray_umath'), loader_path)
+ out3 = load_library('_multiarray_umath', Path(loader_path))
+ out4 = load_library(b'_multiarray_umath', loader_path)
+
+ assert isinstance(out1, ctypes.CDLL)
+ assert out1 is out2 is out3 is out4
+
+ def test_basic2(self):
+ # Regression for #801: load_library with a full library name
+ # (including extension) does not work.
+ try:
+ try:
+ so = get_shared_lib_extension(is_python_ext=True)
+ # Should succeed
+ load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__)
+ except ImportError:
+ print("No distutils available, skipping test.")
+ except ImportError as e:
+ msg = ("ctypes is not available on this python: skipping the test"
+ " (import error was: %s)" % str(e))
+ print(msg)
+
+
+class TestNdpointer:
+ def test_dtype(self):
+ dt = np.intc
+ p = ndpointer(dtype=dt)
+ assert_(p.from_param(np.array([1], dt)))
+ dt = '<i4'
+ p = ndpointer(dtype=dt)
+ assert_(p.from_param(np.array([1], dt)))
+ dt = np.dtype('>i4')
+ p = ndpointer(dtype=dt)
+ p.from_param(np.array([1], dt))
+ assert_raises(TypeError, p.from_param,
+ np.array([1], dt.newbyteorder('swap')))
+ dtnames = ['x', 'y']
+ dtformats = [np.intc, np.float64]
+ dtdescr = {'names': dtnames, 'formats': dtformats}
+ dt = np.dtype(dtdescr)
+ p = ndpointer(dtype=dt)
+ assert_(p.from_param(np.zeros((10,), dt)))
+ samedt = np.dtype(dtdescr)
+ p = ndpointer(dtype=samedt)
+ assert_(p.from_param(np.zeros((10,), dt)))
+ dt2 = np.dtype(dtdescr, align=True)
+ if dt.itemsize != dt2.itemsize:
+ assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
+ else:
+ assert_(p.from_param(np.zeros((10,), dt2)))
+
+ def test_ndim(self):
+ p = ndpointer(ndim=0)
+ assert_(p.from_param(np.array(1)))
+ assert_raises(TypeError, p.from_param, np.array([1]))
+ p = ndpointer(ndim=1)
+ assert_raises(TypeError, p.from_param, np.array(1))
+ assert_(p.from_param(np.array([1])))
+ p = ndpointer(ndim=2)
+ assert_(p.from_param(np.array([[1]])))
+
+ def test_shape(self):
+ p = ndpointer(shape=(1, 2))
+ assert_(p.from_param(np.array([[1, 2]])))
+ assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
+ p = ndpointer(shape=())
+ assert_(p.from_param(np.array(1)))
+
+ def test_flags(self):
+ x = np.array([[1, 2], [3, 4]], order='F')
+ p = ndpointer(flags='FORTRAN')
+ assert_(p.from_param(x))
+ p = ndpointer(flags='CONTIGUOUS')
+ assert_raises(TypeError, p.from_param, x)
+ p = ndpointer(flags=x.flags.num)
+ assert_(p.from_param(x))
+ assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+
+ def test_cache(self):
+ assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+ # shapes are normalized
+ assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+
+ # 1.12 <= v < 1.16 had a bug that made these fail
+ assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+ assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestNdpointerCFunc:
+ def test_arguments(self):
+ """ Test that arguments are coerced from arrays """
+ c_forward_pointer.restype = ctypes.c_void_p
+ c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+ c_forward_pointer(np.zeros((2, 3)))
+ # too many dimensions
+ assert_raises(
+ ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+ @pytest.mark.parametrize(
+ 'dt', [
+ float,
+ np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['a', 'b'],
+ offsets=[0, 2],
+ itemsize=6
+ ))
+ ], ids=[
+ 'float',
+ 'overlapping-fields'
+ ]
+ )
+ def test_return(self, dt):
+ """ Test that return values are coerced to arrays """
+ arr = np.zeros((2, 3), dt)
+ ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+ c_forward_pointer.restype = ptr_type
+ c_forward_pointer.argtypes = (ptr_type,)
+
+ # check that the arrays are equivalent views on the same data
+ arr2 = c_forward_pointer(arr)
+ assert_equal(arr2.dtype, arr.dtype)
+ assert_equal(arr2.shape, arr.shape)
+ assert_equal(
+ arr2.__array_interface__['data'],
+ arr.__array_interface__['data']
+ )
+
+ def test_vague_return_value(self):
+ """ Test that vague ndpointer return values do not promote to arrays """
+ arr = np.zeros((2, 3))
+ ptr_type = ndpointer(dtype=arr.dtype)
+
+ c_forward_pointer.restype = ptr_type
+ c_forward_pointer.argtypes = (ptr_type,)
+
+ ret = c_forward_pointer(arr)
+ assert_(isinstance(ret, ptr_type))
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestAsArray:
+ def test_array(self):
+ from ctypes import c_int
+
+ pair_t = c_int * 2
+ a = as_array(pair_t(1, 2))
+ assert_equal(a.shape, (2,))
+ assert_array_equal(a, np.array([1, 2]))
+ a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
+ assert_equal(a.shape, (3, 2))
+ assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
+
+ def test_pointer(self):
+ from ctypes import c_int, cast, POINTER
+
+ p = cast((c_int * 10)(*range(10)), POINTER(c_int))
+
+ a = as_array(p, shape=(10,))
+ assert_equal(a.shape, (10,))
+ assert_array_equal(a, np.arange(10))
+
+ a = as_array(p, shape=(2, 5))
+ assert_equal(a.shape, (2, 5))
+ assert_array_equal(a, np.arange(10).reshape((2, 5)))
+
+ # shape argument is required
+ assert_raises(TypeError, as_array, p)
+
+ def test_struct_array_pointer(self):
+ from ctypes import c_int16, Structure, pointer
+
+ class Struct(Structure):
+ _fields_ = [('a', c_int16)]
+
+ Struct3 = 3 * Struct
+
+ c_array = (2 * Struct3)(
+ Struct3(Struct(a=1), Struct(a=2), Struct(a=3)),
+ Struct3(Struct(a=4), Struct(a=5), Struct(a=6))
+ )
+
+ expected = np.array([
+ [(1,), (2,), (3,)],
+ [(4,), (5,), (6,)],
+ ], dtype=[('a', np.int16)])
+
+ def check(x):
+ assert_equal(x.dtype, expected.dtype)
+ assert_equal(x, expected)
+
+ # all of these should be equivalent
+ check(as_array(c_array))
+ check(as_array(pointer(c_array), shape=()))
+ check(as_array(pointer(c_array[0]), shape=(2,)))
+ check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
+
+ def test_reference_cycles(self):
+ # related to gh-6511
+ import ctypes
+
+ # create array to work with
+ # don't use int/long to avoid running into bpo-10746
+ N = 100
+ a = np.arange(N, dtype=np.short)
+
+ # get pointer to array
+ pnt = np.ctypeslib.as_ctypes(a)
+
+ with np.testing.assert_no_gc_cycles():
+ # decay the array above to a pointer to its first element
+ newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short))
+ # and construct an array using this data
+ b = np.ctypeslib.as_array(newpnt, (N,))
+ # now delete both, which should cleanup both objects
+ del newpnt, b
+
+ def test_segmentation_fault(self):
+ arr = np.zeros((224, 224, 3))
+ c_arr = np.ctypeslib.as_ctypes(arr)
+ arr_ref = weakref.ref(arr)
+ del arr
+
+ # check the reference wasn't cleaned up
+ assert_(arr_ref() is not None)
+
+ # check we avoid the segfault
+ c_arr[0][0][0]
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestAsCtypesType:
+ """ Test conversion from dtypes to ctypes types """
+ def test_scalar(self):
+ dt = np.dtype('<u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+ dt = np.dtype('>u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+ dt = np.dtype('u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16)
+
+ def test_subarray(self):
+ dt = np.dtype((np.int32, (2, 3)))
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+ def test_structure(self):
+ dt = np.dtype([
+ ('a', np.uint16),
+ ('b', np.uint32),
+ ])
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Structure))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_structure_aligned(self):
+ dt = np.dtype([
+ ('a', np.uint16),
+ ('b', np.uint32),
+ ], align=True)
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Structure))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('', ctypes.c_char * 2), # padding
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_union(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.uint16, np.uint32]
+ ))
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Union))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_padded_union(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.uint16, np.uint32],
+ itemsize=5,
+ ))
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Union))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ('', ctypes.c_char * 5), # padding
+ ])
+
+ def test_overlapping(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 2],
+ formats=[np.uint32, np.uint32]
+ ))
+ assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py b/venv/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py
new file mode 100644
index 00000000..f31a4eab
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_lazyloading.py
@@ -0,0 +1,38 @@
+import sys
+import importlib
+from importlib.util import LazyLoader, find_spec, module_from_spec
+import pytest
+
+
+# Warning raised by _reload_guard() in numpy/__init__.py
+@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded")
+def test_lazy_load():
+ # gh-22045. lazyload doesn't import submodule names into the namespace
+ # muck with sys.modules to test the importing system
+ old_numpy = sys.modules.pop("numpy")
+
+ numpy_modules = {}
+ for mod_name, mod in list(sys.modules.items()):
+ if mod_name[:6] == "numpy.":
+ numpy_modules[mod_name] = mod
+ sys.modules.pop(mod_name)
+
+ try:
+ # create lazy load of numpy as np
+ spec = find_spec("numpy")
+ module = module_from_spec(spec)
+ sys.modules["numpy"] = module
+ loader = LazyLoader(spec.loader)
+ loader.exec_module(module)
+ np = module
+
+ # test a subpackage import
+ from numpy.lib import recfunctions
+
+ # test triggering the import of the package
+ np.ndarray
+
+ finally:
+ if old_numpy:
+ sys.modules["numpy"] = old_numpy
+ sys.modules.update(numpy_modules)
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_matlib.py b/venv/lib/python3.9/site-packages/numpy/tests/test_matlib.py
new file mode 100644
index 00000000..0e93c484
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_matlib.py
@@ -0,0 +1,58 @@
+import numpy as np
+import numpy.matlib
+from numpy.testing import assert_array_equal, assert_, assert_equal
+
+def test_empty():
+ x = numpy.matlib.empty((2,))
+ assert_(isinstance(x, np.matrix))
+ assert_equal(x.shape, (1, 2))  # assert_ would silently pass a wrong shape
+
+def test_ones():
+ assert_array_equal(numpy.matlib.ones((2, 3)),
+ np.matrix([[ 1., 1., 1.],
+ [ 1., 1., 1.]]))
+
+ assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1., 1.]]))
+
+def test_zeros():
+ assert_array_equal(numpy.matlib.zeros((2, 3)),
+ np.matrix([[ 0., 0., 0.],
+ [ 0., 0., 0.]]))
+
+ assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]]))
+
+def test_identity():
+ x = numpy.matlib.identity(2, dtype=int)
+ assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
+
+def test_eye():
+ xc = numpy.matlib.eye(3, k=1, dtype=int)
+ assert_array_equal(xc, np.matrix([[ 0, 1, 0],
+ [ 0, 0, 1],
+ [ 0, 0, 0]]))
+ assert xc.flags.c_contiguous
+ assert not xc.flags.f_contiguous
+
+ xf = numpy.matlib.eye(3, 4, dtype=int, order='F')
+ assert_array_equal(xf, np.matrix([[ 1, 0, 0, 0],
+ [ 0, 1, 0, 0],
+ [ 0, 0, 1, 0]]))
+ assert not xf.flags.c_contiguous
+ assert xf.flags.f_contiguous
+
+def test_rand():
+ x = numpy.matlib.rand(3)
+ # check matrix type, array would have shape (3,)
+ assert_(x.ndim == 2)
+
+def test_randn():
+ x = np.matlib.randn(3)
+ # check matrix type, array would have shape (3,)
+ assert_(x.ndim == 2)
+
+def test_repmat():
+ a1 = np.arange(4)
+ x = numpy.matlib.repmat(a1, 2, 2)
+ y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
+ [0, 1, 2, 3, 0, 1, 2, 3]])
+ assert_array_equal(x, y)
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py b/venv/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py
new file mode 100644
index 00000000..bccbcb8e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_numpy_version.py
@@ -0,0 +1,44 @@
+"""
+Check the numpy version is valid.
+
+Note that a development version is marked by the presence of 'dev0' or '+'
+in the version string; all else is treated as a release. The version string
+itself is set from the output of ``git describe`` which relies on tags.
+
+Examples
+--------
+
+Valid Development: 1.22.0.dev0 1.22.0.dev0+5.g7999db4df2 1.22.0+5.g7999db4df2
+Valid Release: 1.21.0rc1, 1.21.0b1, 1.21.0
+Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a
+
+Note that a release is determined by the version string, which in turn
+is controlled by the result of the ``git describe`` command.
+"""
+import re
+
+import numpy as np
+from numpy.testing import assert_
+
+
+def test_valid_numpy_version():
+ # Verify that the numpy version is a valid one (no .post suffix or other
+ # nonsense). See gh-6431 for an issue caused by an invalid version.
+ version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9]|)"
+ dev_suffix = r"(\.dev0|)(\+[0-9]*\.g[0-9a-f]+|)"
+ if np.version.release:
+ res = re.match(version_pattern + '$', np.__version__)
+ else:
+ res = re.match(version_pattern + dev_suffix + '$', np.__version__)
+
+ assert_(res is not None, np.__version__)
+
+
+def test_short_version():
+ # Check numpy.short_version actually exists
+ if np.version.release:
+ assert_(np.__version__ == np.version.short_version,
+ "short_version mismatch in release version")
+ else:
+ assert_(np.__version__.split("+")[0] == np.version.short_version,
+ "short_version mismatch in development version")
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_public_api.py b/venv/lib/python3.9/site-packages/numpy/tests/test_public_api.py
new file mode 100644
index 00000000..36a21d1c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_public_api.py
@@ -0,0 +1,506 @@
+import sys
+import sysconfig
+import subprocess
+import pkgutil
+import types
+import importlib
+import importlib.metadata
+import warnings
+
+import numpy as np
+import numpy
+import pytest
+from numpy.testing import IS_WASM
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+
+def check_dir(module, module_name=None):
+ """Returns a mapping of all objects with the wrong __module__ attribute."""
+ if module_name is None:
+ module_name = module.__name__
+ results = {}
+ for name in dir(module):
+ item = getattr(module, name)
+ if (hasattr(item, '__module__') and hasattr(item, '__name__')
+ and item.__module__ != module_name):
+ results[name] = item.__module__ + '.' + item.__name__
+ return results
+
+
+def test_numpy_namespace():
+ # None of these objects are publicly documented to be part of the main
+ # NumPy namespace (some are useful though, others need to be cleaned up)
+ undocumented = {
+ 'Tester': 'numpy.testing._private.nosetester.NoseTester',
+ '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+ 'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
+ 'add_newdoc': 'numpy.core.function_base.add_newdoc',
+ 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+ 'byte_bounds': 'numpy.lib.utils.byte_bounds',
+ 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
+ 'deprecate': 'numpy.lib.utils.deprecate',
+ 'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc',
+ 'disp': 'numpy.lib.function_base.disp',
+ 'fastCopyAndTranspose': 'numpy.core._multiarray_umath.fastCopyAndTranspose',
+ 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
+ 'get_include': 'numpy.lib.utils.get_include',
+ 'recfromcsv': 'numpy.lib.npyio.recfromcsv',
+ 'recfromtxt': 'numpy.lib.npyio.recfromtxt',
+ 'safe_eval': 'numpy.lib.utils.safe_eval',
+ 'set_string_function': 'numpy.core.arrayprint.set_string_function',
+ 'show_config': 'numpy.__config__.show',
+ 'show_runtime': 'numpy.lib.utils.show_runtime',
+ 'who': 'numpy.lib.utils.who',
+ }
+ # We override dir to not show these members
+ allowlist = undocumented
+ bad_results = check_dir(np)
+ # pytest gives better error messages with the builtin assert than with
+ # assert_equal
+ assert bad_results == allowlist
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
+@pytest.mark.parametrize('name', ['testing', 'Tester'])
+def test_import_lazy_import(name):
+ """Make sure we can actually use the modules we lazy load.
+
+ While not exported as part of the public API, it was accessible. With the
+ use of __getattr__ and __dir__, this isn't always true: a badly
+ implemented __getattr__ can lead to infinite recursion.
+
+ This is the only way I found that would force the failure to appear
+ with the badly implemented code.
+
+ We also test for the presence of the lazily imported modules in dir().
+
+ """
+ exe = (sys.executable, '-c', "import numpy; numpy." + name)
+ result = subprocess.check_output(exe)
+ assert not result
+
+ # Make sure they are still in the __dir__
+ assert name in dir(np)
+
+
+def test_dir_testing():
+ """Assert that output of dir has only one "testing/tester"
+ attribute without duplicate"""
+ assert len(dir(np)) == len(set(dir(np)))
+
+
+def test_numpy_linalg():
+ bad_results = check_dir(np.linalg)
+ assert bad_results == {}
+
+
+def test_numpy_fft():
+ bad_results = check_dir(np.fft)
+ assert bad_results == {}
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available in this python")
+def test_NPY_NO_EXPORT():
+ cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
+ # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
+ f = getattr(cdll, 'test_not_exported', None)
+ assert f is None, ("'test_not_exported' is mistakenly exported, "
+ "NPY_NO_EXPORT does not work")
+
+
+# Historically NumPy has not used leading underscores for private submodules
+# much. This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
+# but were never intended to be public. The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+#
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used. For many of those modules the
+# current status is fine. For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+PUBLIC_MODULES = ['numpy.' + s for s in [
+ "array_api",
+ "array_api.linalg",
+ "ctypeslib",
+ "distutils",
+ "distutils.cpuinfo",
+ "distutils.exec_command",
+ "distutils.misc_util",
+ "distutils.log",
+ "distutils.system_info",
+ "doc",
+ "doc.constants",
+ "doc.ufuncs",
+ "f2py",
+ "fft",
+ "lib",
+ "lib.format", # was this meant to be public?
+ "lib.mixins",
+ "lib.recfunctions",
+ "lib.scimath",
+ "lib.stride_tricks",
+ "linalg",
+ "ma",
+ "ma.extras",
+ "ma.mrecords",
+ "matlib",
+ "polynomial",
+ "polynomial.chebyshev",
+ "polynomial.hermite",
+ "polynomial.hermite_e",
+ "polynomial.laguerre",
+ "polynomial.legendre",
+ "polynomial.polynomial",
+ "random",
+ "testing",
+ "typing",
+ "typing.mypy_plugin",
+ "version",
+]]
+
+
+PUBLIC_ALIASED_MODULES = [
+ "numpy.char",
+ "numpy.emath",
+ "numpy.rec",
+]
+
+
+PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
+ "compat",
+ "compat.py3k",
+ "conftest",
+ "core",
+ "core.arrayprint",
+ "core.defchararray",
+ "core.einsumfunc",
+ "core.fromnumeric",
+ "core.function_base",
+ "core.getlimits",
+ "core.memmap",
+ "core.multiarray",
+ "core.numeric",
+ "core.numerictypes",
+ "core.overrides",
+ "core.records",
+ "core.shape_base",
+ "core.umath",
+ "core.umath_tests",
+ "distutils.armccompiler",
+ "distutils.ccompiler",
+ 'distutils.ccompiler_opt',
+ "distutils.command",
+ "distutils.command.autodist",
+ "distutils.command.bdist_rpm",
+ "distutils.command.build",
+ "distutils.command.build_clib",
+ "distutils.command.build_ext",
+ "distutils.command.build_py",
+ "distutils.command.build_scripts",
+ "distutils.command.build_src",
+ "distutils.command.config",
+ "distutils.command.config_compiler",
+ "distutils.command.develop",
+ "distutils.command.egg_info",
+ "distutils.command.install",
+ "distutils.command.install_clib",
+ "distutils.command.install_data",
+ "distutils.command.install_headers",
+ "distutils.command.sdist",
+ "distutils.conv_template",
+ "distutils.core",
+ "distutils.extension",
+ "distutils.fcompiler",
+ "distutils.fcompiler.absoft",
+ "distutils.fcompiler.arm",
+ "distutils.fcompiler.compaq",
+ "distutils.fcompiler.environment",
+ "distutils.fcompiler.g95",
+ "distutils.fcompiler.gnu",
+ "distutils.fcompiler.hpux",
+ "distutils.fcompiler.ibm",
+ "distutils.fcompiler.intel",
+ "distutils.fcompiler.lahey",
+ "distutils.fcompiler.mips",
+ "distutils.fcompiler.nag",
+ "distutils.fcompiler.none",
+ "distutils.fcompiler.pathf95",
+ "distutils.fcompiler.pg",
+ "distutils.fcompiler.nv",
+ "distutils.fcompiler.sun",
+ "distutils.fcompiler.vast",
+ "distutils.fcompiler.fujitsu",
+ "distutils.from_template",
+ "distutils.intelccompiler",
+ "distutils.lib2def",
+ "distutils.line_endings",
+ "distutils.mingw32ccompiler",
+ "distutils.msvccompiler",
+ "distutils.npy_pkg_config",
+ "distutils.numpy_distribution",
+ "distutils.pathccompiler",
+ "distutils.unixccompiler",
+ "dual",
+ "f2py.auxfuncs",
+ "f2py.capi_maps",
+ "f2py.cb_rules",
+ "f2py.cfuncs",
+ "f2py.common_rules",
+ "f2py.crackfortran",
+ "f2py.diagnose",
+ "f2py.f2py2e",
+ "f2py.f90mod_rules",
+ "f2py.func2subr",
+ "f2py.rules",
+ "f2py.symbolic",
+ "f2py.use_rules",
+ "fft.helper",
+ "lib.arraypad",
+ "lib.arraysetops",
+ "lib.arrayterator",
+ "lib.function_base",
+ "lib.histograms",
+ "lib.index_tricks",
+ "lib.nanfunctions",
+ "lib.npyio",
+ "lib.polynomial",
+ "lib.shape_base",
+ "lib.twodim_base",
+ "lib.type_check",
+ "lib.ufunclike",
+ "lib.user_array", # note: not in np.lib, but probably should just be deleted
+ "lib.utils",
+ "linalg.lapack_lite",
+ "linalg.linalg",
+ "ma.bench",
+ "ma.core",
+ "ma.testutils",
+ "ma.timer_comparison",
+ "matrixlib",
+ "matrixlib.defmatrix",
+ "polynomial.polyutils",
+ "random.mtrand",
+ "random.bit_generator",
+ "testing.print_coercion_tables",
+ "testing.utils",
+]]
+
+
+def is_unexpected(name):
+ """Check if this needs to be considered."""
+ if '._' in name or '.tests' in name or '.setup' in name:
+ return False
+
+ if name in PUBLIC_MODULES:
+ return False
+
+ if name in PUBLIC_ALIASED_MODULES:
+ return False
+
+ if name in PRIVATE_BUT_PRESENT_MODULES:
+ return False
+
+ return True
+
+
+# These are present in a directory with an __init__.py but cannot be imported
+# code_generators/ isn't installed, but present for an inplace build
+SKIP_LIST = [
+ "numpy.core.code_generators",
+ "numpy.core.code_generators.genapi",
+ "numpy.core.code_generators.generate_umath",
+ "numpy.core.code_generators.ufunc_docstrings",
+ "numpy.core.code_generators.generate_numpy_api",
+ "numpy.core.code_generators.generate_ufunc_api",
+ "numpy.core.code_generators.numpy_api",
+ "numpy.core.code_generators.generate_umath_doc",
+ "numpy.core.cversions",
+ "numpy.core.generate_numpy_api",
+ "numpy.distutils.msvc9compiler",
+]
+
+
+def test_all_modules_are_expected():
+ """
+ Test that we don't add anything that looks like a new public module by
+ accident. Check is based on filenames.
+ """
+
+ modnames = []
+ for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
+ prefix=np.__name__ + '.',
+ onerror=None):
+ if is_unexpected(modname) and modname not in SKIP_LIST:
+ # We have a name that is new. If that's on purpose, add it to
+ # PUBLIC_MODULES. We don't expect to have to add anything to
+ # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
+ modnames.append(modname)
+
+ if modnames:
+ raise AssertionError(f'Found unexpected modules: {modnames}')
+
+
+# Stuff that clearly shouldn't be in the API and is detected by the next test
+# below
+SKIP_LIST_2 = [
+ 'numpy.math',
+ 'numpy.distutils.log.sys',
+ 'numpy.distutils.log.logging',
+ 'numpy.distutils.log.warnings',
+ 'numpy.doc.constants.re',
+ 'numpy.doc.constants.textwrap',
+ 'numpy.lib.emath',
+ 'numpy.lib.math',
+ 'numpy.matlib.char',
+ 'numpy.matlib.rec',
+ 'numpy.matlib.emath',
+ 'numpy.matlib.math',
+ 'numpy.matlib.linalg',
+ 'numpy.matlib.fft',
+ 'numpy.matlib.random',
+ 'numpy.matlib.ctypeslib',
+ 'numpy.matlib.ma',
+]
+
+
+def test_all_modules_are_expected_2():
+ """
+ Method checking all objects. The pkgutil-based method in
+ `test_all_modules_are_expected` does not catch imports into a namespace,
+ only filenames. So this test is more thorough, and checks things like:
+
+ import .lib.scimath as emath
+
+ To check if something in a module is (effectively) public, one can check if
+ there's anything in that namespace that's a public function/object but is
+ not exposed in a higher-level namespace. For example for a `numpy.lib`
+ submodule::
+
+ mod = np.lib.mixins
+ for obj in mod.__all__:
+ if obj in np.__all__:
+ continue
+ elif obj in np.lib.__all__:
+ continue
+
+ else:
+ print(obj)
+
+ """
+
+ def find_unexpected_members(mod_name):
+ members = []
+ module = importlib.import_module(mod_name)
+ if hasattr(module, '__all__'):
+ objnames = module.__all__
+ else:
+ objnames = dir(module)
+
+ for objname in objnames:
+ if not objname.startswith('_'):
+ fullobjname = mod_name + '.' + objname
+ if isinstance(getattr(module, objname), types.ModuleType):
+ if is_unexpected(fullobjname):
+ if fullobjname not in SKIP_LIST_2:
+ members.append(fullobjname)
+
+ return members
+
+ unexpected_members = find_unexpected_members("numpy")
+ for modname in PUBLIC_MODULES:
+ unexpected_members.extend(find_unexpected_members(modname))
+
+ if unexpected_members:
+ raise AssertionError("Found unexpected object(s) that look like "
+ "modules: {}".format(unexpected_members))
+
+
+def test_api_importable():
+ """
+ Check that all submodules listed higher up in this file can be imported
+
+ Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+ simply need to be removed from the list (deprecation may or may not be
+ needed - apply common sense).
+ """
+ def check_importable(module_name):
+ try:
+ importlib.import_module(module_name)
+ except (ImportError, AttributeError):
+ return False
+
+ return True
+
+ module_names = []
+ for module_name in PUBLIC_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that cannot be "
+ "imported: {}".format(module_names))
+
+ for module_name in PUBLIC_ALIASED_MODULES:
+ try:
+ eval(module_name)
+ except AttributeError:
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that were not "
+ "found: {}".format(module_names))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ warnings.filterwarnings('always', category=ImportWarning)
+ for module_name in PRIVATE_BUT_PRESENT_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules that are not really public but looked "
+ "public and can not be imported: "
+ "{}".format(module_names))
+
+
+@pytest.mark.xfail(
+ sysconfig.get_config_var("Py_DEBUG") is not None,
+ reason=(
+ "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, "
+ "which does not expose the `array_api` entry point. "
+ "See https://github.com/numpy/numpy/pull/19800"
+ ),
+)
+def test_array_api_entry_point():
+ """
+ Entry point for Array API implementation can be found with importlib and
+ returns the numpy.array_api namespace.
+ """
+ eps = importlib.metadata.entry_points()
+ try:
+ xp_eps = eps.select(group="array_api")
+ except AttributeError:
+ # The select interface for entry_points was introduced in py3.10,
+ # deprecating its dict interface. We fallback to dict keys for finding
+ # Array API entry points so that running this test in <=3.9 will
+ # still work - see https://github.com/numpy/numpy/pull/19800.
+ xp_eps = eps.get("array_api", [])
+ assert len(xp_eps) > 0, "No entry points for 'array_api' found"
+
+ try:
+ ep = next(ep for ep in xp_eps if ep.name == "numpy")
+ except StopIteration:
+ raise AssertionError("'numpy' not in array_api entry points") from None
+
+ xp = ep.load()
+ msg = (
+ f"numpy entry point value '{ep.value}' "
+ "does not point to our Array API implementation"
+ )
+ assert xp is numpy.array_api, msg
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_reloading.py b/venv/lib/python3.9/site-packages/numpy/tests/test_reloading.py
new file mode 100644
index 00000000..a1f36008
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_reloading.py
@@ -0,0 +1,72 @@
+from numpy.testing import (
+ assert_raises,
+ assert_warns,
+ assert_,
+ assert_equal,
+ IS_WASM,
+)
+from numpy.compat import pickle
+
+import pytest
+import sys
+import subprocess
+import textwrap
+from importlib import reload
+
+
+def test_numpy_reloading():
+ # gh-7844. Also check that relevant globals retain their identity.
+ import numpy as np
+ import numpy._globals
+
+ _NoValue = np._NoValue
+ VisibleDeprecationWarning = np.VisibleDeprecationWarning
+ ModuleDeprecationWarning = np.ModuleDeprecationWarning
+
+ with assert_warns(UserWarning):
+ reload(np)
+ assert_(_NoValue is np._NoValue)
+ assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
+ assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+
+ assert_raises(RuntimeError, reload, numpy._globals)
+ with assert_warns(UserWarning):
+ reload(np)
+ assert_(_NoValue is np._NoValue)
+ assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
+ assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+
+def test_novalue():
+ import numpy as np
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(repr(np._NoValue), '<no value>')
+ assert_(pickle.loads(pickle.dumps(np._NoValue,
+ protocol=proto)) is np._NoValue)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
+def test_full_reimport():
+ """At the time of writing this, it is *not* truly supported, but
+ apparently enough users rely on it, for it to be an annoying change
+ when it started failing previously.
+ """
+ # Test within a new process, to ensure that we do not mess with the
+ # global state during the test run (could lead to cryptic test failures).
+ # This is generally unsafe, especially, since we also reload the C-modules.
+ code = textwrap.dedent(r"""
+ import sys
+ from pytest import warns
+ import numpy as np
+
+ for k in list(sys.modules.keys()):
+ if "numpy" in k:
+ del sys.modules[k]
+
+ with warns(UserWarning):
+ import numpy as np
+ """)
+ p = subprocess.run([sys.executable, '-c', code], capture_output=True)
+ if p.returncode:
+ raise AssertionError(
+ f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
+ )
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_scripts.py b/venv/lib/python3.9/site-packages/numpy/tests/test_scripts.py
new file mode 100644
index 00000000..892c04ee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_scripts.py
@@ -0,0 +1,47 @@
+""" Test scripts
+
+Test that we can run executable scripts that have been installed with numpy.
+"""
+import sys
+import os
+import pytest
+from os.path import join as pathjoin, isfile, dirname
+import subprocess
+
+import numpy as np
+from numpy.testing import assert_equal, IS_WASM
+
+is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
+
+
+def find_f2py_commands():
+ if sys.platform == 'win32':
+ exe_dir = dirname(sys.executable)
+ if exe_dir.endswith('Scripts'): # virtualenv
+ return [os.path.join(exe_dir, 'f2py')]
+ else:
+ return [os.path.join(exe_dir, "Scripts", 'f2py')]
+ else:
+ # Three scripts are installed in Unix-like systems:
+ # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
+ # if installed with python3.9 the scripts would be named
+ # 'f2py', 'f2py3', and 'f2py3.9'.
+ version = sys.version_info
+ major = str(version.major)
+ minor = str(version.minor)
+ return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
+
+
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
+@pytest.mark.xfail(reason="Test is unreliable")
+@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
+def test_f2py(f2py_cmd):
+ # test that we can run f2py script
+ stdout = subprocess.check_output([f2py_cmd, '-v'])
+ assert_equal(stdout.strip(), np.__version__.encode('ascii'))
+
+
+@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
+def test_pep338():
+ stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
+ assert_equal(stdout.strip(), np.__version__.encode('ascii'))
diff --git a/venv/lib/python3.9/site-packages/numpy/tests/test_warnings.py b/venv/lib/python3.9/site-packages/numpy/tests/test_warnings.py
new file mode 100644
index 00000000..d7a6d880
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/tests/test_warnings.py
@@ -0,0 +1,74 @@
+"""
+Tests which scan for certain occurrences in the code; they may not find
+all of these occurrences but should catch almost all of them.
+"""
+import pytest
+
+from pathlib import Path
+import ast
+import tokenize
+import numpy
+
+class ParseCall(ast.NodeVisitor):
+ def __init__(self):
+ self.ls = []
+
+ def visit_Attribute(self, node):
+ ast.NodeVisitor.generic_visit(self, node)
+ self.ls.append(node.attr)
+
+ def visit_Name(self, node):
+ self.ls.append(node.id)
+
+
+class FindFuncs(ast.NodeVisitor):
+ def __init__(self, filename):
+ super().__init__()
+ self.__filename = filename
+
+ def visit_Call(self, node):
+ p = ParseCall()
+ p.visit(node.func)
+ ast.NodeVisitor.generic_visit(self, node)
+
+ if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
+ if node.args[0].s == "ignore":
+ raise AssertionError(
+ "warnings should have an appropriate stacklevel; found in "
+ "{} on line {}".format(self.__filename, node.lineno))
+
+ if p.ls[-1] == 'warn' and (
+ len(p.ls) == 1 or p.ls[-2] == 'warnings'):
+
+ if "testing/tests/test_warnings.py" == self.__filename:
+ # This file
+ return
+
+ # See if stacklevel exists:
+ if len(node.args) == 3:
+ return
+ args = {kw.arg for kw in node.keywords}
+ if "stacklevel" in args:
+ return
+ raise AssertionError(
+ "warnings should have an appropriate stacklevel; found in "
+ "{} on line {}".format(self.__filename, node.lineno))
+
+
+@pytest.mark.slow
+def test_warning_calls():
+ # combined "ignore" and stacklevel error
+ base = Path(numpy.__file__).parent
+
+ for path in base.rglob("*.py"):
+ if base / "testing" in path.parents:
+ continue
+ if path == base / "__init__.py":
+ continue
+ if path == base / "random" / "__init__.py":
+ continue
+ # use tokenize to auto-detect encoding on systems where no
+ # default encoding is defined (e.g. LANG='C')
+ with tokenize.open(str(path)) as file:
+ tree = ast.parse(file.read())
+ FindFuncs(path).visit(tree)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/__init__.py b/venv/lib/python3.9/site-packages/numpy/typing/__init__.py
new file mode 100644
index 00000000..5cf02fe8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/__init__.py
@@ -0,0 +1,175 @@
+"""
+============================
+Typing (:mod:`numpy.typing`)
+============================
+
+.. versionadded:: 1.20
+
+Large parts of the NumPy API have :pep:`484`-style type annotations. In
+addition a number of type aliases are available to users, most prominently
+the two below:
+
+- `ArrayLike`: objects that can be converted to arrays
+- `DTypeLike`: objects that can be converted to dtypes
+
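+For instance, a minimal sketch of how these aliases are typically used in
+annotations (the function itself is hypothetical):
+
+.. code-block:: python
+
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> def as_float_array(a: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ ... return np.asarray(a, dtype=np.float64)
+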
+.. _typing-extensions: https://pypi.org/project/typing-extensions/
+
+Mypy plugin
+-----------
+
+.. versionadded:: 1.21
+
+.. automodule:: numpy.typing.mypy_plugin
+
+.. currentmodule:: numpy.typing
+
+Differences from the runtime NumPy API
+--------------------------------------
+
+NumPy is very flexible. Trying to describe the full range of
+possibilities statically would result in types that are not very
+helpful. For that reason, the typed NumPy API is often stricter than
+the runtime NumPy API. This section describes some notable
+differences.
+
+ArrayLike
+~~~~~~~~~
+
+The `ArrayLike` type tries to avoid creating object arrays. For
+example,
+
+.. code-block:: python
+
+ >>> np.array(x**2 for x in range(10))
+ array(<generator object <genexpr> at ...>, dtype=object)
+
+is valid NumPy code which will create a 0-dimensional object
+array. Type checkers will, however, complain about the above example when
+using the NumPy types. If you really intended to do the above, then
+you can either use a ``# type: ignore`` comment:
+
+.. code-block:: python
+
+ >>> np.array(x**2 for x in range(10)) # type: ignore
+
+or explicitly type the array-like object as `~typing.Any`:
+
+.. code-block:: python
+
+ >>> from typing import Any
+ >>> array_like: Any = (x**2 for x in range(10))
+ >>> np.array(array_like)
+ array(<generator object <genexpr> at ...>, dtype=object)
+
+ndarray
+~~~~~~~
+
+It's possible to mutate the dtype of an array at runtime. For example,
+the following code is valid:
+
+.. code-block:: python
+
+ >>> x = np.array([1, 2])
+ >>> x.dtype = np.bool_
+
+This sort of mutation is not allowed by the types. Users who want to
+write statically typed code should instead use the `numpy.ndarray.view`
+method to create a view of the array with a different dtype.
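+
+For example, a minimal sketch of the view-based alternative:
+
+.. code-block:: python
+
+ >>> x = np.array([1, 2])
+ >>> y = x.view(np.uint8) # new dtype via a view; `x` itself is unchanged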
+
+DTypeLike
+~~~~~~~~~
+
+The `DTypeLike` type tries to avoid creation of dtype objects using
+a dictionary of fields like the one below:
+
+.. code-block:: python
+
+ >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
+
+Although this is valid NumPy code, the type checker will complain about it,
+since its usage is discouraged.
+Please see: :ref:`Data type objects <arrays.dtypes>`.
+
+Number precision
+~~~~~~~~~~~~~~~~
+
+The precision of `numpy.number` subclasses is treated as a covariant generic
+parameter (see :class:`~NBitBase`), simplifying the annotation of processes
+involving precision-based casting.
+
+.. code-block:: python
+
+ >>> from typing import TypeVar
+ >>> import numpy as np
+ >>> import numpy.typing as npt
+
+ >>> T = TypeVar("T", bound=npt.NBitBase)
+ >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
+ ... ...
+
+Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
+`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
+runtime, they're not necessarily considered as sub-classes.
+
+Timedelta64
+~~~~~~~~~~~
+
+The `~numpy.timedelta64` class is not considered a subclass of
+`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
+during static type checking.
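+
+A minimal illustration of the distinction (annotations are only checked
+statically, not enforced at runtime):
+
+.. code-block:: python
+
+ >>> td_gen: np.generic = np.timedelta64(1, "D") # accepted
+ >>> td_int: np.signedinteger = np.timedelta64(1, "D") # rejected when type checking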
+
+0D arrays
+~~~~~~~~~
+
+During runtime numpy aggressively casts any passed 0D arrays into their
+corresponding `~numpy.generic` instance. Until the introduction of shape
+typing (see :pep:`646`) it is unfortunately not possible to make the
+necessary distinction between 0D and >0D arrays. While thus not strictly
+correct, all operations that can potentially perform a 0D-array -> scalar
+cast are currently annotated as exclusively returning an `ndarray`.
+
+If it is known in advance that an operation *will* perform a
+0D-array -> scalar cast, then one can consider manually remedying the
+situation with either `typing.cast` or a ``# type: ignore`` comment.
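+
+For example, a sketch of the ``typing.cast`` remedy, assuming one knows the
+0D addition below returns a scalar at runtime:
+
+.. code-block:: python
+
+ >>> from typing import cast
+ >>> x0d = np.array(1.0) # a 0D array
+ >>> out = cast(np.float64, x0d + x0d) # typed as `ndarray`, a scalar at runtime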
+
+Record array dtypes
+~~~~~~~~~~~~~~~~~~~
+
+The dtype of `numpy.recarray`, and the `numpy.rec` functions in general,
+can be specified in one of two ways:
+
+* Directly via the ``dtype`` argument.
+* With up to five helper arguments that operate via `numpy.format_parser`:
+ ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
+
+These two approaches are currently typed as being mutually exclusive,
+*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
+While this mutual exclusivity is not (strictly) enforced during runtime,
+combining both dtype specifiers can lead to unexpected or even downright
+buggy behavior.
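+
+For example, a sketch of the two (statically mutually exclusive) spellings
+of the same record dtype:
+
+.. code-block:: python
+
+ >>> rec1 = np.rec.fromrecords([(1, 2.0)], dtype=[("a", np.int64), ("b", np.float64)])
+ >>> rec2 = np.rec.fromrecords([(1, 2.0)], formats=["i8", "f8"], names=["a", "b"])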
+
+API
+---
+
+"""
+# NOTE: The API section will be appended with additional entries
+# further down in this file
+
+from numpy._typing import (
+ ArrayLike,
+ DTypeLike,
+ NBitBase,
+ NDArray,
+)
+
+__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
+
+if __doc__ is not None:
+ from numpy._typing._add_docstring import _docstrings
+ __doc__ += _docstrings
+ __doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n'
+ del _docstrings
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py b/venv/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py
new file mode 100644
index 00000000..1ffe74fa
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/mypy_plugin.py
@@ -0,0 +1,197 @@
+"""A mypy_ plugin for managing a number of platform-specific annotations.
+Its functionality can be split into three distinct parts:
+
+* Assigning the (platform-dependent) precisions of certain `~numpy.number`
+ subclasses, including the likes of `~numpy.int_`, `~numpy.intp` and
+ `~numpy.longlong`. See the documentation on
+ :ref:`scalar types <arrays.scalars.built-in>` for a comprehensive overview
+ of the affected classes. Without the plugin the precision of all relevant
+ classes will be inferred as `~typing.Any`.
+* Removing all extended-precision `~numpy.number` subclasses that are
+ unavailable for the platform in question. Most notably this includes the
+ likes of `~numpy.float128` and `~numpy.complex256`. Without the plugin *all*
+ extended-precision types will, as far as mypy is concerned, be available
+ to all platforms.
+* Assigning the (platform-dependent) precision of `~numpy.ctypeslib.c_intp`.
+ Without the plugin the type will default to `ctypes.c_int64`.
+
+ .. versionadded:: 1.22
+
+Examples
+--------
+To enable the plugin, one must add it to their mypy `configuration file`_:
+
+.. code-block:: ini
+
+ [mypy]
+ plugins = numpy.typing.mypy_plugin
+
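+The effect can be inspected with mypy's ``reveal_type``; a sketch, where the
+revealed precision assumes a typical 64-bit platform:
+
+.. code-block:: python
+
+ import numpy as np
+
+ reveal_type(np.int_()) # with the plugin: numpy.signedinteger[numpy._64Bit]
+ # without it: numpy.signedinteger[Any]
+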
+.. _mypy: http://mypy-lang.org/
+.. _configuration file: https://mypy.readthedocs.io/en/stable/config_file.html
+
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+from typing import Final, TYPE_CHECKING, Callable
+
+import numpy as np
+
+try:
+ import mypy.types
+ from mypy.types import Type
+ from mypy.plugin import Plugin, AnalyzeTypeContext
+ from mypy.nodes import MypyFile, ImportFrom, Statement
+ from mypy.build import PRI_MED
+
+ _HookFunc = Callable[[AnalyzeTypeContext], Type]
+ MYPY_EX: None | ModuleNotFoundError = None
+except ModuleNotFoundError as ex:
+ MYPY_EX = ex
+
+__all__: list[str] = []
+
+
+def _get_precision_dict() -> dict[str, str]:
+ names = [
+ ("_NBitByte", np.byte),
+ ("_NBitShort", np.short),
+ ("_NBitIntC", np.intc),
+ ("_NBitIntP", np.intp),
+ ("_NBitInt", np.int_),
+ ("_NBitLongLong", np.longlong),
+
+ ("_NBitHalf", np.half),
+ ("_NBitSingle", np.single),
+ ("_NBitDouble", np.double),
+ ("_NBitLongDouble", np.longdouble),
+ ]
+ ret = {}
+ for name, typ in names:
+ n: int = 8 * typ().dtype.itemsize
+ ret[f'numpy._typing._nbit.{name}'] = f"numpy._{n}Bit"
+ return ret
+
+
+def _get_extended_precision_list() -> list[str]:
+ extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble]
+ extended_names = {
+ "uint128",
+ "uint256",
+ "int128",
+ "int256",
+ "float80",
+ "float96",
+ "float128",
+ "float256",
+ "complex160",
+ "complex192",
+ "complex256",
+ "complex512",
+ }
+ return [i.__name__ for i in extended_types if i.__name__ in extended_names]
+
+
+def _get_c_intp_name() -> str:
+ # Adapted from `np.core._internal._getintp_ctype`
+ char = np.dtype('p').char
+ if char == 'i':
+ return "c_int"
+ elif char == 'l':
+ return "c_long"
+ elif char == 'q':
+ return "c_longlong"
+ else:
+ return "c_long"
+
+
+#: A dictionary mapping type-aliases in `numpy._typing._nbit` to
+#: concrete `numpy.typing.NBitBase` subclasses.
+_PRECISION_DICT: Final = _get_precision_dict()
+
+#: A list with the names of all extended precision `np.number` subclasses.
+_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list()
+
+#: The name of the ctypes equivalent of `np.intp`
+_C_INTP: Final = _get_c_intp_name()
+
+
+def _hook(ctx: AnalyzeTypeContext) -> Type:
+ """Replace a type-alias with a concrete ``NBitBase`` subclass."""
+ typ, _, api = ctx
+ name = typ.name.split(".")[-1]
+ name_new = _PRECISION_DICT[f"numpy._typing._nbit.{name}"]
+ return api.named_type(name_new)
+
+
+if TYPE_CHECKING or MYPY_EX is None:
+ def _index(iterable: Iterable[Statement], id: str) -> int:
+ """Identify the first ``ImportFrom`` instance the specified `id`."""
+ for i, value in enumerate(iterable):
+ if getattr(value, "id", None) == id:
+ return i
+ raise ValueError("Failed to identify a `ImportFrom` instance "
+ f"with the following id: {id!r}")
+
+ def _override_imports(
+ file: MypyFile,
+ module: str,
+ imports: list[tuple[str, None | str]],
+ ) -> None:
+ """Override the first `module`-based import with new `imports`."""
+ # Construct a new `from module import y` statement
+ import_obj = ImportFrom(module, 0, names=imports)
+ import_obj.is_top_level = True
+
+ # Replace the first `module`-based import statement with `import_obj`
+ for lst in [file.defs, file.imports]: # type: list[Statement]
+ i = _index(lst, module)
+ lst[i] = import_obj
+
+ class _NumpyPlugin(Plugin):
+ """A mypy plugin for handling versus numpy-specific typing tasks."""
+
+ def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc:
+ """Set the precision of platform-specific `numpy.number`
+ subclasses.
+
+ For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
+ """
+ if fullname in _PRECISION_DICT:
+ return _hook
+ return None
+
+ def get_additional_deps(
+ self, file: MypyFile
+ ) -> list[tuple[int, str, int]]:
+ """Handle all import-based overrides.
+
+ * Import platform-specific extended-precision `numpy.number`
+ subclasses (*e.g.* `numpy.float96`, `numpy.float128` and
+ `numpy.complex256`).
+ * Import the appropriate `ctypes` equivalent to `numpy.intp`.
+
+ """
+ ret = [(PRI_MED, file.fullname, -1)]
+
+ if file.fullname == "numpy":
+ _override_imports(
+ file, "numpy._typing._extended_precision",
+ imports=[(v, v) for v in _EXTENDED_PRECISION_LIST],
+ )
+ elif file.fullname == "numpy.ctypeslib":
+ _override_imports(
+ file, "ctypes",
+ imports=[(_C_INTP, "_c_intp")],
+ )
+ return ret
+
+ def plugin(version: str) -> type[_NumpyPlugin]:
+ """An entry-point for mypy."""
+ return _NumpyPlugin
+
+else:
+ def plugin(version: str) -> type[_NumpyPlugin]:
+ """An entry-point for mypy."""
+ raise MYPY_EX
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/setup.py b/venv/lib/python3.9/site-packages/numpy/typing/setup.py
new file mode 100644
index 00000000..c444e769
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/setup.py
@@ -0,0 +1,11 @@
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('typing', parent_package, top_path)
+ config.add_subpackage('tests')
+ config.add_data_dir('tests/data')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/__init__.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/__init__.py
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi
new file mode 100644
index 00000000..3bbc101c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arithmetic.pyi
@@ -0,0 +1,121 @@
+from typing import Any
+import numpy as np
+
+b_ = np.bool_()
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+AR_b: np.ndarray[Any, np.dtype[np.bool_]]
+AR_u: np.ndarray[Any, np.dtype[np.uint32]]
+AR_i: np.ndarray[Any, np.dtype[np.int64]]
+AR_f: np.ndarray[Any, np.dtype[np.float64]]
+AR_c: np.ndarray[Any, np.dtype[np.complex128]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+
+ANY: Any
+
+AR_LIKE_b: list[bool]
+AR_LIKE_u: list[np.uint32]
+AR_LIKE_i: list[int]
+AR_LIKE_f: list[float]
+AR_LIKE_c: list[complex]
+AR_LIKE_m: list[np.timedelta64]
+AR_LIKE_M: list[np.datetime64]
+
+# Array subtraction
+
+# NOTE: mypy's `NoReturn` errors are, unfortunately, not that great
+_1 = AR_b - AR_LIKE_b # E: Need type annotation
+_2 = AR_LIKE_b - AR_b # E: Need type annotation
+AR_i - bytes() # E: No overload variant
+
+AR_f - AR_LIKE_m # E: Unsupported operand types
+AR_f - AR_LIKE_M # E: Unsupported operand types
+AR_c - AR_LIKE_m # E: Unsupported operand types
+AR_c - AR_LIKE_M # E: Unsupported operand types
+
+AR_m - AR_LIKE_f # E: Unsupported operand types
+AR_M - AR_LIKE_f # E: Unsupported operand types
+AR_m - AR_LIKE_c # E: Unsupported operand types
+AR_M - AR_LIKE_c # E: Unsupported operand types
+
+AR_m - AR_LIKE_M # E: Unsupported operand types
+AR_LIKE_m - AR_M # E: Unsupported operand types
+
+# array floor division
+
+AR_M // AR_LIKE_b # E: Unsupported operand types
+AR_M // AR_LIKE_u # E: Unsupported operand types
+AR_M // AR_LIKE_i # E: Unsupported operand types
+AR_M // AR_LIKE_f # E: Unsupported operand types
+AR_M // AR_LIKE_c # E: Unsupported operand types
+AR_M // AR_LIKE_m # E: Unsupported operand types
+AR_M // AR_LIKE_M # E: Unsupported operand types
+
+AR_b // AR_LIKE_M # E: Unsupported operand types
+AR_u // AR_LIKE_M # E: Unsupported operand types
+AR_i // AR_LIKE_M # E: Unsupported operand types
+AR_f // AR_LIKE_M # E: Unsupported operand types
+AR_c // AR_LIKE_M # E: Unsupported operand types
+AR_m // AR_LIKE_M # E: Unsupported operand types
+AR_M // AR_LIKE_M # E: Unsupported operand types
+
+_3 = AR_m // AR_LIKE_b # E: Need type annotation
+AR_m // AR_LIKE_c # E: Unsupported operand types
+
+AR_b // AR_LIKE_m # E: Unsupported operand types
+AR_u // AR_LIKE_m # E: Unsupported operand types
+AR_i // AR_LIKE_m # E: Unsupported operand types
+AR_f // AR_LIKE_m # E: Unsupported operand types
+AR_c // AR_LIKE_m # E: Unsupported operand types
+
+# Array multiplication
+
+AR_b *= AR_LIKE_u # E: incompatible type
+AR_b *= AR_LIKE_i # E: incompatible type
+AR_b *= AR_LIKE_f # E: incompatible type
+AR_b *= AR_LIKE_c # E: incompatible type
+AR_b *= AR_LIKE_m # E: incompatible type
+
+AR_u *= AR_LIKE_i # E: incompatible type
+AR_u *= AR_LIKE_f # E: incompatible type
+AR_u *= AR_LIKE_c # E: incompatible type
+AR_u *= AR_LIKE_m # E: incompatible type
+
+AR_i *= AR_LIKE_f # E: incompatible type
+AR_i *= AR_LIKE_c # E: incompatible type
+AR_i *= AR_LIKE_m # E: incompatible type
+
+AR_f *= AR_LIKE_c # E: incompatible type
+AR_f *= AR_LIKE_m # E: incompatible type
+
+# Array power
+
+AR_b **= AR_LIKE_b # E: Invalid self argument
+AR_b **= AR_LIKE_u # E: Invalid self argument
+AR_b **= AR_LIKE_i # E: Invalid self argument
+AR_b **= AR_LIKE_f # E: Invalid self argument
+AR_b **= AR_LIKE_c # E: Invalid self argument
+
+AR_u **= AR_LIKE_i # E: incompatible type
+AR_u **= AR_LIKE_f # E: incompatible type
+AR_u **= AR_LIKE_c # E: incompatible type
+
+AR_i **= AR_LIKE_f # E: incompatible type
+AR_i **= AR_LIKE_c # E: incompatible type
+
+AR_f **= AR_LIKE_c # E: incompatible type
+
+# Scalars
+
+b_ - b_ # E: No overload variant
+
+dt + dt # E: Unsupported operand types
+td - dt # E: Unsupported operand types
+td % 1 # E: Unsupported operand types
+td / dt # E: No overload
+td % dt # E: Unsupported operand types
+
+-b_ # E: Unsupported operand type
++b_ # E: Unsupported operand type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi
new file mode 100644
index 00000000..27889463
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_constructors.pyi
@@ -0,0 +1,33 @@
+import numpy as np
+
+a: np.ndarray
+generator = (i for i in range(10))
+
+np.require(a, requirements=1) # E: No overload variant
+np.require(a, requirements="TEST") # E: incompatible type
+
+np.zeros("test") # E: incompatible type
+np.zeros() # E: require at least one argument
+
+np.ones("test") # E: incompatible type
+np.ones() # E: require at least one argument
+
+np.array(0, float, True) # E: No overload variant
+
+np.linspace(None, 'bob') # E: No overload variant
+np.linspace(0, 2, num=10.0) # E: No overload variant
+np.linspace(0, 2, endpoint='True') # E: No overload variant
+np.linspace(0, 2, retstep=b'False') # E: No overload variant
+np.linspace(0, 2, dtype=0) # E: No overload variant
+np.linspace(0, 2, axis=None) # E: No overload variant
+
+np.logspace(None, 'bob') # E: No overload variant
+np.logspace(0, 2, base=None) # E: No overload variant
+
+np.geomspace(None, 'bob') # E: No overload variant
+
+np.stack(generator) # E: No overload variant
+np.hstack({1, 2}) # E: No overload variant
+np.vstack(1) # E: No overload variant
+
+np.array([1], like=1) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_like.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_like.pyi
new file mode 100644
index 00000000..133b5fd4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_like.pyi
@@ -0,0 +1,16 @@
+import numpy as np
+from numpy._typing import ArrayLike
+
+
+class A:
+ pass
+
+
+x1: ArrayLike = (i for i in range(10)) # E: Incompatible types in assignment
+x2: ArrayLike = A() # E: Incompatible types in assignment
+x3: ArrayLike = {1: "foo", 2: "bar"} # E: Incompatible types in assignment
+
+scalar = np.int64(1)
+scalar.__array__(dtype=np.float64) # E: No overload variant
+array = np.array([1])
+array.__array__(dtype=np.float64) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_pad.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_pad.pyi
new file mode 100644
index 00000000..2be51a87
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/array_pad.pyi
@@ -0,0 +1,6 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+
+np.pad(AR_i8, 2, mode="bob") # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi
new file mode 100644
index 00000000..71b921e3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayprint.pyi
@@ -0,0 +1,14 @@
+from collections.abc import Callable
+from typing import Any
+import numpy as np
+
+AR: np.ndarray
+func1: Callable[[Any], str]
+func2: Callable[[np.integer[Any]], str]
+
+np.array2string(AR, style=None) # E: Unexpected keyword argument
+np.array2string(AR, legacy="1.14") # E: incompatible type
+np.array2string(AR, sign="*") # E: incompatible type
+np.array2string(AR, floatmode="default") # E: incompatible type
+np.array2string(AR, formatter={"A": func1}) # E: incompatible type
+np.array2string(AR, formatter={"float": func2}) # E: Incompatible types
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi
new file mode 100644
index 00000000..c50fb2ec
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/arrayterator.pyi
@@ -0,0 +1,14 @@
+from typing import Any
+import numpy as np
+
+AR_i8: np.ndarray[Any, np.dtype[np.int64]]
+ar_iter = np.lib.Arrayterator(AR_i8)
+
+np.lib.Arrayterator(np.int64()) # E: incompatible type
+ar_iter.shape = (10, 5) # E: is read-only
+ar_iter[None] # E: Invalid index type
+ar_iter[None, 1] # E: Invalid index type
+ar_iter[np.intp()] # E: Invalid index type
+ar_iter[np.intp(), ...] # E: Invalid index type
+ar_iter[AR_i8] # E: Invalid index type
+ar_iter[AR_i8, :] # E: Invalid index type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi
new file mode 100644
index 00000000..ee909000
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/bitwise_ops.pyi
@@ -0,0 +1,20 @@
+import numpy as np
+
+i8 = np.int64()
+i4 = np.int32()
+u8 = np.uint64()
+b_ = np.bool_()
+i = int()
+
+f8 = np.float64()
+
+b_ >> f8 # E: No overload variant
+i8 << f8 # E: No overload variant
+i | f8 # E: Unsupported operand types
+i8 ^ f8 # E: No overload variant
+u8 & f8 # E: No overload variant
+~f8 # E: Unsupported operand type
+
+# mypy's error message for `NoReturn` is unfortunately pretty bad
+# TODO: Re-enable this once we add support for numerical precision for `number`s
+# a = u8 | 0 # E: Need type annotation
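
For context on the disabled case above: the operation itself is fine at
runtime; the stubs simply cannot yet express the precision of the result.
A quick runtime check (illustrative snippet, assuming NumPy's value-based
casting for mixed scalar/Python-int operands):

import numpy as np

u8 = np.uint64(1)
print(type(u8 | 0))  # <class 'numpy.uint64'> at runtime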
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/char.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/char.pyi
new file mode 100644
index 00000000..320f05df
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/char.pyi
@@ -0,0 +1,66 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_U: npt.NDArray[np.str_]
+AR_S: npt.NDArray[np.bytes_]
+
+np.char.equal(AR_U, AR_S) # E: incompatible type
+
+np.char.not_equal(AR_U, AR_S) # E: incompatible type
+
+np.char.greater_equal(AR_U, AR_S) # E: incompatible type
+
+np.char.less_equal(AR_U, AR_S) # E: incompatible type
+
+np.char.greater(AR_U, AR_S) # E: incompatible type
+
+np.char.less(AR_U, AR_S) # E: incompatible type
+
+np.char.encode(AR_S) # E: incompatible type
+np.char.decode(AR_U) # E: incompatible type
+
+np.char.join(AR_U, b"_") # E: incompatible type
+np.char.join(AR_S, "_") # E: incompatible type
+
+np.char.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type
+np.char.ljust(AR_S, 5, fillchar="a") # E: incompatible type
+np.char.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type
+np.char.rjust(AR_S, 5, fillchar="a") # E: incompatible type
+
+np.char.lstrip(AR_U, chars=b"a") # E: incompatible type
+np.char.lstrip(AR_S, chars="a") # E: incompatible type
+np.char.strip(AR_U, chars=b"a") # E: incompatible type
+np.char.strip(AR_S, chars="a") # E: incompatible type
+np.char.rstrip(AR_U, chars=b"a") # E: incompatible type
+np.char.rstrip(AR_S, chars="a") # E: incompatible type
+
+np.char.partition(AR_U, b"a") # E: incompatible type
+np.char.partition(AR_S, "a") # E: incompatible type
+np.char.rpartition(AR_U, b"a") # E: incompatible type
+np.char.rpartition(AR_S, "a") # E: incompatible type
+
+np.char.replace(AR_U, b"_", b"-") # E: incompatible type
+np.char.replace(AR_S, "_", "-") # E: incompatible type
+
+np.char.split(AR_U, b"_") # E: incompatible type
+np.char.split(AR_S, "_") # E: incompatible type
+np.char.rsplit(AR_U, b"_") # E: incompatible type
+np.char.rsplit(AR_S, "_") # E: incompatible type
+
+np.char.count(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.count(AR_S, "a", end=9) # E: incompatible type
+
+np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.endswith(AR_S, "a", end=9) # E: incompatible type
+np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.startswith(AR_S, "a", end=9) # E: incompatible type
+
+np.char.find(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.find(AR_S, "a", end=9) # E: incompatible type
+np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.rfind(AR_S, "a", end=9) # E: incompatible type
+
+np.char.index(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.index(AR_S, "a", end=9) # E: incompatible type
+np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # E: incompatible type
+np.char.rindex(AR_S, "a", end=9) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/chararray.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/chararray.pyi
new file mode 100644
index 00000000..ebc182ec
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/chararray.pyi
@@ -0,0 +1,62 @@
+import numpy as np
+from typing import Any
+
+AR_U: np.chararray[Any, np.dtype[np.str_]]
+AR_S: np.chararray[Any, np.dtype[np.bytes_]]
+
+AR_S.encode() # E: Invalid self argument
+AR_U.decode() # E: Invalid self argument
+
+AR_U.join(b"_") # E: incompatible type
+AR_S.join("_") # E: incompatible type
+
+AR_U.ljust(5, fillchar=b"a") # E: incompatible type
+AR_S.ljust(5, fillchar="a") # E: incompatible type
+AR_U.rjust(5, fillchar=b"a") # E: incompatible type
+AR_S.rjust(5, fillchar="a") # E: incompatible type
+
+AR_U.lstrip(chars=b"a") # E: incompatible type
+AR_S.lstrip(chars="a") # E: incompatible type
+AR_U.strip(chars=b"a") # E: incompatible type
+AR_S.strip(chars="a") # E: incompatible type
+AR_U.rstrip(chars=b"a") # E: incompatible type
+AR_S.rstrip(chars="a") # E: incompatible type
+
+AR_U.partition(b"a") # E: incompatible type
+AR_S.partition("a") # E: incompatible type
+AR_U.rpartition(b"a") # E: incompatible type
+AR_S.rpartition("a") # E: incompatible type
+
+AR_U.replace(b"_", b"-") # E: incompatible type
+AR_S.replace("_", "-") # E: incompatible type
+
+AR_U.split(b"_") # E: incompatible type
+AR_S.split("_") # E: incompatible type
+AR_S.split(1) # E: incompatible type
+AR_U.rsplit(b"_") # E: incompatible type
+AR_S.rsplit("_") # E: incompatible type
+
+AR_U.count(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.count("a", end=9) # E: incompatible type
+
+AR_U.endswith(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.endswith("a", end=9) # E: incompatible type
+AR_U.startswith(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.startswith("a", end=9) # E: incompatible type
+
+AR_U.find(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.find("a", end=9) # E: incompatible type
+AR_U.rfind(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.rfind("a", end=9) # E: incompatible type
+
+AR_U.index(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.index("a", end=9) # E: incompatible type
+AR_U.rindex(b"a", start=[1, 2, 3]) # E: incompatible type
+AR_S.rindex("a", end=9) # E: incompatible type
+
+AR_U == AR_S # E: Unsupported operand types
+AR_U != AR_S # E: Unsupported operand types
+AR_U >= AR_S # E: Unsupported operand types
+AR_U <= AR_S # E: Unsupported operand types
+AR_U > AR_S # E: Unsupported operand types
+AR_U < AR_S # E: Unsupported operand types
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/comparisons.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/comparisons.pyi
new file mode 100644
index 00000000..febd0a18
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/comparisons.pyi
@@ -0,0 +1,27 @@
+from typing import Any
+import numpy as np
+
+AR_i: np.ndarray[Any, np.dtype[np.int64]]
+AR_f: np.ndarray[Any, np.dtype[np.float64]]
+AR_c: np.ndarray[Any, np.dtype[np.complex128]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+
+AR_f > AR_m # E: Unsupported operand types
+AR_c > AR_m # E: Unsupported operand types
+
+AR_m > AR_f # E: Unsupported operand types
+AR_m > AR_c # E: Unsupported operand types
+
+AR_i > AR_M # E: Unsupported operand types
+AR_f > AR_M # E: Unsupported operand types
+AR_m > AR_M # E: Unsupported operand types
+
+AR_M > AR_i # E: Unsupported operand types
+AR_M > AR_f # E: Unsupported operand types
+AR_M > AR_m # E: Unsupported operand types
+
+AR_i > str() # E: No overload variant
+AR_i > bytes() # E: No overload variant
+str() > AR_M # E: Unsupported operand types
+bytes() > AR_M # E: Unsupported operand types
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/constants.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/constants.pyi
new file mode 100644
index 00000000..324cbe9f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/constants.pyi
@@ -0,0 +1,7 @@
+import numpy as np
+
+np.Inf = np.Inf # E: Cannot assign to final
+np.ALLOW_THREADS = np.ALLOW_THREADS # E: Cannot assign to final
+np.little_endian = np.little_endian # E: Cannot assign to final
+np.UFUNC_PYVALS_NAME = "bob" # E: Incompatible types
+np.CLIP = 2 # E: Incompatible types
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/datasource.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/datasource.pyi
new file mode 100644
index 00000000..345277d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/datasource.pyi
@@ -0,0 +1,15 @@
+from pathlib import Path
+import numpy as np
+
+path: Path
+d1: np.DataSource
+
+d1.abspath(path) # E: incompatible type
+d1.abspath(b"...") # E: incompatible type
+
+d1.exists(path) # E: incompatible type
+d1.exists(b"...") # E: incompatible type
+
+d1.open(path, "r") # E: incompatible type
+d1.open(b"...", encoding="utf8") # E: incompatible type
+d1.open(None, newline="/n") # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/dtype.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/dtype.pyi
new file mode 100644
index 00000000..0f3810f3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/dtype.pyi
@@ -0,0 +1,20 @@
+import numpy as np
+
+
+class Test1:
+ not_dtype = np.dtype(float)
+
+
+class Test2:
+ dtype = float
+
+
+np.dtype(Test1()) # E: No overload variant of "dtype" matches
+np.dtype(Test2()) # E: incompatible type
+
+np.dtype( # E: No overload variant of "dtype" matches
+ {
+ "field1": (float, 1),
+ "field2": (int, 3),
+ }
+)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi
new file mode 100644
index 00000000..f0e3f1e9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/einsumfunc.pyi
@@ -0,0 +1,15 @@
+from typing import Any
+import numpy as np
+
+AR_i: np.ndarray[Any, np.dtype[np.int64]]
+AR_f: np.ndarray[Any, np.dtype[np.float64]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_O: np.ndarray[Any, np.dtype[np.object_]]
+AR_U: np.ndarray[Any, np.dtype[np.str_]]
+
+np.einsum("i,i->i", AR_i, AR_m) # E: incompatible type
+np.einsum("i,i->i", AR_O, AR_O) # E: incompatible type
+np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # E: incompatible type
+np.einsum("i,i->i", AR_i, AR_i, dtype=np.timedelta64, casting="unsafe") # E: No overload variant
+np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # E: Value of type variable "_ArrayType" of "einsum" cannot be
+np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/false_positives.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/false_positives.pyi
new file mode 100644
index 00000000..7e792306
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/false_positives.pyi
@@ -0,0 +1,11 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+
+# NOTE: Mypy bug presumably due to the special-casing of heterogeneous tuples;
+# xref numpy/numpy#20901
+#
+# The expected output should be no different than, e.g., when using a
+# list instead of a tuple
+np.concatenate(([1], AR_f8)) # E: Argument 1 to "concatenate" has incompatible type
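
A short illustration of the false positive documented above: only the tuple
spelling trips mypy, while the runtime-equivalent list spelling type-checks
cleanly (illustrative snippet, not part of the vendored file):

import numpy as np
import numpy.typing as npt

AR: npt.NDArray[np.float64] = np.array([2.0])

np.concatenate([[1], AR])   # accepted by the stubs
np.concatenate(([1], AR))   # flagged by mypy despite identical behaviour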
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/flatiter.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/flatiter.pyi
new file mode 100644
index 00000000..b4ce10ba
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/flatiter.pyi
@@ -0,0 +1,25 @@
+from typing import Any
+
+import numpy as np
+from numpy._typing import _SupportsArray
+
+
+class Index:
+ def __index__(self) -> int:
+ ...
+
+
+a: "np.flatiter[np.ndarray]"
+supports_array: _SupportsArray
+
+a.base = Any # E: Property "base" defined in "flatiter" is read-only
+a.coords = Any # E: Property "coords" defined in "flatiter" is read-only
+a.index = Any # E: Property "index" defined in "flatiter" is read-only
+a.copy(order='C') # E: Unexpected keyword argument
+
+# NOTE: Contrary to `ndarray.__getitem__`, its counterpart in `flatiter`
+# does not accept objects with the `__array__` or `__index__` protocols;
+# boolean indexing is just plain broken (gh-17175)
+a[np.bool_()] # E: No overload variant of "__getitem__"
+a[Index()] # E: No overload variant of "__getitem__"
+a[supports_array] # E: No overload variant of "__getitem__"
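
As the note above says, `flatiter.__getitem__` is typed far more narrowly
than `ndarray.__getitem__`: plain integers and slices pass, while objects
implementing only `__index__` or `__array__` (and boolean scalars) are
rejected. For contrast, the forms the stubs do accept (illustrative
snippet):

import numpy as np

flat = np.arange(6).reshape(2, 3).flat
print(flat[4])    # integer indexing: fine for the stubs and the runtime
print(flat[1:3])  # slice indexing: likewise fine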
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi
new file mode 100644
index 00000000..b679703c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/fromnumeric.pyi
@@ -0,0 +1,161 @@
+"""Tests for :mod:`numpy.core.fromnumeric`."""
+
+import numpy as np
+import numpy.typing as npt
+
+A = np.array(True, ndmin=2, dtype=bool)
+A.setflags(write=False)
+AR_U: npt.NDArray[np.str_]
+
+a = np.bool_(True)
+
+np.take(a, None) # E: No overload variant
+np.take(a, axis=1.0) # E: No overload variant
+np.take(A, out=1) # E: No overload variant
+np.take(A, mode="bob") # E: No overload variant
+
+np.reshape(a, None) # E: No overload variant
+np.reshape(A, 1, order="bob") # E: No overload variant
+
+np.choose(a, None) # E: No overload variant
+np.choose(a, out=1.0) # E: No overload variant
+np.choose(A, mode="bob") # E: No overload variant
+
+np.repeat(a, None) # E: No overload variant
+np.repeat(A, 1, axis=1.0) # E: No overload variant
+
+np.swapaxes(A, None, 1) # E: No overload variant
+np.swapaxes(A, 1, [0]) # E: No overload variant
+
+np.transpose(A, axes=1.0) # E: No overload variant
+
+np.partition(a, None) # E: No overload variant
+np.partition( # E: No overload variant
+ a, 0, axis="bob"
+)
+np.partition( # E: No overload variant
+ A, 0, kind="bob"
+)
+np.partition(
+ A, 0, order=range(5) # E: Argument "order" to "partition" has incompatible type
+)
+
+np.argpartition(
+ a, None # E: incompatible type
+)
+np.argpartition(
+ a, 0, axis="bob" # E: incompatible type
+)
+np.argpartition(
+ A, 0, kind="bob" # E: incompatible type
+)
+np.argpartition(
+ A, 0, order=range(5) # E: Argument "order" to "argpartition" has incompatible type
+)
+
+np.sort(A, axis="bob") # E: No overload variant
+np.sort(A, kind="bob") # E: No overload variant
+np.sort(A, order=range(5)) # E: Argument "order" to "sort" has incompatible type
+
+np.argsort(A, axis="bob") # E: Argument "axis" to "argsort" has incompatible type
+np.argsort(A, kind="bob") # E: Argument "kind" to "argsort" has incompatible type
+np.argsort(A, order=range(5)) # E: Argument "order" to "argsort" has incompatible type
+
+np.argmax(A, axis="bob") # E: No overload variant of "argmax" matches argument type
+np.argmax(A, kind="bob") # E: No overload variant of "argmax" matches argument type
+
+np.argmin(A, axis="bob") # E: No overload variant of "argmin" matches argument type
+np.argmin(A, kind="bob") # E: No overload variant of "argmin" matches argument type
+
+np.searchsorted( # E: No overload variant of "searchsorted" matches argument type
+ A[0], 0, side="bob"
+)
+np.searchsorted( # E: No overload variant of "searchsorted" matches argument type
+ A[0], 0, sorter=1.0
+)
+
+np.resize(A, 1.0) # E: No overload variant
+
+np.squeeze(A, 1.0) # E: No overload variant of "squeeze" matches argument type
+
+np.diagonal(A, offset=None) # E: No overload variant
+np.diagonal(A, axis1="bob") # E: No overload variant
+np.diagonal(A, axis2=[]) # E: No overload variant
+
+np.trace(A, offset=None) # E: No overload variant
+np.trace(A, axis1="bob") # E: No overload variant
+np.trace(A, axis2=[]) # E: No overload variant
+
+np.ravel(a, order="bob") # E: No overload variant
+
+np.compress( # E: No overload variant
+ [True], A, axis=1.0
+)
+
+np.clip(a, 1, 2, out=1) # E: No overload variant of "clip" matches argument type
+
+np.sum(a, axis=1.0) # E: No overload variant
+np.sum(a, keepdims=1.0) # E: No overload variant
+np.sum(a, initial=[1]) # E: No overload variant
+
+np.all(a, axis=1.0) # E: No overload variant
+np.all(a, keepdims=1.0) # E: No overload variant
+np.all(a, out=1.0) # E: No overload variant
+
+np.any(a, axis=1.0) # E: No overload variant
+np.any(a, keepdims=1.0) # E: No overload variant
+np.any(a, out=1.0) # E: No overload variant
+
+np.cumsum(a, axis=1.0) # E: No overload variant
+np.cumsum(a, dtype=1.0) # E: No overload variant
+np.cumsum(a, out=1.0) # E: No overload variant
+
+np.ptp(a, axis=1.0) # E: No overload variant
+np.ptp(a, keepdims=1.0) # E: No overload variant
+np.ptp(a, out=1.0) # E: No overload variant
+
+np.amax(a, axis=1.0) # E: No overload variant
+np.amax(a, keepdims=1.0) # E: No overload variant
+np.amax(a, out=1.0) # E: No overload variant
+np.amax(a, initial=[1.0]) # E: No overload variant
+np.amax(a, where=[1.0]) # E: incompatible type
+
+np.amin(a, axis=1.0) # E: No overload variant
+np.amin(a, keepdims=1.0) # E: No overload variant
+np.amin(a, out=1.0) # E: No overload variant
+np.amin(a, initial=[1.0]) # E: No overload variant
+np.amin(a, where=[1.0]) # E: incompatible type
+
+np.prod(a, axis=1.0) # E: No overload variant
+np.prod(a, out=False) # E: No overload variant
+np.prod(a, keepdims=1.0) # E: No overload variant
+np.prod(a, initial=int) # E: No overload variant
+np.prod(a, where=1.0) # E: No overload variant
+np.prod(AR_U) # E: incompatible type
+
+np.cumprod(a, axis=1.0) # E: No overload variant
+np.cumprod(a, out=False) # E: No overload variant
+np.cumprod(AR_U) # E: incompatible type
+
+np.size(a, axis=1.0) # E: Argument "axis" to "size" has incompatible type
+
+np.around(a, decimals=1.0) # E: No overload variant
+np.around(a, out=type) # E: No overload variant
+np.around(AR_U) # E: incompatible type
+
+np.mean(a, axis=1.0) # E: No overload variant
+np.mean(a, out=False) # E: No overload variant
+np.mean(a, keepdims=1.0) # E: No overload variant
+np.mean(AR_U) # E: incompatible type
+
+np.std(a, axis=1.0) # E: No overload variant
+np.std(a, out=False) # E: No overload variant
+np.std(a, ddof='test') # E: No overload variant
+np.std(a, keepdims=1.0) # E: No overload variant
+np.std(AR_U) # E: incompatible type
+
+np.var(a, axis=1.0) # E: No overload variant
+np.var(a, out=False) # E: No overload variant
+np.var(a, ddof='test') # E: No overload variant
+np.var(a, keepdims=1.0) # E: No overload variant
+np.var(AR_U) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/histograms.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/histograms.pyi
new file mode 100644
index 00000000..22499d39
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/histograms.pyi
@@ -0,0 +1,12 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+
+np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # E: incompatible type
+
+np.histogram(AR_i8, range=(0, 1, 2)) # E: incompatible type
+
+np.histogramdd(AR_i8, range=(0, 1)) # E: incompatible type
+np.histogramdd(AR_i8, range=[(0, 1, 2)]) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi
new file mode 100644
index 00000000..22f6f4a6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/index_tricks.pyi
@@ -0,0 +1,14 @@
+import numpy as np
+
+AR_LIKE_i: list[int]
+AR_LIKE_f: list[float]
+
+np.ndindex([1, 2, 3]) # E: No overload variant
+np.unravel_index(AR_LIKE_f, (1, 2, 3)) # E: incompatible type
+np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # E: No overload variant
+np.mgrid[1] # E: Invalid index type
+np.mgrid[...] # E: Invalid index type
+np.ogrid[1] # E: Invalid index type
+np.ogrid[...] # E: Invalid index type
+np.fill_diagonal(AR_LIKE_f, 2) # E: incompatible type
+np.diag_indices(1.0) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi
new file mode 100644
index 00000000..9cad2da0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_function_base.pyi
@@ -0,0 +1,53 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_m: npt.NDArray[np.timedelta64]
+AR_M: npt.NDArray[np.datetime64]
+AR_O: npt.NDArray[np.object_]
+
+def func(a: int) -> None: ...
+
+np.average(AR_m) # E: incompatible type
+np.select(1, [AR_f8]) # E: incompatible type
+np.angle(AR_m) # E: incompatible type
+np.unwrap(AR_m) # E: incompatible type
+np.unwrap(AR_c16) # E: incompatible type
+np.trim_zeros(1) # E: incompatible type
+np.place(1, [True], 1.5) # E: incompatible type
+np.vectorize(1) # E: incompatible type
+np.add_newdoc("__main__", 1.5, "docstring") # E: incompatible type
+np.place(AR_f8, slice(None), 5) # E: incompatible type
+
+np.interp(AR_f8, AR_c16, AR_f8) # E: incompatible type
+np.interp(AR_c16, AR_f8, AR_f8) # E: incompatible type
+np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # E: No overload variant
+np.interp(AR_f8, AR_f8, AR_O) # E: incompatible type
+
+np.cov(AR_m) # E: incompatible type
+np.cov(AR_O) # E: incompatible type
+np.corrcoef(AR_m) # E: incompatible type
+np.corrcoef(AR_O) # E: incompatible type
+np.corrcoef(AR_f8, bias=True) # E: No overload variant
+np.corrcoef(AR_f8, ddof=2) # E: No overload variant
+np.blackman(1j) # E: incompatible type
+np.bartlett(1j) # E: incompatible type
+np.hanning(1j) # E: incompatible type
+np.hamming(1j) # E: incompatible type
+np.hamming(AR_c16) # E: incompatible type
+np.kaiser(1j, 1) # E: incompatible type
+np.sinc(AR_O) # E: incompatible type
+np.median(AR_M) # E: incompatible type
+
+np.add_newdoc_ufunc(func, "docstring") # E: incompatible type
+np.percentile(AR_f8, 50j) # E: No overload variant
+np.percentile(AR_f8, 50, interpolation="bob") # E: No overload variant
+np.quantile(AR_f8, 0.5j) # E: No overload variant
+np.quantile(AR_f8, 0.5, interpolation="bob") # E: No overload variant
+np.meshgrid(AR_f8, AR_f8, indexing="bob") # E: incompatible type
+np.delete(AR_f8, AR_f8) # E: incompatible type
+np.insert(AR_f8, AR_f8, 1.5) # E: incompatible type
+np.digitize(AR_f8, 1j) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi
new file mode 100644
index 00000000..ca02d7bd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_polynomial.pyi
@@ -0,0 +1,29 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+AR_U: npt.NDArray[np.str_]
+
+poly_obj: np.poly1d
+
+np.polyint(AR_U) # E: incompatible type
+np.polyint(AR_f8, m=1j) # E: No overload variant
+
+np.polyder(AR_U) # E: incompatible type
+np.polyder(AR_f8, m=1j) # E: No overload variant
+
+np.polyfit(AR_O, AR_f8, 1) # E: incompatible type
+np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # E: No overload variant
+np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # E: incompatible type
+np.polyfit(AR_f8, AR_f8, 1, cov="bob") # E: No overload variant
+
+np.polyval(AR_f8, AR_U) # E: incompatible type
+np.polyadd(AR_f8, AR_U) # E: incompatible type
+np.polysub(AR_f8, AR_U) # E: incompatible type
+np.polymul(AR_f8, AR_U) # E: incompatible type
+np.polydiv(AR_f8, AR_U) # E: incompatible type
+
+5**poly_obj # E: No overload variant
+hash(poly_obj)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi
new file mode 100644
index 00000000..e16c926a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_utils.pyi
@@ -0,0 +1,13 @@
+import numpy as np
+
+np.deprecate(1) # E: No overload variant
+
+np.deprecate_with_doc(1) # E: incompatible type
+
+np.byte_bounds(1) # E: incompatible type
+
+np.who(1) # E: incompatible type
+
+np.lookfor(None) # E: incompatible type
+
+np.safe_eval(None) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_version.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_version.pyi
new file mode 100644
index 00000000..2758cfe4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/lib_version.pyi
@@ -0,0 +1,6 @@
+from numpy.lib import NumpyVersion
+
+version: NumpyVersion
+
+NumpyVersion(b"1.8.0") # E: incompatible type
+version >= b"1.8.0" # E: Unsupported operand types
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/linalg.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/linalg.pyi
new file mode 100644
index 00000000..da939032
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/linalg.pyi
@@ -0,0 +1,48 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_O: npt.NDArray[np.object_]
+AR_M: npt.NDArray[np.datetime64]
+
+np.linalg.tensorsolve(AR_O, AR_O) # E: incompatible type
+
+np.linalg.solve(AR_O, AR_O) # E: incompatible type
+
+np.linalg.tensorinv(AR_O) # E: incompatible type
+
+np.linalg.inv(AR_O) # E: incompatible type
+
+np.linalg.matrix_power(AR_M, 5) # E: incompatible type
+
+np.linalg.cholesky(AR_O) # E: incompatible type
+
+np.linalg.qr(AR_O) # E: incompatible type
+np.linalg.qr(AR_f8, mode="bob") # E: No overload variant
+
+np.linalg.eigvals(AR_O) # E: incompatible type
+
+np.linalg.eigvalsh(AR_O) # E: incompatible type
+np.linalg.eigvalsh(AR_O, UPLO="bob") # E: No overload variant
+
+np.linalg.eig(AR_O) # E: incompatible type
+
+np.linalg.eigh(AR_O) # E: incompatible type
+np.linalg.eigh(AR_O, UPLO="bob") # E: No overload variant
+
+np.linalg.svd(AR_O) # E: incompatible type
+
+np.linalg.cond(AR_O) # E: incompatible type
+np.linalg.cond(AR_f8, p="bob") # E: incompatible type
+
+np.linalg.matrix_rank(AR_O) # E: incompatible type
+
+np.linalg.pinv(AR_O) # E: incompatible type
+
+np.linalg.slogdet(AR_O) # E: incompatible type
+
+np.linalg.det(AR_O) # E: incompatible type
+
+np.linalg.norm(AR_f8, ord="bob") # E: No overload variant
+
+np.linalg.multi_dot([AR_M]) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/memmap.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/memmap.pyi
new file mode 100644
index 00000000..434870b6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/memmap.pyi
@@ -0,0 +1,5 @@
+import numpy as np
+
+with open("file.txt", "r") as f:
+ np.memmap(f) # E: No overload variant
+np.memmap("test.txt", shape=[10, 5]) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/modules.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/modules.pyi
new file mode 100644
index 00000000..59e724f2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/modules.pyi
@@ -0,0 +1,18 @@
+import numpy as np
+
+np.testing.bob # E: Module has no attribute
+np.bob # E: Module has no attribute
+
+# Stdlib modules in the namespace by accident
+np.warnings # E: Module has no attribute
+np.sys # E: Module has no attribute
+np.os # E: Module has no attribute
+np.math # E: Module has no attribute
+
+# Public sub-modules that are not imported into their parent module by default;
+# e.g. one must first execute `import numpy.lib.recfunctions`
+np.lib.recfunctions # E: Module has no attribute
+
+np.__NUMPY_SETUP__ # E: Module has no attribute
+np.__deprecated_attrs__ # E: Module has no attribute
+np.__expired_functions__ # E: Module has no attribute
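
The sub-module case above mirrors runtime behaviour: numpy.lib.recfunctions
only becomes available after an explicit import, for example (illustrative
snippet):

import numpy as np
import numpy.lib.recfunctions as rfn  # the explicit import the comment refers to

a = np.array([(1, 2.0)], dtype=[("x", np.int64), ("y", np.float64)])
print(rfn.structured_to_unstructured(a))  # [[1. 2.]]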
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/multiarray.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/multiarray.pyi
new file mode 100644
index 00000000..425ec3d0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/multiarray.pyi
@@ -0,0 +1,55 @@
+import numpy as np
+import numpy.typing as npt
+
+i8: np.int64
+
+AR_b: npt.NDArray[np.bool_]
+AR_u1: npt.NDArray[np.uint8]
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_M: npt.NDArray[np.datetime64]
+
+M: np.datetime64
+
+AR_LIKE_f: list[float]
+
+def func(a: int) -> None: ...
+
+np.where(AR_b, 1) # E: No overload variant
+
+np.can_cast(AR_f8, 1) # E: incompatible type
+
+np.vdot(AR_M, AR_M) # E: incompatible type
+
+np.copyto(AR_LIKE_f, AR_f8) # E: incompatible type
+
+np.putmask(AR_LIKE_f, [True, True, False], 1.5) # E: incompatible type
+
+np.packbits(AR_f8) # E: incompatible type
+np.packbits(AR_u1, bitorder=">") # E: incompatible type
+
+np.unpackbits(AR_i8) # E: incompatible type
+np.unpackbits(AR_u1, bitorder=">") # E: incompatible type
+
+np.shares_memory(1, 1, max_work=i8) # E: incompatible type
+np.may_share_memory(1, 1, max_work=i8) # E: incompatible type
+
+np.arange(M) # E: No overload variant
+np.arange(stop=10) # E: No overload variant
+
+np.datetime_data(int) # E: incompatible type
+
+np.busday_offset("2012", 10) # E: No overload variant
+
+np.datetime_as_string("2012") # E: No overload variant
+
+np.compare_chararrays("a", b"a", "==", False) # E: No overload variant
+
+np.add_docstring(func, None) # E: incompatible type
+
+np.nested_iters([AR_i8, AR_i8]) # E: Missing positional argument
+np.nested_iters([AR_i8, AR_i8], 0) # E: incompatible type
+np.nested_iters([AR_i8, AR_i8], [0]) # E: incompatible type
+np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # E: incompatible type
+np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # E: incompatible type
+np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray.pyi
new file mode 100644
index 00000000..5a5130d4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray.pyi
@@ -0,0 +1,11 @@
+import numpy as np
+
+# Ban setting dtype, since mutating the type of the array in place
+# makes it impossible for ndarray to be generic over dtype. Generally,
+# users should use `ndarray.view` in this situation anyway. See
+#
+# https://github.com/numpy/numpy-stubs/issues/7
+#
+# for more context.
+float_array = np.array([1.0])
+float_array.dtype = np.bool_ # E: Property "dtype" defined in "ndarray" is read-only
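
The `ndarray.view` alternative recommended in the comment above reinterprets
the existing buffer under a new dtype and returns a new, statically typed
array object instead of mutating the original in place (illustrative
snippet):

import numpy as np

float_array = np.array([1.0])
bool_view = float_array.view(np.bool_)  # new ndarray, new dtype; original untouched
print(float_array.dtype, bool_view.dtype)  # float64 bool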
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi
new file mode 100644
index 00000000..77bd9a44
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ndarray_misc.pyi
@@ -0,0 +1,43 @@
+"""
+Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
+
+More extensive tests are performed for the methods'
+function-based counterpart in `../from_numeric.py`.
+
+"""
+
+from typing import Any
+import numpy as np
+
+f8: np.float64
+AR_f8: np.ndarray[Any, np.dtype[np.float64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+AR_b: np.ndarray[Any, np.dtype[np.bool_]]
+
+ctypes_obj = AR_f8.ctypes
+
+reveal_type(ctypes_obj.get_data()) # E: has no attribute
+reveal_type(ctypes_obj.get_shape()) # E: has no attribute
+reveal_type(ctypes_obj.get_strides()) # E: has no attribute
+reveal_type(ctypes_obj.get_as_parameter()) # E: has no attribute
+
+f8.argpartition(0) # E: has no attribute
+f8.diagonal() # E: has no attribute
+f8.dot(1) # E: has no attribute
+f8.nonzero() # E: has no attribute
+f8.partition(0) # E: has no attribute
+f8.put(0, 2) # E: has no attribute
+f8.setfield(2, np.float64) # E: has no attribute
+f8.sort() # E: has no attribute
+f8.trace() # E: has no attribute
+
+AR_M.__int__() # E: Invalid self argument
+AR_M.__float__() # E: Invalid self argument
+AR_M.__complex__() # E: Invalid self argument
+AR_b.__index__() # E: Invalid self argument
+
+AR_f8[1.5] # E: No overload variant
+AR_f8["field_a"] # E: No overload variant
+AR_f8[["field_a", "field_b"]] # E: Invalid index type
+
+AR_f8.__array_finalize__(object()) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nditer.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nditer.pyi
new file mode 100644
index 00000000..1e8e37ee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nditer.pyi
@@ -0,0 +1,8 @@
+import numpy as np
+
+class Test(np.nditer): ... # E: Cannot inherit from final class
+
+np.nditer([0, 1], flags=["test"]) # E: incompatible type
+np.nditer([0, 1], op_flags=[["test"]]) # E: incompatible type
+np.nditer([0, 1], itershape=(1.0,)) # E: incompatible type
+np.nditer([0, 1], buffersize=1.0) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi
new file mode 100644
index 00000000..6301e517
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/nested_sequence.pyi
@@ -0,0 +1,17 @@
+from collections.abc import Sequence
+from numpy._typing import _NestedSequence
+
+a: Sequence[float]
+b: list[complex]
+c: tuple[str, ...]
+d: int
+e: str
+
+def func(a: _NestedSequence[int]) -> None:
+ ...
+
+reveal_type(func(a)) # E: incompatible type
+reveal_type(func(b)) # E: incompatible type
+reveal_type(func(c)) # E: incompatible type
+reveal_type(func(d)) # E: incompatible type
+reveal_type(func(e)) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/npyio.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/npyio.pyi
new file mode 100644
index 00000000..c91b4c9c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/npyio.pyi
@@ -0,0 +1,30 @@
+import pathlib
+from typing import IO
+
+import numpy.typing as npt
+import numpy as np
+
+str_path: str
+bytes_path: bytes
+pathlib_path: pathlib.Path
+str_file: IO[str]
+AR_i8: npt.NDArray[np.int64]
+
+np.load(str_file) # E: incompatible type
+
+np.save(bytes_path, AR_i8) # E: incompatible type
+np.save(str_file, AR_i8) # E: incompatible type
+
+np.savez(bytes_path, AR_i8) # E: incompatible type
+np.savez(str_file, AR_i8) # E: incompatible type
+
+np.savez_compressed(bytes_path, AR_i8) # E: incompatible type
+np.savez_compressed(str_file, AR_i8) # E: incompatible type
+
+np.loadtxt(bytes_path) # E: incompatible type
+
+np.fromregex(bytes_path, ".", np.int64) # E: No overload variant
+
+np.recfromtxt(bytes_path) # E: incompatible type
+
+np.recfromcsv(bytes_path) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi
new file mode 100644
index 00000000..a5c2814e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/numerictypes.pyi
@@ -0,0 +1,13 @@
+import numpy as np
+
+# Technically this works, but it probably shouldn't. See
+#
+# https://github.com/numpy/numpy/issues/16366
+#
+np.maximum_sctype(1) # E: No overload variant
+
+np.issubsctype(1, np.int64) # E: incompatible type
+
+np.issubdtype(1, np.int64) # E: incompatible type
+
+np.find_common_type(np.int64, np.int64) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/random.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/random.pyi
new file mode 100644
index 00000000..f0e68201
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/random.pyi
@@ -0,0 +1,61 @@
+import numpy as np
+from typing import Any
+
+SEED_FLOAT: float = 457.3
+SEED_ARR_FLOAT: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0, 2, 3, 4])
+SEED_ARRLIKE_FLOAT: list[float] = [1.0, 2.0, 3.0, 4.0]
+SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0)
+SEED_STR: str = "String seeding not allowed"
+# default rng
+np.random.default_rng(SEED_FLOAT) # E: incompatible type
+np.random.default_rng(SEED_ARR_FLOAT) # E: incompatible type
+np.random.default_rng(SEED_ARRLIKE_FLOAT) # E: incompatible type
+np.random.default_rng(SEED_STR) # E: incompatible type
+
+# Seed Sequence
+np.random.SeedSequence(SEED_FLOAT) # E: incompatible type
+np.random.SeedSequence(SEED_ARR_FLOAT) # E: incompatible type
+np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # E: incompatible type
+np.random.SeedSequence(SEED_SEED_SEQ) # E: incompatible type
+np.random.SeedSequence(SEED_STR) # E: incompatible type
+
+seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence()
+seed_seq.spawn(11.5) # E: incompatible type
+seed_seq.generate_state(3.14) # E: incompatible type
+seed_seq.generate_state(3, np.uint8) # E: incompatible type
+seed_seq.generate_state(3, "uint8") # E: incompatible type
+seed_seq.generate_state(3, "u1") # E: incompatible type
+seed_seq.generate_state(3, np.uint16) # E: incompatible type
+seed_seq.generate_state(3, "uint16") # E: incompatible type
+seed_seq.generate_state(3, "u2") # E: incompatible type
+seed_seq.generate_state(3, np.int32) # E: incompatible type
+seed_seq.generate_state(3, "int32") # E: incompatible type
+seed_seq.generate_state(3, "i4") # E: incompatible type
+
+# Bit Generators
+np.random.MT19937(SEED_FLOAT) # E: incompatible type
+np.random.MT19937(SEED_ARR_FLOAT) # E: incompatible type
+np.random.MT19937(SEED_ARRLIKE_FLOAT) # E: incompatible type
+np.random.MT19937(SEED_STR) # E: incompatible type
+
+np.random.PCG64(SEED_FLOAT) # E: incompatible type
+np.random.PCG64(SEED_ARR_FLOAT) # E: incompatible type
+np.random.PCG64(SEED_ARRLIKE_FLOAT) # E: incompatible type
+np.random.PCG64(SEED_STR) # E: incompatible type
+
+np.random.Philox(SEED_FLOAT) # E: incompatible type
+np.random.Philox(SEED_ARR_FLOAT) # E: incompatible type
+np.random.Philox(SEED_ARRLIKE_FLOAT) # E: incompatible type
+np.random.Philox(SEED_STR) # E: incompatible type
+
+np.random.SFC64(SEED_FLOAT) # E: incompatible type
+np.random.SFC64(SEED_ARR_FLOAT) # E: incompatible type
+np.random.SFC64(SEED_ARRLIKE_FLOAT) # E: incompatible type
+np.random.SFC64(SEED_STR) # E: incompatible type
+
+# Generator
+np.random.Generator(None) # E: incompatible type
+np.random.Generator(12333283902830213) # E: incompatible type
+np.random.Generator("OxFEEDF00D") # E: incompatible type
+np.random.Generator([123, 234]) # E: incompatible type
+np.random.Generator(np.array([123, 234], dtype="u4")) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/rec.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/rec.pyi
new file mode 100644
index 00000000..a57f1ba2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/rec.pyi
@@ -0,0 +1,17 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+
+np.rec.fromarrays(1) # E: No overload variant
+np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant
+
+np.rec.fromrecords(AR_i8) # E: incompatible type
+np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant
+
+np.rec.fromstring("string", dtype=[("f8", "f8")]) # E: No overload variant
+np.rec.fromstring(b"bytes") # E: No overload variant
+np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # E: No overload variant
+
+with open("test", "r") as f:
+ np.rec.fromfile(f, dtype=[("f8", "f8")]) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/scalars.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/scalars.pyi
new file mode 100644
index 00000000..2a6c2c7a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/scalars.pyi
@@ -0,0 +1,92 @@
+import sys
+import numpy as np
+
+f2: np.float16
+f8: np.float64
+c8: np.complex64
+
+# Construction
+
+np.float32(3j) # E: incompatible type
+
+# Technically the following examples are valid NumPy code. But they
+# are not considered best practice, and people who wish to use the
+# stubs should instead do
+#
+# np.array([1.0, 0.0, 0.0], dtype=np.float32)
+# np.array([], dtype=np.complex64)
+#
+# See e.g. the discussion on the mailing list
+#
+# https://mail.python.org/pipermail/numpy-discussion/2020-April/080566.html
+#
+# and the issue
+#
+# https://github.com/numpy/numpy-stubs/issues/41
+#
+# for more context.
+np.float32([1.0, 0.0, 0.0]) # E: incompatible type
+np.complex64([]) # E: incompatible type
+
+np.complex64(1, 2) # E: Too many arguments
+# TODO: protocols (can't check for non-existent protocols w/ __getattr__)
+
+np.datetime64(0) # E: No overload variant
+
+class A:
+ def __float__(self):
+ return 1.0
+
+
+np.int8(A()) # E: incompatible type
+np.int16(A()) # E: incompatible type
+np.int32(A()) # E: incompatible type
+np.int64(A()) # E: incompatible type
+np.uint8(A()) # E: incompatible type
+np.uint16(A()) # E: incompatible type
+np.uint32(A()) # E: incompatible type
+np.uint64(A()) # E: incompatible type
+
+np.void("test") # E: No overload variant
+np.void("test", dtype=None) # E: No overload variant
+
+np.generic(1) # E: Cannot instantiate abstract class
+np.number(1) # E: Cannot instantiate abstract class
+np.integer(1) # E: Cannot instantiate abstract class
+np.inexact(1) # E: Cannot instantiate abstract class
+np.character("test") # E: Cannot instantiate abstract class
+np.flexible(b"test") # E: Cannot instantiate abstract class
+
+np.float64(value=0.0) # E: Unexpected keyword argument
+np.int64(value=0) # E: Unexpected keyword argument
+np.uint64(value=0) # E: Unexpected keyword argument
+np.complex128(value=0.0j) # E: Unexpected keyword argument
+np.str_(value='bob') # E: No overload variant
+np.bytes_(value=b'test') # E: No overload variant
+np.void(value=b'test') # E: No overload variant
+np.bool_(value=True) # E: Unexpected keyword argument
+np.datetime64(value="2019") # E: No overload variant
+np.timedelta64(value=0) # E: Unexpected keyword argument
+
+np.bytes_(b"hello", encoding='utf-8') # E: No overload variant
+np.str_("hello", encoding='utf-8') # E: No overload variant
+
+f8.item(1) # E: incompatible type
+f8.item((0, 1)) # E: incompatible type
+f8.squeeze(axis=1) # E: incompatible type
+f8.squeeze(axis=(0, 1)) # E: incompatible type
+f8.transpose(1) # E: incompatible type
+
+def func(a: np.float32) -> None: ...
+
+func(f2) # E: incompatible type
+func(f8) # E: incompatible type
+
+round(c8) # E: No overload variant
+
+c8.__getnewargs__() # E: Invalid self argument
+f2.__getnewargs__() # E: Invalid self argument
+f2.hex() # E: Invalid self argument
+np.float16.fromhex("0x0.0p+0") # E: Invalid self argument
+f2.__trunc__() # E: Invalid self argument
+f2.__getformat__("float") # E: Invalid self argument
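
For reference, the array-based spellings recommended by the construction
comment near the top of this file (illustrative snippet):

import numpy as np

a = np.array([1.0, 0.0, 0.0], dtype=np.float32)  # instead of np.float32([...])
b = np.array([], dtype=np.complex64)             # instead of np.complex64([])
print(a.dtype, b.dtype)  # float32 complex64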
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/shape_base.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/shape_base.pyi
new file mode 100644
index 00000000..e709741b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/shape_base.pyi
@@ -0,0 +1,8 @@
+import numpy as np
+
+class DTypeLike:
+ dtype: np.dtype[np.int_]
+
+dtype_like: DTypeLike
+
+np.expand_dims(dtype_like, (5, 10)) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi
new file mode 100644
index 00000000..f2bfba74
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/stride_tricks.pyi
@@ -0,0 +1,9 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+
+np.lib.stride_tricks.as_strided(AR_f8, shape=8) # E: No overload variant
+np.lib.stride_tricks.as_strided(AR_f8, strides=8) # E: No overload variant
+
+np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/testing.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/testing.pyi
new file mode 100644
index 00000000..803870e2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/testing.pyi
@@ -0,0 +1,28 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_U: npt.NDArray[np.str_]
+
+def func() -> bool: ...
+
+np.testing.assert_(True, msg=1) # E: incompatible type
+np.testing.build_err_msg(1, "test") # E: incompatible type
+np.testing.assert_almost_equal(AR_U, AR_U) # E: incompatible type
+np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # E: incompatible type
+np.testing.assert_array_almost_equal(AR_U, AR_U) # E: incompatible type
+np.testing.assert_array_less(AR_U, AR_U) # E: incompatible type
+np.testing.assert_string_equal(b"a", b"a") # E: incompatible type
+
+np.testing.assert_raises(expected_exception=TypeError, callable=func) # E: No overload variant
+np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # E: No overload variant
+
+np.testing.assert_allclose(AR_U, AR_U) # E: incompatible type
+np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # E: incompatible type
+np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type
+
+np.testing.assert_warns(warning_class=RuntimeWarning, func=func) # E: No overload variant
+np.testing.assert_no_warnings(func=func) # E: No overload variant
+np.testing.assert_no_warnings(func, None) # E: Too many arguments
+np.testing.assert_no_warnings(func, test=None) # E: Unexpected keyword argument
+
+np.testing.assert_no_gc_cycles(func=func) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi
new file mode 100644
index 00000000..faa43009
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/twodim_base.pyi
@@ -0,0 +1,37 @@
+from typing import Any, TypeVar
+
+import numpy as np
+import numpy.typing as npt
+
+
+def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]:
+ pass
+
+
+def func2(ar: npt.NDArray[Any], a: float) -> float:
+ pass
+
+
+AR_b: npt.NDArray[np.bool_]
+AR_m: npt.NDArray[np.timedelta64]
+
+AR_LIKE_b: list[bool]
+
+np.eye(10, M=20.0) # E: No overload variant
+np.eye(10, k=2.5, dtype=int) # E: No overload variant
+
+np.diag(AR_b, k=0.5) # E: No overload variant
+np.diagflat(AR_b, k=0.5) # E: No overload variant
+
+np.tri(10, M=20.0) # E: No overload variant
+np.tri(10, k=2.5, dtype=int) # E: No overload variant
+
+np.tril(AR_b, k=0.5) # E: No overload variant
+np.triu(AR_b, k=0.5) # E: No overload variant
+
+np.vander(AR_m) # E: incompatible type
+
+np.histogram2d(AR_m) # E: No overload variant
+
+np.mask_indices(10, func1) # E: incompatible type
+np.mask_indices(10, func2, 10.5) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/type_check.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/type_check.pyi
new file mode 100644
index 00000000..95f52bfb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/type_check.pyi
@@ -0,0 +1,13 @@
+import numpy as np
+import numpy.typing as npt
+
+DTYPE_i8: np.dtype[np.int64]
+
+np.mintypecode(DTYPE_i8) # E: incompatible type
+np.iscomplexobj(DTYPE_i8) # E: incompatible type
+np.isrealobj(DTYPE_i8) # E: incompatible type
+
+np.typename(DTYPE_i8) # E: No overload variant
+np.typename("invalid") # E: No overload variant
+
+np.common_type(np.timedelta64()) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi
new file mode 100644
index 00000000..f547fbb4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunc_config.pyi
@@ -0,0 +1,21 @@
+"""Typing tests for `numpy.core._ufunc_config`."""
+
+import numpy as np
+
+def func1(a: str, b: int, c: float) -> None: ...
+def func2(a: str, *, b: int) -> None: ...
+
+class Write1:
+ def write1(self, a: str) -> None: ...
+
+class Write2:
+ def write(self, a: str, b: str) -> None: ...
+
+class Write3:
+ def write(self, *, a: str) -> None: ...
+
+np.seterrcall(func1) # E: Argument 1 to "seterrcall" has incompatible type
+np.seterrcall(func2) # E: Argument 1 to "seterrcall" has incompatible type
+np.seterrcall(Write1()) # E: Argument 1 to "seterrcall" has incompatible type
+np.seterrcall(Write2()) # E: Argument 1 to "seterrcall" has incompatible type
+np.seterrcall(Write3()) # E: Argument 1 to "seterrcall" has incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi
new file mode 100644
index 00000000..2f9fd14c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufunclike.pyi
@@ -0,0 +1,21 @@
+from typing import Any
+import numpy as np
+
+AR_c: np.ndarray[Any, np.dtype[np.complex128]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+AR_O: np.ndarray[Any, np.dtype[np.object_]]
+
+np.fix(AR_c) # E: incompatible type
+np.fix(AR_m) # E: incompatible type
+np.fix(AR_M) # E: incompatible type
+
+np.isposinf(AR_c) # E: incompatible type
+np.isposinf(AR_m) # E: incompatible type
+np.isposinf(AR_M) # E: incompatible type
+np.isposinf(AR_O) # E: incompatible type
+
+np.isneginf(AR_c) # E: incompatible type
+np.isneginf(AR_m) # E: incompatible type
+np.isneginf(AR_M) # E: incompatible type
+np.isneginf(AR_O) # E: incompatible type
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi
new file mode 100644
index 00000000..e827267c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/ufuncs.pyi
@@ -0,0 +1,41 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+
+np.sin.nin + "foo" # E: Unsupported operand types
+np.sin(1, foo="bar") # E: No overload variant
+
+np.abs(None) # E: No overload variant
+
+np.add(1, 1, 1) # E: No overload variant
+np.add(1, 1, axis=0) # E: No overload variant
+
+np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant
+
+np.frexp(AR_f8, out=None) # E: No overload variant
+np.frexp(AR_f8, out=AR_f8) # E: No overload variant
+
+np.absolute.outer() # E: "None" not callable
+np.frexp.outer() # E: "None" not callable
+np.divmod.outer() # E: "None" not callable
+np.matmul.outer() # E: "None" not callable
+
+np.absolute.reduceat() # E: "None" not callable
+np.frexp.reduceat() # E: "None" not callable
+np.divmod.reduceat() # E: "None" not callable
+np.matmul.reduceat() # E: "None" not callable
+
+np.absolute.reduce() # E: "None" not callable
+np.frexp.reduce() # E: "None" not callable
+np.divmod.reduce() # E: "None" not callable
+np.matmul.reduce() # E: "None" not callable
+
+np.absolute.accumulate() # E: "None" not callable
+np.frexp.accumulate() # E: "None" not callable
+np.divmod.accumulate() # E: "None" not callable
+np.matmul.accumulate() # E: "None" not callable
+
+np.frexp.at() # E: "None" not callable
+np.divmod.at() # E: "None" not callable
+np.matmul.at() # E: "None" not callable
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi
new file mode 100644
index 00000000..f4fa3829
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/fail/warnings_and_errors.pyi
@@ -0,0 +1,5 @@
+import numpy as np
+
+np.AxisError(1.0) # E: No overload variant
+np.AxisError(1, ndim=2.0) # E: No overload variant
+np.AxisError(2, msg_prefix=404) # E: No overload variant
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi
new file mode 100644
index 00000000..1e495e4f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi
@@ -0,0 +1,17 @@
+import numpy as np
+
+reveal_type(np.uint128())
+reveal_type(np.uint256())
+
+reveal_type(np.int128())
+reveal_type(np.int256())
+
+reveal_type(np.float80())
+reveal_type(np.float96())
+reveal_type(np.float128())
+reveal_type(np.float256())
+
+reveal_type(np.complex160())
+reveal_type(np.complex192())
+reveal_type(np.complex256())
+reveal_type(np.complex512())
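
The `reveal_type` calls above are consumed by mypy rather than by Python:
these extended-precision aliases are platform-dependent, and numpy's mypy
plugin decides per platform whether each name resolves to a concrete type
or is reported as missing. The runtime counterpart of that check is simply
(illustrative snippet):

import numpy as np

print(hasattr(np, "float128"))  # True on most 64-bit Linux/macOS builds, False on Windows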
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/mypy.ini b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/mypy.ini
new file mode 100644
index 00000000..baad759b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/mypy.ini
@@ -0,0 +1,10 @@
+[mypy]
+plugins = numpy.typing.mypy_plugin
+show_absolute_path = True
+implicit_reexport = False
+
+[mypy-numpy]
+ignore_errors = True
+
+[mypy-numpy.*]
+ignore_errors = True
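
This config registers numpy's mypy plugin and silences errors coming from
numpy itself, so that only the test stubs are checked. A sketch of invoking
mypy with it against one of the fail stubs, pairing with the
expectation-matching sketch earlier in this diff (paths depend on the
checkout and are illustrative):

import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-m", "mypy",
     "--config-file", "numpy/typing/tests/data/mypy.ini",
     "numpy/typing/tests/data/fail/scalars.pyi"],
    capture_output=True,
    text=True,
)
print(result.stdout)  # one "error:" line per "# E:" expectation in the stub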
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arithmetic.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arithmetic.py
new file mode 100644
index 00000000..07a99012
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arithmetic.py
@@ -0,0 +1,594 @@
+from __future__ import annotations
+
+from typing import Any
+import numpy as np
+import pytest
+
+c16 = np.complex128(1)
+f8 = np.float64(1)
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+c8 = np.complex64(1)
+f4 = np.float32(1)
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+dt = np.datetime64(1, "D")
+td = np.timedelta64(1, "D")
+
+b_ = np.bool_(1)
+
+b = bool(1)
+c = complex(1)
+f = float(1)
+i = int(1)
+
+
+class Object:
+ def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]:
+ ret = np.empty((), dtype=object)
+ ret[()] = self
+ return ret
+
+ def __sub__(self, value: Any) -> Object:
+ return self
+
+ def __rsub__(self, value: Any) -> Object:
+ return self
+
+ def __floordiv__(self, value: Any) -> Object:
+ return self
+
+ def __rfloordiv__(self, value: Any) -> Object:
+ return self
+
+ def __mul__(self, value: Any) -> Object:
+ return self
+
+ def __rmul__(self, value: Any) -> Object:
+ return self
+
+ def __pow__(self, value: Any) -> Object:
+ return self
+
+ def __rpow__(self, value: Any) -> Object:
+ return self
+
+
+AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True])
+AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32)
+AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1])
+AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0])
+AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1j])
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64(1, "D")])
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64(1, "D")])
+AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([Object()])
+
+AR_LIKE_b = [True]
+AR_LIKE_u = [np.uint32(1)]
+AR_LIKE_i = [1]
+AR_LIKE_f = [1.0]
+AR_LIKE_c = [1j]
+AR_LIKE_m = [np.timedelta64(1, "D")]
+AR_LIKE_M = [np.datetime64(1, "D")]
+AR_LIKE_O = [Object()]
+
+# Array subtractions
+
+AR_b - AR_LIKE_u
+AR_b - AR_LIKE_i
+AR_b - AR_LIKE_f
+AR_b - AR_LIKE_c
+AR_b - AR_LIKE_m
+AR_b - AR_LIKE_O
+
+AR_LIKE_u - AR_b
+AR_LIKE_i - AR_b
+AR_LIKE_f - AR_b
+AR_LIKE_c - AR_b
+AR_LIKE_m - AR_b
+AR_LIKE_M - AR_b
+AR_LIKE_O - AR_b
+
+AR_u - AR_LIKE_b
+AR_u - AR_LIKE_u
+AR_u - AR_LIKE_i
+AR_u - AR_LIKE_f
+AR_u - AR_LIKE_c
+AR_u - AR_LIKE_m
+AR_u - AR_LIKE_O
+
+AR_LIKE_b - AR_u
+AR_LIKE_u - AR_u
+AR_LIKE_i - AR_u
+AR_LIKE_f - AR_u
+AR_LIKE_c - AR_u
+AR_LIKE_m - AR_u
+AR_LIKE_M - AR_u
+AR_LIKE_O - AR_u
+
+AR_i - AR_LIKE_b
+AR_i - AR_LIKE_u
+AR_i - AR_LIKE_i
+AR_i - AR_LIKE_f
+AR_i - AR_LIKE_c
+AR_i - AR_LIKE_m
+AR_i - AR_LIKE_O
+
+AR_LIKE_b - AR_i
+AR_LIKE_u - AR_i
+AR_LIKE_i - AR_i
+AR_LIKE_f - AR_i
+AR_LIKE_c - AR_i
+AR_LIKE_m - AR_i
+AR_LIKE_M - AR_i
+AR_LIKE_O - AR_i
+
+AR_f - AR_LIKE_b
+AR_f - AR_LIKE_u
+AR_f - AR_LIKE_i
+AR_f - AR_LIKE_f
+AR_f - AR_LIKE_c
+AR_f - AR_LIKE_O
+
+AR_LIKE_b - AR_f
+AR_LIKE_u - AR_f
+AR_LIKE_i - AR_f
+AR_LIKE_f - AR_f
+AR_LIKE_c - AR_f
+AR_LIKE_O - AR_f
+
+AR_c - AR_LIKE_b
+AR_c - AR_LIKE_u
+AR_c - AR_LIKE_i
+AR_c - AR_LIKE_f
+AR_c - AR_LIKE_c
+AR_c - AR_LIKE_O
+
+AR_LIKE_b - AR_c
+AR_LIKE_u - AR_c
+AR_LIKE_i - AR_c
+AR_LIKE_f - AR_c
+AR_LIKE_c - AR_c
+AR_LIKE_O - AR_c
+
+AR_m - AR_LIKE_b
+AR_m - AR_LIKE_u
+AR_m - AR_LIKE_i
+AR_m - AR_LIKE_m
+
+AR_LIKE_b - AR_m
+AR_LIKE_u - AR_m
+AR_LIKE_i - AR_m
+AR_LIKE_m - AR_m
+AR_LIKE_M - AR_m
+
+AR_M - AR_LIKE_b
+AR_M - AR_LIKE_u
+AR_M - AR_LIKE_i
+AR_M - AR_LIKE_m
+AR_M - AR_LIKE_M
+
+AR_LIKE_M - AR_M
+
+AR_O - AR_LIKE_b
+AR_O - AR_LIKE_u
+AR_O - AR_LIKE_i
+AR_O - AR_LIKE_f
+AR_O - AR_LIKE_c
+AR_O - AR_LIKE_O
+
+AR_LIKE_b - AR_O
+AR_LIKE_u - AR_O
+AR_LIKE_i - AR_O
+AR_LIKE_f - AR_O
+AR_LIKE_c - AR_O
+AR_LIKE_O - AR_O
+
+AR_u += AR_b
+AR_u += AR_u
+AR_u += 1  # Allowed at runtime as long as the object is 0-D and >= 0
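+# Sketch of the counter-case the comment above alludes to (kept commented
+# out because it fails at runtime, not at type-check time):
+# AR_u += -1  # rejected: the signed result cannot be cast back to uint32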
+
+# Array floor division
+
+AR_b // AR_LIKE_b
+AR_b // AR_LIKE_u
+AR_b // AR_LIKE_i
+AR_b // AR_LIKE_f
+AR_b // AR_LIKE_O
+
+AR_LIKE_b // AR_b
+AR_LIKE_u // AR_b
+AR_LIKE_i // AR_b
+AR_LIKE_f // AR_b
+AR_LIKE_O // AR_b
+
+AR_u // AR_LIKE_b
+AR_u // AR_LIKE_u
+AR_u // AR_LIKE_i
+AR_u // AR_LIKE_f
+AR_u // AR_LIKE_O
+
+AR_LIKE_b // AR_u
+AR_LIKE_u // AR_u
+AR_LIKE_i // AR_u
+AR_LIKE_f // AR_u
+AR_LIKE_m // AR_u
+AR_LIKE_O // AR_u
+
+AR_i // AR_LIKE_b
+AR_i // AR_LIKE_u
+AR_i // AR_LIKE_i
+AR_i // AR_LIKE_f
+AR_i // AR_LIKE_O
+
+AR_LIKE_b // AR_i
+AR_LIKE_u // AR_i
+AR_LIKE_i // AR_i
+AR_LIKE_f // AR_i
+AR_LIKE_m // AR_i
+AR_LIKE_O // AR_i
+
+AR_f // AR_LIKE_b
+AR_f // AR_LIKE_u
+AR_f // AR_LIKE_i
+AR_f // AR_LIKE_f
+AR_f // AR_LIKE_O
+
+AR_LIKE_b // AR_f
+AR_LIKE_u // AR_f
+AR_LIKE_i // AR_f
+AR_LIKE_f // AR_f
+AR_LIKE_m // AR_f
+AR_LIKE_O // AR_f
+
+AR_m // AR_LIKE_u
+AR_m // AR_LIKE_i
+AR_m // AR_LIKE_f
+AR_m // AR_LIKE_m
+
+AR_LIKE_m // AR_m
+
+AR_O // AR_LIKE_b
+AR_O // AR_LIKE_u
+AR_O // AR_LIKE_i
+AR_O // AR_LIKE_f
+AR_O // AR_LIKE_O
+
+AR_LIKE_b // AR_O
+AR_LIKE_u // AR_O
+AR_LIKE_i // AR_O
+AR_LIKE_f // AR_O
+AR_LIKE_O // AR_O
+
+# Inplace multiplication
+
+AR_b *= AR_LIKE_b
+
+AR_u *= AR_LIKE_b
+AR_u *= AR_LIKE_u
+
+AR_i *= AR_LIKE_b
+AR_i *= AR_LIKE_u
+AR_i *= AR_LIKE_i
+
+AR_f *= AR_LIKE_b
+AR_f *= AR_LIKE_u
+AR_f *= AR_LIKE_i
+AR_f *= AR_LIKE_f
+
+AR_c *= AR_LIKE_b
+AR_c *= AR_LIKE_u
+AR_c *= AR_LIKE_i
+AR_c *= AR_LIKE_f
+AR_c *= AR_LIKE_c
+
+AR_m *= AR_LIKE_b
+AR_m *= AR_LIKE_u
+AR_m *= AR_LIKE_i
+AR_m *= AR_LIKE_f
+
+AR_O *= AR_LIKE_b
+AR_O *= AR_LIKE_u
+AR_O *= AR_LIKE_i
+AR_O *= AR_LIKE_f
+AR_O *= AR_LIKE_c
+AR_O *= AR_LIKE_O
+
+# Inplace power
+
+AR_u **= AR_LIKE_b
+AR_u **= AR_LIKE_u
+
+AR_i **= AR_LIKE_b
+AR_i **= AR_LIKE_u
+AR_i **= AR_LIKE_i
+
+AR_f **= AR_LIKE_b
+AR_f **= AR_LIKE_u
+AR_f **= AR_LIKE_i
+AR_f **= AR_LIKE_f
+
+AR_c **= AR_LIKE_b
+AR_c **= AR_LIKE_u
+AR_c **= AR_LIKE_i
+AR_c **= AR_LIKE_f
+AR_c **= AR_LIKE_c
+
+AR_O **= AR_LIKE_b
+AR_O **= AR_LIKE_u
+AR_O **= AR_LIKE_i
+AR_O **= AR_LIKE_f
+AR_O **= AR_LIKE_c
+AR_O **= AR_LIKE_O
+
+# unary ops
+
+-c16
+-c8
+-f8
+-f4
+-i8
+-i4
+with pytest.warns(RuntimeWarning):
+ -u8
+ -u4
+-td
+-AR_f
+
++c16
++c8
++f8
++f4
++i8
++i4
++u8
++u4
++td
++AR_f
+
+abs(c16)
+abs(c8)
+abs(f8)
+abs(f4)
+abs(i8)
+abs(i4)
+abs(u8)
+abs(u4)
+abs(td)
+abs(b_)
+abs(AR_f)
+
+# Time structures
+
+dt + td
+dt + i
+dt + i4
+dt + i8
+dt - dt
+dt - i
+dt - i4
+dt - i8
+
+td + td
+td + i
+td + i4
+td + i8
+td - td
+td - i
+td - i4
+td - i8
+td / f
+td / f4
+td / f8
+td / td
+td // td
+td % td
+
+
+# boolean
+
+b_ / b
+b_ / b_
+b_ / i
+b_ / i8
+b_ / i4
+b_ / u8
+b_ / u4
+b_ / f
+b_ / f8
+b_ / f4
+b_ / c
+b_ / c16
+b_ / c8
+
+b / b_
+b_ / b_
+i / b_
+i8 / b_
+i4 / b_
+u8 / b_
+u4 / b_
+f / b_
+f8 / b_
+f4 / b_
+c / b_
+c16 / b_
+c8 / b_
+
+# Complex
+
+c16 + c16
+c16 + f8
+c16 + i8
+c16 + c8
+c16 + f4
+c16 + i4
+c16 + b_
+c16 + b
+c16 + c
+c16 + f
+c16 + i
+c16 + AR_f
+
+c16 + c16
+f8 + c16
+i8 + c16
+c8 + c16
+f4 + c16
+i4 + c16
+b_ + c16
+b + c16
+c + c16
+f + c16
+i + c16
+AR_f + c16
+
+c8 + c16
+c8 + f8
+c8 + i8
+c8 + c8
+c8 + f4
+c8 + i4
+c8 + b_
+c8 + b
+c8 + c
+c8 + f
+c8 + i
+c8 + AR_f
+
+c16 + c8
+f8 + c8
+i8 + c8
+c8 + c8
+f4 + c8
+i4 + c8
+b_ + c8
+b + c8
+c + c8
+f + c8
+i + c8
+AR_f + c8
+
+# Float
+
+f8 + f8
+f8 + i8
+f8 + f4
+f8 + i4
+f8 + b_
+f8 + b
+f8 + c
+f8 + f
+f8 + i
+f8 + AR_f
+
+f8 + f8
+i8 + f8
+f4 + f8
+i4 + f8
+b_ + f8
+b + f8
+c + f8
+f + f8
+i + f8
+AR_f + f8
+
+f4 + f8
+f4 + i8
+f4 + f4
+f4 + i4
+f4 + b_
+f4 + b
+f4 + c
+f4 + f
+f4 + i
+f4 + AR_f
+
+f8 + f4
+i8 + f4
+f4 + f4
+i4 + f4
+b_ + f4
+b + f4
+c + f4
+f + f4
+i + f4
+AR_f + f4
+
+# Int
+
+i8 + i8
+i8 + u8
+i8 + i4
+i8 + u4
+i8 + b_
+i8 + b
+i8 + c
+i8 + f
+i8 + i
+i8 + AR_f
+
+u8 + u8
+u8 + i4
+u8 + u4
+u8 + b_
+u8 + b
+u8 + c
+u8 + f
+u8 + i
+u8 + AR_f
+
+i8 + i8
+u8 + i8
+i4 + i8
+u4 + i8
+b_ + i8
+b + i8
+c + i8
+f + i8
+i + i8
+AR_f + i8
+
+u8 + u8
+i4 + u8
+u4 + u8
+b_ + u8
+b + u8
+c + u8
+f + u8
+i + u8
+AR_f + u8
+
+i4 + i8
+i4 + i4
+i4 + i
+i4 + b_
+i4 + b
+i4 + AR_f
+
+u4 + i8
+u4 + i4
+u4 + u8
+u4 + u4
+u4 + i
+u4 + b_
+u4 + b
+u4 + AR_f
+
+i8 + i4
+i4 + i4
+i + i4
+b_ + i4
+b + i4
+AR_f + i4
+
+i8 + u4
+i4 + u4
+u8 + u4
+u4 + u4
+b_ + u4
+b + u4
+i + u4
+AR_f + u4
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_constructors.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_constructors.py
new file mode 100644
index 00000000..e035a73c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_constructors.py
@@ -0,0 +1,137 @@
+import sys
+from typing import Any
+import numpy as np
+
+
+class Index:
+ def __index__(self) -> int:
+ return 0
+
+
+class SubClass(np.ndarray):
+ pass
+
+
+def func(i: int, j: int, **kwargs: Any) -> SubClass:
+ return B
+
+
+i8 = np.int64(1)
+
+A = np.array([1])
+B = A.view(SubClass).copy()
+B_stack = np.array([[1], [1]]).view(SubClass)
+C = [1]
+
+np.ndarray(Index())
+np.ndarray([Index()])
+
+np.array(1, dtype=float)
+np.array(1, copy=False)
+np.array(1, order='F')
+np.array(1, order=None)
+np.array(1, subok=True)
+np.array(1, ndmin=3)
+np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
+
+np.asarray(A)
+np.asarray(B)
+np.asarray(C)
+
+np.asanyarray(A)
+np.asanyarray(B)
+np.asanyarray(B, dtype=int)
+np.asanyarray(C)
+
+np.ascontiguousarray(A)
+np.ascontiguousarray(B)
+np.ascontiguousarray(C)
+
+np.asfortranarray(A)
+np.asfortranarray(B)
+np.asfortranarray(C)
+
+np.require(A)
+np.require(B)
+np.require(B, dtype=int)
+np.require(B, requirements=None)
+np.require(B, requirements="E")
+np.require(B, requirements=["ENSUREARRAY"])
+np.require(B, requirements={"F", "E"})
+np.require(B, requirements=["C", "OWNDATA"])
+np.require(B, requirements="W")
+np.require(B, requirements="A")
+np.require(C)
+
+np.linspace(0, 2)
+np.linspace(0.5, [0, 1, 2])
+np.linspace([0, 1, 2], 3)
+np.linspace(0j, 2)
+np.linspace(0, 2, num=10)
+np.linspace(0, 2, endpoint=True)
+np.linspace(0, 2, retstep=True)
+np.linspace(0j, 2j, retstep=True)
+np.linspace(0, 2, dtype=bool)
+np.linspace([0, 1], [2, 3], axis=Index())
+
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=[1j, 2j], num=2)
+
+np.geomspace(1, 2)
+
+np.zeros_like(A)
+np.zeros_like(C)
+np.zeros_like(B)
+np.zeros_like(B, dtype=np.int64)
+
+np.ones_like(A)
+np.ones_like(C)
+np.ones_like(B)
+np.ones_like(B, dtype=np.int64)
+
+np.empty_like(A)
+np.empty_like(C)
+np.empty_like(B)
+np.empty_like(B, dtype=np.int64)
+
+np.full_like(A, i8)
+np.full_like(C, i8)
+np.full_like(B, i8)
+np.full_like(B, i8, dtype=np.int64)
+
+np.ones(1)
+np.ones([1, 1, 1])
+
+np.full(1, i8)
+np.full([1, 1, 1], i8)
+
+np.indices([1, 2, 3])
+np.indices([1, 2, 3], sparse=True)
+
+np.fromfunction(func, (3, 5))
+
+np.identity(10)
+
+np.atleast_1d(C)
+np.atleast_1d(A)
+np.atleast_1d(C, C)
+np.atleast_1d(C, A)
+np.atleast_1d(A, A)
+
+np.atleast_2d(C)
+
+np.atleast_3d(C)
+
+np.vstack([C, C])
+np.vstack([C, A])
+np.vstack([A, A])
+
+np.hstack([C, C])
+
+np.stack([C, C])
+np.stack([C, C], axis=0)
+np.stack([C, C], out=B_stack)
+
+np.block([[C, C], [C, C]])
+np.block(A)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_like.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_like.py
new file mode 100644
index 00000000..da2520e9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/array_like.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+from numpy._typing import ArrayLike, _SupportsArray
+
+x1: ArrayLike = True
+x2: ArrayLike = 5
+x3: ArrayLike = 1.0
+x4: ArrayLike = 1 + 1j
+x5: ArrayLike = np.int8(1)
+x6: ArrayLike = np.float64(1)
+x7: ArrayLike = np.complex128(1)
+x8: ArrayLike = np.array([1, 2, 3])
+x9: ArrayLike = [1, 2, 3]
+x10: ArrayLike = (1, 2, 3)
+x11: ArrayLike = "foo"
+x12: ArrayLike = memoryview(b'foo')
+
+
+class A:
+ def __array__(self, dtype: None | np.dtype[Any] = None) -> np.ndarray:
+ return np.array([1, 2, 3])
+
+
+x13: ArrayLike = A()
+
+scalar: _SupportsArray = np.int64(1)
+scalar.__array__()
+array: _SupportsArray = np.array(1)
+array.__array__()
+
+a: _SupportsArray = A()
+a.__array__()
+a.__array__()
+
+# Escape hatch for when you mean to make something like an object
+# array.
+object_array_scalar: Any = (i for i in range(10))
+np.array(object_array_scalar)
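+# The flip side of the escape hatch above (a sketch): a generator is not
+# an ``ArrayLike``, so annotating it as one would be rejected by a type
+# checker.
+# bad_array_like: ArrayLike = (i for i in range(10))  # typing error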
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayprint.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayprint.py
new file mode 100644
index 00000000..6c704c75
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayprint.py
@@ -0,0 +1,39 @@
+import numpy as np
+
+AR = np.arange(10)
+AR.setflags(write=False)
+
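+# Everything below runs inside ``np.printoptions()``, so the global
+# print-option changes are rolled back when the block exits.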
+with np.printoptions():
+ np.set_printoptions(
+ precision=1,
+ threshold=2,
+ edgeitems=3,
+ linewidth=4,
+ suppress=False,
+ nanstr="Bob",
+ infstr="Bill",
+ formatter={},
+ sign="+",
+ floatmode="unique",
+ )
+ np.get_printoptions()
+ str(AR)
+
+ np.array2string(
+ AR,
+ max_line_width=5,
+ precision=2,
+ suppress_small=True,
+ separator=";",
+ prefix="test",
+ threshold=5,
+ floatmode="fixed",
+ suffix="?",
+ legacy="1.13",
+ )
+ np.format_float_scientific(1, precision=5)
+ np.format_float_positional(1, trim="k")
+ np.array_repr(AR)
+ np.array_str(AR)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayterator.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayterator.py
new file mode 100644
index 00000000..572be5e2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/arrayterator.py
@@ -0,0 +1,27 @@
+
+from __future__ import annotations
+
+from typing import Any
+import numpy as np
+
+AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10)
+ar_iter = np.lib.Arrayterator(AR_i8)
+
+ar_iter.var
+ar_iter.buf_size
+ar_iter.start
+ar_iter.stop
+ar_iter.step
+ar_iter.shape
+ar_iter.flat
+
+ar_iter.__array__()
+
+for i in ar_iter:
+ pass
+
+ar_iter[0]
+ar_iter[...]
+ar_iter[:]
+ar_iter[0, 0, 0]
+ar_iter[..., 0, :]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py
new file mode 100644
index 00000000..67449e2c
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/bitwise_ops.py
@@ -0,0 +1,131 @@
+import numpy as np
+
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+b_ = np.bool_(1)
+
+b = bool(1)
+i = int(1)
+
+AR = np.array([0, 1, 2], dtype=np.int32)
+AR.setflags(write=False)
+
+
+i8 << i8
+i8 >> i8
+i8 | i8
+i8 ^ i8
+i8 & i8
+
+i8 << AR
+i8 >> AR
+i8 | AR
+i8 ^ AR
+i8 & AR
+
+i4 << i4
+i4 >> i4
+i4 | i4
+i4 ^ i4
+i4 & i4
+
+i8 << i4
+i8 >> i4
+i8 | i4
+i8 ^ i4
+i8 & i4
+
+i8 << i
+i8 >> i
+i8 | i
+i8 ^ i
+i8 & i
+
+i8 << b_
+i8 >> b_
+i8 | b_
+i8 ^ b_
+i8 & b_
+
+i8 << b
+i8 >> b
+i8 | b
+i8 ^ b
+i8 & b
+
+u8 << u8
+u8 >> u8
+u8 | u8
+u8 ^ u8
+u8 & u8
+
+u8 << AR
+u8 >> AR
+u8 | AR
+u8 ^ AR
+u8 & AR
+
+u4 << u4
+u4 >> u4
+u4 | u4
+u4 ^ u4
+u4 & u4
+
+u4 << i4
+u4 >> i4
+u4 | i4
+u4 ^ i4
+u4 & i4
+
+u4 << i
+u4 >> i
+u4 | i
+u4 ^ i
+u4 & i
+
+u8 << b_
+u8 >> b_
+u8 | b_
+u8 ^ b_
+u8 & b_
+
+u8 << b
+u8 >> b
+u8 | b
+u8 ^ b
+u8 & b
+
+b_ << b_
+b_ >> b_
+b_ | b_
+b_ ^ b_
+b_ & b_
+
+b_ << AR
+b_ >> AR
+b_ | AR
+b_ ^ AR
+b_ & AR
+
+b_ << b
+b_ >> b
+b_ | b
+b_ ^ b
+b_ & b
+
+b_ << i
+b_ >> i
+b_ | i
+b_ ^ i
+b_ & i
+
+~i8
+~i4
+~u8
+~u4
+~b_
+~AR
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/comparisons.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/comparisons.py
new file mode 100644
index 00000000..ce41de43
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/comparisons.py
@@ -0,0 +1,301 @@
+from __future__ import annotations
+
+from typing import Any
+import numpy as np
+
+c16 = np.complex128()
+f8 = np.float64()
+i8 = np.int64()
+u8 = np.uint64()
+
+c8 = np.complex64()
+f4 = np.float32()
+i4 = np.int32()
+u4 = np.uint32()
+
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ = np.bool_()
+
+b = bool()
+c = complex()
+f = float()
+i = int()
+
+SEQ = (0, 1, 2, 3, 4)
+
+AR_b: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True])
+AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32)
+AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1])
+AR_f: np.ndarray[Any, np.dtype[np.float_]] = np.array([1.0])
+AR_c: np.ndarray[Any, np.dtype[np.complex_]] = np.array([1.0j])
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")])
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")])
+AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object)
+
+# Arrays
+
+AR_b > AR_b
+AR_b > AR_u
+AR_b > AR_i
+AR_b > AR_f
+AR_b > AR_c
+
+AR_u > AR_b
+AR_u > AR_u
+AR_u > AR_i
+AR_u > AR_f
+AR_u > AR_c
+
+AR_i > AR_b
+AR_i > AR_u
+AR_i > AR_i
+AR_i > AR_f
+AR_i > AR_c
+
+AR_f > AR_b
+AR_f > AR_u
+AR_f > AR_i
+AR_f > AR_f
+AR_f > AR_c
+
+AR_c > AR_b
+AR_c > AR_u
+AR_c > AR_i
+AR_c > AR_f
+AR_c > AR_c
+
+AR_m > AR_b
+AR_m > AR_u
+AR_m > AR_i
+AR_b > AR_m
+AR_u > AR_m
+AR_i > AR_m
+
+AR_M > AR_M
+
+AR_O > AR_O
+1 > AR_O
+AR_O > 1
+
+# Time structures
+
+dt > dt
+
+td > td
+td > i
+td > i4
+td > i8
+td > AR_i
+td > SEQ
+
+# boolean
+
+b_ > b
+b_ > b_
+b_ > i
+b_ > i8
+b_ > i4
+b_ > u8
+b_ > u4
+b_ > f
+b_ > f8
+b_ > f4
+b_ > c
+b_ > c16
+b_ > c8
+b_ > AR_i
+b_ > SEQ
+
+# Complex
+
+c16 > c16
+c16 > f8
+c16 > i8
+c16 > c8
+c16 > f4
+c16 > i4
+c16 > b_
+c16 > b
+c16 > c
+c16 > f
+c16 > i
+c16 > AR_i
+c16 > SEQ
+
+c16 > c16
+f8 > c16
+i8 > c16
+c8 > c16
+f4 > c16
+i4 > c16
+b_ > c16
+b > c16
+c > c16
+f > c16
+i > c16
+AR_i > c16
+SEQ > c16
+
+c8 > c16
+c8 > f8
+c8 > i8
+c8 > c8
+c8 > f4
+c8 > i4
+c8 > b_
+c8 > b
+c8 > c
+c8 > f
+c8 > i
+c8 > AR_i
+c8 > SEQ
+
+c16 > c8
+f8 > c8
+i8 > c8
+c8 > c8
+f4 > c8
+i4 > c8
+b_ > c8
+b > c8
+c > c8
+f > c8
+i > c8
+AR_i > c8
+SEQ > c8
+
+# Float
+
+f8 > f8
+f8 > i8
+f8 > f4
+f8 > i4
+f8 > b_
+f8 > b
+f8 > c
+f8 > f
+f8 > i
+f8 > AR_i
+f8 > SEQ
+
+f8 > f8
+i8 > f8
+f4 > f8
+i4 > f8
+b_ > f8
+b > f8
+c > f8
+f > f8
+i > f8
+AR_i > f8
+SEQ > f8
+
+f4 > f8
+f4 > i8
+f4 > f4
+f4 > i4
+f4 > b_
+f4 > b
+f4 > c
+f4 > f
+f4 > i
+f4 > AR_i
+f4 > SEQ
+
+f8 > f4
+i8 > f4
+f4 > f4
+i4 > f4
+b_ > f4
+b > f4
+c > f4
+f > f4
+i > f4
+AR_i > f4
+SEQ > f4
+
+# Int
+
+i8 > i8
+i8 > u8
+i8 > i4
+i8 > u4
+i8 > b_
+i8 > b
+i8 > c
+i8 > f
+i8 > i
+i8 > AR_i
+i8 > SEQ
+
+u8 > u8
+u8 > i4
+u8 > u4
+u8 > b_
+u8 > b
+u8 > c
+u8 > f
+u8 > i
+u8 > AR_i
+u8 > SEQ
+
+i8 > i8
+u8 > i8
+i4 > i8
+u4 > i8
+b_ > i8
+b > i8
+c > i8
+f > i8
+i > i8
+AR_i > i8
+SEQ > i8
+
+u8 > u8
+i4 > u8
+u4 > u8
+b_ > u8
+b > u8
+c > u8
+f > u8
+i > u8
+AR_i > u8
+SEQ > u8
+
+i4 > i8
+i4 > i4
+i4 > i
+i4 > b_
+i4 > b
+i4 > AR_i
+i4 > SEQ
+
+u4 > i8
+u4 > i4
+u4 > u8
+u4 > u4
+u4 > i
+u4 > b_
+u4 > b
+u4 > AR_i
+u4 > SEQ
+
+i8 > i4
+i4 > i4
+i > i4
+b_ > i4
+b > i4
+AR_i > i4
+SEQ > i4
+
+i8 > u4
+i4 > u4
+u8 > u4
+u4 > u4
+b_ > u4
+b > u4
+i > u4
+AR_i > u4
+SEQ > u4
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py
new file mode 100644
index 00000000..e849cfdd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/dtype.py
@@ -0,0 +1,59 @@
+import numpy as np
+
+dtype_obj = np.dtype(np.str_)
+void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)])
+
+np.dtype(dtype=np.int64)
+np.dtype(int)
+np.dtype("int")
+np.dtype(None)
+
+np.dtype((int, 2))
+np.dtype((int, (1,)))
+
+np.dtype({"names": ["a", "b"], "formats": [int, float]})
+np.dtype({"names": ["a"], "formats": [int], "titles": [object]})
+np.dtype({"names": ["a"], "formats": [int], "titles": [object()]})
+
+np.dtype([("name", np.unicode_, 16), ("grades", np.float64, (2,)), ("age", "int32")])
+
+np.dtype(
+ {
+ "names": ["a", "b"],
+ "formats": [int, float],
+ "itemsize": 9,
+ "aligned": False,
+ "titles": ["x", "y"],
+ "offsets": [0, 1],
+ }
+)
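+# The dict form above is the fully explicit spelling: field names and
+# formats plus manual offsets, titles and an overall itemsize.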
+
+np.dtype((np.float_, float))
+
+
+class Test:
+ dtype = np.dtype(float)
+
+
+np.dtype(Test())
+
+# Methods and attributes
+dtype_obj.base
+dtype_obj.subdtype
+dtype_obj.newbyteorder()
+dtype_obj.type
+dtype_obj.name
+dtype_obj.names
+
+dtype_obj * 0
+dtype_obj * 2
+
+0 * dtype_obj
+2 * dtype_obj
+
+void_dtype_obj["f0"]
+void_dtype_obj[0]
+void_dtype_obj[["f0", "f1"]]
+void_dtype_obj[["f0"]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/einsumfunc.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/einsumfunc.py
new file mode 100644
index 00000000..429764e6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/einsumfunc.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+AR_LIKE_b = [True, True, True]
+AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
+AR_LIKE_i = [1, 2, 3]
+AR_LIKE_f = [1.0, 2.0, 3.0]
+AR_LIKE_c = [1j, 2j, 3j]
+AR_LIKE_U = ["1", "2", "3"]
+
+OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64)
+OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128)
+
+np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)
+np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)
+np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)
+np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)
+np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)
+np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)
+np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)
+
+np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")
+np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")
+np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c)
+np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f)
+
+np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)
+np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)
+np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)
+np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)
+np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)
+np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)
+np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/flatiter.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/flatiter.py
new file mode 100644
index 00000000..63c839af
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/flatiter.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+a = np.empty((2, 2)).flat
+
+a.base
+a.copy()
+a.coords
+a.index
+iter(a)
+next(a)
+a[0]
+a[[0, 1, 2]]
+a[...]
+a[:]
+a.__array__()
+a.__array__(np.dtype(np.float64))
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/fromnumeric.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/fromnumeric.py
new file mode 100644
index 00000000..9e936e68
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/fromnumeric.py
@@ -0,0 +1,262 @@
+"""Tests for :mod:`numpy.core.fromnumeric`."""
+
+import numpy as np
+
+A = np.array(True, ndmin=2, dtype=bool)
+B = np.array(1.0, ndmin=2, dtype=np.float32)
+A.setflags(write=False)
+B.setflags(write=False)
+
+a = np.bool_(True)
+b = np.float32(1.0)
+c = 1.0
+d = np.array(1.0, dtype=np.float32) # writeable
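+# A and B are frozen so the calls below cannot mutate them by accident;
+# ``d`` stays writeable because it is used as an ``out=`` target.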
+
+np.take(a, 0)
+np.take(b, 0)
+np.take(c, 0)
+np.take(A, 0)
+np.take(B, 0)
+np.take(A, [0])
+np.take(B, [0])
+
+np.reshape(a, 1)
+np.reshape(b, 1)
+np.reshape(c, 1)
+np.reshape(A, 1)
+np.reshape(B, 1)
+
+np.choose(a, [True, True])
+np.choose(A, [1.0, 1.0])
+
+np.repeat(a, 1)
+np.repeat(b, 1)
+np.repeat(c, 1)
+np.repeat(A, 1)
+np.repeat(B, 1)
+
+np.swapaxes(A, 0, 0)
+np.swapaxes(B, 0, 0)
+
+np.transpose(a)
+np.transpose(b)
+np.transpose(c)
+np.transpose(A)
+np.transpose(B)
+
+np.partition(a, 0, axis=None)
+np.partition(b, 0, axis=None)
+np.partition(c, 0, axis=None)
+np.partition(A, 0)
+np.partition(B, 0)
+
+np.argpartition(a, 0)
+np.argpartition(b, 0)
+np.argpartition(c, 0)
+np.argpartition(A, 0)
+np.argpartition(B, 0)
+
+np.sort(A, 0)
+np.sort(B, 0)
+
+np.argsort(A, 0)
+np.argsort(B, 0)
+
+np.argmax(A)
+np.argmax(B)
+np.argmax(A, axis=0)
+np.argmax(B, axis=0)
+
+np.argmin(A)
+np.argmin(B)
+np.argmin(A, axis=0)
+np.argmin(B, axis=0)
+
+np.searchsorted(A[0], 0)
+np.searchsorted(B[0], 0)
+np.searchsorted(A[0], [0])
+np.searchsorted(B[0], [0])
+
+np.resize(a, (5, 5))
+np.resize(b, (5, 5))
+np.resize(c, (5, 5))
+np.resize(A, (5, 5))
+np.resize(B, (5, 5))
+
+np.squeeze(a)
+np.squeeze(b)
+np.squeeze(c)
+np.squeeze(A)
+np.squeeze(B)
+
+np.diagonal(A)
+np.diagonal(B)
+
+np.trace(A)
+np.trace(B)
+
+np.ravel(a)
+np.ravel(b)
+np.ravel(c)
+np.ravel(A)
+np.ravel(B)
+
+np.nonzero(A)
+np.nonzero(B)
+
+np.shape(a)
+np.shape(b)
+np.shape(c)
+np.shape(A)
+np.shape(B)
+
+np.compress([True], a)
+np.compress([True], b)
+np.compress([True], c)
+np.compress([True], A)
+np.compress([True], B)
+
+np.clip(a, 0, 1.0)
+np.clip(b, -1, 1)
+np.clip(a, 0, None)
+np.clip(b, None, 1)
+np.clip(c, 0, 1)
+np.clip(A, 0, 1)
+np.clip(B, 0, 1)
+np.clip(B, [0, 1], [1, 2])
+
+np.sum(a)
+np.sum(b)
+np.sum(c)
+np.sum(A)
+np.sum(B)
+np.sum(A, axis=0)
+np.sum(B, axis=0)
+
+np.all(a)
+np.all(b)
+np.all(c)
+np.all(A)
+np.all(B)
+np.all(A, axis=0)
+np.all(B, axis=0)
+np.all(A, keepdims=True)
+np.all(B, keepdims=True)
+
+np.any(a)
+np.any(b)
+np.any(c)
+np.any(A)
+np.any(B)
+np.any(A, axis=0)
+np.any(B, axis=0)
+np.any(A, keepdims=True)
+np.any(B, keepdims=True)
+
+np.cumsum(a)
+np.cumsum(b)
+np.cumsum(c)
+np.cumsum(A)
+np.cumsum(B)
+
+np.ptp(b)
+np.ptp(c)
+np.ptp(B)
+np.ptp(B, axis=0)
+np.ptp(B, keepdims=True)
+
+np.amax(a)
+np.amax(b)
+np.amax(c)
+np.amax(A)
+np.amax(B)
+np.amax(A, axis=0)
+np.amax(B, axis=0)
+np.amax(A, keepdims=True)
+np.amax(B, keepdims=True)
+
+np.amin(a)
+np.amin(b)
+np.amin(c)
+np.amin(A)
+np.amin(B)
+np.amin(A, axis=0)
+np.amin(B, axis=0)
+np.amin(A, keepdims=True)
+np.amin(B, keepdims=True)
+
+np.prod(a)
+np.prod(b)
+np.prod(c)
+np.prod(A)
+np.prod(B)
+np.prod(a, dtype=None)
+np.prod(A, dtype=None)
+np.prod(A, axis=0)
+np.prod(B, axis=0)
+np.prod(A, keepdims=True)
+np.prod(B, keepdims=True)
+np.prod(b, out=d)
+np.prod(B, out=d)
+
+np.cumprod(a)
+np.cumprod(b)
+np.cumprod(c)
+np.cumprod(A)
+np.cumprod(B)
+
+np.ndim(a)
+np.ndim(b)
+np.ndim(c)
+np.ndim(A)
+np.ndim(B)
+
+np.size(a)
+np.size(b)
+np.size(c)
+np.size(A)
+np.size(B)
+
+np.around(a)
+np.around(b)
+np.around(c)
+np.around(A)
+np.around(B)
+
+np.mean(a)
+np.mean(b)
+np.mean(c)
+np.mean(A)
+np.mean(B)
+np.mean(A, axis=0)
+np.mean(B, axis=0)
+np.mean(A, keepdims=True)
+np.mean(B, keepdims=True)
+np.mean(b, out=d)
+np.mean(B, out=d)
+
+np.std(a)
+np.std(b)
+np.std(c)
+np.std(A)
+np.std(B)
+np.std(A, axis=0)
+np.std(B, axis=0)
+np.std(A, keepdims=True)
+np.std(B, keepdims=True)
+np.std(b, out=d)
+np.std(B, out=d)
+
+np.var(a)
+np.var(b)
+np.var(c)
+np.var(A)
+np.var(B)
+np.var(A, axis=0)
+np.var(B, axis=0)
+np.var(A, keepdims=True)
+np.var(B, keepdims=True)
+np.var(b, out=d)
+np.var(B, out=d)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/index_tricks.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/index_tricks.py
new file mode 100644
index 00000000..4c4c1195
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/index_tricks.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+from typing import Any
+import numpy as np
+
+AR_LIKE_b = [[True, True], [True, True]]
+AR_LIKE_i = [[1, 2], [3, 4]]
+AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]]
+AR_LIKE_U = [["1", "2"], ["3", "4"]]
+
+AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64)
+
+np.ndenumerate(AR_i8)
+np.ndenumerate(AR_LIKE_f)
+np.ndenumerate(AR_LIKE_U)
+
+np.ndenumerate(AR_i8).iter
+np.ndenumerate(AR_LIKE_f).iter
+np.ndenumerate(AR_LIKE_U).iter
+
+next(np.ndenumerate(AR_i8))
+next(np.ndenumerate(AR_LIKE_f))
+next(np.ndenumerate(AR_LIKE_U))
+
+iter(np.ndenumerate(AR_i8))
+iter(np.ndenumerate(AR_LIKE_f))
+iter(np.ndenumerate(AR_LIKE_U))
+
+iter(np.ndindex(1, 2, 3))
+next(np.ndindex(1, 2, 3))
+
+np.unravel_index([22, 41, 37], (7, 6))
+np.unravel_index([31, 41, 13], (7, 6), order='F')
+np.unravel_index(1621, (6, 7, 8, 9))
+
+np.ravel_multi_index(AR_LIKE_i, (7, 6))
+np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F')
+np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip')
+np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap'))
+np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))
+
+np.mgrid[1:1:2]
+np.mgrid[1:1:2, None:10]
+
+np.ogrid[1:1:2]
+np.ogrid[1:1:2, None:10]
+
+np.index_exp[0:1]
+np.index_exp[0:1, None:3]
+np.index_exp[0, 0:1, ..., [0, 1, 3]]
+
+np.s_[0:1]
+np.s_[0:1, None:3]
+np.s_[0, 0:1, ..., [0, 1, 3]]
+
+np.ix_(AR_LIKE_b[0])
+np.ix_(AR_LIKE_i[0], AR_LIKE_f[0])
+np.ix_(AR_i8[0])
+
+np.fill_diagonal(AR_i8, 5)
+
+np.diag_indices(4)
+np.diag_indices(2, 3)
+
+np.diag_indices_from(AR_i8)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_utils.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_utils.py
new file mode 100644
index 00000000..65640c28
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_utils.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from io import StringIO
+
+import numpy as np
+
+FILE = StringIO()
+AR = np.arange(10, dtype=np.float64)
+
+def func(a: int) -> bool: ...
+
+np.deprecate(func)
+np.deprecate()
+
+np.deprecate_with_doc("test")
+np.deprecate_with_doc(None)
+
+np.byte_bounds(AR)
+np.byte_bounds(np.float64())
+
+np.info(1, output=FILE)
+
+np.source(np.interp, output=FILE)
+
+np.lookfor("binary representation", output=FILE)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_version.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_version.py
new file mode 100644
index 00000000..f3825eca
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/lib_version.py
@@ -0,0 +1,22 @@
+from numpy.lib import NumpyVersion
+
+version = NumpyVersion("1.8.0")
+
+version.vstring
+version.version
+version.major
+version.minor
+version.bugfix
+version.pre_release
+version.is_devversion
+
+version == version
+version != version
+version < "1.8.0"
+version <= version
+version > version
+version >= "1.8.0"
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/literal.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/literal.py
new file mode 100644
index 00000000..d06431ee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/literal.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from functools import partial
+from collections.abc import Callable
+
+import pytest # type: ignore
+import numpy as np
+
+AR = np.array(0)
+AR.setflags(write=False)
+
+KACF = frozenset({None, "K", "A", "C", "F"})
+ACF = frozenset({None, "A", "C", "F"})
+CF = frozenset({None, "C", "F"})
+
+order_list: list[tuple[frozenset, Callable]] = [
+ (KACF, partial(np.ndarray, 1)),
+ (KACF, AR.tobytes),
+ (KACF, partial(AR.astype, int)),
+ (KACF, AR.copy),
+ (ACF, partial(AR.reshape, 1)),
+ (KACF, AR.flatten),
+ (KACF, AR.ravel),
+ (KACF, partial(np.array, 1)),
+ (CF, partial(np.zeros, 1)),
+ (CF, partial(np.ones, 1)),
+ (CF, partial(np.empty, 1)),
+ (CF, partial(np.full, 1, 1)),
+ (KACF, partial(np.zeros_like, AR)),
+ (KACF, partial(np.ones_like, AR)),
+ (KACF, partial(np.empty_like, AR)),
+ (KACF, partial(np.full_like, AR, 1)),
+ (KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__
+ (ACF, partial(np.reshape, AR, 1)),
+ (KACF, partial(np.ravel, AR)),
+ (KACF, partial(np.asarray, 1)),
+ (KACF, partial(np.asanyarray, 1)),
+]
+
+for order_set, func in order_list:
+ for order in order_set:
+ func(order=order)
+
+ invalid_orders = KACF - order_set
+ for order in invalid_orders:
+ with pytest.raises(ValueError):
+ func(order=order)
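+
+# Concretely (a sketch of one iteration of the loop above): ``np.zeros``
+# accepts only the C/F order literals, so e.g. ``np.zeros(1, order="K")``
+# raises ValueError at runtime -- and a type checker would reject the
+# literal "K" too, which is presumably why the list is annotated with a
+# bare ``frozenset``.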
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/mod.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/mod.py
new file mode 100644
index 00000000..b5b9afb2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/mod.py
@@ -0,0 +1,149 @@
+import numpy as np
+
+f8 = np.float64(1)
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+f4 = np.float32(1)
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+td = np.timedelta64(1, "D")
+b_ = np.bool_(1)
+
+b = bool(1)
+f = float(1)
+i = int(1)
+
+AR = np.array([1], dtype=np.bool_)
+AR.setflags(write=False)
+
+AR2 = np.array([1], dtype=np.timedelta64)
+AR2.setflags(write=False)
+
+# Time structures
+
+td % td
+td % AR2
+AR2 % td
+
+divmod(td, td)
+divmod(td, AR2)
+divmod(AR2, td)
+
+# Bool
+
+b_ % b
+b_ % i
+b_ % f
+b_ % b_
+b_ % i8
+b_ % u8
+b_ % f8
+b_ % AR
+
+divmod(b_, b)
+divmod(b_, i)
+divmod(b_, f)
+divmod(b_, b_)
+divmod(b_, i8)
+divmod(b_, u8)
+divmod(b_, f8)
+divmod(b_, AR)
+
+b % b_
+i % b_
+f % b_
+b_ % b_
+i8 % b_
+u8 % b_
+f8 % b_
+AR % b_
+
+divmod(b, b_)
+divmod(i, b_)
+divmod(f, b_)
+divmod(b_, b_)
+divmod(i8, b_)
+divmod(u8, b_)
+divmod(f8, b_)
+divmod(AR, b_)
+
+# int
+
+i8 % b
+i8 % i
+i8 % f
+i8 % i8
+i8 % f8
+i4 % i8
+i4 % f8
+i4 % i4
+i4 % f4
+i8 % AR
+
+divmod(i8, b)
+divmod(i8, i)
+divmod(i8, f)
+divmod(i8, i8)
+divmod(i8, f8)
+divmod(i8, i4)
+divmod(i8, f4)
+divmod(i4, i4)
+divmod(i4, f4)
+divmod(i8, AR)
+
+b % i8
+i % i8
+f % i8
+i8 % i8
+f8 % i8
+i8 % i4
+f8 % i4
+i4 % i4
+f4 % i4
+AR % i8
+
+divmod(b, i8)
+divmod(i, i8)
+divmod(f, i8)
+divmod(i8, i8)
+divmod(f8, i8)
+divmod(i4, i8)
+divmod(f4, i8)
+divmod(i4, i4)
+divmod(f4, i4)
+divmod(AR, i8)
+
+# float
+
+f8 % b
+f8 % i
+f8 % f
+i8 % f4
+f4 % f4
+f8 % AR
+
+divmod(f8, b)
+divmod(f8, i)
+divmod(f8, f)
+divmod(f8, f8)
+divmod(f8, f4)
+divmod(f4, f4)
+divmod(f8, AR)
+
+b % f8
+i % f8
+f % f8
+f8 % f8
+f4 % f8
+f4 % f4
+AR % f8
+
+divmod(b, f8)
+divmod(i, f8)
+divmod(f, f8)
+divmod(f8, f8)
+divmod(f4, f8)
+divmod(f4, f4)
+divmod(AR, f8)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/modules.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/modules.py
new file mode 100644
index 00000000..9261874d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/modules.py
@@ -0,0 +1,43 @@
+import numpy as np
+from numpy import f2py
+
+np.char
+np.ctypeslib
+np.emath
+np.fft
+np.lib
+np.linalg
+np.ma
+np.matrixlib
+np.polynomial
+np.random
+np.rec
+np.testing
+np.version
+
+np.lib.format
+np.lib.mixins
+np.lib.scimath
+np.lib.stride_tricks
+np.ma.extras
+np.polynomial.chebyshev
+np.polynomial.hermite
+np.polynomial.hermite_e
+np.polynomial.laguerre
+np.polynomial.legendre
+np.polynomial.polynomial
+
+np.__path__
+np.__version__
+np.__git_version__
+
+np.__all__
+np.char.__all__
+np.ctypeslib.__all__
+np.emath.__all__
+np.lib.__all__
+np.ma.__all__
+np.random.__all__
+np.rec.__all__
+np.testing.__all__
+f2py.__all__
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/multiarray.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/multiarray.py
new file mode 100644
index 00000000..26cedfd7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/multiarray.py
@@ -0,0 +1,76 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64] = np.array([1.0])
+AR_i4 = np.array([1], dtype=np.int32)
+AR_u1 = np.array([1], dtype=np.uint8)
+
+AR_LIKE_f = [1.5]
+AR_LIKE_i = [1]
+
+b_f8 = np.broadcast(AR_f8)
+b_i4_f8_f8 = np.broadcast(AR_i4, AR_f8, AR_f8)
+
+next(b_f8)
+b_f8.reset()
+b_f8.index
+b_f8.iters
+b_f8.nd
+b_f8.ndim
+b_f8.numiter
+b_f8.shape
+b_f8.size
+
+next(b_i4_f8_f8)
+b_i4_f8_f8.reset()
+b_i4_f8_f8.ndim
+b_i4_f8_f8.index
+b_i4_f8_f8.iters
+b_i4_f8_f8.nd
+b_i4_f8_f8.numiter
+b_i4_f8_f8.shape
+b_i4_f8_f8.size
+
+np.inner(AR_f8, AR_i4)
+
+np.where([True, True, False])
+np.where([True, True, False], 1, 0)
+
+np.lexsort([0, 1, 2])
+
+np.can_cast(np.dtype("i8"), int)
+np.can_cast(AR_f8, "f8")
+np.can_cast(AR_f8, np.complex128, casting="unsafe")
+
+np.min_scalar_type([1])
+np.min_scalar_type(AR_f8)
+
+np.result_type(int, AR_i4)
+np.result_type(AR_f8, AR_u1)
+np.result_type(AR_f8, np.complex128)
+
+np.dot(AR_LIKE_f, AR_i4)
+np.dot(AR_u1, 1)
+np.dot(1.5j, 1)
+np.dot(AR_u1, 1, out=AR_f8)
+
+np.vdot(AR_LIKE_f, AR_i4)
+np.vdot(AR_u1, 1)
+np.vdot(1.5j, 1)
+
+np.bincount(AR_i4)
+
+np.copyto(AR_f8, [1.6])
+
+np.putmask(AR_f8, [True], 1.5)
+
+np.packbits(AR_i4)
+np.packbits(AR_u1)
+
+np.unpackbits(AR_u1)
+
+np.shares_memory(1, 2)
+np.shares_memory(AR_f8, AR_f8, max_work=1)
+
+np.may_share_memory(1, 2)
+np.may_share_memory(AR_f8, AR_f8, max_work=1)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py
new file mode 100644
index 00000000..303cf53e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_conversion.py
@@ -0,0 +1,95 @@
+import os
+import tempfile
+
+import numpy as np
+
+nd = np.array([[1, 2], [3, 4]])
+scalar_array = np.array(1)
+
+# item
+scalar_array.item()
+nd.item(1)
+nd.item(0, 1)
+nd.item((0, 1))
+
+# tolist is pretty simple
+
+# itemset
+scalar_array.itemset(3)
+nd.itemset(3, 0)
+nd.itemset((0, 0), 3)
+
+# tobytes
+nd.tobytes()
+nd.tobytes("C")
+nd.tobytes(None)
+
+# tofile
+if os.name != "nt":
+ with tempfile.NamedTemporaryFile(suffix=".txt") as tmp:
+ nd.tofile(tmp.name)
+ nd.tofile(tmp.name, "")
+ nd.tofile(tmp.name, sep="")
+
+ nd.tofile(tmp.name, "", "%s")
+ nd.tofile(tmp.name, format="%s")
+
+ nd.tofile(tmp)
+
+# dump is pretty simple
+# dumps is pretty simple
+
+# astype
+nd.astype("float")
+nd.astype(float)
+
+nd.astype(float, "K")
+nd.astype(float, order="K")
+
+nd.astype(float, "K", "unsafe")
+nd.astype(float, casting="unsafe")
+
+nd.astype(float, "K", "unsafe", True)
+nd.astype(float, subok=True)
+
+nd.astype(float, "K", "unsafe", True, True)
+nd.astype(float, copy=True)
+
+# byteswap
+nd.byteswap()
+nd.byteswap(True)
+
+# copy
+nd.copy()
+nd.copy("C")
+
+# view
+nd.view()
+nd.view(np.int64)
+nd.view(dtype=np.int64)
+nd.view(np.int64, np.matrix)
+nd.view(type=np.matrix)
+
+# getfield
+complex_array = np.array([[1 + 1j, 0], [0, 1 - 1j]], dtype=np.complex128)
+
+complex_array.getfield("float")
+complex_array.getfield(float)
+
+complex_array.getfield("float", 8)
+complex_array.getfield(float, offset=8)
+
+# setflags
+nd.setflags()
+
+nd.setflags(True)
+nd.setflags(write=True)
+
+nd.setflags(True, True)
+nd.setflags(write=True, align=True)
+
+nd.setflags(True, True, False)
+nd.setflags(write=True, align=True, uic=False)
+
+# fill is pretty simple
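+# e.g. ``nd.fill(0)`` -- one scalar argument, filled in place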
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py
new file mode 100644
index 00000000..19a1af9e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_misc.py
@@ -0,0 +1,185 @@
+"""
+Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
+
+More extensive tests are performed for the methods'
+function-based counterparts in `fromnumeric.py`.
+
+"""
+
+from __future__ import annotations
+
+import operator
+from typing import cast, Any
+
+import numpy as np
+
+class SubClass(np.ndarray): ...
+
+i4 = np.int32(1)
+A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32)
+B0 = np.empty((), dtype=np.int32).view(SubClass)
+B1 = np.empty((1,), dtype=np.int32).view(SubClass)
+B2 = np.empty((1, 1), dtype=np.int32).view(SubClass)
+C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32)
+D = np.ones(3).view(SubClass)
+
+i4.all()
+A.all()
+A.all(axis=0)
+A.all(keepdims=True)
+A.all(out=B0)
+
+i4.any()
+A.any()
+A.any(axis=0)
+A.any(keepdims=True)
+A.any(out=B0)
+
+i4.argmax()
+A.argmax()
+A.argmax(axis=0)
+A.argmax(out=B0)
+
+i4.argmin()
+A.argmin()
+A.argmin(axis=0)
+A.argmin(out=B0)
+
+i4.argsort()
+A.argsort()
+
+i4.choose([()])
+_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32)
+C.choose(_choices)
+C.choose(_choices, out=D)
+
+i4.clip(1)
+A.clip(1)
+A.clip(None, 1)
+A.clip(1, out=B2)
+A.clip(None, 1, out=B2)
+
+i4.compress([1])
+A.compress([1])
+A.compress([1], out=B1)
+
+i4.conj()
+A.conj()
+B0.conj()
+
+i4.conjugate()
+A.conjugate()
+B0.conjugate()
+
+i4.cumprod()
+A.cumprod()
+A.cumprod(out=B1)
+
+i4.cumsum()
+A.cumsum()
+A.cumsum(out=B1)
+
+i4.max()
+A.max()
+A.max(axis=0)
+A.max(keepdims=True)
+A.max(out=B0)
+
+i4.mean()
+A.mean()
+A.mean(axis=0)
+A.mean(keepdims=True)
+A.mean(out=B0)
+
+i4.min()
+A.min()
+A.min(axis=0)
+A.min(keepdims=True)
+A.min(out=B0)
+
+i4.newbyteorder()
+A.newbyteorder()
+B0.newbyteorder('|')
+
+i4.prod()
+A.prod()
+A.prod(axis=0)
+A.prod(keepdims=True)
+A.prod(out=B0)
+
+i4.ptp()
+A.ptp()
+A.ptp(axis=0)
+A.ptp(keepdims=True)
+A.astype(int).ptp(out=B0)
+
+i4.round()
+A.round()
+A.round(out=B2)
+
+i4.repeat(1)
+A.repeat(1)
+B0.repeat(1)
+
+i4.std()
+A.std()
+A.std(axis=0)
+A.std(keepdims=True)
+A.std(out=B0.astype(np.float64))
+
+i4.sum()
+A.sum()
+A.sum(axis=0)
+A.sum(keepdims=True)
+A.sum(out=B0)
+
+i4.take(0)
+A.take(0)
+A.take([0])
+A.take(0, out=B0)
+A.take([0], out=B1)
+
+i4.var()
+A.var()
+A.var(axis=0)
+A.var(keepdims=True)
+A.var(out=B0)
+
+A.argpartition([0])
+
+A.diagonal()
+
+A.dot(1)
+A.dot(1, out=B0)
+
+A.nonzero()
+
+C.searchsorted(1)
+
+A.trace()
+A.trace(out=B0)
+
+void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0))
+void.setfield(10, np.float64)
+
+A.item(0)
+C.item(0)
+
+A.ravel()
+C.ravel()
+
+A.flatten()
+C.flatten()
+
+A.reshape(1)
+C.reshape(3)
+
+int(np.array(1.0, dtype=np.float64))
+int(np.array("1", dtype=np.str_))
+
+float(np.array(1.0, dtype=np.float64))
+float(np.array("1", dtype=np.str_))
+
+complex(np.array(1.0, dtype=np.float64))
+
+operator.index(np.array(1, dtype=np.int64))
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py
new file mode 100644
index 00000000..0ca3dff3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py
@@ -0,0 +1,47 @@
+import numpy as np
+
+nd1 = np.array([[1, 2], [3, 4]])
+
+# reshape
+nd1.reshape(4)
+nd1.reshape(2, 2)
+nd1.reshape((2, 2))
+
+nd1.reshape((2, 2), order="C")
+nd1.reshape(4, order="C")
+
+# resize
+nd1.resize()
+nd1.resize(4)
+nd1.resize(2, 2)
+nd1.resize((2, 2))
+
+nd1.resize((2, 2), refcheck=True)
+nd1.resize(4, refcheck=True)
+
+nd2 = np.array([[1, 2], [3, 4]])
+
+# transpose
+nd2.transpose()
+nd2.transpose(1, 0)
+nd2.transpose((1, 0))
+
+# swapaxes
+nd2.swapaxes(0, 1)
+
+# flatten
+nd2.flatten()
+nd2.flatten("C")
+
+# ravel
+nd2.ravel()
+nd2.ravel("C")
+
+# squeeze
+nd2.squeeze()
+
+nd3 = np.array([[1, 2]])
+nd3.squeeze(0)
+
+nd4 = np.array([[[1, 2]]])
+nd4.squeeze((0, 1))
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numeric.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numeric.py
new file mode 100644
index 00000000..c4a73c1e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numeric.py
@@ -0,0 +1,90 @@
+"""
+Tests for :mod:`numpy.core.numeric`.
+
+Does not include tests which fall under ``array_constructors``.
+
+"""
+
+from __future__ import annotations
+
+import numpy as np
+
+class SubClass(np.ndarray):
+ ...
+
+i8 = np.int64(1)
+
+A = np.arange(27).reshape(3, 3, 3)
+B: list[list[list[int]]] = A.tolist()
+C = np.empty((27, 27)).view(SubClass)
+
+np.count_nonzero(i8)
+np.count_nonzero(A)
+np.count_nonzero(B)
+np.count_nonzero(A, keepdims=True)
+np.count_nonzero(A, axis=0)
+
+np.isfortran(i8)
+np.isfortran(A)
+
+np.argwhere(i8)
+np.argwhere(A)
+
+np.flatnonzero(i8)
+np.flatnonzero(A)
+
+np.correlate(B[0][0], A.ravel(), mode="valid")
+np.correlate(A.ravel(), A.ravel(), mode="same")
+
+np.convolve(B[0][0], A.ravel(), mode="valid")
+np.convolve(A.ravel(), A.ravel(), mode="same")
+
+np.outer(i8, A)
+np.outer(B, A)
+np.outer(A, A)
+np.outer(A, A, out=C)
+
+np.tensordot(B, A)
+np.tensordot(A, A)
+np.tensordot(A, A, axes=0)
+np.tensordot(A, A, axes=(0, 1))
+
+np.isscalar(i8)
+np.isscalar(A)
+np.isscalar(B)
+
+np.roll(A, 1)
+np.roll(A, (1, 2))
+np.roll(B, 1)
+
+np.rollaxis(A, 0, 1)
+
+np.moveaxis(A, 0, 1)
+np.moveaxis(A, (0, 1), (1, 2))
+
+np.cross(B, A)
+np.cross(A, A)
+
+np.indices([0, 1, 2])
+np.indices([0, 1, 2], sparse=False)
+np.indices([0, 1, 2], sparse=True)
+
+np.binary_repr(1)
+
+np.base_repr(1)
+
+np.allclose(i8, A)
+np.allclose(B, A)
+np.allclose(A, A)
+
+np.isclose(i8, A)
+np.isclose(B, A)
+np.isclose(A, A)
+
+np.array_equal(i8, A)
+np.array_equal(B, A)
+np.array_equal(A, A)
+
+np.array_equiv(i8, A)
+np.array_equiv(B, A)
+np.array_equiv(A, A)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numerictypes.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numerictypes.py
new file mode 100644
index 00000000..7f1dd094
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/numerictypes.py
@@ -0,0 +1,47 @@
+import numpy as np
+
+np.maximum_sctype("S8")
+np.maximum_sctype(object)
+
+np.issctype(object)
+np.issctype("S8")
+
+np.obj2sctype(list)
+np.obj2sctype(list, default=None)
+np.obj2sctype(list, default=np.string_)
+
+np.issubclass_(np.int32, int)
+np.issubclass_(np.float64, float)
+np.issubclass_(np.float64, (int, float))
+
+np.issubsctype("int64", int)
+np.issubsctype(np.array([1]), np.array([1]))
+
+np.issubdtype("S1", np.string_)
+np.issubdtype(np.float64, np.float32)
+
+np.sctype2char("S1")
+np.sctype2char(list)
+
+np.find_common_type([], [np.int64, np.float32, complex])
+np.find_common_type((), (np.int64, np.float32, complex))
+np.find_common_type([np.int64, np.float32], [])
+np.find_common_type([np.float32], [np.int64, np.float64])
+
+np.cast[int]
+np.cast["i8"]
+np.cast[np.int64]
+
+np.nbytes[int]
+np.nbytes["i8"]
+np.nbytes[np.int64]
+
+np.ScalarType
+np.ScalarType[0]
+np.ScalarType[3]
+np.ScalarType[8]
+np.ScalarType[10]
+
+np.typecodes["Character"]
+np.typecodes["Complex"]
+np.typecodes["All"]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/random.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/random.py
new file mode 100644
index 00000000..6a4d99f1
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/random.py
@@ -0,0 +1,1502 @@
+from __future__ import annotations
+
+from typing import Any
+import numpy as np
+
+SEED_NONE = None
+SEED_INT = 4579435749574957634658964293569
+SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64)
+SEED_ARRLIKE: list[int] = [1, 2, 3, 4]
+SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0)
+SEED_MT19937: np.random.MT19937 = np.random.MT19937(0)
+SEED_PCG64: np.random.PCG64 = np.random.PCG64(0)
+SEED_PHILOX: np.random.Philox = np.random.Philox(0)
+SEED_SFC64: np.random.SFC64 = np.random.SFC64(0)
+
+# default rng
+np.random.default_rng()
+np.random.default_rng(SEED_NONE)
+np.random.default_rng(SEED_INT)
+np.random.default_rng(SEED_ARR)
+np.random.default_rng(SEED_ARRLIKE)
+np.random.default_rng(SEED_SEED_SEQ)
+np.random.default_rng(SEED_MT19937)
+np.random.default_rng(SEED_PCG64)
+np.random.default_rng(SEED_PHILOX)
+np.random.default_rng(SEED_SFC64)
+
+# Seed Sequence
+np.random.SeedSequence(SEED_NONE)
+np.random.SeedSequence(SEED_INT)
+np.random.SeedSequence(SEED_ARR)
+np.random.SeedSequence(SEED_ARRLIKE)
+
+# Bit Generators
+np.random.MT19937(SEED_NONE)
+np.random.MT19937(SEED_INT)
+np.random.MT19937(SEED_ARR)
+np.random.MT19937(SEED_ARRLIKE)
+np.random.MT19937(SEED_SEED_SEQ)
+
+np.random.PCG64(SEED_NONE)
+np.random.PCG64(SEED_INT)
+np.random.PCG64(SEED_ARR)
+np.random.PCG64(SEED_ARRLIKE)
+np.random.PCG64(SEED_SEED_SEQ)
+
+np.random.Philox(SEED_NONE)
+np.random.Philox(SEED_INT)
+np.random.Philox(SEED_ARR)
+np.random.Philox(SEED_ARRLIKE)
+np.random.Philox(SEED_SEED_SEQ)
+
+np.random.SFC64(SEED_NONE)
+np.random.SFC64(SEED_INT)
+np.random.SFC64(SEED_ARR)
+np.random.SFC64(SEED_ARRLIKE)
+np.random.SFC64(SEED_SEED_SEQ)
+
+seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE)
+seed_seq.spawn(10)
+seed_seq.generate_state(3)
+seed_seq.generate_state(3, "u4")
+seed_seq.generate_state(3, "uint32")
+seed_seq.generate_state(3, "u8")
+seed_seq.generate_state(3, "uint64")
+seed_seq.generate_state(3, np.uint32)
+seed_seq.generate_state(3, np.uint64)
+
+
+def_gen: np.random.Generator = np.random.default_rng()
+
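+# Naming convention for the fixtures below: D_* hold float64 ("double")
+# values, I_* hold ints; the suffix spells the value (0p5 == 0.5) and
+# *_like variants are plain Python lists.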
+D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1])
+D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5])
+D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9])
+D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5])
+I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_)
+I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_)
+D_arr_like_0p1: list[float] = [0.1]
+D_arr_like_0p5: list[float] = [0.5]
+D_arr_like_0p9: list[float] = [0.9]
+D_arr_like_1p5: list[float] = [1.5]
+I_arr_like_10: list[int] = [10]
+I_arr_like_20: list[int] = [20]
+D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]]
+D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like)
+
+S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32)
+D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1)
+
+def_gen.standard_normal()
+def_gen.standard_normal(dtype=np.float32)
+def_gen.standard_normal(dtype="float32")
+def_gen.standard_normal(dtype="double")
+def_gen.standard_normal(dtype=np.float64)
+def_gen.standard_normal(size=None)
+def_gen.standard_normal(size=1)
+def_gen.standard_normal(size=1, dtype=np.float32)
+def_gen.standard_normal(size=1, dtype="f4")
+def_gen.standard_normal(size=1, dtype="float32", out=S_out)
+def_gen.standard_normal(dtype=np.float32, out=S_out)
+def_gen.standard_normal(size=1, dtype=np.float64)
+def_gen.standard_normal(size=1, dtype="float64")
+def_gen.standard_normal(size=1, dtype="f8")
+def_gen.standard_normal(out=D_out)
+def_gen.standard_normal(size=1, dtype="float64")
+def_gen.standard_normal(size=1, dtype="float64", out=D_out)
+
+def_gen.random()
+def_gen.random(dtype=np.float32)
+def_gen.random(dtype="float32")
+def_gen.random(dtype="double")
+def_gen.random(dtype=np.float64)
+def_gen.random(size=None)
+def_gen.random(size=1)
+def_gen.random(size=1, dtype=np.float32)
+def_gen.random(size=1, dtype="f4")
+def_gen.random(size=1, dtype="float32", out=S_out)
+def_gen.random(dtype=np.float32, out=S_out)
+def_gen.random(size=1, dtype=np.float64)
+def_gen.random(size=1, dtype="float64")
+def_gen.random(size=1, dtype="f8")
+def_gen.random(out=D_out)
+def_gen.random(size=1, dtype="float64")
+def_gen.random(size=1, dtype="float64", out=D_out)
+
+def_gen.standard_cauchy()
+def_gen.standard_cauchy(size=None)
+def_gen.standard_cauchy(size=1)
+
+def_gen.standard_exponential()
+def_gen.standard_exponential(method="inv")
+def_gen.standard_exponential(dtype=np.float32)
+def_gen.standard_exponential(dtype="float32")
+def_gen.standard_exponential(dtype="double")
+def_gen.standard_exponential(dtype=np.float64)
+def_gen.standard_exponential(size=None)
+def_gen.standard_exponential(size=None, method="inv")
+def_gen.standard_exponential(size=1, method="inv")
+def_gen.standard_exponential(size=1, dtype=np.float32)
+def_gen.standard_exponential(size=1, dtype="f4", method="inv")
+def_gen.standard_exponential(size=1, dtype="float32", out=S_out)
+def_gen.standard_exponential(dtype=np.float32, out=S_out)
+def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")
+def_gen.standard_exponential(size=1, dtype="float64")
+def_gen.standard_exponential(size=1, dtype="f8")
+def_gen.standard_exponential(out=D_out)
+def_gen.standard_exponential(size=1, dtype="float64")
+def_gen.standard_exponential(size=1, dtype="float64", out=D_out)
+
+def_gen.zipf(1.5)
+def_gen.zipf(1.5, size=None)
+def_gen.zipf(1.5, size=1)
+def_gen.zipf(D_arr_1p5)
+def_gen.zipf(D_arr_1p5, size=1)
+def_gen.zipf(D_arr_like_1p5)
+def_gen.zipf(D_arr_like_1p5, size=1)
+
+def_gen.weibull(0.5)
+def_gen.weibull(0.5, size=None)
+def_gen.weibull(0.5, size=1)
+def_gen.weibull(D_arr_0p5)
+def_gen.weibull(D_arr_0p5, size=1)
+def_gen.weibull(D_arr_like_0p5)
+def_gen.weibull(D_arr_like_0p5, size=1)
+
+def_gen.standard_t(0.5)
+def_gen.standard_t(0.5, size=None)
+def_gen.standard_t(0.5, size=1)
+def_gen.standard_t(D_arr_0p5)
+def_gen.standard_t(D_arr_0p5, size=1)
+def_gen.standard_t(D_arr_like_0p5)
+def_gen.standard_t(D_arr_like_0p5, size=1)
+
+def_gen.poisson(0.5)
+def_gen.poisson(0.5, size=None)
+def_gen.poisson(0.5, size=1)
+def_gen.poisson(D_arr_0p5)
+def_gen.poisson(D_arr_0p5, size=1)
+def_gen.poisson(D_arr_like_0p5)
+def_gen.poisson(D_arr_like_0p5, size=1)
+
+def_gen.power(0.5)
+def_gen.power(0.5, size=None)
+def_gen.power(0.5, size=1)
+def_gen.power(D_arr_0p5)
+def_gen.power(D_arr_0p5, size=1)
+def_gen.power(D_arr_like_0p5)
+def_gen.power(D_arr_like_0p5, size=1)
+
+def_gen.pareto(0.5)
+def_gen.pareto(0.5, size=None)
+def_gen.pareto(0.5, size=1)
+def_gen.pareto(D_arr_0p5)
+def_gen.pareto(D_arr_0p5, size=1)
+def_gen.pareto(D_arr_like_0p5)
+def_gen.pareto(D_arr_like_0p5, size=1)
+
+def_gen.chisquare(0.5)
+def_gen.chisquare(0.5, size=None)
+def_gen.chisquare(0.5, size=1)
+def_gen.chisquare(D_arr_0p5)
+def_gen.chisquare(D_arr_0p5, size=1)
+def_gen.chisquare(D_arr_like_0p5)
+def_gen.chisquare(D_arr_like_0p5, size=1)
+
+def_gen.exponential(0.5)
+def_gen.exponential(0.5, size=None)
+def_gen.exponential(0.5, size=1)
+def_gen.exponential(D_arr_0p5)
+def_gen.exponential(D_arr_0p5, size=1)
+def_gen.exponential(D_arr_like_0p5)
+def_gen.exponential(D_arr_like_0p5, size=1)
+
+def_gen.geometric(0.5)
+def_gen.geometric(0.5, size=None)
+def_gen.geometric(0.5, size=1)
+def_gen.geometric(D_arr_0p5)
+def_gen.geometric(D_arr_0p5, size=1)
+def_gen.geometric(D_arr_like_0p5)
+def_gen.geometric(D_arr_like_0p5, size=1)
+
+def_gen.logseries(0.5)
+def_gen.logseries(0.5, size=None)
+def_gen.logseries(0.5, size=1)
+def_gen.logseries(D_arr_0p5)
+def_gen.logseries(D_arr_0p5, size=1)
+def_gen.logseries(D_arr_like_0p5)
+def_gen.logseries(D_arr_like_0p5, size=1)
+
+def_gen.rayleigh(0.5)
+def_gen.rayleigh(0.5, size=None)
+def_gen.rayleigh(0.5, size=1)
+def_gen.rayleigh(D_arr_0p5)
+def_gen.rayleigh(D_arr_0p5, size=1)
+def_gen.rayleigh(D_arr_like_0p5)
+def_gen.rayleigh(D_arr_like_0p5, size=1)
+
+def_gen.standard_gamma(0.5)
+def_gen.standard_gamma(0.5, size=None)
+def_gen.standard_gamma(0.5, dtype="float32")
+def_gen.standard_gamma(0.5, size=None, dtype="float32")
+def_gen.standard_gamma(0.5, size=1)
+def_gen.standard_gamma(D_arr_0p5)
+def_gen.standard_gamma(D_arr_0p5, dtype="f4")
+def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)
+def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)
+def_gen.standard_gamma(D_arr_0p5, size=1)
+def_gen.standard_gamma(D_arr_like_0p5)
+def_gen.standard_gamma(D_arr_like_0p5, size=1)
+def_gen.standard_gamma(0.5, out=D_out)
+def_gen.standard_gamma(D_arr_like_0p5, out=D_out)
+def_gen.standard_gamma(D_arr_like_0p5, size=1)
+def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)
+
+def_gen.vonmises(0.5, 0.5)
+def_gen.vonmises(0.5, 0.5, size=None)
+def_gen.vonmises(0.5, 0.5, size=1)
+def_gen.vonmises(D_arr_0p5, 0.5)
+def_gen.vonmises(0.5, D_arr_0p5)
+def_gen.vonmises(D_arr_0p5, 0.5, size=1)
+def_gen.vonmises(0.5, D_arr_0p5, size=1)
+def_gen.vonmises(D_arr_like_0p5, 0.5)
+def_gen.vonmises(0.5, D_arr_like_0p5)
+def_gen.vonmises(D_arr_0p5, D_arr_0p5)
+def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.wald(0.5, 0.5)
+def_gen.wald(0.5, 0.5, size=None)
+def_gen.wald(0.5, 0.5, size=1)
+def_gen.wald(D_arr_0p5, 0.5)
+def_gen.wald(0.5, D_arr_0p5)
+def_gen.wald(D_arr_0p5, 0.5, size=1)
+def_gen.wald(0.5, D_arr_0p5, size=1)
+def_gen.wald(D_arr_like_0p5, 0.5)
+def_gen.wald(0.5, D_arr_like_0p5)
+def_gen.wald(D_arr_0p5, D_arr_0p5)
+def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.uniform(0.5, 0.5)
+def_gen.uniform(0.5, 0.5, size=None)
+def_gen.uniform(0.5, 0.5, size=1)
+def_gen.uniform(D_arr_0p5, 0.5)
+def_gen.uniform(0.5, D_arr_0p5)
+def_gen.uniform(D_arr_0p5, 0.5, size=1)
+def_gen.uniform(0.5, D_arr_0p5, size=1)
+def_gen.uniform(D_arr_like_0p5, 0.5)
+def_gen.uniform(0.5, D_arr_like_0p5)
+def_gen.uniform(D_arr_0p5, D_arr_0p5)
+def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.beta(0.5, 0.5)
+def_gen.beta(0.5, 0.5, size=None)
+def_gen.beta(0.5, 0.5, size=1)
+def_gen.beta(D_arr_0p5, 0.5)
+def_gen.beta(0.5, D_arr_0p5)
+def_gen.beta(D_arr_0p5, 0.5, size=1)
+def_gen.beta(0.5, D_arr_0p5, size=1)
+def_gen.beta(D_arr_like_0p5, 0.5)
+def_gen.beta(0.5, D_arr_like_0p5)
+def_gen.beta(D_arr_0p5, D_arr_0p5)
+def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.f(0.5, 0.5)
+def_gen.f(0.5, 0.5, size=None)
+def_gen.f(0.5, 0.5, size=1)
+def_gen.f(D_arr_0p5, 0.5)
+def_gen.f(0.5, D_arr_0p5)
+def_gen.f(D_arr_0p5, 0.5, size=1)
+def_gen.f(0.5, D_arr_0p5, size=1)
+def_gen.f(D_arr_like_0p5, 0.5)
+def_gen.f(0.5, D_arr_like_0p5)
+def_gen.f(D_arr_0p5, D_arr_0p5)
+def_gen.f(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.f(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.gamma(0.5, 0.5)
+def_gen.gamma(0.5, 0.5, size=None)
+def_gen.gamma(0.5, 0.5, size=1)
+def_gen.gamma(D_arr_0p5, 0.5)
+def_gen.gamma(0.5, D_arr_0p5)
+def_gen.gamma(D_arr_0p5, 0.5, size=1)
+def_gen.gamma(0.5, D_arr_0p5, size=1)
+def_gen.gamma(D_arr_like_0p5, 0.5)
+def_gen.gamma(0.5, D_arr_like_0p5)
+def_gen.gamma(D_arr_0p5, D_arr_0p5)
+def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.gumbel(0.5, 0.5)
+def_gen.gumbel(0.5, 0.5, size=None)
+def_gen.gumbel(0.5, 0.5, size=1)
+def_gen.gumbel(D_arr_0p5, 0.5)
+def_gen.gumbel(0.5, D_arr_0p5)
+def_gen.gumbel(D_arr_0p5, 0.5, size=1)
+def_gen.gumbel(0.5, D_arr_0p5, size=1)
+def_gen.gumbel(D_arr_like_0p5, 0.5)
+def_gen.gumbel(0.5, D_arr_like_0p5)
+def_gen.gumbel(D_arr_0p5, D_arr_0p5)
+def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.laplace(0.5, 0.5)
+def_gen.laplace(0.5, 0.5, size=None)
+def_gen.laplace(0.5, 0.5, size=1)
+def_gen.laplace(D_arr_0p5, 0.5)
+def_gen.laplace(0.5, D_arr_0p5)
+def_gen.laplace(D_arr_0p5, 0.5, size=1)
+def_gen.laplace(0.5, D_arr_0p5, size=1)
+def_gen.laplace(D_arr_like_0p5, 0.5)
+def_gen.laplace(0.5, D_arr_like_0p5)
+def_gen.laplace(D_arr_0p5, D_arr_0p5)
+def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.logistic(0.5, 0.5)
+def_gen.logistic(0.5, 0.5, size=None)
+def_gen.logistic(0.5, 0.5, size=1)
+def_gen.logistic(D_arr_0p5, 0.5)
+def_gen.logistic(0.5, D_arr_0p5)
+def_gen.logistic(D_arr_0p5, 0.5, size=1)
+def_gen.logistic(0.5, D_arr_0p5, size=1)
+def_gen.logistic(D_arr_like_0p5, 0.5)
+def_gen.logistic(0.5, D_arr_like_0p5)
+def_gen.logistic(D_arr_0p5, D_arr_0p5)
+def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.lognormal(0.5, 0.5)
+def_gen.lognormal(0.5, 0.5, size=None)
+def_gen.lognormal(0.5, 0.5, size=1)
+def_gen.lognormal(D_arr_0p5, 0.5)
+def_gen.lognormal(0.5, D_arr_0p5)
+def_gen.lognormal(D_arr_0p5, 0.5, size=1)
+def_gen.lognormal(0.5, D_arr_0p5, size=1)
+def_gen.lognormal(D_arr_like_0p5, 0.5)
+def_gen.lognormal(0.5, D_arr_like_0p5)
+def_gen.lognormal(D_arr_0p5, D_arr_0p5)
+def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.noncentral_chisquare(0.5, 0.5)
+def_gen.noncentral_chisquare(0.5, 0.5, size=None)
+def_gen.noncentral_chisquare(0.5, 0.5, size=1)
+def_gen.noncentral_chisquare(D_arr_0p5, 0.5)
+def_gen.noncentral_chisquare(0.5, D_arr_0p5)
+def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
+def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)
+def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)
+def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)
+def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
+def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.normal(0.5, 0.5)
+def_gen.normal(0.5, 0.5, size=None)
+def_gen.normal(0.5, 0.5, size=1)
+def_gen.normal(D_arr_0p5, 0.5)
+def_gen.normal(0.5, D_arr_0p5)
+def_gen.normal(D_arr_0p5, 0.5, size=1)
+def_gen.normal(0.5, D_arr_0p5, size=1)
+def_gen.normal(D_arr_like_0p5, 0.5)
+def_gen.normal(0.5, D_arr_like_0p5)
+def_gen.normal(D_arr_0p5, D_arr_0p5)
+def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)
+def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)
+def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+def_gen.triangular(0.1, 0.5, 0.9)
+def_gen.triangular(0.1, 0.5, 0.9, size=None)
+def_gen.triangular(0.1, 0.5, 0.9, size=1)
+def_gen.triangular(D_arr_0p1, 0.5, 0.9)
+def_gen.triangular(0.1, D_arr_0p5, 0.9)
+def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
+def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)
+def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
+def_gen.triangular(0.5, D_arr_like_0p5, 0.9)
+def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)
+def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
+def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
+def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
+
+def_gen.noncentral_f(0.1, 0.5, 0.9)
+def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)
+def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)
+def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)
+def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)
+def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
+def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
+def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
+def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)
+def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
+def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
+def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
+def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
+
+def_gen.binomial(10, 0.5)
+def_gen.binomial(10, 0.5, size=None)
+def_gen.binomial(10, 0.5, size=1)
+def_gen.binomial(I_arr_10, 0.5)
+def_gen.binomial(10, D_arr_0p5)
+def_gen.binomial(I_arr_10, 0.5, size=1)
+def_gen.binomial(10, D_arr_0p5, size=1)
+def_gen.binomial(I_arr_like_10, 0.5)
+def_gen.binomial(10, D_arr_like_0p5)
+def_gen.binomial(I_arr_10, D_arr_0p5)
+def_gen.binomial(I_arr_like_10, D_arr_like_0p5)
+def_gen.binomial(I_arr_10, D_arr_0p5, size=1)
+def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
+
+def_gen.negative_binomial(10, 0.5)
+def_gen.negative_binomial(10, 0.5, size=None)
+def_gen.negative_binomial(10, 0.5, size=1)
+def_gen.negative_binomial(I_arr_10, 0.5)
+def_gen.negative_binomial(10, D_arr_0p5)
+def_gen.negative_binomial(I_arr_10, 0.5, size=1)
+def_gen.negative_binomial(10, D_arr_0p5, size=1)
+def_gen.negative_binomial(I_arr_like_10, 0.5)
+def_gen.negative_binomial(10, D_arr_like_0p5)
+def_gen.negative_binomial(I_arr_10, D_arr_0p5)
+def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)
+def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)
+def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
+
+def_gen.hypergeometric(20, 20, 10)
+def_gen.hypergeometric(20, 20, 10, size=None)
+def_gen.hypergeometric(20, 20, 10, size=1)
+def_gen.hypergeometric(I_arr_20, 20, 10)
+def_gen.hypergeometric(20, I_arr_20, 10)
+def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
+def_gen.hypergeometric(20, I_arr_20, 10, size=1)
+def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)
+def_gen.hypergeometric(20, I_arr_like_20, 10)
+def_gen.hypergeometric(I_arr_20, I_arr_20, 10)
+def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
+def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
+def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
+
+I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64)
+
+def_gen.integers(0, 100)
+def_gen.integers(100)
+def_gen.integers([100])
+def_gen.integers(0, [100])
+
+I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_)
+I_bool_low_like: list[int] = [0]
+I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
+I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
+
+def_gen.integers(2, dtype=bool)
+def_gen.integers(0, 2, dtype=bool)
+def_gen.integers(1, dtype=bool, endpoint=True)
+def_gen.integers(0, 1, dtype=bool, endpoint=True)
+def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)
+def_gen.integers(I_bool_high_open, dtype=bool)
+def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)
+def_gen.integers(0, I_bool_high_open, dtype=bool)
+def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)
+def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)
+def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)
+
+def_gen.integers(2, dtype=np.bool_)
+def_gen.integers(0, 2, dtype=np.bool_)
+def_gen.integers(1, dtype=np.bool_, endpoint=True)
+def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)
+def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)
+def_gen.integers(I_bool_high_open, dtype=np.bool_)
+def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)
+def_gen.integers(0, I_bool_high_open, dtype=np.bool_)
+def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)
+def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)
+def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)
+
+I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8)
+I_u1_low_like: list[int] = [0]
+I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
+I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
+
+def_gen.integers(256, dtype="u1")
+def_gen.integers(0, 256, dtype="u1")
+def_gen.integers(255, dtype="u1", endpoint=True)
+def_gen.integers(0, 255, dtype="u1", endpoint=True)
+def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)
+def_gen.integers(I_u1_high_open, dtype="u1")
+def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")
+def_gen.integers(0, I_u1_high_open, dtype="u1")
+def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)
+def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)
+def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)
+
+def_gen.integers(256, dtype="uint8")
+def_gen.integers(0, 256, dtype="uint8")
+def_gen.integers(255, dtype="uint8", endpoint=True)
+def_gen.integers(0, 255, dtype="uint8", endpoint=True)
+def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)
+def_gen.integers(I_u1_high_open, dtype="uint8")
+def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")
+def_gen.integers(0, I_u1_high_open, dtype="uint8")
+def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)
+def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)
+def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)
+
+def_gen.integers(256, dtype=np.uint8)
+def_gen.integers(0, 256, dtype=np.uint8)
+def_gen.integers(255, dtype=np.uint8, endpoint=True)
+def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)
+def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)
+def_gen.integers(I_u1_high_open, dtype=np.uint8)
+def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)
+def_gen.integers(0, I_u1_high_open, dtype=np.uint8)
+def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)
+def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)
+def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)
+
+I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16)
+I_u2_low_like: list[int] = [0]
+I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
+I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
+
+def_gen.integers(65536, dtype="u2")
+def_gen.integers(0, 65536, dtype="u2")
+def_gen.integers(65535, dtype="u2", endpoint=True)
+def_gen.integers(0, 65535, dtype="u2", endpoint=True)
+def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)
+def_gen.integers(I_u2_high_open, dtype="u2")
+def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")
+def_gen.integers(0, I_u2_high_open, dtype="u2")
+def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)
+def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)
+def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)
+
+def_gen.integers(65536, dtype="uint16")
+def_gen.integers(0, 65536, dtype="uint16")
+def_gen.integers(65535, dtype="uint16", endpoint=True)
+def_gen.integers(0, 65535, dtype="uint16", endpoint=True)
+def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)
+def_gen.integers(I_u2_high_open, dtype="uint16")
+def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")
+def_gen.integers(0, I_u2_high_open, dtype="uint16")
+def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)
+def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)
+def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)
+
+def_gen.integers(65536, dtype=np.uint16)
+def_gen.integers(0, 65536, dtype=np.uint16)
+def_gen.integers(65535, dtype=np.uint16, endpoint=True)
+def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)
+def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)
+def_gen.integers(I_u2_high_open, dtype=np.uint16)
+def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)
+def_gen.integers(0, I_u2_high_open, dtype=np.uint16)
+def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)
+def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)
+def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)
+
+I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32)
+I_u4_low_like: list[int] = [0]
+I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
+I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
+
+def_gen.integers(4294967296, dtype="u4")
+def_gen.integers(0, 4294967296, dtype="u4")
+def_gen.integers(4294967295, dtype="u4", endpoint=True)
+def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)
+def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)
+def_gen.integers(I_u4_high_open, dtype="u4")
+def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")
+def_gen.integers(0, I_u4_high_open, dtype="u4")
+def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)
+def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)
+def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)
+
+def_gen.integers(4294967296, dtype="uint32")
+def_gen.integers(0, 4294967296, dtype="uint32")
+def_gen.integers(4294967295, dtype="uint32", endpoint=True)
+def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)
+def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)
+def_gen.integers(I_u4_high_open, dtype="uint32")
+def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")
+def_gen.integers(0, I_u4_high_open, dtype="uint32")
+def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)
+def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)
+def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)
+
+def_gen.integers(4294967296, dtype=np.uint32)
+def_gen.integers(0, 4294967296, dtype=np.uint32)
+def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)
+def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)
+def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)
+def_gen.integers(I_u4_high_open, dtype=np.uint32)
+def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)
+def_gen.integers(0, I_u4_high_open, dtype=np.uint32)
+def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)
+def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)
+def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)
+
+I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64)
+I_u8_low_like: list[int] = [0]
+I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
+I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
+
+def_gen.integers(18446744073709551616, dtype="u8")
+def_gen.integers(0, 18446744073709551616, dtype="u8")
+def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)
+def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)
+def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)
+def_gen.integers(I_u8_high_open, dtype="u8")
+def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")
+def_gen.integers(0, I_u8_high_open, dtype="u8")
+def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)
+def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)
+def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)
+
+def_gen.integers(18446744073709551616, dtype="uint64")
+def_gen.integers(0, 18446744073709551616, dtype="uint64")
+def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)
+def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)
+def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)
+def_gen.integers(I_u8_high_open, dtype="uint64")
+def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")
+def_gen.integers(0, I_u8_high_open, dtype="uint64")
+def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)
+def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)
+def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)
+
+def_gen.integers(18446744073709551616, dtype=np.uint64)
+def_gen.integers(0, 18446744073709551616, dtype=np.uint64)
+def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)
+def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)
+def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)
+def_gen.integers(I_u8_high_open, dtype=np.uint64)
+def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)
+def_gen.integers(0, I_u8_high_open, dtype=np.uint64)
+def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)
+def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)
+def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)
+
+I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8)
+I_i1_low_like: list[int] = [-128]
+I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
+I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
+
+def_gen.integers(128, dtype="i1")
+def_gen.integers(-128, 128, dtype="i1")
+def_gen.integers(127, dtype="i1", endpoint=True)
+def_gen.integers(-128, 127, dtype="i1", endpoint=True)
+def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)
+def_gen.integers(I_i1_high_open, dtype="i1")
+def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")
+def_gen.integers(-128, I_i1_high_open, dtype="i1")
+def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)
+def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)
+def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)
+
+def_gen.integers(128, dtype="int8")
+def_gen.integers(-128, 128, dtype="int8")
+def_gen.integers(127, dtype="int8", endpoint=True)
+def_gen.integers(-128, 127, dtype="int8", endpoint=True)
+def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)
+def_gen.integers(I_i1_high_open, dtype="int8")
+def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")
+def_gen.integers(-128, I_i1_high_open, dtype="int8")
+def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)
+def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)
+def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)
+
+def_gen.integers(128, dtype=np.int8)
+def_gen.integers(-128, 128, dtype=np.int8)
+def_gen.integers(127, dtype=np.int8, endpoint=True)
+def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)
+def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)
+def_gen.integers(I_i1_high_open, dtype=np.int8)
+def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)
+def_gen.integers(-128, I_i1_high_open, dtype=np.int8)
+def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)
+def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)
+def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)
+
+I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16)
+I_i2_low_like: list[int] = [-32768]
+I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
+I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
+
+def_gen.integers(32768, dtype="i2")
+def_gen.integers(-32768, 32768, dtype="i2")
+def_gen.integers(32767, dtype="i2", endpoint=True)
+def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)
+def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)
+def_gen.integers(I_i2_high_open, dtype="i2")
+def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")
+def_gen.integers(-32768, I_i2_high_open, dtype="i2")
+def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)
+def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)
+def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)
+
+def_gen.integers(32768, dtype="int16")
+def_gen.integers(-32768, 32768, dtype="int16")
+def_gen.integers(32767, dtype="int16", endpoint=True)
+def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)
+def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)
+def_gen.integers(I_i2_high_open, dtype="int16")
+def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")
+def_gen.integers(-32768, I_i2_high_open, dtype="int16")
+def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)
+def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)
+def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)
+
+def_gen.integers(32768, dtype=np.int16)
+def_gen.integers(-32768, 32768, dtype=np.int16)
+def_gen.integers(32767, dtype=np.int16, endpoint=True)
+def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)
+def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)
+def_gen.integers(I_i2_high_open, dtype=np.int16)
+def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)
+def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)
+def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)
+def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)
+def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)
+
+I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32)
+I_i4_low_like: list[int] = [-2147483648]
+I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
+I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
+
+def_gen.integers(2147483648, dtype="i4")
+def_gen.integers(-2147483648, 2147483648, dtype="i4")
+def_gen.integers(2147483647, dtype="i4", endpoint=True)
+def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)
+def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)
+def_gen.integers(I_i4_high_open, dtype="i4")
+def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")
+def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")
+def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)
+def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)
+def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)
+
+def_gen.integers(2147483648, dtype="int32")
+def_gen.integers(-2147483648, 2147483648, dtype="int32")
+def_gen.integers(2147483647, dtype="int32", endpoint=True)
+def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)
+def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)
+def_gen.integers(I_i4_high_open, dtype="int32")
+def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")
+def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")
+def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)
+def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)
+def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)
+
+def_gen.integers(2147483648, dtype=np.int32)
+def_gen.integers(-2147483648, 2147483648, dtype=np.int32)
+def_gen.integers(2147483647, dtype=np.int32, endpoint=True)
+def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)
+def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)
+def_gen.integers(I_i4_high_open, dtype=np.int32)
+def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)
+def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)
+def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)
+def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)
+def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)
+
+I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64)
+I_i8_low_like: list[int] = [-9223372036854775808]
+I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
+I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
+
+def_gen.integers(9223372036854775808, dtype="i8")
+def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")
+def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)
+def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)
+def_gen.integers(I_i8_high_open, dtype="i8")
+def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")
+def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)
+def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)
+def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)
+
+def_gen.integers(9223372036854775808, dtype="int64")
+def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")
+def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)
+def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)
+def_gen.integers(I_i8_high_open, dtype="int64")
+def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")
+def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)
+def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)
+def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)
+
+def_gen.integers(9223372036854775808, dtype=np.int64)
+def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)
+def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)
+def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)
+def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)
+def_gen.integers(I_i8_high_open, dtype=np.int64)
+def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)
+def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)
+def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)
+def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)
+
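+# A small illustrative sketch of the semantics the matrix above exercises (the
+# two names below are new here, not part of the original matrix):
+# integers(low, high) draws from the half-open interval [low, high), while
+# endpoint=True closes it to [low, high].
+sketch_open = def_gen.integers(0, 256, dtype=np.uint8)  # 256 itself is never drawn
+sketch_closed = def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)  # 255 can occur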
+
+def_gen.bit_generator
+
+def_gen.bytes(2)
+
+def_gen.choice(5)
+def_gen.choice(5, 3)
+def_gen.choice(5, 3, replace=True)
+def_gen.choice(5, 3, p=[1 / 5] * 5)
+def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)
+
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
+def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
+
+def_gen.dirichlet([0.5, 0.5])
+def_gen.dirichlet(np.array([0.5, 0.5]))
+def_gen.dirichlet(np.array([0.5, 0.5]), size=3)
+
+def_gen.multinomial(20, [1 / 6.0] * 6)
+def_gen.multinomial(20, np.array([0.5, 0.5]))
+def_gen.multinomial(20, [1 / 6.0] * 6, size=2)
+def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))
+def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))
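+# Unlike legacy RandomState.multinomial further below, Generator.multinomial
+# also broadcasts an array-like `n` against `pvals`, as the two calls just
+# above exercise.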
+
+def_gen.multivariate_hypergeometric([3, 5, 7], 2)
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))
+def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")
+def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")
+
+def_gen.multivariate_normal([0.0], [[1.0]])
+def_gen.multivariate_normal([0.0], np.array([[1.0]]))
+def_gen.multivariate_normal(np.array([0.0]), [[1.0]])
+def_gen.multivariate_normal(np.array([0.0]), np.array([[1.0]]))
+
+def_gen.permutation(10)
+def_gen.permutation([1, 2, 3, 4])
+def_gen.permutation(np.array([1, 2, 3, 4]))
+def_gen.permutation(D_2D, axis=1)
+def_gen.permuted(D_2D)
+def_gen.permuted(D_2D_like)
+def_gen.permuted(D_2D, axis=1)
+def_gen.permuted(D_2D, out=D_2D)
+def_gen.permuted(D_2D_like, out=D_2D)
+def_gen.permuted(D_2D, axis=1, out=D_2D)
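+
+# A one-line sketch of the distinction exercised above (the name is new here):
+# permutation() returns a shuffled copy, shuffle() works in place, and
+# permuted() does either, writing into `out` when one is given.
+permuted_copy = def_gen.permuted(D_2D)  # fresh array; D_2D itself is untouched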
+
+def_gen.shuffle(np.arange(10))
+def_gen.shuffle([1, 2, 3, 4, 5])
+def_gen.shuffle(D_2D, axis=1)
+
+def_gen.__str__()
+def_gen.__repr__()
+def_gen_state: dict[str, Any]
+def_gen_state = def_gen.__getstate__()
+def_gen.__setstate__(def_gen_state)
+
+# RandomState
+random_st: np.random.RandomState = np.random.RandomState()
+
+random_st.standard_normal()
+random_st.standard_normal(size=None)
+random_st.standard_normal(size=1)
+
+random_st.random()
+random_st.random(size=None)
+random_st.random(size=1)
+
+random_st.standard_cauchy()
+random_st.standard_cauchy(size=None)
+random_st.standard_cauchy(size=1)
+
+random_st.standard_exponential()
+random_st.standard_exponential(size=None)
+random_st.standard_exponential(size=1)
+
+random_st.zipf(1.5)
+random_st.zipf(1.5, size=None)
+random_st.zipf(1.5, size=1)
+random_st.zipf(D_arr_1p5)
+random_st.zipf(D_arr_1p5, size=1)
+random_st.zipf(D_arr_like_1p5)
+random_st.zipf(D_arr_like_1p5, size=1)
+
+random_st.weibull(0.5)
+random_st.weibull(0.5, size=None)
+random_st.weibull(0.5, size=1)
+random_st.weibull(D_arr_0p5)
+random_st.weibull(D_arr_0p5, size=1)
+random_st.weibull(D_arr_like_0p5)
+random_st.weibull(D_arr_like_0p5, size=1)
+
+random_st.standard_t(0.5)
+random_st.standard_t(0.5, size=None)
+random_st.standard_t(0.5, size=1)
+random_st.standard_t(D_arr_0p5)
+random_st.standard_t(D_arr_0p5, size=1)
+random_st.standard_t(D_arr_like_0p5)
+random_st.standard_t(D_arr_like_0p5, size=1)
+
+random_st.poisson(0.5)
+random_st.poisson(0.5, size=None)
+random_st.poisson(0.5, size=1)
+random_st.poisson(D_arr_0p5)
+random_st.poisson(D_arr_0p5, size=1)
+random_st.poisson(D_arr_like_0p5)
+random_st.poisson(D_arr_like_0p5, size=1)
+
+random_st.power(0.5)
+random_st.power(0.5, size=None)
+random_st.power(0.5, size=1)
+random_st.power(D_arr_0p5)
+random_st.power(D_arr_0p5, size=1)
+random_st.power(D_arr_like_0p5)
+random_st.power(D_arr_like_0p5, size=1)
+
+random_st.pareto(0.5)
+random_st.pareto(0.5, size=None)
+random_st.pareto(0.5, size=1)
+random_st.pareto(D_arr_0p5)
+random_st.pareto(D_arr_0p5, size=1)
+random_st.pareto(D_arr_like_0p5)
+random_st.pareto(D_arr_like_0p5, size=1)
+
+random_st.chisquare(0.5)
+random_st.chisquare(0.5, size=None)
+random_st.chisquare(0.5, size=1)
+random_st.chisquare(D_arr_0p5)
+random_st.chisquare(D_arr_0p5, size=1)
+random_st.chisquare(D_arr_like_0p5)
+random_st.chisquare(D_arr_like_0p5, size=1)
+
+random_st.exponential(0.5)
+random_st.exponential(0.5, size=None)
+random_st.exponential(0.5, size=1)
+random_st.exponential(D_arr_0p5)
+random_st.exponential(D_arr_0p5, size=1)
+random_st.exponential(D_arr_like_0p5)
+random_st.exponential(D_arr_like_0p5, size=1)
+
+random_st.geometric(0.5)
+random_st.geometric(0.5, size=None)
+random_st.geometric(0.5, size=1)
+random_st.geometric(D_arr_0p5)
+random_st.geometric(D_arr_0p5, size=1)
+random_st.geometric(D_arr_like_0p5)
+random_st.geometric(D_arr_like_0p5, size=1)
+
+random_st.logseries(0.5)
+random_st.logseries(0.5, size=None)
+random_st.logseries(0.5, size=1)
+random_st.logseries(D_arr_0p5)
+random_st.logseries(D_arr_0p5, size=1)
+random_st.logseries(D_arr_like_0p5)
+random_st.logseries(D_arr_like_0p5, size=1)
+
+random_st.rayleigh(0.5)
+random_st.rayleigh(0.5, size=None)
+random_st.rayleigh(0.5, size=1)
+random_st.rayleigh(D_arr_0p5)
+random_st.rayleigh(D_arr_0p5, size=1)
+random_st.rayleigh(D_arr_like_0p5)
+random_st.rayleigh(D_arr_like_0p5, size=1)
+
+random_st.standard_gamma(0.5)
+random_st.standard_gamma(0.5, size=None)
+random_st.standard_gamma(0.5, size=1)
+random_st.standard_gamma(D_arr_0p5)
+random_st.standard_gamma(D_arr_0p5, size=1)
+random_st.standard_gamma(D_arr_like_0p5)
+random_st.standard_gamma(D_arr_like_0p5, size=1)
+
+random_st.vonmises(0.5, 0.5)
+random_st.vonmises(0.5, 0.5, size=None)
+random_st.vonmises(0.5, 0.5, size=1)
+random_st.vonmises(D_arr_0p5, 0.5)
+random_st.vonmises(0.5, D_arr_0p5)
+random_st.vonmises(D_arr_0p5, 0.5, size=1)
+random_st.vonmises(0.5, D_arr_0p5, size=1)
+random_st.vonmises(D_arr_like_0p5, 0.5)
+random_st.vonmises(0.5, D_arr_like_0p5)
+random_st.vonmises(D_arr_0p5, D_arr_0p5)
+random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)
+random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)
+random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.wald(0.5, 0.5)
+random_st.wald(0.5, 0.5, size=None)
+random_st.wald(0.5, 0.5, size=1)
+random_st.wald(D_arr_0p5, 0.5)
+random_st.wald(0.5, D_arr_0p5)
+random_st.wald(D_arr_0p5, 0.5, size=1)
+random_st.wald(0.5, D_arr_0p5, size=1)
+random_st.wald(D_arr_like_0p5, 0.5)
+random_st.wald(0.5, D_arr_like_0p5)
+random_st.wald(D_arr_0p5, D_arr_0p5)
+random_st.wald(D_arr_like_0p5, D_arr_like_0p5)
+random_st.wald(D_arr_0p5, D_arr_0p5, size=1)
+random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.uniform(0.5, 0.5)
+random_st.uniform(0.5, 0.5, size=None)
+random_st.uniform(0.5, 0.5, size=1)
+random_st.uniform(D_arr_0p5, 0.5)
+random_st.uniform(0.5, D_arr_0p5)
+random_st.uniform(D_arr_0p5, 0.5, size=1)
+random_st.uniform(0.5, D_arr_0p5, size=1)
+random_st.uniform(D_arr_like_0p5, 0.5)
+random_st.uniform(0.5, D_arr_like_0p5)
+random_st.uniform(D_arr_0p5, D_arr_0p5)
+random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)
+random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)
+random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.beta(0.5, 0.5)
+random_st.beta(0.5, 0.5, size=None)
+random_st.beta(0.5, 0.5, size=1)
+random_st.beta(D_arr_0p5, 0.5)
+random_st.beta(0.5, D_arr_0p5)
+random_st.beta(D_arr_0p5, 0.5, size=1)
+random_st.beta(0.5, D_arr_0p5, size=1)
+random_st.beta(D_arr_like_0p5, 0.5)
+random_st.beta(0.5, D_arr_like_0p5)
+random_st.beta(D_arr_0p5, D_arr_0p5)
+random_st.beta(D_arr_like_0p5, D_arr_like_0p5)
+random_st.beta(D_arr_0p5, D_arr_0p5, size=1)
+random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.f(0.5, 0.5)
+random_st.f(0.5, 0.5, size=None)
+random_st.f(0.5, 0.5, size=1)
+random_st.f(D_arr_0p5, 0.5)
+random_st.f(0.5, D_arr_0p5)
+random_st.f(D_arr_0p5, 0.5, size=1)
+random_st.f(0.5, D_arr_0p5, size=1)
+random_st.f(D_arr_like_0p5, 0.5)
+random_st.f(0.5, D_arr_like_0p5)
+random_st.f(D_arr_0p5, D_arr_0p5)
+random_st.f(D_arr_like_0p5, D_arr_like_0p5)
+random_st.f(D_arr_0p5, D_arr_0p5, size=1)
+random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.gamma(0.5, 0.5)
+random_st.gamma(0.5, 0.5, size=None)
+random_st.gamma(0.5, 0.5, size=1)
+random_st.gamma(D_arr_0p5, 0.5)
+random_st.gamma(0.5, D_arr_0p5)
+random_st.gamma(D_arr_0p5, 0.5, size=1)
+random_st.gamma(0.5, D_arr_0p5, size=1)
+random_st.gamma(D_arr_like_0p5, 0.5)
+random_st.gamma(0.5, D_arr_like_0p5)
+random_st.gamma(D_arr_0p5, D_arr_0p5)
+random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)
+random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)
+random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.gumbel(0.5, 0.5)
+random_st.gumbel(0.5, 0.5, size=None)
+random_st.gumbel(0.5, 0.5, size=1)
+random_st.gumbel(D_arr_0p5, 0.5)
+random_st.gumbel(0.5, D_arr_0p5)
+random_st.gumbel(D_arr_0p5, 0.5, size=1)
+random_st.gumbel(0.5, D_arr_0p5, size=1)
+random_st.gumbel(D_arr_like_0p5, 0.5)
+random_st.gumbel(0.5, D_arr_like_0p5)
+random_st.gumbel(D_arr_0p5, D_arr_0p5)
+random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)
+random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)
+random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.laplace(0.5, 0.5)
+random_st.laplace(0.5, 0.5, size=None)
+random_st.laplace(0.5, 0.5, size=1)
+random_st.laplace(D_arr_0p5, 0.5)
+random_st.laplace(0.5, D_arr_0p5)
+random_st.laplace(D_arr_0p5, 0.5, size=1)
+random_st.laplace(0.5, D_arr_0p5, size=1)
+random_st.laplace(D_arr_like_0p5, 0.5)
+random_st.laplace(0.5, D_arr_like_0p5)
+random_st.laplace(D_arr_0p5, D_arr_0p5)
+random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)
+random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)
+random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.logistic(0.5, 0.5)
+random_st.logistic(0.5, 0.5, size=None)
+random_st.logistic(0.5, 0.5, size=1)
+random_st.logistic(D_arr_0p5, 0.5)
+random_st.logistic(0.5, D_arr_0p5)
+random_st.logistic(D_arr_0p5, 0.5, size=1)
+random_st.logistic(0.5, D_arr_0p5, size=1)
+random_st.logistic(D_arr_like_0p5, 0.5)
+random_st.logistic(0.5, D_arr_like_0p5)
+random_st.logistic(D_arr_0p5, D_arr_0p5)
+random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)
+random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)
+random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.lognormal(0.5, 0.5)
+random_st.lognormal(0.5, 0.5, size=None)
+random_st.lognormal(0.5, 0.5, size=1)
+random_st.lognormal(D_arr_0p5, 0.5)
+random_st.lognormal(0.5, D_arr_0p5)
+random_st.lognormal(D_arr_0p5, 0.5, size=1)
+random_st.lognormal(0.5, D_arr_0p5, size=1)
+random_st.lognormal(D_arr_like_0p5, 0.5)
+random_st.lognormal(0.5, D_arr_like_0p5)
+random_st.lognormal(D_arr_0p5, D_arr_0p5)
+random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)
+random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)
+random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.noncentral_chisquare(0.5, 0.5)
+random_st.noncentral_chisquare(0.5, 0.5, size=None)
+random_st.noncentral_chisquare(0.5, 0.5, size=1)
+random_st.noncentral_chisquare(D_arr_0p5, 0.5)
+random_st.noncentral_chisquare(0.5, D_arr_0p5)
+random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
+random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)
+random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)
+random_st.noncentral_chisquare(0.5, D_arr_like_0p5)
+random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
+random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
+random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
+random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.normal(0.5, 0.5)
+random_st.normal(0.5, 0.5, size=None)
+random_st.normal(0.5, 0.5, size=1)
+random_st.normal(D_arr_0p5, 0.5)
+random_st.normal(0.5, D_arr_0p5)
+random_st.normal(D_arr_0p5, 0.5, size=1)
+random_st.normal(0.5, D_arr_0p5, size=1)
+random_st.normal(D_arr_like_0p5, 0.5)
+random_st.normal(0.5, D_arr_like_0p5)
+random_st.normal(D_arr_0p5, D_arr_0p5)
+random_st.normal(D_arr_like_0p5, D_arr_like_0p5)
+random_st.normal(D_arr_0p5, D_arr_0p5, size=1)
+random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
+
+random_st.triangular(0.1, 0.5, 0.9)
+random_st.triangular(0.1, 0.5, 0.9, size=None)
+random_st.triangular(0.1, 0.5, 0.9, size=1)
+random_st.triangular(D_arr_0p1, 0.5, 0.9)
+random_st.triangular(0.1, D_arr_0p5, 0.9)
+random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
+random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)
+random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
+random_st.triangular(0.5, D_arr_like_0p5, 0.9)
+random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)
+random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
+random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
+random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
+
+random_st.noncentral_f(0.1, 0.5, 0.9)
+random_st.noncentral_f(0.1, 0.5, 0.9, size=None)
+random_st.noncentral_f(0.1, 0.5, 0.9, size=1)
+random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)
+random_st.noncentral_f(0.1, D_arr_0p5, 0.9)
+random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
+random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
+random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
+random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)
+random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
+random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
+random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
+random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
+
+random_st.binomial(10, 0.5)
+random_st.binomial(10, 0.5, size=None)
+random_st.binomial(10, 0.5, size=1)
+random_st.binomial(I_arr_10, 0.5)
+random_st.binomial(10, D_arr_0p5)
+random_st.binomial(I_arr_10, 0.5, size=1)
+random_st.binomial(10, D_arr_0p5, size=1)
+random_st.binomial(I_arr_like_10, 0.5)
+random_st.binomial(10, D_arr_like_0p5)
+random_st.binomial(I_arr_10, D_arr_0p5)
+random_st.binomial(I_arr_like_10, D_arr_like_0p5)
+random_st.binomial(I_arr_10, D_arr_0p5, size=1)
+random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
+
+random_st.negative_binomial(10, 0.5)
+random_st.negative_binomial(10, 0.5, size=None)
+random_st.negative_binomial(10, 0.5, size=1)
+random_st.negative_binomial(I_arr_10, 0.5)
+random_st.negative_binomial(10, D_arr_0p5)
+random_st.negative_binomial(I_arr_10, 0.5, size=1)
+random_st.negative_binomial(10, D_arr_0p5, size=1)
+random_st.negative_binomial(I_arr_like_10, 0.5)
+random_st.negative_binomial(10, D_arr_like_0p5)
+random_st.negative_binomial(I_arr_10, D_arr_0p5)
+random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)
+random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)
+random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
+
+random_st.hypergeometric(20, 20, 10)
+random_st.hypergeometric(20, 20, 10, size=None)
+random_st.hypergeometric(20, 20, 10, size=1)
+random_st.hypergeometric(I_arr_20, 20, 10)
+random_st.hypergeometric(20, I_arr_20, 10)
+random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
+random_st.hypergeometric(20, I_arr_20, 10, size=1)
+random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)
+random_st.hypergeometric(20, I_arr_like_20, 10)
+random_st.hypergeometric(I_arr_20, I_arr_20, 10)
+random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
+random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
+random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
+
+random_st.randint(0, 100)
+random_st.randint(100)
+random_st.randint([100])
+random_st.randint(0, [100])
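+# Legacy randint mirrors Generator.integers but always samples the half-open
+# interval [low, high); there is no endpoint keyword, so the endpoint=True
+# rows of the Generator matrix above have no counterpart in what follows.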
+
+random_st.randint(2, dtype=bool)
+random_st.randint(0, 2, dtype=bool)
+random_st.randint(I_bool_high_open, dtype=bool)
+random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)
+random_st.randint(0, I_bool_high_open, dtype=bool)
+
+random_st.randint(2, dtype=np.bool_)
+random_st.randint(0, 2, dtype=np.bool_)
+random_st.randint(I_bool_high_open, dtype=np.bool_)
+random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)
+random_st.randint(0, I_bool_high_open, dtype=np.bool_)
+
+random_st.randint(256, dtype="u1")
+random_st.randint(0, 256, dtype="u1")
+random_st.randint(I_u1_high_open, dtype="u1")
+random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")
+random_st.randint(0, I_u1_high_open, dtype="u1")
+
+random_st.randint(256, dtype="uint8")
+random_st.randint(0, 256, dtype="uint8")
+random_st.randint(I_u1_high_open, dtype="uint8")
+random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")
+random_st.randint(0, I_u1_high_open, dtype="uint8")
+
+random_st.randint(256, dtype=np.uint8)
+random_st.randint(0, 256, dtype=np.uint8)
+random_st.randint(I_u1_high_open, dtype=np.uint8)
+random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)
+random_st.randint(0, I_u1_high_open, dtype=np.uint8)
+
+random_st.randint(65536, dtype="u2")
+random_st.randint(0, 65536, dtype="u2")
+random_st.randint(I_u2_high_open, dtype="u2")
+random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")
+random_st.randint(0, I_u2_high_open, dtype="u2")
+
+random_st.randint(65536, dtype="uint16")
+random_st.randint(0, 65536, dtype="uint16")
+random_st.randint(I_u2_high_open, dtype="uint16")
+random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")
+random_st.randint(0, I_u2_high_open, dtype="uint16")
+
+random_st.randint(65536, dtype=np.uint16)
+random_st.randint(0, 65536, dtype=np.uint16)
+random_st.randint(I_u2_high_open, dtype=np.uint16)
+random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)
+random_st.randint(0, I_u2_high_open, dtype=np.uint16)
+
+random_st.randint(4294967296, dtype="u4")
+random_st.randint(0, 4294967296, dtype="u4")
+random_st.randint(I_u4_high_open, dtype="u4")
+random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")
+random_st.randint(0, I_u4_high_open, dtype="u4")
+
+random_st.randint(4294967296, dtype="uint32")
+random_st.randint(0, 4294967296, dtype="uint32")
+random_st.randint(I_u4_high_open, dtype="uint32")
+random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")
+random_st.randint(0, I_u4_high_open, dtype="uint32")
+
+random_st.randint(4294967296, dtype=np.uint32)
+random_st.randint(0, 4294967296, dtype=np.uint32)
+random_st.randint(I_u4_high_open, dtype=np.uint32)
+random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)
+random_st.randint(0, I_u4_high_open, dtype=np.uint32)
+
+random_st.randint(18446744073709551616, dtype="u8")
+random_st.randint(0, 18446744073709551616, dtype="u8")
+random_st.randint(I_u8_high_open, dtype="u8")
+random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")
+random_st.randint(0, I_u8_high_open, dtype="u8")
+
+random_st.randint(18446744073709551616, dtype="uint64")
+random_st.randint(0, 18446744073709551616, dtype="uint64")
+random_st.randint(I_u8_high_open, dtype="uint64")
+random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")
+random_st.randint(0, I_u8_high_open, dtype="uint64")
+
+random_st.randint(18446744073709551616, dtype=np.uint64)
+random_st.randint(0, 18446744073709551616, dtype=np.uint64)
+random_st.randint(I_u8_high_open, dtype=np.uint64)
+random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)
+random_st.randint(0, I_u8_high_open, dtype=np.uint64)
+
+random_st.randint(128, dtype="i1")
+random_st.randint(-128, 128, dtype="i1")
+random_st.randint(I_i1_high_open, dtype="i1")
+random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")
+random_st.randint(-128, I_i1_high_open, dtype="i1")
+
+random_st.randint(128, dtype="int8")
+random_st.randint(-128, 128, dtype="int8")
+random_st.randint(I_i1_high_open, dtype="int8")
+random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")
+random_st.randint(-128, I_i1_high_open, dtype="int8")
+
+random_st.randint(128, dtype=np.int8)
+random_st.randint(-128, 128, dtype=np.int8)
+random_st.randint(I_i1_high_open, dtype=np.int8)
+random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)
+random_st.randint(-128, I_i1_high_open, dtype=np.int8)
+
+random_st.randint(32768, dtype="i2")
+random_st.randint(-32768, 32768, dtype="i2")
+random_st.randint(I_i2_high_open, dtype="i2")
+random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")
+random_st.randint(-32768, I_i2_high_open, dtype="i2")
+random_st.randint(32768, dtype="int16")
+random_st.randint(-32768, 32768, dtype="int16")
+random_st.randint(I_i2_high_open, dtype="int16")
+random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")
+random_st.randint(-32768, I_i2_high_open, dtype="int16")
+random_st.randint(32768, dtype=np.int16)
+random_st.randint(-32768, 32768, dtype=np.int16)
+random_st.randint(I_i2_high_open, dtype=np.int16)
+random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)
+random_st.randint(-32768, I_i2_high_open, dtype=np.int16)
+
+random_st.randint(2147483648, dtype="i4")
+random_st.randint(-2147483648, 2147483648, dtype="i4")
+random_st.randint(I_i4_high_open, dtype="i4")
+random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")
+random_st.randint(-2147483648, I_i4_high_open, dtype="i4")
+
+random_st.randint(2147483648, dtype="int32")
+random_st.randint(-2147483648, 2147483648, dtype="int32")
+random_st.randint(I_i4_high_open, dtype="int32")
+random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")
+random_st.randint(-2147483648, I_i4_high_open, dtype="int32")
+
+random_st.randint(2147483648, dtype=np.int32)
+random_st.randint(-2147483648, 2147483648, dtype=np.int32)
+random_st.randint(I_i4_high_open, dtype=np.int32)
+random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)
+random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)
+
+random_st.randint(9223372036854775808, dtype="i8")
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")
+random_st.randint(I_i8_high_open, dtype="i8")
+random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")
+
+random_st.randint(9223372036854775808, dtype="int64")
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")
+random_st.randint(I_i8_high_open, dtype="int64")
+random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")
+
+random_st.randint(9223372036854775808, dtype=np.int64)
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)
+random_st.randint(I_i8_high_open, dtype=np.int64)
+random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)
+
+bg: np.random.BitGenerator = random_st._bit_generator
+
+random_st.bytes(2)
+
+random_st.choice(5)
+random_st.choice(5, 3)
+random_st.choice(5, 3, replace=True)
+random_st.choice(5, 3, p=[1 / 5] * 5)
+random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)
+
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
+
+random_st.dirichlet([0.5, 0.5])
+random_st.dirichlet(np.array([0.5, 0.5]))
+random_st.dirichlet(np.array([0.5, 0.5]), size=3)
+
+random_st.multinomial(20, [1 / 6.0] * 6)
+random_st.multinomial(20, np.array([0.5, 0.5]))
+random_st.multinomial(20, [1 / 6.0] * 6, size=2)
+
+random_st.multivariate_normal([0.0], [[1.0]])
+random_st.multivariate_normal([0.0], np.array([[1.0]]))
+random_st.multivariate_normal(np.array([0.0]), [[1.0]])
+random_st.multivariate_normal(np.array([0.0]), np.array([[1.0]]))
+
+random_st.permutation(10)
+random_st.permutation([1, 2, 3, 4])
+random_st.permutation(np.array([1, 2, 3, 4]))
+random_st.permutation(D_2D)
+
+random_st.shuffle(np.arange(10))
+random_st.shuffle([1, 2, 3, 4, 5])
+random_st.shuffle(D_2D)
+
+np.random.RandomState(SEED_PCG64)
+np.random.RandomState(0)
+np.random.RandomState([0, 1, 2])
+random_st.__str__()
+random_st.__repr__()
+random_st_state = random_st.__getstate__()
+random_st.__setstate__(random_st_state)
+random_st.seed()
+random_st.seed(1)
+random_st.seed([0, 1])
+random_st_get_state = random_st.get_state()
+random_st_get_state_legacy = random_st.get_state(legacy=True)
+random_st.set_state(random_st_get_state)
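+
+# A minimal reproducibility sketch built from calls exercised elsewhere in this
+# file (the three names below are new here): restoring a captured state makes
+# the stream repeat.
+state_snapshot = random_st.get_state()
+draw_a = random_st.random_sample()
+random_st.set_state(state_snapshot)
+draw_b = random_st.random_sample()  # same value as draw_a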
+
+random_st.rand()
+random_st.rand(1)
+random_st.rand(1, 2)
+random_st.randn()
+random_st.randn(1)
+random_st.randn(1, 2)
+random_st.random_sample()
+random_st.random_sample(1)
+random_st.random_sample(size=(1, 2))
+
+random_st.tomaxint()
+random_st.tomaxint(1)
+random_st.tomaxint((1,))
+
+np.random.set_bit_generator(SEED_PCG64)
+np.random.get_bit_generator()
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py
new file mode 100644
index 00000000..a5c6f96e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/scalars.py
@@ -0,0 +1,248 @@
+import sys
+import datetime as dt
+
+import pytest
+import numpy as np
+
+b = np.bool_()
+u8 = np.uint64()
+i8 = np.int64()
+f8 = np.float64()
+c16 = np.complex128()
+U = np.str_()
+S = np.bytes_()
+
+
+# Construction
+class D:
+ def __index__(self) -> int:
+ return 0
+
+
+class C:
+ def __complex__(self) -> complex:
+ return 3j
+
+
+class B:
+ def __int__(self) -> int:
+ return 4
+
+
+class A:
+ def __float__(self) -> float:
+ return 4.0
+
+
+np.complex64(3j)
+np.complex64(A())
+np.complex64(C())
+np.complex128(3j)
+np.complex128(C())
+np.complex128(None)
+np.complex64("1.2")
+np.complex128(b"2j")
+
+np.int8(4)
+np.int16(3.4)
+np.int32(4)
+np.int64(-1)
+np.uint8(B())
+np.uint32()
+np.int32("1")
+np.int64(b"2")
+
+np.float16(A())
+np.float32(16)
+np.float64(3.0)
+np.float64(None)
+np.float32("1")
+np.float16(b"2.5")
+
+np.uint64(D())
+np.float32(D())
+np.complex64(D())
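+# The three D() lines above go through __index__; together with C
+# (__complex__), B (__int__) and A (__float__) they show that the scalar
+# constructors accept any object implementing the matching conversion protocol.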
+
+np.bytes_(b"hello")
+np.bytes_("hello", 'utf-8')
+np.bytes_("hello", encoding='utf-8')
+np.str_("hello")
+np.str_(b"hello", 'utf-8')
+np.str_(b"hello", encoding='utf-8')
+
+# Array-ish semantics
+np.int8().real
+np.int16().imag
+np.int32().data
+np.int64().flags
+
+np.uint8().itemsize * 2
+np.uint16().ndim + 1
+np.uint32().strides
+np.uint64().shape
+
+# Time structures
+np.datetime64()
+np.datetime64(0, "D")
+np.datetime64(0, b"D")
+np.datetime64(0, ('ms', 3))
+np.datetime64("2019")
+np.datetime64(b"2019")
+np.datetime64("2019", "D")
+np.datetime64(np.datetime64())
+np.datetime64(dt.datetime(2000, 5, 3))
+np.datetime64(dt.date(2000, 5, 3))
+np.datetime64(None)
+np.datetime64(None, "D")
+
+np.timedelta64()
+np.timedelta64(0)
+np.timedelta64(0, "D")
+np.timedelta64(0, ('ms', 3))
+np.timedelta64(0, b"D")
+np.timedelta64("3")
+np.timedelta64(b"5")
+np.timedelta64(np.timedelta64(2))
+np.timedelta64(dt.timedelta(2))
+np.timedelta64(None)
+np.timedelta64(None, "D")
+
+np.void(1)
+np.void(np.int64(1))
+np.void(True)
+np.void(np.bool_(True))
+np.void(b"test")
+np.void(np.bytes_("test"))
+np.void(object(), [("a", "O"), ("b", "O")])
+np.void(object(), dtype=[("a", "O"), ("b", "O")])
+
+# Protocols
+i8 = np.int64()
+u8 = np.uint64()
+f8 = np.float64()
+c16 = np.complex128()
+b_ = np.bool_()
+td = np.timedelta64()
+U = np.str_("1")
+S = np.bytes_("1")
+AR = np.array(1, dtype=np.float64)
+
+int(i8)
+int(u8)
+int(f8)
+int(b_)
+int(td)
+int(U)
+int(S)
+int(AR)
+with pytest.warns(np.ComplexWarning):
+ int(c16)
+
+float(i8)
+float(u8)
+float(f8)
+float(b_)
+float(td)
+float(U)
+float(S)
+float(AR)
+with pytest.warns(np.ComplexWarning):
+ float(c16)
+
+complex(i8)
+complex(u8)
+complex(f8)
+complex(c16)
+complex(b_)
+complex(td)
+complex(U)
+complex(AR)
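+# Note that complex(S) is omitted above: Python's complex() does not accept
+# bytes objects, so np.bytes_ values cannot be converted this way.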
+
+
+# Misc
+c16.dtype
+c16.real
+c16.imag
+c16.real.real
+c16.real.imag
+c16.ndim
+c16.size
+c16.itemsize
+c16.shape
+c16.strides
+c16.squeeze()
+c16.byteswap()
+c16.transpose()
+
+# Aliases
+np.string_()
+
+np.byte()
+np.short()
+np.intc()
+np.intp()
+np.int_()
+np.longlong()
+
+np.ubyte()
+np.ushort()
+np.uintc()
+np.uintp()
+np.uint()
+np.ulonglong()
+
+np.half()
+np.single()
+np.double()
+np.float_()
+np.longdouble()
+np.longfloat()
+
+np.csingle()
+np.singlecomplex()
+np.cdouble()
+np.complex_()
+np.cfloat()
+np.clongdouble()
+np.clongfloat()
+np.longcomplex()
+
+b.item()
+i8.item()
+u8.item()
+f8.item()
+c16.item()
+U.item()
+S.item()
+
+b.tolist()
+i8.tolist()
+u8.tolist()
+f8.tolist()
+c16.tolist()
+U.tolist()
+S.tolist()
+
+b.ravel()
+i8.ravel()
+u8.ravel()
+f8.ravel()
+c16.ravel()
+U.ravel()
+S.ravel()
+
+b.flatten()
+i8.flatten()
+u8.flatten()
+f8.flatten()
+c16.flatten()
+U.flatten()
+S.flatten()
+
+b.reshape(1)
+i8.reshape(1)
+u8.reshape(1)
+f8.reshape(1)
+c16.reshape(1)
+U.reshape(1)
+S.reshape(1)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple.py
new file mode 100644
index 00000000..03ca3e83
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple.py
@@ -0,0 +1,165 @@
+"""Simple expression that should pass with mypy."""
+import operator
+
+import numpy as np
+from collections.abc import Iterable
+
+# Basic checks
+array = np.array([1, 2])
+
+
+def ndarray_func(x):
+ # type: (np.ndarray) -> np.ndarray
+ return x
+
+
+ndarray_func(np.array([1, 2]))
+array == 1
+array.dtype == float
+
+# Dtype construction
+np.dtype(float)
+np.dtype(np.float64)
+np.dtype(None)
+np.dtype("float64")
+np.dtype(np.dtype(float))
+np.dtype(("U", 10))
+np.dtype((np.int32, (2, 2)))
+# Define the arguments on the previous line to prevent bidirectional
+# type inference in mypy from broadening the types.
+two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")]
+np.dtype(two_tuples_dtype)
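+# For illustration: inlining the literal, as in np.dtype([("R", "u1")]), would
+# let mypy type the list against the parameter annotation of np.dtype, which
+# can broaden the match; the separate assignment pins the literal to its
+# naturally inferred type first.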
+
+three_tuples_dtype = [("R", "u1", 2)]
+np.dtype(three_tuples_dtype)
+
+mixed_tuples_dtype = [("R", "u1"), ("G", np.unicode_, 1)]
+np.dtype(mixed_tuples_dtype)
+
+shape_tuple_dtype = [("R", "u1", (2, 2))]
+np.dtype(shape_tuple_dtype)
+
+shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.unicode_, 1)]
+np.dtype(shape_like_dtype)
+
+object_dtype = [("field1", object)]
+np.dtype(object_dtype)
+
+np.dtype((np.int32, (np.int8, 4)))
+
+# Dtype comparison
+np.dtype(float) == float
+np.dtype(float) != np.float64
+np.dtype(float) < None
+np.dtype(float) <= "float64"
+np.dtype(float) > np.dtype(float)
+np.dtype(float) >= np.dtype(("U", 10))
+
+# Iteration and indexing
+def iterable_func(x):
+ # type: (Iterable) -> Iterable
+ return x
+
+
+iterable_func(array)
+[element for element in array]
+iter(array)
+zip(array, array)
+array[1]
+array[:]
+array[...]
+array[:] = 0
+
+array_2d = np.ones((3, 3))
+array_2d[:2, :2]
+array_2d[..., 0]
+array_2d[:2, :2] = 0
+
+# Other special methods
+len(array)
+str(array)
+array_scalar = np.array(1)
+int(array_scalar)
+float(array_scalar)
+# Currently does not work due to https://github.com/python/typeshed/issues/1904
+# complex(array_scalar)
+bytes(array_scalar)
+operator.index(array_scalar)
+bool(array_scalar)
+
+# Comparisons
+array < 1
+array <= 1
+array == 1
+array != 1
+array > 1
+array >= 1
+1 < array
+1 <= array
+1 == array
+1 != array
+1 > array
+1 >= array
+
+# Binary arithmetic
+array + 1
+1 + array
+array += 1
+
+array - 1
+1 - array
+array -= 1
+
+array * 1
+1 * array
+array *= 1
+
+nonzero_array = np.array([1, 2])
+array / 1
+1 / nonzero_array
+float_array = np.array([1.0, 2.0])
+float_array /= 1
+
+array // 1
+1 // nonzero_array
+array //= 1
+
+array % 1
+1 % nonzero_array
+array %= 1
+
+divmod(array, 1)
+divmod(1, nonzero_array)
+
+array ** 1
+1 ** array
+array **= 1
+
+array << 1
+1 << array
+array <<= 1
+
+array >> 1
+1 >> array
+array >>= 1
+
+array & 1
+1 & array
+array &= 1
+
+array ^ 1
+1 ^ array
+array ^= 1
+
+array | 1
+1 | array
+array |= 1
+
+# Unary arithmetic
+-array
++array
+abs(array)
+~array
+
+# Other methods
+np.array([1, 2]).transpose()
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple_py3.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple_py3.py
new file mode 100644
index 00000000..c05a1ce6
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/simple_py3.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+array = np.array([1, 2])
+
+# The @ operator does not exist in Python 2
+array @ array
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunc_config.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunc_config.py
new file mode 100644
index 00000000..2d131424
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunc_config.py
@@ -0,0 +1,50 @@
+"""Typing tests for `numpy.core._ufunc_config`."""
+
+import numpy as np
+
+def func1(a: str, b: int) -> None: ...
+def func2(a: str, b: int, c: float = ...) -> None: ...
+def func3(a: str, b: int) -> int: ...
+
+class Write1:
+ def write(self, a: str) -> None: ...
+
+class Write2:
+ def write(self, a: str, b: int = ...) -> None: ...
+
+class Write3:
+ def write(self, a: str) -> int: ...
+
+
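+# Snapshot the process-wide ufunc error state up front so the `finally`
+# block below can restore it: `np.seterr`, `np.setbufsize` and
+# `np.seterrcall` all mutate global configuration.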
+_err_default = np.geterr()
+_bufsize_default = np.getbufsize()
+_errcall_default = np.geterrcall()
+
+try:
+ np.seterr(all=None)
+ np.seterr(divide="ignore")
+ np.seterr(over="warn")
+ np.seterr(under="call")
+ np.seterr(invalid="raise")
+ np.geterr()
+
+ np.setbufsize(4096)
+ np.getbufsize()
+
+ np.seterrcall(func1)
+ np.seterrcall(func2)
+ np.seterrcall(func3)
+ np.seterrcall(Write1())
+ np.seterrcall(Write2())
+ np.seterrcall(Write3())
+ np.geterrcall()
+
+ with np.errstate(call=func1, all="call"):
+ pass
+ with np.errstate(call=Write1(), divide="log", over="log"):
+ pass
+
+finally:
+ np.seterr(**_err_default)
+ np.setbufsize(_bufsize_default)
+ np.seterrcall(_errcall_default)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunclike.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunclike.py
new file mode 100644
index 00000000..7eac89e8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufunclike.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+from typing import Any
+import numpy as np
+
+
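+# `Object` implements just enough protocol (`__ceil__`, `__floor__`,
+# `__ge__`, `__array__`) for the ufunc-like functions below to accept
+# object arrays built from it.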
+class Object:
+ def __ceil__(self) -> Object:
+ return self
+
+ def __floor__(self) -> Object:
+ return self
+
+ def __ge__(self, value: object) -> bool:
+ return True
+
+ def __array__(self) -> np.ndarray[Any, np.dtype[np.object_]]:
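+        # Build a 0-d object array that wraps `self`.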
+ ret = np.empty((), dtype=object)
+ ret[()] = self
+ return ret
+
+
+AR_LIKE_b = [True, True, False]
+AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
+AR_LIKE_i = [1, 2, 3]
+AR_LIKE_f = [1.0, 2.0, 3.0]
+AR_LIKE_O = [Object(), Object(), Object()]
+AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5")
+
+np.fix(AR_LIKE_b)
+np.fix(AR_LIKE_u)
+np.fix(AR_LIKE_i)
+np.fix(AR_LIKE_f)
+np.fix(AR_LIKE_O)
+np.fix(AR_LIKE_f, out=AR_U)
+
+np.isposinf(AR_LIKE_b)
+np.isposinf(AR_LIKE_u)
+np.isposinf(AR_LIKE_i)
+np.isposinf(AR_LIKE_f)
+np.isposinf(AR_LIKE_f, out=AR_U)
+
+np.isneginf(AR_LIKE_b)
+np.isneginf(AR_LIKE_u)
+np.isneginf(AR_LIKE_i)
+np.isneginf(AR_LIKE_f)
+np.isneginf(AR_LIKE_f, out=AR_U)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py
new file mode 100644
index 00000000..3cc31ae5
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/ufuncs.py
@@ -0,0 +1,17 @@
+import numpy as np
+
+np.sin(1)
+np.sin([1, 2, 3])
+np.sin(1, out=np.empty(1))
+np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)])
+np.sin(1, signature="D->D")
+np.sin(1, extobj=[16, 1, lambda: None])
+# NOTE: `np.generic` subclasses are not guaranteed to support addition;
+# re-enable this once we can infer the exact return type of `np.sin(...)`.
+#
+# np.sin(1) + np.sin(1)
+np.sin.types[0]
+np.sin.__name__
+np.sin.__doc__
+
+np.abs(np.array([1]))
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py
new file mode 100644
index 00000000..a556bf6b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/pass/warnings_and_errors.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+np.AxisError("test")
+np.AxisError(1, ndim=2)
+np.AxisError(1, ndim=2, msg_prefix="error")
+np.AxisError(1, ndim=2, msg_prefix=None)
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi
new file mode 100644
index 00000000..0ca5e977
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi
@@ -0,0 +1,526 @@
+from typing import Any
+
+import numpy as np
+from numpy._typing import NDArray, _128Bit
+
+# Can't directly import `np.float128` as it is not available on all platforms
+f16: np.floating[_128Bit]
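+# (a bare annotation suffices: these reveal-suite stubs are only ever
+# type-checked, never executed, so no runtime value is needed)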
+
+c16 = np.complex128()
+f8 = np.float64()
+i8 = np.int64()
+u8 = np.uint64()
+
+c8 = np.complex64()
+f4 = np.float32()
+i4 = np.int32()
+u4 = np.uint32()
+
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ = np.bool_()
+
+b = bool()
+c = complex()
+f = float()
+i = int()
+
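+# Naming convention: the `AR_*` names are ndarrays with a fixed dtype,
+# while the `AR_LIKE_*` names are plain Python sequences that NumPy
+# coerces on use.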
+AR_b: np.ndarray[Any, np.dtype[np.bool_]]
+AR_u: np.ndarray[Any, np.dtype[np.uint32]]
+AR_i: np.ndarray[Any, np.dtype[np.int64]]
+AR_f: np.ndarray[Any, np.dtype[np.float64]]
+AR_c: np.ndarray[Any, np.dtype[np.complex128]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
+AR_O: np.ndarray[Any, np.dtype[np.object_]]
+AR_number: NDArray[np.number[Any]]
+
+AR_LIKE_b: list[bool]
+AR_LIKE_u: list[np.uint32]
+AR_LIKE_i: list[int]
+AR_LIKE_f: list[float]
+AR_LIKE_c: list[complex]
+AR_LIKE_m: list[np.timedelta64]
+AR_LIKE_M: list[np.datetime64]
+AR_LIKE_O: list[np.object_]
+
+# Array subtraction
+
+reveal_type(AR_number - AR_number) # E: ndarray[Any, dtype[number[Any]]]
+
+reveal_type(AR_b - AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_b - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_b - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_b - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_b - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_b - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_u - AR_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_LIKE_i - AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_f - AR_b) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_c - AR_b) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_m - AR_b) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_M - AR_b) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_LIKE_O - AR_b) # E: Any
+
+reveal_type(AR_u - AR_LIKE_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_u - AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_u - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_u - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_u - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_u - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_u - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b - AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_LIKE_u - AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_LIKE_i - AR_u) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_f - AR_u) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_c - AR_u) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_m - AR_u) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_M - AR_u) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_LIKE_O - AR_u) # E: Any
+
+reveal_type(AR_i - AR_LIKE_b) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_i - AR_LIKE_u) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_i - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_i - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_i - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_i - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_i - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b - AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_u - AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_i - AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_f - AR_i) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_c - AR_i) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_m - AR_i) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_M - AR_i) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_LIKE_O - AR_i) # E: Any
+
+reveal_type(AR_f - AR_LIKE_b) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f - AR_LIKE_u) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f - AR_LIKE_i) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_f - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b - AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_u - AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_i - AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_f - AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_c - AR_f) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_O - AR_f) # E: Any
+
+reveal_type(AR_c - AR_LIKE_b) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_c - AR_LIKE_u) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_c - AR_LIKE_i) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_c - AR_LIKE_f) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_c - AR_LIKE_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_c - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_u - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_i - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_f - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_c - AR_c) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(AR_LIKE_O - AR_c) # E: Any
+
+reveal_type(AR_m - AR_LIKE_b) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m - AR_LIKE_u) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m - AR_LIKE_i) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m - AR_LIKE_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b - AR_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_u - AR_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_i - AR_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_m - AR_m) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_M - AR_m) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_LIKE_O - AR_m) # E: Any
+
+reveal_type(AR_M - AR_LIKE_b) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_M - AR_LIKE_u) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_M - AR_LIKE_i) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_M - AR_LIKE_m) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(AR_M - AR_LIKE_M) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_M - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_M - AR_M) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_O - AR_M) # E: Any
+
+reveal_type(AR_O - AR_LIKE_b) # E: Any
+reveal_type(AR_O - AR_LIKE_u) # E: Any
+reveal_type(AR_O - AR_LIKE_i) # E: Any
+reveal_type(AR_O - AR_LIKE_f) # E: Any
+reveal_type(AR_O - AR_LIKE_c) # E: Any
+reveal_type(AR_O - AR_LIKE_m) # E: Any
+reveal_type(AR_O - AR_LIKE_M) # E: Any
+reveal_type(AR_O - AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b - AR_O) # E: Any
+reveal_type(AR_LIKE_u - AR_O) # E: Any
+reveal_type(AR_LIKE_i - AR_O) # E: Any
+reveal_type(AR_LIKE_f - AR_O) # E: Any
+reveal_type(AR_LIKE_c - AR_O) # E: Any
+reveal_type(AR_LIKE_m - AR_O) # E: Any
+reveal_type(AR_LIKE_M - AR_O) # E: Any
+reveal_type(AR_LIKE_O - AR_O) # E: Any
+
+# Array floor division
+
+reveal_type(AR_b // AR_LIKE_b) # E: ndarray[Any, dtype[{int8}]]
+reveal_type(AR_b // AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_b // AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_b // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_b // AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b // AR_b) # E: ndarray[Any, dtype[{int8}]]
+reveal_type(AR_LIKE_u // AR_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_LIKE_i // AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_f // AR_b) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_O // AR_b) # E: Any
+
+reveal_type(AR_u // AR_LIKE_b) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_u // AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_u // AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_u // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_u // AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b // AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_LIKE_u // AR_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(AR_LIKE_i // AR_u) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_f // AR_u) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_m // AR_u) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_O // AR_u) # E: Any
+
+reveal_type(AR_i // AR_LIKE_b) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_i // AR_LIKE_u) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_i // AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_i // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_i // AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b // AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_u // AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_i // AR_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(AR_LIKE_f // AR_i) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_m // AR_i) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_O // AR_i) # E: Any
+
+reveal_type(AR_f // AR_LIKE_b) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f // AR_LIKE_u) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f // AR_LIKE_i) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f // AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_f // AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b // AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_u // AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_i // AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_f // AR_f) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(AR_LIKE_m // AR_f) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_LIKE_O // AR_f) # E: Any
+
+reveal_type(AR_m // AR_LIKE_u) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m // AR_LIKE_i) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m // AR_LIKE_f) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(AR_m // AR_LIKE_m) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(AR_m // AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_m // AR_m) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(AR_LIKE_O // AR_m) # E: Any
+
+reveal_type(AR_O // AR_LIKE_b) # E: Any
+reveal_type(AR_O // AR_LIKE_u) # E: Any
+reveal_type(AR_O // AR_LIKE_i) # E: Any
+reveal_type(AR_O // AR_LIKE_f) # E: Any
+reveal_type(AR_O // AR_LIKE_m) # E: Any
+reveal_type(AR_O // AR_LIKE_M) # E: Any
+reveal_type(AR_O // AR_LIKE_O) # E: Any
+
+reveal_type(AR_LIKE_b // AR_O) # E: Any
+reveal_type(AR_LIKE_u // AR_O) # E: Any
+reveal_type(AR_LIKE_i // AR_O) # E: Any
+reveal_type(AR_LIKE_f // AR_O) # E: Any
+reveal_type(AR_LIKE_m // AR_O) # E: Any
+reveal_type(AR_LIKE_M // AR_O) # E: Any
+reveal_type(AR_LIKE_O // AR_O) # E: Any
+
+# Unary ops
+
+reveal_type(-f16) # E: {float128}
+reveal_type(-c16) # E: {complex128}
+reveal_type(-c8) # E: {complex64}
+reveal_type(-f8) # E: {float64}
+reveal_type(-f4) # E: {float32}
+reveal_type(-i8) # E: {int64}
+reveal_type(-i4) # E: {int32}
+reveal_type(-u8) # E: {uint64}
+reveal_type(-u4) # E: {uint32}
+reveal_type(-td) # E: timedelta64
+reveal_type(-AR_f) # E: Any
+
+reveal_type(+f16) # E: {float128}
+reveal_type(+c16) # E: {complex128}
+reveal_type(+c8) # E: {complex64}
+reveal_type(+f8) # E: {float64}
+reveal_type(+f4) # E: {float32}
+reveal_type(+i8) # E: {int64}
+reveal_type(+i4) # E: {int32}
+reveal_type(+u8) # E: {uint64}
+reveal_type(+u4) # E: {uint32}
+reveal_type(+td) # E: timedelta64
+reveal_type(+AR_f) # E: Any
+
+reveal_type(abs(f16)) # E: {float128}
+reveal_type(abs(c16)) # E: {float64}
+reveal_type(abs(c8)) # E: {float32}
+reveal_type(abs(f8)) # E: {float64}
+reveal_type(abs(f4)) # E: {float32}
+reveal_type(abs(i8)) # E: {int64}
+reveal_type(abs(i4)) # E: {int32}
+reveal_type(abs(u8)) # E: {uint64}
+reveal_type(abs(u4)) # E: {uint32}
+reveal_type(abs(td)) # E: timedelta64
+reveal_type(abs(b_)) # E: bool_
+reveal_type(abs(AR_f)) # E: Any
+
+# Time structures
+
+reveal_type(dt + td) # E: datetime64
+reveal_type(dt + i) # E: datetime64
+reveal_type(dt + i4) # E: datetime64
+reveal_type(dt + i8) # E: datetime64
+reveal_type(dt - dt) # E: timedelta64
+reveal_type(dt - i) # E: datetime64
+reveal_type(dt - i4) # E: datetime64
+reveal_type(dt - i8) # E: datetime64
+
+reveal_type(td + td) # E: timedelta64
+reveal_type(td + i) # E: timedelta64
+reveal_type(td + i4) # E: timedelta64
+reveal_type(td + i8) # E: timedelta64
+reveal_type(td - td) # E: timedelta64
+reveal_type(td - i) # E: timedelta64
+reveal_type(td - i4) # E: timedelta64
+reveal_type(td - i8) # E: timedelta64
+reveal_type(td / f) # E: timedelta64
+reveal_type(td / f4) # E: timedelta64
+reveal_type(td / f8) # E: timedelta64
+reveal_type(td / td) # E: {float64}
+reveal_type(td // td) # E: {int64}
+
+# Boolean
+
+reveal_type(b_ / b) # E: {float64}
+reveal_type(b_ / b_) # E: {float64}
+reveal_type(b_ / i) # E: {float64}
+reveal_type(b_ / i8) # E: {float64}
+reveal_type(b_ / i4) # E: {float64}
+reveal_type(b_ / u8) # E: {float64}
+reveal_type(b_ / u4) # E: {float64}
+reveal_type(b_ / f) # E: {float64}
+reveal_type(b_ / f16) # E: {float128}
+reveal_type(b_ / f8) # E: {float64}
+reveal_type(b_ / f4) # E: {float32}
+reveal_type(b_ / c) # E: {complex128}
+reveal_type(b_ / c16) # E: {complex128}
+reveal_type(b_ / c8) # E: {complex64}
+
+reveal_type(b / b_) # E: {float64}
+reveal_type(b_ / b_) # E: {float64}
+reveal_type(i / b_) # E: {float64}
+reveal_type(i8 / b_) # E: {float64}
+reveal_type(i4 / b_) # E: {float64}
+reveal_type(u8 / b_) # E: {float64}
+reveal_type(u4 / b_) # E: {float64}
+reveal_type(f / b_) # E: {float64}
+reveal_type(f16 / b_) # E: {float128}
+reveal_type(f8 / b_) # E: {float64}
+reveal_type(f4 / b_) # E: {float32}
+reveal_type(c / b_) # E: {complex128}
+reveal_type(c16 / b_) # E: {complex128}
+reveal_type(c8 / b_) # E: {complex64}
+
+# Complex
+
+reveal_type(c16 + f16) # E: {complex256}
+reveal_type(c16 + c16) # E: {complex128}
+reveal_type(c16 + f8) # E: {complex128}
+reveal_type(c16 + i8) # E: {complex128}
+reveal_type(c16 + c8) # E: {complex128}
+reveal_type(c16 + f4) # E: {complex128}
+reveal_type(c16 + i4) # E: {complex128}
+reveal_type(c16 + b_) # E: {complex128}
+reveal_type(c16 + b) # E: {complex128}
+reveal_type(c16 + c) # E: {complex128}
+reveal_type(c16 + f) # E: {complex128}
+reveal_type(c16 + i) # E: {complex128}
+reveal_type(c16 + AR_f) # E: Any
+
+reveal_type(f16 + c16) # E: {complex256}
+reveal_type(c16 + c16) # E: {complex128}
+reveal_type(f8 + c16) # E: {complex128}
+reveal_type(i8 + c16) # E: {complex128}
+reveal_type(c8 + c16) # E: {complex128}
+reveal_type(f4 + c16) # E: {complex128}
+reveal_type(i4 + c16) # E: {complex128}
+reveal_type(b_ + c16) # E: {complex128}
+reveal_type(b + c16) # E: {complex128}
+reveal_type(c + c16) # E: {complex128}
+reveal_type(f + c16) # E: {complex128}
+reveal_type(i + c16) # E: {complex128}
+reveal_type(AR_f + c16) # E: Any
+
+reveal_type(c8 + f16) # E: {complex256}
+reveal_type(c8 + c16) # E: {complex128}
+reveal_type(c8 + f8) # E: {complex128}
+reveal_type(c8 + i8) # E: {complex128}
+reveal_type(c8 + c8) # E: {complex64}
+reveal_type(c8 + f4) # E: {complex64}
+reveal_type(c8 + i4) # E: {complex64}
+reveal_type(c8 + b_) # E: {complex64}
+reveal_type(c8 + b) # E: {complex64}
+reveal_type(c8 + c) # E: {complex128}
+reveal_type(c8 + f) # E: {complex128}
+reveal_type(c8 + i) # E: complexfloating[{_NBitInt}, {_NBitInt}]
+reveal_type(c8 + AR_f) # E: Any
+
+reveal_type(f16 + c8) # E: {complex256}
+reveal_type(c16 + c8) # E: {complex128}
+reveal_type(f8 + c8) # E: {complex128}
+reveal_type(i8 + c8) # E: {complex128}
+reveal_type(c8 + c8) # E: {complex64}
+reveal_type(f4 + c8) # E: {complex64}
+reveal_type(i4 + c8) # E: {complex64}
+reveal_type(b_ + c8) # E: {complex64}
+reveal_type(b + c8) # E: {complex64}
+reveal_type(c + c8) # E: {complex128}
+reveal_type(f + c8) # E: {complex128}
+reveal_type(i + c8) # E: complexfloating[{_NBitInt}, {_NBitInt}]
+reveal_type(AR_f + c8) # E: Any
+
+# Float
+
+reveal_type(f8 + f16) # E: {float128}
+reveal_type(f8 + f8) # E: {float64}
+reveal_type(f8 + i8) # E: {float64}
+reveal_type(f8 + f4) # E: {float64}
+reveal_type(f8 + i4) # E: {float64}
+reveal_type(f8 + b_) # E: {float64}
+reveal_type(f8 + b) # E: {float64}
+reveal_type(f8 + c) # E: {complex128}
+reveal_type(f8 + f) # E: {float64}
+reveal_type(f8 + i) # E: {float64}
+reveal_type(f8 + AR_f) # E: Any
+
+reveal_type(f16 + f8) # E: {float128}
+reveal_type(f8 + f8) # E: {float64}
+reveal_type(i8 + f8) # E: {float64}
+reveal_type(f4 + f8) # E: {float64}
+reveal_type(i4 + f8) # E: {float64}
+reveal_type(b_ + f8) # E: {float64}
+reveal_type(b + f8) # E: {float64}
+reveal_type(c + f8) # E: {complex128}
+reveal_type(f + f8) # E: {float64}
+reveal_type(i + f8) # E: {float64}
+reveal_type(AR_f + f8) # E: Any
+
+reveal_type(f4 + f16) # E: {float128}
+reveal_type(f4 + f8) # E: {float64}
+reveal_type(f4 + i8) # E: {float64}
+reveal_type(f4 + f4) # E: {float32}
+reveal_type(f4 + i4) # E: {float32}
+reveal_type(f4 + b_) # E: {float32}
+reveal_type(f4 + b) # E: {float32}
+reveal_type(f4 + c) # E: {complex128}
+reveal_type(f4 + f) # E: {float64}
+reveal_type(f4 + i) # E: floating[{_NBitInt}]
+reveal_type(f4 + AR_f) # E: Any
+
+reveal_type(f16 + f4) # E: {float128}
+reveal_type(f8 + f4) # E: {float64}
+reveal_type(i8 + f4) # E: {float64}
+reveal_type(f4 + f4) # E: {float32}
+reveal_type(i4 + f4) # E: {float32}
+reveal_type(b_ + f4) # E: {float32}
+reveal_type(b + f4) # E: {float32}
+reveal_type(c + f4) # E: {complex128}
+reveal_type(f + f4) # E: {float64}
+reveal_type(i + f4) # E: floating[{_NBitInt}]
+reveal_type(AR_f + f4) # E: Any
+
+# Int
+
+reveal_type(i8 + i8) # E: {int64}
+reveal_type(i8 + u8) # E: Any
+reveal_type(i8 + i4) # E: {int64}
+reveal_type(i8 + u4) # E: Any
+reveal_type(i8 + b_) # E: {int64}
+reveal_type(i8 + b) # E: {int64}
+reveal_type(i8 + c) # E: {complex128}
+reveal_type(i8 + f) # E: {float64}
+reveal_type(i8 + i) # E: {int64}
+reveal_type(i8 + AR_f) # E: Any
+
+reveal_type(u8 + u8) # E: {uint64}
+reveal_type(u8 + i4) # E: Any
+reveal_type(u8 + u4) # E: {uint64}
+reveal_type(u8 + b_) # E: {uint64}
+reveal_type(u8 + b) # E: {uint64}
+reveal_type(u8 + c) # E: {complex128}
+reveal_type(u8 + f) # E: {float64}
+reveal_type(u8 + i) # E: Any
+reveal_type(u8 + AR_f) # E: Any
+
+reveal_type(i8 + i8) # E: {int64}
+reveal_type(u8 + i8) # E: Any
+reveal_type(i4 + i8) # E: {int64}
+reveal_type(u4 + i8) # E: Any
+reveal_type(b_ + i8) # E: {int64}
+reveal_type(b + i8) # E: {int64}
+reveal_type(c + i8) # E: {complex128}
+reveal_type(f + i8) # E: {float64}
+reveal_type(i + i8) # E: {int64}
+reveal_type(AR_f + i8) # E: Any
+
+reveal_type(u8 + u8) # E: {uint64}
+reveal_type(i4 + u8) # E: Any
+reveal_type(u4 + u8) # E: {uint64}
+reveal_type(b_ + u8) # E: {uint64}
+reveal_type(b + u8) # E: {uint64}
+reveal_type(c + u8) # E: {complex128}
+reveal_type(f + u8) # E: {float64}
+reveal_type(i + u8) # E: Any
+reveal_type(AR_f + u8) # E: Any
+
+reveal_type(i4 + i8) # E: {int64}
+reveal_type(i4 + i4) # E: {int32}
+reveal_type(i4 + i) # E: {int_}
+reveal_type(i4 + b_) # E: {int32}
+reveal_type(i4 + b) # E: {int32}
+reveal_type(i4 + AR_f) # E: Any
+
+reveal_type(u4 + i8) # E: Any
+reveal_type(u4 + i4) # E: Any
+reveal_type(u4 + u8) # E: {uint64}
+reveal_type(u4 + u4) # E: {uint32}
+reveal_type(u4 + i) # E: Any
+reveal_type(u4 + b_) # E: {uint32}
+reveal_type(u4 + b) # E: {uint32}
+reveal_type(u4 + AR_f) # E: Any
+
+reveal_type(i8 + i4) # E: {int64}
+reveal_type(i4 + i4) # E: {int32}
+reveal_type(i + i4) # E: {int_}
+reveal_type(b_ + i4) # E: {int32}
+reveal_type(b + i4) # E: {int32}
+reveal_type(AR_f + i4) # E: Any
+
+reveal_type(i8 + u4) # E: Any
+reveal_type(i4 + u4) # E: Any
+reveal_type(u8 + u4) # E: {uint64}
+reveal_type(u4 + u4) # E: {uint32}
+reveal_type(b_ + u4) # E: {uint32}
+reveal_type(b + u4) # E: {uint32}
+reveal_type(i + u4) # E: Any
+reveal_type(AR_f + u4) # E: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi
new file mode 100644
index 00000000..2ff20e9a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -0,0 +1,205 @@
+from typing import Any, TypeVar
+from pathlib import Path
+
+import numpy as np
+import numpy.typing as npt
+
+_SCT = TypeVar("_SCT", bound=np.generic, covariant=True)
+
+class SubClass(np.ndarray[Any, np.dtype[_SCT]]): ...
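+# `SubClass` exercises the code paths (e.g. `subok=True`, `asanyarray`,
+# `out=`) that are expected to preserve ndarray subclasses.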
+
+i8: np.int64
+
+A: npt.NDArray[np.float64]
+B: SubClass[np.float64]
+C: list[int]
+
+def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ...
+
+reveal_type(np.empty_like(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.empty_like(B)) # E: SubClass[{float64}]
+reveal_type(np.empty_like([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.empty_like(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.empty_like(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.array(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.array(B)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.array(B, subok=True)) # E: SubClass[{float64}]
+reveal_type(np.array([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.array(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.array(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.array(A, like=A)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.zeros([1, 5, 6])) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.zeros([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.zeros([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.empty([1, 5, 6])) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.empty([1, 5, 6], dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.empty([1, 5, 6], dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.concatenate(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.concatenate([A, A])) # E: Any
+reveal_type(np.concatenate([[1], A])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate([[1], [1]])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate((A, A))) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.concatenate(([1], [1]))) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.concatenate(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.concatenate([1, 1.0], out=A)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.asarray(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asarray(B)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.asarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.asarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.asanyarray(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asanyarray(B)) # E: SubClass[{float64}]
+reveal_type(np.asanyarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.asanyarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.asanyarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.ascontiguousarray(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.ascontiguousarray(B)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.ascontiguousarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.ascontiguousarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.ascontiguousarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.asfortranarray(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asfortranarray(B)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asfortranarray([1, 1.0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.asfortranarray(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.asfortranarray(A, dtype='c16')) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.fromstring("1 1 1", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fromstring(b"1 1 1", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fromstring("1 1 1", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.fromstring("1 1 1", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.fromstring(b"1 1 1", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.fromfile("test.txt", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fromfile("test.txt", dtype=np.int64, sep=" ")) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.fromfile("test.txt", dtype="c16", sep=" ")) # E: ndarray[Any, dtype[Any]]
+with open("test.txt") as f:
+ reveal_type(np.fromfile(f, sep=" ")) # E: ndarray[Any, dtype[{float64}]]
+ reveal_type(np.fromfile(b"test.txt", sep=" ")) # E: ndarray[Any, dtype[{float64}]]
+ reveal_type(np.fromfile(Path("test.txt"), sep=" ")) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.fromiter("12345", np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fromiter("12345", float)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.frombuffer(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.frombuffer(A, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.frombuffer(A, dtype="c16")) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.arange(False, True)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.arange(10)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.arange(0, 10, step=2)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.arange(10.0)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.arange(start=0, stop=10.0)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.arange(np.timedelta64(0))) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.arange(0, np.timedelta64(10))) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.arange(np.datetime64("0"), np.datetime64("10"))) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.arange(10, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.arange(0, 10, step=2, dtype=np.int16)) # E: ndarray[Any, dtype[{int16}]]
+reveal_type(np.arange(10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.arange(0, 10, dtype="f8")) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.require(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.require(B)) # E: SubClass[{float64}]
+reveal_type(np.require(B, requirements=None)) # E: SubClass[{float64}]
+reveal_type(np.require(B, dtype=int)) # E: ndarray[Any, Any]
+reveal_type(np.require(B, requirements="E")) # E: ndarray[Any, Any]
+reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray[Any, Any]
+reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray[Any, Any]
+reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass[{float64}]
+reveal_type(np.require(B, requirements="W")) # E: SubClass[{float64}]
+reveal_type(np.require(B, requirements="A")) # E: SubClass[{float64}]
+reveal_type(np.require(C)) # E: ndarray[Any, Any]
+
+reveal_type(np.linspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.linspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.linspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], floating[Any]]
+reveal_type(np.linspace(0j, 10, retstep=True)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], complexfloating[Any, Any]]
+reveal_type(np.linspace(0, 10, retstep=True, dtype=np.int64)) # E: Tuple[ndarray[Any, dtype[{int64}]], {int64}]
+reveal_type(np.linspace(0j, 10, retstep=True, dtype=int)) # E: Tuple[ndarray[Any, dtype[Any]], Any]
+
+reveal_type(np.logspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.logspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.logspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.logspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.geomspace(0, 10)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.geomspace(0, 10j)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.geomspace(0, 10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.geomspace(0, 10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.zeros_like(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.zeros_like(C)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.zeros_like(A, dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.zeros_like(B)) # E: SubClass[{float64}]
+reveal_type(np.zeros_like(B, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+
+reveal_type(np.ones_like(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.ones_like(C)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.ones_like(A, dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.ones_like(B)) # E: SubClass[{float64}]
+reveal_type(np.ones_like(B, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+
+reveal_type(np.full_like(A, i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.full_like(C, i8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.full_like(A, i8, dtype=int)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.full_like(B, i8)) # E: SubClass[{float64}]
+reveal_type(np.full_like(B, i8, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+
+reveal_type(np.ones(1)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.ones([1, 1, 1])) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.ones(5, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.ones(5, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.full(1, i8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.full([1, 1, 1], i8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.full(1, i8, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.full(1, i8, dtype=float)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.indices([1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.indices([1, 2, 3], sparse=True)) # E: tuple[ndarray[Any, dtype[{int_}]], ...]
+
+reveal_type(np.fromfunction(func, (3, 5))) # E: SubClass[{float64}]
+
+reveal_type(np.identity(10)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.identity(10, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.identity(10, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.atleast_1d(A)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.atleast_1d(C)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.atleast_1d(A, A)) # E: list[ndarray[Any, dtype[Any]]]
+reveal_type(np.atleast_1d(A, C)) # E: list[ndarray[Any, dtype[Any]]]
+reveal_type(np.atleast_1d(C, C)) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.atleast_2d(A)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.atleast_3d(A)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.vstack([A, A])) # E: ndarray[Any, Any]
+reveal_type(np.vstack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.vstack([A, C])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.vstack([C, C])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.hstack([A, A])) # E: ndarray[Any, Any]
+reveal_type(np.hstack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.stack([A, A])) # E: Any
+reveal_type(np.stack([A, A], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.stack([A, C])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.stack([C, C])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.stack([A, A], axis=0)) # E: Any
+reveal_type(np.stack([A, A], out=B)) # E: SubClass[{float64}]
+
+reveal_type(np.block([[A, A], [A, A]])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.block(C)) # E: ndarray[Any, dtype[Any]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi
new file mode 100644
index 00000000..a05d4403
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi
@@ -0,0 +1,22 @@
+from collections.abc import Mapping
+from typing import Any, SupportsIndex
+
+import numpy as np
+import numpy.typing as npt
+
+def mode_func(
+ ar: npt.NDArray[np.number[Any]],
+ width: tuple[int, int],
+ iaxis: SupportsIndex,
+ kwargs: Mapping[str, Any],
+) -> None: ...
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_LIKE: list[int]
+
+reveal_type(np.pad(AR_i8, (2, 3), "constant")) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.pad(AR_LIKE, (2, 3), "constant")) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.pad(AR_f8, (2, 3), mode_func)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2)) # E: ndarray[Any, dtype[{float64}]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi
new file mode 100644
index 00000000..6e65a8d8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayprint.pyi
@@ -0,0 +1,20 @@
+from collections.abc import Callable
+from typing import Any
+import numpy as np
+
+AR: np.ndarray[Any, Any]
+func_float: Callable[[np.floating[Any]], str]
+func_int: Callable[[np.integer[Any]], str]
+
+reveal_type(np.get_printoptions()) # E: TypedDict
+reveal_type(np.array2string( # E: str
+ AR, formatter={'float_kind': func_float, 'int_kind': func_int}
+))
+reveal_type(np.format_float_scientific(1.0)) # E: str
+reveal_type(np.format_float_positional(1)) # E: str
+reveal_type(np.array_repr(AR)) # E: str
+reveal_type(np.array_str(AR)) # E: str
+
+reveal_type(np.printoptions()) # E: contextlib._GeneratorContextManager
+with np.printoptions() as dct:
+ reveal_type(dct) # E: TypedDict
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi
new file mode 100644
index 00000000..9deff8a8
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi
@@ -0,0 +1,60 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_b: npt.NDArray[np.bool_]
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_M: npt.NDArray[np.datetime64]
+AR_O: npt.NDArray[np.object_]
+
+AR_LIKE_f8: list[float]
+
+reveal_type(np.ediff1d(AR_b)) # E: ndarray[Any, dtype[{int8}]]
+reveal_type(np.ediff1d(AR_i8, to_end=[1, 2, 3])) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.ediff1d(AR_M)) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.ediff1d(AR_O)) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.intersect1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.intersect1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.intersect1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.intersect1d(AR_f8, AR_f8, return_indices=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+
+reveal_type(np.setxor1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.setxor1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.setxor1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.in1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.in1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.in1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.isin(AR_i8, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isin(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isin(AR_f8, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isin(AR_f8, AR_LIKE_f8, invert=True)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.union1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.union1d(AR_M, AR_M)) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.union1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.setdiff1d(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.setdiff1d(AR_M, AR_M, assume_unique=True)) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.setdiff1d(AR_f8, AR_i8)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.unique(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.unique(AR_LIKE_f8, axis=0)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.unique(AR_f8, return_index=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_index=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_f8, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_f8, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_f8, return_index=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_f8, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi
new file mode 100644
index 00000000..b6c26ddb
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi
@@ -0,0 +1,24 @@
+from typing import Any
+import numpy as np
+
+AR_i8: np.ndarray[Any, np.dtype[np.int64]]
+ar_iter = np.lib.Arrayterator(AR_i8)
+
+reveal_type(ar_iter.var) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(ar_iter.buf_size) # E: Union[None, builtins.int]
+reveal_type(ar_iter.start) # E: builtins.list[builtins.int]
+reveal_type(ar_iter.stop) # E: builtins.list[builtins.int]
+reveal_type(ar_iter.step) # E: builtins.list[builtins.int]
+reveal_type(ar_iter.shape) # E: builtins.tuple[builtins.int, ...]
+reveal_type(ar_iter.flat) # E: typing.Generator[{int64}, None, None]
+
+reveal_type(ar_iter.__array__()) # E: ndarray[Any, dtype[{int64}]]
+
+for i in ar_iter:
+ reveal_type(i) # E: ndarray[Any, dtype[{int64}]]
+
+reveal_type(ar_iter[0]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]]
+reveal_type(ar_iter[...]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]]
+reveal_type(ar_iter[:]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]]
+reveal_type(ar_iter[0, 0, 0]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]]
+reveal_type(ar_iter[..., 0, :]) # E: lib.arrayterator.Arrayterator[Any, dtype[{int64}]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi
new file mode 100644
index 00000000..f293ef65
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi
@@ -0,0 +1,131 @@
+import numpy as np
+
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+b_ = np.bool_(1)
+
+b = bool(1)
+i = int(1)
+
+AR = np.array([0, 1, 2], dtype=np.int32)
+AR.setflags(write=False)
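+# `AR` is made read-only; the operations below never write to it.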
+
+
+reveal_type(i8 << i8) # E: {int64}
+reveal_type(i8 >> i8) # E: {int64}
+reveal_type(i8 | i8) # E: {int64}
+reveal_type(i8 ^ i8) # E: {int64}
+reveal_type(i8 & i8) # E: {int64}
+
+reveal_type(i8 << AR) # E: Any
+reveal_type(i8 >> AR) # E: Any
+reveal_type(i8 | AR) # E: Any
+reveal_type(i8 ^ AR) # E: Any
+reveal_type(i8 & AR) # E: Any
+
+reveal_type(i4 << i4) # E: {int32}
+reveal_type(i4 >> i4) # E: {int32}
+reveal_type(i4 | i4) # E: {int32}
+reveal_type(i4 ^ i4) # E: {int32}
+reveal_type(i4 & i4) # E: {int32}
+
+reveal_type(i8 << i4) # E: {int64}
+reveal_type(i8 >> i4) # E: {int64}
+reveal_type(i8 | i4) # E: {int64}
+reveal_type(i8 ^ i4) # E: {int64}
+reveal_type(i8 & i4) # E: {int64}
+
+reveal_type(i8 << i) # E: {int64}
+reveal_type(i8 >> i) # E: {int64}
+reveal_type(i8 | i) # E: {int64}
+reveal_type(i8 ^ i) # E: {int64}
+reveal_type(i8 & i) # E: {int64}
+
+reveal_type(i8 << b_) # E: {int64}
+reveal_type(i8 >> b_) # E: {int64}
+reveal_type(i8 | b_) # E: {int64}
+reveal_type(i8 ^ b_) # E: {int64}
+reveal_type(i8 & b_) # E: {int64}
+
+reveal_type(i8 << b) # E: {int64}
+reveal_type(i8 >> b) # E: {int64}
+reveal_type(i8 | b) # E: {int64}
+reveal_type(i8 ^ b) # E: {int64}
+reveal_type(i8 & b) # E: {int64}
+
+reveal_type(u8 << u8) # E: {uint64}
+reveal_type(u8 >> u8) # E: {uint64}
+reveal_type(u8 | u8) # E: {uint64}
+reveal_type(u8 ^ u8) # E: {uint64}
+reveal_type(u8 & u8) # E: {uint64}
+
+reveal_type(u8 << AR) # E: Any
+reveal_type(u8 >> AR) # E: Any
+reveal_type(u8 | AR) # E: Any
+reveal_type(u8 ^ AR) # E: Any
+reveal_type(u8 & AR) # E: Any
+
+reveal_type(u4 << u4) # E: {uint32}
+reveal_type(u4 >> u4) # E: {uint32}
+reveal_type(u4 | u4) # E: {uint32}
+reveal_type(u4 ^ u4) # E: {uint32}
+reveal_type(u4 & u4) # E: {uint32}
+
+reveal_type(u4 << i4) # E: signedinteger[Any]
+reveal_type(u4 >> i4) # E: signedinteger[Any]
+reveal_type(u4 | i4) # E: signedinteger[Any]
+reveal_type(u4 ^ i4) # E: signedinteger[Any]
+reveal_type(u4 & i4) # E: signedinteger[Any]
+
+reveal_type(u4 << i) # E: signedinteger[Any]
+reveal_type(u4 >> i) # E: signedinteger[Any]
+reveal_type(u4 | i) # E: signedinteger[Any]
+reveal_type(u4 ^ i) # E: signedinteger[Any]
+reveal_type(u4 & i) # E: signedinteger[Any]
+
+reveal_type(u8 << b_) # E: {uint64}
+reveal_type(u8 >> b_) # E: {uint64}
+reveal_type(u8 | b_) # E: {uint64}
+reveal_type(u8 ^ b_) # E: {uint64}
+reveal_type(u8 & b_) # E: {uint64}
+
+reveal_type(u8 << b) # E: {uint64}
+reveal_type(u8 >> b) # E: {uint64}
+reveal_type(u8 | b) # E: {uint64}
+reveal_type(u8 ^ b) # E: {uint64}
+reveal_type(u8 & b) # E: {uint64}
+
+reveal_type(b_ << b_) # E: {int8}
+reveal_type(b_ >> b_) # E: {int8}
+reveal_type(b_ | b_) # E: bool_
+reveal_type(b_ ^ b_) # E: bool_
+reveal_type(b_ & b_) # E: bool_
+
+reveal_type(b_ << AR) # E: Any
+reveal_type(b_ >> AR) # E: Any
+reveal_type(b_ | AR) # E: Any
+reveal_type(b_ ^ AR) # E: Any
+reveal_type(b_ & AR) # E: Any
+
+reveal_type(b_ << b) # E: {int8}
+reveal_type(b_ >> b) # E: {int8}
+reveal_type(b_ | b) # E: bool_
+reveal_type(b_ ^ b) # E: bool_
+reveal_type(b_ & b) # E: bool_
+
+reveal_type(b_ << i) # E: {int_}
+reveal_type(b_ >> i) # E: {int_}
+reveal_type(b_ | i) # E: {int_}
+reveal_type(b_ ^ i) # E: {int_}
+reveal_type(b_ & i) # E: {int_}
+
+reveal_type(~i8) # E: {int64}
+reveal_type(~i4) # E: {int32}
+reveal_type(~u8) # E: {uint64}
+reveal_type(~u4) # E: {uint32}
+reveal_type(~b_) # E: bool_
+reveal_type(~AR) # E: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/char.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/char.pyi
new file mode 100644
index 00000000..0563b347
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/char.pyi
@@ -0,0 +1,147 @@
+import numpy as np
+import numpy.typing as npt
+from collections.abc import Sequence
+
+AR_U: npt.NDArray[np.str_]
+AR_S: npt.NDArray[np.bytes_]
+
+reveal_type(np.char.equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.not_equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.not_equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.greater_equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.greater_equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.less_equal(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.less_equal(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.greater(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.greater(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.less(AR_U, AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.less(AR_S, AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.multiply(AR_U, 5)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.multiply(AR_S, [5, 4, 3])) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.mod(AR_U, "test")) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.mod(AR_S, "test")) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.capitalize(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.capitalize(AR_S)) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.center(AR_U, 5)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.center(AR_S, [2, 3, 4], b"a")) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.encode(AR_U)) # E: ndarray[Any, dtype[bytes_]]
+reveal_type(np.char.decode(AR_S)) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(np.char.expandtabs(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.expandtabs(AR_S, tabsize=4)) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.join(AR_U, "_")) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.join(AR_S, [b"_", b""])) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.ljust(AR_U, 5)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]]
+reveal_type(np.char.rjust(AR_U, 5)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.lstrip(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.lstrip(AR_S, chars=b"_")) # E: ndarray[Any, dtype[bytes_]]
+reveal_type(np.char.rstrip(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.rstrip(AR_S, chars=b"_")) # E: ndarray[Any, dtype[bytes_]]
+reveal_type(np.char.strip(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.strip(AR_S, chars=b"_")) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.partition(AR_U, "\n")) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.partition(AR_S, [b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]]
+reveal_type(np.char.rpartition(AR_U, "\n")) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"])) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.replace(AR_U, "_", "-")) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"])) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.split(AR_U, "_")) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.char.split(AR_S, maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.char.rsplit(AR_U, "_")) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.char.splitlines(AR_U)) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.char.splitlines(AR_S, keepends=[True, True, False])) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.char.swapcase(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.swapcase(AR_S)) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.title(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.title(AR_S)) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.upper(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.upper(AR_S)) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.zfill(AR_U, 5)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.char.zfill(AR_S, [2, 3, 4])) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(np.char.count(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(np.char.endswith(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.startswith(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.find(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.rfind(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(np.char.index(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.rindex(AR_U, "a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(np.char.isalpha(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isalpha(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.isalnum(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isalnum(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.isdecimal(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isdecimal(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.isdigit(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isdigit(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.islower(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.islower(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.isnumeric(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isnumeric(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.isspace(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isspace(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.istitle(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.istitle(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.isupper(AR_U)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.char.isupper(AR_S)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.char.str_len(AR_U)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.char.str_len(AR_S)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(np.char.array(AR_U)) # E: chararray[Any, dtype[str_]]
+reveal_type(np.char.array(AR_S, order="K")) # E: chararray[Any, dtype[bytes_]]
+reveal_type(np.char.array("bob", copy=True)) # E: chararray[Any, dtype[str_]]
+reveal_type(np.char.array(b"bob", itemsize=5)) # E: chararray[Any, dtype[bytes_]]
+reveal_type(np.char.array(1, unicode=False)) # E: chararray[Any, dtype[bytes_]]
+reveal_type(np.char.array(1, unicode=True)) # E: chararray[Any, dtype[str_]]
+
+reveal_type(np.char.asarray(AR_U)) # E: chararray[Any, dtype[str_]]
+reveal_type(np.char.asarray(AR_S, order="K")) # E: chararray[Any, dtype[bytes_]]
+reveal_type(np.char.asarray("bob")) # E: chararray[Any, dtype[str_]]
+reveal_type(np.char.asarray(b"bob", itemsize=5)) # E: chararray[Any, dtype[bytes_]]
+reveal_type(np.char.asarray(1, unicode=False)) # E: chararray[Any, dtype[bytes_]]
+reveal_type(np.char.asarray(1, unicode=True)) # E: chararray[Any, dtype[str_]]
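
Illustrative runtime counterpart of the np.char assertions above (ad hoc names; standard NumPy runtime assumed): the vectorized string functions broadcast elementwise, and the split family returns object arrays.

import numpy as np

AR = np.array(["a", "bb"])
assert np.char.upper(AR).tolist() == ["A", "BB"]   # elementwise str.upper
assert np.char.str_len(AR).tolist() == [1, 2]      # lengths as an integer array
assert np.char.split(AR, "b").dtype == object      # split yields an object array of lists
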
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/chararray.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/chararray.pyi
new file mode 100644
index 00000000..61906c86
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/chararray.pyi
@@ -0,0 +1,132 @@
+import numpy as np
+from typing import Any
+
+AR_U: np.chararray[Any, np.dtype[np.str_]]
+AR_S: np.chararray[Any, np.dtype[np.bytes_]]
+
+reveal_type(AR_U == AR_U) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S == AR_S) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U != AR_U) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S != AR_S) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U >= AR_U) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S >= AR_S) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U <= AR_U) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S <= AR_S) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U > AR_U) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S > AR_S) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U < AR_U) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S < AR_S) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U * 5) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S * [5]) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U % "test") # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S % b"test") # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.capitalize()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.capitalize()) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.center(5)) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.center([2, 3, 4], b"a")) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.encode()) # E: chararray[Any, dtype[bytes_]]
+reveal_type(AR_S.decode()) # E: chararray[Any, dtype[str_]]
+
+reveal_type(AR_U.expandtabs()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.expandtabs(tabsize=4)) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.join("_")) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.join([b"_", b""])) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.ljust(5)) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]]
+reveal_type(AR_U.rjust(5)) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.lstrip()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.lstrip(chars=b"_")) # E: chararray[Any, dtype[bytes_]]
+reveal_type(AR_U.rstrip()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.rstrip(chars=b"_")) # E: chararray[Any, dtype[bytes_]]
+reveal_type(AR_U.strip()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.strip(chars=b"_")) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.partition("\n")) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.partition([b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]]
+reveal_type(AR_U.rpartition("\n")) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.rpartition([b"a", b"b", b"c"])) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.replace("_", "-")) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.replace([b"_", b""], [b"a", b"b"])) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.split("_")) # E: ndarray[Any, dtype[object_]]
+reveal_type(AR_S.split(maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]]
+reveal_type(AR_U.rsplit("_")) # E: ndarray[Any, dtype[object_]]
+reveal_type(AR_S.rsplit(maxsplit=[1, 2, 3])) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(AR_U.splitlines()) # E: ndarray[Any, dtype[object_]]
+reveal_type(AR_S.splitlines(keepends=[True, True, False])) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(AR_U.swapcase()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.swapcase()) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.title()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.title()) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.upper()) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.upper()) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.zfill(5)) # E: chararray[Any, dtype[str_]]
+reveal_type(AR_S.zfill([2, 3, 4])) # E: chararray[Any, dtype[bytes_]]
+
+reveal_type(AR_U.count("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_S.count([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(AR_U.endswith("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.endswith([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_U.startswith("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.startswith([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.find("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_S.find([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_U.rfind("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_S.rfind([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(AR_U.index("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_S.index([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_U.rindex("a", start=[1, 2, 3])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(AR_S.rindex([b"a", b"b", b"c"], end=9)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(AR_U.isalpha()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isalpha()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.isalnum()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isalnum()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.isdecimal()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isdecimal()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.isdigit()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isdigit()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.islower()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.islower()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.isnumeric()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isnumeric()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.isspace()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isspace()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.istitle()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.istitle()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.isupper()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR_S.isupper()) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(AR_U.__array_finalize__(object())) # E: None
+reveal_type(AR_S.__array_finalize__(object())) # E: None
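
A minimal runtime sketch of the chararray behaviour asserted above (ad hoc names): string methods return new chararrays, while the is*() predicates drop to plain boolean ndarrays.

import numpy as np

chr_arr = np.char.array(["ab", "cd"])          # chararray of str_
assert isinstance(chr_arr.upper(), np.chararray)
assert chr_arr.isalpha().dtype == np.bool_     # predicates return ndarray[bool_]
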
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi
new file mode 100644
index 00000000..9b32f405
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi
@@ -0,0 +1,261 @@
+import numpy as np
+import fractions
+import decimal
+
+c16 = np.complex128()
+f8 = np.float64()
+i8 = np.int64()
+u8 = np.uint64()
+
+c8 = np.complex64()
+f4 = np.float32()
+i4 = np.int32()
+u4 = np.uint32()
+
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ = np.bool_()
+
+b = bool()
+c = complex()
+f = float()
+i = int()
+
+AR = np.array([0], dtype=np.int64)
+AR.setflags(write=False)
+
+SEQ = (0, 1, 2, 3, 4)
+
+# object-like comparisons
+
+reveal_type(i8 > fractions.Fraction(1, 5)) # E: Any
+reveal_type(i8 > [fractions.Fraction(1, 5)]) # E: Any
+reveal_type(i8 > decimal.Decimal("1.5")) # E: Any
+reveal_type(i8 > [decimal.Decimal("1.5")]) # E: Any
+
+# Time structures
+
+reveal_type(dt > dt) # E: bool_
+
+reveal_type(td > td) # E: bool_
+reveal_type(td > i) # E: bool_
+reveal_type(td > i4) # E: bool_
+reveal_type(td > i8) # E: bool_
+
+reveal_type(td > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(td > SEQ) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR > SEQ) # E: ndarray[Any, dtype[bool_]]
+reveal_type(AR > td) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > td) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > AR) # E: ndarray[Any, dtype[bool_]]
+
+# boolean
+
+reveal_type(b_ > b) # E: bool_
+reveal_type(b_ > b_) # E: bool_
+reveal_type(b_ > i) # E: bool_
+reveal_type(b_ > i8) # E: bool_
+reveal_type(b_ > i4) # E: bool_
+reveal_type(b_ > u8) # E: bool_
+reveal_type(b_ > u4) # E: bool_
+reveal_type(b_ > f) # E: bool_
+reveal_type(b_ > f8) # E: bool_
+reveal_type(b_ > f4) # E: bool_
+reveal_type(b_ > c) # E: bool_
+reveal_type(b_ > c16) # E: bool_
+reveal_type(b_ > c8) # E: bool_
+reveal_type(b_ > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(b_ > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+# Complex
+
+reveal_type(c16 > c16) # E: bool_
+reveal_type(c16 > f8) # E: bool_
+reveal_type(c16 > i8) # E: bool_
+reveal_type(c16 > c8) # E: bool_
+reveal_type(c16 > f4) # E: bool_
+reveal_type(c16 > i4) # E: bool_
+reveal_type(c16 > b_) # E: bool_
+reveal_type(c16 > b) # E: bool_
+reveal_type(c16 > c) # E: bool_
+reveal_type(c16 > f) # E: bool_
+reveal_type(c16 > i) # E: bool_
+reveal_type(c16 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(c16 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(c16 > c16) # E: bool_
+reveal_type(f8 > c16) # E: bool_
+reveal_type(i8 > c16) # E: bool_
+reveal_type(c8 > c16) # E: bool_
+reveal_type(f4 > c16) # E: bool_
+reveal_type(i4 > c16) # E: bool_
+reveal_type(b_ > c16) # E: bool_
+reveal_type(b > c16) # E: bool_
+reveal_type(c > c16) # E: bool_
+reveal_type(f > c16) # E: bool_
+reveal_type(i > c16) # E: bool_
+reveal_type(AR > c16) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > c16) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(c8 > c16) # E: bool_
+reveal_type(c8 > f8) # E: bool_
+reveal_type(c8 > i8) # E: bool_
+reveal_type(c8 > c8) # E: bool_
+reveal_type(c8 > f4) # E: bool_
+reveal_type(c8 > i4) # E: bool_
+reveal_type(c8 > b_) # E: bool_
+reveal_type(c8 > b) # E: bool_
+reveal_type(c8 > c) # E: bool_
+reveal_type(c8 > f) # E: bool_
+reveal_type(c8 > i) # E: bool_
+reveal_type(c8 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(c8 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(c16 > c8) # E: bool_
+reveal_type(f8 > c8) # E: bool_
+reveal_type(i8 > c8) # E: bool_
+reveal_type(c8 > c8) # E: bool_
+reveal_type(f4 > c8) # E: bool_
+reveal_type(i4 > c8) # E: bool_
+reveal_type(b_ > c8) # E: bool_
+reveal_type(b > c8) # E: bool_
+reveal_type(c > c8) # E: bool_
+reveal_type(f > c8) # E: bool_
+reveal_type(i > c8) # E: bool_
+reveal_type(AR > c8) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > c8) # E: ndarray[Any, dtype[bool_]]
+
+# Float
+
+reveal_type(f8 > f8) # E: bool_
+reveal_type(f8 > i8) # E: bool_
+reveal_type(f8 > f4) # E: bool_
+reveal_type(f8 > i4) # E: bool_
+reveal_type(f8 > b_) # E: bool_
+reveal_type(f8 > b) # E: bool_
+reveal_type(f8 > c) # E: bool_
+reveal_type(f8 > f) # E: bool_
+reveal_type(f8 > i) # E: bool_
+reveal_type(f8 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(f8 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(f8 > f8) # E: bool_
+reveal_type(i8 > f8) # E: bool_
+reveal_type(f4 > f8) # E: bool_
+reveal_type(i4 > f8) # E: bool_
+reveal_type(b_ > f8) # E: bool_
+reveal_type(b > f8) # E: bool_
+reveal_type(c > f8) # E: bool_
+reveal_type(f > f8) # E: bool_
+reveal_type(i > f8) # E: bool_
+reveal_type(AR > f8) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > f8) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(f4 > f8) # E: bool_
+reveal_type(f4 > i8) # E: bool_
+reveal_type(f4 > f4) # E: bool_
+reveal_type(f4 > i4) # E: bool_
+reveal_type(f4 > b_) # E: bool_
+reveal_type(f4 > b) # E: bool_
+reveal_type(f4 > c) # E: bool_
+reveal_type(f4 > f) # E: bool_
+reveal_type(f4 > i) # E: bool_
+reveal_type(f4 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(f4 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(f8 > f4) # E: bool_
+reveal_type(i8 > f4) # E: bool_
+reveal_type(f4 > f4) # E: bool_
+reveal_type(i4 > f4) # E: bool_
+reveal_type(b_ > f4) # E: bool_
+reveal_type(b > f4) # E: bool_
+reveal_type(c > f4) # E: bool_
+reveal_type(f > f4) # E: bool_
+reveal_type(i > f4) # E: bool_
+reveal_type(AR > f4) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > f4) # E: ndarray[Any, dtype[bool_]]
+
+# Int
+
+reveal_type(i8 > i8) # E: bool_
+reveal_type(i8 > u8) # E: bool_
+reveal_type(i8 > i4) # E: bool_
+reveal_type(i8 > u4) # E: bool_
+reveal_type(i8 > b_) # E: bool_
+reveal_type(i8 > b) # E: bool_
+reveal_type(i8 > c) # E: bool_
+reveal_type(i8 > f) # E: bool_
+reveal_type(i8 > i) # E: bool_
+reveal_type(i8 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(i8 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(u8 > u8) # E: bool_
+reveal_type(u8 > i4) # E: bool_
+reveal_type(u8 > u4) # E: bool_
+reveal_type(u8 > b_) # E: bool_
+reveal_type(u8 > b) # E: bool_
+reveal_type(u8 > c) # E: bool_
+reveal_type(u8 > f) # E: bool_
+reveal_type(u8 > i) # E: bool_
+reveal_type(u8 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(u8 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(i8 > i8) # E: bool_
+reveal_type(u8 > i8) # E: bool_
+reveal_type(i4 > i8) # E: bool_
+reveal_type(u4 > i8) # E: bool_
+reveal_type(b_ > i8) # E: bool_
+reveal_type(b > i8) # E: bool_
+reveal_type(c > i8) # E: bool_
+reveal_type(f > i8) # E: bool_
+reveal_type(i > i8) # E: bool_
+reveal_type(AR > i8) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > i8) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(u8 > u8) # E: bool_
+reveal_type(i4 > u8) # E: bool_
+reveal_type(u4 > u8) # E: bool_
+reveal_type(b_ > u8) # E: bool_
+reveal_type(b > u8) # E: bool_
+reveal_type(c > u8) # E: bool_
+reveal_type(f > u8) # E: bool_
+reveal_type(i > u8) # E: bool_
+reveal_type(AR > u8) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > u8) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(i4 > i8) # E: bool_
+reveal_type(i4 > i4) # E: bool_
+reveal_type(i4 > i) # E: bool_
+reveal_type(i4 > b_) # E: bool_
+reveal_type(i4 > b) # E: bool_
+reveal_type(i4 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(i4 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(u4 > i8) # E: bool_
+reveal_type(u4 > i4) # E: bool_
+reveal_type(u4 > u8) # E: bool_
+reveal_type(u4 > u4) # E: bool_
+reveal_type(u4 > i) # E: bool_
+reveal_type(u4 > b_) # E: bool_
+reveal_type(u4 > b) # E: bool_
+reveal_type(u4 > AR) # E: ndarray[Any, dtype[bool_]]
+reveal_type(u4 > SEQ) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(i8 > i4) # E: bool_
+reveal_type(i4 > i4) # E: bool_
+reveal_type(i > i4) # E: bool_
+reveal_type(b_ > i4) # E: bool_
+reveal_type(b > i4) # E: bool_
+reveal_type(AR > i4) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > i4) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(i8 > u4) # E: bool_
+reveal_type(i4 > u4) # E: bool_
+reveal_type(u8 > u4) # E: bool_
+reveal_type(u4 > u4) # E: bool_
+reveal_type(b_ > u4) # E: bool_
+reveal_type(b > u4) # E: bool_
+reveal_type(i > u4) # E: bool_
+reveal_type(AR > u4) # E: ndarray[Any, dtype[bool_]]
+reveal_type(SEQ > u4) # E: ndarray[Any, dtype[bool_]]
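
The pattern asserted throughout this file, sketched at runtime (illustrative only): scalar-scalar comparisons produce np.bool_, while anything array-like broadcasts to a boolean ndarray.

import numpy as np

assert type(np.int64(1) > np.float64(0.5)) is np.bool_   # scalar vs. scalar
assert (np.array([0, 1]) > (0, 1)).dtype == np.bool_     # array vs. sequence broadcasts
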
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/constants.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/constants.pyi
new file mode 100644
index 00000000..37f54ccd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/constants.pyi
@@ -0,0 +1,52 @@
+import numpy as np
+
+reveal_type(np.Inf) # E: float
+reveal_type(np.Infinity) # E: float
+reveal_type(np.NAN) # E: float
+reveal_type(np.NINF) # E: float
+reveal_type(np.NZERO) # E: float
+reveal_type(np.NaN) # E: float
+reveal_type(np.PINF) # E: float
+reveal_type(np.PZERO) # E: float
+reveal_type(np.e) # E: float
+reveal_type(np.euler_gamma) # E: float
+reveal_type(np.inf) # E: float
+reveal_type(np.infty) # E: float
+reveal_type(np.nan) # E: float
+reveal_type(np.pi) # E: float
+
+reveal_type(np.ALLOW_THREADS) # E: int
+reveal_type(np.BUFSIZE) # E: Literal[8192]
+reveal_type(np.CLIP) # E: Literal[0]
+reveal_type(np.ERR_CALL) # E: Literal[3]
+reveal_type(np.ERR_DEFAULT) # E: Literal[521]
+reveal_type(np.ERR_IGNORE) # E: Literal[0]
+reveal_type(np.ERR_LOG) # E: Literal[5]
+reveal_type(np.ERR_PRINT) # E: Literal[4]
+reveal_type(np.ERR_RAISE) # E: Literal[2]
+reveal_type(np.ERR_WARN) # E: Literal[1]
+reveal_type(np.FLOATING_POINT_SUPPORT) # E: Literal[1]
+reveal_type(np.FPE_DIVIDEBYZERO) # E: Literal[1]
+reveal_type(np.FPE_INVALID) # E: Literal[8]
+reveal_type(np.FPE_OVERFLOW) # E: Literal[2]
+reveal_type(np.FPE_UNDERFLOW) # E: Literal[4]
+reveal_type(np.MAXDIMS) # E: Literal[32]
+reveal_type(np.MAY_SHARE_BOUNDS) # E: Literal[0]
+reveal_type(np.MAY_SHARE_EXACT) # E: Literal[-1]
+reveal_type(np.RAISE) # E: Literal[2]
+reveal_type(np.SHIFT_DIVIDEBYZERO) # E: Literal[0]
+reveal_type(np.SHIFT_INVALID) # E: Literal[9]
+reveal_type(np.SHIFT_OVERFLOW) # E: Literal[3]
+reveal_type(np.SHIFT_UNDERFLOW) # E: Literal[6]
+reveal_type(np.UFUNC_BUFSIZE_DEFAULT) # E: Literal[8192]
+reveal_type(np.WRAP) # E: Literal[1]
+reveal_type(np.tracemalloc_domain) # E: Literal[389047]
+
+reveal_type(np.little_endian) # E: bool
+reveal_type(np.True_) # E: bool_
+reveal_type(np.False_) # E: bool_
+
+reveal_type(np.UFUNC_PYVALS_NAME) # E: Literal['UFUNC_PYVALS']
+
+reveal_type(np.sctypeDict) # E: dict
+reveal_type(np.sctypes) # E: TypedDict
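
A quick runtime spot-check of a few of the Literal values asserted above (illustrative):

import numpy as np

assert (np.CLIP, np.WRAP, np.RAISE) == (0, 1, 2)   # clip-mode constants
assert np.inf == float("inf") and np.PZERO == 0.0
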
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi
new file mode 100644
index 00000000..2d30de3d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ctypeslib.pyi
@@ -0,0 +1,87 @@
+import ctypes
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+AR_bool: npt.NDArray[np.bool_]
+AR_ubyte: npt.NDArray[np.ubyte]
+AR_ushort: npt.NDArray[np.ushort]
+AR_uintc: npt.NDArray[np.uintc]
+AR_uint: npt.NDArray[np.uint]
+AR_ulonglong: npt.NDArray[np.ulonglong]
+AR_byte: npt.NDArray[np.byte]
+AR_short: npt.NDArray[np.short]
+AR_intc: npt.NDArray[np.intc]
+AR_int: npt.NDArray[np.int_]
+AR_longlong: npt.NDArray[np.longlong]
+AR_single: npt.NDArray[np.single]
+AR_double: npt.NDArray[np.double]
+AR_longdouble: npt.NDArray[np.longdouble]
+AR_void: npt.NDArray[np.void]
+
+pointer: ctypes._Pointer[Any]
+
+reveal_type(np.ctypeslib.c_intp()) # E: {c_intp}
+
+reveal_type(np.ctypeslib.ndpointer()) # E: Type[ctypeslib._ndptr[None]]
+reveal_type(np.ctypeslib.ndpointer(dtype=np.float64)) # E: Type[ctypeslib._ndptr[dtype[{float64}]]]
+reveal_type(np.ctypeslib.ndpointer(dtype=float)) # E: Type[ctypeslib._ndptr[dtype[Any]]]
+reveal_type(np.ctypeslib.ndpointer(shape=(10, 3))) # E: Type[ctypeslib._ndptr[None]]
+reveal_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3))) # E: Type[ctypeslib._concrete_ndptr[dtype[{int64}]]]
+reveal_type(np.ctypeslib.ndpointer(int, shape=(1,))) # E: Type[ctypeslib._concrete_ndptr[dtype[Any]]]
+
+reveal_type(np.ctypeslib.as_ctypes_type(np.bool_)) # E: Type[ctypes.c_bool]
+reveal_type(np.ctypeslib.as_ctypes_type(np.ubyte)) # E: Type[{c_ubyte}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.ushort)) # E: Type[{c_ushort}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.uintc)) # E: Type[{c_uint}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.uint)) # E: Type[{c_ulong}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.ulonglong)) # E: Type[{c_ulonglong}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.byte)) # E: Type[{c_byte}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.short)) # E: Type[{c_short}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.intc)) # E: Type[{c_int}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.int_)) # E: Type[{c_long}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.longlong)) # E: Type[{c_longlong}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.single)) # E: Type[{c_float}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.double)) # E: Type[{c_double}]
+reveal_type(np.ctypeslib.as_ctypes_type(np.longdouble)) # E: Type[{c_longdouble}]
+reveal_type(np.ctypeslib.as_ctypes_type(ctypes.c_double)) # E: Type[{c_double}]
+reveal_type(np.ctypeslib.as_ctypes_type("q")) # E: Type[ctypes.c_longlong]
+reveal_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)])) # E: Type[Any]
+reveal_type(np.ctypeslib.as_ctypes_type("i8")) # E: Type[Any]
+reveal_type(np.ctypeslib.as_ctypes_type("f8")) # E: Type[Any]
+
+reveal_type(np.ctypeslib.as_ctypes(AR_bool.take(0))) # E: ctypes.c_bool
+reveal_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0))) # E: {c_ubyte}
+reveal_type(np.ctypeslib.as_ctypes(AR_ushort.take(0))) # E: {c_ushort}
+reveal_type(np.ctypeslib.as_ctypes(AR_uintc.take(0))) # E: {c_uint}
+reveal_type(np.ctypeslib.as_ctypes(AR_uint.take(0))) # E: {c_ulong}
+reveal_type(np.ctypeslib.as_ctypes(AR_ulonglong.take(0))) # E: {c_ulonglong}
+reveal_type(np.ctypeslib.as_ctypes(AR_byte.take(0))) # E: {c_byte}
+reveal_type(np.ctypeslib.as_ctypes(AR_short.take(0))) # E: {c_short}
+reveal_type(np.ctypeslib.as_ctypes(AR_intc.take(0))) # E: {c_int}
+reveal_type(np.ctypeslib.as_ctypes(AR_int.take(0))) # E: {c_long}
+reveal_type(np.ctypeslib.as_ctypes(AR_longlong.take(0))) # E: {c_longlong}
+reveal_type(np.ctypeslib.as_ctypes(AR_single.take(0))) # E: {c_float}
+reveal_type(np.ctypeslib.as_ctypes(AR_double.take(0))) # E: {c_double}
+reveal_type(np.ctypeslib.as_ctypes(AR_longdouble.take(0))) # E: {c_longdouble}
+reveal_type(np.ctypeslib.as_ctypes(AR_void.take(0))) # E: Any
+reveal_type(np.ctypeslib.as_ctypes(AR_bool)) # E: ctypes.Array[ctypes.c_bool]
+reveal_type(np.ctypeslib.as_ctypes(AR_ubyte)) # E: ctypes.Array[{c_ubyte}]
+reveal_type(np.ctypeslib.as_ctypes(AR_ushort)) # E: ctypes.Array[{c_ushort}]
+reveal_type(np.ctypeslib.as_ctypes(AR_uintc)) # E: ctypes.Array[{c_uint}]
+reveal_type(np.ctypeslib.as_ctypes(AR_uint)) # E: ctypes.Array[{c_ulong}]
+reveal_type(np.ctypeslib.as_ctypes(AR_ulonglong)) # E: ctypes.Array[{c_ulonglong}]
+reveal_type(np.ctypeslib.as_ctypes(AR_byte)) # E: ctypes.Array[{c_byte}]
+reveal_type(np.ctypeslib.as_ctypes(AR_short)) # E: ctypes.Array[{c_short}]
+reveal_type(np.ctypeslib.as_ctypes(AR_intc)) # E: ctypes.Array[{c_int}]
+reveal_type(np.ctypeslib.as_ctypes(AR_int)) # E: ctypes.Array[{c_long}]
+reveal_type(np.ctypeslib.as_ctypes(AR_longlong)) # E: ctypes.Array[{c_longlong}]
+reveal_type(np.ctypeslib.as_ctypes(AR_single)) # E: ctypes.Array[{c_float}]
+reveal_type(np.ctypeslib.as_ctypes(AR_double)) # E: ctypes.Array[{c_double}]
+reveal_type(np.ctypeslib.as_ctypes(AR_longdouble)) # E: ctypes.Array[{c_longdouble}]
+reveal_type(np.ctypeslib.as_ctypes(AR_void)) # E: ctypes.Array[Any]
+
+reveal_type(np.ctypeslib.as_array(AR_ubyte)) # E: ndarray[Any, dtype[{ubyte}]]
+reveal_type(np.ctypeslib.as_array(1)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.ctypeslib.as_array(pointer)) # E: ndarray[Any, dtype[Any]]
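
Illustrative runtime counterpart (assuming a standard platform where np.float64 maps to ctypes.c_double):

import ctypes
import numpy as np

assert np.ctypeslib.as_ctypes_type(np.float64) is ctypes.c_double
arr = np.ctypeslib.as_array((ctypes.c_double * 4)())   # ctypes array -> ndarray
assert arr.dtype == np.float64 and arr.shape == (4,)
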
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/datasource.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/datasource.pyi
new file mode 100644
index 00000000..245ac764
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/datasource.pyi
@@ -0,0 +1,21 @@
+from pathlib import Path
+import numpy as np
+
+path1: Path
+path2: str
+
+d1 = np.DataSource(path1)
+d2 = np.DataSource(path2)
+d3 = np.DataSource(None)
+
+reveal_type(d1.abspath("...")) # E: str
+reveal_type(d2.abspath("...")) # E: str
+reveal_type(d3.abspath("...")) # E: str
+
+reveal_type(d1.exists("...")) # E: bool
+reveal_type(d2.exists("...")) # E: bool
+reveal_type(d3.exists("...")) # E: bool
+
+reveal_type(d1.open("...", "r")) # E: IO[Any]
+reveal_type(d2.open("...", encoding="utf8")) # E: IO[Any]
+reveal_type(d3.open("...", newline="/n")) # E: IO[Any]
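
A minimal runtime sketch (illustrative): DataSource abstracts local paths and URLs behind one interface, caching under destpath.

import numpy as np

ds = np.DataSource(None)            # None -> cache in a temporary directory
assert ds.exists("no/such/file") is False
assert isinstance(ds.abspath("file.txt"), str)
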
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/dtype.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/dtype.pyi
new file mode 100644
index 00000000..7a658511
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/dtype.pyi
@@ -0,0 +1,76 @@
+import ctypes as ct
+import numpy as np
+
+dtype_U: np.dtype[np.str_]
+dtype_V: np.dtype[np.void]
+dtype_i8: np.dtype[np.int64]
+
+reveal_type(np.dtype(np.float64)) # E: dtype[{float64}]
+reveal_type(np.dtype(np.int64)) # E: dtype[{int64}]
+
+# String aliases
+reveal_type(np.dtype("float64")) # E: dtype[{float64}]
+reveal_type(np.dtype("float32")) # E: dtype[{float32}]
+reveal_type(np.dtype("int64")) # E: dtype[{int64}]
+reveal_type(np.dtype("int32")) # E: dtype[{int32}]
+reveal_type(np.dtype("bool")) # E: dtype[bool_]
+reveal_type(np.dtype("bytes")) # E: dtype[bytes_]
+reveal_type(np.dtype("str")) # E: dtype[str_]
+
+# Python types
+reveal_type(np.dtype(complex)) # E: dtype[{cdouble}]
+reveal_type(np.dtype(float)) # E: dtype[{double}]
+reveal_type(np.dtype(int)) # E: dtype[{int_}]
+reveal_type(np.dtype(bool)) # E: dtype[bool_]
+reveal_type(np.dtype(str)) # E: dtype[str_]
+reveal_type(np.dtype(bytes)) # E: dtype[bytes_]
+reveal_type(np.dtype(object)) # E: dtype[object_]
+
+# ctypes
+reveal_type(np.dtype(ct.c_double)) # E: dtype[{double}]
+reveal_type(np.dtype(ct.c_longlong)) # E: dtype[{longlong}]
+reveal_type(np.dtype(ct.c_uint32)) # E: dtype[{uint32}]
+reveal_type(np.dtype(ct.c_bool)) # E: dtype[bool_]
+reveal_type(np.dtype(ct.c_char)) # E: dtype[bytes_]
+reveal_type(np.dtype(ct.py_object)) # E: dtype[object_]
+
+# Special case for None
+reveal_type(np.dtype(None)) # E: dtype[{double}]
+
+# Dtypes of dtypes
+reveal_type(np.dtype(np.dtype(np.float64))) # E: dtype[{float64}]
+
+# Parameterized dtypes
+reveal_type(np.dtype("S8")) # E: dtype
+
+# Void
+reveal_type(np.dtype(("U", 10))) # E: dtype[void]
+
+# Methods and attributes
+reveal_type(dtype_U.base) # E: dtype[Any]
+reveal_type(dtype_U.subdtype) # E: Union[None, Tuple[dtype[Any], builtins.tuple[builtins.int, ...]]]
+reveal_type(dtype_U.newbyteorder()) # E: dtype[str_]
+reveal_type(dtype_U.type) # E: Type[str_]
+reveal_type(dtype_U.name) # E: str
+reveal_type(dtype_U.names) # E: Union[None, builtins.tuple[builtins.str, ...]]
+
+reveal_type(dtype_U * 0) # E: dtype[str_]
+reveal_type(dtype_U * 1) # E: dtype[str_]
+reveal_type(dtype_U * 2) # E: dtype[str_]
+
+reveal_type(dtype_i8 * 0) # E: dtype[void]
+reveal_type(dtype_i8 * 1) # E: dtype[{int64}]
+reveal_type(dtype_i8 * 2) # E: dtype[void]
+
+reveal_type(0 * dtype_U) # E: dtype[str_]
+reveal_type(1 * dtype_U) # E: dtype[str_]
+reveal_type(2 * dtype_U) # E: dtype[str_]
+
+reveal_type(0 * dtype_i8) # E: dtype[Any]
+reveal_type(1 * dtype_i8) # E: dtype[Any]
+reveal_type(2 * dtype_i8) # E: dtype[Any]
+
+reveal_type(dtype_V["f0"]) # E: dtype[Any]
+reveal_type(dtype_V[0]) # E: dtype[Any]
+reveal_type(dtype_V[["f0", "f1"]]) # E: dtype[void]
+reveal_type(dtype_V[["f0"]]) # E: dtype[void]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi
new file mode 100644
index 00000000..d5f93014
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi
@@ -0,0 +1,35 @@
+from typing import Any
+import numpy as np
+
+AR_LIKE_b: list[bool]
+AR_LIKE_u: list[np.uint32]
+AR_LIKE_i: list[int]
+AR_LIKE_f: list[float]
+AR_LIKE_c: list[complex]
+AR_LIKE_U: list[str]
+
+OUT_f: np.ndarray[Any, np.dtype[np.float64]]
+
+reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Any
+reveal_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Any
+reveal_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Any
+reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Any
+reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Any
+reveal_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Any
+reveal_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Any
+
+reveal_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f)) # E: ndarray[Any, dtype[{float64}]
+reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f)) # E: ndarray[Any, dtype[{float64}]
+reveal_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16")) # E: Any
+reveal_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe")) # E: Any
+
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str]
+reveal_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c)) # E: Tuple[builtins.list[Any], builtins.str]
+
+reveal_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: Any
+reveal_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i)) # E: Tuple[builtins.list[Any], builtins.str]
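
Illustrative runtime counterpart: "i,i->i" is an elementwise product, and np.einsum_path returns a (path, report) pair, matching the Tuple[list, str] expectations above.

import numpy as np

out = np.einsum("i,i->i", [1, 2, 3], [4, 5, 6])
assert out.tolist() == [4, 10, 18]
path, report = np.einsum_path("i,i->i", [1, 2, 3], [4, 5, 6])
assert isinstance(path, list) and isinstance(report, str)
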
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/emath.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/emath.pyi
new file mode 100644
index 00000000..9ab2d72d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/emath.pyi
@@ -0,0 +1,52 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+f8: np.float64
+c16: np.complex128
+
+reveal_type(np.emath.sqrt(f8)) # E: Any
+reveal_type(np.emath.sqrt(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.sqrt(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.sqrt(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log(f8)) # E: Any
+reveal_type(np.emath.log(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log10(f8)) # E: Any
+reveal_type(np.emath.log10(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log10(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log10(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.log2(f8)) # E: Any
+reveal_type(np.emath.log2(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.log2(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.log2(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.logn(f8, 2)) # E: Any
+reveal_type(np.emath.logn(AR_f8, 4)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.logn(f8, 1j)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.logn(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.power(f8, 2)) # E: Any
+reveal_type(np.emath.power(AR_f8, 4)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.power(f8, 2j)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.power(AR_c16, 1.5)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arccos(f8)) # E: Any
+reveal_type(np.emath.arccos(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arccos(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arccos(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arcsin(f8)) # E: Any
+reveal_type(np.emath.arcsin(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arcsin(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arcsin(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.emath.arctanh(f8)) # E: Any
+reveal_type(np.emath.arctanh(AR_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.emath.arctanh(c16)) # E: complexfloating[Any, Any]
+reveal_type(np.emath.arctanh(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
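
Why these expectations allow complexfloating results, sketched at runtime (illustrative): np.emath switches to the complex domain where the real-valued functions would return nan.

import numpy as np

assert np.emath.sqrt(-1) == 1j                    # complex result instead of nan
assert np.isclose(np.emath.log(-1), np.pi * 1j)   # principal branch of the complex log
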
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/false_positives.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/false_positives.pyi
new file mode 100644
index 00000000..2d715664
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/false_positives.pyi
@@ -0,0 +1,10 @@
+from typing import Any
+import numpy.typing as npt
+
+AR_Any: npt.NDArray[Any]
+
+# Mypy bug where overload ambiguity is ignored for `Any`-parametrized types;
+# xref numpy/numpy#20099 and python/mypy#11347
+#
+# The expected output would be something akin to `ndarray[Any, dtype[Any]]`
+reveal_type(AR_Any + 2) # E: ndarray[Any, dtype[signedinteger[Any]]]
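
An illustrative runtime sketch of the mismatch (ad hoc names): execution is unaffected, only mypy's static inference picks the wrong overload.

import numpy as np
import numpy.typing as npt
from typing import Any

AR: npt.NDArray[Any] = np.zeros(3)
res = AR + 2                       # runtime dtype is float64; mypy infers signedinteger
assert res.dtype == np.float64
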
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fft.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fft.pyi
new file mode 100644
index 00000000..0667938e
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fft.pyi
@@ -0,0 +1,35 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_LIKE_f8: list[float]
+
+reveal_type(np.fft.fftshift(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fft.fftshift(AR_LIKE_f8, axes=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.fft.ifftshift(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fft.ifftshift(AR_LIKE_f8, axes=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.fft.fftfreq(5, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.fft.fftfreq(np.int64(), AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.fft.rfftfreq(5, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.fft.rfftfreq(np.int64(), AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.fft.fft(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.ifft(AR_f8, axis=1)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.rfft(AR_f8, n=None)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.irfft(AR_f8, norm="ortho")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fft.hfft(AR_f8, n=2)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fft.ihfft(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+
+reveal_type(np.fft.fftn(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.ifftn(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.rfftn(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.irfftn(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.fft.rfft2(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.ifft2(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.fft2(AR_f8)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.fft.irfft2(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
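
Runtime counterpart of the promotion rules asserted above (illustrative): real input is promoted to complex128 by the forward transforms, and irfft returns float64.

import numpy as np

spec = np.fft.fft(np.ones(4))
assert spec.dtype == np.complex128
assert np.fft.irfft(spec).dtype == np.float64
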
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi
new file mode 100644
index 00000000..8d3e8063
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi
@@ -0,0 +1,23 @@
+from typing import Any
+import numpy as np
+
+a: np.flatiter[np.ndarray[Any, np.dtype[np.str_]]]
+
+reveal_type(a.base) # E: ndarray[Any, dtype[str_]]
+reveal_type(a.copy()) # E: ndarray[Any, dtype[str_]]
+reveal_type(a.coords) # E: tuple[builtins.int, ...]
+reveal_type(a.index) # E: int
+reveal_type(iter(a)) # E: Any
+reveal_type(next(a)) # E: str_
+reveal_type(a[0]) # E: str_
+reveal_type(a[[0, 1, 2]]) # E: ndarray[Any, dtype[str_]]
+reveal_type(a[...]) # E: ndarray[Any, dtype[str_]]
+reveal_type(a[:]) # E: ndarray[Any, dtype[str_]]
+reveal_type(a[(...,)]) # E: ndarray[Any, dtype[str_]]
+reveal_type(a[(0,)]) # E: str_
+reveal_type(a.__array__()) # E: ndarray[Any, dtype[str_]]
+reveal_type(a.__array__(np.dtype(np.float64))) # E: ndarray[Any, dtype[{float64}]]
+a[0] = "a"
+a[:5] = "a"
+a[...] = "a"
+a[(...,)] = "a"
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi
new file mode 100644
index 00000000..e769abcf
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/fromnumeric.pyi
@@ -0,0 +1,297 @@
+"""Tests for :mod:`core.fromnumeric`."""
+
+import numpy as np
+import numpy.typing as npt
+
+class NDArraySubclass(npt.NDArray[np.complex128]):
+ ...
+
+AR_b: npt.NDArray[np.bool_]
+AR_f4: npt.NDArray[np.float32]
+AR_c16: npt.NDArray[np.complex128]
+AR_u8: npt.NDArray[np.uint64]
+AR_i8: npt.NDArray[np.int64]
+AR_O: npt.NDArray[np.object_]
+AR_subclass: NDArraySubclass
+
+b: np.bool_
+f4: np.float32
+i8: np.int64
+f: float
+
+reveal_type(np.take(b, 0)) # E: bool_
+reveal_type(np.take(f4, 0)) # E: {float32}
+reveal_type(np.take(f, 0)) # E: Any
+reveal_type(np.take(AR_b, 0)) # E: bool_
+reveal_type(np.take(AR_f4, 0)) # E: {float32}
+reveal_type(np.take(AR_b, [0])) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.take(AR_f4, [0])) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.take([1], [0])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.take(AR_f4, [0], out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.reshape(b, 1)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.reshape(f4, 1)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.reshape(f, 1)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.reshape(AR_b, 1)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.reshape(AR_f4, 1)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.choose(1, [True, True])) # E: Any
+reveal_type(np.choose([1], [True, True])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.choose([1], AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.choose([1], AR_b, out=AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.repeat(b, 1)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.repeat(f4, 1)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.repeat(f, 1)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.repeat(AR_b, 1)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.repeat(AR_f4, 1)) # E: ndarray[Any, dtype[{float32}]]
+
+# TODO: array_bdd tests for np.put()
+
+reveal_type(np.swapaxes([[0, 1]], 0, 0)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.swapaxes(AR_b, 0, 0)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.swapaxes(AR_f4, 0, 0)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.transpose(b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.transpose(f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.transpose(f)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.transpose(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.transpose(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.partition(b, 0, axis=None)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.partition(f4, 0, axis=None)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.partition(f, 0, axis=None)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.partition(AR_b, 0)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.partition(AR_f4, 0)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.argpartition(b, 0)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.argpartition(f4, 0)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.argpartition(f, 0)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.argpartition(AR_b, 0)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.argpartition(AR_f4, 0)) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(np.sort([2, 1], 0)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.sort(AR_b, 0)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.sort(AR_f4, 0)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.argsort(AR_b, 0)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.argsort(AR_f4, 0)) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(np.argmax(AR_b)) # E: {intp}
+reveal_type(np.argmax(AR_f4)) # E: {intp}
+reveal_type(np.argmax(AR_b, axis=0)) # E: Any
+reveal_type(np.argmax(AR_f4, axis=0)) # E: Any
+reveal_type(np.argmax(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.argmin(AR_b)) # E: {intp}
+reveal_type(np.argmin(AR_f4)) # E: {intp}
+reveal_type(np.argmin(AR_b, axis=0)) # E: Any
+reveal_type(np.argmin(AR_f4, axis=0)) # E: Any
+reveal_type(np.argmin(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.searchsorted(AR_b[0], 0)) # E: {intp}
+reveal_type(np.searchsorted(AR_f4[0], 0)) # E: {intp}
+reveal_type(np.searchsorted(AR_b[0], [0])) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.searchsorted(AR_f4[0], [0])) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(np.resize(b, (5, 5))) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.resize(f4, (5, 5))) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.resize(f, (5, 5))) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.resize(AR_b, (5, 5))) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.resize(AR_f4, (5, 5))) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.squeeze(b)) # E: bool_
+reveal_type(np.squeeze(f4)) # E: {float32}
+reveal_type(np.squeeze(f)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.squeeze(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.squeeze(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.diagonal(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.diagonal(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.trace(AR_b)) # E: Any
+reveal_type(np.trace(AR_f4)) # E: Any
+reveal_type(np.trace(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.ravel(b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.ravel(f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.ravel(f)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.ravel(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.ravel(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.nonzero(b)) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.nonzero(f4)) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.nonzero(f)) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.nonzero(AR_b)) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.nonzero(AR_f4)) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+
+reveal_type(np.shape(b)) # E: tuple[builtins.int, ...]
+reveal_type(np.shape(f4)) # E: tuple[builtins.int, ...]
+reveal_type(np.shape(f)) # E: tuple[builtins.int, ...]
+reveal_type(np.shape(AR_b)) # E: tuple[builtins.int, ...]
+reveal_type(np.shape(AR_f4)) # E: tuple[builtins.int, ...]
+
+reveal_type(np.compress([True], b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.compress([True], f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.compress([True], f)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.compress([True], AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.compress([True], AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+
+reveal_type(np.clip(b, 0, 1.0)) # E: bool_
+reveal_type(np.clip(f4, -1, 1)) # E: {float32}
+reveal_type(np.clip(f, 0, 1)) # E: Any
+reveal_type(np.clip(AR_b, 0, 1)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.clip(AR_f4, 0, 1)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.clip([0], 0, 1)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.clip(AR_b, 0, 1, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.sum(b)) # E: bool_
+reveal_type(np.sum(f4)) # E: {float32}
+reveal_type(np.sum(f)) # E: Any
+reveal_type(np.sum(AR_b)) # E: bool_
+reveal_type(np.sum(AR_f4)) # E: {float32}
+reveal_type(np.sum(AR_b, axis=0)) # E: Any
+reveal_type(np.sum(AR_f4, axis=0)) # E: Any
+reveal_type(np.sum(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.all(b)) # E: bool_
+reveal_type(np.all(f4)) # E: bool_
+reveal_type(np.all(f)) # E: bool_
+reveal_type(np.all(AR_b)) # E: bool_
+reveal_type(np.all(AR_f4)) # E: bool_
+reveal_type(np.all(AR_b, axis=0)) # E: Any
+reveal_type(np.all(AR_f4, axis=0)) # E: Any
+reveal_type(np.all(AR_b, keepdims=True)) # E: Any
+reveal_type(np.all(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.all(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.any(b)) # E: bool_
+reveal_type(np.any(f4)) # E: bool_
+reveal_type(np.any(f)) # E: bool_
+reveal_type(np.any(AR_b)) # E: bool_
+reveal_type(np.any(AR_f4)) # E: bool_
+reveal_type(np.any(AR_b, axis=0)) # E: Any
+reveal_type(np.any(AR_f4, axis=0)) # E: Any
+reveal_type(np.any(AR_b, keepdims=True)) # E: Any
+reveal_type(np.any(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.any(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.cumsum(b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.cumsum(f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.cumsum(f)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.cumsum(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.cumsum(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.cumsum(f, dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.cumsum(f, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.cumsum(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.ptp(b)) # E: bool_
+reveal_type(np.ptp(f4)) # E: {float32}
+reveal_type(np.ptp(f)) # E: Any
+reveal_type(np.ptp(AR_b)) # E: bool_
+reveal_type(np.ptp(AR_f4)) # E: {float32}
+reveal_type(np.ptp(AR_b, axis=0)) # E: Any
+reveal_type(np.ptp(AR_f4, axis=0)) # E: Any
+reveal_type(np.ptp(AR_b, keepdims=True)) # E: Any
+reveal_type(np.ptp(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.ptp(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.amax(b)) # E: bool_
+reveal_type(np.amax(f4)) # E: {float32}
+reveal_type(np.amax(f)) # E: Any
+reveal_type(np.amax(AR_b)) # E: bool_
+reveal_type(np.amax(AR_f4)) # E: {float32}
+reveal_type(np.amax(AR_b, axis=0)) # E: Any
+reveal_type(np.amax(AR_f4, axis=0)) # E: Any
+reveal_type(np.amax(AR_b, keepdims=True)) # E: Any
+reveal_type(np.amax(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.amax(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.amin(b)) # E: bool_
+reveal_type(np.amin(f4)) # E: {float32}
+reveal_type(np.amin(f)) # E: Any
+reveal_type(np.amin(AR_b)) # E: bool_
+reveal_type(np.amin(AR_f4)) # E: {float32}
+reveal_type(np.amin(AR_b, axis=0)) # E: Any
+reveal_type(np.amin(AR_f4, axis=0)) # E: Any
+reveal_type(np.amin(AR_b, keepdims=True)) # E: Any
+reveal_type(np.amin(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.amin(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.prod(AR_b)) # E: {int_}
+reveal_type(np.prod(AR_u8)) # E: {uint64}
+reveal_type(np.prod(AR_i8)) # E: {int64}
+reveal_type(np.prod(AR_f4)) # E: floating[Any]
+reveal_type(np.prod(AR_c16)) # E: complexfloating[Any, Any]
+reveal_type(np.prod(AR_O)) # E: Any
+reveal_type(np.prod(AR_f4, axis=0)) # E: Any
+reveal_type(np.prod(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.prod(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.prod(AR_f4, dtype=float)) # E: Any
+reveal_type(np.prod(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.cumprod(AR_b)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.cumprod(AR_u8)) # E: ndarray[Any, dtype[{uint64}]]
+reveal_type(np.cumprod(AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.cumprod(AR_f4)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.cumprod(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.cumprod(AR_O)) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.cumprod(AR_f4, axis=0)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.cumprod(AR_f4, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.cumprod(AR_f4, dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.cumprod(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.ndim(b)) # E: int
+reveal_type(np.ndim(f4)) # E: int
+reveal_type(np.ndim(f)) # E: int
+reveal_type(np.ndim(AR_b)) # E: int
+reveal_type(np.ndim(AR_f4)) # E: int
+
+reveal_type(np.size(b)) # E: int
+reveal_type(np.size(f4)) # E: int
+reveal_type(np.size(f)) # E: int
+reveal_type(np.size(AR_b)) # E: int
+reveal_type(np.size(AR_f4)) # E: int
+
+reveal_type(np.around(b)) # E: {float16}
+reveal_type(np.around(f)) # E: Any
+reveal_type(np.around(i8)) # E: {int64}
+reveal_type(np.around(f4)) # E: {float32}
+reveal_type(np.around(AR_b)) # E: ndarray[Any, dtype[{float16}]]
+reveal_type(np.around(AR_i8)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.around(AR_f4)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.around([1.5])) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.around(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.mean(AR_b)) # E: floating[Any]
+reveal_type(np.mean(AR_i8)) # E: floating[Any]
+reveal_type(np.mean(AR_f4)) # E: floating[Any]
+reveal_type(np.mean(AR_c16)) # E: complexfloating[Any, Any]
+reveal_type(np.mean(AR_O)) # E: Any
+reveal_type(np.mean(AR_f4, axis=0)) # E: Any
+reveal_type(np.mean(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.mean(AR_f4, dtype=float)) # E: Any
+reveal_type(np.mean(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.mean(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.std(AR_b)) # E: floating[Any]
+reveal_type(np.std(AR_i8)) # E: floating[Any]
+reveal_type(np.std(AR_f4)) # E: floating[Any]
+reveal_type(np.std(AR_c16)) # E: floating[Any]
+reveal_type(np.std(AR_O)) # E: Any
+reveal_type(np.std(AR_f4, axis=0)) # E: Any
+reveal_type(np.std(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.std(AR_f4, dtype=float)) # E: Any
+reveal_type(np.std(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.std(AR_f4, out=AR_subclass)) # E: NDArraySubclass
+
+reveal_type(np.var(AR_b)) # E: floating[Any]
+reveal_type(np.var(AR_i8)) # E: floating[Any]
+reveal_type(np.var(AR_f4)) # E: floating[Any]
+reveal_type(np.var(AR_c16)) # E: floating[Any]
+reveal_type(np.var(AR_O)) # E: Any
+reveal_type(np.var(AR_f4, axis=0)) # E: Any
+reveal_type(np.var(AR_f4, keepdims=True)) # E: Any
+reveal_type(np.var(AR_f4, dtype=float)) # E: Any
+reveal_type(np.var(AR_f4, dtype=np.float64)) # E: {float64}
+reveal_type(np.var(AR_f4, out=AR_subclass)) # E: NDArraySubclass
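
The out=AR_subclass expectations above encode that reductions return the out array unchanged, subclass included; a runtime sketch (ad hoc subclass):

import numpy as np

class MyArray(np.ndarray): ...

out = np.zeros(1).view(MyArray)
res = np.sum(np.arange(3.0), keepdims=True, out=out)
assert res is out and type(res) is MyArray
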
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi
new file mode 100644
index 00000000..1614b577
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/getlimits.pyi
@@ -0,0 +1,47 @@
+import numpy as np
+f: float
+f8: np.float64
+c8: np.complex64
+
+i: int
+i8: np.int64
+u4: np.uint32
+
+finfo_f8: np.finfo[np.float64]
+iinfo_i8: np.iinfo[np.int64]
+
+reveal_type(np.finfo(f)) # E: finfo[{double}]
+reveal_type(np.finfo(f8)) # E: finfo[{float64}]
+reveal_type(np.finfo(c8)) # E: finfo[{float32}]
+reveal_type(np.finfo('f2')) # E: finfo[floating[Any]]
+
+reveal_type(finfo_f8.dtype) # E: dtype[{float64}]
+reveal_type(finfo_f8.bits) # E: int
+reveal_type(finfo_f8.eps) # E: {float64}
+reveal_type(finfo_f8.epsneg) # E: {float64}
+reveal_type(finfo_f8.iexp) # E: int
+reveal_type(finfo_f8.machep) # E: int
+reveal_type(finfo_f8.max) # E: {float64}
+reveal_type(finfo_f8.maxexp) # E: int
+reveal_type(finfo_f8.min) # E: {float64}
+reveal_type(finfo_f8.minexp) # E: int
+reveal_type(finfo_f8.negep) # E: int
+reveal_type(finfo_f8.nexp) # E: int
+reveal_type(finfo_f8.nmant) # E: int
+reveal_type(finfo_f8.precision) # E: int
+reveal_type(finfo_f8.resolution) # E: {float64}
+reveal_type(finfo_f8.tiny) # E: {float64}
+reveal_type(finfo_f8.smallest_normal) # E: {float64}
+reveal_type(finfo_f8.smallest_subnormal) # E: {float64}
+
+reveal_type(np.iinfo(i)) # E: iinfo[{int_}]
+reveal_type(np.iinfo(i8)) # E: iinfo[{int64}]
+reveal_type(np.iinfo(u4)) # E: iinfo[{uint32}]
+reveal_type(np.iinfo('i2')) # E: iinfo[Any]
+
+reveal_type(iinfo_i8.dtype) # E: dtype[{int64}]
+reveal_type(iinfo_i8.kind) # E: str
+reveal_type(iinfo_i8.bits) # E: int
+reveal_type(iinfo_i8.key) # E: str
+reveal_type(iinfo_i8.min) # E: int
+reveal_type(iinfo_i8.max) # E: int
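
Illustrative runtime spot-check: finfo/iinfo are parametrized by the queried dtype, and complex input yields info about the component float type.

import numpy as np

assert np.finfo(np.float64).bits == 64
assert np.iinfo(np.int64).max == 2**63 - 1
assert np.finfo(np.complex64).dtype == np.float32
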
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/histograms.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/histograms.pyi
new file mode 100644
index 00000000..d96e44f0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/histograms.pyi
@@ -0,0 +1,19 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+
+reveal_type(np.histogram_bin_edges(AR_i8, bins="auto")) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3))) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.histogram(AR_i8, bins="auto")) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
+reveal_type(np.histogram(AR_i8, bins="rice", range=(0, 3))) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
+reveal_type(np.histogram(AR_i8, bins="scott", weights=AR_f8)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
+reveal_type(np.histogram(AR_f8, bins=1, density=True)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
+
+reveal_type(np.histogramdd(AR_i8, bins=[1])) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]]
+reveal_type(np.histogramdd(AR_i8, range=[(0, 3)])) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]]
+reveal_type(np.histogramdd(AR_i8, weights=AR_f8)) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]]
+reveal_type(np.histogramdd(AR_f8, density=True)) # E: Tuple[ndarray[Any, dtype[Any]], builtins.list[ndarray[Any, dtype[Any]]]]
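+# histogramdd returns the D-dimensional histogram together with a list of
+# per-axis bin-edge arrays, hence the Tuple[ndarray, builtins.list[ndarray]] reveals.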
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi
new file mode 100644
index 00000000..707d6f3d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/index_tricks.pyi
@@ -0,0 +1,68 @@
+from typing import Any
+import numpy as np
+
+AR_LIKE_b: list[bool]
+AR_LIKE_i: list[int]
+AR_LIKE_f: list[float]
+AR_LIKE_U: list[str]
+
+AR_i8: np.ndarray[Any, np.dtype[np.int64]]
+
+reveal_type(np.ndenumerate(AR_i8)) # E: ndenumerate[{int64}]
+reveal_type(np.ndenumerate(AR_LIKE_f)) # E: ndenumerate[{double}]
+reveal_type(np.ndenumerate(AR_LIKE_U)) # E: ndenumerate[str_]
+
+reveal_type(np.ndenumerate(AR_i8).iter) # E: flatiter[ndarray[Any, dtype[{int64}]]]
+reveal_type(np.ndenumerate(AR_LIKE_f).iter) # E: flatiter[ndarray[Any, dtype[{double}]]]
+reveal_type(np.ndenumerate(AR_LIKE_U).iter) # E: flatiter[ndarray[Any, dtype[str_]]]
+
+reveal_type(next(np.ndenumerate(AR_i8))) # E: Tuple[builtins.tuple[builtins.int, ...], {int64}]
+reveal_type(next(np.ndenumerate(AR_LIKE_f))) # E: Tuple[builtins.tuple[builtins.int, ...], {double}]
+reveal_type(next(np.ndenumerate(AR_LIKE_U))) # E: Tuple[builtins.tuple[builtins.int, ...], str_]
+
+reveal_type(iter(np.ndenumerate(AR_i8))) # E: ndenumerate[{int64}]
+reveal_type(iter(np.ndenumerate(AR_LIKE_f))) # E: ndenumerate[{double}]
+reveal_type(iter(np.ndenumerate(AR_LIKE_U))) # E: ndenumerate[str_]
+
+reveal_type(np.ndindex(1, 2, 3)) # E: numpy.ndindex
+reveal_type(np.ndindex((1, 2, 3))) # E: numpy.ndindex
+reveal_type(iter(np.ndindex(1, 2, 3))) # E: ndindex
+reveal_type(next(np.ndindex(1, 2, 3))) # E: builtins.tuple[builtins.int, ...]
+
+reveal_type(np.unravel_index([22, 41, 37], (7, 6))) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.unravel_index([31, 41, 13], (7, 6), order="F")) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.unravel_index(1621, (6, 7, 8, 9))) # E: tuple[{intp}, ...]
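+# unravel_index yields a tuple of {intp} arrays for array-like indices and a
+# tuple of {intp} scalars for a single flat index, as the reveals above show.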
+
+reveal_type(np.ravel_multi_index([[1]], (7, 6))) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6))) # E: {intp}
+reveal_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F")) # E: {intp}
+reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip")) # E: {intp}
+reveal_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap"))) # E: {intp}
+reveal_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9))) # E: {intp}
+
+reveal_type(np.mgrid[1:1:2]) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.mgrid[1:1:2, None:10]) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.ogrid[1:1:2]) # E: list[ndarray[Any, dtype[Any]]]
+reveal_type(np.ogrid[1:1:2, None:10]) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.index_exp[0:1]) # E: Tuple[builtins.slice]
+reveal_type(np.index_exp[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice]
+reveal_type(np.index_exp[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]]
+
+reveal_type(np.s_[0:1]) # E: builtins.slice
+reveal_type(np.s_[0:1, None:3]) # E: Tuple[builtins.slice, builtins.slice]
+reveal_type(np.s_[0, 0:1, ..., [0, 1, 3]]) # E: Tuple[Literal[0]?, builtins.slice, builtins.ellipsis, builtins.list[builtins.int]]
+
+reveal_type(np.ix_(AR_LIKE_b)) # E: tuple[ndarray[Any, dtype[bool_]], ...]
+reveal_type(np.ix_(AR_LIKE_i, AR_LIKE_f)) # E: tuple[ndarray[Any, dtype[{double}]], ...]
+reveal_type(np.ix_(AR_i8)) # E: tuple[ndarray[Any, dtype[{int64}]], ...]
+
+reveal_type(np.fill_diagonal(AR_i8, 5)) # E: None
+
+reveal_type(np.diag_indices(4)) # E: tuple[ndarray[Any, dtype[{int_}]], ...]
+reveal_type(np.diag_indices(2, 3)) # E: tuple[ndarray[Any, dtype[{int_}]], ...]
+
+reveal_type(np.diag_indices_from(AR_i8)) # E: tuple[ndarray[Any, dtype[{int_}]], ...]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi
new file mode 100644
index 00000000..a8b9b01a
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_function_base.pyi
@@ -0,0 +1,179 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+vectorized_func: np.vectorize
+
+f8: np.float64
+AR_LIKE_f8: list[float]
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_m: npt.NDArray[np.timedelta64]
+AR_M: npt.NDArray[np.datetime64]
+AR_O: npt.NDArray[np.object_]
+AR_b: npt.NDArray[np.bool_]
+AR_U: npt.NDArray[np.str_]
+CHAR_AR_U: np.chararray[Any, np.dtype[np.str_]]
+
+def func(*args: Any, **kwargs: Any) -> Any: ...
+
+reveal_type(vectorized_func.pyfunc) # E: def (*Any, **Any) -> Any
+reveal_type(vectorized_func.cache) # E: bool
+reveal_type(vectorized_func.signature) # E: Union[None, builtins.str]
+reveal_type(vectorized_func.otypes) # E: Union[None, builtins.str]
+reveal_type(vectorized_func.excluded) # E: set[Union[builtins.int, builtins.str]]
+reveal_type(vectorized_func.__doc__) # E: Union[None, builtins.str]
+reveal_type(vectorized_func([1])) # E: Any
+reveal_type(np.vectorize(int)) # E: vectorize
+reveal_type(np.vectorize( # E: vectorize
+ int, otypes="i", doc="doc", excluded=(), cache=True, signature=None
+))
+
+reveal_type(np.add_newdoc("__main__", "blabla", doc="test doc")) # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=("meth", "test doc"))) # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=[("meth", "test doc")])) # E: None
+
+reveal_type(np.rot90(AR_f8, k=2)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.rot90(AR_LIKE_f8, axes=(0, 1))) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.flip(f8)) # E: {float64}
+reveal_type(np.flip(1.0)) # E: Any
+reveal_type(np.flip(AR_f8, axis=(0, 1))) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.flip(AR_LIKE_f8, axis=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.iterable(1)) # E: bool
+reveal_type(np.iterable([1])) # E: bool
+
+reveal_type(np.average(AR_f8)) # E: floating[Any]
+reveal_type(np.average(AR_f8, weights=AR_c16)) # E: complexfloating[Any, Any]
+reveal_type(np.average(AR_O)) # E: Any
+reveal_type(np.average(AR_f8, returned=True)) # E: Tuple[floating[Any], floating[Any]]
+reveal_type(np.average(AR_f8, weights=AR_c16, returned=True)) # E: Tuple[complexfloating[Any, Any], complexfloating[Any, Any]]
+reveal_type(np.average(AR_O, returned=True)) # E: Tuple[Any, Any]
+reveal_type(np.average(AR_f8, axis=0)) # E: Any
+reveal_type(np.average(AR_f8, axis=0, returned=True)) # E: Tuple[Any, Any]
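+# Once axis is given, the result may be a scalar or an array depending on the
+# input's dimensionality, so the stubs fall back to Any.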
+
+reveal_type(np.asarray_chkfinite(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=float)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.piecewise(AR_f8, AR_b, [func])) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.piecewise(AR_LIKE_f8, AR_b, [func])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.select([AR_f8], [AR_f8])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.copy(AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.copy(AR_U)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.copy(CHAR_AR_U)) # E: ndarray[Any, Any]
+reveal_type(np.copy(CHAR_AR_U, "K", subok=True)) # E: chararray[Any, dtype[str_]]
+reveal_type(np.copy(CHAR_AR_U, subok=True)) # E: chararray[Any, dtype[str_]]
+
+reveal_type(np.gradient(AR_f8, axis=None)) # E: Any
+reveal_type(np.gradient(AR_LIKE_f8, edge_order=2)) # E: Any
+
+reveal_type(np.diff("bob", n=0)) # E: str
+reveal_type(np.diff(AR_f8, axis=0)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.diff(AR_LIKE_f8, prepend=1.5)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.angle(f8)) # E: floating[Any]
+reveal_type(np.angle(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.angle(AR_c16, deg=True)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.angle(AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.unwrap(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.unwrap(AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.sort_complex(AR_f8)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.trim_zeros(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.trim_zeros(AR_LIKE_f8)) # E: list[builtins.float]
+
+reveal_type(np.extract(AR_i8, AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.extract(AR_i8, AR_LIKE_f8)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.place(AR_f8, mask=AR_i8, vals=5.0)) # E: None
+
+reveal_type(np.disp(1, linefeed=True)) # E: None
+with open("test", "w") as f:
+ reveal_type(np.disp("message", device=f)) # E: None
+
+reveal_type(np.cov(AR_f8, bias=True)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.cov(AR_f8, AR_c16, ddof=1)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.cov(AR_f8, fweights=AR_f8, dtype=float)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.corrcoef(AR_f8, rowvar=True)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.corrcoef(AR_f8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.corrcoef(AR_f8, dtype=np.float32)) # E: ndarray[Any, dtype[{float32}]]
+reveal_type(np.corrcoef(AR_f8, dtype=float)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.blackman(5)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.bartlett(6)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.hanning(4.5)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.hamming(0)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.i0(AR_i8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.kaiser(4, 5.9)) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(np.sinc(1.0)) # E: floating[Any]
+reveal_type(np.sinc(1j)) # E: complexfloating[Any, Any]
+reveal_type(np.sinc(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.sinc(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.median(AR_f8, keepdims=False)) # E: floating[Any]
+reveal_type(np.median(AR_c16, overwrite_input=True)) # E: complexfloating[Any, Any]
+reveal_type(np.median(AR_m)) # E: timedelta64
+reveal_type(np.median(AR_O)) # E: Any
+reveal_type(np.median(AR_f8, keepdims=True)) # E: Any
+reveal_type(np.median(AR_c16, axis=0)) # E: Any
+reveal_type(np.median(AR_LIKE_f8, out=AR_c16)) # E: ndarray[Any, dtype[{complex128}]]
+
+reveal_type(np.add_newdoc_ufunc(np.add, "docstring")) # E: None
+
+reveal_type(np.percentile(AR_f8, 50)) # E: floating[Any]
+reveal_type(np.percentile(AR_c16, 50)) # E: complexfloating[Any, Any]
+reveal_type(np.percentile(AR_m, 50)) # E: timedelta64
+reveal_type(np.percentile(AR_M, 50, overwrite_input=True)) # E: datetime64
+reveal_type(np.percentile(AR_O, 50)) # E: Any
+reveal_type(np.percentile(AR_f8, [50])) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.percentile(AR_c16, [50])) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.percentile(AR_m, [50])) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.percentile(AR_M, [50], method="nearest")) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.percentile(AR_O, [50])) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.percentile(AR_f8, [50], keepdims=True)) # E: Any
+reveal_type(np.percentile(AR_f8, [50], axis=[1])) # E: Any
+reveal_type(np.percentile(AR_f8, [50], out=AR_c16)) # E: ndarray[Any, dtype[{complex128}]]
+
+reveal_type(np.quantile(AR_f8, 0.5)) # E: floating[Any]
+reveal_type(np.quantile(AR_c16, 0.5)) # E: complexfloating[Any, Any]
+reveal_type(np.quantile(AR_m, 0.5)) # E: timedelta64
+reveal_type(np.quantile(AR_M, 0.5, overwrite_input=True)) # E: datetime64
+reveal_type(np.quantile(AR_O, 0.5)) # E: Any
+reveal_type(np.quantile(AR_f8, [0.5])) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.quantile(AR_c16, [0.5])) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.quantile(AR_m, [0.5])) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.quantile(AR_M, [0.5], method="nearest")) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.quantile(AR_O, [0.5])) # E: ndarray[Any, dtype[object_]]
+reveal_type(np.quantile(AR_f8, [0.5], keepdims=True)) # E: Any
+reveal_type(np.quantile(AR_f8, [0.5], axis=[1])) # E: Any
+reveal_type(np.quantile(AR_f8, [0.5], out=AR_c16)) # E: ndarray[Any, dtype[{complex128}]]
+
+reveal_type(np.meshgrid(AR_f8, AR_i8, copy=False)) # E: list[ndarray[Any, dtype[Any]]]
+reveal_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij")) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.delete(AR_f8, np.s_[:5])) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.insert(AR_f8, np.s_[:5], 5)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.append(AR_f8, 5)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.append(AR_LIKE_f8, 1j, axis=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.digitize(4.5, [1])) # E: {intp}
+reveal_type(np.digitize(AR_f8, [1, 2, 3])) # E: ndarray[Any, dtype[{intp}]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi
new file mode 100644
index 00000000..de895072
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_polynomial.pyi
@@ -0,0 +1,113 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_b: npt.NDArray[np.bool_]
+AR_u4: npt.NDArray[np.uint32]
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+
+poly_obj: np.poly1d
+
+reveal_type(poly_obj.variable) # E: str
+reveal_type(poly_obj.order) # E: int
+reveal_type(poly_obj.o) # E: int
+reveal_type(poly_obj.roots) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj.r) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj.coeffs) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj.c) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj.coef) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj.coefficients) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj.__hash__) # E: None
+
+reveal_type(poly_obj(1)) # E: Any
+reveal_type(poly_obj([1])) # E: ndarray[Any, dtype[Any]]
+reveal_type(poly_obj(poly_obj)) # E: poly1d
+
+reveal_type(len(poly_obj)) # E: int
+reveal_type(-poly_obj) # E: poly1d
+reveal_type(+poly_obj) # E: poly1d
+
+reveal_type(poly_obj * 5) # E: poly1d
+reveal_type(5 * poly_obj) # E: poly1d
+reveal_type(poly_obj + 5) # E: poly1d
+reveal_type(5 + poly_obj) # E: poly1d
+reveal_type(poly_obj - 5) # E: poly1d
+reveal_type(5 - poly_obj) # E: poly1d
+reveal_type(poly_obj**1) # E: poly1d
+reveal_type(poly_obj**1.0) # E: poly1d
+reveal_type(poly_obj / 5) # E: poly1d
+reveal_type(5 / poly_obj) # E: poly1d
+
+reveal_type(poly_obj[0]) # E: Any
+poly_obj[0] = 5
+reveal_type(iter(poly_obj)) # E: Iterator[Any]
+reveal_type(poly_obj.deriv()) # E: poly1d
+reveal_type(poly_obj.integ()) # E: poly1d
+
+reveal_type(np.poly(poly_obj)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.poly(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.poly(AR_c16)) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(np.polyint(poly_obj)) # E: poly1d
+reveal_type(np.polyint(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.polyint(AR_f8, k=AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.polyint(AR_O, m=2)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.polyder(poly_obj)) # E: poly1d
+reveal_type(np.polyder(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.polyder(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.polyder(AR_O, m=2)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.polyfit(AR_f8, AR_f8, 2)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.polyfit(AR_f8, AR_i8, 1, full=True)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]]
+reveal_type(np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled")) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]]
+reveal_type(np.polyfit(AR_c16, AR_f8, 2)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(np.polyfit(AR_f8, AR_c16, 1, full=True)) # E: Tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[signedinteger[typing._32Bit]]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]]
+reveal_type(np.polyfit(AR_u4, AR_c16, 1.0, cov=True)) # E: Tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{complex128}]]]
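+# full=True returns (coefficients, residuals, rank, singular_values, rcond),
+# hence the 5-element Tuple reveals; cov=... appends a covariance matrix instead.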
+
+reveal_type(np.polyval(AR_b, AR_b)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.polyval(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.polyval(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.polyval(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.polyval(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.polyval(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.polyadd(poly_obj, AR_i8)) # E: poly1d
+reveal_type(np.polyadd(AR_f8, poly_obj)) # E: poly1d
+reveal_type(np.polyadd(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.polyadd(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.polyadd(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.polyadd(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.polyadd(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.polyadd(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.polysub(poly_obj, AR_i8)) # E: poly1d
+reveal_type(np.polysub(AR_f8, poly_obj)) # E: poly1d
+reveal_type(np.polysub(AR_b, AR_b)) # E: <nothing>
+reveal_type(np.polysub(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.polysub(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.polysub(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.polysub(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.polysub(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.polymul(poly_obj, AR_i8)) # E: poly1d
+reveal_type(np.polymul(AR_f8, poly_obj)) # E: poly1d
+reveal_type(np.polymul(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.polymul(AR_u4, AR_b)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.polymul(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.polymul(AR_f8, AR_i8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.polymul(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.polymul(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.polydiv(poly_obj, AR_i8)) # E: poly1d
+reveal_type(np.polydiv(AR_f8, poly_obj)) # E: poly1d
+reveal_type(np.polydiv(AR_b, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.polydiv(AR_u4, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.polydiv(AR_i8, AR_i8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.polydiv(AR_f8, AR_i8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.polydiv(AR_i8, AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+reveal_type(np.polydiv(AR_O, AR_O)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi
new file mode 100644
index 00000000..9b1bf412
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_utils.pyi
@@ -0,0 +1,30 @@
+from io import StringIO
+from typing import Any
+
+import numpy as np
+
+AR: np.ndarray[Any, np.dtype[np.float64]]
+AR_DICT: dict[str, np.ndarray[Any, np.dtype[np.float64]]]
+FILE: StringIO
+
+def func(a: int) -> bool: ...
+
+reveal_type(np.deprecate(func)) # E: def (a: builtins.int) -> builtins.bool
+reveal_type(np.deprecate()) # E: _Deprecate
+
+reveal_type(np.deprecate_with_doc("test")) # E: _Deprecate
+reveal_type(np.deprecate_with_doc(None)) # E: _Deprecate
+
+reveal_type(np.byte_bounds(AR)) # E: Tuple[builtins.int, builtins.int]
+reveal_type(np.byte_bounds(np.float64())) # E: Tuple[builtins.int, builtins.int]
+
+reveal_type(np.who(None)) # E: None
+reveal_type(np.who(AR_DICT)) # E: None
+
+reveal_type(np.info(1, output=FILE)) # E: None
+
+reveal_type(np.source(np.interp, output=FILE)) # E: None
+
+reveal_type(np.lookfor("binary representation", output=FILE)) # E: None
+
+reveal_type(np.safe_eval("1 + 1")) # E: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi
new file mode 100644
index 00000000..e6f69555
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/lib_version.pyi
@@ -0,0 +1,18 @@
+from numpy.lib import NumpyVersion
+
+version = NumpyVersion("1.8.0")
+
+reveal_type(version.vstring) # E: str
+reveal_type(version.version) # E: str
+reveal_type(version.major) # E: int
+reveal_type(version.minor) # E: int
+reveal_type(version.bugfix) # E: int
+reveal_type(version.pre_release) # E: str
+reveal_type(version.is_devversion) # E: bool
+
+reveal_type(version == version) # E: bool
+reveal_type(version != version) # E: bool
+reveal_type(version < "1.8.0") # E: bool
+reveal_type(version <= version) # E: bool
+reveal_type(version > version) # E: bool
+reveal_type(version >= "1.8.0") # E: bool
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/linalg.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/linalg.pyi
new file mode 100644
index 00000000..19e13aed
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/linalg.pyi
@@ -0,0 +1,99 @@
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+AR_m: npt.NDArray[np.timedelta64]
+AR_S: npt.NDArray[np.str_]
+
+reveal_type(np.linalg.tensorsolve(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.tensorsolve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.tensorsolve(AR_c16, AR_f8)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.solve(AR_i8, AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.solve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.solve(AR_c16, AR_f8)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.tensorinv(AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.tensorinv(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.tensorinv(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.inv(AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.inv(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.inv(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.matrix_power(AR_i8, -1)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.linalg.matrix_power(AR_f8, 0)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.linalg.matrix_power(AR_c16, 1)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.linalg.matrix_power(AR_O, 2)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.linalg.cholesky(AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.cholesky(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.cholesky(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.qr(AR_i8)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]]
+reveal_type(np.linalg.qr(AR_f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.linalg.qr(AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+
+reveal_type(np.linalg.eigvals(AR_i8)) # E: Union[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{complex128}]]]
+reveal_type(np.linalg.eigvals(AR_f8)) # E: Union[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+reveal_type(np.linalg.eigvals(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.eigvalsh(AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.eigvalsh(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.eigvalsh(AR_c16)) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(np.linalg.eig(AR_i8)) # E: Union[Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]], Tuple[ndarray[Any, dtype[{complex128}]], ndarray[Any, dtype[{complex128}]]]]
+reveal_type(np.linalg.eig(AR_f8)) # E: Union[Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]], Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]]
+reveal_type(np.linalg.eig(AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+
+reveal_type(np.linalg.eigh(AR_i8)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]]
+reveal_type(np.linalg.eigh(AR_f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.linalg.eigh(AR_c16)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+
+reveal_type(np.linalg.svd(AR_i8)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]]]
+reveal_type(np.linalg.svd(AR_f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.linalg.svd(AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+reveal_type(np.linalg.svd(AR_i8, compute_uv=False)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.svd(AR_f8, compute_uv=False)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.svd(AR_c16, compute_uv=False)) # E: ndarray[Any, dtype[floating[Any]]]
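+# compute_uv=False returns only the singular values, which are real-valued
+# (floating) even for complex input, matching the reveals above.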
+
+reveal_type(np.linalg.cond(AR_i8)) # E: Any
+reveal_type(np.linalg.cond(AR_f8)) # E: Any
+reveal_type(np.linalg.cond(AR_c16)) # E: Any
+
+reveal_type(np.linalg.matrix_rank(AR_i8)) # E: Any
+reveal_type(np.linalg.matrix_rank(AR_f8)) # E: Any
+reveal_type(np.linalg.matrix_rank(AR_c16)) # E: Any
+
+reveal_type(np.linalg.pinv(AR_i8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.linalg.pinv(AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.linalg.pinv(AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+
+reveal_type(np.linalg.slogdet(AR_i8)) # E: Tuple[Any, Any]
+reveal_type(np.linalg.slogdet(AR_f8)) # E: Tuple[Any, Any]
+reveal_type(np.linalg.slogdet(AR_c16)) # E: Tuple[Any, Any]
+
+reveal_type(np.linalg.det(AR_i8)) # E: Any
+reveal_type(np.linalg.det(AR_f8)) # E: Any
+reveal_type(np.linalg.det(AR_c16)) # E: Any
+
+reveal_type(np.linalg.lstsq(AR_i8, AR_i8)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{float64}]], {int32}, ndarray[Any, dtype[{float64}]]]
+reveal_type(np.linalg.lstsq(AR_i8, AR_f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.linalg.lstsq(AR_f8, AR_c16)) # E: Tuple[ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[floating[Any]]], {int32}, ndarray[Any, dtype[floating[Any]]]]
+
+reveal_type(np.linalg.norm(AR_i8)) # E: floating[Any]
+reveal_type(np.linalg.norm(AR_f8)) # E: floating[Any]
+reveal_type(np.linalg.norm(AR_c16)) # E: floating[Any]
+reveal_type(np.linalg.norm(AR_S)) # E: floating[Any]
+reveal_type(np.linalg.norm(AR_f8, axis=0)) # E: Any
+
+reveal_type(np.linalg.multi_dot([AR_i8, AR_i8])) # E: Any
+reveal_type(np.linalg.multi_dot([AR_i8, AR_f8])) # E: Any
+reveal_type(np.linalg.multi_dot([AR_f8, AR_c16])) # E: Any
+reveal_type(np.linalg.multi_dot([AR_O, AR_O])) # E: Any
+reveal_type(np.linalg.multi_dot([AR_m, AR_m])) # E: Any
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/matrix.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/matrix.pyi
new file mode 100644
index 00000000..21c39067
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/matrix.pyi
@@ -0,0 +1,69 @@
+from typing import Any
+import numpy as np
+import numpy.typing as npt
+
+mat: np.matrix[Any, np.dtype[np.int64]]
+ar_f8: npt.NDArray[np.float64]
+
+reveal_type(mat * 5) # E: matrix[Any, Any]
+reveal_type(5 * mat) # E: matrix[Any, Any]
+mat *= 5
+
+reveal_type(mat**5) # E: matrix[Any, Any]
+mat **= 5
+
+reveal_type(mat.sum()) # E: Any
+reveal_type(mat.mean()) # E: Any
+reveal_type(mat.std()) # E: Any
+reveal_type(mat.var()) # E: Any
+reveal_type(mat.prod()) # E: Any
+reveal_type(mat.any()) # E: bool_
+reveal_type(mat.all()) # E: bool_
+reveal_type(mat.max()) # E: {int64}
+reveal_type(mat.min()) # E: {int64}
+reveal_type(mat.argmax()) # E: {intp}
+reveal_type(mat.argmin()) # E: {intp}
+reveal_type(mat.ptp()) # E: {int64}
+
+reveal_type(mat.sum(axis=0)) # E: matrix[Any, Any]
+reveal_type(mat.mean(axis=0)) # E: matrix[Any, Any]
+reveal_type(mat.std(axis=0)) # E: matrix[Any, Any]
+reveal_type(mat.var(axis=0)) # E: matrix[Any, Any]
+reveal_type(mat.prod(axis=0)) # E: matrix[Any, Any]
+reveal_type(mat.any(axis=0)) # E: matrix[Any, dtype[bool_]]
+reveal_type(mat.all(axis=0)) # E: matrix[Any, dtype[bool_]]
+reveal_type(mat.max(axis=0)) # E: matrix[Any, dtype[{int64}]]
+reveal_type(mat.min(axis=0)) # E: matrix[Any, dtype[{int64}]]
+reveal_type(mat.argmax(axis=0)) # E: matrix[Any, dtype[{intp}]]
+reveal_type(mat.argmin(axis=0)) # E: matrix[Any, dtype[{intp}]]
+reveal_type(mat.ptp(axis=0)) # E: matrix[Any, dtype[{int64}]]
+
+reveal_type(mat.sum(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.mean(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.std(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.var(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.prod(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.any(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.all(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.max(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.min(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.argmax(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.argmin(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(mat.ptp(out=ar_f8)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(mat.T) # E: matrix[Any, dtype[{int64}]]
+reveal_type(mat.I) # E: matrix[Any, Any]
+reveal_type(mat.A) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(mat.A1) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(mat.H) # E: matrix[Any, dtype[{int64}]]
+reveal_type(mat.getT()) # E: matrix[Any, dtype[{int64}]]
+reveal_type(mat.getI()) # E: matrix[Any, Any]
+reveal_type(mat.getA()) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(mat.getA1()) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(mat.getH()) # E: matrix[Any, dtype[{int64}]]
+
+reveal_type(np.bmat(ar_f8)) # E: matrix[Any, Any]
+reveal_type(np.bmat([[0, 1, 2]])) # E: matrix[Any, Any]
+reveal_type(np.bmat("mat")) # E: matrix[Any, Any]
+
+reveal_type(np.asmatrix(ar_f8, dtype=np.int64)) # E: matrix[Any, Any]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/memmap.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/memmap.pyi
new file mode 100644
index 00000000..af730749
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/memmap.pyi
@@ -0,0 +1,19 @@
+import numpy as np
+from typing import Any
+
+memmap_obj: np.memmap[Any, np.dtype[np.str_]]
+
+reveal_type(np.memmap.__array_priority__) # E: float
+reveal_type(memmap_obj.__array_priority__) # E: float
+reveal_type(memmap_obj.filename) # E: Union[builtins.str, None]
+reveal_type(memmap_obj.offset) # E: int
+reveal_type(memmap_obj.mode) # E: str
+reveal_type(memmap_obj.flush()) # E: None
+
+reveal_type(np.memmap("file.txt", offset=5)) # E: memmap[Any, dtype[{uint8}]]
+reveal_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3))) # E: memmap[Any, dtype[{float64}]]
+with open("file.txt", "rb") as f:
+ reveal_type(np.memmap(f, dtype=float, order="K")) # E: memmap[Any, dtype[Any]]
+
+reveal_type(memmap_obj.__array_finalize__(object())) # E: None
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/mod.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/mod.pyi
new file mode 100644
index 00000000..b2790b7f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/mod.pyi
@@ -0,0 +1,149 @@
+from typing import Any
+import numpy as np
+
+f8 = np.float64()
+i8 = np.int64()
+u8 = np.uint64()
+
+f4 = np.float32()
+i4 = np.int32()
+u4 = np.uint32()
+
+td = np.timedelta64(0, "D")
+b_ = np.bool_()
+
+b = bool()
+f = float()
+i = int()
+
+AR_b: np.ndarray[Any, np.dtype[np.bool_]]
+AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
+
+# Time structures
+
+reveal_type(td % td) # E: timedelta64
+reveal_type(AR_m % td) # E: Any
+reveal_type(td % AR_m) # E: Any
+
+reveal_type(divmod(td, td)) # E: Tuple[{int64}, timedelta64]
+reveal_type(divmod(AR_m, td)) # E: Tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]]
+reveal_type(divmod(td, AR_m)) # E: Tuple[ndarray[Any, dtype[signedinteger[typing._64Bit]]], ndarray[Any, dtype[timedelta64]]]
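+# Floor division of two timedelta64 values is a dimensionless {int64}, so
+# divmod produces an (integer quotient, timedelta64 remainder) pair.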
+
+# Bool
+
+reveal_type(b_ % b) # E: {int8}
+reveal_type(b_ % i) # E: {int_}
+reveal_type(b_ % f) # E: {float64}
+reveal_type(b_ % b_) # E: {int8}
+reveal_type(b_ % i8) # E: {int64}
+reveal_type(b_ % u8) # E: {uint64}
+reveal_type(b_ % f8) # E: {float64}
+reveal_type(b_ % AR_b) # E: ndarray[Any, dtype[{int8}]]
+
+reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}]
+reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}]
+reveal_type(divmod(b_, f)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}]
+reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}]
+reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(b_, AR_b)) # E: Tuple[ndarray[Any, dtype[{int8}]], ndarray[Any, dtype[{int8}]]]
+
+reveal_type(b % b_) # E: {int8}
+reveal_type(i % b_) # E: {int_}
+reveal_type(f % b_) # E: {float64}
+reveal_type(b_ % b_) # E: {int8}
+reveal_type(i8 % b_) # E: {int64}
+reveal_type(u8 % b_) # E: {uint64}
+reveal_type(f8 % b_) # E: {float64}
+reveal_type(AR_b % b_) # E: ndarray[Any, dtype[{int8}]]
+
+reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}]
+reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}]
+reveal_type(divmod(f, b_)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}]
+reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}]
+reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(AR_b, b_)) # E: Tuple[ndarray[Any, dtype[{int8}]], ndarray[Any, dtype[{int8}]]]
+
+# int
+
+reveal_type(i8 % b) # E: {int64}
+reveal_type(i8 % i) # E: {int64}
+reveal_type(i8 % f) # E: {float64}
+reveal_type(i8 % i8) # E: {int64}
+reveal_type(i8 % f8) # E: {float64}
+reveal_type(i4 % i8) # E: {int64}
+reveal_type(i4 % f8) # E: {float64}
+reveal_type(i4 % i4) # E: {int32}
+reveal_type(i4 % f4) # E: {float32}
+reveal_type(i8 % AR_b) # E: ndarray[Any, dtype[signedinteger[Any]]]
+
+reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(i8, f)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(i8, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i8, i4)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}]
+reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}]
+reveal_type(divmod(i8, AR_b)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]]
+
+reveal_type(b % i8) # E: {int64}
+reveal_type(i % i8) # E: {int64}
+reveal_type(f % i8) # E: {float64}
+reveal_type(i8 % i8) # E: {int64}
+reveal_type(f8 % i8) # E: {float64}
+reveal_type(i8 % i4) # E: {int64}
+reveal_type(f8 % i4) # E: {float64}
+reveal_type(i4 % i4) # E: {int32}
+reveal_type(f4 % i4) # E: {float32}
+reveal_type(AR_b % i8) # E: ndarray[Any, dtype[signedinteger[Any]]]
+
+reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(f, i8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(f8, i8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i4, i8)) # E: Tuple[{int64}, {int64}]
+reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}]
+reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}]
+reveal_type(divmod(AR_b, i8)) # E: Tuple[ndarray[Any, dtype[signedinteger[Any]]], ndarray[Any, dtype[signedinteger[Any]]]]
+
+# float
+
+reveal_type(f8 % b) # E: {float64}
+reveal_type(f8 % i) # E: {float64}
+reveal_type(f8 % f) # E: {float64}
+reveal_type(i8 % f4) # E: {float64}
+reveal_type(f4 % f4) # E: {float32}
+reveal_type(f8 % AR_b) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f8, f)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}]
+reveal_type(divmod(f8, AR_b)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+
+reveal_type(b % f8) # E: {float64}
+reveal_type(i % f8) # E: {float64}
+reveal_type(f % f8) # E: {float64}
+reveal_type(f8 % f8) # E: {float64}
+reveal_type(f4 % f8) # E: {float64}
+reveal_type(f4 % f4) # E: {float32}
+reveal_type(AR_b % f8) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}]
+reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}]
+reveal_type(divmod(AR_b, f8)) # E: Tuple[ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/modules.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/modules.pyi
new file mode 100644
index 00000000..ba830eb0
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/modules.pyi
@@ -0,0 +1,47 @@
+import numpy as np
+from numpy import f2py
+
+reveal_type(np) # E: ModuleType
+
+reveal_type(np.char) # E: ModuleType
+reveal_type(np.ctypeslib) # E: ModuleType
+reveal_type(np.emath) # E: ModuleType
+reveal_type(np.fft) # E: ModuleType
+reveal_type(np.lib) # E: ModuleType
+reveal_type(np.linalg) # E: ModuleType
+reveal_type(np.ma) # E: ModuleType
+reveal_type(np.matrixlib) # E: ModuleType
+reveal_type(np.polynomial) # E: ModuleType
+reveal_type(np.random) # E: ModuleType
+reveal_type(np.rec) # E: ModuleType
+reveal_type(np.testing) # E: ModuleType
+reveal_type(np.version) # E: ModuleType
+
+reveal_type(np.lib.format) # E: ModuleType
+reveal_type(np.lib.mixins) # E: ModuleType
+reveal_type(np.lib.scimath) # E: ModuleType
+reveal_type(np.lib.stride_tricks) # E: ModuleType
+reveal_type(np.ma.extras) # E: ModuleType
+reveal_type(np.polynomial.chebyshev) # E: ModuleType
+reveal_type(np.polynomial.hermite) # E: ModuleType
+reveal_type(np.polynomial.hermite_e) # E: ModuleType
+reveal_type(np.polynomial.laguerre) # E: ModuleType
+reveal_type(np.polynomial.legendre) # E: ModuleType
+reveal_type(np.polynomial.polynomial) # E: ModuleType
+
+reveal_type(np.__path__) # E: list[builtins.str]
+reveal_type(np.__version__) # E: str
+reveal_type(np.__git_version__) # E: str
+reveal_type(np.test) # E: _pytesttester.PytestTester
+reveal_type(np.test.module_name) # E: str
+
+reveal_type(np.__all__) # E: list[builtins.str]
+reveal_type(np.char.__all__) # E: list[builtins.str]
+reveal_type(np.ctypeslib.__all__) # E: list[builtins.str]
+reveal_type(np.emath.__all__) # E: list[builtins.str]
+reveal_type(np.lib.__all__) # E: list[builtins.str]
+reveal_type(np.ma.__all__) # E: list[builtins.str]
+reveal_type(np.random.__all__) # E: list[builtins.str]
+reveal_type(np.rec.__all__) # E: list[builtins.str]
+reveal_type(np.testing.__all__) # E: list[builtins.str]
+reveal_type(f2py.__all__) # E: list[builtins.str]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi
new file mode 100644
index 00000000..27a54f50
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/multiarray.pyi
@@ -0,0 +1,146 @@
+import datetime as dt
+from typing import Any, TypeVar
+from pathlib import Path
+
+import numpy as np
+import numpy.typing as npt
+
+_SCT = TypeVar("_SCT", bound=np.generic, covariant=True)
+
+class SubClass(np.ndarray[Any, np.dtype[_SCT]]): ...
+
+subclass: SubClass[np.float64]
+
+AR_f8: npt.NDArray[np.float64]
+AR_i8: npt.NDArray[np.int64]
+AR_u1: npt.NDArray[np.uint8]
+AR_m: npt.NDArray[np.timedelta64]
+AR_M: npt.NDArray[np.datetime64]
+
+AR_LIKE_f: list[float]
+AR_LIKE_i: list[int]
+
+m: np.timedelta64
+M: np.datetime64
+
+b_f8 = np.broadcast(AR_f8)
+b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8)
+
+nditer_obj: np.nditer
+
+date_scalar: dt.date
+date_seq: list[dt.date]
+timedelta_seq: list[dt.timedelta]
+
+def func(a: int) -> bool: ...
+
+reveal_type(next(b_f8)) # E: tuple[Any, ...]
+reveal_type(b_f8.reset()) # E: None
+reveal_type(b_f8.index) # E: int
+reveal_type(b_f8.iters) # E: tuple[flatiter[Any], ...]
+reveal_type(b_f8.nd) # E: int
+reveal_type(b_f8.ndim) # E: int
+reveal_type(b_f8.numiter) # E: int
+reveal_type(b_f8.shape) # E: tuple[builtins.int, ...]
+reveal_type(b_f8.size) # E: int
+
+reveal_type(next(b_i8_f8_f8)) # E: tuple[Any, ...]
+reveal_type(b_i8_f8_f8.reset()) # E: None
+reveal_type(b_i8_f8_f8.index) # E: int
+reveal_type(b_i8_f8_f8.iters) # E: tuple[flatiter[Any], ...]
+reveal_type(b_i8_f8_f8.nd) # E: int
+reveal_type(b_i8_f8_f8.ndim) # E: int
+reveal_type(b_i8_f8_f8.numiter) # E: int
+reveal_type(b_i8_f8_f8.shape) # E: tuple[builtins.int, ...]
+reveal_type(b_i8_f8_f8.size) # E: int
+
+reveal_type(np.inner(AR_f8, AR_i8)) # E: Any
+
+reveal_type(np.where([True, True, False])) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+reveal_type(np.where([True, True, False], 1, 0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.lexsort([0, 1, 2])) # E: Any
+
+reveal_type(np.can_cast(np.dtype("i8"), int)) # E: bool
+reveal_type(np.can_cast(AR_f8, "f8")) # E: bool
+reveal_type(np.can_cast(AR_f8, np.complex128, casting="unsafe")) # E: bool
+
+reveal_type(np.min_scalar_type([1])) # E: dtype[Any]
+reveal_type(np.min_scalar_type(AR_f8)) # E: dtype[Any]
+
+reveal_type(np.result_type(int, [1])) # E: dtype[Any]
+reveal_type(np.result_type(AR_f8, AR_u1)) # E: dtype[Any]
+reveal_type(np.result_type(AR_f8, np.complex128)) # E: dtype[Any]
+
+reveal_type(np.dot(AR_LIKE_f, AR_i8)) # E: Any
+reveal_type(np.dot(AR_u1, 1)) # E: Any
+reveal_type(np.dot(1.5j, 1)) # E: Any
+reveal_type(np.dot(AR_u1, 1, out=AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.vdot(AR_LIKE_f, AR_i8)) # E: floating[Any]
+reveal_type(np.vdot(AR_u1, 1)) # E: signedinteger[Any]
+reveal_type(np.vdot(1.5j, 1)) # E: complexfloating[Any, Any]
+
+reveal_type(np.bincount(AR_i8)) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(np.copyto(AR_f8, [1., 1.5, 1.6])) # E: None
+
+reveal_type(np.putmask(AR_f8, [True, True, False], 1.5)) # E: None
+
+reveal_type(np.packbits(AR_i8)) # E: ndarray[Any, dtype[{uint8}]]
+reveal_type(np.packbits(AR_u1)) # E: ndarray[Any, dtype[{uint8}]]
+
+reveal_type(np.unpackbits(AR_u1)) # E: ndarray[Any, dtype[{uint8}]]
+
+reveal_type(np.shares_memory(1, 2)) # E: bool
+reveal_type(np.shares_memory(AR_f8, AR_f8, max_work=1)) # E: bool
+
+reveal_type(np.may_share_memory(1, 2)) # E: bool
+reveal_type(np.may_share_memory(AR_f8, AR_f8, max_work=1)) # E: bool
+
+reveal_type(np.geterrobj()) # E: list[Any]
+
+reveal_type(np.seterrobj([8192, 521, None])) # E: None
+
+reveal_type(np.promote_types(np.int32, np.int64)) # E: dtype[Any]
+reveal_type(np.promote_types("f4", float)) # E: dtype[Any]
+
+reveal_type(np.frompyfunc(func, 1, 1, identity=None)) # E: ufunc
+
+reveal_type(np.datetime_data("m8[D]")) # E: Tuple[builtins.str, builtins.int]
+reveal_type(np.datetime_data(np.datetime64)) # E: Tuple[builtins.str, builtins.int]
+reveal_type(np.datetime_data(np.dtype(np.timedelta64))) # E: Tuple[builtins.str, builtins.int]
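+# datetime_data returns the (unit, count) pair encoded in a datetime64 or
+# timedelta64 dtype, e.g. ("D", 1) for "m8[D]".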
+
+reveal_type(np.busday_count("2011-01", "2011-02")) # E: {int_}
+reveal_type(np.busday_count(["2011-01"], "2011-02")) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.busday_count(["2011-01"], date_scalar)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(np.busday_offset(M, m)) # E: datetime64
+reveal_type(np.busday_offset(date_scalar, m)) # E: datetime64
+reveal_type(np.busday_offset(M, 5)) # E: datetime64
+reveal_type(np.busday_offset(AR_M, m)) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.busday_offset(M, timedelta_seq)) # E: ndarray[Any, dtype[datetime64]]
+reveal_type(np.busday_offset("2011-01", "2011-02", roll="forward")) # E: datetime64
+reveal_type(np.busday_offset(["2011-01"], "2011-02", roll="forward")) # E: ndarray[Any, dtype[datetime64]]
+
+reveal_type(np.is_busday("2012")) # E: bool_
+reveal_type(np.is_busday(date_scalar)) # E: bool_
+reveal_type(np.is_busday(["2012"])) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.datetime_as_string(M)) # E: str_
+reveal_type(np.datetime_as_string(AR_M)) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(np.busdaycalendar(holidays=date_seq)) # E: busdaycalendar
+reveal_type(np.busdaycalendar(holidays=[M])) # E: busdaycalendar
+
+reveal_type(np.compare_chararrays("a", "b", "!=", rstrip=False)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.compare_chararrays(b"a", b"a", "==", True)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.add_docstring(func, "test")) # E: None
+
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"])) # E: tuple[nditer, ...]
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]])) # E: tuple[nditer, ...]
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_)) # E: tuple[nditer, ...]
+reveal_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no")) # E: tuple[nditer, ...]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi
new file mode 100644
index 00000000..a7cc6819
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nbit_base_example.pyi
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+from typing import TypeVar
+import numpy as np
+import numpy.typing as npt
+
+T1 = TypeVar("T1", bound=npt.NBitBase)
+T2 = TypeVar("T2", bound=npt.NBitBase)
+
+def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]:
+ return a + b
+
+i8: np.int64
+i4: np.int32
+f8: np.float64
+f4: np.float32
+
+reveal_type(add(f8, i8)) # E: {float64}
+reveal_type(add(f4, i8)) # E: {float64}
+reveal_type(add(f8, i4)) # E: {float64}
+reveal_type(add(f4, i4)) # E: {float32}
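+# The union T1 | T2 resolves to the larger of the two precisions: mixing
+# float32 with int64 promotes to {float64}, while float32 with int32 stays {float32}.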
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
new file mode 100644
index 00000000..6885d4fd
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
@@ -0,0 +1,51 @@
+import numpy as np
+import numpy.typing as npt
+
+nd: npt.NDArray[np.int_] = np.array([[1, 2], [3, 4]])
+
+# item
+reveal_type(nd.item()) # E: int
+reveal_type(nd.item(1)) # E: int
+reveal_type(nd.item(0, 1)) # E: int
+reveal_type(nd.item((0, 1))) # E: int
+
+# tolist
+reveal_type(nd.tolist()) # E: Any
+
+# itemset does not return a value
+# tostring is pretty simple
+# tobytes is pretty simple
+# tofile does not return a value
+# dump does not return a value
+# dumps is pretty simple
+
+# astype
+reveal_type(nd.astype("float")) # E: ndarray[Any, dtype[Any]]
+reveal_type(nd.astype(float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(nd.astype(np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe", True)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(nd.astype(np.float64, "K", "unsafe", True, True)) # E: ndarray[Any, dtype[{float64}]]
+
+# byteswap
+reveal_type(nd.byteswap()) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(nd.byteswap(True)) # E: ndarray[Any, dtype[{int_}]]
+
+# copy
+reveal_type(nd.copy()) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(nd.copy("C")) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(nd.view()) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(nd.view(np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(nd.view(float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(nd.view(np.float64, np.matrix)) # E: matrix[Any, Any]
+
+# getfield
+reveal_type(nd.getfield("float")) # E: ndarray[Any, dtype[Any]]
+reveal_type(nd.getfield(float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(nd.getfield(np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(nd.getfield(np.float64, 8)) # E: ndarray[Any, dtype[{float64}]]
+
+# setflags does not return a value
+# fill does not return a value
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi
new file mode 100644
index 00000000..03fea72d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -0,0 +1,223 @@
+"""
+Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
+
+More extensive tests are performed for the methods'
+function-based counterpart in `../from_numeric.py`.
+
+"""
+
+import operator
+import ctypes as ct
+from typing import Any
+
+import numpy as np
+from numpy._typing import NDArray
+
+class SubClass(NDArray[np.object_]): ...
+
+f8: np.float64
+B: SubClass
+AR_f8: NDArray[np.float64]
+AR_i8: NDArray[np.int64]
+AR_U: NDArray[np.str_]
+AR_V: NDArray[np.void]
+
+ctypes_obj = AR_f8.ctypes
+
+reveal_type(AR_f8.__dlpack__()) # E: Any
+reveal_type(AR_f8.__dlpack_device__()) # E: Tuple[int, Literal[0]]
+
+reveal_type(ctypes_obj.data) # E: int
+reveal_type(ctypes_obj.shape) # E: ctypes.Array[{c_intp}]
+reveal_type(ctypes_obj.strides) # E: ctypes.Array[{c_intp}]
+reveal_type(ctypes_obj._as_parameter_) # E: ctypes.c_void_p
+
+reveal_type(ctypes_obj.data_as(ct.c_void_p)) # E: ctypes.c_void_p
+reveal_type(ctypes_obj.shape_as(ct.c_longlong)) # E: ctypes.Array[ctypes.c_longlong]
+reveal_type(ctypes_obj.strides_as(ct.c_ubyte)) # E: ctypes.Array[ctypes.c_ubyte]
+
+reveal_type(f8.all()) # E: bool_
+reveal_type(AR_f8.all()) # E: bool_
+reveal_type(AR_f8.all(axis=0)) # E: Any
+reveal_type(AR_f8.all(keepdims=True)) # E: Any
+reveal_type(AR_f8.all(out=B)) # E: SubClass
+
+reveal_type(f8.any()) # E: bool_
+reveal_type(AR_f8.any()) # E: bool_
+reveal_type(AR_f8.any(axis=0)) # E: Any
+reveal_type(AR_f8.any(keepdims=True)) # E: Any
+reveal_type(AR_f8.any(out=B)) # E: SubClass
+
+reveal_type(f8.argmax()) # E: {intp}
+reveal_type(AR_f8.argmax()) # E: {intp}
+reveal_type(AR_f8.argmax(axis=0)) # E: Any
+reveal_type(AR_f8.argmax(out=B)) # E: SubClass
+
+reveal_type(f8.argmin()) # E: {intp}
+reveal_type(AR_f8.argmin()) # E: {intp}
+reveal_type(AR_f8.argmin(axis=0)) # E: Any
+reveal_type(AR_f8.argmin(out=B)) # E: SubClass
+
+reveal_type(f8.argsort()) # E: ndarray[Any, Any]
+reveal_type(AR_f8.argsort()) # E: ndarray[Any, Any]
+
+reveal_type(f8.astype(np.int64).choose([()])) # E: ndarray[Any, Any]
+reveal_type(AR_f8.choose([0])) # E: ndarray[Any, Any]
+reveal_type(AR_f8.choose([0], out=B)) # E: SubClass
+
+reveal_type(f8.clip(1)) # E: Any
+reveal_type(AR_f8.clip(1)) # E: Any
+reveal_type(AR_f8.clip(None, 1)) # E: Any
+reveal_type(AR_f8.clip(1, out=B)) # E: SubClass
+reveal_type(AR_f8.clip(None, 1, out=B)) # E: SubClass
+
+reveal_type(f8.compress([0])) # E: ndarray[Any, Any]
+reveal_type(AR_f8.compress([0])) # E: ndarray[Any, Any]
+reveal_type(AR_f8.compress([0], out=B)) # E: SubClass
+
+reveal_type(f8.conj()) # E: {float64}
+reveal_type(AR_f8.conj()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(B.conj()) # E: SubClass
+
+reveal_type(f8.conjugate()) # E: {float64}
+reveal_type(AR_f8.conjugate()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(B.conjugate()) # E: SubClass
+
+reveal_type(f8.cumprod()) # E: ndarray[Any, Any]
+reveal_type(AR_f8.cumprod()) # E: ndarray[Any, Any]
+reveal_type(AR_f8.cumprod(out=B)) # E: SubClass
+
+reveal_type(f8.cumsum()) # E: ndarray[Any, Any]
+reveal_type(AR_f8.cumsum()) # E: ndarray[Any, Any]
+reveal_type(AR_f8.cumsum(out=B)) # E: SubClass
+
+reveal_type(f8.max()) # E: Any
+reveal_type(AR_f8.max()) # E: Any
+reveal_type(AR_f8.max(axis=0)) # E: Any
+reveal_type(AR_f8.max(keepdims=True)) # E: Any
+reveal_type(AR_f8.max(out=B)) # E: SubClass
+
+reveal_type(f8.mean()) # E: Any
+reveal_type(AR_f8.mean()) # E: Any
+reveal_type(AR_f8.mean(axis=0)) # E: Any
+reveal_type(AR_f8.mean(keepdims=True)) # E: Any
+reveal_type(AR_f8.mean(out=B)) # E: SubClass
+
+reveal_type(f8.min()) # E: Any
+reveal_type(AR_f8.min()) # E: Any
+reveal_type(AR_f8.min(axis=0)) # E: Any
+reveal_type(AR_f8.min(keepdims=True)) # E: Any
+reveal_type(AR_f8.min(out=B)) # E: SubClass
+
+reveal_type(f8.newbyteorder()) # E: {float64}
+reveal_type(AR_f8.newbyteorder()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(B.newbyteorder('|')) # E: SubClass
+
+reveal_type(f8.prod()) # E: Any
+reveal_type(AR_f8.prod()) # E: Any
+reveal_type(AR_f8.prod(axis=0)) # E: Any
+reveal_type(AR_f8.prod(keepdims=True)) # E: Any
+reveal_type(AR_f8.prod(out=B)) # E: SubClass
+
+reveal_type(f8.ptp()) # E: Any
+reveal_type(AR_f8.ptp()) # E: Any
+reveal_type(AR_f8.ptp(axis=0)) # E: Any
+reveal_type(AR_f8.ptp(keepdims=True)) # E: Any
+reveal_type(AR_f8.ptp(out=B)) # E: SubClass
+
+reveal_type(f8.round()) # E: {float64}
+reveal_type(AR_f8.round()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(AR_f8.round(out=B)) # E: SubClass
+
+reveal_type(f8.repeat(1)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(AR_f8.repeat(1)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(B.repeat(1)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(f8.std()) # E: Any
+reveal_type(AR_f8.std()) # E: Any
+reveal_type(AR_f8.std(axis=0)) # E: Any
+reveal_type(AR_f8.std(keepdims=True)) # E: Any
+reveal_type(AR_f8.std(out=B)) # E: SubClass
+
+reveal_type(f8.sum()) # E: Any
+reveal_type(AR_f8.sum()) # E: Any
+reveal_type(AR_f8.sum(axis=0)) # E: Any
+reveal_type(AR_f8.sum(keepdims=True)) # E: Any
+reveal_type(AR_f8.sum(out=B)) # E: SubClass
+
+reveal_type(f8.take(0)) # E: {float64}
+reveal_type(AR_f8.take(0)) # E: {float64}
+reveal_type(AR_f8.take([0])) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(AR_f8.take(0, out=B)) # E: SubClass
+reveal_type(AR_f8.take([0], out=B)) # E: SubClass
+
+reveal_type(f8.var()) # E: Any
+reveal_type(AR_f8.var()) # E: Any
+reveal_type(AR_f8.var(axis=0)) # E: Any
+reveal_type(AR_f8.var(keepdims=True)) # E: Any
+reveal_type(AR_f8.var(out=B)) # E: SubClass
+
+reveal_type(AR_f8.argpartition([0])) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(AR_f8.diagonal()) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(AR_f8.dot(1)) # E: ndarray[Any, Any]
+reveal_type(AR_f8.dot([1])) # E: Any
+reveal_type(AR_f8.dot(1, out=B)) # E: SubClass
+
+reveal_type(AR_f8.nonzero()) # E: tuple[ndarray[Any, dtype[{intp}]], ...]
+
+reveal_type(AR_f8.searchsorted(1)) # E: {intp}
+reveal_type(AR_f8.searchsorted([1])) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(AR_f8.trace()) # E: Any
+reveal_type(AR_f8.trace(out=B)) # E: SubClass
+
+reveal_type(AR_f8.item()) # E: float
+reveal_type(AR_U.item()) # E: str
+
+reveal_type(AR_f8.ravel()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(AR_U.ravel()) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(AR_f8.flatten()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(AR_U.flatten()) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(AR_f8.reshape(1)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(AR_U.reshape(1)) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(int(AR_f8)) # E: int
+reveal_type(int(AR_U)) # E: int
+
+reveal_type(float(AR_f8)) # E: float
+reveal_type(float(AR_U)) # E: float
+
+reveal_type(complex(AR_f8)) # E: complex
+
+reveal_type(operator.index(AR_i8)) # E: int
+
+reveal_type(AR_f8.__array_prepare__(B)) # E: ndarray[Any, dtype[object_]]
+reveal_type(AR_f8.__array_wrap__(B)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(AR_V[0]) # E: Any
+reveal_type(AR_V[0, 0]) # E: Any
+reveal_type(AR_V[AR_i8]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V[AR_i8, AR_i8]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V[AR_i8, None]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V[0, ...]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V[[0]]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V[[0], [0]]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V[:]) # E: ndarray[Any, dtype[void]]
+reveal_type(AR_V["a"]) # E: ndarray[Any, dtype[Any]]
+reveal_type(AR_V[["a", "b"]]) # E: ndarray[Any, dtype[void]]
+
+reveal_type(AR_f8.dump("test_file")) # E: None
+reveal_type(AR_f8.dump(b"test_file")) # E: None
+with open("test_file", "wb") as f:
+ reveal_type(AR_f8.dump(f)) # E: None
+
+reveal_type(AR_f8.__array_finalize__(None)) # E: None
+reveal_type(AR_f8.__array_finalize__(B)) # E: None
+reveal_type(AR_f8.__array_finalize__(AR_f8)) # E: None
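+
+# A note on the ``out=B`` cases above: the stubs type these methods so
+# that an explicit ``out=`` argument's own (subclass) type is what comes
+# back, mirroring the runtime contract that the out array itself is
+# returned.  A minimal, hypothetical runtime sketch of that contract:
+#
+#   out = np.empty_like(AR_f8).view(SubClass)
+#   assert AR_f8.cumsum(out=out) is out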
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi
new file mode 100644
index 00000000..c000bf45
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi
@@ -0,0 +1,35 @@
+import numpy as np
+
+nd = np.array([[1, 2], [3, 4]])
+
+# reshape
+reveal_type(nd.reshape()) # E: ndarray
+reveal_type(nd.reshape(4)) # E: ndarray
+reveal_type(nd.reshape(2, 2)) # E: ndarray
+reveal_type(nd.reshape((2, 2))) # E: ndarray
+
+reveal_type(nd.reshape((2, 2), order="C")) # E: ndarray
+reveal_type(nd.reshape(4, order="C")) # E: ndarray
+
+# resize does not return a value
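+# (a hypothetical extra check of that claim; numpy's stub annotates
+# ``ndarray.resize`` as returning None:)
+reveal_type(nd.resize(2, 2)) # E: None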
+
+# transpose
+reveal_type(nd.transpose()) # E: ndarray
+reveal_type(nd.transpose(1, 0)) # E: ndarray
+reveal_type(nd.transpose((1, 0))) # E: ndarray
+
+# swapaxes
+reveal_type(nd.swapaxes(0, 1)) # E: ndarray
+
+# flatten
+reveal_type(nd.flatten()) # E: ndarray
+reveal_type(nd.flatten("C")) # E: ndarray
+
+# ravel
+reveal_type(nd.ravel()) # E: ndarray
+reveal_type(nd.ravel("C")) # E: ndarray
+
+# squeeze
+reveal_type(nd.squeeze()) # E: ndarray
+reveal_type(nd.squeeze(0)) # E: ndarray
+reveal_type(nd.squeeze((0, 2))) # E: ndarray
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nditer.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nditer.pyi
new file mode 100644
index 00000000..fd8b7e10
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nditer.pyi
@@ -0,0 +1,46 @@
+import numpy as np
+
+nditer_obj: np.nditer
+
+reveal_type(np.nditer([0, 1], flags=["c_index"])) # E: nditer
+reveal_type(np.nditer([0, 1], op_flags=[["readonly", "readonly"]])) # E: nditer
+reveal_type(np.nditer([0, 1], op_dtypes=np.int_)) # E: nditer
+reveal_type(np.nditer([0, 1], order="C", casting="no")) # E: nditer
+
+reveal_type(nditer_obj.dtypes) # E: tuple[dtype[Any], ...]
+reveal_type(nditer_obj.finished) # E: bool
+reveal_type(nditer_obj.has_delayed_bufalloc) # E: bool
+reveal_type(nditer_obj.has_index) # E: bool
+reveal_type(nditer_obj.has_multi_index) # E: bool
+reveal_type(nditer_obj.index) # E: int
+reveal_type(nditer_obj.iterationneedsapi) # E: bool
+reveal_type(nditer_obj.iterindex) # E: int
+reveal_type(nditer_obj.iterrange) # E: tuple[builtins.int, ...]
+reveal_type(nditer_obj.itersize) # E: int
+reveal_type(nditer_obj.itviews) # E: tuple[ndarray[Any, dtype[Any]], ...]
+reveal_type(nditer_obj.multi_index) # E: tuple[builtins.int, ...]
+reveal_type(nditer_obj.ndim) # E: int
+reveal_type(nditer_obj.nop) # E: int
+reveal_type(nditer_obj.operands) # E: tuple[ndarray[Any, dtype[Any]], ...]
+reveal_type(nditer_obj.shape) # E: tuple[builtins.int, ...]
+reveal_type(nditer_obj.value) # E: tuple[ndarray[Any, dtype[Any]], ...]
+
+reveal_type(nditer_obj.close()) # E: None
+reveal_type(nditer_obj.copy()) # E: nditer
+reveal_type(nditer_obj.debug_print()) # E: None
+reveal_type(nditer_obj.enable_external_loop()) # E: None
+reveal_type(nditer_obj.iternext()) # E: bool
+reveal_type(nditer_obj.remove_axis(0)) # E: None
+reveal_type(nditer_obj.remove_multi_index()) # E: None
+reveal_type(nditer_obj.reset()) # E: None
+
+reveal_type(len(nditer_obj)) # E: int
+reveal_type(iter(nditer_obj)) # E: nditer
+reveal_type(next(nditer_obj)) # E: tuple[ndarray[Any, dtype[Any]], ...]
+reveal_type(nditer_obj.__copy__()) # E: nditer
+with nditer_obj as f:
+ reveal_type(f) # E: nditer
+reveal_type(nditer_obj[0]) # E: ndarray[Any, dtype[Any]]
+reveal_type(nditer_obj[:]) # E: tuple[ndarray[Any, dtype[Any]], ...]
+nditer_obj[0] = 0
+nditer_obj[:] = [0, 1]
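+
+# A minimal runtime sketch of the iteration protocol whose attributes are
+# checked above (illustrative only; assumes a single 1-D operand):
+#
+#   it = np.nditer([np.arange(3)], flags=["c_index"])
+#   while not it.finished:
+#       _ = it[0]        # view of the current element
+#       it.iternext()    # advances; returns False once exhausted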
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi
new file mode 100644
index 00000000..286f75ac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/nested_sequence.pyi
@@ -0,0 +1,26 @@
+from collections.abc import Sequence
+from typing import Any
+
+from numpy._typing import _NestedSequence
+
+a: Sequence[int]
+b: Sequence[Sequence[int]]
+c: Sequence[Sequence[Sequence[int]]]
+d: Sequence[Sequence[Sequence[Sequence[int]]]]
+e: Sequence[bool]
+f: tuple[int, ...]
+g: list[int]
+h: Sequence[Any]
+
+def func(a: _NestedSequence[int]) -> None:
+ ...
+
+reveal_type(func(a)) # E: None
+reveal_type(func(b)) # E: None
+reveal_type(func(c)) # E: None
+reveal_type(func(d)) # E: None
+reveal_type(func(e)) # E: None
+reveal_type(func(f)) # E: None
+reveal_type(func(g)) # E: None
+reveal_type(func(h)) # E: None
+reveal_type(func(range(15))) # E: None
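+
+# ``_NestedSequence`` is a structural protocol over ``__len__`` and
+# ``__getitem__``, which is why ``range(15)`` above is accepted alongside
+# the list and tuple cases.  A hypothetical extra case in the same vein:
+reveal_type(func((0, 1, 2))) # E: None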
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/npyio.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/npyio.pyi
new file mode 100644
index 00000000..68605cf9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/npyio.pyi
@@ -0,0 +1,92 @@
+import re
+import pathlib
+from typing import IO
+
+import numpy.typing as npt
+import numpy as np
+
+str_path: str
+pathlib_path: pathlib.Path
+str_file: IO[str]
+bytes_file: IO[bytes]
+
+bag_obj: np.lib.npyio.BagObj[int]
+npz_file: np.lib.npyio.NpzFile
+
+AR_i8: npt.NDArray[np.int64]
+AR_LIKE_f8: list[float]
+
+class BytesWriter:
+ def write(self, data: bytes) -> None: ...
+
+class BytesReader:
+ def read(self, n: int = ...) -> bytes: ...
+ def seek(self, offset: int, whence: int = ...) -> int: ...
+
+bytes_writer: BytesWriter
+bytes_reader: BytesReader
+
+reveal_type(bag_obj.a) # E: int
+reveal_type(bag_obj.b) # E: int
+
+reveal_type(npz_file.zip) # E: zipfile.ZipFile
+reveal_type(npz_file.fid) # E: Union[None, typing.IO[builtins.str]]
+reveal_type(npz_file.files) # E: list[builtins.str]
+reveal_type(npz_file.allow_pickle) # E: bool
+reveal_type(npz_file.pickle_kwargs) # E: Union[None, typing.Mapping[builtins.str, Any]]
+reveal_type(npz_file.f) # E: lib.npyio.BagObj[lib.npyio.NpzFile]
+reveal_type(npz_file["test"]) # E: ndarray[Any, dtype[Any]]
+reveal_type(len(npz_file)) # E: int
+with npz_file as f:
+ reveal_type(f) # E: lib.npyio.NpzFile
+
+reveal_type(np.load(bytes_file)) # E: Any
+reveal_type(np.load(pathlib_path, allow_pickle=True)) # E: Any
+reveal_type(np.load(str_path, encoding="bytes")) # E: Any
+reveal_type(np.load(bytes_reader)) # E: Any
+
+reveal_type(np.save(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.save(pathlib_path, AR_i8, allow_pickle=True)) # E: None
+reveal_type(np.save(str_path, AR_LIKE_f8)) # E: None
+reveal_type(np.save(bytes_writer, AR_LIKE_f8)) # E: None
+
+reveal_type(np.savez(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None
+reveal_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None
+reveal_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None
+
+reveal_type(np.savez_compressed(bytes_file, AR_LIKE_f8)) # E: None
+reveal_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8)) # E: None
+reveal_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8)) # E: None
+reveal_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8)) # E: None
+
+reveal_type(np.loadtxt(bytes_file)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.loadtxt(pathlib_path, dtype=np.str_)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.loadtxt(str_path, dtype=str, skiprows=2)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.loadtxt(str_file, comments="test")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.loadtxt(str_file, comments=None)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.loadtxt(str_path, delimiter="\n")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.loadtxt(str_path, ndmin=2)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.loadtxt(["1", "2", "3"])) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.fromregex(bytes_file, "test", np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fromregex(str_file, b"test", dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8")) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.fromregex(pathlib_path, "test", np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.fromregex(bytes_reader, "test", np.float64)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.genfromtxt(bytes_file)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.genfromtxt(pathlib_path, dtype=np.str_)) # E: ndarray[Any, dtype[str_]]
+reveal_type(np.genfromtxt(str_path, dtype=str, skip_header=2)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.genfromtxt(str_file, comments="test")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.genfromtxt(str_path, delimiter="\n")) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.genfromtxt(str_path, ndmin=2)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.genfromtxt(["1", "2", "3"], ndmin=2)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.recfromtxt(bytes_file)) # E: recarray[Any, dtype[record]]
+reveal_type(np.recfromtxt(pathlib_path, usemask=True)) # E: ma.mrecords.MaskedRecords[Any, dtype[void]]
+reveal_type(np.recfromtxt(["1", "2", "3"])) # E: recarray[Any, dtype[record]]
+
+reveal_type(np.recfromcsv(bytes_file)) # E: recarray[Any, dtype[record]]
+reveal_type(np.recfromcsv(pathlib_path, usemask=True)) # E: ma.mrecords.MaskedRecords[Any, dtype[void]]
+reveal_type(np.recfromcsv(["1", "2", "3"])) # E: recarray[Any, dtype[record]]
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numeric.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numeric.pyi
new file mode 100644
index 00000000..b8fe15d3
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numeric.pyi
@@ -0,0 +1,133 @@
+"""
+Tests for :mod:`core.numeric`.
+
+Does not include tests which fall under ``array_constructors``.
+
+"""
+
+import numpy as np
+import numpy.typing as npt
+
+class SubClass(npt.NDArray[np.int64]):
+ ...
+
+i8: np.int64
+
+AR_b: npt.NDArray[np.bool_]
+AR_u8: npt.NDArray[np.uint64]
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_m: npt.NDArray[np.timedelta64]
+AR_O: npt.NDArray[np.object_]
+
+B: list[int]
+C: SubClass
+
+reveal_type(np.count_nonzero(i8)) # E: int
+reveal_type(np.count_nonzero(AR_i8)) # E: int
+reveal_type(np.count_nonzero(B)) # E: int
+reveal_type(np.count_nonzero(AR_i8, keepdims=True)) # E: Any
+reveal_type(np.count_nonzero(AR_i8, axis=0)) # E: Any
+
+reveal_type(np.isfortran(i8)) # E: bool
+reveal_type(np.isfortran(AR_i8)) # E: bool
+
+reveal_type(np.argwhere(i8)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.argwhere(AR_i8)) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(np.flatnonzero(i8)) # E: ndarray[Any, dtype[{intp}]]
+reveal_type(np.flatnonzero(AR_i8)) # E: ndarray[Any, dtype[{intp}]]
+
+reveal_type(np.correlate(B, AR_i8, mode="valid")) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.correlate(AR_i8, AR_i8, mode="same")) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.correlate(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.correlate(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.correlate(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.correlate(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.correlate(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.correlate(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.correlate(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.convolve(B, AR_i8, mode="valid")) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.convolve(AR_i8, AR_i8, mode="same")) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.convolve(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.convolve(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.convolve(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.convolve(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.convolve(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.convolve(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.convolve(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.outer(i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.outer(B, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.outer(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.outer(AR_i8, AR_i8, out=C)) # E: SubClass
+reveal_type(np.outer(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.outer(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.outer(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.outer(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.outer(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.outer(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.outer(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.tensordot(B, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.tensordot(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.tensordot(AR_i8, AR_i8, axes=0)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1))) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.tensordot(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.tensordot(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.tensordot(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.tensordot(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.tensordot(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.tensordot(AR_i8, AR_m)) # E: ndarray[Any, dtype[timedelta64]]
+reveal_type(np.tensordot(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.isscalar(i8)) # E: bool
+reveal_type(np.isscalar(AR_i8)) # E: bool
+reveal_type(np.isscalar(B)) # E: bool
+
+reveal_type(np.roll(AR_i8, 1)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.roll(AR_i8, (1, 2))) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.roll(B, 1)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.rollaxis(AR_i8, 0, 1)) # E: ndarray[Any, dtype[{int64}]]
+
+reveal_type(np.moveaxis(AR_i8, 0, 1)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.moveaxis(AR_i8, (0, 1), (1, 2))) # E: ndarray[Any, dtype[{int64}]]
+
+reveal_type(np.cross(B, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.cross(AR_i8, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.cross(AR_b, AR_u8)) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
+reveal_type(np.cross(AR_i8, AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.cross(AR_i8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.cross(AR_i8, AR_c16)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.cross(AR_O, AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.indices([0, 1, 2])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[ndarray[Any, dtype[{int_}]], ...]
+reveal_type(np.indices([0, 1, 2], dtype=np.float64)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64)) # E: tuple[ndarray[Any, dtype[{float64}]], ...]
+reveal_type(np.indices([0, 1, 2], dtype=float)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.indices([0, 1, 2], sparse=True, dtype=float)) # E: tuple[ndarray[Any, dtype[Any]], ...]
+
+reveal_type(np.binary_repr(1)) # E: str
+
+reveal_type(np.base_repr(1)) # E: str
+
+reveal_type(np.allclose(i8, AR_i8)) # E: bool
+reveal_type(np.allclose(B, AR_i8)) # E: bool
+reveal_type(np.allclose(AR_i8, AR_i8)) # E: bool
+
+reveal_type(np.isclose(i8, i8)) # E: bool_
+reveal_type(np.isclose(i8, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isclose(B, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isclose(AR_i8, AR_i8)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.array_equal(i8, AR_i8)) # E: bool
+reveal_type(np.array_equal(B, AR_i8)) # E: bool
+reveal_type(np.array_equal(AR_i8, AR_i8)) # E: bool
+
+reveal_type(np.array_equiv(i8, AR_i8)) # E: bool
+reveal_type(np.array_equiv(B, AR_i8)) # E: bool
+reveal_type(np.array_equiv(AR_i8, AR_i8)) # E: bool
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi
new file mode 100644
index 00000000..e1857557
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/numerictypes.pyi
@@ -0,0 +1,42 @@
+import numpy as np
+
+reveal_type(np.maximum_sctype(np.float64)) # E: Type[{float64}]
+reveal_type(np.maximum_sctype("f8")) # E: Type[Any]
+
+reveal_type(np.issctype(np.float64)) # E: bool
+reveal_type(np.issctype("foo")) # E: Literal[False]
+
+reveal_type(np.obj2sctype(np.float64)) # E: Union[None, Type[{float64}]]
+reveal_type(np.obj2sctype(np.float64, default=False)) # E: Union[builtins.bool, Type[{float64}]]
+reveal_type(np.obj2sctype("S8")) # E: Union[None, Type[Any]]
+reveal_type(np.obj2sctype("S8", default=None)) # E: Union[None, Type[Any]]
+reveal_type(np.obj2sctype("foo", default=False)) # E: Union[builtins.bool, Type[Any]]
+reveal_type(np.obj2sctype(1)) # E: None
+reveal_type(np.obj2sctype(1, default=False)) # E: bool
+
+reveal_type(np.issubclass_(np.float64, float)) # E: bool
+reveal_type(np.issubclass_(np.float64, (int, float))) # E: bool
+reveal_type(np.issubclass_(1, 1)) # E: Literal[False]
+
+reveal_type(np.sctype2char("S8")) # E: str
+reveal_type(np.sctype2char(list)) # E: str
+
+reveal_type(np.find_common_type([np.int64], [np.int64])) # E: dtype[Any]
+
+reveal_type(np.cast[int]) # E: _CastFunc
+reveal_type(np.cast["i8"]) # E: _CastFunc
+reveal_type(np.cast[np.int64]) # E: _CastFunc
+
+reveal_type(np.nbytes[int]) # E: int
+reveal_type(np.nbytes["i8"]) # E: int
+reveal_type(np.nbytes[np.int64]) # E: int
+
+reveal_type(np.ScalarType) # E: Tuple
+reveal_type(np.ScalarType[0]) # E: Type[builtins.int]
+reveal_type(np.ScalarType[3]) # E: Type[builtins.bool]
+reveal_type(np.ScalarType[8]) # E: Type[{csingle}]
+reveal_type(np.ScalarType[10]) # E: Type[{clongdouble}]
+
+reveal_type(np.typecodes["Character"]) # E: Literal['c']
+reveal_type(np.typecodes["Complex"]) # E: Literal['FDG']
+reveal_type(np.typecodes["All"]) # E: Literal['?bhilqpBHILQPefdgFDGSUVOMm']
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/random.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/random.pyi
new file mode 100644
index 00000000..67a5d3e7
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/random.pyi
@@ -0,0 +1,1542 @@
+from __future__ import annotations
+
+from typing import Any
+
+import numpy as np
+
+def_rng = np.random.default_rng()
+seed_seq = np.random.SeedSequence()
+mt19937 = np.random.MT19937()
+pcg64 = np.random.PCG64()
+sfc64 = np.random.SFC64()
+philox = np.random.Philox()
+seedless_seq = np.random.bit_generator.SeedlessSeedSequence()
+
+reveal_type(def_rng) # E: random._generator.Generator
+reveal_type(mt19937) # E: random._mt19937.MT19937
+reveal_type(pcg64) # E: random._pcg64.PCG64
+reveal_type(sfc64) # E: random._sfc64.SFC64
+reveal_type(philox) # E: random._philox.Philox
+reveal_type(seed_seq) # E: random.bit_generator.SeedSequence
+reveal_type(seedless_seq) # E: random.bit_generator.SeedlessSeedSequence
+
+mt19937_jumped = mt19937.jumped()
+mt19937_jumped3 = mt19937.jumped(3)
+mt19937_raw = mt19937.random_raw()
+mt19937_raw_arr = mt19937.random_raw(5)
+
+reveal_type(mt19937_jumped) # E: random._mt19937.MT19937
+reveal_type(mt19937_jumped3) # E: random._mt19937.MT19937
+reveal_type(mt19937_raw) # E: int
+reveal_type(mt19937_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(mt19937.lock) # E: threading.Lock
+
+pcg64_jumped = pcg64.jumped()
+pcg64_jumped3 = pcg64.jumped(3)
+pcg64_adv = pcg64.advance(3)
+pcg64_raw = pcg64.random_raw()
+pcg64_raw_arr = pcg64.random_raw(5)
+
+reveal_type(pcg64_jumped) # E: random._pcg64.PCG64
+reveal_type(pcg64_jumped3) # E: random._pcg64.PCG64
+reveal_type(pcg64_adv) # E: random._pcg64.PCG64
+reveal_type(pcg64_raw) # E: int
+reveal_type(pcg64_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(pcg64.lock) # E: threading.Lock
+
+philox_jumped = philox.jumped()
+philox_jumped3 = philox.jumped(3)
+philox_adv = philox.advance(3)
+philox_raw = philox.random_raw()
+philox_raw_arr = philox.random_raw(5)
+
+reveal_type(philox_jumped) # E: random._philox.Philox
+reveal_type(philox_jumped3) # E: random._philox.Philox
+reveal_type(philox_adv) # E: random._philox.Philox
+reveal_type(philox_raw) # E: int
+reveal_type(philox_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(philox.lock) # E: threading.Lock
+
+sfc64_raw = sfc64.random_raw()
+sfc64_raw_arr = sfc64.random_raw(5)
+
+reveal_type(sfc64_raw) # E: int
+reveal_type(sfc64_raw_arr) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(sfc64.lock) # E: threading.Lock
+
+reveal_type(seed_seq.pool) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(seed_seq.entropy) # E: Union[None, int, Sequence[int]]
+reveal_type(seed_seq.spawn(1)) # E: list[random.bit_generator.SeedSequence]
+reveal_type(seed_seq.generate_state(8, "uint32")) # E: ndarray[Any, dtype[Union[unsignedinteger[typing._32Bit], unsignedinteger[typing._64Bit]]]]
+reveal_type(seed_seq.generate_state(8, "uint64")) # E: ndarray[Any, dtype[Union[unsignedinteger[typing._32Bit], unsignedinteger[typing._64Bit]]]]
+
+
+def_gen: np.random.Generator = np.random.default_rng()
+
+D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1])
+D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5])
+D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9])
+D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5])
+I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_)
+I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_)
+D_arr_like_0p1: list[float] = [0.1]
+D_arr_like_0p5: list[float] = [0.5]
+D_arr_like_0p9: list[float] = [0.9]
+D_arr_like_1p5: list[float] = [1.5]
+I_arr_like_10: list[int] = [10]
+I_arr_like_20: list[int] = [20]
+D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]]
+D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like)
+S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32)
+D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1)
+
+reveal_type(def_gen.standard_normal()) # E: float
+reveal_type(def_gen.standard_normal(dtype=np.float32)) # E: float
+reveal_type(def_gen.standard_normal(dtype="float32")) # E: float
+reveal_type(def_gen.standard_normal(dtype="double")) # E: float
+reveal_type(def_gen.standard_normal(dtype=np.float64)) # E: float
+reveal_type(def_gen.standard_normal(size=None)) # E: float
+reveal_type(def_gen.standard_normal(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype=np.float32)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype="f4")) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_normal(dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype=np.float64)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype="f8")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_normal(out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
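+
+# The pattern above recurs throughout this file: ``size=None`` yields a
+# Python float, while any explicit ``size`` yields an ndarray whose
+# precision tracks ``dtype``.  A hypothetical extra shape case:
+reveal_type(def_gen.standard_normal(size=(2, 2))) # E: ndarray[Any, dtype[floating[typing._64Bit]]]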
+
+reveal_type(def_gen.random()) # E: float
+reveal_type(def_gen.random(dtype=np.float32)) # E: float
+reveal_type(def_gen.random(dtype="float32")) # E: float
+reveal_type(def_gen.random(dtype="double")) # E: float
+reveal_type(def_gen.random(dtype=np.float64)) # E: float
+reveal_type(def_gen.random(size=None)) # E: float
+reveal_type(def_gen.random(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.random(size=1, dtype=np.float32)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.random(size=1, dtype="f4")) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.random(size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.random(dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.random(size=1, dtype=np.float64)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.random(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.random(size=1, dtype="f8")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.random(out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.random(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.random(size=1, dtype="float64", out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(def_gen.standard_cauchy()) # E: float
+reveal_type(def_gen.standard_cauchy(size=None)) # E: float
+reveal_type(def_gen.standard_cauchy(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.standard_exponential()) # E: float
+reveal_type(def_gen.standard_exponential(method="inv")) # E: float
+reveal_type(def_gen.standard_exponential(dtype=np.float32)) # E: float
+reveal_type(def_gen.standard_exponential(dtype="float32")) # E: float
+reveal_type(def_gen.standard_exponential(dtype="double")) # E: float
+reveal_type(def_gen.standard_exponential(dtype=np.float64)) # E: float
+reveal_type(def_gen.standard_exponential(size=None)) # E: float
+reveal_type(def_gen.standard_exponential(size=None, method="inv")) # E: float
+reveal_type(def_gen.standard_exponential(size=1, method="inv")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype=np.float32)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv")) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_exponential(dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype="f8")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_exponential(out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype="float64")) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(def_gen.zipf(1.5)) # E: int
+reveal_type(def_gen.zipf(1.5, size=None)) # E: int
+reveal_type(def_gen.zipf(1.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.zipf(D_arr_1p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.zipf(D_arr_1p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.zipf(D_arr_like_1p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.zipf(D_arr_like_1p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.weibull(0.5)) # E: float
+reveal_type(def_gen.weibull(0.5, size=None)) # E: float
+reveal_type(def_gen.weibull(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.weibull(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.weibull(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.weibull(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.weibull(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.standard_t(0.5)) # E: float
+reveal_type(def_gen.standard_t(0.5, size=None)) # E: float
+reveal_type(def_gen.standard_t(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.standard_t(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.standard_t(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.standard_t(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.standard_t(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.poisson(0.5)) # E: int
+reveal_type(def_gen.poisson(0.5, size=None)) # E: int
+reveal_type(def_gen.poisson(0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.poisson(D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.poisson(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.poisson(D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.poisson(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.power(0.5)) # E: float
+reveal_type(def_gen.power(0.5, size=None)) # E: float
+reveal_type(def_gen.power(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.power(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.power(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.power(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.power(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.pareto(0.5)) # E: float
+reveal_type(def_gen.pareto(0.5, size=None)) # E: float
+reveal_type(def_gen.pareto(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.pareto(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.pareto(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.pareto(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.pareto(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.chisquare(0.5)) # E: float
+reveal_type(def_gen.chisquare(0.5, size=None)) # E: float
+reveal_type(def_gen.chisquare(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.chisquare(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.chisquare(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.chisquare(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.chisquare(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.exponential(0.5)) # E: float
+reveal_type(def_gen.exponential(0.5, size=None)) # E: float
+reveal_type(def_gen.exponential(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.exponential(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.exponential(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.exponential(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.exponential(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.geometric(0.5)) # E: int
+reveal_type(def_gen.geometric(0.5, size=None)) # E: int
+reveal_type(def_gen.geometric(0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.geometric(D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.geometric(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.geometric(D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.geometric(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.logseries(0.5)) # E: int
+reveal_type(def_gen.logseries(0.5, size=None)) # E: int
+reveal_type(def_gen.logseries(0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.logseries(D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.logseries(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.logseries(D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.logseries(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.rayleigh(0.5)) # E: float
+reveal_type(def_gen.rayleigh(0.5, size=None)) # E: float
+reveal_type(def_gen.rayleigh(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.rayleigh(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.rayleigh(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.rayleigh(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.rayleigh(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.standard_gamma(0.5)) # E: float
+reveal_type(def_gen.standard_gamma(0.5, size=None)) # E: float
+reveal_type(def_gen.standard_gamma(0.5, dtype="float32")) # E: float
+reveal_type(def_gen.standard_gamma(0.5, size=None, dtype="float32")) # E: float
+reveal_type(def_gen.standard_gamma(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4")) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)) # E: ndarray[Any, dtype[floating[typing._32Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(0.5, out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(def_gen.vonmises(0.5, 0.5)) # E: float
+reveal_type(def_gen.vonmises(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.vonmises(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.wald(0.5, 0.5)) # E: float
+reveal_type(def_gen.wald(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.wald(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.uniform(0.5, 0.5)) # E: float
+reveal_type(def_gen.uniform(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.uniform(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.beta(0.5, 0.5)) # E: float
+reveal_type(def_gen.beta(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.beta(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.f(0.5, 0.5)) # E: float
+reveal_type(def_gen.f(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.f(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.gamma(0.5, 0.5)) # E: float
+reveal_type(def_gen.gamma(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.gamma(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.gumbel(0.5, 0.5)) # E: float
+reveal_type(def_gen.gumbel(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.gumbel(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.laplace(0.5, 0.5)) # E: float
+reveal_type(def_gen.laplace(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.laplace(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.logistic(0.5, 0.5)) # E: float
+reveal_type(def_gen.logistic(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.logistic(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.lognormal(0.5, 0.5)) # E: float
+reveal_type(def_gen.lognormal(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.lognormal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.noncentral_chisquare(0.5, 0.5)) # E: float
+reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.normal(0.5, 0.5)) # E: float
+reveal_type(def_gen.normal(0.5, 0.5, size=None)) # E: float
+reveal_type(def_gen.normal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.triangular(0.1, 0.5, 0.9)) # E: float
+reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=None)) # E: float
+reveal_type(def_gen.triangular(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9)) # E: float
+reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float
+reveal_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
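+# Integer-valued distributions on Generator (binomial, negative_binomial,
+# hypergeometric) reveal builtins.int for scalar arguments and an int64
+# ndarray otherwise -- unlike the legacy RandomState further down, whose
+# counting distributions (see the zipf/poisson tests there) use the
+# platform-dependent {int_} placeholder.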
+reveal_type(def_gen.binomial(10, 0.5)) # E: int
+reveal_type(def_gen.binomial(10, 0.5, size=None)) # E: int
+reveal_type(def_gen.binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.negative_binomial(10, 0.5)) # E: int
+reveal_type(def_gen.negative_binomial(10, 0.5, size=None)) # E: int
+reveal_type(def_gen.negative_binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.hypergeometric(20, 20, 10)) # E: int
+reveal_type(def_gen.hypergeometric(20, 20, 10, size=None)) # E: int
+reveal_type(def_gen.hypergeometric(20, 20, 10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_20, 20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(20, I_arr_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64)
+
+reveal_type(def_gen.integers(0, 100)) # E: int
+reveal_type(def_gen.integers(100)) # E: int
+reveal_type(def_gen.integers([100])) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, [100])) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
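+# Note on the test markers: the "# E:" comments appear to be matched as
+# substrings of mypy's reveal_type output, which is why several markers in
+# this file omit the final closing bracket.  integers() itself follows the
+# usual rule: scalar bounds reveal builtins.int, array-like bounds an int64
+# ndarray (when no dtype is given).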
+I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_)
+I_bool_low_like: list[int] = [0]
+I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
+I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
+
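+# Fixture naming for the dtype-specific integers() tests: *_high_open is
+# the exclusive upper bound, *_high_closed the inclusive one used together
+# with endpoint=True; *_low_like is a plain list exercising array-like
+# (non-ndarray) bounds.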
+reveal_type(def_gen.integers(2, dtype=bool)) # E: builtins.bool
+reveal_type(def_gen.integers(0, 2, dtype=bool)) # E: builtins.bool
+reveal_type(def_gen.integers(1, dtype=bool, endpoint=True)) # E: builtins.bool
+reveal_type(def_gen.integers(0, 1, dtype=bool, endpoint=True)) # E: builtins.bool
+reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(0, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+
+reveal_type(def_gen.integers(2, dtype=np.bool_)) # E: builtins.bool
+reveal_type(def_gen.integers(0, 2, dtype=np.bool_)) # E: builtins.bool
+reveal_type(def_gen.integers(1, dtype=np.bool_, endpoint=True)) # E: builtins.bool
+reveal_type(def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)) # E: builtins.bool
+reveal_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+reveal_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)) # E: ndarray[Any, dtype[bool_]
+
+I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8)
+I_u1_low_like: list[int] = [0]
+I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
+I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
+
+reveal_type(def_gen.integers(256, dtype="u1")) # E: int
+reveal_type(def_gen.integers(0, 256, dtype="u1")) # E: int
+reveal_type(def_gen.integers(255, dtype="u1", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 255, dtype="u1", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(0, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+
+reveal_type(def_gen.integers(256, dtype="uint8")) # E: int
+reveal_type(def_gen.integers(0, 256, dtype="uint8")) # E: int
+reveal_type(def_gen.integers(255, dtype="uint8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(0, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+
+reveal_type(def_gen.integers(256, dtype=np.uint8)) # E: int
+reveal_type(def_gen.integers(0, 256, dtype=np.uint8)) # E: int
+reveal_type(def_gen.integers(255, dtype=np.uint8, endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+
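+# Each integer width is exercised three ways -- character code ("u1"),
+# dtype name ("uint8"), and the scalar type (np.uint8) -- and all three
+# spellings are expected to reveal the same unsignedinteger[typing._8Bit]
+# type.  The same trio repeats below for the 16-, 32- and 64-bit widths.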
+I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16)
+I_u2_low_like: list[int] = [0]
+I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
+I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
+
+reveal_type(def_gen.integers(65536, dtype="u2")) # E: int
+reveal_type(def_gen.integers(0, 65536, dtype="u2")) # E: int
+reveal_type(def_gen.integers(65535, dtype="u2", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(0, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+
+reveal_type(def_gen.integers(65536, dtype="uint16")) # E: int
+reveal_type(def_gen.integers(0, 65536, dtype="uint16")) # E: int
+reveal_type(def_gen.integers(65535, dtype="uint16", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(0, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+
+reveal_type(def_gen.integers(65536, dtype=np.uint16)) # E: int
+reveal_type(def_gen.integers(0, 65536, dtype=np.uint16)) # E: int
+reveal_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+
+I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32)
+I_u4_low_like: list[int] = [0]
+I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
+I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
+
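+# {int_} and {uint} are placeholders, presumably expanded by the typing
+# test harness to the platform-dependent C-long-based types; the np.int_
+# and np.uint blocks reuse the uint32 fixtures since those bounds fit in
+# both types.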
+reveal_type(def_gen.integers(4294967296, dtype=np.int_)) # E: int
+reveal_type(def_gen.integers(0, 4294967296, dtype=np.int_)) # E: int
+reveal_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(def_gen.integers(I_u4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(def_gen.integers(4294967296, dtype="u4")) # E: int
+reveal_type(def_gen.integers(0, 4294967296, dtype="u4")) # E: int
+reveal_type(def_gen.integers(4294967295, dtype="u4", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(0, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+
+reveal_type(def_gen.integers(4294967296, dtype="uint32")) # E: int
+reveal_type(def_gen.integers(0, 4294967296, dtype="uint32")) # E: int
+reveal_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(0, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+
+reveal_type(def_gen.integers(4294967296, dtype=np.uint32)) # E: int
+reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint32)) # E: int
+reveal_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+
+reveal_type(def_gen.integers(4294967296, dtype=np.uint)) # E: int
+reveal_type(def_gen.integers(0, 4294967296, dtype=np.uint)) # E: int
+reveal_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(def_gen.integers(I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True)) # E: ndarray[Any, dtype[{uint}]]
+
+I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64)
+I_u8_low_like: list[int] = [0]
+I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
+I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
+
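+# The uint64 bounds below (2**64 and 2**64 - 1) do not fit in int64; they
+# rely on Python's arbitrary-precision int literals.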
+reveal_type(def_gen.integers(18446744073709551616, dtype="u8")) # E: int
+reveal_type(def_gen.integers(0, 18446744073709551616, dtype="u8")) # E: int
+reveal_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.integers(18446744073709551616, dtype="uint64")) # E: int
+reveal_type(def_gen.integers(0, 18446744073709551616, dtype="uint64")) # E: int
+reveal_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.integers(18446744073709551616, dtype=np.uint64)) # E: int
+reveal_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64)) # E: int
+reveal_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int
+reveal_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+
+I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8)
+I_i1_low_like: list[int] = [-128]
+I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
+I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
+
+reveal_type(def_gen.integers(128, dtype="i1")) # E: int
+reveal_type(def_gen.integers(-128, 128, dtype="i1")) # E: int
+reveal_type(def_gen.integers(127, dtype="i1", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+
+reveal_type(def_gen.integers(128, dtype="int8")) # E: int
+reveal_type(def_gen.integers(-128, 128, dtype="int8")) # E: int
+reveal_type(def_gen.integers(127, dtype="int8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(-128, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+
+reveal_type(def_gen.integers(128, dtype=np.int8)) # E: int
+reveal_type(def_gen.integers(-128, 128, dtype=np.int8)) # E: int
+reveal_type(def_gen.integers(127, dtype=np.int8, endpoint=True)) # E: int
+reveal_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+
+I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16)
+I_i2_low_like: list[int] = [-32768]
+I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
+I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
+
+reveal_type(def_gen.integers(32768, dtype="i2")) # E: int
+reveal_type(def_gen.integers(-32768, 32768, dtype="i2")) # E: int
+reveal_type(def_gen.integers(32767, dtype="i2", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+
+reveal_type(def_gen.integers(32768, dtype="int16")) # E: int
+reveal_type(def_gen.integers(-32768, 32768, dtype="int16")) # E: int
+reveal_type(def_gen.integers(32767, dtype="int16", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+
+reveal_type(def_gen.integers(32768, dtype=np.int16)) # E: int
+reveal_type(def_gen.integers(-32768, 32768, dtype=np.int16)) # E: int
+reveal_type(def_gen.integers(32767, dtype=np.int16, endpoint=True)) # E: int
+reveal_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+
+I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32)
+I_i4_low_like: list[int] = [-2147483648]
+I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
+I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
+
+reveal_type(def_gen.integers(2147483648, dtype="i4")) # E: int
+reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="i4")) # E: int
+reveal_type(def_gen.integers(2147483647, dtype="i4", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+
+reveal_type(def_gen.integers(2147483648, dtype="int32")) # E: int
+reveal_type(def_gen.integers(-2147483648, 2147483648, dtype="int32")) # E: int
+reveal_type(def_gen.integers(2147483647, dtype="int32", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+
+reveal_type(def_gen.integers(2147483648, dtype=np.int32)) # E: int
+reveal_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32)) # E: int
+reveal_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True)) # E: int
+reveal_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+
+I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64)
+I_i8_low_like: list[int] = [-9223372036854775808]
+I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
+I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
+
+reveal_type(def_gen.integers(9223372036854775808, dtype="i8")) # E: int
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int
+reveal_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.integers(9223372036854775808, dtype="int64")) # E: int
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int
+reveal_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)) # E: int
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.integers(9223372036854775808, dtype=np.int64)) # E: int
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int
+reveal_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)) # E: int
+reveal_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: int
+reveal_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.bit_generator) # E: BitGenerator
+
+reveal_type(def_gen.bytes(2)) # E: bytes
+
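+# choice() with an int population reveals builtins.int without size and an
+# int64 ndarray with size; with a sequence population the element type is
+# not tracked, so the result degrades to Any / ndarray[Any, Any].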
+reveal_type(def_gen.choice(5)) # E: int
+reveal_type(def_gen.choice(5, 3)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.choice(5, 3, replace=True)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any
+reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: ndarray[Any, Any]
+reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: ndarray[Any, Any]
+reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: ndarray[Any, Any]
+reveal_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: ndarray[Any, Any]
+
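+# The multivariate samplers (dirichlet, multinomial, multivariate_normal,
+# multivariate_hypergeometric) have no scalar overload: they reveal an
+# ndarray even when size is omitted.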
+reveal_type(def_gen.dirichlet([0.5, 0.5])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multinomial(20, np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(def_gen.multivariate_normal([0.0], [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.multivariate_normal([0.0], np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(def_gen.multivariate_normal(np.array([0.0]), np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(def_gen.permutation(10)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(def_gen.permutation([1, 2, 3, 4])) # E: ndarray[Any, Any]
+reveal_type(def_gen.permutation(np.array([1, 2, 3, 4]))) # E: ndarray[Any, Any]
+reveal_type(def_gen.permutation(D_2D, axis=1)) # E: ndarray[Any, Any]
+reveal_type(def_gen.permuted(D_2D)) # E: ndarray[Any, Any]
+reveal_type(def_gen.permuted(D_2D_like)) # E: ndarray[Any, Any]
+reveal_type(def_gen.permuted(D_2D, axis=1)) # E: ndarray[Any, Any]
+reveal_type(def_gen.permuted(D_2D, out=D_2D)) # E: ndarray[Any, Any]
+reveal_type(def_gen.permuted(D_2D_like, out=D_2D)) # E: ndarray[Any, Any]
+reveal_type(def_gen.permuted(D_2D, axis=1, out=D_2D)) # E: ndarray[Any, Any]
+
+reveal_type(def_gen.shuffle(np.arange(10))) # E: None
+reveal_type(def_gen.shuffle([1, 2, 3, 4, 5])) # E: None
+reveal_type(def_gen.shuffle(D_2D, axis=1)) # E: None
+
+reveal_type(np.random.Generator(pcg64)) # E: Generator
+reveal_type(def_gen.__str__()) # E: str
+reveal_type(def_gen.__repr__()) # E: str
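+# __getstate__/__setstate__ round-trip: the generator state is a plain
+# dict[str, Any] that can be fed straight back into __setstate__.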
+def_gen_state = def_gen.__getstate__()
+reveal_type(def_gen_state) # E: builtins.dict[builtins.str, Any]
+reveal_type(def_gen.__setstate__(def_gen_state)) # E: None
+
+# RandomState
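+# The legacy RandomState API mirrors Generator but types its counting
+# distributions (zipf, poisson, geometric, logseries, ...) with the
+# platform-dependent {int_} instead of Generator's fixed 64-bit integers.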
+random_st: np.random.RandomState = np.random.RandomState()
+
+reveal_type(random_st.standard_normal()) # E: float
+reveal_type(random_st.standard_normal(size=None)) # E: float
+reveal_type(random_st.standard_normal(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(random_st.random()) # E: float
+reveal_type(random_st.random(size=None)) # E: float
+reveal_type(random_st.random(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(random_st.standard_cauchy()) # E: float
+reveal_type(random_st.standard_cauchy(size=None)) # E: float
+reveal_type(random_st.standard_cauchy(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.standard_exponential()) # E: float
+reveal_type(random_st.standard_exponential(size=None)) # E: float
+reveal_type(random_st.standard_exponential(size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(random_st.zipf(1.5)) # E: int
+reveal_type(random_st.zipf(1.5, size=None)) # E: int
+reveal_type(random_st.zipf(1.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.zipf(D_arr_1p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.zipf(D_arr_1p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.zipf(D_arr_like_1p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.zipf(D_arr_like_1p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.weibull(0.5)) # E: float
+reveal_type(random_st.weibull(0.5, size=None)) # E: float
+reveal_type(random_st.weibull(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.weibull(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.weibull(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.weibull(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.weibull(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.standard_t(0.5)) # E: float
+reveal_type(random_st.standard_t(0.5, size=None)) # E: float
+reveal_type(random_st.standard_t(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.standard_t(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.standard_t(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.standard_t(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.standard_t(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.poisson(0.5)) # E: int
+reveal_type(random_st.poisson(0.5, size=None)) # E: int
+reveal_type(random_st.poisson(0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.poisson(D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.poisson(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.poisson(D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.poisson(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.power(0.5)) # E: float
+reveal_type(random_st.power(0.5, size=None)) # E: float
+reveal_type(random_st.power(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.power(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.power(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.power(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.power(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.pareto(0.5)) # E: float
+reveal_type(random_st.pareto(0.5, size=None)) # E: float
+reveal_type(random_st.pareto(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.pareto(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.pareto(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.pareto(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.pareto(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.chisquare(0.5)) # E: float
+reveal_type(random_st.chisquare(0.5, size=None)) # E: float
+reveal_type(random_st.chisquare(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.chisquare(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.chisquare(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.chisquare(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.chisquare(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.exponential(0.5)) # E: float
+reveal_type(random_st.exponential(0.5, size=None)) # E: float
+reveal_type(random_st.exponential(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.exponential(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.exponential(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.exponential(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.exponential(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.geometric(0.5)) # E: int
+reveal_type(random_st.geometric(0.5, size=None)) # E: int
+reveal_type(random_st.geometric(0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.geometric(D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.geometric(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.geometric(D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.geometric(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.logseries(0.5)) # E: int
+reveal_type(random_st.logseries(0.5, size=None)) # E: int
+reveal_type(random_st.logseries(0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.logseries(D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.logseries(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.logseries(D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.logseries(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.rayleigh(0.5)) # E: float
+reveal_type(random_st.rayleigh(0.5, size=None)) # E: float
+reveal_type(random_st.rayleigh(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.rayleigh(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.rayleigh(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.rayleigh(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.rayleigh(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.standard_gamma(0.5)) # E: float
+reveal_type(random_st.standard_gamma(0.5, size=None)) # E: float
+reveal_type(random_st.standard_gamma(0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(random_st.standard_gamma(D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(random_st.standard_gamma(D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(random_st.standard_gamma(D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+reveal_type(random_st.standard_gamma(D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]]
+
+reveal_type(random_st.vonmises(0.5, 0.5)) # E: float
+reveal_type(random_st.vonmises(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.vonmises(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.wald(0.5, 0.5)) # E: float
+reveal_type(random_st.wald(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.wald(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.uniform(0.5, 0.5)) # E: float
+reveal_type(random_st.uniform(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.uniform(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.beta(0.5, 0.5)) # E: float
+reveal_type(random_st.beta(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.beta(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.f(0.5, 0.5)) # E: float
+reveal_type(random_st.f(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.f(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.gamma(0.5, 0.5)) # E: float
+reveal_type(random_st.gamma(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.gamma(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.gumbel(0.5, 0.5)) # E: float
+reveal_type(random_st.gumbel(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.gumbel(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.laplace(0.5, 0.5)) # E: float
+reveal_type(random_st.laplace(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.laplace(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.logistic(0.5, 0.5)) # E: float
+reveal_type(random_st.logistic(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.logistic(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.lognormal(0.5, 0.5)) # E: float
+reveal_type(random_st.lognormal(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.lognormal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.noncentral_chisquare(0.5, 0.5)) # E: float
+reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.noncentral_chisquare(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.normal(0.5, 0.5)) # E: float
+reveal_type(random_st.normal(0.5, 0.5, size=None)) # E: float
+reveal_type(random_st.normal(0.5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(0.5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_0p5, 0.5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(0.5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_like_0p5, 0.5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(0.5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.triangular(0.1, 0.5, 0.9)) # E: float
+reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=None)) # E: float
+reveal_type(random_st.triangular(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9)) # E: float
+reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None)) # E: float
+reveal_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
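The pattern running through these continuous-distribution reveals is consistent: scalar parameters with size=None produce a builtin float, while any array-like parameter or a concrete size produces ndarray[Any, dtype[floating[typing._64Bit]]]. numpy gets this from paired overloads in its stubs; a reduced, hypothetical sketch of that shape (illustrative, not numpy's actual mtrand.pyi):

from __future__ import annotations

from typing import Union, overload

import numpy as np
import numpy.typing as npt

_Size = Union[int, tuple[int, ...], None]

@overload
def wald(mean: float, scale: float, size: None = ...) -> float: ...
@overload
def wald(mean: npt.ArrayLike, scale: npt.ArrayLike, size: _Size = ...) -> npt.NDArray[np.float64]: ...
def wald(mean, scale, size=None):
    # Scalar arguments with size=None fall into the first overload (float);
    # an array-like argument or an explicit size selects the second.
    return np.random.RandomState().wald(mean, scale, size=size)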
+reveal_type(random_st.binomial(10, 0.5)) # E: int
+reveal_type(random_st.binomial(10, 0.5, size=None)) # E: int
+reveal_type(random_st.binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.negative_binomial(10, 0.5)) # E: int
+reveal_type(random_st.negative_binomial(10, 0.5, size=None)) # E: int
+reveal_type(random_st.negative_binomial(10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_10, 0.5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_10, 0.5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_like_10, 0.5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.hypergeometric(20, 20, 10)) # E: int
+reveal_type(random_st.hypergeometric(20, 20, 10, size=None)) # E: int
+reveal_type(random_st.hypergeometric(20, 20, 10, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_20, 20, 10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(20, I_arr_20, 10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(20, I_arr_20, 10, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.randint(0, 100)) # E: int
+reveal_type(random_st.randint(100)) # E: int
+reveal_type(random_st.randint([100])) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.randint(0, [100])) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.randint(2, dtype=bool)) # E: builtins.bool
+reveal_type(random_st.randint(0, 2, dtype=bool)) # E: builtins.bool
+reveal_type(random_st.randint(I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_]
+reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_]
+reveal_type(random_st.randint(0, I_bool_high_open, dtype=bool)) # E: ndarray[Any, dtype[bool_]
+
+reveal_type(random_st.randint(2, dtype=np.bool_)) # E: builtins.bool
+reveal_type(random_st.randint(0, 2, dtype=np.bool_)) # E: builtins.bool
+reveal_type(random_st.randint(I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_]
+reveal_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_]
+reveal_type(random_st.randint(0, I_bool_high_open, dtype=np.bool_)) # E: ndarray[Any, dtype[bool_]
+
+reveal_type(random_st.randint(256, dtype="u1")) # E: int
+reveal_type(random_st.randint(0, 256, dtype="u1")) # E: int
+reveal_type(random_st.randint(I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(0, I_u1_high_open, dtype="u1")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+
+reveal_type(random_st.randint(256, dtype="uint8")) # E: int
+reveal_type(random_st.randint(0, 256, dtype="uint8")) # E: int
+reveal_type(random_st.randint(I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(0, I_u1_high_open, dtype="uint8")) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+
+reveal_type(random_st.randint(256, dtype=np.uint8)) # E: int
+reveal_type(random_st.randint(0, 256, dtype=np.uint8)) # E: int
+reveal_type(random_st.randint(I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8)) # E: ndarray[Any, dtype[unsignedinteger[typing._8Bit]]]
+
+reveal_type(random_st.randint(65536, dtype="u2")) # E: int
+reveal_type(random_st.randint(0, 65536, dtype="u2")) # E: int
+reveal_type(random_st.randint(I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(0, I_u2_high_open, dtype="u2")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+
+reveal_type(random_st.randint(65536, dtype="uint16")) # E: int
+reveal_type(random_st.randint(0, 65536, dtype="uint16")) # E: int
+reveal_type(random_st.randint(I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(0, I_u2_high_open, dtype="uint16")) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+
+reveal_type(random_st.randint(65536, dtype=np.uint16)) # E: int
+reveal_type(random_st.randint(0, 65536, dtype=np.uint16)) # E: int
+reveal_type(random_st.randint(I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16)) # E: ndarray[Any, dtype[unsignedinteger[typing._16Bit]]]
+
+reveal_type(random_st.randint(4294967296, dtype="u4")) # E: int
+reveal_type(random_st.randint(0, 4294967296, dtype="u4")) # E: int
+reveal_type(random_st.randint(I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(0, I_u4_high_open, dtype="u4")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+
+reveal_type(random_st.randint(4294967296, dtype="uint32")) # E: int
+reveal_type(random_st.randint(0, 4294967296, dtype="uint32")) # E: int
+reveal_type(random_st.randint(I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(0, I_u4_high_open, dtype="uint32")) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+
+reveal_type(random_st.randint(4294967296, dtype=np.uint32)) # E: int
+reveal_type(random_st.randint(0, 4294967296, dtype=np.uint32)) # E: int
+reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32)) # E: ndarray[Any, dtype[unsignedinteger[typing._32Bit]]]
+
+reveal_type(random_st.randint(4294967296, dtype=np.uint)) # E: int
+reveal_type(random_st.randint(0, 4294967296, dtype=np.uint)) # E: int
+reveal_type(random_st.randint(I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]]
+reveal_type(random_st.randint(0, I_u4_high_open, dtype=np.uint)) # E: ndarray[Any, dtype[{uint}]]
+
+reveal_type(random_st.randint(18446744073709551616, dtype="u8")) # E: int
+reveal_type(random_st.randint(0, 18446744073709551616, dtype="u8")) # E: int
+reveal_type(random_st.randint(I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(0, I_u8_high_open, dtype="u8")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+
+reveal_type(random_st.randint(18446744073709551616, dtype="uint64")) # E: int
+reveal_type(random_st.randint(0, 18446744073709551616, dtype="uint64")) # E: int
+reveal_type(random_st.randint(I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(0, I_u8_high_open, dtype="uint64")) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+
+reveal_type(random_st.randint(18446744073709551616, dtype=np.uint64)) # E: int
+reveal_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64)) # E: int
+reveal_type(random_st.randint(I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64)) # E: ndarray[Any, dtype[unsignedinteger[typing._64Bit]]]
+
+reveal_type(random_st.randint(128, dtype="i1")) # E: int
+reveal_type(random_st.randint(-128, 128, dtype="i1")) # E: int
+reveal_type(random_st.randint(I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(-128, I_i1_high_open, dtype="i1")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+
+reveal_type(random_st.randint(128, dtype="int8")) # E: int
+reveal_type(random_st.randint(-128, 128, dtype="int8")) # E: int
+reveal_type(random_st.randint(I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(-128, I_i1_high_open, dtype="int8")) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+
+reveal_type(random_st.randint(128, dtype=np.int8)) # E: int
+reveal_type(random_st.randint(-128, 128, dtype=np.int8)) # E: int
+reveal_type(random_st.randint(I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+reveal_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8)) # E: ndarray[Any, dtype[signedinteger[typing._8Bit]]]
+
+reveal_type(random_st.randint(32768, dtype="i2")) # E: int
+reveal_type(random_st.randint(-32768, 32768, dtype="i2")) # E: int
+reveal_type(random_st.randint(I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="i2")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(32768, dtype="int16")) # E: int
+reveal_type(random_st.randint(-32768, 32768, dtype="int16")) # E: int
+reveal_type(random_st.randint(I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(-32768, I_i2_high_open, dtype="int16")) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(32768, dtype=np.int16)) # E: int
+reveal_type(random_st.randint(-32768, 32768, dtype=np.int16)) # E: int
+reveal_type(random_st.randint(I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+reveal_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16)) # E: ndarray[Any, dtype[signedinteger[typing._16Bit]]]
+
+reveal_type(random_st.randint(2147483648, dtype="i4")) # E: int
+reveal_type(random_st.randint(-2147483648, 2147483648, dtype="i4")) # E: int
+reveal_type(random_st.randint(I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+
+reveal_type(random_st.randint(2147483648, dtype="int32")) # E: int
+reveal_type(random_st.randint(-2147483648, 2147483648, dtype="int32")) # E: int
+reveal_type(random_st.randint(I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32")) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+
+reveal_type(random_st.randint(2147483648, dtype=np.int32)) # E: int
+reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32)) # E: int
+reveal_type(random_st.randint(I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)) # E: ndarray[Any, dtype[signedinteger[typing._32Bit]]]
+
+reveal_type(random_st.randint(2147483648, dtype=np.int_)) # E: int
+reveal_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_)) # E: int
+reveal_type(random_st.randint(I_i4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.randint(9223372036854775808, dtype="i8")) # E: int
+reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")) # E: int
+reveal_type(random_st.randint(I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(random_st.randint(9223372036854775808, dtype="int64")) # E: int
+reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")) # E: int
+reveal_type(random_st.randint(I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
+reveal_type(random_st.randint(9223372036854775808, dtype=np.int64)) # E: int
+reveal_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)) # E: int
+reveal_type(random_st.randint(I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+reveal_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)) # E: ndarray[Any, dtype[signedinteger[typing._64Bit]]]
+
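Every dtype block above repeats the same matrix: scalar low/high reveal builtins.int, array-like bounds reveal an ndarray whose element type tracks the dtype argument, and the one-letter code, the dtype-name string, and the numpy scalar type all resolve to the same overload. A reduced, hypothetical sketch for the uint8 family only:

from __future__ import annotations

from typing import Literal, Union, overload

import numpy as np
import numpy.typing as npt

_U1Code = Union[type[np.uint8], Literal["u1", "uint8"]]

@overload
def randint(low: int, high: int | None = ..., *, dtype: _U1Code = ...) -> int: ...
@overload
def randint(low: npt.ArrayLike, high: npt.ArrayLike | None = ..., *, dtype: _U1Code = ...) -> npt.NDArray[np.uint8]: ...
def randint(low, high=None, *, dtype=np.uint8):
    # Scalar bounds return a builtin int regardless of dtype; array-like
    # bounds return an array whose element type follows the dtype key.
    return np.random.RandomState().randint(low, high, dtype=dtype)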
+reveal_type(random_st._bit_generator) # E: BitGenerator
+
+reveal_type(random_st.bytes(2)) # E: bytes
+
+reveal_type(random_st.choice(5)) # E: int
+reveal_type(random_st.choice(5, 3)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.choice(5, 3, replace=True)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])) # E: Any
+reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)) # E: ndarray[Any, Any]
+reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)) # E: ndarray[Any, Any]
+reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)) # E: ndarray[Any, Any]
+reveal_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))) # E: ndarray[Any, Any]
+
+reveal_type(random_st.dirichlet([0.5, 0.5])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.dirichlet(np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.multinomial(20, [1 / 6.0] * 6)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.multinomial(20, np.array([0.5, 0.5]))) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2)) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(random_st.multivariate_normal([0.0], [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.multivariate_normal([0.0], np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]])) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.multivariate_normal(np.array([0.0]), np.array([[1.0]]))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.permutation(10)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.permutation([1, 2, 3, 4])) # E: ndarray[Any, Any]
+reveal_type(random_st.permutation(np.array([1, 2, 3, 4]))) # E: ndarray[Any, Any]
+reveal_type(random_st.permutation(D_2D)) # E: ndarray[Any, Any]
+
+reveal_type(random_st.shuffle(np.arange(10))) # E: None
+reveal_type(random_st.shuffle([1, 2, 3, 4, 5])) # E: None
+reveal_type(random_st.shuffle(D_2D)) # E: None
+
+reveal_type(np.random.RandomState(pcg64)) # E: RandomState
+reveal_type(np.random.RandomState(0)) # E: RandomState
+reveal_type(np.random.RandomState([0, 1, 2])) # E: RandomState
+reveal_type(random_st.__str__()) # E: str
+reveal_type(random_st.__repr__()) # E: str
+random_st_state = random_st.__getstate__()
+reveal_type(random_st_state) # E: builtins.dict[builtins.str, Any]
+reveal_type(random_st.__setstate__(random_st_state)) # E: None
+reveal_type(random_st.seed()) # E: None
+reveal_type(random_st.seed(1)) # E: None
+reveal_type(random_st.seed([0, 1])) # E: None
+random_st_get_state = random_st.get_state()
+reveal_type(random_st_get_state) # E: builtins.dict[builtins.str, Any]
+random_st_get_state_legacy = random_st.get_state(legacy=True)
+reveal_type(random_st_get_state_legacy) # E: Union[builtins.dict[builtins.str, Any], Tuple[builtins.str, ndarray[Any, dtype[unsignedinteger[typing._32Bit]]], builtins.int, builtins.int, builtins.float]]
+reveal_type(random_st.set_state(random_st_get_state)) # E: None
+
+reveal_type(random_st.rand()) # E: float
+reveal_type(random_st.rand(1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.rand(1, 2)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.randn()) # E: float
+reveal_type(random_st.randn(1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.randn(1, 2)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.random_sample()) # E: float
+reveal_type(random_st.random_sample(1)) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+reveal_type(random_st.random_sample(size=(1, 2))) # E: ndarray[Any, dtype[floating[typing._64Bit]]
+
+reveal_type(random_st.tomaxint()) # E: int
+reveal_type(random_st.tomaxint(1)) # E: ndarray[Any, dtype[{int_}]]
+reveal_type(random_st.tomaxint((1,))) # E: ndarray[Any, dtype[{int_}]]
+
+reveal_type(np.random.set_bit_generator(pcg64)) # E: None
+reveal_type(np.random.get_bit_generator()) # E: BitGenerator
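The # E: comments in these files are not executable assertions. numpy's typing test suite runs mypy over each reveal file and checks that the expectation is a substring of the reveal_type note mypy emits for that line, after expanding platform-dependent placeholders such as {int_} and {uint}; substring matching is also why many expectations above can legitimately end mid-bracket (e.g. ...floating[typing._64Bit]]). A minimal sketch of a checker in that style, assuming mypy is installed (names illustrative):

import re
import subprocess
import sys

def check_reveals(path: str) -> list[str]:
    # mypy reports: <path>:<line>: note: Revealed type is "<type>"
    proc = subprocess.run(
        [sys.executable, "-m", "mypy", path],
        capture_output=True, text=True,
    )
    revealed = {
        int(m.group(1)): m.group(2)
        for m in re.finditer(r'.+:(\d+): note: Revealed type is "(.+)"', proc.stdout)
    }
    failures = []
    with open(path) as f:
        for lineno, line in enumerate(f, start=1):
            if "# E: " not in line:
                continue
            expected = line.split("# E: ", 1)[1].strip()
            # Substring match, so truncated expectations still pass.
            if expected not in revealed.get(lineno, ""):
                failures.append(f"{path}:{lineno}: expected {expected!r}")
    return failures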
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/rec.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/rec.pyi
new file mode 100644
index 00000000..8ea4a6ee
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/rec.pyi
@@ -0,0 +1,128 @@
+import io
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+AR_i8: npt.NDArray[np.int64]
+REC_AR_V: np.recarray[Any, np.dtype[np.record]]
+AR_LIST: list[npt.NDArray[np.int64]]
+
+format_parser: np.format_parser
+record: np.record
+file_obj: io.BufferedIOBase
+
+reveal_type(np.format_parser( # E: format_parser
+ formats=[np.float64, np.int64, np.bool_],
+ names=["f8", "i8", "?"],
+ titles=None,
+ aligned=True,
+))
+reveal_type(format_parser.dtype) # E: dtype[void]
+
+reveal_type(record.field_a) # E: Any
+reveal_type(record.field_b) # E: Any
+reveal_type(record["field_a"]) # E: Any
+reveal_type(record["field_b"]) # E: Any
+reveal_type(record.pprint()) # E: str
+record.field_c = 5
+
+reveal_type(REC_AR_V.field(0)) # E: Any
+reveal_type(REC_AR_V.field("field_a")) # E: Any
+reveal_type(REC_AR_V.field(0, AR_i8)) # E: None
+reveal_type(REC_AR_V.field("field_a", AR_i8)) # E: None
+reveal_type(REC_AR_V["field_a"]) # E: Any
+reveal_type(REC_AR_V.field_a) # E: Any
+reveal_type(REC_AR_V.__array_finalize__(object())) # E: None
+
+reveal_type(np.recarray( # recarray[Any, dtype[record]]
+ shape=(10, 5),
+ formats=[np.float64, np.int64, np.bool_],
+ order="K",
+ byteorder="|",
+))
+reveal_type(np.recarray( # recarray[Any, dtype[Any]]
+ shape=(10, 5),
+ dtype=[("f8", np.float64), ("i8", np.int64)],
+ strides=(5, 5),
+))
+
+reveal_type(np.rec.fromarrays( # recarray[Any, dtype[record]]
+ AR_LIST,
+))
+reveal_type(np.rec.fromarrays( # recarray[Any, dtype[Any]]
+ AR_LIST,
+ dtype=np.int64,
+))
+reveal_type(np.rec.fromarrays( # recarray[Any, dtype[Any]]
+ AR_LIST,
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"]
+))
+
+reveal_type(np.rec.fromrecords( # recarray[Any, dtype[record]]
+ (1, 1.5),
+))
+reveal_type(np.rec.fromrecords( # recarray[Any, dtype[record]]
+ [(1, 1.5)],
+ dtype=[("i8", np.int64), ("f8", np.float64)],
+))
+reveal_type(np.rec.fromrecords( # recarray[Any, dtype[record]]
+ REC_AR_V,
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"]
+))
+
+reveal_type(np.rec.fromstring( # recarray[Any, dtype[record]]
+ b"(1, 1.5)",
+ dtype=[("i8", np.int64), ("f8", np.float64)],
+))
+reveal_type(np.rec.fromstring( # recarray[Any, dtype[record]]
+ REC_AR_V,
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"]
+))
+
+reveal_type(np.rec.fromfile( # recarray[Any, dtype[Any]]
+ "test_file.txt",
+ dtype=[("i8", np.int64), ("f8", np.float64)],
+))
+reveal_type(np.rec.fromfile( # recarray[Any, dtype[record]]
+ file_obj,
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"]
+))
+
+reveal_type(np.rec.array( # recarray[Any, dtype[{int64}]]
+ AR_i8,
+))
+reveal_type(np.rec.array( # recarray[Any, dtype[Any]]
+ [(1, 1.5)],
+ dtype=[("i8", np.int64), ("f8", np.float64)],
+))
+reveal_type(np.rec.array( # recarray[Any, dtype[record]]
+ [(1, 1.5)],
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"]
+))
+
+reveal_type(np.rec.array( # recarray[Any, dtype[Any]]
+ None,
+ dtype=np.float64,
+ shape=(10, 3),
+))
+reveal_type(np.rec.array( # recarray[Any, dtype[Any]]
+ None,
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"],
+ shape=(10, 3),
+))
+reveal_type(np.rec.array( # recarray[Any, dtype[Any]]
+ file_obj,
+ dtype=np.float64,
+))
+reveal_type(np.rec.array( # recarray[Any, dtype[Any]]
+ file_obj,
+ formats=[np.int64, np.float64],
+ names=["i8", "f8"],
+))
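The rec constructors exercised above behave at runtime as the reveals suggest; a small self-contained spot check:

import numpy as np

arrays = [np.arange(3, dtype=np.int64), np.linspace(0.0, 1.0, 3)]
rec = np.rec.fromarrays(arrays, names=["i8", "f8"])
print(type(rec))           # <class 'numpy.recarray'>
print(rec.dtype)           # (numpy.record, [('i8', '<i8'), ('f8', '<f8')])
print(rec.i8, rec["f8"])   # field access works by attribute and by key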
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/scalars.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/scalars.pyi
new file mode 100644
index 00000000..b7fc75ac
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/scalars.pyi
@@ -0,0 +1,158 @@
+import sys
+import numpy as np
+
+b: np.bool_
+u8: np.uint64
+i8: np.int64
+f8: np.float64
+c8: np.complex64
+c16: np.complex128
+m: np.timedelta64
+U: np.str_
+S: np.bytes_
+V: np.void
+
+reveal_type(c8.real) # E: {float32}
+reveal_type(c8.imag) # E: {float32}
+
+reveal_type(c8.real.real) # E: {float32}
+reveal_type(c8.real.imag) # E: {float32}
+
+reveal_type(c8.itemsize) # E: int
+reveal_type(c8.shape) # E: Tuple[]
+reveal_type(c8.strides) # E: Tuple[]
+
+reveal_type(c8.ndim) # E: Literal[0]
+reveal_type(c8.size) # E: Literal[1]
+
+reveal_type(c8.squeeze()) # E: {complex64}
+reveal_type(c8.byteswap()) # E: {complex64}
+reveal_type(c8.transpose()) # E: {complex64}
+
+reveal_type(c8.dtype) # E: dtype[{complex64}]
+
+reveal_type(c8.real) # E: {float32}
+reveal_type(c16.imag) # E: {float64}
+
+reveal_type(np.unicode_('foo')) # E: str_
+
+reveal_type(V[0]) # E: Any
+reveal_type(V["field1"]) # E: Any
+reveal_type(V[["field1", "field2"]]) # E: void
+V[0] = 5
+
+# Aliases
+reveal_type(np.unicode_()) # E: str_
+reveal_type(np.string_()) # E: bytes_
+
+reveal_type(np.byte()) # E: {byte}
+reveal_type(np.short()) # E: {short}
+reveal_type(np.intc()) # E: {intc}
+reveal_type(np.intp()) # E: {intp}
+reveal_type(np.int_()) # E: {int_}
+reveal_type(np.longlong()) # E: {longlong}
+
+reveal_type(np.ubyte()) # E: {ubyte}
+reveal_type(np.ushort()) # E: {ushort}
+reveal_type(np.uintc()) # E: {uintc}
+reveal_type(np.uintp()) # E: {uintp}
+reveal_type(np.uint()) # E: {uint}
+reveal_type(np.ulonglong()) # E: {ulonglong}
+
+reveal_type(np.half()) # E: {half}
+reveal_type(np.single()) # E: {single}
+reveal_type(np.double()) # E: {double}
+reveal_type(np.float_()) # E: {double}
+reveal_type(np.longdouble()) # E: {longdouble}
+reveal_type(np.longfloat()) # E: {longdouble}
+
+reveal_type(np.csingle()) # E: {csingle}
+reveal_type(np.singlecomplex()) # E: {csingle}
+reveal_type(np.cdouble()) # E: {cdouble}
+reveal_type(np.complex_()) # E: {cdouble}
+reveal_type(np.cfloat()) # E: {cdouble}
+reveal_type(np.clongdouble()) # E: {clongdouble}
+reveal_type(np.clongfloat()) # E: {clongdouble}
+reveal_type(np.longcomplex()) # E: {clongdouble}
+
+reveal_type(b.item()) # E: bool
+reveal_type(i8.item()) # E: int
+reveal_type(u8.item()) # E: int
+reveal_type(f8.item()) # E: float
+reveal_type(c16.item()) # E: complex
+reveal_type(U.item()) # E: str
+reveal_type(S.item()) # E: bytes
+
+reveal_type(b.tolist()) # E: bool
+reveal_type(i8.tolist()) # E: int
+reveal_type(u8.tolist()) # E: int
+reveal_type(f8.tolist()) # E: float
+reveal_type(c16.tolist()) # E: complex
+reveal_type(U.tolist()) # E: str
+reveal_type(S.tolist()) # E: bytes
+
+reveal_type(b.ravel()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(i8.ravel()) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(u8.ravel()) # E: ndarray[Any, dtype[{uint64}]]
+reveal_type(f8.ravel()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(c16.ravel()) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(U.ravel()) # E: ndarray[Any, dtype[str_]]
+reveal_type(S.ravel()) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(b.flatten()) # E: ndarray[Any, dtype[bool_]]
+reveal_type(i8.flatten()) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(u8.flatten()) # E: ndarray[Any, dtype[{uint64}]]
+reveal_type(f8.flatten()) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(c16.flatten()) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(U.flatten()) # E: ndarray[Any, dtype[str_]]
+reveal_type(S.flatten()) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(b.reshape(1)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(i8.reshape(1)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(u8.reshape(1)) # E: ndarray[Any, dtype[{uint64}]]
+reveal_type(f8.reshape(1)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(c16.reshape(1)) # E: ndarray[Any, dtype[{complex128}]]
+reveal_type(U.reshape(1)) # E: ndarray[Any, dtype[str_]]
+reveal_type(S.reshape(1)) # E: ndarray[Any, dtype[bytes_]]
+
+reveal_type(i8.astype(float)) # E: Any
+reveal_type(i8.astype(np.float64)) # E: {float64}
+
+reveal_type(i8.view()) # E: {int64}
+reveal_type(i8.view(np.float64)) # E: {float64}
+reveal_type(i8.view(float)) # E: Any
+reveal_type(i8.view(np.float64, np.ndarray)) # E: {float64}
+
+reveal_type(i8.getfield(float)) # E: Any
+reveal_type(i8.getfield(np.float64)) # E: {float64}
+reveal_type(i8.getfield(np.float64, 8)) # E: {float64}
+
+reveal_type(f8.as_integer_ratio()) # E: Tuple[builtins.int, builtins.int]
+reveal_type(f8.is_integer()) # E: bool
+reveal_type(f8.__trunc__()) # E: int
+reveal_type(f8.__getformat__("float")) # E: str
+reveal_type(f8.hex()) # E: str
+reveal_type(np.float64.fromhex("0x0.0p+0")) # E: {float64}
+
+reveal_type(f8.__getnewargs__()) # E: Tuple[builtins.float]
+reveal_type(c16.__getnewargs__()) # E: Tuple[builtins.float, builtins.float]
+
+reveal_type(i8.numerator) # E: {int64}
+reveal_type(i8.denominator) # E: Literal[1]
+reveal_type(u8.numerator) # E: {uint64}
+reveal_type(u8.denominator) # E: Literal[1]
+reveal_type(m.numerator) # E: timedelta64
+reveal_type(m.denominator) # E: Literal[1]
+
+reveal_type(round(i8)) # E: int
+reveal_type(round(i8, 3)) # E: {int64}
+reveal_type(round(u8)) # E: int
+reveal_type(round(u8, 3)) # E: {uint64}
+reveal_type(round(f8)) # E: int
+reveal_type(round(f8, 3)) # E: {float64}
+
+if sys.version_info >= (3, 9):
+ reveal_type(f8.__ceil__()) # E: int
+ reveal_type(f8.__floor__()) # E: int
+
+reveal_type(i8.is_integer()) # E: Literal[True]
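A few of the scalar reveals above, confirmed at runtime:

import numpy as np

f8 = np.float64(2.75)
i8 = np.int64(7)
print(type(f8.item()))                    # <class 'float'>
print(f8.as_integer_ratio())              # (11, 4)
print(round(f8), type(round(f8)))         # 3 <class 'int'>
print(round(f8, 1), type(round(f8, 1)))   # 2.8 <class 'numpy.float64'>
print(i8.denominator)                     # 1
print(i8.reshape(1))                      # array([7]), a 1-d int64 ndarray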
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi
new file mode 100644
index 00000000..b907a432
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/shape_base.pyi
@@ -0,0 +1,57 @@
+import numpy as np
+from numpy._typing import NDArray
+from typing import Any
+
+i8: np.int64
+f8: np.float64
+
+AR_b: NDArray[np.bool_]
+AR_i8: NDArray[np.int64]
+AR_f8: NDArray[np.float64]
+
+AR_LIKE_f8: list[float]
+
+reveal_type(np.take_along_axis(AR_f8, AR_i8, axis=1)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.take_along_axis(f8, AR_i8, axis=None)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1)) # E: None
+
+reveal_type(np.expand_dims(AR_i8, 2)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.expand_dims(AR_LIKE_f8, 2)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.column_stack([AR_i8])) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.column_stack([AR_LIKE_f8])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.dstack([AR_i8])) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.dstack([AR_LIKE_f8])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.row_stack([AR_i8])) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.row_stack([AR_LIKE_f8])) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.array_split(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]]
+reveal_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.split(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]]
+reveal_type(np.split(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.hsplit(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]]
+reveal_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.vsplit(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]]
+reveal_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.dsplit(AR_i8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[{int64}]]]
+reveal_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10])) # E: list[ndarray[Any, dtype[Any]]]
+
+reveal_type(np.lib.shape_base.get_array_prepare(AR_i8)) # E: lib.shape_base._ArrayPrepare
+reveal_type(np.lib.shape_base.get_array_prepare(AR_i8, 1)) # E: Union[None, lib.shape_base._ArrayPrepare]
+
+reveal_type(np.get_array_wrap(AR_i8)) # E: lib.shape_base._ArrayWrap
+reveal_type(np.get_array_wrap(AR_i8, 1)) # E: Union[None, lib.shape_base._ArrayWrap]
+
+reveal_type(np.kron(AR_b, AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.kron(AR_b, AR_i8)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.kron(AR_f8, AR_f8)) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(np.tile(AR_i8, 5)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.tile(AR_LIKE_f8, [2, 2])) # E: ndarray[Any, dtype[Any]]
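The split and kron reveals are easy to confirm at runtime:

import numpy as np

AR_i8 = np.arange(12, dtype=np.int64)
parts = np.split(AR_i8, [3, 5, 6, 10])
print([p.dtype for p in parts])   # five int64 chunks, dtype preserved
print(np.kron(np.ones((2, 2), dtype=bool), np.eye(2, dtype=np.int64)).dtype)  # int64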
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi
new file mode 100644
index 00000000..17769dc4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/stride_tricks.pyi
@@ -0,0 +1,28 @@
+from typing import Any
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_LIKE_f: list[float]
+interface_dict: dict[str, Any]
+
+reveal_type(np.lib.stride_tricks.DummyArray(interface_dict)) # E: lib.stride_tricks.DummyArray
+
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5))) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20])) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5))) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.broadcast_to(AR_f8, 5)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.broadcast_to(AR_LIKE_f, (1, 5))) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.broadcast_to(AR_f8, [4, 6], subok=True)) # E: ndarray[Any, dtype[{float64}]]
+
+reveal_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2))) # E: tuple[builtins.int, ...]
+reveal_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7))) # E: tuple[builtins.int, ...]
+
+reveal_type(np.broadcast_arrays(AR_f8, AR_f8)) # E: list[ndarray[Any, dtype[Any]]]
+reveal_type(np.broadcast_arrays(AR_f8, AR_LIKE_f)) # E: list[ndarray[Any, dtype[Any]]]
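Runtime behaviour matching the stride-trick reveals above:

import numpy as np

x = np.arange(6, dtype=np.float64)
w = np.lib.stride_tricks.sliding_window_view(x, 3)
print(w.shape, w.dtype)   # (4, 3) float64 -- a view, not a copy
print(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)))   # (5, 6, 7)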
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/testing.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/testing.pyi
new file mode 100644
index 00000000..5440af80
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/testing.pyi
@@ -0,0 +1,178 @@
+from __future__ import annotations
+
+import re
+import sys
+from collections.abc import Callable
+from typing import Any, TypeVar
+from pathlib import Path
+
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_i8: npt.NDArray[np.int64]
+
+bool_obj: bool
+suppress_obj: np.testing.suppress_warnings
+FT = TypeVar("FT", bound=Callable[..., Any])
+
+def func() -> int: ...
+
+def func2(
+ x: npt.NDArray[np.number[Any]],
+ y: npt.NDArray[np.number[Any]],
+) -> npt.NDArray[np.bool_]: ...
+
+reveal_type(np.testing.KnownFailureException()) # E: KnownFailureException
+reveal_type(np.testing.IgnoreException()) # E: IgnoreException
+
+reveal_type(np.testing.clear_and_catch_warnings(modules=[np.testing])) # E: _clear_and_catch_warnings_without_records
+reveal_type(np.testing.clear_and_catch_warnings(True)) # E: _clear_and_catch_warnings_with_records
+reveal_type(np.testing.clear_and_catch_warnings(False)) # E: _clear_and_catch_warnings_without_records
+reveal_type(np.testing.clear_and_catch_warnings(bool_obj)) # E: clear_and_catch_warnings
+reveal_type(np.testing.clear_and_catch_warnings.class_modules) # E: tuple[types.ModuleType, ...]
+reveal_type(np.testing.clear_and_catch_warnings.modules) # E: set[types.ModuleType]
+
+with np.testing.clear_and_catch_warnings(True) as c1:
+ reveal_type(c1) # E: builtins.list[warnings.WarningMessage]
+with np.testing.clear_and_catch_warnings() as c2:
+ reveal_type(c2) # E: None
+
+reveal_type(np.testing.suppress_warnings("once")) # E: suppress_warnings
+reveal_type(np.testing.suppress_warnings()(func)) # E: def () -> builtins.int
+reveal_type(suppress_obj.filter(RuntimeWarning)) # E: None
+reveal_type(suppress_obj.record(RuntimeWarning)) # E: list[warnings.WarningMessage]
+with suppress_obj as c3:
+ reveal_type(c3) # E: suppress_warnings
+
+reveal_type(np.testing.verbose) # E: int
+reveal_type(np.testing.IS_PYPY) # E: bool
+reveal_type(np.testing.HAS_REFCOUNT) # E: bool
+reveal_type(np.testing.HAS_LAPACK64) # E: bool
+
+reveal_type(np.testing.assert_(1, msg="test")) # E: None
+reveal_type(np.testing.assert_(2, msg=lambda: "test")) # E: None
+
+if sys.platform == "win32" or sys.platform == "cygwin":
+ reveal_type(np.testing.memusage()) # E: builtins.int
+elif sys.platform == "linux":
+ reveal_type(np.testing.memusage()) # E: Union[None, builtins.int]
+else:
+ reveal_type(np.testing.memusage()) # E: <nothing>
+
+reveal_type(np.testing.jiffies()) # E: builtins.int
+
+reveal_type(np.testing.build_err_msg([0, 1, 2], "test")) # E: str
+reveal_type(np.testing.build_err_msg(range(2), "test", header="header")) # E: str
+reveal_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False)) # E: str
+reveal_type(np.testing.build_err_msg("abc", "test", names=["x", "y"])) # E: str
+reveal_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5)) # E: str
+
+reveal_type(np.testing.assert_equal({1}, {1})) # E: None
+reveal_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None
+reveal_type(np.testing.assert_equal(1, 1.0, verbose=True)) # E: None
+
+reveal_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])) # E: None
+
+reveal_type(np.testing.assert_almost_equal(1.0, 1.1)) # E: None
+reveal_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None
+reveal_type(np.testing.assert_almost_equal(1, 1.0, verbose=True)) # E: None
+reveal_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2)) # E: None
+
+reveal_type(np.testing.assert_approx_equal(1.0, 1.1)) # E: None
+reveal_type(np.testing.assert_approx_equal("1", "2", err_msg="fail")) # E: None
+reveal_type(np.testing.assert_approx_equal(1, 1.0, verbose=True)) # E: None
+reveal_type(np.testing.assert_approx_equal(1, 1.0001, significant=2)) # E: None
+
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True)) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header")) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64())) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False)) # E: None
+reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True)) # E: None
+
+reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True)) # E: None
+
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True)) # E: None
+reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1)) # E: None
+
+reveal_type(np.testing.assert_array_less(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test")) # E: None
+reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True)) # E: None
+
+reveal_type(np.testing.runstring("1 + 1", {})) # E: Any
+reveal_type(np.testing.runstring("int64() + 1", {"int64": np.int64})) # E: Any
+
+reveal_type(np.testing.assert_string_equal("1", "1")) # E: None
+
+reveal_type(np.testing.rundocs()) # E: None
+reveal_type(np.testing.rundocs("test.py")) # E: None
+reveal_type(np.testing.rundocs(Path("test.py"), raise_on_error=True)) # E: None
+
+@np.testing.raises(RuntimeError, RuntimeWarning)
+def func3(a: int) -> bool: ...
+
+reveal_type(func3) # E: def (a: builtins.int) -> builtins.bool
+
+reveal_type(np.testing.assert_raises(RuntimeWarning)) # E: _AssertRaisesContext[builtins.RuntimeWarning]
+reveal_type(np.testing.assert_raises(RuntimeWarning, func3, 5)) # E: None
+
+reveal_type(np.testing.assert_raises_regex(RuntimeWarning, r"test")) # E: _AssertRaisesContext[builtins.RuntimeWarning]
+reveal_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5)) # E: None
+reveal_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5)) # E: None
+
+class Test: ...
+
+def decorate(a: FT) -> FT:
+ return a
+
+reveal_type(np.testing.decorate_methods(Test, decorate)) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, None)) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, "test")) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, b"test")) # E: None
+reveal_type(np.testing.decorate_methods(Test, decorate, re.compile("test"))) # E: None
+
+reveal_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)")) # E: float
+reveal_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5)) # E: float
+
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True)) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err")) # E: None
+reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False)) # E: None
+
+reveal_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2)) # E: None
+
+reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2)) # E: ndarray[Any, dtype[Any]]
+reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.testing.assert_warns(RuntimeWarning)) # E: _GeneratorContextManager[None]
+reveal_type(np.testing.assert_warns(RuntimeWarning, func3, 5)) # E: bool
+
+def func4(a: int, b: str) -> bool: ...
+
+reveal_type(np.testing.assert_no_warnings()) # E: _GeneratorContextManager[None]
+reveal_type(np.testing.assert_no_warnings(func3, 5)) # E: bool
+reveal_type(np.testing.assert_no_warnings(func4, a=1, b="test")) # E: bool
+reveal_type(np.testing.assert_no_warnings(func4, 1, "test")) # E: bool
+
+reveal_type(np.testing.tempdir("test_dir")) # E: _GeneratorContextManager[builtins.str]
+reveal_type(np.testing.tempdir(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes]
+reveal_type(np.testing.tempdir("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str]
+
+reveal_type(np.testing.temppath("test_dir", text=True)) # E: _GeneratorContextManager[builtins.str]
+reveal_type(np.testing.temppath(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes]
+reveal_type(np.testing.temppath("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str]
+
+reveal_type(np.testing.assert_no_gc_cycles()) # E: _GeneratorContextManager[None]
+reveal_type(np.testing.assert_no_gc_cycles(func3, 5)) # E: None
+
+reveal_type(np.testing.break_cycles()) # E: None
+
+reveal_type(np.testing.TestCase()) # E: unittest.case.TestCase
+reveal_type(np.testing.run_module_suite(file_to_run="numpy/tests/test_matlib.py")) # E: None
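The reveal expressions above only exercise the static types; at runtime the
same `np.testing` helpers are ordinary functions and context managers. A
minimal sketch of a few of them (illustrative only, values chosen arbitrarily):

import numpy as np
from numpy.testing import assert_allclose, suppress_warnings

a = np.arange(5, dtype=np.float64)
b = a + 1e-9

# Returns None on success and raises AssertionError on mismatch
assert_allclose(a, b, rtol=1e-6, atol=1e-8)

# suppress_warnings works as a context manager (and as a decorator)
with suppress_warnings() as sup:
    sup.filter(RuntimeWarning)            # returns None, as revealed above
    rec = sup.record(DeprecationWarning)  # a list accumulating matching warnings
    np.float64(1) / np.float64(0)         # the RuntimeWarning is suppressed
print(len(rec))                           # 0: no DeprecationWarning was raised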
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi
new file mode 100644
index 00000000..0dc58d43
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/twodim_base.pyi
@@ -0,0 +1,72 @@
+from typing import Any, TypeVar
+
+import numpy as np
+import numpy.typing as npt
+
+_SCT = TypeVar("_SCT", bound=np.generic)
+
+
+def func1(ar: npt.NDArray[_SCT], a: int) -> npt.NDArray[_SCT]:
+ pass
+
+
+def func2(ar: npt.NDArray[np.number[Any]], a: str) -> npt.NDArray[np.float64]:
+ pass
+
+
+AR_b: npt.NDArray[np.bool_]
+AR_u: npt.NDArray[np.uint64]
+AR_i: npt.NDArray[np.int64]
+AR_f: npt.NDArray[np.float64]
+AR_c: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+
+AR_LIKE_b: list[bool]
+
+reveal_type(np.fliplr(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.fliplr(AR_LIKE_b)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.flipud(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.flipud(AR_LIKE_b)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.eye(10)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.eye(10, M=20, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.eye(10, k=2, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.diag(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.diag(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.diagflat(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.diagflat(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.tri(10)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.tri(10, M=20, dtype=np.int64)) # E: ndarray[Any, dtype[{int64}]]
+reveal_type(np.tri(10, k=2, dtype=int)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.tril(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.tril(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.triu(AR_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.triu(AR_LIKE_b, k=0)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.vander(AR_b)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.vander(AR_u)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.vander(AR_i, N=2)) # E: ndarray[Any, dtype[signedinteger[Any]]]
+reveal_type(np.vander(AR_f, increasing=True)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.vander(AR_c)) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.vander(AR_O)) # E: ndarray[Any, dtype[object_]]
+
+reveal_type(np.histogram2d(AR_i, AR_b)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.histogram2d(AR_f, AR_f)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[floating[Any]]], ndarray[Any, dtype[floating[Any]]]]
+reveal_type(np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b)) # E: Tuple[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[complexfloating[Any, Any]]], ndarray[Any, dtype[complexfloating[Any, Any]]]]
+
+reveal_type(np.mask_indices(10, func1)) # E: Tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+reveal_type(np.mask_indices(8, func2, "0")) # E: Tuple[ndarray[Any, dtype[{intp}]], ndarray[Any, dtype[{intp}]]]
+
+reveal_type(np.tril_indices(10)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]]
+
+reveal_type(np.tril_indices_from(AR_b)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]]
+
+reveal_type(np.triu_indices(10)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]]
+
+reveal_type(np.triu_indices_from(AR_b)) # E: Tuple[ndarray[Any, dtype[{int_}]], ndarray[Any, dtype[{int_}]]]
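The stubs above mirror runtime promotion: `np.eye` and `np.tri` default to
float64, and `np.vander` promotes boolean input to a signed integer dtype. A
quick spot-check, assuming a NumPy release matching these stubs:

import numpy as np

assert np.eye(3).dtype == np.float64
assert np.tri(3, M=4, dtype=np.int64).dtype == np.int64
assert np.vander(np.array([True, False])).dtype.kind == "i"  # signed integer
rows, cols = np.tril_indices(4)
print(rows.dtype)  # the platform default integer (int_)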
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/type_check.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/type_check.pyi
new file mode 100644
index 00000000..ddd319a9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/type_check.pyi
@@ -0,0 +1,73 @@
+import numpy as np
+import numpy.typing as npt
+from numpy._typing import _128Bit
+
+f8: np.float64
+f: float
+
+# NOTE: Avoid importing the platform specific `np.float128` type
+AR_i8: npt.NDArray[np.int64]
+AR_i4: npt.NDArray[np.int32]
+AR_f2: npt.NDArray[np.float16]
+AR_f8: npt.NDArray[np.float64]
+AR_f16: npt.NDArray[np.floating[_128Bit]]
+AR_c8: npt.NDArray[np.complex64]
+AR_c16: npt.NDArray[np.complex128]
+
+AR_LIKE_f: list[float]
+
+class RealObj:
+ real: slice
+
+class ImagObj:
+ imag: slice
+
+reveal_type(np.mintypecode(["f8"], typeset="qfQF")) # E: str
+
+reveal_type(np.asfarray(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asfarray(AR_LIKE_f)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.asfarray(AR_f8, dtype="c16")) # E: ndarray[Any, dtype[complexfloating[Any, Any]]]
+reveal_type(np.asfarray(AR_f8, dtype="i8")) # E: ndarray[Any, dtype[floating[Any]]]
+
+reveal_type(np.real(RealObj())) # E: slice
+reveal_type(np.real(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.real(AR_c16)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.real(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.imag(ImagObj())) # E: slice
+reveal_type(np.imag(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.imag(AR_c16)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.imag(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.iscomplex(f8)) # E: bool_
+reveal_type(np.iscomplex(AR_f8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.iscomplex(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.isreal(f8)) # E: bool_
+reveal_type(np.isreal(AR_f8)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isreal(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]]
+
+reveal_type(np.iscomplexobj(f8)) # E: bool
+reveal_type(np.isrealobj(f8)) # E: bool
+
+reveal_type(np.nan_to_num(f8)) # E: {float64}
+reveal_type(np.nan_to_num(f, copy=True)) # E: Any
+reveal_type(np.nan_to_num(AR_f8, nan=1.5)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.nan_to_num(AR_LIKE_f, posinf=9999)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.real_if_close(AR_f8)) # E: ndarray[Any, dtype[{float64}]]
+reveal_type(np.real_if_close(AR_c16)) # E: Union[ndarray[Any, dtype[{float64}]], ndarray[Any, dtype[{complex128}]]]
+reveal_type(np.real_if_close(AR_c8)) # E: Union[ndarray[Any, dtype[{float32}]], ndarray[Any, dtype[{complex64}]]]
+reveal_type(np.real_if_close(AR_LIKE_f)) # E: ndarray[Any, dtype[Any]]
+
+reveal_type(np.typename("h")) # E: Literal['short']
+reveal_type(np.typename("B")) # E: Literal['unsigned char']
+reveal_type(np.typename("V")) # E: Literal['void']
+reveal_type(np.typename("S1")) # E: Literal['character']
+
+reveal_type(np.common_type(AR_i4)) # E: Type[{float64}]
+reveal_type(np.common_type(AR_f2)) # E: Type[{float16}]
+reveal_type(np.common_type(AR_f2, AR_i4)) # E: Type[{float64}]
+reveal_type(np.common_type(AR_f16, AR_i4)) # E: Type[{float128}]
+reveal_type(np.common_type(AR_c8, AR_f2)) # E: Type[{complex64}]
+reveal_type(np.common_type(AR_f2, AR_c8, AR_i4)) # E: Type[{complex128}]
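The Union in the `np.real_if_close` reveals reflects genuine runtime
branching: the function returns a real array only when every imaginary part
is within tolerance. A short sketch:

import numpy as np

c = np.array([1 + 1e-14j, 2 + 0j])
assert np.real_if_close(c).dtype == np.float64      # negligible imag parts

c2 = np.array([1 + 1j])
assert np.real_if_close(c2).dtype == np.complex128  # significant imag part

# common_type applies the promotion rules exercised above
assert np.common_type(np.arange(3, dtype=np.int32)) is np.float64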
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi
new file mode 100644
index 00000000..2c6fadf9
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunc_config.pyi
@@ -0,0 +1,25 @@
+"""Typing tests for `core._ufunc_config`."""
+
+import numpy as np
+
+def func(a: str, b: int) -> None: ...
+
+class Write:
+ def write(self, value: str) -> None: ...
+
+reveal_type(np.seterr(all=None)) # E: TypedDict('core._ufunc_config._ErrDict'
+reveal_type(np.seterr(divide="ignore")) # E: TypedDict('core._ufunc_config._ErrDict'
+reveal_type(np.seterr(over="warn")) # E: TypedDict('core._ufunc_config._ErrDict'
+reveal_type(np.seterr(under="call")) # E: TypedDict('core._ufunc_config._ErrDict'
+reveal_type(np.seterr(invalid="raise")) # E: TypedDict('core._ufunc_config._ErrDict'
+reveal_type(np.geterr()) # E: TypedDict('core._ufunc_config._ErrDict'
+
+reveal_type(np.setbufsize(4096)) # E: int
+reveal_type(np.getbufsize()) # E: int
+
+reveal_type(np.seterrcall(func)) # E: Union[None, def (builtins.str, builtins.int) -> Any, _SupportsWrite[builtins.str]]
+reveal_type(np.seterrcall(Write())) # E: Union[None, def (builtins.str, builtins.int) -> Any, _SupportsWrite[builtins.str]]
+reveal_type(np.geterrcall()) # E: Union[None, def (builtins.str, builtins.int) -> Any, _SupportsWrite[builtins.str]]
+
+reveal_type(np.errstate(call=func, all="call")) # E: errstate[def (a: builtins.str, b: builtins.int)]
+reveal_type(np.errstate(call=Write(), divide="log", over="log")) # E: errstate[ufunc_config.Write]
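`np.seterr` returns the previous settings dict (the `_ErrDict` TypedDict
revealed above), which makes save/restore straightforward; `np.errstate` is
the context-manager form of the same mechanism. A small sketch:

import numpy as np

old = np.seterr(divide="ignore")    # previous settings, for restoring later
try:
    np.float64(1) / np.float64(0)   # no warning: divide errors are ignored
finally:
    np.seterr(**old)

with np.errstate(over="raise"):
    try:
        np.float64(1e308) * np.float64(10)
    except FloatingPointError as e:
        print(e)                    # overflow encountered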
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi
new file mode 100644
index 00000000..9f06600b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufunclike.pyi
@@ -0,0 +1,29 @@
+from typing import Any
+import numpy as np
+
+AR_LIKE_b: list[bool]
+AR_LIKE_u: list[np.uint32]
+AR_LIKE_i: list[int]
+AR_LIKE_f: list[float]
+AR_LIKE_O: list[np.object_]
+
+AR_U: np.ndarray[Any, np.dtype[np.str_]]
+
+reveal_type(np.fix(AR_LIKE_b)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.fix(AR_LIKE_u)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.fix(AR_LIKE_i)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.fix(AR_LIKE_f)) # E: ndarray[Any, dtype[floating[Any]]]
+reveal_type(np.fix(AR_LIKE_O)) # E: Any
+reveal_type(np.fix(AR_LIKE_f, out=AR_U)) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(np.isposinf(AR_LIKE_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isposinf(AR_LIKE_u)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isposinf(AR_LIKE_i)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isposinf(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isposinf(AR_LIKE_f, out=AR_U)) # E: ndarray[Any, dtype[str_]]
+
+reveal_type(np.isneginf(AR_LIKE_b)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isneginf(AR_LIKE_u)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isneginf(AR_LIKE_i)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isneginf(AR_LIKE_f)) # E: ndarray[Any, dtype[bool_]]
+reveal_type(np.isneginf(AR_LIKE_f, out=AR_U)) # E: ndarray[Any, dtype[str_]]
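As the `out=`-based reveals above suggest, these functions write into and
return the supplied output array at runtime. For example:

import numpy as np

out = np.empty(3, dtype=np.float64)
res = np.fix([-1.5, 0.2, 2.9], out=out)  # round towards zero
assert res is out                        # the out array itself is returned
print(res)                               # [-1.  0.  2.]

print(np.isposinf([np.inf, -np.inf, 1.0]))  # [ True False False]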
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi
new file mode 100644
index 00000000..3bf83c82
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/ufuncs.pyi
@@ -0,0 +1,68 @@
+import numpy as np
+import numpy.typing as npt
+
+f8: np.float64
+AR_f8: npt.NDArray[np.float64]
+AR_i8: npt.NDArray[np.int64]
+
+reveal_type(np.absolute.__doc__) # E: str
+reveal_type(np.absolute.types) # E: builtins.list[builtins.str]
+
+reveal_type(np.absolute.__name__) # E: Literal['absolute']
+reveal_type(np.absolute.ntypes) # E: Literal[20]
+reveal_type(np.absolute.identity) # E: None
+reveal_type(np.absolute.nin) # E: Literal[1]
+reveal_type(np.absolute.nout) # E: Literal[1]
+reveal_type(np.absolute.nargs) # E: Literal[2]
+reveal_type(np.absolute.signature) # E: None
+reveal_type(np.absolute(f8)) # E: Any
+reveal_type(np.absolute(AR_f8)) # E: ndarray
+reveal_type(np.absolute.at(AR_f8, AR_i8)) # E: None
+
+reveal_type(np.add.__name__) # E: Literal['add']
+reveal_type(np.add.ntypes) # E: Literal[22]
+reveal_type(np.add.identity) # E: Literal[0]
+reveal_type(np.add.nin) # E: Literal[2]
+reveal_type(np.add.nout) # E: Literal[1]
+reveal_type(np.add.nargs) # E: Literal[3]
+reveal_type(np.add.signature) # E: None
+reveal_type(np.add(f8, f8)) # E: Any
+reveal_type(np.add(AR_f8, f8)) # E: ndarray
+reveal_type(np.add.at(AR_f8, AR_i8, f8)) # E: None
+reveal_type(np.add.reduce(AR_f8, axis=0)) # E: Any
+reveal_type(np.add.accumulate(AR_f8)) # E: ndarray
+reveal_type(np.add.reduceat(AR_f8, AR_i8)) # E: ndarray
+reveal_type(np.add.outer(f8, f8)) # E: Any
+reveal_type(np.add.outer(AR_f8, f8)) # E: ndarray
+
+reveal_type(np.frexp.__name__) # E: Literal['frexp']
+reveal_type(np.frexp.ntypes) # E: Literal[4]
+reveal_type(np.frexp.identity) # E: None
+reveal_type(np.frexp.nin) # E: Literal[1]
+reveal_type(np.frexp.nout) # E: Literal[2]
+reveal_type(np.frexp.nargs) # E: Literal[3]
+reveal_type(np.frexp.signature) # E: None
+reveal_type(np.frexp(f8)) # E: Tuple[Any, Any]
+reveal_type(np.frexp(AR_f8)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
+
+reveal_type(np.divmod.__name__) # E: Literal['divmod']
+reveal_type(np.divmod.ntypes) # E: Literal[15]
+reveal_type(np.divmod.identity) # E: None
+reveal_type(np.divmod.nin) # E: Literal[2]
+reveal_type(np.divmod.nout) # E: Literal[2]
+reveal_type(np.divmod.nargs) # E: Literal[4]
+reveal_type(np.divmod.signature) # E: None
+reveal_type(np.divmod(f8, f8)) # E: Tuple[Any, Any]
+reveal_type(np.divmod(AR_f8, f8)) # E: Tuple[ndarray[Any, dtype[Any]], ndarray[Any, dtype[Any]]]
+
+reveal_type(np.matmul.__name__) # E: Literal['matmul']
+reveal_type(np.matmul.ntypes) # E: Literal[19]
+reveal_type(np.matmul.identity) # E: None
+reveal_type(np.matmul.nin) # E: Literal[2]
+reveal_type(np.matmul.nout) # E: Literal[1]
+reveal_type(np.matmul.nargs) # E: Literal[3]
+reveal_type(np.matmul.signature) # E: Literal['(n?,k),(k,m?)->(n?,m?)']
+reveal_type(np.matmul(AR_f8, AR_f8)) # E: Any
+reveal_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)])) # E: Any
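The Literal types above correspond to concrete attributes that every
`np.ufunc` instance carries at runtime; the exact `ntypes` values are
version-specific, but the arity attributes are stable. A quick inspection:

import numpy as np

assert np.add.nin == 2 and np.add.nout == 1 and np.add.nargs == 3
assert np.add.identity == 0
assert np.frexp.nout == 2     # hence the Tuple[...] reveals above
assert np.matmul.signature == "(n?,k),(k,m?)->(n?,m?)"
print(np.absolute.ntypes, np.absolute.types[:3])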
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/version.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/version.pyi
new file mode 100644
index 00000000..e5383764
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/version.pyi
@@ -0,0 +1,8 @@
+import numpy.version
+
+reveal_type(numpy.version.version) # E: str
+reveal_type(numpy.version.__version__) # E: str
+reveal_type(numpy.version.full_version) # E: str
+reveal_type(numpy.version.git_revision) # E: str
+reveal_type(numpy.version.release) # E: bool
+reveal_type(numpy.version.short_version) # E: str
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
new file mode 100644
index 00000000..19fa432f
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
@@ -0,0 +1,9 @@
+import numpy as np
+
+reveal_type(np.ModuleDeprecationWarning()) # E: ModuleDeprecationWarning
+reveal_type(np.VisibleDeprecationWarning()) # E: VisibleDeprecationWarning
+reveal_type(np.ComplexWarning()) # E: ComplexWarning
+reveal_type(np.RankWarning()) # E: RankWarning
+reveal_type(np.TooHardError()) # E: TooHardError
+reveal_type(np.AxisError("test")) # E: AxisError
+reveal_type(np.AxisError(5, 1)) # E: AxisError
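These are ordinary exception classes at runtime. `np.AxisError` accepts
either a pre-formatted message or an axis/ndim pair; on recent releases the
pair is also stored as attributes. For instance:

import numpy as np

try:
    np.sum(np.zeros((2, 2)), axis=5)
except np.AxisError as e:
    print(e)   # axis 5 is out of bounds for array of dimension 2

err = np.AxisError(5, ndim=1)
print(err.axis, err.ndim)  # 5 1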
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/test_generic_alias.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_generic_alias.py
new file mode 100644
index 00000000..861babd4
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_generic_alias.py
@@ -0,0 +1,188 @@
+from __future__ import annotations
+
+import sys
+import copy
+import types
+import pickle
+import weakref
+from typing import TypeVar, Any, Union, Callable
+
+import pytest
+import numpy as np
+from numpy._typing._generic_alias import _GenericAlias
+from typing_extensions import Unpack
+
+ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
+T1 = TypeVar("T1")
+T2 = TypeVar("T2")
+DType = _GenericAlias(np.dtype, (ScalarType,))
+NDArray = _GenericAlias(np.ndarray, (Any, DType))
+
+# NOTE: The `npt._GenericAlias` *class* isn't quite stable on python >=3.11.
+# This is not a problem during runtime (as it's 3.8-exclusive), but we still
+# need it on >=3.9 in order to verify that its semantics match those of the
+# `types.GenericAlias` replacement. xref numpy/numpy#21526
+if sys.version_info >= (3, 9):
+ DType_ref = types.GenericAlias(np.dtype, (ScalarType,))
+ NDArray_ref = types.GenericAlias(np.ndarray, (Any, DType_ref))
+ FuncType = Callable[["_GenericAlias | types.GenericAlias"], Any]
+else:
+ DType_ref = Any
+ NDArray_ref = Any
+ FuncType = Callable[["_GenericAlias"], Any]
+
+GETATTR_NAMES = sorted(set(dir(np.ndarray)) - _GenericAlias._ATTR_EXCEPTIONS)
+
+BUFFER = np.array([1], dtype=np.int64)
+BUFFER.setflags(write=False)
+
+def _get_subclass_mro(base: type) -> tuple[type, ...]:
+ class Subclass(base): # type: ignore[misc,valid-type]
+ pass
+ return Subclass.__mro__[1:]
+
+
+class TestGenericAlias:
+ """Tests for `numpy._typing._generic_alias._GenericAlias`."""
+
+ @pytest.mark.parametrize("name,func", [
+ ("__init__", lambda n: n),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, Any)),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (Any,))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (Any, Any))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, T1)),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (T1,))),
+ ("__init__", lambda n: _GenericAlias(np.ndarray, (T1, T2))),
+ ("__origin__", lambda n: n.__origin__),
+ ("__args__", lambda n: n.__args__),
+ ("__parameters__", lambda n: n.__parameters__),
+ ("__mro_entries__", lambda n: n.__mro_entries__([object])),
+ ("__hash__", lambda n: hash(n)),
+ ("__repr__", lambda n: repr(n)),
+ ("__getitem__", lambda n: n[np.float64]),
+ ("__getitem__", lambda n: n[ScalarType][np.float64]),
+ ("__getitem__", lambda n: n[Union[np.int64, ScalarType]][np.float64]),
+ ("__getitem__", lambda n: n[Union[T1, T2]][np.float32, np.float64]),
+ ("__eq__", lambda n: n == n),
+ ("__ne__", lambda n: n != np.ndarray),
+ ("__call__", lambda n: n((1,), np.int64, BUFFER)),
+ ("__call__", lambda n: n(shape=(1,), dtype=np.int64, buffer=BUFFER)),
+ ("subclassing", lambda n: _get_subclass_mro(n)),
+ ("pickle", lambda n: n == pickle.loads(pickle.dumps(n))),
+ ])
+ def test_pass(self, name: str, func: FuncType) -> None:
+ """Compare `types.GenericAlias` with its numpy-based backport.
+
+        Check whether ``func`` runs as intended and that both `GenericAlias`
+ and `_GenericAlias` return the same result.
+
+ """
+ value = func(NDArray)
+
+ if sys.version_info >= (3, 9):
+ value_ref = func(NDArray_ref)
+ assert value == value_ref
+
+ @pytest.mark.parametrize("name,func", [
+ ("__copy__", lambda n: n == copy.copy(n)),
+ ("__deepcopy__", lambda n: n == copy.deepcopy(n)),
+ ])
+ def test_copy(self, name: str, func: FuncType) -> None:
+ value = func(NDArray)
+
+ # xref bpo-45167
+ GE_398 = (
+ sys.version_info[:2] == (3, 9) and sys.version_info >= (3, 9, 8)
+ )
+ if GE_398 or sys.version_info >= (3, 10, 1):
+ value_ref = func(NDArray_ref)
+ assert value == value_ref
+
+ def test_dir(self) -> None:
+ value = dir(NDArray)
+ if sys.version_info < (3, 9):
+ return
+
+        # A number of attributes only exist in `types.GenericAlias` in >= 3.11
+ if sys.version_info < (3, 11, 0, "beta", 3):
+ value.remove("__typing_unpacked_tuple_args__")
+ if sys.version_info < (3, 11, 0, "beta", 1):
+ value.remove("__unpacked__")
+ assert value == dir(NDArray_ref)
+
+ @pytest.mark.parametrize("name,func,dev_version", [
+ ("__iter__", lambda n: len(list(n)), ("beta", 1)),
+ ("__iter__", lambda n: next(iter(n)), ("beta", 1)),
+ ("__unpacked__", lambda n: n.__unpacked__, ("beta", 1)),
+ ("Unpack", lambda n: Unpack[n], ("beta", 1)),
+
+        # The right operand should now have `__unpacked__ = True`,
+        # and the two are thus no longer equivalent
+ ("__ne__", lambda n: n != next(iter(n)), ("beta", 1)),
+
+ # >= beta3
+ ("__typing_unpacked_tuple_args__",
+ lambda n: n.__typing_unpacked_tuple_args__, ("beta", 3)),
+
+ # >= beta4
+ ("__class__", lambda n: n.__class__ == type(n), ("beta", 4)),
+ ])
+ def test_py311_features(
+ self,
+ name: str,
+ func: FuncType,
+ dev_version: tuple[str, int],
+ ) -> None:
+ """Test Python 3.11 features."""
+ value = func(NDArray)
+
+ if sys.version_info >= (3, 11, 0, *dev_version):
+ value_ref = func(NDArray_ref)
+ assert value == value_ref
+
+ def test_weakref(self) -> None:
+ """Test ``__weakref__``."""
+ value = weakref.ref(NDArray)()
+
+ if sys.version_info >= (3, 9, 1): # xref bpo-42332
+ value_ref = weakref.ref(NDArray_ref)()
+ assert value == value_ref
+
+ @pytest.mark.parametrize("name", GETATTR_NAMES)
+ def test_getattr(self, name: str) -> None:
+ """Test that `getattr` wraps around the underlying type,
+ aka ``__origin__``.
+
+ """
+ value = getattr(NDArray, name)
+ value_ref1 = getattr(np.ndarray, name)
+
+ if sys.version_info >= (3, 9):
+ value_ref2 = getattr(NDArray_ref, name)
+ assert value == value_ref1 == value_ref2
+ else:
+ assert value == value_ref1
+
+ @pytest.mark.parametrize("name,exc_type,func", [
+ ("__getitem__", TypeError, lambda n: n[()]),
+ ("__getitem__", TypeError, lambda n: n[Any, Any]),
+ ("__getitem__", TypeError, lambda n: n[Any][Any]),
+ ("isinstance", TypeError, lambda n: isinstance(np.array(1), n)),
+ ("issublass", TypeError, lambda n: issubclass(np.ndarray, n)),
+ ("setattr", AttributeError, lambda n: setattr(n, "__origin__", int)),
+ ("setattr", AttributeError, lambda n: setattr(n, "test", int)),
+ ("getattr", AttributeError, lambda n: getattr(n, "test")),
+ ])
+ def test_raise(
+ self,
+ name: str,
+ exc_type: type[BaseException],
+ func: FuncType,
+ ) -> None:
+ """Test operations that are supposed to raise."""
+ with pytest.raises(exc_type):
+ func(NDArray)
+
+ if sys.version_info >= (3, 9):
+ with pytest.raises(exc_type):
+ func(NDArray_ref)
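On Python >= 3.9 the comparison performed by `test_pass` can be reproduced by
hand: the backport and the builtin are expected to be observably equivalent.
A minimal sketch (note that `_GenericAlias` is a private, version-sensitive
API):

import types
from typing import Any, TypeVar

import numpy as np
from numpy._typing._generic_alias import _GenericAlias

T = TypeVar("T", bound=np.generic)
backport = _GenericAlias(np.ndarray, (Any, _GenericAlias(np.dtype, (T,))))
builtin = types.GenericAlias(np.ndarray, (Any, types.GenericAlias(np.dtype, (T,))))

assert backport.__origin__ is builtin.__origin__ is np.ndarray
print(backport[np.float64])  # parametrization works on both aliases
print(builtin[np.float64])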
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py
new file mode 100644
index 00000000..a898b3e2
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_isfile.py
@@ -0,0 +1,30 @@
+import os
+from pathlib import Path
+
+import numpy as np
+from numpy.testing import assert_
+
+ROOT = Path(np.__file__).parents[0]
+FILES = [
+ ROOT / "py.typed",
+ ROOT / "__init__.pyi",
+ ROOT / "ctypeslib.pyi",
+ ROOT / "core" / "__init__.pyi",
+ ROOT / "distutils" / "__init__.pyi",
+ ROOT / "f2py" / "__init__.pyi",
+ ROOT / "fft" / "__init__.pyi",
+ ROOT / "lib" / "__init__.pyi",
+ ROOT / "linalg" / "__init__.pyi",
+ ROOT / "ma" / "__init__.pyi",
+ ROOT / "matrixlib" / "__init__.pyi",
+ ROOT / "polynomial" / "__init__.pyi",
+ ROOT / "random" / "__init__.pyi",
+ ROOT / "testing" / "__init__.pyi",
+]
+
+
+class TestIsFile:
+ def test_isfile(self):
+ """Test if all ``.pyi`` files are properly installed."""
+ for file in FILES:
+ assert_(os.path.isfile(file))
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py
new file mode 100644
index 00000000..44d06900
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_runtime.py
@@ -0,0 +1,113 @@
+"""Test the runtime usage of `numpy.typing`."""
+
+from __future__ import annotations
+
+import sys
+from typing import (
+ get_type_hints,
+ Union,
+ NamedTuple,
+ get_args,
+ get_origin,
+ Any,
+)
+
+import pytest
+import numpy as np
+import numpy.typing as npt
+import numpy._typing as _npt
+
+
+class TypeTup(NamedTuple):
+ typ: type
+ args: tuple[type, ...]
+ origin: None | type
+
+
+if sys.version_info >= (3, 9):
+ NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray)
+else:
+ NDArrayTup = TypeTup(npt.NDArray, (), None)
+
+TYPES = {
+ "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union),
+ "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union),
+ "NBitBase": TypeTup(npt.NBitBase, (), None),
+ "NDArray": NDArrayTup,
+}
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_args(name: type, tup: TypeTup) -> None:
+ """Test `typing.get_args`."""
+ typ, ref = tup.typ, tup.args
+ out = get_args(typ)
+ assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_origin(name: type, tup: TypeTup) -> None:
+ """Test `typing.get_origin`."""
+ typ, ref = tup.typ, tup.origin
+ out = get_origin(typ)
+ assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_type_hints(name: type, tup: TypeTup) -> None:
+ """Test `typing.get_type_hints`."""
+ typ = tup.typ
+
+ # Explicitly set `__annotations__` in order to circumvent the
+ # stringification performed by `from __future__ import annotations`
+ def func(a): pass
+ func.__annotations__ = {"a": typ, "return": None}
+
+ out = get_type_hints(func)
+ ref = {"a": typ, "return": type(None)}
+ assert out == ref
+
+
+@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys())
+def test_get_type_hints_str(name: type, tup: TypeTup) -> None:
+ """Test `typing.get_type_hints` with string-representation of types."""
+ typ_str, typ = f"npt.{name}", tup.typ
+
+ # Explicitly set `__annotations__` in order to circumvent the
+ # stringification performed by `from __future__ import annotations`
+ def func(a): pass
+ func.__annotations__ = {"a": typ_str, "return": None}
+
+ out = get_type_hints(func)
+ ref = {"a": typ, "return": type(None)}
+ assert out == ref
+
+
+def test_keys() -> None:
+ """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced."""
+ keys = TYPES.keys()
+ ref = set(npt.__all__)
+ assert keys == ref
+
+
+PROTOCOLS: dict[str, tuple[type[Any], object]] = {
+ "_SupportsDType": (_npt._SupportsDType, np.int64(1)),
+ "_SupportsArray": (_npt._SupportsArray, np.arange(10)),
+ "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)),
+ "_NestedSequence": (_npt._NestedSequence, [1]),
+}
+
+
+@pytest.mark.parametrize("cls,obj", PROTOCOLS.values(), ids=PROTOCOLS.keys())
+class TestRuntimeProtocol:
+ def test_isinstance(self, cls: type[Any], obj: object) -> None:
+ assert isinstance(obj, cls)
+ assert not isinstance(None, cls)
+
+ def test_issubclass(self, cls: type[Any], obj: object) -> None:
+ if cls is _npt._SupportsDType:
+ pytest.xfail(
+ "Protocols with non-method members don't support issubclass()"
+ )
+ assert issubclass(type(obj), cls)
+ assert not issubclass(type(None), cls)
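The same introspection works outside the test suite: `npt.ArrayLike` and
`npt.DTypeLike` are Unions, while `npt.NDArray` is a parametrized `ndarray`
alias on Python >= 3.9. A short sketch:

from typing import Union, get_args, get_origin

import numpy.typing as npt

assert get_origin(npt.ArrayLike) is Union
assert get_origin(npt.DTypeLike) is Union
print(len(get_args(npt.DTypeLike)))  # number of union members

print(get_origin(npt.NDArray))       # <class 'numpy.ndarray'> on >= 3.9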
diff --git a/venv/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py
new file mode 100644
index 00000000..5011339b
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/typing/tests/test_typing.py
@@ -0,0 +1,455 @@
+from __future__ import annotations
+
+import importlib.util
+import itertools
+import os
+import re
+import shutil
+from collections import defaultdict
+from collections.abc import Iterator
+from typing import IO, TYPE_CHECKING
+
+import pytest
+import numpy as np
+import numpy.typing as npt
+from numpy.typing.mypy_plugin import (
+ _PRECISION_DICT,
+ _EXTENDED_PRECISION_LIST,
+ _C_INTP,
+)
+
+try:
+ from mypy import api
+except ImportError:
+ NO_MYPY = True
+else:
+ NO_MYPY = False
+
+if TYPE_CHECKING:
+ # We need this as annotation, but it's located in a private namespace.
+ # As a compromise, do *not* import it during runtime
+ from _pytest.mark.structures import ParameterSet
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MISC_DIR = os.path.join(DATA_DIR, "misc")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
+
+#: A dictionary with file names as keys and lists of mypy stdout lines as
+#: values. Populated by the `run_mypy` fixture.
+OUTPUT_MYPY: dict[str, list[str]] = {}
+
+
+def _key_func(key: str) -> str:
+ """Split at the first occurrence of the ``:`` character.
+
+ Windows drive-letters (*e.g.* ``C:``) are ignored herein.
+ """
+ drive, tail = os.path.splitdrive(key)
+ return os.path.join(drive, tail.split(":", 1)[0])
+
+
+def _strip_filename(msg: str) -> str:
+ """Strip the filename from a mypy message."""
+ _, tail = os.path.splitdrive(msg)
+ return tail.split(":", 1)[-1]
+
+
+def strip_func(match: re.Match[str]) -> str:
+ """`re.sub` helper function for stripping module names."""
+ return match.groups()[1]
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.fixture(scope="module", autouse=True)
+def run_mypy() -> None:
+ """Clears the cache and run mypy before running any of the typing tests.
+
+ The mypy results are cached in `OUTPUT_MYPY` for further use.
+
+ The cache refresh can be skipped using
+
+ NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
+ """
+    if (
+        os.path.isdir(CACHE_DIR)
+        # NB: `bool(os.environ.get(...))` would treat "0" as truthy, so
+        # compare against the string to honour the documented opt-out
+        and os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", "1") != "0"
+    ):
+ shutil.rmtree(CACHE_DIR)
+
+ for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
+ # Run mypy
+ stdout, stderr, exit_code = api.run([
+ "--config-file",
+ MYPY_INI,
+ "--cache-dir",
+ CACHE_DIR,
+ directory,
+ ])
+ if stderr:
+ pytest.fail(f"Unexpected mypy standard error\n\n{stderr}")
+ elif exit_code not in {0, 1}:
+ pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}")
+ stdout = stdout.replace('*', '')
+
+ # Parse the output
+ iterator = itertools.groupby(stdout.split("\n"), key=_key_func)
+ OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
+
+
+def get_test_cases(directory: str) -> Iterator[ParameterSet]:
+ for root, _, files in os.walk(directory):
+ for fname in files:
+ short_fname, ext = os.path.splitext(fname)
+ if ext in (".pyi", ".py"):
+ fullpath = os.path.join(root, fname)
+ yield pytest.param(fullpath, id=short_fname)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
+def test_success(path) -> None:
+ # Alias `OUTPUT_MYPY` so that it appears in the local namespace
+ output_mypy = OUTPUT_MYPY
+ if path in output_mypy:
+ msg = "Unexpected mypy output\n\n"
+ msg += "\n".join(_strip_filename(v) for v in output_mypy[path])
+ raise AssertionError(msg)
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
+def test_fail(path: str) -> None:
+ __tracebackhide__ = True
+
+ with open(path) as fin:
+ lines = fin.readlines()
+
+ errors = defaultdict(lambda: "")
+
+ output_mypy = OUTPUT_MYPY
+ assert path in output_mypy
+ for error_line in output_mypy[path]:
+ error_line = _strip_filename(error_line).split("\n", 1)[0]
+ match = re.match(
+ r"(?P<lineno>\d+): (error|note): .+$",
+ error_line,
+ )
+ if match is None:
+ raise ValueError(f"Unexpected error line format: {error_line}")
+ lineno = int(match.group('lineno'))
+ errors[lineno] += f'{error_line}\n'
+
+ for i, line in enumerate(lines):
+ lineno = i + 1
+ if (
+ line.startswith('#')
+ or (" E:" not in line and lineno not in errors)
+ ):
+ continue
+
+ target_line = lines[lineno - 1]
+ if "# E:" in target_line:
+ expression, _, marker = target_line.partition(" # E: ")
+ expected_error = errors[lineno].strip()
+ marker = marker.strip()
+ _test_fail(path, expression, marker, expected_error, lineno)
+ else:
+ pytest.fail(
+ f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}"
+ )
+
+
+_FAIL_MSG1 = """Extra error at line {}
+
+Expression: {}
+Extra error: {!r}
+"""
+
+_FAIL_MSG2 = """Error mismatch at line {}
+
+Expression: {}
+Expected error: {!r}
+Observed error: {!r}
+"""
+
+
+def _test_fail(
+ path: str,
+ expression: str,
+ error: str,
+ expected_error: None | str,
+ lineno: int,
+) -> None:
+ if expected_error is None:
+ raise AssertionError(_FAIL_MSG1.format(lineno, expression, error))
+ elif error not in expected_error:
+ raise AssertionError(_FAIL_MSG2.format(
+ lineno, expression, expected_error, error
+ ))
+
+
+def _construct_ctypes_dict() -> dict[str, str]:
+ dct = {
+ "ubyte": "c_ubyte",
+ "ushort": "c_ushort",
+ "uintc": "c_uint",
+ "uint": "c_ulong",
+ "ulonglong": "c_ulonglong",
+ "byte": "c_byte",
+ "short": "c_short",
+ "intc": "c_int",
+ "int_": "c_long",
+ "longlong": "c_longlong",
+ "single": "c_float",
+ "double": "c_double",
+ "longdouble": "c_longdouble",
+ }
+
+ # Match `ctypes` names to the first ctypes type with a given kind and
+ # precision, e.g. {"c_double": "c_double", "c_longdouble": "c_double"}
+ # if both types represent 64-bit floats.
+ # In this context "first" is defined by the order of `dct`
+ ret = {}
+ visited: dict[tuple[str, int], str] = {}
+ for np_name, ct_name in dct.items():
+ np_scalar = getattr(np, np_name)()
+
+ # Find the first `ctypes` type for a given `kind`/`itemsize` combo
+ key = (np_scalar.dtype.kind, np_scalar.dtype.itemsize)
+ ret[ct_name] = visited.setdefault(key, f"ctypes.{ct_name}")
+ return ret
+
+
+def _construct_format_dict() -> dict[str, str]:
+ dct = {k.split(".")[-1]: v.replace("numpy", "numpy._typing") for
+ k, v in _PRECISION_DICT.items()}
+
+ return {
+ "uint8": "numpy.unsignedinteger[numpy._typing._8Bit]",
+ "uint16": "numpy.unsignedinteger[numpy._typing._16Bit]",
+ "uint32": "numpy.unsignedinteger[numpy._typing._32Bit]",
+ "uint64": "numpy.unsignedinteger[numpy._typing._64Bit]",
+ "uint128": "numpy.unsignedinteger[numpy._typing._128Bit]",
+ "uint256": "numpy.unsignedinteger[numpy._typing._256Bit]",
+ "int8": "numpy.signedinteger[numpy._typing._8Bit]",
+ "int16": "numpy.signedinteger[numpy._typing._16Bit]",
+ "int32": "numpy.signedinteger[numpy._typing._32Bit]",
+ "int64": "numpy.signedinteger[numpy._typing._64Bit]",
+ "int128": "numpy.signedinteger[numpy._typing._128Bit]",
+ "int256": "numpy.signedinteger[numpy._typing._256Bit]",
+ "float16": "numpy.floating[numpy._typing._16Bit]",
+ "float32": "numpy.floating[numpy._typing._32Bit]",
+ "float64": "numpy.floating[numpy._typing._64Bit]",
+ "float80": "numpy.floating[numpy._typing._80Bit]",
+ "float96": "numpy.floating[numpy._typing._96Bit]",
+ "float128": "numpy.floating[numpy._typing._128Bit]",
+ "float256": "numpy.floating[numpy._typing._256Bit]",
+ "complex64": ("numpy.complexfloating"
+ "[numpy._typing._32Bit, numpy._typing._32Bit]"),
+ "complex128": ("numpy.complexfloating"
+ "[numpy._typing._64Bit, numpy._typing._64Bit]"),
+ "complex160": ("numpy.complexfloating"
+ "[numpy._typing._80Bit, numpy._typing._80Bit]"),
+ "complex192": ("numpy.complexfloating"
+ "[numpy._typing._96Bit, numpy._typing._96Bit]"),
+ "complex256": ("numpy.complexfloating"
+ "[numpy._typing._128Bit, numpy._typing._128Bit]"),
+ "complex512": ("numpy.complexfloating"
+ "[numpy._typing._256Bit, numpy._typing._256Bit]"),
+
+ "ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
+ "ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
+ "uintc": f"numpy.unsignedinteger[{dct['_NBitIntC']}]",
+ "uintp": f"numpy.unsignedinteger[{dct['_NBitIntP']}]",
+ "uint": f"numpy.unsignedinteger[{dct['_NBitInt']}]",
+ "ulonglong": f"numpy.unsignedinteger[{dct['_NBitLongLong']}]",
+ "byte": f"numpy.signedinteger[{dct['_NBitByte']}]",
+ "short": f"numpy.signedinteger[{dct['_NBitShort']}]",
+ "intc": f"numpy.signedinteger[{dct['_NBitIntC']}]",
+ "intp": f"numpy.signedinteger[{dct['_NBitIntP']}]",
+ "int_": f"numpy.signedinteger[{dct['_NBitInt']}]",
+ "longlong": f"numpy.signedinteger[{dct['_NBitLongLong']}]",
+
+ "half": f"numpy.floating[{dct['_NBitHalf']}]",
+ "single": f"numpy.floating[{dct['_NBitSingle']}]",
+ "double": f"numpy.floating[{dct['_NBitDouble']}]",
+ "longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
+ "csingle": ("numpy.complexfloating"
+ f"[{dct['_NBitSingle']}, {dct['_NBitSingle']}]"),
+ "cdouble": ("numpy.complexfloating"
+ f"[{dct['_NBitDouble']}, {dct['_NBitDouble']}]"),
+ "clongdouble": (
+ "numpy.complexfloating"
+ f"[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]"
+ ),
+
+ # numpy.typing
+ "_NBitInt": dct['_NBitInt'],
+
+ # numpy.ctypeslib
+ "c_intp": f"ctypes.{_C_INTP}"
+ }
+
+
+#: A dictionary with all supported format keys (as keys)
+#: and matching values
+FORMAT_DICT: dict[str, str] = _construct_format_dict()
+FORMAT_DICT.update(_construct_ctypes_dict())
+
+
+def _parse_reveals(file: IO[str]) -> tuple[npt.NDArray[np.str_], list[str]]:
+ """Extract and parse all ``" # E: "`` comments from the passed
+ file-like object.
+
+ All format keys will be substituted for their respective value
+ from `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes
+ ``"numpy.floating[numpy._typing._64Bit]"``.
+ """
+ string = file.read().replace("*", "")
+
+ # Grab all `# E:`-based comments and matching expressions
+ expression_array, _, comments_array = np.char.partition(
+ string.split("\n"), sep=" # E: "
+ ).T
+ comments = "/n".join(comments_array)
+
+ # Only search for the `{*}` pattern within comments, otherwise
+ # there is the risk of accidentally grabbing dictionaries and sets
+ key_set = set(re.findall(r"\{(.*?)\}", comments))
+ kwargs = {
+ k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for
+ k in key_set
+ }
+ fmt_str = comments.format(**kwargs)
+
+ return expression_array, fmt_str.split("/n")
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
+def test_reveal(path: str) -> None:
+ """Validate that mypy correctly infers the return-types of
+ the expressions in `path`.
+ """
+ __tracebackhide__ = True
+
+ with open(path) as fin:
+ expression_array, reveal_list = _parse_reveals(fin)
+
+ output_mypy = OUTPUT_MYPY
+ assert path in output_mypy
+ for error_line in output_mypy[path]:
+ error_line = _strip_filename(error_line)
+ match = re.match(
+ r"(?P<lineno>\d+): note: .+$",
+ error_line,
+ )
+ if match is None:
+ raise ValueError(f"Unexpected reveal line format: {error_line}")
+ lineno = int(match.group('lineno')) - 1
+ assert "Revealed type is" in error_line
+
+ marker = reveal_list[lineno]
+ expression = expression_array[lineno]
+ _test_reveal(path, expression, marker, error_line, 1 + lineno)
+
+
+_REVEAL_MSG = """Reveal mismatch at line {}
+
+Expression: {}
+Expected reveal: {!r}
+Observed reveal: {!r}
+"""
+_STRIP_PATTERN = re.compile(r"(\w+\.)+(\w+)")
+
+
+def _test_reveal(
+ path: str,
+ expression: str,
+ reveal: str,
+ expected_reveal: str,
+ lineno: int,
+) -> None:
+ """Error-reporting helper function for `test_reveal`."""
+ stripped_reveal = _STRIP_PATTERN.sub(strip_func, reveal)
+ stripped_expected_reveal = _STRIP_PATTERN.sub(strip_func, expected_reveal)
+ if stripped_reveal not in stripped_expected_reveal:
+ raise AssertionError(
+ _REVEAL_MSG.format(lineno,
+ expression,
+ stripped_expected_reveal,
+ stripped_reveal)
+ )
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
+def test_code_runs(path: str) -> None:
+ """Validate that the code in `path` properly during runtime."""
+ path_without_extension, _ = os.path.splitext(path)
+ dirname, filename = path.split(os.sep)[-2:]
+
+ spec = importlib.util.spec_from_file_location(
+ f"{dirname}.{filename}", path
+ )
+ assert spec is not None
+ assert spec.loader is not None
+
+ test_module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(test_module)
+
+
+LINENO_MAPPING = {
+ 3: "uint128",
+ 4: "uint256",
+ 6: "int128",
+ 7: "int256",
+ 9: "float80",
+ 10: "float96",
+ 11: "float128",
+ 12: "float256",
+ 14: "complex160",
+ 15: "complex192",
+ 16: "complex256",
+ 17: "complex512",
+}
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
+def test_extended_precision() -> None:
+ path = os.path.join(MISC_DIR, "extended_precision.pyi")
+ output_mypy = OUTPUT_MYPY
+ assert path in output_mypy
+
+ with open(path, "r") as f:
+ expression_list = f.readlines()
+
+ for _msg in output_mypy[path]:
+ *_, _lineno, msg_typ, msg = _msg.split(":")
+
+ msg = _strip_filename(msg)
+ lineno = int(_lineno)
+ expression = expression_list[lineno - 1].rstrip("\n")
+ msg_typ = msg_typ.strip()
+ assert msg_typ in {"error", "note"}
+
+ if LINENO_MAPPING[lineno] in _EXTENDED_PRECISION_LIST:
+ if msg_typ == "error":
+ raise ValueError(f"Unexpected reveal line format: {lineno}")
+ else:
+ marker = FORMAT_DICT[LINENO_MAPPING[lineno]]
+ _test_reveal(path, expression, marker, msg, lineno)
+ else:
+ if msg_typ == "error":
+ marker = "Module has no attribute"
+ _test_fail(path, expression, marker, msg, lineno)
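The `{float64}`-style placeholders in the reveal comments are expanded via
ordinary `str.format` against `FORMAT_DICT`; the core of `_parse_reveals`
reduces to a few lines. A simplified sketch, with a toy one-entry format
dict standing in for the real one:

import re

FORMAT = {"float64": "numpy.floating[numpy._typing._64Bit]"}

comment = "ndarray[Any, dtype[{float64}]]"
keys = set(re.findall(r"\{(.*?)\}", comment))
kwargs = {k: FORMAT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in keys}
print(comment.format(**kwargs))
# ndarray[Any, dtype[numpy.floating[numpy._typing._64Bit]]]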
diff --git a/venv/lib/python3.9/site-packages/numpy/version.py b/venv/lib/python3.9/site-packages/numpy/version.py
new file mode 100644
index 00000000..d5657d0d
--- /dev/null
+++ b/venv/lib/python3.9/site-packages/numpy/version.py
@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+from ._version import get_versions
+
+__all__ = ['version', '__version__', 'full_version', 'git_revision', 'release', 'short_version']
+
+vinfo: dict[str, str] = get_versions()
+version = vinfo["version"]
+__version__ = vinfo.get("closest-tag", vinfo["version"])
+full_version = vinfo['version']
+git_revision = vinfo['full-revisionid']
+release = 'dev0' not in version and '+' not in version
+short_version = vinfo['version'].split("+")[0]
+
+del get_versions, vinfo
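The `release` and `short_version` fields are derived purely from the version
string, so they are easy to spot-check against a sample versioneer dict (the
values below are made up for illustration):

vinfo = {"version": "1.24.0.dev0+1234.gabcdef", "full-revisionid": "abcdef0"}

version = vinfo["version"]
release = "dev0" not in version and "+" not in version
short_version = version.split("+")[0]

print(release)        # False: a dev build is not a release
print(short_version)  # 1.24.0.dev0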